Mirror of https://github.com/facebookincubator/mvfst.git

Add retriable udp socket error counts

Reviewed By: kvtsoy

Differential Revision: D63718295

fbshipit-source-id: f60c01f607def4ee8073238533b4af18e79a3706
Author: Crystal Jin
Date: 2024-10-02 13:45:06 -07:00
Committed by: Facebook GitHub Bot
Parent: 0541f3c8ee
Commit: 924183d2d3

6 changed files with 44 additions and 1 deletion
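
In short: IOBufQuicBatch now records the errno of the most recent failed socket write, QuicTransportFunctions classifies it after every flush via a new updateErrnoCount() helper, and the resulting EAGAIN/EWOULDBLOCK and ENOBUFS counts are kept on QuicConnectionStateBase and exposed through QuicClientTransport getters.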


@@ -71,6 +71,7 @@ bool IOBufQuicBatch::flushInternal() {
   auto consumed = batchWriter_->write(sock_, peerAddress_);
   if (consumed < 0) {
     firstSocketErrno = errno;
+    lastRetryableErrno_ = errno;
   }
   written = (consumed >= 0);
   if (happyEyeballsState_) {
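
The assignment sits immediately after the failed write because errno is only meaningful until the next library call overwrites it. A minimal standalone sketch of that pattern, using hypothetical names and no mvfst types:

    #include <cerrno>
    #include <sys/types.h>
    #include <sys/socket.h>

    // Record errno right after a failed send(), before anything else can
    // clobber it; the caller inspects lastErr only when the result is < 0.
    ssize_t sendAndRecordErrno(int fd, const void* buf, size_t len, int& lastErr) {
      ssize_t n = ::send(fd, buf, len, 0);
      if (n < 0) {
        lastErr = errno;
      }
      return n;
    }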

View File

@@ -42,6 +42,10 @@ class IOBufQuicBatch {
     return result_;
   }
 
+  [[nodiscard]] int getLastRetryableErrno() const {
+    return lastRetryableErrno_;
+  }
+
  private:
   void reset();
@@ -59,6 +63,7 @@ class IOBufQuicBatch {
   QuicTransportStatsCallback* statsCallback_{nullptr};
   QuicClientConnectionState::HappyEyeballsState* happyEyeballsState_;
   BufQuicBatchResult result_;
+  int lastRetryableErrno_{};
 };
 
 } // namespace quic


@@ -221,6 +221,17 @@ WriteQuicDataResult writeQuicDataToSocketImpl(
   return result;
 }
 
+void updateErrnoCount(
+    QuicConnectionStateBase& connection,
+    IOBufQuicBatch& ioBufBatch) {
+  int lastErrno = ioBufBatch.getLastRetryableErrno();
+  if (lastErrno == EAGAIN || lastErrno == EWOULDBLOCK) {
+    connection.eagainOrEwouldblockCount++;
+  } else if (lastErrno == ENOBUFS) {
+    connection.enobufsCount++;
+  }
+}
+
 DataPathResult continuousMemoryBuildScheduleEncrypt(
     QuicConnectionStateBase& connection,
     PacketHeader header,
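
Only these two retryable conditions are counted; any other errno leaves both counters untouched. A self-contained sketch of the same classification, using a hypothetical counter struct rather than the mvfst connection state:

    #include <cerrno>
    #include <cstdint>

    struct RetryableErrorCounters {
      std::uint64_t eagainOrEwouldblock{0};
      std::uint64_t enobufs{0};
    };

    // EAGAIN and EWOULDBLOCK are the same value on Linux but may differ on
    // other platforms, hence the compound check.
    void classifyErrno(int err, RetryableErrorCounters& counters) {
      if (err == EAGAIN || err == EWOULDBLOCK) {
        counters.eagainOrEwouldblock++;
      } else if (err == ENOBUFS) {
        counters.enobufs++;
      }
    }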
@@ -254,6 +265,7 @@ DataPathResult continuousMemoryBuildScheduleEncrypt(
   if (!packet || packet->packet.frames.empty()) {
     rollbackBuf();
     ioBufBatch.flush();
+    updateErrnoCount(connection, ioBufBatch);
     if (connection.loopDetectorCallback) {
       connection.writeDebugState.noWriteReason = NoWriteReason::NO_FRAME;
     }
@@ -263,6 +275,7 @@ DataPathResult continuousMemoryBuildScheduleEncrypt(
     // No more space remaining.
     rollbackBuf();
     ioBufBatch.flush();
+    updateErrnoCount(connection, ioBufBatch);
     if (connection.loopDetectorCallback) {
       connection.writeDebugState.noWriteReason = NoWriteReason::NO_BODY;
     }
@@ -307,6 +320,7 @@ DataPathResult continuousMemoryBuildScheduleEncrypt(
   }
   // TODO: I think we should add an API that doesn't need a buffer.
   bool ret = ioBufBatch.write(nullptr /* no need to pass buf */, encodedSize);
+  updateErrnoCount(connection, ioBufBatch);
   return DataPathResult::makeWriteResult(
       ret, std::move(result), encodedSize, encodedBodySize);
 }
@@ -333,6 +347,7 @@ DataPathResult iobufChainBasedBuildScheduleEncrypt(
   auto& packet = result.packet;
   if (!packet || packet->packet.frames.empty()) {
     ioBufBatch.flush();
+    updateErrnoCount(connection, ioBufBatch);
     if (connection.loopDetectorCallback) {
       connection.writeDebugState.noWriteReason = NoWriteReason::NO_FRAME;
     }
@@ -341,6 +356,7 @@ DataPathResult iobufChainBasedBuildScheduleEncrypt(
   if (packet->body.empty()) {
     // No more space remaining.
     ioBufBatch.flush();
+    updateErrnoCount(connection, ioBufBatch);
     if (connection.loopDetectorCallback) {
       connection.writeDebugState.noWriteReason = NoWriteReason::NO_BODY;
     }
@@ -378,6 +394,7 @@ DataPathResult iobufChainBasedBuildScheduleEncrypt(
         << " encodedBodySize=" << encodedBodySize;
   }
   bool ret = ioBufBatch.write(std::move(packetBuf), encodedSize);
+  updateErrnoCount(connection, ioBufBatch);
   return DataPathResult::makeWriteResult(
       ret, std::move(result), encodedSize, encodedBodySize);
 }
@@ -1525,7 +1542,9 @@ WriteQuicDataResult writeConnectionDataToSocket(
   // If we have a pending write to retry. Flush that first and make sure it
   // succeeds before scheduling any new data.
   if (pendingBufferedWrite) {
-    if (!ioBufBatch.flush()) {
+    bool flushSuccess = ioBufBatch.flush();
+    updateErrnoCount(connection, ioBufBatch);
+    if (!flushSuccess) {
       // Could not flush retried data. Return empty write result and wait for
       // next retry.
       return {0, 0, 0};
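
Capturing the flush result in flushSuccess before testing it lets updateErrnoCount() run on both the success and failure paths, so a retryable error on the buffered write is counted even when the function returns early.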
@@ -1591,6 +1610,7 @@ WriteQuicDataResult writeConnectionDataToSocket(
       // If we're returning because we couldn't schedule more packets,
       // make sure we flush the buffer in this function.
       ioBufBatch.flush();
+      updateErrnoCount(connection, ioBufBatch);
       return {ioBufBatch.getPktSent(), 0, bytesWritten};
     }
     // If we build a packet, we updateConnection(), even if write might have
@@ -1632,11 +1652,13 @@ WriteQuicDataResult writeConnectionDataToSocket(
       // With SinglePacketInplaceBatchWriter we always write one packet, and so
       // ioBufBatch needs a flush.
       ioBufBatch.flush();
+      updateErrnoCount(connection, ioBufBatch);
     }
   }
 
   // Ensure that the buffer is flushed before returning
   ioBufBatch.flush();
+  updateErrnoCount(connection, ioBufBatch);
 
   if (connection.transportSettings.dataPathType ==
       DataPathType::ContinuousMemory) {


@@ -1881,4 +1881,12 @@ uint64_t QuicClientTransport::getNumPingFramesSent() const {
   return conn_->numPingFramesSent;
 }
 
+uint64_t QuicClientTransport::getEagainOrEwouldblockCount() const {
+  return conn_->eagainOrEwouldblockCount;
+}
+
+uint64_t QuicClientTransport::getEnobufsCount() const {
+  return conn_->enobufsCount;
+}
+
 } // namespace quic
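
These getters expose cumulative per-connection counts. A hedged usage sketch, assuming an already-connected client transport and folly's XLOG logging; none of this helper code is part of the commit:

    #include <memory>
    #include <folly/logging/xlog.h>
    #include <quic/client/QuicClientTransport.h>

    // Hypothetical helper: log the retryable send-error counters for a
    // connected client. Both counts accumulate over the connection's life.
    void logRetryableSendErrors(
        const std::shared_ptr<quic::QuicClientTransport>& client) {
      XLOG(INFO) << "eagain/ewouldblock=" << client->getEagainOrEwouldblockCount()
                 << " enobufs=" << client->getEnobufsCount();
    }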


@@ -222,6 +222,10 @@ class QuicClientTransport
   uint64_t getNumPingFramesSent() const;
 
+  uint64_t getEagainOrEwouldblockCount() const;
+
+  uint64_t getEnobufsCount() const;
+
   class HappyEyeballsConnAttemptDelayTimeout : public QuicTimerCallback {
    public:
     explicit HappyEyeballsConnAttemptDelayTimeout(


@@ -501,6 +501,9 @@ struct QuicConnectionStateBase : public folly::DelayedDestruction {
   uint64_t numPingFramesSent{0};
 
+  uint64_t eagainOrEwouldblockCount{0};
+
+  uint64_t enobufsCount{0};
 
   struct ConnectionFlowControlState {
     // The size of the connection flow control window.
     uint64_t windowSize{0};