Mirror of https://github.com/facebookincubator/mvfst.git
Add retriable udp socket error counts
Reviewed By: kvtsoy
Differential Revision: D63718295
fbshipit-source-id: f60c01f607def4ee8073238533b4af18e79a3706
Committed by: Facebook GitHub Bot
Parent: 0541f3c8ee
Commit: 924183d2d3
@@ -71,6 +71,7 @@ bool IOBufQuicBatch::flushInternal() {
   auto consumed = batchWriter_->write(sock_, peerAddress_);
   if (consumed < 0) {
     firstSocketErrno = errno;
+    lastRetryableErrno_ = errno;
   }
   written = (consumed >= 0);
   if (happyEyeballsState_) {
@@ -42,6 +42,10 @@ class IOBufQuicBatch {
     return result_;
   }
 
+  [[nodiscard]] int getLastRetryableErrno() const {
+    return lastRetryableErrno_;
+  }
+
  private:
   void reset();
 
@@ -59,6 +63,7 @@ class IOBufQuicBatch {
   QuicTransportStatsCallback* statsCallback_{nullptr};
   QuicClientConnectionState::HappyEyeballsState* happyEyeballsState_;
   BufQuicBatchResult result_;
+  int lastRetryableErrno_{};
 };
 
 } // namespace quic
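The flushInternal() change records errno into lastRetryableErrno_ immediately after the failing batchWriter_->write() call; errno is thread-local and any later library call may overwrite it, so the capture has to happen right at the failure site. A minimal standalone sketch of that capture-and-expose pattern follows; the UdpBatchSender class and its names are illustrative only, not mvfst's API.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

#include <cerrno>
#include <cstddef>
#include <cstring>
#include <iostream>

// Illustrative stand-in for a batch writer: it remembers the errno of the
// last failed send so a caller can inspect it after the write attempt.
class UdpBatchSender {
 public:
  explicit UdpBatchSender(int fd) : fd_(fd) {}

  bool send(const void* data, size_t len, const sockaddr_in& peer) {
    ssize_t rc = ::sendto(
        fd_,
        data,
        len,
        0,
        reinterpret_cast<const sockaddr*>(&peer),
        sizeof(peer));
    if (rc < 0) {
      // Capture errno right away, before any other call can clobber it.
      lastRetryableErrno_ = errno;
      return false;
    }
    return true;
  }

  [[nodiscard]] int getLastRetryableErrno() const {
    return lastRetryableErrno_;
  }

 private:
  int fd_;
  int lastRetryableErrno_{0};
};

int main() {
  int fd = ::socket(AF_INET, SOCK_DGRAM, 0);
  if (fd < 0) {
    return 1;
  }
  sockaddr_in peer{};
  peer.sin_family = AF_INET;
  peer.sin_port = htons(9); // discard port; delivery is irrelevant here
  peer.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

  UdpBatchSender sender(fd);
  char payload[16] = {};
  if (!sender.send(payload, sizeof(payload), peer)) {
    std::cout << "send failed: "
              << std::strerror(sender.getLastRetryableErrno()) << "\n";
  } else {
    std::cout << "send succeeded\n";
  }
  ::close(fd);
  return 0;
}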
@@ -221,6 +221,17 @@ WriteQuicDataResult writeQuicDataToSocketImpl(
   return result;
 }
 
+void updateErrnoCount(
+    QuicConnectionStateBase& connection,
+    IOBufQuicBatch& ioBufBatch) {
+  int lastErrno = ioBufBatch.getLastRetryableErrno();
+  if (lastErrno == EAGAIN || lastErrno == EWOULDBLOCK) {
+    connection.eagainOrEwouldblockCount++;
+  } else if (lastErrno == ENOBUFS) {
+    connection.enobufsCount++;
+  }
+}
+
 DataPathResult continuousMemoryBuildScheduleEncrypt(
     QuicConnectionStateBase& connection,
     PacketHeader header,
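The updateErrnoCount() helper added above only counts the two retryable failure classes: EAGAIN/EWOULDBLOCK (socket send buffer temporarily full) and ENOBUFS (kernel out of buffer space); any other errno value is ignored. A self-contained sketch of the same classification follows, using simplified stand-ins for QuicConnectionStateBase and IOBufQuicBatch, which carry far more state in the real code.

#include <cerrno>
#include <cstdint>
#include <iostream>

// Simplified stand-ins for the mvfst types involved.
struct ConnectionCounters {
  uint64_t eagainOrEwouldblockCount{0};
  uint64_t enobufsCount{0};
};

struct BatchResult {
  int lastRetryableErrno{0};
};

// Mirrors the classification: EAGAIN/EWOULDBLOCK and ENOBUFS are the
// retryable errors that get counted; anything else is left alone.
void updateErrnoCount(ConnectionCounters& conn, const BatchResult& batch) {
  int lastErrno = batch.lastRetryableErrno;
  if (lastErrno == EAGAIN || lastErrno == EWOULDBLOCK) {
    conn.eagainOrEwouldblockCount++;
  } else if (lastErrno == ENOBUFS) {
    conn.enobufsCount++;
  }
}

int main() {
  ConnectionCounters conn;
  updateErrnoCount(conn, BatchResult{EAGAIN});
  updateErrnoCount(conn, BatchResult{ENOBUFS});
  updateErrnoCount(conn, BatchResult{0}); // nothing retryable recorded
  std::cout << "eagain/ewouldblock=" << conn.eagainOrEwouldblockCount
            << " enobufs=" << conn.enobufsCount << "\n";
  return 0;
}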
@@ -254,6 +265,7 @@ DataPathResult continuousMemoryBuildScheduleEncrypt(
   if (!packet || packet->packet.frames.empty()) {
     rollbackBuf();
     ioBufBatch.flush();
+    updateErrnoCount(connection, ioBufBatch);
     if (connection.loopDetectorCallback) {
       connection.writeDebugState.noWriteReason = NoWriteReason::NO_FRAME;
     }
@@ -263,6 +275,7 @@ DataPathResult continuousMemoryBuildScheduleEncrypt(
     // No more space remaining.
     rollbackBuf();
     ioBufBatch.flush();
+    updateErrnoCount(connection, ioBufBatch);
     if (connection.loopDetectorCallback) {
       connection.writeDebugState.noWriteReason = NoWriteReason::NO_BODY;
     }
@@ -307,6 +320,7 @@ DataPathResult continuousMemoryBuildScheduleEncrypt(
   }
   // TODO: I think we should add an API that doesn't need a buffer.
   bool ret = ioBufBatch.write(nullptr /* no need to pass buf */, encodedSize);
+  updateErrnoCount(connection, ioBufBatch);
   return DataPathResult::makeWriteResult(
       ret, std::move(result), encodedSize, encodedBodySize);
 }
@@ -333,6 +347,7 @@ DataPathResult iobufChainBasedBuildScheduleEncrypt(
   auto& packet = result.packet;
   if (!packet || packet->packet.frames.empty()) {
     ioBufBatch.flush();
+    updateErrnoCount(connection, ioBufBatch);
     if (connection.loopDetectorCallback) {
       connection.writeDebugState.noWriteReason = NoWriteReason::NO_FRAME;
     }
@@ -341,6 +356,7 @@ DataPathResult iobufChainBasedBuildScheduleEncrypt(
   if (packet->body.empty()) {
     // No more space remaining.
     ioBufBatch.flush();
+    updateErrnoCount(connection, ioBufBatch);
     if (connection.loopDetectorCallback) {
       connection.writeDebugState.noWriteReason = NoWriteReason::NO_BODY;
     }
@@ -378,6 +394,7 @@ DataPathResult iobufChainBasedBuildScheduleEncrypt(
         << " encodedBodySize=" << encodedBodySize;
   }
   bool ret = ioBufBatch.write(std::move(packetBuf), encodedSize);
+  updateErrnoCount(connection, ioBufBatch);
   return DataPathResult::makeWriteResult(
       ret, std::move(result), encodedSize, encodedBodySize);
 }
@@ -1525,7 +1542,9 @@ WriteQuicDataResult writeConnectionDataToSocket(
   // If we have a pending write to retry. Flush that first and make sure it
   // succeeds before scheduling any new data.
   if (pendingBufferedWrite) {
-    if (!ioBufBatch.flush()) {
+    bool flushSuccess = ioBufBatch.flush();
+    updateErrnoCount(connection, ioBufBatch);
+    if (!flushSuccess) {
       // Could not flush retried data. Return empty write result and wait for
       // next retry.
       return {0, 0, 0};
@@ -1591,6 +1610,7 @@ WriteQuicDataResult writeConnectionDataToSocket(
       // If we're returning because we couldn't schedule more packets,
       // make sure we flush the buffer in this function.
       ioBufBatch.flush();
+      updateErrnoCount(connection, ioBufBatch);
       return {ioBufBatch.getPktSent(), 0, bytesWritten};
     }
     // If we build a packet, we updateConnection(), even if write might have
@@ -1632,11 +1652,13 @@ WriteQuicDataResult writeConnectionDataToSocket(
       // With SinglePacketInplaceBatchWriter we always write one packet, and so
       // ioBufBatch needs a flush.
       ioBufBatch.flush();
+      updateErrnoCount(connection, ioBufBatch);
     }
   }
 
   // Ensure that the buffer is flushed before returning
   ioBufBatch.flush();
+  updateErrnoCount(connection, ioBufBatch);
 
   if (connection.transportSettings.dataPathType ==
       DataPathType::ContinuousMemory) {
@@ -1881,4 +1881,12 @@ uint64_t QuicClientTransport::getNumPingFramesSent() const {
   return conn_->numPingFramesSent;
 }
 
+uint64_t QuicClientTransport::getEagainOrEwouldblockCount() const {
+  return conn_->eagainOrEwouldblockCount;
+}
+
+uint64_t QuicClientTransport::getEnobufsCount() const {
+  return conn_->enobufsCount;
+}
+
 } // namespace quic
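The two new getters on QuicClientTransport surface totals that only ever increase in this commit, so a caller interested in a rate would sample them periodically and diff consecutive readings. A hedged usage sketch follows; the RetryableErrorCounters struct is a hypothetical stand-in for the transport, used only so the example is self-contained.

#include <cstdint>
#include <iostream>

// Hypothetical stand-in exposing just the two counter getters added by
// this commit; a real caller would hold a QuicClientTransport instead.
struct RetryableErrorCounters {
  uint64_t eagainOrEwouldblock{0};
  uint64_t enobufs{0};

  uint64_t getEagainOrEwouldblockCount() const { return eagainOrEwouldblock; }
  uint64_t getEnobufsCount() const { return enobufs; }
};

int main() {
  RetryableErrorCounters transport;

  // First sample, e.g. at the start of a stats interval.
  uint64_t prevEagain = transport.getEagainOrEwouldblockCount();
  uint64_t prevEnobufs = transport.getEnobufsCount();

  // ... writes happen; simulate some retryable socket errors being counted.
  transport.eagainOrEwouldblock += 3;
  transport.enobufs += 1;

  // Second sample: the deltas are the retryable errors seen in the interval.
  std::cout << "eagain/ewouldblock in interval: "
            << transport.getEagainOrEwouldblockCount() - prevEagain << "\n";
  std::cout << "enobufs in interval: "
            << transport.getEnobufsCount() - prevEnobufs << "\n";
  return 0;
}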
@@ -222,6 +222,10 @@ class QuicClientTransport
 
   uint64_t getNumPingFramesSent() const;
 
+  uint64_t getEagainOrEwouldblockCount() const;
+
+  uint64_t getEnobufsCount() const;
+
   class HappyEyeballsConnAttemptDelayTimeout : public QuicTimerCallback {
    public:
     explicit HappyEyeballsConnAttemptDelayTimeout(
@@ -501,6 +501,9 @@ struct QuicConnectionStateBase : public folly::DelayedDestruction {
 
   uint64_t numPingFramesSent{0};
 
+  uint64_t eagainOrEwouldblockCount{0};
+  uint64_t enobufsCount{0};
+
   struct ConnectionFlowControlState {
     // The size of the connection flow control window.
     uint64_t windowSize{0};