mirror of
https://github.com/facebookincubator/mvfst.git
synced 2025-08-08 09:42:06 +03:00
Add new batch writer SinglePacketBackpressureBatchWriter to retry failed writes
Summary: The existing batch writers do not handle failed writes to the AsyncUDPSocket. A packet that fails to be written is detected as a packet loss later when feedback is received from the peer. This negatively impacts the congestion controller because of the fake loss signal, and artificially inflates the number of retransmitted packets/bytes. This change adds a new batch writer (SinglePacketBackpressureBatchWriter) that retains the buffers when a write fails. For subsequent writes, the writer retries the same buffer. No new packets are scheduled until the retried buffer succeeds. Notes: - To make sure that retry writes are scheduled, the write callback is installed on the socket when a buffer needs to be retried. - The retries are for an already scheduled packet. The connection state reflects the timing of the first attempt. This could still have an impact on rtt samples, etc., but this is a milder impact compared to fake losses/retransmissions. - Any changes outside of the batch writer only impact the new batch writer. Existing batch writers do not use the fields and are not affected by the changes in this diff. Reviewed By: kvtsoy Differential Revision: D57597576 fbshipit-source-id: 9476d71ce52e383c5946466f64bb5eecd4f5d549
This commit is contained in:
committed by
Facebook GitHub Bot
parent
9674b08e71
commit
71b8af4b1a
@@ -334,6 +334,11 @@ class TestQuicTransport
|
||||
conn_->transportSettings.writeConnectionDataPacketsLimit);
|
||||
}
|
||||
|
||||
// This is to expose the protected pacedWriteDataToSocket() function
|
||||
void pacedWriteDataToSocketThroughTransportBase() {
|
||||
pacedWriteDataToSocket();
|
||||
}
|
||||
|
||||
bool hasWriteCipher() const {
|
||||
return conn_->oneRttWriteCipher != nullptr;
|
||||
}
|
||||
@@ -4825,5 +4830,81 @@ TEST_P(QuicTransportImplTestBase, TestOnSocketWritable) {
|
||||
transport.reset();
|
||||
}
|
||||
|
||||
// Verifies the backpressure write path: when the socket write fails with
// EAGAIN, the transport caches the failed batch, stops the write looper,
// and arms the socket-writable callback so the write can be retried later.
TEST_P(
    QuicTransportImplTestBase,
    TestBackpressureWriterArmsSocketWritableEvent) {
  transport->setServerConnectionId();

  // Configure a single-packet, backpressure-aware write path.
  auto settings = transport->getTransportSettings();
  settings.useSockWritableEvents = true;
  settings.batchingMode = QuicBatchingMode::BATCHING_MODE_NONE;
  settings.maxBatchSize = 1;
  settings.dataPathType = DataPathType::ChainedMemory;
  settings.enableWriterBackpressure = true;
  transport->setTransportSettings(settings);
  transport->getConnectionState().streamManager->refreshTransportSettings(
      settings);

  transport->transportConn->oneRttWriteCipher = test::createNoOpAead();

  // Create a stream that has pending outgoing data.
  auto streamId = transport->createBidirectionalStream().value();
  const auto& conn = transport->transportConn;
  auto stream = transport->getStream(streamId);
  std::string payload = "hello";
  stream->writeBuffer.append(IOBuf::copyBuffer(payload));
  conn->flowControlState.sumCurStreamBufferLen = payload.length();

  // Register the stream as writable so the write loop picks it up.
  conn->streamManager->addWritable(*stream);
  conn->streamManager->updateWritableStreams(*stream);

  // Track arming of the writable callback through the mocked socket.
  bool writableCallbackArmed = false;
  EXPECT_CALL(*socketPtr, isWritableCallbackSet()).WillRepeatedly(Invoke([&]() {
    return writableCallbackArmed;
  }));
  EXPECT_CALL(*socketPtr, resumeWrite(_))
      .WillOnce(Invoke([&](QuicAsyncUDPSocket::WriteCallback*) {
        writableCallbackArmed = true;
        return folly::makeExpected<folly::AsyncSocketException>(folly::Unit());
      }));

  // Make every socket write in the first loop fail with EAGAIN. The batch is
  // flushed twice inside the write loop, so both attempts fail.
  EXPECT_CALL(*socketPtr, write(_, _))
      .Times(2)
      .WillRepeatedly(Invoke([&](const auto& /* addr */,
                                 const std::unique_ptr<folly::IOBuf>& /*buf*/) {
        errno = EAGAIN;
        return 0;
      }));

  transport->writeLooper()->run(true /* thisIteration */);
  EXPECT_TRUE(transport->writeLooper()->isRunning());

  // A write attempt will cache the failed write, stop the write looper, and
  // arm the write callback.
  transport->pacedWriteDataToSocketThroughTransportBase();

  // The failed buffer is retained for a later retry.
  EXPECT_TRUE(conn->pendingWriteBatch_.buf);
  // The write looper has been stopped.
  EXPECT_FALSE(transport->writeLooper()->isRunning());
  // The writable callback has been armed.
  EXPECT_TRUE(writableCallbackArmed);

  // Destroying the transport makes one final write attempt; let it succeed,
  // although its outcome is irrelevant to this test.
  EXPECT_CALL(*socketPtr, write(_, _))
      .Times(1)
      .WillRepeatedly(Invoke([&](const auto& /* addr */,
                                 const std::unique_ptr<folly::IOBuf>& buf) {
        errno = 0;
        return buf->computeChainDataLength();
      }));
  transport.reset();
}
|
||||
|
||||
} // namespace test
|
||||
} // namespace quic
|
||||
|
Reference in New Issue
Block a user