mirror of https://github.com/facebookincubator/mvfst.git synced 2025-08-08 09:42:06 +03:00

Limit write loop time by a fraction of RTT

Summary:
Before we had pacing, we limited the write function to looping at most 5
times. Then pacing came in, and the pacing burst size was used as the limit
when pacing is enabled. Then pacing tokens were introduced to increase the
send rate when pacing is enabled. Now, when the cwnd is large enough, the
pacing burst size can be really large, which means this write function can
loop for a long time. When using loopback as the network interface, for
example, the write function can loop for more than 1 RTT, which delays
processing of peer packets that have already arrived and leads to both wrong
RTT estimation and wrong BBR bandwidth estimation.

This diff limits the write loop to a fraction of RTT as well; the current
default value is 1/25 of SRTT.
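
As a back-of-the-envelope illustration (standalone C++ using plain
std::chrono, not mvfst code; the 50ms SRTT is just a sample value), the
default fraction of 25 leaves the write loop a 2ms budget:

#include <cassert>
#include <chrono>

int main() {
  using namespace std::chrono;
  const auto srtt = duration_cast<microseconds>(milliseconds(50)); // sample SRTT
  const microseconds::rep writeLimitRttFraction = 25; // the new default
  // The loop may keep writing only while the elapsed time stays under
  // srtt / fraction, here 50ms / 25 = 2ms.
  const auto budget = srtt / writeLimitRttFraction;
  assert(budget == microseconds(2000));
  return 0;
}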

Reviewed By: mjoras

Differential Revision: D18864699

fbshipit-source-id: 0b57ee4138e4788d132152a4aa363959065f6f7f
Yang Chi
2019-12-30 15:10:33 -08:00
committed by Facebook Github Bot
parent b8bac00147
commit 68e0a1add2
4 changed files with 52 additions and 1 deletion


@@ -235,6 +235,8 @@ constexpr uint64_t kDefaultMinBurstPackets = 5;
 // but the notifications can get delayed if the event loop is busy
 // this is subject to testing but I would suggest a value >= 200usec
 constexpr std::chrono::microseconds kDefaultPacingTimerTickInterval{1000};
+// Fraction of RTT that is used to limit how long a write function can loop
+constexpr std::chrono::microseconds::rep kDefaultWriteLimitRttFraction = 25;
 // Congestion control:
 constexpr folly::StringPiece kCongestionControlCubicStr = "cubic";


@@ -927,7 +927,22 @@ uint64_t writeConnectionDataToSocket(
       connection.debugState.noWriteReason = NoWriteReason::EMPTY_SCHEDULER;
     }
   }
-  while (scheduler.hasData() && ioBufBatch.getPktSent() < packetLimit) {
+  auto writeLoopBeginTime = Clock::now();
+  // Helper functor to check whether we have been writing in a loop for longer
+  // than the RTT fraction we are allowed to. It only kicks in after we have
+  // written one batch in batching write mode.
+  auto timeLimitHelper = [&]() -> bool {
+    auto batchSize = connection.transportSettings.batchingMode ==
+            quic::QuicBatchingMode::BATCHING_MODE_NONE
+        ? connection.transportSettings.writeConnectionDataPacketsLimit
+        : connection.transportSettings.maxBatchSize;
+    return ioBufBatch.getPktSent() < batchSize ||
+        connection.lossState.srtt == 0us ||
+        Clock::now() - writeLoopBeginTime < connection.lossState.srtt /
+            connection.transportSettings.writeLimitRttFraction;
+  };
+  while (scheduler.hasData() && ioBufBatch.getPktSent() < packetLimit &&
+         timeLimitHelper()) {
     auto packetNum = getNextPacketNum(connection, pnSpace);
     auto header = builder(srcConnId, dstConnId, packetNum, version, token);
     uint32_t writableBytes = folly::to<uint32_t>(std::min<uint64_t>(
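
A simplified standalone model of the helper above (plain std::chrono; the
function name is mine, not mvfst's) makes the short-circuit order explicit:
the time check is skipped until a full batch has been sent, and whenever no
SRTT sample exists yet:

#include <chrono>

bool mayContinueWriting(
    uint64_t pktSent,
    uint64_t batchSize,
    std::chrono::microseconds srtt,
    std::chrono::microseconds elapsed,
    std::chrono::microseconds::rep fraction) {
  // Always allow the first batch; without an SRTT sample there is no budget
  // to enforce; otherwise stop once elapsed time reaches srtt / fraction.
  return pktSent < batchSize ||
      srtt == std::chrono::microseconds::zero() ||
      elapsed < srtt / fraction;
}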


@@ -1985,5 +1985,36 @@ TEST_F(QuicTransportFunctionsTest, TimeoutBasedRetxCountUpdate) {
   EXPECT_EQ(247, conn->lossState.timeoutBasedRtxCount);
 }
 
+TEST_F(QuicTransportFunctionsTest, WriteLimitBytRttFraction) {
+  auto conn = createConn();
+  conn->lossState.srtt = 50ms;
+  auto mockCongestionController = std::make_unique<MockCongestionController>();
+  auto rawCongestionController = mockCongestionController.get();
+  conn->congestionController = std::move(mockCongestionController);
+  EventBase evb;
+  auto socket = std::make_unique<folly::test::MockAsyncUDPSocket>(&evb);
+  auto rawSocket = socket.get();
+
+  auto stream1 = conn->streamManager->createNextBidirectionalStream().value();
+  auto buf = buildRandomInputData(2048 * 1024);
+  writeDataToQuicStream(*stream1, buf->clone(), true);
+
+  EXPECT_CALL(*rawSocket, write(_, _)).WillRepeatedly(Return(1));
+  EXPECT_CALL(*rawCongestionController, getWritableBytes())
+      .WillRepeatedly(Return(50));
+  EXPECT_GT(
+      500,
+      writeQuicDataToSocket(
+          *rawSocket,
+          *conn,
+          *conn->clientConnectionId,
+          *conn->serverConnectionId,
+          *aead,
+          *headerCipher,
+          getVersion(*conn),
+          500 /* packetLimit */));
+}
+
 } // namespace test
 } // namespace quic


@@ -90,6 +90,9 @@ struct TransportSettings {
   // writeConnectionDataToSocket.
   uint64_t writeConnectionDataPacketsLimit{
       kDefaultWriteConnectionDataPacketLimit};
+  // Fraction of RTT that is used to limit how long a write function can loop
+  std::chrono::microseconds::rep writeLimitRttFraction{
+      kDefaultWriteLimitRttFraction};
   // Frequency of sending flow control updates. We can send one update every
   // flowControlRttFrequency * RTT if the flow control changes.
   uint16_t flowControlRttFrequency{2};
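
For completeness, a hypothetical usage sketch (not part of this diff; it only
assumes the TransportSettings field added above): tightening the budget from
the default SRTT/25 to SRTT/50 just means overriding the new field before the
transport starts writing.

quic::TransportSettings makeSettings() {
  quic::TransportSettings settings;
  // A larger fraction means a shorter write-loop budget per RTT.
  settings.writeLimitRttFraction = 50; // cap each write loop at SRTT / 50
  return settings;
}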