Mirror of https://github.com/facebookincubator/mvfst.git (synced 2025-08-08 09:42:06 +03:00)
Allow calling getTimeUntilNextWrite with a timepoint to avoid test flakiness

Summary: This fixes test flakiness that resulted from using EXPECT_NEAR with varying timing on devservers and in CI.

Reviewed By: yangchi

Differential Revision: D29574796

fbshipit-source-id: 804935e8e0148fcccec642a1894e8863bee4465b
committed by Facebook GitHub Bot
parent ec799b3860
commit a9171a1cfa
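The change below, in essence, lets the caller supply the time point that the pacer measures against instead of the pacer reading the clock itself. When a test pins one timestamp and reuses it, the computed interval no longer depends on how long the test body takes to run, so exact EXPECT_EQ assertions can replace the EXPECT_NEAR cushions. The following standalone sketch illustrates the idea; ToyPacer, onWrite, and writeInterval_ are invented names for illustration, not mvfst code.

// Standalone sketch of the technique, not mvfst code: ToyPacer, onWrite and
// writeInterval_ are illustrative names only.
#include <cassert>
#include <chrono>

using Clock = std::chrono::steady_clock;
using TimePoint = Clock::time_point;

class ToyPacer {
 public:
  // Callers may pass a fixed time point; production code omits the argument
  // and gets the current time, mirroring the default argument in the diff.
  std::chrono::microseconds getTimeUntilNextWrite(
      TimePoint currentTime = Clock::now()) const {
    auto sinceLastWrite =
        std::chrono::duration_cast<std::chrono::microseconds>(
            currentTime - lastWriteTime_);
    return sinceLastWrite >= writeInterval_ ? std::chrono::microseconds{0}
                                            : writeInterval_ - sinceLastWrite;
  }

  void onWrite(TimePoint when) { lastWriteTime_ = when; }
  void setWriteInterval(std::chrono::microseconds interval) {
    writeInterval_ = interval;
  }

 private:
  TimePoint lastWriteTime_{Clock::now()};
  std::chrono::microseconds writeInterval_{0};
};

int main() {
  ToyPacer pacer;
  auto timestamp = Clock::now(); // one pinned timestamp reused everywhere
  pacer.setWriteInterval(std::chrono::microseconds{500000});
  pacer.onWrite(timestamp);
  // With a pinned time point the result is exact and repeatable, so a test
  // can assert equality instead of EXPECT_NEAR with a timing cushion.
  assert(pacer.getTimeUntilNextWrite(timestamp).count() == 500000);
  return 0;
}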
@@ -3170,7 +3170,7 @@ TEST_F(QuicTransportTest, AlreadyScheduledPacingNoWrite) {
   EXPECT_CALL(*socket_, write(_, _)).WillOnce(Return(0));
   EXPECT_CALL(*rawPacer, updateAndGetWriteBatchSize(_))
       .WillRepeatedly(Return(1));
-  EXPECT_CALL(*rawPacer, getTimeUntilNextWrite())
+  EXPECT_CALL(*rawPacer, getTimeUntilNextWrite(_))
       .WillRepeatedly(Return(3600000ms));
   // This will write out 100 bytes, leave 100 bytes behind. FunctionLooper will
   // schedule a pacing timeout.
@@ -99,8 +99,8 @@ void TokenlessPacer::onPacketSent() {}
 
 void TokenlessPacer::onPacketsLoss() {}
 
-std::chrono::microseconds TokenlessPacer::getTimeUntilNextWrite() const {
-  auto now = Clock::now();
+std::chrono::microseconds TokenlessPacer::getTimeUntilNextWrite(
+    TimePoint now) const {
   // If we don't have a lastWriteTime_, we want to write immediately.
   auto timeSinceLastWrite =
       std::chrono::duration_cast<std::chrono::microseconds>(
@@ -35,7 +35,8 @@ class TokenlessPacer : public Pacer {
 
   void setRttFactor(uint8_t numerator, uint8_t denominator) override;
 
-  std::chrono::microseconds getTimeUntilNextWrite() const override;
+  std::chrono::microseconds getTimeUntilNextWrite(
+      TimePoint currentTime = Clock::now()) const override;
 
   uint64_t updateAndGetWriteBatchSize(TimePoint currentTime) override;
 
@@ -120,41 +120,42 @@ TEST_F(TokenlessPacerTest, ChangeMaxPacingRate) {
         .build();
   });
   auto rtt = 500 * 1000us;
+  auto timestamp = Clock::now();
   // Request pacing at 50 Mbps
   pacer.refreshPacingRate(3125000, rtt);
   EXPECT_EQ(1, calculatorCallCount);
   EXPECT_EQ(
       3125000 / kDefaultUDPSendPacketLen,
-      pacer.updateAndGetWriteBatchSize(Clock::now()));
-  EXPECT_NEAR(rtt.count(), pacer.getTimeUntilNextWrite().count(), 100);
+      pacer.updateAndGetWriteBatchSize(timestamp));
+  EXPECT_EQ(rtt.count(), pacer.getTimeUntilNextWrite(timestamp).count());
 
   // Set max pacing rate to 40 Mbps
   pacer.setMaxPacingRate(5 * 1000 * 1000u); // Bytes per second
   // This should bring down the pacer rate to 40 Mbps
-  EXPECT_EQ(0us, pacer.getTimeUntilNextWrite());
-  auto burst = pacer.updateAndGetWriteBatchSize(Clock::now());
-  auto interval = pacer.getTimeUntilNextWrite();
+  EXPECT_EQ(0us, pacer.getTimeUntilNextWrite(timestamp));
+  auto burst = pacer.updateAndGetWriteBatchSize(timestamp);
+  auto interval = pacer.getTimeUntilNextWrite(timestamp);
   uint64_t pacerRate =
       burst * kDefaultUDPSendPacketLen * std::chrono::seconds{1} / interval;
-  EXPECT_NEAR(5 * 1000 * 1000u, pacerRate, 5000); // 0.1% cushion for timing
+  EXPECT_EQ(5 * 1000 * 1000u, pacerRate);
   pacer.reset();
   // Requesting a rate of 50 Mbps should not change interval or burst
   pacer.refreshPacingRate(3125000, rtt);
   EXPECT_EQ(1, calculatorCallCount); // Calculator not called again.
-  EXPECT_EQ(burst, pacer.updateAndGetWriteBatchSize(Clock::now()));
-  EXPECT_NEAR(interval.count(), pacer.getTimeUntilNextWrite().count(), 1000);
+  EXPECT_EQ(burst, pacer.updateAndGetWriteBatchSize(timestamp));
+  EXPECT_EQ(interval.count(), pacer.getTimeUntilNextWrite(timestamp).count());
   pacer.reset();
 
   // The setPacingRate API shouldn't make changes either
   pacer.setPacingRate(6250 * 1000u); // 50 Mbps
-  EXPECT_EQ(burst, pacer.updateAndGetWriteBatchSize(Clock::now()));
-  EXPECT_NEAR(interval.count(), pacer.getTimeUntilNextWrite().count(), 1000);
+  EXPECT_EQ(burst, pacer.updateAndGetWriteBatchSize(timestamp));
+  EXPECT_EQ(interval.count(), pacer.getTimeUntilNextWrite(timestamp).count());
   pacer.reset();
 
   // Increasing max pacing rate to 75 Mbps shouldn't make changes
   pacer.setMaxPacingRate(9375 * 1000u);
-  EXPECT_EQ(burst, pacer.updateAndGetWriteBatchSize(Clock::now()));
-  EXPECT_NEAR(interval.count(), pacer.getTimeUntilNextWrite().count(), 1000);
+  EXPECT_EQ(burst, pacer.updateAndGetWriteBatchSize(timestamp));
+  EXPECT_EQ(interval.count(), pacer.getTimeUntilNextWrite(timestamp).count());
   pacer.reset();
 
   // Increase pacing to 50 Mbps and ensure it takes effect
@@ -162,45 +163,48 @@ TEST_F(TokenlessPacerTest, ChangeMaxPacingRate) {
   EXPECT_EQ(2, calculatorCallCount); // Calculator called
   EXPECT_EQ(
       3125000 / kDefaultUDPSendPacketLen,
-      pacer.updateAndGetWriteBatchSize(Clock::now()));
-  EXPECT_NEAR(rtt.count(), pacer.getTimeUntilNextWrite().count(), 1000);
+      pacer.updateAndGetWriteBatchSize(timestamp));
+  EXPECT_EQ(rtt.count(), pacer.getTimeUntilNextWrite(timestamp).count());
   pacer.reset();
 
   // Increase pacing to 80 Mbps using alternative API and ensure rate is limited
   // to 75 Mbps
   pacer.setPacingRate(10 * 1000 * 1000u);
-  burst = pacer.updateAndGetWriteBatchSize(Clock::now());
-  interval = pacer.getTimeUntilNextWrite();
+  burst = pacer.updateAndGetWriteBatchSize(timestamp);
+  interval = pacer.getTimeUntilNextWrite(timestamp);
   pacerRate =
       burst * kDefaultUDPSendPacketLen * std::chrono::seconds{1} / interval;
-  EXPECT_NEAR(9375 * 1000u, pacerRate, 9375); // 0.1% cushion for timing
+  EXPECT_NEAR(9375 * 1000u, pacerRate, 1000); // To accommodate rounding
 }
 
 TEST_F(TokenlessPacerTest, SetMaxPacingRateOnUnlimitedPacer) {
+  auto timestamp = Clock::now();
   // Pacing is currently not pacing
-  EXPECT_EQ(0us, pacer.getTimeUntilNextWrite());
-  EXPECT_NE(0, pacer.updateAndGetWriteBatchSize(Clock::now()));
-  EXPECT_EQ(0us, pacer.getTimeUntilNextWrite());
+  EXPECT_EQ(0us, pacer.getTimeUntilNextWrite(timestamp));
+  EXPECT_NE(0, pacer.updateAndGetWriteBatchSize(timestamp));
+  EXPECT_EQ(0us, pacer.getTimeUntilNextWrite(timestamp));
 
   // Set max pacing rate 40 Mbps and ensure it took effect
   pacer.setMaxPacingRate(5 * 1000 * 1000u); // Bytes per second
-  EXPECT_EQ(0us, pacer.getTimeUntilNextWrite());
-  auto burst = pacer.updateAndGetWriteBatchSize(Clock::now());
-  auto interval = pacer.getTimeUntilNextWrite();
+  EXPECT_EQ(0us, pacer.getTimeUntilNextWrite(timestamp));
+  auto burst = pacer.updateAndGetWriteBatchSize(timestamp);
+  auto interval = pacer.getTimeUntilNextWrite(timestamp);
   uint64_t pacerRate =
       burst * kDefaultUDPSendPacketLen * std::chrono::seconds{1} / interval;
-  EXPECT_NEAR(5 * 1000 * 1000u, pacerRate, 5000); // 0.1% cushion for timing
+  EXPECT_NEAR(5 * 1000 * 1000u, pacerRate, 1000); // To accommodate rounding
 }
 
 TEST_F(TokenlessPacerTest, SetZeroPacingRate) {
+  auto timestamp = Clock::now();
   // A Zero pacing rate should not result in a divide-by-zero
   conn.transportSettings.pacingTimerTickInterval = 1000us;
   pacer.setPacingRate(0);
-  EXPECT_EQ(0, pacer.updateAndGetWriteBatchSize(Clock::now()));
-  EXPECT_NEAR(1000, pacer.getTimeUntilNextWrite().count(), 100);
+  EXPECT_EQ(0, pacer.updateAndGetWriteBatchSize(timestamp));
+  EXPECT_EQ(1000, pacer.getTimeUntilNextWrite(timestamp).count());
 }
 
 TEST_F(TokenlessPacerTest, RefreshPacingRateWhenRTTIsZero) {
+  auto timestamp = Clock::now();
   // rtt=0 should not result in a divide-by-zero
   conn.transportSettings.pacingTimerTickInterval = 1000us;
   pacer.refreshPacingRate(100, 0us);
@@ -208,8 +212,8 @@ TEST_F(TokenlessPacerTest, RefreshPacingRateWhenRTTIsZero) {
   // 0us right after writing
   EXPECT_EQ(
       conn.transportSettings.writeConnectionDataPacketsLimit,
-      pacer.updateAndGetWriteBatchSize(Clock::now()));
-  EXPECT_EQ(0us, pacer.getTimeUntilNextWrite());
+      pacer.updateAndGetWriteBatchSize(timestamp));
+  EXPECT_EQ(0us, pacer.getTimeUntilNextWrite(timestamp));
 }
 
 } // namespace test
@@ -187,7 +187,8 @@ struct Pacer {
   /**
    * API for Trnasport to query the interval before next write
    */
-  virtual std::chrono::microseconds getTimeUntilNextWrite() const = 0;
+  [[nodiscard]] virtual std::chrono::microseconds getTimeUntilNextWrite(
+      TimePoint currentTime = Clock::now()) const = 0;
 
   /**
    * API for Transport to query a recalculated batch size based on currentTime
@@ -41,7 +41,9 @@ class MockPacer : public Pacer {
   MOCK_METHOD1(setMaxPacingRate, void(uint64_t));
   MOCK_METHOD0(reset, void());
   MOCK_METHOD2(setRttFactor, void(uint8_t, uint8_t));
-  MOCK_CONST_METHOD0(getTimeUntilNextWrite, std::chrono::microseconds());
+  MOCK_CONST_METHOD1(
+      getTimeUntilNextWrite,
+      std::chrono::microseconds(TimePoint));
   MOCK_METHOD1(updateAndGetWriteBatchSize, uint64_t(TimePoint));
   MOCK_CONST_METHOD0(getCachedWriteBatchSize, uint64_t());
   MOCK_METHOD1(setAppLimited, void(bool));
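For context on the last hunk: in gmock, a method that now takes one argument is mocked with MOCK_CONST_METHOD1 rather than MOCK_CONST_METHOD0, and expectations on it then need an argument matcher, which is why the transport test above switches to getTimeUntilNextWrite(_). A minimal self-contained sketch of that pattern follows; FakePacerInterface and MockFakePacer are invented names, not mvfst classes.

#include <chrono>
#include <gmock/gmock.h>
#include <gtest/gtest.h>

using Clock = std::chrono::steady_clock;
using TimePoint = Clock::time_point;
using namespace std::chrono_literals;
using ::testing::_;
using ::testing::Return;

// Invented interface standing in for Pacer, for illustration only.
struct FakePacerInterface {
  virtual ~FakePacerInterface() = default;
  virtual std::chrono::microseconds getTimeUntilNextWrite(
      TimePoint currentTime) const = 0;
};

struct MockFakePacer : FakePacerInterface {
  // One-argument method, hence MOCK_CONST_METHOD1 rather than ..._METHOD0.
  MOCK_CONST_METHOD1(
      getTimeUntilNextWrite,
      std::chrono::microseconds(TimePoint));
};

TEST(MockPacerSketch, MatchesAnyTimePoint) {
  MockFakePacer pacer;
  // The wildcard matcher accepts whatever time point the caller passes,
  // mirroring EXPECT_CALL(*rawPacer, getTimeUntilNextWrite(_)) above.
  EXPECT_CALL(pacer, getTimeUntilNextWrite(_))
      .WillRepeatedly(Return(3600000ms));
  EXPECT_EQ(3600000ms, pacer.getTimeUntilNextWrite(Clock::now()));
}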