Mirror of https://github.com/facebookincubator/mvfst.git (synced 2025-04-18 17:24:03 +03:00)
Propagate error in scheduleFramesForPacket and writeData
Summary: As the title says, this continues the theme of adding folly::Expected return types so that errors are propagated to callers instead of being thrown.

Reviewed By: kvtsoy

Differential Revision: D72579218

fbshipit-source-id: 25735535368838f1a4315667cd7e9e9b5df1c485
This commit is contained in:
parent
28b13b22d8
commit
2a8fba588f
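The changes below all follow one pattern: functions that used to throw QuicTransportException (or silently swallow failures) now return folly::Expected, and callers forward the error with folly::makeUnexpected. A minimal, self-contained sketch of that pattern follows; the Error struct and the doStep/doWork helpers are illustrative stand-ins, not mvfst code.

#include <folly/Expected.h>
#include <folly/Unit.h>
#include <string>

// Stand-in for quic::QuicError (illustrative only).
struct Error {
  std::string message;
};

// A fallible step: on failure it returns an error value instead of throwing.
folly::Expected<int, Error> doStep(bool ok) {
  if (!ok) {
    return folly::makeUnexpected(Error{"step failed"});
  }
  return 42;
}

// The caller checks hasError() and propagates, mirroring this commit.
folly::Expected<folly::Unit, Error> doWork(bool ok) {
  auto step = doStep(ok);
  if (step.hasError()) {
    return folly::makeUnexpected(step.error());
  }
  // step.value() holds the result on success.
  return folly::unit;
}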
@@ -218,7 +218,8 @@ FrameScheduler::FrameScheduler(
 QuicConnectionStateBase& conn)
 : name_(name), conn_(conn) {}

-SchedulingResult FrameScheduler::scheduleFramesForPacket(
+folly::Expected<SchedulingResult, QuicError>
+FrameScheduler::scheduleFramesForPacket(
 PacketBuilderInterface&& builder,
 uint32_t writableBytes) {
 size_t shortHeaderPadding = 0;
@@ -796,7 +797,8 @@ bool CloningScheduler::hasData() const {
 conn_.outstandings.numOutstanding() > conn_.outstandings.dsrCount;
 }

-SchedulingResult CloningScheduler::scheduleFramesForPacket(
+folly::Expected<SchedulingResult, QuicError>
+CloningScheduler::scheduleFramesForPacket(
 PacketBuilderInterface&& builder,
 uint32_t writableBytes) {
 // Store header type information before any moves
@@ -903,9 +905,8 @@ SchedulingResult CloningScheduler::scheduleFramesForPacket(

 // Rebuilder will write the rest of frames
 auto rebuildResultExpected = rebuilder.rebuildFromPacket(outstandingPacket);
-// TODO handle error better.
 if (rebuildResultExpected.hasError()) {
-return SchedulingResult(none, none, 0);
+return folly::makeUnexpected(rebuildResultExpected.error());
 }
 if (rebuildResultExpected.value()) {
 return SchedulingResult(
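Call sites updated later in this diff receive folly::Expected<SchedulingResult, QuicError> instead of a bare SchedulingResult and read the success value through value() or operator->. A small self-contained sketch of those two equivalent accessors (Packet and Err are placeholder types, not mvfst types):

#include <folly/Expected.h>
#include <iostream>

struct Packet {
  int size = 0;
};

struct Err {
  const char* msg = "";
};

folly::Expected<Packet, Err> makePacket() {
  return Packet{1200};
}

int main() {
  auto result = makePacket();
  if (result.hasError()) {
    std::cout << "error: " << result.error().msg << "\n";
    return 1;
  }
  // Once hasError() is false, these accessors are interchangeable.
  std::cout << result.value().size << " " << result->size << " " << (*result).size << "\n";
  return 0;
}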
@@ -53,7 +53,8 @@ class QuicPacketScheduler {
 * packet is a clone and the associated ClonedPacketIdentifier for both origin
 * and clone.
 */
-virtual SchedulingResult scheduleFramesForPacket(
+[[nodiscard]] virtual folly::Expected<SchedulingResult, QuicError>
+scheduleFramesForPacket(
 PacketBuilderInterface&& builder,
 uint32_t writableBytes) = 0;

@@ -282,7 +283,8 @@ class FrameScheduler : public QuicPacketScheduler {

 FrameScheduler(folly::StringPiece name, QuicConnectionStateBase& conn);

-SchedulingResult scheduleFramesForPacket(
+[[nodiscard]] folly::Expected<SchedulingResult, QuicError>
+scheduleFramesForPacket(
 PacketBuilderInterface&& builder,
 uint32_t writableBytes) override;

@@ -337,7 +339,8 @@ class CloningScheduler : public QuicPacketScheduler {
 * packet is a clone and the associated ClonedPacketIdentifier for both origin
 * and clone.
 */
-SchedulingResult scheduleFramesForPacket(
+[[nodiscard]] folly::Expected<SchedulingResult, QuicError>
+scheduleFramesForPacket(
 PacketBuilderInterface&& builder,
 uint32_t writableBytes) override;

@@ -174,7 +174,12 @@ void QuicTransportBaseLite::onNetworkData(
 } else {
 // In the closed state, we would want to write a close if possible
 // however the write looper will not be set.
-writeSocketData();
+auto result = writeSocketData();
+if (result.hasError()) {
+VLOG(4) << __func__ << " " << result.error().message << " " << *this;
+exceptionCloseWhat_ = result.error().message;
+closeImpl(result.error());
+}
 }
 } catch (const QuicTransportException& ex) {
 VLOG(4) << __func__ << " " << ex.what() << " " << *this;
@@ -1203,7 +1208,13 @@ void QuicTransportBaseLite::checkForClosedStream() {
 void QuicTransportBaseLite::writeSocketDataAndCatch() {
 [[maybe_unused]] auto self = sharedGuard();
 try {
-writeSocketData();
+auto result = writeSocketData();
+if (result.hasError()) {
+VLOG(4) << __func__ << " " << result.error().message << " " << *this;
+exceptionCloseWhat_ = result.error().message;
+closeImpl(result.error());
+return;
+}
 processCallbacksAfterWriteData();
 } catch (const QuicTransportException& ex) {
 VLOG(4) << __func__ << ex.what() << " " << *this;
@@ -1264,7 +1275,8 @@ void QuicTransportBaseLite::pacedWriteDataToSocket() {
 writeSocketDataAndCatch();
 }

-void QuicTransportBaseLite::writeSocketData() {
+folly::Expected<folly::Unit, QuicError>
+QuicTransportBaseLite::writeSocketData() {
 if (socket_) {
 ++(conn_->writeCount); // incremented on each write (or write attempt)

@@ -1284,12 +1296,15 @@ void QuicTransportBaseLite::writeSocketData() {
 conn_->appLimitedTracker.setNotAppLimited();
 notifyStartWritingFromAppRateLimited();
 }
-writeData();
+auto result = writeData();
+if (result.hasError()) {
+return result;
+}
 if (closeState_ != CloseState::CLOSED) {
 if (conn_->pendingEvents.closeTransport == true) {
-throw QuicTransportException(
-"Max packet number reached",
-TransportErrorCode::PROTOCOL_VIOLATION);
+return folly::makeUnexpected(QuicError(
+TransportErrorCode::PROTOCOL_VIOLATION,
+"Max packet number reached"));
 }
 setLossDetectionAlarm(*conn_, *this);

@@ -1372,6 +1387,7 @@ void QuicTransportBaseLite::writeSocketData() {
 scheduleAckTimeout();
 schedulePathValidationTimeout();
 updateWriteLooper(false);
+return folly::unit;
 }

 // TODO: t64691045 change the closeImpl API to include both the sanitized and
@@ -1555,16 +1571,14 @@ void QuicTransportBaseLite::closeImpl(

 // We don't need no congestion control.
 conn_->congestionController = nullptr;

 sendCloseImmediately = sendCloseImmediately && !isReset && !isAbandon;
 if (sendCloseImmediately) {
 // We might be invoked from the destructor, so just send the connection
 // close directly.
-try {
-writeData();
-} catch (const std::exception& ex) {
-// This could happen if the writes fail.
-LOG(ERROR) << "close threw exception " << ex.what() << " " << *this;
+auto result = writeData();
+if (result.hasError()) {
+LOG(ERROR) << "close failed with error: " << result.error().message << " "
+<< *this;
 }
 }
 drainConnection =
@@ -29,7 +29,7 @@ class QuicTransportBaseLite : virtual public QuicSocketLite,
 * It may also throw an exception in case of an error in which case the
 * connection will be closed.
 */
-virtual void writeData() = 0;
+[[nodiscard]] virtual folly::Expected<folly::Unit, QuicError> writeData() = 0;

 // Interface with the Transport layer when data is available.
 // This is invoked when new data is received from the UDP socket.
@@ -617,7 +617,7 @@ class QuicTransportBaseLite : virtual public QuicSocketLite,
 * both pacing oblivious and writeLooper oblivious. Caller needs to explicitly
 * invoke updateWriteLooper afterwards if that's desired.
 */
-void writeSocketData();
+[[nodiscard]] folly::Expected<folly::Unit, QuicError> writeSocketData();

 void closeImpl(
 Optional<QuicError> error,
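With writeData() and writeSocketData() declared above as [[nodiscard]] Expected-returning members, subclasses report failures through the return value rather than by throwing. A minimal sketch of that shape, using stand-in types rather than the real QuicTransportBaseLite hierarchy:

#include <folly/Expected.h>
#include <folly/Unit.h>
#include <string>

// Stand-in error type (not quic::QuicError).
struct FakeError {
  std::string message;
};

class TransportBase {
 public:
  virtual ~TransportBase() = default;
  // Mirrors the new virtual: callers must inspect the result.
  [[nodiscard]] virtual folly::Expected<folly::Unit, FakeError> writeData() = 0;
};

class LoopbackTransport : public TransportBase {
 public:
  bool failNext{false};

  [[nodiscard]] folly::Expected<folly::Unit, FakeError> writeData() override {
    if (failNext) {
      return folly::makeUnexpected(FakeError{"socket write failed"});
    }
    return folly::unit;
  }
};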
@@ -236,7 +236,8 @@ void updateErrnoCount(
 }
 }

-DataPathResult continuousMemoryBuildScheduleEncrypt(
+[[nodiscard]] folly::Expected<DataPathResult, QuicError>
+continuousMemoryBuildScheduleEncrypt(
 QuicConnectionStateBase& connection,
 PacketHeader header,
 PacketNumberSpace pnSpace,
@@ -265,7 +266,10 @@ DataPathResult continuousMemoryBuildScheduleEncrypt(
 auto result =
 scheduler.scheduleFramesForPacket(std::move(pktBuilder), writableBytes);
 CHECK(connection.bufAccessor->ownsBuffer());
-auto& packet = result.packet;
+if (result.hasError()) {
+return folly::makeUnexpected(result.error());
+}
+auto& packet = result->packet;
 if (!packet || packet->packet.frames.empty()) {
 rollbackBuf();
 ioBufBatch.flush();
@@ -326,10 +330,11 @@ DataPathResult continuousMemoryBuildScheduleEncrypt(
 bool ret = ioBufBatch.write(nullptr /* no need to pass buf */, encodedSize);
 updateErrnoCount(connection, ioBufBatch);
 return DataPathResult::makeWriteResult(
-ret, std::move(result), encodedSize, encodedBodySize);
+ret, std::move(result.value()), encodedSize, encodedBodySize);
 }

-DataPathResult iobufChainBasedBuildScheduleEncrypt(
+[[nodiscard]] folly::Expected<DataPathResult, QuicError>
+iobufChainBasedBuildScheduleEncrypt(
 QuicConnectionStateBase& connection,
 PacketHeader header,
 PacketNumberSpace pnSpace,
@@ -348,7 +353,10 @@ DataPathResult iobufChainBasedBuildScheduleEncrypt(
 pktBuilder.accountForCipherOverhead(cipherOverhead);
 auto result =
 scheduler.scheduleFramesForPacket(std::move(pktBuilder), writableBytes);
-auto& packet = result.packet;
+if (result.hasError()) {
+return folly::makeUnexpected(result.error());
+}
+auto& packet = result->packet;
 if (!packet || packet->packet.frames.empty()) {
 ioBufBatch.flush();
 updateErrnoCount(connection, ioBufBatch);
@@ -400,7 +408,7 @@ DataPathResult iobufChainBasedBuildScheduleEncrypt(
 bool ret = ioBufBatch.write(std::move(packetBuf), encodedSize);
 updateErrnoCount(connection, ioBufBatch);
 return DataPathResult::makeWriteResult(
-ret, std::move(result), encodedSize, encodedBodySize);
+ret, std::move(result.value()), encodedSize, encodedBodySize);
 }

 } // namespace
@@ -534,7 +542,7 @@ void handleRetransmissionBufMetaWritten(
 * with new data, as well as retranmissions. Returns true if the data sent is
 * new data.
 */
-bool handleStreamWritten(
+folly::Expected<bool, QuicError> handleStreamWritten(
 QuicConnectionStateBase& conn,
 QuicStreamLike& stream,
 uint64_t frameOffset,
@@ -548,13 +556,13 @@ bool handleStreamWritten(
 handleNewStreamDataWritten(stream, frameLen, frameFin);
 writtenNewData = true;
 } else if (frameOffset > stream.currentWriteOffset) {
-throw QuicTransportException(
+return folly::makeUnexpected(QuicError(
+TransportErrorCode::INTERNAL_ERROR,
 fmt::format(
 "Byte offset of first byte in written stream frame ({}) is "
 "greater than stream's current write offset ({})",
 frameOffset,
-stream.currentWriteOffset),
-TransportErrorCode::INTERNAL_ERROR);
+stream.currentWriteOffset)));
 }

 if (writtenNewData) {
@@ -687,7 +695,7 @@ folly::Expected<folly::Unit, QuicError> updateConnection(
 packetNum,
 packetNumberSpace);
 } else {
-newStreamDataWritten = handleStreamWritten(
+auto streamWrittenResult = handleStreamWritten(
 conn,
 *stream,
 writeStreamFrame.offset,
@@ -695,6 +703,10 @@ folly::Expected<folly::Unit, QuicError> updateConnection(
 writeStreamFrame.fin,
 packetNum,
 packetNumberSpace);
+if (streamWrittenResult.hasError()) {
+return folly::makeUnexpected(streamWrittenResult.error());
+}
+newStreamDataWritten = streamWrittenResult.value();
 }
 if (newStreamDataWritten) {
 auto flowControlResult =
@@ -719,7 +731,7 @@ folly::Expected<folly::Unit, QuicError> updateConnection(
 // NewSessionTicket is sent in crypto frame encrypted with 1-rtt key,
 // however, it is not part of handshake
 auto encryptionLevel = protectionTypeToEncryptionLevel(protectionType);
-handleStreamWritten(
+auto cryptoWritten = handleStreamWritten(
 conn,
 *getCryptoStream(*conn.cryptoState, encryptionLevel),
 writeCryptoFrame.offset,
@@ -727,6 +739,9 @@ folly::Expected<folly::Unit, QuicError> updateConnection(
 false /* fin */,
 packetNum,
 packetNumberSpace);
+if (cryptoWritten.hasError()) {
+return folly::makeUnexpected(cryptoWritten.error());
+}
 break;
 }
 case QuicWriteFrame::Type::WriteAckFrame: {
@@ -1661,7 +1676,11 @@ folly::Expected<WriteQuicDataResult, QuicError> writeConnectionDataToSocket(
 aead,
 headerCipher);

-if (!ret.buildSuccess) {
+// This is a fatal error vs. a build error.
+if (ret.hasError()) {
+return folly::makeUnexpected(ret.error());
+}
+if (!ret->buildSuccess) {
 // If we're returning because we couldn't schedule more packets,
 // make sure we flush the buffer in this function.
 ioBufBatch.flush();
@@ -1673,20 +1692,20 @@ folly::Expected<WriteQuicDataResult, QuicError> writeConnectionDataToSocket(
 // matter the write result. We are basically treating this case as if we
 // pretend write was also successful but packet is lost somewhere in the
 // network.
-bytesWritten += ret.encodedSize;
-if (ret.result && ret.result->shortHeaderPadding > 0) {
+bytesWritten += ret->encodedSize;
+if (ret->result && ret->result->shortHeaderPadding > 0) {
 shortHeaderPaddingCount++;
-shortHeaderPadding += ret.result->shortHeaderPadding;
+shortHeaderPadding += ret->result->shortHeaderPadding;
 }

-auto& result = ret.result;
+auto& result = ret->result;
 auto updateConnResult = updateConnection(
 connection,
 std::move(result->clonedPacketIdentifier),
 std::move(result->packet->packet),
 sentTime,
-folly::to<uint32_t>(ret.encodedSize),
-folly::to<uint32_t>(ret.encodedBodySize),
+folly::to<uint32_t>(ret->encodedSize),
+folly::to<uint32_t>(ret->encodedBodySize),
 false /* isDSRPacket */);
 if (updateConnResult.hasError()) {
 return folly::makeUnexpected(updateConnResult.error());
@@ -1694,7 +1713,7 @@ folly::Expected<WriteQuicDataResult, QuicError> writeConnectionDataToSocket(

 // if ioBufBatch.write returns false
 // it is because a flush() call failed
-if (!ret.writeSuccess) {
+if (!ret->writeSuccess) {
 if (connection.loopDetectorCallback) {
 connection.writeDebugState.noWriteReason =
 NoWriteReason::SOCKET_FAILURE;
@@ -1967,6 +1986,7 @@ void implicitAckCryptoStream(
 implicitAck,
 [](const auto&) {
 // ackedPacketVisitor. No action needed.
+return folly::unit;
 },
 [&](auto&, auto& packetFrame) {
 switch (packetFrame.type()) {
@@ -2109,7 +2129,7 @@ void maybeInitiateKeyUpdate(QuicConnectionStateBase& conn) {
 }
 }

-void maybeVerifyPendingKeyUpdate(
+folly::Expected<folly::Unit, QuicError> maybeVerifyPendingKeyUpdate(
 QuicConnectionStateBase& conn,
 const OutstandingPacketWrapper& outstandingPacket,
 const RegularQuicPacket& ackPacket) {
@@ -2117,7 +2137,7 @@ void maybeVerifyPendingKeyUpdate(
 outstandingPacket.packet.header.getProtectionType()) ==
 EncryptionLevel::AppData)) {
 // This is not an app data packet. We can't have initiated a key update yet.
-return;
+return folly::unit;
 }

 if (conn.oneRttWritePendingVerificationPacketNumber &&
@@ -2130,11 +2150,12 @@ void maybeVerifyPendingKeyUpdate(
 conn.oneRttWritePendingVerificationPacketNumber.reset();
 conn.oneRttWritePendingVerification = false;
 } else {
-throw QuicTransportException(
-"Packet with key update was acked in the wrong phase",
-TransportErrorCode::CRYPTO_ERROR);
+return folly::makeUnexpected(QuicError(
+TransportErrorCode::CRYPTO_ERROR,
+"Packet with key update was acked in the wrong phase"));
 }
 }
+return folly::unit;
 }

 // Unfortunate, we should make this more portable.
@@ -181,7 +181,7 @@ void handleRetransmissionWritten(
 * with new data, as well as retranmissions. Returns true if the data sent is
 * new data.
 */
-bool handleStreamWritten(
+[[nodiscard]] folly::Expected<bool, QuicError> handleStreamWritten(
 QuicConnectionStateBase& conn,
 QuicStreamLike& stream,
 uint64_t frameOffset,
@@ -340,7 +340,8 @@ void updateOneRttWriteCipher(
 ProtectionType oneRttPhase);
 void maybeHandleIncomingKeyUpdate(QuicConnectionStateBase& conn);
 void maybeInitiateKeyUpdate(QuicConnectionStateBase& conn);
-void maybeVerifyPendingKeyUpdate(
+[[nodiscard]] folly::Expected<folly::Unit, QuicError>
+maybeVerifyPendingKeyUpdate(
 QuicConnectionStateBase& conn,
 const OutstandingPacketWrapper& outstandingPacket,
 const RegularQuicPacket& ackPacket);
@@ -30,7 +30,7 @@ class MockFrameScheduler : public FrameScheduler {
 : FrameScheduler("mock", *conn) {}

 // override methods accepting rvalue ref since gmock doesn't support it
-SchedulingResult scheduleFramesForPacket(
+folly::Expected<SchedulingResult, QuicError> scheduleFramesForPacket(
 PacketBuilderInterface&& builderIn,
 uint32_t writableBytes) override {
 return _scheduleFramesForPacket(&builderIn, writableBytes);
@@ -39,7 +39,7 @@ class MockFrameScheduler : public FrameScheduler {
 MOCK_METHOD((bool), hasData, (), (const));
 MOCK_METHOD((bool), hasImmediateData, (), (const));
 MOCK_METHOD(
-SchedulingResult,
+(folly::Expected<SchedulingResult, QuicError>),
 _scheduleFramesForPacket,
 (PacketBuilderInterface*, uint32_t));
 };
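The mock above keeps the existing workaround for gmock's lack of support for rvalue-reference parameters (the public override forwards to a pointer-taking MOCK_METHOD); only the return type changes, and it must be parenthesized because it contains a comma. A self-contained sketch of the same pattern with placeholder types, not the mvfst mock itself:

#include <cstdint>
#include <string>

#include <folly/Expected.h>
#include <gmock/gmock.h>

struct Builder {
  std::string name;
};

struct Result {
  int frames = 0;
};

struct Err {
  std::string message;
};

class MockScheduler {
 public:
  // gmock cannot mock a method taking Builder&&, so the public method
  // forwards to a pointer-taking mock, as MockFrameScheduler does.
  folly::Expected<Result, Err> schedule(Builder&& builder, uint32_t bytes) {
    return _schedule(&builder, bytes);
  }

  // Parentheses around the return type are required because of the comma.
  MOCK_METHOD((folly::Expected<Result, Err>), _schedule, (Builder*, uint32_t));
};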
@@ -265,8 +265,9 @@ TEST_F(QuicPacketSchedulerTest, CryptoPaddingInitialPacket) {
 conn.cryptoState->initialStream, folly::IOBuf::copyBuffer("chlo"));
 auto result = cryptoOnlyScheduler.scheduleFramesForPacket(
 std::move(builder), conn.udpSendPacketLen);
-auto packetLength = result.packet->header.computeChainDataLength() +
-result.packet->body.computeChainDataLength();
+ASSERT_FALSE(result.hasError());
+auto packetLength = result.value().packet->header.computeChainDataLength() +
+result.value().packet->body.computeChainDataLength();
 EXPECT_EQ(conn.udpSendPacketLen, packetLength);
 }

@@ -298,8 +299,9 @@ TEST_F(QuicPacketSchedulerTest, PaddingInitialPureAcks) {
 .build();
 auto result = acksOnlyScheduler.scheduleFramesForPacket(
 std::move(builder), conn.udpSendPacketLen);
-auto packetLength = result.packet->header.computeChainDataLength() +
-result.packet->body.computeChainDataLength();
+ASSERT_FALSE(result.hasError());
+auto packetLength = result.value().packet->header.computeChainDataLength() +
+result.value().packet->body.computeChainDataLength();
 EXPECT_EQ(conn.udpSendPacketLen, packetLength);
 }

@@ -332,8 +334,9 @@ TEST_F(QuicPacketSchedulerTest, InitialPaddingDoesNotUseWrapper) {
 .build();
 auto result = acksOnlyScheduler.scheduleFramesForPacket(
 std::move(builder), conn.udpSendPacketLen - cipherOverhead);
-auto packetLength = result.packet->header.computeChainDataLength() +
-result.packet->body.computeChainDataLength();
+ASSERT_FALSE(result.hasError());
+auto packetLength = result.value().packet->header.computeChainDataLength() +
+result.value().packet->body.computeChainDataLength();
 EXPECT_EQ(conn.udpSendPacketLen, packetLength);
 }

@@ -365,8 +368,9 @@ TEST_F(QuicPacketSchedulerTest, CryptoServerInitialPadded) {
 conn.cryptoState->initialStream, folly::IOBuf::copyBuffer("shlo"));
 auto result = scheduler.scheduleFramesForPacket(
 std::move(builder1), conn.udpSendPacketLen);
-auto packetLength = result.packet->header.computeChainDataLength() +
-result.packet->body.computeChainDataLength();
+ASSERT_FALSE(result.hasError());
+auto packetLength = result.value().packet->header.computeChainDataLength() +
+result.value().packet->body.computeChainDataLength();
 EXPECT_EQ(conn.udpSendPacketLen, packetLength);
 }

@@ -398,8 +402,9 @@ TEST_F(QuicPacketSchedulerTest, PadTwoInitialPackets) {
 conn.cryptoState->initialStream, folly::IOBuf::copyBuffer("shlo"));
 auto result = scheduler.scheduleFramesForPacket(
 std::move(builder1), conn.udpSendPacketLen);
-auto packetLength = result.packet->header.computeChainDataLength() +
-result.packet->body.computeChainDataLength();
+ASSERT_FALSE(result.hasError());
+auto packetLength = result.value().packet->header.computeChainDataLength() +
+result.value().packet->body.computeChainDataLength();
 EXPECT_EQ(conn.udpSendPacketLen, packetLength);

 increaseNextPacketNum(conn, PacketNumberSpace::Initial);
@@ -417,8 +422,9 @@ TEST_F(QuicPacketSchedulerTest, PadTwoInitialPackets) {
 conn.cryptoState->initialStream, folly::IOBuf::copyBuffer("shlo again"));
 auto result2 = scheduler.scheduleFramesForPacket(
 std::move(builder2), conn.udpSendPacketLen);
-packetLength = result2.packet->header.computeChainDataLength() +
-result2.packet->body.computeChainDataLength();
+ASSERT_FALSE(result2.hasError());
+packetLength = result2.value().packet->header.computeChainDataLength() +
+result2.value().packet->body.computeChainDataLength();
 EXPECT_EQ(conn.udpSendPacketLen, packetLength);
 }

@@ -451,8 +457,9 @@ TEST_F(QuicPacketSchedulerTest, CryptoPaddingRetransmissionClientInitial) {
 WriteStreamBuffer{std::move(clientHelloData), 0, false});
 auto result = std::move(scheduler).scheduleFramesForPacket(
 std::move(builder), conn.udpSendPacketLen);
-auto packetLength = result.packet->header.computeChainDataLength() +
-result.packet->body.computeChainDataLength();
+ASSERT_FALSE(result.hasError());
+auto packetLength = result.value().packet->header.computeChainDataLength() +
+result.value().packet->body.computeChainDataLength();
 EXPECT_EQ(conn.udpSendPacketLen, packetLength);
 }

@@ -516,10 +523,11 @@ TEST_F(QuicPacketSchedulerTest, CryptoWritePartialLossBuffer) {
 ChainedByteRangeHead(lossBuffer), 0, false);
 auto result = cryptoOnlyScheduler.scheduleFramesForPacket(
 std::move(builder), conn.udpSendPacketLen);
-auto packetLength = result.packet->header.computeChainDataLength() +
-result.packet->body.computeChainDataLength();
+ASSERT_FALSE(result.hasError());
+auto packetLength = result->packet->header.computeChainDataLength() +
+result->packet->body.computeChainDataLength();
 EXPECT_LE(packetLength, 25);
-EXPECT_TRUE(result.packet->packet.frames[0].asWriteCryptoFrame() != nullptr);
+EXPECT_TRUE(result->packet->packet.frames[0].asWriteCryptoFrame() != nullptr);
 EXPECT_FALSE(conn.cryptoState->initialStream.lossBuffer.empty());
 }

@@ -616,8 +624,9 @@ TEST_F(QuicPacketSchedulerTest, NoCloningForDSR) {
 conn.ackStates.appDataAckState.largestAckedByPeer.value_or(0));
 auto result = cloningScheduler.scheduleFramesForPacket(
 std::move(builder), kDefaultUDPSendPacketLen);
-EXPECT_FALSE(result.clonedPacketIdentifier.hasValue());
-EXPECT_FALSE(result.packet.hasValue());
+ASSERT_FALSE(result.hasError());
+EXPECT_FALSE(result->clonedPacketIdentifier.hasValue());
+EXPECT_FALSE(result->packet.hasValue());
 }

 TEST_F(QuicPacketSchedulerTest, CloningSchedulerTest) {
@@ -644,9 +653,10 @@ TEST_F(QuicPacketSchedulerTest, CloningSchedulerTest) {
 conn.ackStates.appDataAckState.largestAckedByPeer.value_or(0));
 auto result = cloningScheduler.scheduleFramesForPacket(
 std::move(builder), kDefaultUDPSendPacketLen);
+ASSERT_FALSE(result.hasError());
 EXPECT_TRUE(
-result.clonedPacketIdentifier.has_value() && result.packet.has_value());
-EXPECT_EQ(packetNum, result.clonedPacketIdentifier->packetNumber);
+result->clonedPacketIdentifier.has_value() && result->packet.has_value());
+EXPECT_EQ(packetNum, result->clonedPacketIdentifier->packetNumber);
 }

 TEST_F(QuicPacketSchedulerTest, WriteOnlyOutstandingPacketsTest) {
@@ -695,11 +705,12 @@ TEST_F(QuicPacketSchedulerTest, WriteOnlyOutstandingPacketsTest) {

 auto result = cloningScheduler.scheduleFramesForPacket(
 std::move(regularBuilder), kDefaultUDPSendPacketLen);
+ASSERT_FALSE(result.hasError());
 EXPECT_TRUE(
-result.clonedPacketIdentifier.hasValue() && result.packet.hasValue());
-EXPECT_EQ(packetNum, result.clonedPacketIdentifier->packetNumber);
+result->clonedPacketIdentifier.hasValue() && result->packet.hasValue());
+EXPECT_EQ(packetNum, result->clonedPacketIdentifier->packetNumber);
 // written packet should not have any frame in the builder
-auto& writtenPacket = *result.packet;
+auto& writtenPacket = *result->packet;
 auto shortHeader = writtenPacket.packet.header.asShort();
 CHECK(shortHeader);
 EXPECT_EQ(ProtectionType::KeyPhaseOne, shortHeader->getProtectionType());
@@ -752,9 +763,10 @@ TEST_F(QuicPacketSchedulerTest, DoNotCloneProcessedClonedPacket) {
 conn.ackStates.initialAckState->largestAckedByPeer.value_or(0));
 auto result = cloningScheduler.scheduleFramesForPacket(
 std::move(builder), kDefaultUDPSendPacketLen);
+ASSERT_FALSE(result.hasError());
 EXPECT_TRUE(
-result.clonedPacketIdentifier.has_value() && result.packet.has_value());
-EXPECT_EQ(expected, result.clonedPacketIdentifier->packetNumber);
+result->clonedPacketIdentifier.has_value() && result->packet.has_value());
+EXPECT_EQ(expected, result->clonedPacketIdentifier->packetNumber);
 }

 class CloneAllPacketsWithCryptoFrameTest
@@ -834,18 +846,20 @@ TEST_P(
 conn.ackStates.initialAckState->largestAckedByPeer.value_or(0));
 auto result = cloningScheduler.scheduleFramesForPacket(
 std::move(builder), kDefaultUDPSendPacketLen);
+ASSERT_FALSE(result.hasError());
 if (conn.transportSettings.cloneAllPacketsWithCryptoFrame &&
 conn.transportSettings.cloneCryptoPacketsAtMostOnce) {
 // First and second packets already cloned, skip all and schedule no packet
-EXPECT_FALSE(result.clonedPacketIdentifier.has_value());
-EXPECT_FALSE(result.packet.has_value());
+EXPECT_FALSE(result->clonedPacketIdentifier.has_value());
+EXPECT_FALSE(result->packet.has_value());
 } else {
 EXPECT_TRUE(
-result.clonedPacketIdentifier.has_value() && result.packet.has_value());
+result->clonedPacketIdentifier.has_value() &&
+result->packet.has_value());
 EXPECT_EQ(
 conn.transportSettings.cloneAllPacketsWithCryptoFrame ? secondPacketNum
 : firstPacketNum,
-result.clonedPacketIdentifier->packetNumber);
+result->clonedPacketIdentifier->packetNumber);
 }
 }

@@ -888,9 +902,10 @@ TEST_F(QuicPacketSchedulerTest, DoNotSkipUnclonedCryptoPacket) {
 conn.ackStates.initialAckState->largestAckedByPeer.value_or(0));
 auto result = cloningScheduler.scheduleFramesForPacket(
 std::move(builder), kDefaultUDPSendPacketLen);
+ASSERT_FALSE(result.hasError());
 EXPECT_TRUE(
-result.clonedPacketIdentifier.has_value() && result.packet.has_value());
-EXPECT_EQ(firstPacketNum, result.clonedPacketIdentifier->packetNumber);
+result->clonedPacketIdentifier.has_value() && result->packet.has_value());
+EXPECT_EQ(firstPacketNum, result->clonedPacketIdentifier->packetNumber);
 }

 TEST_F(QuicPacketSchedulerTest, CloneSchedulerHasHandshakeData) {
@@ -968,14 +983,15 @@ TEST_F(QuicPacketSchedulerTest, CloneSchedulerHasHandshakeDataAndAcks) {
 // Clone the packet.
 auto result = cloningScheduler.scheduleFramesForPacket(
 std::move(builder), kDefaultUDPSendPacketLen);
-EXPECT_TRUE(result.clonedPacketIdentifier.has_value());
-EXPECT_TRUE(result.packet.has_value());
+ASSERT_FALSE(result.hasError());
+EXPECT_TRUE(result->clonedPacketIdentifier.has_value());
+EXPECT_TRUE(result->packet.has_value());

 // Cloned packet has to have crypto data and no acks.
 bool hasAckFrame = false;
 bool hasCryptoFrame = false;
-for (auto iter = result.packet->packet.frames.cbegin();
-iter != result.packet->packet.frames.cend();
+for (auto iter = result->packet->packet.frames.cbegin();
+iter != result->packet->packet.frames.cend();
 iter++) {
 const QuicWriteFrame& frame = *iter;
 switch (frame.type()) {
@@ -1039,9 +1055,10 @@ TEST_F(QuicPacketSchedulerTest, DoNotCloneHandshake) {
 conn.ackStates.appDataAckState.largestAckedByPeer.value_or(0));
 auto result = cloningScheduler.scheduleFramesForPacket(
 std::move(builder), kDefaultUDPSendPacketLen);
+ASSERT_FALSE(result.hasError());
 EXPECT_TRUE(
-result.clonedPacketIdentifier.has_value() && result.packet.has_value());
-EXPECT_EQ(expected, result.clonedPacketIdentifier->packetNumber);
+result->clonedPacketIdentifier.has_value() && result->packet.has_value());
+EXPECT_EQ(expected, result->clonedPacketIdentifier->packetNumber);
 }

 TEST_F(QuicPacketSchedulerTest, CloneSchedulerUseNormalSchedulerFirst) {
|
||||
conn.ackStates.appDataAckState.largestAckedByPeer.value_or(0));
|
||||
auto result = cloningScheduler.scheduleFramesForPacket(
|
||||
std::move(builder), kDefaultUDPSendPacketLen);
|
||||
EXPECT_EQ(none, result.clonedPacketIdentifier);
|
||||
EXPECT_EQ(result.packet->packet.header.getHeaderForm(), HeaderForm::Short);
|
||||
ShortHeader& shortHeader = *result.packet->packet.header.asShort();
|
||||
ASSERT_FALSE(result.hasError());
|
||||
EXPECT_EQ(none, result->clonedPacketIdentifier);
|
||||
EXPECT_EQ(result->packet->packet.header.getHeaderForm(), HeaderForm::Short);
|
||||
ShortHeader& shortHeader = *result->packet->packet.header.asShort();
|
||||
EXPECT_EQ(ProtectionType::KeyPhaseOne, shortHeader.getProtectionType());
|
||||
EXPECT_EQ(
|
||||
conn.ackStates.appDataAckState.nextPacketNum,
|
||||
shortHeader.getPacketSequenceNum());
|
||||
EXPECT_EQ(1, result.packet->packet.frames.size());
|
||||
EXPECT_EQ(1, result->packet->packet.frames.size());
|
||||
MaxDataFrame* maxDataFrame =
|
||||
result.packet->packet.frames.front().asMaxDataFrame();
|
||||
result->packet->packet.frames.front().asMaxDataFrame();
|
||||
ASSERT_NE(maxDataFrame, nullptr);
|
||||
EXPECT_EQ(2832, maxDataFrame->maximumData);
|
||||
EXPECT_TRUE(folly::IOBufEqualTo{}(
|
||||
*folly::IOBuf::copyBuffer("if you are the dealer"),
|
||||
result.packet->header));
|
||||
result->packet->header));
|
||||
EXPECT_TRUE(folly::IOBufEqualTo{}(
|
||||
*folly::IOBuf::copyBuffer("I'm out of the game"), result.packet->body));
|
||||
*folly::IOBuf::copyBuffer("I'm out of the game"), result->packet->body));
|
||||
}
|
||||
|
||||
TEST_F(QuicPacketSchedulerTest, CloneWillGenerateNewWindowUpdate) {
|
||||
@ -1131,10 +1149,11 @@ TEST_F(QuicPacketSchedulerTest, CloneWillGenerateNewWindowUpdate) {
|
||||
conn.ackStates.appDataAckState.largestAckedByPeer.value_or(0));
|
||||
auto packetResult = cloningScheduler.scheduleFramesForPacket(
|
||||
std::move(builder), conn.udpSendPacketLen);
|
||||
ASSERT_FALSE(packetResult.hasError());
|
||||
EXPECT_EQ(
|
||||
expectedClonedPacketIdentifier, *packetResult.clonedPacketIdentifier);
|
||||
expectedClonedPacketIdentifier, *packetResult->clonedPacketIdentifier);
|
||||
int32_t verifyConnWindowUpdate = 1, verifyStreamWindowUpdate = 1;
|
||||
for (const auto& frame : packetResult.packet->packet.frames) {
|
||||
for (const auto& frame : packetResult->packet->packet.frames) {
|
||||
switch (frame.type()) {
|
||||
case QuicWriteFrame::Type::MaxStreamDataFrame: {
|
||||
const MaxStreamDataFrame& maxStreamDataFrame =
|
||||
@ -1159,10 +1178,10 @@ TEST_F(QuicPacketSchedulerTest, CloneWillGenerateNewWindowUpdate) {
|
||||
EXPECT_EQ(0, verifyConnWindowUpdate);
|
||||
|
||||
// Verify the built out packet has refreshed window update values
|
||||
EXPECT_GE(packetResult.packet->packet.frames.size(), 2);
|
||||
EXPECT_GE(packetResult->packet->packet.frames.size(), 2);
|
||||
uint32_t streamWindowUpdateCounter = 0;
|
||||
uint32_t connWindowUpdateCounter = 0;
|
||||
for (auto& frame : packetResult.packet->packet.frames) {
|
||||
for (auto& frame : packetResult->packet->packet.frames) {
|
||||
auto streamFlowControl = frame.asMaxStreamDataFrame();
|
||||
if (!streamFlowControl) {
|
||||
continue;
|
||||
@ -1170,7 +1189,7 @@ TEST_F(QuicPacketSchedulerTest, CloneWillGenerateNewWindowUpdate) {
|
||||
streamWindowUpdateCounter++;
|
||||
EXPECT_EQ(1700, streamFlowControl->maximumData);
|
||||
}
|
||||
for (auto& frame : packetResult.packet->packet.frames) {
|
||||
for (auto& frame : packetResult->packet->packet.frames) {
|
||||
auto connFlowControl = frame.asMaxDataFrame();
|
||||
if (!connFlowControl) {
|
||||
continue;
|
||||
@ -1213,9 +1232,11 @@ TEST_F(QuicPacketSchedulerTest, CloningSchedulerWithInplaceBuilder) {
|
||||
conn.ackStates.appDataAckState.largestAckedByPeer.value_or(0));
|
||||
auto result = cloningScheduler.scheduleFramesForPacket(
|
||||
std::move(builder), kDefaultUDPSendPacketLen);
|
||||
ASSERT_FALSE(result.hasError());
|
||||
EXPECT_TRUE(
|
||||
result.clonedPacketIdentifier.has_value() && result.packet.has_value());
|
||||
EXPECT_EQ(packetNum, result.clonedPacketIdentifier->packetNumber);
|
||||
result.value().clonedPacketIdentifier.has_value() &&
|
||||
result.value().packet.has_value());
|
||||
EXPECT_EQ(packetNum, result.value().clonedPacketIdentifier->packetNumber);
|
||||
|
||||
// Something was written into the buffer:
|
||||
EXPECT_TRUE(bufAccessor.ownsBuffer());
|
||||
@ -1261,13 +1282,14 @@ TEST_F(QuicPacketSchedulerTest, CloningSchedulerWithInplaceBuilderFullPacket) {
|
||||
ASSERT_TRUE(scheduler.hasData());
|
||||
auto result = scheduler.scheduleFramesForPacket(
|
||||
std::move(builder), conn.udpSendPacketLen);
|
||||
auto bufferLength = result.packet->header.computeChainDataLength() +
|
||||
result.packet->body.computeChainDataLength();
|
||||
ASSERT_FALSE(result.hasError());
|
||||
auto bufferLength = result->packet->header.computeChainDataLength() +
|
||||
result->packet->body.computeChainDataLength();
|
||||
EXPECT_EQ(conn.udpSendPacketLen, bufferLength);
|
||||
auto updateResult = updateConnection(
|
||||
conn,
|
||||
none,
|
||||
result.packet->packet,
|
||||
result->packet->packet,
|
||||
Clock::now(),
|
||||
bufferLength,
|
||||
0,
|
||||
@ -1295,10 +1317,11 @@ TEST_F(QuicPacketSchedulerTest, CloningSchedulerWithInplaceBuilderFullPacket) {
|
||||
conn.ackStates.appDataAckState.largestAckedByPeer.value_or(0));
|
||||
auto cloneResult = cloningScheduler.scheduleFramesForPacket(
|
||||
std::move(internalBuilder), conn.udpSendPacketLen);
|
||||
ASSERT_FALSE(cloneResult.hasError());
|
||||
EXPECT_TRUE(
|
||||
cloneResult.clonedPacketIdentifier.has_value() &&
|
||||
cloneResult.packet.has_value());
|
||||
EXPECT_EQ(packetNum, cloneResult.clonedPacketIdentifier->packetNumber);
|
||||
cloneResult->clonedPacketIdentifier.has_value() &&
|
||||
cloneResult->packet.has_value());
|
||||
EXPECT_EQ(packetNum, cloneResult->clonedPacketIdentifier->packetNumber);
|
||||
|
||||
// Something was written into the buffer:
|
||||
EXPECT_TRUE(bufAccessor.ownsBuffer());
|
||||
@ -1337,13 +1360,14 @@ TEST_F(QuicPacketSchedulerTest, CloneLargerThanOriginalPacket) {
|
||||
conn.ackStates.appDataAckState.largestAckedByPeer.value_or(0));
|
||||
auto packetResult = scheduler.scheduleFramesForPacket(
|
||||
std::move(builder), conn.udpSendPacketLen - cipherOverhead);
|
||||
auto encodedSize = packetResult.packet->body.computeChainDataLength() +
|
||||
packetResult.packet->header.computeChainDataLength() + cipherOverhead;
|
||||
ASSERT_FALSE(packetResult.hasError());
|
||||
auto encodedSize = packetResult->packet->body.computeChainDataLength() +
|
||||
packetResult->packet->header.computeChainDataLength() + cipherOverhead;
|
||||
EXPECT_EQ(encodedSize, conn.udpSendPacketLen);
|
||||
auto updateResult = updateConnection(
|
||||
conn,
|
||||
none,
|
||||
packetResult.packet->packet,
|
||||
packetResult->packet->packet,
|
||||
Clock::now(),
|
||||
encodedSize,
|
||||
0,
|
||||
@ -1365,8 +1389,9 @@ TEST_F(QuicPacketSchedulerTest, CloneLargerThanOriginalPacket) {
|
||||
noopScheduler, conn, "CopyCat", cipherOverhead);
|
||||
auto cloneResult = cloningScheduler.scheduleFramesForPacket(
|
||||
std::move(throwawayBuilder), kDefaultUDPSendPacketLen);
|
||||
EXPECT_FALSE(cloneResult.packet.hasValue());
|
||||
EXPECT_FALSE(cloneResult.clonedPacketIdentifier.hasValue());
|
||||
ASSERT_FALSE(cloneResult.hasError());
|
||||
EXPECT_FALSE(cloneResult->packet.hasValue());
|
||||
EXPECT_FALSE(cloneResult->clonedPacketIdentifier.hasValue());
|
||||
}
|
||||
|
||||
class AckSchedulingTest : public TestWithParam<PacketNumberSpace> {};
|
||||
@ -1804,7 +1829,8 @@ TEST_F(
|
||||
conn.ackStates.appDataAckState.largestAckedByPeer.value_or(0));
|
||||
auto result = cloningScheduler.scheduleFramesForPacket(
|
||||
std::move(builder), kDefaultUDPSendPacketLen);
|
||||
EXPECT_FALSE(result.clonedPacketIdentifier.has_value());
|
||||
ASSERT_FALSE(result.hasError());
|
||||
EXPECT_FALSE(result.value().clonedPacketIdentifier.has_value());
|
||||
|
||||
// Nothing was written into the buffer:
|
||||
EXPECT_TRUE(bufAccessor.ownsBuffer());
|
||||
@ -1844,7 +1870,8 @@ TEST_F(
|
||||
conn.ackStates.appDataAckState.largestAckedByPeer.value_or(0));
|
||||
auto result = cloningScheduler.scheduleFramesForPacket(
|
||||
std::move(builder), kDefaultUDPSendPacketLen);
|
||||
EXPECT_FALSE(result.clonedPacketIdentifier.has_value());
|
||||
ASSERT_FALSE(result.hasError());
|
||||
EXPECT_FALSE(result.value().clonedPacketIdentifier.has_value());
|
||||
|
||||
// Nothing was written into the buffer:
|
||||
EXPECT_TRUE(bufAccessor.ownsBuffer());
|
||||
@ -2351,20 +2378,22 @@ TEST_F(QuicPacketSchedulerTest, ShortHeaderPaddingWithSpaceForPadding) {
|
||||
|
||||
auto result1 = scheduler.scheduleFramesForPacket(
|
||||
std::move(builder1), conn.udpSendPacketLen);
|
||||
EXPECT_GT(result1.shortHeaderPadding, 0);
|
||||
ASSERT_FALSE(result1.hasError());
|
||||
EXPECT_GT(result1.value().shortHeaderPadding, 0);
|
||||
auto result2 = scheduler.scheduleFramesForPacket(
|
||||
std::move(builder2), conn.udpSendPacketLen);
|
||||
EXPECT_GT(result2.shortHeaderPadding, 0);
|
||||
ASSERT_FALSE(result2.hasError());
|
||||
EXPECT_GT(result2.value().shortHeaderPadding, 0);
|
||||
|
||||
auto headerLength1 = result1.packet->header.computeChainDataLength();
|
||||
auto bodyLength1 = result1.packet->body.computeChainDataLength();
|
||||
auto headerLength1 = result1.value().packet->header.computeChainDataLength();
|
||||
auto bodyLength1 = result1.value().packet->body.computeChainDataLength();
|
||||
auto packetLength1 = headerLength1 + bodyLength1;
|
||||
auto expectedPadding1 =
|
||||
(conn.udpSendPacketLen - (inputDataLength1 + headerLength1)) %
|
||||
paddingModulo;
|
||||
|
||||
auto headerLength2 = result2.packet->header.computeChainDataLength();
|
||||
auto bodyLength2 = result2.packet->body.computeChainDataLength();
|
||||
auto headerLength2 = result2.value().packet->header.computeChainDataLength();
|
||||
auto bodyLength2 = result2.value().packet->body.computeChainDataLength();
|
||||
auto packetLength2 = headerLength2 + bodyLength2;
|
||||
auto expectedPadding2 =
|
||||
(conn.udpSendPacketLen - (inputDataLength2 + headerLength2)) %
|
||||
@ -2415,10 +2444,11 @@ TEST_F(QuicPacketSchedulerTest, ShortHeaderFixedPaddingAtStart) {
|
||||
// Schedule frames
|
||||
auto result = scheduler.scheduleFramesForPacket(
|
||||
std::move(builder), conn.udpSendPacketLen);
|
||||
ASSERT_FALSE(result.hasError());
|
||||
|
||||
// Verify padding frames were added at start
|
||||
EXPECT_TRUE(result.packet.hasValue());
|
||||
const auto& frames = result.packet->packet.frames;
|
||||
EXPECT_TRUE(result.value().packet.hasValue());
|
||||
const auto& frames = result.value().packet->packet.frames;
|
||||
ASSERT_EQ(frames.size(), 3);
|
||||
EXPECT_TRUE(frames[0].asPaddingFrame());
|
||||
EXPECT_TRUE(frames[1].asWriteStreamFrame());
|
||||
@ -2461,10 +2491,11 @@ TEST_F(QuicPacketSchedulerTest, ShortHeaderPaddingNearMaxPacketLength) {
|
||||
|
||||
auto result = scheduler.scheduleFramesForPacket(
|
||||
std::move(builder), conn.udpSendPacketLen);
|
||||
EXPECT_GT(result.shortHeaderPadding, 0);
|
||||
ASSERT_FALSE(result.hasError());
|
||||
EXPECT_GT(result.value().shortHeaderPadding, 0);
|
||||
|
||||
auto headerLength = result.packet->header.computeChainDataLength();
|
||||
auto bodyLength = result.packet->body.computeChainDataLength();
|
||||
auto headerLength = result.value().packet->header.computeChainDataLength();
|
||||
auto bodyLength = result.value().packet->body.computeChainDataLength();
|
||||
|
||||
auto packetLength = headerLength + bodyLength;
|
||||
|
||||
@ -2516,10 +2547,11 @@ TEST_F(QuicPacketSchedulerTest, ShortHeaderPaddingMaxPacketLength) {
|
||||
|
||||
auto result = scheduler.scheduleFramesForPacket(
|
||||
std::move(builder), conn.udpSendPacketLen);
|
||||
EXPECT_EQ(result.shortHeaderPadding, 0);
|
||||
ASSERT_FALSE(result.hasError());
|
||||
EXPECT_EQ(result.value().shortHeaderPadding, 0);
|
||||
|
||||
auto headerLength = result.packet->header.computeChainDataLength();
|
||||
auto bodyLength = result.packet->body.computeChainDataLength();
|
||||
auto headerLength = result.value().packet->header.computeChainDataLength();
|
||||
auto bodyLength = result.value().packet->body.computeChainDataLength();
|
||||
|
||||
auto packetLength = headerLength + bodyLength;
|
||||
|
||||
@ -2558,8 +2590,9 @@ TEST_F(QuicPacketSchedulerTest, ImmediateAckFrameSchedulerOnRequest) {
|
||||
auto result =
|
||||
std::move(immediateAckOnlyScheduler)
|
||||
.scheduleFramesForPacket(std::move(builder), conn.udpSendPacketLen);
|
||||
auto packetLength = result.packet->header.computeChainDataLength() +
|
||||
result.packet->body.computeChainDataLength();
|
||||
ASSERT_FALSE(result.hasError());
|
||||
auto packetLength = result.value().packet->header.computeChainDataLength() +
|
||||
result.value().packet->body.computeChainDataLength();
|
||||
EXPECT_EQ(conn.udpSendPacketLen, packetLength);
|
||||
}
|
||||
|
||||
@ -2595,8 +2628,9 @@ TEST_F(QuicPacketSchedulerTest, ImmediateAckFrameSchedulerNotRequested) {
|
||||
auto result =
|
||||
std::move(immediateAckOnlyScheduler)
|
||||
.scheduleFramesForPacket(std::move(builder), conn.udpSendPacketLen);
|
||||
auto packetLength = result.packet->header.computeChainDataLength() +
|
||||
result.packet->body.computeChainDataLength();
|
||||
ASSERT_FALSE(result.hasError());
|
||||
auto packetLength = result.value().packet->header.computeChainDataLength() +
|
||||
result.value().packet->body.computeChainDataLength();
|
||||
// The immediate ACK scheduler was not triggered. This packet has no
|
||||
// frames and it shouldn't get padded.
|
||||
EXPECT_LT(packetLength, conn.udpSendPacketLen);
|
||||
@ -2638,12 +2672,15 @@ TEST_F(QuicPacketSchedulerTest, RstStreamSchedulerReliableReset) {
|
||||
conn.ackStates.appDataAckState.largestAckedByPeer.value_or(0));
|
||||
auto packetResult1 = scheduler.scheduleFramesForPacket(
|
||||
std::move(builder1), conn.udpSendPacketLen - cipherOverhead);
|
||||
auto encodedSize1 = packetResult1.packet->body.computeChainDataLength() +
|
||||
packetResult1.packet->header.computeChainDataLength() + cipherOverhead;
|
||||
ASSERT_FALSE(packetResult1.hasError());
|
||||
auto encodedSize1 =
|
||||
packetResult1.value().packet->body.computeChainDataLength() +
|
||||
packetResult1.value().packet->header.computeChainDataLength() +
|
||||
cipherOverhead;
|
||||
ASSERT_FALSE(updateConnection(
|
||||
conn,
|
||||
none,
|
||||
packetResult1.packet->packet,
|
||||
packetResult1.value().packet->packet,
|
||||
Clock::now(),
|
||||
encodedSize1,
|
||||
0,
|
||||
@ -2665,12 +2702,15 @@ TEST_F(QuicPacketSchedulerTest, RstStreamSchedulerReliableReset) {
|
||||
conn.ackStates.appDataAckState.largestAckedByPeer.value_or(0));
|
||||
auto packetResult2 = scheduler.scheduleFramesForPacket(
|
||||
std::move(builder2), conn.udpSendPacketLen - cipherOverhead);
|
||||
auto encodedSize2 = packetResult1.packet->body.computeChainDataLength() +
|
||||
packetResult2.packet->header.computeChainDataLength() + cipherOverhead;
|
||||
ASSERT_FALSE(packetResult2.hasError());
|
||||
auto encodedSize2 =
|
||||
packetResult1.value().packet->body.computeChainDataLength() +
|
||||
packetResult2.value().packet->header.computeChainDataLength() +
|
||||
cipherOverhead;
|
||||
ASSERT_FALSE(updateConnection(
|
||||
conn,
|
||||
none,
|
||||
packetResult2.packet->packet,
|
||||
packetResult2.value().packet->packet,
|
||||
Clock::now(),
|
||||
encodedSize2,
|
||||
0,
|
||||
@ -2776,11 +2816,12 @@ TEST_F(QuicPacketSchedulerTest, FixedShortHeaderPadding) {
|
||||
// Schedule frames
|
||||
auto result = scheduler.scheduleFramesForPacket(
|
||||
std::move(builder), conn.udpSendPacketLen);
|
||||
ASSERT_FALSE(result.hasError());
|
||||
|
||||
// Verify padding frames were added
|
||||
// at start
|
||||
EXPECT_TRUE(result.packet.hasValue());
|
||||
const auto& frames = result.packet->packet.frames;
|
||||
EXPECT_TRUE(result.value().packet.hasValue());
|
||||
const auto& frames = result.value().packet->packet.frames;
|
||||
ASSERT_EQ(frames.size(), 2);
|
||||
EXPECT_TRUE(frames[0].asPaddingFrame());
|
||||
EXPECT_TRUE(frames[1].asWriteStreamFrame());
|
||||
|
@ -352,17 +352,20 @@ class TestQuicTransport
|
||||
return folly::unit;
|
||||
}
|
||||
|
||||
void writeData() override {
|
||||
CHECK(!writeQuicDataToSocket(
|
||||
*socket_,
|
||||
*conn_,
|
||||
*conn_->serverConnectionId,
|
||||
*conn_->clientConnectionId,
|
||||
*aead,
|
||||
*headerCipher,
|
||||
*conn_->version,
|
||||
conn_->transportSettings.writeConnectionDataPacketsLimit)
|
||||
.hasError());
|
||||
[[nodiscard]] folly::Expected<folly::Unit, QuicError> writeData() override {
|
||||
auto result = writeQuicDataToSocket(
|
||||
*socket_,
|
||||
*conn_,
|
||||
conn_->serverConnectionId.value_or(ConnectionId::createRandom(0)),
|
||||
conn_->clientConnectionId.value_or(ConnectionId::createRandom(0)),
|
||||
*aead,
|
||||
*headerCipher,
|
||||
*conn_->version,
|
||||
conn_->transportSettings.writeConnectionDataPacketsLimit);
|
||||
if (result.hasError()) {
|
||||
return folly::makeUnexpected(result.error());
|
||||
}
|
||||
return folly::unit;
|
||||
}
|
||||
|
||||
// This is to expose the protected pacedWriteDataToSocket() function
|
||||
@ -548,7 +551,11 @@ class TestQuicTransport
|
||||
}
|
||||
|
||||
void invokeWriteSocketData() {
|
||||
writeSocketData();
|
||||
CHECK(!writeSocketData().hasError());
|
||||
}
|
||||
|
||||
[[nodiscard]] auto invokeWriteSocketDataReturn() {
|
||||
return writeSocketData();
|
||||
}
|
||||
|
||||
void invokeProcessCallbacksAfterNetworkData() {
|
||||
@ -3074,10 +3081,15 @@ TEST_P(QuicTransportImplTestClose, TestNotifyPendingWriteOnCloseWithError) {
|
||||
TEST_P(QuicTransportImplTestBase, TestTransportCloseWithMaxPacketNumber) {
|
||||
transport->setServerConnectionId();
|
||||
transport->transportConn->pendingEvents.closeTransport = false;
|
||||
EXPECT_NO_THROW(transport->invokeWriteSocketData());
|
||||
ASSERT_FALSE(transport->invokeWriteSocketDataReturn().hasError());
|
||||
|
||||
transport->transportConn->pendingEvents.closeTransport = true;
|
||||
EXPECT_THROW(transport->invokeWriteSocketData(), QuicTransportException);
|
||||
auto result = transport->invokeWriteSocketDataReturn();
|
||||
ASSERT_TRUE(result.hasError());
|
||||
ASSERT_NE(result.error().code.asTransportErrorCode(), nullptr);
|
||||
EXPECT_EQ(
|
||||
*result.error().code.asTransportErrorCode(),
|
||||
TransportErrorCode::PROTOCOL_VIOLATION);
|
||||
}
|
||||
|
||||
TEST_P(QuicTransportImplTestBase, TestGracefulCloseWithActiveStream) {
|
||||
|
@ -4819,15 +4819,15 @@ TEST_F(QuicTransportFunctionsTest, MissingStreamFrameBytes) {
|
||||
WriteStreamFrame writeStreamFrame(
|
||||
stream->id, 5 /* offset */, 2 /* len */, false /* fin */);
|
||||
packet.packet.frames.push_back(writeStreamFrame);
|
||||
EXPECT_ANY_THROW(ASSERT_FALSE(updateConnection(
|
||||
*conn,
|
||||
none,
|
||||
packet.packet,
|
||||
TimePoint(),
|
||||
getEncodedSize(packet),
|
||||
getEncodedBodySize(packet),
|
||||
false /* isDSRPacket */)
|
||||
.hasError()));
|
||||
ASSERT_TRUE(updateConnection(
|
||||
*conn,
|
||||
none,
|
||||
packet.packet,
|
||||
TimePoint(),
|
||||
getEncodedSize(packet),
|
||||
getEncodedBodySize(packet),
|
||||
false /* isDSRPacket */)
|
||||
.hasError());
|
||||
}
|
||||
}
|
||||
|
||||
@ -4867,15 +4867,15 @@ TEST_F(QuicTransportFunctionsTest, MissingStreamFrameBytesEof) {
|
||||
WriteStreamFrame writeStreamFrame(
|
||||
stream->id, offset /* offset */, len /* len */, true /* fin */);
|
||||
packet.packet.frames.push_back(writeStreamFrame);
|
||||
EXPECT_ANY_THROW(ASSERT_FALSE(updateConnection(
|
||||
*conn,
|
||||
none,
|
||||
packet.packet,
|
||||
TimePoint(),
|
||||
getEncodedSize(packet),
|
||||
getEncodedBodySize(packet),
|
||||
false /* isDSRPacket */)
|
||||
.hasError()));
|
||||
ASSERT_TRUE(updateConnection(
|
||||
*conn,
|
||||
none,
|
||||
packet.packet,
|
||||
TimePoint(),
|
||||
getEncodedSize(packet),
|
||||
getEncodedBodySize(packet),
|
||||
false /* isDSRPacket */)
|
||||
.hasError());
|
||||
}
|
||||
}
|
||||
|
||||
@ -4910,15 +4910,15 @@ TEST_F(QuicTransportFunctionsTest, MissingStreamFrameBytesSingleByteWrite) {
|
||||
WriteStreamFrame writeStreamFrame(
|
||||
stream->id, 5 /* offset */, 1 /* len */, false /* fin */);
|
||||
packet.packet.frames.push_back(writeStreamFrame);
|
||||
EXPECT_ANY_THROW(ASSERT_FALSE(updateConnection(
|
||||
*conn,
|
||||
none,
|
||||
packet.packet,
|
||||
TimePoint(),
|
||||
getEncodedSize(packet),
|
||||
getEncodedBodySize(packet),
|
||||
false /* isDSRPacket */)
|
||||
.hasError()));
|
||||
ASSERT_TRUE(updateConnection(
|
||||
*conn,
|
||||
none,
|
||||
packet.packet,
|
||||
TimePoint(),
|
||||
getEncodedSize(packet),
|
||||
getEncodedBodySize(packet),
|
||||
false /* isDSRPacket */)
|
||||
.hasError());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -91,22 +91,24 @@ class TestQuicTransport
|
||||
return folly::unit;
|
||||
}
|
||||
|
||||
void writeData() override {
|
||||
[[nodiscard]] folly::Expected<folly::Unit, QuicError> writeData() override {
|
||||
if (closed) {
|
||||
return;
|
||||
return folly::unit;
|
||||
}
|
||||
auto result = writeQuicDataToSocket(
|
||||
*socket_,
|
||||
*conn_,
|
||||
*conn_->clientConnectionId,
|
||||
*conn_->serverConnectionId,
|
||||
*aead,
|
||||
*headerCipher,
|
||||
getVersion(),
|
||||
(isConnectionPaced(*conn_)
|
||||
? conn_->pacer->updateAndGetWriteBatchSize(Clock::now())
|
||||
: conn_->transportSettings.writeConnectionDataPacketsLimit));
|
||||
if (result.hasError()) {
|
||||
return folly::makeUnexpected(result.error());
|
||||
}
|
||||
CHECK(!writeQuicDataToSocket(
|
||||
*socket_,
|
||||
*conn_,
|
||||
*conn_->clientConnectionId,
|
||||
*conn_->serverConnectionId,
|
||||
*aead,
|
||||
*headerCipher,
|
||||
getVersion(),
|
||||
(isConnectionPaced(*conn_)
|
||||
? conn_->pacer->updateAndGetWriteBatchSize(Clock::now())
|
||||
: conn_->transportSettings.writeConnectionDataPacketsLimit))
|
||||
.hasError());
|
||||
writePacketizationRequest(
|
||||
*dynamic_cast<QuicServerConnectionState*>(conn_.get()),
|
||||
*conn_->clientConnectionId,
|
||||
@ -115,6 +117,7 @@ class TestQuicTransport
|
||||
: conn_->transportSettings.writeConnectionDataPacketsLimit),
|
||||
*aead,
|
||||
Clock::now());
|
||||
return folly::unit;
|
||||
}
|
||||
|
||||
void closeTransport() override {
|
||||
|
@ -269,7 +269,10 @@ QuicClientTransportLite::processUdpPacketData(
|
||||
// TODO (amsharma): verify the "original_connection_id" parameter
|
||||
// upon receiving a subsequent initial from the server.
|
||||
|
||||
startCryptoHandshake();
|
||||
auto handshakeResult = startCryptoHandshake();
|
||||
if (handshakeResult.hasError()) {
|
||||
return folly::makeUnexpected(handshakeResult.error());
|
||||
}
|
||||
return folly::unit; // Retry processed successfully
|
||||
}
|
||||
|
||||
@ -425,7 +428,8 @@ QuicClientTransportLite::processUdpPacketData(
|
||||
// processing loop.
|
||||
conn_->handshakeLayer->handshakeConfirmed();
|
||||
}
|
||||
maybeVerifyPendingKeyUpdate(*conn_, outstandingPacket, regularPacket);
|
||||
return maybeVerifyPendingKeyUpdate(
|
||||
*conn_, outstandingPacket, regularPacket);
|
||||
};
|
||||
AckedFrameVisitor ackedFrameVisitor =
|
||||
[&](const OutstandingPacketWrapper& outstandingPacket,
|
||||
@ -989,7 +993,7 @@ QuicClientTransportLite::setDSRPacketizationRequestSender(
|
||||
return folly::makeUnexpected(LocalErrorCode::INVALID_OPERATION);
|
||||
}
|
||||
|
||||
void QuicClientTransportLite::writeData() {
|
||||
folly::Expected<folly::Unit, QuicError> QuicClientTransportLite::writeData() {
|
||||
QuicVersion version = conn_->version.value_or(*conn_->originalVersion);
|
||||
const ConnectionId& srcConnId = *conn_->clientConnectionId;
|
||||
const ConnectionId& destConnId = conn_->serverConnectionId.value_or(
|
||||
@ -1001,7 +1005,7 @@ void QuicClientTransportLite::writeData() {
|
||||
: clientConn_->lossState.srtt;
|
||||
if (clientConn_->lastCloseSentTime &&
|
||||
Clock::now() - *clientConn_->lastCloseSentTime < rtt) {
|
||||
return;
|
||||
return folly::unit;
|
||||
}
|
||||
clientConn_->lastCloseSentTime = Clock::now();
|
||||
if (clientConn_->clientHandshakeLayer->getPhase() ==
|
||||
@ -1042,7 +1046,7 @@ void QuicClientTransportLite::writeData() {
|
||||
*conn_->initialHeaderCipher,
|
||||
version);
|
||||
}
|
||||
return;
|
||||
return folly::unit;
|
||||
}
|
||||
|
||||
uint64_t packetLimit =
|
||||
@ -1062,24 +1066,22 @@ void QuicClientTransportLite::writeData() {
|
||||
auto result =
|
||||
handleInitialWriteDataCommon(srcConnId, destConnId, packetLimit, token);
|
||||
if (result.hasError()) {
|
||||
throw QuicTransportException(
|
||||
result.error().message, *result.error().code.asTransportErrorCode());
|
||||
return folly::makeUnexpected(result.error());
|
||||
}
|
||||
packetLimit -= result->packetsWritten;
|
||||
if (!packetLimit && !conn_->pendingEvents.anyProbePackets()) {
|
||||
return;
|
||||
return folly::unit;
|
||||
}
|
||||
}
|
||||
if (conn_->handshakeWriteCipher) {
|
||||
auto result =
|
||||
handleHandshakeWriteDataCommon(srcConnId, destConnId, packetLimit);
|
||||
if (result.hasError()) {
|
||||
throw QuicTransportException(
|
||||
result.error().message, *result.error().code.asTransportErrorCode());
|
||||
return folly::makeUnexpected(result.error());
|
||||
}
|
||||
packetLimit -= result->packetsWritten;
|
||||
if (!packetLimit && !conn_->pendingEvents.anyProbePackets()) {
|
||||
return;
|
||||
return folly::unit;
|
||||
}
|
||||
}
|
||||
if (clientConn_->zeroRttWriteCipher && !conn_->oneRttWriteCipher) {
|
||||
@ -1094,13 +1096,12 @@ void QuicClientTransportLite::writeData() {
|
||||
version,
|
||||
packetLimit);
|
||||
if (result.hasError()) {
|
||||
throw QuicTransportException(
|
||||
result.error().message, *result.error().code.asTransportErrorCode());
|
||||
return folly::makeUnexpected(result.error());
|
||||
}
|
||||
packetLimit -= *result;
|
||||
}
|
||||
if (!packetLimit && !conn_->pendingEvents.anyProbePackets()) {
|
||||
return;
|
||||
return folly::unit;
|
||||
}
|
||||
if (conn_->oneRttWriteCipher) {
|
||||
CHECK(clientConn_->oneRttWriteHeaderCipher);
|
||||
@ -1114,13 +1115,14 @@ void QuicClientTransportLite::writeData() {
|
||||
version,
|
||||
packetLimit);
|
||||
if (result.hasError()) {
|
||||
throw QuicTransportException(
|
||||
result.error().message, *result.error().code.asTransportErrorCode());
|
||||
return folly::makeUnexpected(result.error());
|
||||
}
|
||||
}
|
||||
return folly::unit;
|
||||
}
|
||||
|
||||
void QuicClientTransportLite::startCryptoHandshake() {
|
||||
folly::Expected<folly::Unit, QuicError>
|
||||
QuicClientTransportLite::startCryptoHandshake() {
|
||||
auto self = this->shared_from_this();
|
||||
setIdleTimer();
|
||||
// We need to update the flow control settings every time we start a crypto
|
||||
@ -1172,7 +1174,11 @@ void QuicClientTransportLite::startCryptoHandshake() {
|
||||
}
|
||||
handshakeLayer->connect(hostname_, std::move(paramsExtension));
|
||||
|
||||
writeSocketData();
|
||||
auto writeResult = writeSocketData();
|
||||
if (writeResult.hasError()) {
|
||||
return folly::makeUnexpected(writeResult.error());
|
||||
}
|
||||
|
||||
if (!transportReadyNotified_ && clientConn_->zeroRttWriteCipher) {
|
||||
transportReadyNotified_ = true;
|
||||
runOnEvbAsync([](auto self) {
|
||||
@ -1182,6 +1188,8 @@ void QuicClientTransportLite::startCryptoHandshake() {
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
return folly::unit;
|
||||
}
|
||||
|
||||
bool QuicClientTransportLite::hasWriteCipher() const {
|
||||
@ -1768,7 +1776,13 @@ void QuicClientTransportLite::start(
|
||||
socketOptions_);
|
||||
// adjust the GRO buffers
|
||||
adjustGROBuffers();
|
||||
startCryptoHandshake();
|
||||
auto handshakeResult = startCryptoHandshake();
|
||||
if (handshakeResult.hasError()) {
|
||||
runOnEvbAsync([error = handshakeResult.error()](auto self) {
|
||||
auto clientPtr = dynamic_cast<QuicClientTransportLite*>(self.get());
|
||||
clientPtr->closeImpl(error);
|
||||
});
|
||||
}
|
||||
} catch (const QuicTransportException& ex) {
|
||||
runOnEvbAsync([ex](auto self) {
|
||||
auto clientPtr = dynamic_cast<QuicClientTransportLite*>(self.get());
|
||||
|
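The hunks above all apply the same conversion: call sites that previously threw QuicTransportException on a failed write now hand the QuicError back to the caller. A minimal, self-contained sketch of that propagation shape follows; Error, lowerLevelWrite, and writeOneLevel are illustrative stand-ins (Error plays the role of quic::QuicError, lowerLevelWrite the role of a step such as handleInitialWriteDataCommon), not mvfst API.

    #include <folly/Expected.h>
    #include <folly/Unit.h>
    #include <string>

    // Stand-in for quic::QuicError, which carries a code and a message in mvfst.
    struct Error {
      std::string message;
    };

    // Stand-in for a lower-level write step; pretend one packet was written.
    folly::Expected<int, Error> lowerLevelWrite() {
      return 1;
    }

    // The propagation shape used by the converted writeData() overloads.
    folly::Expected<folly::Unit, Error> writeOneLevel() {
      auto result = lowerLevelWrite();
      if (result.hasError()) {
        // Forward the error to the caller instead of throwing.
        return folly::makeUnexpected(result.error());
      }
      // Success carries no payload.
      return folly::unit;
    }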
@ -160,7 +160,7 @@ class QuicClientTransportLite
folly::Expected<folly::Unit, QuicError> onReadData(
const folly::SocketAddress& peer,
ReceivedUdpPacket&& udpPacket) override;
void writeData() override;
folly::Expected<folly::Unit, QuicError> writeData() override;
void closeTransport() override;
void unbindConnection() override;
bool hasWriteCipher() const override;
@ -337,7 +337,8 @@ class QuicClientTransportLite
const folly::SocketAddress& peer,
ReceivedUdpPacket& udpPacket);

void startCryptoHandshake();
[[nodiscard]]
folly::Expected<folly::Unit, QuicError> startCryptoHandshake();

void happyEyeballsConnAttemptDelayTimeoutExpired() noexcept;

@ -56,7 +56,7 @@ class QuicClientTransportMock : public QuicClientTransport {
MOCK_METHOD((bool), isTLSResumed, (), (const));
MOCK_METHOD((ZeroRttAttemptState), getZeroRttState, ());
MOCK_METHOD((void), close, (Optional<QuicError>));
MOCK_METHOD((void), writeData, ());
MOCK_METHOD((folly::Expected<folly::Unit, QuicError>), writeData, ());
MOCK_METHOD((void), closeSecondSocket, ());
MOCK_METHOD((void), setHappyEyeballsEnabled, (bool));
MOCK_METHOD(

@ -708,7 +708,7 @@ TEST_F(BbrTest, BytesCounting) {
ReadAckFrame ackFrame;
ackFrame.largestAcked = packetNum;
ackFrame.ackBlocks.emplace_back(packetNum, packetNum);
auto ackPacketVisitor = [](auto&) {};
auto ackPacketVisitor = [](auto&) { return folly::unit; };
auto ackFrameVisitor = [](auto&, auto&) { return folly::unit; };
auto lossVisitor = [](auto&, auto&, bool) { return folly::unit; };
ASSERT_FALSE(processAckFrame(

@ -152,10 +152,10 @@ TEST_F(DSRMultiWriteTest, TwoRequestsWithLoss) {
auto split = stream->writeBufMeta.split(500);
stream->lossBufMetas.push_back(split);
size_t packetLimit = 10;
EXPECT_EQ(
2,
writePacketizationRequest(
conn_, getTestConnectionId(), packetLimit, *aead_));
auto packetizationResult = writePacketizationRequest(
conn_, getTestConnectionId(), packetLimit, *aead_);
ASSERT_FALSE(packetizationResult.hasError());
EXPECT_EQ(2, packetizationResult.value());
EXPECT_EQ(2, countInstructions(streamId));
EXPECT_EQ(2, conn_.outstandings.packets.size());
auto& packet1 = conn_.outstandings.packets.front().packet;

@ -9,7 +9,7 @@
#include <quic/dsr/frontend/WriteFunctions.h>

namespace quic {
uint64_t writePacketizationRequest(
folly::Expected<uint64_t, QuicError> writePacketizationRequest(
QuicServerConnectionState& connection,
const ConnectionId& dstCid,
size_t packetLimit,
@ -95,9 +95,7 @@ uint64_t writePacketizationRequest(
true /* isDSRPacket */);

if (updateResult.hasError()) {
throw QuicTransportException(
updateResult.error().message,
*updateResult.error().code.asTransportErrorCode());
return folly::makeUnexpected(updateResult.error());
}
connection.dsrPacketCount++;

@ -15,7 +15,7 @@
#include <quic/server/state/ServerStateMachine.h>

namespace quic {
uint64_t writePacketizationRequest(
folly::Expected<uint64_t, QuicError> writePacketizationRequest(
QuicServerConnectionState& connection,
const ConnectionId& dstCid,
size_t packetLimit,

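writePacketizationRequest now reports the number of packets it wrote through a folly::Expected<uint64_t, QuicError> instead of a bare uint64_t, so callers have to rule out the error before unwrapping the count. The tests in the next file all follow the same three-line pattern; conn_, cid, packetLimit, and aead_ are fixtures from WriteFunctionsTest, and expectedPacketCount is a placeholder for the value each test asserts.

    auto result = writePacketizationRequest(conn_, cid, packetLimit, *aead_);
    ASSERT_FALSE(result.hasError());
    EXPECT_EQ(expectedPacketCount, result.value());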
@ -24,7 +24,9 @@ TEST_F(WriteFunctionsTest, SchedulerNoData) {
prepareFlowControlAndStreamLimit();
auto cid = getTestConnectionId();
size_t packetLimit = 20;
EXPECT_EQ(0, writePacketizationRequest(conn_, cid, packetLimit, *aead_));
auto result = writePacketizationRequest(conn_, cid, packetLimit, *aead_);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(0, result.value());
}

TEST_F(WriteFunctionsTest, CwndBlockd) {
@ -37,7 +39,9 @@ TEST_F(WriteFunctionsTest, CwndBlockd) {
.WillRepeatedly(Return(0));
auto cid = getTestConnectionId();
size_t packetLimit = 20;
EXPECT_EQ(0, writePacketizationRequest(conn_, cid, packetLimit, *aead_));
auto result = writePacketizationRequest(conn_, cid, packetLimit, *aead_);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(0, result.value());
}

TEST_F(WriteFunctionsTest, FlowControlBlockded) {
@ -50,7 +54,9 @@ TEST_F(WriteFunctionsTest, FlowControlBlockded) {
.WillRepeatedly(Return(0));
auto cid = getTestConnectionId();
size_t packetLimit = 20;
EXPECT_EQ(0, writePacketizationRequest(conn_, cid, packetLimit, *aead_));
auto result = writePacketizationRequest(conn_, cid, packetLimit, *aead_);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(0, result.value());
}

TEST_F(WriteFunctionsTest, WriteOne) {
@ -60,7 +66,9 @@ TEST_F(WriteFunctionsTest, WriteOne) {
auto stream = conn_.streamManager->findStream(streamId);
auto currentBufMetaOffset = stream->writeBufMeta.offset;
size_t packetLimit = 20;
EXPECT_EQ(1, writePacketizationRequest(conn_, cid, packetLimit, *aead_));
auto result = writePacketizationRequest(conn_, cid, packetLimit, *aead_);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(1, result.value());
EXPECT_GT(stream->writeBufMeta.offset, currentBufMetaOffset);
EXPECT_EQ(1, stream->retransmissionBufMetas.size());
EXPECT_EQ(1, countInstructions(streamId));
@ -81,7 +89,9 @@ TEST_F(WriteFunctionsTest, WriteLoopTimeLimit) {
auto currentBufMetaOffset = stream->writeBufMeta.offset;
size_t packetLimit = 2;
conn_.lossState.srtt = 100ms;
EXPECT_EQ(2, writePacketizationRequest(conn_, cid, packetLimit, *aead_));
auto result = writePacketizationRequest(conn_, cid, packetLimit, *aead_);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(2, result.value());
EXPECT_GT(stream->writeBufMeta.offset, currentBufMetaOffset);
EXPECT_EQ(2, stream->retransmissionBufMetas.size());
EXPECT_EQ(2, countInstructions(streamId));
@ -90,10 +100,10 @@ TEST_F(WriteFunctionsTest, WriteLoopTimeLimit) {

// Fake the time so it's in the past.
auto writeLoopBeginTime = Clock::now() - 200ms;
EXPECT_EQ(
0,
writePacketizationRequest(
conn_, cid, packetLimit, *aead_, writeLoopBeginTime));
result = writePacketizationRequest(
conn_, cid, packetLimit, *aead_, writeLoopBeginTime);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(0, result.value());
EXPECT_EQ(2, stream->retransmissionBufMetas.size());
EXPECT_EQ(2, countInstructions(streamId));
EXPECT_EQ(2, conn_.outstandings.packets.size());
@ -114,7 +124,9 @@ TEST_F(WriteFunctionsTest, WriteLoopTimeLimitNoLimit) {
size_t packetLimit = 2;
conn_.lossState.srtt = 100ms;
conn_.transportSettings.writeLimitRttFraction = 0;
EXPECT_EQ(2, writePacketizationRequest(conn_, cid, packetLimit, *aead_));
auto result = writePacketizationRequest(conn_, cid, packetLimit, *aead_);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(2, result.value());
EXPECT_GT(stream->writeBufMeta.offset, currentBufMetaOffset);
EXPECT_EQ(2, stream->retransmissionBufMetas.size());
EXPECT_EQ(2, countInstructions(streamId));
@ -123,10 +135,10 @@ TEST_F(WriteFunctionsTest, WriteLoopTimeLimitNoLimit) {

// Fake the time so it's in the past.
auto writeLoopBeginTime = Clock::now() - 200ms;
EXPECT_EQ(
1,
writePacketizationRequest(
conn_, cid, packetLimit, *aead_, writeLoopBeginTime));
result = writePacketizationRequest(
conn_, cid, packetLimit, *aead_, writeLoopBeginTime);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(1, result.value());
EXPECT_EQ(3, stream->retransmissionBufMetas.size());
EXPECT_EQ(3, countInstructions(streamId));
EXPECT_EQ(3, conn_.outstandings.packets.size());
@ -144,7 +156,9 @@ TEST_F(WriteFunctionsTest, WriteTwoInstructions) {
conn_.streamManager->updateWritableStreams(*stream);
auto cid = getTestConnectionId();
size_t packetLimit = 20;
EXPECT_EQ(2, writePacketizationRequest(conn_, cid, packetLimit, *aead_));
auto result = writePacketizationRequest(conn_, cid, packetLimit, *aead_);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(2, result.value());
EXPECT_EQ(2, stream->retransmissionBufMetas.size());
EXPECT_EQ(2, countInstructions(streamId));
EXPECT_EQ(2, conn_.outstandings.packets.size());
@ -172,7 +186,9 @@ TEST_F(WriteFunctionsTest, PacketLimit) {
.WillRepeatedly(Return(1000));
auto cid = getTestConnectionId();
size_t packetLimit = 20;
EXPECT_EQ(20, writePacketizationRequest(conn_, cid, packetLimit, *aead_));
auto result = writePacketizationRequest(conn_, cid, packetLimit, *aead_);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(20, result.value());
EXPECT_EQ(20, stream->retransmissionBufMetas.size());
EXPECT_EQ(20, countInstructions(streamId));
EXPECT_EQ(20, conn_.outstandings.packets.size());
@ -197,7 +213,9 @@ TEST_F(WriteFunctionsTest, WriteTwoStreams) {
conn_.streamManager->updateWritableStreams(*stream2);
auto cid = getTestConnectionId();
size_t packetLimit = 20;
EXPECT_EQ(2, writePacketizationRequest(conn_, cid, packetLimit, *aead_));
auto result = writePacketizationRequest(conn_, cid, packetLimit, *aead_);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(2, result.value());
EXPECT_EQ(1, stream1->retransmissionBufMetas.size());
EXPECT_EQ(1, stream2->retransmissionBufMetas.size());
EXPECT_EQ(1, countInstructions(streamId1));
@ -218,13 +236,17 @@ TEST_F(WriteFunctionsTest, WriteThreeStreamsNonDsrAndDsr) {
size_t packetLimit = 20;
// First loop only write a single packet because it will find there's non-DSR
// data to write on the next stream.
EXPECT_EQ(1, writePacketizationRequest(conn_, cid, packetLimit, *aead_));
auto result = writePacketizationRequest(conn_, cid, packetLimit, *aead_);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(1, result.value());
// Pretend we sent the non DSR data for last stream
stream3->ackedIntervals.insert(0, stream3->writeBuffer.chainLength() - 1);
stream3->currentWriteOffset = stream3->writeBuffer.chainLength();
ChainedByteRangeHead(std::move(stream3->pendingWrites));
conn_.streamManager->updateWritableStreams(*stream3);
EXPECT_EQ(2, writePacketizationRequest(conn_, cid, packetLimit, *aead_));
result = writePacketizationRequest(conn_, cid, packetLimit, *aead_);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(2, result.value());
EXPECT_EQ(1, stream1->retransmissionBufMetas.size());
EXPECT_EQ(1, stream2->retransmissionBufMetas.size());
EXPECT_EQ(1, stream3->retransmissionBufMetas.size());
@ -250,7 +272,9 @@ TEST_F(WriteFunctionsTest, WriteTwoStreamsNonIncremental) {
conn_.streamManager->updateWritableStreams(*stream1);
auto cid = getTestConnectionId();
size_t packetLimit = 2;
EXPECT_EQ(2, writePacketizationRequest(conn_, cid, packetLimit, *aead_));
auto result = writePacketizationRequest(conn_, cid, packetLimit, *aead_);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(2, result.value());
EXPECT_EQ(2, stream1->retransmissionBufMetas.size());
EXPECT_EQ(0, stream2->retransmissionBufMetas.size());
EXPECT_EQ(2, countInstructions(streamId1));
@ -274,7 +298,9 @@ TEST_F(WriteFunctionsTest, WriteTwoStreamsIncremental) {
conn_.streamManager->updateWritableStreams(*stream2);
auto cid = getTestConnectionId();
size_t packetLimit = 2;
EXPECT_EQ(2, writePacketizationRequest(conn_, cid, packetLimit, *aead_));
auto result = writePacketizationRequest(conn_, cid, packetLimit, *aead_);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(2, result.value());
EXPECT_EQ(1, stream1->retransmissionBufMetas.size());
EXPECT_EQ(1, stream2->retransmissionBufMetas.size());
EXPECT_EQ(1, countInstructions(streamId1));
@ -297,10 +323,10 @@ TEST_F(WriteFunctionsTest, LossAndFreshTwoInstructionsInTwoPackets) {
auto split = stream->writeBufMeta.split(500);
stream->lossBufMetas.push_back(split);
size_t packetLimit = 10;
EXPECT_EQ(
2,
writePacketizationRequest(
conn_, getTestConnectionId(), packetLimit, *aead_));
auto result = writePacketizationRequest(
conn_, getTestConnectionId(), packetLimit, *aead_);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(2, result.value());
EXPECT_EQ(2, countInstructions(streamId));
EXPECT_EQ(2, conn_.outstandings.packets.size());
auto& packet1 = conn_.outstandings.packets.front().packet;
@ -331,10 +357,10 @@ TEST_F(
conn_.flowControlState.peerAdvertisedMaxOffset;
size_t packetLimit = 10;
// Should only write lost data
EXPECT_EQ(
1,
writePacketizationRequest(
conn_, getTestConnectionId(), packetLimit, *aead_));
auto result = writePacketizationRequest(
conn_, getTestConnectionId(), packetLimit, *aead_);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(1, result.value());
EXPECT_EQ(1, countInstructions(streamId));
ASSERT_EQ(1, conn_.outstandings.packets.size());
auto& packet1 = conn_.outstandings.packets.front().packet;

@ -403,7 +403,7 @@ TEST_F(QuicLossFunctionsTest, ClearEarlyRetranTimer) {
*conn,
PacketNumberSpace::Initial,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[&](auto&, auto&) { return folly::unit; },
lossVisitor,
Clock::now())
@ -927,7 +927,7 @@ TEST_F(QuicLossFunctionsTest, TestHandleAckedPacket) {
return folly::unit;
};

auto ackPacketVisitor = [](auto&) {};
auto ackPacketVisitor = [](auto&) { return folly::unit; };
auto ackFrameVisitor = [&](auto&, auto&) { return folly::unit; };

// process and remove the acked packet.
@ -2524,14 +2524,15 @@ TEST_F(QuicLossFunctionsTest, LossVisitorDSRTest) {
ASSERT_TRUE(stream->writeBufMeta.eof);
ASSERT_EQ(bufMetaStartingOffset + 1000, *stream->finalWriteOffset);
// Send real data
handleStreamWritten(
*conn,
*stream,
0,
bufMetaStartingOffset,
false,
0 /* PacketNum */,
PacketNumberSpace::AppData);
ASSERT_FALSE(handleStreamWritten(
*conn,
*stream,
0,
bufMetaStartingOffset,
false,
0 /* PacketNum */,
PacketNumberSpace::AppData)
.hasError());
ASSERT_EQ(0, stream->pendingWrites.chainLength());
auto retxIter = stream->retransmissionBuffer.find(0);
ASSERT_NE(stream->retransmissionBuffer.end(), retxIter);

@ -216,9 +216,9 @@ void QuicServerTransport::accept() {
std::make_unique<DefaultAppTokenValidator>(serverConn_));
}

void QuicServerTransport::writeData() {
folly::Expected<folly::Unit, QuicError> QuicServerTransport::writeData() {
if (!conn_->clientConnectionId || !conn_->serverConnectionId) {
return;
return folly::unit;
}
auto version = conn_->version.value_or(*(conn_->originalVersion));
const ConnectionId& srcConnId = *conn_->serverConnectionId;
@ -227,12 +227,12 @@ void QuicServerTransport::writeData() {
if (conn_->peerConnectionError &&
hasReceivedUdpPacketsAtLastCloseSent(*conn_)) {
// The peer sent us an error, we are in draining state now.
return;
return folly::unit;
}
if (hasReceivedUdpPacketsAtLastCloseSent(*conn_) &&
hasNotReceivedNewPacketsSinceLastCloseSent(*conn_)) {
// We did not receive any new packets, do not send a new close frame.
return;
return folly::unit;
}
updateLargestReceivedUdpPacketsAtLastCloseSent(*conn_);
if (conn_->oneRttWriteCipher) {
@ -271,7 +271,7 @@ void QuicServerTransport::writeData() {
*conn_->initialHeaderCipher,
version);
}
return;
return folly::unit;
}
uint64_t packetLimit =
(isConnectionPaced(*conn_)
@ -286,32 +286,31 @@ void QuicServerTransport::writeData() {
if (conn_->initialWriteCipher) {
auto res = handleInitialWriteDataCommon(srcConnId, destConnId, packetLimit);
if (res.hasError()) {
throw QuicTransportException(
res.error().message, *res.error().code.asTransportErrorCode());
return folly::makeUnexpected(res.error());
}
packetLimit -= res->packetsWritten;
serverConn_->numHandshakeBytesSent += res->bytesWritten;
if (!packetLimit && !conn_->pendingEvents.anyProbePackets()) {
return;
return folly::unit;
}
}
if (conn_->handshakeWriteCipher) {
auto res =
handleHandshakeWriteDataCommon(srcConnId, destConnId, packetLimit);
if (res.hasError()) {
throw QuicTransportException(
res.error().message, *res.error().code.asTransportErrorCode());
return folly::makeUnexpected(res.error());
}
packetLimit -= res->packetsWritten;
serverConn_->numHandshakeBytesSent += res->bytesWritten;
if (!packetLimit && !conn_->pendingEvents.anyProbePackets()) {
return;
return folly::unit;
}
}
if (conn_->oneRttWriteCipher) {
CHECK(conn_->oneRttWriteHeaderCipher);
auto writeLoopBeginTime = Clock::now();
auto nonDsrPath = [&](auto limit) {
auto nonDsrPath =
[&](auto limit) -> folly::Expected<WriteQuicDataResult, QuicError> {
auto result = writeQuicDataToSocket(
*socket_,
*conn_,
@ -323,25 +322,26 @@ void QuicServerTransport::writeData() {
limit,
writeLoopBeginTime);
if (result.hasError()) {
throw QuicTransportException(
result.error().message,
*result.error().code.asTransportErrorCode());
return folly::makeUnexpected(result.error());
}
return *result;
return result.value();
};
auto dsrPath = [&](auto limit) {
auto dsrPath =
[&](auto limit) -> folly::Expected<WriteQuicDataResult, QuicError> {
auto bytesBefore = conn_->lossState.totalBytesSent;
// The DSR path can't write probes.
// This is packetsWritten, probesWritten, bytesWritten.
auto dsrResult = writePacketizationRequest(
*serverConn_,
destConnId,
limit,
*conn_->oneRttWriteCipher,
writeLoopBeginTime);
if (dsrResult.hasError()) {
return folly::makeUnexpected(dsrResult.error());
}
auto result = WriteQuicDataResult{
writePacketizationRequest(
*serverConn_,
destConnId,
limit,
*conn_->oneRttWriteCipher,
writeLoopBeginTime),
0,
conn_->lossState.totalBytesSent - bytesBefore};
dsrResult.value(), 0, conn_->lossState.totalBytesSent - bytesBefore};
return result;
};
// We need a while loop because both paths write streams from the same
@ -351,14 +351,20 @@ void QuicServerTransport::writeData() {
// Give the non-DSR path a chance first for things like ACKs and flow
// control.
auto written = nonDsrPath(packetLimit);
if (written.hasError()) {
return folly::makeUnexpected(written.error());
}
// For both paths we only consider full packets against the packet
// limit. While this is slightly more aggressive than the intended
// packet limit it also helps ensure that small packets don't cause
// us to underutilize the link when mixing between DSR and non-DSR.
packetLimit -= written.bytesWritten / conn_->udpSendPacketLen;
packetLimit -= written->bytesWritten / conn_->udpSendPacketLen;
if (packetLimit && congestionControlWritableBytes(*serverConn_)) {
written = dsrPath(packetLimit);
packetLimit -= written.bytesWritten / conn_->udpSendPacketLen;
auto dsrWritten = dsrPath(packetLimit);
if (dsrWritten.hasError()) {
return folly::makeUnexpected(dsrWritten.error());
}
packetLimit -= dsrWritten->bytesWritten / conn_->udpSendPacketLen;
}
if (totalSentBefore == conn_->lossState.totalBytesSent) {
// We haven't written anything with either path, so we're done.
@ -366,6 +372,7 @@ void QuicServerTransport::writeData() {
}
}
}
return folly::unit;
}

void QuicServerTransport::closeTransport() {
@ -444,7 +451,11 @@ void QuicServerTransport::onCryptoEventAvailable() noexcept {
maybeNotifyConnectionIdBound();
maybeNotifyHandshakeFinished();
maybeIssueConnectionIds();
writeSocketData();
auto writeResult = writeSocketData();
if (writeResult.hasError()) {
closeImpl(writeResult.error());
return;
}
maybeNotifyTransportReady();
} catch (const QuicTransportException& ex) {
VLOG(4) << "onCryptoEventAvailable() error " << ex.what() << " " << *this;

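One detail in the hunk above: because the nonDsrPath and dsrPath lambdas now return either a success value or folly::makeUnexpected(...), they need an explicit trailing return type; with two return statements deducing different types (the result type versus folly::Unexpected), return-type deduction would fail. A self-contained illustration of that shape follows; Error, WriteResult, and tryWrite are illustrative stand-ins, not mvfst API.

    #include <folly/Expected.h>
    #include <cstdint>
    #include <string>

    struct Error { std::string message; };
    struct WriteResult { uint64_t packetsWritten{0}; };

    folly::Expected<WriteResult, Error> tryWrite(uint64_t /*limit*/) {
      return WriteResult{1};
    }

    int main() {
      // The trailing return type lets both return statements convert to the
      // same folly::Expected instead of fighting over deduction.
      auto path = [](uint64_t limit) -> folly::Expected<WriteResult, Error> {
        auto result = tryWrite(limit);
        if (result.hasError()) {
          return folly::makeUnexpected(result.error());
        }
        return result.value();
      };
      auto out = path(10);
      return out.hasError() ? 1 : 0;
    }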
@ -140,7 +140,7 @@ class QuicServerTransport
folly::Expected<folly::Unit, QuicError> onReadData(
const folly::SocketAddress& peer,
ReceivedUdpPacket&& udpPacket) override;
void writeData() override;
folly::Expected<folly::Unit, QuicError> writeData() override;
void closeTransport() override;
void unbindConnection() override;
bool hasWriteCipher() const override;

@ -1080,7 +1080,8 @@ void onServerReadDataFromOpen(

AckedPacketVisitor ackedPacketVisitor =
[&](const OutstandingPacketWrapper& outstandingPacket) {
maybeVerifyPendingKeyUpdate(conn, outstandingPacket, regularPacket);
return maybeVerifyPendingKeyUpdate(
conn, outstandingPacket, regularPacket);
};
AckedFrameVisitor ackedFrameVisitor = [&](const OutstandingPacketWrapper&,
const QuicWriteFrame& packetFrame)

@ -224,7 +224,10 @@ folly::Expected<AckEvent, QuicError> processAckFrame(
auto& outstandingPacket = packetWithHandlerContextItr->outstandingPacket;

// run the ACKed packet visitor
ackedPacketVisitor(*outstandingPacket);
auto ackedPacketResult = ackedPacketVisitor(*outstandingPacket);
if (ackedPacketResult.hasError()) {
return folly::makeUnexpected(ackedPacketResult.error());
}

// Update ecn counts
incrementEcnCountForAckedPacket(conn, pnSpace);

@ -19,8 +19,9 @@ using AckVisitor = std::function<void(
const QuicWriteFrame&,
const ReadAckFrame&)>;

using AckedPacketVisitor = std::function<void(
const OutstandingPacketWrapper&)>; // outstanding packet acked
using AckedPacketVisitor =
std::function<folly::Expected<folly::Unit, QuicError>(
const OutstandingPacketWrapper&)>; // outstanding packet acked

using AckedFrameVisitor = std::function<folly::Expected<folly::Unit, QuicError>(
const OutstandingPacketWrapper&, // outstanding packet acked

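Since AckedPacketVisitor now returns folly::Expected<folly::Unit, QuicError> rather than void, every visitor has to produce a value even when it does nothing, which is why the tests below change `[](auto&) {}` to `[](auto&) { return folly::unit; }`. Spelled out with the alias defined in this header, a conforming no-op visitor would look like the sketch below (noopAckedPacketVisitor is an illustrative name, not part of mvfst).

    AckedPacketVisitor noopAckedPacketVisitor =
        [](const OutstandingPacketWrapper&)
            -> folly::Expected<folly::Unit, QuicError> {
          return folly::unit;
        };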
@ -1145,8 +1145,9 @@ TEST_F(QuicOpenStateTest, DSRFullStreamAcked) {
size_t len = buf->computeChainDataLength();
ASSERT_FALSE(
writeDataToQuicStream(*stream, std::move(buf), false).hasError());
handleStreamWritten(
*conn, *stream, 0, len, false, 1, PacketNumberSpace::AppData);
ASSERT_FALSE(handleStreamWritten(
*conn, *stream, 0, len, false, 1, PacketNumberSpace::AppData)
.hasError());
ASSERT_EQ(stream->retransmissionBuffer.size(), 1);
ASSERT_FALSE(
writeBufMetaToQuicStream(*stream, BufferMeta(1000), true).hasError());

@ -227,7 +227,7 @@ TEST_P(AckHandlersTest, TestAckMultipleSequentialBlocks) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[&](const auto&, const auto& packetFrame)
-> folly::Expected<folly::Unit, QuicError> {
auto& stream = *packetFrame.asWriteStreamFrame();
@ -290,7 +290,7 @@ TEST_P(AckHandlersTest, TestAckWithECN) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&, const auto&) { return folly::unit; },
[](auto&, auto&, bool) { return folly::unit; },
ackReceiveTime);
@ -382,7 +382,7 @@ TEST_P(AckHandlersTest, TestSpuriousLossFullRemoval) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&, const auto&) { return folly::unit; },
[](auto&, auto&, bool) { return folly::unit; },
startTime + 30ms);
@ -466,7 +466,7 @@ TEST_P(AckHandlersTest, TestSpuriousLossSplitMiddleRemoval) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&, const auto&) { return folly::unit; },
[](auto&, auto&, bool) { return folly::unit; },
startTime + 30ms);
@ -556,7 +556,7 @@ TEST_P(AckHandlersTest, TestSpuriousLossTrimFrontRemoval) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&, const auto&) { return folly::unit; },
[](auto&, auto&, bool) { return folly::unit; },
startTime + 30ms)
@ -643,7 +643,7 @@ TEST_P(AckHandlersTest, TestSpuriousLossSplitFrontRemoval) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&, const auto&) { return folly::unit; },
[](auto&, auto&, bool) { return folly::unit; },
startTime + 30ms)
@ -721,7 +721,7 @@ TEST_P(AckHandlersTest, TestPacketDestructionAcks) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&, const auto&) { return folly::unit; },
[](auto&, auto&, bool) { return folly::unit; },
Clock::now())
@ -815,7 +815,7 @@ TEST_P(AckHandlersTest, TestPacketDestructionSpuriousLoss) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&, const auto&) { return folly::unit; },
[](auto&, auto&, bool) { return folly::unit; },
startTime + 260ms)
@ -868,7 +868,7 @@ TEST_P(AckHandlersTest, TestPacketDestructionSpuriousLoss) {
conn,
GetParam().pnSpace,
ackFrame1,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&, const auto&) { return folly::unit; },
[](auto&, auto&, bool) { return folly::unit; },
startTime + 600ms)
@ -943,7 +943,7 @@ TEST_P(AckHandlersTest, TestPacketDestructionBigDeque) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&, const auto&) { return folly::unit; },
[](auto&, auto&, bool) { return folly::unit; },
Clock::now())
@ -967,7 +967,7 @@ TEST_P(AckHandlersTest, TestPacketDestructionBigDeque) {
conn,
GetParam().pnSpace,
ackFrame1,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&, const auto&) { return folly::unit; },
[](auto&, auto&, bool) { return folly::unit; },
Clock::now())
@ -988,7 +988,7 @@ TEST_P(AckHandlersTest, TestPacketDestructionBigDeque) {
conn,
GetParam().pnSpace,
ackFrame2,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&, const auto&) { return folly::unit; },
[](auto&, auto&, bool) { return folly::unit; },
Clock::now())
@ -1064,7 +1064,7 @@ TEST_P(AckHandlersTest, TestAckMultipleSequentialBlocksLoss) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[&](const auto&, const auto& packetFrame) {
auto& stream = *packetFrame.asWriteStreamFrame();
streams.emplace_back(stream);
@ -1126,7 +1126,7 @@ TEST_P(AckHandlersTest, TestAckMultipleSequentialBlocksLoss) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](auto&, auto) { return folly::unit; },
[](auto&, auto&, auto) { return folly::unit; },
Clock::now())
@ -1145,7 +1145,7 @@ TEST_P(AckHandlersTest, TestAckMultipleSequentialBlocksLoss) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](auto&, auto) { return folly::unit; },
[](auto&, auto&, auto) { return folly::unit; },
Clock::now() + 2 * calculatePTO(conn))
@ -1223,7 +1223,7 @@ TEST_P(AckHandlersTest, TestAckBlocksWithGaps) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[&](const auto&, const auto& packetFrame) {
auto& stream = *packetFrame.asWriteStreamFrame();
streams.emplace_back(stream);
@ -1360,7 +1360,7 @@ TEST_P(AckHandlersTest, TestNonSequentialPacketNumbers) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[&](const auto&, const auto& packetFrame)
-> folly::Expected<folly::Unit, QuicError> {
auto& stream = *packetFrame.asWriteStreamFrame();
@ -1460,7 +1460,7 @@ TEST_P(AckHandlersTest, AckVisitorForAckTest) {
conn,
GetParam().pnSpace,
firstReceivedAck,
[](const auto&) {},
[](const auto&) { return folly::unit; },
[&](const auto& outstandingPacket, const auto& packetFrame)
-> folly::Expected<folly::Unit, QuicError> {
auto ackedPacketNum =
@ -1492,7 +1492,7 @@ TEST_P(AckHandlersTest, AckVisitorForAckTest) {
conn,
GetParam().pnSpace,
secondReceivedAck,
[](const auto&) {},
[](const auto&) { return folly::unit; },
[&](const auto&, const auto& packetFrame)
-> folly::Expected<folly::Unit, QuicError> {
const WriteAckFrame* frame = packetFrame.asWriteAckFrame();
@ -1540,7 +1540,7 @@ TEST_P(AckHandlersTest, NoNewAckedPacket) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&,
const auto&) -> folly::Expected<folly::Unit, QuicError> {
return folly::unit;
@ -1566,7 +1566,7 @@ TEST_P(AckHandlersTest, LossByAckedRecovered) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&,
const auto&) -> folly::Expected<folly::Unit, QuicError> {
return folly::unit;
@ -1626,7 +1626,7 @@ TEST_P(AckHandlersTest, AckPacketNumDoesNotExist) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&,
const auto&) -> folly::Expected<folly::Unit, QuicError> {
return folly::unit;
@ -1673,7 +1673,7 @@ TEST_P(AckHandlersTest, TestHandshakeCounterUpdate) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[&](const auto&,
const auto&) -> folly::Expected<folly::Unit, QuicError> {
return folly::unit;
@ -1892,7 +1892,7 @@ TEST_P(AckHandlersTest, NoSkipAckVisitor) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
countingAckVisitor,
[](auto& /* conn */, auto& /* packet */, bool /* processed */
) { return folly::unit; },
@ -1960,7 +1960,7 @@ TEST_P(AckHandlersTest, SkipAckVisitor) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
countingAckVisitor,
[](auto& /* conn */, auto& /* packet */, bool /* processed */
) { return folly::unit; },
@ -2035,7 +2035,7 @@ TEST_P(AckHandlersTest, MultiplePacketProcessors) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[&](const auto&, const auto&) { return folly::unit; },
[&](auto&, auto&, bool) { return folly::unit; },
Clock::now())
@ -2104,7 +2104,7 @@ TEST_P(AckHandlersTest, NoDoubleProcess) {
conn,
GetParam().pnSpace,
ackFrame1,
[](auto&) {},
[](auto&) { return folly::unit; },
countingAckVisitor,
[](auto& /* conn */, auto& /* packet */, bool /* processed */
) { return folly::unit; },
@ -2120,7 +2120,7 @@ TEST_P(AckHandlersTest, NoDoubleProcess) {
conn,
GetParam().pnSpace,
ackFrame2,
[](auto&) {},
[](auto&) { return folly::unit; },
countingAckVisitor,
[&](auto&, auto&, bool) { return folly::unit; },
Clock::now())
@ -2188,7 +2188,7 @@ TEST_P(AckHandlersTest, ClonedPacketsCounter) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
countingAckVisitor,
[&](auto&, auto&, bool) { return folly::unit; },
Clock::now())
@ -2230,7 +2230,7 @@ TEST_P(AckHandlersTest, UpdateMaxAckDelay) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[&](const auto&, const auto&) { return folly::unit; },
[&](auto&, auto&, bool) { return folly::unit; },
receiveTime)
@ -2313,7 +2313,7 @@ TEST_P(AckHandlersTest, AckNotOutstandingButLoss) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
countingAckVisitor,
[](auto& /* conn */, auto& /* packet */, bool /* processed */
) { return folly::unit; },
@ -2359,7 +2359,7 @@ TEST_P(AckHandlersTest, UpdatePendingAckStates) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[&](auto&, auto) { return folly::unit; },
[&](auto&, auto&, auto) { return folly::unit; },
receiveTime)
@ -2488,7 +2488,7 @@ TEST_P(AckHandlersTest, AckEventCreation) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&, const auto&) { return folly::unit; },
[](auto&, auto&, bool) { return folly::unit; },
ackTime);
@ -2618,7 +2618,7 @@ TEST_P(AckHandlersTest, AckEventCreationSingleWrite) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&, const auto&) { return folly::unit; },
[](auto&, auto&, bool) { return folly::unit; },
ackTime);
@ -2728,7 +2728,7 @@ TEST_P(AckHandlersTest, AckEventCreationNoCongestionController) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&, const auto&) { return folly::unit; },
[](auto&, auto&, bool) { return folly::unit; },
ackTime);
@ -2786,7 +2786,7 @@ TEST_P(AckHandlersTest, AckEventReceiveTimestamps) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto& /*outstandingPacket*/, const auto& /*frame*/) {
return folly::unit;
},
@ -2883,7 +2883,7 @@ TEST_P(AckHandlersTest, AckEventReceiveTimestampsGaps) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto& /*outstandingPacket*/, const auto& /*frame*/) {
return folly::unit;
},
@ -2945,7 +2945,7 @@ TEST_P(AckHandlersTest, AckEventReceiveTimestampsDuplicatesAll) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto& /*outstandingPacket*/, const auto& /*frame*/) {
return folly::unit;
},
@ -2975,7 +2975,7 @@ TEST_P(AckHandlersTest, AckEventReceiveTimestampsDuplicatesAll) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto& /*outstandingPacket*/, const auto& /*frame*/) {
return folly::unit;
},
@ -3042,7 +3042,7 @@ TEST_P(AckHandlersTest, AckEventReceiveTimestampsPartialDuplicates) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto& /*outstandingPacket*/, const auto& /*frame*/) {
return folly::unit;
},
@ -3159,7 +3159,7 @@ TEST_P(AckHandlersTest, AckEventReceiveTimestampsOutOfOrderAcks) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto& /*outstandingPacket*/, const auto& /*frame*/) {
return folly::unit;
},
@ -3215,7 +3215,7 @@ TEST_P(AckHandlersTest, AckEventReceiveTimestampsOutOfOrderAcks) {
conn,
GetParam().pnSpace,
ackFrame2,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto& /*outstandingPacket*/, const auto& /*frame*/) {
return folly::unit;
},
@ -3290,7 +3290,7 @@ TEST_P(AckHandlersTest, AckEventReceiveTimestampsMaxCheck) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto& /*outstandingPacket*/, const auto& /*frame*/) {
return folly::unit;
},
@ -3476,7 +3476,7 @@ TEST_P(AckHandlersTest, AckEventCreationInvalidAckDelay) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&, const auto&) { return folly::unit; },
[](auto&, auto&, bool) { return folly::unit; },
ackTime)
@ -3577,7 +3577,7 @@ TEST_P(AckHandlersTest, AckEventCreationRttMinusAckDelayIsZero) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&, const auto&) { return folly::unit; },
[](auto&, auto&, bool) { return folly::unit; },
ackTime)
@ -3705,7 +3705,7 @@ TEST_P(AckHandlersTest, AckEventCreationReorderingLargestPacketAcked) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&, const auto&) { return folly::unit; },
[](auto&, auto&, bool) { return folly::unit; },
ackTime)
@ -3762,7 +3762,7 @@ TEST_P(AckHandlersTest, AckEventCreationReorderingLargestPacketAcked) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&, const auto&) { return folly::unit; },
[](auto&, auto&, bool) { return folly::unit; },
ackTime)
@ -3819,7 +3819,7 @@ TEST_P(AckHandlersTest, AckEventCreationReorderingLargestPacketAcked) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&, const auto&) { return folly::unit; },
[](auto&, auto&, bool) { return folly::unit; },
ackTime)
@ -3939,7 +3939,7 @@ TEST_P(AckHandlersTest, AckEventCreationNoMatchingPacketDueToLoss) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&, const auto&) { return folly::unit; },
[](auto&, auto&, bool) { return folly::unit; },
ackTime)
@ -3965,7 +3965,7 @@ TEST_P(AckHandlersTest, AckEventCreationNoMatchingPacketDueToLoss) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&, const auto&) { return folly::unit; },
[](auto&, auto&, bool) { return folly::unit; },
ackTime)
@ -4065,7 +4065,7 @@ TEST_P(AckHandlersTest, ImplictAckEventCreation) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&, const auto&) { return folly::unit; },
[](auto&, auto&, bool) { return folly::unit; },
ackTime)
@ -4178,7 +4178,7 @@ TEST_P(AckHandlersTest, ObserverRttSample) {
conn,
GetParam().pnSpace,
ackData.ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&, const auto&) { return folly::unit; },
[](auto&, auto&, bool) { return folly::unit; },
ackData.ackTime)
@ -4273,7 +4273,7 @@ TEST_P(AckHandlersTest, ObserverSpuriousLostEventReorderThreshold) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&, const auto&) { return folly::unit; },
[](auto&, auto&, bool) { return folly::unit; },
startTime + 30ms)
@ -4373,7 +4373,7 @@ TEST_P(AckHandlersTest, ObserverSpuriousLostEventTimeout) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&, const auto&) { return folly::unit; },
[](auto&, auto&, bool) { return folly::unit; },
startTime + 510ms)
@ -4418,7 +4418,7 @@ TEST_P(AckHandlersTest, SubMicrosecondRTT) {
conn,
GetParam().pnSpace,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[](const auto&, const auto&) { return folly::unit; },
[](auto&, auto&, bool) { return folly::unit; },
ackReceiveTime)
@ -4584,7 +4584,7 @@ class AckEventForAppDataTest : public Test {
*conn_,
PacketNumberSpace::AppData,
ackFrame,
[](auto&) {},
[](auto&) { return folly::unit; },
[&](const OutstandingPacketWrapper& /* packet */,
const QuicWriteFrame& packetFrame)
-> folly::Expected<folly::Unit, QuicError> {