mirror of https://github.com/facebookincubator/mvfst.git synced 2025-04-18 17:24:03 +03:00

Remove exception throwing from the stream manager and flow control.

Summary: I started with the QuicStreamManager, but it turns out that the path from the manager up to the close path touches a LOT, so this is a big diff. The strategy is basically the same everywhere: return a folly::Expected, check it at every call site, and enforce that with [[nodiscard]].

Reviewed By: kvtsoy

Differential Revision: D72347215

fbshipit-source-id: 452868b541754d2ecab646d6c3cbd6aacf317d7f
Matt Joras 2025-04-07 23:45:33 -07:00 committed by Facebook GitHub Bot
parent 43af96e4f9
commit 67ce39cfdd
60 changed files with 5534 additions and 3893 deletions
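For orientation, here is a minimal sketch of the pattern this diff applies throughout. The types and names below are hypothetical stand-ins, not the exact mvfst signatures: a lookup that used to throw now returns a [[nodiscard]] folly::Expected, callers that can surface errors propagate them with folly::makeUnexpected, and callers that still assume success keep the old invariant via value_or(nullptr) plus a CHECK.

#include <folly/Expected.h>
#include <glog/logging.h>

#include <cstdint>
#include <string>
#include <unordered_map>

// Hypothetical stand-ins for the real mvfst types; illustration only.
struct QuicError {
  int code;
  std::string message;
};

struct QuicStreamState {
  uint64_t currentWriteOffset{0};
};

std::unordered_map<uint64_t, QuicStreamState> streams;

// Before: a failed lookup would throw. After: it returns folly::Expected and
// is marked [[nodiscard]], so the compiler warns if a caller drops the result.
[[nodiscard]] folly::Expected<QuicStreamState*, QuicError> getStream(
    uint64_t id) {
  auto it = streams.find(id);
  if (it == streams.end()) {
    return folly::makeUnexpected(QuicError{1, "stream not found"});
  }
  return &it->second;
}

// Callers that can surface errors propagate them with makeUnexpected...
[[nodiscard]] folly::Expected<uint64_t, QuicError> getWriteOffset(uint64_t id) {
  auto streamResult = getStream(id);
  if (streamResult.hasError()) {
    return folly::makeUnexpected(streamResult.error());
  }
  return streamResult.value()->currentWriteOffset;
}

// ...while callers that previously relied on the lookup never failing keep
// that invariant with value_or(nullptr) + CHECK, as many call sites below do.
void bumpWriteOffset(uint64_t id) {
  QuicStreamState* stream = getStream(id).value_or(nullptr);
  CHECK(stream) << "Stream " << id << " not found";
  stream->currentWriteOffset += 1;
}

int main() {
  streams[4] = QuicStreamState{};
  bumpWriteOffset(4);
  auto offset = getWriteOffset(4);
  LOG_IF(INFO, offset.hasValue()) << "offset=" << offset.value();
  return 0;
}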

View File

@ -571,7 +571,8 @@ bool RstStreamScheduler::writeRsts(PacketBuilderInterface& builder) {
bool rstWritten = false;
for (const auto& resetStream : conn_.pendingEvents.resets) {
auto streamId = resetStream.first;
QuicStreamState* streamState = conn_.streamManager->getStream(streamId);
QuicStreamState* streamState =
conn_.streamManager->getStream(streamId).value_or(nullptr);
CHECK(streamState) << "Stream " << streamId
<< " not found when going through resets";
if (streamState->pendingWrites.empty() &&
@ -901,10 +902,14 @@ SchedulingResult CloningScheduler::scheduleFramesForPacket(
// Or we can throw away the built packet and send a ping.
// Rebuilder will write the rest of frames
auto rebuildResult = rebuilder.rebuildFromPacket(outstandingPacket);
if (rebuildResult) {
auto rebuildResultExpected = rebuilder.rebuildFromPacket(outstandingPacket);
// TODO handle error better.
if (rebuildResultExpected.hasError()) {
return SchedulingResult(none, none, 0);
}
if (rebuildResultExpected.value()) {
return SchedulingResult(
std::move(rebuildResult),
std::move(rebuildResultExpected.value()),
std::move(*internalBuilder).buildPacket(),
0);
} else if (

View File

@ -124,7 +124,8 @@ folly::Expected<size_t, LocalErrorCode> QuicTransportBase::getStreamWriteOffset(
return folly::makeUnexpected(LocalErrorCode::STREAM_NOT_EXISTS);
}
try {
auto stream = CHECK_NOTNULL(conn_->streamManager->getStream(id));
auto stream =
CHECK_NOTNULL(conn_->streamManager->getStream(id).value_or(nullptr));
return stream->currentWriteOffset;
} catch (const QuicInternalException& ex) {
VLOG(4) << __func__ << " " << ex.what() << " " << *this;
@ -147,7 +148,8 @@ QuicTransportBase::getStreamWriteBufferedBytes(StreamId id) const {
return folly::makeUnexpected(LocalErrorCode::STREAM_NOT_EXISTS);
}
try {
auto stream = CHECK_NOTNULL(conn_->streamManager->getStream(id));
auto stream =
CHECK_NOTNULL(conn_->streamManager->getStream(id).value_or(nullptr));
return stream->pendingWrites.chainLength();
} catch (const QuicInternalException& ex) {
VLOG(4) << __func__ << " " << ex.what() << " " << *this;
@ -179,7 +181,8 @@ QuicTransportBase::getMaxWritableOnStream(StreamId id) const {
return folly::makeUnexpected(LocalErrorCode::INVALID_OPERATION);
}
auto stream = CHECK_NOTNULL(conn_->streamManager->getStream(id));
auto stream =
CHECK_NOTNULL(conn_->streamManager->getStream(id).value_or(nullptr));
return maxWritableOnStream(*stream);
}
@ -204,7 +207,8 @@ QuicTransportBase::setStreamFlowControlWindow(
if (!conn_->streamManager->streamExists(id)) {
return folly::makeUnexpected(LocalErrorCode::STREAM_NOT_EXISTS);
}
auto stream = CHECK_NOTNULL(conn_->streamManager->getStream(id));
auto stream =
CHECK_NOTNULL(conn_->streamManager->getStream(id).value_or(nullptr));
stream->flowControlState.windowSize = windowSize;
maybeSendStreamWindowUpdate(*stream, Clock::now());
updateWriteLooper(true);
@ -347,7 +351,8 @@ folly::Expected<folly::Unit, LocalErrorCode> QuicTransportBase::peek(
if (!conn_->streamManager->streamExists(id)) {
return folly::makeUnexpected(LocalErrorCode::STREAM_NOT_EXISTS);
}
auto stream = CHECK_NOTNULL(conn_->streamManager->getStream(id));
auto stream =
CHECK_NOTNULL(conn_->streamManager->getStream(id).value_or(nullptr));
if (stream->streamReadError) {
switch (stream->streamReadError->type()) {
@ -369,7 +374,8 @@ folly::Expected<folly::Unit, LocalErrorCode> QuicTransportBase::consume(
if (!conn_->streamManager->streamExists(id)) {
return folly::makeUnexpected(LocalErrorCode::STREAM_NOT_EXISTS);
}
auto stream = CHECK_NOTNULL(conn_->streamManager->getStream(id));
auto stream =
CHECK_NOTNULL(conn_->streamManager->getStream(id).value_or(nullptr));
auto result = consume(id, stream->currentReadOffset, amount);
if (result.hasError()) {
return folly::makeUnexpected(result.error().first);
@ -399,7 +405,8 @@ QuicTransportBase::consume(StreamId id, uint64_t offset, size_t amount) {
return folly::makeUnexpected(
ConsumeError{LocalErrorCode::STREAM_NOT_EXISTS, readOffset});
}
auto stream = CHECK_NOTNULL(conn_->streamManager->getStream(id));
auto stream =
CHECK_NOTNULL(conn_->streamManager->getStream(id).value_or(nullptr));
readOffset = stream->currentReadOffset;
if (stream->currentReadOffset != offset) {
return folly::makeUnexpected(
@ -567,7 +574,8 @@ void QuicTransportBase::resetNonControlStreams(
auto readCallbackIt = readCallbacks_.find(id);
if (readCallbackIt != readCallbacks_.end() &&
readCallbackIt->second.readCb) {
auto stream = CHECK_NOTNULL(conn_->streamManager->getStream(id));
auto stream = CHECK_NOTNULL(
conn_->streamManager->getStream(id).value_or(nullptr));
if (!stream->groupId) {
readCallbackIt->second.readCb->readError(
id, QuicError(error, errorMsg.str()));

View File

@ -239,7 +239,7 @@ folly::Expected<folly::Unit, LocalErrorCode> QuicTransportBaseLite::stopSending(
if (!conn_->streamManager->streamExists(id)) {
return folly::makeUnexpected(LocalErrorCode::STREAM_NOT_EXISTS);
}
auto* stream = conn_->streamManager->getStream(id);
auto* stream = conn_->streamManager->getStream(id).value_or(nullptr);
CHECK(stream) << "Invalid stream in " << __func__ << ": " << id;
if (stream->recvState == StreamRecvState::Closed) {
// skip STOP_SENDING if ingress is already closed
@ -299,7 +299,7 @@ QuicSocketLite::WriteResult QuicTransportBaseLite::writeChain(
if (!conn_->streamManager->streamExists(id)) {
return folly::makeUnexpected(LocalErrorCode::STREAM_NOT_EXISTS);
}
auto stream = conn_->streamManager->getStream(id);
auto stream = conn_->streamManager->getStream(id).value_or(nullptr);
CHECK(stream) << "Invalid stream in " << __func__ << ": " << id;
if (!stream->writable()) {
return folly::makeUnexpected(LocalErrorCode::STREAM_CLOSED);
@ -319,7 +319,15 @@ QuicSocketLite::WriteResult QuicTransportBaseLite::writeChain(
wasAppLimitedOrIdle = conn_->congestionController->isAppLimited();
wasAppLimitedOrIdle |= conn_->streamManager->isAppIdle();
}
writeDataToQuicStream(*stream, std::move(data), eof);
auto result = writeDataToQuicStream(*stream, std::move(data), eof);
if (result.hasError()) {
VLOG(4) << __func__ << " streamId=" << id << " " << result.error().message
<< " " << *this;
exceptionCloseWhat_ = result.error().message;
closeImpl(
QuicError(result.error().code, std::string("writeChain() error")));
return folly::makeUnexpected(LocalErrorCode::TRANSPORT_ERROR);
}
// If we were previously app limited restart pacing with the current rate.
if (wasAppLimitedOrIdle && conn_->pacer) {
conn_->pacer->reset();
@ -417,7 +425,8 @@ QuicTransportBaseLite::updateReliableDeliveryCheckpoint(StreamId id) {
if (!conn_->streamManager->streamExists(id)) {
return folly::makeUnexpected(LocalErrorCode::STREAM_NOT_EXISTS);
}
auto stream = CHECK_NOTNULL(conn_->streamManager->getStream(id));
auto stream =
CHECK_NOTNULL(conn_->streamManager->getStream(id).value_or(nullptr));
if (stream->sendState == StreamSendState::ResetSent) {
// We already sent a reset, so there's really no reason why we should be
// doing any more checkpointing, especially since we cannot
@ -482,7 +491,7 @@ QuicTransportBaseLite::notifyPendingWriteOnStream(
if (!conn_->streamManager->streamExists(id)) {
return folly::makeUnexpected(LocalErrorCode::STREAM_NOT_EXISTS);
}
auto stream = conn_->streamManager->getStream(id);
auto stream = conn_->streamManager->getStream(id).value_or(nullptr);
CHECK(stream) << "Invalid stream in " << __func__ << ": " << id;
if (!stream->writable()) {
return folly::makeUnexpected(LocalErrorCode::STREAM_CLOSED);
@ -515,7 +524,8 @@ QuicTransportBaseLite::notifyPendingWriteOnStream(
id, QuicError(LocalErrorCode::STREAM_NOT_EXISTS));
return;
}
auto stream = CHECK_NOTNULL(self->conn_->streamManager->getStream(id));
auto stream = CHECK_NOTNULL(
self->conn_->streamManager->getStream(id).value_or(nullptr));
if (!stream->writable()) {
self->pendingWriteCallbacks_.erase(wcbIt);
writeCallback->onStreamWriteError(
@ -582,7 +592,7 @@ QuicTransportBaseLite::registerByteEventCallback(
}
byteEventMapIt->second.emplace(pos, offset, cb);
}
auto stream = conn_->streamManager->getStream(id);
auto stream = conn_->streamManager->getStream(id).value_or(nullptr);
CHECK(stream) << "Invalid stream in " << __func__ << ": " << id;
// Notify recipients that the registration was successful.
@ -675,7 +685,7 @@ Optional<LocalErrorCode> QuicTransportBaseLite::setControlStream(StreamId id) {
if (!conn_->streamManager->streamExists(id)) {
return LocalErrorCode::STREAM_NOT_EXISTS;
}
auto stream = conn_->streamManager->getStream(id);
auto stream = conn_->streamManager->getStream(id).value_or(nullptr);
CHECK(stream) << "Invalid stream in " << __func__ << ": " << id;
conn_->streamManager->setStreamAsControl(*stream);
return none;
@ -716,9 +726,17 @@ QuicTransportBaseLite::read(StreamId id, size_t maxLen) {
if (!conn_->streamManager->streamExists(id)) {
return folly::makeUnexpected(LocalErrorCode::STREAM_NOT_EXISTS);
}
auto stream = conn_->streamManager->getStream(id);
auto stream = conn_->streamManager->getStream(id).value_or(nullptr);
CHECK(stream) << "Invalid stream in " << __func__ << ": " << id;
auto result = readDataFromQuicStream(*stream, maxLen);
auto readResult = readDataFromQuicStream(*stream, maxLen);
if (readResult.hasError()) {
VLOG(4) << "read() error " << readResult.error().message << " " << *this;
exceptionCloseWhat_ = readResult.error().message;
closeImpl(QuicError(
QuicErrorCode(readResult.error().code), std::string("read() error")));
return folly::makeUnexpected(LocalErrorCode::TRANSPORT_ERROR);
}
auto result = std::move(readResult.value());
if (result.second) {
VLOG(10) << "Delivered eof to app for stream=" << stream->id << " "
<< *this;
@ -891,7 +909,7 @@ QuicTransportBaseLite::getStreamTransportInfo(StreamId id) const {
if (!conn_->streamManager->streamExists(id)) {
return folly::makeUnexpected(LocalErrorCode::STREAM_NOT_EXISTS);
}
auto stream = conn_->streamManager->getStream(id);
auto stream = conn_->streamManager->getStream(id).value_or(nullptr);
CHECK(stream) << "Invalid stream in " << __func__ << ": " << id;
auto packets = getNumPacketsTxWithNewData(*stream);
return StreamTransportInfo{
@ -932,7 +950,7 @@ QuicTransportBaseLite::getStreamFlowControl(StreamId id) const {
if (!conn_->streamManager->streamExists(id)) {
return folly::makeUnexpected(LocalErrorCode::STREAM_NOT_EXISTS);
}
auto stream = conn_->streamManager->getStream(id);
auto stream = conn_->streamManager->getStream(id).value_or(nullptr);
CHECK(stream) << "Invalid stream in " << __func__ << ": " << id;
return QuicSocketLite::FlowControlState(
getSendStreamFlowControlBytesAPI(*stream),
@ -1159,7 +1177,13 @@ void QuicTransportBaseLite::checkForClosedStream() {
if (connCallback_) {
connCallback_->onStreamPreReaped(*itr);
}
conn_->streamManager->removeClosedStream(*itr);
auto result = conn_->streamManager->removeClosedStream(*itr);
if (result.hasError()) {
exceptionCloseWhat_ = result.error().message;
closeImpl(QuicError(
result.error().code, std::string("checkForClosedStream() error")));
return;
}
maybeSendStreamLimitUpdates(*conn_);
if (readCbIt != readCallbacks_.end()) {
readCallbacks_.erase(readCbIt);
@ -1646,7 +1670,7 @@ QuicTransportBaseLite::resetStreamInternal(
if (!conn_->streamManager->streamExists(id)) {
return folly::makeUnexpected(LocalErrorCode::STREAM_NOT_EXISTS);
}
auto stream = conn_->streamManager->getStream(id);
auto stream = conn_->streamManager->getStream(id).value_or(nullptr);
CHECK(stream) << "Invalid stream in " << __func__ << ": " << id;
if (stream->appErrorCodeToPeer &&
*stream->appErrorCodeToPeer != errorCode) {
@ -1669,7 +1693,15 @@ QuicTransportBaseLite::resetStreamInternal(
return folly::makeUnexpected(LocalErrorCode::INVALID_OPERATION);
}
// Invoke state machine
sendRstSMHandler(*stream, errorCode, maybeReliableSize);
auto result = sendRstSMHandler(*stream, errorCode, maybeReliableSize);
if (result.hasError()) {
VLOG(4) << __func__ << " streamId=" << id << " " << result.error().message
<< " " << *this;
exceptionCloseWhat_ = result.error().message;
closeImpl(
QuicError(result.error().code, std::string("resetStream() error")));
return folly::makeUnexpected(LocalErrorCode::TRANSPORT_ERROR);
}
// Cancel all byte events for this stream which have offsets that don't
// need to be reliably delivered.
@ -1910,7 +1942,8 @@ void QuicTransportBaseLite::handleDeliveryCallbacks() {
auto deliverableStreamId = conn_->streamManager->popDeliverable();
while (deliverableStreamId.has_value()) {
auto streamId = *deliverableStreamId;
auto stream = CHECK_NOTNULL(conn_->streamManager->getStream(streamId));
auto stream = CHECK_NOTNULL(
conn_->streamManager->getStream(streamId).value_or(nullptr));
auto maxOffsetToDeliver = getLargestDeliverableOffset(*stream);
if (maxOffsetToDeliver.has_value()) {
@ -1957,7 +1990,8 @@ void QuicTransportBaseLite::handleStreamFlowControlUpdatedCallbacks(
streamStorage = conn_->streamManager->consumeFlowControlUpdated();
const auto& flowControlUpdated = streamStorage;
for (auto streamId : flowControlUpdated) {
auto stream = CHECK_NOTNULL(conn_->streamManager->getStream(streamId));
auto stream = CHECK_NOTNULL(
conn_->streamManager->getStream(streamId).value_or(nullptr));
if (!stream->writable()) {
pendingWriteCallbacks_.erase(streamId);
continue;
@ -1967,7 +2001,8 @@ void QuicTransportBaseLite::handleStreamFlowControlUpdatedCallbacks(
return;
}
// In case the callback modified the stream map, get it again.
stream = CHECK_NOTNULL(conn_->streamManager->getStream(streamId));
stream = CHECK_NOTNULL(
conn_->streamManager->getStream(streamId).value_or(nullptr));
auto maxStreamWritable = maxWritableOnStream(*stream);
if (maxStreamWritable != 0 && !pendingWriteCallbacks_.empty()) {
auto pendingWriteIt = pendingWriteCallbacks_.find(stream->id);
@ -2015,7 +2050,8 @@ void QuicTransportBaseLite::handleConnWritable() {
auto streamId = writeCallbackIt->first;
auto wcb = writeCallbackIt->second;
++writeCallbackIt;
auto stream = CHECK_NOTNULL(conn_->streamManager->getStream(streamId));
auto stream = CHECK_NOTNULL(
conn_->streamManager->getStream(streamId).value_or(nullptr));
if (!stream->writable()) {
pendingWriteCallbacks_.erase(streamId);
continue;
@ -2040,7 +2076,8 @@ void QuicTransportBaseLite::cleanupAckEventState() {
} // memory allocated for vector will be freed
}
WriteQuicDataResult QuicTransportBaseLite::handleInitialWriteDataCommon(
folly::Expected<WriteQuicDataResult, QuicError>
QuicTransportBaseLite::handleInitialWriteDataCommon(
const ConnectionId& srcConnId,
const ConnectionId& dstConnId,
uint64_t packetLimit,
@ -2073,7 +2110,8 @@ WriteQuicDataResult QuicTransportBaseLite::handleInitialWriteDataCommon(
return WriteQuicDataResult{};
}
WriteQuicDataResult QuicTransportBaseLite::handleHandshakeWriteDataCommon(
folly::Expected<WriteQuicDataResult, QuicError>
QuicTransportBaseLite::handleHandshakeWriteDataCommon(
const ConnectionId& srcConnId,
const ConnectionId& dstConnId,
uint64_t packetLimit) {
@ -2176,7 +2214,13 @@ void QuicTransportBaseLite::lossTimeoutExpired() noexcept {
// onLossDetectionAlarm will set packetToSend in pending events
[[maybe_unused]] auto self = sharedGuard();
try {
onLossDetectionAlarm(*conn_, markPacketLoss);
auto result = onLossDetectionAlarm(*conn_, markPacketLoss);
if (result.hasError()) {
closeImpl(QuicError(
result.error().code, std::string("lossTimeoutExpired() error")));
return;
}
if (conn_->qLogger) {
conn_->qLogger->addTransportStateUpdate(kLossTimeoutExpired);
}
@ -2327,7 +2371,8 @@ void QuicTransportBaseLite::cancelAllAppCallbacks(
continue;
}
if (it->second.readCb) {
auto stream = CHECK_NOTNULL(conn_->streamManager->getStream(streamId));
auto stream = CHECK_NOTNULL(
conn_->streamManager->getStream(streamId).value_or(nullptr));
if (!stream->groupId) {
it->second.readCb->readError(streamId, err);
} else {
@ -2530,7 +2575,8 @@ void QuicTransportBaseLite::invokeReadDataAndCallbacks(
continue;
}
auto readCb = callback->second.readCb;
auto stream = CHECK_NOTNULL(conn_->streamManager->getStream(streamId));
auto stream = CHECK_NOTNULL(
conn_->streamManager->getStream(streamId).value_or(nullptr));
if (readCb && stream->streamReadError &&
(!stream->reliableSizeFromPeer ||
*stream->reliableSizeFromPeer <= stream->currentReadOffset)) {
@ -2607,7 +2653,8 @@ void QuicTransportBaseLite::invokePeekDataAndCallbacks() {
continue;
}
auto peekCb = callback->second.peekCb;
auto stream = CHECK_NOTNULL(conn_->streamManager->getStream(streamId));
auto stream = CHECK_NOTNULL(
conn_->streamManager->getStream(streamId).value_or(nullptr));
if (peekCb && stream->streamReadError) {
VLOG(10) << "invoking peek error callbacks on stream=" << streamId << " "
<< *this;
@ -2780,7 +2827,8 @@ void QuicTransportBaseLite::processCallbacksAfterWriteData() {
auto txStreamId = conn_->streamManager->popTx();
while (txStreamId.has_value()) {
auto streamId = *txStreamId;
auto stream = CHECK_NOTNULL(conn_->streamManager->getStream(streamId));
auto stream = CHECK_NOTNULL(
conn_->streamManager->getStream(streamId).value_or(nullptr));
auto largestOffsetTxed = getLargestWriteOffsetTxed(*stream);
// if it's in the set of streams with TX, we should have a valid offset
CHECK(largestOffsetTxed.has_value());
@ -2880,7 +2928,9 @@ void QuicTransportBaseLite::setTransportSettings(
conn_->bufAccessor ||
transportSettings.dataPathType != DataPathType::ContinuousMemory);
conn_->transportSettings = std::move(transportSettings);
conn_->streamManager->refreshTransportSettings(conn_->transportSettings);
auto result = conn_->streamManager->refreshTransportSettings(
conn_->transportSettings);
LOG_IF(FATAL, result.hasError()) << result.error().message;
}
// A few values cannot be overridden to be lower than default:
@ -3291,7 +3341,8 @@ void QuicTransportBaseLite::handleNewGroupedStreams(
const auto& newPeerStreamIds = streamStorage;
for (const auto& streamId : newPeerStreamIds) {
CHECK_NOTNULL(connCallback_.get());
auto stream = CHECK_NOTNULL(conn_->streamManager->getStream(streamId));
auto stream = CHECK_NOTNULL(
conn_->streamManager->getStream(streamId).value_or(nullptr));
CHECK(stream->groupId);
if (isBidirectionalStream(streamId)) {
connCallback_->onNewBidirectionalStreamInGroup(

View File

@ -653,13 +653,15 @@ class QuicTransportBaseLite : virtual public QuicSocketLite,
void handleConnWritable();
void cleanupAckEventState();
WriteQuicDataResult handleInitialWriteDataCommon(
[[nodiscard]] folly::Expected<WriteQuicDataResult, QuicError>
handleInitialWriteDataCommon(
const ConnectionId& srcConnId,
const ConnectionId& dstConnId,
uint64_t packetLimit,
const std::string& token = "");
WriteQuicDataResult handleHandshakeWriteDataCommon(
[[nodiscard]] folly::Expected<WriteQuicDataResult, QuicError>
handleHandshakeWriteDataCommon(
const ConnectionId& srcConnId,
const ConnectionId& dstConnId,
uint64_t packetLimit);

View File

@ -116,7 +116,7 @@ uint64_t maybeUnvalidatedClientWritableBytes(
conn.udpSendPacketLen;
}
WriteQuicDataResult writeQuicDataToSocketImpl(
folly::Expected<WriteQuicDataResult, QuicError> writeQuicDataToSocketImpl(
QuicAsyncUDPSocket& sock,
QuicConnectionStateBase& connection,
const ConnectionId& srcConnId,
@ -165,8 +165,11 @@ WriteQuicDataResult writeQuicDataToSocketImpl(
aead,
headerCipher,
version);
probesWritten = probeResult.probesWritten;
bytesWritten += probeResult.bytesWritten;
if (!probeResult.hasValue()) {
return folly::makeUnexpected(probeResult.error());
}
probesWritten = probeResult->probesWritten;
bytesWritten += probeResult->bytesWritten;
// We only get one chance to write out the probes.
numProbePackets = 0;
packetLimit =
@ -209,8 +212,11 @@ WriteQuicDataResult writeQuicDataToSocketImpl(
headerCipher,
version,
writeLoopBeginTime);
packetsWritten += connectionDataResult.packetsWritten;
bytesWritten += connectionDataResult.bytesWritten;
if (!connectionDataResult.hasValue()) {
return folly::makeUnexpected(connectionDataResult.error());
}
packetsWritten += connectionDataResult->packetsWritten;
bytesWritten += connectionDataResult->bytesWritten;
VLOG_IF(10, packetsWritten || probesWritten)
<< nodeToString(connection.nodeType) << " written data "
<< (exceptCryptoStream ? "without crypto data " : "")
@ -633,7 +639,7 @@ bool handleStreamBufMetaWritten(
return false;
}
void updateConnection(
folly::Expected<folly::Unit, QuicError> updateConnection(
QuicConnectionStateBase& conn,
Optional<ClonedPacketIdentifier> clonedPacketIdentifier,
RegularQuicWritePacket packet,
@ -664,8 +670,12 @@ void updateConnection(
case QuicWriteFrame::Type::WriteStreamFrame: {
const WriteStreamFrame& writeStreamFrame = *frame.asWriteStreamFrame();
retransmittable = true;
auto stream = CHECK_NOTNULL(
conn.streamManager->getStream(writeStreamFrame.streamId));
auto streamResult =
conn.streamManager->getStream(writeStreamFrame.streamId);
if (!streamResult) {
return folly::makeUnexpected(streamResult.error());
}
auto stream = streamResult.value();
bool newStreamDataWritten = false;
if (writeStreamFrame.fromBufMeta) {
newStreamDataWritten = handleStreamBufMetaWritten(
@ -687,7 +697,11 @@ void updateConnection(
packetNumberSpace);
}
if (newStreamDataWritten) {
updateFlowControlOnWriteToSocket(*stream, writeStreamFrame.len);
auto flowControlResult =
updateFlowControlOnWriteToSocket(*stream, writeStreamFrame.len);
if (flowControlResult.hasError()) {
return folly::makeUnexpected(flowControlResult.error());
}
maybeWriteBlockAfterSocketWrite(*stream);
maybeWriteDataBlockedAfterSocketWrite(conn);
conn.streamManager->addTx(writeStreamFrame.streamId);
@ -775,8 +789,12 @@ void updateConnection(
case QuicWriteFrame::Type::MaxStreamDataFrame: {
const MaxStreamDataFrame& maxStreamDataFrame =
*frame.asMaxStreamDataFrame();
auto stream = CHECK_NOTNULL(
conn.streamManager->getStream(maxStreamDataFrame.streamId));
auto streamResult =
conn.streamManager->getStream(maxStreamDataFrame.streamId);
if (streamResult.hasError()) {
return folly::makeUnexpected(streamResult.error());
}
auto stream = streamResult.value();
retransmittable = true;
VLOG(10) << nodeToString(conn.nodeType)
<< " sent packet with window update packetNum=" << packetNum
@ -873,7 +891,7 @@ void updateConnection(
if (!retransmittable && !isPing) {
DCHECK(!clonedPacketIdentifier);
return;
return folly::unit;
}
conn.lossState.totalAckElicitingPacketsSent++;
@ -962,6 +980,7 @@ void updateConnection(
} else {
++conn.outstandings.packetCount[packetNumberSpace];
}
return folly::unit;
}
uint64_t probePacketWritableBytes(QuicConnectionStateBase& conn) {
@ -1045,7 +1064,7 @@ HeaderBuilder ShortHeaderBuilder(ProtectionType keyPhase) {
};
}
WriteQuicDataResult writeCryptoAndAckDataToSocket(
folly::Expected<WriteQuicDataResult, QuicError> writeCryptoAndAckDataToSocket(
QuicAsyncUDPSocket& sock,
QuicConnectionStateBase& connection,
const ConnectionId& srcConnId,
@ -1093,8 +1112,11 @@ WriteQuicDataResult writeCryptoAndAckDataToSocket(
headerCipher,
version,
token);
probesWritten += probeResult.probesWritten;
bytesWritten += probeResult.bytesWritten;
if (probeResult.hasError()) {
return folly::makeUnexpected(probeResult.error());
}
probesWritten += probeResult->probesWritten;
bytesWritten += probeResult->bytesWritten;
}
packetLimit = probesWritten > packetLimit ? 0 : (packetLimit - probesWritten);
// Only get one chance to write probes.
@ -1116,8 +1138,12 @@ WriteQuicDataResult writeCryptoAndAckDataToSocket(
Clock::now(),
token);
packetsWritten += writeResult.packetsWritten;
bytesWritten += writeResult.bytesWritten;
if (writeResult.hasError()) {
return folly::makeUnexpected(writeResult.error());
}
packetsWritten += writeResult->packetsWritten;
bytesWritten += writeResult->bytesWritten;
if (connection.transportSettings.immediatelyRetransmitInitialPackets &&
packetsWritten > 0 && packetsWritten < packetLimit) {
@ -1136,8 +1162,11 @@ WriteQuicDataResult writeCryptoAndAckDataToSocket(
headerCipher,
version,
token);
probesWritten += cloneResult.probesWritten;
bytesWritten += cloneResult.bytesWritten;
if (cloneResult.hasError()) {
return folly::makeUnexpected(cloneResult.error());
}
probesWritten += cloneResult->probesWritten;
bytesWritten += cloneResult->bytesWritten;
}
VLOG_IF(10, packetsWritten || probesWritten)
@ -1149,7 +1178,7 @@ WriteQuicDataResult writeCryptoAndAckDataToSocket(
return result;
}
WriteQuicDataResult writeQuicDataToSocket(
folly::Expected<WriteQuicDataResult, QuicError> writeQuicDataToSocket(
QuicAsyncUDPSocket& sock,
QuicConnectionStateBase& connection,
const ConnectionId& srcConnId,
@ -1172,7 +1201,8 @@ WriteQuicDataResult writeQuicDataToSocket(
writeLoopBeginTime);
}
WriteQuicDataResult writeQuicDataExceptCryptoStreamToSocket(
folly::Expected<WriteQuicDataResult, QuicError>
writeQuicDataExceptCryptoStreamToSocket(
QuicAsyncUDPSocket& socket,
QuicConnectionStateBase& connection,
const ConnectionId& srcConnId,
@ -1194,7 +1224,7 @@ WriteQuicDataResult writeQuicDataExceptCryptoStreamToSocket(
Clock::now());
}
uint64_t writeZeroRttDataToSocket(
folly::Expected<uint64_t, QuicError> writeZeroRttDataToSocket(
QuicAsyncUDPSocket& socket,
QuicConnectionStateBase& connection,
const ConnectionId& srcConnId,
@ -1221,21 +1251,26 @@ uint64_t writeZeroRttDataToSocket(
.blockedFrames()
.simpleFrames())
.build();
auto written = writeConnectionDataToSocket(
socket,
connection,
srcConnId,
dstConnId,
std::move(builder),
LongHeader::typeToPacketNumberSpace(type),
scheduler,
congestionControlWritableBytes,
packetLimit,
aead,
headerCipher,
version,
Clock::now())
.packetsWritten;
auto writeResult = writeConnectionDataToSocket(
socket,
connection,
srcConnId,
dstConnId,
std::move(builder),
LongHeader::typeToPacketNumberSpace(type),
scheduler,
congestionControlWritableBytes,
packetLimit,
aead,
headerCipher,
version,
Clock::now());
if (writeResult.hasError()) {
return folly::makeUnexpected(writeResult.error());
}
auto written = writeResult->packetsWritten;
VLOG_IF(10, written > 0) << nodeToString(connection.nodeType)
<< " written zero rtt data, packets=" << written
<< " " << connection;
@ -1487,7 +1522,7 @@ void encryptPacketHeader(
* network, since currently there is no way to rewind scheduler and connection
* state after the packets have been written to a batch.
*/
WriteQuicDataResult writeConnectionDataToSocket(
folly::Expected<WriteQuicDataResult, QuicError> writeConnectionDataToSocket(
QuicAsyncUDPSocket& sock,
QuicConnectionStateBase& connection,
const ConnectionId& srcConnId,
@ -1516,7 +1551,7 @@ WriteQuicDataResult writeConnectionDataToSocket(
if (connection.loopDetectorCallback) {
connection.writeDebugState.noWriteReason = NoWriteReason::EMPTY_SCHEDULER;
}
return {0, 0, 0};
return WriteQuicDataResult{0, 0, 0};
}
VLOG(10) << nodeToString(connection.nodeType)
@ -1567,7 +1602,7 @@ WriteQuicDataResult writeConnectionDataToSocket(
if (!flushSuccess) {
// Could not flush retried data. Return empty write result and wait for
// next retry.
return {0, 0, 0};
return WriteQuicDataResult{0, 0, 0};
}
}
@ -1631,7 +1666,7 @@ WriteQuicDataResult writeConnectionDataToSocket(
// make sure we flush the buffer in this function.
ioBufBatch.flush();
updateErrnoCount(connection, ioBufBatch);
return {ioBufBatch.getPktSent(), 0, bytesWritten};
return WriteQuicDataResult{ioBufBatch.getPktSent(), 0, bytesWritten};
}
// If we build a packet, we updateConnection(), even if the write might
// have failed. Because if it builds, a lot of states need to be updated no
@ -1645,7 +1680,7 @@ WriteQuicDataResult writeConnectionDataToSocket(
}
auto& result = ret.result;
updateConnection(
auto updateConnResult = updateConnection(
connection,
std::move(result->clonedPacketIdentifier),
std::move(result->packet->packet),
@ -1653,6 +1688,9 @@ WriteQuicDataResult writeConnectionDataToSocket(
folly::to<uint32_t>(ret.encodedSize),
folly::to<uint32_t>(ret.encodedBodySize),
false /* isDSRPacket */);
if (updateConnResult.hasError()) {
return folly::makeUnexpected(updateConnResult.error());
}
// if ioBufBatch.write returns false
// it is because a flush() call failed
@ -1661,7 +1699,7 @@ WriteQuicDataResult writeConnectionDataToSocket(
connection.writeDebugState.noWriteReason =
NoWriteReason::SOCKET_FAILURE;
}
return {ioBufBatch.getPktSent(), 0, bytesWritten};
return WriteQuicDataResult{ioBufBatch.getPktSent(), 0, bytesWritten};
}
if ((connection.transportSettings.batchingMode ==
@ -1687,10 +1725,10 @@ WriteQuicDataResult writeConnectionDataToSocket(
connection.bufAccessor->length() == 0 &&
connection.bufAccessor->headroom() == 0);
}
return {ioBufBatch.getPktSent(), 0, bytesWritten};
return WriteQuicDataResult{ioBufBatch.getPktSent(), 0, bytesWritten};
}
WriteQuicDataResult writeProbingDataToSocket(
folly::Expected<WriteQuicDataResult, QuicError> writeProbingDataToSocket(
QuicAsyncUDPSocket& sock,
QuicConnectionStateBase& connection,
const ConnectionId& srcConnId,
@ -1735,8 +1773,11 @@ WriteQuicDataResult writeProbingDataToSocket(
version,
writeLoopBeginTime,
token);
auto probesWritten = cloningResult.packetsWritten;
auto bytesWritten = cloningResult.bytesWritten;
if (cloningResult.hasError()) {
return folly::makeUnexpected(cloningResult.error());
}
auto probesWritten = cloningResult->packetsWritten;
auto bytesWritten = cloningResult->bytesWritten;
if (probesWritten < probesToSend) {
// If we can use an IMMEDIATE_ACK, that's better than a PING.
auto probeSchedulerBuilder = FrameScheduler::Builder(
@ -1768,13 +1809,16 @@ WriteQuicDataResult writeProbingDataToSocket(
headerCipher,
version,
writeLoopBeginTime);
probesWritten += probingResult.packetsWritten;
bytesWritten += probingResult.bytesWritten;
if (probingResult.hasError()) {
return folly::makeUnexpected(probingResult.error());
}
probesWritten += probingResult->packetsWritten;
bytesWritten += probingResult->bytesWritten;
}
VLOG_IF(10, probesWritten > 0)
<< nodeToString(connection.nodeType)
<< " writing probes using scheduler=CloningScheduler " << connection;
return {0, probesWritten, bytesWritten};
return WriteQuicDataResult{0, probesWritten, bytesWritten};
}
WriteDataReason shouldWriteData(/*const*/ QuicConnectionStateBase& conn) {
@ -1917,7 +1961,7 @@ void implicitAckCryptoStream(
implicitAck.largestAcked = ackBlocks.back().end;
implicitAck.ackBlocks.emplace_back(
ackBlocks.front().start, implicitAck.largestAcked);
processAckFrame(
auto result = processAckFrame(
conn,
packetNumSpace,
implicitAck,
@ -1943,13 +1987,17 @@ void implicitAckCryptoStream(
// our outstanding packets.
}
}
return folly::unit;
},
// We shouldn't mark anything as lost from the implicit ACK, as it should
// be ACKing the entire range.
[](auto&, auto&, auto) {
LOG(FATAL) << "Got loss from implicit crypto ACK.";
return folly::unit;
},
implicitAckTime);
// TODO handle error
CHECK(result.hasValue());
// Clear out the loss buffer explicitly. The implicit ACK itself will not
// remove data already in the loss buffer.
auto cryptoStream = getCryptoStream(*conn.cryptoState, encryptionLevel);

View File

@ -89,7 +89,8 @@ struct WriteQuicDataResult {
* Attempts to write data from all frames in the QUIC connection into the UDP
* socket supplied with the aead and the headerCipher.
*/
WriteQuicDataResult writeQuicDataToSocket(
[[nodiscard]] folly::Expected<WriteQuicDataResult, QuicError>
writeQuicDataToSocket(
QuicAsyncUDPSocket& sock,
QuicConnectionStateBase& connection,
const ConnectionId& srcConnId,
@ -105,7 +106,8 @@ WriteQuicDataResult writeQuicDataToSocket(
*
* return the number of packets written to socket.
*/
WriteQuicDataResult writeCryptoAndAckDataToSocket(
[[nodiscard]] folly::Expected<WriteQuicDataResult, QuicError>
writeCryptoAndAckDataToSocket(
QuicAsyncUDPSocket& sock,
QuicConnectionStateBase& connection,
const ConnectionId& srcConnId,
@ -122,7 +124,8 @@ WriteQuicDataResult writeCryptoAndAckDataToSocket(
* This is useful when the crypto stream still needs to be sent in separate
* packets and cannot use the encryption of the data key.
*/
WriteQuicDataResult writeQuicDataExceptCryptoStreamToSocket(
[[nodiscard]] folly::Expected<WriteQuicDataResult, QuicError>
writeQuicDataExceptCryptoStreamToSocket(
QuicAsyncUDPSocket& socket,
QuicConnectionStateBase& connection,
const ConnectionId& srcConnId,
@ -136,7 +139,7 @@ WriteQuicDataResult writeQuicDataExceptCryptoStreamToSocket(
* Writes frame data including zero rtt data to the socket with the supplied
* zero rtt cipher.
*/
uint64_t writeZeroRttDataToSocket(
[[nodiscard]] folly::Expected<uint64_t, QuicError> writeZeroRttDataToSocket(
QuicAsyncUDPSocket& socket,
QuicConnectionStateBase& connection,
const ConnectionId& srcConnId,
@ -199,7 +202,7 @@ bool handleStreamBufMetaWritten(
/**
* Update the connection state after sending a new packet.
*/
void updateConnection(
[[nodiscard]] folly::Expected<folly::Unit, QuicError> updateConnection(
QuicConnectionStateBase& conn,
Optional<ClonedPacketIdentifier> clonedPacketIdentifier,
RegularQuicWritePacket packet,
@ -279,7 +282,8 @@ void encryptPacketHeader(
* data allowed by the writableBytesFunc and will only write a maximum
* number of packetLimit packets at each invocation.
*/
WriteQuicDataResult writeConnectionDataToSocket(
[[nodiscard]] folly::Expected<WriteQuicDataResult, QuicError>
writeConnectionDataToSocket(
QuicAsyncUDPSocket& sock,
QuicConnectionStateBase& connection,
const ConnectionId& srcConnId,
@ -295,7 +299,8 @@ WriteQuicDataResult writeConnectionDataToSocket(
TimePoint writeLoopBeginTime,
const std::string& token = std::string());
WriteQuicDataResult writeProbingDataToSocket(
[[nodiscard]] folly::Expected<WriteQuicDataResult, QuicError>
writeProbingDataToSocket(
QuicAsyncUDPSocket& sock,
QuicConnectionStateBase& connection,
const ConnectionId& srcConnId,

View File

@ -109,7 +109,9 @@ std::unique_ptr<QuicClientConnectionState>
createConn(uint32_t maxStreams, uint64_t maxOffset, uint64_t initialMaxOffset) {
auto conn = std::make_unique<QuicClientConnectionState>(
FizzClientQuicHandshakeContext::Builder().build());
conn->streamManager->setMaxLocalBidirectionalStreams(maxStreams);
auto result =
conn->streamManager->setMaxLocalBidirectionalStreams(maxStreams);
CHECK(!result.hasError()) << "Failed to set max local bidirectional streams";
conn->flowControlState.peerAdvertisedMaxOffset = maxOffset;
conn->flowControlState.peerAdvertisedInitialMaxStreamOffsetBidiRemote =
initialMaxOffset;
@ -157,7 +159,9 @@ WriteStreamFrame writeDataToStream(
auto stream = conn.streamManager->findStream(streamId);
auto length = data.size();
CHECK(stream);
writeDataToQuicStream(*stream, folly::IOBuf::copyBuffer(data), false);
auto result =
writeDataToQuicStream(*stream, folly::IOBuf::copyBuffer(data), false);
CHECK(!result.hasError());
return {streamId, 0, length, false};
}
@ -168,7 +172,8 @@ WriteStreamFrame writeDataToStream(
std::unique_ptr<folly::IOBuf> buf) {
auto stream = conn.streamManager->findStream(streamId);
auto length = buf->computeChainDataLength();
writeDataToQuicStream(*stream, std::move(buf), false);
auto result = writeDataToQuicStream(*stream, std::move(buf), false);
CHECK(!result.hasError());
return {streamId, 0, length, false};
}
@ -521,7 +526,8 @@ TEST_F(QuicPacketSchedulerTest, CryptoWritePartialLossBuffer) {
TEST_F(QuicPacketSchedulerTest, StreamFrameSchedulerExists) {
QuicServerConnectionState conn(
FizzServerQuicHandshakeContext::Builder().build());
conn.streamManager->setMaxLocalBidirectionalStreams(10);
ASSERT_FALSE(
conn.streamManager->setMaxLocalBidirectionalStreams(10).hasError());
auto connId = getTestConnectionId();
auto stream = conn.streamManager->createNextBidirectionalStream().value();
@ -544,7 +550,8 @@ TEST_F(QuicPacketSchedulerTest, StreamFrameSchedulerExists) {
TEST_F(QuicPacketSchedulerTest, StreamFrameNoSpace) {
QuicServerConnectionState conn(
FizzServerQuicHandshakeContext::Builder().build());
conn.streamManager->setMaxLocalBidirectionalStreams(10);
ASSERT_FALSE(
conn.streamManager->setMaxLocalBidirectionalStreams(10).hasError());
auto connId = getTestConnectionId();
auto stream = conn.streamManager->createNextBidirectionalStream().value();
@ -1095,7 +1102,8 @@ TEST_F(QuicPacketSchedulerTest, CloneSchedulerUseNormalSchedulerFirst) {
TEST_F(QuicPacketSchedulerTest, CloneWillGenerateNewWindowUpdate) {
QuicClientConnectionState conn(
FizzClientQuicHandshakeContext::Builder().build());
conn.streamManager->setMaxLocalBidirectionalStreams(10);
auto result = conn.streamManager->setMaxLocalBidirectionalStreams(10);
ASSERT_FALSE(result.hasError());
auto stream = conn.streamManager->createNextBidirectionalStream().value();
FrameScheduler noopScheduler("frame", conn);
CloningScheduler cloningScheduler(noopScheduler, conn, "GiantsShoulder", 0);
@ -1218,7 +1226,8 @@ TEST_F(QuicPacketSchedulerTest, CloningSchedulerWithInplaceBuilder) {
TEST_F(QuicPacketSchedulerTest, CloningSchedulerWithInplaceBuilderFullPacket) {
QuicClientConnectionState conn(
FizzClientQuicHandshakeContext::Builder().build());
conn.streamManager->setMaxLocalBidirectionalStreams(10);
auto streamResult = conn.streamManager->setMaxLocalBidirectionalStreams(10);
ASSERT_FALSE(streamResult.hasError());
conn.flowControlState.peerAdvertisedMaxOffset = 100000;
conn.flowControlState.peerAdvertisedInitialMaxStreamOffsetBidiRemote = 100000;
conn.transportSettings.dataPathType = DataPathType::ContinuousMemory;
@ -1229,7 +1238,8 @@ TEST_F(QuicPacketSchedulerTest, CloningSchedulerWithInplaceBuilderFullPacket) {
conn.bufAccessor = &bufAccessor;
auto stream = *conn.streamManager->createNextBidirectionalStream();
auto inBuf = buildRandomInputData(conn.udpSendPacketLen * 10);
writeDataToQuicStream(*stream, inBuf->clone(), false);
auto writeResult = writeDataToQuicStream(*stream, inBuf->clone(), false);
ASSERT_FALSE(writeResult.hasError());
FrameScheduler scheduler = std::move(FrameScheduler::Builder(
conn,
@ -1254,7 +1264,7 @@ TEST_F(QuicPacketSchedulerTest, CloningSchedulerWithInplaceBuilderFullPacket) {
auto bufferLength = result.packet->header.computeChainDataLength() +
result.packet->body.computeChainDataLength();
EXPECT_EQ(conn.udpSendPacketLen, bufferLength);
updateConnection(
auto updateResult = updateConnection(
conn,
none,
result.packet->packet,
@ -1262,6 +1272,7 @@ TEST_F(QuicPacketSchedulerTest, CloningSchedulerWithInplaceBuilderFullPacket) {
bufferLength,
0,
false /* isDSRPacket */);
ASSERT_FALSE(updateResult.hasError());
buf = bufAccessor.obtain();
ASSERT_EQ(conn.udpSendPacketLen, buf->length());
buf->clear();
@ -1299,12 +1310,14 @@ TEST_F(QuicPacketSchedulerTest, CloneLargerThanOriginalPacket) {
QuicClientConnectionState conn(
FizzClientQuicHandshakeContext::Builder().build());
conn.udpSendPacketLen = 1000;
conn.streamManager->setMaxLocalBidirectionalStreams(10);
auto result = conn.streamManager->setMaxLocalBidirectionalStreams(10);
ASSERT_FALSE(result.hasError());
conn.flowControlState.peerAdvertisedMaxOffset = 100000;
conn.flowControlState.peerAdvertisedInitialMaxStreamOffsetBidiRemote = 100000;
auto stream = conn.streamManager->createNextBidirectionalStream().value();
auto inputData = buildRandomInputData(conn.udpSendPacketLen * 10);
writeDataToQuicStream(*stream, inputData->clone(), false);
auto writeResult = writeDataToQuicStream(*stream, inputData->clone(), false);
ASSERT_FALSE(writeResult.hasError());
FrameScheduler scheduler = std::move(FrameScheduler::Builder(
conn,
EncryptionLevel::AppData,
@ -1327,7 +1340,7 @@ TEST_F(QuicPacketSchedulerTest, CloneLargerThanOriginalPacket) {
auto encodedSize = packetResult.packet->body.computeChainDataLength() +
packetResult.packet->header.computeChainDataLength() + cipherOverhead;
EXPECT_EQ(encodedSize, conn.udpSendPacketLen);
updateConnection(
auto updateResult = updateConnection(
conn,
none,
packetResult.packet->packet,
@ -1335,6 +1348,7 @@ TEST_F(QuicPacketSchedulerTest, CloneLargerThanOriginalPacket) {
encodedSize,
0,
false /* isDSRPacket */);
ASSERT_FALSE(updateResult.hasError());
// make packetNum too larger to be encoded into the same size:
packetNum += 0xFF;
@ -1579,8 +1593,9 @@ TEST_F(
BufferMeta bufMeta(20);
writeDataToStream(conn, stream4, "some data");
writeBufMetaToQuicStream(
*conn.streamManager->findStream(stream4), bufMeta, true /* eof */);
ASSERT_FALSE(writeBufMetaToQuicStream(
*conn.streamManager->findStream(stream4), bufMeta, true)
.hasError());
// Pretend we sent the non DSR data
dsrStream->ackedIntervals.insert(0, dsrStream->writeBuffer.chainLength() - 1);
@ -1861,14 +1876,16 @@ TEST_F(QuicPacketSchedulerTest, HighPriNewDataBeforeLowPriLossData) {
TEST_F(QuicPacketSchedulerTest, WriteLossWithoutFlowControl) {
QuicServerConnectionState conn(
FizzServerQuicHandshakeContext::Builder().build());
conn.streamManager->setMaxLocalBidirectionalStreams(10);
ASSERT_FALSE(
conn.streamManager->setMaxLocalBidirectionalStreams(10).hasError());
conn.flowControlState.peerAdvertisedMaxOffset = 1000;
conn.flowControlState.peerAdvertisedInitialMaxStreamOffsetBidiRemote = 1000;
auto streamId = (*conn.streamManager->createNextBidirectionalStream())->id;
auto stream = conn.streamManager->findStream(streamId);
auto data = buildRandomInputData(1000);
writeDataToQuicStream(*stream, std::move(data), true);
ASSERT_FALSE(
writeDataToQuicStream(*stream, std::move(data), true).hasError());
conn.streamManager->updateWritableStreams(*stream);
StreamFrameScheduler scheduler(conn);
@ -1884,8 +1901,10 @@ TEST_F(QuicPacketSchedulerTest, WriteLossWithoutFlowControl) {
builder1.encodePacketHeader();
scheduler.writeStreams(builder1);
auto packet1 = std::move(builder1).buildPacket().packet;
updateConnection(
conn, none, packet1, Clock::now(), 1000, 0, false /* isDSR */);
ASSERT_FALSE(
updateConnection(
conn, none, packet1, Clock::now(), 1000, 0, false /* isDSR */)
.hasError());
EXPECT_EQ(1, packet1.frames.size());
auto& writeStreamFrame1 = *packet1.frames[0].asWriteStreamFrame();
EXPECT_EQ(streamId, writeStreamFrame1.streamId);
@ -1912,8 +1931,10 @@ TEST_F(QuicPacketSchedulerTest, WriteLossWithoutFlowControl) {
builder2.encodePacketHeader();
scheduler.writeStreams(builder2);
auto packet2 = std::move(builder2).buildPacket().packet;
updateConnection(
conn, none, packet2, Clock::now(), 1000, 0, false /* isDSR */);
ASSERT_FALSE(
updateConnection(
conn, none, packet2, Clock::now(), 1000, 0, false /* isDSR */)
.hasError());
EXPECT_EQ(1, packet2.frames.size());
auto& writeStreamFrame2 = *packet2.frames[0].asWriteStreamFrame();
EXPECT_EQ(streamId, writeStreamFrame2.streamId);
@ -1926,7 +1947,8 @@ TEST_F(QuicPacketSchedulerTest, WriteLossWithoutFlowControl) {
TEST_F(QuicPacketSchedulerTest, WriteLossWithoutFlowControlIgnoreDSR) {
QuicServerConnectionState conn(
FizzServerQuicHandshakeContext::Builder().build());
conn.streamManager->setMaxLocalBidirectionalStreams(10);
ASSERT_FALSE(
conn.streamManager->setMaxLocalBidirectionalStreams(10).hasError());
conn.flowControlState.peerAdvertisedMaxOffset = 1000;
conn.flowControlState.peerAdvertisedInitialMaxStreamOffsetBidiRemote = 1000;
@ -1934,7 +1956,8 @@ TEST_F(QuicPacketSchedulerTest, WriteLossWithoutFlowControlIgnoreDSR) {
auto dsrStream = conn.streamManager->createNextBidirectionalStream().value();
auto stream = conn.streamManager->findStream(streamId);
auto data = buildRandomInputData(1000);
writeDataToQuicStream(*stream, std::move(data), true);
ASSERT_FALSE(
writeDataToQuicStream(*stream, std::move(data), true).hasError());
WriteBufferMeta bufMeta{};
bufMeta.offset = 0;
bufMeta.length = 100;
@ -1956,8 +1979,10 @@ TEST_F(QuicPacketSchedulerTest, WriteLossWithoutFlowControlIgnoreDSR) {
builder1.encodePacketHeader();
scheduler.writeStreams(builder1);
auto packet1 = std::move(builder1).buildPacket().packet;
updateConnection(
conn, none, packet1, Clock::now(), 1000, 0, false /* isDSR */);
ASSERT_FALSE(
updateConnection(
conn, none, packet1, Clock::now(), 1000, 0, false /* isDSR */)
.hasError());
EXPECT_EQ(1, packet1.frames.size());
auto& writeStreamFrame1 = *packet1.frames[0].asWriteStreamFrame();
EXPECT_EQ(streamId, writeStreamFrame1.streamId);
@ -1972,7 +1997,8 @@ TEST_F(QuicPacketSchedulerTest, WriteLossWithoutFlowControlIgnoreDSR) {
TEST_F(QuicPacketSchedulerTest, WriteLossWithoutFlowControlSequential) {
QuicServerConnectionState conn(
FizzServerQuicHandshakeContext::Builder().build());
conn.streamManager->setMaxLocalBidirectionalStreams(10);
ASSERT_FALSE(
conn.streamManager->setMaxLocalBidirectionalStreams(10).hasError());
conn.flowControlState.peerAdvertisedMaxOffset = 1000;
conn.flowControlState.peerAdvertisedInitialMaxStreamOffsetBidiRemote = 1000;
@ -1980,7 +2006,8 @@ TEST_F(QuicPacketSchedulerTest, WriteLossWithoutFlowControlSequential) {
conn.streamManager->setStreamPriority(streamId, Priority(0, false));
auto stream = conn.streamManager->findStream(streamId);
auto data = buildRandomInputData(1000);
writeDataToQuicStream(*stream, std::move(data), true);
ASSERT_FALSE(
writeDataToQuicStream(*stream, std::move(data), true).hasError());
conn.streamManager->updateWritableStreams(*stream);
StreamFrameScheduler scheduler(conn);
@ -1996,8 +2023,10 @@ TEST_F(QuicPacketSchedulerTest, WriteLossWithoutFlowControlSequential) {
builder1.encodePacketHeader();
scheduler.writeStreams(builder1);
auto packet1 = std::move(builder1).buildPacket().packet;
updateConnection(
conn, none, packet1, Clock::now(), 1000, 0, false /* isDSR */);
ASSERT_FALSE(
updateConnection(
conn, none, packet1, Clock::now(), 1000, 0, false /* isDSR */)
.hasError());
EXPECT_EQ(1, packet1.frames.size());
auto& writeStreamFrame1 = *packet1.frames[0].asWriteStreamFrame();
EXPECT_EQ(streamId, writeStreamFrame1.streamId);
@ -2024,8 +2053,10 @@ TEST_F(QuicPacketSchedulerTest, WriteLossWithoutFlowControlSequential) {
builder2.encodePacketHeader();
scheduler.writeStreams(builder2);
auto packet2 = std::move(builder2).buildPacket().packet;
updateConnection(
conn, none, packet2, Clock::now(), 1000, 0, false /* isDSR */);
ASSERT_FALSE(
updateConnection(
conn, none, packet2, Clock::now(), 1000, 0, false /* isDSR */)
.hasError());
EXPECT_EQ(1, packet2.frames.size());
auto& writeStreamFrame2 = *packet2.frames[0].asWriteStreamFrame();
EXPECT_EQ(streamId, writeStreamFrame2.streamId);
@ -2038,7 +2069,8 @@ TEST_F(QuicPacketSchedulerTest, WriteLossWithoutFlowControlSequential) {
TEST_F(QuicPacketSchedulerTest, RunOutFlowControlDuringStreamWrite) {
QuicServerConnectionState conn(
FizzServerQuicHandshakeContext::Builder().build());
conn.streamManager->setMaxLocalBidirectionalStreams(10);
ASSERT_FALSE(
conn.streamManager->setMaxLocalBidirectionalStreams(10).hasError());
conn.flowControlState.peerAdvertisedMaxOffset = 1000;
conn.flowControlState.peerAdvertisedInitialMaxStreamOffsetBidiRemote = 1000;
conn.udpSendPacketLen = 2000;
@ -2048,7 +2080,8 @@ TEST_F(QuicPacketSchedulerTest, RunOutFlowControlDuringStreamWrite) {
auto stream1 = conn.streamManager->findStream(streamId1);
auto stream2 = conn.streamManager->findStream(streamId2);
auto newData = buildRandomInputData(1000);
writeDataToQuicStream(*stream1, std::move(newData), true);
ASSERT_FALSE(
writeDataToQuicStream(*stream1, std::move(newData), true).hasError());
conn.streamManager->updateWritableStreams(*stream1);
// Fake a loss data for stream2:
@ -2069,8 +2102,10 @@ TEST_F(QuicPacketSchedulerTest, RunOutFlowControlDuringStreamWrite) {
builder1.encodePacketHeader();
scheduler.writeStreams(builder1);
auto packet1 = std::move(builder1).buildPacket().packet;
updateConnection(
conn, none, packet1, Clock::now(), 1200, 0, false /* isDSR */);
ASSERT_FALSE(
updateConnection(
conn, none, packet1, Clock::now(), 1200, 0, false /* isDSR */)
.hasError());
ASSERT_EQ(2, packet1.frames.size());
auto& writeStreamFrame1 = *packet1.frames[0].asWriteStreamFrame();
EXPECT_EQ(streamId1, writeStreamFrame1.streamId);
@ -2090,15 +2125,18 @@ TEST_F(QuicPacketSchedulerTest, RunOutFlowControlDuringStreamWrite) {
TEST_F(QuicPacketSchedulerTest, WritingFINFromBufWithBufMetaFirst) {
QuicServerConnectionState conn(
FizzServerQuicHandshakeContext::Builder().build());
conn.streamManager->setMaxLocalBidirectionalStreams(10);
ASSERT_FALSE(
conn.streamManager->setMaxLocalBidirectionalStreams(10).hasError());
conn.flowControlState.peerAdvertisedMaxOffset = 100000;
auto* stream = *(conn.streamManager->createNextBidirectionalStream());
stream->flowControlState.peerAdvertisedMaxOffset = 100000;
writeDataToQuicStream(*stream, folly::IOBuf::copyBuffer("Ascent"), false);
ASSERT_FALSE(
writeDataToQuicStream(*stream, folly::IOBuf::copyBuffer("Ascent"), false)
.hasError());
stream->dsrSender = std::make_unique<MockDSRPacketizationRequestSender>();
BufferMeta bufferMeta(5000);
writeBufMetaToQuicStream(*stream, bufferMeta, true);
ASSERT_FALSE(writeBufMetaToQuicStream(*stream, bufferMeta, true).hasError());
EXPECT_TRUE(stream->finalWriteOffset.hasValue());
stream->writeBufMeta.split(5000);
@ -2131,15 +2169,18 @@ TEST_F(QuicPacketSchedulerTest, WritingFINFromBufWithBufMetaFirst) {
TEST_F(QuicPacketSchedulerTest, NoFINWriteWhenBufMetaWrittenFIN) {
QuicServerConnectionState conn(
FizzServerQuicHandshakeContext::Builder().build());
conn.streamManager->setMaxLocalBidirectionalStreams(10);
ASSERT_FALSE(
conn.streamManager->setMaxLocalBidirectionalStreams(10).hasError());
conn.flowControlState.peerAdvertisedMaxOffset = 100000;
auto* stream = *(conn.streamManager->createNextBidirectionalStream());
stream->flowControlState.peerAdvertisedMaxOffset = 100000;
writeDataToQuicStream(*stream, folly::IOBuf::copyBuffer("Ascent"), false);
ASSERT_FALSE(
writeDataToQuicStream(*stream, folly::IOBuf::copyBuffer("Ascent"), false)
.hasError());
stream->dsrSender = std::make_unique<MockDSRPacketizationRequestSender>();
BufferMeta bufferMeta(5000);
writeBufMetaToQuicStream(*stream, bufferMeta, true);
ASSERT_FALSE(writeBufMetaToQuicStream(*stream, bufferMeta, true).hasError());
EXPECT_TRUE(stream->finalWriteOffset.hasValue());
PacketNum packetNum = 0;
ShortHeader header(
@ -2345,10 +2386,12 @@ TEST_F(QuicPacketSchedulerTest, ShortHeaderFixedPaddingAtStart) {
1000000;
// Create stream and write data
conn.streamManager->setMaxLocalBidirectionalStreams(10);
ASSERT_FALSE(
conn.streamManager->setMaxLocalBidirectionalStreams(10).hasError());
auto stream = conn.streamManager->createNextBidirectionalStream().value();
auto data = buildRandomInputData(50); // Small enough to fit in one packet
writeDataToQuicStream(*stream, std::move(data), false);
ASSERT_FALSE(
writeDataToQuicStream(*stream, std::move(data), false).hasError());
// Set up scheduler and builder
FrameScheduler scheduler = std::move(FrameScheduler::Builder(
@ -2562,13 +2605,14 @@ TEST_F(QuicPacketSchedulerTest, ImmediateAckFrameSchedulerNotRequested) {
TEST_F(QuicPacketSchedulerTest, RstStreamSchedulerReliableReset) {
QuicClientConnectionState conn(
FizzClientQuicHandshakeContext::Builder().build());
conn.streamManager->setMaxLocalBidirectionalStreams(10);
ASSERT_FALSE(
conn.streamManager->setMaxLocalBidirectionalStreams(10).hasError());
conn.flowControlState.peerAdvertisedMaxOffset = 100000;
conn.flowControlState.peerAdvertisedInitialMaxStreamOffsetBidiRemote = 100000;
auto stream = conn.streamManager->createNextBidirectionalStream().value();
auto buf = folly::IOBuf::copyBuffer("cupcake");
auto bufLen = buf->computeChainDataLength();
writeDataToQuicStream(*stream, buf->clone(), false);
ASSERT_FALSE(writeDataToQuicStream(*stream, buf->clone(), false).hasError());
// Reliable reset with reliableSize = bufLen
conn.pendingEvents.resets.emplace(
@ -2596,14 +2640,15 @@ TEST_F(QuicPacketSchedulerTest, RstStreamSchedulerReliableReset) {
std::move(builder1), conn.udpSendPacketLen - cipherOverhead);
auto encodedSize1 = packetResult1.packet->body.computeChainDataLength() +
packetResult1.packet->header.computeChainDataLength() + cipherOverhead;
updateConnection(
conn,
none,
packetResult1.packet->packet,
Clock::now(),
encodedSize1,
0,
false /* isDSRPacket */);
ASSERT_FALSE(updateConnection(
conn,
none,
packetResult1.packet->packet,
Clock::now(),
encodedSize1,
0,
false /* isDSRPacket */)
.hasError());
// We shouldn't send the reliable reset just yet, because we haven't yet
// egressed all the stream data up to the reliable offset.
@ -2622,14 +2667,15 @@ TEST_F(QuicPacketSchedulerTest, RstStreamSchedulerReliableReset) {
std::move(builder2), conn.udpSendPacketLen - cipherOverhead);
auto encodedSize2 = packetResult1.packet->body.computeChainDataLength() +
packetResult2.packet->header.computeChainDataLength() + cipherOverhead;
updateConnection(
conn,
none,
packetResult2.packet->packet,
Clock::now(),
encodedSize2,
0,
false /* isDSRPacket */);
ASSERT_FALSE(updateConnection(
conn,
none,
packetResult2.packet->packet,
Clock::now(),
encodedSize2,
0,
false /* isDSRPacket */)
.hasError());
// Now we should have egressed all the stream data up to the reliable offset,
// so we should have sent the reliable reset.
@ -2700,10 +2746,12 @@ TEST_F(QuicPacketSchedulerTest, FixedShortHeaderPadding) {
1000000;
// Create stream and write data
conn.streamManager->setMaxLocalBidirectionalStreams(10);
ASSERT_FALSE(
conn.streamManager->setMaxLocalBidirectionalStreams(10).hasError());
auto stream = conn.streamManager->createNextBidirectionalStream().value();
auto data = buildRandomInputData(50); // Small enough to fit in one packet
writeDataToQuicStream(*stream, std::move(data), false);
ASSERT_FALSE(
writeDataToQuicStream(*stream, std::move(data), false).hasError());
conn.streamManager->updateWritableStreams(*stream);
// Set up scheduler and builder

View File

@ -299,16 +299,21 @@ class TestQuicTransport
auto type = static_cast<TestFrameType>(cursor.readBE<uint8_t>());
if (type == TestFrameType::CRYPTO) {
auto cryptoBuffer = decodeCryptoBuffer(cursor);
appendDataToReadBuffer(
conn_->cryptoState->initialStream, std::move(cryptoBuffer));
CHECK(!appendDataToReadBuffer(
conn_->cryptoState->initialStream, std::move(cryptoBuffer))
.hasError());
} else if (type == TestFrameType::MAX_STREAMS) {
auto maxStreamsFrame = decodeMaxStreamsFrame(cursor);
if (maxStreamsFrame.isForBidirectionalStream()) {
conn_->streamManager->setMaxLocalBidirectionalStreams(
maxStreamsFrame.maxStreams);
CHECK(
!conn_->streamManager
->setMaxLocalBidirectionalStreams(maxStreamsFrame.maxStreams)
.hasError());
} else {
conn_->streamManager->setMaxLocalUnidirectionalStreams(
maxStreamsFrame.maxStreams);
CHECK(!conn_->streamManager
->setMaxLocalUnidirectionalStreams(
maxStreamsFrame.maxStreams)
.hasError());
}
} else if (type == TestFrameType::DATAGRAM) {
auto buffer = decodeDatagramFrame(cursor);
@ -316,21 +321,30 @@ class TestQuicTransport
handleDatagram(*conn_, frame, udpPacket.timings.receiveTimePoint);
} else if (type == TestFrameType::STREAM_GROUP) {
auto res = decodeStreamGroupBuffer(cursor);
QuicStreamState* stream =
auto streamResult =
conn_->streamManager->getStream(res.id, res.groupId);
if (streamResult.hasError()) {
return folly::makeUnexpected(streamResult.error());
}
QuicStreamState* stream = streamResult.value();
if (!stream) {
continue;
}
appendDataToReadBuffer(*stream, std::move(res.buf));
CHECK(!appendDataToReadBuffer(*stream, std::move(res.buf)).hasError());
conn_->streamManager->updateReadableStreams(*stream);
conn_->streamManager->updatePeekableStreams(*stream);
} else {
auto buffer = decodeStreamBuffer(cursor);
QuicStreamState* stream = conn_->streamManager->getStream(buffer.first);
auto streamResult = conn_->streamManager->getStream(buffer.first);
if (streamResult.hasError()) {
return folly::makeUnexpected(streamResult.error());
}
QuicStreamState* stream = streamResult.value();
if (!stream) {
continue;
}
appendDataToReadBuffer(*stream, std::move(buffer.second));
(void)appendDataToReadBuffer(*stream, std::move(buffer.second))
.hasError();
conn_->streamManager->updateReadableStreams(*stream);
conn_->streamManager->updatePeekableStreams(*stream);
}
@ -339,15 +353,16 @@ class TestQuicTransport
}
void writeData() override {
writeQuicDataToSocket(
*socket_,
*conn_,
*conn_->serverConnectionId,
*conn_->clientConnectionId,
*aead,
*headerCipher,
*conn_->version,
conn_->transportSettings.writeConnectionDataPacketsLimit);
CHECK(!writeQuicDataToSocket(
*socket_,
*conn_,
*conn_->serverConnectionId,
*conn_->clientConnectionId,
*aead,
*headerCipher,
*conn_->version,
conn_->transportSettings.writeConnectionDataPacketsLimit)
.hasError());
}
// This is to expose the protected pacedWriteDataToSocket() function
@ -441,7 +456,9 @@ class TestQuicTransport
}
void addStreamReadError(StreamId id, QuicErrorCode ex) {
QuicStreamState* stream = conn_->streamManager->getStream(id);
auto streamResult = conn_->streamManager->getStream(id);
ASSERT_FALSE(streamResult.hasError());
QuicStreamState* stream = streamResult.value();
stream->streamReadError = ex;
conn_->streamManager->updateReadableStreams(*stream);
conn_->streamManager->updatePeekableStreams(*stream);
@ -457,7 +474,9 @@ class TestQuicTransport
}
void closeStream(StreamId id) {
QuicStreamState* stream = conn_->streamManager->getStream(id);
auto streamResult = conn_->streamManager->getStream(id);
ASSERT_FALSE(streamResult.hasError());
QuicStreamState* stream = streamResult.value();
stream->sendState = StreamSendState::Closed;
stream->recvState = StreamRecvState::Closed;
conn_->streamManager->addClosed(id);
@ -503,7 +522,7 @@ class TestQuicTransport
}
QuicStreamState* getStream(StreamId id) {
return conn_->streamManager->getStream(id);
return conn_->streamManager->getStream(id).value_or(nullptr);
}
void setServerConnectionId() {
@ -637,10 +656,14 @@ class QuicTransportImplTest : public Test {
kDefaultStreamFlowControlWindow;
conn.flowControlState.peerAdvertisedMaxOffset =
kDefaultConnectionFlowControlWindow;
conn.streamManager->setMaxLocalBidirectionalStreams(
kDefaultMaxStreamsBidirectional);
conn.streamManager->setMaxLocalUnidirectionalStreams(
kDefaultMaxStreamsUnidirectional);
CHECK(
!conn.streamManager
->setMaxLocalBidirectionalStreams(kDefaultMaxStreamsBidirectional)
.hasError());
CHECK(!conn.streamManager
->setMaxLocalUnidirectionalStreams(
kDefaultMaxStreamsUnidirectional)
.hasError());
maybeSetNotifyOnNewStreamsExplicitly();
}
@ -706,8 +729,9 @@ TEST_P(QuicTransportImplTestBase, IdleTimeoutExpiredDestroysTransport) {
}
TEST_P(QuicTransportImplTestBase, DelayConnCallback) {
transport->transportConn->streamManager->setMaxLocalBidirectionalStreams(
0, /*force=*/true);
ASSERT_FALSE(transport->transportConn->streamManager
->setMaxLocalBidirectionalStreams(0, /*force=*/true)
.hasError());
transport->setConnectionCallback(nullptr);
transport->addMaxStreamsFrame(
@ -793,7 +817,7 @@ TEST_P(QuicTransportImplTestBase, StopSendingClosesIngress) {
// suppose we tx a rst stream (and rx its corresponding ack), expect
// terminal state and queued in closed streams
transport->resetStream(streamID, GenericApplicationErrorCode::NO_ERROR);
sendRstAckSMHandler(*stream, folly::none);
ASSERT_FALSE(sendRstAckSMHandler(*stream, folly::none).hasError());
EXPECT_TRUE(stream->inTerminalStates());
EXPECT_TRUE(streamManager.closedStreams().contains(streamID));
transport->driveReadCallbacks();
@ -802,8 +826,10 @@ TEST_P(QuicTransportImplTestBase, StopSendingClosesIngress) {
EXPECT_TRUE(streamManager.streamExists(streamID));
EXPECT_CALL(readCb1, readError(streamID, QuicError(unknownErrorCode)))
.Times(1);
receiveRstStreamSMHandler(
*stream, RstStreamFrame(streamID, unknownErrorCode, ingressDataLen));
ASSERT_FALSE(
receiveRstStreamSMHandler(
*stream, RstStreamFrame(streamID, unknownErrorCode, ingressDataLen))
.hasError());
transport->readLooper()->runLoopCallback();
// same test as above, but we tx a rst stream first followed by send stop
@ -827,7 +853,7 @@ TEST_P(QuicTransportImplTestBase, StopSendingClosesIngress) {
// suppose we tx a rst stream (and rx its corresponding ack)
transport->resetStream(streamID, GenericApplicationErrorCode::NO_ERROR);
sendRstAckSMHandler(*stream, folly::none);
ASSERT_FALSE(sendRstAckSMHandler(*stream, folly::none).hasError());
EXPECT_EQ(stream->sendState, StreamSendState::Closed);
EXPECT_EQ(stream->recvState, StreamRecvState::Open);
transport->driveReadCallbacks();
@ -848,8 +874,10 @@ TEST_P(QuicTransportImplTestBase, StopSendingClosesIngress) {
// delivering callback to application
EXPECT_CALL(readCb2, readError(streamID, QuicError(unknownErrorCode)))
.Times(1);
receiveRstStreamSMHandler(
*stream, RstStreamFrame(streamID, unknownErrorCode, ingressDataLen));
ASSERT_FALSE(
receiveRstStreamSMHandler(
*stream, RstStreamFrame(streamID, unknownErrorCode, ingressDataLen))
.hasError());
EXPECT_TRUE(stream->inTerminalStates());
EXPECT_TRUE(streamManager.closedStreams().contains(streamID));
transport->readLooper()->runLoopCallback();
@ -866,9 +894,11 @@ TEST_P(QuicTransportImplTestBase, NoopStopSendingIngressClosed) {
EXPECT_EQ(stream->recvState, StreamRecvState::Open);
// suppose we rx a reset from peer which closes our ingress SM
receiveRstStreamSMHandler(
*stream,
RstStreamFrame(stream->id, GenericApplicationErrorCode::NO_ERROR, 0));
ASSERT_FALSE(
receiveRstStreamSMHandler(
*stream,
RstStreamFrame(stream->id, GenericApplicationErrorCode::NO_ERROR, 0))
.hasError());
EXPECT_EQ(stream->sendState, StreamSendState::Open);
EXPECT_EQ(stream->recvState, StreamRecvState::Closed);
@ -881,14 +911,18 @@ TEST_P(QuicTransportImplTestBase, NoopStopSendingIngressClosed) {
auto nextPeerUniStream =
streamManager.nextAcceptablePeerUnidirectionalStreamId();
EXPECT_TRUE(nextPeerUniStream.has_value());
stream = streamManager.getStream(*nextPeerUniStream);
auto streamResult = streamManager.getStream(*nextPeerUniStream);
ASSERT_FALSE(streamResult.hasError());
stream = streamResult.value();
EXPECT_EQ(stream->sendState, StreamSendState::Invalid);
EXPECT_EQ(stream->recvState, StreamRecvState::Open);
// suppose we rx a reset from peer which closes our ingress SM
receiveRstStreamSMHandler(
*stream,
RstStreamFrame(stream->id, GenericApplicationErrorCode::NO_ERROR, 0));
ASSERT_FALSE(
receiveRstStreamSMHandler(
*stream,
RstStreamFrame(stream->id, GenericApplicationErrorCode::NO_ERROR, 0))
.hasError());
EXPECT_EQ(stream->sendState, StreamSendState::Invalid);
EXPECT_EQ(stream->recvState, StreamRecvState::Closed);
EXPECT_TRUE(stream->inTerminalStates());
@ -984,16 +1018,18 @@ TEST_P(QuicTransportImplTestBase, ReliableResetReadCallback) {
transport->driveReadCallbacks();
// Simulate receiving a reliable reset with a reliableSize of 29
receiveRstStreamSMHandler(
*transport->getStream(stream),
RstStreamFrame(stream, GenericApplicationErrorCode::UNKNOWN, 100, 29));
ASSERT_FALSE(
receiveRstStreamSMHandler(
*transport->getStream(stream),
RstStreamFrame(stream, GenericApplicationErrorCode::UNKNOWN, 100, 29))
.hasError());
// The application hasn't yet read all of the reliable data, so we
// shouldn't fire the readError callback yet.
EXPECT_CALL(readCb, readAvailable(stream));
transport->driveReadCallbacks();
transport->read(stream, 29);
ASSERT_FALSE(transport->read(stream, 29).hasError());
// The application has now read all of the reliable data, so we should fire
// the readError callback.
@ -1017,7 +1053,9 @@ TEST_P(
auto nextPeerUniStream =
streamManager.nextAcceptablePeerUnidirectionalStreamId();
EXPECT_TRUE(nextPeerUniStream.has_value());
StreamId qpackStream = streamManager.getStream(*nextPeerUniStream)->id;
auto qpackStreamResult = streamManager.getStream(*nextPeerUniStream);
ASSERT_FALSE(qpackStreamResult.hasError());
StreamId qpackStream = qpackStreamResult.value()->id;
auto requestStream = transport->createBidirectionalStream().value();
@ -1667,8 +1705,9 @@ TEST_P(QuicTransportImplTestBase, CreateBothStream) {
}
TEST_P(QuicTransportImplTestBase, CreateStreamLimitsBidirectionalZero) {
transport->transportConn->streamManager->setMaxLocalBidirectionalStreams(
0, true);
ASSERT_FALSE(transport->transportConn->streamManager
->setMaxLocalBidirectionalStreams(0, true)
.hasError());
EXPECT_EQ(transport->getNumOpenableBidirectionalStreams(), 0);
auto result = transport->createBidirectionalStream();
ASSERT_FALSE(result);
@ -1679,8 +1718,9 @@ TEST_P(QuicTransportImplTestBase, CreateStreamLimitsBidirectionalZero) {
}
TEST_P(QuicTransportImplTestBase, CreateStreamLimitsUnidirectionalZero) {
transport->transportConn->streamManager->setMaxLocalUnidirectionalStreams(
0, true);
ASSERT_FALSE(transport->transportConn->streamManager
->setMaxLocalUnidirectionalStreams(0, true)
.hasError());
EXPECT_EQ(transport->getNumOpenableUnidirectionalStreams(), 0);
auto result = transport->createUnidirectionalStream();
ASSERT_FALSE(result);
@ -1691,8 +1731,9 @@ TEST_P(QuicTransportImplTestBase, CreateStreamLimitsUnidirectionalZero) {
}
TEST_P(QuicTransportImplTestBase, CreateStreamLimitsBidirectionalFew) {
transport->transportConn->streamManager->setMaxLocalBidirectionalStreams(
10, true);
ASSERT_FALSE(transport->transportConn->streamManager
->setMaxLocalBidirectionalStreams(10, true)
.hasError());
EXPECT_EQ(transport->getNumOpenableBidirectionalStreams(), 10);
for (int i = 0; i < 10; i++) {
EXPECT_TRUE(transport->createBidirectionalStream());
@ -1706,8 +1747,9 @@ TEST_P(QuicTransportImplTestBase, CreateStreamLimitsBidirectionalFew) {
}
TEST_P(QuicTransportImplTestBase, CreateStreamLimitsUnidirectionalFew) {
transport->transportConn->streamManager->setMaxLocalUnidirectionalStreams(
10, true);
ASSERT_FALSE(transport->transportConn->streamManager
->setMaxLocalUnidirectionalStreams(10, true)
.hasError());
EXPECT_EQ(transport->getNumOpenableUnidirectionalStreams(), 10);
for (int i = 0; i < 10; i++) {
EXPECT_TRUE(transport->createUnidirectionalStream());
@ -1721,8 +1763,9 @@ TEST_P(QuicTransportImplTestBase, CreateStreamLimitsUnidirectionalFew) {
}
TEST_P(QuicTransportImplTestBase, onBidiStreamsAvailableCallback) {
transport->transportConn->streamManager->setMaxLocalBidirectionalStreams(
0, /*force=*/true);
ASSERT_FALSE(transport->transportConn->streamManager
->setMaxLocalBidirectionalStreams(0, /*force=*/true)
.hasError());
EXPECT_CALL(connCallback, onBidirectionalStreamsAvailable(_))
.WillOnce(Invoke([](uint64_t numAvailableStreams) {
@ -1738,8 +1781,9 @@ TEST_P(QuicTransportImplTestBase, onBidiStreamsAvailableCallback) {
}
TEST_P(QuicTransportImplTestBase, onBidiStreamsAvailableCallbackAfterExausted) {
transport->transportConn->streamManager->setMaxLocalBidirectionalStreams(
0, /*force=*/true);
ASSERT_FALSE(transport->transportConn->streamManager
->setMaxLocalBidirectionalStreams(0, /*force=*/true)
.hasError());
EXPECT_CALL(connCallback, onBidirectionalStreamsAvailable(_)).Times(2);
transport->addMaxStreamsFrame(MaxStreamsFrame(
@ -1757,8 +1801,9 @@ TEST_P(QuicTransportImplTestBase, onBidiStreamsAvailableCallbackAfterExausted) {
}
TEST_P(QuicTransportImplTestBase, oneUniStreamsAvailableCallback) {
transport->transportConn->streamManager->setMaxLocalUnidirectionalStreams(
0, /*force=*/true);
ASSERT_FALSE(transport->transportConn->streamManager
->setMaxLocalUnidirectionalStreams(0, /*force=*/true)
.hasError());
EXPECT_CALL(connCallback, onUnidirectionalStreamsAvailable(_))
.WillOnce(Invoke([](uint64_t numAvailableStreams) {
@ -1774,8 +1819,9 @@ TEST_P(QuicTransportImplTestBase, oneUniStreamsAvailableCallback) {
}
TEST_P(QuicTransportImplTestBase, onUniStreamsAvailableCallbackAfterExausted) {
transport->transportConn->streamManager->setMaxLocalUnidirectionalStreams(
0, /*force=*/true);
ASSERT_FALSE(transport->transportConn->streamManager
->setMaxLocalUnidirectionalStreams(0, /*force=*/true)
.hasError());
EXPECT_CALL(connCallback, onUnidirectionalStreamsAvailable(_)).Times(2);
transport->addMaxStreamsFrame(
@ -2149,8 +2195,10 @@ TEST_P(QuicTransportImplTestBase, RegisterTxDeliveryCallbackLowerThanExpected) {
transport->registerDeliveryCallback(stream, 20, &dcb2);
Mock::VerifyAndClearExpectations(&txcb1);
Mock::VerifyAndClearExpectations(&txcb2);
auto streamState = transport->transportConn->streamManager->getStream(stream);
auto streamStateResult =
transport->transportConn->streamManager->getStream(stream);
ASSERT_FALSE(streamStateResult.hasError());
auto streamState = streamStateResult.value();
streamState->currentWriteOffset = 7;
streamState->ackedIntervals.insert(0, 6);
@ -2182,7 +2230,10 @@ TEST_F(
auto stream = transport->createBidirectionalStream().value();
StrictMock<MockByteEventCallback> txcb;
NiceMock<MockDeliveryCallback> dcb;
auto streamState = transport->transportConn->streamManager->getStream(stream);
auto streamStateResult =
transport->transportConn->streamManager->getStream(stream);
ASSERT_FALSE(streamStateResult.hasError());
auto streamState = streamStateResult.value();
streamState->currentWriteOffset = 7;
EXPECT_CALL(txcb, onByteEventRegistered(getTxMatcher(stream, 2)));
@ -2204,7 +2255,10 @@ TEST_P(
StrictMock<MockByteEventCallback> txcb2;
// Set the current write offset to 7.
auto streamState = transport->transportConn->streamManager->getStream(stream);
auto streamStateResult =
transport->transportConn->streamManager->getStream(stream);
ASSERT_FALSE(streamStateResult.hasError());
auto streamState = streamStateResult.value();
streamState->currentWriteOffset = 7;
streamState->ackedIntervals.insert(0, 6);
@ -2240,7 +2294,10 @@ TEST_F(
StrictMock<MockByteEventCallback> txcb2;
// Set the current write offset to 7.
auto streamState = transport->transportConn->streamManager->getStream(stream);
auto streamStateResult =
transport->transportConn->streamManager->getStream(stream);
ASSERT_FALSE(streamStateResult.hasError());
auto streamState = streamStateResult.value();
streamState->currentWriteOffset = 7;
streamState->ackedIntervals.insert(0, 6);
@ -2278,7 +2335,10 @@ TEST_P(
StrictMock<MockByteEventCallback> txcb2;
// Set the current write offset to 7.
auto streamState = transport->transportConn->streamManager->getStream(stream);
auto streamStateResult =
transport->transportConn->streamManager->getStream(stream);
ASSERT_FALSE(streamStateResult.hasError());
auto streamState = streamStateResult.value();
streamState->currentWriteOffset = 7;
streamState->ackedIntervals.insert(0, 6);
@ -2316,7 +2376,10 @@ TEST_P(
StrictMock<MockByteEventCallback> txcb2;
// Set the current write offset to 7.
auto streamState = transport->transportConn->streamManager->getStream(stream);
auto streamStateResult =
transport->transportConn->streamManager->getStream(stream);
ASSERT_FALSE(streamStateResult.hasError());
auto streamState = streamStateResult.value();
streamState->currentWriteOffset = 7;
streamState->ackedIntervals.insert(0, 6);
@ -2352,7 +2415,10 @@ TEST_P(QuicTransportImplTestBase, RegisterDeliveryCallbackAsyncDeliveryTx) {
StrictMock<MockByteEventCallback> txcb2;
// Set the current write offset to 7.
auto streamState = transport->transportConn->streamManager->getStream(stream);
auto streamStateResult =
transport->transportConn->streamManager->getStream(stream);
ASSERT_FALSE(streamStateResult.hasError());
auto streamState = streamStateResult.value();
streamState->currentWriteOffset = 7;
streamState->ackedIntervals.insert(0, 6);
@ -2397,7 +2463,10 @@ TEST_P(QuicTransportImplTestBase, RegisterDeliveryCallbackAsyncDeliveryAck) {
StrictMock<MockByteEventCallback> txcb2;
// Set the current write offset to 7.
auto streamState = transport->transportConn->streamManager->getStream(stream);
auto streamStateResult =
transport->transportConn->streamManager->getStream(stream);
ASSERT_FALSE(streamStateResult.hasError());
auto streamState = streamStateResult.value();
streamState->currentWriteOffset = 7;
streamState->ackedIntervals.insert(0, 6);
@ -3058,8 +3127,10 @@ TEST_P(QuicTransportImplTestBase, TestGracefulCloseWithActiveStream) {
transport->addDataToStream(
stream, StreamBuffer(IOBuf::copyBuffer("hello"), 0, false));
EXPECT_FALSE(transport->transportConn->streamManager->getStream(stream)
->readBuffer.empty());
auto streamResult =
transport->transportConn->streamManager->getStream(stream);
ASSERT_FALSE(streamResult.hasError());
EXPECT_FALSE(streamResult.value()->readBuffer.empty());
// Close the last stream.
// TODO: replace this when we call conn callbacks.
@ -3096,7 +3167,10 @@ TEST_P(QuicTransportImplTestBase, TestGracefulCloseWithNoActiveStream) {
EXPECT_FALSE(transport->registerTxCallback(stream, 4, &txCb).hasError());
// Close the last stream.
auto streamState = transport->transportConn->streamManager->getStream(stream);
auto streamStateResult =
transport->transportConn->streamManager->getStream(stream);
ASSERT_FALSE(streamStateResult.hasError());
auto streamState = streamStateResult.value();
// Fake that the data was TXed and delivered to keep all the state
// consistent.
streamState->currentWriteOffset = 7;
@ -3216,8 +3290,10 @@ TEST_P(QuicTransportImplTestBase, TestImmediateClose) {
transport->addDataToStream(
stream, StreamBuffer(IOBuf::copyBuffer("hello"), 0, false));
EXPECT_EQ(
transport->transportConn->streamManager->getStream(stream), nullptr);
auto streamResult =
transport->transportConn->streamManager->getStream(stream);
ASSERT_FALSE(streamResult.hasError());
EXPECT_EQ(streamResult.value(), nullptr);
qEvb->loopOnce();
EXPECT_EQ(resetCount, 1);
}
@ -3312,7 +3388,10 @@ TEST_P(QuicTransportImplTestBase, GetLocalAddressBadSocket) {
TEST_P(QuicTransportImplTestBase, AsyncStreamFlowControlWrite) {
transport->transportConn->oneRttWriteCipher = test::createNoOpAead();
auto stream = transport->createBidirectionalStream().value();
auto streamState = transport->transportConn->streamManager->getStream(stream);
auto streamStateResult =
transport->transportConn->streamManager->getStream(stream);
ASSERT_FALSE(streamStateResult.hasError());
auto streamState = streamStateResult.value();
transport->setServerConnectionId();
transport->writeLooper()->stop();
streamState->flowControlState.advertisedMaxOffset = 0; // Easier to calculate
@ -4435,8 +4514,9 @@ TEST_P(QuicTransportImplTestWithGroups, ReadCallbackWithGroupsDataAvailable) {
auto transportSettings = transport->getTransportSettings();
transportSettings.advertisedMaxStreamGroups = 16;
transport->setTransportSettings(transportSettings);
transport->getConnectionState().streamManager->refreshTransportSettings(
transportSettings);
ASSERT_FALSE(transport->getConnectionState()
.streamManager->refreshTransportSettings(transportSettings)
.hasError());
auto groupId = transport->createBidirectionalStreamGroup();
EXPECT_TRUE(groupId.hasValue());
@ -4485,8 +4565,9 @@ TEST_P(QuicTransportImplTestWithGroups, ReadErrorCallbackWithGroups) {
auto transportSettings = transport->getTransportSettings();
transportSettings.advertisedMaxStreamGroups = 16;
transport->setTransportSettings(transportSettings);
transport->getConnectionState().streamManager->refreshTransportSettings(
transportSettings);
ASSERT_FALSE(transport->getConnectionState()
.streamManager->refreshTransportSettings(transportSettings)
.hasError());
auto groupId = transport->createBidirectionalStreamGroup();
EXPECT_TRUE(groupId.hasValue());
@ -4514,8 +4595,9 @@ TEST_P(
auto transportSettings = transport->getTransportSettings();
transportSettings.advertisedMaxStreamGroups = 16;
transport->setTransportSettings(transportSettings);
transport->getConnectionState().streamManager->refreshTransportSettings(
transportSettings);
ASSERT_FALSE(transport->getConnectionState()
.streamManager->refreshTransportSettings(transportSettings)
.hasError());
auto groupId = transport->createBidirectionalStreamGroup();
EXPECT_TRUE(groupId.hasValue());
@ -4550,8 +4632,9 @@ TEST_P(QuicTransportImplTestWithGroups, onNewStreamsAndGroupsCallbacks) {
auto transportSettings = transport->getTransportSettings();
transportSettings.advertisedMaxStreamGroups = 16;
transport->setTransportSettings(transportSettings);
transport->getConnectionState().streamManager->refreshTransportSettings(
transportSettings);
ASSERT_FALSE(transport->getConnectionState()
.streamManager->refreshTransportSettings(transportSettings)
.hasError());
auto readData = folly::IOBuf::copyBuffer("actual stream data");
@ -4584,8 +4667,9 @@ TEST_P(
auto transportSettings = transport->getTransportSettings();
transportSettings.advertisedMaxStreamGroups = 16;
transport->setTransportSettings(transportSettings);
transport->getConnectionState().streamManager->refreshTransportSettings(
transportSettings);
ASSERT_FALSE(transport->getConnectionState()
.streamManager->refreshTransportSettings(transportSettings)
.hasError());
const StreamGroupId groupId = 0x00;
const QuicStreamGroupRetransmissionPolicy policy;
@ -4597,8 +4681,9 @@ TEST_P(
// Test policy set not allowed.
transportSettings.advertisedMaxStreamGroups = 0;
transport->setTransportSettings(transportSettings);
transport->getConnectionState().streamManager->refreshTransportSettings(
transportSettings);
ASSERT_FALSE(transport->getConnectionState()
.streamManager->refreshTransportSettings(transportSettings)
.hasError());
res = transport->setStreamGroupRetransmissionPolicy(groupId, policy);
EXPECT_TRUE(res.hasError());
EXPECT_EQ(res.error(), LocalErrorCode::INVALID_OPERATION);
@ -4613,8 +4698,9 @@ TEST_P(
auto transportSettings = transport->getTransportSettings();
transportSettings.advertisedMaxStreamGroups = 16;
transport->setTransportSettings(transportSettings);
transport->getConnectionState().streamManager->refreshTransportSettings(
transportSettings);
ASSERT_FALSE(transport->getConnectionState()
.streamManager->refreshTransportSettings(transportSettings)
.hasError());
const StreamGroupId groupId = 0x00;
QuicStreamGroupRetransmissionPolicy policy;
@ -4649,8 +4735,9 @@ TEST_P(
auto transportSettings = transport->getTransportSettings();
transportSettings.advertisedMaxStreamGroups = 16;
transport->setTransportSettings(transportSettings);
transport->getConnectionState().streamManager->refreshTransportSettings(
transportSettings);
ASSERT_FALSE(transport->getConnectionState()
.streamManager->refreshTransportSettings(transportSettings)
.hasError());
// Add a policy.
const StreamGroupId groupId = 0x00;
@ -4685,8 +4772,9 @@ TEST_P(
auto transportSettings = transport->getTransportSettings();
transportSettings.advertisedMaxStreamGroups = 1;
transport->setTransportSettings(transportSettings);
transport->getConnectionState().streamManager->refreshTransportSettings(
transportSettings);
ASSERT_FALSE(transport->getConnectionState()
.streamManager->refreshTransportSettings(transportSettings)
.hasError());
// Add a policy.
const StreamGroupId groupId = 0x00;
@ -4709,8 +4797,9 @@ TEST_P(QuicTransportImplTestBase, TestUpdateWriteLooperWithWritableCallback) {
auto transportSettings = transport->getTransportSettings();
transportSettings.useSockWritableEvents = true;
transport->setTransportSettings(transportSettings);
transport->getConnectionState().streamManager->refreshTransportSettings(
transportSettings);
ASSERT_FALSE(transport->getConnectionState()
.streamManager->refreshTransportSettings(transportSettings)
.hasError());
EXPECT_CALL(*socketPtr, isWritableCallbackSet()).WillOnce(Return(true));
transport->updateWriteLooper(true /* thisIteration */);
@ -4719,8 +4808,9 @@ TEST_P(QuicTransportImplTestBase, TestUpdateWriteLooperWithWritableCallback) {
transportSettings = transport->getTransportSettings();
transportSettings.useSockWritableEvents = false;
transport->setTransportSettings(transportSettings);
transport->getConnectionState().streamManager->refreshTransportSettings(
transportSettings);
ASSERT_FALSE(transport->getConnectionState()
.streamManager->refreshTransportSettings(transportSettings)
.hasError());
EXPECT_CALL(*socketPtr, isWritableCallbackSet()).Times(0);
transport->updateWriteLooper(true /* thisIteration */);
@ -4734,8 +4824,9 @@ TEST_P(
auto transportSettings = transport->getTransportSettings();
transportSettings.useSockWritableEvents = true;
transport->setTransportSettings(transportSettings);
transport->getConnectionState().streamManager->refreshTransportSettings(
transportSettings);
ASSERT_FALSE(transport->getConnectionState()
.streamManager->refreshTransportSettings(transportSettings)
.hasError());
transport->transportConn->oneRttWriteCipher = test::createNoOpAead();
@ -4772,8 +4863,9 @@ TEST_P(
auto transportSettings = transport->getTransportSettings();
transportSettings.useSockWritableEvents = true;
transport->setTransportSettings(transportSettings);
transport->getConnectionState().streamManager->refreshTransportSettings(
transportSettings);
ASSERT_FALSE(transport->getConnectionState()
.streamManager->refreshTransportSettings(transportSettings)
.hasError());
transport->transportConn->oneRttWriteCipher = test::createNoOpAead();
@ -4797,8 +4889,9 @@ TEST_P(
auto transportSettings = transport->getTransportSettings();
transportSettings.useSockWritableEvents = true;
transport->setTransportSettings(transportSettings);
transport->getConnectionState().streamManager->refreshTransportSettings(
transportSettings);
ASSERT_FALSE(transport->getConnectionState()
.streamManager->refreshTransportSettings(transportSettings)
.hasError());
transport->transportConn->oneRttWriteCipher = test::createNoOpAead();
@ -4832,8 +4925,9 @@ TEST_P(
auto transportSettings = transport->getTransportSettings();
transportSettings.useSockWritableEvents = true;
transport->setTransportSettings(transportSettings);
transport->getConnectionState().streamManager->refreshTransportSettings(
transportSettings);
ASSERT_FALSE(transport->getConnectionState()
.streamManager->refreshTransportSettings(transportSettings)
.hasError());
transport->transportConn->oneRttWriteCipher = test::createNoOpAead();
@ -4871,8 +4965,9 @@ TEST_P(
auto transportSettings = transport->getTransportSettings();
transportSettings.useSockWritableEvents = true;
transport->setTransportSettings(transportSettings);
transport->getConnectionState().streamManager->refreshTransportSettings(
transportSettings);
ASSERT_FALSE(transport->getConnectionState()
.streamManager->refreshTransportSettings(transportSettings)
.hasError());
transport->transportConn->oneRttWriteCipher = test::createNoOpAead();
@ -4909,8 +5004,9 @@ TEST_P(QuicTransportImplTestBase, TestOnSocketWritable) {
auto transportSettings = transport->getTransportSettings();
transportSettings.useSockWritableEvents = true;
transport->setTransportSettings(transportSettings);
transport->getConnectionState().streamManager->refreshTransportSettings(
transportSettings);
ASSERT_FALSE(transport->getConnectionState()
.streamManager->refreshTransportSettings(transportSettings)
.hasError());
// Write looper is not running.
EXPECT_FALSE(transport->writeLooper()->isRunning());
@ -4937,8 +5033,9 @@ TEST_P(
transportSettings.enableWriterBackpressure = true;
transport->setTransportSettings(transportSettings);
transport->getConnectionState().streamManager->refreshTransportSettings(
transportSettings);
ASSERT_FALSE(transport->getConnectionState()
.streamManager->refreshTransportSettings(transportSettings)
.hasError());
transport->transportConn->oneRttWriteCipher = test::createNoOpAead();
@ -5008,8 +5105,9 @@ TEST_P(
auto transportSettings = transport->getTransportSettings();
transportSettings.useSockWritableEvents = true;
transport->setTransportSettings(transportSettings);
transport->getConnectionState().streamManager->refreshTransportSettings(
transportSettings);
ASSERT_FALSE(transport->getConnectionState()
.streamManager->refreshTransportSettings(transportSettings)
.hasError());
transport->transportConn->oneRttWriteCipher = test::createNoOpAead();

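Outside of test assertions, the hunks above forward a failed stream lookup with folly::makeUnexpected rather than throwing. A hedged sketch of that propagation shape follows; the helper name, the parameter types, and QuicError as the error type are assumptions for illustration, not code from this commit.

#include <folly/Expected.h>
// mvfst headers defining QuicConnectionStateBase, QuicStreamState, StreamBuffer,
// appendDataToReadBuffer, etc. are assumed to be available.

folly::Expected<folly::Unit, QuicError> appendToStream(
    QuicConnectionStateBase& conn,
    StreamId id,
    StreamBuffer data) {
  auto streamResult = conn.streamManager->getStream(id);
  if (streamResult.hasError()) {
    return folly::makeUnexpected(streamResult.error()); // forward the error, don't throw
  }
  QuicStreamState* stream = streamResult.value();
  if (!stream) {
    return folly::unit; // stream already closed; nothing to append
  }
  auto appendResult = appendDataToReadBuffer(*stream, std::move(data));
  if (appendResult.hasError()) {
    return folly::makeUnexpected(appendResult.error());
  }
  conn.streamManager->updateReadableStreams(*stream);
  conn.streamManager->updatePeekableStreams(*stream);
  return folly::unit;
}
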
File diff suppressed because it is too large

View File

@ -102,12 +102,16 @@ class QuicTransportTest : public Test {
kDefaultStreamFlowControlWindow;
transport_->getConnectionState().flowControlState.peerAdvertisedMaxOffset =
kDefaultConnectionFlowControlWindow;
transport_->getConnectionState()
.streamManager->setMaxLocalBidirectionalStreams(
kDefaultMaxStreamsBidirectional);
transport_->getConnectionState()
.streamManager->setMaxLocalUnidirectionalStreams(
kDefaultMaxStreamsUnidirectional);
CHECK(
!transport_->getConnectionState()
.streamManager
->setMaxLocalBidirectionalStreams(kDefaultMaxStreamsBidirectional)
.hasError());
CHECK(!transport_->getConnectionState()
.streamManager
->setMaxLocalUnidirectionalStreams(
kDefaultMaxStreamsUnidirectional)
.hasError());
}
void loopForWrites() {
@ -1425,10 +1429,12 @@ TEST_F(QuicTransportTest, ObserverStreamEventBidirectionalLocalOpenClose) {
StreamDirectionality::Bidirectional,
transport_->getStreamDirectionality(id));
EXPECT_EQ(StreamInitiator::Local, transport_->getStreamInitiator(id));
EXPECT_CALL(*cb1, streamClosed(transport_.get(), streamEventMatcher));
auto stream = CHECK_NOTNULL(
transport_->getConnectionState().streamManager->getStream(id));
auto streamExpected =
transport_->getConnectionState().streamManager->getStream(id);
ASSERT_FALSE(streamExpected.hasError());
auto stream = streamExpected.value();
ASSERT_NE(stream, nullptr);
stream->sendState = StreamSendState::Closed;
stream->recvState = StreamRecvState::Closed;
transport_->getConnectionState().streamManager->addClosed(id);
@ -1463,9 +1469,11 @@ TEST_F(QuicTransportTest, ObserverStreamEventBidirectionalRemoteOpenClose) {
id, StreamInitiator::Remote, StreamDirectionality::Bidirectional);
EXPECT_CALL(*cb1, streamOpened(transport_.get(), streamEventMatcher));
auto stream = CHECK_NOTNULL(
transport_->getConnectionState().streamManager->getStream(id));
EXPECT_THAT(stream, NotNull());
auto streamExpected =
transport_->getConnectionState().streamManager->getStream(id);
ASSERT_FALSE(streamExpected.hasError());
auto stream = streamExpected.value();
ASSERT_NE(stream, nullptr);
EXPECT_EQ(
StreamDirectionality::Bidirectional,
@ -1515,8 +1523,11 @@ TEST_F(QuicTransportTest, ObserverStreamEventUnidirectionalLocalOpenClose) {
EXPECT_EQ(StreamInitiator::Local, transport_->getStreamInitiator(id));
EXPECT_CALL(*cb1, streamClosed(transport_.get(), streamEventMatcher));
auto stream = CHECK_NOTNULL(
transport_->getConnectionState().streamManager->getStream(id));
auto streamExpected =
transport_->getConnectionState().streamManager->getStream(id);
ASSERT_FALSE(streamExpected.hasError());
auto stream = streamExpected.value();
ASSERT_NE(stream, nullptr);
stream->sendState = StreamSendState::Closed;
stream->recvState = StreamRecvState::Closed;
transport_->getConnectionState().streamManager->addClosed(id);
@ -1551,8 +1562,11 @@ TEST_F(QuicTransportTest, ObserverStreamEventUnidirectionalRemoteOpenClose) {
id, StreamInitiator::Remote, StreamDirectionality::Unidirectional);
EXPECT_CALL(*cb1, streamOpened(transport_.get(), streamEventMatcher));
auto stream = CHECK_NOTNULL(
transport_->getConnectionState().streamManager->getStream(id));
auto streamExpected =
transport_->getConnectionState().streamManager->getStream(id);
ASSERT_FALSE(streamExpected.hasError());
auto stream = streamExpected.value();
ASSERT_NE(stream, nullptr);
EXPECT_EQ(
StreamDirectionality::Unidirectional,
@ -1589,7 +1603,11 @@ TEST_F(QuicTransportTest, StreamBidirectionalLocal) {
TEST_F(QuicTransportTest, StreamBidirectionalRemote) {
const auto id = 0x00;
// trigger tracking of new remote stream via getStream()
CHECK_NOTNULL(transport_->getConnectionState().streamManager->getStream(id));
auto streamExpected =
transport_->getConnectionState().streamManager->getStream(id);
ASSERT_FALSE(streamExpected.hasError());
auto stream = streamExpected.value();
ASSERT_NE(stream, nullptr);
EXPECT_EQ(
StreamDirectionality::Bidirectional,
@ -1610,7 +1628,11 @@ TEST_F(QuicTransportTest, StreamUnidirectionalLocal) {
TEST_F(QuicTransportTest, StreamUnidirectionalRemote) {
const auto id = 0x02;
// trigger tracking of new remote stream via getStream()
CHECK_NOTNULL(transport_->getConnectionState().streamManager->getStream(id));
auto streamExpected =
transport_->getConnectionState().streamManager->getStream(id);
ASSERT_FALSE(streamExpected.hasError());
auto stream = streamExpected.value();
ASSERT_NE(stream, nullptr);
EXPECT_EQ(
StreamDirectionality::Unidirectional,
@ -1635,7 +1657,7 @@ TEST_F(QuicTransportTest, WriteSmall) {
dropPackets(conn);
EXPECT_CALL(*socket_, write(_, _, _))
.WillOnce(testing::WithArgs<1, 2>(Invoke(getTotalIovecLen)));
writeQuicDataToSocket(
auto writeRes = writeQuicDataToSocket(
*socket_,
conn,
*conn.clientConnectionId,
@ -1644,6 +1666,7 @@ TEST_F(QuicTransportTest, WriteSmall) {
*conn.oneRttWriteHeaderCipher,
transport_->getVersion(),
conn.transportSettings.writeConnectionDataPacketsLimit);
ASSERT_FALSE(writeRes.hasError());
verifyCorrectness(conn, 0, stream, *buf);
EXPECT_EQ(WriteDataReason::NO_WRITE, shouldWriteData(conn));
@ -1670,7 +1693,7 @@ TEST_F(QuicTransportTest, WriteLarge) {
EXPECT_CALL(*socket_, write(_, _, _))
.Times(NumFullPackets + 1)
.WillRepeatedly(testing::WithArgs<1, 2>(Invoke(getTotalIovecLen)));
writeQuicDataToSocket(
auto writeRes = writeQuicDataToSocket(
*socket_,
conn,
*conn.clientConnectionId,
@ -1679,6 +1702,7 @@ TEST_F(QuicTransportTest, WriteLarge) {
*conn.oneRttWriteHeaderCipher,
transport_->getVersion(),
conn.transportSettings.writeConnectionDataPacketsLimit);
ASSERT_FALSE(writeRes.hasError());
EXPECT_EQ(NumFullPackets + 1, conn.outstandings.packets.size());
verifyCorrectness(conn, 0, stream, *buf);
EXPECT_EQ(WriteDataReason::NO_WRITE, shouldWriteData(conn));
@ -1731,7 +1755,7 @@ TEST_F(QuicTransportTest, WriteMultipleStreams) {
// Should retransmit lost streams in a single packet
EXPECT_CALL(*socket_, write(_, _, _))
.WillOnce(testing::WithArgs<1, 2>(Invoke(getTotalIovecLen)));
writeQuicDataToSocket(
auto writeRes = writeQuicDataToSocket(
*socket_,
conn,
*conn.clientConnectionId,
@ -1740,6 +1764,7 @@ TEST_F(QuicTransportTest, WriteMultipleStreams) {
*conn.oneRttWriteHeaderCipher,
transport_->getVersion(),
conn.transportSettings.writeConnectionDataPacketsLimit);
ASSERT_FALSE(writeRes.hasError());
verifyCorrectness(conn, 0, s1, *buf);
verifyCorrectness(conn, 0, s2, *buf2);
}
@ -1750,7 +1775,9 @@ TEST_F(QuicTransportTest, WriteFlowControl) {
conn.qLogger = mockQLogger;
auto streamId = transport_->createBidirectionalStream().value();
auto stream = conn.streamManager->getStream(streamId);
auto streamResult = conn.streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
auto& stream = streamResult.value();
stream->flowControlState.peerAdvertisedMaxOffset = 100;
stream->currentWriteOffset = 100;
stream->conn.flowControlState.sumCurWriteOffset = 100;
@ -1796,7 +1823,7 @@ TEST_F(QuicTransportTest, WriteFlowControl) {
conn.streamManager->updateWritableStreams(*stream);
EXPECT_CALL(*socket_, write(_, _, _))
.WillRepeatedly(testing::WithArgs<1, 2>(Invoke(getTotalIovecLen)));
writeQuicDataToSocket(
auto writeRes = writeQuicDataToSocket(
*socket_,
conn,
*conn.clientConnectionId,
@ -1805,6 +1832,7 @@ TEST_F(QuicTransportTest, WriteFlowControl) {
*conn.oneRttWriteHeaderCipher,
transport_->getVersion(),
conn.transportSettings.writeConnectionDataPacketsLimit);
ASSERT_FALSE(writeRes.hasError());
verifyCorrectness(conn, 100, streamId, *buf1, false, false);
// Connection flow controlled
@ -1814,7 +1842,7 @@ TEST_F(QuicTransportTest, WriteFlowControl) {
EXPECT_CALL(*socket_, write(_, _, _))
.Times(2)
.WillRepeatedly(testing::WithArgs<1, 2>(Invoke(getTotalIovecLen)));
writeQuicDataToSocket(
auto writeRes2 = writeQuicDataToSocket(
*socket_,
conn,
*conn.clientConnectionId,
@ -1823,6 +1851,7 @@ TEST_F(QuicTransportTest, WriteFlowControl) {
*conn.oneRttWriteHeaderCipher,
transport_->getVersion(),
conn.transportSettings.writeConnectionDataPacketsLimit);
ASSERT_FALSE(writeRes2.hasError());
auto buf2 = buf->clone();
buf2->trimEnd(30);
verifyCorrectness(conn, 100, streamId, *buf2, false, false);
@ -1846,7 +1875,7 @@ TEST_F(QuicTransportTest, WriteFlowControl) {
// Try again, verify that there should not be any Data blocked frame emitted
// again.
EXPECT_CALL(*socket_, write(_, _, _)).Times(0);
writeQuicDataToSocket(
auto writeRes3 = writeQuicDataToSocket(
*socket_,
conn,
*conn.clientConnectionId,
@ -1855,12 +1884,13 @@ TEST_F(QuicTransportTest, WriteFlowControl) {
*conn.oneRttWriteHeaderCipher,
transport_->getVersion(),
conn.transportSettings.writeConnectionDataPacketsLimit);
ASSERT_FALSE(writeRes3.hasError());
// Flow control lifted
stream->conn.flowControlState.peerAdvertisedMaxOffset = 300;
EXPECT_CALL(*socket_, write(_, _, _))
.WillOnce(testing::WithArgs<1, 2>(Invoke(getTotalIovecLen)));
writeQuicDataToSocket(
auto writeRes4 = writeQuicDataToSocket(
*socket_,
conn,
*conn.clientConnectionId,
@ -1869,6 +1899,7 @@ TEST_F(QuicTransportTest, WriteFlowControl) {
*conn.oneRttWriteHeaderCipher,
transport_->getVersion(),
conn.transportSettings.writeConnectionDataPacketsLimit);
ASSERT_FALSE(writeRes4.hasError());
verifyCorrectness(conn, 100, streamId, *buf, false, false);
}
@ -1914,7 +1945,7 @@ TEST_F(QuicTransportTest, WriteFin) {
dropPackets(conn);
EXPECT_CALL(*socket_, write(_, _, _))
.WillOnce(testing::WithArgs<1, 2>(Invoke(getTotalIovecLen)));
writeQuicDataToSocket(
auto writeRes = writeQuicDataToSocket(
*socket_,
conn,
*conn.clientConnectionId,
@ -1923,6 +1954,7 @@ TEST_F(QuicTransportTest, WriteFin) {
*conn.oneRttWriteHeaderCipher,
transport_->getVersion(),
conn.transportSettings.writeConnectionDataPacketsLimit);
ASSERT_FALSE(writeRes.hasError());
verifyCorrectness(conn, 0, stream, *buf, true);
EXPECT_EQ(WriteDataReason::NO_WRITE, shouldWriteData(conn));
}
@ -1945,7 +1977,7 @@ TEST_F(QuicTransportTest, WriteOnlyFin) {
dropPackets(conn);
EXPECT_CALL(*socket_, write(_, _, _))
.WillOnce(testing::WithArgs<1, 2>(Invoke(getTotalIovecLen)));
writeQuicDataToSocket(
auto writeRes = writeQuicDataToSocket(
*socket_,
conn,
*conn.clientConnectionId,
@ -1954,6 +1986,7 @@ TEST_F(QuicTransportTest, WriteOnlyFin) {
*conn.oneRttWriteHeaderCipher,
transport_->getVersion(),
conn.transportSettings.writeConnectionDataPacketsLimit);
ASSERT_FALSE(writeRes.hasError());
verifyCorrectness(conn, 0, stream, *buf, true);
EXPECT_EQ(WriteDataReason::NO_WRITE, shouldWriteData(conn));
}
@ -1989,7 +2022,7 @@ TEST_F(QuicTransportTest, WriteImmediateAcks) {
addAckStatesWithCurrentTimestamps(conn.ackStates.appDataAckState, start, end);
EXPECT_CALL(*socket_, write(_, _, _))
.WillOnce(testing::WithArgs<1, 2>(Invoke(getTotalIovecLen)));
writeQuicDataToSocket(
auto writeRes = writeQuicDataToSocket(
*socket_,
conn,
*conn.clientConnectionId,
@ -1998,6 +2031,7 @@ TEST_F(QuicTransportTest, WriteImmediateAcks) {
*conn.oneRttWriteHeaderCipher,
transport_->getVersion(),
conn.transportSettings.writeConnectionDataPacketsLimit);
ASSERT_FALSE(writeRes.hasError());
EXPECT_TRUE(conn.outstandings.packets.empty());
EXPECT_EQ(conn.ackStates.appDataAckState.largestAckScheduled, end);
EXPECT_FALSE(conn.ackStates.appDataAckState.needsToSendAckImmediately);
@ -2322,7 +2356,8 @@ TEST_F(QuicTransportTest, RstStreamReliably) {
for (auto& frame : packet.packet.frames) {
auto maybeWriteStreamFrame = frame.asWriteStreamFrame();
if (maybeWriteStreamFrame) {
sendAckSMHandler(*stream, *maybeWriteStreamFrame);
auto ackResult = sendAckSMHandler(*stream, *maybeWriteStreamFrame);
ASSERT_FALSE(ackResult.hasError());
}
}
}
@ -2332,7 +2367,8 @@ TEST_F(QuicTransportTest, RstStreamReliably) {
EXPECT_EQ(stream->sendState, StreamSendState::ResetSent);
// ACK the reliable reset
sendRstAckSMHandler(*stream, 12);
auto rstAckResult = sendRstAckSMHandler(*stream, 12);
ASSERT_FALSE(rstAckResult.hasError());
// We should transition to Closed because the peer has ACKed the reliable
// reset, and has also ACKed all of the reliable bytes.
@ -2651,7 +2687,8 @@ TEST_F(QuicTransportTest, ResendPathChallengeOnLoss) {
->packet;
EXPECT_FALSE(conn.pendingEvents.pathChallenge);
markPacketLoss(conn, packet, false);
auto result = markPacketLoss(conn, packet, false);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(*conn.pendingEvents.pathChallenge, pathChallenge);
}
@ -2676,7 +2713,8 @@ TEST_F(QuicTransportTest, DoNotResendLostPathChallengeIfNotOutstanding) {
transport_->getPathValidationTimeout().timeoutExpired();
EXPECT_FALSE(conn.pendingEvents.pathChallenge);
markPacketLoss(conn, packet, false);
auto result = markPacketLoss(conn, packet, false);
ASSERT_FALSE(result.hasError());
EXPECT_FALSE(conn.pendingEvents.pathChallenge);
}
@ -2717,7 +2755,9 @@ TEST_F(QuicTransportTest, CloneAfterRecvReset) {
transport_->writeChain(streamId, IOBuf::create(0), true);
loopForWrites();
EXPECT_EQ(1, conn.outstandings.packets.size());
auto stream = conn.streamManager->getStream(streamId);
auto streamResult = conn.streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
auto& stream = streamResult.value();
EXPECT_EQ(1, stream->retransmissionBuffer.size());
EXPECT_EQ(0, stream->retransmissionBuffer.at(0)->data.chainLength());
EXPECT_TRUE(stream->retransmissionBuffer.at(0)->eof);
@ -2727,7 +2767,8 @@ TEST_F(QuicTransportTest, CloneAfterRecvReset) {
EXPECT_EQ(0, *stream->finalWriteOffset);
RstStreamFrame rstFrame(streamId, GenericApplicationErrorCode::UNKNOWN, 0);
receiveRstStreamSMHandler(*stream, std::move(rstFrame));
ASSERT_FALSE(
receiveRstStreamSMHandler(*stream, std::move(rstFrame)).hasError());
// This will clone twice. :/ Maybe we should change this to clone only once in
// the future, thus the EXPECT were written with LT and LE. But it will clone
@ -2795,7 +2836,8 @@ TEST_F(QuicTransportTest, ResendPathResponseOnLoss) {
EXPECT_EQ(numPathResponseFrames, 1);
// pathResponseFrame should be queued for re-tx on packet loss
markPacketLoss(conn, packet, false);
auto result = markPacketLoss(conn, packet, false);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(conn.pendingEvents.frames.size(), 1);
numPathResponseFrames = std::count_if(
conn.pendingEvents.frames.begin(),
@ -2936,7 +2978,8 @@ TEST_F(QuicTransportTest, ResendNewConnectionIdOnLoss) {
->packet;
EXPECT_TRUE(conn.pendingEvents.frames.empty());
markPacketLoss(conn, packet, false);
auto result = markPacketLoss(conn, packet, false);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(conn.pendingEvents.frames.size(), 1);
NewConnectionIdFrame* connIdFrame =
conn.pendingEvents.frames.front().asNewConnectionIdFrame();
@ -3021,7 +3064,8 @@ TEST_F(QuicTransportTest, ResendRetireConnectionIdOnLoss) {
->packet;
EXPECT_TRUE(conn.pendingEvents.frames.empty());
markPacketLoss(conn, packet, false);
auto result = markPacketLoss(conn, packet, false);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(conn.pendingEvents.frames.size(), 1);
RetireConnectionIdFrame* retireFrame =
conn.pendingEvents.frames.front().asRetireConnectionIdFrame();
@ -3035,7 +3079,9 @@ TEST_F(QuicTransportTest, NonWritableStreamAPI) {
EXPECT_CALL(*socket_, write(_, _, _))
.WillRepeatedly(testing::WithArgs<1, 2>(Invoke(getTotalIovecLen)));
auto& conn = transport_->getConnectionState();
auto streamState = conn.streamManager->getStream(streamId);
auto streamResult = conn.streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
auto& streamState = streamResult.value();
// write EOF
transport_->writeChain(streamId, buf->clone(), true);
@ -3244,7 +3290,8 @@ TEST_F(QuicTransportTest, WriteAckNotSetLossAlarm) {
*conn.oneRttWriteHeaderCipher,
transport_->getVersion(),
conn.transportSettings.writeConnectionDataPacketsLimit);
EXPECT_EQ(1, res.packetsWritten); // Write one packet out
ASSERT_FALSE(res.hasError());
EXPECT_EQ(1, res->packetsWritten); // Write one packet out
EXPECT_FALSE(transport_->isLossTimeoutScheduled()); // no alarm scheduled
}
@ -3264,7 +3311,8 @@ TEST_F(QuicTransportTest, WriteWindowUpdate) {
*conn.oneRttWriteHeaderCipher,
transport_->getVersion(),
conn.transportSettings.writeConnectionDataPacketsLimit);
EXPECT_EQ(1, res.packetsWritten); // Write one packet out
ASSERT_FALSE(res.hasError());
EXPECT_EQ(1, res->packetsWritten); // Write one packet out
EXPECT_EQ(1, conn.outstandings.packets.size());
auto packet =
getLastOutstandingPacket(conn, PacketNumberSpace::AppData)->packet;
@ -3280,12 +3328,12 @@ TEST_F(QuicTransportTest, WriteWindowUpdate) {
}
EXPECT_TRUE(connWindowFound);
EXPECT_EQ(conn.flowControlState.advertisedMaxOffset, 100);
conn.outstandings.reset();
auto stream = transport_->createBidirectionalStream().value();
auto streamState = conn.streamManager->getStream(stream);
auto streamResult = conn.streamManager->getStream(stream);
ASSERT_FALSE(streamResult.hasError());
auto& streamState = streamResult.value();
streamState->flowControlState.windowSize = 100;
streamState->flowControlState.advertisedMaxOffset = 0;
MaxStreamDataFrame frame(stream, 100);
@ -3301,7 +3349,8 @@ TEST_F(QuicTransportTest, WriteWindowUpdate) {
*conn.oneRttWriteHeaderCipher,
transport_->getVersion(),
conn.transportSettings.writeConnectionDataPacketsLimit);
EXPECT_EQ(1, res.packetsWritten); // Write one packet out
ASSERT_FALSE(res.hasError());
EXPECT_EQ(1, res->packetsWritten); // Write one packet out
EXPECT_EQ(1, conn.outstandings.packets.size());
auto packet1 =
getLastOutstandingPacket(conn, PacketNumberSpace::AppData)->packet;
@ -3314,14 +3363,16 @@ TEST_F(QuicTransportTest, FlowControlCallbacks) {
auto stream = transport_->createBidirectionalStream().value();
auto stream2 = transport_->createBidirectionalStream().value();
auto& conn = transport_->getConnectionState();
auto streamState = conn.streamManager->getStream(stream);
auto streamState1 = conn.streamManager->getStream(stream);
ASSERT_FALSE(streamState1.hasError());
auto streamState2 = conn.streamManager->getStream(stream2);
ASSERT_FALSE(streamState2.hasError());
conn.streamManager->queueFlowControlUpdated(streamState->id);
conn.streamManager->queueFlowControlUpdated(streamState2->id);
EXPECT_CALL(connCallback_, onFlowControlUpdate(streamState->id));
conn.streamManager->queueFlowControlUpdated(streamState1.value()->id);
conn.streamManager->queueFlowControlUpdated(streamState2.value()->id);
EXPECT_CALL(connCallback_, onFlowControlUpdate(streamState1.value()->id));
// We should be able to create streams from this callback.
EXPECT_CALL(connCallback_, onFlowControlUpdate(streamState2->id))
EXPECT_CALL(connCallback_, onFlowControlUpdate(streamState2.value()->id))
.WillOnce(Invoke([&](auto) { transport_->createBidirectionalStream(); }));
transport_->onNetworkData(
SocketAddress("::1", 10000),
@ -3352,7 +3403,9 @@ TEST_F(QuicTransportTest, DeliveryCallbackClosesTransportOnDelivered) {
loopForWrites();
auto& conn = transport_->getConnectionState();
auto streamState = conn.streamManager->getStream(stream1);
auto streamResult = conn.streamManager->getStream(stream1);
ASSERT_FALSE(streamResult.hasError());
auto streamState = streamResult.value();
conn.streamManager->addDeliverable(stream1);
folly::SocketAddress addr;
NetworkData emptyData;
@ -3372,7 +3425,9 @@ TEST_F(QuicTransportTest, InvokeDeliveryCallbacksNothingDelivered) {
loopForWrites();
auto& conn = transport_->getConnectionState();
auto streamState = conn.streamManager->getStream(stream);
auto streamResult = conn.streamManager->getStream(stream);
ASSERT_FALSE(streamResult.hasError());
auto streamState = streamResult.value();
folly::SocketAddress addr;
NetworkData emptyData;
@ -3410,7 +3465,9 @@ TEST_F(QuicTransportTest, InvokeDeliveryCallbacksAllDelivered) {
// Faking a delivery:
conn.streamManager->addDeliverable(stream);
conn.lossState.srtt = 100us;
auto streamState = conn.streamManager->getStream(stream);
auto streamResult = conn.streamManager->getStream(stream);
ASSERT_FALSE(streamResult.hasError());
auto streamState = streamResult.value();
streamState->retransmissionBuffer.clear();
streamState->ackedIntervals.insert(0, 1);
@ -3436,7 +3493,9 @@ TEST_F(QuicTransportTest, InvokeDeliveryCallbacksPartialDelivered) {
// Faking a delivery:
conn.streamManager->addDeliverable(stream);
conn.lossState.srtt = 100us;
auto streamState = conn.streamManager->getStream(stream);
auto streamResult = conn.streamManager->getStream(stream);
ASSERT_FALSE(streamResult.hasError());
auto streamState = streamResult.value();
streamState->retransmissionBuffer.clear();
folly::SocketAddress addr;
@ -3478,7 +3537,9 @@ TEST_F(QuicTransportTest, InvokeDeliveryCallbacksRetxBuffer) {
// Faking a delivery and retx:
conn.streamManager->addDeliverable(stream);
conn.lossState.srtt = 100us;
auto streamState = conn.streamManager->getStream(stream);
auto streamResult = conn.streamManager->getStream(stream);
ASSERT_FALSE(streamResult.hasError());
auto streamState = streamResult.value();
streamState->retransmissionBuffer.clear();
auto retxBufferData = folly::IOBuf::copyBuffer("But i'm not delivered yet");
streamState->retransmissionBuffer.emplace(
@ -3527,7 +3588,9 @@ TEST_F(QuicTransportTest, InvokeDeliveryCallbacksLossAndRetxBuffer) {
// Faking a delivery, retx and loss:
conn.streamManager->addDeliverable(stream);
conn.lossState.srtt = 100us;
auto streamState = conn.streamManager->getStream(stream);
auto streamResult = conn.streamManager->getStream(stream);
ASSERT_FALSE(streamResult.hasError());
auto streamState = streamResult.value();
streamState->retransmissionBuffer.clear();
streamState->lossBuffer.clear();
auto retxBufferData = folly::IOBuf::copyBuffer("But i'm not delivered yet");
@ -3589,7 +3652,9 @@ TEST_F(QuicTransportTest, InvokeDeliveryCallbacksSingleByte) {
conn.streamManager->addDeliverable(stream);
conn.lossState.srtt = 100us;
NetworkData networkData;
auto streamState = conn.streamManager->getStream(stream);
auto streamResult = conn.streamManager->getStream(stream);
ASSERT_FALSE(streamResult.hasError());
auto streamState = streamResult.value();
streamState->ackedIntervals.insert(0, 0);
EXPECT_CALL(writeChainDeliveryCb, onDeliveryAck(stream, 0, 100us)).Times(1);
EXPECT_CALL(firstByteDeliveryCb, onDeliveryAck(stream, 0, 100us)).Times(1);
@ -3642,7 +3707,9 @@ TEST_F(QuicTransportTest, InvokeDeliveryCallbacksSingleByteWithFin) {
conn.streamManager->addDeliverable(stream);
conn.lossState.srtt = 100us;
NetworkData networkData;
auto streamState = conn.streamManager->getStream(stream);
auto streamResult = conn.streamManager->getStream(stream);
ASSERT_FALSE(streamResult.hasError());
auto streamState = streamResult.value();
streamState->ackedIntervals.insert(0, 1);
EXPECT_CALL(writeChainDeliveryCb, onDeliveryAck(stream, 1, 100us)).Times(1);
EXPECT_CALL(firstByteDeliveryCb, onDeliveryAck(stream, 0, 100us)).Times(1);
@ -4030,12 +4097,13 @@ TEST_F(
EXPECT_CALL(txCb3, onByteEvent(getTxMatcher(stream, 20))).Times(1);
loopForWrites();
Mock::VerifyAndClearExpectations(&txCb3);
folly::SocketAddress addr;
conn.streamManager->addDeliverable(stream);
conn.lossState.srtt = 100us;
NetworkData networkData;
auto streamState = conn.streamManager->getStream(stream);
auto streamResult = conn.streamManager->getStream(stream);
ASSERT_FALSE(streamResult.hasError());
auto streamState = streamResult.value();
streamState->ackedIntervals.insert(0, 20);
EXPECT_CALL(deliveryCb1, onDeliveryAck(stream, 9, 100us)).Times(1);
EXPECT_CALL(deliveryCb2, onDeliveryAck(stream, 19, 100us)).Times(1);
@ -4086,10 +4154,13 @@ TEST_F(QuicTransportTest, NotifyPendingWriteConnAsync) {
auto streamId = transport_->createBidirectionalStream().value();
auto& conn = transport_->getConnectionState();
auto stream = conn.streamManager->getStream(streamId);
auto streamResult = conn.streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
auto stream = streamResult.value();
// Artificially restrict the conn flow control to have no bytes remaining.
updateFlowControlOnWriteToStream(
*stream, conn.flowControlState.peerAdvertisedMaxOffset);
ASSERT_FALSE(updateFlowControlOnWriteToStream(
*stream, conn.flowControlState.peerAdvertisedMaxOffset)
.hasError());
EXPECT_CALL(writeCallback_, onConnectionWriteReady(_)).Times(0);
transport_->notifyPendingWriteOnConnection(&writeCallback_);
@ -4115,10 +4186,12 @@ TEST_F(QuicTransportTest, NotifyPendingWriteConnBufferFreeUpSpace) {
auto streamId = transport_->createBidirectionalStream().value();
auto& conn = transport_->getConnectionState();
auto stream = conn.streamManager->getStream(streamId);
auto streamResult = conn.streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
auto stream = streamResult.value();
// Fill up the buffer to its limit
updateFlowControlOnWriteToStream(*stream, 100);
ASSERT_FALSE(updateFlowControlOnWriteToStream(*stream, 100).hasError());
transport_->notifyPendingWriteOnConnection(&writeCallback_);
EXPECT_CALL(writeCallback_, onConnectionWriteReady(_)).Times(0);
@ -4126,7 +4199,7 @@ TEST_F(QuicTransportTest, NotifyPendingWriteConnBufferFreeUpSpace) {
evb_.loop();
// Write 10 bytes to the socket to free up space
updateFlowControlOnWriteToSocket(*stream, 10);
ASSERT_FALSE(updateFlowControlOnWriteToSocket(*stream, 10).hasError());
EXPECT_CALL(writeCallback_, onConnectionWriteReady(_));
transport_->onNetworkData(
@ -4172,10 +4245,12 @@ TEST_F(QuicTransportTest, NotifyPendingWriteConnBufferUseTotalSpace) {
auto streamId = transport_->createBidirectionalStream().value();
auto& conn = transport_->getConnectionState();
auto stream = conn.streamManager->getStream(streamId);
auto streamResult = conn.streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
auto stream = streamResult.value();
// Fill up the buffer to its limit
updateFlowControlOnWriteToStream(*stream, 100);
ASSERT_FALSE(updateFlowControlOnWriteToStream(*stream, 100).hasError());
transport_->notifyPendingWriteOnConnection(&writeCallback_);
EXPECT_CALL(writeCallback_, onConnectionWriteReady(_)).Times(0);
@ -4196,10 +4271,12 @@ TEST_F(QuicTransportTest, NotifyPendingWriteConnBufferOveruseSpace) {
auto streamId = transport_->createBidirectionalStream().value();
auto& conn = transport_->getConnectionState();
auto stream = conn.streamManager->getStream(streamId);
auto streamResult = conn.streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
auto stream = streamResult.value();
// Fill up the buffer to its limit
updateFlowControlOnWriteToStream(*stream, 1000);
ASSERT_FALSE(updateFlowControlOnWriteToStream(*stream, 1000).hasError());
transport_->notifyPendingWriteOnConnection(&writeCallback_);
EXPECT_CALL(writeCallback_, onConnectionWriteReady(_)).Times(0);
@ -4223,11 +4300,14 @@ TEST_F(
transport_->setTransportSettings(transportSettings);
auto streamId = transport_->createBidirectionalStream().value();
auto stream = conn.streamManager->getStream(streamId);
auto streamResult = conn.streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
auto stream = streamResult.value();
// Use up the entire flow control (but not the buffer space)
updateFlowControlOnWriteToStream(
*stream, conn.flowControlState.peerAdvertisedMaxOffset);
ASSERT_FALSE(updateFlowControlOnWriteToStream(
*stream, conn.flowControlState.peerAdvertisedMaxOffset)
.hasError());
transport_->notifyPendingWriteOnConnection(&writeCallback_);
EXPECT_CALL(writeCallback_, onConnectionWriteReady(_)).Times(0);
@ -4252,10 +4332,13 @@ TEST_F(QuicTransportTest, NotifyPendingWriteStreamAsyncConnBlocked) {
auto streamId = transport_->createBidirectionalStream().value();
auto& conn = transport_->getConnectionState();
auto stream = conn.streamManager->getStream(streamId);
auto streamResult = conn.streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
auto stream = streamResult.value();
// Artificially restrict the conn flow control to have no bytes remaining.
updateFlowControlOnWriteToStream(
*stream, conn.flowControlState.peerAdvertisedMaxOffset);
ASSERT_FALSE(updateFlowControlOnWriteToStream(
*stream, conn.flowControlState.peerAdvertisedMaxOffset)
.hasError());
EXPECT_CALL(writeCallback_, onStreamWriteReady(stream->id, _)).Times(0);
transport_->notifyPendingWriteOnStream(stream->id, &writeCallback_);
@ -4283,12 +4366,16 @@ TEST_F(QuicTransportTest, NotifyPendingWriteStreamWritableBytesBackpressure) {
auto& conn = transport_->getConnectionState();
conn.transportSettings.backpressureHeadroomFactor = 1;
auto stream = conn.streamManager->getStream(streamId);
auto streamResult = conn.streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
auto stream = streamResult.value();
// Artificially restrict the conn flow control to have no bytes remaining.
updateFlowControlOnWriteToStream(
*stream, conn.flowControlState.peerAdvertisedMaxOffset);
updateFlowControlOnWriteToSocket(
*stream, conn.flowControlState.peerAdvertisedMaxOffset);
ASSERT_FALSE(updateFlowControlOnWriteToStream(
*stream, conn.flowControlState.peerAdvertisedMaxOffset)
.hasError());
ASSERT_FALSE(updateFlowControlOnWriteToSocket(
*stream, conn.flowControlState.peerAdvertisedMaxOffset)
.hasError());
EXPECT_CALL(writeCallback_, onStreamWriteReady(stream->id, _)).Times(0);
transport_->notifyPendingWriteOnStream(stream->id, &writeCallback_);
@ -4327,7 +4414,9 @@ TEST_F(QuicTransportTest, NotifyPendingWriteStreamAsyncStreamBlocked) {
auto streamId = transport_->createBidirectionalStream().value();
auto& conn = transport_->getConnectionState();
auto stream = conn.streamManager->getStream(streamId);
auto streamResult = conn.streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
auto stream = streamResult.value();
// Artificially restrict the stream flow control to have no bytes remaining.
stream->currentWriteOffset = stream->flowControlState.peerAdvertisedMaxOffset;
@ -4354,10 +4443,13 @@ TEST_F(QuicTransportTest, NotifyPendingWriteConnTwice) {
auto streamId = transport_->createBidirectionalStream().value();
auto& conn = transport_->getConnectionState();
auto stream = conn.streamManager->getStream(streamId);
auto streamResult = conn.streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
auto stream = streamResult.value();
// Artificially restrict the conn flow control to have no bytes remaining.
updateFlowControlOnWriteToStream(
*stream, conn.flowControlState.peerAdvertisedMaxOffset);
ASSERT_FALSE(updateFlowControlOnWriteToStream(
*stream, conn.flowControlState.peerAdvertisedMaxOffset)
.hasError());
EXPECT_CALL(writeCallback_, onConnectionWriteReady(_)).Times(0);
EXPECT_FALSE(
@ -4371,7 +4463,9 @@ TEST_F(QuicTransportTest, NotifyPendingWriteStreamTwice) {
auto streamId = transport_->createBidirectionalStream().value();
auto& conn = transport_->getConnectionState();
auto stream = conn.streamManager->getStream(streamId);
auto streamResult = conn.streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
auto stream = streamResult.value();
// Artificially restrict the stream flow control to have no bytes remaining.
stream->currentWriteOffset = stream->flowControlState.peerAdvertisedMaxOffset;
@ -4391,10 +4485,13 @@ TEST_F(QuicTransportTest, NotifyPendingWriteConnDuringClose) {
auto streamId2 = transport_->createBidirectionalStream().value();
auto& conn = transport_->getConnectionState();
auto stream = conn.streamManager->getStream(streamId);
auto streamResult = conn.streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
auto stream = streamResult.value();
// Artificially restrict the conn flow control to have no bytes remaining.
updateFlowControlOnWriteToStream(
*stream, conn.flowControlState.peerAdvertisedMaxOffset);
ASSERT_FALSE(updateFlowControlOnWriteToStream(
*stream, conn.flowControlState.peerAdvertisedMaxOffset)
.hasError());
transport_->notifyPendingWriteOnStream(stream->id, &writeCallback_);
transport_->notifyPendingWriteOnStream(streamId2, &writeCallback_);
@ -4425,8 +4522,12 @@ TEST_F(QuicTransportTest, NotifyPendingWriteStreamDuringClose) {
auto streamId2 = transport_->createBidirectionalStream().value();
auto& conn = transport_->getConnectionState();
auto stream = conn.streamManager->getStream(streamId);
auto stream2 = conn.streamManager->getStream(streamId2);
auto streamResult = conn.streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
auto stream = streamResult.value();
auto streamResult2 = conn.streamManager->getStream(streamId2);
ASSERT_FALSE(streamResult2.hasError());
auto stream2 = streamResult2.value();
// Artificially restrict the stream flow control to have no bytes remaining.
stream->currentWriteOffset = stream->flowControlState.peerAdvertisedMaxOffset;
stream2->currentWriteOffset =
@ -4468,25 +4569,32 @@ TEST_F(QuicTransportTest, WriteStreamFromMiddleOfMap) {
return res;
}));
auto stream1 = conn.streamManager->getStream(s1);
auto streamResult1 = conn.streamManager->getStream(s1);
ASSERT_FALSE(streamResult1.hasError());
auto stream1 = streamResult1.value();
auto buf1 = buildRandomInputData(kDefaultUDPSendPacketLen);
writeDataToQuicStream(*stream1, buf1->clone(), false);
ASSERT_FALSE(
writeDataToQuicStream(*stream1, buf1->clone(), false).hasError());
auto buf2 = buildRandomInputData(kDefaultUDPSendPacketLen);
auto stream2 = conn.streamManager->getStream(s2);
writeDataToQuicStream(*stream2, buf2->clone(), false);
auto streamResult2 = conn.streamManager->getStream(s2);
ASSERT_FALSE(streamResult2.hasError());
auto stream2 = streamResult2.value();
ASSERT_FALSE(
writeDataToQuicStream(*stream2, buf2->clone(), false).hasError());
EXPECT_CALL(*socket_, write(_, _, _))
.WillOnce(testing::WithArgs<1, 2>(Invoke(getTotalIovecLen)));
writeQuicDataToSocket(
*socket_,
conn,
*conn.clientConnectionId,
*conn.serverConnectionId,
*aead_,
*conn.oneRttWriteHeaderCipher,
transport_->getVersion(),
conn.transportSettings.writeConnectionDataPacketsLimit);
ASSERT_FALSE(writeQuicDataToSocket(
*socket_,
conn,
*conn.clientConnectionId,
*conn.serverConnectionId,
*aead_,
*conn.oneRttWriteHeaderCipher,
transport_->getVersion(),
conn.transportSettings.writeConnectionDataPacketsLimit)
.hasError());
EXPECT_EQ(1, conn.outstandings.packets.size());
auto& packet = *getFirstOutstandingPacket(conn, PacketNumberSpace::AppData);
EXPECT_EQ(1, packet.packet.frames.size());
@ -4502,15 +4610,16 @@ TEST_F(QuicTransportTest, WriteStreamFromMiddleOfMap) {
EXPECT_CALL(*socket_, write(_, _, _))
.WillOnce(testing::WithArgs<1, 2>(Invoke(getTotalIovecLen)));
writeQuicDataToSocket(
*socket_,
conn,
*conn.clientConnectionId,
*conn.serverConnectionId,
*aead_,
*conn.oneRttWriteHeaderCipher,
transport_->getVersion(),
conn.transportSettings.writeConnectionDataPacketsLimit);
ASSERT_FALSE(writeQuicDataToSocket(
*socket_,
conn,
*conn.clientConnectionId,
*conn.serverConnectionId,
*aead_,
*conn.oneRttWriteHeaderCipher,
transport_->getVersion(),
conn.transportSettings.writeConnectionDataPacketsLimit)
.hasError());
EXPECT_EQ(1, conn.outstandings.packets.size());
auto& outstandingPacket2 =
*getFirstOutstandingPacket(conn, PacketNumberSpace::AppData);
@ -4527,15 +4636,16 @@ TEST_F(QuicTransportTest, WriteStreamFromMiddleOfMap) {
writableBytes = kDefaultUDPSendPacketLen;
EXPECT_CALL(*socket_, write(_, _, _))
.WillOnce(testing::WithArgs<1, 2>(Invoke(getTotalIovecLen)));
writeQuicDataToSocket(
*socket_,
conn,
*conn.clientConnectionId,
*conn.serverConnectionId,
*aead_,
*conn.oneRttWriteHeaderCipher,
transport_->getVersion(),
conn.transportSettings.writeConnectionDataPacketsLimit);
ASSERT_FALSE(writeQuicDataToSocket(
*socket_,
conn,
*conn.clientConnectionId,
*conn.serverConnectionId,
*aead_,
*conn.oneRttWriteHeaderCipher,
transport_->getVersion(),
conn.transportSettings.writeConnectionDataPacketsLimit)
.hasError());
EXPECT_EQ(1, conn.outstandings.packets.size());
auto& outstandingPacket3 =
*getFirstOutstandingPacket(conn, PacketNumberSpace::AppData);
@ -4555,15 +4665,16 @@ TEST_F(QuicTransportTest, WriteStreamFromMiddleOfMap) {
TEST_F(QuicTransportTest, NoStream) {
auto& conn = transport_->getConnectionState();
EventBase evb;
writeQuicDataToSocket(
*socket_,
conn,
*conn.clientConnectionId,
*conn.serverConnectionId,
*aead_,
*conn.oneRttWriteHeaderCipher,
transport_->getVersion(),
conn.transportSettings.writeConnectionDataPacketsLimit);
ASSERT_FALSE(writeQuicDataToSocket(
*socket_,
conn,
*conn.clientConnectionId,
*conn.serverConnectionId,
*aead_,
*conn.oneRttWriteHeaderCipher,
transport_->getVersion(),
conn.transportSettings.writeConnectionDataPacketsLimit)
.hasError());
EXPECT_TRUE(conn.outstandings.packets.empty());
}
@ -5009,7 +5120,9 @@ TEST_F(QuicTransportTest, GetSetReceiveWindowOnIncomingUnidirectionalStream) {
// Stream ID is for a peer-initiated unidirectional stream
StreamId id = 0b110;
uint64_t windowSize = 1500;
auto stream = conn.streamManager->getStream(id);
auto streamResult = conn.streamManager->getStream(id);
ASSERT_FALSE(streamResult.hasError());
auto stream = streamResult.value();
EXPECT_FALSE(stream->writable());
EXPECT_TRUE(stream->shouldSendFlowControl());
auto res1 = transport_->setStreamFlowControlWindow(id, windowSize);
@ -5045,7 +5158,8 @@ TEST_F(QuicTransportTest, GetMaxWritableOnIncomingUnidirectionalStream) {
auto& conn = transport_->getConnectionState();
// Stream ID is for a peer-initiated unidirectional stream
StreamId id = 0b110;
conn.streamManager->getStream(id);
auto streamResult = conn.streamManager->getStream(id);
ASSERT_FALSE(streamResult.hasError());
auto maxWritable = transport_->getMaxWritableOnStream(id);
// max writable on receive only stream returns an error
EXPECT_TRUE(maxWritable.hasError());
@ -5056,7 +5170,9 @@ TEST_F(QuicTransportTest, GetMaxWritableStreamFlowControlLimited) {
auto transportSettings = transport_->getTransportSettings();
// Stream ID is for a peer-initiated bidirectional stream
StreamId id = 0b100;
auto stream = conn.streamManager->getStream(id);
auto streamResult = conn.streamManager->getStream(id);
ASSERT_FALSE(streamResult.hasError());
auto stream = streamResult.value();
// set stream fcw < both conn fc and total buffer space available, such that
// we're limited by stream fcw
@ -5075,7 +5191,9 @@ TEST_F(QuicTransportTest, GetMaxWritableConnFlowControlLimited) {
auto transportSettings = transport_->getTransportSettings();
// Stream ID is for a peer-initiated bidirectional stream
StreamId id = 0b100;
auto stream = conn.streamManager->getStream(id);
auto streamResult = conn.streamManager->getStream(id);
ASSERT_FALSE(streamResult.hasError());
auto stream = streamResult.value();
// set conn fcw < both stream fcw and buffer space, such that we're conn fcw
// limited
@ -5094,7 +5212,9 @@ TEST_F(QuicTransportTest, GetMaxWritableBufferSpaceLimited) {
auto transportSettings = transport_->getTransportSettings();
// Stream ID is for a peer-initiated bidirectional stream
StreamId id = 0b100;
auto stream = conn.streamManager->getStream(id);
auto streamResult = conn.streamManager->getStream(id);
ASSERT_FALSE(streamResult.hasError());
auto stream = streamResult.value();
// set total buffer space available < stream and conn fcw, such that
// we're limited by buffer space
@ -5116,11 +5236,15 @@ TEST_F(QuicTransportTest, GetMaxWritableStreamFlowControlLimitedTwoStreams) {
StreamId id2 = 0b1100;
// let's assume we have 1k bytes left in each window
auto stream1 = conn.streamManager->getStream(id1);
auto streamResult1 = conn.streamManager->getStream(id1);
ASSERT_FALSE(streamResult1.hasError());
auto stream1 = streamResult1.value();
stream1->currentWriteOffset = 1000;
stream1->flowControlState.peerAdvertisedMaxOffset = 2000;
auto stream2 = conn.streamManager->getStream(id2);
auto streamResult2 = conn.streamManager->getStream(id2);
ASSERT_FALSE(streamResult2.hasError());
auto stream2 = streamResult2.value();
stream2->currentWriteOffset = 1000;
stream2->flowControlState.peerAdvertisedMaxOffset = 2000;
@ -5150,11 +5274,15 @@ TEST_F(QuicTransportTest, GetMaxWritableConnFlowControlLimitedTwoStreams) {
StreamId id2 = 0b1100;
// let's assume we have 1k bytes left in each window
auto stream1 = conn.streamManager->getStream(id1);
auto streamResult1 = conn.streamManager->getStream(id1);
ASSERT_FALSE(streamResult1.hasError());
auto stream1 = streamResult1.value();
stream1->currentWriteOffset = 1000;
stream1->flowControlState.peerAdvertisedMaxOffset = 2000;
auto stream2 = conn.streamManager->getStream(id2);
auto streamResult2 = conn.streamManager->getStream(id2);
ASSERT_FALSE(streamResult2.hasError());
auto stream2 = streamResult2.value();
stream2->currentWriteOffset = 1000;
stream2->flowControlState.peerAdvertisedMaxOffset = 2000;
@ -5184,11 +5312,15 @@ TEST_F(QuicTransportTest, GetMaxWritableBufferSpaceLimitedTwoStreams) {
StreamId id2 = 0b1100;
// let's assume we have 1k bytes left in each window
auto stream1 = conn.streamManager->getStream(id1);
auto streamResult1 = conn.streamManager->getStream(id1);
ASSERT_FALSE(streamResult1.hasError());
auto stream1 = streamResult1.value();
stream1->currentWriteOffset = 1000;
stream1->flowControlState.peerAdvertisedMaxOffset = 2000;
auto stream2 = conn.streamManager->getStream(id2);
auto streamResult2 = conn.streamManager->getStream(id2);
ASSERT_FALSE(streamResult2.hasError());
auto stream2 = streamResult2.value();
stream2->currentWriteOffset = 1000;
stream2->flowControlState.peerAdvertisedMaxOffset = 2000;
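
The test hunks in this file all repeat one unwrap idiom: QuicStreamManager::getStream() now reports failure through a folly::Expected, so each test asserts the error branch is empty before touching the returned stream pointer. Below is a minimal, self-contained sketch of that idiom; Error, Stream and lookupStream() are hypothetical stand-ins, not mvfst APIs.

#include <folly/Expected.h>
#include <cstdint>
#include <string>

struct Error { std::string message; };
struct Stream { uint64_t currentWriteOffset{0}; };

// Stand-in for a lookup that used to throw and now reports failure in-band.
folly::Expected<Stream*, Error> lookupStream(bool exists, Stream& storage) {
  if (!exists) {
    return folly::makeUnexpected(Error{"stream does not exist"});
  }
  return &storage;
}

void example() {
  Stream storage;
  auto streamResult = lookupStream(true, storage);
  // Equivalent of ASSERT_FALSE(streamResult.hasError()) in the tests above.
  if (streamResult.hasError()) {
    return;
  }
  auto* stream = streamResult.value();
  stream->currentWriteOffset = 1000;
}

Call sites that tolerate a missing stream can instead collapse both outcomes with .value_or(nullptr), as some of the non-test hunks further below do.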

View File

@ -1094,13 +1094,14 @@ TYPED_TEST(
EXPECT_CALL(
cb, onDeliveryAck(streamId, combined->computeChainDataLength(), _))
.Times(0);
// deliver an ACK for all of the outstanding packets
this->deliverPacket(this->buildAckPacketForSentAppDataPackets(
std::vector<Optional<typename TestFixture::NewOutstandingPacketInterval>>{
maybeWrittenPackets1, maybeWrittenPackets2}));
auto stream = this->getNonConstConn().streamManager->getStream(streamId);
ASSERT_TRUE(stream);
auto streamExpected =
this->getNonConstConn().streamManager->getStream(streamId);
ASSERT_FALSE(streamExpected.hasError());
auto stream = streamExpected.value();
EXPECT_FALSE(stream->ackedIntervals.empty());
EXPECT_EQ(stream->ackedIntervals.size(), 1);
EXPECT_EQ(stream->ackedIntervals.front().start, 0);
@ -1131,8 +1132,10 @@ TYPED_TEST(
// deliver an ACK for all of the outstanding packets
this->deliverPacket(
this->buildAckPacketForSentAppDataPackets(maybeWrittenPackets1));
auto stream = this->getNonConstConn().streamManager->getStream(streamId);
ASSERT_TRUE(stream);
auto streamExpected =
this->getNonConstConn().streamManager->getStream(streamId);
ASSERT_FALSE(streamExpected.hasError());
auto stream = streamExpected.value();
EXPECT_FALSE(stream->ackedIntervals.empty());
EXPECT_EQ(stream->ackedIntervals.size(), 1);
EXPECT_EQ(stream->ackedIntervals.front().start, 0);
@ -1162,8 +1165,10 @@ TYPED_TEST(
this->deliverPacket(
this->buildAckPacketForSentAppDataPackets(maybeWrittenPackets1),
std::chrono::steady_clock::time_point());
auto stream = this->getNonConstConn().streamManager->getStream(streamId);
ASSERT_TRUE(stream);
auto streamExpected =
this->getNonConstConn().streamManager->getStream(streamId);
ASSERT_FALSE(streamExpected.hasError());
auto stream = streamExpected.value();
EXPECT_FALSE(stream->ackedIntervals.empty());
EXPECT_EQ(stream->ackedIntervals.size(), 1);
EXPECT_EQ(stream->ackedIntervals.front().start, 0);

View File

@ -95,17 +95,18 @@ class TestQuicTransport
if (closed) {
return;
}
writeQuicDataToSocket(
*socket_,
*conn_,
*conn_->clientConnectionId,
*conn_->serverConnectionId,
*aead,
*headerCipher,
getVersion(),
(isConnectionPaced(*conn_)
? conn_->pacer->updateAndGetWriteBatchSize(Clock::now())
: conn_->transportSettings.writeConnectionDataPacketsLimit));
CHECK(!writeQuicDataToSocket(
*socket_,
*conn_,
*conn_->clientConnectionId,
*conn_->serverConnectionId,
*aead,
*headerCipher,
getVersion(),
(isConnectionPaced(*conn_)
? conn_->pacer->updateAndGetWriteBatchSize(Clock::now())
: conn_->transportSettings.writeConnectionDataPacketsLimit))
.hasError());
writePacketizationRequest(
*dynamic_cast<QuicServerConnectionState*>(conn_.get()),
*conn_->clientConnectionId,

View File

@ -427,62 +427,69 @@ QuicClientTransportLite::processUdpPacketData(
}
maybeVerifyPendingKeyUpdate(*conn_, outstandingPacket, regularPacket);
};
AckedFrameVisitor ackedFrameVisitor =
[&](const OutstandingPacketWrapper& outstandingPacket,
const QuicWriteFrame& packetFrame) {
auto outstandingProtectionType =
outstandingPacket.packet.header.getProtectionType();
switch (packetFrame.type()) {
case QuicWriteFrame::Type::WriteAckFrame: {
const WriteAckFrame& frame = *packetFrame.asWriteAckFrame();
DCHECK(!frame.ackBlocks.empty());
VLOG(4) << "Client received ack for largestAcked="
<< frame.ackBlocks.front().end << " " << *this;
commonAckVisitorForAckFrame(ackState, frame);
break;
}
case QuicWriteFrame::Type::RstStreamFrame: {
const RstStreamFrame& frame = *packetFrame.asRstStreamFrame();
VLOG(4) << "Client received ack for reset frame stream="
<< frame.streamId << " " << *this;
const QuicWriteFrame& packetFrame)
-> folly::Expected<folly::Unit, QuicError> {
auto outstandingProtectionType =
outstandingPacket.packet.header.getProtectionType();
switch (packetFrame.type()) {
case QuicWriteFrame::Type::WriteAckFrame: {
const WriteAckFrame& frame = *packetFrame.asWriteAckFrame();
DCHECK(!frame.ackBlocks.empty());
VLOG(4) << "Client received ack for largestAcked="
<< frame.ackBlocks.front().end << " " << *this;
commonAckVisitorForAckFrame(ackState, frame);
break;
}
case QuicWriteFrame::Type::RstStreamFrame: {
const RstStreamFrame& frame = *packetFrame.asRstStreamFrame();
VLOG(4) << "Client received ack for reset frame stream="
<< frame.streamId << " " << *this;
auto stream = conn_->streamManager->getStream(frame.streamId);
if (stream) {
sendRstAckSMHandler(*stream, frame.reliableSize);
}
break;
}
case QuicWriteFrame::Type::WriteStreamFrame: {
const WriteStreamFrame& frame = *packetFrame.asWriteStreamFrame();
auto ackedStream = conn_->streamManager->getStream(frame.streamId);
VLOG(4) << "Client got ack for stream=" << frame.streamId
<< " offset=" << frame.offset << " fin=" << frame.fin
<< " data=" << frame.len
<< " closed=" << (ackedStream == nullptr) << " " << *this;
if (ackedStream) {
sendAckSMHandler(*ackedStream, frame);
}
break;
}
case QuicWriteFrame::Type::WriteCryptoFrame: {
const WriteCryptoFrame& frame = *packetFrame.asWriteCryptoFrame();
auto cryptoStream = getCryptoStream(
*conn_->cryptoState,
protectionTypeToEncryptionLevel(outstandingProtectionType));
processCryptoStreamAck(*cryptoStream, frame.offset, frame.len);
break;
}
case QuicWriteFrame::Type::PingFrame:
conn_->pendingEvents.cancelPingTimeout = true;
break;
case QuicWriteFrame::Type::QuicSimpleFrame:
default:
// ignore other frames.
break;
auto stream =
conn_->streamManager->getStream(frame.streamId).value_or(nullptr);
if (stream) {
return sendRstAckSMHandler(*stream, frame.reliableSize);
}
};
break;
}
case QuicWriteFrame::Type::WriteStreamFrame: {
const WriteStreamFrame& frame = *packetFrame.asWriteStreamFrame();
auto ackedStreamResult =
conn_->streamManager->getStream(frame.streamId);
if (ackedStreamResult.hasError()) {
return folly::makeUnexpected(ackedStreamResult.error());
}
auto& ackedStream = ackedStreamResult.value();
VLOG(4) << "Client got ack for stream=" << frame.streamId
<< " offset=" << frame.offset << " fin=" << frame.fin
<< " data=" << frame.len
<< " closed=" << (ackedStream == nullptr) << " " << *this;
if (ackedStream) {
return sendAckSMHandler(*ackedStream, frame);
}
break;
}
case QuicWriteFrame::Type::WriteCryptoFrame: {
const WriteCryptoFrame& frame = *packetFrame.asWriteCryptoFrame();
auto cryptoStream = getCryptoStream(
*conn_->cryptoState,
protectionTypeToEncryptionLevel(outstandingProtectionType));
processCryptoStreamAck(*cryptoStream, frame.offset, frame.len);
break;
}
case QuicWriteFrame::Type::PingFrame:
conn_->pendingEvents.cancelPingTimeout = true;
break;
case QuicWriteFrame::Type::QuicSimpleFrame:
default:
// ignore other frames.
break;
}
return folly::unit;
};
for (auto& quicFrame : regularPacket.frames) {
switch (quicFrame.type()) {
@ -505,14 +512,18 @@ QuicClientTransportLite::processUdpPacketData(
"Received unexpected ACK_RECEIVE_TIMESTAMPS frame"));
}
conn_->lastProcessedAckEvents.emplace_back(processAckFrame(
auto result = processAckFrame(
*conn_,
pnSpace,
ackFrame,
ackedPacketVisitor,
ackedFrameVisitor,
markPacketLoss,
udpPacket.timings.receiveTimePoint));
udpPacket.timings.receiveTimePoint);
if (result.hasError()) {
return folly::makeUnexpected(result.error());
}
conn_->lastProcessedAckEvents.emplace_back(std::move(result.value()));
break;
}
case QuicFrame::Type::RstStreamFrame: {
@ -526,12 +537,18 @@ QuicClientTransportLite::processUdpPacketData(
"Reliable resets not supported"));
}
pktHasRetransmittableData = true;
auto streamId = frame.streamId;
auto stream = conn_->streamManager->getStream(streamId);
auto streamResult = conn_->streamManager->getStream(frame.streamId);
if (streamResult.hasError()) {
return folly::makeUnexpected(streamResult.error());
}
auto& stream = streamResult.value();
if (!stream) {
break;
}
receiveRstStreamSMHandler(*stream, frame);
auto rstResult = receiveRstStreamSMHandler(*stream, frame);
if (rstResult.hasError()) {
return folly::makeUnexpected(rstResult.error());
}
break;
}
case QuicFrame::Type::ReadCryptoFrame: {
@ -541,10 +558,13 @@ QuicClientTransportLite::processUdpPacketData(
VLOG(10) << "Client received crypto data offset=" << cryptoFrame.offset
<< " len=" << cryptoFrame.data->computeChainDataLength()
<< " packetNum=" << packetNum << " " << *this;
appendDataToReadBuffer(
auto appendResult = appendDataToReadBuffer(
*getCryptoStream(*conn_->cryptoState, encryptionLevel),
StreamBuffer(
std::move(cryptoFrame.data), cryptoFrame.offset, false));
if (appendResult.hasError()) {
return folly::makeUnexpected(appendResult.error());
}
break;
}
case QuicFrame::Type::ReadStreamFrame: {
@ -554,15 +574,22 @@ QuicClientTransportLite::processUdpPacketData(
<< " len=" << frame.data->computeChainDataLength()
<< " fin=" << frame.fin << " packetNum=" << packetNum << " "
<< *this;
auto stream = conn_->streamManager->getStream(
frame.streamId, frame.streamGroupId);
auto streamResult = conn_->streamManager->getStream(frame.streamId);
if (streamResult.hasError()) {
return folly::makeUnexpected(streamResult.error());
}
auto& stream = streamResult.value();
pktHasRetransmittableData = true;
if (!stream) {
VLOG(10) << "Could not find stream=" << frame.streamId << " "
<< *conn_;
break;
}
receiveReadStreamFrameSMHandler(*stream, std::move(frame));
auto readResult =
receiveReadStreamFrameSMHandler(*stream, std::move(frame));
if (readResult.hasError()) {
return folly::makeUnexpected(readResult.error());
}
break;
}
case QuicFrame::Type::ReadNewTokenFrame: {
@ -596,8 +623,12 @@ QuicClientTransportLite::processUdpPacketData(
"Received MaxStreamDataFrame for receiving stream."));
}
pktHasRetransmittableData = true;
auto stream =
auto streamResult =
conn_->streamManager->getStream(streamWindowUpdate.streamId);
if (streamResult.hasError()) {
return folly::makeUnexpected(streamResult.error());
}
auto& stream = streamResult.value();
if (stream) {
handleStreamWindowUpdate(
*stream, streamWindowUpdate.maximumData, packetNum);
@ -617,7 +648,11 @@ QuicClientTransportLite::processUdpPacketData(
VLOG(10) << "Client received blocked stream=" << blocked.streamId << " "
<< *this;
pktHasRetransmittableData = true;
auto stream = conn_->streamManager->getStream(blocked.streamId);
auto streamResult = conn_->streamManager->getStream(blocked.streamId);
if (streamResult.hasError()) {
return folly::makeUnexpected(streamResult.error());
}
auto& stream = streamResult.value();
if (stream) {
handleStreamBlocked(*stream);
}
@ -659,12 +694,15 @@ QuicClientTransportLite::processUdpPacketData(
case QuicFrame::Type::QuicSimpleFrame: {
QuicSimpleFrame& simpleFrame = *quicFrame.asQuicSimpleFrame();
pktHasRetransmittableData = true;
updateSimpleFrameOnPacketReceived(
auto updateResult = updateSimpleFrameOnPacketReceived(
*conn_,
simpleFrame,
longHeader ? longHeader->getDestinationConnId()
: shortHeader->getConnectionId(),
false);
if (updateResult.hasError()) {
return folly::makeUnexpected(updateResult.error());
}
break;
}
case QuicFrame::Type::DatagramFrame: {
@ -776,8 +814,11 @@ QuicClientTransportLite::processUdpPacketData(
auto maxStreamsUni = getIntegerParameter(
TransportParameterId::initial_max_streams_uni,
serverParams->parameters);
processServerInitialParams(
auto processResult = processServerInitialParams(
*clientConn_, serverParams.value(), packetNum);
if (processResult.hasError()) {
return folly::makeUnexpected(processResult.error());
}
cacheServerInitialParams(
*clientConn_,
@ -865,7 +906,10 @@ QuicClientTransportLite::processUdpPacketData(
// state.
clientConn_->zeroRttWriteCipher.reset();
clientConn_->zeroRttWriteHeaderCipher.reset();
markZeroRttPacketsLost(*conn_, markPacketLoss);
auto result = markZeroRttPacketsLost(*conn_, markPacketLoss);
if (result.hasError()) {
return result;
}
}
}
updateAckSendStateOnRecvPacket(
@ -1015,24 +1059,32 @@ void QuicClientTransportLite::writeData() {
const std::string& token = clientConn_->retryToken.empty()
? clientConn_->newToken
: clientConn_->retryToken;
packetLimit -=
handleInitialWriteDataCommon(srcConnId, destConnId, packetLimit, token)
.packetsWritten;
auto result =
handleInitialWriteDataCommon(srcConnId, destConnId, packetLimit, token);
if (result.hasError()) {
throw QuicTransportException(
result.error().message, *result.error().code.asTransportErrorCode());
}
packetLimit -= result->packetsWritten;
if (!packetLimit && !conn_->pendingEvents.anyProbePackets()) {
return;
}
}
if (conn_->handshakeWriteCipher) {
packetLimit -=
handleHandshakeWriteDataCommon(srcConnId, destConnId, packetLimit)
.packetsWritten;
auto result =
handleHandshakeWriteDataCommon(srcConnId, destConnId, packetLimit);
if (result.hasError()) {
throw QuicTransportException(
result.error().message, *result.error().code.asTransportErrorCode());
}
packetLimit -= result->packetsWritten;
if (!packetLimit && !conn_->pendingEvents.anyProbePackets()) {
return;
}
}
if (clientConn_->zeroRttWriteCipher && !conn_->oneRttWriteCipher) {
CHECK(clientConn_->zeroRttWriteHeaderCipher);
packetLimit -= writeZeroRttDataToSocket(
auto result = writeZeroRttDataToSocket(
*socket_,
*conn_,
srcConnId /* src */,
@ -1041,13 +1093,18 @@ void QuicClientTransportLite::writeData() {
*clientConn_->zeroRttWriteHeaderCipher,
version,
packetLimit);
if (result.hasError()) {
throw QuicTransportException(
result.error().message, *result.error().code.asTransportErrorCode());
}
packetLimit -= *result;
}
if (!packetLimit && !conn_->pendingEvents.anyProbePackets()) {
return;
}
if (conn_->oneRttWriteCipher) {
CHECK(clientConn_->oneRttWriteHeaderCipher);
writeQuicDataExceptCryptoStreamToSocket(
auto result = writeQuicDataExceptCryptoStreamToSocket(
*socket_,
*conn_,
srcConnId,
@ -1056,6 +1113,10 @@ void QuicClientTransportLite::writeData() {
*conn_->oneRttWriteHeaderCipher,
version,
packetLimit);
if (result.hasError()) {
throw QuicTransportException(
result.error().message, *result.error().code.asTransportErrorCode());
}
}
}
@ -1660,7 +1721,10 @@ void QuicClientTransportLite::
happyEyeballsStartSecondSocket(clientConn_->happyEyeballsState);
// If this gets called from the write path then we haven't added the packets
// to the outstanding packet list yet.
runOnEvbAsync([&](auto) { markZeroRttPacketsLost(*conn_, markPacketLoss); });
runOnEvbAsync([&](auto) {
auto result = markZeroRttPacketsLost(*conn_, markPacketLoss);
LOG_IF(ERROR, result.hasError()) << "Failed to mark 0-RTT packets as lost.";
});
}
void QuicClientTransportLite::start(
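
The writeData() hunks above show the pattern used at the transport boundary: the write helpers now return folly::Expected, and only this outermost layer converts an error back into a throw (QuicTransportException in the real code). A rough sketch of that shape, assuming a hypothetical doWrite() and ErrorInfo and using std::runtime_error in place of the QUIC exception type:

#include <folly/Expected.h>
#include <cstdint>
#include <stdexcept>
#include <string>

struct ErrorInfo { std::string message; };

// Stand-in for helpers like handleInitialWriteDataCommon() or
// writeZeroRttDataToSocket(): failures come back as values, not throws.
folly::Expected<uint64_t, ErrorInfo> doWrite(uint64_t packetLimit) {
  if (packetLimit == 0) {
    return folly::makeUnexpected(ErrorInfo{"no packet budget left"});
  }
  return uint64_t{1};  // pretend one packet was written
}

void writeDataBoundary() {
  uint64_t packetLimit = 5;
  auto result = doWrite(packetLimit);
  if (result.hasError()) {
    // Only the boundary translates the error back into an exception.
    throw std::runtime_error(result.error().message);
  }
  packetLimit -= *result;
}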

View File

@ -48,8 +48,14 @@ void ClientHandshake::connect(
cachedServerTransportParams->receiveTimestampsExponent,
cachedServerTransportParams->reliableStreamResetSupport,
cachedServerTransportParams->extendedAckFeatures);
updateTransportParamsFromCachedEarlyParams(
auto result = updateTransportParamsFromCachedEarlyParams(
*conn_, *cachedServerTransportParams);
// TODO remove throw
if (result.hasError()) {
raiseError(QuicTransportException(
result.error().message, *result.error().code.asTransportErrorCode()));
return;
}
}
}

View File

@ -95,7 +95,10 @@ std::unique_ptr<QuicClientConnectionState> undoAllClientStateForRetry(
newConn->transportSettings,
std::move(*conn->streamManager));
markZeroRttPacketsLost(*newConn, markPacketLoss);
auto result = markZeroRttPacketsLost(*newConn, markPacketLoss);
if (result.hasError()) {
LOG(FATAL) << "error marking packets lost";
}
return newConn;
}
@ -219,11 +222,17 @@ folly::Expected<folly::Unit, QuicError> processServerInitialParams(
maxStreamDataBidiRemote.value_or(0);
conn.flowControlState.peerAdvertisedInitialMaxStreamOffsetUni =
maxStreamDataUni.value_or(0);
conn.streamManager->setMaxLocalBidirectionalStreams(
auto resultBidi = conn.streamManager->setMaxLocalBidirectionalStreams(
maxStreamsBidi.value_or(0));
if (resultBidi.hasError()) {
return folly::makeUnexpected(resultBidi.error());
}
conn.peerAdvertisedInitialMaxStreamsBidi = maxStreamsBidi.value_or(0);
conn.streamManager->setMaxLocalUnidirectionalStreams(
auto resultUni = conn.streamManager->setMaxLocalUnidirectionalStreams(
maxStreamsUni.value_or(0));
if (resultUni.hasError()) {
return folly::makeUnexpected(resultUni.error());
}
conn.peerAdvertisedInitialMaxStreamsUni = maxStreamsUni.value_or(0);
conn.peerIdleTimeout = std::chrono::milliseconds(idleTimeout.value_or(0));
conn.peerIdleTimeout = timeMin(conn.peerIdleTimeout, kMaxIdleTimeout);
@ -375,7 +384,8 @@ CachedServerTransportParameters getServerCachedTransportParameters(
return transportParams;
}
void updateTransportParamsFromCachedEarlyParams(
folly::Expected<folly::Unit, QuicError>
updateTransportParamsFromCachedEarlyParams(
QuicClientConnectionState& conn,
const CachedServerTransportParameters& transportParams) {
conn.peerIdleTimeout = std::chrono::milliseconds(transportParams.idleTimeout);
@ -387,10 +397,16 @@ void updateTransportParamsFromCachedEarlyParams(
transportParams.initialMaxStreamDataBidiRemote;
conn.flowControlState.peerAdvertisedInitialMaxStreamOffsetUni =
transportParams.initialMaxStreamDataUni;
conn.streamManager->setMaxLocalBidirectionalStreams(
auto resultBidi = conn.streamManager->setMaxLocalBidirectionalStreams(
transportParams.initialMaxStreamsBidi);
conn.streamManager->setMaxLocalUnidirectionalStreams(
if (resultBidi.hasError()) {
return folly::makeUnexpected(resultBidi.error());
}
auto resultUni = conn.streamManager->setMaxLocalUnidirectionalStreams(
transportParams.initialMaxStreamsUni);
if (resultUni.hasError()) {
return folly::makeUnexpected(resultUni.error());
}
conn.peerAdvertisedKnobFrameSupport = transportParams.knobFrameSupport;
conn.peerAdvertisedReliableStreamResetSupport =
transportParams.reliableStreamResetSupport;
@ -407,5 +423,6 @@ void updateTransportParamsFromCachedEarlyParams(
conn.maybePeerAckReceiveTimestampsConfig.clear();
}
conn.peerAdvertisedExtendedAckFeatures = transportParams.extendedAckFeatures;
return folly::unit;
}
} // namespace quic
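
processServerInitialParams() and updateTransportParamsFromCachedEarlyParams() above follow the same propagation idiom: each fallible step is checked and its error is forwarded with folly::makeUnexpected, and overall success is signalled with folly::unit. A condensed, self-contained sketch of that idiom; Err, setBidiLimit() and setUniLimit() are hypothetical stand-ins for the stream-limit setters.

#include <folly/Expected.h>
#include <folly/Unit.h>
#include <cstdint>
#include <string>

struct Err { std::string message; };

folly::Expected<folly::Unit, Err> setBidiLimit(uint64_t) { return folly::unit; }
folly::Expected<folly::Unit, Err> setUniLimit(uint64_t) { return folly::unit; }

folly::Expected<folly::Unit, Err> applyParams(uint64_t bidi, uint64_t uni) {
  auto resultBidi = setBidiLimit(bidi);
  if (resultBidi.hasError()) {
    return folly::makeUnexpected(resultBidi.error());  // forward, don't throw
  }
  auto resultUni = setUniLimit(uni);
  if (resultUni.hasError()) {
    return folly::makeUnexpected(resultUni.error());
  }
  return folly::unit;  // every step succeeded
}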

View File

@ -141,7 +141,8 @@ struct QuicClientConnectionState : public QuicConnectionStateBase {
std::unique_ptr<QuicClientConnectionState> undoAllClientStateForRetry(
std::unique_ptr<QuicClientConnectionState> conn);
folly::Expected<folly::Unit, QuicError> processServerInitialParams(
[[nodiscard]] folly::Expected<folly::Unit, QuicError>
processServerInitialParams(
QuicClientConnectionState& conn,
const ServerTransportParameters& serverParams,
PacketNum packetNum);
@ -164,7 +165,8 @@ void cacheServerInitialParams(
CachedServerTransportParameters getServerCachedTransportParameters(
const QuicClientConnectionState& conn);
void updateTransportParamsFromCachedEarlyParams(
[[nodiscard]] folly::Expected<folly::Unit, QuicError>
updateTransportParamsFromCachedEarlyParams(
QuicClientConnectionState& conn,
const CachedServerTransportParameters& transportParams);
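
The [[nodiscard]] markers on the declarations above are what keeps the new error channel honest: a caller that silently drops the returned Expected now gets a compiler diagnostic. A tiny illustration; mustCheck() is a made-up function, not part of mvfst.

#include <folly/Expected.h>
#include <folly/Unit.h>
#include <string>

[[nodiscard]] folly::Expected<folly::Unit, std::string> mustCheck() {
  return folly::unit;
}

void caller() {
  // mustCheck();           // warning: ignoring a [[nodiscard]] return value
  auto res = mustCheck();   // checked form compiles cleanly
  if (res.hasError()) {
    // handle or propagate the error here
  }
}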

View File

@ -74,7 +74,8 @@ class ClientStateMachineTest : public Test {
};
TEST_F(ClientStateMachineTest, TestUpdateTransportParamsNotIgnorePathMTU) {
updateTransportParamsFromCachedEarlyParams(*client_, kParams);
ASSERT_FALSE(
updateTransportParamsFromCachedEarlyParams(*client_, kParams).hasError());
EXPECT_EQ(client_->udpSendPacketLen, kDefaultUDPSendPacketLen);
}
@ -85,7 +86,8 @@ TEST_F(ClientStateMachineTest, TestUpdateTransportParamsFromCachedEarlyParams) {
client_->maybePeerAckReceiveTimestampsConfig.assign(
{.maxReceiveTimestampsPerAck = 10, .receiveTimestampsExponent = 0});
updateTransportParamsFromCachedEarlyParams(*client_, kParams);
ASSERT_FALSE(
updateTransportParamsFromCachedEarlyParams(*client_, kParams).hasError());
EXPECT_EQ(client_->peerIdleTimeout, idleTimeout);
EXPECT_NE(client_->udpSendPacketLen, maxRecvPacketSize);
EXPECT_EQ(client_->flowControlState.peerAdvertisedMaxOffset, initialMaxData);
@ -182,7 +184,7 @@ TEST_F(ClientStateMachineTest, TestProcessMaxDatagramSizeBelowMin) {
auto result =
processServerInitialParams(clientConn, serverTransportParams, 0);
EXPECT_TRUE(result.hasError());
ASSERT_TRUE(result.hasError());
EXPECT_EQ(result.error().code, TransportErrorCode::TRANSPORT_PARAMETER_ERROR);
}
@ -194,7 +196,8 @@ TEST_F(ClientStateMachineTest, TestProcessMaxDatagramSizeZeroOk) {
encodeIntegerParameter(TransportParameterId::max_datagram_frame_size, 0));
ServerTransportParameters serverTransportParams = {
std::move(transportParams)};
processServerInitialParams(clientConn, serverTransportParams, 0);
ASSERT_FALSE(processServerInitialParams(clientConn, serverTransportParams, 0)
.hasError());
EXPECT_EQ(clientConn.datagramState.maxWriteFrameSize, 0);
}
@ -207,7 +210,8 @@ TEST_F(ClientStateMachineTest, TestProcessMaxDatagramSizeOk) {
kMaxDatagramPacketOverhead + 1));
ServerTransportParameters serverTransportParams = {
std::move(transportParams)};
processServerInitialParams(clientConn, serverTransportParams, 0);
ASSERT_FALSE(processServerInitialParams(clientConn, serverTransportParams, 0)
.hasError());
EXPECT_EQ(
clientConn.datagramState.maxWriteFrameSize,
kMaxDatagramPacketOverhead + 1);
@ -221,7 +225,8 @@ TEST_F(ClientStateMachineTest, TestProcessKnobFramesSupportedParamEnabled) {
encodeIntegerParameter(TransportParameterId::knob_frames_supported, 1));
ServerTransportParameters serverTransportParams = {
std::move(transportParams)};
processServerInitialParams(clientConn, serverTransportParams, 0);
ASSERT_FALSE(processServerInitialParams(clientConn, serverTransportParams, 0)
.hasError());
EXPECT_TRUE(clientConn.peerAdvertisedKnobFrameSupport);
}
@ -233,7 +238,8 @@ TEST_F(ClientStateMachineTest, TestProcessKnobFramesSupportedParamDisabled) {
encodeIntegerParameter(TransportParameterId::knob_frames_supported, 0));
ServerTransportParameters serverTransportParams = {
std::move(transportParams)};
processServerInitialParams(clientConn, serverTransportParams, 0);
ASSERT_FALSE(processServerInitialParams(clientConn, serverTransportParams, 0)
.hasError());
EXPECT_FALSE(clientConn.peerAdvertisedKnobFrameSupport);
}
@ -245,7 +251,8 @@ TEST_F(ClientStateMachineTest, TestProcessExtendedAckSupportedParam) {
encodeIntegerParameter(TransportParameterId::extended_ack_features, 3));
ServerTransportParameters serverTransportParams = {
std::move(transportParams)};
processServerInitialParams(clientConn, serverTransportParams, 0);
ASSERT_FALSE(processServerInitialParams(clientConn, serverTransportParams, 0)
.hasError());
EXPECT_EQ(clientConn.peerAdvertisedExtendedAckFeatures, 3);
}
@ -255,7 +262,8 @@ TEST_F(ClientStateMachineTest, TestProcessExtendedAckSupportedParamDefault) {
std::vector<TransportParameter> transportParams;
ServerTransportParameters serverTransportParams = {
std::move(transportParams)};
processServerInitialParams(clientConn, serverTransportParams, 0);
ASSERT_FALSE(processServerInitialParams(clientConn, serverTransportParams, 0)
.hasError());
EXPECT_EQ(clientConn.peerAdvertisedExtendedAckFeatures, 0);
}
@ -269,7 +277,8 @@ TEST_F(
encodeEmptyParameter(TransportParameterId::reliable_stream_reset));
ServerTransportParameters serverTransportParams = {
std::move(transportParams)};
processServerInitialParams(clientConn, serverTransportParams, 0);
ASSERT_FALSE(processServerInitialParams(clientConn, serverTransportParams, 0)
.hasError());
EXPECT_TRUE(clientConn.peerAdvertisedReliableStreamResetSupport);
}
@ -281,7 +290,8 @@ TEST_F(
std::vector<TransportParameter> transportParams;
ServerTransportParameters serverTransportParams = {
std::move(transportParams)};
processServerInitialParams(clientConn, serverTransportParams, 0);
ASSERT_FALSE(processServerInitialParams(clientConn, serverTransportParams, 0)
.hasError());
EXPECT_FALSE(clientConn.peerAdvertisedReliableStreamResetSupport);
}
@ -354,7 +364,8 @@ TEST_P(
}
ServerTransportParameters serverTransportParams = {
std::move(transportParams)};
processServerInitialParams(clientConn, serverTransportParams, 0);
ASSERT_FALSE(processServerInitialParams(clientConn, serverTransportParams, 0)
.hasError());
EXPECT_EQ(
clientConn.peerAdvertisedMaxStreamGroups,

View File

@ -116,8 +116,9 @@ TEST_F(QuicClientTransportTest, TestReadWithRecvmsgSinglePacketLoop) {
quicClient_->setTransportSettings(std::move(transportSettings));
quicClient_->getClientConn()->oneRttWriteCipher = test::createNoOpAead();
quicClient_->getClientConn()->streamManager->setMaxLocalBidirectionalStreams(
128);
ASSERT_FALSE(quicClient_->getClientConn()
->streamManager->setMaxLocalBidirectionalStreams(128)
.hasError());
StreamId streamId = quicClient_->createBidirectionalStream().value();
quicClient_->writeChain(streamId, folly::IOBuf::copyBuffer("test"), false);

View File

@ -46,8 +46,8 @@ ClonedPacketIdentifier PacketRebuilder::cloneOutstandingPacket(
return *packet.maybeClonedPacketIdentifier;
}
Optional<ClonedPacketIdentifier> PacketRebuilder::rebuildFromPacket(
OutstandingPacketWrapper& packet) {
folly::Expected<Optional<ClonedPacketIdentifier>, QuicError>
PacketRebuilder::rebuildFromPacket(OutstandingPacketWrapper& packet) {
// TODO: if PMTU changes between the transmission of the original packet and
// now, then we cannot clone everything in the packet.
@ -87,7 +87,16 @@ Optional<ClonedPacketIdentifier> PacketRebuilder::rebuildFromPacket(
}
case QuicWriteFrame::Type::WriteStreamFrame: {
const WriteStreamFrame& streamFrame = *frame.asWriteStreamFrame();
auto stream = conn_.streamManager->getStream(streamFrame.streamId);
auto streamResult =
conn_.streamManager->getStream(streamFrame.streamId);
if (streamResult.hasError()) {
VLOG(4) << "Failed to get stream " << streamFrame.streamId
<< " for cloning WriteStreamFrame: "
<< streamResult.error().message;
// Propagate error
return folly::makeUnexpected(streamResult.error());
}
auto* stream = streamResult.value();
if (stream && retransmittable(*stream)) {
auto streamData = cloneRetransmissionBuffer(streamFrame, stream);
auto bufferLen = streamData ? streamData->chainLength() : 0;
@ -104,9 +113,11 @@ Optional<ClonedPacketIdentifier> PacketRebuilder::rebuildFromPacket(
lastFrame && bufferLen && !hasAckFrame,
streamFrame.streamGroupId);
if (res.hasError()) {
throw QuicInternalException(
res.error().message, *res.error().code.asLocalErrorCode());
VLOG(4) << "Failed to write stream frame header for cloning: "
<< res.error().message;
return folly::makeUnexpected(res.error());
}
auto dataLen = *res;
bool ret = dataLen.has_value() && *dataLen == streamFrame.len;
if (ret) {
@ -143,6 +154,7 @@ Optional<ClonedPacketIdentifier> PacketRebuilder::rebuildFromPacket(
auto cryptoWriteResult =
writeCryptoFrame(cryptoFrame.offset, *buf, builder_);
bool ret = cryptoWriteResult.has_value() &&
cryptoWriteResult->offset == cryptoFrame.offset &&
cryptoWriteResult->len == cryptoFrame.len;
notPureAck |= ret;
@ -160,14 +172,22 @@ Optional<ClonedPacketIdentifier> PacketRebuilder::rebuildFromPacket(
case QuicWriteFrame::Type::MaxStreamDataFrame: {
const MaxStreamDataFrame& maxStreamDataFrame =
*frame.asMaxStreamDataFrame();
auto stream =
auto streamResult =
conn_.streamManager->getStream(maxStreamDataFrame.streamId);
if (streamResult.hasError()) {
VLOG(4) << "Failed to get stream " << maxStreamDataFrame.streamId
<< " for cloning MaxStreamDataFrame: "
<< streamResult.error().message;
return folly::makeUnexpected(streamResult.error());
}
auto* stream = streamResult.value();
if (!stream || !stream->shouldSendFlowControl()) {
writeSuccess = true;
break;
}
shouldWriteWindowUpdate = true;
auto ret =
bool ret =
0 != writeFrame(generateMaxStreamDataFrame(*stream), builder_);
windowUpdateWritten |= ret;
notPureAck |= ret;
@ -200,9 +220,7 @@ Optional<ClonedPacketIdentifier> PacketRebuilder::rebuildFromPacket(
break;
}
case QuicWriteFrame::Type::DatagramFrame:
// Do not clone Datagram frames. If datagram frame is the only frame in
// the packet, notPureAck will be false, and the function will return
// none correctly.
// Do not clone Datagram frames.
writeSuccess = true;
break;
default: {
@ -216,6 +234,7 @@ Optional<ClonedPacketIdentifier> PacketRebuilder::rebuildFromPacket(
return none;
}
}
// If this packet had a WriteAckFrame, build a new one with
// fresh AckState on a best-effort basis. If writing
// that ACK fails, just ignore it and use the rest of the
@ -228,6 +247,7 @@ Optional<ClonedPacketIdentifier> PacketRebuilder::rebuildFromPacket(
AckScheduler ackScheduler(conn_, ackState);
ackScheduler.writeNextAcks(builder_);
}
// We shouldn't clone if:
// (1) we end up cloning only acks, pings, or paddings.
// (2) we should write window update, but didn't, and wrote nothing else.

View File

@ -27,8 +27,8 @@ class PacketRebuilder {
PacketBuilderInterface& regularBuilder,
QuicConnectionStateBase& conn);
Optional<ClonedPacketIdentifier> rebuildFromPacket(
OutstandingPacketWrapper& packet);
[[nodiscard]] folly::Expected<Optional<ClonedPacketIdentifier>, QuicError>
rebuildFromPacket(OutstandingPacketWrapper& packet);
// TODO: Same as passing cipherOverhead into the CloningScheduler, this really
// is a sad way to solve the writableBytes problem.
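
rebuildFromPacket() now reports two distinct outcomes: the outer folly::Expected carries a hard error, while the inner Optional keeps the old "nothing worth cloning" signal. A small sketch of consuming such a two-level result; rebuild(), CloneId and Err are hypothetical stand-ins, and folly::Optional is used here where the real code uses quic's own Optional.

#include <folly/Expected.h>
#include <folly/Optional.h>
#include <cstdint>
#include <string>

struct Err { std::string message; };
struct CloneId { uint64_t packetNumber{0}; };

folly::Expected<folly::Optional<CloneId>, Err> rebuild(bool cloneable) {
  if (!cloneable) {
    return folly::Optional<CloneId>();  // nothing to clone, but not an error
  }
  return folly::Optional<CloneId>(CloneId{42});
}

int consume() {
  auto result = rebuild(true);
  if (result.hasError()) {
    return -1;  // hard failure: give up on cloning this packet
  }
  if (!result.value().hasValue()) {
    return 0;   // soft failure: fall back to something else, e.g. a ping
  }
  return static_cast<int>(result.value()->packetNumber);
}

The rebuilder test updates below read the same two layers in order: first hasError() on the Expected, then hasValue() on the inner Optional.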

View File

@ -81,7 +81,9 @@ TEST_F(QuicPacketRebuilderTest, RebuildSmallInitial) {
ASSERT_EQ(packet.packet.frames.size(), 2);
EXPECT_FALSE(packet.body.empty());
regularBuilder2.encodePacketHeader();
ASSERT_TRUE(rebuilder.rebuildFromPacket(outstanding).has_value());
auto rebuildResult = rebuilder.rebuildFromPacket(outstanding);
ASSERT_FALSE(rebuildResult.hasError());
ASSERT_TRUE(rebuildResult.value().hasValue());
auto rebuilt = std::move(regularBuilder2).buildPacket();
EXPECT_FALSE(rebuilt.header.empty());
ASSERT_EQ(rebuilt.packet.frames.size(), 3);
@ -115,7 +117,8 @@ TEST_F(QuicPacketRebuilderTest, RebuildPacket) {
.ackDelayExponent = static_cast<uint8_t>(kDefaultAckDelayExponent)};
QuicServerConnectionState conn(
FizzServerQuicHandshakeContext::Builder().build());
conn.streamManager->setMaxLocalBidirectionalStreams(10);
ASSERT_FALSE(
conn.streamManager->setMaxLocalBidirectionalStreams(10).hasError());
auto stream = conn.streamManager->createNextBidirectionalStream().value();
auto streamId = stream->id;
auto buf =
@ -171,7 +174,9 @@ TEST_F(QuicPacketRebuilderTest, RebuildPacket) {
regularBuilder2.encodePacketHeader();
PacketRebuilder rebuilder(regularBuilder2, conn);
auto outstanding = makeDummyOutstandingPacket(packet1.packet, 1000);
EXPECT_TRUE(rebuilder.rebuildFromPacket(outstanding).has_value());
auto rebuildResult = rebuilder.rebuildFromPacket(outstanding);
ASSERT_FALSE(rebuildResult.hasError());
ASSERT_TRUE(rebuildResult.value().hasValue());
auto packet2 = std::move(regularBuilder2).buildPacket();
// rebuilder writes frames to regularBuilder2
EXPECT_EQ(packet1.packet.frames.size(), packet2.packet.frames.size());
@ -259,7 +264,8 @@ TEST_F(QuicPacketRebuilderTest, RebuildAfterResetStream) {
regularBuilder1.encodePacketHeader();
QuicServerConnectionState conn(
FizzServerQuicHandshakeContext::Builder().build());
conn.streamManager->setMaxLocalBidirectionalStreams(10);
ASSERT_FALSE(
conn.streamManager->setMaxLocalBidirectionalStreams(10).hasError());
auto stream = conn.streamManager->createNextBidirectionalStream().value();
auto streamId = stream->id;
auto buf = folly::IOBuf::copyBuffer("A million miles away.");
@ -277,7 +283,8 @@ TEST_F(QuicPacketRebuilderTest, RebuildAfterResetStream) {
ASSERT_EQ(1, packet1.packet.frames.size());
// Then we reset the stream
sendRstSMHandler(*stream, GenericApplicationErrorCode::UNKNOWN);
ASSERT_FALSE(sendRstSMHandler(*stream, GenericApplicationErrorCode::UNKNOWN)
.hasError());
ShortHeader shortHeader2(
ProtectionType::KeyPhaseZero, getTestConnectionId(), 0);
RegularQuicPacketBuilder regularBuilder2(
@ -285,7 +292,9 @@ TEST_F(QuicPacketRebuilderTest, RebuildAfterResetStream) {
regularBuilder2.encodePacketHeader();
PacketRebuilder rebuilder(regularBuilder2, conn);
auto outstanding = makeDummyOutstandingPacket(packet1.packet, 1000);
EXPECT_FALSE(rebuilder.rebuildFromPacket(outstanding).has_value());
auto rebuildResult = rebuilder.rebuildFromPacket(outstanding);
ASSERT_FALSE(rebuildResult.hasError());
EXPECT_FALSE(rebuildResult.value().hasValue());
}
TEST_F(QuicPacketRebuilderTest, FinOnlyStreamRebuild) {
@ -296,7 +305,8 @@ TEST_F(QuicPacketRebuilderTest, FinOnlyStreamRebuild) {
regularBuilder1.encodePacketHeader();
QuicServerConnectionState conn(
FizzServerQuicHandshakeContext::Builder().build());
conn.streamManager->setMaxLocalBidirectionalStreams(10);
ASSERT_FALSE(
conn.streamManager->setMaxLocalBidirectionalStreams(10).hasError());
auto stream = conn.streamManager->createNextBidirectionalStream().value();
auto streamId = stream->id;
@ -318,7 +328,9 @@ TEST_F(QuicPacketRebuilderTest, FinOnlyStreamRebuild) {
regularBuilder2.encodePacketHeader();
PacketRebuilder rebuilder(regularBuilder2, conn);
auto outstanding = makeDummyOutstandingPacket(packet1.packet, 2000);
EXPECT_TRUE(rebuilder.rebuildFromPacket(outstanding).has_value());
auto rebuildResult = rebuilder.rebuildFromPacket(outstanding);
ASSERT_FALSE(rebuildResult.hasError());
ASSERT_TRUE(rebuildResult.value().hasValue());
auto packet2 = std::move(regularBuilder2).buildPacket();
EXPECT_EQ(packet1.packet.frames.size(), packet2.packet.frames.size());
EXPECT_TRUE(
@ -342,7 +354,8 @@ TEST_F(QuicPacketRebuilderTest, RebuildDataStreamAndEmptyCryptoStream) {
// Get a bunch frames
QuicServerConnectionState conn(
FizzServerQuicHandshakeContext::Builder().build());
conn.streamManager->setMaxLocalBidirectionalStreams(10);
ASSERT_FALSE(
conn.streamManager->setMaxLocalBidirectionalStreams(10).hasError());
auto stream = conn.streamManager->createNextBidirectionalStream().value();
StreamId streamId = stream->id;
auto buf =
@ -381,7 +394,9 @@ TEST_F(QuicPacketRebuilderTest, RebuildDataStreamAndEmptyCryptoStream) {
regularBuilder2.encodePacketHeader();
PacketRebuilder rebuilder(regularBuilder2, conn);
auto outstanding = makeDummyOutstandingPacket(packet1.packet, 1000);
EXPECT_TRUE(rebuilder.rebuildFromPacket(outstanding).has_value());
auto rebuildResult = rebuilder.rebuildFromPacket(outstanding);
ASSERT_FALSE(rebuildResult.hasError());
ASSERT_TRUE(rebuildResult.value().hasValue());
auto packet2 = std::move(regularBuilder2).buildPacket();
// rebuilder writes frames to regularBuilder2
EXPECT_EQ(packet1.packet.frames.size(), packet2.packet.frames.size() + 1);
@ -426,7 +441,9 @@ TEST_F(QuicPacketRebuilderTest, CannotRebuildEmptyCryptoStream) {
regularBuilder2.encodePacketHeader();
PacketRebuilder rebuilder(regularBuilder2, conn);
auto outstanding = makeDummyOutstandingPacket(packet1.packet, 1000);
EXPECT_FALSE(rebuilder.rebuildFromPacket(outstanding).has_value());
auto rebuildResult = rebuilder.rebuildFromPacket(outstanding);
ASSERT_FALSE(rebuildResult.hasError());
EXPECT_FALSE(rebuildResult.value().hasValue());
}
TEST_F(QuicPacketRebuilderTest, CannotRebuild) {
@ -451,7 +468,8 @@ TEST_F(QuicPacketRebuilderTest, CannotRebuild) {
.ackDelayExponent = static_cast<uint8_t>(kDefaultAckDelayExponent)};
QuicServerConnectionState conn(
FizzServerQuicHandshakeContext::Builder().build());
conn.streamManager->setMaxLocalBidirectionalStreams(10);
ASSERT_FALSE(
conn.streamManager->setMaxLocalBidirectionalStreams(10).hasError());
auto stream = conn.streamManager->createNextBidirectionalStream().value();
auto streamId = stream->id;
auto buf =
@ -492,7 +510,9 @@ TEST_F(QuicPacketRebuilderTest, CannotRebuild) {
regularBuilder2.encodePacketHeader();
PacketRebuilder rebuilder(regularBuilder2, conn);
auto outstanding = makeDummyOutstandingPacket(packet1.packet, 1000);
EXPECT_FALSE(rebuilder.rebuildFromPacket(outstanding).has_value());
auto rebuildResult = rebuilder.rebuildFromPacket(outstanding);
ASSERT_FALSE(rebuildResult.hasError());
EXPECT_FALSE(rebuildResult.value().hasValue());
}
TEST_F(QuicPacketRebuilderTest, CloneCounter) {
@ -513,7 +533,7 @@ TEST_F(QuicPacketRebuilderTest, CloneCounter) {
kDefaultUDPSendPacketLen, std::move(shortHeader2), 0 /* largestAcked */);
regularBuilder2.encodePacketHeader();
PacketRebuilder rebuilder(regularBuilder2, conn);
rebuilder.rebuildFromPacket(outstandingPacket);
ASSERT_FALSE(rebuilder.rebuildFromPacket(outstandingPacket).hasError());
EXPECT_TRUE(outstandingPacket.maybeClonedPacketIdentifier.has_value());
EXPECT_EQ(1, conn.outstandings.numClonedPackets());
}
@ -537,7 +557,9 @@ TEST_F(QuicPacketRebuilderTest, PurePingWillRebuild) {
kDefaultUDPSendPacketLen, std::move(shortHeader2), 0);
regularBuilder2.encodePacketHeader();
PacketRebuilder rebuilder(regularBuilder2, conn);
EXPECT_TRUE(rebuilder.rebuildFromPacket(outstandingPacket).has_value());
auto rebuildResult = rebuilder.rebuildFromPacket(outstandingPacket);
ASSERT_FALSE(rebuildResult.hasError());
ASSERT_TRUE(rebuildResult.value().hasValue());
EXPECT_TRUE(outstandingPacket.maybeClonedPacketIdentifier.has_value());
EXPECT_EQ(1, conn.outstandings.numClonedPackets());
}
@ -545,7 +567,8 @@ TEST_F(QuicPacketRebuilderTest, PurePingWillRebuild) {
TEST_F(QuicPacketRebuilderTest, LastStreamFrameSkipLen) {
QuicServerConnectionState conn(
FizzServerQuicHandshakeContext::Builder().build());
conn.streamManager->setMaxLocalBidirectionalStreams(100);
ASSERT_FALSE(
conn.streamManager->setMaxLocalBidirectionalStreams(100).hasError());
auto stream = conn.streamManager->createNextBidirectionalStream().value();
auto streamId = stream->id;
auto buf1 =
@ -589,7 +612,6 @@ TEST_F(QuicPacketRebuilderTest, LastStreamFrameSkipLen) {
std::forward_as_tuple(buf1->computeChainDataLength()),
std::forward_as_tuple(std::make_unique<WriteStreamBuffer>(
ChainedByteRangeHead(buf2), buf1->computeChainDataLength(), true)));
MockQuicPacketBuilder mockBuilder;
size_t packetLimit = 1200;
EXPECT_CALL(mockBuilder, remainingSpaceInPkt()).WillRepeatedly(Invoke([&]() {
@ -615,13 +637,16 @@ TEST_F(QuicPacketRebuilderTest, LastStreamFrameSkipLen) {
}));
PacketRebuilder rebuilder(mockBuilder, conn);
EXPECT_TRUE(rebuilder.rebuildFromPacket(outstandingPacket).has_value());
auto rebuildResult = rebuilder.rebuildFromPacket(outstandingPacket);
ASSERT_FALSE(rebuildResult.hasError());
ASSERT_TRUE(rebuildResult.value().hasValue());
}
TEST_F(QuicPacketRebuilderTest, LastStreamFrameFinOnlySkipLen) {
QuicServerConnectionState conn(
FizzServerQuicHandshakeContext::Builder().build());
conn.streamManager->setMaxLocalBidirectionalStreams(100);
ASSERT_FALSE(
conn.streamManager->setMaxLocalBidirectionalStreams(100).hasError());
auto stream = conn.streamManager->createNextBidirectionalStream().value();
auto streamId = stream->id;
auto buf1 =
@ -689,6 +714,8 @@ TEST_F(QuicPacketRebuilderTest, LastStreamFrameFinOnlySkipLen) {
}));
PacketRebuilder rebuilder(mockBuilder, conn);
EXPECT_TRUE(rebuilder.rebuildFromPacket(outstandingPacket).has_value());
auto rebuildResult = rebuilder.rebuildFromPacket(outstandingPacket);
ASSERT_FALSE(rebuildResult.hasError());
ASSERT_TRUE(rebuildResult.value().hasValue());
}
} // namespace quic::test

View File

@ -40,16 +40,17 @@ const RegularQuicWritePacket& writeQuicPacket(
auto version = conn.version.value_or(*conn.originalVersion);
auto aead = createNoOpAead();
auto headerCipher = createNoOpHeaderCipher();
writeDataToQuicStream(stream, data.clone(), eof);
writeQuicDataToSocket(
sock,
conn,
srcConnId,
dstConnId,
*aead,
*headerCipher,
version,
conn.transportSettings.writeConnectionDataPacketsLimit);
CHECK(!writeDataToQuicStream(stream, data.clone(), eof).hasError());
CHECK(!writeQuicDataToSocket(
sock,
conn,
srcConnId,
dstConnId,
*aead,
*headerCipher,
version,
conn.transportSettings.writeConnectionDataPacketsLimit)
.hasError());
CHECK(
conn.outstandings.packets.rend() !=
getLastOutstandingPacket(conn, PacketNumberSpace::AppData));
@ -64,16 +65,17 @@ PacketNum rstStreamAndSendPacket(
auto aead = createNoOpAead();
auto headerCipher = createNoOpHeaderCipher();
auto version = conn.version.value_or(*conn.originalVersion);
sendRstSMHandler(stream, errorCode);
writeQuicDataToSocket(
sock,
conn,
*conn.clientConnectionId,
*conn.serverConnectionId,
*aead,
*headerCipher,
version,
conn.transportSettings.writeConnectionDataPacketsLimit);
CHECK(!sendRstSMHandler(stream, errorCode).hasError());
CHECK(!writeQuicDataToSocket(
sock,
conn,
*conn.clientConnectionId,
*conn.serverConnectionId,
*aead,
*headerCipher,
version,
conn.transportSettings.writeConnectionDataPacketsLimit)
.hasError());
for (const auto& packet : conn.outstandings.packets) {
for (const auto& frame : packet.packet.frames) {

View File

@ -37,7 +37,6 @@ mvfst_cpp_library(
"CongestionControlFunctions.h",
],
deps = [
"//quic:constants",
"//quic/common:time_util",
],
exported_deps = [
@ -62,7 +61,6 @@ mvfst_cpp_library(
":copa2",
":cubic",
":newreno",
":static_cwnd_congestion_controller",
],
exported_deps = [
"//quic:constants",
@ -106,7 +104,6 @@ mvfst_cpp_library(
":copa2",
":cubic",
":newreno",
":static_cwnd_congestion_controller",
],
exported_deps = [
":congestion_controller_factory",
@ -266,9 +263,6 @@ mvfst_cpp_library(
headers = [
"BbrBandwidthSampler.h",
],
deps = [
"//quic/logging:qlogger_constants",
],
exported_deps = [
":bbr",
"//quic/congestion_control/third_party:chromium_windowed_filter",

View File

@ -709,16 +709,17 @@ TEST_F(BbrTest, BytesCounting) {
ackFrame.largestAcked = packetNum;
ackFrame.ackBlocks.emplace_back(packetNum, packetNum);
auto ackPacketVisitor = [](auto&) {};
auto ackFrameVisitor = [](auto&, auto&) {};
auto lossVisitor = [](auto&, auto&, bool) {};
processAckFrame(
conn,
PacketNumberSpace::AppData,
ackFrame,
ackPacketVisitor,
ackFrameVisitor,
lossVisitor,
Clock::now());
auto ackFrameVisitor = [](auto&, auto&) { return folly::unit; };
auto lossVisitor = [](auto&, auto&, bool) { return folly::unit; };
ASSERT_FALSE(processAckFrame(
conn,
PacketNumberSpace::AppData,
ackFrame,
ackPacketVisitor,
ackFrameVisitor,
lossVisitor,
Clock::now())
.hasError());
EXPECT_EQ(1200, conn.lossState.totalBytesAcked);
}
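
The "return folly::unit;" added to the visitor lambdas above reflects the new callback contract: the frame and loss visitors handed to processAckFrame() are expected to report folly::Expected<folly::Unit, ...> instead of throwing, and a plain folly::Unit return converts into the success branch. A minimal sketch of that contract; FrameVisitor, Err and visitAll() are hypothetical stand-ins for the real visitor plumbing.

#include <folly/Expected.h>
#include <folly/Unit.h>
#include <functional>
#include <string>

struct Err { std::string message; };
using FrameVisitor = std::function<folly::Expected<folly::Unit, Err>(int)>;

// Stand-in for the ACK-processing loop: stop on the first visitor error
// instead of relying on an exception to unwind.
folly::Expected<folly::Unit, Err> visitAll(const FrameVisitor& visitor) {
  for (int frame = 0; frame < 3; ++frame) {
    auto res = visitor(frame);
    if (res.hasError()) {
      return folly::makeUnexpected(res.error());
    }
  }
  return folly::unit;
}

void example() {
  // A lambda returning folly::Unit converts into the Expected success branch.
  FrameVisitor noOp = [](int) { return folly::unit; };
  auto result = visitAll(noOp);
  (void)result.hasError();
}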

View File

@ -82,7 +82,7 @@ uint64_t writePacketizationRequest(
// Similar to the regular write case, if we build, we update connection
// states. The connection states are changed already no matter the result
// of the addSendInstruction() call.
updateConnection(
auto updateResult = updateConnection(
connection,
none /* Packet Event */,
packet.packet,
@ -93,6 +93,12 @@ uint64_t writePacketizationRequest(
// used, so setting it to 0
0,
true /* isDSRPacket */);
if (updateResult.hasError()) {
throw QuicTransportException(
updateResult.error().message,
*updateResult.error().code.asTransportErrorCode());
}
connection.dsrPacketCount++;
if (instructionAddError) {

View File

@ -32,10 +32,11 @@ TEST_F(SchedulerTest, ScheduleStream) {
auto stream = *conn_.streamManager->createNextBidirectionalStream();
stream->flowControlState.peerAdvertisedMaxOffset = 200;
stream->dsrSender = std::make_unique<MockDSRPacketizationRequestSender>();
writeDataToQuicStream(
*stream, folly::IOBuf::copyBuffer("New York Bagles"), false);
ASSERT_FALSE(writeDataToQuicStream(
*stream, folly::IOBuf::copyBuffer("New York Bagles"), false)
.hasError());
BufferMeta bufMeta(200);
writeBufMetaToQuicStream(*stream, bufMeta, true);
ASSERT_FALSE(writeBufMetaToQuicStream(*stream, bufMeta, true).hasError());
auto expectedBufMetaOffset = stream->writeBufMeta.offset;
ASSERT_TRUE(
conn_.streamManager->hasWritable() &&

View File

@ -54,19 +54,27 @@ class DSRCommonTestFixture : public testing::Test {
kDefaultStreamFlowControlWindow;
conn_.flowControlState.peerAdvertisedMaxOffset =
kDefaultConnectionFlowControlWindow;
conn_.streamManager->setMaxLocalBidirectionalStreams(
kDefaultMaxStreamsBidirectional);
conn_.streamManager->setMaxLocalUnidirectionalStreams(
kDefaultMaxStreamsUnidirectional);
CHECK(
!conn_.streamManager
->setMaxLocalBidirectionalStreams(kDefaultMaxStreamsBidirectional)
.hasError());
CHECK(!conn_.streamManager
->setMaxLocalUnidirectionalStreams(
kDefaultMaxStreamsUnidirectional)
.hasError());
}
StreamId prepareOneStream(
size_t bufMetaLength = 1000,
uint64_t peeMaxOffsetSimulated = std::numeric_limits<uint64_t>::max()) {
conn_.streamManager->setMaxLocalBidirectionalStreams(
kDefaultMaxStreamsBidirectional);
conn_.streamManager->setMaxLocalUnidirectionalStreams(
kDefaultMaxStreamsUnidirectional);
CHECK(
!conn_.streamManager
->setMaxLocalBidirectionalStreams(kDefaultMaxStreamsBidirectional)
.hasError());
CHECK(!conn_.streamManager
->setMaxLocalUnidirectionalStreams(
kDefaultMaxStreamsUnidirectional)
.hasError());
auto id = conn_.streamManager->createNextBidirectionalStream().value()->id;
auto stream = conn_.streamManager->findStream(id);
stream->flowControlState.peerAdvertisedMaxOffset = peeMaxOffsetSimulated;
@ -85,12 +93,14 @@ class DSRCommonTestFixture : public testing::Test {
}));
ON_CALL(*sender, flush()).WillByDefault(testing::Return(true));
stream->dsrSender = std::move(sender);
writeDataToQuicStream(
*stream,
folly::IOBuf::copyBuffer("MetroCard Customer Claims"),
false /* eof */);
CHECK(!writeDataToQuicStream(
*stream,
folly::IOBuf::copyBuffer("MetroCard Customer Claims"),
false /* eof */)
.hasError());
BufferMeta bufMeta(bufMetaLength);
writeBufMetaToQuicStream(*stream, bufMeta, true /* eof */);
CHECK(
!writeBufMetaToQuicStream(*stream, bufMeta, true /* eof */).hasError());
return id;
}

View File

@ -3775,8 +3775,10 @@ TEST_F(
PacketNumberSpace::AppData));
deliverData(packet2->coalesce());
ASSERT_EQ(
client->getNonConstConn().streamManager->getStream(streamId), nullptr);
auto streamResult =
client->getNonConstConn().streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
ASSERT_EQ(streamResult.value(), nullptr);
client->close(none);
}
@ -3812,8 +3814,10 @@ TEST_F(QuicClientTransportAfterStartTest, StreamClosedIfReadCallbackNull) {
0 /* largestAcked */));
deliverData(packet->coalesce());
ASSERT_EQ(
client->getNonConstConn().streamManager->getStream(streamId), nullptr);
auto streamResult =
client->getNonConstConn().streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
ASSERT_EQ(streamResult.value(), nullptr);
client->close(none);
}
@ -4314,7 +4318,9 @@ TEST_F(QuicClientTransportAfterStartTest, ResetClearsPendingLoss) {
RegularQuicWritePacket* forceLossPacket =
CHECK_NOTNULL(findPacketWithStream(client->getNonConstConn(), streamId));
markPacketLoss(client->getNonConstConn(), *forceLossPacket, false);
auto result =
markPacketLoss(client->getNonConstConn(), *forceLossPacket, false);
ASSERT_FALSE(result.hasError());
ASSERT_TRUE(client->getConn().streamManager->hasLoss());
client->resetStream(streamId, GenericApplicationErrorCode::UNKNOWN);
@ -4335,9 +4341,13 @@ TEST_F(QuicClientTransportAfterStartTest, LossAfterResetStream) {
RegularQuicWritePacket* forceLossPacket =
CHECK_NOTNULL(findPacketWithStream(client->getNonConstConn(), streamId));
markPacketLoss(client->getNonConstConn(), *forceLossPacket, false);
auto stream = CHECK_NOTNULL(
client->getNonConstConn().streamManager->getStream(streamId));
auto result =
markPacketLoss(client->getNonConstConn(), *forceLossPacket, false);
ASSERT_FALSE(result.hasError());
auto streamResult =
client->getNonConstConn().streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
auto stream = streamResult.value();
ASSERT_TRUE(stream->lossBuffer.empty());
ASSERT_FALSE(client->getConn().streamManager->hasLoss());
}
@ -5251,7 +5261,7 @@ TEST_F(QuicZeroRttClientTest, TestEarlyRetransmit0Rtt) {
EXPECT_TRUE(zeroRttPacketsOutstanding());
// The PTO should trigger marking all the zero-rtt data as lost.
onPTOAlarm(client->getNonConstConn());
ASSERT_FALSE(onPTOAlarm(client->getNonConstConn()).hasError());
EXPECT_FALSE(zeroRttPacketsOutstanding());
// Transport parameters did not change since zero rtt was accepted.

View File

@ -515,10 +515,16 @@ class QuicClientTransportTestBase : public virtual testing::Test {
setupCryptoLayer();
start();
client->getNonConstConn().streamManager->setMaxLocalBidirectionalStreams(
std::numeric_limits<uint32_t>::max());
client->getNonConstConn().streamManager->setMaxLocalUnidirectionalStreams(
std::numeric_limits<uint32_t>::max());
CHECK(!client->getNonConstConn()
.streamManager
->setMaxLocalBidirectionalStreams(
std::numeric_limits<uint32_t>::max())
.hasError());
CHECK(!client->getNonConstConn()
.streamManager
->setMaxLocalUnidirectionalStreams(
std::numeric_limits<uint32_t>::max())
.hasError());
}
void destroyTransport() {

View File

@ -57,21 +57,27 @@ Optional<uint64_t> calculateNewWindowUpdate(
}
template <typename T>
inline void incrementWithOverFlowCheck(T& num, T diff) {
[[nodiscard]] inline folly::Expected<folly::Unit, QuicError>
incrementWithOverFlowCheck(T& num, T diff) {
if (num > std::numeric_limits<T>::max() - diff) {
throw QuicInternalException(
"flow control state overflow", LocalErrorCode::INTERNAL_ERROR);
return folly::makeUnexpected(QuicError(
QuicErrorCode(LocalErrorCode::INTERNAL_ERROR),
"flow control state overflow"));
}
num += diff;
return folly::unit;
}
template <typename T>
inline void decrementWithOverFlowCheck(T& num, T diff) {
[[nodiscard]] inline folly::Expected<folly::Unit, QuicError>
decrementWithOverFlowCheck(T& num, T diff) {
if (num < std::numeric_limits<T>::min() + diff) {
throw QuicInternalException(
"flow control state overflow", LocalErrorCode::INTERNAL_ERROR);
return folly::makeUnexpected(QuicError(
QuicErrorCode(LocalErrorCode::INTERNAL_ERROR),
"flow control state overflow"));
}
num -= diff;
return folly::unit;
}
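// Illustrative caller sketch (hypothetical helper, not part of this diff): with
// the overflow checks now returning folly::Expected, a caller propagates the
// error with makeUnexpected instead of catching QuicInternalException.
[[maybe_unused]] static folly::Expected<folly::Unit, QuicError>
exampleAdvanceSumCurReadOffset(QuicStreamState& stream, uint64_t diff) {
  auto res = incrementWithOverFlowCheck(
      stream.conn.flowControlState.sumCurReadOffset, diff);
  if (res.hasError()) {
    return folly::makeUnexpected(res.error());
  }
  return folly::unit;
}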
inline uint64_t calculateMaximumData(const QuicStreamState& stream) {
@ -180,33 +186,36 @@ bool maybeSendStreamWindowUpdate(
return false;
}
void updateFlowControlOnStreamData(
folly::Expected<folly::Unit, QuicError> updateFlowControlOnStreamData(
QuicStreamState& stream,
uint64_t previousMaxOffsetObserved,
uint64_t bufferEndOffset) {
if (stream.flowControlState.advertisedMaxOffset < bufferEndOffset) {
throw QuicTransportException(
return folly::makeUnexpected(QuicError(
TransportErrorCode::FLOW_CONTROL_ERROR,
folly::to<std::string>(
"Stream flow control violation on stream ", stream.id),
TransportErrorCode::FLOW_CONTROL_ERROR);
"Stream flow control violation on stream ", stream.id)));
}
auto curMaxOffsetObserved =
std::max(previousMaxOffsetObserved, bufferEndOffset);
auto& connFlowControlState = stream.conn.flowControlState;
uint64_t connMaxObservedOffset = connFlowControlState.sumMaxObservedOffset;
incrementWithOverFlowCheck(
auto incrementResult = incrementWithOverFlowCheck(
connMaxObservedOffset, curMaxOffsetObserved - previousMaxOffsetObserved);
if (connMaxObservedOffset > connFlowControlState.advertisedMaxOffset) {
throw QuicTransportException(
"Connection flow control violation",
TransportErrorCode::FLOW_CONTROL_ERROR);
if (incrementResult.hasError()) {
return incrementResult;
}
incrementWithOverFlowCheck(
if (connMaxObservedOffset > connFlowControlState.advertisedMaxOffset) {
return folly::makeUnexpected(QuicError(
TransportErrorCode::FLOW_CONTROL_ERROR,
"Connection flow control violation"));
}
return incrementWithOverFlowCheck(
connFlowControlState.sumMaxObservedOffset,
curMaxOffsetObserved - previousMaxOffsetObserved);
}
void updateFlowControlOnRead(
folly::Expected<folly::Unit, QuicError> updateFlowControlOnRead(
QuicStreamState& stream,
uint64_t lastReadOffset,
TimePoint readTime) {
@ -223,8 +232,11 @@ void updateFlowControlOnRead(
} else {
diff = stream.currentReadOffset - lastReadOffset;
}
incrementWithOverFlowCheck(
auto incrementResult = incrementWithOverFlowCheck(
stream.conn.flowControlState.sumCurReadOffset, diff);
if (incrementResult.hasError()) {
return incrementResult;
}
if (maybeSendConnWindowUpdate(stream.conn, readTime)) {
VLOG(4) << "Read trigger conn window update "
<< " readOffset=" << stream.conn.flowControlState.sumCurReadOffset
@ -237,9 +249,10 @@ void updateFlowControlOnRead(
<< " maxOffset=" << stream.flowControlState.advertisedMaxOffset
<< " window=" << stream.flowControlState.windowSize;
}
return folly::unit;
}
void updateFlowControlOnReceiveReset(
folly::Expected<folly::Unit, QuicError> updateFlowControlOnReceiveReset(
QuicStreamState& stream,
TimePoint resetTime) {
CHECK(stream.reliableSizeFromPeer.hasValue())
@ -254,8 +267,11 @@ void updateFlowControlOnReceiveReset(
// earlier because we'll buffer additional data that arrives.
auto diff = *stream.finalReadOffset - stream.currentReadOffset;
stream.currentReadOffset = *stream.finalReadOffset;
incrementWithOverFlowCheck(
auto incrementResult = incrementWithOverFlowCheck(
stream.conn.flowControlState.sumCurReadOffset, diff);
if (incrementResult.hasError()) {
return incrementResult;
}
if (maybeSendConnWindowUpdate(stream.conn, resetTime)) {
VLOG(4) << "Reset trigger conn window update "
<< " readOffset=" << stream.conn.flowControlState.sumCurReadOffset
@ -264,13 +280,17 @@ void updateFlowControlOnReceiveReset(
<< " window=" << stream.conn.flowControlState.windowSize;
}
}
return folly::unit;
}
void updateFlowControlOnWriteToSocket(
folly::Expected<folly::Unit, QuicError> updateFlowControlOnWriteToSocket(
QuicStreamState& stream,
uint64_t length) {
incrementWithOverFlowCheck(
auto incrementResult = incrementWithOverFlowCheck(
stream.conn.flowControlState.sumCurWriteOffset, length);
if (incrementResult.hasError()) {
return incrementResult;
}
DCHECK_GE(stream.conn.flowControlState.sumCurStreamBufferLen, length);
stream.conn.flowControlState.sumCurStreamBufferLen -= length;
if (stream.conn.flowControlState.sumCurWriteOffset ==
@ -281,16 +301,17 @@ void updateFlowControlOnWriteToSocket(
}
QUIC_STATS(stream.conn.statsCallback, onConnFlowControlBlocked);
}
return folly::unit;
}
void updateFlowControlOnWriteToStream(
folly::Expected<folly::Unit, QuicError> updateFlowControlOnWriteToStream(
QuicStreamState& stream,
uint64_t length) {
incrementWithOverFlowCheck(
return incrementWithOverFlowCheck(
stream.conn.flowControlState.sumCurStreamBufferLen, length);
}
void updateFlowControlOnResetStream(
folly::Expected<folly::Unit, QuicError> updateFlowControlOnResetStream(
QuicStreamState& stream,
folly::Optional<uint64_t> reliableSize) {
uint64_t decrementAmount = 0;
@ -316,7 +337,7 @@ void updateFlowControlOnResetStream(
stream.pendingWrites.chainLength() + stream.writeBufMeta.length);
}
decrementWithOverFlowCheck(
return decrementWithOverFlowCheck(
stream.conn.flowControlState.sumCurStreamBufferLen, decrementAmount);
}

View File

@ -32,32 +32,34 @@ bool maybeSendConnWindowUpdate(
TimePoint updateTime);
bool maybeSendStreamWindowUpdate(QuicStreamState& stream, TimePoint updateTime);
/**
* Update the connection flow control state based on receiving data on the
* stream. previousMaxOffsetObserved is the maxOffsetObserved on the stream
* before receiving the data. bufferEndOffset is the end offset of the current
* buffer.
*/
void updateFlowControlOnStreamData(
[[nodiscard]] folly::Expected<folly::Unit, QuicError>
updateFlowControlOnStreamData(
QuicStreamState& stream,
uint64_t previousMaxOffsetObserved,
uint64_t bufferEndOffset);
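// Illustrative call shape (hypothetical caller; the updated unit tests follow
// the same pattern): check the returned Expected instead of catching a
// QuicTransportException.
//
//   auto res = updateFlowControlOnStreamData(
//       stream, stream.maxOffsetObserved, bufferEndOffset);
//   if (res.hasError()) {
//     // res.error() carries TransportErrorCode::FLOW_CONTROL_ERROR plus a
//     // message such as "Stream flow control violation on stream <id>".
//     return folly::makeUnexpected(res.error());
//   }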
void updateFlowControlOnRead(
[[nodiscard]] folly::Expected<folly::Unit, QuicError> updateFlowControlOnRead(
QuicStreamState& stream,
uint64_t lastReadOffset,
TimePoint readTime);
void updateFlowControlOnReceiveReset(
QuicStreamState& stream,
TimePoint resetTime);
[[nodiscard]] folly::Expected<folly::Unit, QuicError>
updateFlowControlOnReceiveReset(QuicStreamState& stream, TimePoint resetTime);
void updateFlowControlOnWriteToSocket(QuicStreamState& stream, uint64_t length);
[[nodiscard]] folly::Expected<folly::Unit, QuicError>
updateFlowControlOnWriteToSocket(QuicStreamState& stream, uint64_t length);
void updateFlowControlOnWriteToStream(QuicStreamState& stream, uint64_t length);
[[nodiscard]] folly::Expected<folly::Unit, QuicError>
updateFlowControlOnWriteToStream(QuicStreamState& stream, uint64_t length);
void updateFlowControlOnResetStream(
[[nodiscard]] folly::Expected<folly::Unit, QuicError>
updateFlowControlOnResetStream(
QuicStreamState& stream,
folly::Optional<uint64_t> reliableSize = folly::none);

View File

@ -656,8 +656,9 @@ TEST_F(QuicFlowControlTest, UpdateFlowControlOnStreamData) {
auto data1 = buildRandomInputData(10);
uint64_t buffer1EndOffset = 200 + data1->computeChainDataLength();
updateFlowControlOnStreamData(
auto result = updateFlowControlOnStreamData(
stream, stream.maxOffsetObserved, buffer1EndOffset);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(conn_.flowControlState.sumMaxObservedOffset, 560);
}
@ -674,8 +675,9 @@ TEST_F(QuicFlowControlTest, UpdateFlowControlOnStreamDataUnchangedOffset) {
stream.flowControlState.advertisedMaxOffset = 250;
uint64_t buffer1EndOffset = 100;
updateFlowControlOnStreamData(
auto result = updateFlowControlOnStreamData(
stream, stream.maxOffsetObserved, buffer1EndOffset);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.maxOffsetObserved, 200);
EXPECT_EQ(conn_.flowControlState.sumMaxObservedOffset, 550);
}
@ -695,22 +697,25 @@ TEST_F(QuicFlowControlTest, UpdateBadFlowControlOnStreamData) {
auto data1 = buildRandomInputData(100);
uint64_t buffer1EndOffset = 200 + data1->computeChainDataLength();
// Stream flow control violation
EXPECT_THROW(
updateFlowControlOnStreamData(
stream, stream.maxOffsetObserved, buffer1EndOffset),
QuicTransportException);
auto result1 = updateFlowControlOnStreamData(
stream, stream.maxOffsetObserved, buffer1EndOffset);
EXPECT_TRUE(result1.hasError());
EXPECT_NE(result1.error().code.asTransportErrorCode(), nullptr);
EXPECT_FALSE(result1.error().message.empty());
stream.currentReadOffset = 200;
// Connection flow control violation
EXPECT_THROW(
updateFlowControlOnStreamData(
stream, stream.maxOffsetObserved, buffer1EndOffset),
QuicTransportException);
auto result2 = updateFlowControlOnStreamData(
stream, stream.maxOffsetObserved, buffer1EndOffset);
EXPECT_TRUE(result2.hasError());
EXPECT_NE(result2.error().code.asTransportErrorCode(), nullptr);
EXPECT_FALSE(result2.error().message.empty());
auto data2 = buildRandomInputData(50);
uint64_t buffer2EndOffset = 200 + data2->computeChainDataLength();
EXPECT_NO_THROW(updateFlowControlOnStreamData(
stream, stream.maxOffsetObserved, buffer2EndOffset));
auto result3 = updateFlowControlOnStreamData(
stream, stream.maxOffsetObserved, buffer2EndOffset);
ASSERT_FALSE(result3.hasError());
EXPECT_EQ(conn_.flowControlState.sumMaxObservedOffset, 600);
}
@ -729,7 +734,8 @@ TEST_F(QuicFlowControlTest, UpdateFlowControlOnReadBasic) {
conn_.flowControlState.sumCurReadOffset = 100;
EXPECT_CALL(*quicStats_, onConnFlowControlUpdate());
EXPECT_CALL(*quicStats_, onStreamFlowControlUpdate());
updateFlowControlOnRead(stream, 100, Clock::now());
auto result = updateFlowControlOnRead(stream, 100, Clock::now());
ASSERT_FALSE(result.hasError());
EXPECT_EQ(conn_.flowControlState.sumCurReadOffset, 200);
EXPECT_TRUE(conn_.streamManager->pendingWindowUpdate(stream.id));
@ -760,7 +766,8 @@ TEST_F(QuicFlowControlTest, UpdateFlowControlOnReadReliableReset1) {
// Simulate the reading of 10 bytes
stream.currentReadOffset = 20;
updateFlowControlOnRead(stream, 10, Clock::now());
auto result = updateFlowControlOnRead(stream, 10, Clock::now());
ASSERT_FALSE(result.hasError());
EXPECT_EQ(conn_.flowControlState.sumCurReadOffset, 50);
EXPECT_EQ(stream.currentReadOffset, 20);
}
@ -782,7 +789,8 @@ TEST_F(QuicFlowControlTest, UpdateFlowControlOnReadReliableReset2) {
// Simulate the reading of 90 bytes
stream.currentReadOffset = 100;
updateFlowControlOnRead(stream, 10, Clock::now());
auto result = updateFlowControlOnRead(stream, 10, Clock::now());
ASSERT_FALSE(result.hasError());
EXPECT_EQ(conn_.flowControlState.sumCurReadOffset, 180);
EXPECT_EQ(stream.currentReadOffset, 150);
@ -804,7 +812,8 @@ TEST_F(QuicFlowControlTest, UpdateFlowControlOnReceiveReset1) {
// Simulate the receiving of a reliable reset
stream.reliableSizeFromPeer = 100;
stream.finalReadOffset = 150;
updateFlowControlOnReceiveReset(stream, Clock::now());
auto result = updateFlowControlOnReceiveReset(stream, Clock::now());
ASSERT_FALSE(result.hasError());
EXPECT_EQ(conn_.flowControlState.sumCurReadOffset, 40);
EXPECT_EQ(stream.currentReadOffset, 10);
}
@ -825,7 +834,8 @@ TEST_F(QuicFlowControlTest, UpdateFlowControlOnReceiveReset2) {
// Simulate the receiving of a reliable reset
stream.reliableSizeFromPeer = 10;
stream.finalReadOffset = 150;
updateFlowControlOnReceiveReset(stream, Clock::now());
auto result = updateFlowControlOnReceiveReset(stream, Clock::now());
ASSERT_FALSE(result.hasError());
EXPECT_EQ(conn_.flowControlState.sumCurReadOffset, 180);
EXPECT_EQ(stream.currentReadOffset, 150);
}
@ -845,7 +855,8 @@ TEST_F(QuicFlowControlTest, UpdateFlowControlOnReceiveReset3) {
// Simulate the receiving of a non-reliable reset
stream.reliableSizeFromPeer = 0;
stream.finalReadOffset = 150;
updateFlowControlOnReceiveReset(stream, Clock::now());
auto result = updateFlowControlOnReceiveReset(stream, Clock::now());
ASSERT_FALSE(result.hasError());
EXPECT_EQ(conn_.flowControlState.sumCurReadOffset, 180);
EXPECT_EQ(stream.currentReadOffset, 150);
}
@ -858,10 +869,12 @@ TEST_F(QuicFlowControlTest, UpdateFlowControlOnWrite) {
conn_.flowControlState.sumCurWriteOffset = 200;
EXPECT_CALL(*quicStats_, onConnFlowControlBlocked()).Times(0);
updateFlowControlOnWriteToStream(stream, 100);
auto result1 = updateFlowControlOnWriteToStream(stream, 100);
ASSERT_FALSE(result1.hasError());
EXPECT_EQ(conn_.flowControlState.sumCurStreamBufferLen, 100);
EXPECT_CALL(*quicStats_, onConnFlowControlBlocked()).Times(0);
updateFlowControlOnWriteToSocket(stream, 100);
auto result2 = updateFlowControlOnWriteToSocket(stream, 100);
ASSERT_FALSE(result2.hasError());
EXPECT_EQ(conn_.flowControlState.sumCurWriteOffset, 300);
EXPECT_EQ(conn_.flowControlState.sumCurStreamBufferLen, 0);
@ -869,11 +882,13 @@ TEST_F(QuicFlowControlTest, UpdateFlowControlOnWrite) {
stream.currentWriteOffset = 300;
EXPECT_CALL(*quicStats_, onConnFlowControlBlocked()).Times(0);
updateFlowControlOnWriteToStream(stream, 100);
auto result3 = updateFlowControlOnWriteToStream(stream, 100);
ASSERT_FALSE(result3.hasError());
EXPECT_EQ(conn_.flowControlState.sumCurStreamBufferLen, 100);
EXPECT_CALL(*quicStats_, onConnFlowControlBlocked()).Times(0);
updateFlowControlOnWriteToSocket(stream, 100);
auto result4 = updateFlowControlOnWriteToSocket(stream, 100);
ASSERT_FALSE(result4.hasError());
EXPECT_EQ(conn_.flowControlState.sumCurStreamBufferLen, 0);
EXPECT_EQ(conn_.flowControlState.sumCurWriteOffset, 400);
EXPECT_FALSE(conn_.streamManager->flowControlUpdatedContains(id));
@ -882,7 +897,8 @@ TEST_F(QuicFlowControlTest, UpdateFlowControlOnWrite) {
conn_.flowControlState.sumCurStreamBufferLen = 100;
stream.flowControlState.peerAdvertisedMaxOffset = 600;
EXPECT_CALL(*quicStats_, onConnFlowControlBlocked()).Times(1);
updateFlowControlOnWriteToSocket(stream, 100);
auto result5 = updateFlowControlOnWriteToSocket(stream, 100);
ASSERT_FALSE(result5.hasError());
}
TEST_F(QuicFlowControlTest, UpdateFlowControlOnWriteToStream) {
@ -893,13 +909,16 @@ TEST_F(QuicFlowControlTest, UpdateFlowControlOnWriteToStream) {
conn_.flowControlState.sumCurStreamBufferLen = 100;
stream.flowControlState.peerAdvertisedMaxOffset = 300;
updateFlowControlOnWriteToStream(stream, 100);
auto result1 = updateFlowControlOnWriteToStream(stream, 100);
ASSERT_FALSE(result1.hasError());
EXPECT_EQ(conn_.flowControlState.sumCurStreamBufferLen, 200);
updateFlowControlOnWriteToSocket(stream, 150);
auto result2 = updateFlowControlOnWriteToSocket(stream, 150);
ASSERT_FALSE(result2.hasError());
EXPECT_EQ(conn_.flowControlState.sumCurStreamBufferLen, 50);
updateFlowControlOnWriteToStream(stream, 100);
auto result3 = updateFlowControlOnWriteToStream(stream, 100);
ASSERT_FALSE(result3.hasError());
EXPECT_EQ(conn_.flowControlState.sumCurStreamBufferLen, 150);
}
@ -957,7 +976,7 @@ TEST_F(QuicFlowControlTest, WritableList) {
auto buf = IOBuf::create(100);
buf->append(100);
writeDataToQuicStream(stream, std::move(buf), false);
ASSERT_FALSE(writeDataToQuicStream(stream, std::move(buf), false).hasError());
conn_.streamManager->updateWritableStreams(stream);
EXPECT_TRUE(writableContains(*conn_.streamManager, id));
@ -967,7 +986,7 @@ TEST_F(QuicFlowControlTest, WritableList) {
EXPECT_FALSE(writableContains(*conn_.streamManager, id));
// Fin
writeDataToQuicStream(stream, nullptr, true);
ASSERT_FALSE(writeDataToQuicStream(stream, nullptr, true).hasError());
stream.writeBuffer.move();
ChainedByteRangeHead(std::move(stream.pendingWrites));
stream.currentWriteOffset += 100;
@ -1129,7 +1148,8 @@ TEST_F(QuicFlowControlTest, ReliableSizeNonDsrReset1) {
stream.writeBuffer.append(std::move(inputData));
stream.conn.flowControlState.sumCurStreamBufferLen = 5;
updateFlowControlOnResetStream(stream, 22);
auto result = updateFlowControlOnResetStream(stream, 22);
ASSERT_FALSE(result.hasError());
// We threw away 3 bytes due to the reliable reset
EXPECT_EQ(stream.conn.flowControlState.sumCurStreamBufferLen, 2);
@ -1147,7 +1167,8 @@ TEST_F(QuicFlowControlTest, ReliableSizeNonDsrReset2) {
stream.writeBuffer.append(std::move(inputData));
stream.conn.flowControlState.sumCurStreamBufferLen = 5;
updateFlowControlOnResetStream(stream, 10);
auto result = updateFlowControlOnResetStream(stream, 10);
ASSERT_FALSE(result.hasError());
// We threw away all 5 bytes due to the reliable reset
EXPECT_EQ(stream.conn.flowControlState.sumCurStreamBufferLen, 0);
@ -1165,7 +1186,8 @@ TEST_F(QuicFlowControlTest, ReliableSizeNonDsrReset3) {
stream.writeBuffer.append(std::move(inputData));
stream.conn.flowControlState.sumCurStreamBufferLen = 5;
updateFlowControlOnResetStream(stream, 30);
auto result = updateFlowControlOnResetStream(stream, 30);
ASSERT_FALSE(result.hasError());
// We didn't throw away any bytes after the reliable reset
EXPECT_EQ(stream.conn.flowControlState.sumCurStreamBufferLen, 5);
@ -1181,7 +1203,8 @@ TEST_F(QuicFlowControlTest, ReliableSizeDsrReset1) {
stream.writeBufMeta.length = 5;
stream.conn.flowControlState.sumCurStreamBufferLen = 5;
updateFlowControlOnResetStream(stream, 22);
auto result = updateFlowControlOnResetStream(stream, 22);
ASSERT_FALSE(result.hasError());
// We threw away 3 bytes due to the reliable reset
EXPECT_EQ(stream.conn.flowControlState.sumCurStreamBufferLen, 2);
@ -1198,7 +1221,8 @@ TEST_F(QuicFlowControlTest, ReliableSizeDsrReset2) {
stream.conn.flowControlState.sumCurStreamBufferLen = 5;
updateFlowControlOnResetStream(stream, 10);
auto result = updateFlowControlOnResetStream(stream, 10);
ASSERT_FALSE(result.hasError());
// We threw away all 5 bytes due to the reliable reset
EXPECT_EQ(stream.conn.flowControlState.sumCurStreamBufferLen, 0);
@ -1215,7 +1239,8 @@ TEST_F(QuicFlowControlTest, ReliableSizeDsrReset3) {
stream.conn.flowControlState.sumCurStreamBufferLen = 5;
updateFlowControlOnResetStream(stream, 30);
auto result = updateFlowControlOnResetStream(stream, 30);
ASSERT_FALSE(result.hasError());
// We didn't throw away any bytes after the reliable reset
EXPECT_EQ(stream.conn.flowControlState.sumCurStreamBufferLen, 5);

View File

@ -16,6 +16,7 @@ mvfst_cpp_library(
],
exported_deps = [
"//folly:chrono",
"//folly:expected",
"//quic:constants",
"//quic/codec:types",
"//quic/common:optional",

View File

@ -6,6 +6,7 @@
*/
#include <folly/small_vector.h>
#include <quic/loss/QuicLossFunctions.h>
#include <quic/state/QuicStreamFunctions.h>
@ -44,7 +45,8 @@ bool isPersistentCongestion(
return it == ack.ackedPackets.cend();
}
void onPTOAlarm(QuicConnectionStateBase& conn) {
folly::Expected<folly::Unit, QuicError> onPTOAlarm(
QuicConnectionStateBase& conn) {
VLOG(10) << __func__ << " " << conn;
QUIC_STATS(conn.statsCallback, onPTO);
conn.lossState.ptoCount++;
@ -56,9 +58,10 @@ void onPTOAlarm(QuicConnectionStateBase& conn) {
conn.outstandings.numOutstanding(),
kPtoAlarm);
}
if (conn.lossState.ptoCount == conn.transportSettings.maxNumPTOs) {
throw QuicInternalException(
"Exceeded max PTO", LocalErrorCode::CONNECTION_ABANDONED);
if (conn.lossState.ptoCount >= conn.transportSettings.maxNumPTOs) {
return folly::makeUnexpected(QuicError(
QuicErrorCode(LocalErrorCode::CONNECTION_ABANDONED),
"Exceeded max PTO"));
}
// The first PTO after the oneRttWriteCipher is available is an opportunity to
@ -66,7 +69,12 @@ void onPTOAlarm(QuicConnectionStateBase& conn) {
if (conn.transportSettings.earlyRetransmit0Rtt &&
!conn.lossState.attemptedEarlyRetransmit0Rtt && conn.oneRttWriteCipher) {
conn.lossState.attemptedEarlyRetransmit0Rtt = true;
markZeroRttPacketsLost(conn, markPacketLoss);
auto markResult = markZeroRttPacketsLost(conn, markPacketLoss);
if (markResult.hasError()) {
VLOG(3) << "Closing connection due to error marking 0-RTT packets lost: "
<< markResult.error().message;
return markResult;
}
}
// We should avoid sending pointless PTOs if we don't have packets in the loss
@ -100,6 +108,7 @@ void onPTOAlarm(QuicConnectionStateBase& conn) {
packetCount[PacketNumberSpace::AppData];
}
}
return folly::unit;
}
template <class T, size_t N>
@ -117,13 +126,15 @@ using InlineSet = folly::heap_vector_set<
void,
Container>;
void markPacketLoss(
folly::Expected<folly::Unit, QuicError> markPacketLoss(
QuicConnectionStateBase& conn,
RegularQuicWritePacket& packet,
bool processed) {
QUIC_STATS(conn.statsCallback, onPacketLoss);
InlineSet<uint64_t, 10> streamsWithAddedStreamLossForPacket;
for (auto& packetFrame : packet.frames) {
folly::Expected<QuicStreamState*, QuicError> streamResult = nullptr;
switch (packetFrame.type()) {
case QuicWriteFrame::Type::MaxStreamDataFrame: {
MaxStreamDataFrame& frame = *packetFrame.asMaxStreamDataFrame();
@ -131,7 +142,14 @@ void markPacketLoss(
// packet, or if the clone and its siblings have never been processed.
// But for both MaxData and MaxStreamData, we opportunistically send
// an update to avoid stalling the peer.
auto stream = conn.streamManager->getStream(frame.streamId);
streamResult = conn.streamManager->getStream(frame.streamId);
if (streamResult.hasError()) {
VLOG(4) << "Failed to get stream " << frame.streamId
<< " in markPacketLoss (MaxStreamDataFrame): "
<< streamResult.error().message;
return folly::makeUnexpected(streamResult.error());
}
auto* stream = streamResult.value();
if (!stream) {
break;
}
@ -144,7 +162,7 @@ void markPacketLoss(
onConnWindowUpdateLost(conn);
break;
}
// For other frame types, we only process them if the packet is not a
// processed clone.
case QuicWriteFrame::Type::DataBlockedFrame: {
if (processed) {
@ -158,16 +176,21 @@ void markPacketLoss(
if (processed) {
break;
}
auto stream = conn.streamManager->getStream(frame.streamId);
streamResult = conn.streamManager->getStream(frame.streamId);
if (streamResult.hasError()) {
VLOG(4) << "Failed to get stream " << frame.streamId
<< " in markPacketLoss (WriteStreamFrame): "
<< streamResult.error().message;
return folly::makeUnexpected(streamResult.error());
}
auto* stream = streamResult.value();
if (!stream) {
break;
}
if (!frame.fromBufMeta) {
auto bufferItr = stream->retransmissionBuffer.find(frame.offset);
if (bufferItr == stream->retransmissionBuffer.end()) {
// It's possible that the stream was reset or data on the stream was
// skipped while we discovered that its packet was lost so we might
// not have the offset.
break;
}
if (!streamRetransmissionDisabled(conn, *stream)) {
@ -185,10 +208,6 @@ void markPacketLoss(
if (retxBufMetaItr == stream->retransmissionBufMetas.end()) {
break;
}
auto& bufMeta = retxBufMetaItr->second;
CHECK_EQ(bufMeta.offset, frame.offset);
CHECK_EQ(bufMeta.length, frame.len);
CHECK_EQ(bufMeta.eof, frame.fin);
if (!streamRetransmissionDisabled(conn, *stream)) {
stream->insertIntoLossBufMeta(retxBufMetaItr->second);
}
@ -213,8 +232,6 @@ void markPacketLoss(
auto bufferItr = cryptoStream->retransmissionBuffer.find(frame.offset);
if (bufferItr == cryptoStream->retransmissionBuffer.end()) {
// It's possible that the stream was reset while we discovered that
// its packet was lost so we might not have the offset.
break;
}
DCHECK_EQ(bufferItr->second->offset, frame.offset);
@ -227,14 +244,18 @@ void markPacketLoss(
if (processed) {
break;
}
auto stream = conn.streamManager->getStream(frame.streamId);
streamResult = conn.streamManager->getStream(frame.streamId);
if (streamResult.hasError()) {
VLOG(4) << "Failed to get stream " << frame.streamId
<< " in markPacketLoss (RstStreamFrame): "
<< streamResult.error().message;
return folly::makeUnexpected(streamResult.error());
}
auto* stream = streamResult.value();
if (!stream) {
// If the stream is dead, ignore the retransmissions of the rst
// stream.
break;
}
// Add the lost RstStreamFrame back to pendingEvents:
conn.pendingEvents.resets.insert({frame.streamId, frame});
conn.pendingEvents.resets.emplace(frame.streamId, frame);
break;
}
case QuicWriteFrame::Type::StreamDataBlockedFrame: {
@ -242,7 +263,14 @@ void markPacketLoss(
if (processed) {
break;
}
auto stream = conn.streamManager->getStream(frame.streamId);
streamResult = conn.streamManager->getStream(frame.streamId);
if (streamResult.hasError()) {
VLOG(4) << "Failed to get stream " << frame.streamId
<< " in markPacketLoss (StreamDataBlockedFrame): "
<< streamResult.error().message;
return folly::makeUnexpected(streamResult.error());
}
auto* stream = streamResult.value();
if (!stream) {
break;
}
@ -258,17 +286,17 @@ void markPacketLoss(
break;
}
default:
// ignore the rest of the frames.
break;
}
}
return folly::unit;
}
/**
* Processes outstandings for loss and returns true if the loss timer should be
* set. False otherwise.
*/
bool processOutstandingsForLoss(
folly::Expected<bool, QuicError> processOutstandingsForLoss(
QuicConnectionStateBase& conn,
PacketNum largestAcked,
const PacketNumberSpace& pnSpace,
@ -299,7 +327,6 @@ bool processOutstandingsForLoss(
iter++;
continue;
}
// We now have to determine the largest ACKed packet number we should use
// for the reordering threshold loss determination.
auto maybeStreamFrame = pkt.packet.frames.empty()
@ -342,6 +369,7 @@ bool processOutstandingsForLoss(
largestAckedForComparison =
std::max(largestAckedForComparison, currentPacketNum);
// TODO, should we ignore this if srtt == 0?
bool lostByTimeout = (lossTime - pkt.metadata.time) > delayUntilLost;
const auto reorderDistance = largestAckedForComparison - currentPacketNum;
auto reorderingThreshold = conn.lossState.reorderingThreshold;
@ -368,15 +396,19 @@ bool processOutstandingsForLoss(
CHECK(conn.outstandings.clonedPacketCount[pnSpace]);
--conn.outstandings.clonedPacketCount[pnSpace];
}
// Invoke LossVisitor if the packet doesn't have a associated
// ClonedPacketIdentifier; or if the ClonedPacketIdentifier is present in
// conn.outstandings.clonedPacketIdentifiers.
bool processed = pkt.maybeClonedPacketIdentifier &&
!conn.outstandings.clonedPacketIdentifiers.count(
*pkt.maybeClonedPacketIdentifier);
lossVisitor(conn, pkt.packet, processed);
// Remove the ClonedPacketIdentifier from the
// outstandings.clonedPacketIdentifiers set
auto visitorResult = lossVisitor(conn, pkt.packet, processed);
if (visitorResult.hasError()) {
return folly::makeUnexpected(visitorResult.error());
}
if (pkt.maybeClonedPacketIdentifier) {
conn.outstandings.clonedPacketIdentifiers.erase(
*pkt.maybeClonedPacketIdentifier);
@ -385,6 +417,7 @@ bool processOutstandingsForLoss(
CHECK(conn.outstandings.packetCount[currentPacketNumberSpace]);
--conn.outstandings.packetCount[currentPacketNumberSpace];
}
VLOG(10) << __func__ << " lost packetNum=" << currentPacketNum;
// Rather than erasing here, instead mark the packet as lost so we can
// determine if this was spurious later.
@ -416,7 +449,8 @@ bool processOutstandingsForLoss(
* This function should be invoked after some event that is possible to
* trigger loss detection, for example: packets are acked
*/
Optional<CongestionController::LossEvent> detectLossPackets(
folly::Expected<Optional<CongestionController::LossEvent>, QuicError>
detectLossPackets(
QuicConnectionStateBase& conn,
const AckState& ackState,
const LossVisitor& lossVisitor,
@ -443,6 +477,7 @@ Optional<CongestionController::LossEvent> detectLossPackets(
observerLossEvent.emplace(lossTime);
}
}
// Note that time based loss detection is also within the same PNSpace.
// Loop over all ACKed packets and collect the largest ACKed packet per DSR
@ -451,7 +486,7 @@ Optional<CongestionController::LossEvent> detectLossPackets(
// multiple DSR senders. Similarly track the largest non-DSR ACKed, for the
// reason but when DSR packets are reordered "before" non-DSR packets.
// These two variables hold DSR and non-DSR sequence numbers not actual packet
// numbers
InlineMap<StreamId, PacketNum, 20> largestDsrAckedSeqNo;
Optional<PacketNum> largestNonDsrAckedSeqNo;
if (ackEvent) {
@ -485,7 +520,7 @@ Optional<CongestionController::LossEvent> detectLossPackets(
bool shouldSetTimer = false;
if (ackState.largestAckedByPeer.has_value()) {
shouldSetTimer = processOutstandingsForLoss(
auto processResult = processOutstandingsForLoss(
conn,
*ackState.largestAckedByPeer,
pnSpace,
@ -493,13 +528,17 @@ Optional<CongestionController::LossEvent> detectLossPackets(
largestNonDsrAckedSeqNo,
lossTime,
rttSample,
lossVisitor,
lossVisitor, // Pass the visitor (which returns Expected)
delayUntilLost,
lossEvent,
observerLossEvent);
if (processResult.hasError()) {
return folly::makeUnexpected(processResult.error());
}
shouldSetTimer = processResult.value();
}
// notify observers
{
const auto socketObserverContainer = conn.getSocketObserverContainer();
if (observerLossEvent && observerLossEvent->hasPackets() &&
@ -508,7 +547,7 @@ Optional<CongestionController::LossEvent> detectLossPackets(
SocketObserverInterface::Events::lossEvents>()) {
socketObserverContainer
->invokeInterfaceMethod<SocketObserverInterface::Events::lossEvents>(
[observerLossEvent](auto observer, auto observed) {
[&](auto observer, auto observed) {
observer->packetLossDetected(observed, *observerLossEvent);
});
}
@ -526,6 +565,7 @@ Optional<CongestionController::LossEvent> detectLossPackets(
break;
}
}
if (shouldSetTimer && earliest != conn.outstandings.packets.end()) {
// We are eligible to set a loss timer and there are a few packets which
// are unacked, so we can set the early retransmit timer for them.
@ -534,6 +574,7 @@ Optional<CongestionController::LossEvent> detectLossPackets(
<< delayUntilLost.count() << "us" << " " << conn;
getLossTime(conn, pnSpace) = delayUntilLost + earliest->metadata.time;
}
if (lossEvent.largestLostPacketNum.hasValue()) {
DCHECK(lossEvent.largestLostSentTime && lossEvent.smallestLostSentTime);
if (conn.qLogger) {
@ -551,7 +592,8 @@ Optional<CongestionController::LossEvent> detectLossPackets(
return none;
}
Optional<CongestionController::LossEvent> handleAckForLoss(
folly::Expected<Optional<CongestionController::LossEvent>, QuicError>
handleAckForLoss(
QuicConnectionStateBase& conn,
const LossVisitor& lossVisitor,
CongestionController::AckEvent& ack,
@ -574,8 +616,13 @@ Optional<CongestionController::LossEvent> handleAckForLoss(
largestNewlyAckedPacket->nonDsrPacketSequenceNumber);
}
}
auto lossEvent = detectLossPackets(
auto lossEventResult = detectLossPackets(
conn, ackState, lossVisitor, ack.ackTime, pnSpace, &ack);
if (lossEventResult.hasError()) {
return folly::makeUnexpected(lossEventResult.error());
}
conn.pendingEvents.setLossDetectionAlarm =
conn.outstandings.numOutstanding() > 0;
VLOG(10) << __func__ << " largestAckedInPacket="
@ -588,7 +635,7 @@ Optional<CongestionController::LossEvent> handleAckForLoss(
<< " handshakePackets="
<< conn.outstandings.packetCount[PacketNumberSpace::Handshake] << " "
<< conn;
return lossEvent;
}
return lossEventResult.value();
}
} // namespace quic

View File

@ -8,6 +8,7 @@
#pragma once
#include <folly/Chrono.h>
#include <folly/Expected.h>
#include <quic/QuicConstants.h>
#include <quic/codec/Types.h>
#include <quic/common/Optional.h>
@ -184,10 +185,11 @@ void setLossDetectionAlarm(QuicConnectionStateBase& conn, Timeout& timeout) {
}
/**
* Processes outstandings for loss and returns true if the loss timer should be
* set. False otherwise.
* Processes outstandings for loss.
* Returns true if the loss timer should be set, false otherwise.
* Returns QuicError if the LossVisitor fails.
*/
bool processOutstandingsForLoss(
[[nodiscard]] folly::Expected<bool, QuicError> processOutstandingsForLoss(
QuicConnectionStateBase& conn,
PacketNum largestAcked,
const PacketNumberSpace& pnSpace,
@ -195,54 +197,68 @@ bool processOutstandingsForLoss(
const Optional<PacketNum>& largestNonDsrAckedSequenceNumber,
const TimePoint& lossTime,
const std::chrono::microseconds& rttSample,
const LossVisitor& lossVisitor,
const LossVisitor& lossVisitor, // Visitor now returns Expected
std::chrono::microseconds& delayUntilLost,
CongestionController::LossEvent& lossEvent,
Optional<SocketObserverInterface::LossEvent>& observerLossEvent);
/*
* This function should be invoked after some event that is possible to
* trigger loss detection, for example: packets are acked
* Detects losses based on ACKs or timeout.
* Returns a LossEvent on success (possibly empty), or a QuicError if
* processing encountered an error (e.g., from the lossVisitor).
*/
Optional<CongestionController::LossEvent> detectLossPackets(
QuicConnectionStateBase& conn,
const AckState& ackState,
const LossVisitor& lossVisitor,
const TimePoint lossTime,
const PacketNumberSpace pnSpace,
const CongestionController::AckEvent* ackEvent = nullptr);
[[nodiscard]] folly::
Expected<Optional<CongestionController::LossEvent>, QuicError>
detectLossPackets(
QuicConnectionStateBase& conn,
const AckState& ackState,
const LossVisitor& lossVisitor,
const TimePoint lossTime,
const PacketNumberSpace pnSpace,
const CongestionController::AckEvent* ackEvent = nullptr);
void onPTOAlarm(QuicConnectionStateBase& conn);
/*
* Function invoked when the PTO alarm fires. Returns a QuicError instead of
* throwing (e.g. CONNECTION_ABANDONED once the maximum PTO count is exceeded).
*/
[[nodiscard]] folly::Expected<folly::Unit, QuicError> onPTOAlarm(
QuicConnectionStateBase& conn);
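// Illustrative call shape (hypothetical call site; QuicServerTransport::onReadData
// in this diff does the same when probing was writable-bytes limited):
//
//   auto ptoAlarmResult = onPTOAlarm(conn);
//   if (ptoAlarmResult.hasError()) {
//     // e.g. LocalErrorCode::CONNECTION_ABANDONED ("Exceeded max PTO")
//     return ptoAlarmResult;
//   }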
/*
* Function invoked when loss detection timer fires
*/
template <class ClockType = Clock>
void onLossDetectionAlarm(
[[nodiscard]] folly::Expected<folly::Unit, QuicError> onLossDetectionAlarm(
QuicConnectionStateBase& conn,
const LossVisitor& lossVisitor) {
auto now = ClockType::now();
if (conn.outstandings.packets.empty()) {
VLOG(10) << "Transmission alarm fired with no outstanding packets " << conn;
return;
return folly::unit;
}
if (conn.lossState.currentAlarmMethod ==
LossState::AlarmMethod::EarlyRetransmitOrReordering) {
auto lossTimeAndSpace = earliestLossTimer(conn);
CHECK(lossTimeAndSpace.first);
auto lossEvent = detectLossPackets(
auto lossEventResult = detectLossPackets(
conn,
getAckState(conn, lossTimeAndSpace.second),
lossVisitor,
now,
lossTimeAndSpace.second);
if (lossEventResult.hasError()) {
return folly::makeUnexpected(lossEventResult.error());
}
auto& lossEvent = lossEventResult.value();
if (conn.congestionController && lossEvent) {
DCHECK(lossEvent->largestLostSentTime && lossEvent->smallestLostSentTime);
conn.congestionController->onPacketAckOrLoss(
nullptr, lossEvent.get_pointer());
}
} else {
onPTOAlarm(conn);
auto result = onPTOAlarm(conn);
if (result.hasError()) {
return folly::makeUnexpected(result.error());
}
}
conn.pendingEvents.setLossDetectionAlarm =
conn.outstandings.numOutstanding() > 0;
@ -254,32 +270,42 @@ void onLossDetectionAlarm(
<< " handshakePackets="
<< conn.outstandings.packetCount[PacketNumberSpace::Handshake] << " "
<< conn;
return folly::unit;
}
/*
* Process streams in a RegularQuicWritePacket for loss
*
* processed: whether this packet is a already processed clone
* Process streams in a RegularQuicWritePacket for loss.
* This is the canonical implementation typically installed *as* the LossVisitor.
* Returns folly::unit on success, or QuicError if accessing stream state fails.
*/
void markPacketLoss(
[[nodiscard]] folly::Expected<folly::Unit, QuicError> markPacketLoss(
QuicConnectionStateBase& conn,
RegularQuicWritePacket& packet,
bool processed);
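// Illustrative adaptation (hypothetical; assumes LossVisitor is callable as
// (conn, packet, processed) and, after this change, returns
// folly::Expected<folly::Unit, QuicError> just like markPacketLoss):
//
//   LossVisitor visitor = [](QuicConnectionStateBase& conn,
//                            RegularQuicWritePacket& packet,
//                            bool processed) {
//     return markPacketLoss(conn, packet, processed);
//   };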
Optional<CongestionController::LossEvent> handleAckForLoss(
QuicConnectionStateBase& conn,
const LossVisitor& lossVisitor,
CongestionController::AckEvent& ack,
PacketNumberSpace pnSpace);
/*
* Handles ACK processing related to loss detection.
* Returns a LossEvent on success (possibly empty), or QuicError if processing
* failed.
*/
[[nodiscard]] folly::
Expected<Optional<CongestionController::LossEvent>, QuicError>
handleAckForLoss(
QuicConnectionStateBase& conn,
const LossVisitor& lossVisitor, // Visitor now returns Expected
CongestionController::AckEvent& ack,
PacketNumberSpace pnSpace);
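// Illustrative call shape (hypothetical caller; mirrors how onLossDetectionAlarm
// above consumes detectLossPackets):
//
//   auto lossEventResult = handleAckForLoss(conn, markPacketLoss, ack, pnSpace);
//   if (lossEventResult.hasError()) {
//     return folly::makeUnexpected(lossEventResult.error());
//   }
//   auto& lossEvent = lossEventResult.value();
//   if (conn.congestionController && lossEvent) {
//     conn.congestionController->onPacketAckOrLoss(&ack, lossEvent.get_pointer());
//   }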
/**
* We force mark zero rtt packets as lost during zero rtt rejection.
* Force marks zero rtt packets as lost during zero rtt rejection.
* Returns folly::unit on success, or QuicError if marking fails.
*/
template <class ClockType = Clock>
void markZeroRttPacketsLost(
[[nodiscard]] folly::Expected<folly::Unit, QuicError> markZeroRttPacketsLost(
QuicConnectionStateBase& conn,
const LossVisitor& lossVisitor) {
CongestionController::LossEvent lossEvent(ClockType::now());
auto iter = getFirstOutstandingPacket(conn, PacketNumberSpace::AppData);
while (iter != conn.outstandings.packets.end()) {
DCHECK_EQ(
@ -291,9 +317,12 @@ void markZeroRttPacketsLost(
bool processed = pkt.maybeClonedPacketIdentifier &&
!conn.outstandings.clonedPacketIdentifiers.count(
*pkt.maybeClonedPacketIdentifier);
lossVisitor(conn, pkt.packet, processed);
// Remove the ClonedPacketIdentifier from the
// outstandings.clonedPacketIdentifiers set
auto visitorResult = lossVisitor(conn, pkt.packet, processed);
if (visitorResult.hasError()) {
return folly::makeUnexpected(visitorResult.error());
}
if (pkt.maybeClonedPacketIdentifier) {
conn.outstandings.clonedPacketIdentifiers.erase(
*pkt.maybeClonedPacketIdentifier);
@ -312,10 +341,13 @@ void markZeroRttPacketsLost(
getNextOutstandingPacket(conn, PacketNumberSpace::AppData, iter + 1);
}
}
conn.lossState.rtxCount += lossEvent.lostPackets;
if (conn.congestionController && lossEvent.largestLostPacketNum.hasValue()) {
conn.congestionController->onRemoveBytesFromInflight(lossEvent.lostBytes);
}
VLOG(10) << __func__ << " marked=" << lossEvent.lostPackets;
return folly::unit;
}
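// Illustrative call shape (hypothetical caller; onPTOAlarm in this diff does the
// same when attempting early retransmission of 0-RTT data):
//
//   auto markResult = markZeroRttPacketsLost(conn, markPacketLoss);
//   if (markResult.hasError()) {
//     return markResult;  // propagate instead of throwing
//   }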
} // namespace quic

File diff suppressed because it is too large

View File

@ -17,7 +17,6 @@
#include <quic/common/Optional.h>
#include <quic/common/TransportKnobs.h>
#include <algorithm>
#include <chrono>
#include <memory>
#include <stdexcept>
@ -190,7 +189,10 @@ folly::Expected<folly::Unit, QuicError> QuicServerTransport::onReadData(
if (serverConn_->transportSettings.enableWritableBytesLimit &&
serverConn_->numProbesWritableBytesLimited &&
prevWritableBytes < curWritableBytes) {
onPTOAlarm(*serverConn_);
auto ptoAlarmResult = onPTOAlarm(*serverConn_);
if (ptoAlarmResult.hasError()) {
return ptoAlarmResult;
}
serverConn_->numProbesWritableBytesLimited = 0;
}
@ -283,8 +285,12 @@ void QuicServerTransport::writeData() {
};
if (conn_->initialWriteCipher) {
auto res = handleInitialWriteDataCommon(srcConnId, destConnId, packetLimit);
packetLimit -= res.packetsWritten;
serverConn_->numHandshakeBytesSent += res.bytesWritten;
if (res.hasError()) {
throw QuicTransportException(
res.error().message, *res.error().code.asTransportErrorCode());
}
packetLimit -= res->packetsWritten;
serverConn_->numHandshakeBytesSent += res->bytesWritten;
if (!packetLimit && !conn_->pendingEvents.anyProbePackets()) {
return;
}
@ -292,8 +298,12 @@ void QuicServerTransport::writeData() {
if (conn_->handshakeWriteCipher) {
auto res =
handleHandshakeWriteDataCommon(srcConnId, destConnId, packetLimit);
packetLimit -= res.packetsWritten;
serverConn_->numHandshakeBytesSent += res.bytesWritten;
if (res.hasError()) {
throw QuicTransportException(
res.error().message, *res.error().code.asTransportErrorCode());
}
packetLimit -= res->packetsWritten;
serverConn_->numHandshakeBytesSent += res->bytesWritten;
if (!packetLimit && !conn_->pendingEvents.anyProbePackets()) {
return;
}
@ -302,7 +312,7 @@ void QuicServerTransport::writeData() {
CHECK(conn_->oneRttWriteHeaderCipher);
auto writeLoopBeginTime = Clock::now();
auto nonDsrPath = [&](auto limit) {
return writeQuicDataToSocket(
auto result = writeQuicDataToSocket(
*socket_,
*conn_,
srcConnId /* src */,
@ -312,12 +322,18 @@ void QuicServerTransport::writeData() {
version,
limit,
writeLoopBeginTime);
if (result.hasError()) {
throw QuicTransportException(
result.error().message,
*result.error().code.asTransportErrorCode());
}
return *result;
};
auto dsrPath = [&](auto limit) {
auto bytesBefore = conn_->lossState.totalBytesSent;
// The DSR path can't write probes.
// This is packetsWritten, probesWritten, bytesWritten.
return WriteQuicDataResult{
auto result = WriteQuicDataResult{
writePacketizationRequest(
*serverConn_,
destConnId,
@ -326,6 +342,7 @@ void QuicServerTransport::writeData() {
writeLoopBeginTime),
0,
conn_->lossState.totalBytesSent - bytesBefore};
return result;
};
// We need a while loop because both paths write streams from the same
// queue, which can result in empty writes.
@ -1195,7 +1212,8 @@ QuicSocket::WriteResult QuicServerTransport::writeBufMeta(
if (!conn_->streamManager->streamExists(id)) {
return folly::makeUnexpected(LocalErrorCode::STREAM_NOT_EXISTS);
}
auto stream = CHECK_NOTNULL(conn_->streamManager->getStream(id));
auto stream =
CHECK_NOTNULL(conn_->streamManager->getStream(id).value_or(nullptr));
if (!stream->writable()) {
return folly::makeUnexpected(LocalErrorCode::STREAM_CLOSED);
}
@ -1220,7 +1238,12 @@ QuicSocket::WriteResult QuicServerTransport::writeBufMeta(
wasAppLimitedOrIdle = conn_->congestionController->isAppLimited();
wasAppLimitedOrIdle |= conn_->streamManager->isAppIdle();
}
writeBufMetaToQuicStream(*stream, data, eof);
auto writeResult = writeBufMetaToQuicStream(*stream, data, eof);
if (writeResult.hasError()) {
throw QuicTransportException(
writeResult.error().message,
*writeResult.error().code.asTransportErrorCode());
}
// If we were previously app limited restart pacing with the current rate.
if (wasAppLimitedOrIdle && conn_->pacer) {
conn_->pacer->reset();
@ -1268,7 +1291,8 @@ QuicSocket::WriteResult QuicServerTransport::setDSRPacketizationRequestSender(
if (!conn_->streamManager->streamExists(id)) {
return folly::makeUnexpected(LocalErrorCode::STREAM_NOT_EXISTS);
}
auto stream = CHECK_NOTNULL(conn_->streamManager->getStream(id));
auto stream =
CHECK_NOTNULL(conn_->streamManager->getStream(id).value_or(nullptr));
// Only allow resetting it back to nullptr once set.
if (stream->dsrSender && sender != nullptr) {
return folly::makeUnexpected(LocalErrorCode::INVALID_OPERATION);

View File

@ -295,7 +295,6 @@ void processClientInitialParams(
"Retry Source Connection ID is received by server",
TransportErrorCode::TRANSPORT_PARAMETER_ERROR);
}
if (maxAckDelay && *maxAckDelay >= kMaxAckDelay) {
throw QuicTransportException(
"Max Ack Delay is greater than 2^14 ",
@ -326,10 +325,25 @@ void processClientInitialParams(
maxStreamDataBidiRemote.value_or(0);
conn.flowControlState.peerAdvertisedInitialMaxStreamOffsetUni =
maxStreamDataUni.value_or(0);
conn.streamManager->setMaxLocalBidirectionalStreams(
maxStreamsBidi.value_or(0));
conn.streamManager->setMaxLocalUnidirectionalStreams(
maxStreamsUni.value_or(0));
auto maxBidiStreamsResult =
conn.streamManager->setMaxLocalBidirectionalStreams(
maxStreamsBidi.value_or(0));
if (!maxBidiStreamsResult) {
throw QuicTransportException(
"Failed to set max local bidirectional streams",
TransportErrorCode::TRANSPORT_PARAMETER_ERROR);
}
auto maxUniStreamsResult =
conn.streamManager->setMaxLocalUnidirectionalStreams(
maxStreamsUni.value_or(0));
if (!maxUniStreamsResult) {
throw QuicTransportException(
"Failed to set max local unidirectional streams",
TransportErrorCode::TRANSPORT_PARAMETER_ERROR);
}
conn.peerIdleTimeout = std::chrono::milliseconds(idleTimeout.value_or(0));
conn.peerIdleTimeout = timeMin(conn.peerIdleTimeout, kMaxIdleTimeout);
if (ackDelayExponent && *ackDelayExponent > kMaxAckDelayExponent) {
@ -1068,65 +1082,77 @@ void onServerReadDataFromOpen(
[&](const OutstandingPacketWrapper& outstandingPacket) {
maybeVerifyPendingKeyUpdate(conn, outstandingPacket, regularPacket);
};
AckedFrameVisitor ackedFrameVisitor =
[&](const OutstandingPacketWrapper&,
const QuicWriteFrame& packetFrame) {
switch (packetFrame.type()) {
case QuicWriteFrame::Type::WriteStreamFrame: {
const WriteStreamFrame& frame = *packetFrame.asWriteStreamFrame();
VLOG(4) << "Server received ack for stream=" << frame.streamId
<< " offset=" << frame.offset << " fin=" << frame.fin
<< " len=" << frame.len << " " << conn;
auto ackedStream = conn.streamManager->getStream(frame.streamId);
if (ackedStream) {
sendAckSMHandler(*ackedStream, frame);
}
break;
}
case QuicWriteFrame::Type::WriteCryptoFrame: {
const WriteCryptoFrame& frame = *packetFrame.asWriteCryptoFrame();
auto cryptoStream =
getCryptoStream(*conn.cryptoState, encryptionLevel);
processCryptoStreamAck(*cryptoStream, frame.offset, frame.len);
break;
}
case QuicWriteFrame::Type::RstStreamFrame: {
const RstStreamFrame& frame = *packetFrame.asRstStreamFrame();
VLOG(4) << "Server received ack for reset stream="
<< frame.streamId << " " << conn;
auto stream = conn.streamManager->getStream(frame.streamId);
if (stream) {
sendRstAckSMHandler(*stream, frame.reliableSize);
}
break;
}
case QuicWriteFrame::Type::WriteAckFrame: {
const WriteAckFrame& frame = *packetFrame.asWriteAckFrame();
DCHECK(!frame.ackBlocks.empty());
VLOG(4) << "Server received ack for largestAcked="
<< frame.ackBlocks.front().end << " " << conn;
commonAckVisitorForAckFrame(ackState, frame);
break;
}
case QuicWriteFrame::Type::PingFrame:
conn.pendingEvents.cancelPingTimeout = true;
return;
case QuicWriteFrame::Type::QuicSimpleFrame: {
const QuicSimpleFrame& frame = *packetFrame.asQuicSimpleFrame();
// ACK of HandshakeDone is a server-specific behavior.
if (frame.asHandshakeDoneFrame()) {
// Call handshakeConfirmed outside of the packet
// processing loop to avoid a re-entrancy.
handshakeConfirmedThisLoop = true;
}
break;
}
default: {
break;
AckedFrameVisitor ackedFrameVisitor = [&](const OutstandingPacketWrapper&,
const QuicWriteFrame& packetFrame)
-> folly::Expected<folly::Unit, QuicError> {
switch (packetFrame.type()) {
case QuicWriteFrame::Type::WriteStreamFrame: {
const WriteStreamFrame& frame = *packetFrame.asWriteStreamFrame();
VLOG(4) << "Server received ack for stream=" << frame.streamId
<< " offset=" << frame.offset << " fin=" << frame.fin
<< " len=" << frame.len << " " << conn;
auto ackedStream =
conn.streamManager->getStream(frame.streamId).value_or(nullptr);
if (ackedStream) {
auto result = sendAckSMHandler(*ackedStream, frame);
if (result.hasError()) {
throw QuicTransportException(
result.error().message,
*result.error().code.asTransportErrorCode());
}
}
};
break;
}
case QuicWriteFrame::Type::WriteCryptoFrame: {
const WriteCryptoFrame& frame = *packetFrame.asWriteCryptoFrame();
auto cryptoStream =
getCryptoStream(*conn.cryptoState, encryptionLevel);
processCryptoStreamAck(*cryptoStream, frame.offset, frame.len);
break;
}
case QuicWriteFrame::Type::RstStreamFrame: {
const RstStreamFrame& frame = *packetFrame.asRstStreamFrame();
VLOG(4) << "Server received ack for reset stream=" << frame.streamId
<< " " << conn;
auto stream =
conn.streamManager->getStream(frame.streamId).value_or(nullptr);
if (stream) {
auto result = sendRstAckSMHandler(*stream, frame.reliableSize);
if (result.hasError()) {
throw QuicTransportException(
result.error().message,
*result.error().code.asTransportErrorCode());
}
}
break;
}
case QuicWriteFrame::Type::WriteAckFrame: {
const WriteAckFrame& frame = *packetFrame.asWriteAckFrame();
DCHECK(!frame.ackBlocks.empty());
VLOG(4) << "Server received ack for largestAcked="
<< frame.ackBlocks.front().end << " " << conn;
commonAckVisitorForAckFrame(ackState, frame);
break;
}
case QuicWriteFrame::Type::PingFrame:
conn.pendingEvents.cancelPingTimeout = true;
break;
case QuicWriteFrame::Type::QuicSimpleFrame: {
const QuicSimpleFrame& frame = *packetFrame.asQuicSimpleFrame();
// ACK of HandshakeDone is a server-specific behavior.
if (frame.asHandshakeDoneFrame()) {
// Call handshakeConfirmed outside of the packet
// processing loop to avoid a re-entrancy.
handshakeConfirmedThisLoop = true;
}
break;
}
default: {
break;
}
}
return folly::unit;
};
for (auto& quicFrame : regularPacket.frames) {
switch (quicFrame.type()) {
@ -1150,14 +1176,20 @@ void onServerReadDataFromOpen(
TransportErrorCode::PROTOCOL_VIOLATION);
}
conn.lastProcessedAckEvents.emplace_back(processAckFrame(
auto result = processAckFrame(
conn,
packetNumberSpace,
ackFrame,
ackedPacketVisitor,
ackedFrameVisitor,
markPacketLoss,
readData.udpPacket.timings.receiveTimePoint));
readData.udpPacket.timings.receiveTimePoint);
if (result.hasError()) {
throw QuicTransportException(
result.error().message,
*result.error().code.asTransportErrorCode());
}
conn.lastProcessedAckEvents.emplace_back(std::move(result.value()));
break;
}
case QuicFrame::Type::RstStreamFrame: {
@ -1172,11 +1204,24 @@ void onServerReadDataFromOpen(
<< conn;
pktHasRetransmittableData = true;
isNonProbingPacket = true;
auto stream = conn.streamManager->getStream(frame.streamId);
auto streamResult = conn.streamManager->getStream(frame.streamId);
if (streamResult.hasError()) {
// TODO don't throw
throw QuicTransportException(
streamResult.error().message,
*streamResult.error().code.asTransportErrorCode());
}
auto& stream = streamResult.value();
if (!stream) {
break;
}
receiveRstStreamSMHandler(*stream, frame);
auto result = receiveRstStreamSMHandler(*stream, frame);
if (result.hasError()) {
throw QuicTransportException(
result.error().message,
*result.error().code.asTransportErrorCode());
}
break;
}
case QuicFrame::Type::ReadCryptoFrame: {
@ -1188,16 +1233,19 @@ void onServerReadDataFromOpen(
<< cryptoFrame.offset
<< " len=" << cryptoFrame.data->computeChainDataLength()
<< " currentReadOffset="
<< getCryptoStream(*conn.cryptoState, encryptionLevel)
->currentReadOffset
<< " " << conn;
<< getCryptoStream(*conn.cryptoState, encryptionLevel);
auto cryptoStream =
getCryptoStream(*conn.cryptoState, encryptionLevel);
auto readBufferSize = cryptoStream->readBuffer.size();
appendDataToReadBuffer(
auto result = appendDataToReadBuffer(
*cryptoStream,
StreamBuffer(
std::move(cryptoFrame.data), cryptoFrame.offset, false));
if (result.hasError()) {
throw QuicTransportException(
result.error().message,
*result.error().code.asTransportErrorCode());
}
if (isQuicInitialPacket &&
readBufferSize != cryptoStream->readBuffer.size()) {
++conn.uniqueInitialCryptoFramesReceived;
@ -1214,12 +1262,25 @@ void onServerReadDataFromOpen(
<< " fin=" << frame.fin << " " << conn;
pktHasRetransmittableData = true;
isNonProbingPacket = true;
auto stream = conn.streamManager->getStream(
auto streamResult = conn.streamManager->getStream(
frame.streamId, frame.streamGroupId);
if (streamResult.hasError()) {
// TODO don't throw
throw QuicTransportException(
streamResult.error().message,
*streamResult.error().code.asTransportErrorCode());
}
auto& stream = streamResult.value();
// Ignore data from closed streams that we don't have the
// state for any more.
if (stream) {
receiveReadStreamFrameSMHandler(*stream, std::move(frame));
auto result =
receiveReadStreamFrameSMHandler(*stream, std::move(frame));
if (result.hasError()) {
throw QuicTransportException(
result.error().message,
*result.error().code.asTransportErrorCode());
}
}
break;
}
@ -1246,8 +1307,14 @@ void onServerReadDataFromOpen(
}
pktHasRetransmittableData = true;
isNonProbingPacket = true;
auto stream =
auto streamResult =
conn.streamManager->getStream(streamWindowUpdate.streamId);
if (streamResult.hasError()) {
throw QuicTransportException(
streamResult.error().message,
*streamResult.error().code.asTransportErrorCode());
}
auto& stream = streamResult.value();
if (stream) {
handleStreamWindowUpdate(
*stream, streamWindowUpdate.maximumData, packetNum);
@ -1268,7 +1335,13 @@ void onServerReadDataFromOpen(
<< " " << conn;
pktHasRetransmittableData = true;
isNonProbingPacket = true;
auto stream = conn.streamManager->getStream(blocked.streamId);
auto streamResult = conn.streamManager->getStream(blocked.streamId);
if (streamResult.hasError()) {
throw QuicTransportException(
streamResult.error().message,
*streamResult.error().code.asTransportErrorCode());
}
auto& stream = streamResult.value();
if (stream) {
handleStreamBlocked(*stream);
}
@ -1319,8 +1392,14 @@ void onServerReadDataFromOpen(
: regularPacket.header.asLong()->getDestinationConnId();
pktHasRetransmittableData = true;
QuicSimpleFrame& simpleFrame = *quicFrame.asQuicSimpleFrame();
isNonProbingPacket |= updateSimpleFrameOnPacketReceived(
auto simpleResult = updateSimpleFrameOnPacketReceived(
conn, simpleFrame, dstConnId, readData.peer != conn.peerAddress);
if (simpleResult.hasError()) {
throw QuicTransportException(
simpleResult.error().message,
*simpleResult.error().code.asTransportErrorCode());
}
isNonProbingPacket |= simpleResult.value();
break;
}
case QuicFrame::Type::DatagramFrame: {
@ -1366,8 +1445,8 @@ void onServerReadDataFromOpen(
maybeHandleIncomingKeyUpdate(conn);
// Update writable limit before processing the handshake data. This is so
// that if we haven't decided whether or not to validate the peer, we won't
// increase the limit.
// that if we haven't decided whether or not to validate the peer, we
// won't increase the limit.
updateWritableByteLimitOnRecvPacket(conn);
if (conn.peerAddress != readData.peer) {

View File

@ -142,18 +142,19 @@ TEST_F(QuicServerTransportTest, TestReadMultipleStreams) {
clientPacketNum);
ASSERT_EQ(server->getConn().streamManager->streamCount(), 2);
IOBufEqualTo eq;
auto stream = server->getNonConstConn().streamManager->findStream(0x08);
ASSERT_TRUE(stream);
auto streamData = readDataFromQuicStream(*stream);
EXPECT_TRUE(eq(buf1, streamData.first));
EXPECT_TRUE(streamData.second);
ASSERT_FALSE(streamData.hasError());
EXPECT_TRUE(eq(buf1, streamData->first));
EXPECT_TRUE(streamData->second);
auto stream2 = server->getNonConstConn().streamManager->findStream(0x0C);
ASSERT_TRUE(stream2);
auto streamData2 = readDataFromQuicStream(*stream2);
EXPECT_TRUE(eq(buf2, streamData2.first));
EXPECT_TRUE(streamData2.second);
ASSERT_FALSE(streamData2.hasError());
EXPECT_TRUE(eq(buf2, streamData2->first));
EXPECT_TRUE(streamData2->second);
EXPECT_CALL(*quicStats_, onQuicStreamClosed()).Times(2);
}
@ -610,7 +611,10 @@ TEST_F(QuicServerTransportTest, TestOpenAckStreamFrame) {
server->writeChain(streamId, data->clone(), false);
loopForWrites();
auto stream = server->getNonConstConn().streamManager->getStream(streamId);
auto streamResult =
server->getNonConstConn().streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
auto stream = streamResult.value();
ASSERT_FALSE(server->getConn().outstandings.packets.empty());
ASSERT_FALSE(stream->retransmissionBuffer.empty());
// We need more than one packet for this test.
@ -728,7 +732,10 @@ TEST_F(QuicServerTransportTest, RecvRstStreamFrameNonexistClientStream) {
auto packet = std::move(builder).buildPacket();
deliverData(packetToBuf(packet));
auto stream = server->getNonConstConn().streamManager->getStream(streamId);
auto streamResult =
server->getNonConstConn().streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
auto stream = streamResult.value();
ASSERT_TRUE(stream->streamReadError.has_value());
}
@ -814,7 +821,10 @@ TEST_F(QuicServerTransportTest, RecvRstStreamFrame) {
};
StreamId streamId = 0x00;
auto stream = server->getNonConstConn().streamManager->getStream(streamId);
auto streamResult =
server->getNonConstConn().streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
auto stream = streamResult.value();
stream->readBuffer.emplace_back(IOBuf::copyBuffer(words.at(0)), 0, false);
stream->readBuffer.emplace_back(
IOBuf::copyBuffer(words.at(1)), words.at(0).length(), false);
@ -825,7 +835,9 @@ TEST_F(QuicServerTransportTest, RecvRstStreamFrame) {
std::forward_as_tuple(0),
std::forward_as_tuple(std::make_unique<WriteStreamBuffer>(
ChainedByteRangeHead(wordsBuf2), 0, false)));
writeDataToQuicStream(*stream, IOBuf::copyBuffer(words.at(3)), false);
ASSERT_FALSE(
writeDataToQuicStream(*stream, IOBuf::copyBuffer(words.at(3)), false)
.hasError());
stream->currentWriteOffset = words.at(2).length() + words.at(3).length();
stream->currentReadOffset = words.at(0).length() + words.at(1).length();
@ -868,7 +880,9 @@ TEST_F(QuicServerTransportTest, RecvReliableRstStreamFrame) {
clientNextAppDataPacketNum = 3;
StreamId streamId = 0x00;
server->getNonConstConn().streamManager->getStream(streamId);
auto streamResult =
server->getNonConstConn().streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
ShortHeader header(
ProtectionType::KeyPhaseZero,
*server->getConn().serverConnectionId,
@ -899,7 +913,10 @@ TEST_F(QuicServerTransportTest, RecvStopSendingFrame) {
};
StreamId streamId = 0x00;
auto stream = server->getNonConstConn().streamManager->getStream(streamId);
auto streamResult =
server->getNonConstConn().streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
auto stream = streamResult.value();
stream->readBuffer.emplace_back(IOBuf::copyBuffer(words.at(0)), 0, false);
stream->readBuffer.emplace_back(
IOBuf::copyBuffer(words.at(1)), words.at(0).length(), false);
@ -944,7 +961,10 @@ TEST_F(QuicServerTransportTest, RecvStopSendingFrameAfterCloseStream) {
};
StreamId streamId = 0x00;
auto stream = server->getNonConstConn().streamManager->getStream(streamId);
auto streamResult =
server->getNonConstConn().streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
auto stream = streamResult.value();
stream->readBuffer.emplace_back(IOBuf::copyBuffer(words.at(0)), 0, false);
stream->readBuffer.emplace_back(
IOBuf::copyBuffer(words.at(1)), words.at(0).length(), false);
@ -990,7 +1010,10 @@ TEST_F(QuicServerTransportTest, RecvInvalidMaxStreamData) {
};
StreamId streamId = 0x02;
auto stream = server->getNonConstConn().streamManager->getStream(streamId);
auto streamResult =
server->getNonConstConn().streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
auto stream = streamResult.value();
stream->readBuffer.emplace_back(IOBuf::copyBuffer(words.at(0)), 0, false);
stream->readBuffer.emplace_back(
IOBuf::copyBuffer(words.at(1)), words.at(0).length(), false);
@ -1033,7 +1056,10 @@ TEST_F(QuicServerTransportTest, RecvStopSendingFrameAfterHalfCloseRemote) {
};
StreamId streamId = 0x00;
auto stream = server->getNonConstConn().streamManager->getStream(streamId);
auto streamResult =
server->getNonConstConn().streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
auto stream = streamResult.value();
stream->readBuffer.emplace_back(IOBuf::copyBuffer(words.at(0)), 0, false);
stream->readBuffer.emplace_back(
IOBuf::copyBuffer(words.at(1)), words.at(0).length(), false);
@ -1117,7 +1143,10 @@ TEST_F(QuicServerTransportTest, RecvStopSendingFrameAfterReset) {
StreamId streamId1 = 0x00;
StreamId streamId2 = 0x04;
auto stream1 = server->getNonConstConn().streamManager->getStream(streamId1);
auto stream1Result =
server->getNonConstConn().streamManager->getStream(streamId1);
ASSERT_FALSE(stream1Result.hasError());
auto stream1 = stream1Result.value();
stream1->readBuffer.emplace_back(IOBuf::copyBuffer(words.at(0)), 0, false);
stream1->readBuffer.emplace_back(
IOBuf::copyBuffer(words.at(1)), words.at(0).length(), false);
@ -1130,7 +1159,10 @@ TEST_F(QuicServerTransportTest, RecvStopSendingFrameAfterReset) {
stream1->writeBuffer.append(IOBuf::copyBuffer(words.at(3)));
stream1->currentWriteOffset = words.at(2).length() + words.at(3).length();
stream1->currentReadOffset = words.at(0).length() + words.at(1).length();
auto stream2 = server->getNonConstConn().streamManager->getStream(streamId2);
auto stream2Result =
server->getNonConstConn().streamManager->getStream(streamId2);
ASSERT_FALSE(stream2Result.hasError());
auto stream2 = stream2Result.value();
stream2->readBuffer.emplace_back(IOBuf::copyBuffer(words.at(0)), 0, false);
stream2->readBuffer.emplace_back(
IOBuf::copyBuffer(words.at(1)), words.at(0).length(), false);
@ -1173,7 +1205,9 @@ TEST_F(QuicServerTransportTest, RecvStopSendingFrameAfterReset) {
TEST_F(QuicServerTransportTest, StopSendingLoss) {
server->getNonConstConn().ackStates.appDataAckState.nextPacketNum = 3;
auto streamId = server->createBidirectionalStream().value();
server->getNonConstConn().streamManager->getStream(streamId);
auto streamResult =
server->getNonConstConn().streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
ShortHeader header(
ProtectionType::KeyPhaseZero,
*server->getConn().serverConnectionId,
@ -1189,7 +1223,8 @@ TEST_F(QuicServerTransportTest, StopSendingLoss) {
ASSERT_TRUE(builder.canBuildPacket());
writeFrame(QuicSimpleFrame(stopSendingFrame), builder);
auto packet = std::move(builder).buildPacket();
markPacketLoss(server->getNonConstConn(), packet.packet, false);
ASSERT_FALSE(markPacketLoss(server->getNonConstConn(), packet.packet, false)
.hasError());
EXPECT_EQ(server->getNonConstConn().pendingEvents.frames.size(), 1);
StopSendingFrame* stopFrame = server->getNonConstConn()
.pendingEvents.frames.front()
@ -1201,7 +1236,9 @@ TEST_F(QuicServerTransportTest, StopSendingLoss) {
TEST_F(QuicServerTransportTest, StopSendingLossAfterStreamClosed) {
server->getNonConstConn().ackStates.appDataAckState.nextPacketNum = 3;
auto streamId = server->createBidirectionalStream().value();
server->getNonConstConn().streamManager->getStream(streamId);
auto streamResult =
server->getNonConstConn().streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
ShortHeader header(
ProtectionType::KeyPhaseZero,
*server->getConn().serverConnectionId,
@ -1221,7 +1258,8 @@ TEST_F(QuicServerTransportTest, StopSendingLossAfterStreamClosed) {
// clear out all the streams, this is not a great way to simulate closed
// streams, but good enough for this test.
server->getNonConstConn().streamManager->clearOpenStreams();
markPacketLoss(server->getNonConstConn(), packet.packet, false);
ASSERT_FALSE(markPacketLoss(server->getNonConstConn(), packet.packet, false)
.hasError());
EXPECT_EQ(server->getNonConstConn().pendingEvents.frames.size(), 0);
}
@ -1229,7 +1267,9 @@ TEST_F(QuicServerTransportTest, TestCloneStopSending) {
auto streamId = server->createBidirectionalStream().value();
auto qLogger = std::make_shared<quic::FileQLogger>(VantagePoint::Server);
server->getNonConstConn().qLogger = qLogger;
server->getNonConstConn().streamManager->getStream(streamId);
auto streamResult =
server->getNonConstConn().streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
// knock every handshake outstanding packets out
server->getNonConstConn().outstandings.reset();
for (auto& t : server->getNonConstConn().lossState.lossTimes) {
@ -1266,7 +1306,9 @@ TEST_F(QuicServerTransportTest, TestCloneStopSending) {
TEST_F(QuicServerTransportTest, TestAckStopSending) {
auto streamId = server->createBidirectionalStream().value();
server->getNonConstConn().streamManager->getStream(streamId);
auto streamResult =
server->getNonConstConn().streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
server->stopSending(streamId, GenericApplicationErrorCode::UNKNOWN);
loopForWrites();
auto match = findFrameInPacketFunc<QuicSimpleFrame::Type::StopSendingFrame>();
@ -1316,7 +1358,10 @@ TEST_F(QuicServerTransportTest, RecvPathChallenge) {
TEST_F(QuicServerTransportTest, TestAckRstStream) {
auto streamId = server->createUnidirectionalStream().value();
auto stream = server->getNonConstConn().streamManager->getStream(streamId);
auto streamResult =
server->getNonConstConn().streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
auto stream = streamResult.value();
auto packetNum = rstStreamAndSendPacket(
server->getNonConstConn(),
@ -3325,13 +3370,13 @@ TEST_F(QuicServerTransportTest, ResetDSRStream) {
EXPECT_CALL(*dsrSender, release()).Times(1);
server->setDSRPacketizationRequestSender(streamId, std::move(dsrSender));
EXPECT_TRUE(server->writeChain(streamId, std::move(buf), false).hasValue());
ASSERT_NE(conn.streamManager->getStream(streamId), nullptr);
auto streamResult = conn.streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
auto stream = streamResult.value();
EXPECT_TRUE(server->writeBufMeta(streamId, meta, false).hasValue());
loopForWrites();
auto stream = conn.streamManager->getStream(streamId);
ASSERT_NE(stream, nullptr);
conn.streamManager->getStream(streamId)->writeBufMeta.split(
conn.udpSendPacketLen - 200);
stream->writeBufMeta.split(conn.udpSendPacketLen - 200);
server->resetStream(streamId, GenericApplicationErrorCode::UNKNOWN);
loopForWrites();
@ -3487,7 +3532,8 @@ TEST_F(QuicServerTransportTest, InvokeDeliveryCallbacksSingleByteWithDSR) {
conn.lossState.srtt = 100us;
NetworkData networkData;
auto streamState = conn.streamManager->getStream(stream);
streamState->ackedIntervals.insert(0, 1);
ASSERT_FALSE(streamState.hasError());
streamState.value()->ackedIntervals.insert(0, 1);
EXPECT_CALL(writeChainDeliveryCb, onDeliveryAck(stream, 0, 100us)).Times(1);
EXPECT_CALL(writeBufMetaDeliveryCb, onDeliveryAck(stream, 1, 100us)).Times(1);
EXPECT_CALL(firstByteDeliveryCb, onDeliveryAck(stream, 0, 100us)).Times(1);
@ -4351,7 +4397,10 @@ TEST_F(QuicUnencryptedServerTransportTest, TestEncryptedDataBeforeCFIN) {
StreamId streamId = 4;
recvEncryptedStream(streamId, *IOBuf::copyBuffer("hello"));
auto stream = server->getNonConstConn().streamManager->getStream(streamId);
auto streamResult =
server->getNonConstConn().streamManager->getStream(streamId);
ASSERT_FALSE(streamResult.hasError());
auto stream = streamResult.value();
ASSERT_TRUE(stream->readBuffer.empty());
}

View File

@ -57,7 +57,7 @@ void removeOutstandingsForAck(
*
*/
AckEvent processAckFrame(
folly::Expected<AckEvent, QuicError> processAckFrame(
QuicConnectionStateBase& conn,
PacketNumberSpace pnSpace,
const ReadAckFrame& frame,
@ -283,7 +283,10 @@ AckEvent processAckFrame(
}(packetFrame);
// run the ACKed frame visitor
ackedFrameVisitor(*outstandingPacket, packetFrame);
auto result = ackedFrameVisitor(*outstandingPacket, packetFrame);
if (result.hasError()) {
return folly::makeUnexpected(result.error());
}
// Part 2 and 3: Process current state relative to the PreAckVistorState.
if (maybePreAckVisitorState.has_value()) {
@ -354,7 +357,11 @@ AckEvent processAckFrame(
<< originalPacketCount[PacketNumberSpace::Handshake] << ","
<< originalPacketCount[PacketNumberSpace::AppData] << "}";
CHECK_GE(updatedOustandingPacketsCount, conn.outstandings.numClonedPackets());
auto lossEvent = handleAckForLoss(conn, lossVisitor, ack, pnSpace);
auto lossEventExpected = handleAckForLoss(conn, lossVisitor, ack, pnSpace);
if (lossEventExpected.hasError()) {
return folly::makeUnexpected(lossEventExpected.error());
}
auto& lossEvent = lossEventExpected.value();
if (conn.congestionController &&
(ack.largestNewlyAckedPacket.has_value() || lossEvent)) {
if (lossEvent) {

View File

@ -22,7 +22,7 @@ using AckVisitor = std::function<void(
using AckedPacketVisitor = std::function<void(
const OutstandingPacketWrapper&)>; // outstanding packet acked
using AckedFrameVisitor = std::function<void(
using AckedFrameVisitor = std::function<folly::Expected<folly::Unit, QuicError>(
const OutstandingPacketWrapper&, // outstanding packet acked
const QuicWriteFrame&)>; // outstanding frame acked
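// Illustrative only (not part of this commit): a minimal visitor conforming to
// the new Expected-returning alias. A real visitor also updates stream and
// delivery state for the acked frame; the no-op body below is an assumption
// made purely for the sketch.
AckedFrameVisitor noopAckedFrameVisitor =
    [](const OutstandingPacketWrapper& /* packet */,
       const QuicWriteFrame& /* frame */)
    -> folly::Expected<folly::Unit, QuicError> {
  // On failure a visitor would instead return
  // folly::makeUnexpected(QuicError(...)).
  return folly::unit;
};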
@ -40,7 +40,7 @@ void removeOutstandingsForAck(
*
* Returns AckEvent with information about what was observed during processing
*/
AckEvent processAckFrame(
[[nodiscard]] folly::Expected<AckEvent, QuicError> processAckFrame(
QuicConnectionStateBase& conn,
PacketNumberSpace pnSpace,
const ReadAckFrame& ackFrame,

View File

@ -23,8 +23,8 @@ void prependToBuf(quic::Buf& buf, quic::Buf toAppend) {
} // namespace
namespace quic {
void writeDataToQuicStream(QuicStreamState& stream, Buf data, bool eof) {
folly::Expected<folly::Unit, QuicError>
writeDataToQuicStream(QuicStreamState& stream, Buf data, bool eof) {
auto neverWrittenBufMeta = (0 == stream.writeBufMeta.offset);
uint64_t len = 0;
if (data) {
@ -47,11 +47,15 @@ void writeDataToQuicStream(QuicStreamState& stream, Buf data, bool eof) {
auto bufferSize = stream.pendingWrites.chainLength();
stream.finalWriteOffset = stream.currentWriteOffset + bufferSize;
}
updateFlowControlOnWriteToStream(stream, len);
auto result = updateFlowControlOnWriteToStream(stream, len);
if (result.hasError()) {
return folly::makeUnexpected(result.error());
}
stream.conn.streamManager->updateWritableStreams(stream);
return folly::unit;
}
void writeBufMetaToQuicStream(
folly::Expected<folly::Unit, QuicError> writeBufMetaToQuicStream(
QuicStreamState& stream,
const BufferMeta& data,
bool eof) {
@ -75,8 +79,12 @@ void writeBufMetaToQuicStream(
stream.writeBufMeta.offset + stream.writeBufMeta.length;
stream.writeBufMeta.eof = true;
}
updateFlowControlOnWriteToStream(stream, data.length);
auto result = updateFlowControlOnWriteToStream(stream, data.length);
if (result.hasError()) {
return folly::makeUnexpected(result.error());
}
stream.conn.streamManager->updateWritableStreams(stream);
return folly::unit;
}
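// Illustrative only (not part of this commit): how a caller now consumes the
// Expected-returning write path. The enclosing function name and its arguments
// are assumptions made for the sketch.
folly::Expected<folly::Unit, QuicError> exampleWrite(
    QuicStreamState& stream,
    Buf data) {
  auto writeResult =
      writeDataToQuicStream(stream, std::move(data), false /* eof */);
  if (writeResult.hasError()) {
    // Propagate the QuicError instead of relying on a thrown exception.
    return folly::makeUnexpected(writeResult.error());
  }
  return folly::unit;
}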
void writeDataToQuicStream(QuicCryptoStream& stream, Buf data) {
@ -116,7 +124,7 @@ static void pushToTail(folly::IOBuf* dst, Buf src, size_t allocSize) {
}
}
void appendDataToReadBufferCommon(
folly::Expected<folly::Unit, QuicError> appendDataToReadBufferCommon(
QuicStreamLike& stream,
StreamBuffer buffer,
uint32_t coalescingSize,
@ -131,25 +139,28 @@ void appendDataToReadBufferCommon(
bufferEofOffset = bufferEndOffset;
} else if (buffer.data.chainLength() == 0) {
VLOG(10) << "Empty stream without EOF";
return;
return folly::unit;
}
if (stream.finalReadOffset && bufferEofOffset &&
*stream.finalReadOffset != *bufferEofOffset) {
throw QuicTransportException(
"Invalid EOF", TransportErrorCode::FINAL_SIZE_ERROR);
return folly::makeUnexpected(QuicError(
QuicErrorCode(TransportErrorCode::FINAL_SIZE_ERROR),
std::string("Invalid EOF")));
} else if (bufferEofOffset) {
// Do some consistency checks on the stream.
if (stream.maxOffsetObserved > *bufferEofOffset) {
throw QuicTransportException(
"EOF in middle of stream", TransportErrorCode::FINAL_SIZE_ERROR);
return folly::makeUnexpected(QuicError(
QuicErrorCode(TransportErrorCode::FINAL_SIZE_ERROR),
std::string("EOF in middle of stream")));
}
stream.finalReadOffset = bufferEofOffset;
} else if (stream.finalReadOffset) {
// We did not receive a segment with an EOF set.
if (buffer.offset + buffer.data.chainLength() > *stream.finalReadOffset) {
throw QuicTransportException(
"Invalid data after EOF", TransportErrorCode::FINAL_SIZE_ERROR);
return folly::makeUnexpected(QuicError(
QuicErrorCode(TransportErrorCode::FINAL_SIZE_ERROR),
std::string("Invalid data after EOF")));
}
}
// Update the flow control information before changing max offset observed on
@ -161,7 +172,7 @@ void appendDataToReadBufferCommon(
if (buffer.data.chainLength() == 0) {
// Nothing more to do since we already processed the EOF
// case.
return;
return folly::unit;
}
if (buffer.offset < stream.currentReadOffset) {
@ -169,14 +180,14 @@ void appendDataToReadBufferCommon(
buffer.data.trimStartAtMost(stream.currentReadOffset - buffer.offset);
buffer.offset = stream.currentReadOffset;
if (buffer.data.chainLength() == 0) {
return;
return folly::unit;
}
}
// Nothing in the buffer, just append it.
if (it == readBuffer.end()) {
readBuffer.emplace_back(std::move(buffer));
return;
return folly::unit;
}
// Start overlap will point to the first buffer that overlaps with the
@ -269,34 +280,39 @@ void appendDataToReadBufferCommon(
*startOverlap != readBuffer.end() || *endOverlap == readBuffer.end());
auto insertIt = readBuffer.erase(*startOverlap, *endOverlap);
readBuffer.emplace(insertIt, std::move(*current));
return;
return folly::unit;
} else if (currentAlreadyInserted) {
DCHECK(startOverlap);
DCHECK(endOverlap);
DCHECK(
*startOverlap != readBuffer.end() || *endOverlap == readBuffer.end());
readBuffer.erase(*startOverlap, *endOverlap);
return;
return folly::unit;
}
auto last = readBuffer.end() - 1;
if (current->offset > last->offset + last->data.chainLength()) {
readBuffer.emplace_back(std::move(*current));
}
return folly::unit;
}
void appendDataToReadBuffer(QuicStreamState& stream, StreamBuffer buffer) {
appendDataToReadBufferCommon(
folly::Expected<folly::Unit, QuicError> appendDataToReadBuffer(
QuicStreamState& stream,
StreamBuffer buffer) {
return appendDataToReadBufferCommon(
stream,
std::move(buffer),
stream.conn.transportSettings.readCoalescingSize,
[&stream](uint64_t previousMaxOffsetObserved, uint64_t bufferEndOffset) {
updateFlowControlOnStreamData(
return updateFlowControlOnStreamData(
stream, previousMaxOffsetObserved, bufferEndOffset);
});
}
void appendDataToReadBuffer(QuicCryptoStream& stream, StreamBuffer buffer) {
appendDataToReadBufferCommon(
folly::Expected<folly::Unit, QuicError> appendDataToReadBuffer(
QuicCryptoStream& stream,
StreamBuffer buffer) {
return appendDataToReadBufferCommon(
stream, std::move(buffer), 0, [](uint64_t, uint64_t) {});
}
@ -351,7 +367,7 @@ Buf readDataFromCryptoStream(QuicCryptoStream& stream, uint64_t amount) {
return readDataInOrderFromReadBuffer(stream, amount).first;
}
std::pair<Buf, bool> readDataFromQuicStream(
folly::Expected<std::pair<Buf, bool>, QuicError> readDataFromQuicStream(
QuicStreamState& stream,
uint64_t amount) {
auto eof = stream.finalReadOffset &&
@ -371,7 +387,11 @@ std::pair<Buf, bool> readDataFromQuicStream(
std::tie(data, eof) = readDataInOrderFromReadBuffer(stream, amount);
// Update flow control before handling eof as eof is not subject to flow
// control
updateFlowControlOnRead(stream, lastReadOffset, Clock::now());
auto flowControlResult =
updateFlowControlOnRead(stream, lastReadOffset, Clock::now());
if (flowControlResult.hasError()) {
return folly::makeUnexpected(flowControlResult.error());
}
eof = stream.finalReadOffset &&
stream.currentReadOffset == *stream.finalReadOffset;
if (eof) {
@ -398,7 +418,9 @@ void peekDataFromQuicStream(
* Same as readDataFromQuicStream(),
* only releases existing data instead of returning it.
*/
void consumeDataFromQuicStream(QuicStreamState& stream, uint64_t amount) {
folly::Expected<folly::Unit, QuicError> consumeDataFromQuicStream(
QuicStreamState& stream,
uint64_t amount) {
bool eof = stream.finalReadOffset &&
stream.currentReadOffset >= *stream.finalReadOffset;
if (eof) {
@ -407,7 +429,7 @@ void consumeDataFromQuicStream(QuicStreamState& stream, uint64_t amount) {
}
stream.conn.streamManager->updateReadableStreams(stream);
stream.conn.streamManager->updatePeekableStreams(stream);
return;
return folly::unit;
}
uint64_t lastReadOffset = stream.currentReadOffset;
@ -415,7 +437,11 @@ void consumeDataFromQuicStream(QuicStreamState& stream, uint64_t amount) {
readDataInOrderFromReadBuffer(stream, amount, true /* sinkData */);
// Update flow control before handling eof as eof is not subject to flow
// control
updateFlowControlOnRead(stream, lastReadOffset, Clock::now());
auto flowControlResult =
updateFlowControlOnRead(stream, lastReadOffset, Clock::now());
if (flowControlResult.hasError()) {
return folly::makeUnexpected(flowControlResult.error());
}
eof = stream.finalReadOffset &&
stream.currentReadOffset == *stream.finalReadOffset;
if (eof) {
@ -423,6 +449,7 @@ void consumeDataFromQuicStream(QuicStreamState& stream, uint64_t amount) {
}
stream.conn.streamManager->updateReadableStreams(stream);
stream.conn.streamManager->updatePeekableStreams(stream);
return folly::unit;
}
bool allBytesTillFinAcked(const QuicStreamState& stream) {

View File

@ -8,7 +8,6 @@
#pragma once
#include <quic/state/StateData.h>
#include <algorithm>
namespace quic {
@ -18,7 +17,8 @@ namespace quic {
*
 * Returns an error via folly::Expected on failure (no longer throws).
*/
void writeDataToQuicStream(QuicStreamState& stream, Buf data, bool eof);
[[nodiscard]] folly::Expected<folly::Unit, QuicError>
writeDataToQuicStream(QuicStreamState& stream, Buf data, bool eof);
/**
* Adds data represented in the form of BufferMeta to the end of the Buffer
@ -26,7 +26,7 @@ void writeDataToQuicStream(QuicStreamState& stream, Buf data, bool eof);
*
* TODO: move to dsr directory.
*/
void writeBufMetaToQuicStream(
[[nodiscard]] folly::Expected<folly::Unit, QuicError> writeBufMetaToQuicStream(
QuicStreamState& stream,
const BufferMeta& data,
bool eof);
@ -43,7 +43,9 @@ void writeDataToQuicStream(QuicCryptoStream& stream, Buf data);
*
 * Returns an error via folly::Expected on failure (no longer throws).
*/
void appendDataToReadBuffer(QuicStreamState& stream, StreamBuffer buffer);
[[nodiscard]] folly::Expected<folly::Unit, QuicError> appendDataToReadBuffer(
QuicStreamState& stream,
StreamBuffer buffer);
/**
* Process data received from the network to add it to the crypto stream.
@ -51,14 +53,16 @@ void appendDataToReadBuffer(QuicStreamState& stream, StreamBuffer buffer);
*
 * Returns an error via folly::Expected on failure (no longer throws).
*/
void appendDataToReadBuffer(QuicCryptoStream& stream, StreamBuffer buffer);
[[nodiscard]] folly::Expected<folly::Unit, QuicError> appendDataToReadBuffer(
QuicCryptoStream& stream,
StreamBuffer buffer);
/**
* Reads data from the QUIC stream if data exists.
* Returns a pair of data and whether or not EOF was reached on the stream.
* amount == 0 reads all the pending data in the stream.
*/
std::pair<Buf, bool> readDataFromQuicStream(
folly::Expected<std::pair<Buf, bool>, QuicError> readDataFromQuicStream(
QuicStreamState& state,
uint64_t amount = 0);
@ -84,7 +88,9 @@ void peekDataFromQuicStream(
* Same as readDataFromQuicStream,
* releases data instead of returning it.
*/
void consumeDataFromQuicStream(QuicStreamState& stream, uint64_t amount);
folly::Expected<folly::Unit, QuicError> consumeDataFromQuicStream(
QuicStreamState& stream,
uint64_t amount);
bool allBytesTillFinAcked(const QuicStreamState& state);
@ -134,7 +140,8 @@ uint64_t getNumPacketsTxWithNewData(const QuicStreamState& stream);
* object. Callers should provide a connFlowControlVisitor which will be invoked
* when flow control operations need to be performed.
*/
void appendDataToReadBufferCommon(
[[nodiscard]] folly::Expected<folly::Unit, QuicError>
appendDataToReadBufferCommon(
QuicStreamLike& stream,
StreamBuffer buffer,
uint32_t coalescingSize,

View File

@ -26,16 +26,8 @@ namespace quic {
* 2. You were head of line blocked, but you receive a reset from the peer.
*/
static void updateHolBlockedTime(QuicStreamState& stream) {
// No data has arrived, or the current stream offset matches
// the stream offset that has been read so far. Stream is not HOL-blocked
// (although may be blocked on missing data).
// If there is no more data to read, or if the current read offset
// matches the read offset in the front queue, a potential HOL block
// becomes unblocked.
if (stream.readBuffer.empty() ||
(stream.currentReadOffset == stream.readBuffer.front().offset)) {
// If we were previously HOL blocked, we're not any more.
// Update the total HOLB time and reset the latch.
if (stream.lastHolbTime) {
stream.totalHolbTime +=
std::chrono::duration_cast<std::chrono::microseconds>(
@ -45,12 +37,9 @@ static void updateHolBlockedTime(QuicStreamState& stream) {
return;
}
// No HOL unblocking event has occurred. If we are already HOL blocked,
// we remain HOL blocked.
if (stream.lastHolbTime) {
return;
}
// If we were previously not HOL blocked, we are now.
stream.lastHolbTime = Clock::now();
stream.holbCount++;
}
@ -139,13 +128,14 @@ QuicStreamState* QuicStreamManager::findStream(StreamId streamId) {
}
}
void QuicStreamManager::setMaxLocalBidirectionalStreams(
folly::Expected<folly::Unit, QuicError>
QuicStreamManager::setMaxLocalBidirectionalStreams(
uint64_t maxStreams,
bool force) {
if (maxStreams > kMaxMaxStreams) {
throw QuicTransportException(
"Attempt to set maxStreams beyond the max allowed.",
TransportErrorCode::STREAM_LIMIT_ERROR);
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_LIMIT_ERROR,
"Attempt to set maxStreams beyond the max allowed."));
}
StreamId maxStreamId = maxStreams * detail::kStreamIncrement +
initialLocalBidirectionalStreamId_;
@ -153,15 +143,17 @@ void QuicStreamManager::setMaxLocalBidirectionalStreams(
maxLocalBidirectionalStreamId_ = maxStreamId;
maxLocalBidirectionalStreamIdIncreased_ = true;
}
return folly::unit;
}
void QuicStreamManager::setMaxLocalUnidirectionalStreams(
folly::Expected<folly::Unit, QuicError>
QuicStreamManager::setMaxLocalUnidirectionalStreams(
uint64_t maxStreams,
bool force) {
if (maxStreams > kMaxMaxStreams) {
throw QuicTransportException(
"Attempt to set maxStreams beyond the max allowed.",
TransportErrorCode::STREAM_LIMIT_ERROR);
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_LIMIT_ERROR,
"Attempt to set maxStreams beyond the max allowed."));
}
StreamId maxStreamId = maxStreams * detail::kStreamIncrement +
initialLocalUnidirectionalStreamId_;
@ -169,44 +161,53 @@ void QuicStreamManager::setMaxLocalUnidirectionalStreams(
maxLocalUnidirectionalStreamId_ = maxStreamId;
maxLocalUnidirectionalStreamIdIncreased_ = true;
}
return folly::unit;
}
void QuicStreamManager::setMaxRemoteBidirectionalStreams(uint64_t maxStreams) {
setMaxRemoteBidirectionalStreamsInternal(maxStreams, false);
// Public API now returns Expected to propagate internal errors
folly::Expected<folly::Unit, QuicError>
QuicStreamManager::setMaxRemoteBidirectionalStreams(uint64_t maxStreams) {
return setMaxRemoteBidirectionalStreamsInternal(maxStreams, false);
}
void QuicStreamManager::setMaxRemoteUnidirectionalStreams(uint64_t maxStreams) {
setMaxRemoteUnidirectionalStreamsInternal(maxStreams, false);
// Public API now returns Expected to propagate internal errors
folly::Expected<folly::Unit, QuicError>
QuicStreamManager::setMaxRemoteUnidirectionalStreams(uint64_t maxStreams) {
return setMaxRemoteUnidirectionalStreamsInternal(maxStreams, false);
}
void QuicStreamManager::setMaxRemoteBidirectionalStreamsInternal(
folly::Expected<folly::Unit, QuicError>
QuicStreamManager::setMaxRemoteBidirectionalStreamsInternal(
uint64_t maxStreams,
bool force) {
if (maxStreams > kMaxMaxStreams) {
throw QuicTransportException(
"Attempt to set maxStreams beyond the max allowed.",
TransportErrorCode::STREAM_LIMIT_ERROR);
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_LIMIT_ERROR,
"Attempt to set maxStreams beyond the max allowed."));
}
StreamId maxStreamId = maxStreams * detail::kStreamIncrement +
initialRemoteBidirectionalStreamId_;
if (force || maxStreamId > maxRemoteBidirectionalStreamId_) {
maxRemoteBidirectionalStreamId_ = maxStreamId;
}
return folly::unit;
}
void QuicStreamManager::setMaxRemoteUnidirectionalStreamsInternal(
folly::Expected<folly::Unit, QuicError>
QuicStreamManager::setMaxRemoteUnidirectionalStreamsInternal(
uint64_t maxStreams,
bool force) {
if (maxStreams > kMaxMaxStreams) {
throw QuicTransportException(
"Attempt to set maxStreams beyond the max allowed.",
TransportErrorCode::STREAM_LIMIT_ERROR);
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_LIMIT_ERROR,
"Attempt to set maxStreams beyond the max allowed."));
}
StreamId maxStreamId = maxStreams * detail::kStreamIncrement +
initialRemoteUnidirectionalStreamId_;
if (force || maxStreamId > maxRemoteUnidirectionalStreamId_) {
maxRemoteUnidirectionalStreamId_ = maxStreamId;
}
return folly::unit;
}
bool QuicStreamManager::consumeMaxLocalBidirectionalStreamIdIncreased() {
@ -235,73 +236,129 @@ bool QuicStreamManager::setStreamPriority(StreamId id, Priority newPriority) {
return false;
}
void QuicStreamManager::refreshTransportSettings(
const TransportSettings& settings) {
folly::Expected<folly::Unit, QuicError>
QuicStreamManager::refreshTransportSettings(const TransportSettings& settings) {
transportSettings_ = &settings;
setMaxRemoteBidirectionalStreamsInternal(
auto resultBidi = setMaxRemoteBidirectionalStreamsInternal(
transportSettings_->advertisedInitialMaxStreamsBidi, true);
setMaxRemoteUnidirectionalStreamsInternal(
if (resultBidi.hasError()) {
// Propagate the error
return folly::makeUnexpected(resultBidi.error());
}
auto resultUni = setMaxRemoteUnidirectionalStreamsInternal(
transportSettings_->advertisedInitialMaxStreamsUni, true);
if (resultUni.hasError()) {
// Propagate the error
return folly::makeUnexpected(resultUni.error());
}
return folly::unit;
}
// We create local streams lazily. If a local stream was created
// but not allocated yet, this will allocate a stream.
// This will return nullptr if a stream is closed or un-opened.
QuicStreamState* FOLLY_NULLABLE
folly::Expected<QuicStreamState*, QuicError>
QuicStreamManager::getOrCreateOpenedLocalStream(StreamId streamId) {
auto& openLocalStreams = isUnidirectionalStream(streamId)
? openUnidirectionalLocalStreams_
: openBidirectionalLocalStreams_;
if (openLocalStreams.contains(streamId)) {
// Open a lazily created stream.
auto it = streams_.emplace(
std::piecewise_construct,
std::forward_as_tuple(streamId),
std::forward_as_tuple(streamId, conn_));
QUIC_STATS(conn_.statsCallback, onNewQuicStream);
if (!it.second) {
throw QuicTransportException(
"Creating an active stream", TransportErrorCode::STREAM_STATE_ERROR);
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_STATE_ERROR, "Creating an active stream"));
}
QUIC_STATS(conn_.statsCallback, onNewQuicStream);
return &it.first->second;
}
return nullptr;
}
QuicStreamState* QuicStreamManager::getStream(
folly::Expected<QuicStreamState*, QuicError> QuicStreamManager::getStream(
StreamId streamId,
OptionalIntegral<StreamGroupId> streamGroupId) {
if (isRemoteStream(nodeType_, streamId)) {
auto stream = getOrCreatePeerStream(streamId, std::move(streamGroupId));
updateAppIdleState();
return stream;
auto streamResult =
getOrCreatePeerStream(streamId, std::move(streamGroupId));
// If successful (has value, which could be nullptr or a valid ptr), update
// state.
if (streamResult.hasValue()) {
updateAppIdleState();
}
// Propagate error, or return the contained value (ptr or nullptr)
return streamResult;
}
// Handle local streams
auto it = streams_.find(streamId);
if (it != streams_.end()) {
// Stream state already exists
updateAppIdleState();
return &it->second;
}
auto stream = getOrCreateOpenedLocalStream(streamId);
// Try to get/create state for an already opened (but not instantiated) local
// stream
auto streamResult = getOrCreateOpenedLocalStream(streamId);
if (streamResult.hasError()) {
// This indicates an internal error during lazy creation
// Propagate as QuicError
return folly::makeUnexpected(QuicError(
TransportErrorCode::INTERNAL_ERROR,
"Failed to create local stream state"));
}
auto* stream = streamResult.value(); // Can be nullptr if not in open set
// Check if the stream is genuinely unopened locally
auto nextAcceptableStreamId = isUnidirectionalStream(streamId)
? nextAcceptableLocalUnidirectionalStreamId_
: nextAcceptableLocalBidirectionalStreamId_;
if (!stream && isStreamUnopened(streamId, nextAcceptableStreamId)) {
throw QuicTransportException(
"Trying to get unopened local stream",
TransportErrorCode::STREAM_STATE_ERROR);
// The stream ID is higher than the next acceptable one, meaning it hasn't
// been opened yet. This was previously a throw -> return error.
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_STATE_ERROR,
"Trying to get unopened local stream"));
}
// If stream is null here, it means streamId < nextAcceptableStreamId
// but it wasn't found in the `streams_` map and wasn't lazily created.
// This implies it was previously closed and removed.
// Returning nullptr is the correct behavior in this case.
updateAppIdleState();
return stream;
return stream; // Can be nullptr if stream was closed
}
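// Illustrative only (not from the commit): the three outcomes of the new
// getStream(). The function name `exampleUseStream` and the `manager`/`id`
// arguments are invented for the sketch.
folly::Expected<folly::Unit, QuicError> exampleUseStream(
    QuicStreamManager& manager,
    StreamId id) {
  auto streamResult = manager.getStream(id);
  if (streamResult.hasError()) {
    // Transport-level problem, e.g. STREAM_STATE_ERROR for an unopened stream.
    return folly::makeUnexpected(streamResult.error());
  }
  QuicStreamState* stream = streamResult.value();
  if (!stream) {
    // Stream was opened before but has since been closed and removed.
    return folly::unit;
  }
  // Safe to use *stream here.
  return folly::unit;
}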
// Note: This function returns LocalErrorCode because it's primarily used
// internally or by APIs that expect local errors for stream creation failures.
// However, the underlying call to createStream returns QuicError, which we must
// handle.
folly::Expected<QuicStreamState*, LocalErrorCode>
QuicStreamManager::createNextBidirectionalStream(
OptionalIntegral<StreamGroupId> streamGroupId) {
auto stream =
auto streamResult =
createStream(nextBidirectionalStreamId_, std::move(streamGroupId));
if (stream.hasValue()) {
if (streamResult.hasValue()) {
nextBidirectionalStreamId_ += detail::kStreamIncrement;
return streamResult.value();
} else {
// createStream failed, map the QuicError to a suitable LocalErrorCode
// This mapping loses original error detail but fits the expected return
// type. Callers needing the precise QuicError should call createStream
// directly.
auto& error = streamResult.error();
LOG(WARNING) << "createStream failed: "
<< error.message; // Log the original error
if (error.code == TransportErrorCode::STREAM_LIMIT_ERROR) {
return folly::makeUnexpected(LocalErrorCode::STREAM_LIMIT_EXCEEDED);
} else if (error.code == TransportErrorCode::STREAM_STATE_ERROR) {
return folly::makeUnexpected(
LocalErrorCode::CREATING_EXISTING_STREAM); // Or other state error?
} else {
return folly::makeUnexpected(LocalErrorCode::INTERNAL_ERROR);
}
}
return stream;
}
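// Hypothetical helper (not in this commit): createNextBidirectionalStream and
// createNextUnidirectionalStream repeat the same QuicError-to-LocalErrorCode
// mapping, so it could be factored out roughly like this.
static LocalErrorCode mapCreateStreamError(const QuicError& error) {
  if (error.code == TransportErrorCode::STREAM_LIMIT_ERROR) {
    return LocalErrorCode::STREAM_LIMIT_EXCEEDED;
  }
  if (error.code == TransportErrorCode::STREAM_STATE_ERROR) {
    return LocalErrorCode::CREATING_EXISTING_STREAM;
  }
  return LocalErrorCode::INTERNAL_ERROR;
}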
folly::Expected<StreamGroupId, LocalErrorCode>
@ -310,15 +367,28 @@ QuicStreamManager::createNextBidirectionalStreamGroup() {
nextBidirectionalStreamGroupId_, openBidirectionalLocalStreamGroups_);
}
// Note: Similar to createNextBidirectionalStream regarding LocalErrorCode
// return.
folly::Expected<QuicStreamState*, LocalErrorCode>
QuicStreamManager::createNextUnidirectionalStream(
OptionalIntegral<StreamGroupId> streamGroupId) {
auto stream =
auto streamResult =
createStream(nextUnidirectionalStreamId_, std::move(streamGroupId));
if (stream.hasValue()) {
if (streamResult.hasValue()) {
nextUnidirectionalStreamId_ += detail::kStreamIncrement;
return streamResult.value();
} else {
// Map QuicError to LocalErrorCode
auto& error = streamResult.error();
LOG(WARNING) << "createStream failed: " << error.message;
if (error.code == TransportErrorCode::STREAM_LIMIT_ERROR) {
return folly::makeUnexpected(LocalErrorCode::STREAM_LIMIT_EXCEEDED);
} else if (error.code == TransportErrorCode::STREAM_STATE_ERROR) {
return folly::makeUnexpected(LocalErrorCode::CREATING_EXISTING_STREAM);
} else {
return folly::makeUnexpected(LocalErrorCode::INTERNAL_ERROR);
}
}
return stream;
}
QuicStreamState* FOLLY_NULLABLE QuicStreamManager::instantiatePeerStream(
@ -341,12 +411,20 @@ QuicStreamState* FOLLY_NULLABLE QuicStreamManager::instantiatePeerStream(
newGroupedPeerStreams_.push_back(streamId);
}
}
auto it = streams_.emplace(
std::piecewise_construct,
std::forward_as_tuple(streamId),
std::forward_as_tuple(streamId, groupId, conn_));
QUIC_STATS(conn_.statsCallback, onNewQuicStream);
return &it.first->second;
// Use try_emplace to avoid potential double-check issues if called directly
auto [it, inserted] =
streams_.try_emplace(streamId, streamId, groupId, conn_);
if (!inserted && it->second.groupId != groupId) {
LOG(ERROR) << "Stream " << streamId
<< " already exists with different group ID";
return nullptr;
}
if (inserted) {
QUIC_STATS(conn_.statsCallback, onNewQuicStream);
}
return &it->second;
}
folly::Expected<StreamGroupId, LocalErrorCode>
@ -374,177 +452,266 @@ QuicStreamManager::createNextStreamGroup(
return id;
}
QuicStreamState* FOLLY_NULLABLE QuicStreamManager::getOrCreatePeerStream(
// Returns QuicError for transport-level issues (limits, state), nullptr if
// closed.
folly::Expected<QuicStreamState*, QuicError>
QuicStreamManager::getOrCreatePeerStream(
StreamId streamId,
OptionalIntegral<StreamGroupId> streamGroupId) {
// This function maintains 3 invariants:
// 1. Streams below nextAcceptableStreamId are streams that have been
// seen before. Everything above can be opened.
// 2. Streams that have been seen before, always have an entry in
// openPeerStreams. If a stream below nextAcceptableStreamId does not
// have an entry in openPeerStreams, then it is closed.
// 3. If streamId n is open all streams < n will be seen.
// It also tries to create the entire state for a stream in a lazy manner.
// Validate the stream id is correct
// Validate stream direction based on node type
if (nodeType_ == QuicNodeType::Client && isClientStream(streamId)) {
throw QuicTransportException(
"Attempted getting client peer stream on client",
TransportErrorCode::STREAM_STATE_ERROR);
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_STATE_ERROR,
"Attempted getting client peer stream on client"));
} else if (nodeType_ == QuicNodeType::Server && isServerStream(streamId)) {
throw QuicTransportException(
"Attempted getting server peer stream on server",
TransportErrorCode::STREAM_STATE_ERROR);
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_STATE_ERROR,
"Attempted getting server peer stream on server"));
} else if (!isClientStream(streamId) && !isServerStream(streamId)) {
throw QuicTransportException(
"Invalid stream", TransportErrorCode::STREAM_STATE_ERROR);
} else if (streamGroupId) {
// Validate stream ID format itself
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_STATE_ERROR, "Invalid stream ID format"));
}
// Validate group properties if group is specified
if (streamGroupId) {
if (nodeType_ == QuicNodeType::Client &&
isClientStreamGroup(*streamGroupId)) {
throw QuicTransportException(
"Received a client stream group id on client",
TransportErrorCode::STREAM_STATE_ERROR);
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_STATE_ERROR,
"Received a client stream group id on client"));
} else if (
nodeType_ == QuicNodeType::Server &&
isServerStreamGroup(*streamGroupId)) {
throw QuicTransportException(
"Received a server stream group id on server",
TransportErrorCode::STREAM_STATE_ERROR);
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_STATE_ERROR,
"Received a server stream group id on server"));
}
// Validate group ID limit (peer perspective)
auto maxPeerStreamGroupId = std::min(
transportSettings_->advertisedMaxStreamGroups *
conn_.transportSettings
.advertisedMaxStreamGroups * // Use conn_.transportSettings here
detail::kStreamGroupIncrement,
detail::kMaxStreamGroupId);
if (*streamGroupId >= maxPeerStreamGroupId) {
throw QuicTransportException(
"Invalid stream group id", TransportErrorCode::STREAM_LIMIT_ERROR);
// Peer used a group ID we didn't advertise support for
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_LIMIT_ERROR, // Or
// FEATURE_NEGOTIATION_ERROR?
// Limit seems better.
"Invalid peer stream group id (exceeds limit)"));
}
}
// TODO when we can rely on C++17, this is a good candidate for try_emplace.
auto peerStream = streams_.find(streamId);
if (peerStream != streams_.end()) {
return &peerStream->second;
// Check if stream state already exists in the map
auto peerStreamIt = streams_.find(streamId);
if (peerStreamIt != streams_.end()) {
// TODO: Validate streamGroupId if provided matches existing stream's group?
// If streamGroupId.has_value() && peerStreamIt->second.groupId !=
// streamGroupId ... return error?
return &peerStreamIt->second;
}
// Check if stream was previously opened (in the StreamIdSet)
auto& openPeerStreams = isUnidirectionalStream(streamId)
? openUnidirectionalPeerStreams_
: openBidirectionalPeerStreams_;
if (openPeerStreams.contains(streamId)) {
// Stream was already open, create the state for it lazily.
return instantiatePeerStream(streamId, streamGroupId);
auto* streamPtr = instantiatePeerStream(streamId, streamGroupId);
if (!streamPtr) {
// Propagate internal inconsistency as QuicError
return folly::makeUnexpected(QuicError(
TransportErrorCode::INTERNAL_ERROR,
"Failed to instantiate known open peer stream"));
}
return streamPtr;
}
// Stream state doesn't exist and it's not marked as open yet.
// Try to open it (and streams below it) now.
auto& nextAcceptableStreamId = isUnidirectionalStream(streamId)
? nextAcceptablePeerUnidirectionalStreamId_
: nextAcceptablePeerBidirectionalStreamId_;
auto maxStreamId = isUnidirectionalStream(streamId)
? maxRemoteUnidirectionalStreamId_
: maxRemoteBidirectionalStreamId_;
auto* newPeerStreams =
// Determine where to store newly opened stream IDs for notification
auto* newPeerStreamsList =
streamGroupId ? &newGroupedPeerStreams_ : &newPeerStreams_;
bool notifyExplicitly = transportSettings_->notifyOnNewStreamsExplicitly;
// openPeerStreamIfNotClosed checks limits and adds to the StreamIdSet
auto openedResult = openPeerStreamIfNotClosed(
streamId,
openPeerStreams,
nextAcceptableStreamId,
maxStreamId,
(transportSettings_->notifyOnNewStreamsExplicitly ? nullptr
: newPeerStreams));
notifyExplicitly ? nullptr : newPeerStreamsList);
// check if limit has been saturated by peer
if (nextAcceptableStreamId == maxStreamId && conn_.statsCallback) {
// Check if the peer exceeded the stream limit
if (openedResult == LocalErrorCode::STREAM_LIMIT_EXCEEDED) {
// This was previously a throw -> return error
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_LIMIT_ERROR, "Peer exceeded stream limit."));
}
// Check if stream ID was below nextAcceptable (already seen/closed)
if (openedResult == LocalErrorCode::CREATING_EXISTING_STREAM) {
// This means streamId < nextAcceptableStreamId, but it wasn't found in
// streams_ map and wasn't in openPeerStreams set -> implies it was closed.
return nullptr; // Correctly indicates a closed stream
}
// If we reached here, openedResult must be NO_ERROR.
DCHECK(openedResult == LocalErrorCode::NO_ERROR);
// Check if peer saturated the limit *after* opening this stream
if (nextAcceptableStreamId >= maxStreamId && conn_.statsCallback) {
auto limitSaturatedFn = isBidirectionalStream(streamId)
? &QuicTransportStatsCallback::onPeerMaxBidiStreamsLimitSaturated
: &QuicTransportStatsCallback::onPeerMaxUniStreamsLimitSaturated;
folly::invoke(limitSaturatedFn, conn_.statsCallback);
}
if (openedResult == LocalErrorCode::CREATING_EXISTING_STREAM) {
// Stream could be closed here.
return nullptr;
} else if (openedResult == LocalErrorCode::STREAM_LIMIT_EXCEEDED) {
throw QuicTransportException(
"Exceeded stream limit.", TransportErrorCode::STREAM_LIMIT_ERROR);
// Stream(s) successfully marked as open, now instantiate the specific one
// requested.
auto* streamPtr = instantiatePeerStream(streamId, streamGroupId);
if (!streamPtr) {
// Propagate internal inconsistency as QuicError
return folly::makeUnexpected(QuicError(
TransportErrorCode::INTERNAL_ERROR,
"Failed to instantiate newly opened peer stream"));
}
return instantiatePeerStream(streamId, streamGroupId);
return streamPtr;
}
folly::Expected<QuicStreamState*, LocalErrorCode>
QuicStreamManager::createStream(
// Returns QuicError for transport-level issues (limits, state, internal)
folly::Expected<QuicStreamState*, QuicError> QuicStreamManager::createStream(
StreamId streamId,
OptionalIntegral<StreamGroupId> streamGroupId) {
// Validate stream direction based on node type
if (nodeType_ == QuicNodeType::Client && !isClientStream(streamId)) {
throw QuicTransportException(
"Attempted creating non-client stream on client",
TransportErrorCode::STREAM_STATE_ERROR);
// Previously threw -> return error
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_STATE_ERROR,
"Attempted creating non-client stream on client"));
} else if (nodeType_ == QuicNodeType::Server && !isServerStream(streamId)) {
throw QuicTransportException(
"Attempted creating non-server stream on server",
TransportErrorCode::STREAM_STATE_ERROR);
// Previously threw -> return error
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_STATE_ERROR,
"Attempted creating non-server stream on server"));
}
bool isUni = isUnidirectionalStream(streamId);
// Validate group properties if group is specified
if (streamGroupId) {
const auto& openGroups = isUni ? openUnidirectionalLocalStreamGroups_
: openBidirectionalLocalStreamGroups_;
if (!openGroups.contains(*streamGroupId)) {
throw QuicTransportException(
"Attempted creating a stream in non-existent group",
TransportErrorCode::STREAM_STATE_ERROR);
// Previously threw -> return error
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_STATE_ERROR,
"Attempted creating a stream in non-existent local group"));
}
// Ensure group ID matches node type
if (nodeType_ == QuicNodeType::Client &&
!isClientStreamGroup(*streamGroupId)) {
throw QuicTransportException(
"Attempted creating a stream in non-client stream group on client",
TransportErrorCode::STREAM_STATE_ERROR);
// Previously threw -> return error
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_STATE_ERROR,
"Attempted creating a stream in non-client stream group on client"));
} else if (
nodeType_ == QuicNodeType::Server &&
!isServerStreamGroup(*streamGroupId)) {
throw QuicTransportException(
"Attempted creating a stream in non-server stream group on server",
TransportErrorCode::STREAM_STATE_ERROR);
// Previously threw -> return error
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_STATE_ERROR,
"Attempted creating a stream in non-server stream group on server"));
}
}
auto existingStream = getOrCreateOpenedLocalStream(streamId);
if (existingStream) {
return existingStream;
// Check if stream was already implicitly opened but not yet instantiated
auto openedStreamResult = getOrCreateOpenedLocalStream(streamId);
if (openedStreamResult.hasError()) {
// Propagate internal error as QuicError
return openedStreamResult;
}
if (openedStreamResult.value()) {
// Stream was opened, now instantiated. Check/update group ID.
if (streamGroupId.has_value() &&
openedStreamResult.value()->groupId != streamGroupId) {
if (openedStreamResult.value()->groupId.has_value()) {
// Previously threw -> return error
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_STATE_ERROR,
"Stream exists lazily with different group ID"));
}
openedStreamResult.value()->groupId = streamGroupId;
}
return openedStreamResult.value();
}
// Stream doesn't exist and wasn't previously opened; try to open it now.
auto& nextAcceptableStreamId = isUni
? nextAcceptableLocalUnidirectionalStreamId_
: nextAcceptableLocalBidirectionalStreamId_;
auto maxStreamId =
isUni ? maxLocalUnidirectionalStreamId_ : maxLocalBidirectionalStreamId_;
auto& openLocalStreams =
isUni ? openUnidirectionalLocalStreams_ : openBidirectionalLocalStreams_;
auto openedResult = openLocalStreamIfNotClosed(
// Use openLocalStreamIfNotClosed to check limits and mark as open in
// StreamIdSet
auto openedResultCode = openLocalStreamIfNotClosed(
streamId, openLocalStreams, nextAcceptableStreamId, maxStreamId);
if (openedResult != LocalErrorCode::NO_ERROR) {
return folly::makeUnexpected(openedResult);
if (openedResultCode == LocalErrorCode::STREAM_LIMIT_EXCEEDED) {
// Previously threw -> return error
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_LIMIT_ERROR,
"Cannot create stream: limit exceeded"));
}
auto it = streams_.emplace(
std::piecewise_construct,
std::forward_as_tuple(streamId),
std::forward_as_tuple(streamId, streamGroupId, conn_));
if (openedResultCode == LocalErrorCode::CREATING_EXISTING_STREAM) {
// Previously threw -> return error
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_STATE_ERROR,
"Cannot create stream: already exists or closed"));
}
DCHECK(openedResultCode == LocalErrorCode::NO_ERROR);
// Stream is now officially open, instantiate its state in the map.
auto [it, inserted] =
streams_.try_emplace(streamId, streamId, streamGroupId, conn_);
if (!inserted) {
// Propagate internal error as QuicError
LOG(ERROR) << "Failed to emplace stream " << streamId
<< " after opening check";
return folly::makeUnexpected(QuicError(
TransportErrorCode::INTERNAL_ERROR,
"Failed to emplace stream state after opening"));
}
QUIC_STATS(conn_.statsCallback, onNewQuicStream);
updateAppIdleState();
return &it.first->second;
return &it->second;
}
void QuicStreamManager::removeClosedStream(StreamId streamId) {
folly::Expected<folly::Unit, QuicError> QuicStreamManager::removeClosedStream(
StreamId streamId) {
auto it = streams_.find(streamId);
if (it == streams_.end()) {
VLOG(10) << "Trying to remove already closed stream=" << streamId;
return;
return folly::unit;
}
VLOG(10) << "Removing closed stream=" << streamId;
DCHECK(it->second.inTerminalStates());
// Clear from various tracking sets
if (conn_.pendingEvents.resets.contains(streamId)) {
// This can happen when we send two reliable resets, one of which is
// egressed and ACKed.
conn_.pendingEvents.resets.erase(streamId);
}
if (conn_.transportSettings.unidirectionalStreamsReadCallbacksFirst &&
@ -554,19 +721,25 @@ void QuicStreamManager::removeClosedStream(StreamId streamId) {
readableStreams_.erase(streamId);
}
peekableStreams_.erase(streamId);
removeWritable(it->second);
removeWritable(it->second); // Also removes from loss sets and write queue
blockedStreams_.erase(streamId);
deliverableStreams_.erase(streamId);
txStreams_.erase(streamId);
windowUpdates_.erase(streamId);
stopSendingStreams_.erase(streamId);
flowControlUpdated_.erase(streamId);
// Adjust control stream count if needed
if (it->second.isControl) {
DCHECK_GT(numControlStreams_, 0);
numControlStreams_--;
}
// Erase the main stream state
streams_.erase(it);
QUIC_STATS(conn_.statsCallback, onQuicStreamClosed);
// Handle stream limit updates for remote streams
if (isRemoteStream(nodeType_, streamId)) {
auto& openPeerStreams = isUnidirectionalStream(streamId)
? openUnidirectionalPeerStreams_
@ -579,6 +752,7 @@ void QuicStreamManager::removeClosedStream(StreamId streamId) {
? transportSettings_->advertisedInitialMaxStreamsUni
: transportSettings_->advertisedInitialMaxStreamsBidi;
uint64_t streamWindow = initialStreamLimit / streamLimitWindowingFraction_;
uint64_t openableRemoteStreams = isUnidirectionalStream(streamId)
? openableRemoteUnidirectionalStreams()
: openableRemoteBidirectionalStreams();
@ -591,17 +765,27 @@ void QuicStreamManager::removeClosedStream(StreamId streamId) {
uint64_t maxStreams = (maxRemoteUnidirectionalStreamId_ -
initialRemoteUnidirectionalStreamId_) /
detail::kStreamIncrement;
setMaxRemoteUnidirectionalStreams(maxStreams + streamCredit);
auto result =
setMaxRemoteUnidirectionalStreams(maxStreams + streamCredit);
if (result.hasError()) {
return folly::makeUnexpected(result.error());
}
remoteUnidirectionalStreamLimitUpdate_ = maxStreams + streamCredit;
} else {
uint64_t maxStreams = (maxRemoteBidirectionalStreamId_ -
initialRemoteBidirectionalStreamId_) /
detail::kStreamIncrement;
setMaxRemoteBidirectionalStreams(maxStreams + streamCredit);
auto result =
setMaxRemoteBidirectionalStreams(maxStreams + streamCredit);
if (result.hasError()) {
return folly::makeUnexpected(result.error());
}
remoteBidirectionalStreamLimitUpdate_ = maxStreams + streamCredit;
}
}
} else {
// Local stream closed, remove from local open set
auto& openLocalStreams = isUnidirectionalStream(streamId)
? openUnidirectionalLocalStreams_
: openBidirectionalLocalStreams_;
@ -609,6 +793,7 @@ void QuicStreamManager::removeClosedStream(StreamId streamId) {
}
updateAppIdleState();
return folly::unit;
}
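// Illustrative only (not part of this commit): the close path must now check
// the result of removeClosedStream. The function name `exampleCloseStream` is
// an assumption; `conn` is assumed to be a QuicConnectionStateBase with a
// valid streamManager.
folly::Expected<folly::Unit, QuicError> exampleCloseStream(
    QuicConnectionStateBase& conn,
    StreamId streamId) {
  auto removeResult = conn.streamManager->removeClosedStream(streamId);
  if (removeResult.hasError()) {
    // Typically a failed stream-limit update; surface it as a connection
    // error rather than letting an exception unwind the close path.
    return folly::makeUnexpected(removeResult.error());
  }
  return folly::unit;
}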
void QuicStreamManager::addToReadableStreams(const QuicStreamState& stream) {
@ -640,16 +825,21 @@ void QuicStreamManager::updateReadableStreams(QuicStreamState& stream) {
}
void QuicStreamManager::updateWritableStreams(QuicStreamState& stream) {
// Check for terminal write errors first
if (stream.streamWriteError.has_value() && !stream.reliableSizeToPeer) {
CHECK(stream.lossBuffer.empty());
CHECK(stream.lossBufMetas.empty());
removeWritable(stream);
return;
}
// Check if paused
if (stream.priority.paused && !transportSettings_->disablePausedPriority) {
removeWritable(stream);
return;
}
// Update writable/loss sets based on data/meta presence
if (stream.hasWritableData()) {
writableStreams_.emplace(stream.id);
} else {
@ -670,6 +860,8 @@ void QuicStreamManager::updateWritableStreams(QuicStreamState& stream) {
} else {
lossDSRStreams_.erase(stream.id);
}
// Update the actual scheduling queues (PriorityQueue or control set)
if (stream.hasSchedulableData() || stream.hasSchedulableDsr()) {
if (stream.isControl) {
controlWriteQueue_.emplace(stream.id);
@ -677,6 +869,7 @@ void QuicStreamManager::updateWritableStreams(QuicStreamState& stream) {
writeQueue_.insertOrUpdate(stream.id, stream.priority);
}
} else {
// Not schedulable, remove from queues
if (stream.isControl) {
controlWriteQueue_.erase(stream.id);
} else {
@ -686,8 +879,7 @@ void QuicStreamManager::updateWritableStreams(QuicStreamState& stream) {
}
void QuicStreamManager::updatePeekableStreams(QuicStreamState& stream) {
// In the PeekCallback, the API peekError() is added, so change the condition
// and allow streamReadError in the peekableStreams
// Stream is peekable if it has data OR a read error to report via peekError()
if (stream.hasPeekableData() || stream.streamReadError.has_value()) {
peekableStreams_.emplace(stream.id);
} else {
@ -723,12 +915,14 @@ bool QuicStreamManager::isAppIdle() const {
}
void QuicStreamManager::clearOpenStreams() {
// Call stats callback before clearing
QUIC_STATS_FOR_EACH(
streams().cbegin(),
streams().cend(),
conn_.statsCallback,
onQuicStreamClosed);
// Clear all stream sets and maps
openBidirectionalLocalStreams_.clear();
openUnidirectionalLocalStreams_.clear();
openBidirectionalPeerStreams_.clear();

View File

@ -130,7 +130,17 @@ class QuicStreamManager {
StreamIdSet(nextBidirectionalStreamGroupId_);
openUnidirectionalLocalStreamGroups_ =
StreamIdSet(nextUnidirectionalStreamGroupId_);
refreshTransportSettings(transportSettings);
// Call refreshTransportSettings which now returns Expected
auto refreshResult = refreshTransportSettings(transportSettings);
if (refreshResult.hasError()) {
// Constructor cannot return error easily. Log or handle internally.
LOG(ERROR) << "Failed initial transport settings refresh: "
<< refreshResult.error().message;
// Consider throwing here if construction must fail, or setting an error
// state. For now, logging is consistent with previous changes.
}
writeQueue_.setMaxNextsPerStream(
transportSettings.priorityQueueWritesPerStream);
}
@ -194,6 +204,8 @@ class QuicStreamManager {
newPeerStreamGroups_ = std::move(other.newPeerStreamGroups_);
peerUnidirectionalStreamGroupsSeen_ =
std::move(other.peerUnidirectionalStreamGroupsSeen_);
peerBidirectionalStreamGroupsSeen_ = // Added missing move
std::move(other.peerBidirectionalStreamGroupsSeen_);
newGroupedPeerStreams_ = std::move(other.newGroupedPeerStreams_);
blockedStreams_ = std::move(other.blockedStreams_);
stopSendingStreams_ = std::move(other.stopSendingStreams_);
@ -218,82 +230,74 @@ class QuicStreamManager {
maxLocalUnidirectionalStreamIdIncreased_ =
other.maxLocalUnidirectionalStreamIdIncreased_;
/**
* We can't simply std::move the streams as the underlying
* QuicStreamState(s) hold a reference to the other.conn_.
*/
for (auto& pair : other.streams_) {
streams_.emplace(
std::piecewise_construct,
std::forward_as_tuple(pair.first),
std::forward_as_tuple(
/* migrate state to new conn ref */ conn_,
conn_, // Use the new conn ref
std::move(pair.second)));
}
}
/*
* Create the state for a stream if it does not exist and return it. Note this
* function is only used internally or for testing.
* Create the state for a stream if it does not exist and return it.
*/
folly::Expected<QuicStreamState*, LocalErrorCode> createStream(
[[nodiscard]] folly::Expected<QuicStreamState*, QuicError> createStream(
StreamId streamId,
OptionalIntegral<StreamGroupId> streamGroupId = std::nullopt);
/*
* Create a new bidirectional stream group.
*/
folly::Expected<StreamGroupId, LocalErrorCode>
[[nodiscard]] folly::Expected<StreamGroupId, LocalErrorCode>
createNextBidirectionalStreamGroup();
/*
* Create and return the state for the next available bidirectional stream.
*/
folly::Expected<QuicStreamState*, LocalErrorCode>
[[nodiscard]] folly::Expected<QuicStreamState*, LocalErrorCode>
createNextBidirectionalStream(
OptionalIntegral<StreamGroupId> streamGroupId = std::nullopt);
/*
* Create a new unidirectional stream group.
*/
folly::Expected<StreamGroupId, LocalErrorCode>
[[nodiscard]] folly::Expected<StreamGroupId, LocalErrorCode>
createNextUnidirectionalStreamGroup();
/*
* Create and return the state for the next available unidirectional stream.
*/
folly::Expected<QuicStreamState*, LocalErrorCode>
[[nodiscard]] folly::Expected<QuicStreamState*, LocalErrorCode>
createNextUnidirectionalStream(
OptionalIntegral<StreamGroupId> streamGroupId = std::nullopt);
/*
* Return the stream state or create it if the state has not yet been created.
* Note that this is only valid for streams that are currently open.
*/
QuicStreamState* FOLLY_NULLABLE getStream(
[[nodiscard]] folly::Expected<QuicStreamState*, QuicError> getStream(
StreamId streamId,
OptionalIntegral<StreamGroupId> streamGroupId = std::nullopt);
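// Illustrative call-site sketch, assuming a connection state `conn` and a
// StreamId `id`: getStream() now reports failures through folly::Expected, so
// callers branch on hasError() rather than catching exceptions; a successful
// result can still hold nullptr for an already-closed stream.
//
//   auto streamResult = conn.streamManager->getStream(id);
//   if (streamResult.hasError()) {
//     return folly::makeUnexpected(streamResult.error());
//   }
//   QuicStreamState* stream = streamResult.value();
//   if (stream) {
//     // use *stream
//   }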
/*
* Remove all the state for a stream that is being closed.
*/
void removeClosedStream(StreamId streamId);
[[nodiscard]] folly::Expected<folly::Unit, QuicError> removeClosedStream(
StreamId streamId);
/*
* Update the current readable streams for the given stream state. This will
* either add or remove it from the collection of currently readable streams.
* Update the current readable streams for the given stream state.
*/
void updateReadableStreams(QuicStreamState& stream);
/*
* Update the current peehable streams for the given stream state. This will
* either add or remove it from the collection of currently peekable streams.
* Update the current peekable streams for the given stream state.
*/
void updatePeekableStreams(QuicStreamState& stream);
/*
* Update the current writable streams for the given stream state. This will
* either add or remove it from the collection of currently writable streams.
* Update the current writable streams for the given stream state.
*/
void updateWritableStreams(QuicStreamState& stream);
@ -304,8 +308,7 @@ class QuicStreamManager {
QuicStreamState* FOLLY_NULLABLE findStream(StreamId streamId);
/*
* Check whether the stream exists. This returns false for the crypto stream,
* thus the caller must check separately for the crypto stream.
* Check whether the stream exists.
*/
bool streamExists(StreamId streamId);
@ -345,11 +348,6 @@ class QuicStreamManager {
detail::kStreamIncrement;
}
/*
* Returns the next acceptable (usable) remote bidirectional stream ID.
*
* If the maximum has been reached, empty optional returned.
*/
Optional<StreamId> nextAcceptablePeerBidirectionalStreamId() {
const auto max = maxRemoteBidirectionalStreamId_;
const auto next = nextAcceptablePeerBidirectionalStreamId_;
@ -360,11 +358,6 @@ class QuicStreamManager {
return next;
}
/*
* Returns the next acceptable (usable) remote undirectional stream ID.
*
* If the maximum has been reached, empty optional returned.
*/
Optional<StreamId> nextAcceptablePeerUnidirectionalStreamId() {
const auto max = maxRemoteUnidirectionalStreamId_;
const auto next = nextAcceptablePeerUnidirectionalStreamId_;
@ -375,11 +368,6 @@ class QuicStreamManager {
return next;
}
/*
* Returns the next acceptable (usable) local bidirectional stream ID.
*
* If the maximum has been reached, empty optional returned.
*/
Optional<StreamId> nextAcceptableLocalBidirectionalStreamId() {
const auto max = maxLocalBidirectionalStreamId_;
const auto next = nextAcceptableLocalBidirectionalStreamId_;
@ -390,11 +378,6 @@ class QuicStreamManager {
return next;
}
/*
* Returns the next acceptable (usable) local unidirectional stream ID.
*
* If the maximum has been reached, empty optional returned.
*/
Optional<StreamId> nextAcceptableLocalUnidirectionalStreamId() {
const auto max = maxLocalUnidirectionalStreamId_;
const auto next = nextAcceptableLocalUnidirectionalStreamId_;
@ -412,7 +395,7 @@ class QuicStreamManager {
/*
* Return a const reference to the underlying container holding the stream
* state. Only really useful for iterating.
* state.
*/
const auto& streams() const {
return streams_;
@ -427,36 +410,29 @@ class QuicStreamManager {
}
}
// Considers _any_ type of stream data being lost.
FOLLY_NODISCARD bool hasLoss() const {
[[nodiscard]] bool hasLoss() const {
return !lossStreams_.empty() || !lossDSRStreams_.empty();
}
// Considers non-DSR data being lost.
FOLLY_NODISCARD bool hasNonDSRLoss() const {
[[nodiscard]] bool hasNonDSRLoss() const {
return !lossStreams_.empty();
}
// Considers DSR data being lost.
FOLLY_NODISCARD bool hasDSRLoss() const {
[[nodiscard]] bool hasDSRLoss() const {
return !lossDSRStreams_.empty();
}
// Should only be used directly by tests.
void removeLoss(StreamId id) {
lossStreams_.erase(id);
lossDSRStreams_.erase(id);
}
// Should only be used directly by tests.
void addLoss(StreamId id) {
lossStreams_.insert(id);
}
/**
* Update stream priority if the stream indicated by id exists, and the
* passed in values are different from current priority. Return true if
* stream priority is update, false otherwise.
* Update stream priority if the stream indicated by id exists.
*/
bool setStreamPriority(StreamId id, Priority priority);
@ -464,11 +440,6 @@ class QuicStreamManager {
return writableDSRStreams_;
}
// TODO figure out a better interface here.
/*
* Returns a mutable reference to the container holding the writable stream
* IDs.
*/
auto& controlWriteQueue() {
return controlWriteQueue_;
}
@ -477,14 +448,11 @@ class QuicStreamManager {
return writeQueue_;
}
/*
* Returns if there are any writable streams.
*/
bool hasWritable() const {
return !writeQueue_.empty() || !controlWriteQueue_.empty();
}
FOLLY_NODISCARD bool hasDSRWritable() const {
[[nodiscard]] bool hasDSRWritable() const {
return !writableDSRStreams_.empty();
}
@ -492,9 +460,6 @@ class QuicStreamManager {
return !writableStreams_.empty() || !controlWriteQueue_.empty();
}
/*
* Remove a writable stream id.
*/
void removeWritable(const QuicStreamState& stream) {
if (stream.isControl) {
controlWriteQueue_.erase(stream.id);
@ -507,9 +472,6 @@ class QuicStreamManager {
lossDSRStreams_.erase(stream.id);
}
/*
* Clear the writable streams.
*/
void clearWritable() {
writableStreams_.clear();
writableDSRStreams_.clear();
@ -517,183 +479,110 @@ class QuicStreamManager {
controlWriteQueue_.clear();
}
/*
* Returns a const reference to the underlying blocked streams container.
*/
const auto& blockedStreams() const {
return blockedStreams_;
}
/*
* Queue a blocked event for the given stream id at the given offset.
*/
void queueBlocked(StreamId streamId, uint64_t offset) {
blockedStreams_.emplace(streamId, StreamDataBlockedFrame(streamId, offset));
}
/*
* Remove a blocked stream.
*/
void removeBlocked(StreamId streamId) {
blockedStreams_.erase(streamId);
}
/*
* Returns if there are any blocked streams.
*/
bool hasBlocked() const {
return !blockedStreams_.empty();
}
/*
* Set the max number of local bidirectional streams. Can only be increased
* unless force is true.
* Set the max number of local bidirectional streams.
*/
void setMaxLocalBidirectionalStreams(uint64_t maxStreams, bool force = false);
[[nodiscard]] folly::Expected<folly::Unit, QuicError>
setMaxLocalBidirectionalStreams(uint64_t maxStreams, bool force = false);
/*
* Set the max number of local unidirectional streams. Can only be increased
* unless force is true.
* Set the max number of local unidirectional streams.
*/
void setMaxLocalUnidirectionalStreams(
uint64_t maxStreams,
bool force = false);
[[nodiscard]] folly::Expected<folly::Unit, QuicError>
setMaxLocalUnidirectionalStreams(uint64_t maxStreams, bool force = false);
/*
* Set the max number of remote bidirectional streams. Can only be increased
* unless force is true.
* Set the max number of remote bidirectional streams.
*/
void setMaxRemoteBidirectionalStreams(uint64_t maxStreams);
[[nodiscard]] folly::Expected<folly::Unit, QuicError>
setMaxRemoteBidirectionalStreams(uint64_t maxStreams);
/*
* Set the max number of remote unidirectional streams. Can only be increased
* unless force is true.
* Set the max number of remote unidirectional streams.
*/
void setMaxRemoteUnidirectionalStreams(uint64_t maxStreams);
[[nodiscard]] folly::Expected<folly::Unit, QuicError>
setMaxRemoteUnidirectionalStreams(uint64_t maxStreams);
/*
* Returns true if MaxLocalBidirectionalStreamId was increased
* since last call of this function (resets flag).
*/
bool consumeMaxLocalBidirectionalStreamIdIncreased();
/*
* Returns true if MaxLocalUnidirectionalStreamId was increased
* since last call of this function (resets flag).
*/
bool consumeMaxLocalUnidirectionalStreamIdIncreased();
void refreshTransportSettings(const TransportSettings& settings);
[[nodiscard]] folly::Expected<folly::Unit, QuicError>
refreshTransportSettings(const TransportSettings& settings);
/*
* Sets the "window-by" fraction for sending stream limit updates. E.g.
* setting the fraction to two when the initial stream limit was 100 will
* cause the stream manager to update the relevant stream limit update when
* 50 streams have been closed.
*/
void setStreamLimitWindowingFraction(uint64_t fraction) {
if (fraction > 0) {
streamLimitWindowingFraction_ = fraction;
}
}
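// Worked example of the windowing fraction, assuming the peer was initially
// allowed 100 bidirectional streams and the default fraction of 2: a new
// MAX_STREAMS value is staged once 100 / 2 = 50 of those streams have closed,
// rather than after every individual close.
//
//   manager.setStreamLimitWindowingFraction(2); // `manager` is an assumed name
//   // ... 50 peer-initiated bidirectional streams close ...
//   // remoteBidirectionalStreamLimitUpdate() now holds the raised limit.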
/*
* The next value that should be sent in a bidirectional max streams frame,
* if any. This is potentially updated every time a bidirectional stream is
* closed. Calling this function "consumes" the update.
*/
Optional<uint64_t> remoteBidirectionalStreamLimitUpdate() {
auto ret = remoteBidirectionalStreamLimitUpdate_;
remoteBidirectionalStreamLimitUpdate_.reset();
return ret;
}
/*
* The next value that should be sent in a unidirectional max streams frame,
* if any. This is potentially updated every time a unidirectional stream is
* closed. Calling this function "consumes" the update.
*/
Optional<uint64_t> remoteUnidirectionalStreamLimitUpdate() {
auto ret = remoteUnidirectionalStreamLimitUpdate_;
remoteUnidirectionalStreamLimitUpdate_.reset();
return ret;
}
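// Minimal sketch of consuming a staged limit update (names assumed): each
// accessor hands back the pending value at most once, since it resets the
// stored optional before returning.
//
//   if (auto update = manager.remoteBidirectionalStreamLimitUpdate()) {
//     // write a bidirectional MAX_STREAMS frame advertising *update
//   }
//   // A second call here returns an empty optional until a new update is
//   // staged.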
/*
* Returns a const reference to the underlying stream window updates
* container.
*/
const auto& windowUpdates() const {
return windowUpdates_;
}
/*
* Returns whether a given stream id has a pending window update.
*/
bool pendingWindowUpdate(StreamId streamId) {
return windowUpdates_.count(streamId) > 0;
}
/*
* Queue a pending window update for the given stream id.
*/
void queueWindowUpdate(StreamId streamId) {
windowUpdates_.emplace(streamId);
}
/*
* Clear the window updates.
*/
void removeWindowUpdate(StreamId streamId) {
windowUpdates_.erase(streamId);
}
/*
* Returns whether any stream has a pending window update.
*/
bool hasWindowUpdates() const {
return !windowUpdates_.empty();
}
// TODO figure out a better interface here.
/*
* Return a mutable reference to the underlying closed streams container.
*/
auto& closedStreams() {
return closedStreams_;
}
/*
* Add a closed stream.
*/
void addClosed(StreamId streamId) {
closedStreams_.insert(streamId);
}
/*
* Returns a const reference to the underlying deliverable streams container.
*/
const auto& deliverableStreams() const {
return deliverableStreams_;
}
/*
* Add a deliverable stream.
*/
void addDeliverable(StreamId streamId) {
deliverableStreams_.insert(streamId);
}
/*
* Remove a deliverable stream.
*/
void removeDeliverable(StreamId streamId) {
deliverableStreams_.erase(streamId);
}
/*
* Pop a deliverable stream id and return it.
*/
Optional<StreamId> popDeliverable() {
auto itr = deliverableStreams_.begin();
if (itr == deliverableStreams_.end()) {
@ -704,44 +593,26 @@ class QuicStreamManager {
return ret;
}
/*
* Returns if there are any deliverable streams.
*/
bool hasDeliverable() const {
return !deliverableStreams_.empty();
}
/*
* Returns if the stream is in the deliverable container.
*/
bool deliverableContains(StreamId streamId) const {
return deliverableStreams_.count(streamId) > 0;
}
/*
* Returns a const reference to the underlying TX streams container.
*/
FOLLY_NODISCARD const auto& txStreams() const {
[[nodiscard]] const auto& txStreams() const {
return txStreams_;
}
/*
* Add a stream to list of streams that have transmitted.
*/
void addTx(StreamId streamId) {
txStreams_.insert(streamId);
}
/*
* Remove a TX stream.
*/
void removeTx(StreamId streamId) {
txStreams_.erase(streamId);
}
/*
* Pop a TX stream id and return it.
*/
Optional<StreamId> popTx() {
auto itr = txStreams_.begin();
if (itr == txStreams_.end()) {
@ -753,24 +624,14 @@ class QuicStreamManager {
}
}
/*
* Returns if there are any TX streams.
*/
FOLLY_NODISCARD bool hasTx() const {
[[nodiscard]] bool hasTx() const {
return !txStreams_.empty();
}
/*
* Returns if the stream is in the TX container.
*/
FOLLY_NODISCARD bool txContains(StreamId streamId) const {
[[nodiscard]] bool txContains(StreamId streamId) const {
return txStreams_.count(streamId) > 0;
}
// TODO figure out a better interface here.
/*
* Returns a mutable reference to the underlying readable streams container.
*/
auto& readableStreams() {
return readableStreams_;
}
@ -779,25 +640,14 @@ class QuicStreamManager {
return unidirectionalReadableStreams_;
}
// TODO figure out a better interface here.
/*
* Returns a mutable reference to the underlying peekable streams container.
*/
auto& peekableStreams() {
return peekableStreams_;
}
/*
* Returns a mutable reference to the underlying container of streams which
* had their flow control updated.
*/
const auto& flowControlUpdated() {
return flowControlUpdated_;
}
/*
* Consume the flow control updated streams using the parameter vector.
*/
std::vector<StreamId> consumeFlowControlUpdated() {
std::vector<StreamId> result(
flowControlUpdated_.begin(), flowControlUpdated_.end());
@ -805,16 +655,10 @@ class QuicStreamManager {
return result;
}
/*
* Queue a stream which has had its flow control updated.
*/
void queueFlowControlUpdated(StreamId streamId) {
flowControlUpdated_.emplace(streamId);
}
/*
* Pop and return a stream which has had its flow control updated.
*/
Optional<StreamId> popFlowControlUpdated() {
auto itr = flowControlUpdated_.begin();
if (itr == flowControlUpdated_.end()) {
@ -826,114 +670,61 @@ class QuicStreamManager {
}
}
/*
* Remove the specified stream from the flow control updated container.
*/
void removeFlowControlUpdated(StreamId streamId) {
flowControlUpdated_.erase(streamId);
}
/*
* Returns if the the given stream is in the flow control updated container.
*/
bool flowControlUpdatedContains(StreamId streamId) {
return flowControlUpdated_.count(streamId) > 0;
}
/*
* Clear the flow control updated container.
*/
void clearFlowControlUpdated() {
flowControlUpdated_.clear();
}
// TODO figure out a better interface here.
/*
* Returns a mutable reference to the underlying open bidirectional peer
* streams container.
*/
auto& openBidirectionalPeerStreams() {
return openBidirectionalPeerStreams_;
}
// TODO figure out a better interface here.
/*
* Returns a mutable reference to the underlying open peer unidirectional
* streams container.
*/
auto& openUnidirectionalPeerStreams() {
return openUnidirectionalPeerStreams_;
}
// TODO figure out a better interface here.
/*
* Returns a mutable reference to the underlying open local unidirectional
* streams container.
*/
auto& openUnidirectionalLocalStreams() {
return openUnidirectionalLocalStreams_;
}
// TODO figure out a better interface here.
/*
* Returns a mutable reference to the underlying open local unidirectional
* streams container.
*/
auto& openBidirectionalLocalStreams() {
return openBidirectionalLocalStreams_;
}
// TODO figure out a better interface here.
/*
* Returns a mutable reference to the underlying new peer streams container.
*/
auto& newPeerStreams() {
return newPeerStreams_;
}
/*
* Consume the new peer streams using the parameter vector.
*/
std::vector<StreamId> consumeNewPeerStreams() {
std::vector<StreamId> res{std::move(newPeerStreams_)};
return res;
}
/*
* Consume the new peer streams in groups using the parameter vector.
*/
std::vector<StreamId> consumeNewGroupedPeerStreams() {
std::vector<StreamId> res{std::move(newGroupedPeerStreams_)};
return res;
}
/*
* Consume the new peer stream groups using the parameter vector.
*/
auto consumeNewPeerStreamGroups() {
decltype(newPeerStreamGroups_) result{std::move(newPeerStreamGroups_)};
return result;
}
/*
* Returns the number of streams open and active (for which we have created
* the stream state).
*/
size_t streamCount() {
return streams_.size();
}
/*
* Returns a const reference to the container of streams with pending
* StopSending events.
*/
const auto& stopSendingStreams() const {
return stopSendingStreams_;
}
/*
* Consume the stop sending streams.
*/
auto consumeStopSending() {
std::vector<std::pair<const StreamId, const ApplicationErrorCode>> result(
stopSendingStreams_.begin(), stopSendingStreams_.end());
@ -941,42 +732,24 @@ class QuicStreamManager {
return result;
}
/*
* Clear the StopSending streams.
*/
void clearStopSending() {
stopSendingStreams_.clear();
}
/*
* Add a stream to the StopSending streams.
*/
void addStopSending(StreamId streamId, ApplicationErrorCode error) {
stopSendingStreams_.emplace(streamId, error);
}
/*
* Returns if the stream manager has any non-control streams.
*/
bool hasNonCtrlStreams() {
return streams_.size() != numControlStreams_;
}
/*
* Returns number of control streams.
*/
auto numControlStreams() {
return numControlStreams_;
}
/*
* Sets the given stream to be tracked as a control stream.
*/
void setStreamAsControl(QuicStreamState& stream);
/*
* Clear the tracking of streams which can trigger API callbacks.
*/
void clearActionable() {
deliverableStreams_.clear();
txStreams_.clear();
@ -988,16 +761,10 @@ class QuicStreamManager {
bool isAppIdle() const;
/*
* Returns number of bidirectional groups.
*/
[[nodiscard]] bool getNumBidirectionalGroups() const {
return openBidirectionalLocalStreamGroups_.size();
}
/*
* Returns number of unidirectional group exists.
*/
[[nodiscard]] bool getNumUnidirectionalGroups() const {
return openUnidirectionalLocalStreamGroups_.size();
}
@ -1012,36 +779,27 @@ class QuicStreamManager {
}
private:
// Updates the congestion controller app-idle state, after a change in the
// number of streams.
// App-idle state is set to true if there was at least one non-control stream
// before the update and there are none after. It is set to false if instead
// there were no non-control streams before and there is at least one at the
// time of calling.
void updateAppIdleState();
QuicStreamState* FOLLY_NULLABLE
[[nodiscard]] folly::Expected<QuicStreamState*, QuicError>
getOrCreateOpenedLocalStream(StreamId streamId);
QuicStreamState* FOLLY_NULLABLE getOrCreatePeerStream(
[[nodiscard]] folly::Expected<QuicStreamState*, QuicError>
getOrCreatePeerStream(
StreamId streamId,
OptionalIntegral<StreamGroupId> streamGroupId = std::nullopt);
void setMaxRemoteBidirectionalStreamsInternal(
uint64_t maxStreams,
bool force);
void setMaxRemoteUnidirectionalStreamsInternal(
uint64_t maxStreams,
bool force);
[[nodiscard]] folly::Expected<folly::Unit, QuicError>
setMaxRemoteBidirectionalStreamsInternal(uint64_t maxStreams, bool force);
[[nodiscard]] folly::Expected<folly::Unit, QuicError>
setMaxRemoteUnidirectionalStreamsInternal(uint64_t maxStreams, bool force);
// helper to create a new peer stream.
QuicStreamState* FOLLY_NULLABLE instantiatePeerStream(
StreamId streamId,
OptionalIntegral<StreamGroupId> groupId);
folly::Expected<StreamGroupId, LocalErrorCode> createNextStreamGroup(
StreamGroupId& groupId,
StreamIdSet& streamGroups);
[[nodiscard]] folly::Expected<StreamGroupId, LocalErrorCode>
createNextStreamGroup(StreamGroupId& groupId, StreamIdSet& streamGroups);
void addToReadableStreams(const QuicStreamState& stream);
void removeFromReadableStreams(const QuicStreamState& stream);
@ -1049,152 +807,66 @@ class QuicStreamManager {
QuicConnectionStateBase& conn_;
QuicNodeType nodeType_;
// Next acceptable bidirectional stream id that can be opened by the peer.
// Used to keep track of closed streams.
StreamId nextAcceptablePeerBidirectionalStreamId_{0};
// Next acceptable unidirectional stream id that can be opened by the peer.
// Used to keep track of closed streams.
StreamId nextAcceptablePeerUnidirectionalStreamId_{0};
// Next acceptable bidirectional stream id that can be opened locally.
// Used to keep track of closed streams.
StreamId nextAcceptableLocalBidirectionalStreamId_{0};
// Next acceptable unidirectional stream id that can be opened locally.
// Used to keep track of closed streams.
StreamId nextAcceptableLocalUnidirectionalStreamId_{0};
// Next bidirectional stream id to use when creating a stream.
StreamId nextBidirectionalStreamId_{0};
// Next bidirectional stream group id to use.
StreamGroupId nextBidirectionalStreamGroupId_{0};
// Next unidirectional stream id to use when creating a stream.
StreamId nextUnidirectionalStreamId_{0};
// Next unidirectional stream group id to use.
StreamGroupId nextUnidirectionalStreamGroupId_{0};
StreamId maxLocalBidirectionalStreamId_{0};
StreamId maxLocalUnidirectionalStreamId_{0};
StreamId maxRemoteBidirectionalStreamId_{0};
StreamId maxRemoteUnidirectionalStreamId_{0};
StreamId initialLocalBidirectionalStreamId_{0};
StreamId initialLocalUnidirectionalStreamId_{0};
StreamId initialRemoteBidirectionalStreamId_{0};
StreamId initialRemoteUnidirectionalStreamId_{0};
// The fraction to determine the window by which we will signal the need to
// send stream limit updates
uint64_t streamLimitWindowingFraction_{2};
// Contains the value of a stream window update that should be sent for
// remote bidirectional streams.
Optional<uint64_t> remoteBidirectionalStreamLimitUpdate_;
// Contains the value of a stream window update that should be sent for
// remote unidirectional streams.
Optional<uint64_t> remoteUnidirectionalStreamLimitUpdate_;
uint64_t numControlStreams_{0};
// Bidirectional streams that are opened by the peer on the connection.
StreamIdSet openBidirectionalPeerStreams_;
// Unidirectional streams that are opened by the peer on the connection.
StreamIdSet openUnidirectionalPeerStreams_;
// Bidirectional streams that are opened locally on the connection.
StreamIdSet openBidirectionalLocalStreams_;
// Unidirectional streams that are opened locally on the connection.
StreamIdSet openUnidirectionalLocalStreams_;
// Bidirectional stream groups that are opened locally on the connection.
StreamIdSet openBidirectionalLocalStreamGroups_;
// Unidirectional stream groups that are opened locally on the connection.
StreamIdSet openUnidirectionalLocalStreamGroups_;
// A map of streams that are active.
folly::F14FastMap<StreamId, QuicStreamState> streams_;
// Recently opened peer streams.
std::vector<StreamId> newPeerStreams_;
// Recently opened peer streams with groups.
std::vector<StreamId> newGroupedPeerStreams_;
// Recently opened peer stream groups.
folly::F14FastSet<StreamGroupId> newPeerStreamGroups_;
// Peer group ids seen.
StreamIdSet peerUnidirectionalStreamGroupsSeen_;
StreamIdSet peerBidirectionalStreamGroupsSeen_;
// Map of streams that were blocked
folly::F14FastMap<StreamId, StreamDataBlockedFrame> blockedStreams_;
// Map of streams where the peer was asked to stop sending
folly::F14FastMap<StreamId, ApplicationErrorCode> stopSendingStreams_;
// Streams that had their stream window change and potentially need a window
// update sent
folly::F14FastSet<StreamId> windowUpdates_;
// Streams that had their flow control updated
folly::F14FastSet<StreamId> flowControlUpdated_;
// Streams that have bytes in loss buffer
folly::F14FastSet<StreamId> lossStreams_;
// DSR Streams that have bytes in loss buff meta
folly::F14FastSet<StreamId> lossDSRStreams_;
// Set of streams that have pending reads
folly::F14FastSet<StreamId> readableStreams_;
// Set of unidirectional streams that have pending reads.
// Used separately from readableStreams_ when
// unidirectionalStreamsReadCallbacksFirst = true to prioritize unidirectional
// streams read callbacks.
folly::F14FastSet<StreamId> unidirectionalReadableStreams_;
// Set of streams that have pending peeks
folly::F14FastSet<StreamId> peekableStreams_;
// Set of !control streams that have writable data used for frame scheduling
PriorityQueue writeQueue_;
// Set of control streams that have writable data
std::set<StreamId> controlWriteQueue_;
folly::F14FastSet<StreamId> writableStreams_;
folly::F14FastSet<StreamId> writableDSRStreams_;
// Streams that may be able to call TxCallback
folly::F14FastSet<StreamId> txStreams_;
// Streams that may be able to callback DeliveryCallback
folly::F14FastSet<StreamId> deliverableStreams_;
// Streams that are closed but we still have state for
folly::F14FastSet<StreamId> closedStreams_;
// Record whether or not we are app-idle.
bool isAppIdle_{false};
const TransportSettings* FOLLY_NONNULL transportSettings_;
bool maxLocalBidirectionalStreamIdIncreased_{false};
bool maxLocalUnidirectionalStreamIdIncreased_{false};
};

View File

@ -108,7 +108,7 @@ void updateSimpleFrameOnPacketLoss(
}
}
bool updateSimpleFrameOnPacketReceived(
folly::Expected<bool, QuicError> updateSimpleFrameOnPacketReceived(
QuicConnectionStateBase& conn,
const QuicSimpleFrame& frame,
const ConnectionId& dstConnId,
@ -116,18 +116,25 @@ bool updateSimpleFrameOnPacketReceived(
switch (frame.type()) {
case QuicSimpleFrame::Type::StopSendingFrame: {
const StopSendingFrame& stopSending = *frame.asStopSendingFrame();
auto stream = conn.streamManager->getStream(stopSending.streamId);
auto streamResult = conn.streamManager->getStream(stopSending.streamId);
if (streamResult.hasError()) {
return folly::makeUnexpected(streamResult.error());
}
auto& stream = streamResult.value();
if (stream) {
sendStopSendingSMHandler(*stream, stopSending);
auto result = sendStopSendingSMHandler(*stream, stopSending);
if (result.hasError()) {
return folly::makeUnexpected(result.error());
}
}
return true;
}
case QuicSimpleFrame::Type::PathChallengeFrame: {
bool rotatedId = conn.retireAndSwitchPeerConnectionIds();
if (!rotatedId) {
throw QuicTransportException(
"No more connection ids to use for new path.",
TransportErrorCode::INVALID_MIGRATION);
return folly::makeUnexpected(QuicError(
TransportErrorCode::INVALID_MIGRATION,
"No more connection ids to use for new path."));
}
const PathChallengeFrame& pathChallenge = *frame.asPathChallengeFrame();
@ -165,18 +172,18 @@ bool updateSimpleFrameOnPacketReceived(
// TODO vchynaro Ensure we ignore smaller subsequent retirePriorTos
// than the largest seen so far.
if (newConnectionId.retirePriorTo > newConnectionId.sequenceNumber) {
throw QuicTransportException(
"Retire prior to greater than sequence number",
TransportErrorCode::PROTOCOL_VIOLATION);
return folly::makeUnexpected(QuicError(
TransportErrorCode::PROTOCOL_VIOLATION,
"Retire prior to greater than sequence number"));
}
for (const auto& existingPeerConnIdData : conn.peerConnectionIds) {
if (existingPeerConnIdData.connId == newConnectionId.connectionId) {
if (existingPeerConnIdData.sequenceNumber !=
newConnectionId.sequenceNumber) {
throw QuicTransportException(
"Repeated connection id with different sequence number.",
TransportErrorCode::PROTOCOL_VIOLATION);
return folly::makeUnexpected(QuicError(
TransportErrorCode::PROTOCOL_VIOLATION,
"Repeated connection id with different sequence number."));
} else {
// No-op on repeated conn id.
return false;
@ -191,9 +198,9 @@ bool updateSimpleFrameOnPacketReceived(
(conn.nodeType == QuicNodeType::Client ? conn.serverConnectionId
: conn.clientConnectionId);
if (!peerConnId || peerConnId->size() == 0) {
throw QuicTransportException(
"Endpoint is already using 0-len connection ids.",
TransportErrorCode::PROTOCOL_VIOLATION);
return folly::makeUnexpected(QuicError(
TransportErrorCode::PROTOCOL_VIOLATION,
"Endpoint is already using 0-len connection ids."));
}
// TODO vchynaro Implement retire_prior_to logic
@ -216,11 +223,17 @@ bool updateSimpleFrameOnPacketReceived(
case QuicSimpleFrame::Type::MaxStreamsFrame: {
const MaxStreamsFrame& maxStreamsFrame = *frame.asMaxStreamsFrame();
if (maxStreamsFrame.isForBidirectionalStream()) {
conn.streamManager->setMaxLocalBidirectionalStreams(
auto result = conn.streamManager->setMaxLocalBidirectionalStreams(
maxStreamsFrame.maxStreams);
if (result.hasError()) {
return folly::makeUnexpected(result.error());
}
} else {
conn.streamManager->setMaxLocalUnidirectionalStreams(
auto result = conn.streamManager->setMaxLocalUnidirectionalStreams(
maxStreamsFrame.maxStreams);
if (result.hasError()) {
return folly::makeUnexpected(result.error());
}
}
return true;
}
@ -229,9 +242,9 @@ bool updateSimpleFrameOnPacketReceived(
? conn.serverConnectionId
: conn.clientConnectionId;
if (!curNodeConnId || curNodeConnId->size() == 0) {
throw QuicTransportException(
"Peer issued RETIRE_CONNECTION_ID_FRAME to endpoint using 0-len connection ids.",
TransportErrorCode::PROTOCOL_VIOLATION);
return folly::makeUnexpected(QuicError(
TransportErrorCode::PROTOCOL_VIOLATION,
"Peer issued RETIRE_CONNECTION_ID_FRAME to endpoint using 0-len connection ids."));
}
const RetireConnectionIdFrame& retireConnIdFrame =
*frame.asRetireConnectionIdFrame();
@ -249,9 +262,9 @@ bool updateSimpleFrameOnPacketReceived(
}
if (dstConnId == it->connId) {
throw QuicTransportException(
"Peer issued RETIRE_CONNECTION_ID_FRAME refers to dst conn id field of containing packet.",
TransportErrorCode::PROTOCOL_VIOLATION);
return folly::makeUnexpected(QuicError(
TransportErrorCode::PROTOCOL_VIOLATION,
"Peer issued RETIRE_CONNECTION_ID_FRAME refers to dst conn id field of containing packet."));
}
if (conn.nodeType == QuicNodeType::Server) {
@ -264,9 +277,9 @@ bool updateSimpleFrameOnPacketReceived(
}
case QuicSimpleFrame::Type::HandshakeDoneFrame: {
if (conn.nodeType == QuicNodeType::Server) {
throw QuicTransportException(
"Received HANDSHAKE_DONE from client.",
TransportErrorCode::PROTOCOL_VIOLATION);
return folly::makeUnexpected(QuicError(
TransportErrorCode::PROTOCOL_VIOLATION,
"Received HANDSHAKE_DONE from client."));
}
// Mark the handshake confirmed in the handshake layer before doing
// any dropping, as this gives us a chance to process ACKs in this
@ -284,9 +297,9 @@ bool updateSimpleFrameOnPacketReceived(
if (!conn.transportSettings.minAckDelay.has_value()) {
// We do not accept ACK_FREQUENCY frames. This is a protocol
// violation.
throw QuicTransportException(
"Received ACK_FREQUENCY frame without announcing min_ack_delay",
TransportErrorCode::PROTOCOL_VIOLATION);
return folly::makeUnexpected(QuicError(
TransportErrorCode::PROTOCOL_VIOLATION,
"Received ACK_FREQUENCY frame without announcing min_ack_delay"));
}
const auto ackFrequencyFrame = frame.asAckFrequencyFrame();
auto& ackState = conn.ackStates.appDataAckState;
@ -308,7 +321,8 @@ bool updateSimpleFrameOnPacketReceived(
return true;
}
}
folly::assume_unreachable();
return folly::makeUnexpected(
QuicError(TransportErrorCode::INTERNAL_ERROR, "Unknown frame type"));
}
} // namespace quic

View File

@ -44,7 +44,8 @@ void updateSimpleFrameOnPacketLoss(
* Update the connection state on receipt of the given simple frame.
* Returns true if the frame is NOT a probing frame
*/
bool updateSimpleFrameOnPacketReceived(
[[nodiscard]] folly::Expected<bool, QuicError>
updateSimpleFrameOnPacketReceived(
QuicConnectionStateBase& conn,
const QuicSimpleFrame& frameIn,
const ConnectionId& dstConnId,

View File

@ -767,7 +767,9 @@ struct AckStateVersion {
bool operator!=(const AckStateVersion& other) const;
};
using LossVisitor = std::function<
void(QuicConnectionStateBase&, RegularQuicWritePacket&, bool)>;
using LossVisitor = std::function<folly::Expected<folly::Unit, QuicError>(
QuicConnectionStateBase& conn,
RegularQuicWritePacket& packet,
bool processed)>;
} // namespace quic
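// Minimal sketch of a visitor conforming to the new alias (assumed usage, not
// taken from this change): failures are reported through the Expected return
// instead of being thrown.
//
//   LossVisitor visitor = [](QuicConnectionStateBase& conn,
//                            RegularQuicWritePacket& packet,
//                            bool processed)
//       -> folly::Expected<folly::Unit, QuicError> {
//     // mark the packet's stream data as lost here
//     return folly::unit;
//   };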

View File

@ -34,16 +34,18 @@ namespace quic {
* Receive::Closed <---------+
*
*/
void receiveReadStreamFrameSMHandler(
folly::Expected<folly::Unit, QuicError> receiveReadStreamFrameSMHandler(
QuicStreamState& stream,
ReadStreamFrame&& frame) {
switch (stream.recvState) {
case StreamRecvState::Open: {
VLOG_IF(10, frame.fin) << "Open: Received data with fin"
<< " stream=" << stream.id << " " << stream.conn;
appendDataToReadBuffer(
auto appendResult = appendDataToReadBuffer(
stream, StreamBuffer(std::move(frame.data), frame.offset, frame.fin));
if (appendResult.hasError()) {
return appendResult;
}
bool allDataTillReliableSizeReceived = stream.reliableSizeFromPeer &&
(*stream.reliableSizeFromPeer == 0 ||
isAllDataReceivedUntil(stream, *stream.reliableSizeFromPeer - 1));
@ -67,23 +69,27 @@ void receiveReadStreamFrameSMHandler(
break;
}
case StreamRecvState::Invalid: {
throw QuicTransportException(
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_STATE_ERROR,
folly::to<std::string>(
"Invalid transition from state=",
streamStateToString(stream.recvState)),
TransportErrorCode::STREAM_STATE_ERROR);
streamStateToString(stream.recvState))));
}
}
return folly::unit;
}
void receiveRstStreamSMHandler(
folly::Expected<folly::Unit, QuicError> receiveRstStreamSMHandler(
QuicStreamState& stream,
const RstStreamFrame& rst) {
switch (stream.recvState) {
case StreamRecvState::Closed: {
// This will check whether the reset is still consistent with the
// stream.
onResetQuicStream(stream, rst);
auto resetResult = onResetQuicStream(stream, rst);
if (resetResult.hasError()) {
return resetResult;
}
break;
}
case StreamRecvState::Open: {
@ -100,18 +106,21 @@ void receiveRstStreamSMHandler(
stream.conn.streamManager->addClosed(stream.id);
}
}
onResetQuicStream(stream, rst);
auto resetResult = onResetQuicStream(stream, rst);
if (resetResult.hasError()) {
return resetResult;
}
break;
}
case StreamRecvState::Invalid: {
throw QuicTransportException(
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_STATE_ERROR,
folly::to<std::string>(
"Invalid transition from state=",
streamStateToString(stream.recvState)),
TransportErrorCode::STREAM_STATE_ERROR);
break;
streamStateToString(stream.recvState))));
}
}
return folly::unit;
}
} // namespace quic

View File

@ -11,12 +11,12 @@
#include <quic/state/QuicStreamUtilities.h>
namespace quic {
void receiveReadStreamFrameSMHandler(
[[nodiscard]] folly::Expected<folly::Unit, QuicError>
receiveReadStreamFrameSMHandler(
QuicStreamState& stream,
ReadStreamFrame&& frame);
void receiveRstStreamSMHandler(
[[nodiscard]] folly::Expected<folly::Unit, QuicError> receiveRstStreamSMHandler(
QuicStreamState& stream,
const RstStreamFrame& rst);

View File

@ -41,8 +41,7 @@ namespace quic {
* ACKed.
*
*/
void sendStopSendingSMHandler(
folly::Expected<folly::Unit, QuicError> sendStopSendingSMHandler(
QuicStreamState& stream,
const StopSendingFrame& frame) {
switch (stream.sendState) {
@ -66,16 +65,17 @@ void sendStopSendingSMHandler(
break;
}
case StreamSendState::Invalid: {
throw QuicTransportException(
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_STATE_ERROR,
folly::to<std::string>(
"Invalid transition from state=",
streamStateToString(stream.sendState)),
TransportErrorCode::STREAM_STATE_ERROR);
streamStateToString(stream.sendState))));
}
}
return folly::unit;
}
void sendRstSMHandler(
folly::Expected<folly::Unit, QuicError> sendRstSMHandler(
QuicStreamState& stream,
ApplicationErrorCode errorCode,
const Optional<uint64_t>& reliableSize) {
@ -101,7 +101,10 @@ void sendRstSMHandler(
<< "are increasing the reliable size";
}
stream.appErrorCodeToPeer = errorCode;
resetQuicStream(stream, errorCode, reliableSize);
auto resetResult = resetQuicStream(stream, errorCode, reliableSize);
if (resetResult.hasError()) {
return resetResult;
}
appendPendingStreamReset(stream.conn, stream, errorCode, reliableSize);
stream.sendState = StreamSendState::ResetSent;
break;
@ -115,16 +118,17 @@ void sendRstSMHandler(
break;
}
case StreamSendState::Invalid: {
throw QuicTransportException(
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_STATE_ERROR,
folly::to<std::string>(
"Invalid transition from state=",
streamStateToString(stream.sendState)),
TransportErrorCode::STREAM_STATE_ERROR);
streamStateToString(stream.sendState))));
}
}
return folly::unit;
}
void sendAckSMHandler(
folly::Expected<folly::Unit, QuicError> sendAckSMHandler(
QuicStreamState& stream,
const WriteStreamFrame& ackedFrame) {
switch (stream.sendState) {
@ -190,17 +194,17 @@ void sendAckSMHandler(
break;
}
case StreamSendState::Invalid: {
throw QuicTransportException(
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_STATE_ERROR,
folly::to<std::string>(
"Invalid transition from state=",
streamStateToString(stream.sendState)),
TransportErrorCode::STREAM_STATE_ERROR);
break;
streamStateToString(stream.sendState))));
}
}
return folly::unit;
}
void sendRstAckSMHandler(
folly::Expected<folly::Unit, QuicError> sendRstAckSMHandler(
QuicStreamState& stream,
folly::Optional<uint64_t> reliableSize) {
switch (stream.sendState) {
@ -232,13 +236,14 @@ void sendRstAckSMHandler(
}
case StreamSendState::Open:
case StreamSendState::Invalid: {
throw QuicTransportException(
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_STATE_ERROR,
folly::to<std::string>(
"Invalid transition from state=",
streamStateToString(stream.sendState)),
TransportErrorCode::STREAM_STATE_ERROR);
streamStateToString(stream.sendState))));
}
}
return folly::unit;
}
} // namespace quic
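// Illustrative sketch of a reliable reset, assuming a QuicStreamState
// `stream`: a reliableSize of 5 keeps bytes 0-4 eligible for retransmission
// while the rest of the send state is torn down, and any failure now arrives
// through the returned Expected.
//
//   auto rstResult =
//       sendRstSMHandler(stream, GenericApplicationErrorCode::UNKNOWN, 5);
//   if (rstResult.hasError()) {
//     // surface rstResult.error() as a connection error
//   }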

View File

@ -11,21 +11,20 @@
#include <quic/state/stream/StreamStateFunctions.h>
namespace quic {
void sendStopSendingSMHandler(
[[nodiscard]] folly::Expected<folly::Unit, QuicError> sendStopSendingSMHandler(
QuicStreamState& stream,
const StopSendingFrame& frame);
void sendRstSMHandler(
[[nodiscard]] folly::Expected<folly::Unit, QuicError> sendRstSMHandler(
QuicStreamState& stream,
ApplicationErrorCode errorCode,
const Optional<uint64_t>& reliableSize = folly::none);
void sendAckSMHandler(
[[nodiscard]] folly::Expected<folly::Unit, QuicError> sendAckSMHandler(
QuicStreamState& stream,
const WriteStreamFrame& ackedFrame);
void sendRstAckSMHandler(
[[nodiscard]] folly::Expected<folly::Unit, QuicError> sendRstAckSMHandler(
QuicStreamState& stream,
folly::Optional<uint64_t> reliableSize);

View File

@ -10,12 +10,15 @@
#include <quic/flowcontrol/QuicFlowController.h>
namespace quic {
void resetQuicStream(
folly::Expected<folly::Unit, QuicError> resetQuicStream(
QuicStreamState& stream,
ApplicationErrorCode error,
Optional<uint64_t> reliableSize) {
updateFlowControlOnResetStream(stream, reliableSize);
auto updateResult = updateFlowControlOnResetStream(stream, reliableSize);
if (!updateResult) {
return folly::makeUnexpected(updateResult.error());
}
if (reliableSize && *reliableSize > 0) {
stream.reliableSizeToPeer = *reliableSize;
stream.removeFromRetransmissionBufStartingAtOffset(*reliableSize);
@ -43,26 +46,30 @@ void resetQuicStream(
}
stream.conn.streamManager->updateReadableStreams(stream);
stream.conn.streamManager->updateWritableStreams(stream);
return folly::unit;
}
void onResetQuicStream(QuicStreamState& stream, const RstStreamFrame& frame) {
folly::Expected<folly::Unit, QuicError> onResetQuicStream(
QuicStreamState& stream,
const RstStreamFrame& frame) {
if (stream.finalReadOffset &&
stream.finalReadOffset.value() != frame.finalSize) {
throw QuicTransportException(
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_STATE_ERROR,
"Read offset mismatch, " +
folly::to<std::string>(stream.finalReadOffset.value()) +
" != " + folly::to<std::string>(frame.finalSize),
TransportErrorCode::STREAM_STATE_ERROR);
" != " + folly::to<std::string>(frame.finalSize)));
}
if (stream.streamReadError &&
stream.streamReadError.value().asApplicationErrorCode() &&
*stream.streamReadError.value().asApplicationErrorCode() !=
frame.errorCode) {
throw QuicTransportException(
return folly::makeUnexpected(QuicError(
TransportErrorCode::STREAM_STATE_ERROR,
"Reset error code mismatch, " +
toString(stream.streamReadError.value()) +
" != " + toString(frame.errorCode),
TransportErrorCode::STREAM_STATE_ERROR);
" != " + toString(frame.errorCode)));
}
if (stream.reliableSizeFromPeer && frame.reliableSize &&
*frame.reliableSize > *stream.reliableSizeFromPeer) {
@ -70,15 +77,15 @@ void onResetQuicStream(QuicStreamState& stream, const RstStreamFrame& frame) {
// than before, but not to send one with a higher offset than before. Due
// to reordering, we may receive a RESET_STREAM_AT frame with a higher
// offset than before. In this case, we should ignore the frame.
return;
return folly::unit;
}
stream.reliableSizeFromPeer =
frame.reliableSize.hasValue() ? *frame.reliableSize : 0;
// Mark the EOF offset:
if (stream.maxOffsetObserved > frame.finalSize) {
throw QuicTransportException(
"Reset in middle of stream", TransportErrorCode::FINAL_SIZE_ERROR);
return folly::makeUnexpected(QuicError(
TransportErrorCode::FINAL_SIZE_ERROR, "Reset in middle of stream"));
}
// Drop non-reliable data:
stream.removeFromReadBufferStartingAtOffset(*stream.reliableSizeFromPeer);
@ -94,14 +101,21 @@ void onResetQuicStream(QuicStreamState& stream, const RstStreamFrame& frame) {
*frame.reliableSize == 0 ||
isAllDataReceivedUntil(stream, *frame.reliableSize - 1);
if (!appReadAllBytes && allReliableBytesReceived) {
updateFlowControlOnStreamData(
auto flowControlResult = updateFlowControlOnStreamData(
stream, stream.maxOffsetObserved, frame.finalSize);
if (!flowControlResult) {
return folly::makeUnexpected(flowControlResult.error());
}
stream.maxOffsetObserved = frame.finalSize;
updateFlowControlOnReceiveReset(stream, Clock::now());
auto result = updateFlowControlOnReceiveReset(stream, Clock::now());
if (!result) {
return folly::makeUnexpected(result.error());
}
}
stream.conn.streamManager->updateReadableStreams(stream);
stream.conn.streamManager->updateWritableStreams(stream);
QUIC_STATS(stream.conn.statsCallback, onQuicStreamReset, frame.errorCode);
return folly::unit;
}
bool isAllDataReceived(const QuicStreamState& stream) {

View File

@ -12,13 +12,15 @@
namespace quic {
// Common operations to conduct on QuicStreamState when send reset on it
void resetQuicStream(
[[nodiscard]] folly::Expected<folly::Unit, QuicError> resetQuicStream(
QuicStreamState& stream,
ApplicationErrorCode error,
Optional<uint64_t> reliableSize = folly::none);
// Common operations to conduct on QuicStreamState when receive reset on it
void onResetQuicStream(QuicStreamState& stream, const RstStreamFrame& frame);
[[nodiscard]] folly::Expected<folly::Unit, QuicError> onResetQuicStream(
QuicStreamState& stream,
const RstStreamFrame& frame);
bool isAllDataReceived(const QuicStreamState& stream);

View File

@ -28,13 +28,18 @@ TEST_F(StreamStateFunctionsTests, BasicResetTest) {
FizzServerQuicHandshakeContext::Builder().build());
StreamId streamId = 0xbaad;
QuicStreamState stream(streamId, conn);
appendDataToReadBuffer(
stream, StreamBuffer(folly::IOBuf::copyBuffer("It is a hotdog!"), 0));
appendDataToReadBuffer(
stream,
StreamBuffer(folly::IOBuf::copyBuffer(" It is not a hotdog."), 15));
writeDataToQuicStream(
stream, folly::IOBuf::copyBuffer("What is it then?"), false);
ASSERT_FALSE(
appendDataToReadBuffer(
stream, StreamBuffer(folly::IOBuf::copyBuffer("It is a hotdog!"), 0))
.hasError());
ASSERT_FALSE(
appendDataToReadBuffer(
stream,
StreamBuffer(folly::IOBuf::copyBuffer(" It is not a hotdog."), 15))
.hasError());
ASSERT_FALSE(writeDataToQuicStream(
stream, folly::IOBuf::copyBuffer("What is it then?"), false)
.hasError());
std::string retxBufData = "How would I know?";
Buf retxBuf = folly::IOBuf::copyBuffer(retxBufData);
@ -45,7 +50,8 @@ TEST_F(StreamStateFunctionsTests, BasicResetTest) {
auto currentReadOffset = stream.currentReadOffset;
EXPECT_TRUE(stream.writable());
sendRstSMHandler(stream, GenericApplicationErrorCode::UNKNOWN);
ASSERT_FALSE(sendRstSMHandler(stream, GenericApplicationErrorCode::UNKNOWN)
.hasError());
// Something are cleared:
EXPECT_TRUE(stream.writeBuffer.empty());
@ -64,13 +70,18 @@ TEST_F(StreamStateFunctionsTests, BasicReliableResetTest) {
FizzServerQuicHandshakeContext::Builder().build());
StreamId streamId = 0xbaad;
QuicStreamState stream(streamId, conn);
appendDataToReadBuffer(
stream, StreamBuffer(folly::IOBuf::copyBuffer("It is a hotdog!"), 0));
appendDataToReadBuffer(
stream,
StreamBuffer(folly::IOBuf::copyBuffer(" It is not a hotdog."), 15));
writeDataToQuicStream(
stream, folly::IOBuf::copyBuffer("What is it then?"), false);
ASSERT_FALSE(
appendDataToReadBuffer(
stream, StreamBuffer(folly::IOBuf::copyBuffer("It is a hotdog!"), 0))
.hasError());
ASSERT_FALSE(
appendDataToReadBuffer(
stream,
StreamBuffer(folly::IOBuf::copyBuffer(" It is not a hotdog."), 15))
.hasError());
ASSERT_FALSE(writeDataToQuicStream(
stream, folly::IOBuf::copyBuffer("What is it then?"), false)
.hasError());
std::string retxBufData = "How would I know?";
Buf retxBuf = folly::IOBuf::copyBuffer(retxBufData);
@ -81,7 +92,8 @@ TEST_F(StreamStateFunctionsTests, BasicReliableResetTest) {
auto currentReadOffset = stream.currentReadOffset;
EXPECT_TRUE(stream.writable());
sendRstSMHandler(stream, GenericApplicationErrorCode::UNKNOWN, 5);
ASSERT_FALSE(sendRstSMHandler(stream, GenericApplicationErrorCode::UNKNOWN, 5)
.hasError());
// The writeBuffer is going to have bytes 0-4 because the reliableSize is 5.
EXPECT_EQ(stream.writeBuffer.chainLength(), 5);
@ -115,10 +127,13 @@ TEST_F(StreamStateFunctionsTests, IsAllDataReceivedReadBufferHasHole) {
StreamId id = 3;
QuicStreamState stream(id, conn);
stream.currentReadOffset = 100;
appendDataToReadBuffer(
stream,
StreamBuffer(
folly::IOBuf::copyBuffer("Your read buffer has a hole"), 150, true));
ASSERT_FALSE(appendDataToReadBuffer(
stream,
StreamBuffer(
folly::IOBuf::copyBuffer("Your read buffer has a hole"),
150,
true))
.hasError());
EXPECT_FALSE(isAllDataReceived(stream));
}
@ -128,9 +143,12 @@ TEST_F(StreamStateFunctionsTests, IsAllDataReceivedReadBufferNoHoleNoFin) {
StreamId id = 3;
QuicStreamState stream(id, conn);
stream.currentReadOffset = 100;
appendDataToReadBuffer(
stream,
StreamBuffer(folly::IOBuf::copyBuffer("Your haven't seen FIN yet"), 100));
ASSERT_FALSE(
appendDataToReadBuffer(
stream,
StreamBuffer(
folly::IOBuf::copyBuffer("Your haven't seen FIN yet"), 100))
.hasError());
EXPECT_FALSE(isAllDataReceived(stream));
}
@ -140,8 +158,9 @@ TEST_F(StreamStateFunctionsTests, IsAllDataReceivedReadBufferEmptyBufferFin) {
StreamId id = 3;
QuicStreamState stream(id, conn);
stream.currentReadOffset = 100;
appendDataToReadBuffer(
stream, StreamBuffer(folly::IOBuf::create(0), 100, true));
ASSERT_FALSE(appendDataToReadBuffer(
stream, StreamBuffer(folly::IOBuf::create(0), 100, true))
.hasError());
EXPECT_TRUE(isAllDataReceived(stream));
}
@ -151,10 +170,12 @@ TEST_F(StreamStateFunctionsTests, IsAllDataReceivedReadBufferBufferFin) {
StreamId id = 3;
QuicStreamState stream(id, conn);
stream.currentReadOffset = 100;
appendDataToReadBuffer(
stream,
StreamBuffer(
folly::IOBuf::copyBuffer("you may say im a dreamer"), 100, true));
ASSERT_FALSE(
appendDataToReadBuffer(
stream,
StreamBuffer(
folly::IOBuf::copyBuffer("you may say im a dreamer"), 100, true))
.hasError());
EXPECT_TRUE(isAllDataReceived(stream));
}
@ -164,14 +185,20 @@ TEST_F(StreamStateFunctionsTests, IsAllDataReceivedMultipleStreamDataNoHole) {
StreamId id = 3;
QuicStreamState stream(id, conn);
stream.currentReadOffset = 100;
appendDataToReadBuffer(
stream, StreamBuffer(folly::IOBuf::copyBuffer("0123456789"), 100));
appendDataToReadBuffer(
stream,
StreamBuffer(folly::IOBuf::copyBuffer("01234567890123456789"), 110));
appendDataToReadBuffer(
stream,
StreamBuffer(folly::IOBuf::copyBuffer("Counting is hard"), 130, true));
ASSERT_FALSE(
appendDataToReadBuffer(
stream, StreamBuffer(folly::IOBuf::copyBuffer("0123456789"), 100))
.hasError());
ASSERT_FALSE(
appendDataToReadBuffer(
stream,
StreamBuffer(folly::IOBuf::copyBuffer("01234567890123456789"), 110))
.hasError());
ASSERT_FALSE(
appendDataToReadBuffer(
stream,
StreamBuffer(folly::IOBuf::copyBuffer("Counting is hard"), 130, true))
.hasError());
EXPECT_TRUE(isAllDataReceived(stream));
}
@ -181,14 +208,20 @@ TEST_F(StreamStateFunctionsTests, IsAllDataReceivedMultipleStreamDataHasHole) {
StreamId id = 3;
QuicStreamState stream(id, conn);
stream.currentReadOffset = 100;
appendDataToReadBuffer(
stream, StreamBuffer(folly::IOBuf::copyBuffer("0123456789"), 100));
appendDataToReadBuffer(
stream,
StreamBuffer(folly::IOBuf::copyBuffer("01234567890123456789"), 115));
appendDataToReadBuffer(
stream,
StreamBuffer(folly::IOBuf::copyBuffer("Counting is hard"), 130, true));
ASSERT_FALSE(
appendDataToReadBuffer(
stream, StreamBuffer(folly::IOBuf::copyBuffer("0123456789"), 100))
.hasError());
ASSERT_FALSE(
appendDataToReadBuffer(
stream,
StreamBuffer(folly::IOBuf::copyBuffer("01234567890123456789"), 115))
.hasError());
ASSERT_FALSE(
appendDataToReadBuffer(
stream,
StreamBuffer(folly::IOBuf::copyBuffer("Counting is hard"), 130, true))
.hasError());
EXPECT_FALSE(isAllDataReceived(stream));
}
@ -211,14 +244,18 @@ TEST_F(StreamStateFunctionsTests, SendReset) {
QuicStreamState stream(id, conn);
auto initialConnWindow = getSendConnFlowControlBytesAPI(conn);
EXPECT_EQ(initialConnWindow, 1024);
writeDataToQuicStream(stream, folly::IOBuf::copyBuffer("hello"), true);
ASSERT_FALSE(
writeDataToQuicStream(stream, folly::IOBuf::copyBuffer("hello"), true)
.hasError());
EXPECT_EQ(conn.flowControlState.sumCurStreamBufferLen, 5);
EXPECT_EQ(getSendConnFlowControlBytesAPI(conn), initialConnWindow - 5);
appendDataToReadBuffer(
stream, StreamBuffer(folly::IOBuf::copyBuffer("hi"), 0));
ASSERT_FALSE(appendDataToReadBuffer(
stream, StreamBuffer(folly::IOBuf::copyBuffer("hi"), 0))
.hasError());
EXPECT_FALSE(stream.writeBuffer.empty());
EXPECT_FALSE(stream.readBuffer.empty());
resetQuicStream(stream, GenericApplicationErrorCode::UNKNOWN);
ASSERT_FALSE(
resetQuicStream(stream, GenericApplicationErrorCode::UNKNOWN).hasError());
EXPECT_EQ(getSendConnFlowControlBytesAPI(conn), initialConnWindow);
EXPECT_TRUE(stream.writeBuffer.empty());
}
@ -230,21 +267,25 @@ TEST_F(StreamStateFunctionsTests, SendResetDSRStream) {
StreamId id = 1;
QuicStreamState stream(id, conn);
auto initialConnWindow = getSendConnFlowControlBytesAPI(conn);
writeDataToQuicStream(stream, folly::IOBuf::copyBuffer("aloha"), false);
ASSERT_FALSE(
writeDataToQuicStream(stream, folly::IOBuf::copyBuffer("aloha"), false)
.hasError());
auto mockDSRSender = std::make_unique<MockDSRPacketizationRequestSender>();
EXPECT_CALL(*mockDSRSender, release()).Times(1);
stream.flowControlState.peerAdvertisedMaxOffset =
std::numeric_limits<uint64_t>::max();
stream.dsrSender = std::move(mockDSRSender);
BufferMeta bufMeta(2000);
writeBufMetaToQuicStream(stream, bufMeta, true);
ASSERT_FALSE(writeBufMetaToQuicStream(stream, bufMeta, true).hasError());
EXPECT_EQ(conn.flowControlState.sumCurStreamBufferLen, 5 + 2000);
EXPECT_EQ(getSendConnFlowControlBytesAPI(conn), initialConnWindow - 5 - 2000);
appendDataToReadBuffer(
stream, StreamBuffer(folly::IOBuf::copyBuffer("hi"), 0));
ASSERT_FALSE(appendDataToReadBuffer(
stream, StreamBuffer(folly::IOBuf::copyBuffer("hi"), 0))
.hasError());
EXPECT_FALSE(stream.writeBuffer.empty());
EXPECT_FALSE(stream.readBuffer.empty());
resetQuicStream(stream, GenericApplicationErrorCode::UNKNOWN);
ASSERT_FALSE(
resetQuicStream(stream, GenericApplicationErrorCode::UNKNOWN).hasError());
EXPECT_EQ(getSendConnFlowControlBytesAPI(conn), initialConnWindow);
EXPECT_TRUE(stream.streamWriteError.hasValue());
EXPECT_TRUE(stream.writeBuffer.empty());
@ -257,7 +298,9 @@ TEST_F(StreamStateFunctionsTests, ResetNoFlowControlGenerated) {
FizzServerQuicHandshakeContext::Builder().build());
StreamId id = 1;
QuicStreamState stream(id, conn);
writeDataToQuicStream(stream, folly::IOBuf::copyBuffer("hello"), true);
ASSERT_FALSE(
writeDataToQuicStream(stream, folly::IOBuf::copyBuffer("hello"), true)
.hasError());
EXPECT_GT(conn.flowControlState.sumCurStreamBufferLen, 0);
RstStreamFrame rst(id, GenericApplicationErrorCode::UNKNOWN, 90);
@ -270,7 +313,7 @@ TEST_F(StreamStateFunctionsTests, ResetNoFlowControlGenerated) {
conn.flowControlState.sumCurReadOffset = 80;
conn.flowControlState.windowSize = 10000;
onResetQuicStream(stream, std::move(rst));
ASSERT_FALSE(onResetQuicStream(stream, std::move(rst)).hasError());
EXPECT_EQ(stream.currentReadOffset, 90);
EXPECT_EQ(conn.flowControlState.sumCurReadOffset, 90);
EXPECT_FALSE(conn.pendingEvents.connWindowUpdate);
@ -284,7 +327,9 @@ TEST_F(StreamStateFunctionsTests, ResetFlowControlGenerated) {
StreamId id = 1;
QuicStreamState stream(id, conn);
writeDataToQuicStream(stream, folly::IOBuf::copyBuffer("hello"), true);
ASSERT_FALSE(
writeDataToQuicStream(stream, folly::IOBuf::copyBuffer("hello"), true)
.hasError());
EXPECT_GT(conn.flowControlState.sumCurStreamBufferLen, 0);
RstStreamFrame rst(id, GenericApplicationErrorCode::UNKNOWN, 100);
stream.currentReadOffset = 80;
@ -296,7 +341,7 @@ TEST_F(StreamStateFunctionsTests, ResetFlowControlGenerated) {
conn.flowControlState.sumCurReadOffset = 80;
conn.flowControlState.windowSize = 100;
onResetQuicStream(stream, std::move(rst));
ASSERT_FALSE(onResetQuicStream(stream, std::move(rst)).hasError());
EXPECT_EQ(stream.currentReadOffset, 100);
EXPECT_EQ(conn.flowControlState.sumCurReadOffset, 100);
EXPECT_TRUE(conn.pendingEvents.connWindowUpdate);
@ -322,8 +367,12 @@ TEST_F(StreamStateFunctionsTests, ResetOffsetNotMatch) {
stream.maxOffsetObserved = 100;
stream.finalReadOffset = 100;
stream.flowControlState.advertisedMaxOffset = 300;
EXPECT_THROW(
onResetQuicStream(stream, std::move(rst)), QuicTransportException);
auto result = onResetQuicStream(stream, std::move(rst));
ASSERT_TRUE(result.hasError());
ASSERT_NE(result.error().code.asTransportErrorCode(), nullptr);
EXPECT_EQ(
*result.error().code.asTransportErrorCode(),
TransportErrorCode::STREAM_STATE_ERROR);
}
TEST_F(StreamStateFunctionsTests, ResetFinalSizeChange) {
@ -334,12 +383,12 @@ TEST_F(StreamStateFunctionsTests, ResetFinalSizeChange) {
stream.finalReadOffset = 11;
stream.streamReadError = GenericApplicationErrorCode::UNKNOWN;
RstStreamFrame rst(id, GenericApplicationErrorCode::UNKNOWN, 10);
try {
onResetQuicStream(stream, rst);
FAIL() << "Should throw QuicTransportException";
} catch (QuicTransportException& exc) {
EXPECT_EQ(exc.errorCode(), TransportErrorCode::STREAM_STATE_ERROR);
}
auto result = onResetQuicStream(stream, rst);
ASSERT_TRUE(result.hasError());
ASSERT_NE(result.error().code.asTransportErrorCode(), nullptr);
EXPECT_EQ(
*result.error().code.asTransportErrorCode(),
TransportErrorCode::STREAM_STATE_ERROR);
}
TEST_F(StreamStateFunctionsTests, ResetErrorCodeChange) {
@ -350,12 +399,12 @@ TEST_F(StreamStateFunctionsTests, ResetErrorCodeChange) {
stream.finalReadOffset = 10;
stream.streamReadError = GenericApplicationErrorCode::UNKNOWN + 1;
RstStreamFrame rst(id, GenericApplicationErrorCode::UNKNOWN, 10);
try {
onResetQuicStream(stream, rst);
FAIL() << "Should throw QuicTransportException";
} catch (QuicTransportException& exc) {
EXPECT_EQ(exc.errorCode(), TransportErrorCode::STREAM_STATE_ERROR);
}
auto result = onResetQuicStream(stream, rst);
ASSERT_TRUE(result.hasError());
ASSERT_NE(result.error().code.asTransportErrorCode(), nullptr);
EXPECT_EQ(
*result.error().code.asTransportErrorCode(),
TransportErrorCode::STREAM_STATE_ERROR);
}
TEST_F(StreamStateFunctionsTests, ResetOffsetLessThanMaxObserved) {
@ -367,8 +416,12 @@ TEST_F(StreamStateFunctionsTests, ResetOffsetLessThanMaxObserved) {
stream.currentReadOffset = 20;
stream.maxOffsetObserved = 100;
stream.flowControlState.advertisedMaxOffset = 300;
EXPECT_THROW(
onResetQuicStream(stream, std::move(rst)), QuicTransportException);
auto result = onResetQuicStream(stream, std::move(rst));
ASSERT_TRUE(result.hasError());
ASSERT_NE(result.error().code.asTransportErrorCode(), nullptr);
EXPECT_EQ(
*result.error().code.asTransportErrorCode(),
TransportErrorCode::FINAL_SIZE_ERROR);
}
TEST_F(StreamStateFunctionsTests, ResetOffsetGreaterThanStreamFlowControl) {
@ -380,8 +433,12 @@ TEST_F(StreamStateFunctionsTests, ResetOffsetGreaterThanStreamFlowControl) {
stream.currentReadOffset = 20;
stream.maxOffsetObserved = 30;
stream.flowControlState.advertisedMaxOffset = 100;
EXPECT_THROW(
onResetQuicStream(stream, std::move(rst)), QuicTransportException);
auto result = onResetQuicStream(stream, std::move(rst));
ASSERT_TRUE(result.hasError());
ASSERT_NE(result.error().code.asTransportErrorCode(), nullptr);
EXPECT_EQ(
*result.error().code.asTransportErrorCode(),
TransportErrorCode::FLOW_CONTROL_ERROR);
}
TEST_F(StreamStateFunctionsTests, ResetOffsetGreaterThanConnFlowControl) {
@ -400,8 +457,12 @@ TEST_F(StreamStateFunctionsTests, ResetOffsetGreaterThanConnFlowControl) {
conn.flowControlState.sumMaxObservedOffset = 30;
conn.flowControlState.advertisedMaxOffset = 100;
conn.flowControlState.windowSize = 100;
EXPECT_THROW(
onResetQuicStream(stream, std::move(rst)), QuicTransportException);
auto result = onResetQuicStream(stream, std::move(rst));
ASSERT_TRUE(result.hasError());
ASSERT_NE(result.error().code.asTransportErrorCode(), nullptr);
EXPECT_EQ(
*result.error().code.asTransportErrorCode(),
TransportErrorCode::FLOW_CONTROL_ERROR);
}
TEST_F(StreamStateFunctionsTests, ResetAfterReadingAllBytesTillFin) {
@ -414,7 +475,7 @@ TEST_F(StreamStateFunctionsTests, ResetAfterReadingAllBytesTillFin) {
stream.finalReadOffset = 100;
stream.maxOffsetObserved = 100;
stream.flowControlState.advertisedMaxOffset = 300;
onResetQuicStream(stream, std::move(rst));
ASSERT_FALSE(onResetQuicStream(stream, std::move(rst)).hasError());
EXPECT_EQ(stream.currentReadOffset, 101);
EXPECT_FALSE(conn.streamManager->hasWindowUpdates());
EXPECT_FALSE(conn.pendingEvents.connWindowUpdate);

View File

@ -46,10 +46,13 @@ std::unique_ptr<QuicServerConnectionState> createConn() {
kDefaultStreamFlowControlWindow;
conn->flowControlState.peerAdvertisedMaxOffset =
kDefaultConnectionFlowControlWindow;
conn->streamManager->setMaxLocalBidirectionalStreams(
kDefaultMaxStreamsBidirectional);
conn->streamManager->setMaxLocalUnidirectionalStreams(
kDefaultMaxStreamsUnidirectional);
CHECK(!conn->streamManager
->setMaxLocalBidirectionalStreams(kDefaultMaxStreamsBidirectional)
.hasError());
CHECK(
!conn->streamManager
->setMaxLocalUnidirectionalStreams(kDefaultMaxStreamsUnidirectional)
.hasError());
return conn;
}
@ -63,7 +66,8 @@ TEST_F(QuicOpenStateTest, ReadStreamDataNotFin) {
bool fin = false;
ReadStreamFrame frame(id, offset, fin);
frame.data = IOBuf::copyBuffer("hey");
receiveReadStreamFrameSMHandler(stream, std::move(frame));
auto result = receiveReadStreamFrameSMHandler(stream, std::move(frame));
ASSERT_FALSE(result.hasError());
EXPECT_TRUE(stream.hasReadableData());
EXPECT_TRUE(stream.hasPeekableData());
EXPECT_EQ(stream.recvState, StreamRecvState::Open);
@ -79,16 +83,17 @@ TEST_F(QuicOpenStateTest, ReadInvalidData) {
// EOF in middle of stream
ReadStreamFrame frame1(id, offset1, fin1);
frame1.data = IOBuf::copyBuffer("hey");
receiveReadStreamFrameSMHandler(stream, std::move(frame1));
auto result1 = receiveReadStreamFrameSMHandler(stream, std::move(frame1));
ASSERT_FALSE(result1.hasError());
EXPECT_EQ(stream.recvState, StreamRecvState::Open);
uint64_t offset2 = 1;
bool fin2 = true;
ReadStreamFrame frame2(id, offset2, fin2);
frame2.data = IOBuf::copyBuffer("e");
EXPECT_THROW(
receiveReadStreamFrameSMHandler(stream, std::move(frame2)),
QuicTransportException);
auto result = receiveReadStreamFrameSMHandler(stream, std::move(frame2));
ASSERT_TRUE(result.hasError());
EXPECT_NE(result.error().code.asTransportErrorCode(), nullptr);
}
TEST_F(QuicOpenStateTest, InvalidEvent) {
@ -96,8 +101,9 @@ TEST_F(QuicOpenStateTest, InvalidEvent) {
StreamId id = 5;
QuicStreamState stream(id, *conn);
RstStreamFrame frame(1, GenericApplicationErrorCode::UNKNOWN, 0);
EXPECT_THROW(
sendRstAckSMHandler(stream, folly::none), QuicTransportException);
auto result = sendRstAckSMHandler(stream, folly::none);
ASSERT_TRUE(result.hasError());
EXPECT_NE(result.error().code.asTransportErrorCode(), nullptr);
}
TEST_F(QuicOpenStateTest, ReceiveStreamFrameWithFIN) {
@ -110,7 +116,9 @@ TEST_F(QuicOpenStateTest, ReceiveStreamFrameWithFIN) {
ReadStreamFrame receivedStreamFrame(stream->id, 100, true);
receivedStreamFrame.data = folly::IOBuf::create(10);
receivedStreamFrame.data->append(10);
receiveReadStreamFrameSMHandler(*stream, std::move(receivedStreamFrame));
auto result =
receiveReadStreamFrameSMHandler(*stream, std::move(receivedStreamFrame));
ASSERT_FALSE(result.hasError());
ASSERT_EQ(stream->recvState, StreamRecvState::Closed);
}
@ -124,7 +132,9 @@ TEST_F(QuicOpenStateTest, ReceiveStreamFrameWithFINReadbuffHole) {
ReadStreamFrame receivedStreamFrame(stream->id, 200, true);
receivedStreamFrame.data = folly::IOBuf::create(10);
receivedStreamFrame.data->append(10);
receiveReadStreamFrameSMHandler(*stream, std::move(receivedStreamFrame));
auto result =
receiveReadStreamFrameSMHandler(*stream, std::move(receivedStreamFrame));
ASSERT_FALSE(result.hasError());
ASSERT_EQ(stream->recvState, StreamRecvState::Open);
}
@ -138,7 +148,9 @@ TEST_F(QuicOpenStateTest, ReceiveStreamFrameWithoutFIN) {
ReadStreamFrame receivedStreamFrame(stream->id, 100, false);
receivedStreamFrame.data = folly::IOBuf::create(10);
receivedStreamFrame.data->append(10);
receiveReadStreamFrameSMHandler(*stream, std::move(receivedStreamFrame));
auto result =
receiveReadStreamFrameSMHandler(*stream, std::move(receivedStreamFrame));
ASSERT_FALSE(result.hasError());
ASSERT_EQ(stream->recvState, StreamRecvState::Open);
}
@ -170,10 +182,12 @@ TEST_F(QuicOpenStateTest, AckStream) {
->packet.frames.front()
.asWriteStreamFrame();
sendAckSMHandler(*stream, streamFrame);
auto result1 = sendAckSMHandler(*stream, streamFrame);
ASSERT_FALSE(result1.hasError());
ASSERT_EQ(stream->sendState, StreamSendState::Closed);
sendAckSMHandler(*stream, streamFrame);
auto result2 = sendAckSMHandler(*stream, streamFrame);
ASSERT_FALSE(result2.hasError());
ASSERT_EQ(stream->sendState, StreamSendState::Closed);
}
@ -219,7 +233,8 @@ TEST_F(QuicOpenStateTest, AckStreamMulti) {
auto& streamFrame3 =
*conn->outstandings.packets[2].packet.frames[0].asWriteStreamFrame();
sendAckSMHandler(*stream, streamFrame3);
auto result1 = sendAckSMHandler(*stream, streamFrame3);
ASSERT_FALSE(result1.hasError());
ASSERT_EQ(stream->sendState, StreamSendState::Open);
ASSERT_EQ(stream->ackedIntervals.front().start, 10);
ASSERT_EQ(stream->ackedIntervals.front().end, 20);
@ -227,7 +242,8 @@ TEST_F(QuicOpenStateTest, AckStreamMulti) {
auto& streamFrame2 =
*conn->outstandings.packets[1].packet.frames[0].asWriteStreamFrame();
sendAckSMHandler(*stream, streamFrame2);
auto result2 = sendAckSMHandler(*stream, streamFrame2);
ASSERT_FALSE(result2.hasError());
ASSERT_EQ(stream->sendState, StreamSendState::Open);
ASSERT_EQ(stream->ackedIntervals.front().start, 5);
ASSERT_EQ(stream->ackedIntervals.front().end, 20);
@ -235,7 +251,8 @@ TEST_F(QuicOpenStateTest, AckStreamMulti) {
auto& streamFrame1 =
*conn->outstandings.packets[0].packet.frames[0].asWriteStreamFrame();
sendAckSMHandler(*stream, streamFrame1);
auto result3 = sendAckSMHandler(*stream, streamFrame1);
ASSERT_FALSE(result3.hasError());
ASSERT_EQ(stream->sendState, StreamSendState::Open);
ASSERT_EQ(stream->ackedIntervals.front().start, 0);
ASSERT_EQ(stream->ackedIntervals.front().end, 20);
@ -283,7 +300,8 @@ TEST_F(QuicOpenStateTest, RetxBufferSortedAfterAck) {
auto streamFrame = *conn->outstandings.packets[std::rand() % 3]
.packet.frames.front()
.asWriteStreamFrame();
sendAckSMHandler(*stream, streamFrame);
auto result = sendAckSMHandler(*stream, streamFrame);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(2, stream->retransmissionBuffer.size());
}
@ -300,7 +318,8 @@ TEST_F(QuicResetSentStateTest, RstAck) {
stream.readBuffer.emplace_back(
folly::IOBuf::copyBuffer("One more thing"), 0xABCD, false);
RstStreamFrame frame(id, GenericApplicationErrorCode::UNKNOWN, 0);
sendRstAckSMHandler(stream, folly::none);
auto result = sendRstAckSMHandler(stream, folly::none);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.sendState, StreamSendState::Closed);
EXPECT_FALSE(stream.finalReadOffset);
@ -324,7 +343,8 @@ TEST_F(QuicResetSentStateTest, ReliableRstAckNoReduction) {
folly::IOBuf::copyBuffer("One more thing"), 0xABCD, false);
RstStreamFrame frame(id, GenericApplicationErrorCode::UNKNOWN, 0);
stream.updateAckedIntervals(0, 3, false);
sendRstAckSMHandler(stream, 5);
auto result = sendRstAckSMHandler(stream, 5);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.sendState, StreamSendState::Closed);
EXPECT_FALSE(stream.finalReadOffset);
@ -348,7 +368,8 @@ TEST_F(QuicResetSentStateTest, ReliableRstAckReduction) {
folly::IOBuf::copyBuffer("One more thing"), 0xABCD, false);
RstStreamFrame frame(id, GenericApplicationErrorCode::UNKNOWN, 0);
stream.updateAckedIntervals(0, 1, false);
sendRstAckSMHandler(stream, 1);
auto result = sendRstAckSMHandler(stream, 1);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.sendState, StreamSendState::Closed);
EXPECT_FALSE(stream.finalReadOffset);
@ -370,7 +391,8 @@ TEST_F(QuicResetSentStateTest, ReliableRstAckFirstTime) {
stream.readBuffer.emplace_back(
folly::IOBuf::copyBuffer("One more thing"), 0xABCD, false);
RstStreamFrame frame(id, GenericApplicationErrorCode::UNKNOWN, 0);
sendRstAckSMHandler(stream, 1);
auto result = sendRstAckSMHandler(stream, 1);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.sendState, StreamSendState::ResetSent);
EXPECT_FALSE(stream.finalReadOffset);
@ -393,7 +415,8 @@ TEST_F(QuicResetSentStateTest, RstAfterReliableRst) {
stream.readBuffer.emplace_back(
folly::IOBuf::copyBuffer("One more thing"), 0xABCD, false);
RstStreamFrame frame(id, GenericApplicationErrorCode::UNKNOWN, 0);
sendRstAckSMHandler(stream, folly::none);
auto result = sendRstAckSMHandler(stream, folly::none);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.sendState, StreamSendState::Closed);
EXPECT_FALSE(stream.finalReadOffset);
@ -409,7 +432,8 @@ TEST_F(QuicResetSentStateTest, ResetSentToClosedTransition1) {
QuicStreamState stream(id, *conn);
stream.sendState = StreamSendState::ResetSent;
stream.updateAckedIntervals(0, 5, false);
sendRstAckSMHandler(stream, 5);
auto result = sendRstAckSMHandler(stream, 5);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.sendState, StreamSendState::Closed);
}
@ -421,7 +445,8 @@ TEST_F(QuicResetSentStateTest, ResetSentToClosedTransition2) {
QuicStreamState stream(id, *conn);
stream.sendState = StreamSendState::ResetSent;
stream.updateAckedIntervals(0, 4, false);
sendRstAckSMHandler(stream, 5);
auto result = sendRstAckSMHandler(stream, 5);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.sendState, StreamSendState::ResetSent);
}
@ -441,7 +466,8 @@ TEST_F(QuicResetSentStateTest, ResetSentToClosedTransition3) {
std::forward_as_tuple(0),
std::forward_as_tuple(std::make_unique<WriteStreamBuffer>(
ChainedByteRangeHead(buf), 0, false)));
sendAckSMHandler(stream, streamFrame);
auto result = sendAckSMHandler(stream, streamFrame);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.sendState, StreamSendState::Closed);
}
@ -462,7 +488,8 @@ TEST_F(QuicResetSentStateTest, ResetSentToClosedTransition4) {
std::forward_as_tuple(0),
std::forward_as_tuple(std::make_unique<WriteStreamBuffer>(
ChainedByteRangeHead(buf), 0, false)));
sendAckSMHandler(stream, streamFrame);
auto result = sendAckSMHandler(stream, streamFrame);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.sendState, StreamSendState::ResetSent);
}
@ -474,7 +501,8 @@ TEST_F(QuicClosedStateTest, RstAck) {
QuicStreamState stream(id, *conn);
stream.sendState = StreamSendState::Closed;
RstStreamFrame frame(id, GenericApplicationErrorCode::UNKNOWN, 0);
sendRstAckSMHandler(stream, folly::none);
auto result = sendRstAckSMHandler(stream, folly::none);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.sendState, StreamSendState::Closed);
}
@ -492,7 +520,9 @@ TEST_F(QuicHalfClosedLocalStateTest, ReceiveStreamFrameWithFIN) {
ReadStreamFrame receivedStreamFrame(stream->id, 100, true);
receivedStreamFrame.data = folly::IOBuf::create(10);
receivedStreamFrame.data->append(10);
receiveReadStreamFrameSMHandler(*stream, std::move(receivedStreamFrame));
auto result =
receiveReadStreamFrameSMHandler(*stream, std::move(receivedStreamFrame));
ASSERT_FALSE(result.hasError());
ASSERT_EQ(stream->sendState, StreamSendState::Closed);
ASSERT_EQ(stream->recvState, StreamRecvState::Closed);
}
@ -509,7 +539,9 @@ TEST_F(QuicHalfClosedLocalStateTest, ReceiveStreamFrameWithFINReadbuffHole) {
ReadStreamFrame receivedStreamFrame(stream->id, 200, true);
receivedStreamFrame.data = folly::IOBuf::create(10);
receivedStreamFrame.data->append(10);
receiveReadStreamFrameSMHandler(*stream, std::move(receivedStreamFrame));
auto result =
receiveReadStreamFrameSMHandler(*stream, std::move(receivedStreamFrame));
ASSERT_FALSE(result.hasError());
ASSERT_EQ(stream->sendState, StreamSendState::Closed);
ASSERT_EQ(stream->recvState, StreamRecvState::Open);
}
@ -526,7 +558,9 @@ TEST_F(QuicHalfClosedLocalStateTest, ReceiveStreamFrameWithoutFIN) {
ReadStreamFrame receivedStreamFrame(stream->id, 100, false);
receivedStreamFrame.data = folly::IOBuf::create(10);
receivedStreamFrame.data->append(10);
receiveReadStreamFrameSMHandler(*stream, std::move(receivedStreamFrame));
auto result =
receiveReadStreamFrameSMHandler(*stream, std::move(receivedStreamFrame));
ASSERT_FALSE(result.hasError());
ASSERT_EQ(stream->sendState, StreamSendState::Closed);
ASSERT_EQ(stream->recvState, StreamRecvState::Open);
@ -566,10 +600,12 @@ TEST_F(QuicHalfClosedRemoteStateTest, AckStream) {
->packet.frames.front()
.asWriteStreamFrame();
sendAckSMHandler(*stream, streamFrame);
auto result = sendAckSMHandler(*stream, streamFrame);
ASSERT_FALSE(result.hasError());
ASSERT_EQ(stream->sendState, StreamSendState::Closed);
sendAckSMHandler(*stream, streamFrame);
result = sendAckSMHandler(*stream, streamFrame);
ASSERT_FALSE(result.hasError());
ASSERT_EQ(stream->sendState, StreamSendState::Closed);
}
@ -579,7 +615,8 @@ TEST_F(QuicSendResetTest, FromOpen) {
auto conn = createConn();
StreamId id = 5;
QuicStreamState stream(id, *conn);
sendRstSMHandler(stream, GenericApplicationErrorCode::UNKNOWN);
auto result = sendRstSMHandler(stream, GenericApplicationErrorCode::UNKNOWN);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.sendState, StreamSendState::ResetSent);
}
@ -590,7 +627,8 @@ TEST_F(QuicSendResetTest, FromHalfCloseRemote) {
stream.sendState = StreamSendState::Open;
stream.recvState = StreamRecvState::Closed;
sendRstSMHandler(stream, GenericApplicationErrorCode::UNKNOWN);
auto result = sendRstSMHandler(stream, GenericApplicationErrorCode::UNKNOWN);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.sendState, StreamSendState::ResetSent);
}
@ -600,7 +638,8 @@ TEST_F(QuicSendResetTest, FromHalfCloseLocal) {
QuicStreamState stream(id, *conn);
stream.sendState = StreamSendState::Closed;
stream.recvState = StreamRecvState::Open;
sendRstSMHandler(stream, GenericApplicationErrorCode::UNKNOWN);
auto result = sendRstSMHandler(stream, GenericApplicationErrorCode::UNKNOWN);
ASSERT_FALSE(result.hasError());
// You cannot send a reset after FIN has been acked
EXPECT_EQ(stream.sendState, StreamSendState::Closed);
@ -612,7 +651,8 @@ TEST_F(QuicSendResetTest, FromClosed) {
QuicStreamState stream(id, *conn);
stream.sendState = StreamSendState::Closed;
sendRstSMHandler(stream, GenericApplicationErrorCode::UNKNOWN);
auto result = sendRstSMHandler(stream, GenericApplicationErrorCode::UNKNOWN);
ASSERT_FALSE(result.hasError());
}
TEST_F(QuicSendResetTest, FromResetSent) {
@ -620,7 +660,8 @@ TEST_F(QuicSendResetTest, FromResetSent) {
StreamId id = 5;
QuicStreamState stream(id, *conn);
stream.sendState = StreamSendState::ResetSent;
sendRstSMHandler(stream, GenericApplicationErrorCode::UNKNOWN);
auto result = sendRstSMHandler(stream, GenericApplicationErrorCode::UNKNOWN);
ASSERT_FALSE(result.hasError());
}
class QuicRecvResetTest : public Test {};
@ -631,7 +672,8 @@ TEST_F(QuicRecvResetTest, FromOpen) {
StreamId rstStream = 1;
QuicStreamState stream(id, *conn);
RstStreamFrame rst(rstStream, GenericApplicationErrorCode::UNKNOWN, 100);
receiveRstStreamSMHandler(stream, std::move(rst));
auto result = receiveRstStreamSMHandler(stream, std::move(rst));
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.sendState, StreamSendState::Open);
EXPECT_EQ(stream.recvState, StreamRecvState::Closed);
@ -645,9 +687,9 @@ TEST_F(QuicRecvResetTest, FromOpenReadEOFMismatch) {
QuicStreamState stream(id, *conn);
RstStreamFrame rst(1, GenericApplicationErrorCode::UNKNOWN, 100);
stream.finalReadOffset = 1024;
EXPECT_THROW(
receiveRstStreamSMHandler(stream, std::move(rst)),
QuicTransportException);
auto result = receiveRstStreamSMHandler(stream, std::move(rst));
ASSERT_TRUE(result.hasError());
EXPECT_NE(result.error().code.asTransportErrorCode(), nullptr);
}
TEST_F(QuicRecvResetTest, FromHalfClosedRemoteNoReadOffsetYet) {
@ -656,8 +698,9 @@ TEST_F(QuicRecvResetTest, FromHalfClosedRemoteNoReadOffsetYet) {
QuicStreamState stream(id, *conn);
stream.sendState = StreamSendState::Open;
stream.recvState = StreamRecvState::Closed;
receiveRstStreamSMHandler(
auto result = receiveRstStreamSMHandler(
stream, RstStreamFrame(1, GenericApplicationErrorCode::UNKNOWN, 100));
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.sendState, StreamSendState::Open);
EXPECT_EQ(stream.recvState, StreamRecvState::Closed);
@ -672,8 +715,9 @@ TEST_F(QuicRecvResetTest, FromHalfClosedRemoteReadOffsetMatch) {
stream.recvState = StreamRecvState::Closed;
stream.finalReadOffset = 1024;
receiveRstStreamSMHandler(
auto result = receiveRstStreamSMHandler(
stream, RstStreamFrame(1, GenericApplicationErrorCode::UNKNOWN, 1024));
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.sendState, StreamSendState::Open);
EXPECT_EQ(stream.recvState, StreamRecvState::Closed);
verifyStreamReset(stream, 1024);
@ -686,10 +730,10 @@ TEST_F(QuicRecvResetTest, FromHalfClosedRemoteReadOffsetMismatch) {
stream.sendState = StreamSendState::Open;
stream.recvState = StreamRecvState::Closed;
stream.finalReadOffset = 1024;
EXPECT_THROW(
receiveRstStreamSMHandler(
stream, RstStreamFrame(1, GenericApplicationErrorCode::UNKNOWN, 100)),
QuicTransportException);
auto result = receiveRstStreamSMHandler(
stream, RstStreamFrame(1, GenericApplicationErrorCode::UNKNOWN, 100));
ASSERT_TRUE(result.hasError());
EXPECT_NE(result.error().code.asTransportErrorCode(), nullptr);
}
TEST_F(QuicRecvResetTest, FromHalfClosedLocal) {
@ -698,8 +742,9 @@ TEST_F(QuicRecvResetTest, FromHalfClosedLocal) {
QuicStreamState stream(id, *conn);
stream.sendState = StreamSendState::Closed;
stream.recvState = StreamRecvState::Open;
receiveRstStreamSMHandler(
auto result = receiveRstStreamSMHandler(
stream, RstStreamFrame(1, GenericApplicationErrorCode::UNKNOWN, 200));
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.sendState, StreamSendState::Closed);
EXPECT_EQ(stream.recvState, StreamRecvState::Closed);
verifyStreamReset(stream, 200);
@ -712,10 +757,10 @@ TEST_F(QuicRecvResetTest, FromHalfClosedLocalReadEOFMismatch) {
stream.sendState = StreamSendState::Closed;
stream.recvState = StreamRecvState::Open;
stream.finalReadOffset = 2014;
EXPECT_THROW(
receiveRstStreamSMHandler(
stream, RstStreamFrame(1, GenericApplicationErrorCode::UNKNOWN, 200)),
QuicTransportException);
auto result = receiveRstStreamSMHandler(
stream, RstStreamFrame(1, GenericApplicationErrorCode::UNKNOWN, 200));
ASSERT_TRUE(result.hasError());
EXPECT_NE(result.error().code.asTransportErrorCode(), nullptr);
}
TEST_F(QuicRecvResetTest, FromResetSentNoReadOffsetYet) {
@ -725,8 +770,9 @@ TEST_F(QuicRecvResetTest, FromResetSentNoReadOffsetYet) {
stream.sendState = StreamSendState::ResetSent;
stream.recvState = StreamRecvState::Open;
receiveRstStreamSMHandler(
auto result = receiveRstStreamSMHandler(
stream, RstStreamFrame(1, GenericApplicationErrorCode::UNKNOWN, 200));
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.sendState, StreamSendState::ResetSent);
EXPECT_EQ(stream.recvState, StreamRecvState::Closed);
verifyStreamReset(stream, 200);
@ -740,8 +786,9 @@ TEST_F(QuicRecvResetTest, FromResetSentOffsetMatch) {
stream.recvState = StreamRecvState::Open;
stream.finalReadOffset = 200;
receiveRstStreamSMHandler(
auto result = receiveRstStreamSMHandler(
stream, RstStreamFrame(1, GenericApplicationErrorCode::UNKNOWN, 200));
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.sendState, StreamSendState::ResetSent);
EXPECT_EQ(stream.recvState, StreamRecvState::Closed);
verifyStreamReset(stream, 200);
@ -754,10 +801,10 @@ TEST_F(QuicRecvResetTest, FromResetSentOffsetMismatch) {
stream.sendState = StreamSendState::ResetSent;
stream.recvState = StreamRecvState::Open;
stream.finalReadOffset = 300;
EXPECT_THROW(
receiveRstStreamSMHandler(
stream, RstStreamFrame(1, GenericApplicationErrorCode::UNKNOWN, 200)),
QuicTransportException);
auto result = receiveRstStreamSMHandler(
stream, RstStreamFrame(1, GenericApplicationErrorCode::UNKNOWN, 200));
ASSERT_TRUE(result.hasError());
EXPECT_NE(result.error().code.asTransportErrorCode(), nullptr);
}
TEST_F(QuicRecvResetTest, FromClosedNoReadOffsetYet) {
@ -766,8 +813,9 @@ TEST_F(QuicRecvResetTest, FromClosedNoReadOffsetYet) {
QuicStreamState stream(id, *conn);
stream.sendState = StreamSendState::Closed;
stream.recvState = StreamRecvState::Closed;
receiveRstStreamSMHandler(
auto result = receiveRstStreamSMHandler(
stream, RstStreamFrame(1, GenericApplicationErrorCode::UNKNOWN, 200));
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.sendState, StreamSendState::Closed);
EXPECT_EQ(stream.recvState, StreamRecvState::Closed);
verifyStreamReset(stream, 200);
@ -780,8 +828,9 @@ TEST_F(QuicRecvResetTest, FromClosedOffsetMatch) {
stream.sendState = StreamSendState::Closed;
stream.recvState = StreamRecvState::Closed;
stream.finalReadOffset = 1234;
receiveRstStreamSMHandler(
auto result = receiveRstStreamSMHandler(
stream, RstStreamFrame(1, GenericApplicationErrorCode::UNKNOWN, 1234));
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.sendState, StreamSendState::Closed);
EXPECT_EQ(stream.recvState, StreamRecvState::Closed);
verifyStreamReset(stream, 1234);
@ -794,11 +843,10 @@ TEST_F(QuicRecvResetTest, FromClosedOffsetMismatch) {
stream.sendState = StreamSendState::Closed;
stream.recvState = StreamRecvState::Closed;
stream.finalReadOffset = 123;
EXPECT_THROW(
receiveRstStreamSMHandler(
stream,
RstStreamFrame(1, GenericApplicationErrorCode::UNKNOWN, 1234)),
QuicTransportException);
auto result = receiveRstStreamSMHandler(
stream, RstStreamFrame(1, GenericApplicationErrorCode::UNKNOWN, 1234));
ASSERT_TRUE(result.hasError());
EXPECT_NE(result.error().code.asTransportErrorCode(), nullptr);
}
class QuicReliableResetTransitionTest : public Test {};
@ -809,7 +857,8 @@ TEST_F(QuicReliableResetTransitionTest, FromOpenReliableDataNotYetReceived) {
QuicStreamState stream(id, *conn);
RstStreamFrame rst(id, GenericApplicationErrorCode::UNKNOWN, 100, 10);
stream.currentReadOffset = 9;
receiveRstStreamSMHandler(stream, rst);
auto result = receiveRstStreamSMHandler(stream, rst);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.sendState, StreamSendState::Open);
EXPECT_EQ(stream.recvState, StreamRecvState::Open);
@ -821,7 +870,8 @@ TEST_F(QuicReliableResetTransitionTest, FromOpenReliableDataReceived) {
QuicStreamState stream(id, *conn);
RstStreamFrame rst(id, GenericApplicationErrorCode::UNKNOWN, 100, 10);
stream.currentReadOffset = 10;
receiveRstStreamSMHandler(stream, rst);
auto result = receiveRstStreamSMHandler(stream, rst);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.sendState, StreamSendState::Open);
EXPECT_EQ(stream.recvState, StreamRecvState::Closed);
@ -833,9 +883,10 @@ TEST_F(QuicReliableResetTransitionTest, DataReceivedTillReliableSize) {
QuicStreamState stream(id, *conn);
stream.reliableSizeFromPeer = 10;
stream.currentReadOffset = 1;
receiveReadStreamFrameSMHandler(
auto result = receiveReadStreamFrameSMHandler(
stream,
ReadStreamFrame(id, 1, folly::IOBuf::copyBuffer("999999999"), false));
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.sendState, StreamSendState::Open);
EXPECT_EQ(stream.recvState, StreamRecvState::Closed);
}
@ -846,9 +897,10 @@ TEST_F(QuicReliableResetTransitionTest, DataNotReceivedTillReliableSize) {
QuicStreamState stream(id, *conn);
stream.reliableSizeFromPeer = 10;
stream.currentReadOffset = 1;
receiveReadStreamFrameSMHandler(
auto result = receiveReadStreamFrameSMHandler(
stream,
ReadStreamFrame(id, 1, folly::IOBuf::copyBuffer("99999999"), false));
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.sendState, StreamSendState::Open);
EXPECT_EQ(stream.recvState, StreamRecvState::Open);
}
@ -861,9 +913,10 @@ TEST_F(QuicUnidirectionalStreamTest, OpenInvalidReadStream) {
QuicStreamState stream(id, *conn);
stream.sendState = StreamSendState::Open;
stream.recvState = StreamRecvState::Invalid;
EXPECT_THROW(
receiveReadStreamFrameSMHandler(stream, ReadStreamFrame(id, 1, false)),
QuicTransportException);
auto result =
receiveReadStreamFrameSMHandler(stream, ReadStreamFrame(id, 1, false));
ASSERT_TRUE(result.hasError());
EXPECT_NE(result.error().code.asTransportErrorCode(), nullptr);
}
TEST_F(QuicUnidirectionalStreamTest, OpenInvalidRstStream) {
@ -872,11 +925,10 @@ TEST_F(QuicUnidirectionalStreamTest, OpenInvalidRstStream) {
QuicStreamState stream(id, *conn);
stream.sendState = StreamSendState::Open;
stream.recvState = StreamRecvState::Invalid;
EXPECT_THROW(
receiveRstStreamSMHandler(
stream,
RstStreamFrame(1, GenericApplicationErrorCode::UNKNOWN, 1234)),
QuicTransportException);
auto result = receiveRstStreamSMHandler(
stream, RstStreamFrame(1, GenericApplicationErrorCode::UNKNOWN, 1234));
ASSERT_TRUE(result.hasError());
EXPECT_NE(result.error().code.asTransportErrorCode(), nullptr);
}
TEST_F(QuicUnidirectionalStreamTest, OpenInvalidSendReset) {
@ -886,9 +938,9 @@ TEST_F(QuicUnidirectionalStreamTest, OpenInvalidSendReset) {
stream.sendState = StreamSendState::Invalid;
stream.recvState = StreamRecvState::Open;
EXPECT_THROW(
sendRstSMHandler(stream, GenericApplicationErrorCode::UNKNOWN),
QuicTransportException);
auto result = sendRstSMHandler(stream, GenericApplicationErrorCode::UNKNOWN);
ASSERT_TRUE(result.hasError());
EXPECT_NE(result.error().code.asTransportErrorCode(), nullptr);
}
TEST_F(QuicUnidirectionalStreamTest, OpenInvalidAckStreamFrame) {
@ -898,7 +950,9 @@ TEST_F(QuicUnidirectionalStreamTest, OpenInvalidAckStreamFrame) {
stream.sendState = StreamSendState::Invalid;
stream.recvState = StreamRecvState::Open;
WriteStreamFrame ackedFrame(id, 0, 0, false);
EXPECT_THROW(sendAckSMHandler(stream, ackedFrame), QuicTransportException);
auto result = sendAckSMHandler(stream, ackedFrame);
ASSERT_TRUE(result.hasError());
EXPECT_NE(result.error().code.asTransportErrorCode(), nullptr);
}
TEST_F(QuicUnidirectionalStreamTest, OpenInvalidStopSending) {
@ -907,10 +961,10 @@ TEST_F(QuicUnidirectionalStreamTest, OpenInvalidStopSending) {
QuicStreamState stream(id, *conn);
stream.sendState = StreamSendState::Invalid;
stream.recvState = StreamRecvState::Open;
EXPECT_THROW(
sendStopSendingSMHandler(
stream, StopSendingFrame(id, GenericApplicationErrorCode::UNKNOWN)),
QuicTransportException);
auto result = sendStopSendingSMHandler(
stream, StopSendingFrame(id, GenericApplicationErrorCode::UNKNOWN));
ASSERT_TRUE(result.hasError());
EXPECT_NE(result.error().code.asTransportErrorCode(), nullptr);
}
TEST_F(QuicUnidirectionalStreamTest, ClosedInvalidReadStream) {
@ -919,9 +973,10 @@ TEST_F(QuicUnidirectionalStreamTest, ClosedInvalidReadStream) {
QuicStreamState stream(id, *conn);
stream.sendState = StreamSendState::Open;
stream.recvState = StreamRecvState::Invalid;
EXPECT_THROW(
receiveReadStreamFrameSMHandler(stream, ReadStreamFrame(id, 1, false)),
QuicTransportException);
auto result =
receiveReadStreamFrameSMHandler(stream, ReadStreamFrame(id, 1, false));
ASSERT_TRUE(result.hasError());
EXPECT_NE(result.error().code.asTransportErrorCode(), nullptr);
}
TEST_F(QuicUnidirectionalStreamTest, ClosedInvalidRstStream) {
@ -930,11 +985,10 @@ TEST_F(QuicUnidirectionalStreamTest, ClosedInvalidRstStream) {
QuicStreamState stream(id, *conn);
stream.sendState = StreamSendState::Open;
stream.recvState = StreamRecvState::Invalid;
EXPECT_THROW(
receiveRstStreamSMHandler(
stream,
RstStreamFrame(1, GenericApplicationErrorCode::UNKNOWN, 1234)),
QuicTransportException);
auto result = receiveRstStreamSMHandler(
stream, RstStreamFrame(1, GenericApplicationErrorCode::UNKNOWN, 1234));
ASSERT_TRUE(result.hasError());
EXPECT_NE(result.error().code.asTransportErrorCode(), nullptr);
}
TEST_F(QuicUnidirectionalStreamTest, ClosedInvalidSendReset) {
@ -943,9 +997,9 @@ TEST_F(QuicUnidirectionalStreamTest, ClosedInvalidSendReset) {
QuicStreamState stream(id, *conn);
stream.sendState = StreamSendState::Invalid;
stream.recvState = StreamRecvState::Closed;
EXPECT_THROW(
sendRstSMHandler(stream, GenericApplicationErrorCode::UNKNOWN),
QuicTransportException);
auto result = sendRstSMHandler(stream, GenericApplicationErrorCode::UNKNOWN);
ASSERT_TRUE(result.hasError());
EXPECT_NE(result.error().code.asTransportErrorCode(), nullptr);
}
TEST_F(QuicUnidirectionalStreamTest, ClosedInvalidAckStreamFrame) {
@ -956,7 +1010,9 @@ TEST_F(QuicUnidirectionalStreamTest, ClosedInvalidAckStreamFrame) {
stream.recvState = StreamRecvState::Closed;
WriteStreamFrame ackedFrame(id, 0, 0, false);
EXPECT_THROW(sendAckSMHandler(stream, ackedFrame), QuicTransportException);
auto result = sendAckSMHandler(stream, ackedFrame);
ASSERT_TRUE(result.hasError());
EXPECT_NE(result.error().code.asTransportErrorCode(), nullptr);
}
TEST_F(QuicUnidirectionalStreamTest, ClosedInvalidStopSending) {
@ -965,10 +1021,10 @@ TEST_F(QuicUnidirectionalStreamTest, ClosedInvalidStopSending) {
QuicStreamState stream(id, *conn);
stream.sendState = StreamSendState::Invalid;
stream.recvState = StreamRecvState::Closed;
EXPECT_THROW(
sendStopSendingSMHandler(
stream, StopSendingFrame(id, GenericApplicationErrorCode::UNKNOWN)),
QuicTransportException);
auto result = sendStopSendingSMHandler(
stream, StopSendingFrame(id, GenericApplicationErrorCode::UNKNOWN));
ASSERT_TRUE(result.hasError());
EXPECT_NE(result.error().code.asTransportErrorCode(), nullptr);
}
TEST_F(QuicUnidirectionalStreamTest, OpenReadStreamFin) {
@ -981,7 +1037,9 @@ TEST_F(QuicUnidirectionalStreamTest, OpenReadStreamFin) {
ReadStreamFrame receivedStreamFrame(stream.id, 100, true);
receivedStreamFrame.data = folly::IOBuf::create(10);
receivedStreamFrame.data->append(10);
receiveReadStreamFrameSMHandler(stream, std::move(receivedStreamFrame));
auto result =
receiveReadStreamFrameSMHandler(stream, std::move(receivedStreamFrame));
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.sendState, StreamSendState::Invalid);
EXPECT_EQ(stream.recvState, StreamRecvState::Closed);
}
@ -993,8 +1051,9 @@ TEST_F(QuicUnidirectionalStreamTest, OpenRstStream) {
stream.sendState = StreamSendState::Invalid;
stream.recvState = StreamRecvState::Open;
receiveRstStreamSMHandler(
auto result = receiveRstStreamSMHandler(
stream, RstStreamFrame(1, GenericApplicationErrorCode::UNKNOWN, 1234));
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.sendState, StreamSendState::Invalid);
EXPECT_EQ(stream.recvState, StreamRecvState::Closed);
}
@ -1015,7 +1074,8 @@ TEST_F(QuicUnidirectionalStreamTest, OpenFinalAckStreamFrame) {
std::forward_as_tuple(1),
std::forward_as_tuple(std::make_unique<WriteStreamBuffer>(
ChainedByteRangeHead(buf), 1, false)));
sendAckSMHandler(stream, streamFrame);
auto result = sendAckSMHandler(stream, streamFrame);
ASSERT_FALSE(result.hasError());
EXPECT_EQ(stream.sendState, StreamSendState::Closed);
EXPECT_EQ(stream.recvState, StreamRecvState::Invalid);
}
@ -1026,9 +1086,10 @@ TEST_F(QuicUnidirectionalStreamTest, ResetSentInvalidReadStream) {
QuicStreamState stream(id, *conn);
stream.sendState = StreamSendState::ResetSent;
stream.recvState = StreamRecvState::Invalid;
EXPECT_THROW(
receiveReadStreamFrameSMHandler(stream, ReadStreamFrame(id, 1, false)),
QuicTransportException);
auto result =
receiveReadStreamFrameSMHandler(stream, ReadStreamFrame(id, 1, false));
ASSERT_TRUE(result.hasError());
EXPECT_NE(result.error().code.asTransportErrorCode(), nullptr);
}
TEST_F(QuicUnidirectionalStreamTest, ResetSentInvalidRstStream) {
@ -1037,11 +1098,10 @@ TEST_F(QuicUnidirectionalStreamTest, ResetSentInvalidRstStream) {
QuicStreamState stream(id, *conn);
stream.sendState = StreamSendState::ResetSent;
stream.recvState = StreamRecvState::Invalid;
EXPECT_THROW(
receiveRstStreamSMHandler(
stream,
RstStreamFrame(1, GenericApplicationErrorCode::UNKNOWN, 1234)),
QuicTransportException);
auto result = receiveRstStreamSMHandler(
stream, RstStreamFrame(1, GenericApplicationErrorCode::UNKNOWN, 1234));
ASSERT_TRUE(result.hasError());
EXPECT_NE(result.error().code.asTransportErrorCode(), nullptr);
}
TEST_F(QuicOpenStateTest, DSRStreamAcked) {
@ -1049,11 +1109,13 @@ TEST_F(QuicOpenStateTest, DSRStreamAcked) {
conn->clientConnectionId = getTestConnectionId(0);
conn->serverConnectionId = getTestConnectionId(1);
auto stream = conn->streamManager->createNextBidirectionalStream().value();
writeDataToQuicStream(
*stream,
folly::IOBuf::copyBuffer("Big ship stucks in small water"),
false);
writeBufMetaToQuicStream(*stream, BufferMeta(1000), true);
ASSERT_FALSE(writeDataToQuicStream(
*stream,
folly::IOBuf::copyBuffer("Big ship stucks in small water"),
false)
.hasError());
ASSERT_FALSE(
writeBufMetaToQuicStream(*stream, BufferMeta(1000), true).hasError());
auto bufMetaStartingOffset = stream->writeBufMeta.offset;
handleStreamBufMetaWritten(
*conn,
@ -1068,7 +1130,8 @@ TEST_F(QuicOpenStateTest, DSRStreamAcked) {
stream->retransmissionBufMetas.find(bufMetaStartingOffset));
WriteStreamFrame frame(stream->id, bufMetaStartingOffset, 300, false);
frame.fromBufMeta = true;
sendAckSMHandler(*stream, frame);
auto result = sendAckSMHandler(*stream, frame);
ASSERT_FALSE(result.hasError());
EXPECT_TRUE(stream->retransmissionBufMetas.empty());
EXPECT_EQ(stream->sendState, StreamSendState::Open);
}
@ -1080,11 +1143,13 @@ TEST_F(QuicOpenStateTest, DSRFullStreamAcked) {
auto stream = conn->streamManager->createNextBidirectionalStream().value();
auto buf = folly::IOBuf::copyBuffer("Big ship stucks in small water");
size_t len = buf->computeChainDataLength();
writeDataToQuicStream(*stream, std::move(buf), false);
ASSERT_FALSE(
writeDataToQuicStream(*stream, std::move(buf), false).hasError());
handleStreamWritten(
*conn, *stream, 0, len, false, 1, PacketNumberSpace::AppData);
ASSERT_EQ(stream->retransmissionBuffer.size(), 1);
writeBufMetaToQuicStream(*stream, BufferMeta(1000), true);
ASSERT_FALSE(
writeBufMetaToQuicStream(*stream, BufferMeta(1000), true).hasError());
auto bufMetaStartingOffset = stream->writeBufMeta.offset;
handleStreamBufMetaWritten(
*conn,
@ -1100,12 +1165,14 @@ TEST_F(QuicOpenStateTest, DSRFullStreamAcked) {
stream->retransmissionBufMetas.find(bufMetaStartingOffset));
WriteStreamFrame frame(stream->id, bufMetaStartingOffset, 1000, true);
frame.fromBufMeta = true;
sendAckSMHandler(*stream, frame);
auto result = sendAckSMHandler(*stream, frame);
ASSERT_FALSE(result.hasError());
frame.offset = 0;
frame.len = len;
frame.fin = false;
frame.fromBufMeta = false;
sendAckSMHandler(*stream, frame);
result = sendAckSMHandler(*stream, frame);
ASSERT_FALSE(result.hasError());
EXPECT_TRUE(stream->retransmissionBuffer.empty());
EXPECT_TRUE(stream->retransmissionBufMetas.empty());
EXPECT_EQ(stream->sendState, StreamSendState::Closed);

File diff suppressed because it is too large

View File

@ -1026,9 +1026,9 @@ TEST_F(QuicStateFunctionsTest, TestInvokeStreamStateMachineConnectionError) {
QuicStreamState stream(1, conn);
RstStreamFrame rst(1, GenericApplicationErrorCode::UNKNOWN, 100);
stream.finalReadOffset = 1024;
EXPECT_THROW(
receiveRstStreamSMHandler(stream, std::move(rst)),
QuicTransportException);
auto result = receiveRstStreamSMHandler(stream, std::move(rst));
ASSERT_TRUE(result.hasError());
ASSERT_NE(result.error().code.asTransportErrorCode(), nullptr);
// This doesn't change the send state machine implicitly anymore
bool matches = (stream.sendState == StreamSendState::Open);
EXPECT_TRUE(matches);
@ -1044,7 +1044,8 @@ TEST_F(QuicStateFunctionsTest, InvokeResetDoesNotSendFlowControl) {
stream.flowControlState.windowSize = 100;
conn.flowControlState.advertisedMaxOffset = 100;
conn.flowControlState.windowSize = 100;
receiveRstStreamSMHandler(stream, std::move(rst));
auto result = receiveRstStreamSMHandler(stream, std::move(rst));
EXPECT_TRUE(result.hasValue());
bool matches = (stream.recvState == StreamRecvState::Closed);
EXPECT_TRUE(matches);
EXPECT_FALSE(conn.streamManager->hasWindowUpdates());
@ -1058,12 +1059,12 @@ TEST_F(QuicStateFunctionsTest, TestInvokeStreamStateMachineStreamError) {
FizzServerQuicHandshakeContext::Builder().build());
QuicStreamState stream(1, conn);
RstStreamFrame rst(1, GenericApplicationErrorCode::UNKNOWN, 100);
try {
sendRstAckSMHandler(stream, folly::none);
ADD_FAILURE();
} catch (QuicTransportException& ex) {
EXPECT_EQ(ex.errorCode(), TransportErrorCode::STREAM_STATE_ERROR);
}
auto result = sendRstAckSMHandler(stream, folly::none);
ASSERT_TRUE(result.hasError());
ASSERT_NE(result.error().code.asTransportErrorCode(), nullptr);
EXPECT_EQ(
*result.error().code.asTransportErrorCode(),
TransportErrorCode::STREAM_STATE_ERROR);
bool matches = (stream.sendState == StreamSendState::Open);
EXPECT_TRUE(matches);
}
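
For reference, a minimal sketch of the error-return pattern these tests now exercise: the handler reports failure through a folly::Expected return value, and the caller checks hasError() instead of catching an exception. The function, error struct, and names below are hypothetical stand-ins for illustration, not mvfst APIs.

#include <folly/Expected.h>
#include <folly/Unit.h>
#include <iostream>

// Hypothetical error type standing in for the transport error carried by the
// real return values; only a single code field is modeled here.
struct SketchError {
  int transportErrorCode{0};
};

// Hypothetical handler: reports failure through the return value rather than
// throwing, mirroring the handler signatures exercised in the tests above.
folly::Expected<folly::Unit, SketchError> sketchHandler(bool shouldFail) {
  if (shouldFail) {
    return folly::makeUnexpected(SketchError{/*transportErrorCode=*/5});
  }
  return folly::unit;
}

int main() {
  auto ok = sketchHandler(false);
  // Success path: hasError() is false and the caller proceeds normally.
  if (!ok.hasError()) {
    std::cout << "handled\n";
  }
  auto bad = sketchHandler(true);
  // Error path: the caller inspects error() instead of catching an exception.
  if (bad.hasError()) {
    std::cout << "error code " << bad.error().transportErrorCode << "\n";
  }
  return 0;
}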

File diff suppressed because it is too large

File diff suppressed because it is too large