mirror of
https://github.com/facebookincubator/mvfst.git
synced 2025-04-18 17:24:03 +03:00
Fix missed cases in QuicInteger -> Expected change
Summary: These cases were missed because writeFrame had started returning a different type (folly::Expected) than callers assumed. Also revert the receive-timestamps change in the codec that used value_or, since it silently suppressed the error when a value too large to encode was passed. Reviewed By: jbeshay, kvtsoy Differential Revision: D72904909 fbshipit-source-id: 47e415e7b12208c8a2917325ed42f51c63992687
This commit is contained in:
parent
d14872347d
commit
58d7c06951
@ -259,7 +259,11 @@ FrameScheduler::scheduleFramesForPacket(
|
||||
cryptoDataWritten = cryptoDataRes.value();
|
||||
}
|
||||
if (rstScheduler_ && rstScheduler_->hasPendingRsts()) {
|
||||
rstWritten = rstScheduler_->writeRsts(wrapper);
|
||||
auto rstWrittenRes = rstScheduler_->writeRsts(wrapper);
|
||||
if (rstWrittenRes.hasError()) {
|
||||
return folly::makeUnexpected(rstWrittenRes.error());
|
||||
}
|
||||
rstWritten = rstWrittenRes.value();
|
||||
}
|
||||
// Long time ago we decided RST has higher priority than Acks.
|
||||
if (hasPendingAcks()) {
|
||||
@ -291,10 +295,16 @@ FrameScheduler::scheduleFramesForPacket(
|
||||
}
|
||||
if (windowUpdateScheduler_ &&
|
||||
windowUpdateScheduler_->hasPendingWindowUpdates()) {
|
||||
windowUpdateScheduler_->writeWindowUpdates(wrapper);
|
||||
auto result = windowUpdateScheduler_->writeWindowUpdates(wrapper);
|
||||
if (result.hasError()) {
|
||||
return folly::makeUnexpected(result.error());
|
||||
}
|
||||
}
|
||||
if (blockedScheduler_ && blockedScheduler_->hasPendingBlockedFrames()) {
|
||||
blockedScheduler_->writeBlockedFrames(wrapper);
|
||||
auto result = blockedScheduler_->writeBlockedFrames(wrapper);
|
||||
if (result.hasError()) {
|
||||
return folly::makeUnexpected(result.error());
|
||||
}
|
||||
}
|
||||
// Simple frames should be scheduled before stream frames and retx frames
|
||||
// because those frames might fill up all available bytes for writing.
|
||||
@ -593,7 +603,8 @@ bool RstStreamScheduler::hasPendingRsts() const {
|
||||
return !conn_.pendingEvents.resets.empty();
|
||||
}
|
||||
|
||||
bool RstStreamScheduler::writeRsts(PacketBuilderInterface& builder) {
|
||||
folly::Expected<bool, QuicError> RstStreamScheduler::writeRsts(
|
||||
PacketBuilderInterface& builder) {
|
||||
bool rstWritten = false;
|
||||
for (const auto& resetStream : conn_.pendingEvents.resets) {
|
||||
auto streamId = resetStream.first;
|
||||
@ -608,8 +619,11 @@ bool RstStreamScheduler::writeRsts(PacketBuilderInterface& builder) {
|
||||
// While this is not something that's mandated by the spec, we're doing
|
||||
// it in this implementation because it dramatically simplifies flow
|
||||
// control accounting.
|
||||
auto bytesWritten = writeFrame(resetStream.second, builder);
|
||||
if (!bytesWritten) {
|
||||
auto bytesWrittenResult = writeFrame(resetStream.second, builder);
|
||||
if (bytesWrittenResult.hasError()) {
|
||||
return folly::makeUnexpected(bytesWrittenResult.error());
|
||||
}
|
||||
if (!bytesWrittenResult.value()) {
|
||||
break;
|
||||
}
|
||||
rstWritten = true;
|
||||
@ -713,13 +727,16 @@ bool WindowUpdateScheduler::hasPendingWindowUpdates() const {
|
||||
conn_.pendingEvents.connWindowUpdate;
|
||||
}
|
||||
|
||||
void WindowUpdateScheduler::writeWindowUpdates(
|
||||
PacketBuilderInterface& builder) {
|
||||
folly::Expected<folly::Unit, QuicError>
|
||||
WindowUpdateScheduler::writeWindowUpdates(PacketBuilderInterface& builder) {
|
||||
if (conn_.pendingEvents.connWindowUpdate) {
|
||||
auto maxDataFrame = generateMaxDataFrame(conn_);
|
||||
auto maximumData = maxDataFrame.maximumData;
|
||||
auto bytes = writeFrame(std::move(maxDataFrame), builder);
|
||||
if (bytes) {
|
||||
auto bytesResult = writeFrame(std::move(maxDataFrame), builder);
|
||||
if (bytesResult.hasError()) {
|
||||
return folly::makeUnexpected(bytesResult.error());
|
||||
}
|
||||
if (bytesResult.value()) {
|
||||
VLOG(4) << "Wrote max_data=" << maximumData << " " << conn_;
|
||||
}
|
||||
}
|
||||
@ -730,13 +747,17 @@ void WindowUpdateScheduler::writeWindowUpdates(
|
||||
}
|
||||
auto maxStreamDataFrame = generateMaxStreamDataFrame(*stream);
|
||||
auto maximumData = maxStreamDataFrame.maximumData;
|
||||
auto bytes = writeFrame(std::move(maxStreamDataFrame), builder);
|
||||
if (!bytes) {
|
||||
auto bytesResult = writeFrame(std::move(maxStreamDataFrame), builder);
|
||||
if (bytesResult.hasError()) {
|
||||
return folly::makeUnexpected(bytesResult.error());
|
||||
}
|
||||
if (!bytesResult.value()) {
|
||||
break;
|
||||
}
|
||||
VLOG(4) << "Wrote max_stream_data stream=" << stream->id
|
||||
<< " maximumData=" << maximumData << " " << conn_;
|
||||
}
|
||||
return folly::unit;
|
||||
}
|
||||
|
||||
BlockedScheduler::BlockedScheduler(const QuicConnectionStateBase& conn)
|
||||
@ -747,25 +768,33 @@ bool BlockedScheduler::hasPendingBlockedFrames() const {
|
||||
conn_.pendingEvents.sendDataBlocked;
|
||||
}
|
||||
|
||||
void BlockedScheduler::writeBlockedFrames(PacketBuilderInterface& builder) {
|
||||
folly::Expected<folly::Unit, QuicError> BlockedScheduler::writeBlockedFrames(
|
||||
PacketBuilderInterface& builder) {
|
||||
if (conn_.pendingEvents.sendDataBlocked) {
|
||||
// Connection is write blocked due to connection level flow control.
|
||||
DataBlockedFrame blockedFrame(
|
||||
conn_.flowControlState.peerAdvertisedMaxOffset);
|
||||
auto result = writeFrame(blockedFrame, builder);
|
||||
if (!result) {
|
||||
if (result.hasError()) {
|
||||
return folly::makeUnexpected(result.error());
|
||||
}
|
||||
if (!result.value()) {
|
||||
// If there is not enough room to write data blocked frame in the
|
||||
// current packet, we won't be able to write stream blocked frames either
|
||||
// so just return.
|
||||
return;
|
||||
return folly::unit;
|
||||
}
|
||||
}
|
||||
for (const auto& blockedStream : conn_.streamManager->blockedStreams()) {
|
||||
auto bytesWritten = writeFrame(blockedStream.second, builder);
|
||||
if (!bytesWritten) {
|
||||
auto bytesWrittenResult = writeFrame(blockedStream.second, builder);
|
||||
if (bytesWrittenResult.hasError()) {
|
||||
return folly::makeUnexpected(bytesWrittenResult.error());
|
||||
}
|
||||
if (!bytesWrittenResult.value()) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
return folly::unit;
|
||||
}
|
||||
|
||||
CryptoStreamScheduler::CryptoStreamScheduler(
|
||||
|
@ -134,7 +134,8 @@ class RstStreamScheduler {
|
||||
|
||||
bool hasPendingRsts() const;
|
||||
|
||||
bool writeRsts(PacketBuilderInterface& builder);
|
||||
[[nodiscard]] folly::Expected<bool, QuicError> writeRsts(
|
||||
PacketBuilderInterface& builder);
|
||||
|
||||
private:
|
||||
const QuicConnectionStateBase& conn_;
|
||||
@ -187,7 +188,8 @@ class WindowUpdateScheduler {
|
||||
|
||||
bool hasPendingWindowUpdates() const;
|
||||
|
||||
void writeWindowUpdates(PacketBuilderInterface& builder);
|
||||
[[nodiscard]] folly::Expected<folly::Unit, QuicError> writeWindowUpdates(
|
||||
PacketBuilderInterface& builder);
|
||||
|
||||
private:
|
||||
const QuicConnectionStateBase& conn_;
|
||||
@ -199,7 +201,8 @@ class BlockedScheduler {
|
||||
|
||||
bool hasPendingBlockedFrames() const;
|
||||
|
||||
void writeBlockedFrames(PacketBuilderInterface& builder);
|
||||
[[nodiscard]] folly::Expected<folly::Unit, QuicError> writeBlockedFrames(
|
||||
PacketBuilderInterface& builder);
|
||||
|
||||
private:
|
||||
const QuicConnectionStateBase& conn_;
|
||||
|
@ -560,7 +560,7 @@ TEST_F(QuicPacketSchedulerTest, StreamFrameSchedulerExists) {
|
||||
ASSERT_FALSE(builder.encodePacketHeader().hasError());
|
||||
auto originalSpace = builder.remainingSpaceInPkt();
|
||||
conn.streamManager->queueWindowUpdate(stream->id);
|
||||
scheduler.writeWindowUpdates(builder);
|
||||
ASSERT_FALSE(scheduler.writeWindowUpdates(builder).hasError());
|
||||
EXPECT_LT(builder.remainingSpaceInPkt(), originalSpace);
|
||||
}
|
||||
|
||||
@ -585,7 +585,7 @@ TEST_F(QuicPacketSchedulerTest, StreamFrameNoSpace) {
|
||||
PacketBuilderWrapper builderWrapper(builder, 2);
|
||||
auto originalSpace = builder.remainingSpaceInPkt();
|
||||
conn.streamManager->queueWindowUpdate(stream->id);
|
||||
scheduler.writeWindowUpdates(builderWrapper);
|
||||
ASSERT_FALSE(scheduler.writeWindowUpdates(builderWrapper).hasError());
|
||||
EXPECT_EQ(builder.remainingSpaceInPkt(), originalSpace);
|
||||
}
|
||||
|
||||
@ -607,7 +607,7 @@ TEST_F(QuicPacketSchedulerTest, StreamFrameSchedulerStreamNotExists) {
|
||||
ASSERT_FALSE(builder.encodePacketHeader().hasError());
|
||||
auto originalSpace = builder.remainingSpaceInPkt();
|
||||
conn.streamManager->queueWindowUpdate(nonExistentStream);
|
||||
scheduler.writeWindowUpdates(builder);
|
||||
ASSERT_FALSE(scheduler.writeWindowUpdates(builder).hasError());
|
||||
EXPECT_EQ(builder.remainingSpaceInPkt(), originalSpace);
|
||||
}
|
||||
|
||||
|
@ -601,22 +601,13 @@ computeReceiveTimestampsMinimumSpace(
|
||||
}
|
||||
|
||||
auto countRangesSize = getQuicIntegerSize(countTimestampRanges);
|
||||
if (countRangesSize.hasError()) {
|
||||
return folly::makeUnexpected(countRangesSize.error());
|
||||
}
|
||||
|
||||
auto lastPktNumSize = getQuicIntegerSize(maybeLastPktNum);
|
||||
if (lastPktNumSize.hasError()) {
|
||||
return folly::makeUnexpected(lastPktNumSize.error());
|
||||
}
|
||||
|
||||
auto lastPktTsDeltaSize = getQuicIntegerSize(maybeLastPktTsDelta.count());
|
||||
if (lastPktTsDeltaSize.hasError()) {
|
||||
return folly::makeUnexpected(lastPktTsDeltaSize.error());
|
||||
}
|
||||
|
||||
return countRangesSize.value() + lastPktNumSize.value() +
|
||||
lastPktTsDeltaSize.value();
|
||||
return countRangesSize.value_or(0) + lastPktNumSize.value_or(0) +
|
||||
lastPktTsDeltaSize.value_or(0);
|
||||
}
|
||||
|
||||
static void writeECNFieldsToAck(
|
||||
|
Loading…
x
Reference in New Issue
Block a user