Mirror of https://github.com/facebookincubator/mvfst.git (synced 2025-08-08 09:42:06 +03:00)
Optionally write one stream frame per packet.
Summary:
It may be desirable for some applications to limit the number of streams written to a single packet, for example if they are treating streams like individual messages.

Reviewed By: yangchi

Differential Revision: D21067175

fbshipit-source-id: 75edcb1051bf7f1eb2c9b730806350f9857eabec
commit 93844d05fa
parent 08f3316678
committed by: Facebook GitHub Bot
@@ -239,7 +239,8 @@ StreamId StreamFrameScheduler::writeStreamsHelper(
     PacketBuilderInterface& builder,
     const std::set<StreamId>& writableStreams,
     StreamId nextScheduledStream,
-    uint64_t& connWritableBytes) {
+    uint64_t& connWritableBytes,
+    bool streamPerPacket) {
   MiddleStartingIterationWrapper wrapper(writableStreams, nextScheduledStream);
   auto writableStreamItr = wrapper.cbegin();
   // This will write the stream frames in a round robin fashion ordered by
@@ -250,6 +251,9 @@ StreamId StreamFrameScheduler::writeStreamsHelper(
   while (writableStreamItr != wrapper.cend() && connWritableBytes > 0) {
     if (writeNextStreamFrame(builder, *writableStreamItr, connWritableBytes)) {
       writableStreamItr++;
+      if (streamPerPacket) {
+        break;
+      }
     } else {
       break;
     }
@@ -271,7 +275,8 @@ void StreamFrameScheduler::writeStreams(PacketBuilderInterface& builder) {
         builder,
         writableControlStreams,
         conn_.schedulingState.nextScheduledControlStream,
-        connWritableBytes);
+        connWritableBytes,
+        conn_.transportSettings.streamFramePerPacket);
   }
   if (connWritableBytes == 0) {
     return;
@@ -282,7 +287,8 @@ void StreamFrameScheduler::writeStreams(PacketBuilderInterface& builder) {
         builder,
         writableStreams,
         conn_.schedulingState.nextScheduledStream,
-        connWritableBytes);
+        connWritableBytes,
+        conn_.transportSettings.streamFramePerPacket);
   }
 }
 } // namespace quic
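Taken together, the two hunks above add an early exit to the scheduler's round-robin loop: when streamPerPacket is set, a successful writeNextStreamFrame ends the packet immediately. Below is a minimal, self-contained model of that control flow, not mvfst code: the frame write is faked, and the MiddleStartingIterationWrapper resume logic and per-stream flow control are omitted.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <set>

namespace {

// Pretends to write one stream frame: consumes up to 100 bytes of the
// connection-level budget, failing once the budget is exhausted.
bool writeNextStreamFrame(uint32_t stream, uint64_t& connWritableBytes) {
  uint64_t toWrite = std::min<uint64_t>(100, connWritableBytes);
  if (toWrite == 0) {
    return false;
  }
  connWritableBytes -= toWrite;
  std::cout << "frame: stream " << stream << ", " << toWrite << " bytes\n";
  return true;
}

// Round-robin over writable streams, mirroring writeStreamsHelper; with
// streamPerPacket set, stop after the first successfully written frame.
size_t writeStreamsHelper(
    const std::set<uint32_t>& writableStreams,
    uint64_t& connWritableBytes,
    bool streamPerPacket) {
  size_t framesInPacket = 0;
  auto writableStreamItr = writableStreams.cbegin();
  while (writableStreamItr != writableStreams.cend() &&
         connWritableBytes > 0) {
    if (writeNextStreamFrame(*writableStreamItr, connWritableBytes)) {
      ++writableStreamItr;
      ++framesInPacket;
      if (streamPerPacket) {
        break; // the new early exit: one stream frame per packet
      }
    } else {
      break;
    }
  }
  return framesInPacket;
}

} // namespace

int main() {
  std::set<uint32_t> streams{0, 4, 8};
  uint64_t budget = 1000;
  std::cout << writeStreamsHelper(streams, budget, false) << " frames\n"; // 3
  budget = 1000;
  std::cout << writeStreamsHelper(streams, budget, true) << " frames\n"; // 1
}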
@@ -170,7 +170,8 @@ class StreamFrameScheduler {
       PacketBuilderInterface& builder,
       const std::set<StreamId>& writableStreams,
       StreamId nextScheduledStream,
-      uint64_t& connWritableBytes);
+      uint64_t& connWritableBytes,
+      bool streamPerPacket);
 
   using WritableStreamItr =
       MiddleStartingIterationWrapper::MiddleStartingIterator;
@@ -776,6 +776,75 @@ TEST_F(QuicPacketSchedulerTest, StreamFrameSchedulerRoundRobin) {
   EXPECT_EQ(*frames[2].asWriteStreamFrame(), f3);
 }
 
+TEST_F(QuicPacketSchedulerTest, StreamFrameSchedulerRoundRobinStreamPerPacket) {
+  QuicClientConnectionState conn(
+      FizzClientQuicHandshakeContext::Builder().build());
+  conn.streamManager->setMaxLocalBidirectionalStreams(10);
+  conn.flowControlState.peerAdvertisedMaxOffset = 100000;
+  conn.flowControlState.peerAdvertisedInitialMaxStreamOffsetBidiRemote = 100000;
+  conn.transportSettings.streamFramePerPacket = true;
+  auto connId = getTestConnectionId();
+  StreamFrameScheduler scheduler(conn);
+  ShortHeader shortHeader1(
+      ProtectionType::KeyPhaseZero,
+      connId,
+      getNextPacketNum(conn, PacketNumberSpace::AppData));
+  RegularQuicPacketBuilder builder1(
+      conn.udpSendPacketLen,
+      std::move(shortHeader1),
+      conn.ackStates.appDataAckState.largestAckedByPeer);
+  auto stream1 =
+      conn.streamManager->createNextBidirectionalStream().value()->id;
+  auto stream2 =
+      conn.streamManager->createNextBidirectionalStream().value()->id;
+  auto stream3 =
+      conn.streamManager->createNextBidirectionalStream().value()->id;
+  auto largeBuf = folly::IOBuf::createChain(conn.udpSendPacketLen * 2, 4096);
+  auto curBuf = largeBuf.get();
+  do {
+    curBuf->append(curBuf->capacity());
+    curBuf = curBuf->next();
+  } while (curBuf != largeBuf.get());
+  auto chainLen = largeBuf->computeChainDataLength();
+  writeDataToQuicStream(
+      *conn.streamManager->findStream(stream1), std::move(largeBuf), false);
+  writeDataToQuicStream(
+      *conn.streamManager->findStream(stream2),
+      folly::IOBuf::copyBuffer("some data"),
+      false);
+  writeDataToQuicStream(
+      *conn.streamManager->findStream(stream3),
+      folly::IOBuf::copyBuffer("some data"),
+      false);
+  // Force the wraparound initially.
+  conn.schedulingState.nextScheduledStream = stream3 + 8;
+  scheduler.writeStreams(builder1);
+  EXPECT_EQ(conn.schedulingState.nextScheduledStream, 4);
+
+  // Should write frames for stream2, stream3, followed by stream1 again.
+  NiceMock<MockQuicPacketBuilder> builder2;
+  EXPECT_CALL(builder2, remainingSpaceInPkt()).WillRepeatedly(Return(4096));
+  EXPECT_CALL(builder2, appendFrame(_)).WillRepeatedly(Invoke([&](auto f) {
+    builder2.frames_.push_back(f);
+  }));
+  auto& frames = builder2.frames_;
+  scheduler.writeStreams(builder2);
+  ASSERT_EQ(frames.size(), 1);
+  scheduler.writeStreams(builder2);
+  ASSERT_EQ(frames.size(), 2);
+  scheduler.writeStreams(builder2);
+  ASSERT_EQ(frames.size(), 3);
+  WriteStreamFrame f1(stream2, 0, 9, false);
+  WriteStreamFrame f2(stream3, 0, 9, false);
+  WriteStreamFrame f3(stream1, 0, chainLen, false);
+  ASSERT_TRUE(frames[0].asWriteStreamFrame());
+  EXPECT_EQ(*frames[0].asWriteStreamFrame(), f1);
+  ASSERT_TRUE(frames[1].asWriteStreamFrame());
+  EXPECT_EQ(*frames[1].asWriteStreamFrame(), f2);
+  ASSERT_TRUE(frames[2].asWriteStreamFrame());
+  EXPECT_EQ(*frames[2].asWriteStreamFrame(), f3);
+}
+
 TEST_F(QuicPacketSchedulerTest, StreamFrameSchedulerRoundRobinControl) {
   QuicClientConnectionState conn(
       FizzClientQuicHandshakeContext::Builder().build());
@@ -168,6 +168,9 @@ struct TransportSettings {
   // A temporary type to control DataPath write style. Will be gone after we
   // are done with experiment.
   DataPathType dataPathType{DataPathType::ChainedMemory};
+  // Whether or not we should stop writing a packet after writing a single
+  // stream frame to it.
+  bool streamFramePerPacket{false};
 };
 
 } // namespace quic
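For an application that treats each stream as a message, opting in is a one-line settings change. A hedged sketch, assuming the usual mvfst pattern of building a TransportSettings value and handing it to the transport; the header path and the setTransportSettings call site are assumptions for illustration, not part of this diff:

#include <quic/state/TransportSettings.h> // header path assumed for this revision

// Sketch: settings that opt in to one stream frame per packet. Only
// streamFramePerPacket comes from this diff; every other field keeps its
// default. Apply with e.g. transport->setTransportSettings(settings)
// before the transport starts writing (illustrative call site).
quic::TransportSettings makeMessagePerPacketSettings() {
  quic::TransportSettings settings;
  settings.streamFramePerPacket = true;
  return settings;
}

Note the trade-off: with large pending writes this costs a packet per stream frame, so the flag suits message-oriented workloads where stream boundaries matter more than packing efficiency.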