Mirror of https://github.com/facebookincubator/mvfst.git, synced 2025-08-09 20:42:44 +03:00
Summary: This doesn't belong in the generic state. Untangling it is a little difficult, but I think this solution is cleaner than having it in the generic state.
Reviewed By: JunqiWang
Differential Revision: D29856391
fbshipit-source-id: 1042109ed29cd1d20d139e08548d187b469c8398
150 lines | 4.9 KiB | C++
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 *
 */

#include <quic/api/QuicTransportFunctions.h>

#include <quic/dsr/backend/DSRPacketizer.h>

namespace quic {

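// Builds a single short-header QUIC packet carrying one stream frame,
// encrypts it, applies header protection, and writes it into ioBufBatch.
// Returns false if packet building or the batch write fails.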
bool writeSingleQuicPacket(
    IOBufQuicBatch& ioBufBatch,
    ConnectionId dcid,
    PacketNum packetNum,
    PacketNum largestAckedByPeer,
    const Aead& aead,
    const PacketNumberCipher& headerCipher,
    StreamId streamId,
    size_t offset,
    size_t length,
    bool eof,
    Buf buf) {
  if (buf->computeChainDataLength() < length) {
    LOG(ERROR) << "Insufficient data buffer";
    return false;
  }
  ShortHeader shortHeader(ProtectionType::KeyPhaseZero, dcid, packetNum);
  // The stream length limit calculated by the frontend should have already
  // taken the PMTU limit into account. Thus the packet builder uses the
  // uint32 max value as its packet size limit.
  // TODO: use InplaceQuicPacketBuilder in the future.
  RegularQuicPacketBuilder builder(
      std::numeric_limits<uint32_t>::max() /* udpSendPacketLen */,
      std::move(shortHeader),
      largestAckedByPeer);
  builder.encodePacketHeader();
  builder.accountForCipherOverhead(aead.getCipherOverhead());
  // The frontend has already limited the length by flow control, thus
  // flowControlLen == length.
  auto dataLen = writeStreamFrameHeader(
      builder,
      streamId,
      offset,
      length,
      length /* flow control len */,
      eof,
      true /* skip length field in stream header */);
  BufQueue bufQueue(std::move(buf));
  writeStreamFrameData(builder, bufQueue, *dataLen);
  auto packet = std::move(builder).buildPacket();

  if (packet.packet.frames.empty()) {
    LOG(ERROR) << "DSR Send failed: Built an empty packet.";
    ioBufBatch.flush();
    return false;
  }
  if (!packet.body) {
    LOG(ERROR) << "DSR Send failed: Built an empty body buffer.";
    ioBufBatch.flush();
    return false;
  }
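  // Assemble a contiguous plaintext buffer: leave headerLen bytes of
  // headroom, copy the packet body behind it, and encrypt the body in
  // place with the (coalesced) header as associated data.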
  packet.header->coalesce();
  auto headerLen = packet.header->length();
  auto bodyLen = packet.body->computeChainDataLength();
  auto unencrypted =
      folly::IOBuf::create(headerLen + bodyLen + aead.getCipherOverhead());
  auto bodyCursor = folly::io::Cursor(packet.body.get());
  bodyCursor.pull(unencrypted->writableData() + headerLen, bodyLen);
  unencrypted->advance(headerLen);
  unencrypted->append(bodyLen);
  auto packetBuf = aead.inplaceEncrypt(
      std::move(unencrypted), packet.header.get(), packetNum);
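  // The headroom reserved above survives inplaceEncrypt; reuse it to
  // prepend the still-unprotected header in front of the ciphertext.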
  DCHECK(packetBuf->headroom() == headerLen);
  packetBuf->clear();
  auto headerCursor = folly::io::Cursor(packet.header.get());
  headerCursor.pull(packetBuf->writableData(), headerLen);
  packetBuf->append(headerLen + bodyLen + aead.getCipherOverhead());
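  // Apply QUIC header protection: mask the header's flag bits and packet
  // number bytes, using a sample taken from the encrypted payload.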
  encryptPacketHeader(
      HeaderForm::Short,
      packetBuf->writableData(),
      headerLen,
      packetBuf->data() + headerLen,
      packetBuf->length() - headerLen,
      headerCipher);
  auto encodedSize = packetBuf->computeChainDataLength();
  bool ret = ioBufBatch.write(std::move(packetBuf), encodedSize);
  // If ret is false, the IOBufQuicBatch::flush() inside the
  // IOBufQuicBatch::write() call above has failed; no need to try flush()
  // again.
  return ret;
}

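// Packetizes and sends every request in reqGroup over sock, batching the
// resulting packets with GSO. Returns the number of packets sent.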
size_t writePacketsGroup(
    folly::AsyncUDPSocket& sock,
    RequestGroup& reqGroup,
    const std::function<Buf(const PacketizationRequest& req)>& bufProvider) {
  if (reqGroup.empty()) {
    LOG(ERROR) << "Empty packetization request";
    return 0;
  }
  // TODO: Why don't I just limit the batch size to reqGroup.size()? What can
  // go wrong?
  auto batchWriter =
      BatchWriterPtr(new GSOPacketBatchWriter(kDefaultQuicMaxBatchSize));
  // This doesn't matter:
  IOBufQuicBatch ioBufBatch(
      std::move(batchWriter),
      false /* thread local batching */,
      sock,
      reqGroup[0].clientAddress,
      nullptr /* statsCallback */,
      nullptr /* happyEyeballsState */);
  // TODO: Instead of building ciphers every time, we should cache them in a
  // CipherMap and look them up.
  CipherBuilder cipherBuilder;
  auto cipherPair = cipherBuilder.buildCiphers(
      std::move(reqGroup[0].trafficKey),
      reqGroup[0].cipherSuite,
      std::move(reqGroup[0].packetProtectionKey));
  if (!cipherPair.aead || !cipherPair.headerCipher) {
    LOG(ERROR) << "Failed to create ciphers";
    return 0;
  }
  // It's OK if reqGroup's size is larger than ioBufBatch's batch size: the
  // ioBufBatch will flush when it hits the limit, then transparently start a
  // new batch.
  for (const auto& request : reqGroup) {
    auto ret = writeSingleQuicPacket(
        ioBufBatch,
        request.dcid,
        request.packetNum,
        request.largestAckedPacketNum,
        *cipherPair.aead,
        *cipherPair.headerCipher,
        request.streamId,
        request.offset,
        request.len,
        request.fin,
        bufProvider(request));
    if (!ret) {
      return ioBufBatch.getPktSent();
    }
  }
  ioBufBatch.flush();
  return ioBufBatch.getPktSent();
}

} // namespace quic