1
0
mirror of https://github.com/matrix-org/matrix-js-sdk.git synced 2025-11-29 16:43:09 +03:00

Send device messages for the same user in same API call.

Currently we split the device messages up to limit the number per call,
but that can end up splitting messages to a given user's devices over
separate API calls. This is fine, but means that the server can't e.g.
bundle them into a single EDU for remote users or sanity check that the
client is sending to the right set of devices (i.e. its device list
cache isn't wrong).
This commit is contained in:
Erik Johnston
2020-01-13 13:43:00 +00:00
parent 92df82bfa9
commit ed3fded8e8

View File

@@ -330,6 +330,9 @@ MegolmEncryption.prototype._prepareNewSession = async function() {
}; };
/** /**
* Splits the user device map into multiple chunks to reduce the number of
* devices we encrypt to per API call.
*
* @private * @private
* *
* @param {module:crypto/algorithms/megolm.OutboundSessionInfo} session * @param {module:crypto/algorithms/megolm.OutboundSessionInfo} session
@@ -386,11 +389,6 @@ MegolmEncryption.prototype._splitUserDeviceMap = function(
"share keys with device " + userId + ":" + deviceId, "share keys with device " + userId + ":" + deviceId,
); );
if (entriesInCurrentSlice > maxToDeviceMessagesPerRequest) {
// the current slice is filled up. Start inserting into the next slice
entriesInCurrentSlice = 0;
currentSliceId++;
}
if (!mapSlices[currentSliceId]) { if (!mapSlices[currentSliceId]) {
mapSlices[currentSliceId] = []; mapSlices[currentSliceId] = [];
} }
@@ -402,6 +400,17 @@ MegolmEncryption.prototype._splitUserDeviceMap = function(
entriesInCurrentSlice++; entriesInCurrentSlice++;
} }
// We do this in the per-user loop as we prefer that all messages to the
// same user end up in the same API call to make it easier for the
// server (e.g. only have to send one EDU if a remote user, etc). This
// does mean that if a user has many devices we may go over the desired
// limit, but its not a hard limit so that is fine.
if (entriesInCurrentSlice > maxToDeviceMessagesPerRequest) {
// the current slice is filled up. Start inserting into the next slice
entriesInCurrentSlice = 0;
currentSliceId++;
}
} }
return mapSlices; return mapSlices;
}; };