mirror of https://github.com/matrix-org/matrix-js-sdk.git

Spread out device verification work

Avoid a big freeze when we process the results of a device query, by splitting
the work up by user.
Author: Richard van der Hoff
Date:   2017-02-10 13:37:41 +00:00
parent  bf2e6a33c2
commit  207bce61ad

3 changed files with 69 additions and 40 deletions
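Before the diffs themselves, here is a minimal sketch of the pattern the commit message describes, assuming a q-style promise library (which the SDK used at the time) and a hypothetical processUser() callback: rather than handling every user's keys in one long synchronous pass over the /keys/query response, each user's work is chained behind a short delay so the event loop can service other work in between.

    // Minimal sketch of the "spread the work out per user" pattern; the
    // function and callback names here are illustrative, not SDK API.
    const q = require("q");

    function processQueryResponse(deviceKeysByUser, processUser) {
        let prom = q();
        for (const userId of Object.keys(deviceKeysByUser)) {
            // delay(5) yields back to the event loop before each user is handled
            prom = prom.delay(5).then(() => {
                processUser(userId, deviceKeysByUser[userId]);
            });
        }
        return prom;
    }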

File 1 of 3:

@@ -130,7 +130,12 @@ function expectAliClaimKeys() {
         return {one_time_keys: result};
     });
-    return aliTestClient.httpBackend.flush("/keys/claim", 1);
+    // it can take a while to process the key query, so give it some extra
+    // time, and make sure the claim actually happens rather than ploughing on
+    // confusingly.
+    return aliTestClient.httpBackend.flush("/keys/claim", 1, 20).then((r) => {
+        expect(r).toEqual(1);
+    });
 }
@@ -263,16 +268,16 @@ function sendMessage(client) {
 function expectSendMessageRequest(httpBackend) {
     const path = "/send/m.room.encrypted/";
-    let sent;
+    const deferred = q.defer();
     httpBackend.when("PUT", path).respond(200, function(path, content) {
-        sent = content;
+        deferred.resolve(content);
         return {
             event_id: "asdfgh",
         };
     });
-    return httpBackend.flush(path, 1).then(function() {
-        return sent;
-    });
+
+    // it can take a while to process the key query, so give it 20ms
+    return httpBackend.flush(path, 1, 20).then(() => deferred.promise);
 }
 
 function aliRecvMessage() {
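The switch from a closure variable to a deferred matters once the flush is allowed to wait around: the caller now only receives the sent content when the PUT has genuinely arrived, rather than reading back undefined and ploughing on if the request never happened. A sketch of the same capture pattern, reusing the mock's when()/respond() API shown in this diff (the helper name is made up, and q is assumed to be in scope):

    // Sketch: resolve a deferred from inside the mock handler so callers can
    // await the actual request body instead of polling a closure variable.
    function captureRequestBody(httpBackend, method, path) {
        const deferred = q.defer();
        httpBackend.when(method, path).respond(200, function(reqPath, content) {
            deferred.resolve(content);  // only fires if the request is really made
            return {};                  // minimal 200 response body
        });
        return deferred.promise;
    }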

File 2 of 3:

@@ -36,15 +36,23 @@ HttpBackend.prototype = {
      * Respond to all of the requests (flush the queue).
      * @param {string} path The path to flush (optional) default: all.
      * @param {integer} numToFlush The number of things to flush (optional), default: all.
-     * @return {Promise} resolved when there is nothing left to flush.
+     * @param {integer=} waitTime The time (in ms) to wait for a request to happen.
+     *    default: 5
+     *
+     * @return {Promise} resolves when there is nothing left to flush, with the
+     *    number of requests flushed
      */
-    flush: function(path, numToFlush) {
+    flush: function(path, numToFlush, waitTime) {
         const defer = q.defer();
         const self = this;
         let flushed = 0;
         let triedWaiting = false;
+        if (waitTime === undefined) {
+            waitTime = 5;
+        }
         console.log(
-            "HTTP backend flushing... (path=%s numToFlush=%s)", path, numToFlush,
+            "HTTP backend flushing... (path=%s numToFlush=%s waitTime=%s)",
+            path, numToFlush, waitTime,
         );
         const tryFlush = function() {
             // if there's more real requests and more expected requests, flush 'em.
@@ -57,7 +65,7 @@ HttpBackend.prototype = {
                 flushed += 1;
                 if (numToFlush && flushed === numToFlush) {
                     console.log("  Flushed assigned amount: %s", numToFlush);
-                    defer.resolve();
+                    defer.resolve(flushed);
                 } else {
                     console.log("  flushed. Trying for more.");
                     setTimeout(tryFlush, 0);
@@ -65,11 +73,11 @@ HttpBackend.prototype = {
             } else if (flushed === 0 && !triedWaiting) {
                 // we may not have made the request yet, wait a generous amount of
                 // time before giving up.
-                setTimeout(tryFlush, 5);
+                setTimeout(tryFlush, waitTime);
                 triedWaiting = true;
             } else {
                 console.log("  no more flushes.");
-                defer.resolve();
+                defer.resolve(flushed);
             }
         };
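With the new waitTime parameter and the flushed count in the resolution value, a test can both give a slow code path more time and assert that the expected request actually went out. A small usage sketch in the style of the test changes above (the endpoint here is only an example):

    // Wait up to 20ms for one /keys/query request, then fail loudly if it
    // never happened instead of ploughing on with a half-set-up test.
    return httpBackend.flush("/keys/query", 1, 20).then((flushed) => {
        expect(flushed).toEqual(1);
    });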

File 3 of 3:

@@ -306,8 +306,29 @@ export default class DeviceList {
         ).then((res) => {
             const dk = res.device_keys || {};
+
+            // do each user in a separate promise, to avoid wedging the CPU
+            // (https://github.com/vector-im/riot-web/issues/3158)
+            //
+            // of course we ought to do this in a web worker or similar, but
+            // this serves as an easy solution for now.
+            let prom = q();
             for (const userId of downloadUsers) {
-                console.log('got keys for ' + userId + ':', dk[userId]);
+                prom = prom.delay(5).then(() => {
+                    this._processQueryResponseForUser(userId, dk[userId]);
+                });
+            }
+
+            return prom;
+        }).then(() => {
+            if (token) {
+                this._sessionStore.storeEndToEndDeviceSyncToken(token);
+            }
+            console.log('Completed key download for ' + downloadUsers);
+        });
+    }
+
+    _processQueryResponseForUser(userId, response) {
+        console.log('got keys for ' + userId + ':', response);
 
         // map from deviceid -> deviceinfo for this user
         const userStore = {};
@@ -320,7 +341,7 @@ export default class DeviceList {
         }
 
         _updateStoredDeviceKeysForUser(
-            this._olmDevice, userId, userStore, dk[userId] || {},
+            this._olmDevice, userId, userStore, response || {},
         );
 
         // update the session store
@@ -332,15 +353,10 @@ export default class DeviceList {
         this._sessionStore.storeEndToEndDevicesForUser(
             userId, storage,
         );
-
-                if (token) {
-                    this._sessionStore.storeEndToEndDeviceSyncToken(token);
-                }
-            }
-        });
     }
 }
 
 function _updateStoredDeviceKeysForUser(_olmDevice, userId, userStore,
                                         userResult) {
     let updated = false;
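Two consequences of this restructuring are worth noting. First, the 5ms delay per user trades a little latency for responsiveness: a query covering 200 users adds roughly an extra second of wall-clock time to the key download, but the browser stays interactive throughout. Second, the device-list sync token is now stored only after every user has been processed (previously it was written from inside the per-user loop), so a failure part-way through should leave the token unstored and the query will be repeated rather than being recorded as complete.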