
Fix the queueToDevice tests for the new fakeindexeddb (#4225)

https://github.com/dumbmatter/fakeIndexedDB/pull/93 causes a bunch
of tests to start failing because the fake timers need running in
order for fake indexeddb to work. It also seems to cause failures
to bleed between tests somehow if fake timers are enabled/disabled.
This keeps all the fake timer tests in one suite and all the others
in another, which appears to work.

This should allow https://github.com/matrix-org/matrix-js-sdk/pull/4224
to be merged.
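
As an illustrative sketch of the pattern (not the full spec file — the real change is in the diff below), the idea is to install fake timers once per suite via beforeAll/afterAll instead of toggling them inside individual tests. The suite names mirror those used in the diff; the placeholder it() bodies are assumptions for illustration only.

    // Illustrative sketch only: the two suites are arranged so that fake timers
    // are never enabled/disabled between individual tests.
    import { afterAll, beforeAll, beforeEach, describe, it, jest } from "@jest/globals";

    describe("non-timed tests", () => {
        beforeEach(() => {
            // Drain anything pending and make sure these tests run on real timers,
            // since fake indexeddb needs its timers to actually fire.
            jest.runOnlyPendingTimers();
            jest.useRealTimers();
        });

        it("runs against real timers", async () => {
            // ... tests that never advance a fake clock ...
        });
    });

    describe("async tests", () => {
        // Fake timers are installed once for the whole suite...
        beforeAll(() => {
            jest.useFakeTimers();
        });

        // ...and only removed after every test in it has finished.
        afterAll(() => {
            jest.useRealTimers();
        });

        it("runs against fake timers", async () => {
            // ... tests that advance the clock with jest.advanceTimersByTime() ...
        });
    });

Installing the fake timers in beforeAll confines fakeIndexedDB's own timer usage to one suite, which appears to be what stops failures bleeding into the real-timer tests.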
David Baker, 2024-05-30 16:16:42 +01:00 (committed by GitHub)
parent 8dfb6de3cc
commit 5c27e30302

@@ -61,9 +61,13 @@ describe.each([[StoreType.Memory], [StoreType.IndexedDB]])("queueToDevice (%s st
    let httpBackend: MockHttpBackend;
    let client: MatrixClient;
    /**
     * We need to split the tests into regular ones (these) and ones that use fake timers,
     * because the fake indexeddb uses timers too and appears to make tests cause other tests
     * to fail if we keep enabling/disabling fake timers within the same test suite.
     */
    describe("non-timed tests", () => {
        beforeEach(async function () {
            jest.runOnlyPendingTimers();
            jest.useRealTimers();
            httpBackend = new MockHttpBackend();
            let store: IStore;
@@ -84,7 +88,6 @@ describe.each([[StoreType.Memory], [StoreType.IndexedDB]])("queueToDevice (%s st
        });

        afterEach(function () {
            jest.useRealTimers();
            client.stopClient();
        });
@@ -108,91 +111,6 @@ describe.each([[StoreType.Memory], [StoreType.IndexedDB]])("queueToDevice (%s st
            await flushPromises();
        });

        it("retries on error", async function () {
            jest.useFakeTimers();
            httpBackend.when("PUT", "/sendToDevice/org.example.foo/").respond(500);
            httpBackend
                .when("PUT", "/sendToDevice/org.example.foo/")
                .check((request) => {
                    expect(request.data).toEqual(EXPECTED_BODY);
                })
                .respond(200, {});

            await client.queueToDevice({
                eventType: "org.example.foo",
                batch: [FAKE_MSG],
            });

            await flushAndRunTimersUntil(() => httpBackend.requests.length > 0);
            expect(httpBackend.flushSync(undefined, 1)).toEqual(1);

            await flushAndRunTimersUntil(() => httpBackend.requests.length > 0);
            expect(httpBackend.flushSync(undefined, 1)).toEqual(1);

            // flush, as per comment in first test
            await flushPromises();
        });

        it("stops retrying on 4xx errors", async function () {
            jest.useFakeTimers();
            httpBackend.when("PUT", "/sendToDevice/org.example.foo/").respond(400);

            await client.queueToDevice({
                eventType: "org.example.foo",
                batch: [FAKE_MSG],
            });

            await flushAndRunTimersUntil(() => httpBackend.requests.length > 0);
            expect(httpBackend.flushSync(undefined, 1)).toEqual(1);

            // Asserting that another request is never made is obviously
            // a bit tricky - we just flush the queue what should hopefully
            // be plenty of times and assert that nothing comes through.
            let tries = 0;
            await flushAndRunTimersUntil(() => ++tries === 10);
            expect(httpBackend.requests.length).toEqual(0);
        });

        it("honours ratelimiting", async function () {
            jest.useFakeTimers();

            // pick something obscure enough it's unlikely to clash with a
            // retry delay the algorithm uses anyway
            const retryDelay = 279 * 1000;

            httpBackend.when("PUT", "/sendToDevice/org.example.foo/").respond(429, {
                errcode: "M_LIMIT_EXCEEDED",
                retry_after_ms: retryDelay,
            });
            httpBackend.when("PUT", "/sendToDevice/org.example.foo/").respond(200, {});

            await client.queueToDevice({
                eventType: "org.example.foo",
                batch: [FAKE_MSG],
            });

            await flushAndRunTimersUntil(() => httpBackend.requests.length > 0);
            expect(httpBackend.flushSync(undefined, 1)).toEqual(1);
            await flushPromises();

            logger.info("Advancing clock to just before expected retry time...");
            jest.advanceTimersByTime(retryDelay - 1000);
            await flushPromises();
            expect(httpBackend.requests.length).toEqual(0);

            logger.info("Advancing clock past expected retry time...");
            jest.advanceTimersByTime(2000);
            await flushPromises();
            expect(httpBackend.flushSync(undefined, 1)).toEqual(1);
        });

        it("retries on retryImmediately()", async function () {
            httpBackend.when("GET", "/_matrix/client/versions").respond(200, {
                versions: ["v1.1"],
@@ -308,4 +226,128 @@ describe.each([[StoreType.Memory], [StoreType.IndexedDB]])("queueToDevice (%s st
            // flush, as per comment in first test
            await flushPromises();
        });
    });

    describe("async tests", () => {
        beforeAll(() => {
            jest.useFakeTimers();
        });

        afterAll(() => {
            jest.useRealTimers();
        });

        beforeEach(async function () {
            httpBackend = new MockHttpBackend();

            let store: IStore;
            if (storeType === StoreType.IndexedDB) {
                const idbStore = new IndexedDBStore({ indexedDB: fakeIndexedDB });
                let storeStarted = false;
                idbStore.startup().then(() => {
                    storeStarted = true;
                });
                await flushAndRunTimersUntil(() => storeStarted);
                store = idbStore;
            } else {
                store = new MemoryStore();
            }

            client = new MatrixClient({
                baseUrl: "https://my.home.server",
                accessToken: "my.access.token",
                fetchFn: httpBackend.fetchFn as typeof global.fetch,
                store,
            });
        });

        afterEach(function () {
            client.stopClient();
        });

        it("retries on error", async function () {
            httpBackend.when("PUT", "/sendToDevice/org.example.foo/").respond(500);
            httpBackend
                .when("PUT", "/sendToDevice/org.example.foo/")
                .check((request) => {
                    expect(request.data).toEqual(EXPECTED_BODY);
                })
                .respond(200, {});

            client
                .queueToDevice({
                    eventType: "org.example.foo",
                    batch: [FAKE_MSG],
                })
                .then();

            await flushAndRunTimersUntil(() => httpBackend.requests.length > 0);
            expect(httpBackend.flushSync(undefined, 1)).toEqual(1);

            await flushAndRunTimersUntil(() => httpBackend.requests.length > 0);
            expect(httpBackend.flushSync(undefined, 1)).toEqual(1);

            // flush, as per comment in first test
            await flushPromises();
        });

        it("stops retrying on 4xx errors", async function () {
            httpBackend.when("PUT", "/sendToDevice/org.example.foo/").respond(400);

            client
                .queueToDevice({
                    eventType: "org.example.foo",
                    batch: [FAKE_MSG],
                })
                .then();

            await flushAndRunTimersUntil(() => httpBackend.requests.length > 0);
            expect(httpBackend.flushSync(undefined, 1)).toEqual(1);

            // Asserting that another request is never made is obviously
            // a bit tricky - we just flush the queue what should hopefully
            // be plenty of times and assert that nothing comes through.
            let tries = 0;
            await flushAndRunTimersUntil(() => ++tries === 10);
            expect(httpBackend.requests.length).toEqual(0);
        });

        it("honours ratelimiting", async function () {
            // pick something obscure enough it's unlikely to clash with a
            // retry delay the algorithm uses anyway
            const retryDelay = 279 * 1000;

            httpBackend.when("PUT", "/sendToDevice/org.example.foo/").respond(429, {
                errcode: "M_LIMIT_EXCEEDED",
                retry_after_ms: retryDelay,
            });
            httpBackend.when("PUT", "/sendToDevice/org.example.foo/").respond(200, {});

            client
                .queueToDevice({
                    eventType: "org.example.foo",
                    batch: [FAKE_MSG],
                })
                .then();

            await flushAndRunTimersUntil(() => httpBackend.requests.length > 0);
            expect(httpBackend.flushSync(undefined, 1)).toEqual(1);
            await flushPromises();

            logger.info("Advancing clock to just before expected retry time...");
            jest.advanceTimersByTime(retryDelay - 1000);
            await flushPromises();
            expect(httpBackend.requests.length).toEqual(0);

            logger.info("Advancing clock past expected retry time...");
            jest.advanceTimersByTime(2000);
            await flushPromises();
            expect(httpBackend.flushSync(undefined, 1)).toEqual(1);
        });
    });
});