1
0
mirror of https://github.com/redis/node-redis.git synced 2025-12-11 09:22:35 +03:00

tests: Adjust scenario tests according to latest maint naming changes (#3090)

* rename maint options according to the latest client options
* adjust env variables
The CAE repo uses RE_FAULT_INJECTOR_URL for the fault injector.
DATABASE_NAME is needed to choose among the many databases in CAE.
* fix connection cleanup test
This commit is contained in:
Nikolay Karadzhov
2025-10-06 18:30:18 +03:00
committed by GitHub
parent 0438865b8a
commit adb19c5c5f
9 changed files with 419 additions and 209 deletions

View File

@@ -146,6 +146,7 @@ describe("Timeout Handling During Notifications", () => {
type: "migrate",
parameters: {
cluster_index: 0,
bdb_id: clientConfig.bdbId.toString(),
},
});
@@ -163,7 +164,7 @@ describe("Timeout Handling During Notifications", () => {
"Command Timeout error should be instanceof Error"
);
assert.ok(
durationMigrate > NORMAL_COMMAND_TIMEOUT &&
durationMigrate >= NORMAL_COMMAND_TIMEOUT &&
durationMigrate < NORMAL_COMMAND_TIMEOUT * 1.1,
`Normal command should timeout within normal timeout ms`
);
@@ -198,7 +199,7 @@ describe("Timeout Handling During Notifications", () => {
"Command Timeout error should be instanceof Error"
);
assert.ok(
durationBind > NORMAL_COMMAND_TIMEOUT &&
durationBind >= NORMAL_COMMAND_TIMEOUT &&
durationBind < NORMAL_COMMAND_TIMEOUT * 1.1,
`Normal command should timeout within normal timeout ms`
);
@@ -208,83 +209,4 @@ describe("Timeout Handling During Notifications", () => {
"Command Timeout error should be TimeoutError"
);
});
it("should relax command timeout on FAILING_OVER", async () => {
  // Record, per expected notification type, the error and elapsed duration
  // of a command that blocks while that notification is in effect.
  const notifications: Array<DiagnosticsEvent["type"]> = ["FAILING_OVER"];
  const result: Record<
    DiagnosticsEvent["type"],
    { error: any; duration: number }
  > = {};
  const onMessageHandler = createNotificationMessageHandler(
    client,
    result,
    notifications
  );
  diagnostics_channel.subscribe("redis.maintenance", onMessageHandler);

  // Trigger a failover on the target database and wait for it to complete,
  // so the client observes the FAILING_OVER push notification.
  const { action_id: failoverActionId } =
    await faultInjectorClient.triggerAction({
      type: "failover",
      parameters: {
        bdb_id: clientConfig.bdbId.toString(),
        cluster_index: 0,
      },
    });
  await faultInjectorClient.waitForAction(failoverActionId);
  diagnostics_channel.unsubscribe("redis.maintenance", onMessageHandler);

  // During maintenance the RELAXED timeout must apply (not the normal one),
  // and the error type must reflect the maintenance-specific timeout.
  notifications.forEach((notification) => {
    assert.ok(
      result[notification]?.error instanceof Error,
      `${notification} notification error should be instanceof Error`
    );
    // `>=` (not `>`) so a command timing out exactly at the relaxed limit
    // passes — consistent with the other duration assertions in this suite.
    assert.ok(
      result[notification]?.duration >= RELAXED_COMMAND_TIMEOUT &&
        result[notification]?.duration < RELAXED_COMMAND_TIMEOUT * 1.1,
      `${notification} notification should timeout within relaxed timeout`
    );
    assert.strictEqual(
      result[notification]?.error?.constructor?.name,
      "CommandTimeoutDuringMaintenanceError",
      `${notification} notification error should be CommandTimeoutDuringMaintenanceError`
    );
  });
});
it("should unrelax command timeout after FAILED_OVER", async () => {
  // Run a full failover cycle so the client receives FAILING_OVER and then
  // FAILED_OVER, after which the normal command timeout should be restored.
  const { action_id: failoverActionId } =
    await faultInjectorClient.triggerAction({
      type: "failover",
      parameters: {
        bdb_id: clientConfig.bdbId.toString(),
        cluster_index: 0,
      },
    });
  await faultInjectorClient.waitForAction(failoverActionId);

  // With maintenance over, a blocked command must time out at the NORMAL
  // (unrelaxed) timeout and raise the plain TimeoutError, not the
  // maintenance-specific one.
  const { error, duration } = await blockCommand(async () => {
    await client.set("key", "value");
  });
  assert.ok(
    error instanceof Error,
    "Command Timeout error should be instanceof Error"
  );
  // `>=` keeps this consistent with the other duration assertions in the
  // suite: timing out exactly at the limit is acceptable.
  assert.ok(
    duration >= NORMAL_COMMAND_TIMEOUT &&
      duration < NORMAL_COMMAND_TIMEOUT * 1.1,
    `Normal command should timeout within normal timeout ms`
  );
  assert.strictEqual(
    error?.constructor?.name,
    "TimeoutError",
    "Command Timeout error should be TimeoutError"
  );
});
});