Mirror of https://github.com/redis/node-redis.git (synced 2025-12-11 09:22:35 +03:00)
tests: Adjust scenario tests according to latest maint naming changes (#3090)
* Rename maint options according to the latest client options.
* Adjust env variables: the cae repo uses RE_FAULT_INJECTOR_URL for the fault injector, and DATABASE_NAME is needed to choose among the many databases in cae.
* Fix the connection cleanup test.
Committed by GitHub. Parent: 0438865b8a. Commit: adb19c5c5f.
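For orientation, here is a minimal sketch (not part of the commit) of how the renamed client options and the environment variables referenced in this commit fit together. The option names and env variable names are taken from the diff below; the top-level `redis` package import and the `connect()` call are assumptions for the sake of a self-contained example.

```ts
// Sketch only: maintNotifications replaces maintPushNotifications,
// maintEndpointType replaces maintMovingEndpointType.
import { createClient } from "redis";

// Environment variables used by the scenario tests after this change:
//   REDIS_ENDPOINTS_CONFIG_PATH - path to the databases config file
//   RE_FAULT_INJECTOR_URL       - fault injector base URL (was FAULT_INJECTION_API_URL)
//   DATABASE_NAME               - selects one of the many databases in the cae config
const client = createClient({
  RESP: 3,                    // maintenance notifications require RESP3
  maintNotifications: "auto", // was maintPushNotifications
  maintEndpointType: "auto",  // was maintMovingEndpointType
});

await client.connect();
```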
@@ -11,7 +11,7 @@ import {
 } from "./test-scenario.util";
 import { createClient } from "../../..";
 import { FaultInjectorClient } from "./fault-injector-client";
-import { MovingEndpointType } from "../../../dist/lib/client/enterprise-maintenance-manager";
+import { MovingEndpointType } from "../../../lib/client/enterprise-maintenance-manager";
 import { RedisTcpSocketOptions } from "../../client/socket";
 
 describe("Client Configuration and Handshake", () => {
@@ -59,7 +59,7 @@ describe("Client Configuration and Handshake", () => {
 it(`clientHandshakeWithEndpointType '${endpointType}'`, async () => {
 try {
 client = await createTestClient(clientConfig, {
-maintMovingEndpointType: endpointType,
+maintEndpointType: endpointType
 });
 client.on("error", () => {});
 
@@ -154,7 +154,7 @@ describe("Client Configuration and Handshake", () => {
 describe("Feature Enablement", () => {
 it("connectionHandshakeIncludesEnablingNotifications", async () => {
 client = await createTestClient(clientConfig, {
-maintPushNotifications: "enabled",
+maintNotifications: "enabled"
 });
 
 const { action_id } = await faultInjectorClient.migrateAndBindAction({
@@ -180,7 +180,7 @@ describe("Client Configuration and Handshake", () => {
 it("disabledDontReceiveNotifications", async () => {
 try {
 client = await createTestClient(clientConfig, {
-maintPushNotifications: "disabled",
+maintNotifications: "disabled",
 socket: {
 reconnectStrategy: false
 }
@@ -86,25 +86,25 @@ describe("Connection Handoff", () => {
 {
 name: "external-ip",
 clientOptions: {
-maintMovingEndpointType: "external-ip",
+maintEndpointType: "external-ip",
 },
 },
 {
 name: "external-fqdn",
 clientOptions: {
-maintMovingEndpointType: "external-fqdn",
+maintEndpointType: "external-fqdn",
 },
 },
 {
 name: "auto",
 clientOptions: {
-maintMovingEndpointType: "auto",
+maintEndpointType: "auto",
 },
 },
 {
 name: "none",
 clientOptions: {
-maintMovingEndpointType: "none",
+maintEndpointType: "none",
 },
 },
 ];
@@ -156,6 +156,7 @@ describe("Connection Handoff", () => {
 
 describe("Connection Cleanup", () => {
 it("should shut down old connection", async () => {
+client = await createTestClient(clientConfig);
 const spyObject = spyOnTemporaryClientInstanceMethod(client, "destroy");
 
 const { action_id: lowTimeoutBindAndMigrateActionId } =
@@ -54,6 +54,20 @@ export class FaultInjectorClient {
 return this.#request<T>("POST", "/action", action);
 }
 
+// public async printStatus() {
+// const action = {
+// type: 'execute_rladmin_command',
+// parameters: {
+// rladmin_command: "status",
+// bdb_id: "1"
+// }
+// }
+// const { action_id } = await this.#request<{action_id: string}>("POST", "/action", action);
+// const status = await this.waitForAction(action_id);
+// //@ts-ignore
+// console.log(status.output.output);
+// }
+
 /**
 * Gets the status of a specific action.
 * @param actionId The ID of the action to check
@@ -87,7 +101,13 @@ export class FaultInjectorClient {
 while (Date.now() - startTime < maxWaitTime) {
 const action = await this.getActionStatus<ActionStatus>(actionId);
 
-if (["finished", "failed", "success"].includes(action.status)) {
+if (action.status === "failed") {
+throw new Error(
+`Action id: ${actionId} failed! Error: ${action.error}`
+);
+}
+
+if (["finished", "success"].includes(action.status)) {
 return action;
 }
 
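The `waitForAction` change above now rejects when the fault injector reports a failed action, instead of treating "failed" as a terminal success. A self-contained sketch of the resulting polling semantics follows; the poll interval and the timeout error at the end are assumptions, not code from this commit.

```ts
// Hypothetical standalone poller illustrating the new failure semantics of waitForAction.
type ActionStatus = { status: string; error?: unknown };

async function pollUntilDone(
  getStatus: (id: string) => Promise<ActionStatus>,
  actionId: string,
  maxWaitTime = 60_000,
  pollInterval = 500
): Promise<ActionStatus> {
  const startTime = Date.now();
  while (Date.now() - startTime < maxWaitTime) {
    const action = await getStatus(actionId);
    if (action.status === "failed") {
      // New behavior: a failed action rejects instead of resolving like "finished"/"success".
      throw new Error(`Action id: ${actionId} failed! Error: ${action.error}`);
    }
    if (["finished", "success"].includes(action.status)) {
      return action;
    }
    await new Promise((resolve) => setTimeout(resolve, pollInterval)); // assumed delay
  }
  throw new Error(`Timed out waiting for action ${actionId}`); // assumed timeout path
}
```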
@@ -118,6 +138,7 @@ export class FaultInjectorClient {
 type: "migrate",
 params: {
 cluster_index: clusterIndexStr,
+bdb_id: bdbIdStr,
 },
 },
 {
@@ -7,7 +7,7 @@ describe("Negative tests", () => {
 () =>
 createClient({
 RESP: 2,
-maintPushNotifications: "enabled",
+maintNotifications: "enabled",
 }),
 "Error: Graceful Maintenance is only supported with RESP3",
 );
packages/client/lib/tests/test-scenario/pn-failover.e2e.ts (new file, 226 lines)
@@ -0,0 +1,226 @@
+import assert from "node:assert";
+import diagnostics_channel from "node:diagnostics_channel";
+import { FaultInjectorClient } from "./fault-injector-client";
+import {
+createTestClient,
+getDatabaseConfig,
+getDatabaseConfigFromEnv,
+getEnvConfig,
+RedisConnectionConfig,
+} from "./test-scenario.util";
+import { createClient } from "../../..";
+import { DiagnosticsEvent } from "../../client/enterprise-maintenance-manager";
+import { before } from "mocha";
+
+describe("Push Notifications", () => {
+const createNotificationMessageHandler = (
+result: Record<DiagnosticsEvent["type"], number>,
+notifications: Array<DiagnosticsEvent["type"]>
+) => {
+return (message: unknown) => {
+if (notifications.includes((message as DiagnosticsEvent).type)) {
+const event = message as DiagnosticsEvent;
+result[event.type] = (result[event.type] ?? 0) + 1;
+}
+};
+};
+
+let onMessageHandler: ReturnType<typeof createNotificationMessageHandler>;
+let clientConfig: RedisConnectionConfig;
+let client: ReturnType<typeof createClient<any, any, any, any>>;
+let faultInjectorClient: FaultInjectorClient;
+
+before(() => {
+const envConfig = getEnvConfig();
+const redisConfig = getDatabaseConfigFromEnv(
+envConfig.redisEndpointsConfigPath
+);
+
+faultInjectorClient = new FaultInjectorClient(envConfig.faultInjectorUrl);
+clientConfig = getDatabaseConfig(redisConfig);
+});
+
+afterEach(() => {
+if (onMessageHandler!) {
+diagnostics_channel.unsubscribe("redis.maintenance", onMessageHandler);
+}
+
+if (client && client.isOpen) {
+client.destroy();
+}
+});
+
+describe("Push Notifications Enabled", () => {
+beforeEach(async () => {
+client = await createTestClient(clientConfig);
+
+await client.flushAll();
+});
+
+it("should receive FAILING_OVER and FAILED_OVER push notifications", async () => {
+const notifications: Array<DiagnosticsEvent["type"]> = [
+"FAILING_OVER",
+"FAILED_OVER",
+];
+
+const diagnosticsMap: Record<DiagnosticsEvent["type"], number> = {};
+
+onMessageHandler = createNotificationMessageHandler(
+diagnosticsMap,
+notifications
+);
+
+diagnostics_channel.subscribe("redis.maintenance", onMessageHandler);
+
+const { action_id: failoverActionId } =
+await faultInjectorClient.triggerAction({
+type: "failover",
+parameters: {
+bdb_id: clientConfig.bdbId.toString(),
+cluster_index: 0,
+},
+});
+
+await faultInjectorClient.waitForAction(failoverActionId);
+
+assert.strictEqual(
+diagnosticsMap.FAILING_OVER,
+1,
+"Should have received exactly one FAILING_OVER notification"
+);
+assert.strictEqual(
+diagnosticsMap.FAILED_OVER,
+1,
+"Should have received exactly one FAILED_OVER notification"
+);
+});
+});
+
+describe("Push Notifications Disabled - Client", () => {
+beforeEach(async () => {
+client = await createTestClient(clientConfig, {
+maintNotifications: "disabled",
+});
+
+client.on("error", (_err) => {
+// Expect the socket to be closed
+// Ignore errors
+});
+
+await client.flushAll();
+});
+
+it("should NOT receive FAILING_OVER and FAILED_OVER push notifications", async () => {
+const notifications: Array<DiagnosticsEvent["type"]> = [
+"FAILING_OVER",
+"FAILED_OVER",
+];
+
+const diagnosticsMap: Record<DiagnosticsEvent["type"], number> = {};
+
+onMessageHandler = createNotificationMessageHandler(
+diagnosticsMap,
+notifications
+);
+
+diagnostics_channel.subscribe("redis.maintenance", onMessageHandler);
+
+const { action_id: failoverActionId } =
+await faultInjectorClient.triggerAction({
+type: "failover",
+parameters: {
+bdb_id: clientConfig.bdbId.toString(),
+cluster_index: 0,
+},
+});
+
+await faultInjectorClient.waitForAction(failoverActionId);
+
+assert.strictEqual(
+diagnosticsMap.FAILING_OVER,
+undefined,
+"Should have received exactly one FAILING_OVER notification"
+);
+assert.strictEqual(
+diagnosticsMap.FAILED_OVER,
+undefined,
+"Should have received exactly one FAILED_OVER notification"
+);
+});
+});
+
+describe("Push Notifications Disabled - Server", () => {
+beforeEach(async () => {
+client = await createTestClient(clientConfig);
+
+client.on("error", (_err) => {
+// Expect the socket to be closed
+// Ignore errors
+});
+
+await client.flushAll();
+});
+
+before(async () => {
+const { action_id: disablePushNotificationsActionId } =
+await faultInjectorClient.triggerAction({
+type: "update_cluster_config",
+parameters: {
+config: { client_maint_notifications: false },
+},
+});
+
+await faultInjectorClient.waitForAction(disablePushNotificationsActionId);
+});
+
+after(async () => {
+const { action_id: enablePushNotificationsActionId } =
+await faultInjectorClient.triggerAction({
+type: "update_cluster_config",
+parameters: {
+config: { client_maint_notifications: true },
+},
+});
+
+await faultInjectorClient.waitForAction(enablePushNotificationsActionId);
+});
+
+it("should NOT receive FAILING_OVER and FAILED_OVER push notifications", async () => {
+const notifications: Array<DiagnosticsEvent["type"]> = [
+"FAILING_OVER",
+"FAILED_OVER",
+];
+
+const diagnosticsMap: Record<DiagnosticsEvent["type"], number> = {};
+
+onMessageHandler = createNotificationMessageHandler(
+diagnosticsMap,
+notifications
+);
+
+diagnostics_channel.subscribe("redis.maintenance", onMessageHandler);
+
+const { action_id: failoverActionId } =
+await faultInjectorClient.triggerAction({
+type: "failover",
+parameters: {
+bdb_id: clientConfig.bdbId.toString(),
+cluster_index: 0,
+},
+});
+
+await faultInjectorClient.waitForAction(failoverActionId);
+
+assert.strictEqual(
+diagnosticsMap.FAILING_OVER,
+undefined,
+"Should have received exactly one FAILING_OVER notification"
+);
+assert.strictEqual(
+diagnosticsMap.FAILED_OVER,
+undefined,
+"Should have received exactly one FAILED_OVER notification"
+);
+});
+});
+});
@@ -98,49 +98,12 @@ describe("Push Notifications", () => {
 );
 });
 
-it("should receive FAILING_OVER and FAILED_OVER push notifications", async () => {
-const notifications: Array<DiagnosticsEvent["type"]> = [
-"FAILING_OVER",
-"FAILED_OVER",
-];
-
-const diagnosticsMap: Record<DiagnosticsEvent["type"], number> = {};
-
-onMessageHandler = createNotificationMessageHandler(
-diagnosticsMap,
-notifications
-);
-
-diagnostics_channel.subscribe("redis.maintenance", onMessageHandler);
-
-const { action_id: failoverActionId } =
-await faultInjectorClient.triggerAction({
-type: "failover",
-parameters: {
-bdb_id: clientConfig.bdbId.toString(),
-cluster_index: 0,
-},
-});
-
-await faultInjectorClient.waitForAction(failoverActionId);
-
-assert.strictEqual(
-diagnosticsMap.FAILING_OVER,
-1,
-"Should have received exactly one FAILING_OVER notification"
-);
-assert.strictEqual(
-diagnosticsMap.FAILED_OVER,
-1,
-"Should have received exactly one FAILED_OVER notification"
-);
-});
 });
 
 describe("Push Notifications Disabled - Client", () => {
 beforeEach(async () => {
 client = await createTestClient(clientConfig, {
-maintPushNotifications: "disabled",
+maintNotifications: "disabled",
 });
 
 client.on("error", (_err) => {
@@ -192,43 +155,6 @@ describe("Push Notifications", () => {
 );
 });
 
-it("should NOT receive FAILING_OVER and FAILED_OVER push notifications", async () => {
-const notifications: Array<DiagnosticsEvent["type"]> = [
-"FAILING_OVER",
-"FAILED_OVER",
-];
-
-const diagnosticsMap: Record<DiagnosticsEvent["type"], number> = {};
-
-onMessageHandler = createNotificationMessageHandler(
-diagnosticsMap,
-notifications
-);
-
-diagnostics_channel.subscribe("redis.maintenance", onMessageHandler);
-
-const { action_id: failoverActionId } =
-await faultInjectorClient.triggerAction({
-type: "failover",
-parameters: {
-bdb_id: clientConfig.bdbId.toString(),
-cluster_index: 0,
-},
-});
-
-await faultInjectorClient.waitForAction(failoverActionId);
-
-assert.strictEqual(
-diagnosticsMap.FAILING_OVER,
-undefined,
-"Should have received exactly one FAILING_OVER notification"
-);
-assert.strictEqual(
-diagnosticsMap.FAILED_OVER,
-undefined,
-"Should have received exactly one FAILED_OVER notification"
-);
-});
 });
 
 describe("Push Notifications Disabled - Server", () => {
@@ -308,42 +234,5 @@ describe("Push Notifications", () => {
 );
 });
 
-it("should NOT receive FAILING_OVER and FAILED_OVER push notifications", async () => {
-const notifications: Array<DiagnosticsEvent["type"]> = [
-"FAILING_OVER",
-"FAILED_OVER",
-];
-
-const diagnosticsMap: Record<DiagnosticsEvent["type"], number> = {};
-
-onMessageHandler = createNotificationMessageHandler(
-diagnosticsMap,
-notifications
-);
-
-diagnostics_channel.subscribe("redis.maintenance", onMessageHandler);
-
-const { action_id: failoverActionId } =
-await faultInjectorClient.triggerAction({
-type: "failover",
-parameters: {
-bdb_id: clientConfig.bdbId.toString(),
-cluster_index: 0,
-},
-});
-
-await faultInjectorClient.waitForAction(failoverActionId);
-
-assert.strictEqual(
-diagnosticsMap.FAILING_OVER,
-undefined,
-"Should have received exactly one FAILING_OVER notification"
-);
-assert.strictEqual(
-diagnosticsMap.FAILED_OVER,
-undefined,
-"Should have received exactly one FAILED_OVER notification"
-);
-});
 });
 });
@@ -43,13 +43,13 @@ export function getEnvConfig(): EnvConfig {
 );
 }
 
-if (!process.env.FAULT_INJECTION_API_URL) {
-throw new Error("FAULT_INJECTION_API_URL environment variable must be set");
+if (!process.env.RE_FAULT_INJECTOR_URL) {
+throw new Error("RE_FAULT_INJECTOR_URL environment variable must be set");
 }
 
 return {
 redisEndpointsConfigPath: process.env.REDIS_ENDPOINTS_CONFIG_PATH,
-faultInjectorUrl: process.env.FAULT_INJECTION_API_URL,
+faultInjectorUrl: process.env.RE_FAULT_INJECTOR_URL,
 };
 }
 
@@ -86,7 +86,7 @@ export interface RedisConnectionConfig {
 */
 export function getDatabaseConfig(
 databasesConfig: DatabasesConfig,
-databaseName?: string
+databaseName = process.env.DATABASE_NAME
 ): RedisConnectionConfig {
 const dbConfig = databaseName
 ? databasesConfig[databaseName]
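The `getDatabaseConfig` change above defaults the database name to `process.env.DATABASE_NAME`. A small hypothetical helper mirroring that behavior is sketched below; the config shape is simplified and the fallback branch lies outside the shown hunk, so it is assumed here.

```ts
// Hypothetical mirror of the new default; not the repository's actual implementation.
type DatabasesConfig = Record<string, { host: string; port: number }>;

function pickDatabase(
  databasesConfig: DatabasesConfig,
  databaseName = process.env.DATABASE_NAME // new default from this commit
) {
  const dbConfig = databaseName
    ? databasesConfig[databaseName]
    : Object.values(databasesConfig)[0]; // assumed fallback: first configured database
  if (!dbConfig) {
    throw new Error(`No database config found for "${databaseName}"`);
  }
  return dbConfig;
}
```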
@@ -163,8 +163,8 @@ export async function createTestClient(
 password: clientConfig.password,
 username: clientConfig.username,
 RESP: 3,
-maintPushNotifications: "auto",
-maintMovingEndpointType: "auto",
+maintNotifications: "auto",
+maintEndpointType: "auto",
 ...options,
 });
 
@@ -146,6 +146,7 @@ describe("Timeout Handling During Notifications", () => {
 type: "migrate",
 parameters: {
 cluster_index: 0,
+bdb_id: clientConfig.bdbId.toString(),
 },
 });
 
@@ -163,7 +164,7 @@ describe("Timeout Handling During Notifications", () => {
 "Command Timeout error should be instanceof Error"
 );
 assert.ok(
-durationMigrate > NORMAL_COMMAND_TIMEOUT &&
+durationMigrate >= NORMAL_COMMAND_TIMEOUT &&
 durationMigrate < NORMAL_COMMAND_TIMEOUT * 1.1,
 `Normal command should timeout within normal timeout ms`
 );
@@ -198,7 +199,7 @@ describe("Timeout Handling During Notifications", () => {
 "Command Timeout error should be instanceof Error"
 );
 assert.ok(
-durationBind > NORMAL_COMMAND_TIMEOUT &&
+durationBind >= NORMAL_COMMAND_TIMEOUT &&
 durationBind < NORMAL_COMMAND_TIMEOUT * 1.1,
 `Normal command should timeout within normal timeout ms`
 );
@@ -208,83 +209,4 @@ describe("Timeout Handling During Notifications", () => {
 "Command Timeout error should be TimeoutError"
 );
 });
-
-it("should relax command timeout on FAILING_OVER", async () => {
-const notifications: Array<DiagnosticsEvent["type"]> = ["FAILING_OVER"];
-
-const result: Record<
-DiagnosticsEvent["type"],
-{ error: any; duration: number }
-> = {};
-
-const onMessageHandler = createNotificationMessageHandler(
-client,
-result,
-notifications
-);
-
-diagnostics_channel.subscribe("redis.maintenance", onMessageHandler);
-
-const { action_id: failoverActionId } =
-await faultInjectorClient.triggerAction({
-type: "failover",
-parameters: {
-bdb_id: clientConfig.bdbId.toString(),
-cluster_index: 0,
-},
-});
-
-await faultInjectorClient.waitForAction(failoverActionId);
-
-diagnostics_channel.unsubscribe("redis.maintenance", onMessageHandler);
-
-notifications.forEach((notification) => {
-assert.ok(
-result[notification]?.error instanceof Error,
-`${notification} notification error should be instanceof Error`
-);
-assert.ok(
-result[notification]?.duration > RELAXED_COMMAND_TIMEOUT &&
-result[notification]?.duration < RELAXED_COMMAND_TIMEOUT * 1.1,
-`${notification} notification should timeout within relaxed timeout`
-);
-assert.strictEqual(
-result[notification]?.error?.constructor?.name,
-"CommandTimeoutDuringMaintenanceError",
-`${notification} notification error should be CommandTimeoutDuringMaintenanceError`
-);
-});
-});
-
-it("should unrelax command timeout after FAILED_OVER", async () => {
-const { action_id: failoverActionId } =
-await faultInjectorClient.triggerAction({
-type: "failover",
-parameters: {
-bdb_id: clientConfig.bdbId.toString(),
-cluster_index: 0,
-},
-});
-
-await faultInjectorClient.waitForAction(failoverActionId);
-
-const { error, duration } = await blockCommand(async () => {
-await client.set("key", "value");
-});
-
-assert.ok(
-error instanceof Error,
-"Command Timeout error should be instanceof Error"
-);
-assert.ok(
-duration > NORMAL_COMMAND_TIMEOUT &&
-duration < NORMAL_COMMAND_TIMEOUT * 1.1,
-`Normal command should timeout within normal timeout ms`
-);
-assert.strictEqual(
-error?.constructor?.name,
-"TimeoutError",
-"Command Timeout error should be TimeoutError"
-);
-});
 });
packages/client/lib/tests/test-scenario/to-failover.e2e.ts (new file, 151 lines)
@@ -0,0 +1,151 @@
+import assert from "node:assert";
+
+import { FaultInjectorClient } from "./fault-injector-client";
+import {
+getDatabaseConfig,
+getDatabaseConfigFromEnv,
+getEnvConfig,
+RedisConnectionConfig,
+blockCommand,
+createTestClient,
+} from "./test-scenario.util";
+import { createClient } from "../../..";
+import { before } from "mocha";
+import diagnostics_channel from "node:diagnostics_channel";
+import { DiagnosticsEvent } from "../../client/enterprise-maintenance-manager";
+
+describe("Timeout Handling During Notifications", () => {
+let clientConfig: RedisConnectionConfig;
+let faultInjectorClient: FaultInjectorClient;
+let client: ReturnType<typeof createClient<any, any, any, any>>;
+
+const NORMAL_COMMAND_TIMEOUT = 50;
+const RELAXED_COMMAND_TIMEOUT = 2000;
+
+/**
+* Creates a handler for the `redis.maintenance` channel that will execute and block a command on the client
+* when a notification is received and save the result in the `result` object.
+* This is used to test that the command timeout is relaxed during notifications.
+*/
+const createNotificationMessageHandler = (
+client: ReturnType<typeof createClient<any, any, any, any>>,
+result: Record<DiagnosticsEvent["type"], { error: any; duration: number }>,
+notifications: Array<DiagnosticsEvent["type"]>
+) => {
+return (message: unknown) => {
+if (notifications.includes((message as DiagnosticsEvent).type)) {
+setImmediate(async () => {
+result[(message as DiagnosticsEvent).type] = await blockCommand(
+async () => {
+await client.set("key", "value");
+}
+);
+});
+}
+};
+};
+
+before(() => {
+const envConfig = getEnvConfig();
+const redisConfig = getDatabaseConfigFromEnv(
+envConfig.redisEndpointsConfigPath
+);
+
+clientConfig = getDatabaseConfig(redisConfig);
+faultInjectorClient = new FaultInjectorClient(envConfig.faultInjectorUrl);
+});
+
+beforeEach(async () => {
+client = await createTestClient(clientConfig, {
+commandOptions: { timeout: NORMAL_COMMAND_TIMEOUT },
+maintRelaxedCommandTimeout: RELAXED_COMMAND_TIMEOUT,
+});
+
+await client.flushAll();
+});
+
+afterEach(() => {
+if (client && client.isOpen) {
+client.destroy();
+}
+});
+
+it("should relax command timeout on FAILING_OVER", async () => {
+const notifications: Array<DiagnosticsEvent["type"]> = ["FAILING_OVER"];
+
+const result: Record<
+DiagnosticsEvent["type"],
+{ error: any; duration: number }
+> = {};
+
+const onMessageHandler = createNotificationMessageHandler(
+client,
+result,
+notifications
+);
+
+diagnostics_channel.subscribe("redis.maintenance", onMessageHandler);
+
+const { action_id: failoverActionId } =
+await faultInjectorClient.triggerAction({
+type: "failover",
+parameters: {
+bdb_id: clientConfig.bdbId.toString(),
+cluster_index: 0,
+},
+});
+
+await faultInjectorClient.waitForAction(failoverActionId);
+
+diagnostics_channel.unsubscribe("redis.maintenance", onMessageHandler);
+
+notifications.forEach((notification) => {
+assert.ok(
+result[notification]?.error instanceof Error,
+`${notification} notification error should be instanceof Error`
+);
+assert.ok(
+result[notification]?.duration > RELAXED_COMMAND_TIMEOUT &&
+result[notification]?.duration < RELAXED_COMMAND_TIMEOUT * 1.1,
+`${notification} notification should timeout within relaxed timeout`
+);
+assert.strictEqual(
+result[notification]?.error?.constructor?.name,
+"CommandTimeoutDuringMaintenanceError",
+`${notification} notification error should be CommandTimeoutDuringMaintenanceError`
+);
+});
+});
+
+it("should unrelax command timeout after FAILED_OVER", async () => {
+const { action_id: failoverActionId } =
+await faultInjectorClient.triggerAction({
+type: "failover",
+parameters: {
+bdb_id: clientConfig.bdbId.toString(),
+cluster_index: 0,
+},
+});
+
+await faultInjectorClient.waitForAction(failoverActionId);
+
+const { error, duration } = await blockCommand(async () => {
+await client.set("key", "value");
+});
+
+assert.ok(
+error instanceof Error,
+"Command Timeout error should be instanceof Error"
+);
+assert.ok(
+duration > NORMAL_COMMAND_TIMEOUT &&
+duration < NORMAL_COMMAND_TIMEOUT * 1.1,
+`Normal command should timeout within normal timeout ms`
+);
+assert.strictEqual(
+error?.constructor?.name,
+"TimeoutError",
+"Command Timeout error should be TimeoutError"
+);
+});
+});