Mirror of https://github.com/redis/node-redis.git (synced 2025-12-12 21:21:15 +03:00)
* feat(errors): Add specialized timeout error types for maintenance scenarios
  - Added `SocketTimeoutDuringMaintananceError`, a subclass of `TimeoutError`, to handle socket timeouts during maintenance.
  - Added `CommandTimeoutDuringMaintenanceError`, another subclass of `TimeoutError`, to address command write timeouts during maintenance.
* feat(linked-list): Add EmptyAwareSinglyLinkedList and enhance DoublyLinkedList functionality
  - Introduced `EmptyAwareSinglyLinkedList`, a subclass of `SinglyLinkedList` that emits an `empty` event when the list becomes empty due to `reset`, `shift`, or `remove` operations.
  - Added a `nodes()` iterator method to `DoublyLinkedList` for iterating over nodes directly.
  - Enhanced unit tests for `DoublyLinkedList` and `SinglyLinkedList` to cover edge cases and new functionality.
  - Added comprehensive tests for `EmptyAwareSinglyLinkedList` to validate `empty` event emission under various scenarios.
  - Improved code formatting and consistency.
* refactor(commands-queue): Improve push notification handling
  - Replaced `setInvalidateCallback` with a more flexible `addPushHandler` method, allowing multiple handlers for push notifications.
  - Introduced the `PushHandler` type to standardize push notification processing.
  - Refactored `RedisCommandsQueue` to use a `#pushHandlers` array, enabling dynamic and modular handling of push notifications.
  - Updated `RedisClient` to leverage the new handler mechanism for `invalidate` push notifications, simplifying and decoupling logic.
* feat(commands-queue): Add method to wait for in-flight commands to complete
  - Introduced the `waitForInflightCommandsToComplete` method to asynchronously wait for all in-flight commands to finish processing (this drain pattern is illustrated in the first sketch below).
  - Utilized the `empty` event from `#waitingForReply` to signal when all commands have completed.
* feat(commands-queue): Introduce maintenance mode support for commands-queue
  - Added `#maintenanceCommandTimeout` and a `setMaintenanceCommandTimeout` method to dynamically adjust command timeouts during maintenance.
* refactor(client): Extract socket event listener setup into a helper method
* refactor(socket): Add maintenance mode support and dynamic timeout handling
  - Added `#maintenanceTimeout` and a `setMaintenanceTimeout` method to dynamically adjust socket timeouts during maintenance.
* feat(client): Add Redis Enterprise maintenance configuration options (see the configuration sketch below)
  - Added the `maintPushNotifications` option to control how the client handles Redis Enterprise maintenance push notifications (`disabled`, `enabled`, `auto`).
  - Added the `maintMovingEndpointType` option to specify the endpoint type for reconnecting during a MOVING notification (`auto`, `internal-ip`, `external-ip`, etc.).
  - Added the `maintRelaxedCommandTimeout` option to define a relaxed timeout for commands during maintenance.
  - Added the `maintRelaxedSocketTimeout` option to define a relaxed timeout for the socket during maintenance.
  - Enforced the RESP3 requirement for maintenance-related features (`maintPushNotifications`).
* feat(client): Add socket helpers and pause mechanism
  - Introduced a `#paused` flag with corresponding `_pause` and `_unpause` methods to temporarily halt writing commands to the socket during maintenance windows.
  - Updated the `#write` method to respect the `#paused` flag, preventing new commands from being written during maintenance.
  - Added an `_ejectSocket` method to safely detach from and return the current socket.
  - Added an `_insertSocket` method to receive and start using a new socket.
* feat(client): Add Redis Enterprise maintenance handling capabilities
  - Introduced `EnterpriseMaintenanceManager` to manage Redis Enterprise maintenance events and push notifications.
  - Integrated `EnterpriseMaintenanceManager` into `RedisClient` to handle maintenance push notifications and manage socket transitions.
  - Implemented graceful handling of MOVING, MIGRATING, and FAILOVER push notifications, including socket replacement and timeout adjustments (see the handoff sketch below).
* test: add E2E test infrastructure for Redis maintenance scenarios
* test: add E2E tests for Redis Enterprise maintenance timeout handling (#3)
* test: add connection handoff test

---------

Co-authored-by: Pavel Pashov <pavel.pashov@redis.com>
Co-authored-by: Pavel Pashov <60297174+PavelPashov@users.noreply.github.com>
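
The `empty` event plus `waitForInflightCommandsToComplete` is essentially a drain pattern: stop adding work, then resolve once the last in-flight item is removed. The snippet below is a standalone TypeScript illustration of that pattern only; `EmptyAwareList` and `waitUntilEmpty` are hypothetical names, and the real `EmptyAwareSinglyLinkedList` inside the commands queue will differ in detail.

import { EventEmitter, once } from "node:events";

// Hypothetical stand-in for an "empty-aware" list: it emits "empty"
// whenever a removal drains the last element, which is what lets a
// queue await completion of its in-flight work.
class EmptyAwareList<T> extends EventEmitter {
  #items: T[] = [];

  push(item: T): void {
    this.#items.push(item);
  }

  shift(): T | undefined {
    const item = this.#items.shift();
    if (item !== undefined && this.#items.length === 0) {
      this.emit("empty");
    }
    return item;
  }

  get length(): number {
    return this.#items.length;
  }

  // Resolves immediately when nothing is in flight, otherwise on "empty".
  async waitUntilEmpty(): Promise<void> {
    if (this.#items.length === 0) return;
    await once(this, "empty");
  }
}

// Usage: a queue draining its in-flight commands before a socket handoff.
const inFlight = new EmptyAwareList<string>();
inFlight.push("GET key");
const drained = inFlight.waitUntilEmpty(); // pending until the list empties
inFlight.shift();                          // reply arrives -> "empty" fires
await drained;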
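
For users, the new behavior surfaces as plain client configuration. Below is a minimal sketch, assuming the options sit at the top level of the `createClient` options object; the values shown are illustrative, and only the option names, their documented choices, and the RESP3 requirement come from the changelog above.

import { createClient } from "redis";

const client = createClient({
  url: "redis://localhost:6379",
  RESP: 3, // maintenance push notifications require RESP3
  maintPushNotifications: "auto",     // "disabled" | "enabled" | "auto"
  maintMovingEndpointType: "auto",    // or "internal-ip", "external-ip", etc.
  maintRelaxedCommandTimeout: 10000,  // ms applied to commands during maintenance
  maintRelaxedSocketTimeout: 10000,   // ms applied to the socket during maintenance
});

await client.connect();

Since the changelog says the RESP3 requirement is enforced for `maintPushNotifications`, enabling it on a RESP2 connection is expected to be rejected rather than silently ignored.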
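
Taken together, the MOVING handling reduces to: relax timeouts, pause writes, drain in-flight commands, swap sockets, resume. The handoff sketch below only illustrates that sequence; `MaintenanceAwareClient`, its method names, and the point at which timeouts are restored are assumptions for illustration, since the real orchestration is internal to `EnterpriseMaintenanceManager` and uses private client and socket APIs (`_pause`, `_ejectSocket`, `_insertSocket`, `setMaintenanceCommandTimeout`, `setMaintenanceTimeout`).

// Hypothetical interface mirroring the hooks named in the changelog;
// every name and signature here is an assumption for illustration.
interface MaintenanceAwareClient {
  setRelaxedCommandTimeout(ms: number | undefined): void; // cf. setMaintenanceCommandTimeout
  setRelaxedSocketTimeout(ms: number | undefined): void;  // cf. setMaintenanceTimeout on the socket
  pauseWrites(): void;                                    // cf. _pause
  unpauseWrites(): void;                                  // cf. _unpause
  waitForInflightCommandsToComplete(): Promise<void>;
  ejectSocket(): unknown;                                 // cf. _ejectSocket (detach and return the socket)
  insertSocket(socket: unknown): void;                    // cf. _insertSocket
  connectTo(endpoint: string): Promise<unknown>;          // hypothetical endpoint dial
}

// Rough shape of a MOVING handler: the push notification names the new
// endpoint, the client drains what is already on the wire, then switches
// sockets before letting queued commands flow again.
async function handleMoving(
  client: MaintenanceAwareClient,
  newEndpoint: string,
  relaxedTimeoutMs: number
): Promise<void> {
  // Widen timeouts so slow replies during maintenance are not treated as failures.
  client.setRelaxedCommandTimeout(relaxedTimeoutMs);
  client.setRelaxedSocketTimeout(relaxedTimeoutMs);

  client.pauseWrites();                             // hold back new commands
  await client.waitForInflightCommandsToComplete(); // let pending replies drain

  client.ejectSocket();                             // detach from the old socket
  const socket = await client.connectTo(newEndpoint);
  client.insertSocket(socket);                      // start using the new socket

  client.unpauseWrites();                           // resume the queued traffic

  // Once maintenance ends, restore the regular timeouts.
  client.setRelaxedCommandTimeout(undefined);
  client.setRelaxedSocketTimeout(undefined);
}

The E2E test file below exercises the timeout half of this flow: a relaxed timeout that is too small should surface `CommandTimeoutDuringMaintananceError` for every command issued during maintenance, a generous one should let every command finish, and the relaxed value must stop applying once maintenance ends.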

TypeScript, 160 lines, 4.6 KiB

import assert from "node:assert";
import { FaultInjectorClient } from "./fault-injector-client";
import {
  ClientFactory,
  getDatabaseConfig,
  getDatabaseConfigFromEnv,
  getEnvConfig,
  RedisConnectionConfig,
  blockSetImmediate
} from "./test-scenario.util";
import { createClient } from "../../..";
import { before } from "mocha";
import { TestCommandRunner } from "./test-command-runner";

describe("Timeout Handling During Notifications", () => {
  let clientConfig: RedisConnectionConfig;
  let clientFactory: ClientFactory;
  let faultInjectorClient: FaultInjectorClient;
  let defaultClient: ReturnType<typeof createClient<any, any, any, any>>;

  before(() => {
    const envConfig = getEnvConfig();
    const redisConfig = getDatabaseConfigFromEnv(
      envConfig.redisEndpointsConfigPath
    );

    clientConfig = getDatabaseConfig(redisConfig);
    faultInjectorClient = new FaultInjectorClient(envConfig.faultInjectorUrl);
    clientFactory = new ClientFactory(clientConfig);
  });

  beforeEach(async () => {
    defaultClient = await clientFactory.create("default");

    await defaultClient.flushAll();
  });

  afterEach(async () => {
    clientFactory.destroyAll();
  });

it("should relax command timeout on MOVING, MIGRATING, and MIGRATED", async () => {
|
|
// PART 1
|
|
// Set very low timeout to trigger errors
|
|
const lowTimeoutClient = await clientFactory.create("lowTimeout", {
|
|
maintRelaxedCommandTimeout: 50,
|
|
});
|
|
|
|
const { action_id: lowTimeoutBindAndMigrateActionId } =
|
|
await faultInjectorClient.migrateAndBindAction({
|
|
bdbId: clientConfig.bdbId,
|
|
clusterIndex: 0,
|
|
});
|
|
|
|
const lowTimeoutWaitPromise = faultInjectorClient.waitForAction(
|
|
lowTimeoutBindAndMigrateActionId
|
|
);
|
|
|
|
const lowTimeoutCommandPromises =
|
|
await TestCommandRunner.fireCommandsUntilStopSignal(
|
|
lowTimeoutClient,
|
|
lowTimeoutWaitPromise
|
|
);
|
|
|
|
const lowTimeoutRejectedCommands = (
|
|
await Promise.all(lowTimeoutCommandPromises.commandPromises)
|
|
).filter((result) => result.status === "rejected");
|
|
|
|
assert.ok(lowTimeoutRejectedCommands.length > 0);
|
|
assert.strictEqual(
|
|
lowTimeoutRejectedCommands.filter((rejected) => {
|
|
return (
|
|
// TODO instanceof doesn't work for some reason
|
|
rejected.error.constructor.name ===
|
|
"CommandTimeoutDuringMaintananceError"
|
|
);
|
|
}).length,
|
|
lowTimeoutRejectedCommands.length
|
|
);
|
|
|
|
// PART 2
|
|
// Set high timeout to avoid errors
|
|
const highTimeoutClient = await clientFactory.create("highTimeout", {
|
|
maintRelaxedCommandTimeout: 10000,
|
|
});
|
|
|
|
const { action_id: highTimeoutBindAndMigrateActionId } =
|
|
await faultInjectorClient.migrateAndBindAction({
|
|
bdbId: clientConfig.bdbId,
|
|
clusterIndex: 0,
|
|
});
|
|
|
|
const highTimeoutWaitPromise = faultInjectorClient.waitForAction(
|
|
highTimeoutBindAndMigrateActionId
|
|
);
|
|
|
|
const highTimeoutCommandPromises =
|
|
await TestCommandRunner.fireCommandsUntilStopSignal(
|
|
highTimeoutClient,
|
|
highTimeoutWaitPromise
|
|
);
|
|
|
|
const highTimeoutRejectedCommands = (
|
|
await Promise.all(highTimeoutCommandPromises.commandPromises)
|
|
).filter((result) => result.status === "rejected");
|
|
|
|
assert.strictEqual(highTimeoutRejectedCommands.length, 0);
|
|
});
|
|
|
|
it("should unrelax command timeout after MAINTENANCE", async () => {
|
|
const clientWithCommandTimeout = await clientFactory.create(
|
|
"clientWithCommandTimeout",
|
|
{
|
|
commandOptions: {
|
|
timeout: 100,
|
|
},
|
|
}
|
|
);
|
|
|
|
const { action_id: bindAndMigrateActionId } =
|
|
await faultInjectorClient.migrateAndBindAction({
|
|
bdbId: clientConfig.bdbId,
|
|
clusterIndex: 0,
|
|
});
|
|
|
|
const lowTimeoutWaitPromise = faultInjectorClient.waitForAction(
|
|
bindAndMigrateActionId
|
|
);
|
|
|
|
const relaxedTimeoutCommandPromises =
|
|
await TestCommandRunner.fireCommandsUntilStopSignal(
|
|
clientWithCommandTimeout,
|
|
lowTimeoutWaitPromise
|
|
);
|
|
|
|
const relaxedTimeoutRejectedCommands = (
|
|
await Promise.all(relaxedTimeoutCommandPromises.commandPromises)
|
|
).filter((result) => result.status === "rejected");
|
|
|
|
assert.ok(relaxedTimeoutRejectedCommands.length === 0);
|
|
|
|
const start = performance.now();
|
|
|
|
let error: any;
|
|
await blockSetImmediate(async () => {
|
|
try {
|
|
await clientWithCommandTimeout.set("key", "value");
|
|
} catch (err: any) {
|
|
error = err;
|
|
}
|
|
});
|
|
|
|
// Make sure it took less than 1sec to fail
|
|
assert.ok(performance.now() - start < 1000);
|
|
assert.ok(error instanceof Error);
|
|
assert.ok(error.constructor.name === "TimeoutError");
|
|
});
|
|
});