node-redis (mirror of https://github.com/redis/node-redis.git)
Commit: cluster
@@ -15,23 +15,13 @@ import { ScanOptions, ScanCommonOptions } from '../commands/SCAN';
import { RedisLegacyClient, RedisLegacyClientType } from './legacy-mode';
import { RedisPoolOptions, RedisClientPool } from './pool';

interface ClientCommander<
M extends RedisModules,
F extends RedisFunctions,
S extends RedisScripts,
RESP extends RespVersions,
TYPE_MAPPING extends TypeMapping
> extends CommanderConfig<M, F, S, RESP> {
commandOptions?: CommandOptions<TYPE_MAPPING>;
}

export interface RedisClientOptions<
M extends RedisModules = RedisModules,
F extends RedisFunctions = RedisFunctions,
S extends RedisScripts = RedisScripts,
RESP extends RespVersions = RespVersions,
TYPE_MAPPING extends TypeMapping = TypeMapping
> extends ClientCommander<M, F, S, RESP, TYPE_MAPPING> {
> extends CommanderConfig<M, F, S, RESP> {
/**
* `redis[s]://[[username][:password]@][host][:port][/db-number]`
* See [`redis`](https://www.iana.org/assignments/uri-schemes/prov/redis) and [`rediss`](https://www.iana.org/assignments/uri-schemes/prov/rediss) IANA registration for more details
@@ -75,6 +65,10 @@ export interface RedisClientOptions<
* Useful with Redis deployments that do not honor TCP Keep-Alive.
*/
pingInterval?: number;
/**
* TODO
*/
commandOptions?: CommandOptions<TYPE_MAPPING>;
}

type WithCommands<
@@ -205,9 +199,8 @@ export default class RedisClient<
M extends RedisModules = {},
F extends RedisFunctions = {},
S extends RedisScripts = {},
RESP extends RespVersions = 2,
TYPE_MAPPING extends TypeMapping = {}
>(config?: ClientCommander<M, F, S, RESP, TYPE_MAPPING>) {
RESP extends RespVersions = 2
>(config?: CommanderConfig<M, F, S, RESP>) {
const Client = attachConfig({
BaseClass: RedisClient,
commands: COMMANDS,
@@ -220,7 +213,9 @@ export default class RedisClient<

Client.prototype.Multi = RedisClientMultiCommand.extend(config);

return (options?: Omit<RedisClientOptions, keyof Exclude<typeof config, undefined>>) => {
return <TYPE_MAPPING extends TypeMapping = {}>(
options?: Omit<RedisClientOptions<M, F, S, RESP, TYPE_MAPPING>, keyof Exclude<typeof config, undefined>>
) => {
// returning a "proxy" to prevent the namespaces.self to leak between "proxies"
return Object.create(new Client(options)) as RedisClientType<M, F, S, RESP, TYPE_MAPPING>;
};
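The factory above wraps each instance in `Object.create(new Client(options))` so that lazily attached per-instance state (the `namespaces.self` mentioned in the comment) lands on the wrapper rather than leaking across instances. A minimal sketch of that pattern, using a hypothetical `Base` class instead of the real `RedisClient`:

```typescript
// Sketch only: `Base` is a stand-in, not a node-redis class.
class Base {
  constructor(readonly options?: { name?: string }) {}
}

const inner = new Base({ name: 'a' });
// The returned "proxy" delegates to `inner` through the prototype chain...
const proxy = Object.create(inner) as Base & { cached?: unknown };

proxy.cached = 'per-proxy state'; // ...but own properties stay on the wrapper,
console.log('cached' in inner);   // false: nothing leaked onto the shared instance
console.log(proxy.options?.name); // 'a': reads still fall through to `inner`
```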
@@ -4,7 +4,7 @@ import RedisClient, { RedisClientOptions, RedisClientType } from '../client';
import { types } from 'node:util';
import { EventEmitter } from 'node:stream';
import { ChannelListeners, PubSubType, PubSubTypeListeners } from '../client/pub-sub';
import { RedisArgument, RedisFunctions, RedisModules, RedisScripts, RespVersions } from '../RESP/types';
import { RedisArgument, RedisFunctions, RedisModules, RedisScripts, RespVersions, TypeMapping } from '../RESP/types';

// TODO: ?!
// We need to use 'require', because it's not possible with Typescript to import
@@ -21,34 +21,26 @@ export type NodeAddressMap = {
[address: string]: NodeAddress;
} | ((address: string) => NodeAddress | undefined);

type ValueOrPromise<T> = T | Promise<T>;

type ClientOrPromise<
M extends RedisModules,
F extends RedisFunctions,
S extends RedisScripts,
RESP extends RespVersions = 2
> = ValueOrPromise<RedisClientType<M, F, S, RESP>>;

export interface Node<
M extends RedisModules,
F extends RedisFunctions,
S extends RedisScripts,
RESP extends RespVersions
RESP extends RespVersions,
TYPE_MAPPING extends TypeMapping
> {
address: string;
client?: ClientOrPromise<M, F, S, RESP>;
client?: RedisClientType<M, F, S, RESP, TYPE_MAPPING>;
connectPromise?: Promise<RedisClientType<M, F, S, RESP, TYPE_MAPPING>>;
}

export interface ShardNode<
M extends RedisModules,
F extends RedisFunctions,
S extends RedisScripts,
RESP extends RespVersions
> extends Node<M, F, S, RESP> {
RESP extends RespVersions,
TYPE_MAPPING extends TypeMapping
> extends Node<M, F, S, RESP, TYPE_MAPPING>, NodeAddress {
id: string;
host: string;
port: number;
readonly: boolean;
}

@@ -56,35 +48,45 @@ export interface MasterNode<
M extends RedisModules,
F extends RedisFunctions,
S extends RedisScripts,
RESP extends RespVersions
> extends ShardNode<M, F, S, RESP> {
pubSubClient?: ClientOrPromise<M, F, S, RESP>;
RESP extends RespVersions,
TYPE_MAPPING extends TypeMapping
> extends ShardNode<M, F, S, RESP, TYPE_MAPPING> {
pubSub?: {
connectPromise?: Promise<RedisClientType<M, F, S, RESP, TYPE_MAPPING>>;
client: RedisClientType<M, F, S, RESP, TYPE_MAPPING>;
};
}

export interface Shard<
M extends RedisModules,
F extends RedisFunctions,
S extends RedisScripts,
RESP extends RespVersions
RESP extends RespVersions,
TYPE_MAPPING extends TypeMapping
> {
master: MasterNode<M, F, S, RESP>;
replicas?: Array<ShardNode<M, F, S, RESP>>;
nodesIterator?: IterableIterator<ShardNode<M, F, S, RESP>>;
master: MasterNode<M, F, S, RESP, TYPE_MAPPING>;
replicas?: Array<ShardNode<M, F, S, RESP, TYPE_MAPPING>>;
nodesIterator?: IterableIterator<ShardNode<M, F, S, RESP, TYPE_MAPPING>>;
}

type ShardWithReplicas<
M extends RedisModules,
F extends RedisFunctions,
S extends RedisScripts,
RESP extends RespVersions
> = Shard<M, F, S, RESP> & Required<Pick<Shard<M, F, S, RESP>, 'replicas'>>;
RESP extends RespVersions,
TYPE_MAPPING extends TypeMapping
> = Shard<M, F, S, RESP, TYPE_MAPPING> & Required<Pick<Shard<M, F, S, RESP, TYPE_MAPPING>, 'replicas'>>;

export type PubSubNode<
type PubSubNode<
M extends RedisModules,
F extends RedisFunctions,
S extends RedisScripts,
RESP extends RespVersions
> = Required<Node<M, F, S, RESP>>;
RESP extends RespVersions,
TYPE_MAPPING extends TypeMapping
> = (
Exclude<Node<M, F, S, RESP, TYPE_MAPPING>, 'client'> &
Required<Pick<Node<M, F, S, RESP, TYPE_MAPPING>, 'client'>>
);

type PubSubToResubscribe = Record<
PubSubType.CHANNELS | PubSubType.PATTERNS,
@@ -101,19 +103,19 @@ export default class RedisClusterSlots<
M extends RedisModules,
F extends RedisFunctions,
S extends RedisScripts,
RESP extends RespVersions
RESP extends RespVersions,
TYPE_MAPPING extends TypeMapping
> {
private static _SLOTS = 16384;

private readonly _options: RedisClusterOptions<M, F, S, RESP>;
private readonly _options: RedisClusterOptions<M, F, S, RESP, TYPE_MAPPING>;
private readonly _clientFactory: ReturnType<typeof RedisClient.factory<M, F, S, RESP>>;
private readonly _emit: EventEmitter['emit'];
slots = new Array<Shard<M, F, S, RESP>>(RedisClusterSlots._SLOTS);
shards = new Array<Shard<M, F, S,RESP>>();
masters = new Array<ShardNode<M, F, S, RESP>>();
replicas = new Array<ShardNode<M, F, S, RESP>>();
readonly nodeByAddress = new Map<string, MasterNode<M, F, S, RESP> | ShardNode<M, F, S, RESP>>();
pubSubNode?: PubSubNode<M, F, S, RESP>;
slots = new Array<Shard<M, F, S, RESP, TYPE_MAPPING>>(RedisClusterSlots._SLOTS);
masters = new Array<MasterNode<M, F, S, RESP, TYPE_MAPPING>>();
replicas = new Array<ShardNode<M, F, S, RESP, TYPE_MAPPING>>();
readonly nodeByAddress = new Map<string, MasterNode<M, F, S, RESP, TYPE_MAPPING> | ShardNode<M, F, S, RESP, TYPE_MAPPING>>();
pubSubNode?: PubSubNode<M, F, S, RESP, TYPE_MAPPING>;

private _isOpen = false;

@@ -122,7 +124,7 @@ export default class RedisClusterSlots<
}

constructor(
options: RedisClusterOptions<M, F, S, RESP>,
options: RedisClusterOptions<M, F, S, RESP, TYPE_MAPPING>,
emit: EventEmitter['emit']
) {
this._options = options;
@@ -147,10 +149,12 @@ export default class RedisClusterSlots<
private async _discoverWithRootNodes() {
let start = Math.floor(Math.random() * this._options.rootNodes.length);
for (let i = start; i < this._options.rootNodes.length; i++) {
if (!this._isOpen) throw new Error('Cluster closed');
if (await this._discover(this._options.rootNodes[i])) return;
}

for (let i = 0; i < start; i++) {
if (!this._isOpen) throw new Error('Cluster closed');
if (await this._discover(this._options.rootNodes[i])) return;
}

@@ -159,7 +163,6 @@ export default class RedisClusterSlots<

private _resetSlots() {
this.slots = new Array(RedisClusterSlots._SLOTS);
this.shards = [];
this.masters = [];
this.replicas = [];
this._randomNodeIterator = undefined;
@@ -167,15 +170,13 @@ export default class RedisClusterSlots<

private async _discover(rootNode: RedisClusterClientOptions) {
this._resetSlots();
const addressesInUse = new Set<string>();

try {
const shards = await this._getShards(rootNode),
const addressesInUse = new Set<string>(),
promises: Array<Promise<unknown>> = [],
eagerConnect = this._options.minimizeConnections !== true;

for (const { from, to, master, replicas } of shards) {
const shard: Shard<M, F, S, RESP> = {
for (const { from, to, master, replicas } of await this._getShards(rootNode)) {
const shard: Shard<M, F, S, RESP, TYPE_MAPPING> = {
master: this._initiateSlotNode(master, false, eagerConnect, addressesInUse, promises)
};

@@ -185,25 +186,17 @@ export default class RedisClusterSlots<
);
}

this.shards.push(shard);

for (let i = from; i <= to; i++) {
this.slots[i] = shard;
}
}

if (this.pubSubNode && !addressesInUse.has(this.pubSubNode.address)) {
if (types.isPromise(this.pubSubNode.client)) {
promises.push(
this.pubSubNode.client.then(client => client.disconnect())
);
this.pubSubNode = undefined;
} else {
promises.push(this.pubSubNode.client.disconnect());

const channelsListeners = this.pubSubNode.client.getPubSubListeners(PubSubType.CHANNELS),
patternsListeners = this.pubSubNode.client.getPubSubListeners(PubSubType.PATTERNS);

this.pubSubNode.client.destroy();

if (channelsListeners.size || patternsListeners.size) {
promises.push(
this._initiatePubSubClient({
@@ -213,22 +206,17 @@ export default class RedisClusterSlots<
);
}
}
}

for (const [address, node] of this.nodeByAddress.entries()) {
if (addressesInUse.has(address)) continue;

if (node.client) {
promises.push(
this._execOnNodeClient(node.client, client => client.disconnect())
);
node.client.destroy();
}

const { pubSubClient } = node as MasterNode<M, F, S, RESP>;
if (pubSubClient) {
promises.push(
this._execOnNodeClient(pubSubClient, client => client.disconnect())
);
const { pubSub } = node as MasterNode<M, F, S, RESP, TYPE_MAPPING>;
if (pubSub) {
pubSub.client.destroy();
}

this.nodeByAddress.delete(address);
@@ -248,12 +236,12 @@ export default class RedisClusterSlots<
options.socket ??= {};
options.socket.reconnectStrategy = false;
options.RESP = this._options.RESP;
options.commandOptions = undefined;

const client = RedisClient.factory(this._options)(options);

client.on('error', err => this._emit('error', err));

await client.connect();
// TODO: find a way to avoid type casting
const client = await this._clientFactory(options as RedisClientOptions<M, F, S, RESP, {}>)
.on('error', err => this._emit('error', err))
.connect();

try {
// switch to `CLUSTER SHARDS` when Redis 7.0 will be the minimum supported version
@@ -273,7 +261,7 @@ export default class RedisClusterSlots<
}
}

private _clientOptionsDefaults(options?: RedisClientOptions): RedisClientOptions | undefined {
private _clientOptionsDefaults(options?: RedisClientOptions<M, F, S, RESP, TYPE_MAPPING>) {
if (!this._options.defaults) return options;

let socket;
@@ -301,7 +289,6 @@ export default class RedisClusterSlots<
promises: Array<Promise<unknown>>
) {
const address = `${shard.host}:${shard.port}`;
addressesInUse.add(address);

let node = this.nodeByAddress.get(address);
if (!node) {
@@ -309,7 +296,8 @@ export default class RedisClusterSlots<
...shard,
address,
readonly,
client: undefined
client: undefined,
connectPromise: undefined
};

if (eagerConnent) {
@@ -319,16 +307,16 @@ export default class RedisClusterSlots<
this.nodeByAddress.set(address, node);
}

if (!addressesInUse.has(address)) {
addressesInUse.add(address);
(readonly ? this.replicas : this.masters).push(node);
}

return node;
}

private async _createClient(
node: ShardNode<M, F, S, RESP>,
readonly = node.readonly
) {
const client = this._clientFactory(
private _createClient(node: ShardNode<M, F, S, RESP, TYPE_MAPPING>, readonly = node.readonly) {
return this._clientFactory(
this._clientOptionsDefaults({
socket: this._getNodeAddress(node.address) ?? {
host: node.host,
@@ -337,38 +325,29 @@ export default class RedisClusterSlots<
readonly,
RESP: this._options.RESP
})
).on('error', err => console.error(err));
}

private _createNodeClient(node: ShardNode<M, F, S, RESP, TYPE_MAPPING>, readonly?: boolean) {
const client = node.client = this._createClient(node, readonly);
return node.connectPromise = client.connect()
.finally(() => node.connectPromise = undefined);
}

nodeClient(node: ShardNode<M, F, S, RESP, TYPE_MAPPING>) {
return (
node.connectPromise ?? // if the node is connecting
node.client ?? // if the node is connected
this._createNodeClient(node) // if the node is disconnected
);
client.on('error', err => this._emit('error', err));

await client.connect();

return client;
}

private _createNodeClient(node: ShardNode<M, F, S, RESP>) {
const promise = this._createClient(node)
.then(client => {
node.client = client;
return client;
})
.catch(err => {
node.client = undefined;
throw err;
});
node.client = promise;
return promise;
}

nodeClient(node: ShardNode<M, F, S, RESP>) {
return node.client ?? this._createNodeClient(node);
}

#runningRediscoverPromise?: Promise<void>;
private _runningRediscoverPromise?: Promise<void>;

async rediscover(startWith: RedisClientType<M, F, S, RESP>): Promise<void> {
this.#runningRediscoverPromise ??= this._rediscover(startWith)
.finally(() => this.#runningRediscoverPromise = undefined);
return this.#runningRediscoverPromise;
this._runningRediscoverPromise ??= this._rediscover(startWith)
.finally(() => this._runningRediscoverPromise = undefined);
return this._runningRediscoverPromise;
}

private async _rediscover(startWith: RedisClientType<M, F, S, RESP>): Promise<void> {
@@ -399,11 +378,11 @@ export default class RedisClusterSlots<
this._isOpen = false;

for (const client of this._clients()) {
this._execOnNodeClient(client, client => client.destroy());
client.destroy();
}

if (this.pubSubNode) {
this._execOnNodeClient(this.pubSubNode.client, client => client.destroy());
this.pubSubNode.client.destroy();
this.pubSubNode = undefined;
}

@@ -412,21 +391,19 @@ export default class RedisClusterSlots<
}

private *_clients() {
for (const { master, replicas } of this.shards) {
for (const master of this.masters) {
if (master.client) {
yield master.client;
}

if (master.pubSubClient) {
yield master.pubSubClient;
if (master.pubSub) {
yield master.pubSub.client;
}
}

if (replicas) {
for (const { client } of replicas) {
if (client) {
yield client;
}
}
for (const replica of this.replicas) {
if (replica.client) {
yield replica.client;
}
}
}
@@ -436,11 +413,11 @@ export default class RedisClusterSlots<

const promises = [];
for (const client of this._clients()) {
promises.push(this._execOnNodeClient(client, fn));
promises.push(fn(client));
}

if (this.pubSubNode) {
promises.push(this._execOnNodeClient(this.pubSubNode.client, fn));
promises.push(fn(this.pubSubNode.client));
this.pubSubNode = undefined;
}

@@ -450,19 +427,10 @@ export default class RedisClusterSlots<
await Promise.allSettled(promises);
}

private _execOnNodeClient<T>(
client: ClientOrPromise<M, F, S, RESP>,
fn: (client: RedisClientType<M, F, S, RESP>) => T
): T | Promise<T> {
return types.isPromise(client) ?
client.then(fn) :
fn(client);
}

getClient(
firstKey: RedisArgument | undefined,
isReadonly: boolean | undefined
): ClientOrPromise<M, F, S, RESP> {
) {
if (!firstKey) {
return this.nodeClient(this.getRandomNode());
}
@@ -503,14 +471,14 @@ export default class RedisClusterSlots<
}
}

_randomNodeIterator?: IterableIterator<ShardNode<M, F, S, RESP>>;
_randomNodeIterator?: IterableIterator<ShardNode<M, F, S, RESP, TYPE_MAPPING>>;

getRandomNode() {
this._randomNodeIterator ??= this._iterateAllNodes();
return this._randomNodeIterator.next().value as ShardNode<M, F, S, RESP>;
return this._randomNodeIterator.next().value as ShardNode<M, F, S, RESP, TYPE_MAPPING>;
}

private *_slotNodesIterator(slot: ShardWithReplicas<M, F, S, RESP>) {
private *_slotNodesIterator(slot: ShardWithReplicas<M, F, S, RESP, TYPE_MAPPING>) {
let i = Math.floor(Math.random() * (1 + slot.replicas.length));
if (i < slot.replicas.length) {
do {
@@ -533,8 +501,8 @@ export default class RedisClusterSlots<
return slot.master;
}

slot.nodesIterator ??= this._slotNodesIterator(slot as ShardWithReplicas<M, F, S, RESP>);
return slot.nodesIterator.next().value as ShardNode<M, F, S, RESP>;
slot.nodesIterator ??= this._slotNodesIterator(slot as ShardWithReplicas<M, F, S, RESP, TYPE_MAPPING>);
return slot.nodesIterator.next().value as ShardNode<M, F, S, RESP, TYPE_MAPPING>;
}

getMasterByAddress(address: string) {
@@ -545,20 +513,22 @@ export default class RedisClusterSlots<
}

getPubSubClient() {
return this.pubSubNode ?
this.pubSubNode.client :
this._initiatePubSubClient();
if (!this.pubSubNode) return this._initiatePubSubClient();

return this.pubSubNode.connectPromise ?? this.pubSubNode.client;
}

private async _initiatePubSubClient(toResubscribe?: PubSubToResubscribe) {
const index = Math.floor(Math.random() * (this.masters.length + this.replicas.length)),
node = index < this.masters.length ?
this.masters[index] :
this.replicas[index - this.masters.length];
this.replicas[index - this.masters.length],
client = this._createClient(node, true);

this.pubSubNode = {
address: node.address,
client: this._createClient(node, true)
client,
connectPromise: client.connect()
.then(async client => {
if (toResubscribe) {
await Promise.all([
@@ -567,7 +537,7 @@ export default class RedisClusterSlots<
]);
}

this.pubSubNode!.client = client;
this.pubSubNode!.connectPromise = undefined;
return client;
})
.catch(err => {
@@ -576,7 +546,7 @@ export default class RedisClusterSlots<
})
};

return this.pubSubNode.client as Promise<RedisClientType<M, F, S, RESP>>;
return this.pubSubNode.connectPromise!;
}

async executeUnsubscribeCommand(
@@ -593,17 +563,17 @@ export default class RedisClusterSlots<

getShardedPubSubClient(channel: string) {
const { master } = this.slots[calculateSlot(channel)];
return master.pubSubClient ?? this.#initiateShardedPubSubClient(master);
if (!master.pubSub) return this._initiateShardedPubSubClient(master);
return master.pubSub.connectPromise ?? master.pubSub.client;
}

#initiateShardedPubSubClient(master: MasterNode<M, F, S, RESP>) {
const promise = this._createClient(master, true)
.then(client => {
client.on('server-sunsubscribe', async (channel, listeners) => {
private async _initiateShardedPubSubClient(master: MasterNode<M, F, S, RESP, TYPE_MAPPING>) {
const client = this._createClient(master, true)
.on('server-sunsubscribe', async (channel, listeners) => {
try {
await this.rediscover(client);
const redirectTo = await this.getShardedPubSubClient(channel);
redirectTo.extendPubSubChannelListeners(
await redirectTo.extendPubSubChannelListeners(
PubSubType.SHARDED,
channel,
listeners
@@ -613,32 +583,38 @@ export default class RedisClusterSlots<
}
});

master.pubSubClient = client;
master.pubSub = {
client,
connectPromise: client.connect()
.then(client => {
master.pubSub!.connectPromise = undefined;
return client;
})
.catch(err => {
master.pubSubClient = undefined;
master.pubSub = undefined;
throw err;
});
})
};

master.pubSubClient = promise;

return promise;
return master.pubSub.connectPromise!;
}

async executeShardedUnsubscribeCommand(
channel: string,
unsubscribe: (client: RedisClientType<M, F, S, RESP>) => Promise<void>
): Promise<void> {
unsubscribe: (client: RedisClientType<M, F, S, RESP, TYPE_MAPPING>) => Promise<void>
) {
const { master } = this.slots[calculateSlot(channel)];
if (!master.pubSubClient) return Promise.resolve();
if (!master.pubSub) return;

const client = master.pubSub.connectPromise ?
await master.pubSub.connectPromise :
master.pubSub.client;

const client = await master.pubSubClient;
await unsubscribe(client);

if (!client.isPubSubActive) {
await client.disconnect();
master.pubSubClient = undefined;
client.destroy();
master.pubSub = undefined;
}
}
}
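This file's rework replaces the old `ClientOrPromise` union with an explicit `client`/`connectPromise` pair on each node, and `nodeClient` now resolves connecting, connected, and disconnected states in that order. A standalone sketch of the caching pattern; `FakeClient`, `NodeState`, and `makeClient` are illustrative stand-ins, not node-redis APIs:

```typescript
// Illustrative stand-ins only.
interface FakeClient {
  connect(): Promise<FakeClient>;
}

interface NodeState {
  client?: FakeClient;                  // set as soon as a connection attempt starts
  connectPromise?: Promise<FakeClient>; // present only while that attempt is in flight
}

function createNodeClient(node: NodeState, makeClient: () => FakeClient) {
  const client = node.client = makeClient();
  return node.connectPromise = client.connect()
    .finally(() => node.connectPromise = undefined); // clear once the attempt settles
}

function nodeClient(node: NodeState, makeClient: () => FakeClient) {
  return (
    node.connectPromise ??              // connecting: share the in-flight promise
    node.client ??                      // connected: reuse the existing client
    createNodeClient(node, makeClient)  // disconnected: start a new connection
  );
}
```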
@@ -1,11 +1,9 @@
import { strict as assert } from 'node:assert';
import testUtils, { GLOBAL, waitTillBeenCalled } from '../test-utils';
import RedisCluster from '.';
// import { ClusterSlotStates } from '../commands/CLUSTER_SETSLOT';
import { SQUARE_SCRIPT } from '../client/index.spec';
import { RootNodesUnavailableError } from '../errors';
import { spy } from 'sinon';
// import { setTimeout } from 'node:timers/promises';
import RedisClient from '../client';

describe('Cluster', () => {
@@ -69,58 +67,58 @@ describe('Cluster', () => {
}
});

// testUtils.testWithCluster('should handle live resharding', async cluster => {
// const slot = 12539,
// key = 'key',
// value = 'value';
// await cluster.set(key, value);
testUtils.testWithCluster('should handle live resharding', async cluster => {
const slot = 12539,
key = 'key',
value = 'value';
await cluster.set(key, value);

// const importing = cluster.slots[0].master,
// migrating = cluster.slots[slot].master,
// [importingClient, migratingClient] = await Promise.all([
// cluster.nodeClient(importing),
// cluster.nodeClient(migrating)
// ]);
const importing = cluster.slots[0].master,
migrating = cluster.slots[slot].master,
[importingClient, migratingClient] = await Promise.all([
cluster.nodeClient(importing),
cluster.nodeClient(migrating)
]);

// await Promise.all([
// importingClient.clusterSetSlot(slot, ClusterSlotStates.IMPORTING, migrating.id),
// migratingClient.clusterSetSlot(slot, ClusterSlotStates.MIGRATING, importing.id)
// ]);
await Promise.all([
importingClient.clusterSetSlot(slot, 'IMPORTING', migrating.id),
migratingClient.clusterSetSlot(slot, 'MIGRATING', importing.id)
]);

// // should be able to get the key from the migrating node
// assert.equal(
// await cluster.get(key),
// value
// );
// should be able to get the key from the migrating node
assert.equal(
await cluster.get(key),
value
);

// await migratingClient.migrate(
// importing.host,
// importing.port,
// key,
// 0,
// 10
// );
await migratingClient.migrate(
importing.host,
importing.port,
key,
0,
10
);

// // should be able to get the key from the importing node using `ASKING`
// assert.equal(
// await cluster.get(key),
// value
// );
// should be able to get the key from the importing node using `ASKING`
assert.equal(
await cluster.get(key),
value
);

// await Promise.all([
// importingClient.clusterSetSlot(slot, ClusterSlotStates.NODE, importing.id),
// migratingClient.clusterSetSlot(slot, ClusterSlotStates.NODE, importing.id),
// ]);
await Promise.all([
importingClient.clusterSetSlot(slot, 'NODE', importing.id),
migratingClient.clusterSetSlot(slot, 'NODE', importing.id),
]);

// // should handle `MOVED` errors
// assert.equal(
// await cluster.get(key),
// value
// );
// }, {
// serverArguments: [],
// numberOfMasters: 2
// });
// should handle `MOVED` errors
assert.equal(
await cluster.get(key),
value
);
}, {
serverArguments: [],
numberOfMasters: 2
});

testUtils.testWithCluster('getRandomNode should spread the load evenly', async cluster => {
const totalNodes = cluster.masters.length + cluster.replicas.length,
@@ -145,7 +143,6 @@ describe('Cluster', () => {
testUtils.testWithCluster('cluster topology', async cluster => {
assert.equal(cluster.slots.length, 16384);
const { numberOfMasters, numberOfReplicas } = GLOBAL.CLUSTERS.WITH_REPLICAS;
assert.equal(cluster.shards.length, numberOfMasters);
assert.equal(cluster.masters.length, numberOfMasters);
assert.equal(cluster.replicas.length, numberOfReplicas * numberOfMasters);
assert.equal(cluster.nodeByAddress.size, numberOfMasters + numberOfMasters * numberOfReplicas);
@@ -239,54 +236,53 @@ describe('Cluster', () => {
assert.equal(cluster.pubSubNode, undefined);
}, GLOBAL.CLUSTERS.OPEN);

// testUtils.testWithCluster('should move listeners when PubSub node disconnects from the cluster', async cluster => {
// const listener = spy();
// await cluster.subscribe('channel', listener);
testUtils.testWithCluster('should move listeners when PubSub node disconnects from the cluster', async cluster => {
const listener = spy();
await cluster.subscribe('channel', listener);

// assert.ok(cluster.pubSubNode);
// const [migrating, importing] = cluster.masters[0].address === cluster.pubSubNode.address ?
// cluster.masters :
// [cluster.masters[1], cluster.masters[0]],
// [migratingClient, importingClient] = await Promise.all([
// cluster.nodeClient(migrating),
// cluster.nodeClient(importing)
// ]);
assert.ok(cluster.pubSubNode);
const [migrating, importing] = cluster.masters[0].address === cluster.pubSubNode.address ?
cluster.masters :
[cluster.masters[1], cluster.masters[0]],
[migratingClient, importingClient] = await Promise.all([
cluster.nodeClient(migrating),
cluster.nodeClient(importing)
]);

// const range = cluster.slots[0].master === migrating ? {
// key: 'bar', // 5061
// start: 0,
// end: 8191
// } : {
// key: 'foo', // 12182
// start: 8192,
// end: 16383
// };
const range = cluster.slots[0].master === migrating ? {
key: 'bar', // 5061
start: 0,
end: 8191
} : {
key: 'foo', // 12182
start: 8192,
end: 16383
};

// await Promise.all([
// migratingClient.clusterDelSlotsRange(range),
// importingClient.clusterDelSlotsRange(range),
// importingClient.clusterAddSlotsRange(range)
// ]);
// TODO: is there a better way to migrate slots without causing CLUSTERDOWN?
const promises: Array<Promise<unknown>> = [];
for (let i = range.start; i <= range.end; i++) {
promises.push(
migratingClient.clusterSetSlot(i, 'NODE', importing.id),
importingClient.clusterSetSlot(i, 'NODE', importing.id)
);
}
await Promise.all(promises);

// // wait for migrating node to be notified about the new topology
// while ((await migratingClient.clusterInfo()).state !== 'ok') {
// await setTimeout(50);
// }
// make sure to cause `MOVED` error
await cluster.get(range.key);

// // make sure to cause `MOVED` error
// await cluster.get(range.key);
await Promise.all([
cluster.publish('channel', 'message'),
waitTillBeenCalled(listener)
]);

// await Promise.all([
// cluster.publish('channel', 'message'),
// waitTillBeenCalled(listener)
// ]);

// assert.ok(listener.calledOnceWithExactly('message', 'channel'));
// }, {
// serverArguments: [],
// numberOfMasters: 2,
// minimumDockerVersion: [7]
// });
assert.ok(listener.calledOnceWithExactly('message', 'channel'));
}, {
serverArguments: [],
numberOfMasters: 2,
minimumDockerVersion: [7]
});

testUtils.testWithCluster('ssubscribe & sunsubscribe', async cluster => {
const listener = spy();
@@ -303,46 +299,44 @@ describe('Cluster', () => {
await cluster.sUnsubscribe('channel', listener);

// 10328 is the slot of `channel`
assert.equal(cluster.slots[10328].master.pubSubClient, undefined);
assert.equal(cluster.slots[10328].master.pubSub, undefined);
}, {
...GLOBAL.CLUSTERS.OPEN,
minimumDockerVersion: [7]
});

// testUtils.testWithCluster('should handle sharded-channel-moved events', async cluster => {
// const SLOT = 10328,
// migrating = cluster.slots[SLOT].master,
// importing = cluster.masters.find(master => master !== migrating)!,
// [migratingClient, importingClient] = await Promise.all([
// cluster.nodeClient(migrating),
// cluster.nodeClient(importing)
// ]);
testUtils.testWithCluster('should handle sharded-channel-moved events', async cluster => {
const SLOT = 10328,
migrating = cluster.slots[SLOT].master,
importing = cluster.masters.find(master => master !== migrating)!,
[migratingClient, importingClient] = await Promise.all([
cluster.nodeClient(migrating),
cluster.nodeClient(importing)
]);

// await Promise.all([
// migratingClient.clusterDelSlots(SLOT),
// importingClient.clusterDelSlots(SLOT),
// importingClient.clusterAddSlots(SLOT)
// ]);
await Promise.all([
migratingClient.clusterDelSlots(SLOT),
importingClient.clusterDelSlots(SLOT),
importingClient.clusterAddSlots(SLOT),
// cause "topology refresh" on both nodes
migratingClient.clusterSetSlot(SLOT, 'NODE', importing.id),
importingClient.clusterSetSlot(SLOT, 'NODE', importing.id)
]);

// // wait for migrating node to be notified about the new topology
// while ((await migratingClient.clusterInfo()).state !== 'ok') {
// await setTimeout(50);
// }
const listener = spy();

// const listener = spy();
// will trigger `MOVED` error
await cluster.sSubscribe('channel', listener);

// // will trigger `MOVED` error
// await cluster.sSubscribe('channel', listener);
await Promise.all([
waitTillBeenCalled(listener),
cluster.sPublish('channel', 'message')
]);

// await Promise.all([
// waitTillBeenCalled(listener),
// cluster.sPublish('channel', 'message')
// ]);

// assert.ok(listener.calledOnceWithExactly('message', 'channel'));
// }, {
// serverArguments: [],
// minimumDockerVersion: [7]
// });
assert.ok(listener.calledOnceWithExactly('message', 'channel'));
}, {
serverArguments: [],
minimumDockerVersion: [7]
});
});
});

@@ -303,7 +303,7 @@ export default class RedisCluster<

private readonly _options: RedisClusterOptions<M, F, S, RESP, TYPE_MAPPING/*, POLICIES*/>;

private readonly _slots: RedisClusterSlots<M, F, S, RESP>;
private readonly _slots: RedisClusterSlots<M, F, S, RESP, TYPE_MAPPING>;

private _commandOptions?: ClusterCommandOptions<TYPE_MAPPING/*, POLICIES*/>;

@@ -315,14 +315,6 @@ export default class RedisCluster<
return this._slots.slots;
}

/**
* An array of cluster shards, each shard contains its `master` and `replicas`.
* Use with {@link RedisCluster.prototype.nodeClient} to get the client for a specific node (master or replica).
*/
get shards() {
return this._slots.shards;
}

/**
* An array of the cluster masters.
* Use with {@link RedisCluster.prototype.nodeClient} to get the client for a specific master node.
@@ -442,7 +434,7 @@ export default class RedisCluster<
private async _execute<T>(
firstKey: RedisArgument | undefined,
isReadonly: boolean | undefined,
fn: (client: RedisClientType<M, F, S, RESP>) => Promise<T>
fn: (client: RedisClientType<M, F, S, RESP, TYPE_MAPPING>) => Promise<T>
): Promise<T> {
const maxCommandRedirections = this._options.maxCommandRedirections ?? 16;
let client = await this._slots.getClient(firstKey, isReadonly),
@@ -655,7 +647,7 @@ export default class RedisCluster<
return this._slots.destroy();
}

nodeClient(node: ShardNode<M, F, S, RESP>) {
nodeClient(node: ShardNode<M, F, S, RESP, TYPE_MAPPING>) {
return this._slots.nodeClient(node);
}
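The restored JSDoc above points `shards` users at `nodeClient`. A hedged usage sketch follows; `createCluster`, the option values, and the exact public typings are assumptions about this branch and are not part of the diff, while `shards`, `nodeClient`, `id`, `readonly`, and `destroy()` do appear in it:

```typescript
import { createCluster } from 'redis';

async function inspectTopology() {
  // Placeholder root node; adjust to a real cluster endpoint.
  const cluster = createCluster({
    rootNodes: [{ url: 'redis://127.0.0.1:30001' }]
  });
  await cluster.connect();

  for (const shard of cluster.shards) {
    // nodeClient() returns the client for that node, connecting it on first use.
    const masterClient = await cluster.nodeClient(shard.master);
    console.log('master', shard.master.id, 'connected:', masterClient !== undefined);

    for (const replica of shard.replicas ?? []) {
      console.log('  replica', replica.id, 'readonly:', replica.readonly);
    }
  }

  cluster.destroy(); // matches the test-utils change later in this commit
}

inspectTopology().catch(console.error);
```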
@@ -1,4 +1,4 @@
import { SimpleStringReply, Command } from '@redis/client/dist/lib/RESP/types';
import { SimpleStringReply, Command } from '../RESP/types';

export default {
FIRST_KEY_INDEX: undefined,

@@ -1,4 +1,4 @@
import { BlobStringReply, Command } from '@redis/client/dist/lib/RESP/types';
import { BlobStringReply, Command } from '../RESP/types';

export default {
FIRST_KEY_INDEX: undefined,

@@ -11,10 +11,11 @@ describe('CLUSTER REPLICAS', () => {
});

testUtils.testWithCluster('clusterNode.clusterReplicas', async cluster => {
const client = await cluster.nodeClient(cluster.masters[0]);
assert.equal(
typeof await client.clusterReplicas(cluster.masters[0].id),
'string'
);
const client = await cluster.nodeClient(cluster.masters[0]),
reply = await client.clusterReplicas(cluster.masters[0].id);
assert.ok(Array.isArray(reply));
for (const replica of reply) {
assert.equal(typeof replica, 'string');
}
}, GLOBAL.CLUSTERS.OPEN);
});

@@ -1,4 +1,4 @@
import { RedisArgument, VerbatimStringReply, Command } from '../RESP/types';
import { RedisArgument, ArrayReply, BlobStringReply, Command } from '../RESP/types';

export default {
FIRST_KEY_INDEX: undefined,
@@ -6,5 +6,5 @@ export default {
transformArguments(nodeId: RedisArgument) {
return ['CLUSTER', 'REPLICAS', nodeId];
},
transformReply: undefined as unknown as () => VerbatimStringReply
transformReply: undefined as unknown as () => ArrayReply<BlobStringReply>
} as const satisfies Command;

@@ -21,7 +21,7 @@ describe('FCALL', () => {
loadMathFunction(client),
client.set('key', '2'),
client.fCall(MATH_FUNCTION.library.square.NAME, {
arguments: ['key']
keys: ['key']
})
]);

@@ -21,7 +21,7 @@ describe('FCALL_RO', () => {
loadMathFunction(client),
client.set('key', '2'),
client.fCallRo(MATH_FUNCTION.library.square.NAME, {
arguments: ['key']
keys: ['key']
})
]);

@@ -4,6 +4,8 @@ import FUNCTION_LOAD from './FUNCTION_LOAD';
import { RedisClientType } from '../client';
import { NumberReply, RedisFunctions, RedisModules, RedisScripts, RespVersions } from '../RESP/types';



export const MATH_FUNCTION = {
name: 'math',
engine: 'LUA',
@@ -11,10 +13,10 @@ export const MATH_FUNCTION = {
`#!LUA name=math
redis.register_function {
function_name = "square",
callback = function(keys, args) {
callback = function(keys, args)
local number = redis.call('GET', keys[1])
return number * number
},
end,
flags = { "no-writes" }
}`,
library: {
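The spec changes above switch the `fCall`/`fCallRo` option from `arguments` to `keys`, and this helper fixes the Lua body of the `square` function. A short sketch tying the two together; `client` and `loadMathFunction` are assumed to come from the surrounding test helpers, mirroring the spec rather than defining a new API:

```typescript
// Mirrors the FCALL spec above; not a new API.
await loadMathFunction(client);  // loads the fixed `math` library
await client.set('key', '2');

const squared = await client.fCall(MATH_FUNCTION.library.square.NAME, {
  keys: ['key']                  // the Lua callback reads redis.call('GET', keys[1])
});
// Expected result: 4 (the callback returns number * number)
```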
@@ -1,25 +1,25 @@
import { strict as assert } from 'node:assert';
import { transformArguments } from './MIGRATE';
import MIGRATE from './MIGRATE';

describe('MIGRATE', () => {
describe('transformArguments', () => {
it('single key', () => {
assert.deepEqual(
transformArguments('127.0.0.1', 6379, 'key', 0, 10),
MIGRATE.transformArguments('127.0.0.1', 6379, 'key', 0, 10),
['MIGRATE', '127.0.0.1', '6379', 'key', '0', '10']
);
});

it('multiple keys', () => {
assert.deepEqual(
transformArguments('127.0.0.1', 6379, ['1', '2'], 0, 10),
MIGRATE.transformArguments('127.0.0.1', 6379, ['1', '2'], 0, 10),
['MIGRATE', '127.0.0.1', '6379', '', '0', '10', 'KEYS', '1', '2']
);
});

it('with COPY', () => {
assert.deepEqual(
transformArguments('127.0.0.1', 6379, 'key', 0, 10, {
MIGRATE.transformArguments('127.0.0.1', 6379, 'key', 0, 10, {
COPY: true
}),
['MIGRATE', '127.0.0.1', '6379', 'key', '0', '10', 'COPY']
@@ -28,7 +28,7 @@ describe('MIGRATE', () => {

it('with REPLACE', () => {
assert.deepEqual(
transformArguments('127.0.0.1', 6379, 'key', 0, 10, {
MIGRATE.transformArguments('127.0.0.1', 6379, 'key', 0, 10, {
REPLACE: true
}),
['MIGRATE', '127.0.0.1', '6379', 'key', '0', '10', 'REPLACE']
@@ -38,7 +38,7 @@ describe('MIGRATE', () => {
describe('with AUTH', () => {
it('password only', () => {
assert.deepEqual(
transformArguments('127.0.0.1', 6379, 'key', 0, 10, {
MIGRATE.transformArguments('127.0.0.1', 6379, 'key', 0, 10, {
AUTH: {
password: 'password'
}
@@ -49,7 +49,7 @@ describe('MIGRATE', () => {

it('username & password', () => {
assert.deepEqual(
transformArguments('127.0.0.1', 6379, 'key', 0, 10, {
MIGRATE.transformArguments('127.0.0.1', 6379, 'key', 0, 10, {
AUTH: {
username: 'username',
password: 'password'
@@ -62,7 +62,7 @@ describe('MIGRATE', () => {

it('with COPY, REPLACE, AUTH', () => {
assert.deepEqual(
transformArguments('127.0.0.1', 6379, 'key', 0, 10, {
MIGRATE.transformArguments('127.0.0.1', 6379, 'key', 0, 10, {
COPY: true,
REPLACE: true,
AUTH: {

@@ -1,13 +1,14 @@
import { RedisArgument, SimpleStringReply, Command } from '../RESP/types';
import { AuthOptions } from './AUTH';

interface MigrateOptions {
export interface MigrateOptions {
COPY?: true;
REPLACE?: true;
AUTH?: AuthOptions;
}

export default {
IS_READ_ONLY: false,
transformArguments(
host: RedisArgument,
port: number,
@@ -62,5 +63,5 @@ export default {

return args;
},
transformReply: undefined as unknown as () => SimpleStringReply
transformReply: undefined as unknown as () => SimpleStringReply<'OK'>
} as const satisfies Command;
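`MigrateOptions` is now exported, so callers can type their own option objects for `migrate`. A small sketch; the import path is an assumption and the values are placeholders taken from the spec above:

```typescript
// Assumed import path; adjust to wherever the package re-exports command types.
import type { MigrateOptions } from '@redis/client/dist/lib/commands/MIGRATE';

const options: MigrateOptions = {
  COPY: true,
  REPLACE: true,
  AUTH: {
    username: 'username',
    password: 'password'
  }
};
// e.g. client.migrate('127.0.0.1', 6379, 'key', 0, 10, options)
```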
@@ -1,7 +1,7 @@
import { RedisArgument, SimpleStringReply, Command } from '../RESP/types';

export default {
FIRST_KEY_INDEX: undefined,
FIRST_KEY_INDEX: 1,
IS_READ_ONLY: true,
transformArguments(key: RedisArgument, newKey: RedisArgument) {
return ['RENAME', key, newKey];

@@ -1,7 +1,7 @@
import { RedisArgument, NumberReply, Command } from '../RESP/types';

export default {
FIRST_KEY_INDEX: undefined,
FIRST_KEY_INDEX: 1,
IS_READ_ONLY: true,
transformArguments(key: RedisArgument, newKey: RedisArgument) {
return ['RENAMENX', key, newKey];

@@ -2,7 +2,7 @@ import { RedisArgument, Command } from '../RESP/types';
import { transformSortedSetReply } from './generic-transformers';

export default {
FIRST_KEY_INDEX: undefined,
FIRST_KEY_INDEX: 1,
IS_READ_ONLY: false,
transformArguments(key: RedisArgument, count: number) {
return ['ZPOPMAX', key, count.toString()];

@@ -184,6 +184,7 @@ import MEMORY_PURGE from './MEMORY_PURGE';
import MEMORY_STATS from './MEMORY_STATS';
import MEMORY_USAGE from './MEMORY_USAGE';
import MGET from './MGET';
import MIGRATE from './MIGRATE';
import MODULE_LIST from './MODULE_LIST';
import MODULE_LOAD from './MODULE_LOAD';
import MODULE_UNLOAD from './MODULE_UNLOAD';
@@ -703,6 +704,8 @@ export default {
memoryUsage: MEMORY_USAGE,
MGET,
mGet: MGET,
MIGRATE,
migrate: MIGRATE,
MODULE_LIST,
moduleList: MODULE_LIST,
MODULE_LOAD,

@@ -20,7 +20,7 @@ describe('Multi Command', () => {
multi.addScript(SQUARE_SCRIPT, ['1']);
assert.deepEqual(
Array.from(multi.queue.at(-1).args),
['EVAL', SQUARE_SCRIPT.SCRIPT, '0', '1']
['EVAL', SQUARE_SCRIPT.SCRIPT, '1', '1']
);
});

@@ -28,7 +28,7 @@ describe('Multi Command', () => {
multi.addScript(SQUARE_SCRIPT, ['2']);
assert.deepEqual(
Array.from(multi.queue.at(-1).args),
['EVALSHA', SQUARE_SCRIPT.SHA1, '0', '2']
['EVALSHA', SQUARE_SCRIPT.SHA1, '1', '2']
);
});

@@ -200,9 +200,9 @@ export default class TestUtils {
// POLICIES extends CommandPolicies
>(cluster: RedisClusterType<M, F, S, RESP, TYPE_MAPPING/*, POLICIES*/>): Promise<unknown> {
return Promise.all(
cluster.masters.map(async ({ client }) => {
if (client) {
await (await client).flushAll();
cluster.masters.map(async master => {
if (master.client) {
(await cluster.nodeClient(master)).flushAll();
}
})
);
@@ -256,7 +256,7 @@ export default class TestUtils {
await fn(cluster);
} finally {
await TestUtils.#clusterFlushAll(cluster);
await cluster.disconnect();
cluster.destroy();
}
});
}