node-redis/lib/commands-queue.ts
Leibale Eidelman 4e6d018d77 V4 (#1624)
* init v4

* add .gitignore to benchmark

* spawn redis-servers for tests,
add some tests,
fix client auth on connect

* add tests coverage report

* add tests workflow, replace nyc text reporter with text-summary

* run tests with node 16.x & redis 6.x only (for now)

* add socket events on client,
stop reconnecting when manually calling disconnect,
remove abort signal listener when a command is written on the socket

* add isOpen boolean getter on client, add maxLength option to command queue, add test for client.multi

* move to use CommonJS

* add MULTI and EXEC commands when executing a multi command, make the client.multi return type inherit the module commands, clean some tests, exclude spec files from coverage report

* missing file from commit 61edd4f1b5

* exclude spec files from coverage report

* add support for options in a command function (.get, .set, ...), add support for the SELECT command, implement a couple of commands, fix client socket reconnection strategy, add support for using replicas (RO) in cluster, and more..

* fix client.blPop test

* use which to find redis-server path

* change command options to work with Symbol rather than WeakSet

* implement more commands

* Add support for lua scripts in client & multi, fix client socket initiator, implement a simple cluster nodes discovery strategy

* replace `callbackify` with `legacyMode`
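
For context, a minimal sketch of what `legacyMode` enables (assuming the client is created via `createClient` from this package; key names are illustrative):

import { createClient } from 'redis';

const client = createClient({ legacyMode: true });
await client.connect();

// v3-style callback API keeps working:
client.get('key', (err, reply) => { /* ... */ });

// the promise-based v4 API stays available under `.v4`:
const value = await client.v4.get('key');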

* add the SCAN command and client.scanIterator
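
A hedged usage sketch of the iterator (option names follow the released v4 API; the match pattern and count are illustrative):

for await (const key of client.scanIterator({ MATCH: 'user:*', COUNT: 100 })) {
  console.log(key);
}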

* rename scanIterator

* init benchmark workflow

* fix benchmark workflow

* fix benchmark workflow

* fix benchmark workflow

* push coverage report to Coveralls

* fix Coveralls

* generate lcov (for Coveralls)

* fix .nycrc.json

* PubSub

* add support for all set commands (including sScanIterator)

* support pipeline

* fix KEEPTTL in SET
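
A short sketch of the option this fixes (values are illustrative):

await client.set('key', 'value', { EX: 60 });            // set with a 60-second TTL
await client.set('key', 'new value', { KEEPTTL: true }); // overwrite the value, keep the TTL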

* remove console.log

* add HyperLogLog commands

* update README.md (thanks to @guyroyse)

* add support for most of the "keys commands"

* fix EXPIREAT.spec.ts

* add support for date in both EXPIREAT & EXPIRE
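
For example (a sketch; the one-minute offset is illustrative), a Date can now be passed instead of a unix timestamp:

await client.expireAt('key', new Date(Date.now() + 60 * 1000));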

* add tests

* better cluster nodes discovery strategy after MOVED error, add PubSub test

* fix PubSub UNSUBSCRIBE/PUNSUBSCRIBE without channel and/or listener

* fix PubSub

* add release-it to dev dependencies

* Release 4.0.0-next.0

* fix .npmignore

* Release 4.0.0-next.1

* fix links in README.md

* fix .npmignore

* Release 4.0.0-next.2

* add support for all sorted set commands

* add support for most stream commands

* add missing file from commit 53de279afe

* lots of TODO comments

* make PubSub test more stable

* clean ZPOPMAX

* add support for lua scripts and modules in cluster, spawn cluster for tests, add some cluster tests, fix pubsub listener arguments

* GET.spec.ts

* add support for List commands, fix some Sorted Set commands, add some cluster commands, spawn cluster for testing, add support for command options in cluster, and more

* add missing file from commit faab94fab2

* clean ZRANK and ZREVRANK

* add XREAD and XREADGROUP commands

* remove unused files

* implement a couple of more commands, make cluster random iterator be per node (instead of per slot)

* Release 4.0.0-next.3

* add spec files to npmignore

* fix some code analyzers (LGTM, deepsource, codeclimate) issues

* fix CLUSTER_NODES, add some tests

* add HSCAN, clean some commands, add tests for generic transformers

* add missing files from 0feb35a1fb

* update README.md (thanks to @guyroyse)

* handle ASK errors, add some commands and tests

* Release 4.0.0-next.4

* replace "modern" with "v4"

* remove unused imports

* add all ACL subcommands, all MODULE subcommands, and some other commands

* remove 2 unused imports

* fix BITFIELD command

* fix XTRIM spec file

* clean code

* fix package.json types field

* better modules support, fix some bugs in legacy mode, add some tests

* remove unused function

* add test for hScanIterator

* change node minimum version to 12 (latest LTS)

* update tsconfig.json to support node 12, run tests on Redis 5 & 6 and on all live node versions

* remove future node releases :P

* remove "lib" from ts compiler options

* Update tsconfig.json

* fix build

* run some tests only on supported redis versions, use coveralls parallel mode

* fix tests

* Do not use "timers/promises", fix "isRedisVersionGreaterThan"

* skip AbortController tests when not available

* use 'fs'.promises instead of 'fs/promises'

* add some missing commands

* run GETDEL tests only if the redis version is greater than 6.2

* implement some GEO commands, improve scan generic transformer, expose RPUSHX

* fix GEOSEARCH & GEOSEARCHSTORE

* use socket.setNoDelay and queueMicrotask to improve latency
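
Roughly, the idea is to disable Nagle's algorithm and coalesce commands issued in the same tick into a single write. A simplified sketch of the technique (not the library's actual internals; `createBatchedWriter` is a made-up helper):

import { Socket } from 'net';

function createBatchedWriter(socket: Socket) {
  socket.setNoDelay(true); // send small packets immediately instead of waiting to batch them
  const pending: Array<string> = [];
  let scheduled = false;
  return (encodedCommand: string) => {
    pending.push(encodedCommand);
    if (!scheduled) {
      scheduled = true;
      // flush once per microtask, so commands queued in the same tick share one write()
      queueMicrotask(() => {
        scheduled = false;
        socket.write(pending.splice(0).join(''));
      });
    }
  };
}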

* commands-queue.ts: String length / byte length counting issue (#1630)

* Update commands-queue.ts

Hopefully fixing #1628

* Reverted 2fa5ea6 and implemented a test for the byte length check

* Changed back to Buffer.byteLength based on the issue author's input. Updated the test to look for 4 bytes.

* Fixed. There were two places that length was calculated.

* Removed redundant string assignment

* add 2 bytes test as well

Co-authored-by: Leibale Eidelman <leibale1998@gmail.com>
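
The distinction matters because String#length counts UTF-16 code units while the socket writes UTF-8 bytes; the strings below are illustrative, not the ones in the test suite:

'aü'.length;             // 2
Buffer.byteLength('aü'); // 3 — 'ü' is 2 bytes in UTF-8
'💻'.length;             // 2 — surrogate pair
Buffer.byteLength('💻'); // 4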

* fix scripts in multi

* do not hide bugs in redis

* fix for e7bf09644b

* remove unused import

* implement WATCH command, fix ZRANGESTORE & GEOSEARCHSTORE tests

* update README.md

Co-authored-by: @GuyRoyse

* use typedoc to auto generate documentation

* run "npm install" before "npm run documentation"

* clean documentation workflow

* fix WATCH spec file

* increase "CLUSTER_NODE_TIMEOUT" to 5000ms to avoid "CLUSTERDOWN" errors in tests

* pull cluster state every 100 ms

* await meetPromises before pulling the cluster state

* enhance the way commanders (client/multi/cluster) get extended with modules and scripts

* add test for socket retry strategy

* implement more commands

* set GETEX minimum version to 6.2

* remove unused imports

* add support for multi in cluster

* upgrade dependencies

* Release 4.0.0-next.5

* remove unused imports

* improve benchmarking

* use the same Multi with duplicated clients

* exclude some files from the documentation, add some exports, clean code

* fix #1636 - handle null in multi.exec

* remove unused import

* add support for tuples in HSET

* add FIRST_KEY_INDEX to HSET

* add a bunch of missing commands, fix MSET and HELLO, add some tests

* add FIRST_KEY_INDEX to MSET and MSETNX

* upgrade actions

* fix coverallsapp/github-action version

* Update documentation.yml

* Update documentation.yml

* clean code

* remove unused imports

* use "npm ci" instead of "npm install"

* fix `self` binding on client modules, use connection pool for `duplicateConnection`

* add client.executeIsolated, rename "duplicateConnection" to "isolated", update README.md (thanks to @GuyRoyse and @SimonPrickett)
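
A hedged example of the isolated-execution API (the WATCH/MULTI flow is a typical use; the key name is illustrative):

await client.executeIsolated(async isolatedClient => {
  await isolatedClient.watch('counter');
  await isolatedClient.multi()
    .incr('counter')
    .exec();
});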

* update README (thanks to @GuyRoyse), add some tests

* try to fix "cluster is down" errors in tests

* try to fix "cluster is down" errors in tests

* upgrade dependencies

* update package-lock

* Release 4.0.0-next.6

* fix #1636 - fix WatchError

* fix for f1bf0beebf - remove .only from multi tests

* Release 4.0.0-next.7

* update README and other markdown files

Co-authored-by: @GuyRoyse & @SimonPrickett

* Doc updates. (#1640)

* update docs, upgrade dependencies

* fix README

* Release 4.0.0-rc.0

* Update README.md

* update docs, add `connectTimeout` options, fix tls

Co-authored-by: Guy Royse <guy@guyroyse.com>

* npm update, "fix" some tests, clean code

* fix AssertionError import

* fix #1642 - fix XREAD, XREADGROUP and XTRIM

* fix #1644 - add the QUIT command

* add socket.noDelay and socket.keepAlive configurations
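
A sketch of the socket-level options this adds, alongside the `connectTimeout` option added earlier (values are illustrative):

const client = createClient({
  socket: {
    host: '127.0.0.1',
    port: 6379,
    connectTimeout: 5000, // ms
    noDelay: true,
    keepAlive: 5000       // ms; set to false to disable
  }
});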

* Update README.md (#1645)

* Update README.md

Fixed an issue with how the connection string was specified.
Now you can have user@host without having to specify a password, which just makes more sense.
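
For instance (host and credentials are illustrative):

createClient({ url: 'redis://alice@localhost:6379' });        // username only
createClient({ url: 'redis://alice:secret@localhost:6379' }); // username and password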

* Update client-configuration.md as well

Co-authored-by: Leibale Eidelman <leibale1998@gmail.com>

* update socket.reconnectStrategy description
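
The strategy receives the number of attempts so far and returns the delay in milliseconds (or an Error to stop reconnecting); the backoff below is just an example:

createClient({
  socket: {
    reconnectStrategy: retries => Math.min(retries * 50, 500)
  }
});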

* fix broken link in v3-to-v4.md

* increase test coverage, fix bug in cluster redirection strategy, implement CLIENT_ID, remove unused EXEC command

Co-authored-by: Nova <novaw@warrenservices.co.uk>
Co-authored-by: Simon Prickett <simon@crudworks.org>
Co-authored-by: Guy Royse <guy@guyroyse.com>
2021-09-02 10:04:48 -04:00


import LinkedList from 'yallist';
import RedisParser from 'redis-parser';
import { AbortError } from './errors';
import { RedisReply } from './commands';
import { encodeCommand } from './commander';

export interface QueueCommandOptions {
    asap?: boolean;
    signal?: any; // TODO: `AbortSignal` type is incorrect
    chainId?: symbol;
}

interface CommandWaitingToBeSent extends CommandWaitingForReply {
    encodedCommand: string;
    chainId?: symbol;
    abort?: {
        signal: any; // TODO: `AbortSignal` type is incorrect
        listener(): void;
    };
}

interface CommandWaitingForReply {
    resolve(reply?: any): void;
    reject(err: Error): void;
    channelsCounter?: number;
}

export type CommandsQueueExecutor = (encodedCommands: string) => boolean | undefined;

export enum PubSubSubscribeCommands {
    SUBSCRIBE = 'SUBSCRIBE',
    PSUBSCRIBE = 'PSUBSCRIBE'
}

export enum PubSubUnsubscribeCommands {
    UNSUBSCRIBE = 'UNSUBSCRIBE',
    PUNSUBSCRIBE = 'PUNSUBSCRIBE'
}

export type PubSubListener = (message: string, channel: string) => unknown;

export type PubSubListenersMap = Map<string, Set<PubSubListener>>;
export default class RedisCommandsQueue {
    static #flushQueue<T extends CommandWaitingForReply>(queue: LinkedList<T>, err: Error): void {
        while (queue.length) {
            queue.shift()!.reject(err);
        }
    }

    static #emitPubSubMessage(listeners: Set<PubSubListener>, message: string, channel: string): void {
        for (const listener of listeners) {
            listener(message, channel);
        }
    }

    readonly #maxLength: number | null | undefined;

    readonly #executor: CommandsQueueExecutor;

    readonly #waitingToBeSent = new LinkedList<CommandWaitingToBeSent>();

    #waitingToBeSentCommandsLength = 0;

    get waitingToBeSentCommandsLength() {
        return this.#waitingToBeSentCommandsLength;
    }

    readonly #waitingForReply = new LinkedList<CommandWaitingForReply>();

    readonly #pubSubState = {
        subscribing: 0,
        subscribed: 0,
        unsubscribing: 0
    };

    readonly #pubSubListeners = {
        channels: <PubSubListenersMap>new Map(),
        patterns: <PubSubListenersMap>new Map()
    };

    readonly #parser = new RedisParser({
        returnReply: (reply: unknown) => {
            if ((this.#pubSubState.subscribing || this.#pubSubState.subscribed) && Array.isArray(reply)) {
                switch (reply[0]) {
                    case 'message':
                        return RedisCommandsQueue.#emitPubSubMessage(
                            this.#pubSubListeners.channels.get(reply[1])!,
                            reply[2],
                            reply[1]
                        );

                    case 'pmessage':
                        return RedisCommandsQueue.#emitPubSubMessage(
                            this.#pubSubListeners.patterns.get(reply[1])!,
                            reply[3],
                            reply[2]
                        );

                    case 'subscribe':
                    case 'psubscribe':
                        if (--this.#waitingForReply.head!.value.channelsCounter! === 0) {
                            this.#shiftWaitingForReply().resolve();
                        }
                        return;
                }
            }

            this.#shiftWaitingForReply().resolve(reply);
        },
        returnError: (err: Error) => this.#shiftWaitingForReply().reject(err)
    });

    #chainInExecution: symbol | undefined;

    constructor(maxLength: number | null | undefined, executor: CommandsQueueExecutor) {
        this.#maxLength = maxLength;
        this.#executor = executor;
    }
    addEncodedCommand<T = RedisReply>(encodedCommand: string, options?: QueueCommandOptions): Promise<T> {
        if (this.#pubSubState.subscribing || this.#pubSubState.subscribed) {
            return Promise.reject(new Error('Cannot send commands in PubSub mode'));
        } else if (this.#maxLength && this.#waitingToBeSent.length + this.#waitingForReply.length >= this.#maxLength) {
            return Promise.reject(new Error('The queue is full'));
        } else if (options?.signal?.aborted) {
            return Promise.reject(new AbortError());
        }

        return new Promise((resolve, reject) => {
            const node = new LinkedList.Node<CommandWaitingToBeSent>({
                encodedCommand,
                chainId: options?.chainId,
                resolve,
                reject
            });

            if (options?.signal) {
                const listener = () => {
                    this.#waitingToBeSent.removeNode(node);
                    node.value.reject(new AbortError());
                };

                node.value.abort = {
                    signal: options.signal,
                    listener
                };
                options.signal.addEventListener('abort', listener, {
                    once: true
                });
            }

            if (options?.asap) {
                this.#waitingToBeSent.unshiftNode(node);
            } else {
                this.#waitingToBeSent.pushNode(node);
            }

            this.#waitingToBeSentCommandsLength += Buffer.byteLength(encodedCommand); // count bytes, to match executeChunk's accounting
        });
    }
    subscribe(command: PubSubSubscribeCommands, channels: string | Array<string>, listener: PubSubListener): Promise<void> {
        const channelsToSubscribe: Array<string> = [],
            listeners = command === PubSubSubscribeCommands.SUBSCRIBE ? this.#pubSubListeners.channels : this.#pubSubListeners.patterns;
        for (const channel of (Array.isArray(channels) ? channels : [channels])) {
            if (listeners.has(channel)) {
                listeners.get(channel)!.add(listener);
                continue;
            }

            listeners.set(channel, new Set([listener]));
            channelsToSubscribe.push(channel);
        }

        if (!channelsToSubscribe.length) {
            return Promise.resolve();
        }

        return this.#pushPubSubCommand(command, channelsToSubscribe);
    }

    unsubscribe(command: PubSubUnsubscribeCommands, channels?: string | Array<string>, listener?: PubSubListener): Promise<void> {
        const listeners = command === PubSubUnsubscribeCommands.UNSUBSCRIBE ? this.#pubSubListeners.channels : this.#pubSubListeners.patterns;
        if (!channels) {
            listeners.clear();
            return this.#pushPubSubCommand(command);
        }

        const channelsToUnsubscribe = [];
        for (const channel of (Array.isArray(channels) ? channels : [channels])) {
            const set = listeners.get(channel);
            if (!set) continue;

            let shouldUnsubscribe = !listener;
            if (listener) {
                set.delete(listener);
                shouldUnsubscribe = set.size === 0;
            }

            if (shouldUnsubscribe) {
                channelsToUnsubscribe.push(channel);
                listeners.delete(channel);
            }
        }

        if (!channelsToUnsubscribe.length) {
            return Promise.resolve();
        }

        return this.#pushPubSubCommand(command, channelsToUnsubscribe);
    }
    #pushPubSubCommand(command: PubSubSubscribeCommands | PubSubUnsubscribeCommands, channels?: Array<string>): Promise<void> {
        return new Promise((resolve, reject) => {
            const isSubscribe = command === PubSubSubscribeCommands.SUBSCRIBE || command === PubSubSubscribeCommands.PSUBSCRIBE,
                inProgressKey = isSubscribe ? 'subscribing' : 'unsubscribing',
                commandArgs: Array<string> = [command];

            let channelsCounter: number;
            if (channels?.length) {
                commandArgs.push(...channels);
                channelsCounter = channels.length;
            } else {
                // unsubscribe only
                channelsCounter = (
                    command[0] === 'P' ?
                        this.#pubSubListeners.patterns :
                        this.#pubSubListeners.channels
                ).size;
            }

            this.#pubSubState[inProgressKey] += channelsCounter;

            this.#waitingToBeSent.push({
                encodedCommand: encodeCommand(commandArgs),
                channelsCounter,
                resolve: () => {
                    this.#pubSubState[inProgressKey] -= channelsCounter;
                    this.#pubSubState.subscribed += channelsCounter * (isSubscribe ? 1 : -1);
                    resolve();
                },
                reject: () => {
                    this.#pubSubState[inProgressKey] -= channelsCounter;
                    reject();
                }
            });
        });
    }

    resubscribe(): Promise<any> | undefined {
        if (!this.#pubSubState.subscribed && !this.#pubSubState.subscribing) {
            return;
        }

        this.#pubSubState.subscribed = this.#pubSubState.subscribing = 0;
        // TODO: acl error on one channel/pattern will reject the whole command
        return Promise.all([
            this.#pushPubSubCommand(PubSubSubscribeCommands.SUBSCRIBE, [...this.#pubSubListeners.channels.keys()]),
            this.#pushPubSubCommand(PubSubSubscribeCommands.PSUBSCRIBE, [...this.#pubSubListeners.patterns.keys()])
        ]);
    }
    executeChunk(recommendedSize: number): boolean | undefined {
        if (!this.#waitingToBeSent.length) return;

        const encoded: Array<string> = [];
        let size = 0,
            lastCommandChainId: symbol | undefined;
        for (const command of this.#waitingToBeSent) {
            encoded.push(command.encodedCommand);
            size += Buffer.byteLength(command.encodedCommand); // count bytes, not UTF-16 code units (#1628)
            if (size > recommendedSize) {
                lastCommandChainId = command.chainId;
                break;
            }
        }

        if (!lastCommandChainId && encoded.length === this.#waitingToBeSent.length) {
            lastCommandChainId = this.#waitingToBeSent.tail!.value.chainId;
        }

        this.#executor(encoded.join(''));

        for (let i = 0; i < encoded.length; i++) {
            const waitingToBeSent = this.#waitingToBeSent.shift()!;
            if (waitingToBeSent.abort) {
                waitingToBeSent.abort.signal.removeEventListener('abort', waitingToBeSent.abort.listener);
            }

            this.#waitingForReply.push({
                resolve: waitingToBeSent.resolve,
                reject: waitingToBeSent.reject,
                channelsCounter: waitingToBeSent.channelsCounter
            });
        }

        this.#chainInExecution = lastCommandChainId;
        this.#waitingToBeSentCommandsLength -= size;
    }
    parseResponse(data: Buffer): void {
        this.#parser.execute(data);
    }

    #shiftWaitingForReply(): CommandWaitingForReply {
        if (!this.#waitingForReply.length) {
            throw new Error('Got an unexpected reply from Redis');
        }

        return this.#waitingForReply.shift()!;
    }

    flushWaitingForReply(err: Error): void {
        RedisCommandsQueue.#flushQueue(this.#waitingForReply, err);

        if (!this.#chainInExecution) {
            return;
        }

        while (this.#waitingToBeSent.head?.value.chainId === this.#chainInExecution) {
            this.#waitingToBeSent.shift();
        }

        this.#chainInExecution = undefined;
    }

    flushAll(err: Error): void {
        RedisCommandsQueue.#flushQueue(this.#waitingForReply, err);
        RedisCommandsQueue.#flushQueue(this.#waitingToBeSent, err);
    }
}
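
A minimal sketch of how a caller might drive this queue (illustrative glue code, not the actual client implementation; `attachQueue` and its internals are assumptions):

import { Socket } from 'net';
import RedisCommandsQueue from './commands-queue';
import { encodeCommand } from './commander';

function attachQueue(socket: Socket) {
    const queue = new RedisCommandsQueue(null, chunk => socket.write(chunk));

    // feed raw socket data back to the parser, which settles pending promises in order
    socket.on('data', data => queue.parseResponse(data));

    return {
        sendCommand(args: Array<string>) {
            const reply = queue.addEncodedCommand(encodeCommand(args));
            // flush once per microtask so commands issued in the same tick share one write
            queueMicrotask(() => queue.executeChunk(socket.writableHighWaterMark));
            return reply;
        }
    };
}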