
Better pipelining

Add fallback mode
Ruben Bridgewater
2015-10-08 02:29:07 +02:00
parent 9ee1e3c764
commit 7d2bb8edec
3 changed files with 105 additions and 91 deletions

README.md

@@ -493,7 +493,7 @@ Redis. The interface in `node_redis` is to return an individual `Batch` object b
The only difference between .batch and .multi is that no transaction is going to be used.
Be aware that the errors are - just like in multi statements - part of the result. That way both errors and results can be returned at the same time.
If you fire many commands at once this is going to boost the execution speed significantly (see the benchmark section). Please remember that all commands are kept in memory until they are fired.
If you fire many commands at once this is going to **boost the execution speed by up to 400%** [sic!] compared to firing the same commands in a loop without waiting for the result! See the benchmarks for further comparison. Please remember that all commands are kept in memory until they are fired.
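For illustration, a minimal `.batch` sketch (the keys and the deliberately failing command are made up; the error behaviour is the one described above):

```js
var redis = require('redis');
var client = redis.createClient();

// Queue commands without sending them; they are pipelined on .exec()
var batch = client.batch();
batch.set('foo', 'bar');
batch.hgetall('foo'); // wrong type on purpose - this command will fail

batch.exec(function (err, replies) {
    // err is only set for connection-level problems;
    // command errors (like the WRONGTYPE above) show up as entries in `replies`
    console.log(replies);
    client.quit();
});
```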
## Monitor mode
@@ -637,55 +637,56 @@ Here are results of `multi_bench.js` which is similar to `redis-benchmark` from
hiredis parser (Lenovo T450s i7-5600U):
Client count: 5, node version: 4.1.1, server version: 3.0.3, parser: hiredis
PING, 1/5 min/max/avg/p95: 0/ 11/ 0.03/ 0.00 1412ms total, 35410.76 ops/sec
PING, 50/5 min/max/avg/p95: 0/ 9/ 0.54/ 1.00 539ms total, 92764.38 ops/sec
PING, batch 50/5 min/max/avg/p95: 0/ 3/ 0.32/ 1.00 327ms total, 152905.20 ops/sec
SET 4B str, 1/5 min/max/avg/p95: 0/ 4/ 0.03/ 0.00 1450ms total, 34482.76 ops/sec
SET 4B str, 50/5 min/max/avg/p95: 0/ 2/ 0.55/ 1.00 548ms total, 91240.88 ops/sec
SET 4B str, batch 50/5 min/max/avg/p95: 0/ 10/ 0.36/ 1.00 362ms total, 138121.55 ops/sec
SET 4B buf, 1/5 min/max/avg/p95: 0/ 5/ 0.06/ 0.55 2838ms total, 17618.04 ops/sec
SET 4B buf, 50/5 min/max/avg/p95: 0/ 9/ 1.70/ 3.00 1699ms total, 29429.08 ops/sec
SET 4B buf, batch 50/5 min/max/avg/p95: 1/ 11/ 1.69/ 3.00 1694ms total, 29515.94 ops/sec
GET 4B str, 1/5 min/max/avg/p95: 0/ 4/ 0.03/ 0.00 1350ms total, 37037.04 ops/sec
GET 4B str, 50/5 min/max/avg/p95: 0/ 7/ 0.54/ 1.00 539ms total, 92764.38 ops/sec
GET 4B str, batch 50/5 min/max/avg/p95: 0/ 2/ 0.48/ 1.00 483ms total, 103519.67 ops/sec
GET 4B buf, 1/5 min/max/avg/p95: 0/ 9/ 0.03/ 0.00 1373ms total, 36416.61 ops/sec
GET 4B buf, 50/5 min/max/avg/p95: 0/ 2/ 0.53/ 1.00 534ms total, 93632.96 ops/sec
GET 4B buf, batch 50/5 min/max/avg/p95: 0/ 10/ 0.60/ 1.00 605ms total, 82644.63 ops/sec
SET 4KiB str, 1/5 min/max/avg/p95: 0/ 5/ 0.03/ 0.00 1790ms total, 27932.96 ops/sec
SET 4KiB str, 50/5 min/max/avg/p95: 0/ 7/ 0.80/ 2.00 798ms total, 62656.64 ops/sec
SET 4KiB str, batch 50/5 min/max/avg/p95: 0/ 10/ 0.92/ 1.00 924ms total, 54112.55 ops/sec
SET 4KiB buf, 1/5 min/max/avg/p95: 0/ 16/ 0.05/ 1.00 2687ms total, 18608.11 ops/sec
SET 4KiB buf, 50/5 min/max/avg/p95: 0/ 16/ 1.88/ 3.00 1885ms total, 26525.20 ops/sec
SET 4KiB buf, batch 50/5 min/max/avg/p95: 1/ 6/ 1.83/ 3.00 1832ms total, 27292.58 ops/sec
GET 4KiB str, 1/5 min/max/avg/p95: 0/ 7/ 0.04/ 0.00 1909ms total, 26191.72 ops/sec
GET 4KiB str, 50/5 min/max/avg/p95: 0/ 8/ 0.88/ 2.00 887ms total, 56369.79 ops/sec
GET 4KiB str, batch 50/5 min/max/avg/p95: 0/ 4/ 0.57/ 1.00 570ms total, 87719.30 ops/sec
GET 4KiB buf, 1/5 min/max/avg/p95: 0/ 7/ 0.03/ 0.00 1754ms total, 28506.27 ops/sec
GET 4KiB buf, 50/5 min/max/avg/p95: 0/ 6/ 0.72/ 1.00 717ms total, 69735.01 ops/sec
GET 4KiB buf, batch 50/5 min/max/avg/p95: 0/ 1/ 0.47/ 1.00 472ms total, 105932.20 ops/sec
INCR, 1/5 min/max/avg/p95: 0/ 8/ 0.03/ 0.00 1531ms total, 32658.39 ops/sec
INCR, 50/5 min/max/avg/p95: 0/ 5/ 0.64/ 1.00 638ms total, 78369.91 ops/sec
INCR, batch 50/5 min/max/avg/p95: 0/ 13/ 0.45/ 1.00 452ms total, 110619.47 ops/sec
LPUSH, 1/5 min/max/avg/p95: 0/ 4/ 0.03/ 0.00 1445ms total, 34602.08 ops/sec
LPUSH, 50/5 min/max/avg/p95: 0/ 9/ 0.67/ 1.00 670ms total, 74626.87 ops/sec
LPUSH, batch 50/5 min/max/avg/p95: 0/ 2/ 0.34/ 1.00 339ms total, 147492.63 ops/sec
LRANGE 10, 1/5 min/max/avg/p95: 0/ 9/ 0.03/ 0.00 1739ms total, 28752.16 ops/sec
LRANGE 10, 50/5 min/max/avg/p95: 0/ 11/ 0.76/ 2.00 759ms total, 65876.15 ops/sec
LRANGE 10, batch 50/5 min/max/avg/p95: 0/ 4/ 0.49/ 1.00 497ms total, 100603.62 ops/sec
LRANGE 100, 1/5 min/max/avg/p95: 0/ 7/ 0.06/ 1.00 3252ms total, 15375.15 ops/sec
LRANGE 100, 50/5 min/max/avg/p95: 0/ 9/ 1.90/ 3.00 1905ms total, 26246.72 ops/sec
LRANGE 100, batch 50/5 min/max/avg/p95: 1/ 5/ 1.81/ 2.00 1816ms total, 27533.04 ops/sec
SET 4MiB buf, 1/5 min/max/avg/p95: 2/ 5/ 2.32/ 3.00 1160ms total, 431.03 ops/sec
SET 4MiB buf, 50/5 min/max/avg/p95: 19/ 134/ 102.27/ 118.00 1071ms total, 466.85 ops/sec
SET 4MiB buf, batch 50/5 min/max/avg/p95: 97/ 129/ 104.90/ 129.00 1049ms total, 476.64 ops/sec
GET 4MiB str, 1/5 min/max/avg/p95: 4/ 19/ 6.59/ 11.00 660ms total, 151.52 ops/sec
GET 4MiB str, 50/5 min/max/avg/p95: 19/ 278/ 200.11/ 258.85 503ms total, 198.81 ops/sec
GET 4MiB str, batch 50/5 min/max/avg/p95: 229/ 235/ 232.00/ 235.00 465ms total, 215.05 ops/sec
GET 4MiB buf, 1/5 min/max/avg/p95: 4/ 27/ 7.11/ 13.95 713ms total, 140.25 ops/sec
GET 4MiB buf, 50/5 min/max/avg/p95: 7/ 293/ 204.74/ 269.00 518ms total, 193.05 ops/sec
GET 4MiB buf, batch 50/5 min/max/avg/p95: 219/ 261/ 240.00/ 261.00 480ms total, 208.33 ops/sec
Client count: 5, node version: 4.1.2, server version: 3.0.3, parser: hiredis
PING, 1/5 min/max/avg/p95: 0/ 5/ 0.03/ 0.00 1537ms total, 32530.90 ops/sec
PING, 50/5 min/max/avg/p95: 0/ 4/ 0.49/ 1.00 491ms total, 101832.99 ops/sec
PING, batch 50/5 min/max/avg/p95: 0/ 2/ 0.17/ 1.00 178ms total, 280898.88 ops/sec
SET 4B str, 1/5 min/max/avg/p95: 0/ 2/ 0.03/ 0.00 1400ms total, 35714.29 ops/sec
SET 4B str, 50/5 min/max/avg/p95: 0/ 3/ 0.61/ 1.00 610ms total, 81967.21 ops/sec
SET 4B str, batch 50/5 min/max/avg/p95: 0/ 1/ 0.19/ 1.00 198ms total, 252525.25 ops/sec
SET 4B buf, 1/5 min/max/avg/p95: 0/ 3/ 0.05/ 0.00 2349ms total, 21285.65 ops/sec
SET 4B buf, 50/5 min/max/avg/p95: 0/ 5/ 1.63/ 3.00 1632ms total, 30637.25 ops/sec
SET 4B buf, batch 50/5 min/max/avg/p95: 0/ 1/ 0.37/ 1.00 366ms total, 136612.02 ops/sec
GET 4B str, 1/5 min/max/avg/p95: 0/ 3/ 0.03/ 0.00 1348ms total, 37091.99 ops/sec
GET 4B str, 50/5 min/max/avg/p95: 0/ 3/ 0.51/ 1.00 513ms total, 97465.89 ops/sec
GET 4B str, batch 50/5 min/max/avg/p95: 0/ 1/ 0.18/ 1.00 177ms total, 282485.88 ops/sec
GET 4B buf, 1/5 min/max/avg/p95: 0/ 3/ 0.03/ 0.00 1336ms total, 37425.15 ops/sec
GET 4B buf, 50/5 min/max/avg/p95: 0/ 4/ 0.52/ 1.00 525ms total, 95238.10 ops/sec
GET 4B buf, batch 50/5 min/max/avg/p95: 0/ 1/ 0.18/ 1.00 177ms total, 282485.88 ops/sec
SET 4KiB str, 1/5 min/max/avg/p95: 0/ 2/ 0.03/ 0.00 1674ms total, 29868.58 ops/sec
SET 4KiB str, 50/5 min/max/avg/p95: 0/ 3/ 0.77/ 1.00 775ms total, 64516.13 ops/sec
SET 4KiB str, batch 50/5 min/max/avg/p95: 0/ 3/ 0.50/ 1.00 500ms total, 100000.00 ops/sec
SET 4KiB buf, 1/5 min/max/avg/p95: 0/ 2/ 0.05/ 0.00 2410ms total, 20746.89 ops/sec
SET 4KiB buf, 50/5 min/max/avg/p95: 0/ 5/ 1.64/ 3.00 1643ms total, 30432.14 ops/sec
SET 4KiB buf, batch 50/5 min/max/avg/p95: 0/ 1/ 0.41/ 1.00 409ms total, 122249.39 ops/sec
GET 4KiB str, 1/5 min/max/avg/p95: 0/ 2/ 0.03/ 0.00 1422ms total, 35161.74 ops/sec
GET 4KiB str, 50/5 min/max/avg/p95: 0/ 4/ 0.68/ 1.00 680ms total, 73529.41 ops/sec
GET 4KiB str, batch 50/5 min/max/avg/p95: 0/ 2/ 0.39/ 1.00 391ms total, 127877.24 ops/sec
GET 4KiB buf, 1/5 min/max/avg/p95: 0/ 1/ 0.03/ 0.00 1420ms total, 35211.27 ops/sec
GET 4KiB buf, 50/5 min/max/avg/p95: 0/ 4/ 0.68/ 1.00 681ms total, 73421.44 ops/sec
GET 4KiB buf, batch 50/5 min/max/avg/p95: 0/ 2/ 0.39/ 1.00 387ms total, 129198.97 ops/sec
INCR, 1/5 min/max/avg/p95: 0/ 2/ 0.03/ 0.00 1334ms total, 37481.26 ops/sec
INCR, 50/5 min/max/avg/p95: 0/ 4/ 0.51/ 1.00 513ms total, 97465.89 ops/sec
INCR, batch 50/5 min/max/avg/p95: 0/ 1/ 0.18/ 1.00 179ms total, 279329.61 ops/sec
LPUSH, 1/5 min/max/avg/p95: 0/ 2/ 0.03/ 0.00 1351ms total, 37009.62 ops/sec
LPUSH, 50/5 min/max/avg/p95: 0/ 3/ 0.52/ 1.00 521ms total, 95969.29 ops/sec
LPUSH, batch 50/5 min/max/avg/p95: 0/ 2/ 0.20/ 1.00 200ms total, 250000.00 ops/sec
LRANGE 10, 1/5 min/max/avg/p95: 0/ 1/ 0.03/ 0.00 1562ms total, 32010.24 ops/sec
LRANGE 10, 50/5 min/max/avg/p95: 0/ 4/ 0.69/ 1.00 690ms total, 72463.77 ops/sec
LRANGE 10, batch 50/5 min/max/avg/p95: 0/ 2/ 0.39/ 1.00 393ms total, 127226.46 ops/sec
LRANGE 100, 1/5 min/max/avg/p95: 0/ 3/ 0.06/ 1.00 3009ms total, 16616.82 ops/sec
LRANGE 100, 50/5 min/max/avg/p95: 0/ 5/ 1.85/ 3.00 1850ms total, 27027.03 ops/sec
LRANGE 100, batch 50/5 min/max/avg/p95: 2/ 4/ 2.15/ 3.00 2153ms total, 23223.41 ops/sec
SET 4MiB buf, 1/5 min/max/avg/p95: 1/ 5/ 1.91/ 3.00 957ms total, 522.47 ops/sec
SET 4MiB buf, 50/5 min/max/avg/p95: 13/ 109/ 94.20/ 102.00 987ms total, 506.59 ops/sec
SET 4MiB buf, batch 50/5 min/max/avg/p95: 90/ 107/ 93.10/ 107.00 931ms total, 537.06 ops/sec
GET 4MiB str, 1/5 min/max/avg/p95: 4/ 16/ 5.97/ 10.00 598ms total, 167.22 ops/sec
GET 4MiB str, 50/5 min/max/avg/p95: 10/ 249/ 179.47/ 231.90 454ms total, 220.26 ops/sec
GET 4MiB str, batch 50/5 min/max/avg/p95: 215/ 226/ 220.50/ 226.00 441ms total, 226.76 ops/sec
GET 4MiB buf, 1/5 min/max/avg/p95: 3/ 26/ 6.55/ 11.95 658ms total, 151.98 ops/sec
GET 4MiB buf, 50/5 min/max/avg/p95: 11/ 265/ 186.73/ 241.90 469ms total, 213.22 ops/sec
GET 4MiB buf, batch 50/5 min/max/avg/p95: 226/ 247/ 236.50/ 247.00 473ms total, 211.42 ops/sec
End of tests. Total time elapsed: 44952 ms
The hiredis and js parsers should be on the same level most of the time. The js parser lacks speed for large responses, though.
Therefore the hiredis parser is the default used in node_redis. To use `hiredis`, do:

changelog.md

@@ -1,28 +1,32 @@
Changelog
=========
## v.2.2.0 - 07, 2015 - The peregrino falcon
## v.2.2.0 - 08, 2015 - The peregrino falcon
The peregrino falcon is the fastest bird on earth and this is what this release is all about: We increased performance for heavy usage by up to **400%** [sic!] and increased overall performance for any command as well. Please check the benchmarks in the [README.md](README.md) for further details.
Features
- Added disable_resubscribing option to prevent a client from resubscribing after reconnecting (@BridgeAR)
- Added rename_commands options to handle renamed commands from the redis config (@digmxl & @BridgeAR)
- Increased performance (@BridgeAR)
- exchanging built in queue with [Petka Antonov's](@petkaantonov) [double-ended queue](https://github.com/petkaantonov/deque)
- Added rename_commands options to handle renamed commands from the redis config ([@digmxl](https://github.com/digmxl) & [@BridgeAR](https://github.com/BridgeAR))
- Added disable_resubscribing option to prevent a client from resubscribing after reconnecting ([@BridgeAR](https://github.com/BridgeAR))
- Increased performance ([@BridgeAR](https://github.com/BridgeAR))
- exchanging built in queue with [@petkaantonov](https://github.com/petkaantonov)'s [double-ended queue](https://github.com/petkaantonov/deque)
- prevent polymorphism
- optimize statements
- Added .batch command, similar to multi but without transaction (@BridgeAR)
- Improved pipelining to minimize the [RTT](http://redis.io/topics/pipelining) further (@BridgeAR)
This release is mainly focusing on further speed improvements and we can proudly say that node_redis is very likely outperforming any other node redis client.
If you do not rely on transactions but want to reduce the RTT you can use .batch from now on. It'll behave just the same as .multi but it does not have any transaction and therefore won't roll back any failed commands.
Both .multi and .batch are from now on going to fire the commands in bulk without doing any other operation in between.
- Added *.batch* command, similar to .multi but without transaction ([@BridgeAR](https://github.com/BridgeAR))
- Improved pipelining to minimize the [RTT](http://redis.io/topics/pipelining) further ([@BridgeAR](https://github.com/BridgeAR))
Bugfixes
- Fix a javascript parser regression introduced in 2.0 that could result in timeouts on high load. (@BridgeAR)
- Fixed should_buffer boolean for .exec, .select and .auth commands not being returned (@BridgeAR)
- Fix a javascript parser regression introduced in 2.0 that could result in timeouts on high load. ([@BridgeAR](https://github.com/BridgeAR))
- Fixed should_buffer boolean for .exec, .select and .auth commands not being returned ([@BridgeAR](https://github.com/BridgeAR))
If you do not rely on transactions but want to reduce the RTT you can use .batch from now on. It'll behave just the same as .multi but it does not have any transaction and therefore won't roll back any failed commands.<br>
Both .multi and .batch are from now on going to cache the commands and release them while calling .exec.
Please consider using .batch instead of looping through a lot of commands one by one. This will significantly improve your performance.
To conclude: we can proudly say that node_redis is very likely outperforming any other node redis client.
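To sketch the recommendation above - replacing a command-per-iteration loop with a single `.batch` (counts and keys are illustrative only):

```js
// Looping: every command is written to the socket on its own
for (var i = 0; i < 1000; i++) {
    client.set('key:' + i, 'value:' + i);
}

// Batching: commands are cached and released together on .exec()
var batch = client.batch();
for (var j = 0; j < 1000; j++) {
    batch.set('key:' + j, 'value:' + j);
}
batch.exec(function (err, replies) {
    console.log(replies.length); // 1000
});
```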
## v2.1.0 - Oct 02, 2015

index.js

@@ -35,6 +35,13 @@ parsers.push(require('./lib/parsers/javascript'));
function RedisClient(stream, options) {
    options = options || {};
    if (!stream.cork) {
        stream.cork = function noop() {};
        stream.uncork = function noop() {};
        stream.__write = stream.write;
        stream.write = this.writeStream.bind(this);
    }
    this.stream = stream;
    this.options = options;
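The block above is the new fallback mode: if the stream does not expose native `cork`/`uncork`, both are stubbed with noops, the original `write` is kept as `__write`, and writes are routed through `writeStream` instead. On streams that do support corking, the mechanism being relied on looks roughly like this (a standalone sketch, not code from this commit):

```js
var net = require('net');
var socket = net.connect(6379);

socket.cork();                         // buffer subsequent writes
socket.write('*1\r\n$4\r\nPING\r\n');  // queued, not sent yet
socket.write('*1\r\n$4\r\nPING\r\n');  // queued, not sent yet
socket.uncork();                       // flush both writes, typically as a single packet
```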
@@ -650,26 +657,6 @@ RedisClient.prototype.return_reply = function (reply) {
}
};
RedisClient.prototype.writeStream = function (data) {
    var stream = this.stream;
    var nr = 0;
    // Do not use a pipeline
    if (this.pipeline === 0) {
        return !stream.write(data);
    }
    this.pipeline--;
    this.pipeline_queue.push(data);
    if (this.pipeline === 0) {
        var len = this.pipeline_queue.length;
        while (len--) {
            nr += !stream.write(this.pipeline_queue.shift());
        }
        return !nr;
    }
    return true;
};
RedisClient.prototype.send_command = function (command, args, callback) {
var arg, command_obj, i, err,
stream = this.stream,
@@ -775,21 +762,21 @@ RedisClient.prototype.send_command = function (command, args, callback) {
command_str += '$' + Buffer.byteLength(arg) + '\r\n' + arg + '\r\n';
}
debug('Send ' + this.address + ' id ' + this.connection_id + ': ' + command_str);
buffered_writes += !this.writeStream(command_str);
buffered_writes += !stream.write(command_str);
} else {
debug('Send command (' + command_str + ') has Buffer arguments');
buffered_writes += !this.writeStream(command_str);
buffered_writes += !stream.write(command_str);
for (i = 0; i < args.length; i += 1) {
arg = args[i];
if (Buffer.isBuffer(arg)) {
if (arg.length === 0) {
debug('send_command: using empty string for 0 length buffer');
buffered_writes += !this.writeStream('$0\r\n\r\n');
buffered_writes += !stream.write('$0\r\n\r\n');
} else {
buffered_writes += !this.writeStream('$' + arg.length + '\r\n');
buffered_writes += !this.writeStream(arg);
buffered_writes += !this.writeStream('\r\n');
buffered_writes += !stream.write('$' + arg.length + '\r\n');
buffered_writes += !stream.write(arg);
buffered_writes += !stream.write('\r\n');
debug('send_command: buffer send ' + arg.length + ' bytes');
}
} else {
@@ -797,7 +784,7 @@ RedisClient.prototype.send_command = function (command, args, callback) {
arg = String(arg);
}
debug('send_command: string send ' + Buffer.byteLength(arg) + ' bytes: ' + arg);
buffered_writes += !this.writeStream('$' + Buffer.byteLength(arg) + '\r\n' + arg + '\r\n');
buffered_writes += !stream.write('$' + Buffer.byteLength(arg) + '\r\n' + arg + '\r\n');
}
}
}
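For reference, the concatenation above produces standard RESP framing. A self-contained sketch of the same encoding (`encodeCommand` is a made-up helper, not part of node_redis):

```js
function encodeCommand(args) {
    var str = '*' + args.length + '\r\n';
    for (var i = 0; i < args.length; i++) {
        var arg = String(args[i]);
        str += '$' + Buffer.byteLength(arg) + '\r\n' + arg + '\r\n';
    }
    return str;
}

console.log(JSON.stringify(encodeCommand(['SET', 'foo', 'bar'])));
// "*3\r\n$3\r\nSET\r\n$3\r\nfoo\r\n$3\r\nbar\r\n"
```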
@@ -808,6 +795,25 @@ RedisClient.prototype.send_command = function (command, args, callback) {
return !this.should_buffer;
};
RedisClient.prototype.writeStream = function (data) {
    var nr = 0;
    // Do not use a pipeline
    if (this.pipeline === 0) {
        return !this.stream.__write(data);
    }
    this.pipeline--;
    this.pipeline_queue.push(data);
    if (this.pipeline === 0) {
        var len = this.pipeline_queue.length;
        while (len--) {
            nr += !this.stream.__write(this.pipeline_queue.shift());
        }
        return !nr;
    }
    return true;
};
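A hypothetical walkthrough of the re-added `writeStream` (the code that sets `pipeline` and initializes `pipeline_queue` before a batch is outside this hunk):

```js
client.pipeline = 3;                // expect three pipelined writes
client.writeStream('first chunk');  // pipeline -> 2, chunk queued
client.writeStream('second chunk'); // pipeline -> 1, chunk queued
client.writeStream('third chunk');  // pipeline -> 0, all three flushed via stream.__write
client.writeStream('later chunk');  // pipeline is already 0, written through immediately
```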
RedisClient.prototype.pub_sub_command = function (command_obj) {
var i, key, command, args;
@@ -862,6 +868,7 @@ RedisClient.prototype.end = function (flush) {
};
function Multi(client, args, transaction) {
    client.stream.cork();
    this._client = client;
    this.queue = [];
    if (transaction) {
@@ -1091,6 +1098,7 @@ Multi.prototype.exec_transaction = function (callback) {
        this.send_command(command, args, index, cb);
    }
    this._client.stream.uncork();
    return this._client.send_command('exec', [], function(err, replies) {
        self.execute_callback(err, replies);
    });
@@ -1198,6 +1206,7 @@ Multi.prototype.exec = Multi.prototype.EXEC = function (callback) {
        this._client.send_command(command, args, cb);
        index++;
    }
    this._client.stream.uncork();
    return this._client.should_buffer;
};