diff --git a/programs/fileio.c b/programs/fileio.c
index 89ee524b3..c3eddad49 100644
--- a/programs/fileio.c
+++ b/programs/fileio.c
@@ -837,9 +837,13 @@ FIO_compressZstdFrame(const cRess_t* ressPtr,
                     csuzfp = zfp;
                     lastFlushedSize = compressedfilesize;
                     assert(inputPresented > 0);
+                    DISPLAYLEVEL(6, "input blocked %u/%u(%.2f) - ingested:%u vs %u:consumed - flushed:%u vs %u:produced \n",
+                                inputBlocked, inputPresented, (double)inputBlocked/inputPresented*100,
+                                (U32)newlyIngested, (U32)newlyConsumed,
+                                (U32)newlyFlushed, (U32)newlyProduced);
                     if ( (inputBlocked > inputPresented / 8)        /* input is waiting often, because input buffers is full : compression or output too slow */
-                      && (newlyFlushed * 17 / 16 > newlyProduced)   /* flush everything that is produced */
-                      && (newlyIngested * 17 / 16 > newlyConsumed)  /* can't keep up with input speed */
+                      && (newlyFlushed * 33 / 32 > newlyProduced)   /* flush everything that is produced */
+                      && (newlyIngested * 33 / 32 > newlyConsumed)  /* input speed as fast or faster than compression speed */
                     ) {
                         DISPLAYLEVEL(6, "recommend faster as in(%llu) >= (%llu)comp(%llu) <= out(%llu) \n",
                                     newlyIngested, newlyConsumed, newlyProduced, newlyFlushed);
diff --git a/programs/zstd.1.md b/programs/zstd.1.md
index b71d5d5bf..5f3701864 100644
--- a/programs/zstd.1.md
+++ b/programs/zstd.1.md
@@ -139,6 +139,7 @@ the last one takes effect.
     The current compression level can be observed live by using command `-v`.
     Works with multi-threading and `--long` mode.
     Does not work with `--single-thread`.
+    Due to the chaotic nature of dynamic adaptation, compressed result is not reproducible.
 * `-D file`:
     use `file` as Dictionary to compress or decompress FILE(s)
 * `--no-dictID`:
diff --git a/tests/rateLimiter.py b/tests/rateLimiter.py
index 134ef8971..15222e016 100755
--- a/tests/rateLimiter.py
+++ b/tests/rateLimiter.py
@@ -19,7 +19,7 @@ import time
 MB = 1024 * 1024
 rate = float(sys.argv[1]) * MB
-rate *= 1.25 # compensation for excluding write time (experimentally determined)
+rate *= 1.4 # compensation for excluding i/o time (experimentally determined)
 
 start = time.time()
 total_read = 0
 
@@ -29,9 +29,14 @@ while len(buf):
     to_read = max(int(rate * (now - start) - total_read), 1)
     max_buf_size = 1 * MB
     to_read = min(to_read, max_buf_size)
+
+    read_start = time.time()
     buf = sys.stdin.buffer.read(to_read)
-    write_start = time.time()
+
+    write_start = read_end = time.time()
     sys.stdout.buffer.write(buf)
     write_end = time.time()
-    start += write_end - write_start # exclude write delay
+
+    wait_time = max(read_end - read_start, write_end - write_start)
+    start += wait_time # exclude delay of the slowest
     total_read += len(buf)
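
Reviewer note on the programs/fileio.c change: the adaptation heuristic is tightened from a 17/16 ratio (about 6% tolerance) to 33/32 (about 3%), so a faster compression level is only recommended when output flushes essentially everything produced and ingestion keeps pace with consumption, on top of input blocking more than 1/8 of the time. All three tests together point at compression itself as the bottleneck. The sketch below is an illustrative Python model of the new predicate, not part of the patch; the parameter names mirror the C variables, and the function name recommend_faster is invented for the sketch.

    def recommend_faster(input_blocked, input_presented,
                         newly_ingested, newly_consumed,
                         newly_flushed, newly_produced):
        # Input waits often: input buffers are full, so compression
        # or output is too slow to drain them.
        input_waits = input_blocked > input_presented / 8
        # Output keeps up: the flushed amount is within ~3% of the
        # produced amount (the old 17/16 ratio tolerated ~6%).
        output_keeps_up = newly_flushed * 33 // 32 > newly_produced
        # Input keeps up: ingestion is at least as fast as compression.
        input_keeps_up = newly_ingested * 33 // 32 > newly_consumed
        return input_waits and output_keeps_up and input_keeps_up

For example, with newly_flushed = 100 and newly_produced = 103, the new ratio yields 100 * 33 // 32 = 103, which is not greater than 103, so a 3% flush backlog already vetoes the "go faster" recommendation; the old ratio (100 * 17 // 16 = 106) would still have allowed it.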
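
Reviewer note on the tests/rateLimiter.py change: the old loop subtracted only the write stall from the pacing window, so whenever the read side blocked, the limiter believed it was behind schedule and compensated with a burst of reads. The patched loop times both sides and shifts the window start by the larger of the two stalls. A minimal illustration of the difference, using hypothetical timings rather than anything measured by the patch:

    # One loop iteration in which the read blocks longer than the write.
    read_start, read_end = 0.00, 0.20       # read stalled for 200 ms
    write_start, write_end = 0.20, 0.25     # write stalled for 50 ms

    # Old behavior: only the write stall is excluded, so the 200 ms
    # read stall still counts against the rate budget and triggers
    # a catch-up burst on the next iteration.
    old_correction = write_end - write_start
    # New behavior: the slowest side of the pipe is excluded.
    new_correction = max(read_end - read_start, write_end - write_start)
    print(round(old_correction, 3), round(new_correction, 3))   # 0.05 0.2

The script's interface is unchanged (MB/s as the sole argument, stdin to stdout), so an invocation such as ./rateLimiter.py 5 < infile | zstd --adapt -o out.zst (an illustrative example, not taken from the patch) would still pace input at roughly 5 MB/s. The bumped compensation factor (1.25 to 1.4) is noted in the patch as experimentally determined, consistent with more wall-clock time now being discounted per iteration.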