
modified util::time API

for easier invocation.
- no longer exposes the frequency timer: it's either useless, or stored internally in a static variable (init is only necessary once).
- UTIL_getTime() provides its result by function return.
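
For illustration, here is the calling pattern the diff below converts every timing site to. This is a minimal sketch against the new API, not code from the commit; `bench_once()` and `do_work()` are hypothetical names, and the include assumes zstd's programs/util.h.

```c
#include <stdio.h>
#include "util.h"   /* zstd's programs/util.h: UTIL_time_t, UTIL_getTime(), UTIL_clockSpanMicro() */

extern void do_work(void);   /* hypothetical workload to be measured */

static void bench_once(void)
{
    /* new style: no UTIL_freq_t / UTIL_initTimer() boilerplate;
     * UTIL_getTime() returns its result instead of filling an out-parameter */
    UTIL_time_t const clockStart = UTIL_getTime();
    do_work();
    {   U64 const elapsedMicro = UTIL_clockSpanMicro(clockStart);   /* no more ticksPerSecond argument */
        printf("elapsed: %llu us\n", (unsigned long long)elapsedMicro);
    }
}
```

On platforms that need a tick frequency (for instance QueryPerformanceFrequency() on Windows), the natural way to honor "init is only necessary once" is a static variable inside the time functions, initialized on first use, so the frequency never has to cross the API boundary.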
Author: Yann Collet
Date:   2017-09-12 18:12:46 -07:00
parent 5bbb465d3e
commit c95c0c9725
4 changed files with 93 additions and 77 deletions


@@ -161,7 +161,6 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
ZSTD_CCtx* const ctx = ZSTD_createCCtx();
ZSTD_DCtx* const dctx = ZSTD_createDCtx();
U32 nbBlocks;
- UTIL_freq_t ticksPerSecond;
/* checks */
if (!compressedBuffer || !resultBuffer || !blockTable || !ctx || !dctx)
@@ -169,7 +168,6 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
/* init */
if (strlen(displayName)>17) displayName += strlen(displayName)-17; /* can only display 17 characters */
- UTIL_initTimer(&ticksPerSecond);
/* Init blockTable data */
{ z_const char* srcPtr = (z_const char*)srcBuffer;
@@ -209,17 +207,17 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
size_t cSize = 0;
double ratio = 0.;
- UTIL_getTime(&coolTime);
+ coolTime = UTIL_getTime();
DISPLAYLEVEL(2, "\r%79s\r", "");
while (!cCompleted | !dCompleted) {
UTIL_time_t clockStart;
U64 clockLoop = g_nbIterations ? TIMELOOP_MICROSEC : 1;
/* overheat protection */
- if (UTIL_clockSpanMicro(coolTime, ticksPerSecond) > ACTIVEPERIOD_MICROSEC) {
+ if (UTIL_clockSpanMicro(coolTime) > ACTIVEPERIOD_MICROSEC) {
DISPLAYLEVEL(2, "\rcooling down ... \r");
UTIL_sleep(COOLPERIOD_SEC);
- UTIL_getTime(&coolTime);
+ coolTime = UTIL_getTime();
}
/* Compression */
@@ -227,8 +225,8 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
if (!cCompleted) memset(compressedBuffer, 0xE5, maxCompressedSize); /* warm up and erase result buffer */
UTIL_sleepMilli(1); /* give processor time to other processes */
- UTIL_waitForNextTick(ticksPerSecond);
- UTIL_getTime(&clockStart);
+ UTIL_waitForNextTick();
+ clockStart = UTIL_getTime();
if (!cCompleted) { /* still some time to do compression tests */
U32 nbLoops = 0;
@@ -256,7 +254,7 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
blockTable[blockNb].cSize = rSize;
}
nbLoops++;
- } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+ } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
ZSTD_freeCDict(cdict);
} else if (compressor == BMK_ZSTD_STREAM) {
ZSTD_parameters const zparams = ZSTD_getParams(cLevel, avgSize, dictBufferSize);
@@ -285,7 +283,7 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
blockTable[blockNb].cSize = outBuffer.pos;
}
nbLoops++;
- } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+ } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
ZSTD_freeCStream(zbc);
} else if (compressor == BMK_ZWRAP_ZLIB_REUSE || compressor == BMK_ZWRAP_ZSTD_REUSE || compressor == BMK_ZLIB_REUSE) {
z_stream def;
@@ -326,7 +324,7 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
blockTable[blockNb].cSize = def.total_out;
}
nbLoops++;
- } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+ } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
ret = deflateEnd(&def);
if (ret != Z_OK) EXM_THROW(1, "deflateEnd failure");
} else {
@@ -359,9 +357,9 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
blockTable[blockNb].cSize = def.total_out;
}
nbLoops++;
- } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+ } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
}
- { U64 const clockSpan = UTIL_clockSpanMicro(clockStart, ticksPerSecond);
+ { U64 const clockSpan = UTIL_clockSpanMicro(clockStart);
if (clockSpan < fastestC*nbLoops) fastestC = clockSpan / nbLoops;
totalCTime += clockSpan;
cCompleted = totalCTime>maxTime;
@@ -381,8 +379,8 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
if (!dCompleted) memset(resultBuffer, 0xD6, srcSize); /* warm result buffer */
UTIL_sleepMilli(1); /* give processor time to other processes */
- UTIL_waitForNextTick(ticksPerSecond);
- UTIL_getTime(&clockStart);
+ UTIL_waitForNextTick();
+ clockStart = UTIL_getTime();
if (!dCompleted) {
U32 nbLoops = 0;
@@ -405,7 +403,7 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
blockTable[blockNb].resSize = regenSize;
}
nbLoops++;
- } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+ } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
ZSTD_freeDDict(ddict);
} else if (compressor == BMK_ZSTD_STREAM) {
ZSTD_inBuffer inBuffer;
@@ -431,7 +429,7 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
blockTable[blockNb].resSize = outBuffer.pos;
}
nbLoops++;
- } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+ } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
ZSTD_freeDStream(zbd);
} else if (compressor == BMK_ZWRAP_ZLIB_REUSE || compressor == BMK_ZWRAP_ZSTD_REUSE || compressor == BMK_ZLIB_REUSE) {
z_stream inf;
@@ -467,7 +465,7 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
blockTable[blockNb].resSize = inf.total_out;
}
nbLoops++;
- } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+ } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
ret = inflateEnd(&inf);
if (ret != Z_OK) EXM_THROW(1, "inflateEnd failure");
} else {
@@ -501,9 +499,9 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
blockTable[blockNb].resSize = inf.total_out;
}
nbLoops++;
- } while (UTIL_clockSpanMicro(clockStart, ticksPerSecond) < clockLoop);
+ } while (UTIL_clockSpanMicro(clockStart) < clockLoop);
}
- { U64 const clockSpan = UTIL_clockSpanMicro(clockStart, ticksPerSecond);
+ { U64 const clockSpan = UTIL_clockSpanMicro(clockStart);
if (clockSpan < fastestD*nbLoops) fastestD = clockSpan / nbLoops;
totalDTime += clockSpan;
dCompleted = totalDTime>maxTime;