mirror of https://github.com/facebook/zstd.git

bench: changed creation/reset function to timedFnState

for consistency
Yann Collet
2018-08-21 18:19:27 -07:00
parent 1af27a7ed7
commit 77e805e3db
3 changed files with 287 additions and 277 deletions
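The rename is mechanical but worth spelling out: BMK_createTimeState / BMK_freeTimeState become BMK_createTimedFnState / BMK_freeTimedFnState, matching the BMK_timedFnState_t type they manage. A minimal, self-contained sketch of the resulting create/use/free pattern follows; the two functions below are toy stand-ins written for illustration (the real implementations live in the benchmark code and keep the state opaque), and the time-budget field is an assumption.

    /* Toy stand-ins illustrating the renamed lifecycle; not the real bench code. */
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { unsigned nbSeconds; } BMK_timedFnState_t;   /* real type is opaque */

    static BMK_timedFnState_t* BMK_createTimedFnState(unsigned nbSeconds)  /* was BMK_createTimeState */
    {
        BMK_timedFnState_t* const s = (BMK_timedFnState_t*)malloc(sizeof(*s));
        if (s != NULL) s->nbSeconds = nbSeconds;
        return s;
    }

    static void BMK_freeTimedFnState(BMK_timedFnState_t* state)            /* was BMK_freeTimeState */
    {
        free(state);
    }

    int main(void)
    {
        BMK_timedFnState_t* const timeStateCompress = BMK_createTimedFnState(3);
        if (timeStateCompress == NULL) return 1;
        /* ... run timed benchmark passes here until they report completion,
         *     freeing the state on every exit path, as the diff below does ... */
        printf("time budget: %u s\n", timeStateCompress->nbSeconds);
        BMK_freeTimedFnState(timeStateCompress);
        return 0;
    }

Note how the diff frees both timed states on every early-return path as well as at the end of the timed loop.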


@ -68,7 +68,7 @@ static const int g_maxNbVariations = 64;
#define FADT_MIN 0
#define FADT_MAX ((U32)-1)
#define ZSTD_TARGETLENGTH_MIN 0
#define ZSTD_TARGETLENGTH_MAX 999
#define WLOG_RANGE (ZSTD_WINDOWLOG_MAX - ZSTD_WINDOWLOG_MIN + 1)
@ -115,27 +115,27 @@ typedef struct {
} paramValues_t;
/* minimum value of parameters */
static const U32 mintable[NUM_PARAMS] =
{ ZSTD_WINDOWLOG_MIN, ZSTD_CHAINLOG_MIN, ZSTD_HASHLOG_MIN, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLENGTH_MIN, ZSTD_TARGETLENGTH_MIN, ZSTD_fast, FADT_MIN };
/* maximum value of parameters */
static const U32 maxtable[NUM_PARAMS] =
{ ZSTD_WINDOWLOG_MAX, ZSTD_CHAINLOG_MAX, ZSTD_HASHLOG_MAX, ZSTD_SEARCHLOG_MAX, ZSTD_SEARCHLENGTH_MAX, ZSTD_TARGETLENGTH_MAX, ZSTD_btultra, FADT_MAX };
/* # of values parameters can take on */
static const U32 rangetable[NUM_PARAMS] =
{ WLOG_RANGE, CLOG_RANGE, HLOG_RANGE, SLOG_RANGE, SLEN_RANGE, TLEN_RANGE, STRT_RANGE, FADT_RANGE };
/* ZSTD_cctxSetParameter() index to set */
static const ZSTD_cParameter cctxSetParamTable[NUM_PARAMS] =
{ ZSTD_p_windowLog, ZSTD_p_chainLog, ZSTD_p_hashLog, ZSTD_p_searchLog, ZSTD_p_minMatch, ZSTD_p_targetLength, ZSTD_p_compressionStrategy, ZSTD_p_forceAttachDict };
/* names of parameters */
static const char* g_paramNames[NUM_PARAMS] =
{ "windowLog", "chainLog", "hashLog","searchLog", "searchLength", "targetLength", "strategy", "forceAttachDict" };
/* shortened names of parameters */
static const char* g_shortParamNames[NUM_PARAMS] =
{ "wlog", "clog", "hlog","slog", "slen", "tlen", "strt", "fadt" };
/* maps value from { 0 to rangetable[param] - 1 } to valid paramvalues */
@ -178,7 +178,7 @@ static int invRangeMap(varInds_t param, U32 value) {
hi = mid;
}
}
return lo;
}
case fadt_ind:
return (int)value + 1;
@ -201,11 +201,11 @@ static void displayParamVal(FILE* f, varInds_t param, U32 value, int width) {
switch(param) {
case fadt_ind: if(width) { fprintf(f, "%*d", width, (int)value); } else { fprintf(f, "%d", (int)value); } break;
case strt_ind: if(width) { fprintf(f, "%*s", width, g_stratName[value]); } else { fprintf(f, "%s", g_stratName[value]); } break;
case wlog_ind:
case clog_ind:
case hlog_ind:
case slog_ind:
case slen_ind:
case tlen_ind: if(width) { fprintf(f, "%*u", width, value); } else { fprintf(f, "%u", value); } break;
case NUM_PARAMS:
DISPLAY("Error, not a valid param\n "); break;
@ -265,7 +265,7 @@ typedef struct {
typedef struct {
U32 cSpeed; /* bytes / sec */
U32 dSpeed;
U32 cMem; /* bytes */
} constraint_t;
typedef struct winner_ll_node winner_ll_node;
@ -285,7 +285,7 @@ static winner_ll_node* g_winners; /* linked list sorted ascending by cSize & cSp
*/
/*-*******************************************************
* General Util Functions
*********************************************************/
/* nullified useless params, to ensure count stats */
@ -502,10 +502,10 @@ static int feasible(const BMK_result_t results, const constraint_t target) {
}
/* hill climbing value for part 1 */
/* Scoring here is a linear reward for all set constraints normalized between 0 and 1
* (with 0 at 0 and 1 being fully fulfilling the constraint), summed with a logarithmic
* bonus to exceeding the constraint value. We also give linear ratio for compression ratio.
* The constant factors are experimental.
*/
static double resultScore(const BMK_result_t res, const size_t srcSize, const constraint_t target) {
double cs = 0., ds = 0., rt, cm = 0.;
@ -516,7 +516,7 @@ static double resultScore(const BMK_result_t res, const size_t srcSize, const co
if(target.cMem != (U32)-1) { cm = (double)target.cMem / res.cMem; }
rt = ((double)srcSize / res.cSize);
ret = (MIN(1, cs) + MIN(1, ds) + MIN(1, cm))*r1 + rt * rtr +
(MAX(0, log(cs))+ MAX(0, log(ds))+ MAX(0, log(cm))) * r2;
return ret;
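To make the comment above concrete, here is a self-contained sketch of the scoring shape. The weights r1, rtr and r2 are defined elsewhere in paramgrill.c, so the values below are placeholders, and scoreShape is an invented name; only the formula's structure mirrors resultScore().

    #include <math.h>
    #include <stdio.h>

    #define MIN(a,b) ((a) < (b) ? (a) : (b))
    #define MAX(a,b) ((a) > (b) ? (a) : (b))

    /* Placeholder weights: r1, rtr and r2 are defined elsewhere in paramgrill.c,
     * so these values are illustrative only. */
    static const double r1 = 1.0, rtr = 0.1, r2 = 0.1;

    /* cs, ds, cm are normalized constraint ratios (>= 1 means the constraint is met,
     * computed as in resultScore); rt is srcSize / cSize. */
    static double scoreShape(double cs, double ds, double cm, double rt)
    {
        return (MIN(1, cs) + MIN(1, ds) + MIN(1, cm)) * r1 + rt * rtr
             + (MAX(0, log(cs)) + MAX(0, log(ds)) + MAX(0, log(cm))) * r2;
    }

    int main(void)
    {
        /* meeting a constraint exactly (ratio 1.0) earns the full linear reward;
         * exceeding it (ratio 2.0) only adds a small logarithmic bonus */
        printf("%f\n", scoreShape(1.0, 1.0, 1.0, 3.0));
        printf("%f\n", scoreShape(2.0, 1.0, 1.0, 3.0));
        return 0;
    }

With these placeholder weights, raising cs from 1.0 to 2.0 adds only r2*log(2) ≈ 0.07 to the score: that is the "logarithmic bonus" for exceeding a constraint, versus the full linear reward for merely reaching it.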
@ -532,7 +532,7 @@ static double resultDistLvl(const BMK_result_t result1, const BMK_result_t lvlRe
return normalizedRatioGain1 * g_ratioMultiplier + normalizedCSpeedGain1;
}
/* return true if r2 strictly better than r1 */
static int compareResultLT(const BMK_result_t result1, const BMK_result_t result2, const constraint_t target, size_t srcSize) {
if(feasible(result1, target) && feasible(result2, target)) {
if(g_optmode) {
@ -547,7 +547,7 @@ static int compareResultLT(const BMK_result_t result1, const BMK_result_t result
static constraint_t relaxTarget(constraint_t target) {
target.cMem = (U32)-1;
target.cSpeed *= ((double)g_strictness) / 100;
target.dSpeed *= ((double)g_strictness) / 100;
return target;
}
@ -598,7 +598,7 @@ static void optimizerAdjustInput(paramValues_t* pc, const size_t maxBlockSize) {
DISPLAY("Warning: hashlog too much larger than windowLog size, adjusted to %u\n", pc->vals[hlog_ind]);
}
}
if(pc->vals[slog_ind] != PARAM_UNSET && pc->vals[clog_ind] != PARAM_UNSET) {
if(pc->vals[slog_ind] > pc->vals[clog_ind]) {
pc->vals[clog_ind] = pc->vals[slog_ind];
@ -608,17 +608,17 @@ static void optimizerAdjustInput(paramValues_t* pc, const size_t maxBlockSize) {
}
static int redundantParams(const paramValues_t paramValues, const constraint_t target, const size_t maxBlockSize) {
return
(ZSTD_estimateCStreamSize_usingCParams(pvalsToCParams(paramValues)) > (size_t)target.cMem) /* Uses too much memory */
|| ((1ULL << (paramValues.vals[wlog_ind] - 1)) >= maxBlockSize && paramValues.vals[wlog_ind] != mintable[wlog_ind]) /* wlog too much bigger than src size */
|| (paramValues.vals[clog_ind] > (paramValues.vals[wlog_ind] + (paramValues.vals[strt_ind] > ZSTD_btlazy2))) /* chainLog larger than windowLog*/
|| (paramValues.vals[slog_ind] > paramValues.vals[clog_ind]) /* searchLog larger than chainLog */
|| (paramValues.vals[hlog_ind] > paramValues.vals[wlog_ind] + 1); /* hashLog larger than windowLog + 1 */
}
/*-************************************
* Display Functions
**************************************/
static void BMK_translateAdvancedParams(FILE* f, const paramValues_t params) {
@ -629,7 +629,7 @@ static void BMK_translateAdvancedParams(FILE* f, const paramValues_t params) {
if(g_silenceParams[v]) { continue; }
if(!first) { fprintf(f, ","); }
fprintf(f,"%s=", g_paramNames[v]);
if(v == strt_ind) { fprintf(f,"%u", params.vals[v]); }
else { displayParamVal(f, v, params.vals[v], 0); }
first = 0;
@ -668,10 +668,10 @@ static void BMK_printWinner(FILE* f, const int cLevel, const BMK_result_t result
snprintf(lvlstr, 15, " Level %2d ", cLevel);
}
if(TIMED) {
const U64 time = UTIL_clockSpanNano(g_time);
const U64 minutes = time / (60ULL * TIMELOOP_NANOSEC);
fprintf(f, "%1lu:%2lu:%05.2f - ", (unsigned long) minutes / 60,(unsigned long) minutes % 60, (double)(time - minutes * TIMELOOP_NANOSEC * 60ULL)/TIMELOOP_NANOSEC);
}
fprintf(f, "/* %s */ ", lvlstr);
@ -735,7 +735,7 @@ static int insertWinner(const winnerInfo_t w, const constraint_t targetConstrain
tmp = cur_node->next;
cur_node->next = cur_node->next->next;
free(tmp);
break;
}
case SIZE_RESULT:
{
@ -754,7 +754,7 @@ static int insertWinner(const winnerInfo_t w, const constraint_t targetConstrain
cur_node->next = newnode;
return 0;
}
}
}
}
@ -792,9 +792,9 @@ static int insertWinner(const winnerInfo_t w, const constraint_t targetConstrain
cur_node->next = newnode;
return 0;
}
default:
return 1;
}
}
}
static void BMK_printWinnerOpt(FILE* f, const U32 cLevel, const BMK_result_t result, const paramValues_t params, const constraint_t targetConstraints, const size_t srcSize)
@ -814,7 +814,7 @@ static void BMK_printWinnerOpt(FILE* f, const U32 cLevel, const BMK_result_t res
g_winner.result = result;
g_winner.params = params;
}
}
}
if(g_optmode && g_optimizer && (DEBUG || g_displayLevel == 3)) {
winnerInfo_t w;
@ -824,8 +824,8 @@ static void BMK_printWinnerOpt(FILE* f, const U32 cLevel, const BMK_result_t res
insertWinner(w, targetConstraints);
if(!DEBUG) { fprintf(f, "\033c"); }
fprintf(f, "\n");
/* the table */
fprintf(f, "================================\n");
for(n = g_winners; n != NULL; n = n->next) {
@ -909,8 +909,8 @@ static size_t local_initDCtx(void* payload) {
/* additional argument is just the context */
static size_t local_defaultCompress(
const void* srcBuffer, size_t srcSize,
void* dstBuffer, size_t dstSize,
void* addArgs) {
size_t moreToFlush = 1;
ZSTD_CCtx* ctx = (ZSTD_CCtx*)addArgs;
@ -937,8 +937,8 @@ static size_t local_defaultCompress(
/* additional argument is just the context */
static size_t local_defaultDecompress(
const void* srcBuffer, size_t srcSize,
void* dstBuffer, size_t dstSize,
void* addArgs) {
size_t moreToFlush = 1;
ZSTD_DCtx* dctx = (ZSTD_DCtx*)addArgs;
@ -965,7 +965,7 @@ static size_t local_defaultDecompress(
}
/*-************************************
* Data Initialization Functions
**************************************/
typedef struct {
@ -1041,7 +1041,7 @@ static int createBuffersFromMemory(buffers_t* buff, void * srcBuffer, const size
buff->dstSizes = (size_t*)malloc(maxNbBlocks * sizeof(size_t));
buff->resPtrs = (void**)calloc(maxNbBlocks, sizeof(void*));
buff->resSizes = (size_t*)malloc(maxNbBlocks * sizeof(size_t));
if(!buff->srcPtrs || !buff->srcSizes || !buff->dstPtrs || !buff->dstCapacities || !buff->dstSizes || !buff->resPtrs || !buff->resSizes) {
DISPLAY("alloc error\n");
@ -1090,18 +1090,18 @@ static int createBuffersFromMemory(buffers_t* buff, void * srcBuffer, const size
buff->nbBlocks = blockNb;
return 0;
}
/* allocates buffer's arguments. returns success / failure */
static int createBuffers(buffers_t* buff, const char* const * const fileNamesTable,
size_t nbFiles) {
size_t pos = 0;
size_t n;
size_t totalSizeToLoad = UTIL_getTotalFileSize(fileNamesTable, (U32)nbFiles);
size_t benchedSize = MIN(BMK_findMaxMem(totalSizeToLoad * 3) / 3, totalSizeToLoad);
size_t* fileSizes = calloc(sizeof(size_t), nbFiles);
void* srcBuffer = NULL;
int ret = 0;
if(!totalSizeToLoad || !benchedSize) {
ret = 1;
@ -1139,7 +1139,7 @@ static int createBuffers(buffers_t* buff, const char* const * const fileNamesTab
if (fileSize + pos > benchedSize) fileSize = benchedSize - pos, nbFiles=n; /* buffer too small - stop after this file */
{
char* buffer = (char*)(srcBuffer);
size_t const readSize = fread((buffer)+pos, 1, (size_t)fileSize, f);
fclose(f);
if (readSize != (size_t)fileSize) {
@ -1181,14 +1181,14 @@ static int createContexts(contexts_t* ctx, const char* dictFileName) {
ctx->dictBuffer = malloc(ctx->dictSize);
f = fopen(dictFileName, "rb");
if(!f) {
DISPLAY("unable to open file\n");
fclose(f);
freeContexts(*ctx);
return 1;
}
if(ctx->dictSize > 64 MB || !(ctx->dictBuffer)) {
DISPLAY("dictionary too large\n");
fclose(f);
@ -1207,7 +1207,7 @@ static int createContexts(contexts_t* ctx, const char* dictFileName) {
}
/*-************************************
* Optimizer Memoization Functions
**************************************/
/* return: new length */
@ -1218,7 +1218,7 @@ static size_t sanitizeVarArray(varInds_t* varNew, const size_t varLength, const
for(i = 0; i < varLength; i++) {
if( !((varArray[i] == clog_ind && strat == ZSTD_fast)
|| (varArray[i] == slog_ind && strat == ZSTD_fast)
|| (varArray[i] == slog_ind && strat == ZSTD_dfast)
|| (varArray[i] == tlen_ind && strat != ZSTD_btopt && strat != ZSTD_btultra && strat != ZSTD_fast))) {
varNew[j] = varArray[i];
j++;
@ -1305,7 +1305,7 @@ static void freeMemoTableArray(memoTable_t* const mtAll) {
static memoTable_t* createMemoTableArray(const paramValues_t p, const varInds_t* const varyParams, const size_t varyLen, const U32 memoTableLog) {
memoTable_t* mtAll = (memoTable_t*)calloc(sizeof(memoTable_t),(ZSTD_btultra + 1));
ZSTD_strategy i, stratMin = ZSTD_fast, stratMax = ZSTD_btultra;
if(mtAll == NULL) {
return NULL;
}
@ -1324,7 +1324,7 @@ static memoTable_t* createMemoTableArray(const paramValues_t p, const varInds_t*
return mtAll;
}
if(p.vals[strt_ind] != PARAM_UNSET) {
stratMin = p.vals[strt_ind];
stratMax = p.vals[strt_ind];
@ -1348,7 +1348,7 @@ static memoTable_t* createMemoTableArray(const paramValues_t p, const varInds_t*
return NULL;
}
}
return mtAll;
}
@ -1363,7 +1363,7 @@ static void randomConstrainedParams(paramValues_t* pc, const memoTable_t* memoTa
int i;
for(i = 0; i < NUM_PARAMS; i++) {
varInds_t v = mt.varArray[i];
if(v == strt_ind) continue;
pc->vals[v] = rangeMap(v, FUZ_rand(&g_rand) % rangetable[v]);
}
@ -1382,7 +1382,7 @@ static void randomConstrainedParams(paramValues_t* pc, const memoTable_t* memoTa
/* if in decodeOnly, then srcPtr's will be compressed blocks, and uncompressedBlocks will be written to dstPtrs */
/* dictionary nullable, nothing else though. */
static BMK_return_t BMK_benchMemInvertible(const buffers_t buf, const contexts_t ctx,
const int cLevel, const paramValues_t* comprParams,
const BMK_mode_t mode, const BMK_loopMode_t loopMode, const unsigned nbSeconds) {
@ -1427,9 +1427,9 @@ static BMK_return_t BMK_benchMemInvertible(const buffers_t buf, const contexts_t
if(loopMode == BMK_timeMode) {
BMK_customTimedReturn_t intermediateResultCompress;
BMK_customTimedReturn_t intermediateResultDecompress;
- BMK_timedFnState_t* timeStateCompress = BMK_createTimeState(nbSeconds);
- BMK_timedFnState_t* timeStateDecompress = BMK_createTimeState(nbSeconds);
+ BMK_timedFnState_t* timeStateCompress = BMK_createTimedFnState(nbSeconds);
+ BMK_timedFnState_t* timeStateDecompress = BMK_createTimedFnState(nbSeconds);
if(mode == BMK_compressOnly) {
intermediateResultCompress.completed = 0;
intermediateResultDecompress.completed = 1;
@ -1447,8 +1447,8 @@ static BMK_return_t BMK_benchMemInvertible(const buffers_t buf, const contexts_t
if(intermediateResultCompress.result.error) {
results.error = intermediateResultCompress.result.error;
- BMK_freeTimeState(timeStateCompress);
- BMK_freeTimeState(timeStateDecompress);
+ BMK_freeTimedFnState(timeStateCompress);
+ BMK_freeTimedFnState(timeStateDecompress);
return results;
}
results.result.cSpeed = (srcSize * TIMELOOP_NANOSEC) / intermediateResultCompress.result.result.nanoSecPerRun;
@ -1461,21 +1461,21 @@ static BMK_return_t BMK_benchMemInvertible(const buffers_t buf, const contexts_t
if(intermediateResultDecompress.result.error) {
results.error = intermediateResultDecompress.result.error;
- BMK_freeTimeState(timeStateCompress);
- BMK_freeTimeState(timeStateDecompress);
+ BMK_freeTimedFnState(timeStateCompress);
+ BMK_freeTimedFnState(timeStateDecompress);
return results;
}
results.result.dSpeed = (srcSize * TIMELOOP_NANOSEC) / intermediateResultDecompress.result.result.nanoSecPerRun;
}
- BMK_freeTimeState(timeStateCompress);
- BMK_freeTimeState(timeStateDecompress);
+ BMK_freeTimedFnState(timeStateCompress);
+ BMK_freeTimedFnState(timeStateDecompress);
} else { /* iterMode; */
if(mode != BMK_decodeOnly) {
BMK_customReturn_t compressionResults = BMK_benchFunction(&local_defaultCompress, (void*)cctx, &local_initCCtx, (void*)&cctxprep,
nbBlocks, srcPtrs, srcSizes, dstPtrs, dstCapacities, dstSizes, nbSeconds);
if(compressionResults.error) {
results.error = compressionResults.error;
return results;
@ -1537,7 +1537,7 @@ static int BMK_benchParam(BMK_result_t* resultPtr,
}
/* Benchmarking which stops when we are sufficiently sure the solution is infeasible / worse than the winner */
#define VARIANCE 1.2
static int allBench(BMK_result_t* resultPtr,
const buffers_t buf, const contexts_t ctx,
const paramValues_t cParams,
@ -1557,13 +1557,13 @@ static int allBench(BMK_result_t* resultPtr,
/* calculate uncertainty in compression / decompression runs */
if(benchres.cSpeed) {
loopDurationC = ((buf.srcSize * TIMELOOP_NANOSEC) / benchres.cSpeed);
uncertaintyConstantC = ((loopDurationC + (double)(2 * g_clockGranularity))/loopDurationC);
}
if(benchres.dSpeed) {
loopDurationD = ((buf.srcSize * TIMELOOP_NANOSEC) / benchres.dSpeed);
uncertaintyConstantD = ((loopDurationD + (double)(2 * g_clockGranularity))/loopDurationD);
}
/* anything with worse ratio in feas is definitely worse, discard */
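As an aside on the two uncertainty constants computed above: they inflate the comparison margin when a run is short relative to the timer's resolution. A tiny standalone illustration, using an assumed granularity (the real value comes from findClockGranularity()):

    #include <stdio.h>

    /* Illustrative numbers only: g_clockGranularity is measured at runtime by
     * findClockGranularity(), so the value below is an assumption. */
    int main(void)
    {
        const double clockGranularity = 1000.0;                /* ns, assumed */
        const double loopDurations[] = { 5000.0, 5000000.0 };  /* ns: short vs long run */
        int i;
        for (i = 0; i < 2; i++) {
            const double loopDuration = loopDurations[i];
            /* same form as uncertaintyConstantC/D in allBench() */
            const double uncertaintyConstant = (loopDuration + 2 * clockGranularity) / loopDuration;
            printf("loop %.0f ns -> uncertainty x%.4f\n", loopDuration, uncertaintyConstant);
        }
        return 0;
    }

A 5 µs run gets a 1.4x margin while a 5 ms run gets only about 1.0004x, which is why very fast configurations are not discarded on a single noisy measurement.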
@ -1596,18 +1596,18 @@ static int allBench(BMK_result_t* resultPtr,
/* compare by resultScore when in infeas */
/* compare by compareResultLT when in feas */
if((!feas && (resultScore(benchres, buf.srcSize, target) > resultScore(*winnerResult, buf.srcSize, target))) ||
(feas && (compareResultLT(*winnerResult, benchres, target, buf.srcSize))) ) {
return BETTER_RESULT;
} else {
return WORSE_RESULT;
}
}
#define INFEASIBLE_THRESHOLD 200
/* Memoized benchmarking, won't benchmark anything which has already been benchmarked before. */
static int benchMemo(BMK_result_t* resultPtr,
const buffers_t buf, const contexts_t ctx,
const paramValues_t cParams,
const constraint_t target,
BMK_result_t* winnerResult, memoTable_t* const memoTableArray,
@ -1615,7 +1615,7 @@ static int benchMemo(BMK_result_t* resultPtr,
static int bmcount = 0;
int res;
if(memoTableGet(memoTableArray, cParams) >= INFEASIBLE_THRESHOLD || redundantParams(cParams, target, buf.maxBlockSize)) { return WORSE_RESULT; }
res = allBench(resultPtr, buf, ctx, cParams, target, winnerResult, feas);
@ -1659,7 +1659,7 @@ static void BMK_init_level_constraints(int bytePerSec_level1)
} }
}
static int BMK_seed(winnerInfo_t* winners, const paramValues_t params,
const buffers_t buf, const contexts_t ctx)
{
BMK_result_t testResult;
@ -1781,7 +1781,7 @@ static void playAround(FILE* f, winnerInfo_t* winners,
if (nbVariations++ > g_maxNbVariations) break;
do { for(i = 0; i < 4; i++) { paramVaryOnce(FUZ_rand(&g_rand) % (strt_ind + 1), ((FUZ_rand(&g_rand) & 1) << 1) - 1, &p); } }
while(!paramValid(p));
/* exclude faster if already played params */
@ -1815,7 +1815,7 @@ static void BMK_selectRandomStart(
}
}
static void BMK_benchFullTable(const buffers_t buf, const contexts_t ctx)
{
paramValues_t params;
winnerInfo_t winners[NB_LEVELS_TRACKED+1];
@ -1888,7 +1888,7 @@ static int benchSample(double compressibility, int cLevel)
buffers_t buf;
contexts_t ctx;
if(srcBuffer == NULL) {
DISPLAY("Out of Memory\n");
return 2;
@ -1967,12 +1967,12 @@ int benchFiles(const char** fileNamesTable, int nbFiles, const char* dictFileNam
* Local Optimization Functions
**************************************/
/* One iteration of hill climbing. Specifically, it first tries all
* valid parameter configurations w/ Manhattan distance 1 and picks the best one
* failing that, it progressively tries candidates further and further away (up to #dim + 2)
* if it finds a candidate exceeding winnerInfo, it will repeat. Otherwise, it will stop the
* current stage of hill climbing.
* Each iteration of hill climbing proceeds in 2 'phases'. Phase 1 climbs according to
* the resultScore function, which is effectively a linear increase in reward until it reaches
* the constraint-satisfying value, at which point any excess results in only logarithmic reward.
* This aims to find some constraint-satisfying point.
@ -1980,14 +1980,14 @@ int benchFiles(const char** fileNamesTable, int nbFiles, const char* dictFileNam
* all feasible solutions valued over all infeasible solutions.
*/
/* sanitize all params here.
* all generation after random should be sanitized. (maybe sanitize random)
*/
static winnerInfo_t climbOnce(const constraint_t target,
memoTable_t* mtAll,
const buffers_t buf, const contexts_t ctx,
const paramValues_t init) {
/*
* cparam - currently considered 'center'
* candidate - params to benchmark/results
* winner - best option found so far.
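For readers following the climb loop that the next hunks touch: the comment block above boils down to "score every Manhattan-distance-1 neighbour of the current center, recenter on the best improvement, and only widen the radius when nothing improves". The sketch below reproduces just that loop structure against a toy objective; it is not paramgrill's code (toyScore stands in for benchMemo()/resultScore(), and the radius-widening and feasible/infeasible phases are omitted).

    #include <stdio.h>

    #define NB_DIMS 4

    /* toy objective standing in for benchMemo()/resultScore(): prefers a fixed point */
    static double toyScore(const int v[NB_DIMS])
    {
        static const int goal[NB_DIMS] = { 3, 7, 2, 5 };
        double s = 0;
        int i;
        for (i = 0; i < NB_DIMS; i++) {
            const int d = v[i] - goal[i];
            s -= (double)(d * d);
        }
        return s;
    }

    int main(void)
    {
        int center[NB_DIMS] = { 0, 0, 0, 0 };
        int improved = 1;
        while (improved) {                      /* one 'stage' of climbing, as in climbOnce() */
            int best[NB_DIMS];
            double bestScore = toyScore(center);
            int i, offset, k;
            for (k = 0; k < NB_DIMS; k++) best[k] = center[k];
            improved = 0;
            for (i = 0; i < NB_DIMS; i++) {     /* all Manhattan-distance-1 neighbours */
                for (offset = -1; offset <= 1; offset += 2) {
                    int cand[NB_DIMS];
                    double s;
                    for (k = 0; k < NB_DIMS; k++) cand[k] = center[k];
                    cand[i] += offset;
                    s = toyScore(cand);
                    if (s > bestScore) {        /* recenter on the best improving neighbour */
                        bestScore = s;
                        for (k = 0; k < NB_DIMS; k++) best[k] = cand[k];
                        improved = 1;
                    }
                }
            }
            for (k = 0; k < NB_DIMS; k++) center[k] = best[k];
            /* paramgrill additionally widens the radius (up to #dims + 2) before giving up;
             * that phase is omitted here */
        }
        printf("local optimum: %d %d %d %d\n", center[0], center[1], center[2], center[3]);
        return 0;
    }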
@ -2017,8 +2017,8 @@ static winnerInfo_t climbOnce(const constraint_t target,
for(offset = -1; offset <= 1; offset += 2) {
CHECKTIME(winnerInfo);
candidateInfo.params = cparam;
paramVaryOnce(mtAll[cparam.vals[strt_ind]].varArray[i], offset, &candidateInfo.params);
if(paramValid(candidateInfo.params)) {
int res;
res = benchMemo(&candidateInfo.result, buf, ctx,
@ -2047,7 +2047,7 @@ static winnerInfo_t climbOnce(const constraint_t target,
/* param error checking already done here */
paramVariation(&candidateInfo.params, mtAll, (U32)dist);
res = benchMemo(&candidateInfo.result, buf, ctx,
sanitizeParams(candidateInfo.params), target, &winnerInfo.result, mtAll, feas);
DEBUGOUTPUT("Res: %d\n", res);
if(res == BETTER_RESULT) { /* synonymous with better in this case*/
@ -2065,8 +2065,8 @@ static winnerInfo_t climbOnce(const constraint_t target,
}
}
if(!better) { /* infeas -> feas -> stop */
if(feas) { return winnerInfo; }
feas = 1;
better = 1;
@ -2084,17 +2084,17 @@ static winnerInfo_t climbOnce(const constraint_t target,
/* flexible parameters: iterations of failed climbing (or if we do non-random, maybe this is when everything is close to visited)
weight more on visit for bad results, less on good results/more on later results / ones with more failures.
allocate memoTable here.
*/
static winnerInfo_t optimizeFixedStrategy(
const buffers_t buf, const contexts_t ctx,
const constraint_t target, paramValues_t paramTarget,
const ZSTD_strategy strat,
memoTable_t* memoTableArray, const int tries) {
int i = 0;
paramValues_t init;
winnerInfo_t winnerInfo, candidateInfo;
winnerInfo = initWinnerInfo(emptyParams());
/* so climb is given the right fixed strategy */
paramTarget.vals[strt_ind] = strat;
@ -2104,7 +2104,7 @@ static winnerInfo_t optimizeFixedStrategy(
init = paramTarget;
for(i = 0; i < tries; i++) {
DEBUGOUTPUT("Restart\n");
do { randomConstrainedParams(&init, memoTableArray, strat); } while(redundantParams(init, target, buf.maxBlockSize));
candidateInfo = climbOnce(target, memoTableArray, buf, ctx, init);
if(compareResultLT(winnerInfo.result, candidateInfo.result, target, buf.srcSize)) {
@ -2153,9 +2153,9 @@ static int nextStrategy(const int currentStrategy, const int bestStrategy) {
/* main fn called when using --optimize */
/* Does strategy selection by benchmarking default compression levels
* then optimizes by strategy, starting with the best one and
* progressively moving further away by number
* args:
* fileNamesTable - list of files to benchmark
* nbFiles - length of fileNamesTable
* dictFileName - name of dictionary file if one, else NULL
@ -2167,7 +2167,7 @@ static int nextStrategy(const int currentStrategy, const int bestStrategy) {
static int g_maxTries = 5;
#define TRY_DECAY 1
static int optimizeForSize(const char* const * const fileNamesTable, const size_t nbFiles, const char* dictFileName, constraint_t target, paramValues_t paramTarget,
const int cLevelOpt, const int cLevelRun, const U32 memoTableLog)
{
varInds_t varArray [NUM_PARAMS];
@ -2194,7 +2194,7 @@ static int optimizeForSize(const char* const * const fileNamesTable, const size_
if(nbFiles == 1) {
DISPLAYLEVEL(2, "Loading %s... \r", fileNamesTable[0]);
} else {
DISPLAYLEVEL(2, "Loading %lu Files... \r", (unsigned long)nbFiles);
}
/* sanitize paramTarget */
@ -2231,14 +2231,14 @@ static int optimizeForSize(const char* const * const fileNamesTable, const size_
ret = 3;
goto _cleanUp;
}
g_lvltarget = winner.result;
g_lvltarget.cSpeed *= ((double)g_strictness) / 100;
g_lvltarget.dSpeed *= ((double)g_strictness) / 100;
g_lvltarget.cSize /= ((double)g_strictness) / 100;
target.cSpeed = (U32)g_lvltarget.cSpeed;
target.dSpeed = (U32)g_lvltarget.dSpeed;
BMK_printWinnerOpt(stdout, cLevelOpt, winner.result, winner.params, target, buf.srcSize);
}
@ -2272,7 +2272,7 @@ static int optimizeForSize(const char* const * const fileNamesTable, const size_
DISPLAYLEVEL(2, "\n");
findClockGranularity();
{
paramValues_t CParams;
/* find best solution from default params */
@ -2305,13 +2305,13 @@ static int optimizeForSize(const char* const * const fileNamesTable, const size_
DEBUGOUTPUT("Real Opt\n");
/* start 'real' optimization */
{
int bestStrategy = (int)winner.params.vals[strt_ind];
if(paramTarget.vals[strt_ind] == PARAM_UNSET) {
int st = bestStrategy;
int tries = g_maxTries;
{
/* one iteration of hill climbing with the level-defined parameters. */
winnerInfo_t w1 = climbOnce(target, allMT, buf, ctx, winner.params);
if(compareResultLT(winner.result, w1.result, target, buf.srcSize)) {
@ -2323,7 +2323,7 @@ static int optimizeForSize(const char* const * const fileNamesTable, const size_
while(st && tries > 0) {
winnerInfo_t wc;
DEBUGOUTPUT("StrategySwitch: %s\n", g_stratName[st]);
wc = optimizeFixedStrategy(buf, ctx, target, paramBase, st, allMT, tries);
if(compareResultLT(winner.result, wc.result, target, buf.srcSize)) {
@ -2349,7 +2349,7 @@ static int optimizeForSize(const char* const * const fileNamesTable, const size_
goto _cleanUp;
}
/* end summary */
_displayCleanUp:
if(g_displayLevel >= 0) { BMK_displayOneResult(stdout, winner, buf.srcSize); }
BMK_translateAdvancedParams(stdout, winner.params);
DISPLAYLEVEL(1, "grillParams size - optimizer completed \n");
@ -2427,7 +2427,7 @@ static double readDoubleFromChar(const char** stringPtr)
}
(*stringPtr)++;
while ((**stringPtr >='0') && (**stringPtr <='9')) {
result += (double)(**stringPtr - '0') / divide, divide *= 10, (*stringPtr)++ ;
}
return result;
}
@ -2503,7 +2503,7 @@ int main(int argc, const char** argv)
int seperateFiles = 0;
double compressibility = COMPRESSIBILITY_DEFAULT;
U32 memoTableLog = PARAM_UNSET;
constraint_t target = { 0, 0, (U32)-1 };
paramValues_t paramTarget = emptyParams();
g_params = emptyParams();
@ -2522,7 +2522,7 @@ int main(int argc, const char** argv)
for ( ; ;) {
if(parse_params(&argument, &paramTarget)) { if(argument[0] == ',') { argument++; continue; } else break; }
PARSE_SUB_ARGS("compressionSpeed=" , "cSpeed=", target.cSpeed);
PARSE_SUB_ARGS("decompressionSpeed=", "dSpeed=", target.dSpeed);
PARSE_SUB_ARGS("compressionMemory=" , "cMem=", target.cMem);
PARSE_SUB_ARGS("strict=", "stc=", g_strictness);
PARSE_SUB_ARGS("maxTries=", "tries=", g_maxTries);
@ -2701,7 +2701,7 @@ int main(int argc, const char** argv)
/* load dictionary file (only applicable for optimizer rn) */
case 'D':
if(i == argc - 1) { /* last argument, return error. */
DISPLAY("Dictionary file expected but not given : %d\n", i);
return 1;
} else {
@ -2749,7 +2749,7 @@ int main(int argc, const char** argv)
} else {
result = benchFiles(argv+filenamesStart, argc-filenamesStart, dictFileName, cLevelRun);
}
}
}
}
if (main_pause) { int unused; printf("press enter...\n"); unused = getchar(); (void)unused; }