diff --git a/contrib/linux-kernel/lib/zstd/bitstream.h b/contrib/linux-kernel/lib/zstd/bitstream.h index 9d2154082..fc2fe1189 100644 --- a/contrib/linux-kernel/lib/zstd/bitstream.h +++ b/contrib/linux-kernel/lib/zstd/bitstream.h @@ -367,7 +367,7 @@ MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer; return BIT_DStream_completed; } - { U32 nbBytes = bitD->bitsConsumed >> 3; + { U32 nbBytes = bitD->bitsConsumed >> 3; BIT_DStream_status result = BIT_DStream_unfinished; if (bitD->ptr - nbBytes < bitD->start) { nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ diff --git a/contrib/linux-kernel/lib/zstd/compress.c b/contrib/linux-kernel/lib/zstd/compress.c index cd184b78b..b419274d7 100644 --- a/contrib/linux-kernel/lib/zstd/compress.c +++ b/contrib/linux-kernel/lib/zstd/compress.c @@ -177,14 +177,14 @@ ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, u if (srcSize+dictSize == 0) return cPar; /* no size information available : no adjustment */ /* resize params, to use less memory when necessary */ - { U32 const minSrcSize = (srcSize==0) ? 500 : 0; + { U32 const minSrcSize = (srcSize==0) ? 500 : 0; U64 const rSize = srcSize + dictSize + minSrcSize; if (rSize < ((U64)1<<ZSTD_WINDOWLOG_MAX)) { U32 const srcLog = MAX(ZSTD_HASHLOG_MIN, ZSTD_highbit32((U32)(rSize)-1) + 1); if (cPar.windowLog > srcLog) cPar.windowLog = srcLog; } } if (cPar.hashLog > cPar.windowLog) cPar.hashLog = cPar.windowLog; - { U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy); + { U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy); if (cycleLog > cPar.windowLog) cPar.chainLog -= (cycleLog - cPar.windowLog); } @@ -236,7 +236,7 @@ static size_t ZSTD_resetCCtx_advanced (ZSTD_CCtx* zc, return ZSTD_continueCCtx(zc, params, frameContentSize); } - { size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << params.cParams.windowLog); + { size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << params.cParams.windowLog); U32 const divider = (params.cParams.searchLength==3) ? 3 : 4; size_t const maxNbSeq = blockSize / divider; size_t const tokenSpace = blockSize + 11*maxNbSeq; @@ -248,7 +248,7 @@ static size_t ZSTD_resetCCtx_advanced (ZSTD_CCtx* zc, void* ptr; /* Check if workSpace is large enough, alloc a new one if needed */ - { size_t const optSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits))*sizeof(U32) + (ZSTD_OPT_NUM+1)*(sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t)); + { size_t const optSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits))*sizeof(U32) + (ZSTD_OPT_NUM+1)*(sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t)); memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem)); - { ZSTD_parameters params = srcCCtx->params; + { ZSTD_parameters params = srcCCtx->params; params.fParams.contentSizeFlag = (pledgedSrcSize > 0); ZSTD_resetCCtx_advanced(dstCCtx, params, pledgedSrcSize, ZSTDcrp_noMemset); } /* copy tables */ - { size_t const chainSize = (srcCCtx->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << srcCCtx->params.cParams.chainLog); + { size_t const chainSize = (srcCCtx->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << srcCCtx->params.cParams.chainLog); size_t const hSize = ((size_t)1) << srcCCtx->params.cParams.hashLog; size_t const h3Size = (size_t)1 << srcCCtx->hashLog3; size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); @@ -476,12 +476,12 @@ static size_t ZSTD_compressLiterals (ZSTD_CCtx* zc, /* small ? don't even attempt compression (speed opt) */ # define LITERAL_NOENTROPY 63 - { size_t const minLitSize = zc->flagStaticHufTable == HUF_repeat_valid ?
6 : LITERAL_NOENTROPY; if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); } if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall); /* not enough space for compression */ - { HUF_repeat repeat = zc->flagStaticHufTable; + { HUF_repeat repeat = zc->flagStaticHufTable; int const preferRepeat = zc->params.cParams.strategy < ZSTD_lazy ? srcSize <= 1024 : 0; if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1; cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, zc->tmpCounters, sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat) @@ -503,18 +503,18 @@ static size_t ZSTD_compressLiterals (ZSTD_CCtx* zc, switch(lhSize) { case 3: /* 2 - 2 - 10 - 10 */ - { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14); + { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14); MEM_writeLE24(ostart, lhc); break; } case 4: /* 2 - 2 - 14 - 14 */ - { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18); + { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18); MEM_writeLE32(ostart, lhc); break; } default: /* should not be necessary, lhSize is only {3,4,5} */ case 5: /* 2 - 2 - 18 - 18 */ - { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22); + { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22); MEM_writeLE32(ostart, lhc); ostart[4] = (BYTE)(cLitSize >> 10); break; @@ -589,7 +589,7 @@ MEM_STATIC size_t ZSTD_compressSequences (ZSTD_CCtx* zc, BYTE scratchBuffer[1<<MAX(MLFSELog,LLFSELog)]; /* Compress literals */ - { const BYTE* const literals = seqStorePtr->litStart; + { const BYTE* const literals = seqStorePtr->litStart; size_t const litSize = seqStorePtr->lit - literals; size_t const cSize = ZSTD_compressLiterals(zc, op, dstCapacity, literals, litSize); if (ZSTD_isError(cSize)) return cSize; @@ -613,7 +613,7 @@ MEM_STATIC size_t ZSTD_compressSequences (ZSTD_CCtx* zc, ZSTD_seqToCodes(seqStorePtr); /* CTable for Literal Lengths */ - { U32 max = MaxLL; + { U32 max = MaxLL; size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, zc->tmpCounters); if ((mostFrequent == nbSeq) && (nbSeq > 2)) { *op++ = llCodeTable[0]; @@ -637,7 +637,7 @@ MEM_STATIC size_t ZSTD_compressSequences (ZSTD_CCtx* zc, } } /* CTable for Offsets */ - { U32 max = MaxOff; + { U32 max = MaxOff; size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, zc->tmpCounters); if ((mostFrequent == nbSeq) && (nbSeq > 2)) { *op++ = ofCodeTable[0]; @@ -661,7 +661,7 @@ MEM_STATIC size_t ZSTD_compressSequences (ZSTD_CCtx* zc, } } /* CTable for MatchLengths */ - { U32 max = MaxML; + { U32 max = MaxML; size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, zc->tmpCounters); if ((mostFrequent == nbSeq) && (nbSeq > 2)) { *op++ = *mlCodeTable; @@ -688,7 +688,7 @@ MEM_STATIC size_t ZSTD_compressSequences (ZSTD_CCtx* zc, zc->flagStaticTables = 0; /* Encoding Sequences */ - { BIT_CStream_t blockStream; + { BIT_CStream_t blockStream; FSE_CState_t stateMatchLength; FSE_CState_t stateOffsetBits; FSE_CState_t stateLitLength; @@ -717,7 +717,7 @@ MEM_STATIC size_t ZSTD_compressSequences (ZSTD_CCtx* zc, } BIT_flushBits(&blockStream); - { size_t n; + { size_t n; for (n=nbSeq-2 ; n<nbSeq ; n--) { /* intentional underflow */ if ((size_t)(op-ostart) >= maxCSize) { zc->flagStaticHufTable = HUF_repeat_none; @@ -989,7 +989,7 @@ void ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx, /* init */ ip += (ip==lowest); - { U32 const maxRep = (U32)(ip-lowest); + { U32 const maxRep = (U32)(ip-lowest); if
(offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; } @@ -1049,7 +1049,7 @@ void ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx, cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved; /* Last Literals */ - { size_t const lastLLSize = iend - anchor; + { size_t const lastLLSize = iend - anchor; memcpy(seqStorePtr->lit, anchor, lastLLSize); seqStorePtr->lit += lastLLSize; } @@ -1121,7 +1121,7 @@ static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx, ip += ((ip-anchor) >> g_searchStrength) + 1; continue; } - { const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend; + { const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend; const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr; U32 offset; mLength = ZSTD_count_2segments(ip+EQUAL_READ32, match+EQUAL_READ32, iend, matchEnd, lowPrefixPtr) + EQUAL_READ32; @@ -1163,7 +1163,7 @@ static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx, ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2; /* Last Literals */ - { size_t const lastLLSize = iend - anchor; + { size_t const lastLLSize = iend - anchor; memcpy(seqStorePtr->lit, anchor, lastLLSize); seqStorePtr->lit += lastLLSize; } @@ -1234,7 +1234,7 @@ void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx* cctx, /* init */ ip += (ip==lowest); - { U32 const maxRep = (U32)(ip-lowest); + { U32 const maxRep = (U32)(ip-lowest); if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; } @@ -1318,7 +1318,7 @@ void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx* cctx, cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved; /* Last Literals */ - { size_t const lastLLSize = iend - anchor; + { size_t const lastLLSize = iend - anchor; memcpy(seqStorePtr->lit, anchor, lastLLSize); seqStorePtr->lit += lastLLSize; } @@ -1467,7 +1467,7 @@ static void ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx* ctx, ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2; /* Last Literals */ - { size_t const lastLLSize = iend - anchor; + { size_t const lastLLSize = iend - anchor; memcpy(seqStorePtr->lit, anchor, lastLLSize); seqStorePtr->lit += lastLLSize; } @@ -1901,7 +1901,7 @@ void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx, /* init */ ip += (ip==base); ctx->nextToUpdate3 = ctx->nextToUpdate; - { U32 const maxRep = (U32)(ip-base); + { U32 const maxRep = (U32)(ip-base); if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0; if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0; } @@ -1920,7 +1920,7 @@ void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx, } /* first search (depth 0) */ - { size_t offsetFound = 99999999; + { size_t offsetFound = 99999999; size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls); if (ml2 > matchLength) matchLength = ml2, start = ip, offset=offsetFound; @@ -1942,7 +1942,7 @@ void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx, if ((mlRep >= EQUAL_READ32) && (gain2 > gain1)) matchLength = mlRep, offset = 0, start = ip; } - { size_t offset2=99999999; + { size_t offset2=99999999; size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4); @@ -1961,7 +1961,7 @@ void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx, if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) matchLength = ml2, 
offset = 0, start = ip; } - { size_t offset2=99999999; + { size_t offset2=99999999; size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7); @@ -1981,7 +1981,7 @@ void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx, /* store sequence */ _storeSequence: - { size_t const litLength = start - anchor; + { size_t const litLength = start - anchor; ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH); anchor = ip = start + matchLength; } @@ -2004,7 +2004,7 @@ _storeSequence: ctx->repToConfirm[1] = offset_2 ? offset_2 : savedOffset; /* Last Literals */ - { size_t const lastLLSize = iend - anchor; + { size_t const lastLLSize = iend - anchor; memcpy(seqStorePtr->lit, anchor, lastLLSize); seqStorePtr->lit += lastLLSize; } @@ -2073,7 +2073,7 @@ void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx, U32 curr = (U32)(ip-base); /* check repCode */ - { const U32 repIndex = (U32)(curr+1 - offset_1); + { const U32 repIndex = (U32)(curr+1 - offset_1); const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; const BYTE* const repMatch = repBase + repIndex; if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ @@ -2085,7 +2085,7 @@ void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx, } } /* first search (depth 0) */ - { size_t offsetFound = 99999999; + { size_t offsetFound = 99999999; size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls); if (ml2 > matchLength) matchLength = ml2, start = ip, offset=offsetFound; @@ -2118,7 +2118,7 @@ void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx, } } /* search match, depth 1 */ - { size_t offset2=99999999; + { size_t offset2=99999999; size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4); @@ -2148,7 +2148,7 @@ void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx, } } /* search match, depth 2 */ - { size_t offset2=99999999; + { size_t offset2=99999999; size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7); @@ -2170,7 +2170,7 @@ void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx, /* store sequence */ _storeSequence: - { size_t const litLength = start - anchor; + { size_t const litLength = start - anchor; ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH); anchor = ip = start + matchLength; } @@ -2198,7 +2198,7 @@ _storeSequence: ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2; /* Last Literals */ - { size_t const lastLLSize = iend - anchor; + { size_t const lastLLSize = iend - anchor; memcpy(seqStorePtr->lit, anchor, lastLLSize); seqStorePtr->lit += lastLLSize; } @@ -2572,12 +2572,12 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx* cctx, const void* dict, size_t cctx->dictID = cctx->params.fParams.noDictIDFlag ? 
0 : MEM_readLE32(dictPtr); dictPtr += 4; - { size_t const hufHeaderSize = HUF_readCTable(cctx->hufTable, 255, dictPtr, dictEnd-dictPtr); + { size_t const hufHeaderSize = HUF_readCTable(cctx->hufTable, 255, dictPtr, dictEnd-dictPtr); if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted); dictPtr += hufHeaderSize; } - { unsigned offcodeLog; + { unsigned offcodeLog; size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr); if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted); if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted); @@ -2586,7 +2586,7 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictPtr += offcodeHeaderSize; } - { short matchlengthNCount[MaxML+1]; + { short matchlengthNCount[MaxML+1]; unsigned matchlengthMaxValue = MaxML, matchlengthLog; size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr); if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted); @@ -2597,7 +2597,7 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictPtr += matchlengthHeaderSize; } - { short litlengthNCount[MaxLL+1]; + { short litlengthNCount[MaxLL+1]; unsigned litlengthMaxValue = MaxLL, litlengthLog; size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr); if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted); @@ -2614,7 +2614,7 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx* cctx, const void* dict, size_t cctx->rep[2] = MEM_readLE32(dictPtr+8); dictPtr += 12; - { size_t const dictContentSize = (size_t)(dictEnd - dictPtr); + { size_t const dictContentSize = (size_t)(dictEnd - dictPtr); U32 offcodeMax = MaxOff; if (dictContentSize <= ((U32)-1) - 128 KB) { U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */ @@ -2623,7 +2623,7 @@ static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx* cctx, const void* dict, size_t /* All offset values <= dictContentSize + 128 KB must be representable */ CHECK_F (ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff))); /* All repCodes must be <= dictContentSize and != 0*/ - { U32 u; + { U32 u; for (u=0; u<3; u++) { if (cctx->rep[u] == 0) return ERROR(dictionary_corrupted); if (cctx->rep[u] > dictContentSize) return ERROR(dictionary_corrupted); @@ -2781,7 +2781,7 @@ static ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dict { if (!customMem.customAlloc || !customMem.customFree) return NULL; - { ZSTD_CDict* const cdict = (ZSTD_CDict*) ZSTD_malloc(sizeof(ZSTD_CDict), customMem); + { ZSTD_CDict* const cdict = (ZSTD_CDict*) ZSTD_malloc(sizeof(ZSTD_CDict), customMem); ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(customMem); if (!cdict || !cctx) { @@ -2801,7 +2801,7 @@ static ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dict cdict->dictContent = internalBuffer; } - { size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0); + { size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0); if (ZSTD_isError(errorCode)) { ZSTD_free(cdict->dictBuffer, customMem); ZSTD_free(cdict, customMem); @@ -2824,7 +2824,7 @@ ZSTD_CDict* ZSTD_initCDict(const void* dict, size_t dictSize, ZSTD_parameters pa size_t ZSTD_freeCDict(ZSTD_CDict* cdict) 
{ if (cdict==NULL) return 0; /* support free on NULL */ - { ZSTD_customMem const cMem = cdict->refContext->customMem; + { ZSTD_customMem const cMem = cdict->refContext->customMem; ZSTD_freeCCtx(cdict->refContext); ZSTD_free(cdict->dictBuffer, cMem); ZSTD_free(cdict, cMem); @@ -2926,7 +2926,7 @@ ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem) size_t ZSTD_freeCStream(ZSTD_CStream* zcs) { if (zcs==NULL) return 0; /* support free on NULL */ - { ZSTD_customMem const cMem = zcs->customMem; + { ZSTD_customMem const cMem = zcs->customMem; ZSTD_freeCCtx(zcs->cctx); zcs->cctx = NULL; ZSTD_freeCDict(zcs->cdictLocal); @@ -2977,7 +2977,7 @@ static size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, ZSTD_parameters params, unsigned long long pledgedSrcSize) { /* allocate buffers */ - { size_t const neededInBuffSize = (size_t)1 << params.cParams.windowLog; + { size_t const neededInBuffSize = (size_t)1 << params.cParams.windowLog; if (zcs->inBuffSize < neededInBuffSize) { zcs->inBuffSize = neededInBuffSize; ZSTD_free(zcs->inBuff, zcs->customMem); @@ -3061,7 +3061,7 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, case zcss_load: /* complete inBuffer */ - { size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos; + { size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos; size_t const loaded = ZSTD_limitCopy(zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend-ip); zcs->inBuffPos += loaded; ip += loaded; @@ -3069,7 +3069,7 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, someMoreWork = 0; break; /* not enough input to get a full block : stop there, wait for more */ } } /* compress curr block (note : this stage cannot be stopped in the middle) */ - { void* cDst; + { void* cDst; size_t cSize; size_t const iSize = zcs->inBuffPos - zcs->inToCompress; size_t oSize = oend-op; @@ -3094,7 +3094,7 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, } case zcss_flush: - { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; + { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; size_t const flushed = ZSTD_limitCopy(op, oend-op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush); op += flushed; zcs->outBuffFlushedSize += flushed; @@ -3117,7 +3117,7 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, *dstCapacityPtr = op - ostart; zcs->inputProcessed += *srcSizePtr; if (zcs->frameEnded) return 0; - { size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos; + { size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos; if (hintInSize==0) hintInSize = zcs->blockSize; return hintInSize; } @@ -3181,7 +3181,7 @@ size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) } /* flush epilogue */ - { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; + { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; size_t const flushed = ZSTD_limitCopy(op, oend-op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush); op += flushed; zcs->outBuffFlushedSize += flushed; diff --git a/contrib/linux-kernel/lib/zstd/decompress.c b/contrib/linux-kernel/lib/zstd/decompress.c index 73cf06f5f..688d68539 100644 --- a/contrib/linux-kernel/lib/zstd/decompress.c +++ b/contrib/linux-kernel/lib/zstd/decompress.c @@ -188,7 +188,7 @@ static void ZSTD_refDDict(ZSTD_DCtx* dstDCtx, const ZSTD_DDict* ddict); unsigned ZSTD_isFrame(const void* buffer, size_t size) { if (size < 4) return 0; - { U32 const magic = MEM_readLE32(buffer); + { U32 const magic = MEM_readLE32(buffer); if (magic == ZSTD_MAGICNUMBER) 
return 1; if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) return 1; } @@ -202,7 +202,7 @@ unsigned ZSTD_isFrame(const void* buffer, size_t size) static size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize) { if (srcSize < ZSTD_frameHeaderSize_prefix) return ERROR(srcSize_wrong); - { BYTE const fhd = ((const BYTE*)src)[4]; + { BYTE const fhd = ((const BYTE*)src)[4]; U32 const dictID= fhd & 3; U32 const singleSegment = (fhd >> 5) & 1; U32 const fcsId = fhd >> 6; @@ -237,7 +237,7 @@ size_t ZSTD_getFrameParams(ZSTD_frameParams* fparamsPtr, const void* src, size_t { size_t const fhsize = ZSTD_frameHeaderSize(src, srcSize); if (srcSize < fhsize) return fhsize; } - { BYTE const fhdByte = ip[4]; + { BYTE const fhdByte = ip[4]; size_t pos = 5; U32 const dictIDSizeCode = fhdByte&3; U32 const checksumFlag = (fhdByte>>2)&1; @@ -383,7 +383,7 @@ typedef struct size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr) { if (srcSize < ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); - { U32 const cBlockHeader = MEM_readLE24(src); + { U32 const cBlockHeader = MEM_readLE24(src); U32 const cSize = cBlockHeader >> 3; bpPtr->lastBlock = cBlockHeader & 1; bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3); @@ -418,7 +418,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, { if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected); - { const BYTE* const istart = (const BYTE*) src; + { const BYTE* const istart = (const BYTE*) src; symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3); switch(litEncType) @@ -428,7 +428,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, /* fall-through */ case set_compressed: if (srcSize < 5) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */ - { size_t lhSize, litSize, litCSize; + { size_t lhSize, litSize, litCSize; U32 singleStream=0; U32 const lhlCode = (istart[0] >> 2) & 3; U32 const lhc = MEM_readLE32(istart); @@ -475,7 +475,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, } case set_basic: - { size_t litSize, lhSize; + { size_t litSize, lhSize; U32 const lhlCode = ((istart[0]) >> 2) & 3; switch(lhlCode) { @@ -508,7 +508,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, } case set_rle: - { U32 const lhlCode = ((istart[0]) >> 2) & 3; + { U32 const lhlCode = ((istart[0]) >> 2) & 3; size_t litSize, lhSize; switch(lhlCode) { @@ -742,7 +742,7 @@ static size_t ZSTD_buildSeqTable(FSE_DTable* DTableSpace, const FSE_DTable** DTa return 0; default : /* impossible */ case set_compressed : - { U32 tableLog; + { U32 tableLog; S16 norm[MaxSeq+1]; size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize); if (FSE_isError(headerSize)) return ERROR(corruption_detected); @@ -764,7 +764,7 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong); /* SeqHead */ - { int nbSeq = *ip++; + { int nbSeq = *ip++; if (!nbSeq) { *nbSeqPtr=0; return 1; } if (nbSeq > 0x7F) { if (nbSeq == 0xFF) { @@ -780,25 +780,25 @@ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, /* FSE table descriptors */ if (ip+4 > iend) return ERROR(srcSize_wrong); /* minimum possible size */ - { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6); + { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6); symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3); symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3); 
ip++; /* Build DTables */ - { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr, + { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr, LLtype, MaxLL, LLFSELog, ip, iend-ip, LL_defaultDTable, dctx->fseEntropy); if (ZSTD_isError(llhSize)) return ERROR(corruption_detected); ip += llhSize; } - { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr, + { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr, OFtype, MaxOff, OffFSELog, ip, iend-ip, OF_defaultDTable, dctx->fseEntropy); if (ZSTD_isError(ofhSize)) return ERROR(corruption_detected); ip += ofhSize; } - { size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr, + { size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr, MLtype, MaxML, MLFSELog, ip, iend-ip, ML_defaultDTable, dctx->fseEntropy); if (ZSTD_isError(mlhSize)) return ERROR(corruption_detected); @@ -865,7 +865,7 @@ size_t ZSTD_execSequenceLast7(BYTE* op, return sequenceLength; } /* span extDict & currPrefixSegment */ - { size_t const length1 = dictEnd - match; + { size_t const length1 = dictEnd - match; memmove(oLitEnd, match, length1); op = oLitEnd + length1; sequence.matchLength -= length1; @@ -909,7 +909,7 @@ static seq_t ZSTD_decodeSequence(seqState_t* seqState) 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD }; /* sequence */ - { size_t offset; + { size_t offset; if (!ofCode) offset = 0; else { @@ -988,7 +988,7 @@ size_t ZSTD_execSequence(BYTE* op, return sequenceLength; } /* span extDict & currPrefixSegment */ - { size_t const length1 = dictEnd - match; + { size_t const length1 = dictEnd - match; memmove(oLitEnd, match, length1); op = oLitEnd + length1; sequence.matchLength -= length1; @@ -1051,7 +1051,7 @@ static size_t ZSTD_decompressSequences( int nbSeq; /* Build Decoding Tables */ - { size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize); + { size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize); if (ZSTD_isError(seqHSize)) return seqHSize; ip += seqHSize; } @@ -1068,7 +1068,7 @@ static size_t ZSTD_decompressSequences( for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; ) { nbSeq--; - { seq_t const sequence = ZSTD_decodeSequence(&seqState); + { seq_t const sequence = ZSTD_decodeSequence(&seqState); size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd); if (ZSTD_isError(oneSeqSize)) return oneSeqSize; op += oneSeqSize; @@ -1081,7 +1081,7 @@ static size_t ZSTD_decompressSequences( } /* last literal segment */ - { size_t const lastLLSize = litEnd - litPtr; + { size_t const lastLLSize = litEnd - litPtr; if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall); memcpy(op, litPtr, lastLLSize); op += lastLLSize; @@ -1122,7 +1122,7 @@ FORCE_INLINE seq_t ZSTD_decodeSequenceLong_generic(seqState_t* seqState, int con 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD }; /* sequence */ - { size_t offset; + { size_t offset; if (!ofCode) offset = 0; else { @@ -1163,7 +1163,7 @@ FORCE_INLINE seq_t ZSTD_decodeSequenceLong_generic(seqState_t* seqState, int con if (MEM_32bits() || (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BIT_reloadDStream(&seqState->DStream); - { size_t const pos = seqState->pos + seq.litLength; + { size_t const pos = seqState->pos + seq.litLength; seq.match = seqState->base + pos - seq.offset; /* single memory segment */ if (seq.offset > pos) seq.match += 
seqState->gotoDict; /* separate memory segment */ seqState->pos = pos + seq.matchLength; @@ -1223,7 +1223,7 @@ size_t ZSTD_execSequenceLong(BYTE* op, return sequenceLength; } /* span extDict & currPrefixSegment */ - { size_t const length1 = dictEnd - match; + { size_t const length1 = dictEnd - match; memmove(oLitEnd, match, length1); op = oLitEnd + length1; sequence.matchLength -= length1; @@ -1287,7 +1287,7 @@ static size_t ZSTD_decompressSequencesLong( int nbSeq; /* Build Decoding Tables */ - { size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize); + { size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize); if (ZSTD_isError(seqHSize)) return seqHSize; ip += seqHSize; } @@ -1341,7 +1341,7 @@ static size_t ZSTD_decompressSequencesLong( } /* last literal segment */ - { size_t const lastLLSize = litEnd - litPtr; + { size_t const lastLLSize = litEnd - litPtr; if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall); memcpy(op, litPtr, lastLLSize); op += lastLLSize; @@ -1360,7 +1360,7 @@ static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, if (srcSize >= ZSTD_BLOCKSIZE_ABSOLUTEMAX) return ERROR(srcSize_wrong); /* Decode literals section */ - { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize); + { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize); if (ZSTD_isError(litCSize)) return litCSize; ip += litCSize; srcSize -= litCSize; @@ -1434,7 +1434,7 @@ size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize) if (ZSTD_isError(headerSize)) return headerSize; /* Frame Header */ - { size_t const ret = ZSTD_getFrameParams(&fParams, ip, remainingSize); + { size_t const ret = ZSTD_getFrameParams(&fParams, ip, remainingSize); if (ZSTD_isError(ret)) return ret; if (ret > 0) return ERROR(srcSize_wrong); } @@ -1482,7 +1482,7 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, if (remainingSize < ZSTD_frameHeaderSize_min+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); /* Frame Header */ - { size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix); + { size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix); if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize; if (remainingSize < frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); CHECK_F(ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize)); @@ -1594,7 +1594,7 @@ static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx, } ZSTD_checkContinuity(dctx, dst); - { const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity, + { const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity, &src, &srcSize); if (ZSTD_isError(res)) return res; /* don't need to bounds check this, ZSTD_decompressFrame will have @@ -1687,7 +1687,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c return 0; case ZSTDds_decodeBlockHeader: - { blockProperties_t bp; + { blockProperties_t bp; size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp); if (ZSTD_isError(cBlockSize)) return cBlockSize; dctx->expected = cBlockSize; @@ -1714,7 +1714,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c } case ZSTDds_decompressLastBlock: case ZSTDds_decompressBlock: - { size_t rSize; + { size_t rSize; switch(dctx->bType) { case bt_compressed: @@ -1749,7 +1749,7 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c return rSize; } case ZSTDds_checkChecksum: - { U32 const h32 = 
(U32)xxh64_digest(&dctx->xxhState); + { U32 const h32 = (U32)xxh64_digest(&dctx->xxhState); U32 const check32 = MEM_readLE32(src); /* srcSize == 4, guaranteed by dctx->expected */ if (check32 != h32) return ERROR(checksum_wrong); dctx->expected = 0; @@ -1757,13 +1757,13 @@ size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, c return 0; } case ZSTDds_decodeSkippableHeader: - { memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected); + { memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected); dctx->expected = MEM_readLE32(dctx->headerBuffer + 4); dctx->stage = ZSTDds_skipFrame; return 0; } case ZSTDds_skipFrame: - { dctx->expected = 0; + { dctx->expected = 0; dctx->stage = ZSTDds_getFrameHeaderSize; return 0; } @@ -1794,12 +1794,12 @@ static size_t ZSTD_loadEntropy(ZSTD_entropyTables_t* entropy, const void* const dictPtr += 8; /* skip header = magic + dictID */ - { size_t const hSize = HUF_readDTableX4(entropy->hufTable, dictPtr, dictEnd-dictPtr); + { size_t const hSize = HUF_readDTableX4(entropy->hufTable, dictPtr, dictEnd-dictPtr); if (HUF_isError(hSize)) return ERROR(dictionary_corrupted); dictPtr += hSize; } - { short offcodeNCount[MaxOff+1]; + { short offcodeNCount[MaxOff+1]; U32 offcodeMaxValue = MaxOff, offcodeLog; size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr); if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted); @@ -1808,7 +1808,7 @@ static size_t ZSTD_loadEntropy(ZSTD_entropyTables_t* entropy, const void* const dictPtr += offcodeHeaderSize; } - { short matchlengthNCount[MaxML+1]; + { short matchlengthNCount[MaxML+1]; unsigned matchlengthMaxValue = MaxML, matchlengthLog; size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr); if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted); @@ -1817,7 +1817,7 @@ static size_t ZSTD_loadEntropy(ZSTD_entropyTables_t* entropy, const void* const dictPtr += matchlengthHeaderSize; } - { short litlengthNCount[MaxLL+1]; + { short litlengthNCount[MaxLL+1]; unsigned litlengthMaxValue = MaxLL, litlengthLog; size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr); if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted); @@ -1827,7 +1827,7 @@ static size_t ZSTD_loadEntropy(ZSTD_entropyTables_t* entropy, const void* const } if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted); - { int i; + { int i; size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12)); for (i=0; i<3; i++) { U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4; @@ -1841,14 +1841,14 @@ static size_t ZSTD_loadEntropy(ZSTD_entropyTables_t* entropy, const void* const static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) { if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize); - { U32 const magic = MEM_readLE32(dict); + { U32 const magic = MEM_readLE32(dict); if (magic != ZSTD_DICT_MAGIC) { return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */ } } dctx->dictID = MEM_readLE32((const char*)dict + 4); /* load entropy tables */ - { size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize); + { size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize); if (ZSTD_isError(eSize)) return ERROR(dictionary_corrupted); dict = (const char*)dict 
+ eSize; dictSize -= eSize; @@ -1925,7 +1925,7 @@ static size_t ZSTD_loadEntropy_inDDict(ZSTD_DDict* ddict) ddict->dictID = 0; ddict->entropyPresent = 0; if (ddict->dictSize < 8) return 0; - { U32 const magic = MEM_readLE32(ddict->dictContent); + { U32 const magic = MEM_readLE32(ddict->dictContent); if (magic != ZSTD_DICT_MAGIC) return 0; /* pure content mode */ } ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + 4); @@ -1941,7 +1941,7 @@ static ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, { if (!customMem.customAlloc || !customMem.customFree) return NULL; - { ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem); + { ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem); if (!ddict) return NULL; ddict->cMem = customMem; @@ -1958,7 +1958,7 @@ static ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, ddict->dictSize = dictSize; ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ /* parse dictionary content */ - { size_t const errorCode = ZSTD_loadEntropy_inDDict(ddict); + { size_t const errorCode = ZSTD_loadEntropy_inDDict(ddict); if (ZSTD_isError(errorCode)) { ZSTD_freeDDict(ddict); return NULL; @@ -1982,7 +1982,7 @@ ZSTD_DDict* ZSTD_initDDict(const void* dict, size_t dictSize, void* workspace, s size_t ZSTD_freeDDict(ZSTD_DDict* ddict) { if (ddict==NULL) return 0; /* support free on NULL */ - { ZSTD_customMem const cMem = ddict->cMem; + { ZSTD_customMem const cMem = ddict->cMem; ZSTD_free(ddict->dictBuffer, cMem); ZSTD_free(ddict, cMem); return 0; @@ -2129,7 +2129,7 @@ ZSTD_DStream* ZSTD_initDStream_usingDDict(size_t maxWindowSize, const ZSTD_DDict size_t ZSTD_freeDStream(ZSTD_DStream* zds) { if (zds==NULL) return 0; /* support free on null */ - { ZSTD_customMem const cMem = zds->customMem; + { ZSTD_customMem const cMem = zds->customMem; ZSTD_freeDCtx(zds->dctx); zds->dctx = NULL; ZSTD_freeDDict(zds->ddictLocal); @@ -2186,7 +2186,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB /* fall-through */ case zdss_loadHeader : - { size_t const hSize = ZSTD_getFrameParams(&zds->fParams, zds->headerBuffer, zds->lhSize); + { size_t const hSize = ZSTD_getFrameParams(&zds->fParams, zds->headerBuffer, zds->lhSize); if (ZSTD_isError(hSize)) return hSize; if (hSize != 0) { /* need more input */ @@ -2218,9 +2218,9 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB /* Consume header */ ZSTD_refDDict(zds->dctx, zds->ddict); - { size_t const h1Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); /* == ZSTD_frameHeaderSize_prefix */ + { size_t const h1Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); /* == ZSTD_frameHeaderSize_prefix */ CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer, h1Size)); - { size_t const h2Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); + { size_t const h2Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer+h1Size, h2Size)); } } @@ -2228,7 +2228,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB if (zds->fParams.windowSize > zds->maxWindowSize) return ERROR(frameParameter_windowTooLarge); /* Adapt buffer sizes to frame header instructions */ - { size_t const blockSize = MIN(zds->fParams.windowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX); + { size_t const blockSize = MIN(zds->fParams.windowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX); size_t const 
neededOutSize = zds->fParams.windowSize + blockSize + WILDCOPY_OVERLENGTH * 2; zds->blockSize = blockSize; if (zds->inBuffSize < blockSize) { @@ -2247,7 +2247,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB /* pass-through */ case zdss_read: - { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx); + { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx); if (neededInSize==0) { /* end of frame */ zds->stage = zdss_init; someMoreWork = 0; @@ -2271,7 +2271,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB } case zdss_load: - { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx); + { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx); size_t const toLoad = neededInSize - zds->inPos; /* should always be <= remaining space within inBuff */ size_t loadedSize; if (toLoad > zds->inBuffSize - zds->inPos) return ERROR(corruption_detected); /* should never happen */ @@ -2294,7 +2294,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB } } case zdss_flush: - { size_t const toFlushSize = zds->outEnd - zds->outStart; + { size_t const toFlushSize = zds->outEnd - zds->outStart; size_t const flushedSize = ZSTD_limitCopy(op, oend-op, zds->outBuff + zds->outStart, toFlushSize); op += flushedSize; zds->outStart += flushedSize; @@ -2314,7 +2314,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB /* result */ input->pos += (size_t)(ip-istart); output->pos += (size_t)(op-ostart); - { size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds->dctx); + { size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds->dctx); if (!nextSrcSizeHint) { /* frame fully decoded */ if (zds->outEnd == zds->outStart) { /* output fully flushed */ if (zds->hostageByte) { diff --git a/contrib/linux-kernel/lib/zstd/entropy_common.c b/contrib/linux-kernel/lib/zstd/entropy_common.c index 68d880827..36ad26653 100644 --- a/contrib/linux-kernel/lib/zstd/entropy_common.c +++ b/contrib/linux-kernel/lib/zstd/entropy_common.c @@ -107,7 +107,7 @@ size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* t } else { bitStream >>= 2; } } - { int const max = (2*threshold-1) - remaining; + { int const max = (2*threshold-1) - remaining; int count; if ((bitStream & (threshold-1)) < (U32)max) { @@ -172,7 +172,7 @@ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, if (iSize+1 > srcSize) return ERROR(srcSize_wrong); if (oSize >= hwSize) return ERROR(corruption_detected); ip += 1; - { U32 n; + { U32 n; for (n=0; n<oSize; n+=2) { huffWeight[n] = ip[n/2] >> 4; huffWeight[n+1] = ip[n/2] & 15; @@ -187,7 +187,7 @@ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, /* collect weight stats */ memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32)); weightTotal = 0; - { U32 n; for (n=0; n<oSize; n++) { + { U32 n; for (n=0; n<oSize; n++) { if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected); rankStats[huffWeight[n]]++; weightTotal += (1 << huffWeight[n]) >> 1; @@ -195,11 +195,11 @@ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, if (weightTotal == 0) return ERROR(corruption_detected); /* get last non-null symbol weight (implied, total must be 2^n) */ - { U32 const tableLog = BIT_highbit32(weightTotal) + 1; + { U32 const tableLog = BIT_highbit32(weightTotal) + 1; if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected); *tableLogPtr = tableLog; /* determine last weight */ - { U32 const total = 1 << tableLog; + { U32 const total = 1 << tableLog; U32 const rest
= total - weightTotal; U32 const verif = 1 << BIT_highbit32(rest); U32 const lastWeight = BIT_highbit32(rest) + 1; diff --git a/contrib/linux-kernel/lib/zstd/fse.h b/contrib/linux-kernel/lib/zstd/fse.h index 14fa439ee..6a7895722 100644 --- a/contrib/linux-kernel/lib/zstd/fse.h +++ b/contrib/linux-kernel/lib/zstd/fse.h @@ -459,7 +459,7 @@ MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct) MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol) { FSE_initCState(statePtr, ct); - { const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol]; + { const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol]; const U16* stateTable = (const U16*)(statePtr->stateTable); U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16); statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits; diff --git a/contrib/linux-kernel/lib/zstd/fse_compress.c b/contrib/linux-kernel/lib/zstd/fse_compress.c index b6a6d4693..672c72d94 100644 --- a/contrib/linux-kernel/lib/zstd/fse_compress.c +++ b/contrib/linux-kernel/lib/zstd/fse_compress.c @@ -106,7 +106,7 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsi * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */ /* symbol start positions */ - { U32 u; + { U32 u; cumul[0] = 0; for (u=1; u<=maxSymbolValue+1; u++) { if (normalizedCounter[u-1]==-1) { /* Low proba symbol */ @@ -119,7 +119,7 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsi } /* Spread symbols */ - { U32 position = 0; + { U32 position = 0; U32 symbol; for (symbol=0; symbol<=maxSymbolValue; symbol++) { int nbOccurences; @@ -133,13 +133,13 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsi } /* Build table */ - { U32 u; for (u=0; u<tableSize; u++) { + { U32 u; for (u=0; u<tableSize; u++) { FSE_FUNCTION_TYPE s = tableSymbol[u]; tableU16[cumul[s]++] = (U16)(tableSize+u); bitStream >>= 16; bitCount -= 16; } } - { int count = normalizedCounter[charnum++]; + { int count = normalizedCounter[charnum++]; int const max = (2*threshold-1)-remaining; remaining -= count < 0 ?
-count : count; count++; /* +1 for extra accuracy */ @@ -337,7 +337,7 @@ static size_t FSE_count_parallel_wksp( if (!maxSymbolValue) maxSymbolValue = 255; /* 0 == default */ /* by stripes of 16 bytes */ - { U32 cached = MEM_read32(ip); ip += 4; + { U32 cached = MEM_read32(ip); ip += 4; while (ip < iend-15) { U32 c = cached; cached = MEM_read32(ip); ip += 4; Counting1[(BYTE) c ]++; @@ -372,7 +372,7 @@ static size_t FSE_count_parallel_wksp( if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall); } } - { U32 s; for (s=0; s<=maxSymbolValue; s++) { + { U32 s; for (s=0; s<=maxSymbolValue; s++) { count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s]; if (count[s] > max) max = count[s]; } } @@ -517,7 +517,7 @@ static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, return 0; } - { U64 const vStepLog = 62 - tableLog; + { U64 const vStepLog = 62 - tableLog; U64 const mid = (1ULL << (vStepLog-1)) - 1; U64 const rStep = ((((U64)1<<vStepLog) * ToDistribute) + mid) / total; if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported size */ if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */ - { U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 }; + { U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 }; U64 const scale = 62 - tableLog; U64 const step = ((U64)1<<62) / total; /* <== here, one division ! */ U64 const vStep = 1ULL<<(scale-20); @@ -582,7 +582,7 @@ size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog, } #if 0 - { /* Print Table (debug) */ + { /* Print Table (debug) */ U32 s; U32 nTotal = 0; for (s=0; s<=maxSymbolValue; s++) @@ -623,7 +623,7 @@ size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits) tableU16[s] = (U16)(tableSize + s); /* Build Symbol Transformation Table */ - { const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits); + { const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits); for (s=0; s<=maxSymbolValue; s++) { symbolTT[s].deltaNbBits = deltaNbBits; symbolTT[s].deltaFindState = s-1; @@ -757,7 +757,7 @@ size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t src if (!tableLog) tableLog = FSE_DEFAULT_TABLELOG; /* Scan input and build symbol stats */ - { CHECK_V_F(maxCount, FSE_count_wksp(count, &maxSymbolValue, src, srcSize, (unsigned*)scratchBuffer) ); + { CHECK_V_F(maxCount, FSE_count_wksp(count, &maxSymbolValue, src, srcSize, (unsigned*)scratchBuffer) ); if (maxCount == srcSize) return 1; /* only a single symbol in src : rle */ if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */ if (maxCount < (srcSize >> 7)) return 0; /* Heuristic : not compressible enough */ @@ -767,13 +767,13 @@ size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t src CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue) ); /* Write table description header */ - { CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) ); + { CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) ); op += nc_err; } /* Compress */ CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, scratchBufferSize) ); - { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) ); + { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) ); if (cSize == 0) return 0; /* not enough space for compressed data */ op += cSize; } diff --git
a/contrib/linux-kernel/lib/zstd/fse_decompress.c b/contrib/linux-kernel/lib/zstd/fse_decompress.c index 2a35f1703..6de541131 100644 --- a/contrib/linux-kernel/lib/zstd/fse_decompress.c +++ b/contrib/linux-kernel/lib/zstd/fse_decompress.c @@ -98,10 +98,10 @@ size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Init, lay down lowprob symbols */ - { FSE_DTableHeader DTableH; + { FSE_DTableHeader DTableH; DTableH.tableLog = (U16)tableLog; DTableH.fastMode = 1; - { S16 const largeLimit= (S16)(1 << (tableLog-1)); + { S16 const largeLimit= (S16)(1 << (tableLog-1)); U32 s; for (s=0; s<maxSV1; s++) { diff --git a/contrib/linux-kernel/lib/zstd/huf_compress.c b/contrib/linux-kernel/lib/zstd/huf_compress.c if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */ } @@ -96,13 +96,13 @@ size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue) ); /* Write table description header */ - { CHECK_V_F(hSize, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) ); + { CHECK_V_F(hSize, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) ); op += hSize; } /* Compress */ CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, sizeof(scratchBuffer)) ); - { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable) ); + { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable) ); if (cSize == 0) return 0; /* not enough space for compressed data */ op += cSize; } @@ -138,7 +138,7 @@ size_t HUF_writeCTable (void* dst, size_t maxDstSize, huffWeight[n] = bitsToWeight[CTable[n].nbBits]; /* attempt weights compression by FSE */ - { CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, huffWeight, maxSymbolValue) ); + { CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, huffWeight, maxSymbolValue) ); if ((hSize>1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */ op[0] = (BYTE)hSize; return hSize+1; @@ -170,7 +170,7 @@ size_t HUF_readCTable (HUF_CElt* CTable, U32 maxSymbolValue, const void* src, si if (nbSymbols > maxSymbolValue+1) return ERROR(maxSymbolValue_tooSmall); /* Prepare base value per rank */ - { U32 n, nextRankStart = 0; + { U32 n, nextRankStart = 0; for (n=1; n<=tableLog; n++) { U32 curr = nextRankStart; nextRankStart += (rankVal[n] << (n-1)); @@ -178,18 +178,18 @@ size_t HUF_readCTable (HUF_CElt* CTable, U32 maxSymbolValue, const void* src, si } } /* fill nbBits */ - { U32 n; for (n=0; n<nbSymbols; n++) CTable[n].nbBits = (BYTE)(tableLog + 1 - huffWeight[n]); } /* fill val */ - { U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0}; /* support w=0=>n=tableLog+1 */ + { U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0}; /* support w=0=>n=tableLog+1 */ U16 valPerRank[HUF_TABLELOG_MAX+2] = {0}; { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[CTable[n].nbBits]++; } /* determine starting value per rank */ { U16 min = 0; U32 n; for (n=tableLog; n>0; n--) { /* start at n=tablelog <-> w=1 */ valPerRank[n] = min; /* get starting value within each rank */ min += nbPerRank[n]; @@ -216,7 +216,7 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits) if (largestBits <= maxNbBits) return largestBits; /* early exit : no elt > maxNbBits */ /* there are several too large elements (at least >= 2) */ - { int totalCost = 0; + { int totalCost = 0; const U32 baseCost = 1 << (largestBits - maxNbBits); U32 n = lastNonNull; @@ -231,13 +231,13 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits) totalCost >>= (largestBits - maxNbBits); /* note : totalCost is necessarily a multiple of baseCost */ /* repay normalized cost */ - { U32 const noSymbol = 0xF0F0F0F0; + { U32 const noSymbol = 0xF0F0F0F0; U32 rankLast[HUF_TABLELOG_MAX+2]; int pos; /* Get pos of last (smallest) symbol per rank */ memset(rankLast, 0xF0, sizeof(rankLast)); - {
U32 currNbBits = maxNbBits; + { U32 currNbBits = maxNbBits; for (pos=n ; pos >= 0; pos--) { if (huffNode[pos].nbBits >= currNbBits) continue; currNbBits = huffNode[pos].nbBits; /* < maxNbBits */ @@ -251,7 +251,7 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits) U32 lowPos = rankLast[nBitsToDecrease-1]; if (highPos == noSymbol) continue; if (lowPos == noSymbol) break; - { U32 const highTotal = huffNode[highPos].count; + { U32 const highTotal = huffNode[highPos].count; U32 const lowTotal = 2 * huffNode[lowPos].count; if (highTotal <= lowTotal) break; } } @@ -369,13 +369,13 @@ size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValu maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits); /* fill result into tree (val, nbBits) */ - { U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0}; + { U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0}; U16 valPerRank[HUF_TABLELOG_MAX+1] = {0}; if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */ for (n=0; n<=nonNullRank; n++) nbPerRank[huffNode[n].nbBits]++; /* determine stating value per rank */ - { U16 min = 0; + { U16 min = 0; for (n=maxNbBits; n>0; n--) { valPerRank[n] = min; /* get starting value within each rank */ min += nbPerRank[n]; @@ -480,28 +480,28 @@ size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, si if (srcSize < 12) return 0; /* no saving possible : too small input */ op += 6; /* jumpTable */ - { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) ); + { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) ); if (cSize==0) return 0; MEM_writeLE16(ostart, (U16)cSize); op += cSize; } ip += segmentSize; - { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) ); + { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) ); if (cSize==0) return 0; MEM_writeLE16(ostart+2, (U16)cSize); op += cSize; } ip += segmentSize; - { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) ); + { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) ); if (cSize==0) return 0; MEM_writeLE16(ostart+4, (U16)cSize); op += cSize; } ip += segmentSize; - { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, iend-ip, CTable) ); + { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, iend-ip, CTable) ); if (cSize==0) return 0; op += cSize; } @@ -567,7 +567,7 @@ static size_t HUF_compress_internal ( } /* Scan input and build symbol stats */ - { CHECK_V_F(largest, FSE_count_wksp (count, &maxSymbolValue, (const BYTE*)src, srcSize, (U32*)workSpace) ); + { CHECK_V_F(largest, FSE_count_wksp (count, &maxSymbolValue, (const BYTE*)src, srcSize, (U32*)workSpace) ); if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */ if (largest <= (srcSize >> 7)+1) return 0; /* Fast heuristic : not compressible enough */ } @@ -583,14 +583,14 @@ static size_t HUF_compress_internal ( /* Build Huffman Tree */ huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue); - { CHECK_V_F(maxBits, HUF_buildCTable_wksp (CTable, count, maxSymbolValue, huffLog, workSpace, wkspSize) ); + { CHECK_V_F(maxBits, HUF_buildCTable_wksp (CTable, count, maxSymbolValue, huffLog, workSpace, wkspSize) ); huffLog = (U32)maxBits; /* Zero the unused symbols so we can check it for validity */ memset(CTable + maxSymbolValue + 1, 0, CTableSize - (maxSymbolValue + 1) * sizeof(HUF_CElt)); } /* 
Write table description header */ - { CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, CTable, maxSymbolValue, huffLog) ); + { CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, CTable, maxSymbolValue, huffLog) ); /* Check if using the previous table will be beneficial */ if (repeat && *repeat != HUF_repeat_none) { size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, count, maxSymbolValue); diff --git a/contrib/linux-kernel/lib/zstd/huf_decompress.c b/contrib/linux-kernel/lib/zstd/huf_decompress.c index a47cb72ff..2d9b33b12 100644 --- a/contrib/linux-kernel/lib/zstd/huf_decompress.c +++ b/contrib/linux-kernel/lib/zstd/huf_decompress.c @@ -91,7 +91,7 @@ size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize) if (HUF_isError(iSize)) return iSize; /* Table header */ - { DTableDesc dtd = HUF_getDTableDesc(DTable); + { DTableDesc dtd = HUF_getDTableDesc(DTable); if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */ dtd.tableType = 0; dtd.tableLog = (BYTE)tableLog; @@ -99,7 +99,7 @@ size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize) } /* Calculate starting value for each rank */ - { U32 n, nextRankStart = 0; + { U32 n, nextRankStart = 0; for (n=1; n<tableLog+1; n++) { U32 const curr = nextRankStart; nextRankStart += (rankVal[n] << (n-1)); rankVal[n] = curr; } } /* fill DTable */ { U32 n; for (n=0; n<nbSymbols; n++) { U32 const w = huffWeight[n]; U32 const length = (1 << w) >> 1; @@ -220,7 +220,7 @@ static size_t HUF_decompress4X2_usingDTable_internal( /* Check */ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ - { const BYTE* const istart = (const BYTE*) cSrc; + { const BYTE* const istart = (const BYTE*) cSrc; BYTE* const ostart = (BYTE*) dst; BYTE* const oend = ostart + dstSize; const void* const dtPtr = DTable + 1; @@ -359,7 +359,7 @@ static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 co } /* fill DTable */ - { U32 s; for (s=0; s<sortedListSize; s++) { + { U32 s; for (s=0; s<sortedListSize; s++) { if ((cSrcSize >= dstSize) || (cSrcSize <= 1)) return ERROR(corruption_detected); /* invalid */ - { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); + { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); return algoNb ? HUF_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) : HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ; } @@ -828,7 +828,7 @@ size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ - { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); + { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); return algoNb ? HUF_decompress1X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) : HUF_decompress1X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ; } diff --git a/contrib/linux-kernel/lib/zstd/zstd_opt.h b/contrib/linux-kernel/lib/zstd/zstd_opt.h index 321f8c410..9bd5303a6 100644 --- a/contrib/linux-kernel/lib/zstd/zstd_opt.h +++ b/contrib/linux-kernel/lib/zstd/zstd_opt.h @@ -127,7 +127,7 @@ FORCE_INLINE U32 ZSTD_getLiteralPrice(seqStore_t* ssPtr, U32 litLength, const BY } /* literal Length */ - { const BYTE LL_deltaCode = 19; + { const BYTE LL_deltaCode = 19; const BYTE llCode = (litLength>63) ?
(BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength]; price += LL_bits[llCode] + ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[llCode]+1); } @@ -149,7 +149,7 @@ FORCE_INLINE U32 ZSTD_getPrice(seqStore_t* seqStorePtr, U32 litLength, const BYT if (!ultra && offCode >= 20) price += (offCode-19)*2; /* match Length */ - { const BYTE ML_deltaCode = 36; + { const BYTE ML_deltaCode = 36; const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength]; price += ML_bits[mlCode] + seqStorePtr->log2matchLengthSum - ZSTD_highbit32(seqStorePtr->matchLengthFreq[mlCode]+1); } @@ -168,20 +168,20 @@ MEM_STATIC void ZSTD_updatePrice(seqStore_t* seqStorePtr, U32 litLength, const B seqStorePtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD; /* literal Length */ - { const BYTE LL_deltaCode = 19; + { const BYTE LL_deltaCode = 19; const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength]; seqStorePtr->litLengthFreq[llCode]++; seqStorePtr->litLengthSum++; } /* match offset */ - { BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1); + { BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1); seqStorePtr->offCodeSum++; seqStorePtr->offCodeFreq[offCode]++; } /* match Length */ - { const BYTE ML_deltaCode = 36; + { const BYTE ML_deltaCode = 36; const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength]; seqStorePtr->matchLengthFreq[mlCode]++; seqStorePtr->matchLengthSum++; @@ -192,7 +192,7 @@ MEM_STATIC void ZSTD_updatePrice(seqStore_t* seqStorePtr, U32 litLength, const B #define SET_PRICE(pos, mlen_, offset_, litlen_, price_) \ - { \ + { \ while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } \ opt[pos].mlen = mlen_; \ opt[pos].off = offset_; \ @@ -435,7 +435,7 @@ void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx, litlen = (U32)(ip - anchor); /* check repCode */ - { U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor); + { U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor); for (i=(ip == anchor); i<last_i; i++) { const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i]; if ( (repCur > 0) && (repCur < (S32)(ip-prefixStart)) @@ -520,7 +520,7 @@ void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx, } best_mlen = minMatch; - { U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1); + { U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1); for (i=(opt[cur].mlen != 1); i<last_i; i++) { const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i]; if ( (repCur > 0) && (repCur < (S32)(inr-prefixStart)) @@ -639,7 +639,7 @@ _storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */ { int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->repToConfirm[i] = rep[i]; } /* Last Literals */ - { size_t const lastLLSize = iend - anchor; + { size_t const lastLLSize = iend - anchor; memcpy(seqStorePtr->lit, anchor, lastLLSize); seqStorePtr->lit += lastLLSize; } @@ -690,7 +690,7 @@ void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx, opt[0].litlen = (U32)(ip - anchor); /* check repCode */ - { U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor); + { U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor); for (i = (ip==anchor); i<last_i; i++) { { int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->repToConfirm[i] = rep[i]; } /* Last Literals */ - { size_t lastLLSize = iend - anchor; + { size_t lastLLSize = iend - anchor; memcpy(seqStorePtr->lit, anchor, lastLLSize); seqStorePtr->lit += lastLLSize; }