1
0
mirror of https://github.com/facebook/zstd.git synced 2025-08-07 06:23:00 +03:00

Stop suppressing pointer-overflow UBSAN errors

* Remove all pointer-overflow suppressions from our UBSAN builds/tests.
* Add `ZSTD_ALLOW_POINTER_OVERFLOW_ATTR` macro to suppress
  pointer-overflow at a per-function level. This is a superior approach
  because it also applies to users who build zstd with UBSAN.
* Add `ZSTD_wrappedPtr{Diff,Add,Sub}()` that use these suppressions.
  The end goal is to only tag these functions with
  `ZSTD_ALLOW_POINTER_OVERFLOW`. But we can start by annotating functions
  that rely on pointer overflow, and gradually transition to using
  these.
* Add `ZSTD_maybeNullPtrAdd()` to simplify pointer addition when the
  pointer may be `NULL`.
* Fix all the fuzzer issues that came up. I'm sure there will be a lot
  more, but these are the ones that came up within a few minutes of
  running the fuzzers, and while running GitHub CI.
This commit is contained in:
Nick Terrell
2023-09-26 17:53:26 -07:00
committed by Nick Terrell
parent 3daed7017a
commit 43118da8a7
23 changed files with 252 additions and 103 deletions

View File

@@ -317,7 +317,7 @@ update_regressionResults:
# run UBsan with -fsanitize-recover=pointer-overflow # run UBsan with -fsanitize-recover=pointer-overflow
# this only works with recent compilers such as gcc 8+ # this only works with recent compilers such as gcc 8+
usan: clean usan: clean
$(MAKE) test CC=clang MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize-recover=pointer-overflow -fsanitize=undefined -Werror $(MOREFLAGS)" $(MAKE) test CC=clang MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=undefined -Werror $(MOREFLAGS)"
asan: clean asan: clean
$(MAKE) test CC=clang MOREFLAGS="-g -fsanitize=address -Werror $(MOREFLAGS)" $(MAKE) test CC=clang MOREFLAGS="-g -fsanitize=address -Werror $(MOREFLAGS)"
@@ -335,10 +335,10 @@ asan32: clean
$(MAKE) -C $(TESTDIR) test32 CC=clang MOREFLAGS="-g -fsanitize=address $(MOREFLAGS)" $(MAKE) -C $(TESTDIR) test32 CC=clang MOREFLAGS="-g -fsanitize=address $(MOREFLAGS)"
uasan: clean uasan: clean
$(MAKE) test CC=clang MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize-recover=pointer-overflow -fsanitize=address,undefined -Werror $(MOREFLAGS)" $(MAKE) test CC=clang MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=address,undefined -Werror $(MOREFLAGS)"
uasan-%: clean uasan-%: clean
LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize-recover=pointer-overflow -fsanitize=address,undefined -Werror $(MOREFLAGS)" $(MAKE) -C $(TESTDIR) $* LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=address,undefined -Werror $(MOREFLAGS)" $(MAKE) -C $(TESTDIR) $*
tsan-%: clean tsan-%: clean
LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=thread -Werror $(MOREFLAGS)" $(MAKE) -C $(TESTDIR) $* FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)" LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=thread -Werror $(MOREFLAGS)" $(MAKE) -C $(TESTDIR) $* FUZZER_FLAGS="--no-big-tests $(FUZZER_FLAGS)"

View File

@@ -11,6 +11,8 @@
#ifndef ZSTD_COMPILER_H #ifndef ZSTD_COMPILER_H
#define ZSTD_COMPILER_H #define ZSTD_COMPILER_H
#include <stddef.h>
#include "portability_macros.h" #include "portability_macros.h"
/*-******************************************************* /*-*******************************************************
@@ -302,6 +304,74 @@
* Sanitizer * Sanitizer
*****************************************************************/ *****************************************************************/
/**
 * Zstd relies on pointer overflow in its decompressor.
 * We add this attribute to functions that rely on pointer overflow.
 * The #ifndef guard lets users pre-define the macro (e.g. to empty)
 * before including this header to override the detection below.
 */
#ifndef ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
# if __has_attribute(no_sanitize)
# if !defined(__clang__) && defined(__GNUC__) && __GNUC__ < 8
/* gcc < 8 only has signed-integer-overflow which triggers on pointer overflow */
# define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR __attribute__((no_sanitize("signed-integer-overflow")))
# else
/* older versions of clang [3.7, 5.0) will warn that pointer-overflow is ignored. */
# define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR __attribute__((no_sanitize("pointer-overflow")))
# endif
# else
/* NOTE(review): assumes __has_attribute is usable here — presumably a
 * fallback definition exists earlier in this header; confirm. */
# define ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
# endif
#endif
/**
 * Helper function to perform a wrapped pointer difference without triggering
 * UBSAN.
 *
 * Tagged with ZSTD_ALLOW_POINTER_OVERFLOW_ATTR so UBSAN's pointer-overflow
 * check is suppressed for this function even in user builds.
 *
 * @param lhs  left-hand pointer (may not point into the same object as rhs)
 * @param rhs  right-hand pointer
 * @returns lhs - rhs with wrapping
 */
MEM_STATIC
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
ptrdiff_t ZSTD_wrappedPtrDiff(unsigned char const* lhs, unsigned char const* rhs)
{
return lhs - rhs;
}
/**
 * Helper function to perform a wrapped pointer add without triggering UBSAN.
 *
 * Tagged with ZSTD_ALLOW_POINTER_OVERFLOW_ATTR so UBSAN's pointer-overflow
 * check is suppressed for this function even in user builds.
 *
 * @param ptr  base pointer
 * @param add  signed offset to add (may overflow the object's bounds)
 * @return ptr + add with wrapping
 */
MEM_STATIC
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
unsigned char const* ZSTD_wrappedPtrAdd(unsigned char const* ptr, ptrdiff_t add)
{
return ptr + add;
}
/**
 * Helper function to perform a wrapped pointer subtraction without triggering
 * UBSAN.
 *
 * Tagged with ZSTD_ALLOW_POINTER_OVERFLOW_ATTR so UBSAN's pointer-overflow
 * check is suppressed for this function even in user builds.
 *
 * @param ptr  base pointer
 * @param sub  signed offset to subtract (may overflow the object's bounds)
 * @return ptr - sub with wrapping
 */
MEM_STATIC
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
unsigned char const* ZSTD_wrappedPtrSub(unsigned char const* ptr, ptrdiff_t sub)
{
return ptr - sub;
}
/**
 * Helper function to add to a pointer that works around C's undefined behavior
 * of adding 0 to NULL.
 *
 * Note that when add <= 0 the pointer is returned unchanged: callers are
 * expected to pass a non-negative offset (typically a buffer size), so the
 * only case being worked around is add == 0 with ptr == NULL.
 *
 * @returns `ptr + add` except it defines `NULL + 0 == NULL`.
 */
MEM_STATIC
unsigned char* ZSTD_maybeNullPtrAdd(unsigned char* ptr, ptrdiff_t add)
{
/* arithmetic only happens when add > 0, so `NULL + 0` is never evaluated */
return add > 0 ? ptr + add : ptr;
}
/* Issue #3240 reports an ASAN failure on an llvm-mingw build. Out of an /* Issue #3240 reports an ASAN failure on an llvm-mingw build. Out of an
* abundance of caution, disable our custom poisoning on mingw. */ * abundance of caution, disable our custom poisoning on mingw. */
#ifdef __MINGW32__ #ifdef __MINGW32__

View File

@@ -1053,7 +1053,9 @@ MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
* The least significant cycleLog bits of the indices must remain the same, * The least significant cycleLog bits of the indices must remain the same,
* which may be 0. Every index up to maxDist in the past must be valid. * which may be 0. Every index up to maxDist in the past must be valid.
*/ */
MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog, MEM_STATIC
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
U32 maxDist, void const* src) U32 maxDist, void const* src)
{ {
/* preemptive overflow correction: /* preemptive overflow correction:
@@ -1246,7 +1248,9 @@ MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) {
* forget about the extDict. Handles overlap of the prefix and extDict. * forget about the extDict. Handles overlap of the prefix and extDict.
* Returns non-zero if the segment is contiguous. * Returns non-zero if the segment is contiguous.
*/ */
MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window, MEM_STATIC
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
U32 ZSTD_window_update(ZSTD_window_t* window,
void const* src, size_t srcSize, void const* src, size_t srcSize,
int forceNonContiguous) int forceNonContiguous)
{ {

View File

@@ -13,7 +13,9 @@
#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR #ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR
static void ZSTD_fillDoubleHashTableForCDict(ZSTD_matchState_t* ms, static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
void ZSTD_fillDoubleHashTableForCDict(ZSTD_matchState_t* ms,
void const* end, ZSTD_dictTableLoadMethod_e dtlm) void const* end, ZSTD_dictTableLoadMethod_e dtlm)
{ {
const ZSTD_compressionParameters* const cParams = &ms->cParams; const ZSTD_compressionParameters* const cParams = &ms->cParams;
@@ -49,7 +51,9 @@ static void ZSTD_fillDoubleHashTableForCDict(ZSTD_matchState_t* ms,
} } } }
} }
static void ZSTD_fillDoubleHashTableForCCtx(ZSTD_matchState_t* ms, static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
void ZSTD_fillDoubleHashTableForCCtx(ZSTD_matchState_t* ms,
void const* end, ZSTD_dictTableLoadMethod_e dtlm) void const* end, ZSTD_dictTableLoadMethod_e dtlm)
{ {
const ZSTD_compressionParameters* const cParams = &ms->cParams; const ZSTD_compressionParameters* const cParams = &ms->cParams;
@@ -97,6 +101,7 @@ void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
FORCE_INLINE_TEMPLATE FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_doubleFast_noDict_generic( size_t ZSTD_compressBlock_doubleFast_noDict_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize, U32 const mls /* template */) void const* src, size_t srcSize, U32 const mls /* template */)
@@ -307,6 +312,7 @@ _match_stored:
FORCE_INLINE_TEMPLATE FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic( size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize, void const* src, size_t srcSize,
@@ -591,7 +597,9 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState(
} }
static size_t ZSTD_compressBlock_doubleFast_extDict_generic( static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_doubleFast_extDict_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize, void const* src, size_t srcSize,
U32 const mls /* template */) U32 const mls /* template */)

View File

@@ -11,7 +11,9 @@
#include "zstd_compress_internal.h" /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */ #include "zstd_compress_internal.h" /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
#include "zstd_fast.h" #include "zstd_fast.h"
static void ZSTD_fillHashTableForCDict(ZSTD_matchState_t* ms, static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
void ZSTD_fillHashTableForCDict(ZSTD_matchState_t* ms,
const void* const end, const void* const end,
ZSTD_dictTableLoadMethod_e dtlm) ZSTD_dictTableLoadMethod_e dtlm)
{ {
@@ -46,7 +48,9 @@ static void ZSTD_fillHashTableForCDict(ZSTD_matchState_t* ms,
} } } } } } } }
} }
static void ZSTD_fillHashTableForCCtx(ZSTD_matchState_t* ms, static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
void ZSTD_fillHashTableForCCtx(ZSTD_matchState_t* ms,
const void* const end, const void* const end,
ZSTD_dictTableLoadMethod_e dtlm) ZSTD_dictTableLoadMethod_e dtlm)
{ {
@@ -139,8 +143,9 @@ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
* *
* This is also the work we do at the beginning to enter the loop initially. * This is also the work we do at the beginning to enter the loop initially.
*/ */
FORCE_INLINE_TEMPLATE size_t FORCE_INLINE_TEMPLATE
ZSTD_compressBlock_fast_noDict_generic( ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_fast_noDict_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize, void const* src, size_t srcSize,
U32 const mls, U32 const hasStep) U32 const mls, U32 const hasStep)
@@ -456,6 +461,7 @@ size_t ZSTD_compressBlock_fast(
} }
FORCE_INLINE_TEMPLATE FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_fast_dictMatchState_generic( size_t ZSTD_compressBlock_fast_dictMatchState_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize, U32 const mls, U32 const hasStep) void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
@@ -681,7 +687,9 @@ size_t ZSTD_compressBlock_fast_dictMatchState(
} }
static size_t ZSTD_compressBlock_fast_extDict_generic( static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_fast_extDict_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
void const* src, size_t srcSize, U32 const mls, U32 const hasStep) void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
{ {

View File

@@ -24,8 +24,9 @@
* Binary Tree search * Binary Tree search
***************************************/ ***************************************/
static void static
ZSTD_updateDUBT(ZSTD_matchState_t* ms, ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
void ZSTD_updateDUBT(ZSTD_matchState_t* ms,
const BYTE* ip, const BYTE* iend, const BYTE* ip, const BYTE* iend,
U32 mls) U32 mls)
{ {
@@ -68,8 +69,9 @@ ZSTD_updateDUBT(ZSTD_matchState_t* ms,
* sort one already inserted but unsorted position * sort one already inserted but unsorted position
* assumption : curr >= btlow == (curr - btmask) * assumption : curr >= btlow == (curr - btmask)
* doesn't fail */ * doesn't fail */
static void static
ZSTD_insertDUBT1(const ZSTD_matchState_t* ms, ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
void ZSTD_insertDUBT1(const ZSTD_matchState_t* ms,
U32 curr, const BYTE* inputEnd, U32 curr, const BYTE* inputEnd,
U32 nbCompares, U32 btLow, U32 nbCompares, U32 btLow,
const ZSTD_dictMode_e dictMode) const ZSTD_dictMode_e dictMode)
@@ -157,8 +159,9 @@ ZSTD_insertDUBT1(const ZSTD_matchState_t* ms,
} }
static size_t static
ZSTD_DUBT_findBetterDictMatch ( ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_DUBT_findBetterDictMatch (
const ZSTD_matchState_t* ms, const ZSTD_matchState_t* ms,
const BYTE* const ip, const BYTE* const iend, const BYTE* const ip, const BYTE* const iend,
size_t* offsetPtr, size_t* offsetPtr,
@@ -235,8 +238,9 @@ ZSTD_DUBT_findBetterDictMatch (
} }
static size_t static
ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms, ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
const BYTE* const ip, const BYTE* const iend, const BYTE* const ip, const BYTE* const iend,
size_t* offBasePtr, size_t* offBasePtr,
U32 const mls, U32 const mls,
@@ -386,8 +390,9 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */ /** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
FORCE_INLINE_TEMPLATE size_t FORCE_INLINE_TEMPLATE
ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms, ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
const BYTE* const ip, const BYTE* const iLimit, const BYTE* const ip, const BYTE* const iLimit,
size_t* offBasePtr, size_t* offBasePtr,
const U32 mls /* template */, const U32 mls /* template */,
@@ -622,7 +627,9 @@ size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nb
/* Update chains up to ip (excluded) /* Update chains up to ip (excluded)
Assumption : always within prefix (i.e. not within extDict) */ Assumption : always within prefix (i.e. not within extDict) */
FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal( FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
U32 ZSTD_insertAndFindFirstIndex_internal(
ZSTD_matchState_t* ms, ZSTD_matchState_t* ms,
const ZSTD_compressionParameters* const cParams, const ZSTD_compressionParameters* const cParams,
const BYTE* ip, U32 const mls, U32 const lazySkipping) const BYTE* ip, U32 const mls, U32 const lazySkipping)
@@ -656,6 +663,7 @@ U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
/* inlining is important to hardwire a hot branch (template emulation) */ /* inlining is important to hardwire a hot branch (template emulation) */
FORCE_INLINE_TEMPLATE FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_HcFindBestMatch( size_t ZSTD_HcFindBestMatch(
ZSTD_matchState_t* ms, ZSTD_matchState_t* ms,
const BYTE* const ip, const BYTE* const iLimit, const BYTE* const ip, const BYTE* const iLimit,
@@ -824,7 +832,9 @@ FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, BYTE const* t
* Fill up the hash cache starting at idx, prefetching up to ZSTD_ROW_HASH_CACHE_SIZE entries, * Fill up the hash cache starting at idx, prefetching up to ZSTD_ROW_HASH_CACHE_SIZE entries,
* but not beyond iLimit. * but not beyond iLimit.
*/ */
FORCE_INLINE_TEMPLATE void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const BYTE* base, FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const BYTE* base,
U32 const rowLog, U32 const mls, U32 const rowLog, U32 const mls,
U32 idx, const BYTE* const iLimit) U32 idx, const BYTE* const iLimit)
{ {
@@ -850,7 +860,9 @@ FORCE_INLINE_TEMPLATE void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const B
* Returns the hash of base + idx, and replaces the hash in the hash cache with the byte at * Returns the hash of base + idx, and replaces the hash in the hash cache with the byte at
* base + idx + ZSTD_ROW_HASH_CACHE_SIZE. Also prefetches the appropriate rows from hashTable and tagTable. * base + idx + ZSTD_ROW_HASH_CACHE_SIZE. Also prefetches the appropriate rows from hashTable and tagTable.
*/ */
FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable, FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable,
BYTE const* tagTable, BYTE const* base, BYTE const* tagTable, BYTE const* base,
U32 idx, U32 const hashLog, U32 idx, U32 const hashLog,
U32 const rowLog, U32 const mls, U32 const rowLog, U32 const mls,
@@ -868,10 +880,12 @@ FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTab
/* ZSTD_row_update_internalImpl(): /* ZSTD_row_update_internalImpl():
* Updates the hash table with positions starting from updateStartIdx until updateEndIdx. * Updates the hash table with positions starting from updateStartIdx until updateEndIdx.
*/ */
FORCE_INLINE_TEMPLATE void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms, FORCE_INLINE_TEMPLATE
U32 updateStartIdx, U32 const updateEndIdx, ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
U32 const mls, U32 const rowLog, void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms,
U32 const rowMask, U32 const useCache) U32 updateStartIdx, U32 const updateEndIdx,
U32 const mls, U32 const rowLog,
U32 const rowMask, U32 const useCache)
{ {
U32* const hashTable = ms->hashTable; U32* const hashTable = ms->hashTable;
BYTE* const tagTable = ms->tagTable; BYTE* const tagTable = ms->tagTable;
@@ -897,9 +911,11 @@ FORCE_INLINE_TEMPLATE void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms,
* Inserts the byte at ip into the appropriate position in the hash table, and updates ms->nextToUpdate. * Inserts the byte at ip into the appropriate position in the hash table, and updates ms->nextToUpdate.
* Skips sections of long matches as is necessary. * Skips sections of long matches as is necessary.
*/ */
FORCE_INLINE_TEMPLATE void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const BYTE* ip, FORCE_INLINE_TEMPLATE
U32 const mls, U32 const rowLog, ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
U32 const rowMask, U32 const useCache) void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const BYTE* ip,
U32 const mls, U32 const rowLog,
U32 const rowMask, U32 const useCache)
{ {
U32 idx = ms->nextToUpdate; U32 idx = ms->nextToUpdate;
const BYTE* const base = ms->window.base; const BYTE* const base = ms->window.base;
@@ -1121,6 +1137,7 @@ ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 headGr
* - Pick the longest match. * - Pick the longest match.
*/ */
FORCE_INLINE_TEMPLATE FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_RowFindBestMatch( size_t ZSTD_RowFindBestMatch(
ZSTD_matchState_t* ms, ZSTD_matchState_t* ms,
const BYTE* const ip, const BYTE* const iLimit, const BYTE* const ip, const BYTE* const iLimit,
@@ -1494,8 +1511,9 @@ FORCE_INLINE_TEMPLATE size_t ZSTD_searchMax(
* Common parser - lazy strategy * Common parser - lazy strategy
*********************************/ *********************************/
FORCE_INLINE_TEMPLATE size_t FORCE_INLINE_TEMPLATE
ZSTD_compressBlock_lazy_generic( ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_lazy_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore, ZSTD_matchState_t* ms, seqStore_t* seqStore,
U32 rep[ZSTD_REP_NUM], U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize, const void* src, size_t srcSize,
@@ -1915,6 +1933,7 @@ size_t ZSTD_compressBlock_btlazy2_dictMatchState(
|| !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \ || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR) \
|| !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) || !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR)
FORCE_INLINE_TEMPLATE FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_compressBlock_lazy_extDict_generic( size_t ZSTD_compressBlock_lazy_extDict_generic(
ZSTD_matchState_t* ms, seqStore_t* seqStore, ZSTD_matchState_t* ms, seqStore_t* seqStore,
U32 rep[ZSTD_REP_NUM], U32 rep[ZSTD_REP_NUM],

View File

@@ -322,7 +322,9 @@ static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
} }
} }
static size_t ZSTD_ldm_generateSequences_internal( static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_ldm_generateSequences_internal(
ldmState_t* ldmState, rawSeqStore_t* rawSeqStore, ldmState_t* ldmState, rawSeqStore_t* rawSeqStore,
ldmParams_t const* params, void const* src, size_t srcSize) ldmParams_t const* params, void const* src, size_t srcSize)
{ {

View File

@@ -405,9 +405,11 @@ MEM_STATIC U32 ZSTD_readMINMATCH(const void* memPtr, U32 length)
/* Update hashTable3 up to ip (excluded) /* Update hashTable3 up to ip (excluded)
Assumption : always within prefix (i.e. not within extDict) */ Assumption : always within prefix (i.e. not within extDict) */
static U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_matchState_t* ms, static
U32* nextToUpdate3, ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
const BYTE* const ip) U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_matchState_t* ms,
U32* nextToUpdate3,
const BYTE* const ip)
{ {
U32* const hashTable3 = ms->hashTable3; U32* const hashTable3 = ms->hashTable3;
U32 const hashLog3 = ms->hashLog3; U32 const hashLog3 = ms->hashLog3;
@@ -434,7 +436,9 @@ static U32 ZSTD_insertAndFindFirstIndexHash3 (const ZSTD_matchState_t* ms,
* @param ip assumed <= iend-8 . * @param ip assumed <= iend-8 .
* @param target The target of ZSTD_updateTree_internal() - we are filling to this position * @param target The target of ZSTD_updateTree_internal() - we are filling to this position
* @return : nb of positions added */ * @return : nb of positions added */
static U32 ZSTD_insertBt1( static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
U32 ZSTD_insertBt1(
const ZSTD_matchState_t* ms, const ZSTD_matchState_t* ms,
const BYTE* const ip, const BYTE* const iend, const BYTE* const ip, const BYTE* const iend,
U32 const target, U32 const target,
@@ -553,6 +557,7 @@ static U32 ZSTD_insertBt1(
} }
FORCE_INLINE_TEMPLATE FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
void ZSTD_updateTree_internal( void ZSTD_updateTree_internal(
ZSTD_matchState_t* ms, ZSTD_matchState_t* ms,
const BYTE* const ip, const BYTE* const iend, const BYTE* const ip, const BYTE* const iend,
@@ -578,7 +583,9 @@ void ZSTD_updateTree(ZSTD_matchState_t* ms, const BYTE* ip, const BYTE* iend) {
ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict); ZSTD_updateTree_internal(ms, ip, iend, ms->cParams.minMatch, ZSTD_noDict);
} }
FORCE_INLINE_TEMPLATE U32 FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
U32
ZSTD_insertBtAndGetAllMatches ( ZSTD_insertBtAndGetAllMatches (
ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */ ZSTD_match_t* matches, /* store result (found matches) in this table (presumed large enough) */
ZSTD_matchState_t* ms, ZSTD_matchState_t* ms,
@@ -819,7 +826,9 @@ typedef U32 (*ZSTD_getAllMatchesFn)(
U32 const ll0, U32 const ll0,
U32 const lengthToBeat); U32 const lengthToBeat);
FORCE_INLINE_TEMPLATE U32 ZSTD_btGetAllMatches_internal( FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
U32 ZSTD_btGetAllMatches_internal(
ZSTD_match_t* matches, ZSTD_match_t* matches,
ZSTD_matchState_t* ms, ZSTD_matchState_t* ms,
U32* nextToUpdate3, U32* nextToUpdate3,
@@ -1060,7 +1069,9 @@ listStats(const U32* table, int lastEltID)
#endif #endif
FORCE_INLINE_TEMPLATE size_t FORCE_INLINE_TEMPLATE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t
ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms, ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
seqStore_t* seqStore, seqStore_t* seqStore,
U32 rep[ZSTD_REP_NUM], U32 rep[ZSTD_REP_NUM],
@@ -1388,11 +1399,12 @@ size_t ZSTD_compressBlock_btopt(
* only works on first block, with no dictionary and no ldm. * only works on first block, with no dictionary and no ldm.
* this function cannot error out, its narrow contract must be respected. * this function cannot error out, its narrow contract must be respected.
*/ */
static void static
ZSTD_initStats_ultra(ZSTD_matchState_t* ms, ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
seqStore_t* seqStore, void ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
U32 rep[ZSTD_REP_NUM], seqStore_t* seqStore,
const void* src, size_t srcSize) U32 rep[ZSTD_REP_NUM],
const void* src, size_t srcSize)
{ {
U32 tmpRep[ZSTD_REP_NUM]; /* updated rep codes will sink here */ U32 tmpRep[ZSTD_REP_NUM]; /* updated rep codes will sink here */
ZSTD_memcpy(tmpRep, rep, sizeof(tmpRep)); ZSTD_memcpy(tmpRep, rep, sizeof(tmpRep));

View File

@@ -188,7 +188,7 @@ static size_t HUF_DecompressFastArgs_init(HUF_DecompressFastArgs* args, void* ds
const BYTE* const ilimit = (const BYTE*)src + 6 + 8; const BYTE* const ilimit = (const BYTE*)src + 6 + 8;
BYTE* const oend = (BYTE*)dst + dstSize; BYTE* const oend = ZSTD_maybeNullPtrAdd((BYTE*)dst, dstSize);
/* The fast decoding loop assumes 64-bit little-endian. /* The fast decoding loop assumes 64-bit little-endian.
* This condition is false on x32. * This condition is false on x32.
@@ -546,7 +546,7 @@ HUF_decompress1X1_usingDTable_internal_body(
const HUF_DTable* DTable) const HUF_DTable* DTable)
{ {
BYTE* op = (BYTE*)dst; BYTE* op = (BYTE*)dst;
BYTE* const oend = op + dstSize; BYTE* const oend = ZSTD_maybeNullPtrAdd(op, dstSize);
const void* dtPtr = DTable + 1; const void* dtPtr = DTable + 1;
const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr; const HUF_DEltX1* const dt = (const HUF_DEltX1*)dtPtr;
BIT_DStream_t bitD; BIT_DStream_t bitD;
@@ -574,6 +574,7 @@ HUF_decompress4X1_usingDTable_internal_body(
{ {
/* Check */ /* Check */
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
if (dstSize < 6) return ERROR(corruption_detected); /* stream 4-split doesn't work */
{ const BYTE* const istart = (const BYTE*) cSrc; { const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst; BYTE* const ostart = (BYTE*) dst;
@@ -609,7 +610,7 @@ HUF_decompress4X1_usingDTable_internal_body(
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */ if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */
if (dstSize < 6) return ERROR(corruption_detected); /* stream 4-split doesn't work */ assert(dstSize >= 6); /* validated above */
CHECK_F( BIT_initDStream(&bitD1, istart1, length1) ); CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
CHECK_F( BIT_initDStream(&bitD2, istart2, length2) ); CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
CHECK_F( BIT_initDStream(&bitD3, istart3, length3) ); CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
@@ -798,7 +799,7 @@ HUF_decompress4X1_usingDTable_internal_fast(
{ {
void const* dt = DTable + 1; void const* dt = DTable + 1;
const BYTE* const iend = (const BYTE*)cSrc + 6; const BYTE* const iend = (const BYTE*)cSrc + 6;
BYTE* const oend = (BYTE*)dst + dstSize; BYTE* const oend = ZSTD_maybeNullPtrAdd((BYTE*)dst, dstSize);
HUF_DecompressFastArgs args; HUF_DecompressFastArgs args;
{ size_t const ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable); { size_t const ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable);
FORWARD_IF_ERROR(ret, "Failed to init fast loop args"); FORWARD_IF_ERROR(ret, "Failed to init fast loop args");
@@ -1307,7 +1308,7 @@ HUF_decompress1X2_usingDTable_internal_body(
/* decode */ /* decode */
{ BYTE* const ostart = (BYTE*) dst; { BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize; BYTE* const oend = ZSTD_maybeNullPtrAdd(ostart, dstSize);
const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */ const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */
const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr; const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
DTableDesc const dtd = HUF_getDTableDesc(DTable); DTableDesc const dtd = HUF_getDTableDesc(DTable);
@@ -1332,6 +1333,7 @@ HUF_decompress4X2_usingDTable_internal_body(
const HUF_DTable* DTable) const HUF_DTable* DTable)
{ {
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
if (dstSize < 6) return ERROR(corruption_detected); /* stream 4-split doesn't work */
{ const BYTE* const istart = (const BYTE*) cSrc; { const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst; BYTE* const ostart = (BYTE*) dst;
@@ -1367,7 +1369,7 @@ HUF_decompress4X2_usingDTable_internal_body(
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */ if (opStart4 > oend) return ERROR(corruption_detected); /* overflow */
if (dstSize < 6) return ERROR(corruption_detected); /* stream 4-split doesn't work */ assert(dstSize >= 6 /* validated above */);
CHECK_F( BIT_initDStream(&bitD1, istart1, length1) ); CHECK_F( BIT_initDStream(&bitD1, istart1, length1) );
CHECK_F( BIT_initDStream(&bitD2, istart2, length2) ); CHECK_F( BIT_initDStream(&bitD2, istart2, length2) );
CHECK_F( BIT_initDStream(&bitD3, istart3, length3) ); CHECK_F( BIT_initDStream(&bitD3, istart3, length3) );
@@ -1612,7 +1614,7 @@ HUF_decompress4X2_usingDTable_internal_fast(
HUF_DecompressFastLoopFn loopFn) { HUF_DecompressFastLoopFn loopFn) {
void const* dt = DTable + 1; void const* dt = DTable + 1;
const BYTE* const iend = (const BYTE*)cSrc + 6; const BYTE* const iend = (const BYTE*)cSrc + 6;
BYTE* const oend = (BYTE*)dst + dstSize; BYTE* const oend = ZSTD_maybeNullPtrAdd((BYTE*)dst, dstSize);
HUF_DecompressFastArgs args; HUF_DecompressFastArgs args;
{ {
size_t const ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable); size_t const ret = HUF_DecompressFastArgs_init(&args, dst, dstSize, cSrc, cSrcSize, DTable);

View File

@@ -1058,7 +1058,9 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
return (size_t)(op-ostart); return (size_t)(op-ostart);
} }
static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx, static
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity, void* dst, size_t dstCapacity,
const void* src, size_t srcSize, const void* src, size_t srcSize,
const void* dict, size_t dictSize, const void* dict, size_t dictSize,

View File

@@ -902,6 +902,7 @@ static void ZSTD_safecopyDstBeforeSrc(BYTE* op, const BYTE* ip, ptrdiff_t length
* to be optimized for many small sequences, since those fall into ZSTD_execSequence(). * to be optimized for many small sequences, since those fall into ZSTD_execSequence().
*/ */
FORCE_NOINLINE FORCE_NOINLINE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_execSequenceEnd(BYTE* op, size_t ZSTD_execSequenceEnd(BYTE* op,
BYTE* const oend, seq_t sequence, BYTE* const oend, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit, const BYTE** litPtr, const BYTE* const litLimit,
@@ -949,6 +950,7 @@ size_t ZSTD_execSequenceEnd(BYTE* op,
* This version is intended to be used during instances where the litBuffer is still split. It is kept separate to avoid performance impact for the good case. * This version is intended to be used during instances where the litBuffer is still split. It is kept separate to avoid performance impact for the good case.
*/ */
FORCE_NOINLINE FORCE_NOINLINE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_execSequenceEndSplitLitBuffer(BYTE* op, size_t ZSTD_execSequenceEndSplitLitBuffer(BYTE* op,
BYTE* const oend, const BYTE* const oend_w, seq_t sequence, BYTE* const oend, const BYTE* const oend_w, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit, const BYTE** litPtr, const BYTE* const litLimit,
@@ -994,6 +996,7 @@ size_t ZSTD_execSequenceEndSplitLitBuffer(BYTE* op,
} }
HINT_INLINE HINT_INLINE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_execSequence(BYTE* op, size_t ZSTD_execSequence(BYTE* op,
BYTE* const oend, seq_t sequence, BYTE* const oend, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit, const BYTE** litPtr, const BYTE* const litLimit,
@@ -1092,6 +1095,7 @@ size_t ZSTD_execSequence(BYTE* op,
} }
HINT_INLINE HINT_INLINE
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
size_t ZSTD_execSequenceSplitLitBuffer(BYTE* op, size_t ZSTD_execSequenceSplitLitBuffer(BYTE* op,
BYTE* const oend, const BYTE* const oend_w, seq_t sequence, BYTE* const oend, const BYTE* const oend_w, seq_t sequence,
const BYTE** litPtr, const BYTE* const litLimit, const BYTE** litPtr, const BYTE* const litLimit,
@@ -1403,7 +1407,7 @@ ZSTD_decompressSequences_bodySplitLitBuffer( ZSTD_DCtx* dctx,
const BYTE* ip = (const BYTE*)seqStart; const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize; const BYTE* const iend = ip + seqSize;
BYTE* const ostart = (BYTE*)dst; BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + maxDstSize; BYTE* const oend = ZSTD_maybeNullPtrAdd(ostart, maxDstSize);
BYTE* op = ostart; BYTE* op = ostart;
const BYTE* litPtr = dctx->litPtr; const BYTE* litPtr = dctx->litPtr;
const BYTE* litBufferEnd = dctx->litBufferEnd; const BYTE* litBufferEnd = dctx->litBufferEnd;
@@ -1612,7 +1616,7 @@ ZSTD_decompressSequences_body(ZSTD_DCtx* dctx,
const BYTE* ip = (const BYTE*)seqStart; const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize; const BYTE* const iend = ip + seqSize;
BYTE* const ostart = (BYTE*)dst; BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = dctx->litBufferLocation == ZSTD_not_in_dst ? ostart + maxDstSize : dctx->litBuffer; BYTE* const oend = dctx->litBufferLocation == ZSTD_not_in_dst ? ZSTD_maybeNullPtrAdd(ostart, maxDstSize) : dctx->litBuffer;
BYTE* op = ostart; BYTE* op = ostart;
const BYTE* litPtr = dctx->litPtr; const BYTE* litPtr = dctx->litPtr;
const BYTE* const litEnd = litPtr + dctx->litSize; const BYTE* const litEnd = litPtr + dctx->litSize;
@@ -1700,14 +1704,16 @@ ZSTD_decompressSequencesSplitLitBuffer_default(ZSTD_DCtx* dctx,
#ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT #ifndef ZSTD_FORCE_DECOMPRESS_SEQUENCES_SHORT
FORCE_INLINE_TEMPLATE size_t FORCE_INLINE_TEMPLATE
ZSTD_prefetchMatch(size_t prefetchPos, seq_t const sequence,
size_t ZSTD_prefetchMatch(size_t prefetchPos, seq_t const sequence,
const BYTE* const prefixStart, const BYTE* const dictEnd) const BYTE* const prefixStart, const BYTE* const dictEnd)
{ {
prefetchPos += sequence.litLength; prefetchPos += sequence.litLength;
{ const BYTE* const matchBase = (sequence.offset > prefetchPos) ? dictEnd : prefixStart; { const BYTE* const matchBase = (sequence.offset > prefetchPos) ? dictEnd : prefixStart;
const BYTE* const match = matchBase + prefetchPos - sequence.offset; /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted. /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
* No consequence though : memory address is only used for prefetching, not for dereferencing */ * No consequence though : memory address is only used for prefetching, not for dereferencing */
const BYTE* const match = ZSTD_wrappedPtrSub(ZSTD_wrappedPtrAdd(matchBase, prefetchPos), sequence.offset);
PREFETCH_L1(match); PREFETCH_L1(match+CACHELINE_SIZE); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */ PREFETCH_L1(match); PREFETCH_L1(match+CACHELINE_SIZE); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
} }
return prefetchPos + sequence.matchLength; return prefetchPos + sequence.matchLength;
@@ -1727,7 +1733,7 @@ ZSTD_decompressSequencesLong_body(
const BYTE* ip = (const BYTE*)seqStart; const BYTE* ip = (const BYTE*)seqStart;
const BYTE* const iend = ip + seqSize; const BYTE* const iend = ip + seqSize;
BYTE* const ostart = (BYTE*)dst; BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = dctx->litBufferLocation == ZSTD_in_dst ? dctx->litBuffer : ostart + maxDstSize; BYTE* const oend = dctx->litBufferLocation == ZSTD_in_dst ? dctx->litBuffer : ZSTD_maybeNullPtrAdd(ostart, maxDstSize);
BYTE* op = ostart; BYTE* op = ostart;
const BYTE* litPtr = dctx->litPtr; const BYTE* litPtr = dctx->litPtr;
const BYTE* litBufferEnd = dctx->litBufferEnd; const BYTE* litBufferEnd = dctx->litBufferEnd;
@@ -2088,7 +2094,7 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
* Additionally, take the min with dstCapacity to ensure that the totalHistorySize fits in a size_t. * Additionally, take the min with dstCapacity to ensure that the totalHistorySize fits in a size_t.
*/ */
size_t const blockSizeMax = MIN(dstCapacity, ZSTD_blockSizeMax(dctx)); size_t const blockSizeMax = MIN(dstCapacity, ZSTD_blockSizeMax(dctx));
size_t const totalHistorySize = ZSTD_totalHistorySize((BYTE*)dst + blockSizeMax, (BYTE const*)dctx->virtualStart); size_t const totalHistorySize = ZSTD_totalHistorySize(ZSTD_maybeNullPtrAdd((BYTE*)dst, blockSizeMax), (BYTE const*)dctx->virtualStart);
/* isLongOffset must be true if there are long offsets. /* isLongOffset must be true if there are long offsets.
* Offsets are long if they are larger than ZSTD_maxShortOffset(). * Offsets are long if they are larger than ZSTD_maxShortOffset().
* We don't expect that to be the case in 64-bit mode. * We don't expect that to be the case in 64-bit mode.
@@ -2168,6 +2174,7 @@ ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
} }
ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize) void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst, size_t dstSize)
{ {
if (dst != dctx->previousDstEnd && dstSize > 0) { /* not contiguous */ if (dst != dctx->previousDstEnd && dstSize > 0) { /* not contiguous */
@@ -2187,6 +2194,7 @@ size_t ZSTD_decompressBlock_deprecated(ZSTD_DCtx* dctx,
dctx->isFrameDecompression = 0; dctx->isFrameDecompression = 0;
ZSTD_checkContinuity(dctx, dst, dstCapacity); ZSTD_checkContinuity(dctx, dst, dstCapacity);
dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, not_streaming); dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, not_streaming);
FORWARD_IF_ERROR(dSize, "");
dctx->previousDstEnd = (char*)dst + dSize; dctx->previousDstEnd = (char*)dst + dSize;
return dSize; return dSize;
} }

View File

@@ -124,6 +124,20 @@ MEM_STATIC size_t ZSTD_decompressLegacy(
const void* dict,size_t dictSize) const void* dict,size_t dictSize)
{ {
U32 const version = ZSTD_isLegacy(src, compressedSize); U32 const version = ZSTD_isLegacy(src, compressedSize);
char x;
/* Avoid passing NULL to legacy decoding. */
if (dst == NULL) {
assert(dstCapacity == 0);
dst = &x;
}
if (src == NULL) {
assert(compressedSize == 0);
src = &x;
}
if (dict == NULL) {
assert(dictSize == 0);
dict = &x;
}
(void)dst; (void)dstCapacity; (void)dict; (void)dictSize; /* unused when ZSTD_LEGACY_SUPPORT >= 8 */ (void)dst; (void)dstCapacity; (void)dict; (void)dictSize; /* unused when ZSTD_LEGACY_SUPPORT >= 8 */
switch(version) switch(version)
{ {
@@ -287,6 +301,12 @@ MEM_STATIC size_t ZSTD_freeLegacyStreamContext(void* legacyContext, U32 version)
MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U32 newVersion, MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U32 newVersion,
const void* dict, size_t dictSize) const void* dict, size_t dictSize)
{ {
char x;
/* Avoid passing NULL to legacy decoding. */
if (dict == NULL) {
assert(dictSize == 0);
dict = &x;
}
DEBUGLOG(5, "ZSTD_initLegacyStream for v0.%u", newVersion); DEBUGLOG(5, "ZSTD_initLegacyStream for v0.%u", newVersion);
if (prevVersion != newVersion) ZSTD_freeLegacyStreamContext(*legacyContext, prevVersion); if (prevVersion != newVersion) ZSTD_freeLegacyStreamContext(*legacyContext, prevVersion);
switch(newVersion) switch(newVersion)
@@ -346,6 +366,16 @@ MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U
MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version, MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version,
ZSTD_outBuffer* output, ZSTD_inBuffer* input) ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{ {
static char x;
/* Avoid passing NULL to legacy decoding. */
if (output->dst == NULL) {
assert(output->size == 0);
output->dst = &x;
}
if (input->src == NULL) {
assert(input->size == 0);
input->src = &x;
}
DEBUGLOG(5, "ZSTD_decompressLegacyStream for v0.%u", version); DEBUGLOG(5, "ZSTD_decompressLegacyStream for v0.%u", version);
switch(version) switch(version)
{ {

View File

@@ -14,6 +14,7 @@
******************************************/ ******************************************/
#include <stddef.h> /* size_t, ptrdiff_t */ #include <stddef.h> /* size_t, ptrdiff_t */
#include "zstd_v01.h" #include "zstd_v01.h"
#include "../common/compiler.h"
#include "../common/error_private.h" #include "../common/error_private.h"
@@ -2118,6 +2119,7 @@ size_t ZSTDv01_decompressContinue(ZSTDv01_Dctx* dctx, void* dst, size_t maxDstSi
} }
ctx->phase = 1; ctx->phase = 1;
ctx->expected = ZSTD_blockHeaderSize; ctx->expected = ZSTD_blockHeaderSize;
if (ZSTDv01_isError(rSize)) return rSize;
ctx->previousDstEnd = (void*)( ((char*)dst) + rSize); ctx->previousDstEnd = (void*)( ((char*)dst) + rSize);
return rSize; return rSize;
} }

View File

@@ -11,6 +11,7 @@
#include <stddef.h> /* size_t, ptrdiff_t */ #include <stddef.h> /* size_t, ptrdiff_t */
#include "zstd_v02.h" #include "zstd_v02.h"
#include "../common/compiler.h"
#include "../common/error_private.h" #include "../common/error_private.h"
@@ -71,20 +72,6 @@ extern "C" {
#include <string.h> /* memcpy */ #include <string.h> /* memcpy */
/******************************************
* Compiler-specific
******************************************/
#if defined(__GNUC__)
# define MEM_STATIC static __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# define MEM_STATIC static inline
#elif defined(_MSC_VER)
# define MEM_STATIC static __inline
#else
# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif
/**************************************************************** /****************************************************************
* Basic Types * Basic Types
*****************************************************************/ *****************************************************************/
@@ -3431,6 +3418,7 @@ static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSi
} }
ctx->phase = 1; ctx->phase = 1;
ctx->expected = ZSTD_blockHeaderSize; ctx->expected = ZSTD_blockHeaderSize;
if (ZSTD_isError(rSize)) return rSize;
ctx->previousDstEnd = (void*)( ((char*)dst) + rSize); ctx->previousDstEnd = (void*)( ((char*)dst) + rSize);
return rSize; return rSize;
} }

View File

@@ -11,6 +11,7 @@
#include <stddef.h> /* size_t, ptrdiff_t */ #include <stddef.h> /* size_t, ptrdiff_t */
#include "zstd_v03.h" #include "zstd_v03.h"
#include "../common/compiler.h"
#include "../common/error_private.h" #include "../common/error_private.h"
@@ -72,20 +73,6 @@ extern "C" {
#include <string.h> /* memcpy */ #include <string.h> /* memcpy */
/******************************************
* Compiler-specific
******************************************/
#if defined(__GNUC__)
# define MEM_STATIC static __attribute__((unused))
#elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
# define MEM_STATIC static inline
#elif defined(_MSC_VER)
# define MEM_STATIC static __inline
#else
# define MEM_STATIC static /* this version may generate warnings for unused static functions; disable the relevant warning */
#endif
/**************************************************************** /****************************************************************
* Basic Types * Basic Types
*****************************************************************/ *****************************************************************/
@@ -3071,6 +3058,7 @@ static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSi
} }
ctx->phase = 1; ctx->phase = 1;
ctx->expected = ZSTD_blockHeaderSize; ctx->expected = ZSTD_blockHeaderSize;
if (ZSTD_isError(rSize)) return rSize;
ctx->previousDstEnd = (void*)( ((char*)dst) + rSize); ctx->previousDstEnd = (void*)( ((char*)dst) + rSize);
return rSize; return rSize;
} }

View File

@@ -16,6 +16,7 @@
#include <string.h> /* memcpy */ #include <string.h> /* memcpy */
#include "zstd_v04.h" #include "zstd_v04.h"
#include "../common/compiler.h"
#include "../common/error_private.h" #include "../common/error_private.h"
@@ -3209,6 +3210,7 @@ static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSi
} }
ctx->stage = ZSTDds_decodeBlockHeader; ctx->stage = ZSTDds_decodeBlockHeader;
ctx->expected = ZSTD_blockHeaderSize; ctx->expected = ZSTD_blockHeaderSize;
if (ZSTD_isError(rSize)) return rSize;
ctx->previousDstEnd = (char*)dst + rSize; ctx->previousDstEnd = (char*)dst + rSize;
return rSize; return rSize;
} }
@@ -3536,8 +3538,8 @@ static size_t ZBUFF_decompressContinue(ZBUFF_DCtx* zbc, void* dst, size_t* maxDs
unsigned ZBUFFv04_isError(size_t errorCode) { return ERR_isError(errorCode); } unsigned ZBUFFv04_isError(size_t errorCode) { return ERR_isError(errorCode); }
const char* ZBUFFv04_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); } const char* ZBUFFv04_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
size_t ZBUFFv04_recommendedDInSize() { return BLOCKSIZE + 3; } size_t ZBUFFv04_recommendedDInSize(void) { return BLOCKSIZE + 3; }
size_t ZBUFFv04_recommendedDOutSize() { return BLOCKSIZE; } size_t ZBUFFv04_recommendedDOutSize(void) { return BLOCKSIZE; }

View File

@@ -3600,6 +3600,7 @@ size_t ZSTDv05_decompressContinue(ZSTDv05_DCtx* dctx, void* dst, size_t maxDstSi
} }
dctx->stage = ZSTDv05ds_decodeBlockHeader; dctx->stage = ZSTDv05ds_decodeBlockHeader;
dctx->expected = ZSTDv05_blockHeaderSize; dctx->expected = ZSTDv05_blockHeaderSize;
if (ZSTDv05_isError(rSize)) return rSize;
dctx->previousDstEnd = (char*)dst + rSize; dctx->previousDstEnd = (char*)dst + rSize;
return rSize; return rSize;
} }

View File

@@ -14,6 +14,7 @@
#include <stddef.h> /* size_t, ptrdiff_t */ #include <stddef.h> /* size_t, ptrdiff_t */
#include <string.h> /* memcpy */ #include <string.h> /* memcpy */
#include <stdlib.h> /* malloc, free, qsort */ #include <stdlib.h> /* malloc, free, qsort */
#include "../common/compiler.h"
#include "../common/error_private.h" #include "../common/error_private.h"
@@ -3736,6 +3737,7 @@ size_t ZSTDv06_decompressContinue(ZSTDv06_DCtx* dctx, void* dst, size_t dstCapac
} }
dctx->stage = ZSTDds_decodeBlockHeader; dctx->stage = ZSTDds_decodeBlockHeader;
dctx->expected = ZSTDv06_blockHeaderSize; dctx->expected = ZSTDv06_blockHeaderSize;
if (ZSTDv06_isError(rSize)) return rSize;
dctx->previousDstEnd = (char*)dst + rSize; dctx->previousDstEnd = (char*)dst + rSize;
return rSize; return rSize;
} }

View File

@@ -24,6 +24,7 @@
#define HUFv07_STATIC_LINKING_ONLY /* HUFv07_TABLELOG_ABSOLUTEMAX */ #define HUFv07_STATIC_LINKING_ONLY /* HUFv07_TABLELOG_ABSOLUTEMAX */
#define ZSTDv07_STATIC_LINKING_ONLY #define ZSTDv07_STATIC_LINKING_ONLY
#include "../common/compiler.h"
#include "../common/error_private.h" #include "../common/error_private.h"
@@ -4006,8 +4007,8 @@ size_t ZSTDv07_decompressContinue(ZSTDv07_DCtx* dctx, void* dst, size_t dstCapac
} }
dctx->stage = ZSTDds_decodeBlockHeader; dctx->stage = ZSTDds_decodeBlockHeader;
dctx->expected = ZSTDv07_blockHeaderSize; dctx->expected = ZSTDv07_blockHeaderSize;
dctx->previousDstEnd = (char*)dst + rSize;
if (ZSTDv07_isError(rSize)) return rSize; if (ZSTDv07_isError(rSize)) return rSize;
dctx->previousDstEnd = (char*)dst + rSize;
if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize); if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize);
return rSize; return rSize;
} }

View File

@@ -732,7 +732,7 @@ generateSequences(U32* seed, frame_t* frame, seqStore_t* seqStore,
} }
} while (((!info.useDict) && (offset > (size_t)((BYTE*)srcPtr - (BYTE*)frame->srcStart))) || offset == 0); } while (((!info.useDict) && (offset > (size_t)((BYTE*)srcPtr - (BYTE*)frame->srcStart))) || offset == 0);
{ BYTE* const dictEnd = info.dictContent + info.dictContentSize; { BYTE* const dictEnd = ZSTD_maybeNullPtrAdd(info.dictContent, info.dictContentSize);
size_t j; size_t j;
for (j = 0; j < matchLen; j++) { for (j = 0; j < matchLen; j++) {
if ((U32)((BYTE*)srcPtr - (BYTE*)frame->srcStart) < offset) { if ((U32)((BYTE*)srcPtr - (BYTE*)frame->srcStart) < offset) {

View File

@@ -250,10 +250,10 @@ def build_parser(args):
action='store_true', action='store_true',
help='Enable UBSAN') help='Enable UBSAN')
parser.add_argument( parser.add_argument(
'--enable-ubsan-pointer-overflow', '--disable-ubsan-pointer-overflow',
dest='ubsan_pointer_overflow', dest='ubsan_pointer_overflow',
action='store_true', action='store_false',
help='Enable UBSAN pointer overflow check (known failure)') help='Disable UBSAN pointer overflow check (known failure)')
parser.add_argument( parser.add_argument(
'--enable-msan', dest='msan', action='store_true', help='Enable MSAN') '--enable-msan', dest='msan', action='store_true', help='Enable MSAN')
parser.add_argument( parser.add_argument(
@@ -383,8 +383,6 @@ def build_parser(args):
raise RuntimeError('MSAN may not be used with any other sanitizers') raise RuntimeError('MSAN may not be used with any other sanitizers')
if args.msan_track_origins and not args.msan: if args.msan_track_origins and not args.msan:
raise RuntimeError('--enable-msan-track-origins requires MSAN') raise RuntimeError('--enable-msan-track-origins requires MSAN')
if args.ubsan_pointer_overflow and not args.ubsan:
raise RuntimeError('--enable-ubsan-pointer-overflow requires UBSAN')
if args.sanitize_recover and not args.sanitize: if args.sanitize_recover and not args.sanitize:
raise RuntimeError('--enable-sanitize-recover but no sanitizers used') raise RuntimeError('--enable-sanitize-recover but no sanitizers used')

View File

@@ -116,7 +116,7 @@ static size_t decodeSequences(void* dst, size_t nbSequences,
} }
} }
for (; j < matchLength; ++j) { for (; j < matchLength; ++j) {
op[j] = op[j - generatedSequences[i].offset]; op[j] = op[(ptrdiff_t)(j - generatedSequences[i].offset)];
} }
op += j; op += j;
FUZZ_ASSERT(generatedSequences[i].matchLength == j + k); FUZZ_ASSERT(generatedSequences[i].matchLength == j + k);

View File

@@ -328,7 +328,7 @@ static void FUZ_decodeSequences(BYTE* dst, ZSTD_Sequence* seqs, size_t seqsSize,
if (seqs[i].offset != 0) { if (seqs[i].offset != 0) {
for (j = 0; j < seqs[i].matchLength; ++j) for (j = 0; j < seqs[i].matchLength; ++j)
dst[j] = dst[j - seqs[i].offset]; dst[j] = dst[(ptrdiff_t)(j - seqs[i].offset)];
dst += seqs[i].matchLength; dst += seqs[i].matchLength;
src += seqs[i].matchLength; src += seqs[i].matchLength;
size -= seqs[i].matchLength; size -= seqs[i].matchLength;
@@ -3684,11 +3684,13 @@ static int basicUnitTests(U32 const seed, double compressibility)
/* Test with block delimiters roundtrip */ /* Test with block delimiters roundtrip */
seqsSize = ZSTD_generateSequences(cctx, seqs, srcSize, src, srcSize); seqsSize = ZSTD_generateSequences(cctx, seqs, srcSize, src, srcSize);
CHECK_Z(seqsSize);
FUZ_decodeSequences(decoded, seqs, seqsSize, src, srcSize, ZSTD_sf_explicitBlockDelimiters); FUZ_decodeSequences(decoded, seqs, seqsSize, src, srcSize, ZSTD_sf_explicitBlockDelimiters);
assert(!memcmp(CNBuffer, compressedBuffer, srcSize)); assert(!memcmp(CNBuffer, compressedBuffer, srcSize));
/* Test no block delimiters roundtrip */ /* Test no block delimiters roundtrip */
seqsSize = ZSTD_mergeBlockDelimiters(seqs, seqsSize); seqsSize = ZSTD_mergeBlockDelimiters(seqs, seqsSize);
CHECK_Z(seqsSize);
FUZ_decodeSequences(decoded, seqs, seqsSize, src, srcSize, ZSTD_sf_noBlockDelimiters); FUZ_decodeSequences(decoded, seqs, seqsSize, src, srcSize, ZSTD_sf_noBlockDelimiters);
assert(!memcmp(CNBuffer, compressedBuffer, srcSize)); assert(!memcmp(CNBuffer, compressedBuffer, srcSize));