diff --git a/.buckconfig b/.buckconfig index d698b35ba..483f6053b 100644 --- a/.buckconfig +++ b/.buckconfig @@ -1,7 +1,7 @@ [cxx] - cppflags = -DXXH_NAMESPACE=ZSTD_ -DZSTD_LEGACY_SUPPORT=1 + cppflags = -DXXH_NAMESPACE=ZSTD_ -DZSTD_LEGACY_SUPPORT=4 cflags = -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement -Wstrict-prototypes -Wundef -Wpointer-arith - cxxppflags = -DXXH_NAMESPACE=ZSTD_ -DZSTD_LEGACY_SUPPORT=1 + cxxppflags = -DXXH_NAMESPACE=ZSTD_ -DZSTD_LEGACY_SUPPORT=4 cxxflags = -std=c++11 -Wno-deprecated-declarations gtest_dep = //contrib/pzstd:gtest diff --git a/.gitignore b/.gitignore index e02119883..5fe9afd41 100644 --- a/.gitignore +++ b/.gitignore @@ -24,6 +24,12 @@ zstdmt tmp* dictionary* +# Build artefacts +projects/ +bin/ +.buckd/ +buck-out/ + # Other files .directory _codelite/ @@ -34,8 +40,3 @@ _zstdbench/ .DS_Store googletest/ *.d - -# Directories -bin/ -.buckd/ -buck-out/ diff --git a/.travis.yml b/.travis.yml index 9c1e10e15..a52d57af3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,39 +1,39 @@ +# Medium Tests: Run on all commits/PRs to dev branch + language: c sudo: required dist: trusty matrix: - fast_finish: true include: # Ubuntu 14.04 - - env: Cmd="make libc6install && make -C tests test32" - - env: Cmd='make valgrindinstall arminstall ppcinstall arm-ppc-compilation && make clean lib && CFLAGS="-O1 -g" make -C zlibWrapper valgrindTest && make -C tests valgrindTest' + - env: Cmd='make gcc6install && CC=gcc-6 make clean uasan-test-zstd' + - env: Cmd='make gcc6install libc6install && CC=gcc-6 make clean uasan-test-zstd32' + - env: Cmd='make clang38install && CC=clang-3.8 make clean msan-test-zstd' - - env: Cmd='CC=gcc-6 make gcc6install uasan-test' - - env: Cmd='CC=gcc-6 make gcc6install uasan-test32' - - env: Cmd="make arminstall armtest && make clean && make aarch64test" - - env: Cmd='make ppcinstall ppctest && make clean && make ppc64test' - - env: Cmd='make gpp6install zlibwrapper 
&& make -C tests clean test-zstd-nolegacy && make -C tests versionsTest && make clean && cd contrib/pzstd && make test-pzstd && make test-pzstd32 && make test-pzstd-tsan && make test-pzstd-asan' - install: - - export CXX="g++-6" CC="gcc-6" + - env: Cmd='make gcc6install && CC=gcc-6 make clean uasan-fuzztest' + - env: Cmd='make gcc6install libc6install && CC=gcc-6 CFLAGS=-m32 make clean uasan-fuzztest' + - env: Cmd='make clang38install && CC=clang-3.8 make clean msan-fuzztest' + - env: Cmd='make clang38install && CC=clang-3.8 make clean tsan-test-zstream' - # OS X Mavericks - - env: Cmd="make gnu90build && make clean && make test && make clean && make travis-install" - os: osx + - env: Cmd='make valgrindinstall && make -C tests clean valgrindTest' + + - env: Cmd='make arminstall && make armfuzz' + - env: Cmd='make arminstall && make aarch64fuzz' + - env: Cmd='make ppcinstall && make ppcfuzz' + - env: Cmd='make ppcinstall && make ppc64fuzz' + +git: + depth: 1 + +branches: + only: + - dev + - master script: - JOB_NUMBER=$(echo $TRAVIS_JOB_NUMBER | sed -e 's:[0-9][0-9]*\.\(.*\):\1:') - # cron & master => full tests, as this is the final step towards a Release - # pull requests => normal tests (job numbers 1-3) - # other feature branches => short tests (job numbers 1-2) - echo JOB_NUMBER=$JOB_NUMBER TRAVIS_BRANCH=$TRAVIS_BRANCH TRAVIS_EVENT_TYPE=$TRAVIS_EVENT_TYPE TRAVIS_PULL_REQUEST=$TRAVIS_PULL_REQUEST - - if [ "$TRAVIS_EVENT_TYPE" = "cron" ] || [ "$TRAVIS_BRANCH" = "master" ]; then - FUZZERTEST=-T7mn sh -c "$Cmd" || travis_terminate 1; - else - if [ "$TRAVIS_EVENT_TYPE" = "pull_request" ] && [ $JOB_NUMBER -lt 4 ]; then - sh -c "$Cmd" || travis_terminate 1; - else - if [ $JOB_NUMBER -lt 3 ]; then - sh -c "$Cmd" || travis_terminate 1; - fi - fi - fi + - export FUZZERTEST=-T2mn; + export ZSTREAM_TESTTIME=-T2mn; + export DECODECORPUS_TESTTIME=-T1mn; + sh -c "$Cmd" || travis_terminate 1; diff --git a/Makefile b/Makefile index e10d29267..54652665b 100644 --- a/Makefile +++ 
b/Makefile @@ -90,6 +90,10 @@ examples: manual: $(MAKE) -C contrib/gen_html $@ +.PHONY: cleanTabs +cleanTabs: + cd contrib; ./cleanTabs + .PHONY: clean clean: @$(MAKE) -C $(ZSTDDIR) $@ > $(VOID) @@ -105,9 +109,15 @@ clean: # make install is validated only for Linux, OSX, Hurd and some BSD targets #------------------------------------------------------------------------------ ifneq (,$(filter $(shell uname),Linux Darwin GNU/kFreeBSD GNU FreeBSD DragonFly NetBSD)) -HOST_OS = POSIX -.PHONY: install uninstall travis-install clangtest gpptest armtest usan asan uasan +HOST_OS = POSIX +CMAKE_PARAMS = -DZSTD_BUILD_CONTRIB:BOOL=ON -DZSTD_BUILD_STATIC:BOOL=ON -DZSTD_BUILD_TESTS:BOOL=ON -DZSTD_ZLIB_SUPPORT:BOOL=ON -DZSTD_LZMA_SUPPORT:BOOL=ON + +.PHONY: list +list: + @$(MAKE) -pRrq -f $(lastword $(MAKEFILE_LIST)) : 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | sort | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' | xargs + +.PHONY: install uninstall travis-install clangtest gpptest armtest usan asan uasan install: @$(MAKE) -C $(ZSTDDIR) $@ @$(MAKE) -C $(PRGDIR) $@ @@ -151,6 +161,18 @@ ppcbuild: clean ppc64build: clean CC=powerpc-linux-gnu-gcc CFLAGS="-m64 -Werror" $(MAKE) allarch +armfuzz: clean + CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static MOREFLAGS="-static" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) fuzztest + +aarch64fuzz: clean + CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static MOREFLAGS="-static" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) fuzztest + +ppcfuzz: clean + CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static MOREFLAGS="-static" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) fuzztest + +ppc64fuzz: clean + CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static MOREFLAGS="-m64 -static" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) fuzztest + gpptest: clean CC=g++ $(MAKE) -C $(PRGDIR) all CFLAGS="-O3 -Wall -Wextra -Wundef -Wshadow -Wcast-align -Werror" @@ -168,19 +190,19 @@ clangtest: 
clean armtest: clean $(MAKE) -C $(TESTDIR) datagen # use native, faster - $(MAKE) -C $(TESTDIR) test CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static ZSTDRTTEST= MOREFLAGS="-Werror -static" + $(MAKE) -C $(TESTDIR) test CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static ZSTDRTTEST= MOREFLAGS="-Werror -static" FUZZER_FLAGS=--no-big-tests aarch64test: $(MAKE) -C $(TESTDIR) datagen # use native, faster - $(MAKE) -C $(TESTDIR) test CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static ZSTDRTTEST= MOREFLAGS="-Werror -static" + $(MAKE) -C $(TESTDIR) test CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static ZSTDRTTEST= MOREFLAGS="-Werror -static" FUZZER_FLAGS=--no-big-tests ppctest: clean $(MAKE) -C $(TESTDIR) datagen # use native, faster - $(MAKE) -C $(TESTDIR) test CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static ZSTDRTTEST= MOREFLAGS="-Werror -Wno-attributes -static" + $(MAKE) -C $(TESTDIR) test CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static ZSTDRTTEST= MOREFLAGS="-Werror -Wno-attributes -static" FUZZER_FLAGS=--no-big-tests ppc64test: clean $(MAKE) -C $(TESTDIR) datagen # use native, faster - $(MAKE) -C $(TESTDIR) test CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static ZSTDRTTEST= MOREFLAGS="-m64 -static" + $(MAKE) -C $(TESTDIR) test CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static ZSTDRTTEST= MOREFLAGS="-m64 -static" FUZZER_FLAGS=--no-big-tests arm-ppc-compilation: $(MAKE) -C $(PRGDIR) clean zstd CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static ZSTDRTTEST= MOREFLAGS="-Werror -static" @@ -188,24 +210,36 @@ arm-ppc-compilation: $(MAKE) -C $(PRGDIR) clean zstd CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static ZSTDRTTEST= MOREFLAGS="-Werror -Wno-attributes -static" $(MAKE) -C $(PRGDIR) clean zstd CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static ZSTDRTTEST= MOREFLAGS="-m64 -static" +# run UBsan with -fsanitize-recover=signed-integer-overflow +# due to a bug in UBsan when doing pointer subtraction +# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63303 + usan: 
clean - $(MAKE) test CC=clang MOREFLAGS="-g -fsanitize=undefined" + $(MAKE) test CC=clang MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize-recover=signed-integer-overflow -fsanitize=undefined" asan: clean $(MAKE) test CC=clang MOREFLAGS="-g -fsanitize=address" +asan-%: clean + LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=address" $(MAKE) -C $(TESTDIR) $* + msan: clean $(MAKE) test CC=clang MOREFLAGS="-g -fsanitize=memory -fno-omit-frame-pointer" # datagen.c fails this test for no obvious reason +msan-%: clean + LDFLAGS=-fuse-ld=gold MOREFLAGS="-fno-sanitize-recover=all -fsanitize=memory -fno-omit-frame-pointer" $(MAKE) -C $(TESTDIR) $* + asan32: clean $(MAKE) -C $(TESTDIR) test32 CC=clang MOREFLAGS="-g -fsanitize=address" uasan: clean - $(MAKE) test CC=clang MOREFLAGS="-g -fsanitize=address -fsanitize=undefined" + $(MAKE) test CC=clang MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize-recover=signed-integer-overflow -fsanitize=address,undefined" uasan-%: clean - LDFLAGS=-fuse-ld=gold CFLAGS="-Og -fsanitize=address -fsanitize=undefined" $(MAKE) -C $(TESTDIR) $* + LDFLAGS=-fuse-ld=gold MOREFLAGS="-Og -fno-sanitize-recover=all -fsanitize-recover=signed-integer-overflow -fsanitize=address,undefined" $(MAKE) -C $(TESTDIR) $* +tsan-%: clean + LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=thread" $(MAKE) -C $(TESTDIR) $* apt-install: sudo apt-get -yq --no-install-suggests --no-install-recommends --force-yes install $(APT_PACKAGES) @@ -217,7 +251,7 @@ ppcinstall: APT_PACKAGES="qemu-system-ppc qemu-user-static gcc-powerpc-linux-gnu" $(MAKE) apt-install arminstall: - APT_PACKAGES="qemu-system-arm qemu-user-static gcc-powerpc-linux-gnu gcc-arm-linux-gnueabi libc6-dev-armel-cross gcc-aarch64-linux-gnu libc6-dev-arm64-cross" $(MAKE) apt-install + APT_PACKAGES="qemu-system-arm qemu-user-static gcc-arm-linux-gnueabi libc6-dev-armel-cross gcc-aarch64-linux-gnu libc6-dev-arm64-cross" $(MAKE) apt-install valgrindinstall: 
APT_PACKAGES="valgrind" $(MAKE) apt-install @@ -231,12 +265,15 @@ gcc6install: apt-add-repo gpp6install: apt-add-repo APT_PACKAGES="libc6-dev-i386 g++-multilib gcc-6 g++-6 g++-6-multilib" $(MAKE) apt-install +clang38install: + APT_PACKAGES="clang-3.8" $(MAKE) apt-install + endif ifneq (,$(filter MSYS%,$(shell uname))) HOST_OS = MSYS -CMAKE_PARAMS = -G"MSYS Makefiles" +CMAKE_PARAMS = -G"MSYS Makefiles" -DZSTD_MULTITHREAD_SUPPORT:BOOL=OFF -DZSTD_BUILD_STATIC:BOOL=ON -DZSTD_BUILD_TESTS:BOOL=ON endif @@ -248,7 +285,7 @@ cmakebuild: cmake --version $(RM) -r $(BUILDIR)/cmake/build mkdir $(BUILDIR)/cmake/build - cd $(BUILDIR)/cmake/build ; cmake -DPREFIX:STRING=~/install_test_dir $(CMAKE_PARAMS) .. ; $(MAKE) install ; $(MAKE) uninstall + cd $(BUILDIR)/cmake/build ; cmake -DCMAKE_INSTALL_PREFIX:PATH=~/install_test_dir $(CMAKE_PARAMS) .. ; $(MAKE) install ; $(MAKE) uninstall c90build: clean gcc -v diff --git a/NEWS b/NEWS index 9073a8724..7d9c9c94e 100644 --- a/NEWS +++ b/NEWS @@ -1,15 +1,39 @@ +v1.2.0 +cli : changed : Multithreading enabled by default (use target zstd-nomt or HAVE_THREAD=0 to disable) +cli : new : command -T0 means "detect and use nb of cores", by Sean Purcell +cli : new : zstdmt symlink hardwired to `zstd -T0` +cli : new : command --threads=# (#671) +cli : changed : cover dictionary builder by default, for improved quality, by Nick Terrell +cli : new : commands --train-cover and --train-legacy, to select dictionary algorithm and parameters +cli : experimental targets `zstd4` and `xzstd4`, with support for lz4 format, by Sean Purcell +cli : fix : does not output compressed data on console +cli : fix : ignore symbolic links unless --force specified, +API : breaking change : ZSTD_createCDict_advanced(), only use compressionParameters as argument +API : added : prototypes ZSTD_*_usingCDict_advanced(), for direct control over frameParameters. 
+API : improved: ZSTDMT_compressCCtx() reduced memory usage +API : fix : ZSTDMT_compressCCtx() now provides srcSize in header (#634) +API : fix : src size stored in frame header is controlled at end of frame +API : fix : enforced consistent rules for pledgedSrcSize==0 (#641) +API : fix : error code "GENERIC" replaced by "dstSizeTooSmall" when appropriate +build: improved cmake script, by @Majlen +build: enabled Multi-threading support for *BSD, by Baptiste Daroussin +tools: updated Paramgrill. Command -O# provides best parameters for sample and speed target. +new : contrib/linux-kernel version, by Nick Terrell + v1.1.4 cli : new : can compress in *.gz format, using --format=gzip command, by Przemyslaw Skibinski cli : new : advanced benchmark command --priority=rt cli : fix : write on sparse-enabled file systems in 32-bits mode, by @ds77 cli : fix : --rm remains silent when input is stdin +cli : experimental : xzstd, with support for xz/lzma decoding, by Przemyslaw Skibinski speed : improved decompression speed in streaming mode for single shot scenarios (+5%) -memory : DDict (decompression dictionary) memory usage down from 150 KB to 20 KB -arch : 32-bits variant able to generate and decode very long matches (>32 MB), by Sean Purcell +memory: DDict (decompression dictionary) memory usage down from 150 KB to 20 KB +arch: 32-bits variant able to generate and decode very long matches (>32 MB), by Sean Purcell API : new : ZSTD_findFrameCompressedSize(), ZSTD_getFrameContentSize(), ZSTD_findDecompressedSize() -build: new: meson build system in contrib/meson, by Dima Krasner -build: improved cmake script, by @Majlen -build: added -Wformat-security flag, as recommended by Padraig Brady +API : changed : dropped support of legacy versions <= v0.3 (can be changed by modifying ZSTD_LEGACY_SUPPORT value) +build : new: meson build system in contrib/meson, by Dima Krasner +build : improved cmake script, by @Majlen +build : added -Wformat-security flag, as recommended by Padraig 
Brady doc : new : educational decoder, by Sean Purcell v1.1.3 diff --git a/README.md b/README.md index 6de5a1079..eee92f917 100644 --- a/README.md +++ b/README.md @@ -6,19 +6,26 @@ and a command line utility producing and decoding `.zst` and `.gz` files. For other programming languages, you can consult a list of known ports on [Zstandard homepage](http://www.zstd.net/#other-languages). -|Branch |Status | -|------------|---------| -|master | [![Build Status](https://travis-ci.org/facebook/zstd.svg?branch=master)](https://travis-ci.org/facebook/zstd) | -|dev | [![Build Status](https://travis-ci.org/facebook/zstd.svg?branch=dev)](https://travis-ci.org/facebook/zstd) | +| dev branch status | +|-------------------| +| [![Build Status][travisDevBadge]][travisLink] [![Build status][AppveyorDevBadge]][AppveyorLink] [![Build status][CircleDevBadge]][CircleLink] + +[travisDevBadge]: https://travis-ci.org/facebook/zstd.svg?branch=dev "Continuous Integration test suite" +[travisLink]: https://travis-ci.org/facebook/zstd +[AppveyorDevBadge]: https://ci.appveyor.com/api/projects/status/xt38wbdxjk5mrbem/branch/dev?svg=true "Windows test suite" +[AppveyorLink]: https://ci.appveyor.com/project/YannCollet/zstd-p0yf0 +[CircleDevBadge]: https://circleci.com/gh/facebook/zstd/tree/dev.svg?style=shield "Short test suite" +[CircleLink]: https://circleci.com/gh/facebook/zstd + As a reference, several fast compression algorithms were tested and compared -on a server running Linux Mint Debian Edition (`Linux version 4.8.0-1-amd64`), +on a server running Linux Debian (`Linux version 4.8.0-1-amd64`), with a Core i7-6700K CPU @ 4.0GHz, -using [lzbench v1.6], an open-source in-memory benchmark by @inikep +using [lzbench], an open-source in-memory benchmark by @inikep compiled with GCC 6.3.0, on the [Silesia compression corpus]. 
-[lzbench v1.6]: https://github.com/inikep/lzbench +[lzbench]: https://github.com/inikep/lzbench [Silesia compression corpus]: http://sun.aei.polsl.pl/~sdeor/index.php?page=silesia | Compressor name | Ratio | Compression| Decompress.| @@ -38,7 +45,12 @@ on the [Silesia compression corpus]. Zstd can also offer stronger compression ratios at the cost of compression speed. Speed vs Compression trade-off is configurable by small increments. Decompression speed is preserved and remains roughly the same at all settings, a property shared by most LZ compression algorithms, such as [zlib] or lzma. -The following tests were run on a Core i7-3930K CPU @ 4.5GHz, using [lzbench], an open-source in-memory benchmark by @inikep compiled with GCC 5.2.1, on the [Silesia compression corpus]. +The following tests were run +on a server running Linux Debian (`Linux version 4.8.0-1-amd64`) +with a Core i7-6700K CPU @ 4.0GHz, +using [lzbench], an open-source in-memory benchmark by @inikep +compiled with GCC 6.3.0, +on the [Silesia compression corpus]. Compression Speed vs Ratio | Decompression Speed ---------------------------|-------------------- diff --git a/TESTING.md b/TESTING.md new file mode 100644 index 000000000..1fa5fe8c2 --- /dev/null +++ b/TESTING.md @@ -0,0 +1,44 @@ +Testing +======= + +Zstandard CI testing is split up into three sections: +short, medium, and long tests. + +Short Tests +----------- +Short tests run on CircleCI for new commits on every branch and pull request. +They consist of the following tests: +- Compilation on all supported targets (x86, x86_64, ARM, AArch64, PowerPC, and PowerPC64) +- Compilation on various versions of gcc, clang, and g++ +- `tests/playTests.sh` on x86_64, without the tests on long data (CLI tests) +- Small tests (`tests/legacy.c`, `tests/longmatch.c`, `tests/symbols.c`) on x86_64 + +Medium Tests +------------ +Medium tests run on every commit and pull request to `dev` branch, on TravisCI.
+They consist of the following tests: +- The following tests run with UBsan and Asan on x86_64 and x86, as well as with + Msan on x86_64 + - `tests/playTests.sh --test-long-data` + - Fuzzer tests: `tests/fuzzer.c`, `tests/zstreamtest.c`, and `tests/decodecorpus.c` +- `tests/zstreamtest.c` under Tsan (streaming mode, including multithreaded mode) +- Valgrind Test (`make -C tests valgrindTest`) (testing CLI and fuzzer under valgrind) +- Fuzzer tests (see above) on ARM, AArch64, PowerPC, and PowerPC64 + +Long Tests +---------- +Long tests run on all commits to `master` branch, +and once a day on the current version of `dev` branch, +on TravisCI. +They consist of the following tests: +- Entire test suite (including fuzzers and some other specialized tests) on: + - x86_64 and x86 with UBsan and Asan + - x86_64 with Msan + - ARM, AArch64, PowerPC, and PowerPC64 +- Streaming mode fuzzer with Tsan (for the `zstdmt` testing) +- ZlibWrapper tests, including under valgrind +- Versions test (ensuring `zstd` can decode files from all previous versions) +- `pzstd` with asan and tsan, as well as in 32-bits mode +- Testing `zstd` with legacy mode off +- Testing `zbuff` (old streaming API) +- Entire test suite and make install on OS X diff --git a/appveyor.yml b/appveyor.yml index 51ff488a4..1f8b8cf8a 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -1,66 +1,101 @@ -version: 1.0.{build} -environment: - matrix: - - COMPILER: "gcc" - PLATFORM: "mingw64" - MAKE_PARAMS: '"make test && make lib && make -C tests test-symbols fullbench-dll fullbench-lib"' - - COMPILER: "gcc" - PLATFORM: "mingw32" - MAKE_PARAMS: '"make -C tests test-zstd test-fullbench test-fuzzer test-invalidDictionaries"' - - COMPILER: "gcc" - PLATFORM: "clang" - MAKE_PARAMS: '"make -C tests zstd fullbench fuzzer paramgrill datagen CC=clang MOREFLAGS="--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion""' - - COMPILER: "visual" - CONFIGURATION: "Debug" - PLATFORM: "x64" - - COMPILER: "visual" - 
CONFIGURATION: "Debug" - PLATFORM: "Win32" - - COMPILER: "visual" - CONFIGURATION: "Release" - PLATFORM: "x64" - - COMPILER: "visual" - CONFIGURATION: "Release" - PLATFORM: "Win32" +- + version: 1.0.{build} + branches: + only: + - dev + - master + environment: + matrix: + - COMPILER: "gcc" + HOST: "mingw" + PLATFORM: "x64" + SCRIPT: "make allarch && make -C tests test-symbols fullbench-dll fullbench-lib" + ARTIFACT: "true" + BUILD: "true" + - COMPILER: "gcc" + HOST: "mingw" + PLATFORM: "x86" + SCRIPT: "make allarch" + ARTIFACT: "true" + BUILD: "true" + - COMPILER: "clang" + HOST: "mingw" + PLATFORM: "x64" + SCRIPT: "MOREFLAGS='--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion' make allarch" + BUILD: "true" -install: + - COMPILER: "gcc" + HOST: "mingw" + PLATFORM: "x64" + SCRIPT: "" + TEST: "cmake" + + - COMPILER: "gcc" + HOST: "mingw" + PLATFORM: "x64" + SCRIPT: "" + TEST: "pzstd" + + - COMPILER: "visual" + HOST: "visual" + PLATFORM: "x64" + CONFIGURATION: "Debug" + - COMPILER: "visual" + HOST: "visual" + PLATFORM: "Win32" + CONFIGURATION: "Debug" + - COMPILER: "visual" + HOST: "visual" + PLATFORM: "x64" + CONFIGURATION: "Release" + - COMPILER: "visual" + HOST: "visual" + PLATFORM: "Win32" + CONFIGURATION: "Release" + + install: - ECHO Installing %COMPILER% %PLATFORM% %CONFIGURATION% - - MKDIR bin - - if [%COMPILER%]==[gcc] SET PATH_ORIGINAL=%PATH% - - if [%COMPILER%]==[gcc] ( - SET "PATH_MINGW32=c:\MinGW\bin;c:\MinGW\usr\bin" && - SET "PATH_MINGW64=c:\msys64\mingw64\bin;c:\msys64\usr\bin" && - COPY C:\msys64\usr\bin\make.exe C:\MinGW\bin\make.exe && - COPY C:\MinGW\bin\gcc.exe C:\MinGW\bin\cc.exe - ) else ( - IF [%PLATFORM%]==[x64] (SET ADDITIONALPARAM=/p:LibraryPath="C:\Program Files\Microsoft SDKs\Windows\v7.1\lib\x64;c:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\lib\amd64;C:\Program Files (x86)\Microsoft Visual Studio 10.0\;C:\Program Files (x86)\Microsoft Visual Studio 10.0\lib\amd64;") + - SET PATH_ORIGINAL=%PATH% + - if 
[%HOST%]==[mingw] ( + SET "PATH_MINGW32=C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin" && + SET "PATH_MINGW64=C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin" && + COPY C:\msys64\usr\bin\make.exe C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin\make.exe && + COPY C:\msys64\usr\bin\make.exe C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin\make.exe + ) + - IF [%HOST%]==[visual] IF [%PLATFORM%]==[x64] ( + SET ADDITIONALPARAM=/p:LibraryPath="C:\Program Files\Microsoft SDKs\Windows\v7.1\lib\x64;c:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\lib\amd64;C:\Program Files (x86)\Microsoft Visual Studio 10.0\;C:\Program Files (x86)\Microsoft Visual Studio 10.0\lib\amd64;" ) -build_script: - - ECHO Building %COMPILER% %PLATFORM% %CONFIGURATION% - - if [%PLATFORM%]==[mingw32] SET PATH=%PATH_MINGW32%;%PATH_ORIGINAL% - - if [%PLATFORM%]==[mingw64] SET PATH=%PATH_MINGW64%;%PATH_ORIGINAL% - - if [%PLATFORM%]==[clang] SET PATH=%PATH_MINGW64%;%PATH_ORIGINAL% - - if [%COMPILER%]==[gcc] ( - ECHO *** && - ECHO *** Building %PLATFORM% && - ECHO *** && + build_script: + - if [%HOST%]==[mingw] ( + ( if [%PLATFORM%]==[x64] ( + SET "PATH=%PATH_MINGW64%;%PATH_ORIGINAL%" + ) else if [%PLATFORM%]==[x86] ( + SET "PATH=%PATH_MINGW32%;%PATH_ORIGINAL%" + ) ) + ) + - if [%HOST%]==[mingw] if [%BUILD%]==[true] ( make -v && - cc -v && - ECHO %MAKE_PARAMS% && - sh -c %MAKE_PARAMS% + sh -c "%COMPILER% -v" && + ECHO Building zlib to static link && + SET "CC=%COMPILER%" && + sh -c "cd .. 
&& git clone --depth 1 --branch v1.2.11 https://github.com/madler/zlib" && + sh -c "cd ../zlib && make -f win32/Makefile.gcc libz.a" + ECHO Building zstd && + SET "CPPFLAGS=-I../../zlib" && + SET "LDFLAGS=../../zlib/libz.a" && + sh -c "%SCRIPT%" && + ( if [%COMPILER%]==[gcc] if [%ARTIFACT%]==[true] + lib\dll\example\build_package.bat && + make -C programs DEBUGFLAGS= clean zstd && + cd programs\ && 7z a -tzip -mx9 zstd-win-binary-%PLATFORM%.zip zstd.exe && + appveyor PushArtifact zstd-win-binary-%PLATFORM%.zip && + cp zstd.exe ..\bin\zstd.exe && + cd ..\bin\ && 7z a -tzip -mx9 zstd-win-release-%PLATFORM%.zip * && + appveyor PushArtifact zstd-win-release-%PLATFORM%.zip + ) ) - - if [%PLATFORM%]==[clang] COPY tests\fuzzer.exe tests\fuzzer_clang.exe - - if [%COMPILER%]==[gcc] if [%PLATFORM%]==[mingw64] ( - COPY programs\zstd.exe bin\zstd.exe && - appveyor PushArtifact bin\zstd.exe - ) - - if [%COMPILER%]==[gcc] if [%PLATFORM%]==[mingw32] ( - COPY programs\zstd.exe bin\zstd32.exe && - appveyor PushArtifact bin\zstd32.exe - ) - - if [%COMPILER%]==[gcc] make clean - - if [%COMPILER%]==[visual] ( + - if [%HOST%]==[visual] ( ECHO *** && ECHO *** Building Visual Studio 2008 %PLATFORM%\%CONFIGURATION% in %APPVEYOR_BUILD_FOLDER% && ECHO *** && @@ -111,29 +146,26 @@ build_script: COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe tests\ ) -test_script: + test_script: - ECHO Testing %COMPILER% %PLATFORM% %CONFIGURATION% - - SET FUZZERTEST=-T1mn - - if [%COMPILER%]==[gcc] if [%PLATFORM%]==[clang] ( - tests\fuzzer_clang.exe %FUZZERTEST% && - ECHO *** && - ECHO *** Building cmake for %PLATFORM% && - ECHO *** && + - SET "CC=gcc" + - SET "CXX=g++" + - if [%TEST%]==[cmake] ( mkdir build\cmake\build && cd build\cmake\build && cmake -G "Visual Studio 14 2015 Win64" .. && cd ..\..\.. 
&& - make clean && - ECHO *** && - ECHO *** Building pzstd for %PLATFORM% && - ECHO *** && + make clean + ) + - if [%TEST%]==[pzstd] ( make -C contrib\pzstd googletest-mingw64 && make -C contrib\pzstd pzstd.exe && make -C contrib\pzstd tests && make -C contrib\pzstd check && make -C contrib\pzstd clean ) - - if [%COMPILER%]==[visual] if [%CONFIGURATION%]==[Release] ( + - SET "FUZZERTEST=-T30s" + - if [%HOST%]==[visual] if [%CONFIGURATION%]==[Release] ( CD tests && SET ZSTD=./zstd.exe && sh -e playTests.sh --test-large-data && @@ -146,28 +178,76 @@ test_script: fuzzer_VS2015_%PLATFORM%_Release.exe %FUZZERTEST% ) -artifacts: - - path: bin\zstd.exe - - path: bin\zstd32.exe +- + version: 1.0.{build} + environment: + matrix: + - COMPILER: "gcc" + HOST: "mingw" + PLATFORM: "x64" + SCRIPT: "make allarch" + - COMPILER: "gcc" + HOST: "mingw" + PLATFORM: "x86" + SCRIPT: "make allarch" + - COMPILER: "clang" + HOST: "mingw" + PLATFORM: "x64" + SCRIPT: "MOREFLAGS='--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion' make allarch" -deploy: -- provider: GitHub - auth_token: - secure: LgJo8emYc3sFnlNWkGl4/VYK3nk/8+RagcsqDlAi3xeqNGNutnKjcftjg84uJoT4 - artifact: bin\zstd.exe - force_update: true - on: - branch: autobuild - COMPILER: gcc - PLATFORM: "mingw64" - appveyor_repo_tag: true -- provider: GitHub - auth_token: - secure: LgJo8emYc3sFnlNWkGl4/VYK3nk/8+RagcsqDlAi3xeqNGNutnKjcftjg84uJoT4 - artifact: bin\zstd32.exe - force_update: true - on: - branch: autobuild - COMPILER: gcc - PLATFORM: "mingw32" - appveyor_repo_tag: true + - COMPILER: "visual" + HOST: "visual" + PLATFORM: "x64" + CONFIGURATION: "Debug" + - COMPILER: "visual" + HOST: "visual" + PLATFORM: "Win32" + CONFIGURATION: "Debug" + - COMPILER: "visual" + HOST: "visual" + PLATFORM: "x64" + CONFIGURATION: "Release" + - COMPILER: "visual" + HOST: "visual" + PLATFORM: "Win32" + CONFIGURATION: "Release" + + install: + - ECHO Installing %COMPILER% %PLATFORM% %CONFIGURATION% + - SET PATH_ORIGINAL=%PATH% + - if 
[%HOST%]==[mingw] ( + SET "PATH_MINGW32=C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin" && + SET "PATH_MINGW64=C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin" && + COPY C:\msys64\usr\bin\make.exe C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin\make.exe && + COPY C:\msys64\usr\bin\make.exe C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin\make.exe + ) + - IF [%HOST%]==[visual] IF [%PLATFORM%]==[x64] ( + SET ADDITIONALPARAM=/p:LibraryPath="C:\Program Files\Microsoft SDKs\Windows\v7.1\lib\x64;c:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\lib\amd64;C:\Program Files (x86)\Microsoft Visual Studio 10.0\;C:\Program Files (x86)\Microsoft Visual Studio 10.0\lib\amd64;" + ) + + build_script: + - ECHO Building %COMPILER% %PLATFORM% %CONFIGURATION% + - if [%HOST%]==[mingw] ( + ( if [%PLATFORM%]==[x64] ( + SET "PATH=%PATH_MINGW64%;%PATH_ORIGINAL%" + ) else if [%PLATFORM%]==[x86] ( + SET "PATH=%PATH_MINGW32%;%PATH_ORIGINAL%" + ) ) && + make -v && + sh -c "%COMPILER% -v" && + set "CC=%COMPILER%" && + sh -c "%SCRIPT%" + ) + - if [%HOST%]==[visual] ( + ECHO *** && + ECHO *** Building Visual Studio 2015 %PLATFORM%\%CONFIGURATION% && + ECHO *** && + msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v140 /p:ForceImportBeforeCppTargets=%APPVEYOR_BUILD_FOLDER%\build\VS2010\CompileAsCpp.props /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && + DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && + MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && + msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v140 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" && + DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe && + MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe && + 
COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\fuzzer.exe tests\fuzzer_VS2015_%PLATFORM%_%CONFIGURATION%.exe && + COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe tests\ + ) diff --git a/build/.gitignore b/build/.gitignore index f03aac8b3..85bc9287d 100644 --- a/build/.gitignore +++ b/build/.gitignore @@ -17,4 +17,4 @@ VS2013/bin/ VS2015/bin/ # CMake -cmake/ +cmake/build/ diff --git a/build/VS2005/zstd/zstd.vcproj b/build/VS2005/zstd/zstd.vcproj index 58f254bc8..46cabbf6e 100644 --- a/build/VS2005/zstd/zstd.vcproj +++ b/build/VS2005/zstd/zstd.vcproj @@ -44,7 +44,7 @@ Name="VCCLCompilerTool" Optimization="0" AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\compress" - PreprocessorDefinitions="ZSTD_LEGACY_SUPPORT=1;WIN32;_DEBUG;_CONSOLE" + PreprocessorDefinitions="ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;_DEBUG;_CONSOLE" MinimalRebuild="true" BasicRuntimeChecks="3" RuntimeLibrary="3" @@ -121,7 +121,7 @@ EnableIntrinsicFunctions="true" OmitFramePointers="true" AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\compress" - PreprocessorDefinitions="ZSTD_LEGACY_SUPPORT=1;WIN32;NDEBUG;_CONSOLE" + PreprocessorDefinitions="ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;NDEBUG;_CONSOLE" RuntimeLibrary="0" EnableFunctionLevelLinking="true" UsePrecompiledHeader="0" @@ -196,7 +196,7 @@ Name="VCCLCompilerTool" Optimization="0" AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\compress" - PreprocessorDefinitions="ZSTD_LEGACY_SUPPORT=1;WIN32;_DEBUG;_CONSOLE" + PreprocessorDefinitions="ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;_DEBUG;_CONSOLE" MinimalRebuild="true" BasicRuntimeChecks="3" 
RuntimeLibrary="3" @@ -274,7 +274,7 @@ EnableIntrinsicFunctions="true" OmitFramePointers="true" AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\compress" - PreprocessorDefinitions="ZSTD_LEGACY_SUPPORT=1;WIN32;NDEBUG;_CONSOLE" + PreprocessorDefinitions="ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;NDEBUG;_CONSOLE" RuntimeLibrary="0" EnableFunctionLevelLinking="true" UsePrecompiledHeader="0" diff --git a/build/VS2005/zstdlib/zstdlib.vcproj b/build/VS2005/zstdlib/zstdlib.vcproj index f4c9950ff..f77df786f 100644 --- a/build/VS2005/zstdlib/zstdlib.vcproj +++ b/build/VS2005/zstdlib/zstdlib.vcproj @@ -44,7 +44,7 @@ Name="VCCLCompilerTool" Optimization="0" AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\programs\legacy;$(SolutionDir)..\..\lib\dictBuilder" - PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;_DEBUG;_CONSOLE" + PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;_DEBUG;_CONSOLE" MinimalRebuild="true" BasicRuntimeChecks="3" RuntimeLibrary="3" @@ -120,7 +120,7 @@ EnableIntrinsicFunctions="true" OmitFramePointers="true" AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\programs\legacy;$(SolutionDir)..\..\lib\dictBuilder" - PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;NDEBUG;_CONSOLE" + PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;NDEBUG;_CONSOLE" RuntimeLibrary="0" EnableFunctionLevelLinking="true" UsePrecompiledHeader="0" @@ -194,7 +194,7 @@ Name="VCCLCompilerTool" Optimization="0" 
AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\programs\legacy;$(SolutionDir)..\..\lib\dictBuilder" - PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;_DEBUG;_CONSOLE" + PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;_DEBUG;_CONSOLE" MinimalRebuild="true" BasicRuntimeChecks="3" RuntimeLibrary="3" @@ -271,7 +271,7 @@ EnableIntrinsicFunctions="true" OmitFramePointers="true" AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\programs\legacy;$(SolutionDir)..\..\lib\dictBuilder" - PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;NDEBUG;_CONSOLE" + PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;NDEBUG;_CONSOLE" RuntimeLibrary="0" EnableFunctionLevelLinking="true" UsePrecompiledHeader="0" diff --git a/build/VS2008/fuzzer/fuzzer.vcproj b/build/VS2008/fuzzer/fuzzer.vcproj index 72540d243..f1719e8ac 100644 --- a/build/VS2008/fuzzer/fuzzer.vcproj +++ b/build/VS2008/fuzzer/fuzzer.vcproj @@ -44,7 +44,7 @@ true false - $(IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\dictBuilder;$(UniversalCRT_IncludePath); + $(IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\compress;$(UniversalCRT_IncludePath); true false - $(IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\dictBuilder;$(UniversalCRT_IncludePath); + 
$(IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\compress;$(UniversalCRT_IncludePath); false false - $(IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\dictBuilder;$(UniversalCRT_IncludePath); + $(IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\compress;$(UniversalCRT_IncludePath); false false - $(IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\dictBuilder;$(UniversalCRT_IncludePath); + $(IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\compress;$(UniversalCRT_IncludePath); diff --git a/build/VS2010/libzstd-dll/libzstd-dll.vcxproj b/build/VS2010/libzstd-dll/libzstd-dll.vcxproj index f78598fb4..364b3bea5 100644 --- a/build/VS2010/libzstd-dll/libzstd-dll.vcxproj +++ b/build/VS2010/libzstd-dll/libzstd-dll.vcxproj @@ -149,7 +149,7 @@ Level4 Disabled - ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL @@ -169,7 +169,7 @@ Level4 Disabled - ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) true EnableFastChecks 
MultiThreadedDebugDLL @@ -189,7 +189,7 @@ MaxSpeed true true - ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) false MultiThreaded ProgramDatabase @@ -211,7 +211,7 @@ MaxSpeed true true - ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) false false MultiThreaded diff --git a/build/VS2010/libzstd/libzstd.vcxproj b/build/VS2010/libzstd/libzstd.vcxproj index 727795514..6087d737c 100644 --- a/build/VS2010/libzstd/libzstd.vcxproj +++ b/build/VS2010/libzstd/libzstd.vcxproj @@ -146,7 +146,7 @@ Level4 Disabled - ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL @@ -166,7 +166,7 @@ Level4 Disabled - ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) true EnableFastChecks MultiThreadedDebugDLL @@ -186,7 +186,7 @@ MaxSpeed true true - ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) false MultiThreaded ProgramDatabase @@ -208,7 +208,7 @@ MaxSpeed true true - 
ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) false false MultiThreaded diff --git a/build/VS2010/zstd/zstd.vcxproj b/build/VS2010/zstd/zstd.vcxproj index 62c0fe10f..438dc6173 100644 --- a/build/VS2010/zstd/zstd.vcxproj +++ b/build/VS2010/zstd/zstd.vcxproj @@ -155,7 +155,7 @@ Level4 Disabled - ZSTD_LEGACY_SUPPORT=1;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true false @@ -171,7 +171,7 @@ Level4 Disabled - ZSTD_LEGACY_SUPPORT=1;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) + ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions) true false @@ -189,7 +189,7 @@ MaxSpeed true true - ZSTD_LEGACY_SUPPORT=1;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) false false MultiThreaded @@ -210,7 +210,7 @@ MaxSpeed true true - ZSTD_LEGACY_SUPPORT=1;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) + ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions) false false MultiThreaded diff --git a/build/cmake/.gitignore b/build/cmake/.gitignore index 98f29c79d..ad4283f99 100644 --- a/build/cmake/.gitignore +++ b/build/cmake/.gitignore @@ -1,6 +1,7 @@ -# cmake producted +# cmake artefacts CMakeCache.txt CMakeFiles Makefile cmake_install.cmake cmake_uninstall.cmake +*.1 diff --git a/build/cmake/CMakeLists.txt b/build/cmake/CMakeLists.txt index 6b7c28925..5c4eca61c 100644 --- a/build/cmake/CMakeLists.txt +++ b/build/cmake/CMakeLists.txt @@ -8,33 +8,61 @@ # ################################################################ PROJECT(zstd) -CMAKE_MINIMUM_REQUIRED(VERSION 2.8.7) +CMAKE_MINIMUM_REQUIRED(VERSION 2.8.9) +SET(ZSTD_SOURCE_DIR 
"${CMAKE_CURRENT_SOURCE_DIR}/../..") +LIST(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules") +#----------------------------------------------------------------------------- +# Add extra compilation flags +#----------------------------------------------------------------------------- +INCLUDE(AddZstdCompilationFlags) +ADD_ZSTD_COMPILATION_FLAGS() + +#----------------------------------------------------------------------------- +# Options +#----------------------------------------------------------------------------- OPTION(ZSTD_LEGACY_SUPPORT "LEGACY SUPPORT" OFF) -OPTION(ZSTD_MULTITHREAD_SUPPORT "MULTITHREADING SUPPORT" ON) +IF (UNIX) + OPTION(ZSTD_MULTITHREAD_SUPPORT "MULTITHREADING SUPPORT" ON) +ELSE (UNIX) + OPTION(ZSTD_MULTITHREAD_SUPPORT "MULTITHREADING SUPPORT" OFF) +ENDIF (UNIX) +OPTION(ZSTD_BUILD_PROGRAMS "BUILD PROGRAMS" ON) OPTION(ZSTD_BUILD_CONTRIB "BUILD CONTRIB" OFF) +OPTION(ZSTD_BUILD_TESTS "BUILD TESTS" OFF) IF (ZSTD_LEGACY_SUPPORT) MESSAGE(STATUS "ZSTD_LEGACY_SUPPORT defined!") - ADD_DEFINITIONS(-DZSTD_LEGACY_SUPPORT=1) + ADD_DEFINITIONS(-DZSTD_LEGACY_SUPPORT=4) ELSE (ZSTD_LEGACY_SUPPORT) MESSAGE(STATUS "ZSTD_LEGACY_SUPPORT not defined!") ADD_DEFINITIONS(-DZSTD_LEGACY_SUPPORT=0) ENDIF (ZSTD_LEGACY_SUPPORT) +#----------------------------------------------------------------------------- +# Add source directories +#----------------------------------------------------------------------------- ADD_SUBDIRECTORY(lib) -ADD_SUBDIRECTORY(programs) -ADD_SUBDIRECTORY(tests) + +IF (ZSTD_BUILD_PROGRAMS) + ADD_SUBDIRECTORY(programs) +ENDIF (ZSTD_BUILD_PROGRAMS) + +IF (ZSTD_BUILD_TESTS) + IF (NOT ZSTD_BUILD_STATIC) + MESSAGE(SEND_ERROR "You need to build static library to build tests") + ENDIF (NOT ZSTD_BUILD_STATIC) + + ADD_SUBDIRECTORY(tests) +ENDIF (ZSTD_BUILD_TESTS) + IF (ZSTD_BUILD_CONTRIB) ADD_SUBDIRECTORY(contrib) ENDIF (ZSTD_BUILD_CONTRIB) #----------------------------------------------------------------------------- -# Add extra 
compilation flags +# Add clean-all target #----------------------------------------------------------------------------- -INCLUDE(CMakeModules/AddExtraCompilationFlags.cmake) -ADD_EXTRA_COMPILATION_FLAGS() - ADD_CUSTOM_TARGET(clean-all COMMAND ${CMAKE_BUILD_TOOL} clean COMMAND rm -rf ${CMAKE_BINARY_DIR}/ diff --git a/build/cmake/CMakeModules/AddExtraCompilationFlags.cmake b/build/cmake/CMakeModules/AddExtraCompilationFlags.cmake deleted file mode 100644 index e480c7ead..000000000 --- a/build/cmake/CMakeModules/AddExtraCompilationFlags.cmake +++ /dev/null @@ -1,329 +0,0 @@ -MACRO(ADD_EXTRA_COMPILATION_FLAGS) - include(CheckCXXCompilerFlag) - include(CheckCCompilerFlag) - if (CMAKE_COMPILER_IS_GNUCXX OR MINGW) #Not only UNIX but also WIN32 for MinGW - - set(POSITION_INDEPENDENT_CODE_FLAG "-fPIC") - CHECK_C_COMPILER_FLAG(${POSITION_INDEPENDENT_CODE_FLAG} POSITION_INDEPENDENT_CODE_FLAG_ALLOWED) - if (POSITION_INDEPENDENT_CODE_FLAG_ALLOWED) - MESSAGE("Compiler flag ${POSITION_INDEPENDENT_CODE_FLAG} allowed") - set(ACTIVATE_POSITION_INDEPENDENT_CODE_FLAG "ON" CACHE BOOL "activate -fPIC flag") - else () - MESSAGE("Compiler flag ${POSITION_INDEPENDENT_CODE_FLAG} not allowed") - endif (POSITION_INDEPENDENT_CODE_FLAG_ALLOWED) - - set(WARNING_UNDEF "-Wundef") - CHECK_C_COMPILER_FLAG(${WARNING_UNDEF} WARNING_UNDEF_ALLOWED) - if (WARNING_UNDEF_ALLOWED) - MESSAGE("Compiler flag ${WARNING_UNDEF} allowed") - set(ACTIVATE_WARNING_UNDEF "ON" CACHE BOOL "activate -Wundef flag") - else () - MESSAGE("Compiler flag ${WARNING_UNDEF} not allowed") - endif (WARNING_UNDEF_ALLOWED) - - set(WARNING_SHADOW "-Wshadow") - CHECK_C_COMPILER_FLAG(${WARNING_SHADOW} WARNING_SHADOW_ALLOWED) - if (WARNING_SHADOW_ALLOWED) - MESSAGE("Compiler flag ${WARNING_SHADOW} allowed") - set(ACTIVATE_WARNING_SHADOW "ON" CACHE BOOL "activate -Wshadow flag") - else () - MESSAGE("Compiler flag ${WARNING_SHADOW} not allowed") - endif (WARNING_SHADOW_ALLOWED) - - set(WARNING_CAST_ALIGN "-Wcast-align") - 
CHECK_C_COMPILER_FLAG(${WARNING_CAST_ALIGN} WARNING_CAST_ALIGN_ALLOWED) - if (WARNING_CAST_ALIGN_ALLOWED) - MESSAGE("Compiler flag ${WARNING_CAST_ALIGN} allowed") - set(ACTIVATE_WARNING_CAST_ALIGN "ON" CACHE BOOL "activate -Wcast-align flag") - else () - MESSAGE("Compiler flag ${WARNING_CAST_ALIGN} not allowed") - endif (WARNING_CAST_ALIGN_ALLOWED) - - set(WARNING_CAST_QUAL "-Wcast-qual") - CHECK_C_COMPILER_FLAG(${WARNING_CAST_QUAL} WARNING_CAST_QUAL_ALLOWED) - if (WARNING_CAST_QUAL_ALLOWED) - MESSAGE("Compiler flag ${WARNING_CAST_QUAL} allowed") - set(ACTIVATE_WARNING_CAST_QUAL "ON" CACHE BOOL "activate -Wcast-qual flag") - else () - MESSAGE("Compiler flag ${WARNING_CAST_QUAL} not allowed") - endif (WARNING_CAST_QUAL_ALLOWED) - - set(WARNING_STRICT_PROTOTYPES "-Wstrict-prototypes") - CHECK_C_COMPILER_FLAG(${WARNING_STRICT_PROTOTYPES} WARNING_STRICT_PROTOTYPES_ALLOWED) - if (WARNING_STRICT_PROTOTYPES_ALLOWED) - MESSAGE("Compiler flag ${WARNING_STRICT_PROTOTYPES} allowed") - set(ACTIVATE_WARNING_STRICT_PROTOTYPES "ON" CACHE BOOL "activate -Wstrict-prototypes flag") - else () - MESSAGE("Compiler flag ${WARNING_STRICT_PROTOTYPES} not allowed") - endif (WARNING_STRICT_PROTOTYPES_ALLOWED) - - set(WARNING_ALL "-Wall") - CHECK_C_COMPILER_FLAG(${WARNING_ALL} WARNING_ALL_ALLOWED) - if (WARNING_ALL_ALLOWED) - MESSAGE("Compiler flag ${WARNING_ALL} allowed") - set(ACTIVATE_WARNING_ALL "ON" CACHE BOOL "activate -Wall flag") - else () - MESSAGE("Compiler flag ${WARNING_ALL} not allowed") - endif (WARNING_ALL_ALLOWED) - - set(WARNING_EXTRA "-Wextra") - CHECK_C_COMPILER_FLAG(${WARNING_EXTRA} WARNING_EXTRA_ALLOWED) - if (WARNING_EXTRA_ALLOWED) - MESSAGE("Compiler flag ${WARNING_EXTRA} allowed") - set(ACTIVATE_WARNING_EXTRA "ON" CACHE BOOL "activate -Wextra flag") - else () - MESSAGE("Compiler flag ${WARNING_EXTRA} not allowed") - endif (WARNING_EXTRA_ALLOWED) - - set(WARNING_FLOAT_EQUAL "-Wfloat-equal") - CHECK_C_COMPILER_FLAG(${WARNING_FLOAT_EQUAL} WARNING_FLOAT_EQUAL_ALLOWED) - 
if (WARNING_FLOAT_EQUAL_ALLOWED) - MESSAGE("Compiler flag ${WARNING_FLOAT_EQUAL} allowed") - set(ACTIVATE_WARNING_FLOAT_EQUAL "OFF" CACHE BOOL "activate -Wfloat-equal flag") - else () - MESSAGE("Compiler flag ${WARNING_FLOAT_EQUAL} not allowed") - endif (WARNING_FLOAT_EQUAL_ALLOWED) - - set(WARNING_SIGN_CONVERSION "-Wsign-conversion") - CHECK_C_COMPILER_FLAG(${WARNING_SIGN_CONVERSION} WARNING_SIGN_CONVERSION_ALLOWED) - if (WARNING_SIGN_CONVERSION_ALLOWED) - MESSAGE("Compiler flag ${WARNING_SIGN_CONVERSION} allowed") - set(ACTIVATE_WARNING_SIGN_CONVERSION "OFF" CACHE BOOL "activate -Wsign-conversion flag") - else () - MESSAGE("Compiler flag ${WARNING_SIGN_CONVERSION} not allowed") - endif (WARNING_SIGN_CONVERSION_ALLOWED) - - if (ACTIVATE_POSITION_INDEPENDENT_CODE_FLAG) - list(APPEND CMAKE_C_FLAGS ${POSITION_INDEPENDENT_CODE_FLAG}) - else () - string(REPLACE ${POSITION_INDEPENDENT_CODE_FLAG} "" CMAKE_C_FLAGS "${POSITION_INDEPENDENT_CODE_FLAG}") - endif (ACTIVATE_POSITION_INDEPENDENT_CODE_FLAG) - - if (ACTIVATE_WARNING_UNDEF) - list(APPEND CMAKE_CXX_FLAGS ${WARNING_UNDEF}) - list(APPEND CMAKE_C_FLAGS ${WARNING_UNDEF}) - else () - string(REPLACE ${WARNING_UNDEF} "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") - string(REPLACE ${WARNING_UNDEF} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") - endif (ACTIVATE_WARNING_UNDEF) - - if (ACTIVATE_WARNING_SHADOW) - list(APPEND CMAKE_CXX_FLAGS ${WARNING_SHADOW}) - list(APPEND CMAKE_C_FLAGS ${WARNING_SHADOW}) - else () - string(REPLACE ${WARNING_SHADOW} "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") - string(REPLACE ${WARNING_SHADOW} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") - endif (ACTIVATE_WARNING_SHADOW) - - if (ACTIVATE_WARNING_CAST_QUAL) - list(APPEND CMAKE_CXX_FLAGS ${WARNING_CAST_QUAL}) - list(APPEND CMAKE_C_FLAGS ${WARNING_CAST_QUAL}) - else () - string(REPLACE ${WARNING_CAST_QUAL} "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") - string(REPLACE ${WARNING_CAST_QUAL} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") - endif (ACTIVATE_WARNING_CAST_QUAL) - - if 
(ACTIVATE_WARNING_CAST_ALIGN) - list(APPEND CMAKE_CXX_FLAGS ${WARNING_CAST_ALIGN}) - list(APPEND CMAKE_C_FLAGS ${WARNING_CAST_ALIGN}) - else () - string(REPLACE ${WARNING_CAST_ALIGN} "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") - string(REPLACE ${WARNING_CAST_ALIGN} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") - endif (ACTIVATE_WARNING_CAST_ALIGN) - - if (ACTIVATE_WARNING_STRICT_PROTOTYPES) - list(APPEND CMAKE_C_FLAGS ${WARNING_STRICT_PROTOTYPES}) - else () - string(REPLACE ${WARNING_STRICT_PROTOTYPES} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") - endif (ACTIVATE_WARNING_STRICT_PROTOTYPES) - - if (ACTIVATE_WARNING_ALL) - list(APPEND CMAKE_CXX_FLAGS ${WARNING_ALL}) - list(APPEND CMAKE_C_FLAGS ${WARNING_ALL}) - else () - string(REPLACE ${WARNING_ALL} "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") - string(REPLACE ${WARNING_ALL} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") - endif (ACTIVATE_WARNING_ALL) - - if (ACTIVATE_WARNING_EXTRA) - list(APPEND CMAKE_CXX_FLAGS ${WARNING_EXTRA}) - list(APPEND CMAKE_C_FLAGS ${WARNING_EXTRA}) - else () - string(REPLACE ${WARNING_EXTRA} "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") - string(REPLACE ${WARNING_EXTRA} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") - endif (ACTIVATE_WARNING_EXTRA) - - if (ACTIVATE_WARNING_FLOAT_EQUAL) - list(APPEND CMAKE_CXX_FLAGS ${WARNING_FLOAT_EQUAL}) - list(APPEND CMAKE_C_FLAGS ${WARNING_FLOAT_EQUAL}) - else () - string(REPLACE ${WARNING_FLOAT_EQUAL} "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") - string(REPLACE ${WARNING_FLOAT_EQUAL} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") - endif (ACTIVATE_WARNING_FLOAT_EQUAL) - - if (ACTIVATE_WARNING_SIGN_CONVERSION) - list(APPEND CMAKE_CXX_FLAGS ${WARNING_SIGN_CONVERSION}) - list(APPEND CMAKE_C_FLAGS ${WARNING_SIGN_CONVERSION}) - else () - string(REPLACE ${WARNING_SIGN_CONVERSION} "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") - string(REPLACE ${WARNING_SIGN_CONVERSION} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") - endif (ACTIVATE_WARNING_SIGN_CONVERSION) - - #Set c++11 by default - list(APPEND CMAKE_CXX_FLAGS "-std=c++11") - - 
#Set c99 by default - list(APPEND CMAKE_C_FLAGS "-std=c99") - - elseif (MSVC) - # Add specific compilation flags for Windows Visual - - set(WARNING_ALL "/Wall") - CHECK_C_COMPILER_FLAG(${WARNING_ALL} WARNING_ALL_ALLOWED) - if (WARNING_ALL_ALLOWED) - MESSAGE("Compiler flag ${WARNING_ALL} allowed") - set(ACTIVATE_WARNING_ALL "OFF" CACHE BOOL "activate /Wall flag") - else () - MESSAGE("Compiler flag ${WARNING_ALL} not allowed") - endif (WARNING_ALL_ALLOWED) - - set(RTC_FLAG "/RTC1") - CHECK_C_COMPILER_FLAG(${RTC_FLAG} RTC_FLAG_ALLOWED) - if (RTC_FLAG_ALLOWED) - MESSAGE("Compiler flag ${RTC_FLAG} allowed") - set(ACTIVATE_RTC_FLAG "ON" CACHE BOOL "activate /RTC1 flag") - else () - MESSAGE("Compiler flag ${RTC_FLAG} not allowed") - endif (RTC_FLAG_ALLOWED) - - set(ZC_FLAG "/Zc:forScope") - CHECK_C_COMPILER_FLAG(${ZC_FLAG} ZC_FLAG_ALLOWED) - if (ZC_FLAG_ALLOWED) - MESSAGE("Compiler flag ${ZC_FLAG} allowed") - set(ACTIVATE_ZC_FLAG "ON" CACHE BOOL "activate /Zc:forScope flag") - else () - MESSAGE("Compiler flag ${ZC_FLAG} not allowed") - endif (ZC_FLAG_ALLOWED) - - set(GD_FLAG "/Gd") - CHECK_C_COMPILER_FLAG(${GD_FLAG} GD_FLAG_ALLOWED) - if (GD_FLAG_ALLOWED) - MESSAGE("Compiler flag ${GD_FLAG} allowed") - set(ACTIVATE_GD_FLAG "ON" CACHE BOOL "activate /Gd flag") - else () - MESSAGE("Compiler flag ${GD_FLAG} not allowed") - endif (GD_FLAG_ALLOWED) - - set(ANALYZE_FLAG "/analyze:stacksize25000") - CHECK_C_COMPILER_FLAG(${ANALYZE_FLAG} ANALYZE_FLAG_ALLOWED) - if (ANALYZE_FLAG_ALLOWED) - MESSAGE("Compiler flag ${ANALYZE_FLAG} allowed") - set(ACTIVATE_ANALYZE_FLAG "ON" CACHE BOOL "activate /ANALYZE flag") - else () - MESSAGE("Compiler flag ${ANALYZE_FLAG} not allowed") - endif (ANALYZE_FLAG_ALLOWED) - - if (ACTIVATE_WARNING_ALL) - list(APPEND CMAKE_CXX_FLAGS ${WARNING_ALL}) - list(APPEND CMAKE_C_FLAGS ${WARNING_ALL}) - else () - string(REPLACE ${WARNING_ALL} "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") - string(REPLACE ${WARNING_ALL} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") - endif 
(ACTIVATE_WARNING_ALL) - - # Only for DEBUG version - if (ACTIVATE_RTC_FLAG) - list(APPEND CMAKE_CXX_FLAGS_DEBUG ${RTC_FLAG}) - list(APPEND CMAKE_C_FLAGS_DEBUG ${RTC_FLAG}) - else () - string(REPLACE ${RTC_FLAG} "" CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG}") - string(REPLACE ${RTC_FLAG} "" CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG}") - endif (ACTIVATE_RTC_FLAG) - - if (ACTIVATE_ZC_FLAG) - list(APPEND CMAKE_CXX_FLAGS ${ZC_FLAG}) - list(APPEND CMAKE_C_FLAGS ${ZC_FLAG}) - else () - string(REPLACE ${ZC_FLAG} "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") - string(REPLACE ${ZC_FLAG} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") - endif (ACTIVATE_ZC_FLAG) - - if (ACTIVATE_GD_FLAG) - list(APPEND CMAKE_CXX_FLAGS ${GD_FLAG}) - list(APPEND CMAKE_C_FLAGS ${GD_FLAG}) - else () - string(REPLACE ${GD_FLAG} "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") - string(REPLACE ${GD_FLAG} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") - endif (ACTIVATE_GD_FLAG) - - if (ACTIVATE_ANALYZE_FLAG) - list(APPEND CMAKE_CXX_FLAGS ${ANALYZE_FLAG}) - list(APPEND CMAKE_C_FLAGS ${ANALYZE_FLAG}) - else () - string(REPLACE ${ANALYZE_FLAG} "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") - string(REPLACE ${ANALYZE_FLAG} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") - endif (ACTIVATE_ANALYZE_FLAG) - - if (MSVC80 OR MSVC90 OR MSVC10 OR MSVC11) - # To avoid compiler warning (level 4) C4571, compile with /EHa if you still want - # your catch(...) blocks to catch structured exceptions. 
- list(APPEND CMAKE_CXX_FLAGS "/EHa") - endif (MSVC80 OR MSVC90 OR MSVC10 OR MSVC11) - - set(MULTITHREADED_COMPILATION "/MP") - MESSAGE("Compiler flag ${MULTITHREADED_COMPILATION} allowed") - set(ACTIVATE_MULTITHREADED_COMPILATION "ON" CACHE BOOL "activate /MP flag") - - if (ACTIVATE_MULTITHREADED_COMPILATION) - list(APPEND CMAKE_CXX_FLAGS ${MULTITHREADED_COMPILATION}) - list(APPEND CMAKE_C_FLAGS ${MULTITHREADED_COMPILATION}) - else () - string(REPLACE ${MULTITHREADED_COMPILATION} "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") - string(REPLACE ${MULTITHREADED_COMPILATION} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}") - endif (ACTIVATE_MULTITHREADED_COMPILATION) - - #For exceptions - list(APPEND CMAKE_CXX_FLAGS "/EHsc") - list(APPEND CMAKE_C_FLAGS "/EHsc") - - # UNICODE SUPPORT - list(APPEND CMAKE_CXX_FLAGS "/D_UNICODE /DUNICODE") - list(APPEND CMAKE_C_FLAGS "/D_UNICODE /DUNICODE") - endif () - - # Remove duplicates compilation flags - FOREACH (flag_var CMAKE_C_FLAGS CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE - CMAKE_C_FLAGS_MINSIZEREL CMAKE_C_FLAGS_RELWITHDEBINFO - CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE - CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO) - separate_arguments(${flag_var}) - list(REMOVE_DUPLICATES ${flag_var}) - string(REPLACE ";" " " ${flag_var} "${${flag_var}}") - set(${flag_var} "${${flag_var}}" CACHE STRING "common build flags" FORCE) - ENDFOREACH (flag_var) - - if (MSVC) - # Replace /MT to /MD flag - # Replace /O2 to /O3 flag - FOREACH (flag_var CMAKE_C_FLAGS CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE - CMAKE_C_FLAGS_MINSIZEREL CMAKE_C_FLAGS_RELWITHDEBINFO - CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE - CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO) - STRING(REGEX REPLACE "/MT" "/MD" ${flag_var} "${${flag_var}}") - STRING(REGEX REPLACE "/O2" "/Ox" ${flag_var} "${${flag_var}}") - ENDFOREACH (flag_var) - endif () - - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}" CACHE STRING "Updated flags" FORCE) - 
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG}" CACHE STRING "Updated flags" FORCE) - set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE}" CACHE STRING "Updated flags" FORCE) - set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL}" CACHE STRING "Updated flags" FORCE) - set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO}" CACHE STRING "Updated flags" FORCE) - - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS}" CACHE STRING "Updated flags" FORCE) - set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG}" CACHE STRING "Updated flags" FORCE) - set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE}" CACHE STRING "Updated flags" FORCE) - set(CMAKE_C_FLAGS_MINSIZEREL "${CMAKE_C_FLAGS_MINSIZEREL}" CACHE STRING "Updated flags" FORCE) - set(CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO}" CACHE STRING "Updated flags" FORCE) - -ENDMACRO(ADD_EXTRA_COMPILATION_FLAGS) diff --git a/build/cmake/CMakeModules/AddZstdCompilationFlags.cmake b/build/cmake/CMakeModules/AddZstdCompilationFlags.cmake new file mode 100644 index 000000000..e812418e3 --- /dev/null +++ b/build/cmake/CMakeModules/AddZstdCompilationFlags.cmake @@ -0,0 +1,86 @@ +include(CheckCXXCompilerFlag) +include(CheckCCompilerFlag) + +function(EnableCompilerFlag _flag _C _CXX) + string(REGEX REPLACE "\\+" "PLUS" varname "${_flag}") + string(REGEX REPLACE "[^A-Za-z0-9]+" "_" varname "${varname}") + string(REGEX REPLACE "^_+" "" varname "${varname}") + string(TOUPPER "${varname}" varname) + if (_C) + CHECK_C_COMPILER_FLAG(${_flag} C_FLAG_${varname}) + if (C_FLAG_${varname}) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${_flag}" PARENT_SCOPE) + endif () + endif () + if (_CXX) + CHECK_CXX_COMPILER_FLAG(${_flag} CXX_FLAG_${varname}) + if (CXX_FLAG_${varname}) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${_flag}" PARENT_SCOPE) + endif () + endif () +endfunction() + +MACRO(ADD_ZSTD_COMPILATION_FLAGS) + if (CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang" OR MINGW) #Not only UNIX but also WIN32 for MinGW + #Set c++11 by 
default + EnableCompilerFlag("-std=c++11" false true) + #Set c99 by default + EnableCompilerFlag("-std=c99" true false) + EnableCompilerFlag("-Wall" true true) + EnableCompilerFlag("-Wextra" true true) + EnableCompilerFlag("-Wundef" true true) + EnableCompilerFlag("-Wshadow" true true) + EnableCompilerFlag("-Wcast-align" true true) + EnableCompilerFlag("-Wcast-qual" true true) + EnableCompilerFlag("-Wstrict-prototypes" true false) + elseif (MSVC) # Add specific compilation flags for Windows Visual + EnableCompilerFlag("/Wall" true true) + + # Only for DEBUG version + EnableCompilerFlag("/RTC1" true true) + EnableCompilerFlag("/Zc:forScope" true true) + EnableCompilerFlag("/Gd" true true) + EnableCompilerFlag("/analyze:stacksize25000" true true) + + if (MSVC80 OR MSVC90 OR MSVC10 OR MSVC11) + # To avoid compiler warning (level 4) C4571, compile with /EHa if you still want + # your catch(...) blocks to catch structured exceptions. + EnableCompilerFlag("/EHa" false true) + endif (MSVC80 OR MSVC90 OR MSVC10 OR MSVC11) + + set(ACTIVATE_MULTITHREADED_COMPILATION "ON" CACHE BOOL "activate multi-threaded compilation (/MP flag)") + if (ACTIVATE_MULTITHREADED_COMPILATION) + EnableCompilerFlag("/MP" true true) + endif () + + #For exceptions + EnableCompilerFlag("/EHsc" true true) + + # UNICODE SUPPORT + EnableCompilerFlag("/D_UNICODE" true true) + EnableCompilerFlag("/DUNICODE" true true) + endif () + + # Remove duplicates compilation flags + FOREACH (flag_var CMAKE_C_FLAGS CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE + CMAKE_C_FLAGS_MINSIZEREL CMAKE_C_FLAGS_RELWITHDEBINFO + CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE + CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO) + separate_arguments(${flag_var}) + list(REMOVE_DUPLICATES ${flag_var}) + string(REPLACE ";" " " ${flag_var} "${${flag_var}}") + ENDFOREACH (flag_var) + + if (MSVC) + # Replace /MT to /MD flag + # Replace /O2 to /O3 flag + FOREACH (flag_var CMAKE_C_FLAGS CMAKE_C_FLAGS_DEBUG 
CMAKE_C_FLAGS_RELEASE + CMAKE_C_FLAGS_MINSIZEREL CMAKE_C_FLAGS_RELWITHDEBINFO + CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE + CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO) + STRING(REGEX REPLACE "/MT" "/MD" ${flag_var} "${${flag_var}}") + STRING(REGEX REPLACE "/O2" "/Ox" ${flag_var} "${${flag_var}}") + ENDFOREACH (flag_var) + endif () + +ENDMACRO(ADD_ZSTD_COMPILATION_FLAGS) diff --git a/build/cmake/CMakeModules/GetZstdLibraryVersion.cmake b/build/cmake/CMakeModules/GetZstdLibraryVersion.cmake new file mode 100644 index 000000000..8b6f394da --- /dev/null +++ b/build/cmake/CMakeModules/GetZstdLibraryVersion.cmake @@ -0,0 +1,9 @@ +function(GetZstdLibraryVersion _header _major _minor _release) + # Read file content + FILE(READ ${_header} CONTENT) + + string(REGEX MATCH ".*define ZSTD_VERSION_MAJOR *([0-9]+).*define ZSTD_VERSION_MINOR *([0-9]+).*define ZSTD_VERSION_RELEASE *([0-9]+)" VERSION_REGEX "${CONTENT}") + SET(${_major} ${CMAKE_MATCH_1} PARENT_SCOPE) + SET(${_minor} ${CMAKE_MATCH_2} PARENT_SCOPE) + SET(${_release} ${CMAKE_MATCH_3} PARENT_SCOPE) +endfunction() diff --git a/build/cmake/contrib/CMakeLists.txt b/build/cmake/contrib/CMakeLists.txt index 68e0881c5..c7d97aa95 100644 --- a/build/cmake/contrib/CMakeLists.txt +++ b/build/cmake/contrib/CMakeLists.txt @@ -13,4 +13,5 @@ PROJECT(contrib) ADD_SUBDIRECTORY(pzstd) +ADD_SUBDIRECTORY(gen_html) diff --git a/build/cmake/contrib/gen_html/CMakeLists.txt b/build/cmake/contrib/gen_html/CMakeLists.txt new file mode 100644 index 000000000..c10c62b54 --- /dev/null +++ b/build/cmake/contrib/gen_html/CMakeLists.txt @@ -0,0 +1,33 @@ +# ################################################################ +# * Copyright (c) 2015-present, Yann Collet, Facebook, Inc. +# * All rights reserved. +# * +# * This source code is licensed under the BSD-style license found in the +# * LICENSE file in the root directory of this source tree. 
An additional grant +# * of patent rights can be found in the PATENTS file in the same directory. +# +# You can contact the author at : +# - zstd homepage : http://www.zstd.net/ +# ################################################################ + +PROJECT(gen_html) +INCLUDE(GetZstdLibraryVersion) + +SET(CMAKE_INCLUDE_CURRENT_DIR TRUE) + +# Define programs directory, where sources and header files are located +SET(LIBRARY_DIR ${ZSTD_SOURCE_DIR}/lib) +SET(PROGRAMS_DIR ${ZSTD_SOURCE_DIR}/programs) +SET(GENHTML_DIR ${ZSTD_SOURCE_DIR}/contrib/gen_html) +SET(GENHTML_BINARY ${PROJECT_BINARY_DIR}/gen_html${CMAKE_EXECUTABLE_SUFFIX}) +INCLUDE_DIRECTORIES(${PROGRAMS_DIR} ${LIBRARY_DIR} ${LIBRARY_DIR}/common ${GENHTML_DIR}) + +ADD_EXECUTABLE(gen_html ${GENHTML_DIR}/gen_html.cpp) + +GetZstdLibraryVersion(${LIBRARY_DIR}/zstd.h VMAJOR VMINOR VRELEASE) +SET(LIBVERSION "${VMAJOR}.${VMINOR}.${VRELEASE}") +ADD_CUSTOM_TARGET(zstd_manual.html ALL + ${GENHTML_BINARY} "${LIBVERSION}" "${LIBRARY_DIR}/zstd.h" "${PROJECT_BINARY_DIR}/zstd_manual.html" + DEPENDS gen_html COMMENT "Update zstd manual") + +INSTALL(FILES "${PROJECT_BINARY_DIR}/zstd_manual.html" DESTINATION "share/doc") diff --git a/build/cmake/contrib/pzstd/CMakeLists.txt b/build/cmake/contrib/pzstd/CMakeLists.txt index 2a3663f31..71def02cd 100644 --- a/build/cmake/contrib/pzstd/CMakeLists.txt +++ b/build/cmake/contrib/pzstd/CMakeLists.txt @@ -14,17 +14,22 @@ PROJECT(pzstd) SET(CMAKE_INCLUDE_CURRENT_DIR TRUE) -# Define project root directory -SET(ROOT_DIR ../../../..) 
- # Define programs directory, where sources and header files are located -SET(LIBRARY_DIR ${ROOT_DIR}/lib) -SET(PROGRAMS_DIR ${ROOT_DIR}/programs) -SET(PZSTD_DIR ${ROOT_DIR}/contrib/pzstd) +SET(LIBRARY_DIR ${ZSTD_SOURCE_DIR}/lib) +SET(PROGRAMS_DIR ${ZSTD_SOURCE_DIR}/programs) +SET(PZSTD_DIR ${ZSTD_SOURCE_DIR}/contrib/pzstd) INCLUDE_DIRECTORIES(${PROGRAMS_DIR} ${LIBRARY_DIR} ${LIBRARY_DIR}/common ${PZSTD_DIR}) ADD_EXECUTABLE(pzstd ${PZSTD_DIR}/main.cpp ${PZSTD_DIR}/Options.cpp ${PZSTD_DIR}/Pzstd.cpp ${PZSTD_DIR}/SkippableFrame.cpp) -TARGET_LINK_LIBRARIES(pzstd libzstd_static pthread) -SET_TARGET_PROPERTIES(pzstd PROPERTIES COMPILE_DEFINITIONS "NDEBUG") -SET_TARGET_PROPERTIES(pzstd PROPERTIES COMPILE_OPTIONS "-Wno-shadow") +SET_PROPERTY(TARGET pzstd APPEND PROPERTY COMPILE_DEFINITIONS "NDEBUG") +SET_PROPERTY(TARGET pzstd APPEND PROPERTY COMPILE_OPTIONS "-Wno-shadow") +SET(THREADS_PREFER_PTHREAD_FLAG ON) +FIND_PACKAGE(Threads REQUIRED) +IF (CMAKE_USE_PTHREADS_INIT) + TARGET_LINK_LIBRARIES(pzstd libzstd_shared ${CMAKE_THREAD_LIBS_INIT}) +ELSE() + MESSAGE(SEND_ERROR "ZSTD currently does not support thread libraries other than pthreads") +ENDIF() + +INSTALL(TARGETS pzstd RUNTIME DESTINATION "bin") diff --git a/build/cmake/lib/.gitignore b/build/cmake/lib/.gitignore new file mode 100644 index 000000000..a4444c8d3 --- /dev/null +++ b/build/cmake/lib/.gitignore @@ -0,0 +1,2 @@ +# cmake build artefact +libzstd.pc diff --git a/build/cmake/lib/CMakeLists.txt b/build/cmake/lib/CMakeLists.txt index 1950d97cd..429d49449 100644 --- a/build/cmake/lib/CMakeLists.txt +++ b/build/cmake/lib/CMakeLists.txt @@ -10,30 +10,18 @@ # - zstd homepage : http://www.zstd.net/ # ################################################################ -# Get library version based on information from input content (use regular exp) -function(GetLibraryVersion _content _outputVar1 _outputVar2 _outputVar3) - string(REGEX MATCHALL ".*define ZSTD_VERSION_MAJOR+.* ([0-9]+).*define ZSTD_VERSION_MINOR+.* 
([0-9]+).*define ZSTD_VERSION_RELEASE+.* ([0-9]+)" VERSION_REGEX "${_content}") - SET(${_outputVar1} ${CMAKE_MATCH_1} PARENT_SCOPE) - SET(${_outputVar2} ${CMAKE_MATCH_2} PARENT_SCOPE) - SET(${_outputVar3} ${CMAKE_MATCH_3} PARENT_SCOPE) -endfunction() - PROJECT(libzstd) SET(CMAKE_INCLUDE_CURRENT_DIR TRUE) - -# Define project root directory -SET(ROOT_DIR ../../..) +OPTION(ZSTD_BUILD_STATIC "BUILD STATIC LIBRARIES" OFF) # Define library directory, where sources and header files are located -SET(LIBRARY_DIR ${ROOT_DIR}/lib) +SET(LIBRARY_DIR ${ZSTD_SOURCE_DIR}/lib) INCLUDE_DIRECTORIES(${LIBRARY_DIR} ${LIBRARY_DIR}/common) -# Read file content -FILE(READ ${LIBRARY_DIR}/zstd.h HEADER_CONTENT) - # Parse version -GetLibraryVersion("${HEADER_CONTENT}" LIBVER_MAJOR LIBVER_MINOR LIBVER_RELEASE) +INCLUDE(GetZstdLibraryVersion) +GetZstdLibraryVersion(${LIBRARY_DIR}/zstd.h LIBVER_MAJOR LIBVER_MINOR LIBVER_RELEASE) MESSAGE("ZSTD VERSION ${LIBVER_MAJOR}.${LIBVER_MINOR}.${LIBVER_RELEASE}") SET(Sources @@ -97,96 +85,76 @@ IF (ZSTD_LEGACY_SUPPORT) ENDIF (ZSTD_LEGACY_SUPPORT) IF (MSVC) - SET(MSVC_RESOURCE_DIR ${ROOT_DIR}/build/VS2010/libzstd-dll) + SET(MSVC_RESOURCE_DIR ${ZSTD_SOURCE_DIR}/build/VS2010/libzstd-dll) SET(PlatformDependResources ${MSVC_RESOURCE_DIR}/libzstd-dll.rc) ENDIF (MSVC) # Split project to static and shared libraries build -ADD_LIBRARY(libzstd_static STATIC ${Sources} ${Headers}) ADD_LIBRARY(libzstd_shared SHARED ${Sources} ${Headers} ${PlatformDependResources}) +IF (ZSTD_BUILD_STATIC) + ADD_LIBRARY(libzstd_static STATIC ${Sources} ${Headers}) +ENDIF (ZSTD_BUILD_STATIC) # Add specific compile definitions for MSVC project IF (MSVC) - SET_TARGET_PROPERTIES(libzstd_static PROPERTIES COMPILE_DEFINITIONS "ZSTD_HEAPMODE=0;_CRT_SECURE_NO_WARNINGS") - SET_TARGET_PROPERTIES(libzstd_shared PROPERTIES COMPILE_DEFINITIONS "ZSTD_DLL_EXPORT=1;ZSTD_HEAPMODE=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS") + SET_PROPERTY(TARGET libzstd_shared APPEND PROPERTY COMPILE_DEFINITIONS 
"ZSTD_DLL_EXPORT=1;ZSTD_HEAPMODE=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS") + IF (ZSTD_BUILD_STATIC) + SET_PROPERTY(TARGET libzstd_static APPEND PROPERTY COMPILE_DEFINITIONS "ZSTD_HEAPMODE=0;_CRT_SECURE_NO_WARNINGS") + ENDIF (ZSTD_BUILD_STATIC) ENDIF (MSVC) # Define library base name IF (MSVC) - SET(LIBRARY_BASE_NAME zstdlib) -ELSE () - SET(LIBRARY_BASE_NAME libzstd) -ENDIF (MSVC) -IF (MSVC) IF (CMAKE_SIZEOF_VOID_P MATCHES "8") - SET(LIBRARY_ARCH_SUFFIX "_x64") + SET(LIBRARY_BASE_NAME "zstdlib_x64") ELSE () - SET(LIBRARY_ARCH_SUFFIX "_x86") + SET(LIBRARY_BASE_NAME "zstdlib_x86") ENDIF (CMAKE_SIZEOF_VOID_P MATCHES "8") ELSE () - SET(LIBRARY_ARCH_SUFFIX "") + SET(LIBRARY_BASE_NAME zstd) ENDIF (MSVC) # Define static and shared library names -SET(STATIC_LIBRARY_OUTPUT_NAME ${LIBRARY_BASE_NAME}${LIBRARY_ARCH_SUFFIX} CACHE STRING "Static library output name") -SET(SHARED_LIBRARY_OUTPUT_NAME ${LIBRARY_BASE_NAME}.${LIBVER_MAJOR}.${LIBVER_MINOR}.${LIBVER_RELEASE}${LIBRARY_ARCH_SUFFIX} CACHE STRING "Shared library output name") - -SET_TARGET_PROPERTIES( - libzstd_static - PROPERTIES - PREFIX "" - OUTPUT_NAME ${STATIC_LIBRARY_OUTPUT_NAME}) - SET_TARGET_PROPERTIES( libzstd_shared PROPERTIES - PREFIX "" - OUTPUT_NAME ${SHARED_LIBRARY_OUTPUT_NAME}) + OUTPUT_NAME ${LIBRARY_BASE_NAME} + SOVERSION ${LIBVER_MAJOR}.${LIBVER_MINOR}.${LIBVER_RELEASE}) + +IF (ZSTD_BUILD_STATIC) + SET_TARGET_PROPERTIES( + libzstd_static + PROPERTIES + OUTPUT_NAME ${LIBRARY_BASE_NAME}) +ENDIF (ZSTD_BUILD_STATIC) IF (UNIX) - IF ("${PREFIX}" STREQUAL "") - SET(PREFIX /usr/local) - ENDIF() - MESSAGE("the variable PREFIX=${PREFIX}") - SET(INSTALL_LIBRARY_DIR ${PREFIX}/lib) - SET(INSTALL_INCLUDE_DIR ${PREFIX}/include) + # pkg-config + SET(PREFIX "${CMAKE_INSTALL_PREFIX}") + SET(LIBDIR "${CMAKE_INSTALL_PREFIX}/lib") + SET(INCLUDEDIR "${CMAKE_INSTALL_PREFIX}/include") + SET(VERSION "${LIBVER_MAJOR}.${LIBVER_MINOR}.${LIBVER_RELEASE}") + ADD_CUSTOM_TARGET(libzstd.pc ALL + ${CMAKE_COMMAND} 
-DIN="${LIBRARY_DIR}/libzstd.pc.in" -DOUT="libzstd.pc" + -DPREFIX="${PREFIX}" -DLIBDIR="${LIBDIR}" -DINCLUDEDIR="${INCLUDEDIR}" -DVERSION="${VERSION}" + -P "${CMAKE_CURRENT_SOURCE_DIR}/pkgconfig.cmake" + COMMENT "Creating pkg-config file") # install target - INSTALL(FILES ${LIBRARY_DIR}/zstd.h ${LIBRARY_DIR}/deprecated/zbuff.h ${LIBRARY_DIR}/dictBuilder/zdict.h DESTINATION ${INSTALL_INCLUDE_DIR}) - INSTALL(TARGETS libzstd_static DESTINATION ${INSTALL_LIBRARY_DIR}) - INSTALL(TARGETS libzstd_shared LIBRARY DESTINATION ${INSTALL_LIBRARY_DIR}) - - # Create symlinks and setup this files - SET(SHARED_LIBRARY_LINK ${SHARED_LIBRARY_OUTPUT_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX}) - SET(SHARED_LIBRARY_SYMLINK1 ${LIBRARY_BASE_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX}) - SET(SHARED_LIBRARY_SYMLINK2 ${LIBRARY_BASE_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX}.${LIBVER_MAJOR}) - - SET(SHARED_LIBRARY_LINK_PATH ${CMAKE_CURRENT_BINARY_DIR}/${SHARED_LIBRARY_LINK}) - SET(SHARED_LIBRARY_SYMLINK1_PATH ${CMAKE_CURRENT_BINARY_DIR}/${SHARED_LIBRARY_SYMLINK1}) - SET(SHARED_LIBRARY_SYMLINK2_PATH ${CMAKE_CURRENT_BINARY_DIR}/${SHARED_LIBRARY_SYMLINK2}) - - ADD_CUSTOM_COMMAND(TARGET libzstd_shared POST_BUILD - COMMAND ${CMAKE_COMMAND} -E create_symlink ${SHARED_LIBRARY_LINK} ${SHARED_LIBRARY_SYMLINK1} - DEPENDS ${SHARED_LIBRARY_LINK_PATH} - COMMENT "Generating symbolic link ${SHARED_LIBRARY_LINK} -> ${SHARED_LIBRARY_SYMLINK1}") - - ADD_CUSTOM_COMMAND(TARGET libzstd_shared POST_BUILD - COMMAND ${CMAKE_COMMAND} -E create_symlink ${SHARED_LIBRARY_LINK} ${SHARED_LIBRARY_SYMLINK2} - DEPENDS ${SHARED_LIBRARY_LINK_PATH} - COMMENT "Generating symbolic link ${SHARED_LIBRARY_LINK} -> ${SHARED_LIBRARY_SYMLINK2}") - - SET_DIRECTORY_PROPERTIES(PROPERTIES ADDITIONAL_MAKE_CLEAN_FILES "${SHARED_LIBRARY_SYMLINK1};${SHARED_LIBRARY_SYMLINK2}") - - INSTALL(FILES ${SHARED_LIBRARY_SYMLINK1_PATH} DESTINATION ${INSTALL_LIBRARY_DIR}) - INSTALL(FILES ${SHARED_LIBRARY_SYMLINK2_PATH} DESTINATION ${INSTALL_LIBRARY_DIR}) + INSTALL(FILES 
${LIBRARY_DIR}/zstd.h ${LIBRARY_DIR}/deprecated/zbuff.h ${LIBRARY_DIR}/dictBuilder/zdict.h DESTINATION "include") + INSTALL(FILES "${CMAKE_CURRENT_BINARY_DIR}/libzstd.pc" DESTINATION "share/pkgconfig") + INSTALL(TARGETS libzstd_shared LIBRARY DESTINATION "lib") + IF (ZSTD_BUILD_STATIC) + INSTALL(TARGETS libzstd_static ARCHIVE DESTINATION "lib") + ENDIF (ZSTD_BUILD_STATIC) # uninstall target CONFIGURE_FILE( - "${CMAKE_SOURCE_DIR}/cmake_uninstall.cmake.in" - "${CMAKE_BINARY_DIR}/cmake_uninstall.cmake" + "${CMAKE_CURRENT_SOURCE_DIR}/cmake_uninstall.cmake.in" + "${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake" IMMEDIATE @ONLY) ADD_CUSTOM_TARGET(uninstall - COMMAND ${CMAKE_COMMAND} -P ${CMAKE_BINARY_DIR}/cmake_uninstall.cmake) + COMMAND ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake) ENDIF (UNIX) diff --git a/build/cmake/cmake_uninstall.cmake.in b/build/cmake/lib/cmake_uninstall.cmake.in similarity index 100% rename from build/cmake/cmake_uninstall.cmake.in rename to build/cmake/lib/cmake_uninstall.cmake.in diff --git a/build/cmake/lib/pkgconfig.cmake b/build/cmake/lib/pkgconfig.cmake new file mode 100644 index 000000000..5434ff75c --- /dev/null +++ b/build/cmake/lib/pkgconfig.cmake @@ -0,0 +1 @@ +CONFIGURE_FILE("${IN}" "${OUT}" @ONLY) diff --git a/build/cmake/programs/.gitignore b/build/cmake/programs/.gitignore index f04c5b429..ae3a8a356 100644 --- a/build/cmake/programs/.gitignore +++ b/build/cmake/programs/.gitignore @@ -1,3 +1,5 @@ # produced by make zstd zstd-frugal +unzstd +zstdcat diff --git a/build/cmake/programs/CMakeLists.txt b/build/cmake/programs/CMakeLists.txt index c88ee5cc9..cb6aa9218 100644 --- a/build/cmake/programs/CMakeLists.txt +++ b/build/cmake/programs/CMakeLists.txt @@ -14,12 +14,9 @@ PROJECT(programs) SET(CMAKE_INCLUDE_CURRENT_DIR TRUE) -# Define project root directory -SET(ROOT_DIR ../../..) 
- # Define programs directory, where sources and header files are located -SET(LIBRARY_DIR ${ROOT_DIR}/lib) -SET(PROGRAMS_DIR ${ROOT_DIR}/programs) +SET(LIBRARY_DIR ${ZSTD_SOURCE_DIR}/lib) +SET(PROGRAMS_DIR ${ZSTD_SOURCE_DIR}/programs) INCLUDE_DIRECTORIES(${PROGRAMS_DIR} ${LIBRARY_DIR} ${LIBRARY_DIR}/common ${LIBRARY_DIR}/compress ${LIBRARY_DIR}/dictBuilder) IF (ZSTD_LEGACY_SUPPORT) @@ -28,23 +25,71 @@ IF (ZSTD_LEGACY_SUPPORT) ENDIF (ZSTD_LEGACY_SUPPORT) IF (MSVC) - SET(MSVC_RESOURCE_DIR ${ROOT_DIR}/build/VS2010/zstd) + SET(MSVC_RESOURCE_DIR ${ZSTD_SOURCE_DIR}/build/VS2010/zstd) SET(PlatformDependResources ${MSVC_RESOURCE_DIR}/zstd.rc) ENDIF (MSVC) ADD_EXECUTABLE(zstd ${PROGRAMS_DIR}/zstdcli.c ${PROGRAMS_DIR}/fileio.c ${PROGRAMS_DIR}/bench.c ${PROGRAMS_DIR}/datagen.c ${PROGRAMS_DIR}/dibio.c ${PlatformDependResources}) -TARGET_LINK_LIBRARIES(zstd libzstd_static) +TARGET_LINK_LIBRARIES(zstd libzstd_shared) +ADD_CUSTOM_TARGET(zstdcat ALL ${CMAKE_COMMAND} -E create_symlink zstd zstdcat DEPENDS zstd COMMENT "Creating zstdcat symlink") +ADD_CUSTOM_TARGET(unzstd ALL ${CMAKE_COMMAND} -E create_symlink zstd unzstd DEPENDS zstd COMMENT "Creating unzstd symlink") +INSTALL(TARGETS zstd RUNTIME DESTINATION "bin") +INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/zstdcat DESTINATION "bin") +INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/unzstd DESTINATION "bin") + IF (UNIX) + ADD_CUSTOM_TARGET(zstd.1 ALL + ${CMAKE_COMMAND} -E copy ${PROGRAMS_DIR}/zstd.1 . 
+ COMMENT "Copying manpage zstd.1") + ADD_CUSTOM_TARGET(zstdcat.1 ALL ${CMAKE_COMMAND} -E create_symlink zstd.1 zstdcat.1 DEPENDS zstd.1 COMMENT "Creating zstdcat.1 symlink") + ADD_CUSTOM_TARGET(unzstd.1 ALL ${CMAKE_COMMAND} -E create_symlink zstd.1 unzstd.1 DEPENDS zstd.1 COMMENT "Creating unzstd.1 symlink") + INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/zstd.1 DESTINATION "share/man/man1") + INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/zstdcat.1 DESTINATION "share/man/man1") + INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/unzstd.1 DESTINATION "share/man/man1") + ADD_EXECUTABLE(zstd-frugal ${PROGRAMS_DIR}/zstdcli.c ${PROGRAMS_DIR}/fileio.c) - TARGET_LINK_LIBRARIES(zstd-frugal libzstd_static) - SET_TARGET_PROPERTIES(zstd-frugal PROPERTIES COMPILE_DEFINITIONS "ZSTD_NOBENCH;ZSTD_NODICT") + TARGET_LINK_LIBRARIES(zstd-frugal libzstd_shared) + SET_PROPERTY(TARGET zstd-frugal APPEND PROPERTY COMPILE_DEFINITIONS "ZSTD_NOBENCH;ZSTD_NODICT") ENDIF (UNIX) IF (ZSTD_MULTITHREAD_SUPPORT) - ADD_EXECUTABLE(zstdmt ${PROGRAMS_DIR}/zstdcli.c ${PROGRAMS_DIR}/fileio.c ${PROGRAMS_DIR}/bench.c ${PROGRAMS_DIR}/datagen.c ${PROGRAMS_DIR}/dibio.c ${PlatformDependResources}) - SET_TARGET_PROPERTIES(zstdmt PROPERTIES COMPILE_DEFINITIONS "ZSTD_MULTITHREAD") - TARGET_LINK_LIBRARIES(zstdmt libzstd_static) - IF (UNIX) - TARGET_LINK_LIBRARIES(zstdmt pthread) - ENDIF (UNIX) + SET_PROPERTY(TARGET zstd APPEND PROPERTY COMPILE_DEFINITIONS "ZSTD_MULTITHREAD") + + SET(THREADS_PREFER_PTHREAD_FLAG ON) + FIND_PACKAGE(Threads REQUIRED) + IF (CMAKE_USE_PTHREADS_INIT) + TARGET_LINK_LIBRARIES(zstd ${CMAKE_THREAD_LIBS_INIT}) + ELSE() + MESSAGE(SEND_ERROR "ZSTD currently does not support thread libraries other than pthreads") + ENDIF() + + ADD_CUSTOM_TARGET(zstdmt ALL ${CMAKE_COMMAND} -E create_symlink zstd zstdmt DEPENDS zstd COMMENT "Creating zstdmt symlink") + INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/zstdmt DESTINATION "bin") ENDIF (ZSTD_MULTITHREAD_SUPPORT) + +OPTION(ZSTD_ZLIB_SUPPORT "ZLIB SUPPORT" OFF) 
+OPTION(ZSTD_LZMA_SUPPORT "LZMA SUPPORT" OFF) + +IF (ZSTD_ZLIB_SUPPORT) + FIND_PACKAGE(ZLIB REQUIRED) + + IF (ZLIB_FOUND) + INCLUDE_DIRECTORIES(${ZLIB_INCLUDE_DIRS}) + TARGET_LINK_LIBRARIES(zstd ${ZLIB_LIBRARIES}) + SET_PROPERTY(TARGET zstd APPEND PROPERTY COMPILE_DEFINITIONS "ZSTD_GZCOMPRESS;ZSTD_GZDECOMPRESS") + ELSE () + MESSAGE(SEND_ERROR "zlib library is missing") + ENDIF () +ENDIF () + +IF (ZSTD_LZMA_SUPPORT) + FIND_PACKAGE(LibLZMA REQUIRED) + + IF (LIBLZMA_FOUND) + INCLUDE_DIRECTORIES(${LIBLZMA_INCLUDE_DIRS}) + TARGET_LINK_LIBRARIES(zstd ${LIBLZMA_LIBRARIES}) + SET_PROPERTY(TARGET zstd APPEND PROPERTY COMPILE_DEFINITIONS "ZSTD_LZMACOMPRESS;ZSTD_LZMADECOMPRESS") + ELSE () + MESSAGE(SEND_ERROR "lzma library is missing") + ENDIF () +ENDIF () diff --git a/build/cmake/tests/CMakeLists.txt b/build/cmake/tests/CMakeLists.txt index 53a699449..cb327e48c 100644 --- a/build/cmake/tests/CMakeLists.txt +++ b/build/cmake/tests/CMakeLists.txt @@ -34,13 +34,10 @@ PROJECT(tests) SET(CMAKE_INCLUDE_CURRENT_DIR TRUE) -# Define project root directory -SET(ROOT_DIR ../../..) 
- # Define programs directory, where sources and header files are located -SET(LIBRARY_DIR ${ROOT_DIR}/lib) -SET(PROGRAMS_DIR ${ROOT_DIR}/programs) -SET(TESTS_DIR ${ROOT_DIR}/tests) +SET(LIBRARY_DIR ${ZSTD_SOURCE_DIR}/lib) +SET(PROGRAMS_DIR ${ZSTD_SOURCE_DIR}/programs) +SET(TESTS_DIR ${ZSTD_SOURCE_DIR}/tests) INCLUDE_DIRECTORIES(${TESTS_DIR} ${PROGRAMS_DIR} ${LIBRARY_DIR} ${LIBRARY_DIR}/common ${LIBRARY_DIR}/compress ${LIBRARY_DIR}/dictBuilder) ADD_EXECUTABLE(fullbench ${PROGRAMS_DIR}/datagen.c ${TESTS_DIR}/fullbench.c) diff --git a/circle.yml b/circle.yml index 298569d14..218e33bfc 100644 --- a/circle.yml +++ b/circle.yml @@ -3,7 +3,7 @@ dependencies: - sudo dpkg --add-architecture i386 - sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test; sudo apt-get -y -qq update - sudo apt-get -y install gcc-powerpc-linux-gnu gcc-arm-linux-gnueabi libc6-dev-armel-cross gcc-aarch64-linux-gnu libc6-dev-arm64-cross - - sudo apt-get -y install libstdc++-6-dev clang gcc g++ gcc-5 gcc-6 + - sudo apt-get -y install libstdc++-6-dev clang gcc g++ gcc-5 gcc-6 zlib1g-dev liblzma-dev - sudo apt-get -y install linux-libc-dev:i386 libc6-dev-i386 test: diff --git a/contrib/cleanTabs b/contrib/cleanTabs new file mode 100755 index 000000000..215913a90 --- /dev/null +++ b/contrib/cleanTabs @@ -0,0 +1,2 @@ +#!/bin/sh +sed -i '' $'s/\t/ /g' ../lib/**/*.{h,c} ../programs/*.{h,c} ../tests/*.c ./**/*.{h,cpp} ../examples/*.c ../zlibWrapper/*.{h,c} diff --git a/contrib/gen_html/gen_html.cpp b/contrib/gen_html/gen_html.cpp index 22ff65b10..e5261c086 100644 --- a/contrib/gen_html/gen_html.cpp +++ b/contrib/gen_html/gen_html.cpp @@ -19,7 +19,7 @@ void trim(string& s, string characters) { size_t p = s.find_first_not_of(characters); s.erase(0, p); - + p = s.find_last_not_of(characters); if (string::npos != p) s.erase(p+1); @@ -48,7 +48,7 @@ vector get_lines(vector& input, int& linenum, string terminator) line = input[linenum]; if (terminator.empty() && line.empty()) { linenum--; break; } - + epos = 
line.find(terminator); if (!terminator.empty() && epos!=string::npos) { out.push_back(line); @@ -168,7 +168,11 @@ int main(int argc, char *argv[]) { sout << "
";
             for (l=0; l

"; for (l=0; l" << endl << "" << endl; return 0; -} \ No newline at end of file +} diff --git a/contrib/linux-kernel/.gitignore b/contrib/linux-kernel/.gitignore new file mode 100644 index 000000000..d8dfeef21 --- /dev/null +++ b/contrib/linux-kernel/.gitignore @@ -0,0 +1,4 @@ +!lib/zstd +!lib/zstd/* +*.o +*.a diff --git a/contrib/linux-kernel/README.md b/contrib/linux-kernel/README.md new file mode 100644 index 000000000..16bc2d48f --- /dev/null +++ b/contrib/linux-kernel/README.md @@ -0,0 +1,89 @@ +# Linux Kernel Patch + +There are three pieces, the `zstd_compress` and `zstd_decompress` kernel modules, the BtrFS patch, and the SquashFS patch. +The patches are based off of the linux kernel master branch (version 4.10). + +## Zstd Kernel modules + +* The header is in `include/linux/zstd.h`. +* It is split up into `zstd_compress` and `zstd_decompress`, which can be loaded independently. +* Source files are in `lib/zstd/`. +* `lib/Kconfig` and `lib/Makefile` need to be modified by applying `lib/Kconfig.diff` and `lib/Makefile.diff` respectively. +* `test/UserlandTest.cpp` contains tests for the patch in userland by mocking the kernel headers. + It can be run with the following commands: + ``` + cd test + make googletest + make UserlandTest + ./UserlandTest + ``` + +## BtrFS + +* The patch is located in `btrfs.diff`. +* Additionally `fs/btrfs/zstd.c` is provided as a source for convenience. +* The patch seems to be working, it doesn't crash the kernel, and compresses at speeds and ratios that are expected. + It could still use some more testing for fringe features, like printing options. + +### Benchmarks + +Benchmarks run on a Ubuntu 14.04 with 2 cores and 4 GiB of RAM. +The VM is running on a Macbook Pro with a 3.1 GHz Intel Core i7 processor, +16 GB of ram, and a SSD. +The kernel running was built from the master branch with the patch (version 4.10). 
+ +The compression benchmark is copying 10 copies of the +unzipped [silesia corpus](http://mattmahoney.net/dc/silesia.html) into a BtrFS +filesystem mounted with `-o compress-force={none, lzo, zlib, zstd}`. +The decompression benchmark is timing how long it takes to `tar` all 10 copies +into `/dev/null`. +The compression ratio is measured by comparing the output of `df` and `du`. +See `btrfs-benchmark.sh` for details. + +| Algorithm | Compression ratio | Compression speed | Decompression speed | +|-----------|-------------------|-------------------|---------------------| +| None | 0.99 | 504 MB/s | 686 MB/s | +| lzo | 1.66 | 398 MB/s | 442 MB/s | +| zlib | 2.58 | 65 MB/s | 241 MB/s | +| zstd 1 | 2.57 | 260 MB/s | 383 MB/s | +| zstd 3 | 2.71 | 174 MB/s | 408 MB/s | +| zstd 6 | 2.87 | 70 MB/s | 398 MB/s | +| zstd 9 | 2.92 | 43 MB/s | 406 MB/s | +| zstd 12 | 2.93 | 21 MB/s | 408 MB/s | +| zstd 15 | 3.01 | 11 MB/s | 354 MB/s | + + +## SquashFS + +* The patch is located in `squashfs.diff` +* Additionally `fs/squashfs/zstd_wrapper.c` is provided as a source for convenience. +* The patch has been tested on a 4.10 kernel. + +### Benchmarks + +Benchmarks run on a Ubuntu 14.04 with 2 cores and 4 GiB of RAM. +The VM is running on a Macbook Pro with a 3.1 GHz Intel Core i7 processor, +16 GB of ram, and a SSD. +The kernel running was built from the master branch with the patch (version 4.10). + +The compression benchmark is the file tree from the SquashFS archive found in the +Ubuntu 16.10 desktop image (ubuntu-16.10-desktop-amd64.iso). +The compression benchmark uses mksquashfs with the default block size (128 KB) +and various compression algorithms/compression levels. +`xz` and `zstd` are also benchmarked with 256 KB blocks. +The decompression benchmark is timing how long it takes to `tar` the file tree +into `/dev/null`. +See `squashfs-benchmark.sh` for details. 
+ +| Algorithm | Compression ratio | Compression speed | Decompression speed | +|----------------|-------------------|-------------------|---------------------| +| gzip | 2.92 | 15 MB/s | 128 MB/s | +| lzo | 2.64 | 9.5 MB/s | 217 MB/s | +| lz4 | 2.12 | 94 MB/s | 218 MB/s | +| xz | 3.43 | 5.5 MB/s | 35 MB/s | +| xz 256 KB | 3.53 | 5.4 MB/s | 40 MB/s | +| zstd 1 | 2.71 | 96 MB/s | 210 MB/s | +| zstd 5 | 2.93 | 69 MB/s | 198 MB/s | +| zstd 10 | 3.01 | 41 MB/s | 225 MB/s | +| zstd 15 | 3.13 | 11.4 MB/s | 224 MB/s | +| zstd 16 256 KB | 3.24 | 8.1 MB/s | 210 MB/s | diff --git a/contrib/linux-kernel/btrfs-benchmark.sh b/contrib/linux-kernel/btrfs-benchmark.sh new file mode 100755 index 000000000..5e28da9c6 --- /dev/null +++ b/contrib/linux-kernel/btrfs-benchmark.sh @@ -0,0 +1,104 @@ +# !/bin/sh +set -e + +# Benchmarks run on a Ubuntu 14.04 VM with 2 cores and 4 GiB of RAM. +# The VM is running on a Macbook Pro with a 3.1 GHz Intel Core i7 processor and +# 16 GB of RAM and an SSD. + +# silesia is a directory that can be downloaded from +# http://mattmahoney.net/dc/silesia.html +# ls -l silesia/ +# total 203M +# -rwxr-xr-x 1 terrelln 9.8M Apr 12 2002 dickens +# -rwxr-xr-x 1 terrelln 49M May 31 2002 mozilla +# -rwxr-xr-x 1 terrelln 9.6M Mar 20 2003 mr +# -rwxr-xr-x 1 terrelln 32M Apr 2 2002 nci +# -rwxr-xr-x 1 terrelln 5.9M Jul 4 2002 ooffice +# -rwxr-xr-x 1 terrelln 9.7M Apr 11 2002 osdb +# -rwxr-xr-x 1 terrelln 6.4M Apr 2 2002 reymont +# -rwxr-xr-x 1 terrelln 21M Mar 25 2002 samba +# -rwxr-xr-x 1 terrelln 7.0M Mar 24 2002 sao +# -rwxr-xr-x 1 terrelln 40M Mar 25 2002 webster +# -rwxr-xr-x 1 terrelln 8.1M Apr 4 2002 x-ray +# -rwxr-xr-x 1 terrelln 5.1M Nov 30 2000 xml + +# $HOME is on a ext4 filesystem +BENCHMARK_DIR="$HOME/silesia/" +N=10 + +# Normalize the environment +sudo umount /mnt/btrfs 2> /dev/null > /dev/null || true +sudo mount -t btrfs $@ /dev/sda3 /mnt/btrfs +sudo rm -rf /mnt/btrfs/* +sync +sudo umount /mnt/btrfs +sudo mount -t btrfs $@ /dev/sda3 /mnt/btrfs + +# 
Run the benchmark +echo "Compression" +time sh -c "for i in \$(seq $N); do sudo cp -r $BENCHMARK_DIR /mnt/btrfs/\$i; done; sync" + +echo "Approximate compression ratio" +printf "%d / %d\n" \ + $(df /mnt/btrfs --output=used -B 1 | tail -n 1) \ + $(sudo du /mnt/btrfs -b -d 0 | tr '\t' '\n' | head -n 1); + +# Unmount and remount to avoid any caching +sudo umount /mnt/btrfs +sudo mount -t btrfs $@ /dev/sda3 /mnt/btrfs + +echo "Decompression" +time sudo tar -c /mnt/btrfs 2> /dev/null | wc -c > /dev/null + +sudo rm -rf /mnt/btrfs/* +sudo umount /mnt/btrfs + +# Run for each of -o compress-force={none, lzo, zlib, zstd} 5 times and take the +# min time and ratio. +# Ran zstd with compression levels {1, 3, 6, 9, 12, 15}. +# Original size: 2119415342 B (using du /mnt/btrfs) + +# none +# compress: 4.205 s +# decompress: 3.090 s +# ratio: 0.99 + +# lzo +# compress: 5.328 s +# decompress: 4.793 s +# ratio: 1.66 + +# zlib +# compress: 32.588 s +# decompress: 8.791 s +# ratio : 2.58 + +# zstd 1 +# compress: 8.147 s +# decompress: 5.527 s +# ratio : 2.57 + +# zstd 3 +# compress: 12.207 s +# decompress: 5.195 s +# ratio : 2.71 + +# zstd 6 +# compress: 30.253 s +# decompress: 5.324 s +# ratio : 2.87 + +# zstd 9 +# compress: 49.659 s +# decompress: 5.220 s +# ratio : 2.92 + +# zstd 12 +# compress: 99.245 s +# decompress: 5.193 s +# ratio : 2.93 + +# zstd 15 +# compress: 196.997 s +# decompress: 5.992 s +# ratio : 3.01 diff --git a/contrib/linux-kernel/btrfs.diff b/contrib/linux-kernel/btrfs.diff new file mode 100644 index 000000000..92a6e2057 --- /dev/null +++ b/contrib/linux-kernel/btrfs.diff @@ -0,0 +1,633 @@ +diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig +index 80e9c18..a26c63b 100644 +--- a/fs/btrfs/Kconfig ++++ b/fs/btrfs/Kconfig +@@ -6,6 +6,8 @@ config BTRFS_FS + select ZLIB_DEFLATE + select LZO_COMPRESS + select LZO_DECOMPRESS ++ select ZSTD_COMPRESS ++ select ZSTD_DECOMPRESS + select RAID6_PQ + select XOR_BLOCKS + select SRCU +diff --git a/fs/btrfs/Makefile 
b/fs/btrfs/Makefile +index 128ce17..962a95a 100644 +--- a/fs/btrfs/Makefile ++++ b/fs/btrfs/Makefile +@@ -6,7 +6,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \ + transaction.o inode.o file.o tree-defrag.o \ + extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \ + extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \ +- export.o tree-log.o free-space-cache.o zlib.o lzo.o \ ++ export.o tree-log.o free-space-cache.o zlib.o lzo.o zstd.o \ + compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \ + reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \ + uuid-tree.o props.o hash.o free-space-tree.o +diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c +index c7721a6..66d4ced 100644 +--- a/fs/btrfs/compression.c ++++ b/fs/btrfs/compression.c +@@ -761,6 +761,7 @@ static struct { + static const struct btrfs_compress_op * const btrfs_compress_op[] = { + &btrfs_zlib_compress, + &btrfs_lzo_compress, ++ &btrfs_zstd_compress, + }; + + void __init btrfs_init_compress(void) +diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h +index 39ec43a..d99fc21 100644 +--- a/fs/btrfs/compression.h ++++ b/fs/btrfs/compression.h +@@ -60,8 +60,9 @@ enum btrfs_compression_type { + BTRFS_COMPRESS_NONE = 0, + BTRFS_COMPRESS_ZLIB = 1, + BTRFS_COMPRESS_LZO = 2, +- BTRFS_COMPRESS_TYPES = 2, +- BTRFS_COMPRESS_LAST = 3, ++ BTRFS_COMPRESS_ZSTD = 3, ++ BTRFS_COMPRESS_TYPES = 3, ++ BTRFS_COMPRESS_LAST = 4, + }; + + struct btrfs_compress_op { +@@ -92,5 +93,6 @@ struct btrfs_compress_op { + + extern const struct btrfs_compress_op btrfs_zlib_compress; + extern const struct btrfs_compress_op btrfs_lzo_compress; ++extern const struct btrfs_compress_op btrfs_zstd_compress; + + #endif +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h +index 29b7fc2..878b23b9 100644 +--- a/fs/btrfs/ctree.h ++++ b/fs/btrfs/ctree.h +@@ -270,6 +270,7 @@ struct btrfs_super_block { + BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \ + 
BTRFS_FEATURE_INCOMPAT_BIG_METADATA | \ + BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO | \ ++ BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD | \ + BTRFS_FEATURE_INCOMPAT_RAID56 | \ + BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF | \ + BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA | \ +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index 08b74da..0c43e4e 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -2853,6 +2853,8 @@ int open_ctree(struct super_block *sb, + features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; + if (fs_info->compress_type == BTRFS_COMPRESS_LZO) + features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; ++ else if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_ZSTD) ++ features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD; + + if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA) + btrfs_info(fs_info, "has skinny extents"); +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c +index dabfc7a..d8ea727 100644 +--- a/fs/btrfs/ioctl.c ++++ b/fs/btrfs/ioctl.c +@@ -327,8 +327,10 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg) + + if (fs_info->compress_type == BTRFS_COMPRESS_LZO) + comp = "lzo"; +- else ++ else if (fs_info->compress_type == BTRFS_COMPRESS_ZLIB) + comp = "zlib"; ++ else ++ comp = "zstd"; + ret = btrfs_set_prop(inode, "btrfs.compression", + comp, strlen(comp), 0); + if (ret) +@@ -1463,6 +1465,8 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, + + if (range->compress_type == BTRFS_COMPRESS_LZO) { + btrfs_set_fs_incompat(fs_info, COMPRESS_LZO); ++ } else if (range->compress_type == BTRFS_COMPRESS_ZSTD) { ++ btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD); + } + + ret = defrag_count; +diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c +index d6cb155..162105f 100644 +--- a/fs/btrfs/props.c ++++ b/fs/btrfs/props.c +@@ -383,6 +383,8 @@ static int prop_compression_validate(const char *value, size_t len) + return 0; + else if (!strncmp("zlib", value, len)) + return 0; ++ else if (!strncmp("zstd", value, len)) ++ return 0; + + return 
-EINVAL; + } +@@ -405,6 +407,8 @@ static int prop_compression_apply(struct inode *inode, + type = BTRFS_COMPRESS_LZO; + else if (!strncmp("zlib", value, len)) + type = BTRFS_COMPRESS_ZLIB; ++ else if (!strncmp("zstd", value, len)) ++ type = BTRFS_COMPRESS_ZSTD; + else + return -EINVAL; + +@@ -422,6 +426,8 @@ static const char *prop_compression_extract(struct inode *inode) + return "zlib"; + case BTRFS_COMPRESS_LZO: + return "lzo"; ++ case BTRFS_COMPRESS_ZSTD: ++ return "zstd"; + } + + return NULL; +diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c +index da687dc..b064456 100644 +--- a/fs/btrfs/super.c ++++ b/fs/btrfs/super.c +@@ -513,6 +513,14 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options, + btrfs_clear_opt(info->mount_opt, NODATASUM); + btrfs_set_fs_incompat(info, COMPRESS_LZO); + no_compress = 0; ++ } else if (strcmp(args[0].from, "zstd") == 0) { ++ compress_type = "zstd"; ++ info->compress_type = BTRFS_COMPRESS_ZSTD; ++ btrfs_set_opt(info->mount_opt, COMPRESS); ++ btrfs_clear_opt(info->mount_opt, NODATACOW); ++ btrfs_clear_opt(info->mount_opt, NODATASUM); ++ btrfs_set_fs_incompat(info, COMPRESS_ZSTD); ++ no_compress = 0; + } else if (strncmp(args[0].from, "no", 2) == 0) { + compress_type = "no"; + btrfs_clear_opt(info->mount_opt, COMPRESS); +@@ -1230,8 +1238,10 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) + if (btrfs_test_opt(info, COMPRESS)) { + if (info->compress_type == BTRFS_COMPRESS_ZLIB) + compress_type = "zlib"; +- else ++ else if (info->compress_type == BTRFS_COMPRESS_LZO) + compress_type = "lzo"; ++ else ++ compress_type = "zstd"; + if (btrfs_test_opt(info, FORCE_COMPRESS)) + seq_printf(seq, ",compress-force=%s", compress_type); + else +diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c +index 1f157fb..b0dec90 100644 +--- a/fs/btrfs/sysfs.c ++++ b/fs/btrfs/sysfs.c +@@ -200,6 +200,7 @@ BTRFS_FEAT_ATTR_INCOMPAT(mixed_backref, MIXED_BACKREF); + BTRFS_FEAT_ATTR_INCOMPAT(default_subvol, DEFAULT_SUBVOL); + 
BTRFS_FEAT_ATTR_INCOMPAT(mixed_groups, MIXED_GROUPS); + BTRFS_FEAT_ATTR_INCOMPAT(compress_lzo, COMPRESS_LZO); ++BTRFS_FEAT_ATTR_INCOMPAT(compress_zstd, COMPRESS_ZSTD); + BTRFS_FEAT_ATTR_INCOMPAT(big_metadata, BIG_METADATA); + BTRFS_FEAT_ATTR_INCOMPAT(extended_iref, EXTENDED_IREF); + BTRFS_FEAT_ATTR_INCOMPAT(raid56, RAID56); +@@ -212,6 +213,7 @@ static struct attribute *btrfs_supported_feature_attrs[] = { + BTRFS_FEAT_ATTR_PTR(default_subvol), + BTRFS_FEAT_ATTR_PTR(mixed_groups), + BTRFS_FEAT_ATTR_PTR(compress_lzo), ++ BTRFS_FEAT_ATTR_PTR(compress_zstd), + BTRFS_FEAT_ATTR_PTR(big_metadata), + BTRFS_FEAT_ATTR_PTR(extended_iref), + BTRFS_FEAT_ATTR_PTR(raid56), +diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c +new file mode 100644 +index 0000000..010548c +--- /dev/null ++++ b/fs/btrfs/zstd.c +@@ -0,0 +1,415 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "compression.h" ++ ++#define ZSTD_BTRFS_MAX_WINDOWLOG 17 ++#define ZSTD_BTRFS_MAX_INPUT (1 << ZSTD_BTRFS_MAX_WINDOWLOG) ++ ++static ZSTD_parameters zstd_get_btrfs_parameters(size_t src_len) ++{ ++ ZSTD_parameters params = ZSTD_getParams(3, src_len, 0); ++ ++ if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG) ++ params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG; ++ WARN_ON(src_len > ZSTD_BTRFS_MAX_INPUT); ++ return params; ++} ++ ++struct workspace { ++ void *mem; ++ size_t size; ++ char *buf; ++ struct list_head list; ++}; ++ ++static void zstd_free_workspace(struct list_head *ws) ++{ ++ struct workspace *workspace = list_entry(ws, struct workspace, list); ++ ++ vfree(workspace->mem); ++ kfree(workspace->buf); ++ kfree(workspace); ++} ++ ++static struct list_head *zstd_alloc_workspace(void) ++{ ++ ZSTD_parameters params = ++ zstd_get_btrfs_parameters(ZSTD_BTRFS_MAX_INPUT); ++ struct workspace *workspace; ++ ++ workspace = kzalloc(sizeof(*workspace), GFP_NOFS); ++ if (!workspace) ++ return ERR_PTR(-ENOMEM); ++ ++ workspace->size = max_t(size_t, ++ 
ZSTD_CStreamWorkspaceBound(params.cParams), ++ ZSTD_DStreamWorkspaceBound(ZSTD_BTRFS_MAX_INPUT)); ++ workspace->mem = vmalloc(workspace->size); ++ workspace->buf = kmalloc(PAGE_SIZE, GFP_NOFS); ++ if (!workspace->mem || !workspace->buf) ++ goto fail; ++ ++ INIT_LIST_HEAD(&workspace->list); ++ ++ return &workspace->list; ++fail: ++ zstd_free_workspace(&workspace->list); ++ return ERR_PTR(-ENOMEM); ++} ++ ++static int zstd_compress_pages(struct list_head *ws, ++ struct address_space *mapping, ++ u64 start, ++ struct page **pages, ++ unsigned long *out_pages, ++ unsigned long *total_in, ++ unsigned long *total_out) ++{ ++ struct workspace *workspace = list_entry(ws, struct workspace, list); ++ ZSTD_CStream *stream; ++ int ret = 0; ++ int nr_pages = 0; ++ struct page *in_page = NULL; /* The current page to read */ ++ struct page *out_page = NULL; /* The current page to write to */ ++ ZSTD_inBuffer in_buf = { NULL, 0, 0 }; ++ ZSTD_outBuffer out_buf = { NULL, 0, 0 }; ++ unsigned long tot_in = 0; ++ unsigned long tot_out = 0; ++ unsigned long len = *total_out; ++ const unsigned long nr_dest_pages = *out_pages; ++ unsigned long max_out = nr_dest_pages * PAGE_SIZE; ++ ZSTD_parameters params = zstd_get_btrfs_parameters(len); ++ ++ *out_pages = 0; ++ *total_out = 0; ++ *total_in = 0; ++ ++ /* Initialize the stream */ ++ stream = ZSTD_initCStream(params, len, workspace->mem, ++ workspace->size); ++ if (!stream) { ++ pr_warn("BTRFS: ZSTD_initCStream failed\n"); ++ ret = -EIO; ++ goto out; ++ } ++ ++ /* map in the first page of input data */ ++ in_page = find_get_page(mapping, start >> PAGE_SHIFT); ++ in_buf.src = kmap(in_page); ++ in_buf.pos = 0; ++ in_buf.size = min_t(size_t, len, PAGE_SIZE); ++ ++ ++ /* Allocate and map in the output buffer */ ++ out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); ++ if (out_page == NULL) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ pages[nr_pages++] = out_page; ++ out_buf.dst = kmap(out_page); ++ out_buf.pos = 0; ++ out_buf.size = min_t(size_t, 
max_out, PAGE_SIZE); ++ ++ while (1) { ++ size_t ret2; ++ ++ ret2 = ZSTD_compressStream(stream, &out_buf, &in_buf); ++ if (ZSTD_isError(ret2)) { ++ pr_debug("BTRFS: ZSTD_compressStream returned %d\n", ++ ZSTD_getErrorCode(ret2)); ++ ret = -EIO; ++ goto out; ++ } ++ ++ /* Check to see if we are making it bigger */ ++ if (tot_in + in_buf.pos > 8192 && ++ tot_in + in_buf.pos < ++ tot_out + out_buf.pos) { ++ ret = -E2BIG; ++ goto out; ++ } ++ ++ /* We've reached the end of our output range */ ++ if (out_buf.pos >= max_out) { ++ tot_out += out_buf.pos; ++ ret = -E2BIG; ++ goto out; ++ } ++ ++ /* Check if we need more output space */ ++ if (out_buf.pos == out_buf.size) { ++ tot_out += PAGE_SIZE; ++ max_out -= PAGE_SIZE; ++ kunmap(out_page); ++ if (nr_pages == nr_dest_pages) { ++ out_page = NULL; ++ ret = -E2BIG; ++ goto out; ++ } ++ out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); ++ if (out_page == NULL) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ pages[nr_pages++] = out_page; ++ out_buf.dst = kmap(out_page); ++ out_buf.pos = 0; ++ out_buf.size = min_t(size_t, max_out, PAGE_SIZE); ++ } ++ ++ /* We've reached the end of the input */ ++ if (in_buf.pos >= len) { ++ tot_in += in_buf.pos; ++ break; ++ } ++ ++ /* Check if we need more input */ ++ if (in_buf.pos == in_buf.size) { ++ tot_in += PAGE_SIZE; ++ kunmap(in_page); ++ put_page(in_page); ++ ++ start += PAGE_SIZE; ++ len -= PAGE_SIZE; ++ in_page = find_get_page(mapping, start >> PAGE_SHIFT); ++ in_buf.src = kmap(in_page); ++ in_buf.pos = 0; ++ in_buf.size = min_t(size_t, len, PAGE_SIZE); ++ } ++ } ++ while (1) { ++ size_t ret2; ++ ++ ret2 = ZSTD_endStream(stream, &out_buf); ++ if (ZSTD_isError(ret2)) { ++ pr_debug("BTRFS: ZSTD_endStream returned %d\n", ++ ZSTD_getErrorCode(ret2)); ++ ret = -EIO; ++ goto out; ++ } ++ if (ret2 == 0) { ++ tot_out += out_buf.pos; ++ break; ++ } ++ if (out_buf.pos >= max_out) { ++ tot_out += out_buf.pos; ++ ret = -E2BIG; ++ goto out; ++ } ++ ++ tot_out += PAGE_SIZE; ++ max_out -= PAGE_SIZE; ++ 
kunmap(out_page); ++ if (nr_pages == nr_dest_pages) { ++ out_page = NULL; ++ ret = -E2BIG; ++ goto out; ++ } ++ out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); ++ if (out_page == NULL) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ pages[nr_pages++] = out_page; ++ out_buf.dst = kmap(out_page); ++ out_buf.pos = 0; ++ out_buf.size = min_t(size_t, max_out, PAGE_SIZE); ++ } ++ ++ if (tot_out >= tot_in) { ++ ret = -E2BIG; ++ goto out; ++ } ++ ++ ret = 0; ++ *total_in = tot_in; ++ *total_out = tot_out; ++out: ++ *out_pages = nr_pages; ++ /* Cleanup */ ++ if (in_page) { ++ kunmap(in_page); ++ put_page(in_page); ++ } ++ if (out_page) ++ kunmap(out_page); ++ return ret; ++} ++ ++static int zstd_decompress_bio(struct list_head *ws, struct page **pages_in, ++ u64 disk_start, ++ struct bio *orig_bio, ++ size_t srclen) ++{ ++ struct workspace *workspace = list_entry(ws, struct workspace, list); ++ ZSTD_DStream *stream; ++ int ret = 0; ++ unsigned long page_in_index = 0; ++ unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE); ++ unsigned long buf_start; ++ unsigned long total_out = 0; ++ ZSTD_inBuffer in_buf = { NULL, 0, 0 }; ++ ZSTD_outBuffer out_buf = { NULL, 0, 0 }; ++ ++ stream = ZSTD_initDStream( ++ ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size); ++ if (!stream) { ++ pr_debug("BTRFS: ZSTD_initDStream failed\n"); ++ ret = -EIO; ++ goto done; ++ } ++ ++ in_buf.src = kmap(pages_in[page_in_index]); ++ in_buf.pos = 0; ++ in_buf.size = min_t(size_t, srclen, PAGE_SIZE); ++ ++ out_buf.dst = workspace->buf; ++ out_buf.pos = 0; ++ out_buf.size = PAGE_SIZE; ++ ++ while (1) { ++ size_t ret2; ++ ++ ret2 = ZSTD_decompressStream(stream, &out_buf, &in_buf); ++ if (ZSTD_isError(ret2)) { ++ pr_debug("BTRFS: ZSTD_decompressStream returned %d\n", ++ ZSTD_getErrorCode(ret2)); ++ ret = -EIO; ++ goto done; ++ } ++ buf_start = total_out; ++ total_out += out_buf.pos; ++ out_buf.pos = 0; ++ ++ ret = btrfs_decompress_buf2page(out_buf.dst, buf_start, ++ total_out, disk_start, orig_bio); 
++ if (ret == 0) ++ break; ++ ++ if (in_buf.pos >= srclen) ++ break; ++ ++ /* Check if we've hit the end of a frame */ ++ if (ret2 == 0) ++ break; ++ ++ if (in_buf.pos == in_buf.size) { ++ kunmap(pages_in[page_in_index++]); ++ if (page_in_index >= total_pages_in) { ++ in_buf.src = NULL; ++ ret = -EIO; ++ goto done; ++ } ++ srclen -= PAGE_SIZE; ++ in_buf.src = kmap(pages_in[page_in_index]); ++ in_buf.pos = 0; ++ in_buf.size = min_t(size_t, srclen, PAGE_SIZE); ++ } ++ } ++ ret = 0; ++ zero_fill_bio(orig_bio); ++done: ++ if (in_buf.src) ++ kunmap(pages_in[page_in_index]); ++ return ret; ++} ++ ++static int zstd_decompress(struct list_head *ws, unsigned char *data_in, ++ struct page *dest_page, ++ unsigned long start_byte, ++ size_t srclen, size_t destlen) ++{ ++ struct workspace *workspace = list_entry(ws, struct workspace, list); ++ ZSTD_DStream *stream; ++ int ret = 0; ++ size_t ret2; ++ ZSTD_inBuffer in_buf = { NULL, 0, 0 }; ++ ZSTD_outBuffer out_buf = { NULL, 0, 0 }; ++ unsigned long total_out = 0; ++ unsigned long pg_offset = 0; ++ char *kaddr; ++ ++ stream = ZSTD_initDStream( ++ ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size); ++ if (!stream) { ++ pr_warn("BTRFS: ZSTD_initDStream failed\n"); ++ ret = -EIO; ++ goto finish; ++ } ++ ++ destlen = min_t(size_t, destlen, PAGE_SIZE); ++ ++ in_buf.src = data_in; ++ in_buf.pos = 0; ++ in_buf.size = srclen; ++ ++ out_buf.dst = workspace->buf; ++ out_buf.pos = 0; ++ out_buf.size = PAGE_SIZE; ++ ++ ret2 = 1; ++ while (pg_offset < destlen && in_buf.pos < in_buf.size) { ++ unsigned long buf_start; ++ unsigned long buf_offset; ++ unsigned long bytes; ++ ++ /* Check if the frame is over and we still need more input */ ++ if (ret2 == 0) { ++ pr_debug("BTRFS: ZSTD_decompressStream ended early\n"); ++ ret = -EIO; ++ goto finish; ++ } ++ ret2 = ZSTD_decompressStream(stream, &out_buf, &in_buf); ++ if (ZSTD_isError(ret2)) { ++ pr_debug("BTRFS: ZSTD_decompressStream returned %d\n", ++ ZSTD_getErrorCode(ret2)); ++ ret = -EIO; ++ 
goto finish; ++ } ++ ++ buf_start = total_out; ++ total_out += out_buf.pos; ++ out_buf.pos = 0; ++ ++ if (total_out <= start_byte) ++ continue; ++ ++ if (total_out > start_byte && buf_start < start_byte) ++ buf_offset = start_byte - buf_start; ++ else ++ buf_offset = 0; ++ ++ bytes = min_t(unsigned long, destlen - pg_offset, ++ out_buf.size - buf_offset); ++ ++ kaddr = kmap_atomic(dest_page); ++ memcpy(kaddr + pg_offset, out_buf.dst + buf_offset, bytes); ++ kunmap_atomic(kaddr); ++ ++ pg_offset += bytes; ++ } ++ ret = 0; ++finish: ++ if (pg_offset < destlen) { ++ kaddr = kmap_atomic(dest_page); ++ memset(kaddr + pg_offset, 0, destlen - pg_offset); ++ kunmap_atomic(kaddr); ++ } ++ return ret; ++} ++ ++const struct btrfs_compress_op btrfs_zstd_compress = { ++ .alloc_workspace = zstd_alloc_workspace, ++ .free_workspace = zstd_free_workspace, ++ .compress_pages = zstd_compress_pages, ++ .decompress_bio = zstd_decompress_bio, ++ .decompress = zstd_decompress, ++}; +diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h +index db4c253..f26c34f 100644 +--- a/include/uapi/linux/btrfs.h ++++ b/include/uapi/linux/btrfs.h +@@ -255,13 +255,7 @@ struct btrfs_ioctl_fs_info_args { + #define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (1ULL << 1) + #define BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS (1ULL << 2) + #define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO (1ULL << 3) +-/* +- * some patches floated around with a second compression method +- * lets save that incompat here for when they do get in +- * Note we don't actually support it, we're just reserving the +- * number +- */ +-#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZOv2 (1ULL << 4) ++#define BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD (1ULL << 4) + + /* + * older kernels tried to do bigger metadata blocks, but the diff --git a/contrib/linux-kernel/fs/btrfs/zstd.c b/contrib/linux-kernel/fs/btrfs/zstd.c new file mode 100644 index 000000000..706fa66ef --- /dev/null +++ b/contrib/linux-kernel/fs/btrfs/zstd.c @@ -0,0 +1,415 @@ +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include "compression.h" + +#define ZSTD_BTRFS_MAX_WINDOWLOG 17 +#define ZSTD_BTRFS_MAX_INPUT (1 << ZSTD_BTRFS_MAX_WINDOWLOG) + +static ZSTD_parameters zstd_get_btrfs_parameters(size_t src_len) +{ + ZSTD_parameters params = ZSTD_getParams(3, src_len, 0); + + if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG) + params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG; + WARN_ON(src_len > ZSTD_BTRFS_MAX_INPUT); + return params; +} + +struct workspace { + void *mem; + size_t size; + char *buf; + struct list_head list; +}; + +static void zstd_free_workspace(struct list_head *ws) +{ + struct workspace *workspace = list_entry(ws, struct workspace, list); + + vfree(workspace->mem); + kfree(workspace->buf); + kfree(workspace); +} + +static struct list_head *zstd_alloc_workspace(void) +{ + ZSTD_parameters params = + zstd_get_btrfs_parameters(ZSTD_BTRFS_MAX_INPUT); + struct workspace *workspace; + + workspace = kzalloc(sizeof(*workspace), GFP_NOFS); + if (!workspace) + return ERR_PTR(-ENOMEM); + + workspace->size = max_t(size_t, + ZSTD_CStreamWorkspaceBound(params.cParams), + ZSTD_DStreamWorkspaceBound(ZSTD_BTRFS_MAX_INPUT)); + workspace->mem = vmalloc(workspace->size); + workspace->buf = kmalloc(PAGE_SIZE, GFP_NOFS); + if (!workspace->mem || !workspace->buf) + goto fail; + + INIT_LIST_HEAD(&workspace->list); + + return &workspace->list; +fail: + zstd_free_workspace(&workspace->list); + return ERR_PTR(-ENOMEM); +} + +static int zstd_compress_pages(struct list_head *ws, + struct address_space *mapping, + u64 start, + struct page **pages, + unsigned long *out_pages, + unsigned long *total_in, + unsigned long *total_out) +{ + struct workspace *workspace = list_entry(ws, struct workspace, list); + ZSTD_CStream *stream; + int ret = 0; + int nr_pages = 0; + struct page *in_page = NULL; /* The current page to read */ + struct page *out_page = NULL; /* The current page to write to */ + ZSTD_inBuffer 
in_buf = { NULL, 0, 0 }; + ZSTD_outBuffer out_buf = { NULL, 0, 0 }; + unsigned long tot_in = 0; + unsigned long tot_out = 0; + unsigned long len = *total_out; + const unsigned long nr_dest_pages = *out_pages; + unsigned long max_out = nr_dest_pages * PAGE_SIZE; + ZSTD_parameters params = zstd_get_btrfs_parameters(len); + + *out_pages = 0; + *total_out = 0; + *total_in = 0; + + /* Initialize the stream */ + stream = ZSTD_initCStream(params, len, workspace->mem, + workspace->size); + if (!stream) { + pr_warn("BTRFS: ZSTD_initCStream failed\n"); + ret = -EIO; + goto out; + } + + /* map in the first page of input data */ + in_page = find_get_page(mapping, start >> PAGE_SHIFT); + in_buf.src = kmap(in_page); + in_buf.pos = 0; + in_buf.size = min_t(size_t, len, PAGE_SIZE); + + + /* Allocate and map in the output buffer */ + out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); + if (out_page == NULL) { + ret = -ENOMEM; + goto out; + } + pages[nr_pages++] = out_page; + out_buf.dst = kmap(out_page); + out_buf.pos = 0; + out_buf.size = min_t(size_t, max_out, PAGE_SIZE); + + while (1) { + size_t ret2; + + ret2 = ZSTD_compressStream(stream, &out_buf, &in_buf); + if (ZSTD_isError(ret2)) { + pr_debug("BTRFS: ZSTD_compressStream returned %d\n", + ZSTD_getErrorCode(ret2)); + ret = -EIO; + goto out; + } + + /* Check to see if we are making it bigger */ + if (tot_in + in_buf.pos > 8192 && + tot_in + in_buf.pos < + tot_out + out_buf.pos) { + ret = -E2BIG; + goto out; + } + + /* We've reached the end of our output range */ + if (out_buf.pos >= max_out) { + tot_out += out_buf.pos; + ret = -E2BIG; + goto out; + } + + /* Check if we need more output space */ + if (out_buf.pos == out_buf.size) { + tot_out += PAGE_SIZE; + max_out -= PAGE_SIZE; + kunmap(out_page); + if (nr_pages == nr_dest_pages) { + out_page = NULL; + ret = -E2BIG; + goto out; + } + out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); + if (out_page == NULL) { + ret = -ENOMEM; + goto out; + } + pages[nr_pages++] = out_page; + 
out_buf.dst = kmap(out_page); + out_buf.pos = 0; + out_buf.size = min_t(size_t, max_out, PAGE_SIZE); + } + + /* We've reached the end of the input */ + if (in_buf.pos >= len) { + tot_in += in_buf.pos; + break; + } + + /* Check if we need more input */ + if (in_buf.pos == in_buf.size) { + tot_in += PAGE_SIZE; + kunmap(in_page); + put_page(in_page); + + start += PAGE_SIZE; + len -= PAGE_SIZE; + in_page = find_get_page(mapping, start >> PAGE_SHIFT); + in_buf.src = kmap(in_page); + in_buf.pos = 0; + in_buf.size = min_t(size_t, len, PAGE_SIZE); + } + } + while (1) { + size_t ret2; + + ret2 = ZSTD_endStream(stream, &out_buf); + if (ZSTD_isError(ret2)) { + pr_debug("BTRFS: ZSTD_endStream returned %d\n", + ZSTD_getErrorCode(ret2)); + ret = -EIO; + goto out; + } + if (ret2 == 0) { + tot_out += out_buf.pos; + break; + } + if (out_buf.pos >= max_out) { + tot_out += out_buf.pos; + ret = -E2BIG; + goto out; + } + + tot_out += PAGE_SIZE; + max_out -= PAGE_SIZE; + kunmap(out_page); + if (nr_pages == nr_dest_pages) { + out_page = NULL; + ret = -E2BIG; + goto out; + } + out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); + if (out_page == NULL) { + ret = -ENOMEM; + goto out; + } + pages[nr_pages++] = out_page; + out_buf.dst = kmap(out_page); + out_buf.pos = 0; + out_buf.size = min_t(size_t, max_out, PAGE_SIZE); + } + + if (tot_out >= tot_in) { + ret = -E2BIG; + goto out; + } + + ret = 0; + *total_in = tot_in; + *total_out = tot_out; +out: + *out_pages = nr_pages; + /* Cleanup */ + if (in_page) { + kunmap(in_page); + put_page(in_page); + } + if (out_page) + kunmap(out_page); + return ret; +} + +static int zstd_decompress_bio(struct list_head *ws, struct page **pages_in, + u64 disk_start, + struct bio *orig_bio, + size_t srclen) +{ + struct workspace *workspace = list_entry(ws, struct workspace, list); + ZSTD_DStream *stream; + int ret = 0; + unsigned long page_in_index = 0; + unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE); + unsigned long buf_start; + unsigned long 
total_out = 0; + ZSTD_inBuffer in_buf = { NULL, 0, 0 }; + ZSTD_outBuffer out_buf = { NULL, 0, 0 }; + + stream = ZSTD_initDStream( + ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size); + if (!stream) { + pr_debug("BTRFS: ZSTD_initDStream failed\n"); + ret = -EIO; + goto done; + } + + in_buf.src = kmap(pages_in[page_in_index]); + in_buf.pos = 0; + in_buf.size = min_t(size_t, srclen, PAGE_SIZE); + + out_buf.dst = workspace->buf; + out_buf.pos = 0; + out_buf.size = PAGE_SIZE; + + while (1) { + size_t ret2; + + ret2 = ZSTD_decompressStream(stream, &out_buf, &in_buf); + if (ZSTD_isError(ret2)) { + pr_debug("BTRFS: ZSTD_decompressStream returned %d\n", + ZSTD_getErrorCode(ret2)); + ret = -EIO; + goto done; + } + buf_start = total_out; + total_out += out_buf.pos; + out_buf.pos = 0; + + ret = btrfs_decompress_buf2page(out_buf.dst, buf_start, + total_out, disk_start, orig_bio); + if (ret == 0) + break; + + if (in_buf.pos >= srclen) + break; + + /* Check if we've hit the end of a frame */ + if (ret2 == 0) + break; + + if (in_buf.pos == in_buf.size) { + kunmap(pages_in[page_in_index++]); + if (page_in_index >= total_pages_in) { + in_buf.src = NULL; + ret = -EIO; + goto done; + } + srclen -= PAGE_SIZE; + in_buf.src = kmap(pages_in[page_in_index]); + in_buf.pos = 0; + in_buf.size = min_t(size_t, srclen, PAGE_SIZE); + } + } + ret = 0; + zero_fill_bio(orig_bio); +done: + if (in_buf.src) + kunmap(pages_in[page_in_index]); + return ret; +} + +static int zstd_decompress(struct list_head *ws, unsigned char *data_in, + struct page *dest_page, + unsigned long start_byte, + size_t srclen, size_t destlen) +{ + struct workspace *workspace = list_entry(ws, struct workspace, list); + ZSTD_DStream *stream; + int ret = 0; + size_t ret2; + ZSTD_inBuffer in_buf = { NULL, 0, 0 }; + ZSTD_outBuffer out_buf = { NULL, 0, 0 }; + unsigned long total_out = 0; + unsigned long pg_offset = 0; + char *kaddr; + + stream = ZSTD_initDStream( + ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size); + if 
(!stream) { + pr_warn("BTRFS: ZSTD_initDStream failed\n"); + ret = -EIO; + goto finish; + } + + destlen = min_t(size_t, destlen, PAGE_SIZE); + + in_buf.src = data_in; + in_buf.pos = 0; + in_buf.size = srclen; + + out_buf.dst = workspace->buf; + out_buf.pos = 0; + out_buf.size = PAGE_SIZE; + + ret2 = 1; + while (pg_offset < destlen && in_buf.pos < in_buf.size) { + unsigned long buf_start; + unsigned long buf_offset; + unsigned long bytes; + + /* Check if the frame is over and we still need more input */ + if (ret2 == 0) { + pr_debug("BTRFS: ZSTD_decompressStream ended early\n"); + ret = -EIO; + goto finish; + } + ret2 = ZSTD_decompressStream(stream, &out_buf, &in_buf); + if (ZSTD_isError(ret2)) { + pr_debug("BTRFS: ZSTD_decompressStream returned %d\n", + ZSTD_getErrorCode(ret2)); + ret = -EIO; + goto finish; + } + + buf_start = total_out; + total_out += out_buf.pos; + out_buf.pos = 0; + + if (total_out <= start_byte) + continue; + + if (total_out > start_byte && buf_start < start_byte) + buf_offset = start_byte - buf_start; + else + buf_offset = 0; + + bytes = min_t(unsigned long, destlen - pg_offset, + out_buf.size - buf_offset); + + kaddr = kmap_atomic(dest_page); + memcpy(kaddr + pg_offset, out_buf.dst + buf_offset, bytes); + kunmap_atomic(kaddr); + + pg_offset += bytes; + } + ret = 0; +finish: + if (pg_offset < destlen) { + kaddr = kmap_atomic(dest_page); + memset(kaddr + pg_offset, 0, destlen - pg_offset); + kunmap_atomic(kaddr); + } + return ret; +} + +const struct btrfs_compress_op btrfs_zstd_compress = { + .alloc_workspace = zstd_alloc_workspace, + .free_workspace = zstd_free_workspace, + .compress_pages = zstd_compress_pages, + .decompress_bio = zstd_decompress_bio, + .decompress = zstd_decompress, +}; diff --git a/contrib/linux-kernel/fs/squashfs/zstd_wrapper.c b/contrib/linux-kernel/fs/squashfs/zstd_wrapper.c new file mode 100644 index 000000000..7cc93030b --- /dev/null +++ b/contrib/linux-kernel/fs/squashfs/zstd_wrapper.c @@ -0,0 +1,149 @@ +/* + * 
Squashfs - a compressed read only filesystem for Linux + * + * Copyright (c) 2017 Facebook + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2, + * or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * zstd_wrapper.c + */ + +#include +#include +#include +#include +#include + +#include "squashfs_fs.h" +#include "squashfs_fs_sb.h" +#include "squashfs.h" +#include "decompressor.h" +#include "page_actor.h" + +struct workspace { + void *mem; + size_t mem_size; +}; + +static void *zstd_init(struct squashfs_sb_info *msblk, void *buff) +{ + struct workspace *wksp = kmalloc(sizeof(*wksp), GFP_KERNEL); + if (wksp == NULL) + goto failed; + wksp->mem_size = ZSTD_DStreamWorkspaceBound(max_t(size_t, + msblk->block_size, SQUASHFS_METADATA_SIZE)); + wksp->mem = vmalloc(wksp->mem_size); + if (wksp->mem == NULL) + goto failed; + + return wksp; + +failed: + ERROR("Failed to allocate zstd workspace\n"); + kfree(wksp); + return ERR_PTR(-ENOMEM); +} + + +static void zstd_free(void *strm) +{ + struct workspace *wksp = strm; + + if (wksp) + vfree(wksp->mem); + kfree(wksp); +} + + +static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm, + struct buffer_head **bh, int b, int offset, int length, + struct squashfs_page_actor *output) +{ + struct workspace *wksp = strm; + ZSTD_DStream *stream; + size_t total_out = 0; + size_t zstd_err; + int k = 0; + ZSTD_inBuffer in_buf = { NULL, 
0, 0 }; + ZSTD_outBuffer out_buf = { NULL, 0, 0 }; + + stream = ZSTD_initDStream(wksp->mem_size, wksp->mem, wksp->mem_size); + + if (!stream) { + ERROR("Failed to initialize zstd decompressor\n"); + goto out; + } + + out_buf.size = PAGE_SIZE; + out_buf.dst = squashfs_first_page(output); + + do { + if (in_buf.pos == in_buf.size && k < b) { + int avail = min(length, msblk->devblksize - offset); + length -= avail; + in_buf.src = bh[k]->b_data + offset; + in_buf.size = avail; + in_buf.pos = 0; + offset = 0; + } + + if (out_buf.pos == out_buf.size) { + out_buf.dst = squashfs_next_page(output); + if (out_buf.dst == NULL) { + /* shouldn't run out of pages before stream is + * done */ + squashfs_finish_page(output); + goto out; + } + out_buf.pos = 0; + out_buf.size = PAGE_SIZE; + } + + total_out -= out_buf.pos; + zstd_err = ZSTD_decompressStream(stream, &out_buf, &in_buf); + total_out += out_buf.pos; /* add the additional data produced */ + + if (in_buf.pos == in_buf.size && k < b) + put_bh(bh[k++]); + } while (zstd_err != 0 && !ZSTD_isError(zstd_err)); + + squashfs_finish_page(output); + + if (ZSTD_isError(zstd_err)) { + ERROR("zstd decompression error: %d\n", + (int)ZSTD_getErrorCode(zstd_err)); + goto out; + } + + if (k < b) + goto out; + + return (int)total_out; + +out: + for (; k < b; k++) + put_bh(bh[k]); + + return -EIO; +} + +const struct squashfs_decompressor squashfs_zstd_comp_ops = { + .init = zstd_init, + .free = zstd_free, + .decompress = zstd_uncompress, + .id = ZSTD_COMPRESSION, + .name = "zstd", + .supported = 1 +}; diff --git a/contrib/linux-kernel/include/linux/zstd.h b/contrib/linux-kernel/include/linux/zstd.h new file mode 100644 index 000000000..ee7bd8207 --- /dev/null +++ b/contrib/linux-kernel/include/linux/zstd.h @@ -0,0 +1,1150 @@ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. 
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ */
+
+#ifndef ZSTD_H
+#define ZSTD_H
+
+/* ====== Dependency ======*/
+#include <linux/types.h>   /* size_t */
+
+
+/*-*****************************************************************************
+ * Introduction
+ *
+ * zstd, short for Zstandard, is a fast lossless compression algorithm,
+ * targeting real-time compression scenarios at zlib-level and better
+ * compression ratios. The zstd compression library provides in-memory
+ * compression and decompression functions. The library supports compression
+ * levels from 1 up to ZSTD_maxCLevel() which is 22. Levels >= 20, labeled
+ * ultra, should be used with caution, as they require more memory.
+ * Compression can be done in:
+ *  - a single step, reusing a context (described as Explicit memory management)
+ *  - unbounded multiple steps (described as Streaming compression)
+ * The compression ratio achievable on small data can be highly improved using
+ * compression with a dictionary in:
+ *  - a single step (described as Simple dictionary API)
+ *  - a single step, reusing a dictionary (described as Fast dictionary API)
+ ******************************************************************************/
+
+/*====== Helper functions ======*/
+
+/**
+ * enum ZSTD_ErrorCode - zstd error codes
+ *
+ * Functions that return size_t can be checked for errors using ZSTD_isError()
+ * and the ZSTD_ErrorCode can be extracted using ZSTD_getErrorCode().
+ */
+typedef enum {
+	ZSTD_error_no_error,
+	ZSTD_error_GENERIC,
+	ZSTD_error_prefix_unknown,
+	ZSTD_error_version_unsupported,
+	ZSTD_error_parameter_unknown,
+	ZSTD_error_frameParameter_unsupported,
+	ZSTD_error_frameParameter_unsupportedBy32bits,
+	ZSTD_error_frameParameter_windowTooLarge,
+	ZSTD_error_compressionParameter_unsupported,
+	ZSTD_error_init_missing,
+	ZSTD_error_memory_allocation,
+	ZSTD_error_stage_wrong,
+	ZSTD_error_dstSize_tooSmall,
+	ZSTD_error_srcSize_wrong,
+	ZSTD_error_corruption_detected,
+	ZSTD_error_checksum_wrong,
+	ZSTD_error_tableLog_tooLarge,
+	ZSTD_error_maxSymbolValue_tooLarge,
+	ZSTD_error_maxSymbolValue_tooSmall,
+	ZSTD_error_dictionary_corrupted,
+	ZSTD_error_dictionary_wrong,
+	ZSTD_error_dictionaryCreation_failed,
+	ZSTD_error_maxCode
+} ZSTD_ErrorCode;
+
+/**
+ * ZSTD_maxCLevel() - maximum compression level available
+ *
+ * Return: Maximum compression level available.
+ */
+int ZSTD_maxCLevel(void);
+/**
+ * ZSTD_compressBound() - maximum compressed size in worst case scenario
+ * @srcSize: The size of the data to compress.
+ *
+ * Return: The maximum compressed size in the worst case scenario.
+ */
+size_t ZSTD_compressBound(size_t srcSize);
+/**
+ * ZSTD_isError() - tells if a size_t function result is an error code
+ * @code: The function result to check for error.
+ *
+ * Return: Non-zero iff the code is an error.
+ */
+static __attribute__((unused)) unsigned int ZSTD_isError(size_t code)
+{
+	/* Errors occupy the top ZSTD_error_maxCode values of size_t. */
+	return code > (size_t)-ZSTD_error_maxCode;
+}
+/**
+ * ZSTD_getErrorCode() - translates an error function result to a ZSTD_ErrorCode
+ * @functionResult: The result of a function for which ZSTD_isError() is true.
+ *
+ * Return: The ZSTD_ErrorCode corresponding to the functionResult or 0
+ * if the functionResult isn't an error.
+ */
+static __attribute__((unused)) ZSTD_ErrorCode ZSTD_getErrorCode(
+	size_t functionResult)
+{
+	if (!ZSTD_isError(functionResult))
+		return (ZSTD_ErrorCode)0;
+	/* An error result is the negated error code stored in a size_t. */
+	return (ZSTD_ErrorCode)(0 - functionResult);
+}
+
+/**
+ * enum ZSTD_strategy - zstd compression search strategy
+ *
+ * From faster to stronger.
+ */
+typedef enum {
+	ZSTD_fast,
+	ZSTD_dfast,
+	ZSTD_greedy,
+	ZSTD_lazy,
+	ZSTD_lazy2,
+	ZSTD_btlazy2,
+	ZSTD_btopt,
+	ZSTD_btopt2
+} ZSTD_strategy;
+
+/**
+ * struct ZSTD_compressionParameters - zstd compression parameters
+ * @windowLog: Log of the largest match distance. Larger means more
+ * compression, and more memory needed during decompression.
+ * @chainLog: Fully searched segment. Larger means more compression, slower,
+ * and more memory (useless for fast).
+ * @hashLog: Dispatch table. Larger means more compression,
+ * slower, and more memory.
+ * @searchLog: Number of searches. Larger means more compression and slower.
+ * @searchLength: Match length searched. Larger means faster decompression,
+ * sometimes less compression.
+ * @targetLength: Acceptable match size for optimal parser (only). Larger means
+ * more compression, and slower.
+ * @strategy: The zstd compression strategy.
+ */
+typedef struct {
+	unsigned int windowLog;
+	unsigned int chainLog;
+	unsigned int hashLog;
+	unsigned int searchLog;
+	unsigned int searchLength;
+	unsigned int targetLength;
+	ZSTD_strategy strategy;
+} ZSTD_compressionParameters;
+
+/**
+ * struct ZSTD_frameParameters - zstd frame parameters
+ * @contentSizeFlag: Controls whether content size will be present in the frame
+ * header (when known).
+ * @checksumFlag: Controls whether a 32-bit checksum is generated at the end
+ * of the frame for error detection.
+ * @noDictIDFlag: Controls whether dictID will be saved into the frame header
+ * when using dictionary compression.
+ *
+ * The default value is all fields set to 0.
+ */ +typedef struct { + unsigned int contentSizeFlag; + unsigned int checksumFlag; + unsigned int noDictIDFlag; +} ZSTD_frameParameters; + +/** + * struct ZSTD_parameters - zstd parameters + * @cParams: The compression parameters. + * @fParams: The frame parameters. + */ +typedef struct { + ZSTD_compressionParameters cParams; + ZSTD_frameParameters fParams; +} ZSTD_parameters; + +/** + * ZSTD_getCParams() - returns ZSTD_compressionParameters for selected level + * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel(). + * @estimatedSrcSize: The estimated source size to compress or 0 if unknown. + * @dictSize: The dictionary size or 0 if a dictionary isn't being used. + * + * Return: The selected ZSTD_compressionParameters. + */ +ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, + unsigned long long estimatedSrcSize, size_t dictSize); + +/** + * ZSTD_getParams() - returns ZSTD_parameters for selected level + * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel(). + * @estimatedSrcSize: The estimated source size to compress or 0 if unknown. + * @dictSize: The dictionary size or 0 if a dictionary isn't being used. + * + * The same as ZSTD_getCParams() except also selects the default frame + * parameters (all zero). + * + * Return: The selected ZSTD_parameters. + */ +ZSTD_parameters ZSTD_getParams(int compressionLevel, + unsigned long long estimatedSrcSize, size_t dictSize); + +/*-************************************* + * Explicit memory management + **************************************/ + +/** + * ZSTD_CCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_CCtx + * @cParams: The compression parameters to be used for compression. + * + * If multiple compression parameters might be used, the caller must call + * ZSTD_CCtxWorkspaceBound() for each set of parameters and use the maximum + * size. + * + * Return: A lower bound on the size of the workspace that is passed to + * ZSTD_initCCtx(). 
+ */ +size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams); + +/** + * struct ZSTD_CCtx - the zstd compression context + * + * When compressing many times it is recommended to allocate a context just once + * and reuse it for each successive compression operation. + */ +typedef struct ZSTD_CCtx_s ZSTD_CCtx; +/** + * ZSTD_initCCtx() - initialize a zstd compression context + * @workspace: The workspace to emplace the context into. It must outlive + * the returned context. + * @workspaceSize: The size of workspace. Use ZSTD_CCtxWorkspaceBound() to + * determine how large the workspace must be. + * + * Return: A compression context emplaced into workspace. + */ +ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize); + +/** + * ZSTD_compressCCtx() - compress src into dst + * @ctx: The context. Must have been initialized with a workspace at + * least as large as ZSTD_CCtxWorkspaceBound(params.cParams). + * @dst: The buffer to compress src into. + * @dstCapacity: The size of the destination buffer. May be any size, but + * ZSTD_compressBound(srcSize) is guaranteed to be large enough. + * @src: The data to compress. + * @srcSize: The size of the data to compress. + * @params: The parameters to use for compression. See ZSTD_getParams(). + * + * Return: The compressed size or an error, which can be checked using + * ZSTD_isError(). + */ +size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize, ZSTD_parameters params); + +/** + * ZSTD_DCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_DCtx + * + * Return: A lower bound on the size of the workspace that is passed to + * ZSTD_initDCtx(). + */ +size_t ZSTD_DCtxWorkspaceBound(void); + +/** + * struct ZSTD_DCtx - the zstd decompression context + * + * When decompressing many times it is recommended to allocate a context just + * once and reuse it for each successive decompression operation. 
+ */ +typedef struct ZSTD_DCtx_s ZSTD_DCtx; +/** + * ZSTD_initDCtx() - initialize a zstd decompression context + * @workspace: The workspace to emplace the context into. It must outlive + * the returned context. + * @workspaceSize: The size of workspace. Use ZSTD_DCtxWorkspaceBound() to + * determine how large the workspace must be. + * + * Return: A decompression context emplaced into workspace. + */ +ZSTD_DCtx *ZSTD_initDCtx(void *workspace, size_t workspaceSize); + +/** + * ZSTD_decompressDCtx() - decompress zstd compressed src into dst + * @ctx: The decompression context. + * @dst: The buffer to decompress src into. + * @dstCapacity: The size of the destination buffer. Must be at least as large + * as the decompressed size. If the caller cannot upper bound the + * decompressed size, then it's better to use the streaming API. + * @src: The zstd compressed data to decompress. Multiple concatenated + * frames and skippable frames are allowed. + * @srcSize: The exact size of the data to decompress. + * + * Return: The decompressed size or an error, which can be checked using + * ZSTD_isError(). + */ +size_t ZSTD_decompressDCtx(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize); + +/*-************************ + * Simple dictionary API + **************************/ + +/** + * ZSTD_compress_usingDict() - compress src into dst using a dictionary + * @ctx: The context. Must have been initialized with a workspace at + * least as large as ZSTD_CCtxWorkspaceBound(params.cParams). + * @dst: The buffer to compress src into. + * @dstCapacity: The size of the destination buffer. May be any size, but + * ZSTD_compressBound(srcSize) is guaranteed to be large enough. + * @src: The data to compress. + * @srcSize: The size of the data to compress. + * @dict: The dictionary to use for compression. + * @dictSize: The size of the dictionary. + * @params: The parameters to use for compression. See ZSTD_getParams(). 
+ * + * Compression using a predefined dictionary. The same dictionary must be used + * during decompression. + * + * Return: The compressed size or an error, which can be checked using + * ZSTD_isError(). + */ +size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize, const void *dict, size_t dictSize, + ZSTD_parameters params); + +/** + * ZSTD_decompress_usingDict() - decompress src into dst using a dictionary + * @ctx: The decompression context. + * @dst: The buffer to decompress src into. + * @dstCapacity: The size of the destination buffer. Must be at least as large + * as the decompressed size. If the caller cannot upper bound the + * decompressed size, then it's better to use the streaming API. + * @src: The zstd compressed data to decompress. Multiple concatenated + * frames and skippable frames are allowed. + * @srcSize: The exact size of the data to decompress. + * @dict: The dictionary to use for decompression. The same dictionary + * must've been used to compress the data. + * @dictSize: The size of the dictionary. + * + * Return: The decompressed size or an error, which can be checked using + * ZSTD_isError(). + */ +size_t ZSTD_decompress_usingDict(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize, const void *dict, size_t dictSize); + +/*-************************** + * Fast dictionary API + ***************************/ + +/** + * ZSTD_CDictWorkspaceBound() - memory needed to initialize a ZSTD_CDict + * @cParams: The compression parameters to be used for compression. + * + * Return: A lower bound on the size of the workspace that is passed to + * ZSTD_initCDict(). 
+ */ +size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams); + +/** + * struct ZSTD_CDict - a digested dictionary to be used for compression + */ +typedef struct ZSTD_CDict_s ZSTD_CDict; + +/** + * ZSTD_initCDict() - initialize a digested dictionary for compression + * @dictBuffer: The dictionary to digest. The buffer is referenced by the + * ZSTD_CDict so it must outlive the returned ZSTD_CDict. + * @dictSize: The size of the dictionary. + * @params: The parameters to use for compression. See ZSTD_getParams(). + * @workspace: The workspace. It must outlive the returned ZSTD_CDict. + * @workspaceSize: The workspace size. Must be at least + * ZSTD_CDictWorkspaceBound(params.cParams). + * + * When compressing multiple messages / blocks with the same dictionary it is + * recommended to load it just once. The ZSTD_CDict merely references the + * dictBuffer, so it must outlive the returned ZSTD_CDict. + * + * Return: The digested dictionary emplaced into workspace. + */ +ZSTD_CDict *ZSTD_initCDict(const void *dictBuffer, size_t dictSize, + ZSTD_parameters params, void *workspace, size_t workspaceSize); + +/** + * ZSTD_compress_usingCDict() - compress src into dst using a ZSTD_CDict + * @ctx: The context. Must have been initialized with a workspace at + * least as large as ZSTD_CCtxWorkspaceBound(cParams) where + * cParams are the compression parameters used to initialize the + * cdict. + * @dst: The buffer to compress src into. + * @dstCapacity: The size of the destination buffer. May be any size, but + * ZSTD_compressBound(srcSize) is guaranteed to be large enough. + * @src: The data to compress. + * @srcSize: The size of the data to compress. + * @cdict: The digested dictionary to use for compression. + * @params: The parameters to use for compression. See ZSTD_getParams(). + * + * Compression using a digested dictionary. The same dictionary must be used + * during decompression. 
+ * + * Return: The compressed size or an error, which can be checked using + * ZSTD_isError(). + */ +size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize, const ZSTD_CDict *cdict); + + +/** + * ZSTD_DDictWorkspaceBound() - memory needed to initialize a ZSTD_DDict + * + * Return: A lower bound on the size of the workspace that is passed to + * ZSTD_initDDict(). + */ +size_t ZSTD_DDictWorkspaceBound(void); + +/** + * struct ZSTD_DDict - a digested dictionary to be used for decompression + */ +typedef struct ZSTD_DDict_s ZSTD_DDict; + +/** + * ZSTD_initDDict() - initialize a digested dictionary for decompression + * @dictBuffer: The dictionary to digest. The buffer is referenced by the + * ZSTD_DDict so it must outlive the returned ZSTD_DDict. + * @dictSize: The size of the dictionary. + * @workspace: The workspace. It must outlive the returned ZSTD_DDict. + * @workspaceSize: The workspace size. Must be at least + * ZSTD_DDictWorkspaceBound(). + * + * When decompressing multiple messages / blocks with the same dictionary it is + * recommended to load it just once. The ZSTD_DDict merely references the + * dictBuffer, so it must outlive the returned ZSTD_DDict. + * + * Return: The digested dictionary emplaced into workspace. + */ +ZSTD_DDict *ZSTD_initDDict(const void *dictBuffer, size_t dictSize, + void *workspace, size_t workspaceSize); + +/** + * ZSTD_decompress_usingDDict() - decompress src into dst using a ZSTD_DDict + * @ctx: The decompression context. + * @dst: The buffer to decompress src into. + * @dstCapacity: The size of the destination buffer. Must be at least as large + * as the decompressed size. If the caller cannot upper bound the + * decompressed size, then it's better to use the streaming API. + * @src: The zstd compressed data to decompress. Multiple concatenated + * frames and skippable frames are allowed. + * @srcSize: The exact size of the data to decompress. 
+ * @ddict: The digested dictionary to use for decompression. The same + * dictionary must've been used to compress the data. + * + * Return: The decompressed size or an error, which can be checked using + * ZSTD_isError(). + */ +size_t ZSTD_decompress_usingDDict(ZSTD_DCtx *dctx, void *dst, + size_t dstCapacity, const void *src, size_t srcSize, + const ZSTD_DDict *ddict); + + +/*-************************** + * Streaming + ***************************/ + +/** + * struct ZSTD_inBuffer - input buffer for streaming + * @src: Start of the input buffer. + * @size: Size of the input buffer. + * @pos: Position where reading stopped. Will be updated. + * Necessarily 0 <= pos <= size. + */ +typedef struct ZSTD_inBuffer_s { + const void *src; + size_t size; + size_t pos; +} ZSTD_inBuffer; + +/** + * struct ZSTD_outBuffer - output buffer for streaming + * @dst: Start of the output buffer. + * @size: Size of the output buffer. + * @pos: Position where writing stopped. Will be updated. + * Necessarily 0 <= pos <= size. + */ +typedef struct ZSTD_outBuffer_s { + void *dst; + size_t size; + size_t pos; +} ZSTD_outBuffer; + + + +/*-***************************************************************************** + * Streaming compression - HowTo + * + * A ZSTD_CStream object is required to track streaming operation. + * Use ZSTD_initCStream() to initialize a ZSTD_CStream object. + * ZSTD_CStream objects can be reused multiple times on consecutive compression + * operations. It is recommended to re-use ZSTD_CStream in situations where many + * streaming operations will be achieved consecutively. Use one separate + * ZSTD_CStream per thread for parallel execution. + * + * Use ZSTD_compressStream() repetitively to consume input stream. + * The function will automatically update both `pos` fields. + * Note that it may not consume the entire input, in which case `pos < size`, + * and it's up to the caller to present again remaining data. 
+ * It returns a hint for the preferred number of bytes to use as an input for + * the next function call. + * + * At any moment, it's possible to flush whatever data remains within internal + * buffer, using ZSTD_flushStream(). `output->pos` will be updated. There might + * still be some content left within the internal buffer if `output->size` is + * too small. It returns the number of bytes left in the internal buffer and + * must be called until it returns 0. + * + * ZSTD_endStream() instructs to finish a frame. It will perform a flush and + * write frame epilogue. The epilogue is required for decoders to consider a + * frame completed. Similar to ZSTD_flushStream(), it may not be able to flush + * the full content if `output->size` is too small. In which case, call again + * ZSTD_endStream() to complete the flush. It returns the number of bytes left + * in the internal buffer and must be called until it returns 0. + ******************************************************************************/ + +/** + * ZSTD_CStreamWorkspaceBound() - memory needed to initialize a ZSTD_CStream + * @cParams: The compression parameters to be used for compression. + * + * Return: A lower bound on the size of the workspace that is passed to + * ZSTD_initCStream() and ZSTD_initCStream_usingCDict(). + */ +size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams); + +/** + * struct ZSTD_CStream - the zstd streaming compression context + */ +typedef struct ZSTD_CStream_s ZSTD_CStream; + +/*===== ZSTD_CStream management functions =====*/ +/** + * ZSTD_initCStream() - initialize a zstd streaming compression context + * @params: The zstd compression parameters. + * @pledgedSrcSize: If params.fParams.contentSizeFlag == 1 then the caller must + * pass the source size (zero means empty source). Otherwise, + * the caller may optionally pass the source size, or zero if + * unknown. + * @workspace: The workspace to emplace the context into. 
It must outlive + * the returned context. + * @workspaceSize: The size of workspace. + * Use ZSTD_CStreamWorkspaceBound(params.cParams) to determine + * how large the workspace must be. + * + * Return: The zstd streaming compression context. + */ +ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params, + unsigned long long pledgedSrcSize, void *workspace, + size_t workspaceSize); + +/** + * ZSTD_initCStream_usingCDict() - initialize a streaming compression context + * @cdict: The digested dictionary to use for compression. + * @pledgedSrcSize: Optionally the source size, or zero if unknown. + * @workspace: The workspace to emplace the context into. It must outlive + * the returned context. + * @workspaceSize: The size of workspace. Call ZSTD_CStreamWorkspaceBound() + * with the cParams used to initialize the cdict to determine + * how large the workspace must be. + * + * Return: The zstd streaming compression context. + */ +ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict, + unsigned long long pledgedSrcSize, void *workspace, + size_t workspaceSize); + +/*===== Streaming compression functions =====*/ +/** + * ZSTD_resetCStream() - reset the context using parameters from creation + * @zcs: The zstd streaming compression context to reset. + * @pledgedSrcSize: Optionally the source size, or zero if unknown. + * + * Resets the context using the parameters from creation. Skips dictionary + * loading, since it can be reused. If `pledgedSrcSize` is non-zero the frame + * content size is always written into the frame header. + * + * Return: Zero or an error, which can be checked using ZSTD_isError(). + */ +size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize); +/** + * ZSTD_compressStream() - streaming compress some of input into output + * @zcs: The zstd streaming compression context. + * @output: Destination buffer. `output->pos` is updated to indicate how much + * compressed data was written. + * @input: Source buffer. 
`input->pos` is updated to indicate how much data was + * read. Note that it may not consume the entire input, in which case + * `input->pos < input->size`, and it's up to the caller to present + * remaining data again. + * + * The `input` and `output` buffers may be any size. Guaranteed to make some + * forward progress if `input` and `output` are not empty. + * + * Return: A hint for the number of bytes to use as the input for the next + * function call or an error, which can be checked using + * ZSTD_isError(). + */ +size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output, + ZSTD_inBuffer *input); +/** + * ZSTD_flushStream() - flush internal buffers into output + * @zcs: The zstd streaming compression context. + * @output: Destination buffer. `output->pos` is updated to indicate how much + * compressed data was written. + * + * ZSTD_flushStream() must be called until it returns 0, meaning all the data + * has been flushed. Since ZSTD_flushStream() causes a block to be ended, + * calling it too often will degrade the compression ratio. + * + * Return: The number of bytes still present within internal buffers or an + * error, which can be checked using ZSTD_isError(). + */ +size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output); +/** + * ZSTD_endStream() - flush internal buffers into output and end the frame + * @zcs: The zstd streaming compression context. + * @output: Destination buffer. `output->pos` is updated to indicate how much + * compressed data was written. + * + * ZSTD_endStream() must be called until it returns 0, meaning all the data has + * been flushed and the frame epilogue has been written. + * + * Return: The number of bytes still present within internal buffers or an + * error, which can be checked using ZSTD_isError(). + */ +size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output); + +/** + * ZSTD_CStreamInSize() - recommended size for the input buffer + * + * Return: The recommended size for the input buffer. 
+ */ +size_t ZSTD_CStreamInSize(void); +/** + * ZSTD_CStreamOutSize() - recommended size for the output buffer + * + * When the output buffer is at least this large, it is guaranteed to be large + * enough to flush at least one complete compressed block. + * + * Return: The recommended size for the output buffer. + */ +size_t ZSTD_CStreamOutSize(void); + + + +/*-***************************************************************************** + * Streaming decompression - HowTo + * + * A ZSTD_DStream object is required to track streaming operations. + * Use ZSTD_initDStream() to initialize a ZSTD_DStream object. + * ZSTD_DStream objects can be re-used multiple times. + * + * Use ZSTD_decompressStream() repetitively to consume your input. + * The function will update both `pos` fields. + * If `input->pos < input->size`, some input has not been consumed. + * It's up to the caller to present again remaining data. + * If `output->pos < output->size`, decoder has flushed everything it could. + * Returns 0 iff a frame is completely decoded and fully flushed. + * Otherwise it returns a suggested next input size that will never load more + * than the current frame. + ******************************************************************************/ + +/** + * ZSTD_DStreamWorkspaceBound() - memory needed to initialize a ZSTD_DStream + * @maxWindowSize: The maximum window size allowed for compressed frames. + * + * Return: A lower bound on the size of the workspace that is passed to + * ZSTD_initDStream() and ZSTD_initDStream_usingDDict(). + */ +size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize); + +/** + * struct ZSTD_DStream - the zstd streaming decompression context + */ +typedef struct ZSTD_DStream_s ZSTD_DStream; +/*===== ZSTD_DStream management functions =====*/ +/** + * ZSTD_initDStream() - initialize a zstd streaming decompression context + * @maxWindowSize: The maximum window size allowed for compressed frames. 
+ * @workspace: The workspace to emplace the context into. It must outlive + * the returned context. + * @workspaceSize: The size of workspace. + * Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine + * how large the workspace must be. + * + * Return: The zstd streaming decompression context. + */ +ZSTD_DStream *ZSTD_initDStream(size_t maxWindowSize, void *workspace, + size_t workspaceSize); +/** + * ZSTD_initDStream_usingDDict() - initialize streaming decompression context + * @maxWindowSize: The maximum window size allowed for compressed frames. + * @ddict: The digested dictionary to use for decompression. + * @workspace: The workspace to emplace the context into. It must outlive + * the returned context. + * @workspaceSize: The size of workspace. + * Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine + * how large the workspace must be. + * + * Return: The zstd streaming decompression context. + */ +ZSTD_DStream *ZSTD_initDStream_usingDDict(size_t maxWindowSize, + const ZSTD_DDict *ddict, void *workspace, size_t workspaceSize); + +/*===== Streaming decompression functions =====*/ +/** + * ZSTD_resetDStream() - reset the context using parameters from creation + * @zds: The zstd streaming decompression context to reset. + * + * Resets the context using the parameters from creation. Skips dictionary + * loading, since it can be reused. + * + * Return: Zero or an error, which can be checked using ZSTD_isError(). + */ +size_t ZSTD_resetDStream(ZSTD_DStream *zds); +/** + * ZSTD_decompressStream() - streaming decompress some of input into output + * @zds: The zstd streaming decompression context. + * @output: Destination buffer. `output.pos` is updated to indicate how much + * decompressed data was written. + * @input: Source buffer. `input.pos` is updated to indicate how much data was + * read. Note that it may not consume the entire input, in which case + * `input.pos < input.size`, and it's up to the caller to present + * remaining data again. 
+ * + * The `input` and `output` buffers may be any size. Guaranteed to make some + * forward progress if `input` and `output` are not empty. + * ZSTD_decompressStream() will not consume the last byte of the frame until + * the entire frame is flushed. + * + * Return: Returns 0 iff a frame is completely decoded and fully flushed. + * Otherwise returns a hint for the number of bytes to use as the input + * for the next function call or an error, which can be checked using + * ZSTD_isError(). The size hint will never load more than the frame. + */ +size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, + ZSTD_inBuffer *input); + +/** + * ZSTD_DStreamInSize() - recommended size for the input buffer + * + * Return: The recommended size for the input buffer. + */ +size_t ZSTD_DStreamInSize(void); +/** + * ZSTD_DStreamOutSize() - recommended size for the output buffer + * + * When the output buffer is at least this large, it is guaranteed to be large + * enough to flush at least one complete decompressed block. + * + * Return: The recommended size for the output buffer. + */ +size_t ZSTD_DStreamOutSize(void); + + +/* --- Constants ---*/ +#define ZSTD_MAGICNUMBER 0xFD2FB528 /* >= v0.8.0 */ +#define ZSTD_MAGIC_SKIPPABLE_START 0x184D2A50U + +#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1) +#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2) + +#define ZSTD_WINDOWLOG_MAX_32 27 +#define ZSTD_WINDOWLOG_MAX_64 27 +#define ZSTD_WINDOWLOG_MAX \ + ((unsigned int)(sizeof(size_t) == 4 \ + ? 
ZSTD_WINDOWLOG_MAX_32 \ + : ZSTD_WINDOWLOG_MAX_64)) +#define ZSTD_WINDOWLOG_MIN 10 +#define ZSTD_HASHLOG_MAX ZSTD_WINDOWLOG_MAX +#define ZSTD_HASHLOG_MIN 6 +#define ZSTD_CHAINLOG_MAX (ZSTD_WINDOWLOG_MAX+1) +#define ZSTD_CHAINLOG_MIN ZSTD_HASHLOG_MIN +#define ZSTD_HASHLOG3_MAX 17 +#define ZSTD_SEARCHLOG_MAX (ZSTD_WINDOWLOG_MAX-1) +#define ZSTD_SEARCHLOG_MIN 1 +/* only for ZSTD_fast, other strategies are limited to 6 */ +#define ZSTD_SEARCHLENGTH_MAX 7 +/* only for ZSTD_btopt, other strategies are limited to 4 */ +#define ZSTD_SEARCHLENGTH_MIN 3 +#define ZSTD_TARGETLENGTH_MIN 4 +#define ZSTD_TARGETLENGTH_MAX 999 + +/* for static allocation */ +#define ZSTD_FRAMEHEADERSIZE_MAX 18 +#define ZSTD_FRAMEHEADERSIZE_MIN 6 +static const size_t ZSTD_frameHeaderSize_prefix = 5; +static const size_t ZSTD_frameHeaderSize_min = ZSTD_FRAMEHEADERSIZE_MIN; +static const size_t ZSTD_frameHeaderSize_max = ZSTD_FRAMEHEADERSIZE_MAX; +/* magic number + skippable frame length */ +static const size_t ZSTD_skippableHeaderSize = 8; + + +/*-************************************* + * Compressed size functions + **************************************/ + +/** + * ZSTD_findFrameCompressedSize() - returns the size of a compressed frame + * @src: Source buffer. It should point to the start of a zstd encoded frame + * or a skippable frame. + * @srcSize: The size of the source buffer. It must be at least as large as the + * size of the frame. + * + * Return: The compressed size of the frame pointed to by `src` or an error, + * which can be check with ZSTD_isError(). + * Suitable to pass to ZSTD_decompress() or similar functions. + */ +size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize); + +/*-************************************* + * Decompressed size functions + **************************************/ +/** + * ZSTD_getFrameContentSize() - returns the content size in a zstd frame header + * @src: It should point to the start of a zstd encoded frame. 
+ * @srcSize: The size of the source buffer. It must be at least as large as the + * frame header. `ZSTD_frameHeaderSize_max` is always large enough. + * + * Return: The frame content size stored in the frame header if known. + * `ZSTD_CONTENTSIZE_UNKNOWN` if the content size isn't stored in the + * frame header. `ZSTD_CONTENTSIZE_ERROR` on invalid input. + */ +unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize); + +/** + * ZSTD_findDecompressedSize() - returns decompressed size of a series of frames + * @src: It should point to the start of a series of zstd encoded and/or + * skippable frames. + * @srcSize: The exact size of the series of frames. + * + * If any zstd encoded frame in the series doesn't have the frame content size + * set, `ZSTD_CONTENTSIZE_UNKNOWN` is returned. But frame content size is always + * set when using ZSTD_compress(). The decompressed size can be very large. + * If the source is untrusted, the decompressed size could be wrong or + * intentionally modified. Always ensure the result fits within the + * application's authorized limits. ZSTD_findDecompressedSize() handles multiple + * frames, and so it must traverse the input to read each frame header. This is + * efficient as most of the data is skipped, however it does mean that all frame + * data must be present and valid. + * + * Return: Decompressed size of all the data contained in the frames if known. + * `ZSTD_CONTENTSIZE_UNKNOWN` if the decompressed size is unknown. + * `ZSTD_CONTENTSIZE_ERROR` if an error occurred. + */ +unsigned long long ZSTD_findDecompressedSize(const void *src, size_t srcSize); + +/*-************************************* + * Advanced compression functions + **************************************/ +/** + * ZSTD_checkCParams() - ensure parameter values remain within authorized range + * @cParams: The zstd compression parameters. + * + * Return: Zero or an error, which can be checked using ZSTD_isError(). 
+ */ +size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams); + +/** + * ZSTD_adjustCParams() - optimize parameters for a given srcSize and dictSize + * @srcSize: Optionally the estimated source size, or zero if unknown. + * @dictSize: Optionally the estimated dictionary size, or zero if unknown. + * + * Return: The optimized parameters. + */ +ZSTD_compressionParameters ZSTD_adjustCParams( + ZSTD_compressionParameters cParams, unsigned long long srcSize, + size_t dictSize); + +/*--- Advanced decompression functions ---*/ + +/** + * ZSTD_isFrame() - returns true iff the buffer starts with a valid frame + * @buffer: The source buffer to check. + * @size: The size of the source buffer, must be at least 4 bytes. + * + * Return: True iff the buffer starts with a zstd or skippable frame identifier. + */ +unsigned int ZSTD_isFrame(const void *buffer, size_t size); + +/** + * ZSTD_getDictID_fromDict() - returns the dictionary id stored in a dictionary + * @dict: The dictionary buffer. + * @dictSize: The size of the dictionary buffer. + * + * Return: The dictionary id stored within the dictionary or 0 if the + * dictionary is not a zstd dictionary. If it returns 0 the + * dictionary can still be loaded as a content-only dictionary. + */ +unsigned int ZSTD_getDictID_fromDict(const void *dict, size_t dictSize); + +/** + * ZSTD_getDictID_fromDDict() - returns the dictionary id stored in a ZSTD_DDict + * @ddict: The ddict to find the id of. + * + * Return: The dictionary id stored within `ddict` or 0 if the dictionary is not + * a zstd dictionary. If it returns 0 `ddict` will be loaded as a + * content-only dictionary. + */ +unsigned int ZSTD_getDictID_fromDDict(const ZSTD_DDict *ddict); + +/** + * ZSTD_getDictID_fromFrame() - returns the dictionary id stored in a zstd frame + * @src: Source buffer. It must be a zstd encoded frame. + * @srcSize: The size of the source buffer. It must be at least as large as the + * frame header. 
`ZSTD_frameHeaderSize_max` is always large enough. + * + * Return: The dictionary id required to decompress the frame stored within + * `src` or 0 if the dictionary id could not be decoded. It can return + * 0 if the frame does not require a dictionary, the dictionary id + * wasn't stored in the frame, `src` is not a zstd frame, or `srcSize` + * is too small. + */ +unsigned int ZSTD_getDictID_fromFrame(const void *src, size_t srcSize); + +/** + * struct ZSTD_frameParams - zstd frame parameters stored in the frame header + * @frameContentSize: The frame content size, or 0 if not present. + * @windowSize: The window size, or 0 if the frame is a skippable frame. + * @dictID: The dictionary id, or 0 if not present. + * @checksumFlag: Whether a checksum was used. + */ +typedef struct { + unsigned long long frameContentSize; + unsigned int windowSize; + unsigned int dictID; + unsigned int checksumFlag; +} ZSTD_frameParams; + +/** + * ZSTD_getFrameParams() - extracts parameters from a zstd or skippable frame + * @fparamsPtr: On success the frame parameters are written here. + * @src: The source buffer. It must point to a zstd or skippable frame. + * @srcSize: The size of the source buffer. `ZSTD_frameHeaderSize_max` is + * always large enough to succeed. + * + * Return: 0 on success. If more data is required it returns how many bytes + * must be provided to make forward progress. Otherwise it returns + * an error, which can be checked using ZSTD_isError(). + */ +size_t ZSTD_getFrameParams(ZSTD_frameParams *fparamsPtr, const void *src, + size_t srcSize); + +/*-***************************************************************************** + * Buffer-less and synchronous inner streaming functions + * + * This is an advanced API, giving full control over buffer management, for + * users which need direct control over memory. + * But it's also a complex one, with many restrictions (documented below). 
+ * Prefer using normal streaming API for an easier experience
+ ******************************************************************************/
+
+/*-*****************************************************************************
+ * Buffer-less streaming compression (synchronous mode)
+ *
+ * A ZSTD_CCtx object is required to track streaming operations.
+ * Use ZSTD_initCCtx() to initialize a context.
+ * ZSTD_CCtx object can be re-used multiple times within successive compression
+ * operations.
+ *
+ * Start by initializing a context.
+ * Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary
+ * compression,
+ * or ZSTD_compressBegin_advanced(), for finer parameter control.
+ * It's also possible to duplicate a reference context which has already been
+ * initialized, using ZSTD_copyCCtx()
+ *
+ * Then, consume your input using ZSTD_compressContinue().
+ * There are some important considerations to keep in mind when using this
+ * advanced function :
+ * - ZSTD_compressContinue() has no internal buffer. It uses externally provided
+ * buffer only.
+ * - Interface is synchronous : input is consumed entirely and produces one or
+ * more compressed blocks.
+ * - Caller must ensure there is enough space in `dst` to store compressed data
+ * under worst case scenario. Worst case evaluation is provided by
+ * ZSTD_compressBound().
+ * ZSTD_compressContinue() doesn't guarantee recovery after a failed
+ * compression.
+ * - ZSTD_compressContinue() presumes prior input ***is still accessible and
+ * unmodified*** (up to maximum distance size, see WindowLog).
+ * It remembers all previous contiguous blocks, plus one separated memory
+ * segment (which can itself consist of multiple contiguous blocks)
+ * - ZSTD_compressContinue() detects that prior input has been overwritten when
+ * `src` buffer overlaps. In which case, it will "discard" the relevant memory
+ * section from its history.
+ * + * Finish a frame with ZSTD_compressEnd(), which will write the last block(s) + * and optional checksum. It's possible to use srcSize==0, in which case, it + * will write a final empty block to end the frame. Without last block mark, + * frames will be considered unfinished (corrupted) by decoders. + * + * `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress some new + * frame. + ******************************************************************************/ + +/*===== Buffer-less streaming compression functions =====*/ +size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel); +size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict, + size_t dictSize, int compressionLevel); +size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict, + size_t dictSize, ZSTD_parameters params, + unsigned long long pledgedSrcSize); +size_t ZSTD_copyCCtx(ZSTD_CCtx *cctx, const ZSTD_CCtx *preparedCCtx, + unsigned long long pledgedSrcSize); +size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict, + unsigned long long pledgedSrcSize); +size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize); +size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize); + + + +/*-***************************************************************************** + * Buffer-less streaming decompression (synchronous mode) + * + * A ZSTD_DCtx object is required to track streaming operations. + * Use ZSTD_initDCtx() to initialize a context. + * A ZSTD_DCtx object can be re-used multiple times. + * + * First typical operation is to retrieve frame parameters, using + * ZSTD_getFrameParams(). It fills a ZSTD_frameParams structure which provide + * important information to correctly decode the frame, such as the minimum + * rolling buffer size to allocate to decompress data (`windowSize`), and the + * dictionary ID used. 
+ * Note: content size is optional, it may not be present. 0 means unknown. + * Note that these values could be wrong, either because of data malformation, + * or because an attacker is spoofing deliberate false information. As a + * consequence, check that values remain within valid application range, + * especially `windowSize`, before allocation. Each application can set its own + * limit, depending on local restrictions. For extended interoperability, it is + * recommended to support at least 8 MB. + * Frame parameters are extracted from the beginning of the compressed frame. + * Data fragment must be large enough to ensure successful decoding, typically + * `ZSTD_frameHeaderSize_max` bytes. + * Result: 0: successful decoding, the `ZSTD_frameParams` structure is filled. + * >0: `srcSize` is too small, provide at least this many bytes. + * errorCode, which can be tested using ZSTD_isError(). + * + * Start decompression, with ZSTD_decompressBegin() or + * ZSTD_decompressBegin_usingDict(). Alternatively, you can copy a prepared + * context, using ZSTD_copyDCtx(). + * + * Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() + * alternatively. + * ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' + * to ZSTD_decompressContinue(). + * ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will + * fail. + * + * The result of ZSTD_decompressContinue() is the number of bytes regenerated + * within 'dst' (necessarily <= dstCapacity). It can be zero, which is not an + * error; it just means ZSTD_decompressContinue() has decoded some metadata + * item. It can also be an error code, which can be tested with ZSTD_isError(). + * + * ZSTD_decompressContinue() needs previous data blocks during decompression, up + * to `windowSize`. They should preferably be located contiguously, prior to + * current block. Alternatively, a round buffer of sufficient size is also + * possible. 
Sufficient size is determined by frame parameters.
+ * ZSTD_decompressContinue() is very sensitive to contiguity, if 2 blocks don't
+ * follow each other, make sure that either the compressor breaks contiguity at
+ * the same place, or that previous contiguous segment is large enough to
+ * properly handle maximum back-reference.
+ *
+ * A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
+ * Context can then be reset to start a new decompression.
+ *
+ * Note: it's possible to know if next input to present is a header or a block,
+ * using ZSTD_nextInputType(). This information is not required to properly
+ * decode a frame.
+ *
+ * == Special case: skippable frames ==
+ *
+ * Skippable frames allow integration of user-defined data into a flow of
+ * concatenated frames. Skippable frames will be ignored (skipped) by a
+ * decompressor. The format of skippable frames is as follows:
+ * a) Skippable frame ID - 4 Bytes, Little endian format, any value from
+ * 0x184D2A50 to 0x184D2A5F
+ * b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
+ * c) Frame Content - any content (User Data) of length equal to Frame Size
+ * For skippable frames ZSTD_decompressContinue() always returns 0.
+ * For skippable frames ZSTD_getFrameParams() returns fparamsPtr->windowLog==0,
+ * which means that the frame is skippable.
+ * Note: If fparamsPtr->frameContentSize==0, it is ambiguous: the frame might
+ * actually be a zstd encoded frame with no content. For purposes of
+ * decompression, it is valid in both cases to skip the frame using
+ * ZSTD_findFrameCompressedSize() to find its size in bytes.
+ * It also returns frame size as fparamsPtr->frameContentSize.
+ ******************************************************************************/ + +/*===== Buffer-less streaming decompression functions =====*/ +size_t ZSTD_decompressBegin(ZSTD_DCtx *dctx); +size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx *dctx, const void *dict, + size_t dictSize); +void ZSTD_copyDCtx(ZSTD_DCtx *dctx, const ZSTD_DCtx *preparedDCtx); +size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx *dctx); +size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize); +typedef enum { + ZSTDnit_frameHeader, + ZSTDnit_blockHeader, + ZSTDnit_block, + ZSTDnit_lastBlock, + ZSTDnit_checksum, + ZSTDnit_skippableFrame +} ZSTD_nextInputType_e; +ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx *dctx); + +/*-***************************************************************************** + * Block functions + * + * Block functions produce and decode raw zstd blocks, without frame metadata. + * Frame metadata cost is typically ~18 bytes, which can be non-negligible for + * very small blocks (< 100 bytes). User will have to take in charge required + * information to regenerate data, such as compressed and content sizes. + * + * A few rules to respect: + * - Compressing and decompressing require a context structure + * + Use ZSTD_initCCtx() and ZSTD_initDCtx() + * - It is necessary to init context before starting + * + compression : ZSTD_compressBegin() + * + decompression : ZSTD_decompressBegin() + * + variants _usingDict() are also allowed + * + copyCCtx() and copyDCtx() work too + * - Block size is limited, it must be <= ZSTD_getBlockSizeMax() + * + If you need to compress more, cut data into multiple blocks + * + Consider using the regular ZSTD_compress() instead, as frame metadata + * costs become negligible when source size is large. + * - When a block is considered not compressible enough, ZSTD_compressBlock() + * result will be zero. In which case, nothing is produced into `dst`. 
+ * + User must test for such outcome and deal directly with uncompressed data + * + ZSTD_decompressBlock() doesn't accept uncompressed data as input!!! + * + In case of multiple successive blocks, decoder must be informed of + * uncompressed block existence to follow proper history. Use + * ZSTD_insertBlock() in such a case. + ******************************************************************************/ + +/* Define for static allocation */ +#define ZSTD_BLOCKSIZE_ABSOLUTEMAX (128 * 1024) +/*===== Raw zstd block functions =====*/ +size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx); +size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize); +size_t ZSTD_decompressBlock(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize); +size_t ZSTD_insertBlock(ZSTD_DCtx *dctx, const void *blockStart, + size_t blockSize); + +#endif /* ZSTD_H */ diff --git a/contrib/linux-kernel/lib/Kconfig.diff b/contrib/linux-kernel/lib/Kconfig.diff new file mode 100644 index 000000000..07ae5398f --- /dev/null +++ b/contrib/linux-kernel/lib/Kconfig.diff @@ -0,0 +1,17 @@ +diff --git a/lib/Kconfig b/lib/Kconfig +index 260a80e..39d9347 100644 +--- a/lib/Kconfig ++++ b/lib/Kconfig +@@ -239,6 +239,12 @@ config LZ4HC_COMPRESS + config LZ4_DECOMPRESS + tristate + ++config ZSTD_COMPRESS ++ tristate ++ ++config ZSTD_DECOMPRESS ++ tristate ++ + source "lib/xz/Kconfig" + + # diff --git a/contrib/linux-kernel/lib/Makefile.diff b/contrib/linux-kernel/lib/Makefile.diff new file mode 100644 index 000000000..be6182b39 --- /dev/null +++ b/contrib/linux-kernel/lib/Makefile.diff @@ -0,0 +1,13 @@ +diff --git a/lib/Makefile b/lib/Makefile +index 50144a3..b30a998 100644 +--- a/lib/Makefile ++++ b/lib/Makefile +@@ -106,6 +106,8 @@ obj-$(CONFIG_LZO_DECOMPRESS) += lzo/ + obj-$(CONFIG_LZ4_COMPRESS) += lz4/ + obj-$(CONFIG_LZ4HC_COMPRESS) += lz4/ + obj-$(CONFIG_LZ4_DECOMPRESS) += lz4/ ++obj-$(CONFIG_ZSTD_COMPRESS) += zstd/ 
++obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd/ + obj-$(CONFIG_XZ_DEC) += xz/ + obj-$(CONFIG_RAID6_PQ) += raid6/ + diff --git a/contrib/linux-kernel/lib/zstd/Makefile b/contrib/linux-kernel/lib/zstd/Makefile new file mode 100644 index 000000000..067f68d19 --- /dev/null +++ b/contrib/linux-kernel/lib/zstd/Makefile @@ -0,0 +1,9 @@ +obj-$(CONFIG_ZSTD_COMPRESS) += zstd_compress.o +obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd_decompress.o + +ccflags-y += -O3 + +zstd_compress-y := entropy_common.o fse_decompress.o xxhash.o zstd_common.o \ + fse_compress.o huf_compress.o compress.o +zstd_decompress-y := entropy_common.o fse_decompress.o xxhash.o zstd_common.o \ + huf_decompress.o decompress.o diff --git a/contrib/linux-kernel/lib/zstd/bitstream.h b/contrib/linux-kernel/lib/zstd/bitstream.h new file mode 100644 index 000000000..9d2154082 --- /dev/null +++ b/contrib/linux-kernel/lib/zstd/bitstream.h @@ -0,0 +1,391 @@ +/* ****************************************************************** + bitstream + Part of FSE library + header file (to include) + Copyright (C) 2013-2016, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - Source repository : https://github.com/Cyan4973/FiniteStateEntropy +****************************************************************** */ +#ifndef BITSTREAM_H_MODULE +#define BITSTREAM_H_MODULE + +/* +* This API consists of small unitary functions, which must be inlined for best performance. +* Since link-time-optimization is not available for all compilers, +* these functions are defined into a .h to be included. +*/ + +/*-**************************************** +* Dependencies +******************************************/ +#include "mem.h" /* unaligned access routines */ +#include "error_private.h" /* error codes and messages */ + + +/*========================================= +* Target specific +=========================================*/ +#define STREAM_ACCUMULATOR_MIN_32 25 +#define STREAM_ACCUMULATOR_MIN_64 57 +#define STREAM_ACCUMULATOR_MIN ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64)) + +/*-****************************************** +* bitStream encoding API (write forward) +********************************************/ +/* bitStream can mix input from multiple sources. +* A critical property of these streams is that they encode and decode in **reverse** direction. +* So the first bit sequence you add will be the last to be read, like a LIFO stack. 
+*/ +typedef struct +{ + size_t bitContainer; + int bitPos; + char* startPtr; + char* ptr; + char* endPtr; +} BIT_CStream_t; + +MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity); +MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits); +MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC); +MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC); + +/* Start with initCStream, providing the size of buffer to write into. +* bitStream will never write outside of this buffer. +* `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code. +* +* bits are first added to a local register. +* Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems. +* Writing data into memory is an explicit operation, performed by the flushBits function. +* Hence keep track how many bits are potentially stored into local register to avoid register overflow. +* After a flushBits, a maximum of 7 bits might still be stored into local register. +* +* Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers. +* +* Last operation is to close the bitStream. +* The function returns the final size of CStream in bytes. +* If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable) +*/ + + +/*-******************************************** +* bitStream decoding API (read backward) +**********************************************/ +typedef struct +{ + size_t bitContainer; + unsigned bitsConsumed; + const char* ptr; + const char* start; +} BIT_DStream_t; + +typedef enum { BIT_DStream_unfinished = 0, + BIT_DStream_endOfBuffer = 1, + BIT_DStream_completed = 2, + BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */ + /* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... 
:( */ + +MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize); +MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits); +MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD); +MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD); + + +/* Start by invoking BIT_initDStream(). +* A chunk of the bitStream is then stored into a local register. +* Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t). +* You can then retrieve bitFields stored into the local register, **in reverse order**. +* Local register is explicitly reloaded from memory by the BIT_reloadDStream() method. +* A reload guarantee a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished. +* Otherwise, it can be less than that, so proceed accordingly. +* Checking if DStream has reached its end can be performed with BIT_endOfDStream(). +*/ + + +/*-**************************************** +* unsafe API +******************************************/ +MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits); +/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */ + +MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC); +/* unsafe version; does not check buffer overflow */ + +MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits); +/* faster, but works only if nbBits >= 1 */ + + + +/*-************************************************************** +* Internal functions +****************************************************************/ +MEM_STATIC unsigned BIT_highbit32 (register U32 val) +{ +# if defined(_MSC_VER) /* Visual */ + unsigned long r=0; + _BitScanReverse ( &r, val ); + return (unsigned) r; +# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */ + return 31 - __builtin_clz (val); +# else /* Software version */ + static const unsigned DeBruijnClz[32] = { 0, 9, 
1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; + U32 v = val; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27]; +# endif +} + +/*===== Local Constants =====*/ +static const unsigned BIT_mask[] = { 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF, 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF, 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF }; /* up to 26 bits */ + + +/*-************************************************************** +* bitStream encoding +****************************************************************/ +/*! BIT_initCStream() : + * `dstCapacity` must be > sizeof(void*) + * @return : 0 if success, + otherwise an error code (can be tested using ERR_isError() ) */ +MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* startPtr, size_t dstCapacity) +{ + bitC->bitContainer = 0; + bitC->bitPos = 0; + bitC->startPtr = (char*)startPtr; + bitC->ptr = bitC->startPtr; + bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->ptr); + if (dstCapacity <= sizeof(bitC->ptr)) return ERROR(dstSize_tooSmall); + return 0; +} + +/*! BIT_addBits() : + can add up to 26 bits into `bitC`. + Does not check for register overflow ! */ +MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits) +{ + bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos; + bitC->bitPos += nbBits; +} + +/*! BIT_addBitsFast() : + * works only if `value` is _clean_, meaning all high bits above nbBits are 0 */ +MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits) +{ + bitC->bitContainer |= value << bitC->bitPos; + bitC->bitPos += nbBits; +} + +/*! 
BIT_flushBitsFast() : + * unsafe version; does not check buffer overflow */ +MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC) +{ + size_t const nbBytes = bitC->bitPos >> 3; + MEM_writeLEST(bitC->ptr, bitC->bitContainer); + bitC->ptr += nbBytes; + bitC->bitPos &= 7; + bitC->bitContainer >>= nbBytes*8; /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */ +} + +/*! BIT_flushBits() : + * safe version; check for buffer overflow, and prevents it. + * note : does not signal buffer overflow. This will be revealed later on using BIT_closeCStream() */ +MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC) +{ + size_t const nbBytes = bitC->bitPos >> 3; + MEM_writeLEST(bitC->ptr, bitC->bitContainer); + bitC->ptr += nbBytes; + if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr; + bitC->bitPos &= 7; + bitC->bitContainer >>= nbBytes*8; /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */ +} + +/*! BIT_closeCStream() : + * @return : size of CStream, in bytes, + or 0 if it could not fit into dstBuffer */ +MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC) +{ + BIT_addBitsFast(bitC, 1, 1); /* endMark */ + BIT_flushBits(bitC); + + if (bitC->ptr >= bitC->endPtr) return 0; /* doesn't fit within authorized budget : cancel */ + + return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0); +} + + +/*-******************************************************** +* bitStream decoding +**********************************************************/ +/*! BIT_initDStream() : +* Initialize a BIT_DStream_t. +* `bitD` : a pointer to an already allocated BIT_DStream_t structure. +* `srcSize` must be the *exact* size of the bitStream, in bytes. 
+* @return : size of stream (== srcSize) or an errorCode if a problem is detected +*/ +MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize) +{ + if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); } + + if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */ + bitD->start = (const char*)srcBuffer; + bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer); + bitD->bitContainer = MEM_readLEST(bitD->ptr); + { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; + bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */ + if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ } + } else { + bitD->start = (const char*)srcBuffer; + bitD->ptr = bitD->start; + bitD->bitContainer = *(const BYTE*)(bitD->start); + switch(srcSize) + { + case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16); + case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24); + case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32); + case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24; + case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16; + case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8; + default:; + } + { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1]; + bitD->bitsConsumed = lastByte ? 
8 - BIT_highbit32(lastByte) : 0; + if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ } + bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8; + } + + return srcSize; +} + +MEM_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start) +{ + return bitContainer >> start; +} + +MEM_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits) +{ + return (bitContainer >> start) & BIT_mask[nbBits]; +} + +MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits) +{ + return bitContainer & BIT_mask[nbBits]; +} + +/*! BIT_lookBits() : + * Provides next n bits from local register. + * local register is not modified. + * On 32-bits, maxNbBits==24. + * On 64-bits, maxNbBits==56. + * @return : value extracted + */ + MEM_STATIC size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits) +{ + U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1; + return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask); +} + +/*! BIT_lookBitsFast() : +* unsafe version; only works only if nbBits >= 1 */ +MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits) +{ + U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1; + return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask); +} + +MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits) +{ + bitD->bitsConsumed += nbBits; +} + +/*! BIT_readBits() : + * Read (consume) next n bits from local register and update. + * Pay attention to not read more than nbBits contained into local register. + * @return : extracted value. + */ +MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits) +{ + size_t const value = BIT_lookBits(bitD, nbBits); + BIT_skipBits(bitD, nbBits); + return value; +} + +/*! 
BIT_readBitsFast() : +* unsafe version; only works only if nbBits >= 1 */ +MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits) +{ + size_t const value = BIT_lookBitsFast(bitD, nbBits); + BIT_skipBits(bitD, nbBits); + return value; +} + +/*! BIT_reloadDStream() : +* Refill `bitD` from buffer previously set in BIT_initDStream() . +* This function is safe, it guarantees it will not read beyond src buffer. +* @return : status of `BIT_DStream_t` internal register. + if status == BIT_DStream_unfinished, internal register is filled with >= (sizeof(bitD->bitContainer)*8 - 7) bits */ +MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD) +{ + if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should not happen => corruption detected */ + return BIT_DStream_overflow; + + if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) { + bitD->ptr -= bitD->bitsConsumed >> 3; + bitD->bitsConsumed &= 7; + bitD->bitContainer = MEM_readLEST(bitD->ptr); + return BIT_DStream_unfinished; + } + if (bitD->ptr == bitD->start) { + if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer; + return BIT_DStream_completed; + } + { U32 nbBytes = bitD->bitsConsumed >> 3; + BIT_DStream_status result = BIT_DStream_unfinished; + if (bitD->ptr - nbBytes < bitD->start) { + nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */ + result = BIT_DStream_endOfBuffer; + } + bitD->ptr -= nbBytes; + bitD->bitsConsumed -= nbBytes*8; + bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */ + return result; + } +} + +/*! BIT_endOfDStream() : +* @return Tells if DStream has exactly reached its end (all bits consumed). 
+*/ +MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream) +{ + return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8)); +} + +#endif /* BITSTREAM_H_MODULE */ diff --git a/contrib/linux-kernel/lib/zstd/compress.c b/contrib/linux-kernel/lib/zstd/compress.c new file mode 100644 index 000000000..5f6d955a4 --- /dev/null +++ b/contrib/linux-kernel/lib/zstd/compress.c @@ -0,0 +1,3384 @@ +/** + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + + +/*-************************************* +* Dependencies +***************************************/ +#include +#include +#include /* memset */ +#include "mem.h" +#include "fse.h" +#include "huf.h" +#include "zstd_internal.h" /* includes zstd.h */ + +#ifdef current +# undef current +#endif + +/*-************************************* +* Constants +***************************************/ +static const U32 g_searchStrength = 8; /* control skip over incompressible data */ +#define HASH_READ_SIZE 8 +typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e; + + +/*-************************************* +* Helper functions +***************************************/ +#define ZSTD_STATIC_ASSERT(c) { enum { ZSTD_static_assert = 1/(int)(!!(c)) }; } +size_t ZSTD_compressBound(size_t srcSize) { return FSE_compressBound(srcSize) + 12; } + + +/*-************************************* +* Sequence storage +***************************************/ +static void ZSTD_resetSeqStore(seqStore_t* ssPtr) +{ + ssPtr->lit = ssPtr->litStart; + ssPtr->sequences = ssPtr->sequencesStart; + ssPtr->longLengthID = 0; +} + + +/*-************************************* +* Context memory 
management +***************************************/ +struct ZSTD_CCtx_s { + const BYTE* nextSrc; /* next block here to continue on current prefix */ + const BYTE* base; /* All regular indexes relative to this position */ + const BYTE* dictBase; /* extDict indexes relative to this position */ + U32 dictLimit; /* below that point, need extDict */ + U32 lowLimit; /* below that point, no more data */ + U32 nextToUpdate; /* index from which to continue dictionary update */ + U32 nextToUpdate3; /* index from which to continue dictionary update */ + U32 hashLog3; /* dispatch table : larger == faster, more memory */ + U32 loadedDictEnd; /* index of end of dictionary */ + U32 forceWindow; /* force back-references to respect limit of 1<3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog); + size_t const h3Size = ((size_t)1) << hashLog3; + size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); + size_t const optSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<customMem = customMem; + return cctx; +} + +ZSTD_CCtx* ZSTD_initCCtx(void* workspace, size_t workspaceSize) +{ + ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); + ZSTD_CCtx* cctx = ZSTD_createCCtx_advanced(stackMem); + if (cctx) { + cctx->workSpace = ZSTD_stackAllocAll(cctx->customMem.opaque, &cctx->workSpaceSize); + } + return cctx; +} + +size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx) +{ + if (cctx==NULL) return 0; /* support free on NULL */ + ZSTD_free(cctx->workSpace, cctx->customMem); + ZSTD_free(cctx, cctx->customMem); + return 0; /* reserved as a potential error code in the future */ +} + +const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) /* hidden interface */ +{ + return &(ctx->seqStore); +} + +static ZSTD_parameters ZSTD_getParamsFromCCtx(const ZSTD_CCtx* cctx) +{ + return cctx->params; +} + + +/** ZSTD_checkParams() : + ensure param values remain within authorized range. 
+ @return : 0, or an error code if one value is beyond authorized range */ +size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams) +{ +# define CLAMPCHECK(val,min,max) { if ((valmax)) return ERROR(compressionParameter_unsupported); } + CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX); + CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX); + CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX); + CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX); + CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX); + CLAMPCHECK(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX); + if ((U32)(cParams.strategy) > (U32)ZSTD_btopt2) return ERROR(compressionParameter_unsupported); + return 0; +} + + +/** ZSTD_cycleLog() : + * condition for correct operation : hashLog > 1 */ +static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat) +{ + U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2); + return hashLog - btScale; +} + +/** ZSTD_adjustCParams() : + optimize `cPar` for a given input (`srcSize` and `dictSize`). + mostly downsizing to reduce memory consumption and initialization. + Both `srcSize` and `dictSize` are optional (use 0 if unknown), + but if both are 0, no optimization can be done. + Note : cPar is considered validated at this stage. Use ZSTD_checkParams() to ensure that. */ +ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize) +{ + if (srcSize+dictSize == 0) return cPar; /* no size information available : no adjustment */ + + /* resize params, to use less memory when necessary */ + { U32 const minSrcSize = (srcSize==0) ? 
500 : 0; + U64 const rSize = srcSize + dictSize + minSrcSize; + if (rSize < ((U64)1< srcLog) cPar.windowLog = srcLog; + } } + if (cPar.hashLog > cPar.windowLog) cPar.hashLog = cPar.windowLog; + { U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy); + if (cycleLog > cPar.windowLog) cPar.chainLog -= (cycleLog - cPar.windowLog); + } + + if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN) cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* required for frame header */ + + return cPar; +} + + +static U32 ZSTD_equivalentParams(ZSTD_parameters param1, ZSTD_parameters param2) +{ + return (param1.cParams.hashLog == param2.cParams.hashLog) + & (param1.cParams.chainLog == param2.cParams.chainLog) + & (param1.cParams.strategy == param2.cParams.strategy) + & ((param1.cParams.searchLength==3) == (param2.cParams.searchLength==3)); +} + +/*! ZSTD_continueCCtx() : + reuse CCtx without reset (note : requires no dictionary) */ +static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_parameters params, U64 frameContentSize) +{ + U32 const end = (U32)(cctx->nextSrc - cctx->base); + cctx->params = params; + cctx->frameContentSize = frameContentSize; + cctx->lowLimit = end; + cctx->dictLimit = end; + cctx->nextToUpdate = end+1; + cctx->stage = ZSTDcs_init; + cctx->dictID = 0; + cctx->loadedDictEnd = 0; + { int i; for (i=0; irep[i] = repStartValue[i]; } + cctx->seqStore.litLengthSum = 0; /* force reset of btopt stats */ + XXH64_reset(&cctx->xxhState, 0); + return 0; +} + +typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset, ZSTDcrp_fullReset } ZSTD_compResetPolicy_e; + +/*! 
ZSTD_resetCCtx_advanced() : + note : `params` must be validated */ +static size_t ZSTD_resetCCtx_advanced (ZSTD_CCtx* zc, + ZSTD_parameters params, U64 frameContentSize, + ZSTD_compResetPolicy_e const crp) +{ + if (crp == ZSTDcrp_continue) + if (ZSTD_equivalentParams(params, zc->params)) { + zc->flagStaticTables = 0; + zc->flagStaticHufTable = HUF_repeat_none; + return ZSTD_continueCCtx(zc, params, frameContentSize); + } + + { size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << params.cParams.windowLog); + U32 const divider = (params.cParams.searchLength==3) ? 3 : 4; + size_t const maxNbSeq = blockSize / divider; + size_t const tokenSpace = blockSize + 11*maxNbSeq; + size_t const chainSize = (params.cParams.strategy == ZSTD_fast) ? 0 : (1 << params.cParams.chainLog); + size_t const hSize = ((size_t)1) << params.cParams.hashLog; + U32 const hashLog3 = (params.cParams.searchLength>3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, params.cParams.windowLog); + size_t const h3Size = ((size_t)1) << hashLog3; + size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); + void* ptr; + + /* Check if workSpace is large enough, alloc a new one if needed */ + { size_t const optSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<workSpaceSize < neededSpace) { + ZSTD_free(zc->workSpace, zc->customMem); + zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem); + if (zc->workSpace == NULL) return ERROR(memory_allocation); + zc->workSpaceSize = neededSpace; + } } + + if (crp!=ZSTDcrp_noMemset) memset(zc->workSpace, 0, tableSpace); /* reset tables only */ + XXH64_reset(&zc->xxhState, 0); + zc->hashLog3 = hashLog3; + zc->hashTable = (U32*)(zc->workSpace); + zc->chainTable = zc->hashTable + hSize; + zc->hashTable3 = zc->chainTable + chainSize; + ptr = zc->hashTable3 + h3Size; + zc->hufTable = (HUF_CElt*)ptr; + zc->flagStaticTables = 0; + zc->flagStaticHufTable = HUF_repeat_none; + ptr = ((U32*)ptr) + 256; /* note : HUF_CElt* is incomplete type, size is simulated using U32 */ 
+ + zc->nextToUpdate = 1; + zc->nextSrc = NULL; + zc->base = NULL; + zc->dictBase = NULL; + zc->dictLimit = 0; + zc->lowLimit = 0; + zc->params = params; + zc->blockSize = blockSize; + zc->frameContentSize = frameContentSize; + { int i; for (i=0; irep[i] = repStartValue[i]; } + + if ((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) { + zc->seqStore.litFreq = (U32*)ptr; + zc->seqStore.litLengthFreq = zc->seqStore.litFreq + (1<seqStore.matchLengthFreq = zc->seqStore.litLengthFreq + (MaxLL+1); + zc->seqStore.offCodeFreq = zc->seqStore.matchLengthFreq + (MaxML+1); + ptr = zc->seqStore.offCodeFreq + (MaxOff+1); + zc->seqStore.matchTable = (ZSTD_match_t*)ptr; + ptr = zc->seqStore.matchTable + ZSTD_OPT_NUM+1; + zc->seqStore.priceTable = (ZSTD_optimal_t*)ptr; + ptr = zc->seqStore.priceTable + ZSTD_OPT_NUM+1; + zc->seqStore.litLengthSum = 0; + } + zc->seqStore.sequencesStart = (seqDef*)ptr; + ptr = zc->seqStore.sequencesStart + maxNbSeq; + zc->seqStore.llCode = (BYTE*) ptr; + zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq; + zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq; + zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq; + + zc->stage = ZSTDcs_init; + zc->dictID = 0; + zc->loadedDictEnd = 0; + + return 0; + } +} + +/* ZSTD_invalidateRepCodes() : + * ensures next compression will not use repcodes from previous block. + * Note : only works with regular variant; + * do not use with extDict variant ! */ +void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) { + int i; + for (i=0; irep[i] = 0; +} + +/*! ZSTD_copyCCtx() : +* Duplicate an existing context `srcCCtx` into another one `dstCCtx`. +* Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()). 
+* @return : 0, or an error code */ +size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize) +{ + if (srcCCtx->stage!=ZSTDcs_init) return ERROR(stage_wrong); + + + memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem)); + { ZSTD_parameters params = srcCCtx->params; + params.fParams.contentSizeFlag = (pledgedSrcSize > 0); + ZSTD_resetCCtx_advanced(dstCCtx, params, pledgedSrcSize, ZSTDcrp_noMemset); + } + + /* copy tables */ + { size_t const chainSize = (srcCCtx->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << srcCCtx->params.cParams.chainLog); + size_t const hSize = ((size_t)1) << srcCCtx->params.cParams.hashLog; + size_t const h3Size = (size_t)1 << srcCCtx->hashLog3; + size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32); + memcpy(dstCCtx->workSpace, srcCCtx->workSpace, tableSpace); + } + + /* copy dictionary offsets */ + dstCCtx->nextToUpdate = srcCCtx->nextToUpdate; + dstCCtx->nextToUpdate3= srcCCtx->nextToUpdate3; + dstCCtx->nextSrc = srcCCtx->nextSrc; + dstCCtx->base = srcCCtx->base; + dstCCtx->dictBase = srcCCtx->dictBase; + dstCCtx->dictLimit = srcCCtx->dictLimit; + dstCCtx->lowLimit = srcCCtx->lowLimit; + dstCCtx->loadedDictEnd= srcCCtx->loadedDictEnd; + dstCCtx->dictID = srcCCtx->dictID; + + /* copy entropy tables */ + dstCCtx->flagStaticTables = srcCCtx->flagStaticTables; + dstCCtx->flagStaticHufTable = srcCCtx->flagStaticHufTable; + if (srcCCtx->flagStaticTables) { + memcpy(dstCCtx->litlengthCTable, srcCCtx->litlengthCTable, sizeof(dstCCtx->litlengthCTable)); + memcpy(dstCCtx->matchlengthCTable, srcCCtx->matchlengthCTable, sizeof(dstCCtx->matchlengthCTable)); + memcpy(dstCCtx->offcodeCTable, srcCCtx->offcodeCTable, sizeof(dstCCtx->offcodeCTable)); + } + if (srcCCtx->flagStaticHufTable) { + memcpy(dstCCtx->hufTable, srcCCtx->hufTable, 256*4); + } + + return 0; +} + + +/*! 
ZSTD_reduceTable() : +* reduce table indexes by `reducerValue` */ +static void ZSTD_reduceTable (U32* const table, U32 const size, U32 const reducerValue) +{ + U32 u; + for (u=0 ; u < size ; u++) { + if (table[u] < reducerValue) table[u] = 0; + else table[u] -= reducerValue; + } +} + +/*! ZSTD_reduceIndex() : +* rescale all indexes to avoid future overflow (indexes are U32) */ +static void ZSTD_reduceIndex (ZSTD_CCtx* zc, const U32 reducerValue) +{ + { U32 const hSize = 1 << zc->params.cParams.hashLog; + ZSTD_reduceTable(zc->hashTable, hSize, reducerValue); } + + { U32 const chainSize = (zc->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << zc->params.cParams.chainLog); + ZSTD_reduceTable(zc->chainTable, chainSize, reducerValue); } + + { U32 const h3Size = (zc->hashLog3) ? 1 << zc->hashLog3 : 0; + ZSTD_reduceTable(zc->hashTable3, h3Size, reducerValue); } +} + + +/*-******************************************************* +* Block entropic compression +*********************************************************/ + +/* See doc/zstd_compression_format.md for detailed format description */ + +size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + if (srcSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall); + memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize); + MEM_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw); + return ZSTD_blockHeaderSize+srcSize; +} + + +static size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + BYTE* const ostart = (BYTE* const)dst; + U32 const flSize = 1 + (srcSize>31) + (srcSize>4095); + + if (srcSize + flSize > dstCapacity) return ERROR(dstSize_tooSmall); + + switch(flSize) + { + case 1: /* 2 - 1 - 5 */ + ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3)); + break; + case 2: /* 2 - 2 - 12 */ + MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4))); + break; + default: /*note : should not be necessary : flSize is 
within {1,2,3} */ + case 3: /* 2 - 2 - 20 */ + MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4))); + break; + } + + memcpy(ostart + flSize, src, srcSize); + return srcSize + flSize; +} + +static size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + BYTE* const ostart = (BYTE* const)dst; + U32 const flSize = 1 + (srcSize>31) + (srcSize>4095); + + (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */ + + switch(flSize) + { + case 1: /* 2 - 1 - 5 */ + ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3)); + break; + case 2: /* 2 - 2 - 12 */ + MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4))); + break; + default: /*note : should not be necessary : flSize is necessarily within {1,2,3} */ + case 3: /* 2 - 2 - 20 */ + MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4))); + break; + } + + ostart[flSize] = *(const BYTE*)src; + return flSize+1; +} + + +static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; } + +static size_t ZSTD_compressLiterals (ZSTD_CCtx* zc, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + size_t const minGain = ZSTD_minGain(srcSize); + size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB); + BYTE* const ostart = (BYTE*)dst; + U32 singleStream = srcSize < 256; + symbolEncodingType_e hType = set_compressed; + size_t cLitSize; + + + /* small ? don't even attempt compression (speed opt) */ +# define LITERAL_NOENTROPY 63 + { size_t const minLitSize = zc->flagStaticHufTable == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY; + if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); + } + + if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall); /* not enough space for compression */ + { HUF_repeat repeat = zc->flagStaticHufTable; + int const preferRepeat = zc->params.cParams.strategy < ZSTD_lazy ? 
srcSize <= 1024 : 0; + if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1; + cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, zc->tmpCounters, sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat) + : HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, zc->tmpCounters, sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat); + if (repeat != HUF_repeat_none) { hType = set_repeat; } /* reused the existing table */ + else { zc->flagStaticHufTable = HUF_repeat_check; } /* now have a table to reuse */ + } + + if ((cLitSize==0) | (cLitSize >= srcSize - minGain)) { + zc->flagStaticHufTable = HUF_repeat_none; + return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize); + } + if (cLitSize==1) { + zc->flagStaticHufTable = HUF_repeat_none; + return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize); + } + + /* Build header */ + switch(lhSize) + { + case 3: /* 2 - 2 - 10 - 10 */ + { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14); + MEM_writeLE24(ostart, lhc); + break; + } + case 4: /* 2 - 2 - 14 - 14 */ + { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18); + MEM_writeLE32(ostart, lhc); + break; + } + default: /* should not be necessary, lhSize is only {3,4,5} */ + case 5: /* 2 - 2 - 18 - 18 */ + { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22); + MEM_writeLE32(ostart, lhc); + ostart[4] = (BYTE)(cLitSize >> 10); + break; + } + } + return lhSize+cLitSize; +} + +static const BYTE LL_Code[64] = { 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 16, 17, 17, 18, 18, 19, 19, + 20, 20, 20, 20, 21, 21, 21, 21, + 22, 22, 22, 22, 22, 22, 22, 22, + 23, 23, 23, 23, 23, 23, 23, 23, + 24, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24 }; + +static const BYTE ML_Code[128] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, + 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, + 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, + 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 }; + + +void ZSTD_seqToCodes(const seqStore_t* seqStorePtr) +{ + BYTE const LL_deltaCode = 19; + BYTE const ML_deltaCode = 36; + const seqDef* const sequences = seqStorePtr->sequencesStart; + BYTE* const llCodeTable = seqStorePtr->llCode; + BYTE* const ofCodeTable = seqStorePtr->ofCode; + BYTE* const mlCodeTable = seqStorePtr->mlCode; + U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); + U32 u; + for (u=0; u 63) ? (BYTE)ZSTD_highbit32(llv) + LL_deltaCode : LL_Code[llv]; + ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset); + mlCodeTable[u] = (mlv>127) ? 
(BYTE)ZSTD_highbit32(mlv) + ML_deltaCode : ML_Code[mlv]; + } + if (seqStorePtr->longLengthID==1) + llCodeTable[seqStorePtr->longLengthPos] = MaxLL; + if (seqStorePtr->longLengthID==2) + mlCodeTable[seqStorePtr->longLengthPos] = MaxML; +} + +MEM_STATIC size_t ZSTD_compressSequences (ZSTD_CCtx* zc, + void* dst, size_t dstCapacity, + size_t srcSize) +{ + const int longOffsets = zc->params.cParams.windowLog > STREAM_ACCUMULATOR_MIN; + const seqStore_t* seqStorePtr = &(zc->seqStore); + U32 count[MaxSeq+1]; + S16 norm[MaxSeq+1]; + FSE_CTable* CTable_LitLength = zc->litlengthCTable; + FSE_CTable* CTable_OffsetBits = zc->offcodeCTable; + FSE_CTable* CTable_MatchLength = zc->matchlengthCTable; + U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */ + const seqDef* const sequences = seqStorePtr->sequencesStart; + const BYTE* const ofCodeTable = seqStorePtr->ofCode; + const BYTE* const llCodeTable = seqStorePtr->llCode; + const BYTE* const mlCodeTable = seqStorePtr->mlCode; + BYTE* const ostart = (BYTE*)dst; + BYTE* const oend = ostart + dstCapacity; + BYTE* op = ostart; + size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart; + BYTE* seqHead; + BYTE scratchBuffer[1<litStart; + size_t const litSize = seqStorePtr->lit - literals; + size_t const cSize = ZSTD_compressLiterals(zc, op, dstCapacity, literals, litSize); + if (ZSTD_isError(cSize)) return cSize; + op += cSize; + } + + /* Sequences Header */ + if ((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead */) return ERROR(dstSize_tooSmall); + if (nbSeq < 0x7F) *op++ = (BYTE)nbSeq; + else if (nbSeq < LONGNBSEQ) op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2; + else op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3; + if (nbSeq==0) goto _check_compressibility; + + /* seqHead : flags for FSE encoding type */ + seqHead = op++; + +#define MIN_SEQ_FOR_DYNAMIC_FSE 64 +#define MAX_SEQ_FOR_STATIC_FSE 1000 + + /* convert length/distances into codes */ + ZSTD_seqToCodes(seqStorePtr); + + 
/* CTable for Literal Lengths */ + { U32 max = MaxLL; + size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, zc->tmpCounters); + if ((mostFrequent == nbSeq) && (nbSeq > 2)) { + *op++ = llCodeTable[0]; + FSE_buildCTable_rle(CTable_LitLength, (BYTE)max); + LLtype = set_rle; + } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) { + LLtype = set_repeat; + } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (LL_defaultNormLog-1)))) { + FSE_buildCTable_wksp(CTable_LitLength, LL_defaultNorm, MaxLL, LL_defaultNormLog, scratchBuffer, sizeof(scratchBuffer)); + LLtype = set_basic; + } else { + size_t nbSeq_1 = nbSeq; + const U32 tableLog = FSE_optimalTableLog(LLFSELog, nbSeq, max); + if (count[llCodeTable[nbSeq-1]]>1) { count[llCodeTable[nbSeq-1]]--; nbSeq_1--; } + FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max); + { size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog); /* overflow protected */ + if (FSE_isError(NCountSize)) return NCountSize; + op += NCountSize; } + FSE_buildCTable_wksp(CTable_LitLength, norm, max, tableLog, scratchBuffer, sizeof(scratchBuffer)); + LLtype = set_compressed; + } } + + /* CTable for Offsets */ + { U32 max = MaxOff; + size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, zc->tmpCounters); + if ((mostFrequent == nbSeq) && (nbSeq > 2)) { + *op++ = ofCodeTable[0]; + FSE_buildCTable_rle(CTable_OffsetBits, (BYTE)max); + Offtype = set_rle; + } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) { + Offtype = set_repeat; + } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (OF_defaultNormLog-1)))) { + FSE_buildCTable_wksp(CTable_OffsetBits, OF_defaultNorm, MaxOff, OF_defaultNormLog, scratchBuffer, sizeof(scratchBuffer)); + Offtype = set_basic; + } else { + size_t nbSeq_1 = nbSeq; + const U32 tableLog = FSE_optimalTableLog(OffFSELog, nbSeq, max); + if (count[ofCodeTable[nbSeq-1]]>1) { 
count[ofCodeTable[nbSeq-1]]--; nbSeq_1--; } + FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max); + { size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog); /* overflow protected */ + if (FSE_isError(NCountSize)) return NCountSize; + op += NCountSize; } + FSE_buildCTable_wksp(CTable_OffsetBits, norm, max, tableLog, scratchBuffer, sizeof(scratchBuffer)); + Offtype = set_compressed; + } } + + /* CTable for MatchLengths */ + { U32 max = MaxML; + size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, zc->tmpCounters); + if ((mostFrequent == nbSeq) && (nbSeq > 2)) { + *op++ = *mlCodeTable; + FSE_buildCTable_rle(CTable_MatchLength, (BYTE)max); + MLtype = set_rle; + } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) { + MLtype = set_repeat; + } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (ML_defaultNormLog-1)))) { + FSE_buildCTable_wksp(CTable_MatchLength, ML_defaultNorm, MaxML, ML_defaultNormLog, scratchBuffer, sizeof(scratchBuffer)); + MLtype = set_basic; + } else { + size_t nbSeq_1 = nbSeq; + const U32 tableLog = FSE_optimalTableLog(MLFSELog, nbSeq, max); + if (count[mlCodeTable[nbSeq-1]]>1) { count[mlCodeTable[nbSeq-1]]--; nbSeq_1--; } + FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max); + { size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog); /* overflow protected */ + if (FSE_isError(NCountSize)) return NCountSize; + op += NCountSize; } + FSE_buildCTable_wksp(CTable_MatchLength, norm, max, tableLog, scratchBuffer, sizeof(scratchBuffer)); + MLtype = set_compressed; + } } + + *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2)); + zc->flagStaticTables = 0; + + /* Encoding Sequences */ + { BIT_CStream_t blockStream; + FSE_CState_t stateMatchLength; + FSE_CState_t stateOffsetBits; + FSE_CState_t stateLitLength; + + CHECK_E(BIT_initCStream(&blockStream, op, oend-op), dstSize_tooSmall); /* not enough space remaining */ + + /* first 
symbols */ + FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]); + FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq-1]); + FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]); + BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]); + if (MEM_32bits()) BIT_flushBits(&blockStream); + BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]); + if (MEM_32bits()) BIT_flushBits(&blockStream); + if (longOffsets) { + U32 const ofBits = ofCodeTable[nbSeq-1]; + int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1); + if (extraBits) { + BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits); + BIT_flushBits(&blockStream); + } + BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits, + ofBits - extraBits); + } else { + BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]); + } + BIT_flushBits(&blockStream); + + { size_t n; + for (n=nbSeq-2 ; n= 64-7-(LLFSELog+MLFSELog+OffFSELog))) + BIT_flushBits(&blockStream); /* (7)*/ + BIT_addBits(&blockStream, sequences[n].litLength, llBits); + if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream); + BIT_addBits(&blockStream, sequences[n].matchLength, mlBits); + if (MEM_32bits()) BIT_flushBits(&blockStream); /* (7)*/ + if (longOffsets) { + int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1); + if (extraBits) { + BIT_addBits(&blockStream, sequences[n].offset, extraBits); + BIT_flushBits(&blockStream); /* (7)*/ + } + BIT_addBits(&blockStream, sequences[n].offset >> extraBits, + ofBits - extraBits); /* 31 */ + } else { + BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */ + } + BIT_flushBits(&blockStream); /* (7)*/ + } } + + FSE_flushCState(&blockStream, &stateMatchLength); + FSE_flushCState(&blockStream, &stateOffsetBits); + FSE_flushCState(&blockStream, &stateLitLength); + + { size_t const streamSize = 
BIT_closeCStream(&blockStream); + if (streamSize==0) return ERROR(dstSize_tooSmall); /* not enough space */ + op += streamSize; + } } + + /* check compressibility */ +_check_compressibility: + { size_t const minGain = ZSTD_minGain(srcSize); + size_t const maxCSize = srcSize - minGain; + if ((size_t)(op-ostart) >= maxCSize) { + zc->flagStaticHufTable = HUF_repeat_none; + return 0; + } } + + /* confirm repcodes */ + { int i; for (i=0; irep[i] = zc->repToConfirm[i]; } + + return op - ostart; +} + +#if 0 /* for debug */ +# define STORESEQ_DEBUG +U32 g_startDebug = 0; +const BYTE* g_start = NULL; +#endif + +/*! ZSTD_storeSeq() : + Store a sequence (literal length, literals, offset code and match length code) into seqStore_t. + `offsetCode` : distance to match, or 0 == repCode. + `matchCode` : matchLength - MINMATCH +*/ +MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t matchCode) +{ +#ifdef STORESEQ_DEBUG + if (g_startDebug) { + const U32 pos = (U32)((const BYTE*)literals - g_start); + if (g_start==NULL) g_start = (const BYTE*)literals; + if ((pos > 1895000) && (pos < 1895300)) + fprintf(stderr, "Cpos %6u :%5u literals & match %3u bytes at distance %6u \n", + pos, (U32)litLength, (U32)matchCode+MINMATCH, (U32)offsetCode); + } +#endif + /* copy Literals */ + ZSTD_wildcopy(seqStorePtr->lit, literals, litLength); + seqStorePtr->lit += litLength; + + /* literal Length */ + if (litLength>0xFFFF) { seqStorePtr->longLengthID = 1; seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); } + seqStorePtr->sequences[0].litLength = (U16)litLength; + + /* match offset */ + seqStorePtr->sequences[0].offset = offsetCode + 1; + + /* match Length */ + if (matchCode>0xFFFF) { seqStorePtr->longLengthID = 2; seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); } + seqStorePtr->sequences[0].matchLength = (U16)matchCode; + + seqStorePtr->sequences++; +} + 
+ +/*-************************************* +* Match length counter +***************************************/ +static unsigned ZSTD_NbCommonBytes (register size_t val) +{ + if (MEM_isLittleEndian()) { + if (MEM_64bits()) { +# if defined(_MSC_VER) && defined(_WIN64) + unsigned long r = 0; + _BitScanForward64( &r, (U64)val ); + return (unsigned)(r>>3); +# elif defined(__GNUC__) && (__GNUC__ >= 3) + return (__builtin_ctzll((U64)val) >> 3); +# else + static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 }; + return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58]; +# endif + } else { /* 32 bits */ +# if defined(_MSC_VER) + unsigned long r=0; + _BitScanForward( &r, (U32)val ); + return (unsigned)(r>>3); +# elif defined(__GNUC__) && (__GNUC__ >= 3) + return (__builtin_ctz((U32)val) >> 3); +# else + static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 }; + return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27]; +# endif + } + } else { /* Big Endian CPU */ + if (MEM_64bits()) { +# if defined(_MSC_VER) && defined(_WIN64) + unsigned long r = 0; + _BitScanReverse64( &r, val ); + return (unsigned)(r>>3); +# elif defined(__GNUC__) && (__GNUC__ >= 3) + return (__builtin_clzll(val) >> 3); +# else + unsigned r; + const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */ + if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; } + if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; } + r += (!val); + return r; +# endif + } else { /* 32 bits */ +# if defined(_MSC_VER) + unsigned long r = 0; + _BitScanReverse( &r, (unsigned long)val ); + return (unsigned)(r>>3); +# elif defined(__GNUC__) && (__GNUC__ >= 3) + return 
(__builtin_clz((U32)val) >> 3); +# else + unsigned r; + if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; } + r += (!val); + return r; +# endif + } } +} + + +static size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit) +{ + const BYTE* const pStart = pIn; + const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1); + + while (pIn < pInLoopLimit) { + size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn); + if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; } + pIn += ZSTD_NbCommonBytes(diff); + return (size_t)(pIn - pStart); + } + if (MEM_64bits()) if ((pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; } + if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; } + if ((pIn> (32-h) ; } +MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */ + +static const U32 prime4bytes = 2654435761U; +static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; } +static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); } + +static const U64 prime5bytes = 889523592379ULL; +static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64-40)) * prime5bytes) >> (64-h)) ; } +static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); } + +static const U64 prime6bytes = 227718039650203ULL; +static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; } +static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); } + +static const U64 prime7bytes = 58295818150454627ULL; +static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64-56)) * prime7bytes) >> (64-h)) ; } +static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); } + +static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL; +static size_t 
ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; } +static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); } + +static size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls) +{ + switch(mls) + { + //case 3: return ZSTD_hash3Ptr(p, hBits); + default: + case 4: return ZSTD_hash4Ptr(p, hBits); + case 5: return ZSTD_hash5Ptr(p, hBits); + case 6: return ZSTD_hash6Ptr(p, hBits); + case 7: return ZSTD_hash7Ptr(p, hBits); + case 8: return ZSTD_hash8Ptr(p, hBits); + } +} + + +/*-************************************* +* Fast Scan +***************************************/ +static void ZSTD_fillHashTable (ZSTD_CCtx* zc, const void* end, const U32 mls) +{ + U32* const hashTable = zc->hashTable; + U32 const hBits = zc->params.cParams.hashLog; + const BYTE* const base = zc->base; + const BYTE* ip = base + zc->nextToUpdate; + const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; + const size_t fastHashFillStep = 3; + + while(ip <= iend) { + hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base); + ip += fastHashFillStep; + } +} + + +FORCE_INLINE +void ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx, + const void* src, size_t srcSize, + const U32 mls) +{ + U32* const hashTable = cctx->hashTable; + U32 const hBits = cctx->params.cParams.hashLog; + seqStore_t* seqStorePtr = &(cctx->seqStore); + const BYTE* const base = cctx->base; + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const U32 lowestIndex = cctx->dictLimit; + const BYTE* const lowest = base + lowestIndex; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - HASH_READ_SIZE; + U32 offset_1=cctx->rep[0], offset_2=cctx->rep[1]; + U32 offsetSaved = 0; + + /* init */ + ip += (ip==lowest); + { U32 const maxRep = (U32)(ip-lowest); + if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; + if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; + } + + /* Main 
Search Loop */ + while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ + size_t mLength; + size_t const h = ZSTD_hashPtr(ip, hBits, mls); + U32 const current = (U32)(ip-base); + U32 const matchIndex = hashTable[h]; + const BYTE* match = base + matchIndex; + hashTable[h] = current; /* update hash table */ + + if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { + mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; + ip++; + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH); + } else { + U32 offset; + if ( (matchIndex <= lowestIndex) || (MEM_read32(match) != MEM_read32(ip)) ) { + ip += ((ip-anchor) >> g_searchStrength) + 1; + continue; + } + mLength = ZSTD_count(ip+4, match+4, iend) + 4; + offset = (U32)(ip-match); + while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ + offset_2 = offset_1; + offset_1 = offset; + + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + } + + /* match found */ + ip += mLength; + anchor = ip; + + if (ip <= ilimit) { + /* Fill Table */ + hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2; /* here because current+2 could be > iend-8 */ + hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base); + /* check immediate repcode */ + while ( (ip <= ilimit) + && ( (offset_2>0) + & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) { + /* store sequence */ + size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; + { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */ + hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip-base); + ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH); + ip += rLength; + anchor = ip; + continue; /* faster when present ... (?) */ + } } } + + /* save reps for next block */ + cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved; + cctx->repToConfirm[1] = offset_2 ? 
offset_2 : offsetSaved; + + /* Last Literals */ + { size_t const lastLLSize = iend - anchor; + memcpy(seqStorePtr->lit, anchor, lastLLSize); + seqStorePtr->lit += lastLLSize; + } +} + + +static void ZSTD_compressBlock_fast(ZSTD_CCtx* ctx, + const void* src, size_t srcSize) +{ + const U32 mls = ctx->params.cParams.searchLength; + switch(mls) + { + default: /* includes case 3 */ + case 4 : + ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 4); return; + case 5 : + ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 5); return; + case 6 : + ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 6); return; + case 7 : + ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 7); return; + } +} + + +static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx, + const void* src, size_t srcSize, + const U32 mls) +{ + U32* hashTable = ctx->hashTable; + const U32 hBits = ctx->params.cParams.hashLog; + seqStore_t* seqStorePtr = &(ctx->seqStore); + const BYTE* const base = ctx->base; + const BYTE* const dictBase = ctx->dictBase; + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const U32 lowestIndex = ctx->lowLimit; + const BYTE* const dictStart = dictBase + lowestIndex; + const U32 dictLimit = ctx->dictLimit; + const BYTE* const lowPrefixPtr = base + dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - 8; + U32 offset_1=ctx->rep[0], offset_2=ctx->rep[1]; + + /* Search Loop */ + while (ip < ilimit) { /* < instead of <=, because (ip+1) */ + const size_t h = ZSTD_hashPtr(ip, hBits, mls); + const U32 matchIndex = hashTable[h]; + const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base; + const BYTE* match = matchBase + matchIndex; + const U32 current = (U32)(ip-base); + const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */ + const BYTE* repBase = repIndex < dictLimit ? 
dictBase : base; + const BYTE* repMatch = repBase + repIndex; + size_t mLength; + hashTable[h] = current; /* update hash table */ + + if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) + && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { + const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend; + mLength = ZSTD_count_2segments(ip+1+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repMatchEnd, lowPrefixPtr) + EQUAL_READ32; + ip++; + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH); + } else { + if ( (matchIndex < lowestIndex) || + (MEM_read32(match) != MEM_read32(ip)) ) { + ip += ((ip-anchor) >> g_searchStrength) + 1; + continue; + } + { const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend; + const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr; + U32 offset; + mLength = ZSTD_count_2segments(ip+EQUAL_READ32, match+EQUAL_READ32, iend, matchEnd, lowPrefixPtr) + EQUAL_READ32; + while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ + offset = current - matchIndex; + offset_2 = offset_1; + offset_1 = offset; + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + } } + + /* found a match : store it */ + ip += mLength; + anchor = ip; + + if (ip <= ilimit) { + /* Fill Table */ + hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2; + hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base); + /* check immediate repcode */ + while (ip <= ilimit) { + U32 const current2 = (U32)(ip-base); + U32 const repIndex2 = current2 - offset_2; + const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2; + if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */ + && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { + const BYTE* const repEnd2 = repIndex2 < dictLimit ? 
dictEnd : iend; + size_t repLength2 = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch2+EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32; + U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ + ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH); + hashTable[ZSTD_hashPtr(ip, hBits, mls)] = current2; + ip += repLength2; + anchor = ip; + continue; + } + break; + } } } + + /* save reps for next block */ + ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2; + + /* Last Literals */ + { size_t const lastLLSize = iend - anchor; + memcpy(seqStorePtr->lit, anchor, lastLLSize); + seqStorePtr->lit += lastLLSize; + } +} + + +static void ZSTD_compressBlock_fast_extDict(ZSTD_CCtx* ctx, + const void* src, size_t srcSize) +{ + U32 const mls = ctx->params.cParams.searchLength; + switch(mls) + { + default: /* includes case 3 */ + case 4 : + ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 4); return; + case 5 : + ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 5); return; + case 6 : + ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 6); return; + case 7 : + ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 7); return; + } +} + + +/*-************************************* +* Double Fast +***************************************/ +static void ZSTD_fillDoubleHashTable (ZSTD_CCtx* cctx, const void* end, const U32 mls) +{ + U32* const hashLarge = cctx->hashTable; + U32 const hBitsL = cctx->params.cParams.hashLog; + U32* const hashSmall = cctx->chainTable; + U32 const hBitsS = cctx->params.cParams.chainLog; + const BYTE* const base = cctx->base; + const BYTE* ip = base + cctx->nextToUpdate; + const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE; + const size_t fastHashFillStep = 3; + + while(ip <= iend) { + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base); + hashLarge[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base); + ip += fastHashFillStep; + } +} + + 
+FORCE_INLINE +void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx* cctx, + const void* src, size_t srcSize, + const U32 mls) +{ + U32* const hashLong = cctx->hashTable; + const U32 hBitsL = cctx->params.cParams.hashLog; + U32* const hashSmall = cctx->chainTable; + const U32 hBitsS = cctx->params.cParams.chainLog; + seqStore_t* seqStorePtr = &(cctx->seqStore); + const BYTE* const base = cctx->base; + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const U32 lowestIndex = cctx->dictLimit; + const BYTE* const lowest = base + lowestIndex; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - HASH_READ_SIZE; + U32 offset_1=cctx->rep[0], offset_2=cctx->rep[1]; + U32 offsetSaved = 0; + + /* init */ + ip += (ip==lowest); + { U32 const maxRep = (U32)(ip-lowest); + if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0; + if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0; + } + + /* Main Search Loop */ + while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */ + size_t mLength; + size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8); + size_t const h = ZSTD_hashPtr(ip, hBitsS, mls); + U32 const current = (U32)(ip-base); + U32 const matchIndexL = hashLong[h2]; + U32 const matchIndexS = hashSmall[h]; + const BYTE* matchLong = base + matchIndexL; + const BYTE* match = base + matchIndexS; + hashLong[h2] = hashSmall[h] = current; /* update hash tables */ + + if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { /* note : by construction, offset_1 <= current */ + mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4; + ip++; + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH); + } else { + U32 offset; + if ( (matchIndexL > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip)) ) { + mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8; + offset = (U32)(ip-matchLong); + while (((ip>anchor) & (matchLong>lowest)) && (ip[-1] == 
matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ + } else if ( (matchIndexS > lowestIndex) && (MEM_read32(match) == MEM_read32(ip)) ) { + size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8); + U32 const matchIndex3 = hashLong[h3]; + const BYTE* match3 = base + matchIndex3; + hashLong[h3] = current + 1; + if ( (matchIndex3 > lowestIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) { + mLength = ZSTD_count(ip+9, match3+8, iend) + 8; + ip++; + offset = (U32)(ip-match3); + while (((ip>anchor) & (match3>lowest)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */ + } else { + mLength = ZSTD_count(ip+4, match+4, iend) + 4; + offset = (U32)(ip-match); + while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ + } + } else { + ip += ((ip-anchor) >> g_searchStrength) + 1; + continue; + } + + offset_2 = offset_1; + offset_1 = offset; + + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + } + + /* match found */ + ip += mLength; + anchor = ip; + + if (ip <= ilimit) { + /* Fill Table */ + hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = + hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2; /* here because current+2 could be > iend-8 */ + hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = + hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base); + + /* check immediate repcode */ + while ( (ip <= ilimit) + && ( (offset_2>0) + & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) { + /* store sequence */ + size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4; + { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */ + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base); + hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base); + ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH); + ip += rLength; + anchor = ip; + continue; /* faster when present ... (?) 
*/ + } } } + + /* save reps for next block */ + cctx->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved; + cctx->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved; + + /* Last Literals */ + { size_t const lastLLSize = iend - anchor; + memcpy(seqStorePtr->lit, anchor, lastLLSize); + seqStorePtr->lit += lastLLSize; + } +} + + +static void ZSTD_compressBlock_doubleFast(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ + const U32 mls = ctx->params.cParams.searchLength; + switch(mls) + { + default: /* includes case 3 */ + case 4 : + ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 4); return; + case 5 : + ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 5); return; + case 6 : + ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 6); return; + case 7 : + ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 7); return; + } +} + + +static void ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx* ctx, + const void* src, size_t srcSize, + const U32 mls) +{ + U32* const hashLong = ctx->hashTable; + U32 const hBitsL = ctx->params.cParams.hashLog; + U32* const hashSmall = ctx->chainTable; + U32 const hBitsS = ctx->params.cParams.chainLog; + seqStore_t* seqStorePtr = &(ctx->seqStore); + const BYTE* const base = ctx->base; + const BYTE* const dictBase = ctx->dictBase; + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const U32 lowestIndex = ctx->lowLimit; + const BYTE* const dictStart = dictBase + lowestIndex; + const U32 dictLimit = ctx->dictLimit; + const BYTE* const lowPrefixPtr = base + dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - 8; + U32 offset_1=ctx->rep[0], offset_2=ctx->rep[1]; + + /* Search Loop */ + while (ip < ilimit) { /* < instead of <=, because (ip+1) */ + const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls); + const U32 matchIndex = hashSmall[hSmall]; + const BYTE* matchBase = 
matchIndex < dictLimit ? dictBase : base; + const BYTE* match = matchBase + matchIndex; + + const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8); + const U32 matchLongIndex = hashLong[hLong]; + const BYTE* matchLongBase = matchLongIndex < dictLimit ? dictBase : base; + const BYTE* matchLong = matchLongBase + matchLongIndex; + + const U32 current = (U32)(ip-base); + const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */ + const BYTE* repBase = repIndex < dictLimit ? dictBase : base; + const BYTE* repMatch = repBase + repIndex; + size_t mLength; + hashSmall[hSmall] = hashLong[hLong] = current; /* update hash table */ + + if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex)) + && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) { + const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend; + mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4; + ip++; + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH); + } else { + if ((matchLongIndex > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) { + const BYTE* matchEnd = matchLongIndex < dictLimit ? dictEnd : iend; + const BYTE* lowMatchPtr = matchLongIndex < dictLimit ? dictStart : lowPrefixPtr; + U32 offset; + mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, lowPrefixPtr) + 8; + offset = current - matchLongIndex; + while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */ + offset_2 = offset_1; + offset_1 = offset; + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + + } else if ((matchIndex > lowestIndex) && (MEM_read32(match) == MEM_read32(ip))) { + size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8); + U32 const matchIndex3 = hashLong[h3]; + const BYTE* const match3Base = matchIndex3 < dictLimit ? 
dictBase : base; + const BYTE* match3 = match3Base + matchIndex3; + U32 offset; + hashLong[h3] = current + 1; + if ( (matchIndex3 > lowestIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) { + const BYTE* matchEnd = matchIndex3 < dictLimit ? dictEnd : iend; + const BYTE* lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr; + mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, lowPrefixPtr) + 8; + ip++; + offset = current+1 - matchIndex3; + while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */ + } else { + const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend; + const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr; + mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4; + offset = current - matchIndex; + while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */ + } + offset_2 = offset_1; + offset_1 = offset; + ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH); + + } else { + ip += ((ip-anchor) >> g_searchStrength) + 1; + continue; + } } + + /* found a match : store it */ + ip += mLength; + anchor = ip; + + if (ip <= ilimit) { + /* Fill Table */ + hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2; + hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = current+2; + hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base); + hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base); + /* check immediate repcode */ + while (ip <= ilimit) { + U32 const current2 = (U32)(ip-base); + U32 const repIndex2 = current2 - offset_2; + const BYTE* repMatch2 = repIndex2 < dictLimit ? 
dictBase + repIndex2 : base + repIndex2; + if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */ + && (MEM_read32(repMatch2) == MEM_read32(ip)) ) { + const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend; + size_t const repLength2 = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch2+EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32; + U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */ + ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH); + hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2; + hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2; + ip += repLength2; + anchor = ip; + continue; + } + break; + } } } + + /* save reps for next block */ + ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2; + + /* Last Literals */ + { size_t const lastLLSize = iend - anchor; + memcpy(seqStorePtr->lit, anchor, lastLLSize); + seqStorePtr->lit += lastLLSize; + } +} + + +static void ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx* ctx, + const void* src, size_t srcSize) +{ + U32 const mls = ctx->params.cParams.searchLength; + switch(mls) + { + default: /* includes case 3 */ + case 4 : + ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 4); return; + case 5 : + ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 5); return; + case 6 : + ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 6); return; + case 7 : + ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 7); return; + } +} + + +/*-************************************* +* Binary Tree search +***************************************/ +/** ZSTD_insertBt1() : add one or multiple positions to tree. +* ip : assumed <= iend-8 . 
+* @return : nb of positions added */ +static U32 ZSTD_insertBt1(ZSTD_CCtx* zc, const BYTE* const ip, const U32 mls, const BYTE* const iend, U32 nbCompares, + U32 extDict) +{ + U32* const hashTable = zc->hashTable; + U32 const hashLog = zc->params.cParams.hashLog; + size_t const h = ZSTD_hashPtr(ip, hashLog, mls); + U32* const bt = zc->chainTable; + U32 const btLog = zc->params.cParams.chainLog - 1; + U32 const btMask = (1 << btLog) - 1; + U32 matchIndex = hashTable[h]; + size_t commonLengthSmaller=0, commonLengthLarger=0; + const BYTE* const base = zc->base; + const BYTE* const dictBase = zc->dictBase; + const U32 dictLimit = zc->dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const prefixStart = base + dictLimit; + const BYTE* match; + const U32 current = (U32)(ip-base); + const U32 btLow = btMask >= current ? 0 : current - btMask; + U32* smallerPtr = bt + 2*(current&btMask); + U32* largerPtr = smallerPtr + 1; + U32 dummy32; /* to be nullified at the end */ + U32 const windowLow = zc->lowLimit; + U32 matchEndIdx = current+8; + size_t bestLength = 8; +#ifdef ZSTD_C_PREDICT + U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0); + U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1); + predictedSmall += (predictedSmall>0); + predictedLarge += (predictedLarge>0); +#endif /* ZSTD_C_PREDICT */ + + hashTable[h] = current; /* Update Hash Table */ + + while (nbCompares-- && (matchIndex > windowLow)) { + U32* const nextPtr = bt + 2*(matchIndex & btMask); + size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ + +#ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */ + const U32* predictPtr = bt + 2*((matchIndex-1) & btMask); /* written this way, as bt is a roll buffer */ + if (matchIndex == predictedSmall) { + /* no need to check length, result known */ + *smallerPtr = matchIndex; + if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, 
stop the search */ + smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ + matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ + predictedSmall = predictPtr[1] + (predictPtr[1]>0); + continue; + } + if (matchIndex == predictedLarge) { + *largerPtr = matchIndex; + if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + largerPtr = nextPtr; + matchIndex = nextPtr[0]; + predictedLarge = predictPtr[0] + (predictPtr[0]>0); + continue; + } +#endif + if ((!extDict) || (matchIndex+matchLength >= dictLimit)) { + match = base + matchIndex; + if (match[matchLength] == ip[matchLength]) + matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iend) +1; + } else { + match = dictBase + matchIndex; + matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); + if (matchIndex+matchLength >= dictLimit) + match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ + } + + if (matchLength > bestLength) { + bestLength = matchLength; + if (matchLength > matchEndIdx - matchIndex) + matchEndIdx = matchIndex + (U32)matchLength; + } + + if (ip+matchLength == iend) /* equal : no way to know if inf or sup */ + break; /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt the tree */ + + if (match[matchLength] < ip[matchLength]) { /* necessarily within correct buffer */ + /* match is smaller than current */ + *smallerPtr = matchIndex; /* update smaller idx */ + commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ + if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ + matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ + } else { + /* match is larger than current */ + *largerPtr = matchIndex; + 
commonLengthLarger = matchLength; + if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + largerPtr = nextPtr; + matchIndex = nextPtr[0]; + } } + + *smallerPtr = *largerPtr = 0; + if (bestLength > 384) return MIN(192, (U32)(bestLength - 384)); /* speed optimization */ + if (matchEndIdx > current + 8) return matchEndIdx - current - 8; + return 1; +} + + +static size_t ZSTD_insertBtAndFindBestMatch ( + ZSTD_CCtx* zc, + const BYTE* const ip, const BYTE* const iend, + size_t* offsetPtr, + U32 nbCompares, const U32 mls, + U32 extDict) +{ + U32* const hashTable = zc->hashTable; + U32 const hashLog = zc->params.cParams.hashLog; + size_t const h = ZSTD_hashPtr(ip, hashLog, mls); + U32* const bt = zc->chainTable; + U32 const btLog = zc->params.cParams.chainLog - 1; + U32 const btMask = (1 << btLog) - 1; + U32 matchIndex = hashTable[h]; + size_t commonLengthSmaller=0, commonLengthLarger=0; + const BYTE* const base = zc->base; + const BYTE* const dictBase = zc->dictBase; + const U32 dictLimit = zc->dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const prefixStart = base + dictLimit; + const U32 current = (U32)(ip-base); + const U32 btLow = btMask >= current ? 
0 : current - btMask; + const U32 windowLow = zc->lowLimit; + U32* smallerPtr = bt + 2*(current&btMask); + U32* largerPtr = bt + 2*(current&btMask) + 1; + U32 matchEndIdx = current+8; + U32 dummy32; /* to be nullified at the end */ + size_t bestLength = 0; + + hashTable[h] = current; /* Update Hash Table */ + + while (nbCompares-- && (matchIndex > windowLow)) { + U32* const nextPtr = bt + 2*(matchIndex & btMask); + size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ + const BYTE* match; + + if ((!extDict) || (matchIndex+matchLength >= dictLimit)) { + match = base + matchIndex; + if (match[matchLength] == ip[matchLength]) + matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iend) +1; + } else { + match = dictBase + matchIndex; + matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart); + if (matchIndex+matchLength >= dictLimit) + match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ + } + + if (matchLength > bestLength) { + if (matchLength > matchEndIdx - matchIndex) + matchEndIdx = matchIndex + (U32)matchLength; + if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) + bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex; + if (ip+matchLength == iend) /* equal : no way to know if inf or sup */ + break; /* drop, to guarantee consistency (miss a little bit of compression) */ + } + + if (match[matchLength] < ip[matchLength]) { + /* match is smaller than current */ + *smallerPtr = matchIndex; /* update smaller idx */ + commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ + if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ + matchIndex = nextPtr[1]; /* new matchIndex larger 
than previous (closer to current) */ + } else { + /* match is larger than current */ + *largerPtr = matchIndex; + commonLengthLarger = matchLength; + if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + largerPtr = nextPtr; + matchIndex = nextPtr[0]; + } } + + *smallerPtr = *largerPtr = 0; + + zc->nextToUpdate = (matchEndIdx > current + 8) ? matchEndIdx - 8 : current+1; + return bestLength; +} + + +static void ZSTD_updateTree(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls) +{ + const BYTE* const base = zc->base; + const U32 target = (U32)(ip - base); + U32 idx = zc->nextToUpdate; + + while(idx < target) + idx += ZSTD_insertBt1(zc, base+idx, mls, iend, nbCompares, 0); +} + +/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */ +static size_t ZSTD_BtFindBestMatch ( + ZSTD_CCtx* zc, + const BYTE* const ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 maxNbAttempts, const U32 mls) +{ + if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */ + ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls); + return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 0); +} + + +static size_t ZSTD_BtFindBestMatch_selectMLS ( + ZSTD_CCtx* zc, /* Index table will be updated */ + const BYTE* ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 maxNbAttempts, const U32 matchLengthSearch) +{ + switch(matchLengthSearch) + { + default : /* includes case 3 */ + case 4 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4); + case 5 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5); + case 7 : + case 6 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6); + } +} + + +static void ZSTD_updateTree_extDict(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls) +{ + const BYTE* const base = zc->base; + const U32 target = 
(U32)(ip - base); + U32 idx = zc->nextToUpdate; + + while (idx < target) idx += ZSTD_insertBt1(zc, base+idx, mls, iend, nbCompares, 1); +} + + +/** Tree updater, providing best match */ +static size_t ZSTD_BtFindBestMatch_extDict ( + ZSTD_CCtx* zc, + const BYTE* const ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 maxNbAttempts, const U32 mls) +{ + if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */ + ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls); + return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 1); +} + + +static size_t ZSTD_BtFindBestMatch_selectMLS_extDict ( + ZSTD_CCtx* zc, /* Index table will be updated */ + const BYTE* ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 maxNbAttempts, const U32 matchLengthSearch) +{ + switch(matchLengthSearch) + { + default : /* includes case 3 */ + case 4 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4); + case 5 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5); + case 7 : + case 6 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6); + } +} + + + +/* ********************************* +* Hash Chain +***********************************/ +#define NEXT_IN_CHAIN(d, mask) chainTable[(d) & mask] + +/* Update chains up to ip (excluded) + Assumption : always within prefix (i.e. 
not within extDict) */ +FORCE_INLINE +U32 ZSTD_insertAndFindFirstIndex (ZSTD_CCtx* zc, const BYTE* ip, U32 mls) +{ + U32* const hashTable = zc->hashTable; + const U32 hashLog = zc->params.cParams.hashLog; + U32* const chainTable = zc->chainTable; + const U32 chainMask = (1 << zc->params.cParams.chainLog) - 1; + const BYTE* const base = zc->base; + const U32 target = (U32)(ip - base); + U32 idx = zc->nextToUpdate; + + while(idx < target) { /* catch up */ + size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls); + NEXT_IN_CHAIN(idx, chainMask) = hashTable[h]; + hashTable[h] = idx; + idx++; + } + + zc->nextToUpdate = target; + return hashTable[ZSTD_hashPtr(ip, hashLog, mls)]; +} + + + +FORCE_INLINE /* inlining is important to hardwire a hot branch (template emulation) */ +size_t ZSTD_HcFindBestMatch_generic ( + ZSTD_CCtx* zc, /* Index table will be updated */ + const BYTE* const ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 maxNbAttempts, const U32 mls, const U32 extDict) +{ + U32* const chainTable = zc->chainTable; + const U32 chainSize = (1 << zc->params.cParams.chainLog); + const U32 chainMask = chainSize-1; + const BYTE* const base = zc->base; + const BYTE* const dictBase = zc->dictBase; + const U32 dictLimit = zc->dictLimit; + const BYTE* const prefixStart = base + dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const U32 lowLimit = zc->lowLimit; + const U32 current = (U32)(ip-base); + const U32 minChain = current > chainSize ? 
current - chainSize : 0; + int nbAttempts=maxNbAttempts; + size_t ml=EQUAL_READ32-1; + + /* HC4 match finder */ + U32 matchIndex = ZSTD_insertAndFindFirstIndex (zc, ip, mls); + + for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) { + const BYTE* match; + size_t currentMl=0; + if ((!extDict) || matchIndex >= dictLimit) { + match = base + matchIndex; + if (match[ml] == ip[ml]) /* potentially better */ + currentMl = ZSTD_count(ip, match, iLimit); + } else { + match = dictBase + matchIndex; + if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */ + currentMl = ZSTD_count_2segments(ip+EQUAL_READ32, match+EQUAL_READ32, iLimit, dictEnd, prefixStart) + EQUAL_READ32; + } + + /* save best solution */ + if (currentMl > ml) { ml = currentMl; *offsetPtr = current - matchIndex + ZSTD_REP_MOVE; if (ip+currentMl == iLimit) break; /* best possible, and avoid read overflow*/ } + + if (matchIndex <= minChain) break; + matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask); + } + + return ml; +} + + +FORCE_INLINE size_t ZSTD_HcFindBestMatch_selectMLS ( + ZSTD_CCtx* zc, + const BYTE* ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 maxNbAttempts, const U32 matchLengthSearch) +{ + switch(matchLengthSearch) + { + default : /* includes case 3 */ + case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 0); + case 5 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 0); + case 7 : + case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 0); + } +} + + +FORCE_INLINE size_t ZSTD_HcFindBestMatch_extDict_selectMLS ( + ZSTD_CCtx* zc, + const BYTE* ip, const BYTE* const iLimit, + size_t* offsetPtr, + const U32 maxNbAttempts, const U32 matchLengthSearch) +{ + switch(matchLengthSearch) + { + default : /* includes case 3 */ + case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 1); + case 
5 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 1); + case 7 : + case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 1); + } +} + + +/* ******************************* +* Common parser - lazy strategy +*********************************/ +FORCE_INLINE +void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx, + const void* src, size_t srcSize, + const U32 searchMethod, const U32 depth) +{ + seqStore_t* seqStorePtr = &(ctx->seqStore); + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - 8; + const BYTE* const base = ctx->base + ctx->dictLimit; + + U32 const maxSearches = 1 << ctx->params.cParams.searchLog; + U32 const mls = ctx->params.cParams.searchLength; + + typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit, + size_t* offsetPtr, + U32 maxNbAttempts, U32 matchLengthSearch); + searchMax_f const searchMax = searchMethod ? 
ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS; + U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1], savedOffset=0; + + /* init */ + ip += (ip==base); + ctx->nextToUpdate3 = ctx->nextToUpdate; + { U32 const maxRep = (U32)(ip-base); + if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0; + if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0; + } + + /* Match Loop */ + while (ip < ilimit) { + size_t matchLength=0; + size_t offset=0; + const BYTE* start=ip+1; + + /* check repCode */ + if ((offset_1>0) & (MEM_read32(ip+1) == MEM_read32(ip+1 - offset_1))) { + /* repcode : we take it */ + matchLength = ZSTD_count(ip+1+EQUAL_READ32, ip+1+EQUAL_READ32-offset_1, iend) + EQUAL_READ32; + if (depth==0) goto _storeSequence; + } + + /* first search (depth 0) */ + { size_t offsetFound = 99999999; + size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls); + if (ml2 > matchLength) + matchLength = ml2, start = ip, offset=offsetFound; + } + + if (matchLength < EQUAL_READ32) { + ip += ((ip-anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */ + continue; + } + + /* let's try to find a better solution */ + if (depth>=1) + while (ip0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { + size_t const mlRep = ZSTD_count(ip+EQUAL_READ32, ip+EQUAL_READ32-offset_1, iend) + EQUAL_READ32; + int const gain2 = (int)(mlRep * 3); + int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1); + if ((mlRep >= EQUAL_READ32) && (gain2 > gain1)) + matchLength = mlRep, offset = 0, start = ip; + } + { size_t offset2=99999999; + size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4); + if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) { + matchLength = ml2, offset = offset2, start = ip; + continue; /* search a better one */ + } } + + /* 
let's find an even better one */ + if ((depth==2) && (ip0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) { + size_t const ml2 = ZSTD_count(ip+EQUAL_READ32, ip+EQUAL_READ32-offset_1, iend) + EQUAL_READ32; + int const gain2 = (int)(ml2 * 4); + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1); + if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) + matchLength = ml2, offset = 0, start = ip; + } + { size_t offset2=99999999; + size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7); + if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) { + matchLength = ml2, offset = offset2, start = ip; + continue; + } } } + break; /* nothing found : store previous solution */ + } + + /* catch up */ + if (offset) { + while ((start>anchor) && (start>base+offset-ZSTD_REP_MOVE) && (start[-1] == start[-1-offset+ZSTD_REP_MOVE])) /* only search for offset within prefix */ + { start--; matchLength++; } + offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE); + } + + /* store sequence */ +_storeSequence: + { size_t const litLength = start - anchor; + ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH); + anchor = ip = start + matchLength; + } + + /* check immediate repcode */ + while ( (ip <= ilimit) + && ((offset_2>0) + & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) { + /* store sequence */ + matchLength = ZSTD_count(ip+EQUAL_READ32, ip+EQUAL_READ32-offset_2, iend) + EQUAL_READ32; + offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */ + ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH); + ip += matchLength; + anchor = ip; + continue; /* faster when present ... (?) */ + } } + + /* Save reps for next block */ + ctx->repToConfirm[0] = offset_1 ? offset_1 : savedOffset; + ctx->repToConfirm[1] = offset_2 ? 
offset_2 : savedOffset; + + /* Last Literals */ + { size_t const lastLLSize = iend - anchor; + memcpy(seqStorePtr->lit, anchor, lastLLSize); + seqStorePtr->lit += lastLLSize; + } +} + + +static void ZSTD_compressBlock_btlazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ + ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 1, 2); +} + +static void ZSTD_compressBlock_lazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ + ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 2); +} + +static void ZSTD_compressBlock_lazy(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ + ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 1); +} + +static void ZSTD_compressBlock_greedy(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ + ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 0); +} + + +FORCE_INLINE +void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx, + const void* src, size_t srcSize, + const U32 searchMethod, const U32 depth) +{ + seqStore_t* seqStorePtr = &(ctx->seqStore); + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - 8; + const BYTE* const base = ctx->base; + const U32 dictLimit = ctx->dictLimit; + const U32 lowestIndex = ctx->lowLimit; + const BYTE* const prefixStart = base + dictLimit; + const BYTE* const dictBase = ctx->dictBase; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const dictStart = dictBase + ctx->lowLimit; + + const U32 maxSearches = 1 << ctx->params.cParams.searchLog; + const U32 mls = ctx->params.cParams.searchLength; + + typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit, + size_t* offsetPtr, + U32 maxNbAttempts, U32 matchLengthSearch); + searchMax_f searchMax = searchMethod ? 
ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS; + + U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1]; + + /* init */ + ctx->nextToUpdate3 = ctx->nextToUpdate; + ip += (ip == prefixStart); + + /* Match Loop */ + while (ip < ilimit) { + size_t matchLength=0; + size_t offset=0; + const BYTE* start=ip+1; + U32 current = (U32)(ip-base); + + /* check repCode */ + { const U32 repIndex = (U32)(current+1 - offset_1); + const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; + const BYTE* const repMatch = repBase + repIndex; + if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ + if (MEM_read32(ip+1) == MEM_read32(repMatch)) { + /* repcode detected we should take it */ + const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend; + matchLength = ZSTD_count_2segments(ip+1+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32; + if (depth==0) goto _storeSequence; + } } + + /* first search (depth 0) */ + { size_t offsetFound = 99999999; + size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls); + if (ml2 > matchLength) + matchLength = ml2, start = ip, offset=offsetFound; + } + + if (matchLength < EQUAL_READ32) { + ip += ((ip-anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */ + continue; + } + + /* let's try to find a better solution */ + if (depth>=1) + while (ip= 3) & (repIndex > lowestIndex)) /* intentional overflow */ + if (MEM_read32(ip) == MEM_read32(repMatch)) { + /* repcode detected */ + const BYTE* const repEnd = repIndex < dictLimit ? 
dictEnd : iend; + size_t const repLength = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32; + int const gain2 = (int)(repLength * 3); + int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1); + if ((repLength >= EQUAL_READ32) && (gain2 > gain1)) + matchLength = repLength, offset = 0, start = ip; + } } + + /* search match, depth 1 */ + { size_t offset2=99999999; + size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4); + if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) { + matchLength = ml2, offset = offset2, start = ip; + continue; /* search a better one */ + } } + + /* let's find an even better one */ + if ((depth==2) && (ip= 3) & (repIndex > lowestIndex)) /* intentional overflow */ + if (MEM_read32(ip) == MEM_read32(repMatch)) { + /* repcode detected */ + const BYTE* const repEnd = repIndex < dictLimit ? 
dictEnd : iend; + size_t repLength = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32; + int gain2 = (int)(repLength * 4); + int gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1); + if ((repLength >= EQUAL_READ32) && (gain2 > gain1)) + matchLength = repLength, offset = 0, start = ip; + } } + + /* search match, depth 2 */ + { size_t offset2=99999999; + size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls); + int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */ + int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7); + if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) { + matchLength = ml2, offset = offset2, start = ip; + continue; + } } } + break; /* nothing found : store previous solution */ + } + + /* catch up */ + if (offset) { + U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE)); + const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex; + const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart; + while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */ + offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE); + } + + /* store sequence */ +_storeSequence: + { size_t const litLength = start - anchor; + ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH); + anchor = ip = start + matchLength; + } + + /* check immediate repcode */ + while (ip <= ilimit) { + const U32 repIndex = (U32)((ip-base) - offset_2); + const BYTE* const repBase = repIndex < dictLimit ? dictBase : base; + const BYTE* const repMatch = repBase + repIndex; + if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */ + if (MEM_read32(ip) == MEM_read32(repMatch)) { + /* repcode detected we should take it */ + const BYTE* const repEnd = repIndex < dictLimit ? 
dictEnd : iend; + matchLength = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32; + offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset history */ + ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH); + ip += matchLength; + anchor = ip; + continue; /* faster when present ... (?) */ + } + break; + } } + + /* Save reps for next block */ + ctx->repToConfirm[0] = offset_1; ctx->repToConfirm[1] = offset_2; + + /* Last Literals */ + { size_t const lastLLSize = iend - anchor; + memcpy(seqStorePtr->lit, anchor, lastLLSize); + seqStorePtr->lit += lastLLSize; + } +} + + +void ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ + ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 0); +} + +static void ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ + ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 1); +} + +static void ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ + ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 2); +} + +static void ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ + ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 1, 2); +} + + +/* The optimal parser */ +#include "zstd_opt.h" + +static void ZSTD_compressBlock_btopt(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ +#ifdef ZSTD_OPT_H_91842398743 + ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 0); +#else + (void)ctx; (void)src; (void)srcSize; + return; +#endif +} + +static void ZSTD_compressBlock_btopt2(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ +#ifdef ZSTD_OPT_H_91842398743 + ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 1); +#else + (void)ctx; (void)src; (void)srcSize; + return; +#endif +} + +static void ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ +#ifdef 
ZSTD_OPT_H_91842398743 + ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 0); +#else + (void)ctx; (void)src; (void)srcSize; + return; +#endif +} + +static void ZSTD_compressBlock_btopt2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize) +{ +#ifdef ZSTD_OPT_H_91842398743 + ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 1); +#else + (void)ctx; (void)src; (void)srcSize; + return; +#endif +} + + +typedef void (*ZSTD_blockCompressor) (ZSTD_CCtx* ctx, const void* src, size_t srcSize); + +static ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict) +{ + static const ZSTD_blockCompressor blockCompressor[2][8] = { + { ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy, ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2, ZSTD_compressBlock_btlazy2, ZSTD_compressBlock_btopt, ZSTD_compressBlock_btopt2 }, + { ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict, ZSTD_compressBlock_lazy_extDict,ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict, ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btopt2_extDict } + }; + + return blockCompressor[extDict][(U32)strat]; +} + + +static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->params.cParams.strategy, zc->lowLimit < zc->dictLimit); + const BYTE* const base = zc->base; + const BYTE* const istart = (const BYTE*)src; + const U32 current = (U32)(istart-base); + if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) return 0; /* don't even attempt compression below a certain srcSize */ + ZSTD_resetSeqStore(&(zc->seqStore)); + if (current > zc->nextToUpdate + 384) + zc->nextToUpdate = current - MIN(192, (U32)(current - zc->nextToUpdate - 384)); /* update tree not updated after finding very long rep matches */ + blockCompressor(zc, src, 
srcSize); + return ZSTD_compressSequences(zc, dst, dstCapacity, srcSize); +} + + +/*! ZSTD_compress_generic() : +* Compress a chunk of data into one or multiple blocks. +* All blocks will be terminated, all input will be consumed. +* Function will issue an error if there is not enough `dstCapacity` to hold the compressed content. +* Frame is supposed already started (header already produced) +* @return : compressed size, or an error code +*/ +static size_t ZSTD_compress_generic (ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + U32 lastFrameChunk) +{ + size_t blockSize = cctx->blockSize; + size_t remaining = srcSize; + const BYTE* ip = (const BYTE*)src; + BYTE* const ostart = (BYTE*)dst; + BYTE* op = ostart; + U32 const maxDist = 1 << cctx->params.cParams.windowLog; + + if (cctx->params.fParams.checksumFlag && srcSize) + XXH64_update(&cctx->xxhState, src, srcSize); + + while (remaining) { + U32 const lastBlock = lastFrameChunk & (blockSize >= remaining); + size_t cSize; + + if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE) return ERROR(dstSize_tooSmall); /* not enough space to store compressed block */ + if (remaining < blockSize) blockSize = remaining; + + /* preemptive overflow correction */ + if (cctx->lowLimit > (3U<<29)) { + U32 const cycleMask = (1 << ZSTD_cycleLog(cctx->params.cParams.hashLog, cctx->params.cParams.strategy)) - 1; + U32 const current = (U32)(ip - cctx->base); + U32 const newCurrent = (current & cycleMask) + (1 << cctx->params.cParams.windowLog); + U32 const correction = current - newCurrent; + ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_64 <= 30); + ZSTD_reduceIndex(cctx, correction); + cctx->base += correction; + cctx->dictBase += correction; + cctx->lowLimit -= correction; + cctx->dictLimit -= correction; + if (cctx->nextToUpdate < correction) cctx->nextToUpdate = 0; + else cctx->nextToUpdate -= correction; + } + + if ((U32)(ip+blockSize - cctx->base) > cctx->loadedDictEnd + maxDist) { + /* enforce 
maxDist */ + U32 const newLowLimit = (U32)(ip+blockSize - cctx->base) - maxDist; + if (cctx->lowLimit < newLowLimit) cctx->lowLimit = newLowLimit; + if (cctx->dictLimit < cctx->lowLimit) cctx->dictLimit = cctx->lowLimit; + } + + cSize = ZSTD_compressBlock_internal(cctx, op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize, ip, blockSize); + if (ZSTD_isError(cSize)) return cSize; + + if (cSize == 0) { /* block is not compressible */ + U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(blockSize << 3); + if (blockSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall); + MEM_writeLE32(op, cBlockHeader24); /* no pb, 4th byte will be overwritten */ + memcpy(op + ZSTD_blockHeaderSize, ip, blockSize); + cSize = ZSTD_blockHeaderSize+blockSize; + } else { + U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3); + MEM_writeLE24(op, cBlockHeader24); + cSize += ZSTD_blockHeaderSize; + } + + remaining -= blockSize; + dstCapacity -= cSize; + ip += blockSize; + op += cSize; + } + + if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending; + return op-ostart; +} + + +static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity, + ZSTD_parameters params, U64 pledgedSrcSize, U32 dictID) +{ BYTE* const op = (BYTE*)dst; + U32 const dictIDSizeCode = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */ + U32 const checksumFlag = params.fParams.checksumFlag>0; + U32 const windowSize = 1U << params.cParams.windowLog; + U32 const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize); + BYTE const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3); + U32 const fcsCode = params.fParams.contentSizeFlag ? 
+ (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : /* 0-3 */ + 0; + BYTE const frameHeaderDecriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) ); + size_t pos; + + if (dstCapacity < ZSTD_frameHeaderSize_max) return ERROR(dstSize_tooSmall); + + MEM_writeLE32(dst, ZSTD_MAGICNUMBER); + op[4] = frameHeaderDecriptionByte; pos=5; + if (!singleSegment) op[pos++] = windowLogByte; + switch(dictIDSizeCode) + { + default: /* impossible */ + case 0 : break; + case 1 : op[pos] = (BYTE)(dictID); pos++; break; + case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break; + case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break; + } + switch(fcsCode) + { + default: /* impossible */ + case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break; + case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break; + case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break; + case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break; + } + return pos; +} + + +static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + U32 frame, U32 lastFrameChunk) +{ + const BYTE* const ip = (const BYTE*) src; + size_t fhSize = 0; + + if (cctx->stage==ZSTDcs_created) return ERROR(stage_wrong); /* missing init (ZSTD_compressBegin) */ + + if (frame && (cctx->stage==ZSTDcs_init)) { + fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, cctx->frameContentSize, cctx->dictID); + if (ZSTD_isError(fhSize)) return fhSize; + dstCapacity -= fhSize; + dst = (char*)dst + fhSize; + cctx->stage = ZSTDcs_ongoing; + } + + /* Check if blocks follow each other */ + if (src != cctx->nextSrc) { + /* not contiguous */ + ptrdiff_t const delta = cctx->nextSrc - ip; + cctx->lowLimit = cctx->dictLimit; + cctx->dictLimit = (U32)(cctx->nextSrc - cctx->base); + cctx->dictBase = cctx->base; + cctx->base -= delta; + 
cctx->nextToUpdate = cctx->dictLimit; + if (cctx->dictLimit - cctx->lowLimit < HASH_READ_SIZE) cctx->lowLimit = cctx->dictLimit; /* too small extDict */ + } + + /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */ + if ((ip+srcSize > cctx->dictBase + cctx->lowLimit) & (ip < cctx->dictBase + cctx->dictLimit)) { + ptrdiff_t const highInputIdx = (ip + srcSize) - cctx->dictBase; + U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)cctx->dictLimit) ? cctx->dictLimit : (U32)highInputIdx; + cctx->lowLimit = lowLimitMax; + } + + cctx->nextSrc = ip + srcSize; + + if (srcSize) { + size_t const cSize = frame ? + ZSTD_compress_generic (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) : + ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize); + if (ZSTD_isError(cSize)) return cSize; + return cSize + fhSize; + } else + return fhSize; +} + + +size_t ZSTD_compressContinue (ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 0); +} + + +size_t ZSTD_getBlockSizeMax(ZSTD_CCtx* cctx) +{ + return MIN (ZSTD_BLOCKSIZE_ABSOLUTEMAX, 1 << cctx->params.cParams.windowLog); +} + +size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + size_t const blockSizeMax = ZSTD_getBlockSizeMax(cctx); + if (srcSize > blockSizeMax) return ERROR(srcSize_wrong); + return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0, 0); +} + +/*! 
ZSTD_loadDictionaryContent() : + * @return : 0, or an error code + */ +static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx* zc, const void* src, size_t srcSize) +{ + const BYTE* const ip = (const BYTE*) src; + const BYTE* const iend = ip + srcSize; + + /* input becomes current prefix */ + zc->lowLimit = zc->dictLimit; + zc->dictLimit = (U32)(zc->nextSrc - zc->base); + zc->dictBase = zc->base; + zc->base += ip - zc->nextSrc; + zc->nextToUpdate = zc->dictLimit; + zc->loadedDictEnd = zc->forceWindow ? 0 : (U32)(iend - zc->base); + + zc->nextSrc = iend; + if (srcSize <= HASH_READ_SIZE) return 0; + + switch(zc->params.cParams.strategy) + { + case ZSTD_fast: + ZSTD_fillHashTable (zc, iend, zc->params.cParams.searchLength); + break; + + case ZSTD_dfast: + ZSTD_fillDoubleHashTable (zc, iend, zc->params.cParams.searchLength); + break; + + case ZSTD_greedy: + case ZSTD_lazy: + case ZSTD_lazy2: + if (srcSize >= HASH_READ_SIZE) + ZSTD_insertAndFindFirstIndex(zc, iend-HASH_READ_SIZE, zc->params.cParams.searchLength); + break; + + case ZSTD_btlazy2: + case ZSTD_btopt: + case ZSTD_btopt2: + if (srcSize >= HASH_READ_SIZE) + ZSTD_updateTree(zc, iend-HASH_READ_SIZE, iend, 1 << zc->params.cParams.searchLog, zc->params.cParams.searchLength); + break; + + default: + return ERROR(GENERIC); /* strategy doesn't exist; impossible */ + } + + zc->nextToUpdate = (U32)(iend - zc->base); + return 0; +} + + +/* Dictionaries that assign zero probability to symbols that show up causes problems + when FSE encoding. Refuse dictionaries that assign zero probability to symbols + that we may encounter during compression. + NOTE: This behavior is not standard and could be improved in the future. 
*/ +static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) { + U32 s; + if (dictMaxSymbolValue < maxSymbolValue) return ERROR(dictionary_corrupted); + for (s = 0; s <= maxSymbolValue; ++s) { + if (normalizedCounter[s] == 0) return ERROR(dictionary_corrupted); + } + return 0; +} + + +/* Dictionary format : + * See : + * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format + */ +/*! ZSTD_loadZstdDictionary() : + * @return : 0, or an error code + * assumptions : magic number supposed already checked + * dictSize supposed > 8 + */ +static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize) +{ + const BYTE* dictPtr = (const BYTE*)dict; + const BYTE* const dictEnd = dictPtr + dictSize; + short offcodeNCount[MaxOff+1]; + unsigned offcodeMaxValue = MaxOff; + BYTE scratchBuffer[1<dictID = cctx->params.fParams.noDictIDFlag ? 0 : MEM_readLE32(dictPtr); + dictPtr += 4; + + { size_t const hufHeaderSize = HUF_readCTable(cctx->hufTable, 255, dictPtr, dictEnd-dictPtr); + if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted); + dictPtr += hufHeaderSize; + } + + { unsigned offcodeLog; + size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr); + if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted); + if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted); + /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */ + CHECK_E (FSE_buildCTable_wksp(cctx->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, scratchBuffer, sizeof(scratchBuffer)), dictionary_corrupted); + dictPtr += offcodeHeaderSize; + } + + { short matchlengthNCount[MaxML+1]; + unsigned matchlengthMaxValue = MaxML, matchlengthLog; + size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, 
dictPtr, dictEnd-dictPtr); + if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted); + if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted); + /* Every match length code must have non-zero probability */ + CHECK_F (ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML)); + CHECK_E (FSE_buildCTable_wksp(cctx->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, scratchBuffer, sizeof(scratchBuffer)), dictionary_corrupted); + dictPtr += matchlengthHeaderSize; + } + + { short litlengthNCount[MaxLL+1]; + unsigned litlengthMaxValue = MaxLL, litlengthLog; + size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr); + if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted); + if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted); + /* Every literal length code must have non-zero probability */ + CHECK_F (ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL)); + CHECK_E(FSE_buildCTable_wksp(cctx->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, scratchBuffer, sizeof(scratchBuffer)), dictionary_corrupted); + dictPtr += litlengthHeaderSize; + } + + if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted); + cctx->rep[0] = MEM_readLE32(dictPtr+0); + cctx->rep[1] = MEM_readLE32(dictPtr+4); + cctx->rep[2] = MEM_readLE32(dictPtr+8); + dictPtr += 12; + + { size_t const dictContentSize = (size_t)(dictEnd - dictPtr); + U32 offcodeMax = MaxOff; + if (dictContentSize <= ((U32)-1) - 128 KB) { + U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */ + offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */ + } + /* All offset values <= dictContentSize + 128 KB must be representable */ + CHECK_F (ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff))); + /* All repCodes must 
be <= dictContentSize and != 0*/ + { U32 u; + for (u=0; u<3; u++) { + if (cctx->rep[u] == 0) return ERROR(dictionary_corrupted); + if (cctx->rep[u] > dictContentSize) return ERROR(dictionary_corrupted); + } } + + cctx->flagStaticTables = 1; + cctx->flagStaticHufTable = HUF_repeat_valid; + return ZSTD_loadDictionaryContent(cctx, dictPtr, dictContentSize); + } +} + +/** ZSTD_compress_insertDictionary() : +* @return : 0, or an error code */ +static size_t ZSTD_compress_insertDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize) +{ + if ((dict==NULL) || (dictSize<=8)) return 0; + + /* dict as pure content */ + if ((MEM_readLE32(dict) != ZSTD_DICT_MAGIC) || (cctx->forceRawDict)) + return ZSTD_loadDictionaryContent(cctx, dict, dictSize); + + /* dict as zstd dictionary */ + return ZSTD_loadZstdDictionary(cctx, dict, dictSize); +} + +/*! ZSTD_compressBegin_internal() : +* @return : 0, or an error code */ +static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx, + const void* dict, size_t dictSize, + ZSTD_parameters params, U64 pledgedSrcSize) +{ + ZSTD_compResetPolicy_e const crp = dictSize ? ZSTDcrp_fullReset : ZSTDcrp_continue; + CHECK_F(ZSTD_resetCCtx_advanced(cctx, params, pledgedSrcSize, crp)); + return ZSTD_compress_insertDictionary(cctx, dict, dictSize); +} + + +/*! 
ZSTD_compressBegin_advanced() : +* @return : 0, or an error code */ +size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, + const void* dict, size_t dictSize, + ZSTD_parameters params, unsigned long long pledgedSrcSize) +{ + /* compression parameters verification and optimization */ + CHECK_F(ZSTD_checkCParams(params.cParams)); + return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, pledgedSrcSize); +} + + +size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel) +{ + ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize); + return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, 0); +} + + +size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel) +{ + return ZSTD_compressBegin_usingDict(cctx, NULL, 0, compressionLevel); +} + + +/*! ZSTD_writeEpilogue() : +* Ends a frame. +* @return : nb of bytes written into dst (or an error code) */ +static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity) +{ + BYTE* const ostart = (BYTE*)dst; + BYTE* op = ostart; + size_t fhSize = 0; + + if (cctx->stage == ZSTDcs_created) return ERROR(stage_wrong); /* init missing */ + + /* special case : empty frame */ + if (cctx->stage == ZSTDcs_init) { + fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, 0, 0); + if (ZSTD_isError(fhSize)) return fhSize; + dstCapacity -= fhSize; + op += fhSize; + cctx->stage = ZSTDcs_ongoing; + } + + if (cctx->stage != ZSTDcs_ending) { + /* write one last empty block, make it the "last" block */ + U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0; + if (dstCapacity<4) return ERROR(dstSize_tooSmall); + MEM_writeLE32(op, cBlockHeader24); + op += ZSTD_blockHeaderSize; + dstCapacity -= ZSTD_blockHeaderSize; + } + + if (cctx->params.fParams.checksumFlag) { + U32 const checksum = (U32) XXH64_digest(&cctx->xxhState); + if (dstCapacity<4) return ERROR(dstSize_tooSmall); + MEM_writeLE32(op, checksum); + op += 4; 
+ } + + cctx->stage = ZSTDcs_created; /* return to "created but no init" status */ + return op-ostart; +} + + +size_t ZSTD_compressEnd (ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + size_t endResult; + size_t const cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 1); + if (ZSTD_isError(cSize)) return cSize; + endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize); + if (ZSTD_isError(endResult)) return endResult; + return cSize + endResult; +} + + +static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void* dict,size_t dictSize, + ZSTD_parameters params) +{ + CHECK_F(ZSTD_compressBegin_internal(cctx, dict, dictSize, params, srcSize)); + return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); +} + +size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict, size_t dictSize, ZSTD_parameters params) +{ + return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params); +} + + +size_t ZSTD_compressCCtx(ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, ZSTD_parameters params) +{ + return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, NULL, 0, params); +} + + +/* ===== Dictionary API ===== */ + +struct ZSTD_CDict_s { + void* dictBuffer; + const void* dictContent; + size_t dictContentSize; + ZSTD_CCtx* refContext; +}; /* typedef'd tp ZSTD_CDict within "zstd.h" */ + +size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams) +{ + return ZSTD_CCtxWorkspaceBound(cParams) + ZSTD_ALIGN(sizeof(ZSTD_CDict)); +} + +static ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize, unsigned byReference, + ZSTD_parameters params, ZSTD_customMem customMem) +{ + if (!customMem.customAlloc || !customMem.customFree) return NULL; + + { ZSTD_CDict* 
const cdict = (ZSTD_CDict*) ZSTD_malloc(sizeof(ZSTD_CDict), customMem); + ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(customMem); + + if (!cdict || !cctx) { + ZSTD_free(cdict, customMem); + ZSTD_freeCCtx(cctx); + return NULL; + } + + if ((byReference) || (!dictBuffer) || (!dictSize)) { + cdict->dictBuffer = NULL; + cdict->dictContent = dictBuffer; + } else { + void* const internalBuffer = ZSTD_malloc(dictSize, customMem); + if (!internalBuffer) { ZSTD_free(cctx, customMem); ZSTD_free(cdict, customMem); return NULL; } + memcpy(internalBuffer, dictBuffer, dictSize); + cdict->dictBuffer = internalBuffer; + cdict->dictContent = internalBuffer; + } + + { size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0); + if (ZSTD_isError(errorCode)) { + ZSTD_free(cdict->dictBuffer, customMem); + ZSTD_free(cdict, customMem); + ZSTD_freeCCtx(cctx); + return NULL; + } } + + cdict->refContext = cctx; + cdict->dictContentSize = dictSize; + return cdict; + } +} + +ZSTD_CDict* ZSTD_initCDict(const void* dict, size_t dictSize, ZSTD_parameters params, void* workspace, size_t workspaceSize) +{ + ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); + return ZSTD_createCDict_advanced(dict, dictSize, 1, params, stackMem); +} + +size_t ZSTD_freeCDict(ZSTD_CDict* cdict) +{ + if (cdict==NULL) return 0; /* support free on NULL */ + { ZSTD_customMem const cMem = cdict->refContext->customMem; + ZSTD_freeCCtx(cdict->refContext); + ZSTD_free(cdict->dictBuffer, cMem); + ZSTD_free(cdict, cMem); + return 0; + } +} + +static ZSTD_parameters ZSTD_getParamsFromCDict(const ZSTD_CDict* cdict) { + return ZSTD_getParamsFromCCtx(cdict->refContext); +} + +size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, unsigned long long pledgedSrcSize) +{ + if (cdict->dictContentSize) CHECK_F(ZSTD_copyCCtx(cctx, cdict->refContext, pledgedSrcSize)) + else { + ZSTD_parameters params = cdict->refContext->params; + 
params.fParams.contentSizeFlag = (pledgedSrcSize > 0); + CHECK_F(ZSTD_compressBegin_advanced(cctx, NULL, 0, params, pledgedSrcSize)); + } + return 0; +} + +/*! ZSTD_compress_usingCDict() : +* Compression using a digested Dictionary. +* Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times. +* Note that compression level is decided during dictionary creation */ +size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const ZSTD_CDict* cdict) +{ + CHECK_F(ZSTD_compressBegin_usingCDict(cctx, cdict, srcSize)); + + if (cdict->refContext->params.fParams.contentSizeFlag==1) { + cctx->params.fParams.contentSizeFlag = 1; + cctx->frameContentSize = srcSize; + } else { + cctx->params.fParams.contentSizeFlag = 0; + } + + return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize); +} + + + +/* ****************************************************************** +* Streaming +********************************************************************/ + +typedef enum { zcss_init, zcss_load, zcss_flush, zcss_final } ZSTD_cStreamStage; + +struct ZSTD_CStream_s { + ZSTD_CCtx* cctx; + ZSTD_CDict* cdictLocal; + const ZSTD_CDict* cdict; + char* inBuff; + size_t inBuffSize; + size_t inToCompress; + size_t inBuffPos; + size_t inBuffTarget; + size_t blockSize; + char* outBuff; + size_t outBuffSize; + size_t outBuffContentSize; + size_t outBuffFlushedSize; + ZSTD_cStreamStage stage; + U32 checksum; + U32 frameEnded; + U64 pledgedSrcSize; + U64 inputProcessed; + ZSTD_parameters params; + ZSTD_customMem customMem; +}; /* typedef'd to ZSTD_CStream within "zstd.h" */ + +size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams) +{ + size_t const inBuffSize = (size_t)1 << cParams.windowLog; + size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, inBuffSize); + size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1; + + return ZSTD_CCtxWorkspaceBound(cParams) + 
ZSTD_ALIGN(sizeof(ZSTD_CStream)) + ZSTD_ALIGN(inBuffSize) + ZSTD_ALIGN(outBuffSize); +} + +ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem) +{ + ZSTD_CStream* zcs; + + if (!customMem.customAlloc || !customMem.customFree) return NULL; + + zcs = (ZSTD_CStream*)ZSTD_malloc(sizeof(ZSTD_CStream), customMem); + if (zcs==NULL) return NULL; + memset(zcs, 0, sizeof(ZSTD_CStream)); + memcpy(&zcs->customMem, &customMem, sizeof(ZSTD_customMem)); + zcs->cctx = ZSTD_createCCtx_advanced(customMem); + if (zcs->cctx == NULL) { ZSTD_freeCStream(zcs); return NULL; } + return zcs; +} + +size_t ZSTD_freeCStream(ZSTD_CStream* zcs) +{ + if (zcs==NULL) return 0; /* support free on NULL */ + { ZSTD_customMem const cMem = zcs->customMem; + ZSTD_freeCCtx(zcs->cctx); + zcs->cctx = NULL; + ZSTD_freeCDict(zcs->cdictLocal); + zcs->cdictLocal = NULL; + ZSTD_free(zcs->inBuff, cMem); + zcs->inBuff = NULL; + ZSTD_free(zcs->outBuff, cMem); + zcs->outBuff = NULL; + ZSTD_free(zcs, cMem); + return 0; + } +} + + +/*====== Initialization ======*/ + +size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; } +size_t ZSTD_CStreamOutSize(void) { return ZSTD_compressBound(ZSTD_BLOCKSIZE_ABSOLUTEMAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ; } + +static size_t ZSTD_resetCStream_internal(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize) +{ + if (zcs->inBuffSize==0) return ERROR(stage_wrong); /* zcs has not been init at least once => can't reset */ + + if (zcs->cdict) CHECK_F(ZSTD_compressBegin_usingCDict(zcs->cctx, zcs->cdict, pledgedSrcSize)) + else CHECK_F(ZSTD_compressBegin_advanced(zcs->cctx, NULL, 0, zcs->params, pledgedSrcSize)); + + zcs->inToCompress = 0; + zcs->inBuffPos = 0; + zcs->inBuffTarget = zcs->blockSize; + zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0; + zcs->stage = zcss_load; + zcs->frameEnded = 0; + zcs->pledgedSrcSize = pledgedSrcSize; + zcs->inputProcessed = 0; + return 0; /* ready to go */ +} + +size_t ZSTD_resetCStream(ZSTD_CStream* zcs, 
unsigned long long pledgedSrcSize) +{ + + zcs->params.fParams.contentSizeFlag = (pledgedSrcSize > 0); + + return ZSTD_resetCStream_internal(zcs, pledgedSrcSize); +} + +static size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, + const void* dict, size_t dictSize, + ZSTD_parameters params, unsigned long long pledgedSrcSize) +{ + /* allocate buffers */ + { size_t const neededInBuffSize = (size_t)1 << params.cParams.windowLog; + if (zcs->inBuffSize < neededInBuffSize) { + zcs->inBuffSize = neededInBuffSize; + ZSTD_free(zcs->inBuff, zcs->customMem); + zcs->inBuff = (char*) ZSTD_malloc(neededInBuffSize, zcs->customMem); + if (zcs->inBuff == NULL) return ERROR(memory_allocation); + } + zcs->blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, neededInBuffSize); + } + if (zcs->outBuffSize < ZSTD_compressBound(zcs->blockSize)+1) { + zcs->outBuffSize = ZSTD_compressBound(zcs->blockSize)+1; + ZSTD_free(zcs->outBuff, zcs->customMem); + zcs->outBuff = (char*) ZSTD_malloc(zcs->outBuffSize, zcs->customMem); + if (zcs->outBuff == NULL) return ERROR(memory_allocation); + } + + if (dict && dictSize >= 8) { + ZSTD_freeCDict(zcs->cdictLocal); + zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, 0, params, zcs->customMem); + if (zcs->cdictLocal == NULL) return ERROR(memory_allocation); + zcs->cdict = zcs->cdictLocal; + } else zcs->cdict = NULL; + + zcs->checksum = params.fParams.checksumFlag > 0; + zcs->params = params; + + return ZSTD_resetCStream_internal(zcs, pledgedSrcSize); +} + +ZSTD_CStream* ZSTD_initCStream(ZSTD_parameters params, unsigned long long pledgedSrcSize, void* workspace, size_t workspaceSize) +{ + ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); + ZSTD_CStream* const zcs = ZSTD_createCStream_advanced(stackMem); + if (zcs) { + size_t const code = ZSTD_initCStream_advanced(zcs, NULL, 0, params, pledgedSrcSize); + if (ZSTD_isError(code)) { return NULL; } + } + return zcs; +} + +ZSTD_CStream* ZSTD_initCStream_usingCDict(const ZSTD_CDict* cdict, 
unsigned long long pledgedSrcSize, void* workspace, size_t workspaceSize) +{ + ZSTD_parameters const params = ZSTD_getParamsFromCDict(cdict); + ZSTD_CStream* const zcs = ZSTD_initCStream(params, pledgedSrcSize, workspace, workspaceSize); + if (zcs) { + zcs->cdict = cdict; + if (ZSTD_isError(ZSTD_resetCStream_internal(zcs, pledgedSrcSize))) { + return NULL; + } + } + return zcs; +} + +/*====== Compression ======*/ + +typedef enum { zsf_gather, zsf_flush, zsf_end } ZSTD_flush_e; + +MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + size_t const length = MIN(dstCapacity, srcSize); + memcpy(dst, src, length); + return length; +} + +static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs, + void* dst, size_t* dstCapacityPtr, + const void* src, size_t* srcSizePtr, + ZSTD_flush_e const flush) +{ + U32 someMoreWork = 1; + const char* const istart = (const char*)src; + const char* const iend = istart + *srcSizePtr; + const char* ip = istart; + char* const ostart = (char*)dst; + char* const oend = ostart + *dstCapacityPtr; + char* op = ostart; + + while (someMoreWork) { + switch(zcs->stage) + { + case zcss_init: return ERROR(init_missing); /* call ZBUFF_compressInit() first ! 
*/ + + case zcss_load: + /* complete inBuffer */ + { size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos; + size_t const loaded = ZSTD_limitCopy(zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend-ip); + zcs->inBuffPos += loaded; + ip += loaded; + if ( (zcs->inBuffPos==zcs->inToCompress) || (!flush && (toLoad != loaded)) ) { + someMoreWork = 0; break; /* not enough input to get a full block : stop there, wait for more */ + } } + /* compress current block (note : this stage cannot be stopped in the middle) */ + { void* cDst; + size_t cSize; + size_t const iSize = zcs->inBuffPos - zcs->inToCompress; + size_t oSize = oend-op; + if (oSize >= ZSTD_compressBound(iSize)) + cDst = op; /* compress directly into output buffer (avoid flush stage) */ + else + cDst = zcs->outBuff, oSize = zcs->outBuffSize; + cSize = (flush == zsf_end) ? + ZSTD_compressEnd(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize) : + ZSTD_compressContinue(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize); + if (ZSTD_isError(cSize)) return cSize; + if (flush == zsf_end) zcs->frameEnded = 1; + /* prepare next block */ + zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize; + if (zcs->inBuffTarget > zcs->inBuffSize) + zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize; /* note : inBuffSize >= blockSize */ + zcs->inToCompress = zcs->inBuffPos; + if (cDst == op) { op += cSize; break; } /* no need to flush */ + zcs->outBuffContentSize = cSize; + zcs->outBuffFlushedSize = 0; + zcs->stage = zcss_flush; /* pass-through to flush stage */ + } + + case zcss_flush: + { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; + size_t const flushed = ZSTD_limitCopy(op, oend-op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush); + op += flushed; + zcs->outBuffFlushedSize += flushed; + if (toFlush!=flushed) { someMoreWork = 0; break; } /* dst too small to store flushed data : stop there */ + zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0; + zcs->stage = zcss_load; + 
break; + } + + case zcss_final: + someMoreWork = 0; /* do nothing */ + break; + + default: + return ERROR(GENERIC); /* impossible */ + } + } + + *srcSizePtr = ip - istart; + *dstCapacityPtr = op - ostart; + zcs->inputProcessed += *srcSizePtr; + if (zcs->frameEnded) return 0; + { size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos; + if (hintInSize==0) hintInSize = zcs->blockSize; + return hintInSize; + } +} + +size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input) +{ + size_t sizeRead = input->size - input->pos; + size_t sizeWritten = output->size - output->pos; + size_t const result = ZSTD_compressStream_generic(zcs, + (char*)(output->dst) + output->pos, &sizeWritten, + (const char*)(input->src) + input->pos, &sizeRead, zsf_gather); + input->pos += sizeRead; + output->pos += sizeWritten; + return result; +} + + +/*====== Finalize ======*/ + +/*! ZSTD_flushStream() : +* @return : amount of data remaining to flush */ +size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) +{ + size_t srcSize = 0; + size_t sizeWritten = output->size - output->pos; + size_t const result = ZSTD_compressStream_generic(zcs, + (char*)(output->dst) + output->pos, &sizeWritten, + &srcSize, &srcSize, /* use a valid src address instead of NULL */ + zsf_flush); + output->pos += sizeWritten; + if (ZSTD_isError(result)) return result; + return zcs->outBuffContentSize - zcs->outBuffFlushedSize; /* remaining to flush */ +} + + +size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output) +{ + BYTE* const ostart = (BYTE*)(output->dst) + output->pos; + BYTE* const oend = (BYTE*)(output->dst) + output->size; + BYTE* op = ostart; + + if ((zcs->pledgedSrcSize) && (zcs->inputProcessed != zcs->pledgedSrcSize)) + return ERROR(srcSize_wrong); /* pledgedSrcSize not respected */ + + if (zcs->stage != zcss_final) { + /* flush whatever remains */ + size_t srcSize = 0; + size_t sizeWritten = output->size - output->pos; + size_t const notEnded = 
ZSTD_compressStream_generic(zcs, ostart, &sizeWritten, &srcSize, &srcSize, zsf_end); /* use a valid src address instead of NULL */ + size_t const remainingToFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; + op += sizeWritten; + if (remainingToFlush) { + output->pos += sizeWritten; + return remainingToFlush + ZSTD_BLOCKHEADERSIZE /* final empty block */ + (zcs->checksum * 4); + } + /* create epilogue */ + zcs->stage = zcss_final; + zcs->outBuffContentSize = !notEnded ? 0 : + ZSTD_compressEnd(zcs->cctx, zcs->outBuff, zcs->outBuffSize, NULL, 0); /* write epilogue, including final empty block, into outBuff */ + } + + /* flush epilogue */ + { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize; + size_t const flushed = ZSTD_limitCopy(op, oend-op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush); + op += flushed; + zcs->outBuffFlushedSize += flushed; + output->pos += op-ostart; + if (toFlush==flushed) zcs->stage = zcss_init; /* end reached */ + return toFlush - flushed; + } +} + + + +/*-===== Pre-defined compression levels =====-*/ + +#define ZSTD_DEFAULT_CLEVEL 1 +#define ZSTD_MAX_CLEVEL 22 +int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; } + +static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = { +{ /* "default" */ + /* W, C, H, S, L, TL, strat */ + { 18, 12, 12, 1, 7, 16, ZSTD_fast }, /* level 0 - never used */ + { 19, 13, 14, 1, 7, 16, ZSTD_fast }, /* level 1 */ + { 19, 15, 16, 1, 6, 16, ZSTD_fast }, /* level 2 */ + { 20, 16, 17, 1, 5, 16, ZSTD_dfast }, /* level 3.*/ + { 20, 18, 18, 1, 5, 16, ZSTD_dfast }, /* level 4.*/ + { 20, 15, 18, 3, 5, 16, ZSTD_greedy }, /* level 5 */ + { 21, 16, 19, 2, 5, 16, ZSTD_lazy }, /* level 6 */ + { 21, 17, 20, 3, 5, 16, ZSTD_lazy }, /* level 7 */ + { 21, 18, 20, 3, 5, 16, ZSTD_lazy2 }, /* level 8 */ + { 21, 20, 20, 3, 5, 16, ZSTD_lazy2 }, /* level 9 */ + { 21, 19, 21, 4, 5, 16, ZSTD_lazy2 }, /* level 10 */ + { 22, 20, 22, 4, 5, 16, ZSTD_lazy2 }, /* level 11 */ + { 
22, 20, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 12 */ + { 22, 21, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 13 */ + { 22, 21, 22, 6, 5, 16, ZSTD_lazy2 }, /* level 14 */ + { 22, 21, 21, 5, 5, 16, ZSTD_btlazy2 }, /* level 15 */ + { 23, 22, 22, 5, 5, 16, ZSTD_btlazy2 }, /* level 16 */ + { 23, 21, 22, 4, 5, 24, ZSTD_btopt }, /* level 17 */ + { 23, 23, 22, 6, 5, 32, ZSTD_btopt }, /* level 18 */ + { 23, 23, 22, 6, 3, 48, ZSTD_btopt }, /* level 19 */ + { 25, 25, 23, 7, 3, 64, ZSTD_btopt2 }, /* level 20 */ + { 26, 26, 23, 7, 3,256, ZSTD_btopt2 }, /* level 21 */ + { 27, 27, 25, 9, 3,512, ZSTD_btopt2 }, /* level 22 */ +}, +{ /* for srcSize <= 256 KB */ + /* W, C, H, S, L, T, strat */ + { 0, 0, 0, 0, 0, 0, ZSTD_fast }, /* level 0 - not used */ + { 18, 13, 14, 1, 6, 8, ZSTD_fast }, /* level 1 */ + { 18, 14, 13, 1, 5, 8, ZSTD_dfast }, /* level 2 */ + { 18, 16, 15, 1, 5, 8, ZSTD_dfast }, /* level 3 */ + { 18, 15, 17, 1, 5, 8, ZSTD_greedy }, /* level 4.*/ + { 18, 16, 17, 4, 5, 8, ZSTD_greedy }, /* level 5.*/ + { 18, 16, 17, 3, 5, 8, ZSTD_lazy }, /* level 6.*/ + { 18, 17, 17, 4, 4, 8, ZSTD_lazy }, /* level 7 */ + { 18, 17, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ + { 18, 17, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ + { 18, 17, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ + { 18, 18, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 11.*/ + { 18, 18, 17, 7, 4, 8, ZSTD_lazy2 }, /* level 12.*/ + { 18, 19, 17, 6, 4, 8, ZSTD_btlazy2 }, /* level 13 */ + { 18, 18, 18, 4, 4, 16, ZSTD_btopt }, /* level 14.*/ + { 18, 18, 18, 4, 3, 16, ZSTD_btopt }, /* level 15.*/ + { 18, 19, 18, 6, 3, 32, ZSTD_btopt }, /* level 16.*/ + { 18, 19, 18, 8, 3, 64, ZSTD_btopt }, /* level 17.*/ + { 18, 19, 18, 9, 3,128, ZSTD_btopt }, /* level 18.*/ + { 18, 19, 18, 10, 3,256, ZSTD_btopt }, /* level 19.*/ + { 18, 19, 18, 11, 3,512, ZSTD_btopt2 }, /* level 20.*/ + { 18, 19, 18, 12, 3,512, ZSTD_btopt2 }, /* level 21.*/ + { 18, 19, 18, 13, 3,512, ZSTD_btopt2 }, /* level 22.*/ +}, +{ /* for srcSize <= 128 KB */ + /* W, C, H, S, L, T, strat */ + 
{ 17, 12, 12, 1, 7, 8, ZSTD_fast }, /* level 0 - not used */ + { 17, 12, 13, 1, 6, 8, ZSTD_fast }, /* level 1 */ + { 17, 13, 16, 1, 5, 8, ZSTD_fast }, /* level 2 */ + { 17, 16, 16, 2, 5, 8, ZSTD_dfast }, /* level 3 */ + { 17, 13, 15, 3, 4, 8, ZSTD_greedy }, /* level 4 */ + { 17, 15, 17, 4, 4, 8, ZSTD_greedy }, /* level 5 */ + { 17, 16, 17, 3, 4, 8, ZSTD_lazy }, /* level 6 */ + { 17, 15, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 7 */ + { 17, 17, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */ + { 17, 17, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */ + { 17, 17, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */ + { 17, 17, 17, 7, 4, 8, ZSTD_lazy2 }, /* level 11 */ + { 17, 17, 17, 8, 4, 8, ZSTD_lazy2 }, /* level 12 */ + { 17, 18, 17, 6, 4, 8, ZSTD_btlazy2 }, /* level 13.*/ + { 17, 17, 17, 7, 3, 8, ZSTD_btopt }, /* level 14.*/ + { 17, 17, 17, 7, 3, 16, ZSTD_btopt }, /* level 15.*/ + { 17, 18, 17, 7, 3, 32, ZSTD_btopt }, /* level 16.*/ + { 17, 18, 17, 7, 3, 64, ZSTD_btopt }, /* level 17.*/ + { 17, 18, 17, 7, 3,256, ZSTD_btopt }, /* level 18.*/ + { 17, 18, 17, 8, 3,256, ZSTD_btopt }, /* level 19.*/ + { 17, 18, 17, 9, 3,256, ZSTD_btopt2 }, /* level 20.*/ + { 17, 18, 17, 10, 3,256, ZSTD_btopt2 }, /* level 21.*/ + { 17, 18, 17, 11, 3,512, ZSTD_btopt2 }, /* level 22.*/ +}, +{ /* for srcSize <= 16 KB */ + /* W, C, H, S, L, T, strat */ + { 14, 12, 12, 1, 7, 6, ZSTD_fast }, /* level 0 - not used */ + { 14, 14, 14, 1, 6, 6, ZSTD_fast }, /* level 1 */ + { 14, 14, 14, 1, 4, 6, ZSTD_fast }, /* level 2 */ + { 14, 14, 14, 1, 4, 6, ZSTD_dfast }, /* level 3.*/ + { 14, 14, 14, 4, 4, 6, ZSTD_greedy }, /* level 4.*/ + { 14, 14, 14, 3, 4, 6, ZSTD_lazy }, /* level 5.*/ + { 14, 14, 14, 4, 4, 6, ZSTD_lazy2 }, /* level 6 */ + { 14, 14, 14, 5, 4, 6, ZSTD_lazy2 }, /* level 7 */ + { 14, 14, 14, 6, 4, 6, ZSTD_lazy2 }, /* level 8.*/ + { 14, 15, 14, 6, 4, 6, ZSTD_btlazy2 }, /* level 9.*/ + { 14, 15, 14, 3, 3, 6, ZSTD_btopt }, /* level 10.*/ + { 14, 15, 14, 6, 3, 8, ZSTD_btopt }, /* level 11.*/ + { 14, 15, 14, 6, 3, 16, 
ZSTD_btopt }, /* level 12.*/ + { 14, 15, 14, 6, 3, 24, ZSTD_btopt }, /* level 13.*/ + { 14, 15, 15, 6, 3, 48, ZSTD_btopt }, /* level 14.*/ + { 14, 15, 15, 6, 3, 64, ZSTD_btopt }, /* level 15.*/ + { 14, 15, 15, 6, 3, 96, ZSTD_btopt }, /* level 16.*/ + { 14, 15, 15, 6, 3,128, ZSTD_btopt }, /* level 17.*/ + { 14, 15, 15, 6, 3,256, ZSTD_btopt }, /* level 18.*/ + { 14, 15, 15, 7, 3,256, ZSTD_btopt }, /* level 19.*/ + { 14, 15, 15, 8, 3,256, ZSTD_btopt2 }, /* level 20.*/ + { 14, 15, 15, 9, 3,256, ZSTD_btopt2 }, /* level 21.*/ + { 14, 15, 15, 10, 3,256, ZSTD_btopt2 }, /* level 22.*/ +}, +}; + +/*! ZSTD_getCParams() : +* @return ZSTD_compressionParameters structure for a selected compression level, `srcSize` and `dictSize`. +* Size values are optional, provide 0 if not known or unused */ +ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSize, size_t dictSize) +{ + ZSTD_compressionParameters cp; + size_t const addedSize = srcSize ? 0 : 500; + U64 const rSize = srcSize+dictSize ? srcSize+dictSize+addedSize : (U64)-1; + U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); /* intentional underflow for srcSizeHint == 0 */ + if (compressionLevel <= 0) compressionLevel = ZSTD_DEFAULT_CLEVEL; /* 0 == default; no negative compressionLevel yet */ + if (compressionLevel > ZSTD_MAX_CLEVEL) compressionLevel = ZSTD_MAX_CLEVEL; + cp = ZSTD_defaultCParameters[tableID][compressionLevel]; + if (MEM_32bits()) { /* auto-correction, for 32-bits mode */ + if (cp.windowLog > ZSTD_WINDOWLOG_MAX) cp.windowLog = ZSTD_WINDOWLOG_MAX; + if (cp.chainLog > ZSTD_CHAINLOG_MAX) cp.chainLog = ZSTD_CHAINLOG_MAX; + if (cp.hashLog > ZSTD_HASHLOG_MAX) cp.hashLog = ZSTD_HASHLOG_MAX; + } + cp = ZSTD_adjustCParams(cp, srcSize, dictSize); + return cp; +} + +/*! ZSTD_getParams() : +* same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`). 
+* All fields of `ZSTD_frameParameters` are set to default (0) */ +ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSize, size_t dictSize) { + ZSTD_parameters params; + ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSize, dictSize); + memset(&params, 0, sizeof(params)); + params.cParams = cParams; + return params; +} + +EXPORT_SYMBOL(ZSTD_maxCLevel); +EXPORT_SYMBOL(ZSTD_compressBound); + +EXPORT_SYMBOL(ZSTD_CCtxWorkspaceBound); +EXPORT_SYMBOL(ZSTD_initCCtx); +EXPORT_SYMBOL(ZSTD_compressCCtx); +EXPORT_SYMBOL(ZSTD_compress_usingDict); + +EXPORT_SYMBOL(ZSTD_CDictWorkspaceBound); +EXPORT_SYMBOL(ZSTD_initCDict); +EXPORT_SYMBOL(ZSTD_compress_usingCDict); + +EXPORT_SYMBOL(ZSTD_CStreamWorkspaceBound); +EXPORT_SYMBOL(ZSTD_initCStream); +EXPORT_SYMBOL(ZSTD_initCStream_usingCDict); +EXPORT_SYMBOL(ZSTD_resetCStream); +EXPORT_SYMBOL(ZSTD_compressStream); +EXPORT_SYMBOL(ZSTD_flushStream); +EXPORT_SYMBOL(ZSTD_endStream); +EXPORT_SYMBOL(ZSTD_CStreamInSize); +EXPORT_SYMBOL(ZSTD_CStreamOutSize); + +EXPORT_SYMBOL(ZSTD_getCParams); +EXPORT_SYMBOL(ZSTD_getParams); +EXPORT_SYMBOL(ZSTD_checkCParams); +EXPORT_SYMBOL(ZSTD_adjustCParams); + +EXPORT_SYMBOL(ZSTD_compressBegin); +EXPORT_SYMBOL(ZSTD_compressBegin_usingDict); +EXPORT_SYMBOL(ZSTD_compressBegin_advanced); +EXPORT_SYMBOL(ZSTD_copyCCtx); +EXPORT_SYMBOL(ZSTD_compressBegin_usingCDict); +EXPORT_SYMBOL(ZSTD_compressContinue); +EXPORT_SYMBOL(ZSTD_compressEnd); + +EXPORT_SYMBOL(ZSTD_getBlockSizeMax); +EXPORT_SYMBOL(ZSTD_compressBlock); + +MODULE_LICENSE("BSD"); +MODULE_DESCRIPTION("Zstd Compressor"); diff --git a/contrib/linux-kernel/lib/zstd/decompress.c b/contrib/linux-kernel/lib/zstd/decompress.c new file mode 100644 index 000000000..94f5fd560 --- /dev/null +++ b/contrib/linux-kernel/lib/zstd/decompress.c @@ -0,0 +1,2377 @@ +/** + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. 
+ * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + + +/* *************************************************************** +* Tuning parameters +*****************************************************************/ +/*! +* MAXWINDOWSIZE_DEFAULT : +* maximum window size accepted by DStream, by default. +* Frames requiring more memory will be rejected. +*/ +#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT +# define ZSTD_MAXWINDOWSIZE_DEFAULT ((1 << ZSTD_WINDOWLOG_MAX) + 1) /* defined within zstd.h */ +#endif + + +/*-******************************************************* +* Dependencies +*********************************************************/ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> /* memcpy, memmove, memset */ +#include "mem.h" /* low level memory routines */ +#include "fse.h" +#include "huf.h" +#include "zstd_internal.h" + +#define ZSTD_PREFETCH(ptr) __builtin_prefetch(ptr, 0, 0) + +/*-************************************* +* Macros +***************************************/ +#define ZSTD_isError ERR_isError /* for inlining */ +#define FSE_isError ERR_isError +#define HUF_isError ERR_isError + + +/*_******************************************************* +* Memory operations +**********************************************************/ +static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); } + + +/*-************************************************************* +* Context management +***************************************************************/ +typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader, + ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock, + ZSTDds_decompressLastBlock, ZSTDds_checkChecksum, + ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage; + +typedef struct { + FSE_DTable LLTable[FSE_DTABLE_SIZE_U32(LLFSELog)]; + FSE_DTable 
OFTable[FSE_DTABLE_SIZE_U32(OffFSELog)]; + FSE_DTable MLTable[FSE_DTABLE_SIZE_U32(MLFSELog)]; + HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */ + U32 rep[ZSTD_REP_NUM]; +} ZSTD_entropyTables_t; + +struct ZSTD_DCtx_s +{ + const FSE_DTable* LLTptr; + const FSE_DTable* MLTptr; + const FSE_DTable* OFTptr; + const HUF_DTable* HUFptr; + ZSTD_entropyTables_t entropy; + const void* previousDstEnd; /* detect continuity */ + const void* base; /* start of current segment */ + const void* vBase; /* virtual start of previous segment if it was just before current one */ + const void* dictEnd; /* end of previous segment */ + size_t expected; + ZSTD_frameParams fParams; + blockType_e bType; /* used in ZSTD_decompressContinue(), to transfer blockType between header decoding and block decoding stages */ + ZSTD_dStage stage; + U32 litEntropy; + U32 fseEntropy; + XXH64_state_t xxhState; + size_t headerSize; + U32 dictID; + const BYTE* litPtr; + ZSTD_customMem customMem; + size_t litSize; + size_t rleSize; + BYTE litBuffer[ZSTD_BLOCKSIZE_ABSOLUTEMAX + WILDCOPY_OVERLENGTH]; + BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX]; +}; /* typedef'd to ZSTD_DCtx within "zstd.h" */ + +size_t ZSTD_DCtxWorkspaceBound(void) +{ + return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_DCtx)); +} + +size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx) +{ + dctx->expected = ZSTD_frameHeaderSize_prefix; + dctx->stage = ZSTDds_getFrameHeaderSize; + dctx->previousDstEnd = NULL; + dctx->base = NULL; + dctx->vBase = NULL; + dctx->dictEnd = NULL; + dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ + dctx->litEntropy = dctx->fseEntropy = 0; + dctx->dictID = 0; + MEM_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue)); + memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */ + dctx->LLTptr = dctx->entropy.LLTable; + dctx->MLTptr = dctx->entropy.MLTable; + dctx->OFTptr = 
dctx->entropy.OFTable; + dctx->HUFptr = dctx->entropy.hufTable; + return 0; +} + +ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem) +{ + ZSTD_DCtx* dctx; + + if (!customMem.customAlloc || !customMem.customFree) return NULL; + + dctx = (ZSTD_DCtx*)ZSTD_malloc(sizeof(ZSTD_DCtx), customMem); + if (!dctx) return NULL; + memcpy(&dctx->customMem, &customMem, sizeof(customMem)); + ZSTD_decompressBegin(dctx); + return dctx; +} + +ZSTD_DCtx* ZSTD_initDCtx(void* workspace, size_t workspaceSize) +{ + ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); + return ZSTD_createDCtx_advanced(stackMem); +} + +size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx) +{ + if (dctx==NULL) return 0; /* support free on NULL */ + ZSTD_free(dctx, dctx->customMem); + return 0; /* reserved as a potential error code in the future */ +} + +void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx) +{ + size_t const workSpaceSize = (ZSTD_BLOCKSIZE_ABSOLUTEMAX+WILDCOPY_OVERLENGTH) + ZSTD_frameHeaderSize_max; + memcpy(dstDCtx, srcDCtx, sizeof(ZSTD_DCtx) - workSpaceSize); /* no need to copy workspace */ +} + +#if 0 +/* deprecated */ +static void ZSTD_refDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx) +{ + ZSTD_decompressBegin(dstDCtx); /* init */ + if (srcDCtx) { /* support refDCtx on NULL */ + dstDCtx->dictEnd = srcDCtx->dictEnd; + dstDCtx->vBase = srcDCtx->vBase; + dstDCtx->base = srcDCtx->base; + dstDCtx->previousDstEnd = srcDCtx->previousDstEnd; + dstDCtx->dictID = srcDCtx->dictID; + dstDCtx->litEntropy = srcDCtx->litEntropy; + dstDCtx->fseEntropy = srcDCtx->fseEntropy; + dstDCtx->LLTptr = srcDCtx->entropy.LLTable; + dstDCtx->MLTptr = srcDCtx->entropy.MLTable; + dstDCtx->OFTptr = srcDCtx->entropy.OFTable; + dstDCtx->HUFptr = srcDCtx->entropy.hufTable; + dstDCtx->entropy.rep[0] = srcDCtx->entropy.rep[0]; + dstDCtx->entropy.rep[1] = srcDCtx->entropy.rep[1]; + dstDCtx->entropy.rep[2] = srcDCtx->entropy.rep[2]; + } +} +#endif + +static void ZSTD_refDDict(ZSTD_DCtx* dstDCtx, 
const ZSTD_DDict* ddict); + + +/*-************************************************************* +* Decompression section +***************************************************************/ + +/*! ZSTD_isFrame() : + * Tells if the content of `buffer` starts with a valid Frame Identifier. + * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0. + * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled. + * Note 3 : Skippable Frame Identifiers are considered valid. */ +unsigned ZSTD_isFrame(const void* buffer, size_t size) +{ + if (size < 4) return 0; + { U32 const magic = MEM_readLE32(buffer); + if (magic == ZSTD_MAGICNUMBER) return 1; + if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) return 1; + } + return 0; +} + + +/** ZSTD_frameHeaderSize() : +* srcSize must be >= ZSTD_frameHeaderSize_prefix. +* @return : size of the Frame Header */ +static size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize) +{ + if (srcSize < ZSTD_frameHeaderSize_prefix) return ERROR(srcSize_wrong); + { BYTE const fhd = ((const BYTE*)src)[4]; + U32 const dictID= fhd & 3; + U32 const singleSegment = (fhd >> 5) & 1; + U32 const fcsId = fhd >> 6; + return ZSTD_frameHeaderSize_prefix + !singleSegment + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId] + + (singleSegment && !fcsId); + } +} + + +/** ZSTD_getFrameParams() : +* decode Frame Header, or require larger `srcSize`. 
+* @return : 0, `fparamsPtr` is correctly filled, +* >0, `srcSize` is too small, result is expected `srcSize`, +* or an error code, which can be tested using ZSTD_isError() */ +size_t ZSTD_getFrameParams(ZSTD_frameParams* fparamsPtr, const void* src, size_t srcSize) +{ + const BYTE* ip = (const BYTE*)src; + + if (srcSize < ZSTD_frameHeaderSize_prefix) return ZSTD_frameHeaderSize_prefix; + if (MEM_readLE32(src) != ZSTD_MAGICNUMBER) { + if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { + if (srcSize < ZSTD_skippableHeaderSize) return ZSTD_skippableHeaderSize; /* magic number + skippable frame length */ + memset(fparamsPtr, 0, sizeof(*fparamsPtr)); + fparamsPtr->frameContentSize = MEM_readLE32((const char *)src + 4); + fparamsPtr->windowSize = 0; /* windowSize==0 means a frame is skippable */ + return 0; + } + return ERROR(prefix_unknown); + } + + /* ensure there is enough `srcSize` to fully read/decode frame header */ + { size_t const fhsize = ZSTD_frameHeaderSize(src, srcSize); + if (srcSize < fhsize) return fhsize; } + + { BYTE const fhdByte = ip[4]; + size_t pos = 5; + U32 const dictIDSizeCode = fhdByte&3; + U32 const checksumFlag = (fhdByte>>2)&1; + U32 const singleSegment = (fhdByte>>5)&1; + U32 const fcsID = fhdByte>>6; + U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; + U32 windowSize = 0; + U32 dictID = 0; + U64 frameContentSize = 0; + if ((fhdByte & 0x08) != 0) return ERROR(frameParameter_unsupported); /* reserved bits, which must be zero */ + if (!singleSegment) { + BYTE const wlByte = ip[pos++]; + U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN; + if (windowLog > ZSTD_WINDOWLOG_MAX) return ERROR(frameParameter_windowTooLarge); /* avoids issue with 1 << windowLog */ + windowSize = (1U << windowLog); + windowSize += (windowSize >> 3) * (wlByte&7); + } + + switch(dictIDSizeCode) + { + default: /* impossible */ + case 0 : break; + case 1 : dictID = ip[pos]; pos++; break; + case 2 : dictID = MEM_readLE16(ip+pos); 
pos+=2; break; + case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break; + } + switch(fcsID) + { + default: /* impossible */ + case 0 : if (singleSegment) frameContentSize = ip[pos]; break; + case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break; + case 2 : frameContentSize = MEM_readLE32(ip+pos); break; + case 3 : frameContentSize = MEM_readLE64(ip+pos); break; + } + if (!windowSize) windowSize = (U32)frameContentSize; + if (windowSize > windowSizeMax) return ERROR(frameParameter_windowTooLarge); + fparamsPtr->frameContentSize = frameContentSize; + fparamsPtr->windowSize = windowSize; + fparamsPtr->dictID = dictID; + fparamsPtr->checksumFlag = checksumFlag; + } + return 0; +} + +/** ZSTD_getFrameContentSize() : +* compatible with legacy mode +* @return : decompressed size of the single frame pointed to be `src` if known, otherwise +* - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined +* - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */ +unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize) +{ + { + ZSTD_frameParams fParams; + if (ZSTD_getFrameParams(&fParams, src, srcSize) != 0) return ZSTD_CONTENTSIZE_ERROR; + if (fParams.windowSize == 0) { + /* Either skippable or empty frame, size == 0 either way */ + return 0; + } else if (fParams.frameContentSize != 0) { + return fParams.frameContentSize; + } else { + return ZSTD_CONTENTSIZE_UNKNOWN; + } + } +} + +/** ZSTD_findDecompressedSize() : + * compatible with legacy mode + * `srcSize` must be the exact length of some number of ZSTD compressed and/or + * skippable frames + * @return : decompressed size of the frames contained */ +unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize) +{ + { + unsigned long long totalDstSize = 0; + while (srcSize >= ZSTD_frameHeaderSize_prefix) { + const U32 magicNumber = MEM_readLE32(src); + + if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { + size_t skippableSize; 
+ if (srcSize < ZSTD_skippableHeaderSize) + return ERROR(srcSize_wrong); + skippableSize = MEM_readLE32((const BYTE *)src + 4) + + ZSTD_skippableHeaderSize; + if (srcSize < skippableSize) { + return ZSTD_CONTENTSIZE_ERROR; + } + + src = (const BYTE *)src + skippableSize; + srcSize -= skippableSize; + continue; + } + + { + unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize); + if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret; + + /* check for overflow */ + if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR; + totalDstSize += ret; + } + { + size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize); + if (ZSTD_isError(frameSrcSize)) { + return ZSTD_CONTENTSIZE_ERROR; + } + + src = (const BYTE *)src + frameSrcSize; + srcSize -= frameSrcSize; + } + } + + if (srcSize) { + return ZSTD_CONTENTSIZE_ERROR; + } + + return totalDstSize; + } +} + +/** ZSTD_decodeFrameHeader() : +* `headerSize` must be the size provided by ZSTD_frameHeaderSize(). +* @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */ +static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize) +{ + size_t const result = ZSTD_getFrameParams(&(dctx->fParams), src, headerSize); + if (ZSTD_isError(result)) return result; /* invalid header */ + if (result>0) return ERROR(srcSize_wrong); /* headerSize too small */ + if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID)) return ERROR(dictionary_wrong); + if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0); + return 0; +} + + +typedef struct +{ + blockType_e blockType; + U32 lastBlock; + U32 origSize; +} blockProperties_t; + +/*! 
ZSTD_getcBlockSize() : +* Provides the size of compressed block from block header `src` */ +size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr) +{ + if (srcSize < ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); + { U32 const cBlockHeader = MEM_readLE24(src); + U32 const cSize = cBlockHeader >> 3; + bpPtr->lastBlock = cBlockHeader & 1; + bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3); + bpPtr->origSize = cSize; /* only useful for RLE */ + if (bpPtr->blockType == bt_rle) return 1; + if (bpPtr->blockType == bt_reserved) return ERROR(corruption_detected); + return cSize; + } +} + + +static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall); + memcpy(dst, src, srcSize); + return srcSize; +} + + +static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize, size_t regenSize) +{ + if (srcSize != 1) return ERROR(srcSize_wrong); + if (regenSize > dstCapacity) return ERROR(dstSize_tooSmall); + memset(dst, *(const BYTE*)src, regenSize); + return regenSize; +} + +/*! 
ZSTD_decodeLiteralsBlock() : + @return : nb of bytes read from src (< srcSize ) */ +size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx, + const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */ +{ + if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected); + + { const BYTE* const istart = (const BYTE*) src; + symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3); + + switch(litEncType) + { + case set_repeat: + if (dctx->litEntropy==0) return ERROR(dictionary_corrupted); + /* fall-through */ + case set_compressed: + if (srcSize < 5) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */ + { size_t lhSize, litSize, litCSize; + U32 singleStream=0; + U32 const lhlCode = (istart[0] >> 2) & 3; + U32 const lhc = MEM_readLE32(istart); + switch(lhlCode) + { + case 0: case 1: default: /* note : default is impossible, since lhlCode into [0..3] */ + /* 2 - 2 - 10 - 10 */ + singleStream = !lhlCode; + lhSize = 3; + litSize = (lhc >> 4) & 0x3FF; + litCSize = (lhc >> 14) & 0x3FF; + break; + case 2: + /* 2 - 2 - 14 - 14 */ + lhSize = 4; + litSize = (lhc >> 4) & 0x3FFF; + litCSize = lhc >> 18; + break; + case 3: + /* 2 - 2 - 18 - 18 */ + lhSize = 5; + litSize = (lhc >> 4) & 0x3FFFF; + litCSize = (lhc >> 22) + (istart[4] << 10); + break; + } + if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX) return ERROR(corruption_detected); + if (litCSize + lhSize > srcSize) return ERROR(corruption_detected); + + if (HUF_isError((litEncType==set_repeat) ? + ( singleStream ? + HUF_decompress1X_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr) : + HUF_decompress4X_usingDTable(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr) ) : + ( singleStream ? 
+ HUF_decompress1X2_DCtx(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize) : + HUF_decompress4X_hufOnly (dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize)) )) + return ERROR(corruption_detected); + + dctx->litPtr = dctx->litBuffer; + dctx->litSize = litSize; + dctx->litEntropy = 1; + if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable; + memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); + return litCSize + lhSize; + } + + case set_basic: + { size_t litSize, lhSize; + U32 const lhlCode = ((istart[0]) >> 2) & 3; + switch(lhlCode) + { + case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */ + lhSize = 1; + litSize = istart[0] >> 3; + break; + case 1: + lhSize = 2; + litSize = MEM_readLE16(istart) >> 4; + break; + case 3: + lhSize = 3; + litSize = MEM_readLE24(istart) >> 4; + break; + } + + if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */ + if (litSize+lhSize > srcSize) return ERROR(corruption_detected); + memcpy(dctx->litBuffer, istart+lhSize, litSize); + dctx->litPtr = dctx->litBuffer; + dctx->litSize = litSize; + memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH); + return lhSize+litSize; + } + /* direct reference into compressed stream */ + dctx->litPtr = istart+lhSize; + dctx->litSize = litSize; + return lhSize+litSize; + } + + case set_rle: + { U32 const lhlCode = ((istart[0]) >> 2) & 3; + size_t litSize, lhSize; + switch(lhlCode) + { + case 0: case 2: default: /* note : default is impossible, since lhlCode into [0..3] */ + lhSize = 1; + litSize = istart[0] >> 3; + break; + case 1: + lhSize = 2; + litSize = MEM_readLE16(istart) >> 4; + break; + case 3: + lhSize = 3; + litSize = MEM_readLE24(istart) >> 4; + if (srcSize<4) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */ + break; + } + if (litSize > ZSTD_BLOCKSIZE_ABSOLUTEMAX) return 
ERROR(corruption_detected); + memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH); + dctx->litPtr = dctx->litBuffer; + dctx->litSize = litSize; + return lhSize+1; + } + default: + return ERROR(corruption_detected); /* impossible */ + } + } +} + + +typedef union { + FSE_decode_t realData; + U32 alignedBy4; +} FSE_decode_t4; + +static const FSE_decode_t4 LL_defaultDTable[(1< max) return ERROR(corruption_detected); + FSE_buildDTable_rle(DTableSpace, *(const BYTE*)src); + *DTablePtr = DTableSpace; + return 1; + case set_basic : + *DTablePtr = (const FSE_DTable*)tmpPtr; + return 0; + case set_repeat: + if (!flagRepeatTable) return ERROR(corruption_detected); + return 0; + default : /* impossible */ + case set_compressed : + { U32 tableLog; + S16 norm[MaxSeq+1]; + size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize); + if (FSE_isError(headerSize)) return ERROR(corruption_detected); + if (tableLog > maxLog) return ERROR(corruption_detected); + FSE_buildDTable(DTableSpace, norm, max, tableLog); + *DTablePtr = DTableSpace; + return headerSize; + } } +} + +size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr, + const void* src, size_t srcSize) +{ + const BYTE* const istart = (const BYTE* const)src; + const BYTE* const iend = istart + srcSize; + const BYTE* ip = istart; + + /* check */ + if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong); + + /* SeqHead */ + { int nbSeq = *ip++; + if (!nbSeq) { *nbSeqPtr=0; return 1; } + if (nbSeq > 0x7F) { + if (nbSeq == 0xFF) { + if (ip+2 > iend) return ERROR(srcSize_wrong); + nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2; + } else { + if (ip >= iend) return ERROR(srcSize_wrong); + nbSeq = ((nbSeq-0x80)<<8) + *ip++; + } + } + *nbSeqPtr = nbSeq; + } + + /* FSE table descriptors */ + if (ip+4 > iend) return ERROR(srcSize_wrong); /* minimum possible size */ + { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6); + symbolEncodingType_e const OFtype = 
(symbolEncodingType_e)((*ip >> 4) & 3); + symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3); + ip++; + + /* Build DTables */ + { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr, + LLtype, MaxLL, LLFSELog, + ip, iend-ip, LL_defaultDTable, dctx->fseEntropy); + if (ZSTD_isError(llhSize)) return ERROR(corruption_detected); + ip += llhSize; + } + { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr, + OFtype, MaxOff, OffFSELog, + ip, iend-ip, OF_defaultDTable, dctx->fseEntropy); + if (ZSTD_isError(ofhSize)) return ERROR(corruption_detected); + ip += ofhSize; + } + { size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr, + MLtype, MaxML, MLFSELog, + ip, iend-ip, ML_defaultDTable, dctx->fseEntropy); + if (ZSTD_isError(mlhSize)) return ERROR(corruption_detected); + ip += mlhSize; + } + } + + return ip-istart; +} + + +typedef struct { + size_t litLength; + size_t matchLength; + size_t offset; + const BYTE* match; +} seq_t; + +typedef struct { + BIT_DStream_t DStream; + FSE_DState_t stateLL; + FSE_DState_t stateOffb; + FSE_DState_t stateML; + size_t prevOffset[ZSTD_REP_NUM]; + const BYTE* base; + size_t pos; + uPtrDiff gotoDict; +} seqState_t; + + +FORCE_NOINLINE +size_t ZSTD_execSequenceLast7(BYTE* op, + BYTE* const oend, seq_t sequence, + const BYTE** litPtr, const BYTE* const litLimit, + const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd) +{ + BYTE* const oLitEnd = op + sequence.litLength; + size_t const sequenceLength = sequence.litLength + sequence.matchLength; + BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ + BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; + const BYTE* const iLitEnd = *litPtr + sequence.litLength; + const BYTE* match = oLitEnd - sequence.offset; + + /* check */ + if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 
WILDCOPY_OVERLENGTH from oend */ + if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */ + if (oLitEnd <= oend_w) return ERROR(GENERIC); /* Precondition */ + + /* copy literals */ + if (op < oend_w) { + ZSTD_wildcopy(op, *litPtr, oend_w - op); + *litPtr += oend_w - op; + op = oend_w; + } + while (op < oLitEnd) *op++ = *(*litPtr)++; + + /* copy Match */ + if (sequence.offset > (size_t)(oLitEnd - base)) { + /* offset beyond prefix */ + if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected); + match = dictEnd - (base-match); + if (match + sequence.matchLength <= dictEnd) { + memmove(oLitEnd, match, sequence.matchLength); + return sequenceLength; + } + /* span extDict & currentPrefixSegment */ + { size_t const length1 = dictEnd - match; + memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; + match = base; + } } + while (op < oMatchEnd) *op++ = *match++; + return sequenceLength; +} + + + + +static seq_t ZSTD_decodeSequence(seqState_t* seqState) +{ + seq_t seq; + + U32 const llCode = FSE_peekSymbol(&seqState->stateLL); + U32 const mlCode = FSE_peekSymbol(&seqState->stateML); + U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */ + + U32 const llBits = LL_bits[llCode]; + U32 const mlBits = ML_bits[mlCode]; + U32 const ofBits = ofCode; + U32 const totalBits = llBits+mlBits+ofBits; + + static const U32 LL_base[MaxLL+1] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, + 0x2000, 0x4000, 0x8000, 0x10000 }; + + static const U32 ML_base[MaxML+1] = { + 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, + 35, 37, 39, 41, 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, + 0x1003, 0x2003, 0x4003, 0x8003, 0x10003 }; + + static const U32 OF_base[MaxOff+1] = 
{ + 0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D, + 0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD, + 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, + 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD }; + + /* sequence */ + { size_t offset; + if (!ofCode) + offset = 0; + else { + offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ + if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); + } + + if (ofCode <= 1) { + offset += (llCode==0); + if (offset) { + size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; + temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */ + if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1]; + seqState->prevOffset[1] = seqState->prevOffset[0]; + seqState->prevOffset[0] = offset = temp; + } else { + offset = seqState->prevOffset[0]; + } + } else { + seqState->prevOffset[2] = seqState->prevOffset[1]; + seqState->prevOffset[1] = seqState->prevOffset[0]; + seqState->prevOffset[0] = offset; + } + seq.offset = offset; + } + + seq.matchLength = ML_base[mlCode] + ((mlCode>31) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */ + if (MEM_32bits() && (mlBits+llBits>24)) BIT_reloadDStream(&seqState->DStream); + + seq.litLength = LL_base[llCode] + ((llCode>15) ? 
BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */ + if (MEM_32bits() || + (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BIT_reloadDStream(&seqState->DStream); + + /* ANS state update */ + FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */ + FSE_updateState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */ + if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ + FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */ + + return seq; +} + + +FORCE_INLINE +size_t ZSTD_execSequence(BYTE* op, + BYTE* const oend, seq_t sequence, + const BYTE** litPtr, const BYTE* const litLimit, + const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd) +{ + BYTE* const oLitEnd = op + sequence.litLength; + size_t const sequenceLength = sequence.litLength + sequence.matchLength; + BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ + BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; + const BYTE* const iLitEnd = *litPtr + sequence.litLength; + const BYTE* match = oLitEnd - sequence.offset; + + /* check */ + if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */ + if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */ + if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd); + + /* copy Literals */ + ZSTD_copy8(op, *litPtr); + if (sequence.litLength > 8) + ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */ + op = oLitEnd; + *litPtr = iLitEnd; /* update for next sequence */ + + /* copy Match */ + if (sequence.offset > (size_t)(oLitEnd - base)) { + /* offset beyond prefix */ + if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected); + match = dictEnd 
+ (match - base); + if (match + sequence.matchLength <= dictEnd) { + memmove(oLitEnd, match, sequence.matchLength); + return sequenceLength; + } + /* span extDict & currentPrefixSegment */ + { size_t const length1 = dictEnd - match; + memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; + match = base; + if (op > oend_w || sequence.matchLength < MINMATCH) { + U32 i; + for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i]; + return sequenceLength; + } + } } + /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */ + + /* match within prefix */ + if (sequence.offset < 8) { + /* close range match, overlap */ + static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */ + static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */ + int const sub2 = dec64table[sequence.offset]; + op[0] = match[0]; + op[1] = match[1]; + op[2] = match[2]; + op[3] = match[3]; + match += dec32table[sequence.offset]; + ZSTD_copy4(op+4, match); + match -= sub2; + } else { + ZSTD_copy8(op, match); + } + op += 8; match += 8; + + if (oMatchEnd > oend-(16-MINMATCH)) { + if (op < oend_w) { + ZSTD_wildcopy(op, match, oend_w - op); + match += oend_w - op; + op = oend_w; + } + while (op < oMatchEnd) *op++ = *match++; + } else { + ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */ + } + return sequenceLength; +} + + +static size_t ZSTD_decompressSequences( + ZSTD_DCtx* dctx, + void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize) +{ + const BYTE* ip = (const BYTE*)seqStart; + const BYTE* const iend = ip + seqSize; + BYTE* const ostart = (BYTE* const)dst; + BYTE* const oend = ostart + maxDstSize; + BYTE* op = ostart; + const BYTE* litPtr = dctx->litPtr; + const BYTE* const litEnd = litPtr + dctx->litSize; + const BYTE* const base = (const BYTE*) (dctx->base); + const BYTE* const vBase = (const BYTE*) (dctx->vBase); + const BYTE* const dictEnd = (const 
BYTE*) (dctx->dictEnd); + int nbSeq; + + /* Build Decoding Tables */ + { size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize); + if (ZSTD_isError(seqHSize)) return seqHSize; + ip += seqHSize; + } + + /* Regen sequences */ + if (nbSeq) { + seqState_t seqState; + dctx->fseEntropy = 1; + { U32 i; for (i=0; ientropy.rep[i]; } + CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected); + FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); + FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); + FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); + + for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; ) { + nbSeq--; + { seq_t const sequence = ZSTD_decodeSequence(&seqState); + size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd); + if (ZSTD_isError(oneSeqSize)) return oneSeqSize; + op += oneSeqSize; + } } + + /* check if reached exact end */ + if (nbSeq) return ERROR(corruption_detected); + /* save reps for next block */ + { U32 i; for (i=0; ientropy.rep[i] = (U32)(seqState.prevOffset[i]); } + } + + /* last literal segment */ + { size_t const lastLLSize = litEnd - litPtr; + if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall); + memcpy(op, litPtr, lastLLSize); + op += lastLLSize; + } + + return op-ostart; +} + + +FORCE_INLINE seq_t ZSTD_decodeSequenceLong_generic(seqState_t* seqState, int const longOffsets) +{ + seq_t seq; + + U32 const llCode = FSE_peekSymbol(&seqState->stateLL); + U32 const mlCode = FSE_peekSymbol(&seqState->stateML); + U32 const ofCode = FSE_peekSymbol(&seqState->stateOffb); /* <= maxOff, by table construction */ + + U32 const llBits = LL_bits[llCode]; + U32 const mlBits = ML_bits[mlCode]; + U32 const ofBits = ofCode; + U32 const totalBits = llBits+mlBits+ofBits; + + static const U32 LL_base[MaxLL+1] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 16, 18, 
20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000, + 0x2000, 0x4000, 0x8000, 0x10000 }; + + static const U32 ML_base[MaxML+1] = { + 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, + 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, + 35, 37, 39, 41, 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803, + 0x1003, 0x2003, 0x4003, 0x8003, 0x10003 }; + + static const U32 OF_base[MaxOff+1] = { + 0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D, + 0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD, + 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD, + 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD }; + + /* sequence */ + { size_t offset; + if (!ofCode) + offset = 0; + else { + if (longOffsets) { + int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN); + offset = OF_base[ofCode] + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits); + if (MEM_32bits() || extraBits) BIT_reloadDStream(&seqState->DStream); + if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits); + } else { + offset = OF_base[ofCode] + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */ + if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); + } + } + + if (ofCode <= 1) { + offset += (llCode==0); + if (offset) { + size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset]; + temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */ + if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1]; + seqState->prevOffset[1] = seqState->prevOffset[0]; + seqState->prevOffset[0] = offset = temp; + } else { + offset = seqState->prevOffset[0]; + } + } else { + seqState->prevOffset[2] = seqState->prevOffset[1]; + seqState->prevOffset[1] = seqState->prevOffset[0]; + seqState->prevOffset[0] = offset; + } + seq.offset = offset; + } + + seq.matchLength = ML_base[mlCode] + ((mlCode>31) ? 
BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */ + if (MEM_32bits() && (mlBits+llBits>24)) BIT_reloadDStream(&seqState->DStream); + + seq.litLength = LL_base[llCode] + ((llCode>15) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */ + if (MEM_32bits() || + (totalBits > 64 - 7 - (LLFSELog+MLFSELog+OffFSELog)) ) BIT_reloadDStream(&seqState->DStream); + + { size_t const pos = seqState->pos + seq.litLength; + seq.match = seqState->base + pos - seq.offset; /* single memory segment */ + if (seq.offset > pos) seq.match += seqState->gotoDict; /* separate memory segment */ + seqState->pos = pos + seq.matchLength; + } + + /* ANS state update */ + FSE_updateState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */ + FSE_updateState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */ + if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */ + FSE_updateState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */ + + return seq; +} + +static seq_t ZSTD_decodeSequenceLong(seqState_t* seqState, unsigned const windowSize) { + if (ZSTD_highbit32(windowSize) > STREAM_ACCUMULATOR_MIN) { + return ZSTD_decodeSequenceLong_generic(seqState, 1); + } else { + return ZSTD_decodeSequenceLong_generic(seqState, 0); + } +} + +FORCE_INLINE +size_t ZSTD_execSequenceLong(BYTE* op, + BYTE* const oend, seq_t sequence, + const BYTE** litPtr, const BYTE* const litLimit, + const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd) +{ + BYTE* const oLitEnd = op + sequence.litLength; + size_t const sequenceLength = sequence.litLength + sequence.matchLength; + BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */ + BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH; + const BYTE* const iLitEnd = *litPtr + sequence.litLength; + const BYTE* match = sequence.match; + + /* check */ +#if 1 + if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of 
WILDCOPY_OVERLENGTH from oend */ + if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */ + if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd); +#endif + + /* copy Literals */ + ZSTD_copy8(op, *litPtr); + if (sequence.litLength > 8) + ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */ + op = oLitEnd; + *litPtr = iLitEnd; /* update for next sequence */ + + /* copy Match */ +#if 1 + if (sequence.offset > (size_t)(oLitEnd - base)) { + /* offset beyond prefix */ + if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected); + if (match + sequence.matchLength <= dictEnd) { + memmove(oLitEnd, match, sequence.matchLength); + return sequenceLength; + } + /* span extDict & currentPrefixSegment */ + { size_t const length1 = dictEnd - match; + memmove(oLitEnd, match, length1); + op = oLitEnd + length1; + sequence.matchLength -= length1; + match = base; + if (op > oend_w || sequence.matchLength < MINMATCH) { + U32 i; + for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i]; + return sequenceLength; + } + } } + /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */ +#endif + + /* match within prefix */ + if (sequence.offset < 8) { + /* close range match, overlap */ + static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */ + static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */ + int const sub2 = dec64table[sequence.offset]; + op[0] = match[0]; + op[1] = match[1]; + op[2] = match[2]; + op[3] = match[3]; + match += dec32table[sequence.offset]; + ZSTD_copy4(op+4, match); + match -= sub2; + } else { + ZSTD_copy8(op, match); + } + op += 8; match += 8; + + if (oMatchEnd > oend-(16-MINMATCH)) { + if (op < oend_w) { + ZSTD_wildcopy(op, match, oend_w - op); + match += oend_w - op; + op = oend_w; + } + while (op < 
oMatchEnd) *op++ = *match++; + } else { + ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */ + } + return sequenceLength; +} + +static size_t ZSTD_decompressSequencesLong( + ZSTD_DCtx* dctx, + void* dst, size_t maxDstSize, + const void* seqStart, size_t seqSize) +{ + const BYTE* ip = (const BYTE*)seqStart; + const BYTE* const iend = ip + seqSize; + BYTE* const ostart = (BYTE* const)dst; + BYTE* const oend = ostart + maxDstSize; + BYTE* op = ostart; + const BYTE* litPtr = dctx->litPtr; + const BYTE* const litEnd = litPtr + dctx->litSize; + const BYTE* const base = (const BYTE*) (dctx->base); + const BYTE* const vBase = (const BYTE*) (dctx->vBase); + const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd); + unsigned const windowSize = dctx->fParams.windowSize; + int nbSeq; + + /* Build Decoding Tables */ + { size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, seqSize); + if (ZSTD_isError(seqHSize)) return seqHSize; + ip += seqHSize; + } + + /* Regen sequences */ + if (nbSeq) { +#define STORED_SEQS 4 +#define STOSEQ_MASK (STORED_SEQS-1) +#define ADVANCED_SEQS 4 + seq_t sequences[STORED_SEQS]; + int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS); + seqState_t seqState; + int seqNb; + dctx->fseEntropy = 1; + { U32 i; for (i=0; ientropy.rep[i]; } + seqState.base = base; + seqState.pos = (size_t)(op-base); + seqState.gotoDict = (uPtrDiff)dictEnd - (uPtrDiff)base; /* cast to avoid undefined behaviour */ + CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected); + FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr); + FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr); + FSE_initDState(&seqState.stateML, &seqState.DStream, dctx->MLTptr); + + /* prepare in advance */ + for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && seqNbentropy.rep[i] = (U32)(seqState.prevOffset[i]); } + } + + /* last literal segment */ + { size_t const 
lastLLSize = litEnd - litPtr; + if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall); + memcpy(op, litPtr, lastLLSize); + op += lastLLSize; + } + + return op-ostart; +} + + +static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ /* blockType == blockCompressed */ + const BYTE* ip = (const BYTE*)src; + + if (srcSize >= ZSTD_BLOCKSIZE_ABSOLUTEMAX) return ERROR(srcSize_wrong); + + /* Decode literals section */ + { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize); + if (ZSTD_isError(litCSize)) return litCSize; + ip += litCSize; + srcSize -= litCSize; + } + if (sizeof(size_t) > 4) /* do not enable prefetching on 32-bits x86, as it's performance detrimental */ + /* likely because of register pressure */ + /* if that's the correct cause, then 32-bits ARM should be affected differently */ + /* it would be good to test this on ARM real hardware, to see if prefetch version improves speed */ + if (dctx->fParams.windowSize > (1<<23)) + return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize); + return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize); +} + + +static void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst) +{ + if (dst != dctx->previousDstEnd) { /* not contiguous */ + dctx->dictEnd = dctx->previousDstEnd; + dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base)); + dctx->base = dst; + dctx->previousDstEnd = dst; + } +} + +size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize) +{ + size_t dSize; + ZSTD_checkContinuity(dctx, dst); + dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize); + dctx->previousDstEnd = (char*)dst + dSize; + return dSize; +} + + +/** ZSTD_insertBlock() : + insert `src` block into `dctx` history. Useful to track uncompressed blocks. 
*/ +size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize) +{ + ZSTD_checkContinuity(dctx, blockStart); + dctx->previousDstEnd = (const char*)blockStart + blockSize; + return blockSize; +} + + +size_t ZSTD_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t length) +{ + if (length > dstCapacity) return ERROR(dstSize_tooSmall); + memset(dst, byte, length); + return length; +} + +/** ZSTD_findFrameCompressedSize() : + * compatible with legacy mode + * `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame + * `srcSize` must be at least as large as the frame contained + * @return : the compressed size of the frame starting at `src` */ +size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize) +{ + if (srcSize >= ZSTD_skippableHeaderSize && + (MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { + return ZSTD_skippableHeaderSize + MEM_readLE32((const BYTE*)src + 4); + } else { + const BYTE* ip = (const BYTE*)src; + const BYTE* const ipstart = ip; + size_t remainingSize = srcSize; + ZSTD_frameParams fParams; + + size_t const headerSize = ZSTD_frameHeaderSize(ip, remainingSize); + if (ZSTD_isError(headerSize)) return headerSize; + + /* Frame Header */ + { size_t const ret = ZSTD_getFrameParams(&fParams, ip, remainingSize); + if (ZSTD_isError(ret)) return ret; + if (ret > 0) return ERROR(srcSize_wrong); + } + + ip += headerSize; + remainingSize -= headerSize; + + /* Loop on each block */ + while (1) { + blockProperties_t blockProperties; + size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties); + if (ZSTD_isError(cBlockSize)) return cBlockSize; + + if (ZSTD_blockHeaderSize + cBlockSize > remainingSize) return ERROR(srcSize_wrong); + + ip += ZSTD_blockHeaderSize + cBlockSize; + remainingSize -= ZSTD_blockHeaderSize + cBlockSize; + + if (blockProperties.lastBlock) break; + } + + if (fParams.checksumFlag) { /* Frame content checksum */ + if (remainingSize 
< 4) return ERROR(srcSize_wrong); + ip += 4; + remainingSize -= 4; + } + + return ip - ipstart; + } +} + +/*! ZSTD_decompressFrame() : +* @dctx must be properly initialized */ +static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void** srcPtr, size_t *srcSizePtr) +{ + const BYTE* ip = (const BYTE*)(*srcPtr); + BYTE* const ostart = (BYTE* const)dst; + BYTE* const oend = ostart + dstCapacity; + BYTE* op = ostart; + size_t remainingSize = *srcSizePtr; + + /* check */ + if (remainingSize < ZSTD_frameHeaderSize_min+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); + + /* Frame Header */ + { size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix); + if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize; + if (remainingSize < frameHeaderSize+ZSTD_blockHeaderSize) return ERROR(srcSize_wrong); + CHECK_F(ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize)); + ip += frameHeaderSize; remainingSize -= frameHeaderSize; + } + + /* Loop on each block */ + while (1) { + size_t decodedSize; + blockProperties_t blockProperties; + size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties); + if (ZSTD_isError(cBlockSize)) return cBlockSize; + + ip += ZSTD_blockHeaderSize; + remainingSize -= ZSTD_blockHeaderSize; + if (cBlockSize > remainingSize) return ERROR(srcSize_wrong); + + switch(blockProperties.blockType) + { + case bt_compressed: + decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize); + break; + case bt_raw : + decodedSize = ZSTD_copyRawBlock(op, oend-op, ip, cBlockSize); + break; + case bt_rle : + decodedSize = ZSTD_generateNxBytes(op, oend-op, *ip, blockProperties.origSize); + break; + case bt_reserved : + default: + return ERROR(corruption_detected); + } + + if (ZSTD_isError(decodedSize)) return decodedSize; + if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, op, decodedSize); + op += decodedSize; + ip += cBlockSize; + remainingSize -= 
cBlockSize; + if (blockProperties.lastBlock) break; + } + + if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */ + U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState); + U32 checkRead; + if (remainingSize<4) return ERROR(checksum_wrong); + checkRead = MEM_readLE32(ip); + if (checkRead != checkCalc) return ERROR(checksum_wrong); + ip += 4; + remainingSize -= 4; + } + + /* Allow caller to get size read */ + *srcPtr = ip; + *srcSizePtr = remainingSize; + return op-ostart; +} + +static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict); +static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict); + +static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const void *dict, size_t dictSize, + const ZSTD_DDict* ddict) +{ + void* const dststart = dst; + + if (ddict) { + if (dict) { + /* programmer error, these two cases should be mutually exclusive */ + return ERROR(GENERIC); + } + + dict = ZSTD_DDictDictContent(ddict); + dictSize = ZSTD_DDictDictSize(ddict); + } + + while (srcSize >= ZSTD_frameHeaderSize_prefix) { + U32 magicNumber; + + magicNumber = MEM_readLE32(src); + if (magicNumber != ZSTD_MAGICNUMBER) { + if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { + size_t skippableSize; + if (srcSize < ZSTD_skippableHeaderSize) + return ERROR(srcSize_wrong); + skippableSize = MEM_readLE32((const BYTE *)src + 4) + + ZSTD_skippableHeaderSize; + if (srcSize < skippableSize) { + return ERROR(srcSize_wrong); + } + + src = (const BYTE *)src + skippableSize; + srcSize -= skippableSize; + continue; + } else { + return ERROR(prefix_unknown); + } + } + + if (ddict) { + /* we were called from ZSTD_decompress_usingDDict */ + ZSTD_refDDict(dctx, ddict); + } else { + /* this will initialize correctly with no dict if dict == NULL, so + * use this in all cases but ddict */ + CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize)); + } + ZSTD_checkContinuity(dctx, 
dst); + + { const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity, + &src, &srcSize); + if (ZSTD_isError(res)) return res; + /* don't need to bounds check this, ZSTD_decompressFrame will have + * already */ + dst = (BYTE*)dst + res; + dstCapacity -= res; + } + } + + if (srcSize) return ERROR(srcSize_wrong); /* input not entirely consumed */ + + return (BYTE*)dst - (BYTE*)dststart; +} + +size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void *dict, size_t dictSize) +{ + return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL); +} + + +size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + return ZSTD_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0); +} + + +/*-************************************** +* Advanced Streaming Decompression API +* Bufferless and synchronous +****************************************/ +size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; } + +ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) { + switch(dctx->stage) + { + default: /* should not happen */ + case ZSTDds_getFrameHeaderSize: + case ZSTDds_decodeFrameHeader: + return ZSTDnit_frameHeader; + case ZSTDds_decodeBlockHeader: + return ZSTDnit_blockHeader; + case ZSTDds_decompressBlock: + return ZSTDnit_block; + case ZSTDds_decompressLastBlock: + return ZSTDnit_lastBlock; + case ZSTDds_checkChecksum: + return ZSTDnit_checksum; + case ZSTDds_decodeSkippableHeader: + case ZSTDds_skipFrame: + return ZSTDnit_skippableFrame; + } +} + +int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; } /* for zbuff */ + +/** ZSTD_decompressContinue() : +* @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity) +* or an error code, which can be tested using ZSTD_isError() */ +size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t 
dstCapacity, const void* src, size_t srcSize) +{ + /* Sanity check */ + if (srcSize != dctx->expected) return ERROR(srcSize_wrong); + if (dstCapacity) ZSTD_checkContinuity(dctx, dst); + + switch (dctx->stage) + { + case ZSTDds_getFrameHeaderSize : + if (srcSize != ZSTD_frameHeaderSize_prefix) return ERROR(srcSize_wrong); /* impossible */ + if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */ + memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix); + dctx->expected = ZSTD_skippableHeaderSize - ZSTD_frameHeaderSize_prefix; /* magic number + skippable frame length */ + dctx->stage = ZSTDds_decodeSkippableHeader; + return 0; + } + dctx->headerSize = ZSTD_frameHeaderSize(src, ZSTD_frameHeaderSize_prefix); + if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize; + memcpy(dctx->headerBuffer, src, ZSTD_frameHeaderSize_prefix); + if (dctx->headerSize > ZSTD_frameHeaderSize_prefix) { + dctx->expected = dctx->headerSize - ZSTD_frameHeaderSize_prefix; + dctx->stage = ZSTDds_decodeFrameHeader; + return 0; + } + dctx->expected = 0; /* not necessary to copy more */ + + case ZSTDds_decodeFrameHeader: + memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected); + CHECK_F(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize)); + dctx->expected = ZSTD_blockHeaderSize; + dctx->stage = ZSTDds_decodeBlockHeader; + return 0; + + case ZSTDds_decodeBlockHeader: + { blockProperties_t bp; + size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp); + if (ZSTD_isError(cBlockSize)) return cBlockSize; + dctx->expected = cBlockSize; + dctx->bType = bp.blockType; + dctx->rleSize = bp.origSize; + if (cBlockSize) { + dctx->stage = bp.lastBlock ? 
ZSTDds_decompressLastBlock : ZSTDds_decompressBlock; + return 0; + } + /* empty block */ + if (bp.lastBlock) { + if (dctx->fParams.checksumFlag) { + dctx->expected = 4; + dctx->stage = ZSTDds_checkChecksum; + } else { + dctx->expected = 0; /* end of frame */ + dctx->stage = ZSTDds_getFrameHeaderSize; + } + } else { + dctx->expected = 3; /* go directly to next header */ + dctx->stage = ZSTDds_decodeBlockHeader; + } + return 0; + } + case ZSTDds_decompressLastBlock: + case ZSTDds_decompressBlock: + { size_t rSize; + switch(dctx->bType) + { + case bt_compressed: + rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize); + break; + case bt_raw : + rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize); + break; + case bt_rle : + rSize = ZSTD_setRleBlock(dst, dstCapacity, src, srcSize, dctx->rleSize); + break; + case bt_reserved : /* should never happen */ + default: + return ERROR(corruption_detected); + } + if (ZSTD_isError(rSize)) return rSize; + if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize); + + if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */ + if (dctx->fParams.checksumFlag) { /* another round for frame checksum */ + dctx->expected = 4; + dctx->stage = ZSTDds_checkChecksum; + } else { + dctx->expected = 0; /* ends here */ + dctx->stage = ZSTDds_getFrameHeaderSize; + } + } else { + dctx->stage = ZSTDds_decodeBlockHeader; + dctx->expected = ZSTD_blockHeaderSize; + dctx->previousDstEnd = (char*)dst + rSize; + } + return rSize; + } + case ZSTDds_checkChecksum: + { U32 const h32 = (U32)XXH64_digest(&dctx->xxhState); + U32 const check32 = MEM_readLE32(src); /* srcSize == 4, guaranteed by dctx->expected */ + if (check32 != h32) return ERROR(checksum_wrong); + dctx->expected = 0; + dctx->stage = ZSTDds_getFrameHeaderSize; + return 0; + } + case ZSTDds_decodeSkippableHeader: + { memcpy(dctx->headerBuffer + ZSTD_frameHeaderSize_prefix, src, dctx->expected); + dctx->expected = 
MEM_readLE32(dctx->headerBuffer + 4); + dctx->stage = ZSTDds_skipFrame; + return 0; + } + case ZSTDds_skipFrame: + { dctx->expected = 0; + dctx->stage = ZSTDds_getFrameHeaderSize; + return 0; + } + default: + return ERROR(GENERIC); /* impossible */ + } +} + + +static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) +{ + dctx->dictEnd = dctx->previousDstEnd; + dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base)); + dctx->base = dict; + dctx->previousDstEnd = (const char*)dict + dictSize; + return 0; +} + +/* ZSTD_loadEntropy() : + * dict : must point at beginning of a valid zstd dictionary + * @return : size of entropy tables read */ +static size_t ZSTD_loadEntropy(ZSTD_entropyTables_t* entropy, const void* const dict, size_t const dictSize) +{ + const BYTE* dictPtr = (const BYTE*)dict; + const BYTE* const dictEnd = dictPtr + dictSize; + + if (dictSize <= 8) return ERROR(dictionary_corrupted); + dictPtr += 8; /* skip header = magic + dictID */ + + + { size_t const hSize = HUF_readDTableX4(entropy->hufTable, dictPtr, dictEnd-dictPtr); + if (HUF_isError(hSize)) return ERROR(dictionary_corrupted); + dictPtr += hSize; + } + + { short offcodeNCount[MaxOff+1]; + U32 offcodeMaxValue = MaxOff, offcodeLog; + size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr); + if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted); + if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted); + CHECK_E(FSE_buildDTable(entropy->OFTable, offcodeNCount, offcodeMaxValue, offcodeLog), dictionary_corrupted); + dictPtr += offcodeHeaderSize; + } + + { short matchlengthNCount[MaxML+1]; + unsigned matchlengthMaxValue = MaxML, matchlengthLog; + size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr); + if (FSE_isError(matchlengthHeaderSize)) return 
ERROR(dictionary_corrupted); + if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted); + CHECK_E(FSE_buildDTable(entropy->MLTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog), dictionary_corrupted); + dictPtr += matchlengthHeaderSize; + } + + { short litlengthNCount[MaxLL+1]; + unsigned litlengthMaxValue = MaxLL, litlengthLog; + size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr); + if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted); + if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted); + CHECK_E(FSE_buildDTable(entropy->LLTable, litlengthNCount, litlengthMaxValue, litlengthLog), dictionary_corrupted); + dictPtr += litlengthHeaderSize; + } + + if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted); + { int i; + size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12)); + for (i=0; i<3; i++) { + U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4; + if (rep==0 || rep >= dictContentSize) return ERROR(dictionary_corrupted); + entropy->rep[i] = rep; + } } + + return dictPtr - (const BYTE*)dict; +} + +static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize) +{ + if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize); + { U32 const magic = MEM_readLE32(dict); + if (magic != ZSTD_DICT_MAGIC) { + return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */ + } } + dctx->dictID = MEM_readLE32((const char*)dict + 4); + + /* load entropy tables */ + { size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize); + if (ZSTD_isError(eSize)) return ERROR(dictionary_corrupted); + dict = (const char*)dict + eSize; + dictSize -= eSize; + } + dctx->litEntropy = dctx->fseEntropy = 1; + + /* reference dictionary content */ + return ZSTD_refDictContent(dctx, dict, dictSize); +} + +size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t 
dictSize) +{ + CHECK_F(ZSTD_decompressBegin(dctx)); + if (dict && dictSize) CHECK_E(ZSTD_decompress_insertDictionary(dctx, dict, dictSize), dictionary_corrupted); + return 0; +} + + +/* ====== ZSTD_DDict ====== */ + +struct ZSTD_DDict_s { + void* dictBuffer; + const void* dictContent; + size_t dictSize; + ZSTD_entropyTables_t entropy; + U32 dictID; + U32 entropyPresent; + ZSTD_customMem cMem; +}; /* typedef'd to ZSTD_DDict within "zstd.h" */ + +size_t ZSTD_DDictWorkspaceBound(void) +{ + return ZSTD_ALIGN(sizeof(ZSTD_stack)) + ZSTD_ALIGN(sizeof(ZSTD_DDict)); +} + +static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict) +{ + return ddict->dictContent; +} + +static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict) +{ + return ddict->dictSize; +} + +static void ZSTD_refDDict(ZSTD_DCtx* dstDCtx, const ZSTD_DDict* ddict) +{ + ZSTD_decompressBegin(dstDCtx); /* init */ + if (ddict) { /* support refDDict on NULL */ + dstDCtx->dictID = ddict->dictID; + dstDCtx->base = ddict->dictContent; + dstDCtx->vBase = ddict->dictContent; + dstDCtx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize; + dstDCtx->previousDstEnd = dstDCtx->dictEnd; + if (ddict->entropyPresent) { + dstDCtx->litEntropy = 1; + dstDCtx->fseEntropy = 1; + dstDCtx->LLTptr = ddict->entropy.LLTable; + dstDCtx->MLTptr = ddict->entropy.MLTable; + dstDCtx->OFTptr = ddict->entropy.OFTable; + dstDCtx->HUFptr = ddict->entropy.hufTable; + dstDCtx->entropy.rep[0] = ddict->entropy.rep[0]; + dstDCtx->entropy.rep[1] = ddict->entropy.rep[1]; + dstDCtx->entropy.rep[2] = ddict->entropy.rep[2]; + } else { + dstDCtx->litEntropy = 0; + dstDCtx->fseEntropy = 0; + } + } +} + +static size_t ZSTD_loadEntropy_inDDict(ZSTD_DDict* ddict) +{ + ddict->dictID = 0; + ddict->entropyPresent = 0; + if (ddict->dictSize < 8) return 0; + { U32 const magic = MEM_readLE32(ddict->dictContent); + if (magic != ZSTD_DICT_MAGIC) return 0; /* pure content mode */ + } + ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + 4); + 
+ /* load entropy tables */ + CHECK_E( ZSTD_loadEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize), dictionary_corrupted ); + ddict->entropyPresent = 1; + return 0; +} + + +static ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize, unsigned byReference, ZSTD_customMem customMem) +{ + if (!customMem.customAlloc || !customMem.customFree) return NULL; + + { ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem); + if (!ddict) return NULL; + ddict->cMem = customMem; + + if ((byReference) || (!dict) || (!dictSize)) { + ddict->dictBuffer = NULL; + ddict->dictContent = dict; + } else { + void* const internalBuffer = ZSTD_malloc(dictSize, customMem); + if (!internalBuffer) { ZSTD_freeDDict(ddict); return NULL; } + memcpy(internalBuffer, dict, dictSize); + ddict->dictBuffer = internalBuffer; + ddict->dictContent = internalBuffer; + } + ddict->dictSize = dictSize; + ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */ + /* parse dictionary content */ + { size_t const errorCode = ZSTD_loadEntropy_inDDict(ddict); + if (ZSTD_isError(errorCode)) { + ZSTD_freeDDict(ddict); + return NULL; + } } + + return ddict; + } +} + +/*! ZSTD_initDDict() : +* Create a digested dictionary, to start decompression without startup delay. +* `dict` content is copied inside DDict. +* Consequently, `dict` can be released after `ZSTD_DDict` creation */ +ZSTD_DDict* ZSTD_initDDict(const void* dict, size_t dictSize, void* workspace, size_t workspaceSize) +{ + ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); + return ZSTD_createDDict_advanced(dict, dictSize, 1, stackMem); +} + + +size_t ZSTD_freeDDict(ZSTD_DDict* ddict) +{ + if (ddict==NULL) return 0; /* support free on NULL */ + { ZSTD_customMem const cMem = ddict->cMem; + ZSTD_free(ddict->dictBuffer, cMem); + ZSTD_free(ddict, cMem); + return 0; + } +} + +/*! 
ZSTD_getDictID_fromDict() : + * Provides the dictID stored within dictionary. + * if @return == 0, the dictionary is not conformant with Zstandard specification. + * It can still be loaded, but as a content-only dictionary. */ +unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize) +{ + if (dictSize < 8) return 0; + if (MEM_readLE32(dict) != ZSTD_DICT_MAGIC) return 0; + return MEM_readLE32((const char*)dict + 4); +} + +/*! ZSTD_getDictID_fromDDict() : + * Provides the dictID of the dictionary loaded into `ddict`. + * If @return == 0, the dictionary is not conformant to Zstandard specification, or empty. + * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */ +unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict) +{ + if (ddict==NULL) return 0; + return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize); +} + +/*! ZSTD_getDictID_fromFrame() : + * Provides the dictID required to decompressed the frame stored within `src`. + * If @return == 0, the dictID could not be decoded. + * This could for one of the following reasons : + * - The frame does not require a dictionary to be decoded (most common case). + * - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden information. + * Note : this use case also happens when using a non-conformant dictionary. + * - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`). + * - This is not a Zstandard frame. + * When identifying the exact failure cause, it's possible to used ZSTD_getFrameParams(), which will provide a more precise error code. */ +unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize) +{ + ZSTD_frameParams zfp = { 0 , 0 , 0 , 0 }; + size_t const hError = ZSTD_getFrameParams(&zfp, src, srcSize); + if (ZSTD_isError(hError)) return 0; + return zfp.dictID; +} + + +/*! 
ZSTD_decompress_usingDDict() : +* Decompression using a pre-digested Dictionary +* Use dictionary without significant overhead. */ +size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx, + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const ZSTD_DDict* ddict) +{ + /* pass content and size in case legacy frames are encountered */ + return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, + NULL, 0, + ddict); +} + + +/*===================================== +* Streaming decompression +*====================================*/ + +typedef enum { zdss_init, zdss_loadHeader, + zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage; + +/* *** Resource management *** */ +struct ZSTD_DStream_s { + ZSTD_DCtx* dctx; + ZSTD_DDict* ddictLocal; + const ZSTD_DDict* ddict; + ZSTD_frameParams fParams; + ZSTD_dStreamStage stage; + char* inBuff; + size_t inBuffSize; + size_t inPos; + size_t maxWindowSize; + char* outBuff; + size_t outBuffSize; + size_t outStart; + size_t outEnd; + size_t blockSize; + BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX]; /* tmp buffer to store frame header */ + size_t lhSize; + ZSTD_customMem customMem; + void* legacyContext; + U32 previousLegacyVersion; + U32 legacyVersion; + U32 hostageByte; +}; /* typedef'd to ZSTD_DStream within "zstd.h" */ + +size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize) { + size_t const blockSize = MIN(maxWindowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX); + size_t const inBuffSize = blockSize; + size_t const outBuffSize = maxWindowSize + blockSize + WILDCOPY_OVERLENGTH * 2; + return ZSTD_DCtxWorkspaceBound() + ZSTD_ALIGN(sizeof(ZSTD_DStream)) + ZSTD_ALIGN(inBuffSize) + ZSTD_ALIGN(outBuffSize); +} + +static ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem) +{ + ZSTD_DStream* zds; + + if (!customMem.customAlloc || !customMem.customFree) return NULL; + + zds = (ZSTD_DStream*) ZSTD_malloc(sizeof(ZSTD_DStream), customMem); + if (zds==NULL) return NULL; + memset(zds, 0, sizeof(ZSTD_DStream)); + 
memcpy(&zds->customMem, &customMem, sizeof(ZSTD_customMem)); + zds->dctx = ZSTD_createDCtx_advanced(customMem); + if (zds->dctx == NULL) { ZSTD_freeDStream(zds); return NULL; } + zds->stage = zdss_init; + zds->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT; + return zds; +} + +ZSTD_DStream* ZSTD_initDStream(size_t maxWindowSize, void* workspace, size_t workspaceSize) +{ + ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize); + ZSTD_DStream* zds = ZSTD_createDStream_advanced(stackMem); + if (!zds) { return NULL; } + + zds->maxWindowSize = maxWindowSize; + zds->stage = zdss_loadHeader; + zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0; + ZSTD_freeDDict(zds->ddictLocal); + zds->ddictLocal = NULL; + zds->ddict = zds->ddictLocal; + zds->legacyVersion = 0; + zds->hostageByte = 0; + return zds; +} + +ZSTD_DStream* ZSTD_initDStream_usingDDict(size_t maxWindowSize, const ZSTD_DDict* ddict, void* workspace, size_t workspaceSize) +{ + ZSTD_DStream* zds = ZSTD_initDStream(maxWindowSize, workspace, workspaceSize); + if (zds) { + zds->ddict = ddict; + } + return zds; +} + +size_t ZSTD_freeDStream(ZSTD_DStream* zds) +{ + if (zds==NULL) return 0; /* support free on null */ + { ZSTD_customMem const cMem = zds->customMem; + ZSTD_freeDCtx(zds->dctx); + zds->dctx = NULL; + ZSTD_freeDDict(zds->ddictLocal); + zds->ddictLocal = NULL; + ZSTD_free(zds->inBuff, cMem); + zds->inBuff = NULL; + ZSTD_free(zds->outBuff, cMem); + zds->outBuff = NULL; + ZSTD_free(zds, cMem); + return 0; + } +} + + +/* *** Initialization *** */ + +size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX + ZSTD_blockHeaderSize; } +size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; } + +size_t ZSTD_resetDStream(ZSTD_DStream* zds) +{ + zds->stage = zdss_loadHeader; + zds->lhSize = zds->inPos = zds->outStart = zds->outEnd = 0; + zds->legacyVersion = 0; + zds->hostageByte = 0; + return ZSTD_frameHeaderSize_prefix; +} + +/* ***** Decompression ***** */ + 
+MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize) +{ + size_t const length = MIN(dstCapacity, srcSize); + memcpy(dst, src, length); + return length; +} + + +size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input) +{ + const char* const istart = (const char*)(input->src) + input->pos; + const char* const iend = (const char*)(input->src) + input->size; + const char* ip = istart; + char* const ostart = (char*)(output->dst) + output->pos; + char* const oend = (char*)(output->dst) + output->size; + char* op = ostart; + U32 someMoreWork = 1; + + while (someMoreWork) { + switch(zds->stage) + { + case zdss_init : + ZSTD_resetDStream(zds); /* transparent reset on starting decoding a new frame */ + /* fall-through */ + + case zdss_loadHeader : + { size_t const hSize = ZSTD_getFrameParams(&zds->fParams, zds->headerBuffer, zds->lhSize); + if (ZSTD_isError(hSize)) + return hSize; + if (hSize != 0) { /* need more input */ + size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */ + if (toLoad > (size_t)(iend-ip)) { /* not enough input to load full header */ + memcpy(zds->headerBuffer + zds->lhSize, ip, iend-ip); + zds->lhSize += iend-ip; + input->pos = input->size; + return (MAX(ZSTD_frameHeaderSize_min, hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */ + } + memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad; + break; + } } + + /* check for single-pass mode opportunity */ + if (zds->fParams.frameContentSize && zds->fParams.windowSize /* skippable frame if == 0 */ + && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) { + size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend-istart); + if (cSize <= (size_t)(iend-istart)) { + size_t const decompressedSize = ZSTD_decompress_usingDDict(zds->dctx, op, oend-op, istart, cSize, zds->ddict); + if (ZSTD_isError(decompressedSize)) 
return decompressedSize; + ip = istart + cSize; + op += decompressedSize; + zds->dctx->expected = 0; + zds->stage = zdss_init; + someMoreWork = 0; + break; + } } + + /* Consume header */ + ZSTD_refDDict(zds->dctx, zds->ddict); + { size_t const h1Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); /* == ZSTD_frameHeaderSize_prefix */ + CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer, h1Size)); + { size_t const h2Size = ZSTD_nextSrcSizeToDecompress(zds->dctx); + CHECK_F(ZSTD_decompressContinue(zds->dctx, NULL, 0, zds->headerBuffer+h1Size, h2Size)); + } } + + zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN); + if (zds->fParams.windowSize > zds->maxWindowSize) return ERROR(frameParameter_windowTooLarge); + + /* Adapt buffer sizes to frame header instructions */ + { size_t const blockSize = MIN(zds->fParams.windowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX); + size_t const neededOutSize = zds->fParams.windowSize + blockSize + WILDCOPY_OVERLENGTH * 2; + zds->blockSize = blockSize; + if (zds->inBuffSize < blockSize) { + ZSTD_free(zds->inBuff, zds->customMem); + zds->inBuffSize = blockSize; + zds->inBuff = (char*)ZSTD_malloc(blockSize, zds->customMem); + if (zds->inBuff == NULL) return ERROR(memory_allocation); + } + if (zds->outBuffSize < neededOutSize) { + ZSTD_free(zds->outBuff, zds->customMem); + zds->outBuffSize = neededOutSize; + zds->outBuff = (char*)ZSTD_malloc(neededOutSize, zds->customMem); + if (zds->outBuff == NULL) return ERROR(memory_allocation); + } } + zds->stage = zdss_read; + /* pass-through */ + + case zdss_read: + { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx); + if (neededInSize==0) { /* end of frame */ + zds->stage = zdss_init; + someMoreWork = 0; + break; + } + if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */ + const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx); + size_t const decodedSize = ZSTD_decompressContinue(zds->dctx, + zds->outBuff + zds->outStart, 
(isSkipFrame ? 0 : zds->outBuffSize - zds->outStart), + ip, neededInSize); + if (ZSTD_isError(decodedSize)) return decodedSize; + ip += neededInSize; + if (!decodedSize && !isSkipFrame) break; /* this was just a header */ + zds->outEnd = zds->outStart + decodedSize; + zds->stage = zdss_flush; + break; + } + if (ip==iend) { someMoreWork = 0; break; } /* no more input */ + zds->stage = zdss_load; + /* pass-through */ + } + + case zdss_load: + { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds->dctx); + size_t const toLoad = neededInSize - zds->inPos; /* should always be <= remaining space within inBuff */ + size_t loadedSize; + if (toLoad > zds->inBuffSize - zds->inPos) return ERROR(corruption_detected); /* should never happen */ + loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend-ip); + ip += loadedSize; + zds->inPos += loadedSize; + if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */ + + /* decode loaded input */ + { const int isSkipFrame = ZSTD_isSkipFrame(zds->dctx); + size_t const decodedSize = ZSTD_decompressContinue(zds->dctx, + zds->outBuff + zds->outStart, zds->outBuffSize - zds->outStart, + zds->inBuff, neededInSize); + if (ZSTD_isError(decodedSize)) return decodedSize; + zds->inPos = 0; /* input is consumed */ + if (!decodedSize && !isSkipFrame) { zds->stage = zdss_read; break; } /* this was just a header */ + zds->outEnd = zds->outStart + decodedSize; + zds->stage = zdss_flush; + /* pass-through */ + } } + + case zdss_flush: + { size_t const toFlushSize = zds->outEnd - zds->outStart; + size_t const flushedSize = ZSTD_limitCopy(op, oend-op, zds->outBuff + zds->outStart, toFlushSize); + op += flushedSize; + zds->outStart += flushedSize; + if (flushedSize == toFlushSize) { /* flush completed */ + zds->stage = zdss_read; + if (zds->outStart + zds->blockSize > zds->outBuffSize) + zds->outStart = zds->outEnd = 0; + break; + } + /* cannot complete flush */ + someMoreWork = 0; + break; + } + 
default: return ERROR(GENERIC); /* impossible */ + } } + + /* result */ + input->pos += (size_t)(ip-istart); + output->pos += (size_t)(op-ostart); + { size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds->dctx); + if (!nextSrcSizeHint) { /* frame fully decoded */ + if (zds->outEnd == zds->outStart) { /* output fully flushed */ + if (zds->hostageByte) { + if (input->pos >= input->size) { zds->stage = zdss_read; return 1; } /* can't release hostage (not present) */ + input->pos++; /* release hostage */ + } + return 0; + } + if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */ + input->pos--; /* note : pos > 0, otherwise, impossible to finish reading last block */ + zds->hostageByte=1; + } + return 1; + } + nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds->dctx) == ZSTDnit_block); /* preload header of next block */ + if (zds->inPos > nextSrcSizeHint) return ERROR(GENERIC); /* should never happen */ + nextSrcSizeHint -= zds->inPos; /* already loaded*/ + return nextSrcSizeHint; + } +} + +EXPORT_SYMBOL(ZSTD_DCtxWorkspaceBound); +EXPORT_SYMBOL(ZSTD_initDCtx); +EXPORT_SYMBOL(ZSTD_decompressDCtx); +EXPORT_SYMBOL(ZSTD_decompress_usingDict); + +EXPORT_SYMBOL(ZSTD_DDictWorkspaceBound); +EXPORT_SYMBOL(ZSTD_initDDict); +EXPORT_SYMBOL(ZSTD_decompress_usingDDict); + +EXPORT_SYMBOL(ZSTD_DStreamWorkspaceBound); +EXPORT_SYMBOL(ZSTD_initDStream); +EXPORT_SYMBOL(ZSTD_initDStream_usingDDict); +EXPORT_SYMBOL(ZSTD_resetDStream); +EXPORT_SYMBOL(ZSTD_decompressStream); +EXPORT_SYMBOL(ZSTD_DStreamInSize); +EXPORT_SYMBOL(ZSTD_DStreamOutSize); + +EXPORT_SYMBOL(ZSTD_findFrameCompressedSize); +EXPORT_SYMBOL(ZSTD_getFrameContentSize); +EXPORT_SYMBOL(ZSTD_findDecompressedSize); + +EXPORT_SYMBOL(ZSTD_isFrame); +EXPORT_SYMBOL(ZSTD_getDictID_fromDict); +EXPORT_SYMBOL(ZSTD_getDictID_fromDDict); +EXPORT_SYMBOL(ZSTD_getDictID_fromFrame); + +EXPORT_SYMBOL(ZSTD_getFrameParams); 
+EXPORT_SYMBOL(ZSTD_decompressBegin); +EXPORT_SYMBOL(ZSTD_decompressBegin_usingDict); +EXPORT_SYMBOL(ZSTD_copyDCtx); +EXPORT_SYMBOL(ZSTD_nextSrcSizeToDecompress); +EXPORT_SYMBOL(ZSTD_decompressContinue); +EXPORT_SYMBOL(ZSTD_nextInputType); + +EXPORT_SYMBOL(ZSTD_decompressBlock); +EXPORT_SYMBOL(ZSTD_insertBlock); + +MODULE_LICENSE("BSD"); +MODULE_DESCRIPTION("Zstd Decompressor"); diff --git a/contrib/linux-kernel/lib/zstd/entropy_common.c b/contrib/linux-kernel/lib/zstd/entropy_common.c new file mode 100644 index 000000000..68d880827 --- /dev/null +++ b/contrib/linux-kernel/lib/zstd/entropy_common.c @@ -0,0 +1,217 @@ +/* + Common functions of New Generation Entropy library + Copyright (C) 2016, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +*************************************************************************** */ + +/* ************************************* +* Dependencies +***************************************/ +#include "mem.h" +#include "error_private.h" /* ERR_*, ERROR */ +#include "fse.h" +#include "huf.h" + + +/*=== Version ===*/ +unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; } + + +/*=== Error Management ===*/ +unsigned FSE_isError(size_t code) { return ERR_isError(code); } + +unsigned HUF_isError(size_t code) { return ERR_isError(code); } + + +/*-************************************************************** +* FSE NCount encoding-decoding +****************************************************************/ +size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr, + const void* headerBuffer, size_t hbSize) +{ + const BYTE* const istart = (const BYTE*) headerBuffer; + const BYTE* const iend = istart + hbSize; + const BYTE* ip = istart; + int nbBits; + int remaining; + int threshold; + U32 bitStream; + int bitCount; + unsigned charnum = 0; + int previous0 = 0; + + if (hbSize < 4) return ERROR(srcSize_wrong); + bitStream = MEM_readLE32(ip); + nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */ + if (nbBits > 
FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge); + bitStream >>= 4; + bitCount = 4; + *tableLogPtr = nbBits; + remaining = (1<1) & (charnum<=*maxSVPtr)) { + if (previous0) { + unsigned n0 = charnum; + while ((bitStream & 0xFFFF) == 0xFFFF) { + n0 += 24; + if (ip < iend-5) { + ip += 2; + bitStream = MEM_readLE32(ip) >> bitCount; + } else { + bitStream >>= 16; + bitCount += 16; + } } + while ((bitStream & 3) == 3) { + n0 += 3; + bitStream >>= 2; + bitCount += 2; + } + n0 += bitStream & 3; + bitCount += 2; + if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall); + while (charnum < n0) normalizedCounter[charnum++] = 0; + if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { + ip += bitCount>>3; + bitCount &= 7; + bitStream = MEM_readLE32(ip) >> bitCount; + } else { + bitStream >>= 2; + } } + { int const max = (2*threshold-1) - remaining; + int count; + + if ((bitStream & (threshold-1)) < (U32)max) { + count = bitStream & (threshold-1); + bitCount += nbBits-1; + } else { + count = bitStream & (2*threshold-1); + if (count >= threshold) count -= max; + bitCount += nbBits; + } + + count--; /* extra accuracy */ + remaining -= count < 0 ? -count : count; /* -1 means +1 */ + normalizedCounter[charnum++] = (short)count; + previous0 = !count; + while (remaining < threshold) { + nbBits--; + threshold >>= 1; + } + + if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) { + ip += bitCount>>3; + bitCount &= 7; + } else { + bitCount -= (int)(8 * (iend - 4 - ip)); + ip = iend - 4; + } + bitStream = MEM_readLE32(ip) >> (bitCount & 31); + } } /* while ((remaining>1) & (charnum<=*maxSVPtr)) */ + if (remaining != 1) return ERROR(corruption_detected); + if (bitCount > 32) return ERROR(corruption_detected); + *maxSVPtr = charnum-1; + + ip += (bitCount+7)>>3; + return ip-istart; +} + + +/*! HUF_readStats() : + Read compact Huffman tree, saved by HUF_writeCTable(). + `huffWeight` is destination buffer. + `rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32. 
+ @return : size read from `src` , or an error Code . + Note : Needed by HUF_readCTable() and HUF_readDTableX?() . +*/ +size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, + U32* nbSymbolsPtr, U32* tableLogPtr, + const void* src, size_t srcSize) +{ + U32 weightTotal; + const BYTE* ip = (const BYTE*) src; + size_t iSize; + size_t oSize; + + if (!srcSize) return ERROR(srcSize_wrong); + iSize = ip[0]; + /* memset(huffWeight, 0, hwSize); *//* is not necessary, even though some analyzer complain ... */ + + if (iSize >= 128) { /* special header */ + oSize = iSize - 127; + iSize = ((oSize+1)/2); + if (iSize+1 > srcSize) return ERROR(srcSize_wrong); + if (oSize >= hwSize) return ERROR(corruption_detected); + ip += 1; + { U32 n; + for (n=0; n> 4; + huffWeight[n+1] = ip[n/2] & 15; + } } } + else { /* header compressed with FSE (normal case) */ + FSE_DTable fseWorkspace[FSE_DTABLE_SIZE_U32(6)]; /* 6 is max possible tableLog for HUF header (maybe even 5, to be tested) */ + if (iSize+1 > srcSize) return ERROR(srcSize_wrong); + oSize = FSE_decompress_wksp(huffWeight, hwSize-1, ip+1, iSize, fseWorkspace, 6); /* max (hwSize-1) values decoded, as last one is implied */ + if (FSE_isError(oSize)) return oSize; + } + + /* collect weight stats */ + memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32)); + weightTotal = 0; + { U32 n; for (n=0; n= HUF_TABLELOG_MAX) return ERROR(corruption_detected); + rankStats[huffWeight[n]]++; + weightTotal += (1 << huffWeight[n]) >> 1; + } } + if (weightTotal == 0) return ERROR(corruption_detected); + + /* get last non-null symbol weight (implied, total must be 2^n) */ + { U32 const tableLog = BIT_highbit32(weightTotal) + 1; + if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected); + *tableLogPtr = tableLog; + /* determine last weight */ + { U32 const total = 1 << tableLog; + U32 const rest = total - weightTotal; + U32 const verif = 1 << BIT_highbit32(rest); + U32 const lastWeight = BIT_highbit32(rest) + 1; + if 
(verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */ + huffWeight[oSize] = (BYTE)lastWeight; + rankStats[lastWeight]++; + } } + + /* check tree construction validity */ + if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */ + + /* results */ + *nbSymbolsPtr = (U32)(oSize+1); + return iSize+1; +} diff --git a/contrib/linux-kernel/lib/zstd/error_private.h b/contrib/linux-kernel/lib/zstd/error_private.h new file mode 100644 index 000000000..8cf148bc4 --- /dev/null +++ b/contrib/linux-kernel/lib/zstd/error_private.h @@ -0,0 +1,44 @@ +/** + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + +/* Note : this module is expected to remain private, do not expose it */ + +#ifndef ERROR_H_MODULE +#define ERROR_H_MODULE + +/* **************************************** +* Dependencies +******************************************/ +#include /* size_t */ +#include /* enum list */ + + +/* **************************************** +* Compiler-specific +******************************************/ +#define ERR_STATIC static __attribute__((unused)) + + +/*-**************************************** +* Customization (error_public.h) +******************************************/ +typedef ZSTD_ErrorCode ERR_enum; +#define PREFIX(name) ZSTD_error_##name + + +/*-**************************************** +* Error codes handling +******************************************/ +#define ERROR(name) ((size_t)-PREFIX(name)) + +ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); } + +ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; 
return (ERR_enum) (0-code); } + +#endif /* ERROR_H_MODULE */ diff --git a/contrib/linux-kernel/lib/zstd/fse.h b/contrib/linux-kernel/lib/zstd/fse.h new file mode 100644 index 000000000..14fa439ee --- /dev/null +++ b/contrib/linux-kernel/lib/zstd/fse.h @@ -0,0 +1,606 @@ +/* ****************************************************************** + FSE : Finite State Entropy codec + Public Prototypes declaration + Copyright (C) 2013-2016, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + You can contact the author at : + - Source repository : https://github.com/Cyan4973/FiniteStateEntropy +****************************************************************** */ +#ifndef FSE_H +#define FSE_H + + +/*-***************************************** +* Dependencies +******************************************/ +#include /* size_t, ptrdiff_t */ + + +/*-***************************************** +* FSE_PUBLIC_API : control library symbols visibility +******************************************/ +#define FSE_PUBLIC_API + +/*------ Version ------*/ +#define FSE_VERSION_MAJOR 0 +#define FSE_VERSION_MINOR 9 +#define FSE_VERSION_RELEASE 0 + +#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE +#define FSE_QUOTE(str) #str +#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str) +#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION) + +#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE) +FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */ + +/*-***************************************** +* Tool functions +******************************************/ +FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */ + +/* Error Management */ +FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */ + + +/*-***************************************** +* FSE detailed API +******************************************/ +/*! +FSE_compress() does the following: +1. count symbol occurrence from source[] into table count[] +2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog) +3. save normalized counters to memory buffer using writeNCount() +4. build encoding table 'CTable' from normalized counters +5. encode the data stream using encoding table 'CTable' + +FSE_decompress() does the following: +1. read normalized counters with readNCount() +2. 
build decoding table 'DTable' from normalized counters +3. decode the data stream using decoding table 'DTable' + +The following API allows targeting specific sub-functions for advanced tasks. +For example, it's possible to compress several blocks using the same 'CTable', +or to save and provide normalized distribution using external method. +*/ + +/* *** COMPRESSION *** */ +/*! FSE_optimalTableLog(): + dynamically downsize 'tableLog' when conditions are met. + It saves CPU time, by using smaller tables, while preserving or even improving compression ratio. + @return : recommended tableLog (necessarily <= 'maxTableLog') */ +FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); + +/*! FSE_normalizeCount(): + normalize counts so that sum(count[]) == Power_of_2 (2^tableLog) + 'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1). + @return : tableLog, + or an errorCode, which can be tested using FSE_isError() */ +FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog, const unsigned* count, size_t srcSize, unsigned maxSymbolValue); + +/*! FSE_NCountWriteBound(): + Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'. + Typically useful for allocation purpose. */ +FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog); + +/*! FSE_writeNCount(): + Compactly save 'normalizedCounter' into 'buffer'. + @return : size of the compressed table, + or an errorCode, which can be tested using FSE_isError(). */ +FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); + + +/*! Constructor and Destructor of FSE_CTable. + Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */ +typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */ + +/*! 
FSE_compress_usingCTable(): + Compress `src` using `ct` into `dst` which must be already allocated. + @return : size of compressed data (<= `dstCapacity`), + or 0 if compressed data could not fit into `dst`, + or an errorCode, which can be tested using FSE_isError() */ +FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct); + +/*! +Tutorial : +---------- +The first step is to count all symbols. FSE_count() does this job very fast. +Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells. +'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0] +maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value) +FSE_count() will return the number of occurrence of the most frequent symbol. +This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility. +If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()). + +The next step is to normalize the frequencies. +FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'. +It also guarantees a minimum of 1 to any Symbol with frequency >= 1. +You can use 'tableLog'==0 to mean "use default tableLog value". +If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(), +which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default"). + +The result of FSE_normalizeCount() will be saved into a table, +called 'normalizedCounter', which is a table of signed short. +'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells. +The return value is tableLog if everything proceeded as expected. +It is 0 if there is a single symbol within distribution. 
+If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()). + +'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount(). +'buffer' must be already allocated. +For guaranteed success, buffer size must be at least FSE_headerBound(). +The result of the function is the number of bytes written into 'buffer'. +If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small). + +'normalizedCounter' can then be used to create the compression table 'CTable'. +The space required by 'CTable' must be already allocated, using FSE_createCTable(). +You can then use FSE_buildCTable() to fill 'CTable'. +If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()). + +'CTable' can then be used to compress 'src', with FSE_compress_usingCTable(). +Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize' +The function returns the size of compressed data (without header), necessarily <= `dstCapacity`. +If it returns '0', compressed data could not fit into 'dst'. +If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()). +*/ + + +/* *** DECOMPRESSION *** */ + +/*! FSE_readNCount(): + Read compactly saved 'normalizedCounter' from 'rBuffer'. + @return : size read from 'rBuffer', + or an errorCode, which can be tested using FSE_isError(). + maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */ +FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize); + +/*! Constructor and Destructor of FSE_DTable. + Note that its size depends on 'tableLog' */ +typedef unsigned FSE_DTable; /* don't allocate that. 
It's just a way to be more restrictive than void* */ + +/*! FSE_buildDTable(): + Builds 'dt', which must be already allocated, using FSE_createDTable(). + return : 0, or an errorCode, which can be tested using FSE_isError() */ +FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog); + +/*! FSE_decompress_usingDTable(): + Decompress compressed source `cSrc` of size `cSrcSize` using `dt` + into `dst` which must be already allocated. + @return : size of regenerated data (necessarily <= `dstCapacity`), + or an errorCode, which can be tested using FSE_isError() */ +FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt); + +/*! +Tutorial : +---------- +(Note : these functions only decompress FSE-compressed blocks. + If block is uncompressed, use memcpy() instead + If block is a single repeated byte, use memset() instead ) + +The first step is to obtain the normalized frequencies of symbols. +This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount(). +'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short. +In practice, that means it's necessary to know 'maxSymbolValue' beforehand, +or size the table to handle worst case situations (typically 256). +FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'. +The result of FSE_readNCount() is the number of bytes read from 'rBuffer'. +Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that. +If there is an error, the function will return an error code, which can be tested using FSE_isError(). + +The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'. +This is performed by the function FSE_buildDTable(). +The space required by 'FSE_DTable' must be already allocated using FSE_createDTable(). 
+If there is an error, the function will return an error code, which can be tested using FSE_isError(). + +`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable(). +`cSrcSize` must be strictly correct, otherwise decompression will fail. +FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`). +If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small) +*/ + + +/* *** Dependency *** */ +#include "bitstream.h" + + +/* ***************************************** +* Static allocation +*******************************************/ +/* FSE buffer bounds */ +#define FSE_NCOUNTBOUND 512 +#define FSE_BLOCKBOUND(size) (size + (size>>7)) +#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ + +/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */ +#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2)) +#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<= `1024` unsigned + */ +size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr, + const void* source, size_t sourceSize, unsigned* workSpace); + +/* FSE_countFast_wksp() : + * Same as FSE_countFast(), but using an externally provided scratch buffer. + * `workSpace` must be a table of minimum `1024` unsigned + */ +size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* workSpace); + +/*! FSE_count_simple + * Same as FSE_countFast(), but does not use any additional memory (not even on stack). + * This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` (presuming it's also the size of `count`). 
+*/ +size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize); + + + +unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus); +/**< same as FSE_optimalTableLog(), which used `minus==2` */ + +/* FSE_compress_wksp() : + * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`). + * FSE_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable. + */ +#define FSE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) ) +size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); + +size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits); +/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */ + +size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue); +/**< build a fake FSE_CTable, designed to compress always the same symbolValue */ + +/* FSE_buildCTable_wksp() : + * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`). + * `wkspSize` must be >= `(1<= BIT_DStream_completed + +When it's done, verify decompression is fully completed, by checking both DStream and the relevant states. +Checking if DStream has reached its end is performed by : + BIT_endOfDStream(&DStream); +Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible. 
+ FSE_endOfDState(&DState); +*/ + + +/* ***************************************** +* FSE unsafe API +*******************************************/ +static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD); +/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */ + + +/* ***************************************** +* Implementation of inlined functions +*******************************************/ +typedef struct { + int deltaFindState; + U32 deltaNbBits; +} FSE_symbolCompressionTransform; /* total 8 bytes */ + +MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct) +{ + const void* ptr = ct; + const U16* u16ptr = (const U16*) ptr; + const U32 tableLog = MEM_read16(ptr); + statePtr->value = (ptrdiff_t)1<stateTable = u16ptr+2; + statePtr->symbolTT = ((const U32*)ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1)); + statePtr->stateLog = tableLog; +} + + +/*! FSE_initCState2() : +* Same as FSE_initCState(), but the first symbol to include (which will be the last to be read) +* uses the smallest state value possible, saving the cost of this symbol */ +MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol) +{ + FSE_initCState(statePtr, ct); + { const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol]; + const U16* stateTable = (const U16*)(statePtr->stateTable); + U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16); + statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits; + statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState]; + } +} + +MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, U32 symbol) +{ + const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol]; + const U16* const stateTable = (const U16*)(statePtr->stateTable); + U32 nbBitsOut = 
(U32)((statePtr->value + symbolTT.deltaNbBits) >> 16); + BIT_addBits(bitC, statePtr->value, nbBitsOut); + statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState]; +} + +MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr) +{ + BIT_addBits(bitC, statePtr->value, statePtr->stateLog); + BIT_flushBits(bitC); +} + + +/* ====== Decompression ====== */ + +typedef struct { + U16 tableLog; + U16 fastMode; +} FSE_DTableHeader; /* sizeof U32 */ + +typedef struct +{ + unsigned short newState; + unsigned char symbol; + unsigned char nbBits; +} FSE_decode_t; /* size == U32 */ + +MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt) +{ + const void* ptr = dt; + const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr; + DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog); + BIT_reloadDStream(bitD); + DStatePtr->table = dt + 1; +} + +MEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr) +{ + FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; + return DInfo.symbol; +} + +MEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) +{ + FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; + U32 const nbBits = DInfo.nbBits; + size_t const lowBits = BIT_readBits(bitD, nbBits); + DStatePtr->state = DInfo.newState + lowBits; +} + +MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) +{ + FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; + U32 const nbBits = DInfo.nbBits; + BYTE const symbol = DInfo.symbol; + size_t const lowBits = BIT_readBits(bitD, nbBits); + + DStatePtr->state = DInfo.newState + lowBits; + return symbol; +} + +/*! 
FSE_decodeSymbolFast() : + unsafe, only works if no symbol has a probability > 50% */ +MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD) +{ + FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state]; + U32 const nbBits = DInfo.nbBits; + BYTE const symbol = DInfo.symbol; + size_t const lowBits = BIT_readBitsFast(bitD, nbBits); + + DStatePtr->state = DInfo.newState + lowBits; + return symbol; +} + +MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr) +{ + return DStatePtr->state == 0; +} + + + +#ifndef FSE_COMMONDEFS_ONLY + +/* ************************************************************** +* Tuning parameters +****************************************************************/ +/*!MEMORY_USAGE : +* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) +* Increasing memory usage improves compression ratio +* Reduced memory usage can improve speed, due to cache effect +* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ +#ifndef FSE_MAX_MEMORY_USAGE +# define FSE_MAX_MEMORY_USAGE 14 +#endif +#ifndef FSE_DEFAULT_MEMORY_USAGE +# define FSE_DEFAULT_MEMORY_USAGE 13 +#endif + +/*!FSE_MAX_SYMBOL_VALUE : +* Maximum symbol value authorized. 
+* Required for proper stack allocation */ +#ifndef FSE_MAX_SYMBOL_VALUE +# define FSE_MAX_SYMBOL_VALUE 255 +#endif + +/* ************************************************************** +* template functions type & suffix +****************************************************************/ +#define FSE_FUNCTION_TYPE BYTE +#define FSE_FUNCTION_EXTENSION +#define FSE_DECODE_TYPE FSE_decode_t + + +#endif /* !FSE_COMMONDEFS_ONLY */ + + +/* *************************************************************** +* Constants +*****************************************************************/ +#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2) +#define FSE_MAX_TABLESIZE (1U< FSE_TABLELOG_ABSOLUTE_MAX +# error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported" +#endif + +#define FSE_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3) + + +#endif /* FSE_H */ diff --git a/contrib/linux-kernel/lib/zstd/fse_compress.c b/contrib/linux-kernel/lib/zstd/fse_compress.c new file mode 100644 index 000000000..b6a6d4693 --- /dev/null +++ b/contrib/linux-kernel/lib/zstd/fse_compress.c @@ -0,0 +1,788 @@ +/* ****************************************************************** + FSE : Finite State Entropy encoder + Copyright (C) 2013-2015, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +****************************************************************** */ + +/* ************************************************************** +* Compiler specifics +****************************************************************/ +#define FORCE_INLINE static __always_inline + + +/* ************************************************************** +* Includes +****************************************************************/ +#include +#include /* memcpy, memset */ +#include "bitstream.h" +#include "fse.h" + + +/* ************************************************************** +* Error Management +****************************************************************/ +#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ + + +/* ************************************************************** +* Templates +****************************************************************/ +/* + designed to be included + for type-specific functions (template emulation in C) + Objective is to 
write these functions only once, for improved maintenance +*/ + +/* safety checks */ +#ifndef FSE_FUNCTION_EXTENSION +# error "FSE_FUNCTION_EXTENSION must be defined" +#endif +#ifndef FSE_FUNCTION_TYPE +# error "FSE_FUNCTION_TYPE must be defined" +#endif + +/* Function names */ +#define FSE_CAT(X,Y) X##Y +#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) +#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) + + +/* Function templates */ + +/* FSE_buildCTable_wksp() : + * Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`). + * wkspSize should be sized to handle worst case situation, which is `1<>1 : 1) ; + FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT); + U32 const step = FSE_TABLESTEP(tableSize); + U32 cumul[FSE_MAX_SYMBOL_VALUE+2]; + + FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)workSpace; + U32 highThreshold = tableSize-1; + + /* CTable header */ + if (((size_t)1 << tableLog) * sizeof(FSE_FUNCTION_TYPE) > wkspSize) return ERROR(tableLog_tooLarge); + tableU16[-2] = (U16) tableLog; + tableU16[-1] = (U16) maxSymbolValue; + + /* For explanations on how to distribute symbol values over the table : + * http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */ + + /* symbol start positions */ + { U32 u; + cumul[0] = 0; + for (u=1; u<=maxSymbolValue+1; u++) { + if (normalizedCounter[u-1]==-1) { /* Low proba symbol */ + cumul[u] = cumul[u-1] + 1; + tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1); + } else { + cumul[u] = cumul[u-1] + normalizedCounter[u-1]; + } } + cumul[maxSymbolValue+1] = tableSize+1; + } + + /* Spread symbols */ + { U32 position = 0; + U32 symbol; + for (symbol=0; symbol<=maxSymbolValue; symbol++) { + int nbOccurences; + for (nbOccurences=0; nbOccurences highThreshold) position = (position + step) & tableMask; /* Low proba area */ + } } + + if (position!=0) return ERROR(GENERIC); /* Must have gone through all positions */ + } + + /* Build table 
*/ + { U32 u; for (u=0; u> 3) + 3; + return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */ +} + +static size_t FSE_writeNCount_generic (void* header, size_t headerBufferSize, + const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, + unsigned writeIsSafe) +{ + BYTE* const ostart = (BYTE*) header; + BYTE* out = ostart; + BYTE* const oend = ostart + headerBufferSize; + int nbBits; + const int tableSize = 1 << tableLog; + int remaining; + int threshold; + U32 bitStream; + int bitCount; + unsigned charnum = 0; + int previous0 = 0; + + bitStream = 0; + bitCount = 0; + /* Table Size */ + bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount; + bitCount += 4; + + /* Init */ + remaining = tableSize+1; /* +1 for extra accuracy */ + threshold = tableSize; + nbBits = tableLog+1; + + while (remaining>1) { /* stops at 1 */ + if (previous0) { + unsigned start = charnum; + while (!normalizedCounter[charnum]) charnum++; + while (charnum >= start+24) { + start+=24; + bitStream += 0xFFFFU << bitCount; + if ((!writeIsSafe) && (out > oend-2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ + out[0] = (BYTE) bitStream; + out[1] = (BYTE)(bitStream>>8); + out+=2; + bitStream>>=16; + } + while (charnum >= start+3) { + start+=3; + bitStream += 3 << bitCount; + bitCount += 2; + } + bitStream += (charnum-start) << bitCount; + bitCount += 2; + if (bitCount>16) { + if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ + out[0] = (BYTE)bitStream; + out[1] = (BYTE)(bitStream>>8); + out += 2; + bitStream >>= 16; + bitCount -= 16; + } } + { int count = normalizedCounter[charnum++]; + int const max = (2*threshold-1)-remaining; + remaining -= count < 0 ? -count : count; + count++; /* +1 for extra accuracy */ + if (count>=threshold) count += max; /* [0..max[ [max..threshold[ (...) 
[threshold+max 2*threshold[ */ + bitStream += count << bitCount; + bitCount += nbBits; + bitCount -= (count>=1; + } + if (bitCount>16) { + if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ + out[0] = (BYTE)bitStream; + out[1] = (BYTE)(bitStream>>8); + out += 2; + bitStream >>= 16; + bitCount -= 16; + } } + + /* flush remaining bitStream */ + if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */ + out[0] = (BYTE)bitStream; + out[1] = (BYTE)(bitStream>>8); + out+= (bitCount+7) /8; + + if (charnum > maxSymbolValue + 1) return ERROR(GENERIC); + + return (out-ostart); +} + + +size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) +{ + if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported */ + if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported */ + + if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog)) + return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0); + + return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1); +} + + + +/*-************************************************************** +* Counting histogram +****************************************************************/ +/*! FSE_count_simple + This function counts byte values within `src`, and store the histogram into table `count`. + It doesn't use any additional memory. + But this function is unsafe : it doesn't check that all values within `src` can fit into `count`. + For this reason, prefer using a table `count` with 256 elements. 
+ @return : count of most numerous element +*/ +size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, + const void* src, size_t srcSize) +{ + const BYTE* ip = (const BYTE*)src; + const BYTE* const end = ip + srcSize; + unsigned maxSymbolValue = *maxSymbolValuePtr; + unsigned max=0; + + memset(count, 0, (maxSymbolValue+1)*sizeof(*count)); + if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; } + + while (ip max) max = count[s]; } + + return (size_t)max; +} + + +/* FSE_count_parallel_wksp() : + * Same as FSE_count_parallel(), but using an externally provided scratch buffer. + * `workSpace` size must be a minimum of `1024 * sizeof(unsigned)`` */ +static size_t FSE_count_parallel_wksp( + unsigned* count, unsigned* maxSymbolValuePtr, + const void* source, size_t sourceSize, + unsigned checkMax, unsigned* const workSpace) +{ + const BYTE* ip = (const BYTE*)source; + const BYTE* const iend = ip+sourceSize; + unsigned maxSymbolValue = *maxSymbolValuePtr; + unsigned max=0; + U32* const Counting1 = workSpace; + U32* const Counting2 = Counting1 + 256; + U32* const Counting3 = Counting2 + 256; + U32* const Counting4 = Counting3 + 256; + + memset(Counting1, 0, 4*256*sizeof(unsigned)); + + /* safety checks */ + if (!sourceSize) { + memset(count, 0, maxSymbolValue + 1); + *maxSymbolValuePtr = 0; + return 0; + } + if (!maxSymbolValue) maxSymbolValue = 255; /* 0 == default */ + + /* by stripes of 16 bytes */ + { U32 cached = MEM_read32(ip); ip += 4; + while (ip < iend-15) { + U32 c = cached; cached = MEM_read32(ip); ip += 4; + Counting1[(BYTE) c ]++; + Counting2[(BYTE)(c>>8) ]++; + Counting3[(BYTE)(c>>16)]++; + Counting4[ c>>24 ]++; + c = cached; cached = MEM_read32(ip); ip += 4; + Counting1[(BYTE) c ]++; + Counting2[(BYTE)(c>>8) ]++; + Counting3[(BYTE)(c>>16)]++; + Counting4[ c>>24 ]++; + c = cached; cached = MEM_read32(ip); ip += 4; + Counting1[(BYTE) c ]++; + Counting2[(BYTE)(c>>8) ]++; + Counting3[(BYTE)(c>>16)]++; + Counting4[ c>>24 ]++; + c = cached; cached = 
MEM_read32(ip); ip += 4; + Counting1[(BYTE) c ]++; + Counting2[(BYTE)(c>>8) ]++; + Counting3[(BYTE)(c>>16)]++; + Counting4[ c>>24 ]++; + } + ip-=4; + } + + /* finish last symbols */ + while (ipmaxSymbolValue; s--) { + Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s]; + if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall); + } } + + { U32 s; for (s=0; s<=maxSymbolValue; s++) { + count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s]; + if (count[s] > max) max = count[s]; + } } + + while (!count[maxSymbolValue]) maxSymbolValue--; + *maxSymbolValuePtr = maxSymbolValue; + return (size_t)max; +} + +/* FSE_countFast_wksp() : + * Same as FSE_countFast(), but using an externally provided scratch buffer. + * `workSpace` size must be table of >= `1024` unsigned */ +size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, + const void* source, size_t sourceSize, unsigned* workSpace) +{ + if (sourceSize < 1500) return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize); + return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace); +} + +/* FSE_count_wksp() : + * Same as FSE_count(), but using an externally provided scratch buffer. + * `workSpace` size must be table of >= `1024` unsigned */ +size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr, + const void* source, size_t sourceSize, unsigned* workSpace) +{ + if (*maxSymbolValuePtr < 255) + return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 1, workSpace); + *maxSymbolValuePtr = 255; + return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace); +} + + +/*-************************************************************** +* FSE Compression Code +****************************************************************/ +/*! 
FSE_sizeof_CTable() : + FSE_CTable is a variable size structure which contains : + `U16 tableLog;` + `U16 maxSymbolValue;` + `U16 nextStateNumber[1 << tableLog];` // This size is variable + `FSE_symbolCompressionTransform symbolTT[maxSymbolValue+1];` // This size is variable +Allocation is manual (C standard does not support variable-size structures). +*/ +size_t FSE_sizeof_CTable (unsigned maxSymbolValue, unsigned tableLog) +{ + if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); + return FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32); +} + +/* provides the minimum logSize to safely represent a distribution */ +static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue) +{ + U32 minBitsSrc = BIT_highbit32((U32)(srcSize - 1)) + 1; + U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2; + U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols; + return minBits; +} + +unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus) +{ + U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus; + U32 tableLog = maxTableLog; + U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue); + if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG; + if (maxBitsSrc < tableLog) tableLog = maxBitsSrc; /* Accuracy can be reduced */ + if (minBits > tableLog) tableLog = minBits; /* Need a minimum to safely represent all symbol values */ + if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG; + if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG; + return tableLog; +} + +unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue) +{ + return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2); +} + + +/* Secondary normalization method. + To be used when primary method fails. 
*/ + +static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue) +{ + short const NOT_YET_ASSIGNED = -2; + U32 s; + U32 distributed = 0; + U32 ToDistribute; + + /* Init */ + U32 const lowThreshold = (U32)(total >> tableLog); + U32 lowOne = (U32)((total * 3) >> (tableLog + 1)); + + for (s=0; s<=maxSymbolValue; s++) { + if (count[s] == 0) { + norm[s]=0; + continue; + } + if (count[s] <= lowThreshold) { + norm[s] = -1; + distributed++; + total -= count[s]; + continue; + } + if (count[s] <= lowOne) { + norm[s] = 1; + distributed++; + total -= count[s]; + continue; + } + + norm[s]=NOT_YET_ASSIGNED; + } + ToDistribute = (1 << tableLog) - distributed; + + if ((total / ToDistribute) > lowOne) { + /* risk of rounding to zero */ + lowOne = (U32)((total * 3) / (ToDistribute * 2)); + for (s=0; s<=maxSymbolValue; s++) { + if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) { + norm[s] = 1; + distributed++; + total -= count[s]; + continue; + } } + ToDistribute = (1 << tableLog) - distributed; + } + + if (distributed == maxSymbolValue+1) { + /* all values are pretty poor; + probably incompressible data (should have already been detected); + find max, then give all remaining points to max */ + U32 maxV = 0, maxC = 0; + for (s=0; s<=maxSymbolValue; s++) + if (count[s] > maxC) maxV=s, maxC=count[s]; + norm[maxV] += (short)ToDistribute; + return 0; + } + + if (total == 0) { + /* all of the symbols were low enough for the lowOne or lowThreshold */ + for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1)) + if (norm[s] > 0) ToDistribute--, norm[s]++; + return 0; + } + + { U64 const vStepLog = 62 - tableLog; + U64 const mid = (1ULL << (vStepLog-1)) - 1; + U64 const rStep = ((((U64)1<> vStepLog); + U32 const sEnd = (U32)(end >> vStepLog); + U32 const weight = sEnd - sStart; + if (weight < 1) + return ERROR(GENERIC); + norm[s] = (short)weight; + tmpTotal = end; + } } } + + return 0; +} + + +size_t FSE_normalizeCount (short* 
normalizedCounter, unsigned tableLog, + const unsigned* count, size_t total, + unsigned maxSymbolValue) +{ + /* Sanity checks */ + if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG; + if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported size */ + if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported size */ + if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */ + + { U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 }; + U64 const scale = 62 - tableLog; + U64 const step = ((U64)1<<62) / total; /* <== here, one division ! */ + U64 const vStep = 1ULL<<(scale-20); + int stillToDistribute = 1<> tableLog); + + for (s=0; s<=maxSymbolValue; s++) { + if (count[s] == total) return 0; /* rle special case */ + if (count[s] == 0) { normalizedCounter[s]=0; continue; } + if (count[s] <= lowThreshold) { + normalizedCounter[s] = -1; + stillToDistribute--; + } else { + short proba = (short)((count[s]*step) >> scale); + if (proba<8) { + U64 restToBeat = vStep * rtbTable[proba]; + proba += (count[s]*step) - ((U64)proba< restToBeat; + } + if (proba > largestP) largestP=proba, largest=s; + normalizedCounter[s] = proba; + stillToDistribute -= proba; + } } + if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) { + /* corner case, need another normalization method */ + size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue); + if (FSE_isError(errorCode)) return errorCode; + } + else normalizedCounter[largest] += (short)stillToDistribute; + } + +#if 0 + { /* Print Table (debug) */ + U32 s; + U32 nTotal = 0; + for (s=0; s<=maxSymbolValue; s++) + printf("%3i: %4i \n", s, normalizedCounter[s]); + for (s=0; s<=maxSymbolValue; s++) + nTotal += abs(normalizedCounter[s]); + if (nTotal != (1U<>1); /* assumption : tableLog >= 1 */ + FSE_symbolCompressionTransform* const symbolTT = 
(FSE_symbolCompressionTransform*) (FSCT); + unsigned s; + + /* Sanity checks */ + if (nbBits < 1) return ERROR(GENERIC); /* min size */ + + /* header */ + tableU16[-2] = (U16) nbBits; + tableU16[-1] = (U16) maxSymbolValue; + + /* Build table */ + for (s=0; s FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) { /* test bit 2 */ + FSE_encodeSymbol(&bitC, &CState2, *--ip); + FSE_encodeSymbol(&bitC, &CState1, *--ip); + FSE_FLUSHBITS(&bitC); + } + + /* 2 or 4 encoding per loop */ + while ( ip>istart ) { + + FSE_encodeSymbol(&bitC, &CState2, *--ip); + + if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 ) /* this test must be static */ + FSE_FLUSHBITS(&bitC); + + FSE_encodeSymbol(&bitC, &CState1, *--ip); + + if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) { /* this test must be static */ + FSE_encodeSymbol(&bitC, &CState2, *--ip); + FSE_encodeSymbol(&bitC, &CState1, *--ip); + } + + FSE_FLUSHBITS(&bitC); + } + + FSE_flushCState(&bitC, &CState2); + FSE_flushCState(&bitC, &CState1); + return BIT_closeCStream(&bitC); +} + +size_t FSE_compress_usingCTable (void* dst, size_t dstSize, + const void* src, size_t srcSize, + const FSE_CTable* ct) +{ + unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize)); + + if (fast) + return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1); + else + return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0); +} + + +size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); } + +#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return f +#define CHECK_F(f) { CHECK_V_F(_var_err__, f); } + +/* FSE_compress_wksp() : + * Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`). 
+ * `wkspSize` size must be `(1< not compressible */ + if (maxCount < (srcSize >> 7)) return 0; /* Heuristic : not compressible enough */ + } + + tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue); + CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue) ); + + /* Write table description header */ + { CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) ); + op += nc_err; + } + + /* Compress */ + CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, scratchBufferSize) ); + { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) ); + if (cSize == 0) return 0; /* not enough space for compressed data */ + op += cSize; + } + + /* check compressibility */ + if ( (size_t)(op-ostart) >= srcSize-1 ) return 0; + + return op-ostart; +} + + +#endif /* FSE_COMMONDEFS_ONLY */ diff --git a/contrib/linux-kernel/lib/zstd/fse_decompress.c b/contrib/linux-kernel/lib/zstd/fse_decompress.c new file mode 100644 index 000000000..2a35f1703 --- /dev/null +++ b/contrib/linux-kernel/lib/zstd/fse_decompress.c @@ -0,0 +1,292 @@ +/* ****************************************************************** + FSE : Finite State Entropy decoder + Copyright (C) 2013-2015, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +****************************************************************** */ + + +/* ************************************************************** +* Compiler specifics +****************************************************************/ +#define FORCE_INLINE static __always_inline + + +/* ************************************************************** +* Includes +****************************************************************/ +#include +#include /* memcpy, memset */ +#include "bitstream.h" +#include "fse.h" + + +/* ************************************************************** +* Error Management +****************************************************************/ +#define FSE_isError ERR_isError +#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ + +/* check and forward error code */ +#define CHECK_F(f) { size_t const e = f; if (FSE_isError(e)) return e; } + + +/* ************************************************************** +* Templates 
+****************************************************************/ +/* + designed to be included + for type-specific functions (template emulation in C) + Objective is to write these functions only once, for improved maintenance +*/ + +/* safety checks */ +#ifndef FSE_FUNCTION_EXTENSION +# error "FSE_FUNCTION_EXTENSION must be defined" +#endif +#ifndef FSE_FUNCTION_TYPE +# error "FSE_FUNCTION_TYPE must be defined" +#endif + +/* Function names */ +#define FSE_CAT(X,Y) X##Y +#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y) +#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y) + + +/* Function templates */ + +size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) +{ + void* const tdPtr = dt+1; /* because *dt is unsigned, 32-bits aligned on 32-bits */ + FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr); + U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1]; + + U32 const maxSV1 = maxSymbolValue + 1; + U32 const tableSize = 1 << tableLog; + U32 highThreshold = tableSize-1; + + /* Sanity Checks */ + if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge); + if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); + + /* Init, lay down lowprob symbols */ + { FSE_DTableHeader DTableH; + DTableH.tableLog = (U16)tableLog; + DTableH.fastMode = 1; + { S16 const largeLimit= (S16)(1 << (tableLog-1)); + U32 s; + for (s=0; s= largeLimit) DTableH.fastMode=0; + symbolNext[s] = normalizedCounter[s]; + } } } + memcpy(dt, &DTableH, sizeof(DTableH)); + } + + /* Spread symbols */ + { U32 const tableMask = tableSize-1; + U32 const step = FSE_TABLESTEP(tableSize); + U32 s, position = 0; + for (s=0; s highThreshold) position = (position + step) & tableMask; /* lowprob area */ + } } + if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */ + } + + /* Build Decoding table */ + { U32 u; + for (u=0; utableLog = 0; + DTableH->fastMode = 0; + + 
cell->newState = 0; + cell->symbol = symbolValue; + cell->nbBits = 0; + + return 0; +} + + +size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits) +{ + void* ptr = dt; + FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr; + void* dPtr = dt + 1; + FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr; + const unsigned tableSize = 1 << nbBits; + const unsigned tableMask = tableSize - 1; + const unsigned maxSV1 = tableMask+1; + unsigned s; + + /* Sanity checks */ + if (nbBits < 1) return ERROR(GENERIC); /* min size */ + + /* Build Decoding Table */ + DTableH->tableLog = (U16)nbBits; + DTableH->fastMode = 1; + for (s=0; s sizeof(bitD.bitContainer)*8) /* This test must be static */ + BIT_reloadDStream(&bitD); + + op[1] = FSE_GETSYMBOL(&state2); + + if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ + { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } } + + op[2] = FSE_GETSYMBOL(&state1); + + if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */ + BIT_reloadDStream(&bitD); + + op[3] = FSE_GETSYMBOL(&state2); + } + + /* tail */ + /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */ + while (1) { + if (op>(omax-2)) return ERROR(dstSize_tooSmall); + *op++ = FSE_GETSYMBOL(&state1); + if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) { + *op++ = FSE_GETSYMBOL(&state2); + break; + } + + if (op>(omax-2)) return ERROR(dstSize_tooSmall); + *op++ = FSE_GETSYMBOL(&state2); + if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) { + *op++ = FSE_GETSYMBOL(&state1); + break; + } } + + return op-ostart; +} + + +size_t FSE_decompress_usingDTable(void* dst, size_t originalSize, + const void* cSrc, size_t cSrcSize, + const FSE_DTable* dt) +{ + const void* ptr = dt; + const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr; + const U32 fastMode = DTableH->fastMode; + + /* select fast mode (static) */ + if (fastMode) return 
FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1); + return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0); +} + + +size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog) +{ + const BYTE* const istart = (const BYTE*)cSrc; + const BYTE* ip = istart; + short counting[FSE_MAX_SYMBOL_VALUE+1]; + unsigned tableLog; + unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE; + + /* normal FSE decoding mode */ + size_t const NCountLength = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize); + if (FSE_isError(NCountLength)) return NCountLength; + //if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size; supposed to be already checked in NCountLength, only remaining case : NCountLength==cSrcSize */ + if (tableLog > maxLog) return ERROR(tableLog_tooLarge); + ip += NCountLength; + cSrcSize -= NCountLength; + + CHECK_F( FSE_buildDTable (workSpace, counting, maxSymbolValue, tableLog) ); + + return FSE_decompress_usingDTable (dst, dstCapacity, ip, cSrcSize, workSpace); /* always return, even if it is an error code */ +} + + +#endif /* FSE_COMMONDEFS_ONLY */ diff --git a/contrib/linux-kernel/lib/zstd/huf.h b/contrib/linux-kernel/lib/zstd/huf.h new file mode 100644 index 000000000..f36aded00 --- /dev/null +++ b/contrib/linux-kernel/lib/zstd/huf.h @@ -0,0 +1,203 @@ +/* ****************************************************************** + Huffman coder, part of New Generation Entropy library + header file + Copyright (C) 2013-2016, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - Source repository : https://github.com/Cyan4973/FiniteStateEntropy +****************************************************************** */ +#ifndef HUF_H_298734234 +#define HUF_H_298734234 + + +/* *** Dependencies *** */ +#include /* size_t */ + + +/* *** Tool functions *** */ +#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */ +size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */ + +/* Error Management */ +unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */ + + +/* *** Advanced function *** */ + +/** HUF_compress4X_wksp() : +* Same as HUF_compress2(), but uses externally allocated `workSpace`, which must be a table of >= 1024 unsigned */ +size_t HUF_compress4X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a 
table of at least HUF_WORKSPACE_SIZE_U32 unsigned */ + + + +/* *** Dependencies *** */ +#include "mem.h" /* U32 */ + + +/* *** Constants *** */ +#define HUF_TABLELOG_MAX 12 /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */ +#define HUF_TABLELOG_DEFAULT 11 /* tableLog by default, when not specified */ +#define HUF_SYMBOLVALUE_MAX 255 + +#define HUF_TABLELOG_ABSOLUTEMAX 15 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */ +#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX) +# error "HUF_TABLELOG_MAX is too large !" +#endif + + +/* **************************************** +* Static allocation +******************************************/ +/* HUF buffer bounds */ +#define HUF_CTABLEBOUND 129 +#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true if incompressible pre-filtered with fast heuristic */ +#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */ + +/* static allocation of HUF's Compression Table */ +#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \ + U32 name##hb[maxSymbolValue+1]; \ + void* name##hv = &(name##hb); \ + HUF_CElt* name = (HUF_CElt*)(name##hv) /* no final ; */ + +/* static allocation of HUF's DTable */ +typedef U32 HUF_DTable; +#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<(maxTableLog))) +#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \ + HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) } +#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \ + HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) } + +/* The workspace must have alignment at least 4 and be at least this large */ +#define HUF_WORKSPACE_SIZE (6 << 10) +#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32)) + + +/* **************************************** +* Advanced decompression functions 
+******************************************/ +size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< decodes RLE and uncompressed */ +size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */ +size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ +size_t HUF_decompress4X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ + + +/* **************************************** +* HUF detailed API +******************************************/ +/*! +HUF_compress() does the following: +1. count symbol occurrence from source[] into table count[] using FSE_count() +2. (optional) refine tableLog using HUF_optimalTableLog() +3. build Huffman table from count using HUF_buildCTable() +4. save Huffman table to memory buffer using HUF_writeCTable() +5. encode the data stream using HUF_compress4X_usingCTable() + +The following API allows targeting specific sub-functions for advanced tasks. +For example, it's possible to compress several blocks using the same 'CTable', +or to save and regenerate 'CTable' using external methods. +*/ +/* FSE_count() : find it within "fse.h" */ +unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue); +typedef struct HUF_CElt_s HUF_CElt; /* incomplete type */ +size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog); +size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); + +typedef enum { + HUF_repeat_none, /**< Cannot use the previous table */ + HUF_repeat_check, /**< Can use the previous table but it must be checked. 
Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */ + HUF_repeat_valid /**< Can use the previous table and it is asumed to be valid */ + } HUF_repeat; +/** HUF_compress4X_repeat() : +* Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. +* If it uses hufTable it does not modify hufTable or repeat. +* If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. +* If preferRepeat then the old table will always be used if valid. */ +size_t HUF_compress4X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */ + +/** HUF_buildCTable_wksp() : + * Same as HUF_buildCTable(), but using externally allocated scratch buffer. + * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned. + */ +size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize); + +/*! HUF_readStats() : + Read compact Huffman tree, saved by HUF_writeCTable(). + `huffWeight` is destination buffer. + @return : size read from `src` , or an error Code . + Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */ +size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats, + U32* nbSymbolsPtr, U32* tableLogPtr, + const void* src, size_t srcSize); + +/** HUF_readCTable() : +* Loading a CTable saved with HUF_writeCTable() */ +size_t HUF_readCTable (HUF_CElt* CTable, unsigned maxSymbolValue, const void* src, size_t srcSize); + + +/* +HUF_decompress() does the following: +1. select the decompression algorithm (X2, X4) based on pre-computed heuristics +2. build Huffman table from save, using HUF_readDTableXn() +3. 
decode 1 or 4 segments in parallel using HUF_decompressSXn_usingDTable +*/ + +/** HUF_selectDecoder() : +* Tells which decoder is likely to decode faster, +* based on a set of pre-determined metrics. +* @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 . +* Assumption : 0 < cSrcSize < dstSize <= 128 KB */ +U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize); + +size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize); +size_t HUF_readDTableX4 (HUF_DTable* DTable, const void* src, size_t srcSize); + +size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); +size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); +size_t HUF_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); + + +/* single stream variants */ + +size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */ +size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable); +/** HUF_compress1X_repeat() : +* Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none. +* If it uses hufTable it does not modify hufTable or repeat. +* If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used. +* If preferRepeat then the old table will always be used if valid. 
*/ +size_t HUF_compress1X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */ + +size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); +size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */ +size_t HUF_decompress1X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */ + +size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); /**< automatic selection of sing or double symbol decoder, based on DTable */ +size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); +size_t HUF_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); + +#endif /* HUF_H_298734234 */ diff --git a/contrib/linux-kernel/lib/zstd/huf_compress.c b/contrib/linux-kernel/lib/zstd/huf_compress.c new file mode 100644 index 000000000..a1a1d454a --- /dev/null +++ b/contrib/linux-kernel/lib/zstd/huf_compress.c @@ -0,0 +1,644 @@ +/* ****************************************************************** + Huffman encoder, part of New Generation Entropy library + Copyright (C) 2013-2016, Yann Collet. + + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +****************************************************************** */ + + +/* ************************************************************** +* Includes +****************************************************************/ +#include /* memcpy, memset */ +#include "bitstream.h" +#include "fse.h" /* header compression */ +#include "huf.h" + + +/* ************************************************************** +* Error Management +****************************************************************/ +#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ +#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return f +#define CHECK_F(f) { CHECK_V_F(_var_err__, f); } + + +/* ************************************************************** +* Utils 
+****************************************************************/ +unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue) +{ + return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1); +} + + +/* ******************************************************* +* HUF : Huffman block compression +*********************************************************/ +/* HUF_compressWeights() : + * Same as FSE_compress(), but dedicated to huff0's weights compression. + * The use case needs much less stack memory. + * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX. + */ +#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6 +size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize) +{ + BYTE* const ostart = (BYTE*) dst; + BYTE* op = ostart; + BYTE* const oend = ostart + dstSize; + + U32 maxSymbolValue = HUF_TABLELOG_MAX; + U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER; + + FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)]; + BYTE scratchBuffer[1< not compressible */ + } + + tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue); + CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue) ); + + /* Write table description header */ + { CHECK_V_F(hSize, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) ); + op += hSize; + } + + /* Compress */ + CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, sizeof(scratchBuffer)) ); + { CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable) ); + if (cSize == 0) return 0; /* not enough space for compressed data */ + op += cSize; + } + + return op-ostart; +} + + +struct HUF_CElt_s { + U16 val; + BYTE nbBits; +}; /* typedef'd to HUF_CElt within "huf.h" */ + +/*! HUF_writeCTable() : + `CTable` : Huffman tree to save, using huf representation. 
+ @return : size of saved CTable */ +size_t HUF_writeCTable (void* dst, size_t maxDstSize, + const HUF_CElt* CTable, U32 maxSymbolValue, U32 huffLog) +{ + BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */ + BYTE huffWeight[HUF_SYMBOLVALUE_MAX]; + BYTE* op = (BYTE*)dst; + U32 n; + + /* check conditions */ + if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); + + /* convert to weight */ + bitsToWeight[0] = 0; + for (n=1; n1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */ + op[0] = (BYTE)hSize; + return hSize+1; + } } + + /* write raw values as 4-bits (max : 15) */ + if (maxSymbolValue > (256-128)) return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */ + if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */ + op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1)); + huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */ + for (n=0; n HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); + if (nbSymbols > maxSymbolValue+1) return ERROR(maxSymbolValue_tooSmall); + + /* Prepare base value per rank */ + { U32 n, nextRankStart = 0; + for (n=1; n<=tableLog; n++) { + U32 current = nextRankStart; + nextRankStart += (rankVal[n] << (n-1)); + rankVal[n] = current; + } } + + /* fill nbBits */ + { U32 n; for (n=0; nn=tableLog+1 */ + U16 valPerRank[HUF_TABLELOG_MAX+2] = {0}; + { U32 n; for (n=0; n0; n--) { /* start at n=tablelog <-> w=1 */ + valPerRank[n] = min; /* get starting value within each rank */ + min += nbPerRank[n]; + min >>= 1; + } } + /* assign value within rank, symbol order */ + { U32 n; for (n=0; n<=maxSymbolValue; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; } + } + + return readSize; +} + + +typedef struct nodeElt_s { + U32 count; + U16 parent; + BYTE byte; + BYTE nbBits; +} nodeElt; + +static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 
maxNbBits) +{ + const U32 largestBits = huffNode[lastNonNull].nbBits; + if (largestBits <= maxNbBits) return largestBits; /* early exit : no elt > maxNbBits */ + + /* there are several too large elements (at least >= 2) */ + { int totalCost = 0; + const U32 baseCost = 1 << (largestBits - maxNbBits); + U32 n = lastNonNull; + + while (huffNode[n].nbBits > maxNbBits) { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)); + huffNode[n].nbBits = (BYTE)maxNbBits; + n --; + } /* n stops at huffNode[n].nbBits <= maxNbBits */ + while (huffNode[n].nbBits == maxNbBits) n--; /* n end at index of smallest symbol using < maxNbBits */ + + /* renorm totalCost */ + totalCost >>= (largestBits - maxNbBits); /* note : totalCost is necessarily a multiple of baseCost */ + + /* repay normalized cost */ + { U32 const noSymbol = 0xF0F0F0F0; + U32 rankLast[HUF_TABLELOG_MAX+2]; + int pos; + + /* Get pos of last (smallest) symbol per rank */ + memset(rankLast, 0xF0, sizeof(rankLast)); + { U32 currentNbBits = maxNbBits; + for (pos=n ; pos >= 0; pos--) { + if (huffNode[pos].nbBits >= currentNbBits) continue; + currentNbBits = huffNode[pos].nbBits; /* < maxNbBits */ + rankLast[maxNbBits-currentNbBits] = pos; + } } + + while (totalCost > 0) { + U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1; + for ( ; nBitsToDecrease > 1; nBitsToDecrease--) { + U32 highPos = rankLast[nBitsToDecrease]; + U32 lowPos = rankLast[nBitsToDecrease-1]; + if (highPos == noSymbol) continue; + if (lowPos == noSymbol) break; + { U32 const highTotal = huffNode[highPos].count; + U32 const lowTotal = 2 * huffNode[lowPos].count; + if (highTotal <= lowTotal) break; + } } + /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
*/ + while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol)) /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */ + nBitsToDecrease ++; + totalCost -= 1 << (nBitsToDecrease-1); + if (rankLast[nBitsToDecrease-1] == noSymbol) + rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]; /* this rank is no longer empty */ + huffNode[rankLast[nBitsToDecrease]].nbBits ++; + if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */ + rankLast[nBitsToDecrease] = noSymbol; + else { + rankLast[nBitsToDecrease]--; + if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease) + rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */ + } } /* while (totalCost > 0) */ + + while (totalCost < 0) { /* Sometimes, cost correction overshoot */ + if (rankLast[1] == noSymbol) { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ + while (huffNode[n].nbBits == maxNbBits) n--; + huffNode[n+1].nbBits--; + rankLast[1] = n+1; + totalCost++; + continue; + } + huffNode[ rankLast[1] + 1 ].nbBits--; + rankLast[1]++; + totalCost ++; + } } } /* there are several too large elements (at least >= 2) */ + + return maxNbBits; +} + + +typedef struct { + U32 base; + U32 current; +} rankPos; + +static void HUF_sort(nodeElt* huffNode, const U32* count, U32 maxSymbolValue) +{ + rankPos rank[32]; + U32 n; + + memset(rank, 0, sizeof(rank)); + for (n=0; n<=maxSymbolValue; n++) { + U32 r = BIT_highbit32(count[n] + 1); + rank[r].base ++; + } + for (n=30; n>0; n--) rank[n-1].base += rank[n].base; + for (n=0; n<32; n++) rank[n].current = rank[n].base; + for (n=0; n<=maxSymbolValue; n++) { + U32 const c = count[n]; + U32 const r = BIT_highbit32(c+1) + 1; + U32 pos = rank[r].current++; + while ((pos > rank[r].base) && (c > huffNode[pos-1].count)) huffNode[pos]=huffNode[pos-1], pos--; + huffNode[pos].count = c; + huffNode[pos].byte = (BYTE)n; + } +} + 
+ +/** HUF_buildCTable_wksp() : + * Same as HUF_buildCTable(), but using externally allocated scratch buffer. + * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned. + */ +#define STARTNODE (HUF_SYMBOLVALUE_MAX+1) +typedef nodeElt huffNodeTable[2*HUF_SYMBOLVALUE_MAX+1 +1]; +size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize) +{ + nodeElt* const huffNode0 = (nodeElt*)workSpace; + nodeElt* const huffNode = huffNode0+1; + U32 n, nonNullRank; + int lowS, lowN; + U16 nodeNb = STARTNODE; + U32 nodeRoot; + + /* safety checks */ + if (wkspSize < sizeof(huffNodeTable)) return ERROR(GENERIC); /* workSpace is not large enough */ + if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT; + if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(GENERIC); + memset(huffNode0, 0, sizeof(huffNodeTable)); + + /* sort, decreasing order */ + HUF_sort(huffNode, count, maxSymbolValue); + + /* init for parents */ + nonNullRank = maxSymbolValue; + while(huffNode[nonNullRank].count == 0) nonNullRank--; + lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb; + huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count; + huffNode[lowS].parent = huffNode[lowS-1].parent = nodeNb; + nodeNb++; lowS-=2; + for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30); + huffNode0[0].count = (U32)(1U<<31); /* fake entry, strong barrier */ + + /* create parents */ + while (nodeNb <= nodeRoot) { + U32 n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; + U32 n2 = (huffNode[lowS].count < huffNode[lowN].count) ? 
lowS-- : lowN++; + huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count; + huffNode[n1].parent = huffNode[n2].parent = nodeNb; + nodeNb++; + } + + /* distribute weights (unlimited tree height) */ + huffNode[nodeRoot].nbBits = 0; + for (n=nodeRoot-1; n>=STARTNODE; n--) + huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; + for (n=0; n<=nonNullRank; n++) + huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; + + /* enforce maxTableLog */ + maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits); + + /* fill result into tree (val, nbBits) */ + { U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0}; + U16 valPerRank[HUF_TABLELOG_MAX+1] = {0}; + if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */ + for (n=0; n<=nonNullRank; n++) + nbPerRank[huffNode[n].nbBits]++; + /* determine stating value per rank */ + { U16 min = 0; + for (n=maxNbBits; n>0; n--) { + valPerRank[n] = min; /* get starting value within each rank */ + min += nbPerRank[n]; + min >>= 1; + } } + for (n=0; n<=maxSymbolValue; n++) + tree[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */ + for (n=0; n<=maxSymbolValue; n++) + tree[n].val = valPerRank[tree[n].nbBits]++; /* assign value within rank, symbol order */ + } + + return maxNbBits; +} + +static size_t HUF_estimateCompressedSize(HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) +{ + size_t nbBits = 0; + int s; + for (s = 0; s <= (int)maxSymbolValue; ++s) { + nbBits += CTable[s].nbBits * count[s]; + } + return nbBits >> 3; +} + +static int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) { + int bad = 0; + int s; + for (s = 0; s <= (int)maxSymbolValue; ++s) { + bad |= (count[s] != 0) & (CTable[s].nbBits == 0); + } + return !bad; +} + +static void HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable) +{ + BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits); +} + 
+size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); } + +#define HUF_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s)) + +#define HUF_FLUSHBITS_1(stream) \ + if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream) + +#define HUF_FLUSHBITS_2(stream) \ + if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream) + +size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) +{ + const BYTE* ip = (const BYTE*) src; + BYTE* const ostart = (BYTE*)dst; + BYTE* const oend = ostart + dstSize; + BYTE* op = ostart; + size_t n; + const unsigned fast = (dstSize >= HUF_BLOCKBOUND(srcSize)); + BIT_CStream_t bitC; + + /* init */ + if (dstSize < 8) return 0; /* not enough space to compress */ + { size_t const initErr = BIT_initCStream(&bitC, op, oend-op); + if (HUF_isError(initErr)) return 0; } + + n = srcSize & ~3; /* join to mod 4 */ + switch (srcSize & 3) + { + case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable); + HUF_FLUSHBITS_2(&bitC); + case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable); + HUF_FLUSHBITS_1(&bitC); + case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable); + HUF_FLUSHBITS(&bitC); + case 0 : + default: ; + } + + for (; n>0; n-=4) { /* note : n&3==0 at this stage */ + HUF_encodeSymbol(&bitC, ip[n- 1], CTable); + HUF_FLUSHBITS_1(&bitC); + HUF_encodeSymbol(&bitC, ip[n- 2], CTable); + HUF_FLUSHBITS_2(&bitC); + HUF_encodeSymbol(&bitC, ip[n- 3], CTable); + HUF_FLUSHBITS_1(&bitC); + HUF_encodeSymbol(&bitC, ip[n- 4], CTable); + HUF_FLUSHBITS(&bitC); + } + + return BIT_closeCStream(&bitC); +} + + +size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable) +{ + size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */ + const BYTE* ip = (const BYTE*) src; + const BYTE* const iend = ip + srcSize; + BYTE* const ostart = (BYTE*) dst; + BYTE* const oend = ostart + dstSize; + 
BYTE* op = ostart; + + if (dstSize < 6 + 1 + 1 + 1 + 8) return 0; /* minimum space to compress successfully */ + if (srcSize < 12) return 0; /* no saving possible : too small input */ + op += 6; /* jumpTable */ + + { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) ); + if (cSize==0) return 0; + MEM_writeLE16(ostart, (U16)cSize); + op += cSize; + } + + ip += segmentSize; + { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) ); + if (cSize==0) return 0; + MEM_writeLE16(ostart+2, (U16)cSize); + op += cSize; + } + + ip += segmentSize; + { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) ); + if (cSize==0) return 0; + MEM_writeLE16(ostart+4, (U16)cSize); + op += cSize; + } + + ip += segmentSize; + { CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, iend-ip, CTable) ); + if (cSize==0) return 0; + op += cSize; + } + + return op-ostart; +} + + +static size_t HUF_compressCTable_internal( + BYTE* const ostart, BYTE* op, BYTE* const oend, + const void* src, size_t srcSize, + unsigned singleStream, const HUF_CElt* CTable) +{ + size_t const cSize = singleStream ? 
+ HUF_compress1X_usingCTable(op, oend - op, src, srcSize, CTable) : + HUF_compress4X_usingCTable(op, oend - op, src, srcSize, CTable); + if (HUF_isError(cSize)) { return cSize; } + if (cSize==0) { return 0; } /* uncompressible */ + op += cSize; + /* check compressibility */ + if ((size_t)(op-ostart) >= srcSize-1) { return 0; } + return op-ostart; +} + + +/* `workSpace` must a table of at least 1024 unsigned */ +static size_t HUF_compress_internal ( + void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned huffLog, + unsigned singleStream, + void* workSpace, size_t wkspSize, + HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat) +{ + BYTE* const ostart = (BYTE*)dst; + BYTE* const oend = ostart + dstSize; + BYTE* op = ostart; + + U32* count; + size_t const countSize = sizeof(U32) * (HUF_SYMBOLVALUE_MAX + 1); + HUF_CElt* CTable; + size_t const CTableSize = sizeof(HUF_CElt) * (HUF_SYMBOLVALUE_MAX + 1); + + /* checks & inits */ + if (wkspSize < sizeof(huffNodeTable) + countSize + CTableSize) return ERROR(GENERIC); + if (!srcSize) return 0; /* Uncompressed (note : 1 means rle, so first byte must be correct) */ + if (!dstSize) return 0; /* cannot fit within dst budget */ + if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */ + if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); + if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX; + if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT; + + count = (U32*)workSpace; + workSpace = (BYTE*)workSpace + countSize; + wkspSize -= countSize; + CTable = (HUF_CElt*)workSpace; + workSpace = (BYTE*)workSpace + CTableSize; + wkspSize -= CTableSize; + + /* Heuristic : If we don't need to check the validity of the old table use the old table for small inputs */ + if (preferRepeat && repeat && *repeat == HUF_repeat_valid) { + return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable); + } + + /* Scan 
input and build symbol stats */ + { CHECK_V_F(largest, FSE_count_wksp (count, &maxSymbolValue, (const BYTE*)src, srcSize, (U32*)workSpace) ); + if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */ + if (largest <= (srcSize >> 7)+1) return 0; /* Fast heuristic : not compressible enough */ + } + + /* Check validity of previous table */ + if (repeat && *repeat == HUF_repeat_check && !HUF_validateCTable(oldHufTable, count, maxSymbolValue)) { + *repeat = HUF_repeat_none; + } + /* Heuristic : use existing table for small inputs */ + if (preferRepeat && repeat && *repeat != HUF_repeat_none) { + return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable); + } + + /* Build Huffman Tree */ + huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue); + { CHECK_V_F(maxBits, HUF_buildCTable_wksp (CTable, count, maxSymbolValue, huffLog, workSpace, wkspSize) ); + huffLog = (U32)maxBits; + /* Zero the unused symbols so we can check it for validity */ + memset(CTable + maxSymbolValue + 1, 0, CTableSize - (maxSymbolValue + 1) * sizeof(HUF_CElt)); + } + + /* Write table description header */ + { CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, CTable, maxSymbolValue, huffLog) ); + /* Check if using the previous table will be beneficial */ + if (repeat && *repeat != HUF_repeat_none) { + size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, count, maxSymbolValue); + size_t const newSize = HUF_estimateCompressedSize(CTable, count, maxSymbolValue); + if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) { + return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable); + } + } + /* Use the new table */ + if (hSize + 12ul >= srcSize) { return 0; } + op += hSize; + if (repeat) { *repeat = HUF_repeat_none; } + if (oldHufTable) { memcpy(oldHufTable, CTable, CTableSize); } /* Save the new table */ + } + return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, 
singleStream, CTable); +} + + +size_t HUF_compress1X_wksp (void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned huffLog, + void* workSpace, size_t wkspSize) +{ + return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, NULL, NULL, 0); +} + +size_t HUF_compress1X_repeat (void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned huffLog, + void* workSpace, size_t wkspSize, + HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat) +{ + return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, hufTable, repeat, preferRepeat); +} + +size_t HUF_compress4X_wksp (void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned huffLog, + void* workSpace, size_t wkspSize) +{ + return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, NULL, NULL, 0); +} + +size_t HUF_compress4X_repeat (void* dst, size_t dstSize, + const void* src, size_t srcSize, + unsigned maxSymbolValue, unsigned huffLog, + void* workSpace, size_t wkspSize, + HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat) +{ + return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, hufTable, repeat, preferRepeat); +} diff --git a/contrib/linux-kernel/lib/zstd/huf_decompress.c b/contrib/linux-kernel/lib/zstd/huf_decompress.c new file mode 100644 index 000000000..f73223c4e --- /dev/null +++ b/contrib/linux-kernel/lib/zstd/huf_decompress.c @@ -0,0 +1,835 @@ +/* ****************************************************************** + Huffman decoder, part of New Generation Entropy library + Copyright (C) 2013-2016, Yann Collet. 
+ + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + You can contact the author at : + - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy + - Public forum : https://groups.google.com/forum/#!forum/lz4c +****************************************************************** */ + +/* ************************************************************** +* Compiler specifics +****************************************************************/ +#define FORCE_INLINE static __always_inline + + +/* ************************************************************** +* Dependencies +****************************************************************/ +#include +#include /* memcpy, memset */ +#include "bitstream.h" /* BIT_* */ +#include "fse.h" /* header compression */ +#include "huf.h" + + +/* ************************************************************** +* Error Management +****************************************************************/ +#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ + + +/*-***************************/ +/* generic DTableDesc */ +/*-***************************/ + +typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc; + +static DTableDesc HUF_getDTableDesc(const HUF_DTable* table) +{ + DTableDesc dtd; + memcpy(&dtd, table, sizeof(dtd)); + return dtd; +} + + +/*-***************************/ +/* single-symbol decoding */ +/*-***************************/ + +typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2; /* single-symbol decoding */ + +size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize) +{ + BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; + U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */ + U32 tableLog = 0; + U32 nbSymbols = 0; + size_t iSize; + void* const dtPtr = DTable + 1; + HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr; + + HUF_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable)); + /* memset(huffWeight, 0, 
sizeof(huffWeight)); */ /* is not necessary, even though some analyzer complain ... */ + + iSize = HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize); + if (HUF_isError(iSize)) return iSize; + + /* Table header */ + { DTableDesc dtd = HUF_getDTableDesc(DTable); + if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */ + dtd.tableType = 0; + dtd.tableLog = (BYTE)tableLog; + memcpy(DTable, &dtd, sizeof(dtd)); + } + + /* Calculate starting value for each rank */ + { U32 n, nextRankStart = 0; + for (n=1; n> 1; + U32 u; + HUF_DEltX2 D; + D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w); + for (u = rankVal[w]; u < rankVal[w] + length; u++) + dt[u] = D; + rankVal[w] += length; + } } + + return iSize; +} + + +static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog) +{ + size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */ + BYTE const c = dt[val].byte; + BIT_skipBits(Dstream, dt[val].nbBits); + return c; +} + +#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \ + *ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog) + +#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \ + if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \ + HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) + +#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \ + if (MEM_64bits()) \ + HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) + +FORCE_INLINE size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog) +{ + BYTE* const pStart = p; + + /* up to 4 symbols at a time */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4)) { + HUF_DECODE_SYMBOLX2_2(p, bitDPtr); + HUF_DECODE_SYMBOLX2_1(p, bitDPtr); + HUF_DECODE_SYMBOLX2_2(p, bitDPtr); + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + } + + /* closer to the end */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < 
pEnd)) + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + + /* no more data to retrieve from bitstream, hence no need to reload */ + while (p < pEnd) + HUF_DECODE_SYMBOLX2_0(p, bitDPtr); + + return pEnd-pStart; +} + +static size_t HUF_decompress1X2_usingDTable_internal( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + BYTE* op = (BYTE*)dst; + BYTE* const oend = op + dstSize; + const void* dtPtr = DTable + 1; + const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr; + BIT_DStream_t bitD; + DTableDesc const dtd = HUF_getDTableDesc(DTable); + U32 const dtLog = dtd.tableLog; + + { size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize); + if (HUF_isError(errorCode)) return errorCode; } + + HUF_decodeStreamX2(op, &bitD, oend, dt, dtLog); + + /* check */ + if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected); + + return dstSize; +} + +size_t HUF_decompress1X2_usingDTable( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + DTableDesc dtd = HUF_getDTableDesc(DTable); + if (dtd.tableType != 0) return ERROR(GENERIC); + return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable); +} + +size_t HUF_decompress1X2_DCtx (HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + const BYTE* ip = (const BYTE*) cSrc; + + size_t const hSize = HUF_readDTableX2 (DCtx, cSrc, cSrcSize); + if (HUF_isError(hSize)) return hSize; + if (hSize >= cSrcSize) return ERROR(srcSize_wrong); + ip += hSize; cSrcSize -= hSize; + + return HUF_decompress1X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx); +} + + +static size_t HUF_decompress4X2_usingDTable_internal( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + /* Check */ + if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ + + { const BYTE* const istart = (const BYTE*) cSrc; + BYTE* 
const ostart = (BYTE*) dst; + BYTE* const oend = ostart + dstSize; + const void* const dtPtr = DTable + 1; + const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr; + + /* Init */ + BIT_DStream_t bitD1; + BIT_DStream_t bitD2; + BIT_DStream_t bitD3; + BIT_DStream_t bitD4; + size_t const length1 = MEM_readLE16(istart); + size_t const length2 = MEM_readLE16(istart+2); + size_t const length3 = MEM_readLE16(istart+4); + size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6); + const BYTE* const istart1 = istart + 6; /* jumpTable */ + const BYTE* const istart2 = istart1 + length1; + const BYTE* const istart3 = istart2 + length2; + const BYTE* const istart4 = istart3 + length3; + const size_t segmentSize = (dstSize+3) / 4; + BYTE* const opStart2 = ostart + segmentSize; + BYTE* const opStart3 = opStart2 + segmentSize; + BYTE* const opStart4 = opStart3 + segmentSize; + BYTE* op1 = ostart; + BYTE* op2 = opStart2; + BYTE* op3 = opStart3; + BYTE* op4 = opStart4; + U32 endSignal; + DTableDesc const dtd = HUF_getDTableDesc(DTable); + U32 const dtLog = dtd.tableLog; + + if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ + { size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1); + if (HUF_isError(errorCode)) return errorCode; } + { size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2); + if (HUF_isError(errorCode)) return errorCode; } + { size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3); + if (HUF_isError(errorCode)) return errorCode; } + { size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4); + if (HUF_isError(errorCode)) return errorCode; } + + /* 16-32 symbols per loop (4-8 symbols per stream) */ + endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); + for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; ) { + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + 
HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); + HUF_DECODE_SYMBOLX2_1(op1, &bitD1); + HUF_DECODE_SYMBOLX2_1(op2, &bitD2); + HUF_DECODE_SYMBOLX2_1(op3, &bitD3); + HUF_DECODE_SYMBOLX2_1(op4, &bitD4); + HUF_DECODE_SYMBOLX2_2(op1, &bitD1); + HUF_DECODE_SYMBOLX2_2(op2, &bitD2); + HUF_DECODE_SYMBOLX2_2(op3, &bitD3); + HUF_DECODE_SYMBOLX2_2(op4, &bitD4); + HUF_DECODE_SYMBOLX2_0(op1, &bitD1); + HUF_DECODE_SYMBOLX2_0(op2, &bitD2); + HUF_DECODE_SYMBOLX2_0(op3, &bitD3); + HUF_DECODE_SYMBOLX2_0(op4, &bitD4); + endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); + } + + /* check corruption */ + if (op1 > opStart2) return ERROR(corruption_detected); + if (op2 > opStart3) return ERROR(corruption_detected); + if (op3 > opStart4) return ERROR(corruption_detected); + /* note : op4 supposed already verified within main loop */ + + /* finish bitStreams one by one */ + HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog); + HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog); + HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog); + HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog); + + /* check */ + endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); + if (!endSignal) return ERROR(corruption_detected); + + /* decoded size */ + return dstSize; + } +} + + +size_t HUF_decompress4X2_usingDTable( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + DTableDesc dtd = HUF_getDTableDesc(DTable); + if (dtd.tableType != 0) return ERROR(GENERIC); + return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable); +} + + +size_t HUF_decompress4X2_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + const BYTE* ip = (const BYTE*) cSrc; + + size_t const hSize = HUF_readDTableX2 (dctx, cSrc, cSrcSize); + if (HUF_isError(hSize)) return 
hSize; + if (hSize >= cSrcSize) return ERROR(srcSize_wrong); + ip += hSize; cSrcSize -= hSize; + + return HUF_decompress4X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, dctx); +} + +/* *************************/ +/* double-symbols decoding */ +/* *************************/ +typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4; /* double-symbols decoding */ + +typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t; + +/* HUF_fillDTableX4Level2() : + * `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */ +static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed, + const U32* rankValOrigin, const int minWeight, + const sortedSymbol_t* sortedSymbols, const U32 sortedListSize, + U32 nbBitsBaseline, U16 baseSeq) +{ + HUF_DEltX4 DElt; + U32 rankVal[HUF_TABLELOG_MAX + 1]; + + /* get pre-calculated rankVal */ + memcpy(rankVal, rankValOrigin, sizeof(rankVal)); + + /* fill skipped values */ + if (minWeight>1) { + U32 i, skipSize = rankVal[minWeight]; + MEM_writeLE16(&(DElt.sequence), baseSeq); + DElt.nbBits = (BYTE)(consumed); + DElt.length = 1; + for (i = 0; i < skipSize; i++) + DTable[i] = DElt; + } + + /* fill DTable */ + { U32 s; for (s=0; s= 1 */ + + rankVal[weight] += length; + } } +} + +typedef U32 rankVal_t[HUF_TABLELOG_MAX][HUF_TABLELOG_MAX + 1]; + +static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog, + const sortedSymbol_t* sortedList, const U32 sortedListSize, + const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight, + const U32 nbBitsBaseline) +{ + U32 rankVal[HUF_TABLELOG_MAX + 1]; + const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */ + const U32 minBits = nbBitsBaseline - maxWeight; + U32 s; + + memcpy(rankVal, rankValOrigin, sizeof(rankVal)); + + /* fill DTable */ + for (s=0; s= minBits) { /* enough room for a second symbol */ + U32 sortedRank; + int minWeight = nbBits + scaleLog; + if (minWeight < 1) 
minWeight = 1; + sortedRank = rankStart[minWeight]; + HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits, + rankValOrigin[nbBits], minWeight, + sortedList+sortedRank, sortedListSize-sortedRank, + nbBitsBaseline, symbol); + } else { + HUF_DEltX4 DElt; + MEM_writeLE16(&(DElt.sequence), symbol); + DElt.nbBits = (BYTE)(nbBits); + DElt.length = 1; + { U32 const end = start + length; + U32 u; + for (u = start; u < end; u++) DTable[u] = DElt; + } } + rankVal[weight] += length; + } +} + +size_t HUF_readDTableX4 (HUF_DTable* DTable, const void* src, size_t srcSize) +{ + BYTE weightList[HUF_SYMBOLVALUE_MAX + 1]; + sortedSymbol_t sortedSymbol[HUF_SYMBOLVALUE_MAX + 1]; + U32 rankStats[HUF_TABLELOG_MAX + 1] = { 0 }; + U32 rankStart0[HUF_TABLELOG_MAX + 2] = { 0 }; + U32* const rankStart = rankStart0+1; + rankVal_t rankVal; + U32 tableLog, maxW, sizeOfSort, nbSymbols; + DTableDesc dtd = HUF_getDTableDesc(DTable); + U32 const maxTableLog = dtd.maxTableLog; + size_t iSize; + void* dtPtr = DTable+1; /* force compiler to avoid strict-aliasing */ + HUF_DEltX4* const dt = (HUF_DEltX4*)dtPtr; + + HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */ + if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); + /* memset(weightList, 0, sizeof(weightList)); */ /* is not necessary, even though some analyzer complain ... 
*/ + + iSize = HUF_readStats(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize); + if (HUF_isError(iSize)) return iSize; + + /* check result */ + if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */ + + /* find maxWeight */ + for (maxW = tableLog; rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */ + + /* Get start index of each weight */ + { U32 w, nextRankStart = 0; + for (w=1; w> consumed; + } } } } + + HUF_fillDTableX4(dt, maxTableLog, + sortedSymbol, sizeOfSort, + rankStart0, rankVal, maxW, + tableLog+1); + + dtd.tableLog = (BYTE)maxTableLog; + dtd.tableType = 1; + memcpy(DTable, &dtd, sizeof(dtd)); + return iSize; +} + + +static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog) +{ + size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ + memcpy(op, dt+val, 2); + BIT_skipBits(DStream, dt[val].nbBits); + return dt[val].length; +} + +static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog) +{ + size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */ + memcpy(op, dt+val, 1); + if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits); + else { + if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) { + BIT_skipBits(DStream, dt[val].nbBits); + if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8)) + DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. 
Note : can't easily extract nbBits from just this symbol */ + } } + return 1; +} + + +#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \ + ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) + +#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \ + if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \ + ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) + +#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \ + if (MEM_64bits()) \ + ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog) + +FORCE_INLINE size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog) +{ + BYTE* const pStart = p; + + /* up to 8 symbols at a time */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) { + HUF_DECODE_SYMBOLX4_2(p, bitDPtr); + HUF_DECODE_SYMBOLX4_1(p, bitDPtr); + HUF_DECODE_SYMBOLX4_2(p, bitDPtr); + HUF_DECODE_SYMBOLX4_0(p, bitDPtr); + } + + /* closer to end : up to 2 symbols at a time */ + while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2)) + HUF_DECODE_SYMBOLX4_0(p, bitDPtr); + + while (p <= pEnd-2) + HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */ + + if (p < pEnd) + p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog); + + return p-pStart; +} + + +static size_t HUF_decompress1X4_usingDTable_internal( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + BIT_DStream_t bitD; + + /* Init */ + { size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize); + if (HUF_isError(errorCode)) return errorCode; + } + + /* decode */ + { BYTE* const ostart = (BYTE*) dst; + BYTE* const oend = ostart + dstSize; + const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */ + const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr; + DTableDesc const dtd = HUF_getDTableDesc(DTable); + HUF_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog); + } + + 
/* check */ + if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected); + + /* decoded size */ + return dstSize; +} + +size_t HUF_decompress1X4_usingDTable( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + DTableDesc dtd = HUF_getDTableDesc(DTable); + if (dtd.tableType != 1) return ERROR(GENERIC); + return HUF_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable); +} + +size_t HUF_decompress1X4_DCtx (HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + const BYTE* ip = (const BYTE*) cSrc; + + size_t const hSize = HUF_readDTableX4 (DCtx, cSrc, cSrcSize); + if (HUF_isError(hSize)) return hSize; + if (hSize >= cSrcSize) return ERROR(srcSize_wrong); + ip += hSize; cSrcSize -= hSize; + + return HUF_decompress1X4_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx); +} + +static size_t HUF_decompress4X4_usingDTable_internal( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */ + + { const BYTE* const istart = (const BYTE*) cSrc; + BYTE* const ostart = (BYTE*) dst; + BYTE* const oend = ostart + dstSize; + const void* const dtPtr = DTable+1; + const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr; + + /* Init */ + BIT_DStream_t bitD1; + BIT_DStream_t bitD2; + BIT_DStream_t bitD3; + BIT_DStream_t bitD4; + size_t const length1 = MEM_readLE16(istart); + size_t const length2 = MEM_readLE16(istart+2); + size_t const length3 = MEM_readLE16(istart+4); + size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6); + const BYTE* const istart1 = istart + 6; /* jumpTable */ + const BYTE* const istart2 = istart1 + length1; + const BYTE* const istart3 = istart2 + length2; + const BYTE* const istart4 = istart3 + length3; + size_t const segmentSize = (dstSize+3) / 4; + BYTE* const opStart2 = ostart + segmentSize; + 
BYTE* const opStart3 = opStart2 + segmentSize; + BYTE* const opStart4 = opStart3 + segmentSize; + BYTE* op1 = ostart; + BYTE* op2 = opStart2; + BYTE* op3 = opStart3; + BYTE* op4 = opStart4; + U32 endSignal; + DTableDesc const dtd = HUF_getDTableDesc(DTable); + U32 const dtLog = dtd.tableLog; + + if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */ + { size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1); + if (HUF_isError(errorCode)) return errorCode; } + { size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2); + if (HUF_isError(errorCode)) return errorCode; } + { size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3); + if (HUF_isError(errorCode)) return errorCode; } + { size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4); + if (HUF_isError(errorCode)) return errorCode; } + + /* 16-32 symbols per loop (4-8 symbols per stream) */ + endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); + for ( ; (endSignal==BIT_DStream_unfinished) & (op4<(oend-(sizeof(bitD4.bitContainer)-1))) ; ) { + HUF_DECODE_SYMBOLX4_2(op1, &bitD1); + HUF_DECODE_SYMBOLX4_2(op2, &bitD2); + HUF_DECODE_SYMBOLX4_2(op3, &bitD3); + HUF_DECODE_SYMBOLX4_2(op4, &bitD4); + HUF_DECODE_SYMBOLX4_1(op1, &bitD1); + HUF_DECODE_SYMBOLX4_1(op2, &bitD2); + HUF_DECODE_SYMBOLX4_1(op3, &bitD3); + HUF_DECODE_SYMBOLX4_1(op4, &bitD4); + HUF_DECODE_SYMBOLX4_2(op1, &bitD1); + HUF_DECODE_SYMBOLX4_2(op2, &bitD2); + HUF_DECODE_SYMBOLX4_2(op3, &bitD3); + HUF_DECODE_SYMBOLX4_2(op4, &bitD4); + HUF_DECODE_SYMBOLX4_0(op1, &bitD1); + HUF_DECODE_SYMBOLX4_0(op2, &bitD2); + HUF_DECODE_SYMBOLX4_0(op3, &bitD3); + HUF_DECODE_SYMBOLX4_0(op4, &bitD4); + + endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4); + } + + /* check corruption */ + if (op1 > opStart2) return ERROR(corruption_detected); + if (op2 > opStart3) return 
ERROR(corruption_detected); + if (op3 > opStart4) return ERROR(corruption_detected); + /* note : op4 already verified within main loop */ + + /* finish bitStreams one by one */ + HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog); + HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog); + HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog); + HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog); + + /* check */ + { U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4); + if (!endCheck) return ERROR(corruption_detected); } + + /* decoded size */ + return dstSize; + } +} + + +size_t HUF_decompress4X4_usingDTable( + void* dst, size_t dstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + DTableDesc dtd = HUF_getDTableDesc(DTable); + if (dtd.tableType != 1) return ERROR(GENERIC); + return HUF_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable); +} + + +size_t HUF_decompress4X4_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + const BYTE* ip = (const BYTE*) cSrc; + + size_t hSize = HUF_readDTableX4 (dctx, cSrc, cSrcSize); + if (HUF_isError(hSize)) return hSize; + if (hSize >= cSrcSize) return ERROR(srcSize_wrong); + ip += hSize; cSrcSize -= hSize; + + return HUF_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx); +} + + +/* ********************************/ +/* Generic decompression selector */ +/* ********************************/ + +size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + DTableDesc const dtd = HUF_getDTableDesc(DTable); + return dtd.tableType ? 
HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) : + HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable); +} + +size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, + const void* cSrc, size_t cSrcSize, + const HUF_DTable* DTable) +{ + DTableDesc const dtd = HUF_getDTableDesc(DTable); + return dtd.tableType ? HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) : + HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable); +} + + +typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t; +static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] = +{ + /* single, double, quad */ + {{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */ + {{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */ + {{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */ + {{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */ + {{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */ + {{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */ + {{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */ + {{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */ + {{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */ + {{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */ + {{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */ + {{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */ + {{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */ + {{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */ + {{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */ + {{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */ +}; + +/** HUF_selectDecoder() : +* Tells which decoder is likely to decode faster, +* based on a set of pre-determined metrics. +* @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 . 
+* Assumption : 0 < cSrcSize < dstSize <= 128 KB */ +U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize) +{ + /* decoder timing evaluation */ + U32 const Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */ + U32 const D256 = (U32)(dstSize >> 8); + U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256); + U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256); + DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, for cache eviction */ + + return DTime1 < DTime0; +} + + +typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); + +size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + /* validation checks */ + if (dstSize == 0) return ERROR(dstSize_tooSmall); + if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ + if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ + if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ + + { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); + return algoNb ? HUF_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) : + HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ; + } +} + +size_t HUF_decompress4X_hufOnly (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + /* validation checks */ + if (dstSize == 0) return ERROR(dstSize_tooSmall); + if ((cSrcSize >= dstSize) || (cSrcSize <= 1)) return ERROR(corruption_detected); /* invalid */ + + { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); + return algoNb ? 
HUF_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) : + HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ; + } +} + +size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize) +{ + /* validation checks */ + if (dstSize == 0) return ERROR(dstSize_tooSmall); + if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */ + if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */ + if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */ + + { U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize); + return algoNb ? HUF_decompress1X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) : + HUF_decompress1X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ; + } +} diff --git a/contrib/linux-kernel/lib/zstd/mem.h b/contrib/linux-kernel/lib/zstd/mem.h new file mode 100644 index 000000000..76cae04fa --- /dev/null +++ b/contrib/linux-kernel/lib/zstd/mem.h @@ -0,0 +1,209 @@ +/** + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. 
+ */ + +#ifndef MEM_H_MODULE +#define MEM_H_MODULE + +/*-**************************************** +* Dependencies +******************************************/ +#include +#include /* size_t, ptrdiff_t */ +#include /* memcpy */ + + +/*-**************************************** +* Compiler specifics +******************************************/ +#define MEM_STATIC static __inline __attribute__((unused)) + +/* code only tested on 32 and 64 bits systems */ +#define MEM_STATIC_ASSERT(c) { enum { MEM_static_assert = 1/(int)(!!(c)) }; } +MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); } + + +/*-************************************************************** +* Basic Types +*****************************************************************/ +typedef uint8_t BYTE; +typedef uint16_t U16; +typedef int16_t S16; +typedef uint32_t U32; +typedef int32_t S32; +typedef uint64_t U64; +typedef int64_t S64; +typedef ptrdiff_t iPtrDiff; +typedef uintptr_t uPtrDiff; + + +/*-************************************************************** +* Memory I/O +*****************************************************************/ +MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; } +MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; } + +#if defined(__LITTLE_ENDIAN) +# define MEM_LITTLE_ENDIAN 1 +#else +# define MEM_LITTLE_ENDIAN 0 +#endif + +MEM_STATIC unsigned MEM_isLittleEndian(void) +{ + return MEM_LITTLE_ENDIAN; +} + +MEM_STATIC U16 MEM_read16(const void* memPtr) +{ + return get_unaligned((const U16*)memPtr); +} + +MEM_STATIC U32 MEM_read32(const void* memPtr) +{ + return get_unaligned((const U32*)memPtr); +} + +MEM_STATIC U64 MEM_read64(const void* memPtr) +{ + return get_unaligned((const U64*)memPtr); +} + +MEM_STATIC size_t MEM_readST(const void* memPtr) +{ + return get_unaligned((const size_t*)memPtr); +} + +MEM_STATIC void MEM_write16(void* memPtr, U16 value) +{ + put_unaligned(value, (U16*)memPtr); +} + +MEM_STATIC 
void MEM_write32(void* memPtr, U32 value) +{ + put_unaligned(value, (U32*)memPtr); +} + +MEM_STATIC void MEM_write64(void* memPtr, U64 value) +{ + put_unaligned(value, (U64*)memPtr); +} + +/*=== Little endian r/w ===*/ + +MEM_STATIC U16 MEM_readLE16(const void* memPtr) +{ + return get_unaligned_le16(memPtr); +} + +MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val) +{ + put_unaligned_le16(val, memPtr); +} + +MEM_STATIC U32 MEM_readLE24(const void* memPtr) +{ + return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16); +} + +MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val) +{ + MEM_writeLE16(memPtr, (U16)val); + ((BYTE*)memPtr)[2] = (BYTE)(val>>16); +} + +MEM_STATIC U32 MEM_readLE32(const void* memPtr) +{ + return get_unaligned_le32(memPtr); +} + +MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32) +{ + put_unaligned_le32(val32, memPtr); +} + +MEM_STATIC U64 MEM_readLE64(const void* memPtr) +{ + return get_unaligned_le64(memPtr); +} + +MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64) +{ + put_unaligned_le64(val64, memPtr); +} + +MEM_STATIC size_t MEM_readLEST(const void* memPtr) +{ + if (MEM_32bits()) + return (size_t)MEM_readLE32(memPtr); + else + return (size_t)MEM_readLE64(memPtr); +} + +MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val) +{ + if (MEM_32bits()) + MEM_writeLE32(memPtr, (U32)val); + else + MEM_writeLE64(memPtr, (U64)val); +} + +/*=== Big endian r/w ===*/ + +MEM_STATIC U32 MEM_readBE32(const void* memPtr) +{ + return get_unaligned_be32(memPtr); +} + +MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32) +{ + put_unaligned_be32(val32, memPtr); +} + +MEM_STATIC U64 MEM_readBE64(const void* memPtr) +{ + return get_unaligned_be64(memPtr); +} + +MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64) +{ + put_unaligned_be64(val64, memPtr); +} + +MEM_STATIC size_t MEM_readBEST(const void* memPtr) +{ + if (MEM_32bits()) + return (size_t)MEM_readBE32(memPtr); + else + return (size_t)MEM_readBE64(memPtr); +} + +MEM_STATIC void 
MEM_writeBEST(void* memPtr, size_t val) +{ + if (MEM_32bits()) + MEM_writeBE32(memPtr, (U32)val); + else + MEM_writeBE64(memPtr, (U64)val); +} + + +/* function safe only for comparisons */ +MEM_STATIC U32 MEM_readMINMATCH(const void* memPtr, U32 length) +{ + switch (length) + { + default : + case 4 : return MEM_read32(memPtr); + case 3 : if (MEM_isLittleEndian()) + return MEM_read32(memPtr)<<8; + else + return MEM_read32(memPtr)>>8; + } +} + +#endif /* MEM_H_MODULE */ diff --git a/contrib/linux-kernel/lib/zstd/xxhash.c b/contrib/linux-kernel/lib/zstd/xxhash.c new file mode 100644 index 000000000..0d301ad86 --- /dev/null +++ b/contrib/linux-kernel/lib/zstd/xxhash.c @@ -0,0 +1,700 @@ +/* +* xxHash - Fast Hash algorithm +* Copyright (C) 2012-2016, Yann Collet +* +* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following disclaimer +* in the documentation and/or other materials provided with the +* distribution. +* +* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +* A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +* You can contact the author at : +* - xxHash homepage: http://www.xxhash.com +* - xxHash source repository : https://github.com/Cyan4973/xxHash +*/ + + +/* ************************************* +* Tuning parameters +***************************************/ +/*!XXH_ACCEPT_NULL_INPUT_POINTER : + * If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer. + * When this option is enabled, xxHash output for null input pointers will be the same as a null-length input. + * By default, this option is disabled. To enable it, uncomment below define : + */ +/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */ + +/*!XXH_FORCE_NATIVE_FORMAT : + * By default, xxHash library provides endian-independant Hash values, based on little-endian convention. + * Results are therefore identical for little-endian and big-endian CPU. + * This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format. + * Should endian-independance be of no importance for your application, you may set the #define below to 1, + * to improve speed for Big-endian CPU. + * This option has no impact on Little_Endian CPU. + */ +#define XXH_FORCE_NATIVE_FORMAT 0 + +/*!XXH_FORCE_ALIGN_CHECK : + * This is a minor performance trick, only useful with lots of very small keys. + * It means : check for aligned/unaligned input. 
+ * The check costs one initial branch per hash; set to 0 when the input data + * is guaranteed to be aligned. + */ +#define XXH_FORCE_ALIGN_CHECK 0 + + +/* ************************************* +* Includes & Memory related functions +***************************************/ +/* Modify the local functions below should you wish to use some other memory routines */ +/* for memcpy() */ +#include +static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); } + +#include "xxhash.h" +#include "mem.h" + + +/* ************************************* +* Compiler Specific Options +***************************************/ +#include +#define FORCE_INLINE static __always_inline + + +/* **************************************** +* Compiler-specific Functions and Macros +******************************************/ +#define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r))) +#define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r))) + +/* ************************************* +* Architecture Macros +***************************************/ +typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess; + +/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */ +#ifndef XXH_CPU_LITTLE_ENDIAN +# define XXH_CPU_LITTLE_ENDIAN MEM_LITTLE_ENDIAN +#endif + + +/* *************************** +* Memory reads +*****************************/ +typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment; + +FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align) +{ + (void)endian; + (void)align; + return MEM_readLE32(ptr); +} + +FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian) +{ + return XXH_readLE32_align(ptr, endian, XXH_unaligned); +} + +static U32 XXH_readBE32(const void* ptr) +{ + return MEM_readBE32(ptr); +} + +FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align) +{ + (void)endian; + (void)align; + return 
MEM_readLE64(ptr); +} + +FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian) +{ + return XXH_readLE64_align(ptr, endian, XXH_unaligned); +} + +static U64 XXH_readBE64(const void* ptr) +{ + return MEM_readBE64(ptr); +} + + +/* ************************************* +* Macros +***************************************/ +#define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */ + + +/* ************************************* +* Constants +***************************************/ +static const U32 PRIME32_1 = 2654435761U; +static const U32 PRIME32_2 = 2246822519U; +static const U32 PRIME32_3 = 3266489917U; +static const U32 PRIME32_4 = 668265263U; +static const U32 PRIME32_5 = 374761393U; + +static const U64 PRIME64_1 = 11400714785074694791ULL; +static const U64 PRIME64_2 = 14029467366897019727ULL; +static const U64 PRIME64_3 = 1609587929392839161ULL; +static const U64 PRIME64_4 = 9650029242287828579ULL; +static const U64 PRIME64_5 = 2870177450012600261ULL; + +XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; } + + +/* ************************** +* Utils +****************************/ +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState) +{ + memcpy(dstState, srcState, sizeof(*dstState)); +} + +XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState) +{ + memcpy(dstState, srcState, sizeof(*dstState)); +} + + +/* *************************** +* Simple Hash Functions +*****************************/ + +static U32 XXH32_round(U32 seed, U32 input) +{ + seed += input * PRIME32_2; + seed = XXH_rotl32(seed, 13); + seed *= PRIME32_1; + return seed; +} + +FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align) +{ + const BYTE* p = (const BYTE*)input; + const BYTE* bEnd = p + len; + U32 h32; +#define XXH_get32bits(p) XXH_readLE32_align(p, 
endian, align) + +#ifdef XXH_ACCEPT_NULL_INPUT_POINTER + if (p==NULL) { + len=0; + bEnd=p=(const BYTE*)(size_t)16; + } +#endif + + if (len>=16) { + const BYTE* const limit = bEnd - 16; + U32 v1 = seed + PRIME32_1 + PRIME32_2; + U32 v2 = seed + PRIME32_2; + U32 v3 = seed + 0; + U32 v4 = seed - PRIME32_1; + + do { + v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4; + v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4; + v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4; + v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4; + } while (p<=limit); + + h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18); + } else { + h32 = seed + PRIME32_5; + } + + h32 += (U32) len; + + while (p+4<=bEnd) { + h32 += XXH_get32bits(p) * PRIME32_3; + h32 = XXH_rotl32(h32, 17) * PRIME32_4 ; + p+=4; + } + + while (p> 15; + h32 *= PRIME32_2; + h32 ^= h32 >> 13; + h32 *= PRIME32_3; + h32 ^= h32 >> 16; + + return h32; +} + + +XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed) +{ +#if 0 + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH32_CREATESTATE_STATIC(state); + XXH32_reset(state, seed); + XXH32_update(state, input, len); + return XXH32_digest(state); +#else + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */ + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); + else + return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); + } } + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); + else + return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); +#endif +} + + +static U64 XXH64_round(U64 acc, U64 input) +{ + acc += input * 
PRIME64_2; + acc = XXH_rotl64(acc, 31); + acc *= PRIME64_1; + return acc; +} + +static U64 XXH64_mergeRound(U64 acc, U64 val) +{ + val = XXH64_round(0, val); + acc ^= val; + acc = acc * PRIME64_1 + PRIME64_4; + return acc; +} + +FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align) +{ + const BYTE* p = (const BYTE*)input; + const BYTE* const bEnd = p + len; + U64 h64; +#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align) + +#ifdef XXH_ACCEPT_NULL_INPUT_POINTER + if (p==NULL) { + len=0; + bEnd=p=(const BYTE*)(size_t)32; + } +#endif + + if (len>=32) { + const BYTE* const limit = bEnd - 32; + U64 v1 = seed + PRIME64_1 + PRIME64_2; + U64 v2 = seed + PRIME64_2; + U64 v3 = seed + 0; + U64 v4 = seed - PRIME64_1; + + do { + v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8; + v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8; + v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8; + v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8; + } while (p<=limit); + + h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); + h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + + } else { + h64 = seed + PRIME64_5; + } + + h64 += (U64) len; + + while (p+8<=bEnd) { + U64 const k1 = XXH64_round(0, XXH_get64bits(p)); + h64 ^= k1; + h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; + p+=8; + } + + if (p+4<=bEnd) { + h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; + h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; + p+=4; + } + + while (p> 33; + h64 *= PRIME64_2; + h64 ^= h64 >> 29; + h64 *= PRIME64_3; + h64 ^= h64 >> 32; + + return h64; +} + + +XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed) +{ +#if 0 + /* Simple version, good for code maintenance, but unfortunately slow for small inputs */ + XXH64_CREATESTATE_STATIC(state); + XXH64_reset(state, seed); + XXH64_update(state, 
input, len); + return XXH64_digest(state); +#else + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if (XXH_FORCE_ALIGN_CHECK) { + if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */ + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned); + else + return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned); + } } + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned); + else + return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned); +#endif +} + + +/* ************************************************** +* Advanced Hash Functions +****************************************************/ + + +/*** Hash feed ***/ + +XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed) +{ + XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ + memset(&state, 0, sizeof(state)-4); /* do not write into reserved, for future removal */ + state.v1 = seed + PRIME32_1 + PRIME32_2; + state.v2 = seed + PRIME32_2; + state.v3 = seed + 0; + state.v4 = seed - PRIME32_1; + memcpy(statePtr, &state, sizeof(state)); + return XXH_OK; +} + + +XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed) +{ + XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */ + memset(&state, 0, sizeof(state)-8); /* do not write into reserved, for future removal */ + state.v1 = seed + PRIME64_1 + PRIME64_2; + state.v2 = seed + PRIME64_2; + state.v3 = seed + 0; + state.v4 = seed - PRIME64_1; + memcpy(statePtr, &state, sizeof(state)); + return XXH_OK; +} + + +FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian) +{ + const 
BYTE* p = (const BYTE*)input; + const BYTE* const bEnd = p + len; + +#ifdef XXH_ACCEPT_NULL_INPUT_POINTER + if (input==NULL) return XXH_ERROR; +#endif + + state->total_len_32 += (unsigned)len; + state->large_len |= (len>=16) | (state->total_len_32>=16); + + if (state->memsize + len < 16) { /* fill in tmp buffer */ + XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len); + state->memsize += (unsigned)len; + return XXH_OK; + } + + if (state->memsize) { /* some data left from previous update */ + XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize); + { const U32* p32 = state->mem32; + state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++; + state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++; + state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++; + state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++; + } + p += 16-state->memsize; + state->memsize = 0; + } + + if (p <= bEnd-16) { + const BYTE* const limit = bEnd - 16; + U32 v1 = state->v1; + U32 v2 = state->v2; + U32 v3 = state->v3; + U32 v4 = state->v4; + + do { + v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4; + v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4; + v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4; + v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4; + } while (p<=limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } + + if (p < bEnd) { + XXH_memcpy(state->mem32, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + + return XXH_OK; +} + +XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_update_endian(state_in, input, len, XXH_littleEndian); + else + return XXH32_update_endian(state_in, input, len, XXH_bigEndian); +} + + + +FORCE_INLINE U32 
XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian) +{ + const BYTE * p = (const BYTE*)state->mem32; + const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize; + U32 h32; + + if (state->large_len) { + h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18); + } else { + h32 = state->v3 /* == seed */ + PRIME32_5; + } + + h32 += state->total_len_32; + + while (p+4<=bEnd) { + h32 += XXH_readLE32(p, endian) * PRIME32_3; + h32 = XXH_rotl32(h32, 17) * PRIME32_4; + p+=4; + } + + while (p> 15; + h32 *= PRIME32_2; + h32 ^= h32 >> 13; + h32 *= PRIME32_3; + h32 ^= h32 >> 16; + + return h32; +} + + +XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH32_digest_endian(state_in, XXH_littleEndian); + else + return XXH32_digest_endian(state_in, XXH_bigEndian); +} + + + +/* **** XXH64 **** */ + +FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian) +{ + const BYTE* p = (const BYTE*)input; + const BYTE* const bEnd = p + len; + +#ifdef XXH_ACCEPT_NULL_INPUT_POINTER + if (input==NULL) return XXH_ERROR; +#endif + + state->total_len += len; + + if (state->memsize + len < 32) { /* fill in tmp buffer */ + XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len); + state->memsize += (U32)len; + return XXH_OK; + } + + if (state->memsize) { /* tmp buffer is full */ + XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize); + state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian)); + state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian)); + state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian)); + state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian)); + p += 
32-state->memsize; + state->memsize = 0; + } + + if (p+32 <= bEnd) { + const BYTE* const limit = bEnd - 32; + U64 v1 = state->v1; + U64 v2 = state->v2; + U64 v3 = state->v3; + U64 v4 = state->v4; + + do { + v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8; + v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8; + v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8; + v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8; + } while (p<=limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } + + if (p < bEnd) { + XXH_memcpy(state->mem64, p, (size_t)(bEnd-p)); + state->memsize = (unsigned)(bEnd-p); + } + + return XXH_OK; +} + +XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_update_endian(state_in, input, len, XXH_littleEndian); + else + return XXH64_update_endian(state_in, input, len, XXH_bigEndian); +} + + + +FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian) +{ + const BYTE * p = (const BYTE*)state->mem64; + const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize; + U64 h64; + + if (state->total_len >= 32) { + U64 const v1 = state->v1; + U64 const v2 = state->v2; + U64 const v3 = state->v3; + U64 const v4 = state->v4; + + h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18); + h64 = XXH64_mergeRound(h64, v1); + h64 = XXH64_mergeRound(h64, v2); + h64 = XXH64_mergeRound(h64, v3); + h64 = XXH64_mergeRound(h64, v4); + } else { + h64 = state->v3 + PRIME64_5; + } + + h64 += (U64) state->total_len; + + while (p+8<=bEnd) { + U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian)); + h64 ^= k1; + h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; + p+=8; + } + + if (p+4<=bEnd) { + h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1; + h64 = XXH_rotl64(h64, 23) 
* PRIME64_2 + PRIME64_3; + p+=4; + } + + while (p> 33; + h64 *= PRIME64_2; + h64 ^= h64 >> 29; + h64 *= PRIME64_3; + h64 ^= h64 >> 32; + + return h64; +} + + +XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in) +{ + XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN; + + if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT) + return XXH64_digest_endian(state_in, XXH_littleEndian); + else + return XXH64_digest_endian(state_in, XXH_bigEndian); +} + + +/* ************************** +* Canonical representation +****************************/ + +/*! Default XXH result types are basic unsigned 32 and 64 bits. +* The canonical representation follows human-readable write convention, aka big-endian (large digits first). +* These functions allow transformation of hash result into and from its canonical format. +* This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs. +*/ + +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t)); + MEM_writeBE32(dst, hash); +} + +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash) +{ + XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t)); + MEM_writeBE64(dst, hash); +} + +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src) +{ + return XXH_readBE32(src); +} + +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src) +{ + return XXH_readBE64(src); +} diff --git a/contrib/linux-kernel/lib/zstd/xxhash.h b/contrib/linux-kernel/lib/zstd/xxhash.h new file mode 100644 index 000000000..974a81c48 --- /dev/null +++ b/contrib/linux-kernel/lib/zstd/xxhash.h @@ -0,0 +1,235 @@ +/* + xxHash - Extremely Fast Hash algorithm + Header File + Copyright (C) 2012-2016, Yann Collet. 
+ + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + You can contact the author at : + - xxHash source repository : https://github.com/Cyan4973/xxHash +*/ + +/* Notice extracted from xxHash homepage : + +xxHash is an extremely fast Hash algorithm, running at RAM speed limits. +It also successfully passes all tests from the SMHasher suite. 
+ +Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz) + +Name Speed Q.Score Author +xxHash 5.4 GB/s 10 +CrapWow 3.2 GB/s 2 Andrew +MumurHash 3a 2.7 GB/s 10 Austin Appleby +SpookyHash 2.0 GB/s 10 Bob Jenkins +SBox 1.4 GB/s 9 Bret Mulvey +Lookup3 1.2 GB/s 9 Bob Jenkins +SuperFastHash 1.2 GB/s 1 Paul Hsieh +CityHash64 1.05 GB/s 10 Pike & Alakuijala +FNV 0.55 GB/s 5 Fowler, Noll, Vo +CRC32 0.43 GB/s 9 +MD5-32 0.33 GB/s 10 Ronald L. Rivest +SHA1-32 0.28 GB/s 10 + +Q.Score is a measure of quality of the hash function. +It depends on successfully passing SMHasher test set. +10 is a perfect score. + +A 64-bits version, named XXH64, is available since r35. +It offers much better speed, but for 64-bits applications only. +Name Speed on 64 bits Speed on 32 bits +XXH64 13.8 GB/s 1.9 GB/s +XXH32 6.8 GB/s 6.0 GB/s +*/ + +#ifndef XXHASH_H_5627135585666179 +#define XXHASH_H_5627135585666179 1 + + +/* **************************** +* Definitions +******************************/ +#include /* size_t */ +typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode; + + +/* **************************** +* API modifier +******************************/ +/** XXH_PRIVATE_API +* This is useful if you want to include xxhash functions in `static` mode +* in order to inline them, and remove their symbol from the public list. +* Methodology : +* #define XXH_PRIVATE_API +* #include "xxhash.h" +* `xxhash.c` is automatically included. +* It's not useful to compile and link it as a separate module anymore. +*/ +#define XXH_PUBLIC_API /* do nothing */ + +/*!XXH_NAMESPACE, aka Namespace Emulation : + +If you want to include _and expose_ xxHash functions from within your own library, +but also want to avoid symbol collisions with another library which also includes xxHash, + +you can use XXH_NAMESPACE, to automatically prefix any public symbol from xxhash library +with the value of XXH_NAMESPACE (so avoid to keep it NULL and avoid numeric values). 
+ +Note that no change is required within the calling program as long as it includes `xxhash.h` : +regular symbol name will be automatically translated by this header. +*/ + + +/* ************************************* +* Version +***************************************/ +#define XXH_VERSION_MAJOR 0 +#define XXH_VERSION_MINOR 6 +#define XXH_VERSION_RELEASE 2 +#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE) +XXH_PUBLIC_API unsigned XXH_versionNumber (void); + + +/* **************************** +* Simple Hash Functions +******************************/ +typedef unsigned int XXH32_hash_t; +typedef unsigned long long XXH64_hash_t; + +XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed); +XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed); + +/*! +XXH32() : + Calculate the 32-bits hash of sequence "length" bytes stored at memory address "input". + The memory between input & input+length must be valid (allocated and read-accessible). + "seed" can be used to alter the result predictably. + Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s +XXH64() : + Calculate the 64-bits hash of sequence of length "len" stored at memory address "input". + "seed" can be used to alter the result predictably. + This function runs 2x faster on 64-bits systems, but slower on 32-bits systems (see benchmark). 
+*/ + + +/* **************************** +* Streaming Hash Functions +******************************/ +typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */ +typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */ + + +/* hash streaming */ + +XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, unsigned int seed); +XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length); +XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr); + +XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed); +XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length); +XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr); + +/* +These functions generate the xxHash of an input provided in multiple segments. +Note that, for small input, they are slower than single-call functions, due to state management. +For small input, prefer `XXH32()` and `XXH64()` . + +XXH state must first be allocated, using XXH*_createState() . + +Start a new hash by initializing state with a seed, using XXH*_reset(). + +Then, feed the hash state by calling XXH*_update() as many times as necessary. +Obviously, input must be allocated and read accessible. +The function returns an error code, with 0 meaning OK, and any other value meaning there is an error. + +Finally, a hash value can be produced anytime, by using XXH*_digest(). +This function returns the nn-bits hash as an int or long long. + +It's still possible to continue inserting input into the hash state after a digest, +and generate some new hashes later on, by calling again XXH*_digest(). + +When done, free XXH state space if it was allocated dynamically. 
+*/ + + +/* ************************** +* Utils +****************************/ +XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state); +XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state); + + +/* ************************** +* Canonical representation +****************************/ +/* Default result type for XXH functions are primitive unsigned 32 and 64 bits. +* The canonical representation uses human-readable write convention, aka big-endian (large digits first). +* These functions allow transformation of hash result into and from its canonical format. +* This way, hash values can be written into a file / memory, and remain comparable on different systems and programs. +*/ +typedef struct { unsigned char digest[4]; } XXH32_canonical_t; +typedef struct { unsigned char digest[8]; } XXH64_canonical_t; + +XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash); +XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash); + +XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src); +XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src); + + +/* ================================================================================================ + This section contains definitions which are not guaranteed to remain stable. + They may change in future versions, becoming incompatible with a different version of the library. + They shall only be used with static linking. + Never use these definitions in association with dynamic linking ! +=================================================================================================== */ +/* These definitions are only meant to allow allocation of XXH state + statically, on stack, or in a struct for example. + Do not use members directly. 
*/ + +struct XXH32_state_s { + unsigned total_len_32; + unsigned large_len; + unsigned v1; + unsigned v2; + unsigned v3; + unsigned v4; + unsigned mem32[4]; /* buffer defined as U32 for alignment */ + unsigned memsize; + unsigned reserved; /* never read nor write, will be removed in a future version */ +}; /* typedef'd to XXH32_state_t */ + +struct XXH64_state_s { + unsigned long long total_len; + unsigned long long v1; + unsigned long long v2; + unsigned long long v3; + unsigned long long v4; + unsigned long long mem64[4]; /* buffer defined as U64 for alignment */ + unsigned memsize; + unsigned reserved[2]; /* never read nor write, will be removed in a future version */ +}; /* typedef'd to XXH64_state_t */ + +#endif /* XXHASH_H_5627135585666179 */ diff --git a/contrib/linux-kernel/lib/zstd/zstd_common.c b/contrib/linux-kernel/lib/zstd/zstd_common.c new file mode 100644 index 000000000..106f54055 --- /dev/null +++ b/contrib/linux-kernel/lib/zstd/zstd_common.c @@ -0,0 +1,69 @@ +/** + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + + + +/*-************************************* +* Dependencies +***************************************/ +#include "error_private.h" +#include "zstd_internal.h" /* declaration of ZSTD_isError, ZSTD_getErrorName, ZSTD_getErrorCode, ZSTD_getErrorString, ZSTD_versionNumber */ +#include + + +/*=************************************************************** +* Custom allocator +****************************************************************/ + +#define stack_push(stack, size) ({ \ + void* const ptr = ZSTD_PTR_ALIGN((stack)->ptr); \ + (stack)->ptr = (char*)ptr + (size); \ + (stack)->ptr <= (stack)->end ? 
ptr : NULL; \ + }) + +ZSTD_customMem ZSTD_initStack(void* workspace, size_t workspaceSize) { + ZSTD_customMem stackMem = { ZSTD_stackAlloc, ZSTD_stackFree, workspace }; + ZSTD_stack* stack = (ZSTD_stack*) workspace; + /* Verify preconditions */ + if (!workspace || workspaceSize < sizeof(ZSTD_stack) || workspace != ZSTD_PTR_ALIGN(workspace)) { + ZSTD_customMem error = {NULL, NULL, NULL}; + return error; + } + /* Initialize the stack */ + stack->ptr = workspace; + stack->end = (char*)workspace + workspaceSize; + stack_push(stack, sizeof(ZSTD_stack)); + return stackMem; +} + +void* ZSTD_stackAllocAll(void* opaque, size_t* size) { + ZSTD_stack* stack = (ZSTD_stack*)opaque; + *size = stack->end - ZSTD_PTR_ALIGN(stack->ptr); + return stack_push(stack, *size); +} + +void* ZSTD_stackAlloc(void* opaque, size_t size) { + ZSTD_stack* stack = (ZSTD_stack*)opaque; + return stack_push(stack, size); +} +void ZSTD_stackFree(void* opaque, void* address) { + (void)opaque; + (void)address; +} + +void* ZSTD_malloc(size_t size, ZSTD_customMem customMem) +{ + return customMem.customAlloc(customMem.opaque, size); +} + +void ZSTD_free(void* ptr, ZSTD_customMem customMem) +{ + if (ptr!=NULL) + customMem.customFree(customMem.opaque, ptr); +} diff --git a/contrib/linux-kernel/lib/zstd/zstd_internal.h b/contrib/linux-kernel/lib/zstd/zstd_internal.h new file mode 100644 index 000000000..479d6827d --- /dev/null +++ b/contrib/linux-kernel/lib/zstd/zstd_internal.h @@ -0,0 +1,274 @@ +/** + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. 
+ */ + +#ifndef ZSTD_CCOMMON_H_MODULE +#define ZSTD_CCOMMON_H_MODULE + +/*-******************************************************* +* Compiler specifics +*********************************************************/ +#define FORCE_INLINE static __always_inline +#define FORCE_NOINLINE static noinline + + +/*-************************************* +* Dependencies +***************************************/ +#include +#include +#include +#include "mem.h" +#include "error_private.h" +#include "xxhash.h" /* XXH_reset, update, digest */ + + +/*-************************************* +* shared macros +***************************************/ +#define MIN(a,b) ((a)<(b) ? (a) : (b)) +#define MAX(a,b) ((a)>(b) ? (a) : (b)) +#define CHECK_F(f) { size_t const errcod = f; if (ERR_isError(errcod)) return errcod; } /* check and Forward error code */ +#define CHECK_E(f, e) { size_t const errcod = f; if (ERR_isError(errcod)) return ERROR(e); } /* check and send Error code */ + + +/*-************************************* +* Common constants +***************************************/ +#define ZSTD_OPT_NUM (1<<12) +#define ZSTD_DICT_MAGIC 0xEC30A437 /* v0.7+ */ + +#define ZSTD_REP_NUM 3 /* number of repcodes */ +#define ZSTD_REP_CHECK (ZSTD_REP_NUM) /* number of repcodes to check by the optimal parser */ +#define ZSTD_REP_MOVE (ZSTD_REP_NUM-1) +#define ZSTD_REP_MOVE_OPT (ZSTD_REP_NUM) +static const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 }; + +#define KB *(1 <<10) +#define MB *(1 <<20) +#define GB *(1U<<30) + +#define BIT7 128 +#define BIT6 64 +#define BIT5 32 +#define BIT4 16 +#define BIT1 2 +#define BIT0 1 + +#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10 +static const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 }; +static const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 }; + +#define ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow `static const` variable to be init using another `static const` variable */ +static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE; +typedef enum { 
bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e; + +#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */ +#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */ + +#define HufLog 12 +typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e; + +#define LONGNBSEQ 0x7F00 + +#define MINMATCH 3 +#define EQUAL_READ32 4 + +#define Litbits 8 +#define MaxLit ((1<= 3) /* GCC Intrinsic */ + return 31 - __builtin_clz(val); +# else /* Software version */ + static const int DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 }; + U32 v = val; + int r; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + r = DeBruijnClz[(U32)(v * 0x07C4ACDDU) >> 27]; + return r; +# endif +} + + +/* hidden functions */ + +/* ZSTD_invalidateRepCodes() : + * ensures next compression will not use repcodes from previous block. + * Note : only works with regular variant; + * do not use with extDict variant ! */ +void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx); + +size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx); +size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx); +size_t ZSTD_freeCDict(ZSTD_CDict* cdict); +size_t ZSTD_freeDDict(ZSTD_DDict* cdict); +size_t ZSTD_freeCStream(ZSTD_CStream* zcs); +size_t ZSTD_freeDStream(ZSTD_DStream* zds); + + +#endif /* ZSTD_CCOMMON_H_MODULE */ diff --git a/contrib/linux-kernel/lib/zstd/zstd_opt.h b/contrib/linux-kernel/lib/zstd/zstd_opt.h new file mode 100644 index 000000000..297a71559 --- /dev/null +++ b/contrib/linux-kernel/lib/zstd/zstd_opt.h @@ -0,0 +1,921 @@ +/** + * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + + +/* Note : this file is intended to be included within zstd_compress.c */ + + +#ifndef ZSTD_OPT_H_91842398743 +#define ZSTD_OPT_H_91842398743 + + +#define ZSTD_LITFREQ_ADD 2 +#define ZSTD_FREQ_DIV 4 +#define ZSTD_MAX_PRICE (1<<30) + +/*-************************************* +* Price functions for optimal parser +***************************************/ +FORCE_INLINE void ZSTD_setLog2Prices(seqStore_t* ssPtr) +{ + ssPtr->log2matchLengthSum = ZSTD_highbit32(ssPtr->matchLengthSum+1); + ssPtr->log2litLengthSum = ZSTD_highbit32(ssPtr->litLengthSum+1); + ssPtr->log2litSum = ZSTD_highbit32(ssPtr->litSum+1); + ssPtr->log2offCodeSum = ZSTD_highbit32(ssPtr->offCodeSum+1); + ssPtr->factor = 1 + ((ssPtr->litSum>>5) / ssPtr->litLengthSum) + ((ssPtr->litSum<<1) / (ssPtr->litSum + ssPtr->matchSum)); +} + + +MEM_STATIC void ZSTD_rescaleFreqs(seqStore_t* ssPtr, const BYTE* src, size_t srcSize) +{ + unsigned u; + + ssPtr->cachedLiterals = NULL; + ssPtr->cachedPrice = ssPtr->cachedLitLength = 0; + ssPtr->staticPrices = 0; + + if (ssPtr->litLengthSum == 0) { + if (srcSize <= 1024) ssPtr->staticPrices = 1; + + for (u=0; u<=MaxLit; u++) + ssPtr->litFreq[u] = 0; + for (u=0; ulitFreq[src[u]]++; + + ssPtr->litSum = 0; + ssPtr->litLengthSum = MaxLL+1; + ssPtr->matchLengthSum = MaxML+1; + ssPtr->offCodeSum = (MaxOff+1); + ssPtr->matchSum = (ZSTD_LITFREQ_ADD<litFreq[u] = 1 + (ssPtr->litFreq[u]>>ZSTD_FREQ_DIV); + ssPtr->litSum += ssPtr->litFreq[u]; + } + for (u=0; u<=MaxLL; u++) + ssPtr->litLengthFreq[u] = 1; + for (u=0; u<=MaxML; u++) + ssPtr->matchLengthFreq[u] = 1; + for (u=0; u<=MaxOff; u++) + ssPtr->offCodeFreq[u] = 1; + } else { + ssPtr->matchLengthSum = 0; + ssPtr->litLengthSum = 0; + ssPtr->offCodeSum = 0; + ssPtr->matchSum = 0; + ssPtr->litSum = 0; + + for (u=0; u<=MaxLit; u++) { + ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u]>>(ZSTD_FREQ_DIV+1)); + ssPtr->litSum += 
ssPtr->litFreq[u]; + } + for (u=0; u<=MaxLL; u++) { + ssPtr->litLengthFreq[u] = 1 + (ssPtr->litLengthFreq[u]>>(ZSTD_FREQ_DIV+1)); + ssPtr->litLengthSum += ssPtr->litLengthFreq[u]; + } + for (u=0; u<=MaxML; u++) { + ssPtr->matchLengthFreq[u] = 1 + (ssPtr->matchLengthFreq[u]>>ZSTD_FREQ_DIV); + ssPtr->matchLengthSum += ssPtr->matchLengthFreq[u]; + ssPtr->matchSum += ssPtr->matchLengthFreq[u] * (u + 3); + } + ssPtr->matchSum *= ZSTD_LITFREQ_ADD; + for (u=0; u<=MaxOff; u++) { + ssPtr->offCodeFreq[u] = 1 + (ssPtr->offCodeFreq[u]>>ZSTD_FREQ_DIV); + ssPtr->offCodeSum += ssPtr->offCodeFreq[u]; + } + } + + ZSTD_setLog2Prices(ssPtr); +} + + +FORCE_INLINE U32 ZSTD_getLiteralPrice(seqStore_t* ssPtr, U32 litLength, const BYTE* literals) +{ + U32 price, u; + + if (ssPtr->staticPrices) + return ZSTD_highbit32((U32)litLength+1) + (litLength*6); + + if (litLength == 0) + return ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[0]+1); + + /* literals */ + if (ssPtr->cachedLiterals == literals) { + U32 const additional = litLength - ssPtr->cachedLitLength; + const BYTE* literals2 = ssPtr->cachedLiterals + ssPtr->cachedLitLength; + price = ssPtr->cachedPrice + additional * ssPtr->log2litSum; + for (u=0; u < additional; u++) + price -= ZSTD_highbit32(ssPtr->litFreq[literals2[u]]+1); + ssPtr->cachedPrice = price; + ssPtr->cachedLitLength = litLength; + } else { + price = litLength * ssPtr->log2litSum; + for (u=0; u < litLength; u++) + price -= ZSTD_highbit32(ssPtr->litFreq[literals[u]]+1); + + if (litLength >= 12) { + ssPtr->cachedLiterals = literals; + ssPtr->cachedPrice = price; + ssPtr->cachedLitLength = litLength; + } + } + + /* literal Length */ + { const BYTE LL_deltaCode = 19; + const BYTE llCode = (litLength>63) ? 
(BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength]; + price += LL_bits[llCode] + ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[llCode]+1); + } + + return price; +} + + +FORCE_INLINE U32 ZSTD_getPrice(seqStore_t* seqStorePtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength, const int ultra) +{ + /* offset */ + U32 price; + BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1); + + if (seqStorePtr->staticPrices) + return ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + ZSTD_highbit32((U32)matchLength+1) + 16 + offCode; + + price = offCode + seqStorePtr->log2offCodeSum - ZSTD_highbit32(seqStorePtr->offCodeFreq[offCode]+1); + if (!ultra && offCode >= 20) price += (offCode-19)*2; + + /* match Length */ + { const BYTE ML_deltaCode = 36; + const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength]; + price += ML_bits[mlCode] + seqStorePtr->log2matchLengthSum - ZSTD_highbit32(seqStorePtr->matchLengthFreq[mlCode]+1); + } + + return price + ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + seqStorePtr->factor; +} + + +MEM_STATIC void ZSTD_updatePrice(seqStore_t* seqStorePtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength) +{ + U32 u; + + /* literals */ + seqStorePtr->litSum += litLength*ZSTD_LITFREQ_ADD; + for (u=0; u < litLength; u++) + seqStorePtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD; + + /* literal Length */ + { const BYTE LL_deltaCode = 19; + const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength]; + seqStorePtr->litLengthFreq[llCode]++; + seqStorePtr->litLengthSum++; + } + + /* match offset */ + { BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1); + seqStorePtr->offCodeSum++; + seqStorePtr->offCodeFreq[offCode]++; + } + + /* match Length */ + { const BYTE ML_deltaCode = 36; + const BYTE mlCode = (matchLength>127) ? 
(BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength]; + seqStorePtr->matchLengthFreq[mlCode]++; + seqStorePtr->matchLengthSum++; + } + + ZSTD_setLog2Prices(seqStorePtr); +} + + +#define SET_PRICE(pos, mlen_, offset_, litlen_, price_) \ + { \ + while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } \ + opt[pos].mlen = mlen_; \ + opt[pos].off = offset_; \ + opt[pos].litlen = litlen_; \ + opt[pos].price = price_; \ + } + + + +/* Update hashTable3 up to ip (excluded) + Assumption : always within prefix (i.e. not within extDict) */ +FORCE_INLINE +U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_CCtx* zc, const BYTE* ip) +{ + U32* const hashTable3 = zc->hashTable3; + U32 const hashLog3 = zc->hashLog3; + const BYTE* const base = zc->base; + U32 idx = zc->nextToUpdate3; + const U32 target = zc->nextToUpdate3 = (U32)(ip - base); + const size_t hash3 = ZSTD_hash3Ptr(ip, hashLog3); + + while(idx < target) { + hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx; + idx++; + } + + return hashTable3[hash3]; +} + + +/*-************************************* +* Binary Tree search +***************************************/ +static U32 ZSTD_insertBtAndGetAllMatches ( + ZSTD_CCtx* zc, + const BYTE* const ip, const BYTE* const iLimit, + U32 nbCompares, const U32 mls, + U32 extDict, ZSTD_match_t* matches, const U32 minMatchLen) +{ + const BYTE* const base = zc->base; + const U32 current = (U32)(ip-base); + const U32 hashLog = zc->params.cParams.hashLog; + const size_t h = ZSTD_hashPtr(ip, hashLog, mls); + U32* const hashTable = zc->hashTable; + U32 matchIndex = hashTable[h]; + U32* const bt = zc->chainTable; + const U32 btLog = zc->params.cParams.chainLog - 1; + const U32 btMask= (1U << btLog) - 1; + size_t commonLengthSmaller=0, commonLengthLarger=0; + const BYTE* const dictBase = zc->dictBase; + const U32 dictLimit = zc->dictLimit; + const BYTE* const dictEnd = dictBase + dictLimit; + const BYTE* const prefixStart = base + dictLimit; + const U32 
btLow = btMask >= current ? 0 : current - btMask; + const U32 windowLow = zc->lowLimit; + U32* smallerPtr = bt + 2*(current&btMask); + U32* largerPtr = bt + 2*(current&btMask) + 1; + U32 matchEndIdx = current+8; + U32 dummy32; /* to be nullified at the end */ + U32 mnum = 0; + + const U32 minMatch = (mls == 3) ? 3 : 4; + size_t bestLength = minMatchLen-1; + + if (minMatch == 3) { /* HC3 match finder */ + U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3 (zc, ip); + if (matchIndex3>windowLow && (current - matchIndex3 < (1<<18))) { + const BYTE* match; + size_t currentMl=0; + if ((!extDict) || matchIndex3 >= dictLimit) { + match = base + matchIndex3; + if (match[bestLength] == ip[bestLength]) currentMl = ZSTD_count(ip, match, iLimit); + } else { + match = dictBase + matchIndex3; + if (MEM_readMINMATCH(match, MINMATCH) == MEM_readMINMATCH(ip, MINMATCH)) /* assumption : matchIndex3 <= dictLimit-4 (by table construction) */ + currentMl = ZSTD_count_2segments(ip+MINMATCH, match+MINMATCH, iLimit, dictEnd, prefixStart) + MINMATCH; + } + + /* save best solution */ + if (currentMl > bestLength) { + bestLength = currentMl; + matches[mnum].off = ZSTD_REP_MOVE_OPT + current - matchIndex3; + matches[mnum].len = (U32)currentMl; + mnum++; + if (currentMl > ZSTD_OPT_NUM) goto update; + if (ip+currentMl == iLimit) goto update; /* best possible, and avoid read overflow*/ + } + } + } + + hashTable[h] = current; /* Update Hash Table */ + + while (nbCompares-- && (matchIndex > windowLow)) { + U32* nextPtr = bt + 2*(matchIndex & btMask); + size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */ + const BYTE* match; + + if ((!extDict) || (matchIndex+matchLength >= dictLimit)) { + match = base + matchIndex; + if (match[matchLength] == ip[matchLength]) { + matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iLimit) +1; + } + } else { + match = dictBase + matchIndex; + matchLength += 
ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart); + if (matchIndex+matchLength >= dictLimit) + match = base + matchIndex; /* to prepare for next usage of match[matchLength] */ + } + + if (matchLength > bestLength) { + if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength; + bestLength = matchLength; + matches[mnum].off = ZSTD_REP_MOVE_OPT + current - matchIndex; + matches[mnum].len = (U32)matchLength; + mnum++; + if (matchLength > ZSTD_OPT_NUM) break; + if (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */ + break; /* drop, to guarantee consistency (miss a little bit of compression) */ + } + + if (match[matchLength] < ip[matchLength]) { + /* match is smaller than current */ + *smallerPtr = matchIndex; /* update smaller idx */ + commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */ + if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + smallerPtr = nextPtr+1; /* new "smaller" => larger of match */ + matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */ + } else { + /* match is larger than current */ + *largerPtr = matchIndex; + commonLengthLarger = matchLength; + if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */ + largerPtr = nextPtr; + matchIndex = nextPtr[0]; + } } + + *smallerPtr = *largerPtr = 0; + +update: + zc->nextToUpdate = (matchEndIdx > current + 8) ? 
matchEndIdx - 8 : current+1; + return mnum; +} + + +/** Tree updater, providing best match */ +static U32 ZSTD_BtGetAllMatches ( + ZSTD_CCtx* zc, + const BYTE* const ip, const BYTE* const iLimit, + const U32 maxNbAttempts, const U32 mls, ZSTD_match_t* matches, const U32 minMatchLen) +{ + if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */ + ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls); + return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 0, matches, minMatchLen); +} + + +static U32 ZSTD_BtGetAllMatches_selectMLS ( + ZSTD_CCtx* zc, /* Index table will be updated */ + const BYTE* ip, const BYTE* const iHighLimit, + const U32 maxNbAttempts, const U32 matchLengthSearch, ZSTD_match_t* matches, const U32 minMatchLen) +{ + switch(matchLengthSearch) + { + case 3 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen); + default : + case 4 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen); + case 5 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen); + case 7 : + case 6 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen); + } +} + +/** Tree updater, providing best match */ +static U32 ZSTD_BtGetAllMatches_extDict ( + ZSTD_CCtx* zc, + const BYTE* const ip, const BYTE* const iLimit, + const U32 maxNbAttempts, const U32 mls, ZSTD_match_t* matches, const U32 minMatchLen) +{ + if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */ + ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls); + return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 1, matches, minMatchLen); +} + + +static U32 ZSTD_BtGetAllMatches_selectMLS_extDict ( + ZSTD_CCtx* zc, /* Index table will be updated */ + const BYTE* ip, const BYTE* const iHighLimit, + const U32 maxNbAttempts, const U32 matchLengthSearch, ZSTD_match_t* matches, const U32 minMatchLen) +{ + 
switch(matchLengthSearch) + { + case 3 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen); + default : + case 4 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen); + case 5 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen); + case 7 : + case 6 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen); + } +} + + +/*-******************************* +* Optimal parser +*********************************/ +FORCE_INLINE +void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx, + const void* src, size_t srcSize, const int ultra) +{ + seqStore_t* seqStorePtr = &(ctx->seqStore); + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - 8; + const BYTE* const base = ctx->base; + const BYTE* const prefixStart = base + ctx->dictLimit; + + const U32 maxSearches = 1U << ctx->params.cParams.searchLog; + const U32 sufficient_len = ctx->params.cParams.targetLength; + const U32 mls = ctx->params.cParams.searchLength; + const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 
3 : 4; + + ZSTD_optimal_t* opt = seqStorePtr->priceTable; + ZSTD_match_t* matches = seqStorePtr->matchTable; + const BYTE* inr; + U32 offset, rep[ZSTD_REP_NUM]; + + /* init */ + ctx->nextToUpdate3 = ctx->nextToUpdate; + ZSTD_rescaleFreqs(seqStorePtr, (const BYTE*)src, srcSize); + ip += (ip==prefixStart); + { U32 i; for (i=0; irep[i]; } + + /* Match Loop */ + while (ip < ilimit) { + U32 cur, match_num, last_pos, litlen, price; + U32 u, mlen, best_mlen, best_off, litLength; + memset(opt, 0, sizeof(ZSTD_optimal_t)); + last_pos = 0; + litlen = (U32)(ip - anchor); + + /* check repCode */ + { U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor); + for (i=(ip == anchor); i 0) && (repCur < (S32)(ip-prefixStart)) + && (MEM_readMINMATCH(ip, minMatch) == MEM_readMINMATCH(ip - repCur, minMatch))) { + mlen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repCur, iend) + minMatch; + if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) { + best_mlen = mlen; best_off = i; cur = 0; last_pos = 1; + goto _storeSequence; + } + best_off = i - (ip == anchor); + do { + price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); + if (mlen > last_pos || price < opt[mlen].price) + SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */ + mlen--; + } while (mlen >= minMatch); + } } } + + match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, ip, iend, maxSearches, mls, matches, minMatch); + + if (!last_pos && !match_num) { ip++; continue; } + + if (match_num && (matches[match_num-1].len > sufficient_len || matches[match_num-1].len >= ZSTD_OPT_NUM)) { + best_mlen = matches[match_num-1].len; + best_off = matches[match_num-1].off; + cur = 0; + last_pos = 1; + goto _storeSequence; + } + + /* set prices using matches at position = 0 */ + best_mlen = (last_pos) ? last_pos : minMatch; + for (u = 0; u < match_num; u++) { + mlen = (u>0) ? 
matches[u-1].len+1 : best_mlen; + best_mlen = matches[u].len; + while (mlen <= best_mlen) { + price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra); + if (mlen > last_pos || price < opt[mlen].price) + SET_PRICE(mlen, mlen, matches[u].off, litlen, price); /* note : macro modifies last_pos */ + mlen++; + } } + + if (last_pos < minMatch) { ip++; continue; } + + /* initialize opt[0] */ + { U32 i ; for (i=0; i litlen) { + price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-litlen); + } else + price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor); + } else { + litlen = 1; + price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-1); + } + + if (cur > last_pos || price <= opt[cur].price) + SET_PRICE(cur, 1, 0, litlen, price); + + if (cur == last_pos) break; + + if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */ + continue; + + mlen = opt[cur].mlen; + if (opt[cur].off > ZSTD_REP_MOVE_OPT) { + opt[cur].rep[2] = opt[cur-mlen].rep[1]; + opt[cur].rep[1] = opt[cur-mlen].rep[0]; + opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT; + } else { + opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur-mlen].rep[1] : opt[cur-mlen].rep[2]; + opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur-mlen].rep[0] : opt[cur-mlen].rep[1]; + opt[cur].rep[0] = ((opt[cur].off==ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? 
(opt[cur-mlen].rep[0] - 1) : (opt[cur-mlen].rep[opt[cur].off]); + } + + best_mlen = minMatch; + { U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1); + for (i=(opt[cur].mlen != 1); i 0) && (repCur < (S32)(inr-prefixStart)) + && (MEM_readMINMATCH(inr, minMatch) == MEM_readMINMATCH(inr - repCur, minMatch))) { + mlen = (U32)ZSTD_count(inr+minMatch, inr+minMatch - repCur, iend) + minMatch; + + if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) { + best_mlen = mlen; best_off = i; last_pos = cur + 1; + goto _storeSequence; + } + + best_off = i - (opt[cur].mlen != 1); + if (mlen > best_mlen) best_mlen = mlen; + + do { + if (opt[cur].mlen == 1) { + litlen = opt[cur].litlen; + if (cur > litlen) { + price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra); + } else + price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); + } else { + litlen = 0; + price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra); + } + + if (cur + mlen > last_pos || price <= opt[cur + mlen].price) + SET_PRICE(cur + mlen, mlen, i, litlen, price); + mlen--; + } while (mlen >= minMatch); + } } } + + match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, inr, iend, maxSearches, mls, matches, best_mlen); + + if (match_num > 0 && (matches[match_num-1].len > sufficient_len || cur + matches[match_num-1].len >= ZSTD_OPT_NUM)) { + best_mlen = matches[match_num-1].len; + best_off = matches[match_num-1].off; + last_pos = cur + 1; + goto _storeSequence; + } + + /* set prices using matches at position = cur */ + for (u = 0; u < match_num; u++) { + mlen = (u>0) ? 
matches[u-1].len+1 : best_mlen; + best_mlen = matches[u].len; + + while (mlen <= best_mlen) { + if (opt[cur].mlen == 1) { + litlen = opt[cur].litlen; + if (cur > litlen) + price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra); + else + price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra); + } else { + litlen = 0; + price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra); + } + + if (cur + mlen > last_pos || (price < opt[cur + mlen].price)) + SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price); + + mlen++; + } } } + + best_mlen = opt[last_pos].mlen; + best_off = opt[last_pos].off; + cur = last_pos - best_mlen; + + /* store sequence */ +_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */ + opt[0].mlen = 1; + + while (1) { + mlen = opt[cur].mlen; + offset = opt[cur].off; + opt[cur].mlen = best_mlen; + opt[cur].off = best_off; + best_mlen = mlen; + best_off = offset; + if (mlen > cur) break; + cur -= mlen; + } + + for (u = 0; u <= last_pos;) { + u += opt[u].mlen; + } + + for (cur=0; cur < last_pos; ) { + mlen = opt[cur].mlen; + if (mlen == 1) { ip++; cur++; continue; } + offset = opt[cur].off; + cur += mlen; + litLength = (U32)(ip - anchor); + + if (offset > ZSTD_REP_MOVE_OPT) { + rep[2] = rep[1]; + rep[1] = rep[0]; + rep[0] = offset - ZSTD_REP_MOVE_OPT; + offset--; + } else { + if (offset != 0) { + best_off = (offset==ZSTD_REP_MOVE_OPT) ? 
(rep[0] - 1) : (rep[offset]); + if (offset != 1) rep[2] = rep[1]; + rep[1] = rep[0]; + rep[0] = best_off; + } + if (litLength==0) offset--; + } + + ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH); + ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH); + anchor = ip = ip + mlen; + } } /* for (cur=0; cur < last_pos; ) */ + + /* Save reps for next block */ + { int i; for (i=0; irepToConfirm[i] = rep[i]; } + + /* Last Literals */ + { size_t const lastLLSize = iend - anchor; + memcpy(seqStorePtr->lit, anchor, lastLLSize); + seqStorePtr->lit += lastLLSize; + } +} + + +FORCE_INLINE +void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx, + const void* src, size_t srcSize, const int ultra) +{ + seqStore_t* seqStorePtr = &(ctx->seqStore); + const BYTE* const istart = (const BYTE*)src; + const BYTE* ip = istart; + const BYTE* anchor = istart; + const BYTE* const iend = istart + srcSize; + const BYTE* const ilimit = iend - 8; + const BYTE* const base = ctx->base; + const U32 lowestIndex = ctx->lowLimit; + const U32 dictLimit = ctx->dictLimit; + const BYTE* const prefixStart = base + dictLimit; + const BYTE* const dictBase = ctx->dictBase; + const BYTE* const dictEnd = dictBase + dictLimit; + + const U32 maxSearches = 1U << ctx->params.cParams.searchLog; + const U32 sufficient_len = ctx->params.cParams.targetLength; + const U32 mls = ctx->params.cParams.searchLength; + const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 
3 : 4; + + ZSTD_optimal_t* opt = seqStorePtr->priceTable; + ZSTD_match_t* matches = seqStorePtr->matchTable; + const BYTE* inr; + + /* init */ + U32 offset, rep[ZSTD_REP_NUM]; + { U32 i; for (i=0; irep[i]; } + + ctx->nextToUpdate3 = ctx->nextToUpdate; + ZSTD_rescaleFreqs(seqStorePtr, (const BYTE*)src, srcSize); + ip += (ip==prefixStart); + + /* Match Loop */ + while (ip < ilimit) { + U32 cur, match_num, last_pos, litlen, price; + U32 u, mlen, best_mlen, best_off, litLength; + U32 current = (U32)(ip-base); + memset(opt, 0, sizeof(ZSTD_optimal_t)); + last_pos = 0; + opt[0].litlen = (U32)(ip - anchor); + + /* check repCode */ + { U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor); + for (i = (ip==anchor); i 0 && repCur <= (S32)current) + && (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex>lowestIndex)) /* intentional overflow */ + && (MEM_readMINMATCH(ip, minMatch) == MEM_readMINMATCH(repMatch, minMatch)) ) { + /* repcode detected we should take it */ + const BYTE* const repEnd = repIndex < dictLimit ? 
dictEnd : iend; + mlen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iend, repEnd, prefixStart) + minMatch; + + if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) { + best_mlen = mlen; best_off = i; cur = 0; last_pos = 1; + goto _storeSequence; + } + + best_off = i - (ip==anchor); + litlen = opt[0].litlen; + do { + price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); + if (mlen > last_pos || price < opt[mlen].price) + SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */ + mlen--; + } while (mlen >= minMatch); + } } } + + match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, ip, iend, maxSearches, mls, matches, minMatch); /* first search (depth 0) */ + + if (!last_pos && !match_num) { ip++; continue; } + + { U32 i; for (i=0; i sufficient_len || matches[match_num-1].len >= ZSTD_OPT_NUM)) { + best_mlen = matches[match_num-1].len; + best_off = matches[match_num-1].off; + cur = 0; + last_pos = 1; + goto _storeSequence; + } + + best_mlen = (last_pos) ? last_pos : minMatch; + + /* set prices using matches at position = 0 */ + for (u = 0; u < match_num; u++) { + mlen = (u>0) ? 
matches[u-1].len+1 : best_mlen; + best_mlen = matches[u].len; + litlen = opt[0].litlen; + while (mlen <= best_mlen) { + price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra); + if (mlen > last_pos || price < opt[mlen].price) + SET_PRICE(mlen, mlen, matches[u].off, litlen, price); + mlen++; + } } + + if (last_pos < minMatch) { + ip++; continue; + } + + /* check further positions */ + for (cur = 1; cur <= last_pos; cur++) { + inr = ip + cur; + + if (opt[cur-1].mlen == 1) { + litlen = opt[cur-1].litlen + 1; + if (cur > litlen) { + price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-litlen); + } else + price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor); + } else { + litlen = 1; + price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-1); + } + + if (cur > last_pos || price <= opt[cur].price) + SET_PRICE(cur, 1, 0, litlen, price); + + if (cur == last_pos) break; + + if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */ + continue; + + mlen = opt[cur].mlen; + if (opt[cur].off > ZSTD_REP_MOVE_OPT) { + opt[cur].rep[2] = opt[cur-mlen].rep[1]; + opt[cur].rep[1] = opt[cur-mlen].rep[0]; + opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT; + } else { + opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur-mlen].rep[1] : opt[cur-mlen].rep[2]; + opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur-mlen].rep[0] : opt[cur-mlen].rep[1]; + opt[cur].rep[0] = ((opt[cur].off==ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur-mlen].rep[0] - 1) : (opt[cur-mlen].rep[opt[cur].off]); + } + + best_mlen = minMatch; + { U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1); + for (i = (mlen != 1); i 0 && repCur <= (S32)(current+cur)) + && (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex>lowestIndex)) /* intentional overflow */ + && (MEM_readMINMATCH(inr, minMatch) == MEM_readMINMATCH(repMatch, minMatch)) ) { + /* repcode detected */ + const BYTE* const repEnd = repIndex < dictLimit ? 
dictEnd : iend; + mlen = (U32)ZSTD_count_2segments(inr+minMatch, repMatch+minMatch, iend, repEnd, prefixStart) + minMatch; + + if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) { + best_mlen = mlen; best_off = i; last_pos = cur + 1; + goto _storeSequence; + } + + best_off = i - (opt[cur].mlen != 1); + if (mlen > best_mlen) best_mlen = mlen; + + do { + if (opt[cur].mlen == 1) { + litlen = opt[cur].litlen; + if (cur > litlen) { + price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra); + } else + price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra); + } else { + litlen = 0; + price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra); + } + + if (cur + mlen > last_pos || price <= opt[cur + mlen].price) + SET_PRICE(cur + mlen, mlen, i, litlen, price); + mlen--; + } while (mlen >= minMatch); + } } } + + match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, inr, iend, maxSearches, mls, matches, minMatch); + + if (match_num > 0 && (matches[match_num-1].len > sufficient_len || cur + matches[match_num-1].len >= ZSTD_OPT_NUM)) { + best_mlen = matches[match_num-1].len; + best_off = matches[match_num-1].off; + last_pos = cur + 1; + goto _storeSequence; + } + + /* set prices using matches at position = cur */ + for (u = 0; u < match_num; u++) { + mlen = (u>0) ? 
matches[u-1].len+1 : best_mlen; + best_mlen = matches[u].len; + + while (mlen <= best_mlen) { + if (opt[cur].mlen == 1) { + litlen = opt[cur].litlen; + if (cur > litlen) + price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra); + else + price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra); + } else { + litlen = 0; + price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra); + } + + if (cur + mlen > last_pos || (price < opt[cur + mlen].price)) + SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price); + + mlen++; + } } } /* for (cur = 1; cur <= last_pos; cur++) */ + + best_mlen = opt[last_pos].mlen; + best_off = opt[last_pos].off; + cur = last_pos - best_mlen; + + /* store sequence */ +_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */ + opt[0].mlen = 1; + + while (1) { + mlen = opt[cur].mlen; + offset = opt[cur].off; + opt[cur].mlen = best_mlen; + opt[cur].off = best_off; + best_mlen = mlen; + best_off = offset; + if (mlen > cur) break; + cur -= mlen; + } + + for (u = 0; u <= last_pos; ) { + u += opt[u].mlen; + } + + for (cur=0; cur < last_pos; ) { + mlen = opt[cur].mlen; + if (mlen == 1) { ip++; cur++; continue; } + offset = opt[cur].off; + cur += mlen; + litLength = (U32)(ip - anchor); + + if (offset > ZSTD_REP_MOVE_OPT) { + rep[2] = rep[1]; + rep[1] = rep[0]; + rep[0] = offset - ZSTD_REP_MOVE_OPT; + offset--; + } else { + if (offset != 0) { + best_off = (offset==ZSTD_REP_MOVE_OPT) ? 
(rep[0] - 1) : (rep[offset]); + if (offset != 1) rep[2] = rep[1]; + rep[1] = rep[0]; + rep[0] = best_off; + } + + if (litLength==0) offset--; + } + + ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH); + ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH); + anchor = ip = ip + mlen; + } } /* for (cur=0; cur < last_pos; ) */ + + /* Save reps for next block */ + { int i; for (i=0; irepToConfirm[i] = rep[i]; } + + /* Last Literals */ + { size_t lastLLSize = iend - anchor; + memcpy(seqStorePtr->lit, anchor, lastLLSize); + seqStorePtr->lit += lastLLSize; + } +} + +#endif /* ZSTD_OPT_H_91842398743 */ diff --git a/contrib/linux-kernel/spaces_to_tabs.sh b/contrib/linux-kernel/spaces_to_tabs.sh new file mode 100755 index 000000000..ebde5fbaa --- /dev/null +++ b/contrib/linux-kernel/spaces_to_tabs.sh @@ -0,0 +1,28 @@ +#!/bin/sh +set -e + +# Constants +INCLUDE='include/' +LIB='lib/' +SPACES=' ' +TAB=$'\t' +TMP="replacements.tmp" + +echo "Files: " $INCLUDE* $LIB* + +# Check files for existing tabs +grep "$TAB" $INCLUDE* $LIB* && exit 1 || true +# Replace the first tab on every line +sed -i '' "s/^$SPACES/$TAB/" $INCLUDE* $LIB* + +# Execute once and then execute as long as replacements are happening +more_work="yes" +while [ ! -z "$more_work" ] +do + rm -f $TMP + # Replaces $SPACES that directly follow a $TAB with a $TAB. + # $TMP will be non-empty if any replacements took place. + sed -i '' "s/$TAB$SPACES/$TAB$TAB/w $TMP" $INCLUDE* $LIB* + more_work=$(cat "$TMP") +done +rm -f $TMP diff --git a/contrib/linux-kernel/squashfs-benchmark.sh b/contrib/linux-kernel/squashfs-benchmark.sh new file mode 100755 index 000000000..02dfd7325 --- /dev/null +++ b/contrib/linux-kernel/squashfs-benchmark.sh @@ -0,0 +1,39 @@ +# !/bin/sh +set -e + +# Benchmarks run on a Ubuntu 14.04 VM with 2 cores and 4 GiB of RAM. +# The VM is running on a Macbook Pro with a 3.1 GHz Intel Core i7 processor and +# 16 GB of RAM and an SSD. 
+ +# $BENCHMARK_DIR is generated with the following commands, from the Ubuntu image +# ubuntu-16.10-desktop-amd64.iso. +# > mkdir mnt +# > sudo mount -o loop ubuntu-16.10-desktop-amd64.iso mnt +# > cp mnt/casper/filesystem.squashfs . +# > sudo unsquashfs filesystem.squashfs + +# $HOME is on a ext4 filesystem +BENCHMARK_DIR="$HOME/squashfs-root/" +BENCHMARK_FS="$HOME/filesystem.squashfs" + +# Normalize the environment +sudo rm -f $BENCHMARK_FS 2> /dev/null > /dev/null || true +sudo umount /mnt/squashfs 2> /dev/null > /dev/null || true + +# Run the benchmark +echo "Compression" +echo "sudo mksquashfs $BENCHMARK_DIR $BENCHMARK_FS $@" +time sudo mksquashfs $BENCHMARK_DIR $BENCHMARK_FS $@ 2> /dev/null > /dev/null + +echo "Approximate compression ratio" +printf "%d / %d\n" \ + $(sudo du -sx --block-size=1 $BENCHMARK_DIR | cut -f1) \ + $(sudo du -sx --block-size=1 $BENCHMARK_FS | cut -f1); + +# Mount the filesystem +sudo mount -t squashfs $BENCHMARK_FS /mnt/squashfs + +echo "Decompression" +time sudo tar -c /mnt/squashfs 2> /dev/null | wc -c > /dev/null + +sudo umount /mnt/squashfs diff --git a/contrib/linux-kernel/squashfs.diff b/contrib/linux-kernel/squashfs.diff new file mode 100644 index 000000000..ddf7b3578 --- /dev/null +++ b/contrib/linux-kernel/squashfs.diff @@ -0,0 +1,245 @@ +commit 16bb6b9fd684eadba41a36223d67805d7ea741e7 +Author: Sean Purcell +Date: Thu Apr 27 17:17:58 2017 -0700 + + Add zstd support to squashfs + +diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig +index ffb093e..1adb334 100644 +--- a/fs/squashfs/Kconfig ++++ b/fs/squashfs/Kconfig +@@ -165,6 +165,20 @@ config SQUASHFS_XZ + + If unsure, say N. + ++config SQUASHFS_ZSTD ++ bool "Include support for ZSTD compressed file systems" ++ depends on SQUASHFS ++ select ZSTD_DECOMPRESS ++ help ++ Saying Y here includes support for reading Squashfs file systems ++ compressed with ZSTD compression. ZSTD gives better compression than ++ the default ZLIB compression, while using less CPU. 
++ ++ ZSTD is not the standard compression used in Squashfs and so most ++ file systems will be readable without selecting this option. ++ ++ If unsure, say N. ++ + config SQUASHFS_4K_DEVBLK_SIZE + bool "Use 4K device block size?" + depends on SQUASHFS +diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile +index 246a6f3..6655631 100644 +--- a/fs/squashfs/Makefile ++++ b/fs/squashfs/Makefile +@@ -15,3 +15,4 @@ squashfs-$(CONFIG_SQUASHFS_LZ4) += lz4_wrapper.o + squashfs-$(CONFIG_SQUASHFS_LZO) += lzo_wrapper.o + squashfs-$(CONFIG_SQUASHFS_XZ) += xz_wrapper.o + squashfs-$(CONFIG_SQUASHFS_ZLIB) += zlib_wrapper.o ++squashfs-$(CONFIG_SQUASHFS_ZSTD) += zstd_wrapper.o +diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c +index d2bc136..8366398 100644 +--- a/fs/squashfs/decompressor.c ++++ b/fs/squashfs/decompressor.c +@@ -65,6 +65,12 @@ static const struct squashfs_decompressor squashfs_zlib_comp_ops = { + }; + #endif + ++#ifndef CONFIG_SQUASHFS_ZSTD ++static const struct squashfs_decompressor squashfs_zstd_comp_ops = { ++ NULL, NULL, NULL, NULL, ZSTD_COMPRESSION, "zstd", 0 ++}; ++#endif ++ + static const struct squashfs_decompressor squashfs_unknown_comp_ops = { + NULL, NULL, NULL, NULL, 0, "unknown", 0 + }; +@@ -75,6 +81,7 @@ static const struct squashfs_decompressor *decompressor[] = { + &squashfs_lzo_comp_ops, + &squashfs_xz_comp_ops, + &squashfs_lzma_unsupported_comp_ops, ++ &squashfs_zstd_comp_ops, + &squashfs_unknown_comp_ops + }; + +diff --git a/fs/squashfs/decompressor.h b/fs/squashfs/decompressor.h +index a25713c..0f5a8e4 100644 +--- a/fs/squashfs/decompressor.h ++++ b/fs/squashfs/decompressor.h +@@ -58,4 +58,8 @@ extern const struct squashfs_decompressor squashfs_lzo_comp_ops; + extern const struct squashfs_decompressor squashfs_zlib_comp_ops; + #endif + ++#ifdef CONFIG_SQUASHFS_ZSTD ++extern const struct squashfs_decompressor squashfs_zstd_comp_ops; ++#endif ++ + #endif +diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h 
+index 506f4ba..24d12fd 100644 +--- a/fs/squashfs/squashfs_fs.h ++++ b/fs/squashfs/squashfs_fs.h +@@ -241,6 +241,7 @@ struct meta_index { + #define LZO_COMPRESSION 3 + #define XZ_COMPRESSION 4 + #define LZ4_COMPRESSION 5 ++#define ZSTD_COMPRESSION 6 + + struct squashfs_super_block { + __le32 s_magic; +diff --git a/fs/squashfs/zstd_wrapper.c b/fs/squashfs/zstd_wrapper.c +new file mode 100644 +index 0000000..7cc9303 +--- /dev/null ++++ b/fs/squashfs/zstd_wrapper.c +@@ -0,0 +1,149 @@ ++/* ++ * Squashfs - a compressed read only filesystem for Linux ++ * ++ * Copyright (c) 2017 Facebook ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * as published by the Free Software Foundation; either version 2, ++ * or (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
++ * ++ * zstd_wrapper.c ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include "squashfs_fs.h" ++#include "squashfs_fs_sb.h" ++#include "squashfs.h" ++#include "decompressor.h" ++#include "page_actor.h" ++ ++struct workspace { ++ void *mem; ++ size_t mem_size; ++}; ++ ++static void *zstd_init(struct squashfs_sb_info *msblk, void *buff) ++{ ++ struct workspace *wksp = kmalloc(sizeof(*wksp), GFP_KERNEL); ++ if (wksp == NULL) ++ goto failed; ++ wksp->mem_size = ZSTD_DStreamWorkspaceBound(max_t(size_t, ++ msblk->block_size, SQUASHFS_METADATA_SIZE)); ++ wksp->mem = vmalloc(wksp->mem_size); ++ if (wksp->mem == NULL) ++ goto failed; ++ ++ return wksp; ++ ++failed: ++ ERROR("Failed to allocate zstd workspace\n"); ++ kfree(wksp); ++ return ERR_PTR(-ENOMEM); ++} ++ ++ ++static void zstd_free(void *strm) ++{ ++ struct workspace *wksp = strm; ++ ++ if (wksp) ++ vfree(wksp->mem); ++ kfree(wksp); ++} ++ ++ ++static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm, ++ struct buffer_head **bh, int b, int offset, int length, ++ struct squashfs_page_actor *output) ++{ ++ struct workspace *wksp = strm; ++ ZSTD_DStream *stream; ++ size_t total_out = 0; ++ size_t zstd_err; ++ int k = 0; ++ ZSTD_inBuffer in_buf = { NULL, 0, 0 }; ++ ZSTD_outBuffer out_buf = { NULL, 0, 0 }; ++ ++ stream = ZSTD_initDStream(wksp->mem_size, wksp->mem, wksp->mem_size); ++ ++ if (!stream) { ++ ERROR("Failed to initialize zstd decompressor\n"); ++ goto out; ++ } ++ ++ out_buf.size = PAGE_SIZE; ++ out_buf.dst = squashfs_first_page(output); ++ ++ do { ++ if (in_buf.pos == in_buf.size && k < b) { ++ int avail = min(length, msblk->devblksize - offset); ++ length -= avail; ++ in_buf.src = bh[k]->b_data + offset; ++ in_buf.size = avail; ++ in_buf.pos = 0; ++ offset = 0; ++ } ++ ++ if (out_buf.pos == out_buf.size) { ++ out_buf.dst = squashfs_next_page(output); ++ if (out_buf.dst == NULL) { ++ /* shouldn't run out of pages before stream is ++ * done */ ++ 
squashfs_finish_page(output); ++ goto out; ++ } ++ out_buf.pos = 0; ++ out_buf.size = PAGE_SIZE; ++ } ++ ++ total_out -= out_buf.pos; ++ zstd_err = ZSTD_decompressStream(stream, &out_buf, &in_buf); ++ total_out += out_buf.pos; /* add the additional data produced */ ++ ++ if (in_buf.pos == in_buf.size && k < b) ++ put_bh(bh[k++]); ++ } while (zstd_err != 0 && !ZSTD_isError(zstd_err)); ++ ++ squashfs_finish_page(output); ++ ++ if (ZSTD_isError(zstd_err)) { ++ ERROR("zstd decompression error: %d\n", ++ (int)ZSTD_getErrorCode(zstd_err)); ++ goto out; ++ } ++ ++ if (k < b) ++ goto out; ++ ++ return (int)total_out; ++ ++out: ++ for (; k < b; k++) ++ put_bh(bh[k]); ++ ++ return -EIO; ++} ++ ++const struct squashfs_decompressor squashfs_zstd_comp_ops = { ++ .init = zstd_init, ++ .free = zstd_free, ++ .decompress = zstd_uncompress, ++ .id = ZSTD_COMPRESSION, ++ .name = "zstd", ++ .supported = 1 ++}; diff --git a/contrib/linux-kernel/test/.gitignore b/contrib/linux-kernel/test/.gitignore new file mode 100644 index 000000000..4fc10228d --- /dev/null +++ b/contrib/linux-kernel/test/.gitignore @@ -0,0 +1 @@ +*Test diff --git a/contrib/linux-kernel/test/Makefile b/contrib/linux-kernel/test/Makefile new file mode 100644 index 000000000..01e877b7e --- /dev/null +++ b/contrib/linux-kernel/test/Makefile @@ -0,0 +1,27 @@ + +IFLAGS := -isystem include/ -I ../include/ -I ../lib/zstd/ -isystem googletest/googletest/include + +SOURCES := $(wildcard ../lib/zstd/*.c) +OBJECTS := $(patsubst %.c,%.o,$(SOURCES)) + +ARFLAGS := rcs +CXXFLAGS += -std=c++11 +CFLAGS += -g -O0 +CPPFLAGS += $(IFLAGS) + +../lib/zstd/libzstd.a: $(OBJECTS) + $(AR) $(ARFLAGS) $@ $^ + +UserlandTest: UserlandTest.cpp ../lib/zstd/libzstd.a + $(CXX) $(CXXFLAGS) $(CFLAGS) $(CPPFLAGS) $^ googletest/build/googlemock/gtest/libgtest.a googletest/build/googlemock/gtest/libgtest_main.a -o $@ + +# Install googletest +.PHONY: googletest +googletest: + @$(RM) -rf googletest + @git clone https://github.com/google/googletest + @mkdir 
-p googletest/build + @cd googletest/build && cmake .. && $(MAKE) + +clean: + $(RM) -f *.{o,a} ../lib/zstd/*.{o,a} diff --git a/contrib/linux-kernel/test/UserlandTest.cpp b/contrib/linux-kernel/test/UserlandTest.cpp new file mode 100644 index 000000000..73b30be4c --- /dev/null +++ b/contrib/linux-kernel/test/UserlandTest.cpp @@ -0,0 +1,554 @@ +extern "C" { +#include +} +#include +#include +#include +#include + +using namespace std; + +namespace { +struct WorkspaceDeleter { + void *memory; + + template void operator()(T const *) { free(memory); } +}; + +std::unique_ptr +createCCtx(ZSTD_compressionParameters cParams) { + size_t const workspaceSize = ZSTD_CCtxWorkspaceBound(cParams); + void *workspace = malloc(workspaceSize); + std::unique_ptr cctx{ + ZSTD_initCCtx(workspace, workspaceSize), WorkspaceDeleter{workspace}}; + if (!cctx) { + throw std::runtime_error{"Bad cctx"}; + } + return cctx; +} + +std::unique_ptr +createCCtx(int level, unsigned long long estimatedSrcSize = 0, + size_t dictSize = 0) { + auto const cParams = ZSTD_getCParams(level, estimatedSrcSize, dictSize); + return createCCtx(cParams); +} + +std::unique_ptr +createDCtx() { + size_t const workspaceSize = ZSTD_DCtxWorkspaceBound(); + void *workspace = malloc(workspaceSize); + std::unique_ptr dctx{ + ZSTD_initDCtx(workspace, workspaceSize), WorkspaceDeleter{workspace}}; + if (!dctx) { + throw std::runtime_error{"Bad dctx"}; + } + return dctx; +} + +std::unique_ptr +createCDict(std::string const& dict, ZSTD_parameters params) { + size_t const workspaceSize = ZSTD_CDictWorkspaceBound(params.cParams); + void *workspace = malloc(workspaceSize); + std::unique_ptr cdict{ + ZSTD_initCDict(dict.data(), dict.size(), params, workspace, + workspaceSize), + WorkspaceDeleter{workspace}}; + if (!cdict) { + throw std::runtime_error{"Bad cdict"}; + } + return cdict; +} + +std::unique_ptr +createCDict(std::string const& dict, int level) { + auto const params = ZSTD_getParams(level, 0, dict.size()); + return 
createCDict(dict, params); +} + +std::unique_ptr +createDDict(std::string const& dict) { + size_t const workspaceSize = ZSTD_DDictWorkspaceBound(); + void *workspace = malloc(workspaceSize); + std::unique_ptr ddict{ + ZSTD_initDDict(dict.data(), dict.size(), workspace, workspaceSize), + WorkspaceDeleter{workspace}}; + if (!ddict) { + throw std::runtime_error{"Bad ddict"}; + } + return ddict; +} + +std::unique_ptr +createCStream(ZSTD_parameters params, unsigned long long pledgedSrcSize = 0) { + size_t const workspaceSize = ZSTD_CStreamWorkspaceBound(params.cParams); + void *workspace = malloc(workspaceSize); + std::unique_ptr zcs{ + ZSTD_initCStream(params, pledgedSrcSize, workspace, workspaceSize)}; + if (!zcs) { + throw std::runtime_error{"bad cstream"}; + } + return zcs; +} + +std::unique_ptr +createCStream(ZSTD_compressionParameters cParams, ZSTD_CDict const &cdict, + unsigned long long pledgedSrcSize = 0) { + size_t const workspaceSize = ZSTD_CStreamWorkspaceBound(cParams); + void *workspace = malloc(workspaceSize); + std::unique_ptr zcs{ + ZSTD_initCStream_usingCDict(&cdict, pledgedSrcSize, workspace, + workspaceSize)}; + if (!zcs) { + throw std::runtime_error{"bad cstream"}; + } + return zcs; +} + +std::unique_ptr +createCStream(int level, unsigned long long pledgedSrcSize = 0) { + auto const params = ZSTD_getParams(level, pledgedSrcSize, 0); + return createCStream(params, pledgedSrcSize); +} + +std::unique_ptr +createDStream(size_t maxWindowSize = (1ULL << ZSTD_WINDOWLOG_MAX), + ZSTD_DDict const *ddict = nullptr) { + size_t const workspaceSize = ZSTD_DStreamWorkspaceBound(maxWindowSize); + void *workspace = malloc(workspaceSize); + std::unique_ptr zds{ + ddict == nullptr + ? 
ZSTD_initDStream(maxWindowSize, workspace, workspaceSize) + : ZSTD_initDStream_usingDDict(maxWindowSize, ddict, workspace, + workspaceSize)}; + if (!zds) { + throw std::runtime_error{"bad dstream"}; + } + return zds; +} + +std::string compress(ZSTD_CCtx &cctx, std::string const &data, + ZSTD_parameters params, std::string const &dict = "") { + std::string compressed; + compressed.resize(ZSTD_compressBound(data.size())); + size_t const rc = + dict.empty() + ? ZSTD_compressCCtx(&cctx, &compressed[0], compressed.size(), + data.data(), data.size(), params) + : ZSTD_compress_usingDict(&cctx, &compressed[0], compressed.size(), + data.data(), data.size(), dict.data(), + dict.size(), params); + if (ZSTD_isError(rc)) { + throw std::runtime_error{"compression error"}; + } + compressed.resize(rc); + return compressed; +} + +std::string compress(ZSTD_CCtx& cctx, std::string const& data, int level, std::string const& dict = "") { + auto const params = ZSTD_getParams(level, 0, dict.size()); + return compress(cctx, data, params, dict); +} + +std::string decompress(ZSTD_DCtx& dctx, std::string const& compressed, size_t decompressedSize, std::string const& dict = "") { + std::string decompressed; + decompressed.resize(decompressedSize); + size_t const rc = + dict.empty() + ? 
ZSTD_decompressDCtx(&dctx, &decompressed[0], decompressed.size(), + compressed.data(), compressed.size()) + : ZSTD_decompress_usingDict( + &dctx, &decompressed[0], decompressed.size(), compressed.data(), + compressed.size(), dict.data(), dict.size()); + if (ZSTD_isError(rc)) { + throw std::runtime_error{"decompression error"}; + } + decompressed.resize(rc); + return decompressed; +} + +std::string compress(ZSTD_CCtx& cctx, std::string const& data, ZSTD_CDict& cdict) { + std::string compressed; + compressed.resize(ZSTD_compressBound(data.size())); + size_t const rc = + ZSTD_compress_usingCDict(&cctx, &compressed[0], compressed.size(), + data.data(), data.size(), &cdict); + if (ZSTD_isError(rc)) { + throw std::runtime_error{"compression error"}; + } + compressed.resize(rc); + return compressed; +} + +std::string decompress(ZSTD_DCtx& dctx, std::string const& compressed, size_t decompressedSize, ZSTD_DDict& ddict) { + std::string decompressed; + decompressed.resize(decompressedSize); + size_t const rc = + ZSTD_decompress_usingDDict(&dctx, &decompressed[0], decompressed.size(), + compressed.data(), compressed.size(), &ddict); + if (ZSTD_isError(rc)) { + throw std::runtime_error{"decompression error"}; + } + decompressed.resize(rc); + return decompressed; +} + +std::string compress(ZSTD_CStream& zcs, std::string const& data) { + std::string compressed; + compressed.resize(ZSTD_compressBound(data.size())); + ZSTD_inBuffer in = {data.data(), data.size(), 0}; + ZSTD_outBuffer out = {&compressed[0], compressed.size(), 0}; + while (in.pos != in.size) { + size_t const rc = ZSTD_compressStream(&zcs, &out, &in); + if (ZSTD_isError(rc)) { + throw std::runtime_error{"compress stream failed"}; + } + } + size_t const rc = ZSTD_endStream(&zcs, &out); + if (rc != 0) { + throw std::runtime_error{"compress end failed"}; + } + compressed.resize(out.pos); + return compressed; +} + +std::string decompress(ZSTD_DStream &zds, std::string const &compressed, + size_t decompressedSize) { + 
std::string decompressed; + decompressed.resize(decompressedSize); + ZSTD_inBuffer in = {compressed.data(), compressed.size(), 0}; + ZSTD_outBuffer out = {&decompressed[0], decompressed.size(), 0}; + while (in.pos != in.size) { + size_t const rc = ZSTD_decompressStream(&zds, &out, &in); + if (ZSTD_isError(rc)) { + throw std::runtime_error{"decompress stream failed"}; + } + } + decompressed.resize(out.pos); + return decompressed; +} + +std::string makeData(size_t size) { + std::string result; + result.reserve(size + 20); + while (result.size() < size) { + result += "Hello world"; + } + return result; +} + +std::string const kData = "Hello world"; +std::string const kPlainDict = makeData(10000); +std::string const kZstdDict{ + "\x37\xA4\x30\xEC\x99\x69\x58\x1C\x21\x10\xD8\x4A\x84\x01\xCC\xF3" + "\x3C\xCF\x9B\x25\xBB\xC9\x6E\xB2\x9B\xEC\x26\xAD\xCF\xDF\x4E\xCD" + "\xF3\x2C\x3A\x21\x84\x10\x42\x08\x21\x01\x33\xF1\x78\x3C\x1E\x8F" + "\xC7\xE3\xF1\x78\x3C\xCF\xF3\xBC\xF7\xD4\x42\x41\x41\x41\x41\x41" + "\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41" + "\x41\x41\x41\x41\xA1\x50\x28\x14\x0A\x85\x42\xA1\x50\x28\x14\x0A" + "\x85\xA2\x28\x8A\xA2\x28\x4A\x29\x7D\x74\xE1\xE1\xE1\xE1\xE1\xE1" + "\xE1\xE1\xE1\xE1\xE1\xE1\xE1\xE1\xE1\xE1\xE1\xE1\xE1\xF1\x78\x3C" + "\x1E\x8F\xC7\xE3\xF1\x78\x9E\xE7\x79\xEF\x01\x01\x00\x00\x00\x04" + "\x00\x00\x00\x08\x00\x00\x00" + "0123456789", + 161}; +} + +TEST(Block, CCtx) { + auto cctx = createCCtx(1); + auto const compressed = compress(*cctx, kData, 1); + auto dctx = createDCtx(); + auto const decompressed = decompress(*dctx, compressed, kData.size()); + EXPECT_EQ(kData, decompressed); +} + +TEST(Block, NoContentSize) { + auto cctx = createCCtx(1); + auto const c = compress(*cctx, kData, 1); + auto const size = ZSTD_findDecompressedSize(c.data(), c.size()); + EXPECT_EQ(ZSTD_CONTENTSIZE_UNKNOWN, size); +} + +TEST(Block, ContentSize) { + auto cctx = createCCtx(1); + auto params = ZSTD_getParams(1, 0, 0); + 
params.fParams.contentSizeFlag = 1; + auto const c = compress(*cctx, kData, params); + auto const size = ZSTD_findDecompressedSize(c.data(), c.size()); + EXPECT_EQ(kData.size(), size); +} + +TEST(Block, CCtxLevelIncrease) { + std::string c; + auto cctx = createCCtx(6); + auto dctx = createDCtx(); + for (int level = 1; level <= 6; ++level) { + auto compressed = compress(*cctx, kData, level); + auto const decompressed = decompress(*dctx, compressed, kData.size()); + EXPECT_EQ(kData, decompressed); + } +} + +TEST(Block, PlainDict) { + auto cctx = createCCtx(1); + auto const compressed = compress(*cctx, kData, 1, kPlainDict); + auto dctx = createDCtx(); + EXPECT_ANY_THROW(decompress(*dctx, compressed, kData.size())); + auto const decompressed = + decompress(*dctx, compressed, kData.size(), kPlainDict); + EXPECT_EQ(kData, decompressed); +} + +TEST(Block, ZstdDict) { + auto cctx = createCCtx(1); + auto const compressed = compress(*cctx, kData, 1, kZstdDict); + auto dctx = createDCtx(); + EXPECT_ANY_THROW(decompress(*dctx, compressed, kData.size())); + auto const decompressed = + decompress(*dctx, compressed, kData.size(), kZstdDict); + EXPECT_EQ(kData, decompressed); +} + +TEST(Block, PreprocessedPlainDict) { + auto cctx = createCCtx(1); + auto const cdict = createCDict(kPlainDict, 1); + auto const compressed = compress(*cctx, kData, *cdict); + auto dctx = createDCtx(); + auto const ddict = createDDict(kPlainDict); + EXPECT_ANY_THROW(decompress(*dctx, compressed, kData.size())); + auto const decompressed = + decompress(*dctx, compressed, kData.size(), *ddict); + EXPECT_EQ(kData, decompressed); +} + +TEST(Block, PreprocessedZstdDict) { + auto cctx = createCCtx(1); + auto const cdict = createCDict(kZstdDict, 1); + auto const compressed = compress(*cctx, kData, *cdict); + auto dctx = createDCtx(); + auto const ddict = createDDict(kZstdDict); + EXPECT_ANY_THROW(decompress(*dctx, compressed, kData.size())); + auto const decompressed = + decompress(*dctx, compressed, 
kData.size(), *ddict); + EXPECT_EQ(kData, decompressed); +} + +TEST(Block, ReinitializeCCtx) { + auto cctx = createCCtx(1); + { + auto const compressed = compress(*cctx, kData, 1); + auto dctx = createDCtx(); + auto const decompressed = decompress(*dctx, compressed, kData.size()); + EXPECT_EQ(kData, decompressed); + } + // Create the cctx with the same memory + auto d = cctx.get_deleter(); + auto raw = cctx.release(); + auto params = ZSTD_getParams(1, 0, 0); + cctx.reset( + ZSTD_initCCtx(d.memory, ZSTD_CCtxWorkspaceBound(params.cParams))); + // Repeat + { + auto const compressed = compress(*cctx, kData, 1); + auto dctx = createDCtx(); + auto const decompressed = decompress(*dctx, compressed, kData.size()); + EXPECT_EQ(kData, decompressed); + } +} + +TEST(Block, ReinitializeDCtx) { + auto dctx = createDCtx(); + { + auto cctx = createCCtx(1); + auto const compressed = compress(*cctx, kData, 1); + auto const decompressed = decompress(*dctx, compressed, kData.size()); + EXPECT_EQ(kData, decompressed); + } + // Create the cctx with the same memory + auto d = dctx.get_deleter(); + auto raw = dctx.release(); + dctx.reset(ZSTD_initDCtx(d.memory, ZSTD_DCtxWorkspaceBound())); + // Repeat + { + auto cctx = createCCtx(1); + auto const compressed = compress(*cctx, kData, 1); + auto dctx = createDCtx(); + auto const decompressed = decompress(*dctx, compressed, kData.size()); + EXPECT_EQ(kData, decompressed); + } +} + +TEST(Stream, Basic) { + auto zcs = createCStream(1); + auto const compressed = compress(*zcs, kData); + auto zds = createDStream(); + auto const decompressed = decompress(*zds, compressed, kData.size()); + EXPECT_EQ(kData, decompressed); +} + +TEST(Stream, PlainDict) { + auto params = ZSTD_getParams(1, kData.size(), kPlainDict.size()); + params.cParams.windowLog = 17; + auto cdict = createCDict(kPlainDict, params); + auto zcs = createCStream(params.cParams, *cdict, kData.size()); + auto const compressed = compress(*zcs, kData); + auto const contentSize = + 
ZSTD_findDecompressedSize(compressed.data(), compressed.size()); + EXPECT_ANY_THROW(decompress(*createDStream(), compressed, kData.size())); + auto ddict = createDDict(kPlainDict); + auto zds = createDStream(1 << 17, ddict.get()); + auto const decompressed = decompress(*zds, compressed, kData.size()); + EXPECT_EQ(kData, decompressed); +} + +TEST(Stream, ZstdDict) { + auto params = ZSTD_getParams(1, 0, 0); + params.cParams.windowLog = 17; + auto cdict = createCDict(kZstdDict, 1); + auto zcs = createCStream(params.cParams, *cdict); + auto const compressed = compress(*zcs, kData); + EXPECT_ANY_THROW(decompress(*createDStream(), compressed, kData.size())); + auto ddict = createDDict(kZstdDict); + auto zds = createDStream(1 << 17, ddict.get()); + auto const decompressed = decompress(*zds, compressed, kData.size()); + EXPECT_EQ(kData, decompressed); +} + +TEST(Stream, ResetCStream) { + auto zcs = createCStream(1); + auto zds = createDStream(); + { + auto const compressed = compress(*zcs, kData); + auto const decompressed = decompress(*zds, compressed, kData.size()); + EXPECT_EQ(kData, decompressed); + } + { + ZSTD_resetCStream(zcs.get(), 0); + auto const compressed = compress(*zcs, kData); + auto const decompressed = decompress(*zds, compressed, kData.size()); + EXPECT_EQ(kData, decompressed); + } +} + +TEST(Stream, ResetDStream) { + auto zcs = createCStream(1); + auto zds = createDStream(); + auto const compressed = compress(*zcs, kData); + EXPECT_ANY_THROW(decompress(*zds, kData, kData.size())); + EXPECT_ANY_THROW(decompress(*zds, compressed, kData.size())); + ZSTD_resetDStream(zds.get()); + auto const decompressed = decompress(*zds, compressed, kData.size()); + EXPECT_EQ(kData, decompressed); +} + +TEST(Stream, Flush) { + auto zcs = createCStream(1); + auto zds = createDStream(); + std::string compressed; + { + compressed.resize(ZSTD_compressBound(kData.size())); + ZSTD_inBuffer in = {kData.data(), kData.size(), 0}; + ZSTD_outBuffer out = {&compressed[0], 
compressed.size(), 0}; + while (in.pos != in.size) { + size_t const rc = ZSTD_compressStream(zcs.get(), &out, &in); + if (ZSTD_isError(rc)) { + throw std::runtime_error{"compress stream failed"}; + } + } + EXPECT_EQ(0, out.pos); + size_t const rc = ZSTD_flushStream(zcs.get(), &out); + if (rc != 0) { + throw std::runtime_error{"compress end failed"}; + } + compressed.resize(out.pos); + EXPECT_LT(0, out.pos); + } + std::string decompressed; + { + decompressed.resize(kData.size()); + ZSTD_inBuffer in = {compressed.data(), compressed.size(), 0}; + ZSTD_outBuffer out = {&decompressed[0], decompressed.size(), 0}; + while (in.pos != in.size) { + size_t const rc = ZSTD_decompressStream(zds.get(), &out, &in); + if (ZSTD_isError(rc)) { + throw std::runtime_error{"decompress stream failed"}; + } + } + } + EXPECT_EQ(kData, decompressed); +} + +#define TEST_SYMBOL(symbol) \ + do { \ + extern void *__##symbol; \ + EXPECT_NE((void *)0, __##symbol); \ + } while (0) + +TEST(API, Symbols) { + TEST_SYMBOL(ZSTD_CCtxWorkspaceBound); + TEST_SYMBOL(ZSTD_initCCtx); + TEST_SYMBOL(ZSTD_compressCCtx); + TEST_SYMBOL(ZSTD_compress_usingDict); + TEST_SYMBOL(ZSTD_DCtxWorkspaceBound); + TEST_SYMBOL(ZSTD_initDCtx); + TEST_SYMBOL(ZSTD_decompressDCtx); + TEST_SYMBOL(ZSTD_decompress_usingDict); + + TEST_SYMBOL(ZSTD_CDictWorkspaceBound); + TEST_SYMBOL(ZSTD_initCDict); + TEST_SYMBOL(ZSTD_compress_usingCDict); + TEST_SYMBOL(ZSTD_DDictWorkspaceBound); + TEST_SYMBOL(ZSTD_initDDict); + TEST_SYMBOL(ZSTD_decompress_usingDDict); + + TEST_SYMBOL(ZSTD_CStreamWorkspaceBound); + TEST_SYMBOL(ZSTD_initCStream); + TEST_SYMBOL(ZSTD_initCStream_usingCDict); + TEST_SYMBOL(ZSTD_resetCStream); + TEST_SYMBOL(ZSTD_compressStream); + TEST_SYMBOL(ZSTD_flushStream); + TEST_SYMBOL(ZSTD_endStream); + TEST_SYMBOL(ZSTD_CStreamInSize); + TEST_SYMBOL(ZSTD_CStreamOutSize); + TEST_SYMBOL(ZSTD_DStreamWorkspaceBound); + TEST_SYMBOL(ZSTD_initDStream); + TEST_SYMBOL(ZSTD_initDStream_usingDDict); + TEST_SYMBOL(ZSTD_resetDStream); + 
TEST_SYMBOL(ZSTD_decompressStream); + TEST_SYMBOL(ZSTD_DStreamInSize); + TEST_SYMBOL(ZSTD_DStreamOutSize); + + TEST_SYMBOL(ZSTD_findFrameCompressedSize); + TEST_SYMBOL(ZSTD_getFrameContentSize); + TEST_SYMBOL(ZSTD_findDecompressedSize); + + TEST_SYMBOL(ZSTD_getCParams); + TEST_SYMBOL(ZSTD_getParams); + TEST_SYMBOL(ZSTD_checkCParams); + TEST_SYMBOL(ZSTD_adjustCParams); + + TEST_SYMBOL(ZSTD_isFrame); + TEST_SYMBOL(ZSTD_getDictID_fromDict); + TEST_SYMBOL(ZSTD_getDictID_fromDDict); + TEST_SYMBOL(ZSTD_getDictID_fromFrame); + + TEST_SYMBOL(ZSTD_compressBegin); + TEST_SYMBOL(ZSTD_compressBegin_usingDict); + TEST_SYMBOL(ZSTD_compressBegin_advanced); + TEST_SYMBOL(ZSTD_copyCCtx); + TEST_SYMBOL(ZSTD_compressBegin_usingCDict); + TEST_SYMBOL(ZSTD_compressContinue); + TEST_SYMBOL(ZSTD_compressEnd); + TEST_SYMBOL(ZSTD_getFrameParams); + TEST_SYMBOL(ZSTD_decompressBegin); + TEST_SYMBOL(ZSTD_decompressBegin_usingDict); + TEST_SYMBOL(ZSTD_copyDCtx); + TEST_SYMBOL(ZSTD_nextSrcSizeToDecompress); + TEST_SYMBOL(ZSTD_decompressContinue); + TEST_SYMBOL(ZSTD_nextInputType); + + TEST_SYMBOL(ZSTD_getBlockSizeMax); + TEST_SYMBOL(ZSTD_compressBlock); + TEST_SYMBOL(ZSTD_decompressBlock); + TEST_SYMBOL(ZSTD_insertBlock); +} diff --git a/contrib/linux-kernel/test/include/asm/unaligned.h b/contrib/linux-kernel/test/include/asm/unaligned.h new file mode 100644 index 000000000..4f4828126 --- /dev/null +++ b/contrib/linux-kernel/test/include/asm/unaligned.h @@ -0,0 +1,177 @@ +#ifndef ASM_UNALIGNED_H +#define ASM_UNALIGNED_H + +#include <assert.h> +#include <stdint.h> +#include <string.h> + +#define _LITTLE_ENDIAN 1 + +static unsigned _isLittleEndian(void) +{ + const union { uint32_t u; uint8_t c[4]; } one = { 1 }; + assert(_LITTLE_ENDIAN == one.c[0]); + return _LITTLE_ENDIAN; +} + +static uint16_t _swap16(uint16_t in) +{ + /* byte swap: masks must cover whole bytes (0xFF/0xFF00, not 0xF/0xF0) */ + return ((in & 0xFF) << 8) + ((in & 0xFF00) >> 8); +} + +static uint32_t _swap32(uint32_t in) +{ + return __builtin_bswap32(in); +} + +static uint64_t _swap64(uint64_t in) +{ + return __builtin_bswap64(in); +} + +/* 
Little endian */ +static uint16_t get_unaligned_le16(const void* memPtr) +{ + uint16_t val; + memcpy(&val, memPtr, sizeof(val)); + if (!_isLittleEndian()) val = _swap16(val); + return val; +} + +static uint32_t get_unaligned_le32(const void* memPtr) +{ + uint32_t val; + memcpy(&val, memPtr, sizeof(val)); + if (!_isLittleEndian()) val = _swap32(val); + return val; +} + +static uint64_t get_unaligned_le64(const void* memPtr) +{ + uint64_t val; + memcpy(&val, memPtr, sizeof(val)); + if (!_isLittleEndian()) val = _swap64(val); + return val; +} + +static void put_unaligned_le16(uint16_t value, void* memPtr) +{ + if (!_isLittleEndian()) value = _swap16(value); + memcpy(memPtr, &value, sizeof(value)); +} + +static void put_unaligned_le32(uint32_t value, void* memPtr) +{ + if (!_isLittleEndian()) value = _swap32(value); + memcpy(memPtr, &value, sizeof(value)); +} + +static void put_unaligned_le64(uint64_t value, void* memPtr) +{ + if (!_isLittleEndian()) value = _swap64(value); + memcpy(memPtr, &value, sizeof(value)); +} + +/* big endian */ +static uint32_t get_unaligned_be32(const void* memPtr) +{ + uint32_t val; + memcpy(&val, memPtr, sizeof(val)); + if (_isLittleEndian()) val = _swap32(val); + return val; +} + +static uint64_t get_unaligned_be64(const void* memPtr) +{ + uint64_t val; + memcpy(&val, memPtr, sizeof(val)); + if (_isLittleEndian()) val = _swap64(val); + return val; +} + +static void put_unaligned_be32(uint32_t value, void* memPtr) +{ + if (_isLittleEndian()) value = _swap32(value); + memcpy(memPtr, &value, sizeof(value)); +} + +static void put_unaligned_be64(uint64_t value, void* memPtr) +{ + if (_isLittleEndian()) value = _swap64(value); + memcpy(memPtr, &value, sizeof(value)); +} + +/* generic */ +extern void __bad_unaligned_access_size(void); + +#define __get_unaligned_le(ptr) ((typeof(*(ptr)))({ \ + __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \ + __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 4, 
get_unaligned_le32((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)), \ + __bad_unaligned_access_size())))); \ + })) + +#define __get_unaligned_be(ptr) ((typeof(*(ptr)))({ \ + __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \ + __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)), \ + __bad_unaligned_access_size())))); \ + })) + +#define __put_unaligned_le(val, ptr) \ + ({ \ + void *__gu_p = (ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + *(uint8_t *)__gu_p = (uint8_t)(val); \ + break; \ + case 2: \ + put_unaligned_le16((uint16_t)(val), __gu_p); \ + break; \ + case 4: \ + put_unaligned_le32((uint32_t)(val), __gu_p); \ + break; \ + case 8: \ + put_unaligned_le64((uint64_t)(val), __gu_p); \ + break; \ + default: \ + __bad_unaligned_access_size(); \ + break; \ + } \ + (void)0; \ + }) + +#define __put_unaligned_be(val, ptr) \ + ({ \ + void *__gu_p = (ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + *(uint8_t *)__gu_p = (uint8_t)(val); \ + break; \ + case 2: \ + put_unaligned_be16((uint16_t)(val), __gu_p); \ + break; \ + case 4: \ + put_unaligned_be32((uint32_t)(val), __gu_p); \ + break; \ + case 8: \ + put_unaligned_be64((uint64_t)(val), __gu_p); \ + break; \ + default: \ + __bad_unaligned_access_size(); \ + break; \ + } \ + (void)0; \ + }) + +#if _LITTLE_ENDIAN +# define get_unaligned __get_unaligned_le +# define put_unaligned __put_unaligned_le +#else +# define get_unaligned __get_unaligned_be +# define put_unaligned __put_unaligned_be +#endif + +#endif // ASM_UNALIGNED_H diff --git a/contrib/linux-kernel/test/include/linux/compiler.h b/contrib/linux-kernel/test/include/linux/compiler.h new file mode 100644 index 000000000..7991b8b29 --- /dev/null +++ b/contrib/linux-kernel/test/include/linux/compiler.h @@ -0,0 +1,12 @@ +#ifndef LINUX_COMIPLER_H_ +#define 
LINUX_COMIPLER_H_ + +#ifndef __always_inline +# define __always_inline inline +#endif + +#ifndef noinline +# define noinline __attribute__((__noinline__)) +#endif + +#endif // LINUX_COMIPLER_H_ diff --git a/contrib/linux-kernel/test/include/linux/kernel.h b/contrib/linux-kernel/test/include/linux/kernel.h new file mode 100644 index 000000000..b208e23ba --- /dev/null +++ b/contrib/linux-kernel/test/include/linux/kernel.h @@ -0,0 +1,14 @@ +#ifndef LINUX_KERNEL_H_ +#define LINUX_KERNEL_H_ + +#define ALIGN(x, a) ({ \ + typeof(x) const __xe = (x); \ + typeof(a) const __ae = (a); \ + typeof(a) const __m = __ae - 1; \ + typeof(x) const __r = __xe & __m; \ + __xe + (__r ? (__ae - __r) : 0); \ + }) + +#define PTR_ALIGN(p, a) (typeof(p))ALIGN((unsigned long long)(p), (a)) + +#endif // LINUX_KERNEL_H_ diff --git a/contrib/linux-kernel/test/include/linux/module.h b/contrib/linux-kernel/test/include/linux/module.h new file mode 100644 index 000000000..ef514c349 --- /dev/null +++ b/contrib/linux-kernel/test/include/linux/module.h @@ -0,0 +1,10 @@ +#ifndef LINUX_MODULE_H_ +#define LINUX_MODULE_H_ + +#define EXPORT_SYMBOL(symbol) \ + void* __##symbol = symbol +#define MODULE_LICENSE(license) static char const *const LICENSE = license +#define MODULE_DESCRIPTION(description) \ + static char const *const DESCRIPTION = description + +#endif // LINUX_MODULE_H_ diff --git a/contrib/linux-kernel/test/include/linux/string.h b/contrib/linux-kernel/test/include/linux/string.h new file mode 100644 index 000000000..3b2f59002 --- /dev/null +++ b/contrib/linux-kernel/test/include/linux/string.h @@ -0,0 +1 @@ +#include diff --git a/contrib/linux-kernel/test/include/linux/types.h b/contrib/linux-kernel/test/include/linux/types.h new file mode 100644 index 000000000..c2d4f4b72 --- /dev/null +++ b/contrib/linux-kernel/test/include/linux/types.h @@ -0,0 +1,2 @@ +#include +#include diff --git a/contrib/meson/meson.build b/contrib/meson/meson.build index 369461335..8cbdcabec 100644 --- 
a/contrib/meson/meson.build +++ b/contrib/meson/meson.build @@ -15,7 +15,7 @@ libzstd_includes = [include_directories(common_dir, dictbuilder_dir, compress_di if get_option('legacy_support') message('Enabling legacy support') - libzstd_cflags = ['-DZSTD_LEGACY_SUPPORT=1'] + libzstd_cflags = ['-DZSTD_LEGACY_SUPPORT=4'] legacy_dir = join_paths(lib_dir, 'legacy') libzstd_includes += [include_directories(legacy_dir)] diff --git a/contrib/pzstd/Options.cpp b/contrib/pzstd/Options.cpp index a0d969393..1f53f2bff 100644 --- a/contrib/pzstd/Options.cpp +++ b/contrib/pzstd/Options.cpp @@ -91,7 +91,7 @@ void usage() { std::fprintf(stderr, " -# : # compression level (1-%d, default:%d)\n", kMaxNonUltraCompressionLevel, kDefaultCompressionLevel); std::fprintf(stderr, " -d, --decompress : decompression\n"); std::fprintf(stderr, " -o file : result stored into `file` (only if 1 input file)\n"); - std::fprintf(stderr, " -f, --force : overwrite output without prompting\n"); + std::fprintf(stderr, " -f, --force : overwrite output without prompting, (de)compress links\n"); std::fprintf(stderr, " --rm : remove source file(s) after successful (de)compression\n"); std::fprintf(stderr, " -k, --keep : preserve source file(s) (default)\n"); std::fprintf(stderr, " -h, --help : display help and exit\n"); @@ -121,6 +121,7 @@ Options::Status Options::parse(int argc, const char **argv) { bool recursive = false; bool ultra = false; bool forceStdout = false; + bool followLinks = false; // Local copy of input files, which are pointers into argv. 
std::vector localInputFiles; for (int i = 1; i < argc; ++i) { @@ -255,6 +256,7 @@ Options::Status Options::parse(int argc, const char **argv) { case 'f': overwrite = true; forceStdout = true; + followLinks = true; break; case 't': test = true; @@ -328,13 +330,29 @@ Options::Status Options::parse(int argc, const char **argv) { } } + g_utilDisplayLevel = verbosity; + // Remove local input files that are symbolic links + if (!followLinks) { + std::remove_if(localInputFiles.begin(), localInputFiles.end(), + [&](const char *path) { + bool isLink = UTIL_isLink(path); + if (isLink && verbosity >= 2) { + std::fprintf( + stderr, + "Warning : %s is symbolic link, ignoring\n", + path); + } + return isLink; + }); + } + // Translate input files/directories into files to (de)compress if (recursive) { char *scratchBuffer = nullptr; unsigned numFiles = 0; const char **files = UTIL_createFileList(localInputFiles.data(), localInputFiles.size(), - &scratchBuffer, &numFiles); + &scratchBuffer, &numFiles, followLinks); if (files == nullptr) { std::fprintf(stderr, "Error traversing directories\n"); return Status::Failure; diff --git a/contrib/pzstd/utils/test/ThreadPoolTest.cpp b/contrib/pzstd/utils/test/ThreadPoolTest.cpp index 1d857aae8..89085afd4 100644 --- a/contrib/pzstd/utils/test/ThreadPoolTest.cpp +++ b/contrib/pzstd/utils/test/ThreadPoolTest.cpp @@ -10,6 +10,7 @@ #include #include +#include #include #include @@ -34,16 +35,19 @@ TEST(ThreadPool, AllJobsFinished) { std::atomic numFinished{0}; std::atomic start{false}; { + std::cerr << "Creating executor" << std::endl; ThreadPool executor(5); for (int i = 0; i < 10; ++i) { executor.add([ &numFinished, &start ] { while (!start.load()) { - // spin + std::this_thread::yield(); } ++numFinished; }); } + std::cerr << "Starting" << std::endl; start.store(true); + std::cerr << "Finishing" << std::endl; } EXPECT_EQ(10, numFinished.load()); } diff --git a/contrib/pzstd/utils/test/WorkQueueTest.cpp 
b/contrib/pzstd/utils/test/WorkQueueTest.cpp index 7f58ccb3f..8caf170d2 100644 --- a/contrib/pzstd/utils/test/WorkQueueTest.cpp +++ b/contrib/pzstd/utils/test/WorkQueueTest.cpp @@ -10,6 +10,7 @@ #include "utils/WorkQueue.h" #include +#include #include #include #include @@ -201,11 +202,13 @@ TEST(WorkQueue, BoundedSizeMPMC) { WorkQueue queue(10); std::vector results(200, -1); std::mutex mutex; + std::cerr << "Creating popperThreads" << std::endl; std::vector popperThreads; for (int i = 0; i < 4; ++i) { popperThreads.emplace_back(Popper{&queue, results.data(), &mutex}); } + std::cerr << "Creating pusherThreads" << std::endl; std::vector pusherThreads; for (int i = 0; i < 2; ++i) { auto min = i * 100; @@ -218,15 +221,19 @@ TEST(WorkQueue, BoundedSizeMPMC) { }); } + std::cerr << "Joining pusherThreads" << std::endl; for (auto& thread : pusherThreads) { thread.join(); } + std::cerr << "Finishing queue" << std::endl; queue.finish(); + std::cerr << "Joining popperThreads" << std::endl; for (auto& thread : popperThreads) { thread.join(); } + std::cerr << "Inspecting results" << std::endl; for (int i = 0; i < 200; ++i) { EXPECT_EQ(i, results[i]); } diff --git a/contrib/seekable_format/examples/.gitignore b/contrib/seekable_format/examples/.gitignore new file mode 100644 index 000000000..df2f9ab07 --- /dev/null +++ b/contrib/seekable_format/examples/.gitignore @@ -0,0 +1,4 @@ +seekable_compression +seekable_decompression +parallel_processing +parallel_compression diff --git a/contrib/seekable_format/examples/Makefile b/contrib/seekable_format/examples/Makefile new file mode 100644 index 000000000..625e1fcc8 --- /dev/null +++ b/contrib/seekable_format/examples/Makefile @@ -0,0 +1,42 @@ +# ################################################################ +# Copyright (c) 2017-present, Facebook, Inc. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
An additional grant +# of patent rights can be found in the PATENTS file in the same directory. +# ################################################################ + +# This Makefile presumes libzstd is built, using `make` in / or /lib/ + +LDFLAGS += ../../../lib/libzstd.a +CPPFLAGS += -I../ -I../../../lib -I../../../lib/common + +CFLAGS ?= -O3 +CFLAGS += -g + +SEEKABLE_OBJS = ../zstdseek_compress.c ../zstdseek_decompress.c + +.PHONY: default all clean test + +default: all + +all: seekable_compression seekable_decompression parallel_processing + +seekable_compression : seekable_compression.c $(SEEKABLE_OBJS) + $(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@ + +seekable_decompression : seekable_decompression.c $(SEEKABLE_OBJS) + $(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@ + +parallel_processing : parallel_processing.c $(SEEKABLE_OBJS) + $(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@ -pthread + +parallel_compression : parallel_compression.c $(SEEKABLE_OBJS) + $(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@ -pthread + +clean: + @rm -f core *.o tmp* result* *.zst \ + seekable_compression seekable_decompression \ + parallel_processing parallel_compression + @echo Cleaning completed diff --git a/contrib/seekable_format/examples/parallel_compression.c b/contrib/seekable_format/examples/parallel_compression.c new file mode 100644 index 000000000..89a13185f --- /dev/null +++ b/contrib/seekable_format/examples/parallel_compression.c @@ -0,0 +1,214 @@ +/** + * Copyright 2017-present, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the license found in the + * LICENSE-examples file in the root directory of this source tree. + */ + +#include // malloc, free, exit, atoi +#include // fprintf, perror, feof, fopen, etc. 
+#include // strlen, memset, strcat +#define ZSTD_STATIC_LINKING_ONLY +#include // presumes zstd library is installed +#include +#if defined(WIN32) || defined(_WIN32) +# include +# define SLEEP(x) Sleep(x) +#else +# include +# define SLEEP(x) usleep(x * 1000) +#endif + +#define XXH_NAMESPACE ZSTD_ +#include "xxhash.h" + +#include "pool.h" // use zstd thread pool for demo + +#include "zstd_seekable.h" + +static void* malloc_orDie(size_t size) +{ + void* const buff = malloc(size); + if (buff) return buff; + /* error */ + perror("malloc:"); + exit(1); +} + +static FILE* fopen_orDie(const char *filename, const char *instruction) +{ + FILE* const inFile = fopen(filename, instruction); + if (inFile) return inFile; + /* error */ + perror(filename); + exit(3); +} + +static size_t fread_orDie(void* buffer, size_t sizeToRead, FILE* file) +{ + size_t const readSize = fread(buffer, 1, sizeToRead, file); + if (readSize == sizeToRead) return readSize; /* good */ + if (feof(file)) return readSize; /* good, reached end of file */ + /* error */ + perror("fread"); + exit(4); +} + +static size_t fwrite_orDie(const void* buffer, size_t sizeToWrite, FILE* file) +{ + size_t const writtenSize = fwrite(buffer, 1, sizeToWrite, file); + if (writtenSize == sizeToWrite) return sizeToWrite; /* good */ + /* error */ + perror("fwrite"); + exit(5); +} + +static size_t fclose_orDie(FILE* file) +{ + if (!fclose(file)) return 0; + /* error */ + perror("fclose"); + exit(6); +} + +static void fseek_orDie(FILE* file, long int offset, int origin) +{ + if (!fseek(file, offset, origin)) { + if (!fflush(file)) return; + } + /* error */ + perror("fseek"); + exit(7); +} + +static long int ftell_orDie(FILE* file) +{ + long int off = ftell(file); + if (off != -1) return off; + /* error */ + perror("ftell"); + exit(8); +} + +struct job { + const void* src; + size_t srcSize; + void* dst; + size_t dstSize; + + unsigned checksum; + + int compressionLevel; + int done; +}; + +static void compressFrame(void* opaque) 
+{ + struct job* job = opaque; + + job->checksum = XXH64(job->src, job->srcSize, 0); + + size_t ret = ZSTD_compress(job->dst, job->dstSize, job->src, job->srcSize, job->compressionLevel); + if (ZSTD_isError(ret)) { + fprintf(stderr, "ZSTD_compress() error : %s \n", ZSTD_getErrorName(ret)); + exit(20); + } + + job->dstSize = ret; + job->done = 1; +} + +static void compressFile_orDie(const char* fname, const char* outName, int cLevel, unsigned frameSize, int nbThreads) +{ + POOL_ctx* pool = POOL_create(nbThreads, nbThreads); + if (pool == NULL) { fprintf(stderr, "POOL_create() error \n"); exit(9); } + + FILE* const fin = fopen_orDie(fname, "rb"); + FILE* const fout = fopen_orDie(outName, "wb"); + + if (ZSTD_compressBound(frameSize) > 0xFFFFFFFFU) { fprintf(stderr, "Frame size too large \n"); exit(10); } + unsigned dstSize = ZSTD_compressBound(frameSize); + + + fseek_orDie(fin, 0, SEEK_END); + long int length = ftell_orDie(fin); + fseek_orDie(fin, 0, SEEK_SET); + + size_t numFrames = (length + frameSize - 1) / frameSize; + + struct job* jobs = malloc_orDie(sizeof(struct job) * numFrames); + + size_t i; + for(i = 0; i < numFrames; i++) { + void* in = malloc_orDie(frameSize); + void* out = malloc_orDie(dstSize); + + size_t inSize = fread_orDie(in, frameSize, fin); + + jobs[i].src = in; + jobs[i].srcSize = inSize; + jobs[i].dst = out; + jobs[i].dstSize = dstSize; + jobs[i].compressionLevel = cLevel; + jobs[i].done = 0; + POOL_add(pool, compressFrame, &jobs[i]); + } + + ZSTD_frameLog* fl = ZSTD_seekable_createFrameLog(1); + if (fl == NULL) { fprintf(stderr, "ZSTD_seekable_createFrameLog() failed \n"); exit(11); } + for (i = 0; i < numFrames; i++) { + while (!jobs[i].done) SLEEP(5); /* wake up every 5 milliseconds to check */ + fwrite_orDie(jobs[i].dst, jobs[i].dstSize, fout); + free((void*)jobs[i].src); + free(jobs[i].dst); + + size_t ret = ZSTD_seekable_logFrame(fl, jobs[i].dstSize, jobs[i].srcSize, jobs[i].checksum); + if (ZSTD_isError(ret)) { fprintf(stderr, 
"ZSTD_seekable_logFrame() error : %s \n", ZSTD_getErrorName(ret)); } + } + + { unsigned char seekTableBuff[1024]; + ZSTD_outBuffer out = {seekTableBuff, 1024, 0}; + while (ZSTD_seekable_writeSeekTable(fl, &out) != 0) { + fwrite_orDie(seekTableBuff, out.pos, fout); + out.pos = 0; + } + fwrite_orDie(seekTableBuff, out.pos, fout); + } + + ZSTD_seekable_freeFrameLog(fl); + free(jobs); + fclose_orDie(fout); + fclose_orDie(fin); +} + +static const char* createOutFilename_orDie(const char* filename) +{ + size_t const inL = strlen(filename); + size_t const outL = inL + 5; + void* outSpace = malloc_orDie(outL); + memset(outSpace, 0, outL); + strcat(outSpace, filename); + strcat(outSpace, ".zst"); + return (const char*)outSpace; +} + +int main(int argc, const char** argv) { + const char* const exeName = argv[0]; + if (argc!=4) { + printf("wrong arguments\n"); + printf("usage:\n"); + printf("%s FILE FRAME_SIZE NB_THREADS\n", exeName); + return 1; + } + + { const char* const inFileName = argv[1]; + unsigned const frameSize = (unsigned)atoi(argv[2]); + int const nbThreads = atoi(argv[3]); + + const char* const outFileName = createOutFilename_orDie(inFileName); + compressFile_orDie(inFileName, outFileName, 5, frameSize, nbThreads); + } + + return 0; +} diff --git a/contrib/seekable_format/examples/parallel_processing.c b/contrib/seekable_format/examples/parallel_processing.c new file mode 100644 index 000000000..cea4d5364 --- /dev/null +++ b/contrib/seekable_format/examples/parallel_processing.c @@ -0,0 +1,193 @@ +/** + * Copyright 2017-present, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the license found in the + * LICENSE-examples file in the root directory of this source tree. 
+ */ + +/* + * A simple demo that sums up all the bytes in the file in parallel using + * seekable decompression and the zstd thread pool + */ + +#include // malloc, exit +#include // fprintf, perror, feof +#include // strerror +#include // errno +#define ZSTD_STATIC_LINKING_ONLY +#include // presumes zstd library is installed +#include +#if defined(WIN32) || defined(_WIN32) +# include +# define SLEEP(x) Sleep(x) +#else +# include +# define SLEEP(x) usleep(x * 1000) +#endif + +#include "pool.h" // use zstd thread pool for demo + +#include "zstd_seekable.h" + +#define MIN(a, b) ((a) < (b) ? (a) : (b)) + +static void* malloc_orDie(size_t size) +{ + void* const buff = malloc(size); + if (buff) return buff; + /* error */ + perror("malloc"); + exit(1); +} + +static void* realloc_orDie(void* ptr, size_t size) +{ + ptr = realloc(ptr, size); + if (ptr) return ptr; + /* error */ + perror("realloc"); + exit(1); +} + +static FILE* fopen_orDie(const char *filename, const char *instruction) +{ + FILE* const inFile = fopen(filename, instruction); + if (inFile) return inFile; + /* error */ + perror(filename); + exit(3); +} + +static size_t fread_orDie(void* buffer, size_t sizeToRead, FILE* file) +{ + size_t const readSize = fread(buffer, 1, sizeToRead, file); + if (readSize == sizeToRead) return readSize; /* good */ + if (feof(file)) return readSize; /* good, reached end of file */ + /* error */ + perror("fread"); + exit(4); +} + +static size_t fwrite_orDie(const void* buffer, size_t sizeToWrite, FILE* file) +{ + size_t const writtenSize = fwrite(buffer, 1, sizeToWrite, file); + if (writtenSize == sizeToWrite) return sizeToWrite; /* good */ + /* error */ + perror("fwrite"); + exit(5); +} + +static size_t fclose_orDie(FILE* file) +{ + if (!fclose(file)) return 0; + /* error */ + perror("fclose"); + exit(6); +} + +static void fseek_orDie(FILE* file, long int offset, int origin) { + if (!fseek(file, offset, origin)) { + if (!fflush(file)) return; + } + /* error */ + perror("fseek"); 
+ exit(7); +} + +struct sum_job { + const char* fname; + unsigned long long sum; + unsigned frameNb; + int done; +}; + +static void sumFrame(void* opaque) +{ + struct sum_job* job = (struct sum_job*)opaque; + job->done = 0; + + FILE* const fin = fopen_orDie(job->fname, "rb"); + + ZSTD_seekable* const seekable = ZSTD_seekable_create(); + if (seekable==NULL) { fprintf(stderr, "ZSTD_seekable_create() error \n"); exit(10); } + + size_t const initResult = ZSTD_seekable_initFile(seekable, fin); + if (ZSTD_isError(initResult)) { fprintf(stderr, "ZSTD_seekable_init() error : %s \n", ZSTD_getErrorName(initResult)); exit(11); } + + size_t const frameSize = ZSTD_seekable_getFrameDecompressedSize(seekable, job->frameNb); + unsigned char* data = malloc_orDie(frameSize); + + size_t result = ZSTD_seekable_decompressFrame(seekable, data, frameSize, job->frameNb); + if (ZSTD_isError(result)) { fprintf(stderr, "ZSTD_seekable_decompressFrame() error : %s \n", ZSTD_getErrorName(result)); exit(12); } + + unsigned long long sum = 0; + size_t i; + for (i = 0; i < frameSize; i++) { + sum += data[i]; + } + job->sum = sum; + job->done = 1; + + fclose(fin); + ZSTD_seekable_free(seekable); + free(data); +} + +static void sumFile_orDie(const char* fname, int nbThreads) +{ + POOL_ctx* pool = POOL_create(nbThreads, nbThreads); + if (pool == NULL) { fprintf(stderr, "POOL_create() error \n"); exit(9); } + + FILE* const fin = fopen_orDie(fname, "rb"); + + ZSTD_seekable* const seekable = ZSTD_seekable_create(); + if (seekable==NULL) { fprintf(stderr, "ZSTD_seekable_create() error \n"); exit(10); } + + size_t const initResult = ZSTD_seekable_initFile(seekable, fin); + if (ZSTD_isError(initResult)) { fprintf(stderr, "ZSTD_seekable_init() error : %s \n", ZSTD_getErrorName(initResult)); exit(11); } + + size_t const numFrames = ZSTD_seekable_getNumFrames(seekable); + struct sum_job* jobs = (struct sum_job*)malloc(numFrames * sizeof(struct sum_job)); + + size_t i; + for (i = 0; i < numFrames; i++) { + 
jobs[i] = (struct sum_job){ fname, 0, i, 0 }; + POOL_add(pool, sumFrame, &jobs[i]); + } + + unsigned long long total = 0; + + for (i = 0; i < numFrames; i++) { + while (!jobs[i].done) SLEEP(5); /* wake up every 5 milliseconds to check */ + total += jobs[i].sum; + } + + printf("Sum: %llu\n", total); + + POOL_free(pool); + ZSTD_seekable_free(seekable); + fclose(fin); + free(jobs); +} + + +int main(int argc, const char** argv) +{ + const char* const exeName = argv[0]; + + if (argc!=3) { + fprintf(stderr, "wrong arguments\n"); + fprintf(stderr, "usage:\n"); + fprintf(stderr, "%s FILE NB_THREADS\n", exeName); + return 1; + } + + { + const char* const inFilename = argv[1]; + int const nbThreads = atoi(argv[2]); + sumFile_orDie(inFilename, nbThreads); + } + + return 0; +} diff --git a/contrib/seekable_format/examples/seekable_compression.c b/contrib/seekable_format/examples/seekable_compression.c new file mode 100644 index 000000000..a33952d93 --- /dev/null +++ b/contrib/seekable_format/examples/seekable_compression.c @@ -0,0 +1,131 @@ +/** + * Copyright 2017-present, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the license found in the + * LICENSE-examples file in the root directory of this source tree. + */ + +#include // malloc, free, exit, atoi +#include // fprintf, perror, feof, fopen, etc. 
+#include // strlen, memset, strcat +#define ZSTD_STATIC_LINKING_ONLY +#include // presumes zstd library is installed + +#include "zstd_seekable.h" + +static void* malloc_orDie(size_t size) +{ + void* const buff = malloc(size); + if (buff) return buff; + /* error */ + perror("malloc:"); + exit(1); +} + +static FILE* fopen_orDie(const char *filename, const char *instruction) +{ + FILE* const inFile = fopen(filename, instruction); + if (inFile) return inFile; + /* error */ + perror(filename); + exit(3); +} + +static size_t fread_orDie(void* buffer, size_t sizeToRead, FILE* file) +{ + size_t const readSize = fread(buffer, 1, sizeToRead, file); + if (readSize == sizeToRead) return readSize; /* good */ + if (feof(file)) return readSize; /* good, reached end of file */ + /* error */ + perror("fread"); + exit(4); +} + +static size_t fwrite_orDie(const void* buffer, size_t sizeToWrite, FILE* file) +{ + size_t const writtenSize = fwrite(buffer, 1, sizeToWrite, file); + if (writtenSize == sizeToWrite) return sizeToWrite; /* good */ + /* error */ + perror("fwrite"); + exit(5); +} + +static size_t fclose_orDie(FILE* file) +{ + if (!fclose(file)) return 0; + /* error */ + perror("fclose"); + exit(6); +} + +static void compressFile_orDie(const char* fname, const char* outName, int cLevel, unsigned frameSize) +{ + FILE* const fin = fopen_orDie(fname, "rb"); + FILE* const fout = fopen_orDie(outName, "wb"); + size_t const buffInSize = ZSTD_CStreamInSize(); /* can always read one full block */ + void* const buffIn = malloc_orDie(buffInSize); + size_t const buffOutSize = ZSTD_CStreamOutSize(); /* can always flush a full block */ + void* const buffOut = malloc_orDie(buffOutSize); + + ZSTD_seekable_CStream* const cstream = ZSTD_seekable_createCStream(); + if (cstream==NULL) { fprintf(stderr, "ZSTD_seekable_createCStream() error \n"); exit(10); } + size_t const initResult = ZSTD_seekable_initCStream(cstream, cLevel, 1, frameSize); + if (ZSTD_isError(initResult)) { fprintf(stderr, 
"ZSTD_seekable_initCStream() error : %s \n", ZSTD_getErrorName(initResult)); exit(11); } + + size_t read, toRead = buffInSize; + while( (read = fread_orDie(buffIn, toRead, fin)) ) { + ZSTD_inBuffer input = { buffIn, read, 0 }; + while (input.pos < input.size) { + ZSTD_outBuffer output = { buffOut, buffOutSize, 0 }; + toRead = ZSTD_seekable_compressStream(cstream, &output , &input); /* toRead is guaranteed to be <= ZSTD_CStreamInSize() */ + if (ZSTD_isError(toRead)) { fprintf(stderr, "ZSTD_seekable_compressStream() error : %s \n", ZSTD_getErrorName(toRead)); exit(12); } + if (toRead > buffInSize) toRead = buffInSize; /* Safely handle case when `buffInSize` is manually changed to a value < ZSTD_CStreamInSize()*/ + fwrite_orDie(buffOut, output.pos, fout); + } + } + + while (1) { + ZSTD_outBuffer output = { buffOut, buffOutSize, 0 }; + size_t const remainingToFlush = ZSTD_seekable_endStream(cstream, &output); /* close stream */ + if (ZSTD_isError(remainingToFlush)) { fprintf(stderr, "ZSTD_seekable_endStream() error : %s \n", ZSTD_getErrorName(remainingToFlush)); exit(13); } + fwrite_orDie(buffOut, output.pos, fout); + if (!remainingToFlush) break; + } + + ZSTD_seekable_freeCStream(cstream); + fclose_orDie(fout); + fclose_orDie(fin); + free(buffIn); + free(buffOut); +} + +static const char* createOutFilename_orDie(const char* filename) +{ + size_t const inL = strlen(filename); + size_t const outL = inL + 5; + void* outSpace = malloc_orDie(outL); + memset(outSpace, 0, outL); + strcat(outSpace, filename); + strcat(outSpace, ".zst"); + return (const char*)outSpace; +} + +int main(int argc, const char** argv) { + const char* const exeName = argv[0]; + if (argc!=3) { + printf("wrong arguments\n"); + printf("usage:\n"); + printf("%s FILE FRAME_SIZE\n", exeName); + return 1; + } + + { const char* const inFileName = argv[1]; + unsigned const frameSize = (unsigned)atoi(argv[2]); + + const char* const outFileName = createOutFilename_orDie(inFileName); + 
compressFile_orDie(inFileName, outFileName, 5, frameSize); + } + + return 0; +} diff --git a/contrib/seekable_format/examples/seekable_decompression.c b/contrib/seekable_format/examples/seekable_decompression.c new file mode 100644 index 000000000..b765a7591 --- /dev/null +++ b/contrib/seekable_format/examples/seekable_decompression.c @@ -0,0 +1,137 @@ +/** + * Copyright 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the license found in the + * LICENSE-examples file in the root directory of this source tree. + */ + + +#include // malloc, exit +#include // fprintf, perror, feof +#include // strerror +#include // errno +#define ZSTD_STATIC_LINKING_ONLY +#include // presumes zstd library is installed +#include + +#include "zstd_seekable.h" + +#define MIN(a, b) ((a) < (b) ? (a) : (b)) + +static void* malloc_orDie(size_t size) +{ + void* const buff = malloc(size); + if (buff) return buff; + /* error */ + perror("malloc"); + exit(1); +} + +static void* realloc_orDie(void* ptr, size_t size) +{ + ptr = realloc(ptr, size); + if (ptr) return ptr; + /* error */ + perror("realloc"); + exit(1); +} + +static FILE* fopen_orDie(const char *filename, const char *instruction) +{ + FILE* const inFile = fopen(filename, instruction); + if (inFile) return inFile; + /* error */ + perror(filename); + exit(3); +} + +static size_t fread_orDie(void* buffer, size_t sizeToRead, FILE* file) +{ + size_t const readSize = fread(buffer, 1, sizeToRead, file); + if (readSize == sizeToRead) return readSize; /* good */ + if (feof(file)) return readSize; /* good, reached end of file */ + /* error */ + perror("fread"); + exit(4); +} + +static size_t fwrite_orDie(const void* buffer, size_t sizeToWrite, FILE* file) +{ + size_t const writtenSize = fwrite(buffer, 1, sizeToWrite, file); + if (writtenSize == sizeToWrite) return sizeToWrite; /* good */ + /* error */ + perror("fwrite"); + exit(5); +} + +static size_t fclose_orDie(FILE* file) +{ + if 
(!fclose(file)) return 0; + /* error */ + perror("fclose"); + exit(6); +} + +static void fseek_orDie(FILE* file, long int offset, int origin) { + if (!fseek(file, offset, origin)) { + if (!fflush(file)) return; + } + /* error */ + perror("fseek"); + exit(7); +} + + +static void decompressFile_orDie(const char* fname, unsigned startOffset, unsigned endOffset) +{ + FILE* const fin = fopen_orDie(fname, "rb"); + FILE* const fout = stdout; + size_t const buffOutSize = ZSTD_DStreamOutSize(); /* Guarantee to successfully flush at least one complete compressed block in all circumstances. */ + void* const buffOut = malloc_orDie(buffOutSize); + + ZSTD_seekable* const seekable = ZSTD_seekable_create(); + if (seekable==NULL) { fprintf(stderr, "ZSTD_seekable_create() error \n"); exit(10); } + + size_t const initResult = ZSTD_seekable_initFile(seekable, fin); + if (ZSTD_isError(initResult)) { fprintf(stderr, "ZSTD_seekable_init() error : %s \n", ZSTD_getErrorName(initResult)); exit(11); } + + while (startOffset < endOffset) { + size_t const result = ZSTD_seekable_decompress(seekable, buffOut, MIN(endOffset - startOffset, buffOutSize), startOffset); + + if (ZSTD_isError(result)) { + fprintf(stderr, "ZSTD_seekable_decompress() error : %s \n", + ZSTD_getErrorName(result)); + exit(12); + } + fwrite_orDie(buffOut, result, fout); + startOffset += result; + } + + ZSTD_seekable_free(seekable); + fclose_orDie(fin); + fclose_orDie(fout); + free(buffOut); +} + + +int main(int argc, const char** argv) +{ + const char* const exeName = argv[0]; + + if (argc!=4) { + fprintf(stderr, "wrong arguments\n"); + fprintf(stderr, "usage:\n"); + fprintf(stderr, "%s FILE START END\n", exeName); + return 1; + } + + { + const char* const inFilename = argv[1]; + unsigned const startOffset = (unsigned) atoi(argv[2]); + unsigned const endOffset = (unsigned) atoi(argv[3]); + decompressFile_orDie(inFilename, startOffset, endOffset); + } + + return 0; +} diff --git a/contrib/seekable_format/zstd_seekable.h 
b/contrib/seekable_format/zstd_seekable.h new file mode 100644 index 000000000..438ac2014 --- /dev/null +++ b/contrib/seekable_format/zstd_seekable.h @@ -0,0 +1,184 @@ +#ifndef SEEKABLE_H +#define SEEKABLE_H + +#if defined (__cplusplus) +extern "C" { +#endif + +#include + +static const unsigned ZSTD_seekTableFooterSize = 9; + +#define ZSTD_SEEKABLE_MAGICNUMBER 0x8F92EAB1 + +#define ZSTD_SEEKABLE_MAXFRAMES 0x8000000U + +/* Limit the maximum size to avoid any potential issues storing the compressed size */ +#define ZSTD_SEEKABLE_MAX_FRAME_DECOMPRESSED_SIZE 0x80000000U + +/*-**************************************************************************** +* Seekable Format +* +* The seekable format splits the compressed data into a series of "frames", +* each compressed individually so that decompression of a section in the +* middle of an archive only requires zstd to decompress at most a frame's +* worth of extra data, instead of the entire archive. +******************************************************************************/ + +typedef struct ZSTD_seekable_CStream_s ZSTD_seekable_CStream; +typedef struct ZSTD_seekable_s ZSTD_seekable; + +/*-**************************************************************************** +* Seekable compression - HowTo +* A ZSTD_seekable_CStream object is required to tracking streaming operation. +* Use ZSTD_seekable_createCStream() and ZSTD_seekable_freeCStream() to create/ +* release resources. +* +* Streaming objects are reusable to avoid allocation and deallocation, +* to start a new compression operation call ZSTD_seekable_initCStream() on the +* compressor. +* +* Data streamed to the seekable compressor will automatically be split into +* frames of size `maxFrameSize` (provided in ZSTD_seekable_initCStream()), +* or if none is provided, will be cut off whenever ZSTD_seekable_endFrame() is +* called or when the default maximum frame size (2GB) is reached. 
+* +* Use ZSTD_seekable_initCStream() to initialize a ZSTD_seekable_CStream object +* for a new compression operation. +* `maxFrameSize` indicates the size at which to automatically start a new +* seekable frame. `maxFrameSize == 0` implies the default maximum size. +* `checksumFlag` indicates whether or not the seek table should include frame +* checksums on the uncompressed data for verification. +* @return : a size hint for input to provide for compression, or an error code +* checkable with ZSTD_isError() +* +* Use ZSTD_seekable_compressStream() repetitively to consume input stream. +* The function will automatically update both `pos` fields. +* Note that it may not consume the entire input, in which case `pos < size`, +* and it's up to the caller to present again remaining data. +* @return : a size hint, preferred nb of bytes to use as input for next +* function call or an error code, which can be tested using +* ZSTD_isError(). +* Note 1 : it's just a hint, to help latency a little, any other +* value will work fine. +* +* At any time, call ZSTD_seekable_endFrame() to end the current frame and +* start a new one. +* +* ZSTD_seekable_endStream() will end the current frame, and then write the seek +* table so that decompressors can efficiently find compressed frames. +* ZSTD_seekable_endStream() may return a number > 0 if it was unable to flush +* all the necessary data to `output`. In this case, it should be called again +* until all remaining data is flushed out and 0 is returned. 
+******************************************************************************/ + +/*===== Seekable compressor management =====*/ +ZSTDLIB_API ZSTD_seekable_CStream* ZSTD_seekable_createCStream(void); +ZSTDLIB_API size_t ZSTD_seekable_freeCStream(ZSTD_seekable_CStream* zcs); + +/*===== Seekable compression functions =====*/ +ZSTDLIB_API size_t ZSTD_seekable_initCStream(ZSTD_seekable_CStream* zcs, int compressionLevel, int checksumFlag, unsigned maxFrameSize); +ZSTDLIB_API size_t ZSTD_seekable_compressStream(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input); +ZSTDLIB_API size_t ZSTD_seekable_endFrame(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output); +ZSTDLIB_API size_t ZSTD_seekable_endStream(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output); + +/*= Raw seek table API + * These functions allow for the seek table to be constructed directly. + * This table can then be appended to a file of concatenated frames. + * This allows the frames to be compressed independently, even in parallel, + * and compiled together afterward into a seekable archive. + * + * Use ZSTD_seekable_createFrameLog() to allocate and initialize a tracking + * structure. + * + * Call ZSTD_seekable_logFrame() once for each frame in the archive. + * checksum is optional, and will not be used if checksumFlag was 0 when the + * frame log was created. If present, it should be the least significant 32 + * bits of the XXH64 hash of the uncompressed data. + * + * Call ZSTD_seekable_writeSeekTable to serialize the data into a seek table. + * If the entire table was written, the return value will be 0. Otherwise, + * it will be equal to the number of bytes left to write. 
*/ +typedef struct ZSTD_frameLog_s ZSTD_frameLog; +ZSTDLIB_API ZSTD_frameLog* ZSTD_seekable_createFrameLog(int checksumFlag); +ZSTDLIB_API size_t ZSTD_seekable_freeFrameLog(ZSTD_frameLog* fl); +ZSTDLIB_API size_t ZSTD_seekable_logFrame(ZSTD_frameLog* fl, unsigned compressedSize, unsigned decompressedSize, unsigned checksum); +ZSTDLIB_API size_t ZSTD_seekable_writeSeekTable(ZSTD_frameLog* fl, ZSTD_outBuffer* output); + +/*-**************************************************************************** +* Seekable decompression - HowTo +* A ZSTD_seekable object is required to tracking the seekTable. +* +* Call ZSTD_seekable_init* to initialize a ZSTD_seekable object with the +* the seek table provided in the input. +* There are three modes for ZSTD_seekable_init: +* - ZSTD_seekable_initBuff() : An in-memory API. The data contained in +* `src` should be the entire seekable file, including the seek table. +* `src` should be kept alive and unmodified until the ZSTD_seekable object +* is freed or reset. +* - ZSTD_seekable_initFile() : A simplified file API using stdio. fread and +* fseek will be used to access the required data for building the seek +* table and doing decompression operations. `src` should not be closed +* or modified until the ZSTD_seekable object is freed or reset. +* - ZSTD_seekable_initAdvanced() : A general API allowing the client to +* provide its own read and seek callbacks. +* + ZSTD_seekable_read() : read exactly `n` bytes into `buffer`. +* Premature EOF should be treated as an error. +* + ZSTD_seekable_seek() : seek the read head to `offset` from `origin`, +* where origin is either SEEK_SET (beginning of +* file), or SEEK_END (end of file). +* Both functions should return a non-negative value in case of success, and a +* negative value in case of failure. If implementing using this API and +* stdio, be careful with files larger than 4GB and fseek. All of these +* functions return an error code checkable with ZSTD_isError(). 
+* +* Call ZSTD_seekable_decompress to decompress `dstSize` bytes at decompressed +* offset `offset`. ZSTD_seekable_decompress may have to decompress the entire +* prefix of the frame before the desired data if it has not already processed +* this section. If ZSTD_seekable_decompress is called multiple times for a +* consecutive range of data, it will efficiently retain the decompressor object +* and avoid redecompressing frame prefixes. The return value is the number of +* bytes decompressed, or an error code checkable with ZSTD_isError(). +* +* The seek table access functions can be used to obtain the data contained +* in the seek table. If frameIndex is larger than the value returned by +* ZSTD_seekable_getNumFrames(), they will return error codes checkable with +* ZSTD_isError(). Note that since the offset access functions return +* unsigned long long instead of size_t, in this case they will instead return +* the value ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE. +******************************************************************************/ + +/*===== Seekable decompressor management =====*/ +ZSTDLIB_API ZSTD_seekable* ZSTD_seekable_create(void); +ZSTDLIB_API size_t ZSTD_seekable_free(ZSTD_seekable* zs); + +/*===== Seekable decompression functions =====*/ +ZSTDLIB_API size_t ZSTD_seekable_initBuff(ZSTD_seekable* zs, const void* src, size_t srcSize); +ZSTDLIB_API size_t ZSTD_seekable_initFile(ZSTD_seekable* zs, FILE* src); +ZSTDLIB_API size_t ZSTD_seekable_decompress(ZSTD_seekable* zs, void* dst, size_t dstSize, unsigned long long offset); +ZSTDLIB_API size_t ZSTD_seekable_decompressFrame(ZSTD_seekable* zs, void* dst, size_t dstSize, unsigned frameIndex); + +#define ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE (0ULL-2) +/*===== Seek Table access functions =====*/ +ZSTDLIB_API unsigned ZSTD_seekable_getNumFrames(ZSTD_seekable* const zs); +ZSTDLIB_API unsigned long long ZSTD_seekable_getFrameCompressedOffset(ZSTD_seekable* const zs, unsigned frameIndex); +ZSTDLIB_API unsigned long 
long ZSTD_seekable_getFrameDecompressedOffset(ZSTD_seekable* const zs, unsigned frameIndex); +ZSTDLIB_API size_t ZSTD_seekable_getFrameCompressedSize(ZSTD_seekable* const zs, unsigned frameIndex); +ZSTDLIB_API size_t ZSTD_seekable_getFrameDecompressedSize(ZSTD_seekable* const zs, unsigned frameIndex); +ZSTDLIB_API unsigned ZSTD_seekable_offsetToFrameIndex(ZSTD_seekable* const zs, unsigned long long offset); + +/*===== Seekable advanced I/O API =====*/ +typedef int(ZSTD_seekable_read)(void* opaque, void* buffer, size_t n); +typedef int(ZSTD_seekable_seek)(void* opaque, long long offset, int origin); +typedef struct { + void* opaque; + ZSTD_seekable_read* read; + ZSTD_seekable_seek* seek; +} ZSTD_seekable_customFile; +ZSTDLIB_API size_t ZSTD_seekable_initAdvanced(ZSTD_seekable* zs, ZSTD_seekable_customFile src); + +#if defined (__cplusplus) +} +#endif + +#endif diff --git a/contrib/seekable_format/zstd_seekable_compression_format.md b/contrib/seekable_format/zstd_seekable_compression_format.md new file mode 100644 index 000000000..bf3080f7b --- /dev/null +++ b/contrib/seekable_format/zstd_seekable_compression_format.md @@ -0,0 +1,116 @@ +# Zstandard Seekable Format + +### Notices + +Copyright (c) 2017-present Facebook, Inc. + +Permission is granted to copy and distribute this document +for any purpose and without charge, +including translations into other languages +and incorporation into compilations, +provided that the copyright notice and this notice are preserved, +and that any substantive changes or deletions from the original +are clearly marked. +Distribution of this document is unlimited. + +### Version +0.1.0 (11/04/17) + +## Introduction +This document defines a format for compressed data to be stored so that subranges of the data can be efficiently decompressed without requiring the entire document to be decompressed. 
+This is done by splitting up the input data into frames, +each of which are compressed independently, +and so can be decompressed independently. +Decompression then takes advantage of a provided 'seek table', which allows the decompressor to immediately jump to the desired data. This is done in a way that is compatible with the original Zstandard format by placing the seek table in a Zstandard skippable frame. + +### Overall conventions +In this document: +- square brackets i.e. `[` and `]` are used to indicate optional fields or parameters. +- the naming convention for identifiers is `Mixed_Case_With_Underscores` +- All numeric fields are little-endian unless specified otherwise + +## Format + +The format consists of a number of frames (Zstandard compressed frames and skippable frames), followed by a final skippable frame at the end containing the seek table. + +### Seek Table Format +The structure of the seek table frame is as follows: + +|`Skippable_Magic_Number`|`Frame_Size`|`[Seek_Table_Entries]`|`Seek_Table_Footer`| +|------------------------|------------|----------------------|-------------------| +| 4 bytes | 4 bytes | 8-12 bytes each | 9 bytes | + +__`Skippable_Magic_Number`__ + +Value : 0x184D2A5E. +This is for compatibility with [Zstandard skippable frames]. +Since it is legal for other Zstandard skippable frames to use the same +magic number, it is not recommended for a decoder to recognize frames +solely on this. + +__`Frame_Size`__ + +The total size of the skippable frame, not including the `Skippable_Magic_Number` or `Frame_Size`. +This is for compatibility with [Zstandard skippable frames]. 
+ +[Zstandard skippable frames]: https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#skippable-frames + +#### `Seek_Table_Footer` +The seek table footer format is as follows: + +|`Number_Of_Frames`|`Seek_Table_Descriptor`|`Seekable_Magic_Number`| +|------------------|-----------------------|-----------------------| +| 4 bytes | 1 byte | 4 bytes | + +__`Seekable_Magic_Number`__ + +Value : 0x8F92EAB1. +This value must be the last bytes present in the compressed file so that decoders +can efficiently find it and determine if there is an actual seek table present. + +__`Number_Of_Frames`__ + +The number of stored frames in the data. + +__`Seek_Table_Descriptor`__ + +A bitfield describing the format of the seek table. + +| Bit number | Field name | +| ---------- | ---------- | +| 7 | `Checksum_Flag` | +| 6-2 | `Reserved_Bits` | +| 1-0 | `Unused_Bits` | + +While only `Checksum_Flag` currently exists, there are 7 other bits in this field that can be used for future changes to the format, +for example the addition of inline dictionaries. + +__`Checksum_Flag`__ + +If the checksum flag is set, each of the seek table entries contains a 4 byte checksum of the uncompressed data contained in its frame. + +`Reserved_Bits` are not currently used but may be used in the future for breaking changes, so a compliant decoder should ensure they are set to 0. `Unused_Bits` may be used in the future for non-breaking changes, so a compliant decoder should not interpret these bits. + +#### __`Seek_Table_Entries`__ + +`Seek_Table_Entries` consists of `Number_Of_Frames` (one for each frame in the data, not including the seek table frame) entries of the following form, in sequence: + +|`Compressed_Size`|`Decompressed_Size`|`[Checksum]`| +|-----------------|-------------------|------------| +| 4 bytes | 4 bytes | 4 bytes | + +__`Compressed_Size`__ + +The compressed size of the frame. 
+The cumulative sum of the `Compressed_Size` fields of frames `0` to `i` gives the offset in the compressed file of frame `i+1`. + +__`Decompressed_Size`__ + +The size of the decompressed data contained in the frame. For skippable or otherwise empty frames, this value is 0. + +__`Checksum`__ + +Only present if `Checksum_Flag` is set in the `Seek_Table_Descriptor`. Value : the least significant 32 bits of the XXH64 digest of the uncompressed data, stored in little-endian format. + +## Version Changes +- 0.1.0: initial version diff --git a/contrib/seekable_format/zstdseek_compress.c b/contrib/seekable_format/zstdseek_compress.c new file mode 100644 index 000000000..5fe26ed28 --- /dev/null +++ b/contrib/seekable_format/zstdseek_compress.c @@ -0,0 +1,366 @@ +/** + * Copyright (c) 2017-present, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + */ + +#include /* malloc, free */ + +#define XXH_STATIC_LINKING_ONLY +#define XXH_NAMESPACE ZSTD_ +#include "xxhash.h" + +#define ZSTD_STATIC_LINKING_ONLY +#include "zstd.h" +#include "zstd_errors.h" +#include "mem.h" +#include "zstd_seekable.h" + +#define CHECK_Z(f) { size_t const ret = (f); if (ret != 0) return ret; } + +#undef ERROR +#define ERROR(name) ((size_t)-ZSTD_error_##name) + +#undef MIN +#undef MAX +#define MIN(a, b) ((a) < (b) ? (a) : (b)) +#define MAX(a, b) ((a) > (b) ? 
(a) : (b)) + +typedef struct { + U32 cSize; + U32 dSize; + U32 checksum; +} framelogEntry_t; + +struct ZSTD_frameLog_s { + framelogEntry_t* entries; + U32 size; + U32 capacity; + + int checksumFlag; + + /* for use when streaming out the seek table */ + U32 seekTablePos; + U32 seekTableIndex; +} framelog_t; + +struct ZSTD_seekable_CStream_s { + ZSTD_CStream* cstream; + ZSTD_frameLog framelog; + + U32 frameCSize; + U32 frameDSize; + + XXH64_state_t xxhState; + + U32 maxFrameSize; + + int writingSeekTable; +}; + +size_t ZSTD_seekable_frameLog_allocVec(ZSTD_frameLog* fl) +{ + /* allocate some initial space */ + size_t const FRAMELOG_STARTING_CAPACITY = 16; + fl->entries = (framelogEntry_t*)malloc( + sizeof(framelogEntry_t) * FRAMELOG_STARTING_CAPACITY); + if (fl->entries == NULL) return ERROR(memory_allocation); + fl->capacity = FRAMELOG_STARTING_CAPACITY; + + return 0; +} + +size_t ZSTD_seekable_frameLog_freeVec(ZSTD_frameLog* fl) +{ + if (fl != NULL) free(fl->entries); + return 0; +} + +ZSTD_frameLog* ZSTD_seekable_createFrameLog(int checksumFlag) +{ + ZSTD_frameLog* fl = malloc(sizeof(ZSTD_frameLog)); + if (fl == NULL) return NULL; + + if (ZSTD_isError(ZSTD_seekable_frameLog_allocVec(fl))) { + free(fl); + return NULL; + } + + fl->checksumFlag = checksumFlag; + fl->seekTablePos = 0; + fl->seekTableIndex = 0; + fl->size = 0; + + return fl; +} + +size_t ZSTD_seekable_freeFrameLog(ZSTD_frameLog* fl) +{ + ZSTD_seekable_frameLog_freeVec(fl); + free(fl); + return 0; +} + +ZSTD_seekable_CStream* ZSTD_seekable_createCStream() +{ + ZSTD_seekable_CStream* zcs = malloc(sizeof(ZSTD_seekable_CStream)); + + if (zcs == NULL) return NULL; + + memset(zcs, 0, sizeof(*zcs)); + + zcs->cstream = ZSTD_createCStream(); + if (zcs->cstream == NULL) goto failed1; + + if (ZSTD_isError(ZSTD_seekable_frameLog_allocVec(&zcs->framelog))) goto failed2; + + return zcs; + +failed2: + ZSTD_freeCStream(zcs->cstream); +failed1: + free(zcs); + return NULL; +} + +size_t 
ZSTD_seekable_freeCStream(ZSTD_seekable_CStream* zcs) +{ + if (zcs == NULL) return 0; /* support free on null */ + ZSTD_freeCStream(zcs->cstream); + ZSTD_seekable_frameLog_freeVec(&zcs->framelog); + free(zcs); + + return 0; +} + +size_t ZSTD_seekable_initCStream(ZSTD_seekable_CStream* zcs, + int compressionLevel, + int checksumFlag, + U32 maxFrameSize) +{ + zcs->framelog.size = 0; + zcs->frameCSize = 0; + zcs->frameDSize = 0; + + /* make sure maxFrameSize has a reasonable value */ + if (maxFrameSize > ZSTD_SEEKABLE_MAX_FRAME_DECOMPRESSED_SIZE) { + return ERROR(compressionParameter_unsupported); + } + + zcs->maxFrameSize = maxFrameSize + ? maxFrameSize + : ZSTD_SEEKABLE_MAX_FRAME_DECOMPRESSED_SIZE; + + zcs->framelog.checksumFlag = checksumFlag; + if (zcs->framelog.checksumFlag) { + XXH64_reset(&zcs->xxhState, 0); + } + + zcs->framelog.seekTablePos = 0; + zcs->framelog.seekTableIndex = 0; + zcs->writingSeekTable = 0; + + return ZSTD_initCStream(zcs->cstream, compressionLevel); +} + +size_t ZSTD_seekable_logFrame(ZSTD_frameLog* fl, + unsigned compressedSize, + unsigned decompressedSize, + unsigned checksum) +{ + if (fl->size == ZSTD_SEEKABLE_MAXFRAMES) + return ERROR(frameIndex_tooLarge); + + /* grow the buffer if required */ + if (fl->size == fl->capacity) { + /* exponential size increase for constant amortized runtime */ + size_t const newCapacity = fl->capacity * 2; + framelogEntry_t* const newEntries = realloc(fl->entries, + sizeof(framelogEntry_t) * newCapacity); + + if (newEntries == NULL) return ERROR(memory_allocation); + + fl->entries = newEntries; + fl->capacity = newCapacity; + } + + fl->entries[fl->size] = (framelogEntry_t){ + compressedSize, decompressedSize, checksum + }; + fl->size++; + + return 0; +} + +size_t ZSTD_seekable_endFrame(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output) +{ + size_t const prevOutPos = output->pos; + /* end the frame */ + size_t ret = ZSTD_endStream(zcs->cstream, output); + + zcs->frameCSize += output->pos - prevOutPos; + + 
/* need to flush before doing the rest */ + if (ret) return ret; + + /* frame done */ + + /* store the frame data for later */ + ret = ZSTD_seekable_logFrame( + &zcs->framelog, zcs->frameCSize, zcs->frameDSize, + zcs->framelog.checksumFlag + ? XXH64_digest(&zcs->xxhState) & 0xFFFFFFFFU + : 0); + if (ret) return ret; + + /* reset for the next frame */ + zcs->frameCSize = 0; + zcs->frameDSize = 0; + + ZSTD_resetCStream(zcs->cstream, 0); + if (zcs->framelog.checksumFlag) + XXH64_reset(&zcs->xxhState, 0); + + return 0; +} + +size_t ZSTD_seekable_compressStream(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input) +{ + const BYTE* const inBase = (const BYTE*) input->src + input->pos; + size_t inLen = input->size - input->pos; + + inLen = MIN(inLen, (size_t)(zcs->maxFrameSize - zcs->frameDSize)); + + /* if we haven't finished flushing the last frame, don't start writing a new one */ + if (inLen > 0) { + ZSTD_inBuffer inTmp = { inBase, inLen, 0 }; + size_t const prevOutPos = output->pos; + + size_t const ret = ZSTD_compressStream(zcs->cstream, output, &inTmp); + + if (zcs->framelog.checksumFlag) { + XXH64_update(&zcs->xxhState, inBase, inTmp.pos); + } + + zcs->frameCSize += output->pos - prevOutPos; + zcs->frameDSize += inTmp.pos; + + input->pos += inTmp.pos; + + if (ZSTD_isError(ret)) return ret; + } + + if (zcs->maxFrameSize == zcs->frameDSize) { + /* log the frame and start over */ + size_t const ret = ZSTD_seekable_endFrame(zcs, output); + if (ZSTD_isError(ret)) return ret; + + /* get the client ready for the next frame */ + return (size_t)zcs->maxFrameSize; + } + + return (size_t)(zcs->maxFrameSize - zcs->frameDSize); +} + +static inline size_t ZSTD_seekable_seekTableSize(const ZSTD_frameLog* fl) +{ + size_t const sizePerFrame = 8 + (fl->checksumFlag?4:0); + size_t const seekTableLen = ZSTD_skippableHeaderSize + + sizePerFrame * fl->size + + ZSTD_seekTableFooterSize; + + return seekTableLen; +} + +static inline size_t ZSTD_stwrite32(ZSTD_frameLog* 
fl, + ZSTD_outBuffer* output, U32 const value, + U32 const offset) +{ + if (fl->seekTablePos < offset + 4) { + BYTE tmp[4]; /* so that we can work with buffers too small to write a whole word to */ + size_t const lenWrite = + MIN(output->size - output->pos, offset + 4 - fl->seekTablePos); + MEM_writeLE32(tmp, value); + memcpy((BYTE*)output->dst + output->pos, + tmp + (fl->seekTablePos - offset), lenWrite); + output->pos += lenWrite; + fl->seekTablePos += lenWrite; + + if (lenWrite < 4) return ZSTD_seekable_seekTableSize(fl) - fl->seekTablePos; + } + return 0; +} + +size_t ZSTD_seekable_writeSeekTable(ZSTD_frameLog* fl, ZSTD_outBuffer* output) +{ + /* seekTableIndex: the current index in the table and + * seekTableSize: the amount of the table written so far + * + * This function is written this way so that if it has to return early + * because of a small buffer, it can keep going where it left off. + */ + + size_t const sizePerFrame = 8 + (fl->checksumFlag?4:0); + size_t const seekTableLen = ZSTD_seekable_seekTableSize(fl); + + CHECK_Z(ZSTD_stwrite32(fl, output, ZSTD_MAGIC_SKIPPABLE_START | 0xE, 0)); + CHECK_Z(ZSTD_stwrite32(fl, output, seekTableLen - ZSTD_skippableHeaderSize, + 4)); + + while (fl->seekTableIndex < fl->size) { + CHECK_Z(ZSTD_stwrite32(fl, output, + fl->entries[fl->seekTableIndex].cSize, + ZSTD_skippableHeaderSize + + sizePerFrame * fl->seekTableIndex + 0)); + + CHECK_Z(ZSTD_stwrite32(fl, output, + fl->entries[fl->seekTableIndex].dSize, + ZSTD_skippableHeaderSize + + sizePerFrame * fl->seekTableIndex + 4)); + + if (fl->checksumFlag) { + CHECK_Z(ZSTD_stwrite32( + fl, output, fl->entries[fl->seekTableIndex].checksum, + ZSTD_skippableHeaderSize + + sizePerFrame * fl->seekTableIndex + 8)); + } + + fl->seekTableIndex++; + } + + CHECK_Z(ZSTD_stwrite32(fl, output, fl->size, + seekTableLen - ZSTD_seekTableFooterSize)); + + if (output->size - output->pos < 1) return seekTableLen - fl->seekTablePos; + if (fl->seekTablePos < seekTableLen - 4) { + BYTE sfd = 0; 
+ sfd |= (fl->checksumFlag) << 7; + + ((BYTE*)output->dst)[output->pos] = sfd; + output->pos++; + fl->seekTablePos++; + } + + CHECK_Z(ZSTD_stwrite32(fl, output, ZSTD_SEEKABLE_MAGICNUMBER, + seekTableLen - 4)); + + if (fl->seekTablePos != seekTableLen) return ERROR(GENERIC); + return 0; +} + +size_t ZSTD_seekable_endStream(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output) +{ + if (!zcs->writingSeekTable && zcs->frameDSize) { + const size_t endFrame = ZSTD_seekable_endFrame(zcs, output); + if (ZSTD_isError(endFrame)) return endFrame; + /* return an accurate size hint */ + if (endFrame) return endFrame + ZSTD_seekable_seekTableSize(&zcs->framelog); + } + + zcs->writingSeekTable = 1; + + return ZSTD_seekable_writeSeekTable(&zcs->framelog, output); +} diff --git a/contrib/seekable_format/zstdseek_decompress.c b/contrib/seekable_format/zstdseek_decompress.c new file mode 100644 index 000000000..4a8b4e568 --- /dev/null +++ b/contrib/seekable_format/zstdseek_decompress.c @@ -0,0 +1,461 @@ +/* + * Copyright (c) 2017-present, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. 
+ */ + +/* ********************************************************* +* Turn on Large Files support (>4GB) for 32-bit Linux/Unix +***********************************************************/ +#if !defined(__64BIT__) || defined(__MINGW32__) /* No point defining Large file for 64 bit but MinGW-w64 requires it */ +# if !defined(_FILE_OFFSET_BITS) +# define _FILE_OFFSET_BITS 64 /* turn off_t into a 64-bit type for ftello, fseeko */ +# endif +# if !defined(_LARGEFILE_SOURCE) /* obsolete macro, replaced with _FILE_OFFSET_BITS */ +# define _LARGEFILE_SOURCE 1 /* Large File Support extension (LFS) - fseeko, ftello */ +# endif +# if defined(_AIX) || defined(__hpux) +# define _LARGE_FILES /* Large file support on 32-bits AIX and HP-UX */ +# endif +#endif + +/* ************************************************************ +* Avoid fseek()'s 2GiB barrier with MSVC, MacOS, *BSD, MinGW +***************************************************************/ +#if defined(_MSC_VER) && _MSC_VER >= 1400 +# define LONG_SEEK _fseeki64 +#elif !defined(__64BIT__) && (PLATFORM_POSIX_VERSION >= 200112L) /* No point defining Large file for 64 bit */ +# define LONG_SEEK fseeko +#elif defined(__MINGW32__) && !defined(__STRICT_ANSI__) && !defined(__NO_MINGW_LFS) && defined(__MSVCRT__) +# define LONG_SEEK fseeko64 +#elif defined(_WIN32) && !defined(__DJGPP__) +# include + static int LONG_SEEK(FILE* file, __int64 offset, int origin) { + LARGE_INTEGER off; + DWORD method; + off.QuadPart = offset; + if (origin == SEEK_END) + method = FILE_END; + else if (origin == SEEK_CUR) + method = FILE_CURRENT; + else + method = FILE_BEGIN; + + if (SetFilePointerEx((HANDLE) _get_osfhandle(_fileno(file)), off, NULL, method)) + return 0; + else + return -1; + } +#else +# define LONG_SEEK fseek +#endif + +#include /* malloc, free */ +#include /* FILE* */ + +#define XXH_STATIC_LINKING_ONLY +#define XXH_NAMESPACE ZSTD_ +#include "xxhash.h" + +#define ZSTD_STATIC_LINKING_ONLY +#include "zstd.h" +#include "zstd_errors.h" 
+#include "mem.h" +#include "zstd_seekable.h" + +#undef ERROR +#define ERROR(name) ((size_t)-ZSTD_error_##name) + +#define CHECK_IO(f) { int const errcod = (f); if (errcod < 0) return ERROR(seekableIO); } + +#undef MIN +#undef MAX +#define MIN(a, b) ((a) < (b) ? (a) : (b)) +#define MAX(a, b) ((a) > (b) ? (a) : (b)) + +/* Special-case callbacks for FILE* and in-memory modes, so that we can treat + * them the same way as the advanced API */ +static int ZSTD_seekable_read_FILE(void* opaque, void* buffer, size_t n) +{ + size_t const result = fread(buffer, 1, n, (FILE*)opaque); + if (result != n) { + return -1; + } + return 0; +} + +static int ZSTD_seekable_seek_FILE(void* opaque, S64 offset, int origin) +{ + int const ret = LONG_SEEK((FILE*)opaque, offset, origin); + if (ret) return ret; + return fflush((FILE*)opaque); +} + +typedef struct { + const void *ptr; + size_t size; + size_t pos; +} buffWrapper_t; + +static int ZSTD_seekable_read_buff(void* opaque, void* buffer, size_t n) +{ + buffWrapper_t* buff = (buffWrapper_t*) opaque; + if (buff->size + n > buff->pos) return -1; + memcpy(buffer, (const BYTE*)buff->ptr + buff->pos, n); + buff->pos += n; + return 0; +} + +static int ZSTD_seekable_seek_buff(void* opaque, S64 offset, int origin) +{ + buffWrapper_t* buff = (buffWrapper_t*) opaque; + unsigned long long newOffset; + switch (origin) { + case SEEK_SET: + newOffset = offset; + break; + case SEEK_CUR: + newOffset = (unsigned long long)buff->pos + offset; + break; + case SEEK_END: + newOffset = (unsigned long long)buff->size - offset; + break; + } + if (newOffset < 0 || newOffset > buff->size) { + return -1; + } + buff->pos = newOffset; + return 0; +} + +typedef struct { + U64 cOffset; + U64 dOffset; + U32 checksum; +} seekEntry_t; + +typedef struct { + seekEntry_t* entries; + size_t tableLen; + + int checksumFlag; +} seekTable_t; + +#define SEEKABLE_BUFF_SIZE ZSTD_BLOCKSIZE_ABSOLUTEMAX + +struct ZSTD_seekable_s { + ZSTD_DStream* dstream; + seekTable_t seekTable; + 
ZSTD_seekable_customFile src; + + U64 decompressedOffset; + U32 curFrame; + + BYTE inBuff[SEEKABLE_BUFF_SIZE]; /* need to do our own input buffering */ + BYTE outBuff[SEEKABLE_BUFF_SIZE]; /* so we can efficiently decompress the + starts of chunks before we get to the + desired section */ + ZSTD_inBuffer in; /* maintain continuity across ZSTD_seekable_decompress operations */ + buffWrapper_t buffWrapper; /* for `src.opaque` in in-memory mode */ + + XXH64_state_t xxhState; +}; + +ZSTD_seekable* ZSTD_seekable_create(void) +{ + ZSTD_seekable* zs = malloc(sizeof(ZSTD_seekable)); + + if (zs == NULL) return NULL; + + /* also initializes stage to zsds_init */ + memset(zs, 0, sizeof(*zs)); + + zs->dstream = ZSTD_createDStream(); + if (zs->dstream == NULL) { + free(zs); + return NULL; + } + + return zs; +} + +size_t ZSTD_seekable_free(ZSTD_seekable* zs) +{ + if (zs == NULL) return 0; /* support free on null */ + ZSTD_freeDStream(zs->dstream); + free(zs->seekTable.entries); + free(zs); + + return 0; +} + +/** ZSTD_seekable_offsetToFrameIndex() : + * Performs a binary search to find the last frame with a decompressed offset + * <= pos + * @return : the frame's index */ +U32 ZSTD_seekable_offsetToFrameIndex(ZSTD_seekable* const zs, U64 pos) +{ + U32 lo = 0; + U32 hi = zs->seekTable.tableLen; + + if (pos >= zs->seekTable.entries[zs->seekTable.tableLen].dOffset) { + return zs->seekTable.tableLen; + } + + while (lo + 1 < hi) { + U32 const mid = lo + ((hi - lo) >> 1); + if (zs->seekTable.entries[mid].dOffset <= pos) { + lo = mid; + } else { + hi = mid; + } + } + return lo; +} + +U32 ZSTD_seekable_getNumFrames(ZSTD_seekable* const zs) +{ + return zs->seekTable.tableLen; +} + +U64 ZSTD_seekable_getFrameCompressedOffset(ZSTD_seekable* const zs, U32 frameIndex) +{ + if (frameIndex >= zs->seekTable.tableLen) return ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE; + return zs->seekTable.entries[frameIndex].cOffset; +} + +U64 ZSTD_seekable_getFrameDecompressedOffset(ZSTD_seekable* const zs, U32 
frameIndex)
+{
+    if (frameIndex >= zs->seekTable.tableLen) return ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE;
+    return zs->seekTable.entries[frameIndex].dOffset;
+}
+
+size_t ZSTD_seekable_getFrameCompressedSize(ZSTD_seekable* const zs, U32 frameIndex)
+{
+    if (frameIndex >= zs->seekTable.tableLen) return ERROR(frameIndex_tooLarge);
+    return zs->seekTable.entries[frameIndex + 1].cOffset -
+           zs->seekTable.entries[frameIndex].cOffset;
+}
+
+size_t ZSTD_seekable_getFrameDecompressedSize(ZSTD_seekable* const zs, U32 frameIndex)
+{
+    if (frameIndex >= zs->seekTable.tableLen) return ERROR(frameIndex_tooLarge);
+    return zs->seekTable.entries[frameIndex + 1].dOffset -
+           zs->seekTable.entries[frameIndex].dOffset;
+}
+
+static size_t ZSTD_seekable_loadSeekTable(ZSTD_seekable* zs)
+{
+    int checksumFlag;
+    ZSTD_seekable_customFile src = zs->src;
+    /* read the footer, fixed size */
+    CHECK_IO(src.seek(src.opaque, -(int)ZSTD_seekTableFooterSize, SEEK_END));
+    CHECK_IO(src.read(src.opaque, zs->inBuff, ZSTD_seekTableFooterSize));
+
+    if (MEM_readLE32(zs->inBuff + 5) != ZSTD_SEEKABLE_MAGICNUMBER) {
+        return ERROR(prefix_unknown);
+    }
+
+    {   BYTE const sfd = zs->inBuff[4];
+        checksumFlag = sfd >> 7;
+
+        /* check reserved bits of the descriptor byte */
+        if ((sfd >> 2) & 0x1f) {
+            return ERROR(corruption_detected);
+        }
+    }
+
+    {   U32 const numFrames = MEM_readLE32(zs->inBuff);
+        U32 const sizePerEntry = 8 + (checksumFlag?4:0);
+        U32 const tableSize = sizePerEntry * numFrames;
+        U32 const frameSize = tableSize + ZSTD_seekTableFooterSize + ZSTD_skippableHeaderSize;
+
+        U32 remaining = frameSize - ZSTD_seekTableFooterSize; /* don't need to re-read footer */
+        {
+            U32 const toRead = MIN(remaining, SEEKABLE_BUFF_SIZE);
+
+            CHECK_IO(src.seek(src.opaque, -(S64)frameSize, SEEK_END));
+            CHECK_IO(src.read(src.opaque, zs->inBuff, toRead));
+
+            remaining -= toRead;
+        }
+
+        if (MEM_readLE32(zs->inBuff) != (ZSTD_MAGIC_SKIPPABLE_START | 0xE)) {
+            return ERROR(prefix_unknown);
+        }
+        if (MEM_readLE32(zs->inBuff+4) +
ZSTD_skippableHeaderSize != frameSize) { + return ERROR(prefix_unknown); + } + + { /* Allocate an extra entry at the end so that we can do size + * computations on the last element without special case */ + seekEntry_t* entries = (seekEntry_t*)malloc(sizeof(seekEntry_t) * (numFrames + 1)); + const BYTE* tableBase = zs->inBuff + ZSTD_skippableHeaderSize; + + U32 idx = 0; + U32 pos = 8; + + + U64 cOffset = 0; + U64 dOffset = 0; + + if (!entries) { + free(entries); + return ERROR(memory_allocation); + } + + /* compute cumulative positions */ + for (; idx < numFrames; idx++) { + if (pos + sizePerEntry > SEEKABLE_BUFF_SIZE) { + U32 const toRead = MIN(remaining, SEEKABLE_BUFF_SIZE); + U32 const offset = SEEKABLE_BUFF_SIZE - pos; + memmove(zs->inBuff, zs->inBuff + pos, offset); /* move any data we haven't read yet */ + CHECK_IO(src.read(src.opaque, zs->inBuff+offset, toRead)); + remaining -= toRead; + pos = 0; + } + entries[idx].cOffset = cOffset; + entries[idx].dOffset = dOffset; + + cOffset += MEM_readLE32(zs->inBuff + pos); + pos += 4; + dOffset += MEM_readLE32(zs->inBuff + pos); + pos += 4; + if (checksumFlag) { + entries[idx].checksum = MEM_readLE32(zs->inBuff + pos); + pos += 4; + } + } + entries[numFrames].cOffset = cOffset; + entries[numFrames].dOffset = dOffset; + + zs->seekTable.entries = entries; + zs->seekTable.tableLen = numFrames; + zs->seekTable.checksumFlag = checksumFlag; + return 0; + } + } +} + +size_t ZSTD_seekable_initBuff(ZSTD_seekable* zs, const void* src, size_t srcSize) +{ + zs->buffWrapper = (buffWrapper_t){src, srcSize, 0}; + { ZSTD_seekable_customFile srcFile = {&zs->buffWrapper, + &ZSTD_seekable_read_buff, + &ZSTD_seekable_seek_buff}; + return ZSTD_seekable_initAdvanced(zs, srcFile); } +} + +size_t ZSTD_seekable_initFile(ZSTD_seekable* zs, FILE* src) +{ + ZSTD_seekable_customFile srcFile = {src, &ZSTD_seekable_read_FILE, + &ZSTD_seekable_seek_FILE}; + return ZSTD_seekable_initAdvanced(zs, srcFile); +} + +size_t 
ZSTD_seekable_initAdvanced(ZSTD_seekable* zs, ZSTD_seekable_customFile src) +{ + zs->src = src; + + { const size_t seekTableInit = ZSTD_seekable_loadSeekTable(zs); + if (ZSTD_isError(seekTableInit)) return seekTableInit; } + + zs->decompressedOffset = (U64)-1; + zs->curFrame = (U32)-1; + + { const size_t dstreamInit = ZSTD_initDStream(zs->dstream); + if (ZSTD_isError(dstreamInit)) return dstreamInit; } + return 0; +} + +size_t ZSTD_seekable_decompress(ZSTD_seekable* zs, void* dst, size_t len, U64 offset) +{ + U32 targetFrame = ZSTD_seekable_offsetToFrameIndex(zs, offset); + do { + /* check if we can continue from a previous decompress job */ + if (targetFrame != zs->curFrame || offset != zs->decompressedOffset) { + zs->decompressedOffset = zs->seekTable.entries[targetFrame].dOffset; + zs->curFrame = targetFrame; + + CHECK_IO(zs->src.seek(zs->src.opaque, + zs->seekTable.entries[targetFrame].cOffset, + SEEK_SET)); + zs->in = (ZSTD_inBuffer){zs->inBuff, 0, 0}; + XXH64_reset(&zs->xxhState, 0); + ZSTD_resetDStream(zs->dstream); + } + + while (zs->decompressedOffset < offset + len) { + size_t toRead; + ZSTD_outBuffer outTmp; + size_t prevOutPos; + if (zs->decompressedOffset < offset) { + /* dummy decompressions until we get to the target offset */ + outTmp = (ZSTD_outBuffer){zs->outBuff, MIN(SEEKABLE_BUFF_SIZE, offset - zs->decompressedOffset), 0}; + } else { + outTmp = (ZSTD_outBuffer){dst, len, zs->decompressedOffset - offset}; + } + + prevOutPos = outTmp.pos; + toRead = ZSTD_decompressStream(zs->dstream, &outTmp, &zs->in); + if (ZSTD_isError(toRead)) { + return toRead; + } + + if (zs->seekTable.checksumFlag) { + XXH64_update(&zs->xxhState, (BYTE*)outTmp.dst + prevOutPos, + outTmp.pos - prevOutPos); + } + zs->decompressedOffset += outTmp.pos - prevOutPos; + + if (toRead == 0) { + /* frame complete */ + + /* verify checksum */ + if (zs->seekTable.checksumFlag && + (XXH64_digest(&zs->xxhState) & 0xFFFFFFFFU) != + zs->seekTable.entries[targetFrame].checksum) { + return 
ERROR(corruption_detected); + } + + if (zs->decompressedOffset < offset + len) { + /* go back to the start and force a reset of the stream */ + targetFrame = ZSTD_seekable_offsetToFrameIndex(zs, zs->decompressedOffset); + } + break; + } + + /* read in more data if we're done with this buffer */ + if (zs->in.pos == zs->in.size) { + toRead = MIN(toRead, SEEKABLE_BUFF_SIZE); + CHECK_IO(zs->src.read(zs->src.opaque, zs->inBuff, toRead)); + zs->in.size = toRead; + zs->in.pos = 0; + } + } + } while (zs->decompressedOffset != offset + len); + + return len; +} + +size_t ZSTD_seekable_decompressFrame(ZSTD_seekable* zs, void* dst, size_t dstSize, U32 frameIndex) +{ + if (frameIndex >= zs->seekTable.tableLen) { + return ERROR(frameIndex_tooLarge); + } + + { + size_t const decompressedSize = + zs->seekTable.entries[frameIndex + 1].dOffset - + zs->seekTable.entries[frameIndex].dOffset; + if (dstSize < decompressedSize) { + return ERROR(dstSize_tooSmall); + } + return ZSTD_seekable_decompress( + zs, dst, decompressedSize, + zs->seekTable.entries[frameIndex].dOffset); + } +} diff --git a/doc/educational_decoder/zstd_decompress.c b/doc/educational_decoder/zstd_decompress.c index ae4eaa81c..7c8d8114d 100644 --- a/doc/educational_decoder/zstd_decompress.c +++ b/doc/educational_decoder/zstd_decompress.c @@ -27,16 +27,19 @@ size_t ZSTD_decompress_with_dict(void *const dst, const size_t dst_len, /// Get the decompressed size of an input stream so memory can be allocated in /// advance +/// Returns -1 if the size can't be determined size_t ZSTD_get_decompressed_size(const void *const src, const size_t src_len); /******* UTILITY MACROS AND TYPES *********************************************/ -// Max block size decompressed size is 128 KB and literal blocks must be smaller -// than that +// Max block size decompressed size is 128 KB and literal blocks can't be +// larger than their block #define MAX_LITERALS_SIZE ((size_t)128 * 1024) #define MAX(a, b) ((a) > (b) ? 
(a) : (b)) #define MIN(a, b) ((a) < (b) ? (a) : (b)) +/// This decoder calls exit(1) when it encounters an error, however a production +/// library should propagate error codes #define ERROR(s) \ do { \ fprintf(stderr, "Error: %s\n", s); \ @@ -67,29 +70,31 @@ typedef int64_t i64; /// decompression functions. /*** IO STREAM OPERATIONS *************/ -/// These structs are the interface for IO, and do bounds checking on all -/// operations. They should be used opaquely to ensure safety. -/// Output is always done byte-by-byte +/// ostream_t/istream_t are used to wrap the pointers/length data passed into +/// ZSTD_decompress, so that all IO operations are safely bounds checked +/// They are written/read forward, and reads are treated as little-endian +/// They should be used opaquely to ensure safety typedef struct { u8 *ptr; size_t len; } ostream_t; -/// Input often reads a few bits at a time, so maintain an internal offset typedef struct { const u8 *ptr; - int bit_offset; size_t len; + + // Input often reads a few bits at a time, so maintain an internal offset + int bit_offset; } istream_t; /// The following two functions are the only ones that allow the istream to be /// non-byte aligned /// Reads `num` bits from a bitstream, and updates the internal offset -static inline u64 IO_read_bits(istream_t *const in, const int num); -/// Rewinds the stream by `num` bits -static inline void IO_rewind_bits(istream_t *const in, const int num); +static inline u64 IO_read_bits(istream_t *const in, const int num_bits); +/// Backs-up the stream by `num` bits so they can be read again +static inline void IO_rewind_bits(istream_t *const in, const int num_bits); /// If the remaining bits in a byte will be unused, advance to the end of the /// byte static inline void IO_align_stream(istream_t *const in); @@ -101,30 +106,31 @@ static inline void IO_write_byte(ostream_t *const out, u8 symb); /// be byte aligned. 
static inline size_t IO_istream_len(const istream_t *const in); -/// Returns a pointer where `len` bytes can be read, and advances the internal -/// state. The stream must be byte aligned. +/// Advances the stream by `len` bytes, and returns a pointer to the chunk that +/// was skipped. The stream must be byte aligned. static inline const u8 *IO_read_bytes(istream_t *const in, size_t len); -/// Returns a pointer where `len` bytes can be written, and advances the internal -/// state. The stream must be byte aligned. +/// Advances the stream by `len` bytes, and returns a pointer to the chunk that +/// was skipped so it can be written to. static inline u8 *IO_write_bytes(ostream_t *const out, size_t len); /// Advance the inner state by `len` bytes. The stream must be byte aligned. static inline void IO_advance_input(istream_t *const in, size_t len); -/// Returns an `ostream_t` constructed from the given pointer and length +/// Returns an `ostream_t` constructed from the given pointer and length. static inline ostream_t IO_make_ostream(u8 *out, size_t len); -/// Returns an `istream_t` constructed from the given pointer and length +/// Returns an `istream_t` constructed from the given pointer and length. static inline istream_t IO_make_istream(const u8 *in, size_t len); -/// Returns an `istream_t` with the same base as `in`, and length `len` -/// Then, advance `in` to account for the consumed bytes -/// `in` must be byte aligned +/// Returns an `istream_t` with the same base as `in`, and length `len`. +/// Then, advance `in` to account for the consumed bytes. +/// `in` must be byte aligned. 
static inline istream_t IO_make_sub_istream(istream_t *const in, size_t len); /*** END IO STREAM OPERATIONS *********/ /*** BITSTREAM OPERATIONS *************/ -/// Read `num` bits (up to 64) from `src + offset`, where `offset` is in bits -static inline u64 read_bits_LE(const u8 *src, const int num, +/// Read `num` bits (up to 64) from `src + offset`, where `offset` is in bits, +/// and return them interpreted as a little-endian unsigned integer. +static inline u64 read_bits_LE(const u8 *src, const int num_bits, const size_t offset); /// Read bits from the end of a HUF or FSE bitstream. `offset` is in bits, so @@ -136,9 +142,8 @@ static inline u64 STREAM_read_bits(const u8 *src, const int bits, /*** END BITSTREAM OPERATIONS *********/ /*** BIT COUNTING OPERATIONS **********/ -/// Returns `x`, where `2^x` is the largest power of 2 less than or equal to -/// `num`, or `-1` if `num == 0`. -static inline int log2inf(const u64 num); +/// Returns the index of the highest set bit in `num`, or `-1` if `num == 0` +static inline int highest_set_bit(const u64 num); /*** END BIT COUNTING OPERATIONS ******/ /*** HUFFMAN PRIMITIVES ***************/ @@ -384,8 +389,8 @@ size_t ZSTD_decompress_with_dict(void *const dst, const size_t dst_len, parse_dictionary(&parsed_dict, (const u8 *)dict, dict_len); } - istream_t in = {(const u8 *)src, 0, src_len}; - ostream_t out = {(u8 *)dst, dst_len}; + istream_t in = IO_make_istream(src, src_len); + ostream_t out = IO_make_ostream(dst, dst_len); // "A content compressed by Zstandard is transformed into a Zstandard frame. // Multiple frames can be appended into a single file or stream. 
A frame is @@ -633,6 +638,7 @@ static void frame_context_apply_dict(frame_context_t *const ctx, FSE_copy_dtable(&ctx->of_dtable, &dict->of_dtable); FSE_copy_dtable(&ctx->ml_dtable, &dict->ml_dtable); + // Copy the repeated offsets memcpy(ctx->previous_offsets, dict->previous_offsets, sizeof(ctx->previous_offsets)); } @@ -668,7 +674,7 @@ static void decompress_data(frame_context_t *const ctx, ostream_t *const out, // number of bytes to read and copy." const u8 *const read_ptr = IO_read_bytes(in, block_len); u8 *const write_ptr = IO_write_bytes(out, block_len); - // + // Copy the raw data into the output memcpy(write_ptr, read_ptr, block_len); @@ -682,7 +688,7 @@ static void decompress_data(frame_context_t *const ctx, ostream_t *const out, const u8 *const read_ptr = IO_read_bytes(in, 1); u8 *const write_ptr = IO_write_bytes(out, block_len); - // Copy `block_len` copies of `streams->src[0]` to the output + // Copy `block_len` copies of `read_ptr[0]` to the output memset(write_ptr, read_ptr[0], block_len); ctx->current_total_output += block_len; @@ -751,7 +757,7 @@ static size_t decode_literals_compressed(frame_context_t *const ctx, u8 **const literals, const int block_type, const int size_format); -static void decode_huf_table(istream_t *const in, HUF_dtable *const dtable); +static void decode_huf_table(HUF_dtable *const dtable, istream_t *const in); static void fse_decode_hufweights(ostream_t *weights, istream_t *const in, int *const num_symbs); @@ -894,12 +900,12 @@ static size_t decode_literals_compressed(frame_context_t *const ctx, istream_t huf_stream = IO_make_sub_istream(in, compressed_size); if (block_type == 2) { - // Decode provided Huffman table + // Decode the provided Huffman table // "This section is only present when Literals_Block_Type type is // Compressed_Literals_Block (2)." 
HUF_free_dtable(&ctx->literals_dtable); - decode_huf_table(&huf_stream, &ctx->literals_dtable); + decode_huf_table(&ctx->literals_dtable, &huf_stream); } else { // If the previous Huffman table is being repeated, ensure it exists if (!ctx->literals_dtable.symbols) { @@ -922,13 +928,13 @@ static size_t decode_literals_compressed(frame_context_t *const ctx, } // Decode the Huffman table description -static void decode_huf_table(istream_t *const in, HUF_dtable *const dtable) { - const u8 header = IO_read_bits(in, 8); - +static void decode_huf_table(HUF_dtable *const dtable, istream_t *const in) { // "All literal values from zero (included) to last present one (excluded) // are represented by Weight with values from 0 to Max_Number_of_Bits." // "This is a single byte value (0-255), which describes how to decode the list of weights." + const u8 header = IO_read_bits(in, 8); + u8 weights[HUF_MAX_SYMBS]; memset(weights, 0, sizeof(weights)); @@ -997,7 +1003,7 @@ typedef struct { u16 ll_state; u16 of_state; u16 ml_state; -} sequence_state_t; +} sequence_states_t; /// Different modes to signal to decode_seq_tables what to do typedef enum { @@ -1052,10 +1058,10 @@ static void decompress_sequences(frame_context_t *const ctx, istream_t *const in, sequence_command_t *const sequences, const size_t num_sequences); -static sequence_command_t decode_sequence(sequence_state_t *const state, +static sequence_command_t decode_sequence(sequence_states_t *const state, const u8 *const src, i64 *const offset); -static void decode_seq_table(istream_t *const in, FSE_dtable *const table, +static void decode_seq_table(FSE_dtable *const table, istream_t *const in, const seq_part_t type, const seq_mode_t mode); static size_t decode_sequences(frame_context_t *const ctx, istream_t *in, @@ -1131,34 +1137,33 @@ static void decompress_sequences(frame_context_t *const ctx, istream_t *in, // Offsets // Match Lengths" // Update the tables we have stored in the context - decode_seq_table(in, 
&ctx->ll_dtable, seq_literal_length, + decode_seq_table(&ctx->ll_dtable, in, seq_literal_length, (compression_modes >> 6) & 3); - decode_seq_table(in, &ctx->of_dtable, seq_offset, + decode_seq_table(&ctx->of_dtable, in, seq_offset, (compression_modes >> 4) & 3); - decode_seq_table(in, &ctx->ml_dtable, seq_match_length, + decode_seq_table(&ctx->ml_dtable, in, seq_match_length, (compression_modes >> 2) & 3); - // Check to make sure none of the tables are uninitialized - if (!ctx->ll_dtable.symbols || !ctx->of_dtable.symbols || - !ctx->ml_dtable.symbols) { - CORRUPTION(); + + sequence_states_t states; + + // Initialize the decoding tables + { + states.ll_table = ctx->ll_dtable; + states.of_table = ctx->of_dtable; + states.ml_table = ctx->ml_dtable; } - sequence_state_t state; - // Copy the context's tables into the local state - memcpy(&state.ll_table, &ctx->ll_dtable, sizeof(FSE_dtable)); - memcpy(&state.of_table, &ctx->of_dtable, sizeof(FSE_dtable)); - memcpy(&state.ml_table, &ctx->ml_dtable, sizeof(FSE_dtable)); - - size_t len = IO_istream_len(in); + const size_t len = IO_istream_len(in); const u8 *const src = IO_read_bytes(in, len); // "After writing the last bit containing information, the compressor writes // a single 1-bit and then fills the byte with 0-7 0 bits of padding." - const int padding = 8 - log2inf(src[len - 1]); - i64 offset = len * 8 - padding; + const int padding = 8 - highest_set_bit(src[len - 1]); + // The offset starts at the end because FSE streams are read backwards + i64 bit_offset = len * 8 - padding; // "The bitstream starts with initial state values, each using the required // number of bits in their respective accuracy, decoded previously from @@ -1166,24 +1171,22 @@ static void decompress_sequences(frame_context_t *const ctx, istream_t *in, // // It starts by Literals_Length_State, followed by Offset_State, and finally // Match_Length_State." 
- FSE_init_state(&state.ll_table, &state.ll_state, src, &offset); - FSE_init_state(&state.of_table, &state.of_state, src, &offset); - FSE_init_state(&state.ml_table, &state.ml_state, src, &offset); + FSE_init_state(&states.ll_table, &states.ll_state, src, &bit_offset); + FSE_init_state(&states.of_table, &states.of_state, src, &bit_offset); + FSE_init_state(&states.ml_table, &states.ml_state, src, &bit_offset); for (size_t i = 0; i < num_sequences; i++) { // Decode sequences one by one - sequences[i] = decode_sequence(&state, src, &offset); + sequences[i] = decode_sequence(&states, src, &bit_offset); } - if (offset != 0) { + if (bit_offset != 0) { CORRUPTION(); } - - // Don't free tables so they can be used in the next block } // Decode a single sequence and update the state -static sequence_command_t decode_sequence(sequence_state_t *const state, +static sequence_command_t decode_sequence(sequence_states_t *const states, const u8 *const src, i64 *const offset) { // "Each symbol is a code in its own context, which specifies Baseline and @@ -1191,9 +1194,9 @@ static sequence_command_t decode_sequence(sequence_state_t *const state, // additional bits in the same bitstream." // Decode symbols, but don't update states - const u8 of_code = FSE_peek_symbol(&state->of_table, state->of_state); - const u8 ll_code = FSE_peek_symbol(&state->ll_table, state->ll_state); - const u8 ml_code = FSE_peek_symbol(&state->ml_table, state->ml_state); + const u8 of_code = FSE_peek_symbol(&states->of_table, states->of_state); + const u8 ll_code = FSE_peek_symbol(&states->ll_table, states->ll_state); + const u8 ml_code = FSE_peek_symbol(&states->ml_table, states->ml_state); // Offset doesn't need a max value as it's not decoded using a table if (ll_code > SEQ_MAX_CODES[seq_literal_length] || @@ -1221,17 +1224,18 @@ static sequence_command_t decode_sequence(sequence_state_t *const state, // then Offset_State." 
// If the stream is complete don't read bits to update state if (*offset != 0) { - FSE_update_state(&state->ll_table, &state->ll_state, src, offset); - FSE_update_state(&state->ml_table, &state->ml_state, src, offset); - FSE_update_state(&state->of_table, &state->of_state, src, offset); + FSE_update_state(&states->ll_table, &states->ll_state, src, offset); + FSE_update_state(&states->ml_table, &states->ml_state, src, offset); + FSE_update_state(&states->of_table, &states->of_state, src, offset); } return seq; } /// Given a sequence part and table mode, decode the FSE distribution -static void decode_seq_table(istream_t *const in, FSE_dtable *const table, - const seq_part_t type, const seq_mode_t mode) { +/// Errors if the mode is `seq_repeat` without a pre-existing table in `table` +static void decode_seq_table(FSE_dtable *const table, istream_t *const in, + const seq_part_t type, const seq_mode_t mode) { // Constant arrays indexed by seq_part_t const i16 *const default_distributions[] = {SEQ_LITERAL_LENGTH_DEFAULT_DIST, SEQ_OFFSET_DEFAULT_DIST, @@ -1272,12 +1276,17 @@ static void decode_seq_table(istream_t *const in, FSE_dtable *const table, // "Repeat_Mode : re-use distribution table from previous compressed // block." 
// Nothing to do here, table will be unchanged + if (!table->symbols) { + // This mode is invalid if we don't already have a table + CORRUPTION(); + } break; default: // Impossible, as mode is from 0-3 IMPOSSIBLE(); break; } + } /******* END SEQUENCE DECODING ************************************************/ @@ -1296,6 +1305,8 @@ static void execute_sequences(frame_context_t *const ctx, ostream_t *const out, const sequence_command_t seq = sequences[i]; { + // If the sequence asks for more literals than are left, the + // sequence must be corrupted if (seq.literal_length > IO_istream_len(&litstream)) { CORRUPTION(); } @@ -1336,7 +1347,8 @@ static void execute_sequences(frame_context_t *const ctx, ostream_t *const out, // as per the exception listed above offset = idx < 3 ? offset_hist[idx] : offset_hist[0] - 1; - // If idx == 1 we don't need to modify offset_hist[2] + // If idx == 1 we don't need to modify offset_hist[2], since + // we're using the second-most recent code if (idx > 1) { offset_hist[2] = offset_hist[1]; } @@ -1344,6 +1356,8 @@ static void execute_sequences(frame_context_t *const ctx, ostream_t *const out, offset_hist[0] = offset; } } else { + // When it's not a repeat offset: + // "if (Offset_Value > 3) offset = Offset_Value - 3;" offset = seq.offset - 3; // Shift back history @@ -1391,11 +1405,11 @@ static void execute_sequences(frame_context_t *const ctx, ostream_t *const out, total_output += seq.match_length; } + // Copy any leftover literals { size_t len = IO_istream_len(&litstream); u8 *const write_ptr = IO_write_bytes(out, len); const u8 *const read_ptr = IO_read_bytes(&litstream, len); - // Copy any leftover literals memcpy(write_ptr, read_ptr, len); total_output += len; @@ -1517,10 +1531,10 @@ static void parse_dictionary(dictionary_t *const dict, const u8 *src, // recent offsets (instead of using {1,4,8}), stored in order, 4-bytes // little-endian each, for a total of 12 bytes. Each recent offset must have // a value < dictionary size." 
- decode_huf_table(&in, &dict->literals_dtable); - decode_seq_table(&in, &dict->of_dtable, seq_offset, seq_fse); - decode_seq_table(&in, &dict->ml_dtable, seq_match_length, seq_fse); - decode_seq_table(&in, &dict->ll_dtable, seq_literal_length, seq_fse); + decode_huf_table(&dict->literals_dtable, &in); + decode_seq_table(&dict->of_dtable, &in, seq_offset, seq_fse); + decode_seq_table(&dict->ml_dtable, &in, seq_match_length, seq_fse); + decode_seq_table(&dict->ll_dtable, &in, seq_literal_length, seq_fse); // Read in the previous offset history dict->previous_offsets[0] = IO_read_bits(&in, 32); @@ -1571,20 +1585,20 @@ static void free_dictionary(dictionary_t *const dict) { /******* IO STREAM OPERATIONS *************************************************/ #define UNALIGNED() ERROR("Attempting to operate on a non-byte aligned stream") /// Reads `num` bits from a bitstream, and updates the internal offset -static inline u64 IO_read_bits(istream_t *const in, const int num) { - if (num > 64 || num <= 0) { +static inline u64 IO_read_bits(istream_t *const in, const int num_bits) { + if (num_bits > 64 || num_bits <= 0) { ERROR("Attempt to read an invalid number of bits"); } - const size_t bytes = (num + in->bit_offset + 7) / 8; - const size_t full_bytes = (num + in->bit_offset) / 8; + const size_t bytes = (num_bits + in->bit_offset + 7) / 8; + const size_t full_bytes = (num_bits + in->bit_offset) / 8; if (bytes > in->len) { INP_SIZE(); } - const u64 result = read_bits_LE(in->ptr, num, in->bit_offset); + const u64 result = read_bits_LE(in->ptr, num_bits, in->bit_offset); - in->bit_offset = (num + in->bit_offset) % 8; + in->bit_offset = (num_bits + in->bit_offset) % 8; in->ptr += full_bytes; in->len -= full_bytes; @@ -1593,16 +1607,21 @@ static inline u64 IO_read_bits(istream_t *const in, const int num) { /// If a non-zero number of bits have been read from the current byte, advance /// the offset to the next byte -static inline void IO_rewind_bits(istream_t *const in, int num) 
{ - if (num < 0) { +static inline void IO_rewind_bits(istream_t *const in, int num_bits) { + if (num_bits < 0) { ERROR("Attempting to rewind stream by a negative number of bits"); } - const int new_offset = in->bit_offset - num; - const i64 bytes = (new_offset - 7) / 8; + // move the offset back by `num_bits` bits + const int new_offset = in->bit_offset - num_bits; + // determine the number of whole bytes we have to rewind, rounding up to an + // integer number (e.g. if `new_offset == -5`, `bytes == 1`) + const i64 bytes = -(new_offset - 7) / 8; - in->ptr += bytes; - in->len -= bytes; + in->ptr -= bytes; + in->len += bytes; + // make sure the resulting `bit_offset` is positive, as mod in C does not + // convert numbers from negative to positive (e.g. -22 % 8 == -6) in->bit_offset = ((new_offset % 8) + 8) % 8; } @@ -1683,33 +1702,26 @@ static inline ostream_t IO_make_ostream(u8 *out, size_t len) { /// Returns an `istream_t` constructed from the given pointer and length static inline istream_t IO_make_istream(const u8 *in, size_t len) { - return (istream_t) { in, 0, len }; + return (istream_t) { in, len, 0 }; } /// Returns an `istream_t` with the same base as `in`, and length `len` /// Then, advance `in` to account for the consumed bytes /// `in` must be byte aligned static inline istream_t IO_make_sub_istream(istream_t *const in, size_t len) { - if (len > in->len) { - INP_SIZE(); - } - if (in->bit_offset != 0) { - UNALIGNED(); - } - const istream_t sub = { in->ptr, in->bit_offset, len }; + // Consume `len` bytes of the parent stream + const u8 *const ptr = IO_read_bytes(in, len); - in->ptr += len; - in->len -= len; - - return sub; + // Make a substream using the pointer to those `len` bytes + return IO_make_istream(ptr, len); } /******* END IO STREAM OPERATIONS *********************************************/ /******* BITSTREAM OPERATIONS *************************************************/ /// Read `num` bits (up to 64) from `src + offset`, where `offset` is in bits 
-static inline u64 read_bits_LE(const u8 *src, const int num, +static inline u64 read_bits_LE(const u8 *src, const int num_bits, const size_t offset) { - if (num > 64) { + if (num_bits > 64) { ERROR("Attempt to read an invalid number of bits"); } @@ -1719,10 +1731,10 @@ static inline u64 read_bits_LE(const u8 *src, const int num, u64 res = 0; int shift = 0; - int left = num; + int left = num_bits; while (left > 0) { u64 mask = left >= 8 ? 0xff : (((u64)1 << left) - 1); - // Dead the next byte, shift it to account for the offset, and then mask + // Read the next byte, shift it to account for the offset, and then mask // out the top part if we don't need all the bits res += (((u64)*src++ >> bit_offset) & mask) << shift; shift += 8 - bit_offset; @@ -1761,7 +1773,7 @@ static inline u64 STREAM_read_bits(const u8 *const src, const int bits, /******* BIT COUNTING OPERATIONS **********************************************/ /// Returns `x`, where `2^x` is the largest power of 2 less than or equal to /// `num`, or `-1` if `num == 0`. -static inline int log2inf(const u64 num) { +static inline int highest_set_bit(const u64 num) { for (int i = 63; i >= 0; i--) { if (((u64)1 << i) <= num) { return i; @@ -1813,17 +1825,18 @@ static size_t HUF_decompress_1stream(const HUF_dtable *const dtable, // final-bit-flag. Consequently, a last byte of 0 is not possible. And the // final-bit-flag itself is not part of the useful bitstream. Hence, the // last byte contains between 0 and 7 useful bits." 
- const int padding = 8 - log2inf(src[len - 1]); + const int padding = 8 - highest_set_bit(src[len - 1]); - i64 offset = len * 8 - padding; + // Offset starts at the end because HUF streams are read backwards + i64 bit_offset = len * 8 - padding; u16 state; - HUF_init_state(dtable, &state, src, &offset); + HUF_init_state(dtable, &state, src, &bit_offset); size_t symbols_written = 0; - while (offset > -dtable->max_bits) { + while (bit_offset > -dtable->max_bits) { // Iterate over the stream, decoding one symbol at a time - IO_write_byte(out, HUF_decode_symbol(dtable, &state, src, &offset)); + IO_write_byte(out, HUF_decode_symbol(dtable, &state, src, &bit_offset)); symbols_written++; } // "The process continues up to reading the required number of symbols per @@ -1836,7 +1849,7 @@ static size_t HUF_decompress_1stream(const HUF_dtable *const dtable, // before the start of `src` // Therefore `offset`, the edge to start reading new bits at, should be // dtable->max_bits before the start of the stream - if (offset != -dtable->max_bits) { + if (bit_offset != -dtable->max_bits) { CORRUPTION(); } @@ -1960,7 +1973,7 @@ static void HUF_init_dtable_usingweights(HUF_dtable *const table, } // Find the first power of 2 larger than the sum - const int max_bits = log2inf(weight_sum) + 1; + const int max_bits = highest_set_bit(weight_sum) + 1; const u64 left_over = ((u64)1 << max_bits) - weight_sum; // If the left over isn't a power of 2, the weights are invalid if (left_over & (left_over - 1)) { @@ -1969,7 +1982,7 @@ static void HUF_init_dtable_usingweights(HUF_dtable *const table, // left_over is used to find the last weight as it's not transmitted // by inverting 2^(weight - 1) we can determine the value of last_weight - const int last_weight = log2inf(left_over) + 1; + const int last_weight = highest_set_bit(left_over) + 1; for (int i = 0; i < num_symbs; i++) { // "Number_of_Bits = Number_of_Bits ? 
Max_Number_of_Bits + 1 - Weight : 0" @@ -2063,7 +2076,7 @@ static size_t FSE_decompress_interleaved2(const FSE_dtable *const dtable, // final-bit-flag. Consequently, a last byte of 0 is not possible. And the // final-bit-flag itself is not part of the useful bitstream. Hence, the // last byte contains between 0 and 7 useful bits." - const int padding = 8 - log2inf(src[len - 1]); + const int padding = 8 - highest_set_bit(src[len - 1]); i64 offset = len * 8 - padding; u16 state1, state2; @@ -2184,7 +2197,7 @@ static void FSE_init_dtable(FSE_dtable *const dtable, u16 next_state_desc = state_desc[symbol]++; // Fills in the table appropriately, next_state_desc increases by symbol // over time, decreasing number of bits - dtable->num_bits[i] = (u8)(accuracy_log - log2inf(next_state_desc)); + dtable->num_bits[i] = (u8)(accuracy_log - highest_set_bit(next_state_desc)); // Baseline increases until the bit threshold is passed, at which point // it resets to 0 dtable->new_state_base[i] = @@ -2235,7 +2248,7 @@ static void FSE_decode_header(FSE_dtable *const dtable, istream_t *const in, int symb = 0; while (remaining > 0 && symb < FSE_MAX_SYMBS) { // Log of the number of possible values we could read - int bits = log2inf(remaining + 1) + 1; + int bits = highest_set_bit(remaining + 1) + 1; u16 val = IO_read_bits(in, bits); diff --git a/doc/images/Cspeed4.png b/doc/images/Cspeed4.png index f0ca0ffba..318204c00 100644 Binary files a/doc/images/Cspeed4.png and b/doc/images/Cspeed4.png differ diff --git a/doc/images/Dspeed4.png b/doc/images/Dspeed4.png index eba485d0d..b7baef1ff 100644 Binary files a/doc/images/Dspeed4.png and b/doc/images/Dspeed4.png differ diff --git a/doc/images/dict-cr.png b/doc/images/dict-cr.png index f555a46c7..f3a9ce2bd 100644 Binary files a/doc/images/dict-cr.png and b/doc/images/dict-cr.png differ diff --git a/doc/images/dict-cs.png b/doc/images/dict-cs.png index ccc02b0d1..55e5ef518 100644 Binary files a/doc/images/dict-cs.png and b/doc/images/dict-cs.png 
differ diff --git a/doc/images/dict-ds.png b/doc/images/dict-ds.png index 858cad685..1153f1b95 100644 Binary files a/doc/images/dict-ds.png and b/doc/images/dict-ds.png differ diff --git a/doc/zstd_compression_format.md b/doc/zstd_compression_format.md index d4b46548a..1f212fea2 100644 --- a/doc/zstd_compression_format.md +++ b/doc/zstd_compression_format.md @@ -16,7 +16,8 @@ Distribution of this document is unlimited. ### Version -0.2.4 (17/02/17) +0.2.5 (31/03/17) + Introduction ------------ @@ -109,7 +110,7 @@ The structure of a single Zstandard frame is following: __`Magic_Number`__ -4 Bytes, little-endian format. +4 Bytes, __little-endian__ format. Value : 0xFD2FB528 __`Frame_Header`__ @@ -127,7 +128,7 @@ An optional 32-bit checksum, only present if `Content_Checksum_flag` is set. The content checksum is the result of [xxh64() hash function](http://www.xxhash.org) digesting the original (decoded) data as input, and a seed of zero. -The low 4 bytes of the checksum are stored in little endian format. +The low 4 bytes of the checksum are stored in __little-endian__ format. ### `Frame_Header` @@ -154,41 +155,42 @@ Decoding this byte is enough to tell the size of `Frame_Header`. | 2 | `Content_Checksum_flag` | | 1-0 | `Dictionary_ID_flag` | -In this table, bit 7 the is highest bit, while bit 0 the is lowest. +In this table, bit 7 is the highest bit, while bit 0 is the lowest one. __`Frame_Content_Size_flag`__ This is a 2-bits flag (`= Frame_Header_Descriptor >> 6`), -specifying if decompressed data size is provided within the header. -The `Flag_Value` can be converted into `Field_Size`, +specifying if `Frame_Content_Size` (the decompressed data size) +is provided within the header. 
+`Flag_Value` provides `FCS_Field_Size`, which is the number of bytes used by `Frame_Content_Size` according to the following table: -|`Flag_Value`| 0 | 1 | 2 | 3 | -| ---------- | ------ | --- | --- | --- | -|`Field_Size`| 0 or 1 | 2 | 4 | 8 | +| `Flag_Value` | 0 | 1 | 2 | 3 | +| -------------- | ------ | --- | --- | --- | +|`FCS_Field_Size`| 0 or 1 | 2 | 4 | 8 | -When `Flag_Value` is `0`, `Field_Size` depends on `Single_Segment_flag` : +When `Flag_Value` is `0`, `FCS_Field_Size` depends on `Single_Segment_flag` : if `Single_Segment_flag` is set, `Field_Size` is 1. -Otherwise, `Field_Size` is 0 (content size not provided). +Otherwise, `Field_Size` is 0 : `Frame_Content_Size` is not provided. __`Single_Segment_flag`__ If this flag is set, data must be regenerated within a single continuous memory segment. -In this case, `Frame_Content_Size` is necessarily present, -but `Window_Descriptor` byte is skipped. +In this case, `Window_Descriptor` byte is skipped, +but `Frame_Content_Size` is necessarily present. As a consequence, the decoder must allocate a memory segment of size equal or bigger than `Frame_Content_Size`. In order to preserve the decoder from unreasonable memory requirements, -a decoder can reject a compressed frame +a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. -This is just a recommendation, +This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. @@ -224,37 +226,38 @@ It also specifies the size of this field as `Field_Size`. #### `Window_Descriptor` -Provides guarantees on maximum back-reference distance -that will be used within compressed data. +Provides guarantees on minimum memory buffer required to decompress a frame. This information is important for decoders to allocate enough memory. 
-The `Window_Descriptor` byte is optional. It is absent when `Single_Segment_flag` is set. -In this case, the maximum back-reference distance is the content size itself, -which can be any value from 1 to 2^64-1 bytes (16 EB). +The `Window_Descriptor` byte is optional. +When `Single_Segment_flag` is set, `Window_Descriptor` is not present. +In this case, `Window_Size` is `Frame_Content_Size`, +which can be any value from 0 to 2^64-1 bytes (16 ExaBytes). | Bit numbers | 7-3 | 2-0 | | ----------- | ---------- | ---------- | | Field name | `Exponent` | `Mantissa` | -Maximum distance is given by the following formulas : +The minimum memory buffer size is called `Window_Size`. +It is described by the following formulas : ``` windowLog = 10 + Exponent; windowBase = 1 << windowLog; windowAdd = (windowBase / 8) * Mantissa; Window_Size = windowBase + windowAdd; ``` -The minimum window size is 1 KB. -The maximum size is `15*(1<<38)` bytes, which is 1.875 TB. +The minimum `Window_Size` is 1 KB. +The maximum `Window_Size` is `(1<<41) + 7*(1<<38)` bytes, which is 3.75 TB. To properly decode compressed data, a decoder will need to allocate a buffer of at least `Window_Size` bytes. In order to preserve decoder from unreasonable memory requirements, -a decoder can refuse a compressed frame +a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. For improved interoperability, -decoders are recommended to be compatible with window sizes of 8 MB, +decoders are recommended to be compatible with `Window_Size >= 8 MB`, and encoders are recommended to not request more than 8 MB. It's merely a recommendation though, decoders are free to support larger or lower limits, @@ -264,112 +267,118 @@ depending on local limitations. This is a variable size field, which contains the ID of the dictionary required to properly decode the frame. -Note that this field is optional. When it's not present, +`Dictionary_ID` field is optional. 
When it's not present, it's up to the decoder to make sure it uses the correct dictionary. -Format is little-endian. Field size depends on `Dictionary_ID_flag`. 1 byte can represent an ID 0-255. 2 bytes can represent an ID 0-65535. 4 bytes can represent an ID 0-4294967295. +Format is __little-endian__. It's allowed to represent a small ID (for example `13`) -with a large 4-bytes dictionary ID, losing some compacity in the process. +with a large 4-bytes dictionary ID, even if it is less efficient. _Reserved ranges :_ If the frame is going to be distributed in a private environment, any dictionary ID can be used. However, for public distribution of compressed frames using a dictionary, -the following ranges are reserved for future use and should not be used : -- low range : 1 - 32767 -- high range : >= (2^31) - +the following ranges are reserved and shall not be used : +- low range : `<= 32767` +- high range : `>= (1 << 31)` #### `Frame_Content_Size` This is the original (uncompressed) size. This information is optional. -The `Field_Size` is provided according to value of `Frame_Content_Size_flag`. -The `Field_Size` can be equal to 0 (not present), 1, 2, 4 or 8 bytes. -Format is little-endian. +`Frame_Content_Size` uses a variable number of bytes, provided by `FCS_Field_Size`. +`FCS_Field_Size` is provided by the value of `Frame_Content_Size_flag`. +`FCS_Field_Size` can be equal to 0 (not present), 1, 2, 4 or 8 bytes. -| `Field_Size` | Range | -| ------------ | ---------- | -| 1 | 0 - 255 | -| 2 | 256 - 65791| -| 4 | 0 - 2^32-1 | -| 8 | 0 - 2^64-1 | +| `FCS_Field_Size` | Range | +| ---------------- | ---------- | +| 0 | unknown | +| 1 | 0 - 255 | +| 2 | 256 - 65791| +| 4 | 0 - 2^32-1 | +| 8 | 0 - 2^64-1 | -When `Field_Size` is 1, 4 or 8 bytes, the value is read directly. -When `Field_Size` is 2, _the offset of 256 is added_. +`Frame_Content_Size` format is __little-endian__. +When `FCS_Field_Size` is 1, 4 or 8 bytes, the value is read directly. 
+When `FCS_Field_Size` is 2, _the offset of 256 is added_. It's allowed to represent a small size (for example `18`) using any compatible variant. + Blocks ------- -After the magic number and header of each block, -there are some number of blocks. -Each frame must have at least one block but there is no upper limit -on the number of blocks per frame. + +After `Magic_Number` and `Frame_Header`, there are some number of blocks. +Each frame must have at least one block, +but there is no upper limit on the number of blocks per frame. The structure of a block is as follows: -| `Last_Block` | `Block_Type` | `Block_Size` | `Block_Content` | -|:------------:|:------------:|:------------:|:---------------:| -| 1 bit | 2 bits | 21 bits | n bytes | +| `Block_Header` | `Block_Content` | +|:--------------:|:---------------:| +| 3 bytes | n bytes | -The block header (`Last_Block`, `Block_Type`, and `Block_Size`) uses 3-bytes. +`Block_Header` uses 3 bytes, written using __little-endian__ convention. +It contains 3 fields : + +| `Last_Block` | `Block_Type` | `Block_Size` | +|:------------:|:------------:|:------------:| +| bit 0 | bits 1-2 | bits 3-23 | __`Last_Block`__ The lowest bit signals if this block is the last one. -The frame will end after this one. +The frame will end after this last block. It may be followed by an optional `Content_Checksum` (see [Zstandard Frames](#zstandard-frames)). -__`Block_Type` and `Block_Size`__ - -The next 2 bits represent the `Block_Type`, -while the remaining 21 bits represent the `Block_Size`. -Format is __little-endian__. +__`Block_Type`__ +The next 2 bits represent the `Block_Type`. There are 4 block types : -| Value | 0 | 1 | 2 | 3 | +| Value | 0 | 1 | 2 | 3 | | ------------ | ----------- | ----------- | ------------------ | --------- | | `Block_Type` | `Raw_Block` | `RLE_Block` | `Compressed_Block` | `Reserved`| - `Raw_Block` - this is an uncompressed block. 
- `Block_Content` contains `Block_Size` bytes to read and copy - as decoded data. + `Block_Content` contains `Block_Size` bytes. -- `RLE_Block` - this is a single byte, repeated N times. - `Block_Content` consists of a single byte, - and `Block_Size` is the number of times this byte should be repeated. +- `RLE_Block` - this is a single byte, repeated `Block_Size` times. + `Block_Content` consists of a single byte. + On the decompression side, this byte must be repeated `Block_Size` times. - `Compressed_Block` - this is a [Zstandard compressed block](#compressed-blocks), explained later on. `Block_Size` is the length of `Block_Content`, the compressed data. - The decompressed size is unknown, + The decompressed size is not known, but its maximum possible value is guaranteed (see below) - `Reserved` - this is not a block. This value cannot be used with current version of this specification. +__`Block_Size`__ + +The upper 21 bits of `Block_Header` represent the `Block_Size`. + Block sizes must respect a few rules : -- In compressed mode, compressed size is always strictly less than decompressed size. -- Block decompressed size is always <= maximum back-reference distance. +- For `Compressed_Block`, `Block_Size` is always strictly less than decompressed size. +- Block decompressed size is always <= `Window_Size` - Block decompressed size is always <= 128 KB. -A data block is not necessarily "full" : -since an arbitrary “flush” may happen anytime, -block decompressed content can be any size (even empty), +A block can contain any number of bytes (even empty), up to `Block_Maximum_Decompressed_Size`, which is the smallest of : -- Maximum back-reference distance +- `Window_Size` - 128 KB + Compressed Blocks ----------------- -To decompress a compressed block, the compressed size must be provided from -`Block_Size` field in the block header. +To decompress a compressed block, the compressed size must be provided +from `Block_Size` field within `Block_Header`. 
A compressed block consists of 2 sections : - [Literals Section](#literals-section) @@ -381,36 +390,34 @@ data in [Sequence Execution](#sequence-execution) #### Prerequisites To decode a compressed block, the following elements are necessary : - Previous decoded data, up to a distance of `Window_Size`, - or all previous data when `Single_Segment_flag` is set. -- List of "recent offsets" from the previous compressed block. -- Decoding tables of the previous compressed block for each symbol type + or all previously decoded data when `Single_Segment_flag` is set. +- List of "recent offsets" from previous `Compressed_Block`. +- Decoding tables of previous `Compressed_Block` for each symbol type (literals, literals lengths, match lengths, offsets). Literals Section ---------------- -During sequence execution, symbols from the literals section -During sequence phase, literals will be entangled with match copy operations. All literals are regrouped in the first part of the block. -They can be decoded first, and then copied during sequence operations, -or they can be decoded on the flow, as needed by sequence commands. - -| `Literals_Section_Header` | [`Huffman_Tree_Description`] | Stream1 | [Stream2] | [Stream3] | [Stream4] | -| ------------------------- | ---------------------------- | ------- | --------- | --------- | --------- | +They can be decoded first, and then copied during [Sequence Execution], +or they can be decoded on the flow during [Sequence Execution]. Literals can be stored uncompressed or compressed using Huffman prefix codes. When compressed, an optional tree description can be present, followed by 1 or 4 streams. +| `Literals_Section_Header` | [`Huffman_Tree_Description`] | Stream1 | [Stream2] | [Stream3] | [Stream4] | +| ------------------------- | ---------------------------- | ------- | --------- | --------- | --------- | + #### `Literals_Section_Header` Header is in charge of describing how literals are packed. 
It's a byte-aligned variable-size bitfield, ranging from 1 to 5 bytes, -using little-endian convention. +using __little-endian__ convention. | `Literals_Block_Type` | `Size_Format` | `Regenerated_Size` | [`Compressed_Size`] | -| --------------------- | ------------- | ------------------ | ----------------- | -| 2 bits | 1 - 2 bits | 5 - 20 bits | 0 - 18 bits | +| --------------------- | ------------- | ------------------ | ------------------- | +| 2 bits | 1 - 2 bits | 5 - 20 bits | 0 - 18 bits | In this representation, bits on the left are the lowest bits. @@ -418,33 +425,38 @@ __`Literals_Block_Type`__ This field uses 2 lowest bits of first byte, describing 4 different block types : -| `Literals_Block_Type` | Value | -| ----------------------------- | ----- | -| `Raw_Literals_Block` | 0 | -| `RLE_Literals_Block` | 1 | -| `Compressed_Literals_Block` | 2 | -| `Repeat_Stats_Literals_Block` | 3 | +| `Literals_Block_Type` | Value | +| --------------------------- | ----- | +| `Raw_Literals_Block` | 0 | +| `RLE_Literals_Block` | 1 | +| `Compressed_Literals_Block` | 2 | +| `Treeless_Literals_Block` | 3 | - `Raw_Literals_Block` - Literals are stored uncompressed. -- `RLE_Literals_Block` - Literals consist of a single byte value repeated N times. +- `RLE_Literals_Block` - Literals consist of a single byte value + repeated `Regenerated_Size` times. - `Compressed_Literals_Block` - This is a standard Huffman-compressed block, starting with a Huffman tree description. See details below. -- `Repeat_Stats_Literals_Block` - This is a Huffman-compressed block, +- `Treeless_Literals_Block` - This is a Huffman-compressed block, using Huffman tree _from previous Huffman-compressed literals block_. - Huffman tree description will be skipped. - Note: If this mode is used without any previous Huffman-table in the frame - (or [dictionary](#dictionary-format)), this should be treated as corruption. + `Huffman_Tree_Description` will be skipped. 
+ Note: If this mode is triggered without any previous Huffman-table in the frame + (or [dictionary](#dictionary-format)), this should be treated as data corruption. __`Size_Format`__ `Size_Format` is divided into 2 families : -- For `Raw_Literals_Block` and `RLE_Literals_Block` it's enough to decode `Regenerated_Size`. -- For `Compressed_Block`, its required to decode both `Compressed_Size` - and `Regenerated_Size` (the decompressed size). It will also decode the number of streams. +- For `Raw_Literals_Block` and `RLE_Literals_Block`, + it's only necessary to decode `Regenerated_Size`. + There is no `Compressed_Size` field. +- For `Compressed_Block` and `Treeless_Literals_Block`, + it's required to decode both `Compressed_Size` + and `Regenerated_Size` (the decompressed size). + It's also necessary to decode the number of streams (1 or 4). -For values spanning several bytes, convention is little-endian. +For values spanning several bytes, convention is __little-endian__. __`Size_Format` for `Raw_Literals_Block` and `RLE_Literals_Block`__ : @@ -463,9 +475,9 @@ __`Size_Format` for `Raw_Literals_Block` and `RLE_Literals_Block`__ : Only Stream1 is present for these cases. Note : it's allowed to represent a short value (for example `13`) -using a long format, accepting the increased compressed data size. +using a long format, even if it's less efficient. -__`Size_Format` for `Compressed_Literals_Block` and `Repeat_Stats_Literals_Block`__ : +__`Size_Format` for `Compressed_Literals_Block` and `Treeless_Literals_Block`__ : - Value 00 : _A single stream_. Both `Regenerated_Size` and `Compressed_Size` use 10 bits (0-1023). @@ -480,67 +492,68 @@ __`Size_Format` for `Compressed_Literals_Block` and `Repeat_Stats_Literals_Block Both `Regenerated_Size` and `Compressed_Size` use 18 bits (0-262143). `Literals_Section_Header` has 5 bytes. -Both `Compressed_Size` and `Regenerated_Size` fields follow little-endian convention. 
-Note: `Compressed_Size` __includes__ the size of the Huffman Tree description if it -is present. +Both `Compressed_Size` and `Regenerated_Size` fields follow __little-endian__ convention. +Note: `Compressed_Size` __includes__ the size of the Huffman Tree description +_when_ it is present. ### Raw Literals Block -The data in Stream1 is `Regenerated_Size` bytes long, and contains the raw literals data -to be used in sequence execution. +The data in Stream1 is `Regenerated_Size` bytes long, +it contains the raw literals data to be used during [Sequence Execution]. ### RLE Literals Block Stream1 consists of a single byte which should be repeated `Regenerated_Size` times to generate the decoded literals. -### Compressed Literals Block and Repeat Stats Literals Block -Both of these modes contain Huffman encoded data +### Compressed Literals Block and Treeless Literals Block +Both of these modes contain Huffman encoded data. +`Treeless_Literals_Block` does not have a `Huffman_Tree_Description`. #### `Huffman_Tree_Description` This section is only present when `Literals_Block_Type` type is `Compressed_Literals_Block` (`2`). The format of the Huffman tree description can be found at [Huffman Tree description](#huffman-tree-description). -The size Huffman Tree description will be determined during the decoding process, -and must be used to determine where the compressed Huffman streams begin. +The size of `Huffman_Tree_Description` is determined during decoding process, +it must be used to determine where streams begin. +`Total_Streams_Size = Compressed_Size - Huffman_Tree_Description_Size`. -If repeat stats mode is used, the Huffman table used in the previous compressed block will -be used to decompress this block as well. +For `Treeless_Literals_Block`, +the Huffman table comes from previously compressed literals block. -Huffman compressed data consists either 1 or 4 Huffman-coded streams. +Huffman compressed data consists of either 1 or 4 Huffman-coded streams. 
If only one stream is present, it is a single bitstream occupying the entire -remaining portion of the literals block, encoded as described at +remaining portion of the literals block, encoded as described within [Huffman-Coded Streams](#huffman-coded-streams). If there are four streams, the literals section header only provides enough -information to know the regenerated and compressed sizes of all four streams combined. -The regenerated size of each stream is equal to `(totalSize+3)/4`, except for the last stream, -which may be up to 3 bytes smaller, to reach a total decompressed size match that described -in the literals header. +information to know the decompressed and compressed sizes of all four streams _combined_. +The decompressed size of each stream is equal to `(Regenerated_Size+3)/4`, +except for the last stream which may be up to 3 bytes smaller, +to reach a total decompressed size as specified in `Regenerated_Size`. -The compressed size of each stream is provided explicitly: the first 6 bytes of the compressed -data consist of three 2-byte little endian fields, describing the compressed sizes -of the first three streams. -The last streams size is computed from the total compressed size and the size of the other -three streams. +The compressed size of each stream is provided explicitly: +the first 6 bytes of the compressed data consist of three 2-byte __little-endian__ fields, +describing the compressed sizes of the first three streams. +`Stream4_Size` is computed from total `Total_Streams_Size` minus sizes of other streams. -`stream4CSize = totalCSize - 6 - stream1CSize - stream2CSize - stream3CSize`. +`Stream4_Size = Total_Streams_Size - 6 - Stream1_Size - Stream2_Size - Stream3_Size`. -Note: remember that totalCSize may be smaller than the `Compressed_Size` found in the literals -block header as `Compressed_Size` also contains the size of the Huffman Tree description if it -is present. 
+Note: remember that `Total_Streams_Size` can be smaller than `Compressed_Size` in header, +because `Compressed_Size` also contains `Huffman_Tree_Description_Size` when it is present. Each of these 4 bitstreams is then decoded independently as a Huffman-Coded stream, as described at [Huffman-Coded Streams](#huffman-coded-streams) + Sequences Section ----------------- A compressed block is a succession of _sequences_ . A sequence is a literal copy command, followed by a match copy command. A literal copy command specifies a length. -It is the number of bytes to be copied (or extracted) from the literal section. +It is the number of bytes to be copied (or extracted) from the Literals Section. A match copy command specifies an offset and a length. When all _sequences_ are decoded, -if there is are any literals left in the _literal section_, +if there are literals left in the _literal section_, these bytes are added at the end of the block. This is described in more detail in [Sequence Execution](#sequence-execution) @@ -557,7 +570,7 @@ followed by the bitstream. | -------------------------- | ------------------------- | ---------------- | ---------------------- | --------- | To decode the `Sequences_Section`, it's required to know its size. -This size is deduced from `blockSize - literalSectionSize`. +This size is deduced from `Block_Size - Literals_Section_Size`. #### `Sequences_Section_Header` @@ -572,7 +585,7 @@ This is a variable size field using between 1 and 3 bytes. Let's call its first byte `byte0`. - `if (byte0 == 0)` : there are no sequences. The sequence section stops there. - Regenerated content is defined entirely by literals section. + Decompressed content is defined entirely as Literals Section content. - `if (byte0 < 128)` : `Number_of_Sequences = byte0` . Uses 1 byte. - `if (byte0 < 255)` : `Number_of_Sequences = ((byte0-128) << 8) + byte1` . Uses 2 bytes. - `if (byte0 == 255)`: `Number_of_Sequences = byte1 + (byte2<<8) + 0x7F00` . Uses 3 bytes. 
@@ -581,14 +594,14 @@ __Symbol compression modes__ This is a single byte, defining the compression mode of each symbol type. -|Bit number| 7-6 | 5-4 | 3-2 | 1-0 | +|Bit number| 7-6 | 5-4 | 3-2 | 1-0 | | -------- | ----------------------- | -------------- | -------------------- | ---------- | |Field name| `Literals_Lengths_Mode` | `Offsets_Mode` | `Match_Lengths_Mode` | `Reserved` | The last field, `Reserved`, must be all-zeroes. `Literals_Lengths_Mode`, `Offsets_Mode` and `Match_Lengths_Mode` define the `Compression_Mode` of -literals lengths, offsets, and match lengths respectively. +literals lengths, offsets, and match lengths symbols respectively. They follow the same enumeration : @@ -598,17 +611,17 @@ They follow the same enumeration : - `Predefined_Mode` : A predefined FSE distribution table is used, defined in [default distributions](#default-distributions). - The table takes no space in the compressed data. + No distribution table will be present. - `RLE_Mode` : The table description consists of a single byte. - This code will be repeated for every sequence. + This code will be repeated for all sequences. - `Repeat_Mode` : The table used in the previous compressed block will be used again. No distribution table will be present. - Note: this includes RLE mode, so if repeat_mode follows rle_mode the same symbol will be repeated. + Note: this includes RLE mode, so if `Repeat_Mode` follows `RLE_Mode`, the same symbol will be repeated. If this mode is used without any previous sequence table in the frame (or [dictionary](#dictionary-format)) to repeat, this should be treated as corruption. - `FSE_Compressed_Mode` : standard FSE compression. A distribution table will be present. - The format of this distribution table is described in (FSE Table Description)[#fse-table-description]. + The format of this distribution table is described in [FSE Table Description](#fse-table-description). 
Note that the maximum allowed accuracy log for literals length and match length tables is 9, and the maximum accuracy log for the offsets table is 8. @@ -625,7 +638,7 @@ Literals length codes are values ranging from `0` to `35` included. They define lengths from 0 to 131071 bytes. The literals length is equal to the decoded `Baseline` plus the result of reading `Number_of_Bits` bits from the bitstream, -as a little-endian value. +as a __little-endian__ value. | `Literals_Length_Code` | 0-15 | | ---------------------- | ---------------------- | @@ -654,7 +667,7 @@ Match length codes are values ranging from `0` to `52` included. They define lengths from 3 to 131074 bytes. The match length is equal to the decoded `Baseline` plus the result of reading `Number_of_Bits` bits from the bitstream, -as a little-endian value. +as a __little-endian__ value. | `Match_Length_Code` | 0-31 | | ------------------- | ----------------------- | @@ -685,7 +698,7 @@ Recommendation is to support at least up to `22`. For information, at the time of this writing. the reference decoder supports a maximum `N` value of `28` in 64-bits mode. -An offset code is also the number of additional bits to read in little-endian fashion, +An offset code is also the number of additional bits to read in __little-endian__ fashion, and can be translated into an `Offset_Value` using the following formulas : ``` @@ -720,8 +733,8 @@ begins. FSE decoding requires a 'state' to be carried from symbol to symbol. For more explanation on FSE decoding, see the [FSE section](#fse). -For sequence decoding, a separate state must be kept track of for each of -literal lengths, offsets, and match lengths. +For sequence decoding, a separate state keeps track of each +literal lengths, offsets, and match lengths symbols. Some FSE primitives are also used. For more details on the operation of these primitives, see the [FSE section](#fse). @@ -753,8 +766,7 @@ See the [description of the codes] for how to determine these values. 
[description of the codes]: #the-codes-for-literals-lengths-match-lengths-and-offsets Decoding starts by reading the `Number_of_Bits` required to decode `Offset`. -It then does the same for `Match_Length`, -and then for `Literals_Length`. +It then does the same for `Match_Length`, and then for `Literals_Length`. This sequence is then used for [sequence execution](#sequence-execution). If it is not the last sequence in the block, @@ -807,6 +819,7 @@ short offsetCodes_defaultDistribution[29] = 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1 }; ``` + Sequence Execution ------------------ Once literals and sequences have been decoded, @@ -826,7 +839,8 @@ in this case. The offset is defined as from the current position, so an offset of 6 and a match length of 3 means that 3 bytes should be copied from 6 bytes back. -Note that all offsets must be at most equal to the window size defined by the frame header. +Note that all offsets leading to previously decoded data +must be smaller than `Window_Size` defined in `Frame_Header_Descriptor`. #### Repeat offsets As seen in [Sequence Execution](#sequence-execution), @@ -842,11 +856,10 @@ so an `offset_value` of 1 means `Repeated_Offset2`, an `offset_value` of 2 means `Repeated_Offset3`, and an `offset_value` of 3 means `Repeated_Offset1 - 1_byte`. -In the first block, the offset history is populated with the following values : 1, 4 and 8 (in order). +For the first block, the starting offset history is populated with the following values : 1, 4 and 8 (in order). -Then each block gets its starting offset history from the ending values of the most recent compressed block. -Note that non-compressed blocks are skipped, -they do not contribute to offset history. +Then each block gets its starting offset history from the ending values of the most recent `Compressed_Block`. +Note that blocks which are not `Compressed_Block` are skipped, they do not contribute to offset history. 
[Offset Codes]: #offset-codes @@ -859,6 +872,7 @@ This means that when `Repeated_Offset1` (most recent) is used, history is unmodi When `Repeated_Offset2` is used, it's swapped with `Repeated_Offset1`. If any other offset is used, it becomes `Repeated_Offset1` and the rest are shift back by one. + Skippable Frames ---------------- @@ -878,7 +892,7 @@ Skippable frames defined in this specification are compatible with [LZ4] ones. __`Magic_Number`__ -4 Bytes, little-endian format. +4 Bytes, __little-endian__ format. Value : 0x184D2A5?, which means any value from 0x184D2A50 to 0x184D2A5F. All 16 values are valid to identify a skippable frame. @@ -886,13 +900,14 @@ __`Frame_Size`__ This is the size, in bytes, of the following `User_Data` (without including the magic number nor the size field itself). -This field is represented using 4 Bytes, little-endian format, unsigned 32-bits. +This field is represented using 4 Bytes, __little-endian__ format, unsigned 32-bits. This means `User_Data` can’t be bigger than (2^32-1) bytes. __`User_Data`__ The `User_Data` can be anything. Data will just be skipped by the decoder. + Entropy Encoding ---------------- Two types of entropy encoding are used by the Zstandard format: @@ -900,7 +915,7 @@ FSE, and Huffman coding. FSE --- -FSE, or FiniteStateEntropy is an entropy coding based on [ANS]. +FSE, short for Finite State Entropy, is an entropy codec based on [ANS]. FSE encoding/decoding involves a state that is carried over between symbols, so decoding must be done in the opposite direction as encoding. Therefore, all FSE bitstreams are read from end to beginning. @@ -909,15 +924,15 @@ For additional details on FSE, see [Finite State Entropy]. [Finite State Entropy]:https://github.com/Cyan4973/FiniteStateEntropy/ -FSE decoding involves a decoding table which has a power of 2 size and three elements: +FSE decoding involves a decoding table which has a power of 2 size, and contain three elements: `Symbol`, `Num_Bits`, and `Baseline`. 
The `log2` of the table size is its `Accuracy_Log`. The FSE state represents an index in this table. -The next symbol in the stream is the symbol indicated by the table value for that state. -To obtain the next state value, -the decoder should consume `Num_Bits` bits from the stream as a little endian value and add it to baseline. -To obtain the initial state value, consume `Accuracy_Log` bits from the stream as a little endian value. +To obtain the initial state value, consume `Accuracy_Log` bits from the stream as a __little-endian__ value. +The next symbol in the stream is the `Symbol` indicated in the table for that state. +To obtain the next state value, +the decoder should consume `Num_Bits` bits from the stream as a __little-endian__ value and add it to `Baseline`. [ANS]: https://en.wikipedia.org/wiki/Asymmetric_Numeral_Systems @@ -929,7 +944,7 @@ An FSE distribution table describes the probabilities of all symbols from `0` to the last present one (included) on a normalized scale of `1 << Accuracy_Log` . -It's a bitstream which is read forward, in little-endian fashion. +It's a bitstream which is read forward, in __little-endian__ fashion. It's not necessary to know its exact size, since it will be discovered and reported by the decoding process. @@ -1064,7 +1079,7 @@ Huffman Coding -------------- Zstandard Huffman-coded streams are read backwards, similar to the FSE bitstreams. -Therefore, to find the start of the bitstream it is therefore necessary to +Therefore, to find the start of the bitstream, it is therefore to know the offset of the last byte of the Huffman-coded stream. After writing the last bit containing information, the compressor @@ -1077,7 +1092,7 @@ byte to read. The decompressor needs to skip 0-7 initial `0`-bits and the first `1`-bit it occurs. Afterwards, the useful part of the bitstream begins. 
-The bitstream contains Huffman-coded symbols in little-endian order, +The bitstream contains Huffman-coded symbols in __little-endian__ order, with the codes defined by the method below. ### Huffman Tree Description @@ -1182,14 +1197,14 @@ The Huffman header compression uses 2 states, which share the same FSE distribution table. The first state (`State1`) encodes the even indexed symbols, and the second (`State2`) encodes the odd indexes. -State1 is initialized first, and then State2, and they take turns decoding -a single symbol and updating their state. +`State1` is initialized first, and then `State2`, and they take turns +decoding a single symbol and updating their state. For more details on these FSE operations, see the [FSE section](#fse). The number of symbols to decode is determined by tracking bitStream overflow condition: If updating state after decoding a symbol would require more bits than -remain in the stream, it is assumed the extra bits are 0. Then, +remain in the stream, it is assumed that extra bits are 0. Then, the symbols for each of the final states are decoded and the process is complete. ##### Conversion from weights to Huffman prefix codes @@ -1245,7 +1260,7 @@ it would be encoded as: |Encoding|`0000`|`0001`|`01`|`1`| `10000` | Starting from the end, -it's possible to read the bitstream in a little-endian fashion, +it's possible to read the bitstream in a __little-endian__ fashion, keeping track of already used bits. Since the bitstream is encoded in reverse order, by starting at the end the symbols can be read in forward order. @@ -1258,13 +1273,14 @@ If a bitstream is not entirely and exactly consumed, hence reaching exactly its beginning position with _all_ bits consumed, the decoding process is considered faulty. + Dictionary Format ----------------- -Zstandard is compatible with "raw content" dictionaries, free of any format restriction, -except that they must be at least 8 bytes. 
-These dictionaries function as if they were just the `Content` block of a formatted -dictionary. +Zstandard is compatible with "raw content" dictionaries, +free of any format restriction, except that they must be at least 8 bytes. +These dictionaries function as if they were just the `Content` part +of a formatted dictionary. But dictionaries created by `zstd --train` follow a format, described here. @@ -1274,9 +1290,9 @@ __Pre-requisites__ : a dictionary has a size, | `Magic_Number` | `Dictionary_ID` | `Entropy_Tables` | `Content` | | -------------- | --------------- | ---------------- | --------- | -__`Magic_Number`__ : 4 bytes ID, value 0xEC30A437, little-endian format +__`Magic_Number`__ : 4 bytes ID, value 0xEC30A437, __little-endian__ format -__`Dictionary_ID`__ : 4 bytes, stored in little-endian format. +__`Dictionary_ID`__ : 4 bytes, stored in __little-endian__ format. `Dictionary_ID` can be any value, except 0 (which means no `Dictionary_ID`). It's used by decoders to check if they use the correct dictionary. @@ -1284,9 +1300,9 @@ _Reserved ranges :_ If the frame is going to be distributed in a private environment, any `Dictionary_ID` can be used. However, for public distribution of compressed frames, - the following ranges are reserved for future use and should not be used : + the following ranges are reserved and shall not be used : - - low range : 1 - 32767 + - low range : <= 32767 - high range : >= (2^31) __`Entropy_Tables`__ : following the same format as the tables in compressed blocks. @@ -1298,26 +1314,30 @@ __`Entropy_Tables`__ : following the same format as the tables in compressed blo These tables populate the Repeat Stats literals mode and Repeat distribution mode for sequence decoding. It's finally followed by 3 offset values, populating recent offsets (instead of using `{1,4,8}`), - stored in order, 4-bytes little-endian each, for a total of 12 bytes. + stored in order, 4-bytes __little-endian__ each, for a total of 12 bytes. 
Each recent offset must have a value < dictionary size. __`Content`__ : The rest of the dictionary is its content. The content act as a "past" in front of data to compress or decompress, so it can be referenced in sequence commands. As long as the amount of data decoded from this frame is less than or - equal to the window-size, sequence commands may specify offsets longer - than the lenght of total decoded output so far to reference back to the - dictionary. After the total output has surpassed the window size however, + equal to `Window_Size`, sequence commands may specify offsets longer + than the total length of decoded output so far to reference back to the + dictionary. After the total output has surpassed `Window_Size` however, this is no longer allowed and the dictionary is no longer accessible. [compressed blocks]: #the-format-of-compressed_block + + Appendix A - Decoding tables for predefined codes ------------------------------------------------- -This appendix contains FSE decoding tables for the predefined literal length, match length, and offset -codes. The tables have been constructed using the algorithm as given above in the -"from normalized distribution to decoding tables" chapter. The tables here can be used as examples -to crosscheck that an implementation implements the decoding table generation algorithm correctly. +This appendix contains FSE decoding tables +for the predefined literal length, match length, and offset codes. +The tables have been constructed using the algorithm as given above in chapter +"from normalized distribution to decoding tables". +The tables here can be used as examples +to crosscheck that an implementation build its decoding tables correctly. 
#### Literal Length Code: @@ -1496,6 +1516,7 @@ to crosscheck that an implementation implements the decoding table generation al Version changes --------------- +- 0.2.5 : minor typos and clarifications - 0.2.4 : section restructuring, by Sean Purcell - 0.2.3 : clarified several details, by Sean Purcell - 0.2.2 : added predefined codes, by Johannes Rudolph diff --git a/doc/zstd_manual.html b/doc/zstd_manual.html index 8be0e6863..59d146874 100644 --- a/doc/zstd_manual.html +++ b/doc/zstd_manual.html @@ -1,10 +1,10 @@ -zstd 1.1.4 Manual +zstd 1.3.0 Manual -

zstd 1.1.4 Manual

+

zstd 1.3.0 Manual


Contents

    @@ -19,8 +19,8 @@
  1. Streaming decompression - HowTo
  2. START OF ADVANCED AND EXPERIMENTAL FUNCTIONS
  3. Advanced types
  4. -
  5. Compressed size functions
  6. -
  7. Decompressed size functions
  8. +
  9. Frame size functions
  10. +
  11. Context memory usage
  12. Advanced compression functions
  13. Advanced decompression functions
  14. Advanced streaming functions
  15. @@ -55,48 +55,48 @@

    Simple API

    
     
     
    size_t ZSTD_compress( void* dst, size_t dstCapacity,
    -                            const void* src, size_t srcSize,
    -                                  int compressionLevel);
    -

    Compresses `src` content as a single zstd compressed frame into already allocated `dst`. - Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`. - @return : compressed size written into `dst` (<= `dstCapacity), - or an error code if it fails (which can be tested using ZSTD_isError()). + const void* src, size_t srcSize, + int compressionLevel); +

    Compresses `src` content as a single zstd compressed frame into already allocated `dst`. + Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`. + @return : compressed size written into `dst` (<= `dstCapacity), + or an error code if it fails (which can be tested using ZSTD_isError()).


    size_t ZSTD_decompress( void* dst, size_t dstCapacity,
    -                              const void* src, size_t compressedSize);
    -

    `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames. - `dstCapacity` is an upper bound of originalSize. - If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data. - @return : the number of bytes decompressed into `dst` (<= `dstCapacity`), - or an errorCode if it fails (which can be tested using ZSTD_isError()). + const void* src, size_t compressedSize); +

    `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames. + `dstCapacity` is an upper bound of originalSize. + If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data. + @return : the number of bytes decompressed into `dst` (<= `dstCapacity`), + or an errorCode if it fails (which can be tested using ZSTD_isError()).


    unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
    -

    NOTE: This function is planned to be obsolete, in favour of ZSTD_getFrameContentSize. - ZSTD_getFrameContentSize functions the same way, returning the decompressed size of a single - frame, but distinguishes empty frames from frames with an unknown size, or errors. +

    NOTE: This function is planned to be obsolete, in favour of ZSTD_getFrameContentSize. + ZSTD_getFrameContentSize functions the same way, returning the decompressed size of a single + frame, but distinguishes empty frames from frames with an unknown size, or errors. - Additionally, ZSTD_findDecompressedSize can be used instead. It can handle multiple - concatenated frames in one buffer, and so is more general. - As a result however, it requires more computation and entire frames to be passed to it, - as opposed to ZSTD_getFrameContentSize which requires only a single frame's header. + Additionally, ZSTD_findDecompressedSize can be used instead. It can handle multiple + concatenated frames in one buffer, and so is more general. + As a result however, it requires more computation and entire frames to be passed to it, + as opposed to ZSTD_getFrameContentSize which requires only a single frame's header. - 'src' is the start of a zstd compressed frame. - @return : content size to be decompressed, as a 64-bits value _if known_, 0 otherwise. - note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode. - When `return==0`, data to decompress could be any size. - In which case, it's necessary to use streaming mode to decompress data. - Optionally, application can still use ZSTD_decompress() while relying on implied limits. - (For example, data may be necessarily cut into blocks <= 16 KB). - note 2 : decompressed size is always present when compression is done with ZSTD_compress() - note 3 : decompressed size can be very large (64-bits value), - potentially larger than what local system can handle as a single memory segment. - In which case, it's necessary to use streaming mode to decompress data. - note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified. - Always ensure result fits within application's authorized limits. - Each application can set its own limits. 
- note 5 : when `return==0`, if precise failure cause is needed, use ZSTD_getFrameParams() to know more. + 'src' is the start of a zstd compressed frame. + @return : content size to be decompressed, as a 64-bits value _if known_, 0 otherwise. + note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode. + When `return==0`, data to decompress could be any size. + In which case, it's necessary to use streaming mode to decompress data. + Optionally, application can still use ZSTD_decompress() while relying on implied limits. + (For example, data may be necessarily cut into blocks <= 16 KB). + note 2 : decompressed size is always present when compression is done with ZSTD_compress() + note 3 : decompressed size can be very large (64-bits value), + potentially larger than what local system can handle as a single memory segment. + In which case, it's necessary to use streaming mode to decompress data. + note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified. + Always ensure result fits within application's authorized limits. + Each application can set its own limits. + note 5 : when `return==0`, if precise failure cause is needed, use ZSTD_getFrameParams() to know more.


    Helper functions

    int         ZSTD_maxCLevel(void);               /*!< maximum compression level available */
    @@ -106,42 +106,46 @@ const char* ZSTD_getErrorName(size_t code);     /*!< provides readable strin
     

    Explicit memory management

    
     
    -

    Compression context

       When compressing many times,
    -   it is recommended to allocate a context just once, and re-use it for each successive compression operation.
    -   This will make workload friendlier for system's memory.
    -   Use one context per thread for parallel execution in multi-threaded environments. 
    +

    Compression context

      When compressing many times,
    +  it is recommended to allocate a context just once, and re-use it for each successive compression operation.
    +  This will make workload friendlier for system's memory.
    +  Use one context per thread for parallel execution in multi-threaded environments. 
     
    typedef struct ZSTD_CCtx_s ZSTD_CCtx;
     ZSTD_CCtx* ZSTD_createCCtx(void);
     size_t     ZSTD_freeCCtx(ZSTD_CCtx* cctx);
     

    size_t ZSTD_compressCCtx(ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel);
    -

    Same as ZSTD_compress(), requires an allocated ZSTD_CCtx (see ZSTD_createCCtx()). +

    Same as ZSTD_compress(), requires an allocated ZSTD_CCtx (see ZSTD_createCCtx()).


    -

    Decompression context

    typedef struct ZSTD_DCtx_s ZSTD_DCtx;
    +

    Decompression context

      When decompressing many times,
    +  it is recommended to allocate a context just once, and re-use it for each successive compression operation.
    +  This will make workload friendlier for system's memory.
    +  Use one context per thread for parallel execution in multi-threaded environments. 
    +
    typedef struct ZSTD_DCtx_s ZSTD_DCtx;
     ZSTD_DCtx* ZSTD_createDCtx(void);
     size_t     ZSTD_freeDCtx(ZSTD_DCtx* dctx);
     

    size_t ZSTD_decompressDCtx(ZSTD_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
    -

    Same as ZSTD_decompress(), requires an allocated ZSTD_DCtx (see ZSTD_createDCtx()). +

    Same as ZSTD_decompress(), requires an allocated ZSTD_DCtx (see ZSTD_createDCtx()).


    Simple dictionary API

    
     
     
    size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
    -                                           void* dst, size_t dstCapacity,
    -                                     const void* src, size_t srcSize,
    -                                     const void* dict,size_t dictSize,
    -                                           int compressionLevel);
    +                               void* dst, size_t dstCapacity,
    +                         const void* src, size_t srcSize,
    +                         const void* dict,size_t dictSize,
    +                               int compressionLevel);
     

    Compression using a predefined Dictionary (see dictBuilder/zdict.h). Note : This function loads the dictionary, resulting in significant startup delay. Note : When `dict == NULL || dictSize < 8` no dictionary is used.


    size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
    -                                             void* dst, size_t dstCapacity,
    -                                       const void* src, size_t srcSize,
    -                                       const void* dict,size_t dictSize);
    +                                 void* dst, size_t dstCapacity,
    +                           const void* src, size_t srcSize,
    +                           const void* dict,size_t dictSize);
     

    Decompression using a predefined Dictionary (see dictBuilder/zdict.h). Dictionary must be identical to the one used during compression. Note : This function loads the dictionary, resulting in significant startup delay. @@ -162,12 +166,13 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);


    size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
    -                                            void* dst, size_t dstCapacity,
    -                                      const void* src, size_t srcSize,
    -                                      const ZSTD_CDict* cdict);
    -

    Compression using a digested Dictionary. - Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times. - Note that compression level is decided during dictionary creation. + void* dst, size_t dstCapacity, + const void* src, size_t srcSize, + const ZSTD_CDict* cdict); +

    Compression using a digested Dictionary. + Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times. + Note that compression level is decided during dictionary creation. + Frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no)


    ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);
    @@ -180,9 +185,9 @@ size_t     ZSTD_freeDCtx(ZSTD_DCtx* dctx);
     


    size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
    -                                              void* dst, size_t dstCapacity,
    -                                        const void* src, size_t srcSize,
    -                                        const ZSTD_DDict* ddict);
    +                                  void* dst, size_t dstCapacity,
    +                            const void* src, size_t srcSize,
    +                            const ZSTD_DDict* ddict);
     

    Decompression using a digested Dictionary. Faster startup than ZSTD_decompress_usingDict(), recommended when same dictionary is used multiple times.


    @@ -239,6 +244,16 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
    +
    typedef ZSTD_CCtx ZSTD_CStream;  /**< CCtx and CStream are effectively same object */
    +

    +

    ZSTD_CStream management functions

    ZSTD_CStream* ZSTD_createCStream(void);
    +size_t ZSTD_freeCStream(ZSTD_CStream* zcs);
    +

    +

    Streaming compression functions

    size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
    +size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
    +size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
    +size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
    +

    size_t ZSTD_CStreamInSize(void);    /**< recommended size for input buffer */
     

    size_t ZSTD_CStreamOutSize(void);   /**< recommended size for output buffer. Guarantee to successfully flush at least one complete compressed block in all circumstances. */
    @@ -264,6 +279,12 @@ size_t     ZSTD_freeDCtx(ZSTD_DCtx* dctx);
      
     
    +

    ZSTD_DStream management functions

    ZSTD_DStream* ZSTD_createDStream(void);
    +size_t ZSTD_freeDStream(ZSTD_DStream* zds);
    +

    +

    Streaming decompression functions

    size_t ZSTD_initDStream(ZSTD_DStream* zds);
    +size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
    +

    size_t ZSTD_DStreamInSize(void);    /*!< recommended size for input buffer */
     

    size_t ZSTD_DStreamOutSize(void);   /*!< recommended size for output buffer. Guarantee to successfully flush at least one complete block in all circumstances. */
    @@ -300,70 +321,101 @@ size_t     ZSTD_freeDCtx(ZSTD_DCtx* dctx);
         ZSTD_frameParameters fParams;
     } ZSTD_parameters;
     

    +
    typedef struct {
    +    unsigned long long frameContentSize;
    +    unsigned windowSize;
    +    unsigned dictID;
    +    unsigned checksumFlag;
    +} ZSTD_frameHeader;
    +

    Custom memory allocation functions

    typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
     typedef void  (*ZSTD_freeFunction) (void* opaque, void* address);
     typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
     

    -

    Compressed size functions

    
    +

    Frame size functions

    
     
     
    size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
     

    `src` should point to the start of a ZSTD encoded frame or skippable frame `srcSize` must be at least as large as the frame - @return : the compressed size of the frame pointed to by `src`, suitable to pass to - `ZSTD_decompress` or similar, or an error code if given invalid input. + @return : the compressed size of the frame pointed to by `src`, + suitable to pass to `ZSTD_decompress` or similar, + or an error code if given invalid input.


    -

    Decompressed size functions

    
    -
    -
    unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
    -

    `src` should point to the start of a ZSTD encoded frame - `srcSize` must be at least as large as the frame header. A value greater than or equal - to `ZSTD_frameHeaderSize_max` is guaranteed to be large enough in all cases. - @return : decompressed size of the frame pointed to be `src` if known, otherwise - - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined - - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) +

    #define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
    +#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
    +unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
    +

    `src` should point to the start of a ZSTD encoded frame. + `srcSize` must be at least as large as the frame header. + A value >= `ZSTD_frameHeaderSize_max` is guaranteed to be large enough. + @return : - decompressed size of the frame pointed to be `src` if known + - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined + - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)


    unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
    -

    `src` should point the start of a series of ZSTD encoded and/or skippable frames - `srcSize` must be the _exact_ size of this series +

    `src` should point the start of a series of ZSTD encoded and/or skippable frames + `srcSize` must be the _exact_ size of this series (i.e. there should be a frame boundary exactly `srcSize` bytes after `src`) - @return : the decompressed size of all data in the contained frames, as a 64-bit value _if known_ - - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN - - if an error occurred: ZSTD_CONTENTSIZE_ERROR + @return : - decompressed size of all data in all successive frames + - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN + - if an error occurred: ZSTD_CONTENTSIZE_ERROR - note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode. - When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size. - In which case, it's necessary to use streaming mode to decompress data. - Optionally, application can still use ZSTD_decompress() while relying on implied limits. - (For example, data may be necessarily cut into blocks <= 16 KB). - note 2 : decompressed size is always present when compression is done with ZSTD_compress() - note 3 : decompressed size can be very large (64-bits value), - potentially larger than what local system can handle as a single memory segment. - In which case, it's necessary to use streaming mode to decompress data. - note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified. - Always ensure result fits within application's authorized limits. - Each application can set its own limits. - note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to - read each contained frame header. This is efficient as most of the data is skipped, - however it does mean that all frame data must be present and valid. + note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode. 
+ When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size. + In which case, it's necessary to use streaming mode to decompress data. + Optionally, application can still use ZSTD_decompress() while relying on implied limits. + (For example, data may be necessarily cut into blocks <= 16 KB). + note 2 : decompressed size is always present when compression is done with ZSTD_compress() + note 3 : decompressed size can be very large (64-bits value), + potentially larger than what local system can handle as a single memory segment. + In which case, it's necessary to use streaming mode to decompress data. + note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified. + Always ensure result fits within application's authorized limits. + Each application can set its own limits. + note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to + read each contained frame header. This is efficient as most of the data is skipped, + however it does mean that all frame data must be present and valid. +


    + +

    Context memory usage

    
    +
    +
    size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
    +size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
    +size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
    +size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
    +size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
    +size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
    +

    These functions give the current memory usage of selected object. + Object memory usage can evolve if it's re-used multiple times. +


    + +
    size_t ZSTD_estimateCCtxSize(ZSTD_compressionParameters cParams);
    +size_t ZSTD_estimateDCtxSize(void);
    +

    These functions make it possible to estimate memory usage + of a future target object, before its allocation, + given a set of parameters, which vary depending on target object. + The objective is to guide decision before allocation. +


    + +
    size_t ZSTD_estimateCStreamSize(ZSTD_compressionParameters cParams);
    +size_t ZSTD_estimateDStreamSize(ZSTD_frameHeader fHeader);
    +

    Note : if streaming is init with function ZSTD_init?Stream_usingDict(), + an internal ?Dict will be created, whose size is not estimated. + In this case, get additional size by using ZSTD_estimate?DictSize +


    + +
    size_t ZSTD_estimateCDictSize(ZSTD_compressionParameters cParams, size_t dictSize);
    +size_t ZSTD_estimateDDictSize(size_t dictSize);
    +

    Note : if dictionary is created "byReference", reduce estimation by dictSize


    Advanced compression functions

    
     
    -
    size_t ZSTD_estimateCCtxSize(ZSTD_compressionParameters cParams);
    -

    Gives the amount of memory allocated for a ZSTD_CCtx given a set of compression parameters. - `frameContentSize` is an optional parameter, provide `0` if unknown -


    -
    ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
     

    Create a ZSTD compression context using external alloc and free functions


    -
    size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
    -

    Gives the amount of memory used by a given ZSTD_CCtx -


    -
    typedef enum {
         ZSTD_p_forceWindow,   /* Force back-references to remain < windowSize, even when referencing Dictionary content (default:0) */
         ZSTD_p_forceRawDict   /* Force loading dictionary in "content-only" mode (no header analysis) */
    @@ -381,14 +433,10 @@ typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; v
     


    ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize, unsigned byReference,
    -                                                  ZSTD_parameters params, ZSTD_customMem customMem);
    +                                      ZSTD_compressionParameters cParams, ZSTD_customMem customMem);
     

    Create a ZSTD_CDict using external alloc and free, and customized compression parameters


    -
    size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
    -

    Gives the amount of memory used by a given ZSTD_sizeof_CDict -


    -
    ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
     

    @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize. `estimatedSrcSize` value is optional, select 0 if not known @@ -408,12 +456,19 @@ typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; v both values are optional, select `0` if unknown.


    -
    size_t ZSTD_compress_advanced (ZSTD_CCtx* ctx,
    -                                           void* dst, size_t dstCapacity,
    -                                     const void* src, size_t srcSize,
    -                                     const void* dict,size_t dictSize,
    -                                           ZSTD_parameters params);
    -

    Same as ZSTD_compress_usingDict(), with fine-tune control of each compression parameter +

    size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
    +                      void* dst, size_t dstCapacity,
    +                const void* src, size_t srcSize,
    +                const void* dict,size_t dictSize,
    +                      ZSTD_parameters params);
    +

    Same as ZSTD_compress_usingDict(), with fine-tune control over each compression parameter +


    + +
    size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
    +                      void* dst, size_t dstCapacity,
    +                const void* src, size_t srcSize,
    +                const ZSTD_CDict* cdict, ZSTD_frameParameters fParams);
    +

    Same as ZSTD_compress_usingCDict(), with fine-tune control over frame parameters


    Advanced decompression functions

    
    @@ -425,26 +480,19 @@ typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; v
       Note 3 : Skippable Frame Identifiers are considered valid. 
     


    -
    size_t ZSTD_estimateDCtxSize(void);
    -

    Gives the potential amount of memory allocated to create a ZSTD_DCtx -


    -
    ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
     

    Create a ZSTD decompression context using external alloc and free functions


    -
    size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
    -

    Gives the amount of memory used by a given ZSTD_DCtx -


    -
    ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
     

    Create a digested dictionary, ready to start decompression operation without startup delay. Dictionary content is simply referenced, and therefore stays in dictBuffer. It is important that dictBuffer outlives DDict, it must remain read accessible throughout the lifetime of DDict


    -
    size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
    -

    Gives the amount of memory used by a given ZSTD_DDict +

    ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
    +                                      unsigned byReference, ZSTD_customMem customMem);
    +

    Create a ZSTD_DDict using external alloc and free, optionally by reference


    unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
    @@ -468,27 +516,34 @@ typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; v
         Note : this use case also happens when using a non-conformant dictionary.
       - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
       - This is not a Zstandard frame.
    -  When identifying the exact failure cause, it's possible to used ZSTD_getFrameParams(), which will provide a more precise error code. 
    +  When identifying the exact failure cause, it's possible to use ZSTD_getFrameParams(), which will provide a more precise error code. 
     


    Advanced streaming functions

    
     
     

    Advanced Streaming compression functions

    ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
     size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize);   /**< pledgedSrcSize must be correct, a size of 0 means unknown.  for a frame size of 0 use initCStream_advanced */
    -size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); /**< note: a dict will not be used if dict == NULL or dictSize < 8 */
+size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); /**< note: a dict will not be used if dict == NULL or dictSize < 8. This results in the creation of an internal CDict */
     size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize,
                                                  ZSTD_parameters params, unsigned long long pledgedSrcSize);  /**< pledgedSrcSize is optional and can be 0 (meaning unknown). note: if the contentSizeFlag is set, pledgedSrcSize == 0 means the source size is actually 0 */
     size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);  /**< note : cdict will just be referenced, and must outlive compression session */
    -size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);  /**< re-use compression parameters from previous init; skip dictionary loading stage; zcs must be init at least once before. note: pledgedSrcSize must be correct, a size of 0 means unknown.  for a frame size of 0 use initCStream_advanced */
    -size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
    +size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, unsigned long long pledgedSrcSize, ZSTD_frameParameters fParams);  /**< same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
     

    +
    size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
    +

    start a new compression job, using same parameters from previous job. + This is typically useful to skip dictionary loading stage, since it will re-use it in-place. + Note that zcs must be init at least once before using ZSTD_resetCStream(). + pledgedSrcSize==0 means "srcSize unknown". + If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end. + @return : 0, or an error code (which can be tested using ZSTD_isError()) +


    +

    Advanced Streaming decompression functions

    typedef enum { DStream_p_maxWindowSize } ZSTD_DStreamParameter_e;
     ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
    -size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /**< note: a dict will not be used if dict == NULL or dictSize < 8 */
     size_t ZSTD_setDStreamParameter(ZSTD_DStream* zds, ZSTD_DStreamParameter_e paramType, unsigned paramValue);
    +size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /**< note: a dict will not be used if dict == NULL or dictSize < 8 */
     size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);  /**< note : ddict will just be referenced, and must outlive decompression session */
     size_t ZSTD_resetDStream(ZSTD_DStream* zds);  /**< re-use decompression parameters from previous init; saves dictionary loading */
    -size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
     

    Buffer-less and synchronous inner streaming functions

       This is an advanced API, giving full control over buffer management, for users which need direct control over memory.
    @@ -529,10 +584,9 @@ size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
     

    Buffer-less streaming compression functions

    size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
     size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
     size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize is optional and can be 0 (meaning unknown). note: if the contentSizeFlag is set, pledgedSrcSize == 0 means the source size is actually 0 */
    +size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */
    +size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize);   /* compression parameters are already set within cdict. pledgedSrcSize=0 means null-size */
     size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**<  note: if pledgedSrcSize can be 0, indicating unknown size.  if it is non-zero, it must be accurate.  for 0 size frames, use compressBegin_advanced */
    -size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, unsigned long long pledgedSrcSize); /**< note: if pledgedSrcSize can be 0, indicating unknown size.  if it is non-zero, it must be accurate.  for 0 size frames, use compressBegin_advanced */
    -size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
    -size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
     

    Buffer-less streaming decompression (synchronous mode)

       A ZSTD_DCtx object is required to track streaming operations.
    @@ -592,14 +646,7 @@ size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const vo
       It also returns Frame Size as fparamsPtr->frameContentSize.
     
    -
    typedef struct {
    -    unsigned long long frameContentSize;
    -    unsigned windowSize;
    -    unsigned dictID;
    -    unsigned checksumFlag;
    -} ZSTD_frameParams;
    -

    -

    Buffer-less streaming decompression functions

    size_t ZSTD_getFrameParams(ZSTD_frameParams* fparamsPtr, const void* src, size_t srcSize);   /**< doesn't consume input, see details below */
    +

    Buffer-less streaming decompression functions

    size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize);   /**< doesn't consume input, see details below */
     size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
     size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
     void   ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
    @@ -617,19 +664,20 @@ ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
         - Compressing and decompressing require a context structure
           + Use ZSTD_createCCtx() and ZSTD_createDCtx()
         - It is necessary to init context before starting
    -      + compression : ZSTD_compressBegin()
    -      + decompression : ZSTD_decompressBegin()
    -      + variants _usingDict() are also allowed
    -      + copyCCtx() and copyDCtx() work too
    -    - Block size is limited, it must be <= ZSTD_getBlockSizeMax()
    -      + If you need to compress more, cut data into multiple blocks
    -      + Consider using the regular ZSTD_compress() instead, as frame metadata costs become negligible when source size is large.
    +      + compression : any ZSTD_compressBegin*() variant, including with dictionary
    +      + decompression : any ZSTD_decompressBegin*() variant, including with dictionary
    +      + copyCCtx() and copyDCtx() can be used too
    +    - Block size is limited, it must be <= ZSTD_getBlockSizeMax() <= ZSTD_BLOCKSIZE_ABSOLUTEMAX
    +      + If input is larger than a block size, it's necessary to split input data into multiple blocks
    +      + For inputs larger than a single block size, consider using the regular ZSTD_compress() instead.
    +        Frame metadata is not that costly, and quickly becomes negligible as source size grows larger.
         - When a block is considered not compressible enough, ZSTD_compressBlock() result will be zero.
           In which case, nothing is produced into `dst`.
           + User must test for such outcome and deal directly with uncompressed data
           + ZSTD_decompressBlock() doesn't accept uncompressed data as input !!!
    -      + In case of multiple successive blocks, decoder must be informed of uncompressed block existence to follow proper history.
    -        Use ZSTD_insertBlock() in such a case.
    +      + In case of multiple successive blocks, should some of them be uncompressed,
    +        decoder must be informed of their existence in order to follow proper history.
    +        Use ZSTD_insertBlock() for such a case.
     

    Raw zstd block functions

    size_t ZSTD_getBlockSizeMax(ZSTD_CCtx* cctx);
    diff --git a/examples/simple_compression.c b/examples/simple_compression.c
    index 9d448712e..ab1131475 100644
    --- a/examples/simple_compression.c
    +++ b/examples/simple_compression.c
    @@ -116,7 +116,6 @@ static char* createOutFilename_orDie(const char* filename)
     int main(int argc, const char** argv)
     {
         const char* const exeName = argv[0];
    -    const char* const inFilename = argv[1];
     
         if (argc!=2) {
             printf("wrong arguments\n");
    @@ -125,6 +124,8 @@ int main(int argc, const char** argv)
             return 1;
         }
     
    +    const char* const inFilename = argv[1];
    +
         char* const outFilename = createOutFilename_orDie(inFilename);
         compress_orDie(inFilename, outFilename);
         free(outFilename);
    diff --git a/examples/streaming_compression.c b/examples/streaming_compression.c
    index 4c2c1a1d8..24ad15bd6 100644
    --- a/examples/streaming_compression.c
    +++ b/examples/streaming_compression.c
    @@ -112,7 +112,6 @@ static const char* createOutFilename_orDie(const char* filename)
     int main(int argc, const char** argv)
     {
         const char* const exeName = argv[0];
    -    const char* const inFilename = argv[1];
     
         if (argc!=2) {
             printf("wrong arguments\n");
    @@ -121,6 +120,8 @@ int main(int argc, const char** argv)
             return 1;
         }
     
    +    const char* const inFilename = argv[1];
    +
         const char* const outFilename = createOutFilename_orDie(inFilename);
         compressFile_orDie(inFilename, outFilename, 1);
     
    diff --git a/examples/streaming_decompression.c b/examples/streaming_decompression.c
    index 400aa673d..bb2d80987 100644
    --- a/examples/streaming_decompression.c
    +++ b/examples/streaming_decompression.c
    @@ -99,7 +99,6 @@ static void decompressFile_orDie(const char* fname)
     int main(int argc, const char** argv)
     {
         const char* const exeName = argv[0];
    -    const char* const inFilename = argv[1];
     
         if (argc!=2) {
             fprintf(stderr, "wrong arguments\n");
    @@ -108,6 +107,8 @@ int main(int argc, const char** argv)
             return 1;
         }
     
    +    const char* const inFilename = argv[1];
    +
         decompressFile_orDie(inFilename);
         return 0;
     }
    diff --git a/lib/Makefile b/lib/Makefile
    index 58f99baf5..f5f610372 100644
    --- a/lib/Makefile
    +++ b/lib/Makefile
    @@ -22,7 +22,7 @@ VERSION?= $(LIBVER)
     
     CPPFLAGS+= -I. -I./common -DXXH_NAMESPACE=ZSTD_
     CFLAGS  ?= -O3
    -DEBUGFLAGS = -g -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
    +DEBUGFLAGS = -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
                -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \
                -Wstrict-prototypes -Wundef -Wpointer-arith -Wformat-security
     CFLAGS  += $(DEBUGFLAGS) $(MOREFLAGS)
    @@ -31,12 +31,15 @@ FLAGS    = $(CPPFLAGS) $(CFLAGS)
     
     ZSTD_FILES := $(wildcard common/*.c compress/*.c decompress/*.c dictBuilder/*.c deprecated/*.c)
     
    -ifeq ($(ZSTD_LEGACY_SUPPORT), 0)
    -CPPFLAGS  += -DZSTD_LEGACY_SUPPORT=0
    -else
    -CPPFLAGS  += -I./legacy -DZSTD_LEGACY_SUPPORT=1
    -ZSTD_FILES+= $(wildcard legacy/*.c)
    +ZSTD_LEGACY_SUPPORT ?= 4
    +
    +ifneq ($(ZSTD_LEGACY_SUPPORT), 0)
    +ifeq ($(shell test $(ZSTD_LEGACY_SUPPORT) -lt 8; echo $$?), 0)
    +	ZSTD_FILES += $(shell ls legacy/*.c | grep 'v0[$(ZSTD_LEGACY_SUPPORT)-7]')
     endif
    +	CPPFLAGS += -I./legacy
    +endif
    +CPPFLAGS  += -DZSTD_LEGACY_SUPPORT=$(ZSTD_LEGACY_SUPPORT)
     
     ZSTD_OBJ   := $(patsubst %.c,%.o,$(ZSTD_FILES))
     
    @@ -68,6 +71,9 @@ libzstd.a: $(ZSTD_OBJ)
     	@echo compiling static library
     	@$(AR) $(ARFLAGS) $@ $^
     
     +libzstd.a-mt: CPPFLAGS += -DZSTD_MULTITHREAD
    +libzstd.a-mt: libzstd.a
    +
     $(LIBZSTD): LDFLAGS += -shared -fPIC -fvisibility=hidden
     $(LIBZSTD): $(ZSTD_FILES)
     	@echo compiling dynamic library $(LIBVER)
    @@ -83,10 +89,17 @@ endif
     
     libzstd : $(LIBZSTD)
     
    +libzstd-mt : CPPFLAGS += -DZSTD_MULTITHREAD
    +libzstd-mt : libzstd
    +
     lib: libzstd.a libzstd
     
    -lib-release: DEBUGFLAGS :=
    +lib-mt: CPPFLAGS += -DZSTD_MULTITHREAD
    +lib-mt: lib
    +
    +lib-release lib-release-mt: DEBUGFLAGS :=
     lib-release: lib
    +lib-release-mt: lib-mt
     
     clean:
     	@$(RM) -r *.dSYM   # Mac OS-X specific
    diff --git a/lib/README.md b/lib/README.md
    index 3357e3d87..79b6fd500 100644
    --- a/lib/README.md
    +++ b/lib/README.md
    @@ -22,6 +22,14 @@ Some additional API may be useful if you're looking into advanced features :
                               They are not "stable", their definition may change in the future.
                               Only static linking is allowed.
     
    +#### ZSTDMT API
    +
    +To enable multithreaded compression within the library, invoke `make lib-mt` target.
    +Prototypes are defined in header file `compress/zstdmt_compress.h`.
    +When linking a program that uses ZSTDMT API against libzstd.a on a POSIX system,
    +`-pthread` flag must be provided to the compiler and linker.
    +Note : ZSTDMT prototypes can still be used with a library built without multithread support,
    +but in this case, they will be single threaded only.
     
     #### Modular build
     
    diff --git a/lib/common/bitstream.h b/lib/common/bitstream.h
    index 0e3d2fc55..ca42850df 100644
    --- a/lib/common/bitstream.h
    +++ b/lib/common/bitstream.h
    @@ -2,7 +2,7 @@
        bitstream
        Part of FSE library
        header file (to include)
    -   Copyright (C) 2013-2016, Yann Collet.
    +   Copyright (C) 2013-2017, Yann Collet.
     
        BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
     
    @@ -53,6 +53,16 @@ extern "C" {
     #include "error_private.h"  /* error codes and messages */
     
     
    +/*-*************************************
    +*  Debug
    +***************************************/
    +#if defined(BIT_DEBUG) && (BIT_DEBUG>=1)
    +#  include 
    +#else
    +#  define assert(condition) ((void)0)
    +#endif
    +
    +
     /*=========================================
     *  Target specific
     =========================================*/
    @@ -74,7 +84,7 @@ extern "C" {
     typedef struct
     {
         size_t bitContainer;
    -    int    bitPos;
    +    unsigned bitPos;
         char*  startPtr;
         char*  ptr;
         char*  endPtr;
    @@ -112,6 +122,7 @@ typedef struct
         unsigned bitsConsumed;
         const char* ptr;
         const char* start;
    +    const char* limitPtr;
     } BIT_DStream_t;
     
     typedef enum { BIT_DStream_unfinished = 0,
    @@ -163,7 +174,10 @@ MEM_STATIC unsigned BIT_highbit32 (register U32 val)
     #   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* Use GCC Intrinsic */
         return 31 - __builtin_clz (val);
     #   else   /* Software version */
    -    static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
    +    static const unsigned DeBruijnClz[32] = { 0,  9,  1, 10, 13, 21,  2, 29,
    +                                             11, 14, 16, 18, 22, 25,  3, 30,
    +                                              8, 12, 20, 28, 15, 17, 24,  7,
    +                                             19, 27, 23,  6, 26,  5,  4, 31 };
         U32 v = val;
         v |= v >> 1;
         v |= v >> 2;
    @@ -175,31 +189,36 @@ MEM_STATIC unsigned BIT_highbit32 (register U32 val)
     }
     
     /*=====    Local Constants   =====*/
    -static const unsigned BIT_mask[] = { 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF, 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF,  0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF };   /* up to 26 bits */
    +static const unsigned BIT_mask[] = { 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F,
    +                                    0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF,
    +                                    0xFFFF, 0x1FFFF, 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF,
    +                                    0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF };   /* up to 26 bits */
     
     
     /*-**************************************************************
     *  bitStream encoding
     ****************************************************************/
     /*! BIT_initCStream() :
    - *  `dstCapacity` must be > sizeof(void*)
    + *  `dstCapacity` must be > sizeof(size_t)
      *  @return : 0 if success,
                   otherwise an error code (can be tested using ERR_isError() ) */
    -MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* startPtr, size_t dstCapacity)
    +MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC,
    +                                  void* startPtr, size_t dstCapacity)
     {
         bitC->bitContainer = 0;
         bitC->bitPos = 0;
         bitC->startPtr = (char*)startPtr;
         bitC->ptr = bitC->startPtr;
    -    bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->ptr);
    -    if (dstCapacity <= sizeof(bitC->ptr)) return ERROR(dstSize_tooSmall);
    +    bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer);
    +    if (dstCapacity <= sizeof(bitC->bitContainer)) return ERROR(dstSize_tooSmall);
         return 0;
     }
     
     /*! BIT_addBits() :
         can add up to 26 bits into `bitC`.
         Does not check for register overflow ! */
    -MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits)
    +MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC,
    +                            size_t value, unsigned nbBits)
     {
         bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
         bitC->bitPos += nbBits;
    @@ -207,34 +226,42 @@ MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits)
     
     /*! BIT_addBitsFast() :
      *  works only if `value` is _clean_, meaning all high bits above nbBits are 0 */
    -MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits)
    +MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC,
    +                                size_t value, unsigned nbBits)
     {
    +    assert((value>>nbBits) == 0);
         bitC->bitContainer |= value << bitC->bitPos;
         bitC->bitPos += nbBits;
     }
     
     /*! BIT_flushBitsFast() :
    + *  assumption : bitContainer has not overflowed
      *  unsafe version; does not check buffer overflow */
     MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)
     {
         size_t const nbBytes = bitC->bitPos >> 3;
    +    assert( bitC->bitPos <= (sizeof(bitC->bitContainer)*8) );
         MEM_writeLEST(bitC->ptr, bitC->bitContainer);
         bitC->ptr += nbBytes;
    +    assert(bitC->ptr <= bitC->endPtr);
         bitC->bitPos &= 7;
    -    bitC->bitContainer >>= nbBytes*8;   /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */
    +    bitC->bitContainer >>= nbBytes*8;
     }
     
     /*! BIT_flushBits() :
    + *  assumption : bitContainer has not overflowed
      *  safe version; check for buffer overflow, and prevents it.
    - *  note : does not signal buffer overflow. This will be revealed later on using BIT_closeCStream() */
    + *  note : does not signal buffer overflow.
    + *  overflow will be revealed later on using BIT_closeCStream() */
     MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
     {
         size_t const nbBytes = bitC->bitPos >> 3;
    +    assert( bitC->bitPos <= (sizeof(bitC->bitContainer)*8) );
         MEM_writeLEST(bitC->ptr, bitC->bitContainer);
         bitC->ptr += nbBytes;
         if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
         bitC->bitPos &= 7;
    -    bitC->bitContainer >>= nbBytes*8;   /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */
    +    bitC->bitContainer >>= nbBytes*8;
     }
     
     /*! BIT_closeCStream() :
    @@ -244,9 +271,7 @@ MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
     {
         BIT_addBitsFast(bitC, 1, 1);   /* endMark */
         BIT_flushBits(bitC);
    -
    -    if (bitC->ptr >= bitC->endPtr) return 0; /* doesn't fit within authorized budget : cancel */
    -
    +    if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */
         return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
     }
     
    @@ -264,15 +289,16 @@ MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, si
     {
         if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
     
    +    bitD->start = (const char*)srcBuffer;
    +    bitD->limitPtr = bitD->start + sizeof(bitD->bitContainer);
    +
         if (srcSize >=  sizeof(bitD->bitContainer)) {  /* normal case */
    -        bitD->start = (const char*)srcBuffer;
             bitD->ptr   = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
             bitD->bitContainer = MEM_readLEST(bitD->ptr);
             { BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
               bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;  /* ensures bitsConsumed is always set */
               if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
         } else {
    -        bitD->start = (const char*)srcBuffer;
             bitD->ptr   = bitD->start;
             bitD->bitContainer = *(const BYTE*)(bitD->start);
             switch(srcSize)
    @@ -330,17 +356,18 @@ MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
     #if defined(__BMI__) && defined(__GNUC__)   /* experimental; fails if bitD->bitsConsumed + nbBits > sizeof(bitD->bitContainer)*8 */
         return BIT_getMiddleBits(bitD->bitContainer, (sizeof(bitD->bitContainer)*8) - bitD->bitsConsumed - nbBits, nbBits);
     #else
    -    U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1;
    -    return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
    +    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
    +    return ((bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> 1) >> ((regMask-nbBits) & regMask);
     #endif
     }
     
     /*! BIT_lookBitsFast() :
    -*   unsafe version; only works only if nbBits >= 1 */
    + *  unsafe version; only works if nbBits >= 1 */
     MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
     {
    -    U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1;
    -    return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
    +    U32 const regMask = sizeof(bitD->bitContainer)*8 - 1;
    +    assert(nbBits >= 1);
    +    return (bitD->bitContainer << (bitD->bitsConsumed & regMask)) >> (((regMask+1)-nbBits) & regMask);
     }
     
     MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
    @@ -365,6 +392,7 @@ MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
     MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
     {
         size_t const value = BIT_lookBitsFast(bitD, nbBits);
    +    assert(nbBits >= 1);
         BIT_skipBits(bitD, nbBits);
         return value;
     }
    @@ -376,10 +404,10 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
                   if status == BIT_DStream_unfinished, internal register is filled with >= (sizeof(bitD->bitContainer)*8 - 7) bits */
     MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
     {
    -    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should not happen => corruption detected */
    +    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* overflow detected, like end of stream */
             return BIT_DStream_overflow;
     
    -    if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {
    +    if (bitD->ptr >= bitD->limitPtr) {
             bitD->ptr -= bitD->bitsConsumed >> 3;
             bitD->bitsConsumed &= 7;
             bitD->bitContainer = MEM_readLEST(bitD->ptr);
    @@ -389,6 +417,7 @@ MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
             if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
             return BIT_DStream_completed;
         }
    +    /* start < ptr < limitPtr */
         {   U32 nbBytes = bitD->bitsConsumed >> 3;
             BIT_DStream_status result = BIT_DStream_unfinished;
             if (bitD->ptr - nbBytes < bitD->start) {
    @@ -397,7 +426,7 @@ MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
             }
             bitD->ptr -= nbBytes;
             bitD->bitsConsumed -= nbBytes*8;
    -        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD) */
    +        bitD->bitContainer = MEM_readLEST(bitD->ptr);   /* reminder : srcSize > sizeof(bitD->bitContainer), otherwise bitD->ptr == bitD->start */
             return result;
         }
     }
    diff --git a/lib/common/error_private.c b/lib/common/error_private.c
    index a0fa1724a..6bc86da7a 100644
    --- a/lib/common/error_private.c
    +++ b/lib/common/error_private.c
    @@ -29,7 +29,7 @@ const char* ERR_getErrorString(ERR_enum code)
         case PREFIX(memory_allocation): return "Allocation error : not enough memory";
         case PREFIX(stage_wrong): return "Operation not authorized at current processing stage";
         case PREFIX(dstSize_tooSmall): return "Destination buffer is too small";
    -    case PREFIX(srcSize_wrong): return "Src size incorrect";
    +    case PREFIX(srcSize_wrong): return "Src size is incorrect";
         case PREFIX(corruption_detected): return "Corrupted block detected";
         case PREFIX(checksum_wrong): return "Restored data doesn't match checksum";
         case PREFIX(tableLog_tooLarge): return "tableLog requires too much memory : unsupported";
    @@ -37,6 +37,9 @@ const char* ERR_getErrorString(ERR_enum code)
         case PREFIX(maxSymbolValue_tooSmall): return "Specified maxSymbolValue is too small";
         case PREFIX(dictionary_corrupted): return "Dictionary is corrupted";
         case PREFIX(dictionary_wrong): return "Dictionary mismatch";
    +    case PREFIX(dictionaryCreation_failed): return "Cannot create Dictionary from provided samples";
    +    case PREFIX(frameIndex_tooLarge): return "Frame index is too large";
    +    case PREFIX(seekableIO): return "An I/O error occurred when reading/seeking";
         case PREFIX(maxCode):
         default: return notErrorCode;
         }
    diff --git a/lib/common/fse.h b/lib/common/fse.h
    index baac39032..6d5d41def 100644
    --- a/lib/common/fse.h
    +++ b/lib/common/fse.h
    @@ -316,6 +316,10 @@ If there is an error, the function will return an error code, which can be teste
     #define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue)   (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))
     #define FSE_DTABLE_SIZE_U32(maxTableLog)                   (1 + (1<<maxTableLog))
    -#define FSE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue)   ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + (1<<((maxTableLog>2)?(maxTableLog-2):0)) )
    +#define FSE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue)   ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) )
     size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
     
     size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits);
    @@ -550,9 +554,9 @@ MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U3
     
     MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, U32 symbol)
     {
    -    const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
    +    FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
         const U16* const stateTable = (const U16*)(statePtr->stateTable);
    -    U32 nbBitsOut  = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
    +    U32 const nbBitsOut  = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
         BIT_addBits(bitC, statePtr->value, nbBitsOut);
         statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
     }
    diff --git a/lib/common/huf.h b/lib/common/huf.h
    index e5572760a..7873ca3d4 100644
    --- a/lib/common/huf.h
    +++ b/lib/common/huf.h
    @@ -43,6 +43,21 @@ extern "C" {
     #include <stddef.h>    /* size_t */
     
     
    +/* *** library symbols visibility *** */
    +/* Note : when linking with -fvisibility=hidden on gcc, or by default on Visual,
    + *        HUF symbols remain "private" (internal symbols for library only).
    + *        Set macro FSE_DLL_EXPORT to 1 if you want HUF symbols visible on DLL interface */
    +#if defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1) && defined(__GNUC__) && (__GNUC__ >= 4)
    +#  define HUF_PUBLIC_API __attribute__ ((visibility ("default")))
    +#elif defined(FSE_DLL_EXPORT) && (FSE_DLL_EXPORT==1)   /* Visual expected */
    +#  define HUF_PUBLIC_API __declspec(dllexport)
    +#elif defined(FSE_DLL_IMPORT) && (FSE_DLL_IMPORT==1)
    +#  define HUF_PUBLIC_API __declspec(dllimport)  /* not required, just to generate faster code (saves a function pointer load from IAT and an indirect jump) */
    +#else
    +#  define HUF_PUBLIC_API
    +#endif
    +
    +
     /* *** simple functions *** */
     /**
     HUF_compress() :
    @@ -55,8 +70,8 @@ HUF_compress() :
                          if return == 1, srcData is a single repeated byte symbol (RLE compression).
                          if HUF_isError(return), compression failed (more details using HUF_getErrorName())
     */
    -size_t HUF_compress(void* dst, size_t dstCapacity,
    -              const void* src, size_t srcSize);
    +HUF_PUBLIC_API size_t HUF_compress(void* dst, size_t dstCapacity,
    +                             const void* src, size_t srcSize);
     
     /**
     HUF_decompress() :
    @@ -69,32 +84,42 @@ HUF_decompress() :
         @return : size of regenerated data (== originalSize),
                   or an error code, which can be tested using HUF_isError()
     */
    -size_t HUF_decompress(void* dst,  size_t originalSize,
    -                const void* cSrc, size_t cSrcSize);
    +HUF_PUBLIC_API size_t HUF_decompress(void* dst,  size_t originalSize,
    +                               const void* cSrc, size_t cSrcSize);
     
     
     /* ***   Tool functions *** */
    -#define HUF_BLOCKSIZE_MAX (128 * 1024)       /**< maximum input size for a single block compressed with HUF_compress */
    -size_t HUF_compressBound(size_t size);       /**< maximum compressed size (worst case) */
    +#define HUF_BLOCKSIZE_MAX (128 * 1024)                  /**< maximum input size for a single block compressed with HUF_compress */
    +HUF_PUBLIC_API size_t HUF_compressBound(size_t size);   /**< maximum compressed size (worst case) */
     
     /* Error Management */
    -unsigned    HUF_isError(size_t code);        /**< tells if a return value is an error code */
    -const char* HUF_getErrorName(size_t code);   /**< provides error code string (useful for debugging) */
    +HUF_PUBLIC_API unsigned    HUF_isError(size_t code);       /**< tells if a return value is an error code */
    +HUF_PUBLIC_API const char* HUF_getErrorName(size_t code);  /**< provides error code string (useful for debugging) */
     
     
     /* ***   Advanced function   *** */
     
     /** HUF_compress2() :
    - *   Same as HUF_compress(), but offers direct control over `maxSymbolValue` and `tableLog` .
    - *   `tableLog` must be `<= HUF_TABLELOG_MAX` . */
    -size_t HUF_compress2 (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
    + *  Same as HUF_compress(), but offers direct control over `maxSymbolValue` and `tableLog`.
    + *  `tableLog` must be `<= HUF_TABLELOG_MAX` . */
    +HUF_PUBLIC_API size_t HUF_compress2 (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
     
     /** HUF_compress4X_wksp() :
    -*   Same as HUF_compress2(), but uses externally allocated `workSpace`, which must be a table of >= 1024 unsigned */
    -size_t HUF_compress4X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);  /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
    + *  Same as HUF_compress2(), but uses externally allocated `workSpace`.
    + *  `workspace` must have minimum alignment of 4, and be at least as large as following macro */
    +#define HUF_WORKSPACE_SIZE (6 << 10)
    +#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32))
    +HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
     
     
     
    +/* ******************************************************************
    + *  WARNING !!
    + *  The following section contains advanced and experimental definitions
    + *  which shall never be used in the context of dll
    + *  because they are not guaranteed to remain stable in the future.
    + *  Only consider them in association with static linking.
    + *******************************************************************/
     #ifdef HUF_STATIC_LINKING_ONLY
     
     /* *** Dependencies *** */
    @@ -117,12 +142,14 @@ size_t HUF_compress4X_wksp (void* dst, size_t dstSize, const void* src, size_t s
     ******************************************/
     /* HUF buffer bounds */
     #define HUF_CTABLEBOUND 129
    -#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true if incompressible pre-filtered with fast heuristic */
    +#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8)   /* only true when incompressible is pre-filtered with fast heuristic */
     #define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size))   /* Macro version, useful for static allocation */
     
     /* static allocation of HUF's Compression Table */
    +#define HUF_CTABLE_SIZE_U32(maxSymbolValue)   ((maxSymbolValue)+1)   /* Use tables of U32, for proper alignment */
    +#define HUF_CTABLE_SIZE(maxSymbolValue)       (HUF_CTABLE_SIZE_U32(maxSymbolValue) * sizeof(U32))
     #define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
    -    U32 name##hb[maxSymbolValue+1]; \
    +    U32 name##hb[HUF_CTABLE_SIZE_U32(maxSymbolValue)]; \
         void* name##hv = &(name##hb); \
         HUF_CElt* name = (HUF_CElt*)(name##hv)   /* no final ; */
     
    @@ -134,10 +161,6 @@ typedef U32 HUF_DTable;
     #define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
             HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) }
     
    -/* The workspace must have alignment at least 4 and be at least this large */
    -#define HUF_WORKSPACE_SIZE (6 << 10)
    -#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32))
    -
     
     /* ****************************************
     *  Advanced decompression functions
    diff --git a/lib/common/mem.h b/lib/common/mem.h
    index 1e28fec46..4773a8b93 100644
    --- a/lib/common/mem.h
    +++ b/lib/common/mem.h
    @@ -48,14 +48,15 @@ MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (size
     *****************************************************************/
     #if  !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
     # include <stdint.h>
    -  typedef  uint8_t BYTE;
    -  typedef uint16_t U16;
    -  typedef  int16_t S16;
    -  typedef uint32_t U32;
    -  typedef  int32_t S32;
    -  typedef uint64_t U64;
    -  typedef  int64_t S64;
    -  typedef intptr_t iPtrDiff;
    +  typedef   uint8_t BYTE;
    +  typedef  uint16_t U16;
    +  typedef   int16_t S16;
    +  typedef  uint32_t U32;
    +  typedef   int32_t S32;
    +  typedef  uint64_t U64;
    +  typedef   int64_t S64;
    +  typedef  intptr_t iPtrDiff;
    +  typedef uintptr_t uPtrDiff;
     #else
       typedef unsigned char      BYTE;
       typedef unsigned short      U16;
    @@ -65,6 +66,7 @@ MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (size
       typedef unsigned long long  U64;
       typedef   signed long long  S64;
       typedef ptrdiff_t      iPtrDiff;
    +  typedef size_t         uPtrDiff;
     #endif
     
     
    @@ -87,8 +89,7 @@ MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (size
     #ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
     #  if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
     #    define MEM_FORCE_MEMORY_ACCESS 2
    -#  elif defined(__INTEL_COMPILER) /*|| defined(_MSC_VER)*/ || \
    -  (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
    +#  elif defined(__INTEL_COMPILER) || defined(__GNUC__)
     #    define MEM_FORCE_MEMORY_ACCESS 1
     #  endif
     #endif
    diff --git a/lib/common/zstd_errors.h b/lib/common/zstd_errors.h
    index 949dbd0ff..de0fc8984 100644
    --- a/lib/common/zstd_errors.h
    +++ b/lib/common/zstd_errors.h
    @@ -57,6 +57,9 @@ typedef enum {
       ZSTD_error_maxSymbolValue_tooSmall,
       ZSTD_error_dictionary_corrupted,
       ZSTD_error_dictionary_wrong,
    +  ZSTD_error_dictionaryCreation_failed,
    +  ZSTD_error_frameIndex_tooLarge,
    +  ZSTD_error_seekableIO,
       ZSTD_error_maxCode
     } ZSTD_ErrorCode;
     
    diff --git a/lib/common/zstd_internal.h b/lib/common/zstd_internal.h
    index 5c5b28732..2533333ba 100644
    --- a/lib/common/zstd_internal.h
    +++ b/lib/common/zstd_internal.h
    @@ -16,9 +16,9 @@
     #ifdef _MSC_VER    /* Visual Studio */
     #  define FORCE_INLINE static __forceinline
     #  include <intrin.h>          /* For Visual 2005 */
    +#  pragma warning(disable : 4100)        /* disable: C4100: unreferenced formal parameter */
     #  pragma warning(disable : 4127)        /* disable: C4127: conditional expression is constant */
     #  pragma warning(disable : 4324)        /* disable: C4324: padded structure */
    -#  pragma warning(disable : 4100)        /* disable: C4100: unreferenced formal parameter */
     #else
     #  if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L   /* C99 */
     #    ifdef __GNUC__
    @@ -58,6 +58,8 @@
     /*-*************************************
     *  shared macros
     ***************************************/
    +#undef MIN
    +#undef MAX
     #define MIN(a,b) ((a)<(b) ? (a) : (b))
     #define MAX(a,b) ((a)>(b) ? (a) : (b))
     #define CHECK_F(f) { size_t const errcod = f; if (ERR_isError(errcod)) return errcod; }  /* check and Forward error code */
    @@ -104,7 +106,6 @@ typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingTy
     #define LONGNBSEQ 0x7F00
     
     #define MINMATCH 3
    -#define EQUAL_READ32 4
     
     #define Litbits  8
     #define MaxLit ((1<<Litbits) - 1)
    -    if (tableLog > FSE_MAX_TABLELOG) return ERROR(GENERIC);   /* Unsupported */
    +    if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);   /* Unsupported */
         if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC);   /* Unsupported */
     
         if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
    @@ -808,7 +808,7 @@ size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t src
         if (!tableLog) tableLog = FSE_DEFAULT_TABLELOG;
     
         /* Scan input and build symbol stats */
    -    {   CHECK_V_F(maxCount, FSE_count(count, &maxSymbolValue, src, srcSize) );
    +    {   CHECK_V_F(maxCount, FSE_count_wksp(count, &maxSymbolValue, src, srcSize, (unsigned*)scratchBuffer) );
             if (maxCount == srcSize) return 1;   /* only a single symbol in src : rle */
             if (maxCount == 1) return 0;         /* each symbol present maximum once => not compressible */
             if (maxCount < (srcSize >> 7)) return 0;   /* Heuristic : not compressible enough */
    diff --git a/lib/compress/zstd_compress.c b/lib/compress/zstd_compress.c
    index 4a6c49d6e..9499a76e2 100644
    --- a/lib/compress/zstd_compress.c
    +++ b/lib/compress/zstd_compress.c
    @@ -20,6 +20,26 @@
     #include "zstd_internal.h"  /* includes zstd.h */
     
     
    +/*-*************************************
    +*  Debug
    +***************************************/
    +#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=1)
    +#  include <assert.h>
    +#else
    +#  define assert(condition) ((void)0)
    +#endif
    +
    +#define ZSTD_STATIC_ASSERT(c) { enum { ZSTD_static_assert = 1/(int)(!!(c)) }; }
    +
    +#if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=2)
    +#  include <stdio.h>
    +   static unsigned g_debugLevel = ZSTD_DEBUG;
    +#  define DEBUGLOG(l, ...) if (l<=g_debugLevel) { fprintf(stderr, __FILE__ ": "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, " \n"); }
    +#else
    +#  define DEBUGLOG(l, ...)      {}    /* disabled */
    +#endif
    +
    +
     /*-*************************************
     *  Constants
     ***************************************/
    @@ -27,12 +47,22 @@ static const U32 g_searchStrength = 8;   /* control skip over incompressible dat
     #define HASH_READ_SIZE 8
     typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
     
    +/* entropy tables always have same size */
    +static size_t const hufCTable_size = HUF_CTABLE_SIZE(255);
    +static size_t const litlengthCTable_size = FSE_CTABLE_SIZE(LLFSELog, MaxLL);
    +static size_t const offcodeCTable_size = FSE_CTABLE_SIZE(OffFSELog, MaxOff);
    +static size_t const matchlengthCTable_size = FSE_CTABLE_SIZE(MLFSELog, MaxML);
    +static size_t const entropyScratchSpace_size = HUF_WORKSPACE_SIZE;
    +
     
     /*-*************************************
     *  Helper functions
     ***************************************/
    -#define ZSTD_STATIC_ASSERT(c) { enum { ZSTD_static_assert = 1/(int)(!!(c)) }; }
    -size_t ZSTD_compressBound(size_t srcSize) { return FSE_compressBound(srcSize) + 12; }
    +size_t ZSTD_compressBound(size_t srcSize) {
    +    size_t const lowLimit = 256 KB;
    +    size_t const margin = (srcSize < lowLimit) ? (lowLimit-srcSize) >> 12 : 0;  /* from 64 to 0 */
    +    return srcSize + (srcSize >> 8) + margin;
    +}
     
     
     /*-*************************************
    @@ -49,6 +79,8 @@ static void ZSTD_resetSeqStore(seqStore_t* ssPtr)
     /*-*************************************
     *  Context memory management
     ***************************************/
    +typedef enum { zcss_init, zcss_load, zcss_flush, zcss_final } ZSTD_cStreamStage;
    +
     struct ZSTD_CCtx_s {
         const BYTE* nextSrc;    /* next block here to continue on current prefix */
         const BYTE* base;       /* All regular indexes relative to this position */
    @@ -70,6 +102,7 @@ struct ZSTD_CCtx_s {
         size_t workSpaceSize;
         size_t blockSize;
         U64 frameContentSize;
    +    U64 consumedSrcSize;
         XXH64_state_t xxhState;
         ZSTD_customMem customMem;
     
    @@ -77,13 +110,29 @@ struct ZSTD_CCtx_s {
         U32* hashTable;
         U32* hashTable3;
         U32* chainTable;
    -    HUF_CElt* hufTable;
    -    U32 flagStaticTables;
    -    HUF_repeat flagStaticHufTable;
    -    FSE_CTable offcodeCTable  [FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
    -    FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
    -    FSE_CTable litlengthCTable  [FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
    -    unsigned tmpCounters[HUF_WORKSPACE_SIZE_U32];
    +    HUF_repeat hufCTable_repeatMode;
    +    HUF_CElt* hufCTable;
    +    U32 fseCTables_ready;
    +    FSE_CTable* offcodeCTable;
    +    FSE_CTable* matchlengthCTable;
    +    FSE_CTable* litlengthCTable;
    +    unsigned* entropyScratchSpace;
    +
    +    /* streaming */
    +    ZSTD_CDict* cdictLocal;
    +    const ZSTD_CDict* cdict;
    +    char*  inBuff;
    +    size_t inBuffSize;
    +    size_t inToCompress;
    +    size_t inBuffPos;
    +    size_t inBuffTarget;
    +    char*  outBuff;
    +    size_t outBuffSize;
    +    size_t outBuffContentSize;
    +    size_t outBuffFlushedSize;
    +    ZSTD_cStreamStage streamStage;
    +    U32    frameEnded;
    +    U64    pledgedSrcSize;
     };
     
     ZSTD_CCtx* ZSTD_createCCtx(void)
    @@ -109,6 +158,13 @@ size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
     {
         if (cctx==NULL) return 0;   /* support free on NULL */
         ZSTD_free(cctx->workSpace, cctx->customMem);
    +    cctx->workSpace = NULL;
    +    ZSTD_freeCDict(cctx->cdictLocal);
    +    cctx->cdictLocal = NULL;
    +    ZSTD_free(cctx->inBuff, cctx->customMem);
    +    cctx->inBuff = NULL;
    +    ZSTD_free(cctx->outBuff, cctx->customMem);
    +    cctx->outBuff = NULL;
         ZSTD_free(cctx, cctx->customMem);
         return 0;   /* reserved as a potential error code in the future */
     }
    @@ -116,7 +172,9 @@ size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
     size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
     {
         if (cctx==NULL) return 0;   /* support sizeof on NULL */
    -    return sizeof(*cctx) + cctx->workSpaceSize;
    +    return sizeof(*cctx) + cctx->workSpaceSize
    +           + ZSTD_sizeof_CDict(cctx->cdictLocal)
    +           + cctx->outBuffSize + cctx->inBuffSize;
     }
     
     size_t ZSTD_setCCtxParameter(ZSTD_CCtx* cctx, ZSTD_CCtxParameter param, unsigned value)
    @@ -150,9 +208,7 @@ size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
         CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
         CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
         CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
    -    { U32 const searchLengthMin = ((cParams.strategy == ZSTD_fast) | (cParams.strategy == ZSTD_greedy)) ? ZSTD_SEARCHLENGTH_MIN+1 : ZSTD_SEARCHLENGTH_MIN;
    -      U32 const searchLengthMax = (cParams.strategy == ZSTD_fast) ? ZSTD_SEARCHLENGTH_MAX : ZSTD_SEARCHLENGTH_MAX-1;
    -      CLAMPCHECK(cParams.searchLength, searchLengthMin, searchLengthMax); }
    +    CLAMPCHECK(cParams.searchLength, ZSTD_SEARCHLENGTH_MIN, ZSTD_SEARCHLENGTH_MAX);
         CLAMPCHECK(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX);
         if ((U32)(cParams.strategy) > (U32)ZSTD_btultra) return ERROR(compressionParameter_unsupported);
         return 0;
    @@ -206,12 +262,15 @@ size_t ZSTD_estimateCCtxSize(ZSTD_compressionParameters cParams)
         size_t const hSize = ((size_t)1) << cParams.hashLog;
         U32    const hashLog3 = (cParams.searchLength>3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog);
         size_t const h3Size = ((size_t)1) << hashLog3;
    +    size_t const entropySpace = hufCTable_size + litlengthCTable_size
    +                              + offcodeCTable_size + matchlengthCTable_size
    +                              + entropyScratchSpace_size;
         size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
     
    -    size_t const optSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits))*sizeof(U32) + (ZSTD_OPT_NUM+1)*(sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
         U32 const end = (U32)(cctx->nextSrc - cctx->base);
         cctx->params = params;
         cctx->frameContentSize = frameContentSize;
    +    cctx->consumedSrcSize = 0;
         cctx->lowLimit = end;
         cctx->dictLimit = end;
         cctx->nextToUpdate = end+1;
    @@ -246,16 +306,19 @@ static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_parameters params, U64 fra
     
     typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset, ZSTDcrp_fullReset } ZSTD_compResetPolicy_e;
     
    -/*! ZSTD_resetCCtx_advanced() :
    +/*! ZSTD_resetCCtx_internal() :
         note : `params` must be validated */
    -static size_t ZSTD_resetCCtx_advanced (ZSTD_CCtx* zc,
    +static size_t ZSTD_resetCCtx_internal (ZSTD_CCtx* zc,
                                            ZSTD_parameters params, U64 frameContentSize,
                                            ZSTD_compResetPolicy_e const crp)
     {
    +    DEBUGLOG(5, "ZSTD_resetCCtx_internal \n");
    +
         if (crp == ZSTDcrp_continue)
             if (ZSTD_equivalentParams(params, zc->params)) {
    -            zc->flagStaticTables = 0;
    -            zc->flagStaticHufTable = HUF_repeat_none;
    +            DEBUGLOG(5, "ZSTD_equivalentParams()==1 \n");
    +            zc->fseCTables_ready = 0;
    +            zc->hufCTable_repeatMode = HUF_repeat_none;
                 return ZSTD_continueCCtx(zc, params, frameContentSize);
             }
     
    @@ -271,41 +334,71 @@ static size_t ZSTD_resetCCtx_advanced (ZSTD_CCtx* zc,
             void* ptr;
     
             /* Check if workSpace is large enough, alloc a new one if needed */
    -        {   size_t const optSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits))*sizeof(U32) + (ZSTD_OPT_NUM+1)*(sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
    +            if (zc->workSpaceSize < neededSpace) {
    +                DEBUGLOG(5, "Need to update workSpaceSize from %uK to %uK \n",
    +                            (unsigned)zc->workSpaceSize>>10, (unsigned)neededSpace>>10);
    +                zc->workSpaceSize = 0;
                     ZSTD_free(zc->workSpace, zc->customMem);
                     zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
                     if (zc->workSpace == NULL) return ERROR(memory_allocation);
                     zc->workSpaceSize = neededSpace;
    +                ptr = zc->workSpace;
    +
    +                /* entropy space */
    +                zc->hufCTable = (HUF_CElt*)ptr;
    +                ptr = (char*)zc->hufCTable + hufCTable_size;  /* note : HUF_CElt* is incomplete type, size is estimated via macro */
    +                zc->offcodeCTable = (FSE_CTable*) ptr;
    +                ptr = (char*)ptr + offcodeCTable_size;
    +                zc->matchlengthCTable = (FSE_CTable*) ptr;
    +                ptr = (char*)ptr + matchlengthCTable_size;
    +                zc->litlengthCTable = (FSE_CTable*) ptr;
    +                ptr = (char*)ptr + litlengthCTable_size;
    +                assert(((size_t)ptr & 3) == 0);   /* ensure correct alignment */
    +                zc->entropyScratchSpace = (unsigned*) ptr;
             }   }
     
    -        if (crp!=ZSTDcrp_noMemset) memset(zc->workSpace, 0, tableSpace);   /* reset tables only */
    -        XXH64_reset(&zc->xxhState, 0);
    -        zc->hashLog3 = hashLog3;
    -        zc->hashTable = (U32*)(zc->workSpace);
    -        zc->chainTable = zc->hashTable + hSize;
    -        zc->hashTable3 = zc->chainTable + chainSize;
    -        ptr = zc->hashTable3 + h3Size;
    -        zc->hufTable = (HUF_CElt*)ptr;
    -        zc->flagStaticTables = 0;
    -        zc->flagStaticHufTable = HUF_repeat_none;
    -        ptr = ((U32*)ptr) + 256;  /* note : HUF_CElt* is incomplete type, size is simulated using U32 */
    +        /* init params */
    +        zc->params = params;
    +        zc->blockSize = blockSize;
    +        DEBUGLOG(5, "blockSize = %uK \n", (U32)blockSize>>10);
    +        zc->frameContentSize = frameContentSize;
    +        zc->consumedSrcSize = 0;
     
    +        XXH64_reset(&zc->xxhState, 0);
    +        zc->stage = ZSTDcs_init;
    +        zc->dictID = 0;
    +        zc->loadedDictEnd = 0;
    +        zc->fseCTables_ready = 0;
    +        zc->hufCTable_repeatMode = HUF_repeat_none;
             zc->nextToUpdate = 1;
             zc->nextSrc = NULL;
             zc->base = NULL;
             zc->dictBase = NULL;
             zc->dictLimit = 0;
             zc->lowLimit = 0;
    -        zc->params = params;
    -        zc->blockSize = blockSize;
    -        zc->frameContentSize = frameContentSize;
             { int i; for (i=0; i<ZSTD_REP_NUM; i++) zc->rep[i] = repStartValue[i]; }
    +        zc->hashLog3 = hashLog3;
    +        zc->seqStore.litLengthSum = 0;
     
    +        /* ensure entropy tables are close together at the beginning */
    +        assert((void*)zc->hufCTable == zc->workSpace);
    +        assert((char*)zc->offcodeCTable == (char*)zc->hufCTable + hufCTable_size);
    +        assert((char*)zc->matchlengthCTable == (char*)zc->offcodeCTable + offcodeCTable_size);
    +        assert((char*)zc->litlengthCTable == (char*)zc->matchlengthCTable + matchlengthCTable_size);
    +        assert((char*)zc->entropyScratchSpace == (char*)zc->litlengthCTable + litlengthCTable_size);
    +        ptr = (char*)zc->entropyScratchSpace + entropyScratchSpace_size;
    +
    +        /* opt parser space */
             if ((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btultra)) {
    +            DEBUGLOG(5, "reserving optimal parser space ");
    +            assert(((size_t)ptr & 3) == 0);  /* ensure ptr is properly aligned */
                 zc->seqStore.litFreq = (U32*)ptr;
                 zc->seqStore.litLengthFreq = zc->seqStore.litFreq + (1<<Litbits);
                 zc->seqStore.matchLengthFreq = zc->seqStore.litLengthFreq + (MaxLL+1);
    @@ -315,8 +408,17 @@ static size_t ZSTD_resetCCtx_advanced (ZSTD_CCtx* zc,
                 ptr = zc->seqStore.matchTable + ZSTD_OPT_NUM+1;
                 zc->seqStore.priceTable = (ZSTD_optimal_t*)ptr;
                 ptr = zc->seqStore.priceTable + ZSTD_OPT_NUM+1;
    -            zc->seqStore.litLengthSum = 0;
             }
    +
    +        /* table Space */
    +        if (crp!=ZSTDcrp_noMemset) memset(ptr, 0, tableSpace);   /* reset tables only */
    +        assert(((size_t)ptr & 3) == 0);  /* ensure ptr is properly aligned */
    +        zc->hashTable = (U32*)(ptr);
    +        zc->chainTable = zc->hashTable + hSize;
    +        zc->hashTable3 = zc->chainTable + chainSize;
    +        ptr = zc->hashTable3 + h3Size;
    +
    +        /* sequences storage */
             zc->seqStore.sequencesStart = (seqDef*)ptr;
             ptr = zc->seqStore.sequencesStart + maxNbSeq;
             zc->seqStore.llCode = (BYTE*) ptr;
    @@ -324,10 +426,6 @@ static size_t ZSTD_resetCCtx_advanced (ZSTD_CCtx* zc,
             zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
             zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;
     
    -        zc->stage = ZSTDcs_init;
    -        zc->dictID = 0;
    -        zc->loadedDictEnd = 0;
    -
             return 0;
         }
     }
    @@ -341,27 +439,33 @@ void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx) {
         for (i=0; irep[i] = 0;
     }
     
    -/*! ZSTD_copyCCtx() :
    -*   Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
    -*   Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
    -*   @return : 0, or an error code */
    -size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
    -{
    -    if (srcCCtx->stage!=ZSTDcs_init) return ERROR(stage_wrong);
     
    +/*! ZSTD_copyCCtx_internal() :
    + *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
    + *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
    + *  pledgedSrcSize=0 means "empty" if fParams.contentSizeFlag=1
    + *  @return : 0, or an error code */
    +size_t ZSTD_copyCCtx_internal(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx,
    +                              ZSTD_frameParameters fParams, unsigned long long pledgedSrcSize)
    +{
    +    DEBUGLOG(5, "ZSTD_copyCCtx_internal \n");
    +    if (srcCCtx->stage!=ZSTDcs_init) return ERROR(stage_wrong);
     
         memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
         {   ZSTD_parameters params = srcCCtx->params;
    -        params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
    -        ZSTD_resetCCtx_advanced(dstCCtx, params, pledgedSrcSize, ZSTDcrp_noMemset);
    +        params.fParams = fParams;
    +        DEBUGLOG(5, "ZSTD_resetCCtx_internal : dictIDFlag : %u \n", !fParams.noDictIDFlag);
    +        ZSTD_resetCCtx_internal(dstCCtx, params, pledgedSrcSize, ZSTDcrp_noMemset);
         }
     
         /* copy tables */
         {   size_t const chainSize = (srcCCtx->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << srcCCtx->params.cParams.chainLog);
    -        size_t const hSize = ((size_t)1) << srcCCtx->params.cParams.hashLog;
    +        size_t const hSize =  (size_t)1 << srcCCtx->params.cParams.hashLog;
             size_t const h3Size = (size_t)1 << srcCCtx->hashLog3;
             size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
    -        memcpy(dstCCtx->workSpace, srcCCtx->workSpace, tableSpace);
    +        assert((U32*)dstCCtx->chainTable == (U32*)dstCCtx->hashTable + hSize);  /* chainTable must follow hashTable */
    +        assert((U32*)dstCCtx->hashTable3 == (U32*)dstCCtx->chainTable + chainSize);
    +        memcpy(dstCCtx->hashTable, srcCCtx->hashTable, tableSpace);   /* presumes all tables follow each other */
         }
     
         /* copy dictionary offsets */
    @@ -376,23 +480,36 @@ size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long
         dstCCtx->dictID       = srcCCtx->dictID;
     
         /* copy entropy tables */
    -    dstCCtx->flagStaticTables = srcCCtx->flagStaticTables;
    -    dstCCtx->flagStaticHufTable = srcCCtx->flagStaticHufTable;
    -    if (srcCCtx->flagStaticTables) {
    -        memcpy(dstCCtx->litlengthCTable, srcCCtx->litlengthCTable, sizeof(dstCCtx->litlengthCTable));
    -        memcpy(dstCCtx->matchlengthCTable, srcCCtx->matchlengthCTable, sizeof(dstCCtx->matchlengthCTable));
    -        memcpy(dstCCtx->offcodeCTable, srcCCtx->offcodeCTable, sizeof(dstCCtx->offcodeCTable));
    +    dstCCtx->fseCTables_ready = srcCCtx->fseCTables_ready;
    +    if (srcCCtx->fseCTables_ready) {
    +        memcpy(dstCCtx->litlengthCTable, srcCCtx->litlengthCTable, litlengthCTable_size);
    +        memcpy(dstCCtx->matchlengthCTable, srcCCtx->matchlengthCTable, matchlengthCTable_size);
    +        memcpy(dstCCtx->offcodeCTable, srcCCtx->offcodeCTable, offcodeCTable_size);
         }
    -    if (srcCCtx->flagStaticHufTable) {
    -        memcpy(dstCCtx->hufTable, srcCCtx->hufTable, 256*4);
    +    dstCCtx->hufCTable_repeatMode = srcCCtx->hufCTable_repeatMode;
    +    if (srcCCtx->hufCTable_repeatMode) {
    +        memcpy(dstCCtx->hufCTable, srcCCtx->hufCTable, hufCTable_size);
         }
     
         return 0;
     }
     
    +/*! ZSTD_copyCCtx() :
    + *  Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
    + *  Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
    + *  pledgedSrcSize==0 means "unknown".
    +*   @return : 0, or an error code */
    +size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
    +{
    +    ZSTD_frameParameters fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
    +    fParams.contentSizeFlag = pledgedSrcSize>0;
    +
    +    return ZSTD_copyCCtx_internal(dstCCtx, srcCCtx, fParams, pledgedSrcSize);
    +}
    +
     
     /*! ZSTD_reduceTable() :
    -*   reduce table indexes by `reducerValue` */
    + *  reduce table indexes by `reducerValue` */
     static void ZSTD_reduceTable (U32* const table, U32 const size, U32 const reducerValue)
     {
         U32 u;
    @@ -499,26 +616,28 @@ static size_t ZSTD_compressLiterals (ZSTD_CCtx* zc,
     
         /* small ? don't even attempt compression (speed opt) */
     #   define LITERAL_NOENTROPY 63
    -    {   size_t const minLitSize = zc->flagStaticHufTable == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY;
    +    {   size_t const minLitSize = zc->hufCTable_repeatMode == HUF_repeat_valid ? 6 : LITERAL_NOENTROPY;
             if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
         }
     
         if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall);   /* not enough space for compression */
    -    {   HUF_repeat repeat = zc->flagStaticHufTable;
    +    {   HUF_repeat repeat = zc->hufCTable_repeatMode;
             int const preferRepeat = zc->params.cParams.strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
             if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
    -        cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, zc->tmpCounters, sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat)
    -                                : HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, zc->tmpCounters, sizeof(zc->tmpCounters), zc->hufTable, &repeat, preferRepeat);
    +        cLitSize = singleStream ? HUF_compress1X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
    +                                      zc->entropyScratchSpace, entropyScratchSpace_size, zc->hufCTable, &repeat, preferRepeat)
    +                                : HUF_compress4X_repeat(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11,
    +                                      zc->entropyScratchSpace, entropyScratchSpace_size, zc->hufCTable, &repeat, preferRepeat);
             if (repeat != HUF_repeat_none) { hType = set_repeat; }    /* reused the existing table */
    -        else { zc->flagStaticHufTable = HUF_repeat_check; }       /* now have a table to reuse */
    +        else { zc->hufCTable_repeatMode = HUF_repeat_check; }       /* now have a table to reuse */
         }
     
         if ((cLitSize==0) | (cLitSize >= srcSize - minGain)) {
    -        zc->flagStaticHufTable = HUF_repeat_none;
    +        zc->hufCTable_repeatMode = HUF_repeat_none;
             return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
         }
         if (cLitSize==1) {
    -        zc->flagStaticHufTable = HUF_repeat_none;
    +        zc->hufCTable_repeatMode = HUF_repeat_none;
             return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
         }
     
    @@ -637,12 +756,12 @@ MEM_STATIC size_t ZSTD_compressSequences (ZSTD_CCtx* zc,
     
         /* CTable for Literal Lengths */
         {   U32 max = MaxLL;
    -        size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, zc->tmpCounters);
    +        size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, zc->entropyScratchSpace);
             if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
                 *op++ = llCodeTable[0];
                 FSE_buildCTable_rle(CTable_LitLength, (BYTE)max);
                 LLtype = set_rle;
    -        } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
    +        } else if ((zc->fseCTables_ready) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
                 LLtype = set_repeat;
             } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (LL_defaultNormLog-1)))) {
                 FSE_buildCTable_wksp(CTable_LitLength, LL_defaultNorm, MaxLL, LL_defaultNormLog, scratchBuffer, sizeof(scratchBuffer));
    @@ -653,7 +772,7 @@ MEM_STATIC size_t ZSTD_compressSequences (ZSTD_CCtx* zc,
                 if (count[llCodeTable[nbSeq-1]]>1) { count[llCodeTable[nbSeq-1]]--; nbSeq_1--; }
                 FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
                 { size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog);   /* overflow protected */
    -              if (FSE_isError(NCountSize)) return ERROR(GENERIC);
    +              if (FSE_isError(NCountSize)) return NCountSize;
                   op += NCountSize; }
                 FSE_buildCTable_wksp(CTable_LitLength, norm, max, tableLog, scratchBuffer, sizeof(scratchBuffer));
                 LLtype = set_compressed;
    @@ -661,12 +780,12 @@ MEM_STATIC size_t ZSTD_compressSequences (ZSTD_CCtx* zc,
     
         /* CTable for Offsets */
         {   U32 max = MaxOff;
    -        size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, zc->tmpCounters);
    +        size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, zc->entropyScratchSpace);
             if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
                 *op++ = ofCodeTable[0];
                 FSE_buildCTable_rle(CTable_OffsetBits, (BYTE)max);
                 Offtype = set_rle;
    -        } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
    +        } else if ((zc->fseCTables_ready) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
                 Offtype = set_repeat;
             } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (OF_defaultNormLog-1)))) {
                 FSE_buildCTable_wksp(CTable_OffsetBits, OF_defaultNorm, MaxOff, OF_defaultNormLog, scratchBuffer, sizeof(scratchBuffer));
    @@ -677,7 +796,7 @@ MEM_STATIC size_t ZSTD_compressSequences (ZSTD_CCtx* zc,
                 if (count[ofCodeTable[nbSeq-1]]>1) { count[ofCodeTable[nbSeq-1]]--; nbSeq_1--; }
                 FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
                 { size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog);   /* overflow protected */
    -              if (FSE_isError(NCountSize)) return ERROR(GENERIC);
    +              if (FSE_isError(NCountSize)) return NCountSize;
                   op += NCountSize; }
                 FSE_buildCTable_wksp(CTable_OffsetBits, norm, max, tableLog, scratchBuffer, sizeof(scratchBuffer));
                 Offtype = set_compressed;
    @@ -685,12 +804,12 @@ MEM_STATIC size_t ZSTD_compressSequences (ZSTD_CCtx* zc,
     
         /* CTable for MatchLengths */
         {   U32 max = MaxML;
    -        size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, zc->tmpCounters);
    +        size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, zc->entropyScratchSpace);
             if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
                 *op++ = *mlCodeTable;
                 FSE_buildCTable_rle(CTable_MatchLength, (BYTE)max);
                 MLtype = set_rle;
    -        } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
    +        } else if ((zc->fseCTables_ready) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
                 MLtype = set_repeat;
             } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (ML_defaultNormLog-1)))) {
                 FSE_buildCTable_wksp(CTable_MatchLength, ML_defaultNorm, MaxML, ML_defaultNormLog, scratchBuffer, sizeof(scratchBuffer));
    @@ -701,14 +820,14 @@ MEM_STATIC size_t ZSTD_compressSequences (ZSTD_CCtx* zc,
                 if (count[mlCodeTable[nbSeq-1]]>1) { count[mlCodeTable[nbSeq-1]]--; nbSeq_1--; }
                 FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
                 { size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog);   /* overflow protected */
    -              if (FSE_isError(NCountSize)) return ERROR(GENERIC);
    +              if (FSE_isError(NCountSize)) return NCountSize;
                   op += NCountSize; }
                 FSE_buildCTable_wksp(CTable_MatchLength, norm, max, tableLog, scratchBuffer, sizeof(scratchBuffer));
                 MLtype = set_compressed;
         }   }
     
         *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
    -    zc->flagStaticTables = 0;
    +    zc->fseCTables_ready = 0;
     
         /* Encoding Sequences */
         {   BIT_CStream_t blockStream;
    @@ -787,7 +906,7 @@ _check_compressibility:
         {   size_t const minGain = ZSTD_minGain(srcSize);
             size_t const maxCSize = srcSize - minGain;
             if ((size_t)(op-ostart) >= maxCSize) {
    -            zc->flagStaticHufTable = HUF_repeat_none;
    +            zc->hufCTable_repeatMode = HUF_repeat_none;
                 return 0;
         }   }
     
    @@ -816,7 +935,7 @@ MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const v
             const U32 pos = (U32)((const BYTE*)literals - g_start);
             if (g_start==NULL) g_start = (const BYTE*)literals;
             if ((pos > 1895000) && (pos < 1895300))
    -            fprintf(stderr, "Cpos %6u :%5u literals & match %3u bytes at distance %6u \n",
    +            DEBUGLOG(5, "Cpos %6u :%5u literals & match %3u bytes at distance %6u \n",
                        pos, (U32)litLength, (U32)matchCode+MINMATCH, (U32)offsetCode);
         }
     #endif
    @@ -825,14 +944,20 @@ MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const v
         seqStorePtr->lit += litLength;
     
         /* literal Length */
    -    if (litLength>0xFFFF) { seqStorePtr->longLengthID = 1; seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); }
    +    if (litLength>0xFFFF) {
    +        seqStorePtr->longLengthID = 1;
    +        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
    +    }
         seqStorePtr->sequences[0].litLength = (U16)litLength;
     
         /* match offset */
         seqStorePtr->sequences[0].offset = offsetCode + 1;
     
         /* match Length */
    -    if (matchCode>0xFFFF) { seqStorePtr->longLengthID = 2; seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); }
    +    if (matchCode>0xFFFF) {
    +        seqStorePtr->longLengthID = 2;
    +        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
    +    }
         seqStorePtr->sequences[0].matchLength = (U16)matchCode;
     
         seqStorePtr->sequences++;
    @@ -853,7 +978,14 @@ static unsigned ZSTD_NbCommonBytes (register size_t val)
     #       elif defined(__GNUC__) && (__GNUC__ >= 3)
                 return (__builtin_ctzll((U64)val) >> 3);
     #       else
    -            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
    +            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
    +                                                     0, 3, 1, 3, 1, 4, 2, 7,
    +                                                     0, 2, 3, 6, 1, 5, 3, 5,
    +                                                     1, 3, 4, 4, 2, 5, 6, 7,
    +                                                     7, 0, 1, 2, 3, 3, 4, 6,
    +                                                     2, 6, 5, 5, 3, 4, 5, 6,
    +                                                     7, 1, 2, 4, 6, 4, 4, 5,
    +                                                     7, 2, 6, 5, 7, 6, 7, 7 };
                 return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
     #       endif
             } else { /* 32 bits */
    @@ -864,7 +996,10 @@ static unsigned ZSTD_NbCommonBytes (register size_t val)
     #       elif defined(__GNUC__) && (__GNUC__ >= 3)
                 return (__builtin_ctz((U32)val) >> 3);
     #       else
    -            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
    +            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
    +                                                     3, 2, 2, 1, 3, 2, 0, 1,
    +                                                     3, 3, 1, 2, 2, 2, 2, 0,
    +                                                     3, 1, 2, 0, 1, 0, 1, 1 };
                 return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
     #       endif
             }
    @@ -936,7 +1071,7 @@ static size_t ZSTD_count_2segments(const BYTE* ip, const BYTE* match, const BYTE
     ***************************************/
     static const U32 prime3bytes = 506832829U;
     static U32    ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes)  >> (32-h) ; }
    -MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); }   /* only in zstd_opt.h */
    +MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */
     
     static const U32 prime4bytes = 2654435761U;
     static U32    ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
    @@ -1085,7 +1220,7 @@ static void ZSTD_compressBlock_fast(ZSTD_CCtx* ctx,
         const U32 mls = ctx->params.cParams.searchLength;
         switch(mls)
         {
    -    default:
    +    default: /* includes case 3 */
         case 4 :
             ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 4); return;
         case 5 :
    @@ -1135,7 +1270,7 @@ static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
             if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex))
                && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
                 const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
    -            mLength = ZSTD_count_2segments(ip+1+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repMatchEnd, lowPrefixPtr) + EQUAL_READ32;
    +            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4;
                 ip++;
                 ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
             } else {
    @@ -1147,7 +1282,7 @@ static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
                 {   const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
                     const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
                     U32 offset;
    -                mLength = ZSTD_count_2segments(ip+EQUAL_READ32, match+EQUAL_READ32, iend, matchEnd, lowPrefixPtr) + EQUAL_READ32;
    +                mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4;
                     while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
                     offset = current - matchIndex;
                     offset_2 = offset_1;
    @@ -1171,7 +1306,7 @@ static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
                     if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex))  /* intentional overflow */
                        && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                         const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
    -                    size_t repLength2 = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch2+EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
    +                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4;
                         U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                         ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH);
                         hashTable[ZSTD_hashPtr(ip, hBits, mls)] = current2;
    @@ -1199,7 +1334,7 @@ static void ZSTD_compressBlock_fast_extDict(ZSTD_CCtx* ctx,
         U32 const mls = ctx->params.cParams.searchLength;
         switch(mls)
         {
    -    default:
    +    default: /* includes case 3 */
         case 4 :
             ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 4); return;
         case 5 :
    @@ -1274,7 +1409,9 @@ void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx* cctx,
             const BYTE* match = base + matchIndexS;
             hashLong[h2] = hashSmall[h] = current;   /* update hash tables */
     
    -        if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { /* note : by construction, offset_1 <= current */
    +        assert(offset_1 <= current);   /* supposed guaranteed by construction */
    +        if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
    +            /* favor repcode */
                 mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
                 ip++;
                 ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
    @@ -1285,15 +1422,15 @@ void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx* cctx,
                     offset = (U32)(ip-matchLong);
                     while (((ip>anchor) & (matchLong>lowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
                 } else if ( (matchIndexS > lowestIndex) && (MEM_read32(match) == MEM_read32(ip)) ) {
    -                size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
    -                U32 const matchIndex3 = hashLong[h3];
    -                const BYTE* match3 = base + matchIndex3;
    -                hashLong[h3] = current + 1;
    -                if ( (matchIndex3 > lowestIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
    -                    mLength = ZSTD_count(ip+9, match3+8, iend) + 8;
    +                size_t const hl3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
    +                U32 const matchIndexL3 = hashLong[hl3];
    +                const BYTE* matchL3 = base + matchIndexL3;
    +                hashLong[hl3] = current + 1;
    +                if ( (matchIndexL3 > lowestIndex) && (MEM_read64(matchL3) == MEM_read64(ip+1)) ) {
    +                    mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
                         ip++;
    -                    offset = (U32)(ip-match3);
    -                    while (((ip>anchor) & (match3>lowest)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
    +                    offset = (U32)(ip-matchL3);
    +                    while (((ip>anchor) & (matchL3>lowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
                     } else {
                         mLength = ZSTD_count(ip+4, match+4, iend) + 4;
                         offset = (U32)(ip-match);
    @@ -1353,7 +1490,7 @@ static void ZSTD_compressBlock_doubleFast(ZSTD_CCtx* ctx, const void* src, size_
         const U32 mls = ctx->params.cParams.searchLength;
         switch(mls)
         {
    -    default:
    +    default: /* includes case 3 */
         case 4 :
             ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 4); return;
         case 5 :
    @@ -1474,7 +1611,7 @@ static void ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx* ctx,
                     if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex))  /* intentional overflow */
                        && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                         const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
    -                    size_t const repLength2 = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch2+EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
    +                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4;
                         U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                         ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH);
                         hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
    @@ -1503,7 +1640,7 @@ static void ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx* ctx,
         U32 const mls = ctx->params.cParams.searchLength;
         switch(mls)
         {
    -    default:
    +    default: /* includes case 3 */
         case 4 :
             ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 4); return;
         case 5 :
    @@ -1733,9 +1870,10 @@ static size_t ZSTD_BtFindBestMatch_selectMLS (
     {
         switch(matchLengthSearch)
         {
    -    default :
    +    default : /* includes case 3 */
         case 4 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
         case 5 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
    +    case 7 :
         case 6 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
         }
     }
    @@ -1772,9 +1910,10 @@ static size_t ZSTD_BtFindBestMatch_selectMLS_extDict (
     {
         switch(matchLengthSearch)
         {
    -    default :
    +    default : /* includes case 3 */
         case 4 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
         case 5 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
    +    case 7 :
         case 6 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
         }
     }
    @@ -1831,7 +1970,7 @@ size_t ZSTD_HcFindBestMatch_generic (
         const U32 current = (U32)(ip-base);
         const U32 minChain = current > chainSize ? current - chainSize : 0;
         int nbAttempts=maxNbAttempts;
    -    size_t ml=EQUAL_READ32-1;
    +    size_t ml=4-1;
     
         /* HC4 match finder */
         U32 matchIndex = ZSTD_insertAndFindFirstIndex (zc, ip, mls);
    @@ -1846,11 +1985,15 @@ size_t ZSTD_HcFindBestMatch_generic (
             } else {
                 match = dictBase + matchIndex;
                 if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
    -                currentMl = ZSTD_count_2segments(ip+EQUAL_READ32, match+EQUAL_READ32, iLimit, dictEnd, prefixStart) + EQUAL_READ32;
    +                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
             }
     
             /* save best solution */
    -        if (currentMl > ml) { ml = currentMl; *offsetPtr = current - matchIndex + ZSTD_REP_MOVE; if (ip+currentMl == iLimit) break; /* best possible, and avoid read overflow*/ }
    +        if (currentMl > ml) {
    +            ml = currentMl;
    +            *offsetPtr = current - matchIndex + ZSTD_REP_MOVE;
    +            if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
    +        }
     
             if (matchIndex <= minChain) break;
             matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
    @@ -1868,9 +2011,10 @@ FORCE_INLINE size_t ZSTD_HcFindBestMatch_selectMLS (
     {
         switch(matchLengthSearch)
         {
    -    default :
    +    default : /* includes case 3 */
         case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 0);
         case 5 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 0);
    +    case 7 :
         case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 0);
         }
     }
    @@ -1884,9 +2028,10 @@ FORCE_INLINE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
     {
         switch(matchLengthSearch)
         {
    -    default :
    +    default : /* includes case 3 */
         case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 1);
         case 5 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 1);
    +    case 7 :
         case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 1);
         }
     }
    @@ -1934,7 +2079,7 @@ void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx,
             /* check repCode */
             if ((offset_1>0) & (MEM_read32(ip+1) == MEM_read32(ip+1 - offset_1))) {
                 /* repcode : we take it */
    -            matchLength = ZSTD_count(ip+1+EQUAL_READ32, ip+1+EQUAL_READ32-offset_1, iend) + EQUAL_READ32;
    +            matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
                 if (depth==0) goto _storeSequence;
             }
     
    @@ -1945,7 +2090,7 @@ void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx,
                     matchLength = ml2, start = ip, offset=offsetFound;
             }
     
    -        if (matchLength < EQUAL_READ32) {
    +        if (matchLength < 4) {
                 ip += ((ip-anchor) >> g_searchStrength) + 1;   /* jump faster over incompressible sections */
                 continue;
             }
    @@ -1955,17 +2100,17 @@ void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx,
              while (ip<ilimit) {
                  ip ++;
                  if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
    -                size_t const mlRep = ZSTD_count(ip+EQUAL_READ32, ip+EQUAL_READ32-offset_1, iend) + EQUAL_READ32;
    +                size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
                     int const gain2 = (int)(mlRep * 3);
                     int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
    -                if ((mlRep >= EQUAL_READ32) && (gain2 > gain1))
    +                if ((mlRep >= 4) && (gain2 > gain1))
                         matchLength = mlRep, offset = 0, start = ip;
                 }
                 {   size_t offset2=99999999;
                     size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
                     int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                     int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
    -                if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
    +                if ((ml2 >= 4) && (gain2 > gain1)) {
                         matchLength = ml2, offset = offset2, start = ip;
                         continue;   /* search a better one */
                 }   }
    @@ -1974,17 +2119,17 @@ void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx,
                  if ((depth==2) && (ip<ilimit)) {
                      ip ++;
                      if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
    -                    size_t const ml2 = ZSTD_count(ip+EQUAL_READ32, ip+EQUAL_READ32-offset_1, iend) + EQUAL_READ32;
    +                    size_t const ml2 = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
                         int const gain2 = (int)(ml2 * 4);
                         int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
    -                    if ((ml2 >= EQUAL_READ32) && (gain2 > gain1))
    +                    if ((ml2 >= 4) && (gain2 > gain1))
                             matchLength = ml2, offset = 0, start = ip;
                     }
                     {   size_t offset2=99999999;
                         size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
                         int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                         int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
    -                    if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
    +                    if ((ml2 >= 4) && (gain2 > gain1)) {
                             matchLength = ml2, offset = offset2, start = ip;
                             continue;
                 }   }   }
    @@ -1993,7 +2138,9 @@ void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx,
     
             /* catch up */
             if (offset) {
    -            while ((start>anchor) && (start>base+offset-ZSTD_REP_MOVE) && (start[-1] == start[-1-offset+ZSTD_REP_MOVE]))   /* only search for offset within prefix */
    +            while ( (start > anchor)
    +                 && (start > base+offset-ZSTD_REP_MOVE)
    +                 && (start[-1] == start[-1-offset+ZSTD_REP_MOVE]) )  /* only search for offset within prefix */
                     { start--; matchLength++; }
                 offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
             }
    @@ -2010,7 +2157,7 @@ _storeSequence:
                  && ((offset_2>0)
                  & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
                 /* store sequence */
    -            matchLength = ZSTD_count(ip+EQUAL_READ32, ip+EQUAL_READ32-offset_2, iend) + EQUAL_READ32;
    +            matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
                 offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
                 ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH);
                 ip += matchLength;
    @@ -2099,7 +2246,7 @@ void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx,
                 if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
                     /* repcode detected we should take it */
                     const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
    -                matchLength = ZSTD_count_2segments(ip+1+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
    +                matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                     if (depth==0) goto _storeSequence;
             }   }
     
    @@ -2110,7 +2257,7 @@ void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx,
                     matchLength = ml2, start = ip, offset=offsetFound;
             }
     
    -         if (matchLength < EQUAL_READ32) {
    +         if (matchLength < 4) {
                 ip += ((ip-anchor) >> g_searchStrength) + 1;   /* jump faster over incompressible sections */
                 continue;
             }
    @@ -2129,10 +2276,10 @@ void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx,
                     if (MEM_read32(ip) == MEM_read32(repMatch)) {
                         /* repcode detected */
                         const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
    -                    size_t const repLength = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
    +                    size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                         int const gain2 = (int)(repLength * 3);
                         int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
    -                    if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
    +                    if ((repLength >= 4) && (gain2 > gain1))
                             matchLength = repLength, offset = 0, start = ip;
                 }   }
     
    @@ -2141,7 +2288,7 @@ void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx,
                     size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
                     int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                     int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
    -                if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
    +                if ((ml2 >= 4) && (gain2 > gain1)) {
                         matchLength = ml2, offset = offset2, start = ip;
                         continue;   /* search a better one */
                 }   }
    @@ -2159,10 +2306,10 @@ void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx,
                         if (MEM_read32(ip) == MEM_read32(repMatch)) {
                             /* repcode detected */
                             const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
    -                        size_t repLength = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
    -                        int gain2 = (int)(repLength * 4);
    -                        int gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
    -                        if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
    +                        size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
    +                        int const gain2 = (int)(repLength * 4);
    +                        int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
    +                        if ((repLength >= 4) && (gain2 > gain1))
                                 matchLength = repLength, offset = 0, start = ip;
                     }   }
     
    @@ -2171,7 +2318,7 @@ void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx,
                         size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
                         int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
                         int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
    -                    if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
    +                    if ((ml2 >= 4) && (gain2 > gain1)) {
                             matchLength = ml2, offset = offset2, start = ip;
                             continue;
                 }   }   }
    @@ -2203,7 +2350,7 @@ _storeSequence:
                 if (MEM_read32(ip) == MEM_read32(repMatch)) {
                     /* repcode detected we should take it */
                     const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
    -                matchLength = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
    +                matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                     offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset history */
                     ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH);
                     ip += matchLength;
    @@ -2294,8 +2441,12 @@ typedef void (*ZSTD_blockCompressor) (ZSTD_CCtx* ctx, const void* src, size_t sr
     static ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict)
     {
         static const ZSTD_blockCompressor blockCompressor[2][8] = {
    -        { ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy, ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2, ZSTD_compressBlock_btlazy2, ZSTD_compressBlock_btopt, ZSTD_compressBlock_btultra },
    -        { ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict, ZSTD_compressBlock_lazy_extDict,ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict, ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btultra_extDict }
    +        { ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy,
    +          ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2, ZSTD_compressBlock_btlazy2,
    +          ZSTD_compressBlock_btopt, ZSTD_compressBlock_btultra },
    +        { ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict,
    +          ZSTD_compressBlock_lazy_extDict,ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict,
    +          ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btultra_extDict }
         };
     
         return blockCompressor[extDict][(U32)strat];
    @@ -2311,7 +2462,7 @@ static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCa
         if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) return 0;   /* don't even attempt compression below a certain srcSize */
         ZSTD_resetSeqStore(&(zc->seqStore));
         if (current > zc->nextToUpdate + 384)
    -        zc->nextToUpdate = current - MIN(192, (U32)(current - zc->nextToUpdate - 384));   /* update tree not updated after finding very long rep matches */
    +        zc->nextToUpdate = current - MIN(192, (U32)(current - zc->nextToUpdate - 384));   /* limited update after finding a very long match */
         blockCompressor(zc, src, srcSize);
         return ZSTD_compressSequences(zc, dst, dstCapacity, srcSize);
     }
    @@ -2343,7 +2494,8 @@ static size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
             U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
             size_t cSize;
     
    -        if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE) return ERROR(dstSize_tooSmall);   /* not enough space to store compressed block */
    +        if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE)
    +            return ERROR(dstSize_tooSmall);   /* not enough space to store compressed block */
             if (remaining < blockSize) blockSize = remaining;
     
             /* preemptive overflow correction */
    @@ -2398,7 +2550,8 @@ static size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
     static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
                                         ZSTD_parameters params, U64 pledgedSrcSize, U32 dictID)
     {   BYTE* const op = (BYTE*)dst;
    -    U32   const dictIDSizeCode = (dictID>0) + (dictID>=256) + (dictID>=65536);   /* 0-3 */
    +    U32   const dictIDSizeCodeLength = (dictID>0) + (dictID>=256) + (dictID>=65536);   /* 0-3 */
    +    U32   const dictIDSizeCode = params.fParams.noDictIDFlag ? 0 : dictIDSizeCodeLength;   /* 0-3 */
         U32   const checksumFlag = params.fParams.checksumFlag>0;
         U32   const windowSize = 1U << params.cParams.windowLog;
         U32   const singleSegment = params.fParams.contentSizeFlag && (windowSize >= pledgedSrcSize);
    @@ -2410,6 +2563,8 @@ static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
         size_t pos;
     
         if (dstCapacity < ZSTD_frameHeaderSize_max) return ERROR(dstSize_tooSmall);
    +    DEBUGLOG(5, "ZSTD_writeFrameHeader : dictIDFlag : %u ; dictID : %u ; dictIDSizeCode : %u \n",
    +                !params.fParams.noDictIDFlag, dictID,  dictIDSizeCode);
     
         MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
         op[4] = frameHeaderDecriptionByte; pos=5;
    @@ -2478,6 +2633,7 @@ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
                                  ZSTD_compress_generic (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
                                  ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize);
             if (ZSTD_isError(cSize)) return cSize;
    +        cctx->consumedSrcSize += srcSize;
             return cSize + fhSize;
         } else
             return fhSize;
    @@ -2488,7 +2644,7 @@ size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
                                   void* dst, size_t dstCapacity,
                             const void* src, size_t srcSize)
     {
    -    return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 0);
    +    return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1 /* frame mode */, 0 /* last chunk */);
     }
     
     
    @@ -2501,10 +2657,12 @@ size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const
     {
         size_t const blockSizeMax = ZSTD_getBlockSizeMax(cctx);
         if (srcSize > blockSizeMax) return ERROR(srcSize_wrong);
    -    return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0, 0);
    +    return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0 /* frame mode */, 0 /* last chunk */);
     }
     
    -
    +/*! ZSTD_loadDictionaryContent() :
    + *  @return : 0, or an error code
    + */
     static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx* zc, const void* src, size_t srcSize)
     {
         const BYTE* const ip = (const BYTE*) src;
    @@ -2534,13 +2692,15 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx* zc, const void* src, size_t
         case ZSTD_greedy:
         case ZSTD_lazy:
         case ZSTD_lazy2:
    -        ZSTD_insertAndFindFirstIndex (zc, iend-HASH_READ_SIZE, zc->params.cParams.searchLength);
    +        if (srcSize >= HASH_READ_SIZE)
    +            ZSTD_insertAndFindFirstIndex(zc, iend-HASH_READ_SIZE, zc->params.cParams.searchLength);
             break;
     
         case ZSTD_btlazy2:
         case ZSTD_btopt:
         case ZSTD_btultra:
    -        ZSTD_updateTree(zc, iend-HASH_READ_SIZE, iend, 1 << zc->params.cParams.searchLog, zc->params.cParams.searchLength);
    +        if (srcSize >= HASH_READ_SIZE)
    +            ZSTD_updateTree(zc, iend-HASH_READ_SIZE, iend, 1 << zc->params.cParams.searchLog, zc->params.cParams.searchLength);
             break;
     
         default:
    @@ -2567,18 +2727,15 @@ static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSym
     
     
     /* Dictionary format :
    -    Magic == ZSTD_DICT_MAGIC (4 bytes)
    -    HUF_writeCTable(256)
    -    FSE_writeNCount(off)
    -    FSE_writeNCount(ml)
    -    FSE_writeNCount(ll)
    -    RepOffsets
    -    Dictionary content
    -*/
    -/*! ZSTD_loadDictEntropyStats() :
    -    @return : size read from dictionary
    -    note : magic number supposed already checked */
    -static size_t ZSTD_loadDictEntropyStats(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
    + * See :
    + * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
    + */
    +/*! ZSTD_loadZstdDictionary() :
    + * @return : 0, or an error code
    + *  assumptions : magic number supposed already checked
    + *                dictSize supposed > 8
    + */
    +static size_t ZSTD_loadZstdDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
     {
         const BYTE* dictPtr = (const BYTE*)dict;
         const BYTE* const dictEnd = dictPtr + dictSize;
    @@ -2586,7 +2743,11 @@ static size_t ZSTD_loadDictEntropyStats(ZSTD_CCtx* cctx, const void* dict, size_
         unsigned offcodeMaxValue = MaxOff;
         BYTE scratchBuffer[1<<MAX(MLFSELog,LLFSELog)];
      
     -    {   size_t const hufHeaderSize = HUF_readCTable(cctx->hufTable, 255, dict, dictSize);
    +    dictPtr += 4;   /* skip magic number */
    +    cctx->dictID = cctx->params.fParams.noDictIDFlag ? 0 :  MEM_readLE32(dictPtr);
    +    dictPtr += 4;
    +
    +    {   size_t const hufHeaderSize = HUF_readCTable(cctx->hufCTable, 255, dictPtr, dictEnd-dictPtr);
             if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted);
             dictPtr += hufHeaderSize;
         }
    @@ -2596,7 +2757,8 @@ static size_t ZSTD_loadDictEntropyStats(ZSTD_CCtx* cctx, const void* dict, size_
             if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
             if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
             /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
    -        CHECK_E (FSE_buildCTable_wksp(cctx->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, scratchBuffer, sizeof(scratchBuffer)), dictionary_corrupted);
    +        CHECK_E( FSE_buildCTable_wksp(cctx->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, scratchBuffer, sizeof(scratchBuffer)),
    +                 dictionary_corrupted);
             dictPtr += offcodeHeaderSize;
         }
     
    @@ -2606,8 +2768,9 @@ static size_t ZSTD_loadDictEntropyStats(ZSTD_CCtx* cctx, const void* dict, size_
             if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
             if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
             /* Every match length code must have non-zero probability */
    -        CHECK_F (ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
    -        CHECK_E (FSE_buildCTable_wksp(cctx->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, scratchBuffer, sizeof(scratchBuffer)), dictionary_corrupted);
    +        CHECK_F( ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
    +        CHECK_E( FSE_buildCTable_wksp(cctx->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, scratchBuffer, sizeof(scratchBuffer)),
    +                 dictionary_corrupted);
             dictPtr += matchlengthHeaderSize;
         }
     
    @@ -2617,49 +2780,51 @@ static size_t ZSTD_loadDictEntropyStats(ZSTD_CCtx* cctx, const void* dict, size_
             if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
             if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
             /* Every literal length code must have non-zero probability */
    -        CHECK_F (ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
    -        CHECK_E(FSE_buildCTable_wksp(cctx->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, scratchBuffer, sizeof(scratchBuffer)), dictionary_corrupted);
    +        CHECK_F( ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
    +        CHECK_E( FSE_buildCTable_wksp(cctx->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, scratchBuffer, sizeof(scratchBuffer)),
    +                 dictionary_corrupted);
             dictPtr += litlengthHeaderSize;
         }
     
         if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
    -    cctx->rep[0] = MEM_readLE32(dictPtr+0); if (cctx->rep[0] == 0 || cctx->rep[0] >= dictSize) return ERROR(dictionary_corrupted);
    -    cctx->rep[1] = MEM_readLE32(dictPtr+4); if (cctx->rep[1] == 0 || cctx->rep[1] >= dictSize) return ERROR(dictionary_corrupted);
    -    cctx->rep[2] = MEM_readLE32(dictPtr+8); if (cctx->rep[2] == 0 || cctx->rep[2] >= dictSize) return ERROR(dictionary_corrupted);
    +    cctx->rep[0] = MEM_readLE32(dictPtr+0);
    +    cctx->rep[1] = MEM_readLE32(dictPtr+4);
    +    cctx->rep[2] = MEM_readLE32(dictPtr+8);
         dictPtr += 12;
     
    -    {   U32 offcodeMax = MaxOff;
    -        if ((size_t)(dictEnd - dictPtr) <= ((U32)-1) - 128 KB) {
    -            U32 const maxOffset = (U32)(dictEnd - dictPtr) + 128 KB; /* The maximum offset that must be supported */
    -            /* Calculate minimum offset code required to represent maxOffset */
    -            offcodeMax = ZSTD_highbit32(maxOffset);
    +    {   size_t const dictContentSize = (size_t)(dictEnd - dictPtr);
    +        U32 offcodeMax = MaxOff;
    +        if (dictContentSize <= ((U32)-1) - 128 KB) {
    +            U32 const maxOffset = (U32)dictContentSize + 128 KB; /* The maximum offset that must be supported */
    +            offcodeMax = ZSTD_highbit32(maxOffset); /* Calculate minimum offset code required to represent maxOffset */
             }
    -        /* Every possible supported offset <= dictContentSize + 128 KB must be representable */
    +        /* All offset values <= dictContentSize + 128 KB must be representable */
             CHECK_F (ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
    -    }
    +        /* All repCodes must be <= dictContentSize and != 0*/
    +        {   U32 u;
    +            for (u=0; u<3; u++) {
    +                if (cctx->rep[u] == 0) return ERROR(dictionary_corrupted);
    +                if (cctx->rep[u] > dictContentSize) return ERROR(dictionary_corrupted);
    +        }   }
     
    -    cctx->flagStaticTables = 1;
    -    cctx->flagStaticHufTable = HUF_repeat_valid;
    -    return dictPtr - (const BYTE*)dict;
    +        cctx->fseCTables_ready = 1;
    +        cctx->hufCTable_repeatMode = HUF_repeat_valid;
    +        return ZSTD_loadDictionaryContent(cctx, dictPtr, dictContentSize);
    +    }
     }
     
     /** ZSTD_compress_insertDictionary() :
     *   @return : 0, or an error code */
    -static size_t ZSTD_compress_insertDictionary(ZSTD_CCtx* zc, const void* dict, size_t dictSize)
    +static size_t ZSTD_compress_insertDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
     {
         if ((dict==NULL) || (dictSize<=8)) return 0;
     
         /* dict as pure content */
    -    if ((MEM_readLE32(dict) != ZSTD_DICT_MAGIC) || (zc->forceRawDict))
    -        return ZSTD_loadDictionaryContent(zc, dict, dictSize);
    -    zc->dictID = zc->params.fParams.noDictIDFlag ? 0 :  MEM_readLE32((const char*)dict+4);
    +    if ((MEM_readLE32(dict) != ZSTD_DICT_MAGIC) || (cctx->forceRawDict))
    +        return ZSTD_loadDictionaryContent(cctx, dict, dictSize);
     
    -    /* known magic number : dict is parsed for entropy stats and content */
    -    {   size_t const loadError = ZSTD_loadDictEntropyStats(zc, (const char*)dict+8 /* skip dictHeader */, dictSize-8);
    -        size_t const eSize = loadError + 8;
    -        if (ZSTD_isError(loadError)) return loadError;
    -        return ZSTD_loadDictionaryContent(zc, (const char*)dict+eSize, dictSize-eSize);
    -    }
    +    /* dict as zstd dictionary */
    +    return ZSTD_loadZstdDictionary(cctx, dict, dictSize);
     }
     
     /*! ZSTD_compressBegin_internal() :
    @@ -2669,7 +2834,8 @@ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
                                        ZSTD_parameters params, U64 pledgedSrcSize)
     {
         ZSTD_compResetPolicy_e const crp = dictSize ? ZSTDcrp_fullReset : ZSTDcrp_continue;
    -    CHECK_F(ZSTD_resetCCtx_advanced(cctx, params, pledgedSrcSize, crp));
    +    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
    +    CHECK_F(ZSTD_resetCCtx_internal(cctx, params, pledgedSrcSize, crp));
         return ZSTD_compress_insertDictionary(cctx, dict, dictSize);
     }
     
    @@ -2708,6 +2874,7 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
         BYTE* op = ostart;
         size_t fhSize = 0;
     
    +    DEBUGLOG(5, "ZSTD_writeEpilogue \n");
         if (cctx->stage == ZSTDcs_created) return ERROR(stage_wrong);  /* init missing */
     
         /* special case : empty frame */
    @@ -2745,10 +2912,15 @@ size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
                        const void* src, size_t srcSize)
     {
         size_t endResult;
    -    size_t const cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 1);
    +    size_t const cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize,
    +                               1 /* frame mode */, 1 /* last chunk */);
         if (ZSTD_isError(cSize)) return cSize;
         endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
         if (ZSTD_isError(endResult)) return endResult;
    +    if (cctx->params.fParams.contentSizeFlag) {  /* control src size */
    +        if (cctx->frameContentSize != cctx->consumedSrcSize)
    +            return ERROR(srcSize_wrong);
    +    }
         return cSize + endResult;
     }
     
    @@ -2773,7 +2945,8 @@ size_t ZSTD_compress_advanced (ZSTD_CCtx* ctx,
         return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
     }
     
    -size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict, size_t dictSize, int compressionLevel)
    +size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize,
    +                               const void* dict, size_t dictSize, int compressionLevel)
     {
         ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, dict ? dictSize : 0);
         params.fParams.contentSizeFlag = 1;
    @@ -2806,14 +2979,30 @@ struct ZSTD_CDict_s {
         ZSTD_CCtx* refContext;
     };  /* typedef'd tp ZSTD_CDict within "zstd.h" */
     
    +/*! ZSTD_estimateCDictSize() :
    + *  Estimate amount of memory that will be needed to create a dictionary with following arguments */
    +size_t ZSTD_estimateCDictSize(ZSTD_compressionParameters cParams, size_t dictSize)
    +{
    +    cParams = ZSTD_adjustCParams(cParams, 0, dictSize);
    +    return sizeof(ZSTD_CDict) + dictSize + ZSTD_estimateCCtxSize(cParams);
    +}
    +
     size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
     {
         if (cdict==NULL) return 0;   /* support sizeof on NULL */
         return ZSTD_sizeof_CCtx(cdict->refContext) + (cdict->dictBuffer ? cdict->dictContentSize : 0) + sizeof(*cdict);
     }
     
    +static ZSTD_parameters ZSTD_makeParams(ZSTD_compressionParameters cParams, ZSTD_frameParameters fParams)
    +{
    +    ZSTD_parameters params;
    +    params.cParams = cParams;
    +    params.fParams = fParams;
    +    return params;
    +}
    +
     ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize, unsigned byReference,
    -                                      ZSTD_parameters params, ZSTD_customMem customMem)
    +                                      ZSTD_compressionParameters cParams, ZSTD_customMem customMem)
     {
         if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem;
         if (!customMem.customAlloc || !customMem.customFree) return NULL;
    @@ -2838,7 +3027,9 @@ ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize, u
                 cdict->dictContent = internalBuffer;
             }
     
    -        {   size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0);
    +        {   ZSTD_frameParameters const fParams = { 0 /* contentSizeFlag */, 0 /* checksumFlag */, 0 /* noDictIDFlag */ };   /* dummy */
    +            ZSTD_parameters const params = ZSTD_makeParams(cParams, fParams);
    +            size_t const errorCode = ZSTD_compressBegin_advanced(cctx, cdict->dictContent, dictSize, params, 0);
                 if (ZSTD_isError(errorCode)) {
                     ZSTD_free(cdict->dictBuffer, customMem);
                     ZSTD_free(cdict, customMem);
    @@ -2855,17 +3046,15 @@ ZSTD_CDict* ZSTD_createCDict_advanced(const void* dictBuffer, size_t dictSize, u
     ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
     {
         ZSTD_customMem const allocator = { NULL, NULL, NULL };
    -    ZSTD_parameters params = ZSTD_getParams(compressionLevel, 0, dictSize);
    -    params.fParams.contentSizeFlag = 1;
    -    return ZSTD_createCDict_advanced(dict, dictSize, 0, params, allocator);
    +    ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
    +    return ZSTD_createCDict_advanced(dict, dictSize, 0, cParams, allocator);
     }
     
     ZSTD_CDict* ZSTD_createCDict_byReference(const void* dict, size_t dictSize, int compressionLevel)
     {
         ZSTD_customMem const allocator = { NULL, NULL, NULL };
    -    ZSTD_parameters params = ZSTD_getParams(compressionLevel, 0, dictSize);
    -    params.fParams.contentSizeFlag = 1;
    -    return ZSTD_createCDict_advanced(dict, dictSize, 1, params, allocator);
    +    ZSTD_compressionParameters cParams = ZSTD_getCParams(compressionLevel, 0, dictSize);
    +    return ZSTD_createCDict_advanced(dict, dictSize, 1, cParams, allocator);
     }
     
     size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
    @@ -2883,34 +3072,55 @@ static ZSTD_parameters ZSTD_getParamsFromCDict(const ZSTD_CDict* cdict) {
         return ZSTD_getParamsFromCCtx(cdict->refContext);
     }
     
    -size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, unsigned long long pledgedSrcSize)
    +/* ZSTD_compressBegin_usingCDict_advanced() :
    + * cdict must be != NULL */
    +size_t ZSTD_compressBegin_usingCDict_advanced(
    +    ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict,
    +    ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize)
     {
    -    if (cdict->dictContentSize) CHECK_F(ZSTD_copyCCtx(cctx, cdict->refContext, pledgedSrcSize))
    +    if (cdict==NULL) return ERROR(GENERIC);  /* does not support NULL cdict */
    +    DEBUGLOG(5, "ZSTD_compressBegin_usingCDict_advanced : dictIDFlag == %u \n", !fParams.noDictIDFlag);
    +    if (cdict->dictContentSize)
    +        CHECK_F( ZSTD_copyCCtx_internal(cctx, cdict->refContext, fParams, pledgedSrcSize) )
         else {
             ZSTD_parameters params = cdict->refContext->params;
    -        params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
    -        CHECK_F(ZSTD_compressBegin_advanced(cctx, NULL, 0, params, pledgedSrcSize));
    +        params.fParams = fParams;
    +        CHECK_F(ZSTD_compressBegin_internal(cctx, NULL, 0, params, pledgedSrcSize));
         }
         return 0;
     }
     
    +/* ZSTD_compressBegin_usingCDict() :
    + * pledgedSrcSize=0 means "unknown"
    + * if pledgedSrcSize>0, it will enable contentSizeFlag */
    +size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict)
    +{
    +    ZSTD_frameParameters const fParams = { 0 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
    +    DEBUGLOG(5, "ZSTD_compressBegin_usingCDict : dictIDFlag == %u \n", !fParams.noDictIDFlag);
    +    return ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, 0);
    +}
    +
    +size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
    +                                void* dst, size_t dstCapacity,
    +                                const void* src, size_t srcSize,
    +                                const ZSTD_CDict* cdict, ZSTD_frameParameters fParams)
    +{
    +    CHECK_F (ZSTD_compressBegin_usingCDict_advanced(cctx, cdict, fParams, srcSize));   /* will check if cdict != NULL */
    +    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
    +}
    +
     /*! ZSTD_compress_usingCDict() :
    -*   Compression using a digested Dictionary.
    -*   Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
    -*   Note that compression level is decided during dictionary creation */
    + *  Compression using a digested Dictionary.
    + *  Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
    + *  Note that compression parameters are decided at CDict creation time
    + *  while frame parameters are hardcoded */
     size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
                                     void* dst, size_t dstCapacity,
                                     const void* src, size_t srcSize,
                                     const ZSTD_CDict* cdict)
     {
    -    CHECK_F(ZSTD_compressBegin_usingCDict(cctx, cdict, srcSize));
    -
    -    if (cdict->refContext->params.fParams.contentSizeFlag==1) {
    -        cctx->params.fParams.contentSizeFlag = 1;
    -        cctx->frameContentSize = srcSize;
    -    }
    -
    -    return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
    +    ZSTD_frameParameters const fParams = { 1 /*content*/, 0 /*checksum*/, 0 /*noDictID*/ };
    +    return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, fParams);
     }
     
     
    @@ -2919,31 +3129,6 @@ size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
     *  Streaming
     ********************************************************************/
     
    -typedef enum { zcss_init, zcss_load, zcss_flush, zcss_final } ZSTD_cStreamStage;
    -
    -struct ZSTD_CStream_s {
    -    ZSTD_CCtx* cctx;
    -    ZSTD_CDict* cdictLocal;
    -    const ZSTD_CDict* cdict;
    -    char*  inBuff;
    -    size_t inBuffSize;
    -    size_t inToCompress;
    -    size_t inBuffPos;
    -    size_t inBuffTarget;
    -    size_t blockSize;
    -    char*  outBuff;
    -    size_t outBuffSize;
    -    size_t outBuffContentSize;
    -    size_t outBuffFlushedSize;
    -    ZSTD_cStreamStage stage;
    -    U32    checksum;
    -    U32    frameEnded;
    -    U64    pledgedSrcSize;
    -    U64    inputProcessed;
    -    ZSTD_parameters params;
    -    ZSTD_customMem customMem;
    -};   /* typedef'd to ZSTD_CStream within "zstd.h" */
    -
     ZSTD_CStream* ZSTD_createCStream(void)
     {
         return ZSTD_createCStream_advanced(defaultCustomMem);
    @@ -2951,131 +3136,166 @@ ZSTD_CStream* ZSTD_createCStream(void)
     
     ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
     {
    -    ZSTD_CStream* zcs;
    -
    -    if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem;
    -    if (!customMem.customAlloc || !customMem.customFree) return NULL;
    -
    -    zcs = (ZSTD_CStream*)ZSTD_malloc(sizeof(ZSTD_CStream), customMem);
    -    if (zcs==NULL) return NULL;
    -    memset(zcs, 0, sizeof(ZSTD_CStream));
    -    memcpy(&zcs->customMem, &customMem, sizeof(ZSTD_customMem));
    -    zcs->cctx = ZSTD_createCCtx_advanced(customMem);
    -    if (zcs->cctx == NULL) { ZSTD_freeCStream(zcs); return NULL; }
    -    return zcs;
    +    /* CStream and CCtx are now same object */
    +    return ZSTD_createCCtx_advanced(customMem);
     }
     
     size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
     {
    -    if (zcs==NULL) return 0;   /* support free on NULL */
    -    {   ZSTD_customMem const cMem = zcs->customMem;
    -        ZSTD_freeCCtx(zcs->cctx);
    -        ZSTD_freeCDict(zcs->cdictLocal);
    -        ZSTD_free(zcs->inBuff, cMem);
    -        ZSTD_free(zcs->outBuff, cMem);
    -        ZSTD_free(zcs, cMem);
    -        return 0;
    -    }
    +    return ZSTD_freeCCtx(zcs);   /* same object */
    +}
    +
    +size_t ZSTD_estimateCStreamSize(ZSTD_compressionParameters cParams)
    +{
    +    size_t const CCtxSize = ZSTD_estimateCCtxSize(cParams);
    +    size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << cParams.windowLog);
    +    size_t const inBuffSize = ((size_t)1 << cParams.windowLog) + blockSize;
    +    size_t const outBuffSize = ZSTD_compressBound(blockSize) + 1;
    +    size_t const streamingSize = inBuffSize + outBuffSize;
    +
    +    return CCtxSize + streamingSize;
     }
     
     
     /*======   Initialization   ======*/
     
     size_t ZSTD_CStreamInSize(void)  { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; }
    -size_t ZSTD_CStreamOutSize(void) { return ZSTD_compressBound(ZSTD_BLOCKSIZE_ABSOLUTEMAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ; }
     
    -static size_t ZSTD_resetCStream_internal(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
    +size_t ZSTD_CStreamOutSize(void)
    +{
    +    return ZSTD_compressBound(ZSTD_BLOCKSIZE_ABSOLUTEMAX) + ZSTD_blockHeaderSize + 4 /* 32-bits hash */ ;
    +}
    +
    +static size_t ZSTD_resetCStream_internal(ZSTD_CStream* zcs, ZSTD_parameters params, unsigned long long pledgedSrcSize)
     {
         if (zcs->inBuffSize==0) return ERROR(stage_wrong);   /* zcs has not been init at least once => can't reset */
     
    -    if (zcs->cdict) CHECK_F(ZSTD_compressBegin_usingCDict(zcs->cctx, zcs->cdict, pledgedSrcSize))
    -    else CHECK_F(ZSTD_compressBegin_advanced(zcs->cctx, NULL, 0, zcs->params, pledgedSrcSize));
    +    DEBUGLOG(5, "ZSTD_resetCStream_internal : dictIDFlag == %u \n", !zcs->params.fParams.noDictIDFlag);
    +
    +    if (zcs->cdict) CHECK_F(ZSTD_compressBegin_usingCDict_advanced(zcs, zcs->cdict, params.fParams, pledgedSrcSize))
    +    else CHECK_F(ZSTD_compressBegin_internal(zcs, NULL, 0, params, pledgedSrcSize));
     
         zcs->inToCompress = 0;
         zcs->inBuffPos = 0;
         zcs->inBuffTarget = zcs->blockSize;
         zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
    -    zcs->stage = zcss_load;
    +    zcs->streamStage = zcss_load;
         zcs->frameEnded = 0;
         zcs->pledgedSrcSize = pledgedSrcSize;
    -    zcs->inputProcessed = 0;
         return 0;   /* ready to go */
     }
     
     size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
     {
     
    -    zcs->params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
    +    ZSTD_parameters params = zcs->params;
    +    params.fParams.contentSizeFlag = (pledgedSrcSize > 0);
    +    DEBUGLOG(5, "ZSTD_resetCStream : dictIDFlag == %u \n", !zcs->params.fParams.noDictIDFlag);
    +    return ZSTD_resetCStream_internal(zcs, params, pledgedSrcSize);
    +}
     
    -    return ZSTD_resetCStream_internal(zcs, pledgedSrcSize);
    +/* ZSTD_initCStream_internal() :
    + * params are supposed validated at this stage
    + * and zcs->cdict is supposed to be correct */
    +static size_t ZSTD_initCStream_stage2(ZSTD_CStream* zcs,
    +                                const ZSTD_parameters params,
    +                                unsigned long long pledgedSrcSize)
    +{
    +    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
    +    zcs->blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << params.cParams.windowLog);
    +
    +    /* allocate buffers */
    +    {   size_t const neededInBuffSize = ((size_t)1 << params.cParams.windowLog) + zcs->blockSize;
    +        if (zcs->inBuffSize < neededInBuffSize) {
    +            zcs->inBuffSize = 0;
    +            ZSTD_free(zcs->inBuff, zcs->customMem);
    +            zcs->inBuff = (char*)ZSTD_malloc(neededInBuffSize, zcs->customMem);
    +            if (zcs->inBuff == NULL) return ERROR(memory_allocation);
    +            zcs->inBuffSize = neededInBuffSize;
    +        }
    +    }
    +    if (zcs->outBuffSize < ZSTD_compressBound(zcs->blockSize)+1) {
    +        size_t const outBuffSize = ZSTD_compressBound(zcs->blockSize)+1;
    +        zcs->outBuffSize = 0;
    +        ZSTD_free(zcs->outBuff, zcs->customMem);
    +        zcs->outBuff = (char*)ZSTD_malloc(outBuffSize, zcs->customMem);
    +        if (zcs->outBuff == NULL) return ERROR(memory_allocation);
    +        zcs->outBuffSize = outBuffSize;
    +    }
    +
    +    DEBUGLOG(5, "ZSTD_initCStream_stage2 : dictIDFlag == %u \n", !params.fParams.noDictIDFlag);
    +    return ZSTD_resetCStream_internal(zcs, params, pledgedSrcSize);
    +}
    +
    +/* ZSTD_initCStream_usingCDict_advanced() :
    + * same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
    +size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, unsigned long long pledgedSrcSize, ZSTD_frameParameters fParams)
    +{
    +    if (!cdict) return ERROR(GENERIC);   /* cannot handle NULL cdict (does not know what to do) */
    +    {   ZSTD_parameters params = ZSTD_getParamsFromCDict(cdict);
    +        params.fParams = fParams;
    +        zcs->cdict = cdict;
    +        return ZSTD_initCStream_stage2(zcs, params, pledgedSrcSize);
    +    }
    +}
    +
    +/* note : cdict must outlive compression session */
    +size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
    +{
    +    ZSTD_frameParameters const fParams = { 0 /* content */, 0 /* checksum */, 0 /* noDictID */ };
    +    return ZSTD_initCStream_usingCDict_advanced(zcs, cdict, 0, fParams);
    +}
    +
    +static size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
    +                                const void* dict, size_t dictSize,
    +                                ZSTD_parameters params, unsigned long long pledgedSrcSize)
    +{
    +    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
    +    zcs->cdict = NULL;
    +
    +    if (dict && dictSize >= 8) {
    +        ZSTD_freeCDict(zcs->cdictLocal);
    +        zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, 0 /* copy */, params.cParams, zcs->customMem);
    +        if (zcs->cdictLocal == NULL) return ERROR(memory_allocation);
    +        zcs->cdict = zcs->cdictLocal;
    +    }
    +
    +    DEBUGLOG(5, "ZSTD_initCStream_internal : dictIDFlag == %u \n", !params.fParams.noDictIDFlag);
    +    return ZSTD_initCStream_stage2(zcs, params, pledgedSrcSize);
     }
     
     size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
                                      const void* dict, size_t dictSize,
                                      ZSTD_parameters params, unsigned long long pledgedSrcSize)
     {
    -    /* allocate buffers */
    -    {   size_t const neededInBuffSize = (size_t)1 << params.cParams.windowLog;
    -        if (zcs->inBuffSize < neededInBuffSize) {
    -            zcs->inBuffSize = neededInBuffSize;
    -            ZSTD_free(zcs->inBuff, zcs->customMem);
    -            zcs->inBuff = (char*) ZSTD_malloc(neededInBuffSize, zcs->customMem);
    -            if (zcs->inBuff == NULL) return ERROR(memory_allocation);
    -        }
    -        zcs->blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, neededInBuffSize);
    -    }
    -    if (zcs->outBuffSize < ZSTD_compressBound(zcs->blockSize)+1) {
    -        zcs->outBuffSize = ZSTD_compressBound(zcs->blockSize)+1;
    -        ZSTD_free(zcs->outBuff, zcs->customMem);
    -        zcs->outBuff = (char*) ZSTD_malloc(zcs->outBuffSize, zcs->customMem);
    -        if (zcs->outBuff == NULL) return ERROR(memory_allocation);
    -    }
    -
    -    if (dict && dictSize >= 8) {
    -        ZSTD_freeCDict(zcs->cdictLocal);
    -        zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, 0, params, zcs->customMem);
    -        if (zcs->cdictLocal == NULL) return ERROR(memory_allocation);
    -        zcs->cdict = zcs->cdictLocal;
    -    } else zcs->cdict = NULL;
    -
    -    zcs->checksum = params.fParams.checksumFlag > 0;
    -    zcs->params = params;
    -
    -    return ZSTD_resetCStream_internal(zcs, pledgedSrcSize);
    -}
    -
    -/* note : cdict must outlive compression session */
    -size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
    -{
    -    ZSTD_parameters const params = ZSTD_getParamsFromCDict(cdict);
    -    size_t const initError =  ZSTD_initCStream_advanced(zcs, NULL, 0, params, 0);
    -    zcs->cdict = cdict;
    -    zcs->cctx->dictID = params.fParams.noDictIDFlag ? 0 : cdict->refContext->dictID;
    -    return initError;
    +    CHECK_F( ZSTD_checkCParams(params.cParams) );
    +    DEBUGLOG(5, "ZSTD_initCStream_advanced : pledgedSrcSize == %u \n", (U32)pledgedSrcSize);
    +    DEBUGLOG(5, "wlog %u \n", params.cParams.windowLog);
    +    return ZSTD_initCStream_internal(zcs, dict, dictSize, params, pledgedSrcSize);
     }
     
     size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
     {
         ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
    -    return ZSTD_initCStream_advanced(zcs, dict, dictSize, params, 0);
    +    return ZSTD_initCStream_internal(zcs, dict, dictSize, params, 0);
     }
     
     size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize)
     {
         ZSTD_parameters params = ZSTD_getParams(compressionLevel, pledgedSrcSize, 0);
    -    if (pledgedSrcSize) params.fParams.contentSizeFlag = 1;
    -    return ZSTD_initCStream_advanced(zcs, NULL, 0, params, pledgedSrcSize);
    +    params.fParams.contentSizeFlag = (pledgedSrcSize>0);
    +    return ZSTD_initCStream_internal(zcs, NULL, 0, params, pledgedSrcSize);
     }
     
     size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
     {
    -    return ZSTD_initCStream_usingDict(zcs, NULL, 0, compressionLevel);
    +    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, 0);
    +    return ZSTD_initCStream_internal(zcs, NULL, 0, params, 0);
     }
     
     size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
     {
    -    if (zcs==NULL) return 0;   /* support sizeof on NULL */
    -    return sizeof(*zcs) + ZSTD_sizeof_CCtx(zcs->cctx) + ZSTD_sizeof_CDict(zcs->cdictLocal) + zcs->outBuffSize + zcs->inBuffSize;
    +    return ZSTD_sizeof_CCtx(zcs);   /* same object */
     }
     
     /*======   Compression   ======*/
    @@ -3102,8 +3322,9 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
         char* const oend = ostart + *dstCapacityPtr;
         char* op = ostart;
     
    +    DEBUGLOG(5, "ZSTD_compressStream_generic \n");
         while (someMoreWork) {
    -        switch(zcs->stage)
    +        switch(zcs->streamStage)
             {
             case zcss_init: return ERROR(init_missing);   /* call ZBUFF_compressInit() first ! */
     
    @@ -3111,12 +3332,14 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
                 /* complete inBuffer */
                 {   size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
                     size_t const loaded = ZSTD_limitCopy(zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend-ip);
    +                DEBUGLOG(5, "loading %u/%u \n", (U32)loaded, (U32)toLoad);
                     zcs->inBuffPos += loaded;
                     ip += loaded;
                     if ( (zcs->inBuffPos==zcs->inToCompress) || (!flush && (toLoad != loaded)) ) {
                         someMoreWork = 0; break;  /* not enough input to get a full block : stop there, wait for more */
                 }   }
                 /* compress current block (note : this stage cannot be stopped in the middle) */
    +            DEBUGLOG(5, "stream compression stage (flush==%u)\n", flush);
                 {   void* cDst;
                     size_t cSize;
                     size_t const iSize = zcs->inBuffPos - zcs->inToCompress;
    @@ -3126,29 +3349,33 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
                     else
                         cDst = zcs->outBuff, oSize = zcs->outBuffSize;
                     cSize = (flush == zsf_end) ?
    -                        ZSTD_compressEnd(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize) :
    -                        ZSTD_compressContinue(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize);
    +                        ZSTD_compressEnd(zcs, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize) :
    +                        ZSTD_compressContinue(zcs, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize);
                     if (ZSTD_isError(cSize)) return cSize;
    +                DEBUGLOG(5, "cSize = %u \n", (U32)cSize);
                     if (flush == zsf_end) zcs->frameEnded = 1;
                     /* prepare next block */
                     zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
                     if (zcs->inBuffTarget > zcs->inBuffSize)
    -                    zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;   /* note : inBuffSize >= blockSize */
    +                    zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize;   /* note : inBuffTarget == blockSize <= inBuffSize */
    +                assert(zcs->inBuffTarget <= zcs->inBuffSize);
                     zcs->inToCompress = zcs->inBuffPos;
                     if (cDst == op) { op += cSize; break; }   /* no need to flush */
                     zcs->outBuffContentSize = cSize;
                     zcs->outBuffFlushedSize = 0;
    -                zcs->stage = zcss_flush;   /* pass-through to flush stage */
    +                zcs->streamStage = zcss_flush;   /* pass-through to flush stage */
                 }
     
             case zcss_flush:
    +            DEBUGLOG(5, "flush stage \n");
                 {   size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
                     size_t const flushed = ZSTD_limitCopy(op, oend-op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
    +                DEBUGLOG(5, "toFlush: %u  ; flushed: %u \n", (U32)toFlush, (U32)flushed);
                     op += flushed;
                     zcs->outBuffFlushedSize += flushed;
                     if (toFlush!=flushed) { someMoreWork = 0; break; }  /* dst too small to store flushed data : stop there */
                     zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
    -                zcs->stage = zcss_load;
    +                zcs->streamStage = zcss_load;
                     break;
                 }
     
    @@ -3163,7 +3390,6 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
     
         *srcSizePtr = ip - istart;
         *dstCapacityPtr = op - ostart;
    -    zcs->inputProcessed += *srcSizePtr;
         if (zcs->frameEnded) return 0;
         {   size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos;
             if (hintInSize==0) hintInSize = zcs->blockSize;
    @@ -3208,24 +3434,26 @@ size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
         BYTE* const oend = (BYTE*)(output->dst) + output->size;
         BYTE* op = ostart;
     
    -    if ((zcs->pledgedSrcSize) && (zcs->inputProcessed != zcs->pledgedSrcSize))
    -        return ERROR(srcSize_wrong);   /* pledgedSrcSize not respected */
    -
    -    if (zcs->stage != zcss_final) {
    +    DEBUGLOG(5, "ZSTD_endStream (dstCapacity : %u) \n", (U32)(oend-op));
    +    if (zcs->streamStage != zcss_final) {
             /* flush whatever remains */
             size_t srcSize = 0;
             size_t sizeWritten = output->size - output->pos;
    -        size_t const notEnded = ZSTD_compressStream_generic(zcs, ostart, &sizeWritten, &srcSize, &srcSize, zsf_end);  /* use a valid src address instead of NULL */
    +        size_t const notEnded = ZSTD_compressStream_generic(zcs, ostart, &sizeWritten,
    +                                     &srcSize /* use a valid src address instead of NULL */, &srcSize, zsf_end);
             size_t const remainingToFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
             op += sizeWritten;
             if (remainingToFlush) {
                 output->pos += sizeWritten;
    -            return remainingToFlush + ZSTD_BLOCKHEADERSIZE /* final empty block */ + (zcs->checksum * 4);
    +            return remainingToFlush + ZSTD_BLOCKHEADERSIZE /* final empty block */
    +                   + ((zcs->params.fParams.checksumFlag > 0) * 4) /* optional 32-bits checksum */;
             }
             /* create epilogue */
    -        zcs->stage = zcss_final;
    +        zcs->streamStage = zcss_final;
             zcs->outBuffContentSize = !notEnded ? 0 :
    -            ZSTD_compressEnd(zcs->cctx, zcs->outBuff, zcs->outBuffSize, NULL, 0);  /* write epilogue, including final empty block, into outBuff */
    +            /* write epilogue, including final empty block, into outBuff */
    +            ZSTD_compressEnd(zcs, zcs->outBuff, zcs->outBuffSize, NULL, 0);
    +        if (ZSTD_isError(zcs->outBuffContentSize)) return zcs->outBuffContentSize;
         }
     
         /* flush epilogue */
    @@ -3234,7 +3462,7 @@ size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
             op += flushed;
             zcs->outBuffFlushedSize += flushed;
             output->pos += op-ostart;
    -        if (toFlush==flushed) zcs->stage = zcss_init;  /* end reached */
    +        if (toFlush==flushed) zcs->streamStage = zcss_init;  /* end reached */
             return toFlush - flushed;
         }
     }
    @@ -3268,7 +3496,7 @@ static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEV
         { 22, 21, 21,  5,  5, 16, ZSTD_btlazy2 },  /* level 15 */
         { 23, 22, 22,  5,  5, 16, ZSTD_btlazy2 },  /* level 16 */
         { 23, 21, 22,  4,  5, 24, ZSTD_btopt   },  /* level 17 */
    -    { 23, 23, 22,  6,  5, 32, ZSTD_btopt   },  /* level 18 */
    +    { 23, 22, 22,  5,  4, 32, ZSTD_btopt   },  /* level 18 */
         { 23, 23, 22,  6,  3, 48, ZSTD_btopt   },  /* level 19 */
         { 25, 25, 23,  7,  3, 64, ZSTD_btultra  },  /* level 20 */
         { 26, 26, 23,  7,  3,256, ZSTD_btultra  },  /* level 21 */
    diff --git a/lib/compress/zstd_opt.h b/lib/compress/zstd_opt.h
    index 2c4014325..543761191 100644
    --- a/lib/compress/zstd_opt.h
    +++ b/lib/compress/zstd_opt.h
    @@ -360,6 +360,7 @@ static U32 ZSTD_BtGetAllMatches_selectMLS (
         default :
         case 4 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen);
         case 5 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen);
    +    case 7 :
         case 6 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen);
         }
     }
    @@ -387,6 +388,7 @@ static U32 ZSTD_BtGetAllMatches_selectMLS_extDict (
         default :
         case 4 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen);
         case 5 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen);
    +    case 7 :
         case 6 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen);
         }
     }
    diff --git a/lib/compress/zstdmt_compress.c b/lib/compress/zstdmt_compress.c
    index 45514a81a..fc7f52a29 100644
    --- a/lib/compress/zstdmt_compress.c
    +++ b/lib/compress/zstdmt_compress.c
    @@ -33,7 +33,7 @@
     #  include 
     #  include 
     #  include 
    -   static unsigned g_debugLevel = 3;
    +   static unsigned g_debugLevel = 5;
     #  define DEBUGLOGRAW(l, ...) if (l<=g_debugLevel) { fprintf(stderr, __VA_ARGS__); }
     #  define DEBUGLOG(l, ...) if (l<=g_debugLevel) { fprintf(stderr, __FILE__ ": "); fprintf(stderr, __VA_ARGS__); fprintf(stderr, " \n"); }
     
    @@ -44,26 +44,26 @@
         DEBUGLOGRAW(l, " \n");       \
     }
     
    -static unsigned long long GetCurrentClockTimeMicroseconds()
    +static unsigned long long GetCurrentClockTimeMicroseconds(void)
     {
        static clock_t _ticksPerSecond = 0;
        if (_ticksPerSecond <= 0) _ticksPerSecond = sysconf(_SC_CLK_TCK);
     
    -   struct tms junk; clock_t newTicks = (clock_t) times(&junk);
    -   return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond);
    +   { struct tms junk; clock_t newTicks = (clock_t) times(&junk);
    +     return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond); }
     }
     
     #define MUTEX_WAIT_TIME_DLEVEL 5
     #define PTHREAD_MUTEX_LOCK(mutex) \
     if (g_debugLevel>=MUTEX_WAIT_TIME_DLEVEL) { \
    -   unsigned long long beforeTime = GetCurrentClockTimeMicroseconds(); \
    -   pthread_mutex_lock(mutex); \
    -   unsigned long long afterTime = GetCurrentClockTimeMicroseconds(); \
    -   unsigned long long elapsedTime = (afterTime-beforeTime); \
    -   if (elapsedTime > 1000) {  /* or whatever threshold you like; I'm using 1 millisecond here */ \
    -      DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, "Thread took %llu microseconds to acquire mutex %s \n", \
    +    unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \
    +    pthread_mutex_lock(mutex); \
    +    {   unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \
    +        unsigned long long const elapsedTime = (afterTime-beforeTime); \
    +        if (elapsedTime > 1000) {  /* or whatever threshold you like; I'm using 1 millisecond here */ \
    +            DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, "Thread took %llu microseconds to acquire mutex %s \n", \
                    elapsedTime, #mutex); \
    -  } \
    +    }   } \
     } else pthread_mutex_lock(mutex);
     
     #else
    @@ -228,17 +228,19 @@ void ZSTDMT_compressChunk(void* jobDescription)
         ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription;
         const void* const src = (const char*)job->srcStart + job->dictSize;
         buffer_t const dstBuff = job->dstBuff;
    -    DEBUGLOG(3, "job (first:%u) (last:%u) : dictSize %u, srcSize %u", job->firstChunk, job->lastChunk, (U32)job->dictSize, (U32)job->srcSize);
    +    DEBUGLOG(3, "job (first:%u) (last:%u) : dictSize %u, srcSize %u",
    +                 job->firstChunk, job->lastChunk, (U32)job->dictSize, (U32)job->srcSize);
         if (job->cdict) {  /* should only happen for first segment */
    -        size_t const initError = ZSTD_compressBegin_usingCDict(job->cctx, job->cdict, job->fullFrameSize);
    +        size_t const initError = ZSTD_compressBegin_usingCDict_advanced(job->cctx, job->cdict, job->params.fParams, job->fullFrameSize);
             if (job->cdict) DEBUGLOG(3, "using CDict ");
             if (ZSTD_isError(initError)) { job->cSize = initError; goto _endJob; }
         } else {  /* srcStart points at reloaded section */
    -        size_t const dictModeError = ZSTD_setCCtxParameter(job->cctx, ZSTD_p_forceRawDict, 1);  /* Force loading dictionary in "content-only" mode (no header analysis) */
    -        size_t const initError = ZSTD_compressBegin_advanced(job->cctx, job->srcStart, job->dictSize, job->params, 0);
    -        if (ZSTD_isError(initError) || ZSTD_isError(dictModeError)) { job->cSize = initError; goto _endJob; }
    -        ZSTD_setCCtxParameter(job->cctx, ZSTD_p_forceWindow, 1);
    -    }
    +        if (!job->firstChunk) job->params.fParams.contentSizeFlag = 0;  /* ensure no srcSize control */
    +        {   size_t const dictModeError = ZSTD_setCCtxParameter(job->cctx, ZSTD_p_forceRawDict, 1);  /* Force loading dictionary in "content-only" mode (no header analysis) */
    +            size_t const initError = ZSTD_compressBegin_advanced(job->cctx, job->srcStart, job->dictSize, job->params, job->fullFrameSize);
    +            if (ZSTD_isError(initError) || ZSTD_isError(dictModeError)) { job->cSize = initError; goto _endJob; }
    +            ZSTD_setCCtxParameter(job->cctx, ZSTD_p_forceWindow, 1);
    +    }   }
         if (!job->firstChunk) {  /* flush and overwrite frame header when it's not first segment */
             size_t const hSize = ZSTD_compressContinue(job->cctx, dstBuff.start, dstBuff.size, src, 0);
             if (ZSTD_isError(hSize)) { job->cSize = hSize; goto _endJob; }
    @@ -250,7 +252,9 @@ void ZSTDMT_compressChunk(void* jobDescription)
         job->cSize = (job->lastChunk) ?
                      ZSTD_compressEnd     (job->cctx, dstBuff.start, dstBuff.size, src, job->srcSize) :
                      ZSTD_compressContinue(job->cctx, dstBuff.start, dstBuff.size, src, job->srcSize);
    -    DEBUGLOG(3, "compressed %u bytes into %u bytes   (first:%u) (last:%u)", (unsigned)job->srcSize, (unsigned)job->cSize, job->firstChunk, job->lastChunk);
    +    DEBUGLOG(3, "compressed %u bytes into %u bytes   (first:%u) (last:%u)",
    +                (unsigned)job->srcSize, (unsigned)job->cSize, job->firstChunk, job->lastChunk);
    +    DEBUGLOG(5, "dstBuff.size : %u ; => %s", (U32)dstBuff.size, ZSTD_getErrorName(job->cSize));
     
     _endJob:
         PTHREAD_MUTEX_LOCK(job->jobCompleted_mutex);
    @@ -388,14 +392,17 @@ size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
                                int compressionLevel)
     {
         ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, 0);
    +    U32 const overlapLog = (compressionLevel >= ZSTD_maxCLevel()) ? 0 : 3;
    +    size_t const overlapSize = (size_t)1 << (params.cParams.windowLog - overlapLog);
         size_t const chunkTargetSize = (size_t)1 << (params.cParams.windowLog + 2);
    -    unsigned const nbChunksMax = (unsigned)(srcSize / chunkTargetSize) + (srcSize < chunkTargetSize) /* min 1 */;
    +    unsigned const nbChunksMax = (unsigned)(srcSize / chunkTargetSize) + 1;
         unsigned nbChunks = MIN(nbChunksMax, mtctx->nbThreads);
         size_t const proposedChunkSize = (srcSize + (nbChunks-1)) / nbChunks;
         size_t const avgChunkSize = ((proposedChunkSize & 0x1FFFF) < 0xFFFF) ? proposedChunkSize + 0xFFFF : proposedChunkSize;   /* avoid too small last block */
         size_t remainingSrcSize = srcSize;
         const char* const srcStart = (const char*)src;
    -    size_t frameStartPos = 0;
    +    unsigned const compressWithinDst = (dstCapacity >= ZSTD_compressBound(srcSize)) ? nbChunks : (unsigned)(dstCapacity / ZSTD_compressBound(avgChunkSize));  /* presumes avgChunkSize >= 256 KB, which should be the case */
    +    size_t frameStartPos = 0, dstBufferPos = 0;
     
         DEBUGLOG(3, "windowLog : %2u => chunkTargetSize : %u bytes  ", params.cParams.windowLog, (U32)chunkTargetSize);
         DEBUGLOG(2, "nbChunks  : %2u   (chunkSize : %u bytes)   ", nbChunks, (U32)avgChunkSize);
    @@ -409,10 +416,11 @@ size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
         {   unsigned u;
             for (u=0; ubuffPool, dstBufferCapacity) : dstAsBuffer;
    +            size_t const dstBufferCapacity = ZSTD_compressBound(chunkSize);
    +            buffer_t const dstAsBuffer = { (char*)dst + dstBufferPos, dstBufferCapacity };
    +            buffer_t const dstBuffer = u < compressWithinDst ? dstAsBuffer : ZSTDMT_getBuffer(mtctx->buffPool, dstBufferCapacity);
                 ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(mtctx->cctxPool);
    +            size_t dictSize = u ? overlapSize : 0;
     
                 if ((cctx==NULL) || (dstBuffer.start==NULL)) {
                     mtctx->jobs[u].cSize = ERROR(memory_allocation);   /* job result */
    @@ -421,7 +429,8 @@ size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
                     break;   /* let's wait for previous jobs to complete, but don't start new ones */
                 }
     
    -            mtctx->jobs[u].srcStart = srcStart + frameStartPos;
    +            mtctx->jobs[u].srcStart = srcStart + frameStartPos - dictSize;
    +            mtctx->jobs[u].dictSize = dictSize;
                 mtctx->jobs[u].srcSize = chunkSize;
                 mtctx->jobs[u].fullFrameSize = srcSize;
                 mtctx->jobs[u].params = params;
    @@ -438,6 +447,7 @@ size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
                 POOL_add(mtctx->factory, ZSTDMT_compressChunk, &mtctx->jobs[u]);
     
                 frameStartPos += chunkSize;
    +            dstBufferPos += dstBufferCapacity;
                 remainingSrcSize -= chunkSize;
         }   }
         /* note : since nbChunks <= nbThreads, all jobs should be running immediately in parallel */
    @@ -461,8 +471,10 @@ size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
                     if (ZSTD_isError(cSize)) error = cSize;
                     if ((!error) && (dstPos + cSize > dstCapacity)) error = ERROR(dstSize_tooSmall);
                     if (chunkID) {   /* note : chunk 0 is already written directly into dst */
    -                    if (!error) memcpy((char*)dst + dstPos, mtctx->jobs[chunkID].dstBuff.start, cSize);
    -                    ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->jobs[chunkID].dstBuff);
    +                    if (!error)
    +                        memmove((char*)dst + dstPos, mtctx->jobs[chunkID].dstBuff.start, cSize);  /* may overlap if chunk decompressed within dst */
    +                    if (chunkID >= compressWithinDst)   /* otherwise, it decompresses within dst */
    +                        ZSTDMT_releaseBuffer(mtctx->buffPool, mtctx->jobs[chunkID].dstBuff);
                         mtctx->jobs[chunkID].dstBuff = g_nullBuffer;
                     }
                     dstPos += cSize ;
    @@ -509,7 +521,7 @@ static size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
         if (updateDict) {
             ZSTD_freeCDict(zcs->cdict); zcs->cdict = NULL;
             if (dict && dictSize) {
    -            zcs->cdict = ZSTD_createCDict_advanced(dict, dictSize, 0, params, cmem);
    +            zcs->cdict = ZSTD_createCDict_advanced(dict, dictSize, 0, params.cParams, cmem);
                 if (zcs->cdict == NULL) return ERROR(memory_allocation);
         }   }
         zcs->frameContentSize = pledgedSrcSize;
    diff --git a/lib/decompress/zstd_decompress.c b/lib/decompress/zstd_decompress.c
    index 482c334ff..379842b57 100644
    --- a/lib/decompress/zstd_decompress.c
    +++ b/lib/decompress/zstd_decompress.c
    @@ -105,7 +105,7 @@ struct ZSTD_DCtx_s
         const void* vBase;            /* virtual start of previous segment if it was just before current one */
         const void* dictEnd;          /* end of previous segment */
         size_t expected;
    -    ZSTD_frameParams fParams;
    +    ZSTD_frameHeader fParams;
         blockType_e bType;   /* used in ZSTD_decompressContinue(), to transfer blockType between header decoding and block decoding stages */
         ZSTD_dStage stage;
         U32 litEntropy;
    @@ -177,30 +177,6 @@ void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
         memcpy(dstDCtx, srcDCtx, sizeof(ZSTD_DCtx) - workSpaceSize);  /* no need to copy workspace */
     }
     
    -#if 0
    -/* deprecated */
    -static void ZSTD_refDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
    -{
    -    ZSTD_decompressBegin(dstDCtx);  /* init */
    -    if (srcDCtx) {   /* support refDCtx on NULL */
    -        dstDCtx->dictEnd = srcDCtx->dictEnd;
    -        dstDCtx->vBase = srcDCtx->vBase;
    -        dstDCtx->base = srcDCtx->base;
    -        dstDCtx->previousDstEnd = srcDCtx->previousDstEnd;
    -        dstDCtx->dictID = srcDCtx->dictID;
    -        dstDCtx->litEntropy = srcDCtx->litEntropy;
    -        dstDCtx->fseEntropy = srcDCtx->fseEntropy;
    -        dstDCtx->LLTptr = srcDCtx->entropy.LLTable;
    -        dstDCtx->MLTptr = srcDCtx->entropy.MLTable;
    -        dstDCtx->OFTptr = srcDCtx->entropy.OFTable;
    -        dstDCtx->HUFptr = srcDCtx->entropy.hufTable;
    -        dstDCtx->entropy.rep[0] = srcDCtx->entropy.rep[0];
    -        dstDCtx->entropy.rep[1] = srcDCtx->entropy.rep[1];
    -        dstDCtx->entropy.rep[2] = srcDCtx->entropy.rep[2];
    -    }
    -}
    -#endif
    -
     static void ZSTD_refDDict(ZSTD_DCtx* dstDCtx, const ZSTD_DDict* ddict);
     
     
    @@ -243,22 +219,23 @@ static size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
     }
     
     
    -/** ZSTD_getFrameParams() :
    +/** ZSTD_getFrameHeader() :
     *   decode Frame Header, or require larger `srcSize`.
    -*   @return : 0, `fparamsPtr` is correctly filled,
    +*   @return : 0, `zfhPtr` is correctly filled,
     *            >0, `srcSize` is too small, result is expected `srcSize`,
     *             or an error code, which can be tested using ZSTD_isError() */
    -size_t ZSTD_getFrameParams(ZSTD_frameParams* fparamsPtr, const void* src, size_t srcSize)
    +size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize)
     {
         const BYTE* ip = (const BYTE*)src;
    -
         if (srcSize < ZSTD_frameHeaderSize_prefix) return ZSTD_frameHeaderSize_prefix;
    +
         if (MEM_readLE32(src) != ZSTD_MAGICNUMBER) {
             if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
    +            /* skippable frame */
                 if (srcSize < ZSTD_skippableHeaderSize) return ZSTD_skippableHeaderSize; /* magic number + skippable frame length */
    -            memset(fparamsPtr, 0, sizeof(*fparamsPtr));
    -            fparamsPtr->frameContentSize = MEM_readLE32((const char *)src + 4);
    -            fparamsPtr->windowSize = 0; /* windowSize==0 means a frame is skippable */
    +            memset(zfhPtr, 0, sizeof(*zfhPtr));
    +            zfhPtr->frameContentSize = MEM_readLE32((const char *)src + 4);
    +            zfhPtr->windowSize = 0; /* windowSize==0 means a frame is skippable */
                 return 0;
             }
             return ERROR(prefix_unknown);
    @@ -305,10 +282,10 @@ size_t ZSTD_getFrameParams(ZSTD_frameParams* fparamsPtr, const void* src, size_t
             }
             if (!windowSize) windowSize = (U32)frameContentSize;
             if (windowSize > windowSizeMax) return ERROR(frameParameter_windowTooLarge);
    -        fparamsPtr->frameContentSize = frameContentSize;
    -        fparamsPtr->windowSize = windowSize;
    -        fparamsPtr->dictID = dictID;
    -        fparamsPtr->checksumFlag = checksumFlag;
    +        zfhPtr->frameContentSize = frameContentSize;
    +        zfhPtr->windowSize = windowSize;
    +        zfhPtr->dictID = dictID;
    +        zfhPtr->checksumFlag = checksumFlag;
         }
         return 0;
     }
    @@ -320,15 +297,14 @@ size_t ZSTD_getFrameParams(ZSTD_frameParams* fparamsPtr, const void* src, size_t
     *             - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
     unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
     {
    -#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT==1)
    +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
         if (ZSTD_isLegacy(src, srcSize)) {
             unsigned long long const ret = ZSTD_getDecompressedSize_legacy(src, srcSize);
             return ret == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : ret;
         }
     #endif
    -    {
    -        ZSTD_frameParams fParams;
    -        if (ZSTD_getFrameParams(&fParams, src, srcSize) != 0) return ZSTD_CONTENTSIZE_ERROR;
    +    {   ZSTD_frameHeader fParams;
    +        if (ZSTD_getFrameHeader(&fParams, src, srcSize) != 0) return ZSTD_CONTENTSIZE_ERROR;
             if (fParams.windowSize == 0) {
                 /* Either skippable or empty frame, size == 0 either way */
                 return 0;
    @@ -413,7 +389,7 @@ unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize)
     *   @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
     static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize)
     {
    -    size_t const result = ZSTD_getFrameParams(&(dctx->fParams), src, headerSize);
    +    size_t const result = ZSTD_getFrameHeader(&(dctx->fParams), src, headerSize);
         if (ZSTD_isError(result)) return result;  /* invalid header */
         if (result>0) return ERROR(srcSize_wrong);   /* headerSize too small */
         if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID)) return ERROR(dictionary_wrong);
    @@ -431,7 +407,8 @@ typedef struct
     
     /*! ZSTD_getcBlockSize() :
     *   Provides the size of compressed block from block header `src` */
    -size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bpPtr)
    +size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
    +                          blockProperties_t* bpPtr)
     {
         if (srcSize < ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
         {   U32 const cBlockHeader = MEM_readLE24(src);
    @@ -446,7 +423,8 @@ size_t ZSTD_getcBlockSize(const void* src, size_t srcSize, blockProperties_t* bp
     }
     
     
    -static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
    +static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
    +                          const void* src, size_t srcSize)
     {
         if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall);
         memcpy(dst, src, srcSize);
    @@ -454,7 +432,9 @@ static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity, const void* src,
     }
     
     
    -static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize, size_t regenSize)
    +static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
    +                         const void* src, size_t srcSize,
    +                               size_t regenSize)
     {
         if (srcSize != 1) return ERROR(srcSize_wrong);
         if (regenSize > dstCapacity) return ERROR(dstSize_tooSmall);
    @@ -595,176 +575,70 @@ typedef union {
         U32 alignedBy4;
     } FSE_decode_t4;
     
    +/* Default FSE distribution table for Literal Lengths */
     static const FSE_decode_t4 LL_defaultDTable[(1< (size_t)(oLitEnd - base)) {
    -        /* offset beyond prefix */
    +        /* offset beyond prefix -> go into extDict */
             if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
    -        match += (dictEnd-base);
    +        match = dictEnd + (match - base);
             if (match + sequence.matchLength <= dictEnd) {
                 memmove(oLitEnd, match, sequence.matchLength);
                 return sequenceLength;
    @@ -1156,21 +1033,26 @@ FORCE_INLINE seq_t ZSTD_decodeSequenceLong_generic(seqState_t* seqState, int con
         U32 const totalBits = llBits+mlBits+ofBits;
     
         static const U32 LL_base[MaxLL+1] = {
    -                             0,  1,  2,  3,  4,  5,  6,  7,  8,  9,   10,    11,    12,    13,    14,     15,
    -                            16, 18, 20, 22, 24, 28, 32, 40, 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
    +                             0,  1,    2,     3,     4,     5,     6,      7,
    +                             8,  9,   10,    11,    12,    13,    14,     15,
    +                            16, 18,   20,    22,    24,    28,    32,     40,
    +                            48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
                                 0x2000, 0x4000, 0x8000, 0x10000 };
     
         static const U32 ML_base[MaxML+1] = {
    -                             3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13,   14,    15,    16,    17,    18,
    -                            19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,   30,    31,    32,    33,    34,
    -                            35, 37, 39, 41, 43, 47, 51, 59, 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
    +                             3,  4,  5,    6,     7,     8,     9,    10,
    +                            11, 12, 13,   14,    15,    16,    17,    18,
    +                            19, 20, 21,   22,    23,    24,    25,    26,
    +                            27, 28, 29,   30,    31,    32,    33,    34,
    +                            35, 37, 39,   41,    43,    47,    51,    59,
    +                            67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
                                 0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };
     
         static const U32 OF_base[MaxOff+1] = {
    -                             0,        1,       1,       5,     0xD,     0x1D,     0x3D,     0x7D,
    -                             0xFD,   0x1FD,   0x3FD,   0x7FD,   0xFFD,   0x1FFD,   0x3FFD,   0x7FFD,
    -                             0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
    -                             0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD };
    +                     0,        1,       1,       5,     0xD,     0x1D,     0x3D,     0x7D,
    +                     0xFD,   0x1FD,   0x3FD,   0x7FD,   0xFFD,   0x1FFD,   0x3FFD,   0x7FFD,
    +                     0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
    +                     0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD };
     
         /* sequence */
         {   size_t offset;
    @@ -1356,7 +1238,7 @@ static size_t ZSTD_decompressSequencesLong(
             { U32 i; for (i=0; ientropy.rep[i]; }
             seqState.base = base;
             seqState.pos = (size_t)(op-base);
    -        seqState.gotoDict = (iPtrDiff)(dictEnd - base);
    +        seqState.gotoDict = (uPtrDiff)dictEnd - (uPtrDiff)base; /* cast to avoid undefined behaviour */
             CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
             FSE_initDState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
             FSE_initDState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
    @@ -1472,23 +1354,23 @@ size_t ZSTD_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t len
      *  @return : the compressed size of the frame starting at `src` */
     size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
     {
    -#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT==1)
    +#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
         if (ZSTD_isLegacy(src, srcSize)) return ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
     #endif
         if (srcSize >= ZSTD_skippableHeaderSize &&
    -            (MEM_readLE32(src) & 0xFFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
    +            (MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
             return ZSTD_skippableHeaderSize + MEM_readLE32((const BYTE*)src + 4);
         } else {
             const BYTE* ip = (const BYTE*)src;
             const BYTE* const ipstart = ip;
             size_t remainingSize = srcSize;
    -        ZSTD_frameParams fParams;
    +        ZSTD_frameHeader fParams;
     
             size_t const headerSize = ZSTD_frameHeaderSize(ip, remainingSize);
             if (ZSTD_isError(headerSize)) return headerSize;
     
             /* Frame Header */
    -        {   size_t const ret = ZSTD_getFrameParams(&fParams, ip, remainingSize);
    +        {   size_t const ret = ZSTD_getFrameHeader(&fParams, ip, remainingSize);
                 if (ZSTD_isError(ret)) return ret;
                 if (ret > 0) return ERROR(srcSize_wrong);
             }
    @@ -2087,6 +1969,14 @@ size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
         }
     }
     
    +/*! ZSTD_estimateDDictSize() :
    + *  Estimate amount of memory that will be needed to create a dictionary for decompression.
    + *  Note : if dictionary is created "byReference", reduce this amount by dictSize */
    +size_t ZSTD_estimateDDictSize(size_t dictSize)
    +{
    +    return dictSize + sizeof(ZSTD_DDict);
    +}
    +
     size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
     {
         if (ddict==NULL) return 0;   /* support sizeof on NULL */
    @@ -2115,19 +2005,22 @@ unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
     }
     
     /*! ZSTD_getDictID_fromFrame() :
    - *  Provides the dictID required to decompressed the frame stored within `src`.
    + *  Provides the dictID required to decompresse frame stored within `src`.
      *  If @return == 0, the dictID could not be decoded.
      *  This could for one of the following reasons :
    - *  - The frame does not require a dictionary to be decoded (most common case).
    - *  - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden information.
    + *  - The frame does not require a dictionary (most common case).
    + *  - The frame was built with dictID intentionally removed.
    + *    Needed dictionary is a hidden information.
      *    Note : this use case also happens when using a non-conformant dictionary.
    - *  - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
    + *  - `srcSize` is too small, and as a result, frame header could not be decoded.
    + *    Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`.
      *  - This is not a Zstandard frame.
    - *  When identifying the exact failure cause, it's possible to used ZSTD_getFrameParams(), which will provide a more precise error code. */
    + *  When identifying the exact failure cause, it's possible to use
    + *  ZSTD_getFrameHeader(), which will provide a more precise error code. */
     unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize)
     {
    -    ZSTD_frameParams zfp = { 0 , 0 , 0 , 0 };
    -    size_t const hError = ZSTD_getFrameParams(&zfp, src, srcSize);
    +    ZSTD_frameHeader zfp = { 0 , 0 , 0 , 0 };
    +    size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize);
         if (ZSTD_isError(hError)) return 0;
         return zfp.dictID;
     }
    @@ -2160,7 +2053,7 @@ struct ZSTD_DStream_s {
         ZSTD_DCtx* dctx;
         ZSTD_DDict* ddictLocal;
         const ZSTD_DDict* ddict;
    -    ZSTD_frameParams fParams;
    +    ZSTD_frameHeader fParams;
         ZSTD_dStreamStage stage;
         char*  inBuff;
         size_t inBuffSize;
    @@ -2209,9 +2102,13 @@ size_t ZSTD_freeDStream(ZSTD_DStream* zds)
         if (zds==NULL) return 0;   /* support free on null */
         {   ZSTD_customMem const cMem = zds->customMem;
             ZSTD_freeDCtx(zds->dctx);
    +        zds->dctx = NULL;
             ZSTD_freeDDict(zds->ddictLocal);
    +        zds->ddictLocal = NULL;
             ZSTD_free(zds->inBuff, cMem);
    +        zds->inBuff = NULL;
             ZSTD_free(zds->outBuff, cMem);
    +        zds->outBuff = NULL;
     #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
             if (zds->legacyContext)
                 ZSTD_freeLegacyStreamContext(zds->legacyContext, zds->previousLegacyVersion);
    @@ -2247,7 +2144,9 @@ size_t ZSTD_initDStream(ZSTD_DStream* zds)
         return ZSTD_initDStream_usingDict(zds, NULL, 0);
     }
     
    -size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict)  /**< note : ddict will just be referenced, and must outlive decompression session */
    +/* ZSTD_initDStream_usingDDict() :
    + * ddict will just be referenced, and must outlive decompression session */
    +size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict)
     {
         size_t const initResult = ZSTD_initDStream(zds);
         zds->ddict = ddict;
    @@ -2277,8 +2176,20 @@ size_t ZSTD_setDStreamParameter(ZSTD_DStream* zds,
     
     size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds)
     {
    -    if (zds==NULL) return 0;   /* support sizeof on NULL */
    -    return sizeof(*zds) + ZSTD_sizeof_DCtx(zds->dctx) + ZSTD_sizeof_DDict(zds->ddictLocal) + zds->inBuffSize + zds->outBuffSize;
    +    if (zds==NULL) return 0;   /* support sizeof NULL */
    +    return sizeof(*zds)
    +           + ZSTD_sizeof_DCtx(zds->dctx)
    +           + ZSTD_sizeof_DDict(zds->ddictLocal)
    +           + zds->inBuffSize + zds->outBuffSize;
    +}
    +
    +size_t ZSTD_estimateDStreamSize(ZSTD_frameHeader fHeader)
    +{
    +    size_t const windowSize = fHeader.windowSize;
    +    size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_ABSOLUTEMAX);
    +    size_t const inBuffSize = blockSize;  /* no block can be larger */
    +    size_t const outBuffSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2);
    +    return sizeof(ZSTD_DStream) + ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize;
     }
     
     
    @@ -2315,7 +2226,7 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
                 /* fall-through */
     
             case zdss_loadHeader :
    -            {   size_t const hSize = ZSTD_getFrameParams(&zds->fParams, zds->headerBuffer, zds->lhSize);
    +            {   size_t const hSize = ZSTD_getFrameHeader(&zds->fParams, zds->headerBuffer, zds->lhSize);
                     if (ZSTD_isError(hSize))
     #if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
                     {   U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart);
    @@ -2376,15 +2287,17 @@ size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inB
                     zds->blockSize = blockSize;
                     if (zds->inBuffSize < blockSize) {
                         ZSTD_free(zds->inBuff, zds->customMem);
    -                    zds->inBuffSize = blockSize;
    +                    zds->inBuffSize = 0;
                         zds->inBuff = (char*)ZSTD_malloc(blockSize, zds->customMem);
                         if (zds->inBuff == NULL) return ERROR(memory_allocation);
    +                    zds->inBuffSize = blockSize;
                     }
                     if (zds->outBuffSize < neededOutSize) {
                         ZSTD_free(zds->outBuff, zds->customMem);
    -                    zds->outBuffSize = neededOutSize;
    +                    zds->outBuffSize = 0;
                         zds->outBuff = (char*)ZSTD_malloc(neededOutSize, zds->customMem);
                         if (zds->outBuff == NULL) return ERROR(memory_allocation);
    +                    zds->outBuffSize = neededOutSize;
                 }   }
                 zds->stage = zdss_read;
                 /* pass-through */
    diff --git a/lib/dictBuilder/cover.c b/lib/dictBuilder/cover.c
    index 3a7b9f39f..1863c8f34 100644
    --- a/lib/dictBuilder/cover.c
    +++ b/lib/dictBuilder/cover.c
    @@ -59,8 +59,6 @@ static int g_displayLevel = 2;
         if ((clock() - g_time > refreshRate) || (displayLevel >= 4)) {             \
           g_time = clock();                                                        \
           DISPLAY(__VA_ARGS__);                                                    \
    -      if (displayLevel >= 4)                                                   \
    -        fflush(stdout);                                                        \
         }                                                                          \
       }
     #define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__)
    @@ -236,10 +234,22 @@ static size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) {
      * Returns 1 if the dmer at lp is greater than the dmer at rp.
      */
     static int COVER_cmp(COVER_ctx_t *ctx, const void *lp, const void *rp) {
    -  const U32 lhs = *(const U32 *)lp;
    -  const U32 rhs = *(const U32 *)rp;
    +  U32 const lhs = *(U32 const *)lp;
    +  U32 const rhs = *(U32 const *)rp;
       return memcmp(ctx->samples + lhs, ctx->samples + rhs, ctx->d);
     }
    +/**
    + * Faster version for d <= 8.
    + */
    +static int COVER_cmp8(COVER_ctx_t *ctx, const void *lp, const void *rp) {
    +  U64 const mask = (ctx->d == 8) ? (U64)-1 : (((U64)1 << (8 * ctx->d)) - 1);
    +  U64 const lhs = MEM_readLE64(ctx->samples + *(U32 const *)lp) & mask;
    +  U64 const rhs = MEM_readLE64(ctx->samples + *(U32 const *)rp) & mask;
    +  if (lhs < rhs) {
    +    return -1;
    +  }
    +  return (lhs > rhs);
    +}
     
     /**
      * Same as COVER_cmp() except ties are broken by pointer value
    @@ -253,6 +263,16 @@ static int COVER_strict_cmp(const void *lp, const void *rp) {
       }
       return result;
     }
    +/**
    + * Faster version for d <= 8.
    + */
    +static int COVER_strict_cmp8(const void *lp, const void *rp) {
    +  int result = COVER_cmp8(g_ctx, lp, rp);
    +  if (result == 0) {
    +    result = lp < rp ? -1 : 1;
    +  }
    +  return result;
    +}
     
     /**
      * Returns the first pointer in [first, last) whose element does not compare
    @@ -508,7 +528,7 @@ static int COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
       const BYTE *const samples = (const BYTE *)samplesBuffer;
       const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
       /* Checks */
    -  if (totalSamplesSize < d ||
    +  if (totalSamplesSize < MAX(d, sizeof(U64)) ||
           totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) {
         DISPLAYLEVEL(1, "Total samples size is too large, maximum size is %u MB\n",
                      (COVER_MAX_SAMPLES_SIZE >> 20));
    @@ -522,7 +542,7 @@ static int COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
       ctx->samplesSizes = samplesSizes;
       ctx->nbSamples = nbSamples;
       /* Partial suffix array */
    -  ctx->suffixSize = totalSamplesSize - d + 1;
    +  ctx->suffixSize = totalSamplesSize - MAX(d, sizeof(U64)) + 1;
       ctx->suffix = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
       /* Maps index to the dmerID */
       ctx->dmerAt = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
    @@ -556,7 +576,8 @@ static int COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
         }
         /* qsort doesn't take an opaque pointer, so pass as a global */
         g_ctx = ctx;
    -    qsort(ctx->suffix, ctx->suffixSize, sizeof(U32), &COVER_strict_cmp);
    +    qsort(ctx->suffix, ctx->suffixSize, sizeof(U32),
    +          (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
       }
       DISPLAYLEVEL(2, "Computing frequencies\n");
       /* For each dmer group (group of positions with the same first d bytes):
    @@ -566,8 +587,8 @@ static int COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
        * 2. We calculate how many samples the dmer occurs in and save it in
        *    freqs[dmerId].
        */
    -  COVER_groupBy(ctx->suffix, ctx->suffixSize, sizeof(U32), ctx, &COVER_cmp,
    -                &COVER_group);
    +  COVER_groupBy(ctx->suffix, ctx->suffixSize, sizeof(U32), ctx,
    +                (ctx->d <= 8 ? &COVER_cmp8 : &COVER_cmp), &COVER_group);
       ctx->freqs = ctx->suffix;
       ctx->suffix = NULL;
       return 1;
    @@ -918,10 +939,10 @@ ZDICTLIB_API size_t COVER_optimizeTrainFromBuffer(void *dictBuffer,
       /* constants */
       const unsigned nbThreads = parameters->nbThreads;
       const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
    -  const unsigned kMaxD = parameters->d == 0 ? 16 : parameters->d;
    -  const unsigned kMinK = parameters->k == 0 ? kMaxD : parameters->k;
    -  const unsigned kMaxK = parameters->k == 0 ? 2048 : parameters->k;
    -  const unsigned kSteps = parameters->steps == 0 ? 32 : parameters->steps;
    +  const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d;
    +  const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k;
    +  const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k;
    +  const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps;
       const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1);
       const unsigned kIterations =
           (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
    diff --git a/lib/dictBuilder/zdict.c b/lib/dictBuilder/zdict.c
    index 0757dbbbb..179e02eff 100644
    --- a/lib/dictBuilder/zdict.c
    +++ b/lib/dictBuilder/zdict.c
    @@ -11,8 +11,9 @@
     /*-**************************************
     *  Tuning parameters
     ****************************************/
    +#define MINRATIO 4   /* minimum nb of apparition to be selected in dictionary */
     #define ZDICT_MAX_SAMPLES_SIZE (2000U << 20)
    -#define ZDICT_MIN_SAMPLES_SIZE 512
    +#define ZDICT_MIN_SAMPLES_SIZE (ZDICT_CONTENTSIZE_MIN * MINRATIO)
     
     
     /*-**************************************
    @@ -59,11 +60,8 @@
     
     #define NOISELENGTH 32
     
    -#define MINRATIO 4
     static const int g_compressionLevel_default = 6;
     static const U32 g_selectivity_default = 9;
    -static const size_t g_provision_entropySize = 200;
    -static const size_t g_min_fast_dictContent = 192;
     
     
     /*-*************************************
    @@ -308,10 +306,10 @@ static dictItem ZDICT_analyzePos(
             /* look backward */
             length = MINMATCHLENGTH;
             while ((length >= MINMATCHLENGTH) & (start > 0)) {
    -        	length = ZDICT_count(b + pos, b + suffix[start - 1]);
    -        	if (length >= LLIMIT) length = LLIMIT - 1;
    -        	lengthList[length]++;
    -        	if (length >= MINMATCHLENGTH) start--;
    +            length = ZDICT_count(b + pos, b + suffix[start - 1]);
    +            if (length >= LLIMIT) length = LLIMIT - 1;
    +            lengthList[length]++;
    +            if (length >= MINMATCHLENGTH) start--;
             }
     
             /* largest useful length */
    @@ -363,21 +361,35 @@ static dictItem ZDICT_analyzePos(
     }
     
     
    +static int isIncluded(const void* in, const void* container, size_t length)
    +{
    +    const char* const ip = (const char*) in;
    +    const char* const into = (const char*) container;
    +    size_t u;
    +
    +    for (u=0; upos;
         const U32 eltEnd = elt.pos + elt.length;
    +    const char* const buf = (const char*) buffer;
     
         /* tail overlap */
         U32 u; for (u=1; u elt.pos) && (table[u].pos <= eltEnd)) {  /* overlap, existing > new */
                 /* append */
    -            U32 addedLength = table[u].pos - elt.pos;
    +            U32 const addedLength = table[u].pos - elt.pos;
                 table[u].length += addedLength;
                 table[u].pos = elt.pos;
                 table[u].savings += elt.savings * addedLength / elt.length;   /* rough approx */
    @@ -393,9 +405,10 @@ static U32 ZDICT_checkMerge(dictItem* table, dictItem elt, U32 eltNbToSkip)
         /* front overlap */
         for (u=1; u= elt.pos) && (table[u].pos < elt.pos)) {  /* overlap, existing < new */
                 /* append */
    -            int addedLength = (int)eltEnd - (table[u].pos + table[u].length);
    +            int const addedLength = (int)eltEnd - (table[u].pos + table[u].length);
                 table[u].savings += elt.length / 8;    /* rough approx bonus */
                 if (addedLength > 0) {   /* otherwise, elt fully included into existing */
                     table[u].length += addedLength;
    @@ -407,7 +420,18 @@ static U32 ZDICT_checkMerge(dictItem* table, dictItem elt, U32 eltNbToSkip)
                     table[u] = table[u-1], u--;
                 table[u] = elt;
                 return u;
    -    }   }
    +        }
    +
    +        if (MEM_read64(buf + table[u].pos) == MEM_read64(buf + elt.pos + 1)) {
    +            if (isIncluded(buf + table[u].pos, buf + elt.pos + 1, table[u].length)) {
    +                size_t const addedLength = MAX( (int)elt.length - (int)table[u].length , 1 );
    +                table[u].pos = elt.pos;
    +                table[u].savings += (U32)(elt.savings * addedLength / elt.length);
    +                table[u].length = MIN(elt.length, table[u].length + 1);
    +                return u;
    +            }
    +        }
    +    }
     
         return 0;
     }
    @@ -425,14 +449,14 @@ static void ZDICT_removeDictItem(dictItem* table, U32 id)
     }
     
     
    -static void ZDICT_insertDictItem(dictItem* table, U32 maxSize, dictItem elt)
    +static void ZDICT_insertDictItem(dictItem* table, U32 maxSize, dictItem elt, const void* buffer)
     {
         /* merge if possible */
    -    U32 mergeId = ZDICT_checkMerge(table, elt, 0);
    +    U32 mergeId = ZDICT_tryMerge(table, elt, 0, buffer);
         if (mergeId) {
             U32 newMerge = 1;
             while (newMerge) {
    -            newMerge = ZDICT_checkMerge(table, table[mergeId], mergeId);
    +            newMerge = ZDICT_tryMerge(table, table[mergeId], mergeId, buffer);
                 if (newMerge) ZDICT_removeDictItem(table, mergeId);
                 mergeId = newMerge;
             }
    @@ -480,7 +504,7 @@ static size_t ZDICT_trainBuffer(dictItem* dictList, U32 dictListSize,
     #   define DISPLAYUPDATE(l, ...) if (notificationLevel>=l) { \
                 if (ZDICT_clockSpan(displayClock) > refreshRate)  \
                 { displayClock = clock(); DISPLAY(__VA_ARGS__); \
    -            if (notificationLevel>=4) fflush(stdout); } }
    +            if (notificationLevel>=4) fflush(stderr); } }
     
         /* init */
         DISPLAYLEVEL(2, "\r%70s\r", "");   /* clean display line */
    @@ -521,7 +545,7 @@ static size_t ZDICT_trainBuffer(dictItem* dictList, U32 dictListSize,
                 if (doneMarks[cursor]) { cursor++; continue; }
                 solution = ZDICT_analyzePos(doneMarks, suffix, reverseSuffix[cursor], buffer, minRatio, notificationLevel);
                 if (solution.length==0) { cursor++; continue; }
    -            ZDICT_insertDictItem(dictList, dictListSize, solution);
    +            ZDICT_insertDictItem(dictList, dictListSize, solution, buffer);
                 cursor += solution.length;
                 DISPLAYUPDATE(2, "\r%4.2f %% \r", (double)cursor / bufferSize * 100);
         }   }
    @@ -683,19 +707,19 @@ static size_t ZDICT_analyzeEntropy(void*  dstBuffer, size_t maxDstSize,
             goto _cleanup;
         }
         if (offcodeMax>OFFCODE_MAX) { eSize = ERROR(dictionary_wrong); goto _cleanup; }   /* too large dictionary */
    -    for (u=0; u<256; u++) countLit[u]=1;   /* any character must be described */
    -    for (u=0; u<=offcodeMax; u++) offcodeCount[u]=1;
    -    for (u=0; u<=MaxML; u++) matchLengthCount[u]=1;
    -    for (u=0; u<=MaxLL; u++) litLengthCount[u]=1;
    +    for (u=0; u<256; u++) countLit[u] = 1;   /* any character must be described */
    +    for (u=0; u<=offcodeMax; u++) offcodeCount[u] = 1;
    +    for (u=0; u<=MaxML; u++) matchLengthCount[u] = 1;
    +    for (u=0; u<=MaxLL; u++) litLengthCount[u] = 1;
         memset(repOffset, 0, sizeof(repOffset));
         repOffset[1] = repOffset[4] = repOffset[8] = 1;
         memset(bestRepOffset, 0, sizeof(bestRepOffset));
    -    if (compressionLevel==0) compressionLevel=g_compressionLevel_default;
    +    if (compressionLevel==0) compressionLevel = g_compressionLevel_default;
         params = ZSTD_getParams(compressionLevel, averageSampleSize, dictBufferSize);
         {   size_t const beginResult = ZSTD_compressBegin_advanced(esr.ref, dictBuffer, dictBufferSize, params, 0);
    -            if (ZSTD_isError(beginResult)) {
    +        if (ZSTD_isError(beginResult)) {
    +            DISPLAYLEVEL(1, "error : ZSTD_compressBegin_advanced() failed : %s \n", ZSTD_getErrorName(beginResult));
                 eSize = ERROR(GENERIC);
    -            DISPLAYLEVEL(1, "error : ZSTD_compressBegin_advanced failed \n");
                 goto _cleanup;
         }   }
     
    @@ -812,7 +836,6 @@ static size_t ZDICT_analyzeEntropy(void*  dstBuffer, size_t maxDstSize,
         MEM_writeLE32(dstPtr+4, repStartValue[1]);
         MEM_writeLE32(dstPtr+8, repStartValue[2]);
     #endif
    -    //dstPtr += 12;
         eSize += 12;
     
     _cleanup:
    @@ -831,7 +854,7 @@ size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
                               ZDICT_params_t params)
     {
         size_t hSize;
    -#define HBUFFSIZE 256
    +#define HBUFFSIZE 256   /* should prove large enough for all entropy headers */
         BYTE header[HBUFFSIZE];
         int const compressionLevel = (params.compressionLevel <= 0) ? g_compressionLevel_default : params.compressionLevel;
         U32 const notificationLevel = params.notificationLevel;
    @@ -877,20 +900,11 @@ size_t ZDICT_addEntropyTablesFromBuffer_advanced(void* dictBuffer, size_t dictCo
                                                      const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
                                                      ZDICT_params_t params)
     {
    -    size_t hSize;
         int const compressionLevel = (params.compressionLevel <= 0) ? g_compressionLevel_default : params.compressionLevel;
         U32 const notificationLevel = params.notificationLevel;
    +    size_t hSize = 8;
     
    -    /* dictionary header */
    -    MEM_writeLE32(dictBuffer, ZSTD_DICT_MAGIC);
    -    {   U64 const randomID = XXH64((char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, 0);
    -        U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768;
    -        U32 const dictID = params.dictID ? params.dictID : compliantID;
    -        MEM_writeLE32((char*)dictBuffer+4, dictID);
    -    }
    -    hSize = 8;
    -
    -    /* entropy tables */
    +    /* calculate entropy tables */
         DISPLAYLEVEL(2, "\r%70s\r", "");   /* clean display line */
         DISPLAYLEVEL(2, "statistics ... \n");
         {   size_t const eSize = ZDICT_analyzeEntropy((char*)dictBuffer+hSize, dictBufferCapacity-hSize,
    @@ -902,6 +916,13 @@ size_t ZDICT_addEntropyTablesFromBuffer_advanced(void* dictBuffer, size_t dictCo
             hSize += eSize;
         }
     
    +    /* add dictionary header (after entropy tables) */
    +    MEM_writeLE32(dictBuffer, ZSTD_DICT_MAGIC);
    +    {   U64 const randomID = XXH64((char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, 0);
    +        U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768;
    +        U32 const dictID = params.dictID ? params.dictID : compliantID;
    +        MEM_writeLE32((char*)dictBuffer+4, dictID);
    +    }
     
         if (hSize + dictContentSize < dictBufferCapacity)
             memmove((char*)dictBuffer + hSize, (char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize);
    @@ -929,8 +950,8 @@ size_t ZDICT_trainFromBuffer_unsafe(
     
         /* checks */
         if (!dictList) return ERROR(memory_allocation);
    -    if (maxDictSize <= g_provision_entropySize + g_min_fast_dictContent) { free(dictList); return ERROR(dstSize_tooSmall); }
    -    if (samplesBuffSize < ZDICT_MIN_SAMPLES_SIZE) { free(dictList); return 0; }   /* not enough source to create dictionary */
    +    if (maxDictSize < ZDICT_DICTSIZE_MIN) { free(dictList); return ERROR(dstSize_tooSmall); }   /* requested dictionary size is too small */
    +    if (samplesBuffSize < ZDICT_MIN_SAMPLES_SIZE) { free(dictList); return ERROR(dictionaryCreation_failed); }   /* not enough source to create dictionary */
     
         /* init */
         ZDICT_initDictItem(dictList);
    @@ -963,14 +984,15 @@ size_t ZDICT_trainFromBuffer_unsafe(
     
         /* create dictionary */
         {   U32 dictContentSize = ZDICT_dictSize(dictList);
    -        if (dictContentSize < targetDictSize/3) {
    +        if (dictContentSize < ZDICT_CONTENTSIZE_MIN) { free(dictList); return ERROR(dictionaryCreation_failed); }   /* dictionary content too small */
    +        if (dictContentSize < targetDictSize/4) {
                 DISPLAYLEVEL(2, "!  warning : selected content significantly smaller than requested (%u < %u) \n", dictContentSize, (U32)maxDictSize);
    +            if (samplesBuffSize < 10 * targetDictSize)
    +                DISPLAYLEVEL(2, "!  consider increasing the number of samples (total size : %u MB)\n", (U32)(samplesBuffSize>>20));
                 if (minRep > MINRATIO) {
                     DISPLAYLEVEL(2, "!  consider increasing selectivity to produce larger dictionary (-s%u) \n", selectivity+1);
                     DISPLAYLEVEL(2, "!  note : larger dictionaries are not necessarily better, test its efficiency on samples \n");
                 }
    -            if (samplesBuffSize < 10 * targetDictSize)
    -                DISPLAYLEVEL(2, "!  consider increasing the number of samples (total size : %u MB)\n", (U32)(samplesBuffSize>>20));
             }
     
             if ((dictContentSize > targetDictSize*3) && (nbSamples > 2*MINRATIO) && (selectivity>1)) {
    @@ -978,7 +1000,7 @@ size_t ZDICT_trainFromBuffer_unsafe(
                 while ((nbSamples >> proposedSelectivity) <= MINRATIO) { proposedSelectivity--; }
                 DISPLAYLEVEL(2, "!  note : calculated dictionary significantly larger than requested (%u > %u) \n", dictContentSize, (U32)maxDictSize);
                 DISPLAYLEVEL(2, "!  consider increasing dictionary size, or produce denser dictionary (-s%u) \n", proposedSelectivity);
    -            DISPLAYLEVEL(2, "!  always test dictionary efficiency on samples \n");
    +            DISPLAYLEVEL(2, "!  always test dictionary efficiency on real samples \n");
             }
     
             /* limit dictionary size */
    diff --git a/lib/dictBuilder/zdict.h b/lib/dictBuilder/zdict.h
    index 4ead4474f..9b53de346 100644
    --- a/lib/dictBuilder/zdict.h
    +++ b/lib/dictBuilder/zdict.h
    @@ -88,7 +88,7 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer_advanced(void* dictBuffer, size_t dict
     
     /*! COVER_params_t :
         For all values 0 means default.
    -    kMin and d are the only required parameters.
    +    k and d are the only required parameters.
     */
     typedef struct {
         unsigned k;                  /* Segment size : constraint: 0 < k : Reasonable range [16, 2048+] */
    @@ -147,18 +147,18 @@ ZDICTLIB_API size_t COVER_optimizeTrainFromBuffer(void* dictBuffer, size_t dictB
         Samples must be stored concatenated in a flat buffer `samplesBuffer`,
         supplied with an array of sizes `samplesSizes`, providing the size of each sample in order.
     
    -    dictContentSize must be > ZDICT_CONTENTSIZE_MIN bytes.
    -    maxDictSize must be >= dictContentSize, and must be > ZDICT_DICTSIZE_MIN bytes.
    +    dictContentSize must be >= ZDICT_CONTENTSIZE_MIN bytes.
    +    maxDictSize must be >= dictContentSize, and must be >= ZDICT_DICTSIZE_MIN bytes.
     
         @return : size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`),
                   or an error code, which can be tested by ZDICT_isError().
         note : ZDICT_finalizeDictionary() will push notifications into stderr if instructed to, using notificationLevel>0.
    -    note 2 : dictBuffer and customDictContent can overlap
    +    note 2 : dictBuffer and dictContent can overlap
     */
    -#define ZDICT_CONTENTSIZE_MIN 256
    -#define ZDICT_DICTSIZE_MIN    512
    +#define ZDICT_CONTENTSIZE_MIN 128
    +#define ZDICT_DICTSIZE_MIN    256
     ZDICTLIB_API size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,
    -                                const void* customDictContent, size_t dictContentSize,
    +                                const void* dictContent, size_t dictContentSize,
                                     const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
                                     ZDICT_params_t parameters);
     
    diff --git a/lib/dll/example/README.md b/lib/dll/example/README.md
    index 957a29f35..e231f59c5 100644
    --- a/lib/dll/example/README.md
    +++ b/lib/dll/example/README.md
    @@ -4,11 +4,11 @@ ZSTD Windows binary package
     #### The package contents
     
     - `zstd.exe`                  : Command Line Utility, supporting gzip-like arguments
    -- `dll\libzstd.dll`           : The DLL of ZSTD library
    -- `dll\libzstd.lib`           : The import library of ZSTD library for Visual C++
    -- `example\`                  : The example of usage of ZSTD library
    -- `include\`                  : Header files required with ZSTD library
    -- `static\libzstd_static.lib` : The static ZSTD library
    +- `dll\libzstd.dll`           : The ZSTD dynamic library (DLL)
    +- `dll\libzstd.lib`           : The import library of the ZSTD dynamic library (DLL) for Visual C++
     +- `example\`                  : Example code demonstrating usage of the ZSTD library
    +- `include\`                  : Header files required by the ZSTD library
    +- `static\libzstd_static.lib` : The static ZSTD library (LIB)
     
     
     #### Usage of Command Line Interface
    diff --git a/lib/dll/example/build_package.bat b/lib/dll/example/build_package.bat
    index b225af8d8..cae0a15cb 100644
    --- a/lib/dll/example/build_package.bat
    +++ b/lib/dll/example/build_package.bat
    @@ -9,6 +9,7 @@ COPY lib\common\mem.h bin\example\
     COPY lib\common\zstd_errors.h bin\example\
     COPY lib\common\zstd_internal.h bin\example\
     COPY lib\common\error_private.h bin\example\
    +COPY lib\common\xxhash.h bin\example\
     COPY lib\zstd.h bin\include\
     COPY lib\libzstd.a bin\static\libzstd_static.lib
     COPY lib\dll\libzstd.* bin\dll\
    diff --git a/lib/legacy/zstd_legacy.h b/lib/legacy/zstd_legacy.h
    index 707e76f0a..3c9798f88 100644
    --- a/lib/legacy/zstd_legacy.h
    +++ b/lib/legacy/zstd_legacy.h
    @@ -20,14 +20,33 @@ extern "C" {
     #include "mem.h"            /* MEM_STATIC */
     #include "error_private.h"  /* ERROR */
     #include "zstd.h"           /* ZSTD_inBuffer, ZSTD_outBuffer */
    -#include "zstd_v01.h"
    -#include "zstd_v02.h"
    -#include "zstd_v03.h"
    -#include "zstd_v04.h"
    -#include "zstd_v05.h"
    -#include "zstd_v06.h"
    -#include "zstd_v07.h"
     
    +#if !defined (ZSTD_LEGACY_SUPPORT) || (ZSTD_LEGACY_SUPPORT == 0)
    +#  undef ZSTD_LEGACY_SUPPORT
    +#  define ZSTD_LEGACY_SUPPORT 8
    +#endif
    +
    +#if (ZSTD_LEGACY_SUPPORT <= 1)
    +#  include "zstd_v01.h"
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 2)
    +#  include "zstd_v02.h"
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 3)
    +#  include "zstd_v03.h"
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 4)
    +#  include "zstd_v04.h"
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 5)
    +#  include "zstd_v05.h"
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 6)
    +#  include "zstd_v06.h"
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 7)
    +#  include "zstd_v07.h"
    +#endif
     
     /** ZSTD_isLegacy() :
         @return : > 0 if supported by legacy decoder. 0 otherwise.
    @@ -40,13 +59,27 @@ MEM_STATIC unsigned ZSTD_isLegacy(const void* src, size_t srcSize)
         magicNumberLE = MEM_readLE32(src);
         switch(magicNumberLE)
         {
    +#if (ZSTD_LEGACY_SUPPORT <= 1)
             case ZSTDv01_magicNumberLE:return 1;
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 2)
             case ZSTDv02_magicNumber : return 2;
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 3)
             case ZSTDv03_magicNumber : return 3;
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 4)
             case ZSTDv04_magicNumber : return 4;
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 5)
             case ZSTDv05_MAGICNUMBER : return 5;
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 6)
             case ZSTDv06_MAGICNUMBER : return 6;
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 7)
             case ZSTDv07_MAGICNUMBER : return 7;
    +#endif
             default : return 0;
         }
     }
    @@ -56,24 +89,30 @@ MEM_STATIC unsigned long long ZSTD_getDecompressedSize_legacy(const void* src, s
     {
         U32 const version = ZSTD_isLegacy(src, srcSize);
         if (version < 5) return 0;  /* no decompressed size in frame header, or not a legacy format */
    +#if (ZSTD_LEGACY_SUPPORT <= 5)
         if (version==5) {
             ZSTDv05_parameters fParams;
             size_t const frResult = ZSTDv05_getFrameParams(&fParams, src, srcSize);
             if (frResult != 0) return 0;
             return fParams.srcSize;
         }
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 6)
         if (version==6) {
             ZSTDv06_frameParams fParams;
             size_t const frResult = ZSTDv06_getFrameParams(&fParams, src, srcSize);
             if (frResult != 0) return 0;
             return fParams.frameContentSize;
         }
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 7)
         if (version==7) {
             ZSTDv07_frameParams fParams;
             size_t const frResult = ZSTDv07_getFrameParams(&fParams, src, srcSize);
             if (frResult != 0) return 0;
             return fParams.frameContentSize;
         }
    +#endif
         return 0;   /* should not be possible */
     }
     
    @@ -86,14 +125,23 @@ MEM_STATIC size_t ZSTD_decompressLegacy(
         U32 const version = ZSTD_isLegacy(src, compressedSize);
         switch(version)
         {
    +#if (ZSTD_LEGACY_SUPPORT <= 1)
             case 1 :
                 return ZSTDv01_decompress(dst, dstCapacity, src, compressedSize);
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 2)
             case 2 :
                 return ZSTDv02_decompress(dst, dstCapacity, src, compressedSize);
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 3)
             case 3 :
                 return ZSTDv03_decompress(dst, dstCapacity, src, compressedSize);
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 4)
             case 4 :
                 return ZSTDv04_decompress(dst, dstCapacity, src, compressedSize);
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 5)
             case 5 :
                 {   size_t result;
                     ZSTDv05_DCtx* const zd = ZSTDv05_createDCtx();
    @@ -102,6 +150,8 @@ MEM_STATIC size_t ZSTD_decompressLegacy(
                     ZSTDv05_freeDCtx(zd);
                     return result;
                 }
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 6)
             case 6 :
                 {   size_t result;
                     ZSTDv06_DCtx* const zd = ZSTDv06_createDCtx();
    @@ -110,6 +160,8 @@ MEM_STATIC size_t ZSTD_decompressLegacy(
                     ZSTDv06_freeDCtx(zd);
                     return result;
                 }
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 7)
             case 7 :
                 {   size_t result;
                     ZSTDv07_DCtx* const zd = ZSTDv07_createDCtx();
    @@ -118,6 +170,7 @@ MEM_STATIC size_t ZSTD_decompressLegacy(
                     ZSTDv07_freeDCtx(zd);
                     return result;
                 }
    +#endif
             default :
                 return ERROR(prefix_unknown);
         }
    @@ -129,20 +182,34 @@ MEM_STATIC size_t ZSTD_findFrameCompressedSizeLegacy(const void *src,
         U32 const version = ZSTD_isLegacy(src, compressedSize);
         switch(version)
         {
    +#if (ZSTD_LEGACY_SUPPORT <= 1)
             case 1 :
                 return ZSTDv01_findFrameCompressedSize(src, compressedSize);
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 2)
             case 2 :
                 return ZSTDv02_findFrameCompressedSize(src, compressedSize);
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 3)
             case 3 :
                 return ZSTDv03_findFrameCompressedSize(src, compressedSize);
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 4)
             case 4 :
                 return ZSTDv04_findFrameCompressedSize(src, compressedSize);
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 5)
             case 5 :
                 return ZSTDv05_findFrameCompressedSize(src, compressedSize);
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 6)
             case 6 :
                 return ZSTDv06_findFrameCompressedSize(src, compressedSize);
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 7)
             case 7 :
                 return ZSTDv07_findFrameCompressedSize(src, compressedSize);
    +#endif
             default :
                 return ERROR(prefix_unknown);
         }
    @@ -157,10 +224,18 @@ MEM_STATIC size_t ZSTD_freeLegacyStreamContext(void* legacyContext, U32 version)
             case 2 :
             case 3 :
                 return ERROR(version_unsupported);
    +#if (ZSTD_LEGACY_SUPPORT <= 4)
             case 4 : return ZBUFFv04_freeDCtx((ZBUFFv04_DCtx*)legacyContext);
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 5)
             case 5 : return ZBUFFv05_freeDCtx((ZBUFFv05_DCtx*)legacyContext);
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 6)
             case 6 : return ZBUFFv06_freeDCtx((ZBUFFv06_DCtx*)legacyContext);
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 7)
             case 7 : return ZBUFFv07_freeDCtx((ZBUFFv07_DCtx*)legacyContext);
    +#endif
         }
     }
     
    @@ -176,6 +251,7 @@ MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U
             case 2 :
             case 3 :
                 return 0;
    +#if (ZSTD_LEGACY_SUPPORT <= 4)
             case 4 :
             {
                 ZBUFFv04_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv04_createDCtx() : (ZBUFFv04_DCtx*)*legacyContext;
    @@ -185,6 +261,8 @@ MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U
                 *legacyContext = dctx;
                 return 0;
             }
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 5)
             case 5 :
             {
                 ZBUFFv05_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv05_createDCtx() : (ZBUFFv05_DCtx*)*legacyContext;
    @@ -193,6 +271,8 @@ MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U
                 *legacyContext = dctx;
                 return 0;
             }
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 6)
             case 6 :
             {
                 ZBUFFv06_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv06_createDCtx() : (ZBUFFv06_DCtx*)*legacyContext;
    @@ -201,6 +281,8 @@ MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U
                 *legacyContext = dctx;
                 return 0;
             }
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 7)
             case 7 :
             {
                 ZBUFFv07_DCtx* dctx = (prevVersion != newVersion) ? ZBUFFv07_createDCtx() : (ZBUFFv07_DCtx*)*legacyContext;
    @@ -209,6 +291,7 @@ MEM_STATIC size_t ZSTD_initLegacyStream(void** legacyContext, U32 prevVersion, U
                 *legacyContext = dctx;
                 return 0;
             }
    +#endif
         }
     }
     
    @@ -224,6 +307,7 @@ MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version,
             case 2 :
             case 3 :
                 return ERROR(version_unsupported);
    +#if (ZSTD_LEGACY_SUPPORT <= 4)
             case 4 :
                 {
                     ZBUFFv04_DCtx* dctx = (ZBUFFv04_DCtx*) legacyContext;
    @@ -236,6 +320,8 @@ MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version,
                     input->pos += readSize;
                     return hintSize;
                 }
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 5)
             case 5 :
                 {
                     ZBUFFv05_DCtx* dctx = (ZBUFFv05_DCtx*) legacyContext;
    @@ -248,6 +334,8 @@ MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version,
                     input->pos += readSize;
                     return hintSize;
                 }
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 6)
             case 6 :
                 {
                     ZBUFFv06_DCtx* dctx = (ZBUFFv06_DCtx*) legacyContext;
    @@ -260,6 +348,8 @@ MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version,
                     input->pos += readSize;
                     return hintSize;
                 }
    +#endif
    +#if (ZSTD_LEGACY_SUPPORT <= 7)
             case 7 :
                 {
                     ZBUFFv07_DCtx* dctx = (ZBUFFv07_DCtx*) legacyContext;
    @@ -272,6 +362,7 @@ MEM_STATIC size_t ZSTD_decompressLegacyStream(void* legacyContext, U32 version,
                     input->pos += readSize;
                     return hintSize;
                 }
    +#endif
         }
     }
     
    diff --git a/lib/legacy/zstd_v01.c b/lib/legacy/zstd_v01.c
    index bcacb8d5d..cf5354d6a 100644
    --- a/lib/legacy/zstd_v01.c
    +++ b/lib/legacy/zstd_v01.c
    @@ -1432,7 +1432,7 @@ typedef struct ZSTD_Cctx_s
     #else
         U32 hashTable[HASH_TABLESIZE];
     #endif
    -	BYTE buffer[WORKPLACESIZE];
    +    BYTE buffer[WORKPLACESIZE];
     } cctxi_t;
     
     
    diff --git a/lib/legacy/zstd_v02.c b/lib/legacy/zstd_v02.c
    index 2297b28c8..3cf8f4778 100644
    --- a/lib/legacy/zstd_v02.c
    +++ b/lib/legacy/zstd_v02.c
    @@ -475,8 +475,8 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
     
     MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
     {
    -	if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */
    -		return BIT_DStream_overflow;
    +    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */
    +        return BIT_DStream_overflow;
     
         if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))
         {
    @@ -1334,8 +1334,8 @@ static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsi
                     else
                     {
                         bitCount -= (int)(8 * (iend - 4 - ip));
    -					ip = iend - 4;
    -				}
    +                    ip = iend - 4;
    +                }
                     bitStream = MEM_readLE32(ip) >> (bitCount & 31);
                 }
             }
    @@ -2040,7 +2040,7 @@ static size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
             rankStart[0] = 0;   /* forget 0w symbols; this is beginning of weight(1) */
         }
     
    -	/* Build rankVal */
    +    /* Build rankVal */
         {
             const U32 minBits = tableLog+1 - maxW;
             U32 nextRankVal = 0;
    @@ -2374,7 +2374,7 @@ static size_t HUF_readDTableX6 (U32* DTable, const void* src, size_t srcSize)
             rankStart[0] = 0;   /* forget 0w symbols; this is beginning of weight(1) */
         }
     
    -	/* Build rankVal */
    +    /* Build rankVal */
         {
             const U32 minBits = tableLog+1 - maxW;
             U32 nextRankVal = 0;
    @@ -2948,14 +2948,14 @@ static size_t ZSTD_decodeLiteralsBlock(void* ctx,
                 const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
                 if (litSize > srcSize-11)   /* risk of reading too far with wildcopy */
                 {
    -				if (litSize > srcSize-3) return ERROR(corruption_detected);
    -				memcpy(dctx->litBuffer, istart, litSize);
    -				dctx->litPtr = dctx->litBuffer;
    -				dctx->litSize = litSize;
    -				memset(dctx->litBuffer + dctx->litSize, 0, 8);
    -				return litSize+3;
    -			}
    -			/* direct reference into compressed stream */
    +                if (litSize > srcSize-3) return ERROR(corruption_detected);
    +                memcpy(dctx->litBuffer, istart, litSize);
    +                dctx->litPtr = dctx->litBuffer;
    +                dctx->litSize = litSize;
    +                memset(dctx->litBuffer + dctx->litSize, 0, 8);
    +                return litSize+3;
    +            }
    +            /* direct reference into compressed stream */
                 dctx->litPtr = istart+3;
                 dctx->litSize = litSize;
                 return litSize+3;
    @@ -3515,13 +3515,13 @@ static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSi
     
     unsigned ZSTDv02_isError(size_t code)
     {
    -	return ZSTD_isError(code);
    +    return ZSTD_isError(code);
     }
     
     size_t ZSTDv02_decompress( void* dst, size_t maxOriginalSize,
                          const void* src, size_t compressedSize)
     {
    -	return ZSTD_decompress(dst, maxOriginalSize, src, compressedSize);
    +    return ZSTD_decompress(dst, maxOriginalSize, src, compressedSize);
     }
     
     size_t ZSTDv02_findFrameCompressedSize(const void *src, size_t compressedSize)
    @@ -3531,25 +3531,25 @@ size_t ZSTDv02_findFrameCompressedSize(const void *src, size_t compressedSize)
     
     ZSTDv02_Dctx* ZSTDv02_createDCtx(void)
     {
    -	return (ZSTDv02_Dctx*)ZSTD_createDCtx();
    +    return (ZSTDv02_Dctx*)ZSTD_createDCtx();
     }
     
     size_t ZSTDv02_freeDCtx(ZSTDv02_Dctx* dctx)
     {
    -	return ZSTD_freeDCtx((ZSTD_DCtx*)dctx);
    +    return ZSTD_freeDCtx((ZSTD_DCtx*)dctx);
     }
     
     size_t ZSTDv02_resetDCtx(ZSTDv02_Dctx* dctx)
     {
    -	return ZSTD_resetDCtx((ZSTD_DCtx*)dctx);
    +    return ZSTD_resetDCtx((ZSTD_DCtx*)dctx);
     }
     
     size_t ZSTDv02_nextSrcSizeToDecompress(ZSTDv02_Dctx* dctx)
     {
    -	return ZSTD_nextSrcSizeToDecompress((ZSTD_DCtx*)dctx);
    +    return ZSTD_nextSrcSizeToDecompress((ZSTD_DCtx*)dctx);
     }
     
     size_t ZSTDv02_decompressContinue(ZSTDv02_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
     {
    -	return ZSTD_decompressContinue((ZSTD_DCtx*)dctx, dst, maxDstSize, src, srcSize);
    +    return ZSTD_decompressContinue((ZSTD_DCtx*)dctx, dst, maxDstSize, src, srcSize);
     }
    diff --git a/lib/legacy/zstd_v03.c b/lib/legacy/zstd_v03.c
    index ef654931f..f438330a4 100644
    --- a/lib/legacy/zstd_v03.c
    +++ b/lib/legacy/zstd_v03.c
    @@ -477,8 +477,8 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
     
     MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
     {
    -	if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */
    -		return BIT_DStream_overflow;
    +    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */
    +        return BIT_DStream_overflow;
     
         if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))
         {
    @@ -1335,8 +1335,8 @@ static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsi
                     else
                     {
                         bitCount -= (int)(8 * (iend - 4 - ip));
    -					ip = iend - 4;
    -				}
    +                    ip = iend - 4;
    +                }
                     bitStream = MEM_readLE32(ip) >> (bitCount & 31);
                 }
             }
    @@ -2037,7 +2037,7 @@ static size_t HUF_readDTableX4 (U32* DTable, const void* src, size_t srcSize)
             rankStart[0] = 0;   /* forget 0w symbols; this is beginning of weight(1) */
         }
     
    -	/* Build rankVal */
    +    /* Build rankVal */
         {
             const U32 minBits = tableLog+1 - maxW;
             U32 nextRankVal = 0;
    @@ -2589,14 +2589,14 @@ static size_t ZSTD_decodeLiteralsBlock(void* ctx,
                 const size_t litSize = (MEM_readLE32(istart) & 0xFFFFFF) >> 2;   /* no buffer issue : srcSize >= MIN_CBLOCK_SIZE */
                 if (litSize > srcSize-11)   /* risk of reading too far with wildcopy */
                 {
    -				if (litSize > srcSize-3) return ERROR(corruption_detected);
    -				memcpy(dctx->litBuffer, istart, litSize);
    -				dctx->litPtr = dctx->litBuffer;
    -				dctx->litSize = litSize;
    -				memset(dctx->litBuffer + dctx->litSize, 0, 8);
    -				return litSize+3;
    -			}
    -			/* direct reference into compressed stream */
    +                if (litSize > srcSize-3) return ERROR(corruption_detected);
    +                memcpy(dctx->litBuffer, istart, litSize);
    +                dctx->litPtr = dctx->litBuffer;
    +                dctx->litSize = litSize;
    +                memset(dctx->litBuffer + dctx->litSize, 0, 8);
    +                return litSize+3;
    +            }
    +            /* direct reference into compressed stream */
                 dctx->litPtr = istart+3;
                 dctx->litSize = litSize;
                 return litSize+3;
    @@ -3156,13 +3156,13 @@ static size_t ZSTD_decompressContinue(ZSTD_DCtx* ctx, void* dst, size_t maxDstSi
     
     unsigned ZSTDv03_isError(size_t code)
     {
    -	return ZSTD_isError(code);
    +    return ZSTD_isError(code);
     }
     
     size_t ZSTDv03_decompress( void* dst, size_t maxOriginalSize,
                          const void* src, size_t compressedSize)
     {
    -	return ZSTD_decompress(dst, maxOriginalSize, src, compressedSize);
    +    return ZSTD_decompress(dst, maxOriginalSize, src, compressedSize);
     }
     
     size_t ZSTDv03_findFrameCompressedSize(const void* src, size_t srcSize)
    @@ -3172,25 +3172,25 @@ size_t ZSTDv03_findFrameCompressedSize(const void* src, size_t srcSize)
     
     ZSTDv03_Dctx* ZSTDv03_createDCtx(void)
     {
    -	return (ZSTDv03_Dctx*)ZSTD_createDCtx();
    +    return (ZSTDv03_Dctx*)ZSTD_createDCtx();
     }
     
     size_t ZSTDv03_freeDCtx(ZSTDv03_Dctx* dctx)
     {
    -	return ZSTD_freeDCtx((ZSTD_DCtx*)dctx);
    +    return ZSTD_freeDCtx((ZSTD_DCtx*)dctx);
     }
     
     size_t ZSTDv03_resetDCtx(ZSTDv03_Dctx* dctx)
     {
    -	return ZSTD_resetDCtx((ZSTD_DCtx*)dctx);
    +    return ZSTD_resetDCtx((ZSTD_DCtx*)dctx);
     }
     
     size_t ZSTDv03_nextSrcSizeToDecompress(ZSTDv03_Dctx* dctx)
     {
    -	return ZSTD_nextSrcSizeToDecompress((ZSTD_DCtx*)dctx);
    +    return ZSTD_nextSrcSizeToDecompress((ZSTD_DCtx*)dctx);
     }
     
     size_t ZSTDv03_decompressContinue(ZSTDv03_Dctx* dctx, void* dst, size_t maxDstSize, const void* src, size_t srcSize)
     {
    -	return ZSTD_decompressContinue((ZSTD_DCtx*)dctx, dst, maxDstSize, src, srcSize);
    +    return ZSTD_decompressContinue((ZSTD_DCtx*)dctx, dst, maxDstSize, src, srcSize);
     }
    diff --git a/lib/legacy/zstd_v04.c b/lib/legacy/zstd_v04.c
    index 09040e68e..1a29da92d 100644
    --- a/lib/legacy/zstd_v04.c
    +++ b/lib/legacy/zstd_v04.c
    @@ -882,8 +882,8 @@ MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
     
     MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
     {
    -	if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */
    -		return BIT_DStream_overflow;
    +    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */
    +        return BIT_DStream_overflow;
     
         if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer))
         {
    @@ -1451,8 +1451,8 @@ static size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsi
                     else
                     {
                         bitCount -= (int)(8 * (iend - 4 - ip));
    -					ip = iend - 4;
    -				}
    +                    ip = iend - 4;
    +                }
                     bitStream = MEM_readLE32(ip) >> (bitCount & 31);
                 }
             }
    diff --git a/lib/legacy/zstd_v05.c b/lib/legacy/zstd_v05.c
    index a6f5f5dbb..674f5b0e4 100644
    --- a/lib/legacy/zstd_v05.c
    +++ b/lib/legacy/zstd_v05.c
    @@ -884,8 +884,8 @@ MEM_STATIC size_t BITv05_readBitsFast(BITv05_DStream_t* bitD, U32 nbBits)
     
     MEM_STATIC BITv05_DStream_status BITv05_reloadDStream(BITv05_DStream_t* bitD)
     {
    -	if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */
    -		return BITv05_DStream_overflow;
    +    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */
    +        return BITv05_DStream_overflow;
     
         if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {
             bitD->ptr -= bitD->bitsConsumed >> 3;
    diff --git a/lib/legacy/zstd_v06.c b/lib/legacy/zstd_v06.c
    index a4258b67a..ad8c4cd31 100644
    --- a/lib/legacy/zstd_v06.c
    +++ b/lib/legacy/zstd_v06.c
    @@ -982,8 +982,8 @@ MEM_STATIC size_t BITv06_readBitsFast(BITv06_DStream_t* bitD, U32 nbBits)
                   if status == unfinished, internal register is filled with >= (sizeof(bitD->bitContainer)*8 - 7) bits */
     MEM_STATIC BITv06_DStream_status BITv06_reloadDStream(BITv06_DStream_t* bitD)
     {
    -	if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */
    -		return BITv06_DStream_overflow;
    +    if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8))  /* should never happen */
    +        return BITv06_DStream_overflow;
     
         if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {
             bitD->ptr -= bitD->bitsConsumed >> 3;
    diff --git a/lib/zstd.h b/lib/zstd.h
    index 94b31a8e5..256117662 100644
    --- a/lib/zstd.h
    +++ b/lib/zstd.h
    @@ -55,8 +55,8 @@ extern "C" {
     
     /*------   Version   ------*/
     #define ZSTD_VERSION_MAJOR    1
    -#define ZSTD_VERSION_MINOR    1
    -#define ZSTD_VERSION_RELEASE  4
    +#define ZSTD_VERSION_MINOR    3
    +#define ZSTD_VERSION_RELEASE  0
     
     #define ZSTD_LIB_VERSION ZSTD_VERSION_MAJOR.ZSTD_VERSION_MINOR.ZSTD_VERSION_RELEASE
     #define ZSTD_QUOTE(str) #str
    @@ -71,48 +71,48 @@ ZSTDLIB_API unsigned ZSTD_versionNumber(void);   /**< library version number; to
     *  Simple API
     ***************************************/
     /*! ZSTD_compress() :
    -    Compresses `src` content as a single zstd compressed frame into already allocated `dst`.
    -    Hint : compression runs faster if `dstCapacity` >=  `ZSTD_compressBound(srcSize)`.
    -    @return : compressed size written into `dst` (<= `dstCapacity),
    -              or an error code if it fails (which can be tested using ZSTD_isError()). */
    + *  Compresses `src` content as a single zstd compressed frame into already allocated `dst`.
    + *  Hint : compression runs faster if `dstCapacity` >=  `ZSTD_compressBound(srcSize)`.
    + *  @return : compressed size written into `dst` (<= `dstCapacity`),
    + *            or an error code if it fails (which can be tested using ZSTD_isError()). */
     ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity,
                                 const void* src, size_t srcSize,
                                       int compressionLevel);
     
     /*! ZSTD_decompress() :
    -    `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
    -    `dstCapacity` is an upper bound of originalSize.
    -    If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data.
    -    @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
    -              or an errorCode if it fails (which can be tested using ZSTD_isError()). */
    + *  `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
    + *  `dstCapacity` is an upper bound of originalSize.
    + *  If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data.
    + *  @return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
    + *            or an errorCode if it fails (which can be tested using ZSTD_isError()). */
     ZSTDLIB_API size_t ZSTD_decompress( void* dst, size_t dstCapacity,
                                   const void* src, size_t compressedSize);
     
     /*! ZSTD_getDecompressedSize() :
    -*   NOTE: This function is planned to be obsolete, in favour of ZSTD_getFrameContentSize.
    -*   ZSTD_getFrameContentSize functions the same way, returning the decompressed size of a single
    -*   frame, but distinguishes empty frames from frames with an unknown size, or errors.
    -*
    -*   Additionally, ZSTD_findDecompressedSize can be used instead.  It can handle multiple
    -*   concatenated frames in one buffer, and so is more general.
    -*   As a result however, it requires more computation and entire frames to be passed to it,
    -*   as opposed to ZSTD_getFrameContentSize which requires only a single frame's header.
    -*
    -*   'src' is the start of a zstd compressed frame.
    -*   @return : content size to be decompressed, as a 64-bits value _if known_, 0 otherwise.
    -*    note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.
    -*             When `return==0`, data to decompress could be any size.
    -*             In which case, it's necessary to use streaming mode to decompress data.
    -*             Optionally, application can still use ZSTD_decompress() while relying on implied limits.
    -*             (For example, data may be necessarily cut into blocks <= 16 KB).
    -*    note 2 : decompressed size is always present when compression is done with ZSTD_compress()
    -*    note 3 : decompressed size can be very large (64-bits value),
    -*             potentially larger than what local system can handle as a single memory segment.
    -*             In which case, it's necessary to use streaming mode to decompress data.
    -*    note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
    -*             Always ensure result fits within application's authorized limits.
    -*             Each application can set its own limits.
    -*    note 5 : when `return==0`, if precise failure cause is needed, use ZSTD_getFrameParams() to know more. */
    + *  NOTE: This function is planned to be obsolete, in favour of ZSTD_getFrameContentSize.
    + *  ZSTD_getFrameContentSize functions the same way, returning the decompressed size of a single
    + *  frame, but distinguishes empty frames from frames with an unknown size, or errors.
    + *
    + *  Additionally, ZSTD_findDecompressedSize can be used instead.  It can handle multiple
    + *  concatenated frames in one buffer, and so is more general.
    + *  As a result however, it requires more computation and entire frames to be passed to it,
    + *  as opposed to ZSTD_getFrameContentSize which requires only a single frame's header.
    + *
    + *  'src' is the start of a zstd compressed frame.
    + *  @return : content size to be decompressed, as a 64-bits value _if known_, 0 otherwise.
    + *   note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.
    + *            When `return==0`, data to decompress could be any size.
    + *            In which case, it's necessary to use streaming mode to decompress data.
    + *            Optionally, application can still use ZSTD_decompress() while relying on implied limits.
    + *            (For example, data may be necessarily cut into blocks <= 16 KB).
    + *   note 2 : decompressed size is always present when compression is done with ZSTD_compress()
    + *   note 3 : decompressed size can be very large (64-bits value),
    + *            potentially larger than what local system can handle as a single memory segment.
    + *            In which case, it's necessary to use streaming mode to decompress data.
    + *   note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
    + *            Always ensure result fits within application's authorized limits.
    + *            Each application can set its own limits.
    + *   note 5 : when `return==0`, if precise failure cause is needed, use ZSTD_getFrameParams() to know more. */
     ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
     
     
    @@ -127,25 +127,29 @@ ZSTDLIB_API const char* ZSTD_getErrorName(size_t code);     /*!< provides readab
     *  Explicit memory management
     ***************************************/
     /*= Compression context
    -*   When compressing many times,
    -*   it is recommended to allocate a context just once, and re-use it for each successive compression operation.
    -*   This will make workload friendlier for system's memory.
    -*   Use one context per thread for parallel execution in multi-threaded environments. */
    + *  When compressing many times,
    + *  it is recommended to allocate a context just once, and re-use it for each successive compression operation.
    + *  This will make workload friendlier for system's memory.
    + *  Use one context per thread for parallel execution in multi-threaded environments. */
     typedef struct ZSTD_CCtx_s ZSTD_CCtx;
     ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx(void);
     ZSTDLIB_API size_t     ZSTD_freeCCtx(ZSTD_CCtx* cctx);
     
     /*! ZSTD_compressCCtx() :
    -    Same as ZSTD_compress(), requires an allocated ZSTD_CCtx (see ZSTD_createCCtx()). */
    + *  Same as ZSTD_compress(), requires an allocated ZSTD_CCtx (see ZSTD_createCCtx()). */
     ZSTDLIB_API size_t ZSTD_compressCCtx(ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel);
     
    -/*= Decompression context */
    +/*= Decompression context
    + *  When decompressing many times,
    + *  it is recommended to allocate a context just once, and re-use it for each successive decompression operation.
    + *  This will make workload friendlier for system's memory.
    + *  Use one context per thread for parallel execution in multi-threaded environments. */
     typedef struct ZSTD_DCtx_s ZSTD_DCtx;
     ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx(void);
     ZSTDLIB_API size_t     ZSTD_freeDCtx(ZSTD_DCtx* dctx);
     
     /*! ZSTD_decompressDCtx() :
    -*   Same as ZSTD_decompress(), requires an allocated ZSTD_DCtx (see ZSTD_createDCtx()). */
    + *  Same as ZSTD_decompress(), requires an allocated ZSTD_DCtx (see ZSTD_createDCtx()). */
     ZSTDLIB_API size_t ZSTD_decompressDCtx(ZSTD_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
     
     
    @@ -190,9 +194,10 @@ ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict(const void* dictBuffer, size_t dictSize
     ZSTDLIB_API size_t      ZSTD_freeCDict(ZSTD_CDict* CDict);
     
     /*! ZSTD_compress_usingCDict() :
    -*   Compression using a digested Dictionary.
    -*   Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
    -*   Note that compression level is decided during dictionary creation. */
    + *  Compression using a digested Dictionary.
    + *  Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
    + *  Note that compression level is decided during dictionary creation.
    + *  Frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no) */
     ZSTDLIB_API size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
                                                 void* dst, size_t dstCapacity,
                                           const void* src, size_t srcSize,
    @@ -276,10 +281,13 @@ typedef struct ZSTD_outBuffer_s {
     *
     * *******************************************************************/
     
    -typedef struct ZSTD_CStream_s ZSTD_CStream;
    +typedef ZSTD_CCtx ZSTD_CStream;  /**< CCtx and CStream are effectively same object */
    +                                 /* Continue to distinguish them for compatibility with versions <= v1.2.0 */
    +/*===== ZSTD_CStream management functions =====*/
     ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream(void);
     ZSTDLIB_API size_t ZSTD_freeCStream(ZSTD_CStream* zcs);
     
    +/*===== Streaming compression functions =====*/
     ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
     ZSTDLIB_API size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
     ZSTDLIB_API size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
    @@ -313,9 +321,11 @@ ZSTDLIB_API size_t ZSTD_CStreamOutSize(void);   /**< recommended size for output
     * *******************************************************************************/
     
     typedef struct ZSTD_DStream_s ZSTD_DStream;
    +/*===== ZSTD_DStream management functions =====*/
     ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream(void);
     ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds);
     
    +/*===== Streaming decompression functions =====*/
     ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);
     ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
     
    @@ -340,9 +350,6 @@ ZSTDLIB_API size_t ZSTD_DStreamOutSize(void);   /*!< recommended size for output
     #define ZSTD_MAGICNUMBER            0xFD2FB528   /* >= v0.8.0 */
     #define ZSTD_MAGIC_SKIPPABLE_START  0x184D2A50U
     
    -#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
    -#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
    -
     #define ZSTD_WINDOWLOG_MAX_32  27
     #define ZSTD_WINDOWLOG_MAX_64  27
     #define ZSTD_WINDOWLOG_MAX    ((unsigned)(sizeof(size_t) == 4 ? ZSTD_WINDOWLOG_MAX_32 : ZSTD_WINDOWLOG_MAX_64))
    @@ -391,76 +398,109 @@ typedef struct {
         ZSTD_frameParameters fParams;
     } ZSTD_parameters;
     
    +typedef struct {
    +    unsigned long long frameContentSize;
    +    unsigned windowSize;
    +    unsigned dictID;
    +    unsigned checksumFlag;
    +} ZSTD_frameHeader;
    +
     /*= Custom memory allocation functions */
     typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
     typedef void  (*ZSTD_freeFunction) (void* opaque, void* address);
     typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
     
     /***************************************
    -*  Compressed size functions
    +*  Frame size functions
     ***************************************/
     
     /*! ZSTD_findFrameCompressedSize() :
      *  `src` should point to the start of a ZSTD encoded frame or skippable frame
      *  `srcSize` must be at least as large as the frame
    - *  @return : the compressed size of the frame pointed to by `src`, suitable to pass to
    - *      `ZSTD_decompress` or similar, or an error code if given invalid input. */
    + *  @return : the compressed size of the frame pointed to by `src`,
    + *            suitable to pass to `ZSTD_decompress` or similar,
    + *            or an error code if given invalid input. */
     ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
     
    -/***************************************
    -*  Decompressed size functions
    -***************************************/
     /*! ZSTD_getFrameContentSize() :
    -*   `src` should point to the start of a ZSTD encoded frame
    -*   `srcSize` must be at least as large as the frame header.  A value greater than or equal
    -*       to `ZSTD_frameHeaderSize_max` is guaranteed to be large enough in all cases.
    -*   @return : decompressed size of the frame pointed to be `src` if known, otherwise
    -*             - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
    -*             - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
    + *  `src` should point to the start of a ZSTD encoded frame.
    + *  `srcSize` must be at least as large as the frame header.
    + *       A value >= `ZSTD_frameHeaderSize_max` is guaranteed to be large enough.
    + *  @return : - decompressed size of the frame pointed to by `src` if known
    + *            - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
    + *            - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
    +#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
    +#define ZSTD_CONTENTSIZE_ERROR   (0ULL - 2)
     ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
     
     /*! ZSTD_findDecompressedSize() :
    -*   `src` should point the start of a series of ZSTD encoded and/or skippable frames
    -*   `srcSize` must be the _exact_ size of this series
    -*       (i.e. there should be a frame boundary exactly `srcSize` bytes after `src`)
    -*   @return : the decompressed size of all data in the contained frames, as a 64-bit value _if known_
    -*             - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
    -*             - if an error occurred: ZSTD_CONTENTSIZE_ERROR
    -*
    -*    note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.
    -*             When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
    -*             In which case, it's necessary to use streaming mode to decompress data.
    -*             Optionally, application can still use ZSTD_decompress() while relying on implied limits.
    -*             (For example, data may be necessarily cut into blocks <= 16 KB).
    -*    note 2 : decompressed size is always present when compression is done with ZSTD_compress()
    -*    note 3 : decompressed size can be very large (64-bits value),
    -*             potentially larger than what local system can handle as a single memory segment.
    -*             In which case, it's necessary to use streaming mode to decompress data.
    -*    note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
    -*             Always ensure result fits within application's authorized limits.
    -*             Each application can set its own limits.
    -*    note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to
    -*             read each contained frame header.  This is efficient as most of the data is skipped,
    -*             however it does mean that all frame data must be present and valid. */
    + *  `src` should point the start of a series of ZSTD encoded and/or skippable frames
    + *  `srcSize` must be the _exact_ size of this series
    + *       (i.e. there should be a frame boundary exactly `srcSize` bytes after `src`)
    + *  @return : - decompressed size of all data in all successive frames
    + *            - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
    + *            - if an error occurred: ZSTD_CONTENTSIZE_ERROR
    + *
    + *   note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.
    + *            When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
    + *            In which case, it's necessary to use streaming mode to decompress data.
    + *            Optionally, application can still use ZSTD_decompress() while relying on implied limits.
    + *            (For example, data may be necessarily cut into blocks <= 16 KB).
    + *   note 2 : decompressed size is always present when compression is done with ZSTD_compress()
    + *   note 3 : decompressed size can be very large (64-bits value),
    + *            potentially larger than what local system can handle as a single memory segment.
    + *            In which case, it's necessary to use streaming mode to decompress data.
    + *   note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
    + *            Always ensure result fits within application's authorized limits.
    + *            Each application can set its own limits.
    + *   note 5 : ZSTD_findDecompressedSize handles multiple frames, and so it must traverse the input to
    + *            read each contained frame header.  This is efficient as most of the data is skipped,
    + *            however it does mean that all frame data must be present and valid. */
     ZSTDLIB_API unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
     
     
    +/***************************************
    +*  Context memory usage
    +***************************************/
    +
    +/*! ZSTD_sizeof_*() :
    + *  These functions give the current memory usage of selected object.
    + *  Object memory usage can evolve if it's re-used multiple times. */
    +ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
    +ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
    +ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
    +ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
    +ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
    +ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
    +
    +/*! ZSTD_estimate*() :
    + *  These functions make it possible to estimate memory usage
    + *  of a future target object, before its allocation,
    + *  given a set of parameters, which vary depending on target object.
    + *  The objective is to guide decision before allocation. */
    +ZSTDLIB_API size_t ZSTD_estimateCCtxSize(ZSTD_compressionParameters cParams);
    +ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void);
    +
    +/*! ZSTD_estimate?StreamSize() :
    + *  Note : if streaming is init with function ZSTD_init?Stream_usingDict(),
    + *         an internal ?Dict will be created, which size is not estimated.
    + *         In this case, get additional size by using ZSTD_estimate?DictSize */
    +ZSTDLIB_API size_t ZSTD_estimateCStreamSize(ZSTD_compressionParameters cParams);
    +ZSTDLIB_API size_t ZSTD_estimateDStreamSize(ZSTD_frameHeader fHeader);
    +
    +/*! ZSTD_estimate?DictSize() :
    + *  Note : if dictionary is created "byReference", reduce estimation by dictSize */
    +ZSTDLIB_API size_t ZSTD_estimateCDictSize(ZSTD_compressionParameters cParams, size_t dictSize);
    +ZSTDLIB_API size_t ZSTD_estimateDDictSize(size_t dictSize);
    +
    +
     /***************************************
     *  Advanced compression functions
     ***************************************/
    -/*! ZSTD_estimateCCtxSize() :
    - *  Gives the amount of memory allocated for a ZSTD_CCtx given a set of compression parameters.
    - *  `frameContentSize` is an optional parameter, provide `0` if unknown */
    -ZSTDLIB_API size_t ZSTD_estimateCCtxSize(ZSTD_compressionParameters cParams);
    -
     /*! ZSTD_createCCtx_advanced() :
      *  Create a ZSTD compression context using external alloc and free functions */
     ZSTDLIB_API ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
     
    -/*! ZSTD_sizeofCCtx() :
    - *  Gives the amount of memory used by a given ZSTD_CCtx */
    -ZSTDLIB_API size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
    -
     typedef enum {
         ZSTD_p_forceWindow,   /* Force back-references to remain < windowSize, even when referencing Dictionary content (default:0) */
         ZSTD_p_forceRawDict   /* Force loading dictionary in "content-only" mode (no header analysis) */
    @@ -479,11 +519,7 @@ ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_byReference(const void* dictBuffer, siz
     /*! ZSTD_createCDict_advanced() :
      *  Create a ZSTD_CDict using external alloc and free, and customized compression parameters */
     ZSTDLIB_API ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize, unsigned byReference,
    -                                                  ZSTD_parameters params, ZSTD_customMem customMem);
    -
    -/*! ZSTD_sizeof_CDict() :
    - *  Gives the amount of memory used by a given ZSTD_sizeof_CDict */
    -ZSTDLIB_API size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
    +                                                  ZSTD_compressionParameters cParams, ZSTD_customMem customMem);
     
     /*! ZSTD_getCParams() :
     *   @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
    @@ -505,12 +541,19 @@ ZSTDLIB_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
     ZSTDLIB_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
     
     /*! ZSTD_compress_advanced() :
    -*   Same as ZSTD_compress_usingDict(), with fine-tune control of each compression parameter */
    -ZSTDLIB_API size_t ZSTD_compress_advanced (ZSTD_CCtx* ctx,
    -                                           void* dst, size_t dstCapacity,
    -                                     const void* src, size_t srcSize,
    -                                     const void* dict,size_t dictSize,
    -                                           ZSTD_parameters params);
    +*   Same as ZSTD_compress_usingDict(), with fine-tune control over each compression parameter */
    +ZSTDLIB_API size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
    +                                  void* dst, size_t dstCapacity,
    +                            const void* src, size_t srcSize,
    +                            const void* dict,size_t dictSize,
    +                                  ZSTD_parameters params);
    +
    +/*! ZSTD_compress_usingCDict_advanced() :
    +*   Same as ZSTD_compress_usingCDict(), with fine-tune control over frame parameters */
    +ZSTDLIB_API size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
    +                                  void* dst, size_t dstCapacity,
    +                            const void* src, size_t srcSize,
    +                            const ZSTD_CDict* cdict, ZSTD_frameParameters fParams);
     
     
     /*--- Advanced decompression functions ---*/
    @@ -522,31 +565,21 @@ ZSTDLIB_API size_t ZSTD_compress_advanced (ZSTD_CCtx* ctx,
      *  Note 3 : Skippable Frame Identifiers are considered valid. */
     ZSTDLIB_API unsigned ZSTD_isFrame(const void* buffer, size_t size);
     
    -/*! ZSTD_estimateDCtxSize() :
    - *  Gives the potential amount of memory allocated to create a ZSTD_DCtx */
    -ZSTDLIB_API size_t ZSTD_estimateDCtxSize(void);
    -
     /*! ZSTD_createDCtx_advanced() :
      *  Create a ZSTD decompression context using external alloc and free functions */
     ZSTDLIB_API ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
     
    -/*! ZSTD_sizeof_DCtx() :
    - *  Gives the amount of memory used by a given ZSTD_DCtx */
    -ZSTDLIB_API size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
    -
     /*! ZSTD_createDDict_byReference() :
      *  Create a digested dictionary, ready to start decompression operation without startup delay.
      *  Dictionary content is simply referenced, and therefore stays in dictBuffer.
      *  It is important that dictBuffer outlives DDict, it must remain read accessible throughout the lifetime of DDict */
     ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
     
    +/*! ZSTD_createDDict_advanced() :
    + *  Create a ZSTD_DDict using external alloc and free, optionally by reference */
     ZSTDLIB_API ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
                                                       unsigned byReference, ZSTD_customMem customMem);
     
    -/*! ZSTD_sizeof_DDict() :
    - *  Gives the amount of memory used by a given ZSTD_DDict */
    -ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
    -
     /*! ZSTD_getDictID_fromDict() :
      *  Provides the dictID stored within dictionary.
      *  if @return == 0, the dictionary is not conformant with Zstandard specification.
    @@ -568,7 +601,7 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
      *    Note : this use case also happens when using a non-conformant dictionary.
      *  - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
      *  - This is not a Zstandard frame.
    - *  When identifying the exact failure cause, it's possible to used ZSTD_getFrameParams(), which will provide a more precise error code. */
    + *  When identifying the exact failure cause, it's possible to use ZSTD_getFrameParams(), which will provide a more precise error code. */
     ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
     
     
    @@ -579,22 +612,29 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
     /*=====   Advanced Streaming compression functions  =====*/
     ZSTDLIB_API ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
     ZSTDLIB_API size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize);   /**< pledgedSrcSize must be correct, a size of 0 means unknown.  for a frame size of 0 use initCStream_advanced */
    -ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); /**< note: a dict will not be used if dict == NULL or dictSize < 8 */
    +ZSTDLIB_API size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); /**< note: a dict will not be used if dict == NULL or dictSize < 8. This results in the creation of an internal CDict */
     ZSTDLIB_API size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize,
                                                  ZSTD_parameters params, unsigned long long pledgedSrcSize);  /**< pledgedSrcSize is optional and can be 0 (meaning unknown). note: if the contentSizeFlag is set, pledgedSrcSize == 0 means the source size is actually 0 */
     ZSTDLIB_API size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);  /**< note : cdict will just be referenced, and must outlive compression session */
    -ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);  /**< re-use compression parameters from previous init; skip dictionary loading stage; zcs must be init at least once before. note: pledgedSrcSize must be correct, a size of 0 means unknown.  for a frame size of 0 use initCStream_advanced */
    -ZSTDLIB_API size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
    +ZSTDLIB_API size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, unsigned long long pledgedSrcSize, ZSTD_frameParameters fParams);  /**< same as ZSTD_initCStream_usingCDict(), with control over frame parameters */
    +
    +/*! ZSTD_resetCStream() :
    + *  start a new compression job, using same parameters from previous job.
     + *  This is typically useful to skip the dictionary loading stage, since it will re-use it in-place.
     + *  Note that zcs must have been initialized at least once before using ZSTD_resetCStream().
    + *  pledgedSrcSize==0 means "srcSize unknown".
    + *  If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end.
    + *  @return : 0, or an error code (which can be tested using ZSTD_isError()) */
    +ZSTDLIB_API size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
     
     
     /*=====   Advanced Streaming decompression functions  =====*/
     typedef enum { DStream_p_maxWindowSize } ZSTD_DStreamParameter_e;
     ZSTDLIB_API ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
    -ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /**< note: a dict will not be used if dict == NULL or dictSize < 8 */
     ZSTDLIB_API size_t ZSTD_setDStreamParameter(ZSTD_DStream* zds, ZSTD_DStreamParameter_e paramType, unsigned paramValue);
    +ZSTDLIB_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); /**< note: a dict will not be used if dict == NULL or dictSize < 8 */
     ZSTDLIB_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);  /**< note : ddict will just be referenced, and must outlive decompression session */
     ZSTDLIB_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);  /**< re-use decompression parameters from previous init; saves dictionary loading */
    -ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
     
     
     /*********************************************************************
    @@ -640,8 +680,10 @@ ZSTDLIB_API size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
     ZSTDLIB_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
     ZSTDLIB_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
     ZSTDLIB_API size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize is optional and can be 0 (meaning unknown). note: if the contentSizeFlag is set, pledgedSrcSize == 0 means the source size is actually 0 */
    +ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */
    +ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize);   /* compression parameters are already set within cdict. pledgedSrcSize=0 means null-size */
     ZSTDLIB_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**<  note: if pledgedSrcSize can be 0, indicating unknown size.  if it is non-zero, it must be accurate.  for 0 size frames, use compressBegin_advanced */
    -ZSTDLIB_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, unsigned long long pledgedSrcSize); /**< note: if pledgedSrcSize can be 0, indicating unknown size.  if it is non-zero, it must be accurate.  for 0 size frames, use compressBegin_advanced */
    +
     ZSTDLIB_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
     ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
     
    @@ -707,15 +749,8 @@ ZSTDLIB_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapaci
       It also returns Frame Size as fparamsPtr->frameContentSize.
     */
     
    -typedef struct {
    -    unsigned long long frameContentSize;
    -    unsigned windowSize;
    -    unsigned dictID;
    -    unsigned checksumFlag;
    -} ZSTD_frameParams;
    -
     /*=====   Buffer-less streaming decompression functions  =====*/
    -ZSTDLIB_API size_t ZSTD_getFrameParams(ZSTD_frameParams* fparamsPtr, const void* src, size_t srcSize);   /**< doesn't consume input, see details below */
    +ZSTDLIB_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize);   /**< doesn't consume input, see details below */
     ZSTDLIB_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
     ZSTDLIB_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
     ZSTDLIB_API void   ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
    @@ -735,19 +770,20 @@ ZSTDLIB_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
         - Compressing and decompressing require a context structure
           + Use ZSTD_createCCtx() and ZSTD_createDCtx()
         - It is necessary to init context before starting
    -      + compression : ZSTD_compressBegin()
    -      + decompression : ZSTD_decompressBegin()
    -      + variants _usingDict() are also allowed
    -      + copyCCtx() and copyDCtx() work too
    -    - Block size is limited, it must be <= ZSTD_getBlockSizeMax()
    -      + If you need to compress more, cut data into multiple blocks
    -      + Consider using the regular ZSTD_compress() instead, as frame metadata costs become negligible when source size is large.
    +      + compression : any ZSTD_compressBegin*() variant, including with dictionary
    +      + decompression : any ZSTD_decompressBegin*() variant, including with dictionary
    +      + copyCCtx() and copyDCtx() can be used too
    +    - Block size is limited, it must be <= ZSTD_getBlockSizeMax() <= ZSTD_BLOCKSIZE_ABSOLUTEMAX
    +      + If input is larger than a block size, it's necessary to split input data into multiple blocks
    +      + For inputs larger than a single block size, consider using the regular ZSTD_compress() instead.
    +        Frame metadata is not that costly, and quickly becomes negligible as source size grows larger.
         - When a block is considered not compressible enough, ZSTD_compressBlock() result will be zero.
           In which case, nothing is produced into `dst`.
           + User must test for such outcome and deal directly with uncompressed data
           + ZSTD_decompressBlock() doesn't accept uncompressed data as input !!!
    -      + In case of multiple successive blocks, decoder must be informed of uncompressed block existence to follow proper history.
    -        Use ZSTD_insertBlock() in such a case.
    +      + In case of multiple successive blocks, should some of them be uncompressed,
    +        decoder must be informed of their existence in order to follow proper history.
    +        Use ZSTD_insertBlock() for such a case.
     */
     
     #define ZSTD_BLOCKSIZE_ABSOLUTEMAX (128 * 1024)   /* define, for static allocation */
    diff --git a/programs/Makefile b/programs/Makefile
    index 5620a25a3..bb40253b9 100644
    --- a/programs/Makefile
    +++ b/programs/Makefile
    @@ -18,6 +18,19 @@
     
     ZSTDDIR = ../lib
     
    +# Version numbers
    +LIBVER_SRC := $(ZSTDDIR)/zstd.h
    +LIBVER_MAJOR_SCRIPT:=`sed -n '/define ZSTD_VERSION_MAJOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < $(LIBVER_SRC)`
    +LIBVER_MINOR_SCRIPT:=`sed -n '/define ZSTD_VERSION_MINOR/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < $(LIBVER_SRC)`
    +LIBVER_PATCH_SCRIPT:=`sed -n '/define ZSTD_VERSION_RELEASE/s/.*[[:blank:]]\([0-9][0-9]*\).*/\1/p' < $(LIBVER_SRC)`
    +LIBVER_SCRIPT:= $(LIBVER_MAJOR_SCRIPT).$(LIBVER_MINOR_SCRIPT).$(LIBVER_PATCH_SCRIPT)
    +LIBVER_MAJOR := $(shell echo $(LIBVER_MAJOR_SCRIPT))
    +LIBVER_MINOR := $(shell echo $(LIBVER_MINOR_SCRIPT))
    +LIBVER_PATCH := $(shell echo $(LIBVER_PATCH_SCRIPT))
    +LIBVER  := $(shell echo $(LIBVER_SCRIPT))
    +
    +ZSTD_VERSION=$(LIBVER)
    +
     ifeq ($(shell $(CC) -v 2>&1 | grep -c "gcc version "), 1)
     ALIGN_LOOP = -falign-loops=32
     else
    @@ -28,7 +41,7 @@ CPPFLAGS+= -I$(ZSTDDIR) -I$(ZSTDDIR)/common -I$(ZSTDDIR)/compress \
                -I$(ZSTDDIR)/dictBuilder \
                -DXXH_NAMESPACE=ZSTD_   # because xxhash.o already compiled with this macro from library
     CFLAGS  ?= -O3
    -DEBUGFLAGS = -g -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
    +DEBUGFLAGS = -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
               -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \
               -Wstrict-prototypes -Wundef -Wpointer-arith -Wformat-security
     CFLAGS  += $(DEBUGFLAGS) $(MOREFLAGS)
    @@ -42,12 +55,14 @@ ZSTD_FILES := $(ZSTDDECOMP_FILES) $(ZSTDCOMMON_FILES) $(ZSTDCOMP_FILES)
     ZDICT_FILES := $(ZSTDDIR)/dictBuilder/*.c
     ZSTDDECOMP_O = $(ZSTDDIR)/decompress/zstd_decompress.o
     
    -ifeq ($(ZSTD_LEGACY_SUPPORT), 0)
    +ZSTD_LEGACY_SUPPORT ?= 4
     ZSTDLEGACY_FILES:=
    +ifneq ($(ZSTD_LEGACY_SUPPORT), 0)
    +ifeq ($(shell test $(ZSTD_LEGACY_SUPPORT) -lt 8; echo $$?), 0)
    +	ZSTDLEGACY_FILES += $(shell ls $(ZSTDDIR)/legacy/*.c | grep 'v0[$(ZSTD_LEGACY_SUPPORT)-7]')
    +endif
    +	CPPFLAGS += -I$(ZSTDDIR)/legacy
     else
    -ZSTD_LEGACY_SUPPORT:=1
    -CPPFLAGS  += -I$(ZSTDDIR)/legacy
    -ZSTDLEGACY_FILES:= $(ZSTDDIR)/legacy/*.c
     endif
     
     ZSTDLIB_FILES := $(wildcard $(ZSTD_FILES)) $(wildcard $(ZSTDLEGACY_FILES)) $(wildcard $(ZDICT_FILES))
    @@ -67,12 +82,51 @@ else
     EXT =
     endif
     
    -# zlib detection
     VOID = /dev/null
     -HAVE_ZLIB := $(shell printf '\#include <zlib.h>\nint main(){}' | $(CC) -o have_zlib -x c - -lz 2> $(VOID) && rm have_zlib$(EXT) && echo 1 || echo 0)
    +
    +# thread detection
    +NO_THREAD_MSG := ==> no threads, building without multithreading support
     +HAVE_PTHREAD := $(shell printf '\#include <pthread.h>\nint main(void) { return 0; }' | $(CC) $(FLAGS) -o have_pthread$(EXT) -x c - -pthread 2> $(VOID) && rm have_pthread$(EXT) && echo 1 || echo 0)
    +HAVE_THREAD := $(shell [ "$(HAVE_PTHREAD)" -eq "1" -o -n "$(filter Windows%,$(OS))" ] && echo 1 || echo 0)
    +ifeq ($(HAVE_THREAD), 1)
    +THREAD_MSG := ==> building with threading support
    +THREAD_CPP := -DZSTD_MULTITHREAD
    +THREAD_LD := -pthread
    +else
    +THREAD_MSG := $(NO_THREAD_MSG)
    +endif
    +
    +# zlib detection
    +NO_ZLIB_MSG := ==> no zlib, building zstd without .gz support
     +HAVE_ZLIB := $(shell printf '\#include <zlib.h>\nint main(void) { return 0; }' | $(CC) $(FLAGS) -o have_zlib$(EXT) -x c - -lz 2> $(VOID) && rm have_zlib$(EXT) && echo 1 || echo 0)
     ifeq ($(HAVE_ZLIB), 1)
    +ZLIB_MSG := ==> building zstd with .gz compression support
     ZLIBCPP = -DZSTD_GZCOMPRESS -DZSTD_GZDECOMPRESS
     ZLIBLD = -lz
    +else
    +ZLIB_MSG := $(NO_ZLIB_MSG)
    +endif
    +
    +# lzma detection
    +NO_LZMA_MSG := ==> no liblzma, building zstd without .xz/.lzma support
     +HAVE_LZMA := $(shell printf '\#include <lzma.h>\nint main(void) { return 0; }' | $(CC) $(FLAGS) -o have_lzma$(EXT) -x c - -llzma 2> $(VOID) && rm have_lzma$(EXT) && echo 1 || echo 0)
    +ifeq ($(HAVE_LZMA), 1)
    +LZMA_MSG := ==> building zstd with .xz/.lzma compression support
    +LZMACPP = -DZSTD_LZMACOMPRESS -DZSTD_LZMADECOMPRESS
    +LZMALD = -llzma
    +else
    +LZMA_MSG := $(NO_LZMA_MSG)
    +endif
    +
    +# lz4 detection
    +NO_LZ4_MSG := ==> no liblz4, building zstd without .lz4 support
     +HAVE_LZ4 := $(shell printf '\#include <lz4frame.h>\n\#include <lz4.h>\nint main(void) { return 0; }' | $(CC) $(FLAGS) -o have_lz4$(EXT) -x c - -llz4 2> $(VOID) && rm have_lz4$(EXT) && echo 1 || echo 0)
    +ifeq ($(HAVE_LZ4), 1)
    +LZ4_MSG := ==> building zstd with .lz4 compression support
    +LZ4CPP = -DZSTD_LZ4COMPRESS -DZSTD_LZ4DECOMPRESS
    +LZ4LD = -llz4
    +else
    +LZ4_MSG := $(NO_LZ4_MSG)
     endif
     
     .PHONY: default all clean clean_decomp_o install uninstall generate_res
    @@ -83,25 +137,25 @@ all: zstd
     
     $(ZSTDDECOMP_O): CFLAGS += $(ALIGN_LOOP)
     
    -zstd-internal : CPPFLAGS += -DZSTD_LEGACY_SUPPORT=$(ZSTD_LEGACY_SUPPORT)
    -zstd-internal : $(ZSTDLIB_OBJ) zstdcli.o fileio.o bench.o datagen.o dibio.o
    -ifeq ($(HAVE_ZLIB), 1)
    -	@echo "==> building zstd with .gz decompression support "
    -else
    -	@echo "==> no zlib, building zstd with .zst support only (no .gz support) "
    -endif
    +zstd xzstd zstd4 xzstd4 : CPPFLAGS += $(THREAD_CPP) $(ZLIBCPP)
    +zstd xzstd zstd4 xzstd4 : LDFLAGS += $(THREAD_LD) $(ZLIBLD)
    +xzstd xzstd4 : CPPFLAGS += $(LZMACPP)
    +xzstd xzstd4 : LDFLAGS += $(LZMALD)
    +zstd4 xzstd4 : CPPFLAGS += $(LZ4CPP)
    +zstd4 xzstd4 : LDFLAGS += $(LZ4LD)
    +zstd zstd4 : LZMA_MSG := - xz/lzma support is disabled
    +zstd xzstd : LZ4_MSG := - lz4 support is disabled
    +zstd xzstd zstd4 xzstd4 : CPPFLAGS += -DZSTD_LEGACY_SUPPORT=$(ZSTD_LEGACY_SUPPORT)
    +zstd xzstd zstd4 xzstd4 : $(ZSTDLIB_FILES) zstdcli.o fileio.o bench.o datagen.o dibio.o
    +	@echo "$(THREAD_MSG)"
    +	@echo "$(ZLIB_MSG)"
    +	@echo "$(LZMA_MSG)"
    +	@echo "$(LZ4_MSG)"
     ifneq (,$(filter Windows%,$(OS)))
     	windres/generate_res.bat
     endif
     	$(CC) $(FLAGS) $^ $(RES_FILE) -o zstd$(EXT) $(LDFLAGS)
     
    -zstd-nogz : HAVE_ZLIB=0
    -zstd-nogz : zstd-internal
    -
    -zstd : CPPFLAGS += $(ZLIBCPP)
    -zstd : LDFLAGS += $(ZLIBLD)
    -zstd : zstd-internal
    -
     zstd-release: DEBUGFLAGS :=
     zstd-release: zstd
     
    @@ -112,10 +166,20 @@ ifneq (,$(filter Windows%,$(OS)))
     endif
     	$(CC) -m32 $(FLAGS) $^ $(RES32_FILE) -o $@$(EXT)
     
    -
     zstd-nolegacy : clean_decomp_o
     	$(MAKE) zstd ZSTD_LEGACY_SUPPORT=0
     
    +zstd-nomt : THREAD_CPP :=
    +zstd-nomt : THREAD_LD :=
    +zstd-nomt : THREAD_MSG := - multi-threading disabled
    +zstd-nomt : zstd
    +
    +zstd-nogz : ZLIBCPP :=
    +zstd-nogz : ZLIBLD :=
    +zstd-nogz : ZLIB_MSG := - gzip support is disabled
    +zstd-nogz : zstd
    +
    +
     zstd-pgo : MOREFLAGS = -fprofile-generate
     zstd-pgo : clean zstd
     	./zstd -b19i1 $(PROFILE_WITH)
    @@ -128,22 +192,18 @@ zstd-pgo : clean zstd
     	$(RM) $(ZSTDDECOMP_O)
     	$(MAKE) zstd MOREFLAGS=-fprofile-use
     
    -zstd-frugal: $(ZSTD_FILES) zstdcli.c fileio.c
    +# minimal target, with only zstd compression and decompression. no bench. no legacy.
    +zstd-small: CFLAGS = "-Os -s"
    +zstd-frugal zstd-small: $(ZSTD_FILES) zstdcli.c fileio.c
     	$(CC) $(FLAGS) -DZSTD_NOBENCH -DZSTD_NODICT $^ -o zstd$(EXT)
     
    -zstd-small:
    -	CFLAGS="-Os -s" $(MAKE) zstd-frugal
    -
     zstd-decompress: $(ZSTDCOMMON_FILES) $(ZSTDDECOMP_FILES) zstdcli.c fileio.c
     	$(CC) $(FLAGS) -DZSTD_NOBENCH -DZSTD_NODICT -DZSTD_NOCOMPRESS $^ -o $@$(EXT)
     
     zstd-compress: $(ZSTDCOMMON_FILES) $(ZSTDCOMP_FILES) zstdcli.c fileio.c
     	$(CC) $(FLAGS) -DZSTD_NOBENCH -DZSTD_NODICT -DZSTD_NODECOMPRESS $^ -o $@$(EXT)
     
    -zstdmt: CPPFLAGS += -DZSTD_MULTITHREAD
    -ifeq (,$(filter Windows%,$(OS)))
    -zstdmt: LDFLAGS += -lpthread
    -endif
    +# zstd is now built with Multi-threading by default
     zstdmt: zstd
     
     generate_res:
    @@ -160,6 +220,19 @@ clean:
     clean_decomp_o:
     	@$(RM) $(ZSTDDECOMP_O)
     
    +MD2ROFF = ronn
    +MD2ROFF_FLAGS = --roff --warnings --manual="User Commands" --organization="zstd $(ZSTD_VERSION)"
    +
    +zstd.1: zstd.1.md
    +	cat $^ | $(MD2ROFF) $(MD2ROFF_FLAGS) | sed -n '/^\.\\\".*/!p' > $@
    +
    +man: zstd.1
    +
    +clean-man:
    +	rm zstd.1
    +
    +preview-man: clean-man man
    +	man ./zstd.1
     
     #-----------------------------------------------------------------------------
     # make install is validated only for Linux, OSX, BSD, Hurd and Solaris targets
    @@ -192,6 +265,7 @@ install: zstd
     	@$(INSTALL_PROGRAM) zstd $(DESTDIR)$(BINDIR)/zstd
     	@ln -sf zstd $(DESTDIR)$(BINDIR)/zstdcat
     	@ln -sf zstd $(DESTDIR)$(BINDIR)/unzstd
    +	@ln -sf zstd $(DESTDIR)$(BINDIR)/zstdmt
     	@$(INSTALL_SCRIPT) zstdless $(DESTDIR)$(BINDIR)/zstdless
     	@$(INSTALL_SCRIPT) zstdgrep $(DESTDIR)$(BINDIR)/zstdgrep
     	@echo Installing man pages
    diff --git a/programs/README.md b/programs/README.md
    index 203fd7b49..d7922a096 100644
    --- a/programs/README.md
    +++ b/programs/README.md
    @@ -11,8 +11,29 @@ There are however other Makefile targets that create different variations of CLI
     - `zstd-decompress` : decompressor-only version of CLI; without dictionary builder, benchmark, and support for decompression of legacy zstd versions
     
     
    +#### Compilation variables
    +`zstd` tries to detect and use the following features automatically :
    +
    +- __HAVE_THREAD__ : multithreading is automatically enabled when `pthread` is detected.
    +  It's possible to disable multithread support, by either compiling `zstd-nomt` target or using HAVE_THREAD=0 variable.
    +  Example : make zstd HAVE_THREAD=0
    +  It's also possible to force compilation with multithread support, using HAVE_THREAD=1.
    +  In which case, linking stage will fail if `pthread` library cannot be found.
    +  This might be useful to prevent silent feature disabling.
    +
    +- __HAVE_ZLIB__ : `zstd` can compress and decompress files in `.gz` format.
    +  This is done through command `--format=gzip`.
    +  Alternatively, symlinks named `gzip` or `gunzip` will mimic intended behavior.
    +  .gz support is automatically enabled when `zlib` library is detected at build time.
    +  It's possible to disable .gz support, by either compiling `zstd-nogz` target or using HAVE_ZLIB=0 variable.
    +  Example : make zstd HAVE_ZLIB=0
    +  It's also possible to force compilation with zlib support, using HAVE_ZLIB=1.
    +  In which case, linking stage will fail if `zlib` library cannot be found.
    +  This might be useful to prevent silent feature disabling.
    +
    +
     #### Aggregation of parameters
    -CLI supports aggregation of parameters i.e. `-b1`, `-e18`, and `-i1` can be joined into `-b1e18i1`. 
    +CLI supports aggregation of parameters i.e. `-b1`, `-e18`, and `-i1` can be joined into `-b1e18i1`.
     
     
     #### Dictionary builder in Command Line Interface
    @@ -23,7 +44,7 @@ which can be loaded before compression and decompression.
     
     Using a dictionary, the compression ratio achievable on small data improves dramatically.
     These compression gains are achieved while simultaneously providing faster compression and decompression speeds.
    -Dictionary work if there is some correlation in a family of small data (there is no universal dictionary). 
     +Dictionaries work if there is some correlation in a family of small data (there is no universal dictionary).
     Hence, deploying one dictionary per type of data will provide the greater benefits.
     Dictionary gains are mostly effective in the first few KB. Then, the compression algorithm
     will rely more and more on previously decoded content to compress the rest of the file.
    @@ -35,7 +56,6 @@ Usage of the dictionary builder and created dictionaries with CLI:
     3. Decompress with the dictionary: `zstd --decompress FILE.zst -D dictionaryName`
     
     
    -
     #### Benchmark in Command Line Interface
     CLI includes in-memory compression benchmark module for zstd.
     The benchmark is conducted using given filenames. The files are read into memory and joined together.
    @@ -48,7 +68,6 @@ One can select compression levels starting from `-b` and ending with `-e`.
     The `-i` parameter selects minimal time used for each of tested levels.
     
     
    -
     #### Usage of Command Line Interface
     The full list of options can be obtained with `-h` or `-H` parameter:
     ```
    @@ -62,33 +81,40 @@ Arguments :
      -d     : decompression
      -D file: use `file` as Dictionary
      -o file: result stored into `file` (only if 1 input file)
    - -f     : overwrite output without prompting
    + -f     : overwrite output without prompting and (de)compress links
     --rm    : remove source file(s) after successful de/compression
      -k     : preserve source file(s) (default)
      -h/-H  : display help/long help and exit
     
     Advanced arguments :
      -V     : display Version number and exit
    - -v     : verbose mode; specify multiple times to increase log level (default:2)
    + -v     : verbose mode; specify multiple times to increase verbosity
      -q     : suppress warnings; specify twice to suppress errors too
      -c     : force write to standard output, even if it is the console
    - -r     : operate recursively on directories
     --ultra : enable levels beyond 19, up to 22 (requires more memory)
    + -T#    : use # threads for compression (default:1)
    + -B#    : select size of each job (default:0==automatic)
     --no-dictID : don't write dictID into header (dictionary compression)
     --[no-]check : integrity check (default:enabled)
    + -r     : operate recursively on directories
    +--format=gzip : compress files to the .gz format
     --test  : test compressed file integrity
    ---[no-]sparse : sparse mode (default:enabled on file, disabled on stdout)
    +--[no-]sparse : sparse mode (default:disabled)
    + -M#    : Set a memory usage limit for decompression
    +--      : All arguments after "--" are treated as files
     
     Dictionary builder :
     --train ## : create a dictionary from a training set of files
    +--train-cover[=k=#,d=#,steps=#] : use the cover algorithm with optional args
    +--train-legacy[=s=#] : use the legacy algorithm with selectivity (default: 9)
      -o file : `file` is dictionary name (default: dictionary)
    ---maxdict ## : limit dictionary to specified size (default : 112640)
    - -s#    : dictionary selectivity level (default: 9)
    ---dictID ## : force dictionary ID to specified value (default: random)
    +--maxdict=# : limit dictionary to specified size (default : 112640)
    +--dictID=# : force dictionary ID to specified value (default: random)
     
     Benchmark arguments :
      -b#    : benchmark file(s), using # compression level (default : 1)
      -e#    : test all compression levels from -bX to # (default: 1)
      -i#    : minimum evaluation time in seconds (default : 3s)
      -B#    : cut file into independent blocks of size # (default: no block)
    - ```
    \ No newline at end of file
    +--priority=rt : set process priority to real-time
    +```
    diff --git a/programs/bench.c b/programs/bench.c
    index 2dd1cfb0f..22b871952 100644
    --- a/programs/bench.c
    +++ b/programs/bench.c
    @@ -70,12 +70,12 @@ static U32 g_compressibilityDefault = 50;
     ***************************************/
     #define DISPLAY(...)         fprintf(stderr, __VA_ARGS__)
     #define DISPLAYLEVEL(l, ...) if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); }
    -static U32 g_displayLevel = 2;   /* 0 : no display;   1: errors;   2 : + result + interaction + warnings;   3 : + progression;   4 : + information */
    +static int g_displayLevel = 2;   /* 0 : no display;   1: errors;   2 : + result + interaction + warnings;   3 : + progression;   4 : + information */
     
     #define DISPLAYUPDATE(l, ...) if (g_displayLevel>=l) { \
                 if ((clock() - g_time > refreshRate) || (g_displayLevel>=4)) \
                 { g_time = clock(); DISPLAY(__VA_ARGS__); \
    -            if (g_displayLevel>=4) fflush(stdout); } }
    +            if (g_displayLevel>=4) fflush(stderr); } }
     static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100;
     static clock_t g_time = 0;
     
    @@ -89,7 +89,7 @@ static clock_t g_time = 0;
     #define DEBUGOUTPUT(...) if (DEBUG) DISPLAY(__VA_ARGS__);
     #define EXM_THROW(error, ...)                                             \
     {                                                                         \
    -    DEBUGOUTPUT("Error defined at %s, line %i : \n", __FILE__, __LINE__); \
    +    DEBUGOUTPUT("%s: %i: \n", __FILE__, __LINE__); \
         DISPLAYLEVEL(1, "Error %i : ", error);                                \
         DISPLAYLEVEL(1, __VA_ARGS__);                                         \
         DISPLAYLEVEL(1, " \n");                                               \
    @@ -146,17 +146,20 @@ typedef struct {
     } blockParam_t;
     
     
    -#define MIN(a,b) ((a)<(b) ? (a) : (b))
    -#define MAX(a,b) ((a)>(b) ? (a) : (b))
    +
    +#undef MIN
    +#undef MAX
    +#define MIN(a,b)    ((a) < (b) ? (a) : (b))
    +#define MAX(a,b)    ((a) > (b) ? (a) : (b))
     
     static int BMK_benchMem(const void* srcBuffer, size_t srcSize,
                             const char* displayName, int cLevel,
                             const size_t* fileSizes, U32 nbFiles,
                             const void* dictBuffer, size_t dictBufferSize,
    -                        ZSTD_compressionParameters *comprParams)
    +                        const ZSTD_compressionParameters* comprParams)
     {
         size_t const blockSize = ((g_blockSize>=32 && !g_decodeOnly) ? g_blockSize : srcSize) + (!srcSize) /* avoid div by 0 */ ;
    -    size_t const avgSize = MIN(g_blockSize, (srcSize / nbFiles));
    +    size_t const avgSize = MIN(blockSize, (srcSize / nbFiles));
         U32 const maxNbBlocks = (U32) ((srcSize + (blockSize-1)) / blockSize) + nbFiles;
         blockParam_t* const blockTable = (blockParam_t*) malloc(maxNbBlocks * sizeof(blockParam_t));
         size_t const maxCompressedSize = ZSTD_compressBound(srcSize) + (maxNbBlocks * 1024);   /* add some room for safety */
    @@ -176,22 +179,21 @@ static int BMK_benchMem(const void* srcBuffer, size_t srcSize,
             EXM_THROW(31, "allocation error : not enough memory");
     
         /* init */
    -    if (strlen(displayName)>17) displayName += strlen(displayName)-17;   /* can only display 17 characters */
    +    if (strlen(displayName)>17) displayName += strlen(displayName)-17;   /* display last 17 characters */
         UTIL_initTimer(&ticksPerSecond);
     
    -    if (g_decodeOnly) {
    -        const char* srcPtr = (const char*) srcBuffer;
    -        U64 dSize64 = 0;
    +    if (g_decodeOnly) {  /* benchmark only decompression : source must be already compressed */
    +        const char* srcPtr = (const char*)srcBuffer;
    +        U64 totalDSize64 = 0;
             U32 fileNb;
              for (fileNb=0; fileNb<nbFiles; fileNb++) {
     -            U64 const fSize64 = ZSTD_getDecompressedSize(srcPtr, fileSizes[fileNb]);
     -            dSize64 += fSize64;
     +            U64 const fSize64 = ZSTD_findDecompressedSize(srcPtr, fileSizes[fileNb]);
     +            if (fSize64==0) EXM_THROW(32, "Impossible to determine original size ");
     +            totalDSize64 += fSize64;
                  srcPtr += fileSizes[fileNb];
              }
     -        {   size_t const decodedSize = (size_t)dSize64;
     -            if (dSize64 > decodedSize) EXM_THROW(32, "original size is too large");
    -            if (decodedSize==0) EXM_THROW(32, "Impossible to determine original size ");
    +        {   size_t const decodedSize = (size_t)totalDSize64;
    +            if (totalDSize64 > decodedSize) EXM_THROW(32, "original size is too large");   /* size_t overflow */
                 free(resultBuffer);
                 resultBuffer = malloc(decodedSize);
                 if (!resultBuffer) EXM_THROW(33, "not enough memory");
    @@ -260,12 +262,11 @@ static int BMK_benchMem(const void* srcBuffer, size_t srcSize,
                     UTIL_getTime(&clockStart);
     
                     if (!cCompleted) {   /* still some time to do compression tests */
    -                    ZSTD_parameters zparams = ZSTD_getParams(cLevel, avgSize, dictBufferSize);
                         ZSTD_customMem const cmem = { NULL, NULL, NULL };
    -                    U64 clockLoop = g_nbSeconds ? TIMELOOP_MICROSEC : 1;
    +                    U64 const clockLoop = g_nbSeconds ? TIMELOOP_MICROSEC : 1;
                         U32 nbLoops = 0;
    -                    ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, 1, zparams, cmem);
    -                    if (cdict==NULL) EXM_THROW(1, "ZSTD_createCDict_advanced() allocation failure");
    +                    ZSTD_parameters zparams = ZSTD_getParams(cLevel, avgSize, dictBufferSize);
    +                    ZSTD_CDict* cdict;
                         if (comprParams->windowLog) zparams.cParams.windowLog = comprParams->windowLog;
                         if (comprParams->chainLog) zparams.cParams.chainLog = comprParams->chainLog;
                         if (comprParams->hashLog) zparams.cParams.hashLog = comprParams->hashLog;
    @@ -273,6 +274,8 @@ static int BMK_benchMem(const void* srcBuffer, size_t srcSize,
                         if (comprParams->searchLength) zparams.cParams.searchLength = comprParams->searchLength;
                         if (comprParams->targetLength) zparams.cParams.targetLength = comprParams->targetLength;
                         if (comprParams->strategy) zparams.cParams.strategy = (ZSTD_strategy)(comprParams->strategy - 1);
    +                    cdict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, 1, zparams.cParams, cmem);
    +                    if (cdict==NULL) EXM_THROW(1, "ZSTD_createCDict_advanced() allocation failure");
                         do {
                             U32 blockNb;
                             size_t rSize;
    diff --git a/programs/dibio.c b/programs/dibio.c
    index 5ef202c8a..aac36425c 100644
    --- a/programs/dibio.c
    +++ b/programs/dibio.c
    @@ -53,12 +53,12 @@ static const size_t maxMemory = (sizeof(size_t) == 4) ? (2 GB - 64 MB) : ((size_
     ***************************************/
     #define DISPLAY(...)         fprintf(stderr, __VA_ARGS__)
     #define DISPLAYLEVEL(l, ...) if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); }
    -static unsigned g_displayLevel = 0;   /* 0 : no display;   1: errors;   2: default;  4: full information */
    +static int g_displayLevel = 0;   /* 0 : no display;   1: errors;   2: default;  4: full information */
     
     #define DISPLAYUPDATE(l, ...) if (g_displayLevel>=l) { \
                 if ((DIB_clockSpan(g_time) > refreshRate) || (g_displayLevel>=4)) \
                 { g_time = clock(); DISPLAY(__VA_ARGS__); \
    -            if (g_displayLevel>=4) fflush(stdout); } }
    +            if (g_displayLevel>=4) fflush(stderr); } }
     static const clock_t refreshRate = CLOCKS_PER_SEC * 2 / 10;
     static clock_t g_time = 0;
     
    @@ -89,7 +89,8 @@ unsigned DiB_isError(size_t errorCode) { return ERR_isError(errorCode); }
     
     const char* DiB_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }
     
    -#define MIN(a,b)   ( (a) < (b) ? (a) : (b) )
    +#undef MIN
    +#define MIN(a,b)    ((a) < (b) ? (a) : (b))
     
     
     /* ********************************************************
    diff --git a/programs/fileio.c b/programs/fileio.c
    index 41daa125e..ba15555dd 100644
    --- a/programs/fileio.c
    +++ b/programs/fileio.c
    @@ -31,6 +31,11 @@
      #include <time.h>       /* clock */
      #include <errno.h>      /* errno */
     
    +#if defined (_MSC_VER)
     +#  include <sys/stat.h>
     +#  include <io.h>
    +#endif
    +
     #include "mem.h"
     #include "fileio.h"
     #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_magicNumber, ZSTD_frameHeaderSize_max */
    @@ -44,6 +49,15 @@
     #    define z_const
     #  endif
     #endif
    +#if defined(ZSTD_LZMACOMPRESS) || defined(ZSTD_LZMADECOMPRESS)
     +#  include <lzma.h>
    +#endif
    +
    +#define LZ4_MAGICNUMBER 0x184D2204
    +#if defined(ZSTD_LZ4COMPRESS) || defined(ZSTD_LZ4DECOMPRESS)
     +#  include <lz4frame.h>
     +#  include <lz4.h>
    +#endif
     
     
     /*-*************************************
    @@ -68,10 +82,9 @@
     
     #define CACHELINE 64
     
    -#define MAX_DICT_SIZE (8 MB)   /* protection against large input (attack scenario) */
    +#define DICTSIZE_MAX (32 MB)   /* protection against large input (attack scenario) */
     
     #define FNSPACE 30
    -#define GZ_EXTENSION ".gz"
     
     
     /*-*************************************
    @@ -79,18 +92,20 @@
     ***************************************/
     #define DISPLAY(...)         fprintf(stderr, __VA_ARGS__)
     #define DISPLAYLEVEL(l, ...) { if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); } }
    -static U32 g_displayLevel = 2;   /* 0 : no display;   1: errors;   2 : + result + interaction + warnings;   3 : + progression;   4 : + information */
    +static int g_displayLevel = 2;   /* 0 : no display;   1: errors;   2 : + result + interaction + warnings;   3 : + progression;   4 : + information */
     void FIO_setNotificationLevel(unsigned level) { g_displayLevel=level; }
     
     #define DISPLAYUPDATE(l, ...) { if (g_displayLevel>=l) { \
                 if ((clock() - g_time > refreshRate) || (g_displayLevel>=4)) \
                 { g_time = clock(); DISPLAY(__VA_ARGS__); \
    -            if (g_displayLevel>=4) fflush(stdout); } } }
    +            if (g_displayLevel>=4) fflush(stderr); } } }
     static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100;
     static clock_t g_time = 0;
     
    +#undef MIN
     #define MIN(a,b)    ((a) < (b) ? (a) : (b))
     
    +
     /* ************************************************************
     * Avoid fseek()'s 2GiB barrier with MSVC, MacOS, *BSD, MinGW
     ***************************************************************/
    @@ -186,6 +201,18 @@ void FIO_setOverlapLog(unsigned overlapLog){
     /*-*************************************
     *  Functions
     ***************************************/
    +/** FIO_remove() :
     + * @result : Unlink `path`, even if it's read-only */
    +static int FIO_remove(const char* path)
    +{
    +#if defined(_WIN32) || defined(WIN32)
     +    /* Windows doesn't allow removing read-only files,
     +     * so try to make the file writable first */
    +    chmod(path, _S_IWRITE);
    +#endif
    +    return remove(path);
    +}
    +
     /** FIO_openSrcFile() :
      * condition : `dstFileName` must be non-NULL.
      * @result : FILE* to `dstFileName`, or NULL if it fails */
    @@ -225,23 +252,32 @@ static FILE* FIO_openDstFile(const char* dstFileName)
                 DISPLAYLEVEL(4, "Sparse File Support is automatically disabled on stdout ; try --sparse \n");
             }
         } else {
    -        if (!g_overwrite && strcmp (dstFileName, nulmark)) {  /* Check if destination file already exists */
    +        if (g_sparseFileSupport == 1) {
    +            g_sparseFileSupport = ZSTD_SPARSE_DEFAULT;
    +        }
    +        if (strcmp (dstFileName, nulmark)) {  /* Check if destination file already exists */
                 f = fopen( dstFileName, "rb" );
                 if (f != 0) {  /* dest file exists, prompt for overwrite authorization */
                     fclose(f);
    -                if (g_displayLevel <= 1) {
    -                    /* No interaction possible */
    -                    DISPLAY("zstd: %s already exists; not overwritten  \n", dstFileName);
    -                    return NULL;
    -                }
    -                DISPLAY("zstd: %s already exists; do you wish to overwrite (y/N) ? ", dstFileName);
    -                {   int ch = getchar();
    -                    if ((ch!='Y') && (ch!='y')) {
    -                        DISPLAY("    not overwritten  \n");
    +                if (!g_overwrite) {
    +                    if (g_displayLevel <= 1) {
    +                        /* No interaction possible */
    +                        DISPLAY("zstd: %s already exists; not overwritten  \n", dstFileName);
                             return NULL;
                         }
    -                    while ((ch!=EOF) && (ch!='\n')) ch = getchar();  /* flush rest of input line */
    -        }   }   }
    +                    DISPLAY("zstd: %s already exists; do you wish to overwrite (y/N) ? ", dstFileName);
    +                    {   int ch = getchar();
    +                        if ((ch!='Y') && (ch!='y')) {
    +                            DISPLAY("    not overwritten  \n");
    +                            return NULL;
    +                        }
    +                        while ((ch!=EOF) && (ch!='\n')) ch = getchar();  /* flush rest of input line */
    +                    }
    +                }
    +
    +                /* need to unlink */
    +                FIO_remove(dstFileName);
    +        }   }
             f = fopen( dstFileName, "wb" );
             if (f==NULL) DISPLAYLEVEL(1, "zstd: %s: %s\n", dstFileName, strerror(errno));
         }
    @@ -250,13 +286,13 @@ static FILE* FIO_openDstFile(const char* dstFileName)
     }
     
     
    -/*! FIO_loadFile() :
    -*   creates a buffer, pointed by `*bufferPtr`,
    -*   loads `filename` content into it,
    -*   up to MAX_DICT_SIZE bytes.
    -*   @return : loaded size
    -*/
    -static size_t FIO_loadFile(void** bufferPtr, const char* fileName)
    +/*! FIO_createDictBuffer() :
    + *  creates a buffer, pointed by `*bufferPtr`,
     + *  loads `fileName` content into it, up to DICTSIZE_MAX bytes.
    + *  @return : loaded size
    + *  if fileName==NULL, returns 0 and a NULL pointer
    + */
    +static size_t FIO_createDictBuffer(void** bufferPtr, const char* fileName)
     {
         FILE* fileHandle;
         U64 fileSize;
    @@ -268,14 +304,7 @@ static size_t FIO_loadFile(void** bufferPtr, const char* fileName)
         fileHandle = fopen(fileName, "rb");
         if (fileHandle==0) EXM_THROW(31, "zstd: %s: %s", fileName, strerror(errno));
         fileSize = UTIL_getFileSize(fileName);
    -    if (fileSize > MAX_DICT_SIZE) {
    -        int seekResult;
    -        if (fileSize > 1 GB) EXM_THROW(32, "Dictionary file %s is too large", fileName);   /* avoid extreme cases */
    -        DISPLAYLEVEL(2,"Dictionary %s is too large : using last %u bytes only \n", fileName, (U32)MAX_DICT_SIZE);
    -        seekResult = fseek(fileHandle, (long int)(fileSize-MAX_DICT_SIZE), SEEK_SET);   /* use end of file */
    -        if (seekResult != 0) EXM_THROW(33, "zstd: %s: %s", fileName, strerror(errno));
    -        fileSize = MAX_DICT_SIZE;
    -    }
    +    if (fileSize > DICTSIZE_MAX) EXM_THROW(32, "Dictionary file %s is too large (> %u MB)", fileName, DICTSIZE_MAX >> 20);   /* avoid extreme cases */
         *bufferPtr = malloc((size_t)fileSize);
         if (*bufferPtr==NULL) EXM_THROW(34, "zstd: %s", strerror(errno));
         { size_t const readSize = fread(*bufferPtr, 1, (size_t)fileSize, fileHandle);
    @@ -328,7 +357,7 @@ static cRess_t FIO_createCResources(const char* dictFileName, int cLevel,
     
         /* dictionary */
         {   void* dictBuffer;
    -        size_t const dictBuffSize = FIO_loadFile(&dictBuffer, dictFileName);
    +        size_t const dictBuffSize = FIO_createDictBuffer(&dictBuffer, dictFileName);   /* works with dictFileName==NULL */
             if (dictFileName && (dictBuffer==NULL)) EXM_THROW(32, "zstd: allocation error : can't create dictBuffer");
             {   ZSTD_parameters params = ZSTD_getParams(cLevel, srcSize, dictBuffSize);
                 params.fParams.contentSizeFlag = srcRegFile;
    @@ -340,7 +369,7 @@ static cRess_t FIO_createCResources(const char* dictFileName, int cLevel,
                 if (comprParams->searchLog) params.cParams.searchLog = comprParams->searchLog;
                 if (comprParams->searchLength) params.cParams.searchLength = comprParams->searchLength;
                 if (comprParams->targetLength) params.cParams.targetLength = comprParams->targetLength;
    -            if (comprParams->strategy) params.cParams.strategy = (ZSTD_strategy)(comprParams->strategy - 1);
    +            if (comprParams->strategy) params.cParams.strategy = (ZSTD_strategy)(comprParams->strategy - 1);   /* 0 means : do not change */
     #ifdef ZSTD_MULTITHREAD
                 {   size_t const errorCode = ZSTDMT_initCStream_advanced(ress.cctx, dictBuffer, dictBuffSize, params, srcSize);
                     if (ZSTD_isError(errorCode)) EXM_THROW(33, "Error initializing CStream : %s", ZSTD_getErrorName(errorCode));
    @@ -434,6 +463,143 @@ static unsigned long long FIO_compressGzFrame(cRess_t* ress, const char* srcFile
     #endif
     
     
    +#ifdef ZSTD_LZMACOMPRESS
    +static unsigned long long FIO_compressLzmaFrame(cRess_t* ress, const char* srcFileName, U64 const srcFileSize, int compressionLevel, U64* readsize, int plain_lzma)
    +{
    +    unsigned long long inFileSize = 0, outFileSize = 0;
    +    lzma_stream strm = LZMA_STREAM_INIT;
    +    lzma_action action = LZMA_RUN;
    +    lzma_ret ret;
    +
    +    if (compressionLevel < 0) compressionLevel = 0;
    +    if (compressionLevel > 9) compressionLevel = 9;
    +
    +    if (plain_lzma) {
    +        lzma_options_lzma opt_lzma;
    +        if (lzma_lzma_preset(&opt_lzma, compressionLevel)) EXM_THROW(71, "zstd: %s: lzma_lzma_preset error", srcFileName);
    +        ret = lzma_alone_encoder(&strm, &opt_lzma); /* LZMA */
    +        if (ret != LZMA_OK) EXM_THROW(71, "zstd: %s: lzma_alone_encoder error %d", srcFileName, ret);
    +    } else {
    +        ret = lzma_easy_encoder(&strm, compressionLevel, LZMA_CHECK_CRC64); /* XZ */
    +        if (ret != LZMA_OK) EXM_THROW(71, "zstd: %s: lzma_easy_encoder error %d", srcFileName, ret);
    +    }
    +
    +    strm.next_in = 0;
    +    strm.avail_in = 0;
    +    strm.next_out = ress->dstBuffer;
    +    strm.avail_out = ress->dstBufferSize;
    +
    +    while (1) {
    +        if (strm.avail_in == 0) {
    +            size_t const inSize = fread(ress->srcBuffer, 1, ress->srcBufferSize, ress->srcFile);
    +            if (inSize == 0) action = LZMA_FINISH;
    +            inFileSize += inSize;
    +            strm.next_in = ress->srcBuffer;
    +            strm.avail_in = inSize;
    +        }
    +
    +        ret = lzma_code(&strm, action);
    +
    +        if (ret != LZMA_OK && ret != LZMA_STREAM_END) EXM_THROW(72, "zstd: %s: lzma_code encoding error %d", srcFileName, ret);
    +        {   size_t const compBytes = ress->dstBufferSize - strm.avail_out;
    +            if (compBytes) {
    +                if (fwrite(ress->dstBuffer, 1, compBytes, ress->dstFile) != compBytes) EXM_THROW(73, "Write error : cannot write to output file");
    +                outFileSize += compBytes;
    +                strm.next_out = ress->dstBuffer;
    +                strm.avail_out = ress->dstBufferSize;
    +            }
    +        }
    +        if (!srcFileSize) DISPLAYUPDATE(2, "\rRead : %u MB ==> %.2f%%", (U32)(inFileSize>>20), (double)outFileSize/inFileSize*100)
    +        else DISPLAYUPDATE(2, "\rRead : %u / %u MB ==> %.2f%%", (U32)(inFileSize>>20), (U32)(srcFileSize>>20), (double)outFileSize/inFileSize*100);
    +        if (ret == LZMA_STREAM_END) break;
    +    }
    +
    +    lzma_end(&strm);
    +    *readsize = inFileSize;
    +
    +    return outFileSize;
    +}
    +#endif
    +
    +#ifdef ZSTD_LZ4COMPRESS
    +static int FIO_LZ4_GetBlockSize_FromBlockId (int id) { return (1 << (8 + (2 * id))); }
    +static unsigned long long FIO_compressLz4Frame(cRess_t* ress, const char* srcFileName, U64 const srcFileSize, int compressionLevel, U64* readsize)
    +{
    +    unsigned long long inFileSize = 0, outFileSize = 0;
    +
    +    LZ4F_preferences_t prefs;
    +    LZ4F_compressionContext_t ctx;
    +
    +    LZ4F_errorCode_t const errorCode = LZ4F_createCompressionContext(&ctx, LZ4F_VERSION);
    +    if (LZ4F_isError(errorCode)) EXM_THROW(31, "zstd: failed to create lz4 compression context");
    +
    +    memset(&prefs, 0, sizeof(prefs));
    +
    +#if LZ4_VERSION_NUMBER <= 10600
    +#define LZ4F_blockIndependent blockIndependent
    +#define LZ4F_max4MB max4MB
    +#endif
    +
    +    prefs.autoFlush = 1;
    +    prefs.compressionLevel = compressionLevel;
    +    prefs.frameInfo.blockMode = LZ4F_blockIndependent; /* stick to defaults for lz4 cli */
    +    prefs.frameInfo.blockSizeID = LZ4F_max4MB;
    +    prefs.frameInfo.contentChecksumFlag = (contentChecksum_t)g_checksumFlag;
    +#if LZ4_VERSION_NUMBER >= 10600
    +    prefs.frameInfo.contentSize = srcFileSize;
    +#endif
    +
    +    {
    +        size_t blockSize = FIO_LZ4_GetBlockSize_FromBlockId(LZ4F_max4MB);
    +        size_t readSize;
    +        size_t headerSize = LZ4F_compressBegin(ctx, ress->dstBuffer, ress->dstBufferSize, &prefs);
    +        if (LZ4F_isError(headerSize)) EXM_THROW(33, "File header generation failed : %s", LZ4F_getErrorName(headerSize));
    +        { size_t const sizeCheck = fwrite(ress->dstBuffer, 1, headerSize, ress->dstFile);
    +          if (sizeCheck!=headerSize) EXM_THROW(34, "Write error : cannot write header"); }
    +        outFileSize += headerSize;
    +
    +        /* Read first block */
    +        readSize  = fread(ress->srcBuffer, (size_t)1, (size_t)blockSize, ress->srcFile);
    +        inFileSize += readSize;
    +
    +        /* Main Loop */
    +        while (readSize>0) {
    +            size_t outSize;
    +
    +            /* Compress Block */
    +            outSize = LZ4F_compressUpdate(ctx, ress->dstBuffer, ress->dstBufferSize, ress->srcBuffer, readSize, NULL);
    +            if (LZ4F_isError(outSize)) EXM_THROW(35, "zstd: %s: lz4 compression failed : %s", srcFileName, LZ4F_getErrorName(outSize));
    +            outFileSize += outSize;
    +            if (!srcFileSize) DISPLAYUPDATE(2, "\rRead : %u MB ==> %.2f%%", (U32)(inFileSize>>20), (double)outFileSize/inFileSize*100)
    +            else DISPLAYUPDATE(2, "\rRead : %u / %u MB ==> %.2f%%", (U32)(inFileSize>>20), (U32)(srcFileSize>>20), (double)outFileSize/inFileSize*100);
    +
    +            /* Write Block */
    +            { size_t const sizeCheck = fwrite(ress->dstBuffer, 1, outSize, ress->dstFile);
    +              if (sizeCheck!=outSize) EXM_THROW(36, "Write error : cannot write compressed block"); }
    +
    +            /* Read next block */
    +            readSize  = fread(ress->srcBuffer, (size_t)1, (size_t)blockSize, ress->srcFile);
    +            inFileSize += readSize;
    +        }
    +        if (ferror(ress->srcFile)) EXM_THROW(37, "Error reading %s ", srcFileName);
    +
    +        /* End of Stream mark */
    +        headerSize = LZ4F_compressEnd(ctx, ress->dstBuffer, ress->dstBufferSize, NULL);
    +        if (LZ4F_isError(headerSize)) EXM_THROW(38, "zstd: %s: lz4 end of file generation failed : %s", srcFileName, LZ4F_getErrorName(headerSize));
    +
    +        { size_t const sizeCheck = fwrite(ress->dstBuffer, 1, headerSize, ress->dstFile);
    +          if (sizeCheck!=headerSize) EXM_THROW(39, "Write error : cannot write end of stream"); }
    +        outFileSize += headerSize;
    +    }
    +
    +    *readsize = inFileSize;
    +    LZ4F_freeCompressionContext(ctx);
    +
    +    return outFileSize;
    +}
    +#endif
    +
    +
     /*! FIO_compressFilename_internal() :
      *  same as FIO_compressFilename_extRess(), with `ress.desFile` already opened.
      *  @return : 0 : compression completed correctly,
    @@ -448,14 +614,37 @@ static int FIO_compressFilename_internal(cRess_t ress,
         U64 compressedfilesize = 0;
         U64 const fileSize = UTIL_getFileSize(srcFileName);
     
    -    if (g_compressionType) {
    +    switch (g_compressionType) {
    +        case FIO_zstdCompression:
    +            break;
    +
    +        case FIO_gzipCompression:
     #ifdef ZSTD_GZCOMPRESS
    -        compressedfilesize = FIO_compressGzFrame(&ress, srcFileName, fileSize, compressionLevel, &readsize);
    +            compressedfilesize = FIO_compressGzFrame(&ress, srcFileName, fileSize, compressionLevel, &readsize);
     #else
    -        (void)compressionLevel;
    -        EXM_THROW(20, "zstd: %s: file cannot be compressed as gzip (zstd compiled without ZSTD_GZCOMPRESS) -- ignored \n", srcFileName);
    +            (void)compressionLevel;
    +            EXM_THROW(20, "zstd: %s: file cannot be compressed as gzip (zstd compiled without ZSTD_GZCOMPRESS) -- ignored \n", srcFileName);
     #endif
    -        goto finish;
    +            goto finish;
    +
    +        case FIO_xzCompression:
    +        case FIO_lzmaCompression:
    +#ifdef ZSTD_LZMACOMPRESS
    +            compressedfilesize = FIO_compressLzmaFrame(&ress, srcFileName, fileSize, compressionLevel, &readsize, g_compressionType==FIO_lzmaCompression);
    +#else
    +            (void)compressionLevel;
    +            EXM_THROW(20, "zstd: %s: file cannot be compressed as xz/lzma (zstd compiled without ZSTD_LZMACOMPRESS) -- ignored \n", srcFileName);
    +#endif
    +            goto finish;
    +
    +        case FIO_lz4Compression:
    +#ifdef ZSTD_LZ4COMPRESS
    +            compressedfilesize = FIO_compressLz4Frame(&ress, srcFileName, fileSize, compressionLevel, &readsize);
    +#else
    +            (void)compressionLevel;
    +            EXM_THROW(20, "zstd: %s: file cannot be compressed as lz4 (zstd compiled without ZSTD_LZ4COMPRESS) -- ignored \n", srcFileName);
    +#endif
    +            goto finish;
         }
     
         /* init */
    @@ -475,8 +664,8 @@ static int FIO_compressFilename_internal(cRess_t ress,
             readsize += inSize;
     
             {   ZSTD_inBuffer  inBuff = { ress.srcBuffer, inSize, 0 };
    -            while (inBuff.pos != inBuff.size) {   /* note : is there any possibility of endless loop ? for example, if outBuff is not large enough ? */
    -                ZSTD_outBuffer outBuff= { ress.dstBuffer, ress.dstBufferSize, 0 };
    +            while (inBuff.pos != inBuff.size) {
    +                ZSTD_outBuffer outBuff = { ress.dstBuffer, ress.dstBufferSize, 0 };
     #ifdef ZSTD_MULTITHREAD
                     size_t const result = ZSTDMT_compressStream(ress.cctx, &outBuff, &inBuff);
     #else
    @@ -490,13 +679,13 @@ static int FIO_compressFilename_internal(cRess_t ress,
                         if (sizeCheck!=outBuff.pos) EXM_THROW(25, "Write error : cannot write compressed block into %s", dstFileName);
                         compressedfilesize += outBuff.pos;
             }   }   }
    -#ifdef ZSTD_MULTITHREAD
    -        if (!fileSize) DISPLAYUPDATE(2, "\rRead : %u MB", (U32)(readsize>>20))
    -        else DISPLAYUPDATE(2, "\rRead : %u / %u MB", (U32)(readsize>>20), (U32)(fileSize>>20));
    -#else
    -        if (!fileSize) DISPLAYUPDATE(2, "\rRead : %u MB ==> %.2f%%", (U32)(readsize>>20), (double)compressedfilesize/readsize*100)
    -        else DISPLAYUPDATE(2, "\rRead : %u / %u MB ==> %.2f%%", (U32)(readsize>>20), (U32)(fileSize>>20), (double)compressedfilesize/readsize*100);
    -#endif
    +        if (g_nbThreads > 1) {
    +            if (!fileSize) DISPLAYUPDATE(2, "\rRead : %u MB", (U32)(readsize>>20))
    +            else DISPLAYUPDATE(2, "\rRead : %u / %u MB", (U32)(readsize>>20), (U32)(fileSize>>20));
    +        } else {
    +            if (!fileSize) DISPLAYUPDATE(2, "\rRead : %u MB ==> %.2f%%", (U32)(readsize>>20), (double)compressedfilesize/readsize*100)
    +            else DISPLAYUPDATE(2, "\rRead : %u / %u MB ==> %.2f%%", (U32)(readsize>>20), (U32)(fileSize>>20), (double)compressedfilesize/readsize*100);
    +        }
         }
     
         /* End of Frame */
    @@ -677,7 +866,7 @@ static dRess_t FIO_createDResources(const char* dictFileName)
     
         /* dictionary */
         {   void* dictBuffer;
    -        size_t const dictBufferSize = FIO_loadFile(&dictBuffer, dictFileName);
    +        size_t const dictBufferSize = FIO_createDictBuffer(&dictBuffer, dictFileName);
             size_t const initError = ZSTD_initDStream_usingDict(ress.dctx, dictBuffer, dictBufferSize);
             if (ZSTD_isError(initError)) EXM_THROW(61, "ZSTD_initDStream_usingDict error : %s", ZSTD_getErrorName(initError));
             free(dictBuffer);
    @@ -763,10 +952,10 @@ static void FIO_fwriteSparseEnd(FILE* file, unsigned storedSkips)
     {
         if (storedSkips-->0) {   /* implies g_sparseFileSupport>0 */
             int const seekResult = LONG_SEEK(file, storedSkips, SEEK_CUR);
    -        if (seekResult != 0) EXM_THROW(69, "Final skip error (sparse file)\n");
    +        if (seekResult != 0) EXM_THROW(69, "Final skip error (sparse file)");
             {   const char lastZeroByte[1] = { 0 };
                 size_t const sizeCheck = fwrite(lastZeroByte, 1, 1, file);
    -            if (sizeCheck != 1) EXM_THROW(69, "Write error : cannot write last zero\n");
    +            if (sizeCheck != 1) EXM_THROW(69, "Write error : cannot write last zero");
         }   }
     }
     
    @@ -794,16 +983,19 @@ static unsigned FIO_passThrough(FILE* foutput, FILE* finput, void* buffer, size_
     
     
     /** FIO_decompressFrame() :
    -    @return : size of decoded frame
    +    @return : size of decoded frame, or an error code
     */
    +#define FIO_ERROR_ZSTD_DECODING   ((unsigned long long)(-2))
     unsigned long long FIO_decompressFrame(dRess_t* ress,
                                            FILE* finput,
    +                                       const char* srcFileName,
                                            U64 alreadyDecoded)
     {
         U64 frameSize = 0;
         U32 storedSkips = 0;
     
         ZSTD_resetDStream(ress->dctx);
    +    if (strlen(srcFileName)>20) srcFileName += strlen(srcFileName)-20;   /* display last 20 characters */
     
         /* Header loading (optional, saves one loop) */
         {   size_t const toRead = 9;
    @@ -816,12 +1008,17 @@ unsigned long long FIO_decompressFrame(dRess_t* ress,
             ZSTD_inBuffer  inBuff = { ress->srcBuffer, ress->srcBufferLoaded, 0 };
             ZSTD_outBuffer outBuff= { ress->dstBuffer, ress->dstBufferSize, 0 };
             size_t const readSizeHint = ZSTD_decompressStream(ress->dctx, &outBuff, &inBuff);
    -        if (ZSTD_isError(readSizeHint)) EXM_THROW(36, "Decoding error : %s", ZSTD_getErrorName(readSizeHint));
    +        if (ZSTD_isError(readSizeHint)) {
    +            DISPLAYLEVEL(1, "%s : Decoding error (36) : %s \n",
    +                            srcFileName, ZSTD_getErrorName(readSizeHint));
    +            return FIO_ERROR_ZSTD_DECODING;
    +        }
     
             /* Write block */
             storedSkips = FIO_fwriteSparse(ress->dstFile, ress->dstBuffer, outBuff.pos, storedSkips);
             frameSize += outBuff.pos;
    -        DISPLAYUPDATE(2, "\rDecoded : %u MB...     ", (U32)((alreadyDecoded+frameSize)>>20) );
    +        DISPLAYUPDATE(2, "\r%-20.20s : %u MB...     ",
    +                         srcFileName, (U32)((alreadyDecoded+frameSize)>>20) );
     
             if (inBuff.pos > 0) {
                 memmove(ress->srcBuffer, (char*)ress->srcBuffer + inBuff.pos, inBuff.size - inBuff.pos);
    @@ -829,14 +1026,22 @@ unsigned long long FIO_decompressFrame(dRess_t* ress,
             }
     
             if (readSizeHint == 0) break;   /* end of frame */
    -        if (inBuff.size != inBuff.pos) EXM_THROW(37, "Decoding error : should consume entire input");
    +        if (inBuff.size != inBuff.pos) {
    +            DISPLAYLEVEL(1, "%s : Decoding error (37) : should consume entire input \n",
    +                            srcFileName);
    +            return FIO_ERROR_ZSTD_DECODING;
    +        }
     
             /* Fill input buffer */
             {   size_t const toRead = MIN(readSizeHint, ress->srcBufferSize);  /* support large skippable frames */
                 if (ress->srcBufferLoaded < toRead)
    -                ress->srcBufferLoaded += fread(((char*)ress->srcBuffer) + ress->srcBufferLoaded, 1, toRead - ress->srcBufferLoaded, finput);
    -            if (ress->srcBufferLoaded < toRead) EXM_THROW(39, "Read error : premature end");
    -    }   }
    +                ress->srcBufferLoaded += fread((char*)ress->srcBuffer + ress->srcBufferLoaded,
    +                                               1, toRead - ress->srcBufferLoaded, finput);
    +            if (ress->srcBufferLoaded < toRead) {
    +                DISPLAYLEVEL(1, "%s : Read error (39) : premature end \n",
    +                                srcFileName);
    +                return FIO_ERROR_ZSTD_DECODING;
    +    }   }   }
     
         FIO_fwriteSparseEnd(ress->dstFile, storedSkips);
     
    @@ -849,6 +1054,7 @@ static unsigned long long FIO_decompressGzFrame(dRess_t* ress, FILE* srcFile, co
     {
         unsigned long long outFileSize = 0;
         z_stream strm;
    +    int flush = Z_NO_FLUSH;
         int ret;
     
         strm.zalloc = Z_NULL;
    @@ -866,11 +1072,12 @@ static unsigned long long FIO_decompressGzFrame(dRess_t* ress, FILE* srcFile, co
         for ( ; ; ) {
             if (strm.avail_in == 0) {
                 ress->srcBufferLoaded = fread(ress->srcBuffer, 1, ress->srcBufferSize, srcFile);
    -            if (ress->srcBufferLoaded == 0) break;
    +            if (ress->srcBufferLoaded == 0) flush = Z_FINISH;
                 strm.next_in = (z_const unsigned char*)ress->srcBuffer;
                 strm.avail_in = (uInt)ress->srcBufferLoaded;
             }
    -        ret = inflate(&strm, Z_NO_FLUSH);
    +        ret = inflate(&strm, flush);
    +        if (ret == Z_BUF_ERROR) EXM_THROW(39, "zstd: %s: premature end", srcFileName);
             if (ret != Z_OK && ret != Z_STREAM_END) { DISPLAY("zstd: %s: inflate error %d \n", srcFileName, ret); return 0; }
             {   size_t const decompBytes = ress->dstBufferSize - strm.avail_out;
                 if (decompBytes) {
    @@ -886,12 +1093,125 @@ static unsigned long long FIO_decompressGzFrame(dRess_t* ress, FILE* srcFile, co
         if (strm.avail_in > 0) memmove(ress->srcBuffer, strm.next_in, strm.avail_in);
         ress->srcBufferLoaded = strm.avail_in;
         ret = inflateEnd(&strm);
    -    if (ret != Z_OK) EXM_THROW(32, "zstd: %s: inflateEnd error %d \n", srcFileName, ret);
    +    if (ret != Z_OK) EXM_THROW(32, "zstd: %s: inflateEnd error %d", srcFileName, ret);
         return outFileSize;
     }
     #endif
     
     
    +#ifdef ZSTD_LZMADECOMPRESS
    +static unsigned long long FIO_decompressLzmaFrame(dRess_t* ress, FILE* srcFile, const char* srcFileName, int plain_lzma)
    +{
    +    unsigned long long outFileSize = 0;
    +    lzma_stream strm = LZMA_STREAM_INIT;
    +    lzma_action action = LZMA_RUN;
    +    lzma_ret ret;
    +
    +    strm.next_in = 0;
    +    strm.avail_in = 0;
    +    if (plain_lzma) {
    +        ret = lzma_alone_decoder(&strm, UINT64_MAX); /* LZMA */
    +    } else {
    +        ret = lzma_stream_decoder(&strm, UINT64_MAX, 0); /* XZ */
    +    }
    +
    +    if (ret != LZMA_OK) EXM_THROW(71, "zstd: %s: lzma_alone_decoder/lzma_stream_decoder error %d", srcFileName, ret);
    +
    +    strm.next_out = ress->dstBuffer;
    +    strm.avail_out = ress->dstBufferSize;
    +    strm.avail_in = ress->srcBufferLoaded;
    +    strm.next_in = ress->srcBuffer;
    +
    +    for ( ; ; ) {
    +        if (strm.avail_in == 0) {
    +            ress->srcBufferLoaded = fread(ress->srcBuffer, 1, ress->srcBufferSize, srcFile);
    +            if (ress->srcBufferLoaded == 0) action = LZMA_FINISH;
    +            strm.next_in = ress->srcBuffer;
    +            strm.avail_in = ress->srcBufferLoaded;
    +        }
    +        ret = lzma_code(&strm, action);
    +
    +        if (ret == LZMA_BUF_ERROR) EXM_THROW(39, "zstd: %s: premature end", srcFileName);
    +        if (ret != LZMA_OK && ret != LZMA_STREAM_END) { DISPLAY("zstd: %s: lzma_code decoding error %d \n", srcFileName, ret); return 0; }
    +        {   size_t const decompBytes = ress->dstBufferSize - strm.avail_out;
    +            if (decompBytes) {
    +                if (fwrite(ress->dstBuffer, 1, decompBytes, ress->dstFile) != decompBytes) EXM_THROW(31, "Write error : cannot write to output file");
    +                outFileSize += decompBytes;
    +                strm.next_out = ress->dstBuffer;
    +                strm.avail_out = ress->dstBufferSize;
    +            }
    +        }
    +        if (ret == LZMA_STREAM_END) break;
    +    }
    +
    +    if (strm.avail_in > 0) memmove(ress->srcBuffer, strm.next_in, strm.avail_in);
    +    ress->srcBufferLoaded = strm.avail_in;
    +    lzma_end(&strm);
    +    return outFileSize;
    +}
    +#endif
    +
    +#ifdef ZSTD_LZ4DECOMPRESS
    +static unsigned long long FIO_decompressLz4Frame(dRess_t* ress, FILE* srcFile, const char* srcFileName)
    +{
    +    unsigned long long filesize = 0;
    +    LZ4F_errorCode_t nextToLoad;
    +    LZ4F_decompressionContext_t dCtx;
    +    LZ4F_errorCode_t const errorCode = LZ4F_createDecompressionContext(&dCtx, LZ4F_VERSION);
    +
    +    if (LZ4F_isError(errorCode)) EXM_THROW(61, "zstd: failed to create lz4 decompression context");
    +
     +    /* Init feed with magic number (already consumed from FILE* srcFile) */
    +    {   size_t inSize = 4;
    +        size_t outSize= 0;
    +        MEM_writeLE32(ress->srcBuffer, LZ4_MAGICNUMBER);
    +        nextToLoad = LZ4F_decompress(dCtx, ress->dstBuffer, &outSize, ress->srcBuffer, &inSize, NULL);
    +        if (LZ4F_isError(nextToLoad)) EXM_THROW(62, "zstd: %s: lz4 header error : %s", srcFileName, LZ4F_getErrorName(nextToLoad));
    +    }
    +
    +    /* Main Loop */
    +    for (;nextToLoad;) {
    +        size_t readSize;
    +        size_t pos = 0;
    +        size_t decodedBytes = ress->dstBufferSize;
    +
    +        /* Read input */
    +        if (nextToLoad > ress->srcBufferSize) nextToLoad = ress->srcBufferSize;
    +        readSize = fread(ress->srcBuffer, 1, nextToLoad, srcFile);
    +        if (!readSize) break;   /* reached end of file or stream */
    +
    +        while ((pos < readSize) || (decodedBytes == ress->dstBufferSize)) {  /* still to read, or still to flush */
    +            /* Decode Input (at least partially) */
    +            size_t remaining = readSize - pos;
    +            decodedBytes = ress->dstBufferSize;
    +            nextToLoad = LZ4F_decompress(dCtx, ress->dstBuffer, &decodedBytes, (char*)(ress->srcBuffer)+pos, &remaining, NULL);
    +            if (LZ4F_isError(nextToLoad)) EXM_THROW(66, "zstd: %s: decompression error : %s", srcFileName, LZ4F_getErrorName(nextToLoad));
    +            pos += remaining;
    +
    +            /* Write Block */
    +            if (decodedBytes) {
    +                if (fwrite(ress->dstBuffer, 1, decodedBytes, ress->dstFile) != decodedBytes) EXM_THROW(63, "Write error : cannot write to output file");
    +                filesize += decodedBytes;
    +                DISPLAYUPDATE(2, "\rDecompressed : %u MB  ", (unsigned)(filesize>>20));
    +            }
    +
    +            if (!nextToLoad) break;
    +        }
    +    }
    +    /* can be out because readSize == 0, which could be an fread() error */
    +    if (ferror(srcFile)) EXM_THROW(67, "zstd: %s: read error", srcFileName);
    +
    +    if (nextToLoad!=0) EXM_THROW(68, "zstd: %s: unfinished stream", srcFileName);
    +
    +    LZ4F_freeDecompressionContext(dCtx);
    +    ress->srcBufferLoaded = 0; /* LZ4F will go to the frame boundary */
    +
    +    return filesize;
    +}
    +#endif
    +
    +
    +
     /** FIO_decompressSrcFile() :
         Decompression `srcFileName` into `ress.dstFile`
         @return : 0 : OK
    @@ -917,14 +1237,19 @@ static int FIO_decompressSrcFile(dRess_t ress, const char* dstFileName, const ch
             size_t const toRead = 4;
             const BYTE* buf = (const BYTE*)ress.srcBuffer;
             if (ress.srcBufferLoaded < toRead)
    -            ress.srcBufferLoaded += fread((char*)ress.srcBuffer + ress.srcBufferLoaded, (size_t)1, toRead - ress.srcBufferLoaded, srcFile);
    +            ress.srcBufferLoaded += fread((char*)ress.srcBuffer + ress.srcBufferLoaded,
    +                                          (size_t)1, toRead - ress.srcBufferLoaded, srcFile);
             if (ress.srcBufferLoaded==0) {
    -            if (readSomething==0) { DISPLAY("zstd: %s: unexpected end of file \n", srcFileName); fclose(srcFile); return 1; }  /* srcFileName is empty */
    +            if (readSomething==0) {
    +                DISPLAY("zstd: %s: unexpected end of file \n", srcFileName);
    +                fclose(srcFile);
    +                return 1;
    +            }  /* srcFileName is empty */
                 break;   /* no more input */
             }
             readSomething = 1;   /* there is at least >= 4 bytes in srcFile */
             if (ress.srcBufferLoaded < toRead) { DISPLAY("zstd: %s: unknown header \n", srcFileName); fclose(srcFile); return 1; }  /* srcFileName is empty */
    -        if (buf[0] == 31 && buf[1] == 139) { /* gz header */
    +        if (buf[0] == 31 && buf[1] == 139) { /* gz magic number */
     #ifdef ZSTD_GZDECOMPRESS
                 unsigned long long const result = FIO_decompressGzFrame(&ress, srcFile, srcFileName);
                 if (result == 0) return 1;
    @@ -932,11 +1257,31 @@ static int FIO_decompressSrcFile(dRess_t ress, const char* dstFileName, const ch
     #else
                 DISPLAYLEVEL(1, "zstd: %s: gzip file cannot be uncompressed (zstd compiled without ZSTD_GZDECOMPRESS) -- ignored \n", srcFileName);
                 return 1;
    +#endif
    +        } else if ((buf[0] == 0xFD && buf[1] == 0x37)  /* xz magic number */
    +                || (buf[0] == 0x5D && buf[1] == 0x00)) { /* lzma header (no magic number) */
    +#ifdef ZSTD_LZMADECOMPRESS
    +            unsigned long long const result = FIO_decompressLzmaFrame(&ress, srcFile, srcFileName, buf[0] != 0xFD);
    +            if (result == 0) return 1;
    +            filesize += result;
    +#else
    +            DISPLAYLEVEL(1, "zstd: %s: xz/lzma file cannot be uncompressed (zstd compiled without ZSTD_LZMADECOMPRESS) -- ignored \n", srcFileName);
    +            return 1;
    +#endif
    +        } else if (MEM_readLE32(buf) == LZ4_MAGICNUMBER) {
    +#ifdef ZSTD_LZ4DECOMPRESS
    +            unsigned long long const result = FIO_decompressLz4Frame(&ress, srcFile, srcFileName);
    +            if (result == 0) return 1;
    +            filesize += result;
    +#else
    +            DISPLAYLEVEL(1, "zstd: %s: lz4 file cannot be uncompressed (zstd compiled without ZSTD_LZ4DECOMPRESS) -- ignored \n", srcFileName);
    +            return 1;
     #endif
             } else {
                 if (!ZSTD_isFrame(ress.srcBuffer, toRead)) {
                     if ((g_overwrite) && !strcmp (dstFileName, stdoutmark)) {  /* pass-through mode */
    -                    unsigned const result = FIO_passThrough(ress.dstFile, srcFile, ress.srcBuffer, ress.srcBufferSize, ress.srcBufferLoaded);
    +                    unsigned const result = FIO_passThrough(ress.dstFile, srcFile,
    +                                                            ress.srcBuffer, ress.srcBufferSize, ress.srcBufferLoaded);
                         if (fclose(srcFile)) EXM_THROW(32, "zstd: %s close error", srcFileName);  /* error should never happen */
                         return result;
                     } else {
    @@ -944,9 +1289,15 @@ static int FIO_decompressSrcFile(dRess_t ress, const char* dstFileName, const ch
                         fclose(srcFile);
                         return 1;
                 }   }
    -            filesize += FIO_decompressFrame(&ress, srcFile, filesize);
    +            {   unsigned long long const frameSize = FIO_decompressFrame(&ress, srcFile, srcFileName, filesize);
    +                if (frameSize == FIO_ERROR_ZSTD_DECODING) {
    +                    fclose(srcFile);
    +                    return 1;
    +                }
    +                filesize += frameSize;
    +            }
             }
    -    }
    +    }   /* for each frame */
     
         /* Final Status */
         DISPLAYLEVEL(2, "\r%79s\r", "");
    @@ -1020,37 +1371,41 @@ int FIO_decompressMultipleFilenames(const char** srcNamesTable, unsigned nbFiles
                 missingFiles += FIO_decompressSrcFile(ress, suffix, srcNamesTable[u]);
             if (fclose(ress.dstFile)) EXM_THROW(72, "Write error : cannot properly close stdout");
         } else {
    -        size_t const suffixSize = strlen(suffix);
    -        size_t const gzipSuffixSize = strlen(GZ_EXTENSION);
    +        size_t suffixSize;
             size_t dfnSize = FNSPACE;
             unsigned u;
             char* dstFileName = (char*)malloc(FNSPACE);
             if (dstFileName==NULL) EXM_THROW(73, "not enough memory for dstFileName");
             for (u=0; u= 1)) || (PLATFORM_POSIX_VERSION >= 200112L) || defined(__DJGPP__)
     #  include    /* isatty */
     #  define IS_CONSOLE(stdStream) isatty(fileno(stdStream))
    -#elif defined(MSDOS) || defined(OS2) || defined(WIN32) || defined(_WIN32) || defined(__CYGWIN__)
    +#elif defined(MSDOS) || defined(OS2) || defined(__CYGWIN__)
     #  include        /* _isatty */
     #  define IS_CONSOLE(stdStream) _isatty(_fileno(stdStream))
    +#elif defined(WIN32) || defined(_WIN32)
    +#  include       /* _isatty */
    +#  include  /* DeviceIoControl, HANDLE, FSCTL_SET_SPARSE */
    +#  include    /* FILE */
    +static __inline int IS_CONSOLE(FILE* stdStream)
    +{
    +    DWORD dummy;
    +    return _isatty(_fileno(stdStream)) && GetConsoleMode((HANDLE)_get_osfhandle(_fileno(stdStream)), &dummy);
    +}
     #else
     #  define IS_CONSOLE(stdStream) 0
     #endif
    @@ -129,6 +138,14 @@ extern "C" {
     #endif
     
     
    +#ifndef ZSTD_SPARSE_DEFAULT
    +#  if (defined(__APPLE__) && defined(__MACH__))
    +#    define ZSTD_SPARSE_DEFAULT 0
    +#  else
    +#    define ZSTD_SPARSE_DEFAULT 1
    +#  endif
    +#endif
    +
     
     #if defined (__cplusplus)
     }
    diff --git a/programs/util.h b/programs/util.h
    index 59e19d027..5f437b2b2 100644
    --- a/programs/util.h
    +++ b/programs/util.h
    @@ -1,6 +1,6 @@
     /**
      * util.h - utility functions
    - * 
    + *
      * Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
      * All rights reserved.
      *
    @@ -25,6 +25,7 @@ extern "C" {
     #include        /* malloc */
     #include        /* size_t, ptrdiff_t */
     #include         /* fprintf */
    +#include        /* strncmp */
     #include     /* stat, utime */
     #include      /* stat */
     #if defined(_MSC_VER)
    @@ -166,8 +167,8 @@ UTIL_STATIC void UTIL_waitForNextTick(UTIL_freq_t ticksPerSecond)
     *  File functions
     ******************************************/
     #if defined(_MSC_VER)
    -	#define chmod _chmod
    -	typedef struct __stat64 stat_t;
    +    #define chmod _chmod
    +    typedef struct __stat64 stat_t;
     #else
         typedef struct stat stat_t;
     #endif
    @@ -178,9 +179,9 @@ UTIL_STATIC int UTIL_setFileStat(const char *filename, stat_t *statbuf)
         int res = 0;
         struct utimbuf timebuf;
     
    -	timebuf.actime = time(NULL);
    -	timebuf.modtime = statbuf->st_mtime;
    -	res += utime(filename, &timebuf);  /* set access and modification times */
    +    timebuf.actime = time(NULL);
    +    timebuf.modtime = statbuf->st_mtime;
    +    res += utime(filename, &timebuf);  /* set access and modification times */
     
     #if !defined(_WIN32)
         res += chown(filename, statbuf->st_uid, statbuf->st_gid);  /* Copy ownership */
    @@ -228,6 +229,20 @@ UTIL_STATIC U32 UTIL_isDirectory(const char* infilename)
         return 0;
     }
     
    +UTIL_STATIC U32 UTIL_isLink(const char* infilename)
    +{
    +#if defined(_WIN32)
    +    /* no symlinks on windows */
    +    (void)infilename;
    +#else
    +    int r;
    +    stat_t statbuf;
    +    r = lstat(infilename, &statbuf);
    +    if (!r && S_ISLNK(statbuf.st_mode)) return 1;
    +#endif
    +    return 0;
    +}
    +
     
     UTIL_STATIC U64 UTIL_getFileSize(const char* infilename)
     {
    @@ -271,11 +286,14 @@ UTIL_STATIC void *UTIL_realloc(void *ptr, size_t size)
         return NULL;
     }
     
    +static int g_utilDisplayLevel;
    +#define UTIL_DISPLAY(...)         fprintf(stderr, __VA_ARGS__)
    +#define UTIL_DISPLAYLEVEL(l, ...) { if (g_utilDisplayLevel>=l) { UTIL_DISPLAY(__VA_ARGS__); } }
     
     #ifdef _WIN32
     #  define UTIL_HAS_CREATEFILELIST
     
    -UTIL_STATIC int UTIL_prepareFileList(const char *dirName, char** bufStart, size_t* pos, char** bufEnd)
    +UTIL_STATIC int UTIL_prepareFileList(const char *dirName, char** bufStart, size_t* pos, char** bufEnd, int followLinks)
     {
         char* path;
         int dirLength, fnameLength, pathLength, nbFiles = 0;
    @@ -311,7 +329,7 @@ UTIL_STATIC int UTIL_prepareFileList(const char *dirName, char** bufStart, size_
                 if (strcmp (cFile.cFileName, "..") == 0 ||
                     strcmp (cFile.cFileName, ".") == 0) continue;
     
    -            nbFiles += UTIL_prepareFileList(path, bufStart, pos, bufEnd);  /* Recursively call "UTIL_prepareFileList" with the new path. */
    +            nbFiles += UTIL_prepareFileList(path, bufStart, pos, bufEnd, followLinks);  /* Recursively call "UTIL_prepareFileList" with the new path. */
                 if (*bufStart == NULL) { free(path); FindClose(hFile); return 0; }
             }
             else if ((cFile.dwFileAttributes & FILE_ATTRIBUTE_NORMAL) || (cFile.dwFileAttributes & FILE_ATTRIBUTE_ARCHIVE) || (cFile.dwFileAttributes & FILE_ATTRIBUTE_COMPRESSED)) {
    @@ -339,7 +357,7 @@ UTIL_STATIC int UTIL_prepareFileList(const char *dirName, char** bufStart, size_
     #  include        /* opendir, readdir */
     #  include        /* strerror, memcpy */
     
    -UTIL_STATIC int UTIL_prepareFileList(const char *dirName, char** bufStart, size_t* pos, char** bufEnd)
    +UTIL_STATIC int UTIL_prepareFileList(const char *dirName, char** bufStart, size_t* pos, char** bufEnd, int followLinks)
     {
         DIR *dir;
         struct dirent *entry;
    @@ -360,13 +378,19 @@ UTIL_STATIC int UTIL_prepareFileList(const char *dirName, char** bufStart, size_
             path = (char*) malloc(dirLength + fnameLength + 2);
             if (!path) { closedir(dir); return 0; }
             memcpy(path, dirName, dirLength);
    +
             path[dirLength] = '/';
             memcpy(path+dirLength+1, entry->d_name, fnameLength);
             pathLength = dirLength+1+fnameLength;
             path[pathLength] = 0;
     
    +        if (!followLinks && UTIL_isLink(path)) {
    +            UTIL_DISPLAYLEVEL(2, "Warning : %s is a symbolic link, ignoring\n", path);
    +            continue;
    +        }
    +
             if (UTIL_isDirectory(path)) {
    -            nbFiles += UTIL_prepareFileList(path, bufStart, pos, bufEnd);  /* Recursively call "UTIL_prepareFileList" with the new path. */
    +            nbFiles += UTIL_prepareFileList(path, bufStart, pos, bufEnd, followLinks);  /* Recursively call "UTIL_prepareFileList" with the new path. */
                 if (*bufStart == NULL) { free(path); closedir(dir); return 0; }
             } else {
                 if (*bufStart + *pos + pathLength >= *bufEnd) {
    @@ -396,7 +420,7 @@ UTIL_STATIC int UTIL_prepareFileList(const char *dirName, char** bufStart, size_
     
     #else
     
    -UTIL_STATIC int UTIL_prepareFileList(const char *dirName, char** bufStart, size_t* pos, char** bufEnd)
    +UTIL_STATIC int UTIL_prepareFileList(const char *dirName, char** bufStart, size_t* pos, char** bufEnd, int followLinks)
     {
         (void)bufStart; (void)bufEnd; (void)pos;
         fprintf(stderr, "Directory %s ignored (compiled without _WIN32 or _POSIX_C_SOURCE)\n", dirName);
    @@ -411,7 +435,7 @@ UTIL_STATIC int UTIL_prepareFileList(const char *dirName, char** bufStart, size_
      * After finishing usage of the list the structures should be freed with UTIL_freeFileList(params: return value, allocatedBuffer)
      * In case of error UTIL_createFileList returns NULL and UTIL_freeFileList should not be called.
      */
    -UTIL_STATIC const char** UTIL_createFileList(const char **inputNames, unsigned inputNamesNb, char** allocatedBuffer, unsigned* allocatedNamesNb)
    +UTIL_STATIC const char** UTIL_createFileList(const char **inputNames, unsigned inputNamesNb, char** allocatedBuffer, unsigned* allocatedNamesNb, int followLinks)
     {
         size_t pos;
         unsigned i, nbFiles;
    @@ -436,7 +460,7 @@ UTIL_STATIC const char** UTIL_createFileList(const char **inputNames, unsigned i
                     nbFiles++;
                 }
             } else {
    -            nbFiles += UTIL_prepareFileList(inputNames[i], &buf, &pos, &bufend);
    +            nbFiles += UTIL_prepareFileList(inputNames[i], &buf, &pos, &bufend, followLinks);
                 if (buf == NULL) return NULL;
         }   }
     
    @@ -465,6 +489,201 @@ UTIL_STATIC void UTIL_freeFileList(const char** filenameTable, char* allocatedBu
         if (filenameTable) free((void*)filenameTable);
     }
     
    +/* count the number of physical cores */
    +#if defined(_WIN32) || defined(WIN32)
    +
    +#include 
    +
    +typedef BOOL(WINAPI* LPFN_GLPI)(PSYSTEM_LOGICAL_PROCESSOR_INFORMATION, PDWORD);
    +
    +UTIL_STATIC int UTIL_countPhysicalCores(void)
    +{
    +    static int numPhysicalCores = 0;
    +    if (numPhysicalCores != 0) return numPhysicalCores;
    +
    +    {   LPFN_GLPI glpi;
    +        BOOL done = FALSE;
    +        PSYSTEM_LOGICAL_PROCESSOR_INFORMATION buffer = NULL;
    +        PSYSTEM_LOGICAL_PROCESSOR_INFORMATION ptr = NULL;
    +        DWORD returnLength = 0;
    +        size_t byteOffset = 0;
    +
    +        glpi = (LPFN_GLPI)GetProcAddress(GetModuleHandle(TEXT("kernel32")),
    +                                         "GetLogicalProcessorInformation");
    +
    +        if (glpi == NULL) {
    +            goto failed;
    +        }
    +
    +        while(!done) {
    +            DWORD rc = glpi(buffer, &returnLength);
    +            if (FALSE == rc) {
    +                if (GetLastError() == ERROR_INSUFFICIENT_BUFFER) {
    +                    if (buffer)
    +                        free(buffer);
    +                    buffer = (PSYSTEM_LOGICAL_PROCESSOR_INFORMATION)malloc(returnLength);
    +
    +                    if (buffer == NULL) {
    +                        perror("zstd");
    +                        exit(1);
    +                    }
    +                } else {
    +                    /* some other error */
    +                    goto failed;
    +                }
    +            } else {
    +                done = TRUE;
    +            }
    +        }
    +
    +        ptr = buffer;
    +
    +        while (byteOffset + sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION) <= returnLength) {
    +
    +            if (ptr->Relationship == RelationProcessorCore) {
    +                numPhysicalCores++;
    +            }
    +
    +            ptr++;
    +            byteOffset += sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
    +        }
    +
    +        free(buffer);
    +
    +        return numPhysicalCores;
    +    }
    +
    +failed:
    +    /* try to fall back on GetSystemInfo */
    +    {   SYSTEM_INFO sysinfo;
    +        GetSystemInfo(&sysinfo);
    +        numPhysicalCores = sysinfo.dwNumberOfProcessors;
    +        if (numPhysicalCores == 0) numPhysicalCores = 1; /* just in case */
    +    }
    +    return numPhysicalCores;
    +}
    +
    +#elif defined(__APPLE__)
    +
    +#include 
    +
    +/* Use apple-provided syscall
    + * see: man 3 sysctl */
    +UTIL_STATIC int UTIL_countPhysicalCores(void)
    +{
    +    static S32 numPhysicalCores = 0; /* apple specifies int32_t */
    +    if (numPhysicalCores != 0) return numPhysicalCores;
    +
    +    {   size_t size = sizeof(S32);
    +        int const ret = sysctlbyname("hw.physicalcpu", &numPhysicalCores, &size, NULL, 0);
    +        if (ret != 0) {
    +            if (errno == ENOENT) {
    +                /* entry not present, fall back on 1 */
    +                numPhysicalCores = 1;
    +            } else {
    +                perror("zstd: can't get number of physical cpus");
    +                exit(1);
    +            }
    +        }
    +
    +        return numPhysicalCores;
    +    }
    +}
    +
    +#elif defined(__linux__)
    +
    +/* parse /proc/cpuinfo
    + * siblings / cpu cores should give hyperthreading ratio
    + * otherwise fall back on sysconf */
    +UTIL_STATIC int UTIL_countPhysicalCores(void)
    +{
    +    static int numPhysicalCores = 0;
    +
    +    if (numPhysicalCores != 0) return numPhysicalCores;
    +
    +    numPhysicalCores = (int)sysconf(_SC_NPROCESSORS_ONLN);
    +    if (numPhysicalCores == -1) {
    +        /* value not queryable, fall back on 1 */
    +        return numPhysicalCores = 1;
    +    }
    +
    +    /* try to determine if there's hyperthreading */
    +    {   FILE* const cpuinfo = fopen("/proc/cpuinfo", "r");
    +        size_t const BUF_SIZE = 80;
    +        char buff[BUF_SIZE];
    +
    +        int siblings = 0;
    +        int cpu_cores = 0;
    +        int ratio = 1;
    +
    +        if (cpuinfo == NULL) {
    +            /* fall back on the sysconf value */
    +            return numPhysicalCores;
    +        }
    +
    +        /* assume the cpu cores/siblings values will be constant across all
    +         * present processors */
    +        while (!feof(cpuinfo)) {
    +            if (fgets(buff, BUF_SIZE, cpuinfo) != NULL) {
    +                if (strncmp(buff, "siblings", 8) == 0) {
    +                    const char* const sep = strchr(buff, ':');
    +                    if (*sep == '\0') {
    +                        /* formatting was broken? */
    +                        goto failed;
    +                    }
    +
    +                    siblings = atoi(sep + 1);
    +                }
    +                if (strncmp(buff, "cpu cores", 9) == 0) {
    +                    const char* const sep = strchr(buff, ':');
    +                    if (*sep == '\0') {
    +                        /* formatting was broken? */
    +                        goto failed;
    +                    }
    +
    +                    cpu_cores = atoi(sep + 1);
    +                }
    +            } else if (ferror(cpuinfo)) {
    +                /* fall back on the sysconf value */
    +                goto failed;
    +            }
    +        }
    +        if (siblings && cpu_cores) {
    +            ratio = siblings / cpu_cores;
    +        }
    +failed:
    +        fclose(cpuinfo);
    +        return numPhysicalCores = numPhysicalCores / ratio;
    +    }
    +}
    +
    +#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)
    +
    +/* Use POSIX sysconf
    + * see: man 3 sysconf */
    +UTIL_STATIC int UTIL_countPhysicalCores(void)
    +{
    +    static int numPhysicalCores = 0;
    +
    +    if (numPhysicalCores != 0) return numPhysicalCores;
    +
    +    numPhysicalCores = (int)sysconf(_SC_NPROCESSORS_ONLN);
    +    if (numPhysicalCores == -1) {
    +        /* value not queryable, fall back on 1 */
    +        return numPhysicalCores = 1;
    +    }
    +    return numPhysicalCores;
    +}
    +
    +#else
    +
    +UTIL_STATIC int UTIL_countPhysicalCores(void)
    +{
    +    /* assume 1 */
    +    return 1;
    +}
    +
    +#endif
     
     #if defined (__cplusplus)
     }
    diff --git a/programs/zstd.1 b/programs/zstd.1
    index 2e4875fb9..0150b05df 100644
    --- a/programs/zstd.1
    +++ b/programs/zstd.1
    @@ -1,408 +1,334 @@
    -\"
    -\" zstd.1: This is a manual page for 'zstd' program. This file is part of the
    -\" zstd  project.
    -\" Author: Yann Collet
    -\"
    -
    -\" No hyphenation
    -.hy 0
    -.nr HY 0
    -
    -.TH zstd "1" "2015-08-22" "zstd" "User Commands"
    -.SH NAME
    -\fBzstd, unzstd, zstdcat\fR - Compress or decompress .zst files
    -
    -.SH SYNOPSIS
    -.TP 5
    -\fBzstd\fR [\fBOPTIONS\fR] [-|INPUT-FILE] [-o ]
    -.PP
    -.B unzstd
    -is equivalent to
    -.BR "zstd \-d"
    -.br
    -.B zstdcat
    -is equivalent to
    -.BR "zstd \-dcf"
    -.br
    -
    -.SH DESCRIPTION
    -.PP
    -\fBzstd\fR is a fast lossless compression algorithm
    -and data compression tool,
    -with command line syntax similar to \fB gzip (1) \fR and \fB xz (1) \fR .
    -It is based on the \fBLZ77\fR family, with further FSE & huff0 entropy stages.
    -\fBzstd\fR offers highly configurable compression speed,
    -with fast modes at > 200 MB/s per core,
    -and strong modes nearing lzma compression ratios.
    -It also features a very fast decoder, with speeds > 500 MB/s per core.
    -
    -\fBzstd\fR command line syntax is generally similar to gzip,
    -but features the following differences :
    - - Source files are preserved by default.
    -   It's possible to remove them automatically by using \fB--rm\fR command.
    - - When compressing a single file, \fBzstd\fR displays progress notifications and result summary by default.
    -   Use \fB-q\fR to turn them off
    -
    -.PP
    -.B zstd
    -compresses or decompresses each
    -.I file
    -according to the selected operation mode.
    -If no
    -.I files
    -are given or
    -.I file
    -is
    -.BR \- ,
    -.B zstd
    -reads from standard input and writes the processed data
    -to standard output.
    -.B zstd
    -will refuse (display an error and skip the
    -.IR file )
    -to write compressed data to standard output if it is a terminal.
    -Similarly,
    -.B zstd
    -will refuse to read compressed data
    -from standard input if it is a terminal.
    -
    -.PP
    -Unless
    -.B \-\-stdout
    -or
    -.B \-o
    -is specified,
    -.I files
    -are written to a new file whose name is derived from the source
    -.I file
    -name:
    -.IP \(bu 3
    -When compressing, the suffix
    -.B .zst
    -is appended to the source filename to get the target filename.
    -.IP \(bu 3
    -When decompressing, the
    -.B .zst
    -suffix is removed from the filename to get the target filename.
    -
    -.SS "Concatenation with .zst files"
    -It is possible to concatenate
    -.B .zst
    -files as is.
    -.B zstd
    -will decompress such files as if they were a single
    -.B .zst
    -file.
    -
    -
    -
    -.SH OPTIONS
    -
    +.
    +.TH "ZSTD" "1" "May 2017" "zstd 1.3.0" "User Commands"
    +.
    +.SH "NAME"
    +\fBzstd\fR \- zstd, zstdmt, unzstd, zstdcat \- Compress or decompress \.zst files
    +.
    +.SH "SYNOPSIS"
    +\fBzstd\fR [\fIOPTIONS\fR] [\-|\fIINPUT\-FILE\fR] [\-o \fIOUTPUT\-FILE\fR]
    +.
    +.P
    +\fBzstdmt\fR is equivalent to \fBzstd \-T0\fR
    +.
    +.P
    +\fBunzstd\fR is equivalent to \fBzstd \-d\fR
    +.
    +.P
    +\fBzstdcat\fR is equivalent to \fBzstd \-dcf\fR
    +.
    +.SH "DESCRIPTION"
    +\fBzstd\fR is a fast lossless compression algorithm and data compression tool, with command line syntax similar to \fBgzip (1)\fR and \fBxz (1)\fR\. It is based on the \fBLZ77\fR family, with further FSE & huff0 entropy stages\. \fBzstd\fR offers highly configurable compression speed, with fast modes at > 200 MB/s per core, and strong modes nearing lzma compression ratios\. It also features a very fast decoder, with speeds > 500 MB/s per core\.
    +.
    +.P
    +\fBzstd\fR command line syntax is generally similar to gzip, but features the following differences :
    +.
    +.IP "\(bu" 4
    +Source files are preserved by default\. It\'s possible to remove them automatically by using the \fB\-\-rm\fR command\.
    +.
    +.IP "\(bu" 4
    +When compressing a single file, \fBzstd\fR displays progress notifications and result summary by default\. Use \fB\-q\fR to turn them off\.
    +.
    +.IP "\(bu" 4
    +\fBzstd\fR does not accept input from console, but it properly accepts \fBstdin\fR when it\'s not the console\.
    +.
    +.IP "\(bu" 4
    +\fBzstd\fR displays a short help page when command line is an error\. Use \fB\-q\fR to turn it off\.
    +.
    +.IP "" 0
    +.
    +.P
    +\fBzstd\fR compresses or decompresses each \fIfile\fR according to the selected operation mode\. If no \fIfiles\fR are given or \fIfile\fR is \fB\-\fR, \fBzstd\fR reads from standard input and writes the processed data to standard output\. \fBzstd\fR will refuse to write compressed data to standard output if it is a terminal : it will display an error message and skip the \fIfile\fR\. Similarly, \fBzstd\fR will refuse to read compressed data from standard input if it is a terminal\.
    +.
    +.P
    +Unless \fB\-\-stdout\fR or \fB\-o\fR is specified, \fIfiles\fR are written to a new file whose name is derived from the source \fIfile\fR name:
    +.
    +.IP "\(bu" 4
    +When compressing, the suffix \fB\.zst\fR is appended to the source filename to get the target filename\.
    +.
    +.IP "\(bu" 4
    +When decompressing, the \fB\.zst\fR suffix is removed from the source filename to get the target filename
    +.
    +.IP "" 0
    +.
    +.SS "Concatenation with \.zst files"
    +It is possible to concatenate \fB\.zst\fR files as is\. \fBzstd\fR will decompress such files as if they were a single \fB\.zst\fR file\.
    +.
    +.SH "OPTIONS"
     .
     .SS "Integer suffixes and special values"
    -In most places where an integer argument is expected,
    -an optional suffix is supported to easily indicate large integers.
    -There must be no space between the integer and the suffix.
    +In most places where an integer argument is expected, an optional suffix is supported to easily indicate large integers\. There must be no space between the integer and the suffix\.
    +.
     .TP
    -.B KiB
    -Multiply the integer by 1,024 (2^10).
    -.BR Ki ,
    -.BR K ,
    -and
    -.B KB
    -are accepted as synonyms for
    -.BR KiB .
    +\fBKiB\fR
    +Multiply the integer by 1,024 (2^10)\. \fBKi\fR, \fBK\fR, and \fBKB\fR are accepted as synonyms for \fBKiB\fR\.
    +.
     .TP
    -.B MiB
    -Multiply the integer by 1,048,576 (2^20).
    -.BR Mi ,
    -.BR M ,
    -and
    -.B MB
    -are accepted as synonyms for
    -.BR MiB .
    -
    +\fBMiB\fR
    +Multiply the integer by 1,048,576 (2^20)\. \fBMi\fR, \fBM\fR, and \fBMB\fR are accepted as synonyms for \fBMiB\fR\.
     .
     .SS "Operation mode"
    -If multiple operation mode options are given,
    -the last one takes effect.
    +If multiple operation mode options are given, the last one takes effect\.
    +.
     .TP
    -.BR \-z ", " \-\-compress
    -Compress.
    -This is the default operation mode when no operation mode option
    -is specified and no other operation mode is implied from
    -the command name (for example,
    -.B unzstd
    -implies
    -.BR \-\-decompress ).
    +\fB\-z\fR, \fB\-\-compress\fR
    +Compress\. This is the default operation mode when no operation mode option is specified and no other operation mode is implied from the command name (for example, \fBunzstd\fR implies \fB\-\-decompress\fR)\.
    +.
     .TP
    -.BR \-d ", " \-\-decompress ", " \-\-uncompress
    -Decompress.
    +\fB\-d\fR, \fB\-\-decompress\fR, \fB\-\-uncompress\fR
    +Decompress\.
    +.
     .TP
    -.BR \-t ", " \-\-test
    -Test the integrity of compressed
    -.IR files .
    -This option is equivalent to
    -.B "\-\-decompress \-\-stdout"
    -except that the decompressed data is discarded instead of being
    -written to standard output.
    -No files are created or removed.
    +\fB\-t\fR, \fB\-\-test\fR
    +Test the integrity of compressed \fIfiles\fR\. This option is equivalent to \fB\-\-decompress \-\-stdout\fR except that the decompressed data is discarded instead of being written to standard output\. No files are created or removed\.
    +.
     .TP
    -.B \-b#
    - benchmark file(s) using compression level #
    +\fB\-b#\fR
    +Benchmark file(s) using compression level #
    +.
     .TP
    -.B \--train FILEs
    - use FILEs as training set to create a dictionary. The training set should contain a lot of small files (> 100).
    -
    +\fB\-\-train FILEs\fR
    +Use FILEs as a training set to create a dictionary\. The training set should contain a lot of small files (> 100)\.
     .
     .SS "Operation modifiers"
    +.
     .TP
    -.B \-#
    - # compression level [1-19] (default:3)
    +\fB\-#\fR
    +\fB#\fR compression level [1\-19] (default: 3)
    +.
     .TP
    -.BR \--ultra
    - unlocks high compression levels 20+ (maximum 22), using a lot more memory.
    -Note that decompression will also require more memory when using these levels.
    +\fB\-\-ultra\fR
    +unlocks high compression levels 20+ (maximum 22), using a lot more memory\. Note that decompression will also require more memory when using these levels\.
    +.
     .TP
    -.B \-D file
    - use `file` as Dictionary to compress or decompress FILE(s)
    +\fB\-T#\fR, \fB\-\-threads=#\fR
    +Compress using \fB#\fR threads (default: 1)\. If \fB#\fR is 0, attempt to detect and use the number of physical CPU cores\. This modifier does nothing if \fBzstd\fR is compiled without multithread support\.
    +.
     .TP
    -.BR \--no-dictID
    - do not store dictionary ID within frame header (dictionary compression).
    - The decoder will have to rely on implicit knowledge about which dictionary to use,
    -it won't be able to check if it's correct.
    +\fB\-D file\fR
    +use \fBfile\fR as Dictionary to compress or decompress FILE(s)
    +.
     .TP
    -.B \-o file
    - save result into `file` (only possible with a single INPUT-FILE)
    +\fB\-\-nodictID\fR
    +do not store dictionary ID within frame header (dictionary compression)\. The decoder will have to rely on implicit knowledge about which dictionary to use, it won\'t be able to check if it\'s correct\.
    +.
     .TP
    -.BR \-f ", " --force
    - overwrite output without prompting
    +\fB\-o file\fR
    +save result into \fBfile\fR (only possible with a single \fIINPUT\-FILE\fR)
    +.
     .TP
    -.BR \-c ", " --stdout
    - force write to standard output, even if it is the console
    +\fB\-f\fR, \fB\-\-force\fR
    +overwrite output without prompting, and (de)compress symbolic links
    +.
     .TP
    -.BR \--[no-]sparse
    - enable / disable sparse FS support, to make files with many zeroes smaller on disk.
    - Creating sparse files may save disk space and speed up the decompression
    -by reducing the amount of disk I/O.
    - default : enabled when output is into a file, and disabled when output is stdout.
    - This setting overrides default and can force sparse mode over stdout.
    +\fB\-c\fR, \fB\-\-stdout\fR
    +force write to standard output, even if it is the console
    +.
     .TP
    -.BR \--rm
    - remove source file(s) after successful compression or decompression
    +\fB\-\-[no\-]sparse\fR
    +enable / disable sparse FS support, to make files with many zeroes smaller on disk\. Creating sparse files may save disk space and speed up decompression by reducing the amount of disk I/O\. default : enabled when output is into a file, and disabled when output is stdout\. This setting overrides default and can force sparse mode over stdout\.
    +.
     .TP
    -.BR \-k ", " --keep
    - keep source file(s) after successful compression or decompression.
    - This is the default behavior.
    +\fB\-\-rm\fR
    +remove source file(s) after successful compression or decompression
    +.
     .TP
    -.BR \-r
    - operate recursively on directories
    +\fB\-k\fR, \fB\-\-keep\fR
    +keep source file(s) after successful compression or decompression\. This is the default behavior\.
    +.
     .TP
    -.BR \-h/\-H ", " --help
    - display help/long help and exit
    +\fB\-r\fR
    +operate recursively on directories
    +.
     .TP
    -.BR \-V ", " --version
    - display Version number and exit
    +\fB\-h\fR/\fB\-H\fR, \fB\-\-help\fR
    +display help/long help and exit
    +.
     .TP
    -.BR \-v ", " --verbose
    - verbose mode
    +\fB\-V\fR, \fB\-\-version\fR
    +display version number and exit
    +.
     .TP
    -.BR \-q ", " --quiet
    - suppress warnings, interactivity and notifications.
    - specify twice to suppress errors too.
    +\fB\-v\fR
    +verbose mode
    +.
     .TP
    -.BR \-C ", " --[no-]check
    - add integrity check computed from uncompressed data (default : enabled)
    +\fB\-q\fR, \fB\-\-quiet\fR
    +suppress warnings, interactivity, and notifications\. specify twice to suppress errors too\.
    +.
     .TP
    -.BR \-t ", " --test
    - Test the integrity of compressed files. This option is equivalent to \fB--decompress --stdout > /dev/null\fR.
    - No files are created or removed.
    +\fB\-C\fR, \fB\-\-[no\-]check\fR
    +add integrity check computed from uncompressed data (default : enabled)
    +.
     .TP
    -.BR --
    - All arguments after -- are treated as files
    -
    -
    -.SH DICTIONARY BUILDER
    -.PP
    -\fBzstd\fR offers \fIdictionary\fR compression, useful for very small files and messages.
    -It's possible to train \fBzstd\fR with some samples, the result of which is saved into a file called `dictionary`.
    -Then during compression and decompression, make reference to the same dictionary.
    -It will improve compression ratio of small files.
    -Typical gains range from ~10% (at 64KB) to x5 better (at <1KB).
    +\fB\-\-\fR
    +All arguments after \fB\-\-\fR are treated as files
    +.
    +.SH "DICTIONARY BUILDER"
    +\fBzstd\fR offers \fIdictionary\fR compression, useful for very small files and messages\. It\'s possible to train \fBzstd\fR with some samples, the result of which is saved into a file called a \fBdictionary\fR\. Then during compression and decompression, reference the same dictionary\. It will improve compression ratio of small files\. Typical gains range from 10% (at 64KB) to x5 better (at <1KB)\.
    +.
     .TP
    -.B \--train FILEs
    - use FILEs as training set to create a dictionary. The training set should contain a lot of small files (> 100),
    -and weight typically 100x the target dictionary size (for example, 10 MB for a 100 KB dictionary)
    +\fB\-\-train FILEs\fR
    +Use FILEs as training set to create a dictionary\. The training set should contain a lot of small files (> 100), and weight typically 100x the target dictionary size (for example, 10 MB for a 100 KB dictionary)\.
    +.
    +.IP
    +Supports multithreading if \fBzstd\fR is compiled with threading support\. Additional parameters can be specified with \fB\-\-train\-cover\fR\. The legacy dictionary builder can be accessed with \fB\-\-train\-legacy\fR\. Equivalent to \fB\-\-train\-cover=d=8,steps=4\fR\.
    +.
     .TP
    -.B \-o file
    - dictionary saved into `file` (default: dictionary)
    +\fB\-o file\fR
    +Dictionary saved into \fBfile\fR (default name: dictionary)\.
    +.
     .TP
    -.B \--maxdict #
    - limit dictionary to specified size (default : 112640)
    +\fB\-\-maxdict=#\fR
    +Limit dictionary to specified size (default: 112640)\.
    +.
     .TP
    -.B \--dictID #
    - A dictionary ID is a locally unique ID that a decoder can use to verify it is using the right dictionary.
    - By default, zstd will create a 4-bytes random number ID.
    - It's possible to give a precise number instead.
    - Short numbers have an advantage : an ID < 256 will only need 1 byte in the compressed frame header,
    - and an ID < 65536 will only need 2 bytes. This compares favorably to 4 bytes default.
    - However, it's up to the dictionary manager to not assign twice the same ID to 2 different dictionaries.
    +\fB\-\-dictID=#\fR
    +A dictionary ID is a locally unique ID that a decoder can use to verify it is using the right dictionary\. By default, zstd will create a 4\-bytes random number ID\. It\'s possible to give a precise number instead\. Short numbers have an advantage : an ID < 256 will only need 1 byte in the compressed frame header, and an ID < 65536 will only need 2 bytes\. This compares favorably to 4 bytes default\. However, it\'s up to the dictionary manager to not assign twice the same ID to 2 different dictionaries\.
    +.
     .TP
    -.B \-s#
    - dictionary selectivity level (default: 9)
    - the smaller the value, the denser the dictionary, improving its efficiency but reducing its possible maximum size.
    +\fB\-\-train\-cover[=k=#,d=#,steps=#]\fR
    +Select parameters for the default dictionary builder algorithm named cover\. If \fId\fR is not specified, then it tries \fId\fR = 6 and \fId\fR = 8\. If \fIk\fR is not specified, then it tries \fIsteps\fR values in the range [50, 2000]\. If \fIsteps\fR is not specified, then the default value of 40 is used\. Requires that \fId\fR <= \fIk\fR\.
    +.
    +.IP
    +Selects segments of size \fIk\fR with highest score to put in the dictionary\. The score of a segment is computed by the sum of the frequencies of all the subsegments of size \fId\fR\. Generally \fId\fR should be in the range [6, 8], occasionally up to 16, but the algorithm will run faster with d <= \fI8\fR\. Good values for \fIk\fR vary widely based on the input data, but a safe range is [2 * \fId\fR, 2000]\. Supports multithreading if \fBzstd\fR is compiled with threading support\.
    +.
    +.IP
    +Examples:
    +.
    +.IP
    +\fBzstd \-\-train\-cover FILEs\fR
    +.
    +.IP
    +\fBzstd \-\-train\-cover=k=50,d=8 FILEs\fR
    +.
    +.IP
    +\fBzstd \-\-train\-cover=d=8,steps=500 FILEs\fR
    +.
    +.IP
    +\fBzstd \-\-train\-cover=k=50 FILEs\fR
    +.
     .TP
    -.B \--cover=k=#,d=#
    - Use alternate dictionary builder algorithm named cover with parameters \fIk\fR and \fId\fR with \fId\fR <= \fIk\fR.
    - Selects segments of size \fIk\fR with the highest score to put in the dictionary.
    - The score of a segment is computed by the sum of the frequencies of all the subsegments of of size \fId\fR.
    - Generally \fId\fR should be in the range [6, 24].
    - Good values for \fIk\fR vary widely based on the input data, but a safe range is [32, 2048].
    - Example: \fB--train --cover=k=64,d=8 FILEs\fR.
    +\fB\-\-train\-legacy[=selectivity=#]\fR
    +Use legacy dictionary builder algorithm with the given dictionary \fIselectivity\fR (default: 9)\. The smaller the \fIselectivity\fR value, the denser the dictionary, improving its efficiency but reducing its possible maximum size\. \fB\-\-train\-legacy=s=#\fR is also accepted\.
    +.
    +.IP
    +Examples:
    +.
    +.IP
    +\fBzstd \-\-train\-legacy FILEs\fR
    +.
    +.IP
    +\fBzstd \-\-train\-legacy=selectivity=8 FILEs\fR
    +.
    +.SH "BENCHMARK"
    +.
     .TP
    -.B \--optimize-cover[=steps=#,k=#,d=#]
    - If \fIsteps\fR is not specified, the default value of 32 is used.
    - If \fIk\fR is not specified, \fIsteps\fR values in [16, 2048] are checked for each value of \fId\fR.
    - If \fId\fR is not specified, the values checked are [6, 8, ..., 16].
    -
    - Runs the cover dictionary builder for each parameter set saves the optimal parameters and dictionary.
    - Prints the optimal parameters and writes the optimal dictionary to the output file.
    - Supports multithreading if \fBzstd\fR is compiled with threading support.
    -
    - The parameter \fIk\fR is more sensitve than \fId\fR, and is faster to optimize over.
    - Suggested use is to run with a \fIsteps\fR <= 32 with neither \fIk\fR nor \fId\fR set.
    - Once it completes, use the value of \fId\fR it selects with a higher \fIsteps\fR (in the range [256, 1024]).
    - \fBzstd --train --optimize-cover FILEs
    - \fBzstd --train --optimize-cover=d=d,steps=512 FILEs
    +\fB\-b#\fR
    +benchmark file(s) using compression level #
    +.
     .TP
    -
    -.SH BENCHMARK
    +\fB\-e#\fR
    +benchmark file(s) using multiple compression levels, from \fB\-b#\fR to \fB\-e#\fR (inclusive)
    +.
     .TP
    -.B \-b#
    - benchmark file(s) using compression level #
    +\fB\-i#\fR
    +minimum evaluation time, in seconds (default : 3s), benchmark mode only
    +.
     .TP
    -.B \-e#
    - benchmark file(s) using multiple compression levels, from -b# to -e# (included).
    +\fB\-B#\fR
    +cut file into independent blocks of size # (default: no block)
    +.
     .TP
    -.B \-i#
    - minimum evaluation time, in seconds (default : 3s), benchmark mode only
    +\fB\-\-priority=rt\fR
    +set process priority to real\-time
    +.
    +.SH "ADVANCED COMPRESSION OPTIONS"
    +.
    +.SS "\-\-zstd[=options]:"
    +\fBzstd\fR provides 22 predefined compression levels\. The selected or default predefined compression level can be changed with advanced compression options\. The \fIoptions\fR are provided as a comma\-separated list\. You may specify only the options you want to change and the rest will be taken from the selected or default compression level\. The list of available \fIoptions\fR:
    +.
     .TP
    -.B \-B#
    - cut file into independent blocks of size # (default: no block)
    -.B \--priority=rt
    - set process priority to real-time
    -
    -.SH ADVANCED COMPRESSION OPTIONS
    +\fBstrategy\fR=\fIstrat\fR, \fBstrat\fR=\fIstrat\fR
    +Specify a strategy used by a match finder\.
    +.
    +.IP
    +There are 8 strategies numbered from 0 to 7, from faster to stronger: 0=ZSTD_fast, 1=ZSTD_dfast, 2=ZSTD_greedy, 3=ZSTD_lazy, 4=ZSTD_lazy2, 5=ZSTD_btlazy2, 6=ZSTD_btopt, 7=ZSTD_btopt2\.
    +.
     .TP
    -.B \--zstd[=\fIoptions\fR]
    -.PD
    -\fBzstd\fR provides 22 predefined compression levels. The selected or default predefined compression level can be changed with advanced compression options.
    -The \fIoptions\fR are provided as a comma-separated list. You may specify only the \fIoptions\fR you want to change and the rest will be taken from the selected or default compression level.
    -The list of available \fIoptions\fR:
    -.RS
    -
    +\fBwindowLog\fR=\fIwlog\fR, \fBwlog\fR=\fIwlog\fR
    +Specify the maximum number of bits for a match distance\.
    +.
    +.IP
    +Higher numbers of bits increase the chance to find a match which usually improves compression ratio\. It also increases memory requirements for the compressor and decompressor\. The minimum \fIwlog\fR is 10 (1 KiB) and the maximum is 27 (128 MiB)\.
    +.
     .TP
    -.BI strategy= strat
    -.PD 0
    +\fBhashLog\fR=\fIhlog\fR, \fBhlog\fR=\fIhlog\fR
    +Specify the maximum number of bits for a hash table\.
    +.
    +.IP
    +Bigger hash tables cause less collisions which usually makes compression faster, but requires more memory during compression\.
    +.
    +.IP
    +The minimum \fIhlog\fR is 6 (64 B) and the maximum is 26 (128 MiB)\.
    +.
     .TP
    -.BI strat= strat
    -.PD
    -Specify a strategy used by a match finder.
    -.IP ""
    -There are 8 strategies numbered from 0 to 7, from faster to stronger:
    -0=ZSTD_fast, 1=ZSTD_dfast, 2=ZSTD_greedy, 3=ZSTD_lazy, 4=ZSTD_lazy2, 5=ZSTD_btlazy2, 6=ZSTD_btopt, 7=ZSTD_btultra.
    -.IP ""
    -
    +\fBchainLog\fR=\fIclog\fR, \fBclog\fR=\fIclog\fR
    +Specify the maximum number of bits for a hash chain or a binary tree\.
    +.
    +.IP
    +Higher numbers of bits increases the chance to find a match which usually improves compression ratio\. It also slows down compression speed and increases memory requirements for compression\. This option is ignored for the ZSTD_fast strategy\.
    +.
    +.IP
    +The minimum \fIclog\fR is 6 (64 B) and the maximum is 28 (256 MiB)\.
    +.
     .TP
    -.BI windowLog= wlog
    -.PD 0
    +\fBsearchLog\fR=\fIslog\fR, \fBslog\fR=\fIslog\fR
    +Specify the maximum number of searches in a hash chain or a binary tree using logarithmic scale\.
    +.
    +.IP
    +More searches increases the chance to find a match which usually increases compression ratio but decreases compression speed\.
    +.
    +.IP
    +The minimum \fIslog\fR is 1 and the maximum is 26\.
    +.
     .TP
    -.BI wlog= wlog
    -.PD
    -Specify the maximum number of bits for a match distance.
    -.IP ""
    -The higher number of bits increases the chance to find a match what usually improves compression ratio.
    -It also increases memory requirements for compressor and decompressor.
    -.IP ""
    -The minimum \fIwlog\fR is 10 (1 KiB) and the maximum is 25 (32 MiB) for 32-bit compilation and 27 (128 MiB) for 64-bit compilation.
    -.IP ""
    -
    +\fBsearchLength\fR=\fIslen\fR, \fBslen\fR=\fIslen\fR
    +Specify the minimum searched length of a match in a hash table\.
    +.
    +.IP
    +Larger search lengths usually decrease compression ratio but improve decompression speed\.
    +.
    +.IP
    +The minimum \fIslen\fR is 3 and the maximum is 7\.
    +.
     .TP
    -.BI hashLog= hlog
    -.PD 0
    +\fBtargetLen\fR=\fItlen\fR, \fBtlen\fR=\fItlen\fR
    +Specify the minimum match length that causes a match finder to stop searching for better matches\.
    +.
    +.IP
    +A larger minimum match length usually improves compression ratio but decreases compression speed\. This option is only used with strategies ZSTD_btopt and ZSTD_btopt2\.
    +.
    +.IP
    +The minimum \fItlen\fR is 4 and the maximum is 999\.
    +.
     .TP
    -.BI hlog= hlog
    -.PD
    -Specify the maximum number of bits for a hash table.
    -.IP ""
    -The bigger hash table causes less collisions what usually make compression faster but requires more memory during compression.
    -.IP ""
    -The minimum \fIhlog\fR is 6 (64 B) and the maximum is 25 (32 MiB) for 32-bit compilation and 27 (128 MiB) for 64-bit compilation.
    -
    -.TP
    -.BI chainLog= clog
    -.PD 0
    -.TP
    -.BI clog= clog
    -.PD
    -Specify the maximum number of bits for a hash chain or a binary tree.
    -.IP ""
    -The higher number of bits increases the chance to find a match what usually improves compression ratio.
    -It also slows down compression speed and increases memory requirements for compression.
    -This option is ignored for the ZSTD_fast strategy.
    -.IP ""
    -The minimum \fIclog\fR is 6 (64 B) and the maximum is 26 (64 MiB) for 32-bit compilation and 28 (256 MiB) for 64-bit compilation.
    -.IP ""
    -
    -.TP
    -.BI searchLog= slog
    -.PD 0
    -.TP
    -.BI slog= slog
    -.PD
    -Specify the maximum number of searches in a hash chain or a binary tree using logarithmic scale.
    -.IP ""
    -The bigger number of searches increases the chance to find a match what usually improves compression ratio but decreases compression speed.
    -.IP ""
    -The minimum \fIslog\fR is 1 and the maximum is 24 for 32-bit compilation and 26 for 64-bit compilation.
    -.IP ""
    -
    -.TP
    -.BI searchLength= slen
    -.PD 0
    -.TP
    -.BI slen= slen
    -.PD
    -Specify the minimum searched length of a match in a hash table.
    -.IP ""
    -The bigger search length usually decreases compression ratio but improves decompression speed.
    -.IP ""
    -The minimum \fIslen\fR is 3 and the maximum is 7.
    -.IP ""
    -
    -.TP
    -.BI targetLength= tlen
    -.PD 0
    -.TP
    -.BI tlen= tlen
    -.PD
    -Specify the minimum match length that causes a match finder to interrupt searching of better matches.
    -.IP ""
    -The bigger minimum match length usually improves compression ratio but decreases compression speed.
    -This option is used only with ZSTD_btopt and ZSTD_btultra strategies.
    -.IP ""
    -The minimum \fItlen\fR is 4 and the maximum is 999.
    -.IP ""
    -
    -.PP
    -.B An example
    -.br
    -The following parameters sets advanced compression options to predefined level 19 for files bigger than 256 KB:
    -.IP ""
    -\fB--zstd=\fRwindowLog=23,chainLog=23,hashLog=22,searchLog=6,searchLength=3,targetLength=48,strategy=6
    -
    -.SH BUGS
    -Report bugs at:- https://github.com/facebook/zstd/issues
    -
    -.SH AUTHOR
    +\fBoverlapLog\fR=\fIovlog\fR, \fBovlog\fR=\fIovlog\fR
    +Determine \fBoverlapSize\fR, amount of data reloaded from previous job\. This parameter is only available when multithreading is enabled\. Reloading more data improves compression ratio, but decreases speed\.
    +.
    +.IP
    +The minimum \fIovlog\fR is 0, and the maximum is 9\. 0 means "no overlap", hence completely independent jobs\. 9 means "full overlap", meaning up to \fBwindowSize\fR is reloaded from previous job\. Reducing \fIovlog\fR by 1 reduces the amount of reload by a factor 2\. Default \fIovlog\fR is 6, which means "reload \fBwindowSize / 8\fR"\. Exception : the maximum compression level (22) has a default \fIovlog\fR of 9\.
    +.
    +.SS "\-B#:"
    +Select the size of each compression job\. This parameter is available only when multi\-threading is enabled\. Default value is \fB4 * windowSize\fR, which means it varies depending on compression level\. \fB\-B#\fR makes it possible to select a custom value\. Note that job size must respect a minimum value which is enforced transparently\. This minimum is either 1 MB, or \fBoverlapSize\fR, whichever is largest\.
    +.
    +.SS "Example"
    +The following parameters sets advanced compression options to those of predefined level 19 for files bigger than 256 KB:
    +.
    +.P
    +\fB\-\-zstd\fR=windowLog=23,chainLog=23,hashLog=22,searchLog=6,searchLength=3,targetLength=48,strategy=6
    +.
    +.SH "BUGS"
    +Report bugs at: https://github\.com/facebook/zstd/issues
    +.
    +.SH "AUTHOR"
     Yann Collet
    diff --git a/programs/zstd.1.md b/programs/zstd.1.md
    new file mode 100644
    index 000000000..118c9f2f8
    --- /dev/null
    +++ b/programs/zstd.1.md
    @@ -0,0 +1,343 @@
    +zstd(1) -- zstd, zstdmt, unzstd, zstdcat - Compress or decompress .zst files
    +============================================================================
    +
    +SYNOPSIS
    +--------
    +
    +`zstd` [*OPTIONS*] [-|_INPUT-FILE_] [-o _OUTPUT-FILE_]
    +
    +`zstdmt` is equivalent to `zstd -T0`
    +
    +`unzstd` is equivalent to `zstd -d`
    +
    +`zstdcat` is equivalent to `zstd -dcf`
    +
    +
    +DESCRIPTION
    +-----------
    +`zstd` is a fast lossless compression algorithm and data compression tool,
    +with command line syntax similar to `gzip (1)` and `xz (1)`.
    +It is based on the **LZ77** family, with further FSE & huff0 entropy stages.
    +`zstd` offers highly configurable compression speed,
    +with fast modes at > 200 MB/s per core,
    +and strong modes nearing lzma compression ratios.
    +It also features a very fast decoder, with speeds > 500 MB/s per core.
    +
    +`zstd` command line syntax is generally similar to gzip,
    +but features the following differences :
    +
    +  - Source files are preserved by default.
    +    It's possible to remove them automatically by using the `--rm` command.
    +  - When compressing a single file, `zstd` displays progress notifications
    +    and result summary by default.
    +    Use `-q` to turn them off.
    +  - `zstd` does not accept input from console,
    +    but it properly accepts `stdin` when it's not the console.
    +  - `zstd` displays a short help page when command line is an error.
    +    Use `-q` to turn it off.
    +
    +`zstd` compresses or decompresses each _file_ according to the selected
    +operation mode.
    +If no _files_ are given or _file_ is `-`, `zstd` reads from standard input
    +and writes the processed data to standard output.
    +`zstd` will refuse to write compressed data to standard output
    +if it is a terminal : it will display an error message and skip the _file_.
    +Similarly, `zstd` will refuse to read compressed data from standard input
    +if it is a terminal.
    +
    +Unless `--stdout` or `-o` is specified, _files_ are written to a new file
    +whose name is derived from the source _file_ name:
    +
    +* When compressing, the suffix `.zst` is appended to the source filename to
    +  get the target filename.
    +* When decompressing, the `.zst` suffix is removed from the source filename to
    +  get the target filename
    +
    +### Concatenation with .zst files
    +It is possible to concatenate `.zst` files as is.
    +`zstd` will decompress such files as if they were a single `.zst` file.
    +
    +OPTIONS
    +-------
    +
    +### Integer suffixes and special values
    +In most places where an integer argument is expected,
    +an optional suffix is supported to easily indicate large integers.
    +There must be no space between the integer and the suffix.
    +
    +* `KiB`:
    +    Multiply the integer by 1,024 (2\^10).
    +    `Ki`, `K`, and `KB` are accepted as synonyms for `KiB`.
    +* `MiB`:
    +    Multiply the integer by 1,048,576 (2\^20).
    +    `Mi`, `M`, and `MB` are accepted as synonyms for `MiB`.
    +
    +### Operation mode
    +If multiple operation mode options are given,
    +the last one takes effect.
    +
    +* `-z`, `--compress`:
    +    Compress.
    +    This is the default operation mode when no operation mode option is specified
    +    and no other operation mode is implied from the command name
    +    (for example, `unzstd` implies `--decompress`).
    +* `-d`, `--decompress`, `--uncompress`:
    +    Decompress.
    +* `-t`, `--test`:
    +    Test the integrity of compressed _files_.
    +    This option is equivalent to `--decompress --stdout` except that the
    +    decompressed data is discarded instead of being written to standard output.
    +    No files are created or removed.
    +* `-b#`:
    +    Benchmark file(s) using compression level #
    +* `--train FILEs`:
    +    Use FILEs as a training set to create a dictionary.
    +    The training set should contain a lot of small files (> 100).
    +
    +### Operation modifiers
    +
    +* `-#`:
    +    `#` compression level \[1-19] (default: 3)
    +* `--ultra`:
    +    unlocks high compression levels 20+ (maximum 22), using a lot more memory.
    +    Note that decompression will also require more memory when using these levels.
    +* `-T#`, `--threads=#`:
    +    Compress using `#` threads (default: 1).
    +    If `#` is 0, attempt to detect and use the number of physical CPU cores.
    +    This modifier does nothing if `zstd` is compiled without multithread support.
    +* `-D file`:
    +    use `file` as Dictionary to compress or decompress FILE(s)
    +* `--nodictID`:
    +    do not store dictionary ID within frame header (dictionary compression).
    +    The decoder will have to rely on implicit knowledge about which dictionary to use,
    +    it won't be able to check if it's correct.
    +* `-o file`:
    +    save result into `file` (only possible with a single _INPUT-FILE_)
    +* `-f`, `--force`:
    +    overwrite output without prompting, and (de)compress symbolic links
    +* `-c`, `--stdout`:
    +    force write to standard output, even if it is the console
    +* `--[no-]sparse`:
    +    enable / disable sparse FS support,
    +    to make files with many zeroes smaller on disk.
    +    Creating sparse files may save disk space and speed up decompression by
    +    reducing the amount of disk I/O.
    +    default : enabled when output is into a file,
    +    and disabled when output is stdout.
    +    This setting overrides default and can force sparse mode over stdout.
    +* `--rm`:
    +    remove source file(s) after successful compression or decompression
    +* `-k`, `--keep`:
    +    keep source file(s) after successful compression or decompression.
    +    This is the default behavior.
    +* `-r`:
    +    operate recursively on directories
    +* `-h`/`-H`, `--help`:
    +    display help/long help and exit
    +* `-V`, `--version`:
    +    display version number and exit
    +* `-v`:
    +    verbose mode
    +* `-q`, `--quiet`:
    +    suppress warnings, interactivity, and notifications.
    +    specify twice to suppress errors too.
    +* `-C`, `--[no-]check`:
    +    add integrity check computed from uncompressed data (default : enabled)
    +* `--`:
    +    All arguments after `--` are treated as files
    +
    +
    +DICTIONARY BUILDER
    +------------------
    +`zstd` offers _dictionary_ compression,
    +useful for very small files and messages.
    +It's possible to train `zstd` with some samples,
    +the result of which is saved into a file called a `dictionary`.
    +Then during compression and decompression, reference the same dictionary.
    +It will improve compression ratio of small files.
    +Typical gains range from 10% (at 64KB) to x5 better (at <1KB).
    +
    +* `--train FILEs`:
    +    Use FILEs as training set to create a dictionary.
    +    The training set should contain a lot of small files (> 100),
    +    and weight typically 100x the target dictionary size
    +    (for example, 10 MB for a 100 KB dictionary).
    +
    +    Supports multithreading if `zstd` is compiled with threading support.
    +    Additional parameters can be specified with `--train-cover`.
    +    The legacy dictionary builder can be accessed with `--train-legacy`.
    +    Equivalent to `--train-cover=d=8,steps=4`.
    +* `-o file`:
    +    Dictionary saved into `file` (default name: dictionary).
    +* `--maxdict=#`:
    +    Limit dictionary to specified size (default: 112640).
    +* `--dictID=#`:
    +    A dictionary ID is a locally unique ID that a decoder can use to verify it is
    +    using the right dictionary.
    +    By default, zstd will create a 4-bytes random number ID.
    +    It's possible to give a precise number instead.
    +    Short numbers have an advantage : an ID < 256 will only need 1 byte in the
    +    compressed frame header, and an ID < 65536 will only need 2 bytes.
    +    This compares favorably to 4 bytes default.
    +    However, it's up to the dictionary manager to not assign twice the same ID to
    +    2 different dictionaries.
    +* `--train-cover[=k=#,d=#,steps=#]`:
    +    Select parameters for the default dictionary builder algorithm named cover.
    +    If _d_ is not specified, then it tries _d_ = 6 and _d_ = 8.
    +    If _k_ is not specified, then it tries _steps_ values in the range [50, 2000].
    +    If _steps_ is not specified, then the default value of 40 is used.
    +    Requires that _d_ <= _k_.
    +
    +    Selects segments of size _k_ with highest score to put in the dictionary.
    +    The score of a segment is computed by the sum of the frequencies of all the
    +    subsegments of size _d_.
    +    Generally _d_ should be in the range [6, 8], occasionally up to 16, but the
    +    algorithm will run faster with d <= _8_.
    +    Good values for _k_ vary widely based on the input data, but a safe range is
    +    [2 * _d_, 2000].
    +    Supports multithreading if `zstd` is compiled with threading support.
    +
    +    Examples:
    +
    +    `zstd --train-cover FILEs`
    +
    +    `zstd --train-cover=k=50,d=8 FILEs`
    +
    +    `zstd --train-cover=d=8,steps=500 FILEs`
    +
    +    `zstd --train-cover=k=50 FILEs`
    +
    +* `--train-legacy[=selectivity=#]`:
    +    Use legacy dictionary builder algorithm with the given dictionary
    +    _selectivity_ (default: 9).
    +    The smaller the _selectivity_ value, the denser the dictionary,
    +    improving its efficiency but reducing its possible maximum size.
    +    `--train-legacy=s=#` is also accepted.
    +
    +    Examples:
    +
    +    `zstd --train-legacy FILEs`
    +
    +    `zstd --train-legacy=selectivity=8 FILEs`
    +
    +
    +BENCHMARK
    +---------
    +
    +* `-b#`:
    +    benchmark file(s) using compression level #
    +* `-e#`:
    +    benchmark file(s) using multiple compression levels, from `-b#` to `-e#` (inclusive)
    +* `-i#`:
    +    minimum evaluation time, in seconds (default : 3s), benchmark mode only
    +* `-B#`:
    +    cut file into independent blocks of size # (default: no block)
    +* `--priority=rt`:
    +    set process priority to real-time
    +
    +
    +ADVANCED COMPRESSION OPTIONS
    +----------------------------
    +### --zstd[=options]:
    +`zstd` provides 22 predefined compression levels.
    +The selected or default predefined compression level can be changed with
    +advanced compression options.
    +The _options_ are provided as a comma-separated list.
    +You may specify only the options you want to change and the rest will be
    +taken from the selected or default compression level.
    +The list of available _options_:
    +
    +- `strategy`=_strat_, `strat`=_strat_:
    +    Specify a strategy used by a match finder.
    +
    +    There are 8 strategies numbered from 0 to 7, from faster to stronger:
    +    0=ZSTD\_fast, 1=ZSTD\_dfast, 2=ZSTD\_greedy, 3=ZSTD\_lazy,
    +    4=ZSTD\_lazy2, 5=ZSTD\_btlazy2, 6=ZSTD\_btopt, 7=ZSTD\_btopt2.
    +
    +- `windowLog`=_wlog_, `wlog`=_wlog_:
    +    Specify the maximum number of bits for a match distance.
    +
    +    Higher numbers of bits increase the chance to find a match which usually
    +    improves compression ratio.
    +    It also increases memory requirements for the compressor and decompressor.
    +    The minimum _wlog_ is 10 (1 KiB) and the maximum is 27 (128 MiB).
    +
    +- `hashLog`=_hlog_, `hlog`=_hlog_:
    +    Specify the maximum number of bits for a hash table.
    +
    +    Bigger hash tables cause less collisions which usually makes compression
    +    faster, but requires more memory during compression.
    +
    +    The minimum _hlog_ is 6 (64 B) and the maximum is 26 (128 MiB).
    +
    +- `chainLog`=_clog_, `clog`=_clog_:
    +    Specify the maximum number of bits for a hash chain or a binary tree.
    +
    +    Higher numbers of bits increases the chance to find a match which usually
    +    improves compression ratio.
    +    It also slows down compression speed and increases memory requirements for
    +    compression.
    +    This option is ignored for the ZSTD_fast strategy.
    +
    +    The minimum _clog_ is 6 (64 B) and the maximum is 28 (256 MiB).
    +
    +- `searchLog`=_slog_, `slog`=_slog_:
    +    Specify the maximum number of searches in a hash chain or a binary tree
    +    using logarithmic scale.
    +
    +    More searches increases the chance to find a match which usually increases
    +    compression ratio but decreases compression speed.
    +
    +    The minimum _slog_ is 1 and the maximum is 26.
    +
    +- `searchLength`=_slen_, `slen`=_slen_:
    +    Specify the minimum searched length of a match in a hash table.
    +
    +    Larger search lengths usually decrease compression ratio but improve
    +    decompression speed.
    +
    +    The minimum _slen_ is 3 and the maximum is 7.
    +
    +- `targetLen`=_tlen_, `tlen`=_tlen_:
    +    Specify the minimum match length that causes a match finder to stop
    +    searching for better matches.
    +
    +    A larger minimum match length usually improves compression ratio but
    +    decreases compression speed.
    +    This option is only used with strategies ZSTD_btopt and ZSTD_btopt2.
    +
    +    The minimum _tlen_ is 4 and the maximum is 999.
    +
    +- `overlapLog`=_ovlog_,  `ovlog`=_ovlog_:
    +    Determine `overlapSize`, amount of data reloaded from previous job.
    +    This parameter is only available when multithreading is enabled.
    +    Reloading more data improves compression ratio, but decreases speed.
    +
    +    The minimum _ovlog_ is 0, and the maximum is 9.
    +    0 means "no overlap", hence completely independent jobs.
    +    9 means "full overlap", meaning up to `windowSize` is reloaded from previous job.
    +    Reducing _ovlog_ by 1 reduces the amount of reload by a factor 2.
    +    Default _ovlog_ is 6, which means "reload `windowSize / 8`".
    +    Exception : the maximum compression level (22) has a default _ovlog_ of 9.
    +
    +### -B#:
    +Select the size of each compression job.
    +This parameter is available only when multi-threading is enabled.
    +Default value is `4 * windowSize`, which means it varies depending on compression level.
    +`-B#` makes it possible to select a custom value.
    +Note that job size must respect a minimum value which is enforced transparently.
    +This minimum is either 1 MB, or `overlapSize`, whichever is largest.
    +
    +### Example
    +The following parameters set advanced compression options to those of
    +predefined level 19 for files bigger than 256 KB:
    +
    +`--zstd`=windowLog=23,chainLog=23,hashLog=22,searchLog=6,searchLength=3,targetLength=48,strategy=6
    +
    +BUGS
    +----
    +Report bugs at: https://github.com/facebook/zstd/issues
    +
    +AUTHOR
    +------
    +Yann Collet
    diff --git a/programs/zstdcli.c b/programs/zstdcli.c
    index 050d7a6a2..32fef9993 100644
    --- a/programs/zstdcli.c
    +++ b/programs/zstdcli.c
    @@ -49,13 +49,14 @@
     #define AUTHOR "Yann Collet"
     #define WELCOME_MESSAGE "*** %s %i-bits %s, by %s ***\n", COMPRESSOR_NAME, (int)(sizeof(size_t)*8), ZSTD_VERSION, AUTHOR
     
    -#define GZ_EXTENSION ".gz"
    -#define ZSTD_EXTENSION ".zst"
    +#define ZSTD_ZSTDMT "zstdmt"
     #define ZSTD_UNZSTD "unzstd"
     #define ZSTD_CAT "zstdcat"
     #define ZSTD_GZ "gzip"
     #define ZSTD_GUNZIP "gunzip"
     #define ZSTD_GZCAT "gzcat"
    +#define ZSTD_LZMA "lzma"
    +#define ZSTD_XZ "xz"
     
     #define KB *(1 <<10)
     #define MB *(1 <<20)
    @@ -74,10 +75,10 @@ static U32 g_overlapLog = OVERLAP_LOG_DEFAULT;
     /*-************************************
     *  Display Macros
     **************************************/
    -#define DISPLAY(...)           fprintf(displayOut, __VA_ARGS__)
    -#define DISPLAYLEVEL(l, ...)   if (displayLevel>=l) { DISPLAY(__VA_ARGS__); }
    -static FILE* displayOut;
    -static unsigned displayLevel = DEFAULT_DISPLAY_LEVEL;   /* 0 : no display,  1: errors,  2 : + result + interaction + warnings,  3 : + progression,  4 : + information */
    +#define DISPLAY(...)         fprintf(g_displayOut, __VA_ARGS__)
    +#define DISPLAYLEVEL(l, ...) { if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); } }
    +static int g_displayLevel = DEFAULT_DISPLAY_LEVEL;   /* 0 : no display,  1: errors,  2 : + result + interaction + warnings,  3 : + progression,  4 : + information */
    +static FILE* g_displayOut;
     
     
     /*-************************************
    @@ -99,7 +100,7 @@ static int usage(const char* programName)
     #endif
         DISPLAY( " -D file: use `file` as Dictionary \n");
         DISPLAY( " -o file: result stored into `file` (only if 1 input file) \n");
    -    DISPLAY( " -f     : overwrite output without prompting \n");
    +    DISPLAY( " -f     : overwrite output without prompting and (de)compress links \n");
         DISPLAY( "--rm    : remove source file(s) after successful de/compression \n");
         DISPLAY( " -k     : preserve source file(s) (default) \n");
         DISPLAY( " -h/-H  : display help/long help and exit\n");
    @@ -113,27 +114,38 @@ static int usage_advanced(const char* programName)
         DISPLAY( "\n");
         DISPLAY( "Advanced arguments :\n");
         DISPLAY( " -V     : display Version number and exit\n");
    -    DISPLAY( " -v     : verbose mode; specify multiple times to increase log level (default:%d)\n", DEFAULT_DISPLAY_LEVEL);
    +    DISPLAY( " -v     : verbose mode; specify multiple times to increase verbosity\n");
         DISPLAY( " -q     : suppress warnings; specify twice to suppress errors too\n");
         DISPLAY( " -c     : force write to standard output, even if it is the console\n");
    -#ifdef UTIL_HAS_CREATEFILELIST
    -    DISPLAY( " -r     : operate recursively on directories \n");
    -#endif
     #ifndef ZSTD_NOCOMPRESS
         DISPLAY( "--ultra : enable levels beyond %i, up to %i (requires more memory)\n", ZSTDCLI_CLEVEL_MAX, ZSTD_maxCLevel());
    -    DISPLAY( "--no-dictID : don't write dictID into header (dictionary compression)\n");
    -    DISPLAY( "--[no-]check : integrity check (default:enabled) \n");
     #ifdef ZSTD_MULTITHREAD
         DISPLAY( " -T#    : use # threads for compression (default:1) \n");
    -    DISPLAY( " -B#    : select size of independent sections (default:0==automatic) \n");
    +    DISPLAY( " -B#    : select size of each job (default:0==automatic) \n");
    +#endif
    +    DISPLAY( "--no-dictID : don't write dictID into header (dictionary compression)\n");
    +    DISPLAY( "--[no-]check : integrity check (default:enabled) \n");
    +#endif
    +#ifdef UTIL_HAS_CREATEFILELIST
    +    DISPLAY( " -r     : operate recursively on directories \n");
     #endif
     #ifdef ZSTD_GZCOMPRESS
         DISPLAY( "--format=gzip : compress files to the .gz format \n");
     #endif
    +#ifdef ZSTD_LZMACOMPRESS
    +    DISPLAY( "--format=xz : compress files to the .xz format \n");
    +    DISPLAY( "--format=lzma : compress files to the .lzma format \n");
    +#endif
    +#ifdef ZSTD_LZ4COMPRESS
    +    DISPLAY( "--format=lz4 : compress files to the .lz4 format \n");
     #endif
     #ifndef ZSTD_NODECOMPRESS
         DISPLAY( "--test  : test compressed file integrity \n");
    +#if ZSTD_SPARSE_DEFAULT
         DISPLAY( "--[no-]sparse : sparse mode (default:enabled on file, disabled on stdout)\n");
    +#else
    +    DISPLAY( "--[no-]sparse : sparse mode (default:disabled)\n");
    +#endif
     #endif
         DISPLAY( " -M#    : Set a memory usage limit for decompression \n");
         DISPLAY( "--      : All arguments after \"--\" are treated as files \n");
    @@ -141,12 +153,11 @@ static int usage_advanced(const char* programName)
         DISPLAY( "\n");
         DISPLAY( "Dictionary builder :\n");
         DISPLAY( "--train ## : create a dictionary from a training set of files \n");
    -    DISPLAY( "--cover=k=#,d=# : use the cover algorithm with parameters k and d \n");
    -    DISPLAY( "--optimize-cover[=steps=#,k=#,d=#] : optimize cover parameters with optional parameters\n");
    +    DISPLAY( "--train-cover[=k=#,d=#,steps=#] : use the cover algorithm with optional args\n");
    +    DISPLAY( "--train-legacy[=s=#] : use the legacy algorithm with selectivity (default: %u)\n", g_defaultSelectivityLevel);
         DISPLAY( " -o file : `file` is dictionary name (default: %s) \n", g_defaultDictName);
    -    DISPLAY( "--maxdict ## : limit dictionary to specified size (default : %u) \n", g_defaultMaxDictSize);
    -    DISPLAY( " -s#    : dictionary selectivity level (default: %u)\n", g_defaultSelectivityLevel);
    -    DISPLAY( "--dictID ## : force dictionary ID to specified value (default: random)\n");
    +    DISPLAY( "--maxdict=# : limit dictionary to specified size (default : %u) \n", g_defaultMaxDictSize);
    +    DISPLAY( "--dictID=# : force dictionary ID to specified value (default: random)\n");
     #endif
     #ifndef ZSTD_NOBENCH
         DISPLAY( "\n");
    @@ -163,7 +174,7 @@ static int usage_advanced(const char* programName)
     static int badusage(const char* programName)
     {
         DISPLAYLEVEL(1, "Incorrect parameters\n");
    -    if (displayLevel >= 1) usage(programName);
    +    if (g_displayLevel >= 2) usage(programName);
         return 1;
     }
     
    @@ -175,6 +186,23 @@ static void waitEnter(void)
         (void)unused;
     }
     
    +static const char* lastNameFromPath(const char* path)
    +{
    +    const char* name = path;
    +    if (strrchr(name, '/')) name = strrchr(name, '/') + 1;
    +    if (strrchr(name, '\\')) name = strrchr(name, '\\') + 1; /* windows */
    +    return name;
    +}
    +
    +/*! exeNameMatch() :
    +    @return : a non-zero value if exeName matches test, excluding the extension
    +   */
    +static int exeNameMatch(const char* exeName, const char* test)
    +{
    +    return !strncmp(exeName, test, strlen(test)) &&
    +        (exeName[strlen(test)] == '\0' || exeName[strlen(test)] == '.');
    +}
    +
     /*! readU32FromChar() :
         @return : unsigned integer value read from input in `char` format
         allows and interprets K, KB, KiB, M, MB and MiB suffix.
    @@ -212,11 +240,11 @@ static unsigned longCommandWArg(const char** stringPtr, const char* longCommand)
     #ifndef ZSTD_NODICT
     /**
      * parseCoverParameters() :
    - * reads cover parameters from *stringPtr (e.g. "--cover=smoothing=100,kmin=48,kstep=4,kmax=64,d=8") into *params
    + * reads cover parameters from *stringPtr (e.g. "--train-cover=k=48,d=8,steps=32") into *params
      * @return 1 means that cover parameters were correct
      * @return 0 in case of malformed parameters
      */
    -static unsigned parseCoverParameters(const char* stringPtr, COVER_params_t *params)
    +static unsigned parseCoverParameters(const char* stringPtr, COVER_params_t* params)
     {
         memset(params, 0, sizeof(*params));
         for (; ;) {
    @@ -226,9 +254,33 @@ static unsigned parseCoverParameters(const char* stringPtr, COVER_params_t *para
             return 0;
         }
         if (stringPtr[0] != 0) return 0;
    -    DISPLAYLEVEL(4, "k=%u\nd=%u\nsteps=%u\n", params->k, params->d, params->steps);
    +    DISPLAYLEVEL(4, "cover: k=%u\nd=%u\nsteps=%u\n", params->k, params->d, params->steps);
         return 1;
     }
    +
    +/**
    + * parseLegacyParameters() :
    + * reads legacy dictionary builder parameters from *stringPtr (e.g. "--train-legacy=selectivity=8") into *selectivity
    + * @return 1 means that legacy dictionary builder parameters were correct
    + * @return 0 in case of malformed parameters
    + */
    +static unsigned parseLegacyParameters(const char* stringPtr, unsigned* selectivity)
    +{
    +    if (!longCommandWArg(&stringPtr, "s=") && !longCommandWArg(&stringPtr, "selectivity=")) { return 0; }
    +    *selectivity = readU32FromChar(&stringPtr);
    +    if (stringPtr[0] != 0) return 0;
    +    DISPLAYLEVEL(4, "legacy: selectivity=%u\n", *selectivity);
    +    return 1;
    +}
    +
    +static COVER_params_t defaultCoverParams(void)
    +{
    +    COVER_params_t params;
    +    memset(&params, 0, sizeof(params));
    +    params.d = 8;
    +    params.steps = 4;
    +    return params;
    +}
     #endif
     
     
    @@ -266,6 +318,7 @@ int main(int argCount, const char* argv[])
     {
         int argNb,
             forceStdout=0,
    +        followLinks=0,
             main_pause=0,
             nextEntryIsDictionary=0,
             operationResult=0,
    @@ -301,8 +354,8 @@ int main(int argCount, const char* argv[])
         unsigned fileNamesNb;
     #endif
     #ifndef ZSTD_NODICT
    -    COVER_params_t coverParams;
    -    int cover = 0;
    +    COVER_params_t coverParams = defaultCoverParams();
    +    int cover = 1;
     #endif
     
         /* init */
    @@ -312,19 +365,19 @@ int main(int argCount, const char* argv[])
         (void)memLimit;   /* not used when ZSTD_NODECOMPRESS set */
         if (filenameTable==NULL) { DISPLAY("zstd: %s \n", strerror(errno)); exit(1); }
         filenameTable[0] = stdinmark;
    -    displayOut = stderr;
    -    /* Pick out program name from path. Don't rely on stdlib because of conflicting behavior */
    -    {   size_t pos;
    -        for (pos = (int)strlen(programName); pos > 0; pos--) { if (programName[pos] == '/') { pos++; break; } }
    -        programName += pos;
    -    }
    +    g_displayOut = stderr;
    +
    +    programName = lastNameFromPath(programName);
     
         /* preset behaviors */
    -    if (!strcmp(programName, ZSTD_UNZSTD)) operation=zom_decompress;
    -    if (!strcmp(programName, ZSTD_CAT)) { operation=zom_decompress; forceStdout=1; FIO_overwriteMode(); outFileName=stdoutmark; displayLevel=1; }
    -    if (!strcmp(programName, ZSTD_GZ)) { suffix = GZ_EXTENSION; FIO_setCompressionType(FIO_gzipCompression); FIO_setRemoveSrcFile(1); }    /* behave like gzip */
    -    if (!strcmp(programName, ZSTD_GUNZIP)) { operation=zom_decompress; FIO_setRemoveSrcFile(1); }                                          /* behave like gunzip */
    -    if (!strcmp(programName, ZSTD_GZCAT)) { operation=zom_decompress; forceStdout=1; FIO_overwriteMode(); outFileName=stdoutmark; displayLevel=1; }  /* behave like gzcat */
    +    if (exeNameMatch(programName, ZSTD_ZSTDMT)) nbThreads=0;
    +    if (exeNameMatch(programName, ZSTD_UNZSTD)) operation=zom_decompress;
    +    if (exeNameMatch(programName, ZSTD_CAT)) { operation=zom_decompress; forceStdout=1; FIO_overwriteMode(); outFileName=stdoutmark; g_displayLevel=1; }
    +    if (exeNameMatch(programName, ZSTD_GZ)) { suffix = GZ_EXTENSION; FIO_setCompressionType(FIO_gzipCompression); FIO_setRemoveSrcFile(1); }    /* behave like gzip */
    +    if (exeNameMatch(programName, ZSTD_GUNZIP)) { operation=zom_decompress; FIO_setRemoveSrcFile(1); }                                          /* behave like gunzip */
    +    if (exeNameMatch(programName, ZSTD_GZCAT)) { operation=zom_decompress; forceStdout=1; FIO_overwriteMode(); outFileName=stdoutmark; g_displayLevel=1; }  /* behave like gzcat */
    +    if (exeNameMatch(programName, ZSTD_LZMA)) { suffix = LZMA_EXTENSION; FIO_setCompressionType(FIO_lzmaCompression); FIO_setRemoveSrcFile(1); }    /* behave like lzma */
    +    if (exeNameMatch(programName, ZSTD_XZ)) { suffix = XZ_EXTENSION; FIO_setCompressionType(FIO_xzCompression); FIO_setRemoveSrcFile(1); }    /* behave like xz */
         memset(&compressionParams, 0, sizeof(compressionParams));
     
         /* command switches */
    @@ -338,7 +391,7 @@ int main(int argCount, const char* argv[])
                     if (!filenameIdx) {
                         filenameIdx=1, filenameTable[0]=stdinmark;
                         outFileName=stdoutmark;
    -                    displayLevel-=(displayLevel==2);
    +                    g_displayLevel-=(g_displayLevel==2);
                         continue;
                 }   }
     
    @@ -351,12 +404,12 @@ int main(int argCount, const char* argv[])
                         if (!strcmp(argument, "--compress")) { operation=zom_compress; continue; }
                         if (!strcmp(argument, "--decompress")) { operation=zom_decompress; continue; }
                         if (!strcmp(argument, "--uncompress")) { operation=zom_decompress; continue; }
    -                    if (!strcmp(argument, "--force")) { FIO_overwriteMode(); continue; }
    -                    if (!strcmp(argument, "--version")) { displayOut=stdout; DISPLAY(WELCOME_MESSAGE); CLEAN_RETURN(0); }
    -                    if (!strcmp(argument, "--help")) { displayOut=stdout; CLEAN_RETURN(usage_advanced(programName)); }
    -                    if (!strcmp(argument, "--verbose")) { displayLevel++; continue; }
    -                    if (!strcmp(argument, "--quiet")) { displayLevel--; continue; }
    -                    if (!strcmp(argument, "--stdout")) { forceStdout=1; outFileName=stdoutmark; displayLevel-=(displayLevel==2); continue; }
    +                    if (!strcmp(argument, "--force")) { FIO_overwriteMode(); forceStdout=1; followLinks=1; continue; }
    +                    if (!strcmp(argument, "--version")) { g_displayOut=stdout; DISPLAY(WELCOME_MESSAGE); CLEAN_RETURN(0); }
    +                    if (!strcmp(argument, "--help")) { g_displayOut=stdout; CLEAN_RETURN(usage_advanced(programName)); }
    +                    if (!strcmp(argument, "--verbose")) { g_displayLevel++; continue; }
    +                    if (!strcmp(argument, "--quiet")) { g_displayLevel--; continue; }
    +                    if (!strcmp(argument, "--stdout")) { forceStdout=1; outFileName=stdoutmark; g_displayLevel-=(g_displayLevel==2); continue; }
                         if (!strcmp(argument, "--ultra")) { ultra=1; continue; }
                         if (!strcmp(argument, "--check")) { FIO_setChecksumFlag(2); continue; }
                         if (!strcmp(argument, "--no-check")) { FIO_setChecksumFlag(0); continue; }
    @@ -364,8 +417,8 @@ int main(int argCount, const char* argv[])
                         if (!strcmp(argument, "--no-sparse")) { FIO_setSparseWrite(0); continue; }
                         if (!strcmp(argument, "--test")) { operation=zom_test; continue; }
                         if (!strcmp(argument, "--train")) { operation=zom_train; outFileName=g_defaultDictName; continue; }
    -                    if (!strcmp(argument, "--maxdict")) { nextArgumentIsMaxDict=1; lastCommand=1; continue; }
    -                    if (!strcmp(argument, "--dictID")) { nextArgumentIsDictID=1; lastCommand=1; continue; }
    +                    if (!strcmp(argument, "--maxdict")) { nextArgumentIsMaxDict=1; lastCommand=1; continue; }  /* kept available for compatibility with old syntax ; will be removed one day */
    +                    if (!strcmp(argument, "--dictID")) { nextArgumentIsDictID=1; lastCommand=1; continue; }  /* kept available for compatibility with old syntax ; will be removed one day */
                         if (!strcmp(argument, "--no-dictID")) { FIO_setDictIDFlag(0); continue; }
                         if (!strcmp(argument, "--keep")) { FIO_setRemoveSrcFile(0); continue; }
                         if (!strcmp(argument, "--rm")) { FIO_setRemoveSrcFile(1); continue; }
    @@ -373,26 +426,44 @@ int main(int argCount, const char* argv[])
     #ifdef ZSTD_GZCOMPRESS
                         if (!strcmp(argument, "--format=gzip")) { suffix = GZ_EXTENSION; FIO_setCompressionType(FIO_gzipCompression); continue; }
     #endif
    +#ifdef ZSTD_LZMACOMPRESS
    +                    if (!strcmp(argument, "--format=lzma")) { suffix = LZMA_EXTENSION; FIO_setCompressionType(FIO_lzmaCompression);  continue; }
    +                    if (!strcmp(argument, "--format=xz")) { suffix = XZ_EXTENSION; FIO_setCompressionType(FIO_xzCompression);  continue; }
    +#endif
    +#ifdef ZSTD_LZ4COMPRESS
    +                    if (!strcmp(argument, "--format=lz4")) { suffix = LZ4_EXTENSION; FIO_setCompressionType(FIO_lz4Compression);  continue; }
    +#endif
     
                         /* long commands with arguments */
     #ifndef ZSTD_NODICT
    -                    if (longCommandWArg(&argument, "--cover=")) {
    -                      cover=1; if (!parseCoverParameters(argument, &coverParams)) CLEAN_RETURN(badusage(programName));
    -                      continue;
    -                    }
    -                    if (longCommandWArg(&argument, "--optimize-cover")) {
    -                      cover=2;
    +                    if (longCommandWArg(&argument, "--train-cover")) {
    +                      operation = zom_train;
    +                      outFileName = g_defaultDictName;
    +                      cover = 1;
                           /* Allow optional arguments following an = */
                           if (*argument == 0) { memset(&coverParams, 0, sizeof(coverParams)); }
                           else if (*argument++ != '=') { CLEAN_RETURN(badusage(programName)); }
                           else if (!parseCoverParameters(argument, &coverParams)) { CLEAN_RETURN(badusage(programName)); }
                           continue;
                         }
    +                    if (longCommandWArg(&argument, "--train-legacy")) {
    +                      operation = zom_train;
    +                      outFileName = g_defaultDictName;
    +                      cover = 0;
    +                      /* Allow optional arguments following an = */
    +                      if (*argument == 0) { continue; }
    +                      else if (*argument++ != '=') { CLEAN_RETURN(badusage(programName)); }
    +                      else if (!parseLegacyParameters(argument, &dictSelect)) { CLEAN_RETURN(badusage(programName)); }
    +                      continue;
    +                    }
     #endif
    +                    if (longCommandWArg(&argument, "--threads=")) { nbThreads = readU32FromChar(&argument); continue; }
                         if (longCommandWArg(&argument, "--memlimit=")) { memLimit = readU32FromChar(&argument); continue; }
                         if (longCommandWArg(&argument, "--memory=")) { memLimit = readU32FromChar(&argument); continue; }
                         if (longCommandWArg(&argument, "--memlimit-decompress=")) { memLimit = readU32FromChar(&argument); continue; }
                         if (longCommandWArg(&argument, "--block-size=")) { blockSize = readU32FromChar(&argument); continue; }
    +                    if (longCommandWArg(&argument, "--maxdict=")) { maxDictSize = readU32FromChar(&argument); continue; }
    +                    if (longCommandWArg(&argument, "--dictID=")) { dictID = readU32FromChar(&argument); continue; }
                         if (longCommandWArg(&argument, "--zstd=")) { if (!parseCompressionParameters(argument, &compressionParams)) CLEAN_RETURN(badusage(programName)); continue; }
                         /* fall-through, will trigger bad_usage() later on */
                     }
    @@ -414,9 +485,9 @@ int main(int argCount, const char* argv[])
                         switch(argument[0])
                         {
                             /* Display help */
    -                    case 'V': displayOut=stdout; DISPLAY(WELCOME_MESSAGE); CLEAN_RETURN(0);   /* Version Only */
    +                    case 'V': g_displayOut=stdout; DISPLAY(WELCOME_MESSAGE); CLEAN_RETURN(0);   /* Version Only */
                         case 'H':
    -                    case 'h': displayOut=stdout; CLEAN_RETURN(usage_advanced(programName));
    +                    case 'h': g_displayOut=stdout; CLEAN_RETURN(usage_advanced(programName));
     
                              /* Compress */
                         case 'z': operation=zom_compress; argument++; break;
    @@ -435,19 +506,19 @@ int main(int argCount, const char* argv[])
                         case 'D': nextEntryIsDictionary = 1; lastCommand = 1; argument++; break;
     
                             /* Overwrite */
    -                    case 'f': FIO_overwriteMode(); forceStdout=1; argument++; break;
    +                    case 'f': FIO_overwriteMode(); forceStdout=1; followLinks=1; argument++; break;
     
                             /* Verbose mode */
    -                    case 'v': displayLevel++; argument++; break;
    +                    case 'v': g_displayLevel++; argument++; break;
     
                             /* Quiet mode */
    -                    case 'q': displayLevel--; argument++; break;
    +                    case 'q': g_displayLevel--; argument++; break;
     
    -                        /* keep source file (default); for gzip/xz compatibility */
    +                        /* keep source file (default) */
                         case 'k': FIO_setRemoveSrcFile(0); argument++; break;
     
                             /* Checksum */
    -                    case 'C': argument++; FIO_setChecksumFlag(2); break;
    +                    case 'C': FIO_setChecksumFlag(2); argument++; break;
     
                             /* test compressed file */
                         case 't': operation=zom_test; argument++; break;
    @@ -522,14 +593,14 @@ int main(int argCount, const char* argv[])
                     continue;
                 }   /* if (argument[0]=='-') */
     
    -            if (nextArgumentIsMaxDict) {
    +            if (nextArgumentIsMaxDict) {  /* kept available for compatibility with old syntax ; will be removed one day */
                     nextArgumentIsMaxDict = 0;
                     lastCommand = 0;
                     maxDictSize = readU32FromChar(&argument);
                     continue;
                 }
     
    -            if (nextArgumentIsDictID) {
    +            if (nextArgumentIsDictID) {  /* kept available for compatibility with old syntax ; will be removed one day */
                     nextArgumentIsDictID = 0;
                     lastCommand = 0;
                     dictID = readU32FromChar(&argument);
    @@ -571,9 +642,27 @@ int main(int argCount, const char* argv[])
         DISPLAYLEVEL(4, "PLATFORM_POSIX_VERSION defined: %ldL\n", (long) PLATFORM_POSIX_VERSION);
     #endif
     
    +    if (nbThreads == 0) {
    +        /* try to guess */
    +        nbThreads = UTIL_countPhysicalCores();
    +        DISPLAYLEVEL(3, "Note: %d physical core(s) detected\n", nbThreads);
    +    }
    +
    +    g_utilDisplayLevel = g_displayLevel;
    +    if (!followLinks) {
    +        unsigned u;
    +        for (u=0, fileNamesNb=0; u<filenameIdx; u++) {
    +            if (UTIL_isLink(filenameTable[u])) {
    +                DISPLAYLEVEL(2, "Warning : %s is a symbolic link, ignoring\n", filenameTable[u]);
    +            } else {
    +                filenameTable[fileNamesNb++] = filenameTable[u];
    +            }
    +        }
    +        filenameIdx = fileNamesNb;
    +    }
    +
    -    if (!strcmp(filenameTable[0], stdinmark) && outFileName && !strcmp(outFileName,stdoutmark) && (displayLevel==2)) displayLevel=1;
    -    if ((filenameIdx>1) & (displayLevel==2)) displayLevel=1;
    +    if (!strcmp(filenameTable[0], stdinmark) && outFileName && !strcmp(outFileName,stdoutmark) && (g_displayLevel==2)) g_displayLevel=1;
    +    if ((filenameIdx>1) & (g_displayLevel==2)) g_displayLevel=1;
     
         /* IO Stream/File */
    -    FIO_setNotificationLevel(displayLevel);
    +    FIO_setNotificationLevel(g_displayLevel);
         if (operation==zom_compress) {
     #ifndef ZSTD_NOCOMPRESS
             FIO_setNbThreads(nbThreads);
    diff --git a/tests/Makefile b/tests/Makefile
    index 8b19aa3d5..ea58c0fe5 100644
    --- a/tests/Makefile
    +++ b/tests/Makefile
    @@ -26,9 +26,12 @@ PYTHON ?= python3
     TESTARTEFACT := versionsTest namespaceTest
     
     
    -CPPFLAGS+= -I$(ZSTDDIR) -I$(ZSTDDIR)/common -I$(ZSTDDIR)/compress -I$(ZSTDDIR)/dictBuilder -I$(ZSTDDIR)/deprecated -I$(PRGDIR)
    +DEBUGFLAG=-g -DZSTD_DEBUG=1
    +CPPFLAGS+= -I$(ZSTDDIR) -I$(ZSTDDIR)/common -I$(ZSTDDIR)/compress \
    +           -I$(ZSTDDIR)/dictBuilder -I$(ZSTDDIR)/deprecated -I$(PRGDIR) \
    +           $(DEBUGFLAG)
     CFLAGS  ?= -O3
    -CFLAGS  += -g -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
    +CFLAGS  += -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow \
                -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement \
                -Wstrict-prototypes -Wundef -Wformat-security
     CFLAGS  += $(MOREFLAGS)
    @@ -60,23 +63,23 @@ endif
     MULTITHREAD = $(MULTITHREAD_CPP) $(MULTITHREAD_LD)
     
     VOID = /dev/null
    -ZSTREAM_TESTTIME = -T2mn
    +ZSTREAM_TESTTIME ?= -T2mn
     FUZZERTEST ?= -T5mn
     ZSTDRTTEST = --test-large-data
    -DECODECORPUS_TESTTIME = -T30
    +DECODECORPUS_TESTTIME ?= -T30
     
    -.PHONY: default all all32 dll clean test test32 test-all namespaceTest versionsTest
    +.PHONY: default all all32 allnothread dll clean test test32 test-all namespaceTest versionsTest
     
     default: fullbench
     
    -all: fullbench fuzzer zstreamtest paramgrill datagen zbufftest
    +all: fullbench fuzzer zstreamtest paramgrill datagen zbufftest decodecorpus
     
     all32: fullbench32 fuzzer32 zstreamtest32 zbufftest32
     
    +allnothread: fullbench fuzzer paramgrill datagen zbufftest decodecorpus
    +
     dll: fuzzer-dll zstreamtest-dll zbufftest-dll
     
    -
    -
     zstd:
     	$(MAKE) -C $(PRGDIR) $@
     
    @@ -154,6 +157,7 @@ zstreamtest-dll : $(ZSTDDIR)/common/xxhash.c $(PRGDIR)/datagen.c zstreamtest.c
     	$(MAKE) -C $(ZSTDDIR) libzstd
     	$(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@$(EXT)
     
    +paramgrill : DEBUGFLAG =
     paramgrill : $(ZSTD_FILES) $(PRGDIR)/datagen.c paramgrill.c
     	$(CC)      $(FLAGS) $^ -lm -o $@$(EXT)
     
    @@ -169,7 +173,7 @@ longmatch  : $(ZSTD_FILES) longmatch.c
     invalidDictionaries  : $(ZSTD_FILES) invalidDictionaries.c
     	$(CC)      $(FLAGS) $^ -o $@$(EXT)
     
    -legacy : CFLAGS+= -DZSTD_LEGACY_SUPPORT=1
    +legacy : CFLAGS+= -DZSTD_LEGACY_SUPPORT=4
     legacy : CPPFLAGS+= -I$(ZSTDDIR)/legacy
     legacy : $(ZSTD_FILES) $(wildcard $(ZSTDDIR)/legacy/*.c) legacy.c
     	$(CC)      $(FLAGS) $^ -o $@$(EXT)
    @@ -257,6 +261,8 @@ zstd-playTests: datagen
     shortest: ZSTDRTTEST=
     shortest: test-zstd
     
    +fuzztest: test-fuzzer test-zstream test-decodecorpus
    +
     test: test-zstd test-fullbench test-fuzzer test-zstream test-invalidDictionaries test-legacy test-decodecorpus
     ifeq ($(QEMU_SYS),)
     test: test-pool
    @@ -300,10 +306,10 @@ test-fullbench32: fullbench32 datagen
     	$(QEMU_SYS) ./fullbench32 -i1 -P0
     
     test-fuzzer: fuzzer
    -	$(QEMU_SYS) ./fuzzer $(FUZZERTEST)
    +	$(QEMU_SYS) ./fuzzer $(FUZZERTEST) $(FUZZER_FLAGS)
     
     test-fuzzer32: fuzzer32
    -	$(QEMU_SYS) ./fuzzer32 $(FUZZERTEST)
    +	$(QEMU_SYS) ./fuzzer32 $(FUZZERTEST) $(FUZZER_FLAGS)
     
     test-zbuff: zbufftest
     	$(QEMU_SYS) ./zbufftest $(ZSTREAM_TESTTIME)
    @@ -312,10 +318,10 @@ test-zbuff32: zbufftest32
     	$(QEMU_SYS) ./zbufftest32 $(ZSTREAM_TESTTIME)
     
     test-zstream: zstreamtest
    -	$(QEMU_SYS) ./zstreamtest $(ZSTREAM_TESTTIME)
    +	$(QEMU_SYS) ./zstreamtest $(ZSTREAM_TESTTIME) $(FUZZER_FLAGS)
     
     test-zstream32: zstreamtest32
    -	$(QEMU_SYS) ./zstreamtest32 $(ZSTREAM_TESTTIME)
    +	$(QEMU_SYS) ./zstreamtest32 $(ZSTREAM_TESTTIME) $(FUZZER_FLAGS)
     
     test-longmatch: longmatch
     	$(QEMU_SYS) ./longmatch
    diff --git a/tests/decodecorpus.c b/tests/decodecorpus.c
    index 183d20f74..f7b3c854f 100644
    --- a/tests/decodecorpus.c
    +++ b/tests/decodecorpus.c
    @@ -98,7 +98,7 @@ static void RAND_bufferMaxSymb(U32* seed, void* ptr, size_t size, int maxSymb)
         BYTE* op = ptr;
     
         for (i = 0; i < size; i++) {
    -        op[i] = RAND(seed) % (maxSymb + 1);
    +        op[i] = (BYTE) (RAND(seed) % (maxSymb + 1));
         }
     }
     
    @@ -134,8 +134,8 @@ static void RAND_genDist(U32* seed, BYTE* dist, double weight)
     {
         size_t i = 0;
         size_t statesLeft = DISTSIZE;
    -    BYTE symb = RAND(seed) % 256;
    -    BYTE step = (RAND(seed) % 256) | 1; /* force it to be odd so it's relatively prime to 256 */
    +    BYTE symb = (BYTE) (RAND(seed) % 256);
    +    BYTE step = (BYTE) ((RAND(seed) % 256) | 1); /* force it to be odd so it's relatively prime to 256 */
     
         while (i < DISTSIZE) {
             size_t states = ((size_t)(weight * statesLeft)) + 1;
    @@ -259,7 +259,7 @@ static void writeFrameHeader(U32* seed, frame_t* frame)
             /* Follow window algorithm from specification */
             int const exponent = RAND(seed) % (MAX_WINDOW_LOG - 10);
             int const mantissa = RAND(seed) % 8;
    -        windowByte = (exponent << 3) | mantissa;
    +        windowByte = (BYTE) ((exponent << 3) | mantissa);
             fh.windowSize = (1U << (exponent + 10));
             fh.windowSize += fh.windowSize / 8 * mantissa;
         }
    @@ -284,7 +284,7 @@ static void writeFrameHeader(U32* seed, frame_t* frame)
     
             if (contentSizeFlag && (fh.contentSize == 0 || !(RAND(seed) & 7))) {
                 /* do single segment sometimes */
    -            fh.windowSize = fh.contentSize;
    +            fh.windowSize = (U32) fh.contentSize;
                 singleSegment = 1;
             }
         }
    @@ -307,7 +307,7 @@ static void writeFrameHeader(U32* seed, frame_t* frame)
     
         {
             BYTE const frameHeaderDescriptor =
    -                (fcsCode << 6) | (singleSegment << 5) | (1 << 2);
    +                (BYTE) ((fcsCode << 6) | (singleSegment << 5) | (1 << 2));
             op[pos++] = frameHeaderDescriptor;
         }
     
    @@ -318,14 +318,14 @@ static void writeFrameHeader(U32* seed, frame_t* frame)
         if (contentSizeFlag) {
             switch (fcsCode) {
             default: /* Impossible */
    -        case 0: op[pos++] = fh.contentSize; break;
    -        case 1: MEM_writeLE16(op + pos, fh.contentSize - 256); pos += 2; break;
    -        case 2: MEM_writeLE32(op + pos, fh.contentSize); pos += 4; break;
    -        case 3: MEM_writeLE64(op + pos, fh.contentSize); pos += 8; break;
    +        case 0: op[pos++] = (BYTE) fh.contentSize; break;
    +        case 1: MEM_writeLE16(op + pos, (U16) (fh.contentSize - 256)); pos += 2; break;
    +        case 2: MEM_writeLE32(op + pos, (U32) fh.contentSize); pos += 4; break;
    +        case 3: MEM_writeLE64(op + pos, (U64) fh.contentSize); pos += 8; break;
             }
         }
     
    -    DISPLAYLEVEL(2, " frame content size:\t%zu\n", fh.contentSize);
    +    DISPLAYLEVEL(2, " frame content size:\t%u\n", (U32)fh.contentSize);
         DISPLAYLEVEL(2, " frame window size:\t%u\n", fh.windowSize);
         DISPLAYLEVEL(2, " content size flag:\t%d\n", contentSizeFlag);
         DISPLAYLEVEL(2, " single segment flag:\t%d\n", singleSegment);
    @@ -385,7 +385,7 @@ static size_t writeLiteralsBlockSimple(U32* seed, frame_t* frame, size_t content
             op += litSize;
         } else {
             /* RLE literals */
    -        BYTE const symb = RAND(seed) % 256;
    +        BYTE const symb = (BYTE) (RAND(seed) % 256);
     
             DISPLAYLEVEL(4, "   rle literals: 0x%02x\n", (U32)symb);
     
    @@ -542,8 +542,8 @@ static size_t writeLiteralsBlockCompressed(U32* seed, frame_t* frame, size_t con
             op += compressedSize;
     
             compressedSize += hufHeaderSize;
    -        DISPLAYLEVEL(5, "    regenerated size: %zu\n", litSize);
    -        DISPLAYLEVEL(5, "    compressed size: %zu\n", compressedSize);
    +        DISPLAYLEVEL(5, "    regenerated size: %u\n", (U32)litSize);
    +        DISPLAYLEVEL(5, "    compressed size: %u\n", (U32)compressedSize);
             if (compressedSize >= litSize) {
                 DISPLAYLEVEL(5, "     trying again\n");
                 /* if we have to try again, reset the stats so we don't accidentally
    @@ -611,7 +611,7 @@ static U32 generateSequences(U32* seed, frame_t* frame, seqStore_t* seqStore,
         size_t const remainingMatch = contentSize - literalsSize;
         size_t excessMatch = 0;
         U32 numSequences = 0;
    -  
    +
         U32 i;
     
     
    @@ -628,7 +628,7 @@ static U32 generateSequences(U32* seed, frame_t* frame, seqStore_t* seqStore,
             excessMatch = remainingMatch - numSequences * MIN_SEQ_LEN;
         }
     
    -    DISPLAYLEVEL(5, "    total match lengths: %zu\n", remainingMatch);
    +    DISPLAYLEVEL(5, "    total match lengths: %u\n", (U32)remainingMatch);
     
         for (i = 0; i < numSequences; i++) {
             /* Generate match and literal lengths by exponential distribution to
    @@ -647,10 +647,10 @@ static U32 generateSequences(U32* seed, frame_t* frame, seqStore_t* seqStore,
             U32 offset, offsetCode, repIndex;
     
             /* bounds checks */
    -        matchLen = MIN(matchLen, excessMatch + MIN_SEQ_LEN);
    -        literalLen = MIN(literalLen, literalsSize);
    +        matchLen = (U32) MIN(matchLen, excessMatch + MIN_SEQ_LEN);
    +        literalLen = MIN(literalLen, (U32) literalsSize);
             if (i == 0 && srcPtr == frame->srcStart && literalLen == 0) literalLen = 1;
    -        if (i + 1 == numSequences) matchLen = MIN_SEQ_LEN + excessMatch;
    +        if (i + 1 == numSequences) matchLen = MIN_SEQ_LEN + (U32) excessMatch;
     
             memcpy(srcPtr, literals, literalLen);
             srcPtr += literalLen;
    @@ -694,8 +694,8 @@ static U32 generateSequences(U32* seed, frame_t* frame, seqStore_t* seqStore,
             }
     
             DISPLAYLEVEL(6, "      LL: %5u OF: %5u ML: %5u", literalLen, offset, matchLen);
    -        DISPLAYLEVEL(7, " srcPos: %8tu seqNb: %3u",
    -                     (BYTE*)srcPtr - (BYTE*)frame->srcStart, i);
    +        DISPLAYLEVEL(7, " srcPos: %8u seqNb: %3u",
    +                     (U32)((BYTE*)srcPtr - (BYTE*)frame->srcStart), i);
             DISPLAYLEVEL(6, "\n");
             if (offsetCode < 3) {
                 DISPLAYLEVEL(7, "        repeat offset: %d\n", repIndex);
    @@ -711,8 +711,8 @@ static U32 generateSequences(U32* seed, frame_t* frame, seqStore_t* seqStore,
     
         memcpy(srcPtr, literals, literalsSize);
         srcPtr += literalsSize;
    -    DISPLAYLEVEL(6, "      excess literals: %5zu", literalsSize);
    -    DISPLAYLEVEL(7, " srcPos: %8tu", (BYTE*)srcPtr - (BYTE*)frame->srcStart);
    +    DISPLAYLEVEL(6, "      excess literals: %5u", (U32)literalsSize);
    +    DISPLAYLEVEL(7, " srcPos: %8u", (U32)((BYTE*)srcPtr - (BYTE*)frame->srcStart));
         DISPLAYLEVEL(6, "\n");
     
         return numSequences;
    @@ -957,11 +957,11 @@ static size_t writeCompressedBlock(U32* seed, frame_t* frame, size_t contentSize
     
         literalsSize = writeLiteralsBlock(seed, frame, contentSize);
     
    -    DISPLAYLEVEL(4, "   literals size: %zu\n", literalsSize);
    +    DISPLAYLEVEL(4, "   literals size: %u\n", (U32)literalsSize);
     
         nbSeq = writeSequencesBlock(seed, frame, contentSize, literalsSize);
     
    -    DISPLAYLEVEL(4, "   number of sequences: %zu\n", nbSeq);
    +    DISPLAYLEVEL(4, "   number of sequences: %u\n", (U32)nbSeq);
     
         return (BYTE*)frame->data - blockStart;
     }
    @@ -977,7 +977,7 @@ static void writeBlock(U32* seed, frame_t* frame, size_t contentSize,
         BYTE *op = header + 3;
     
         DISPLAYLEVEL(3, " block:\n");
    -    DISPLAYLEVEL(3, "  block content size: %zu\n", contentSize);
    +    DISPLAYLEVEL(3, "  block content size: %u\n", (U32)contentSize);
         DISPLAYLEVEL(3, "  last block: %s\n", lastBlock ? "yes" : "no");
     
         if (blockTypeDesc == 0) {
    @@ -1025,10 +1025,10 @@ static void writeBlock(U32* seed, frame_t* frame, size_t contentSize,
         frame->src = (BYTE*)frame->src + contentSize;
     
         DISPLAYLEVEL(3, "  block type: %s\n", BLOCK_TYPES[blockType]);
    -    DISPLAYLEVEL(3, "  block size field: %zu\n", blockSize);
    +    DISPLAYLEVEL(3, "  block size field: %u\n", (U32)blockSize);
     
    -    header[0] = (lastBlock | (blockType << 1) | (blockSize << 3)) & 0xff;
    -    MEM_writeLE16(header + 1, blockSize >> 5);
    +    header[0] = (BYTE) ((lastBlock | (blockType << 1) | (blockSize << 3)) & 0xff);
    +    MEM_writeLE16(header + 1, (U16) (blockSize >> 5));
     
         frame->data = op;
     }
    @@ -1300,7 +1300,7 @@ static int generateCorpus(U32 seed, unsigned numFiles, const char* const path,
     *********************************************************/
     static U32 makeSeed(void)
     {
    -    U32 t = time(NULL);
    +    U32 t = (U32) time(NULL);
         return XXH32(&t, sizeof(t), 0) % 65536;
     }
     
    diff --git a/tests/fullbench.c b/tests/fullbench.c
    index 940d315a7..13323aec1 100644
    --- a/tests/fullbench.c
    +++ b/tests/fullbench.c
    @@ -251,14 +251,14 @@ static size_t benchMem(const void* src, size_t srcSize, U32 benchNb)
         case 13:
             benchFunction = local_ZSTD_decompressContinue; benchName = "ZSTD_decompressContinue";
             break;
    -	case 31:
    +    case 31:
             benchFunction = local_ZSTD_decodeLiteralsBlock; benchName = "ZSTD_decodeLiteralsBlock";
             break;
         case 32:
             benchFunction = local_ZSTD_decodeSeqHeaders; benchName = "ZSTD_decodeSeqHeaders";
             break;
     #endif
    -	case 41:
    +    case 41:
             benchFunction = local_ZSTD_compressStream; benchName = "ZSTD_compressStream";
             break;
         case 42:
    @@ -297,10 +297,10 @@ static size_t benchMem(const void* src, size_t srcSize, U32 benchNb)
         case 31:  /* ZSTD_decodeLiteralsBlock */
             if (g_zdc==NULL) g_zdc = ZSTD_createDCtx();
             {   blockProperties_t bp;
    -            ZSTD_frameParams zfp;
    +            ZSTD_frameHeader zfp;
                 size_t frameHeaderSize, skippedSize;
                 g_cSize = ZSTD_compress(dstBuff, dstBuffSize, src, srcSize, 1);
    -            frameHeaderSize = ZSTD_getFrameParams(&zfp, dstBuff, ZSTD_frameHeaderSize_min);
    +            frameHeaderSize = ZSTD_getFrameHeader(&zfp, dstBuff, ZSTD_frameHeaderSize_min);
                 if (frameHeaderSize==0) frameHeaderSize = ZSTD_frameHeaderSize_min;
                 ZSTD_getcBlockSize(dstBuff+frameHeaderSize, dstBuffSize, &bp);  /* Get 1st block type */
                 if (bp.blockType != bt_compressed) {
    @@ -315,13 +315,13 @@ static size_t benchMem(const void* src, size_t srcSize, U32 benchNb)
         case 32:   /* ZSTD_decodeSeqHeaders */
             if (g_zdc==NULL) g_zdc = ZSTD_createDCtx();
             {   blockProperties_t bp;
    -            ZSTD_frameParams zfp;
    +            ZSTD_frameHeader zfp;
                 const BYTE* ip = dstBuff;
                 const BYTE* iend;
                 size_t frameHeaderSize, cBlockSize;
                 ZSTD_compress(dstBuff, dstBuffSize, src, srcSize, 1);   /* it would be better to use direct block compression here */
                 g_cSize = ZSTD_compress(dstBuff, dstBuffSize, src, srcSize, 1);
    -            frameHeaderSize = ZSTD_getFrameParams(&zfp, dstBuff, ZSTD_frameHeaderSize_min);
    +            frameHeaderSize = ZSTD_getFrameHeader(&zfp, dstBuff, ZSTD_frameHeaderSize_min);
                 if (frameHeaderSize==0) frameHeaderSize = ZSTD_frameHeaderSize_min;
                 ip += frameHeaderSize;   /* Skip frame Header */
                 cBlockSize = ZSTD_getcBlockSize(ip, dstBuffSize, &bp);   /* Get 1st block type */
    diff --git a/tests/fuzzer.c b/tests/fuzzer.c
    index 6fb69972a..4b9cd97a5 100644
    --- a/tests/fuzzer.c
    +++ b/tests/fuzzer.c
    @@ -28,6 +28,7 @@
     #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_compressContinue, ZSTD_compressBlock */
     #include "zstd.h"         /* ZSTD_VERSION_STRING */
     #include "zstd_errors.h"  /* ZSTD_getErrorCode */
    +#include "zstdmt_compress.h"
     #define ZDICT_STATIC_LINKING_ONLY
     #include "zdict.h"        /* ZDICT_trainFromBuffer */
     #include "datagen.h"      /* RDG_genBuffer */
    @@ -57,7 +58,7 @@ static U32 g_displayLevel = 2;
     #define DISPLAYUPDATE(l, ...) if (g_displayLevel>=l) { \
                 if ((FUZ_clockSpan(g_displayClock) > g_refreshRate) || (g_displayLevel>=4)) \
                 { g_displayClock = clock(); DISPLAY(__VA_ARGS__); \
    -            if (g_displayLevel>=4) fflush(stdout); } }
    +            if (g_displayLevel>=4) fflush(stderr); } }
     static const clock_t g_refreshRate = CLOCKS_PER_SEC / 6;
     static clock_t g_displayClock = 0;
     
    @@ -66,6 +67,7 @@ static clock_t g_displayClock = 0;
     *  Fuzzer functions
     *********************************************************/
     #define MIN(a,b) ((a)<(b)?(a):(b))
    +#define MAX(a,b) ((a)>(b)?(a):(b))
     
     static clock_t FUZ_clockSpan(clock_t cStart)
     {
    @@ -132,13 +134,21 @@ static int basicUnitTests(U32 seed, double compressibility)
             DISPLAYLEVEL(4, "OK : %s \n", errorString);
         }
     
    +
         DISPLAYLEVEL(4, "test%3i : compress %u bytes : ", testNb++, (U32)CNBuffSize);
         CHECKPLUS(r, ZSTD_compress(compressedBuffer, ZSTD_compressBound(CNBuffSize),
                                    CNBuffer, CNBuffSize, 1),
                   cSize=r );
         DISPLAYLEVEL(4, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBuffSize*100);
     
    -    DISPLAYLEVEL(4, "test%3i : decompressed size test : ", testNb++);
    +
    +    DISPLAYLEVEL(4, "test%3i : ZSTD_getFrameContentSize test : ", testNb++);
    +    {   unsigned long long const rSize = ZSTD_getFrameContentSize(compressedBuffer, cSize);
    +        if (rSize != CNBuffSize) goto _output_error;
    +    }
    +    DISPLAYLEVEL(4, "OK \n");
    +
    +    DISPLAYLEVEL(4, "test%3i : ZSTD_findDecompressedSize test : ", testNb++);
         {   unsigned long long const rSize = ZSTD_findDecompressedSize(compressedBuffer, cSize);
             if (rSize != CNBuffSize) goto _output_error;
         }
    @@ -156,6 +166,7 @@ static int basicUnitTests(U32 seed, double compressibility)
         }   }
         DISPLAYLEVEL(4, "OK \n");
     
    +
         DISPLAYLEVEL(4, "test%3i : decompress with null dict : ", testNb++);
         { size_t const r = ZSTD_decompress_usingDict(dctx, decodedBuffer, CNBuffSize, compressedBuffer, cSize, NULL, 0);
           if (r != CNBuffSize) goto _output_error; }
    @@ -178,6 +189,49 @@ static int basicUnitTests(U32 seed, double compressibility)
           if (ZSTD_getErrorCode(r) != ZSTD_error_srcSize_wrong) goto _output_error; }
         DISPLAYLEVEL(4, "OK \n");
     
    +
    +    /* ZSTDMT simple MT compression test */
    +    DISPLAYLEVEL(4, "test%3i : create ZSTDMT CCtx : ", testNb++);
    +    {   ZSTDMT_CCtx* mtctx = ZSTDMT_createCCtx(2);
    +        if (mtctx==NULL) {
    +            DISPLAY("mtctx : not enough memory, aborting \n");
    +            testResult = 1;
    +            goto _end;
    +        }
    +        DISPLAYLEVEL(4, "OK \n");
    +
    +        DISPLAYLEVEL(4, "test%3i : compress %u bytes with 2 threads : ", testNb++, (U32)CNBuffSize);
    +        CHECKPLUS(r, ZSTDMT_compressCCtx(mtctx,
    +                                compressedBuffer, ZSTD_compressBound(CNBuffSize),
    +                                CNBuffer, CNBuffSize,
    +                                1),
    +                  cSize=r );
    +        DISPLAYLEVEL(4, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBuffSize*100);
    +
    +        DISPLAYLEVEL(4, "test%3i : decompressed size test : ", testNb++);
    +        {   unsigned long long const rSize = ZSTD_getFrameContentSize(compressedBuffer, cSize);
    +            if (rSize != CNBuffSize)  {
    +                DISPLAY("ZSTD_getFrameContentSize incorrect : %u != %u \n", (U32)rSize, (U32)CNBuffSize);
    +                goto _output_error;
    +        }   }
    +        DISPLAYLEVEL(4, "OK \n");
    +
    +        DISPLAYLEVEL(4, "test%3i : decompress %u bytes : ", testNb++, (U32)CNBuffSize);
    +        { size_t const r = ZSTD_decompress(decodedBuffer, CNBuffSize, compressedBuffer, cSize);
    +          if (r != CNBuffSize) goto _output_error; }
    +        DISPLAYLEVEL(4, "OK \n");
    +
    +        DISPLAYLEVEL(4, "test%3i : check decompressed result : ", testNb++);
    +        {   size_t u;
    +            for (u=0; u "); DISPLAY(__VA_ARGS__); \
                              DISPLAY(" (seed %u, test nb %u)  \n", seed, testNb); goto _output_error; }
     
    -static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, U32 const maxDurationS, double compressibility)
    +static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, U32 const maxDurationS, double compressibility, int bigTests)
     {
         static const U32 maxSrcLog = 23;
         static const U32 maxSampleLog = 22;
    @@ -635,6 +772,7 @@ static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, U32 const maxD
         U32 coreSeed = seed, lseed = 0;
         clock_t const startClock = clock();
         clock_t const maxClockSpan = maxDurationS * CLOCKS_PER_SEC;
    +    int const cLevelLimiter = bigTests ? 3 : 2;
     
         /* allocation */
         cNoiseBuffer[0] = (BYTE*)malloc (srcBufferSize);
    @@ -661,7 +799,6 @@ static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, U32 const maxD
         for ( ; (testNb <= nbTests) || (FUZ_clockSpan(startClock) < maxClockSpan); testNb++ ) {
             size_t sampleSize, maxTestSize, totalTestSize;
             size_t cSize, totalCSize, totalGenSize;
    -        XXH64_state_t xxhState;
             U64 crcOrig;
             BYTE* sampleBuffer;
             const BYTE* dict;
    @@ -700,7 +837,10 @@ static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, U32 const maxD
             crcOrig = XXH64(sampleBuffer, sampleSize, 0);
     
             /* compression tests */
    -        {   unsigned const cLevel = (FUZ_rand(&lseed) % (ZSTD_maxCLevel() - (FUZ_highbit32((U32)sampleSize)/3))) + 1;
    +        {   unsigned const cLevel =
    +                    ( FUZ_rand(&lseed) %
    +                     (ZSTD_maxCLevel() - (FUZ_highbit32((U32)sampleSize) / cLevelLimiter)) )
    +                     + 1;
                 cSize = ZSTD_compressCCtx(ctx, cBuffer, cBufferSize, sampleBuffer, sampleSize, cLevel);
                 CHECK(ZSTD_isError(cSize), "ZSTD_compressCCtx failed : %s", ZSTD_getErrorName(cSize));
     
    @@ -722,8 +862,8 @@ static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, U32 const maxD
             }
     
             /* frame header decompression test */
    -        {   ZSTD_frameParams dParams;
    -            size_t const check = ZSTD_getFrameParams(&dParams, cBuffer, cSize);
    +        {   ZSTD_frameHeader dParams;
    +            size_t const check = ZSTD_getFrameHeader(&dParams, cBuffer, cSize);
                 CHECK(ZSTD_isError(check), "Frame Parameters extraction failed");
                 CHECK(dParams.frameContentSize != sampleSize, "Frame content size incorrect");
             }
    @@ -799,11 +939,15 @@ static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, U32 const maxD
             /*=====   Streaming compression test, scattered segments and dictionary   =====*/
     
             {   U32 const testLog = FUZ_rand(&lseed) % maxSrcLog;
    -            int const cLevel = (FUZ_rand(&lseed) % (ZSTD_maxCLevel() - (testLog/3))) + 1;
    +            U32 const dictLog = FUZ_rand(&lseed) % maxSrcLog;
    +            int const cLevel = (FUZ_rand(&lseed) %
    +                                (ZSTD_maxCLevel() -
    +                                 (MAX(testLog, dictLog) / cLevelLimiter))) +
    +                               1;
                 maxTestSize = FUZ_rLogLength(&lseed, testLog);
                 if (maxTestSize >= dstBufferSize) maxTestSize = dstBufferSize-1;
     
    -            dictSize = FUZ_randomLength(&lseed, maxSampleLog);   /* needed also for decompression */
    +            dictSize = FUZ_rLogLength(&lseed, dictLog);   /* needed also for decompression */
                 dict = srcBuffer + (FUZ_rand(&lseed) % (srcBufferSize - dictSize));
     
                 if (FUZ_rand(&lseed) & 0xF) {
    @@ -811,22 +955,22 @@ static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, U32 const maxD
                     CHECK (ZSTD_isError(errorCode), "ZSTD_compressBegin_usingDict error : %s", ZSTD_getErrorName(errorCode));
                 } else {
                     ZSTD_compressionParameters const cPar = ZSTD_getCParams(cLevel, 0, dictSize);
    -                ZSTD_frameParameters const fpar = { FUZ_rand(&lseed)&1 /* contentSizeFlag */,
    +                ZSTD_frameParameters const fPar = { FUZ_rand(&lseed)&1 /* contentSizeFlag */,
                                                         !(FUZ_rand(&lseed)&3) /* contentChecksumFlag*/,
                                                         0 /*NodictID*/ };   /* note : since dictionary is fake, dictIDflag has no impact */
    -                ZSTD_parameters p;
    -                size_t errorCode;
    -                p.cParams = cPar; p.fParams = fpar;
    -                errorCode = ZSTD_compressBegin_advanced(refCtx, dict, dictSize, p, 0);
    +                ZSTD_parameters const p = FUZ_makeParams(cPar, fPar);
    +                size_t const errorCode = ZSTD_compressBegin_advanced(refCtx, dict, dictSize, p, 0);
                     CHECK (ZSTD_isError(errorCode), "ZSTD_compressBegin_advanced error : %s", ZSTD_getErrorName(errorCode));
                 }
                 {   size_t const errorCode = ZSTD_copyCCtx(ctx, refCtx, 0);
                     CHECK (ZSTD_isError(errorCode), "ZSTD_copyCCtx error : %s", ZSTD_getErrorName(errorCode));
             }   }
    -        XXH64_reset(&xxhState, 0);
             ZSTD_setCCtxParameter(ctx, ZSTD_p_forceWindow, FUZ_rand(&lseed) & 1);
    +
             {   U32 const nbChunks = (FUZ_rand(&lseed) & 127) + 2;
                 U32 n;
    +            XXH64_state_t xxhState;
    +            XXH64_reset(&xxhState, 0);
                 for (totalTestSize=0, cSize=0, n=0 ; n MAX_UINT */
    +static unsigned readU32FromChar(const char** stringPtr)
    +{
    +    unsigned result = 0;
    +    while ((**stringPtr >='0') && (**stringPtr <='9'))
    +        result *= 10, result += **stringPtr - '0', (*stringPtr)++ ;
    +    if ((**stringPtr=='K') || (**stringPtr=='M')) {
    +        result <<= 10;
    +        if (**stringPtr=='M') result <<= 10;
    +        (*stringPtr)++ ;
    +        if (**stringPtr=='i') (*stringPtr)++;
    +        if (**stringPtr=='B') (*stringPtr)++;
    +    }
    +    return result;
    +}
     
     int main(int argc, const char** argv)
     {
    -    U32 seed=0;
    -    int seedset=0;
    +    U32 seed = 0;
    +    int seedset = 0;
         int argNb;
         int nbTests = nbTestsDefault;
         int testNb = 0;
         U32 proba = FUZ_compressibility_default;
    -    int result=0;
    +    int result = 0;
         U32 mainPause = 0;
         U32 maxDuration = 0;
    -    const char* programName = argv[0];
    +    int bigTests = 1;
    +    const char* const programName = argv[0];
     
         /* Check command line */
         for (argNb=1; argNb='0') && (*argument<='9')) {
    -                        nbTests *= 10;
    -                        nbTests += *argument - '0';
    -                        argument++;
    -                    }
    +                    argument++; maxDuration = 0;
    +                    nbTests = readU32FromChar(&argument);
                         break;
     
                     case 'T':
                         argument++;
    -                    nbTests=0; maxDuration=0;
    -                    while ((*argument>='0') && (*argument<='9')) {
    -                        maxDuration *= 10;
    -                        maxDuration += *argument - '0';
    -                        argument++;
    -                    }
    -                    if (*argument=='m') maxDuration *=60, argument++;
    +                    nbTests = 0;
    +                    maxDuration = readU32FromChar(&argument);
    +                    if (*argument=='s') argument++;   /* seconds */
    +                    if (*argument=='m') maxDuration *= 60, argument++;   /* minutes */
                         if (*argument=='n') argument++;
                         break;
     
                     case 's':
                         argument++;
    -                    seed=0;
    -                    seedset=1;
    -                    while ((*argument>='0') && (*argument<='9')) {
    -                        seed *= 10;
    -                        seed += *argument - '0';
    -                        argument++;
    -                    }
    +                    seedset = 1;
    +                    seed = readU32FromChar(&argument);
                         break;
     
                     case 't':
                         argument++;
    -                    testNb=0;
    -                    while ((*argument>='0') && (*argument<='9')) {
    -                        testNb *= 10;
    -                        testNb += *argument - '0';
    -                        argument++;
    -                    }
    +                    testNb = readU32FromChar(&argument);
                         break;
     
                     case 'P':   /* compressibility % */
                         argument++;
    -                    proba=0;
    -                    while ((*argument>='0') && (*argument<='9')) {
    -                        proba *= 10;
    -                        proba += *argument - '0';
    -                        argument++;
    -                    }
    -                    if (proba>100) proba=100;
    +                    proba = readU32FromChar(&argument);
    +                    if (proba>100) proba = 100;
                         break;
     
                     default:
    -                    return FUZ_usage(programName);
    +                    return (FUZ_usage(programName), 1);
         }   }   }   }   /* for (argNb=1; argNb (b) ? (a) : (b) )
    +
     
     /*-************************************
     *  Benchmark Parameters
    @@ -106,7 +111,11 @@ static size_t BMK_findMaxMem(U64 requiredMem)
     }
     
     
    -#  define FUZ_rotl32(x,r) ((x << r) | (x >> (32 - r)))
    +static U32 FUZ_rotl32(U32 x, U32 r)
    +{
    +    return ((x << r) | (x >> (32 - r)));
    +}
    +
     U32 FUZ_rand(U32* src)
     {
         const U32 prime1 = 2654435761U;
    @@ -125,7 +134,7 @@ U32 FUZ_rand(U32* src)
     *********************************************************/
     typedef struct {
         size_t cSize;
    -    double cSpeed;
    +    double cSpeed;   /* bytes / sec */
         double dSpeed;
     } BMK_result_t;
     
    @@ -141,8 +150,6 @@ typedef struct
     } blockParam_t;
     
     
    -#define MIN(a,b)  ( (a) < (b) ? (a) : (b) )
    -
     static size_t BMK_benchParam(BMK_result_t* resultPtr,
                                  const void* srcBuffer, size_t srcSize,
                                  ZSTD_CCtx* ctx,
    @@ -165,6 +172,11 @@ static size_t BMK_benchParam(BMK_result_t* resultPtr,
         char name[30] = { 0 };
         U64 crcOrig;
     
    +    /* init result for early exit */
    +    resultPtr->cSize = srcSize;
    +    resultPtr->cSpeed = 0.;
    +    resultPtr->dSpeed = 0.;
    +
         /* Memory allocation & restrictions */
         snprintf(name, 30, "Sw%02uc%02uh%02us%02ul%1ut%03uS%1u", Wlog, Clog, Hlog, Slog, Slength, Tlength, strat);
         if (!compressedBuffer || !resultBuffer || !blockTable) {
    @@ -206,7 +218,6 @@ static size_t BMK_benchParam(BMK_result_t* resultPtr,
             size_t cSize = 0;
             double fastestC = 100000000., fastestD = 100000000.;
             double ratio = 0.;
    -        U64 crcCheck = 0;
             clock_t const benchStart = clock();
     
             DISPLAY("\r%79s\r", "");
    @@ -242,8 +253,8 @@ static size_t BMK_benchParam(BMK_result_t* resultPtr,
                 cSize = 0;
                 for (blockNb=0; blockNb", loopNb, name, (U32)srcSize);
                 DISPLAY(" %9u (%4.3f),%7.1f MB/s", (U32)cSize, ratio, (double)srcSize / fastestC / 1000000.);
    @@ -273,18 +284,18 @@ static size_t BMK_benchParam(BMK_result_t* resultPtr,
                 resultPtr->dSpeed = (double)srcSize / fastestD;
     
                 /* CRC Checking */
    -            crcCheck = XXH64(resultBuffer, srcSize, 0);
    -            if (crcOrig!=crcCheck) {
    -                unsigned u;
    -                unsigned eBlockSize = (unsigned)(MIN(65536*2, blockSize));
    -                DISPLAY("\n!!! WARNING !!! Invalid Checksum : %x != %x\n", (unsigned)crcOrig, (unsigned)crcCheck);
    -                for (u=0; u> 3) & PARAMTABLEMASK]
     
     
    -#define MAX(a,b)   ( (a) > (b) ? (a) : (b) )
    -
     static void playAround(FILE* f, winnerInfo_t* winners,
                            ZSTD_compressionParameters params,
                            const void* srcBuffer, size_t srcSize,
    @@ -711,6 +720,14 @@ int benchFiles(const char** fileNamesTable, int nbFiles)
     }
     
     
    +static void BMK_translateAdvancedParams(ZSTD_compressionParameters params)
    +{
    +    DISPLAY("--zstd=windowLog=%u,chainLog=%u,hashLog=%u,searchLog=%u,searchLength=%u,targetLength=%u,strategy=%u \n",
    +             params.windowLog, params.chainLog, params.hashLog, params.searchLog, params.searchLength, params.targetLength, (U32)(params.strategy));
    +}
    +
    +/* optimizeForSize():
    + * targetSpeed : expressed in MB/s */
     int optimizeForSize(const char* inFileName, U32 targetSpeed)
     {
         FILE* const inFile = fopen( inFileName, "rb" );
    @@ -723,8 +740,11 @@ int optimizeForSize(const char* inFileName, U32 targetSpeed)
     
         /* Memory allocation & restrictions */
         if ((U64)benchedSize > inFileSize) benchedSize = (size_t)inFileSize;
    -    if (benchedSize < inFileSize)
    -        DISPLAY("Not enough memory for '%s' full size; testing %i MB only...\n", inFileName, (int)(benchedSize>>20));
    +    if (benchedSize < inFileSize) {
    +        DISPLAY("Not enough memory for '%s' \n", inFileName);
    +        fclose(inFile);
    +        return 11;
    +    }
     
         /* Alloc */
         origBuff = malloc(benchedSize);
    @@ -747,10 +767,9 @@ int optimizeForSize(const char* inFileName, U32 targetSpeed)
         /* bench */
         DISPLAY("\r%79s\r", "");
         DISPLAY("optimizing for %s - limit speed %u MB/s \n", inFileName, targetSpeed);
    -    targetSpeed *= 1000;
    +    targetSpeed *= 1000000;
     
         {   ZSTD_CCtx* const ctx = ZSTD_createCCtx();
    -        ZSTD_compressionParameters params;
             winnerInfo_t winner;
             BMK_result_t candidate;
             const size_t blockSize = g_blockSize ? g_blockSize : benchedSize;
    @@ -764,26 +783,28 @@ int optimizeForSize(const char* inFileName, U32 targetSpeed)
             {   const int maxSeeds = g_noSeed ? 1 : ZSTD_maxCLevel();
                 int i;
                 for (i=1; i<=maxSeeds; i++) {
    -                params = ZSTD_getCParams(i, blockSize, 0);
    -                BMK_benchParam(&candidate, origBuff, benchedSize, ctx, params);
    +                ZSTD_compressionParameters const CParams = ZSTD_getCParams(i, blockSize, 0);
    +                BMK_benchParam(&candidate, origBuff, benchedSize, ctx, CParams);
                     if (candidate.cSpeed < targetSpeed)
                         break;
                     if ( (candidate.cSize < winner.result.cSize)
                        | ((candidate.cSize == winner.result.cSize) & (candidate.cSpeed > winner.result.cSpeed)) )
                     {
    -                    winner.params = params;
    +                    winner.params = CParams;
                         winner.result = candidate;
                         BMK_printWinner(stdout, i, winner.result, winner.params, benchedSize);
                 }   }
             }
             BMK_printWinner(stdout, 99, winner.result, winner.params, benchedSize);
    +        BMK_translateAdvancedParams(winner.params);
     
             /* start tests */
             {   time_t const grillStart = time(NULL);
                 do {
    -                params = winner.params;
    +                ZSTD_compressionParameters params = winner.params;
                     paramVariation(¶ms);
    -                if ((FUZ_rand(&g_rand) & 15) == 3) params = randomParams();
    +                if ((FUZ_rand(&g_rand) & 31) == 3) params = randomParams();  /* totally random config to improve search space */
    +                params = ZSTD_adjustCParams(params, blockSize, 0);
     
                     /* exclude faster if already played set of params */
                     if (FUZ_rand(&g_rand) & ((1 << NB_TESTS_PLAYED(params))-1)) continue;
    @@ -800,6 +821,7 @@ int optimizeForSize(const char* inFileName, U32 targetSpeed)
                         winner.params = params;
                         winner.result = candidate;
                         BMK_printWinner(stdout, 99, winner.result, winner.params, benchedSize);
    +                    BMK_translateAdvancedParams(winner.params);
                     }
                 } while (BMK_timeSpan(grillStart) < g_grillDuration_s);
             }
    @@ -833,7 +855,7 @@ static int usage_advanced(void)
         DISPLAY( " -T#    : set level 1 speed objective \n");
         DISPLAY( " -B#    : cut input into blocks of size # (default : single block) \n");
         DISPLAY( " -i#    : iteration loops [1-9](default : %i) \n", NBLOOPS);
    -    DISPLAY( " -O#    : find Optimized parameters for # target speed (default : 0) \n");
    +    DISPLAY( " -O#    : find Optimized parameters for # MB/s compression speed (default : 0) \n");
         DISPLAY( " -S     : Single run \n");
         DISPLAY( " -P#    : generated sample compressibility (default : %.1f%%) \n", COMPRESSIBILITY_DEFAULT * 100);
         return 0;
    diff --git a/tests/playTests.sh b/tests/playTests.sh
    index 653aaf3c8..e69574588 100755
    --- a/tests/playTests.sh
    +++ b/tests/playTests.sh
    @@ -11,6 +11,7 @@ roundTripTest() {
             local_p="$2"
         else
             local_c="$2"
    +        local_p=""
         fi
     
         rm -f tmp1 tmp2
    @@ -20,13 +21,36 @@ roundTripTest() {
         $DIFF -q tmp1 tmp2
     }
     
    +fileRoundTripTest() {
    +    if [ -n "$3" ]; then
    +        local_c="$3"
    +        local_p="$2"
    +    else
    +        local_c="$2"
    +        local_p=""
    +    fi
    +
    +    rm -f tmp.zstd tmp.md5.1 tmp.md5.2
    +    $ECHO "fileRoundTripTest: ./datagen $1 $local_p > tmp && $ZSTD -v$local_c -c tmp | $ZSTD -d"
    +    ./datagen $1 $local_p > tmp
    +    cat tmp | $MD5SUM > tmp.md5.1
    +    $ZSTD --ultra -v$local_c -c tmp | $ZSTD -d | $MD5SUM > tmp.md5.2
    +    $DIFF -q tmp.md5.1 tmp.md5.2
    +}
    +
    +isTerminal=false
    +if [ -t 0 ] && [ -t 1 ]
    +then
    +    isTerminal=true
    +fi
    +
     isWindows=false
    -ECHO="echo"
    +ECHO="echo -e"
     INTOVOID="/dev/null"
     case "$OS" in
       Windows*)
         isWindows=true
    -    ECHO="echo -e"
    +    INTOVOID="NUL"
         ;;
     esac
     
    @@ -42,11 +66,17 @@ case "$UNAME" in
       SunOS) DIFF="gdiff" ;;
     esac
     
    -
     $ECHO "\nStarting playTests.sh isWindows=$isWindows ZSTD='$ZSTD'"
     
     [ -n "$ZSTD" ] || die "ZSTD variable must be defined!"
     
    +if [ -n "$(echo hello | $ZSTD -v -T2 2>&1 > $INTOVOID | grep 'multi-threading is disabled')" ]
    +then
    +    hasMT=""
    +else
    +    hasMT="true"
    +fi
    +
     $ECHO "\n**** simple tests **** "
     
     ./datagen > tmp
    @@ -62,7 +92,7 @@ $ZSTD tmp --stdout > tmpCompressed       # long command format
     $ECHO "test : compress to named file"
     rm tmpCompressed
     $ZSTD tmp -o tmpCompressed
    -ls tmpCompressed   # must work
    +test -f tmpCompressed   # file must be created
     $ECHO "test : -o must be followed by filename (must fail)"
     $ZSTD tmp -of tmpCompressed && die "-o must be followed by filename "
     $ECHO "test : force write, correct order"
    @@ -72,6 +102,12 @@ cp tmp tmp2
     $ZSTD tmp2 -fo && die "-o must be followed by filename "
     $ECHO "test : implied stdout when input is stdin"
     $ECHO bob | $ZSTD | $ZSTD -d
    +if [ "$isTerminal" = true ]; then
    +$ECHO "test : compressed data to terminal"
    +$ECHO bob | $ZSTD && die "should have refused : compressed data to terminal"
    +$ECHO "test : compressed data from terminal (a hang here is a test fail, zstd is wrongly waiting on data from terminal)"
    +$ZSTD -d > $INTOVOID && die "should have refused : compressed data from terminal"
    +fi
     $ECHO "test : null-length file roundtrip"
     $ECHO -n '' | $ZSTD - --stdout | $ZSTD -d --stdout
     $ECHO "test : decompress file with wrong suffix (must fail)"
    @@ -96,23 +132,31 @@ $ZSTD -q tmp && die "overwrite check failed!"
     $ECHO "test : force overwrite"
     $ZSTD -q -f tmp
     $ZSTD -q --force tmp
    +$ECHO "test : overwrite readonly file"
    +rm -f tmpro tmpro.zst
    +$ECHO foo > tmpro.zst
    +$ECHO foo > tmpro
    +chmod 400 tmpro.zst
    +$ZSTD -q tmpro && die "should have refused to overwrite read-only file"
    +$ZSTD -q -f tmpro
    +rm -f tmpro tmpro.zst
     $ECHO "test : file removal"
     $ZSTD -f --rm tmp
    -ls tmp && die "tmp should no longer be present"
    +test ! -f tmp  # tmp should no longer be present
     $ZSTD -f -d --rm tmp.zst
    -ls tmp.zst && die "tmp.zst should no longer be present"
    +test ! -f tmp.zst   # tmp.zst should no longer be present
     $ECHO "test : --rm on stdin"
     $ECHO a | $ZSTD --rm > $INTOVOID   # --rm should remain silent
     rm tmp
     $ZSTD -f tmp && die "tmp not present : should have failed"
    -ls tmp.zst && die "tmp.zst should not be created"
    +test ! -f tmp.zst  # tmp.zst should not be created
     
     
     $ECHO "\n**** Advanced compression parameters **** "
     $ECHO "Hello world!" | $ZSTD --zstd=windowLog=21,      - -o tmp.zst && die "wrong parameters not detected!"
     $ECHO "Hello world!" | $ZSTD --zstd=windowLo=21        - -o tmp.zst && die "wrong parameters not detected!"
     $ECHO "Hello world!" | $ZSTD --zstd=windowLog=21,slog  - -o tmp.zst && die "wrong parameters not detected!"
    -ls tmp.zst && die "tmp.zst should not be created"
    +test ! -f tmp.zst  # tmp.zst should not be created
     roundTripTest -g512K
     roundTripTest -g512K " --zstd=slen=3,tlen=48,strat=6"
     roundTripTest -g512K " --zstd=strat=6,wlog=23,clog=23,hlog=22,slog=6"
    @@ -156,6 +200,20 @@ $ECHO "$ECHO foo | $ZSTD > /dev/full"
     $ECHO foo | $ZSTD > /dev/full && die "write error not detected!"
     $ECHO "$ECHO foo | $ZSTD | $ZSTD -d > /dev/full"
     $ECHO foo | $ZSTD | $ZSTD -d > /dev/full && die "write error not detected!"
    +
    +
    +$ECHO "\n**** symbolic link test **** "
    +
    +rm -f hello.tmp world.tmp hello.tmp.zst world.tmp.zst
    +$ECHO "hello world" > hello.tmp
    +ln -s hello.tmp world.tmp
    +$ZSTD world.tmp hello.tmp
    +test -f hello.tmp.zst  # regular file should have been compressed!
    +test ! -f world.tmp.zst  # symbolic link should not have been compressed!
    +$ZSTD world.tmp hello.tmp -f
    +test -f world.tmp.zst  # symbolic link should have been compressed with --force
    +rm -f hello.tmp world.tmp hello.tmp.zst world.tmp.zst
    +
     fi
     
     
    @@ -168,10 +226,10 @@ $ZSTD tmpSparse -c | $ZSTD -dv --sparse -c > tmpOutSparse
     $DIFF -s tmpSparse tmpOutSparse
     $ZSTD tmpSparse -c | $ZSTD -dv --no-sparse -c > tmpOutNoSparse
     $DIFF -s tmpSparse tmpOutNoSparse
    -ls -ls tmpSparse*
    +ls -ls tmpSparse*  # look at file size and block size on disk
     ./datagen -s1 -g1200007 -P100 | $ZSTD | $ZSTD -dv --sparse -c > tmpSparseOdd   # Odd size file (to not finish on an exact nb of blocks)
     ./datagen -s1 -g1200007 -P100 | $DIFF -s - tmpSparseOdd
    -ls -ls tmpSparseOdd
    +ls -ls tmpSparseOdd  # look at file size and block size on disk
     $ECHO "\n Sparse Compatibility with Console :"
     $ECHO "Hello World 1 !" | $ZSTD | $ZSTD -d -c
     $ECHO "Hello World 2 !" | $ZSTD | $ZSTD -d | cat
    @@ -181,7 +239,7 @@ cat tmpSparse1M tmpSparse1M > tmpSparse2M
     $ZSTD -v -f tmpSparse1M -o tmpSparseCompressed
     $ZSTD -d -v -f tmpSparseCompressed -o tmpSparseRegenerated
     $ZSTD -d -v -f tmpSparseCompressed -c >> tmpSparseRegenerated
    -ls -ls tmpSparse*
    +ls -ls tmpSparse*  # look at file size and block size on disk
     $DIFF tmpSparse2M tmpSparseRegenerated
     rm tmpSparse*
     
    @@ -200,11 +258,11 @@ $ZSTD -df *.zst
     ls -ls tmp*
     $ECHO "compress tmp* into stdout > tmpall : "
     $ZSTD -c tmp1 tmp2 tmp3 > tmpall
    -ls -ls tmp*
    +ls -ls tmp*  # check size of tmpall (should be tmp1.zst + tmp2.zst + tmp3.zst)
     $ECHO "decompress tmpall* into stdout > tmpdec : "
     cp tmpall tmpall2
     $ZSTD -dc tmpall* > tmpdec
    -ls -ls tmp*
    +ls -ls tmp* # check size of tmpdec (should be 2*(tmp1 + tmp2 + tmp3))
     $ECHO "compress multiple files including a missing one (notHere) : "
     $ZSTD -f tmp1 notHere tmp2 && die "missing file not detected!"
     
    @@ -227,12 +285,12 @@ $ECHO "- Create second (different) dictionary "
     $ZSTD --train *.c ../programs/*.c ../programs/*.h -o tmpDictC
     $ZSTD -d tmp.zst -D tmpDictC -fo result && die "wrong dictionary not detected!"
     $ECHO "- Create dictionary with short dictID"
    -$ZSTD --train *.c ../programs/*.c --dictID 1 -o tmpDict1
    +$ZSTD --train *.c ../programs/*.c --dictID=1 -o tmpDict1
     cmp tmpDict tmpDict1 && die "dictionaries should have different ID !"
     $ECHO "- Create dictionary with wrong dictID parameter order (must fail)"
     $ZSTD --train *.c ../programs/*.c --dictID -o 1 tmpDict1 && die "wrong order : --dictID must be followed by argument "
     $ECHO "- Create dictionary with size limit"
    -$ZSTD --train *.c ../programs/*.c -o tmpDict2 --maxdict 4K -v
    +$ZSTD --train *.c ../programs/*.c -o tmpDict2 --maxdict=4K -v
     $ECHO "- Create dictionary with wrong parameter order (must fail)"
     $ZSTD --train *.c ../programs/*.c -o tmpDict2 --maxdict -v 4K && die "wrong order : --maxdict must be followed by argument "
     $ECHO "- Compress without dictID"
    @@ -240,7 +298,7 @@ $ZSTD -f tmp -D tmpDict1 --no-dictID
     $ZSTD -d tmp.zst -D tmpDict -fo result
     $DIFF $TESTFILE result
     $ECHO "- Compress with wrong argument order (must fail)"
    -$ZSTD tmp -Df tmpDict1 -c > /dev/null && die "-D must be followed by dictionary name "
    +$ZSTD tmp -Df tmpDict1 -c > $INTOVOID && die "-D must be followed by dictionary name "
     $ECHO "- Compress multiple files with dictionary"
     rm -rf dirTestDict
     mkdir dirTestDict
    @@ -255,6 +313,11 @@ case "$UNAME" in
       *) $MD5SUM -c tmph1 ;;
     esac
     rm -rf dirTestDict
    +$ECHO "- dictionary builder on bogus input"
    +$ECHO "Hello World" > tmp
    +$ZSTD --train-legacy -q tmp && die "Dictionary training should fail : not enough input source"
    +./datagen -P0 -g10M > tmp
    +$ZSTD --train-legacy -q tmp && die "Dictionary training should fail : source is pure noise"
     rm tmp*
     
     
    @@ -263,19 +326,39 @@ $ECHO "\n**** cover dictionary tests **** "
     TESTFILE=../programs/zstdcli.c
     ./datagen > tmpDict
     $ECHO "- Create first dictionary"
    -$ZSTD --train --cover=k=46,d=8 *.c ../programs/*.c -o tmpDict
    +$ZSTD --train-cover=k=46,d=8 *.c ../programs/*.c -o tmpDict
     cp $TESTFILE tmp
     $ZSTD -f tmp -D tmpDict
     $ZSTD -d tmp.zst -D tmpDict -fo result
     $DIFF $TESTFILE result
     $ECHO "- Create second (different) dictionary"
    -$ZSTD --train --cover=k=56,d=8 *.c ../programs/*.c ../programs/*.h -o tmpDictC
    +$ZSTD --train-cover=k=56,d=8 *.c ../programs/*.c ../programs/*.h -o tmpDictC
     $ZSTD -d tmp.zst -D tmpDictC -fo result && die "wrong dictionary not detected!"
     $ECHO "- Create dictionary with short dictID"
    -$ZSTD --train --cover=k=46,d=8 *.c ../programs/*.c --dictID 1 -o tmpDict1
    +$ZSTD --train-cover=k=46,d=8 *.c ../programs/*.c --dictID=1 -o tmpDict1
     cmp tmpDict tmpDict1 && die "dictionaries should have different ID !"
     $ECHO "- Create dictionary with size limit"
    -$ZSTD --train --optimize-cover=steps=8 *.c ../programs/*.c -o tmpDict2 --maxdict 4K
    +$ZSTD --train-cover=steps=8 *.c ../programs/*.c -o tmpDict2 --maxdict=4K
    +rm tmp*
    +
    +$ECHO "\n**** legacy dictionary tests **** "
    +
    +TESTFILE=../programs/zstdcli.c
    +./datagen > tmpDict
    +$ECHO "- Create first dictionary"
    +$ZSTD --train-legacy=selectivity=8 *.c ../programs/*.c -o tmpDict
    +cp $TESTFILE tmp
    +$ZSTD -f tmp -D tmpDict
    +$ZSTD -d tmp.zst -D tmpDict -fo result
    +$DIFF $TESTFILE result
    +$ECHO "- Create second (different) dictionary"
    +$ZSTD --train-legacy=s=5 *.c ../programs/*.c ../programs/*.h -o tmpDictC
    +$ZSTD -d tmp.zst -D tmpDictC -fo result && die "wrong dictionary not detected!"
    +$ECHO "- Create dictionary with short dictID"
    +$ZSTD --train-legacy -s5 *.c ../programs/*.c --dictID=1 -o tmpDict1
    +cmp tmpDict tmpDict1 && die "dictionaries should have different ID !"
    +$ECHO "- Create dictionary with size limit"
    +$ZSTD --train-legacy -s9 *.c ../programs/*.c -o tmpDict2 --maxdict=4K
     rm tmp*
     
     
    @@ -297,7 +380,9 @@ $ZSTD -t tmp2.zst && die "bad file not detected !"
     $ZSTD -t tmp3 && die "bad file not detected !"   # detects 0-sized files as bad
     $ECHO "test --rm and --test combined "
     $ZSTD -t --rm tmp1.zst
    -ls -ls tmp1.zst  # check file is still present
    +test -f tmp1.zst   # check file is still present
    +split -b16384 tmp1.zst tmpSplit.
    +$ZSTD -t tmpSplit.* && die "bad file not detected !"
     
     
     $ECHO "\n**** benchmark mode tests **** "
    @@ -325,6 +410,7 @@ if [ $GZIPMODE -eq 1 ]; then
             gzip -t -v tmp.gz
             gzip -f tmp
             $ZSTD -d -f -v tmp.gz
    +        rm tmp*
         else
             $ECHO "gzip binary not detected"
         fi
    @@ -333,6 +419,98 @@ else
     fi
     
     
    +$ECHO "\n**** gzip frame tests **** "
    +
    +if [ $GZIPMODE -eq 1 ]; then
    +    ./datagen > tmp
    +    $ZSTD -f --format=gzip tmp
    +    $ZSTD -f tmp
    +    cat tmp.gz tmp.zst tmp.gz tmp.zst | $ZSTD -d -f -o tmp
    +    head -c -1 tmp.gz | $ZSTD -t > $INTOVOID && die "incomplete frame not detected !"
    +    rm tmp*
    +else
    +    $ECHO "gzip mode not supported"
    +fi
    +
    +
    +$ECHO "\n**** xz compatibility tests **** "
    +
    +LZMAMODE=1
    +$ZSTD --format=xz -V || LZMAMODE=0
    +if [ $LZMAMODE -eq 1 ]; then
    +    $ECHO "xz support detected"
    +    XZEXE=1
    +    xz -V && lzma -V || XZEXE=0
    +    if [ $XZEXE -eq 1 ]; then
    +        ./datagen > tmp
    +        $ZSTD --format=lzma -f tmp
    +        $ZSTD --format=xz -f tmp
    +        xz -t -v tmp.xz
    +        xz -t -v tmp.lzma
    +        xz -f -k tmp
    +        lzma -f -k --lzma1 tmp
    +        $ZSTD -d -f -v tmp.xz
    +        $ZSTD -d -f -v tmp.lzma
    +        rm tmp*
    +    else
    +        $ECHO "xz binary not detected"
    +    fi
    +else
    +    $ECHO "xz mode not supported"
    +fi
    +
    +
    +$ECHO "\n**** xz frame tests **** "
    +
    +if [ $LZMAMODE -eq 1 ]; then
    +    ./datagen > tmp
    +    $ZSTD -f --format=xz tmp
    +    $ZSTD -f --format=lzma tmp
    +    $ZSTD -f tmp
    +    cat tmp.xz tmp.lzma tmp.zst tmp.lzma tmp.xz tmp.zst | $ZSTD -d -f -o tmp
    +    head -c -1 tmp.xz | $ZSTD -t > $INTOVOID && die "incomplete frame not detected !"
    +    head -c -1 tmp.lzma | $ZSTD -t > $INTOVOID && die "incomplete frame not detected !"
    +    rm tmp*
    +else
    +    $ECHO "xz mode not supported"
    +fi
    +
    +$ECHO "\n**** lz4 compatibility tests **** "
    +
    +LZ4MODE=1
    +$ZSTD --format=lz4 -V || LZ4MODE=0
    +if [ $LZ4MODE -eq 1 ]; then
    +    $ECHO "lz4 support detected"
    +    LZ4EXE=1
    +    lz4 -V || LZ4EXE=0
    +    if [ $LZ4EXE -eq 1 ]; then
    +        ./datagen > tmp
    +        $ZSTD --format=lz4 -f tmp
    +        lz4 -t -v tmp.lz4
    +        lz4 -f tmp
    +        $ZSTD -d -f -v tmp.lz4
    +        rm tmp*
    +    else
    +        $ECHO "lz4 binary not detected"
    +    fi
    +else
    +    $ECHO "lz4 mode not supported"
    +fi
    +
    +
    +$ECHO "\n**** lz4 frame tests **** "
    +
    +if [ $LZ4MODE -eq 1 ]; then
    +    ./datagen > tmp
    +    $ZSTD -f --format=lz4 tmp
    +    $ZSTD -f tmp
    +    cat tmp.lz4 tmp.zst tmp.lz4 tmp.zst | $ZSTD -d -f -o tmp
    +    head -c -1 tmp.lz4 | $ZSTD -t > $INTOVOID && die "incomplete frame not detected !"
    +    rm tmp*
    +else
    +    $ECHO "lz4 mode not supported"
    +fi
    +
     $ECHO "\n**** zstd round-trip tests **** "
     
     roundTripTest
    @@ -344,6 +522,19 @@ roundTripTest -g519K 6    # greedy, hash chain
     roundTripTest -g517K 16   # btlazy2
     roundTripTest -g516K 19   # btopt
     
    +fileRoundTripTest -g500K
    +
    +if [ -n "$hasMT" ]
    +then
    +    $ECHO "\n**** zstdmt round-trip tests **** "
    +    roundTripTest -g4M "1 -T0"
    +    roundTripTest -g8M "3 -T2"
    +    roundTripTest -g8000K "2 --threads=2"
    +    fileRoundTripTest -g4M "19 -T2 -B1M"
    +else
    +    $ECHO "\n**** no multithreading, skipping zstdmt tests **** "
    +fi
    +
     rm tmp*
     
     if [ "$1" != "--test-large-data" ]; then
    @@ -379,4 +570,16 @@ roundTripTest -g50000000 -P94 19
     roundTripTest -g99000000 -P99 20
     roundTripTest -g6000000000 -P99 1
     
    +fileRoundTripTest -g4193M -P99 1
    +
    +if [ -n "$hasMT" ]
    +then
    +    $ECHO "\n**** zstdmt long round-trip tests **** "
    +    roundTripTest -g99000000 -P99 "20 -T2"
    +    roundTripTest -g6000000000 -P99 "1 -T2"
    +    fileRoundTripTest -g4193M -P98 " -T0"
    +else
    +    $ECHO "\n**** no multithreading, skipping zstdmt tests **** "
    +fi
    +
     rm tmp*
    diff --git a/tests/symbols.c b/tests/symbols.c
    index 7dacfc058..ade3aa02c 100644
    --- a/tests/symbols.c
    +++ b/tests/symbols.c
    @@ -88,7 +88,7 @@ static const void *symbols[] = {
       &ZSTD_copyCCtx,
       &ZSTD_compressContinue,
       &ZSTD_compressEnd,
    -  &ZSTD_getFrameParams,
    +  &ZSTD_getFrameHeader,
       &ZSTD_decompressBegin,
       &ZSTD_decompressBegin_usingDict,
       &ZSTD_copyDCtx,
    diff --git a/tests/test-zstd-speed.py b/tests/test-zstd-speed.py
    index 23d4f477c..56108a5ca 100755
    --- a/tests/test-zstd-speed.py
    +++ b/tests/test-zstd-speed.py
    @@ -14,14 +14,15 @@
     # - dir1/zstd and dir2/zstd will be merged in a single results file
     
     import argparse
    -import os
    +import os           # getloadavg
     import string
     import subprocess
    -import time
    +import time         # strftime
     import traceback
     import hashlib
    +import platform     # system
     
    -script_version = 'v1.1.1 (2016-10-28)'
    +script_version = 'v1.1.2 (2017-03-26)'
     default_repo_url = 'https://github.com/facebook/zstd.git'
     working_dir_name = 'speedTest'
     working_path = os.getcwd() + '/' + working_dir_name     # /path/to/zstd/tests/speedTest
    @@ -152,10 +153,15 @@ def benchmark_and_compare(branch, commit, last_commit, args, executableName, md5
                 % (os.getloadavg()[0], args.maxLoadAvg, sleepTime))
             time.sleep(sleepTime)
         start_load = str(os.getloadavg())
    -    if args.dictionary:
    -        result = execute('programs/%s -rqi5b1e%s -D %s %s' % (executableName, args.lastCLevel, args.dictionary, testFilePath), print_output=True)
    +    osType = platform.system()
    +    if osType == 'Linux':
    +        cpuSelector = "taskset --cpu-list 0"
         else:
    -        result = execute('programs/%s -rqi5b1e%s %s' % (executableName, args.lastCLevel, testFilePath), print_output=True)   
    +        cpuSelector = ""
    +    if args.dictionary:
    +        result = execute('%s programs/%s -rqi5b1e%s -D %s %s' % (cpuSelector, executableName, args.lastCLevel, args.dictionary, testFilePath), print_output=True)
    +    else:
    +        result = execute('%s programs/%s -rqi5b1e%s %s' % (cpuSelector, executableName, args.lastCLevel, testFilePath), print_output=True)
         end_load = str(os.getloadavg())
         linesExpected = args.lastCLevel + 1
         if len(result) != linesExpected:
    @@ -291,7 +297,7 @@ if __name__ == '__main__':
             log("ERROR: e-mail senders 'mail' or 'mutt' not found")
             exit(1)
     
    -    clang_version = execute("clang -v 2>&1 | grep 'clang version' | sed -e 's:.*version \\([0-9.]*\\).*:\\1:' -e 's:\\.\\([0-9][0-9]\\):\\1:g'", verbose)[0];
    +    clang_version = execute("clang -v 2>&1 | grep ' version ' | sed -e 's:.*version \\([0-9.]*\\).*:\\1:' -e 's:\\.\\([0-9][0-9]\\):\\1:g'", verbose)[0];
         gcc_version = execute("gcc -dumpversion", verbose)[0];
     
         if verbose:
    diff --git a/tests/zbufftest.c b/tests/zbufftest.c
    index 14b739233..601aa808d 100644
    --- a/tests/zbufftest.c
    +++ b/tests/zbufftest.c
    @@ -60,7 +60,7 @@ static U32 g_displayLevel = 2;
     #define DISPLAYUPDATE(l, ...) if (g_displayLevel>=l) { \
                 if ((FUZ_GetClockSpan(g_displayClock) > g_refreshRate) || (g_displayLevel>=4)) \
                 { g_displayClock = clock(); DISPLAY(__VA_ARGS__); \
    -            if (g_displayLevel>=4) fflush(stdout); } }
    +            if (g_displayLevel>=4) fflush(stderr); } }
     static const clock_t g_refreshRate = CLOCKS_PER_SEC * 15 / 100;
     static clock_t g_displayClock = 0;
     
    diff --git a/tests/zstreamtest.c b/tests/zstreamtest.c
    index 54b890266..5c166bfb8 100644
    --- a/tests/zstreamtest.c
    +++ b/tests/zstreamtest.c
    @@ -53,13 +53,15 @@ static const U32 prime32 = 2654435761U;
     *  Display Macros
     **************************************/
     #define DISPLAY(...)          fprintf(stderr, __VA_ARGS__)
    -#define DISPLAYLEVEL(l, ...)  if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); }
    +#define DISPLAYLEVEL(l, ...)  if (g_displayLevel>=l) {                     \
    +                                  DISPLAY(__VA_ARGS__);                    \
    +                                  if (g_displayLevel>=4) fflush(stderr); }
     static U32 g_displayLevel = 2;
     
     #define DISPLAYUPDATE(l, ...) if (g_displayLevel>=l) { \
                 if ((FUZ_GetClockSpan(g_displayClock) > g_refreshRate) || (g_displayLevel>=4)) \
                 { g_displayClock = clock(); DISPLAY(__VA_ARGS__); \
    -            if (g_displayLevel>=4) fflush(stdout); } }
    +              if (g_displayLevel>=4) fflush(stderr); } }
     static const clock_t g_refreshRate = CLOCKS_PER_SEC / 6;
     static clock_t g_displayClock = 0;
     
    @@ -131,7 +133,7 @@ static buffer_t FUZ_createDictionary(const void* src, size_t srcSize, size_t blo
         }
         {   size_t const dictSize = ZDICT_trainFromBuffer(dict.start, requestedDictSize, src, blockSizes, (unsigned)nbBlocks);
             free(blockSizes);
    -        if (ZDICT_isError(dictSize)) { free(dict.start); return (buffer_t){ NULL, 0, 0 }; }
    +        if (ZDICT_isError(dictSize)) { free(dict.start); return g_nullBuffer; }
             dict.size = requestedDictSize;
             dict.filled = dictSize;
             return dict;   /* how to return dictSize ? */
    @@ -155,12 +157,13 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
         void* decodedBuffer = malloc(decodedBufferSize);
         size_t cSize;
         int testResult = 0;
    -    U32 testNb=0;
    +    U32 testNb = 1;
         ZSTD_CStream* zc = ZSTD_createCStream_advanced(customMem);
         ZSTD_DStream* zd = ZSTD_createDStream_advanced(customMem);
         ZSTD_inBuffer  inBuff, inBuff2;
         ZSTD_outBuffer outBuff;
         buffer_t dictionary = g_nullBuffer;
    +    size_t const dictSize = 128 KB;
         unsigned dictID = 0;
     
         /* Create compressible test buffer */
    @@ -186,7 +189,7 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
     
         /* Basic compression test */
         DISPLAYLEVEL(3, "test%3i : compress %u bytes : ", testNb++, COMPRESSIBLE_NOISE_LENGTH);
    -    ZSTD_initCStream_usingDict(zc, CNBuffer, 128 KB, 1);
    +    ZSTD_initCStream_usingDict(zc, CNBuffer, dictSize, 1);
         outBuff.dst = (char*)(compressedBuffer)+cSize;
         outBuff.size = compressedBufferSize;
         outBuff.pos = 0;
    @@ -201,15 +204,34 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
         cSize += outBuff.pos;
         DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/COMPRESSIBLE_NOISE_LENGTH*100);
     
    -    DISPLAYLEVEL(3, "test%3i : check CStream size : ", testNb++);
    +    /* context size functions */
    +    DISPLAYLEVEL(3, "test%3i : estimate CStream size : ", testNb++);
    +    {   ZSTD_compressionParameters const cParams = ZSTD_getCParams(1, CNBufferSize, dictSize);
    +        size_t const s = ZSTD_estimateCStreamSize(cParams)
    +                       + ZSTD_estimateCDictSize(cParams, dictSize);  /* uses ZSTD_initCStream_usingDict() */
    +            if (ZSTD_isError(s)) goto _output_error;
    +            DISPLAYLEVEL(3, "OK (%u bytes) \n", (U32)s);
    +    }
    +
    +    DISPLAYLEVEL(3, "test%3i : check actual CStream size : ", testNb++);
         { size_t const s = ZSTD_sizeof_CStream(zc);
           if (ZSTD_isError(s)) goto _output_error;
           DISPLAYLEVEL(3, "OK (%u bytes) \n", (U32)s);
         }
     
    +    /* Attempt bad compression parameters */
    +    DISPLAYLEVEL(3, "test%3i : use bad compression parameters : ", testNb++);
    +    {   size_t r;
    +        ZSTD_parameters params = ZSTD_getParams(1, 0, 0);
    +        params.cParams.searchLength = 2;
    +        r = ZSTD_initCStream_advanced(zc, NULL, 0, params, 0);
    +        if (!ZSTD_isError(r)) goto _output_error;
    +        DISPLAYLEVEL(3, "init error : %s \n", ZSTD_getErrorName(r));
    +    }
    +
         /* skippable frame test */
         DISPLAYLEVEL(3, "test%3i : decompress skippable frame : ", testNb++);
    -    ZSTD_initDStream_usingDict(zd, CNBuffer, 128 KB);
    +    ZSTD_initDStream_usingDict(zd, CNBuffer, dictSize);
         inBuff.src = compressedBuffer;
         inBuff.size = cSize;
         inBuff.pos = 0;
    @@ -224,7 +246,7 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
         /* Basic decompression test */
         inBuff2 = inBuff;
         DISPLAYLEVEL(3, "test%3i : decompress %u bytes : ", testNb++, COMPRESSIBLE_NOISE_LENGTH);
    -    ZSTD_initDStream_usingDict(zd, CNBuffer, 128 KB);
    +    ZSTD_initDStream_usingDict(zd, CNBuffer, dictSize);
         { size_t const r = ZSTD_setDStreamParameter(zd, DStream_p_maxWindowSize, 1000000000);  /* large limit */
           if (ZSTD_isError(r)) goto _output_error; }
         { size_t const remaining = ZSTD_decompressStream(zd, &outBuff, &inBuff);
    @@ -250,7 +272,20 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
         }   }
         DISPLAYLEVEL(3, "OK \n");
     
    -    DISPLAYLEVEL(3, "test%3i : check DStream size : ", testNb++);
    +    /* context size functions */
    +    DISPLAYLEVEL(3, "test%3i : estimate DStream size : ", testNb++);
    +    {   ZSTD_frameHeader fhi;
    +        const void* cStart = (char*)compressedBuffer + (skippableFrameSize + 8);
    +        size_t const gfhError = ZSTD_getFrameHeader(&fhi, cStart, cSize);
    +        if (gfhError!=0) goto _output_error;
    +        DISPLAYLEVEL(5, " (windowSize : %u) ", fhi.windowSize);
    +        {   size_t const s = ZSTD_estimateDStreamSize(fhi)
    +                           + ZSTD_estimateDDictSize(dictSize);  /* uses ZSTD_initDStream_usingDict() */
    +            if (ZSTD_isError(s)) goto _output_error;
    +            DISPLAYLEVEL(3, "OK (%u bytes) \n", (U32)s);
    +    }   }
    +
    +    DISPLAYLEVEL(3, "test%3i : check actual DStream size : ", testNb++);
         { size_t const s = ZSTD_sizeof_DStream(zd);
           if (ZSTD_isError(s)) goto _output_error;
           DISPLAYLEVEL(3, "OK (%u bytes) \n", (U32)s);
    @@ -260,7 +295,7 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
         DISPLAYLEVEL(3, "test%3i : decompress byte-by-byte : ", testNb++);
         {   /* skippable frame */
             size_t r = 1;
    -        ZSTD_initDStream_usingDict(zd, CNBuffer, 128 KB);
    +        ZSTD_initDStream_usingDict(zd, CNBuffer, dictSize);
             inBuff.src = compressedBuffer;
             outBuff.dst = decodedBuffer;
             inBuff.pos = 0;
    @@ -272,7 +307,7 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
                 if (ZSTD_isError(r)) goto _output_error;
             }
             /* normal frame */
    -        ZSTD_initDStream_usingDict(zd, CNBuffer, 128 KB);
    +        ZSTD_initDStream_usingDict(zd, CNBuffer, dictSize);
             r=1;
             while (r) {
                 inBuff.size = inBuff.pos + 1;
    @@ -334,6 +369,7 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
         if (zc==NULL) goto _output_error;   /* memory allocation issue */
         /* use 1 */
         {   size_t const inSize = 513;
    +        DISPLAYLEVEL(5, "use1 ");
             ZSTD_initCStream_advanced(zc, NULL, 0, ZSTD_getParams(19, inSize, 0), inSize);   /* needs btopt + search3 to trigger hashLog3 */
             inBuff.src = CNBuffer;
             inBuff.size = inSize;
    @@ -341,14 +377,17 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
             outBuff.dst = (char*)(compressedBuffer)+cSize;
             outBuff.size = ZSTD_compressBound(inSize);
             outBuff.pos = 0;
    +        DISPLAYLEVEL(5, "compress1 ");
             { size_t const r = ZSTD_compressStream(zc, &outBuff, &inBuff);
                 if (ZSTD_isError(r)) goto _output_error; }
             if (inBuff.pos != inBuff.size) goto _output_error;   /* entire input should be consumed */
    +        DISPLAYLEVEL(5, "end1 ");
             { size_t const r = ZSTD_endStream(zc, &outBuff);
                 if (r != 0) goto _output_error; }  /* error, or some data not flushed */
         }
         /* use 2 */
         {   size_t const inSize = 1025;   /* will not continue, because tables auto-adjust and are therefore different size */
    +        DISPLAYLEVEL(5, "use2 ");
             ZSTD_initCStream_advanced(zc, NULL, 0, ZSTD_getParams(19, inSize, 0), inSize);   /* needs btopt + search3 to trigger hashLog3 */
             inBuff.src = CNBuffer;
             inBuff.size = inSize;
    @@ -356,9 +395,11 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
             outBuff.dst = (char*)(compressedBuffer)+cSize;
             outBuff.size = ZSTD_compressBound(inSize);
             outBuff.pos = 0;
    +        DISPLAYLEVEL(5, "compress2 ");
             { size_t const r = ZSTD_compressStream(zc, &outBuff, &inBuff);
                 if (ZSTD_isError(r)) goto _output_error; }
             if (inBuff.pos != inBuff.size) goto _output_error;   /* entire input should be consumed */
    +        DISPLAYLEVEL(5, "end2 ");
             { size_t const r = ZSTD_endStream(zc, &outBuff);
                 if (r != 0) goto _output_error; }  /* error, or some data not flushed */
         }
    @@ -425,7 +466,7 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
     
         /* Memory restriction */
         DISPLAYLEVEL(3, "test%3i : maxWindowSize < frame requirement : ", testNb++);
    -    ZSTD_initDStream_usingDict(zd, CNBuffer, 128 KB);
    +    ZSTD_initDStream_usingDict(zd, CNBuffer, dictSize);
         { size_t const r = ZSTD_setDStreamParameter(zd, DStream_p_maxWindowSize, 1000);  /* too small limit */
           if (ZSTD_isError(r)) goto _output_error; }
         inBuff.src = compressedBuffer;
    @@ -438,11 +479,64 @@ static int basicUnitTests(U32 seed, double compressibility, ZSTD_customMem custo
           if (!ZSTD_isError(r)) goto _output_error;  /* must fail : frame requires > 100 bytes */
           DISPLAYLEVEL(3, "OK (%s)\n", ZSTD_getErrorName(r)); }
     
    -    /* Unknown srcSize */
    +    DISPLAYLEVEL(3, "test%3i : ZSTD_initCStream_usingCDict_advanced with masked dictID : ", testNb++);
    +    {   ZSTD_compressionParameters const cParams = ZSTD_getCParams(1, CNBufferSize, dictionary.filled);
    +        ZSTD_frameParameters const fParams = { 1 /* contentSize */, 1 /* checksum */, 1 /* noDictID */};
    +        ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictionary.start, dictionary.filled, 1 /* byReference */, cParams, customMem);
    +        size_t const initError = ZSTD_initCStream_usingCDict_advanced(zc, cdict, CNBufferSize, fParams);
    +        if (ZSTD_isError(initError)) goto _output_error;
    +        cSize = 0;
    +        outBuff.dst = compressedBuffer;
    +        outBuff.size = compressedBufferSize;
    +        outBuff.pos = 0;
    +        inBuff.src = CNBuffer;
    +        inBuff.size = CNBufferSize;
    +        inBuff.pos = 0;
    +        { size_t const r = ZSTD_compressStream(zc, &outBuff, &inBuff);
    +          if (ZSTD_isError(r)) goto _output_error; }
    +        if (inBuff.pos != inBuff.size) goto _output_error;   /* entire input should be consumed */
    +        { size_t const r = ZSTD_endStream(zc, &outBuff);
    +          if (r != 0) goto _output_error; }  /* error, or some data not flushed */
    +        cSize = outBuff.pos;
    +        ZSTD_freeCDict(cdict);
    +        DISPLAYLEVEL(3, "OK (%u bytes : %.2f%%)\n", (U32)cSize, (double)cSize/CNBufferSize*100);
    +    }
    +
    +    DISPLAYLEVEL(3, "test%3i : try retrieving dictID from frame : ", testNb++);
    +    {   U32 const did = ZSTD_getDictID_fromFrame(compressedBuffer, cSize);
    +        if (did != 0) goto _output_error;
    +    }
    +    DISPLAYLEVEL(3, "OK (not detected) \n");
    +
    +    DISPLAYLEVEL(3, "test%3i : decompress without dictionary : ", testNb++);
    +    {   size_t const r = ZSTD_decompress(decodedBuffer, CNBufferSize, compressedBuffer, cSize);
    +        if (!ZSTD_isError(r)) goto _output_error;  /* must fail : dictionary not used */
    +        DISPLAYLEVEL(3, "OK (%s)\n", ZSTD_getErrorName(r));
    +    }
    +
    +    /* Empty srcSize */
    +    DISPLAYLEVEL(3, "test%3i : ZSTD_initCStream_advanced with pledgedSrcSize=0 and dict : ", testNb++);
    +    {   ZSTD_parameters params = ZSTD_getParams(5, 0, 0);
    +        params.fParams.contentSizeFlag = 1;
    +        ZSTD_initCStream_advanced(zc, dictionary.start, dictionary.filled, params, 0);
    +    } /* cstream advanced shall write content size = 0 */
    +    inBuff.src = CNBuffer;
    +    inBuff.size = 0;
    +    inBuff.pos = 0;
    +    outBuff.dst = compressedBuffer;
    +    outBuff.size = compressedBufferSize;
    +    outBuff.pos = 0;
    +    if (ZSTD_isError(ZSTD_compressStream(zc, &outBuff, &inBuff))) goto _output_error;
    +    if (ZSTD_endStream(zc, &outBuff) != 0) goto _output_error;
    +    cSize = outBuff.pos;
    +    if (ZSTD_findDecompressedSize(compressedBuffer, cSize) != 0) goto _output_error;
    +    DISPLAYLEVEL(3, "OK \n");
    +
         DISPLAYLEVEL(3, "test%3i : pledgedSrcSize == 0 behaves properly : ", testNb++);
         {   ZSTD_parameters params = ZSTD_getParams(5, 0, 0);
             params.fParams.contentSizeFlag = 1;
    -        ZSTD_initCStream_advanced(zc, NULL, 0, params, 0); } /* cstream advanced should write the 0 size field */
    +        ZSTD_initCStream_advanced(zc, NULL, 0, params, 0);
    +    } /* cstream advanced shall write content size = 0 */
         inBuff.src = CNBuffer;
         inBuff.size = 0;
         inBuff.pos = 0;
    @@ -552,7 +646,7 @@ static size_t FUZ_randomLength(U32* seed, U32 maxLog)
     #define CHECK(cond, ...) if (cond) { DISPLAY("Error => "); DISPLAY(__VA_ARGS__); \
                              DISPLAY(" (seed %u, test nb %u)  \n", seed, testNb); goto _output_error; }
     
    -static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, double compressibility)
    +static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, double compressibility, int bigTests)
     {
         static const U32 maxSrcLog = 24;
         static const U32 maxSampleLog = 19;
    @@ -574,6 +668,7 @@ static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, double compres
         const BYTE* dict=NULL;   /* can keep same dict on 2 consecutive tests */
         size_t dictSize = 0;
         U32 oldTestLog = 0;
    +    int const cLevelLimiter = bigTests ? 3 : 2;
     
         /* allocations */
         cNoiseBuffer[0] = (BYTE*)malloc (srcBufferSize);
    @@ -638,18 +733,23 @@ static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, double compres
             if ((FUZ_rand(&lseed)&1) /* at beginning, to keep same nb of rand */
                 && oldTestLog /* at least one test happened */ && resetAllowed) {
                 maxTestSize = FUZ_randomLength(&lseed, oldTestLog+2);
    -            if (maxTestSize >= srcBufferSize) maxTestSize = srcBufferSize-1;
    +            if (maxTestSize >= srcBufferSize)
    +                maxTestSize = srcBufferSize-1;
                 {   U64 const pledgedSrcSize = (FUZ_rand(&lseed) & 3) ? 0 : maxTestSize;
                     size_t const resetError = ZSTD_resetCStream(zc, pledgedSrcSize);
                     CHECK(ZSTD_isError(resetError), "ZSTD_resetCStream error : %s", ZSTD_getErrorName(resetError));
                 }
             } else {
                 U32 const testLog = FUZ_rand(&lseed) % maxSrcLog;
    -            U32 const cLevel = (FUZ_rand(&lseed) % (ZSTD_maxCLevel() - (testLog/3))) + 1;
    +            U32 const dictLog = FUZ_rand(&lseed) % maxSrcLog;
    +            U32 const cLevel = ( FUZ_rand(&lseed) %
    +                                (ZSTD_maxCLevel() -
    +                                (MAX(testLog, dictLog) / cLevelLimiter)))
    +                                 + 1;
                 maxTestSize = FUZ_rLogLength(&lseed, testLog);
                 oldTestLog = testLog;
                 /* random dictionary selection */
    -            dictSize  = ((FUZ_rand(&lseed)&63)==1) ? FUZ_randomLength(&lseed, maxSampleLog) : 0;
    +            dictSize  = ((FUZ_rand(&lseed)&1)==1) ? FUZ_rLogLength(&lseed, dictLog) : 0;
                 {   size_t const dictStart = FUZ_rand(&lseed) % (srcBufferSize - dictSize);
                     dict = srcBuffer + dictStart;
                 }
    @@ -701,7 +801,9 @@ static int fuzzerTests(U32 seed, U32 nbTests, unsigned startTest, double compres
                         outBuff.size = outBuff.pos + adjustedDstSize;
                         remainingToFlush = ZSTD_endStream(zc, &outBuff);
                         CHECK (ZSTD_isError(remainingToFlush), "flush error : %s", ZSTD_getErrorName(remainingToFlush));
    -                    CHECK (enoughDstSize && remainingToFlush, "ZSTD_endStream() not fully flushed (%u remaining), but enough space available", (U32)remainingToFlush);
    +                    CHECK (enoughDstSize && remainingToFlush,
    +                           "ZSTD_endStream() not fully flushed (%u remaining), but enough space available (%u)",
    +                           (U32)remainingToFlush, (U32)adjustedDstSize);
                 }   }
                 crcOrig = XXH64_digest(&xxhState);
                 cSize = outBuff.pos;
    @@ -784,7 +886,7 @@ _output_error:
     
     
     /* Multi-threading version of fuzzer Tests */
    -static int fuzzerTests_MT(U32 seed, U32 nbTests, unsigned startTest, double compressibility)
    +static int fuzzerTests_MT(U32 seed, U32 nbTests, unsigned startTest, double compressibility, int bigTests)
     {
         static const U32 maxSrcLog = 24;
         static const U32 maxSampleLog = 19;
    @@ -806,6 +908,7 @@ static int fuzzerTests_MT(U32 seed, U32 nbTests, unsigned startTest, double comp
         const BYTE* dict=NULL;   /* can keep same dict on 2 consecutive tests */
         size_t dictSize = 0;
         U32 oldTestLog = 0;
    +    int const cLevelLimiter = bigTests ? 3 : 2;
     
         /* allocations */
         cNoiseBuffer[0] = (BYTE*)malloc (srcBufferSize);
    @@ -850,6 +953,7 @@ static int fuzzerTests_MT(U32 seed, U32 nbTests, unsigned startTest, double comp
             /* some issues can only happen when reusing states */
             if ((FUZ_rand(&lseed) & 0xFF) == 131) {
                 U32 const nbThreads = (FUZ_rand(&lseed) % 6) + 1;
    +            DISPLAYLEVEL(5, "Creating new context with %u threads \n", nbThreads);
                 ZSTDMT_freeCCtx(zc);
                 zc = ZSTDMT_createCCtx(nbThreads);
                 resetAllowed=0;
    @@ -886,19 +990,26 @@ static int fuzzerTests_MT(U32 seed, U32 nbTests, unsigned startTest, double comp
                 }
             } else {
                 U32 const testLog = FUZ_rand(&lseed) % maxSrcLog;
    -            U32 const cLevel = (FUZ_rand(&lseed) % (ZSTD_maxCLevel() - (testLog/3))) + 1;
    +            U32 const dictLog = FUZ_rand(&lseed) % maxSrcLog;
    +            U32 const cLevel = (FUZ_rand(&lseed) %
    +                                (ZSTD_maxCLevel() -
    +                                 (MAX(testLog, dictLog) / cLevelLimiter))) +
    +                               1;
                 maxTestSize = FUZ_rLogLength(&lseed, testLog);
                 oldTestLog = testLog;
                 /* random dictionary selection */
    -            dictSize  = ((FUZ_rand(&lseed)&63)==1) ? FUZ_randomLength(&lseed, maxSampleLog) : 0;
    +            dictSize  = ((FUZ_rand(&lseed)&63)==1) ? FUZ_rLogLength(&lseed, dictLog) : 0;
                 {   size_t const dictStart = FUZ_rand(&lseed) % (srcBufferSize - dictSize);
                     dict = srcBuffer + dictStart;
                 }
                 {   U64 const pledgedSrcSize = (FUZ_rand(&lseed) & 3) ? 0 : maxTestSize;
                     ZSTD_parameters params = ZSTD_getParams(cLevel, pledgedSrcSize, dictSize);
    -                DISPLAYLEVEL(5, "Init with windowLog = %u \n", params.cParams.windowLog);
    +                DISPLAYLEVEL(5, "Init with windowLog = %u and pledgedSrcSize = %u \n",
    +                    params.cParams.windowLog, (U32)pledgedSrcSize);
                     params.fParams.checksumFlag = FUZ_rand(&lseed) & 1;
                     params.fParams.noDictIDFlag = FUZ_rand(&lseed) & 1;
    +                params.fParams.contentSizeFlag = pledgedSrcSize>0;
    +                DISPLAYLEVEL(5, "checksumFlag : %u \n", params.fParams.checksumFlag);
                     { size_t const initError = ZSTDMT_initCStream_advanced(zc, dict, dictSize, params, pledgedSrcSize);
                       CHECK (ZSTD_isError(initError),"ZSTDMT_initCStream_advanced error : %s", ZSTD_getErrorName(initError)); }
                     ZSTDMT_setMTCtxParameter(zc, ZSTDMT_p_overlapSectionLog, FUZ_rand(&lseed) % 12);
    @@ -936,7 +1047,7 @@ static int fuzzerTests_MT(U32 seed, U32 nbTests, unsigned startTest, double comp
                         outBuff.size = outBuff.pos + adjustedDstSize;
                         DISPLAYLEVEL(5, "Flushing into dst buffer of size %u \n", (U32)adjustedDstSize);
                         {   size_t const flushError = ZSTDMT_flushStream(zc, &outBuff);
    -                        CHECK (ZSTD_isError(flushError), "flush error : %s", ZSTD_getErrorName(flushError));
    +                        CHECK (ZSTD_isError(flushError), "ZSTDMT_flushStream error : %s", ZSTD_getErrorName(flushError));
                 }   }   }
     
                 /* final frame epilogue */
    @@ -947,12 +1058,12 @@ static int fuzzerTests_MT(U32 seed, U32 nbTests, unsigned startTest, double comp
                         outBuff.size = outBuff.pos + adjustedDstSize;
                         DISPLAYLEVEL(5, "Ending into dst buffer of size %u \n", (U32)adjustedDstSize);
                         remainingToFlush = ZSTDMT_endStream(zc, &outBuff);
    -                    CHECK (ZSTD_isError(remainingToFlush), "flush error : %s", ZSTD_getErrorName(remainingToFlush));
    +                    CHECK (ZSTD_isError(remainingToFlush), "ZSTDMT_endStream error : %s", ZSTD_getErrorName(remainingToFlush));
                         DISPLAYLEVEL(5, "endStream : remainingToFlush : %u \n", (U32)remainingToFlush);
                 }   }
    -            DISPLAYLEVEL(5, "Frame completed \n");
                 crcOrig = XXH64_digest(&xxhState);
                 cSize = outBuff.pos;
    +            DISPLAYLEVEL(5, "Frame completed : %u bytes \n", (U32)cSize);
             }
     
             /* multi - fragments decompression test */
    @@ -970,8 +1081,10 @@ static int fuzzerTests_MT(U32 seed, U32 nbTests, unsigned startTest, double comp
                     size_t const dstBuffSize = MIN(dstBufferSize - totalGenSize, randomDstSize);
                     inBuff.size = inBuff.pos + readCSrcSize;
                     outBuff.size = inBuff.pos + dstBuffSize;
    +                DISPLAYLEVEL(5, "ZSTD_decompressStream input %u bytes \n", (U32)readCSrcSize);
                     decompressionResult = ZSTD_decompressStream(zd, &outBuff, &inBuff);
                     CHECK (ZSTD_isError(decompressionResult), "decompression error : %s", ZSTD_getErrorName(decompressionResult));
    +                DISPLAYLEVEL(5, "inBuff.pos = %u \n", (U32)readCSrcSize);
                 }
                 CHECK (outBuff.pos != totalTestSize, "decompressed data : wrong size (%u != %u)", (U32)outBuff.pos, (U32)totalTestSize);
                 CHECK (inBuff.pos != cSize, "compressed data should be fully read (%u != %u)", (U32)inBuff.pos, (U32)cSize);
    @@ -1061,6 +1174,7 @@ int main(int argc, const char** argv)
         int result=0;
         int mainPause = 0;
         int mtOnly = 0;
    +    int bigTests = 1;
         const char* const programName = argv[0];
         ZSTD_customMem const customMem = { allocFunction, freeFunction, NULL };
         ZSTD_customMem const customNULL = { NULL, NULL, NULL };
    @@ -1074,6 +1188,7 @@ int main(int argc, const char** argv)
             if (argument[0]=='-') {
     
                 if (!strcmp(argument, "--mt")) { mtOnly=1; continue; }
    +            if (!strcmp(argument, "--no-big-tests")) { bigTests=0; continue; }
     
                 argument++;
                 while (*argument!=0) {
    @@ -1179,8 +1294,8 @@ int main(int argc, const char** argv)
                 result = basicUnitTests(0, ((double)proba) / 100, customMem);  /* use custom memory allocation functions */
         }   }
     
    -    if (!result && !mtOnly) result = fuzzerTests(seed, nbTests, testNb, ((double)proba) / 100);
    -    if (!result) result = fuzzerTests_MT(seed, nbTests, testNb, ((double)proba) / 100);
    +    if (!result && !mtOnly) result = fuzzerTests(seed, nbTests, testNb, ((double)proba) / 100, bigTests);
    +    if (!result) result = fuzzerTests_MT(seed, nbTests, testNb, ((double)proba) / 100, bigTests);
     
         if (mainPause) {
             int unused;
    diff --git a/zlibWrapper/examples/zwrapbench.c b/zlibWrapper/examples/zwrapbench.c
    index 23c3ca4da..a57ed51ec 100644
    --- a/zlibWrapper/examples/zwrapbench.c
    +++ b/zlibWrapper/examples/zwrapbench.c
    @@ -73,13 +73,13 @@ static U32 g_compressibilityDefault = 50;
     #define DEFAULT_DISPLAY_LEVEL 2
     #define DISPLAY(...)         fprintf(displayOut, __VA_ARGS__)
     #define DISPLAYLEVEL(l, ...) if (g_displayLevel>=l) { DISPLAY(__VA_ARGS__); }
    -static U32 g_displayLevel = DEFAULT_DISPLAY_LEVEL;   /* 0 : no display;   1: errors;   2 : + result + interaction + warnings;   3 : + progression;   4 : + information */
    +static int g_displayLevel = DEFAULT_DISPLAY_LEVEL;   /* 0 : no display;   1: errors;   2 : + result + interaction + warnings;   3 : + progression;   4 : + information */
     static FILE* displayOut;
     
     #define DISPLAYUPDATE(l, ...) if (g_displayLevel>=l) { \
                 if ((clock() - g_time > refreshRate) || (g_displayLevel>=4)) \
                 { g_time = clock(); DISPLAY(__VA_ARGS__); \
    -            if (g_displayLevel>=4) fflush(stdout); } }
    +            if (g_displayLevel>=4) fflush(displayOut); } }
     static const clock_t refreshRate = CLOCKS_PER_SEC * 15 / 100;
     static clock_t g_time = 0;
     
    @@ -128,6 +128,11 @@ void BMK_SetBlockSize(size_t blockSize)
     /* ********************************************************
     *  Bench functions
     **********************************************************/
    +#undef MIN
    +#undef MAX
    +#define MIN(a,b) ((a)<(b) ? (a) : (b))
    +#define MAX(a,b) ((a)>(b) ? (a) : (b))
    +
     typedef struct
     {
         z_const char* srcPtr;
    @@ -142,9 +147,6 @@ typedef struct
     typedef enum { BMK_ZSTD, BMK_ZSTD_STREAM, BMK_ZLIB, BMK_ZWRAP_ZLIB, BMK_ZWRAP_ZSTD, BMK_ZLIB_REUSE, BMK_ZWRAP_ZLIB_REUSE, BMK_ZWRAP_ZSTD_REUSE } BMK_compressor;
     
     
    -#define MIN(a,b) ((a)<(b) ? (a) : (b))
    -#define MAX(a,b) ((a)>(b) ? (a) : (b))
    -
     static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
                             const char* displayName, int cLevel,
                             const size_t* fileSizes, U32 nbFiles,
    @@ -234,7 +236,7 @@ static int BMK_benchMem(z_const void* srcBuffer, size_t srcSize,
                     if (compressor == BMK_ZSTD) {
                         ZSTD_parameters const zparams = ZSTD_getParams(cLevel, avgSize, dictBufferSize);
                         ZSTD_customMem const cmem = { NULL, NULL, NULL };
    -                    ZSTD_CDict* cdict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, 1, zparams, cmem);
    +                    ZSTD_CDict* const cdict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, 1, zparams.cParams, cmem);
                         if (cdict==NULL) EXM_THROW(1, "ZSTD_createCDict_advanced() allocation failure");
     
                         do {
    @@ -982,7 +984,7 @@ int main(int argCount, char** argv)
     
     #ifdef UTIL_HAS_CREATEFILELIST
         if (recursive) {
    -        fileNamesTable = UTIL_createFileList(filenameTable, filenameIdx, &fileNamesBuf, &fileNamesNb);
    +        fileNamesTable = UTIL_createFileList(filenameTable, filenameIdx, &fileNamesBuf, &fileNamesNb, 1);
             if (fileNamesTable) {
                 unsigned u;
                 for (u=0; u