
Merge remote-tracking branch 'upstream/dev' into btopt

* upstream/dev: (305 commits)
  added test for ZSTD_estimateCStreamSize()
  changed variable name, for clarity
  fixed ZSTD_estimateCStreamSize()
  shortened ZSTD_createCStream_Advanced()
  fixed symbols test
  added ZSTD_estimateDStreamSize()
  changed name frameParams into frameHeader
  regroup memory usage function declarations
  separated ZSTD_estimateCStreamSize() from ZSTD_estimateCCtxSize()
  bumped version number
  added ZSTD_estimateCDictSize() and ZSTD_estimateDDictSize()
  Updated ZSTD_freeCCtx()
  updated ZSTD_estimateCCtxSize()
  Updated ZSTD_sizeof_CCtx()
  merged CCtx and CStream as a single same object
  cli : -d and -t do not stop after a failed decompression
  added dev branch CircleCI badge
  added dev branch Appveyor badge
  keep dev branch status only
  creates a binary archive without the `programs` directory
  ...
Nick Terrell committed on 2017-05-10 16:49:58 -07:00
149 changed files with 22690 additions and 2821 deletions
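Several of the merged commits above rework the memory-estimation API (ZSTD_estimateCCtxSize(), ZSTD_estimateCStreamSize(), ZSTD_estimateDStreamSize(), ZSTD_estimateCDictSize()). A minimal sketch of how callers use these estimates to budget memory before allocating a context; the signatures below follow today's public zstd.h, which evolved after this commit, so treat them as illustrative:

```c
/* Budgeting sketch for the estimate functions touched by this merge.
 * Signatures follow the current zstd.h (they changed after 2017), and the
 * estimate functions live in the ZSTD_STATIC_LINKING_ONLY section. */
#define ZSTD_STATIC_LINKING_ONLY
#include <stdio.h>
#include <zstd.h>

int main(void)
{
    int const level = 3;
    size_t const cctxSize    = ZSTD_estimateCCtxSize(level);     /* one-shot compression context */
    size_t const cstreamSize = ZSTD_estimateCStreamSize(level);  /* adds streaming buffers on top */
    size_t const dstreamSize = ZSTD_estimateDStreamSize((size_t)1 << 23);  /* 8 MB window */
    printf("CCtx: %zu, CStream: %zu, DStream: %zu bytes\n",
           cctxSize, cstreamSize, dstreamSize);
    return 0;
}
```

Note that this merge also unifies CCtx and CStream into a single object, which is why the stream estimate is a strict superset of the context estimate.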

.buckconfig

@@ -1,7 +1,7 @@
[cxx]
cppflags = -DXXH_NAMESPACE=ZSTD_ -DZSTD_LEGACY_SUPPORT=1
cppflags = -DXXH_NAMESPACE=ZSTD_ -DZSTD_LEGACY_SUPPORT=4
cflags = -Wall -Wextra -Wcast-qual -Wcast-align -Wshadow -Wstrict-aliasing=1 -Wswitch-enum -Wdeclaration-after-statement -Wstrict-prototypes -Wundef -Wpointer-arith
cxxppflags = -DXXH_NAMESPACE=ZSTD_ -DZSTD_LEGACY_SUPPORT=1
cxxppflags = -DXXH_NAMESPACE=ZSTD_ -DZSTD_LEGACY_SUPPORT=4
cxxflags = -std=c++11 -Wno-deprecated-declarations
gtest_dep = //contrib/pzstd:gtest
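The bump from ZSTD_LEGACY_SUPPORT=1 to 4 here (and in the Visual Studio projects below) matches the NEWS entry dropping legacy formats <= v0.3: the macro's value is the oldest legacy format version the decoder still accepts, and 0 disables legacy decoding entirely. A simplified sketch of the compile-time gate; the real dispatch lives in lib/legacy/zstd_legacy.h, so this mirrors its shape rather than its exact contents:

```c
/* Simplified sketch (not the actual zstd_legacy.h): ZSTD_LEGACY_SUPPORT=N
 * compiles in decoders for legacy format versions >= N.
 * N=1 keeps v0.1+, N=4 keeps only v0.4+, N=0 strips legacy support. */
#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT > 0)
#  if (ZSTD_LEGACY_SUPPORT <= 1)
#    include "zstd_v01.h"   /* v0.1 decoder compiled in */
#  endif
#  if (ZSTD_LEGACY_SUPPORT <= 3)
#    include "zstd_v03.h"   /* v0.3 decoder compiled in */
#  endif
#  if (ZSTD_LEGACY_SUPPORT <= 4)
#    include "zstd_v04.h"   /* v0.4 decoder: still present at N=4 */
#  endif
#endif
```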

.gitignore (vendored, 11 changes)

@@ -24,6 +24,12 @@ zstdmt
tmp*
dictionary*
# Build artefacts
projects/
bin/
.buckd/
buck-out/
# Other files
.directory
_codelite/
@@ -34,8 +40,3 @@ _zstdbench/
.DS_Store
googletest/
*.d
# Directories
bin/
.buckd/
buck-out/

.travis.yml

@@ -1,39 +1,39 @@
# Medium Tests: Run on all commits/PRs to dev branch
language: c
sudo: required
dist: trusty
matrix:
fast_finish: true
include:
# Ubuntu 14.04
- env: Cmd="make libc6install && make -C tests test32"
- env: Cmd='make valgrindinstall arminstall ppcinstall arm-ppc-compilation && make clean lib && CFLAGS="-O1 -g" make -C zlibWrapper valgrindTest && make -C tests valgrindTest'
- env: Cmd='make gcc6install && CC=gcc-6 make clean uasan-test-zstd'
- env: Cmd='make gcc6install libc6install && CC=gcc-6 make clean uasan-test-zstd32'
- env: Cmd='make clang38install && CC=clang-3.8 make clean msan-test-zstd'
- env: Cmd='CC=gcc-6 make gcc6install uasan-test'
- env: Cmd='CC=gcc-6 make gcc6install uasan-test32'
- env: Cmd="make arminstall armtest && make clean && make aarch64test"
- env: Cmd='make ppcinstall ppctest && make clean && make ppc64test'
- env: Cmd='make gpp6install zlibwrapper && make -C tests clean test-zstd-nolegacy && make -C tests versionsTest && make clean && cd contrib/pzstd && make test-pzstd && make test-pzstd32 && make test-pzstd-tsan && make test-pzstd-asan'
install:
- export CXX="g++-6" CC="gcc-6"
- env: Cmd='make gcc6install && CC=gcc-6 make clean uasan-fuzztest'
- env: Cmd='make gcc6install libc6install && CC=gcc-6 CFLAGS=-m32 make clean uasan-fuzztest'
- env: Cmd='make clang38install && CC=clang-3.8 make clean msan-fuzztest'
- env: Cmd='make clang38install && CC=clang-3.8 make clean tsan-test-zstream'
# OS X Mavericks
- env: Cmd="make gnu90build && make clean && make test && make clean && make travis-install"
os: osx
- env: Cmd='make valgrindinstall && make -C tests clean valgrindTest'
- env: Cmd='make arminstall && make armfuzz'
- env: Cmd='make arminstall && make aarch64fuzz'
- env: Cmd='make ppcinstall && make ppcfuzz'
- env: Cmd='make ppcinstall && make ppc64fuzz'
git:
depth: 1
branches:
only:
- dev
- master
script:
- JOB_NUMBER=$(echo $TRAVIS_JOB_NUMBER | sed -e 's:[0-9][0-9]*\.\(.*\):\1:')
# cron & master => full tests, as this is the final step towards a Release
# pull requests => normal tests (job numbers 1-3)
# other feature branches => short tests (job numbers 1-2)
- echo JOB_NUMBER=$JOB_NUMBER TRAVIS_BRANCH=$TRAVIS_BRANCH TRAVIS_EVENT_TYPE=$TRAVIS_EVENT_TYPE TRAVIS_PULL_REQUEST=$TRAVIS_PULL_REQUEST
- if [ "$TRAVIS_EVENT_TYPE" = "cron" ] || [ "$TRAVIS_BRANCH" = "master" ]; then
FUZZERTEST=-T7mn sh -c "$Cmd" || travis_terminate 1;
else
if [ "$TRAVIS_EVENT_TYPE" = "pull_request" ] && [ $JOB_NUMBER -lt 4 ]; then
sh -c "$Cmd" || travis_terminate 1;
else
if [ $JOB_NUMBER -lt 3 ]; then
sh -c "$Cmd" || travis_terminate 1;
fi
fi
fi
- export FUZZERTEST=-T2mn;
export ZSTREAM_TESTTIME=-T2mn;
export DECODECORPUS_TESTTIME=-T1mn;
sh -c "$Cmd" || travis_terminate 1;

Makefile

@@ -90,6 +90,10 @@ examples:
manual:
$(MAKE) -C contrib/gen_html $@
.PHONY: cleanTabs
cleanTabs:
cd contrib; ./cleanTabs
.PHONY: clean
clean:
@$(MAKE) -C $(ZSTDDIR) $@ > $(VOID)
@@ -105,9 +109,15 @@ clean:
# make install is validated only for Linux, OSX, Hurd and some BSD targets
#------------------------------------------------------------------------------
ifneq (,$(filter $(shell uname),Linux Darwin GNU/kFreeBSD GNU FreeBSD DragonFly NetBSD))
HOST_OS = POSIX
.PHONY: install uninstall travis-install clangtest gpptest armtest usan asan uasan
HOST_OS = POSIX
CMAKE_PARAMS = -DZSTD_BUILD_CONTRIB:BOOL=ON -DZSTD_BUILD_STATIC:BOOL=ON -DZSTD_BUILD_TESTS:BOOL=ON -DZSTD_ZLIB_SUPPORT:BOOL=ON -DZSTD_LZMA_SUPPORT:BOOL=ON
.PHONY: list
list:
@$(MAKE) -pRrq -f $(lastword $(MAKEFILE_LIST)) : 2>/dev/null | awk -v RS= -F: '/^# File/,/^# Finished Make data base/ {if ($$1 !~ "^[#.]") {print $$1}}' | sort | egrep -v -e '^[^[:alnum:]]' -e '^$@$$' | xargs
.PHONY: install uninstall travis-install clangtest gpptest armtest usan asan uasan
install:
@$(MAKE) -C $(ZSTDDIR) $@
@$(MAKE) -C $(PRGDIR) $@
@@ -151,6 +161,18 @@ ppcbuild: clean
ppc64build: clean
CC=powerpc-linux-gnu-gcc CFLAGS="-m64 -Werror" $(MAKE) allarch
armfuzz: clean
CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static MOREFLAGS="-static" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) fuzztest
aarch64fuzz: clean
CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static MOREFLAGS="-static" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) fuzztest
ppcfuzz: clean
CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static MOREFLAGS="-static" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) fuzztest
ppc64fuzz: clean
CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static MOREFLAGS="-m64 -static" FUZZER_FLAGS=--no-big-tests $(MAKE) -C $(TESTDIR) fuzztest
gpptest: clean
CC=g++ $(MAKE) -C $(PRGDIR) all CFLAGS="-O3 -Wall -Wextra -Wundef -Wshadow -Wcast-align -Werror"
@@ -168,19 +190,19 @@ clangtest: clean
armtest: clean
$(MAKE) -C $(TESTDIR) datagen # use native, faster
$(MAKE) -C $(TESTDIR) test CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static ZSTDRTTEST= MOREFLAGS="-Werror -static"
$(MAKE) -C $(TESTDIR) test CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static ZSTDRTTEST= MOREFLAGS="-Werror -static" FUZZER_FLAGS=--no-big-tests
aarch64test:
$(MAKE) -C $(TESTDIR) datagen # use native, faster
$(MAKE) -C $(TESTDIR) test CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static ZSTDRTTEST= MOREFLAGS="-Werror -static"
$(MAKE) -C $(TESTDIR) test CC=aarch64-linux-gnu-gcc QEMU_SYS=qemu-aarch64-static ZSTDRTTEST= MOREFLAGS="-Werror -static" FUZZER_FLAGS=--no-big-tests
ppctest: clean
$(MAKE) -C $(TESTDIR) datagen # use native, faster
$(MAKE) -C $(TESTDIR) test CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static ZSTDRTTEST= MOREFLAGS="-Werror -Wno-attributes -static"
$(MAKE) -C $(TESTDIR) test CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static ZSTDRTTEST= MOREFLAGS="-Werror -Wno-attributes -static" FUZZER_FLAGS=--no-big-tests
ppc64test: clean
$(MAKE) -C $(TESTDIR) datagen # use native, faster
$(MAKE) -C $(TESTDIR) test CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static ZSTDRTTEST= MOREFLAGS="-m64 -static"
$(MAKE) -C $(TESTDIR) test CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static ZSTDRTTEST= MOREFLAGS="-m64 -static" FUZZER_FLAGS=--no-big-tests
arm-ppc-compilation:
$(MAKE) -C $(PRGDIR) clean zstd CC=arm-linux-gnueabi-gcc QEMU_SYS=qemu-arm-static ZSTDRTTEST= MOREFLAGS="-Werror -static"
@@ -188,24 +210,36 @@ arm-ppc-compilation:
$(MAKE) -C $(PRGDIR) clean zstd CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc-static ZSTDRTTEST= MOREFLAGS="-Werror -Wno-attributes -static"
$(MAKE) -C $(PRGDIR) clean zstd CC=powerpc-linux-gnu-gcc QEMU_SYS=qemu-ppc64-static ZSTDRTTEST= MOREFLAGS="-m64 -static"
# run UBsan with -fsanitize-recover=signed-integer-overflow
# due to a bug in UBsan when doing pointer subtraction
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63303
usan: clean
$(MAKE) test CC=clang MOREFLAGS="-g -fsanitize=undefined"
$(MAKE) test CC=clang MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize-recover=signed-integer-overflow -fsanitize=undefined"
asan: clean
$(MAKE) test CC=clang MOREFLAGS="-g -fsanitize=address"
asan-%: clean
LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=address" $(MAKE) -C $(TESTDIR) $*
msan: clean
$(MAKE) test CC=clang MOREFLAGS="-g -fsanitize=memory -fno-omit-frame-pointer" # datagen.c fails this test for no obvious reason
msan-%: clean
LDFLAGS=-fuse-ld=gold MOREFLAGS="-fno-sanitize-recover=all -fsanitize=memory -fno-omit-frame-pointer" $(MAKE) -C $(TESTDIR) $*
asan32: clean
$(MAKE) -C $(TESTDIR) test32 CC=clang MOREFLAGS="-g -fsanitize=address"
uasan: clean
$(MAKE) test CC=clang MOREFLAGS="-g -fsanitize=address -fsanitize=undefined"
$(MAKE) test CC=clang MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize-recover=signed-integer-overflow -fsanitize=address,undefined"
uasan-%: clean
LDFLAGS=-fuse-ld=gold CFLAGS="-Og -fsanitize=address -fsanitize=undefined" $(MAKE) -C $(TESTDIR) $*
LDFLAGS=-fuse-ld=gold MOREFLAGS="-Og -fno-sanitize-recover=all -fsanitize-recover=signed-integer-overflow -fsanitize=address,undefined" $(MAKE) -C $(TESTDIR) $*
tsan-%: clean
LDFLAGS=-fuse-ld=gold MOREFLAGS="-g -fno-sanitize-recover=all -fsanitize=thread" $(MAKE) -C $(TESTDIR) $*
apt-install:
sudo apt-get -yq --no-install-suggests --no-install-recommends --force-yes install $(APT_PACKAGES)
@@ -217,7 +251,7 @@ ppcinstall:
APT_PACKAGES="qemu-system-ppc qemu-user-static gcc-powerpc-linux-gnu" $(MAKE) apt-install
arminstall:
APT_PACKAGES="qemu-system-arm qemu-user-static gcc-powerpc-linux-gnu gcc-arm-linux-gnueabi libc6-dev-armel-cross gcc-aarch64-linux-gnu libc6-dev-arm64-cross" $(MAKE) apt-install
APT_PACKAGES="qemu-system-arm qemu-user-static gcc-arm-linux-gnueabi libc6-dev-armel-cross gcc-aarch64-linux-gnu libc6-dev-arm64-cross" $(MAKE) apt-install
valgrindinstall:
APT_PACKAGES="valgrind" $(MAKE) apt-install
@@ -231,12 +265,15 @@ gcc6install: apt-add-repo
gpp6install: apt-add-repo
APT_PACKAGES="libc6-dev-i386 g++-multilib gcc-6 g++-6 g++-6-multilib" $(MAKE) apt-install
clang38install:
APT_PACKAGES="clang-3.8" $(MAKE) apt-install
endif
ifneq (,$(filter MSYS%,$(shell uname)))
HOST_OS = MSYS
CMAKE_PARAMS = -G"MSYS Makefiles"
CMAKE_PARAMS = -G"MSYS Makefiles" -DZSTD_MULTITHREAD_SUPPORT:BOOL=OFF -DZSTD_BUILD_STATIC:BOOL=ON -DZSTD_BUILD_TESTS:BOOL=ON
endif
@@ -248,7 +285,7 @@ cmakebuild:
cmake --version
$(RM) -r $(BUILDIR)/cmake/build
mkdir $(BUILDIR)/cmake/build
cd $(BUILDIR)/cmake/build ; cmake -DPREFIX:STRING=~/install_test_dir $(CMAKE_PARAMS) .. ; $(MAKE) install ; $(MAKE) uninstall
cd $(BUILDIR)/cmake/build ; cmake -DCMAKE_INSTALL_PREFIX:PATH=~/install_test_dir $(CMAKE_PARAMS) .. ; $(MAKE) install ; $(MAKE) uninstall
c90build: clean
gcc -v
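The usan/uasan recipes above now pass -fsanitize-recover=signed-integer-overflow because of GCC bug 63303: UBSan lowers pointer subtraction to a signed integer subtraction and can report a false overflow on perfectly defined code. An illustrative fragment (not from the zstd sources) of the pattern involved:

```c
/* Well-defined C that GCC's UBSan could misreport as signed-integer
 * overflow (gcc.gnu.org/bugzilla/show_bug.cgi?id=63303): the pointer
 * difference is instrumented as a signed subtraction of the raw addresses.
 * Recovering from that one check keeps the rest of the sanitizer run
 * fatal, which is what the make targets above arrange. */
#include <stddef.h>

ptrdiff_t bytes_between(const char* begin, const char* end)
{
    return end - begin;   /* defined whenever both point into the same object */
}
```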

NEWS (34 changes)

@@ -1,15 +1,39 @@
v1.2.0
cli : changed : Multithreading enabled by default (use target zstd-nomt or HAVE_THREAD=0 to disable)
cli : new : command -T0 means "detect and use nb of cores", by Sean Purcell
cli : new : zstdmt symlink hardwired to `zstd -T0`
cli : new : command --threads=# (#671)
cli : changed : cover dictionary builder by default, for improved quality, by Nick Terrell
cli : new : commands --train-cover and --train-legacy, to select dictionary algorithm and parameters
cli : experimental targets `zstd4` and `xzstd4`, with support for lz4 format, by Sean Purcell
cli : fix : does not output compressed data on console
cli : fix : ignore symbolic links unless --force specified,
API : breaking change : ZSTD_createCDict_advanced(), only use compressionParameters as argument
API : added : prototypes ZSTD_*_usingCDict_advanced(), for direct control over frameParameters.
API : improved: ZSTDMT_compressCCtx() reduced memory usage
API : fix : ZSTDMT_compressCCtx() now provides srcSize in header (#634)
API : fix : src size stored in frame header is controlled at end of frame
API : fix : enforced consistent rules for pledgedSrcSize==0 (#641)
API : fix : error code "GENERIC" replaced by "dstSizeTooSmall" when appropriate
build: improved cmake script, by @Majlen
build: enabled Multi-threading support for *BSD, by Baptiste Daroussin
tools: updated Paramgrill. Command -O# provides best parameters for sample and speed target.
new : contrib/linux-kernel version, by Nick Terrell
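Two of the v1.2.0 API notes above interact: ZSTD_createCDict_advanced() now takes only compressionParameters, and pledgedSrcSize==0 consistently means "source size unknown" (#641). A hedged sketch against the era's advanced streaming init (ZSTD_initCStream_advanced(), declared under ZSTD_STATIC_LINKING_ONLY); assume level 3 and no dictionary:

```c
/* Sketch of the pledgedSrcSize rule (#641): pass the real size when known
 * so the frame header can record it, or 0 for "unknown" (the constant
 * ZSTD_CONTENTSIZE_UNKNOWN was only formalized later). Illustrative, not
 * the project's own test code. */
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>

static size_t init_stream(ZSTD_CStream* zcs, unsigned long long srcSizeHint)
{
    ZSTD_parameters const params = ZSTD_getParams(3, srcSizeHint, 0 /* no dict */);
    /* The library checks a nonzero pledge against the bytes actually fed in. */
    return ZSTD_initCStream_advanced(zcs, NULL, 0, params, srcSizeHint);
}
```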
v1.1.4
cli : new : can compress in *.gz format, using --format=gzip command, by Przemyslaw Skibinski
cli : new : advanced benchmark command --priority=rt
cli : fix : write on sparse-enabled file systems in 32-bits mode, by @ds77
cli : fix : --rm remains silent when input is stdin
cli : experimental : xzstd, with support for xz/lzma decoding, by Przemyslaw Skibinski
speed : improved decompression speed in streaming mode for single shot scenarios (+5%)
memory : DDict (decompression dictionary) memory usage down from 150 KB to 20 KB
arch : 32-bits variant able to generate and decode very long matches (>32 MB), by Sean Purcell
memory: DDict (decompression dictionary) memory usage down from 150 KB to 20 KB
arch: 32-bits variant able to generate and decode very long matches (>32 MB), by Sean Purcell
API : new : ZSTD_findFrameCompressedSize(), ZSTD_getFrameContentSize(), ZSTD_findDecompressedSize()
build: new: meson build system in contrib/meson, by Dima Krasner
build: improved cmake script, by @Majlen
build: added -Wformat-security flag, as recommended by Padraig Brady
API : changed : dropped support of legacy versions <= v0.3 (can be changed by modifying ZSTD_LEGACY_SUPPORT value)
build : new: meson build system in contrib/meson, by Dima Krasner
build : improved cmake script, by @Majlen
build : added -Wformat-security flag, as recommended by Padraig Brady
doc : new : educational decoder, by Sean Purcell
v1.1.3
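The v1.1.4 entry above introduces the frame-inspection trio. A sketch using the current public signatures (the ZSTD_CONTENTSIZE_* constants postdate v1.1.4, so treat the exact shapes as illustrative):

```c
/* Inspecting .zst data without decompressing it. ZSTD_findDecompressedSize()
 * sits in the ZSTD_STATIC_LINKING_ONLY section; the other two are stable
 * API today. */
#define ZSTD_STATIC_LINKING_ONLY
#include <stdio.h>
#include <zstd.h>

static void inspect(const void* buf, size_t size)
{
    size_t const frame0 = ZSTD_findFrameCompressedSize(buf, size);
    unsigned long long const content = ZSTD_getFrameContentSize(buf, size);

    if (!ZSTD_isError(frame0))
        printf("first frame occupies %zu compressed bytes\n", frame0);
    if (content == ZSTD_CONTENTSIZE_UNKNOWN)
        printf("frame header does not store the content size\n");
    else if (content != ZSTD_CONTENTSIZE_ERROR)
        printf("first frame decompresses to %llu bytes\n", content);

    /* Sums every frame in buf, when each frame header stores its size. */
    printf("total: %llu bytes\n", ZSTD_findDecompressedSize(buf, size));
}
```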

README.md

@@ -6,19 +6,26 @@ and a command line utility producing and decoding `.zst` and `.gz` files.
For other programming languages,
you can consult a list of known ports on [Zstandard homepage](http://www.zstd.net/#other-languages).
|Branch |Status |
|------------|---------|
|master | [![Build Status](https://travis-ci.org/facebook/zstd.svg?branch=master)](https://travis-ci.org/facebook/zstd) |
|dev | [![Build Status](https://travis-ci.org/facebook/zstd.svg?branch=dev)](https://travis-ci.org/facebook/zstd) |
| dev branch status |
|-------------------|
| [![Build Status][travisDevBadge]][travisLink] [![Build status][AppveyorDevBadge]][AppveyorLink] [![Build status][CircleDevBadge]][CircleLink] |
[travisDevBadge]: https://travis-ci.org/facebook/zstd.svg?branch=dev "Continuous Integration test suite"
[travisLink]: https://travis-ci.org/facebook/zstd
[AppveyorDevBadge]: https://ci.appveyor.com/api/projects/status/xt38wbdxjk5mrbem/branch/dev?svg=true "Windows test suite"
[AppveyorLink]: https://ci.appveyor.com/project/YannCollet/zstd-p0yf0
[CircleDevBadge]: https://circleci.com/gh/facebook/zstd/tree/dev.svg?style=shield "Short test suite"
[CircleLink]: https://circleci.com/gh/facebook/zstd
As a reference, several fast compression algorithms were tested and compared
on a server running Linux Mint Debian Edition (`Linux version 4.8.0-1-amd64`),
on a server running Linux Debian (`Linux version 4.8.0-1-amd64`),
with a Core i7-6700K CPU @ 4.0GHz,
using [lzbench v1.6], an open-source in-memory benchmark by @inikep
using [lzbench], an open-source in-memory benchmark by @inikep
compiled with GCC 6.3.0,
on the [Silesia compression corpus].
[lzbench v1.6]: https://github.com/inikep/lzbench
[lzbench]: https://github.com/inikep/lzbench
[Silesia compression corpus]: http://sun.aei.polsl.pl/~sdeor/index.php?page=silesia
| Compressor name | Ratio | Compression| Decompress.|
@@ -38,7 +45,12 @@ on the [Silesia compression corpus].
Zstd can also offer stronger compression ratios at the cost of compression speed.
Speed vs Compression trade-off is configurable by small increments. Decompression speed is preserved and remains roughly the same at all settings, a property shared by most LZ compression algorithms, such as [zlib] or lzma.
The following tests were run on a Core i7-3930K CPU @ 4.5GHz, using [lzbench], an open-source in-memory benchmark by @inikep compiled with GCC 5.2.1, on the [Silesia compression corpus].
The following tests were run
on a server running Linux Debian (`Linux version 4.8.0-1-amd64`)
with a Core i7-6700K CPU @ 4.0GHz,
using [lzbench], an open-source in-memory benchmark by @inikep
compiled with GCC 6.3.0,
on the [Silesia compression corpus].
Compression Speed vs Ratio | Decompression Speed
---------------------------|--------------------
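The trade-off described above is driven by a single level argument in the one-shot API; a minimal sketch (dstCapacity should come from ZSTD_compressBound()):

```c
/* Minimal sketch of the speed/ratio knob: same call, only the level
 * changes (1 = fastest, higher = denser output; decompression speed stays
 * roughly constant, as the README notes). */
#include <zstd.h>

size_t compress_at_level(void* dst, size_t dstCapacity,
                         const void* src, size_t srcSize, int level)
{
    size_t const written = ZSTD_compress(dst, dstCapacity, src, srcSize, level);
    return ZSTD_isError(written) ? 0 : written;   /* 0 signals failure here */
}
```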

TESTING.md (new file, 44 lines)

@@ -0,0 +1,44 @@
Testing
=======
Zstandard CI testing is split up into three sections:
short, medium, and long tests.
Short Tests
-----------
Short tests run on CircleCI for new commits on every branch and pull request.
They consist of the following tests:
- Compilation on all supported targets (x86, x86_64, ARM, AArch64, PowerPC, and PowerPC64)
- Compilation on various versions of gcc, clang, and g++
- `tests/playTests.sh` on x86_64, without the tests on long data (CLI tests)
- Small tests (`tests/legacy.c`, `tests/longmatch.c`, `tests/symbols.c`) on x86_64
Medium Tests
------------
Medium tests run on every commit and pull request to `dev` branch, on TravisCI.
They consist of the following tests:
- The following tests run with UBsan and Asan on x86_64 and x86, as well as with
Msan on x86_64
- `tests/playTests.sh --test-long-data`
- Fuzzer tests: `tests/fuzzer.c`, `tests/zstreamtest.c`, and `tests/decodecorpus.c`
- `tests/zstreamtest.c` under Tsan (streaming mode, including multithreaded mode)
- Valgrind Test (`make -C tests valgrindTest`) (testing CLI and fuzzer under valgrind)
- Fuzzer tests (see above) on ARM, AArch64, PowerPC, and PowerPC64
Long Tests
----------
Long tests run on all commits to `master` branch,
and once a day on the current version of `dev` branch,
on TravisCI.
They consist of the following tests:
- Entire test suite (including fuzzers and some other specialized tests) on:
- x86_64 and x86 with UBsan and Asan
- x86_64 with Msan
- ARM, AArch64, PowerPC, and PowerPC64
- Streaming mode fuzzer with Tsan (for the `zstdmt` testing)
- ZlibWrapper tests, including under valgrind
- Versions test (ensuring `zstd` can decode files from all previous versions)
- `pzstd` with asan and tsan, as well as in 32-bits mode
- Testing `zstd` with legacy mode off
- Testing `zbuff` (old streaming API)
- Entire test suite and make install on OS X
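Most of the fuzzers listed above ultimately defend one invariant, decompress(compress(x)) == x. As a taste, a self-contained round-trip check in their spirit (illustrative; not the actual harness in tests/fuzzer.c):

```c
/* Round-trip smoke test in the spirit of tests/fuzzer.c. */
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <zstd.h>

static void roundTrip(const void* src, size_t srcSize, int level)
{
    size_t const cBound = ZSTD_compressBound(srcSize);  /* worst case */
    void* const cBuf = malloc(cBound);
    void* const rBuf = malloc(srcSize ? srcSize : 1);
    assert(cBuf && rBuf);

    size_t const cSize = ZSTD_compress(cBuf, cBound, src, srcSize, level);
    assert(!ZSTD_isError(cSize));
    size_t const rSize = ZSTD_decompress(rBuf, srcSize, cBuf, cSize);
    assert(rSize == srcSize && !memcmp(src, rBuf, srcSize));

    free(cBuf);
    free(rBuf);
}
```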

appveyor.yml

@@ -1,66 +1,101 @@
version: 1.0.{build}
environment:
matrix:
- COMPILER: "gcc"
PLATFORM: "mingw64"
MAKE_PARAMS: '"make test && make lib && make -C tests test-symbols fullbench-dll fullbench-lib"'
- COMPILER: "gcc"
PLATFORM: "mingw32"
MAKE_PARAMS: '"make -C tests test-zstd test-fullbench test-fuzzer test-invalidDictionaries"'
- COMPILER: "gcc"
PLATFORM: "clang"
MAKE_PARAMS: '"make -C tests zstd fullbench fuzzer paramgrill datagen CC=clang MOREFLAGS="--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion""'
- COMPILER: "visual"
CONFIGURATION: "Debug"
PLATFORM: "x64"
- COMPILER: "visual"
CONFIGURATION: "Debug"
PLATFORM: "Win32"
- COMPILER: "visual"
CONFIGURATION: "Release"
PLATFORM: "x64"
- COMPILER: "visual"
CONFIGURATION: "Release"
PLATFORM: "Win32"
-
version: 1.0.{build}
branches:
only:
- dev
- master
environment:
matrix:
- COMPILER: "gcc"
HOST: "mingw"
PLATFORM: "x64"
SCRIPT: "make allarch && make -C tests test-symbols fullbench-dll fullbench-lib"
ARTIFACT: "true"
BUILD: "true"
- COMPILER: "gcc"
HOST: "mingw"
PLATFORM: "x86"
SCRIPT: "make allarch"
ARTIFACT: "true"
BUILD: "true"
- COMPILER: "clang"
HOST: "mingw"
PLATFORM: "x64"
SCRIPT: "MOREFLAGS='--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion' make allarch"
BUILD: "true"
install:
- COMPILER: "gcc"
HOST: "mingw"
PLATFORM: "x64"
SCRIPT: ""
TEST: "cmake"
- COMPILER: "gcc"
HOST: "mingw"
PLATFORM: "x64"
SCRIPT: ""
TEST: "pzstd"
- COMPILER: "visual"
HOST: "visual"
PLATFORM: "x64"
CONFIGURATION: "Debug"
- COMPILER: "visual"
HOST: "visual"
PLATFORM: "Win32"
CONFIGURATION: "Debug"
- COMPILER: "visual"
HOST: "visual"
PLATFORM: "x64"
CONFIGURATION: "Release"
- COMPILER: "visual"
HOST: "visual"
PLATFORM: "Win32"
CONFIGURATION: "Release"
install:
- ECHO Installing %COMPILER% %PLATFORM% %CONFIGURATION%
- MKDIR bin
- if [%COMPILER%]==[gcc] SET PATH_ORIGINAL=%PATH%
- if [%COMPILER%]==[gcc] (
SET "PATH_MINGW32=c:\MinGW\bin;c:\MinGW\usr\bin" &&
SET "PATH_MINGW64=c:\msys64\mingw64\bin;c:\msys64\usr\bin" &&
COPY C:\msys64\usr\bin\make.exe C:\MinGW\bin\make.exe &&
COPY C:\MinGW\bin\gcc.exe C:\MinGW\bin\cc.exe
) else (
IF [%PLATFORM%]==[x64] (SET ADDITIONALPARAM=/p:LibraryPath="C:\Program Files\Microsoft SDKs\Windows\v7.1\lib\x64;c:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\lib\amd64;C:\Program Files (x86)\Microsoft Visual Studio 10.0\;C:\Program Files (x86)\Microsoft Visual Studio 10.0\lib\amd64;")
- SET PATH_ORIGINAL=%PATH%
- if [%HOST%]==[mingw] (
SET "PATH_MINGW32=C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin" &&
SET "PATH_MINGW64=C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin" &&
COPY C:\msys64\usr\bin\make.exe C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin\make.exe &&
COPY C:\msys64\usr\bin\make.exe C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin\make.exe
)
- IF [%HOST%]==[visual] IF [%PLATFORM%]==[x64] (
SET ADDITIONALPARAM=/p:LibraryPath="C:\Program Files\Microsoft SDKs\Windows\v7.1\lib\x64;c:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\lib\amd64;C:\Program Files (x86)\Microsoft Visual Studio 10.0\;C:\Program Files (x86)\Microsoft Visual Studio 10.0\lib\amd64;"
)
build_script:
- ECHO Building %COMPILER% %PLATFORM% %CONFIGURATION%
- if [%PLATFORM%]==[mingw32] SET PATH=%PATH_MINGW32%;%PATH_ORIGINAL%
- if [%PLATFORM%]==[mingw64] SET PATH=%PATH_MINGW64%;%PATH_ORIGINAL%
- if [%PLATFORM%]==[clang] SET PATH=%PATH_MINGW64%;%PATH_ORIGINAL%
- if [%COMPILER%]==[gcc] (
ECHO *** &&
ECHO *** Building %PLATFORM% &&
ECHO *** &&
build_script:
- if [%HOST%]==[mingw] (
( if [%PLATFORM%]==[x64] (
SET "PATH=%PATH_MINGW64%;%PATH_ORIGINAL%"
) else if [%PLATFORM%]==[x86] (
SET "PATH=%PATH_MINGW32%;%PATH_ORIGINAL%"
) )
)
- if [%HOST%]==[mingw] if [%BUILD%]==[true] (
make -v &&
cc -v &&
ECHO %MAKE_PARAMS% &&
sh -c %MAKE_PARAMS%
sh -c "%COMPILER% -v" &&
ECHO Building zlib to static link &&
SET "CC=%COMPILER%" &&
sh -c "cd .. && git clone --depth 1 --branch v1.2.11 https://github.com/madler/zlib" &&
sh -c "cd ../zlib && make -f win32/Makefile.gcc libz.a"
ECHO Building zstd &&
SET "CPPFLAGS=-I../../zlib" &&
SET "LDFLAGS=../../zlib/libz.a" &&
sh -c "%SCRIPT%" &&
( if [%COMPILER%]==[gcc] if [%ARTIFACT%]==[true]
lib\dll\example\build_package.bat &&
make -C programs DEBUGFLAGS= clean zstd &&
cd programs\ && 7z a -tzip -mx9 zstd-win-binary-%PLATFORM%.zip zstd.exe &&
appveyor PushArtifact zstd-win-binary-%PLATFORM%.zip &&
cp zstd.exe ..\bin\zstd.exe &&
cd ..\bin\ && 7z a -tzip -mx9 zstd-win-release-%PLATFORM%.zip * &&
appveyor PushArtifact zstd-win-release-%PLATFORM%.zip
)
)
- if [%PLATFORM%]==[clang] COPY tests\fuzzer.exe tests\fuzzer_clang.exe
- if [%COMPILER%]==[gcc] if [%PLATFORM%]==[mingw64] (
COPY programs\zstd.exe bin\zstd.exe &&
appveyor PushArtifact bin\zstd.exe
)
- if [%COMPILER%]==[gcc] if [%PLATFORM%]==[mingw32] (
COPY programs\zstd.exe bin\zstd32.exe &&
appveyor PushArtifact bin\zstd32.exe
)
- if [%COMPILER%]==[gcc] make clean
- if [%COMPILER%]==[visual] (
- if [%HOST%]==[visual] (
ECHO *** &&
ECHO *** Building Visual Studio 2008 %PLATFORM%\%CONFIGURATION% in %APPVEYOR_BUILD_FOLDER% &&
ECHO *** &&
@@ -111,29 +146,26 @@ build_script:
COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe tests\
)
test_script:
test_script:
- ECHO Testing %COMPILER% %PLATFORM% %CONFIGURATION%
- SET FUZZERTEST=-T1mn
- if [%COMPILER%]==[gcc] if [%PLATFORM%]==[clang] (
tests\fuzzer_clang.exe %FUZZERTEST% &&
ECHO *** &&
ECHO *** Building cmake for %PLATFORM% &&
ECHO *** &&
- SET "CC=gcc"
- SET "CXX=g++"
- if [%TEST%]==[cmake] (
mkdir build\cmake\build &&
cd build\cmake\build &&
cmake -G "Visual Studio 14 2015 Win64" .. &&
cd ..\..\.. &&
make clean &&
ECHO *** &&
ECHO *** Building pzstd for %PLATFORM% &&
ECHO *** &&
make clean
)
- if [%TEST%]==[pzstd] (
make -C contrib\pzstd googletest-mingw64 &&
make -C contrib\pzstd pzstd.exe &&
make -C contrib\pzstd tests &&
make -C contrib\pzstd check &&
make -C contrib\pzstd clean
)
- if [%COMPILER%]==[visual] if [%CONFIGURATION%]==[Release] (
- SET "FUZZERTEST=-T30s"
- if [%HOST%]==[visual] if [%CONFIGURATION%]==[Release] (
CD tests &&
SET ZSTD=./zstd.exe &&
sh -e playTests.sh --test-large-data &&
@@ -146,28 +178,76 @@ test_script:
fuzzer_VS2015_%PLATFORM%_Release.exe %FUZZERTEST%
)
artifacts:
- path: bin\zstd.exe
- path: bin\zstd32.exe
-
version: 1.0.{build}
environment:
matrix:
- COMPILER: "gcc"
HOST: "mingw"
PLATFORM: "x64"
SCRIPT: "make allarch"
- COMPILER: "gcc"
HOST: "mingw"
PLATFORM: "x86"
SCRIPT: "make allarch"
- COMPILER: "clang"
HOST: "mingw"
PLATFORM: "x64"
SCRIPT: "MOREFLAGS='--target=x86_64-w64-mingw32 -Werror -Wconversion -Wno-sign-conversion' make allarch"
deploy:
- provider: GitHub
auth_token:
secure: LgJo8emYc3sFnlNWkGl4/VYK3nk/8+RagcsqDlAi3xeqNGNutnKjcftjg84uJoT4
artifact: bin\zstd.exe
force_update: true
on:
branch: autobuild
COMPILER: gcc
PLATFORM: "mingw64"
appveyor_repo_tag: true
- provider: GitHub
auth_token:
secure: LgJo8emYc3sFnlNWkGl4/VYK3nk/8+RagcsqDlAi3xeqNGNutnKjcftjg84uJoT4
artifact: bin\zstd32.exe
force_update: true
on:
branch: autobuild
COMPILER: gcc
PLATFORM: "mingw32"
appveyor_repo_tag: true
- COMPILER: "visual"
HOST: "visual"
PLATFORM: "x64"
CONFIGURATION: "Debug"
- COMPILER: "visual"
HOST: "visual"
PLATFORM: "Win32"
CONFIGURATION: "Debug"
- COMPILER: "visual"
HOST: "visual"
PLATFORM: "x64"
CONFIGURATION: "Release"
- COMPILER: "visual"
HOST: "visual"
PLATFORM: "Win32"
CONFIGURATION: "Release"
install:
- ECHO Installing %COMPILER% %PLATFORM% %CONFIGURATION%
- SET PATH_ORIGINAL=%PATH%
- if [%HOST%]==[mingw] (
SET "PATH_MINGW32=C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin" &&
SET "PATH_MINGW64=C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin" &&
COPY C:\msys64\usr\bin\make.exe C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin\make.exe &&
COPY C:\msys64\usr\bin\make.exe C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin\make.exe
)
- IF [%HOST%]==[visual] IF [%PLATFORM%]==[x64] (
SET ADDITIONALPARAM=/p:LibraryPath="C:\Program Files\Microsoft SDKs\Windows\v7.1\lib\x64;c:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\lib\amd64;C:\Program Files (x86)\Microsoft Visual Studio 10.0\;C:\Program Files (x86)\Microsoft Visual Studio 10.0\lib\amd64;"
)
build_script:
- ECHO Building %COMPILER% %PLATFORM% %CONFIGURATION%
- if [%HOST%]==[mingw] (
( if [%PLATFORM%]==[x64] (
SET "PATH=%PATH_MINGW64%;%PATH_ORIGINAL%"
) else if [%PLATFORM%]==[x86] (
SET "PATH=%PATH_MINGW32%;%PATH_ORIGINAL%"
) ) &&
make -v &&
sh -c "%COMPILER% -v" &&
set "CC=%COMPILER%" &&
sh -c "%SCRIPT%"
)
- if [%HOST%]==[visual] (
ECHO *** &&
ECHO *** Building Visual Studio 2015 %PLATFORM%\%CONFIGURATION% &&
ECHO *** &&
msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v140 /p:ForceImportBeforeCppTargets=%APPVEYOR_BUILD_FOLDER%\build\VS2010\CompileAsCpp.props /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" &&
DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe &&
MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe &&
msbuild "build\VS2010\zstd.sln" /m /verbosity:minimal /property:PlatformToolset=v140 /t:Clean,Build /p:Platform=%PLATFORM% /p:Configuration=%CONFIGURATION% /logger:"C:\Program Files\AppVeyor\BuildAgent\Appveyor.MSBuildLogger.dll" &&
DIR build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe &&
MD5sum build/VS2010/bin/%PLATFORM%_%CONFIGURATION%/*.exe &&
COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\fuzzer.exe tests\fuzzer_VS2015_%PLATFORM%_%CONFIGURATION%.exe &&
COPY build\VS2010\bin\%PLATFORM%_%CONFIGURATION%\*.exe tests\
)

build/.gitignore (vendored, 2 changes)

@@ -17,4 +17,4 @@ VS2013/bin/
VS2015/bin/
# CMake
cmake/
cmake/build/

(Visual Studio 2008 project file)

@@ -44,7 +44,7 @@
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\compress"
PreprocessorDefinitions="ZSTD_LEGACY_SUPPORT=1;WIN32;_DEBUG;_CONSOLE"
PreprocessorDefinitions="ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;_DEBUG;_CONSOLE"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="3"
@@ -121,7 +121,7 @@
EnableIntrinsicFunctions="true"
OmitFramePointers="true"
AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\compress"
PreprocessorDefinitions="ZSTD_LEGACY_SUPPORT=1;WIN32;NDEBUG;_CONSOLE"
PreprocessorDefinitions="ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;NDEBUG;_CONSOLE"
RuntimeLibrary="0"
EnableFunctionLevelLinking="true"
UsePrecompiledHeader="0"
@@ -196,7 +196,7 @@
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\compress"
PreprocessorDefinitions="ZSTD_LEGACY_SUPPORT=1;WIN32;_DEBUG;_CONSOLE"
PreprocessorDefinitions="ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;_DEBUG;_CONSOLE"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="3"
@@ -274,7 +274,7 @@
EnableIntrinsicFunctions="true"
OmitFramePointers="true"
AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\compress"
PreprocessorDefinitions="ZSTD_LEGACY_SUPPORT=1;WIN32;NDEBUG;_CONSOLE"
PreprocessorDefinitions="ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;NDEBUG;_CONSOLE"
RuntimeLibrary="0"
EnableFunctionLevelLinking="true"
UsePrecompiledHeader="0"

(Visual Studio 2008 project file)

@@ -44,7 +44,7 @@
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\programs\legacy;$(SolutionDir)..\..\lib\dictBuilder"
PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;_DEBUG;_CONSOLE"
PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;_DEBUG;_CONSOLE"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="3"
@@ -120,7 +120,7 @@
EnableIntrinsicFunctions="true"
OmitFramePointers="true"
AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\programs\legacy;$(SolutionDir)..\..\lib\dictBuilder"
PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;NDEBUG;_CONSOLE"
PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;NDEBUG;_CONSOLE"
RuntimeLibrary="0"
EnableFunctionLevelLinking="true"
UsePrecompiledHeader="0"
@@ -194,7 +194,7 @@
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\programs\legacy;$(SolutionDir)..\..\lib\dictBuilder"
PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;_DEBUG;_CONSOLE"
PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;_DEBUG;_CONSOLE"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="3"
@@ -271,7 +271,7 @@
EnableIntrinsicFunctions="true"
OmitFramePointers="true"
AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\programs\legacy;$(SolutionDir)..\..\lib\dictBuilder"
PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;NDEBUG;_CONSOLE"
PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;NDEBUG;_CONSOLE"
RuntimeLibrary="0"
EnableFunctionLevelLinking="true"
UsePrecompiledHeader="0"

(Visual Studio 2008 project file)

@@ -44,7 +44,7 @@
<Tool
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\programs"
AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\compress;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\programs"
PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
MinimalRebuild="true"
BasicRuntimeChecks="3"
@@ -120,7 +120,7 @@
Optimization="2"
EnableIntrinsicFunctions="true"
OmitFramePointers="true"
AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\programs"
AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\compress;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\programs"
PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
RuntimeLibrary="0"
EnableFunctionLevelLinking="true"
@@ -194,7 +194,7 @@
<Tool
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\programs"
AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\compress;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\programs"
PreprocessorDefinitions="WIN32;_DEBUG;_CONSOLE"
MinimalRebuild="true"
BasicRuntimeChecks="3"
@@ -271,7 +271,7 @@
Optimization="2"
EnableIntrinsicFunctions="true"
OmitFramePointers="true"
AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\programs"
AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\compress;$(SolutionDir)..\..\programs"
PreprocessorDefinitions="WIN32;NDEBUG;_CONSOLE"
RuntimeLibrary="0"
EnableFunctionLevelLinking="true"

(Visual Studio 2008 project file)

@@ -45,7 +45,7 @@
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\compress"
PreprocessorDefinitions="ZSTD_LEGACY_SUPPORT=1;WIN32;_DEBUG;_CONSOLE"
PreprocessorDefinitions="ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;_DEBUG;_CONSOLE"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="3"
@@ -122,7 +122,7 @@
EnableIntrinsicFunctions="true"
OmitFramePointers="true"
AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\compress"
PreprocessorDefinitions="ZSTD_LEGACY_SUPPORT=1;WIN32;NDEBUG;_CONSOLE"
PreprocessorDefinitions="ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;NDEBUG;_CONSOLE"
RuntimeLibrary="0"
EnableFunctionLevelLinking="true"
UsePrecompiledHeader="0"
@@ -197,7 +197,7 @@
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\compress"
PreprocessorDefinitions="ZSTD_LEGACY_SUPPORT=1;WIN32;_DEBUG;_CONSOLE"
PreprocessorDefinitions="ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;_DEBUG;_CONSOLE"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="3"
@@ -275,7 +275,7 @@
EnableIntrinsicFunctions="true"
OmitFramePointers="true"
AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\compress"
PreprocessorDefinitions="ZSTD_LEGACY_SUPPORT=1;WIN32;NDEBUG;_CONSOLE"
PreprocessorDefinitions="ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;NDEBUG;_CONSOLE"
RuntimeLibrary="0"
EnableFunctionLevelLinking="true"
UsePrecompiledHeader="0"

(Visual Studio 2008 project file)

@@ -45,7 +45,7 @@
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\programs\legacy;$(SolutionDir)..\..\lib\dictBuilder"
PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;_DEBUG;_CONSOLE"
PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;_DEBUG;_CONSOLE"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="3"
@@ -121,7 +121,7 @@
EnableIntrinsicFunctions="true"
OmitFramePointers="true"
AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\programs\legacy;$(SolutionDir)..\..\lib\dictBuilder"
PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;NDEBUG;_CONSOLE"
PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;NDEBUG;_CONSOLE"
RuntimeLibrary="0"
EnableFunctionLevelLinking="true"
UsePrecompiledHeader="0"
@@ -195,7 +195,7 @@
Name="VCCLCompilerTool"
Optimization="0"
AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\programs\legacy;$(SolutionDir)..\..\lib\dictBuilder"
PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;_DEBUG;_CONSOLE"
PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;_DEBUG;_CONSOLE"
MinimalRebuild="true"
BasicRuntimeChecks="3"
RuntimeLibrary="3"
@@ -272,7 +272,7 @@
EnableIntrinsicFunctions="true"
OmitFramePointers="true"
AdditionalIncludeDirectories="$(SolutionDir)..\..\lib;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\programs\legacy;$(SolutionDir)..\..\lib\dictBuilder"
PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;NDEBUG;_CONSOLE"
PreprocessorDefinitions="ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;NDEBUG;_CONSOLE"
RuntimeLibrary="0"
EnableFunctionLevelLinking="true"
UsePrecompiledHeader="0"

(Visual Studio 2010 project file)

@@ -67,22 +67,22 @@
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<LinkIncremental>true</LinkIncremental>
<RunCodeAnalysis>false</RunCodeAnalysis>
<IncludePath>$(IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\dictBuilder;$(UniversalCRT_IncludePath);</IncludePath>
<IncludePath>$(IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\compress;$(UniversalCRT_IncludePath);</IncludePath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<LinkIncremental>true</LinkIncremental>
<RunCodeAnalysis>false</RunCodeAnalysis>
<IncludePath>$(IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\dictBuilder;$(UniversalCRT_IncludePath);</IncludePath>
<IncludePath>$(IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\compress;$(UniversalCRT_IncludePath);</IncludePath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
<LinkIncremental>false</LinkIncremental>
<RunCodeAnalysis>false</RunCodeAnalysis>
<IncludePath>$(IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\dictBuilder;$(UniversalCRT_IncludePath);</IncludePath>
<IncludePath>$(IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\compress;$(UniversalCRT_IncludePath);</IncludePath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<LinkIncremental>false</LinkIncremental>
<RunCodeAnalysis>false</RunCodeAnalysis>
<IncludePath>$(IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\dictBuilder;$(UniversalCRT_IncludePath);</IncludePath>
<IncludePath>$(IncludePath);$(SolutionDir)..\..\lib;$(SolutionDir)..\..\programs;$(SolutionDir)..\..\lib\legacy;$(SolutionDir)..\..\lib\common;$(SolutionDir)..\..\lib\dictBuilder;$(SolutionDir)..\..\lib\compress;$(UniversalCRT_IncludePath);</IncludePath>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
<ClCompile>

(Visual Studio 2010 project file)

@@ -149,7 +149,7 @@
</PrecompiledHeader>
<WarningLevel>Level4</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<MinimalRebuild>true</MinimalRebuild>
<BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
<RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
@@ -169,7 +169,7 @@
</PrecompiledHeader>
<WarningLevel>Level4</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<TreatWarningAsError>true</TreatWarningAsError>
<BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
<RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
@@ -189,7 +189,7 @@
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<EnablePREfast>false</EnablePREfast>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
@@ -211,7 +211,7 @@
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<TreatWarningAsError>false</TreatWarningAsError>
<EnablePREfast>false</EnablePREfast>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>

(Visual Studio 2010 project file)

@@ -146,7 +146,7 @@
</PrecompiledHeader>
<WarningLevel>Level4</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<MinimalRebuild>true</MinimalRebuild>
<BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
<RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
@@ -166,7 +166,7 @@
</PrecompiledHeader>
<WarningLevel>Level4</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<TreatWarningAsError>true</TreatWarningAsError>
<BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
<RuntimeLibrary>MultiThreadedDebugDLL</RuntimeLibrary>
@@ -186,7 +186,7 @@
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<EnablePREfast>false</EnablePREfast>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
@@ -208,7 +208,7 @@
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>ZSTD_DLL_EXPORT=1;ZSTD_LEGACY_SUPPORT=1;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>ZSTD_DLL_EXPORT=1;ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<TreatWarningAsError>false</TreatWarningAsError>
<EnablePREfast>false</EnablePREfast>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>

(Visual Studio 2010 project file)

@@ -155,7 +155,7 @@
</PrecompiledHeader>
<WarningLevel>Level4</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>ZSTD_LEGACY_SUPPORT=1;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<TreatWarningAsError>true</TreatWarningAsError>
<EnablePREfast>false</EnablePREfast>
</ClCompile>
@@ -171,7 +171,7 @@
</PrecompiledHeader>
<WarningLevel>Level4</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>ZSTD_LEGACY_SUPPORT=1;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<TreatWarningAsError>true</TreatWarningAsError>
<EnablePREfast>false</EnablePREfast>
</ClCompile>
@@ -189,7 +189,7 @@
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>ZSTD_LEGACY_SUPPORT=1;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<EnablePREfast>false</EnablePREfast>
<TreatWarningAsError>false</TreatWarningAsError>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
@@ -210,7 +210,7 @@
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>ZSTD_LEGACY_SUPPORT=1;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>ZSTD_MULTITHREAD=1;ZSTD_LEGACY_SUPPORT=4;WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<TreatWarningAsError>false</TreatWarningAsError>
<EnablePREfast>false</EnablePREfast>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
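Every Visual Studio project in this merge gains ZSTD_MULTITHREAD=1, which compiles the multithreaded backend into the binary. A sketch of driving it through today's stable parameter API; ZSTD_c_nbWorkers postdates this commit (the ZSTDMT_* entry points played this role in 2017), so treat the call shape as illustrative:

```c
/* What a ZSTD_MULTITHREAD=1 build enables, via the current stable API. */
#include <zstd.h>

size_t compress_mt(void* dst, size_t dstCapacity,
                   const void* src, size_t srcSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t result;
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3);
    /* Fails (parameter unsupported) if the library was built without
     * ZSTD_MULTITHREAD; with it, input is split across 4 worker threads. */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);
    result = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
    ZSTD_freeCCtx(cctx);
    return result;
}
```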

build/cmake/.gitignore

@@ -1,6 +1,7 @@
# cmake producted
# cmake artefacts
CMakeCache.txt
CMakeFiles
Makefile
cmake_install.cmake
cmake_uninstall.cmake
*.1

build/cmake/CMakeLists.txt

@@ -8,33 +8,61 @@
# ################################################################
PROJECT(zstd)
CMAKE_MINIMUM_REQUIRED(VERSION 2.8.7)
CMAKE_MINIMUM_REQUIRED(VERSION 2.8.9)
SET(ZSTD_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../..")
LIST(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules")
#-----------------------------------------------------------------------------
# Add extra compilation flags
#-----------------------------------------------------------------------------
INCLUDE(AddZstdCompilationFlags)
ADD_ZSTD_COMPILATION_FLAGS()
#-----------------------------------------------------------------------------
# Options
#-----------------------------------------------------------------------------
OPTION(ZSTD_LEGACY_SUPPORT "LEGACY SUPPORT" OFF)
OPTION(ZSTD_MULTITHREAD_SUPPORT "MULTITHREADING SUPPORT" ON)
IF (UNIX)
OPTION(ZSTD_MULTITHREAD_SUPPORT "MULTITHREADING SUPPORT" ON)
ELSE (UNIX)
OPTION(ZSTD_MULTITHREAD_SUPPORT "MULTITHREADING SUPPORT" OFF)
ENDIF (UNIX)
OPTION(ZSTD_BUILD_PROGRAMS "BUILD PROGRAMS" ON)
OPTION(ZSTD_BUILD_CONTRIB "BUILD CONTRIB" OFF)
OPTION(ZSTD_BUILD_TESTS "BUILD TESTS" OFF)
IF (ZSTD_LEGACY_SUPPORT)
MESSAGE(STATUS "ZSTD_LEGACY_SUPPORT defined!")
ADD_DEFINITIONS(-DZSTD_LEGACY_SUPPORT=1)
ADD_DEFINITIONS(-DZSTD_LEGACY_SUPPORT=4)
ELSE (ZSTD_LEGACY_SUPPORT)
MESSAGE(STATUS "ZSTD_LEGACY_SUPPORT not defined!")
ADD_DEFINITIONS(-DZSTD_LEGACY_SUPPORT=0)
ENDIF (ZSTD_LEGACY_SUPPORT)
#-----------------------------------------------------------------------------
# Add source directories
#-----------------------------------------------------------------------------
ADD_SUBDIRECTORY(lib)
ADD_SUBDIRECTORY(programs)
ADD_SUBDIRECTORY(tests)
IF (ZSTD_BUILD_PROGRAMS)
ADD_SUBDIRECTORY(programs)
ENDIF (ZSTD_BUILD_PROGRAMS)
IF (ZSTD_BUILD_TESTS)
IF (NOT ZSTD_BUILD_STATIC)
MESSAGE(SEND_ERROR "You need to build static library to build tests")
ENDIF (NOT ZSTD_BUILD_STATIC)
ADD_SUBDIRECTORY(tests)
ENDIF (ZSTD_BUILD_TESTS)
IF (ZSTD_BUILD_CONTRIB)
ADD_SUBDIRECTORY(contrib)
ENDIF (ZSTD_BUILD_CONTRIB)
#-----------------------------------------------------------------------------
# Add extra compilation flags
# Add clean-all target
#-----------------------------------------------------------------------------
INCLUDE(CMakeModules/AddExtraCompilationFlags.cmake)
ADD_EXTRA_COMPILATION_FLAGS()
ADD_CUSTOM_TARGET(clean-all
COMMAND ${CMAKE_BUILD_TOOL} clean
COMMAND rm -rf ${CMAKE_BINARY_DIR}/

build/cmake/CMakeModules/AddExtraCompilationFlags.cmake (deleted)

@@ -1,329 +0,0 @@
MACRO(ADD_EXTRA_COMPILATION_FLAGS)
include(CheckCXXCompilerFlag)
include(CheckCCompilerFlag)
if (CMAKE_COMPILER_IS_GNUCXX OR MINGW) #Not only UNIX but also WIN32 for MinGW
set(POSITION_INDEPENDENT_CODE_FLAG "-fPIC")
CHECK_C_COMPILER_FLAG(${POSITION_INDEPENDENT_CODE_FLAG} POSITION_INDEPENDENT_CODE_FLAG_ALLOWED)
if (POSITION_INDEPENDENT_CODE_FLAG_ALLOWED)
MESSAGE("Compiler flag ${POSITION_INDEPENDENT_CODE_FLAG} allowed")
set(ACTIVATE_POSITION_INDEPENDENT_CODE_FLAG "ON" CACHE BOOL "activate -fPIC flag")
else ()
MESSAGE("Compiler flag ${POSITION_INDEPENDENT_CODE_FLAG} not allowed")
endif (POSITION_INDEPENDENT_CODE_FLAG_ALLOWED)
set(WARNING_UNDEF "-Wundef")
CHECK_C_COMPILER_FLAG(${WARNING_UNDEF} WARNING_UNDEF_ALLOWED)
if (WARNING_UNDEF_ALLOWED)
MESSAGE("Compiler flag ${WARNING_UNDEF} allowed")
set(ACTIVATE_WARNING_UNDEF "ON" CACHE BOOL "activate -Wundef flag")
else ()
MESSAGE("Compiler flag ${WARNING_UNDEF} not allowed")
endif (WARNING_UNDEF_ALLOWED)
set(WARNING_SHADOW "-Wshadow")
CHECK_C_COMPILER_FLAG(${WARNING_SHADOW} WARNING_SHADOW_ALLOWED)
if (WARNING_SHADOW_ALLOWED)
MESSAGE("Compiler flag ${WARNING_SHADOW} allowed")
set(ACTIVATE_WARNING_SHADOW "ON" CACHE BOOL "activate -Wshadow flag")
else ()
MESSAGE("Compiler flag ${WARNING_SHADOW} not allowed")
endif (WARNING_SHADOW_ALLOWED)
set(WARNING_CAST_ALIGN "-Wcast-align")
CHECK_C_COMPILER_FLAG(${WARNING_CAST_ALIGN} WARNING_CAST_ALIGN_ALLOWED)
if (WARNING_CAST_ALIGN_ALLOWED)
MESSAGE("Compiler flag ${WARNING_CAST_ALIGN} allowed")
set(ACTIVATE_WARNING_CAST_ALIGN "ON" CACHE BOOL "activate -Wcast-align flag")
else ()
MESSAGE("Compiler flag ${WARNING_CAST_ALIGN} not allowed")
endif (WARNING_CAST_ALIGN_ALLOWED)
set(WARNING_CAST_QUAL "-Wcast-qual")
CHECK_C_COMPILER_FLAG(${WARNING_CAST_QUAL} WARNING_CAST_QUAL_ALLOWED)
if (WARNING_CAST_QUAL_ALLOWED)
MESSAGE("Compiler flag ${WARNING_CAST_QUAL} allowed")
set(ACTIVATE_WARNING_CAST_QUAL "ON" CACHE BOOL "activate -Wcast-qual flag")
else ()
MESSAGE("Compiler flag ${WARNING_CAST_QUAL} not allowed")
endif (WARNING_CAST_QUAL_ALLOWED)
set(WARNING_STRICT_PROTOTYPES "-Wstrict-prototypes")
CHECK_C_COMPILER_FLAG(${WARNING_STRICT_PROTOTYPES} WARNING_STRICT_PROTOTYPES_ALLOWED)
if (WARNING_STRICT_PROTOTYPES_ALLOWED)
MESSAGE("Compiler flag ${WARNING_STRICT_PROTOTYPES} allowed")
set(ACTIVATE_WARNING_STRICT_PROTOTYPES "ON" CACHE BOOL "activate -Wstrict-prototypes flag")
else ()
MESSAGE("Compiler flag ${WARNING_STRICT_PROTOTYPES} not allowed")
endif (WARNING_STRICT_PROTOTYPES_ALLOWED)
set(WARNING_ALL "-Wall")
CHECK_C_COMPILER_FLAG(${WARNING_ALL} WARNING_ALL_ALLOWED)
if (WARNING_ALL_ALLOWED)
MESSAGE("Compiler flag ${WARNING_ALL} allowed")
set(ACTIVATE_WARNING_ALL "ON" CACHE BOOL "activate -Wall flag")
else ()
MESSAGE("Compiler flag ${WARNING_ALL} not allowed")
endif (WARNING_ALL_ALLOWED)
set(WARNING_EXTRA "-Wextra")
CHECK_C_COMPILER_FLAG(${WARNING_EXTRA} WARNING_EXTRA_ALLOWED)
if (WARNING_EXTRA_ALLOWED)
MESSAGE("Compiler flag ${WARNING_EXTRA} allowed")
set(ACTIVATE_WARNING_EXTRA "ON" CACHE BOOL "activate -Wextra flag")
else ()
MESSAGE("Compiler flag ${WARNING_EXTRA} not allowed")
endif (WARNING_EXTRA_ALLOWED)
set(WARNING_FLOAT_EQUAL "-Wfloat-equal")
CHECK_C_COMPILER_FLAG(${WARNING_FLOAT_EQUAL} WARNING_FLOAT_EQUAL_ALLOWED)
if (WARNING_FLOAT_EQUAL_ALLOWED)
MESSAGE("Compiler flag ${WARNING_FLOAT_EQUAL} allowed")
set(ACTIVATE_WARNING_FLOAT_EQUAL "OFF" CACHE BOOL "activate -Wfloat-equal flag")
else ()
MESSAGE("Compiler flag ${WARNING_FLOAT_EQUAL} not allowed")
endif (WARNING_FLOAT_EQUAL_ALLOWED)
set(WARNING_SIGN_CONVERSION "-Wsign-conversion")
CHECK_C_COMPILER_FLAG(${WARNING_SIGN_CONVERSION} WARNING_SIGN_CONVERSION_ALLOWED)
if (WARNING_SIGN_CONVERSION_ALLOWED)
MESSAGE("Compiler flag ${WARNING_SIGN_CONVERSION} allowed")
set(ACTIVATE_WARNING_SIGN_CONVERSION "OFF" CACHE BOOL "activate -Wsign-conversion flag")
else ()
MESSAGE("Compiler flag ${WARNING_SIGN_CONVERSION} not allowed")
endif (WARNING_SIGN_CONVERSION_ALLOWED)
if (ACTIVATE_POSITION_INDEPENDENT_CODE_FLAG)
list(APPEND CMAKE_C_FLAGS ${POSITION_INDEPENDENT_CODE_FLAG})
else ()
string(REPLACE ${POSITION_INDEPENDENT_CODE_FLAG} "" CMAKE_C_FLAGS "${POSITION_INDEPENDENT_CODE_FLAG}")
endif (ACTIVATE_POSITION_INDEPENDENT_CODE_FLAG)
if (ACTIVATE_WARNING_UNDEF)
list(APPEND CMAKE_CXX_FLAGS ${WARNING_UNDEF})
list(APPEND CMAKE_C_FLAGS ${WARNING_UNDEF})
else ()
string(REPLACE ${WARNING_UNDEF} "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
string(REPLACE ${WARNING_UNDEF} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
endif (ACTIVATE_WARNING_UNDEF)
if (ACTIVATE_WARNING_SHADOW)
list(APPEND CMAKE_CXX_FLAGS ${WARNING_SHADOW})
list(APPEND CMAKE_C_FLAGS ${WARNING_SHADOW})
else ()
string(REPLACE ${WARNING_SHADOW} "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
string(REPLACE ${WARNING_SHADOW} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
endif (ACTIVATE_WARNING_SHADOW)
if (ACTIVATE_WARNING_CAST_QUAL)
list(APPEND CMAKE_CXX_FLAGS ${WARNING_CAST_QUAL})
list(APPEND CMAKE_C_FLAGS ${WARNING_CAST_QUAL})
else ()
string(REPLACE ${WARNING_CAST_QUAL} "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
string(REPLACE ${WARNING_CAST_QUAL} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
endif (ACTIVATE_WARNING_CAST_QUAL)
if (ACTIVATE_WARNING_CAST_ALIGN)
list(APPEND CMAKE_CXX_FLAGS ${WARNING_CAST_ALIGN})
list(APPEND CMAKE_C_FLAGS ${WARNING_CAST_ALIGN})
else ()
string(REPLACE ${WARNING_CAST_ALIGN} "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
string(REPLACE ${WARNING_CAST_ALIGN} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
endif (ACTIVATE_WARNING_CAST_ALIGN)
if (ACTIVATE_WARNING_STRICT_PROTOTYPES)
list(APPEND CMAKE_C_FLAGS ${WARNING_STRICT_PROTOTYPES})
else ()
string(REPLACE ${WARNING_STRICT_PROTOTYPES} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
endif (ACTIVATE_WARNING_STRICT_PROTOTYPES)
if (ACTIVATE_WARNING_ALL)
list(APPEND CMAKE_CXX_FLAGS ${WARNING_ALL})
list(APPEND CMAKE_C_FLAGS ${WARNING_ALL})
else ()
string(REPLACE ${WARNING_ALL} "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
string(REPLACE ${WARNING_ALL} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
endif (ACTIVATE_WARNING_ALL)
if (ACTIVATE_WARNING_EXTRA)
list(APPEND CMAKE_CXX_FLAGS ${WARNING_EXTRA})
list(APPEND CMAKE_C_FLAGS ${WARNING_EXTRA})
else ()
string(REPLACE ${WARNING_EXTRA} "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
string(REPLACE ${WARNING_EXTRA} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
endif (ACTIVATE_WARNING_EXTRA)
if (ACTIVATE_WARNING_FLOAT_EQUAL)
list(APPEND CMAKE_CXX_FLAGS ${WARNING_FLOAT_EQUAL})
list(APPEND CMAKE_C_FLAGS ${WARNING_FLOAT_EQUAL})
else ()
string(REPLACE ${WARNING_FLOAT_EQUAL} "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
string(REPLACE ${WARNING_FLOAT_EQUAL} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
endif (ACTIVATE_WARNING_FLOAT_EQUAL)
if (ACTIVATE_WARNING_SIGN_CONVERSION)
list(APPEND CMAKE_CXX_FLAGS ${WARNING_SIGN_CONVERSION})
list(APPEND CMAKE_C_FLAGS ${WARNING_SIGN_CONVERSION})
else ()
string(REPLACE ${WARNING_SIGN_CONVERSION} "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
string(REPLACE ${WARNING_SIGN_CONVERSION} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
endif (ACTIVATE_WARNING_SIGN_CONVERSION)
#Set c++11 by default
list(APPEND CMAKE_CXX_FLAGS "-std=c++11")
#Set c99 by default
list(APPEND CMAKE_C_FLAGS "-std=c99")
elseif (MSVC)
# Add specific compilation flags for Windows Visual
set(WARNING_ALL "/Wall")
CHECK_C_COMPILER_FLAG(${WARNING_ALL} WARNING_ALL_ALLOWED)
if (WARNING_ALL_ALLOWED)
MESSAGE("Compiler flag ${WARNING_ALL} allowed")
set(ACTIVATE_WARNING_ALL "OFF" CACHE BOOL "activate /Wall flag")
else ()
MESSAGE("Compiler flag ${WARNING_ALL} not allowed")
endif (WARNING_ALL_ALLOWED)
set(RTC_FLAG "/RTC1")
CHECK_C_COMPILER_FLAG(${RTC_FLAG} RTC_FLAG_ALLOWED)
if (RTC_FLAG_ALLOWED)
MESSAGE("Compiler flag ${RTC_FLAG} allowed")
set(ACTIVATE_RTC_FLAG "ON" CACHE BOOL "activate /RTC1 flag")
else ()
MESSAGE("Compiler flag ${RTC_FLAG} not allowed")
endif (RTC_FLAG_ALLOWED)
set(ZC_FLAG "/Zc:forScope")
CHECK_C_COMPILER_FLAG(${ZC_FLAG} ZC_FLAG_ALLOWED)
if (ZC_FLAG_ALLOWED)
MESSAGE("Compiler flag ${ZC_FLAG} allowed")
set(ACTIVATE_ZC_FLAG "ON" CACHE BOOL "activate /Zc:forScope flag")
else ()
MESSAGE("Compiler flag ${ZC_FLAG} not allowed")
endif (ZC_FLAG_ALLOWED)
set(GD_FLAG "/Gd")
CHECK_C_COMPILER_FLAG(${GD_FLAG} GD_FLAG_ALLOWED)
if (GD_FLAG_ALLOWED)
MESSAGE("Compiler flag ${GD_FLAG} allowed")
set(ACTIVATE_GD_FLAG "ON" CACHE BOOL "activate /Gd flag")
else ()
MESSAGE("Compiler flag ${GD_FLAG} not allowed")
endif (GD_FLAG_ALLOWED)
set(ANALYZE_FLAG "/analyze:stacksize25000")
CHECK_C_COMPILER_FLAG(${ANALYZE_FLAG} ANALYZE_FLAG_ALLOWED)
if (ANALYZE_FLAG_ALLOWED)
MESSAGE("Compiler flag ${ANALYZE_FLAG} allowed")
set(ACTIVATE_ANALYZE_FLAG "ON" CACHE BOOL "activate /analyze flag")
else ()
MESSAGE("Compiler flag ${ANALYZE_FLAG} not allowed")
endif (ANALYZE_FLAG_ALLOWED)
if (ACTIVATE_WARNING_ALL)
list(APPEND CMAKE_CXX_FLAGS ${WARNING_ALL})
list(APPEND CMAKE_C_FLAGS ${WARNING_ALL})
else ()
string(REPLACE ${WARNING_ALL} "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
string(REPLACE ${WARNING_ALL} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
endif (ACTIVATE_WARNING_ALL)
# Only for DEBUG version
if (ACTIVATE_RTC_FLAG)
list(APPEND CMAKE_CXX_FLAGS_DEBUG ${RTC_FLAG})
list(APPEND CMAKE_C_FLAGS_DEBUG ${RTC_FLAG})
else ()
string(REPLACE ${RTC_FLAG} "" CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG}")
string(REPLACE ${RTC_FLAG} "" CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG}")
endif (ACTIVATE_RTC_FLAG)
if (ACTIVATE_ZC_FLAG)
list(APPEND CMAKE_CXX_FLAGS ${ZC_FLAG})
list(APPEND CMAKE_C_FLAGS ${ZC_FLAG})
else ()
string(REPLACE ${ZC_FLAG} "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
string(REPLACE ${ZC_FLAG} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
endif (ACTIVATE_ZC_FLAG)
if (ACTIVATE_GD_FLAG)
list(APPEND CMAKE_CXX_FLAGS ${GD_FLAG})
list(APPEND CMAKE_C_FLAGS ${GD_FLAG})
else ()
string(REPLACE ${GD_FLAG} "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
string(REPLACE ${GD_FLAG} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
endif (ACTIVATE_GD_FLAG)
if (ACTIVATE_ANALYZE_FLAG)
list(APPEND CMAKE_CXX_FLAGS ${ANALYZE_FLAG})
list(APPEND CMAKE_C_FLAGS ${ANALYZE_FLAG})
else ()
string(REPLACE ${ANALYZE_FLAG} "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
string(REPLACE ${ANALYZE_FLAG} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
endif (ACTIVATE_ANALYZE_FLAG)
if (MSVC80 OR MSVC90 OR MSVC10 OR MSVC11)
# To avoid compiler warning (level 4) C4571, compile with /EHa if you still want
# your catch(...) blocks to catch structured exceptions.
list(APPEND CMAKE_CXX_FLAGS "/EHa")
endif (MSVC80 OR MSVC90 OR MSVC10 OR MSVC11)
set(MULTITHREADED_COMPILATION "/MP")
MESSAGE("Compiler flag ${MULTITHREADED_COMPILATION} allowed")
set(ACTIVATE_MULTITHREADED_COMPILATION "ON" CACHE BOOL "activate /MP flag")
if (ACTIVATE_MULTITHREADED_COMPILATION)
list(APPEND CMAKE_CXX_FLAGS ${MULTITHREADED_COMPILATION})
list(APPEND CMAKE_C_FLAGS ${MULTITHREADED_COMPILATION})
else ()
string(REPLACE ${MULTITHREADED_COMPILATION} "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
string(REPLACE ${MULTITHREADED_COMPILATION} "" CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
endif (ACTIVATE_MULTITHREADED_COMPILATION)
#For exceptions
list(APPEND CMAKE_CXX_FLAGS "/EHsc")
list(APPEND CMAKE_C_FLAGS "/EHsc")
# UNICODE SUPPORT
list(APPEND CMAKE_CXX_FLAGS "/D_UNICODE /DUNICODE")
list(APPEND CMAKE_C_FLAGS "/D_UNICODE /DUNICODE")
endif ()
# Remove duplicate compilation flags
FOREACH (flag_var CMAKE_C_FLAGS CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE
CMAKE_C_FLAGS_MINSIZEREL CMAKE_C_FLAGS_RELWITHDEBINFO
CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
separate_arguments(${flag_var})
list(REMOVE_DUPLICATES ${flag_var})
string(REPLACE ";" " " ${flag_var} "${${flag_var}}")
set(${flag_var} "${${flag_var}}" CACHE STRING "common build flags" FORCE)
ENDFOREACH (flag_var)
if (MSVC)
# Replace /MT with /MD flag
# Replace /O2 with /Ox flag
FOREACH (flag_var CMAKE_C_FLAGS CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE
CMAKE_C_FLAGS_MINSIZEREL CMAKE_C_FLAGS_RELWITHDEBINFO
CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
STRING(REGEX REPLACE "/MT" "/MD" ${flag_var} "${${flag_var}}")
STRING(REGEX REPLACE "/O2" "/Ox" ${flag_var} "${${flag_var}}")
ENDFOREACH (flag_var)
endif ()
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}" CACHE STRING "Updated flags" FORCE)
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG}" CACHE STRING "Updated flags" FORCE)
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE}" CACHE STRING "Updated flags" FORCE)
set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL}" CACHE STRING "Updated flags" FORCE)
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO}" CACHE STRING "Updated flags" FORCE)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS}" CACHE STRING "Updated flags" FORCE)
set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG}" CACHE STRING "Updated flags" FORCE)
set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE}" CACHE STRING "Updated flags" FORCE)
set(CMAKE_C_FLAGS_MINSIZEREL "${CMAKE_C_FLAGS_MINSIZEREL}" CACHE STRING "Updated flags" FORCE)
set(CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO}" CACHE STRING "Updated flags" FORCE)
ENDMACRO(ADD_EXTRA_COMPILATION_FLAGS)

View File

@ -0,0 +1,86 @@
include(CheckCXXCompilerFlag)
include(CheckCCompilerFlag)
function(EnableCompilerFlag _flag _C _CXX)
string(REGEX REPLACE "\\+" "PLUS" varname "${_flag}")
string(REGEX REPLACE "[^A-Za-z0-9]+" "_" varname "${varname}")
string(REGEX REPLACE "^_+" "" varname "${varname}")
string(TOUPPER "${varname}" varname)
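# e.g. "-std=c++11" becomes STD_CPLUSPLUS11, cached as C_FLAG_STD_CPLUSPLUS11 / CXX_FLAG_STD_CPLUSPLUS11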
if (_C)
CHECK_C_COMPILER_FLAG(${_flag} C_FLAG_${varname})
if (C_FLAG_${varname})
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${_flag}" PARENT_SCOPE)
endif ()
endif ()
if (_CXX)
CHECK_CXX_COMPILER_FLAG(${_flag} CXX_FLAG_${varname})
if (CXX_FLAG_${varname})
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${_flag}" PARENT_SCOPE)
endif ()
endif ()
endfunction()
MACRO(ADD_ZSTD_COMPILATION_FLAGS)
if (CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang" OR MINGW) #Not only UNIX but also WIN32 for MinGW
#Set c++11 by default
EnableCompilerFlag("-std=c++11" false true)
#Set c99 by default
EnableCompilerFlag("-std=c99" true false)
EnableCompilerFlag("-Wall" true true)
EnableCompilerFlag("-Wextra" true true)
EnableCompilerFlag("-Wundef" true true)
EnableCompilerFlag("-Wshadow" true true)
EnableCompilerFlag("-Wcast-align" true true)
EnableCompilerFlag("-Wcast-qual" true true)
EnableCompilerFlag("-Wstrict-prototypes" true false)
elseif (MSVC) # Add specific compilation flags for Windows Visual
EnableCompilerFlag("/Wall" true true)
# Only for DEBUG version
EnableCompilerFlag("/RTC1" true true)
EnableCompilerFlag("/Zc:forScope" true true)
EnableCompilerFlag("/Gd" true true)
EnableCompilerFlag("/analyze:stacksize25000" true true)
if (MSVC80 OR MSVC90 OR MSVC10 OR MSVC11)
# To avoid compiler warning (level 4) C4571, compile with /EHa if you still want
# your catch(...) blocks to catch structured exceptions.
EnableCompilerFlag("/EHa" false true)
endif (MSVC80 OR MSVC90 OR MSVC10 OR MSVC11)
set(ACTIVATE_MULTITHREADED_COMPILATION "ON" CACHE BOOL "activate multi-threaded compilation (/MP flag)")
if (ACTIVATE_MULTITHREADED_COMPILATION)
EnableCompilerFlag("/MP" true true)
endif ()
#For exceptions
EnableCompilerFlag("/EHsc" true true)
# UNICODE SUPPORT
EnableCompilerFlag("/D_UNICODE" true true)
EnableCompilerFlag("/DUNICODE" true true)
endif ()
# Remove duplicate compilation flags
FOREACH (flag_var CMAKE_C_FLAGS CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE
CMAKE_C_FLAGS_MINSIZEREL CMAKE_C_FLAGS_RELWITHDEBINFO
CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
separate_arguments(${flag_var})
list(REMOVE_DUPLICATES ${flag_var})
string(REPLACE ";" " " ${flag_var} "${${flag_var}}")
ENDFOREACH (flag_var)
if (MSVC)
# Replace /MT with /MD flag
# Replace /O2 with /Ox flag
FOREACH (flag_var CMAKE_C_FLAGS CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE
CMAKE_C_FLAGS_MINSIZEREL CMAKE_C_FLAGS_RELWITHDEBINFO
CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
STRING(REGEX REPLACE "/MT" "/MD" ${flag_var} "${${flag_var}}")
STRING(REGEX REPLACE "/O2" "/Ox" ${flag_var} "${${flag_var}}")
ENDFOREACH (flag_var)
endif ()
ENDMACRO(ADD_ZSTD_COMPILATION_FLAGS)

View File

@ -0,0 +1,9 @@
function(GetZstdLibraryVersion _header _major _minor _release)
# Read file content
FILE(READ ${_header} CONTENT)
string(REGEX MATCH ".*define ZSTD_VERSION_MAJOR *([0-9]+).*define ZSTD_VERSION_MINOR *([0-9]+).*define ZSTD_VERSION_RELEASE *([0-9]+)" VERSION_REGEX "${CONTENT}")
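# matches header lines like "#define ZSTD_VERSION_MAJOR 1" for MAJOR, MINOR and RELEASE (values illustrative)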
SET(${_major} ${CMAKE_MATCH_1} PARENT_SCOPE)
SET(${_minor} ${CMAKE_MATCH_2} PARENT_SCOPE)
SET(${_release} ${CMAKE_MATCH_3} PARENT_SCOPE)
endfunction()

View File

@ -13,4 +13,5 @@
PROJECT(contrib)
ADD_SUBDIRECTORY(pzstd)
ADD_SUBDIRECTORY(gen_html)

View File

@ -0,0 +1,33 @@
# ################################################################
# * Copyright (c) 2015-present, Yann Collet, Facebook, Inc.
# * All rights reserved.
# *
# * This source code is licensed under the BSD-style license found in the
# * LICENSE file in the root directory of this source tree. An additional grant
# * of patent rights can be found in the PATENTS file in the same directory.
#
# You can contact the author at :
# - zstd homepage : http://www.zstd.net/
# ################################################################
PROJECT(gen_html)
INCLUDE(GetZstdLibraryVersion)
SET(CMAKE_INCLUDE_CURRENT_DIR TRUE)
# Define programs directory, where sources and header files are located
SET(LIBRARY_DIR ${ZSTD_SOURCE_DIR}/lib)
SET(PROGRAMS_DIR ${ZSTD_SOURCE_DIR}/programs)
SET(GENHTML_DIR ${ZSTD_SOURCE_DIR}/contrib/gen_html)
SET(GENHTML_BINARY ${PROJECT_BINARY_DIR}/gen_html${CMAKE_EXECUTABLE_SUFFIX})
INCLUDE_DIRECTORIES(${PROGRAMS_DIR} ${LIBRARY_DIR} ${LIBRARY_DIR}/common ${GENHTML_DIR})
ADD_EXECUTABLE(gen_html ${GENHTML_DIR}/gen_html.cpp)
GetZstdLibraryVersion(${LIBRARY_DIR}/zstd.h VMAJOR VMINOR VRELEASE)
SET(LIBVERSION "${VMAJOR}.${VMINOR}.${VRELEASE}")
ADD_CUSTOM_TARGET(zstd_manual.html ALL
${GENHTML_BINARY} "${LIBVERSION}" "${LIBRARY_DIR}/zstd.h" "${PROJECT_BINARY_DIR}/zstd_manual.html"
DEPENDS gen_html COMMENT "Update zstd manual")
INSTALL(FILES "${PROJECT_BINARY_DIR}/zstd_manual.html" DESTINATION "share/doc")

View File

@ -14,17 +14,22 @@ PROJECT(pzstd)
SET(CMAKE_INCLUDE_CURRENT_DIR TRUE)
# Define project root directory
SET(ROOT_DIR ../../../..)
# Define programs directory, where sources and header files are located
SET(LIBRARY_DIR ${ROOT_DIR}/lib)
SET(PROGRAMS_DIR ${ROOT_DIR}/programs)
SET(PZSTD_DIR ${ROOT_DIR}/contrib/pzstd)
SET(LIBRARY_DIR ${ZSTD_SOURCE_DIR}/lib)
SET(PROGRAMS_DIR ${ZSTD_SOURCE_DIR}/programs)
SET(PZSTD_DIR ${ZSTD_SOURCE_DIR}/contrib/pzstd)
INCLUDE_DIRECTORIES(${PROGRAMS_DIR} ${LIBRARY_DIR} ${LIBRARY_DIR}/common ${PZSTD_DIR})
ADD_EXECUTABLE(pzstd ${PZSTD_DIR}/main.cpp ${PZSTD_DIR}/Options.cpp ${PZSTD_DIR}/Pzstd.cpp ${PZSTD_DIR}/SkippableFrame.cpp)
TARGET_LINK_LIBRARIES(pzstd libzstd_static pthread)
SET_TARGET_PROPERTIES(pzstd PROPERTIES COMPILE_DEFINITIONS "NDEBUG")
SET_TARGET_PROPERTIES(pzstd PROPERTIES COMPILE_OPTIONS "-Wno-shadow")
SET_PROPERTY(TARGET pzstd APPEND PROPERTY COMPILE_DEFINITIONS "NDEBUG")
SET_PROPERTY(TARGET pzstd APPEND PROPERTY COMPILE_OPTIONS "-Wno-shadow")
SET(THREADS_PREFER_PTHREAD_FLAG ON)
FIND_PACKAGE(Threads REQUIRED)
IF (CMAKE_USE_PTHREADS_INIT)
TARGET_LINK_LIBRARIES(pzstd libzstd_shared ${CMAKE_THREAD_LIBS_INIT})
ELSE()
MESSAGE(SEND_ERROR "ZSTD currently does not support thread libraries other than pthreads")
ENDIF()
INSTALL(TARGETS pzstd RUNTIME DESTINATION "bin")

2
build/cmake/lib/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
# cmake build artefact
libzstd.pc

View File

@ -10,30 +10,18 @@
# - zstd homepage : http://www.zstd.net/
# ################################################################
# Get library version based on information from input content (use regular exp)
function(GetLibraryVersion _content _outputVar1 _outputVar2 _outputVar3)
string(REGEX MATCHALL ".*define ZSTD_VERSION_MAJOR+.* ([0-9]+).*define ZSTD_VERSION_MINOR+.* ([0-9]+).*define ZSTD_VERSION_RELEASE+.* ([0-9]+)" VERSION_REGEX "${_content}")
SET(${_outputVar1} ${CMAKE_MATCH_1} PARENT_SCOPE)
SET(${_outputVar2} ${CMAKE_MATCH_2} PARENT_SCOPE)
SET(${_outputVar3} ${CMAKE_MATCH_3} PARENT_SCOPE)
endfunction()
PROJECT(libzstd)
SET(CMAKE_INCLUDE_CURRENT_DIR TRUE)
# Define project root directory
SET(ROOT_DIR ../../..)
OPTION(ZSTD_BUILD_STATIC "BUILD STATIC LIBRARIES" OFF)
# Define library directory, where sources and header files are located
SET(LIBRARY_DIR ${ROOT_DIR}/lib)
SET(LIBRARY_DIR ${ZSTD_SOURCE_DIR}/lib)
INCLUDE_DIRECTORIES(${LIBRARY_DIR} ${LIBRARY_DIR}/common)
# Read file content
FILE(READ ${LIBRARY_DIR}/zstd.h HEADER_CONTENT)
# Parse version
GetLibraryVersion("${HEADER_CONTENT}" LIBVER_MAJOR LIBVER_MINOR LIBVER_RELEASE)
INCLUDE(GetZstdLibraryVersion)
GetZstdLibraryVersion(${LIBRARY_DIR}/zstd.h LIBVER_MAJOR LIBVER_MINOR LIBVER_RELEASE)
MESSAGE("ZSTD VERSION ${LIBVER_MAJOR}.${LIBVER_MINOR}.${LIBVER_RELEASE}")
SET(Sources
@ -97,96 +85,76 @@ IF (ZSTD_LEGACY_SUPPORT)
ENDIF (ZSTD_LEGACY_SUPPORT)
IF (MSVC)
SET(MSVC_RESOURCE_DIR ${ROOT_DIR}/build/VS2010/libzstd-dll)
SET(MSVC_RESOURCE_DIR ${ZSTD_SOURCE_DIR}/build/VS2010/libzstd-dll)
SET(PlatformDependResources ${MSVC_RESOURCE_DIR}/libzstd-dll.rc)
ENDIF (MSVC)
# Split project to static and shared libraries build
ADD_LIBRARY(libzstd_static STATIC ${Sources} ${Headers})
ADD_LIBRARY(libzstd_shared SHARED ${Sources} ${Headers} ${PlatformDependResources})
IF (ZSTD_BUILD_STATIC)
ADD_LIBRARY(libzstd_static STATIC ${Sources} ${Headers})
ENDIF (ZSTD_BUILD_STATIC)
# Add specific compile definitions for MSVC project
IF (MSVC)
SET_TARGET_PROPERTIES(libzstd_static PROPERTIES COMPILE_DEFINITIONS "ZSTD_HEAPMODE=0;_CRT_SECURE_NO_WARNINGS")
SET_TARGET_PROPERTIES(libzstd_shared PROPERTIES COMPILE_DEFINITIONS "ZSTD_DLL_EXPORT=1;ZSTD_HEAPMODE=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS")
SET_PROPERTY(TARGET libzstd_shared APPEND PROPERTY COMPILE_DEFINITIONS "ZSTD_DLL_EXPORT=1;ZSTD_HEAPMODE=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS")
IF (ZSTD_BUILD_STATIC)
SET_PROPERTY(TARGET libzstd_static APPEND PROPERTY COMPILE_DEFINITIONS "ZSTD_HEAPMODE=0;_CRT_SECURE_NO_WARNINGS")
ENDIF (ZSTD_BUILD_STATIC)
ENDIF (MSVC)
# Define library base name
IF (MSVC)
SET(LIBRARY_BASE_NAME zstdlib)
ELSE ()
SET(LIBRARY_BASE_NAME libzstd)
ENDIF (MSVC)
IF (MSVC)
IF (CMAKE_SIZEOF_VOID_P MATCHES "8")
SET(LIBRARY_ARCH_SUFFIX "_x64")
SET(LIBRARY_BASE_NAME "zstdlib_x64")
ELSE ()
SET(LIBRARY_ARCH_SUFFIX "_x86")
SET(LIBRARY_BASE_NAME "zstdlib_x86")
ENDIF (CMAKE_SIZEOF_VOID_P MATCHES "8")
ELSE ()
SET(LIBRARY_ARCH_SUFFIX "")
SET(LIBRARY_BASE_NAME zstd)
ENDIF (MSVC)
# Define static and shared library names
SET(STATIC_LIBRARY_OUTPUT_NAME ${LIBRARY_BASE_NAME}${LIBRARY_ARCH_SUFFIX} CACHE STRING "Static library output name")
SET(SHARED_LIBRARY_OUTPUT_NAME ${LIBRARY_BASE_NAME}.${LIBVER_MAJOR}.${LIBVER_MINOR}.${LIBVER_RELEASE}${LIBRARY_ARCH_SUFFIX} CACHE STRING "Shared library output name")
SET_TARGET_PROPERTIES(
libzstd_static
PROPERTIES
PREFIX ""
OUTPUT_NAME ${STATIC_LIBRARY_OUTPUT_NAME})
SET_TARGET_PROPERTIES(
libzstd_shared
PROPERTIES
PREFIX ""
OUTPUT_NAME ${SHARED_LIBRARY_OUTPUT_NAME})
OUTPUT_NAME ${LIBRARY_BASE_NAME}
SOVERSION ${LIBVER_MAJOR}.${LIBVER_MINOR}.${LIBVER_RELEASE})
IF (ZSTD_BUILD_STATIC)
SET_TARGET_PROPERTIES(
libzstd_static
PROPERTIES
OUTPUT_NAME ${LIBRARY_BASE_NAME})
ENDIF (ZSTD_BUILD_STATIC)
IF (UNIX)
IF ("${PREFIX}" STREQUAL "")
SET(PREFIX /usr/local)
ENDIF()
MESSAGE("the variable PREFIX=${PREFIX}")
SET(INSTALL_LIBRARY_DIR ${PREFIX}/lib)
SET(INSTALL_INCLUDE_DIR ${PREFIX}/include)
# pkg-config
SET(PREFIX "${CMAKE_INSTALL_PREFIX}")
SET(LIBDIR "${CMAKE_INSTALL_PREFIX}/lib")
SET(INCLUDEDIR "${CMAKE_INSTALL_PREFIX}/include")
SET(VERSION "${LIBVER_MAJOR}.${LIBVER_MINOR}.${LIBVER_RELEASE}")
ADD_CUSTOM_TARGET(libzstd.pc ALL
${CMAKE_COMMAND} -DIN="${LIBRARY_DIR}/libzstd.pc.in" -DOUT="libzstd.pc"
-DPREFIX="${PREFIX}" -DLIBDIR="${LIBDIR}" -DINCLUDEDIR="${INCLUDEDIR}" -DVERSION="${VERSION}"
-P "${CMAKE_CURRENT_SOURCE_DIR}/pkgconfig.cmake"
COMMENT "Creating pkg-config file")
# install target
INSTALL(FILES ${LIBRARY_DIR}/zstd.h ${LIBRARY_DIR}/deprecated/zbuff.h ${LIBRARY_DIR}/dictBuilder/zdict.h DESTINATION ${INSTALL_INCLUDE_DIR})
INSTALL(TARGETS libzstd_static DESTINATION ${INSTALL_LIBRARY_DIR})
INSTALL(TARGETS libzstd_shared LIBRARY DESTINATION ${INSTALL_LIBRARY_DIR})
# Create symlinks and setup this files
SET(SHARED_LIBRARY_LINK ${SHARED_LIBRARY_OUTPUT_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX})
SET(SHARED_LIBRARY_SYMLINK1 ${LIBRARY_BASE_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX})
SET(SHARED_LIBRARY_SYMLINK2 ${LIBRARY_BASE_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX}.${LIBVER_MAJOR})
SET(SHARED_LIBRARY_LINK_PATH ${CMAKE_CURRENT_BINARY_DIR}/${SHARED_LIBRARY_LINK})
SET(SHARED_LIBRARY_SYMLINK1_PATH ${CMAKE_CURRENT_BINARY_DIR}/${SHARED_LIBRARY_SYMLINK1})
SET(SHARED_LIBRARY_SYMLINK2_PATH ${CMAKE_CURRENT_BINARY_DIR}/${SHARED_LIBRARY_SYMLINK2})
ADD_CUSTOM_COMMAND(TARGET libzstd_shared POST_BUILD
COMMAND ${CMAKE_COMMAND} -E create_symlink ${SHARED_LIBRARY_LINK} ${SHARED_LIBRARY_SYMLINK1}
DEPENDS ${SHARED_LIBRARY_LINK_PATH}
COMMENT "Generating symbolic link ${SHARED_LIBRARY_LINK} -> ${SHARED_LIBRARY_SYMLINK1}")
ADD_CUSTOM_COMMAND(TARGET libzstd_shared POST_BUILD
COMMAND ${CMAKE_COMMAND} -E create_symlink ${SHARED_LIBRARY_LINK} ${SHARED_LIBRARY_SYMLINK2}
DEPENDS ${SHARED_LIBRARY_LINK_PATH}
COMMENT "Generating symbolic link ${SHARED_LIBRARY_LINK} -> ${SHARED_LIBRARY_SYMLINK2}")
SET_DIRECTORY_PROPERTIES(PROPERTIES ADDITIONAL_MAKE_CLEAN_FILES "${SHARED_LIBRARY_SYMLINK1};${SHARED_LIBRARY_SYMLINK2}")
INSTALL(FILES ${SHARED_LIBRARY_SYMLINK1_PATH} DESTINATION ${INSTALL_LIBRARY_DIR})
INSTALL(FILES ${SHARED_LIBRARY_SYMLINK2_PATH} DESTINATION ${INSTALL_LIBRARY_DIR})
INSTALL(FILES ${LIBRARY_DIR}/zstd.h ${LIBRARY_DIR}/deprecated/zbuff.h ${LIBRARY_DIR}/dictBuilder/zdict.h DESTINATION "include")
INSTALL(FILES "${CMAKE_CURRENT_BINARY_DIR}/libzstd.pc" DESTINATION "share/pkgconfig")
INSTALL(TARGETS libzstd_shared LIBRARY DESTINATION "lib")
IF (ZSTD_BUILD_STATIC)
INSTALL(TARGETS libzstd_static ARCHIVE DESTINATION "lib")
ENDIF (ZSTD_BUILD_STATIC)
# uninstall target
CONFIGURE_FILE(
"${CMAKE_SOURCE_DIR}/cmake_uninstall.cmake.in"
"${CMAKE_BINARY_DIR}/cmake_uninstall.cmake"
"${CMAKE_CURRENT_SOURCE_DIR}/cmake_uninstall.cmake.in"
"${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake"
IMMEDIATE @ONLY)
ADD_CUSTOM_TARGET(uninstall
COMMAND ${CMAKE_COMMAND} -P ${CMAKE_BINARY_DIR}/cmake_uninstall.cmake)
COMMAND ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake)
ENDIF (UNIX)

View File

@ -0,0 +1 @@
CONFIGURE_FILE("${IN}" "${OUT}" @ONLY)

View File

@ -1,3 +1,5 @@
# produced by make
zstd
zstd-frugal
unzstd
zstdcat

View File

@ -14,12 +14,9 @@ PROJECT(programs)
SET(CMAKE_INCLUDE_CURRENT_DIR TRUE)
# Define project root directory
SET(ROOT_DIR ../../..)
# Define programs directory, where sources and header files are located
SET(LIBRARY_DIR ${ROOT_DIR}/lib)
SET(PROGRAMS_DIR ${ROOT_DIR}/programs)
SET(LIBRARY_DIR ${ZSTD_SOURCE_DIR}/lib)
SET(PROGRAMS_DIR ${ZSTD_SOURCE_DIR}/programs)
INCLUDE_DIRECTORIES(${PROGRAMS_DIR} ${LIBRARY_DIR} ${LIBRARY_DIR}/common ${LIBRARY_DIR}/compress ${LIBRARY_DIR}/dictBuilder)
IF (ZSTD_LEGACY_SUPPORT)
@ -28,23 +25,71 @@ IF (ZSTD_LEGACY_SUPPORT)
ENDIF (ZSTD_LEGACY_SUPPORT)
IF (MSVC)
SET(MSVC_RESOURCE_DIR ${ROOT_DIR}/build/VS2010/zstd)
SET(MSVC_RESOURCE_DIR ${ZSTD_SOURCE_DIR}/build/VS2010/zstd)
SET(PlatformDependResources ${MSVC_RESOURCE_DIR}/zstd.rc)
ENDIF (MSVC)
ADD_EXECUTABLE(zstd ${PROGRAMS_DIR}/zstdcli.c ${PROGRAMS_DIR}/fileio.c ${PROGRAMS_DIR}/bench.c ${PROGRAMS_DIR}/datagen.c ${PROGRAMS_DIR}/dibio.c ${PlatformDependResources})
TARGET_LINK_LIBRARIES(zstd libzstd_static)
TARGET_LINK_LIBRARIES(zstd libzstd_shared)
ADD_CUSTOM_TARGET(zstdcat ALL ${CMAKE_COMMAND} -E create_symlink zstd zstdcat DEPENDS zstd COMMENT "Creating zstdcat symlink")
ADD_CUSTOM_TARGET(unzstd ALL ${CMAKE_COMMAND} -E create_symlink zstd unzstd DEPENDS zstd COMMENT "Creating unzstd symlink")
INSTALL(TARGETS zstd RUNTIME DESTINATION "bin")
INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/zstdcat DESTINATION "bin")
INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/unzstd DESTINATION "bin")
IF (UNIX)
ADD_CUSTOM_TARGET(zstd.1 ALL
${CMAKE_COMMAND} -E copy ${PROGRAMS_DIR}/zstd.1 .
COMMENT "Copying manpage zstd.1")
ADD_CUSTOM_TARGET(zstdcat.1 ALL ${CMAKE_COMMAND} -E create_symlink zstd.1 zstdcat.1 DEPENDS zstd.1 COMMENT "Creating zstdcat.1 symlink")
ADD_CUSTOM_TARGET(unzstd.1 ALL ${CMAKE_COMMAND} -E create_symlink zstd.1 unzstd.1 DEPENDS zstd.1 COMMENT "Creating unzstd.1 symlink")
INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/zstd.1 DESTINATION "share/man/man1")
INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/zstdcat.1 DESTINATION "share/man/man1")
INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/unzstd.1 DESTINATION "share/man/man1")
ADD_EXECUTABLE(zstd-frugal ${PROGRAMS_DIR}/zstdcli.c ${PROGRAMS_DIR}/fileio.c)
TARGET_LINK_LIBRARIES(zstd-frugal libzstd_static)
SET_TARGET_PROPERTIES(zstd-frugal PROPERTIES COMPILE_DEFINITIONS "ZSTD_NOBENCH;ZSTD_NODICT")
TARGET_LINK_LIBRARIES(zstd-frugal libzstd_shared)
SET_PROPERTY(TARGET zstd-frugal APPEND PROPERTY COMPILE_DEFINITIONS "ZSTD_NOBENCH;ZSTD_NODICT")
ENDIF (UNIX)
IF (ZSTD_MULTITHREAD_SUPPORT)
ADD_EXECUTABLE(zstdmt ${PROGRAMS_DIR}/zstdcli.c ${PROGRAMS_DIR}/fileio.c ${PROGRAMS_DIR}/bench.c ${PROGRAMS_DIR}/datagen.c ${PROGRAMS_DIR}/dibio.c ${PlatformDependResources})
SET_TARGET_PROPERTIES(zstdmt PROPERTIES COMPILE_DEFINITIONS "ZSTD_MULTITHREAD")
TARGET_LINK_LIBRARIES(zstdmt libzstd_static)
IF (UNIX)
TARGET_LINK_LIBRARIES(zstdmt pthread)
ENDIF (UNIX)
SET_PROPERTY(TARGET zstd APPEND PROPERTY COMPILE_DEFINITIONS "ZSTD_MULTITHREAD")
SET(THREADS_PREFER_PTHREAD_FLAG ON)
FIND_PACKAGE(Threads REQUIRED)
IF (CMAKE_USE_PTHREADS_INIT)
TARGET_LINK_LIBRARIES(zstd ${CMAKE_THREAD_LIBS_INIT})
ELSE()
MESSAGE(SEND_ERROR "ZSTD currently does not support thread libraries other than pthreads")
ENDIF()
ADD_CUSTOM_TARGET(zstdmt ALL ${CMAKE_COMMAND} -E create_symlink zstd zstdmt DEPENDS zstd COMMENT "Creating zstdmt symlink")
INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/zstdmt DESTINATION "bin")
ENDIF (ZSTD_MULTITHREAD_SUPPORT)
OPTION(ZSTD_ZLIB_SUPPORT "ZLIB SUPPORT" OFF)
OPTION(ZSTD_LZMA_SUPPORT "LZMA SUPPORT" OFF)
IF (ZSTD_ZLIB_SUPPORT)
FIND_PACKAGE(ZLIB REQUIRED)
IF (ZLIB_FOUND)
INCLUDE_DIRECTORIES(${ZLIB_INCLUDE_DIRS})
TARGET_LINK_LIBRARIES(zstd ${ZLIB_LIBRARIES})
SET_PROPERTY(TARGET zstd APPEND PROPERTY COMPILE_DEFINITIONS "ZSTD_GZCOMPRESS;ZSTD_GZDECOMPRESS")
ELSE ()
MESSAGE(SEND_ERROR "zlib library is missing")
ENDIF ()
ENDIF ()
IF (ZSTD_LZMA_SUPPORT)
FIND_PACKAGE(LibLZMA REQUIRED)
IF (LIBLZMA_FOUND)
INCLUDE_DIRECTORIES(${LIBLZMA_INCLUDE_DIRS})
TARGET_LINK_LIBRARIES(zstd ${LIBLZMA_LIBRARIES})
SET_PROPERTY(TARGET zstd APPEND PROPERTY COMPILE_DEFINITIONS "ZSTD_LZMACOMPRESS;ZSTD_LZMADECOMPRESS")
ELSE ()
MESSAGE(SEND_ERROR "lzma library is missing")
ENDIF ()
ENDIF ()

View File

@ -34,13 +34,10 @@ PROJECT(tests)
SET(CMAKE_INCLUDE_CURRENT_DIR TRUE)
# Define project root directory
SET(ROOT_DIR ../../..)
# Define programs directory, where sources and header files are located
SET(LIBRARY_DIR ${ROOT_DIR}/lib)
SET(PROGRAMS_DIR ${ROOT_DIR}/programs)
SET(TESTS_DIR ${ROOT_DIR}/tests)
SET(LIBRARY_DIR ${ZSTD_SOURCE_DIR}/lib)
SET(PROGRAMS_DIR ${ZSTD_SOURCE_DIR}/programs)
SET(TESTS_DIR ${ZSTD_SOURCE_DIR}/tests)
INCLUDE_DIRECTORIES(${TESTS_DIR} ${PROGRAMS_DIR} ${LIBRARY_DIR} ${LIBRARY_DIR}/common ${LIBRARY_DIR}/compress ${LIBRARY_DIR}/dictBuilder)
ADD_EXECUTABLE(fullbench ${PROGRAMS_DIR}/datagen.c ${TESTS_DIR}/fullbench.c)

View File

@ -3,7 +3,7 @@ dependencies:
- sudo dpkg --add-architecture i386
- sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test; sudo apt-get -y -qq update
- sudo apt-get -y install gcc-powerpc-linux-gnu gcc-arm-linux-gnueabi libc6-dev-armel-cross gcc-aarch64-linux-gnu libc6-dev-arm64-cross
- sudo apt-get -y install libstdc++-6-dev clang gcc g++ gcc-5 gcc-6
- sudo apt-get -y install libstdc++-6-dev clang gcc g++ gcc-5 gcc-6 zlib1g-dev liblzma-dev
- sudo apt-get -y install linux-libc-dev:i386 libc6-dev-i386
test:

2
contrib/cleanTabs Executable file
View File

@ -0,0 +1,2 @@
#!/bin/sh
sed -i '' $'s/\t/ /g' ../lib/**/*.{h,c} ../programs/*.{h,c} ../tests/*.c ./**/*.{h,cpp} ../examples/*.c ../zlibWrapper/*.{h,c}

View File

@ -19,7 +19,7 @@ void trim(string& s, string characters)
{
size_t p = s.find_first_not_of(characters);
s.erase(0, p);
p = s.find_last_not_of(characters);
if (string::npos != p)
s.erase(p+1);
@ -48,7 +48,7 @@ vector<string> get_lines(vector<string>& input, int& linenum, string terminator)
line = input[linenum];
if (terminator.empty() && line.empty()) { linenum--; break; }
epos = line.find(terminator);
if (!terminator.empty() && epos!=string::npos) {
out.push_back(line);
@ -168,7 +168,11 @@ int main(int argc, char *argv[]) {
sout << "<pre><b>";
for (l=0; l<lines.size(); l++) {
// fprintf(stderr, "line[%d]=%s\n", l, lines[l].c_str());
print_line(sout, lines[l]);
string fline = lines[l];
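// drop the 12-character "ZSTDLIB_API " prefix (or matching indentation) so the manual shows bare declarations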
if (fline.substr(0, 12) == "ZSTDLIB_API " ||
fline.substr(0, 12) == string(12, ' '))
fline = fline.substr(12);
print_line(sout, fline);
}
sout << "</b><p>";
for (l=0; l<comments.size(); l++) {
@ -217,4 +221,4 @@ int main(int argc, char *argv[]) {
ostream << "</html>" << endl << "</body>" << endl;
return 0;
}
}

4
contrib/linux-kernel/.gitignore vendored Normal file
View File

@ -0,0 +1,4 @@
!lib/zstd
!lib/zstd/*
*.o
*.a

View File

@ -0,0 +1,89 @@
# Linux Kernel Patch
There are three pieces: the `zstd_compress` and `zstd_decompress` kernel modules, the BtrFS patch, and the SquashFS patch.
The patches are based on the Linux kernel master branch (version 4.10).
## Zstd Kernel modules
* The header is in `include/linux/zstd.h`.
* It is split up into `zstd_compress` and `zstd_decompress`, which can be loaded independently.
* Source files are in `lib/zstd/`.
* `lib/Kconfig` and `lib/Makefile` need to be modified by applying `lib/Kconfig.diff` and `lib/Makefile.diff` respectively; a sketch of applying them follows this list.
* `test/UserlandTest.cpp` contains tests for the patch in userland by mocking the kernel headers.
It can be run with the following commands:
```
cd test
make googletest
make UserlandTest
./UserlandTest
```
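A minimal sketch of wiring these pieces into a kernel tree (the paths are assumptions: `ZSTD` points at a checkout of this repository, and the kernel tree is wherever you keep it):
```
ZSTD=/path/to/zstd                                  # this repository (assumption)
cd /path/to/linux                                   # kernel tree (assumption)
cp $ZSTD/contrib/linux-kernel/include/linux/zstd.h include/linux/
cp -r $ZSTD/contrib/linux-kernel/lib/zstd lib/
patch lib/Kconfig  < $ZSTD/contrib/linux-kernel/lib/Kconfig.diff
patch lib/Makefile < $ZSTD/contrib/linux-kernel/lib/Makefile.diff
```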
## BtrFS
* The patch is located in `btrfs.diff`.
* Additionally `fs/btrfs/zstd.c` is provided as a source for convenience.
* The patch appears to work: it doesn't crash the kernel, and it compresses at the expected speeds and ratios.
It could still use more testing of fringe features, like printing options.
### Benchmarks
Benchmarks were run on an Ubuntu 14.04 VM with 2 cores and 4 GiB of RAM.
The VM runs on a MacBook Pro with a 3.1 GHz Intel Core i7 processor,
16 GB of RAM, and an SSD.
The kernel was built from the master branch (version 4.10) with the patch applied.
The compression benchmark writes 10 copies of the
unzipped [silesia corpus](http://mattmahoney.net/dc/silesia.html) into a BtrFS
filesystem mounted with `-o compress-force={none, lzo, zlib, zstd}`.
The decompression benchmark times how long it takes to `tar` all 10 copies
into `/dev/null`.
The compression ratio is measured by comparing the output of `df` and `du`.
See `btrfs-benchmark.sh` for details.
| Algorithm | Compression ratio | Compression speed | Decompression speed |
|-----------|-------------------|-------------------|---------------------|
| None | 0.99 | 504 MB/s | 686 MB/s |
| lzo | 1.66 | 398 MB/s | 442 MB/s |
| zlib | 2.58 | 65 MB/s | 241 MB/s |
| zstd 1 | 2.57 | 260 MB/s | 383 MB/s |
| zstd 3 | 2.71 | 174 MB/s | 408 MB/s |
| zstd 6 | 2.87 | 70 MB/s | 398 MB/s |
| zstd 9 | 2.92 | 43 MB/s | 406 MB/s |
| zstd 12 | 2.93 | 21 MB/s | 408 MB/s |
| zstd 15 | 3.01 | 11 MB/s | 354 MB/s |
## SquashFS
* The patch is located in `squashfs.diff`
* Additionally `fs/squashfs/zstd_wrapper.c` is provided as a source for convenience.
* The patch has been tested on a 4.10 kernel.
### Benchmarks
Benchmarks were run on an Ubuntu 14.04 VM with 2 cores and 4 GiB of RAM.
The VM runs on a MacBook Pro with a 3.1 GHz Intel Core i7 processor,
16 GB of RAM, and an SSD.
The kernel was built from the master branch (version 4.10) with the patch applied.
The compression benchmark input is the file tree from the SquashFS archive
found in the Ubuntu 16.10 desktop image (ubuntu-16.10-desktop-amd64.iso).
The compression benchmark uses mksquashfs with the default block size (128 KB)
and various compression algorithms/compression levels.
`xz` and `zstd` are also benchmarked with 256 KB blocks.
The decompression benchmark times how long it takes to `tar` the file tree
into `/dev/null`.
See `squashfs-benchmark.sh` for details; a sketch of one invocation follows the table below.
| Algorithm | Compression ratio | Compression speed | Decompression speed |
|----------------|-------------------|-------------------|---------------------|
| gzip | 2.92 | 15 MB/s | 128 MB/s |
| lzo | 2.64 | 9.5 MB/s | 217 MB/s |
| lz4 | 2.12 | 94 MB/s | 218 MB/s |
| xz | 3.43 | 5.5 MB/s | 35 MB/s |
| xz 256 KB | 3.53 | 5.4 MB/s | 40 MB/s |
| zstd 1 | 2.71 | 96 MB/s | 210 MB/s |
| zstd 5 | 2.93 | 69 MB/s | 198 MB/s |
| zstd 10 | 3.01 | 41 MB/s | 225 MB/s |
| zstd 15 | 3.13 | 11.4 MB/s | 224 MB/s |
| zstd 16 256 KB | 3.24 | 8.1 MB/s | 210 MB/s |
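For reference, a single data point above corresponds to an invocation like this (a sketch only: `-comp zstd` and `-Xcompression-level` assume a mksquashfs built with zstd support, and `-b` takes the block size in bytes):
```
mksquashfs squashfs-root out.squashfs -comp zstd -Xcompression-level 16 -b 262144
sudo mount -t squashfs -o loop out.squashfs /mnt/squashfs
time sudo tar -c /mnt/squashfs 2> /dev/null | wc -c > /dev/null
```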

View File

@ -0,0 +1,104 @@
#!/bin/sh
set -e
# Benchmarks run on a Ubuntu 14.04 VM with 2 cores and 4 GiB of RAM.
# The VM is running on a Macbook Pro with a 3.1 GHz Intel Core i7 processor and
# 16 GB of RAM and an SSD.
# silesia is a directory that can be downloaded from
# http://mattmahoney.net/dc/silesia.html
# ls -l silesia/
# total 203M
# -rwxr-xr-x 1 terrelln 9.8M Apr 12 2002 dickens
# -rwxr-xr-x 1 terrelln 49M May 31 2002 mozilla
# -rwxr-xr-x 1 terrelln 9.6M Mar 20 2003 mr
# -rwxr-xr-x 1 terrelln 32M Apr 2 2002 nci
# -rwxr-xr-x 1 terrelln 5.9M Jul 4 2002 ooffice
# -rwxr-xr-x 1 terrelln 9.7M Apr 11 2002 osdb
# -rwxr-xr-x 1 terrelln 6.4M Apr 2 2002 reymont
# -rwxr-xr-x 1 terrelln 21M Mar 25 2002 samba
# -rwxr-xr-x 1 terrelln 7.0M Mar 24 2002 sao
# -rwxr-xr-x 1 terrelln 40M Mar 25 2002 webster
# -rwxr-xr-x 1 terrelln 8.1M Apr 4 2002 x-ray
# -rwxr-xr-x 1 terrelln 5.1M Nov 30 2000 xml
# $HOME is on a ext4 filesystem
BENCHMARK_DIR="$HOME/silesia/"
N=10
# Normalize the environment
sudo umount /mnt/btrfs 2> /dev/null > /dev/null || true
sudo mount -t btrfs $@ /dev/sda3 /mnt/btrfs
sudo rm -rf /mnt/btrfs/*
sync
sudo umount /mnt/btrfs
sudo mount -t btrfs $@ /dev/sda3 /mnt/btrfs
# Run the benchmark
echo "Compression"
time sh -c "for i in \$(seq $N); do sudo cp -r $BENCHMARK_DIR /mnt/btrfs/\$i; done; sync"
echo "Approximate compression ratio"
printf "%d / %d\n" \
$(df /mnt/btrfs --output=used -B 1 | tail -n 1) \
$(sudo du /mnt/btrfs -b -d 0 | tr '\t' '\n' | head -n 1);
# Unmount and remount to avoid any caching
sudo umount /mnt/btrfs
sudo mount -t btrfs $@ /dev/sda3 /mnt/btrfs
echo "Decompression"
time sudo tar -c /mnt/btrfs 2> /dev/null | wc -c > /dev/null
sudo rm -rf /mnt/btrfs/*
sudo umount /mnt/btrfs
# Run for each of -o compress-force={none, lzo, zlib, zstd} 5 times and take the
# min time and ratio.
# Ran zstd with compression levels {1, 3, 6, 9, 12, 15}.
# Original size: 2119415342 B (using du /mnt/btrfs)
# none
# compress: 4.205 s
# decompress: 3.090 s
# ratio: 0.99
# lzo
# compress: 5.328 s
# decompress: 4.793 s
# ratio: 1.66
# zlib
# compress: 32.588 s
# decompress: 8.791 s
# ratio : 2.58
# zstd 1
# compress: 8.147 s
# decompress: 5.527 s
# ratio : 2.57
# zstd 3
# compress: 12.207 s
# decompress: 5.195 s
# ratio : 2.71
# zstd 6
# compress: 30.253 s
# decompress: 5.324 s
# ratio : 2.87
# zstd 9
# compress: 49.659 s
# decompress: 5.220 s
# ratio : 2.92
# zstd 12
# compress: 99.245 s
# decompress: 5.193 s
# ratio : 2.93
# zstd 15
# compress: 196.997 s
# decompress: 5.992 s
# ratio : 3.01

View File

@ -0,0 +1,633 @@
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index 80e9c18..a26c63b 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -6,6 +6,8 @@ config BTRFS_FS
select ZLIB_DEFLATE
select LZO_COMPRESS
select LZO_DECOMPRESS
+ select ZSTD_COMPRESS
+ select ZSTD_DECOMPRESS
select RAID6_PQ
select XOR_BLOCKS
select SRCU
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 128ce17..962a95a 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -6,7 +6,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
transaction.o inode.o file.o tree-defrag.o \
extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \
extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
- export.o tree-log.o free-space-cache.o zlib.o lzo.o \
+ export.o tree-log.o free-space-cache.o zlib.o lzo.o zstd.o \
compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \
uuid-tree.o props.o hash.o free-space-tree.o
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index c7721a6..66d4ced 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -761,6 +761,7 @@ static struct {
static const struct btrfs_compress_op * const btrfs_compress_op[] = {
&btrfs_zlib_compress,
&btrfs_lzo_compress,
+ &btrfs_zstd_compress,
};
void __init btrfs_init_compress(void)
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 39ec43a..d99fc21 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -60,8 +60,9 @@ enum btrfs_compression_type {
BTRFS_COMPRESS_NONE = 0,
BTRFS_COMPRESS_ZLIB = 1,
BTRFS_COMPRESS_LZO = 2,
- BTRFS_COMPRESS_TYPES = 2,
- BTRFS_COMPRESS_LAST = 3,
+ BTRFS_COMPRESS_ZSTD = 3,
+ BTRFS_COMPRESS_TYPES = 3,
+ BTRFS_COMPRESS_LAST = 4,
};
struct btrfs_compress_op {
@@ -92,5 +93,6 @@ struct btrfs_compress_op {
extern const struct btrfs_compress_op btrfs_zlib_compress;
extern const struct btrfs_compress_op btrfs_lzo_compress;
+extern const struct btrfs_compress_op btrfs_zstd_compress;
#endif
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 29b7fc2..878b23b9 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -270,6 +270,7 @@ struct btrfs_super_block {
BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \
BTRFS_FEATURE_INCOMPAT_BIG_METADATA | \
BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO | \
+ BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD | \
BTRFS_FEATURE_INCOMPAT_RAID56 | \
BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF | \
BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA | \
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 08b74da..0c43e4e 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2853,6 +2853,8 @@ int open_ctree(struct super_block *sb,
features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
+ else if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
+ features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
btrfs_info(fs_info, "has skinny extents");
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index dabfc7a..d8ea727 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -327,8 +327,10 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
comp = "lzo";
- else
+ else if (fs_info->compress_type == BTRFS_COMPRESS_ZLIB)
comp = "zlib";
+ else
+ comp = "zstd";
ret = btrfs_set_prop(inode, "btrfs.compression",
comp, strlen(comp), 0);
if (ret)
@@ -1463,6 +1465,8 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
if (range->compress_type == BTRFS_COMPRESS_LZO) {
btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
+ } else if (range->compress_type == BTRFS_COMPRESS_ZSTD) {
+ btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
}
ret = defrag_count;
diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
index d6cb155..162105f 100644
--- a/fs/btrfs/props.c
+++ b/fs/btrfs/props.c
@@ -383,6 +383,8 @@ static int prop_compression_validate(const char *value, size_t len)
return 0;
else if (!strncmp("zlib", value, len))
return 0;
+ else if (!strncmp("zstd", value, len))
+ return 0;
return -EINVAL;
}
@@ -405,6 +407,8 @@ static int prop_compression_apply(struct inode *inode,
type = BTRFS_COMPRESS_LZO;
else if (!strncmp("zlib", value, len))
type = BTRFS_COMPRESS_ZLIB;
+ else if (!strncmp("zstd", value, len))
+ type = BTRFS_COMPRESS_ZSTD;
else
return -EINVAL;
@@ -422,6 +426,8 @@ static const char *prop_compression_extract(struct inode *inode)
return "zlib";
case BTRFS_COMPRESS_LZO:
return "lzo";
+ case BTRFS_COMPRESS_ZSTD:
+ return "zstd";
}
return NULL;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index da687dc..b064456 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -513,6 +513,14 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
btrfs_clear_opt(info->mount_opt, NODATASUM);
btrfs_set_fs_incompat(info, COMPRESS_LZO);
no_compress = 0;
+ } else if (strcmp(args[0].from, "zstd") == 0) {
+ compress_type = "zstd";
+ info->compress_type = BTRFS_COMPRESS_ZSTD;
+ btrfs_set_opt(info->mount_opt, COMPRESS);
+ btrfs_clear_opt(info->mount_opt, NODATACOW);
+ btrfs_clear_opt(info->mount_opt, NODATASUM);
+ btrfs_set_fs_incompat(info, COMPRESS_ZSTD);
+ no_compress = 0;
} else if (strncmp(args[0].from, "no", 2) == 0) {
compress_type = "no";
btrfs_clear_opt(info->mount_opt, COMPRESS);
@@ -1230,8 +1238,10 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
if (btrfs_test_opt(info, COMPRESS)) {
if (info->compress_type == BTRFS_COMPRESS_ZLIB)
compress_type = "zlib";
- else
+ else if (info->compress_type == BTRFS_COMPRESS_LZO)
compress_type = "lzo";
+ else
+ compress_type = "zstd";
if (btrfs_test_opt(info, FORCE_COMPRESS))
seq_printf(seq, ",compress-force=%s", compress_type);
else
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 1f157fb..b0dec90 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -200,6 +200,7 @@ BTRFS_FEAT_ATTR_INCOMPAT(mixed_backref, MIXED_BACKREF);
BTRFS_FEAT_ATTR_INCOMPAT(default_subvol, DEFAULT_SUBVOL);
BTRFS_FEAT_ATTR_INCOMPAT(mixed_groups, MIXED_GROUPS);
BTRFS_FEAT_ATTR_INCOMPAT(compress_lzo, COMPRESS_LZO);
+BTRFS_FEAT_ATTR_INCOMPAT(compress_zstd, COMPRESS_ZSTD);
BTRFS_FEAT_ATTR_INCOMPAT(big_metadata, BIG_METADATA);
BTRFS_FEAT_ATTR_INCOMPAT(extended_iref, EXTENDED_IREF);
BTRFS_FEAT_ATTR_INCOMPAT(raid56, RAID56);
@@ -212,6 +213,7 @@ static struct attribute *btrfs_supported_feature_attrs[] = {
BTRFS_FEAT_ATTR_PTR(default_subvol),
BTRFS_FEAT_ATTR_PTR(mixed_groups),
BTRFS_FEAT_ATTR_PTR(compress_lzo),
+ BTRFS_FEAT_ATTR_PTR(compress_zstd),
BTRFS_FEAT_ATTR_PTR(big_metadata),
BTRFS_FEAT_ATTR_PTR(extended_iref),
BTRFS_FEAT_ATTR_PTR(raid56),
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
new file mode 100644
index 0000000..010548c
--- /dev/null
+++ b/fs/btrfs/zstd.c
@@ -0,0 +1,415 @@
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/pagemap.h>
+#include <linux/bio.h>
+#include <linux/zstd.h>
+#include "compression.h"
+
+#define ZSTD_BTRFS_MAX_WINDOWLOG 17
+#define ZSTD_BTRFS_MAX_INPUT (1 << ZSTD_BTRFS_MAX_WINDOWLOG)
+
+static ZSTD_parameters zstd_get_btrfs_parameters(size_t src_len)
+{
+ ZSTD_parameters params = ZSTD_getParams(3, src_len, 0);
+
+ if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG)
+ params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG;
+ WARN_ON(src_len > ZSTD_BTRFS_MAX_INPUT);
+ return params;
+}
+
+struct workspace {
+ void *mem;
+ size_t size;
+ char *buf;
+ struct list_head list;
+};
+
+static void zstd_free_workspace(struct list_head *ws)
+{
+ struct workspace *workspace = list_entry(ws, struct workspace, list);
+
+ vfree(workspace->mem);
+ kfree(workspace->buf);
+ kfree(workspace);
+}
+
+static struct list_head *zstd_alloc_workspace(void)
+{
+ ZSTD_parameters params =
+ zstd_get_btrfs_parameters(ZSTD_BTRFS_MAX_INPUT);
+ struct workspace *workspace;
+
+ workspace = kzalloc(sizeof(*workspace), GFP_NOFS);
+ if (!workspace)
+ return ERR_PTR(-ENOMEM);
+
+ workspace->size = max_t(size_t,
+ ZSTD_CStreamWorkspaceBound(params.cParams),
+ ZSTD_DStreamWorkspaceBound(ZSTD_BTRFS_MAX_INPUT));
+ workspace->mem = vmalloc(workspace->size);
+ workspace->buf = kmalloc(PAGE_SIZE, GFP_NOFS);
+ if (!workspace->mem || !workspace->buf)
+ goto fail;
+
+ INIT_LIST_HEAD(&workspace->list);
+
+ return &workspace->list;
+fail:
+ zstd_free_workspace(&workspace->list);
+ return ERR_PTR(-ENOMEM);
+}
+
+static int zstd_compress_pages(struct list_head *ws,
+ struct address_space *mapping,
+ u64 start,
+ struct page **pages,
+ unsigned long *out_pages,
+ unsigned long *total_in,
+ unsigned long *total_out)
+{
+ struct workspace *workspace = list_entry(ws, struct workspace, list);
+ ZSTD_CStream *stream;
+ int ret = 0;
+ int nr_pages = 0;
+ struct page *in_page = NULL; /* The current page to read */
+ struct page *out_page = NULL; /* The current page to write to */
+ ZSTD_inBuffer in_buf = { NULL, 0, 0 };
+ ZSTD_outBuffer out_buf = { NULL, 0, 0 };
+ unsigned long tot_in = 0;
+ unsigned long tot_out = 0;
+ unsigned long len = *total_out;
+ const unsigned long nr_dest_pages = *out_pages;
+ unsigned long max_out = nr_dest_pages * PAGE_SIZE;
+ ZSTD_parameters params = zstd_get_btrfs_parameters(len);
+
+ *out_pages = 0;
+ *total_out = 0;
+ *total_in = 0;
+
+ /* Initialize the stream */
+ stream = ZSTD_initCStream(params, len, workspace->mem,
+ workspace->size);
+ if (!stream) {
+ pr_warn("BTRFS: ZSTD_initCStream failed\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ /* map in the first page of input data */
+ in_page = find_get_page(mapping, start >> PAGE_SHIFT);
+ in_buf.src = kmap(in_page);
+ in_buf.pos = 0;
+ in_buf.size = min_t(size_t, len, PAGE_SIZE);
+
+
+ /* Allocate and map in the output buffer */
+ out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+ if (out_page == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ pages[nr_pages++] = out_page;
+ out_buf.dst = kmap(out_page);
+ out_buf.pos = 0;
+ out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
+
+ while (1) {
+ size_t ret2;
+
+ ret2 = ZSTD_compressStream(stream, &out_buf, &in_buf);
+ if (ZSTD_isError(ret2)) {
+ pr_debug("BTRFS: ZSTD_compressStream returned %d\n",
+ ZSTD_getErrorCode(ret2));
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Check to see if we are making it bigger */
+ if (tot_in + in_buf.pos > 8192 &&
+ tot_in + in_buf.pos <
+ tot_out + out_buf.pos) {
+ ret = -E2BIG;
+ goto out;
+ }
+
+ /* We've reached the end of our output range */
+ if (out_buf.pos >= max_out) {
+ tot_out += out_buf.pos;
+ ret = -E2BIG;
+ goto out;
+ }
+
+ /* Check if we need more output space */
+ if (out_buf.pos == out_buf.size) {
+ tot_out += PAGE_SIZE;
+ max_out -= PAGE_SIZE;
+ kunmap(out_page);
+ if (nr_pages == nr_dest_pages) {
+ out_page = NULL;
+ ret = -E2BIG;
+ goto out;
+ }
+ out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+ if (out_page == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ pages[nr_pages++] = out_page;
+ out_buf.dst = kmap(out_page);
+ out_buf.pos = 0;
+ out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
+ }
+
+ /* We've reached the end of the input */
+ if (in_buf.pos >= len) {
+ tot_in += in_buf.pos;
+ break;
+ }
+
+ /* Check if we need more input */
+ if (in_buf.pos == in_buf.size) {
+ tot_in += PAGE_SIZE;
+ kunmap(in_page);
+ put_page(in_page);
+
+ start += PAGE_SIZE;
+ len -= PAGE_SIZE;
+ in_page = find_get_page(mapping, start >> PAGE_SHIFT);
+ in_buf.src = kmap(in_page);
+ in_buf.pos = 0;
+ in_buf.size = min_t(size_t, len, PAGE_SIZE);
+ }
+ }
+ while (1) {
+ size_t ret2;
+
+ ret2 = ZSTD_endStream(stream, &out_buf);
+ if (ZSTD_isError(ret2)) {
+ pr_debug("BTRFS: ZSTD_endStream returned %d\n",
+ ZSTD_getErrorCode(ret2));
+ ret = -EIO;
+ goto out;
+ }
+ if (ret2 == 0) {
+ tot_out += out_buf.pos;
+ break;
+ }
+ if (out_buf.pos >= max_out) {
+ tot_out += out_buf.pos;
+ ret = -E2BIG;
+ goto out;
+ }
+
+ tot_out += PAGE_SIZE;
+ max_out -= PAGE_SIZE;
+ kunmap(out_page);
+ if (nr_pages == nr_dest_pages) {
+ out_page = NULL;
+ ret = -E2BIG;
+ goto out;
+ }
+ out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+ if (out_page == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ pages[nr_pages++] = out_page;
+ out_buf.dst = kmap(out_page);
+ out_buf.pos = 0;
+ out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
+ }
+
+ if (tot_out >= tot_in) {
+ ret = -E2BIG;
+ goto out;
+ }
+
+ ret = 0;
+ *total_in = tot_in;
+ *total_out = tot_out;
+out:
+ *out_pages = nr_pages;
+ /* Cleanup */
+ if (in_page) {
+ kunmap(in_page);
+ put_page(in_page);
+ }
+ if (out_page)
+ kunmap(out_page);
+ return ret;
+}
+
+static int zstd_decompress_bio(struct list_head *ws, struct page **pages_in,
+ u64 disk_start,
+ struct bio *orig_bio,
+ size_t srclen)
+{
+ struct workspace *workspace = list_entry(ws, struct workspace, list);
+ ZSTD_DStream *stream;
+ int ret = 0;
+ unsigned long page_in_index = 0;
+ unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
+ unsigned long buf_start;
+ unsigned long total_out = 0;
+ ZSTD_inBuffer in_buf = { NULL, 0, 0 };
+ ZSTD_outBuffer out_buf = { NULL, 0, 0 };
+
+ stream = ZSTD_initDStream(
+ ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
+ if (!stream) {
+ pr_debug("BTRFS: ZSTD_initDStream failed\n");
+ ret = -EIO;
+ goto done;
+ }
+
+ in_buf.src = kmap(pages_in[page_in_index]);
+ in_buf.pos = 0;
+ in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
+
+ out_buf.dst = workspace->buf;
+ out_buf.pos = 0;
+ out_buf.size = PAGE_SIZE;
+
+ while (1) {
+ size_t ret2;
+
+ ret2 = ZSTD_decompressStream(stream, &out_buf, &in_buf);
+ if (ZSTD_isError(ret2)) {
+ pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
+ ZSTD_getErrorCode(ret2));
+ ret = -EIO;
+ goto done;
+ }
+ buf_start = total_out;
+ total_out += out_buf.pos;
+ out_buf.pos = 0;
+
+ ret = btrfs_decompress_buf2page(out_buf.dst, buf_start,
+ total_out, disk_start, orig_bio);
+ if (ret == 0)
+ break;
+
+ if (in_buf.pos >= srclen)
+ break;
+
+ /* Check if we've hit the end of a frame */
+ if (ret2 == 0)
+ break;
+
+ if (in_buf.pos == in_buf.size) {
+ kunmap(pages_in[page_in_index++]);
+ if (page_in_index >= total_pages_in) {
+ in_buf.src = NULL;
+ ret = -EIO;
+ goto done;
+ }
+ srclen -= PAGE_SIZE;
+ in_buf.src = kmap(pages_in[page_in_index]);
+ in_buf.pos = 0;
+ in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
+ }
+ }
+ ret = 0;
+ zero_fill_bio(orig_bio);
+done:
+ if (in_buf.src)
+ kunmap(pages_in[page_in_index]);
+ return ret;
+}
+
+static int zstd_decompress(struct list_head *ws, unsigned char *data_in,
+ struct page *dest_page,
+ unsigned long start_byte,
+ size_t srclen, size_t destlen)
+{
+ struct workspace *workspace = list_entry(ws, struct workspace, list);
+ ZSTD_DStream *stream;
+ int ret = 0;
+ size_t ret2;
+ ZSTD_inBuffer in_buf = { NULL, 0, 0 };
+ ZSTD_outBuffer out_buf = { NULL, 0, 0 };
+ unsigned long total_out = 0;
+ unsigned long pg_offset = 0;
+ char *kaddr;
+
+ stream = ZSTD_initDStream(
+ ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
+ if (!stream) {
+ pr_warn("BTRFS: ZSTD_initDStream failed\n");
+ ret = -EIO;
+ goto finish;
+ }
+
+ destlen = min_t(size_t, destlen, PAGE_SIZE);
+
+ in_buf.src = data_in;
+ in_buf.pos = 0;
+ in_buf.size = srclen;
+
+ out_buf.dst = workspace->buf;
+ out_buf.pos = 0;
+ out_buf.size = PAGE_SIZE;
+
+ ret2 = 1;
+ while (pg_offset < destlen && in_buf.pos < in_buf.size) {
+ unsigned long buf_start;
+ unsigned long buf_offset;
+ unsigned long bytes;
+
+ /* Check if the frame is over and we still need more input */
+ if (ret2 == 0) {
+ pr_debug("BTRFS: ZSTD_decompressStream ended early\n");
+ ret = -EIO;
+ goto finish;
+ }
+ ret2 = ZSTD_decompressStream(stream, &out_buf, &in_buf);
+ if (ZSTD_isError(ret2)) {
+ pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
+ ZSTD_getErrorCode(ret2));
+ ret = -EIO;
+ goto finish;
+ }
+
+ buf_start = total_out;
+ total_out += out_buf.pos;
+ out_buf.pos = 0;
+
+ if (total_out <= start_byte)
+ continue;
+
+ if (total_out > start_byte && buf_start < start_byte)
+ buf_offset = start_byte - buf_start;
+ else
+ buf_offset = 0;
+
+ bytes = min_t(unsigned long, destlen - pg_offset,
+ out_buf.size - buf_offset);
+
+ kaddr = kmap_atomic(dest_page);
+ memcpy(kaddr + pg_offset, out_buf.dst + buf_offset, bytes);
+ kunmap_atomic(kaddr);
+
+ pg_offset += bytes;
+ }
+ ret = 0;
+finish:
+ if (pg_offset < destlen) {
+ kaddr = kmap_atomic(dest_page);
+ memset(kaddr + pg_offset, 0, destlen - pg_offset);
+ kunmap_atomic(kaddr);
+ }
+ return ret;
+}
+
+const struct btrfs_compress_op btrfs_zstd_compress = {
+ .alloc_workspace = zstd_alloc_workspace,
+ .free_workspace = zstd_free_workspace,
+ .compress_pages = zstd_compress_pages,
+ .decompress_bio = zstd_decompress_bio,
+ .decompress = zstd_decompress,
+};
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index db4c253..f26c34f 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -255,13 +255,7 @@ struct btrfs_ioctl_fs_info_args {
#define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (1ULL << 1)
#define BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS (1ULL << 2)
#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO (1ULL << 3)
-/*
- * some patches floated around with a second compression method
- * lets save that incompat here for when they do get in
- * Note we don't actually support it, we're just reserving the
- * number
- */
-#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZOv2 (1ULL << 4)
+#define BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD (1ULL << 4)
/*
* older kernels tried to do bigger metadata blocks, but the

View File

@ -0,0 +1,415 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/zstd.h>
#include "compression.h"
#define ZSTD_BTRFS_MAX_WINDOWLOG 17
#define ZSTD_BTRFS_MAX_INPUT (1 << ZSTD_BTRFS_MAX_WINDOWLOG)
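/*
 * Cap the match window at 2^17 = 128 KB: btrfs compresses at most 128 KB
 * at a time, and a bounded windowLog keeps the workspace allocation (sized
 * below via ZSTD_CStreamWorkspaceBound/ZSTD_DStreamWorkspaceBound) small.
 */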
static ZSTD_parameters zstd_get_btrfs_parameters(size_t src_len)
{
ZSTD_parameters params = ZSTD_getParams(3, src_len, 0);
if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG)
params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG;
WARN_ON(src_len > ZSTD_BTRFS_MAX_INPUT);
return params;
}
struct workspace {
void *mem;
size_t size;
char *buf;
struct list_head list;
};
static void zstd_free_workspace(struct list_head *ws)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
vfree(workspace->mem);
kfree(workspace->buf);
kfree(workspace);
}
static struct list_head *zstd_alloc_workspace(void)
{
ZSTD_parameters params =
zstd_get_btrfs_parameters(ZSTD_BTRFS_MAX_INPUT);
struct workspace *workspace;
workspace = kzalloc(sizeof(*workspace), GFP_NOFS);
if (!workspace)
return ERR_PTR(-ENOMEM);
workspace->size = max_t(size_t,
ZSTD_CStreamWorkspaceBound(params.cParams),
ZSTD_DStreamWorkspaceBound(ZSTD_BTRFS_MAX_INPUT));
workspace->mem = vmalloc(workspace->size);
workspace->buf = kmalloc(PAGE_SIZE, GFP_NOFS);
if (!workspace->mem || !workspace->buf)
goto fail;
INIT_LIST_HEAD(&workspace->list);
return &workspace->list;
fail:
zstd_free_workspace(&workspace->list);
return ERR_PTR(-ENOMEM);
}
static int zstd_compress_pages(struct list_head *ws,
struct address_space *mapping,
u64 start,
struct page **pages,
unsigned long *out_pages,
unsigned long *total_in,
unsigned long *total_out)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
ZSTD_CStream *stream;
int ret = 0;
int nr_pages = 0;
struct page *in_page = NULL; /* The current page to read */
struct page *out_page = NULL; /* The current page to write to */
ZSTD_inBuffer in_buf = { NULL, 0, 0 };
ZSTD_outBuffer out_buf = { NULL, 0, 0 };
unsigned long tot_in = 0;
unsigned long tot_out = 0;
unsigned long len = *total_out;
const unsigned long nr_dest_pages = *out_pages;
unsigned long max_out = nr_dest_pages * PAGE_SIZE;
ZSTD_parameters params = zstd_get_btrfs_parameters(len);
*out_pages = 0;
*total_out = 0;
*total_in = 0;
/* Initialize the stream */
stream = ZSTD_initCStream(params, len, workspace->mem,
workspace->size);
if (!stream) {
pr_warn("BTRFS: ZSTD_initCStream failed\n");
ret = -EIO;
goto out;
}
/* map in the first page of input data */
in_page = find_get_page(mapping, start >> PAGE_SHIFT);
in_buf.src = kmap(in_page);
in_buf.pos = 0;
in_buf.size = min_t(size_t, len, PAGE_SIZE);
/* Allocate and map in the output buffer */
out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
if (out_page == NULL) {
ret = -ENOMEM;
goto out;
}
pages[nr_pages++] = out_page;
out_buf.dst = kmap(out_page);
out_buf.pos = 0;
out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
while (1) {
size_t ret2;
ret2 = ZSTD_compressStream(stream, &out_buf, &in_buf);
if (ZSTD_isError(ret2)) {
pr_debug("BTRFS: ZSTD_compressStream returned %d\n",
ZSTD_getErrorCode(ret2));
ret = -EIO;
goto out;
}
/* Check to see if we are making it bigger */
if (tot_in + in_buf.pos > 8192 &&
tot_in + in_buf.pos <
tot_out + out_buf.pos) {
ret = -E2BIG;
goto out;
}
/* We've reached the end of our output range */
if (out_buf.pos >= max_out) {
tot_out += out_buf.pos;
ret = -E2BIG;
goto out;
}
/* Check if we need more output space */
if (out_buf.pos == out_buf.size) {
tot_out += PAGE_SIZE;
max_out -= PAGE_SIZE;
kunmap(out_page);
if (nr_pages == nr_dest_pages) {
out_page = NULL;
ret = -E2BIG;
goto out;
}
out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
if (out_page == NULL) {
ret = -ENOMEM;
goto out;
}
pages[nr_pages++] = out_page;
out_buf.dst = kmap(out_page);
out_buf.pos = 0;
out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
}
/* We've reached the end of the input */
if (in_buf.pos >= len) {
tot_in += in_buf.pos;
break;
}
/* Check if we need more input */
if (in_buf.pos == in_buf.size) {
tot_in += PAGE_SIZE;
kunmap(in_page);
put_page(in_page);
start += PAGE_SIZE;
len -= PAGE_SIZE;
in_page = find_get_page(mapping, start >> PAGE_SHIFT);
in_buf.src = kmap(in_page);
in_buf.pos = 0;
in_buf.size = min_t(size_t, len, PAGE_SIZE);
}
}
while (1) {
size_t ret2;
ret2 = ZSTD_endStream(stream, &out_buf);
if (ZSTD_isError(ret2)) {
pr_debug("BTRFS: ZSTD_endStream returned %d\n",
ZSTD_getErrorCode(ret2));
ret = -EIO;
goto out;
}
if (ret2 == 0) {
tot_out += out_buf.pos;
break;
}
if (out_buf.pos >= max_out) {
tot_out += out_buf.pos;
ret = -E2BIG;
goto out;
}
tot_out += PAGE_SIZE;
max_out -= PAGE_SIZE;
kunmap(out_page);
if (nr_pages == nr_dest_pages) {
out_page = NULL;
ret = -E2BIG;
goto out;
}
out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
if (out_page == NULL) {
ret = -ENOMEM;
goto out;
}
pages[nr_pages++] = out_page;
out_buf.dst = kmap(out_page);
out_buf.pos = 0;
out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
}
if (tot_out >= tot_in) {
ret = -E2BIG;
goto out;
}
ret = 0;
*total_in = tot_in;
*total_out = tot_out;
out:
*out_pages = nr_pages;
/* Cleanup */
if (in_page) {
kunmap(in_page);
put_page(in_page);
}
if (out_page)
kunmap(out_page);
return ret;
}
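/*
 * Decompress a whole compressed extent: walk the input pages in @pages_in
 * and hand the decompressed bytes to the pages of @orig_bio via
 * btrfs_decompress_buf2page(). Returns 0 on success, -EIO on stream errors
 * or truncated input.
 */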
static int zstd_decompress_bio(struct list_head *ws, struct page **pages_in,
u64 disk_start,
struct bio *orig_bio,
size_t srclen)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
ZSTD_DStream *stream;
int ret = 0;
unsigned long page_in_index = 0;
unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
unsigned long buf_start;
unsigned long total_out = 0;
ZSTD_inBuffer in_buf = { NULL, 0, 0 };
ZSTD_outBuffer out_buf = { NULL, 0, 0 };
stream = ZSTD_initDStream(
ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
if (!stream) {
pr_debug("BTRFS: ZSTD_initDStream failed\n");
ret = -EIO;
goto done;
}
in_buf.src = kmap(pages_in[page_in_index]);
in_buf.pos = 0;
in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
out_buf.dst = workspace->buf;
out_buf.pos = 0;
out_buf.size = PAGE_SIZE;
while (1) {
size_t ret2;
ret2 = ZSTD_decompressStream(stream, &out_buf, &in_buf);
if (ZSTD_isError(ret2)) {
pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
ZSTD_getErrorCode(ret2));
ret = -EIO;
goto done;
}
buf_start = total_out;
total_out += out_buf.pos;
out_buf.pos = 0;
ret = btrfs_decompress_buf2page(out_buf.dst, buf_start,
total_out, disk_start, orig_bio);
if (ret == 0)
break;
if (in_buf.pos >= srclen)
break;
/* Check if we've hit the end of a frame */
if (ret2 == 0)
break;
if (in_buf.pos == in_buf.size) {
kunmap(pages_in[page_in_index++]);
if (page_in_index >= total_pages_in) {
in_buf.src = NULL;
ret = -EIO;
goto done;
}
srclen -= PAGE_SIZE;
in_buf.src = kmap(pages_in[page_in_index]);
in_buf.pos = 0;
in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
}
}
ret = 0;
zero_fill_bio(orig_bio);
done:
if (in_buf.src)
kunmap(pages_in[page_in_index]);
return ret;
}
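/*
 * Decompress into a single page: decode from @data_in, skip the first
 * @start_byte bytes of decompressed output, copy what fits into
 * @dest_page, and zero-fill the remainder. Returns 0 on success, -EIO on
 * stream errors.
 */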
static int zstd_decompress(struct list_head *ws, unsigned char *data_in,
struct page *dest_page,
unsigned long start_byte,
size_t srclen, size_t destlen)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
ZSTD_DStream *stream;
int ret = 0;
size_t ret2;
ZSTD_inBuffer in_buf = { NULL, 0, 0 };
ZSTD_outBuffer out_buf = { NULL, 0, 0 };
unsigned long total_out = 0;
unsigned long pg_offset = 0;
char *kaddr;
stream = ZSTD_initDStream(
ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
if (!stream) {
pr_warn("BTRFS: ZSTD_initDStream failed\n");
ret = -EIO;
goto finish;
}
destlen = min_t(size_t, destlen, PAGE_SIZE);
in_buf.src = data_in;
in_buf.pos = 0;
in_buf.size = srclen;
out_buf.dst = workspace->buf;
out_buf.pos = 0;
out_buf.size = PAGE_SIZE;
ret2 = 1;
while (pg_offset < destlen && in_buf.pos < in_buf.size) {
unsigned long buf_start;
unsigned long buf_offset;
unsigned long bytes;
/* Check if the frame is over and we still need more input */
if (ret2 == 0) {
pr_debug("BTRFS: ZSTD_decompressStream ended early\n");
ret = -EIO;
goto finish;
}
ret2 = ZSTD_decompressStream(stream, &out_buf, &in_buf);
if (ZSTD_isError(ret2)) {
pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
ZSTD_getErrorCode(ret2));
ret = -EIO;
goto finish;
}
buf_start = total_out;
total_out += out_buf.pos;
out_buf.pos = 0;
if (total_out <= start_byte)
continue;
if (total_out > start_byte && buf_start < start_byte)
buf_offset = start_byte - buf_start;
else
buf_offset = 0;
bytes = min_t(unsigned long, destlen - pg_offset,
out_buf.size - buf_offset);
kaddr = kmap_atomic(dest_page);
memcpy(kaddr + pg_offset, out_buf.dst + buf_offset, bytes);
kunmap_atomic(kaddr);
pg_offset += bytes;
}
ret = 0;
finish:
if (pg_offset < destlen) {
kaddr = kmap_atomic(dest_page);
memset(kaddr + pg_offset, 0, destlen - pg_offset);
kunmap_atomic(kaddr);
}
return ret;
}
const struct btrfs_compress_op btrfs_zstd_compress = {
.alloc_workspace = zstd_alloc_workspace,
.free_workspace = zstd_free_workspace,
.compress_pages = zstd_compress_pages,
.decompress_bio = zstd_decompress_bio,
.decompress = zstd_decompress,
};


@@ -0,0 +1,149 @@
/*
* Squashfs - a compressed read only filesystem for Linux
*
* Copyright (c) 2017 Facebook
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2,
* or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* zstd_wrapper.c
*/
#include <linux/mutex.h>
#include <linux/buffer_head.h>
#include <linux/slab.h>
#include <linux/zstd.h>
#include <linux/vmalloc.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
#include "squashfs.h"
#include "decompressor.h"
#include "page_actor.h"
struct workspace {
void *mem;
size_t mem_size;
};
static void *zstd_init(struct squashfs_sb_info *msblk, void *buff)
{
struct workspace *wksp = kmalloc(sizeof(*wksp), GFP_KERNEL);
if (wksp == NULL)
goto failed;
wksp->mem_size = ZSTD_DStreamWorkspaceBound(max_t(size_t,
msblk->block_size, SQUASHFS_METADATA_SIZE));
wksp->mem = vmalloc(wksp->mem_size);
if (wksp->mem == NULL)
goto failed;
return wksp;
failed:
ERROR("Failed to allocate zstd workspace\n");
kfree(wksp);
return ERR_PTR(-ENOMEM);
}
static void zstd_free(void *strm)
{
struct workspace *wksp = strm;
if (wksp)
vfree(wksp->mem);
kfree(wksp);
}
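/*
 * Decompress one squashfs block: feed the data of the @b buffer_heads in
 * @bh (starting at @offset within the first one) through a freshly
 * initialized DStream, writing into the pages of @output. Returns the
 * number of decompressed bytes, or -EIO on error.
 */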
static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
struct buffer_head **bh, int b, int offset, int length,
struct squashfs_page_actor *output)
{
struct workspace *wksp = strm;
ZSTD_DStream *stream;
size_t total_out = 0;
size_t zstd_err;
int k = 0;
ZSTD_inBuffer in_buf = { NULL, 0, 0 };
ZSTD_outBuffer out_buf = { NULL, 0, 0 };
stream = ZSTD_initDStream(wksp->mem_size, wksp->mem, wksp->mem_size);
if (!stream) {
ERROR("Failed to initialize zstd decompressor\n");
goto out;
}
out_buf.size = PAGE_SIZE;
out_buf.dst = squashfs_first_page(output);
do {
if (in_buf.pos == in_buf.size && k < b) {
int avail = min(length, msblk->devblksize - offset);
length -= avail;
in_buf.src = bh[k]->b_data + offset;
in_buf.size = avail;
in_buf.pos = 0;
offset = 0;
}
if (out_buf.pos == out_buf.size) {
out_buf.dst = squashfs_next_page(output);
if (out_buf.dst == NULL) {
/* shouldn't run out of pages before stream is
* done */
squashfs_finish_page(output);
goto out;
}
out_buf.pos = 0;
out_buf.size = PAGE_SIZE;
}
total_out -= out_buf.pos;
zstd_err = ZSTD_decompressStream(stream, &out_buf, &in_buf);
total_out += out_buf.pos; /* add the additional data produced */
if (in_buf.pos == in_buf.size && k < b)
put_bh(bh[k++]);
} while (zstd_err != 0 && !ZSTD_isError(zstd_err));
squashfs_finish_page(output);
if (ZSTD_isError(zstd_err)) {
ERROR("zstd decompression error: %d\n",
(int)ZSTD_getErrorCode(zstd_err));
goto out;
}
if (k < b)
goto out;
return (int)total_out;
out:
for (; k < b; k++)
put_bh(bh[k]);
return -EIO;
}
const struct squashfs_decompressor squashfs_zstd_comp_ops = {
.init = zstd_init,
.free = zstd_free,
.decompress = zstd_uncompress,
.id = ZSTD_COMPRESSION,
.name = "zstd",
.supported = 1
};

File diff suppressed because it is too large.


@@ -0,0 +1,17 @@
diff --git a/lib/Kconfig b/lib/Kconfig
index 260a80e..39d9347 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -239,6 +239,12 @@ config LZ4HC_COMPRESS
config LZ4_DECOMPRESS
tristate
+config ZSTD_COMPRESS
+ tristate
+
+config ZSTD_DECOMPRESS
+ tristate
+
source "lib/xz/Kconfig"
#


@@ -0,0 +1,13 @@
diff --git a/lib/Makefile b/lib/Makefile
index 50144a3..b30a998 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -106,6 +106,8 @@ obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
obj-$(CONFIG_LZ4_COMPRESS) += lz4/
obj-$(CONFIG_LZ4HC_COMPRESS) += lz4/
obj-$(CONFIG_LZ4_DECOMPRESS) += lz4/
+obj-$(CONFIG_ZSTD_COMPRESS) += zstd/
+obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd/
obj-$(CONFIG_XZ_DEC) += xz/
obj-$(CONFIG_RAID6_PQ) += raid6/


@@ -0,0 +1,9 @@
obj-$(CONFIG_ZSTD_COMPRESS) += zstd_compress.o
obj-$(CONFIG_ZSTD_DECOMPRESS) += zstd_decompress.o
ccflags-y += -O3
zstd_compress-y := entropy_common.o fse_decompress.o xxhash.o zstd_common.o \
fse_compress.o huf_compress.o compress.o
zstd_decompress-y := entropy_common.o fse_decompress.o xxhash.o zstd_common.o \
huf_decompress.o decompress.o


@@ -0,0 +1,391 @@
/* ******************************************************************
bitstream
Part of FSE library
header file (to include)
Copyright (C) 2013-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
****************************************************************** */
#ifndef BITSTREAM_H_MODULE
#define BITSTREAM_H_MODULE
/*
* This API consists of small unitary functions, which must be inlined for best performance.
* Since link-time-optimization is not available for all compilers,
* these functions are defined into a .h to be included.
*/
/*-****************************************
* Dependencies
******************************************/
#include "mem.h" /* unaligned access routines */
#include "error_private.h" /* error codes and messages */
/*=========================================
* Target specific
=========================================*/
#define STREAM_ACCUMULATOR_MIN_32 25
#define STREAM_ACCUMULATOR_MIN_64 57
#define STREAM_ACCUMULATOR_MIN ((U32)(MEM_32bits() ? STREAM_ACCUMULATOR_MIN_32 : STREAM_ACCUMULATOR_MIN_64))
/*-******************************************
* bitStream encoding API (write forward)
********************************************/
/* bitStream can mix input from multiple sources.
* A critical property of these streams is that they encode and decode in **reverse** direction.
* So the first bit sequence you add will be the last to be read, like a LIFO stack.
*/
typedef struct
{
size_t bitContainer;
int bitPos;
char* startPtr;
char* ptr;
char* endPtr;
} BIT_CStream_t;
MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* dstBuffer, size_t dstCapacity);
MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC);
MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC);
/* Start with initCStream, providing the size of buffer to write into.
* bitStream will never write outside of this buffer.
* `dstCapacity` must be >= sizeof(bitD->bitContainer), otherwise @return will be an error code.
*
* bits are first added to a local register.
* Local register is size_t, hence 64-bits on 64-bits systems, or 32-bits on 32-bits systems.
* Writing data into memory is an explicit operation, performed by the flushBits function.
* Hence, keep track of how many bits are potentially stored in the local register, to avoid register overflow.
* After a flushBits, a maximum of 7 bits might still be stored into local register.
*
* Avoid storing elements of more than 24 bits if you want compatibility with 32-bits bitstream readers.
*
* Last operation is to close the bitStream.
* The function returns the final size of CStream in bytes.
* If data couldn't fit into `dstBuffer`, it will return a 0 ( == not storable)
*/
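/* Illustrative sketch (not part of the original header) of the sequence above;
 * `dst` and `dstCapacity` are hypothetical caller-provided names :
 *
 *     BIT_CStream_t bitC;
 *     size_t const initErr = BIT_initCStream(&bitC, dst, dstCapacity);
 *     if (ERR_isError(initErr)) return initErr;
 *     BIT_addBits(&bitC, 5, 3);     // queue a 3-bit field holding the value 5
 *     BIT_addBits(&bitC, 300, 9);   // queue a 9-bit field holding the value 300
 *     BIT_flushBits(&bitC);         // commit the local register to memory
 *     { size_t const streamSize = BIT_closeCStream(&bitC);
 *       if (streamSize == 0) return ERROR(dstSize_tooSmall); }   // didn't fit
 *
 * Since the stream is read backwards, a decoder will see the 9-bit field
 * first, then the 3-bit field. */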
/*-********************************************
* bitStream decoding API (read backward)
**********************************************/
typedef struct
{
size_t bitContainer;
unsigned bitsConsumed;
const char* ptr;
const char* start;
} BIT_DStream_t;
typedef enum { BIT_DStream_unfinished = 0,
BIT_DStream_endOfBuffer = 1,
BIT_DStream_completed = 2,
BIT_DStream_overflow = 3 } BIT_DStream_status; /* result of BIT_reloadDStream() */
/* 1,2,4,8 would be better for bitmap combinations, but slows down performance a bit ... :( */
MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize);
MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, unsigned nbBits);
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD);
MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* bitD);
/* Start by invoking BIT_initDStream().
* A chunk of the bitStream is then stored into a local register.
* Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
* You can then retrieve bitFields stored into the local register, **in reverse order**.
* Local register is explicitly reloaded from memory by the BIT_reloadDStream() method.
* A reload guarantees a minimum of ((8*sizeof(bitD->bitContainer))-7) bits when its result is BIT_DStream_unfinished.
* Otherwise, it can be less than that, so proceed accordingly.
* Checking if DStream has reached its end can be performed with BIT_endOfDStream().
*/
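/* Illustrative sketch (not part of the original header), reading back the two
 * fields of the encoding example above; `src` and `srcSize` are hypothetical :
 *
 *     BIT_DStream_t bitD;
 *     size_t const initErr = BIT_initDStream(&bitD, src, srcSize);
 *     if (ERR_isError(initErr)) return initErr;
 *     { size_t const f2 = BIT_readBits(&bitD, 9);   // expect 300 (last written, first read)
 *       size_t const f1 = BIT_readBits(&bitD, 3);   // expect 5
 *       BIT_reloadDStream(&bitD);
 *       if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
 *       (void)f1; (void)f2; } */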
/*-****************************************
* unsafe API
******************************************/
MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits);
/* faster, but works only if value is "clean", meaning all high bits above nbBits are 0 */
MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC);
/* unsafe version; does not check buffer overflow */
MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, unsigned nbBits);
/* faster, but works only if nbBits >= 1 */
/*-**************************************************************
* Internal functions
****************************************************************/
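/* BIT_highbit32() : position of the highest set bit,
 * e.g. BIT_highbit32(1) == 0, BIT_highbit32(0x80) == 7, BIT_highbit32(0xFFFF) == 15.
 * `val` must be non-zero : the MSC and GCC intrinsic paths below are undefined for 0. */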
MEM_STATIC unsigned BIT_highbit32 (register U32 val)
{
# if defined(_MSC_VER) /* Visual */
unsigned long r=0;
_BitScanReverse ( &r, val );
return (unsigned) r;
# elif defined(__GNUC__) && (__GNUC__ >= 3) /* Use GCC Intrinsic */
return 31 - __builtin_clz (val);
# else /* Software version */
static const unsigned DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
return DeBruijnClz[ (U32) (v * 0x07C4ACDDU) >> 27];
# endif
}
/*===== Local Constants =====*/
static const unsigned BIT_mask[] = { 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0x1FFFF, 0x3FFFF, 0x7FFFF, 0xFFFFF, 0x1FFFFF, 0x3FFFFF, 0x7FFFFF, 0xFFFFFF, 0x1FFFFFF, 0x3FFFFFF }; /* up to 26 bits */
/*-**************************************************************
* bitStream encoding
****************************************************************/
/*! BIT_initCStream() :
* `dstCapacity` must be > sizeof(void*)
* @return : 0 if success,
otherwise an error code (can be tested using ERR_isError() ) */
MEM_STATIC size_t BIT_initCStream(BIT_CStream_t* bitC, void* startPtr, size_t dstCapacity)
{
bitC->bitContainer = 0;
bitC->bitPos = 0;
bitC->startPtr = (char*)startPtr;
bitC->ptr = bitC->startPtr;
bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->ptr);
if (dstCapacity <= sizeof(bitC->ptr)) return ERROR(dstSize_tooSmall);
return 0;
}
/*! BIT_addBits() :
can add up to 26 bits into `bitC`.
Does not check for register overflow ! */
MEM_STATIC void BIT_addBits(BIT_CStream_t* bitC, size_t value, unsigned nbBits)
{
bitC->bitContainer |= (value & BIT_mask[nbBits]) << bitC->bitPos;
bitC->bitPos += nbBits;
}
/*! BIT_addBitsFast() :
* works only if `value` is _clean_, meaning all high bits above nbBits are 0 */
MEM_STATIC void BIT_addBitsFast(BIT_CStream_t* bitC, size_t value, unsigned nbBits)
{
bitC->bitContainer |= value << bitC->bitPos;
bitC->bitPos += nbBits;
}
/*! BIT_flushBitsFast() :
* unsafe version; does not check buffer overflow */
MEM_STATIC void BIT_flushBitsFast(BIT_CStream_t* bitC)
{
size_t const nbBytes = bitC->bitPos >> 3;
MEM_writeLEST(bitC->ptr, bitC->bitContainer);
bitC->ptr += nbBytes;
bitC->bitPos &= 7;
bitC->bitContainer >>= nbBytes*8; /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */
}
/*! BIT_flushBits() :
* safe version; checks for buffer overflow, and prevents it.
* note : does not signal buffer overflow. This will be revealed later on using BIT_closeCStream() */
MEM_STATIC void BIT_flushBits(BIT_CStream_t* bitC)
{
size_t const nbBytes = bitC->bitPos >> 3;
MEM_writeLEST(bitC->ptr, bitC->bitContainer);
bitC->ptr += nbBytes;
if (bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr;
bitC->bitPos &= 7;
bitC->bitContainer >>= nbBytes*8; /* if bitPos >= sizeof(bitContainer)*8 --> undefined behavior */
}
/*! BIT_closeCStream() :
* @return : size of CStream, in bytes,
or 0 if it could not fit into dstBuffer */
MEM_STATIC size_t BIT_closeCStream(BIT_CStream_t* bitC)
{
BIT_addBitsFast(bitC, 1, 1); /* endMark */
BIT_flushBits(bitC);
if (bitC->ptr >= bitC->endPtr) return 0; /* doesn't fit within authorized budget : cancel */
return (bitC->ptr - bitC->startPtr) + (bitC->bitPos > 0);
}
/*-********************************************************
* bitStream decoding
**********************************************************/
/*! BIT_initDStream() :
* Initialize a BIT_DStream_t.
* `bitD` : a pointer to an already allocated BIT_DStream_t structure.
* `srcSize` must be the *exact* size of the bitStream, in bytes.
* @return : size of stream (== srcSize) or an errorCode if a problem is detected
*/
MEM_STATIC size_t BIT_initDStream(BIT_DStream_t* bitD, const void* srcBuffer, size_t srcSize)
{
if (srcSize < 1) { memset(bitD, 0, sizeof(*bitD)); return ERROR(srcSize_wrong); }
if (srcSize >= sizeof(bitD->bitContainer)) { /* normal case */
bitD->start = (const char*)srcBuffer;
bitD->ptr = (const char*)srcBuffer + srcSize - sizeof(bitD->bitContainer);
bitD->bitContainer = MEM_readLEST(bitD->ptr);
{ BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0; /* ensures bitsConsumed is always set */
if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
} else {
bitD->start = (const char*)srcBuffer;
bitD->ptr = bitD->start;
bitD->bitContainer = *(const BYTE*)(bitD->start);
switch(srcSize)
{
case 7: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[6]) << (sizeof(bitD->bitContainer)*8 - 16);
case 6: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[5]) << (sizeof(bitD->bitContainer)*8 - 24);
case 5: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[4]) << (sizeof(bitD->bitContainer)*8 - 32);
case 4: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[3]) << 24;
case 3: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[2]) << 16;
case 2: bitD->bitContainer += (size_t)(((const BYTE*)(srcBuffer))[1]) << 8;
default:;
}
{ BYTE const lastByte = ((const BYTE*)srcBuffer)[srcSize-1];
bitD->bitsConsumed = lastByte ? 8 - BIT_highbit32(lastByte) : 0;
if (lastByte == 0) return ERROR(GENERIC); /* endMark not present */ }
bitD->bitsConsumed += (U32)(sizeof(bitD->bitContainer) - srcSize)*8;
}
return srcSize;
}
MEM_STATIC size_t BIT_getUpperBits(size_t bitContainer, U32 const start)
{
return bitContainer >> start;
}
MEM_STATIC size_t BIT_getMiddleBits(size_t bitContainer, U32 const start, U32 const nbBits)
{
return (bitContainer >> start) & BIT_mask[nbBits];
}
MEM_STATIC size_t BIT_getLowerBits(size_t bitContainer, U32 const nbBits)
{
return bitContainer & BIT_mask[nbBits];
}
/*! BIT_lookBits() :
* Provides next n bits from local register.
* local register is not modified.
* On 32-bits, maxNbBits==24.
* On 64-bits, maxNbBits==56.
* @return : value extracted
*/
MEM_STATIC size_t BIT_lookBits(const BIT_DStream_t* bitD, U32 nbBits)
{
U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1;
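/* note : the double shift (`>> 1` then `>> ((bitMask-nbBits) & bitMask)`) makes
 * nbBits==0 return 0 while keeping each individual shift strictly smaller than
 * the register width, thus avoiding undefined behavior. */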
return ((bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> 1) >> ((bitMask-nbBits) & bitMask);
}
/*! BIT_lookBitsFast() :
* unsafe version; only works if nbBits >= 1 */
MEM_STATIC size_t BIT_lookBitsFast(const BIT_DStream_t* bitD, U32 nbBits)
{
U32 const bitMask = sizeof(bitD->bitContainer)*8 - 1;
return (bitD->bitContainer << (bitD->bitsConsumed & bitMask)) >> (((bitMask+1)-nbBits) & bitMask);
}
MEM_STATIC void BIT_skipBits(BIT_DStream_t* bitD, U32 nbBits)
{
bitD->bitsConsumed += nbBits;
}
/*! BIT_readBits() :
* Read (consume) next n bits from local register and update.
* Take care not to read more bits than the local register currently contains.
* @return : extracted value.
*/
MEM_STATIC size_t BIT_readBits(BIT_DStream_t* bitD, U32 nbBits)
{
size_t const value = BIT_lookBits(bitD, nbBits);
BIT_skipBits(bitD, nbBits);
return value;
}
/*! BIT_readBitsFast() :
* unsafe version; only works only if nbBits >= 1 */
MEM_STATIC size_t BIT_readBitsFast(BIT_DStream_t* bitD, U32 nbBits)
{
size_t const value = BIT_lookBitsFast(bitD, nbBits);
BIT_skipBits(bitD, nbBits);
return value;
}
/*! BIT_reloadDStream() :
* Refill `bitD` from buffer previously set in BIT_initDStream() .
* This function is safe, it guarantees it will not read beyond src buffer.
* @return : status of `BIT_DStream_t` internal register.
if status == BIT_DStream_unfinished, internal register is filled with >= (sizeof(bitD->bitContainer)*8 - 7) bits */
MEM_STATIC BIT_DStream_status BIT_reloadDStream(BIT_DStream_t* bitD)
{
if (bitD->bitsConsumed > (sizeof(bitD->bitContainer)*8)) /* should not happen => corruption detected */
return BIT_DStream_overflow;
if (bitD->ptr >= bitD->start + sizeof(bitD->bitContainer)) {
bitD->ptr -= bitD->bitsConsumed >> 3;
bitD->bitsConsumed &= 7;
bitD->bitContainer = MEM_readLEST(bitD->ptr);
return BIT_DStream_unfinished;
}
if (bitD->ptr == bitD->start) {
if (bitD->bitsConsumed < sizeof(bitD->bitContainer)*8) return BIT_DStream_endOfBuffer;
return BIT_DStream_completed;
}
{ U32 nbBytes = bitD->bitsConsumed >> 3;
BIT_DStream_status result = BIT_DStream_unfinished;
if (bitD->ptr - nbBytes < bitD->start) {
nbBytes = (U32)(bitD->ptr - bitD->start); /* ptr > start */
result = BIT_DStream_endOfBuffer;
}
bitD->ptr -= nbBytes;
bitD->bitsConsumed -= nbBytes*8;
bitD->bitContainer = MEM_readLEST(bitD->ptr); /* reminder : srcSize > sizeof(bitD) */
return result;
}
}
/*! BIT_endOfDStream() :
* @return Tells if DStream has exactly reached its end (all bits consumed).
*/
MEM_STATIC unsigned BIT_endOfDStream(const BIT_DStream_t* DStream)
{
return ((DStream->ptr == DStream->start) && (DStream->bitsConsumed == sizeof(DStream->bitContainer)*8));
}
#endif /* BITSTREAM_H_MODULE */

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,217 @@
/*
Common functions of New Generation Entropy library
Copyright (C) 2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
*************************************************************************** */
/* *************************************
* Dependencies
***************************************/
#include "mem.h"
#include "error_private.h" /* ERR_*, ERROR */
#include "fse.h"
#include "huf.h"
/*=== Version ===*/
unsigned FSE_versionNumber(void) { return FSE_VERSION_NUMBER; }
/*=== Error Management ===*/
unsigned FSE_isError(size_t code) { return ERR_isError(code); }
unsigned HUF_isError(size_t code) { return ERR_isError(code); }
/*-**************************************************************
* FSE NCount encoding-decoding
****************************************************************/
size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSVPtr, unsigned* tableLogPtr,
const void* headerBuffer, size_t hbSize)
{
const BYTE* const istart = (const BYTE*) headerBuffer;
const BYTE* const iend = istart + hbSize;
const BYTE* ip = istart;
int nbBits;
int remaining;
int threshold;
U32 bitStream;
int bitCount;
unsigned charnum = 0;
int previous0 = 0;
if (hbSize < 4) return ERROR(srcSize_wrong);
bitStream = MEM_readLE32(ip);
nbBits = (bitStream & 0xF) + FSE_MIN_TABLELOG; /* extract tableLog */
if (nbBits > FSE_TABLELOG_ABSOLUTE_MAX) return ERROR(tableLog_tooLarge);
bitStream >>= 4;
bitCount = 4;
*tableLogPtr = nbBits;
remaining = (1<<nbBits)+1;
threshold = 1<<nbBits;
nbBits++;
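/* 'remaining' starts at tableSize+1 and must come back down to exactly 1 once
 * all probability mass has been read (checked at the end of this function);
 * 'threshold' is the ceiling of the current bit field, and halves together
 * with nbBits as the remaining mass shrinks. */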
while ((remaining>1) & (charnum<=*maxSVPtr)) {
if (previous0) {
unsigned n0 = charnum;
while ((bitStream & 0xFFFF) == 0xFFFF) {
n0 += 24;
if (ip < iend-5) {
ip += 2;
bitStream = MEM_readLE32(ip) >> bitCount;
} else {
bitStream >>= 16;
bitCount += 16;
} }
while ((bitStream & 3) == 3) {
n0 += 3;
bitStream >>= 2;
bitCount += 2;
}
n0 += bitStream & 3;
bitCount += 2;
if (n0 > *maxSVPtr) return ERROR(maxSymbolValue_tooSmall);
while (charnum < n0) normalizedCounter[charnum++] = 0;
if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
ip += bitCount>>3;
bitCount &= 7;
bitStream = MEM_readLE32(ip) >> bitCount;
} else {
bitStream >>= 2;
} }
{ int const max = (2*threshold-1) - remaining;
int count;
if ((bitStream & (threshold-1)) < (U32)max) {
count = bitStream & (threshold-1);
bitCount += nbBits-1;
} else {
count = bitStream & (2*threshold-1);
if (count >= threshold) count -= max;
bitCount += nbBits;
}
count--; /* extra accuracy */
remaining -= count < 0 ? -count : count; /* -1 means +1 */
normalizedCounter[charnum++] = (short)count;
previous0 = !count;
while (remaining < threshold) {
nbBits--;
threshold >>= 1;
}
if ((ip <= iend-7) || (ip + (bitCount>>3) <= iend-4)) {
ip += bitCount>>3;
bitCount &= 7;
} else {
bitCount -= (int)(8 * (iend - 4 - ip));
ip = iend - 4;
}
bitStream = MEM_readLE32(ip) >> (bitCount & 31);
} } /* while ((remaining>1) & (charnum<=*maxSVPtr)) */
if (remaining != 1) return ERROR(corruption_detected);
if (bitCount > 32) return ERROR(corruption_detected);
*maxSVPtr = charnum-1;
ip += (bitCount+7)>>3;
return ip-istart;
}
/*! HUF_readStats() :
Read compact Huffman tree, saved by HUF_writeCTable().
`huffWeight` is destination buffer.
`rankStats` is assumed to be a table of at least HUF_TABLELOG_MAX U32.
@return : size read from `src` , or an error Code .
Note : Needed by HUF_readCTable() and HUF_readDTableX?() .
*/
size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize)
{
U32 weightTotal;
const BYTE* ip = (const BYTE*) src;
size_t iSize;
size_t oSize;
if (!srcSize) return ERROR(srcSize_wrong);
iSize = ip[0];
/* memset(huffWeight, 0, hwSize); */ /* is not necessary, even though some analyzers complain ... */
if (iSize >= 128) { /* special header */
oSize = iSize - 127;
iSize = ((oSize+1)/2);
if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
if (oSize >= hwSize) return ERROR(corruption_detected);
ip += 1;
{ U32 n;
for (n=0; n<oSize; n+=2) {
huffWeight[n] = ip[n/2] >> 4;
huffWeight[n+1] = ip[n/2] & 15;
} } }
else { /* header compressed with FSE (normal case) */
FSE_DTable fseWorkspace[FSE_DTABLE_SIZE_U32(6)]; /* 6 is max possible tableLog for HUF header (maybe even 5, to be tested) */
if (iSize+1 > srcSize) return ERROR(srcSize_wrong);
oSize = FSE_decompress_wksp(huffWeight, hwSize-1, ip+1, iSize, fseWorkspace, 6); /* max (hwSize-1) values decoded, as last one is implied */
if (FSE_isError(oSize)) return oSize;
}
/* collect weight stats */
memset(rankStats, 0, (HUF_TABLELOG_MAX + 1) * sizeof(U32));
weightTotal = 0;
{ U32 n; for (n=0; n<oSize; n++) {
if (huffWeight[n] >= HUF_TABLELOG_MAX) return ERROR(corruption_detected);
rankStats[huffWeight[n]]++;
weightTotal += (1 << huffWeight[n]) >> 1;
} }
if (weightTotal == 0) return ERROR(corruption_detected);
/* get last non-null symbol weight (implied, total must be 2^n) */
{ U32 const tableLog = BIT_highbit32(weightTotal) + 1;
if (tableLog > HUF_TABLELOG_MAX) return ERROR(corruption_detected);
*tableLogPtr = tableLog;
/* determine last weight */
{ U32 const total = 1 << tableLog;
U32 const rest = total - weightTotal;
U32 const verif = 1 << BIT_highbit32(rest);
U32 const lastWeight = BIT_highbit32(rest) + 1;
if (verif != rest) return ERROR(corruption_detected); /* last value must be a clean power of 2 */
huffWeight[oSize] = (BYTE)lastWeight;
rankStats[lastWeight]++;
} }
/* check tree construction validity */
if ((rankStats[1] < 2) || (rankStats[1] & 1)) return ERROR(corruption_detected); /* by construction : at least 2 elts of rank 1, must be even */
/* results */
*nbSymbolsPtr = (U32)(oSize+1);
return iSize+1;
}


@@ -0,0 +1,44 @@
/**
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*/
/* Note : this module is expected to remain private, do not expose it */
#ifndef ERROR_H_MODULE
#define ERROR_H_MODULE
/* ****************************************
* Dependencies
******************************************/
#include <linux/types.h> /* size_t */
#include <linux/zstd.h> /* enum list */
/* ****************************************
* Compiler-specific
******************************************/
#define ERR_STATIC static __attribute__((unused))
/*-****************************************
* Customization (error_public.h)
******************************************/
typedef ZSTD_ErrorCode ERR_enum;
#define PREFIX(name) ZSTD_error_##name
/*-****************************************
* Error codes handling
******************************************/
#define ERROR(name) ((size_t)-PREFIX(name))
ERR_STATIC unsigned ERR_isError(size_t code) { return (code > ERROR(maxCode)); }
ERR_STATIC ERR_enum ERR_getErrorCode(size_t code) { if (!ERR_isError(code)) return (ERR_enum)0; return (ERR_enum) (0-code); }
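/* Illustrative only : how these helpers compose,
 *     size_t const code = ERROR(srcSize_wrong);   // == (size_t)-ZSTD_error_srcSize_wrong
 *     ERR_isError(code);                          // non-zero : this is an error
 *     ERR_getErrorCode(code);                     // recovers ZSTD_error_srcSize_wrong */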
#endif /* ERROR_H_MODULE */


@@ -0,0 +1,606 @@
/* ******************************************************************
FSE : Finite State Entropy codec
Public Prototypes declaration
Copyright (C) 2013-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
****************************************************************** */
#ifndef FSE_H
#define FSE_H
/*-*****************************************
* Dependencies
******************************************/
#include <linux/types.h> /* size_t, ptrdiff_t */
/*-*****************************************
* FSE_PUBLIC_API : control library symbols visibility
******************************************/
#define FSE_PUBLIC_API
/*------ Version ------*/
#define FSE_VERSION_MAJOR 0
#define FSE_VERSION_MINOR 9
#define FSE_VERSION_RELEASE 0
#define FSE_LIB_VERSION FSE_VERSION_MAJOR.FSE_VERSION_MINOR.FSE_VERSION_RELEASE
#define FSE_QUOTE(str) #str
#define FSE_EXPAND_AND_QUOTE(str) FSE_QUOTE(str)
#define FSE_VERSION_STRING FSE_EXPAND_AND_QUOTE(FSE_LIB_VERSION)
#define FSE_VERSION_NUMBER (FSE_VERSION_MAJOR *100*100 + FSE_VERSION_MINOR *100 + FSE_VERSION_RELEASE)
FSE_PUBLIC_API unsigned FSE_versionNumber(void); /**< library version number; to be used when checking dll version */
/*-*****************************************
* Tool functions
******************************************/
FSE_PUBLIC_API size_t FSE_compressBound(size_t size); /* maximum compressed size */
/* Error Management */
FSE_PUBLIC_API unsigned FSE_isError(size_t code); /* tells if a return value is an error code */
/*-*****************************************
* FSE detailed API
******************************************/
/*!
FSE_compress() does the following:
1. count symbol occurrence from source[] into table count[]
2. normalize counters so that sum(count[]) == Power_of_2 (2^tableLog)
3. save normalized counters to memory buffer using writeNCount()
4. build encoding table 'CTable' from normalized counters
5. encode the data stream using encoding table 'CTable'
FSE_decompress() does the following:
1. read normalized counters with readNCount()
2. build decoding table 'DTable' from normalized counters
3. decode the data stream using decoding table 'DTable'
The following API allows targeting specific sub-functions for advanced tasks.
For example, it's possible to compress several blocks using the same 'CTable',
or to save and provide normalized distribution using external method.
*/
/* *** COMPRESSION *** */
/*! FSE_optimalTableLog():
dynamically downsize 'tableLog' when conditions are met.
It saves CPU time, by using smaller tables, while preserving or even improving compression ratio.
@return : recommended tableLog (necessarily <= 'maxTableLog') */
FSE_PUBLIC_API unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
/*! FSE_normalizeCount():
normalize counts so that sum(count[]) == Power_of_2 (2^tableLog)
'normalizedCounter' is a table of short, of minimum size (maxSymbolValue+1).
@return : tableLog,
or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_normalizeCount(short* normalizedCounter, unsigned tableLog, const unsigned* count, size_t srcSize, unsigned maxSymbolValue);
/*! FSE_NCountWriteBound():
Provides the maximum possible size of an FSE normalized table, given 'maxSymbolValue' and 'tableLog'.
Typically useful for allocation purpose. */
FSE_PUBLIC_API size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog);
/*! FSE_writeNCount():
Compactly save 'normalizedCounter' into 'buffer'.
@return : size of the compressed table,
or an errorCode, which can be tested using FSE_isError(). */
FSE_PUBLIC_API size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
/*! Constructor and Destructor of FSE_CTable.
Note that FSE_CTable size depends on 'tableLog' and 'maxSymbolValue' */
typedef unsigned FSE_CTable; /* don't allocate that. It's only meant to be more restrictive than void* */
/*! FSE_compress_usingCTable():
Compress `src` using `ct` into `dst` which must be already allocated.
@return : size of compressed data (<= `dstCapacity`),
or 0 if compressed data could not fit into `dst`,
or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_compress_usingCTable (void* dst, size_t dstCapacity, const void* src, size_t srcSize, const FSE_CTable* ct);
/*!
Tutorial :
----------
The first step is to count all symbols. FSE_count() does this job very fast.
Result will be saved into 'count', a table of unsigned int, which must be already allocated, and have 'maxSymbolValuePtr[0]+1' cells.
'src' is a table of bytes of size 'srcSize'. All values within 'src' MUST be <= maxSymbolValuePtr[0]
maxSymbolValuePtr[0] will be updated, with its real value (necessarily <= original value)
FSE_count() will return the number of occurrences of the most frequent symbol.
This can be used to know if there is a single symbol within 'src', and to quickly evaluate its compressibility.
If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
The next step is to normalize the frequencies.
FSE_normalizeCount() will ensure that sum of frequencies is == 2 ^'tableLog'.
It also guarantees a minimum of 1 to any Symbol with frequency >= 1.
You can use 'tableLog'==0 to mean "use default tableLog value".
If you are unsure of which tableLog value to use, you can ask FSE_optimalTableLog(),
which will provide the optimal valid tableLog given sourceSize, maxSymbolValue, and a user-defined maximum (0 means "default").
The result of FSE_normalizeCount() will be saved into a table,
called 'normalizedCounter', which is a table of signed short.
'normalizedCounter' must be already allocated, and have at least 'maxSymbolValue+1' cells.
The return value is tableLog if everything proceeded as expected.
It is 0 if there is a single symbol within distribution.
If there is an error (ex: invalid tableLog value), the function will return an ErrorCode (which can be tested using FSE_isError()).
'normalizedCounter' can be saved in a compact manner to a memory area using FSE_writeNCount().
'buffer' must be already allocated.
For guaranteed success, buffer size must be at least FSE_headerBound().
The result of the function is the number of bytes written into 'buffer'.
If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError(); ex : buffer size too small).
'normalizedCounter' can then be used to create the compression table 'CTable'.
The space required by 'CTable' must be already allocated, using FSE_createCTable().
You can then use FSE_buildCTable() to fill 'CTable'.
If there is an error, both functions will return an ErrorCode (which can be tested using FSE_isError()).
'CTable' can then be used to compress 'src', with FSE_compress_usingCTable().
Similar to FSE_count(), the convention is that 'src' is assumed to be a table of char of size 'srcSize'
The function returns the size of compressed data (without header), necessarily <= `dstCapacity`.
If it returns '0', compressed data could not fit into 'dst'.
If there is an error, the function will return an ErrorCode (which can be tested using FSE_isError()).
*/
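/* Condensed, illustrative sketch of the pipeline above, using the _wksp
 * variants declared further below (error checks elided; every call's result
 * should be tested with FSE_isError(); `src`, `srcSize`, `dst`, `dstCapacity`
 * are hypothetical caller-provided names; workspace sizing follows the
 * constants defined at the end of this header) :
 *
 *     unsigned count[FSE_MAX_SYMBOL_VALUE+1];
 *     short    norm [FSE_MAX_SYMBOL_VALUE+1];
 *     unsigned wksp [1024];
 *     FSE_CTable ct [FSE_CTABLE_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];
 *     unsigned maxSym = FSE_MAX_SYMBOL_VALUE;
 *
 *     FSE_count_wksp(count, &maxSym, src, srcSize, wksp);
 *     { unsigned const tableLog = FSE_optimalTableLog(0, srcSize, maxSym);
 *       FSE_normalizeCount(norm, tableLog, count, srcSize, maxSym);
 *       { size_t const hSize = FSE_writeNCount(dst, dstCapacity, norm, maxSym, tableLog);
 *         FSE_buildCTable_wksp(ct, norm, maxSym, tableLog, wksp, sizeof(wksp));
 *         cSize = hSize + FSE_compress_usingCTable(dst+hSize, dstCapacity-hSize, src, srcSize, ct); } } */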
/* *** DECOMPRESSION *** */
/*! FSE_readNCount():
Read compactly saved 'normalizedCounter' from 'rBuffer'.
@return : size read from 'rBuffer',
or an errorCode, which can be tested using FSE_isError().
maxSymbolValuePtr[0] and tableLogPtr[0] will also be updated with their respective values */
FSE_PUBLIC_API size_t FSE_readNCount (short* normalizedCounter, unsigned* maxSymbolValuePtr, unsigned* tableLogPtr, const void* rBuffer, size_t rBuffSize);
/*! Constructor and Destructor of FSE_DTable.
Note that its size depends on 'tableLog' */
typedef unsigned FSE_DTable; /* don't allocate that. It's just a way to be more restrictive than void* */
/*! FSE_buildDTable():
Builds 'dt', which must be already allocated, using FSE_createDTable().
return : 0, or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_buildDTable (FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog);
/*! FSE_decompress_usingDTable():
Decompress compressed source `cSrc` of size `cSrcSize` using `dt`
into `dst` which must be already allocated.
@return : size of regenerated data (necessarily <= `dstCapacity`),
or an errorCode, which can be tested using FSE_isError() */
FSE_PUBLIC_API size_t FSE_decompress_usingDTable(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, const FSE_DTable* dt);
/*!
Tutorial :
----------
(Note : these functions only decompress FSE-compressed blocks.
If block is uncompressed, use memcpy() instead.
If block is a single repeated byte, use memset() instead.)
The first step is to obtain the normalized frequencies of symbols.
This can be performed by FSE_readNCount() if it was saved using FSE_writeNCount().
'normalizedCounter' must be already allocated, and have at least 'maxSymbolValuePtr[0]+1' cells of signed short.
In practice, that means it's necessary to know 'maxSymbolValue' beforehand,
or size the table to handle worst case situations (typically 256).
FSE_readNCount() will provide 'tableLog' and 'maxSymbolValue'.
The result of FSE_readNCount() is the number of bytes read from 'rBuffer'.
Note that 'rBufferSize' must be at least 4 bytes, even if useful information is less than that.
If there is an error, the function will return an error code, which can be tested using FSE_isError().
The next step is to build the decompression tables 'FSE_DTable' from 'normalizedCounter'.
This is performed by the function FSE_buildDTable().
The space required by 'FSE_DTable' must be already allocated using FSE_createDTable().
If there is an error, the function will return an error code, which can be tested using FSE_isError().
`FSE_DTable` can then be used to decompress `cSrc`, with FSE_decompress_usingDTable().
`cSrcSize` must be strictly correct, otherwise decompression will fail.
FSE_decompress_usingDTable() result will tell how many bytes were regenerated (<=`dstCapacity`).
If there is an error, the function will return an error code, which can be tested using FSE_isError(). (ex: dst buffer too small)
*/
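/* Condensed, illustrative sketch of the steps above (error checks elided;
 * `cSrc`, `cSrcSize`, `dst`, `dstCapacity` are hypothetical) :
 *
 *     short norm[FSE_MAX_SYMBOL_VALUE+1];
 *     unsigned maxSym = FSE_MAX_SYMBOL_VALUE, tableLog;
 *     FSE_DTable dt[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
 *
 *     { size_t const hSize = FSE_readNCount(norm, &maxSym, &tableLog, cSrc, cSrcSize);
 *       FSE_buildDTable(dt, norm, maxSym, tableLog);
 *       dSize = FSE_decompress_usingDTable(dst, dstCapacity, cSrc+hSize, cSrcSize-hSize, dt); } */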
/* *** Dependency *** */
#include "bitstream.h"
/* *****************************************
* Static allocation
*******************************************/
/* FSE buffer bounds */
#define FSE_NCOUNTBOUND 512
#define FSE_BLOCKBOUND(size) (size + (size>>7))
#define FSE_COMPRESSBOUND(size) (FSE_NCOUNTBOUND + FSE_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
/* It is possible to statically allocate FSE CTable/DTable as a table of FSE_CTable/FSE_DTable using below macros */
#define FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) (1 + (1<<(maxTableLog-1)) + ((maxSymbolValue+1)*2))
#define FSE_DTABLE_SIZE_U32(maxTableLog) (1 + (1<<maxTableLog))
/* *****************************************
* FSE advanced API
*******************************************/
/* FSE_count_wksp() :
* Same as FSE_count(), but using an externally provided scratch buffer.
* `workSpace` must be a table of at least `1024` unsigned
*/
size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
const void* source, size_t sourceSize, unsigned* workSpace);
/* FSE_countFast_wksp() :
* Same as FSE_countFast(), but using an externally provided scratch buffer.
* `workSpace` must be a table of minimum `1024` unsigned
*/
size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* workSpace);
/*! FSE_count_simple
* Same as FSE_countFast(), but does not use any additional memory (not even on stack).
* This function is unsafe, and will segfault if any value within `src` is `> *maxSymbolValuePtr` (presuming it's also the size of `count`).
*/
size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize);
unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus);
/**< same as FSE_optimalTableLog(), which uses `minus==2` */
/* FSE_compress_wksp() :
* Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
* FSE_WKSP_SIZE_U32() provides the minimum size required for `workSpace` as a table of FSE_CTable.
*/
#define FSE_WKSP_SIZE_U32(maxTableLog, maxSymbolValue) ( FSE_CTABLE_SIZE_U32(maxTableLog, maxSymbolValue) + ((maxTableLog > 12) ? (1 << (maxTableLog - 2)) : 1024) )
size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits);
/**< build a fake FSE_CTable, designed for a flat distribution, where each symbol uses nbBits */
size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);
/**< build a fake FSE_CTable, designed to compress always the same symbolValue */
/* FSE_buildCTable_wksp() :
* Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
* `wkspSize` must be >= `(1<<tableLog)`.
*/
size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits);
/**< build a fake FSE_DTable, designed to read a flat distribution where each symbol uses nbBits */
size_t FSE_buildDTable_rle (FSE_DTable* dt, unsigned char symbolValue);
/**< build a fake FSE_DTable, designed to always generate the same symbolValue */
size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog);
/**< same as FSE_decompress(), using an externally allocated `workSpace` produced with `FSE_DTABLE_SIZE_U32(maxLog)` */
/* *****************************************
* FSE symbol compression API
*******************************************/
/*!
This API consists of small unitary functions, which highly benefit from being inlined.
Hence their bodies are included in the next section.
*/
typedef struct {
ptrdiff_t value;
const void* stateTable;
const void* symbolTT;
unsigned stateLog;
} FSE_CState_t;
static void FSE_initCState(FSE_CState_t* CStatePtr, const FSE_CTable* ct);
static void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* CStatePtr, unsigned symbol);
static void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* CStatePtr);
/**<
These functions are inner components of FSE_compress_usingCTable().
They allow the creation of custom streams, mixing multiple tables and bit sources.
A key property to keep in mind is that encoding and decoding are done **in reverse direction**.
So the first symbol you will encode is the last you will decode, like a LIFO stack.
You will need a few variables to track your CStream. They are :
FSE_CTable ct; // Provided by FSE_buildCTable()
BIT_CStream_t bitStream; // bitStream tracking structure
FSE_CState_t state; // State tracking structure (can have several)
The first thing to do is to init bitStream and state.
size_t errorCode = BIT_initCStream(&bitStream, dstBuffer, maxDstSize);
FSE_initCState(&state, ct);
Note that BIT_initCStream() can produce an error code, so its result should be tested, using FSE_isError();
You can then encode your input data, byte after byte.
FSE_encodeSymbol() outputs a maximum of 'tableLog' bits at a time.
Remember decoding will be done in reverse direction.
FSE_encodeByte(&bitStream, &state, symbol);
At any time, you can also add any bit sequence.
Note : maximum allowed nbBits is 25, for compatibility with 32-bits decoders
BIT_addBits(&bitStream, bitField, nbBits);
The above methods don't commit data to memory, they just store it into local register, for speed.
Local register size is 64-bits on 64-bits systems, 32-bits on 32-bits systems (size_t).
Writing data to memory is a manual operation, performed by the flushBits function.
BIT_flushBits(&bitStream);
Your last FSE encoding operation shall be to flush your last state value(s).
FSE_flushState(&bitStream, &state);
Finally, you must close the bitStream.
The function returns the size of CStream in bytes.
If data couldn't fit into dstBuffer, it will return a 0 ( == not compressible)
If there is an error, it returns an errorCode (which can be tested using FSE_isError()).
size_t size = BIT_closeCStream(&bitStream);
*/
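/* Compact, illustrative form of the walkthrough above : encoding a byte
 * buffer back-to-front (the first encoded symbol is the last one decoded);
 * flushing after every symbol for simplicity, real encoders flush less often :
 *
 *     BIT_initCStream(&bitStream, dst, dstCapacity);
 *     FSE_initCState2(&state, ct, src[srcSize-1]);
 *     { size_t n = srcSize-1;
 *       while (n--) {
 *           FSE_encodeSymbol(&bitStream, &state, src[n]);
 *           BIT_flushBits(&bitStream);
 *       } }
 *     FSE_flushCState(&bitStream, &state);
 *     return BIT_closeCStream(&bitStream); */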
/* *****************************************
* FSE symbol decompression API
*******************************************/
typedef struct {
size_t state;
const void* table; /* precise table may vary, depending on U16 */
} FSE_DState_t;
static void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt);
static unsigned char FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
static unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr);
/**<
Let's now decompose FSE_decompress_usingDTable() into its unitary components.
You will decode FSE-encoded symbols from the bitStream,
and also any other bitFields you put in, **in reverse order**.
You will need a few variables to track your bitStream. They are :
BIT_DStream_t DStream; // Stream context
FSE_DState_t DState; // State context. Multiple ones are possible
FSE_DTable* DTablePtr; // Decoding table, provided by FSE_buildDTable()
The first thing to do is to init the bitStream.
errorCode = BIT_initDStream(&DStream, srcBuffer, srcSize);
You should then retrieve your initial state(s)
(in reverse flushing order if you have several ones) :
errorCode = FSE_initDState(&DState, &DStream, DTablePtr);
You can then decode your data, symbol after symbol.
For information, the maximum number of bits read by FSE_decodeSymbol() is 'tableLog'.
Keep in mind that symbols are decoded in reverse order, like a LIFO stack (last in, first out).
unsigned char symbol = FSE_decodeSymbol(&DState, &DStream);
You can retrieve any bitfield you may have stored into the bitStream (in reverse order)
Note : maximum allowed nbBits is 25, for 32-bits compatibility
size_t bitField = BIT_readBits(&DStream, nbBits);
All above operations only read from local register (which size depends on size_t).
Refueling the register from memory is manually performed by the reload method.
endSignal = FSE_reloadDStream(&DStream);
BIT_reloadDStream() result tells if there is still some more data to read from DStream.
BIT_DStream_unfinished : there is still some data left into the DStream.
BIT_DStream_endOfBuffer : Dstream reached end of buffer. Its container may no longer be completely filled.
BIT_DStream_completed : Dstream reached its exact end, corresponding in general to decompression completed.
BIT_DStream_overflow : Dstream went too far. Decompression result is corrupted.
When reaching end of buffer (BIT_DStream_endOfBuffer), progress slowly, notably if you decode multiple symbols per loop,
to properly detect the exact end of stream.
After each decoded symbol, check if DStream is fully consumed using this simple test :
BIT_reloadDStream(&DStream) >= BIT_DStream_completed
When it's done, verify decompression is fully completed, by checking both DStream and the relevant states.
Checking if DStream has reached its end is performed by :
BIT_endOfDStream(&DStream);
Check also the states. There might be some symbols left there, if some high probability ones (>50%) are possible.
FSE_endOfDState(&DState);
*/
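/* Compact, illustrative form of the walkthrough above (termination details
 * such as flushing the final state values and exact-end verification are
 * elided; `op`/`oend` are hypothetical output cursors) :
 *
 *     BIT_initDStream(&DStream, srcBuffer, srcSize);
 *     FSE_initDState(&DState, &DStream, DTablePtr);
 *     while ((BIT_reloadDStream(&DStream) <= BIT_DStream_endOfBuffer) && (op < oend))
 *         *op++ = FSE_decodeSymbol(&DState, &DStream);
 *     // then verify BIT_endOfDStream(&DStream) and FSE_endOfDState(&DState) */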
/* *****************************************
* FSE unsafe API
*******************************************/
static unsigned char FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD);
/* faster, but works only if nbBits is always >= 1 (otherwise, result will be corrupted) */
/* *****************************************
* Implementation of inlined functions
*******************************************/
typedef struct {
int deltaFindState;
U32 deltaNbBits;
} FSE_symbolCompressionTransform; /* total 8 bytes */
MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct)
{
const void* ptr = ct;
const U16* u16ptr = (const U16*) ptr;
const U32 tableLog = MEM_read16(ptr);
statePtr->value = (ptrdiff_t)1<<tableLog;
statePtr->stateTable = u16ptr+2;
statePtr->symbolTT = ((const U32*)ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1));
statePtr->stateLog = tableLog;
}
/*! FSE_initCState2() :
* Same as FSE_initCState(), but the first symbol to include (which will be the last to be read)
* uses the smallest state value possible, saving the cost of this symbol */
MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U32 symbol)
{
FSE_initCState(statePtr, ct);
{ const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
const U16* stateTable = (const U16*)(statePtr->stateTable);
U32 nbBitsOut = (U32)((symbolTT.deltaNbBits + (1<<15)) >> 16);
statePtr->value = (nbBitsOut << 16) - symbolTT.deltaNbBits;
statePtr->value = stateTable[(statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
}
}
MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, U32 symbol)
{
const FSE_symbolCompressionTransform symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
const U16* const stateTable = (const U16*)(statePtr->stateTable);
U32 nbBitsOut = (U32)((statePtr->value + symbolTT.deltaNbBits) >> 16);
BIT_addBits(bitC, statePtr->value, nbBitsOut);
statePtr->value = stateTable[ (statePtr->value >> nbBitsOut) + symbolTT.deltaFindState];
}
MEM_STATIC void FSE_flushCState(BIT_CStream_t* bitC, const FSE_CState_t* statePtr)
{
BIT_addBits(bitC, statePtr->value, statePtr->stateLog);
BIT_flushBits(bitC);
}
/* ====== Decompression ====== */
typedef struct {
U16 tableLog;
U16 fastMode;
} FSE_DTableHeader; /* sizeof U32 */
typedef struct
{
unsigned short newState;
unsigned char symbol;
unsigned char nbBits;
} FSE_decode_t; /* size == U32 */
MEM_STATIC void FSE_initDState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD, const FSE_DTable* dt)
{
const void* ptr = dt;
const FSE_DTableHeader* const DTableH = (const FSE_DTableHeader*)ptr;
DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
BIT_reloadDStream(bitD);
DStatePtr->table = dt + 1;
}
MEM_STATIC BYTE FSE_peekSymbol(const FSE_DState_t* DStatePtr)
{
FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
return DInfo.symbol;
}
MEM_STATIC void FSE_updateState(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
U32 const nbBits = DInfo.nbBits;
size_t const lowBits = BIT_readBits(bitD, nbBits);
DStatePtr->state = DInfo.newState + lowBits;
}
MEM_STATIC BYTE FSE_decodeSymbol(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
U32 const nbBits = DInfo.nbBits;
BYTE const symbol = DInfo.symbol;
size_t const lowBits = BIT_readBits(bitD, nbBits);
DStatePtr->state = DInfo.newState + lowBits;
return symbol;
}
/*! FSE_decodeSymbolFast() :
unsafe, only works if no symbol has a probability > 50% */
MEM_STATIC BYTE FSE_decodeSymbolFast(FSE_DState_t* DStatePtr, BIT_DStream_t* bitD)
{
FSE_decode_t const DInfo = ((const FSE_decode_t*)(DStatePtr->table))[DStatePtr->state];
U32 const nbBits = DInfo.nbBits;
BYTE const symbol = DInfo.symbol;
size_t const lowBits = BIT_readBitsFast(bitD, nbBits);
DStatePtr->state = DInfo.newState + lowBits;
return symbol;
}
MEM_STATIC unsigned FSE_endOfDState(const FSE_DState_t* DStatePtr)
{
return DStatePtr->state == 0;
}
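/* Illustrative usage sketch (editor's addition, not part of the original file) :
 * decoding `originalSize` symbols with the state functions above. Assumes `dt`
 * was built beforehand (e.g. by FSE_buildDTable()) and that the caller knows
 * the regenerated size. The function name is hypothetical. */
MEM_STATIC size_t FSE_exampleDecode(BYTE* dst, size_t originalSize,
                                    const void* cSrc, size_t cSrcSize,
                                    const FSE_DTable* dt)
{
    BIT_DStream_t bitD;
    FSE_DState_t state;
    size_t n;
    if (FSE_isError(BIT_initDStream(&bitD, cSrc, cSrcSize))) return 0;
    FSE_initDState(&state, &bitD, dt);
    for (n = 0; n < originalSize; n++) {
        dst[n] = FSE_decodeSymbol(&state, &bitD);   /* reads bits, updates state */
        BIT_reloadDStream(&bitD);                   /* refill the local bit container */
    }
    return n;
}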
#ifndef FSE_COMMONDEFS_ONLY
/* **************************************************************
* Tuning parameters
****************************************************************/
/*!MEMORY_USAGE :
* Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
* Increasing memory usage improves compression ratio
* Reduced memory usage can improve speed, due to cache effect
* Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */
#ifndef FSE_MAX_MEMORY_USAGE
# define FSE_MAX_MEMORY_USAGE 14
#endif
#ifndef FSE_DEFAULT_MEMORY_USAGE
# define FSE_DEFAULT_MEMORY_USAGE 13
#endif
/*!FSE_MAX_SYMBOL_VALUE :
* Maximum symbol value authorized.
* Required for proper stack allocation */
#ifndef FSE_MAX_SYMBOL_VALUE
# define FSE_MAX_SYMBOL_VALUE 255
#endif
/* **************************************************************
* template functions type & suffix
****************************************************************/
#define FSE_FUNCTION_TYPE BYTE
#define FSE_FUNCTION_EXTENSION
#define FSE_DECODE_TYPE FSE_decode_t
#endif /* !FSE_COMMONDEFS_ONLY */
/* ***************************************************************
* Constants
*****************************************************************/
#define FSE_MAX_TABLELOG (FSE_MAX_MEMORY_USAGE-2)
#define FSE_MAX_TABLESIZE (1U<<FSE_MAX_TABLELOG)
#define FSE_MAXTABLESIZE_MASK (FSE_MAX_TABLESIZE-1)
#define FSE_DEFAULT_TABLELOG (FSE_DEFAULT_MEMORY_USAGE-2)
#define FSE_MIN_TABLELOG 5
#define FSE_TABLELOG_ABSOLUTE_MAX 15
#if FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX
# error "FSE_MAX_TABLELOG > FSE_TABLELOG_ABSOLUTE_MAX is not supported"
#endif
#define FSE_TABLESTEP(tableSize) ((tableSize>>1) + (tableSize>>3) + 3)
#endif /* FSE_H */


@ -0,0 +1,788 @@
/* ******************************************************************
FSE : Finite State Entropy encoder
Copyright (C) 2013-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
/* **************************************************************
* Compiler specifics
****************************************************************/
#define FORCE_INLINE static __always_inline
/* **************************************************************
* Includes
****************************************************************/
#include <linux/compiler.h>
#include <linux/string.h> /* memcpy, memset */
#include "bitstream.h"
#include "fse.h"
/* **************************************************************
* Error Management
****************************************************************/
#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
/* **************************************************************
* Templates
****************************************************************/
/*
designed to be included
for type-specific functions (template emulation in C)
Objective is to write these functions only once, for improved maintenance
*/
/* safety checks */
#ifndef FSE_FUNCTION_EXTENSION
# error "FSE_FUNCTION_EXTENSION must be defined"
#endif
#ifndef FSE_FUNCTION_TYPE
# error "FSE_FUNCTION_TYPE must be defined"
#endif
/* Function names */
#define FSE_CAT(X,Y) X##Y
#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
/* Function templates */
/* FSE_buildCTable_wksp() :
* Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
 * wkspSize should be sized to handle the worst case, which is `(1 << max_tableLog) * sizeof(FSE_FUNCTION_TYPE)`
* workSpace must also be properly aligned with FSE_FUNCTION_TYPE requirements
*/
size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
{
U32 const tableSize = 1 << tableLog;
U32 const tableMask = tableSize - 1;
void* const ptr = ct;
U16* const tableU16 = ( (U16*) ptr) + 2;
void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableLog ? tableSize>>1 : 1) ;
FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
U32 const step = FSE_TABLESTEP(tableSize);
U32 cumul[FSE_MAX_SYMBOL_VALUE+2];
FSE_FUNCTION_TYPE* const tableSymbol = (FSE_FUNCTION_TYPE*)workSpace;
U32 highThreshold = tableSize-1;
/* CTable header */
if (((size_t)1 << tableLog) * sizeof(FSE_FUNCTION_TYPE) > wkspSize) return ERROR(tableLog_tooLarge);
tableU16[-2] = (U16) tableLog;
tableU16[-1] = (U16) maxSymbolValue;
/* For explanations on how to distribute symbol values over the table :
* http://fastcompression.blogspot.fr/2014/02/fse-distributing-symbol-values.html */
/* symbol start positions */
{ U32 u;
cumul[0] = 0;
for (u=1; u<=maxSymbolValue+1; u++) {
if (normalizedCounter[u-1]==-1) { /* Low proba symbol */
cumul[u] = cumul[u-1] + 1;
tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
} else {
cumul[u] = cumul[u-1] + normalizedCounter[u-1];
} }
cumul[maxSymbolValue+1] = tableSize+1;
}
/* Spread symbols */
{ U32 position = 0;
U32 symbol;
for (symbol=0; symbol<=maxSymbolValue; symbol++) {
int nbOccurences;
for (nbOccurences=0; nbOccurences<normalizedCounter[symbol]; nbOccurences++) {
tableSymbol[position] = (FSE_FUNCTION_TYPE)symbol;
position = (position + step) & tableMask;
while (position > highThreshold) position = (position + step) & tableMask; /* Low proba area */
} }
if (position!=0) return ERROR(GENERIC); /* Must have gone through all positions */
}
/* Build table */
{ U32 u; for (u=0; u<tableSize; u++) {
FSE_FUNCTION_TYPE s = tableSymbol[u]; /* note : static analyzer may not understand tableSymbol is properly initialized */
tableU16[cumul[s]++] = (U16) (tableSize+u); /* TableU16 : sorted by symbol order; gives next state value */
} }
/* Build Symbol Transformation Table */
{ unsigned total = 0;
unsigned s;
for (s=0; s<=maxSymbolValue; s++) {
switch (normalizedCounter[s])
{
case 0: break;
case -1:
case 1:
symbolTT[s].deltaNbBits = (tableLog << 16) - (1<<tableLog);
symbolTT[s].deltaFindState = total - 1;
total ++;
break;
default :
{
U32 const maxBitsOut = tableLog - BIT_highbit32 (normalizedCounter[s]-1);
U32 const minStatePlus = normalizedCounter[s] << maxBitsOut;
symbolTT[s].deltaNbBits = (maxBitsOut << 16) - minStatePlus;
symbolTT[s].deltaFindState = total - normalizedCounter[s];
total += normalizedCounter[s];
} } } }
return 0;
}
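/* Illustrative usage sketch (editor's addition, not part of the original file) :
 * calling FSE_buildCTable_wksp() with a workspace sized per the rule documented
 * above. Stack use is for brevity; the function name is hypothetical. */
static size_t FSE_exampleBuildCTable(FSE_CTable* ct, const short* norm,
                                     unsigned maxSymbolValue, unsigned tableLog)
{
    FSE_FUNCTION_TYPE wksp[1 << FSE_MAX_TABLELOG];   /* worst case : 1<<tableLog entries */
    return FSE_buildCTable_wksp(ct, norm, maxSymbolValue, tableLog, wksp, sizeof(wksp));
}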
#ifndef FSE_COMMONDEFS_ONLY
/*-**************************************************************
* FSE NCount encoding-decoding
****************************************************************/
size_t FSE_NCountWriteBound(unsigned maxSymbolValue, unsigned tableLog)
{
size_t const maxHeaderSize = (((maxSymbolValue+1) * tableLog) >> 3) + 3;
return maxSymbolValue ? maxHeaderSize : FSE_NCOUNTBOUND; /* maxSymbolValue==0 ? use default */
}
static size_t FSE_writeNCount_generic (void* header, size_t headerBufferSize,
const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog,
unsigned writeIsSafe)
{
BYTE* const ostart = (BYTE*) header;
BYTE* out = ostart;
BYTE* const oend = ostart + headerBufferSize;
int nbBits;
const int tableSize = 1 << tableLog;
int remaining;
int threshold;
U32 bitStream;
int bitCount;
unsigned charnum = 0;
int previous0 = 0;
bitStream = 0;
bitCount = 0;
/* Table Size */
bitStream += (tableLog-FSE_MIN_TABLELOG) << bitCount;
bitCount += 4;
/* Init */
remaining = tableSize+1; /* +1 for extra accuracy */
threshold = tableSize;
nbBits = tableLog+1;
while (remaining>1) { /* stops at 1 */
if (previous0) {
unsigned start = charnum;
while (!normalizedCounter[charnum]) charnum++;
while (charnum >= start+24) {
start+=24;
bitStream += 0xFFFFU << bitCount;
if ((!writeIsSafe) && (out > oend-2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */
out[0] = (BYTE) bitStream;
out[1] = (BYTE)(bitStream>>8);
out+=2;
bitStream>>=16;
}
while (charnum >= start+3) {
start+=3;
bitStream += 3 << bitCount;
bitCount += 2;
}
bitStream += (charnum-start) << bitCount;
bitCount += 2;
if (bitCount>16) {
if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */
out[0] = (BYTE)bitStream;
out[1] = (BYTE)(bitStream>>8);
out += 2;
bitStream >>= 16;
bitCount -= 16;
} }
{ int count = normalizedCounter[charnum++];
int const max = (2*threshold-1)-remaining;
remaining -= count < 0 ? -count : count;
count++; /* +1 for extra accuracy */
if (count>=threshold) count += max; /* [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ */
bitStream += count << bitCount;
bitCount += nbBits;
bitCount -= (count<max);
previous0 = (count==1);
if (remaining<1) return ERROR(GENERIC);
while (remaining<threshold) nbBits--, threshold>>=1;
}
if (bitCount>16) {
if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */
out[0] = (BYTE)bitStream;
out[1] = (BYTE)(bitStream>>8);
out += 2;
bitStream >>= 16;
bitCount -= 16;
} }
/* flush remaining bitStream */
if ((!writeIsSafe) && (out > oend - 2)) return ERROR(dstSize_tooSmall); /* Buffer overflow */
out[0] = (BYTE)bitStream;
out[1] = (BYTE)(bitStream>>8);
out+= (bitCount+7) /8;
if (charnum > maxSymbolValue + 1) return ERROR(GENERIC);
return (out-ostart);
}
size_t FSE_writeNCount (void* buffer, size_t bufferSize, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
{
if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported */
if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported */
if (bufferSize < FSE_NCountWriteBound(maxSymbolValue, tableLog))
return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 0);
return FSE_writeNCount_generic(buffer, bufferSize, normalizedCounter, maxSymbolValue, tableLog, 1);
}
/*-**************************************************************
* Counting histogram
****************************************************************/
/*! FSE_count_simple
	This function counts byte values within `src`, and stores the histogram into table `count`.
It doesn't use any additional memory.
But this function is unsafe : it doesn't check that all values within `src` can fit into `count`.
For this reason, prefer using a table `count` with 256 elements.
@return : count of most numerous element
*/
size_t FSE_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
const void* src, size_t srcSize)
{
const BYTE* ip = (const BYTE*)src;
const BYTE* const end = ip + srcSize;
unsigned maxSymbolValue = *maxSymbolValuePtr;
unsigned max=0;
memset(count, 0, (maxSymbolValue+1)*sizeof(*count));
if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }
while (ip<end) count[*ip++]++;
while (!count[maxSymbolValue]) maxSymbolValue--;
*maxSymbolValuePtr = maxSymbolValue;
{ U32 s; for (s=0; s<=maxSymbolValue; s++) if (count[s] > max) max = count[s]; }
return (size_t)max;
}
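/* Illustrative usage sketch (editor's addition, not part of the original file) :
 * counting with a full 256-entry table, per the safety note above. The
 * function name is hypothetical. */
static size_t FSE_exampleCount(unsigned count[256], const void* src, size_t srcSize)
{
    unsigned maxSymbolValue = 255;   /* 256 entries => any byte value fits */
    size_t const maxCount = FSE_count_simple(count, &maxSymbolValue, src, srcSize);
    /* maxCount == srcSize would indicate a single-symbol (rle) input */
    return maxCount;
}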
/* FSE_count_parallel_wksp() :
* Same as FSE_count_parallel(), but using an externally provided scratch buffer.
 * `workSpace` size must be a minimum of `1024 * sizeof(unsigned)` */
static size_t FSE_count_parallel_wksp(
unsigned* count, unsigned* maxSymbolValuePtr,
const void* source, size_t sourceSize,
unsigned checkMax, unsigned* const workSpace)
{
const BYTE* ip = (const BYTE*)source;
const BYTE* const iend = ip+sourceSize;
unsigned maxSymbolValue = *maxSymbolValuePtr;
unsigned max=0;
U32* const Counting1 = workSpace;
U32* const Counting2 = Counting1 + 256;
U32* const Counting3 = Counting2 + 256;
U32* const Counting4 = Counting3 + 256;
memset(Counting1, 0, 4*256*sizeof(unsigned));
/* safety checks */
if (!sourceSize) {
memset(count, 0, maxSymbolValue + 1);
*maxSymbolValuePtr = 0;
return 0;
}
if (!maxSymbolValue) maxSymbolValue = 255; /* 0 == default */
/* by stripes of 16 bytes */
{ U32 cached = MEM_read32(ip); ip += 4;
while (ip < iend-15) {
U32 c = cached; cached = MEM_read32(ip); ip += 4;
Counting1[(BYTE) c ]++;
Counting2[(BYTE)(c>>8) ]++;
Counting3[(BYTE)(c>>16)]++;
Counting4[ c>>24 ]++;
c = cached; cached = MEM_read32(ip); ip += 4;
Counting1[(BYTE) c ]++;
Counting2[(BYTE)(c>>8) ]++;
Counting3[(BYTE)(c>>16)]++;
Counting4[ c>>24 ]++;
c = cached; cached = MEM_read32(ip); ip += 4;
Counting1[(BYTE) c ]++;
Counting2[(BYTE)(c>>8) ]++;
Counting3[(BYTE)(c>>16)]++;
Counting4[ c>>24 ]++;
c = cached; cached = MEM_read32(ip); ip += 4;
Counting1[(BYTE) c ]++;
Counting2[(BYTE)(c>>8) ]++;
Counting3[(BYTE)(c>>16)]++;
Counting4[ c>>24 ]++;
}
ip-=4;
}
/* finish last symbols */
while (ip<iend) Counting1[*ip++]++;
if (checkMax) { /* verify stats will fit into destination table */
U32 s; for (s=255; s>maxSymbolValue; s--) {
Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall);
} }
{ U32 s; for (s=0; s<=maxSymbolValue; s++) {
count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];
if (count[s] > max) max = count[s];
} }
while (!count[maxSymbolValue]) maxSymbolValue--;
*maxSymbolValuePtr = maxSymbolValue;
return (size_t)max;
}
/* FSE_countFast_wksp() :
* Same as FSE_countFast(), but using an externally provided scratch buffer.
 * `workSpace` must be a table of at least `1024` unsigned */
size_t FSE_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
const void* source, size_t sourceSize, unsigned* workSpace)
{
if (sourceSize < 1500) return FSE_count_simple(count, maxSymbolValuePtr, source, sourceSize);
return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace);
}
/* FSE_count_wksp() :
* Same as FSE_count(), but using an externally provided scratch buffer.
 * `workSpace` must be a table of at least `1024` unsigned */
size_t FSE_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
const void* source, size_t sourceSize, unsigned* workSpace)
{
if (*maxSymbolValuePtr < 255)
return FSE_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 1, workSpace);
*maxSymbolValuePtr = 255;
return FSE_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace);
}
/*-**************************************************************
* FSE Compression Code
****************************************************************/
/*! FSE_sizeof_CTable() :
FSE_CTable is a variable size structure which contains :
`U16 tableLog;`
`U16 maxSymbolValue;`
`U16 nextStateNumber[1 << tableLog];` // This size is variable
`FSE_symbolCompressionTransform symbolTT[maxSymbolValue+1];` // This size is variable
Allocation is manual (C standard does not support variable-size structures).
*/
size_t FSE_sizeof_CTable (unsigned maxSymbolValue, unsigned tableLog)
{
if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
return FSE_CTABLE_SIZE_U32 (tableLog, maxSymbolValue) * sizeof(U32);
}
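/* Illustrative sketch (editor's addition, not part of the original file) :
 * sizing an allocation from the helper above (user-space style, for
 * illustration only; variable names are hypothetical) :
 *     size_t const ctSize = FSE_sizeof_CTable(maxSymbolValue, tableLog);
 *     FSE_CTable* const ct = (FSE_CTable*) malloc(ctSize);
 */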
/* provides the minimum logSize to safely represent a distribution */
static unsigned FSE_minTableLog(size_t srcSize, unsigned maxSymbolValue)
{
U32 minBitsSrc = BIT_highbit32((U32)(srcSize - 1)) + 1;
U32 minBitsSymbols = BIT_highbit32(maxSymbolValue) + 2;
U32 minBits = minBitsSrc < minBitsSymbols ? minBitsSrc : minBitsSymbols;
return minBits;
}
unsigned FSE_optimalTableLog_internal(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, unsigned minus)
{
U32 maxBitsSrc = BIT_highbit32((U32)(srcSize - 1)) - minus;
U32 tableLog = maxTableLog;
U32 minBits = FSE_minTableLog(srcSize, maxSymbolValue);
if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
if (maxBitsSrc < tableLog) tableLog = maxBitsSrc; /* Accuracy can be reduced */
if (minBits > tableLog) tableLog = minBits; /* Need a minimum to safely represent all symbol values */
if (tableLog < FSE_MIN_TABLELOG) tableLog = FSE_MIN_TABLELOG;
if (tableLog > FSE_MAX_TABLELOG) tableLog = FSE_MAX_TABLELOG;
return tableLog;
}
unsigned FSE_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
{
return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 2);
}
/* Secondary normalization method.
To be used when primary method fails. */
static size_t FSE_normalizeM2(short* norm, U32 tableLog, const unsigned* count, size_t total, U32 maxSymbolValue)
{
short const NOT_YET_ASSIGNED = -2;
U32 s;
U32 distributed = 0;
U32 ToDistribute;
/* Init */
U32 const lowThreshold = (U32)(total >> tableLog);
U32 lowOne = (U32)((total * 3) >> (tableLog + 1));
for (s=0; s<=maxSymbolValue; s++) {
if (count[s] == 0) {
norm[s]=0;
continue;
}
if (count[s] <= lowThreshold) {
norm[s] = -1;
distributed++;
total -= count[s];
continue;
}
if (count[s] <= lowOne) {
norm[s] = 1;
distributed++;
total -= count[s];
continue;
}
norm[s]=NOT_YET_ASSIGNED;
}
ToDistribute = (1 << tableLog) - distributed;
if ((total / ToDistribute) > lowOne) {
/* risk of rounding to zero */
lowOne = (U32)((total * 3) / (ToDistribute * 2));
for (s=0; s<=maxSymbolValue; s++) {
if ((norm[s] == NOT_YET_ASSIGNED) && (count[s] <= lowOne)) {
norm[s] = 1;
distributed++;
total -= count[s];
continue;
} }
ToDistribute = (1 << tableLog) - distributed;
}
if (distributed == maxSymbolValue+1) {
/* all values are pretty poor;
probably incompressible data (should have already been detected);
find max, then give all remaining points to max */
U32 maxV = 0, maxC = 0;
for (s=0; s<=maxSymbolValue; s++)
if (count[s] > maxC) maxV=s, maxC=count[s];
norm[maxV] += (short)ToDistribute;
return 0;
}
if (total == 0) {
		/* all symbols were already assigned via the lowOne or lowThreshold paths above */
for (s=0; ToDistribute > 0; s = (s+1)%(maxSymbolValue+1))
if (norm[s] > 0) ToDistribute--, norm[s]++;
return 0;
}
{ U64 const vStepLog = 62 - tableLog;
U64 const mid = (1ULL << (vStepLog-1)) - 1;
U64 const rStep = ((((U64)1<<vStepLog) * ToDistribute) + mid) / total; /* scale on remaining */
U64 tmpTotal = mid;
for (s=0; s<=maxSymbolValue; s++) {
if (norm[s]==NOT_YET_ASSIGNED) {
U64 const end = tmpTotal + (count[s] * rStep);
U32 const sStart = (U32)(tmpTotal >> vStepLog);
U32 const sEnd = (U32)(end >> vStepLog);
U32 const weight = sEnd - sStart;
if (weight < 1)
return ERROR(GENERIC);
norm[s] = (short)weight;
tmpTotal = end;
} } }
return 0;
}
size_t FSE_normalizeCount (short* normalizedCounter, unsigned tableLog,
const unsigned* count, size_t total,
unsigned maxSymbolValue)
{
/* Sanity checks */
if (tableLog==0) tableLog = FSE_DEFAULT_TABLELOG;
if (tableLog < FSE_MIN_TABLELOG) return ERROR(GENERIC); /* Unsupported size */
if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge); /* Unsupported size */
if (tableLog < FSE_minTableLog(total, maxSymbolValue)) return ERROR(GENERIC); /* Too small tableLog, compression potentially impossible */
{ U32 const rtbTable[] = { 0, 473195, 504333, 520860, 550000, 700000, 750000, 830000 };
U64 const scale = 62 - tableLog;
U64 const step = ((U64)1<<62) / total; /* <== here, one division ! */
U64 const vStep = 1ULL<<(scale-20);
int stillToDistribute = 1<<tableLog;
unsigned s;
unsigned largest=0;
short largestP=0;
U32 lowThreshold = (U32)(total >> tableLog);
for (s=0; s<=maxSymbolValue; s++) {
if (count[s] == total) return 0; /* rle special case */
if (count[s] == 0) { normalizedCounter[s]=0; continue; }
if (count[s] <= lowThreshold) {
normalizedCounter[s] = -1;
stillToDistribute--;
} else {
short proba = (short)((count[s]*step) >> scale);
if (proba<8) {
U64 restToBeat = vStep * rtbTable[proba];
proba += (count[s]*step) - ((U64)proba<<scale) > restToBeat;
}
if (proba > largestP) largestP=proba, largest=s;
normalizedCounter[s] = proba;
stillToDistribute -= proba;
} }
if (-stillToDistribute >= (normalizedCounter[largest] >> 1)) {
/* corner case, need another normalization method */
size_t const errorCode = FSE_normalizeM2(normalizedCounter, tableLog, count, total, maxSymbolValue);
if (FSE_isError(errorCode)) return errorCode;
}
else normalizedCounter[largest] += (short)stillToDistribute;
}
#if 0
{ /* Print Table (debug) */
U32 s;
U32 nTotal = 0;
for (s=0; s<=maxSymbolValue; s++)
printf("%3i: %4i \n", s, normalizedCounter[s]);
for (s=0; s<=maxSymbolValue; s++)
nTotal += abs(normalizedCounter[s]);
if (nTotal != (1U<<tableLog))
printf("Warning !!! Total == %u != %u !!!", nTotal, 1U<<tableLog);
getchar();
}
#endif
return tableLog;
}
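/* Illustrative usage sketch (editor's addition, not part of the original file) :
 * choosing a tableLog, then normalizing a histogram before building a CTable.
 * The function name is hypothetical. */
static size_t FSE_exampleNormalize(short* norm, const unsigned* count,
                                   size_t total, unsigned maxSymbolValue)
{
    unsigned const tableLog = FSE_optimalTableLog(0 /*default*/, total, maxSymbolValue);
    /* on success, returns the tableLog actually used; otherwise an error code */
    return FSE_normalizeCount(norm, tableLog, count, total, maxSymbolValue);
}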
/* fake FSE_CTable, for raw (uncompressed) input */
size_t FSE_buildCTable_raw (FSE_CTable* ct, unsigned nbBits)
{
const unsigned tableSize = 1 << nbBits;
const unsigned tableMask = tableSize - 1;
const unsigned maxSymbolValue = tableMask;
void* const ptr = ct;
U16* const tableU16 = ( (U16*) ptr) + 2;
void* const FSCT = ((U32*)ptr) + 1 /* header */ + (tableSize>>1); /* assumption : tableLog >= 1 */
FSE_symbolCompressionTransform* const symbolTT = (FSE_symbolCompressionTransform*) (FSCT);
unsigned s;
/* Sanity checks */
if (nbBits < 1) return ERROR(GENERIC); /* min size */
/* header */
tableU16[-2] = (U16) nbBits;
tableU16[-1] = (U16) maxSymbolValue;
/* Build table */
for (s=0; s<tableSize; s++)
tableU16[s] = (U16)(tableSize + s);
/* Build Symbol Transformation Table */
{ const U32 deltaNbBits = (nbBits << 16) - (1 << nbBits);
for (s=0; s<=maxSymbolValue; s++) {
symbolTT[s].deltaNbBits = deltaNbBits;
symbolTT[s].deltaFindState = s-1;
} }
return 0;
}
/* fake FSE_CTable, for rle input (always same symbol) */
size_t FSE_buildCTable_rle (FSE_CTable* ct, BYTE symbolValue)
{
void* ptr = ct;
U16* tableU16 = ( (U16*) ptr) + 2;
void* FSCTptr = (U32*)ptr + 2;
FSE_symbolCompressionTransform* symbolTT = (FSE_symbolCompressionTransform*) FSCTptr;
/* header */
tableU16[-2] = (U16) 0;
tableU16[-1] = (U16) symbolValue;
/* Build table */
tableU16[0] = 0;
tableU16[1] = 0; /* just in case */
/* Build Symbol Transformation Table */
symbolTT[symbolValue].deltaNbBits = 0;
symbolTT[symbolValue].deltaFindState = 0;
return 0;
}
static size_t FSE_compress_usingCTable_generic (void* dst, size_t dstSize,
const void* src, size_t srcSize,
const FSE_CTable* ct, const unsigned fast)
{
const BYTE* const istart = (const BYTE*) src;
const BYTE* const iend = istart + srcSize;
const BYTE* ip=iend;
BIT_CStream_t bitC;
FSE_CState_t CState1, CState2;
/* init */
if (srcSize <= 2) return 0;
{ size_t const initError = BIT_initCStream(&bitC, dst, dstSize);
if (FSE_isError(initError)) return 0; /* not enough space available to write a bitstream */ }
#define FSE_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
if (srcSize & 1) {
FSE_initCState2(&CState1, ct, *--ip);
FSE_initCState2(&CState2, ct, *--ip);
FSE_encodeSymbol(&bitC, &CState1, *--ip);
FSE_FLUSHBITS(&bitC);
} else {
FSE_initCState2(&CState2, ct, *--ip);
FSE_initCState2(&CState1, ct, *--ip);
}
/* join to mod 4 */
srcSize -= 2;
if ((sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) && (srcSize & 2)) { /* test bit 2 */
FSE_encodeSymbol(&bitC, &CState2, *--ip);
FSE_encodeSymbol(&bitC, &CState1, *--ip);
FSE_FLUSHBITS(&bitC);
}
/* 2 or 4 encoding per loop */
while ( ip>istart ) {
FSE_encodeSymbol(&bitC, &CState2, *--ip);
if (sizeof(bitC.bitContainer)*8 < FSE_MAX_TABLELOG*2+7 ) /* this test must be static */
FSE_FLUSHBITS(&bitC);
FSE_encodeSymbol(&bitC, &CState1, *--ip);
if (sizeof(bitC.bitContainer)*8 > FSE_MAX_TABLELOG*4+7 ) { /* this test must be static */
FSE_encodeSymbol(&bitC, &CState2, *--ip);
FSE_encodeSymbol(&bitC, &CState1, *--ip);
}
FSE_FLUSHBITS(&bitC);
}
FSE_flushCState(&bitC, &CState2);
FSE_flushCState(&bitC, &CState1);
return BIT_closeCStream(&bitC);
}
size_t FSE_compress_usingCTable (void* dst, size_t dstSize,
const void* src, size_t srcSize,
const FSE_CTable* ct)
{
unsigned const fast = (dstSize >= FSE_BLOCKBOUND(srcSize));
if (fast)
return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 1);
else
return FSE_compress_usingCTable_generic(dst, dstSize, src, srcSize, ct, 0);
}
size_t FSE_compressBound(size_t size) { return FSE_COMPRESSBOUND(size); }
#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return f
#define CHECK_F(f) { CHECK_V_F(_var_err__, f); }
/* FSE_compress_wksp() :
* Same as FSE_compress2(), but using an externally allocated scratch buffer (`workSpace`).
 * `wkspSize` must be >= `FSE_WKSP_SIZE_U32(tableLog, maxSymbolValue)`.
*/
size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
{
BYTE* const ostart = (BYTE*) dst;
BYTE* op = ostart;
BYTE* const oend = ostart + dstSize;
U32 count[FSE_MAX_SYMBOL_VALUE+1];
S16 norm[FSE_MAX_SYMBOL_VALUE+1];
FSE_CTable* CTable = (FSE_CTable*)workSpace;
size_t const CTableSize = FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue);
void* scratchBuffer = (void*)(CTable + CTableSize);
size_t const scratchBufferSize = wkspSize - (CTableSize * sizeof(FSE_CTable));
/* init conditions */
if (wkspSize < FSE_WKSP_SIZE_U32(tableLog, maxSymbolValue)) return ERROR(tableLog_tooLarge);
if (srcSize <= 1) return 0; /* Not compressible */
if (!maxSymbolValue) maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
if (!tableLog) tableLog = FSE_DEFAULT_TABLELOG;
/* Scan input and build symbol stats */
{ CHECK_V_F(maxCount, FSE_count_wksp(count, &maxSymbolValue, src, srcSize, (unsigned*)scratchBuffer) );
if (maxCount == srcSize) return 1; /* only a single symbol in src : rle */
if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */
if (maxCount < (srcSize >> 7)) return 0; /* Heuristic : not compressible enough */
}
tableLog = FSE_optimalTableLog(tableLog, srcSize, maxSymbolValue);
CHECK_F( FSE_normalizeCount(norm, tableLog, count, srcSize, maxSymbolValue) );
/* Write table description header */
{ CHECK_V_F(nc_err, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
op += nc_err;
}
/* Compress */
CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, scratchBufferSize) );
{ CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, src, srcSize, CTable) );
if (cSize == 0) return 0; /* not enough space for compressed data */
op += cSize;
}
/* check compressibility */
if ( (size_t)(op-ostart) >= srcSize-1 ) return 0;
return op-ostart;
}
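/* Illustrative usage sketch (editor's addition, not part of the original file) :
 * one-shot compression with a caller-provided workspace, sized per the check
 * above. Stack use is for brevity; the function name is hypothetical. */
static size_t FSE_exampleCompress(void* dst, size_t dstSize,
                                  const void* src, size_t srcSize)
{
    U32 wksp[FSE_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];
    return FSE_compress_wksp(dst, dstSize, src, srcSize,
                             FSE_MAX_SYMBOL_VALUE, FSE_MAX_TABLELOG,
                             wksp, sizeof(wksp));
}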
#endif /* FSE_COMMONDEFS_ONLY */


@ -0,0 +1,292 @@
/* ******************************************************************
FSE : Finite State Entropy decoder
Copyright (C) 2013-2015, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
/* **************************************************************
* Compiler specifics
****************************************************************/
#define FORCE_INLINE static __always_inline
/* **************************************************************
* Includes
****************************************************************/
#include <linux/compiler.h>
#include <linux/string.h> /* memcpy, memset */
#include "bitstream.h"
#include "fse.h"
/* **************************************************************
* Error Management
****************************************************************/
#define FSE_isError ERR_isError
#define FSE_STATIC_ASSERT(c) { enum { FSE_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
/* check and forward error code */
#define CHECK_F(f) { size_t const e = f; if (FSE_isError(e)) return e; }
/* **************************************************************
* Templates
****************************************************************/
/*
designed to be included
for type-specific functions (template emulation in C)
Objective is to write these functions only once, for improved maintenance
*/
/* safety checks */
#ifndef FSE_FUNCTION_EXTENSION
# error "FSE_FUNCTION_EXTENSION must be defined"
#endif
#ifndef FSE_FUNCTION_TYPE
# error "FSE_FUNCTION_TYPE must be defined"
#endif
/* Function names */
#define FSE_CAT(X,Y) X##Y
#define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
#define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
/* Function templates */
size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
{
void* const tdPtr = dt+1; /* because *dt is unsigned, 32-bits aligned on 32-bits */
FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
U32 const maxSV1 = maxSymbolValue + 1;
U32 const tableSize = 1 << tableLog;
U32 highThreshold = tableSize-1;
/* Sanity Checks */
if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
/* Init, lay down lowprob symbols */
{ FSE_DTableHeader DTableH;
DTableH.tableLog = (U16)tableLog;
DTableH.fastMode = 1;
{ S16 const largeLimit= (S16)(1 << (tableLog-1));
U32 s;
for (s=0; s<maxSV1; s++) {
if (normalizedCounter[s]==-1) {
tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
symbolNext[s] = 1;
} else {
if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
symbolNext[s] = normalizedCounter[s];
} } }
memcpy(dt, &DTableH, sizeof(DTableH));
}
/* Spread symbols */
{ U32 const tableMask = tableSize-1;
U32 const step = FSE_TABLESTEP(tableSize);
U32 s, position = 0;
for (s=0; s<maxSV1; s++) {
int i;
for (i=0; i<normalizedCounter[s]; i++) {
tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
position = (position + step) & tableMask;
while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
} }
if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
}
/* Build Decoding table */
{ U32 u;
for (u=0; u<tableSize; u++) {
FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
U16 nextState = symbolNext[symbol]++;
tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32 ((U32)nextState) );
tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
} }
return 0;
}
#ifndef FSE_COMMONDEFS_ONLY
/*-*******************************************************
* Decompression (Byte symbols)
*********************************************************/
size_t FSE_buildDTable_rle (FSE_DTable* dt, BYTE symbolValue)
{
void* ptr = dt;
FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
void* dPtr = dt + 1;
FSE_decode_t* const cell = (FSE_decode_t*)dPtr;
DTableH->tableLog = 0;
DTableH->fastMode = 0;
cell->newState = 0;
cell->symbol = symbolValue;
cell->nbBits = 0;
return 0;
}
size_t FSE_buildDTable_raw (FSE_DTable* dt, unsigned nbBits)
{
void* ptr = dt;
FSE_DTableHeader* const DTableH = (FSE_DTableHeader*)ptr;
void* dPtr = dt + 1;
FSE_decode_t* const dinfo = (FSE_decode_t*)dPtr;
const unsigned tableSize = 1 << nbBits;
const unsigned tableMask = tableSize - 1;
const unsigned maxSV1 = tableMask+1;
unsigned s;
/* Sanity checks */
if (nbBits < 1) return ERROR(GENERIC); /* min size */
/* Build Decoding Table */
DTableH->tableLog = (U16)nbBits;
DTableH->fastMode = 1;
for (s=0; s<maxSV1; s++) {
dinfo[s].newState = 0;
dinfo[s].symbol = (BYTE)s;
dinfo[s].nbBits = (BYTE)nbBits;
}
return 0;
}
FORCE_INLINE size_t FSE_decompress_usingDTable_generic(
void* dst, size_t maxDstSize,
const void* cSrc, size_t cSrcSize,
const FSE_DTable* dt, const unsigned fast)
{
BYTE* const ostart = (BYTE*) dst;
BYTE* op = ostart;
BYTE* const omax = op + maxDstSize;
BYTE* const olimit = omax-3;
BIT_DStream_t bitD;
FSE_DState_t state1;
FSE_DState_t state2;
/* Init */
CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));
FSE_initDState(&state1, &bitD, dt);
FSE_initDState(&state2, &bitD, dt);
#define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
/* 4 symbols per loop */
for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) & (op<olimit) ; op+=4) {
op[0] = FSE_GETSYMBOL(&state1);
if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
BIT_reloadDStream(&bitD);
op[1] = FSE_GETSYMBOL(&state2);
if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
{ if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
op[2] = FSE_GETSYMBOL(&state1);
if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
BIT_reloadDStream(&bitD);
op[3] = FSE_GETSYMBOL(&state2);
}
/* tail */
/* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */
while (1) {
if (op>(omax-2)) return ERROR(dstSize_tooSmall);
*op++ = FSE_GETSYMBOL(&state1);
if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
*op++ = FSE_GETSYMBOL(&state2);
break;
}
if (op>(omax-2)) return ERROR(dstSize_tooSmall);
*op++ = FSE_GETSYMBOL(&state2);
if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
*op++ = FSE_GETSYMBOL(&state1);
break;
} }
return op-ostart;
}
size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
const void* cSrc, size_t cSrcSize,
const FSE_DTable* dt)
{
const void* ptr = dt;
const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
const U32 fastMode = DTableH->fastMode;
/* select fast mode (static) */
if (fastMode) return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 1);
return FSE_decompress_usingDTable_generic(dst, originalSize, cSrc, cSrcSize, dt, 0);
}
size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog)
{
const BYTE* const istart = (const BYTE*)cSrc;
const BYTE* ip = istart;
short counting[FSE_MAX_SYMBOL_VALUE+1];
unsigned tableLog;
unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
/* normal FSE decoding mode */
size_t const NCountLength = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
if (FSE_isError(NCountLength)) return NCountLength;
//if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong); /* too small input size; supposed to be already checked in NCountLength, only remaining case : NCountLength==cSrcSize */
if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
ip += NCountLength;
cSrcSize -= NCountLength;
CHECK_F( FSE_buildDTable (workSpace, counting, maxSymbolValue, tableLog) );
return FSE_decompress_usingDTable (dst, dstCapacity, ip, cSrcSize, workSpace); /* always return, even if it is an error code */
}
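/* Illustrative usage sketch (editor's addition, not part of the original file) :
 * one-shot decompression with a caller-provided DTable workspace. Stack use is
 * for brevity; the function name is hypothetical. */
static size_t FSE_exampleDecompress(void* dst, size_t dstCapacity,
                                    const void* cSrc, size_t cSrcSize)
{
    FSE_DTable wksp[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];
    return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, wksp, FSE_MAX_TABLELOG);
}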
#endif /* FSE_COMMONDEFS_ONLY */


@ -0,0 +1,203 @@
/* ******************************************************************
Huffman coder, part of New Generation Entropy library
header file
Copyright (C) 2013-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- Source repository : https://github.com/Cyan4973/FiniteStateEntropy
****************************************************************** */
#ifndef HUF_H_298734234
#define HUF_H_298734234
/* *** Dependencies *** */
#include <linux/types.h> /* size_t */
/* *** Tool functions *** */
#define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */
size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */
/* Error Management */
unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */
/* *** Advanced function *** */
/** HUF_compress4X_wksp() :
* Same as HUF_compress2(), but uses externally allocated `workSpace`, which must be a table of >= 1024 unsigned */
size_t HUF_compress4X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
/* *** Dependencies *** */
#include "mem.h" /* U32 */
/* *** Constants *** */
#define HUF_TABLELOG_MAX 12 /* max configured tableLog (for static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
#define HUF_TABLELOG_DEFAULT 11 /* tableLog by default, when not specified */
#define HUF_SYMBOLVALUE_MAX 255
#define HUF_TABLELOG_ABSOLUTEMAX 15 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
#if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
# error "HUF_TABLELOG_MAX is too large !"
#endif
/* ****************************************
* Static allocation
******************************************/
/* HUF buffer bounds */
#define HUF_CTABLEBOUND 129
#define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true if incompressible pre-filtered with fast heuristic */
#define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
/* static allocation of HUF's Compression Table */
#define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
U32 name##hb[maxSymbolValue+1]; \
void* name##hv = &(name##hb); \
HUF_CElt* name = (HUF_CElt*)(name##hv) /* no final ; */
/* static allocation of HUF's DTable */
typedef U32 HUF_DTable;
#define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<(maxTableLog)))
#define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) }
#define HUF_CREATE_STATIC_DTABLEX4(DTable, maxTableLog) \
HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) }
/* The workspace must have alignment at least 4 and be at least this large */
#define HUF_WORKSPACE_SIZE (6 << 10)
#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32))
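/* Illustrative usage sketch (editor's addition, not part of the original file) :
 * sizing the workspace for HUF_compress4X_wksp() with the constants above
 * (buffer names are hypothetical) :
 *     U32 workSpace[HUF_WORKSPACE_SIZE_U32];
 *     size_t const cSize = HUF_compress4X_wksp(dst, dstCapacity, src, srcSize,
 *                                  HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT,
 *                                  workSpace, sizeof(workSpace));
 *     if (HUF_isError(cSize)) { .. handle error .. }
 */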
/* ****************************************
* Advanced decompression functions
******************************************/
size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< decodes RLE and uncompressed */
size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */
size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */
size_t HUF_decompress4X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */
/* ****************************************
* HUF detailed API
******************************************/
/*!
HUF_compress() does the following:
1. count symbol occurrence from source[] into table count[] using FSE_count()
2. (optional) refine tableLog using HUF_optimalTableLog()
3. build Huffman table from count using HUF_buildCTable()
4. save Huffman table to memory buffer using HUF_writeCTable()
5. encode the data stream using HUF_compress4X_usingCTable()
The following API allows targeting specific sub-functions for advanced tasks.
For example, it's possible to compress several blocks using the same 'CTable',
or to save and regenerate 'CTable' using external methods.
*/
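/* Illustrative sketch (editor's addition, not part of the original file) :
 * the five steps above, spelled out with the detailed API declared below
 * (buffers, sizes and intermediate names are hypothetical; error checks omitted) :
 *     unsigned count[HUF_SYMBOLVALUE_MAX+1];
 *     unsigned maxSymbolValue = HUF_SYMBOLVALUE_MAX;
 *     U32 wksp[HUF_WORKSPACE_SIZE_U32];
 *     HUF_CREATE_STATIC_CTABLE(CTable, HUF_SYMBOLVALUE_MAX);
 *     FSE_count_wksp(count, &maxSymbolValue, src, srcSize, wksp);                  // 1
 *     { unsigned const huffLog =
 *           HUF_optimalTableLog(HUF_TABLELOG_DEFAULT, srcSize, maxSymbolValue);    // 2
 *       HUF_buildCTable_wksp(CTable, count, maxSymbolValue, huffLog,
 *                            wksp, sizeof(wksp));                                  // 3
 *       { size_t const hSize =
 *             HUF_writeCTable(dst, dstCapacity, CTable, maxSymbolValue, huffLog);  // 4
 *         HUF_compress4X_usingCTable((BYTE*)dst + hSize, dstCapacity - hSize,
 *                                    src, srcSize, CTable); } }                    // 5
 */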
/* FSE_count() : find it within "fse.h" */
unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
typedef struct HUF_CElt_s HUF_CElt; /* incomplete type */
size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
typedef enum {
HUF_repeat_none, /**< Cannot use the previous table */
HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
	HUF_repeat_valid /**< Can use the previous table and it is assumed to be valid */
} HUF_repeat;
/** HUF_compress4X_repeat() :
* Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
* If it uses hufTable it does not modify hufTable or repeat.
* If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
* If preferRepeat then the old table will always be used if valid. */
size_t HUF_compress4X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
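/* Illustrative sketch (editor's addition, not part of the original file) :
 * carrying a Huffman table across blocks (workSpace and hufTable are
 * hypothetical, pre-existing buffers) :
 *     HUF_repeat repeat = HUF_repeat_check;   // table inherited from the previous block
 *     size_t const cSize = HUF_compress4X_repeat(dst, dstCapacity, src, srcSize,
 *                                  maxSymbolValue, tableLog,
 *                                  workSpace, sizeof(workSpace),
 *                                  hufTable, &repeat, 0);   // preferRepeat=0
 */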
/** HUF_buildCTable_wksp() :
* Same as HUF_buildCTable(), but using externally allocated scratch buffer.
* `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned.
*/
size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize);
/*! HUF_readStats() :
Read compact Huffman tree, saved by HUF_writeCTable().
`huffWeight` is destination buffer.
@return : size read from `src` , or an error Code .
Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */
size_t HUF_readStats(BYTE* huffWeight, size_t hwSize, U32* rankStats,
U32* nbSymbolsPtr, U32* tableLogPtr,
const void* src, size_t srcSize);
/** HUF_readCTable() :
* Loading a CTable saved with HUF_writeCTable() */
size_t HUF_readCTable (HUF_CElt* CTable, unsigned maxSymbolValue, const void* src, size_t srcSize);
/*
HUF_decompress() does the following:
1. select the decompression algorithm (X2, X4) based on pre-computed heuristics
2. build Huffman table from save, using HUF_readDTableXn()
3. decode 1 or 4 segments in parallel using HUF_decompressSXn_usingDTable
*/
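/* Illustrative sketch (editor's addition, not part of the original file) :
 * the decode pipeline above, shown with the X2 (single-symbol) path
 * (buffers are hypothetical; error checks omitted) :
 *     HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
 *     U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);        // 1 : 0=X2, 1=X4
 *     size_t const hSize = HUF_readDTableX2(DTable, cSrc, cSrcSize);  // 2 : rebuild table
 *     HUF_decompress4X2_usingDTable(dst, dstSize,
 *                   (const BYTE*)cSrc + hSize, cSrcSize - hSize, DTable);  // 3 : decode
 */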
/** HUF_selectDecoder() :
* Tells which decoder is likely to decode faster,
* based on a set of pre-determined metrics.
* @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
* Assumption : 0 < cSrcSize < dstSize <= 128 KB */
U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);
size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);
size_t HUF_readDTableX4 (HUF_DTable* DTable, const void* src, size_t srcSize);
size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
size_t HUF_decompress4X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
/* single stream variants */
size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
/** HUF_compress1X_repeat() :
* Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
* If it uses hufTable it does not modify hufTable or repeat.
* If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
* If preferRepeat then the old table will always be used if valid. */
size_t HUF_compress1X_repeat(void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize, HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat); /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */
size_t HUF_decompress1X4_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */
size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); /**< automatic selection of single or double symbol decoder, based on DTable */
size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
size_t HUF_decompress1X4_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
#endif /* HUF_H_298734234 */


@ -0,0 +1,644 @@
/* ******************************************************************
Huffman encoder, part of New Generation Entropy library
Copyright (C) 2013-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
/* **************************************************************
* Includes
****************************************************************/
#include <linux/string.h> /* memcpy, memset */
#include "bitstream.h"
#include "fse.h" /* header compression */
#include "huf.h"
/* **************************************************************
* Error Management
****************************************************************/
#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
#define CHECK_V_F(e, f) size_t const e = f; if (ERR_isError(e)) return f
#define CHECK_F(f) { CHECK_V_F(_var_err__, f); }
/* **************************************************************
* Utils
****************************************************************/
unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue)
{
return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1);
}
/* *******************************************************
* HUF : Huffman block compression
*********************************************************/
/* HUF_compressWeights() :
* Same as FSE_compress(), but dedicated to huff0's weights compression.
* The use case needs much less stack memory.
* Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX.
*/
#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weightTable, size_t wtSize)
{
BYTE* const ostart = (BYTE*) dst;
BYTE* op = ostart;
BYTE* const oend = ostart + dstSize;
U32 maxSymbolValue = HUF_TABLELOG_MAX;
U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
BYTE scratchBuffer[1<<MAX_FSE_TABLELOG_FOR_HUFF_HEADER];
U32 count[HUF_TABLELOG_MAX+1];
S16 norm[HUF_TABLELOG_MAX+1];
/* init conditions */
if (wtSize <= 1) return 0; /* Not compressible */
/* Scan input and build symbol stats */
{ CHECK_V_F(maxCount, FSE_count_simple(count, &maxSymbolValue, weightTable, wtSize) );
if (maxCount == wtSize) return 1; /* only a single symbol in src : rle */
if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */
}
tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue) );
/* Write table description header */
{ CHECK_V_F(hSize, FSE_writeNCount(op, oend-op, norm, maxSymbolValue, tableLog) );
op += hSize;
}
/* Compress */
CHECK_F( FSE_buildCTable_wksp(CTable, norm, maxSymbolValue, tableLog, scratchBuffer, sizeof(scratchBuffer)) );
{ CHECK_V_F(cSize, FSE_compress_usingCTable(op, oend - op, weightTable, wtSize, CTable) );
if (cSize == 0) return 0; /* not enough space for compressed data */
op += cSize;
}
return op-ostart;
}
struct HUF_CElt_s {
U16 val;
BYTE nbBits;
}; /* typedef'd to HUF_CElt within "huf.h" */
/*! HUF_writeCTable() :
`CTable` : Huffman tree to save, using huf representation.
@return : size of saved CTable */
size_t HUF_writeCTable (void* dst, size_t maxDstSize,
const HUF_CElt* CTable, U32 maxSymbolValue, U32 huffLog)
{
BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */
BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
BYTE* op = (BYTE*)dst;
U32 n;
/* check conditions */
if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge);
/* convert to weight */
bitsToWeight[0] = 0;
for (n=1; n<huffLog+1; n++)
bitsToWeight[n] = (BYTE)(huffLog + 1 - n);
for (n=0; n<maxSymbolValue; n++)
huffWeight[n] = bitsToWeight[CTable[n].nbBits];
/* attempt weights compression by FSE */
{ CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, huffWeight, maxSymbolValue) );
if ((hSize>1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */
op[0] = (BYTE)hSize;
return hSize+1;
} }
/* write raw values as 4-bits (max : 15) */
if (maxSymbolValue > (256-128)) return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */
if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */
op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1));
huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */
for (n=0; n<maxSymbolValue; n+=2)
op[(n/2)+1] = (BYTE)((huffWeight[n] << 4) + huffWeight[n+1]);
return ((maxSymbolValue+1)/2) + 1;
}
size_t HUF_readCTable (HUF_CElt* CTable, U32 maxSymbolValue, const void* src, size_t srcSize)
{
BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */
U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */
U32 tableLog = 0;
U32 nbSymbols = 0;
/* get symbol weights */
CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));
/* check result */
if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
if (nbSymbols > maxSymbolValue+1) return ERROR(maxSymbolValue_tooSmall);
/* Prepare base value per rank */
{ U32 n, nextRankStart = 0;
for (n=1; n<=tableLog; n++) {
U32 current = nextRankStart;
nextRankStart += (rankVal[n] << (n-1));
rankVal[n] = current;
} }
/* fill nbBits */
{ U32 n; for (n=0; n<nbSymbols; n++) {
const U32 w = huffWeight[n];
CTable[n].nbBits = (BYTE)(tableLog + 1 - w);
} }
/* fill val */
{ U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0}; /* support w=0=>n=tableLog+1 */
U16 valPerRank[HUF_TABLELOG_MAX+2] = {0};
{ U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[CTable[n].nbBits]++; }
		/* determine starting value per rank */
valPerRank[tableLog+1] = 0; /* for w==0 */
{ U16 min = 0;
U32 n; for (n=tableLog; n>0; n--) { /* start at n=tablelog <-> w=1 */
valPerRank[n] = min; /* get starting value within each rank */
min += nbPerRank[n];
min >>= 1;
} }
/* assign value within rank, symbol order */
{ U32 n; for (n=0; n<=maxSymbolValue; n++) CTable[n].val = valPerRank[CTable[n].nbBits]++; }
}
return readSize;
}
typedef struct nodeElt_s {
U32 count;
U16 parent;
BYTE byte;
BYTE nbBits;
} nodeElt;
static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
{
const U32 largestBits = huffNode[lastNonNull].nbBits;
if (largestBits <= maxNbBits) return largestBits; /* early exit : no elt > maxNbBits */
/* there are several too large elements (at least >= 2) */
{ int totalCost = 0;
const U32 baseCost = 1 << (largestBits - maxNbBits);
U32 n = lastNonNull;
while (huffNode[n].nbBits > maxNbBits) {
totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
huffNode[n].nbBits = (BYTE)maxNbBits;
n --;
} /* n stops at huffNode[n].nbBits <= maxNbBits */
while (huffNode[n].nbBits == maxNbBits) n--; /* n end at index of smallest symbol using < maxNbBits */
/* renorm totalCost */
totalCost >>= (largestBits - maxNbBits); /* note : totalCost is necessarily a multiple of baseCost */
/* repay normalized cost */
{ U32 const noSymbol = 0xF0F0F0F0;
U32 rankLast[HUF_TABLELOG_MAX+2];
int pos;
/* Get pos of last (smallest) symbol per rank */
memset(rankLast, 0xF0, sizeof(rankLast));
{ U32 currentNbBits = maxNbBits;
for (pos=n ; pos >= 0; pos--) {
if (huffNode[pos].nbBits >= currentNbBits) continue;
currentNbBits = huffNode[pos].nbBits; /* < maxNbBits */
rankLast[maxNbBits-currentNbBits] = pos;
} }
while (totalCost > 0) {
U32 nBitsToDecrease = BIT_highbit32(totalCost) + 1;
for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
U32 highPos = rankLast[nBitsToDecrease];
U32 lowPos = rankLast[nBitsToDecrease-1];
if (highPos == noSymbol) continue;
if (lowPos == noSymbol) break;
{ U32 const highTotal = huffNode[highPos].count;
U32 const lowTotal = 2 * huffNode[lowPos].count;
if (highTotal <= lowTotal) break;
} }
/* only triggered when no rank-1 symbols are left => find the closest rank (note : there is necessarily at least one !) */
while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol)) /* HUF_TABLELOG_MAX test just to please gcc 5+; it should not be necessary */
nBitsToDecrease ++;
totalCost -= 1 << (nBitsToDecrease-1);
if (rankLast[nBitsToDecrease-1] == noSymbol)
rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]; /* this rank is no longer empty */
huffNode[rankLast[nBitsToDecrease]].nbBits ++;
if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */
rankLast[nBitsToDecrease] = noSymbol;
else {
rankLast[nBitsToDecrease]--;
if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease)
rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */
} } /* while (totalCost > 0) */
while (totalCost < 0) { /* Sometimes, cost correction overshoots */
if (rankLast[1] == noSymbol) { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
while (huffNode[n].nbBits == maxNbBits) n--;
huffNode[n+1].nbBits--;
rankLast[1] = n+1;
totalCost++;
continue;
}
huffNode[ rankLast[1] + 1 ].nbBits--;
rankLast[1]++;
totalCost ++;
} } } /* several elements are too large (at least 2) */
return maxNbBits;
}
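/* Worked example (illustrative, not part of the original source) : with
largestBits == 13 and maxNbBits == 11, baseCost == 1<<2 == 4. Truncating a
13-bit symbol to 11 bits adds 4 - (1<<0) == 3 cost units, a 12-bit one adds
4 - (1<<1) == 2. After totalCost >>= 2, one unit equals one table slot at the
maxNbBits scale, and lengthening a symbol sitting at maxNbBits - k bits by
one bit repays 1<<(k-1) units, which is exactly what the rankLast loop does. */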
typedef struct {
U32 base;
U32 current;
} rankPos;
static void HUF_sort(nodeElt* huffNode, const U32* count, U32 maxSymbolValue)
{
rankPos rank[32];
U32 n;
memset(rank, 0, sizeof(rank));
for (n=0; n<=maxSymbolValue; n++) {
U32 r = BIT_highbit32(count[n] + 1);
rank[r].base ++;
}
for (n=30; n>0; n--) rank[n-1].base += rank[n].base;
for (n=0; n<32; n++) rank[n].current = rank[n].base;
for (n=0; n<=maxSymbolValue; n++) {
U32 const c = count[n];
U32 const r = BIT_highbit32(c+1) + 1;
U32 pos = rank[r].current++;
while ((pos > rank[r].base) && (c > huffNode[pos-1].count)) huffNode[pos]=huffNode[pos-1], pos--;
huffNode[pos].count = c;
huffNode[pos].byte = (BYTE)n;
}
}
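/* Illustrative note (not part of the original source) : symbols are bucketed
by the highest set bit of (count+1), then insertion-sorted inside their
bucket; bucket bases are accumulated from high rank to low, so huffNode[]
ends up sorted by decreasing count. For instance counts {7, 130, 0, 9}
bucket as highbit(8)=3, highbit(131)=7, highbit(1)=0, highbit(10)=3. */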
/** HUF_buildCTable_wksp() :
* Same as HUF_buildCTable(), but using externally allocated scratch buffer.
* `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as a table of 1024 unsigned.
*/
#define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
typedef nodeElt huffNodeTable[2*HUF_SYMBOLVALUE_MAX+1 +1];
size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
{
nodeElt* const huffNode0 = (nodeElt*)workSpace;
nodeElt* const huffNode = huffNode0+1;
U32 n, nonNullRank;
int lowS, lowN;
U16 nodeNb = STARTNODE;
U32 nodeRoot;
/* safety checks */
if (wkspSize < sizeof(huffNodeTable)) return ERROR(GENERIC); /* workSpace is not large enough */
if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(GENERIC);
memset(huffNode0, 0, sizeof(huffNodeTable));
/* sort, decreasing order */
HUF_sort(huffNode, count, maxSymbolValue);
/* init for parents */
nonNullRank = maxSymbolValue;
while(huffNode[nonNullRank].count == 0) nonNullRank--;
lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb;
huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count;
huffNode[lowS].parent = huffNode[lowS-1].parent = nodeNb;
nodeNb++; lowS-=2;
for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30);
huffNode0[0].count = (U32)(1U<<31); /* fake entry, strong barrier */
/* create parents */
while (nodeNb <= nodeRoot) {
U32 n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
U32 n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++;
huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count;
huffNode[n1].parent = huffNode[n2].parent = nodeNb;
nodeNb++;
}
/* distribute weights (unlimited tree height) */
huffNode[nodeRoot].nbBits = 0;
for (n=nodeRoot-1; n>=STARTNODE; n--)
huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
for (n=0; n<=nonNullRank; n++)
huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;
/* enforce maxTableLog */
maxNbBits = HUF_setMaxHeight(huffNode, nonNullRank, maxNbBits);
/* fill result into tree (val, nbBits) */
{ U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};
if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */
for (n=0; n<=nonNullRank; n++)
nbPerRank[huffNode[n].nbBits]++;
/* determine stating value per rank */
{ U16 min = 0;
for (n=maxNbBits; n>0; n--) {
valPerRank[n] = min; /* get starting value within each rank */
min += nbPerRank[n];
min >>= 1;
} }
for (n=0; n<=maxSymbolValue; n++)
tree[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */
for (n=0; n<=maxSymbolValue; n++)
tree[n].val = valPerRank[tree[n].nbBits]++; /* assign value within rank, symbol order */
}
return maxNbBits;
}
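/* Illustrative usage sketch (not part of the original source) : build a
length-limited tree from caller-supplied statistics with a stack workspace,
so the library performs no allocation. 1024 U32 == sizeof(huffNodeTable). */
static size_t HUF_example_buildTree(HUF_CElt* tree, const U32* count, U32 maxSymbolValue)
{
    U32 workSpace[1024];
    return HUF_buildCTable_wksp(tree, count, maxSymbolValue,
                                HUF_TABLELOG_DEFAULT, workSpace, sizeof(workSpace));
}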
static size_t HUF_estimateCompressedSize(HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
{
size_t nbBits = 0;
int s;
for (s = 0; s <= (int)maxSymbolValue; ++s) {
nbBits += CTable[s].nbBits * count[s];
}
return nbBits >> 3;
}
static int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) {
int bad = 0;
int s;
for (s = 0; s <= (int)maxSymbolValue; ++s) {
bad |= (count[s] != 0) & (CTable[s].nbBits == 0);
}
return !bad;
}
static void HUF_encodeSymbol(BIT_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable)
{
BIT_addBitsFast(bitCPtr, CTable[symbol].val, CTable[symbol].nbBits);
}
size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); }
#define HUF_FLUSHBITS(s) (fast ? BIT_flushBitsFast(s) : BIT_flushBits(s))
#define HUF_FLUSHBITS_1(stream) \
if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*2+7) HUF_FLUSHBITS(stream)
#define HUF_FLUSHBITS_2(stream) \
if (sizeof((stream)->bitContainer)*8 < HUF_TABLELOG_MAX*4+7) HUF_FLUSHBITS(stream)
size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
{
const BYTE* ip = (const BYTE*) src;
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstSize;
BYTE* op = ostart;
size_t n;
const unsigned fast = (dstSize >= HUF_BLOCKBOUND(srcSize));
BIT_CStream_t bitC;
/* init */
if (dstSize < 8) return 0; /* not enough space to compress */
{ size_t const initErr = BIT_initCStream(&bitC, op, oend-op);
if (HUF_isError(initErr)) return 0; }
n = srcSize & ~3; /* n = largest multiple of 4 <= srcSize; trailing 1-3 symbols are encoded first */
switch (srcSize & 3)
{
case 3 : HUF_encodeSymbol(&bitC, ip[n+ 2], CTable);
HUF_FLUSHBITS_2(&bitC);
/* fall through */
case 2 : HUF_encodeSymbol(&bitC, ip[n+ 1], CTable);
HUF_FLUSHBITS_1(&bitC);
/* fall through */
case 1 : HUF_encodeSymbol(&bitC, ip[n+ 0], CTable);
HUF_FLUSHBITS(&bitC);
/* fall through */
case 0 :
default: ;
}
for (; n>0; n-=4) { /* note : n&3==0 at this stage */
HUF_encodeSymbol(&bitC, ip[n- 1], CTable);
HUF_FLUSHBITS_1(&bitC);
HUF_encodeSymbol(&bitC, ip[n- 2], CTable);
HUF_FLUSHBITS_2(&bitC);
HUF_encodeSymbol(&bitC, ip[n- 3], CTable);
HUF_FLUSHBITS_1(&bitC);
HUF_encodeSymbol(&bitC, ip[n- 4], CTable);
HUF_FLUSHBITS(&bitC);
}
return BIT_closeCStream(&bitC);
}
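/* Illustrative note (not part of the original source), assuming
HUF_TABLELOG_MAX == 12 : HUF_FLUSHBITS_1 only fires when the bit container
holds fewer than 31 bits (never for 32- or 64-bit containers) and
HUF_FLUSHBITS_2 when it holds fewer than 55 (32-bit only). So a 64-bit
container flushes once per 4 symbols (<= 48 fresh bits) and a 32-bit one
every 2 symbols (<= 24 fresh bits plus at most 7 carried bits), always below
capacity; the conditions are compile-time constants, so dead flushes vanish. */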
size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable)
{
size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */
const BYTE* ip = (const BYTE*) src;
const BYTE* const iend = ip + srcSize;
BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize;
BYTE* op = ostart;
if (dstSize < 6 + 1 + 1 + 1 + 8) return 0; /* minimum space to compress successfully */
if (srcSize < 12) return 0; /* no saving possible : too small input */
op += 6; /* jumpTable */
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) );
if (cSize==0) return 0;
MEM_writeLE16(ostart, (U16)cSize);
op += cSize;
}
ip += segmentSize;
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) );
if (cSize==0) return 0;
MEM_writeLE16(ostart+2, (U16)cSize);
op += cSize;
}
ip += segmentSize;
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, segmentSize, CTable) );
if (cSize==0) return 0;
MEM_writeLE16(ostart+4, (U16)cSize);
op += cSize;
}
ip += segmentSize;
{ CHECK_V_F(cSize, HUF_compress1X_usingCTable(op, oend-op, ip, iend-ip, CTable) );
if (cSize==0) return 0;
op += cSize;
}
return op-ostart;
}
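/* Illustrative layout (not part of the original source) : a 4-streams block
begins with a 6-byte jump table storing the compressed sizes of the first
three segments as little-endian U16 values; the fourth size is implied by
the total block size :
  [size1:2][size2:2][size3:2][stream1][stream2][stream3][stream4]
The decoder recomputes segmentSize = (dstSize+3)/4 independently, so segment
boundaries never need to be transmitted. */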
static size_t HUF_compressCTable_internal(
BYTE* const ostart, BYTE* op, BYTE* const oend,
const void* src, size_t srcSize,
unsigned singleStream, const HUF_CElt* CTable)
{
size_t const cSize = singleStream ?
HUF_compress1X_usingCTable(op, oend - op, src, srcSize, CTable) :
HUF_compress4X_usingCTable(op, oend - op, src, srcSize, CTable);
if (HUF_isError(cSize)) { return cSize; }
if (cSize==0) { return 0; } /* incompressible */
op += cSize;
/* check compressibility */
if ((size_t)(op-ostart) >= srcSize-1) { return 0; }
return op-ostart;
}
/* `workSpace` must be a table of at least 1024 unsigned */
static size_t HUF_compress_internal (
void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
unsigned singleStream,
void* workSpace, size_t wkspSize,
HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat)
{
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstSize;
BYTE* op = ostart;
U32* count;
size_t const countSize = sizeof(U32) * (HUF_SYMBOLVALUE_MAX + 1);
HUF_CElt* CTable;
size_t const CTableSize = sizeof(HUF_CElt) * (HUF_SYMBOLVALUE_MAX + 1);
/* checks & inits */
if (wkspSize < sizeof(huffNodeTable) + countSize + CTableSize) return ERROR(GENERIC);
if (!srcSize) return 0; /* Uncompressed (note : 1 means rle, so first byte must be correct) */
if (!dstSize) return 0; /* cannot fit within dst budget */
if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */
if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX;
if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT;
count = (U32*)workSpace;
workSpace = (BYTE*)workSpace + countSize;
wkspSize -= countSize;
CTable = (HUF_CElt*)workSpace;
workSpace = (BYTE*)workSpace + CTableSize;
wkspSize -= CTableSize;
/* Heuristic : if we don't need to check the validity of the old table, use it for small inputs */
if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
}
/* Scan input and build symbol stats */
{ CHECK_V_F(largest, FSE_count_wksp (count, &maxSymbolValue, (const BYTE*)src, srcSize, (U32*)workSpace) );
if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */
if (largest <= (srcSize >> 7)+1) return 0; /* Fast heuristic : not compressible enough */
}
/* Check validity of previous table */
if (repeat && *repeat == HUF_repeat_check && !HUF_validateCTable(oldHufTable, count, maxSymbolValue)) {
*repeat = HUF_repeat_none;
}
/* Heuristic : use existing table for small inputs */
if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
}
/* Build Huffman Tree */
huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
{ CHECK_V_F(maxBits, HUF_buildCTable_wksp (CTable, count, maxSymbolValue, huffLog, workSpace, wkspSize) );
huffLog = (U32)maxBits;
/* Zero the unused symbols so we can check it for validity */
memset(CTable + maxSymbolValue + 1, 0, CTableSize - (maxSymbolValue + 1) * sizeof(HUF_CElt));
}
/* Write table description header */
{ CHECK_V_F(hSize, HUF_writeCTable (op, dstSize, CTable, maxSymbolValue, huffLog) );
/* Check if using the previous table will be beneficial */
if (repeat && *repeat != HUF_repeat_none) {
size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, count, maxSymbolValue);
size_t const newSize = HUF_estimateCompressedSize(CTable, count, maxSymbolValue);
if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, oldHufTable);
}
}
/* Use the new table */
if (hSize + 12ul >= srcSize) { return 0; }
op += hSize;
if (repeat) { *repeat = HUF_repeat_none; }
if (oldHufTable) { memcpy(oldHufTable, CTable, CTableSize); } /* Save the new table */
}
return HUF_compressCTable_internal(ostart, op, oend, src, srcSize, singleStream, CTable);
}
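/* Worked example (illustrative, not part of the original source) : reusing
the previous table saves writing the hSize header bytes, so it wins whenever
oldSize <= hSize + newSize. E.g. hSize == 60, newSize == 9000,
oldSize == 9050 : 9050 <= 9060, so the old table is kept even though it
codes the payload 50 bytes worse. */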
size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
void* workSpace, size_t wkspSize)
{
return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, NULL, NULL, 0);
}
size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
void* workSpace, size_t wkspSize,
HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat)
{
return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 1 /* single stream */, workSpace, wkspSize, hufTable, repeat, preferRepeat);
}
size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
void* workSpace, size_t wkspSize)
{
return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, NULL, NULL, 0);
}
size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
void* workSpace, size_t wkspSize,
HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat)
{
return HUF_compress_internal(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, 0 /* 4 streams */, workSpace, wkspSize, hufTable, repeat, preferRepeat);
}
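/* Illustrative usage sketch (not part of the original source) : one-shot
4-streams compression with a stack workspace; 1536 U32 (6 KB) matches the
worst case of the checks in HUF_compress_internal, assuming a 4-byte
HUF_CElt. A zero return means the input is not compressible and should be
stored raw. */
static size_t HUF_example_compress(void* dst, size_t dstCapacity,
                                   const void* src, size_t srcSize)
{
    U32 workSpace[1536];
    return HUF_compress4X_wksp(dst, dstCapacity, src, srcSize,
                               HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT,
                               workSpace, sizeof(workSpace));
}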


@ -0,0 +1,835 @@
/* ******************************************************************
Huffman decoder, part of New Generation Entropy library
Copyright (C) 2013-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy
- Public forum : https://groups.google.com/forum/#!forum/lz4c
****************************************************************** */
/* **************************************************************
* Compiler specifics
****************************************************************/
#define FORCE_INLINE static __always_inline
/* **************************************************************
* Dependencies
****************************************************************/
#include <linux/compiler.h>
#include <linux/string.h> /* memcpy, memset */
#include "bitstream.h" /* BIT_* */
#include "fse.h" /* header compression */
#include "huf.h"
/* **************************************************************
* Error Management
****************************************************************/
#define HUF_STATIC_ASSERT(c) { enum { HUF_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
/*-***************************/
/* generic DTableDesc */
/*-***************************/
typedef struct { BYTE maxTableLog; BYTE tableType; BYTE tableLog; BYTE reserved; } DTableDesc;
static DTableDesc HUF_getDTableDesc(const HUF_DTable* table)
{
DTableDesc dtd;
memcpy(&dtd, table, sizeof(dtd));
return dtd;
}
/*-***************************/
/* single-symbol decoding */
/*-***************************/
typedef struct { BYTE byte; BYTE nbBits; } HUF_DEltX2; /* single-symbol decoding */
size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize)
{
BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1];
U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */
U32 tableLog = 0;
U32 nbSymbols = 0;
size_t iSize;
void* const dtPtr = DTable + 1;
HUF_DEltX2* const dt = (HUF_DEltX2*)dtPtr;
HUF_STATIC_ASSERT(sizeof(DTableDesc) == sizeof(HUF_DTable));
/* memset(huffWeight, 0, sizeof(huffWeight)); */ /* not necessary, even though some analyzers complain ... */
iSize = HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX + 1, rankVal, &nbSymbols, &tableLog, src, srcSize);
if (HUF_isError(iSize)) return iSize;
/* Table header */
{ DTableDesc dtd = HUF_getDTableDesc(DTable);
if (tableLog > (U32)(dtd.maxTableLog+1)) return ERROR(tableLog_tooLarge); /* DTable too small, Huffman tree cannot fit in */
dtd.tableType = 0;
dtd.tableLog = (BYTE)tableLog;
memcpy(DTable, &dtd, sizeof(dtd));
}
/* Calculate starting value for each rank */
{ U32 n, nextRankStart = 0;
for (n=1; n<tableLog+1; n++) {
U32 const current = nextRankStart;
nextRankStart += (rankVal[n] << (n-1));
rankVal[n] = current;
} }
/* fill DTable */
{ U32 n;
for (n=0; n<nbSymbols; n++) {
U32 const w = huffWeight[n];
U32 const length = (1 << w) >> 1;
U32 u;
HUF_DEltX2 D;
D.byte = (BYTE)n; D.nbBits = (BYTE)(tableLog + 1 - w);
for (u = rankVal[w]; u < rankVal[w] + length; u++)
dt[u] = D;
rankVal[w] += length;
} }
return iSize;
}
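/* Worked example (illustrative, not part of the original source) : with
tableLog == 3, a weight-2 symbol codes in 3+1-2 == 2 bits and fills
(1<<2)>>1 == 2 consecutive DTable cells, so any 3-bit lookup whose top 2
bits match its code decodes it in a single table read; rankVal[w] advances
by that length after each symbol so cells never overlap. */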
static BYTE HUF_decodeSymbolX2(BIT_DStream_t* Dstream, const HUF_DEltX2* dt, const U32 dtLog)
{
size_t const val = BIT_lookBitsFast(Dstream, dtLog); /* note : dtLog >= 1 */
BYTE const c = dt[val].byte;
BIT_skipBits(Dstream, dt[val].nbBits);
return c;
}
#define HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr) \
*ptr++ = HUF_decodeSymbolX2(DStreamPtr, dt, dtLog)
#define HUF_DECODE_SYMBOLX2_1(ptr, DStreamPtr) \
if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
#define HUF_DECODE_SYMBOLX2_2(ptr, DStreamPtr) \
if (MEM_64bits()) \
HUF_DECODE_SYMBOLX2_0(ptr, DStreamPtr)
FORCE_INLINE size_t HUF_decodeStreamX2(BYTE* p, BIT_DStream_t* const bitDPtr, BYTE* const pEnd, const HUF_DEltX2* const dt, const U32 dtLog)
{
BYTE* const pStart = p;
/* up to 4 symbols at a time */
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p <= pEnd-4)) {
HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
HUF_DECODE_SYMBOLX2_1(p, bitDPtr);
HUF_DECODE_SYMBOLX2_2(p, bitDPtr);
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
}
/* closer to the end */
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) && (p < pEnd))
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
/* no more data to retrieve from bitstream, hence no need to reload */
while (p < pEnd)
HUF_DECODE_SYMBOLX2_0(p, bitDPtr);
return pEnd-pStart;
}
static size_t HUF_decompress1X2_usingDTable_internal(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUF_DTable* DTable)
{
BYTE* op = (BYTE*)dst;
BYTE* const oend = op + dstSize;
const void* dtPtr = DTable + 1;
const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
BIT_DStream_t bitD;
DTableDesc const dtd = HUF_getDTableDesc(DTable);
U32 const dtLog = dtd.tableLog;
{ size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
if (HUF_isError(errorCode)) return errorCode; }
HUF_decodeStreamX2(op, &bitD, oend, dt, dtLog);
/* check */
if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
return dstSize;
}
size_t HUF_decompress1X2_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUF_DTable* DTable)
{
DTableDesc dtd = HUF_getDTableDesc(DTable);
if (dtd.tableType != 0) return ERROR(GENERIC);
return HUF_decompress1X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
}
size_t HUF_decompress1X2_DCtx (HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
const BYTE* ip = (const BYTE*) cSrc;
size_t const hSize = HUF_readDTableX2 (DCtx, cSrc, cSrcSize);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
return HUF_decompress1X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx);
}
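/* Illustrative usage sketch (not part of the original source), assuming the
HUF_CREATE_STATIC_DTABLEX2 helper declared in huf.h : decode a single-stream,
single-symbol blob whose header and bitstream are concatenated. */
static size_t HUF_example_decompress1X2(void* dst, size_t dstSize,
                                        const void* cSrc, size_t cSrcSize)
{
    HUF_CREATE_STATIC_DTABLEX2(DTable, HUF_TABLELOG_MAX);
    return HUF_decompress1X2_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
}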
static size_t HUF_decompress4X2_usingDTable_internal(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUF_DTable* DTable)
{
/* Check */
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
{ const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize;
const void* const dtPtr = DTable + 1;
const HUF_DEltX2* const dt = (const HUF_DEltX2*)dtPtr;
/* Init */
BIT_DStream_t bitD1;
BIT_DStream_t bitD2;
BIT_DStream_t bitD3;
BIT_DStream_t bitD4;
size_t const length1 = MEM_readLE16(istart);
size_t const length2 = MEM_readLE16(istart+2);
size_t const length3 = MEM_readLE16(istart+4);
size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
const BYTE* const istart1 = istart + 6; /* jumpTable */
const BYTE* const istart2 = istart1 + length1;
const BYTE* const istart3 = istart2 + length2;
const BYTE* const istart4 = istart3 + length3;
const size_t segmentSize = (dstSize+3) / 4;
BYTE* const opStart2 = ostart + segmentSize;
BYTE* const opStart3 = opStart2 + segmentSize;
BYTE* const opStart4 = opStart3 + segmentSize;
BYTE* op1 = ostart;
BYTE* op2 = opStart2;
BYTE* op3 = opStart3;
BYTE* op4 = opStart4;
U32 endSignal;
DTableDesc const dtd = HUF_getDTableDesc(DTable);
U32 const dtLog = dtd.tableLog;
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
{ size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1);
if (HUF_isError(errorCode)) return errorCode; }
{ size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2);
if (HUF_isError(errorCode)) return errorCode; }
{ size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3);
if (HUF_isError(errorCode)) return errorCode; }
{ size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4);
if (HUF_isError(errorCode)) return errorCode; }
/* 16-32 symbols per loop (4-8 symbols per stream) */
endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
for ( ; (endSignal==BIT_DStream_unfinished) && (op4<(oend-7)) ; ) {
HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
HUF_DECODE_SYMBOLX2_1(op1, &bitD1);
HUF_DECODE_SYMBOLX2_1(op2, &bitD2);
HUF_DECODE_SYMBOLX2_1(op3, &bitD3);
HUF_DECODE_SYMBOLX2_1(op4, &bitD4);
HUF_DECODE_SYMBOLX2_2(op1, &bitD1);
HUF_DECODE_SYMBOLX2_2(op2, &bitD2);
HUF_DECODE_SYMBOLX2_2(op3, &bitD3);
HUF_DECODE_SYMBOLX2_2(op4, &bitD4);
HUF_DECODE_SYMBOLX2_0(op1, &bitD1);
HUF_DECODE_SYMBOLX2_0(op2, &bitD2);
HUF_DECODE_SYMBOLX2_0(op3, &bitD3);
HUF_DECODE_SYMBOLX2_0(op4, &bitD4);
endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
}
/* check corruption */
if (op1 > opStart2) return ERROR(corruption_detected);
if (op2 > opStart3) return ERROR(corruption_detected);
if (op3 > opStart4) return ERROR(corruption_detected);
/* note : op4 is already verified within the main loop */
/* finish bitStreams one by one */
HUF_decodeStreamX2(op1, &bitD1, opStart2, dt, dtLog);
HUF_decodeStreamX2(op2, &bitD2, opStart3, dt, dtLog);
HUF_decodeStreamX2(op3, &bitD3, opStart4, dt, dtLog);
HUF_decodeStreamX2(op4, &bitD4, oend, dt, dtLog);
/* check */
endSignal = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
if (!endSignal) return ERROR(corruption_detected);
/* decoded size */
return dstSize;
}
}
size_t HUF_decompress4X2_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUF_DTable* DTable)
{
DTableDesc dtd = HUF_getDTableDesc(DTable);
if (dtd.tableType != 0) return ERROR(GENERIC);
return HUF_decompress4X2_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
}
size_t HUF_decompress4X2_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
const BYTE* ip = (const BYTE*) cSrc;
size_t const hSize = HUF_readDTableX2 (dctx, cSrc, cSrcSize);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
return HUF_decompress4X2_usingDTable_internal (dst, dstSize, ip, cSrcSize, dctx);
}
/* *************************/
/* double-symbols decoding */
/* *************************/
typedef struct { U16 sequence; BYTE nbBits; BYTE length; } HUF_DEltX4; /* double-symbols decoding */
typedef struct { BYTE symbol; BYTE weight; } sortedSymbol_t;
/* HUF_fillDTableX4Level2() :
* `rankValOrigin` must be a table of at least (HUF_TABLELOG_MAX + 1) U32 */
static void HUF_fillDTableX4Level2(HUF_DEltX4* DTable, U32 sizeLog, const U32 consumed,
const U32* rankValOrigin, const int minWeight,
const sortedSymbol_t* sortedSymbols, const U32 sortedListSize,
U32 nbBitsBaseline, U16 baseSeq)
{
HUF_DEltX4 DElt;
U32 rankVal[HUF_TABLELOG_MAX + 1];
/* get pre-calculated rankVal */
memcpy(rankVal, rankValOrigin, sizeof(rankVal));
/* fill skipped values */
if (minWeight>1) {
U32 i, skipSize = rankVal[minWeight];
MEM_writeLE16(&(DElt.sequence), baseSeq);
DElt.nbBits = (BYTE)(consumed);
DElt.length = 1;
for (i = 0; i < skipSize; i++)
DTable[i] = DElt;
}
/* fill DTable */
{ U32 s; for (s=0; s<sortedListSize; s++) { /* note : sortedSymbols already skipped */
const U32 symbol = sortedSymbols[s].symbol;
const U32 weight = sortedSymbols[s].weight;
const U32 nbBits = nbBitsBaseline - weight;
const U32 length = 1 << (sizeLog-nbBits);
const U32 start = rankVal[weight];
U32 i = start;
const U32 end = start + length;
MEM_writeLE16(&(DElt.sequence), (U16)(baseSeq + (symbol << 8)));
DElt.nbBits = (BYTE)(nbBits + consumed);
DElt.length = 2;
do { DTable[i++] = DElt; } while (i<end); /* since length >= 1 */
rankVal[weight] += length;
} }
}
typedef U32 rankVal_t[HUF_TABLELOG_MAX][HUF_TABLELOG_MAX + 1];
static void HUF_fillDTableX4(HUF_DEltX4* DTable, const U32 targetLog,
const sortedSymbol_t* sortedList, const U32 sortedListSize,
const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight,
const U32 nbBitsBaseline)
{
U32 rankVal[HUF_TABLELOG_MAX + 1];
const int scaleLog = nbBitsBaseline - targetLog; /* note : targetLog >= srcLog, hence scaleLog <= 1 */
const U32 minBits = nbBitsBaseline - maxWeight;
U32 s;
memcpy(rankVal, rankValOrigin, sizeof(rankVal));
/* fill DTable */
for (s=0; s<sortedListSize; s++) {
const U16 symbol = sortedList[s].symbol;
const U32 weight = sortedList[s].weight;
const U32 nbBits = nbBitsBaseline - weight;
const U32 start = rankVal[weight];
const U32 length = 1 << (targetLog-nbBits);
if (targetLog-nbBits >= minBits) { /* enough room for a second symbol */
U32 sortedRank;
int minWeight = nbBits + scaleLog;
if (minWeight < 1) minWeight = 1;
sortedRank = rankStart[minWeight];
HUF_fillDTableX4Level2(DTable+start, targetLog-nbBits, nbBits,
rankValOrigin[nbBits], minWeight,
sortedList+sortedRank, sortedListSize-sortedRank,
nbBitsBaseline, symbol);
} else {
HUF_DEltX4 DElt;
MEM_writeLE16(&(DElt.sequence), symbol);
DElt.nbBits = (BYTE)(nbBits);
DElt.length = 1;
{ U32 const end = start + length;
U32 u;
for (u = start; u < end; u++) DTable[u] = DElt;
} }
rankVal[weight] += length;
}
}
size_t HUF_readDTableX4 (HUF_DTable* DTable, const void* src, size_t srcSize)
{
BYTE weightList[HUF_SYMBOLVALUE_MAX + 1];
sortedSymbol_t sortedSymbol[HUF_SYMBOLVALUE_MAX + 1];
U32 rankStats[HUF_TABLELOG_MAX + 1] = { 0 };
U32 rankStart0[HUF_TABLELOG_MAX + 2] = { 0 };
U32* const rankStart = rankStart0+1;
rankVal_t rankVal;
U32 tableLog, maxW, sizeOfSort, nbSymbols;
DTableDesc dtd = HUF_getDTableDesc(DTable);
U32 const maxTableLog = dtd.maxTableLog;
size_t iSize;
void* dtPtr = DTable+1; /* force compiler to avoid strict-aliasing */
HUF_DEltX4* const dt = (HUF_DEltX4*)dtPtr;
HUF_STATIC_ASSERT(sizeof(HUF_DEltX4) == sizeof(HUF_DTable)); /* if compiler fails here, assertion is wrong */
if (maxTableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
/* memset(weightList, 0, sizeof(weightList)); */ /* not necessary, even though some analyzers complain ... */
iSize = HUF_readStats(weightList, HUF_SYMBOLVALUE_MAX + 1, rankStats, &nbSymbols, &tableLog, src, srcSize);
if (HUF_isError(iSize)) return iSize;
/* check result */
if (tableLog > maxTableLog) return ERROR(tableLog_tooLarge); /* DTable can't fit code depth */
/* find maxWeight */
for (maxW = tableLog; rankStats[maxW]==0; maxW--) {} /* necessarily finds a solution before 0 */
/* Get start index of each weight */
{ U32 w, nextRankStart = 0;
for (w=1; w<maxW+1; w++) {
U32 current = nextRankStart;
nextRankStart += rankStats[w];
rankStart[w] = current;
}
rankStart[0] = nextRankStart; /* put all 0-weight symbols at the end of the sorted list */
sizeOfSort = nextRankStart;
}
/* sort symbols by weight */
{ U32 s;
for (s=0; s<nbSymbols; s++) {
U32 const w = weightList[s];
U32 const r = rankStart[w]++;
sortedSymbol[r].symbol = (BYTE)s;
sortedSymbol[r].weight = (BYTE)w;
}
rankStart[0] = 0; /* forget 0-weight symbols; this is the beginning of weight 1 */
}
/* Build rankVal */
{ U32* const rankVal0 = rankVal[0];
{ int const rescale = (maxTableLog-tableLog) - 1; /* tableLog <= maxTableLog */
U32 nextRankVal = 0;
U32 w;
for (w=1; w<maxW+1; w++) {
U32 current = nextRankVal;
nextRankVal += rankStats[w] << (w+rescale);
rankVal0[w] = current;
} }
{ U32 const minBits = tableLog+1 - maxW;
U32 consumed;
for (consumed = minBits; consumed < maxTableLog - minBits + 1; consumed++) {
U32* const rankValPtr = rankVal[consumed];
U32 w;
for (w = 1; w < maxW+1; w++) {
rankValPtr[w] = rankVal0[w] >> consumed;
} } } }
HUF_fillDTableX4(dt, maxTableLog,
sortedSymbol, sizeOfSort,
rankStart0, rankVal, maxW,
tableLog+1);
dtd.tableLog = (BYTE)maxTableLog;
dtd.tableType = 1;
memcpy(DTable, &dtd, sizeof(dtd));
return iSize;
}
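/* Worked example (illustrative, not part of the original source) : decoding
a tableLog == 6 stream into a maxTableLog == 8 table gives
rescale == (8-6)-1 == 1, so a weight-w rank occupies rankStats[w] << (w+1)
cells at the first level; rankVal[consumed][w] == rankVal[0][w] >> consumed
then pre-divides those bases for every possible first-level prefix length. */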
static U32 HUF_decodeSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
{
size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
memcpy(op, dt+val, 2);
BIT_skipBits(DStream, dt[val].nbBits);
return dt[val].length;
}
static U32 HUF_decodeLastSymbolX4(void* op, BIT_DStream_t* DStream, const HUF_DEltX4* dt, const U32 dtLog)
{
size_t const val = BIT_lookBitsFast(DStream, dtLog); /* note : dtLog >= 1 */
memcpy(op, dt+val, 1);
if (dt[val].length==1) BIT_skipBits(DStream, dt[val].nbBits);
else {
if (DStream->bitsConsumed < (sizeof(DStream->bitContainer)*8)) {
BIT_skipBits(DStream, dt[val].nbBits);
if (DStream->bitsConsumed > (sizeof(DStream->bitContainer)*8))
DStream->bitsConsumed = (sizeof(DStream->bitContainer)*8); /* ugly hack; works only because it's the last symbol. Note : can't easily extract nbBits from just this symbol */
} }
return 1;
}
#define HUF_DECODE_SYMBOLX4_0(ptr, DStreamPtr) \
ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
#define HUF_DECODE_SYMBOLX4_1(ptr, DStreamPtr) \
if (MEM_64bits() || (HUF_TABLELOG_MAX<=12)) \
ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
#define HUF_DECODE_SYMBOLX4_2(ptr, DStreamPtr) \
if (MEM_64bits()) \
ptr += HUF_decodeSymbolX4(ptr, DStreamPtr, dt, dtLog)
FORCE_INLINE size_t HUF_decodeStreamX4(BYTE* p, BIT_DStream_t* bitDPtr, BYTE* const pEnd, const HUF_DEltX4* const dt, const U32 dtLog)
{
BYTE* const pStart = p;
/* up to 8 symbols at a time */
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p < pEnd-(sizeof(bitDPtr->bitContainer)-1))) {
HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
HUF_DECODE_SYMBOLX4_1(p, bitDPtr);
HUF_DECODE_SYMBOLX4_2(p, bitDPtr);
HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
}
/* closer to end : up to 2 symbols at a time */
while ((BIT_reloadDStream(bitDPtr) == BIT_DStream_unfinished) & (p <= pEnd-2))
HUF_DECODE_SYMBOLX4_0(p, bitDPtr);
while (p <= pEnd-2)
HUF_DECODE_SYMBOLX4_0(p, bitDPtr); /* no need to reload : reached the end of DStream */
if (p < pEnd)
p += HUF_decodeLastSymbolX4(p, bitDPtr, dt, dtLog);
return p-pStart;
}
static size_t HUF_decompress1X4_usingDTable_internal(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUF_DTable* DTable)
{
BIT_DStream_t bitD;
/* Init */
{ size_t const errorCode = BIT_initDStream(&bitD, cSrc, cSrcSize);
if (HUF_isError(errorCode)) return errorCode;
}
/* decode */
{ BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize;
const void* const dtPtr = DTable+1; /* force compiler to not use strict-aliasing */
const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr;
DTableDesc const dtd = HUF_getDTableDesc(DTable);
HUF_decodeStreamX4(ostart, &bitD, oend, dt, dtd.tableLog);
}
/* check */
if (!BIT_endOfDStream(&bitD)) return ERROR(corruption_detected);
/* decoded size */
return dstSize;
}
size_t HUF_decompress1X4_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUF_DTable* DTable)
{
DTableDesc dtd = HUF_getDTableDesc(DTable);
if (dtd.tableType != 1) return ERROR(GENERIC);
return HUF_decompress1X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
}
size_t HUF_decompress1X4_DCtx (HUF_DTable* DCtx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
const BYTE* ip = (const BYTE*) cSrc;
size_t const hSize = HUF_readDTableX4 (DCtx, cSrc, cSrcSize);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
return HUF_decompress1X4_usingDTable_internal (dst, dstSize, ip, cSrcSize, DCtx);
}
static size_t HUF_decompress4X4_usingDTable_internal(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUF_DTable* DTable)
{
if (cSrcSize < 10) return ERROR(corruption_detected); /* strict minimum : jump table + 1 byte per stream */
{ const BYTE* const istart = (const BYTE*) cSrc;
BYTE* const ostart = (BYTE*) dst;
BYTE* const oend = ostart + dstSize;
const void* const dtPtr = DTable+1;
const HUF_DEltX4* const dt = (const HUF_DEltX4*)dtPtr;
/* Init */
BIT_DStream_t bitD1;
BIT_DStream_t bitD2;
BIT_DStream_t bitD3;
BIT_DStream_t bitD4;
size_t const length1 = MEM_readLE16(istart);
size_t const length2 = MEM_readLE16(istart+2);
size_t const length3 = MEM_readLE16(istart+4);
size_t const length4 = cSrcSize - (length1 + length2 + length3 + 6);
const BYTE* const istart1 = istart + 6; /* jumpTable */
const BYTE* const istart2 = istart1 + length1;
const BYTE* const istart3 = istart2 + length2;
const BYTE* const istart4 = istart3 + length3;
size_t const segmentSize = (dstSize+3) / 4;
BYTE* const opStart2 = ostart + segmentSize;
BYTE* const opStart3 = opStart2 + segmentSize;
BYTE* const opStart4 = opStart3 + segmentSize;
BYTE* op1 = ostart;
BYTE* op2 = opStart2;
BYTE* op3 = opStart3;
BYTE* op4 = opStart4;
U32 endSignal;
DTableDesc const dtd = HUF_getDTableDesc(DTable);
U32 const dtLog = dtd.tableLog;
if (length4 > cSrcSize) return ERROR(corruption_detected); /* overflow */
{ size_t const errorCode = BIT_initDStream(&bitD1, istart1, length1);
if (HUF_isError(errorCode)) return errorCode; }
{ size_t const errorCode = BIT_initDStream(&bitD2, istart2, length2);
if (HUF_isError(errorCode)) return errorCode; }
{ size_t const errorCode = BIT_initDStream(&bitD3, istart3, length3);
if (HUF_isError(errorCode)) return errorCode; }
{ size_t const errorCode = BIT_initDStream(&bitD4, istart4, length4);
if (HUF_isError(errorCode)) return errorCode; }
/* 16-32 symbols per loop (4-8 symbols per stream) */
endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
for ( ; (endSignal==BIT_DStream_unfinished) & (op4<(oend-(sizeof(bitD4.bitContainer)-1))) ; ) {
HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
HUF_DECODE_SYMBOLX4_1(op1, &bitD1);
HUF_DECODE_SYMBOLX4_1(op2, &bitD2);
HUF_DECODE_SYMBOLX4_1(op3, &bitD3);
HUF_DECODE_SYMBOLX4_1(op4, &bitD4);
HUF_DECODE_SYMBOLX4_2(op1, &bitD1);
HUF_DECODE_SYMBOLX4_2(op2, &bitD2);
HUF_DECODE_SYMBOLX4_2(op3, &bitD3);
HUF_DECODE_SYMBOLX4_2(op4, &bitD4);
HUF_DECODE_SYMBOLX4_0(op1, &bitD1);
HUF_DECODE_SYMBOLX4_0(op2, &bitD2);
HUF_DECODE_SYMBOLX4_0(op3, &bitD3);
HUF_DECODE_SYMBOLX4_0(op4, &bitD4);
endSignal = BIT_reloadDStream(&bitD1) | BIT_reloadDStream(&bitD2) | BIT_reloadDStream(&bitD3) | BIT_reloadDStream(&bitD4);
}
/* check corruption */
if (op1 > opStart2) return ERROR(corruption_detected);
if (op2 > opStart3) return ERROR(corruption_detected);
if (op3 > opStart4) return ERROR(corruption_detected);
/* note : op4 already verified within main loop */
/* finish bitStreams one by one */
HUF_decodeStreamX4(op1, &bitD1, opStart2, dt, dtLog);
HUF_decodeStreamX4(op2, &bitD2, opStart3, dt, dtLog);
HUF_decodeStreamX4(op3, &bitD3, opStart4, dt, dtLog);
HUF_decodeStreamX4(op4, &bitD4, oend, dt, dtLog);
/* check */
{ U32 const endCheck = BIT_endOfDStream(&bitD1) & BIT_endOfDStream(&bitD2) & BIT_endOfDStream(&bitD3) & BIT_endOfDStream(&bitD4);
if (!endCheck) return ERROR(corruption_detected); }
/* decoded size */
return dstSize;
}
}
size_t HUF_decompress4X4_usingDTable(
void* dst, size_t dstSize,
const void* cSrc, size_t cSrcSize,
const HUF_DTable* DTable)
{
DTableDesc dtd = HUF_getDTableDesc(DTable);
if (dtd.tableType != 1) return ERROR(GENERIC);
return HUF_decompress4X4_usingDTable_internal(dst, dstSize, cSrc, cSrcSize, DTable);
}
size_t HUF_decompress4X4_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
const BYTE* ip = (const BYTE*) cSrc;
size_t hSize = HUF_readDTableX4 (dctx, cSrc, cSrcSize);
if (HUF_isError(hSize)) return hSize;
if (hSize >= cSrcSize) return ERROR(srcSize_wrong);
ip += hSize; cSrcSize -= hSize;
return HUF_decompress4X4_usingDTable_internal(dst, dstSize, ip, cSrcSize, dctx);
}
/* ********************************/
/* Generic decompression selector */
/* ********************************/
size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize,
const void* cSrc, size_t cSrcSize,
const HUF_DTable* DTable)
{
DTableDesc const dtd = HUF_getDTableDesc(DTable);
return dtd.tableType ? HUF_decompress1X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) :
HUF_decompress1X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
}
size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize,
const void* cSrc, size_t cSrcSize,
const HUF_DTable* DTable)
{
DTableDesc const dtd = HUF_getDTableDesc(DTable);
return dtd.tableType ? HUF_decompress4X4_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable) :
HUF_decompress4X2_usingDTable_internal(dst, maxDstSize, cSrc, cSrcSize, DTable);
}
typedef struct { U32 tableTime; U32 decode256Time; } algo_time_t;
static const algo_time_t algoTime[16 /* Quantization */][3 /* single, double, quad */] =
{
/* single, double, quad */
{{0,0}, {1,1}, {2,2}}, /* Q==0 : impossible */
{{0,0}, {1,1}, {2,2}}, /* Q==1 : impossible */
{{ 38,130}, {1313, 74}, {2151, 38}}, /* Q == 2 : 12-18% */
{{ 448,128}, {1353, 74}, {2238, 41}}, /* Q == 3 : 18-25% */
{{ 556,128}, {1353, 74}, {2238, 47}}, /* Q == 4 : 25-32% */
{{ 714,128}, {1418, 74}, {2436, 53}}, /* Q == 5 : 32-38% */
{{ 883,128}, {1437, 74}, {2464, 61}}, /* Q == 6 : 38-44% */
{{ 897,128}, {1515, 75}, {2622, 68}}, /* Q == 7 : 44-50% */
{{ 926,128}, {1613, 75}, {2730, 75}}, /* Q == 8 : 50-56% */
{{ 947,128}, {1729, 77}, {3359, 77}}, /* Q == 9 : 56-62% */
{{1107,128}, {2083, 81}, {4006, 84}}, /* Q ==10 : 62-69% */
{{1177,128}, {2379, 87}, {4785, 88}}, /* Q ==11 : 69-75% */
{{1242,128}, {2415, 93}, {5155, 84}}, /* Q ==12 : 75-81% */
{{1349,128}, {2644,106}, {5260,106}}, /* Q ==13 : 81-87% */
{{1455,128}, {2422,124}, {4174,124}}, /* Q ==14 : 87-93% */
{{ 722,128}, {1891,145}, {1936,146}}, /* Q ==15 : 93-99% */
};
/** HUF_selectDecoder() :
* Tells which decoder is likely to decode faster,
* based on a set of pre-determined metrics.
* @return : 0==HUF_decompress4X2, 1==HUF_decompress4X4 .
* Assumption : 0 < cSrcSize < dstSize <= 128 KB */
U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize)
{
/* decoder timing evaluation */
U32 const Q = (U32)(cSrcSize * 16 / dstSize); /* Q < 16 since dstSize > cSrcSize */
U32 const D256 = (U32)(dstSize >> 8);
U32 const DTime0 = algoTime[Q][0].tableTime + (algoTime[Q][0].decode256Time * D256);
U32 DTime1 = algoTime[Q][1].tableTime + (algoTime[Q][1].decode256Time * D256);
DTime1 += DTime1 >> 3; /* advantage to algorithm using less memory, for cache eviction */
return DTime1 < DTime0;
}
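/* Worked example (illustrative, not part of the original source) : for
dstSize == 100 KB and cSrcSize == 60 KB, Q == 60*16/100 == 9 and
D256 == 400, so DTime0 == 947 + 128*400 == 52147 while
DTime1 == 1729 + 77*400 == 32529, penalized to 32529 + 4066 == 36595;
36595 < 52147, so the double-symbols decoder is selected. */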
typedef size_t (*decompressionAlgo)(void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
{ U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
return algoNb ? HUF_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
}
}
size_t HUF_decompress4X_hufOnly (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
if ((cSrcSize >= dstSize) || (cSrcSize <= 1)) return ERROR(corruption_detected); /* invalid */
{ U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
return algoNb ? HUF_decompress4X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
HUF_decompress4X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
}
}
size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize)
{
/* validation checks */
if (dstSize == 0) return ERROR(dstSize_tooSmall);
if (cSrcSize > dstSize) return ERROR(corruption_detected); /* invalid */
if (cSrcSize == dstSize) { memcpy(dst, cSrc, dstSize); return dstSize; } /* not compressed */
if (cSrcSize == 1) { memset(dst, *(const BYTE*)cSrc, dstSize); return dstSize; } /* RLE */
{ U32 const algoNb = HUF_selectDecoder(dstSize, cSrcSize);
return algoNb ? HUF_decompress1X4_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) :
HUF_decompress1X2_DCtx(dctx, dst, dstSize, cSrc, cSrcSize) ;
}
}
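/* Illustrative usage sketch (not part of the original source), assuming the
HUF_CREATE_STATIC_DTABLEX4 helper declared in huf.h : the generic entry
points choose a decoder from the compression ratio alone, so one table sized
for the larger (double-symbol) variant serves both paths. */
static size_t HUF_example_decompress(void* dst, size_t dstSize,
                                     const void* cSrc, size_t cSrcSize)
{
    HUF_CREATE_STATIC_DTABLEX4(DTable, HUF_TABLELOG_MAX);
    return HUF_decompress1X_DCtx(DTable, dst, dstSize, cSrc, cSrcSize);
}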


@ -0,0 +1,209 @@
/**
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*/
#ifndef MEM_H_MODULE
#define MEM_H_MODULE
/*-****************************************
* Dependencies
******************************************/
#include <asm/unaligned.h>
#include <linux/types.h> /* size_t, ptrdiff_t */
#include <linux/string.h> /* memcpy */
/*-****************************************
* Compiler specifics
******************************************/
#define MEM_STATIC static __inline __attribute__((unused))
/* code only tested on 32- and 64-bit systems */
#define MEM_STATIC_ASSERT(c) { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
/*-**************************************************************
* Basic Types
*****************************************************************/
typedef uint8_t BYTE;
typedef uint16_t U16;
typedef int16_t S16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
typedef int64_t S64;
typedef ptrdiff_t iPtrDiff;
typedef uintptr_t uPtrDiff;
/*-**************************************************************
* Memory I/O
*****************************************************************/
MEM_STATIC unsigned MEM_32bits(void) { return sizeof(size_t)==4; }
MEM_STATIC unsigned MEM_64bits(void) { return sizeof(size_t)==8; }
#if defined(__LITTLE_ENDIAN)
# define MEM_LITTLE_ENDIAN 1
#else
# define MEM_LITTLE_ENDIAN 0
#endif
MEM_STATIC unsigned MEM_isLittleEndian(void)
{
return MEM_LITTLE_ENDIAN;
}
MEM_STATIC U16 MEM_read16(const void* memPtr)
{
return get_unaligned((const U16*)memPtr);
}
MEM_STATIC U32 MEM_read32(const void* memPtr)
{
return get_unaligned((const U32*)memPtr);
}
MEM_STATIC U64 MEM_read64(const void* memPtr)
{
return get_unaligned((const U64*)memPtr);
}
MEM_STATIC size_t MEM_readST(const void* memPtr)
{
return get_unaligned((const size_t*)memPtr);
}
MEM_STATIC void MEM_write16(void* memPtr, U16 value)
{
put_unaligned(value, (U16*)memPtr);
}
MEM_STATIC void MEM_write32(void* memPtr, U32 value)
{
put_unaligned(value, (U32*)memPtr);
}
MEM_STATIC void MEM_write64(void* memPtr, U64 value)
{
put_unaligned(value, (U64*)memPtr);
}
/*=== Little endian r/w ===*/
MEM_STATIC U16 MEM_readLE16(const void* memPtr)
{
return get_unaligned_le16(memPtr);
}
MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)
{
put_unaligned_le16(val, memPtr);
}
MEM_STATIC U32 MEM_readLE24(const void* memPtr)
{
return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
}
MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val)
{
MEM_writeLE16(memPtr, (U16)val);
((BYTE*)memPtr)[2] = (BYTE)(val>>16);
}
MEM_STATIC U32 MEM_readLE32(const void* memPtr)
{
return get_unaligned_le32(memPtr);
}
MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32)
{
put_unaligned_le32(val32, memPtr);
}
MEM_STATIC U64 MEM_readLE64(const void* memPtr)
{
return get_unaligned_le64(memPtr);
}
MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64)
{
put_unaligned_le64(val64, memPtr);
}
MEM_STATIC size_t MEM_readLEST(const void* memPtr)
{
if (MEM_32bits())
return (size_t)MEM_readLE32(memPtr);
else
return (size_t)MEM_readLE64(memPtr);
}
MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val)
{
if (MEM_32bits())
MEM_writeLE32(memPtr, (U32)val);
else
MEM_writeLE64(memPtr, (U64)val);
}
/*=== Big endian r/w ===*/
MEM_STATIC U32 MEM_readBE32(const void* memPtr)
{
return get_unaligned_be32(memPtr);
}
MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32)
{
put_unaligned_be32(val32, memPtr);
}
MEM_STATIC U64 MEM_readBE64(const void* memPtr)
{
return get_unaligned_be64(memPtr);
}
MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64)
{
put_unaligned_be64(val64, memPtr);
}
MEM_STATIC size_t MEM_readBEST(const void* memPtr)
{
if (MEM_32bits())
return (size_t)MEM_readBE32(memPtr);
else
return (size_t)MEM_readBE64(memPtr);
}
MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
{
if (MEM_32bits())
MEM_writeBE32(memPtr, (U32)val);
else
MEM_writeBE64(memPtr, (U64)val);
}
/* function safe only for comparisons */
MEM_STATIC U32 MEM_readMINMATCH(const void* memPtr, U32 length)
{
switch (length)
{
default :
case 4 : return MEM_read32(memPtr);
case 3 : if (MEM_isLittleEndian())
return MEM_read32(memPtr)<<8;
else
return MEM_read32(memPtr)>>8;
}
}
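/* Illustrative note (not part of the original source) : MEM_readLE24 widens
a 3-byte little-endian field without touching the following byte, e.g.
bytes {0x01,0x02,0x03} read back as 0x030201. MEM_readMINMATCH(ptr, 3), by
contrast, reads a full 4 bytes and shifts one away, so it may over-read and
is only valid for comparing two positions, never for extracting a value. */
MEM_STATIC int MEM_example_match3(const BYTE* a, const BYTE* b)
{
    return MEM_readMINMATCH(a, 3) == MEM_readMINMATCH(b, 3);
}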
#endif /* MEM_H_MODULE */


@ -0,0 +1,700 @@
/*
* xxHash - Fast Hash algorithm
* Copyright (C) 2012-2016, Yann Collet
*
* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You can contact the author at :
* - xxHash homepage: http://www.xxhash.com
* - xxHash source repository : https://github.com/Cyan4973/xxHash
*/
/* *************************************
* Tuning parameters
***************************************/
/*!XXH_ACCEPT_NULL_INPUT_POINTER :
* If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer.
* When this option is enabled, xxHash output for null input pointers will be the same as a null-length input.
* By default, this option is disabled. To enable it, uncomment the define below :
*/
/* #define XXH_ACCEPT_NULL_INPUT_POINTER 1 */
/*!XXH_FORCE_NATIVE_FORMAT :
* By default, the xxHash library provides endian-independent hash values, based on little-endian convention.
* Results are therefore identical for little-endian and big-endian CPUs.
* This comes at a performance cost for big-endian CPUs, since some swapping is required to emulate the little-endian format.
* Should endian-independence be of no importance for your application, you may set the #define below to 1,
* to improve speed for big-endian CPUs.
* This option has no impact on little-endian CPUs.
*/
#define XXH_FORCE_NATIVE_FORMAT 0
/*!XXH_FORCE_ALIGN_CHECK :
* This is a minor performance trick, only useful with lots of very small keys.
* It means : check for aligned/unaligned input.
* The check costs one initial branch per hash; set to 0 when the input data
* is guaranteed to be aligned.
*/
#define XXH_FORCE_ALIGN_CHECK 0
/* *************************************
* Includes & Memory related functions
***************************************/
/* Modify the local functions below should you wish to use some other memory routines */
/* for memcpy() */
#include <linux/string.h>
static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
#include "xxhash.h"
#include "mem.h"
/* *************************************
* Compiler Specific Options
***************************************/
#include <linux/compiler.h>
#define FORCE_INLINE static __always_inline
/* ****************************************
* Compiler-specific Functions and Macros
******************************************/
#define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
#define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
/* *************************************
* Architecture Macros
***************************************/
typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
#ifndef XXH_CPU_LITTLE_ENDIAN
# define XXH_CPU_LITTLE_ENDIAN MEM_LITTLE_ENDIAN
#endif
/* ***************************
* Memory reads
*****************************/
typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
FORCE_INLINE U32 XXH_readLE32_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
(void)endian;
(void)align;
return MEM_readLE32(ptr);
}
FORCE_INLINE U32 XXH_readLE32(const void* ptr, XXH_endianess endian)
{
return XXH_readLE32_align(ptr, endian, XXH_unaligned);
}
static U32 XXH_readBE32(const void* ptr)
{
return MEM_readBE32(ptr);
}
FORCE_INLINE U64 XXH_readLE64_align(const void* ptr, XXH_endianess endian, XXH_alignment align)
{
(void)endian;
(void)align;
return MEM_readLE64(ptr);
}
FORCE_INLINE U64 XXH_readLE64(const void* ptr, XXH_endianess endian)
{
return XXH_readLE64_align(ptr, endian, XXH_unaligned);
}
static U64 XXH_readBE64(const void* ptr)
{
return MEM_readBE64(ptr);
}
/* *************************************
* Macros
***************************************/
#define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
/* *************************************
* Constants
***************************************/
static const U32 PRIME32_1 = 2654435761U;
static const U32 PRIME32_2 = 2246822519U;
static const U32 PRIME32_3 = 3266489917U;
static const U32 PRIME32_4 = 668265263U;
static const U32 PRIME32_5 = 374761393U;
static const U64 PRIME64_1 = 11400714785074694791ULL;
static const U64 PRIME64_2 = 14029467366897019727ULL;
static const U64 PRIME64_3 = 1609587929392839161ULL;
static const U64 PRIME64_4 = 9650029242287828579ULL;
static const U64 PRIME64_5 = 2870177450012600261ULL;
XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
/* **************************
* Utils
****************************/
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
{
memcpy(dstState, srcState, sizeof(*dstState));
}
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState)
{
memcpy(dstState, srcState, sizeof(*dstState));
}
/* ***************************
* Simple Hash Functions
*****************************/
static U32 XXH32_round(U32 seed, U32 input)
{
seed += input * PRIME32_2;
seed = XXH_rotl32(seed, 13);
seed *= PRIME32_1;
return seed;
}
FORCE_INLINE U32 XXH32_endian_align(const void* input, size_t len, U32 seed, XXH_endianess endian, XXH_alignment align)
{
const BYTE* p = (const BYTE*)input;
const BYTE* bEnd = p + len;
U32 h32;
#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
if (p==NULL) {
len=0;
bEnd=p=(const BYTE*)(size_t)16;
}
#endif
if (len>=16) {
const BYTE* const limit = bEnd - 16;
U32 v1 = seed + PRIME32_1 + PRIME32_2;
U32 v2 = seed + PRIME32_2;
U32 v3 = seed + 0;
U32 v4 = seed - PRIME32_1;
do {
v1 = XXH32_round(v1, XXH_get32bits(p)); p+=4;
v2 = XXH32_round(v2, XXH_get32bits(p)); p+=4;
v3 = XXH32_round(v3, XXH_get32bits(p)); p+=4;
v4 = XXH32_round(v4, XXH_get32bits(p)); p+=4;
} while (p<=limit);
h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
} else {
h32 = seed + PRIME32_5;
}
h32 += (U32) len;
while (p+4<=bEnd) {
h32 += XXH_get32bits(p) * PRIME32_3;
h32 = XXH_rotl32(h32, 17) * PRIME32_4 ;
p+=4;
}
while (p<bEnd) {
h32 += (*p) * PRIME32_5;
h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
p++;
}
h32 ^= h32 >> 15;
h32 *= PRIME32_2;
h32 ^= h32 >> 13;
h32 *= PRIME32_3;
h32 ^= h32 >> 16;
return h32;
}
XXH_PUBLIC_API unsigned int XXH32 (const void* input, size_t len, unsigned int seed)
{
#if 0
/* Simple version, good for code maintenance, but unfortunately slow for small inputs */
XXH32_CREATESTATE_STATIC(state);
XXH32_reset(state, seed);
XXH32_update(state, input, len);
return XXH32_digest(state);
#else
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
if (XXH_FORCE_ALIGN_CHECK) {
if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
else
return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
} }
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
else
return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
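/* Illustrative usage sketch (not part of the original source) : one-shot
hashing of a buffer; the result depends on every input byte and on the seed,
and is identical on little- and big-endian hosts. */
static U32 XXH_example_checksum(const void* buf, size_t len)
{
    return (U32)XXH32(buf, len, 0 /* seed */);
}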
static U64 XXH64_round(U64 acc, U64 input)
{
acc += input * PRIME64_2;
acc = XXH_rotl64(acc, 31);
acc *= PRIME64_1;
return acc;
}
static U64 XXH64_mergeRound(U64 acc, U64 val)
{
val = XXH64_round(0, val);
acc ^= val;
acc = acc * PRIME64_1 + PRIME64_4;
return acc;
}
FORCE_INLINE U64 XXH64_endian_align(const void* input, size_t len, U64 seed, XXH_endianess endian, XXH_alignment align)
{
const BYTE* p = (const BYTE*)input;
const BYTE* const bEnd = p + len;
U64 h64;
#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
if (p==NULL) {
len=0;
bEnd=p=(const BYTE*)(size_t)32;
}
#endif
if (len>=32) {
const BYTE* const limit = bEnd - 32;
U64 v1 = seed + PRIME64_1 + PRIME64_2;
U64 v2 = seed + PRIME64_2;
U64 v3 = seed + 0;
U64 v4 = seed - PRIME64_1;
do {
v1 = XXH64_round(v1, XXH_get64bits(p)); p+=8;
v2 = XXH64_round(v2, XXH_get64bits(p)); p+=8;
v3 = XXH64_round(v3, XXH_get64bits(p)); p+=8;
v4 = XXH64_round(v4, XXH_get64bits(p)); p+=8;
} while (p<=limit);
h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
h64 = XXH64_mergeRound(h64, v1);
h64 = XXH64_mergeRound(h64, v2);
h64 = XXH64_mergeRound(h64, v3);
h64 = XXH64_mergeRound(h64, v4);
} else {
h64 = seed + PRIME64_5;
}
h64 += (U64) len;
while (p+8<=bEnd) {
U64 const k1 = XXH64_round(0, XXH_get64bits(p));
h64 ^= k1;
h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
p+=8;
}
if (p+4<=bEnd) {
h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1;
h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
p+=4;
}
while (p<bEnd) {
h64 ^= (*p) * PRIME64_5;
h64 = XXH_rotl64(h64, 11) * PRIME64_1;
p++;
}
h64 ^= h64 >> 33;
h64 *= PRIME64_2;
h64 ^= h64 >> 29;
h64 *= PRIME64_3;
h64 ^= h64 >> 32;
return h64;
}
XXH_PUBLIC_API unsigned long long XXH64 (const void* input, size_t len, unsigned long long seed)
{
#if 0
/* Simple version, good for code maintenance, but unfortunately slow for small inputs */
XXH64_CREATESTATE_STATIC(state);
XXH64_reset(state, seed);
XXH64_update(state, input, len);
return XXH64_digest(state);
#else
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
if (XXH_FORCE_ALIGN_CHECK) {
if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
else
return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
} }
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH64_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
else
return XXH64_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
/* **************************************************
* Advanced Hash Functions
****************************************************/
/*** Hash feed ***/
XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
{
XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
memset(&state, 0, sizeof(state)-4); /* do not write into reserved, for future removal */
state.v1 = seed + PRIME32_1 + PRIME32_2;
state.v2 = seed + PRIME32_2;
state.v3 = seed + 0;
state.v4 = seed - PRIME32_1;
memcpy(statePtr, &state, sizeof(state));
return XXH_OK;
}
XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
{
XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
memset(&state, 0, sizeof(state)-8); /* do not write into reserved, for future removal */
state.v1 = seed + PRIME64_1 + PRIME64_2;
state.v2 = seed + PRIME64_2;
state.v3 = seed + 0;
state.v4 = seed - PRIME64_1;
memcpy(statePtr, &state, sizeof(state));
return XXH_OK;
}
FORCE_INLINE XXH_errorcode XXH32_update_endian (XXH32_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
const BYTE* p = (const BYTE*)input;
const BYTE* const bEnd = p + len;
#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
if (input==NULL) return XXH_ERROR;
#endif
state->total_len_32 += (unsigned)len;
state->large_len |= (len>=16) | (state->total_len_32>=16);
if (state->memsize + len < 16) { /* fill in tmp buffer */
XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, len);
state->memsize += (unsigned)len;
return XXH_OK;
}
if (state->memsize) { /* some data left from previous update */
XXH_memcpy((BYTE*)(state->mem32) + state->memsize, input, 16-state->memsize);
{ const U32* p32 = state->mem32;
state->v1 = XXH32_round(state->v1, XXH_readLE32(p32, endian)); p32++;
state->v2 = XXH32_round(state->v2, XXH_readLE32(p32, endian)); p32++;
state->v3 = XXH32_round(state->v3, XXH_readLE32(p32, endian)); p32++;
state->v4 = XXH32_round(state->v4, XXH_readLE32(p32, endian)); p32++;
}
p += 16-state->memsize;
state->memsize = 0;
}
if (p <= bEnd-16) {
const BYTE* const limit = bEnd - 16;
U32 v1 = state->v1;
U32 v2 = state->v2;
U32 v3 = state->v3;
U32 v4 = state->v4;
do {
v1 = XXH32_round(v1, XXH_readLE32(p, endian)); p+=4;
v2 = XXH32_round(v2, XXH_readLE32(p, endian)); p+=4;
v3 = XXH32_round(v3, XXH_readLE32(p, endian)); p+=4;
v4 = XXH32_round(v4, XXH_readLE32(p, endian)); p+=4;
} while (p<=limit);
state->v1 = v1;
state->v2 = v2;
state->v3 = v3;
state->v4 = v4;
}
if (p < bEnd) {
XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
state->memsize = (unsigned)(bEnd-p);
}
return XXH_OK;
}
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* state_in, const void* input, size_t len)
{
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
else
return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
}
FORCE_INLINE U32 XXH32_digest_endian (const XXH32_state_t* state, XXH_endianess endian)
{
const BYTE * p = (const BYTE*)state->mem32;
const BYTE* const bEnd = (const BYTE*)(state->mem32) + state->memsize;
U32 h32;
if (state->large_len) {
h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
} else {
h32 = state->v3 /* == seed */ + PRIME32_5;
}
h32 += state->total_len_32;
while (p+4<=bEnd) {
h32 += XXH_readLE32(p, endian) * PRIME32_3;
h32 = XXH_rotl32(h32, 17) * PRIME32_4;
p+=4;
}
while (p<bEnd) {
h32 += (*p) * PRIME32_5;
h32 = XXH_rotl32(h32, 11) * PRIME32_1;
p++;
}
h32 ^= h32 >> 15;
h32 *= PRIME32_2;
h32 ^= h32 >> 13;
h32 *= PRIME32_3;
h32 ^= h32 >> 16;
return h32;
}
XXH_PUBLIC_API unsigned int XXH32_digest (const XXH32_state_t* state_in)
{
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH32_digest_endian(state_in, XXH_littleEndian);
else
return XXH32_digest_endian(state_in, XXH_bigEndian);
}
/* **** XXH64 **** */
FORCE_INLINE XXH_errorcode XXH64_update_endian (XXH64_state_t* state, const void* input, size_t len, XXH_endianess endian)
{
const BYTE* p = (const BYTE*)input;
const BYTE* const bEnd = p + len;
#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
if (input==NULL) return XXH_ERROR;
#endif
state->total_len += len;
if (state->memsize + len < 32) { /* fill in tmp buffer */
XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, len);
state->memsize += (U32)len;
return XXH_OK;
}
if (state->memsize) { /* tmp buffer is full */
XXH_memcpy(((BYTE*)state->mem64) + state->memsize, input, 32-state->memsize);
state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0, endian));
state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1, endian));
state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2, endian));
state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3, endian));
p += 32-state->memsize;
state->memsize = 0;
}
if (p+32 <= bEnd) {
const BYTE* const limit = bEnd - 32;
U64 v1 = state->v1;
U64 v2 = state->v2;
U64 v3 = state->v3;
U64 v4 = state->v4;
do {
v1 = XXH64_round(v1, XXH_readLE64(p, endian)); p+=8;
v2 = XXH64_round(v2, XXH_readLE64(p, endian)); p+=8;
v3 = XXH64_round(v3, XXH_readLE64(p, endian)); p+=8;
v4 = XXH64_round(v4, XXH_readLE64(p, endian)); p+=8;
} while (p<=limit);
state->v1 = v1;
state->v2 = v2;
state->v3 = v3;
state->v4 = v4;
}
if (p < bEnd) {
XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
state->memsize = (unsigned)(bEnd-p);
}
return XXH_OK;
}
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* state_in, const void* input, size_t len)
{
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH64_update_endian(state_in, input, len, XXH_littleEndian);
else
return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
}
FORCE_INLINE U64 XXH64_digest_endian (const XXH64_state_t* state, XXH_endianess endian)
{
const BYTE * p = (const BYTE*)state->mem64;
const BYTE* const bEnd = (const BYTE*)state->mem64 + state->memsize;
U64 h64;
if (state->total_len >= 32) {
U64 const v1 = state->v1;
U64 const v2 = state->v2;
U64 const v3 = state->v3;
U64 const v4 = state->v4;
h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
h64 = XXH64_mergeRound(h64, v1);
h64 = XXH64_mergeRound(h64, v2);
h64 = XXH64_mergeRound(h64, v3);
h64 = XXH64_mergeRound(h64, v4);
} else {
h64 = state->v3 + PRIME64_5;
}
h64 += (U64) state->total_len;
while (p+8<=bEnd) {
U64 const k1 = XXH64_round(0, XXH_readLE64(p, endian));
h64 ^= k1;
h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
p+=8;
}
if (p+4<=bEnd) {
h64 ^= (U64)(XXH_readLE32(p, endian)) * PRIME64_1;
h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
p+=4;
}
while (p<bEnd) {
h64 ^= (*p) * PRIME64_5;
h64 = XXH_rotl64(h64, 11) * PRIME64_1;
p++;
}
h64 ^= h64 >> 33;
h64 *= PRIME64_2;
h64 ^= h64 >> 29;
h64 *= PRIME64_3;
h64 ^= h64 >> 32;
return h64;
}
XXH_PUBLIC_API unsigned long long XXH64_digest (const XXH64_state_t* state_in)
{
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH64_digest_endian(state_in, XXH_littleEndian);
else
return XXH64_digest_endian(state_in, XXH_bigEndian);
}
/* **************************
* Canonical representation
****************************/
/*! Default XXH result types are basic unsigned 32 and 64 bits.
* The canonical representation follows human-readable write convention, aka big-endian (large digits first).
* These functions allow transformation of hash result into and from its canonical format.
* This way, hash values can be written into a file or buffer, and remain comparable across different systems and programs.
*/
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
{
XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
MEM_writeBE32(dst, hash);
}
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
{
XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
MEM_writeBE64(dst, hash);
}
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
{
return XXH_readBE32(src);
}
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
{
return XXH_readBE64(src);
}

View File

@ -0,0 +1,235 @@
/*
xxHash - Extremely Fast Hash algorithm
Header File
Copyright (C) 2012-2016, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- xxHash source repository : https://github.com/Cyan4973/xxHash
*/
/* Notice extracted from xxHash homepage :
xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
It also successfully passes all tests from the SMHasher suite.
Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)
Name            Speed       Q.Score   Author
xxHash          5.4 GB/s     10
CrapWow         3.2 GB/s      2       Andrew
MurmurHash 3a   2.7 GB/s     10       Austin Appleby
SpookyHash      2.0 GB/s     10       Bob Jenkins
SBox            1.4 GB/s      9       Bret Mulvey
Lookup3         1.2 GB/s      9       Bob Jenkins
SuperFastHash   1.2 GB/s      1       Paul Hsieh
CityHash64      1.05 GB/s    10       Pike & Alakuijala
FNV             0.55 GB/s     5       Fowler, Noll, Vo
CRC32           0.43 GB/s     9
MD5-32          0.33 GB/s    10       Ronald L. Rivest
SHA1-32         0.28 GB/s    10
Q.Score is a measure of quality of the hash function.
It depends on successfully passing the SMHasher test set.
10 is a perfect score.
A 64-bit version, named XXH64, is available since r35.
It offers much better speed, but for 64-bit applications only.
Name     Speed on 64 bits    Speed on 32 bits
XXH64       13.8 GB/s            1.9 GB/s
XXH32        6.8 GB/s            6.0 GB/s
*/
#ifndef XXHASH_H_5627135585666179
#define XXHASH_H_5627135585666179 1
/* ****************************
* Definitions
******************************/
#include <linux/types.h> /* size_t */
typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;
/* ****************************
* API modifier
******************************/
/** XXH_PRIVATE_API
* This is useful if you want to include xxhash functions in `static` mode
* in order to inline them, and remove their symbol from the public list.
* Methodology :
* #define XXH_PRIVATE_API
* #include "xxhash.h"
* `xxhash.c` is automatically included.
* It's not useful to compile and link it as a separate module anymore.
*/
#define XXH_PUBLIC_API /* do nothing */
/*!XXH_NAMESPACE, aka Namespace Emulation :
If you want to include _and expose_ xxHash functions from within your own library,
but also want to avoid symbol collisions with another library which also includes xxHash,
you can use XXH_NAMESPACE to automatically prefix any public symbol from the xxhash library
with the value of XXH_NAMESPACE (so avoid leaving it empty, and avoid numeric values).
Note that no change is required within the calling program as long as it includes `xxhash.h` :
regular symbol names will be automatically translated by this header.
*/
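/* Example (illustrative) : building with `-DXXH_NAMESPACE=ZSTD_` makes the linker-visible
 * name of XXH32() become ZSTD_XXH32(), while source code keeps calling XXH32(). */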
/* *************************************
* Version
***************************************/
#define XXH_VERSION_MAJOR 0
#define XXH_VERSION_MINOR 6
#define XXH_VERSION_RELEASE 2
#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
XXH_PUBLIC_API unsigned XXH_versionNumber (void);
/* ****************************
* Simple Hash Functions
******************************/
typedef unsigned int XXH32_hash_t;
typedef unsigned long long XXH64_hash_t;
XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, unsigned int seed);
XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, unsigned long long seed);
/*!
XXH32() :
Calculate the 32-bit hash of the sequence of "length" bytes stored at memory address "input".
The memory between input & input+length must be valid (allocated and read-accessible).
"seed" can be used to alter the result predictably.
Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s
XXH64() :
Calculate the 64-bit hash of the sequence of "length" bytes stored at memory address "input".
"seed" can be used to alter the result predictably.
This function runs 2x faster on 64-bit systems, but slower on 32-bit systems (see benchmark).
*/
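/* Illustrative one-shot usage (a sketch, not part of the upstream documentation ;
 * `buffer` and `bufferSize` are caller-provided) :
 */
#if 0
unsigned int       const h32 = XXH32(buffer, bufferSize, 0);   /* 32-bit variant */
unsigned long long const h64 = XXH64(buffer, bufferSize, 0);   /* 64-bit variant, faster on 64-bit CPUs */
#endif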
/* ****************************
* Streaming Hash Functions
******************************/
typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */
typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */
/* hash streaming */
XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, unsigned int seed);
XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);
XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, unsigned long long seed);
XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr);
/*
These functions generate the xxHash of an input provided in multiple segments.
Note that, for small inputs, they are slower than single-call functions, due to state management.
For small inputs, prefer `XXH32()` and `XXH64()` .
XXH state must first be allocated ; the state definitions at the bottom of this file allow static allocation.
Start a new hash by initializing the state with a seed, using XXH*_reset().
Then, feed the hash state by calling XXH*_update() as many times as necessary.
Obviously, the input must be allocated and read-accessible.
The functions return an error code, with 0 meaning OK, and any other value meaning there is an error.
Finally, a hash value can be produced at any time, by using XXH*_digest().
This function returns the nn-bits hash as an int or long long.
It's still possible to continue inserting input into the hash state after a digest,
and generate new hashes later on, by calling XXH*_digest() again.
When done, free the XXH state space if it was allocated dynamically.
*/
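/* Illustrative streaming usage (a sketch ; `readChunk`, `chunk` and `chunkSize`
 * are hypothetical caller-side helpers, not part of this API) :
 */
#if 0
XXH64_state_t state;                          /* statically allocated, see definitions below */
XXH64_hash_t  h64;
XXH64_reset(&state, 0);                       /* start a new hash, seed 0 */
while (readChunk(&chunk, &chunkSize))         /* feed input segment by segment */
    XXH64_update(&state, chunk, chunkSize);
h64 = XXH64_digest(&state);                   /* produce the hash ; state remains usable */
#endif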
/* **************************
* Utils
****************************/
XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);
XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state);
/* **************************
* Canonical representation
****************************/
/* Default result type for XXH functions are primitive unsigned 32 and 64 bits.
* The canonical representation uses human-readable write convention, aka big-endian (large digits first).
* These functions allow transformation of hash result into and from its canonical format.
* This way, hash values can be written into a file / memory, and remain comparable on different systems and programs.
*/
typedef struct { unsigned char digest[4]; } XXH32_canonical_t;
typedef struct { unsigned char digest[8]; } XXH64_canonical_t;
XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);
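/* Illustrative round-trip (a sketch ; `h64` stands for a previously computed XXH64 result) :
 * serialize a hash in canonical big-endian form, then read it back portably.
 */
#if 0
XXH64_canonical_t canonical;
XXH64_canonicalFromHash(&canonical, h64);     /* canonical.digest : 8 bytes, big-endian */
/* ... write canonical.digest to a file or frame ... */
{   XXH64_hash_t const check = XXH64_hashFromCanonical(&canonical);
    /* check == h64 on any platform, whatever its endianness */
}
#endif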
/* ================================================================================================
This section contains definitions which are not guaranteed to remain stable.
They may change in future versions, becoming incompatible with a different version of the library.
They shall only be used with static linking.
Never use these definitions in association with dynamic linking !
=================================================================================================== */
/* These definitions are only meant to allow allocation of XXH state
statically, on stack, or in a struct for example.
Do not use members directly. */
struct XXH32_state_s {
unsigned total_len_32;
unsigned large_len;
unsigned v1;
unsigned v2;
unsigned v3;
unsigned v4;
unsigned mem32[4]; /* buffer defined as U32 for alignment */
unsigned memsize;
unsigned reserved; /* never read nor write, will be removed in a future version */
}; /* typedef'd to XXH32_state_t */
struct XXH64_state_s {
unsigned long long total_len;
unsigned long long v1;
unsigned long long v2;
unsigned long long v3;
unsigned long long v4;
unsigned long long mem64[4]; /* buffer defined as U64 for alignment */
unsigned memsize;
unsigned reserved[2]; /* never read nor write, will be removed in a future version */
}; /* typedef'd to XXH64_state_t */
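/* Static allocation permitted by the definitions above (a sketch) : */
#if 0
static XXH64_state_t hashState;   /* no dynamic allocation required */
XXH64_reset(&hashState, 0);
#endif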
#endif /* XXHASH_H_5627135585666179 */

View File

@ -0,0 +1,69 @@
/**
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*/
/*-*************************************
* Dependencies
***************************************/
#include "error_private.h"
#include "zstd_internal.h" /* declaration of ZSTD_isError, ZSTD_getErrorName, ZSTD_getErrorCode, ZSTD_getErrorString, ZSTD_versionNumber */
#include <linux/kernel.h>
/*=**************************************************************
* Custom allocator
****************************************************************/
#define stack_push(stack, size) ({ \
void* const ptr = ZSTD_PTR_ALIGN((stack)->ptr); \
(stack)->ptr = (char*)ptr + (size); \
(stack)->ptr <= (stack)->end ? ptr : NULL; \
})
ZSTD_customMem ZSTD_initStack(void* workspace, size_t workspaceSize) {
ZSTD_customMem stackMem = { ZSTD_stackAlloc, ZSTD_stackFree, workspace };
ZSTD_stack* stack = (ZSTD_stack*) workspace;
/* Verify preconditions */
if (!workspace || workspaceSize < sizeof(ZSTD_stack) || workspace != ZSTD_PTR_ALIGN(workspace)) {
ZSTD_customMem error = {NULL, NULL, NULL};
return error;
}
/* Initialize the stack */
stack->ptr = workspace;
stack->end = (char*)workspace + workspaceSize;
stack_push(stack, sizeof(ZSTD_stack));
return stackMem;
}
void* ZSTD_stackAllocAll(void* opaque, size_t* size) {
ZSTD_stack* stack = (ZSTD_stack*)opaque;
*size = stack->end - ZSTD_PTR_ALIGN(stack->ptr);
return stack_push(stack, *size);
}
void* ZSTD_stackAlloc(void* opaque, size_t size) {
ZSTD_stack* stack = (ZSTD_stack*)opaque;
return stack_push(stack, size);
}
void ZSTD_stackFree(void* opaque, void* address) {
(void)opaque;
(void)address;
}
void* ZSTD_malloc(size_t size, ZSTD_customMem customMem)
{
return customMem.customAlloc(customMem.opaque, size);
}
void ZSTD_free(void* ptr, ZSTD_customMem customMem)
{
if (ptr!=NULL)
customMem.customFree(customMem.opaque, ptr);
}
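/* Illustrative usage of the stack allocator (a sketch ; `workspace` and `workspaceSize`
 * are caller-provided, e.g. a buffer sized from ZSTD_estimateCCtxSize()) :
 */
#if 0
ZSTD_customMem const stackMem = ZSTD_initStack(workspace, workspaceSize);
if (stackMem.customAlloc != NULL) {               /* all-NULL members signal an invalid workspace */
    void* const buf = ZSTD_malloc(64, stackMem);  /* bump-allocates an aligned pointer, or NULL when exhausted */
    ZSTD_free(buf, stackMem);                     /* no-op : stack allocations are not reclaimed individually */
}
#endif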

View File

@ -0,0 +1,274 @@
/**
* Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*/
#ifndef ZSTD_CCOMMON_H_MODULE
#define ZSTD_CCOMMON_H_MODULE
/*-*******************************************************
* Compiler specifics
*********************************************************/
#define FORCE_INLINE static __always_inline
#define FORCE_NOINLINE static noinline
/*-*************************************
* Dependencies
***************************************/
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/zstd.h>
#include "mem.h"
#include "error_private.h"
#include "xxhash.h" /* XXH_reset, update, digest */
/*-*************************************
* shared macros
***************************************/
#define MIN(a,b) ((a)<(b) ? (a) : (b))
#define MAX(a,b) ((a)>(b) ? (a) : (b))
#define CHECK_F(f) { size_t const errcod = f; if (ERR_isError(errcod)) return errcod; } /* check and Forward error code */
#define CHECK_E(f, e) { size_t const errcod = f; if (ERR_isError(errcod)) return ERROR(e); } /* check and send Error code */
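/* usage sketch : `CHECK_F( f(args) )` evaluates f once and, on error, returns its
 * error code to the caller ; CHECK_E does the same but substitutes error code `e`. */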
/*-*************************************
* Common constants
***************************************/
#define ZSTD_OPT_NUM (1<<12)
#define ZSTD_DICT_MAGIC 0xEC30A437 /* v0.7+ */
#define ZSTD_REP_NUM 3 /* number of repcodes */
#define ZSTD_REP_CHECK (ZSTD_REP_NUM) /* number of repcodes to check by the optimal parser */
#define ZSTD_REP_MOVE (ZSTD_REP_NUM-1)
#define ZSTD_REP_MOVE_OPT (ZSTD_REP_NUM)
static const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };
#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)
#define BIT7 128
#define BIT6 64
#define BIT5 32
#define BIT4 16
#define BIT1 2
#define BIT0 1
#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
static const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
static const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };
#define ZSTD_BLOCKHEADERSIZE 3 /* C standard doesn't allow `static const` variable to be init using another `static const` variable */
static const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;
#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */) /* for a non-null block */
#define HufLog 12
typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;
#define LONGNBSEQ 0x7F00
#define MINMATCH 3
#define EQUAL_READ32 4
#define Litbits 8
#define MaxLit ((1<<Litbits) - 1)
#define MaxML 52
#define MaxLL 35
#define MaxOff 28
#define MaxSeq MAX(MaxLL, MaxML) /* Assumption : MaxOff < MaxLL,MaxML */
#define MLFSELog 9
#define LLFSELog 9
#define OffFSELog 8
static const U32 LL_bits[MaxLL+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9,10,11,12,
13,14,15,16 };
static const S16 LL_defaultNorm[MaxLL+1] = { 4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1,
2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1,
-1,-1,-1,-1 };
#define LL_DEFAULTNORMLOG 6 /* for static allocation */
static const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;
static const U32 ML_bits[MaxML+1] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9,10,11,
12,13,14,15,16 };
static const S16 ML_defaultNorm[MaxML+1] = { 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,-1,-1,
-1,-1,-1,-1,-1 };
#define ML_DEFAULTNORMLOG 6 /* for static allocation */
static const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;
static const S16 OF_defaultNorm[MaxOff+1] = { 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1 };
#define OF_DEFAULTNORMLOG 5 /* for static allocation */
static const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;
/*-*******************************************
* Shared functions to include for inlining
*********************************************/
static void ZSTD_copy8(void* dst, const void* src) { memcpy(dst, src, 8); }
#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }
/*! ZSTD_wildcopy() :
* custom version of memcpy(), can copy up to 7 bytes too many (8 bytes if length==0) */
#define WILDCOPY_OVERLENGTH 8
MEM_STATIC void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length)
{
const BYTE* ip = (const BYTE*)src;
BYTE* op = (BYTE*)dst;
BYTE* const oend = op + length;
do
COPY8(op, ip)
while (op < oend);
}
MEM_STATIC void ZSTD_wildcopy_e(void* dst, const void* src, void* dstEnd) /* should be faster for decoding, but strangely, not verified on all platforms */
{
const BYTE* ip = (const BYTE*)src;
BYTE* op = (BYTE*)dst;
BYTE* const oend = (BYTE*)dstEnd;
do
COPY8(op, ip)
while (op < oend);
}
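/* usage note : since both copies proceed in whole 8-byte words, destination buffers
 * must reserve WILDCOPY_OVERLENGTH spare bytes past `dst + length` (resp. `dstEnd`). */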
/*-*******************************************
* Private interfaces
*********************************************/
typedef struct ZSTD_stats_s ZSTD_stats_t;
typedef struct {
U32 off;
U32 len;
} ZSTD_match_t;
typedef struct {
U32 price;
U32 off;
U32 mlen;
U32 litlen;
U32 rep[ZSTD_REP_NUM];
} ZSTD_optimal_t;
typedef struct seqDef_s {
U32 offset;
U16 litLength;
U16 matchLength;
} seqDef;
typedef struct {
seqDef* sequencesStart;
seqDef* sequences;
BYTE* litStart;
BYTE* lit;
BYTE* llCode;
BYTE* mlCode;
BYTE* ofCode;
U32 longLengthID; /* 0 == no longLength; 1 == Lit.longLength; 2 == Match.longLength; */
U32 longLengthPos;
/* opt */
ZSTD_optimal_t* priceTable;
ZSTD_match_t* matchTable;
U32* matchLengthFreq;
U32* litLengthFreq;
U32* litFreq;
U32* offCodeFreq;
U32 matchLengthSum;
U32 matchSum;
U32 litLengthSum;
U32 litSum;
U32 offCodeSum;
U32 log2matchLengthSum;
U32 log2matchSum;
U32 log2litLengthSum;
U32 log2litSum;
U32 log2offCodeSum;
U32 factor;
U32 staticPrices;
U32 cachedPrice;
U32 cachedLitLength;
const BYTE* cachedLiterals;
} seqStore_t;
const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx);
void ZSTD_seqToCodes(const seqStore_t* seqStorePtr);
int ZSTD_isSkipFrame(ZSTD_DCtx* dctx);
/*= Custom memory allocation functions */
typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
typedef void (*ZSTD_freeFunction) (void* opaque, void* address);
typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
void* ZSTD_malloc(size_t size, ZSTD_customMem customMem);
void ZSTD_free(void* ptr, ZSTD_customMem customMem);
/*====== stack allocation ======*/
typedef struct {
void* ptr;
const void* end;
} ZSTD_stack;
#define ZSTD_ALIGN(x) ALIGN(x, sizeof(size_t))
#define ZSTD_PTR_ALIGN(p) PTR_ALIGN(p, sizeof(size_t))
ZSTD_customMem ZSTD_initStack(void* workspace, size_t workspaceSize);
void* ZSTD_stackAllocAll(void* opaque, size_t* size);
void* ZSTD_stackAlloc(void* opaque, size_t size);
void ZSTD_stackFree(void* opaque, void* address);
/*====== common function ======*/
MEM_STATIC U32 ZSTD_highbit32(U32 val)
{
# if defined(__GNUC__) && (__GNUC__ >= 3) /* GCC Intrinsic */
return 31 - __builtin_clz(val);
# else /* Software version */
static const int DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
U32 v = val;
int r;
v |= v >> 1;
v |= v >> 2;
v |= v >> 4;
v |= v >> 8;
v |= v >> 16;
r = DeBruijnClz[(U32)(v * 0x07C4ACDDU) >> 27];
return r;
# endif
}
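/* sanity examples : ZSTD_highbit32(1)==0, ZSTD_highbit32(32)==5, ZSTD_highbit32(0xFFFFFFFF)==31 ;
 * i.e. the result is floor(log2(val)) for val > 0 (val==0 is undefined, as __builtin_clz(0) is). */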
/* hidden functions */
/* ZSTD_invalidateRepCodes() :
* ensures next compression will not use repcodes from previous block.
* Note : only works with regular variant;
* do not use with extDict variant ! */
void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx);
size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx);
size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
size_t ZSTD_freeCDict(ZSTD_CDict* cdict);
size_t ZSTD_freeDDict(ZSTD_DDict* ddict);
size_t ZSTD_freeCStream(ZSTD_CStream* zcs);
size_t ZSTD_freeDStream(ZSTD_DStream* zds);
#endif /* ZSTD_CCOMMON_H_MODULE */

View File

@ -0,0 +1,921 @@
/**
* Copyright (c) 2016-present, Przemyslaw Skibinski, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*/
/* Note : this file is intended to be included within zstd_compress.c */
#ifndef ZSTD_OPT_H_91842398743
#define ZSTD_OPT_H_91842398743
#define ZSTD_LITFREQ_ADD 2
#define ZSTD_FREQ_DIV 4
#define ZSTD_MAX_PRICE (1<<30)
/*-*************************************
* Price functions for optimal parser
***************************************/
FORCE_INLINE void ZSTD_setLog2Prices(seqStore_t* ssPtr)
{
ssPtr->log2matchLengthSum = ZSTD_highbit32(ssPtr->matchLengthSum+1);
ssPtr->log2litLengthSum = ZSTD_highbit32(ssPtr->litLengthSum+1);
ssPtr->log2litSum = ZSTD_highbit32(ssPtr->litSum+1);
ssPtr->log2offCodeSum = ZSTD_highbit32(ssPtr->offCodeSum+1);
ssPtr->factor = 1 + ((ssPtr->litSum>>5) / ssPtr->litLengthSum) + ((ssPtr->litSum<<1) / (ssPtr->litSum + ssPtr->matchSum));
}
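/* pricing model (informal) : with f = freq[symbol] and F = the matching sum, an ideal
 * entropy coder spends about -log2(f/F) = log2(F) - log2(f) bits per symbol. The log2*
 * fields above cache log2(F) (via ZSTD_highbit32), so each price below is computed as
 * log2Sum - ZSTD_highbit32(freq+1), expressed in whole bits. */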
MEM_STATIC void ZSTD_rescaleFreqs(seqStore_t* ssPtr, const BYTE* src, size_t srcSize)
{
unsigned u;
ssPtr->cachedLiterals = NULL;
ssPtr->cachedPrice = ssPtr->cachedLitLength = 0;
ssPtr->staticPrices = 0;
if (ssPtr->litLengthSum == 0) {
if (srcSize <= 1024) ssPtr->staticPrices = 1;
for (u=0; u<=MaxLit; u++)
ssPtr->litFreq[u] = 0;
for (u=0; u<srcSize; u++)
ssPtr->litFreq[src[u]]++;
ssPtr->litSum = 0;
ssPtr->litLengthSum = MaxLL+1;
ssPtr->matchLengthSum = MaxML+1;
ssPtr->offCodeSum = (MaxOff+1);
ssPtr->matchSum = (ZSTD_LITFREQ_ADD<<Litbits);
for (u=0; u<=MaxLit; u++) {
ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u]>>ZSTD_FREQ_DIV);
ssPtr->litSum += ssPtr->litFreq[u];
}
for (u=0; u<=MaxLL; u++)
ssPtr->litLengthFreq[u] = 1;
for (u=0; u<=MaxML; u++)
ssPtr->matchLengthFreq[u] = 1;
for (u=0; u<=MaxOff; u++)
ssPtr->offCodeFreq[u] = 1;
} else {
ssPtr->matchLengthSum = 0;
ssPtr->litLengthSum = 0;
ssPtr->offCodeSum = 0;
ssPtr->matchSum = 0;
ssPtr->litSum = 0;
for (u=0; u<=MaxLit; u++) {
ssPtr->litFreq[u] = 1 + (ssPtr->litFreq[u]>>(ZSTD_FREQ_DIV+1));
ssPtr->litSum += ssPtr->litFreq[u];
}
for (u=0; u<=MaxLL; u++) {
ssPtr->litLengthFreq[u] = 1 + (ssPtr->litLengthFreq[u]>>(ZSTD_FREQ_DIV+1));
ssPtr->litLengthSum += ssPtr->litLengthFreq[u];
}
for (u=0; u<=MaxML; u++) {
ssPtr->matchLengthFreq[u] = 1 + (ssPtr->matchLengthFreq[u]>>ZSTD_FREQ_DIV);
ssPtr->matchLengthSum += ssPtr->matchLengthFreq[u];
ssPtr->matchSum += ssPtr->matchLengthFreq[u] * (u + 3);
}
ssPtr->matchSum *= ZSTD_LITFREQ_ADD;
for (u=0; u<=MaxOff; u++) {
ssPtr->offCodeFreq[u] = 1 + (ssPtr->offCodeFreq[u]>>ZSTD_FREQ_DIV);
ssPtr->offCodeSum += ssPtr->offCodeFreq[u];
}
}
ZSTD_setLog2Prices(ssPtr);
}
FORCE_INLINE U32 ZSTD_getLiteralPrice(seqStore_t* ssPtr, U32 litLength, const BYTE* literals)
{
U32 price, u;
if (ssPtr->staticPrices)
return ZSTD_highbit32((U32)litLength+1) + (litLength*6);
if (litLength == 0)
return ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[0]+1);
/* literals */
if (ssPtr->cachedLiterals == literals) {
U32 const additional = litLength - ssPtr->cachedLitLength;
const BYTE* literals2 = ssPtr->cachedLiterals + ssPtr->cachedLitLength;
price = ssPtr->cachedPrice + additional * ssPtr->log2litSum;
for (u=0; u < additional; u++)
price -= ZSTD_highbit32(ssPtr->litFreq[literals2[u]]+1);
ssPtr->cachedPrice = price;
ssPtr->cachedLitLength = litLength;
} else {
price = litLength * ssPtr->log2litSum;
for (u=0; u < litLength; u++)
price -= ZSTD_highbit32(ssPtr->litFreq[literals[u]]+1);
if (litLength >= 12) {
ssPtr->cachedLiterals = literals;
ssPtr->cachedPrice = price;
ssPtr->cachedLitLength = litLength;
}
}
/* literal Length */
{ const BYTE LL_deltaCode = 19;
const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
price += LL_bits[llCode] + ssPtr->log2litLengthSum - ZSTD_highbit32(ssPtr->litLengthFreq[llCode]+1);
}
return price;
}
FORCE_INLINE U32 ZSTD_getPrice(seqStore_t* seqStorePtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength, const int ultra)
{
/* offset */
U32 price;
BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1);
if (seqStorePtr->staticPrices)
return ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + ZSTD_highbit32((U32)matchLength+1) + 16 + offCode;
price = offCode + seqStorePtr->log2offCodeSum - ZSTD_highbit32(seqStorePtr->offCodeFreq[offCode]+1);
if (!ultra && offCode >= 20) price += (offCode-19)*2;
/* match Length */
{ const BYTE ML_deltaCode = 36;
const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
price += ML_bits[mlCode] + seqStorePtr->log2matchLengthSum - ZSTD_highbit32(seqStorePtr->matchLengthFreq[mlCode]+1);
}
return price + ZSTD_getLiteralPrice(seqStorePtr, litLength, literals) + seqStorePtr->factor;
}
MEM_STATIC void ZSTD_updatePrice(seqStore_t* seqStorePtr, U32 litLength, const BYTE* literals, U32 offset, U32 matchLength)
{
U32 u;
/* literals */
seqStorePtr->litSum += litLength*ZSTD_LITFREQ_ADD;
for (u=0; u < litLength; u++)
seqStorePtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
/* literal Length */
{ const BYTE LL_deltaCode = 19;
const BYTE llCode = (litLength>63) ? (BYTE)ZSTD_highbit32(litLength) + LL_deltaCode : LL_Code[litLength];
seqStorePtr->litLengthFreq[llCode]++;
seqStorePtr->litLengthSum++;
}
/* match offset */
{ BYTE const offCode = (BYTE)ZSTD_highbit32(offset+1);
seqStorePtr->offCodeSum++;
seqStorePtr->offCodeFreq[offCode]++;
}
/* match Length */
{ const BYTE ML_deltaCode = 36;
const BYTE mlCode = (matchLength>127) ? (BYTE)ZSTD_highbit32(matchLength) + ML_deltaCode : ML_Code[matchLength];
seqStorePtr->matchLengthFreq[mlCode]++;
seqStorePtr->matchLengthSum++;
}
ZSTD_setLog2Prices(seqStorePtr);
}
#define SET_PRICE(pos, mlen_, offset_, litlen_, price_) \
{ \
while (last_pos < pos) { opt[last_pos+1].price = ZSTD_MAX_PRICE; last_pos++; } \
opt[pos].mlen = mlen_; \
opt[pos].off = offset_; \
opt[pos].litlen = litlen_; \
opt[pos].price = price_; \
}
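/* SET_PRICE(pos, ...) records the cheapest way found so far to reach position `pos` :
 * any gap between last_pos and pos is first marked unreachable (ZSTD_MAX_PRICE), then the
 * candidate (match length, offset, literal run, price) is stored.
 * Note : the macro advances last_pos as a side effect. */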
/* Update hashTable3 up to ip (excluded)
Assumption : always within prefix (i.e. not within extDict) */
FORCE_INLINE
U32 ZSTD_insertAndFindFirstIndexHash3 (ZSTD_CCtx* zc, const BYTE* ip)
{
U32* const hashTable3 = zc->hashTable3;
U32 const hashLog3 = zc->hashLog3;
const BYTE* const base = zc->base;
U32 idx = zc->nextToUpdate3;
const U32 target = zc->nextToUpdate3 = (U32)(ip - base);
const size_t hash3 = ZSTD_hash3Ptr(ip, hashLog3);
while(idx < target) {
hashTable3[ZSTD_hash3Ptr(base+idx, hashLog3)] = idx;
idx++;
}
return hashTable3[hash3];
}
/*-*************************************
* Binary Tree search
***************************************/
static U32 ZSTD_insertBtAndGetAllMatches (
ZSTD_CCtx* zc,
const BYTE* const ip, const BYTE* const iLimit,
U32 nbCompares, const U32 mls,
U32 extDict, ZSTD_match_t* matches, const U32 minMatchLen)
{
const BYTE* const base = zc->base;
const U32 current = (U32)(ip-base);
const U32 hashLog = zc->params.cParams.hashLog;
const size_t h = ZSTD_hashPtr(ip, hashLog, mls);
U32* const hashTable = zc->hashTable;
U32 matchIndex = hashTable[h];
U32* const bt = zc->chainTable;
const U32 btLog = zc->params.cParams.chainLog - 1;
const U32 btMask= (1U << btLog) - 1;
size_t commonLengthSmaller=0, commonLengthLarger=0;
const BYTE* const dictBase = zc->dictBase;
const U32 dictLimit = zc->dictLimit;
const BYTE* const dictEnd = dictBase + dictLimit;
const BYTE* const prefixStart = base + dictLimit;
const U32 btLow = btMask >= current ? 0 : current - btMask;
const U32 windowLow = zc->lowLimit;
U32* smallerPtr = bt + 2*(current&btMask);
U32* largerPtr = bt + 2*(current&btMask) + 1;
U32 matchEndIdx = current+8;
U32 dummy32; /* to be nullified at the end */
U32 mnum = 0;
const U32 minMatch = (mls == 3) ? 3 : 4;
size_t bestLength = minMatchLen-1;
if (minMatch == 3) { /* HC3 match finder */
U32 const matchIndex3 = ZSTD_insertAndFindFirstIndexHash3 (zc, ip);
if (matchIndex3>windowLow && (current - matchIndex3 < (1<<18))) {
const BYTE* match;
size_t currentMl=0;
if ((!extDict) || matchIndex3 >= dictLimit) {
match = base + matchIndex3;
if (match[bestLength] == ip[bestLength]) currentMl = ZSTD_count(ip, match, iLimit);
} else {
match = dictBase + matchIndex3;
if (MEM_readMINMATCH(match, MINMATCH) == MEM_readMINMATCH(ip, MINMATCH)) /* assumption : matchIndex3 <= dictLimit-4 (by table construction) */
currentMl = ZSTD_count_2segments(ip+MINMATCH, match+MINMATCH, iLimit, dictEnd, prefixStart) + MINMATCH;
}
/* save best solution */
if (currentMl > bestLength) {
bestLength = currentMl;
matches[mnum].off = ZSTD_REP_MOVE_OPT + current - matchIndex3;
matches[mnum].len = (U32)currentMl;
mnum++;
if (currentMl > ZSTD_OPT_NUM) goto update;
if (ip+currentMl == iLimit) goto update; /* best possible, and avoid read overflow*/
}
}
}
hashTable[h] = current; /* Update Hash Table */
while (nbCompares-- && (matchIndex > windowLow)) {
U32* nextPtr = bt + 2*(matchIndex & btMask);
size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
const BYTE* match;
if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
match = base + matchIndex;
if (match[matchLength] == ip[matchLength]) {
matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iLimit) +1;
}
} else {
match = dictBase + matchIndex;
matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iLimit, dictEnd, prefixStart);
if (matchIndex+matchLength >= dictLimit)
match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
}
if (matchLength > bestLength) {
if (matchLength > matchEndIdx - matchIndex) matchEndIdx = matchIndex + (U32)matchLength;
bestLength = matchLength;
matches[mnum].off = ZSTD_REP_MOVE_OPT + current - matchIndex;
matches[mnum].len = (U32)matchLength;
mnum++;
if (matchLength > ZSTD_OPT_NUM) break;
if (ip+matchLength == iLimit) /* equal : no way to know if inf or sup */
break; /* drop, to guarantee consistency (miss a little bit of compression) */
}
if (match[matchLength] < ip[matchLength]) {
/* match is smaller than current */
*smallerPtr = matchIndex; /* update smaller idx */
commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
} else {
/* match is larger than current */
*largerPtr = matchIndex;
commonLengthLarger = matchLength;
if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
largerPtr = nextPtr;
matchIndex = nextPtr[0];
} }
*smallerPtr = *largerPtr = 0;
update:
zc->nextToUpdate = (matchEndIdx > current + 8) ? matchEndIdx - 8 : current+1;
return mnum;
}
/** Tree updater, providing best match */
static U32 ZSTD_BtGetAllMatches (
ZSTD_CCtx* zc,
const BYTE* const ip, const BYTE* const iLimit,
const U32 maxNbAttempts, const U32 mls, ZSTD_match_t* matches, const U32 minMatchLen)
{
if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */
ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 0, matches, minMatchLen);
}
static U32 ZSTD_BtGetAllMatches_selectMLS (
ZSTD_CCtx* zc, /* Index table will be updated */
const BYTE* ip, const BYTE* const iHighLimit,
const U32 maxNbAttempts, const U32 matchLengthSearch, ZSTD_match_t* matches, const U32 minMatchLen)
{
switch(matchLengthSearch)
{
case 3 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen);
default :
case 4 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen);
case 5 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen);
case 7 :
case 6 : return ZSTD_BtGetAllMatches(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen);
}
}
/** Tree updater, providing best match */
static U32 ZSTD_BtGetAllMatches_extDict (
ZSTD_CCtx* zc,
const BYTE* const ip, const BYTE* const iLimit,
const U32 maxNbAttempts, const U32 mls, ZSTD_match_t* matches, const U32 minMatchLen)
{
if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */
ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
return ZSTD_insertBtAndGetAllMatches(zc, ip, iLimit, maxNbAttempts, mls, 1, matches, minMatchLen);
}
static U32 ZSTD_BtGetAllMatches_selectMLS_extDict (
ZSTD_CCtx* zc, /* Index table will be updated */
const BYTE* ip, const BYTE* const iHighLimit,
const U32 maxNbAttempts, const U32 matchLengthSearch, ZSTD_match_t* matches, const U32 minMatchLen)
{
switch(matchLengthSearch)
{
case 3 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 3, matches, minMatchLen);
default :
case 4 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 4, matches, minMatchLen);
case 5 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 5, matches, minMatchLen);
case 7 :
case 6 : return ZSTD_BtGetAllMatches_extDict(zc, ip, iHighLimit, maxNbAttempts, 6, matches, minMatchLen);
}
}
/*-*******************************
* Optimal parser
*********************************/
FORCE_INLINE
void ZSTD_compressBlock_opt_generic(ZSTD_CCtx* ctx,
const void* src, size_t srcSize, const int ultra)
{
seqStore_t* seqStorePtr = &(ctx->seqStore);
const BYTE* const istart = (const BYTE*)src;
const BYTE* ip = istart;
const BYTE* anchor = istart;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - 8;
const BYTE* const base = ctx->base;
const BYTE* const prefixStart = base + ctx->dictLimit;
const U32 maxSearches = 1U << ctx->params.cParams.searchLog;
const U32 sufficient_len = ctx->params.cParams.targetLength;
const U32 mls = ctx->params.cParams.searchLength;
const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 3 : 4;
ZSTD_optimal_t* opt = seqStorePtr->priceTable;
ZSTD_match_t* matches = seqStorePtr->matchTable;
const BYTE* inr;
U32 offset, rep[ZSTD_REP_NUM];
/* init */
ctx->nextToUpdate3 = ctx->nextToUpdate;
ZSTD_rescaleFreqs(seqStorePtr, (const BYTE*)src, srcSize);
ip += (ip==prefixStart);
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) rep[i]=ctx->rep[i]; }
/* Match Loop */
while (ip < ilimit) {
U32 cur, match_num, last_pos, litlen, price;
U32 u, mlen, best_mlen, best_off, litLength;
memset(opt, 0, sizeof(ZSTD_optimal_t));
last_pos = 0;
litlen = (U32)(ip - anchor);
/* check repCode */
{ U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor);
for (i=(ip == anchor); i<last_i; i++) {
const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
if ( (repCur > 0) && (repCur < (S32)(ip-prefixStart))
&& (MEM_readMINMATCH(ip, minMatch) == MEM_readMINMATCH(ip - repCur, minMatch))) {
mlen = (U32)ZSTD_count(ip+minMatch, ip+minMatch-repCur, iend) + minMatch;
if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) {
best_mlen = mlen; best_off = i; cur = 0; last_pos = 1;
goto _storeSequence;
}
best_off = i - (ip == anchor);
do {
price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
if (mlen > last_pos || price < opt[mlen].price)
SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */
mlen--;
} while (mlen >= minMatch);
} } }
match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, ip, iend, maxSearches, mls, matches, minMatch);
if (!last_pos && !match_num) { ip++; continue; }
if (match_num && (matches[match_num-1].len > sufficient_len || matches[match_num-1].len >= ZSTD_OPT_NUM)) {
best_mlen = matches[match_num-1].len;
best_off = matches[match_num-1].off;
cur = 0;
last_pos = 1;
goto _storeSequence;
}
/* set prices using matches at position = 0 */
best_mlen = (last_pos) ? last_pos : minMatch;
for (u = 0; u < match_num; u++) {
mlen = (u>0) ? matches[u-1].len+1 : best_mlen;
best_mlen = matches[u].len;
while (mlen <= best_mlen) {
price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
if (mlen > last_pos || price < opt[mlen].price)
SET_PRICE(mlen, mlen, matches[u].off, litlen, price); /* note : macro modifies last_pos */
mlen++;
} }
if (last_pos < minMatch) { ip++; continue; }
/* initialize opt[0] */
{ U32 i ; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
opt[0].mlen = 1;
opt[0].litlen = litlen;
/* check further positions */
for (cur = 1; cur <= last_pos; cur++) {
inr = ip + cur;
if (opt[cur-1].mlen == 1) {
litlen = opt[cur-1].litlen + 1;
if (cur > litlen) {
price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-litlen);
} else
price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor);
} else {
litlen = 1;
price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-1);
}
if (cur > last_pos || price <= opt[cur].price)
SET_PRICE(cur, 1, 0, litlen, price);
if (cur == last_pos) break;
if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */
continue;
mlen = opt[cur].mlen;
if (opt[cur].off > ZSTD_REP_MOVE_OPT) {
opt[cur].rep[2] = opt[cur-mlen].rep[1];
opt[cur].rep[1] = opt[cur-mlen].rep[0];
opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT;
} else {
opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur-mlen].rep[1] : opt[cur-mlen].rep[2];
opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur-mlen].rep[0] : opt[cur-mlen].rep[1];
opt[cur].rep[0] = ((opt[cur].off==ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur-mlen].rep[0] - 1) : (opt[cur-mlen].rep[opt[cur].off]);
}
best_mlen = minMatch;
{ U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
for (i=(opt[cur].mlen != 1); i<last_i; i++) { /* check rep */
const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
if ( (repCur > 0) && (repCur < (S32)(inr-prefixStart))
&& (MEM_readMINMATCH(inr, minMatch) == MEM_readMINMATCH(inr - repCur, minMatch))) {
mlen = (U32)ZSTD_count(inr+minMatch, inr+minMatch - repCur, iend) + minMatch;
if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) {
best_mlen = mlen; best_off = i; last_pos = cur + 1;
goto _storeSequence;
}
best_off = i - (opt[cur].mlen != 1);
if (mlen > best_mlen) best_mlen = mlen;
do {
if (opt[cur].mlen == 1) {
litlen = opt[cur].litlen;
if (cur > litlen) {
price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra);
} else
price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
} else {
litlen = 0;
price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
}
if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
SET_PRICE(cur + mlen, mlen, i, litlen, price);
mlen--;
} while (mlen >= minMatch);
} } }
match_num = ZSTD_BtGetAllMatches_selectMLS(ctx, inr, iend, maxSearches, mls, matches, best_mlen);
if (match_num > 0 && (matches[match_num-1].len > sufficient_len || cur + matches[match_num-1].len >= ZSTD_OPT_NUM)) {
best_mlen = matches[match_num-1].len;
best_off = matches[match_num-1].off;
last_pos = cur + 1;
goto _storeSequence;
}
/* set prices using matches at position = cur */
for (u = 0; u < match_num; u++) {
mlen = (u>0) ? matches[u-1].len+1 : best_mlen;
best_mlen = matches[u].len;
while (mlen <= best_mlen) {
if (opt[cur].mlen == 1) {
litlen = opt[cur].litlen;
if (cur > litlen)
price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra);
else
price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
} else {
litlen = 0;
price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra);
}
if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price);
mlen++;
} } }
best_mlen = opt[last_pos].mlen;
best_off = opt[last_pos].off;
cur = last_pos - best_mlen;
/* store sequence */
_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
opt[0].mlen = 1;
while (1) {
mlen = opt[cur].mlen;
offset = opt[cur].off;
opt[cur].mlen = best_mlen;
opt[cur].off = best_off;
best_mlen = mlen;
best_off = offset;
if (mlen > cur) break;
cur -= mlen;
}
for (u = 0; u <= last_pos;) {
u += opt[u].mlen;
}
for (cur=0; cur < last_pos; ) {
mlen = opt[cur].mlen;
if (mlen == 1) { ip++; cur++; continue; }
offset = opt[cur].off;
cur += mlen;
litLength = (U32)(ip - anchor);
if (offset > ZSTD_REP_MOVE_OPT) {
rep[2] = rep[1];
rep[1] = rep[0];
rep[0] = offset - ZSTD_REP_MOVE_OPT;
offset--;
} else {
if (offset != 0) {
best_off = (offset==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]);
if (offset != 1) rep[2] = rep[1];
rep[1] = rep[0];
rep[0] = best_off;
}
if (litLength==0) offset--;
}
ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH);
ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH);
anchor = ip = ip + mlen;
} } /* for (cur=0; cur < last_pos; ) */
/* Save reps for next block */
{ int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->repToConfirm[i] = rep[i]; }
/* Last Literals */
{ size_t const lastLLSize = iend - anchor;
memcpy(seqStorePtr->lit, anchor, lastLLSize);
seqStorePtr->lit += lastLLSize;
}
}
FORCE_INLINE
void ZSTD_compressBlock_opt_extDict_generic(ZSTD_CCtx* ctx,
const void* src, size_t srcSize, const int ultra)
{
seqStore_t* seqStorePtr = &(ctx->seqStore);
const BYTE* const istart = (const BYTE*)src;
const BYTE* ip = istart;
const BYTE* anchor = istart;
const BYTE* const iend = istart + srcSize;
const BYTE* const ilimit = iend - 8;
const BYTE* const base = ctx->base;
const U32 lowestIndex = ctx->lowLimit;
const U32 dictLimit = ctx->dictLimit;
const BYTE* const prefixStart = base + dictLimit;
const BYTE* const dictBase = ctx->dictBase;
const BYTE* const dictEnd = dictBase + dictLimit;
const U32 maxSearches = 1U << ctx->params.cParams.searchLog;
const U32 sufficient_len = ctx->params.cParams.targetLength;
const U32 mls = ctx->params.cParams.searchLength;
const U32 minMatch = (ctx->params.cParams.searchLength == 3) ? 3 : 4;
ZSTD_optimal_t* opt = seqStorePtr->priceTable;
ZSTD_match_t* matches = seqStorePtr->matchTable;
const BYTE* inr;
/* init */
U32 offset, rep[ZSTD_REP_NUM];
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) rep[i]=ctx->rep[i]; }
ctx->nextToUpdate3 = ctx->nextToUpdate;
ZSTD_rescaleFreqs(seqStorePtr, (const BYTE*)src, srcSize);
ip += (ip==prefixStart);
/* Match Loop */
while (ip < ilimit) {
U32 cur, match_num, last_pos, litlen, price;
U32 u, mlen, best_mlen, best_off, litLength;
U32 current = (U32)(ip-base);
memset(opt, 0, sizeof(ZSTD_optimal_t));
last_pos = 0;
opt[0].litlen = (U32)(ip - anchor);
/* check repCode */
{ U32 i, last_i = ZSTD_REP_CHECK + (ip==anchor);
for (i = (ip==anchor); i<last_i; i++) {
const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : rep[i];
const U32 repIndex = (U32)(current - repCur);
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
if ( (repCur > 0 && repCur <= (S32)current)
&& (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex>lowestIndex)) /* intentional overflow */
&& (MEM_readMINMATCH(ip, minMatch) == MEM_readMINMATCH(repMatch, minMatch)) ) {
/* repcode detected, we should take it */
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
mlen = (U32)ZSTD_count_2segments(ip+minMatch, repMatch+minMatch, iend, repEnd, prefixStart) + minMatch;
if (mlen > sufficient_len || mlen >= ZSTD_OPT_NUM) {
best_mlen = mlen; best_off = i; cur = 0; last_pos = 1;
goto _storeSequence;
}
best_off = i - (ip==anchor);
litlen = opt[0].litlen;
do {
price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
if (mlen > last_pos || price < opt[mlen].price)
SET_PRICE(mlen, mlen, i, litlen, price); /* note : macro modifies last_pos */
mlen--;
} while (mlen >= minMatch);
} } }
match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, ip, iend, maxSearches, mls, matches, minMatch); /* first search (depth 0) */
if (!last_pos && !match_num) { ip++; continue; }
{ U32 i; for (i=0; i<ZSTD_REP_NUM; i++) opt[0].rep[i] = rep[i]; }
opt[0].mlen = 1;
if (match_num && (matches[match_num-1].len > sufficient_len || matches[match_num-1].len >= ZSTD_OPT_NUM)) {
best_mlen = matches[match_num-1].len;
best_off = matches[match_num-1].off;
cur = 0;
last_pos = 1;
goto _storeSequence;
}
best_mlen = (last_pos) ? last_pos : minMatch;
/* set prices using matches at position = 0 */
for (u = 0; u < match_num; u++) {
mlen = (u>0) ? matches[u-1].len+1 : best_mlen;
best_mlen = matches[u].len;
litlen = opt[0].litlen;
while (mlen <= best_mlen) {
price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
if (mlen > last_pos || price < opt[mlen].price)
SET_PRICE(mlen, mlen, matches[u].off, litlen, price);
mlen++;
} }
if (last_pos < minMatch) {
ip++; continue;
}
/* check further positions */
for (cur = 1; cur <= last_pos; cur++) {
inr = ip + cur;
if (opt[cur-1].mlen == 1) {
litlen = opt[cur-1].litlen + 1;
if (cur > litlen) {
price = opt[cur - litlen].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-litlen);
} else
price = ZSTD_getLiteralPrice(seqStorePtr, litlen, anchor);
} else {
litlen = 1;
price = opt[cur - 1].price + ZSTD_getLiteralPrice(seqStorePtr, litlen, inr-1);
}
if (cur > last_pos || price <= opt[cur].price)
SET_PRICE(cur, 1, 0, litlen, price);
if (cur == last_pos) break;
if (inr > ilimit) /* last match must start at a minimum distance of 8 from oend */
continue;
mlen = opt[cur].mlen;
if (opt[cur].off > ZSTD_REP_MOVE_OPT) {
opt[cur].rep[2] = opt[cur-mlen].rep[1];
opt[cur].rep[1] = opt[cur-mlen].rep[0];
opt[cur].rep[0] = opt[cur].off - ZSTD_REP_MOVE_OPT;
} else {
opt[cur].rep[2] = (opt[cur].off > 1) ? opt[cur-mlen].rep[1] : opt[cur-mlen].rep[2];
opt[cur].rep[1] = (opt[cur].off > 0) ? opt[cur-mlen].rep[0] : opt[cur-mlen].rep[1];
opt[cur].rep[0] = ((opt[cur].off==ZSTD_REP_MOVE_OPT) && (mlen != 1)) ? (opt[cur-mlen].rep[0] - 1) : (opt[cur-mlen].rep[opt[cur].off]);
}
best_mlen = minMatch;
{ U32 i, last_i = ZSTD_REP_CHECK + (mlen != 1);
for (i = (mlen != 1); i<last_i; i++) {
const S32 repCur = (i==ZSTD_REP_MOVE_OPT) ? (opt[cur].rep[0] - 1) : opt[cur].rep[i];
const U32 repIndex = (U32)(current+cur - repCur);
const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
const BYTE* const repMatch = repBase + repIndex;
if ( (repCur > 0 && repCur <= (S32)(current+cur))
&& (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex>lowestIndex)) /* intentional overflow */
&& (MEM_readMINMATCH(inr, minMatch) == MEM_readMINMATCH(repMatch, minMatch)) ) {
/* repcode detected */
const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
mlen = (U32)ZSTD_count_2segments(inr+minMatch, repMatch+minMatch, iend, repEnd, prefixStart) + minMatch;
if (mlen > sufficient_len || cur + mlen >= ZSTD_OPT_NUM) {
best_mlen = mlen; best_off = i; last_pos = cur + 1;
goto _storeSequence;
}
best_off = i - (opt[cur].mlen != 1);
if (mlen > best_mlen) best_mlen = mlen;
do {
if (opt[cur].mlen == 1) {
litlen = opt[cur].litlen;
if (cur > litlen) {
price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, inr-litlen, best_off, mlen - MINMATCH, ultra);
} else
price = ZSTD_getPrice(seqStorePtr, litlen, anchor, best_off, mlen - MINMATCH, ultra);
} else {
litlen = 0;
price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, best_off, mlen - MINMATCH, ultra);
}
if (cur + mlen > last_pos || price <= opt[cur + mlen].price)
SET_PRICE(cur + mlen, mlen, i, litlen, price);
mlen--;
} while (mlen >= minMatch);
} } }
match_num = ZSTD_BtGetAllMatches_selectMLS_extDict(ctx, inr, iend, maxSearches, mls, matches, minMatch);
if (match_num > 0 && (matches[match_num-1].len > sufficient_len || cur + matches[match_num-1].len >= ZSTD_OPT_NUM)) {
best_mlen = matches[match_num-1].len;
best_off = matches[match_num-1].off;
last_pos = cur + 1;
goto _storeSequence;
}
/* set prices using matches at position = cur */
for (u = 0; u < match_num; u++) {
mlen = (u>0) ? matches[u-1].len+1 : best_mlen;
best_mlen = matches[u].len;
while (mlen <= best_mlen) {
if (opt[cur].mlen == 1) {
litlen = opt[cur].litlen;
if (cur > litlen)
price = opt[cur - litlen].price + ZSTD_getPrice(seqStorePtr, litlen, ip+cur-litlen, matches[u].off-1, mlen - MINMATCH, ultra);
else
price = ZSTD_getPrice(seqStorePtr, litlen, anchor, matches[u].off-1, mlen - MINMATCH, ultra);
} else {
litlen = 0;
price = opt[cur].price + ZSTD_getPrice(seqStorePtr, 0, NULL, matches[u].off-1, mlen - MINMATCH, ultra);
}
if (cur + mlen > last_pos || (price < opt[cur + mlen].price))
SET_PRICE(cur + mlen, mlen, matches[u].off, litlen, price);
mlen++;
} } } /* for (cur = 1; cur <= last_pos; cur++) */
best_mlen = opt[last_pos].mlen;
best_off = opt[last_pos].off;
cur = last_pos - best_mlen;
/* store sequence */
_storeSequence: /* cur, last_pos, best_mlen, best_off have to be set */
opt[0].mlen = 1;
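/* Walk the optimal path backwards from last_pos, swapping each node's
* (mlen, off) with the incoming values so the chosen sequences can be
* replayed forward below. */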
while (1) {
mlen = opt[cur].mlen;
offset = opt[cur].off;
opt[cur].mlen = best_mlen;
opt[cur].off = best_off;
best_mlen = mlen;
best_off = offset;
if (mlen > cur) break;
cur -= mlen;
}
for (u = 0; u <= last_pos; ) {
u += opt[u].mlen;
}
for (cur=0; cur < last_pos; ) {
mlen = opt[cur].mlen;
if (mlen == 1) { ip++; cur++; continue; }
offset = opt[cur].off;
cur += mlen;
litLength = (U32)(ip - anchor);
if (offset > ZSTD_REP_MOVE_OPT) {
rep[2] = rep[1];
rep[1] = rep[0];
rep[0] = offset - ZSTD_REP_MOVE_OPT;
offset--;
} else {
if (offset != 0) {
best_off = (offset==ZSTD_REP_MOVE_OPT) ? (rep[0] - 1) : (rep[offset]);
if (offset != 1) rep[2] = rep[1];
rep[1] = rep[0];
rep[0] = best_off;
}
if (litLength==0) offset--;
}
ZSTD_updatePrice(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH);
ZSTD_storeSeq(seqStorePtr, litLength, anchor, offset, mlen-MINMATCH);
anchor = ip = ip + mlen;
} } /* for (cur=0; cur < last_pos; ) */
/* Save reps for next block */
{ int i; for (i=0; i<ZSTD_REP_NUM; i++) ctx->repToConfirm[i] = rep[i]; }
/* Last Literals */
{ size_t lastLLSize = iend - anchor;
memcpy(seqStorePtr->lit, anchor, lastLLSize);
seqStorePtr->lit += lastLLSize;
}
}
#endif /* ZSTD_OPT_H_91842398743 */

View File

@ -0,0 +1,28 @@
#!/bin/sh
set -e
# Constants
INCLUDE='include/'
LIB='lib/'
SPACES='    '  # four spaces, the indent width used in the zstd sources
TAB="$(printf '\t')"
TMP="replacements.tmp"
echo "Files: " $INCLUDE* $LIB*
# Check files for existing tabs
grep "$TAB" $INCLUDE* $LIB* && exit 1 || true
# Replace a leading run of $SPACES on every line with a tab
sed -i '' "s/^$SPACES/$TAB/" $INCLUDE* $LIB*
# Execute once and then execute as long as replacements are happening
more_work="yes"
while [ ! -z "$more_work" ]
do
rm -f $TMP
# Replaces $SPACES that directly follow a $TAB with a $TAB.
# $TMP will be non-empty if any replacements took place.
sed -i '' "s/$TAB$SPACES/$TAB$TAB/w $TMP" $INCLUDE* $LIB*
more_work=$(cat "$TMP")
done
rm -f $TMP

View File

@ -0,0 +1,39 @@
#!/bin/sh
set -e
# Benchmarks run on a Ubuntu 14.04 VM with 2 cores and 4 GiB of RAM.
# The VM is running on a Macbook Pro with a 3.1 GHz Intel Core i7 processor and
# 16 GB of RAM and an SSD.
# $BENCHMARK_DIR is generated with the following commands, from the Ubuntu image
# ubuntu-16.10-desktop-amd64.iso.
# > mkdir mnt
# > sudo mount -o loop ubuntu-16.10-desktop-amd64.iso mnt
# > cp mnt/casper/filesystem.squashfs .
# > sudo unsquashfs filesystem.squashfs
# $HOME is on a ext4 filesystem
BENCHMARK_DIR="$HOME/squashfs-root/"
BENCHMARK_FS="$HOME/filesystem.squashfs"
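# Example invocation, assuming this script is saved as squashfs-benchmark.sh
# and your squashfs-tools build supports zstd (extra args pass through to mksquashfs):
# > sudo ./squashfs-benchmark.sh -comp zstd -Xcompression-level 3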
# Normalize the environment
sudo rm -f $BENCHMARK_FS 2> /dev/null > /dev/null || true
sudo umount /mnt/squashfs 2> /dev/null > /dev/null || true
# Run the benchmark
echo "Compression"
echo "sudo mksquashfs $BENCHMARK_DIR $BENCHMARK_FS $@"
time sudo mksquashfs $BENCHMARK_DIR $BENCHMARK_FS $@ 2> /dev/null > /dev/null
echo "Approximate compression ratio"
printf "%d / %d\n" \
$(sudo du -sx --block-size=1 $BENCHMARK_DIR | cut -f1) \
$(sudo du -sx --block-size=1 $BENCHMARK_FS | cut -f1);
# Mount the filesystem
sudo mount -t squashfs $BENCHMARK_FS /mnt/squashfs
echo "Decompression"
time sudo tar -c /mnt/squashfs 2> /dev/null | wc -c > /dev/null
sudo umount /mnt/squashfs

View File

@ -0,0 +1,245 @@
commit 16bb6b9fd684eadba41a36223d67805d7ea741e7
Author: Sean Purcell <me@seanp.xyz>
Date: Thu Apr 27 17:17:58 2017 -0700
Add zstd support to squashfs
diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig
index ffb093e..1adb334 100644
--- a/fs/squashfs/Kconfig
+++ b/fs/squashfs/Kconfig
@@ -165,6 +165,20 @@ config SQUASHFS_XZ
If unsure, say N.
+config SQUASHFS_ZSTD
+ bool "Include support for ZSTD compressed file systems"
+ depends on SQUASHFS
+ select ZSTD_DECOMPRESS
+ help
+ Saying Y here includes support for reading Squashfs file systems
+ compressed with ZSTD compression. ZSTD gives better compression than
+ the default ZLIB compression, while using less CPU.
+
+ ZSTD is not the standard compression used in Squashfs and so most
+ file systems will be readable without selecting this option.
+
+ If unsure, say N.
+
config SQUASHFS_4K_DEVBLK_SIZE
bool "Use 4K device block size?"
depends on SQUASHFS
diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile
index 246a6f3..6655631 100644
--- a/fs/squashfs/Makefile
+++ b/fs/squashfs/Makefile
@@ -15,3 +15,4 @@ squashfs-$(CONFIG_SQUASHFS_LZ4) += lz4_wrapper.o
squashfs-$(CONFIG_SQUASHFS_LZO) += lzo_wrapper.o
squashfs-$(CONFIG_SQUASHFS_XZ) += xz_wrapper.o
squashfs-$(CONFIG_SQUASHFS_ZLIB) += zlib_wrapper.o
+squashfs-$(CONFIG_SQUASHFS_ZSTD) += zstd_wrapper.o
diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c
index d2bc136..8366398 100644
--- a/fs/squashfs/decompressor.c
+++ b/fs/squashfs/decompressor.c
@@ -65,6 +65,12 @@ static const struct squashfs_decompressor squashfs_zlib_comp_ops = {
};
#endif
+#ifndef CONFIG_SQUASHFS_ZSTD
+static const struct squashfs_decompressor squashfs_zstd_comp_ops = {
+ NULL, NULL, NULL, NULL, ZSTD_COMPRESSION, "zstd", 0
+};
+#endif
+
static const struct squashfs_decompressor squashfs_unknown_comp_ops = {
NULL, NULL, NULL, NULL, 0, "unknown", 0
};
@@ -75,6 +81,7 @@ static const struct squashfs_decompressor *decompressor[] = {
&squashfs_lzo_comp_ops,
&squashfs_xz_comp_ops,
&squashfs_lzma_unsupported_comp_ops,
+ &squashfs_zstd_comp_ops,
&squashfs_unknown_comp_ops
};
diff --git a/fs/squashfs/decompressor.h b/fs/squashfs/decompressor.h
index a25713c..0f5a8e4 100644
--- a/fs/squashfs/decompressor.h
+++ b/fs/squashfs/decompressor.h
@@ -58,4 +58,8 @@ extern const struct squashfs_decompressor squashfs_lzo_comp_ops;
extern const struct squashfs_decompressor squashfs_zlib_comp_ops;
#endif
+#ifdef CONFIG_SQUASHFS_ZSTD
+extern const struct squashfs_decompressor squashfs_zstd_comp_ops;
+#endif
+
#endif
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index 506f4ba..24d12fd 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -241,6 +241,7 @@ struct meta_index {
#define LZO_COMPRESSION 3
#define XZ_COMPRESSION 4
#define LZ4_COMPRESSION 5
+#define ZSTD_COMPRESSION 6
struct squashfs_super_block {
__le32 s_magic;
diff --git a/fs/squashfs/zstd_wrapper.c b/fs/squashfs/zstd_wrapper.c
new file mode 100644
index 0000000..7cc9303
--- /dev/null
+++ b/fs/squashfs/zstd_wrapper.c
@@ -0,0 +1,151 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2017 Facebook
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * zstd_wrapper.c
+ */
+
+#include <linux/mutex.h>
+#include <linux/buffer_head.h>
+#include <linux/slab.h>
+#include <linux/zstd.h>
+#include <linux/vmalloc.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs.h"
+#include "decompressor.h"
+#include "page_actor.h"
+
+struct workspace {
+ void *mem;
+ size_t mem_size;
+ size_t window_size;
+};
+
+static void *zstd_init(struct squashfs_sb_info *msblk, void *buff)
+{
+ struct workspace *wksp = kmalloc(sizeof(*wksp), GFP_KERNEL);
+ if (wksp == NULL)
+ goto failed;
+ wksp->window_size = max_t(size_t,
+ msblk->block_size, SQUASHFS_METADATA_SIZE);
+ wksp->mem_size = ZSTD_DStreamWorkspaceBound(wksp->window_size);
+ wksp->mem = vmalloc(wksp->mem_size);
+ if (wksp->mem == NULL)
+ goto failed;
+
+ return wksp;
+
+failed:
+ ERROR("Failed to allocate zstd workspace\n");
+ kfree(wksp);
+ return ERR_PTR(-ENOMEM);
+}
+
+
+static void zstd_free(void *strm)
+{
+ struct workspace *wksp = strm;
+
+ if (wksp)
+ vfree(wksp->mem);
+ kfree(wksp);
+}
+
+
+static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
+ struct buffer_head **bh, int b, int offset, int length,
+ struct squashfs_page_actor *output)
+{
+ struct workspace *wksp = strm;
+ ZSTD_DStream *stream;
+ size_t total_out = 0;
+ size_t zstd_err;
+ int k = 0;
+ ZSTD_inBuffer in_buf = { NULL, 0, 0 };
+ ZSTD_outBuffer out_buf = { NULL, 0, 0 };
+
+ stream = ZSTD_initDStream(wksp->window_size, wksp->mem, wksp->mem_size);
+
+ if (!stream) {
+ ERROR("Failed to initialize zstd decompressor\n");
+ goto out;
+ }
+
+ out_buf.size = PAGE_SIZE;
+ out_buf.dst = squashfs_first_page(output);
+
+ do {
+ if (in_buf.pos == in_buf.size && k < b) {
+ int avail = min(length, msblk->devblksize - offset);
+ length -= avail;
+ in_buf.src = bh[k]->b_data + offset;
+ in_buf.size = avail;
+ in_buf.pos = 0;
+ offset = 0;
+ }
+
+ if (out_buf.pos == out_buf.size) {
+ out_buf.dst = squashfs_next_page(output);
+ if (out_buf.dst == NULL) {
+ /* shouldn't run out of pages before stream is
+ * done */
+ squashfs_finish_page(output);
+ goto out;
+ }
+ out_buf.pos = 0;
+ out_buf.size = PAGE_SIZE;
+ }
+
+ total_out -= out_buf.pos;
+ zstd_err = ZSTD_decompressStream(stream, &out_buf, &in_buf);
+ total_out += out_buf.pos; /* add the additional data produced */
+
+ if (in_buf.pos == in_buf.size && k < b)
+ put_bh(bh[k++]);
+ } while (zstd_err != 0 && !ZSTD_isError(zstd_err));
+
+ squashfs_finish_page(output);
+
+ if (ZSTD_isError(zstd_err)) {
+ ERROR("zstd decompression error: %d\n",
+ (int)ZSTD_getErrorCode(zstd_err));
+ goto out;
+ }
+
+ if (k < b)
+ goto out;
+
+ return (int)total_out;
+
+out:
+ for (; k < b; k++)
+ put_bh(bh[k]);
+
+ return -EIO;
+}
+
+const struct squashfs_decompressor squashfs_zstd_comp_ops = {
+ .init = zstd_init,
+ .free = zstd_free,
+ .decompress = zstd_uncompress,
+ .id = ZSTD_COMPRESSION,
+ .name = "zstd",
+ .supported = 1
+};

contrib/linux-kernel/test/.gitignore
View File

@ -0,0 +1 @@
*Test

View File

@ -0,0 +1,27 @@
IFLAGS := -isystem include/ -I ../include/ -I ../lib/zstd/ -isystem googletest/googletest/include
SOURCES := $(wildcard ../lib/zstd/*.c)
OBJECTS := $(patsubst %.c,%.o,$(SOURCES))
ARFLAGS := rcs
CXXFLAGS += -std=c++11
CFLAGS += -g -O0
CPPFLAGS += $(IFLAGS)
../lib/zstd/libzstd.a: $(OBJECTS)
$(AR) $(ARFLAGS) $@ $^
UserlandTest: UserlandTest.cpp ../lib/zstd/libzstd.a
$(CXX) $(CXXFLAGS) $(CFLAGS) $(CPPFLAGS) $^ googletest/build/googlemock/gtest/libgtest.a googletest/build/googlemock/gtest/libgtest_main.a -o $@
# Install googletest
.PHONY: googletest
googletest:
@$(RM) -rf googletest
@git clone https://github.com/google/googletest
@mkdir -p googletest/build
@cd googletest/build && cmake .. && $(MAKE)
clean:
$(RM) -f *.{o,a} ../lib/zstd/*.{o,a}

View File

@ -0,0 +1,554 @@
extern "C" {
#include <linux/zstd.h>
}
#include <gtest/gtest.h>
#include <memory>
#include <string>
#include <iostream>
using namespace std;
namespace {
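// WorkspaceDeleter captures the malloc'd workspace backing a zstd object :
// destroying the unique_ptr frees that workspace (the kernel-style API never
// allocates, so freeing the caller's buffer is the whole cleanup).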
struct WorkspaceDeleter {
void *memory;
template <typename T> void operator()(T const *) { free(memory); }
};
std::unique_ptr<ZSTD_CCtx, WorkspaceDeleter>
createCCtx(ZSTD_compressionParameters cParams) {
size_t const workspaceSize = ZSTD_CCtxWorkspaceBound(cParams);
void *workspace = malloc(workspaceSize);
std::unique_ptr<ZSTD_CCtx, WorkspaceDeleter> cctx{
ZSTD_initCCtx(workspace, workspaceSize), WorkspaceDeleter{workspace}};
if (!cctx) {
throw std::runtime_error{"Bad cctx"};
}
return cctx;
}
std::unique_ptr<ZSTD_CCtx, WorkspaceDeleter>
createCCtx(int level, unsigned long long estimatedSrcSize = 0,
size_t dictSize = 0) {
auto const cParams = ZSTD_getCParams(level, estimatedSrcSize, dictSize);
return createCCtx(cParams);
}
std::unique_ptr<ZSTD_DCtx, WorkspaceDeleter>
createDCtx() {
size_t const workspaceSize = ZSTD_DCtxWorkspaceBound();
void *workspace = malloc(workspaceSize);
std::unique_ptr<ZSTD_DCtx, WorkspaceDeleter> dctx{
ZSTD_initDCtx(workspace, workspaceSize), WorkspaceDeleter{workspace}};
if (!dctx) {
throw std::runtime_error{"Bad dctx"};
}
return dctx;
}
std::unique_ptr<ZSTD_CDict, WorkspaceDeleter>
createCDict(std::string const& dict, ZSTD_parameters params) {
size_t const workspaceSize = ZSTD_CDictWorkspaceBound(params.cParams);
void *workspace = malloc(workspaceSize);
std::unique_ptr<ZSTD_CDict, WorkspaceDeleter> cdict{
ZSTD_initCDict(dict.data(), dict.size(), params, workspace,
workspaceSize),
WorkspaceDeleter{workspace}};
if (!cdict) {
throw std::runtime_error{"Bad cdict"};
}
return cdict;
}
std::unique_ptr<ZSTD_CDict, WorkspaceDeleter>
createCDict(std::string const& dict, int level) {
auto const params = ZSTD_getParams(level, 0, dict.size());
return createCDict(dict, params);
}
std::unique_ptr<ZSTD_DDict, WorkspaceDeleter>
createDDict(std::string const& dict) {
size_t const workspaceSize = ZSTD_DDictWorkspaceBound();
void *workspace = malloc(workspaceSize);
std::unique_ptr<ZSTD_DDict, WorkspaceDeleter> ddict{
ZSTD_initDDict(dict.data(), dict.size(), workspace, workspaceSize),
WorkspaceDeleter{workspace}};
if (!ddict) {
throw std::runtime_error{"Bad ddict"};
}
return ddict;
}
std::unique_ptr<ZSTD_CStream, WorkspaceDeleter>
createCStream(ZSTD_parameters params, unsigned long long pledgedSrcSize = 0) {
size_t const workspaceSize = ZSTD_CStreamWorkspaceBound(params.cParams);
void *workspace = malloc(workspaceSize);
std::unique_ptr<ZSTD_CStream, WorkspaceDeleter> zcs{
ZSTD_initCStream(params, pledgedSrcSize, workspace, workspaceSize),
WorkspaceDeleter{workspace}};
if (!zcs) {
throw std::runtime_error{"bad cstream"};
}
return zcs;
}
std::unique_ptr<ZSTD_CStream, WorkspaceDeleter>
createCStream(ZSTD_compressionParameters cParams, ZSTD_CDict const &cdict,
unsigned long long pledgedSrcSize = 0) {
size_t const workspaceSize = ZSTD_CStreamWorkspaceBound(cParams);
void *workspace = malloc(workspaceSize);
std::unique_ptr<ZSTD_CStream, WorkspaceDeleter> zcs{
ZSTD_initCStream_usingCDict(&cdict, pledgedSrcSize, workspace,
workspaceSize),
WorkspaceDeleter{workspace}};
if (!zcs) {
throw std::runtime_error{"bad cstream"};
}
return zcs;
}
std::unique_ptr<ZSTD_CStream, WorkspaceDeleter>
createCStream(int level, unsigned long long pledgedSrcSize = 0) {
auto const params = ZSTD_getParams(level, pledgedSrcSize, 0);
return createCStream(params, pledgedSrcSize);
}
std::unique_ptr<ZSTD_DStream, WorkspaceDeleter>
createDStream(size_t maxWindowSize = (1ULL << ZSTD_WINDOWLOG_MAX),
ZSTD_DDict const *ddict = nullptr) {
size_t const workspaceSize = ZSTD_DStreamWorkspaceBound(maxWindowSize);
void *workspace = malloc(workspaceSize);
std::unique_ptr<ZSTD_DStream, WorkspaceDeleter> zds{
ddict == nullptr
? ZSTD_initDStream(maxWindowSize, workspace, workspaceSize)
: ZSTD_initDStream_usingDDict(maxWindowSize, ddict, workspace,
workspaceSize),
WorkspaceDeleter{workspace}};
if (!zds) {
throw std::runtime_error{"bad dstream"};
}
return zds;
}
std::string compress(ZSTD_CCtx &cctx, std::string const &data,
ZSTD_parameters params, std::string const &dict = "") {
std::string compressed;
compressed.resize(ZSTD_compressBound(data.size()));
size_t const rc =
dict.empty()
? ZSTD_compressCCtx(&cctx, &compressed[0], compressed.size(),
data.data(), data.size(), params)
: ZSTD_compress_usingDict(&cctx, &compressed[0], compressed.size(),
data.data(), data.size(), dict.data(),
dict.size(), params);
if (ZSTD_isError(rc)) {
throw std::runtime_error{"compression error"};
}
compressed.resize(rc);
return compressed;
}
std::string compress(ZSTD_CCtx& cctx, std::string const& data, int level, std::string const& dict = "") {
auto const params = ZSTD_getParams(level, 0, dict.size());
return compress(cctx, data, params, dict);
}
std::string decompress(ZSTD_DCtx& dctx, std::string const& compressed, size_t decompressedSize, std::string const& dict = "") {
std::string decompressed;
decompressed.resize(decompressedSize);
size_t const rc =
dict.empty()
? ZSTD_decompressDCtx(&dctx, &decompressed[0], decompressed.size(),
compressed.data(), compressed.size())
: ZSTD_decompress_usingDict(
&dctx, &decompressed[0], decompressed.size(), compressed.data(),
compressed.size(), dict.data(), dict.size());
if (ZSTD_isError(rc)) {
throw std::runtime_error{"decompression error"};
}
decompressed.resize(rc);
return decompressed;
}
std::string compress(ZSTD_CCtx& cctx, std::string const& data, ZSTD_CDict& cdict) {
std::string compressed;
compressed.resize(ZSTD_compressBound(data.size()));
size_t const rc =
ZSTD_compress_usingCDict(&cctx, &compressed[0], compressed.size(),
data.data(), data.size(), &cdict);
if (ZSTD_isError(rc)) {
throw std::runtime_error{"compression error"};
}
compressed.resize(rc);
return compressed;
}
std::string decompress(ZSTD_DCtx& dctx, std::string const& compressed, size_t decompressedSize, ZSTD_DDict& ddict) {
std::string decompressed;
decompressed.resize(decompressedSize);
size_t const rc =
ZSTD_decompress_usingDDict(&dctx, &decompressed[0], decompressed.size(),
compressed.data(), compressed.size(), &ddict);
if (ZSTD_isError(rc)) {
throw std::runtime_error{"decompression error"};
}
decompressed.resize(rc);
return decompressed;
}
std::string compress(ZSTD_CStream& zcs, std::string const& data) {
std::string compressed;
compressed.resize(ZSTD_compressBound(data.size()));
ZSTD_inBuffer in = {data.data(), data.size(), 0};
ZSTD_outBuffer out = {&compressed[0], compressed.size(), 0};
while (in.pos != in.size) {
size_t const rc = ZSTD_compressStream(&zcs, &out, &in);
if (ZSTD_isError(rc)) {
throw std::runtime_error{"compress stream failed"};
}
}
size_t const rc = ZSTD_endStream(&zcs, &out);
if (rc != 0) {
throw std::runtime_error{"compress end failed"};
}
compressed.resize(out.pos);
return compressed;
}
std::string decompress(ZSTD_DStream &zds, std::string const &compressed,
size_t decompressedSize) {
std::string decompressed;
decompressed.resize(decompressedSize);
ZSTD_inBuffer in = {compressed.data(), compressed.size(), 0};
ZSTD_outBuffer out = {&decompressed[0], decompressed.size(), 0};
while (in.pos != in.size) {
size_t const rc = ZSTD_decompressStream(&zds, &out, &in);
if (ZSTD_isError(rc)) {
throw std::runtime_error{"decompress stream failed"};
}
}
decompressed.resize(out.pos);
return decompressed;
}
std::string makeData(size_t size) {
std::string result;
result.reserve(size + 20);
while (result.size() < size) {
result += "Hello world";
}
return result;
}
std::string const kData = "Hello world";
std::string const kPlainDict = makeData(10000);
std::string const kZstdDict{
"\x37\xA4\x30\xEC\x99\x69\x58\x1C\x21\x10\xD8\x4A\x84\x01\xCC\xF3"
"\x3C\xCF\x9B\x25\xBB\xC9\x6E\xB2\x9B\xEC\x26\xAD\xCF\xDF\x4E\xCD"
"\xF3\x2C\x3A\x21\x84\x10\x42\x08\x21\x01\x33\xF1\x78\x3C\x1E\x8F"
"\xC7\xE3\xF1\x78\x3C\xCF\xF3\xBC\xF7\xD4\x42\x41\x41\x41\x41\x41"
"\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41"
"\x41\x41\x41\x41\xA1\x50\x28\x14\x0A\x85\x42\xA1\x50\x28\x14\x0A"
"\x85\xA2\x28\x8A\xA2\x28\x4A\x29\x7D\x74\xE1\xE1\xE1\xE1\xE1\xE1"
"\xE1\xE1\xE1\xE1\xE1\xE1\xE1\xE1\xE1\xE1\xE1\xE1\xE1\xF1\x78\x3C"
"\x1E\x8F\xC7\xE3\xF1\x78\x9E\xE7\x79\xEF\x01\x01\x00\x00\x00\x04"
"\x00\x00\x00\x08\x00\x00\x00"
"0123456789",
161};
}
TEST(Block, CCtx) {
auto cctx = createCCtx(1);
auto const compressed = compress(*cctx, kData, 1);
auto dctx = createDCtx();
auto const decompressed = decompress(*dctx, compressed, kData.size());
EXPECT_EQ(kData, decompressed);
}
TEST(Block, NoContentSize) {
auto cctx = createCCtx(1);
auto const c = compress(*cctx, kData, 1);
auto const size = ZSTD_findDecompressedSize(c.data(), c.size());
EXPECT_EQ(ZSTD_CONTENTSIZE_UNKNOWN, size);
}
TEST(Block, ContentSize) {
auto cctx = createCCtx(1);
auto params = ZSTD_getParams(1, 0, 0);
params.fParams.contentSizeFlag = 1;
auto const c = compress(*cctx, kData, params);
auto const size = ZSTD_findDecompressedSize(c.data(), c.size());
EXPECT_EQ(kData.size(), size);
}
TEST(Block, CCtxLevelIncrease) {
std::string c;
auto cctx = createCCtx(6);
auto dctx = createDCtx();
for (int level = 1; level <= 6; ++level) {
auto compressed = compress(*cctx, kData, level);
auto const decompressed = decompress(*dctx, compressed, kData.size());
EXPECT_EQ(kData, decompressed);
}
}
TEST(Block, PlainDict) {
auto cctx = createCCtx(1);
auto const compressed = compress(*cctx, kData, 1, kPlainDict);
auto dctx = createDCtx();
EXPECT_ANY_THROW(decompress(*dctx, compressed, kData.size()));
auto const decompressed =
decompress(*dctx, compressed, kData.size(), kPlainDict);
EXPECT_EQ(kData, decompressed);
}
TEST(Block, ZstdDict) {
auto cctx = createCCtx(1);
auto const compressed = compress(*cctx, kData, 1, kZstdDict);
auto dctx = createDCtx();
EXPECT_ANY_THROW(decompress(*dctx, compressed, kData.size()));
auto const decompressed =
decompress(*dctx, compressed, kData.size(), kZstdDict);
EXPECT_EQ(kData, decompressed);
}
TEST(Block, PreprocessedPlainDict) {
auto cctx = createCCtx(1);
auto const cdict = createCDict(kPlainDict, 1);
auto const compressed = compress(*cctx, kData, *cdict);
auto dctx = createDCtx();
auto const ddict = createDDict(kPlainDict);
EXPECT_ANY_THROW(decompress(*dctx, compressed, kData.size()));
auto const decompressed =
decompress(*dctx, compressed, kData.size(), *ddict);
EXPECT_EQ(kData, decompressed);
}
TEST(Block, PreprocessedZstdDict) {
auto cctx = createCCtx(1);
auto const cdict = createCDict(kZstdDict, 1);
auto const compressed = compress(*cctx, kData, *cdict);
auto dctx = createDCtx();
auto const ddict = createDDict(kZstdDict);
EXPECT_ANY_THROW(decompress(*dctx, compressed, kData.size()));
auto const decompressed =
decompress(*dctx, compressed, kData.size(), *ddict);
EXPECT_EQ(kData, decompressed);
}
TEST(Block, ReinitializeCCtx) {
auto cctx = createCCtx(1);
{
auto const compressed = compress(*cctx, kData, 1);
auto dctx = createDCtx();
auto const decompressed = decompress(*dctx, compressed, kData.size());
EXPECT_EQ(kData, decompressed);
}
// Re-create the cctx with the same memory
auto d = cctx.get_deleter();
auto raw = cctx.release();
auto params = ZSTD_getParams(1, 0, 0);
cctx.reset(
ZSTD_initCCtx(d.memory, ZSTD_CCtxWorkspaceBound(params.cParams)));
// Repeat
{
auto const compressed = compress(*cctx, kData, 1);
auto dctx = createDCtx();
auto const decompressed = decompress(*dctx, compressed, kData.size());
EXPECT_EQ(kData, decompressed);
}
}
TEST(Block, ReinitializeDCtx) {
auto dctx = createDCtx();
{
auto cctx = createCCtx(1);
auto const compressed = compress(*cctx, kData, 1);
auto const decompressed = decompress(*dctx, compressed, kData.size());
EXPECT_EQ(kData, decompressed);
}
// Re-create the dctx with the same memory
auto d = dctx.get_deleter();
auto raw = dctx.release();
dctx.reset(ZSTD_initDCtx(d.memory, ZSTD_DCtxWorkspaceBound()));
// Repeat
{
auto cctx = createCCtx(1);
auto const compressed = compress(*cctx, kData, 1);
auto const decompressed = decompress(*dctx, compressed, kData.size());
EXPECT_EQ(kData, decompressed);
}
}
TEST(Stream, Basic) {
auto zcs = createCStream(1);
auto const compressed = compress(*zcs, kData);
auto zds = createDStream();
auto const decompressed = decompress(*zds, compressed, kData.size());
EXPECT_EQ(kData, decompressed);
}
TEST(Stream, PlainDict) {
auto params = ZSTD_getParams(1, kData.size(), kPlainDict.size());
params.cParams.windowLog = 17;
auto cdict = createCDict(kPlainDict, params);
auto zcs = createCStream(params.cParams, *cdict, kData.size());
auto const compressed = compress(*zcs, kData);
auto const contentSize =
ZSTD_findDecompressedSize(compressed.data(), compressed.size());
EXPECT_ANY_THROW(decompress(*createDStream(), compressed, kData.size()));
auto ddict = createDDict(kPlainDict);
auto zds = createDStream(1 << 17, ddict.get());
auto const decompressed = decompress(*zds, compressed, kData.size());
EXPECT_EQ(kData, decompressed);
}
TEST(Stream, ZstdDict) {
auto params = ZSTD_getParams(1, 0, 0);
params.cParams.windowLog = 17;
auto cdict = createCDict(kZstdDict, 1);
auto zcs = createCStream(params.cParams, *cdict);
auto const compressed = compress(*zcs, kData);
EXPECT_ANY_THROW(decompress(*createDStream(), compressed, kData.size()));
auto ddict = createDDict(kZstdDict);
auto zds = createDStream(1 << 17, ddict.get());
auto const decompressed = decompress(*zds, compressed, kData.size());
EXPECT_EQ(kData, decompressed);
}
TEST(Stream, ResetCStream) {
auto zcs = createCStream(1);
auto zds = createDStream();
{
auto const compressed = compress(*zcs, kData);
auto const decompressed = decompress(*zds, compressed, kData.size());
EXPECT_EQ(kData, decompressed);
}
{
ZSTD_resetCStream(zcs.get(), 0);
auto const compressed = compress(*zcs, kData);
auto const decompressed = decompress(*zds, compressed, kData.size());
EXPECT_EQ(kData, decompressed);
}
}
TEST(Stream, ResetDStream) {
auto zcs = createCStream(1);
auto zds = createDStream();
auto const compressed = compress(*zcs, kData);
EXPECT_ANY_THROW(decompress(*zds, kData, kData.size()));
EXPECT_ANY_THROW(decompress(*zds, compressed, kData.size()));
ZSTD_resetDStream(zds.get());
auto const decompressed = decompress(*zds, compressed, kData.size());
EXPECT_EQ(kData, decompressed);
}
TEST(Stream, Flush) {
auto zcs = createCStream(1);
auto zds = createDStream();
std::string compressed;
{
compressed.resize(ZSTD_compressBound(kData.size()));
ZSTD_inBuffer in = {kData.data(), kData.size(), 0};
ZSTD_outBuffer out = {&compressed[0], compressed.size(), 0};
while (in.pos != in.size) {
size_t const rc = ZSTD_compressStream(zcs.get(), &out, &in);
if (ZSTD_isError(rc)) {
throw std::runtime_error{"compress stream failed"};
}
}
EXPECT_EQ(0, out.pos);
size_t const rc = ZSTD_flushStream(zcs.get(), &out);
if (rc != 0) {
throw std::runtime_error{"compress end failed"};
}
compressed.resize(out.pos);
EXPECT_LT(0, out.pos);
}
std::string decompressed;
{
decompressed.resize(kData.size());
ZSTD_inBuffer in = {compressed.data(), compressed.size(), 0};
ZSTD_outBuffer out = {&decompressed[0], decompressed.size(), 0};
while (in.pos != in.size) {
size_t const rc = ZSTD_decompressStream(zds.get(), &out, &in);
if (ZSTD_isError(rc)) {
throw std::runtime_error{"decompress stream failed"};
}
}
}
EXPECT_EQ(kData, decompressed);
}
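// TEST_SYMBOL probes the void *__<symbol> pointers created by the
// EXPORT_SYMBOL shim in include/linux/module.h ; a failure here means the
// kernel zstd library no longer exports that symbol.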
#define TEST_SYMBOL(symbol) \
do { \
extern void *__##symbol; \
EXPECT_NE((void *)0, __##symbol); \
} while (0)
TEST(API, Symbols) {
TEST_SYMBOL(ZSTD_CCtxWorkspaceBound);
TEST_SYMBOL(ZSTD_initCCtx);
TEST_SYMBOL(ZSTD_compressCCtx);
TEST_SYMBOL(ZSTD_compress_usingDict);
TEST_SYMBOL(ZSTD_DCtxWorkspaceBound);
TEST_SYMBOL(ZSTD_initDCtx);
TEST_SYMBOL(ZSTD_decompressDCtx);
TEST_SYMBOL(ZSTD_decompress_usingDict);
TEST_SYMBOL(ZSTD_CDictWorkspaceBound);
TEST_SYMBOL(ZSTD_initCDict);
TEST_SYMBOL(ZSTD_compress_usingCDict);
TEST_SYMBOL(ZSTD_DDictWorkspaceBound);
TEST_SYMBOL(ZSTD_initDDict);
TEST_SYMBOL(ZSTD_decompress_usingDDict);
TEST_SYMBOL(ZSTD_CStreamWorkspaceBound);
TEST_SYMBOL(ZSTD_initCStream);
TEST_SYMBOL(ZSTD_initCStream_usingCDict);
TEST_SYMBOL(ZSTD_resetCStream);
TEST_SYMBOL(ZSTD_compressStream);
TEST_SYMBOL(ZSTD_flushStream);
TEST_SYMBOL(ZSTD_endStream);
TEST_SYMBOL(ZSTD_CStreamInSize);
TEST_SYMBOL(ZSTD_CStreamOutSize);
TEST_SYMBOL(ZSTD_DStreamWorkspaceBound);
TEST_SYMBOL(ZSTD_initDStream);
TEST_SYMBOL(ZSTD_initDStream_usingDDict);
TEST_SYMBOL(ZSTD_resetDStream);
TEST_SYMBOL(ZSTD_decompressStream);
TEST_SYMBOL(ZSTD_DStreamInSize);
TEST_SYMBOL(ZSTD_DStreamOutSize);
TEST_SYMBOL(ZSTD_findFrameCompressedSize);
TEST_SYMBOL(ZSTD_getFrameContentSize);
TEST_SYMBOL(ZSTD_findDecompressedSize);
TEST_SYMBOL(ZSTD_getCParams);
TEST_SYMBOL(ZSTD_getParams);
TEST_SYMBOL(ZSTD_checkCParams);
TEST_SYMBOL(ZSTD_adjustCParams);
TEST_SYMBOL(ZSTD_isFrame);
TEST_SYMBOL(ZSTD_getDictID_fromDict);
TEST_SYMBOL(ZSTD_getDictID_fromDDict);
TEST_SYMBOL(ZSTD_getDictID_fromFrame);
TEST_SYMBOL(ZSTD_compressBegin);
TEST_SYMBOL(ZSTD_compressBegin_usingDict);
TEST_SYMBOL(ZSTD_compressBegin_advanced);
TEST_SYMBOL(ZSTD_copyCCtx);
TEST_SYMBOL(ZSTD_compressBegin_usingCDict);
TEST_SYMBOL(ZSTD_compressContinue);
TEST_SYMBOL(ZSTD_compressEnd);
TEST_SYMBOL(ZSTD_getFrameParams);
TEST_SYMBOL(ZSTD_decompressBegin);
TEST_SYMBOL(ZSTD_decompressBegin_usingDict);
TEST_SYMBOL(ZSTD_copyDCtx);
TEST_SYMBOL(ZSTD_nextSrcSizeToDecompress);
TEST_SYMBOL(ZSTD_decompressContinue);
TEST_SYMBOL(ZSTD_nextInputType);
TEST_SYMBOL(ZSTD_getBlockSizeMax);
TEST_SYMBOL(ZSTD_compressBlock);
TEST_SYMBOL(ZSTD_decompressBlock);
TEST_SYMBOL(ZSTD_insertBlock);
}

View File

@ -0,0 +1,177 @@
#ifndef ASM_UNALIGNED_H
#define ASM_UNALIGNED_H
#include <assert.h>
#include <linux/string.h>
#include <linux/types.h>
#define _LITTLE_ENDIAN 1
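/* This userland shim is hard-wired for little-endian hosts ;
* _isLittleEndian() asserts at runtime that the build machine agrees. */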
static unsigned _isLittleEndian(void)
{
const union { uint32_t u; uint8_t c[4]; } one = { 1 };
assert(_LITTLE_ENDIAN == one.c[0]);
return _LITTLE_ENDIAN;
}
static uint16_t _swap16(uint16_t in)
{
return (uint16_t)(((in & 0xFF) << 8) | ((in & 0xFF00) >> 8));
}
static uint32_t _swap32(uint32_t in)
{
return __builtin_bswap32(in);
}
static uint64_t _swap64(uint64_t in)
{
return __builtin_bswap64(in);
}
/* Little endian */
static uint16_t get_unaligned_le16(const void* memPtr)
{
uint16_t val;
memcpy(&val, memPtr, sizeof(val));
if (!_isLittleEndian()) val = _swap16(val);
return val;
}
static uint32_t get_unaligned_le32(const void* memPtr)
{
uint32_t val;
memcpy(&val, memPtr, sizeof(val));
if (!_isLittleEndian()) val = _swap32(val);
return val;
}
static uint64_t get_unaligned_le64(const void* memPtr)
{
uint64_t val;
memcpy(&val, memPtr, sizeof(val));
if (!_isLittleEndian()) val = _swap64(val);
return val;
}
static void put_unaligned_le16(uint16_t value, void* memPtr)
{
if (!_isLittleEndian()) value = _swap16(value);
memcpy(memPtr, &value, sizeof(value));
}
static void put_unaligned_le32(uint32_t value, void* memPtr)
{
if (!_isLittleEndian()) value = _swap32(value);
memcpy(memPtr, &value, sizeof(value));
}
static void put_unaligned_le64(uint64_t value, void* memPtr)
{
if (!_isLittleEndian()) value = _swap64(value);
memcpy(memPtr, &value, sizeof(value));
}
/* big endian */
static uint16_t get_unaligned_be16(const void* memPtr)
{
uint16_t val;
memcpy(&val, memPtr, sizeof(val));
if (_isLittleEndian()) val = _swap16(val);
return val;
}
static void put_unaligned_be16(uint16_t value, void* memPtr)
{
if (_isLittleEndian()) value = _swap16(value);
memcpy(memPtr, &value, sizeof(value));
}
static uint32_t get_unaligned_be32(const void* memPtr)
{
uint32_t val;
memcpy(&val, memPtr, sizeof(val));
if (_isLittleEndian()) val = _swap32(val);
return val;
}
static uint64_t get_unaligned_be64(const void* memPtr)
{
uint64_t val;
memcpy(&val, memPtr, sizeof(val));
if (_isLittleEndian()) val = _swap64(val);
return val;
}
static void put_unaligned_be32(uint32_t value, void* memPtr)
{
if (_isLittleEndian()) value = _swap32(value);
memcpy(memPtr, &value, sizeof(value));
}
static void put_unaligned_be64(uint64_t value, void* memPtr)
{
if (_isLittleEndian()) value = _swap64(value);
memcpy(memPtr, &value, sizeof(value));
}
/* generic */
extern void __bad_unaligned_access_size(void);
#define __get_unaligned_le(ptr) ((typeof(*(ptr)))({ \
__builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \
__builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)), \
__builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)), \
__builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)), \
__bad_unaligned_access_size())))); \
}))
#define __get_unaligned_be(ptr) ((typeof(*(ptr)))({ \
__builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \
__builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)), \
__builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)), \
__builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)), \
__bad_unaligned_access_size())))); \
}))
#define __put_unaligned_le(val, ptr) \
({ \
void *__gu_p = (ptr); \
switch (sizeof(*(ptr))) { \
case 1: \
*(uint8_t *)__gu_p = (uint8_t)(val); \
break; \
case 2: \
put_unaligned_le16((uint16_t)(val), __gu_p); \
break; \
case 4: \
put_unaligned_le32((uint32_t)(val), __gu_p); \
break; \
case 8: \
put_unaligned_le64((uint64_t)(val), __gu_p); \
break; \
default: \
__bad_unaligned_access_size(); \
break; \
} \
(void)0; \
})
#define __put_unaligned_be(val, ptr) \
({ \
void *__gu_p = (ptr); \
switch (sizeof(*(ptr))) { \
case 1: \
*(uint8_t *)__gu_p = (uint8_t)(val); \
break; \
case 2: \
put_unaligned_be16((uint16_t)(val), __gu_p); \
break; \
case 4: \
put_unaligned_be32((uint32_t)(val), __gu_p); \
break; \
case 8: \
put_unaligned_be64((uint64_t)(val), __gu_p); \
break; \
default: \
__bad_unaligned_access_size(); \
break; \
} \
(void)0; \
})
#if _LITTLE_ENDIAN
# define get_unaligned __get_unaligned_le
# define put_unaligned __put_unaligned_le
#else
# define get_unaligned __get_unaligned_be
# define put_unaligned __put_unaligned_be
#endif
#endif // ASM_UNALIGNED_H

View File

@ -0,0 +1,12 @@
#ifndef LINUX_COMPILER_H_
#define LINUX_COMPILER_H_
#ifndef __always_inline
# define __always_inline inline
#endif
#ifndef noinline
# define noinline __attribute__((__noinline__))
#endif
#endif // LINUX_COMPILER_H_

View File

@ -0,0 +1,14 @@
#ifndef LINUX_KERNEL_H_
#define LINUX_KERNEL_H_
#define ALIGN(x, a) ({ \
typeof(x) const __xe = (x); \
typeof(a) const __ae = (a); \
typeof(a) const __m = __ae - 1; \
typeof(x) const __r = __xe & __m; \
__xe + (__r ? (__ae - __r) : 0); \
})
#define PTR_ALIGN(p, a) (typeof(p))ALIGN((unsigned long long)(p), (a))
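/* e.g. ALIGN(13, 8) == 16 and ALIGN(16, 8) == 16 ; PTR_ALIGN rounds a
* pointer up to the next a-byte boundary the same way. */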
#endif // LINUX_KERNEL_H_

View File

@ -0,0 +1,10 @@
#ifndef LINUX_MODULE_H_
#define LINUX_MODULE_H_
#define EXPORT_SYMBOL(symbol) \
void* __##symbol = symbol
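/* Each EXPORT_SYMBOL becomes a __<symbol> pointer that UserlandTest's
* Symbols test can probe via TEST_SYMBOL. */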
#define MODULE_LICENSE(license) static char const *const LICENSE = license
#define MODULE_DESCRIPTION(description) \
static char const *const DESCRIPTION = description
#endif // LINUX_MODULE_H_

View File

@ -0,0 +1 @@
#include <string.h>

View File

@ -0,0 +1,2 @@
#include <stddef.h>
#include <stdint.h>

View File

@ -15,7 +15,7 @@ libzstd_includes = [include_directories(common_dir, dictbuilder_dir, compress_di
if get_option('legacy_support')
message('Enabling legacy support')
libzstd_cflags = ['-DZSTD_LEGACY_SUPPORT=1']
libzstd_cflags = ['-DZSTD_LEGACY_SUPPORT=4']
legacy_dir = join_paths(lib_dir, 'legacy')
libzstd_includes += [include_directories(legacy_dir)]

View File

@ -91,7 +91,7 @@ void usage() {
std::fprintf(stderr, " -# : # compression level (1-%d, default:%d)\n", kMaxNonUltraCompressionLevel, kDefaultCompressionLevel);
std::fprintf(stderr, " -d, --decompress : decompression\n");
std::fprintf(stderr, " -o file : result stored into `file` (only if 1 input file)\n");
std::fprintf(stderr, " -f, --force : overwrite output without prompting\n");
std::fprintf(stderr, " -f, --force : overwrite output without prompting, (de)compress links\n");
std::fprintf(stderr, " --rm : remove source file(s) after successful (de)compression\n");
std::fprintf(stderr, " -k, --keep : preserve source file(s) (default)\n");
std::fprintf(stderr, " -h, --help : display help and exit\n");
@ -121,6 +121,7 @@ Options::Status Options::parse(int argc, const char **argv) {
bool recursive = false;
bool ultra = false;
bool forceStdout = false;
bool followLinks = false;
// Local copy of input files, which are pointers into argv.
std::vector<const char *> localInputFiles;
for (int i = 1; i < argc; ++i) {
@ -255,6 +256,7 @@ Options::Status Options::parse(int argc, const char **argv) {
case 'f':
overwrite = true;
forceStdout = true;
followLinks = true;
break;
case 't':
test = true;
@ -328,13 +330,29 @@ Options::Status Options::parse(int argc, const char **argv) {
}
}
g_utilDisplayLevel = verbosity;
// Remove local input files that are symbolic links
if (!followLinks) {
localInputFiles.erase(
std::remove_if(localInputFiles.begin(), localInputFiles.end(),
[&](const char *path) {
bool isLink = UTIL_isLink(path);
if (isLink && verbosity >= 2) {
std::fprintf(
stderr,
"Warning : %s is a symbolic link, ignoring\n",
path);
}
return isLink;
}),
localInputFiles.end());
}
// Translate input files/directories into files to (de)compress
if (recursive) {
char *scratchBuffer = nullptr;
unsigned numFiles = 0;
const char **files =
UTIL_createFileList(localInputFiles.data(), localInputFiles.size(),
&scratchBuffer, &numFiles);
&scratchBuffer, &numFiles, followLinks);
if (files == nullptr) {
std::fprintf(stderr, "Error traversing directories\n");
return Status::Failure;

View File

@ -10,6 +10,7 @@
#include <gtest/gtest.h>
#include <atomic>
#include <iostream>
#include <thread>
#include <vector>
@ -34,16 +35,19 @@ TEST(ThreadPool, AllJobsFinished) {
std::atomic<unsigned> numFinished{0};
std::atomic<bool> start{false};
{
std::cerr << "Creating executor" << std::endl;
ThreadPool executor(5);
for (int i = 0; i < 10; ++i) {
executor.add([ &numFinished, &start ] {
while (!start.load()) {
// spin
std::this_thread::yield();
}
++numFinished;
});
}
std::cerr << "Starting" << std::endl;
start.store(true);
std::cerr << "Finishing" << std::endl;
}
EXPECT_EQ(10, numFinished.load());
}

View File

@ -10,6 +10,7 @@
#include "utils/WorkQueue.h"
#include <gtest/gtest.h>
#include <iostream>
#include <memory>
#include <mutex>
#include <thread>
@ -201,11 +202,13 @@ TEST(WorkQueue, BoundedSizeMPMC) {
WorkQueue<int> queue(10);
std::vector<int> results(200, -1);
std::mutex mutex;
std::cerr << "Creating popperThreads" << std::endl;
std::vector<std::thread> popperThreads;
for (int i = 0; i < 4; ++i) {
popperThreads.emplace_back(Popper{&queue, results.data(), &mutex});
}
std::cerr << "Creating pusherThreads" << std::endl;
std::vector<std::thread> pusherThreads;
for (int i = 0; i < 2; ++i) {
auto min = i * 100;
@ -218,15 +221,19 @@ TEST(WorkQueue, BoundedSizeMPMC) {
});
}
std::cerr << "Joining pusherThreads" << std::endl;
for (auto& thread : pusherThreads) {
thread.join();
}
std::cerr << "Finishing queue" << std::endl;
queue.finish();
std::cerr << "Joining popperThreads" << std::endl;
for (auto& thread : popperThreads) {
thread.join();
}
std::cerr << "Inspecting results" << std::endl;
for (int i = 0; i < 200; ++i) {
EXPECT_EQ(i, results[i]);
}

View File

@ -0,0 +1,4 @@
seekable_compression
seekable_decompression
parallel_processing
parallel_compression

View File

@ -0,0 +1,42 @@
# ################################################################
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
# ################################################################
# This Makefile presumes libzstd is built, using `make` in / or /lib/
LDFLAGS += ../../../lib/libzstd.a
CPPFLAGS += -I../ -I../../../lib -I../../../lib/common
CFLAGS ?= -O3
CFLAGS += -g
SEEKABLE_OBJS = ../zstdseek_compress.c ../zstdseek_decompress.c
.PHONY: default all clean test
default: all
all: seekable_compression seekable_decompression parallel_processing parallel_compression
seekable_compression : seekable_compression.c $(SEEKABLE_OBJS)
$(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@
seekable_decompression : seekable_decompression.c $(SEEKABLE_OBJS)
$(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@
parallel_processing : parallel_processing.c $(SEEKABLE_OBJS)
$(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@ -pthread
parallel_compression : parallel_compression.c $(SEEKABLE_OBJS)
$(CC) $(CPPFLAGS) $(CFLAGS) $^ $(LDFLAGS) -o $@ -pthread
clean:
@rm -f core *.o tmp* result* *.zst \
seekable_compression seekable_decompression \
parallel_processing parallel_compression
@echo Cleaning completed

View File

@ -0,0 +1,214 @@
/**
* Copyright 2017-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the license found in the
* LICENSE-examples file in the root directory of this source tree.
*/
#include <stdlib.h> // malloc, free, exit, atoi
#include <stdio.h> // fprintf, perror, feof, fopen, etc.
#include <string.h> // strlen, memset, strcat
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h> // presumes zstd library is installed
#include <zstd_errors.h>
#if defined(WIN32) || defined(_WIN32)
# include <windows.h>
# define SLEEP(x) Sleep(x)
#else
# include <unistd.h>
# define SLEEP(x) usleep(x * 1000)
#endif
#define XXH_NAMESPACE ZSTD_
#include "xxhash.h"
#include "pool.h" // use zstd thread pool for demo
#include "zstd_seekable.h"
static void* malloc_orDie(size_t size)
{
void* const buff = malloc(size);
if (buff) return buff;
/* error */
perror("malloc:");
exit(1);
}
static FILE* fopen_orDie(const char *filename, const char *instruction)
{
FILE* const inFile = fopen(filename, instruction);
if (inFile) return inFile;
/* error */
perror(filename);
exit(3);
}
static size_t fread_orDie(void* buffer, size_t sizeToRead, FILE* file)
{
size_t const readSize = fread(buffer, 1, sizeToRead, file);
if (readSize == sizeToRead) return readSize; /* good */
if (feof(file)) return readSize; /* good, reached end of file */
/* error */
perror("fread");
exit(4);
}
static size_t fwrite_orDie(const void* buffer, size_t sizeToWrite, FILE* file)
{
size_t const writtenSize = fwrite(buffer, 1, sizeToWrite, file);
if (writtenSize == sizeToWrite) return sizeToWrite; /* good */
/* error */
perror("fwrite");
exit(5);
}
static size_t fclose_orDie(FILE* file)
{
if (!fclose(file)) return 0;
/* error */
perror("fclose");
exit(6);
}
static void fseek_orDie(FILE* file, long int offset, int origin)
{
if (!fseek(file, offset, origin)) {
if (!fflush(file)) return;
}
/* error */
perror("fseek");
exit(7);
}
static long int ftell_orDie(FILE* file)
{
long int off = ftell(file);
if (off != -1) return off;
/* error */
perror("ftell");
exit(8);
}
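/* One job per frame : a pool worker fills dst/dstSize/checksum and sets done,
* which the main thread polls so compressed frames are written out in order. */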
struct job {
const void* src;
size_t srcSize;
void* dst;
size_t dstSize;
unsigned checksum;
int compressionLevel;
int done;
};
static void compressFrame(void* opaque)
{
struct job* job = opaque;
job->checksum = XXH64(job->src, job->srcSize, 0);
size_t ret = ZSTD_compress(job->dst, job->dstSize, job->src, job->srcSize, job->compressionLevel);
if (ZSTD_isError(ret)) {
fprintf(stderr, "ZSTD_compress() error : %s \n", ZSTD_getErrorName(ret));
exit(20);
}
job->dstSize = ret;
job->done = 1;
}
static void compressFile_orDie(const char* fname, const char* outName, int cLevel, unsigned frameSize, int nbThreads)
{
POOL_ctx* pool = POOL_create(nbThreads, nbThreads);
if (pool == NULL) { fprintf(stderr, "POOL_create() error \n"); exit(9); }
FILE* const fin = fopen_orDie(fname, "rb");
FILE* const fout = fopen_orDie(outName, "wb");
if (ZSTD_compressBound(frameSize) > 0xFFFFFFFFU) { fprintf(stderr, "Frame size too large \n"); exit(10); }
unsigned dstSize = ZSTD_compressBound(frameSize);
fseek_orDie(fin, 0, SEEK_END);
long int length = ftell_orDie(fin);
fseek_orDie(fin, 0, SEEK_SET);
size_t numFrames = (length + frameSize - 1) / frameSize;
struct job* jobs = malloc_orDie(sizeof(struct job) * numFrames);
size_t i;
for(i = 0; i < numFrames; i++) {
void* in = malloc_orDie(frameSize);
void* out = malloc_orDie(dstSize);
size_t inSize = fread_orDie(in, frameSize, fin);
jobs[i].src = in;
jobs[i].srcSize = inSize;
jobs[i].dst = out;
jobs[i].dstSize = dstSize;
jobs[i].compressionLevel = cLevel;
jobs[i].done = 0;
POOL_add(pool, compressFrame, &jobs[i]);
}
ZSTD_frameLog* fl = ZSTD_seekable_createFrameLog(1);
if (fl == NULL) { fprintf(stderr, "ZSTD_seekable_createFrameLog() failed \n"); exit(11); }
for (i = 0; i < numFrames; i++) {
while (!jobs[i].done) SLEEP(5); /* wake up every 5 milliseconds to check */
fwrite_orDie(jobs[i].dst, jobs[i].dstSize, fout);
free((void*)jobs[i].src);
free(jobs[i].dst);
size_t ret = ZSTD_seekable_logFrame(fl, jobs[i].dstSize, jobs[i].srcSize, jobs[i].checksum);
if (ZSTD_isError(ret)) { fprintf(stderr, "ZSTD_seekable_logFrame() error : %s \n", ZSTD_getErrorName(ret)); }
}
{ unsigned char seekTableBuff[1024];
ZSTD_outBuffer out = {seekTableBuff, 1024, 0};
while (ZSTD_seekable_writeSeekTable(fl, &out) != 0) {
fwrite_orDie(seekTableBuff, out.pos, fout);
out.pos = 0;
}
fwrite_orDie(seekTableBuff, out.pos, fout);
}
ZSTD_seekable_freeFrameLog(fl);
free(jobs);
fclose_orDie(fout);
fclose_orDie(fin);
}
static const char* createOutFilename_orDie(const char* filename)
{
size_t const inL = strlen(filename);
size_t const outL = inL + 5;
void* outSpace = malloc_orDie(outL);
memset(outSpace, 0, outL);
strcat(outSpace, filename);
strcat(outSpace, ".zst");
return (const char*)outSpace;
}
int main(int argc, const char** argv) {
const char* const exeName = argv[0];
if (argc!=4) {
printf("wrong arguments\n");
printf("usage:\n");
printf("%s FILE FRAME_SIZE NB_THREADS\n", exeName);
return 1;
}
{ const char* const inFileName = argv[1];
unsigned const frameSize = (unsigned)atoi(argv[2]);
int const nbThreads = atoi(argv[3]);
const char* const outFileName = createOutFilename_orDie(inFileName);
compressFile_orDie(inFileName, outFileName, 5, frameSize, nbThreads);
}
return 0;
}

View File

@ -0,0 +1,193 @@
/**
* Copyright 2017-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the license found in the
* LICENSE-examples file in the root directory of this source tree.
*/
/*
* A simple demo that sums up all the bytes in the file in parallel using
* seekable decompression and the zstd thread pool
*/
#include <stdlib.h> // malloc, exit
#include <stdio.h> // fprintf, perror, feof
#include <string.h> // strerror
#include <errno.h> // errno
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h> // presumes zstd library is installed
#include <zstd_errors.h>
#if defined(WIN32) || defined(_WIN32)
# include <windows.h>
# define SLEEP(x) Sleep(x)
#else
# include <unistd.h>
# define SLEEP(x) usleep(x * 1000)
#endif
#include "pool.h" // use zstd thread pool for demo
#include "zstd_seekable.h"
#define MIN(a, b) ((a) < (b) ? (a) : (b))
static void* malloc_orDie(size_t size)
{
void* const buff = malloc(size);
if (buff) return buff;
/* error */
perror("malloc");
exit(1);
}
static void* realloc_orDie(void* ptr, size_t size)
{
ptr = realloc(ptr, size);
if (ptr) return ptr;
/* error */
perror("realloc");
exit(1);
}
static FILE* fopen_orDie(const char *filename, const char *instruction)
{
FILE* const inFile = fopen(filename, instruction);
if (inFile) return inFile;
/* error */
perror(filename);
exit(3);
}
static size_t fread_orDie(void* buffer, size_t sizeToRead, FILE* file)
{
size_t const readSize = fread(buffer, 1, sizeToRead, file);
if (readSize == sizeToRead) return readSize; /* good */
if (feof(file)) return readSize; /* good, reached end of file */
/* error */
perror("fread");
exit(4);
}
static size_t fwrite_orDie(const void* buffer, size_t sizeToWrite, FILE* file)
{
size_t const writtenSize = fwrite(buffer, 1, sizeToWrite, file);
if (writtenSize == sizeToWrite) return sizeToWrite; /* good */
/* error */
perror("fwrite");
exit(5);
}
static size_t fclose_orDie(FILE* file)
{
if (!fclose(file)) return 0;
/* error */
perror("fclose");
exit(6);
}
static void fseek_orDie(FILE* file, long int offset, int origin) {
if (!fseek(file, offset, origin)) {
if (!fflush(file)) return;
}
/* error */
perror("fseek");
exit(7);
}
struct sum_job {
const char* fname;
unsigned long long sum;
unsigned frameNb;
int done;
};
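/* Each worker opens its own FILE* and ZSTD_seekable, so frames decompress
* fully independently ; only the job struct is shared with the main thread. */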
static void sumFrame(void* opaque)
{
struct sum_job* job = (struct sum_job*)opaque;
job->done = 0;
FILE* const fin = fopen_orDie(job->fname, "rb");
ZSTD_seekable* const seekable = ZSTD_seekable_create();
if (seekable==NULL) { fprintf(stderr, "ZSTD_seekable_create() error \n"); exit(10); }
size_t const initResult = ZSTD_seekable_initFile(seekable, fin);
if (ZSTD_isError(initResult)) { fprintf(stderr, "ZSTD_seekable_init() error : %s \n", ZSTD_getErrorName(initResult)); exit(11); }
size_t const frameSize = ZSTD_seekable_getFrameDecompressedSize(seekable, job->frameNb);
unsigned char* data = malloc_orDie(frameSize);
size_t result = ZSTD_seekable_decompressFrame(seekable, data, frameSize, job->frameNb);
if (ZSTD_isError(result)) { fprintf(stderr, "ZSTD_seekable_decompressFrame() error : %s \n", ZSTD_getErrorName(result)); exit(12); }
unsigned long long sum = 0;
size_t i;
for (i = 0; i < frameSize; i++) {
sum += data[i];
}
job->sum = sum;
job->done = 1;
fclose(fin);
ZSTD_seekable_free(seekable);
free(data);
}
static void sumFile_orDie(const char* fname, int nbThreads)
{
POOL_ctx* pool = POOL_create(nbThreads, nbThreads);
if (pool == NULL) { fprintf(stderr, "POOL_create() error \n"); exit(9); }
FILE* const fin = fopen_orDie(fname, "rb");
ZSTD_seekable* const seekable = ZSTD_seekable_create();
if (seekable==NULL) { fprintf(stderr, "ZSTD_seekable_create() error \n"); exit(10); }
size_t const initResult = ZSTD_seekable_initFile(seekable, fin);
if (ZSTD_isError(initResult)) { fprintf(stderr, "ZSTD_seekable_init() error : %s \n", ZSTD_getErrorName(initResult)); exit(11); }
size_t const numFrames = ZSTD_seekable_getNumFrames(seekable);
struct sum_job* jobs = (struct sum_job*)malloc_orDie(numFrames * sizeof(struct sum_job));
size_t i;
for (i = 0; i < numFrames; i++) {
jobs[i] = (struct sum_job){ fname, 0, i, 0 };
POOL_add(pool, sumFrame, &jobs[i]);
}
unsigned long long total = 0;
for (i = 0; i < numFrames; i++) {
while (!jobs[i].done) SLEEP(5); /* wake up every 5 milliseconds to check */
total += jobs[i].sum;
}
printf("Sum: %llu\n", total);
POOL_free(pool);
ZSTD_seekable_free(seekable);
fclose(fin);
free(jobs);
}
int main(int argc, const char** argv)
{
const char* const exeName = argv[0];
if (argc!=3) {
fprintf(stderr, "wrong arguments\n");
fprintf(stderr, "usage:\n");
fprintf(stderr, "%s FILE NB_THREADS\n", exeName);
return 1;
}
{
const char* const inFilename = argv[1];
int const nbThreads = atoi(argv[2]);
sumFile_orDie(inFilename, nbThreads);
}
return 0;
}

View File

@ -0,0 +1,131 @@
/**
* Copyright 2017-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the license found in the
* LICENSE-examples file in the root directory of this source tree.
*/
#include <stdlib.h> // malloc, free, exit, atoi
#include <stdio.h> // fprintf, perror, feof, fopen, etc.
#include <string.h> // strlen, memset, strcat
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h> // presumes zstd library is installed
#include "zstd_seekable.h"
static void* malloc_orDie(size_t size)
{
void* const buff = malloc(size);
if (buff) return buff;
/* error */
perror("malloc:");
exit(1);
}
static FILE* fopen_orDie(const char *filename, const char *instruction)
{
FILE* const inFile = fopen(filename, instruction);
if (inFile) return inFile;
/* error */
perror(filename);
exit(3);
}
static size_t fread_orDie(void* buffer, size_t sizeToRead, FILE* file)
{
size_t const readSize = fread(buffer, 1, sizeToRead, file);
if (readSize == sizeToRead) return readSize; /* good */
if (feof(file)) return readSize; /* good, reached end of file */
/* error */
perror("fread");
exit(4);
}
static size_t fwrite_orDie(const void* buffer, size_t sizeToWrite, FILE* file)
{
size_t const writtenSize = fwrite(buffer, 1, sizeToWrite, file);
if (writtenSize == sizeToWrite) return sizeToWrite; /* good */
/* error */
perror("fwrite");
exit(5);
}
static size_t fclose_orDie(FILE* file)
{
if (!fclose(file)) return 0;
/* error */
perror("fclose");
exit(6);
}
static void compressFile_orDie(const char* fname, const char* outName, int cLevel, unsigned frameSize)
{
FILE* const fin = fopen_orDie(fname, "rb");
FILE* const fout = fopen_orDie(outName, "wb");
size_t const buffInSize = ZSTD_CStreamInSize(); /* can always read one full block */
void* const buffIn = malloc_orDie(buffInSize);
size_t const buffOutSize = ZSTD_CStreamOutSize(); /* can always flush a full block */
void* const buffOut = malloc_orDie(buffOutSize);
ZSTD_seekable_CStream* const cstream = ZSTD_seekable_createCStream();
if (cstream==NULL) { fprintf(stderr, "ZSTD_seekable_createCStream() error \n"); exit(10); }
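/* arguments : compression level, checksumFlag (1 stores a per-frame XXH64
* checksum in the seek table), and the maximum uncompressed size of a frame */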
size_t const initResult = ZSTD_seekable_initCStream(cstream, cLevel, 1, frameSize);
if (ZSTD_isError(initResult)) { fprintf(stderr, "ZSTD_seekable_initCStream() error : %s \n", ZSTD_getErrorName(initResult)); exit(11); }
size_t read, toRead = buffInSize;
while( (read = fread_orDie(buffIn, toRead, fin)) ) {
ZSTD_inBuffer input = { buffIn, read, 0 };
while (input.pos < input.size) {
ZSTD_outBuffer output = { buffOut, buffOutSize, 0 };
toRead = ZSTD_seekable_compressStream(cstream, &output , &input); /* toRead is guaranteed to be <= ZSTD_CStreamInSize() */
if (ZSTD_isError(toRead)) { fprintf(stderr, "ZSTD_seekable_compressStream() error : %s \n", ZSTD_getErrorName(toRead)); exit(12); }
if (toRead > buffInSize) toRead = buffInSize; /* Safely handle case when `buffInSize` is manually changed to a value < ZSTD_CStreamInSize()*/
fwrite_orDie(buffOut, output.pos, fout);
}
}
while (1) {
ZSTD_outBuffer output = { buffOut, buffOutSize, 0 };
size_t const remainingToFlush = ZSTD_seekable_endStream(cstream, &output); /* close stream */
if (ZSTD_isError(remainingToFlush)) { fprintf(stderr, "ZSTD_seekable_endStream() error : %s \n", ZSTD_getErrorName(remainingToFlush)); exit(13); }
fwrite_orDie(buffOut, output.pos, fout);
if (!remainingToFlush) break;
}
ZSTD_seekable_freeCStream(cstream);
fclose_orDie(fout);
fclose_orDie(fin);
free(buffIn);
free(buffOut);
}
static const char* createOutFilename_orDie(const char* filename)
{
size_t const inL = strlen(filename);
size_t const outL = inL + 5;
void* outSpace = malloc_orDie(outL);
memset(outSpace, 0, outL);
strcat(outSpace, filename);
strcat(outSpace, ".zst");
return (const char*)outSpace;
}
int main(int argc, const char** argv) {
const char* const exeName = argv[0];
if (argc!=3) {
printf("wrong arguments\n");
printf("usage:\n");
printf("%s FILE FRAME_SIZE\n", exeName);
return 1;
}
{ const char* const inFileName = argv[1];
unsigned const frameSize = (unsigned)atoi(argv[2]);
const char* const outFileName = createOutFilename_orDie(inFileName);
compressFile_orDie(inFileName, outFileName, 5, frameSize);
}
return 0;
}

View File

@ -0,0 +1,137 @@
/**
* Copyright 2016-present, Yann Collet, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the license found in the
* LICENSE-examples file in the root directory of this source tree.
*/
#include <stdlib.h> // malloc, exit
#include <stdio.h> // fprintf, perror, feof
#include <string.h> // strerror
#include <errno.h> // errno
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h> // presumes zstd library is installed
#include <zstd_errors.h>
#include "zstd_seekable.h"
#define MIN(a, b) ((a) < (b) ? (a) : (b))
static void* malloc_orDie(size_t size)
{
void* const buff = malloc(size);
if (buff) return buff;
/* error */
perror("malloc");
exit(1);
}
static void* realloc_orDie(void* ptr, size_t size)
{
ptr = realloc(ptr, size);
if (ptr) return ptr;
/* error */
perror("realloc");
exit(1);
}
static FILE* fopen_orDie(const char *filename, const char *instruction)
{
FILE* const inFile = fopen(filename, instruction);
if (inFile) return inFile;
/* error */
perror(filename);
exit(3);
}
static size_t fread_orDie(void* buffer, size_t sizeToRead, FILE* file)
{
size_t const readSize = fread(buffer, 1, sizeToRead, file);
if (readSize == sizeToRead) return readSize; /* good */
if (feof(file)) return readSize; /* good, reached end of file */
/* error */
perror("fread");
exit(4);
}
static size_t fwrite_orDie(const void* buffer, size_t sizeToWrite, FILE* file)
{
size_t const writtenSize = fwrite(buffer, 1, sizeToWrite, file);
if (writtenSize == sizeToWrite) return sizeToWrite; /* good */
/* error */
perror("fwrite");
exit(5);
}
static size_t fclose_orDie(FILE* file)
{
if (!fclose(file)) return 0;
/* error */
perror("fclose");
exit(6);
}
static void fseek_orDie(FILE* file, long int offset, int origin) {
if (!fseek(file, offset, origin)) {
if (!fflush(file)) return;
}
/* error */
perror("fseek");
exit(7);
}
static void decompressFile_orDie(const char* fname, unsigned startOffset, unsigned endOffset)
{
FILE* const fin = fopen_orDie(fname, "rb");
FILE* const fout = stdout;
size_t const buffOutSize = ZSTD_DStreamOutSize(); /* Guarantee to successfully flush at least one complete compressed block in all circumstances. */
void* const buffOut = malloc_orDie(buffOutSize);
ZSTD_seekable* const seekable = ZSTD_seekable_create();
if (seekable==NULL) { fprintf(stderr, "ZSTD_seekable_create() error \n"); exit(10); }
size_t const initResult = ZSTD_seekable_initFile(seekable, fin);
if (ZSTD_isError(initResult)) { fprintf(stderr, "ZSTD_seekable_init() error : %s \n", ZSTD_getErrorName(initResult)); exit(11); }
while (startOffset < endOffset) {
size_t const result = ZSTD_seekable_decompress(seekable, buffOut, MIN(endOffset - startOffset, buffOutSize), startOffset);
if (ZSTD_isError(result)) {
fprintf(stderr, "ZSTD_seekable_decompress() error : %s \n",
ZSTD_getErrorName(result));
exit(12);
}
fwrite_orDie(buffOut, result, fout);
startOffset += result;
}
ZSTD_seekable_free(seekable);
fclose_orDie(fin);
fclose_orDie(fout);
free(buffOut);
}
int main(int argc, const char** argv)
{
const char* const exeName = argv[0];
if (argc!=4) {
fprintf(stderr, "wrong arguments\n");
fprintf(stderr, "usage:\n");
fprintf(stderr, "%s FILE START END\n", exeName);
return 1;
}
{
const char* const inFilename = argv[1];
unsigned const startOffset = (unsigned) atoi(argv[2]);
unsigned const endOffset = (unsigned) atoi(argv[3]);
decompressFile_orDie(inFilename, startOffset, endOffset);
}
return 0;
}

View File

@ -0,0 +1,184 @@
#ifndef SEEKABLE_H
#define SEEKABLE_H
#if defined (__cplusplus)
extern "C" {
#endif
#include <stdio.h>
static const unsigned ZSTD_seekTableFooterSize = 9;
#define ZSTD_SEEKABLE_MAGICNUMBER 0x8F92EAB1
#define ZSTD_SEEKABLE_MAXFRAMES 0x8000000U
/* Limit the maximum size to avoid any potential issues storing the compressed size */
#define ZSTD_SEEKABLE_MAX_FRAME_DECOMPRESSED_SIZE 0x80000000U
/*-****************************************************************************
* Seekable Format
*
* The seekable format splits the compressed data into a series of "frames",
* each compressed individually so that decompression of a section in the
* middle of an archive only requires zstd to decompress at most a frame's
* worth of extra data, instead of the entire archive.
******************************************************************************/
typedef struct ZSTD_seekable_CStream_s ZSTD_seekable_CStream;
typedef struct ZSTD_seekable_s ZSTD_seekable;
/*-****************************************************************************
* Seekable compression - HowTo
* A ZSTD_seekable_CStream object is required to track a streaming operation.
* Use ZSTD_seekable_createCStream() and ZSTD_seekable_freeCStream() to create/
* release resources.
*
* Streaming objects are reusable, to avoid allocation and deallocation;
* to start a new compression operation, call ZSTD_seekable_initCStream() on the
* compressor.
*
* Data streamed to the seekable compressor will automatically be split into
* frames of size `maxFrameSize` (provided in ZSTD_seekable_initCStream()),
* or, if none is provided, cut off whenever ZSTD_seekable_endFrame() is
* called or when the default maximum frame size (2GB) is reached.
*
* Use ZSTD_seekable_initCStream() to initialize a ZSTD_seekable_CStream object
* for a new compression operation.
* `maxFrameSize` indicates the size at which to automatically start a new
* seekable frame. `maxFrameSize == 0` implies the default maximum size.
* `checksumFlag` indicates whether or not the seek table should include frame
* checksums on the uncompressed data for verification.
* @return : a size hint for input to provide for compression, or an error code
* checkable with ZSTD_isError()
*
* Use ZSTD_seekable_compressStream() repetitively to consume input stream.
* The function will automatically update both `pos` fields.
* Note that it may not consume the entire input, in which case `pos < size`,
* and it's up to the caller to present the remaining data again.
* @return : a size hint, preferred nb of bytes to use as input for next
* function call or an error code, which can be tested using
* ZSTD_isError().
* Note 1 : it's just a hint, to help latency a little; any other
* value will work fine.
*
* At any time, call ZSTD_seekable_endFrame() to end the current frame and
* start a new one.
*
* ZSTD_seekable_endStream() will end the current frame, and then write the seek
* table so that decompressors can efficiently find compressed frames.
* ZSTD_seekable_endStream() may return a number > 0 if it was unable to flush
* all the necessary data to `output`. In this case, it should be called again
* until all remaining data is flushed out and 0 is returned.
******************************************************************************/
/*===== Seekable compressor management =====*/
ZSTDLIB_API ZSTD_seekable_CStream* ZSTD_seekable_createCStream(void);
ZSTDLIB_API size_t ZSTD_seekable_freeCStream(ZSTD_seekable_CStream* zcs);
/*===== Seekable compression functions =====*/
ZSTDLIB_API size_t ZSTD_seekable_initCStream(ZSTD_seekable_CStream* zcs, int compressionLevel, int checksumFlag, unsigned maxFrameSize);
ZSTDLIB_API size_t ZSTD_seekable_compressStream(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
ZSTDLIB_API size_t ZSTD_seekable_endFrame(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output);
ZSTDLIB_API size_t ZSTD_seekable_endStream(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output);
/*= Raw seek table API
* These functions allow for the seek table to be constructed directly.
* This table can then be appended to a file of concatenated frames.
* This allows the frames to be compressed independently, even in parallel,
* and compiled together afterward into a seekable archive.
*
* Use ZSTD_seekable_createFrameLog() to allocate and initialize a tracking
* structure.
*
* Call ZSTD_seekable_logFrame() once for each frame in the archive.
* checksum is optional, and will not be used if checksumFlag was 0 when the
* frame log was created. If present, it should be the least significant 32
* bits of the XXH64 hash of the uncompressed data.
*
* Call ZSTD_seekable_writeSeekTable to serialize the data into a seek table.
* If the entire table was written, the return value will be 0. Otherwise,
* it will be equal to the number of bytes left to write. */
typedef struct ZSTD_frameLog_s ZSTD_frameLog;
ZSTDLIB_API ZSTD_frameLog* ZSTD_seekable_createFrameLog(int checksumFlag);
ZSTDLIB_API size_t ZSTD_seekable_freeFrameLog(ZSTD_frameLog* fl);
ZSTDLIB_API size_t ZSTD_seekable_logFrame(ZSTD_frameLog* fl, unsigned compressedSize, unsigned decompressedSize, unsigned checksum);
ZSTDLIB_API size_t ZSTD_seekable_writeSeekTable(ZSTD_frameLog* fl, ZSTD_outBuffer* output);
/*-****************************************************************************
* Seekable decompression - HowTo
* A ZSTD_seekable object is required to track the seekTable.
*
* Call ZSTD_seekable_init* to initialize a ZSTD_seekable object with the
* seek table provided in the input.
* There are three modes for ZSTD_seekable_init:
* - ZSTD_seekable_initBuff() : An in-memory API. The data contained in
* `src` should be the entire seekable file, including the seek table.
* `src` should be kept alive and unmodified until the ZSTD_seekable object
* is freed or reset.
* - ZSTD_seekable_initFile() : A simplified file API using stdio. fread and
* fseek will be used to access the required data for building the seek
* table and doing decompression operations. `src` should not be closed
* or modified until the ZSTD_seekable object is freed or reset.
* - ZSTD_seekable_initAdvanced() : A general API allowing the client to
* provide its own read and seek callbacks.
* + ZSTD_seekable_read() : read exactly `n` bytes into `buffer`.
* Premature EOF should be treated as an error.
* + ZSTD_seekable_seek() : seek the read head to `offset` from `origin`,
* where origin is either SEEK_SET (beginning of
* file), or SEEK_END (end of file).
* Both functions should return a non-negative value in case of success, and a
* negative value in case of failure. If implementing using this API and
* stdio, be careful with files larger than 4GB and fseek. All of these
* functions return an error code checkable with ZSTD_isError().
*
* Call ZSTD_seekable_decompress to decompress `dstSize` bytes at decompressed
* offset `offset`. ZSTD_seekable_decompress may have to decompress the entire
* prefix of the frame before the desired data if it has not already processed
* this section. If ZSTD_seekable_decompress is called multiple times for a
* consecutive range of data, it will efficiently retain the decompressor object
* and avoid redecompressing frame prefixes. The return value is the number of
* bytes decompressed, or an error code checkable with ZSTD_isError().
*
* The seek table access functions can be used to obtain the data contained
* in the seek table. If frameIndex is greater than or equal to the value
* returned by ZSTD_seekable_getNumFrames(), they will return error codes checkable with
* ZSTD_isError(). Note that since the offset access functions return
* unsigned long long instead of size_t, in this case they will instead return
* the value ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE.
******************************************************************************/
/*===== Seekable decompressor management =====*/
ZSTDLIB_API ZSTD_seekable* ZSTD_seekable_create(void);
ZSTDLIB_API size_t ZSTD_seekable_free(ZSTD_seekable* zs);
/*===== Seekable decompression functions =====*/
ZSTDLIB_API size_t ZSTD_seekable_initBuff(ZSTD_seekable* zs, const void* src, size_t srcSize);
ZSTDLIB_API size_t ZSTD_seekable_initFile(ZSTD_seekable* zs, FILE* src);
ZSTDLIB_API size_t ZSTD_seekable_decompress(ZSTD_seekable* zs, void* dst, size_t dstSize, unsigned long long offset);
ZSTDLIB_API size_t ZSTD_seekable_decompressFrame(ZSTD_seekable* zs, void* dst, size_t dstSize, unsigned frameIndex);
#define ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE (0ULL-2)
/*===== Seek Table access functions =====*/
ZSTDLIB_API unsigned ZSTD_seekable_getNumFrames(ZSTD_seekable* const zs);
ZSTDLIB_API unsigned long long ZSTD_seekable_getFrameCompressedOffset(ZSTD_seekable* const zs, unsigned frameIndex);
ZSTDLIB_API unsigned long long ZSTD_seekable_getFrameDecompressedOffset(ZSTD_seekable* const zs, unsigned frameIndex);
ZSTDLIB_API size_t ZSTD_seekable_getFrameCompressedSize(ZSTD_seekable* const zs, unsigned frameIndex);
ZSTDLIB_API size_t ZSTD_seekable_getFrameDecompressedSize(ZSTD_seekable* const zs, unsigned frameIndex);
ZSTDLIB_API unsigned ZSTD_seekable_offsetToFrameIndex(ZSTD_seekable* const zs, unsigned long long offset);
/*===== Seekable advanced I/O API =====*/
typedef int(ZSTD_seekable_read)(void* opaque, void* buffer, size_t n);
typedef int(ZSTD_seekable_seek)(void* opaque, long long offset, int origin);
typedef struct {
void* opaque;
ZSTD_seekable_read* read;
ZSTD_seekable_seek* seek;
} ZSTD_seekable_customFile;
ZSTDLIB_API size_t ZSTD_seekable_initAdvanced(ZSTD_seekable* zs, ZSTD_seekable_customFile src);
#if defined (__cplusplus)
}
#endif
#endif
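The raw seek table API above ships without a dedicated example in this commit. The following is a minimal sketch, not part of the commit, of how it could be used to append a seek table to a file of independently compressed frames; `appendSeekTable_sketch` and the `frames` array are hypothetical names, and only functions declared in the header above are called.

#include <stdio.h>
#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>      /* ZSTD_isError, ZSTD_outBuffer */
#include "zstd_seekable.h"

/* Sketch : append a seek table to `fout`, to which `numFrames` independently
 * compressed frames have already been written. `frames[i][0]` / `frames[i][1]`
 * are the compressed / decompressed sizes of frame i (placeholder inputs). */
static int appendSeekTable_sketch(FILE* fout,
                                  const unsigned (*frames)[2], size_t numFrames)
{
    ZSTD_frameLog* const fl = ZSTD_seekable_createFrameLog(0); /* no checksums */
    char buff[1024];
    size_t i, remaining;
    if (fl == NULL) return -1;
    for (i = 0; i < numFrames; i++) {
        /* last argument ignored since checksumFlag was 0 */
        size_t const r = ZSTD_seekable_logFrame(fl, frames[i][0], frames[i][1], 0);
        if (ZSTD_isError(r)) { ZSTD_seekable_freeFrameLog(fl); return -1; }
    }
    do {  /* resumable : returns the number of bytes left to write, 0 when done */
        ZSTD_outBuffer out = { buff, sizeof(buff), 0 };
        remaining = ZSTD_seekable_writeSeekTable(fl, &out);
        if (ZSTD_isError(remaining)) { ZSTD_seekable_freeFrameLog(fl); return -1; }
        if (fwrite(buff, 1, out.pos, fout) != out.pos) {
            ZSTD_seekable_freeFrameLog(fl); return -1;
        }
    } while (remaining != 0);
    ZSTD_seekable_freeFrameLog(fl);
    return 0;
}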

View File

@ -0,0 +1,116 @@
# Zstandard Seekable Format
### Notices
Copyright (c) 2017-present Facebook, Inc.
Permission is granted to copy and distribute this document
for any purpose and without charge,
including translations into other languages
and incorporation into compilations,
provided that the copyright notice and this notice are preserved,
and that any substantive changes or deletions from the original
are clearly marked.
Distribution of this document is unlimited.
### Version
0.1.0 (11/04/17)
## Introduction
This document defines a format for compressed data to be stored so that subranges of the data can be efficiently decompressed without requiring the entire document to be decompressed.
This is done by splitting up the input data into frames,
each of which is compressed independently,
and so can be decompressed independently.
Decompression then takes advantage of a provided 'seek table', which allows the decompressor to immediately jump to the desired data. This is done in a way that is compatible with the original Zstandard format by placing the seek table in a Zstandard skippable frame.
### Overall conventions
In this document:
- square brackets i.e. `[` and `]` are used to indicate optional fields or parameters.
- the naming convention for identifiers is `Mixed_Case_With_Underscores`
- All numeric fields are little-endian unless specified otherwise
## Format
The format consists of a number of frames (Zstandard compressed frames and skippable frames), followed by a final skippable frame at the end containing the seek table.
### Seek Table Format
The structure of the seek table frame is as follows:
|`Skippable_Magic_Number`|`Frame_Size`|`[Seek_Table_Entries]`|`Seek_Table_Footer`|
|------------------------|------------|----------------------|-------------------|
| 4 bytes | 4 bytes | 8-12 bytes each | 9 bytes |
__`Skippable_Magic_Number`__
Value : 0x184D2A5E.
This is for compatibility with [Zstandard skippable frames].
Since it is legal for other Zstandard skippable frames to use the same
magic number, it is not recommended for a decoder to recognize frames
solely by this value.
__`Frame_Size`__
The total size of the skippable frame, not including the `Skippable_Magic_Number` or `Frame_Size`.
This is for compatibility with [Zstandard skippable frames].
[Zstandard skippable frames]: https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#skippable-frames
#### `Seek_Table_Footer`
The seek table footer format is as follows:
|`Number_Of_Frames`|`Seek_Table_Descriptor`|`Seekable_Magic_Number`|
|------------------|-----------------------|-----------------------|
| 4 bytes | 1 byte | 4 bytes |
__`Seekable_Magic_Number`__
Value : 0x8F92EAB1.
This value must occupy the last 4 bytes of the compressed file, so that decoders
can efficiently find it and determine whether an actual seek table is present.
__`Number_Of_Frames`__
The number of stored frames in the data.
__`Seek_Table_Descriptor`__
A bitfield describing the format of the seek table.
| Bit number | Field name |
| ---------- | ---------- |
| 7 | `Checksum_Flag` |
| 6-2 | `Reserved_Bits` |
| 1-0 | `Unused_Bits` |
While only `Checksum_Flag` currently exists, there are 7 other bits in this field that can be used for future changes to the format,
for example the addition of inline dictionaries.
__`Checksum_Flag`__
If the checksum flag is set, each of the seek table entries contains a 4 byte checksum of the uncompressed data contained in its frame.
`Reserved_Bits` are not currently used but may be used in the future for breaking changes, so a compliant decoder should ensure they are set to 0. `Unused_Bits` may be used in the future for non-breaking changes, so a compliant decoder should not interpret these bits.
#### __`Seek_Table_Entries`__
`Seek_Table_Entries` consists of `Number_Of_Frames` (one for each frame in the data, not including the seek table frame) entries of the following form, in sequence:
|`Compressed_Size`|`Decompressed_Size`|`[Checksum]`|
|-----------------|-------------------|------------|
| 4 bytes | 4 bytes | 4 bytes |
__`Compressed_Size`__
The compressed size of the frame.
The cumulative sum of the `Compressed_Size` fields of frames `0` to `i` gives the offset in the compressed file of frame `i+1`.
__`Decompressed_Size`__
The size of the decompressed data contained in the frame. For skippable or otherwise empty frames, this value is 0.
__`Checksum`__
Only present if `Checksum_Flag` is set in the `Seek_Table_Descriptor`. Value : the least significant 32 bits of the XXH64 digest of the uncompressed data, stored in little-endian format.
## Version Changes
- 0.1.0: initial version
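
As an illustration of the layout above, here is a minimal sketch, not part of the specification, of how a reader might validate the 9-byte `Seek_Table_Footer` of a file held in memory; `read_le32` and `parse_footer_sketch` are hypothetical helpers.

```c
#include <stdint.h>
#include <stddef.h>

/* hypothetical helper : read a 32-bit little-endian value */
static uint32_t read_le32(const uint8_t* p)
{
    return (uint32_t)p[0] | ((uint32_t)p[1] << 8)
         | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Returns Number_Of_Frames, or -1 if no valid seek table footer is found.
 * `file` points to the entire compressed file, `len` is its size. */
static long long parse_footer_sketch(const uint8_t* file, size_t len)
{
    const uint8_t* footer;
    uint8_t sfd;
    if (len < 9) return -1;
    footer = file + len - 9;          /* footer is the last 9 bytes */
    if (read_le32(footer + 5) != 0x8F92EAB1U) return -1; /* Seekable_Magic_Number */
    sfd = footer[4];                  /* Seek_Table_Descriptor */
    if ((sfd >> 2) & 0x1F) return -1; /* Reserved_Bits must be 0 */
    /* bit 7 is Checksum_Flag; bits 1-0 are Unused_Bits and are not interpreted */
    return (long long)read_le32(footer); /* Number_Of_Frames */
}
```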

View File

@ -0,0 +1,366 @@
/**
* Copyright (c) 2017-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*/
#include <stdlib.h> /* malloc, free */
#define XXH_STATIC_LINKING_ONLY
#define XXH_NAMESPACE ZSTD_
#include "xxhash.h"
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"
#include "zstd_errors.h"
#include "mem.h"
#include "zstd_seekable.h"
#define CHECK_Z(f) { size_t const ret = (f); if (ret != 0) return ret; }
#undef ERROR
#define ERROR(name) ((size_t)-ZSTD_error_##name)
#undef MIN
#undef MAX
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
typedef struct {
U32 cSize;
U32 dSize;
U32 checksum;
} framelogEntry_t;
struct ZSTD_frameLog_s {
framelogEntry_t* entries;
U32 size;
U32 capacity;
int checksumFlag;
/* for use when streaming out the seek table */
U32 seekTablePos;
U32 seekTableIndex;
};
struct ZSTD_seekable_CStream_s {
ZSTD_CStream* cstream;
ZSTD_frameLog framelog;
U32 frameCSize;
U32 frameDSize;
XXH64_state_t xxhState;
U32 maxFrameSize;
int writingSeekTable;
};
size_t ZSTD_seekable_frameLog_allocVec(ZSTD_frameLog* fl)
{
/* allocate some initial space */
size_t const FRAMELOG_STARTING_CAPACITY = 16;
fl->entries = (framelogEntry_t*)malloc(
sizeof(framelogEntry_t) * FRAMELOG_STARTING_CAPACITY);
if (fl->entries == NULL) return ERROR(memory_allocation);
fl->capacity = FRAMELOG_STARTING_CAPACITY;
return 0;
}
size_t ZSTD_seekable_frameLog_freeVec(ZSTD_frameLog* fl)
{
if (fl != NULL) free(fl->entries);
return 0;
}
ZSTD_frameLog* ZSTD_seekable_createFrameLog(int checksumFlag)
{
ZSTD_frameLog* fl = malloc(sizeof(ZSTD_frameLog));
if (fl == NULL) return NULL;
if (ZSTD_isError(ZSTD_seekable_frameLog_allocVec(fl))) {
free(fl);
return NULL;
}
fl->checksumFlag = checksumFlag;
fl->seekTablePos = 0;
fl->seekTableIndex = 0;
fl->size = 0;
return fl;
}
size_t ZSTD_seekable_freeFrameLog(ZSTD_frameLog* fl)
{
ZSTD_seekable_frameLog_freeVec(fl);
free(fl);
return 0;
}
ZSTD_seekable_CStream* ZSTD_seekable_createCStream(void)
{
ZSTD_seekable_CStream* zcs = malloc(sizeof(ZSTD_seekable_CStream));
if (zcs == NULL) return NULL;
memset(zcs, 0, sizeof(*zcs));
zcs->cstream = ZSTD_createCStream();
if (zcs->cstream == NULL) goto failed1;
if (ZSTD_isError(ZSTD_seekable_frameLog_allocVec(&zcs->framelog))) goto failed2;
return zcs;
failed2:
ZSTD_freeCStream(zcs->cstream);
failed1:
free(zcs);
return NULL;
}
size_t ZSTD_seekable_freeCStream(ZSTD_seekable_CStream* zcs)
{
if (zcs == NULL) return 0; /* support free on null */
ZSTD_freeCStream(zcs->cstream);
ZSTD_seekable_frameLog_freeVec(&zcs->framelog);
free(zcs);
return 0;
}
size_t ZSTD_seekable_initCStream(ZSTD_seekable_CStream* zcs,
int compressionLevel,
int checksumFlag,
U32 maxFrameSize)
{
zcs->framelog.size = 0;
zcs->frameCSize = 0;
zcs->frameDSize = 0;
/* make sure maxFrameSize has a reasonable value */
if (maxFrameSize > ZSTD_SEEKABLE_MAX_FRAME_DECOMPRESSED_SIZE) {
return ERROR(compressionParameter_unsupported);
}
zcs->maxFrameSize = maxFrameSize
? maxFrameSize
: ZSTD_SEEKABLE_MAX_FRAME_DECOMPRESSED_SIZE;
zcs->framelog.checksumFlag = checksumFlag;
if (zcs->framelog.checksumFlag) {
XXH64_reset(&zcs->xxhState, 0);
}
zcs->framelog.seekTablePos = 0;
zcs->framelog.seekTableIndex = 0;
zcs->writingSeekTable = 0;
return ZSTD_initCStream(zcs->cstream, compressionLevel);
}
size_t ZSTD_seekable_logFrame(ZSTD_frameLog* fl,
unsigned compressedSize,
unsigned decompressedSize,
unsigned checksum)
{
if (fl->size == ZSTD_SEEKABLE_MAXFRAMES)
return ERROR(frameIndex_tooLarge);
/* grow the buffer if required */
if (fl->size == fl->capacity) {
/* exponential size increase for constant amortized runtime */
size_t const newCapacity = fl->capacity * 2;
framelogEntry_t* const newEntries = realloc(fl->entries,
sizeof(framelogEntry_t) * newCapacity);
if (newEntries == NULL) return ERROR(memory_allocation);
fl->entries = newEntries;
fl->capacity = newCapacity;
}
fl->entries[fl->size] = (framelogEntry_t){
compressedSize, decompressedSize, checksum
};
fl->size++;
return 0;
}
size_t ZSTD_seekable_endFrame(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output)
{
size_t const prevOutPos = output->pos;
/* end the frame */
size_t ret = ZSTD_endStream(zcs->cstream, output);
zcs->frameCSize += output->pos - prevOutPos;
/* need to flush before doing the rest */
if (ret) return ret;
/* frame done */
/* store the frame data for later */
ret = ZSTD_seekable_logFrame(
&zcs->framelog, zcs->frameCSize, zcs->frameDSize,
zcs->framelog.checksumFlag
? XXH64_digest(&zcs->xxhState) & 0xFFFFFFFFU
: 0);
if (ret) return ret;
/* reset for the next frame */
zcs->frameCSize = 0;
zcs->frameDSize = 0;
ZSTD_resetCStream(zcs->cstream, 0);
if (zcs->framelog.checksumFlag)
XXH64_reset(&zcs->xxhState, 0);
return 0;
}
size_t ZSTD_seekable_compressStream(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
{
const BYTE* const inBase = (const BYTE*) input->src + input->pos;
size_t inLen = input->size - input->pos;
inLen = MIN(inLen, (size_t)(zcs->maxFrameSize - zcs->frameDSize));
/* if we haven't finished flushing the last frame, don't start writing a new one */
if (inLen > 0) {
ZSTD_inBuffer inTmp = { inBase, inLen, 0 };
size_t const prevOutPos = output->pos;
size_t const ret = ZSTD_compressStream(zcs->cstream, output, &inTmp);
if (zcs->framelog.checksumFlag) {
XXH64_update(&zcs->xxhState, inBase, inTmp.pos);
}
zcs->frameCSize += output->pos - prevOutPos;
zcs->frameDSize += inTmp.pos;
input->pos += inTmp.pos;
if (ZSTD_isError(ret)) return ret;
}
if (zcs->maxFrameSize == zcs->frameDSize) {
/* log the frame and start over */
size_t const ret = ZSTD_seekable_endFrame(zcs, output);
if (ZSTD_isError(ret)) return ret;
/* get the client ready for the next frame */
return (size_t)zcs->maxFrameSize;
}
return (size_t)(zcs->maxFrameSize - zcs->frameDSize);
}
static inline size_t ZSTD_seekable_seekTableSize(const ZSTD_frameLog* fl)
{
size_t const sizePerFrame = 8 + (fl->checksumFlag?4:0);
size_t const seekTableLen = ZSTD_skippableHeaderSize +
sizePerFrame * fl->size +
ZSTD_seekTableFooterSize;
return seekTableLen;
}
static inline size_t ZSTD_stwrite32(ZSTD_frameLog* fl,
ZSTD_outBuffer* output, U32 const value,
U32 const offset)
{
if (fl->seekTablePos < offset + 4) {
BYTE tmp[4]; /* so that we can work with buffers too small to write a whole word to */
size_t const lenWrite =
MIN(output->size - output->pos, offset + 4 - fl->seekTablePos);
MEM_writeLE32(tmp, value);
memcpy((BYTE*)output->dst + output->pos,
tmp + (fl->seekTablePos - offset), lenWrite);
output->pos += lenWrite;
fl->seekTablePos += lenWrite;
if (lenWrite < 4) return ZSTD_seekable_seekTableSize(fl) - fl->seekTablePos;
}
return 0;
}
size_t ZSTD_seekable_writeSeekTable(ZSTD_frameLog* fl, ZSTD_outBuffer* output)
{
/* seekTableIndex: the current index in the table and
* seekTablePos: the amount of the table written so far
*
* This function is written this way so that if it has to return early
* because of a small buffer, it can keep going where it left off.
*/
size_t const sizePerFrame = 8 + (fl->checksumFlag?4:0);
size_t const seekTableLen = ZSTD_seekable_seekTableSize(fl);
CHECK_Z(ZSTD_stwrite32(fl, output, ZSTD_MAGIC_SKIPPABLE_START | 0xE, 0));
CHECK_Z(ZSTD_stwrite32(fl, output, seekTableLen - ZSTD_skippableHeaderSize,
4));
while (fl->seekTableIndex < fl->size) {
CHECK_Z(ZSTD_stwrite32(fl, output,
fl->entries[fl->seekTableIndex].cSize,
ZSTD_skippableHeaderSize +
sizePerFrame * fl->seekTableIndex + 0));
CHECK_Z(ZSTD_stwrite32(fl, output,
fl->entries[fl->seekTableIndex].dSize,
ZSTD_skippableHeaderSize +
sizePerFrame * fl->seekTableIndex + 4));
if (fl->checksumFlag) {
CHECK_Z(ZSTD_stwrite32(
fl, output, fl->entries[fl->seekTableIndex].checksum,
ZSTD_skippableHeaderSize +
sizePerFrame * fl->seekTableIndex + 8));
}
fl->seekTableIndex++;
}
CHECK_Z(ZSTD_stwrite32(fl, output, fl->size,
seekTableLen - ZSTD_seekTableFooterSize));
if (output->size - output->pos < 1) return seekTableLen - fl->seekTablePos;
if (fl->seekTablePos < seekTableLen - 4) {
BYTE sfd = 0;
sfd |= (fl->checksumFlag) << 7;
((BYTE*)output->dst)[output->pos] = sfd;
output->pos++;
fl->seekTablePos++;
}
CHECK_Z(ZSTD_stwrite32(fl, output, ZSTD_SEEKABLE_MAGICNUMBER,
seekTableLen - 4));
if (fl->seekTablePos != seekTableLen) return ERROR(GENERIC);
return 0;
}
size_t ZSTD_seekable_endStream(ZSTD_seekable_CStream* zcs, ZSTD_outBuffer* output)
{
if (!zcs->writingSeekTable && zcs->frameDSize) {
const size_t endFrame = ZSTD_seekable_endFrame(zcs, output);
if (ZSTD_isError(endFrame)) return endFrame;
/* return an accurate size hint */
if (endFrame) return endFrame + ZSTD_seekable_seekTableSize(&zcs->framelog);
}
zcs->writingSeekTable = 1;
return ZSTD_seekable_writeSeekTable(&zcs->framelog, output);
}
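
Tying the implementation back to the format spec: the table emitted by ZSTD_seekable_writeSeekTable() occupies ZSTD_skippableHeaderSize (8 bytes), plus 8 or 12 bytes per frame, plus the 9-byte footer, exactly as ZSTD_seekable_seekTableSize() computes. A quick sanity check of that arithmetic, with hypothetical values:

#include <assert.h>

int main(void)
{
    unsigned const numFrames = 3;   /* hypothetical archive */
    unsigned const checksumFlag = 1;
    unsigned const sizePerFrame = 8 + (checksumFlag ? 4 : 0);
    unsigned const seekTableLen = 8 /* skippable magic + frame size fields */
                                + sizePerFrame * numFrames
                                + 9; /* seek table footer */
    assert(seekTableLen == 53);
    return 0;
}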

View File

@ -0,0 +1,461 @@
/*
* Copyright (c) 2017-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree. An additional grant
* of patent rights can be found in the PATENTS file in the same directory.
*/
/* *********************************************************
* Turn on Large Files support (>4GB) for 32-bit Linux/Unix
***********************************************************/
#if !defined(__64BIT__) || defined(__MINGW32__) /* No point defining Large file for 64 bit but MinGW-w64 requires it */
# if !defined(_FILE_OFFSET_BITS)
# define _FILE_OFFSET_BITS 64 /* turn off_t into a 64-bit type for ftello, fseeko */
# endif
# if !defined(_LARGEFILE_SOURCE) /* obsolete macro, replaced with _FILE_OFFSET_BITS */
# define _LARGEFILE_SOURCE 1 /* Large File Support extension (LFS) - fseeko, ftello */
# endif
# if defined(_AIX) || defined(__hpux)
# define _LARGE_FILES /* Large file support on 32-bits AIX and HP-UX */
# endif
#endif
/* ************************************************************
* Avoid fseek()'s 2GiB barrier with MSVC, MacOS, *BSD, MinGW
***************************************************************/
#if defined(_MSC_VER) && _MSC_VER >= 1400
# define LONG_SEEK _fseeki64
#elif !defined(__64BIT__) && (PLATFORM_POSIX_VERSION >= 200112L) /* No point defining Large file for 64 bit */
# define LONG_SEEK fseeko
#elif defined(__MINGW32__) && !defined(__STRICT_ANSI__) && !defined(__NO_MINGW_LFS) && defined(__MSVCRT__)
# define LONG_SEEK fseeko64
#elif defined(_WIN32) && !defined(__DJGPP__)
# include <windows.h>
static int LONG_SEEK(FILE* file, __int64 offset, int origin) {
LARGE_INTEGER off;
DWORD method;
off.QuadPart = offset;
if (origin == SEEK_END)
method = FILE_END;
else if (origin == SEEK_CUR)
method = FILE_CURRENT;
else
method = FILE_BEGIN;
if (SetFilePointerEx((HANDLE) _get_osfhandle(_fileno(file)), off, NULL, method))
return 0;
else
return -1;
}
#else
# define LONG_SEEK fseek
#endif
#include <stdlib.h> /* malloc, free */
#include <stdio.h> /* FILE* */
#define XXH_STATIC_LINKING_ONLY
#define XXH_NAMESPACE ZSTD_
#include "xxhash.h"
#define ZSTD_STATIC_LINKING_ONLY
#include "zstd.h"
#include "zstd_errors.h"
#include "mem.h"
#include "zstd_seekable.h"
#undef ERROR
#define ERROR(name) ((size_t)-ZSTD_error_##name)
#define CHECK_IO(f) { int const errcod = (f); if (errcod < 0) return ERROR(seekableIO); }
#undef MIN
#undef MAX
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
/* Special-case callbacks for FILE* and in-memory modes, so that we can treat
* them the same way as the advanced API */
static int ZSTD_seekable_read_FILE(void* opaque, void* buffer, size_t n)
{
size_t const result = fread(buffer, 1, n, (FILE*)opaque);
if (result != n) {
return -1;
}
return 0;
}
static int ZSTD_seekable_seek_FILE(void* opaque, S64 offset, int origin)
{
int const ret = LONG_SEEK((FILE*)opaque, offset, origin);
if (ret) return ret;
return fflush((FILE*)opaque);
}
typedef struct {
const void *ptr;
size_t size;
size_t pos;
} buffWrapper_t;
static int ZSTD_seekable_read_buff(void* opaque, void* buffer, size_t n)
{
buffWrapper_t* buff = (buffWrapper_t*) opaque;
if (buff->pos + n > buff->size) return -1;
memcpy(buffer, (const BYTE*)buff->ptr + buff->pos, n);
buff->pos += n;
return 0;
}
static int ZSTD_seekable_seek_buff(void* opaque, S64 offset, int origin)
{
buffWrapper_t* buff = (buffWrapper_t*) opaque;
unsigned long long newOffset;
switch (origin) {
case SEEK_SET:
newOffset = offset;
break;
case SEEK_CUR:
newOffset = (unsigned long long)buff->pos + offset;
break;
case SEEK_END:
newOffset = (unsigned long long)buff->size + offset;
break;
default:
return -1;
}
/* `newOffset` is unsigned : a negative result wraps around to a huge
* value and is rejected by the bounds check below */
if (newOffset > buff->size) {
return -1;
}
buff->pos = newOffset;
return 0;
}
typedef struct {
U64 cOffset;
U64 dOffset;
U32 checksum;
} seekEntry_t;
typedef struct {
seekEntry_t* entries;
size_t tableLen;
int checksumFlag;
} seekTable_t;
#define SEEKABLE_BUFF_SIZE ZSTD_BLOCKSIZE_ABSOLUTEMAX
struct ZSTD_seekable_s {
ZSTD_DStream* dstream;
seekTable_t seekTable;
ZSTD_seekable_customFile src;
U64 decompressedOffset;
U32 curFrame;
BYTE inBuff[SEEKABLE_BUFF_SIZE]; /* need to do our own input buffering */
BYTE outBuff[SEEKABLE_BUFF_SIZE]; /* so we can efficiently decompress the
starts of chunks before we get to the
desired section */
ZSTD_inBuffer in; /* maintain continuity across ZSTD_seekable_decompress operations */
buffWrapper_t buffWrapper; /* for `src.opaque` in in-memory mode */
XXH64_state_t xxhState;
};
ZSTD_seekable* ZSTD_seekable_create(void)
{
ZSTD_seekable* zs = malloc(sizeof(ZSTD_seekable));
if (zs == NULL) return NULL;
/* also initializes stage to zsds_init */
memset(zs, 0, sizeof(*zs));
zs->dstream = ZSTD_createDStream();
if (zs->dstream == NULL) {
free(zs);
return NULL;
}
return zs;
}
size_t ZSTD_seekable_free(ZSTD_seekable* zs)
{
if (zs == NULL) return 0; /* support free on null */
ZSTD_freeDStream(zs->dstream);
free(zs->seekTable.entries);
free(zs);
return 0;
}
/** ZSTD_seekable_offsetToFrameIndex() :
* Performs a binary search to find the last frame with a decompressed offset
* <= pos
* @return : the frame's index */
U32 ZSTD_seekable_offsetToFrameIndex(ZSTD_seekable* const zs, U64 pos)
{
U32 lo = 0;
U32 hi = zs->seekTable.tableLen;
if (pos >= zs->seekTable.entries[zs->seekTable.tableLen].dOffset) {
return zs->seekTable.tableLen;
}
while (lo + 1 < hi) {
U32 const mid = lo + ((hi - lo) >> 1);
if (zs->seekTable.entries[mid].dOffset <= pos) {
lo = mid;
} else {
hi = mid;
}
}
return lo;
}
U32 ZSTD_seekable_getNumFrames(ZSTD_seekable* const zs)
{
return zs->seekTable.tableLen;
}
U64 ZSTD_seekable_getFrameCompressedOffset(ZSTD_seekable* const zs, U32 frameIndex)
{
if (frameIndex >= zs->seekTable.tableLen) return ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE;
return zs->seekTable.entries[frameIndex].cOffset;
}
U64 ZSTD_seekable_getFrameDecompressedOffset(ZSTD_seekable* const zs, U32 frameIndex)
{
if (frameIndex >= zs->seekTable.tableLen) return ZSTD_SEEKABLE_FRAMEINDEX_TOOLARGE;
return zs->seekTable.entries[frameIndex].dOffset;
}
size_t ZSTD_seekable_getFrameCompressedSize(ZSTD_seekable* const zs, U32 frameIndex)
{
if (frameIndex >= zs->seekTable.tableLen) return ERROR(frameIndex_tooLarge);
return zs->seekTable.entries[frameIndex + 1].cOffset -
zs->seekTable.entries[frameIndex].cOffset;
}
size_t ZSTD_seekable_getFrameDecompressedSize(ZSTD_seekable* const zs, U32 frameIndex)
{
if (frameIndex >= zs->seekTable.tableLen) return ERROR(frameIndex_tooLarge);
return zs->seekTable.entries[frameIndex + 1].dOffset -
zs->seekTable.entries[frameIndex].dOffset;
}
static size_t ZSTD_seekable_loadSeekTable(ZSTD_seekable* zs)
{
int checksumFlag;
ZSTD_seekable_customFile src = zs->src;
/* read the footer, fixed size */
CHECK_IO(src.seek(src.opaque, -(int)ZSTD_seekTableFooterSize, SEEK_END));
CHECK_IO(src.read(src.opaque, zs->inBuff, ZSTD_seekTableFooterSize));
if (MEM_readLE32(zs->inBuff + 5) != ZSTD_SEEKABLE_MAGICNUMBER) {
return ERROR(prefix_unknown);
}
{ BYTE const sfd = zs->inBuff[4];
checksumFlag = sfd >> 7;
/* check reserved bits */
if ((sfd >> 2) & 0x1f) {
return ERROR(corruption_detected);
}
}
{ U32 const numFrames = MEM_readLE32(zs->inBuff);
U32 const sizePerEntry = 8 + (checksumFlag?4:0);
U32 const tableSize = sizePerEntry * numFrames;
U32 const frameSize = tableSize + ZSTD_seekTableFooterSize + ZSTD_skippableHeaderSize;
U32 remaining = frameSize - ZSTD_seekTableFooterSize; /* don't need to re-read footer */
{
U32 const toRead = MIN(remaining, SEEKABLE_BUFF_SIZE);
CHECK_IO(src.seek(src.opaque, -(S64)frameSize, SEEK_END));
CHECK_IO(src.read(src.opaque, zs->inBuff, toRead));
remaining -= toRead;
}
if (MEM_readLE32(zs->inBuff) != (ZSTD_MAGIC_SKIPPABLE_START | 0xE)) {
return ERROR(prefix_unknown);
}
if (MEM_readLE32(zs->inBuff+4) + ZSTD_skippableHeaderSize != frameSize) {
return ERROR(prefix_unknown);
}
{ /* Allocate an extra entry at the end so that we can do size
* computations on the last element without special case */
seekEntry_t* entries = (seekEntry_t*)malloc(sizeof(seekEntry_t) * (numFrames + 1));
const BYTE* tableBase = zs->inBuff + ZSTD_skippableHeaderSize;
U32 idx = 0;
U32 pos = 8;
U64 cOffset = 0;
U64 dOffset = 0;
if (!entries) {
return ERROR(memory_allocation);
}
/* compute cumulative positions */
for (; idx < numFrames; idx++) {
if (pos + sizePerEntry > SEEKABLE_BUFF_SIZE) {
U32 const toRead = MIN(remaining, SEEKABLE_BUFF_SIZE);
U32 const offset = SEEKABLE_BUFF_SIZE - pos;
memmove(zs->inBuff, zs->inBuff + pos, offset); /* move any data we haven't read yet */
CHECK_IO(src.read(src.opaque, zs->inBuff+offset, toRead));
remaining -= toRead;
pos = 0;
}
entries[idx].cOffset = cOffset;
entries[idx].dOffset = dOffset;
cOffset += MEM_readLE32(zs->inBuff + pos);
pos += 4;
dOffset += MEM_readLE32(zs->inBuff + pos);
pos += 4;
if (checksumFlag) {
entries[idx].checksum = MEM_readLE32(zs->inBuff + pos);
pos += 4;
}
}
entries[numFrames].cOffset = cOffset;
entries[numFrames].dOffset = dOffset;
zs->seekTable.entries = entries;
zs->seekTable.tableLen = numFrames;
zs->seekTable.checksumFlag = checksumFlag;
return 0;
}
}
}
size_t ZSTD_seekable_initBuff(ZSTD_seekable* zs, const void* src, size_t srcSize)
{
zs->buffWrapper = (buffWrapper_t){src, srcSize, 0};
{ ZSTD_seekable_customFile srcFile = {&zs->buffWrapper,
&ZSTD_seekable_read_buff,
&ZSTD_seekable_seek_buff};
return ZSTD_seekable_initAdvanced(zs, srcFile); }
}
size_t ZSTD_seekable_initFile(ZSTD_seekable* zs, FILE* src)
{
ZSTD_seekable_customFile srcFile = {src, &ZSTD_seekable_read_FILE,
&ZSTD_seekable_seek_FILE};
return ZSTD_seekable_initAdvanced(zs, srcFile);
}
size_t ZSTD_seekable_initAdvanced(ZSTD_seekable* zs, ZSTD_seekable_customFile src)
{
zs->src = src;
{ const size_t seekTableInit = ZSTD_seekable_loadSeekTable(zs);
if (ZSTD_isError(seekTableInit)) return seekTableInit; }
zs->decompressedOffset = (U64)-1;
zs->curFrame = (U32)-1;
{ const size_t dstreamInit = ZSTD_initDStream(zs->dstream);
if (ZSTD_isError(dstreamInit)) return dstreamInit; }
return 0;
}
size_t ZSTD_seekable_decompress(ZSTD_seekable* zs, void* dst, size_t len, U64 offset)
{
U32 targetFrame = ZSTD_seekable_offsetToFrameIndex(zs, offset);
do {
/* check if we can continue from a previous decompress job */
if (targetFrame != zs->curFrame || offset != zs->decompressedOffset) {
zs->decompressedOffset = zs->seekTable.entries[targetFrame].dOffset;
zs->curFrame = targetFrame;
CHECK_IO(zs->src.seek(zs->src.opaque,
zs->seekTable.entries[targetFrame].cOffset,
SEEK_SET));
zs->in = (ZSTD_inBuffer){zs->inBuff, 0, 0};
XXH64_reset(&zs->xxhState, 0);
ZSTD_resetDStream(zs->dstream);
}
while (zs->decompressedOffset < offset + len) {
size_t toRead;
ZSTD_outBuffer outTmp;
size_t prevOutPos;
if (zs->decompressedOffset < offset) {
/* dummy decompressions until we get to the target offset */
outTmp = (ZSTD_outBuffer){zs->outBuff, MIN(SEEKABLE_BUFF_SIZE, offset - zs->decompressedOffset), 0};
} else {
outTmp = (ZSTD_outBuffer){dst, len, zs->decompressedOffset - offset};
}
prevOutPos = outTmp.pos;
toRead = ZSTD_decompressStream(zs->dstream, &outTmp, &zs->in);
if (ZSTD_isError(toRead)) {
return toRead;
}
if (zs->seekTable.checksumFlag) {
XXH64_update(&zs->xxhState, (BYTE*)outTmp.dst + prevOutPos,
outTmp.pos - prevOutPos);
}
zs->decompressedOffset += outTmp.pos - prevOutPos;
if (toRead == 0) {
/* frame complete */
/* verify checksum */
if (zs->seekTable.checksumFlag &&
(XXH64_digest(&zs->xxhState) & 0xFFFFFFFFU) !=
zs->seekTable.entries[targetFrame].checksum) {
return ERROR(corruption_detected);
}
if (zs->decompressedOffset < offset + len) {
/* go back to the start and force a reset of the stream */
targetFrame = ZSTD_seekable_offsetToFrameIndex(zs, zs->decompressedOffset);
}
break;
}
/* read in more data if we're done with this buffer */
if (zs->in.pos == zs->in.size) {
toRead = MIN(toRead, SEEKABLE_BUFF_SIZE);
CHECK_IO(zs->src.read(zs->src.opaque, zs->inBuff, toRead));
zs->in.size = toRead;
zs->in.pos = 0;
}
}
} while (zs->decompressedOffset != offset + len);
return len;
}
size_t ZSTD_seekable_decompressFrame(ZSTD_seekable* zs, void* dst, size_t dstSize, U32 frameIndex)
{
if (frameIndex >= zs->seekTable.tableLen) {
return ERROR(frameIndex_tooLarge);
}
{
size_t const decompressedSize =
zs->seekTable.entries[frameIndex + 1].dOffset -
zs->seekTable.entries[frameIndex].dOffset;
if (dstSize < decompressedSize) {
return ERROR(dstSize_tooSmall);
}
return ZSTD_seekable_decompress(
zs, dst, decompressedSize,
zs->seekTable.entries[frameIndex].dOffset);
}
}
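
As a complement to the file-based example earlier in this commit, here is a minimal sketch, not part of the commit, of the in-memory path through ZSTD_seekable_initBuff(); `decompressBuff_sketch` is a hypothetical wrapper, and the archive pointer and sizes are assumed to be supplied by the caller.

#define ZSTD_STATIC_LINKING_ONLY
#include <zstd.h>
#include "zstd_seekable.h"

/* Sketch : decompress `len` bytes starting at decompressed offset `off` from a
 * seekable archive held entirely in memory. `archive` must remain valid and
 * unmodified while the ZSTD_seekable object is in use. */
static size_t decompressBuff_sketch(void* dst, size_t len,
                                    const void* archive, size_t archiveSize,
                                    unsigned long long off)
{
    ZSTD_seekable* const zs = ZSTD_seekable_create();
    size_t result;
    if (zs == NULL) return (size_t)-1;   /* ZSTD_isError((size_t)-1) holds */
    result = ZSTD_seekable_initBuff(zs, archive, archiveSize);
    if (!ZSTD_isError(result)) {
        result = ZSTD_seekable_decompress(zs, dst, len, off);
    }
    ZSTD_seekable_free(zs);
    return result;
}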

View File

@ -27,16 +27,19 @@ size_t ZSTD_decompress_with_dict(void *const dst, const size_t dst_len,
/// Get the decompressed size of an input stream so memory can be allocated in
/// advance
/// Returns -1 if the size can't be determined
size_t ZSTD_get_decompressed_size(const void *const src, const size_t src_len);
/******* UTILITY MACROS AND TYPES *********************************************/
// Max block size decompressed size is 128 KB and literal blocks must be smaller
// than that
// Max block size decompressed size is 128 KB and literal blocks can't be
// larger than their block
#define MAX_LITERALS_SIZE ((size_t)128 * 1024)
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))
/// This decoder calls exit(1) when it encounters an error, however a production
/// library should propagate error codes
#define ERROR(s) \
do { \
fprintf(stderr, "Error: %s\n", s); \
@ -67,29 +70,31 @@ typedef int64_t i64;
/// decompression functions.
/*** IO STREAM OPERATIONS *************/
/// These structs are the interface for IO, and do bounds checking on all
/// operations. They should be used opaquely to ensure safety.
/// Output is always done byte-by-byte
/// ostream_t/istream_t are used to wrap the pointers/length data passed into
/// ZSTD_decompress, so that all IO operations are safely bounds checked
/// They are written/read forward, and reads are treated as little-endian
/// They should be used opaquely to ensure safety
typedef struct {
u8 *ptr;
size_t len;
} ostream_t;
/// Input often reads a few bits at a time, so maintain an internal offset
typedef struct {
const u8 *ptr;
int bit_offset;
size_t len;
// Input often reads a few bits at a time, so maintain an internal offset
int bit_offset;
} istream_t;
/// The following two functions are the only ones that allow the istream to be
/// non-byte aligned
/// Reads `num` bits from a bitstream, and updates the internal offset
static inline u64 IO_read_bits(istream_t *const in, const int num);
/// Rewinds the stream by `num` bits
static inline void IO_rewind_bits(istream_t *const in, const int num);
static inline u64 IO_read_bits(istream_t *const in, const int num_bits);
/// Backs-up the stream by `num` bits so they can be read again
static inline void IO_rewind_bits(istream_t *const in, const int num_bits);
/// If the remaining bits in a byte will be unused, advance to the end of the
/// byte
static inline void IO_align_stream(istream_t *const in);
@ -101,30 +106,31 @@ static inline void IO_write_byte(ostream_t *const out, u8 symb);
/// be byte aligned.
static inline size_t IO_istream_len(const istream_t *const in);
/// Returns a pointer where `len` bytes can be read, and advances the internal
/// state. The stream must be byte aligned.
/// Advances the stream by `len` bytes, and returns a pointer to the chunk that
/// was skipped. The stream must be byte aligned.
static inline const u8 *IO_read_bytes(istream_t *const in, size_t len);
/// Returns a pointer where `len` bytes can be written, and advances the internal
/// state. The stream must be byte aligned.
/// Advances the stream by `len` bytes, and returns a pointer to the chunk that
/// was skipped so it can be written to.
static inline u8 *IO_write_bytes(ostream_t *const out, size_t len);
/// Advance the inner state by `len` bytes. The stream must be byte aligned.
static inline void IO_advance_input(istream_t *const in, size_t len);
/// Returns an `ostream_t` constructed from the given pointer and length
/// Returns an `ostream_t` constructed from the given pointer and length.
static inline ostream_t IO_make_ostream(u8 *out, size_t len);
/// Returns an `istream_t` constructed from the given pointer and length
/// Returns an `istream_t` constructed from the given pointer and length.
static inline istream_t IO_make_istream(const u8 *in, size_t len);
/// Returns an `istream_t` with the same base as `in`, and length `len`
/// Then, advance `in` to account for the consumed bytes
/// `in` must be byte aligned
/// Returns an `istream_t` with the same base as `in`, and length `len`.
/// Then, advance `in` to account for the consumed bytes.
/// `in` must be byte aligned.
static inline istream_t IO_make_sub_istream(istream_t *const in, size_t len);
/*** END IO STREAM OPERATIONS *********/
/*** BITSTREAM OPERATIONS *************/
/// Read `num` bits (up to 64) from `src + offset`, where `offset` is in bits
static inline u64 read_bits_LE(const u8 *src, const int num,
/// Read `num` bits (up to 64) from `src + offset`, where `offset` is in bits,
/// and return them interpreted as a little-endian unsigned integer.
static inline u64 read_bits_LE(const u8 *src, const int num_bits,
const size_t offset);
/// Read bits from the end of a HUF or FSE bitstream. `offset` is in bits, so
@ -136,9 +142,8 @@ static inline u64 STREAM_read_bits(const u8 *src, const int bits,
/*** END BITSTREAM OPERATIONS *********/
/*** BIT COUNTING OPERATIONS **********/
/// Returns `x`, where `2^x` is the largest power of 2 less than or equal to
/// `num`, or `-1` if `num == 0`.
static inline int log2inf(const u64 num);
/// Returns the index of the highest set bit in `num`, or `-1` if `num == 0`
static inline int highest_set_bit(const u64 num);
/*** END BIT COUNTING OPERATIONS ******/
/*** HUFFMAN PRIMITIVES ***************/
@ -384,8 +389,8 @@ size_t ZSTD_decompress_with_dict(void *const dst, const size_t dst_len,
parse_dictionary(&parsed_dict, (const u8 *)dict, dict_len);
}
istream_t in = {(const u8 *)src, 0, src_len};
ostream_t out = {(u8 *)dst, dst_len};
istream_t in = IO_make_istream(src, src_len);
ostream_t out = IO_make_ostream(dst, dst_len);
// "A content compressed by Zstandard is transformed into a Zstandard frame.
// Multiple frames can be appended into a single file or stream. A frame is
@ -633,6 +638,7 @@ static void frame_context_apply_dict(frame_context_t *const ctx,
FSE_copy_dtable(&ctx->of_dtable, &dict->of_dtable);
FSE_copy_dtable(&ctx->ml_dtable, &dict->ml_dtable);
// Copy the repeated offsets
memcpy(ctx->previous_offsets, dict->previous_offsets,
sizeof(ctx->previous_offsets));
}
@ -668,7 +674,7 @@ static void decompress_data(frame_context_t *const ctx, ostream_t *const out,
// number of bytes to read and copy."
const u8 *const read_ptr = IO_read_bytes(in, block_len);
u8 *const write_ptr = IO_write_bytes(out, block_len);
//
// Copy the raw data into the output
memcpy(write_ptr, read_ptr, block_len);
@ -682,7 +688,7 @@ static void decompress_data(frame_context_t *const ctx, ostream_t *const out,
const u8 *const read_ptr = IO_read_bytes(in, 1);
u8 *const write_ptr = IO_write_bytes(out, block_len);
// Copy `block_len` copies of `streams->src[0]` to the output
// Copy `block_len` copies of `read_ptr[0]` to the output
memset(write_ptr, read_ptr[0], block_len);
ctx->current_total_output += block_len;
@ -751,7 +757,7 @@ static size_t decode_literals_compressed(frame_context_t *const ctx,
u8 **const literals,
const int block_type,
const int size_format);
static void decode_huf_table(istream_t *const in, HUF_dtable *const dtable);
static void decode_huf_table(HUF_dtable *const dtable, istream_t *const in);
static void fse_decode_hufweights(ostream_t *weights, istream_t *const in,
int *const num_symbs);
@ -894,12 +900,12 @@ static size_t decode_literals_compressed(frame_context_t *const ctx,
istream_t huf_stream = IO_make_sub_istream(in, compressed_size);
if (block_type == 2) {
// Decode provided Huffman table
// Decode the provided Huffman table
// "This section is only present when Literals_Block_Type type is
// Compressed_Literals_Block (2)."
HUF_free_dtable(&ctx->literals_dtable);
decode_huf_table(&huf_stream, &ctx->literals_dtable);
decode_huf_table(&ctx->literals_dtable, &huf_stream);
} else {
// If the previous Huffman table is being repeated, ensure it exists
if (!ctx->literals_dtable.symbols) {
@ -922,13 +928,13 @@ static size_t decode_literals_compressed(frame_context_t *const ctx,
}
// Decode the Huffman table description
static void decode_huf_table(istream_t *const in, HUF_dtable *const dtable) {
const u8 header = IO_read_bits(in, 8);
static void decode_huf_table(HUF_dtable *const dtable, istream_t *const in) {
// "All literal values from zero (included) to last present one (excluded)
// are represented by Weight with values from 0 to Max_Number_of_Bits."
// "This is a single byte value (0-255), which describes how to decode the list of weights."
const u8 header = IO_read_bits(in, 8);
u8 weights[HUF_MAX_SYMBS];
memset(weights, 0, sizeof(weights));
@ -997,7 +1003,7 @@ typedef struct {
u16 ll_state;
u16 of_state;
u16 ml_state;
} sequence_state_t;
} sequence_states_t;
/// Different modes to signal to decode_seq_tables what to do
typedef enum {
@ -1052,10 +1058,10 @@ static void decompress_sequences(frame_context_t *const ctx,
istream_t *const in,
sequence_command_t *const sequences,
const size_t num_sequences);
static sequence_command_t decode_sequence(sequence_state_t *const state,
static sequence_command_t decode_sequence(sequence_states_t *const state,
const u8 *const src,
i64 *const offset);
static void decode_seq_table(istream_t *const in, FSE_dtable *const table,
static void decode_seq_table(FSE_dtable *const table, istream_t *const in,
const seq_part_t type, const seq_mode_t mode);
static size_t decode_sequences(frame_context_t *const ctx, istream_t *in,
@ -1131,34 +1137,33 @@ static void decompress_sequences(frame_context_t *const ctx, istream_t *in,
// Offsets
// Match Lengths"
// Update the tables we have stored in the context
decode_seq_table(in, &ctx->ll_dtable, seq_literal_length,
decode_seq_table(&ctx->ll_dtable, in, seq_literal_length,
(compression_modes >> 6) & 3);
decode_seq_table(in, &ctx->of_dtable, seq_offset,
decode_seq_table(&ctx->of_dtable, in, seq_offset,
(compression_modes >> 4) & 3);
decode_seq_table(in, &ctx->ml_dtable, seq_match_length,
decode_seq_table(&ctx->ml_dtable, in, seq_match_length,
(compression_modes >> 2) & 3);
// Check to make sure none of the tables are uninitialized
if (!ctx->ll_dtable.symbols || !ctx->of_dtable.symbols ||
!ctx->ml_dtable.symbols) {
CORRUPTION();
sequence_states_t states;
// Initialize the decoding tables
{
states.ll_table = ctx->ll_dtable;
states.of_table = ctx->of_dtable;
states.ml_table = ctx->ml_dtable;
}
sequence_state_t state;
// Copy the context's tables into the local state
memcpy(&state.ll_table, &ctx->ll_dtable, sizeof(FSE_dtable));
memcpy(&state.of_table, &ctx->of_dtable, sizeof(FSE_dtable));
memcpy(&state.ml_table, &ctx->ml_dtable, sizeof(FSE_dtable));
size_t len = IO_istream_len(in);
const size_t len = IO_istream_len(in);
const u8 *const src = IO_read_bytes(in, len);
// "After writing the last bit containing information, the compressor writes
// a single 1-bit and then fills the byte with 0-7 0 bits of padding."
const int padding = 8 - log2inf(src[len - 1]);
i64 offset = len * 8 - padding;
const int padding = 8 - highest_set_bit(src[len - 1]);
// The offset starts at the end because FSE streams are read backwards
i64 bit_offset = len * 8 - padding;
// "The bitstream starts with initial state values, each using the required
// number of bits in their respective accuracy, decoded previously from
@ -1166,24 +1171,22 @@ static void decompress_sequences(frame_context_t *const ctx, istream_t *in,
//
// It starts by Literals_Length_State, followed by Offset_State, and finally
// Match_Length_State."
FSE_init_state(&state.ll_table, &state.ll_state, src, &offset);
FSE_init_state(&state.of_table, &state.of_state, src, &offset);
FSE_init_state(&state.ml_table, &state.ml_state, src, &offset);
FSE_init_state(&states.ll_table, &states.ll_state, src, &bit_offset);
FSE_init_state(&states.of_table, &states.of_state, src, &bit_offset);
FSE_init_state(&states.ml_table, &states.ml_state, src, &bit_offset);
for (size_t i = 0; i < num_sequences; i++) {
// Decode sequences one by one
sequences[i] = decode_sequence(&state, src, &offset);
sequences[i] = decode_sequence(&states, src, &bit_offset);
}
if (offset != 0) {
if (bit_offset != 0) {
CORRUPTION();
}
// Don't free tables so they can be used in the next block
}
// Decode a single sequence and update the state
static sequence_command_t decode_sequence(sequence_state_t *const state,
static sequence_command_t decode_sequence(sequence_states_t *const states,
const u8 *const src,
i64 *const offset) {
// "Each symbol is a code in its own context, which specifies Baseline and
@ -1191,9 +1194,9 @@ static sequence_command_t decode_sequence(sequence_state_t *const state,
// additional bits in the same bitstream."
// Decode symbols, but don't update states
const u8 of_code = FSE_peek_symbol(&state->of_table, state->of_state);
const u8 ll_code = FSE_peek_symbol(&state->ll_table, state->ll_state);
const u8 ml_code = FSE_peek_symbol(&state->ml_table, state->ml_state);
const u8 of_code = FSE_peek_symbol(&states->of_table, states->of_state);
const u8 ll_code = FSE_peek_symbol(&states->ll_table, states->ll_state);
const u8 ml_code = FSE_peek_symbol(&states->ml_table, states->ml_state);
// Offset doesn't need a max value as it's not decoded using a table
if (ll_code > SEQ_MAX_CODES[seq_literal_length] ||
@ -1221,17 +1224,18 @@ static sequence_command_t decode_sequence(sequence_state_t *const state,
// then Offset_State."
// If the stream is complete don't read bits to update state
if (*offset != 0) {
FSE_update_state(&state->ll_table, &state->ll_state, src, offset);
FSE_update_state(&state->ml_table, &state->ml_state, src, offset);
FSE_update_state(&state->of_table, &state->of_state, src, offset);
FSE_update_state(&states->ll_table, &states->ll_state, src, offset);
FSE_update_state(&states->ml_table, &states->ml_state, src, offset);
FSE_update_state(&states->of_table, &states->of_state, src, offset);
}
return seq;
}
/// Given a sequence part and table mode, decode the FSE distribution
static void decode_seq_table(istream_t *const in, FSE_dtable *const table,
const seq_part_t type, const seq_mode_t mode) {
/// Errors if the mode is `seq_repeat` without a pre-existing table in `table`
static void decode_seq_table(FSE_dtable *const table, istream_t *const in,
const seq_part_t type, const seq_mode_t mode) {
// Constant arrays indexed by seq_part_t
const i16 *const default_distributions[] = {SEQ_LITERAL_LENGTH_DEFAULT_DIST,
SEQ_OFFSET_DEFAULT_DIST,
@ -1272,12 +1276,17 @@ static void decode_seq_table(istream_t *const in, FSE_dtable *const table,
// "Repeat_Mode : re-use distribution table from previous compressed
// block."
// Nothing to do here, table will be unchanged
if (!table->symbols) {
// This mode is invalid if we don't already have a table
CORRUPTION();
}
break;
default:
// Impossible, as mode is from 0-3
IMPOSSIBLE();
break;
}
}
/******* END SEQUENCE DECODING ************************************************/
@ -1296,6 +1305,8 @@ static void execute_sequences(frame_context_t *const ctx, ostream_t *const out,
const sequence_command_t seq = sequences[i];
{
// If the sequence asks for more literals than are left, the
// sequence must be corrupted
if (seq.literal_length > IO_istream_len(&litstream)) {
CORRUPTION();
}
@ -1336,7 +1347,8 @@ static void execute_sequences(frame_context_t *const ctx, ostream_t *const out,
// as per the exception listed above
offset = idx < 3 ? offset_hist[idx] : offset_hist[0] - 1;
// If idx == 1 we don't need to modify offset_hist[2]
// If idx == 1 we don't need to modify offset_hist[2], since
// we're using the second-most recent code
if (idx > 1) {
offset_hist[2] = offset_hist[1];
}
@ -1344,6 +1356,8 @@ static void execute_sequences(frame_context_t *const ctx, ostream_t *const out,
offset_hist[0] = offset;
}
} else {
// When it's not a repeat offset:
// "if (Offset_Value > 3) offset = Offset_Value - 3;"
offset = seq.offset - 3;
// Shift back history
@ -1391,11 +1405,11 @@ static void execute_sequences(frame_context_t *const ctx, ostream_t *const out,
total_output += seq.match_length;
}
// Copy any leftover literals
{
size_t len = IO_istream_len(&litstream);
u8 *const write_ptr = IO_write_bytes(out, len);
const u8 *const read_ptr = IO_read_bytes(&litstream, len);
// Copy any leftover literals
memcpy(write_ptr, read_ptr, len);
total_output += len;
@ -1517,10 +1531,10 @@ static void parse_dictionary(dictionary_t *const dict, const u8 *src,
// recent offsets (instead of using {1,4,8}), stored in order, 4-bytes
// little-endian each, for a total of 12 bytes. Each recent offset must have
// a value < dictionary size."
decode_huf_table(&in, &dict->literals_dtable);
decode_seq_table(&in, &dict->of_dtable, seq_offset, seq_fse);
decode_seq_table(&in, &dict->ml_dtable, seq_match_length, seq_fse);
decode_seq_table(&in, &dict->ll_dtable, seq_literal_length, seq_fse);
decode_huf_table(&dict->literals_dtable, &in);
decode_seq_table(&dict->of_dtable, &in, seq_offset, seq_fse);
decode_seq_table(&dict->ml_dtable, &in, seq_match_length, seq_fse);
decode_seq_table(&dict->ll_dtable, &in, seq_literal_length, seq_fse);
// Read in the previous offset history
dict->previous_offsets[0] = IO_read_bits(&in, 32);
@ -1571,20 +1585,20 @@ static void free_dictionary(dictionary_t *const dict) {
/******* IO STREAM OPERATIONS *************************************************/
#define UNALIGNED() ERROR("Attempting to operate on a non-byte aligned stream")
/// Reads `num` bits from a bitstream, and updates the internal offset
static inline u64 IO_read_bits(istream_t *const in, const int num) {
if (num > 64 || num <= 0) {
static inline u64 IO_read_bits(istream_t *const in, const int num_bits) {
if (num_bits > 64 || num_bits <= 0) {
ERROR("Attempt to read an invalid number of bits");
}
const size_t bytes = (num + in->bit_offset + 7) / 8;
const size_t full_bytes = (num + in->bit_offset) / 8;
const size_t bytes = (num_bits + in->bit_offset + 7) / 8;
const size_t full_bytes = (num_bits + in->bit_offset) / 8;
if (bytes > in->len) {
INP_SIZE();
}
const u64 result = read_bits_LE(in->ptr, num, in->bit_offset);
const u64 result = read_bits_LE(in->ptr, num_bits, in->bit_offset);
in->bit_offset = (num + in->bit_offset) % 8;
in->bit_offset = (num_bits + in->bit_offset) % 8;
in->ptr += full_bytes;
in->len -= full_bytes;
@ -1593,16 +1607,21 @@ static inline u64 IO_read_bits(istream_t *const in, const int num) {
/// If a non-zero number of bits have been read from the current byte, advance
/// the offset to the next byte
static inline void IO_rewind_bits(istream_t *const in, int num) {
if (num < 0) {
static inline void IO_rewind_bits(istream_t *const in, int num_bits) {
if (num_bits < 0) {
ERROR("Attempting to rewind stream by a negative number of bits");
}
const int new_offset = in->bit_offset - num;
const i64 bytes = (new_offset - 7) / 8;
// move the offset back by `num_bits` bits
const int new_offset = in->bit_offset - num_bits;
// determine the number of whole bytes we have to rewind, rounding up to an
// integer number (e.g. if `new_offset == -5`, `bytes == 1`)
const i64 bytes = -(new_offset - 7) / 8;
in->ptr += bytes;
in->len -= bytes;
in->ptr -= bytes;
in->len += bytes;
// make sure the resulting `bit_offset` is positive, as mod in C does not
// convert numbers from negative to positive (e.g. -22 % 8 == -6)
in->bit_offset = ((new_offset % 8) + 8) % 8;
}
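// Worked example (added for illustration, not part of the original code):
// with `bit_offset == 1`, rewinding 3 bits gives `new_offset == -2`;
// `bytes == -(-2 - 7) / 8 == 1`, so the stream pointer moves back one byte,
// and `bit_offset` becomes ((-2 % 8) + 8) % 8 == 6, i.e. bit position
// 8*p + 1 - 3 == 8*(p-1) + 6, as expected.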
@ -1683,33 +1702,26 @@ static inline ostream_t IO_make_ostream(u8 *out, size_t len) {
/// Returns an `istream_t` constructed from the given pointer and length
static inline istream_t IO_make_istream(const u8 *in, size_t len) {
return (istream_t) { in, 0, len };
return (istream_t) { in, len, 0 };
}
/// Returns an `istream_t` with the same base as `in`, and length `len`
/// Then, advance `in` to account for the consumed bytes
/// `in` must be byte aligned
static inline istream_t IO_make_sub_istream(istream_t *const in, size_t len) {
if (len > in->len) {
INP_SIZE();
}
if (in->bit_offset != 0) {
UNALIGNED();
}
const istream_t sub = { in->ptr, in->bit_offset, len };
// Consume `len` bytes of the parent stream
const u8 *const ptr = IO_read_bytes(in, len);
in->ptr += len;
in->len -= len;
return sub;
// Make a substream using the pointer to those `len` bytes
return IO_make_istream(ptr, len);
}
/******* END IO STREAM OPERATIONS *********************************************/
/******* BITSTREAM OPERATIONS *************************************************/
/// Read `num` bits (up to 64) from `src + offset`, where `offset` is in bits
static inline u64 read_bits_LE(const u8 *src, const int num,
static inline u64 read_bits_LE(const u8 *src, const int num_bits,
const size_t offset) {
if (num > 64) {
if (num_bits > 64) {
ERROR("Attempt to read an invalid number of bits");
}
@ -1719,10 +1731,10 @@ static inline u64 read_bits_LE(const u8 *src, const int num,
u64 res = 0;
int shift = 0;
int left = num;
int left = num_bits;
while (left > 0) {
u64 mask = left >= 8 ? 0xff : (((u64)1 << left) - 1);
// Dead the next byte, shift it to account for the offset, and then mask
// Read the next byte, shift it to account for the offset, and then mask
// out the top part if we don't need all the bits
res += (((u64)*src++ >> bit_offset) & mask) << shift;
shift += 8 - bit_offset;
@ -1761,7 +1773,7 @@ static inline u64 STREAM_read_bits(const u8 *const src, const int bits,
/******* BIT COUNTING OPERATIONS **********************************************/
/// Returns `x`, where `2^x` is the largest power of 2 less than or equal to
/// `num`, or `-1` if `num == 0`.
static inline int log2inf(const u64 num) {
static inline int highest_set_bit(const u64 num) {
for (int i = 63; i >= 0; i--) {
if (((u64)1 << i) <= num) {
return i;
@ -1813,17 +1825,18 @@ static size_t HUF_decompress_1stream(const HUF_dtable *const dtable,
// final-bit-flag. Consequently, a last byte of 0 is not possible. And the
// final-bit-flag itself is not part of the useful bitstream. Hence, the
// last byte contains between 0 and 7 useful bits."
const int padding = 8 - log2inf(src[len - 1]);
const int padding = 8 - highest_set_bit(src[len - 1]);
i64 offset = len * 8 - padding;
// Offset starts at the end because HUF streams are read backwards
i64 bit_offset = len * 8 - padding;
u16 state;
HUF_init_state(dtable, &state, src, &offset);
HUF_init_state(dtable, &state, src, &bit_offset);
size_t symbols_written = 0;
while (offset > -dtable->max_bits) {
while (bit_offset > -dtable->max_bits) {
// Iterate over the stream, decoding one symbol at a time
IO_write_byte(out, HUF_decode_symbol(dtable, &state, src, &offset));
IO_write_byte(out, HUF_decode_symbol(dtable, &state, src, &bit_offset));
symbols_written++;
}
// "The process continues up to reading the required number of symbols per
@ -1836,7 +1849,7 @@ static size_t HUF_decompress_1stream(const HUF_dtable *const dtable,
// before the start of `src`
// Therefore `offset`, the edge to start reading new bits at, should be
// dtable->max_bits before the start of the stream
if (offset != -dtable->max_bits) {
if (bit_offset != -dtable->max_bits) {
CORRUPTION();
}
@ -1960,7 +1973,7 @@ static void HUF_init_dtable_usingweights(HUF_dtable *const table,
}
// Find the first power of 2 larger than the sum
const int max_bits = log2inf(weight_sum) + 1;
const int max_bits = highest_set_bit(weight_sum) + 1;
const u64 left_over = ((u64)1 << max_bits) - weight_sum;
// If the left over isn't a power of 2, the weights are invalid
if (left_over & (left_over - 1)) {
@ -1969,7 +1982,7 @@ static void HUF_init_dtable_usingweights(HUF_dtable *const table,
// left_over is used to find the last weight as it's not transmitted
// by inverting 2^(weight - 1) we can determine the value of last_weight
const int last_weight = log2inf(left_over) + 1;
const int last_weight = highest_set_bit(left_over) + 1;
for (int i = 0; i < num_symbs; i++) {
// "Number_of_Bits = Number_of_Bits ? Max_Number_of_Bits + 1 - Weight : 0"
@ -2063,7 +2076,7 @@ static size_t FSE_decompress_interleaved2(const FSE_dtable *const dtable,
// final-bit-flag. Consequently, a last byte of 0 is not possible. And the
// final-bit-flag itself is not part of the useful bitstream. Hence, the
// last byte contains between 0 and 7 useful bits."
const int padding = 8 - log2inf(src[len - 1]);
const int padding = 8 - highest_set_bit(src[len - 1]);
i64 offset = len * 8 - padding;
u16 state1, state2;
@ -2184,7 +2197,7 @@ static void FSE_init_dtable(FSE_dtable *const dtable,
u16 next_state_desc = state_desc[symbol]++;
// Fills in the table appropriately, next_state_desc increases by symbol
// over time, decreasing number of bits
dtable->num_bits[i] = (u8)(accuracy_log - log2inf(next_state_desc));
dtable->num_bits[i] = (u8)(accuracy_log - highest_set_bit(next_state_desc));
// Baseline increases until the bit threshold is passed, at which point
// it resets to 0
dtable->new_state_base[i] =
@ -2235,7 +2248,7 @@ static void FSE_decode_header(FSE_dtable *const dtable, istream_t *const in,
int symb = 0;
while (remaining > 0 && symb < FSE_MAX_SYMBS) {
// Log of the number of possible values we could read
int bits = log2inf(remaining + 1) + 1;
int bits = highest_set_bit(remaining + 1) + 1;
u16 val = IO_read_bits(in, bits);

5 binary image files changed (not shown); before/after sizes: 34→70 KiB, 8.8→24 KiB, 23→88 KiB, 24→92 KiB, 26→88 KiB.

View File

@ -16,7 +16,8 @@ Distribution of this document is unlimited.
### Version
0.2.4 (17/02/17)
0.2.5 (31/03/17)
Introduction
------------
@ -109,7 +110,7 @@ The structure of a single Zstandard frame is following:
__`Magic_Number`__
4 Bytes, little-endian format.
4 Bytes, __little-endian__ format.
Value : 0xFD2FB528
__`Frame_Header`__
@ -127,7 +128,7 @@ An optional 32-bit checksum, only present if `Content_Checksum_flag` is set.
The content checksum is the result
of [xxh64() hash function](http://www.xxhash.org)
digesting the original (decoded) data as input, and a seed of zero.
The low 4 bytes of the checksum are stored in little endian format.
The low 4 bytes of the checksum are stored in __little-endian__ format.
### `Frame_Header`
@ -154,41 +155,42 @@ Decoding this byte is enough to tell the size of `Frame_Header`.
| 2 | `Content_Checksum_flag` |
| 1-0 | `Dictionary_ID_flag` |
In this table, bit 7 the is highest bit, while bit 0 the is lowest.
In this table, bit 7 is the highest bit, while bit 0 is the lowest one.
__`Frame_Content_Size_flag`__
This is a 2-bits flag (`= Frame_Header_Descriptor >> 6`),
specifying if decompressed data size is provided within the header.
The `Flag_Value` can be converted into `Field_Size`,
specifying if `Frame_Content_Size` (the decompressed data size)
is provided within the header.
`Flag_Value` provides `FCS_Field_Size`,
which is the number of bytes used by `Frame_Content_Size`
according to the following table:
|`Flag_Value`| 0 | 1 | 2 | 3 |
| ---------- | ------ | --- | --- | --- |
|`Field_Size`| 0 or 1 | 2 | 4 | 8 |
| `Flag_Value` | 0 | 1 | 2 | 3 |
| -------------- | ------ | --- | --- | --- |
|`FCS_Field_Size`| 0 or 1 | 2 | 4 | 8 |
When `Flag_Value` is `0`, `Field_Size` depends on `Single_Segment_flag` :
When `Flag_Value` is `0`, `FCS_Field_Size` depends on `Single_Segment_flag` :
if `Single_Segment_flag` is set, `FCS_Field_Size` is 1.
Otherwise, `Field_Size` is 0 (content size not provided).
Otherwise, `FCS_Field_Size` is 0 : `Frame_Content_Size` is not provided.
__`Single_Segment_flag`__
If this flag is set,
data must be regenerated within a single continuous memory segment.
In this case, `Frame_Content_Size` is necessarily present,
but `Window_Descriptor` byte is skipped.
In this case, `Window_Descriptor` byte is skipped,
but `Frame_Content_Size` is necessarily present.
As a consequence, the decoder must allocate a memory segment
of size equal or bigger than `Frame_Content_Size`.
In order to preserve the decoder from unreasonable memory requirements,
a decoder can reject a compressed frame
a decoder is allowed to reject a compressed frame
which requests a memory size beyond decoder's authorized range.
For broader compatibility, decoders are recommended to support
memory sizes of at least 8 MB.
This is just a recommendation,
This is only a recommendation,
each decoder is free to support higher or lower limits,
depending on local limitations.
@ -224,37 +226,38 @@ It also specifies the size of this field as `Field_Size`.
#### `Window_Descriptor`
Provides guarantees on maximum back-reference distance
that will be used within compressed data.
Provides guarantees on minimum memory buffer required to decompress a frame.
This information is important for decoders to allocate enough memory.
The `Window_Descriptor` byte is optional. It is absent when `Single_Segment_flag` is set.
In this case, the maximum back-reference distance is the content size itself,
which can be any value from 1 to 2^64-1 bytes (16 EB).
The `Window_Descriptor` byte is optional.
When `Single_Segment_flag` is set, `Window_Descriptor` is not present.
In this case, `Window_Size` is `Frame_Content_Size`,
which can be any value from 0 to 2^64-1 bytes (16 ExaBytes).
| Bit numbers | 7-3 | 2-0 |
| ----------- | ---------- | ---------- |
| Field name | `Exponent` | `Mantissa` |
Maximum distance is given by the following formulas :
The minimum memory buffer size is called `Window_Size`.
It is described by the following formulas :
```
windowLog = 10 + Exponent;
windowBase = 1 << windowLog;
windowAdd = (windowBase / 8) * Mantissa;
Window_Size = windowBase + windowAdd;
```
The minimum window size is 1 KB.
The maximum size is `15*(1<<38)` bytes, which is 1.875 TB.
The minimum `Window_Size` is 1 KB.
The maximum `Window_Size` is `(1<<41) + 7*(1<<38)` bytes, which is 3.75 TB.
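As a cross-check of the formulas above, here is a small C sketch (illustrative only, names are not taken from the reference implementation) deriving `Window_Size` from a raw `Window_Descriptor` byte :

```
#include <stdint.h>

/* Illustrative sketch : compute Window_Size from the Window_Descriptor byte */
static uint64_t window_size_from_descriptor(uint8_t descriptor)
{
    int const exponent = descriptor >> 3;             /* bits 7-3 */
    int const mantissa = descriptor & 7;              /* bits 2-0 */
    int const window_log = 10 + exponent;
    uint64_t const window_base = (uint64_t)1 << window_log;
    uint64_t const window_add  = (window_base / 8) * mantissa;
    return window_base + window_add;
}
```

A descriptor of `0` yields the 1 KB minimum; a descriptor of `0xFF` yields `(1<<41) + 7*(1<<38)`, the 3.75 TB maximum.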
To properly decode compressed data,
a decoder will need to allocate a buffer of at least `Window_Size` bytes.
In order to preserve decoder from unreasonable memory requirements,
a decoder can refuse a compressed frame
a decoder is allowed to reject a compressed frame
which requests a memory size beyond decoder's authorized range.
For improved interoperability,
decoders are recommended to be compatible with window sizes of 8 MB,
decoders are recommended to be compatible with `Window_Size >= 8 MB`,
and encoders are recommended to not request more than 8 MB.
It's merely a recommendation though,
decoders are free to support larger or lower limits,
@ -264,112 +267,118 @@ depending on local limitations.
This is a variable size field, which contains
the ID of the dictionary required to properly decode the frame.
Note that this field is optional. When it's not present,
`Dictionary_ID` field is optional. When it's not present,
it's up to the decoder to make sure it uses the correct dictionary.
Format is little-endian.
Field size depends on `Dictionary_ID_flag`.
1 byte can represent an ID 0-255.
2 bytes can represent an ID 0-65535.
4 bytes can represent an ID 0-4294967295.
Format is __little-endian__.
It's allowed to represent a small ID (for example `13`)
with a large 4-bytes dictionary ID, losing some compacity in the process.
with a large 4-bytes dictionary ID, even if it is less efficient.
_Reserved ranges :_
If the frame is going to be distributed in a private environment,
any dictionary ID can be used.
However, for public distribution of compressed frames using a dictionary,
the following ranges are reserved for future use and should not be used :
- low range : 1 - 32767
- high range : >= (2^31)
the following ranges are reserved and shall not be used :
- low range : `<= 32767`
- high range : `>= (1 << 31)`
#### `Frame_Content_Size`
This is the original (uncompressed) size. This information is optional.
The `Field_Size` is provided according to value of `Frame_Content_Size_flag`.
The `Field_Size` can be equal to 0 (not present), 1, 2, 4 or 8 bytes.
Format is little-endian.
`Frame_Content_Size` uses a variable number of bytes, provided by `FCS_Field_Size`.
`FCS_Field_Size` is provided by the value of `Frame_Content_Size_flag`.
`FCS_Field_Size` can be equal to 0 (not present), 1, 2, 4 or 8 bytes.
| `Field_Size` | Range |
| ------------ | ---------- |
| 1 | 0 - 255 |
| 2 | 256 - 65791|
| 4 | 0 - 2^32-1 |
| 8 | 0 - 2^64-1 |
| `FCS_Field_Size` | Range |
| ---------------- | ---------- |
| 0 | unknown |
| 1 | 0 - 255 |
| 2 | 256 - 65791|
| 4 | 0 - 2^32-1 |
| 8 | 0 - 2^64-1 |
When `Field_Size` is 1, 4 or 8 bytes, the value is read directly.
When `Field_Size` is 2, _the offset of 256 is added_.
`Frame_Content_Size` format is __little-endian__.
When `FCS_Field_Size` is 1, 4 or 8 bytes, the value is read directly.
When `FCS_Field_Size` is 2, _the offset of 256 is added_.
It's allowed to represent a small size (for example `18`) using any compatible variant.
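As an illustration only (helper names are not from the reference implementation), the whole decoding rule fits in a few lines of C :

```
#include <stdint.h>

/* read nbytes little-endian from p (nbytes <= 8) */
static uint64_t read_le(const uint8_t *p, int nbytes)
{
    uint64_t v = 0;
    for (int i = 0; i < nbytes; i++) v |= (uint64_t)p[i] << (8 * i);
    return v;
}

/* fcs_field_size must be 1, 2, 4 or 8 ; 0 means the field is absent */
static uint64_t read_frame_content_size(const uint8_t *p, int fcs_field_size)
{
    uint64_t const v = read_le(p, fcs_field_size);
    return (fcs_field_size == 2) ? v + 256 : v;   /* 2-byte variant adds 256 */
}
```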
Blocks
-------
After the magic number and header of each block,
there are some number of blocks.
Each frame must have at least one block but there is no upper limit
on the number of blocks per frame.
After `Magic_Number` and `Frame_Header`, there are some number of blocks.
Each frame must have at least one block,
but there is no upper limit on the number of blocks per frame.
The structure of a block is as follows:
| `Last_Block` | `Block_Type` | `Block_Size` | `Block_Content` |
|:------------:|:------------:|:------------:|:---------------:|
| 1 bit | 2 bits | 21 bits | n bytes |
| `Block_Header` | `Block_Content` |
|:--------------:|:---------------:|
| 3 bytes | n bytes |
The block header (`Last_Block`, `Block_Type`, and `Block_Size`) uses 3-bytes.
`Block_Header` uses 3 bytes, written using __little-endian__ convention.
It contains 3 fields :
| `Last_Block` | `Block_Type` | `Block_Size` |
|:------------:|:------------:|:------------:|
| bit 0 | bits 1-2 | bits 3-23 |
__`Last_Block`__
The lowest bit signals if this block is the last one.
The frame will end after this one.
The frame will end after this last block.
It may be followed by an optional `Content_Checksum`
(see [Zstandard Frames](#zstandard-frames)).
__`Block_Type` and `Block_Size`__
The next 2 bits represent the `Block_Type`,
while the remaining 21 bits represent the `Block_Size`.
Format is __little-endian__.
__`Block_Type`__
The next 2 bits represent the `Block_Type`.
There are 4 block types :
| Value | 0 | 1 | 2 | 3 |
| Value | 0 | 1 | 2 | 3 |
| ------------ | ----------- | ----------- | ------------------ | --------- |
| `Block_Type` | `Raw_Block` | `RLE_Block` | `Compressed_Block` | `Reserved`|
- `Raw_Block` - this is an uncompressed block.
`Block_Content` contains `Block_Size` bytes to read and copy
as decoded data.
`Block_Content` contains `Block_Size` bytes.
- `RLE_Block` - this is a single byte, repeated N times.
`Block_Content` consists of a single byte,
and `Block_Size` is the number of times this byte should be repeated.
- `RLE_Block` - this is a single byte, repeated `Block_Size` times.
`Block_Content` consists of a single byte.
On the decompression side, this byte must be repeated `Block_Size` times.
- `Compressed_Block` - this is a [Zstandard compressed block](#compressed-blocks),
explained later on.
`Block_Size` is the length of `Block_Content`, the compressed data.
The decompressed size is unknown,
The decompressed size is not known,
but its maximum possible value is guaranteed (see below)
- `Reserved` - this is not a block.
This value cannot be used with current version of this specification.
__`Block_Size`__
The upper 21 bits of `Block_Header` represent the `Block_Size`.
Block sizes must respect a few rules :
- In compressed mode, compressed size is always strictly less than decompressed size.
- Block decompressed size is always <= maximum back-reference distance.
- For `Compressed_Block`, `Block_Size` is always strictly less than decompressed size.
- Block decompressed size is always <= `Window_Size`
- Block decompressed size is always <= 128 KB.
A data block is not necessarily "full" :
since an arbitrary “flush” may happen anytime,
block decompressed content can be any size (even empty),
A block can contain any number of bytes (even empty),
up to `Block_Maximum_Decompressed_Size`, which is the smallest of :
- Maximum back-reference distance
- `Window_Size`
- 128 KB
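Putting the three `Block_Header` fields together, a sketch of the parsing step might look as follows (illustrative only, not from the reference implementation) :

```
#include <stdint.h>

typedef struct {
    int      last_block;   /* bit 0 */
    int      block_type;   /* bits 1-2 : 0=Raw, 1=RLE, 2=Compressed, 3=Reserved */
    uint32_t block_size;   /* bits 3-23 */
} block_header_t;

/* src points to the 3-byte little-endian Block_Header */
static block_header_t parse_block_header(const uint8_t *src)
{
    uint32_t const v = (uint32_t)src[0]
                     | ((uint32_t)src[1] << 8)
                     | ((uint32_t)src[2] << 16);
    block_header_t h;
    h.last_block = v & 1;
    h.block_type = (v >> 1) & 3;
    h.block_size = v >> 3;
    return h;
}
```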
Compressed Blocks
-----------------
To decompress a compressed block, the compressed size must be provided from
`Block_Size` field in the block header.
To decompress a compressed block, the compressed size must be provided
from `Block_Size` field within `Block_Header`.
A compressed block consists of 2 sections :
- [Literals Section](#literals-section)
@ -381,36 +390,34 @@ data in [Sequence Execution](#sequence-execution)
#### Prerequisites
To decode a compressed block, the following elements are necessary :
- Previous decoded data, up to a distance of `Window_Size`,
or all previous data when `Single_Segment_flag` is set.
- List of "recent offsets" from the previous compressed block.
- Decoding tables of the previous compressed block for each symbol type
or all previously decoded data when `Single_Segment_flag` is set.
- List of "recent offsets" from previous `Compressed_Block`.
- Decoding tables of previous `Compressed_Block` for each symbol type
(literals, literals lengths, match lengths, offsets).
Literals Section
----------------
During sequence execution, symbols from the literals section
During the sequence phase, literals are entangled with match copy operations.
All literals are regrouped in the first part of the block.
They can be decoded first, and then copied during sequence operations,
or they can be decoded on the flow, as needed by sequence commands.
| `Literals_Section_Header` | [`Huffman_Tree_Description`] | Stream1 | [Stream2] | [Stream3] | [Stream4] |
| ------------------------- | ---------------------------- | ------- | --------- | --------- | --------- |
They can be decoded first, and then copied during [Sequence Execution],
or they can be decoded on the flow during [Sequence Execution].
Literals can be stored uncompressed or compressed using Huffman prefix codes.
When compressed, an optional tree description can be present,
followed by 1 or 4 streams.
| `Literals_Section_Header` | [`Huffman_Tree_Description`] | Stream1 | [Stream2] | [Stream3] | [Stream4] |
| ------------------------- | ---------------------------- | ------- | --------- | --------- | --------- |
#### `Literals_Section_Header`
Header is in charge of describing how literals are packed.
It's a byte-aligned variable-size bitfield, ranging from 1 to 5 bytes,
using little-endian convention.
using __little-endian__ convention.
| `Literals_Block_Type` | `Size_Format` | `Regenerated_Size` | [`Compressed_Size`] |
| --------------------- | ------------- | ------------------ | ----------------- |
| 2 bits | 1 - 2 bits | 5 - 20 bits | 0 - 18 bits |
| --------------------- | ------------- | ------------------ | ------------------- |
| 2 bits | 1 - 2 bits | 5 - 20 bits | 0 - 18 bits |
In this representation, bits on the left are the lowest bits.
@ -418,33 +425,38 @@ __`Literals_Block_Type`__
This field uses 2 lowest bits of first byte, describing 4 different block types :
| `Literals_Block_Type` | Value |
| ----------------------------- | ----- |
| `Raw_Literals_Block` | 0 |
| `RLE_Literals_Block` | 1 |
| `Compressed_Literals_Block` | 2 |
| `Repeat_Stats_Literals_Block` | 3 |
| `Literals_Block_Type` | Value |
| --------------------------- | ----- |
| `Raw_Literals_Block` | 0 |
| `RLE_Literals_Block` | 1 |
| `Compressed_Literals_Block` | 2 |
| `Treeless_Literals_Block` | 3 |
- `Raw_Literals_Block` - Literals are stored uncompressed.
- `RLE_Literals_Block` - Literals consist of a single byte value repeated N times.
- `RLE_Literals_Block` - Literals consist of a single byte value
repeated `Regenerated_Size` times.
- `Compressed_Literals_Block` - This is a standard Huffman-compressed block,
starting with a Huffman tree description.
See details below.
- `Repeat_Stats_Literals_Block` - This is a Huffman-compressed block,
- `Treeless_Literals_Block` - This is a Huffman-compressed block,
using Huffman tree _from previous Huffman-compressed literals block_.
Huffman tree description will be skipped.
Note: If this mode is used without any previous Huffman-table in the frame
(or [dictionary](#dictionary-format)), this should be treated as corruption.
`Huffman_Tree_Description` will be skipped.
Note: If this mode is triggered without any previous Huffman-table in the frame
(or [dictionary](#dictionary-format)), this should be treated as data corruption.
__`Size_Format`__
`Size_Format` is divided into 2 families :
- For `Raw_Literals_Block` and `RLE_Literals_Block` it's enough to decode `Regenerated_Size`.
- For `Compressed_Block`, its required to decode both `Compressed_Size`
and `Regenerated_Size` (the decompressed size). It will also decode the number of streams.
- For `Raw_Literals_Block` and `RLE_Literals_Block`,
it's only necessary to decode `Regenerated_Size`.
There is no `Compressed_Size` field.
- For `Compressed_Block` and `Treeless_Literals_Block`,
it's required to decode both `Compressed_Size`
and `Regenerated_Size` (the decompressed size).
It's also necessary to decode the number of streams (1 or 4).
For values spanning several bytes, convention is little-endian.
For values spanning several bytes, convention is __little-endian__.
__`Size_Format` for `Raw_Literals_Block` and `RLE_Literals_Block`__ :
@ -463,9 +475,9 @@ __`Size_Format` for `Raw_Literals_Block` and `RLE_Literals_Block`__ :
Only Stream1 is present for these cases.
Note : it's allowed to represent a short value (for example `13`)
using a long format, accepting the increased compressed data size.
using a long format, even if it's less efficient.
__`Size_Format` for `Compressed_Literals_Block` and `Repeat_Stats_Literals_Block`__ :
__`Size_Format` for `Compressed_Literals_Block` and `Treeless_Literals_Block`__ :
- Value 00 : _A single stream_.
Both `Regenerated_Size` and `Compressed_Size` use 10 bits (0-1023).
@ -480,67 +492,68 @@ __`Size_Format` for `Compressed_Literals_Block` and `Repeat_Stats_Literals_Block
Both `Regenerated_Size` and `Compressed_Size` use 18 bits (0-262143).
`Literals_Section_Header` has 5 bytes.
Both `Compressed_Size` and `Regenerated_Size` fields follow little-endian convention.
Note: `Compressed_Size` __includes__ the size of the Huffman Tree description if it
is present.
Both `Compressed_Size` and `Regenerated_Size` fields follow __little-endian__ convention.
Note: `Compressed_Size` __includes__ the size of the Huffman Tree description
_when_ it is present.
### Raw Literals Block
The data in Stream1 is `Regenerated_Size` bytes long, and contains the raw literals data
to be used in sequence execution.
The data in Stream1 is `Regenerated_Size` bytes long;
it contains the raw literals data to be used during [Sequence Execution].
### RLE Literals Block
Stream1 consists of a single byte which should be repeated `Regenerated_Size` times
to generate the decoded literals.
### Compressed Literals Block and Repeat Stats Literals Block
Both of these modes contain Huffman encoded data
### Compressed Literals Block and Treeless Literals Block
Both of these modes contain Huffman encoded data.
`Treeless_Literals_Block` does not have a `Huffman_Tree_Description`.
#### `Huffman_Tree_Description`
This section is only present when `Literals_Block_Type` is `Compressed_Literals_Block` (`2`).
The format of the Huffman tree description can be found at [Huffman Tree description](#huffman-tree-description).
The size Huffman Tree description will be determined during the decoding process,
and must be used to determine where the compressed Huffman streams begin.
The size of `Huffman_Tree_Description` is determined during the decoding process;
it must be used to determine where streams begin.
`Total_Streams_Size = Compressed_Size - Huffman_Tree_Description_Size`.
If repeat stats mode is used, the Huffman table used in the previous compressed block will
be used to decompress this block as well.
For `Treeless_Literals_Block`,
the Huffman table comes from previously compressed literals block.
Huffman compressed data consists either 1 or 4 Huffman-coded streams.
Huffman compressed data consists of either 1 or 4 Huffman-coded streams.
If only one stream is present, it is a single bitstream occupying the entire
remaining portion of the literals block, encoded as described at
remaining portion of the literals block, encoded as described within
[Huffman-Coded Streams](#huffman-coded-streams).
If there are four streams, the literals section header only provides enough
information to know the regenerated and compressed sizes of all four streams combined.
The regenerated size of each stream is equal to `(totalSize+3)/4`, except for the last stream,
which may be up to 3 bytes smaller, to reach a total decompressed size match that described
in the literals header.
information to know the decompressed and compressed sizes of all four streams _combined_.
The decompressed size of each stream is equal to `(Regenerated_Size+3)/4`,
except for the last stream which may be up to 3 bytes smaller,
to reach a total decompressed size as specified in `Regenerated_Size`.
The compressed size of each stream is provided explicitly: the first 6 bytes of the compressed
data consist of three 2-byte little endian fields, describing the compressed sizes
of the first three streams.
The last streams size is computed from the total compressed size and the size of the other
three streams.
The compressed size of each stream is provided explicitly:
the first 6 bytes of the compressed data consist of three 2-byte __little-endian__ fields,
describing the compressed sizes of the first three streams.
`Stream4_Size` is computed from `Total_Streams_Size` minus the sizes of the other three streams.
`stream4CSize = totalCSize - 6 - stream1CSize - stream2CSize - stream3CSize`.
`Stream4_Size = Total_Streams_Size - 6 - Stream1_Size - Stream2_Size - Stream3_Size`.
Note: remember that totalCSize may be smaller than the `Compressed_Size` found in the literals
block header as `Compressed_Size` also contains the size of the Huffman Tree description if it
is present.
Note: remember that `Total_Streams_Size` can be smaller than `Compressed_Size` in header,
because `Compressed_Size` also contains `Huffman_Tree_Description_Size` when it is present.
Each of these 4 bitstreams is then decoded independently as a Huffman-Coded stream,
as described at [Huffman-Coded Streams](#huffman-coded-streams)
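The stream-size bookkeeping above can be summarized in a short C sketch (illustrative only ; bounds checks are reduced to a comment) :

```
#include <stdint.h>
#include <stddef.h>

typedef struct { size_t csize[4]; size_t dsize[4]; } stream_sizes_t;

/* src points to the 6-byte jump table at the start of the 4 streams ;
 * inputs with regenerated_size < 3 * part are corrupt and must be rejected */
static stream_sizes_t four_stream_sizes(const uint8_t *src,
                                        size_t total_streams_size,
                                        size_t regenerated_size)
{
    stream_sizes_t s;
    size_t const part = (regenerated_size + 3) / 4;
    s.dsize[0] = s.dsize[1] = s.dsize[2] = part;
    s.dsize[3] = regenerated_size - 3 * part;     /* up to 3 bytes smaller */
    s.csize[0] = src[0] | ((size_t)src[1] << 8);  /* 2-byte little-endian */
    s.csize[1] = src[2] | ((size_t)src[3] << 8);
    s.csize[2] = src[4] | ((size_t)src[5] << 8);
    s.csize[3] = total_streams_size - 6 - s.csize[0] - s.csize[1] - s.csize[2];
    return s;
}
```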
Sequences Section
-----------------
A compressed block is a succession of _sequences_ .
A sequence is a literal copy command, followed by a match copy command.
A literal copy command specifies a length.
It is the number of bytes to be copied (or extracted) from the literal section.
It is the number of bytes to be copied (or extracted) from the Literals Section.
A match copy command specifies an offset and a length.
When all _sequences_ are decoded,
if there is are any literals left in the _literal section_,
if there are literals left in the _literal section_,
these bytes are added at the end of the block.
This is described in more detail in [Sequence Execution](#sequence-execution)
@ -557,7 +570,7 @@ followed by the bitstream.
| -------------------------- | ------------------------- | ---------------- | ---------------------- | --------- |
To decode the `Sequences_Section`, it's required to know its size.
This size is deduced from `blockSize - literalSectionSize`.
This size is deduced from `Block_Size - Literals_Section_Size`.
#### `Sequences_Section_Header`
@ -572,7 +585,7 @@ This is a variable size field using between 1 and 3 bytes.
Let's call its first byte `byte0`.
- `if (byte0 == 0)` : there are no sequences.
The sequence section stops there.
Regenerated content is defined entirely by literals section.
Decompressed content is defined entirely as Literals Section content.
- `if (byte0 < 128)` : `Number_of_Sequences = byte0` . Uses 1 byte.
- `if (byte0 < 255)` : `Number_of_Sequences = ((byte0-128) << 8) + byte1` . Uses 2 bytes.
- `if (byte0 == 255)`: `Number_of_Sequences = byte1 + (byte2<<8) + 0x7F00` . Uses 3 bytes.
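These rules translate directly into code ; a sketch (illustrative only) :

```
#include <stdint.h>
#include <stddef.h>

/* returns Number_of_Sequences ; *consumed receives the bytes read (1-3) */
static size_t decode_num_sequences(const uint8_t *src, size_t *consumed)
{
    uint8_t const byte0 = src[0];
    if (byte0 == 0)  { *consumed = 1; return 0; }
    if (byte0 < 128) { *consumed = 1; return byte0; }
    if (byte0 < 255) { *consumed = 2; return ((size_t)(byte0 - 128) << 8) + src[1]; }
    *consumed = 3;
    return (size_t)src[1] + ((size_t)src[2] << 8) + 0x7F00;
}
```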
@ -581,14 +594,14 @@ __Symbol compression modes__
This is a single byte, defining the compression mode of each symbol type.
|Bit number| 7-6 | 5-4 | 3-2 | 1-0 |
|Bit number| 7-6 | 5-4 | 3-2 | 1-0 |
| -------- | ----------------------- | -------------- | -------------------- | ---------- |
|Field name| `Literals_Lengths_Mode` | `Offsets_Mode` | `Match_Lengths_Mode` | `Reserved` |
The last field, `Reserved`, must be all-zeroes.
`Literals_Lengths_Mode`, `Offsets_Mode` and `Match_Lengths_Mode` define the `Compression_Mode` of
literals lengths, offsets, and match lengths respectively.
literals lengths, offsets, and match lengths symbols respectively.
They follow the same enumeration :
@ -598,17 +611,17 @@ They follow the same enumeration :
- `Predefined_Mode` : A predefined FSE distribution table is used, defined in
[default distributions](#default-distributions).
The table takes no space in the compressed data.
No distribution table will be present.
- `RLE_Mode` : The table description consists of a single byte.
This code will be repeated for every sequence.
This code will be repeated for all sequences.
- `Repeat_Mode` : The table used in the previous compressed block will be used again.
No distribution table will be present.
Note: this includes RLE mode, so if repeat_mode follows rle_mode the same symbol will be repeated.
Note: this includes RLE mode, so if `Repeat_Mode` follows `RLE_Mode`, the same symbol will be repeated.
If this mode is used without any previous sequence table in the frame
(or [dictionary](#dictionary-format)) to repeat, this should be treated as corruption.
- `FSE_Compressed_Mode` : standard FSE compression.
A distribution table will be present.
The format of this distribution table is described in (FSE Table Description)[#fse-table-description].
The format of this distribution table is described in [FSE Table Description](#fse-table-description).
Note that the maximum allowed accuracy log for literals length and match length tables is 9,
and the maximum accuracy log for the offsets table is 8.
@ -625,7 +638,7 @@ Literals length codes are values ranging from `0` to `35` included.
They define lengths from 0 to 131071 bytes.
The literals length is equal to the decoded `Baseline` plus
the result of reading `Number_of_Bits` bits from the bitstream,
as a little-endian value.
as a __little-endian__ value.
| `Literals_Length_Code` | 0-15 |
| ---------------------- | ---------------------- |
@ -654,7 +667,7 @@ Match length codes are values ranging from `0` to `52` included.
They define lengths from 3 to 131074 bytes.
The match length is equal to the decoded `Baseline` plus
the result of reading `Number_of_Bits` bits from the bitstream,
as a little-endian value.
as a __little-endian__ value.
| `Match_Length_Code` | 0-31 |
| ------------------- | ----------------------- |
@ -685,7 +698,7 @@ Recommendation is to support at least up to `22`.
For information, at the time of this writing,
the reference decoder supports a maximum `N` value of `28` in 64-bits mode.
An offset code is also the number of additional bits to read in little-endian fashion,
An offset code is also the number of additional bits to read in __little-endian__ fashion,
and can be translated into an `Offset_Value` using the following formulas :
```
@ -720,8 +733,8 @@ begins.
FSE decoding requires a 'state' to be carried from symbol to symbol.
For more explanation on FSE decoding, see the [FSE section](#fse).
For sequence decoding, a separate state must be kept track of for each of
literal lengths, offsets, and match lengths.
For sequence decoding, a separate state keeps track of each symbol type :
literals lengths, offsets, and match lengths.
Some FSE primitives are also used.
For more details on the operation of these primitives, see the [FSE section](#fse).
@ -753,8 +766,7 @@ See the [description of the codes] for how to determine these values.
[description of the codes]: #the-codes-for-literals-lengths-match-lengths-and-offsets
Decoding starts by reading the `Number_of_Bits` required to decode `Offset`.
It then does the same for `Match_Length`,
and then for `Literals_Length`.
It then does the same for `Match_Length`, and then for `Literals_Length`.
This sequence is then used for [sequence execution](#sequence-execution).
If it is not the last sequence in the block,
@ -807,6 +819,7 @@ short offsetCodes_defaultDistribution[29] =
1, 1, 1, 1, 1, 1, 1, 1,-1,-1,-1,-1,-1 };
```
Sequence Execution
------------------
Once literals and sequences have been decoded,
@ -826,7 +839,8 @@ in this case.
The offset is defined as from the current position, so an offset of 6
and a match length of 3 means that 3 bytes should be copied from 6 bytes back.
Note that all offsets must be at most equal to the window size defined by the frame header.
Note that all offsets leading to previously decoded data
must be smaller than `Window_Size` defined in `Frame_Header_Descriptor`.
#### Repeat offsets
As seen in [Sequence Execution](#sequence-execution),
@ -842,11 +856,10 @@ so an `offset_value` of 1 means `Repeated_Offset2`,
an `offset_value` of 2 means `Repeated_Offset3`,
and an `offset_value` of 3 means `Repeated_Offset1 - 1_byte`.
In the first block, the offset history is populated with the following values : 1, 4 and 8 (in order).
For the first block, the starting offset history is populated with the following values : 1, 4 and 8 (in order).
Then each block gets its starting offset history from the ending values of the most recent compressed block.
Note that non-compressed blocks are skipped,
they do not contribute to offset history.
Then each block gets its starting offset history from the ending values of the most recent `Compressed_Block`.
Note that blocks which are not `Compressed_Block` are skipped, they do not contribute to offset history.
[Offset Codes]: #offset-codes
@ -859,6 +872,7 @@ This means that when `Repeated_Offset1` (most recent) is used, history is unmodi
When `Repeated_Offset2` is used, it's swapped with `Repeated_Offset1`.
If any other offset is used, it becomes `Repeated_Offset1` and the rest are shifted back by one.
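A sketch of this update rule (illustrative only) :

```
#include <stdint.h>

/* used : 0/1/2 for Repeated_Offset1/2/3, or -1 for a non-repeat offset ;
 * offset : the resolved offset value for this sequence */
static void update_offset_history(uint64_t hist[3], int used, uint64_t offset)
{
    if (used == 0) return;                     /* most recent : unchanged */
    if (used == 1) {                           /* swap with Repeated_Offset1 */
        uint64_t const tmp = hist[0];
        hist[0] = hist[1];
        hist[1] = tmp;
        return;
    }
    hist[2] = hist[1];                         /* shift the rest back by one */
    hist[1] = hist[0];
    hist[0] = offset;                          /* becomes Repeated_Offset1 */
}
```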
Skippable Frames
----------------
@ -878,7 +892,7 @@ Skippable frames defined in this specification are compatible with [LZ4] ones.
__`Magic_Number`__
4 Bytes, little-endian format.
4 Bytes, __little-endian__ format.
Value : 0x184D2A5?, which means any value from 0x184D2A50 to 0x184D2A5F.
All 16 values are valid to identify a skippable frame.
@ -886,13 +900,14 @@ __`Frame_Size`__
This is the size, in bytes, of the following `User_Data`
(without including the magic number nor the size field itself).
This field is represented using 4 Bytes, little-endian format, unsigned 32-bits.
This field is represented using 4 Bytes, __little-endian__ format, unsigned 32-bits.
This means `User_Data` cant be bigger than (2^32-1) bytes.
__`User_Data`__
The `User_Data` can be anything. Data will just be skipped by the decoder.
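Detecting and skipping such a frame is straightforward ; a sketch (illustrative only) :

```
#include <stdint.h>
#include <stddef.h>

static uint32_t read_le32(const uint8_t *p)
{
    return (uint32_t)p[0] | ((uint32_t)p[1] << 8)
         | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* returns the total frame size (8-byte header + User_Data),
 * or 0 if src does not start a skippable frame */
static size_t skippable_frame_size(const uint8_t *src, size_t src_size)
{
    if (src_size < 8) return 0;
    if ((read_le32(src) & 0xFFFFFFF0u) != 0x184D2A50u) return 0;
    return 8 + (size_t)read_le32(src + 4);
}
```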
Entropy Encoding
----------------
Two types of entropy encoding are used by the Zstandard format:
@ -900,7 +915,7 @@ FSE, and Huffman coding.
FSE
---
FSE, or FiniteStateEntropy is an entropy coding based on [ANS].
FSE, short for Finite State Entropy, is an entropy codec based on [ANS].
FSE encoding/decoding involves a state that is carried over between symbols,
so decoding must be done in the opposite direction as encoding.
Therefore, all FSE bitstreams are read from end to beginning.
@ -909,15 +924,15 @@ For additional details on FSE, see [Finite State Entropy].
[Finite State Entropy]:https://github.com/Cyan4973/FiniteStateEntropy/
FSE decoding involves a decoding table which has a power of 2 size and three elements:
FSE decoding involves a decoding table which has a power of 2 size, and contains three elements:
`Symbol`, `Num_Bits`, and `Baseline`.
The `log2` of the table size is its `Accuracy_Log`.
The FSE state represents an index in this table.
The next symbol in the stream is the symbol indicated by the table value for that state.
To obtain the next state value,
the decoder should consume `Num_Bits` bits from the stream as a little endian value and add it to baseline.
To obtain the initial state value, consume `Accuracy_Log` bits from the stream as a little endian value.
To obtain the initial state value, consume `Accuracy_Log` bits from the stream as a __little-endian__ value.
The next symbol in the stream is the `Symbol` indicated in the table for that state.
To obtain the next state value,
the decoder should consume `Num_Bits` bits from the stream as a __little-endian__ value and add it to `Baseline`.
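As an illustration (the bit-reading helper is assumed to exist elsewhere and is not part of this specification), one decoding step looks like :

```
#include <stdint.h>

typedef struct { uint8_t symbol; uint8_t num_bits; uint16_t baseline; } fse_entry_t;

/* assumed helper : reads num_bits from the stream, little-endian */
extern uint64_t read_bits(int num_bits);

/* emits the symbol for the current state, then computes the next state ;
 * the initial state is read_bits(accuracy_log) */
static uint8_t fse_decode_step(const fse_entry_t *table, uint16_t *state)
{
    fse_entry_t const e = table[*state];
    *state = e.baseline + (uint16_t)read_bits(e.num_bits);
    return e.symbol;
}
```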
[ANS]: https://en.wikipedia.org/wiki/Asymmetric_Numeral_Systems
@ -929,7 +944,7 @@ An FSE distribution table describes the probabilities of all symbols
from `0` to the last present one (included)
on a normalized scale of `1 << Accuracy_Log` .
It's a bitstream which is read forward, in little-endian fashion.
It's a bitstream which is read forward, in __little-endian__ fashion.
It's not necessary to know its exact size,
since it will be discovered and reported by the decoding process.
@ -1064,7 +1079,7 @@ Huffman Coding
--------------
Zstandard Huffman-coded streams are read backwards,
similar to the FSE bitstreams.
Therefore, to find the start of the bitstream it is therefore necessary to
Therefore, to find the start of the bitstream, it is necessary to
know the offset of the last byte of the Huffman-coded stream.
After writing the last bit containing information, the compressor
@ -1077,7 +1092,7 @@ byte to read. The decompressor needs to skip 0-7 initial `0`-bits and
the first `1`-bit that occurs. Afterwards, the useful part of the bitstream
begins.
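A sketch of the skip computation (illustrative only ; a zero last byte is invalid and should be rejected) :

```
#include <stdint.h>

/* number of non-useful bits at the end of the stream :
 * 0-7 zero bits plus the final 1-bit flag itself */
static int initial_padding(uint8_t last_byte)
{
    int highest = 7;
    while (highest >= 0 && !(last_byte & (1u << highest)))
        highest--;
    return 8 - highest;   /* e.g. 0x80 -> 1, 0x01 -> 8 ; 0 is invalid */
}
```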
The bitstream contains Huffman-coded symbols in little-endian order,
The bitstream contains Huffman-coded symbols in __little-endian__ order,
with the codes defined by the method below.
### Huffman Tree Description
@ -1182,14 +1197,14 @@ The Huffman header compression uses 2 states,
which share the same FSE distribution table.
The first state (`State1`) encodes the even indexed symbols,
and the second (`State2`) encodes the odd indexes.
State1 is initialized first, and then State2, and they take turns decoding
a single symbol and updating their state.
`State1` is initialized first, and then `State2`, and they take turns
decoding a single symbol and updating their state.
For more details on these FSE operations, see the [FSE section](#fse).
The number of symbols to decode is determined
by tracking bitStream overflow condition:
If updating state after decoding a symbol would require more bits than
remain in the stream, it is assumed the extra bits are 0. Then,
remain in the stream, it is assumed that extra bits are 0. Then,
the symbols for each of the final states are decoded and the process is complete.
##### Conversion from weights to Huffman prefix codes
@ -1245,7 +1260,7 @@ it would be encoded as:
|Encoding|`0000`|`0001`|`01`|`1`| `10000` |
Starting from the end,
it's possible to read the bitstream in a little-endian fashion,
it's possible to read the bitstream in a __little-endian__ fashion,
keeping track of already used bits. Since the bitstream is encoded in reverse
order, by starting at the end the symbols can be read in forward order.
@ -1258,13 +1273,14 @@ If a bitstream is not entirely and exactly consumed,
hence reaching exactly its beginning position with _all_ bits consumed,
the decoding process is considered faulty.
Dictionary Format
-----------------
Zstandard is compatible with "raw content" dictionaries, free of any format restriction,
except that they must be at least 8 bytes.
These dictionaries function as if they were just the `Content` block of a formatted
dictionary.
Zstandard is compatible with "raw content" dictionaries,
free of any format restriction, except that they must be at least 8 bytes.
These dictionaries function as if they were just the `Content` part
of a formatted dictionary.
But dictionaries created by `zstd --train` follow a format, described here.
@ -1274,9 +1290,9 @@ __Pre-requisites__ : a dictionary has a size,
| `Magic_Number` | `Dictionary_ID` | `Entropy_Tables` | `Content` |
| -------------- | --------------- | ---------------- | --------- |
__`Magic_Number`__ : 4 bytes ID, value 0xEC30A437, little-endian format
__`Magic_Number`__ : 4 bytes ID, value 0xEC30A437, __little-endian__ format
__`Dictionary_ID`__ : 4 bytes, stored in little-endian format.
__`Dictionary_ID`__ : 4 bytes, stored in __little-endian__ format.
`Dictionary_ID` can be any value, except 0 (which means no `Dictionary_ID`).
It's used by decoders to check if they use the correct dictionary.
@ -1284,9 +1300,9 @@ _Reserved ranges :_
If the frame is going to be distributed in a private environment,
any `Dictionary_ID` can be used.
However, for public distribution of compressed frames,
the following ranges are reserved for future use and should not be used :
the following ranges are reserved and shall not be used :
- low range : 1 - 32767
- low range : <= 32767
- high range : >= (2^31)
__`Entropy_Tables`__ : following the same format as the tables in compressed blocks.
@ -1298,26 +1314,30 @@ __`Entropy_Tables`__ : following the same format as the tables in compressed blo
These tables populate the Repeat Stats literals mode and
Repeat distribution mode for sequence decoding.
It's finally followed by 3 offset values, populating recent offsets (instead of using `{1,4,8}`),
stored in order, 4-bytes little-endian each, for a total of 12 bytes.
stored in order, 4-bytes __little-endian__ each, for a total of 12 bytes.
Each recent offset must have a value < dictionary size.
__`Content`__ : The rest of the dictionary is its content.
The content acts as a "past" in front of data to compress or decompress,
so it can be referenced in sequence commands.
As long as the amount of data decoded from this frame is less than or
equal to the window-size, sequence commands may specify offsets longer
than the lenght of total decoded output so far to reference back to the
dictionary. After the total output has surpassed the window size however,
equal to `Window_Size`, sequence commands may specify offsets longer
than the total length of decoded output so far to reference back to the
dictionary. After the total output has surpassed `Window_Size` however,
this is no longer allowed and the dictionary is no longer accessible.
[compressed blocks]: #the-format-of-compressed_block
Appendix A - Decoding tables for predefined codes
-------------------------------------------------
This appendix contains FSE decoding tables for the predefined literal length, match length, and offset
codes. The tables have been constructed using the algorithm as given above in the
"from normalized distribution to decoding tables" chapter. The tables here can be used as examples
to crosscheck that an implementation implements the decoding table generation algorithm correctly.
This appendix contains FSE decoding tables
for the predefined literal length, match length, and offset codes.
The tables have been constructed using the algorithm as given above in chapter
"from normalized distribution to decoding tables".
The tables here can be used as examples
to crosscheck that an implementation builds its decoding tables correctly.
#### Literal Length Code:
@ -1496,6 +1516,7 @@ to crosscheck that an implementation implements the decoding table generation al
Version changes
---------------
- 0.2.5 : minor typos and clarifications
- 0.2.4 : section restructuring, by Sean Purcell
- 0.2.3 : clarified several details, by Sean Purcell
- 0.2.2 : added predefined codes, by Johannes Rudolph

View File

@ -1,10 +1,10 @@
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<title>zstd 1.1.4 Manual</title>
<title>zstd 1.3.0 Manual</title>
</head>
<body>
<h1>zstd 1.1.4 Manual</h1>
<h1>zstd 1.3.0 Manual</h1>
<hr>
<a name="Contents"></a><h2>Contents</h2>
<ol>
@ -19,8 +19,8 @@
<li><a href="#Chapter9">Streaming decompression - HowTo</a></li>
<li><a href="#Chapter10">START OF ADVANCED AND EXPERIMENTAL FUNCTIONS</a></li>
<li><a href="#Chapter11">Advanced types</a></li>
<li><a href="#Chapter12">Compressed size functions</a></li>
<li><a href="#Chapter13">Decompressed size functions</a></li>
<li><a href="#Chapter12">Frame size functions</a></li>
<li><a href="#Chapter13">Context memory usage</a></li>
<li><a href="#Chapter14">Advanced compression functions</a></li>
<li><a href="#Chapter15">Advanced decompression functions</a></li>
<li><a href="#Chapter16">Advanced streaming functions</a></li>
@ -55,48 +55,48 @@
<a name="Chapter3"></a><h2>Simple API</h2><pre></pre>
<pre><b>size_t ZSTD_compress( void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
int compressionLevel);
</b><p> Compresses `src` content as a single zstd compressed frame into already allocated `dst`.
Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
@return : compressed size written into `dst` (<= `dstCapacity),
or an error code if it fails (which can be tested using ZSTD_isError()).
const void* src, size_t srcSize,
int compressionLevel);
</b><p> Compresses `src` content as a single zstd compressed frame into already allocated `dst`.
Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
@return : compressed size written into `dst` (<= `dstCapacity`),
or an error code if it fails (which can be tested using ZSTD_isError()).
</p></pre><BR>
<pre><b>size_t ZSTD_decompress( void* dst, size_t dstCapacity,
const void* src, size_t compressedSize);
</b><p> `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
`dstCapacity` is an upper bound of originalSize.
If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data.
@return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
or an errorCode if it fails (which can be tested using ZSTD_isError()).
const void* src, size_t compressedSize);
</b><p> `compressedSize` : must be the _exact_ size of some number of compressed and/or skippable frames.
`dstCapacity` is an upper bound of originalSize.
If user cannot imply a maximum upper bound, it's better to use streaming mode to decompress data.
@return : the number of bytes decompressed into `dst` (<= `dstCapacity`),
or an errorCode if it fails (which can be tested using ZSTD_isError()).
</p></pre><BR>
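<p>A minimal round-trip sketch using the two calls above (illustrative only ; buffer allocation and error handling are reduced to comments):</p>
<pre><b>size_t const bound = ZSTD_compressBound(srcSize);
void* const cBuf = malloc(bound);                    /* requires stdlib.h */
size_t const cSize = ZSTD_compress(cBuf, bound, src, srcSize, 1);
if (ZSTD_isError(cSize)) { /* handle error */ }
size_t const dSize = ZSTD_decompress(dst, dstCapacity, cBuf, cSize);
if (ZSTD_isError(dSize)) { /* handle error */ }
</b></pre><BR>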
<pre><b>unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
</b><p> NOTE: This function is planned to be obsolete, in favour of ZSTD_getFrameContentSize.
ZSTD_getFrameContentSize functions the same way, returning the decompressed size of a single
frame, but distinguishes empty frames from frames with an unknown size, or errors.
</b><p> NOTE: This function is planned to be obsolete, in favour of ZSTD_getFrameContentSize.
ZSTD_getFrameContentSize functions the same way, returning the decompressed size of a single
frame, but distinguishes empty frames from frames with an unknown size, or errors.
Additionally, ZSTD_findDecompressedSize can be used instead. It can handle multiple
concatenated frames in one buffer, and so is more general.
As a result however, it requires more computation and entire frames to be passed to it,
as opposed to ZSTD_getFrameContentSize which requires only a single frame's header.
Additionally, ZSTD_findDecompressedSize can be used instead. It can handle multiple
concatenated frames in one buffer, and so is more general.
As a result however, it requires more computation and entire frames to be passed to it,
as opposed to ZSTD_getFrameContentSize which requires only a single frame's header.
'src' is the start of a zstd compressed frame.
@return : content size to be decompressed, as a 64-bits value _if known_, 0 otherwise.
note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.
When `return==0`, data to decompress could be any size.
In which case, it's necessary to use streaming mode to decompress data.
Optionally, application can still use ZSTD_decompress() while relying on implied limits.
(For example, data may be necessarily cut into blocks <= 16 KB).
note 2 : decompressed size is always present when compression is done with ZSTD_compress()
note 3 : decompressed size can be very large (64-bits value),
potentially larger than what local system can handle as a single memory segment.
In which case, it's necessary to use streaming mode to decompress data.
note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
Always ensure result fits within application's authorized limits.
Each application can set its own limits.
note 5 : when `return==0`, if precise failure cause is needed, use ZSTD_getFrameParams() to know more.
'src' is the start of a zstd compressed frame.
@return : content size to be decompressed, as a 64-bits value _if known_, 0 otherwise.
note 1 : decompressed size is an optional field, that may not be present, especially in streaming mode.
When `return==0`, data to decompress could be any size.
In which case, it's necessary to use streaming mode to decompress data.
Optionally, application can still use ZSTD_decompress() while relying on implied limits.
(For example, data may be necessarily cut into blocks <= 16 KB).
note 2 : decompressed size is always present when compression is done with ZSTD_compress()
note 3 : decompressed size can be very large (64-bits value),
potentially larger than what local system can handle as a single memory segment.
In which case, it's necessary to use streaming mode to decompress data.
note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
Always ensure result fits within application's authorized limits.
Each application can set its own limits.
note 5 : when `return==0`, if precise failure cause is needed, use ZSTD_getFrameParams() to know more.
</p></pre><BR>
<h3>Helper functions</h3><pre></pre><b><pre>int ZSTD_maxCLevel(void); </b>/*!< maximum compression level available */<b>
@ -106,42 +106,46 @@ const char* ZSTD_getErrorName(size_t code); </b>/*!< provides readable strin
</pre></b><BR>
<a name="Chapter4"></a><h2>Explicit memory management</h2><pre></pre>
<h3>Compression context</h3><pre> When compressing many times,
it is recommended to allocate a context just once, and re-use it for each successive compression operation.
This will make workload friendlier for system's memory.
Use one context per thread for parallel execution in multi-threaded environments.
<h3>Compression context</h3><pre> When compressing many times,
it is recommended to allocate a context just once, and re-use it for each successive compression operation.
This will make workload friendlier for system's memory.
Use one context per thread for parallel execution in multi-threaded environments.
</pre><b><pre>typedef struct ZSTD_CCtx_s ZSTD_CCtx;
ZSTD_CCtx* ZSTD_createCCtx(void);
size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx);
</pre></b><BR>
<pre><b>size_t ZSTD_compressCCtx(ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel);
</b><p> Same as ZSTD_compress(), requires an allocated ZSTD_CCtx (see ZSTD_createCCtx()).
</b><p> Same as ZSTD_compress(), requires an allocated ZSTD_CCtx (see ZSTD_createCCtx()).
</p></pre><BR>
<h3>Decompression context</h3><pre></pre><b><pre>typedef struct ZSTD_DCtx_s ZSTD_DCtx;
<h3>Decompression context</h3><pre> When decompressing many times,
it is recommended to allocate a context just once, and re-use it for each successive decompression operation.
This will make workload friendlier for system's memory.
Use one context per thread for parallel execution in multi-threaded environments.
</pre><b><pre>typedef struct ZSTD_DCtx_s ZSTD_DCtx;
ZSTD_DCtx* ZSTD_createDCtx(void);
size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
</pre></b><BR>
<pre><b>size_t ZSTD_decompressDCtx(ZSTD_DCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
</b><p> Same as ZSTD_decompress(), requires an allocated ZSTD_DCtx (see ZSTD_createDCtx()).
</b><p> Same as ZSTD_decompress(), requires an allocated ZSTD_DCtx (see ZSTD_createDCtx()).
</p></pre><BR>
<a name="Chapter5"></a><h2>Simple dictionary API</h2><pre></pre>
<pre><b>size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict,size_t dictSize,
int compressionLevel);
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict,size_t dictSize,
int compressionLevel);
</b><p> Compression using a predefined Dictionary (see dictBuilder/zdict.h).
Note : This function loads the dictionary, resulting in significant startup delay.
Note : When `dict == NULL || dictSize < 8` no dictionary is used.
</p></pre><BR>
<pre><b>size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict,size_t dictSize);
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict,size_t dictSize);
</b><p> Decompression using a predefined Dictionary (see dictBuilder/zdict.h).
Dictionary must be identical to the one used during compression.
Note : This function loads the dictionary, resulting in significant startup delay.
@ -162,12 +166,13 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
</p></pre><BR>
<pre><b>size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTD_CDict* cdict);
</b><p> Compression using a digested Dictionary.
Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
Note that compression level is decided during dictionary creation.
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTD_CDict* cdict);
</b><p> Compression using a digested Dictionary.
Faster startup than ZSTD_compress_usingDict(), recommended when same dictionary is used multiple times.
Note that compression level is decided during dictionary creation.
Frame parameters are hardcoded (dictID=yes, contentSize=yes, checksum=no)
</p></pre><BR>
<pre><b>ZSTD_DDict* ZSTD_createDDict(const void* dictBuffer, size_t dictSize);
@ -180,9 +185,9 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
</p></pre><BR>
<pre><b>size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTD_DDict* ddict);
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTD_DDict* ddict);
</b><p> Decompression using a digested Dictionary.
Faster startup than ZSTD_decompress_usingDict(), recommended when same dictionary is used multiple times.
</p></pre><BR>
@ -239,6 +244,16 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
<BR></pre>
<pre><b>typedef ZSTD_CCtx ZSTD_CStream; </b>/**< CCtx and CStream are effectively same object */<b>
</b></pre><BR>
<h3>ZSTD_CStream management functions</h3><pre></pre><b><pre>ZSTD_CStream* ZSTD_createCStream(void);
size_t ZSTD_freeCStream(ZSTD_CStream* zcs);
</pre></b><BR>
<h3>Streaming compression functions</h3><pre></pre><b><pre>size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output);
</pre></b><BR>
<pre><b>size_t ZSTD_CStreamInSize(void); </b>/**< recommended size for input buffer */<b>
</b></pre><BR>
<pre><b>size_t ZSTD_CStreamOutSize(void); </b>/**< recommended size for output buffer. Guarantee to successfully flush at least one complete compressed block in all circumstances. */<b>
@ -264,6 +279,12 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
<BR></pre>
<h3>ZSTD_DStream management functions</h3><pre></pre><b><pre>ZSTD_DStream* ZSTD_createDStream(void);
size_t ZSTD_freeDStream(ZSTD_DStream* zds);
</pre></b><BR>
<h3>Streaming decompression functions</h3><pre></pre><b><pre>size_t ZSTD_initDStream(ZSTD_DStream* zds);
size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
</pre></b><BR>
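<p> Example sketch (illustrative only) : the matching streaming-decompression loop,
 again with placeholder `read_input()` / `write_output()` helpers.</p>
<pre>
ZSTD_DStream* const zds = ZSTD_createDStream();
size_t const inSize  = ZSTD_DStreamInSize();
size_t const outSize = ZSTD_DStreamOutSize();
void* const inBuf  = malloc(inSize);
void* const outBuf = malloc(outSize);

size_t const initErr = ZSTD_initDStream(zds);
if (ZSTD_isError(initErr)) { /* handle error */ }

size_t readBytes;
while ((readBytes = read_input(inBuf, inSize)) > 0) {
    ZSTD_inBuffer input = { inBuf, readBytes, 0 };
    while (input.pos < input.size) {
        ZSTD_outBuffer output = { outBuf, outSize, 0 };
        size_t const hint = ZSTD_decompressStream(zds, &output, &input);
        if (ZSTD_isError(hint)) { /* handle error */ }
        write_output(outBuf, output.pos);
        /* hint == 0 : a frame just finished decoding */
    }
}
ZSTD_freeDStream(zds);
free(inBuf); free(outBuf);
</pre><BR>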
<pre><b>size_t ZSTD_DStreamInSize(void); </b>/*!< recommended size for input buffer */<b>
</b></pre><BR>
<pre><b>size_t ZSTD_DStreamOutSize(void); </b>/*!< recommended size for output buffer. Guarantee to successfully flush at least one complete block in all circumstances. */<b>
@ -300,70 +321,101 @@ size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx);
ZSTD_frameParameters fParams;
} ZSTD_parameters;
</b></pre><BR>
<pre><b>typedef struct {
unsigned long long frameContentSize;
unsigned windowSize;
unsigned dictID;
unsigned checksumFlag;
} ZSTD_frameHeader;
</b></pre><BR>
<h3>Custom memory allocation functions</h3><pre></pre><b><pre>typedef void* (*ZSTD_allocFunction) (void* opaque, size_t size);
typedef void (*ZSTD_freeFunction) (void* opaque, void* address);
typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; void* opaque; } ZSTD_customMem;
</pre></b><BR>
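<p> Example sketch (illustrative only) : routing zstd's internal allocations through custom hooks.
 Here the hooks simply forward to malloc()/free(); `opaque` could point at an arena or statistics struct.</p>
<pre>
static void* myAlloc(void* opaque, size_t size)   { (void)opaque; return malloc(size); }
static void  myFree (void* opaque, void* address) { (void)opaque; free(address); }

ZSTD_customMem const cmem = { myAlloc, myFree, NULL };
ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(cmem);
/* ... use cctx as usual, then ZSTD_freeCCtx(cctx) ... */
</pre><BR>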
<a name="Chapter12"></a><h2>Compressed size functions</h2><pre></pre>
<a name="Chapter12"></a><h2>Frame size functions</h2><pre></pre>
<pre><b>size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize);
</b><p> `src` should point to the start of a ZSTD encoded frame or skippable frame
`srcSize` must be at least as large as the frame
 @return : the compressed size of the frame pointed to by `src`,
           suitable to pass to `ZSTD_decompress` or similar,
           or an error code if given invalid input.
</p></pre><BR>
<a name="Chapter13"></a><h2>Decompressed size functions</h2><pre></pre>
<pre><b>unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
</b><p> `src` should point to the start of a ZSTD encoded frame
`srcSize` must be at least as large as the frame header. A value greater than or equal
to `ZSTD_frameHeaderSize_max` is guaranteed to be large enough in all cases.
@return : decompressed size of the frame pointed to be `src` if known, otherwise
- ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
- ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
<pre><b>#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1)
#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2)
unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize);
</b><p> `src` should point to the start of a ZSTD encoded frame.
`srcSize` must be at least as large as the frame header.
A value >= `ZSTD_frameHeaderSize_max` is guaranteed to be large enough.
  @return : - decompressed size of the frame pointed to by `src`, if known
- ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
- ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small)
</p></pre><BR>
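<p> Example sketch (illustrative only) : use the recorded content size to allocate an exact destination buffer,
 falling back to streaming when the size is not recorded. `cSrc`/`cSrcSize` hold a complete frame.</p>
<pre>
unsigned long long const rSize = ZSTD_getFrameContentSize(cSrc, cSrcSize);
if (rSize == ZSTD_CONTENTSIZE_ERROR) {
    /* not a valid zstd frame */
} else if (rSize == ZSTD_CONTENTSIZE_UNKNOWN) {
    /* size not recorded : fall back to streaming decompression */
} else {
    void* const dst = malloc((size_t)rSize);   /* bound-check rSize against your own limits first */
    size_t const dSize = ZSTD_decompress(dst, (size_t)rSize, cSrc, cSrcSize);
    if (ZSTD_isError(dSize)) { /* handle error */ }
}
</pre><BR>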
<pre><b>unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize);
</b><p>  `src` should point to the start of a series of ZSTD encoded and/or skippable frames
  `srcSize` must be the _exact_ size of this series
       (i.e. there should be a frame boundary exactly `srcSize` bytes after `src`)
  @return : - decompressed size of all data in all successive frames
            - if the decompressed size cannot be determined: ZSTD_CONTENTSIZE_UNKNOWN
            - if an error occurred: ZSTD_CONTENTSIZE_ERROR

   note 1 : decompressed size is an optional field, which may not be present, especially in streaming mode.
            When `return==ZSTD_CONTENTSIZE_UNKNOWN`, data to decompress could be any size.
            In which case, it's necessary to use streaming mode to decompress data.
            Optionally, application can still use ZSTD_decompress() while relying on implied limits.
            (For example, data may be necessarily cut into blocks <= 16 KB).
   note 2 : decompressed size is always present when compression is done with ZSTD_compress()
   note 3 : decompressed size can be very large (64-bits value),
            potentially larger than what local system can handle as a single memory segment.
            In which case, it's necessary to use streaming mode to decompress data.
   note 4 : If source is untrusted, decompressed size could be wrong or intentionally modified.
            Always ensure result fits within application's authorized limits.
            Each application can set its own limits.
   note 5 : ZSTD_findDecompressedSize() handles multiple frames, and so it must traverse the input to
            read each contained frame header. This is efficient as most of the data is skipped;
            however, it does mean that all frame data must be present and valid.
</p></pre><BR>
<a name="Chapter13"></a><h2>Context memory usage</h2><pre></pre>
<pre><b>size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx);
size_t ZSTD_sizeof_DCtx(const ZSTD_DCtx* dctx);
size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs);
size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict);
size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
</b><p>  These functions give the current memory usage of the selected object.
  Note that object memory usage can evolve when the object is re-used multiple times.
</p></pre><BR>
<pre><b>size_t ZSTD_estimateCCtxSize(ZSTD_compressionParameters cParams);
size_t ZSTD_estimateDCtxSize(void);
</b><p>  These functions make it possible to estimate the memory usage
  of a future target object, before its allocation,
  given a set of parameters which varies depending on the target object.
  The objective is to guide allocation decisions.
</p></pre><BR>
<pre><b>size_t ZSTD_estimateCStreamSize(ZSTD_compressionParameters cParams);
size_t ZSTD_estimateDStreamSize(ZSTD_frameHeader fHeader);
</b><p>  Note : if streaming is initialized with ZSTD_init?Stream_usingDict(),
  an internal ?Dict will be created, whose size is not covered by this estimation.
  In this case, obtain the additional size with ZSTD_estimate?DictSize().
<pre><b>size_t ZSTD_estimateCDictSize(ZSTD_compressionParameters cParams, size_t dictSize);
size_t ZSTD_estimateDDictSize(size_t dictSize);
</b><p>  Note : if the dictionary is created "byReference", reduce the estimation by dictSize.
</p></pre><BR>
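<p> Example sketch (illustrative only) : budget memory before allocating anything.
 `srcSize`, `dictSize` and `memoryBudget` are assumed known upfront.</p>
<pre>
ZSTD_compressionParameters const cParams =
        ZSTD_getCParams(3 /* level */, srcSize, dictSize);
size_t const cctxNeed  = ZSTD_estimateCCtxSize(cParams);
size_t const cdictNeed = ZSTD_estimateCDictSize(cParams, dictSize);
if (cctxNeed + cdictNeed > memoryBudget) {
    /* pick a lighter compression level, or drop the dictionary */
}
</pre><BR>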
<a name="Chapter14"></a><h2>Advanced compression functions</h2><pre></pre>
<pre><b>ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem);
</b><p> Create a ZSTD compression context using external alloc and free functions
</p></pre><BR>
<pre><b>typedef enum {
ZSTD_p_forceWindow, </b>/* Force back-references to remain < windowSize, even when referencing Dictionary content (default:0) */<b>
ZSTD_p_forceRawDict </b>/* Force loading dictionary in "content-only" mode (no header analysis) */<b>
@ -381,14 +433,10 @@ typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; v
</p></pre><BR>
<pre><b>ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize, unsigned byReference,
ZSTD_compressionParameters cParams, ZSTD_customMem customMem);
</b><p> Create a ZSTD_CDict using external alloc and free, and customized compression parameters
</p></pre><BR>
<pre><b>ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long estimatedSrcSize, size_t dictSize);
</b><p> @return ZSTD_compressionParameters structure for a selected compression level and estimated srcSize.
`estimatedSrcSize` value is optional, select 0 if not known
@ -408,12 +456,19 @@ typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; v
both values are optional, select `0` if unknown.
</p></pre><BR>
<pre><b>size_t ZSTD_compress_advanced (ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const void* dict,size_t dictSize,
ZSTD_parameters params);
</b><p> Same as ZSTD_compress_usingDict(), with fine-tune control over each compression parameter
</p></pre><BR>
<pre><b>size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
void* dst, size_t dstCapacity,
const void* src, size_t srcSize,
const ZSTD_CDict* cdict, ZSTD_frameParameters fParams);
</b><p> Same as ZSTD_compress_usingCDict(), with fine-tune control over frame parameters
</p></pre><BR>
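<p> Example sketch (illustrative only) : keep the digested dictionary, but request a frame checksum.
 ZSTD_frameParameters fields are, in order : contentSizeFlag, checksumFlag, noDictIDFlag.</p>
<pre>
ZSTD_frameParameters const fParams = { 1 /* contentSize */, 1 /* checksum */, 0 /* keep dictID */ };
size_t const cSize = ZSTD_compress_usingCDict_advanced(cctx,
                            dst, dstCapacity,
                            src, srcSize,
                            cdict, fParams);
if (ZSTD_isError(cSize)) { /* handle error */ }
</pre><BR>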
<a name="Chapter15"></a><h2>Advanced decompression functions</h2><pre></pre>
@ -425,26 +480,19 @@ typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; v
Note 3 : Skippable Frame Identifiers are considered valid.
</p></pre><BR>
<pre><b>ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem);
</b><p> Create a ZSTD decompression context using external alloc and free functions
</p></pre><BR>
<pre><b>ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize);
</b><p> Create a digested dictionary, ready to start decompression operation without startup delay.
Dictionary content is simply referenced, and therefore stays in dictBuffer.
  It is important that dictBuffer outlives the DDict : it must remain read-accessible throughout the lifetime of the DDict.
</p></pre><BR>
<pre><b>ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
unsigned byReference, ZSTD_customMem customMem);
</b><p> Create a ZSTD_DDict using external alloc and free, optionally by reference
</p></pre><BR>
<pre><b>unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize);
@ -468,27 +516,34 @@ typedef struct { ZSTD_allocFunction customAlloc; ZSTD_freeFunction customFree; v
Note : this use case also happens when using a non-conformant dictionary.
- `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
- This is not a Zstandard frame.
When identifying the exact failure cause, it's possible to use ZSTD_getFrameParams(), which will provide a more precise error code.
</p></pre><BR>
<a name="Chapter16"></a><h2>Advanced streaming functions</h2><pre></pre>
<h3>Advanced Streaming compression functions</h3><pre></pre><b><pre>ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem);
size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize); </b>/**< pledgedSrcSize must be correct, a size of 0 means unknown. for a frame size of 0 use initCStream_advanced */<b>
size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel); </b>/**< note: a dict will not be used if dict == NULL or dictSize < 8. This results in the creation of an internal CDict */<b>
size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs, const void* dict, size_t dictSize,
ZSTD_parameters params, unsigned long long pledgedSrcSize); </b>/**< pledgedSrcSize is optional and can be 0 (meaning unknown). note: if the contentSizeFlag is set, pledgedSrcSize == 0 means the source size is actually 0 */<b>
size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict); </b>/**< note : cdict will just be referenced, and must outlive compression session */<b>
size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs, const ZSTD_CDict* cdict, unsigned long long pledgedSrcSize, ZSTD_frameParameters fParams); </b>/**< same as ZSTD_initCStream_usingCDict(), with control over frame parameters */<b>
</pre></b><BR>
<pre><b>size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
</b><p>  Start a new compression job, using the same parameters as the previous job.
  This is typically useful to skip the dictionary loading stage, since the previously-loaded dictionary is re-used in place.
  Note that zcs must be initialized at least once before using ZSTD_resetCStream().
  pledgedSrcSize==0 means "srcSize unknown".
  If pledgedSrcSize > 0, its value must be correct, as it will be written in the frame header and checked at the end of the job.
@return : 0, or an error code (which can be tested using ZSTD_isError())
</p></pre><BR>
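<p> Example sketch (illustrative only) : one expensive init (dictionary loading), then cheap per-job resets.
 `dictBuf`, `dictSize` and `nbJobs` are assumptions.</p>
<pre>
ZSTD_CStream* const zcs = ZSTD_createCStream();
size_t err = ZSTD_initCStream_usingDict(zcs, dictBuf, dictSize, 3 /* level */);
if (ZSTD_isError(err)) { /* handle error */ }
for (int job = 0; job < nbJobs; job++) {
    err = ZSTD_resetCStream(zcs, 0 /* srcSize unknown */);
    if (ZSTD_isError(err)) { /* handle error */ }
    /* ... per-job ZSTD_compressStream() / ZSTD_endStream() loop ... */
}
ZSTD_freeCStream(zcs);
</pre><BR>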
<h3>Advanced Streaming decompression functions</h3><pre></pre><b><pre>typedef enum { DStream_p_maxWindowSize } ZSTD_DStreamParameter_e;
ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem);
size_t ZSTD_setDStreamParameter(ZSTD_DStream* zds, ZSTD_DStreamParameter_e paramType, unsigned paramValue);
size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize); </b>/**< note: a dict will not be used if dict == NULL or dictSize < 8 */<b>
size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict); </b>/**< note : ddict will just be referenced, and must outlive decompression session */<b>
size_t ZSTD_resetDStream(ZSTD_DStream* zds); </b>/**< re-use decompression parameters from previous init; saves dictionary loading */<b>
</pre></b><BR>
<a name="Chapter17"></a><h2>Buffer-less and synchronous inner streaming functions</h2><pre>
  This is an advanced API, giving full control over buffer management, for users who need direct control over memory.
@ -529,10 +584,9 @@ size_t ZSTD_sizeof_DStream(const ZSTD_DStream* zds);
<h3>Buffer-less streaming compression functions</h3><pre></pre><b><pre>size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); </b>/**< pledgedSrcSize is optional and can be 0 (meaning unknown). note: if the contentSizeFlag is set, pledgedSrcSize == 0 means the source size is actually 0 */<b>
size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); </b>/**< note: fails if cdict==NULL */<b>
size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); </b>/* compression parameters are already set within cdict. pledgedSrcSize=0 means null-size */<b>
size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); </b>/**< note: if pledgedSrcSize can be 0, indicating unknown size. if it is non-zero, it must be accurate. for 0 size frames, use compressBegin_advanced */<b>
size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
</pre></b><BR>
<a name="Chapter19"></a><h2>Buffer-less streaming decompression (synchronous mode)</h2><pre>
A ZSTD_DCtx object is required to track streaming operations.
@ -592,14 +646,7 @@ size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const vo
  It also returns Frame Size as zfhPtr->frameContentSize.
<BR></pre>
<h3>Buffer-less streaming decompression functions</h3><pre></pre><b><pre>size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); </b>/**< doesn't consume input, see details below */<b>
size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
@ -617,19 +664,20 @@ ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
- Compressing and decompressing require a context structure
+ Use ZSTD_createCCtx() and ZSTD_createDCtx()
  - It is necessary to initialize the context before starting
+ compression : any ZSTD_compressBegin*() variant, including with dictionary
+ decompression : any ZSTD_decompressBegin*() variant, including with dictionary
+ copyCCtx() and copyDCtx() can be used too
- Block size is limited, it must be <= ZSTD_getBlockSizeMax() <= ZSTD_BLOCKSIZE_ABSOLUTEMAX
+ If input is larger than a block size, it's necessary to split input data into multiple blocks
+ For inputs larger than a single block size, consider using the regular ZSTD_compress() instead.
Frame metadata is not that costly, and quickly becomes negligible as source size grows larger.
- When a block is considered not compressible enough, ZSTD_compressBlock() result will be zero.
In which case, nothing is produced into `dst`.
+ User must test for such outcome and deal directly with uncompressed data
+ ZSTD_decompressBlock() doesn't accept uncompressed data as input !!!
      + In case of multiple successive blocks, should some of them be uncompressed,
        the decoder must be informed of their existence in order to follow proper history.
        Use ZSTD_insertBlock() for such a case (see the sketch below).
<BR></pre>
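<p> Example sketch (illustrative only) : a block-level round trip that handles the "not compressible" case.
 `cctx`/`dctx` are assumed already started with ZSTD_compressBegin() / ZSTD_decompressBegin(),
 and `src`, `dst`, `out` with their capacities are caller-provided.</p>
<pre>
size_t const blockMax = ZSTD_getBlockSizeMax(cctx);
size_t const chunk = srcSize < blockMax ? srcSize : blockMax;
size_t const cSize = ZSTD_compressBlock(cctx, dst, dstCapacity, src, chunk);
if (ZSTD_isError(cSize)) { /* handle error */ }
if (cSize == 0) {
    /* block judged not compressible : transmit `src` raw,
     * and register it on the decoder side to keep history consistent */
    size_t const err = ZSTD_insertBlock(dctx, src, chunk);
    if (ZSTD_isError(err)) { /* handle error */ }
} else {
    size_t const dSize = ZSTD_decompressBlock(dctx, out, outCapacity, dst, cSize);
    if (ZSTD_isError(dSize)) { /* handle error */ }
}
</pre><BR>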
<h3>Raw zstd block functions</h3><pre></pre><b><pre>size_t ZSTD_getBlockSizeMax(ZSTD_CCtx* cctx);

View File

@ -116,7 +116,6 @@ static char* createOutFilename_orDie(const char* filename)
int main(int argc, const char** argv)
{
const char* const exeName = argv[0];
if (argc!=2) {
printf("wrong arguments\n");
@ -125,6 +124,8 @@ int main(int argc, const char** argv)
return 1;
}
    const char* const inFilename = argv[1];   /* read argv[1] only after the argc check above */
char* const outFilename = createOutFilename_orDie(inFilename);
compress_orDie(inFilename, outFilename);
free(outFilename);

View File

@ -112,7 +112,6 @@ static const char* createOutFilename_orDie(const char* filename)
int main(int argc, const char** argv)
{
const char* const exeName = argv[0];
if (argc!=2) {
printf("wrong arguments\n");
@ -121,6 +120,8 @@ int main(int argc, const char** argv)
return 1;
}
    const char* const inFilename = argv[1];   /* read argv[1] only after the argc check above */
const char* const outFilename = createOutFilename_orDie(inFilename);
compressFile_orDie(inFilename, outFilename, 1);

View File

@ -99,7 +99,6 @@ static void decompressFile_orDie(const char* fname)
int main(int argc, const char** argv)
{
const char* const exeName = argv[0];
if (argc!=2) {
fprintf(stderr, "wrong arguments\n");
@ -108,6 +107,8 @@ int main(int argc, const char** argv)
return 1;
}
    const char* const inFilename = argv[1];   /* read argv[1] only after the argc check above */
decompressFile_orDie(inFilename);
return 0;
}

Some files were not shown because too many files have changed in this diff.