diff --git a/.circleci/config.yml b/.circleci/config.yml
index bcf2e1d5ea2..9738c1bd45b 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -143,8 +143,9 @@ workflows:
filters:
branches:
only:
- - master
+ - release
- dev
+ - master
jobs:
# Run daily long regression tests
- regression-test
diff --git a/.github/workflows/generic-dev.yml b/.github/workflows/generic-dev.yml
index bb88de57ca5..8882fcc0b6a 100644
--- a/.github/workflows/generic-dev.yml
+++ b/.github/workflows/generic-dev.yml
@@ -2,14 +2,13 @@ name: generic-dev
on:
pull_request:
- branches: [ dev, master, actionsTest ]
+ branches: [ dev, release, actionsTest ]
jobs:
# Dev PR jobs that still have to be migrated from travis
#
-# icc (need self-hosted)
-# versionTag
+# versionTag (only on release tags)
# valgrindTest (keeps failing for some reason. need investigation)
# staticAnalyze (need trusty so need self-hosted)
# pcc-fuzz: (need trusty so need self-hosted)
@@ -19,7 +18,7 @@ jobs:
# I need admins permissions to the repo for that it looks like
# So I'm tabling that for now
#
-# The master branch exclusive jobs will be in a separate
+# The release branch exclusive jobs will be in a separate
# workflow file (the osx tests and meson build that is)
benchmarking:
@@ -36,6 +35,15 @@ jobs:
- name: make test
run: make test
+ check-32bit: # designed to catch https://github.com/facebook/zstd/issues/2428
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - name: make check on 32-bit
+ run: |
+ make libc6install
+ CFLAGS="-m32 -O1 -fstack-protector" make check V=1
+
gcc-6-7-libzstd:
runs-on: ubuntu-latest
steps:
@@ -50,6 +58,10 @@ jobs:
LDFLAGS=-Wl,--no-undefined make -C lib libzstd-mt
make -C tests zbufftest-dll
+ # candidate test (to check) : underlink test
+ # LDFLAGS=-Wl,--no-undefined : will make the linker fail if dll is underlinked
+ # zbufftest-dll : test that a user program can link to multi-threaded libzstd without specifying -pthread
+
gcc-8-asan-ubsan-testzstd:
runs-on: ubuntu-16.04 # fails on 18.04
steps:
@@ -84,6 +96,10 @@ jobs:
sudo apt-get install clang-3.8
CC=clang-3.8 make clean msan-test-zstd HAVE_ZLIB=0 HAVE_LZ4=0 HAVE_LZMA=0
+ # Note : external libraries must be turned off when using MSAN tests,
+ # because they are not msan-instrumented,
+ # so any data coming from these libraries is always considered "uninitialized"
+
cmake-build-and-test-check:
runs-on: ubuntu-latest
steps:
@@ -147,7 +163,7 @@ jobs:
make clean
make c99build
make clean
- make travis-install
+ make travis-install # just ensures `make install` works
mingw-cross-compilation:
runs-on: ubuntu-latest
diff --git a/.github/workflows/generic-release.yml b/.github/workflows/generic-release.yml
index de4a1cb9145..167ba38487f 100644
--- a/.github/workflows/generic-release.yml
+++ b/.github/workflows/generic-release.yml
@@ -2,10 +2,10 @@ name: generic-release
on:
pull_request:
- # This will eventually only be for pushes to master
+ # This will eventually only be for pushes to release
# but for dogfooding purposes, I'm running it even
# on dev pushes
- branches: [ dev, master, actionsTest ]
+ branches: [ dev, release, actionsTest ]
jobs:
# missing jobs
@@ -44,6 +44,7 @@ jobs:
sudo apt-get install clang-3.8
CC=clang-3.8 make tsan-test-zstream
CC=clang-3.8 make tsan-fuzztest
+
zlib-wrapper:
runs-on: ubuntu-16.04
steps:
diff --git a/.github/workflows/linux-kernel.yml b/.github/workflows/linux-kernel.yml
index 35871ff0850..124f7777828 100644
--- a/.github/workflows/linux-kernel.yml
+++ b/.github/workflows/linux-kernel.yml
@@ -2,7 +2,7 @@ name: linux-kernel
on:
pull_request:
- branches: [ dev, master, actionsTest ]
+ branches: [ dev, release, actionsTest ]
jobs:
test:
diff --git a/.travis.yml b/.travis.yml
index 226d4c0154e..4757c6b02ae 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -8,6 +8,7 @@ git:
branches:
only:
- dev
+ - release
- master
- travisTest
@@ -31,49 +32,11 @@ matrix:
script:
- make check
- - name: make benchmarking
- script:
- - make benchmarking
-
- name: make test (complete)
script:
# DEVNULLRIGHTS : will request sudo rights to test permissions on /dev/null
- DEVNULLRIGHTS=test make test
- - name: gcc-6 + gcc-7 + libzstdmt compilation # ~ 6mn
- script:
- - make gcc6install gcc7install
- - CC=gcc-6 CFLAGS=-Werror make -j all
- - make clean
- - CC=gcc-7 CFLAGS=-Werror make -j all
- - make clean
- - LDFLAGS=-Wl,--no-undefined make -C lib libzstd-mt
- - make -C tests zbufftest-dll
- # LDFLAGS=-Wl,--no-undefined : will make the linker fail if dll is underlinked
- # zbufftest-dll : test that a user program can link to multi-threaded libzstd without specifying -pthread
-
- - name: gcc-8 + ASan + UBSan + Test Zstd # ~6.5mn
- script:
- - make gcc8install
- - CC=gcc-8 CFLAGS="-Werror" make -j all
- - make clean
- - CC=gcc-8 make -j uasan-test-zstd zstd-src.tar.zst.sha256.sig &&
diff --git a/contrib/seekable_format/zstd_seekable_compression_format.md b/contrib/seekable_format/zstd_seekable_compression_format.md
index bf3080f7bbe..55aebfd2e9d 100644
--- a/contrib/seekable_format/zstd_seekable_compression_format.md
+++ b/contrib/seekable_format/zstd_seekable_compression_format.md
@@ -53,7 +53,7 @@ __`Frame_Size`__
The total size of the skippable frame, not including the `Skippable_Magic_Number` or `Frame_Size`.
This is for compatibility with [Zstandard skippable frames].
-[Zstandard skippable frames]: https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#skippable-frames
+[Zstandard skippable frames]: https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#skippable-frames
#### `Seek_Table_Footer`
The seek table footer format is as follows:
diff --git a/doc/zstd_manual.html b/doc/zstd_manual.html
index cb5ded0703a..315488844ba 100644
--- a/doc/zstd_manual.html
+++ b/doc/zstd_manual.html
@@ -1,10 +1,10 @@
-zstd 1.4.7 Manual
+zstd 1.4.8 Manual
-zstd 1.4.7 Manual
+zstd 1.4.8 Manual
Contents
diff --git a/lib/common/fse.h b/lib/common/fse.h
index 83a07847aaa..dd5fc44e809 100644
--- a/lib/common/fse.h
+++ b/lib/common/fse.h
@@ -335,9 +335,10 @@ size_t FSE_buildCTable_rle (FSE_CTable* ct, unsigned char symbolValue);
/* FSE_buildCTable_wksp() :
* Same as FSE_buildCTable(), but using an externally allocated scratch buffer (`workSpace`).
- * `wkspSize` must be >= `FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog)`.
+ * `wkspSize` must be >= `FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog)` units of `unsigned`.
*/
-#define FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) (sizeof(unsigned) * (maxSymbolValue + 2) + (1ull << tableLog))
+#define FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog) (maxSymbolValue + 2 + (1ull << (tableLog - 2)))
+#define FSE_BUILD_CTABLE_WORKSPACE_SIZE(maxSymbolValue, tableLog) (sizeof(unsigned) * FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(maxSymbolValue, tableLog))
size_t FSE_buildCTable_wksp(FSE_CTable* ct, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);
#define FSE_BUILD_DTABLE_WKSP_SIZE(maxTableLog, maxSymbolValue) (sizeof(short) * (maxSymbolValue + 1) + (1ULL << maxTableLog) + 8)
diff --git a/lib/compress/huf_compress.c b/lib/compress/huf_compress.c
index abbcc319224..302e08864da 100644
--- a/lib/compress/huf_compress.c
+++ b/lib/compress/huf_compress.c
@@ -69,7 +69,7 @@ static size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weight
U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
- BYTE scratchBuffer[FSE_BUILD_CTABLE_WORKSPACE_SIZE(HUF_TABLELOG_MAX, MAX_FSE_TABLELOG_FOR_HUFF_HEADER)];
+ U32 scratchBuffer[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(HUF_TABLELOG_MAX, MAX_FSE_TABLELOG_FOR_HUFF_HEADER)];
unsigned count[HUF_TABLELOG_MAX+1];
S16 norm[HUF_TABLELOG_MAX+1];
@@ -736,25 +736,26 @@ typedef struct {
} HUF_compress_tables_t;
/* HUF_compress_internal() :
- * `workSpace` must a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
+ * `workSpace_align4` must be aligned on 4-byte boundaries,
+ * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U32 unsigned */
static size_t
HUF_compress_internal (void* dst, size_t dstSize,
const void* src, size_t srcSize,
unsigned maxSymbolValue, unsigned huffLog,
HUF_nbStreams_e nbStreams,
- void* workSpace, size_t wkspSize,
+ void* workSpace_align4, size_t wkspSize,
HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
const int bmi2)
{
- HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace;
+ HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace_align4;
BYTE* const ostart = (BYTE*)dst;
BYTE* const oend = ostart + dstSize;
BYTE* op = ostart;
HUF_STATIC_ASSERT(sizeof(*table) <= HUF_WORKSPACE_SIZE);
+    assert(((size_t)workSpace_align4 & 3) == 0);   /* must be aligned on 4-byte boundaries */
/* checks & inits */
- if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
if (wkspSize < HUF_WORKSPACE_SIZE) return ERROR(workSpace_tooSmall);
if (!srcSize) return 0; /* Uncompressed */
if (!dstSize) return 0; /* cannot fit anything within dst budget */
@@ -772,7 +773,7 @@ HUF_compress_internal (void* dst, size_t dstSize,
}
/* Scan input and build symbol stats */
- { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace, wkspSize) );
+ { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace_align4, wkspSize) );
if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */
if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */
}
diff --git a/lib/compress/zstd_compress.c b/lib/compress/zstd_compress.c
index eb7780cf170..386b051df63 100644
--- a/lib/compress/zstd_compress.c
+++ b/lib/compress/zstd_compress.c
@@ -3258,7 +3258,7 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
/* Dictionary format :
* See :
- * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
+ * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#dictionary-format
*/
/*! ZSTD_loadZstdDictionary() :
* @return : dictID, or an error code
diff --git a/lib/decompress/zstd_decompress_block.c b/lib/decompress/zstd_decompress_block.c
index bec82e85709..19cbdc5c16e 100644
--- a/lib/decompress/zstd_decompress_block.c
+++ b/lib/decompress/zstd_decompress_block.c
@@ -236,7 +236,7 @@ size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
/* Default FSE distribution tables.
* These are pre-calculated FSE decoding tables using default distributions as defined in specification :
- * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#default-distributions
+ * https://github.com/facebook/zstd/blob/release/doc/zstd_compression_format.md#default-distributions
* They were generated programmatically with following method :
* - start from default distributions, present in /lib/common/zstd_internal.h
* - generate tables normally, using ZSTD_buildFSETable()
diff --git a/lib/zstd.h b/lib/zstd.h
index 06e07f7c34d..b0ecdf55385 100644
--- a/lib/zstd.h
+++ b/lib/zstd.h
@@ -72,7 +72,7 @@ extern "C" {
/*------ Version ------*/
#define ZSTD_VERSION_MAJOR 1
#define ZSTD_VERSION_MINOR 4
-#define ZSTD_VERSION_RELEASE 7
+#define ZSTD_VERSION_RELEASE 8
#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
/*! ZSTD_versionNumber() :
diff --git a/programs/zstd.1 b/programs/zstd.1
index 0335b175cee..a8fc277672b 100644
--- a/programs/zstd.1
+++ b/programs/zstd.1
@@ -1,5 +1,5 @@
.
-.TH "ZSTD" "1" "December 2020" "zstd 1.4.7" "User Commands"
+.TH "ZSTD" "1" "December 2020" "zstd 1.4.8" "User Commands"
.
.SH "NAME"
\fBzstd\fR \- zstd, zstdmt, unzstd, zstdcat \- Compress or decompress \.zst files
diff --git a/programs/zstdgrep.1 b/programs/zstdgrep.1
index c8af908c984..4d143b59873 100644
--- a/programs/zstdgrep.1
+++ b/programs/zstdgrep.1
@@ -1,5 +1,5 @@
.
-.TH "ZSTDGREP" "1" "December 2020" "zstd 1.4.7" "User Commands"
+.TH "ZSTDGREP" "1" "December 2020" "zstd 1.4.8" "User Commands"
.
.SH "NAME"
\fBzstdgrep\fR \- print lines matching a pattern in zstandard\-compressed files
diff --git a/programs/zstdless.1 b/programs/zstdless.1
index be92e3514ec..43f235453ee 100644
--- a/programs/zstdless.1
+++ b/programs/zstdless.1
@@ -1,5 +1,5 @@
.
-.TH "ZSTDLESS" "1" "December 2020" "zstd 1.4.7" "User Commands"
+.TH "ZSTDLESS" "1" "December 2020" "zstd 1.4.8" "User Commands"
.
.SH "NAME"
\fBzstdless\fR \- view zstandard\-compressed files
diff --git a/tests/README.md b/tests/README.md
index 23e00767c39..1e40c46aae9 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -28,7 +28,7 @@ desktop machine for every pull request that is made to the zstd repo but can als
be run on any machine via the command line interface.
There are three modes of usage for this script: fastmode will just run a minimal single
-build comparison (between facebook:dev and facebook:master), onetime will pull all the current
+build comparison (between facebook:dev and facebook:release), onetime will pull all the current
pull requests from the zstd repo and compare facebook:dev to all of them once, continuous
will continuously get pull requests from the zstd repo and run benchmarks against facebook:dev.
diff --git a/tests/automated_benchmarking.py b/tests/automated_benchmarking.py
index d0cfb1fbe37..77eea29de27 100644
--- a/tests/automated_benchmarking.py
+++ b/tests/automated_benchmarking.py
@@ -20,7 +20,7 @@
GITHUB_API_PR_URL = "https://api.github.com/repos/facebook/zstd/pulls?state=open"
GITHUB_URL_TEMPLATE = "https://github.com/{}/zstd"
-MASTER_BUILD = {"user": "facebook", "branch": "dev", "hash": None}
+RELEASE_BUILD = {"user": "facebook", "branch": "dev", "hash": None}
# check to see if there are any new PRs every minute
DEFAULT_MAX_API_CALL_FREQUENCY_SEC = 60
@@ -264,11 +264,11 @@ def main(filenames, levels, iterations, builds=None, emails=None, continuous=Fal
for test_build in builds:
if dictionary_filename == None:
regressions = get_regressions(
- MASTER_BUILD, test_build, iterations, filenames, levels
+ RELEASE_BUILD, test_build, iterations, filenames, levels
)
else:
regressions = get_regressions_dictionary(
- MASTER_BUILD, test_build, filenames, dictionary_filename, levels, iterations
+ RELEASE_BUILD, test_build, filenames, dictionary_filename, levels, iterations
)
body = "\n".join(regressions)
if len(regressions) > 0:
@@ -320,7 +320,7 @@ def main(filenames, levels, iterations, builds=None, emails=None, continuous=Fal
builds = [{"user": None, "branch": "None", "hash": None}]
main(filenames, levels, iterations, builds, frequency=frequency, dictionary_filename=dictionary_filename)
elif mode == "fastmode":
- builds = [{"user": "facebook", "branch": "master", "hash": None}]
+ builds = [{"user": "facebook", "branch": "release", "hash": None}]
main(filenames, levels, iterations, builds, frequency=frequency, dictionary_filename=dictionary_filename)
else:
main(filenames, levels, iterations, None, emails, True, frequency=frequency, dictionary_filename=dictionary_filename)
diff --git a/tests/playTests.sh b/tests/playTests.sh
index 51b42b60ac4..4d6abbcc066 100755
--- a/tests/playTests.sh
+++ b/tests/playTests.sh
@@ -1342,8 +1342,6 @@ optCSize19=$(datagen -g2M | zstd -19 -c | wc -c)
longCSize19=$(datagen -g2M | zstd -19 --long -c | wc -c)
optCSize19wlog23=$(datagen -g2M | zstd -19 -c --zstd=wlog=23 | wc -c)
longCSize19wlog23=$(datagen -g2M | zstd -19 -c --long=23 | wc -c)
-optCSize22=$(datagen -g900K | zstd -22 --ultra -c | wc -c)
-longCSize22=$(datagen -g900K | zstd -22 --ultra --long -c | wc -c)
if [ "$longCSize16" -gt "$optCSize16" ]; then
echo using --long on compression level 16 should not cause compressed size regression
exit 1
@@ -1353,9 +1351,6 @@ elif [ "$longCSize19" -gt "$optCSize19" ]; then
elif [ "$longCSize19wlog23" -gt "$optCSize19wlog23" ]; then
echo using --long on compression level 19 with wLog=23 should not cause compressed size regression
exit 1
-elif [ "$longCSize22" -gt "$optCSize22" ]; then
- echo using --long on compression level 22 should not cause compressed size regression
- exit 1
fi