extzstd 0.3.2 → 0.3.3
- checksums.yaml +4 -4
- data/README.md +3 -3
- data/contrib/zstd/CHANGELOG +188 -1
- data/contrib/zstd/CONTRIBUTING.md +157 -74
- data/contrib/zstd/LICENSE +4 -4
- data/contrib/zstd/Makefile +81 -58
- data/contrib/zstd/Package.swift +36 -0
- data/contrib/zstd/README.md +59 -35
- data/contrib/zstd/TESTING.md +2 -3
- data/contrib/zstd/appveyor.yml +49 -136
- data/contrib/zstd/lib/BUCK +5 -7
- data/contrib/zstd/lib/Makefile +87 -181
- data/contrib/zstd/lib/README.md +23 -6
- data/contrib/zstd/lib/common/allocations.h +55 -0
- data/contrib/zstd/lib/common/bits.h +200 -0
- data/contrib/zstd/lib/common/bitstream.h +33 -59
- data/contrib/zstd/lib/common/compiler.h +115 -45
- data/contrib/zstd/lib/common/cpu.h +1 -1
- data/contrib/zstd/lib/common/debug.c +1 -1
- data/contrib/zstd/lib/common/debug.h +1 -1
- data/contrib/zstd/lib/common/entropy_common.c +15 -37
- data/contrib/zstd/lib/common/error_private.c +9 -2
- data/contrib/zstd/lib/common/error_private.h +82 -3
- data/contrib/zstd/lib/common/fse.h +9 -85
- data/contrib/zstd/lib/common/fse_decompress.c +29 -111
- data/contrib/zstd/lib/common/huf.h +84 -172
- data/contrib/zstd/lib/common/mem.h +58 -49
- data/contrib/zstd/lib/common/pool.c +37 -16
- data/contrib/zstd/lib/common/pool.h +9 -3
- data/contrib/zstd/lib/common/portability_macros.h +156 -0
- data/contrib/zstd/lib/common/threading.c +68 -14
- data/contrib/zstd/lib/common/threading.h +5 -10
- data/contrib/zstd/lib/common/xxhash.c +7 -809
- data/contrib/zstd/lib/common/xxhash.h +5568 -167
- data/contrib/zstd/lib/common/zstd_common.c +1 -36
- data/contrib/zstd/lib/common/zstd_deps.h +1 -1
- data/contrib/zstd/lib/common/zstd_internal.h +64 -150
- data/contrib/zstd/lib/common/zstd_trace.h +163 -0
- data/contrib/zstd/lib/compress/clevels.h +134 -0
- data/contrib/zstd/lib/compress/fse_compress.c +69 -150
- data/contrib/zstd/lib/compress/hist.c +1 -1
- data/contrib/zstd/lib/compress/hist.h +1 -1
- data/contrib/zstd/lib/compress/huf_compress.c +773 -251
- data/contrib/zstd/lib/compress/zstd_compress.c +2650 -826
- data/contrib/zstd/lib/compress/zstd_compress_internal.h +509 -180
- data/contrib/zstd/lib/compress/zstd_compress_literals.c +117 -40
- data/contrib/zstd/lib/compress/zstd_compress_literals.h +16 -6
- data/contrib/zstd/lib/compress/zstd_compress_sequences.c +28 -19
- data/contrib/zstd/lib/compress/zstd_compress_sequences.h +1 -1
- data/contrib/zstd/lib/compress/zstd_compress_superblock.c +33 -305
- data/contrib/zstd/lib/compress/zstd_compress_superblock.h +1 -1
- data/contrib/zstd/lib/compress/zstd_cwksp.h +266 -85
- data/contrib/zstd/lib/compress/zstd_double_fast.c +369 -132
- data/contrib/zstd/lib/compress/zstd_double_fast.h +3 -2
- data/contrib/zstd/lib/compress/zstd_fast.c +722 -258
- data/contrib/zstd/lib/compress/zstd_fast.h +3 -2
- data/contrib/zstd/lib/compress/zstd_lazy.c +1105 -360
- data/contrib/zstd/lib/compress/zstd_lazy.h +41 -1
- data/contrib/zstd/lib/compress/zstd_ldm.c +272 -208
- data/contrib/zstd/lib/compress/zstd_ldm.h +3 -2
- data/contrib/zstd/lib/compress/zstd_ldm_geartab.h +106 -0
- data/contrib/zstd/lib/compress/zstd_opt.c +324 -197
- data/contrib/zstd/lib/compress/zstd_opt.h +1 -1
- data/contrib/zstd/lib/compress/zstdmt_compress.c +109 -53
- data/contrib/zstd/lib/compress/zstdmt_compress.h +9 -6
- data/contrib/zstd/lib/decompress/huf_decompress.c +1071 -539
- data/contrib/zstd/lib/decompress/huf_decompress_amd64.S +576 -0
- data/contrib/zstd/lib/decompress/zstd_ddict.c +4 -4
- data/contrib/zstd/lib/decompress/zstd_ddict.h +1 -1
- data/contrib/zstd/lib/decompress/zstd_decompress.c +507 -82
- data/contrib/zstd/lib/decompress/zstd_decompress_block.c +962 -310
- data/contrib/zstd/lib/decompress/zstd_decompress_block.h +14 -3
- data/contrib/zstd/lib/decompress/zstd_decompress_internal.h +54 -6
- data/contrib/zstd/lib/deprecated/zbuff.h +1 -1
- data/contrib/zstd/lib/deprecated/zbuff_common.c +1 -1
- data/contrib/zstd/lib/deprecated/zbuff_compress.c +24 -4
- data/contrib/zstd/lib/deprecated/zbuff_decompress.c +3 -1
- data/contrib/zstd/lib/dictBuilder/cover.c +44 -32
- data/contrib/zstd/lib/dictBuilder/cover.h +6 -5
- data/contrib/zstd/lib/dictBuilder/divsufsort.c +1 -1
- data/contrib/zstd/lib/dictBuilder/fastcover.c +24 -16
- data/contrib/zstd/lib/dictBuilder/zdict.c +88 -95
- data/contrib/zstd/lib/legacy/zstd_legacy.h +8 -1
- data/contrib/zstd/lib/legacy/zstd_v01.c +16 -53
- data/contrib/zstd/lib/legacy/zstd_v01.h +1 -1
- data/contrib/zstd/lib/legacy/zstd_v02.c +24 -69
- data/contrib/zstd/lib/legacy/zstd_v02.h +1 -1
- data/contrib/zstd/lib/legacy/zstd_v03.c +25 -72
- data/contrib/zstd/lib/legacy/zstd_v03.h +1 -1
- data/contrib/zstd/lib/legacy/zstd_v04.c +23 -69
- data/contrib/zstd/lib/legacy/zstd_v04.h +1 -1
- data/contrib/zstd/lib/legacy/zstd_v05.c +35 -85
- data/contrib/zstd/lib/legacy/zstd_v05.h +1 -1
- data/contrib/zstd/lib/legacy/zstd_v06.c +42 -87
- data/contrib/zstd/lib/legacy/zstd_v06.h +1 -1
- data/contrib/zstd/lib/legacy/zstd_v07.c +35 -82
- data/contrib/zstd/lib/legacy/zstd_v07.h +1 -1
- data/contrib/zstd/lib/libzstd.mk +214 -0
- data/contrib/zstd/lib/libzstd.pc.in +4 -3
- data/contrib/zstd/lib/module.modulemap +35 -0
- data/contrib/zstd/lib/{dictBuilder/zdict.h → zdict.h} +202 -33
- data/contrib/zstd/lib/zstd.h +922 -293
- data/contrib/zstd/lib/{common/zstd_errors.h → zstd_errors.h} +27 -8
- data/ext/extconf.rb +7 -6
- data/ext/extzstd.c +13 -10
- data/ext/libzstd_conf.h +0 -1
- data/ext/zstd_decompress_asm.S +1 -0
- metadata +16 -5
data/contrib/zstd/lib/compress/zstd_compress_literals.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c)
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -13,11 +13,36 @@
 ***************************************/
 #include "zstd_compress_literals.h"
 
+
+/* **************************************************************
+*  Debug Traces
+****************************************************************/
+#if DEBUGLEVEL >= 2
+
+static size_t showHexa(const void* src, size_t srcSize)
+{
+    const BYTE* const ip = (const BYTE*)src;
+    size_t u;
+    for (u=0; u<srcSize; u++) {
+        RAWLOG(5, " %02X", ip[u]); (void)ip;
+    }
+    RAWLOG(5, " \n");
+    return srcSize;
+}
+
+#endif
+
+
+/* **************************************************************
+*  Literals compression - special cases
+****************************************************************/
 size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
 {
-    BYTE* const ostart = (BYTE*
+    BYTE* const ostart = (BYTE*)dst;
     U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
 
+    DEBUGLOG(5, "ZSTD_noCompressLiterals: srcSize=%zu, dstCapacity=%zu", srcSize, dstCapacity);
+
     RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall, "");
 
     switch(flSize)
@@ -36,16 +61,30 @@ size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src,
     }
 
     ZSTD_memcpy(ostart + flSize, src, srcSize);
-    DEBUGLOG(5, "Raw literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize));
+    DEBUGLOG(5, "Raw (uncompressed) literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize));
     return srcSize + flSize;
 }
 
+static int allBytesIdentical(const void* src, size_t srcSize)
+{
+    assert(srcSize >= 1);
+    assert(src != NULL);
+    {   const BYTE b = ((const BYTE*)src)[0];
+        size_t p;
+        for (p=1; p<srcSize; p++) {
+            if (((const BYTE*)src)[p] != b) return 0;
+        }
+        return 1;
+    }
+}
+
 size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
 {
-    BYTE* const ostart = (BYTE*
+    BYTE* const ostart = (BYTE*)dst;
     U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
 
-    (
+    assert(dstCapacity >= 4); (void)dstCapacity;
+    assert(allBytesIdentical(src, srcSize));
 
     switch(flSize)
     {
@@ -63,27 +102,51 @@ size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void*
     }
 
     ostart[flSize] = *(const BYTE*)src;
-    DEBUGLOG(5, "RLE
+    DEBUGLOG(5, "RLE : Repeated Literal (%02X: %u times) -> %u bytes encoded", ((const BYTE*)src)[0], (U32)srcSize, (U32)flSize + 1);
     return flSize+1;
 }
 
-
-
-
-
-
-
-
+/* ZSTD_minLiteralsToCompress() :
+ * returns minimal amount of literals
+ * for literal compression to even be attempted.
+ * Minimum is made tighter as compression strategy increases.
+ */
+static size_t
+ZSTD_minLiteralsToCompress(ZSTD_strategy strategy, HUF_repeat huf_repeat)
+{
+    assert((int)strategy >= 0);
+    assert((int)strategy <= 9);
+    /* btultra2 : min 8 bytes;
+     * then 2x larger for each successive compression strategy
+     * max threshold 64 bytes */
+    {   int const shift = MIN(9-(int)strategy, 3);
+        size_t const mintc = (huf_repeat == HUF_repeat_valid) ? 6 : (size_t)8 << shift;
+        DEBUGLOG(7, "minLiteralsToCompress = %zu", mintc);
+        return mintc;
+    }
+}
+
+size_t ZSTD_compressLiterals (
+                  void* dst, size_t dstCapacity,
+            const void* src, size_t srcSize,
+                  void* entropyWorkspace, size_t entropyWorkspaceSize,
+            const ZSTD_hufCTables_t* prevHuf,
+                  ZSTD_hufCTables_t* nextHuf,
+                  ZSTD_strategy strategy,
+                  int disableLiteralCompression,
+                  int suspectUncompressible,
+                  int bmi2)
 {
-    size_t const minGain = ZSTD_minGain(srcSize, strategy);
     size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
     BYTE* const ostart = (BYTE*)dst;
     U32 singleStream = srcSize < 256;
     symbolEncodingType_e hType = set_compressed;
     size_t cLitSize;
 
-    DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i srcSize=%u)",
-            disableLiteralCompression, (U32)srcSize);
+    DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i, srcSize=%u, dstCapacity=%zu)",
+            disableLiteralCompression, (U32)srcSize, dstCapacity);
+
+    DEBUGLOG(6, "Completed literals listing (%zu bytes)", showHexa(src, srcSize));
 
     /* Prepare nextEntropy assuming reusing the existing table */
     ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
@@ -91,40 +154,51 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
     if (disableLiteralCompression)
         return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
 
-    /* small
-
-
-        if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
-    }
+    /* if too small, don't even attempt compression (speed opt) */
+    if (srcSize < ZSTD_minLiteralsToCompress(strategy, prevHuf->repeatMode))
+        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
 
     RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression");
     {   HUF_repeat repeat = prevHuf->repeatMode;
-        int const
+        int const flags = 0
+            | (bmi2 ? HUF_flags_bmi2 : 0)
+            | (strategy < ZSTD_lazy && srcSize <= 1024 ? HUF_flags_preferRepeat : 0)
+            | (strategy >= HUF_OPTIMAL_DEPTH_THRESHOLD ? HUF_flags_optimalDepth : 0)
+            | (suspectUncompressible ? HUF_flags_suspectUncompressible : 0);
+
+        typedef size_t (*huf_compress_f)(void*, size_t, const void*, size_t, unsigned, unsigned, void*, size_t, HUF_CElt*, HUF_repeat*, int);
+        huf_compress_f huf_compress;
         if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
-
-
-
-
-
-
-
-
-                (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
+        huf_compress = singleStream ? HUF_compress1X_repeat : HUF_compress4X_repeat;
+        cLitSize = huf_compress(ostart+lhSize, dstCapacity-lhSize,
+                                src, srcSize,
+                                HUF_SYMBOLVALUE_MAX, LitHufLog,
+                                entropyWorkspace, entropyWorkspaceSize,
+                                (HUF_CElt*)nextHuf->CTable,
+                                &repeat, flags);
+        DEBUGLOG(5, "%zu literals compressed into %zu bytes (before header)", srcSize, cLitSize);
         if (repeat != HUF_repeat_none) {
             /* reused the existing table */
-            DEBUGLOG(5, "
+            DEBUGLOG(5, "reusing statistics from previous huffman block");
             hType = set_repeat;
         }
     }
 
-
-
-
-
+    {   size_t const minGain = ZSTD_minGain(srcSize, strategy);
+        if ((cLitSize==0) || (cLitSize >= srcSize - minGain) || ERR_isError(cLitSize)) {
+            ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+            return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+    }   }
     if (cLitSize==1) {
-
-
-
+        /* A return value of 1 signals that the alphabet consists of a single symbol.
+         * However, in some rare circumstances, it could be the compressed size (a single byte).
+         * For that outcome to have a chance to happen, it's necessary that `srcSize < 8`.
+         * (it's also necessary to not generate statistics).
+         * Therefore, in such a case, actively check that all bytes are identical. */
+        if ((srcSize >= 8) || allBytesIdentical(src, srcSize)) {
+            ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+            return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
+    }   }
 
     if (hType == set_compressed) {
         /* using a newly constructed table */
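Note on the `cLitSize==1` branch above: per the new comment, a return value of 1 means the alphabet is a single symbol, but for very small inputs (srcSize < 8) it could instead be a genuine 1-byte compressed payload. A minimal sketch of that disambiguation; the helper names are illustrative, not library symbols:

```c
#include <stddef.h>

/* Illustrative re-implementation of the allBytesIdentical() helper above. */
int all_bytes_identical(const unsigned char* src, size_t srcSize)
{
    size_t p;
    for (p = 1; p < srcSize; p++)
        if (src[p] != src[0]) return 0;
    return 1;
}

/* Returns 1 when a huffman return value of 1 can safely be treated as "RLE block". */
int one_means_single_symbol(const unsigned char* src, size_t srcSize)
{
    /* For srcSize >= 8 a 1-byte compressed payload cannot happen,
     * so 1 is unambiguously the single-symbol signal; otherwise verify. */
    return (srcSize >= 8) || all_bytes_identical(src, srcSize);
}
```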
@@ -135,16 +209,19 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
     switch(lhSize)
     {
     case 3: /* 2 - 2 - 10 - 10 */
-
+        if (!singleStream) assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS);
+        {   U32 const lhc = hType + ((U32)(!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
             MEM_writeLE24(ostart, lhc);
             break;
         }
     case 4: /* 2 - 2 - 14 - 14 */
+        assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS);
         {   U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
             MEM_writeLE32(ostart, lhc);
             break;
         }
     case 5: /* 2 - 2 - 18 - 18 */
+        assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS);
         {   U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
             MEM_writeLE32(ostart, lhc);
             ostart[4] = (BYTE)(cLitSize >> 10);
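As a worked example of the `case 3` header layout above (2 bits type, 2 bits size-format, 10 bits regenerated size, 10 bits compressed size), here is a standalone sketch with hypothetical values; it assumes set_compressed carries the enum value 2, as in zstd's symbolEncodingType_e:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Hypothetical values, chosen so both sizes fit in 10 bits. */
    uint32_t const hType        = 2;     /* set_compressed */
    uint32_t const singleStream = 1;     /* 1-stream literals */
    uint32_t const srcSize      = 1000;  /* regenerated size */
    uint32_t const cLitSize     = 300;   /* compressed size */

    /* Same packing as the `case 3` branch above (2 - 2 - 10 - 10). */
    uint32_t const lhc = hType + ((!singleStream) << 2) + (srcSize << 4) + (cLitSize << 14);

    /* MEM_writeLE24() stores the low 3 bytes of lhc in little-endian order. */
    printf("literals section header: %02X %02X %02X\n",
           (unsigned)(lhc & 0xFF), (unsigned)((lhc >> 8) & 0xFF), (unsigned)((lhc >> 16) & 0xFF));
    return 0;
}
```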
data/contrib/zstd/lib/compress/zstd_compress_literals.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c)
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -16,14 +16,24 @@
 
 size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
 
+/* ZSTD_compressRleLiteralsBlock() :
+ * Conditions :
+ * - All bytes in @src are identical
+ * - dstCapacity >= 4 */
 size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
 
-
-
-
-
+/* ZSTD_compressLiterals():
+ * @entropyWorkspace: must be aligned on 4-bytes boundaries
+ * @entropyWorkspaceSize : must be >= HUF_WORKSPACE_SIZE
+ * @suspectUncompressible: sampling checks, to potentially skip huffman coding
+ */
+size_t ZSTD_compressLiterals (void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
                               void* entropyWorkspace, size_t entropyWorkspaceSize,
-                        const
+                        const ZSTD_hufCTables_t* prevHuf,
+                              ZSTD_hufCTables_t* nextHuf,
+                              ZSTD_strategy strategy, int disableLiteralCompression,
+                              int suspectUncompressible,
+                              int bmi2);
 
 #endif /* ZSTD_COMPRESS_LITERALS_H */
data/contrib/zstd/lib/compress/zstd_compress_sequences.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c)
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -58,7 +58,7 @@ static unsigned ZSTD_useLowProbCount(size_t const nbSeq)
 {
     /* Heuristic: This should cover most blocks <= 16K and
      * start to fade out after 16K to about 32K depending on
-     *
+     * compressibility.
      */
     return nbSeq >= 2048;
 }
@@ -85,6 +85,8 @@ static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t
 {
     unsigned cost = 0;
     unsigned s;
+
+    assert(total > 0);
     for (s = 0; s <= max; ++s) {
         unsigned norm = (unsigned)((256 * count[s]) / total);
         if (count[s] != 0 && norm == 0)
@@ -164,7 +166,7 @@ ZSTD_selectEncodingType(
     if (mostFrequent == nbSeq) {
         *repeatMode = FSE_repeat_none;
         if (isDefaultAllowed && nbSeq <= 2) {
-            /* Prefer set_basic over set_rle when there are 2 or
+            /* Prefer set_basic over set_rle when there are 2 or fewer symbols,
              * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
              * If basic encoding isn't possible, always choose RLE.
              */
@@ -232,6 +234,11 @@ ZSTD_selectEncodingType(
     return set_compressed;
 }
 
+typedef struct {
+    S16 norm[MaxSeq + 1];
+    U32 wksp[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(MaxSeq, MaxFSELog)];
+} ZSTD_BuildCTableWksp;
+
 size_t
 ZSTD_buildCTable(void* dst, size_t dstCapacity,
                 FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
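The new ZSTD_BuildCTableWksp type is a typed view carved out of the caller-provided entropy workspace (see the set_compressed case in the next hunks). A generic sketch of that pattern, with placeholder sizes rather than the real MaxSeq/MaxFSELog-derived ones:

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
    int16_t  norm[64];     /* placeholder: the real struct sizes this with MaxSeq + 1 */
    uint32_t wksp[256];    /* placeholder: the real struct uses an FSE workspace-size macro */
} build_ctable_wksp;

void carve_workspace(void* entropyWorkspace, size_t entropyWorkspaceSize)
{
    /* Overlay the struct on the raw buffer instead of hand-computing offsets. */
    build_ctable_wksp* const w = (build_ctable_wksp*)entropyWorkspace;
    assert(entropyWorkspaceSize >= sizeof(*w));
    (void)entropyWorkspaceSize;
    w->norm[0] = 0;   /* fields are now addressed through the typed view */
    w->wksp[0] = 0;
}
```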
@@ -258,7 +265,7 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
         FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize), "");  /* note : could be pre-calculated */
         return 0;
     case set_compressed: {
-
+        ZSTD_BuildCTableWksp* wksp = (ZSTD_BuildCTableWksp*)entropyWorkspace;
         size_t nbSeq_1 = nbSeq;
         const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
         if (count[codeTable[nbSeq-1]] > 1) {
@@ -266,11 +273,13 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
             nbSeq_1--;
         }
         assert(nbSeq_1 > 1);
-        assert(entropyWorkspaceSize >=
-
-
+        assert(entropyWorkspaceSize >= sizeof(ZSTD_BuildCTableWksp));
+        (void)entropyWorkspaceSize;
+        FORWARD_IF_ERROR(FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "FSE_normalizeCount failed");
+        assert(oend >= op);
+        {   size_t const NCountSize = FSE_writeNCount(op, (size_t)(oend - op), wksp->norm, max, tableLog);   /* overflow protected */
             FORWARD_IF_ERROR(NCountSize, "FSE_writeNCount failed");
-            FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog,
+            FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(wksp->wksp)), "FSE_buildCTable_wksp failed");
             return NCountSize;
         }
     }
@@ -304,19 +313,19 @@ ZSTD_encodeSequences_body(
     FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]);
     BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
     if (MEM_32bits()) BIT_flushBits(&blockStream);
-    BIT_addBits(&blockStream, sequences[nbSeq-1].
+    BIT_addBits(&blockStream, sequences[nbSeq-1].mlBase, ML_bits[mlCodeTable[nbSeq-1]]);
     if (MEM_32bits()) BIT_flushBits(&blockStream);
     if (longOffsets) {
         U32 const ofBits = ofCodeTable[nbSeq-1];
         unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
         if (extraBits) {
-            BIT_addBits(&blockStream, sequences[nbSeq-1].
+            BIT_addBits(&blockStream, sequences[nbSeq-1].offBase, extraBits);
             BIT_flushBits(&blockStream);
         }
-        BIT_addBits(&blockStream, sequences[nbSeq-1].
+        BIT_addBits(&blockStream, sequences[nbSeq-1].offBase >> extraBits,
                     ofBits - extraBits);
     } else {
-        BIT_addBits(&blockStream, sequences[nbSeq-1].
+        BIT_addBits(&blockStream, sequences[nbSeq-1].offBase, ofCodeTable[nbSeq-1]);
     }
     BIT_flushBits(&blockStream);
 
@@ -330,8 +339,8 @@ ZSTD_encodeSequences_body(
             U32 const mlBits = ML_bits[mlCode];
             DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
                         (unsigned)sequences[n].litLength,
-                        (unsigned)sequences[n].
-                        (unsigned)sequences[n].
+                        (unsigned)sequences[n].mlBase + MINMATCH,
+                        (unsigned)sequences[n].offBase);
                                                                             /* 32b*/  /* 64b*/
                                                                             /* (7)*/  /* (7)*/
             FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode);       /* 15 */  /* 15 */
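The field renames in these hunks (matchLength → mlBase, offset → offBase) indicate the sequence now carries base values; the match-length relation is visible in the DEBUGLOG above, which prints mlBase + MINMATCH. A hedged sketch with a hypothetical stand-in struct (the real seqDef layout lives in zstd's internal headers):

```c
#include <stdint.h>

#define MINMATCH 3   /* zstd's minimum match length */

/* Hypothetical stand-in for the internal sequence fields touched above. */
typedef struct {
    uint32_t litLength;
    uint32_t offBase;   /* offset code base, fed to BIT_addBits() above */
    uint16_t mlBase;    /* stored as matchLength - MINMATCH */
} sequence_view;

/* Recovers the full match length the same way the DEBUGLOG above does. */
uint32_t full_match_length(const sequence_view* s)
{
    return (uint32_t)s->mlBase + MINMATCH;
}
```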
@@ -342,18 +351,18 @@ ZSTD_encodeSequences_body(
                 BIT_flushBits(&blockStream);                                /* (7)*/
             BIT_addBits(&blockStream, sequences[n].litLength, llBits);
             if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
-            BIT_addBits(&blockStream, sequences[n].
+            BIT_addBits(&blockStream, sequences[n].mlBase, mlBits);
             if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
             if (longOffsets) {
                 unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
                 if (extraBits) {
-                    BIT_addBits(&blockStream, sequences[n].
+                    BIT_addBits(&blockStream, sequences[n].offBase, extraBits);
                     BIT_flushBits(&blockStream);                            /* (7)*/
                 }
-                BIT_addBits(&blockStream, sequences[n].
+                BIT_addBits(&blockStream, sequences[n].offBase >> extraBits,
                             ofBits - extraBits);                            /* 31 */
             } else {
-                BIT_addBits(&blockStream, sequences[n].
+                BIT_addBits(&blockStream, sequences[n].offBase, ofBits);    /* 31 */
             }
             BIT_flushBits(&blockStream);                                    /* (7)*/
             DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr));
@@ -390,7 +399,7 @@ ZSTD_encodeSequences_default(
 
 #if DYNAMIC_BMI2
 
-static
+static BMI2_TARGET_ATTRIBUTE size_t
 ZSTD_encodeSequences_bmi2(
         void* dst, size_t dstCapacity,
         FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,