extzstd 0.3.1 → 0.3.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +28 -14
- data/contrib/zstd/CHANGELOG +301 -56
- data/contrib/zstd/CONTRIBUTING.md +169 -72
- data/contrib/zstd/LICENSE +4 -4
- data/contrib/zstd/Makefile +116 -87
- data/contrib/zstd/Package.swift +36 -0
- data/contrib/zstd/README.md +62 -32
- data/contrib/zstd/TESTING.md +2 -3
- data/contrib/zstd/appveyor.yml +52 -136
- data/contrib/zstd/lib/BUCK +5 -7
- data/contrib/zstd/lib/Makefile +225 -222
- data/contrib/zstd/lib/README.md +51 -6
- data/contrib/zstd/lib/common/allocations.h +55 -0
- data/contrib/zstd/lib/common/bits.h +200 -0
- data/contrib/zstd/lib/common/bitstream.h +45 -62
- data/contrib/zstd/lib/common/compiler.h +205 -22
- data/contrib/zstd/lib/common/cpu.h +1 -3
- data/contrib/zstd/lib/common/debug.c +1 -1
- data/contrib/zstd/lib/common/debug.h +12 -19
- data/contrib/zstd/lib/common/entropy_common.c +172 -48
- data/contrib/zstd/lib/common/error_private.c +10 -2
- data/contrib/zstd/lib/common/error_private.h +82 -3
- data/contrib/zstd/lib/common/fse.h +37 -86
- data/contrib/zstd/lib/common/fse_decompress.c +117 -92
- data/contrib/zstd/lib/common/huf.h +99 -166
- data/contrib/zstd/lib/common/mem.h +124 -142
- data/contrib/zstd/lib/common/pool.c +54 -27
- data/contrib/zstd/lib/common/pool.h +10 -4
- data/contrib/zstd/lib/common/portability_macros.h +156 -0
- data/contrib/zstd/lib/common/threading.c +74 -19
- data/contrib/zstd/lib/common/threading.h +5 -10
- data/contrib/zstd/lib/common/xxhash.c +7 -847
- data/contrib/zstd/lib/common/xxhash.h +5568 -167
- data/contrib/zstd/lib/common/zstd_common.c +2 -37
- data/contrib/zstd/lib/common/zstd_deps.h +111 -0
- data/contrib/zstd/lib/common/zstd_internal.h +132 -187
- data/contrib/zstd/lib/common/zstd_trace.h +163 -0
- data/contrib/zstd/lib/compress/clevels.h +134 -0
- data/contrib/zstd/lib/compress/fse_compress.c +83 -157
- data/contrib/zstd/lib/compress/hist.c +27 -29
- data/contrib/zstd/lib/compress/hist.h +2 -2
- data/contrib/zstd/lib/compress/huf_compress.c +916 -279
- data/contrib/zstd/lib/compress/zstd_compress.c +3773 -1019
- data/contrib/zstd/lib/compress/zstd_compress_internal.h +610 -203
- data/contrib/zstd/lib/compress/zstd_compress_literals.c +119 -42
- data/contrib/zstd/lib/compress/zstd_compress_literals.h +16 -6
- data/contrib/zstd/lib/compress/zstd_compress_sequences.c +42 -19
- data/contrib/zstd/lib/compress/zstd_compress_sequences.h +1 -1
- data/contrib/zstd/lib/compress/zstd_compress_superblock.c +49 -317
- data/contrib/zstd/lib/compress/zstd_compress_superblock.h +1 -1
- data/contrib/zstd/lib/compress/zstd_cwksp.h +320 -103
- data/contrib/zstd/lib/compress/zstd_double_fast.c +388 -151
- data/contrib/zstd/lib/compress/zstd_double_fast.h +3 -2
- data/contrib/zstd/lib/compress/zstd_fast.c +729 -265
- data/contrib/zstd/lib/compress/zstd_fast.h +3 -2
- data/contrib/zstd/lib/compress/zstd_lazy.c +1270 -251
- data/contrib/zstd/lib/compress/zstd_lazy.h +61 -1
- data/contrib/zstd/lib/compress/zstd_ldm.c +324 -219
- data/contrib/zstd/lib/compress/zstd_ldm.h +9 -2
- data/contrib/zstd/lib/compress/zstd_ldm_geartab.h +106 -0
- data/contrib/zstd/lib/compress/zstd_opt.c +481 -209
- data/contrib/zstd/lib/compress/zstd_opt.h +1 -1
- data/contrib/zstd/lib/compress/zstdmt_compress.c +181 -457
- data/contrib/zstd/lib/compress/zstdmt_compress.h +34 -113
- data/contrib/zstd/lib/decompress/huf_decompress.c +1199 -565
- data/contrib/zstd/lib/decompress/huf_decompress_amd64.S +576 -0
- data/contrib/zstd/lib/decompress/zstd_ddict.c +12 -12
- data/contrib/zstd/lib/decompress/zstd_ddict.h +2 -2
- data/contrib/zstd/lib/decompress/zstd_decompress.c +627 -157
- data/contrib/zstd/lib/decompress/zstd_decompress_block.c +1086 -326
- data/contrib/zstd/lib/decompress/zstd_decompress_block.h +19 -5
- data/contrib/zstd/lib/decompress/zstd_decompress_internal.h +62 -13
- data/contrib/zstd/lib/deprecated/zbuff.h +1 -1
- data/contrib/zstd/lib/deprecated/zbuff_common.c +1 -1
- data/contrib/zstd/lib/deprecated/zbuff_compress.c +24 -4
- data/contrib/zstd/lib/deprecated/zbuff_decompress.c +3 -1
- data/contrib/zstd/lib/dictBuilder/cover.c +73 -52
- data/contrib/zstd/lib/dictBuilder/cover.h +7 -6
- data/contrib/zstd/lib/dictBuilder/divsufsort.c +1 -1
- data/contrib/zstd/lib/dictBuilder/fastcover.c +44 -35
- data/contrib/zstd/lib/dictBuilder/zdict.c +103 -111
- data/contrib/zstd/lib/legacy/zstd_legacy.h +8 -1
- data/contrib/zstd/lib/legacy/zstd_v01.c +21 -54
- data/contrib/zstd/lib/legacy/zstd_v01.h +1 -1
- data/contrib/zstd/lib/legacy/zstd_v02.c +29 -70
- data/contrib/zstd/lib/legacy/zstd_v02.h +1 -1
- data/contrib/zstd/lib/legacy/zstd_v03.c +30 -73
- data/contrib/zstd/lib/legacy/zstd_v03.h +1 -1
- data/contrib/zstd/lib/legacy/zstd_v04.c +29 -71
- data/contrib/zstd/lib/legacy/zstd_v04.h +1 -1
- data/contrib/zstd/lib/legacy/zstd_v05.c +40 -86
- data/contrib/zstd/lib/legacy/zstd_v05.h +1 -1
- data/contrib/zstd/lib/legacy/zstd_v06.c +47 -88
- data/contrib/zstd/lib/legacy/zstd_v06.h +1 -1
- data/contrib/zstd/lib/legacy/zstd_v07.c +40 -83
- data/contrib/zstd/lib/legacy/zstd_v07.h +1 -1
- data/contrib/zstd/lib/libzstd.mk +214 -0
- data/contrib/zstd/lib/libzstd.pc.in +7 -6
- data/contrib/zstd/lib/module.modulemap +35 -0
- data/contrib/zstd/lib/{dictBuilder/zdict.h → zdict.h} +203 -34
- data/contrib/zstd/lib/zstd.h +1217 -287
- data/contrib/zstd/lib/{common/zstd_errors.h → zstd_errors.h} +28 -8
- data/ext/extconf.rb +7 -6
- data/ext/extzstd.c +19 -10
- data/ext/extzstd.h +6 -0
- data/ext/libzstd_conf.h +0 -1
- data/ext/zstd_decompress_asm.S +1 -0
- data/gemstub.rb +3 -21
- data/lib/extzstd/version.rb +6 -1
- data/lib/extzstd.rb +0 -2
- data/test/test_basic.rb +0 -5
- metadata +18 -6
--- a/data/contrib/zstd/lib/compress/zstd_compress_internal.h
+++ b/data/contrib/zstd/lib/compress/zstd_compress_internal.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c)
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -23,12 +23,12 @@
 #ifdef ZSTD_MULTITHREAD
 # include "zstdmt_compress.h"
 #endif
+#include "../common/bits.h" /* ZSTD_highbit32, ZSTD_NbCommonBytes */
 
 #if defined (__cplusplus)
 extern "C" {
 #endif
 
-
 /*-*************************************
 * Constants
 ***************************************/
@@ -64,7 +64,7 @@ typedef struct {
 } ZSTD_localDict;
 
 typedef struct {
-    HUF_CElt CTable[HUF_CTABLE_SIZE_U32(255)];
+    HUF_CElt CTable[HUF_CTABLE_SIZE_ST(255)];
     HUF_repeat repeatMode;
 } ZSTD_hufCTables_t;
 
@@ -82,11 +82,82 @@ typedef struct {
     ZSTD_fseCTables_t fse;
 } ZSTD_entropyCTables_t;
 
+/***********************************************
+* Entropy buffer statistics structs and funcs *
+***********************************************/
+/** ZSTD_hufCTablesMetadata_t :
+ *  Stores Literals Block Type for a super-block in hType, and
+ *  huffman tree description in hufDesBuffer.
+ *  hufDesSize refers to the size of huffman tree description in bytes.
+ *  This metadata is populated in ZSTD_buildBlockEntropyStats_literals() */
 typedef struct {
-    U32 off;
-    U32 len;
+    symbolEncodingType_e hType;
+    BYTE hufDesBuffer[ZSTD_MAX_HUF_HEADER_SIZE];
+    size_t hufDesSize;
+} ZSTD_hufCTablesMetadata_t;
+
+/** ZSTD_fseCTablesMetadata_t :
+ *  Stores symbol compression modes for a super-block in {ll, ol, ml}Type, and
+ *  fse tables in fseTablesBuffer.
+ *  fseTablesSize refers to the size of fse tables in bytes.
+ *  This metadata is populated in ZSTD_buildBlockEntropyStats_sequences() */
+typedef struct {
+    symbolEncodingType_e llType;
+    symbolEncodingType_e ofType;
+    symbolEncodingType_e mlType;
+    BYTE fseTablesBuffer[ZSTD_MAX_FSE_HEADERS_SIZE];
+    size_t fseTablesSize;
+    size_t lastCountSize; /* This is to account for bug in 1.3.4. More detail in ZSTD_entropyCompressSeqStore_internal() */
+} ZSTD_fseCTablesMetadata_t;
+
+typedef struct {
+    ZSTD_hufCTablesMetadata_t hufMetadata;
+    ZSTD_fseCTablesMetadata_t fseMetadata;
+} ZSTD_entropyCTablesMetadata_t;
+
+/** ZSTD_buildBlockEntropyStats() :
+ *  Builds entropy for the block.
+ *  @return : 0 on success or error code */
+size_t ZSTD_buildBlockEntropyStats(
+        const seqStore_t* seqStorePtr,
+        const ZSTD_entropyCTables_t* prevEntropy,
+        ZSTD_entropyCTables_t* nextEntropy,
+        const ZSTD_CCtx_params* cctxParams,
+        ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+        void* workspace, size_t wkspSize);
+
+/*********************************
+* Compression internals structs *
+*********************************/
+
+typedef struct {
+    U32 off;            /* Offset sumtype code for the match, using ZSTD_storeSeq() format */
+    U32 len;            /* Raw length of match */
 } ZSTD_match_t;
 
+typedef struct {
+    U32 offset;         /* Offset of sequence */
+    U32 litLength;      /* Length of literals prior to match */
+    U32 matchLength;    /* Raw length of match */
+} rawSeq;
+
+typedef struct {
+    rawSeq* seq;          /* The start of the sequences */
+    size_t pos;           /* The index in seq where reading stopped. pos <= size. */
+    size_t posInSequence; /* The position within the sequence at seq[pos] where reading
+                             stopped. posInSequence <= seq[pos].litLength + seq[pos].matchLength */
+    size_t size;          /* The number of sequences. <= capacity. */
+    size_t capacity;      /* The capacity starting from `seq` pointer */
+} rawSeqStore_t;
+
+typedef struct {
+    U32 idx;            /* Index in array of ZSTD_Sequence */
+    U32 posInSequence;  /* Position within sequence at idx */
+    size_t posInSrc;    /* Number of bytes given by sequences provided so far */
+} ZSTD_sequencePosition;
+
+UNUSED_ATTR static const rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0};
+
 typedef struct {
     int price;
     U32 off;
@@ -116,7 +187,7 @@ typedef struct {
     U32 offCodeSumBasePrice;    /* to compare to log2(offreq) */
     ZSTD_OptPrice_e priceType;  /* prices can be determined dynamically, or follow a pre-defined cost structure */
     const ZSTD_entropyCTables_t* symbolCosts;  /* pre-calculated dictionary statistics */
-    ZSTD_literalCompressionMode_e literalCompressionMode;
+    ZSTD_paramSwitch_e literalCompressionMode;
 } optState_t;
 
 typedef struct {
@@ -125,14 +196,23 @@ typedef struct {
 } ZSTD_compressedBlockState_t;
 
 typedef struct {
-    BYTE const* nextSrc;
-    BYTE const* base;
-    BYTE const* dictBase;
-    U32 dictLimit;
-    U32 lowLimit;
+    BYTE const* nextSrc;       /* next block here to continue on current prefix */
+    BYTE const* base;          /* All regular indexes relative to this position */
+    BYTE const* dictBase;      /* extDict indexes relative to this position */
+    U32 dictLimit;             /* below that point, need extDict */
+    U32 lowLimit;              /* below that point, no more valid data */
+    U32 nbOverflowCorrections; /* Number of times overflow correction has run since
+                                * ZSTD_window_init(). Useful for debugging coredumps
+                                * and for ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY.
+                                */
 } ZSTD_window_t;
 
+#define ZSTD_WINDOW_START_INDEX 2
+
 typedef struct ZSTD_matchState_t ZSTD_matchState_t;
+
+#define ZSTD_ROW_HASH_CACHE_SIZE 8  /* Size of prefetching hash cache for row-based matchfinder */
+
 struct ZSTD_matchState_t {
     ZSTD_window_t window;   /* State for window round buffer management */
     U32 loadedDictEnd;      /* index of end of dictionary, within context's referential.
@@ -144,12 +224,38 @@ struct ZSTD_matchState_t {
                             */
    U32 nextToUpdate;       /* index from which to continue table update */
    U32 hashLog3;           /* dispatch table for matches of len==3 : larger == faster, more memory */
+
+    U32 rowHashLog;                          /* For row-based matchfinder: Hashlog based on nb of rows in the hashTable.*/
+    BYTE* tagTable;                          /* For row-based matchFinder: A row-based table containing the hashes and head index. */
+    U32 hashCache[ZSTD_ROW_HASH_CACHE_SIZE]; /* For row-based matchFinder: a cache of hashes to improve speed */
+    U64 hashSalt;                            /* For row-based matchFinder: salts the hash for re-use of tag table */
+    U32 hashSaltEntropy;                     /* For row-based matchFinder: collects entropy for salt generation */
+
    U32* hashTable;
    U32* hashTable3;
    U32* chainTable;
+
+    U32 forceNonContiguous; /* Non-zero if we should force non-contiguous load for the next window update. */
+
+    int dedicatedDictSearch;  /* Indicates whether this matchState is using the
+                               * dedicated dictionary search structure.
+                               */
    optState_t opt;         /* optimal parser state */
    const ZSTD_matchState_t* dictMatchState;
    ZSTD_compressionParameters cParams;
+    const rawSeqStore_t* ldmSeqStore;
+
+    /* Controls prefetching in some dictMatchState matchfinders.
+     * This behavior is controlled from the cctx ms.
+     * This parameter has no effect in the cdict ms. */
+    int prefetchCDictTables;
+
+    /* When == 0, lazy match finders insert every position.
+     * When != 0, lazy match finders only insert positions they search.
+     * This allows them to skip much faster over incompressible data,
+     * at a small cost to compression ratio.
+     */
+    int lazySkipping;
 };
 
 typedef struct {
@@ -163,17 +269,26 @@ typedef struct {
     U32 checksum;
 } ldmEntry_t;
 
+typedef struct {
+    BYTE const* split;
+    U32 hash;
+    U32 checksum;
+    ldmEntry_t* bucket;
+} ldmMatchCandidate_t;
+
+#define LDM_BATCH_SIZE 64
+
 typedef struct {
     ZSTD_window_t window;   /* State for the window round buffer management */
     ldmEntry_t* hashTable;
     U32 loadedDictEnd;
     BYTE* bucketOffsets;    /* Next position in bucket to insert entry */
-    U64 hashPower;          /* Used to compute the rolling hash. Depends on ldmParams.minMatchLength */
-
+    size_t splitIndices[LDM_BATCH_SIZE];
+    ldmMatchCandidate_t matchCandidates[LDM_BATCH_SIZE];
 } ldmState_t;
 
 typedef struct {
-    U32 enableLdm;          /* 1 if enable long distance matching */
+    ZSTD_paramSwitch_e enableLdm; /* ZSTD_ps_enable to enable LDM. ZSTD_ps_auto by default */
     U32 hashLog;            /* Log size of hashTable */
     U32 bucketSizeLog;      /* Log bucket size for collision resolution, at most 8 */
     U32 minMatchLength;     /* Minimum match length */
@@ -181,19 +296,6 @@ typedef struct {
     U32 windowLog;          /* Window log for the LDM */
 } ldmParams_t;
 
-typedef struct {
-    U32 offset;
-    U32 litLength;
-    U32 matchLength;
-} rawSeq;
-
-typedef struct {
-    rawSeq* seq;     /* The start of the sequences */
-    size_t pos;      /* The position where reading stopped. <= size. */
-    size_t size;     /* The number of sequences. <= capacity. */
-    size_t capacity; /* The capacity starting from `seq` pointer */
-} rawSeqStore_t;
-
 typedef struct {
     int collectSequences;
     ZSTD_Sequence* seqStart;
@@ -217,7 +319,7 @@ struct ZSTD_CCtx_params_s {
                               * There is no guarantee that hint is close to actual source size */
 
     ZSTD_dictAttachPref_e attachDictPref;
-    ZSTD_literalCompressionMode_e literalCompressionMode;
+    ZSTD_paramSwitch_e literalCompressionMode;
 
     /* Multithreading: used to pass parameters to mtctx */
     int nbWorkers;
@@ -228,17 +330,94 @@ struct ZSTD_CCtx_params_s {
     /* Long distance matching parameters */
     ldmParams_t ldmParams;
 
+    /* Dedicated dict search algorithm trigger */
+    int enableDedicatedDictSearch;
+
+    /* Input/output buffer modes */
+    ZSTD_bufferMode_e inBufferMode;
+    ZSTD_bufferMode_e outBufferMode;
+
+    /* Sequence compression API */
+    ZSTD_sequenceFormat_e blockDelimiters;
+    int validateSequences;
+
+    /* Block splitting */
+    ZSTD_paramSwitch_e useBlockSplitter;
+
+    /* Param for deciding whether to use row-based matchfinder */
+    ZSTD_paramSwitch_e useRowMatchFinder;
+
+    /* Always load a dictionary in ext-dict mode (not prefix mode)? */
+    int deterministicRefPrefix;
+
     /* Internal use, for createCCtxParams() and freeCCtxParams() only */
     ZSTD_customMem customMem;
+
+    /* Controls prefetching in some dictMatchState matchfinders */
+    ZSTD_paramSwitch_e prefetchCDictTables;
+
+    /* Controls whether zstd will fall back to an internal matchfinder
+     * if the external matchfinder returns an error code. */
+    int enableMatchFinderFallback;
+
+    /* Indicates whether an external matchfinder has been referenced.
+     * Users can't set this externally.
+     * It is set internally in ZSTD_registerSequenceProducer(). */
+    int useSequenceProducer;
+
+    /* Adjust the max block size*/
+    size_t maxBlockSize;
+
+    /* Controls repcode search in external sequence parsing */
+    ZSTD_paramSwitch_e searchForExternalRepcodes;
 }; /* typedef'd to ZSTD_CCtx_params within "zstd.h" */
 
+#define COMPRESS_SEQUENCES_WORKSPACE_SIZE (sizeof(unsigned) * (MaxSeq + 2))
+#define ENTROPY_WORKSPACE_SIZE (HUF_WORKSPACE_SIZE + COMPRESS_SEQUENCES_WORKSPACE_SIZE)
+
+/**
+ * Indicates whether this compression proceeds directly from user-provided
+ * source buffer to user-provided destination buffer (ZSTDb_not_buffered), or
+ * whether the context needs to buffer the input/output (ZSTDb_buffered).
+ */
+typedef enum {
+    ZSTDb_not_buffered,
+    ZSTDb_buffered
+} ZSTD_buffered_policy_e;
+
+/**
+ * Struct that contains all elements of block splitter that should be allocated
+ * in a wksp.
+ */
+#define ZSTD_MAX_NB_BLOCK_SPLITS 196
+typedef struct {
+    seqStore_t fullSeqStoreChunk;
+    seqStore_t firstHalfSeqStore;
+    seqStore_t secondHalfSeqStore;
+    seqStore_t currSeqStore;
+    seqStore_t nextSeqStore;
+
+    U32 partitions[ZSTD_MAX_NB_BLOCK_SPLITS];
+    ZSTD_entropyCTablesMetadata_t entropyMetadata;
+} ZSTD_blockSplitCtx;
+
+/* Context for block-level external matchfinder API */
+typedef struct {
+    void* mState;
+    ZSTD_sequenceProducer_F* mFinder;
+    ZSTD_Sequence* seqBuffer;
+    size_t seqBufferCapacity;
+} ZSTD_externalMatchCtx;
+
 struct ZSTD_CCtx_s {
     ZSTD_compressionStage_e stage;
     int cParamsChanged;  /* == 1 if cParams(except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. */
     int bmi2;            /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
     ZSTD_CCtx_params requestedParams;
     ZSTD_CCtx_params appliedParams;
+    ZSTD_CCtx_params simpleApiParams;  /* Param storage used by the simple API - not sticky. Must only be used in top-level simple API functions for storage. */
     U32 dictID;
+    size_t dictContentSize;
 
     ZSTD_cwksp workspace; /* manages buffer for dynamic allocations */
     size_t blockSize;
@@ -247,6 +426,7 @@ struct ZSTD_CCtx_s {
     unsigned long long producedCSize;
     XXH64_state_t xxhState;
     ZSTD_customMem customMem;
+    ZSTD_threadPool* pool;
     size_t staticSize;
     SeqCollector seqCollector;
     int isFirstBlock;
@@ -258,7 +438,10 @@ struct ZSTD_CCtx_s {
     size_t maxNbLdmSequences;
     rawSeqStore_t externSeqStore; /* Mutable reference to external sequences */
     ZSTD_blockState_t blockState;
-    U32* entropyWorkspace;  /* entropy workspace of HUF_WORKSPACE_SIZE bytes */
+    U32* entropyWorkspace;  /* entropy workspace of ENTROPY_WORKSPACE_SIZE bytes */
+
+    /* Whether we are streaming or not */
+    ZSTD_buffered_policy_e bufferedPolicy;
 
     /* streaming */
     char* inBuff;
@@ -273,6 +456,11 @@ struct ZSTD_CCtx_s {
     ZSTD_cStreamStage streamStage;
     U32 frameEnded;
 
+    /* Stable in/out buffer verification */
+    ZSTD_inBuffer expectedInBuffer;
+    size_t stableIn_notConsumed; /* nb bytes within stable input buffer that are said to be consumed but are not */
+    size_t expectedOutBufferSize;
+
     /* Dictionary */
     ZSTD_localDict localDict;
     const ZSTD_CDict* cdict;
@@ -282,17 +470,53 @@ struct ZSTD_CCtx_s {
 #ifdef ZSTD_MULTITHREAD
     ZSTDMT_CCtx* mtctx;
 #endif
-};
 
-typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;
+    /* Tracing */
+#if ZSTD_TRACE
+    ZSTD_TraceCtx traceCtx;
+#endif
 
-typedef enum { ZSTD_noDict = 0, ZSTD_extDict = 1, ZSTD_dictMatchState = 2 } ZSTD_dictMode_e;
+    /* Workspace for block splitter */
+    ZSTD_blockSplitCtx blockSplitCtx;
 
+    /* Workspace for external matchfinder */
+    ZSTD_externalMatchCtx externalMatchCtx;
+};
+
+typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;
+typedef enum { ZSTD_tfp_forCCtx, ZSTD_tfp_forCDict } ZSTD_tableFillPurpose_e;
+
+typedef enum {
+    ZSTD_noDict = 0,
+    ZSTD_extDict = 1,
+    ZSTD_dictMatchState = 2,
+    ZSTD_dedicatedDictSearch = 3
+} ZSTD_dictMode_e;
+
+typedef enum {
+    ZSTD_cpm_noAttachDict = 0,  /* Compression with ZSTD_noDict or ZSTD_extDict.
+                                 * In this mode we use both the srcSize and the dictSize
+                                 * when selecting and adjusting parameters.
+                                 */
+    ZSTD_cpm_attachDict = 1,    /* Compression with ZSTD_dictMatchState or ZSTD_dedicatedDictSearch.
+                                 * In this mode we only take the srcSize into account when selecting
+                                 * and adjusting parameters.
+                                 */
+    ZSTD_cpm_createCDict = 2,   /* Creating a CDict.
+                                 * In this mode we take both the source size and the dictionary size
+                                 * into account when selecting and adjusting the parameters.
+                                 */
+    ZSTD_cpm_unknown = 3        /* ZSTD_getCParams, ZSTD_getParams, ZSTD_adjustParams.
+                                 * We don't know what these parameters are for. We default to the legacy
+                                 * behavior of taking both the source size and the dict size into account
+                                 * when selecting and adjusting parameters.
+                                 */
+} ZSTD_cParamMode_e;
 
 typedef size_t (*ZSTD_blockCompressor) (
         ZSTD_matchState_t* bs, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize);
-ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_dictMode_e dictMode);
+ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramSwitch_e rowMatchfinderMode, ZSTD_dictMode_e dictMode);
 
 
 MEM_STATIC U32 ZSTD_LLcode(U32 litLength)
@@ -326,31 +550,6 @@ MEM_STATIC U32 ZSTD_MLcode(U32 mlBase)
     return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase];
 }
 
-typedef struct repcodes_s {
-    U32 rep[3];
-} repcodes_t;
-
-MEM_STATIC repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
-{
-    repcodes_t newReps;
-    if (offset >= ZSTD_REP_NUM) {  /* full offset */
-        newReps.rep[2] = rep[1];
-        newReps.rep[1] = rep[0];
-        newReps.rep[0] = offset - ZSTD_REP_MOVE;
-    } else {   /* repcode */
-        U32 const repCode = offset + ll0;
-        if (repCode > 0) {  /* note : if repCode==0, no change */
-            U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
-            newReps.rep[2] = (repCode >= 2) ? rep[1] : rep[2];
-            newReps.rep[1] = rep[0];
-            newReps.rep[0] = currentOffset;
-        } else {   /* repCode == 0 */
-            memcpy(&newReps, rep, sizeof(newReps));
-        }
-    }
-    return newReps;
-}
-
 /* ZSTD_cParam_withinBounds:
  * @return 1 if value is within cParam bounds,
  * 0 otherwise */
@@ -366,17 +565,20 @@ MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
 /* ZSTD_noCompressBlock() :
  * Writes uncompressed block to dst buffer from given src.
  * Returns the size of the block */
-MEM_STATIC size_t ZSTD_noCompressBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
+MEM_STATIC size_t
+ZSTD_noCompressBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
 {
     U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
+    DEBUGLOG(5, "ZSTD_noCompressBlock (srcSize=%zu, dstCapacity=%zu)", srcSize, dstCapacity);
     RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity,
                     dstSize_tooSmall, "dst buf too small for uncompressed block");
     MEM_writeLE24(dst, cBlockHeader24);
-    memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
+    ZSTD_memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
     return ZSTD_blockHeaderSize + srcSize;
 }
 
-MEM_STATIC size_t ZSTD_rleCompressBlock(void* dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock)
+MEM_STATIC size_t
+ZSTD_rleCompressBlock(void* dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock)
 {
     BYTE* const op = (BYTE*)dst;
     U32 const cBlockHeader = lastBlock + (((U32)bt_rle)<<1) + (U32)(srcSize << 3);
@@ -395,21 +597,21 @@ MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
 {
     U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6;
     ZSTD_STATIC_ASSERT(ZSTD_btultra == 8);
-    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
+    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, (int)strat));
     return (srcSize >> minlog) + 2;
 }
 
-MEM_STATIC int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* cctxParams)
+MEM_STATIC int ZSTD_literalsCompressionIsDisabled(const ZSTD_CCtx_params* cctxParams)
 {
     switch (cctxParams->literalCompressionMode) {
-    case ZSTD_lcm_huffman:
+    case ZSTD_ps_enable:
         return 0;
-    case ZSTD_lcm_uncompressed:
+    case ZSTD_ps_disable:
         return 1;
     default:
         assert(0 /* impossible: pre-validated */);
-        /* fall-through */
-    case ZSTD_lcm_auto:
+        ZSTD_FALLTHROUGH;
+    case ZSTD_ps_auto:
         return (cctxParams->cParams.strategy == ZSTD_fast) && (cctxParams->cParams.targetLength > 0);
     }
 }
@@ -419,7 +621,9 @@ MEM_STATIC int ZSTD_disableLiteralsCompression(const ZSTD_CCtx_params* cctxParam
 * Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single
 * large copies.
 */
-static void ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w) {
+static void
+ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w)
+{
     assert(iend > ilimit_w);
     if (ip <= ilimit_w) {
         ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap);
@@ -429,14 +633,28 @@ static void ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const ie
     while (ip < iend) *op++ = *ip++;
 }
 
+
+#define REPCODE1_TO_OFFBASE REPCODE_TO_OFFBASE(1)
+#define REPCODE2_TO_OFFBASE REPCODE_TO_OFFBASE(2)
+#define REPCODE3_TO_OFFBASE REPCODE_TO_OFFBASE(3)
+#define REPCODE_TO_OFFBASE(r) (assert((r)>=1), assert((r)<=ZSTD_REP_NUM), (r)) /* accepts IDs 1,2,3 */
+#define OFFSET_TO_OFFBASE(o)  (assert((o)>0), o + ZSTD_REP_NUM)
+#define OFFBASE_IS_OFFSET(o)  ((o) > ZSTD_REP_NUM)
+#define OFFBASE_IS_REPCODE(o) ( 1 <= (o) && (o) <= ZSTD_REP_NUM)
+#define OFFBASE_TO_OFFSET(o)  (assert(OFFBASE_IS_OFFSET(o)), (o) - ZSTD_REP_NUM)
+#define OFFBASE_TO_REPCODE(o) (assert(OFFBASE_IS_REPCODE(o)), (o))  /* returns ID 1,2,3 */
+
 /*! ZSTD_storeSeq() :
- *  Store a sequence (litlen, litPtr, offCode and mlBase) into seqStore_t.
- *  `offCode` : distance to match + ZSTD_REP_MOVE (values <= ZSTD_REP_MOVE are repCodes).
- *  `mlBase` : matchLength - MINMATCH
- *  Allowed to overread litLimit_w up to litLimit.
+ *  Store a sequence (litlen, litPtr, offBase and matchLength) into seqStore_t.
+ *  @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE().
+ *  @matchLength : must be >= MINMATCH
+ *  Allowed to over-read literals up to litLimit.
 */
-HINT_INLINE UNUSED_ATTR
-void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* literals, const BYTE* litLimit, U32 offCode, size_t mlBase)
+HINT_INLINE UNUSED_ATTR void
+ZSTD_storeSeq(seqStore_t* seqStorePtr,
+              size_t litLength, const BYTE* literals, const BYTE* litLimit,
+              U32 offBase,
+              size_t matchLength)
 {
     BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH;
     BYTE const* const litEnd = literals + litLength;
@@ -444,8 +662,8 @@ void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* litera
     static const BYTE* g_start = NULL;
     if (g_start==NULL) g_start = (const BYTE*)literals;  /* note : index only works for compression within a single segment */
     {   U32 const pos = (U32)((const BYTE*)literals - g_start);
-        DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u",
-                   pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offCode);
+        DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offBase%7u",
+                   pos, (U32)litLength, (U32)matchLength, (U32)offBase);
     }
 #endif
     assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
@@ -455,9 +673,9 @@ void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* litera
     assert(literals + litLength <= litLimit);
     if (litEnd <= litLimit_w) {
         /* Common case we can use wildcopy.
-         * First copy 16 bytes, because literals are likely short.
-         */
-        assert(WILDCOPY_OVERLENGTH >= 16);
+         * First copy 16 bytes, because literals are likely short.
+         */
+        ZSTD_STATIC_ASSERT(WILDCOPY_OVERLENGTH >= 16);
         ZSTD_copy16(seqStorePtr->lit, literals);
         if (litLength > 16) {
             ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap);
@@ -469,95 +687,70 @@ void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* litera
 
     /* literal Length */
     if (litLength>0xFFFF) {
-        assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
-        seqStorePtr->longLengthID = 1;
+        assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */
+        seqStorePtr->longLengthType = ZSTD_llt_literalLength;
         seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
     }
     seqStorePtr->sequences[0].litLength = (U16)litLength;
 
     /* match offset */
-    seqStorePtr->sequences[0].offset = offCode + 1;
+    seqStorePtr->sequences[0].offBase = offBase;
 
     /* match Length */
-    if (mlBase>0xFFFF) {
-        assert(seqStorePtr->longLengthID == 0); /* there can only be a single long length */
-        seqStorePtr->longLengthID = 2;
-        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+    assert(matchLength >= MINMATCH);
+    {   size_t const mlBase = matchLength - MINMATCH;
+        if (mlBase>0xFFFF) {
+            assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */
+            seqStorePtr->longLengthType = ZSTD_llt_matchLength;
+            seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+        }
+        seqStorePtr->sequences[0].mlBase = (U16)mlBase;
     }
-    seqStorePtr->sequences[0].matchLength = (U16)mlBase;
 
     seqStorePtr->sequences++;
 }
 
-/*-*************************************
-*  Match length counter
-***************************************/
-static unsigned ZSTD_NbCommonBytes (size_t val)
+/* ZSTD_updateRep() :
+ * updates in-place @rep (array of repeat offsets)
+ * @offBase : sum-type, using numeric representation of ZSTD_storeSeq()
+ */
+MEM_STATIC void
+ZSTD_updateRep(U32 rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0)
 {
-    if (MEM_isLittleEndian()) {
-        if (MEM_64bits()) {
-#       if defined(_MSC_VER) && defined(_WIN64)
-            unsigned long r = 0;
-            return _BitScanForward64( &r, (U64)val ) ? (unsigned)(r >> 3) : 0;
-#       elif defined(__GNUC__) && (__GNUC__ >= 4)
-            return (__builtin_ctzll((U64)val) >> 3);
-#       else
-            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
-                                                     0, 3, 1, 3, 1, 4, 2, 7,
-                                                     0, 2, 3, 6, 1, 5, 3, 5,
-                                                     1, 3, 4, 4, 2, 5, 6, 7,
-                                                     7, 0, 1, 2, 3, 3, 4, 6,
-                                                     2, 6, 5, 5, 3, 4, 5, 6,
-                                                     7, 1, 2, 4, 6, 4, 4, 5,
-                                                     7, 2, 6, 5, 7, 6, 7, 7 };
-            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
-#       endif
-        } else { /* 32 bits */
-#       if defined(_MSC_VER)
-            unsigned long r=0;
-            return _BitScanForward( &r, (U32)val ) ? (unsigned)(r >> 3) : 0;
-#       elif defined(__GNUC__) && (__GNUC__ >= 3)
-            return (__builtin_ctz((U32)val) >> 3);
-#       else
-            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
-                                                     3, 2, 2, 1, 3, 2, 0, 1,
-                                                     3, 3, 1, 2, 2, 2, 2, 0,
-                                                     3, 1, 2, 0, 1, 0, 1, 1 };
-            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
-#       endif
+    if (OFFBASE_IS_OFFSET(offBase)) {  /* full offset */
+        rep[2] = rep[1];
+        rep[1] = rep[0];
+        rep[0] = OFFBASE_TO_OFFSET(offBase);
+    } else {   /* repcode */
+        U32 const repCode = OFFBASE_TO_REPCODE(offBase) - 1 + ll0;
+        if (repCode > 0) {  /* note : if repCode==0, no change */
+            U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
+            rep[2] = (repCode >= 2) ? rep[1] : rep[2];
+            rep[1] = rep[0];
+            rep[0] = currentOffset;
+        } else {   /* repCode == 0 */
+            /* nothing to do */
         }
-    } else {  /* Big-Endian CPU */
-        if (MEM_64bits()) {
-#       if defined(_MSC_VER) && defined(_WIN64)
-            unsigned long r = 0;
-            return _BitScanReverse64( &r, val ) ? (unsigned)(r >> 3) : 0;
-#       elif defined(__GNUC__) && (__GNUC__ >= 4)
-            return (__builtin_clzll(val) >> 3);
-#       else
-            unsigned r;
-            const unsigned n32 = sizeof(size_t)*4;   /* calculate this way due to compiler complaining in 32-bits mode */
-            if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
-            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
-            r += (!val);
-            return r;
-#       endif
-        } else { /* 32 bits */
-#       if defined(_MSC_VER)
-            unsigned long r = 0;
-            return _BitScanReverse( &r, (unsigned long)val ) ? (unsigned)(r >> 3) : 0;
-#       elif defined(__GNUC__) && (__GNUC__ >= 3)
-            return (__builtin_clz((U32)val) >> 3);
-#       else
-            unsigned r;
-            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
-            r += (!val);
-            return r;
-#       endif
-    }   }
+    }
+}
+
+typedef struct repcodes_s {
+    U32 rep[3];
+} repcodes_t;
+
+MEM_STATIC repcodes_t
+ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0)
+{
+    repcodes_t newReps;
+    ZSTD_memcpy(&newReps, rep, sizeof(newReps));
+    ZSTD_updateRep(newReps.rep, offBase, ll0);
+    return newReps;
 }
 
 
+/*-*************************************
+*  Match length counter
+***************************************/
 MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
 {
     const BYTE* const pStart = pIn;
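
The offBase rework above replaces the old "offset + ZSTD_REP_MOVE" convention with an explicit sum type: values 1..3 name a repcode, anything above ZSTD_REP_NUM carries a real offset shifted by ZSTD_REP_NUM. The standalone sketch below restates the macros from the hunk above to show the round-trip; it is illustrative only, not part of extzstd or the zstd sources.

```c
#include <assert.h>
#include <stdio.h>

/* Restated from the diff above; ZSTD_REP_NUM is 3 in zstd. */
#define ZSTD_REP_NUM 3
#define REPCODE_TO_OFFBASE(r) (assert((r)>=1), assert((r)<=ZSTD_REP_NUM), (r))
#define OFFSET_TO_OFFBASE(o)  (assert((o)>0), (o) + ZSTD_REP_NUM)
#define OFFBASE_IS_OFFSET(o)  ((o) > ZSTD_REP_NUM)
#define OFFBASE_IS_REPCODE(o) (1 <= (o) && (o) <= ZSTD_REP_NUM)
#define OFFBASE_TO_OFFSET(o)  (assert(OFFBASE_IS_OFFSET(o)), (o) - ZSTD_REP_NUM)
#define OFFBASE_TO_REPCODE(o) (assert(OFFBASE_IS_REPCODE(o)), (o))

int main(void)
{
    unsigned const offBase = OFFSET_TO_OFFBASE(1000); /* real offset 1000 -> 1003 */
    unsigned const repBase = REPCODE_TO_OFFBASE(2);   /* repcode ID 2 -> 2 */

    /* The two value ranges never collide, so one U32 can carry either case. */
    assert(OFFBASE_IS_OFFSET(offBase) && OFFBASE_TO_OFFSET(offBase) == 1000);
    assert(OFFBASE_IS_REPCODE(repBase) && OFFBASE_TO_REPCODE(repBase) == 2);
    printf("offBase=%u repBase=%u\n", offBase, repBase);
    return 0;
}
```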
@@ -603,31 +796,43 @@ ZSTD_count_2segments(const BYTE* ip, const BYTE* match,
 *  Hashes
 ***************************************/
 static const U32 prime3bytes = 506832829U;
-static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes) >> (32-h) ; }
-MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */
+static U32 ZSTD_hash3(U32 u, U32 h, U32 s) { assert(h <= 32); return (((u << (32-24)) * prime3bytes) ^ s) >> (32-h) ; }
+MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h, 0); } /* only in zstd_opt.h */
+MEM_STATIC size_t ZSTD_hash3PtrS(const void* ptr, U32 h, U32 s) { return ZSTD_hash3(MEM_readLE32(ptr), h, s); }
 
 static const U32 prime4bytes = 2654435761U;
-static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
-static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }
+static U32 ZSTD_hash4(U32 u, U32 h, U32 s) { assert(h <= 32); return ((u * prime4bytes) ^ s) >> (32-h) ; }
+static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_readLE32(ptr), h, 0); }
+static size_t ZSTD_hash4PtrS(const void* ptr, U32 h, U32 s) { return ZSTD_hash4(MEM_readLE32(ptr), h, s); }
 
 static const U64 prime5bytes = 889523592379ULL;
-static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64-40)) * prime5bytes) >> (64-h)) ; }
-static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }
+static size_t ZSTD_hash5(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u << (64-40)) * prime5bytes) ^ s) >> (64-h)) ; }
+static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h, 0); }
+static size_t ZSTD_hash5PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash5(MEM_readLE64(p), h, s); }
 
 static const U64 prime6bytes = 227718039650203ULL;
-static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; }
-static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
+static size_t ZSTD_hash6(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u << (64-48)) * prime6bytes) ^ s) >> (64-h)) ; }
+static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h, 0); }
+static size_t ZSTD_hash6PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash6(MEM_readLE64(p), h, s); }
 
 static const U64 prime7bytes = 58295818150454627ULL;
-static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64-56)) * prime7bytes) >> (64-h)) ; }
-static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }
+static size_t ZSTD_hash7(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u << (64-56)) * prime7bytes) ^ s) >> (64-h)) ; }
+static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h, 0); }
+static size_t ZSTD_hash7PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash7(MEM_readLE64(p), h, s); }
 
 static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
-static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
-static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
+static size_t ZSTD_hash8(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u) * prime8bytes) ^ s) >> (64-h)) ; }
+static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h, 0); }
+static size_t ZSTD_hash8PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash8(MEM_readLE64(p), h, s); }
+
 
-MEM_STATIC size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
+MEM_STATIC FORCE_INLINE_ATTR
+size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
 {
+    /* Although some of these hashes do support hBits up to 64, some do not.
+     * To be on the safe side, always avoid hBits > 32. */
+    assert(hBits <= 32);
+
     switch(mls)
     {
     default:
@@ -639,6 +844,24 @@ MEM_STATIC size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
     }
 }
 
+MEM_STATIC FORCE_INLINE_ATTR
+size_t ZSTD_hashPtrSalted(const void* p, U32 hBits, U32 mls, const U64 hashSalt) {
+    /* Although some of these hashes do support hBits up to 64, some do not.
+     * To be on the safe side, always avoid hBits > 32. */
+    assert(hBits <= 32);
+
+    switch(mls)
+    {
+    default:
+    case 4: return ZSTD_hash4PtrS(p, hBits, (U32)hashSalt);
+    case 5: return ZSTD_hash5PtrS(p, hBits, hashSalt);
+    case 6: return ZSTD_hash6PtrS(p, hBits, hashSalt);
+    case 7: return ZSTD_hash7PtrS(p, hBits, hashSalt);
+    case 8: return ZSTD_hash8PtrS(p, hBits, hashSalt);
+    }
+}
+
+
 /** ZSTD_ipow() :
  * Return base^exponent.
  */
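
Every ZSTD_hashN() above gained a salt parameter that is XORed into the product before the final shift; the row-based matchfinder uses a fresh salt to reuse an existing tag table without clearing it. Below is a minimal standalone sketch of that multiply-XOR-shift shape with made-up inputs; it is not the gem's API.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static const uint32_t prime4bytes = 2654435761U; /* same prime as the diff */

/* Same shape as ZSTD_hash4 above: multiply, XOR the salt, keep top h bits. */
static uint32_t hash4(uint32_t u, uint32_t h, uint32_t s)
{
    assert(h <= 32);
    return ((u * prime4bytes) ^ s) >> (32 - h);
}

int main(void)
{
    uint32_t const v = 0x12345678;
    /* Same input, different salts: the bucket mapping changes wholesale,
     * which is what invalidates stale tag-table entries for free. */
    printf("salt=0          -> bucket %u\n", hash4(v, 16, 0));
    printf("salt=0xDEADBEEF -> bucket %u\n", hash4(v, 16, 0xDEADBEEFu));
    return 0;
}
```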
@@ -723,6 +946,13 @@ MEM_STATIC void ZSTD_window_clear(ZSTD_window_t* window)
     window->dictLimit = end;
 }
 
+MEM_STATIC U32 ZSTD_window_isEmpty(ZSTD_window_t const window)
+{
+    return window.dictLimit == ZSTD_WINDOW_START_INDEX &&
+           window.lowLimit == ZSTD_WINDOW_START_INDEX &&
+           (window.nextSrc - window.base) == ZSTD_WINDOW_START_INDEX;
+}
+
 /**
  * ZSTD_window_hasExtDict():
  * Returns non-zero if the window has a non-empty extDict.
@@ -742,20 +972,76 @@ MEM_STATIC ZSTD_dictMode_e ZSTD_matchState_dictMode(const ZSTD_matchState_t *ms)
     return ZSTD_window_hasExtDict(ms->window) ?
         ZSTD_extDict :
         ms->dictMatchState != NULL ?
-            ZSTD_dictMatchState :
+            (ms->dictMatchState->dedicatedDictSearch ? ZSTD_dedicatedDictSearch : ZSTD_dictMatchState) :
             ZSTD_noDict;
 }
 
+/* Defining this macro to non-zero tells zstd to run the overflow correction
+ * code much more frequently. This is very inefficient, and should only be
+ * used for tests and fuzzers.
+ */
+#ifndef ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY
+#  ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+#    define ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY 1
+#  else
+#    define ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY 0
+#  endif
+#endif
+
+/**
+ * ZSTD_window_canOverflowCorrect():
+ * Returns non-zero if the indices are large enough for overflow correction
+ * to work correctly without impacting compression ratio.
+ */
+MEM_STATIC U32 ZSTD_window_canOverflowCorrect(ZSTD_window_t const window,
+                                              U32 cycleLog,
+                                              U32 maxDist,
+                                              U32 loadedDictEnd,
+                                              void const* src)
+{
+    U32 const cycleSize = 1u << cycleLog;
+    U32 const curr = (U32)((BYTE const*)src - window.base);
+    U32 const minIndexToOverflowCorrect = cycleSize
+                                        + MAX(maxDist, cycleSize)
+                                        + ZSTD_WINDOW_START_INDEX;
+
+    /* Adjust the min index to backoff the overflow correction frequency,
+     * so we don't waste too much CPU in overflow correction. If this
+     * computation overflows we don't really care, we just need to make
+     * sure it is at least minIndexToOverflowCorrect.
+     */
+    U32 const adjustment = window.nbOverflowCorrections + 1;
+    U32 const adjustedIndex = MAX(minIndexToOverflowCorrect * adjustment,
+                                  minIndexToOverflowCorrect);
+    U32 const indexLargeEnough = curr > adjustedIndex;
+
+    /* Only overflow correct early if the dictionary is invalidated already,
+     * so we don't hurt compression ratio.
+     */
+    U32 const dictionaryInvalidated = curr > maxDist + loadedDictEnd;
+
+    return indexLargeEnough && dictionaryInvalidated;
+}
+
 /**
  * ZSTD_window_needOverflowCorrection():
  * Returns non-zero if the indices are getting too large and need overflow
  * protection.
  */
 MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
+                                                  U32 cycleLog,
+                                                  U32 maxDist,
+                                                  U32 loadedDictEnd,
+                                                  void const* src,
                                                   void const* srcEnd)
 {
-    U32 const current = (U32)((BYTE const*)srcEnd - window.base);
-    return current > ZSTD_CURRENT_MAX;
+    U32 const curr = (U32)((BYTE const*)srcEnd - window.base);
+    if (ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY) {
+        if (ZSTD_window_canOverflowCorrect(window, cycleLog, maxDist, loadedDictEnd, src)) {
+            return 1;
+        }
+    }
+    return curr > ZSTD_CURRENT_MAX;
 }
 
 /**
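
ZSTD_window_canOverflowCorrect() only allows early correction once curr clears cycleSize + MAX(maxDist, cycleSize) + ZSTD_WINDOW_START_INDEX, scaled by the number of corrections already performed, so repeated corrections back off. A small arithmetic sketch of that threshold with illustrative parameter values follows; it is not the gem's API.

```c
#include <stdint.h>
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define ZSTD_WINDOW_START_INDEX 2

int main(void)
{
    uint32_t const cycleLog = 17, windowLog = 20;  /* assumed values */
    uint32_t const cycleSize = 1u << cycleLog;
    uint32_t const maxDist = 1u << windowLog;
    uint32_t const minIndex = cycleSize + MAX(maxDist, cycleSize)
                            + ZSTD_WINDOW_START_INDEX;
    uint32_t n;
    /* Threshold grows with nbOverflowCorrections, capping CPU spent there. */
    for (n = 0; n < 4; n++)
        printf("after %u corrections: correct early only once curr > %u\n",
               n, MAX(minIndex * (n + 1), minIndex));
    return 0;
}
```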
@@ -766,7 +1052,6 @@ MEM_STATIC U32 ZSTD_window_needOverflowCorrection(ZSTD_window_t const window,
  *
  * The least significant cycleLog bits of the indices must remain the same,
  * which may be 0. Every index up to maxDist in the past must be valid.
- * NOTE: (maxDist & cycleMask) must be zero.
  */
 MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
                                            U32 maxDist, void const* src)
@@ -790,32 +1075,52 @@ MEM_STATIC U32 ZSTD_window_correctOverflow(ZSTD_window_t* window, U32 cycleLog,
      * 3. (cctx->lowLimit + 1<<windowLog) < 1<<32:
      *    windowLog <= 31 ==> 3<<29 + 1<<windowLog < 7<<29 < 1<<32.
      */
-    U32 const cycleMask = (1U << cycleLog) - 1;
-    U32 const current = (U32)((BYTE const*)src - window->base);
-    U32 const currentCycle0 = current & cycleMask;
-    /* Exclude zero so that newCurrent - maxDist >= 1. */
-    U32 const currentCycle1 = currentCycle0 == 0 ? (1U << cycleLog) : currentCycle0;
-    U32 const newCurrent = currentCycle1 + maxDist;
-    U32 const correction = current - newCurrent;
-    assert((maxDist & cycleMask) == 0);
-    assert(current > newCurrent);
-    /* Loose bound, should be around 1<<29 (see above) */
-    assert(correction > 1<<28);
+    U32 const cycleSize = 1u << cycleLog;
+    U32 const cycleMask = cycleSize - 1;
+    U32 const curr = (U32)((BYTE const*)src - window->base);
+    U32 const currentCycle = curr & cycleMask;
+    /* Ensure newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX. */
+    U32 const currentCycleCorrection = currentCycle < ZSTD_WINDOW_START_INDEX
+                                     ? MAX(cycleSize, ZSTD_WINDOW_START_INDEX)
+                                     : 0;
+    U32 const newCurrent = currentCycle
+                         + currentCycleCorrection
+                         + MAX(maxDist, cycleSize);
+    U32 const correction = curr - newCurrent;
+    /* maxDist must be a power of two so that:
+     *   (newCurrent & cycleMask) == (curr & cycleMask)
+     * This is required to not corrupt the chains / binary tree.
+     */
+    assert((maxDist & (maxDist - 1)) == 0);
+    assert((curr & cycleMask) == (newCurrent & cycleMask));
+    assert(curr > newCurrent);
+    if (!ZSTD_WINDOW_OVERFLOW_CORRECT_FREQUENTLY) {
+        /* Loose bound, should be around 1<<29 (see above) */
+        assert(correction > 1<<28);
+    }
 
     window->base += correction;
     window->dictBase += correction;
-    if (window->lowLimit <= correction) window->lowLimit = 1;
-    else window->lowLimit -= correction;
-    if (window->dictLimit <= correction) window->dictLimit = 1;
-    else window->dictLimit -= correction;
+    if (window->lowLimit < correction + ZSTD_WINDOW_START_INDEX) {
+        window->lowLimit = ZSTD_WINDOW_START_INDEX;
+    } else {
+        window->lowLimit -= correction;
+    }
+    if (window->dictLimit < correction + ZSTD_WINDOW_START_INDEX) {
+        window->dictLimit = ZSTD_WINDOW_START_INDEX;
+    } else {
+        window->dictLimit -= correction;
+    }
 
     /* Ensure we can still reference the full window. */
     assert(newCurrent >= maxDist);
-    assert(newCurrent - maxDist >= 1);
+    assert(newCurrent - maxDist >= ZSTD_WINDOW_START_INDEX);
     /* Ensure that lowLimit and dictLimit didn't underflow. */
     assert(window->lowLimit <= newCurrent);
     assert(window->dictLimit <= newCurrent);
 
+    ++window->nbOverflowCorrections;
+
     DEBUGLOG(4, "Correction of 0x%x bytes to lowLimit=0x%x", correction,
              window->lowLimit);
     return correction;
@@ -904,10 +1209,15 @@ ZSTD_checkDictValidity(const ZSTD_window_t* window,
                   (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
         assert(blockEndIdx >= loadedDictEnd);
 
-        if (blockEndIdx > loadedDictEnd + maxDist) {
+        if (blockEndIdx > loadedDictEnd + maxDist || loadedDictEnd != window->dictLimit) {
             /* On reaching window size, dictionaries are invalidated.
              * For simplification, if window size is reached anywhere within next block,
              * the dictionary is invalidated for the full block.
+             *
+             * We also have to invalidate the dictionary if ZSTD_window_update() has detected
+             * non-contiguous segments, which means that loadedDictEnd != window->dictLimit.
+             * loadedDictEnd may be 0, if forceWindow is true, but in that case we never use
+             * dictMatchState, so setting it to NULL is not a problem.
              */
             DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)");
             *loadedDictEndPtr = 0;
@@ -919,12 +1229,14 @@ ZSTD_checkDictValidity(const ZSTD_window_t* window,
 }
 
 MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) {
-    memset(window, 0, sizeof(*window));
-    window->base = (BYTE const*)"";
-    window->dictBase = (BYTE const*)"";
-    window->dictLimit = 1;    /* start from 1, so that 1st position is valid */
-    window->lowLimit = 1;     /* it ensures first and later CCtx usages compress the same */
-    window->nextSrc = window->base + 1;   /* see issue #1241 */
+    ZSTD_memset(window, 0, sizeof(*window));
+    window->base = (BYTE const*)" ";
+    window->dictBase = (BYTE const*)" ";
+    ZSTD_STATIC_ASSERT(ZSTD_DUBT_UNSORTED_MARK < ZSTD_WINDOW_START_INDEX); /* Start above ZSTD_DUBT_UNSORTED_MARK */
+    window->dictLimit = ZSTD_WINDOW_START_INDEX;    /* start from >0, so that 1st position is valid */
+    window->lowLimit = ZSTD_WINDOW_START_INDEX;     /* it ensures first and later CCtx usages compress the same */
+    window->nextSrc = window->base + ZSTD_WINDOW_START_INDEX;   /* see issue #1241 */
+    window->nbOverflowCorrections = 0;
 }
 
 /**
@@ -935,7 +1247,8 @@ MEM_STATIC void ZSTD_window_init(ZSTD_window_t* window) {
  * Returns non-zero if the segment is contiguous.
  */
 MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
-                                  void const* src, size_t srcSize)
+                                  void const* src, size_t srcSize,
+                                  int forceNonContiguous)
 {
     BYTE const* const ip = (BYTE const*)src;
     U32 contiguous = 1;
@@ -945,7 +1258,7 @@ MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
     assert(window->base != NULL);
     assert(window->dictBase != NULL);
     /* Check if blocks follow each other */
-    if (src != window->nextSrc) {
+    if (src != window->nextSrc || forceNonContiguous) {
         /* not contiguous */
         size_t const distanceFromBase = (size_t)(window->nextSrc - window->base);
         DEBUGLOG(5, "Non contiguous blocks, new segment starts at %u", window->dictLimit);
@@ -973,25 +1286,32 @@ MEM_STATIC U32 ZSTD_window_update(ZSTD_window_t* window,
 /**
  * Returns the lowest allowed match index. It may either be in the ext-dict or the prefix.
  */
-MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 current, unsigned windowLog)
+MEM_STATIC U32 ZSTD_getLowestMatchIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
 {
-    U32 const maxDistance = 1U << windowLog;
-    U32 const lowestValid = ms->window.lowLimit;
-    U32 const withinWindow = (current - lowestValid > maxDistance) ? current - maxDistance : lowestValid;
-    U32 const isDictionary = (ms->loadedDictEnd != 0);
-    U32 const matchLowest = isDictionary ? lowestValid : withinWindow;
+    U32 const maxDistance = 1U << windowLog;
+    U32 const lowestValid = ms->window.lowLimit;
+    U32 const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
+    U32 const isDictionary = (ms->loadedDictEnd != 0);
+    /* When using a dictionary the entire dictionary is valid if a single byte of the dictionary
+     * is within the window. We invalidate the dictionary (and set loadedDictEnd to 0) when it isn't
+     * valid for the entire block. So this check is sufficient to find the lowest valid match index.
+     */
+    U32 const matchLowest = isDictionary ? lowestValid : withinWindow;
     return matchLowest;
 }
 
 /**
  * Returns the lowest allowed match index in the prefix.
  */
-MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 current, unsigned windowLog)
+MEM_STATIC U32 ZSTD_getLowestPrefixIndex(const ZSTD_matchState_t* ms, U32 curr, unsigned windowLog)
 {
     U32 const maxDistance = 1U << windowLog;
     U32 const lowestValid = ms->window.dictLimit;
-    U32 const withinWindow = (current - lowestValid > maxDistance) ? current - maxDistance : lowestValid;
+    U32 const withinWindow = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
     U32 const isDictionary = (ms->loadedDictEnd != 0);
+    /* When computing the lowest prefix index we need to take the dictionary into account to handle
+     * the edge case where the dictionary and the source are contiguous in memory.
+     */
     U32 const matchLowest = isDictionary ? lowestValid : withinWindow;
     return matchLowest;
 }
@@ -1029,6 +1349,42 @@ MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)
 
 #endif
 
+/* Short Cache */
+
+/* Normally, zstd matchfinders follow this flow:
+ * 1. Compute hash at ip
+ * 2. Load index from hashTable[hash]
+ * 3. Check if *ip == *(base + index)
+ * In dictionary compression, loading *(base + index) is often an L2 or even L3 miss.
+ *
+ * Short cache is an optimization which allows us to avoid step 3 most of the time
+ * when the data doesn't actually match. With short cache, the flow becomes:
+ * 1. Compute (hash, currentTag) at ip. currentTag is an 8-bit independent hash at ip.
+ * 2. Load (index, matchTag) from hashTable[hash]. See ZSTD_writeTaggedIndex to understand how this works.
+ * 3. Only if currentTag == matchTag, check *ip == *(base + index). Otherwise, continue.
+ *
+ * Currently, short cache is only implemented in CDict hashtables. Thus, its use is limited to
+ * dictMatchState matchfinders.
+ */
+#define ZSTD_SHORT_CACHE_TAG_BITS 8
+#define ZSTD_SHORT_CACHE_TAG_MASK ((1u << ZSTD_SHORT_CACHE_TAG_BITS) - 1)
+
+/* Helper function for ZSTD_fillHashTable and ZSTD_fillDoubleHashTable.
+ * Unpacks hashAndTag into (hash, tag), then packs (index, tag) into hashTable[hash]. */
+MEM_STATIC void ZSTD_writeTaggedIndex(U32* const hashTable, size_t hashAndTag, U32 index) {
+    size_t const hash = hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS;
+    U32 const tag = (U32)(hashAndTag & ZSTD_SHORT_CACHE_TAG_MASK);
+    assert(index >> (32 - ZSTD_SHORT_CACHE_TAG_BITS) == 0);
+    hashTable[hash] = (index << ZSTD_SHORT_CACHE_TAG_BITS) | tag;
+}
+
+/* Helper function for short cache matchfinders.
+ * Unpacks tag1 and tag2 from lower bits of packedTag1 and packedTag2, then checks if the tags match. */
+MEM_STATIC int ZSTD_comparePackedTags(size_t packedTag1, size_t packedTag2) {
+    U32 const tag1 = packedTag1 & ZSTD_SHORT_CACHE_TAG_MASK;
+    U32 const tag2 = packedTag2 & ZSTD_SHORT_CACHE_TAG_MASK;
+    return tag1 == tag2;
+}
 
 #if defined (__cplusplus)
 }
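
The short-cache helpers above pack a 24-bit match index and an 8-bit tag into a single U32 table slot, so a one-byte tag comparison can reject most candidates before paying for the cache-missing load of *(base + index). Below is a self-contained round-trip of that packing with hypothetical values; it is not the gem's API.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ZSTD_SHORT_CACHE_TAG_BITS 8
#define ZSTD_SHORT_CACHE_TAG_MASK ((1u << ZSTD_SHORT_CACHE_TAG_BITS) - 1)

int main(void)
{
    static uint32_t hashTable[1u << 12];       /* 12-bit table, zero-initialized */
    size_t const hashAndTag = 0xABCDE;         /* low 8 bits act as the tag */
    size_t const hash = hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS;  /* 0xABC */
    uint32_t const tag = (uint32_t)(hashAndTag & ZSTD_SHORT_CACHE_TAG_MASK);
    uint32_t const index = 123456;

    /* Write side, as in ZSTD_writeTaggedIndex: index must fit in 24 bits. */
    assert(index >> (32 - ZSTD_SHORT_CACHE_TAG_BITS) == 0);
    hashTable[hash] = (index << ZSTD_SHORT_CACHE_TAG_BITS) | tag;

    /* Read side: compare tags first; only a match warrants the real probe. */
    {   uint32_t const packed = hashTable[hash];
        printf("index=%u tagMatch=%d\n",
               packed >> ZSTD_SHORT_CACHE_TAG_BITS,
               (packed & ZSTD_SHORT_CACHE_TAG_MASK) == tag);
    }
    return 0;
}
```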
@@ -1045,7 +1401,6 @@ MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)
  * assumptions : magic number supposed already checked
  * and dictSize >= 8 */
 size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
-                         short* offcodeNCount, unsigned* offcodeMaxValue,
                          const void* const dict, size_t dictSize);
 
 void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs);
@@ -1061,7 +1416,7 @@ void ZSTD_reset_compressedBlockState(ZSTD_compressedBlockState_t* bs);
  * Note: srcSizeHint == 0 means 0!
  */
 ZSTD_compressionParameters ZSTD_getCParamsFromCCtxParams(
-        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize);
+        const ZSTD_CCtx_params* CCtxParams, U64 srcSizeHint, size_t dictSize, ZSTD_cParamMode_e mode);
 
 /*! ZSTD_initCStream_internal() :
  *  Private use only. Init streaming operation.
@@ -1122,4 +1477,56 @@ size_t ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSe
  * condition for correct operation : hashLog > 1 */
 U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat);
 
+/** ZSTD_CCtx_trace() :
+ *  Trace the end of a compression call.
+ */
+void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize);
+
+/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of
+ * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter.
+ * Note that the block delimiter must include the last literals of the block.
+ */
+size_t
+ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
+                                               ZSTD_sequencePosition* seqPos,
+                                               const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+                                               const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch);
+
+/* Returns the number of bytes to move the current read position back by.
+ * Only non-zero if we ended up splitting a sequence.
+ * Otherwise, it may return a ZSTD error if something went wrong.
+ *
+ * This function will attempt to scan through blockSize bytes
+ * represented by the sequences in @inSeqs,
+ * storing any (partial) sequences.
+ *
+ * Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to
+ * avoid splitting a match, or to avoid splitting a match such that it would produce a match
+ * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block.
+ */
+size_t
+ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
+                                         const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+                                         const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch);
+
+
+/* ===============================================================
+ * Deprecated definitions that are still used internally to avoid
+ * deprecation warnings. These functions are exactly equivalent to
+ * their public variants, but avoid the deprecation warnings.
+ * =============================================================== */
+
+size_t ZSTD_compressBegin_usingCDict_deprecated(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
+
+size_t ZSTD_compressContinue_public(ZSTD_CCtx* cctx,
+                                    void* dst, size_t dstCapacity,
+                                    const void* src, size_t srcSize);
+
+size_t ZSTD_compressEnd_public(ZSTD_CCtx* cctx,
+                               void* dst, size_t dstCapacity,
+                               const void* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_deprecated(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+
 #endif /* ZSTD_COMPRESS_H */