zstd-ruby 0.1.0

Files changed (71)
  1. checksums.yaml +7 -0
  2. data/.gitignore +19 -0
  3. data/.gitmodules +3 -0
  4. data/.rspec +2 -0
  5. data/.travis.yml +11 -0
  6. data/CODE_OF_CONDUCT.md +74 -0
  7. data/Gemfile +4 -0
  8. data/LICENSE.txt +29 -0
  9. data/README.md +63 -0
  10. data/Rakefile +22 -0
  11. data/bin/console +14 -0
  12. data/bin/setup +8 -0
  13. data/ext/zstdruby/extconf.rb +23 -0
  14. data/ext/zstdruby/libzstd/.gitignore +2 -0
  15. data/ext/zstdruby/libzstd/Makefile +133 -0
  16. data/ext/zstdruby/libzstd/README.md +77 -0
  17. data/ext/zstdruby/libzstd/common/bitstream.h +414 -0
  18. data/ext/zstdruby/libzstd/common/entropy_common.c +227 -0
  19. data/ext/zstdruby/libzstd/common/error_private.c +43 -0
  20. data/ext/zstdruby/libzstd/common/error_private.h +76 -0
  21. data/ext/zstdruby/libzstd/common/fse.h +668 -0
  22. data/ext/zstdruby/libzstd/common/fse_decompress.c +329 -0
  23. data/ext/zstdruby/libzstd/common/huf.h +238 -0
  24. data/ext/zstdruby/libzstd/common/mem.h +372 -0
  25. data/ext/zstdruby/libzstd/common/xxhash.c +867 -0
  26. data/ext/zstdruby/libzstd/common/xxhash.h +309 -0
  27. data/ext/zstdruby/libzstd/common/zstd_common.c +77 -0
  28. data/ext/zstdruby/libzstd/common/zstd_errors.h +60 -0
  29. data/ext/zstdruby/libzstd/common/zstd_internal.h +270 -0
  30. data/ext/zstdruby/libzstd/compress/fse_compress.c +850 -0
  31. data/ext/zstdruby/libzstd/compress/huf_compress.c +609 -0
  32. data/ext/zstdruby/libzstd/compress/zstd_compress.c +3291 -0
  33. data/ext/zstdruby/libzstd/compress/zstd_opt.h +919 -0
  34. data/ext/zstdruby/libzstd/decompress/huf_decompress.c +885 -0
  35. data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +2154 -0
  36. data/ext/zstdruby/libzstd/deprecated/zbuff.h +210 -0
  37. data/ext/zstdruby/libzstd/deprecated/zbuff_compress.c +145 -0
  38. data/ext/zstdruby/libzstd/deprecated/zbuff_decompress.c +74 -0
  39. data/ext/zstdruby/libzstd/dictBuilder/divsufsort.c +1913 -0
  40. data/ext/zstdruby/libzstd/dictBuilder/divsufsort.h +67 -0
  41. data/ext/zstdruby/libzstd/dictBuilder/zdict.c +1012 -0
  42. data/ext/zstdruby/libzstd/dictBuilder/zdict.h +111 -0
  43. data/ext/zstdruby/libzstd/dll/example/Makefile +47 -0
  44. data/ext/zstdruby/libzstd/dll/example/README.md +69 -0
  45. data/ext/zstdruby/libzstd/dll/example/build_package.bat +17 -0
  46. data/ext/zstdruby/libzstd/dll/example/fullbench-dll.sln +25 -0
  47. data/ext/zstdruby/libzstd/dll/example/fullbench-dll.vcxproj +179 -0
  48. data/ext/zstdruby/libzstd/dll/libzstd.def +86 -0
  49. data/ext/zstdruby/libzstd/legacy/zstd_legacy.h +259 -0
  50. data/ext/zstdruby/libzstd/legacy/zstd_v01.c +2095 -0
  51. data/ext/zstdruby/libzstd/legacy/zstd_v01.h +80 -0
  52. data/ext/zstdruby/libzstd/legacy/zstd_v02.c +3518 -0
  53. data/ext/zstdruby/libzstd/legacy/zstd_v02.h +79 -0
  54. data/ext/zstdruby/libzstd/legacy/zstd_v03.c +3159 -0
  55. data/ext/zstdruby/libzstd/legacy/zstd_v03.h +79 -0
  56. data/ext/zstdruby/libzstd/legacy/zstd_v04.c +3795 -0
  57. data/ext/zstdruby/libzstd/legacy/zstd_v04.h +128 -0
  58. data/ext/zstdruby/libzstd/legacy/zstd_v05.c +4056 -0
  59. data/ext/zstdruby/libzstd/legacy/zstd_v05.h +149 -0
  60. data/ext/zstdruby/libzstd/legacy/zstd_v06.c +4167 -0
  61. data/ext/zstdruby/libzstd/legacy/zstd_v06.h +159 -0
  62. data/ext/zstdruby/libzstd/legacy/zstd_v07.c +4540 -0
  63. data/ext/zstdruby/libzstd/legacy/zstd_v07.h +173 -0
  64. data/ext/zstdruby/libzstd/libzstd.pc.in +14 -0
  65. data/ext/zstdruby/libzstd/zstd.h +673 -0
  66. data/ext/zstdruby/zstdruby.c +117 -0
  67. data/ext/zstdruby/zstdruby.h +6 -0
  68. data/lib/zstd-ruby.rb +6 -0
  69. data/lib/zstd-ruby/version.rb +3 -0
  70. data/zstd-ruby.gemspec +37 -0
  71. metadata +170 -0
data/ext/zstdruby/libzstd/compress/zstd_compress.c
@@ -0,0 +1,3291 @@
+ /**
+ * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ */
+
+
+ /*-*************************************
+ * Dependencies
+ ***************************************/
+ #include <string.h> /* memset */
+ #include "mem.h"
+ #define XXH_STATIC_LINKING_ONLY /* XXH64_state_t */
+ #include "xxhash.h" /* XXH_reset, update, digest */
+ #define FSE_STATIC_LINKING_ONLY /* FSE_encodeSymbol */
+ #include "fse.h"
+ #define HUF_STATIC_LINKING_ONLY
+ #include "huf.h"
+ #include "zstd_internal.h" /* includes zstd.h */
+
+
+ /*-*************************************
+ * Constants
+ ***************************************/
+ static const U32 g_searchStrength = 8; /* control skip over incompressible data */
+ #define HASH_READ_SIZE 8
+ typedef enum { ZSTDcs_created=0, ZSTDcs_init, ZSTDcs_ongoing, ZSTDcs_ending } ZSTD_compressionStage_e;
+
+
+ /*-*************************************
+ * Helper functions
+ ***************************************/
+ #define ZSTD_STATIC_ASSERT(c) { enum { ZSTD_static_assert = 1/(int)(!!(c)) }; }
+ size_t ZSTD_compressBound(size_t srcSize) { return FSE_compressBound(srcSize) + 12; }
+
+
+ /*-*************************************
+ * Sequence storage
+ ***************************************/
+ static void ZSTD_resetSeqStore(seqStore_t* ssPtr)
+ {
+ ssPtr->lit = ssPtr->litStart;
+ ssPtr->sequences = ssPtr->sequencesStart;
+ ssPtr->longLengthID = 0;
+ }
+
+
+ /*-*************************************
+ * Context memory management
+ ***************************************/
+ struct ZSTD_CCtx_s
+ {
+ const BYTE* nextSrc; /* next block here to continue on current prefix */
+ const BYTE* base; /* All regular indexes relative to this position */
+ const BYTE* dictBase; /* extDict indexes relative to this position */
+ U32 dictLimit; /* below that point, need extDict */
+ U32 lowLimit; /* below that point, no more data */
+ U32 nextToUpdate; /* index from which to continue dictionary update */
+ U32 nextToUpdate3; /* index from which to continue dictionary update */
+ U32 hashLog3; /* dispatch table : larger == faster, more memory */
+ U32 loadedDictEnd;
+ ZSTD_compressionStage_e stage;
+ U32 rep[ZSTD_REP_NUM];
+ U32 savedRep[ZSTD_REP_NUM];
+ U32 dictID;
+ ZSTD_parameters params;
+ void* workSpace;
+ size_t workSpaceSize;
+ size_t blockSize;
+ U64 frameContentSize;
+ XXH64_state_t xxhState;
+ ZSTD_customMem customMem;
+
+ seqStore_t seqStore; /* sequences storage ptrs */
+ U32* hashTable;
+ U32* hashTable3;
+ U32* chainTable;
+ HUF_CElt* hufTable;
+ U32 flagStaticTables;
+ FSE_CTable offcodeCTable [FSE_CTABLE_SIZE_U32(OffFSELog, MaxOff)];
+ FSE_CTable matchlengthCTable[FSE_CTABLE_SIZE_U32(MLFSELog, MaxML)];
+ FSE_CTable litlengthCTable [FSE_CTABLE_SIZE_U32(LLFSELog, MaxLL)];
+ unsigned tmpCounters[1024];
+ };
+
+ ZSTD_CCtx* ZSTD_createCCtx(void)
+ {
+ return ZSTD_createCCtx_advanced(defaultCustomMem);
+ }
+
+ ZSTD_CCtx* ZSTD_createCCtx_advanced(ZSTD_customMem customMem)
+ {
+ ZSTD_CCtx* cctx;
+
+ if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem;
+ if (!customMem.customAlloc || !customMem.customFree) return NULL;
+
+ cctx = (ZSTD_CCtx*) ZSTD_malloc(sizeof(ZSTD_CCtx), customMem);
+ if (!cctx) return NULL;
+ memset(cctx, 0, sizeof(ZSTD_CCtx));
+ memcpy(&(cctx->customMem), &customMem, sizeof(customMem));
+ return cctx;
+ }
+
+ size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
+ {
+ if (cctx==NULL) return 0; /* support free on NULL */
+ ZSTD_free(cctx->workSpace, cctx->customMem);
+ ZSTD_free(cctx, cctx->customMem);
+ return 0; /* reserved as a potential error code in the future */
+ }
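
Editor's note : a minimal caller-side sketch of the allocation API above (not part of the diff). It assumes the ZSTD_customMem layout from zstd.h of this vintage ({customAlloc, customFree, opaque}) and that ZSTD_STATIC_LINKING_ONLY is defined so the advanced declarations are visible.

    /* Hypothetical usage sketch : route all context allocations through
     * a custom allocator, then release everything. */
    #define ZSTD_STATIC_LINKING_ONLY
    #include <stdlib.h>
    #include "zstd.h"

    static void* myAlloc(void* opaque, size_t size) { (void)opaque; return malloc(size); }
    static void  myFree (void* opaque, void* addr)  { (void)opaque; free(addr); }

    int main(void)
    {
        ZSTD_customMem const cmem = { myAlloc, myFree, NULL };
        ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(cmem); /* NULL on failure */
        if (cctx == NULL) return 1;
        ZSTD_freeCCtx(cctx); /* like free(), accepts NULL */
        return 0;
    }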
+
+ size_t ZSTD_sizeof_CCtx(const ZSTD_CCtx* cctx)
+ {
+ if (cctx==NULL) return 0; /* support sizeof on NULL */
+ return sizeof(*cctx) + cctx->workSpaceSize;
+ }
+
+ const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx) /* hidden interface */
+ {
+ return &(ctx->seqStore);
+ }
+
+ static ZSTD_parameters ZSTD_getParamsFromCCtx(const ZSTD_CCtx* cctx)
+ {
+ return cctx->params;
+ }
+
+
+ /** ZSTD_checkCParams() :
+ ensure param values remain within authorized range.
+ @return : 0, or an error code if one value is beyond authorized range */
+ size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
+ {
+ # define CLAMPCHECK(val,min,max) { if ((val<min) | (val>max)) return ERROR(compressionParameter_unsupported); }
+ CLAMPCHECK(cParams.windowLog, ZSTD_WINDOWLOG_MIN, ZSTD_WINDOWLOG_MAX);
+ CLAMPCHECK(cParams.chainLog, ZSTD_CHAINLOG_MIN, ZSTD_CHAINLOG_MAX);
+ CLAMPCHECK(cParams.hashLog, ZSTD_HASHLOG_MIN, ZSTD_HASHLOG_MAX);
+ CLAMPCHECK(cParams.searchLog, ZSTD_SEARCHLOG_MIN, ZSTD_SEARCHLOG_MAX);
+ { U32 const searchLengthMin = ((cParams.strategy == ZSTD_fast) | (cParams.strategy == ZSTD_greedy)) ? ZSTD_SEARCHLENGTH_MIN+1 : ZSTD_SEARCHLENGTH_MIN;
+ U32 const searchLengthMax = (cParams.strategy == ZSTD_fast) ? ZSTD_SEARCHLENGTH_MAX : ZSTD_SEARCHLENGTH_MAX-1;
+ CLAMPCHECK(cParams.searchLength, searchLengthMin, searchLengthMax); }
+ CLAMPCHECK(cParams.targetLength, ZSTD_TARGETLENGTH_MIN, ZSTD_TARGETLENGTH_MAX);
+ if ((U32)(cParams.strategy) > (U32)ZSTD_btopt2) return ERROR(compressionParameter_unsupported);
+ return 0;
+ }
+
+
+ /** ZSTD_cycleLog() :
+ * condition for correct operation : hashLog > 1 */
+ static U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat)
+ {
+ U32 const btScale = ((U32)strat >= (U32)ZSTD_btlazy2);
+ return hashLog - btScale;
+ }
+
+ /** ZSTD_adjustCParams() :
+ optimize `cPar` for a given input (`srcSize` and `dictSize`).
+ mostly downsizing to reduce memory consumption and initialization.
+ Both `srcSize` and `dictSize` are optional (use 0 if unknown),
+ but if both are 0, no optimization can be done.
+ Note : cPar is considered validated at this stage. Use ZSTD_checkCParams() to ensure that. */
+ ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize)
+ {
+ if (srcSize+dictSize == 0) return cPar; /* no size information available : no adjustment */
+
+ /* resize params, to use less memory when necessary */
+ { U32 const minSrcSize = (srcSize==0) ? 500 : 0;
+ U64 const rSize = srcSize + dictSize + minSrcSize;
+ if (rSize < ((U64)1<<ZSTD_WINDOWLOG_MAX)) {
+ U32 const srcLog = MAX(ZSTD_HASHLOG_MIN, ZSTD_highbit32((U32)(rSize)-1) + 1);
+ if (cPar.windowLog > srcLog) cPar.windowLog = srcLog;
+ } }
+ if (cPar.hashLog > cPar.windowLog) cPar.hashLog = cPar.windowLog;
+ { U32 const cycleLog = ZSTD_cycleLog(cPar.chainLog, cPar.strategy);
+ if (cycleLog > cPar.windowLog) cPar.chainLog -= (cycleLog - cPar.windowLog);
+ }
+
+ if (cPar.windowLog < ZSTD_WINDOWLOG_ABSOLUTEMIN) cPar.windowLog = ZSTD_WINDOWLOG_ABSOLUTEMIN; /* required for frame header */
+
+ return cPar;
+ }
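
Editor's note : a worked sketch of the windowLog downsizing rule above (illustrative, not library code). highbit32() here mimics ZSTD_highbit32() from zstd_internal.h; the starting windowLog of 22 is a hypothetical level default.

    #include <stdio.h>

    static unsigned highbit32(unsigned x) { unsigned n = 0; while (x >>= 1) n++; return n; }

    int main(void)
    {
        unsigned long long const srcSize = 100 * 1024;                /* 100 KB, no dictionary */
        unsigned const srcLog = highbit32((unsigned)srcSize - 1) + 1; /* = 17 */
        unsigned windowLog = 22;                                      /* hypothetical start */
        if (windowLog > srcLog) windowLog = srcLog;
        printf("windowLog : 22 -> %u\n", windowLog); /* 2^17 already covers 100 KB */
        return 0;
    }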
+
+
+ size_t ZSTD_estimateCCtxSize(ZSTD_compressionParameters cParams)
+ {
+ size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << cParams.windowLog);
+ U32 const divider = (cParams.searchLength==3) ? 3 : 4;
+ size_t const maxNbSeq = blockSize / divider;
+ size_t const tokenSpace = blockSize + 11*maxNbSeq;
+
+ size_t const chainSize = (cParams.strategy == ZSTD_fast) ? 0 : (1 << cParams.chainLog);
+ size_t const hSize = ((size_t)1) << cParams.hashLog;
+ U32 const hashLog3 = (cParams.searchLength>3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, cParams.windowLog);
+ size_t const h3Size = ((size_t)1) << hashLog3;
+ size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
+
+ size_t const optSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits))*sizeof(U32)
+ + (ZSTD_OPT_NUM+1)*(sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
+ size_t const neededSpace = tableSpace + (256*sizeof(U32)) /* huffTable */ + tokenSpace
+ + (((cParams.strategy == ZSTD_btopt) || (cParams.strategy == ZSTD_btopt2)) ? optSpace : 0);
+
+ return sizeof(ZSTD_CCtx) + neededSpace;
+ }
+
+
+ static U32 ZSTD_equivalentParams(ZSTD_parameters param1, ZSTD_parameters param2)
+ {
+ return (param1.cParams.hashLog == param2.cParams.hashLog)
+ & (param1.cParams.chainLog == param2.cParams.chainLog)
+ & (param1.cParams.strategy == param2.cParams.strategy)
+ & ((param1.cParams.searchLength==3) == (param2.cParams.searchLength==3));
+ }
+
+ /*! ZSTD_continueCCtx() :
+ reuse CCtx without reset (note : requires no dictionary) */
+ static size_t ZSTD_continueCCtx(ZSTD_CCtx* cctx, ZSTD_parameters params, U64 frameContentSize)
+ {
+ U32 const end = (U32)(cctx->nextSrc - cctx->base);
+ cctx->params = params;
+ cctx->frameContentSize = frameContentSize;
+ cctx->lowLimit = end;
+ cctx->dictLimit = end;
+ cctx->nextToUpdate = end+1;
+ cctx->stage = ZSTDcs_init;
+ cctx->dictID = 0;
+ cctx->loadedDictEnd = 0;
+ { int i; for (i=0; i<ZSTD_REP_NUM; i++) cctx->rep[i] = repStartValue[i]; }
+ cctx->seqStore.litLengthSum = 0; /* force reset of btopt stats */
+ XXH64_reset(&cctx->xxhState, 0);
+ return 0;
+ }
+
+ typedef enum { ZSTDcrp_continue, ZSTDcrp_noMemset, ZSTDcrp_fullReset } ZSTD_compResetPolicy_e;
+
+ /*! ZSTD_resetCCtx_advanced() :
+ note : 'params' must be validated */
+ static size_t ZSTD_resetCCtx_advanced (ZSTD_CCtx* zc,
+ ZSTD_parameters params, U64 frameContentSize,
+ ZSTD_compResetPolicy_e const crp)
+ {
+ if (crp == ZSTDcrp_continue)
+ if (ZSTD_equivalentParams(params, zc->params))
+ return ZSTD_continueCCtx(zc, params, frameContentSize);
+
+ { size_t const blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, (size_t)1 << params.cParams.windowLog);
+ U32 const divider = (params.cParams.searchLength==3) ? 3 : 4;
+ size_t const maxNbSeq = blockSize / divider;
+ size_t const tokenSpace = blockSize + 11*maxNbSeq;
+ size_t const chainSize = (params.cParams.strategy == ZSTD_fast) ? 0 : (1 << params.cParams.chainLog);
+ size_t const hSize = ((size_t)1) << params.cParams.hashLog;
+ U32 const hashLog3 = (params.cParams.searchLength>3) ? 0 : MIN(ZSTD_HASHLOG3_MAX, params.cParams.windowLog);
+ size_t const h3Size = ((size_t)1) << hashLog3;
+ size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
+ void* ptr;
+
+ /* Check if workSpace is large enough, alloc a new one if needed */
+ { size_t const optSpace = ((MaxML+1) + (MaxLL+1) + (MaxOff+1) + (1<<Litbits))*sizeof(U32)
+ + (ZSTD_OPT_NUM+1)*(sizeof(ZSTD_match_t) + sizeof(ZSTD_optimal_t));
+ size_t const neededSpace = tableSpace + (256*sizeof(U32)) /* huffTable */ + tokenSpace
+ + (((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) ? optSpace : 0);
+ if (zc->workSpaceSize < neededSpace) {
+ ZSTD_free(zc->workSpace, zc->customMem);
+ zc->workSpace = ZSTD_malloc(neededSpace, zc->customMem);
+ if (zc->workSpace == NULL) return ERROR(memory_allocation);
+ zc->workSpaceSize = neededSpace;
+ } }
+
+ if (crp!=ZSTDcrp_noMemset) memset(zc->workSpace, 0, tableSpace); /* reset tables only */
+ XXH64_reset(&zc->xxhState, 0);
+ zc->hashLog3 = hashLog3;
+ zc->hashTable = (U32*)(zc->workSpace);
+ zc->chainTable = zc->hashTable + hSize;
+ zc->hashTable3 = zc->chainTable + chainSize;
+ ptr = zc->hashTable3 + h3Size;
+ zc->hufTable = (HUF_CElt*)ptr;
+ zc->flagStaticTables = 0;
+ ptr = ((U32*)ptr) + 256; /* note : HUF_CElt* is incomplete type, size is simulated using U32 */
+
+ zc->nextToUpdate = 1;
+ zc->nextSrc = NULL;
+ zc->base = NULL;
+ zc->dictBase = NULL;
+ zc->dictLimit = 0;
+ zc->lowLimit = 0;
+ zc->params = params;
+ zc->blockSize = blockSize;
+ zc->frameContentSize = frameContentSize;
+ { int i; for (i=0; i<ZSTD_REP_NUM; i++) zc->rep[i] = repStartValue[i]; }
+
+ if ((params.cParams.strategy == ZSTD_btopt) || (params.cParams.strategy == ZSTD_btopt2)) {
+ zc->seqStore.litFreq = (U32*)ptr;
+ zc->seqStore.litLengthFreq = zc->seqStore.litFreq + (1<<Litbits);
+ zc->seqStore.matchLengthFreq = zc->seqStore.litLengthFreq + (MaxLL+1);
+ zc->seqStore.offCodeFreq = zc->seqStore.matchLengthFreq + (MaxML+1);
+ ptr = zc->seqStore.offCodeFreq + (MaxOff+1);
+ zc->seqStore.matchTable = (ZSTD_match_t*)ptr;
+ ptr = zc->seqStore.matchTable + ZSTD_OPT_NUM+1;
+ zc->seqStore.priceTable = (ZSTD_optimal_t*)ptr;
+ ptr = zc->seqStore.priceTable + ZSTD_OPT_NUM+1;
+ zc->seqStore.litLengthSum = 0;
+ }
+ zc->seqStore.sequencesStart = (seqDef*)ptr;
+ ptr = zc->seqStore.sequencesStart + maxNbSeq;
+ zc->seqStore.llCode = (BYTE*) ptr;
+ zc->seqStore.mlCode = zc->seqStore.llCode + maxNbSeq;
+ zc->seqStore.ofCode = zc->seqStore.mlCode + maxNbSeq;
+ zc->seqStore.litStart = zc->seqStore.ofCode + maxNbSeq;
+
+ zc->stage = ZSTDcs_init;
+ zc->dictID = 0;
+ zc->loadedDictEnd = 0;
+
+ return 0;
+ }
+ }
+
+
+ /*! ZSTD_copyCCtx() :
+ * Duplicate an existing context `srcCCtx` into another one `dstCCtx`.
+ * Only works during stage ZSTDcs_init (i.e. after creation, but before first call to ZSTD_compressContinue()).
+ * @return : 0, or an error code */
+ size_t ZSTD_copyCCtx(ZSTD_CCtx* dstCCtx, const ZSTD_CCtx* srcCCtx, unsigned long long pledgedSrcSize)
+ {
+ if (srcCCtx->stage!=ZSTDcs_init) return ERROR(stage_wrong);
+
+ memcpy(&dstCCtx->customMem, &srcCCtx->customMem, sizeof(ZSTD_customMem));
+ ZSTD_resetCCtx_advanced(dstCCtx, srcCCtx->params, pledgedSrcSize, ZSTDcrp_noMemset);
+
+ /* copy tables */
+ { size_t const chainSize = (srcCCtx->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << srcCCtx->params.cParams.chainLog);
+ size_t const hSize = ((size_t)1) << srcCCtx->params.cParams.hashLog;
+ size_t const h3Size = (size_t)1 << srcCCtx->hashLog3;
+ size_t const tableSpace = (chainSize + hSize + h3Size) * sizeof(U32);
+ memcpy(dstCCtx->workSpace, srcCCtx->workSpace, tableSpace);
+ }
+
+ /* copy dictionary offsets */
+ dstCCtx->nextToUpdate = srcCCtx->nextToUpdate;
+ dstCCtx->nextToUpdate3= srcCCtx->nextToUpdate3;
+ dstCCtx->nextSrc = srcCCtx->nextSrc;
+ dstCCtx->base = srcCCtx->base;
+ dstCCtx->dictBase = srcCCtx->dictBase;
+ dstCCtx->dictLimit = srcCCtx->dictLimit;
+ dstCCtx->lowLimit = srcCCtx->lowLimit;
+ dstCCtx->loadedDictEnd= srcCCtx->loadedDictEnd;
+ dstCCtx->dictID = srcCCtx->dictID;
+
+ /* copy entropy tables */
+ dstCCtx->flagStaticTables = srcCCtx->flagStaticTables;
+ if (srcCCtx->flagStaticTables) {
+ memcpy(dstCCtx->hufTable, srcCCtx->hufTable, 256*4);
+ memcpy(dstCCtx->litlengthCTable, srcCCtx->litlengthCTable, sizeof(dstCCtx->litlengthCTable));
+ memcpy(dstCCtx->matchlengthCTable, srcCCtx->matchlengthCTable, sizeof(dstCCtx->matchlengthCTable));
+ memcpy(dstCCtx->offcodeCTable, srcCCtx->offcodeCTable, sizeof(dstCCtx->offcodeCTable));
+ }
+
+ return 0;
+ }
+
+
+ /*! ZSTD_reduceTable() :
+ * reduce table indexes by `reducerValue` */
+ static void ZSTD_reduceTable (U32* const table, U32 const size, U32 const reducerValue)
+ {
+ U32 u;
+ for (u=0 ; u < size ; u++) {
+ if (table[u] < reducerValue) table[u] = 0;
+ else table[u] -= reducerValue;
+ }
+ }
+
+ /*! ZSTD_reduceIndex() :
+ * rescale all indexes to avoid future overflow (indexes are U32) */
+ static void ZSTD_reduceIndex (ZSTD_CCtx* zc, const U32 reducerValue)
+ {
+ { U32 const hSize = 1 << zc->params.cParams.hashLog;
+ ZSTD_reduceTable(zc->hashTable, hSize, reducerValue); }
+
+ { U32 const chainSize = (zc->params.cParams.strategy == ZSTD_fast) ? 0 : (1 << zc->params.cParams.chainLog);
+ ZSTD_reduceTable(zc->chainTable, chainSize, reducerValue); }
+
+ { U32 const h3Size = (zc->hashLog3) ? 1 << zc->hashLog3 : 0;
+ ZSTD_reduceTable(zc->hashTable3, h3Size, reducerValue); }
+ }
+
+
+ /*-*******************************************************
+ * Block entropic compression
+ *********************************************************/
+
+ /* See doc/zstd_compression_format.md for detailed format description */
+
+ size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+ {
+ if (srcSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall);
+ memcpy((BYTE*)dst + ZSTD_blockHeaderSize, src, srcSize);
+ MEM_writeLE24(dst, (U32)(srcSize << 2) + (U32)bt_raw);
+ return ZSTD_blockHeaderSize+srcSize;
+ }
+
+
+ static size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+ {
+ BYTE* const ostart = (BYTE* const)dst;
+ U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
+
+ if (srcSize + flSize > dstCapacity) return ERROR(dstSize_tooSmall);
+
+ switch(flSize)
+ {
+ case 1: /* 2 - 1 - 5 */
+ ostart[0] = (BYTE)((U32)set_basic + (srcSize<<3));
+ break;
+ case 2: /* 2 - 2 - 12 */
+ MEM_writeLE16(ostart, (U16)((U32)set_basic + (1<<2) + (srcSize<<4)));
+ break;
+ default: /* note : should not be necessary : flSize is within {1,2,3} */
+ case 3: /* 2 - 2 - 20 */
+ MEM_writeLE32(ostart, (U32)((U32)set_basic + (3<<2) + (srcSize<<4)));
+ break;
+ }
+
+ memcpy(ostart + flSize, src, srcSize);
+ return srcSize + flSize;
+ }
+
+ static size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+ {
+ BYTE* const ostart = (BYTE* const)dst;
+ U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
+
+ (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */
+
+ switch(flSize)
+ {
+ case 1: /* 2 - 1 - 5 */
+ ostart[0] = (BYTE)((U32)set_rle + (srcSize<<3));
+ break;
+ case 2: /* 2 - 2 - 12 */
+ MEM_writeLE16(ostart, (U16)((U32)set_rle + (1<<2) + (srcSize<<4)));
+ break;
+ default: /* note : should not be necessary : flSize is necessarily within {1,2,3} */
+ case 3: /* 2 - 2 - 20 */
+ MEM_writeLE32(ostart, (U32)((U32)set_rle + (3<<2) + (srcSize<<4)));
+ break;
+ }
+
+ ostart[flSize] = *(const BYTE*)src;
+ return flSize+1;
+ }
+
+
+ static size_t ZSTD_minGain(size_t srcSize) { return (srcSize >> 6) + 2; }
+
+ static size_t ZSTD_compressLiterals (ZSTD_CCtx* zc,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+ {
+ size_t const minGain = ZSTD_minGain(srcSize);
+ size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
+ BYTE* const ostart = (BYTE*)dst;
+ U32 singleStream = srcSize < 256;
+ symbolEncodingType_e hType = set_compressed;
+ size_t cLitSize;
+
+
+ /* small ? don't even attempt compression (speed opt) */
+ # define LITERAL_NOENTROPY 63
+ { size_t const minLitSize = zc->flagStaticTables ? 6 : LITERAL_NOENTROPY;
+ if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+ }
+
+ if (dstCapacity < lhSize+1) return ERROR(dstSize_tooSmall); /* not enough space for compression */
+ if (zc->flagStaticTables && (lhSize==3)) {
+ hType = set_repeat;
+ singleStream = 1;
+ cLitSize = HUF_compress1X_usingCTable(ostart+lhSize, dstCapacity-lhSize, src, srcSize, zc->hufTable);
+ } else {
+ cLitSize = singleStream ? HUF_compress1X_wksp(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, zc->tmpCounters, sizeof(zc->tmpCounters))
+ : HUF_compress4X_wksp(ostart+lhSize, dstCapacity-lhSize, src, srcSize, 255, 11, zc->tmpCounters, sizeof(zc->tmpCounters));
+ }
+
+ if ((cLitSize==0) | (cLitSize >= srcSize - minGain))
+ return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+ if (cLitSize==1)
+ return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
+
+ /* Build header */
+ switch(lhSize)
+ {
+ case 3: /* 2 - 2 - 10 - 10 */
+ { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
+ MEM_writeLE24(ostart, lhc);
+ break;
+ }
+ case 4: /* 2 - 2 - 14 - 14 */
+ { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
+ MEM_writeLE32(ostart, lhc);
+ break;
+ }
+ default: /* should not be necessary, lhSize is only {3,4,5} */
+ case 5: /* 2 - 2 - 18 - 18 */
+ { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
+ MEM_writeLE32(ostart, lhc);
+ ostart[4] = (BYTE)(cLitSize >> 10);
+ break;
+ }
+ }
+ return lhSize+cLitSize;
+ }
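
Editor's note : a standalone sketch decoding the 3-byte literals header built above (size_format 2 - 2 - 10 - 10). The field values are illustrative; set_compressed being 2 is taken from the symbolEncodingType_e enum in zstd_internal.h.

    #include <stdio.h>

    int main(void)
    {
        unsigned const hType = 2;         /* set_compressed */
        unsigned const singleStream = 0;  /* 4-stream variant */
        unsigned const srcSize  = 800;    /* regenerated size, 10 bits */
        unsigned const cLitSize = 300;    /* compressed size, 10 bits */
        unsigned const lhc = hType + ((!singleStream) << 2)
                           + (srcSize << 4) + (cLitSize << 14); /* 24 bits -> MEM_writeLE24 */
        /* decode it back, field by field */
        printf("type=%u format=%u srcSize=%u cLitSize=%u\n",
               lhc & 3, (lhc >> 2) & 3, (lhc >> 4) & 0x3FF, (lhc >> 14) & 0x3FF);
        return 0;
    }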
+
+ static const BYTE LL_Code[64] = { 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 16, 17, 17, 18, 18, 19, 19,
+ 20, 20, 20, 20, 21, 21, 21, 21,
+ 22, 22, 22, 22, 22, 22, 22, 22,
+ 23, 23, 23, 23, 23, 23, 23, 23,
+ 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24 };
+
+ static const BYTE ML_Code[128] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37,
+ 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39,
+ 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
+ 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42 };
+
+
+ void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
+ {
+ BYTE const LL_deltaCode = 19;
+ BYTE const ML_deltaCode = 36;
+ const seqDef* const sequences = seqStorePtr->sequencesStart;
+ BYTE* const llCodeTable = seqStorePtr->llCode;
+ BYTE* const ofCodeTable = seqStorePtr->ofCode;
+ BYTE* const mlCodeTable = seqStorePtr->mlCode;
+ U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+ U32 u;
+ for (u=0; u<nbSeq; u++) {
+ U32 const llv = sequences[u].litLength;
+ U32 const mlv = sequences[u].matchLength;
+ llCodeTable[u] = (llv> 63) ? (BYTE)ZSTD_highbit32(llv) + LL_deltaCode : LL_Code[llv];
+ ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
+ mlCodeTable[u] = (mlv>127) ? (BYTE)ZSTD_highbit32(mlv) + ML_deltaCode : ML_Code[mlv];
+ }
+ if (seqStorePtr->longLengthID==1)
+ llCodeTable[seqStorePtr->longLengthPos] = MaxLL;
+ if (seqStorePtr->longLengthID==2)
+ mlCodeTable[seqStorePtr->longLengthPos] = MaxML;
+ }
+
+
+ size_t ZSTD_compressSequences(ZSTD_CCtx* zc,
+ void* dst, size_t dstCapacity,
+ size_t srcSize)
+ {
+ const seqStore_t* seqStorePtr = &(zc->seqStore);
+ U32 count[MaxSeq+1];
+ S16 norm[MaxSeq+1];
+ FSE_CTable* CTable_LitLength = zc->litlengthCTable;
+ FSE_CTable* CTable_OffsetBits = zc->offcodeCTable;
+ FSE_CTable* CTable_MatchLength = zc->matchlengthCTable;
+ U32 LLtype, Offtype, MLtype; /* compressed, raw or rle */
+ const seqDef* const sequences = seqStorePtr->sequencesStart;
+ const BYTE* const ofCodeTable = seqStorePtr->ofCode;
+ const BYTE* const llCodeTable = seqStorePtr->llCode;
+ const BYTE* const mlCodeTable = seqStorePtr->mlCode;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* const oend = ostart + dstCapacity;
+ BYTE* op = ostart;
+ size_t const nbSeq = seqStorePtr->sequences - seqStorePtr->sequencesStart;
+ BYTE* seqHead;
+ BYTE scratchBuffer[1<<MAX(MLFSELog,LLFSELog)];
+
+ /* Compress literals */
+ { const BYTE* const literals = seqStorePtr->litStart;
+ size_t const litSize = seqStorePtr->lit - literals;
+ size_t const cSize = ZSTD_compressLiterals(zc, op, dstCapacity, literals, litSize);
+ if (ZSTD_isError(cSize)) return cSize;
+ op += cSize;
+ }
+
+ /* Sequences Header */
+ if ((oend-op) < 3 /*max nbSeq Size*/ + 1 /*seqHead */) return ERROR(dstSize_tooSmall);
+ if (nbSeq < 0x7F) *op++ = (BYTE)nbSeq;
+ else if (nbSeq < LONGNBSEQ) op[0] = (BYTE)((nbSeq>>8) + 0x80), op[1] = (BYTE)nbSeq, op+=2;
+ else op[0]=0xFF, MEM_writeLE16(op+1, (U16)(nbSeq - LONGNBSEQ)), op+=3;
+ if (nbSeq==0) goto _check_compressibility;
+
+ /* seqHead : flags for FSE encoding type */
+ seqHead = op++;
+
+ #define MIN_SEQ_FOR_DYNAMIC_FSE 64
+ #define MAX_SEQ_FOR_STATIC_FSE 1000
+
+ /* convert length/distances into codes */
+ ZSTD_seqToCodes(seqStorePtr);
+
+ /* CTable for Literal Lengths */
+ { U32 max = MaxLL;
+ size_t const mostFrequent = FSE_countFast_wksp(count, &max, llCodeTable, nbSeq, zc->tmpCounters);
+ if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
+ *op++ = llCodeTable[0];
+ FSE_buildCTable_rle(CTable_LitLength, (BYTE)max);
+ LLtype = set_rle;
+ } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
+ LLtype = set_repeat;
+ } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (LL_defaultNormLog-1)))) {
+ FSE_buildCTable_wksp(CTable_LitLength, LL_defaultNorm, MaxLL, LL_defaultNormLog, scratchBuffer, sizeof(scratchBuffer));
+ LLtype = set_basic;
+ } else {
+ size_t nbSeq_1 = nbSeq;
+ const U32 tableLog = FSE_optimalTableLog(LLFSELog, nbSeq, max);
+ if (count[llCodeTable[nbSeq-1]]>1) { count[llCodeTable[nbSeq-1]]--; nbSeq_1--; }
+ FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
+ { size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog); /* overflow protected */
+ if (FSE_isError(NCountSize)) return ERROR(GENERIC);
+ op += NCountSize; }
+ FSE_buildCTable_wksp(CTable_LitLength, norm, max, tableLog, scratchBuffer, sizeof(scratchBuffer));
+ LLtype = set_compressed;
+ } }
+
+ /* CTable for Offsets */
+ { U32 max = MaxOff;
+ size_t const mostFrequent = FSE_countFast_wksp(count, &max, ofCodeTable, nbSeq, zc->tmpCounters);
+ if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
+ *op++ = ofCodeTable[0];
+ FSE_buildCTable_rle(CTable_OffsetBits, (BYTE)max);
+ Offtype = set_rle;
+ } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
+ Offtype = set_repeat;
+ } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (OF_defaultNormLog-1)))) {
+ FSE_buildCTable_wksp(CTable_OffsetBits, OF_defaultNorm, MaxOff, OF_defaultNormLog, scratchBuffer, sizeof(scratchBuffer));
+ Offtype = set_basic;
+ } else {
+ size_t nbSeq_1 = nbSeq;
+ const U32 tableLog = FSE_optimalTableLog(OffFSELog, nbSeq, max);
+ if (count[ofCodeTable[nbSeq-1]]>1) { count[ofCodeTable[nbSeq-1]]--; nbSeq_1--; }
+ FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
+ { size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog); /* overflow protected */
+ if (FSE_isError(NCountSize)) return ERROR(GENERIC);
+ op += NCountSize; }
+ FSE_buildCTable_wksp(CTable_OffsetBits, norm, max, tableLog, scratchBuffer, sizeof(scratchBuffer));
+ Offtype = set_compressed;
+ } }
+
+ /* CTable for MatchLengths */
+ { U32 max = MaxML;
+ size_t const mostFrequent = FSE_countFast_wksp(count, &max, mlCodeTable, nbSeq, zc->tmpCounters);
+ if ((mostFrequent == nbSeq) && (nbSeq > 2)) {
+ *op++ = *mlCodeTable;
+ FSE_buildCTable_rle(CTable_MatchLength, (BYTE)max);
+ MLtype = set_rle;
+ } else if ((zc->flagStaticTables) && (nbSeq < MAX_SEQ_FOR_STATIC_FSE)) {
+ MLtype = set_repeat;
+ } else if ((nbSeq < MIN_SEQ_FOR_DYNAMIC_FSE) || (mostFrequent < (nbSeq >> (ML_defaultNormLog-1)))) {
+ FSE_buildCTable_wksp(CTable_MatchLength, ML_defaultNorm, MaxML, ML_defaultNormLog, scratchBuffer, sizeof(scratchBuffer));
+ MLtype = set_basic;
+ } else {
+ size_t nbSeq_1 = nbSeq;
+ const U32 tableLog = FSE_optimalTableLog(MLFSELog, nbSeq, max);
+ if (count[mlCodeTable[nbSeq-1]]>1) { count[mlCodeTable[nbSeq-1]]--; nbSeq_1--; }
+ FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max);
+ { size_t const NCountSize = FSE_writeNCount(op, oend-op, norm, max, tableLog); /* overflow protected */
+ if (FSE_isError(NCountSize)) return ERROR(GENERIC);
+ op += NCountSize; }
+ FSE_buildCTable_wksp(CTable_MatchLength, norm, max, tableLog, scratchBuffer, sizeof(scratchBuffer));
+ MLtype = set_compressed;
+ } }
+
+ *seqHead = (BYTE)((LLtype<<6) + (Offtype<<4) + (MLtype<<2));
+ zc->flagStaticTables = 0;
+
+ /* Encoding Sequences */
+ { BIT_CStream_t blockStream;
+ FSE_CState_t stateMatchLength;
+ FSE_CState_t stateOffsetBits;
+ FSE_CState_t stateLitLength;
+
+ CHECK_E(BIT_initCStream(&blockStream, op, oend-op), dstSize_tooSmall); /* not enough space remaining */
+
+ /* first symbols */
+ FSE_initCState2(&stateMatchLength, CTable_MatchLength, mlCodeTable[nbSeq-1]);
+ FSE_initCState2(&stateOffsetBits, CTable_OffsetBits, ofCodeTable[nbSeq-1]);
+ FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]);
+ BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
+ if (MEM_32bits()) BIT_flushBits(&blockStream);
+ BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
+ if (MEM_32bits()) BIT_flushBits(&blockStream);
+ BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
+ BIT_flushBits(&blockStream);
+
+ { size_t n;
+ for (n=nbSeq-2 ; n<nbSeq ; n--) { /* intentional underflow */
+ BYTE const llCode = llCodeTable[n];
+ BYTE const ofCode = ofCodeTable[n];
+ BYTE const mlCode = mlCodeTable[n];
+ U32 const llBits = LL_bits[llCode];
+ U32 const ofBits = ofCode; /* 32b*/ /* 64b*/
+ U32 const mlBits = ML_bits[mlCode];
+ /* (7)*/ /* (7)*/
+ FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode); /* 15 */ /* 15 */
+ FSE_encodeSymbol(&blockStream, &stateMatchLength, mlCode); /* 24 */ /* 24 */
+ if (MEM_32bits()) BIT_flushBits(&blockStream); /* (7)*/
+ FSE_encodeSymbol(&blockStream, &stateLitLength, llCode); /* 16 */ /* 33 */
+ if (MEM_32bits() || (ofBits+mlBits+llBits >= 64-7-(LLFSELog+MLFSELog+OffFSELog)))
+ BIT_flushBits(&blockStream); /* (7)*/
+ BIT_addBits(&blockStream, sequences[n].litLength, llBits);
+ if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
+ BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
+ if (MEM_32bits()) BIT_flushBits(&blockStream); /* (7)*/
+ BIT_addBits(&blockStream, sequences[n].offset, ofBits); /* 31 */
+ BIT_flushBits(&blockStream); /* (7)*/
+ } }
+
+ FSE_flushCState(&blockStream, &stateMatchLength);
+ FSE_flushCState(&blockStream, &stateOffsetBits);
+ FSE_flushCState(&blockStream, &stateLitLength);
+
+ { size_t const streamSize = BIT_closeCStream(&blockStream);
+ if (streamSize==0) return ERROR(dstSize_tooSmall); /* not enough space */
+ op += streamSize;
+ } }
+
+ /* check compressibility */
+ _check_compressibility:
+ { size_t const minGain = ZSTD_minGain(srcSize);
+ size_t const maxCSize = srcSize - minGain;
+ if ((size_t)(op-ostart) >= maxCSize) return 0; }
+
+ /* confirm repcodes */
+ { int i; for (i=0; i<ZSTD_REP_NUM; i++) zc->rep[i] = zc->savedRep[i]; }
+
+ return op - ostart;
+ }
+
+
+ /*! ZSTD_storeSeq() :
+ Store a sequence (literal length, literals, offset code and match length code) into seqStore_t.
+ `offsetCode` : distance to match, or 0 == repCode.
+ `matchCode` : matchLength - MINMATCH
+ */
+ MEM_STATIC void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const void* literals, U32 offsetCode, size_t matchCode)
+ {
+ #if 0 /* for debug */
+ static const BYTE* g_start = NULL;
+ const U32 pos = (U32)((const BYTE*)literals - g_start);
+ if (g_start==NULL) g_start = (const BYTE*)literals;
+ //if ((pos > 1) && (pos < 50000))
+ printf("Cpos %6u :%5u literals & match %3u bytes at distance %6u \n",
+ pos, (U32)litLength, (U32)matchCode+MINMATCH, (U32)offsetCode);
+ #endif
+ /* copy Literals */
+ ZSTD_wildcopy(seqStorePtr->lit, literals, litLength);
+ seqStorePtr->lit += litLength;
+
+ /* literal Length */
+ if (litLength>0xFFFF) { seqStorePtr->longLengthID = 1; seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); }
+ seqStorePtr->sequences[0].litLength = (U16)litLength;
+
+ /* match offset */
+ seqStorePtr->sequences[0].offset = offsetCode + 1;
+
+ /* match Length */
+ if (matchCode>0xFFFF) { seqStorePtr->longLengthID = 2; seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart); }
+ seqStorePtr->sequences[0].matchLength = (U16)matchCode;
+
+ seqStorePtr->sequences++;
+ }
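
Editor's note : a simplified, self-contained illustration of the conventions the comment above describes (offsetCode 0 means repcode, matchCode is matchLength - MINMATCH, and the stored offset is offsetCode + 1). The types here are mock, not the library's seqStore_t; MINMATCH == 3 and ZSTD_REP_MOVE == ZSTD_REP_NUM-1 == 2 are assumptions taken from zstd_internal.h.

    #include <assert.h>

    #define MINMATCH      3
    #define ZSTD_REP_MOVE 2   /* assumption : ZSTD_REP_NUM - 1 */

    typedef struct { unsigned litLength, offset, matchLength; } miniSeq;

    static void storeSeq(miniSeq* s, unsigned litLength, unsigned offsetCode, unsigned matchCode)
    {
        s->litLength   = litLength;
        s->offset      = offsetCode + 1;  /* stored values 1..3 denote repcodes 0..2 */
        s->matchLength = matchCode;       /* caller passes matchLength - MINMATCH */
    }

    int main(void)
    {
        miniSeq s;
        storeSeq(&s, 5, 1000 + ZSTD_REP_MOVE, 10 - MINMATCH); /* new offset : distance 1000 */
        assert(s.offset == 1000 + ZSTD_REP_MOVE + 1);
        storeSeq(&s, 0, 0, 10 - MINMATCH);                    /* repcode 0 */
        assert(s.offset == 1);
        return 0;
    }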
+
+
+ /*-*************************************
+ * Match length counter
+ ***************************************/
+ static unsigned ZSTD_NbCommonBytes (register size_t val)
+ {
+ if (MEM_isLittleEndian()) {
+ if (MEM_64bits()) {
+ # if defined(_MSC_VER) && defined(_WIN64)
+ unsigned long r = 0;
+ _BitScanForward64( &r, (U64)val );
+ return (unsigned)(r>>3);
+ # elif defined(__GNUC__) && (__GNUC__ >= 3)
+ return (__builtin_ctzll((U64)val) >> 3);
+ # else
+ static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 1, 3, 1, 4, 2, 7, 0, 2, 3, 6, 1, 5, 3, 5, 1, 3, 4, 4, 2, 5, 6, 7, 7, 0, 1, 2, 3, 3, 4, 6, 2, 6, 5, 5, 3, 4, 5, 6, 7, 1, 2, 4, 6, 4, 4, 5, 7, 2, 6, 5, 7, 6, 7, 7 };
+ return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
+ # endif
+ } else { /* 32 bits */
+ # if defined(_MSC_VER)
+ unsigned long r=0;
+ _BitScanForward( &r, (U32)val );
+ return (unsigned)(r>>3);
+ # elif defined(__GNUC__) && (__GNUC__ >= 3)
+ return (__builtin_ctz((U32)val) >> 3);
+ # else
+ static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0, 3, 2, 2, 1, 3, 2, 0, 1, 3, 3, 1, 2, 2, 2, 2, 0, 3, 1, 2, 0, 1, 0, 1, 1 };
+ return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
+ # endif
+ }
+ } else { /* Big Endian CPU */
+ if (MEM_64bits()) {
+ # if defined(_MSC_VER) && defined(_WIN64)
+ unsigned long r = 0;
+ _BitScanReverse64( &r, val );
+ return (unsigned)(r>>3);
+ # elif defined(__GNUC__) && (__GNUC__ >= 3)
+ return (__builtin_clzll(val) >> 3);
+ # else
+ unsigned r;
+ const unsigned n32 = sizeof(size_t)*4; /* calculate this way due to compiler complaining in 32-bits mode */
+ if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
+ if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
+ r += (!val);
+ return r;
+ # endif
+ } else { /* 32 bits */
+ # if defined(_MSC_VER)
+ unsigned long r = 0;
+ _BitScanReverse( &r, (unsigned long)val );
+ return (unsigned)(r>>3);
+ # elif defined(__GNUC__) && (__GNUC__ >= 3)
+ return (__builtin_clz((U32)val) >> 3);
+ # else
+ unsigned r;
+ if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
+ r += (!val);
+ return r;
+ # endif
+ } }
+ }
+
+
+ static size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
+ {
+ const BYTE* const pStart = pIn;
+ const BYTE* const pInLoopLimit = pInLimit - (sizeof(size_t)-1);
+
+ while (pIn < pInLoopLimit) {
+ size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);
+ if (!diff) { pIn+=sizeof(size_t); pMatch+=sizeof(size_t); continue; }
+ pIn += ZSTD_NbCommonBytes(diff);
+ return (size_t)(pIn - pStart);
+ }
+ if (MEM_64bits()) if ((pIn<(pInLimit-3)) && (MEM_read32(pMatch) == MEM_read32(pIn))) { pIn+=4; pMatch+=4; }
+ if ((pIn<(pInLimit-1)) && (MEM_read16(pMatch) == MEM_read16(pIn))) { pIn+=2; pMatch+=2; }
+ if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
+ return (size_t)(pIn - pStart);
+ }
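
Editor's note : a byte-at-a-time reference for the match-length counter above; the word-at-a-time loop (XOR plus ZSTD_NbCommonBytes) returns the same value, just faster. A standalone sketch, not library code.

    #include <stdio.h>

    static size_t countNaive(const unsigned char* pIn, const unsigned char* pMatch,
                             const unsigned char* const pInLimit)
    {
        const unsigned char* const pStart = pIn;
        while ((pIn < pInLimit) && (*pIn == *pMatch)) { pIn++; pMatch++; }
        return (size_t)(pIn - pStart);
    }

    int main(void)
    {
        const unsigned char a[] = "abcdefgh_12345";
        const unsigned char b[] = "abcdefgh_12399";
        printf("%u common bytes\n", (unsigned)countNaive(a, b, a + 14)); /* prints 12 */
        return 0;
    }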
+
+ /** ZSTD_count_2segments() :
+ * can count match length with `ip` & `match` in 2 different segments.
+ * convention : on reaching mEnd, match count continue starting from iStart
+ */
+ static size_t ZSTD_count_2segments(const BYTE* ip, const BYTE* match, const BYTE* iEnd, const BYTE* mEnd, const BYTE* iStart)
+ {
+ const BYTE* const vEnd = MIN( ip + (mEnd - match), iEnd);
+ size_t const matchLength = ZSTD_count(ip, match, vEnd);
+ if (match + matchLength != mEnd) return matchLength;
+ return matchLength + ZSTD_count(ip+matchLength, iStart, iEnd);
+ }
+
+
+ /*-*************************************
+ * Hashes
+ ***************************************/
+ static const U32 prime3bytes = 506832829U;
+ static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes) >> (32-h) ; }
+ MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */
+
+ static const U32 prime4bytes = 2654435761U;
+ static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
+ static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_read32(ptr), h); }
+
+ static const U64 prime5bytes = 889523592379ULL;
+ static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u << (64-40)) * prime5bytes) >> (64-h)) ; }
+ static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }
+
+ static const U64 prime6bytes = 227718039650203ULL;
+ static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u << (64-48)) * prime6bytes) >> (64-h)) ; }
+ static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
+
+ static const U64 prime7bytes = 58295818150454627ULL;
+ static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u << (64-56)) * prime7bytes) >> (64-h)) ; }
+ static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }
+
+ static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
+ static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
+ static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
+
+ static size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
+ {
+ switch(mls)
+ {
+ default:
+ case 4: return ZSTD_hash4Ptr(p, hBits);
+ case 5: return ZSTD_hash5Ptr(p, hBits);
+ case 6: return ZSTD_hash6Ptr(p, hBits);
+ case 7: return ZSTD_hash7Ptr(p, hBits);
+ case 8: return ZSTD_hash8Ptr(p, hBits);
+ }
+ }
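
Editor's note : a minimal sketch of the multiplicative hashing scheme above. Multiplying by a large odd constant mixes the input into the top bits, and the right shift keeps exactly hBits of them, so nearby inputs land in distant buckets. The prime is copied from the code above; hBits == 16 is a hypothetical hashLog.

    #include <stdio.h>

    static const unsigned prime4bytes = 2654435761U; /* same constant as ZSTD_hash4 */
    static unsigned hash4(unsigned u, unsigned hBits) { return (u * prime4bytes) >> (32 - hBits); }

    int main(void)
    {
        unsigned const hBits = 16;
        printf("%u %u %u\n",
               hash4(0x61626364, hBits),  /* "abcd" */
               hash4(0x61626365, hBits),  /* "abce" : differs by one bit ... */
               hash4(0x61626366, hBits)); /* ... yet buckets are spread apart */
        return 0;
    }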
909
+
910
+
911
+ /*-*************************************
912
+ * Fast Scan
913
+ ***************************************/
914
+ static void ZSTD_fillHashTable (ZSTD_CCtx* zc, const void* end, const U32 mls)
915
+ {
916
+ U32* const hashTable = zc->hashTable;
917
+ U32 const hBits = zc->params.cParams.hashLog;
918
+ const BYTE* const base = zc->base;
919
+ const BYTE* ip = base + zc->nextToUpdate;
920
+ const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
921
+ const size_t fastHashFillStep = 3;
922
+
923
+ while(ip <= iend) {
924
+ hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
925
+ ip += fastHashFillStep;
926
+ }
927
+ }
928
+
929
+
930
+ FORCE_INLINE
931
+ void ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx,
932
+ const void* src, size_t srcSize,
933
+ const U32 mls)
934
+ {
935
+ U32* const hashTable = cctx->hashTable;
936
+ U32 const hBits = cctx->params.cParams.hashLog;
937
+ seqStore_t* seqStorePtr = &(cctx->seqStore);
938
+ const BYTE* const base = cctx->base;
939
+ const BYTE* const istart = (const BYTE*)src;
940
+ const BYTE* ip = istart;
941
+ const BYTE* anchor = istart;
942
+ const U32 lowestIndex = cctx->dictLimit;
943
+ const BYTE* const lowest = base + lowestIndex;
944
+ const BYTE* const iend = istart + srcSize;
945
+ const BYTE* const ilimit = iend - HASH_READ_SIZE;
946
+ U32 offset_1=cctx->rep[0], offset_2=cctx->rep[1];
947
+ U32 offsetSaved = 0;
948
+
949
+ /* init */
950
+ ip += (ip==lowest);
951
+ { U32 const maxRep = (U32)(ip-lowest);
952
+ if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
953
+ if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
954
+ }
955
+
956
+ /* Main Search Loop */
957
+ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
958
+ size_t mLength;
959
+ size_t const h = ZSTD_hashPtr(ip, hBits, mls);
960
+ U32 const current = (U32)(ip-base);
961
+ U32 const matchIndex = hashTable[h];
962
+ const BYTE* match = base + matchIndex;
963
+ hashTable[h] = current; /* update hash table */
964
+
965
+ if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
966
+ mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
967
+ ip++;
968
+ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
969
+ } else {
970
+ U32 offset;
971
+ if ( (matchIndex <= lowestIndex) || (MEM_read32(match) != MEM_read32(ip)) ) {
972
+ ip += ((ip-anchor) >> g_searchStrength) + 1;
973
+ continue;
974
+ }
975
+ mLength = ZSTD_count(ip+4, match+4, iend) + 4;
976
+ offset = (U32)(ip-match);
977
+ while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
978
+ offset_2 = offset_1;
979
+ offset_1 = offset;
980
+
981
+ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
982
+ }
983
+
984
+ /* match found */
985
+ ip += mLength;
986
+ anchor = ip;
987
+
988
+ if (ip <= ilimit) {
989
+ /* Fill Table */
990
+ hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2; /* here because current+2 could be > iend-8 */
991
+ hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base);
992
+ /* check immediate repcode */
993
+ while ( (ip <= ilimit)
994
+ && ( (offset_2>0)
995
+ & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
996
+ /* store sequence */
997
+ size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
998
+ { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
999
+ hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip-base);
1000
+ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH);
1001
+ ip += rLength;
1002
+ anchor = ip;
1003
+ continue; /* faster when present ... (?) */
1004
+ } } }
1005
+
1006
+ /* save reps for next block */
1007
+ cctx->savedRep[0] = offset_1 ? offset_1 : offsetSaved;
1008
+ cctx->savedRep[1] = offset_2 ? offset_2 : offsetSaved;
1009
+
1010
+ /* Last Literals */
1011
+ { size_t const lastLLSize = iend - anchor;
1012
+ memcpy(seqStorePtr->lit, anchor, lastLLSize);
1013
+ seqStorePtr->lit += lastLLSize;
1014
+ }
1015
+ }
1016
+
1017
+
1018
+ static void ZSTD_compressBlock_fast(ZSTD_CCtx* ctx,
1019
+ const void* src, size_t srcSize)
1020
+ {
1021
+ const U32 mls = ctx->params.cParams.searchLength;
1022
+ switch(mls)
1023
+ {
1024
+ default:
1025
+ case 4 :
1026
+ ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 4); return;
1027
+ case 5 :
1028
+ ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 5); return;
1029
+ case 6 :
1030
+ ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 6); return;
1031
+ case 7 :
1032
+ ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 7); return;
1033
+ }
1034
+ }
1035
+
1036
+
1037
+ static void ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
1038
+ const void* src, size_t srcSize,
1039
+ const U32 mls)
1040
+ {
1041
+ U32* hashTable = ctx->hashTable;
1042
+ const U32 hBits = ctx->params.cParams.hashLog;
1043
+ seqStore_t* seqStorePtr = &(ctx->seqStore);
1044
+ const BYTE* const base = ctx->base;
1045
+ const BYTE* const dictBase = ctx->dictBase;
1046
+ const BYTE* const istart = (const BYTE*)src;
1047
+ const BYTE* ip = istart;
1048
+ const BYTE* anchor = istart;
1049
+ const U32 lowestIndex = ctx->lowLimit;
1050
+ const BYTE* const dictStart = dictBase + lowestIndex;
1051
+ const U32 dictLimit = ctx->dictLimit;
1052
+ const BYTE* const lowPrefixPtr = base + dictLimit;
1053
+ const BYTE* const dictEnd = dictBase + dictLimit;
1054
+ const BYTE* const iend = istart + srcSize;
1055
+ const BYTE* const ilimit = iend - 8;
1056
+ U32 offset_1=ctx->rep[0], offset_2=ctx->rep[1];
1057
+
1058
+ /* Search Loop */
1059
+ while (ip < ilimit) { /* < instead of <=, because (ip+1) */
1060
+ const size_t h = ZSTD_hashPtr(ip, hBits, mls);
1061
+ const U32 matchIndex = hashTable[h];
1062
+ const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base;
1063
+ const BYTE* match = matchBase + matchIndex;
1064
+ const U32 current = (U32)(ip-base);
1065
+ const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */
1066
+ const BYTE* repBase = repIndex < dictLimit ? dictBase : base;
1067
+ const BYTE* repMatch = repBase + repIndex;
1068
+ size_t mLength;
1069
+ hashTable[h] = current; /* update hash table */
1070
+
1071
+ if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex))
1072
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
1073
+ const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
1074
+ mLength = ZSTD_count_2segments(ip+1+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repMatchEnd, lowPrefixPtr) + EQUAL_READ32;
1075
+ ip++;
1076
+ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
1077
+ } else {
1078
+ if ( (matchIndex < lowestIndex) ||
1079
+ (MEM_read32(match) != MEM_read32(ip)) ) {
1080
+ ip += ((ip-anchor) >> g_searchStrength) + 1;
1081
+ continue;
1082
+ }
1083
+ { const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
1084
+ const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
1085
+ U32 offset;
1086
+ mLength = ZSTD_count_2segments(ip+EQUAL_READ32, match+EQUAL_READ32, iend, matchEnd, lowPrefixPtr) + EQUAL_READ32;
1087
+ while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
1088
+ offset = current - matchIndex;
1089
+ offset_2 = offset_1;
1090
+ offset_1 = offset;
1091
+ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
1092
+ } }
1093
+
1094
+ /* found a match : store it */
1095
+ ip += mLength;
1096
+ anchor = ip;
1097
+
1098
+ if (ip <= ilimit) {
1099
+ /* Fill Table */
1100
+ hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2;
1101
+ hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base);
1102
+ /* check immediate repcode */
1103
+ while (ip <= ilimit) {
1104
+ U32 const current2 = (U32)(ip-base);
1105
+ U32 const repIndex2 = current2 - offset_2;
1106
+ const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
1107
+ if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */
1108
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
1109
+ const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
1110
+ size_t repLength2 = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch2+EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
1111
+ U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
1112
+ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH);
1113
+ hashTable[ZSTD_hashPtr(ip, hBits, mls)] = current2;
1114
+ ip += repLength2;
1115
+ anchor = ip;
1116
+ continue;
1117
+ }
1118
+ break;
1119
+ } } }
1120
+
1121
+ /* save reps for next block */
1122
+ ctx->savedRep[0] = offset_1; ctx->savedRep[1] = offset_2;
1123
+
1124
+ /* Last Literals */
1125
+ { size_t const lastLLSize = iend - anchor;
1126
+ memcpy(seqStorePtr->lit, anchor, lastLLSize);
1127
+ seqStorePtr->lit += lastLLSize;
1128
+ }
1129
+ }
1130
+
1131
+
1132
+ static void ZSTD_compressBlock_fast_extDict(ZSTD_CCtx* ctx,
1133
+ const void* src, size_t srcSize)
1134
+ {
1135
+ U32 const mls = ctx->params.cParams.searchLength;
1136
+ switch(mls)
1137
+ {
1138
+ default:
1139
+ case 4 :
1140
+ ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 4); return;
1141
+ case 5 :
1142
+ ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 5); return;
1143
+ case 6 :
1144
+ ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 6); return;
1145
+ case 7 :
1146
+ ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 7); return;
1147
+ }
1148
+ }
1149
+
1150
+
1151
+ /*-*************************************
1152
+ * Double Fast
1153
+ ***************************************/
1154
+ static void ZSTD_fillDoubleHashTable (ZSTD_CCtx* cctx, const void* end, const U32 mls)
1155
+ {
1156
+ U32* const hashLarge = cctx->hashTable;
1157
+ U32 const hBitsL = cctx->params.cParams.hashLog;
1158
+ U32* const hashSmall = cctx->chainTable;
1159
+ U32 const hBitsS = cctx->params.cParams.chainLog;
1160
+ const BYTE* const base = cctx->base;
1161
+ const BYTE* ip = base + cctx->nextToUpdate;
1162
+ const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
1163
+ const size_t fastHashFillStep = 3;
1164
+
1165
+ while(ip <= iend) {
1166
+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip - base);
1167
+ hashLarge[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip - base);
1168
+ ip += fastHashFillStep;
1169
+ }
1170
+ }
+
+
+ FORCE_INLINE
+ void ZSTD_compressBlock_doubleFast_generic(ZSTD_CCtx* cctx,
+ const void* src, size_t srcSize,
+ const U32 mls)
+ {
+ U32* const hashLong = cctx->hashTable;
+ const U32 hBitsL = cctx->params.cParams.hashLog;
+ U32* const hashSmall = cctx->chainTable;
+ const U32 hBitsS = cctx->params.cParams.chainLog;
+ seqStore_t* seqStorePtr = &(cctx->seqStore);
+ const BYTE* const base = cctx->base;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const U32 lowestIndex = cctx->dictLimit;
+ const BYTE* const lowest = base + lowestIndex;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - HASH_READ_SIZE;
+ U32 offset_1=cctx->rep[0], offset_2=cctx->rep[1];
+ U32 offsetSaved = 0;
+
+ /* init */
+ ip += (ip==lowest);
+ { U32 const maxRep = (U32)(ip-lowest);
+ if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
+ if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
+ }
+
+ /* Main Search Loop */
+ while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
+ size_t mLength;
+ size_t const h2 = ZSTD_hashPtr(ip, hBitsL, 8);
+ size_t const h = ZSTD_hashPtr(ip, hBitsS, mls);
+ U32 const current = (U32)(ip-base);
+ U32 const matchIndexL = hashLong[h2];
+ U32 const matchIndexS = hashSmall[h];
+ const BYTE* matchLong = base + matchIndexL;
+ const BYTE* match = base + matchIndexS;
+ hashLong[h2] = hashSmall[h] = current; /* update hash tables */
+
+ if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) { /* note : by construction, offset_1 <= current */
+ mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
+ } else {
+ U32 offset;
+ if ( (matchIndexL > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip)) ) {
+ mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
+ offset = (U32)(ip-matchLong);
+ while (((ip>anchor) & (matchLong>lowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
+ } else if ( (matchIndexS > lowestIndex) && (MEM_read32(match) == MEM_read32(ip)) ) {
+ size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
+ U32 const matchIndex3 = hashLong[h3];
+ const BYTE* match3 = base + matchIndex3;
+ hashLong[h3] = current + 1;
+ if ( (matchIndex3 > lowestIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
+ mLength = ZSTD_count(ip+9, match3+8, iend) + 8;
+ ip++;
+ offset = (U32)(ip-match3);
+ while (((ip>anchor) & (match3>lowest)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
+ } else {
+ mLength = ZSTD_count(ip+4, match+4, iend) + 4;
+ offset = (U32)(ip-match);
+ while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ }
+ } else {
+ ip += ((ip-anchor) >> g_searchStrength) + 1;
+ continue;
+ }
+
+ offset_2 = offset_1;
+ offset_1 = offset;
+
+ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+ }
+
+ /* match found */
+ ip += mLength;
+ anchor = ip;
+
+ if (ip <= ilimit) {
+ /* Fill Table */
+ hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] =
+ hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2; /* here because current+2 could be > iend-8 */
+ hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] =
+ hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base);
+
+ /* check immediate repcode */
+ while ( (ip <= ilimit)
+ && ( (offset_2>0)
+ & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
+ /* store sequence */
+ size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
+ { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offset_1 */
+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
+ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
+ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH);
+ ip += rLength;
+ anchor = ip;
+ continue; /* faster when present ... (?) */
+ } } }
+
+ /* save reps for next block */
+ cctx->savedRep[0] = offset_1 ? offset_1 : offsetSaved;
+ cctx->savedRep[1] = offset_2 ? offset_2 : offsetSaved;
+
+ /* Last Literals */
+ { size_t const lastLLSize = iend - anchor;
+ memcpy(seqStorePtr->lit, anchor, lastLLSize);
+ seqStorePtr->lit += lastLLSize;
+ }
+ }
+
+
+ static void ZSTD_compressBlock_doubleFast(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+ {
+ const U32 mls = ctx->params.cParams.searchLength;
+ switch(mls)
+ {
+ default:
+ case 4 :
+ ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 4); return;
+ case 5 :
+ ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 5); return;
+ case 6 :
+ ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 6); return;
+ case 7 :
+ ZSTD_compressBlock_doubleFast_generic(ctx, src, srcSize, 7); return;
+ }
+ }
+
+
+ static void ZSTD_compressBlock_doubleFast_extDict_generic(ZSTD_CCtx* ctx,
+ const void* src, size_t srcSize,
+ const U32 mls)
+ {
+ U32* const hashLong = ctx->hashTable;
+ U32 const hBitsL = ctx->params.cParams.hashLog;
+ U32* const hashSmall = ctx->chainTable;
+ U32 const hBitsS = ctx->params.cParams.chainLog;
+ seqStore_t* seqStorePtr = &(ctx->seqStore);
+ const BYTE* const base = ctx->base;
+ const BYTE* const dictBase = ctx->dictBase;
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const U32 lowestIndex = ctx->lowLimit;
+ const BYTE* const dictStart = dictBase + lowestIndex;
+ const U32 dictLimit = ctx->dictLimit;
+ const BYTE* const lowPrefixPtr = base + dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
+ U32 offset_1=ctx->rep[0], offset_2=ctx->rep[1];
+
+ /* Search Loop */
+ while (ip < ilimit) { /* < instead of <=, because (ip+1) */
+ const size_t hSmall = ZSTD_hashPtr(ip, hBitsS, mls);
+ const U32 matchIndex = hashSmall[hSmall];
+ const BYTE* matchBase = matchIndex < dictLimit ? dictBase : base;
+ const BYTE* match = matchBase + matchIndex;
+
+ const size_t hLong = ZSTD_hashPtr(ip, hBitsL, 8);
+ const U32 matchLongIndex = hashLong[hLong];
+ const BYTE* matchLongBase = matchLongIndex < dictLimit ? dictBase : base;
+ const BYTE* matchLong = matchLongBase + matchLongIndex;
+
+ const U32 current = (U32)(ip-base);
+ const U32 repIndex = current + 1 - offset_1; /* offset_1 expected <= current +1 */
+ const BYTE* repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* repMatch = repBase + repIndex;
+ size_t mLength;
+ hashSmall[hSmall] = hashLong[hLong] = current; /* update hash table */
+
+ if ( (((U32)((dictLimit-1) - repIndex) >= 3) /* intentional underflow */ & (repIndex > lowestIndex))
+ && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
+ const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
+ mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4;
+ ip++;
+ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
+ } else {
+ if ((matchLongIndex > lowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
+ const BYTE* matchEnd = matchLongIndex < dictLimit ? dictEnd : iend;
+ const BYTE* lowMatchPtr = matchLongIndex < dictLimit ? dictStart : lowPrefixPtr;
+ U32 offset;
+ mLength = ZSTD_count_2segments(ip+8, matchLong+8, iend, matchEnd, lowPrefixPtr) + 8;
+ offset = current - matchLongIndex;
+ while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+
+ } else if ((matchIndex > lowestIndex) && (MEM_read32(match) == MEM_read32(ip))) {
+ size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
+ U32 const matchIndex3 = hashLong[h3];
+ const BYTE* const match3Base = matchIndex3 < dictLimit ? dictBase : base;
+ const BYTE* match3 = match3Base + matchIndex3;
+ U32 offset;
+ hashLong[h3] = current + 1;
+ if ( (matchIndex3 > lowestIndex) && (MEM_read64(match3) == MEM_read64(ip+1)) ) {
+ const BYTE* matchEnd = matchIndex3 < dictLimit ? dictEnd : iend;
+ const BYTE* lowMatchPtr = matchIndex3 < dictLimit ? dictStart : lowPrefixPtr;
+ mLength = ZSTD_count_2segments(ip+9, match3+8, iend, matchEnd, lowPrefixPtr) + 8;
+ ip++;
+ offset = current+1 - matchIndex3;
+ while (((ip>anchor) & (match3>lowMatchPtr)) && (ip[-1] == match3[-1])) { ip--; match3--; mLength++; } /* catch up */
+ } else {
+ const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
+ const BYTE* lowMatchPtr = matchIndex < dictLimit ? dictStart : lowPrefixPtr;
+ mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, lowPrefixPtr) + 4;
+ offset = current - matchIndex;
+ while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+ }
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+
+ } else {
+ ip += ((ip-anchor) >> g_searchStrength) + 1;
+ continue;
+ } }
+
+ /* found a match : store it */
+ ip += mLength;
+ anchor = ip;
+
+ if (ip <= ilimit) {
+ /* Fill Table */
+ hashSmall[ZSTD_hashPtr(base+current+2, hBitsS, mls)] = current+2;
+ hashLong[ZSTD_hashPtr(base+current+2, hBitsL, 8)] = current+2;
+ hashSmall[ZSTD_hashPtr(ip-2, hBitsS, mls)] = (U32)(ip-2-base);
+ hashLong[ZSTD_hashPtr(ip-2, hBitsL, 8)] = (U32)(ip-2-base);
+ /* check immediate repcode */
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex2 = current2 - offset_2;
+ const BYTE* repMatch2 = repIndex2 < dictLimit ? dictBase + repIndex2 : base + repIndex2;
+ if ( (((U32)((dictLimit-1) - repIndex2) >= 3) & (repIndex2 > lowestIndex)) /* intentional overflow */
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch2+EQUAL_READ32, iend, repEnd2, lowPrefixPtr) + EQUAL_READ32;
+ U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH);
+ hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
+ hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
+ }
+ break;
+ } } }
+
+ /* save reps for next block */
+ ctx->savedRep[0] = offset_1; ctx->savedRep[1] = offset_2;
+
+ /* Last Literals */
+ { size_t const lastLLSize = iend - anchor;
+ memcpy(seqStorePtr->lit, anchor, lastLLSize);
+ seqStorePtr->lit += lastLLSize;
+ }
+ }
+
+
+ static void ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx* ctx,
+ const void* src, size_t srcSize)
+ {
+ U32 const mls = ctx->params.cParams.searchLength;
+ switch(mls)
+ {
+ default:
+ case 4 :
+ ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 4); return;
+ case 5 :
+ ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 5); return;
+ case 6 :
+ ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 6); return;
+ case 7 :
+ ZSTD_compressBlock_doubleFast_extDict_generic(ctx, src, srcSize, 7); return;
+ }
+ }
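
Both doubleFast variants above work the same way: every position is indexed in two hash tables, a long one keyed on an 8-byte prefix and a short one keyed on an mls-byte prefix, and the long-table candidate is tried first because it buys a longer match for the same offset cost. A standalone sketch of that dual-table idea (toy hash, tiny tables, hypothetical names; position 0 is reserved as an empty-slot sentinel):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HLOG 6                               /* tiny tables, demo only */

static uint32_t hashN(const uint8_t* p, int n)
{
    uint64_t h = 0;                          /* toy multiplicative hash */
    for (int i = 0; i < n; i++) h = h*0x9E3779B185EBCA87ULL + p[i];
    return (uint32_t)(h >> (64 - HLOG));
}

int main(void)
{
    const uint8_t src[] = "abcdefgh-abcdefgh-abcdefgh";
    uint32_t tblLong [1<<HLOG] = {0};        /* last position of each 8-byte prefix */
    uint32_t tblShort[1<<HLOG] = {0};        /* last position of each 4-byte prefix */

    for (uint32_t pos = 1; pos + 8 <= sizeof(src)-1; pos++) {
        uint32_t const candL = tblLong [hashN(src+pos, 8)];
        uint32_t const candS = tblShort[hashN(src+pos, 4)];
        if (candL && !memcmp(src+candL, src+pos, 8))       /* prefer the long match */
            printf("pos %2u: 8-byte match at %u\n", pos, candL);
        else if (candS && !memcmp(src+candS, src+pos, 4))
            printf("pos %2u: 4-byte match at %u\n", pos, candS);
        tblLong [hashN(src+pos, 8)] = pos;                 /* index current position */
        tblShort[hashN(src+pos, 4)] = pos;
    }
    return 0;
}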
+
+
+ /*-*************************************
+ * Binary Tree search
+ ***************************************/
+ /** ZSTD_insertBt1() : add one or multiple positions to tree.
+ * ip : assumed <= iend-8 .
+ * @return : nb of positions added */
+ static U32 ZSTD_insertBt1(ZSTD_CCtx* zc, const BYTE* const ip, const U32 mls, const BYTE* const iend, U32 nbCompares,
+ U32 extDict)
+ {
+ U32* const hashTable = zc->hashTable;
+ U32 const hashLog = zc->params.cParams.hashLog;
+ size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
+ U32* const bt = zc->chainTable;
+ U32 const btLog = zc->params.cParams.chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
+ U32 matchIndex = hashTable[h];
+ size_t commonLengthSmaller=0, commonLengthLarger=0;
+ const BYTE* const base = zc->base;
+ const BYTE* const dictBase = zc->dictBase;
+ const U32 dictLimit = zc->dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* match;
+ const U32 current = (U32)(ip-base);
+ const U32 btLow = btMask >= current ? 0 : current - btMask;
+ U32* smallerPtr = bt + 2*(current&btMask);
+ U32* largerPtr = smallerPtr + 1;
+ U32 dummy32; /* to be nullified at the end */
+ U32 const windowLow = zc->lowLimit;
+ U32 matchEndIdx = current+8;
+ size_t bestLength = 8;
+ #ifdef ZSTD_C_PREDICT
+ U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0);
+ U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1);
+ predictedSmall += (predictedSmall>0);
+ predictedLarge += (predictedLarge>0);
+ #endif /* ZSTD_C_PREDICT */
+
+ hashTable[h] = current; /* Update Hash Table */
+
+ while (nbCompares-- && (matchIndex > windowLow)) {
+ U32* const nextPtr = bt + 2*(matchIndex & btMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+
+ #ifdef ZSTD_C_PREDICT /* note : can create issues when hlog small <= 11 */
+ const U32* predictPtr = bt + 2*((matchIndex-1) & btMask); /* written this way, as bt is a roll buffer */
+ if (matchIndex == predictedSmall) {
+ /* no need to check length, result known */
+ *smallerPtr = matchIndex;
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
+ predictedSmall = predictPtr[1] + (predictPtr[1]>0);
+ continue;
+ }
+ if (matchIndex == predictedLarge) {
+ *largerPtr = matchIndex;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ predictedLarge = predictPtr[0] + (predictPtr[0]>0);
+ continue;
+ }
+ #endif
+ if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
+ match = base + matchIndex;
+ if (match[matchLength] == ip[matchLength])
+ matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iend) +1;
+ } else {
+ match = dictBase + matchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+ if (matchIndex+matchLength >= dictLimit)
+ match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
+ }
+
+ if (matchLength > bestLength) {
+ bestLength = matchLength;
+ if (matchLength > matchEndIdx - matchIndex)
+ matchEndIdx = matchIndex + (U32)matchLength;
+ }
+
+ if (ip+matchLength == iend) /* equal : no way to know if inf or sup */
+ break; /* drop, to guarantee consistency; misses a bit of compression, but other solutions can corrupt the tree */
+
+ if (match[matchLength] < ip[matchLength]) { /* necessarily within correct buffer */
+ /* match is smaller than current */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
+ } else {
+ /* match is larger than current */
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ } }
+
+ *smallerPtr = *largerPtr = 0;
+ if (bestLength > 384) return MIN(192, (U32)(bestLength - 384)); /* speed optimization */
+ if (matchEndIdx > current + 8) return matchEndIdx - current - 8;
+ return 1;
+ }
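
ZSTD_insertBt1 keeps its search tree inside a rolling array: the node owned by position i lives at bt[2*(i & btMask)], slot 0 pointing at the subtree of lexicographically smaller suffixes and slot 1 at the larger ones, so stale positions are overwritten automatically as the window advances. A compact, runnable sketch of that insertion walk (hypothetical names; a full strcmp stands in for the incremental commonLengthSmaller/Larger bookkeeping):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum { BT_LOG = 4 };
static uint32_t bt[2 << BT_LOG];          /* two child slots per node */
static const uint32_t btMask = (1u << BT_LOG) - 1;
static uint32_t root = 0;                 /* stands in for hashTable[h]; 0 = empty */
static const char* base = "\0banana";     /* position 0 is reserved as null */

static void insertBt(uint32_t cur)
{
    uint32_t* smallerPtr = bt + 2*(cur & btMask);
    uint32_t* largerPtr  = smallerPtr + 1;
    uint32_t  matchIndex = root;
    root = cur;                           /* newest position becomes the root */
    while (matchIndex) {
        uint32_t* const nextPtr = bt + 2*(matchIndex & btMask);
        if (strcmp(base + matchIndex, base + cur) < 0) {
            *smallerPtr = matchIndex;     /* belongs in the "smaller" subtree */
            smallerPtr  = nextPtr + 1;    /* descend through its larger child */
            matchIndex  = nextPtr[1];
        } else {
            *largerPtr = matchIndex;
            largerPtr  = nextPtr;
            matchIndex = nextPtr[0];
        }
    }
    *smallerPtr = *largerPtr = 0;         /* nullify the dangling links */
}

int main(void)
{
    for (uint32_t pos = 1; base[pos]; pos++) insertBt(pos);
    printf("suffix at the root: %s\n", base + root);
    return 0;
}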
+
+
+ static size_t ZSTD_insertBtAndFindBestMatch (
+ ZSTD_CCtx* zc,
+ const BYTE* const ip, const BYTE* const iend,
+ size_t* offsetPtr,
+ U32 nbCompares, const U32 mls,
+ U32 extDict)
+ {
+ U32* const hashTable = zc->hashTable;
+ U32 const hashLog = zc->params.cParams.hashLog;
+ size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
+ U32* const bt = zc->chainTable;
+ U32 const btLog = zc->params.cParams.chainLog - 1;
+ U32 const btMask = (1 << btLog) - 1;
+ U32 matchIndex = hashTable[h];
+ size_t commonLengthSmaller=0, commonLengthLarger=0;
+ const BYTE* const base = zc->base;
+ const BYTE* const dictBase = zc->dictBase;
+ const U32 dictLimit = zc->dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const U32 current = (U32)(ip-base);
+ const U32 btLow = btMask >= current ? 0 : current - btMask;
+ const U32 windowLow = zc->lowLimit;
+ U32* smallerPtr = bt + 2*(current&btMask);
+ U32* largerPtr = bt + 2*(current&btMask) + 1;
+ U32 matchEndIdx = current+8;
+ U32 dummy32; /* to be nullified at the end */
+ size_t bestLength = 0;
+
+ hashTable[h] = current; /* Update Hash Table */
+
+ while (nbCompares-- && (matchIndex > windowLow)) {
+ U32* const nextPtr = bt + 2*(matchIndex & btMask);
+ size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
+ const BYTE* match;
+
+ if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
+ match = base + matchIndex;
+ if (match[matchLength] == ip[matchLength])
+ matchLength += ZSTD_count(ip+matchLength+1, match+matchLength+1, iend) +1;
+ } else {
+ match = dictBase + matchIndex;
+ matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+ if (matchIndex+matchLength >= dictLimit)
+ match = base + matchIndex; /* to prepare for next usage of match[matchLength] */
+ }
+
+ if (matchLength > bestLength) {
+ if (matchLength > matchEndIdx - matchIndex)
+ matchEndIdx = matchIndex + (U32)matchLength;
+ if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
+ bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
+ if (ip+matchLength == iend) /* equal : no way to know if inf or sup */
+ break; /* drop, to guarantee consistency (miss a little bit of compression) */
+ }
+
+ if (match[matchLength] < ip[matchLength]) {
+ /* match is smaller than current */
+ *smallerPtr = matchIndex; /* update smaller idx */
+ commonLengthSmaller = matchLength; /* all smaller will now have at least this guaranteed common length */
+ if (matchIndex <= btLow) { smallerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ smallerPtr = nextPtr+1; /* new "smaller" => larger of match */
+ matchIndex = nextPtr[1]; /* new matchIndex larger than previous (closer to current) */
+ } else {
+ /* match is larger than current */
+ *largerPtr = matchIndex;
+ commonLengthLarger = matchLength;
+ if (matchIndex <= btLow) { largerPtr=&dummy32; break; } /* beyond tree size, stop the search */
+ largerPtr = nextPtr;
+ matchIndex = nextPtr[0];
+ } }
+
+ *smallerPtr = *largerPtr = 0;
+
+ zc->nextToUpdate = (matchEndIdx > current + 8) ? matchEndIdx - 8 : current+1;
+ return bestLength;
+ }
+
+
+ static void ZSTD_updateTree(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls)
+ {
+ const BYTE* const base = zc->base;
+ const U32 target = (U32)(ip - base);
+ U32 idx = zc->nextToUpdate;
+
+ while(idx < target)
+ idx += ZSTD_insertBt1(zc, base+idx, mls, iend, nbCompares, 0);
+ }
+
+ /** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
+ static size_t ZSTD_BtFindBestMatch (
+ ZSTD_CCtx* zc,
+ const BYTE* const ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 maxNbAttempts, const U32 mls)
+ {
+ if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */
+ ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
+ return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 0);
+ }
+
+
+ static size_t ZSTD_BtFindBestMatch_selectMLS (
+ ZSTD_CCtx* zc, /* Index table will be updated */
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 maxNbAttempts, const U32 matchLengthSearch)
+ {
+ switch(matchLengthSearch)
+ {
+ default :
+ case 4 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
+ case 5 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
+ case 6 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
+ }
+ }
+
+
+ static void ZSTD_updateTree_extDict(ZSTD_CCtx* zc, const BYTE* const ip, const BYTE* const iend, const U32 nbCompares, const U32 mls)
+ {
+ const BYTE* const base = zc->base;
+ const U32 target = (U32)(ip - base);
+ U32 idx = zc->nextToUpdate;
+
+ while (idx < target) idx += ZSTD_insertBt1(zc, base+idx, mls, iend, nbCompares, 1);
+ }
+
+
+ /** Tree updater, providing best match */
+ static size_t ZSTD_BtFindBestMatch_extDict (
+ ZSTD_CCtx* zc,
+ const BYTE* const ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 maxNbAttempts, const U32 mls)
+ {
+ if (ip < zc->base + zc->nextToUpdate) return 0; /* skipped area */
+ ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
+ return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 1);
+ }
+
+
+ static size_t ZSTD_BtFindBestMatch_selectMLS_extDict (
+ ZSTD_CCtx* zc, /* Index table will be updated */
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 maxNbAttempts, const U32 matchLengthSearch)
+ {
+ switch(matchLengthSearch)
+ {
+ default :
+ case 4 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
+ case 5 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
+ case 6 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
+ }
+ }
+
+
+
+ /* *********************************
+ * Hash Chain
+ ***********************************/
+ #define NEXT_IN_CHAIN(d, mask) chainTable[(d) & mask]
+
+ /* Update chains up to ip (excluded)
+ Assumption : always within prefix (i.e. not within extDict) */
+ FORCE_INLINE
+ U32 ZSTD_insertAndFindFirstIndex (ZSTD_CCtx* zc, const BYTE* ip, U32 mls)
+ {
+ U32* const hashTable = zc->hashTable;
+ const U32 hashLog = zc->params.cParams.hashLog;
+ U32* const chainTable = zc->chainTable;
+ const U32 chainMask = (1 << zc->params.cParams.chainLog) - 1;
+ const BYTE* const base = zc->base;
+ const U32 target = (U32)(ip - base);
+ U32 idx = zc->nextToUpdate;
+
+ while(idx < target) { /* catch up */
+ size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
+ NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
+ hashTable[h] = idx;
+ idx++;
+ }
+
+ zc->nextToUpdate = target;
+ return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
+ }
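
The chain table above is the same indexing trick in list form: hashTable[h] holds the most recent position whose prefix hashes to h, and NEXT_IN_CHAIN links each position to the previous one in the same bucket, using only (position & chainMask) as storage. A self-contained sketch (toy sizes and hash, hypothetical names; position 0 doubles as the end-of-chain marker, and every candidate still needs byte-level verification):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum { CHAIN_LOG = 4, HASH_LOG = 4 };
static uint32_t hashTable [1 << HASH_LOG];
static uint32_t chainTable[1 << CHAIN_LOG];
#define NEXT_IN_CHAIN(p) chainTable[(p) & ((1u << CHAIN_LOG) - 1)]

static uint32_t hash4(const uint8_t* p)
{   uint32_t v; memcpy(&v, p, 4); return (v * 2654435761u) >> (32 - HASH_LOG); }

int main(void)
{
    const uint8_t src[] = "abcdXabcdYabcd";
    for (uint32_t pos = 1; pos + 4 <= sizeof(src)-1; pos++) {
        uint32_t const h = hash4(src + pos);
        NEXT_IN_CHAIN(pos) = hashTable[h];   /* link to previous same-hash position */
        hashTable[h] = pos;
    }
    /* walk the chain of candidates for "abcd" */
    for (uint32_t m = hashTable[hash4((const uint8_t*)"abcd")]; m; m = NEXT_IN_CHAIN(m))
        if (!memcmp(src + m, "abcd", 4))     /* candidates must be verified */
            printf("match at position %u\n", m);
    return 0;
}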
+
+
+
+ FORCE_INLINE /* inlining is important to hardwire a hot branch (template emulation) */
+ size_t ZSTD_HcFindBestMatch_generic (
+ ZSTD_CCtx* zc, /* Index table will be updated */
+ const BYTE* const ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 maxNbAttempts, const U32 mls, const U32 extDict)
+ {
+ U32* const chainTable = zc->chainTable;
+ const U32 chainSize = (1 << zc->params.cParams.chainLog);
+ const U32 chainMask = chainSize-1;
+ const BYTE* const base = zc->base;
+ const BYTE* const dictBase = zc->dictBase;
+ const U32 dictLimit = zc->dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const U32 lowLimit = zc->lowLimit;
+ const U32 current = (U32)(ip-base);
+ const U32 minChain = current > chainSize ? current - chainSize : 0;
+ int nbAttempts=maxNbAttempts;
+ size_t ml=EQUAL_READ32-1;
+
+ /* HC4 match finder */
+ U32 matchIndex = ZSTD_insertAndFindFirstIndex (zc, ip, mls);
+
+ for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) {
+ const BYTE* match;
+ size_t currentMl=0;
+ if ((!extDict) || matchIndex >= dictLimit) {
+ match = base + matchIndex;
+ if (match[ml] == ip[ml]) /* potentially better */
+ currentMl = ZSTD_count(ip, match, iLimit);
+ } else {
+ match = dictBase + matchIndex;
+ if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+EQUAL_READ32, match+EQUAL_READ32, iLimit, dictEnd, prefixStart) + EQUAL_READ32;
+ }
+
+ /* save best solution */
+ if (currentMl > ml) { ml = currentMl; *offsetPtr = current - matchIndex + ZSTD_REP_MOVE; if (ip+currentMl == iLimit) break; /* best possible, and avoid read overflow */ }
+
+ if (matchIndex <= minChain) break;
+ matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
+ }
+
+ return ml;
+ }
+
+
+ FORCE_INLINE size_t ZSTD_HcFindBestMatch_selectMLS (
+ ZSTD_CCtx* zc,
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 maxNbAttempts, const U32 matchLengthSearch)
+ {
+ switch(matchLengthSearch)
+ {
+ default :
+ case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 0);
+ case 5 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 0);
+ case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 0);
+ }
+ }
+
+
+ FORCE_INLINE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
+ ZSTD_CCtx* zc,
+ const BYTE* ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 maxNbAttempts, const U32 matchLengthSearch)
+ {
+ switch(matchLengthSearch)
+ {
+ default :
+ case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 1);
+ case 5 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 1);
+ case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 1);
+ }
+ }
+
+
+ /* *******************************
+ * Common parser - lazy strategy
+ *********************************/
+ FORCE_INLINE
+ void ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx,
+ const void* src, size_t srcSize,
+ const U32 searchMethod, const U32 depth)
+ {
+ seqStore_t* seqStorePtr = &(ctx->seqStore);
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
+ const BYTE* const base = ctx->base + ctx->dictLimit;
+
+ U32 const maxSearches = 1 << ctx->params.cParams.searchLog;
+ U32 const mls = ctx->params.cParams.searchLength;
+
+ typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit,
+ size_t* offsetPtr,
+ U32 maxNbAttempts, U32 matchLengthSearch);
+ searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
+ U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1], savedOffset=0;
+
+ /* init */
+ ip += (ip==base);
+ ctx->nextToUpdate3 = ctx->nextToUpdate;
+ { U32 const maxRep = (U32)(ip-base);
+ if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
+ if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
+ }
+
+ /* Match Loop */
+ while (ip < ilimit) {
+ size_t matchLength=0;
+ size_t offset=0;
+ const BYTE* start=ip+1;
+
+ /* check repCode */
+ if ((offset_1>0) & (MEM_read32(ip+1) == MEM_read32(ip+1 - offset_1))) {
+ /* repcode : we take it */
+ matchLength = ZSTD_count(ip+1+EQUAL_READ32, ip+1+EQUAL_READ32-offset_1, iend) + EQUAL_READ32;
+ if (depth==0) goto _storeSequence;
+ }
+
+ /* first search (depth 0) */
+ { size_t offsetFound = 99999999;
+ size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
+ if (ml2 > matchLength)
+ matchLength = ml2, start = ip, offset=offsetFound;
+ }
+
+ if (matchLength < EQUAL_READ32) {
+ ip += ((ip-anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
+ continue;
+ }
+
+ /* let's try to find a better solution */
+ if (depth>=1)
+ while (ip<ilimit) {
+ ip ++;
+ if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+ size_t const mlRep = ZSTD_count(ip+EQUAL_READ32, ip+EQUAL_READ32-offset_1, iend) + EQUAL_READ32;
+ int const gain2 = (int)(mlRep * 3);
+ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+ if ((mlRep >= EQUAL_READ32) && (gain2 > gain1))
+ matchLength = mlRep, offset = 0, start = ip;
+ }
+ { size_t offset2=99999999;
+ size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
+ if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
+ matchLength = ml2, offset = offset2, start = ip;
+ continue; /* search a better one */
+ } }
+
+ /* let's find an even better one */
+ if ((depth==2) && (ip<ilimit)) {
+ ip ++;
+ if ((offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+ size_t const ml2 = ZSTD_count(ip+EQUAL_READ32, ip+EQUAL_READ32-offset_1, iend) + EQUAL_READ32;
+ int const gain2 = (int)(ml2 * 4);
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
+ if ((ml2 >= EQUAL_READ32) && (gain2 > gain1))
+ matchLength = ml2, offset = 0, start = ip;
+ }
+ { size_t offset2=99999999;
+ size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
+ if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
+ matchLength = ml2, offset = offset2, start = ip;
+ continue;
+ } } }
+ break; /* nothing found : store previous solution */
+ }
+
+ /* catch up */
+ if (offset) {
+ while ((start>anchor) && (start>base+offset-ZSTD_REP_MOVE) && (start[-1] == start[-1-offset+ZSTD_REP_MOVE])) /* only search for offset within prefix */
+ { start--; matchLength++; }
+ offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
+ }
+
+ /* store sequence */
+ _storeSequence:
+ { size_t const litLength = start - anchor;
+ ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH);
+ anchor = ip = start + matchLength;
+ }
+
+ /* check immediate repcode */
+ while ( (ip <= ilimit)
+ && ((offset_2>0)
+ & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
+ /* store sequence */
+ matchLength = ZSTD_count(ip+EQUAL_READ32, ip+EQUAL_READ32-offset_2, iend) + EQUAL_READ32;
+ offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
+ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH);
+ ip += matchLength;
+ anchor = ip;
+ continue; /* faster when present ... (?) */
+ } }
+
+ /* Save reps for next block */
+ ctx->savedRep[0] = offset_1 ? offset_1 : savedOffset;
+ ctx->savedRep[1] = offset_2 ? offset_2 : savedOffset;
+
+ /* Last Literals */
+ { size_t const lastLLSize = iend - anchor;
+ memcpy(seqStorePtr->lit, anchor, lastLLSize);
+ seqStorePtr->lit += lastLLSize;
+ }
+ }
+
+
+ static void ZSTD_compressBlock_btlazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+ {
+ ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 1, 2);
+ }
+
+ static void ZSTD_compressBlock_lazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+ {
+ ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 2);
+ }
+
+ static void ZSTD_compressBlock_lazy(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+ {
+ ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 1);
+ }
+
+ static void ZSTD_compressBlock_greedy(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+ {
+ ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 0);
+ }
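
The gain1/gain2 arithmetic inside ZSTD_compressBlock_lazy_generic is a rough price model: a candidate found one byte later earns about four points per matched byte minus log2(offset+1) for its offset cost, while the incumbent match receives a small constant bonus (+4 at depth 1, +7 at depth 2), so the parser only switches when the newer match is clearly better. A toy computation with made-up values:

#include <stdio.h>

static int highbit32(unsigned v)     /* index of highest set bit, like ZSTD_highbit32 */
{   int n = 0; while (v >>= 1) n++; return n; }

int main(void)
{
    unsigned matchLength = 6,  offset  = 1u << 16;   /* incumbent: 6 bytes, far away   */
    unsigned ml2         = 7,  offset2 = 64;         /* candidate: 7 bytes, much closer */
    int const gain1 = (int)(matchLength*4 - highbit32(offset +1) + 4);  /* 24-16+4 = 12 */
    int const gain2 = (int)(ml2*4         - highbit32(offset2+1));     /* 28-6    = 22 */
    printf("gain1=%d gain2=%d -> %s\n", gain1, gain2,
           gain2 > gain1 ? "take the later match" : "keep the current match");
    return 0;
}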
+
+
+ FORCE_INLINE
+ void ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx,
+ const void* src, size_t srcSize,
+ const U32 searchMethod, const U32 depth)
+ {
+ seqStore_t* seqStorePtr = &(ctx->seqStore);
+ const BYTE* const istart = (const BYTE*)src;
+ const BYTE* ip = istart;
+ const BYTE* anchor = istart;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - 8;
+ const BYTE* const base = ctx->base;
+ const U32 dictLimit = ctx->dictLimit;
+ const U32 lowestIndex = ctx->lowLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* const dictBase = ctx->dictBase;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const BYTE* const dictStart = dictBase + ctx->lowLimit;
+
+ const U32 maxSearches = 1 << ctx->params.cParams.searchLog;
+ const U32 mls = ctx->params.cParams.searchLength;
+
+ typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit,
+ size_t* offsetPtr,
+ U32 maxNbAttempts, U32 matchLengthSearch);
+ searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;
+
+ U32 offset_1 = ctx->rep[0], offset_2 = ctx->rep[1];
+
+ /* init */
+ ctx->nextToUpdate3 = ctx->nextToUpdate;
+ ip += (ip == prefixStart);
+
+ /* Match Loop */
+ while (ip < ilimit) {
+ size_t matchLength=0;
+ size_t offset=0;
+ const BYTE* start=ip+1;
+ U32 current = (U32)(ip-base);
+
+ /* check repCode */
+ { const U32 repIndex = (U32)(current+1 - offset_1);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
+ if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
+ /* repcode detected, we should take it */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ matchLength = ZSTD_count_2segments(ip+1+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
+ if (depth==0) goto _storeSequence;
+ } }
+
+ /* first search (depth 0) */
+ { size_t offsetFound = 99999999;
+ size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
+ if (ml2 > matchLength)
+ matchLength = ml2, start = ip, offset=offsetFound;
+ }
+
+ if (matchLength < EQUAL_READ32) {
+ ip += ((ip-anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
+ continue;
+ }
+
+ /* let's try to find a better solution */
+ if (depth>=1)
+ while (ip<ilimit) {
+ ip ++;
+ current++;
+ /* check repCode */
+ if (offset) {
+ const U32 repIndex = (U32)(current - offset_1);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
+ if (MEM_read32(ip) == MEM_read32(repMatch)) {
+ /* repcode detected */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ size_t const repLength = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
+ int const gain2 = (int)(repLength * 3);
+ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+ if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
+ matchLength = repLength, offset = 0, start = ip;
+ } }
+
+ /* search match, depth 1 */
+ { size_t offset2=99999999;
+ size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
+ if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
+ matchLength = ml2, offset = offset2, start = ip;
+ continue; /* search a better one */
+ } }
+
+ /* let's find an even better one */
+ if ((depth==2) && (ip<ilimit)) {
+ ip ++;
+ current++;
+ /* check repCode */
+ if (offset) {
+ const U32 repIndex = (U32)(current - offset_1);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
+ if (MEM_read32(ip) == MEM_read32(repMatch)) {
+ /* repcode detected */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ size_t repLength = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
+ int gain2 = (int)(repLength * 4);
+ int gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
+ if ((repLength >= EQUAL_READ32) && (gain2 > gain1))
+ matchLength = repLength, offset = 0, start = ip;
+ } }
+
+ /* search match, depth 2 */
+ { size_t offset2=99999999;
+ size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
+ if ((ml2 >= EQUAL_READ32) && (gain2 > gain1)) {
+ matchLength = ml2, offset = offset2, start = ip;
+ continue;
+ } } }
+ break; /* nothing found : store previous solution */
+ }
+
+ /* catch up */
+ if (offset) {
+ U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
+ const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
+ const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
+ while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */
+ offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
+ }
+
+ /* store sequence */
+ _storeSequence:
+ { size_t const litLength = start - anchor;
+ ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH);
+ anchor = ip = start + matchLength;
+ }
+
+ /* check immediate repcode */
+ while (ip <= ilimit) {
+ const U32 repIndex = (U32)((ip-base) - offset_2);
+ const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
+ const BYTE* const repMatch = repBase + repIndex;
+ if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
+ if (MEM_read32(ip) == MEM_read32(repMatch)) {
+ /* repcode detected, we should take it */
+ const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
+ matchLength = ZSTD_count_2segments(ip+EQUAL_READ32, repMatch+EQUAL_READ32, iend, repEnd, prefixStart) + EQUAL_READ32;
+ offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset history */
+ ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH);
+ ip += matchLength;
+ anchor = ip;
+ continue; /* faster when present ... (?) */
+ }
+ break;
+ } }
+
+ /* Save reps for next block */
+ ctx->savedRep[0] = offset_1; ctx->savedRep[1] = offset_2;
+
+ /* Last Literals */
+ { size_t const lastLLSize = iend - anchor;
+ memcpy(seqStorePtr->lit, anchor, lastLLSize);
+ seqStorePtr->lit += lastLLSize;
+ }
+ }
+
+
+ void ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+ {
+ ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 0);
+ }
+
+ static void ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+ {
+ ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 1);
+ }
+
+ static void ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+ {
+ ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 2);
+ }
+
+ static void ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+ {
+ ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 1, 2);
+ }
+
+
+ /* The optimal parser */
+ #include "zstd_opt.h"
+
+ static void ZSTD_compressBlock_btopt(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+ {
+ #ifdef ZSTD_OPT_H_91842398743
+ ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 0);
+ #else
+ (void)ctx; (void)src; (void)srcSize;
+ return;
+ #endif
+ }
+
+ static void ZSTD_compressBlock_btopt2(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+ {
+ #ifdef ZSTD_OPT_H_91842398743
+ ZSTD_compressBlock_opt_generic(ctx, src, srcSize, 1);
+ #else
+ (void)ctx; (void)src; (void)srcSize;
+ return;
+ #endif
+ }
+
+ static void ZSTD_compressBlock_btopt_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+ {
+ #ifdef ZSTD_OPT_H_91842398743
+ ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 0);
+ #else
+ (void)ctx; (void)src; (void)srcSize;
+ return;
+ #endif
+ }
+
+ static void ZSTD_compressBlock_btopt2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
+ {
+ #ifdef ZSTD_OPT_H_91842398743
+ ZSTD_compressBlock_opt_extDict_generic(ctx, src, srcSize, 1);
+ #else
+ (void)ctx; (void)src; (void)srcSize;
+ return;
+ #endif
+ }
+
+
+ typedef void (*ZSTD_blockCompressor) (ZSTD_CCtx* ctx, const void* src, size_t srcSize);
+
+ static ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, int extDict)
+ {
+ static const ZSTD_blockCompressor blockCompressor[2][8] = {
+ { ZSTD_compressBlock_fast, ZSTD_compressBlock_doubleFast, ZSTD_compressBlock_greedy, ZSTD_compressBlock_lazy, ZSTD_compressBlock_lazy2, ZSTD_compressBlock_btlazy2, ZSTD_compressBlock_btopt, ZSTD_compressBlock_btopt2 },
+ { ZSTD_compressBlock_fast_extDict, ZSTD_compressBlock_doubleFast_extDict, ZSTD_compressBlock_greedy_extDict, ZSTD_compressBlock_lazy_extDict, ZSTD_compressBlock_lazy2_extDict, ZSTD_compressBlock_btlazy2_extDict, ZSTD_compressBlock_btopt_extDict, ZSTD_compressBlock_btopt2_extDict }
+ };
+
+ return blockCompressor[extDict][(U32)strat];
+ }
+
+
+ static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+ {
+ ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(zc->params.cParams.strategy, zc->lowLimit < zc->dictLimit);
+ const BYTE* const base = zc->base;
+ const BYTE* const istart = (const BYTE*)src;
+ const U32 current = (U32)(istart-base);
+ if (srcSize < MIN_CBLOCK_SIZE+ZSTD_blockHeaderSize+1) return 0; /* don't even attempt compression below a certain srcSize */
+ ZSTD_resetSeqStore(&(zc->seqStore));
+ if (current > zc->nextToUpdate + 384)
+ zc->nextToUpdate = current - MIN(192, (U32)(current - zc->nextToUpdate - 384)); /* update tree not updated after finding very long rep matches */
+ blockCompressor(zc, src, srcSize);
+ return ZSTD_compressSequences(zc, dst, dstCapacity, srcSize);
+ }
+
+
+ /*! ZSTD_compress_generic() :
+ * Compress a chunk of data into one or multiple blocks.
+ * All blocks will be terminated, all input will be consumed.
+ * Function will issue an error if there is not enough `dstCapacity` to hold the compressed content.
+ * Frame is expected to be already started (header already produced).
+ * @return : compressed size, or an error code
+ */
+ static size_t ZSTD_compress_generic (ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ U32 lastFrameChunk)
+ {
+ size_t blockSize = cctx->blockSize;
+ size_t remaining = srcSize;
+ const BYTE* ip = (const BYTE*)src;
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* op = ostart;
+ U32 const maxDist = 1 << cctx->params.cParams.windowLog;
+
+ if (cctx->params.fParams.checksumFlag && srcSize)
+ XXH64_update(&cctx->xxhState, src, srcSize);
+
+ while (remaining) {
+ U32 const lastBlock = lastFrameChunk & (blockSize >= remaining);
+ size_t cSize;
+
+ if (dstCapacity < ZSTD_blockHeaderSize + MIN_CBLOCK_SIZE) return ERROR(dstSize_tooSmall); /* not enough space to store compressed block */
+ if (remaining < blockSize) blockSize = remaining;
+
+ /* preemptive overflow correction */
+ if (cctx->lowLimit > (2U<<30)) {
+ U32 const cycleMask = (1 << ZSTD_cycleLog(cctx->params.cParams.hashLog, cctx->params.cParams.strategy)) - 1;
+ U32 const current = (U32)(ip - cctx->base);
+ U32 const newCurrent = (current & cycleMask) + (1 << cctx->params.cParams.windowLog);
+ U32 const correction = current - newCurrent;
+ ZSTD_STATIC_ASSERT(ZSTD_WINDOWLOG_MAX_64 <= 30);
+ ZSTD_reduceIndex(cctx, correction);
+ cctx->base += correction;
+ cctx->dictBase += correction;
+ cctx->lowLimit -= correction;
+ cctx->dictLimit -= correction;
+ if (cctx->nextToUpdate < correction) cctx->nextToUpdate = 0;
+ else cctx->nextToUpdate -= correction;
+ }
+
+ if ((U32)(ip+blockSize - cctx->base) > cctx->loadedDictEnd + maxDist) {
+ /* enforce maxDist */
+ U32 const newLowLimit = (U32)(ip+blockSize - cctx->base) - maxDist;
+ if (cctx->lowLimit < newLowLimit) cctx->lowLimit = newLowLimit;
+ if (cctx->dictLimit < cctx->lowLimit) cctx->dictLimit = cctx->lowLimit;
+ }
+
+ cSize = ZSTD_compressBlock_internal(cctx, op+ZSTD_blockHeaderSize, dstCapacity-ZSTD_blockHeaderSize, ip, blockSize);
+ if (ZSTD_isError(cSize)) return cSize;
+
+ if (cSize == 0) { /* block is not compressible */
+ U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(blockSize << 3);
+ if (blockSize + ZSTD_blockHeaderSize > dstCapacity) return ERROR(dstSize_tooSmall);
+ MEM_writeLE32(op, cBlockHeader24); /* no problem: the 4th byte will be overwritten */
+ memcpy(op + ZSTD_blockHeaderSize, ip, blockSize);
+ cSize = ZSTD_blockHeaderSize+blockSize;
+ } else {
+ U32 const cBlockHeader24 = lastBlock + (((U32)bt_compressed)<<1) + (U32)(cSize << 3);
+ MEM_writeLE24(op, cBlockHeader24);
+ cSize += ZSTD_blockHeaderSize;
+ }
+
+ remaining -= blockSize;
+ dstCapacity -= cSize;
+ ip += blockSize;
+ op += cSize;
+ }
+
+ if (lastFrameChunk && (op>ostart)) cctx->stage = ZSTDcs_ending;
+ return op-ostart;
+ }
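
The 3-byte block header assembled in ZSTD_compress_generic packs, in little-endian order: bit 0 the last-block flag, bits 1-2 the block type (bt_raw = 0, bt_rle = 1, bt_compressed = 2 in this file's enum), and bits 3-23 the block size. A minimal encode/decode sketch of that layout (not the library's API):

#include <stdint.h>
#include <stdio.h>

static void writeLE24(uint8_t* p, uint32_t v)
{   p[0] = (uint8_t)v; p[1] = (uint8_t)(v >> 8); p[2] = (uint8_t)(v >> 16); }

int main(void)
{
    uint32_t const lastBlock = 1, blockType = 2 /* compressed */, cSize = 1234;
    uint32_t const header = lastBlock + (blockType << 1) + (cSize << 3);
    uint8_t buf[3]; writeLE24(buf, header);

    uint32_t const v = buf[0] | ((uint32_t)buf[1] << 8) | ((uint32_t)buf[2] << 16);
    printf("last=%u type=%u size=%u\n", v & 1, (v >> 1) & 3, v >> 3);
    return 0;
}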
+
+
+ static size_t ZSTD_writeFrameHeader(void* dst, size_t dstCapacity,
+ ZSTD_parameters params, U64 pledgedSrcSize, U32 dictID)
+ { BYTE* const op = (BYTE*)dst;
+ U32 const dictIDSizeCode = (dictID>0) + (dictID>=256) + (dictID>=65536); /* 0-3 */
+ U32 const checksumFlag = params.fParams.checksumFlag>0;
+ U32 const windowSize = 1U << params.cParams.windowLog;
+ U32 const singleSegment = params.fParams.contentSizeFlag && (windowSize > (pledgedSrcSize-1));
+ BYTE const windowLogByte = (BYTE)((params.cParams.windowLog - ZSTD_WINDOWLOG_ABSOLUTEMIN) << 3);
+ U32 const fcsCode = params.fParams.contentSizeFlag ?
+ (pledgedSrcSize>=256) + (pledgedSrcSize>=65536+256) + (pledgedSrcSize>=0xFFFFFFFFU) : /* 0-3 */
+ 0;
+ BYTE const frameHeaderDescriptionByte = (BYTE)(dictIDSizeCode + (checksumFlag<<2) + (singleSegment<<5) + (fcsCode<<6) );
+ size_t pos;
+
+ if (dstCapacity < ZSTD_frameHeaderSize_max) return ERROR(dstSize_tooSmall);
+
+ MEM_writeLE32(dst, ZSTD_MAGICNUMBER);
+ op[4] = frameHeaderDescriptionByte; pos=5;
+ if (!singleSegment) op[pos++] = windowLogByte;
+ switch(dictIDSizeCode)
+ {
+ default: /* impossible */
+ case 0 : break;
+ case 1 : op[pos] = (BYTE)(dictID); pos++; break;
+ case 2 : MEM_writeLE16(op+pos, (U16)dictID); pos+=2; break;
+ case 3 : MEM_writeLE32(op+pos, dictID); pos+=4; break;
+ }
+ switch(fcsCode)
+ {
+ default: /* impossible */
+ case 0 : if (singleSegment) op[pos++] = (BYTE)(pledgedSrcSize); break;
+ case 1 : MEM_writeLE16(op+pos, (U16)(pledgedSrcSize-256)); pos+=2; break;
+ case 2 : MEM_writeLE32(op+pos, (U32)(pledgedSrcSize)); pos+=4; break;
+ case 3 : MEM_writeLE64(op+pos, (U64)(pledgedSrcSize)); pos+=8; break;
+ }
+ return pos;
+ }
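
The frame-header description byte written above packs four fields: bits 0-1 the dictID size code (0, 1, 2 or 4 bytes), bit 2 the checksum flag, bit 5 the single-segment flag, and bits 6-7 the frame-content-size code (0, 2, 4 or 8 bytes, where code 0 means one byte when single-segment is set). A sketch of unpacking it (hypothetical, not the library's API):

#include <stdio.h>

int main(void)
{
    unsigned const fhd = (unsigned)(1 /* dictID: 1 byte */ + (1u << 2) /* checksum */
                                    + (0u << 5) /* multi-segment */ + (2u << 6) /* fcs: 4 bytes */);
    static const unsigned didSize[4] = { 0, 1, 2, 4 };
    static const unsigned fcsSize[4] = { 0, 2, 4, 8 };   /* fcs code 0: 1 byte if single-segment */
    printf("dictID bytes=%u checksum=%u singleSegment=%u fcs bytes=%u\n",
           didSize[fhd & 3], (fhd >> 2) & 1, (fhd >> 5) & 1, fcsSize[fhd >> 6]);
    return 0;
}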
+
+
+ static size_t ZSTD_compressContinue_internal (ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ U32 frame, U32 lastFrameChunk)
+ {
+ const BYTE* const ip = (const BYTE*) src;
+ size_t fhSize = 0;
+
+ if (cctx->stage==ZSTDcs_created) return ERROR(stage_wrong); /* missing init (ZSTD_compressBegin) */
+
+ if (frame && (cctx->stage==ZSTDcs_init)) {
+ fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, cctx->frameContentSize, cctx->dictID);
+ if (ZSTD_isError(fhSize)) return fhSize;
+ dstCapacity -= fhSize;
+ dst = (char*)dst + fhSize;
+ cctx->stage = ZSTDcs_ongoing;
+ }
+
+ /* Check if blocks follow each other */
+ if (src != cctx->nextSrc) {
+ /* not contiguous */
+ ptrdiff_t const delta = cctx->nextSrc - ip;
+ cctx->lowLimit = cctx->dictLimit;
+ cctx->dictLimit = (U32)(cctx->nextSrc - cctx->base);
+ cctx->dictBase = cctx->base;
+ cctx->base -= delta;
+ cctx->nextToUpdate = cctx->dictLimit;
+ if (cctx->dictLimit - cctx->lowLimit < HASH_READ_SIZE) cctx->lowLimit = cctx->dictLimit; /* too small extDict */
+ }
+
+ /* if input and dictionary overlap : reduce dictionary (area presumed modified by input) */
+ if ((ip+srcSize > cctx->dictBase + cctx->lowLimit) & (ip < cctx->dictBase + cctx->dictLimit)) {
+ ptrdiff_t const highInputIdx = (ip + srcSize) - cctx->dictBase;
+ U32 const lowLimitMax = (highInputIdx > (ptrdiff_t)cctx->dictLimit) ? cctx->dictLimit : (U32)highInputIdx;
+ cctx->lowLimit = lowLimitMax;
+ }
+
+ cctx->nextSrc = ip + srcSize;
+
+ { size_t const cSize = frame ?
+ ZSTD_compress_generic (cctx, dst, dstCapacity, src, srcSize, lastFrameChunk) :
+ ZSTD_compressBlock_internal (cctx, dst, dstCapacity, src, srcSize);
+ if (ZSTD_isError(cSize)) return cSize;
+ return cSize + fhSize;
+ }
+ }
+
+
+ size_t ZSTD_compressContinue (ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+ {
+ return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 0);
+ }
+
+
+ size_t ZSTD_getBlockSizeMax(ZSTD_CCtx* cctx)
+ {
+ return MIN (ZSTD_BLOCKSIZE_ABSOLUTEMAX, 1 << cctx->params.cParams.windowLog);
+ }
+
+ size_t ZSTD_compressBlock(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+ {
+ size_t const blockSizeMax = ZSTD_getBlockSizeMax(cctx);
+ if (srcSize > blockSizeMax) return ERROR(srcSize_wrong);
+ return ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 0, 0);
+ }
+
+
+ static size_t ZSTD_loadDictionaryContent(ZSTD_CCtx* zc, const void* src, size_t srcSize)
+ {
+ const BYTE* const ip = (const BYTE*) src;
+ const BYTE* const iend = ip + srcSize;
+
+ /* input becomes current prefix */
+ zc->lowLimit = zc->dictLimit;
+ zc->dictLimit = (U32)(zc->nextSrc - zc->base);
+ zc->dictBase = zc->base;
+ zc->base += ip - zc->nextSrc;
+ zc->nextToUpdate = zc->dictLimit;
+ zc->loadedDictEnd = (U32)(iend - zc->base);
+
+ zc->nextSrc = iend;
+ if (srcSize <= HASH_READ_SIZE) return 0;
+
+ switch(zc->params.cParams.strategy)
+ {
+ case ZSTD_fast:
+ ZSTD_fillHashTable (zc, iend, zc->params.cParams.searchLength);
+ break;
+
+ case ZSTD_dfast:
+ ZSTD_fillDoubleHashTable (zc, iend, zc->params.cParams.searchLength);
+ break;
+
+ case ZSTD_greedy:
+ case ZSTD_lazy:
+ case ZSTD_lazy2:
+ ZSTD_insertAndFindFirstIndex (zc, iend-HASH_READ_SIZE, zc->params.cParams.searchLength);
+ break;
+
+ case ZSTD_btlazy2:
+ case ZSTD_btopt:
+ case ZSTD_btopt2:
+ ZSTD_updateTree(zc, iend-HASH_READ_SIZE, iend, 1 << zc->params.cParams.searchLog, zc->params.cParams.searchLength);
+ break;
+
+ default:
+ return ERROR(GENERIC); /* strategy doesn't exist; impossible */
+ }
+
+ zc->nextToUpdate = zc->loadedDictEnd;
+ return 0;
+ }
+
+
+ /* Dictionaries that assign zero probability to symbols that do occur cause problems
+ during FSE encoding. Refuse dictionaries that assign zero probability to symbols
+ that we may encounter during compression.
+ NOTE: This behavior is not standard and could be improved in the future. */
+ static size_t ZSTD_checkDictNCount(short* normalizedCounter, unsigned dictMaxSymbolValue, unsigned maxSymbolValue) {
+ U32 s;
+ if (dictMaxSymbolValue < maxSymbolValue) return ERROR(dictionary_corrupted);
+ for (s = 0; s <= maxSymbolValue; ++s) {
+ if (normalizedCounter[s] == 0) return ERROR(dictionary_corrupted);
+ }
+ return 0;
+ }
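
A normalized count of 0 means the FSE table assigns that symbol no states at all, so any occurrence of it would be unencodable; that is exactly what the loop above refuses. A tiny illustration with made-up counts:

#include <stdio.h>

int main(void)
{
    short nc[4] = { 10, 0, 3, 3 };    /* symbol 1 has zero normalized probability */
    for (unsigned s = 0; s <= 3; s++)
        if (nc[s] == 0) { printf("symbol %u unencodable -> reject dictionary\n", s); return 1; }
    return 0;
}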
2501
+
2502
+
2503
+ /* Dictionary format :
2504
+ Magic == ZSTD_DICT_MAGIC (4 bytes)
2505
+ HUF_writeCTable(256)
2506
+ FSE_writeNCount(off)
2507
+ FSE_writeNCount(ml)
2508
+ FSE_writeNCount(ll)
2509
+ RepOffsets
2510
+ Dictionary content
2511
+ */
+ /*! ZSTD_loadDictEntropyStats() :
+ @return : size read from dictionary
+ note : magic number is assumed to have been checked already */
+ static size_t ZSTD_loadDictEntropyStats(ZSTD_CCtx* cctx, const void* dict, size_t dictSize)
+ {
+ const BYTE* dictPtr = (const BYTE*)dict;
+ const BYTE* const dictEnd = dictPtr + dictSize;
+ short offcodeNCount[MaxOff+1];
+ unsigned offcodeMaxValue = MaxOff;
+ BYTE scratchBuffer[1<<MAX(MLFSELog,LLFSELog)];
+
+ { size_t const hufHeaderSize = HUF_readCTable(cctx->hufTable, 255, dict, dictSize);
+ if (HUF_isError(hufHeaderSize)) return ERROR(dictionary_corrupted);
+ dictPtr += hufHeaderSize;
+ }
+
+ { unsigned offcodeLog;
+ size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
+ if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
+ if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
+ /* Defer checking offcodeMaxValue because we need to know the size of the dictionary content */
+ CHECK_E (FSE_buildCTable_wksp(cctx->offcodeCTable, offcodeNCount, offcodeMaxValue, offcodeLog, scratchBuffer, sizeof(scratchBuffer)), dictionary_corrupted);
+ dictPtr += offcodeHeaderSize;
+ }
+
+ { short matchlengthNCount[MaxML+1];
+ unsigned matchlengthMaxValue = MaxML, matchlengthLog;
+ size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
+ if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
+ if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
+ /* Every match length code must have non-zero probability */
+ CHECK_F (ZSTD_checkDictNCount(matchlengthNCount, matchlengthMaxValue, MaxML));
+ CHECK_E (FSE_buildCTable_wksp(cctx->matchlengthCTable, matchlengthNCount, matchlengthMaxValue, matchlengthLog, scratchBuffer, sizeof(scratchBuffer)), dictionary_corrupted);
+ dictPtr += matchlengthHeaderSize;
+ }
+
+ { short litlengthNCount[MaxLL+1];
+ unsigned litlengthMaxValue = MaxLL, litlengthLog;
+ size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
+ if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
+ if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
+ /* Every literal length code must have non-zero probability */
+ CHECK_F (ZSTD_checkDictNCount(litlengthNCount, litlengthMaxValue, MaxLL));
+ CHECK_E(FSE_buildCTable_wksp(cctx->litlengthCTable, litlengthNCount, litlengthMaxValue, litlengthLog, scratchBuffer, sizeof(scratchBuffer)), dictionary_corrupted);
+ dictPtr += litlengthHeaderSize;
+ }
+
+ if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
+ cctx->rep[0] = MEM_readLE32(dictPtr+0); if (cctx->rep[0] >= dictSize) return ERROR(dictionary_corrupted);
+ cctx->rep[1] = MEM_readLE32(dictPtr+4); if (cctx->rep[1] >= dictSize) return ERROR(dictionary_corrupted);
+ cctx->rep[2] = MEM_readLE32(dictPtr+8); if (cctx->rep[2] >= dictSize) return ERROR(dictionary_corrupted);
+ dictPtr += 12;
+
+ { U32 offcodeMax = MaxOff;
+ if ((size_t)(dictEnd - dictPtr) <= ((U32)-1) - 128 KB) {
+ U32 const maxOffset = (U32)(dictEnd - dictPtr) + 128 KB; /* The maximum offset that must be supported */
+ /* Calculate minimum offset code required to represent maxOffset */
+ offcodeMax = ZSTD_highbit32(maxOffset);
+ }
+ /* Every possible supported offset <= dictContentSize + 128 KB must be representable */
+ CHECK_F (ZSTD_checkDictNCount(offcodeNCount, offcodeMaxValue, MIN(offcodeMax, MaxOff)));
+ }
+
+ cctx->flagStaticTables = 1;
+ return dictPtr - (const BYTE*)dict;
+ }
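Editor's note: to make the offcodeMax bound above concrete, take a hypothetical dictionary whose content (after the entropy tables and rep offsets) is 100 KB: maxOffset = 100 KB + 128 KB = 233472, and ZSTD_highbit32(233472) = 17, so the dictionary's offset table only has to give non-zero probability to offset codes 0..17; higher codes may be absent without the dictionary being rejected.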
+
+ /** ZSTD_compress_insertDictionary() :
+ * @return : 0, or an error code */
+ static size_t ZSTD_compress_insertDictionary(ZSTD_CCtx* zc, const void* dict, size_t dictSize)
+ {
+ if ((dict==NULL) || (dictSize<=8)) return 0;
+
+ /* default : dict is pure content */
+ if (MEM_readLE32(dict) != ZSTD_DICT_MAGIC) return ZSTD_loadDictionaryContent(zc, dict, dictSize);
+ zc->dictID = zc->params.fParams.noDictIDFlag ? 0 : MEM_readLE32((const char*)dict+4);
+
+ /* known magic number : dict is parsed for entropy stats and content */
+ { size_t const loadError = ZSTD_loadDictEntropyStats(zc, (const char*)dict+8 /* skip dictHeader */, dictSize-8);
+ size_t const eSize = loadError + 8;
+ if (ZSTD_isError(loadError)) return loadError;
+ return ZSTD_loadDictionaryContent(zc, (const char*)dict+eSize, dictSize-eSize);
+ }
+ }
+
+
+ /*! ZSTD_compressBegin_internal() :
+ * @return : 0, or an error code */
+ static size_t ZSTD_compressBegin_internal(ZSTD_CCtx* cctx,
+ const void* dict, size_t dictSize,
+ ZSTD_parameters params, U64 pledgedSrcSize)
+ {
+ ZSTD_compResetPolicy_e const crp = dictSize ? ZSTDcrp_fullReset : ZSTDcrp_continue;
+ CHECK_F(ZSTD_resetCCtx_advanced(cctx, params, pledgedSrcSize, crp));
+ return ZSTD_compress_insertDictionary(cctx, dict, dictSize);
+ }
+
+
+ /*! ZSTD_compressBegin_advanced() :
+ * @return : 0, or an error code */
+ size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx,
+ const void* dict, size_t dictSize,
+ ZSTD_parameters params, unsigned long long pledgedSrcSize)
+ {
+ /* compression parameters verification and optimization */
+ CHECK_F(ZSTD_checkCParams(params.cParams));
+ return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, pledgedSrcSize);
+ }
+
+
+ size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel)
+ {
+ ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
+ return ZSTD_compressBegin_internal(cctx, dict, dictSize, params, 0);
+ }
+
+
+ size_t ZSTD_compressBegin(ZSTD_CCtx* zc, int compressionLevel)
+ {
+ return ZSTD_compressBegin_usingDict(zc, NULL, 0, compressionLevel);
+ }
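Editor's sketch (not part of this diff): the functions above form the buffer-less begin/continue/end API. A minimal sketch of driving it over one contiguous input fed in fixed chunks, assuming chunks are presented in order and dst has ZSTD_compressBound() headroom at each step; compressByChunks and its parameters are illustrative names only.

#include "zstd.h"

size_t compressByChunks(ZSTD_CCtx* cctx,
                        void* dst, size_t dstCapacity,
                        const void* src, size_t srcSize,
                        size_t chunkSize, int level)
{
    const char* ip = (const char*)src;
    char* op = (char*)dst;
    char* const oend = op + dstCapacity;
    size_t remaining = srcSize;
    {   size_t const initErr = ZSTD_compressBegin(cctx, level);
        if (ZSTD_isError(initErr)) return initErr;
    }
    while (remaining > chunkSize) {   /* every chunk except the last */
        size_t const cSize = ZSTD_compressContinue(cctx, op, (size_t)(oend - op), ip, chunkSize);
        if (ZSTD_isError(cSize)) return cSize;
        op += cSize; ip += chunkSize; remaining -= chunkSize;
    }
    /* final chunk : ZSTD_compressEnd() also writes the frame epilogue */
    {   size_t const cSize = ZSTD_compressEnd(cctx, op, (size_t)(oend - op), ip, remaining);
        if (ZSTD_isError(cSize)) return cSize;
        op += cSize;
    }
    return (size_t)(op - (char*)dst);
}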
+
+
+ /*! ZSTD_writeEpilogue() :
+ * Ends a frame.
+ * @return : nb of bytes written into dst (or an error code) */
+ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
+ {
+ BYTE* const ostart = (BYTE*)dst;
+ BYTE* op = ostart;
+ size_t fhSize = 0;
+
+ if (cctx->stage == ZSTDcs_created) return ERROR(stage_wrong); /* init missing */
+
+ /* special case : empty frame */
+ if (cctx->stage == ZSTDcs_init) {
+ fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, cctx->params, 0, 0);
+ if (ZSTD_isError(fhSize)) return fhSize;
+ dstCapacity -= fhSize;
+ op += fhSize;
+ cctx->stage = ZSTDcs_ongoing;
+ }
+
+ if (cctx->stage != ZSTDcs_ending) {
+ /* write one last empty block, make it the "last" block */
+ U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
+ if (dstCapacity<4) return ERROR(dstSize_tooSmall);
+ MEM_writeLE32(op, cBlockHeader24);
+ op += ZSTD_blockHeaderSize;
+ dstCapacity -= ZSTD_blockHeaderSize;
+ }
+
+ if (cctx->params.fParams.checksumFlag) {
+ U32 const checksum = (U32) XXH64_digest(&cctx->xxhState);
+ if (dstCapacity<4) return ERROR(dstSize_tooSmall);
+ MEM_writeLE32(op, checksum);
+ op += 4;
+ }
+
+ cctx->stage = ZSTDcs_created; /* return to "created but no init" status */
+ return op-ostart;
+ }
+
+
+ size_t ZSTD_compressEnd (ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize)
+ {
+ size_t endResult;
+ size_t const cSize = ZSTD_compressContinue_internal(cctx, dst, dstCapacity, src, srcSize, 1, 1);
+ if (ZSTD_isError(cSize)) return cSize;
+ endResult = ZSTD_writeEpilogue(cctx, (char*)dst + cSize, dstCapacity-cSize);
+ if (ZSTD_isError(endResult)) return endResult;
+ return cSize + endResult;
+ }
+
+
+ static size_t ZSTD_compress_internal (ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize,
+ ZSTD_parameters params)
+ {
+ CHECK_F(ZSTD_compressBegin_internal(cctx, dict, dictSize, params, srcSize));
+ return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
+ }
+
+ size_t ZSTD_compress_advanced (ZSTD_CCtx* ctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const void* dict, size_t dictSize,
+ ZSTD_parameters params)
+ {
+ CHECK_F(ZSTD_checkCParams(params.cParams));
+ return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
+ }
+
+ size_t ZSTD_compress_usingDict(ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, const void* dict, size_t dictSize, int compressionLevel)
+ {
+ ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, dict ? dictSize : 0);
+ params.fParams.contentSizeFlag = 1;
+ return ZSTD_compress_internal(ctx, dst, dstCapacity, src, srcSize, dict, dictSize, params);
+ }
+
+ size_t ZSTD_compressCCtx (ZSTD_CCtx* ctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel)
+ {
+ return ZSTD_compress_usingDict(ctx, dst, dstCapacity, src, srcSize, NULL, 0, compressionLevel);
+ }
+
+ size_t ZSTD_compress(void* dst, size_t dstCapacity, const void* src, size_t srcSize, int compressionLevel)
+ {
+ size_t result;
+ ZSTD_CCtx ctxBody;
+ memset(&ctxBody, 0, sizeof(ctxBody));
+ memcpy(&ctxBody.customMem, &defaultCustomMem, sizeof(ZSTD_customMem));
+ result = ZSTD_compressCCtx(&ctxBody, dst, dstCapacity, src, srcSize, compressionLevel);
+ ZSTD_free(ctxBody.workSpace, defaultCustomMem); /* can't free ctxBody itself, as it's on stack; free only heap content */
+ return result;
+ }
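Editor's sketch (not part of this diff): typical one-shot usage of ZSTD_compress() from an application, with the destination sized via ZSTD_compressBound() for the worst case; the payload string is a hypothetical stand-in.

#include <stdio.h>
#include <stdlib.h>
#include "zstd.h"

int main(void)
{
    const char src[] = "hello, hello, hello";           /* hypothetical payload */
    size_t const bound = ZSTD_compressBound(sizeof(src));
    void* const dst = malloc(bound);
    if (dst == NULL) return 1;
    {   size_t const cSize = ZSTD_compress(dst, bound, src, sizeof(src), 1);
        if (ZSTD_isError(cSize)) {
            fprintf(stderr, "zstd error: %s\n", ZSTD_getErrorName(cSize));
            free(dst);
            return 1;
        }
        printf("%u -> %u bytes\n", (unsigned)sizeof(src), (unsigned)cSize);
    }
    free(dst);
    return 0;
}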
+
+
+ /* ===== Dictionary API ===== */
+
+ struct ZSTD_CDict_s {
+ void* dictContent;
+ size_t dictContentSize;
+ ZSTD_CCtx* refContext;
+ }; /* typedef'd to ZSTD_CDict within "zstd.h" */
+
+ size_t ZSTD_sizeof_CDict(const ZSTD_CDict* cdict)
+ {
+ if (cdict==NULL) return 0; /* support sizeof on NULL */
+ return ZSTD_sizeof_CCtx(cdict->refContext) + cdict->dictContentSize;
+ }
+
+ ZSTD_CDict* ZSTD_createCDict_advanced(const void* dict, size_t dictSize, ZSTD_parameters params, ZSTD_customMem customMem)
+ {
+ if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem;
+ if (!customMem.customAlloc || !customMem.customFree) return NULL;
+
+ { ZSTD_CDict* const cdict = (ZSTD_CDict*) ZSTD_malloc(sizeof(ZSTD_CDict), customMem);
+ void* const dictContent = ZSTD_malloc(dictSize, customMem);
+ ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(customMem);
+
+ if (!dictContent || !cdict || !cctx) {
+ ZSTD_free(dictContent, customMem);
+ ZSTD_free(cdict, customMem);
+ ZSTD_free(cctx, customMem);
+ return NULL;
+ }
+
+ if (dictSize) {
+ memcpy(dictContent, dict, dictSize);
+ }
+ { size_t const errorCode = ZSTD_compressBegin_advanced(cctx, dictContent, dictSize, params, 0);
+ if (ZSTD_isError(errorCode)) {
+ ZSTD_free(dictContent, customMem);
+ ZSTD_free(cdict, customMem);
+ ZSTD_free(cctx, customMem);
+ return NULL;
+ } }
+
+ cdict->dictContent = dictContent;
+ cdict->dictContentSize = dictSize;
+ cdict->refContext = cctx;
+ return cdict;
+ }
+ }
+
+ ZSTD_CDict* ZSTD_createCDict(const void* dict, size_t dictSize, int compressionLevel)
+ {
+ ZSTD_customMem const allocator = { NULL, NULL, NULL };
+ ZSTD_parameters params = ZSTD_getParams(compressionLevel, 0, dictSize);
+ params.fParams.contentSizeFlag = 1;
+ return ZSTD_createCDict_advanced(dict, dictSize, params, allocator);
+ }
+
+ size_t ZSTD_freeCDict(ZSTD_CDict* cdict)
+ {
+ if (cdict==NULL) return 0; /* support free on NULL */
+ { ZSTD_customMem const cMem = cdict->refContext->customMem;
+ ZSTD_freeCCtx(cdict->refContext);
+ ZSTD_free(cdict->dictContent, cMem);
+ ZSTD_free(cdict, cMem);
+ return 0;
+ }
+ }
+
+ static ZSTD_parameters ZSTD_getParamsFromCDict(const ZSTD_CDict* cdict) {
+ return ZSTD_getParamsFromCCtx(cdict->refContext);
+ }
+
+ size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict, U64 pledgedSrcSize)
+ {
+ if (cdict->dictContentSize) CHECK_F(ZSTD_copyCCtx(cctx, cdict->refContext, pledgedSrcSize))
+ else CHECK_F(ZSTD_compressBegin_advanced(cctx, NULL, 0, cdict->refContext->params, pledgedSrcSize));
+ return 0;
+ }
+
+ /*! ZSTD_compress_usingCDict() :
+ * Compression using a digested Dictionary.
+ * Faster startup than ZSTD_compress_usingDict(), recommended when the same dictionary is used multiple times.
+ * Note that the compression level is decided during dictionary creation */
+ size_t ZSTD_compress_usingCDict(ZSTD_CCtx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const ZSTD_CDict* cdict)
+ {
+ CHECK_F(ZSTD_compressBegin_usingCDict(cctx, cdict, srcSize));
+
+ if (cdict->refContext->params.fParams.contentSizeFlag==1) {
+ cctx->params.fParams.contentSizeFlag = 1;
+ cctx->frameContentSize = srcSize;
+ }
+
+ return ZSTD_compressEnd(cctx, dst, dstCapacity, src, srcSize);
+ }
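Editor's sketch (not part of this diff): digesting a dictionary once, then compressing with it. `dictBuf`/`dictLen` and the function name are hypothetical; in real use the CDict would be kept alive and reused across many inputs, which is the whole point of the digested form.

#include "zstd.h"

size_t compressWithCDict(const void* dictBuf, size_t dictLen,
                         const void* src, size_t srcSize,
                         void* dst, size_t dstCapacity)
{
    size_t cSize = 0;   /* 0 signals failure in this sketch */
    ZSTD_CDict* const cdict = ZSTD_createCDict(dictBuf, dictLen, 3); /* level fixed at creation */
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    if (cdict && cctx) {
        cSize = ZSTD_compress_usingCDict(cctx, dst, dstCapacity, src, srcSize, cdict);
        if (ZSTD_isError(cSize)) cSize = 0;
    }
    ZSTD_freeCCtx(cctx);    /* both free functions accept NULL */
    ZSTD_freeCDict(cdict);
    return cSize;
}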
+
+
+
+ /* ******************************************************************
+ * Streaming
+ ********************************************************************/
+
+ typedef enum { zcss_init, zcss_load, zcss_flush, zcss_final } ZSTD_cStreamStage;
+
+ struct ZSTD_CStream_s {
+ ZSTD_CCtx* cctx;
+ ZSTD_CDict* cdictLocal;
+ const ZSTD_CDict* cdict;
+ char* inBuff;
+ size_t inBuffSize;
+ size_t inToCompress;
+ size_t inBuffPos;
+ size_t inBuffTarget;
+ size_t blockSize;
+ char* outBuff;
+ size_t outBuffSize;
+ size_t outBuffContentSize;
+ size_t outBuffFlushedSize;
+ ZSTD_cStreamStage stage;
+ U32 checksum;
+ U32 frameEnded;
+ U64 pledgedSrcSize;
+ U64 inputProcessed;
+ ZSTD_parameters params;
+ ZSTD_customMem customMem;
+ }; /* typedef'd to ZSTD_CStream within "zstd.h" */
+
+ ZSTD_CStream* ZSTD_createCStream(void)
+ {
+ return ZSTD_createCStream_advanced(defaultCustomMem);
+ }
+
+ ZSTD_CStream* ZSTD_createCStream_advanced(ZSTD_customMem customMem)
+ {
+ ZSTD_CStream* zcs;
+
+ if (!customMem.customAlloc && !customMem.customFree) customMem = defaultCustomMem;
+ if (!customMem.customAlloc || !customMem.customFree) return NULL;
+
+ zcs = (ZSTD_CStream*)ZSTD_malloc(sizeof(ZSTD_CStream), customMem);
+ if (zcs==NULL) return NULL;
+ memset(zcs, 0, sizeof(ZSTD_CStream));
+ memcpy(&zcs->customMem, &customMem, sizeof(ZSTD_customMem));
+ zcs->cctx = ZSTD_createCCtx_advanced(customMem);
+ if (zcs->cctx == NULL) { ZSTD_freeCStream(zcs); return NULL; }
+ return zcs;
+ }
+
+ size_t ZSTD_freeCStream(ZSTD_CStream* zcs)
+ {
+ if (zcs==NULL) return 0; /* support free on NULL */
+ { ZSTD_customMem const cMem = zcs->customMem;
+ ZSTD_freeCCtx(zcs->cctx);
+ ZSTD_freeCDict(zcs->cdictLocal);
+ ZSTD_free(zcs->inBuff, cMem);
+ ZSTD_free(zcs->outBuff, cMem);
+ ZSTD_free(zcs, cMem);
+ return 0;
+ }
+ }
+
+
+ /*====== Initialization ======*/
+
+ size_t ZSTD_CStreamInSize(void) { return ZSTD_BLOCKSIZE_ABSOLUTEMAX; }
+ size_t ZSTD_CStreamOutSize(void) { return ZSTD_compressBound(ZSTD_BLOCKSIZE_ABSOLUTEMAX) + ZSTD_blockHeaderSize + 4 /* 32-bit hash */ ; }
+
+ size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize)
+ {
+ if (zcs->inBuffSize==0) return ERROR(stage_wrong); /* zcs has not been initialized at least once */
+
+ if (zcs->cdict) CHECK_F(ZSTD_compressBegin_usingCDict(zcs->cctx, zcs->cdict, pledgedSrcSize))
+ else CHECK_F(ZSTD_compressBegin_advanced(zcs->cctx, NULL, 0, zcs->params, pledgedSrcSize));
+
+ zcs->inToCompress = 0;
+ zcs->inBuffPos = 0;
+ zcs->inBuffTarget = zcs->blockSize;
+ zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
+ zcs->stage = zcss_load;
+ zcs->frameEnded = 0;
+ zcs->pledgedSrcSize = pledgedSrcSize;
+ zcs->inputProcessed = 0;
+ return 0; /* ready to go */
+ }
+
+ size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
+ const void* dict, size_t dictSize,
+ ZSTD_parameters params, unsigned long long pledgedSrcSize)
+ {
+ /* allocate buffers */
+ { size_t const neededInBuffSize = (size_t)1 << params.cParams.windowLog;
+ if (zcs->inBuffSize < neededInBuffSize) {
+ zcs->inBuffSize = neededInBuffSize;
+ ZSTD_free(zcs->inBuff, zcs->customMem);
+ zcs->inBuff = (char*) ZSTD_malloc(neededInBuffSize, zcs->customMem);
+ if (zcs->inBuff == NULL) return ERROR(memory_allocation);
+ }
+ zcs->blockSize = MIN(ZSTD_BLOCKSIZE_ABSOLUTEMAX, neededInBuffSize);
+ }
+ if (zcs->outBuffSize < ZSTD_compressBound(zcs->blockSize)+1) {
+ zcs->outBuffSize = ZSTD_compressBound(zcs->blockSize)+1;
+ ZSTD_free(zcs->outBuff, zcs->customMem);
+ zcs->outBuff = (char*) ZSTD_malloc(zcs->outBuffSize, zcs->customMem);
+ if (zcs->outBuff == NULL) return ERROR(memory_allocation);
+ }
+
+ if (dict) {
+ ZSTD_freeCDict(zcs->cdictLocal);
+ zcs->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize, params, zcs->customMem);
+ if (zcs->cdictLocal == NULL) return ERROR(memory_allocation);
+ zcs->cdict = zcs->cdictLocal;
+ } else zcs->cdict = NULL;
+
+ zcs->checksum = params.fParams.checksumFlag > 0;
+ zcs->params = params;
+
+ return ZSTD_resetCStream(zcs, pledgedSrcSize);
+ }
+
+ /* note : cdict must outlive the compression session */
+ size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict)
+ {
+ ZSTD_parameters const params = ZSTD_getParamsFromCDict(cdict);
+ size_t const initError = ZSTD_initCStream_advanced(zcs, NULL, 0, params, 0);
+ zcs->cdict = cdict;
+ return initError;
+ }
+
+ size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs, const void* dict, size_t dictSize, int compressionLevel)
+ {
+ ZSTD_parameters const params = ZSTD_getParams(compressionLevel, 0, dictSize);
+ return ZSTD_initCStream_advanced(zcs, dict, dictSize, params, 0);
+ }
+
+ size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs, int compressionLevel, unsigned long long pledgedSrcSize)
+ {
+ ZSTD_parameters const params = ZSTD_getParams(compressionLevel, pledgedSrcSize, 0);
+ return ZSTD_initCStream_advanced(zcs, NULL, 0, params, pledgedSrcSize);
+ }
+
+ size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel)
+ {
+ return ZSTD_initCStream_usingDict(zcs, NULL, 0, compressionLevel);
+ }
+
+ size_t ZSTD_sizeof_CStream(const ZSTD_CStream* zcs)
+ {
+ if (zcs==NULL) return 0; /* support sizeof on NULL */
+ return sizeof(*zcs) + ZSTD_sizeof_CCtx(zcs->cctx) + ZSTD_sizeof_CDict(zcs->cdictLocal) + zcs->outBuffSize + zcs->inBuffSize;
+ }
+
+ /*====== Compression ======*/
+
+ typedef enum { zsf_gather, zsf_flush, zsf_end } ZSTD_flush_e;
+
+ MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
+ {
+ size_t const length = MIN(dstCapacity, srcSize);
+ memcpy(dst, src, length);
+ return length;
+ }
+
+ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
+ void* dst, size_t* dstCapacityPtr,
+ const void* src, size_t* srcSizePtr,
+ ZSTD_flush_e const flush)
+ {
+ U32 someMoreWork = 1;
+ const char* const istart = (const char*)src;
+ const char* const iend = istart + *srcSizePtr;
+ const char* ip = istart;
+ char* const ostart = (char*)dst;
+ char* const oend = ostart + *dstCapacityPtr;
+ char* op = ostart;
+
+ while (someMoreWork) {
+ switch(zcs->stage)
+ {
+ case zcss_init: return ERROR(init_missing); /* call ZSTD_initCStream() first ! */
+
+ case zcss_load:
+ /* complete inBuffer */
+ { size_t const toLoad = zcs->inBuffTarget - zcs->inBuffPos;
+ size_t const loaded = ZSTD_limitCopy(zcs->inBuff + zcs->inBuffPos, toLoad, ip, iend-ip);
+ zcs->inBuffPos += loaded;
+ ip += loaded;
+ if ( (zcs->inBuffPos==zcs->inToCompress) || (!flush && (toLoad != loaded)) ) {
+ someMoreWork = 0; break; /* not enough input to get a full block : stop there, wait for more */
+ } }
+ /* compress current block (note : this stage cannot be stopped in the middle) */
+ { void* cDst;
+ size_t cSize;
+ size_t const iSize = zcs->inBuffPos - zcs->inToCompress;
+ size_t oSize = oend-op;
+ if (oSize >= ZSTD_compressBound(iSize))
+ cDst = op; /* compress directly into output buffer (avoid flush stage) */
+ else
+ cDst = zcs->outBuff, oSize = zcs->outBuffSize;
+ cSize = (flush == zsf_end) ?
+ ZSTD_compressEnd(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize) :
+ ZSTD_compressContinue(zcs->cctx, cDst, oSize, zcs->inBuff + zcs->inToCompress, iSize);
+ if (ZSTD_isError(cSize)) return cSize;
+ if (flush == zsf_end) zcs->frameEnded = 1;
+ /* prepare next block */
+ zcs->inBuffTarget = zcs->inBuffPos + zcs->blockSize;
+ if (zcs->inBuffTarget > zcs->inBuffSize)
+ zcs->inBuffPos = 0, zcs->inBuffTarget = zcs->blockSize; /* note : inBuffSize >= blockSize */
+ zcs->inToCompress = zcs->inBuffPos;
+ if (cDst == op) { op += cSize; break; } /* no need to flush */
+ zcs->outBuffContentSize = cSize;
+ zcs->outBuffFlushedSize = 0;
+ zcs->stage = zcss_flush; /* pass-through to flush stage */
+ }
+
+ case zcss_flush:
+ { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
+ size_t const flushed = ZSTD_limitCopy(op, oend-op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
+ op += flushed;
+ zcs->outBuffFlushedSize += flushed;
+ if (toFlush!=flushed) { someMoreWork = 0; break; } /* dst too small to store flushed data : stop there */
+ zcs->outBuffContentSize = zcs->outBuffFlushedSize = 0;
+ zcs->stage = zcss_load;
+ break;
+ }
+
+ case zcss_final:
+ someMoreWork = 0; /* do nothing */
+ break;
+
+ default:
+ return ERROR(GENERIC); /* impossible */
+ }
+ }
+
+ *srcSizePtr = ip - istart;
+ *dstCapacityPtr = op - ostart;
+ zcs->inputProcessed += *srcSizePtr;
+ if (zcs->frameEnded) return 0;
+ { size_t hintInSize = zcs->inBuffTarget - zcs->inBuffPos;
+ if (hintInSize==0) hintInSize = zcs->blockSize;
+ return hintInSize;
+ }
+ }
+
+ size_t ZSTD_compressStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
+ {
+ size_t sizeRead = input->size - input->pos;
+ size_t sizeWritten = output->size - output->pos;
+ size_t const result = ZSTD_compressStream_generic(zcs,
+ (char*)(output->dst) + output->pos, &sizeWritten,
+ (const char*)(input->src) + input->pos, &sizeRead, zsf_gather);
+ input->pos += sizeRead;
+ output->pos += sizeWritten;
+ return result;
+ }
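Editor's sketch (not part of this diff): ZSTD_compressStream() reports progress purely through the `pos` fields of the two buffer structs. A minimal sketch of that calling convention; `feedChunk` and its parameters are illustrative, and the caller is assumed to drain `dst` whenever it fills.

#include "zstd.h"

size_t feedChunk(ZSTD_CStream* zcs,
                 const void* chunk, size_t chunkLen,
                 void* dst, size_t dstCapacity, size_t* producedPtr)
{
    ZSTD_inBuffer in = { chunk, chunkLen, 0 };
    ZSTD_outBuffer out = { dst, dstCapacity, 0 };
    while (in.pos < in.size) {
        size_t const hint = ZSTD_compressStream(zcs, &out, &in); /* advances in.pos and out.pos */
        if (ZSTD_isError(hint)) return hint;
        if (out.pos == out.size) break; /* dst full : caller must drain before feeding more */
    }
    *producedPtr = out.pos;   /* number of compressed bytes now valid in dst */
    return 0;
}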
+
+
+ /*====== Finalize ======*/
+
+ /*! ZSTD_flushStream() :
+ * @return : amount of data remaining to flush */
+ size_t ZSTD_flushStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
+ {
+ size_t srcSize = 0;
+ size_t sizeWritten = output->size - output->pos;
+ size_t const result = ZSTD_compressStream_generic(zcs,
+ (char*)(output->dst) + output->pos, &sizeWritten,
+ &srcSize, &srcSize, /* use a valid src address instead of NULL */
+ zsf_flush);
+ output->pos += sizeWritten;
+ if (ZSTD_isError(result)) return result;
+ return zcs->outBuffContentSize - zcs->outBuffFlushedSize; /* remaining to flush */
+ }
+
+
+ size_t ZSTD_endStream(ZSTD_CStream* zcs, ZSTD_outBuffer* output)
+ {
+ BYTE* const ostart = (BYTE*)(output->dst) + output->pos;
+ BYTE* const oend = (BYTE*)(output->dst) + output->size;
+ BYTE* op = ostart;
+
+ if ((zcs->pledgedSrcSize) && (zcs->inputProcessed != zcs->pledgedSrcSize))
+ return ERROR(srcSize_wrong); /* pledgedSrcSize not respected */
+
+ if (zcs->stage != zcss_final) {
+ /* flush whatever remains */
+ size_t srcSize = 0;
+ size_t sizeWritten = output->size - output->pos;
+ size_t const notEnded = ZSTD_compressStream_generic(zcs, ostart, &sizeWritten, &srcSize, &srcSize, zsf_end); /* use a valid src address instead of NULL */
+ size_t const remainingToFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
+ op += sizeWritten;
+ if (remainingToFlush) {
+ output->pos += sizeWritten;
+ return remainingToFlush + ZSTD_BLOCKHEADERSIZE /* final empty block */ + (zcs->checksum * 4);
+ }
+ /* create epilogue */
+ zcs->stage = zcss_final;
+ zcs->outBuffContentSize = !notEnded ? 0 :
+ ZSTD_compressEnd(zcs->cctx, zcs->outBuff, zcs->outBuffSize, NULL, 0); /* write epilogue, including final empty block, into outBuff */
+ }
+
+ /* flush epilogue */
+ { size_t const toFlush = zcs->outBuffContentSize - zcs->outBuffFlushedSize;
+ size_t const flushed = ZSTD_limitCopy(op, oend-op, zcs->outBuff + zcs->outBuffFlushedSize, toFlush);
+ op += flushed;
+ zcs->outBuffFlushedSize += flushed;
+ output->pos += op-ostart;
+ if (toFlush==flushed) zcs->stage = zcss_init; /* end reached */
+ return toFlush - flushed;
+ }
+ }
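Editor's sketch (not part of this diff): a complete streaming session using the recommended buffer sizes and the endStream flush loop. `readMore()` is a hypothetical input source, and writing the produced bytes is left as comments; the whole function is illustrative, not prescribed usage.

#include <stdlib.h>
#include "zstd.h"

extern size_t readMore(void* buf, size_t cap);   /* hypothetical input source : returns 0 at EOF */

int streamCompress(void)   /* returns 0 on success, 1 on error */
{
    size_t const inSize = ZSTD_CStreamInSize();    /* recommended input buffer size */
    size_t const outSize = ZSTD_CStreamOutSize();  /* large enough to flush a full block */
    void* const inBuf = malloc(inSize);
    void* const outBuf = malloc(outSize);
    ZSTD_CStream* const zcs = ZSTD_createCStream();
    int err = (!inBuf || !outBuf || !zcs);
    if (!err) err = (int)ZSTD_isError(ZSTD_initCStream(zcs, 3));
    while (!err) {
        size_t const readLen = readMore(inBuf, inSize);
        ZSTD_inBuffer in = { inBuf, readLen, 0 };
        if (readLen == 0) break;   /* EOF */
        while (!err && in.pos < in.size) {
            ZSTD_outBuffer out = { outBuf, outSize, 0 };
            err = (int)ZSTD_isError(ZSTD_compressStream(zcs, &out, &in));
            /* ... write out.pos bytes of outBuf to the destination ... */
        }
    }
    if (!err) {   /* finish the frame; ZSTD_endStream() returns bytes still to flush */
        size_t remaining;
        do {
            ZSTD_outBuffer out = { outBuf, outSize, 0 };
            remaining = ZSTD_endStream(zcs, &out);
            err = (int)ZSTD_isError(remaining);
            /* ... write out.pos bytes of outBuf ... */
        } while (!err && remaining > 0);
    }
    ZSTD_freeCStream(zcs); free(inBuf); free(outBuf);
    return err;
}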
+
+
+
+ /*-===== Pre-defined compression levels =====-*/
+
+ #define ZSTD_DEFAULT_CLEVEL 1
+ #define ZSTD_MAX_CLEVEL 22
+ int ZSTD_maxCLevel(void) { return ZSTD_MAX_CLEVEL; }
+
+ static const ZSTD_compressionParameters ZSTD_defaultCParameters[4][ZSTD_MAX_CLEVEL+1] = {
+ { /* "default" */
+ /* W, C, H, S, L, TL, strat */
+ { 18, 12, 12, 1, 7, 16, ZSTD_fast }, /* level 0 - never used */
+ { 19, 13, 14, 1, 7, 16, ZSTD_fast }, /* level 1 */
+ { 19, 15, 16, 1, 6, 16, ZSTD_fast }, /* level 2 */
+ { 20, 16, 17, 1, 5, 16, ZSTD_dfast }, /* level 3.*/
+ { 20, 18, 18, 1, 5, 16, ZSTD_dfast }, /* level 4.*/
+ { 20, 15, 18, 3, 5, 16, ZSTD_greedy }, /* level 5 */
+ { 21, 16, 19, 2, 5, 16, ZSTD_lazy }, /* level 6 */
+ { 21, 17, 20, 3, 5, 16, ZSTD_lazy }, /* level 7 */
+ { 21, 18, 20, 3, 5, 16, ZSTD_lazy2 }, /* level 8 */
+ { 21, 20, 20, 3, 5, 16, ZSTD_lazy2 }, /* level 9 */
+ { 21, 19, 21, 4, 5, 16, ZSTD_lazy2 }, /* level 10 */
+ { 22, 20, 22, 4, 5, 16, ZSTD_lazy2 }, /* level 11 */
+ { 22, 20, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 12 */
+ { 22, 21, 22, 5, 5, 16, ZSTD_lazy2 }, /* level 13 */
+ { 22, 21, 22, 6, 5, 16, ZSTD_lazy2 }, /* level 14 */
+ { 22, 21, 21, 5, 5, 16, ZSTD_btlazy2 }, /* level 15 */
+ { 23, 22, 22, 5, 5, 16, ZSTD_btlazy2 }, /* level 16 */
+ { 23, 21, 22, 4, 5, 24, ZSTD_btopt }, /* level 17 */
+ { 23, 23, 22, 6, 5, 32, ZSTD_btopt }, /* level 18 */
+ { 23, 23, 22, 6, 3, 48, ZSTD_btopt }, /* level 19 */
+ { 25, 25, 23, 7, 3, 64, ZSTD_btopt2 }, /* level 20 */
+ { 26, 26, 23, 7, 3,256, ZSTD_btopt2 }, /* level 21 */
+ { 27, 27, 25, 9, 3,512, ZSTD_btopt2 }, /* level 22 */
+ },
+ { /* for srcSize <= 256 KB */
+ /* W, C, H, S, L, TL, strat */
+ { 0, 0, 0, 0, 0, 0, ZSTD_fast }, /* level 0 - not used */
+ { 18, 13, 14, 1, 6, 8, ZSTD_fast }, /* level 1 */
+ { 18, 14, 13, 1, 5, 8, ZSTD_dfast }, /* level 2 */
+ { 18, 16, 15, 1, 5, 8, ZSTD_dfast }, /* level 3 */
+ { 18, 15, 17, 1, 5, 8, ZSTD_greedy }, /* level 4.*/
+ { 18, 16, 17, 4, 5, 8, ZSTD_greedy }, /* level 5.*/
+ { 18, 16, 17, 3, 5, 8, ZSTD_lazy }, /* level 6.*/
+ { 18, 17, 17, 4, 4, 8, ZSTD_lazy }, /* level 7 */
+ { 18, 17, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */
+ { 18, 17, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */
+ { 18, 17, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */
+ { 18, 18, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 11.*/
+ { 18, 18, 17, 7, 4, 8, ZSTD_lazy2 }, /* level 12.*/
+ { 18, 19, 17, 6, 4, 8, ZSTD_btlazy2 }, /* level 13 */
+ { 18, 18, 18, 4, 4, 16, ZSTD_btopt }, /* level 14.*/
+ { 18, 18, 18, 4, 3, 16, ZSTD_btopt }, /* level 15.*/
+ { 18, 19, 18, 6, 3, 32, ZSTD_btopt }, /* level 16.*/
+ { 18, 19, 18, 8, 3, 64, ZSTD_btopt }, /* level 17.*/
+ { 18, 19, 18, 9, 3,128, ZSTD_btopt }, /* level 18.*/
+ { 18, 19, 18, 10, 3,256, ZSTD_btopt }, /* level 19.*/
+ { 18, 19, 18, 11, 3,512, ZSTD_btopt2 }, /* level 20.*/
+ { 18, 19, 18, 12, 3,512, ZSTD_btopt2 }, /* level 21.*/
+ { 18, 19, 18, 13, 3,512, ZSTD_btopt2 }, /* level 22.*/
+ },
+ { /* for srcSize <= 128 KB */
+ /* W, C, H, S, L, TL, strat */
+ { 17, 12, 12, 1, 7, 8, ZSTD_fast }, /* level 0 - not used */
+ { 17, 12, 13, 1, 6, 8, ZSTD_fast }, /* level 1 */
+ { 17, 13, 16, 1, 5, 8, ZSTD_fast }, /* level 2 */
+ { 17, 16, 16, 2, 5, 8, ZSTD_dfast }, /* level 3 */
+ { 17, 13, 15, 3, 4, 8, ZSTD_greedy }, /* level 4 */
+ { 17, 15, 17, 4, 4, 8, ZSTD_greedy }, /* level 5 */
+ { 17, 16, 17, 3, 4, 8, ZSTD_lazy }, /* level 6 */
+ { 17, 15, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 7 */
+ { 17, 17, 17, 4, 4, 8, ZSTD_lazy2 }, /* level 8 */
+ { 17, 17, 17, 5, 4, 8, ZSTD_lazy2 }, /* level 9 */
+ { 17, 17, 17, 6, 4, 8, ZSTD_lazy2 }, /* level 10 */
+ { 17, 17, 17, 7, 4, 8, ZSTD_lazy2 }, /* level 11 */
+ { 17, 17, 17, 8, 4, 8, ZSTD_lazy2 }, /* level 12 */
+ { 17, 18, 17, 6, 4, 8, ZSTD_btlazy2 }, /* level 13.*/
+ { 17, 17, 17, 7, 3, 8, ZSTD_btopt }, /* level 14.*/
+ { 17, 17, 17, 7, 3, 16, ZSTD_btopt }, /* level 15.*/
+ { 17, 18, 17, 7, 3, 32, ZSTD_btopt }, /* level 16.*/
+ { 17, 18, 17, 7, 3, 64, ZSTD_btopt }, /* level 17.*/
+ { 17, 18, 17, 7, 3,256, ZSTD_btopt }, /* level 18.*/
+ { 17, 18, 17, 8, 3,256, ZSTD_btopt }, /* level 19.*/
+ { 17, 18, 17, 9, 3,256, ZSTD_btopt2 }, /* level 20.*/
+ { 17, 18, 17, 10, 3,256, ZSTD_btopt2 }, /* level 21.*/
+ { 17, 18, 17, 11, 3,512, ZSTD_btopt2 }, /* level 22.*/
+ },
+ { /* for srcSize <= 16 KB */
+ /* W, C, H, S, L, TL, strat */
+ { 14, 12, 12, 1, 7, 6, ZSTD_fast }, /* level 0 - not used */
+ { 14, 14, 14, 1, 6, 6, ZSTD_fast }, /* level 1 */
+ { 14, 14, 14, 1, 4, 6, ZSTD_fast }, /* level 2 */
+ { 14, 14, 14, 1, 4, 6, ZSTD_dfast }, /* level 3.*/
+ { 14, 14, 14, 4, 4, 6, ZSTD_greedy }, /* level 4.*/
+ { 14, 14, 14, 3, 4, 6, ZSTD_lazy }, /* level 5.*/
+ { 14, 14, 14, 4, 4, 6, ZSTD_lazy2 }, /* level 6 */
+ { 14, 14, 14, 5, 4, 6, ZSTD_lazy2 }, /* level 7 */
+ { 14, 14, 14, 6, 4, 6, ZSTD_lazy2 }, /* level 8.*/
+ { 14, 15, 14, 6, 4, 6, ZSTD_btlazy2 }, /* level 9.*/
+ { 14, 15, 14, 3, 3, 6, ZSTD_btopt }, /* level 10.*/
+ { 14, 15, 14, 6, 3, 8, ZSTD_btopt }, /* level 11.*/
+ { 14, 15, 14, 6, 3, 16, ZSTD_btopt }, /* level 12.*/
+ { 14, 15, 14, 6, 3, 24, ZSTD_btopt }, /* level 13.*/
+ { 14, 15, 15, 6, 3, 48, ZSTD_btopt }, /* level 14.*/
+ { 14, 15, 15, 6, 3, 64, ZSTD_btopt }, /* level 15.*/
+ { 14, 15, 15, 6, 3, 96, ZSTD_btopt }, /* level 16.*/
+ { 14, 15, 15, 6, 3,128, ZSTD_btopt }, /* level 17.*/
+ { 14, 15, 15, 6, 3,256, ZSTD_btopt }, /* level 18.*/
+ { 14, 15, 15, 7, 3,256, ZSTD_btopt }, /* level 19.*/
+ { 14, 15, 15, 8, 3,256, ZSTD_btopt2 }, /* level 20.*/
+ { 14, 15, 15, 9, 3,256, ZSTD_btopt2 }, /* level 21.*/
+ { 14, 15, 15, 10, 3,256, ZSTD_btopt2 }, /* level 22.*/
+ },
+ };
+
+ /*! ZSTD_getCParams() :
+ * @return ZSTD_compressionParameters structure for a selected compression level, `srcSize` and `dictSize`.
+ * Size values are optional, provide 0 if not known or unused */
+ ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, unsigned long long srcSize, size_t dictSize)
+ {
+ ZSTD_compressionParameters cp;
+ size_t const addedSize = srcSize ? 0 : 500;
+ U64 const rSize = srcSize+dictSize ? srcSize+dictSize+addedSize : (U64)-1;
+ U32 const tableID = (rSize <= 256 KB) + (rSize <= 128 KB) + (rSize <= 16 KB); /* intentional underflow for srcSizeHint == 0 */
+ if (compressionLevel <= 0) compressionLevel = ZSTD_DEFAULT_CLEVEL; /* 0 == default; no negative compressionLevel yet */
+ if (compressionLevel > ZSTD_MAX_CLEVEL) compressionLevel = ZSTD_MAX_CLEVEL;
+ cp = ZSTD_defaultCParameters[tableID][compressionLevel];
+ if (MEM_32bits()) { /* auto-correction, for 32-bits mode */
+ if (cp.windowLog > ZSTD_WINDOWLOG_MAX) cp.windowLog = ZSTD_WINDOWLOG_MAX;
+ if (cp.chainLog > ZSTD_CHAINLOG_MAX) cp.chainLog = ZSTD_CHAINLOG_MAX;
+ if (cp.hashLog > ZSTD_HASHLOG_MAX) cp.hashLog = ZSTD_HASHLOG_MAX;
+ }
+ cp = ZSTD_adjustCParams(cp, srcSize, dictSize);
+ return cp;
+ }
+
+ /*! ZSTD_getParams() :
+ * same as ZSTD_getCParams(), but @return a `ZSTD_parameters` object (instead of `ZSTD_compressionParameters`).
+ * All fields of `ZSTD_frameParameters` are set to default (0) */
+ ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSize, size_t dictSize) {
+ ZSTD_parameters params;
+ ZSTD_compressionParameters const cParams = ZSTD_getCParams(compressionLevel, srcSize, dictSize);
+ memset(&params, 0, sizeof(params));
+ params.cParams = cParams;
+ return params;
+ }
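Editor's sketch (not part of this diff): requesting explicit parameters via ZSTD_getParams() and enabling the frame checksum through ZSTD_compress_advanced(); the function name and level choice are illustrative.

#include "zstd.h"

size_t compressChecksummed(ZSTD_CCtx* cctx,
                           void* dst, size_t dstCapacity,
                           const void* src, size_t srcSize)
{
    ZSTD_parameters params = ZSTD_getParams(19, srcSize, 0); /* level 19, srcSize known, no dict */
    params.fParams.checksumFlag = 1;   /* a 4-byte XXH64 checksum is appended by ZSTD_writeEpilogue() */
    return ZSTD_compress_advanced(cctx, dst, dstCapacity, src, srcSize, NULL, 0, params);
}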