zstd-ruby 1.3.3.0 → 1.3.4.0

This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only.
Files changed (44)
  1. checksums.yaml +5 -5
  2. data/README.md +1 -1
  3. data/ext/zstdruby/libzstd/BUCK +13 -0
  4. data/ext/zstdruby/libzstd/README.md +32 -25
  5. data/ext/zstdruby/libzstd/common/bitstream.h +1 -1
  6. data/ext/zstdruby/libzstd/common/compiler.h +25 -0
  7. data/ext/zstdruby/libzstd/common/cpu.h +216 -0
  8. data/ext/zstdruby/libzstd/common/error_private.c +1 -0
  9. data/ext/zstdruby/libzstd/common/fse.h +1 -1
  10. data/ext/zstdruby/libzstd/common/fse_decompress.c +2 -2
  11. data/ext/zstdruby/libzstd/common/huf.h +114 -89
  12. data/ext/zstdruby/libzstd/common/pool.c +46 -17
  13. data/ext/zstdruby/libzstd/common/pool.h +18 -9
  14. data/ext/zstdruby/libzstd/common/threading.h +12 -12
  15. data/ext/zstdruby/libzstd/common/zstd_errors.h +16 -7
  16. data/ext/zstdruby/libzstd/common/zstd_internal.h +4 -5
  17. data/ext/zstdruby/libzstd/compress/fse_compress.c +19 -11
  18. data/ext/zstdruby/libzstd/compress/huf_compress.c +160 -62
  19. data/ext/zstdruby/libzstd/compress/zstd_compress.c +973 -644
  20. data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +281 -34
  21. data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +80 -62
  22. data/ext/zstdruby/libzstd/compress/zstd_double_fast.h +11 -4
  23. data/ext/zstdruby/libzstd/compress/zstd_fast.c +87 -71
  24. data/ext/zstdruby/libzstd/compress/zstd_fast.h +10 -6
  25. data/ext/zstdruby/libzstd/compress/zstd_lazy.c +333 -274
  26. data/ext/zstdruby/libzstd/compress/zstd_lazy.h +33 -16
  27. data/ext/zstdruby/libzstd/compress/zstd_ldm.c +305 -359
  28. data/ext/zstdruby/libzstd/compress/zstd_ldm.h +64 -21
  29. data/ext/zstdruby/libzstd/compress/zstd_opt.c +194 -56
  30. data/ext/zstdruby/libzstd/compress/zstd_opt.h +17 -5
  31. data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +1131 -449
  32. data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +32 -16
  33. data/ext/zstdruby/libzstd/decompress/huf_decompress.c +390 -290
  34. data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +777 -439
  35. data/ext/zstdruby/libzstd/dictBuilder/cover.c +11 -8
  36. data/ext/zstdruby/libzstd/dictBuilder/zdict.c +83 -50
  37. data/ext/zstdruby/libzstd/dictBuilder/zdict.h +44 -43
  38. data/ext/zstdruby/libzstd/legacy/zstd_legacy.h +2 -0
  39. data/ext/zstdruby/libzstd/legacy/zstd_v04.c +42 -118
  40. data/ext/zstdruby/libzstd/legacy/zstd_v06.c +2 -2
  41. data/ext/zstdruby/libzstd/legacy/zstd_v07.c +2 -2
  42. data/ext/zstdruby/libzstd/zstd.h +254 -254
  43. data/lib/zstd-ruby/version.rb +1 -1
  44. metadata +4 -3
data/ext/zstdruby/libzstd/compress/zstd_double_fast.h
@@ -16,11 +16,18 @@ extern "C" {
  #endif
 
  #include "mem.h"   /* U32 */
- #include "zstd.h"  /* ZSTD_CCtx, size_t */
+ #include "zstd_compress_internal.h"  /* ZSTD_CCtx, size_t */
+
+ void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
+                               ZSTD_compressionParameters const* cParams,
+                               void const* end);
+ size_t ZSTD_compressBlock_doubleFast(
+         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+         ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+ size_t ZSTD_compressBlock_doubleFast_extDict(
+         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+         ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
 
- void ZSTD_fillDoubleHashTable(ZSTD_CCtx* cctx, const void* end, const U32 mls);
- size_t ZSTD_compressBlock_doubleFast(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
- size_t ZSTD_compressBlock_doubleFast_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize);
 
 
  #if defined (__cplusplus)
  }
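The header change above is the core API shift of this release: block-level entry points stop reaching into a ZSTD_CCtx and instead receive the match state, the sequence store, the repcode array, and the compression parameters as explicit arguments. A minimal compilable sketch of that calling convention follows; the types below (MatchState, SeqStore, CParams) are simplified stand-ins for illustration, not zstd's own definitions.

#include <stddef.h>
#include <stdint.h>

#define REP_NUM 3   /* zstd tracks 3 repeat offsets (ZSTD_REP_NUM) */

typedef struct { const uint8_t* base; uint32_t dictLimit; } Window;   /* stand-in */
typedef struct { Window window; uint32_t nextToUpdate; } MatchState;  /* stand-in */
typedef struct { size_t nbSeq; } SeqStore;                            /* stand-in */
typedef struct { uint32_t hashLog, searchLength, targetLength; } CParams;

/* All block compressors now share this shape: every piece of mutable state
 * is passed in, so the function no longer depends on a particular CCtx layout. */
typedef size_t (*BlockCompressor)(MatchState* ms, SeqStore* seqStore,
                                  uint32_t rep[REP_NUM], const CParams* cParams,
                                  const void* src, size_t srcSize);

static size_t compressBlock_stub(MatchState* ms, SeqStore* seqStore,
                                 uint32_t rep[REP_NUM], const CParams* cParams,
                                 const void* src, size_t srcSize)
{
    (void)ms; (void)seqStore; (void)rep; (void)cParams; (void)src;
    return srcSize;   /* a real block compressor returns the last-literals size */
}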
data/ext/zstdruby/libzstd/compress/zstd_fast.c
@@ -12,39 +12,48 @@
  #include "zstd_fast.h"
 
 
- void ZSTD_fillHashTable (ZSTD_CCtx* zc, const void* end, const U32 mls)
+ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
+                         ZSTD_compressionParameters const* cParams,
+                         void const* end)
  {
-     U32* const hashTable = zc->hashTable;
-     U32  const hBits = zc->appliedParams.cParams.hashLog;
-     const BYTE* const base = zc->base;
-     const BYTE* ip = base + zc->nextToUpdate;
+     U32* const hashTable = ms->hashTable;
+     U32  const hBits = cParams->hashLog;
+     U32  const mls = cParams->searchLength;
+     const BYTE* const base = ms->window.base;
+     const BYTE* ip = base + ms->nextToUpdate;
      const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
-     const size_t fastHashFillStep = 3;
+     const U32 fastHashFillStep = 3;
 
-     while(ip <= iend) {
-         hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip - base);
-         ip += fastHashFillStep;
+     /* Always insert every fastHashFillStep position into the hash table.
+      * Insert the other positions if their hash entry is empty.
+      */
+     for (; ip + fastHashFillStep - 1 <= iend; ip += fastHashFillStep) {
+         U32 const current = (U32)(ip - base);
+         U32 i;
+         for (i = 0; i < fastHashFillStep; ++i) {
+             size_t const hash = ZSTD_hashPtr(ip + i, hBits, mls);
+             if (i == 0 || hashTable[hash] == 0)
+                 hashTable[hash] = current + i;
+         }
      }
  }
 
-
  FORCE_INLINE_TEMPLATE
- size_t ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx,
-                                        const void* src, size_t srcSize,
-                                        const U32 mls)
+ size_t ZSTD_compressBlock_fast_generic(
+         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+         void const* src, size_t srcSize,
+         U32 const hlog, U32 const stepSize, U32 const mls)
  {
-     U32* const hashTable = cctx->hashTable;
-     U32  const hBits = cctx->appliedParams.cParams.hashLog;
-     seqStore_t* seqStorePtr = &(cctx->seqStore);
-     const BYTE* const base = cctx->base;
+     U32* const hashTable = ms->hashTable;
+     const BYTE* const base = ms->window.base;
      const BYTE* const istart = (const BYTE*)src;
      const BYTE* ip = istart;
      const BYTE* anchor = istart;
-     const U32   lowestIndex = cctx->dictLimit;
+     const U32   lowestIndex = ms->window.dictLimit;
      const BYTE* const lowest = base + lowestIndex;
      const BYTE* const iend = istart + srcSize;
      const BYTE* const ilimit = iend - HASH_READ_SIZE;
-     U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1];
+     U32 offset_1=rep[0], offset_2=rep[1];
      U32 offsetSaved = 0;
 
      /* init */
@@ -57,7 +66,7 @@ size_t ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx,
      /* Main Search Loop */
      while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
          size_t mLength;
-         size_t const h = ZSTD_hashPtr(ip, hBits, mls);
+         size_t const h = ZSTD_hashPtr(ip, hlog, mls);
          U32 const current = (U32)(ip-base);
          U32 const matchIndex = hashTable[h];
          const BYTE* match = base + matchIndex;
@@ -66,21 +75,21 @@ size_t ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx,
          if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
              mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
              ip++;
-             ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
+             ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
          } else {
-             U32 offset;
-             if ( (matchIndex <= lowestIndex) || (MEM_read32(match) != MEM_read32(ip)) ) {
-                 ip += ((ip-anchor) >> g_searchStrength) + 1;
+             if ( (matchIndex <= lowestIndex)
+               || (MEM_read32(match) != MEM_read32(ip)) ) {
+                 assert(stepSize >= 1);
+                 ip += ((ip-anchor) >> kSearchStrength) + stepSize;
                  continue;
              }
              mLength = ZSTD_count(ip+4, match+4, iend) + 4;
-             offset = (U32)(ip-match);
-             while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
-             offset_2 = offset_1;
-             offset_1 = offset;
-
-             ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
-         }
+             {   U32 const offset = (U32)(ip-match);
+                 while (((ip>anchor) & (match>lowest)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
+                 offset_2 = offset_1;
+                 offset_1 = offset;
+                 ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+         }   }
 
          /* match found */
          ip += mLength;
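The skip rule above, ip += ((ip-anchor) >> kSearchStrength) + stepSize, is the fast strategy's acceleration heuristic: the longer the current stretch of unmatched literals, the further the search jumps ahead, and 1.3.4 adds stepSize (taken from cParams->targetLength) as a tunable base increment. A small sketch of the arithmetic, assuming kSearchStrength == 8 as in this version's compress internals (treat that constant as an assumption):

#include <stdio.h>

#define K_SEARCH_STRENGTH 8   /* assumed value of kSearchStrength */

/* Distance to jump after a failed match attempt: grows with the length of
 * the current non-compressible run (ip - anchor), scaled by 2^kSearchStrength,
 * plus the configured base step. */
static size_t next_step(size_t literalRunLength, size_t stepSize)
{
    return (literalRunLength >> K_SEARCH_STRENGTH) + stepSize;
}

int main(void)
{
    /* with stepSize = 1, the skip stays 1 until 256 bytes of literals,
     * then accelerates: 256 -> 2, 512 -> 3, 1024 -> 5, ... */
    size_t runs[] = { 0, 255, 256, 512, 1024, 4096 };
    for (int i = 0; i < 6; i++)
        printf("run %zu -> step %zu\n", runs[i], next_step(runs[i], 1));
    return 0;
}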
@@ -88,8 +97,8 @@ size_t ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx,
 
          if (ip <= ilimit) {
              /* Fill Table */
-             hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2;  /* here because current+2 could be > iend-8 */
-             hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base);
+             hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;  /* here because current+2 could be > iend-8 */
+             hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
              /* check immediate repcode */
              while ( (ip <= ilimit)
                   && ( (offset_2>0)
@@ -97,65 +106,67 @@ size_t ZSTD_compressBlock_fast_generic(ZSTD_CCtx* cctx,
                  /* store sequence */
                  size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
                  { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; }  /* swap offset_2 <=> offset_1 */
-                 hashTable[ZSTD_hashPtr(ip, hBits, mls)] = (U32)(ip-base);
-                 ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, rLength-MINMATCH);
+                 hashTable[ZSTD_hashPtr(ip, hlog, mls)] = (U32)(ip-base);
+                 ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
                  ip += rLength;
                  anchor = ip;
                  continue;   /* faster when present ... (?) */
      }   }   }
 
      /* save reps for next block */
-     seqStorePtr->repToConfirm[0] = offset_1 ? offset_1 : offsetSaved;
-     seqStorePtr->repToConfirm[1] = offset_2 ? offset_2 : offsetSaved;
+     rep[0] = offset_1 ? offset_1 : offsetSaved;
+     rep[1] = offset_2 ? offset_2 : offsetSaved;
 
      /* Return the last literals size */
      return iend - anchor;
  }
 
 
- size_t ZSTD_compressBlock_fast(ZSTD_CCtx* ctx,
-                                const void* src, size_t srcSize)
+ size_t ZSTD_compressBlock_fast(
+         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+         ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
  {
-     const U32 mls = ctx->appliedParams.cParams.searchLength;
+     U32 const hlog = cParams->hashLog;
+     U32 const mls = cParams->searchLength;
+     U32 const stepSize = cParams->targetLength;
      switch(mls)
      {
      default: /* includes case 3 */
      case 4 :
-         return ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 4);
+         return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 4);
      case 5 :
-         return ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 5);
+         return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 5);
      case 6 :
-         return ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 6);
+         return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 6);
      case 7 :
-         return ZSTD_compressBlock_fast_generic(ctx, src, srcSize, 7);
+         return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 7);
      }
  }
 
 
- static size_t ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
-                                                       const void* src, size_t srcSize,
-                                                       const U32 mls)
+ static size_t ZSTD_compressBlock_fast_extDict_generic(
+         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+         void const* src, size_t srcSize,
+         U32 const hlog, U32 const stepSize, U32 const mls)
  {
-     U32* hashTable = ctx->hashTable;
-     const U32 hBits = ctx->appliedParams.cParams.hashLog;
-     seqStore_t* seqStorePtr = &(ctx->seqStore);
-     const BYTE* const base = ctx->base;
-     const BYTE* const dictBase = ctx->dictBase;
+     U32* hashTable = ms->hashTable;
+     const BYTE* const base = ms->window.base;
+     const BYTE* const dictBase = ms->window.dictBase;
      const BYTE* const istart = (const BYTE*)src;
      const BYTE* ip = istart;
      const BYTE* anchor = istart;
-     const U32   lowestIndex = ctx->lowLimit;
+     const U32   lowestIndex = ms->window.lowLimit;
      const BYTE* const dictStart = dictBase + lowestIndex;
-     const U32   dictLimit = ctx->dictLimit;
+     const U32   dictLimit = ms->window.dictLimit;
      const BYTE* const lowPrefixPtr = base + dictLimit;
      const BYTE* const dictEnd = dictBase + dictLimit;
      const BYTE* const iend = istart + srcSize;
      const BYTE* const ilimit = iend - 8;
-     U32 offset_1=seqStorePtr->rep[0], offset_2=seqStorePtr->rep[1];
+     U32 offset_1=rep[0], offset_2=rep[1];
 
      /* Search Loop */
      while (ip < ilimit) {  /* < instead of <=, because (ip+1) */
-         const size_t h = ZSTD_hashPtr(ip, hBits, mls);
+         const size_t h = ZSTD_hashPtr(ip, hlog, mls);
          const U32    matchIndex = hashTable[h];
          const BYTE*  matchBase = matchIndex < dictLimit ? dictBase : base;
          const BYTE*  match = matchBase + matchIndex;
@@ -171,11 +182,12 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
              const BYTE* repMatchEnd = repIndex < dictLimit ? dictEnd : iend;
              mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, lowPrefixPtr) + 4;
              ip++;
-             ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, 0, mLength-MINMATCH);
+             ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
          } else {
              if ( (matchIndex < lowestIndex) ||
                   (MEM_read32(match) != MEM_read32(ip)) ) {
-                 ip += ((ip-anchor) >> g_searchStrength) + 1;
+                 assert(stepSize >= 1);
+                 ip += ((ip-anchor) >> kSearchStrength) + stepSize;
                  continue;
              }
              {   const BYTE* matchEnd = matchIndex < dictLimit ? dictEnd : iend;
@@ -186,7 +198,7 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
                  offset = current - matchIndex;
                  offset_2 = offset_1;
                  offset_1 = offset;
-                 ZSTD_storeSeq(seqStorePtr, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+                 ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
          }   }
 
          /* found a match : store it */
@@ -195,8 +207,8 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
 
          if (ip <= ilimit) {
              /* Fill Table */
-             hashTable[ZSTD_hashPtr(base+current+2, hBits, mls)] = current+2;
-             hashTable[ZSTD_hashPtr(ip-2, hBits, mls)] = (U32)(ip-2-base);
+             hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;
+             hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
              /* check immediate repcode */
              while (ip <= ilimit) {
                  U32 const current2 = (U32)(ip-base);
@@ -207,8 +219,8 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
                      const BYTE* const repEnd2 = repIndex2 < dictLimit ? dictEnd : iend;
                      size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, lowPrefixPtr) + 4;
                      U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
-                     ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, repLength2-MINMATCH);
-                     hashTable[ZSTD_hashPtr(ip, hBits, mls)] = current2;
+                     ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
+                     hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
                      ip += repLength2;
                      anchor = ip;
                      continue;
@@ -217,27 +229,31 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(ZSTD_CCtx* ctx,
      }   }   }
 
      /* save reps for next block */
-     seqStorePtr->repToConfirm[0] = offset_1; seqStorePtr->repToConfirm[1] = offset_2;
+     rep[0] = offset_1;
+     rep[1] = offset_2;
 
      /* Return the last literals size */
      return iend - anchor;
  }
 
 
- size_t ZSTD_compressBlock_fast_extDict(ZSTD_CCtx* ctx,
-                                        const void* src, size_t srcSize)
+ size_t ZSTD_compressBlock_fast_extDict(
+         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+         ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
  {
-     U32 const mls = ctx->appliedParams.cParams.searchLength;
+     U32 const hlog = cParams->hashLog;
+     U32 const mls = cParams->searchLength;
+     U32 const stepSize = cParams->targetLength;
      switch(mls)
      {
      default: /* includes case 3 */
      case 4 :
-         return ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 4);
+         return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 4);
      case 5 :
-         return ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 5);
+         return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 5);
      case 6 :
-         return ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 6);
+         return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 6);
      case 7 :
-         return ZSTD_compressBlock_fast_extDict_generic(ctx, src, srcSize, 7);
+         return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, hlog, stepSize, 7);
      }
  }
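The new ZSTD_fillHashTable at the top of this file no longer writes one entry every three positions and skips the rest: it visits every position, always overwriting the slot for the first position of each 3-byte stride and filling the other two only if their slots are still empty, so older entries survive. A standalone sketch of that fill policy, with a toy hash standing in for ZSTD_hashPtr (all names here are illustrative):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define HASH_LOG   10
#define TABLE_SIZE (1u << HASH_LOG)

static size_t toy_hash(const uint8_t* p)   /* stand-in for ZSTD_hashPtr */
{
    uint32_t v; memcpy(&v, p, 4);
    return (v * 2654435761u) >> (32 - HASH_LOG);
}

static void fill_hash_table(uint32_t* table, const uint8_t* base,
                            uint32_t from, uint32_t last /* last valid pos */)
{
    const uint32_t step = 3;   /* fastHashFillStep */
    for (uint32_t pos = from; pos + step - 1 <= last; pos += step) {
        for (uint32_t i = 0; i < step; i++) {
            size_t const h = toy_hash(base + pos + i);
            if (i == 0 || table[h] == 0)   /* i==0: always write; else only if empty */
                table[h] = pos + i;
        }
    }
}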
data/ext/zstdruby/libzstd/compress/zstd_fast.h
@@ -16,13 +16,17 @@ extern "C" {
  #endif
 
  #include "mem.h"   /* U32 */
- #include "zstd.h"  /* ZSTD_CCtx, size_t */
+ #include "zstd_compress_internal.h"
 
- void ZSTD_fillHashTable(ZSTD_CCtx* zc, const void* end, const U32 mls);
- size_t ZSTD_compressBlock_fast(ZSTD_CCtx* ctx,
-                                const void* src, size_t srcSize);
- size_t ZSTD_compressBlock_fast_extDict(ZSTD_CCtx* ctx,
-                                        const void* src, size_t srcSize);
+ void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
+                         ZSTD_compressionParameters const* cParams,
+                         void const* end);
+ size_t ZSTD_compressBlock_fast(
+         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+         ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+ size_t ZSTD_compressBlock_fast_extDict(
+         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+         ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
 
  #if defined (__cplusplus)
  }
data/ext/zstdruby/libzstd/compress/zstd_lazy.c
@@ -15,76 +15,90 @@
  /*-*************************************
  *  Binary Tree search
  ***************************************/
- /** ZSTD_insertBt1() : add one or multiple positions to tree.
- *  ip : assumed <= iend-8 .
- * @return : nb of positions added */
- static U32 ZSTD_insertBt1(ZSTD_CCtx* zc,
-                           const BYTE* const ip, const BYTE* const iend,
-                           U32 nbCompares, U32 const mls, U32 const extDict)
+
+ void ZSTD_updateDUBT(
+         ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+         const BYTE* ip, const BYTE* iend,
+         U32 mls)
  {
-     U32*   const hashTable = zc->hashTable;
-     U32    const hashLog = zc->appliedParams.cParams.hashLog;
-     size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
-     U32*   const bt = zc->chainTable;
-     U32    const btLog  = zc->appliedParams.cParams.chainLog - 1;
+     U32* const hashTable = ms->hashTable;
+     U32  const hashLog = cParams->hashLog;
+
+     U32* const bt = ms->chainTable;
+     U32  const btLog  = cParams->chainLog - 1;
+     U32  const btMask = (1 << btLog) - 1;
+
+     const BYTE* const base = ms->window.base;
+     U32 const target = (U32)(ip - base);
+     U32 idx = ms->nextToUpdate;
+
+     if (idx != target)
+         DEBUGLOG(7, "ZSTD_updateDUBT, from %u to %u (dictLimit:%u)",
+                     idx, target, ms->window.dictLimit);
+     assert(ip + 8 <= iend);   /* condition for ZSTD_hashPtr */
+     (void)iend;
+
+     assert(idx >= ms->window.dictLimit);   /* condition for valid base+idx */
+     for ( ; idx < target ; idx++) {
+         size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls);   /* assumption : ip + 8 <= iend */
+         U32 const matchIndex = hashTable[h];
+
+         U32* const nextCandidatePtr = bt + 2*(idx&btMask);
+         U32* const sortMarkPtr  = nextCandidatePtr + 1;
+
+         DEBUGLOG(8, "ZSTD_updateDUBT: insert %u", idx);
+         hashTable[h] = idx;   /* Update Hash Table */
+         *nextCandidatePtr = matchIndex;   /* update BT like a chain */
+         *sortMarkPtr = ZSTD_DUBT_UNSORTED_MARK;
+     }
+     ms->nextToUpdate = target;
+ }
+
+
+ /** ZSTD_insertDUBT1() :
+  *  sort one already inserted but unsorted position
+  *  assumption : current >= btlow == (current - btmask)
+  *  doesn't fail */
+ static void ZSTD_insertDUBT1(
+         ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+         U32 current, const BYTE* inputEnd,
+         U32 nbCompares, U32 btLow, int extDict)
+ {
+     U32* const bt = ms->chainTable;
+     U32  const btLog  = cParams->chainLog - 1;
      U32    const btMask = (1 << btLog) - 1;
-     U32 matchIndex = hashTable[h];
      size_t commonLengthSmaller=0, commonLengthLarger=0;
-     const BYTE* const base = zc->base;
-     const BYTE* const dictBase = zc->dictBase;
-     const U32 dictLimit = zc->dictLimit;
+     const BYTE* const base = ms->window.base;
+     const BYTE* const dictBase = ms->window.dictBase;
+     const U32 dictLimit = ms->window.dictLimit;
+     const BYTE* const ip = (current>=dictLimit) ? base + current : dictBase + current;
+     const BYTE* const iend = (current>=dictLimit) ? inputEnd : dictBase + dictLimit;
      const BYTE* const dictEnd = dictBase + dictLimit;
      const BYTE* const prefixStart = base + dictLimit;
      const BYTE* match;
-     const U32 current = (U32)(ip-base);
-     const U32 btLow = btMask >= current ? 0 : current - btMask;
      U32* smallerPtr = bt + 2*(current&btMask);
      U32* largerPtr  = smallerPtr + 1;
+     U32 matchIndex = *smallerPtr;
      U32 dummy32;   /* to be nullified at the end */
-     U32 const windowLow = zc->lowLimit;
-     U32 matchEndIdx = current+8+1;
-     size_t bestLength = 8;
- #ifdef ZSTD_C_PREDICT
-     U32 predictedSmall = *(bt + 2*((current-1)&btMask) + 0);
-     U32 predictedLarge = *(bt + 2*((current-1)&btMask) + 1);
-     predictedSmall += (predictedSmall>0);
-     predictedLarge += (predictedLarge>0);
- #endif /* ZSTD_C_PREDICT */
-
-     DEBUGLOG(8, "ZSTD_insertBt1 (%u)", current);
+     U32 const windowLow = ms->window.lowLimit;
 
-     assert(ip <= iend-8);   /* required for h calculation */
-     hashTable[h] = current;   /* Update Hash Table */
+     DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)",
+                 current, dictLimit, windowLow);
+     assert(current >= btLow);
+     assert(ip < iend);   /* condition for ZSTD_count */
 
      while (nbCompares-- && (matchIndex > windowLow)) {
          U32* const nextPtr = bt + 2*(matchIndex & btMask);
          size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
          assert(matchIndex < current);
 
- #ifdef ZSTD_C_PREDICT   /* note : can create issues when hlog small <= 11 */
-         const U32* predictPtr = bt + 2*((matchIndex-1) & btMask);   /* written this way, as bt is a roll buffer */
-         if (matchIndex == predictedSmall) {
-             /* no need to check length, result known */
-             *smallerPtr = matchIndex;
-             if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
-             smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
-             matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
-             predictedSmall = predictPtr[1] + (predictPtr[1]>0);
-             continue;
-         }
-         if (matchIndex == predictedLarge) {
-             *largerPtr = matchIndex;
-             if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
-             largerPtr = nextPtr;
-             matchIndex = nextPtr[0];
-             predictedLarge = predictPtr[0] + (predictPtr[0]>0);
-             continue;
-         }
- #endif
-
-         if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
-             assert(matchIndex+matchLength >= dictLimit);   /* might be wrong if extDict is incorrectly set to 0 */
-             match = base + matchIndex;
+         if ( (!extDict)
+           || (matchIndex+matchLength >= dictLimit)  /* both in current segment*/
+           || (current < dictLimit) /* both in extDict */) {
+             const BYTE* const mBase = !extDict || ((matchIndex+matchLength) >= dictLimit) ? base : dictBase;
+             assert( (matchIndex+matchLength >= dictLimit)   /* might be wrong if extDict is incorrectly set to 0 */
+                  || (current < dictLimit) );
+             match = mBase + matchIndex;
              matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
          } else {
              match = dictBase + matchIndex;
@@ -93,11 +107,8 @@ static U32 ZSTD_insertBt1(ZSTD_CCtx* zc,
              match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
          }
 
-         if (matchLength > bestLength) {
-             bestLength = matchLength;
-             if (matchLength > matchEndIdx - matchIndex)
-                 matchEndIdx = matchIndex + (U32)matchLength;
-         }
+         DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ",
+                     current, matchIndex, (U32)matchLength);
 
          if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
              break;   /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */
@@ -108,6 +119,8 @@ static U32 ZSTD_insertBt1(ZSTD_CCtx* zc,
              *smallerPtr = matchIndex;             /* update smaller idx */
              commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
              if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
+             DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u",
+                         matchIndex, btLow, nextPtr[1]);
              smallerPtr = nextPtr+1;               /* new "candidate" => larger than match, which was smaller than target */
              matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */
          } else {
@@ -115,184 +128,205 @@ static U32 ZSTD_insertBt1(ZSTD_CCtx* zc,
              *largerPtr = matchIndex;
              commonLengthLarger = matchLength;
              if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
+             DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u",
+                         matchIndex, btLow, nextPtr[0]);
              largerPtr = nextPtr;
              matchIndex = nextPtr[0];
      }   }
 
      *smallerPtr = *largerPtr = 0;
-     if (bestLength > 384) return MIN(192, (U32)(bestLength - 384));   /* speed optimization */
-     assert(matchEndIdx > current + 8);
-     return matchEndIdx - (current + 8);
  }
 
- FORCE_INLINE_TEMPLATE
- void ZSTD_updateTree_internal(ZSTD_CCtx* zc,
-                               const BYTE* const ip, const BYTE* const iend,
-                               const U32 nbCompares, const U32 mls, const U32 extDict)
- {
-     const BYTE* const base = zc->base;
-     U32 const target = (U32)(ip - base);
-     U32 idx = zc->nextToUpdate;
-     DEBUGLOG(7, "ZSTD_updateTree_internal, from %u to %u (extDict:%u)",
-                 idx, target, extDict);
-
-     while(idx < target)
-         idx += ZSTD_insertBt1(zc, base+idx, iend, nbCompares, mls, extDict);
-     zc->nextToUpdate = target;
- }
 
- void ZSTD_updateTree(ZSTD_CCtx* zc,
-                      const BYTE* const ip, const BYTE* const iend,
-                      const U32 nbCompares, const U32 mls)
+ static size_t ZSTD_DUBT_findBestMatch (
+         ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+         const BYTE* const ip, const BYTE* const iend,
+         size_t* offsetPtr,
+         U32 const mls,
+         U32 const extDict)
  {
-     ZSTD_updateTree_internal(zc, ip, iend, nbCompares, mls, 0 /*extDict*/);
- }
-
- void ZSTD_updateTree_extDict(ZSTD_CCtx* zc,
-                              const BYTE* const ip, const BYTE* const iend,
-                              const U32 nbCompares, const U32 mls)
- {
-     ZSTD_updateTree_internal(zc, ip, iend, nbCompares, mls, 1 /*extDict*/);
- }
+     U32*   const hashTable = ms->hashTable;
+     U32    const hashLog = cParams->hashLog;
+     size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
+     U32          matchIndex  = hashTable[h];
 
+     const BYTE* const base = ms->window.base;
+     U32    const current = (U32)(ip-base);
+     U32    const windowLow = ms->window.lowLimit;
 
- static size_t ZSTD_insertBtAndFindBestMatch (
-                       ZSTD_CCtx* zc,
-                       const BYTE* const ip, const BYTE* const iend,
-                       size_t* offsetPtr,
-                       U32 nbCompares, const U32 mls,
-                       U32 extDict)
- {
-     U32*   const hashTable = zc->hashTable;
-     U32    const hashLog = zc->appliedParams.cParams.hashLog;
-     size_t const h  = ZSTD_hashPtr(ip, hashLog, mls);
-     U32*   const bt = zc->chainTable;
-     U32    const btLog  = zc->appliedParams.cParams.chainLog - 1;
+     U32*   const bt = ms->chainTable;
+     U32    const btLog  = cParams->chainLog - 1;
      U32    const btMask = (1 << btLog) - 1;
-     U32 matchIndex  = hashTable[h];
-     size_t commonLengthSmaller=0, commonLengthLarger=0;
-     const BYTE* const base = zc->base;
-     const BYTE* const dictBase = zc->dictBase;
-     const U32 dictLimit = zc->dictLimit;
-     const BYTE* const dictEnd = dictBase + dictLimit;
-     const BYTE* const prefixStart = base + dictLimit;
-     const U32 current = (U32)(ip-base);
-     const U32 btLow = btMask >= current ? 0 : current - btMask;
-     const U32 windowLow = zc->lowLimit;
-     U32* smallerPtr = bt + 2*(current&btMask);
-     U32* largerPtr  = bt + 2*(current&btMask) + 1;
-     U32 matchEndIdx = current+8+1;
-     U32 dummy32;   /* to be nullified at the end */
-     size_t bestLength = 0;
+     U32    const btLow = (btMask >= current) ? 0 : current - btMask;
+     U32    const unsortLimit = MAX(btLow, windowLow);
+
+     U32*         nextCandidate = bt + 2*(matchIndex&btMask);
+     U32*         unsortedMark = bt + 2*(matchIndex&btMask) + 1;
+     U32          nbCompares = 1U << cParams->searchLog;
+     U32          nbCandidates = nbCompares;
+     U32          previousCandidate = 0;
 
+     DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", current);
      assert(ip <= iend-8);   /* required for h calculation */
-     hashTable[h] = current;   /* Update Hash Table */
 
-     while (nbCompares-- && (matchIndex > windowLow)) {
-         U32* const nextPtr = bt + 2*(matchIndex & btMask);
-         size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
-         const BYTE* match;
+     /* reach end of unsorted candidates list */
+     while ( (matchIndex > unsortLimit)
+          && (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK)
+          && (nbCandidates > 1) ) {
+         DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted",
+                     matchIndex);
+         *unsortedMark = previousCandidate;
+         previousCandidate = matchIndex;
+         matchIndex = *nextCandidate;
+         nextCandidate = bt + 2*(matchIndex&btMask);
+         unsortedMark = bt + 2*(matchIndex&btMask) + 1;
+         nbCandidates --;
+     }
 
-         if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
-             match = base + matchIndex;
-             matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
-         } else {
-             match = dictBase + matchIndex;
-             matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
-             if (matchIndex+matchLength >= dictLimit)
-                 match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
-         }
+     if ( (matchIndex > unsortLimit)
+       && (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) {
+         DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u",
+                     matchIndex);
+         *nextCandidate = *unsortedMark = 0;   /* nullify next candidate if it's still unsorted (note : simplification, detrimental to compression ratio, beneficial for speed) */
+     }
+
+     /* batch sort stacked candidates */
+     matchIndex = previousCandidate;
+     while (matchIndex) {  /* will end on matchIndex == 0 */
+         U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1;
+         U32 const nextCandidateIdx = *nextCandidateIdxPtr;
+         ZSTD_insertDUBT1(ms, cParams, matchIndex, iend,
+                          nbCandidates, unsortLimit, extDict);
+         matchIndex = nextCandidateIdx;
+         nbCandidates++;
+     }
 
-         if (matchLength > bestLength) {
-             if (matchLength > matchEndIdx - matchIndex)
-                 matchEndIdx = matchIndex + (U32)matchLength;
-             if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
-                 bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
-             if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
-                 break;   /* drop, to guarantee consistency (miss a little bit of compression) */
+     /* find longest match */
+     {   size_t commonLengthSmaller=0, commonLengthLarger=0;
+         const BYTE* const dictBase = ms->window.dictBase;
+         const U32 dictLimit = ms->window.dictLimit;
+         const BYTE* const dictEnd = dictBase + dictLimit;
+         const BYTE* const prefixStart = base + dictLimit;
+         U32* smallerPtr = bt + 2*(current&btMask);
+         U32* largerPtr  = bt + 2*(current&btMask) + 1;
+         U32 matchEndIdx = current+8+1;
+         U32 dummy32;   /* to be nullified at the end */
+         size_t bestLength = 0;
+
+         matchIndex  = hashTable[h];
+         hashTable[h] = current;   /* Update Hash Table */
+
+         while (nbCompares-- && (matchIndex > windowLow)) {
+             U32* const nextPtr = bt + 2*(matchIndex & btMask);
+             size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
+             const BYTE* match;
+
+             if ((!extDict) || (matchIndex+matchLength >= dictLimit)) {
+                 match = base + matchIndex;
+                 matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
+             } else {
+                 match = dictBase + matchIndex;
+                 matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
+                 if (matchIndex+matchLength >= dictLimit)
+                     match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
              }
-         }
 
-         if (match[matchLength] < ip[matchLength]) {
-             /* match is smaller than current */
-             *smallerPtr = matchIndex;             /* update smaller idx */
-             commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
-             if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
-             smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
-             matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
-         } else {
-             /* match is larger than current */
-             *largerPtr = matchIndex;
-             commonLengthLarger = matchLength;
-             if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
-             largerPtr = nextPtr;
-             matchIndex = nextPtr[0];
-     }   }
+             if (matchLength > bestLength) {
+                 if (matchLength > matchEndIdx - matchIndex)
+                     matchEndIdx = matchIndex + (U32)matchLength;
+                 if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
+                     bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
+                 if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
+                     break;   /* drop, to guarantee consistency (miss a little bit of compression) */
+                 }
+             }
 
-     *smallerPtr = *largerPtr = 0;
+             if (match[matchLength] < ip[matchLength]) {
+                 /* match is smaller than current */
+                 *smallerPtr = matchIndex;             /* update smaller idx */
+                 commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
+                 if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
+                 smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
+                 matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
+             } else {
+                 /* match is larger than current */
+                 *largerPtr = matchIndex;
+                 commonLengthLarger = matchLength;
+                 if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
+                 largerPtr = nextPtr;
+                 matchIndex = nextPtr[0];
+         }   }
+
+         *smallerPtr = *largerPtr = 0;
 
-     assert(matchEndIdx > current+8);
-     zc->nextToUpdate = matchEndIdx - 8;   /* skip repetitive patterns */
-     return bestLength;
+         assert(matchEndIdx > current+8);   /* ensure nextToUpdate is increased */
+         ms->nextToUpdate = matchEndIdx - 8;   /* skip repetitive patterns */
+         if (bestLength >= MINMATCH) {
+             U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
+             DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
+                         current, (U32)bestLength, (U32)*offsetPtr, mIndex);
+         }
+         return bestLength;
+     }
  }
 
 
  /** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
  static size_t ZSTD_BtFindBestMatch (
-                       ZSTD_CCtx* zc,
+                       ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* const ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
-                       const U32 maxNbAttempts, const U32 mls)
+                       const U32 mls /* template */)
  {
-     if (ip < zc->base + zc->nextToUpdate) return 0;   /* skipped area */
-     ZSTD_updateTree(zc, ip, iLimit, maxNbAttempts, mls);
-     return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 0);
+     DEBUGLOG(7, "ZSTD_BtFindBestMatch");
+     if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
+     ZSTD_updateDUBT(ms, cParams, ip, iLimit, mls);
+     return ZSTD_DUBT_findBestMatch(ms, cParams, ip, iLimit, offsetPtr, mls, 0);
  }
 
 
  static size_t ZSTD_BtFindBestMatch_selectMLS (
-                       ZSTD_CCtx* zc,   /* Index table will be updated */
+                       ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* ip, const BYTE* const iLimit,
-                       size_t* offsetPtr,
-                       const U32 maxNbAttempts, const U32 matchLengthSearch)
+                       size_t* offsetPtr)
  {
-     switch(matchLengthSearch)
+     switch(cParams->searchLength)
      {
      default : /* includes case 3 */
-     case 4 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
-     case 5 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
+     case 4 : return ZSTD_BtFindBestMatch(ms, cParams, ip, iLimit, offsetPtr, 4);
+     case 5 : return ZSTD_BtFindBestMatch(ms, cParams, ip, iLimit, offsetPtr, 5);
      case 7 :
-     case 6 : return ZSTD_BtFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
+     case 6 : return ZSTD_BtFindBestMatch(ms, cParams, ip, iLimit, offsetPtr, 6);
      }
  }
 
 
  /** Tree updater, providing best match */
  static size_t ZSTD_BtFindBestMatch_extDict (
-                       ZSTD_CCtx* zc,
+                       ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* const ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
-                       const U32 maxNbAttempts, const U32 mls)
+                       const U32 mls)
  {
-     if (ip < zc->base + zc->nextToUpdate) return 0;   /* skipped area */
-     ZSTD_updateTree_extDict(zc, ip, iLimit, maxNbAttempts, mls);
-     return ZSTD_insertBtAndFindBestMatch(zc, ip, iLimit, offsetPtr, maxNbAttempts, mls, 1);
+     DEBUGLOG(7, "ZSTD_BtFindBestMatch_extDict");
+     if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
+     ZSTD_updateDUBT(ms, cParams, ip, iLimit, mls);
+     return ZSTD_DUBT_findBestMatch(ms, cParams, ip, iLimit, offsetPtr, mls, 1);
  }
 
 
  static size_t ZSTD_BtFindBestMatch_selectMLS_extDict (
-                       ZSTD_CCtx* zc,   /* Index table will be updated */
+                       ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* ip, const BYTE* const iLimit,
-                       size_t* offsetPtr,
-                       const U32 maxNbAttempts, const U32 matchLengthSearch)
+                       size_t* offsetPtr)
  {
-     switch(matchLengthSearch)
+     switch(cParams->searchLength)
      {
      default : /* includes case 3 */
-     case 4 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4);
-     case 5 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5);
+     case 4 : return ZSTD_BtFindBestMatch_extDict(ms, cParams, ip, iLimit, offsetPtr, 4);
+     case 5 : return ZSTD_BtFindBestMatch_extDict(ms, cParams, ip, iLimit, offsetPtr, 5);
      case 7 :
-     case 6 : return ZSTD_BtFindBestMatch_extDict(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6);
+     case 6 : return ZSTD_BtFindBestMatch_extDict(ms, cParams, ip, iLimit, offsetPtr, 6);
      }
  }
 
@@ -305,15 +339,17 @@ static size_t ZSTD_BtFindBestMatch_selectMLS_extDict (
 
  /* Update chains up to ip (excluded)
     Assumption : always within prefix (i.e. not within extDict) */
- U32 ZSTD_insertAndFindFirstIndex (ZSTD_CCtx* zc, const BYTE* ip, U32 mls)
+ static U32 ZSTD_insertAndFindFirstIndex_internal(
+                       ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                       const BYTE* ip, U32 const mls)
  {
-     U32* const hashTable  = zc->hashTable;
-     const U32 hashLog = zc->appliedParams.cParams.hashLog;
-     U32* const chainTable = zc->chainTable;
-     const U32 chainMask = (1 << zc->appliedParams.cParams.chainLog) - 1;
-     const BYTE* const base = zc->base;
+     U32* const hashTable  = ms->hashTable;
+     const U32 hashLog = cParams->hashLog;
+     U32* const chainTable = ms->chainTable;
+     const U32 chainMask = (1 << cParams->chainLog) - 1;
+     const BYTE* const base = ms->window.base;
      const U32 target = (U32)(ip - base);
-     U32 idx = zc->nextToUpdate;
+     U32 idx = ms->nextToUpdate;
 
      while(idx < target) {   /* catch up */
          size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
@@ -322,35 +358,42 @@ U32 ZSTD_insertAndFindFirstIndex (ZSTD_CCtx* zc, const BYTE* ip, U32 mls)
          idx++;
      }
 
-     zc->nextToUpdate = target;
+     ms->nextToUpdate = target;
      return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
  }
 
+ U32 ZSTD_insertAndFindFirstIndex(
+                       ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                       const BYTE* ip)
+ {
+     return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, cParams->searchLength);
+ }
+
 
  /* inlining is important to hardwire a hot branch (template emulation) */
  FORCE_INLINE_TEMPLATE
  size_t ZSTD_HcFindBestMatch_generic (
-                       ZSTD_CCtx* zc,   /* Index table will be updated */
+                       ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* const ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
-                       const U32 maxNbAttempts, const U32 mls, const U32 extDict)
+                       const U32 mls, const U32 extDict)
  {
-     U32* const chainTable = zc->chainTable;
-     const U32 chainSize = (1 << zc->appliedParams.cParams.chainLog);
+     U32* const chainTable = ms->chainTable;
+     const U32 chainSize = (1 << cParams->chainLog);
      const U32 chainMask = chainSize-1;
-     const BYTE* const base = zc->base;
-     const BYTE* const dictBase = zc->dictBase;
-     const U32 dictLimit = zc->dictLimit;
+     const BYTE* const base = ms->window.base;
+     const BYTE* const dictBase = ms->window.dictBase;
+     const U32 dictLimit = ms->window.dictLimit;
      const BYTE* const prefixStart = base + dictLimit;
      const BYTE* const dictEnd = dictBase + dictLimit;
-     const U32 lowLimit = zc->lowLimit;
+     const U32 lowLimit = ms->window.lowLimit;
      const U32 current = (U32)(ip-base);
      const U32 minChain = current > chainSize ? current - chainSize : 0;
-     int nbAttempts=maxNbAttempts;
+     U32 nbAttempts = 1U << cParams->searchLog;
      size_t ml=4-1;
 
      /* HC4 match finder */
-     U32 matchIndex = ZSTD_insertAndFindFirstIndex (zc, ip, mls);
+     U32 matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls);
 
      for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) {
          size_t currentMl=0;
@@ -381,35 +424,33 @@ size_t ZSTD_HcFindBestMatch_generic (
 
 
  FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS (
-                       ZSTD_CCtx* zc,
+                       ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* ip, const BYTE* const iLimit,
-                       size_t* offsetPtr,
-                       const U32 maxNbAttempts, const U32 matchLengthSearch)
+                       size_t* offsetPtr)
  {
-     switch(matchLengthSearch)
+     switch(cParams->searchLength)
      {
      default : /* includes case 3 */
-     case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 0);
-     case 5 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 0);
+     case 4 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 4, 0);
+     case 5 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 5, 0);
      case 7 :
-     case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 0);
+     case 6 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 6, 0);
      }
  }
 
 
  FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
-                       ZSTD_CCtx* const zc,
+                       ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
                        const BYTE* ip, const BYTE* const iLimit,
-                       size_t* const offsetPtr,
-                       U32 const maxNbAttempts, U32 const matchLengthSearch)
+                       size_t* const offsetPtr)
  {
-     switch(matchLengthSearch)
+     switch(cParams->searchLength)
      {
      default : /* includes case 3 */
-     case 4 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 4, 1);
-     case 5 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 5, 1);
+     case 4 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 4, 1);
+     case 5 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 5, 1);
      case 7 :
-     case 6 : return ZSTD_HcFindBestMatch_generic(zc, ip, iLimit, offsetPtr, maxNbAttempts, 6, 1);
+     case 6 : return ZSTD_HcFindBestMatch_generic(ms, cParams, ip, iLimit, offsetPtr, 6, 1);
      }
  }
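For these hash-chain ("HC") finders, the search budget is now derived inside the match finder as 1 << cParams->searchLog instead of being passed down as maxNbAttempts. A self-contained sketch of the underlying chain walk, newest candidate to oldest, with simplified stand-in helpers (not zstd's own):

#include <stddef.h>
#include <stdint.h>

static size_t match_length(const uint8_t* a, const uint8_t* b, const uint8_t* end)
{
    size_t n = 0;
    while (a + n < end && a[n] == b[n]) n++;   /* naive ZSTD_count stand-in */
    return n;
}

static size_t hc_find_best(const uint8_t* base, uint32_t current,
                           const uint8_t* iend,
                           const uint32_t* chainTable, uint32_t chainMask,
                           uint32_t matchIndex /* head from hash table */,
                           uint32_t lowLimit, uint32_t searchLog,
                           uint32_t* bestIndex)
{
    uint32_t nbAttempts = 1u << searchLog;   /* derived from cParams in 1.3.4 */
    size_t best = 0;
    for (; (matchIndex > lowLimit) && (nbAttempts > 0); nbAttempts--) {
        size_t const ml = match_length(base + current, base + matchIndex, iend);
        if (ml > best) { best = ml; *bestIndex = matchIndex; }
        matchIndex = chainTable[matchIndex & chainMask];   /* next-older candidate */
    }
    return best;
}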
@@ -418,30 +459,29 @@ FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
  *  Common parser - lazy strategy
  *********************************/
  FORCE_INLINE_TEMPLATE
- size_t ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx,
-                                        const void* src, size_t srcSize,
-                                        const U32 searchMethod, const U32 depth)
+ size_t ZSTD_compressBlock_lazy_generic(
+                       ZSTD_matchState_t* ms, seqStore_t* seqStore,
+                       U32 rep[ZSTD_REP_NUM],
+                       ZSTD_compressionParameters const* cParams,
+                       const void* src, size_t srcSize,
+                       const U32 searchMethod, const U32 depth)
  {
-     seqStore_t* seqStorePtr = &(ctx->seqStore);
      const BYTE* const istart = (const BYTE*)src;
      const BYTE* ip = istart;
      const BYTE* anchor = istart;
      const BYTE* const iend = istart + srcSize;
      const BYTE* const ilimit = iend - 8;
-     const BYTE* const base = ctx->base + ctx->dictLimit;
+     const BYTE* const base = ms->window.base + ms->window.dictLimit;
 
-     U32 const maxSearches = 1 << ctx->appliedParams.cParams.searchLog;
-     U32 const mls = ctx->appliedParams.cParams.searchLength;
-
-     typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit,
-                                   size_t* offsetPtr,
-                                   U32 maxNbAttempts, U32 matchLengthSearch);
+     typedef size_t (*searchMax_f)(
+                       ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+                       const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
      searchMax_f const searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS : ZSTD_HcFindBestMatch_selectMLS;
-     U32 offset_1 = seqStorePtr->rep[0], offset_2 = seqStorePtr->rep[1], savedOffset=0;
+     U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;
 
      /* init */
      ip += (ip==base);
-     ctx->nextToUpdate3 = ctx->nextToUpdate;
+     ms->nextToUpdate3 = ms->nextToUpdate;
      {   U32 const maxRep = (U32)(ip-base);
          if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
          if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
462
502
 
463
503
  /* first search (depth 0) */
464
504
  { size_t offsetFound = 99999999;
465
- size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
505
+ size_t const ml2 = searchMax(ms, cParams, ip, iend, &offsetFound);
466
506
  if (ml2 > matchLength)
467
507
  matchLength = ml2, start = ip, offset=offsetFound;
468
508
  }
469
509
 
470
510
  if (matchLength < 4) {
471
- ip += ((ip-anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
511
+ ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */
472
512
  continue;
473
513
  }
474
514
 
@@ -484,7 +524,7 @@ size_t ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx,
484
524
  matchLength = mlRep, offset = 0, start = ip;
485
525
  }
486
526
  { size_t offset2=99999999;
487
- size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
527
+ size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
488
528
  int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
489
529
  int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
490
530
  if ((ml2 >= 4) && (gain2 > gain1)) {
@@ -503,7 +543,7 @@ size_t ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx,
503
543
  matchLength = ml2, offset = 0, start = ip;
504
544
  }
505
545
  { size_t offset2=99999999;
506
- size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
546
+ size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
507
547
  int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
508
548
  int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
509
549
  if ((ml2 >= 4) && (gain2 > gain1)) {
@@ -528,7 +568,7 @@ size_t ZSTD_compressBlock_lazy_generic(ZSTD_CCtx* ctx,
528
568
  /* store sequence */
529
569
  _storeSequence:
530
570
  { size_t const litLength = start - anchor;
531
- ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH);
571
+ ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH);
532
572
  anchor = ip = start + matchLength;
533
573
  }
534
574
 
@@ -538,73 +578,80 @@ _storeSequence:
538
578
  /* store sequence */
539
579
  matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
540
580
  offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
541
- ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH);
581
+ ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
542
582
  ip += matchLength;
543
583
  anchor = ip;
544
584
  continue; /* faster when present ... (?) */
545
585
  } }
546
586
 
547
587
  /* Save reps for next block */
548
- seqStorePtr->repToConfirm[0] = offset_1 ? offset_1 : savedOffset;
549
- seqStorePtr->repToConfirm[1] = offset_2 ? offset_2 : savedOffset;
588
+ rep[0] = offset_1 ? offset_1 : savedOffset;
589
+ rep[1] = offset_2 ? offset_2 : savedOffset;
550
590
 
551
591
  /* Return the last literals size */
552
592
  return iend - anchor;
553
593
  }
554
594
 
555
595
 
556
- size_t ZSTD_compressBlock_btlazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
596
+ size_t ZSTD_compressBlock_btlazy2(
597
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
598
+ ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
557
599
  {
558
- return ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 1, 2);
600
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 1, 2);
559
601
  }
560
602
 
561
- size_t ZSTD_compressBlock_lazy2(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
603
+ size_t ZSTD_compressBlock_lazy2(
604
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
605
+ ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
562
606
  {
563
- return ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 2);
607
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 2);
564
608
  }
565
609
 
566
- size_t ZSTD_compressBlock_lazy(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
610
+ size_t ZSTD_compressBlock_lazy(
611
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
612
+ ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
567
613
  {
568
- return ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 1);
614
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 1);
569
615
  }
570
616
 
571
- size_t ZSTD_compressBlock_greedy(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
617
+ size_t ZSTD_compressBlock_greedy(
618
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
619
+ ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
572
620
  {
573
- return ZSTD_compressBlock_lazy_generic(ctx, src, srcSize, 0, 0);
621
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 0);
574
622
  }
575
623
 
576
624
 
577
625
  FORCE_INLINE_TEMPLATE
578
- size_t ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx,
579
- const void* src, size_t srcSize,
580
- const U32 searchMethod, const U32 depth)
626
+ size_t ZSTD_compressBlock_lazy_extDict_generic(
627
+ ZSTD_matchState_t* ms, seqStore_t* seqStore,
628
+ U32 rep[ZSTD_REP_NUM],
629
+ ZSTD_compressionParameters const* cParams,
630
+ const void* src, size_t srcSize,
631
+ const U32 searchMethod, const U32 depth)
581
632
  {
582
- seqStore_t* seqStorePtr = &(ctx->seqStore);
583
633
  const BYTE* const istart = (const BYTE*)src;
584
634
  const BYTE* ip = istart;
585
635
  const BYTE* anchor = istart;
586
636
  const BYTE* const iend = istart + srcSize;
587
637
  const BYTE* const ilimit = iend - 8;
588
- const BYTE* const base = ctx->base;
589
- const U32 dictLimit = ctx->dictLimit;
590
- const U32 lowestIndex = ctx->lowLimit;
638
+ const BYTE* const base = ms->window.base;
639
+ const U32 dictLimit = ms->window.dictLimit;
640
+ const U32 lowestIndex = ms->window.lowLimit;
591
641
  const BYTE* const prefixStart = base + dictLimit;
592
- const BYTE* const dictBase = ctx->dictBase;
642
+ const BYTE* const dictBase = ms->window.dictBase;
593
643
  const BYTE* const dictEnd = dictBase + dictLimit;
594
- const BYTE* const dictStart = dictBase + ctx->lowLimit;
644
+ const BYTE* const dictStart = dictBase + lowestIndex;
595
645
 
596
- const U32 maxSearches = 1 << ctx->appliedParams.cParams.searchLog;
597
- const U32 mls = ctx->appliedParams.cParams.searchLength;
598
-
599
- typedef size_t (*searchMax_f)(ZSTD_CCtx* zc, const BYTE* ip, const BYTE* iLimit,
600
- size_t* offsetPtr,
601
- U32 maxNbAttempts, U32 matchLengthSearch);
646
+ typedef size_t (*searchMax_f)(
647
+ ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
648
+ const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
602
649
  searchMax_f searchMax = searchMethod ? ZSTD_BtFindBestMatch_selectMLS_extDict : ZSTD_HcFindBestMatch_extDict_selectMLS;
603
650
 
604
- U32 offset_1 = seqStorePtr->rep[0], offset_2 = seqStorePtr->rep[1];
651
+ U32 offset_1 = rep[0], offset_2 = rep[1];
605
652
 
606
653
  /* init */
607
- ctx->nextToUpdate3 = ctx->nextToUpdate;
654
+ ms->nextToUpdate3 = ms->nextToUpdate;
608
655
  ip += (ip == prefixStart);
609
656
 
610
657
  /* Match Loop */
@@ -628,13 +675,13 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx,
628
675
 
629
676
  /* first search (depth 0) */
630
677
  { size_t offsetFound = 99999999;
631
- size_t const ml2 = searchMax(ctx, ip, iend, &offsetFound, maxSearches, mls);
678
+ size_t const ml2 = searchMax(ms, cParams, ip, iend, &offsetFound);
632
679
  if (ml2 > matchLength)
633
680
  matchLength = ml2, start = ip, offset=offsetFound;
634
681
  }
635
682
 
636
683
  if (matchLength < 4) {
637
- ip += ((ip-anchor) >> g_searchStrength) + 1; /* jump faster over incompressible sections */
684
+ ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */
638
685
  continue;
639
686
  }
640
687
 
@@ -661,7 +708,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx,
661
708
 
662
709
  /* search match, depth 1 */
663
710
  { size_t offset2=99999999;
664
- size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
711
+ size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
665
712
  int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
666
713
  int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
667
714
  if ((ml2 >= 4) && (gain2 > gain1)) {
@@ -691,7 +738,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx,
691
738
 
692
739
  /* search match, depth 2 */
693
740
  { size_t offset2=99999999;
694
- size_t const ml2 = searchMax(ctx, ip, iend, &offset2, maxSearches, mls);
741
+ size_t const ml2 = searchMax(ms, cParams, ip, iend, &offset2);
695
742
  int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
696
743
  int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
697
744
  if ((ml2 >= 4) && (gain2 > gain1)) {
@@ -713,7 +760,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(ZSTD_CCtx* ctx,
713
760
  /* store sequence */
714
761
  _storeSequence:
715
762
  { size_t const litLength = start - anchor;
716
- ZSTD_storeSeq(seqStorePtr, litLength, anchor, (U32)offset, matchLength-MINMATCH);
763
+ ZSTD_storeSeq(seqStore, litLength, anchor, (U32)offset, matchLength-MINMATCH);
717
764
  anchor = ip = start + matchLength;
718
765
  }
719
766
 
@@ -728,7 +775,7 @@ _storeSequence:
728
775
  const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
729
776
  matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
730
777
  offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset history */
731
- ZSTD_storeSeq(seqStorePtr, 0, anchor, 0, matchLength-MINMATCH);
778
+ ZSTD_storeSeq(seqStore, 0, anchor, 0, matchLength-MINMATCH);
732
779
  ip += matchLength;
733
780
  anchor = ip;
734
781
  continue; /* faster when present ... (?) */
@@ -737,29 +784,41 @@ _storeSequence:
737
784
  } }
738
785
 
739
786
  /* Save reps for next block */
740
- seqStorePtr->repToConfirm[0] = offset_1; seqStorePtr->repToConfirm[1] = offset_2;
787
+ rep[0] = offset_1;
788
+ rep[1] = offset_2;
741
789
 
742
790
  /* Return the last literals size */
743
791
  return iend - anchor;
744
792
  }
745
793
 
746
794
 
747
- size_t ZSTD_compressBlock_greedy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
795
+ size_t ZSTD_compressBlock_greedy_extDict(
796
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
797
+ ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
748
798
  {
749
- return ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 0);
799
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 0);
750
800
  }
751
801
 
752
- size_t ZSTD_compressBlock_lazy_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
802
+ size_t ZSTD_compressBlock_lazy_extDict(
803
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
804
+ ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
805
+
753
806
  {
754
- return ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 1);
807
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 1);
755
808
  }
756
809
 
757
- size_t ZSTD_compressBlock_lazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
810
+ size_t ZSTD_compressBlock_lazy2_extDict(
811
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
812
+ ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
813
+
758
814
  {
759
- return ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 0, 2);
815
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 0, 2);
760
816
  }
761
817
 
762
- size_t ZSTD_compressBlock_btlazy2_extDict(ZSTD_CCtx* ctx, const void* src, size_t srcSize)
818
+ size_t ZSTD_compressBlock_btlazy2_extDict(
819
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
820
+ ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize)
821
+
763
822
  {
764
- return ZSTD_compressBlock_lazy_extDict_generic(ctx, src, srcSize, 1, 2);
823
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, cParams, src, srcSize, 1, 2);
765
824
  }
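With every wrapper above sharing one signature, the in-prefix and extDict variants become interchangeable behind a single function-pointer type, and the compressor can pick a variant once per block and call it uniformly (zstd's zstd_compress.c selects block compressors along these lines, but the sketch below uses stand-in types and stubs, not the actual selection table):

#include <stddef.h>
#include <stdint.h>

#define REP_NUM 3
typedef struct MatchState MatchState;   /* opaque stand-ins */
typedef struct SeqStore SeqStore;
typedef struct CParams CParams;

typedef size_t (*BlockCompressor)(MatchState*, SeqStore*, uint32_t rep[REP_NUM],
                                  const CParams*, const void* src, size_t srcSize);

/* stub pair standing in for e.g. ZSTD_compressBlock_lazy / _lazy_extDict */
static size_t lazy_stub(MatchState* ms, SeqStore* ss, uint32_t rep[REP_NUM],
                        const CParams* cp, const void* src, size_t srcSize)
{ (void)ms; (void)ss; (void)rep; (void)cp; (void)src; return srcSize; }

static size_t lazy_extDict_stub(MatchState* ms, SeqStore* ss, uint32_t rep[REP_NUM],
                                const CParams* cp, const void* src, size_t srcSize)
{ (void)ms; (void)ss; (void)rep; (void)cp; (void)src; return srcSize; }

/* pick the variant once per block, then invoke uniformly */
static BlockCompressor select_compressor(int extDict)
{
    return extDict ? lazy_extDict_stub : lazy_stub;
}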