zstd-ruby 1.3.8.0 → 1.4.0.0

Files changed (48)
  1. checksums.yaml +4 -4
  2. data/.travis.yml +6 -5
  3. data/README.md +1 -1
  4. data/ext/zstdruby/libzstd/Makefile +7 -3
  5. data/ext/zstdruby/libzstd/README.md +4 -2
  6. data/ext/zstdruby/libzstd/common/compiler.h +1 -1
  7. data/ext/zstdruby/libzstd/common/fse.h +1 -1
  8. data/ext/zstdruby/libzstd/common/threading.c +2 -2
  9. data/ext/zstdruby/libzstd/common/xxhash.c +2 -2
  10. data/ext/zstdruby/libzstd/common/zstd_internal.h +55 -2
  11. data/ext/zstdruby/libzstd/compress/fse_compress.c +2 -2
  12. data/ext/zstdruby/libzstd/compress/zstd_compress.c +423 -296
  13. data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +14 -11
  14. data/ext/zstdruby/libzstd/compress/zstd_fast.c +203 -124
  15. data/ext/zstdruby/libzstd/compress/zstd_lazy.h +1 -1
  16. data/ext/zstdruby/libzstd/compress/zstd_ldm.c +1 -1
  17. data/ext/zstdruby/libzstd/compress/zstd_opt.c +27 -11
  18. data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +41 -49
  19. data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +43 -26
  20. data/ext/zstdruby/libzstd/decompress/zstd_ddict.c +4 -4
  21. data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +257 -164
  22. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.c +51 -47
  23. data/ext/zstdruby/libzstd/decompress/zstd_decompress_internal.h +7 -0
  24. data/ext/zstdruby/libzstd/dictBuilder/cover.c +58 -13
  25. data/ext/zstdruby/libzstd/dictBuilder/cover.h +29 -0
  26. data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +25 -13
  27. data/ext/zstdruby/libzstd/dictBuilder/zdict.h +18 -8
  28. data/ext/zstdruby/libzstd/dll/example/build_package.bat +3 -2
  29. data/ext/zstdruby/libzstd/legacy/zstd_legacy.h +42 -12
  30. data/ext/zstdruby/libzstd/legacy/zstd_v01.c +32 -7
  31. data/ext/zstdruby/libzstd/legacy/zstd_v01.h +12 -7
  32. data/ext/zstdruby/libzstd/legacy/zstd_v02.c +31 -12
  33. data/ext/zstdruby/libzstd/legacy/zstd_v02.h +12 -7
  34. data/ext/zstdruby/libzstd/legacy/zstd_v03.c +32 -12
  35. data/ext/zstdruby/libzstd/legacy/zstd_v03.h +12 -7
  36. data/ext/zstdruby/libzstd/legacy/zstd_v04.c +32 -12
  37. data/ext/zstdruby/libzstd/legacy/zstd_v04.h +12 -7
  38. data/ext/zstdruby/libzstd/legacy/zstd_v05.c +32 -7
  39. data/ext/zstdruby/libzstd/legacy/zstd_v05.h +12 -7
  40. data/ext/zstdruby/libzstd/legacy/zstd_v06.c +36 -8
  41. data/ext/zstdruby/libzstd/legacy/zstd_v06.h +10 -5
  42. data/ext/zstdruby/libzstd/legacy/zstd_v07.c +40 -9
  43. data/ext/zstdruby/libzstd/legacy/zstd_v07.h +10 -5
  44. data/ext/zstdruby/libzstd/zstd.h +689 -542
  45. data/lib/zstd-ruby/version.rb +1 -1
  46. data/zstd-ruby.gemspec +1 -1
  47. metadata +6 -7
  48. data/ext/zstdruby/libzstd/dll/libzstd.def +0 -87
@@ -36,9 +36,9 @@ extern "C" {
  #define ZSTD_DUBT_UNSORTED_MARK 1 /* For btlazy2 strategy, index 1 now means "unsorted".
  It could be confused for a real successor at index "1", if sorted as larger than its predecessor.
  It's not a big deal though : candidate will just be sorted again.
- Additionnally, candidate position 1 will be lost.
+ Additionally, candidate position 1 will be lost.
  But candidate 1 cannot hide a large tree of candidates, so it's a minimal loss.
- The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be misdhandled after table re-use with a different strategy
+ The benefit is that ZSTD_DUBT_UNSORTED_MARK cannot be mishandled after table re-use with a different strategy
  Constant required by ZSTD_compressBlock_btlazy2() and ZSTD_reduceTable_internal() */
@@ -54,6 +54,14 @@ typedef struct ZSTD_prefixDict_s {
  ZSTD_dictContentType_e dictContentType;
  } ZSTD_prefixDict;

+ typedef struct {
+ void* dictBuffer;
+ void const* dict;
+ size_t dictSize;
+ ZSTD_dictContentType_e dictContentType;
+ ZSTD_CDict* cdict;
+ } ZSTD_localDict;
+
  typedef struct {
  U32 CTable[HUF_CTABLE_SIZE_U32(255)];
  HUF_repeat repeatMode;
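Note: the new ZSTD_localDict gathers everything the compression context needs in order to build its CDict lazily from a caller-supplied dictionary. As a hedged illustration only (the helper below is not part of zstd; only the struct fields shown in the hunk above are assumed to exist), a by-reference dictionary might be recorded like this:

    /* Illustrative sketch, not zstd code: record a by-reference dictionary
     * in a ZSTD_localDict. Assumes the internal header above is in scope. */
    static void example_setLocalDictByRef(ZSTD_localDict* ld,
                                          const void* dict, size_t dictSize,
                                          ZSTD_dictContentType_e dictContentType)
    {
        ld->dictBuffer = NULL;                  /* no privately owned copy */
        ld->dict = dict;                        /* caller-owned content */
        ld->dictSize = dictSize;
        ld->dictContentType = dictContentType;
        ld->cdict = NULL;                       /* CDict left to be created on demand */
    }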
@@ -107,6 +115,7 @@ typedef struct {
  U32 offCodeSumBasePrice; /* to compare to log2(offreq) */
  ZSTD_OptPrice_e priceType; /* prices can be determined dynamically, or follow a pre-defined cost structure */
  const ZSTD_entropyCTables_t* symbolCosts; /* pre-calculated dictionary statistics */
+ ZSTD_literalCompressionMode_e literalCompressionMode;
  } optState_t;

  typedef struct {
@@ -188,6 +197,7 @@ struct ZSTD_CCtx_params_s {
  * 1<<wLog, even for dictionary */

  ZSTD_dictAttachPref_e attachDictPref;
+ ZSTD_literalCompressionMode_e literalCompressionMode;

  /* Multithreading: used to pass parameters to mtctx */
  int nbWorkers;
@@ -243,7 +253,7 @@ struct ZSTD_CCtx_s {
  U32 frameEnded;

  /* Dictionary */
- ZSTD_CDict* cdictLocal;
+ ZSTD_localDict localDict;
  const ZSTD_CDict* cdict;
  ZSTD_prefixDict prefixDict; /* single-usage dictionary */
@@ -806,13 +816,6 @@ size_t ZSTD_initCStream_internal(ZSTD_CStream* zcs,
  void ZSTD_resetSeqStore(seqStore_t* ssPtr);

- /*! ZSTD_compressStream_generic() :
- * Private use only. To be called from zstdmt_compress.c in single-thread mode. */
- size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
- ZSTD_outBuffer* output,
- ZSTD_inBuffer* input,
- ZSTD_EndDirective const flushMode);
-
  /*! ZSTD_getCParamsFromCDict() :
  * as the name implies */
  ZSTD_compressionParameters ZSTD_getCParamsFromCDict(const ZSTD_CDict* cdict);
@@ -839,7 +842,7 @@ size_t ZSTD_compress_advanced_internal(ZSTD_CCtx* cctx,
  /* ZSTD_writeLastEmptyBlock() :
  * output an empty Block with end-of-frame mark to complete a frame
  * @return : size of data written into `dst` (== ZSTD_blockHeaderSize (defined in zstd_internal.h))
- * or an error code if `dstCapcity` is too small (<ZSTD_blockHeaderSize)
+ * or an error code if `dstCapacity` is too small (<ZSTD_blockHeaderSize)
  */
  size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity);
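Note: per the corrected comment, ZSTD_writeLastEmptyBlock() either writes exactly ZSTD_blockHeaderSize bytes or fails when dstCapacity is too small. A hedged caller-side sketch using only that documented contract plus the public ZSTD_isError() check (op and oend are assumed output-buffer pointers, not names from this diff):

    size_t const written = ZSTD_writeLastEmptyBlock(op, (size_t)(oend - op));
    if (ZSTD_isError(written)) return written;   /* dstCapacity < ZSTD_blockHeaderSize */
    assert(written == ZSTD_blockHeaderSize);     /* documented success value */
    op += written;                               /* advance past the empty last block */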
@@ -45,7 +45,155 @@ FORCE_INLINE_TEMPLATE
  size_t ZSTD_compressBlock_fast_generic(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize,
- U32 const mls, ZSTD_dictMode_e const dictMode)
+ U32 const mls)
+ {
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ U32* const hashTable = ms->hashTable;
+ U32 const hlog = cParams->hashLog;
+ /* support stepSize of 0 */
+ size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const istart = (const BYTE*)src;
+ /* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */
+ const BYTE* ip0 = istart;
+ const BYTE* ip1;
+ const BYTE* anchor = istart;
+ const U32 prefixStartIndex = ms->window.dictLimit;
+ const BYTE* const prefixStart = base + prefixStartIndex;
+ const BYTE* const iend = istart + srcSize;
+ const BYTE* const ilimit = iend - HASH_READ_SIZE;
+ U32 offset_1=rep[0], offset_2=rep[1];
+ U32 offsetSaved = 0;
+
+ /* init */
+ ip0 += (ip0 == prefixStart);
+ ip1 = ip0 + 1;
+ {
+ U32 const maxRep = (U32)(ip0 - prefixStart);
+ if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
+ if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
+ }
+
+ /* Main Search Loop */
+ while (ip1 < ilimit) { /* < instead of <=, because check at ip0+2 */
+ size_t mLength;
+ BYTE const* ip2 = ip0 + 2;
+ size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls);
+ U32 const val0 = MEM_read32(ip0);
+ size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls);
+ U32 const val1 = MEM_read32(ip1);
+ U32 const current0 = (U32)(ip0-base);
+ U32 const current1 = (U32)(ip1-base);
+ U32 const matchIndex0 = hashTable[h0];
+ U32 const matchIndex1 = hashTable[h1];
+ BYTE const* repMatch = ip2-offset_1;
+ const BYTE* match0 = base + matchIndex0;
+ const BYTE* match1 = base + matchIndex1;
+ U32 offcode;
+ hashTable[h0] = current0; /* update hash table */
+ hashTable[h1] = current1; /* update hash table */
+
+ assert(ip0 + 1 == ip1);
+
+ if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) {
+ mLength = ip2[-1] == repMatch[-1] ? 1 : 0;
+ ip0 = ip2 - mLength;
+ match0 = repMatch - mLength;
+ offcode = 0;
+ goto _match;
+ }
+ if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) {
+ /* found a regular match */
+ goto _offset;
+ }
+ if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) {
+ /* found a regular match after one literal */
+ ip0 = ip1;
+ match0 = match1;
+ goto _offset;
+ }
+ {
+ size_t const step = ((ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
+ assert(step >= 2);
+ ip0 += step;
+ ip1 += step;
+ continue;
+ }
+ _offset: /* Requires: ip0, match0 */
+ /* Compute the offset code */
+ offset_2 = offset_1;
+ offset_1 = (U32)(ip0-match0);
+ offcode = offset_1 + ZSTD_REP_MOVE;
+ mLength = 0;
+ /* Count the backwards match length */
+ while (((ip0>anchor) & (match0>prefixStart))
+ && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; } /* catch up */
+
+ _match: /* Requires: ip0, match0, offcode */
+ /* Count the forward length */
+ mLength += ZSTD_count(ip0+mLength+4, match0+mLength+4, iend) + 4;
+ ZSTD_storeSeq(seqStore, ip0-anchor, anchor, offcode, mLength-MINMATCH);
+ /* match found */
+ ip0 += mLength;
+ anchor = ip0;
+ ip1 = ip0 + 1;
+
+ if (ip0 <= ilimit) {
+ /* Fill Table */
+ assert(base+current0+2 > istart); /* check base overflow */
+ hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2; /* here because current+2 could be > iend-8 */
+ hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);
+
+ while ( (ip0 <= ilimit)
+ && ( (offset_2>0)
+ & (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) )) {
+ /* store sequence */
+ size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4;
+ U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */
+ hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
+ ip0 += rLength;
+ ip1 = ip0 + 1;
+ ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
+ anchor = ip0;
+ continue; /* faster when present (confirmed on gcc-8) ... (?) */
+ }
+ }
+ }
+
+ /* save reps for next block */
+ rep[0] = offset_1 ? offset_1 : offsetSaved;
+ rep[1] = offset_2 ? offset_2 : offsetSaved;
+
+ /* Return the last literals size */
+ return iend - anchor;
+ }
+
+
+ size_t ZSTD_compressBlock_fast(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+ ZSTD_compressionParameters const* cParams = &ms->cParams;
+ U32 const mls = cParams->minMatch;
+ assert(ms->dictMatchState == NULL);
+ switch(mls)
+ {
+ default: /* includes case 3 */
+ case 4 :
+ return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4);
+ case 5 :
+ return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5);
+ case 6 :
+ return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6);
+ case 7 :
+ return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7);
+ }
+ }
+
+ FORCE_INLINE_TEMPLATE
+ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize, U32 const mls)
  {
  const ZSTD_compressionParameters* const cParams = &ms->cParams;
  U32* const hashTable = ms->hashTable;
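Note: the rewritten no-dict loop above probes two positions per iteration (ip0 and ip0+1) and, when neither hashes to a match, advances by ((ip0-anchor) >> (kSearchStrength - 1)) + stepSize, so the stride grows with the distance from the last match. A self-contained sketch of that arithmetic, assuming kSearchStrength == 8 and a targetLength of 0 so that stepSize == 2 (both values are illustrative assumptions, not taken from this diff):

    #include <stdio.h>
    #include <stddef.h>

    int main(void)
    {
        unsigned const kSearchStrength = 8;   /* assumed value */
        size_t const stepSize = 2;            /* targetLength(0) + !(targetLength) + 1 */
        size_t const distances[] = { 0, 64, 128, 256, 1024 };
        for (size_t i = 0; i < sizeof(distances)/sizeof(distances[0]); i++) {
            size_t const step = (distances[i] >> (kSearchStrength - 1)) + stepSize;
            /* prints: 0->2, 64->2, 128->3, 256->4, 1024->10 */
            printf("ip0-anchor=%4zu  step=%zu\n", distances[i], step);
        }
        return 0;
    }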
@@ -64,46 +212,26 @@ size_t ZSTD_compressBlock_fast_generic(
  U32 offsetSaved = 0;

  const ZSTD_matchState_t* const dms = ms->dictMatchState;
- const ZSTD_compressionParameters* const dictCParams =
- dictMode == ZSTD_dictMatchState ?
- &dms->cParams : NULL;
- const U32* const dictHashTable = dictMode == ZSTD_dictMatchState ?
- dms->hashTable : NULL;
- const U32 dictStartIndex = dictMode == ZSTD_dictMatchState ?
- dms->window.dictLimit : 0;
- const BYTE* const dictBase = dictMode == ZSTD_dictMatchState ?
- dms->window.base : NULL;
- const BYTE* const dictStart = dictMode == ZSTD_dictMatchState ?
- dictBase + dictStartIndex : NULL;
- const BYTE* const dictEnd = dictMode == ZSTD_dictMatchState ?
- dms->window.nextSrc : NULL;
- const U32 dictIndexDelta = dictMode == ZSTD_dictMatchState ?
- prefixStartIndex - (U32)(dictEnd - dictBase) :
- 0;
+ const ZSTD_compressionParameters* const dictCParams = &dms->cParams ;
+ const U32* const dictHashTable = dms->hashTable;
+ const U32 dictStartIndex = dms->window.dictLimit;
+ const BYTE* const dictBase = dms->window.base;
+ const BYTE* const dictStart = dictBase + dictStartIndex;
+ const BYTE* const dictEnd = dms->window.nextSrc;
+ const U32 dictIndexDelta = prefixStartIndex - (U32)(dictEnd - dictBase);
  const U32 dictAndPrefixLength = (U32)(ip - prefixStart + dictEnd - dictStart);
- const U32 dictHLog = dictMode == ZSTD_dictMatchState ?
- dictCParams->hashLog : hlog;
-
- assert(dictMode == ZSTD_noDict || dictMode == ZSTD_dictMatchState);
+ const U32 dictHLog = dictCParams->hashLog;

  /* otherwise, we would get index underflow when translating a dict index
  * into a local index */
- assert(dictMode != ZSTD_dictMatchState
- || prefixStartIndex >= (U32)(dictEnd - dictBase));
+ assert(prefixStartIndex >= (U32)(dictEnd - dictBase));

  /* init */
  ip += (dictAndPrefixLength == 0);
- if (dictMode == ZSTD_noDict) {
- U32 const maxRep = (U32)(ip - prefixStart);
- if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
- if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
- }
- if (dictMode == ZSTD_dictMatchState) {
- /* dictMatchState repCode checks don't currently handle repCode == 0
- * disabling. */
- assert(offset_1 <= dictAndPrefixLength);
- assert(offset_2 <= dictAndPrefixLength);
- }
+ /* dictMatchState repCode checks don't currently handle repCode == 0
+ * disabling. */
+ assert(offset_1 <= dictAndPrefixLength);
+ assert(offset_2 <= dictAndPrefixLength);

  /* Main Search Loop */
  while (ip < ilimit) { /* < instead of <=, because repcode check at (ip+1) */
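Note: dictIndexDelta, defined a few lines above, is what maps an index from the dictMatchState's table into the current context's index space, and the assert(prefixStartIndex >= (U32)(dictEnd - dictBase)) is exactly what keeps that mapping from wrapping below zero. A hedged worked example with made-up numbers (U32 is libzstd's 32-bit typedef; the helper is illustrative only):

    /* Suppose the loaded dictionary spans dms indices 0..999, i.e.
     * dictEnd - dictBase == 1000, and the current window starts at
     * prefixStartIndex == 1000. Then dictIndexDelta == 1000 - 1000 == 0,
     * and a dict match found at dms index d lives at combined index
     * d + dictIndexDelta, so the stored offset is current - d - dictIndexDelta,
     * matching the dict-match branch in the next hunk. */
    static U32 example_dictToLocalIndex(U32 dictMatchIndex, U32 dictIndexDelta)
    {
        return dictMatchIndex + dictIndexDelta;
    }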
@@ -113,50 +241,37 @@ size_t ZSTD_compressBlock_fast_generic(
  U32 const matchIndex = hashTable[h];
  const BYTE* match = base + matchIndex;
  const U32 repIndex = current + 1 - offset_1;
- const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
- && repIndex < prefixStartIndex) ?
+ const BYTE* repMatch = (repIndex < prefixStartIndex) ?
  dictBase + (repIndex - dictIndexDelta) :
  base + repIndex;
  hashTable[h] = current; /* update hash table */

- if ( (dictMode == ZSTD_dictMatchState)
- && ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
+ if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
  && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
  const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
  mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
  ip++;
  ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
- } else if ( dictMode == ZSTD_noDict
- && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
- mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
- ip++;
- ZSTD_storeSeq(seqStore, ip-anchor, anchor, 0, mLength-MINMATCH);
  } else if ( (matchIndex <= prefixStartIndex) ) {
- if (dictMode == ZSTD_dictMatchState) {
- size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
- U32 const dictMatchIndex = dictHashTable[dictHash];
- const BYTE* dictMatch = dictBase + dictMatchIndex;
- if (dictMatchIndex <= dictStartIndex ||
- MEM_read32(dictMatch) != MEM_read32(ip)) {
- assert(stepSize >= 1);
- ip += ((ip-anchor) >> kSearchStrength) + stepSize;
- continue;
- } else {
- /* found a dict match */
- U32 const offset = (U32)(current-dictMatchIndex-dictIndexDelta);
- mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
- while (((ip>anchor) & (dictMatch>dictStart))
- && (ip[-1] == dictMatch[-1])) {
- ip--; dictMatch--; mLength++;
- } /* catch up */
- offset_2 = offset_1;
- offset_1 = offset;
- ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
- }
- } else {
+ size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
+ U32 const dictMatchIndex = dictHashTable[dictHash];
+ const BYTE* dictMatch = dictBase + dictMatchIndex;
+ if (dictMatchIndex <= dictStartIndex ||
+ MEM_read32(dictMatch) != MEM_read32(ip)) {
  assert(stepSize >= 1);
  ip += ((ip-anchor) >> kSearchStrength) + stepSize;
  continue;
+ } else {
+ /* found a dict match */
+ U32 const offset = (U32)(current-dictMatchIndex-dictIndexDelta);
+ mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
+ while (((ip>anchor) & (dictMatch>dictStart))
+ && (ip[-1] == dictMatch[-1])) {
+ ip--; dictMatch--; mLength++;
+ } /* catch up */
+ offset_2 = offset_1;
+ offset_1 = offset;
+ ZSTD_storeSeq(seqStore, ip-anchor, anchor, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
  }
  } else if (MEM_read32(match) != MEM_read32(ip)) {
  /* it's not a match, and we're not going to check the dictionary */
@@ -185,41 +300,27 @@ size_t ZSTD_compressBlock_fast_generic(
  hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);

  /* check immediate repcode */
- if (dictMode == ZSTD_dictMatchState) {
- while (ip <= ilimit) {
- U32 const current2 = (U32)(ip-base);
- U32 const repIndex2 = current2 - offset_2;
- const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
- dictBase - dictIndexDelta + repIndex2 :
- base + repIndex2;
- if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
- && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
- const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
- size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
- U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
- ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
- hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
- ip += repLength2;
- anchor = ip;
- continue;
- }
- break;
+ while (ip <= ilimit) {
+ U32 const current2 = (U32)(ip-base);
+ U32 const repIndex2 = current2 - offset_2;
+ const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
+ dictBase - dictIndexDelta + repIndex2 :
+ base + repIndex2;
+ if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
+ && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
+ const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
+ size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
+ U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0, anchor, 0, repLength2-MINMATCH);
+ hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
+ ip += repLength2;
+ anchor = ip;
+ continue;
  }
+ break;
  }
-
- if (dictMode == ZSTD_noDict) {
- while ( (ip <= ilimit)
- && ( (offset_2>0)
- & (MEM_read32(ip) == MEM_read32(ip - offset_2)) )) {
- /* store sequence */
- size_t const rLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
- U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */
- hashTable[ZSTD_hashPtr(ip, hlog, mls)] = (U32)(ip-base);
- ZSTD_storeSeq(seqStore, 0, anchor, 0, rLength-MINMATCH);
- ip += rLength;
- anchor = ip;
- continue; /* faster when present ... (?) */
- } } } }
+ }
+ }

  /* save reps for next block */
  rep[0] = offset_1 ? offset_1 : offsetSaved;
@@ -229,28 +330,6 @@ size_t ZSTD_compressBlock_fast_generic(
  return iend - anchor;
  }

-
- size_t ZSTD_compressBlock_fast(
- ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
- void const* src, size_t srcSize)
- {
- ZSTD_compressionParameters const* cParams = &ms->cParams;
- U32 const mls = cParams->minMatch;
- assert(ms->dictMatchState == NULL);
- switch(mls)
- {
- default: /* includes case 3 */
- case 4 :
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_noDict);
- case 5 :
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_noDict);
- case 6 :
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_noDict);
- case 7 :
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_noDict);
- }
- }
-
  size_t ZSTD_compressBlock_fast_dictMatchState(
  ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
  void const* src, size_t srcSize)
@@ -262,13 +341,13 @@ size_t ZSTD_compressBlock_fast_dictMatchState(
  {
  default: /* includes case 3 */
  case 4 :
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4);
  case 5 :
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5);
  case 6 :
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6);
  case 7 :
- return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7, ZSTD_dictMatchState);
+ return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7);
  }
  }
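Note: both dispatchers (ZSTD_compressBlock_fast earlier and ZSTD_compressBlock_fast_dictMatchState here) use the same specialization trick: the FORCE_INLINE_TEMPLATE body takes mls as a parameter, and a switch on cParams->minMatch calls it with a literal constant so the compiler emits one specialized copy per match length. A minimal, self-contained sketch of that pattern (names and body are illustrative, not zstd's):

    #include <stddef.h>

    /* The "generic" body: mls is a runtime parameter here, but every caller
     * below passes a literal, so inlining yields a specialized copy each time. */
    static inline size_t body_generic(const unsigned char* src, size_t srcSize, unsigned const mls)
    {
        (void)src;                                   /* placeholder body */
        return srcSize >= mls ? srcSize - mls : 0;
    }

    size_t body_dispatch(const unsigned char* src, size_t srcSize, unsigned minMatch)
    {
        switch (minMatch)
        {
        default: /* includes case 3 */
        case 4: return body_generic(src, srcSize, 4);
        case 5: return body_generic(src, srcSize, 5);
        case 6: return body_generic(src, srcSize, 6);
        case 7: return body_generic(src, srcSize, 7);
        }
    }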