zstd-ruby 1.4.4.0 → 1.5.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (115)
  1. checksums.yaml +4 -4
  2. data/.gitignore +2 -0
  3. data/README.md +78 -5
  4. data/Rakefile +8 -2
  5. data/ext/zstdruby/common.h +15 -0
  6. data/ext/zstdruby/extconf.rb +3 -2
  7. data/ext/zstdruby/libzstd/common/allocations.h +55 -0
  8. data/ext/zstdruby/libzstd/common/bits.h +200 -0
  9. data/ext/zstdruby/libzstd/common/bitstream.h +74 -97
  10. data/ext/zstdruby/libzstd/common/compiler.h +219 -20
  11. data/ext/zstdruby/libzstd/common/cpu.h +1 -3
  12. data/ext/zstdruby/libzstd/common/debug.c +11 -31
  13. data/ext/zstdruby/libzstd/common/debug.h +22 -49
  14. data/ext/zstdruby/libzstd/common/entropy_common.c +184 -80
  15. data/ext/zstdruby/libzstd/common/error_private.c +11 -2
  16. data/ext/zstdruby/libzstd/common/error_private.h +87 -4
  17. data/ext/zstdruby/libzstd/common/fse.h +47 -116
  18. data/ext/zstdruby/libzstd/common/fse_decompress.c +127 -127
  19. data/ext/zstdruby/libzstd/common/huf.h +112 -197
  20. data/ext/zstdruby/libzstd/common/mem.h +124 -142
  21. data/ext/zstdruby/libzstd/common/pool.c +54 -27
  22. data/ext/zstdruby/libzstd/common/pool.h +11 -5
  23. data/ext/zstdruby/libzstd/common/portability_macros.h +156 -0
  24. data/ext/zstdruby/libzstd/common/threading.c +78 -22
  25. data/ext/zstdruby/libzstd/common/threading.h +9 -13
  26. data/ext/zstdruby/libzstd/common/xxhash.c +15 -873
  27. data/ext/zstdruby/libzstd/common/xxhash.h +5572 -191
  28. data/ext/zstdruby/libzstd/common/zstd_common.c +2 -37
  29. data/ext/zstdruby/libzstd/common/zstd_deps.h +111 -0
  30. data/ext/zstdruby/libzstd/common/zstd_internal.h +186 -144
  31. data/ext/zstdruby/libzstd/common/zstd_trace.h +163 -0
  32. data/ext/zstdruby/libzstd/compress/clevels.h +134 -0
  33. data/ext/zstdruby/libzstd/compress/fse_compress.c +99 -196
  34. data/ext/zstdruby/libzstd/compress/hist.c +41 -63
  35. data/ext/zstdruby/libzstd/compress/hist.h +13 -33
  36. data/ext/zstdruby/libzstd/compress/huf_compress.c +968 -331
  37. data/ext/zstdruby/libzstd/compress/zstd_compress.c +4120 -1191
  38. data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +688 -159
  39. data/ext/zstdruby/libzstd/compress/zstd_compress_literals.c +121 -40
  40. data/ext/zstdruby/libzstd/compress/zstd_compress_literals.h +16 -6
  41. data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.c +62 -35
  42. data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.h +10 -3
  43. data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.c +577 -0
  44. data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.h +32 -0
  45. data/ext/zstdruby/libzstd/compress/zstd_cwksp.h +322 -115
  46. data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +394 -154
  47. data/ext/zstdruby/libzstd/compress/zstd_double_fast.h +4 -3
  48. data/ext/zstdruby/libzstd/compress/zstd_fast.c +729 -253
  49. data/ext/zstdruby/libzstd/compress/zstd_fast.h +4 -3
  50. data/ext/zstdruby/libzstd/compress/zstd_lazy.c +1289 -247
  51. data/ext/zstdruby/libzstd/compress/zstd_lazy.h +61 -1
  52. data/ext/zstdruby/libzstd/compress/zstd_ldm.c +339 -212
  53. data/ext/zstdruby/libzstd/compress/zstd_ldm.h +15 -3
  54. data/ext/zstdruby/libzstd/compress/zstd_ldm_geartab.h +106 -0
  55. data/ext/zstdruby/libzstd/compress/zstd_opt.c +508 -282
  56. data/ext/zstdruby/libzstd/compress/zstd_opt.h +1 -1
  57. data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +217 -466
  58. data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +35 -114
  59. data/ext/zstdruby/libzstd/decompress/huf_decompress.c +1220 -572
  60. data/ext/zstdruby/libzstd/decompress/huf_decompress_amd64.S +576 -0
  61. data/ext/zstdruby/libzstd/decompress/zstd_ddict.c +23 -19
  62. data/ext/zstdruby/libzstd/decompress/zstd_ddict.h +3 -3
  63. data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +859 -273
  64. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.c +1244 -375
  65. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.h +21 -7
  66. data/ext/zstdruby/libzstd/decompress/zstd_decompress_internal.h +74 -11
  67. data/ext/zstdruby/libzstd/dictBuilder/cover.c +75 -54
  68. data/ext/zstdruby/libzstd/dictBuilder/cover.h +20 -9
  69. data/ext/zstdruby/libzstd/dictBuilder/divsufsort.c +1 -1
  70. data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +55 -36
  71. data/ext/zstdruby/libzstd/dictBuilder/zdict.c +126 -110
  72. data/ext/zstdruby/libzstd/{dictBuilder/zdict.h → zdict.h} +248 -56
  73. data/ext/zstdruby/libzstd/zstd.h +1277 -306
  74. data/ext/zstdruby/libzstd/{common/zstd_errors.h → zstd_errors.h} +29 -8
  75. data/ext/zstdruby/main.c +20 -0
  76. data/ext/zstdruby/skippable_frame.c +63 -0
  77. data/ext/zstdruby/streaming_compress.c +177 -0
  78. data/ext/zstdruby/streaming_compress.h +5 -0
  79. data/ext/zstdruby/streaming_decompress.c +123 -0
  80. data/ext/zstdruby/zstdruby.c +114 -32
  81. data/lib/zstd-ruby/version.rb +1 -1
  82. data/lib/zstd-ruby.rb +0 -1
  83. data/zstd-ruby.gemspec +1 -1
  84. metadata +24 -39
  85. data/.travis.yml +0 -14
  86. data/ext/zstdruby/libzstd/.gitignore +0 -3
  87. data/ext/zstdruby/libzstd/BUCK +0 -234
  88. data/ext/zstdruby/libzstd/Makefile +0 -289
  89. data/ext/zstdruby/libzstd/README.md +0 -159
  90. data/ext/zstdruby/libzstd/deprecated/zbuff.h +0 -214
  91. data/ext/zstdruby/libzstd/deprecated/zbuff_common.c +0 -26
  92. data/ext/zstdruby/libzstd/deprecated/zbuff_compress.c +0 -147
  93. data/ext/zstdruby/libzstd/deprecated/zbuff_decompress.c +0 -75
  94. data/ext/zstdruby/libzstd/dll/example/Makefile +0 -47
  95. data/ext/zstdruby/libzstd/dll/example/README.md +0 -69
  96. data/ext/zstdruby/libzstd/dll/example/build_package.bat +0 -20
  97. data/ext/zstdruby/libzstd/dll/example/fullbench-dll.sln +0 -25
  98. data/ext/zstdruby/libzstd/dll/example/fullbench-dll.vcxproj +0 -181
  99. data/ext/zstdruby/libzstd/legacy/zstd_legacy.h +0 -415
  100. data/ext/zstdruby/libzstd/legacy/zstd_v01.c +0 -2152
  101. data/ext/zstdruby/libzstd/legacy/zstd_v01.h +0 -94
  102. data/ext/zstdruby/libzstd/legacy/zstd_v02.c +0 -3514
  103. data/ext/zstdruby/libzstd/legacy/zstd_v02.h +0 -93
  104. data/ext/zstdruby/libzstd/legacy/zstd_v03.c +0 -3156
  105. data/ext/zstdruby/libzstd/legacy/zstd_v03.h +0 -93
  106. data/ext/zstdruby/libzstd/legacy/zstd_v04.c +0 -3641
  107. data/ext/zstdruby/libzstd/legacy/zstd_v04.h +0 -142
  108. data/ext/zstdruby/libzstd/legacy/zstd_v05.c +0 -4046
  109. data/ext/zstdruby/libzstd/legacy/zstd_v05.h +0 -162
  110. data/ext/zstdruby/libzstd/legacy/zstd_v06.c +0 -4150
  111. data/ext/zstdruby/libzstd/legacy/zstd_v06.h +0 -172
  112. data/ext/zstdruby/libzstd/legacy/zstd_v07.c +0 -4533
  113. data/ext/zstdruby/libzstd/legacy/zstd_v07.h +0 -187
  114. data/ext/zstdruby/libzstd/libzstd.pc.in +0 -15
  115. data/ext/zstdruby/zstdruby.h +0 -6
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -10,6 +10,9 @@
 
  #include "zstd_compress_internal.h"
  #include "zstd_lazy.h"
+ #include "../common/bits.h" /* ZSTD_countTrailingZeros64 */
+
+ #define kLazySkippingStep 8
 
 
  /*-*************************************
@@ -58,11 +61,11 @@ ZSTD_updateDUBT(ZSTD_matchState_t* ms,
 
  /** ZSTD_insertDUBT1() :
   *  sort one already inserted but unsorted position
-  *  assumption : current >= btlow == (current - btmask)
+  *  assumption : curr >= btlow == (curr - btmask)
   *  doesn't fail */
  static void
- ZSTD_insertDUBT1(ZSTD_matchState_t* ms,
-                  U32 current, const BYTE* inputEnd,
+ ZSTD_insertDUBT1(const ZSTD_matchState_t* ms,
+                  U32 curr, const BYTE* inputEnd,
                   U32 nbCompares, U32 btLow,
                   const ZSTD_dictMode_e dictMode)
  {
@@ -74,41 +77,41 @@ ZSTD_insertDUBT1(ZSTD_matchState_t* ms,
      const BYTE* const base = ms->window.base;
      const BYTE* const dictBase = ms->window.dictBase;
      const U32 dictLimit = ms->window.dictLimit;
-     const BYTE* const ip = (current>=dictLimit) ? base + current : dictBase + current;
-     const BYTE* const iend = (current>=dictLimit) ? inputEnd : dictBase + dictLimit;
+     const BYTE* const ip = (curr>=dictLimit) ? base + curr : dictBase + curr;
+     const BYTE* const iend = (curr>=dictLimit) ? inputEnd : dictBase + dictLimit;
      const BYTE* const dictEnd = dictBase + dictLimit;
      const BYTE* const prefixStart = base + dictLimit;
      const BYTE* match;
-     U32* smallerPtr = bt + 2*(current&btMask);
+     U32* smallerPtr = bt + 2*(curr&btMask);
      U32* largerPtr  = smallerPtr + 1;
      U32 matchIndex = *smallerPtr;   /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */
      U32 dummy32;   /* to be nullified at the end */
      U32 const windowValid = ms->window.lowLimit;
      U32 const maxDistance = 1U << cParams->windowLog;
-     U32 const windowLow = (current - windowValid > maxDistance) ? current - maxDistance : windowValid;
+     U32 const windowLow = (curr - windowValid > maxDistance) ? curr - maxDistance : windowValid;
 
 
      DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)",
-                 current, dictLimit, windowLow);
-     assert(current >= btLow);
+                 curr, dictLimit, windowLow);
+     assert(curr >= btLow);
      assert(ip < iend);   /* condition for ZSTD_count */
 
-     while (nbCompares-- && (matchIndex > windowLow)) {
+     for (; nbCompares && (matchIndex > windowLow); --nbCompares) {
          U32* const nextPtr = bt + 2*(matchIndex & btMask);
          size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
-         assert(matchIndex < current);
+         assert(matchIndex < curr);
          /* note : all candidates are now supposed sorted,
          * but it's still possible to have nextPtr[1] == ZSTD_DUBT_UNSORTED_MARK
          * when a real index has the same value as ZSTD_DUBT_UNSORTED_MARK */
 
          if ( (dictMode != ZSTD_extDict)
           || (matchIndex+matchLength >= dictLimit)  /* both in current segment*/
-          || (current < dictLimit) /* both in extDict */) {
+          || (curr < dictLimit) /* both in extDict */) {
              const BYTE* const mBase = ( (dictMode != ZSTD_extDict)
                                       || (matchIndex+matchLength >= dictLimit)) ?
                                          base : dictBase;
              assert( (matchIndex+matchLength >= dictLimit)   /* might be wrong if extDict is incorrectly set to 0 */
-                  || (current < dictLimit) );
+                  || (curr < dictLimit) );
              match = mBase + matchIndex;
              matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
          } else {
@@ -119,7 +122,7 @@ ZSTD_insertDUBT1(ZSTD_matchState_t* ms,
          }
 
          DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ",
-                     current, matchIndex, (U32)matchLength);
+                     curr, matchIndex, (U32)matchLength);
 
          if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
              break;   /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */
@@ -151,7 +154,7 @@ ZSTD_insertDUBT1(ZSTD_matchState_t* ms,
 
  static size_t
  ZSTD_DUBT_findBetterDictMatch (
-         ZSTD_matchState_t* ms,
+         const ZSTD_matchState_t* ms,
          const BYTE* const ip, const BYTE* const iend,
          size_t* offsetPtr,
          size_t bestLength,
@@ -168,7 +171,7 @@ ZSTD_DUBT_findBetterDictMatch (
 
      const BYTE* const base = ms->window.base;
      const BYTE* const prefixStart = base + ms->window.dictLimit;
-     U32 const current = (U32)(ip-base);
+     U32 const curr = (U32)(ip-base);
      const BYTE* const dictBase = dms->window.base;
      const BYTE* const dictEnd = dms->window.nextSrc;
      U32 const dictHighLimit = (U32)(dms->window.nextSrc - dms->window.base);
@@ -185,7 +188,7 @@ ZSTD_DUBT_findBetterDictMatch (
      (void)dictMode;
      assert(dictMode == ZSTD_dictMatchState);
 
-     while (nbCompares-- && (dictMatchIndex > dictLowLimit)) {
+     for (; nbCompares && (dictMatchIndex > dictLowLimit); --nbCompares) {
          U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask);
          size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
          const BYTE* match = dictBase + dictMatchIndex;
@@ -195,10 +198,10 @@ ZSTD_DUBT_findBetterDictMatch (
 
          if (matchLength > bestLength) {
              U32 matchIndex = dictMatchIndex + dictIndexDelta;
-             if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) {
+             if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) {
                  DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)",
-                     current, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, ZSTD_REP_MOVE + current - matchIndex, dictMatchIndex, matchIndex);
-                 bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
+                     curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, OFFSET_TO_OFFBASE(curr - matchIndex), dictMatchIndex, matchIndex);
+                 bestLength = matchLength, *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex);
              }
              if (ip+matchLength == iend) {   /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */
                  break;   /* drop, to guarantee consistency (miss a little bit of compression) */
@@ -218,9 +221,9 @@ ZSTD_DUBT_findBetterDictMatch (
      }
 
      if (bestLength >= MINMATCH) {
-         U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
+         U32 const mIndex = curr - (U32)OFFBASE_TO_OFFSET(*offsetPtr); (void)mIndex;
          DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
-                     current, (U32)bestLength, (U32)*offsetPtr, mIndex);
+                     curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
      }
      return bestLength;
 
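Note: the recurring change in these hunks — `ZSTD_REP_MOVE + offset` becoming `OFFSET_TO_OFFBASE(offset)` — is part of upstream zstd's switch to a single "offBase" representation covering both real offsets and repeat codes. A minimal sketch of the convention, assuming it mirrors the macros in zstd_compress_internal.h (the real definitions carry additional asserts and are not reproduced here):

    /* Sketch only, not the upstream definition: offBase values 1..ZSTD_REP_NUM
     * encode repeat codes; real offsets are shifted up by ZSTD_REP_NUM so the
     * two ranges never collide. */
    #define ZSTD_REP_NUM 3                                /* assumed, as in zstd */
    #define OFFSET_TO_OFFBASE(o)   ((o) + ZSTD_REP_NUM)   /* requires o > 0 */
    #define OFFBASE_TO_OFFSET(ob)  ((ob) - ZSTD_REP_NUM)  /* inverse, for ob > ZSTD_REP_NUM */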
@@ -230,7 +233,7 @@ ZSTD_DUBT_findBetterDictMatch (
  static size_t
  ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
                          const BYTE* const ip, const BYTE* const iend,
-                         size_t* offsetPtr,
+                         size_t* offBasePtr,
                          U32 const mls,
                          const ZSTD_dictMode_e dictMode)
  {
@@ -241,13 +244,13 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
      U32    matchIndex  = hashTable[h];
 
      const BYTE* const base = ms->window.base;
-     U32    const current = (U32)(ip-base);
-     U32    const windowLow = ZSTD_getLowestMatchIndex(ms, current, cParams->windowLog);
+     U32    const curr = (U32)(ip-base);
+     U32    const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);
 
      U32*   const bt = ms->chainTable;
      U32    const btLog  = cParams->chainLog - 1;
      U32    const btMask = (1 << btLog) - 1;
-     U32    const btLow = (btMask >= current) ? 0 : current - btMask;
+     U32    const btLow = (btMask >= curr) ? 0 : curr - btMask;
      U32    const unsortLimit = MAX(btLow, windowLow);
 
      U32*   nextCandidate = bt + 2*(matchIndex&btMask);
@@ -256,8 +259,9 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
      U32    nbCandidates = nbCompares;
      U32    previousCandidate = 0;
 
-     DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", current);
+     DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", curr);
      assert(ip <= iend-8);   /* required for h calculation */
+     assert(dictMode != ZSTD_dedicatedDictSearch);
 
      /* reach end of unsorted candidates list */
      while ( (matchIndex > unsortLimit)
@@ -299,16 +303,16 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
          const U32 dictLimit = ms->window.dictLimit;
          const BYTE* const dictEnd = dictBase + dictLimit;
          const BYTE* const prefixStart = base + dictLimit;
-         U32* smallerPtr = bt + 2*(current&btMask);
-         U32* largerPtr  = bt + 2*(current&btMask) + 1;
-         U32 matchEndIdx = current + 8 + 1;
+         U32* smallerPtr = bt + 2*(curr&btMask);
+         U32* largerPtr  = bt + 2*(curr&btMask) + 1;
+         U32 matchEndIdx = curr + 8 + 1;
          U32 dummy32;   /* to be nullified at the end */
          size_t bestLength = 0;
 
          matchIndex  = hashTable[h];
-         hashTable[h] = current;   /* Update Hash Table */
+         hashTable[h] = curr;   /* Update Hash Table */
 
-         while (nbCompares-- && (matchIndex > windowLow)) {
+         for (; nbCompares && (matchIndex > windowLow); --nbCompares) {
              U32* const nextPtr = bt + 2*(matchIndex & btMask);
              size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
              const BYTE* match;
@@ -326,8 +330,8 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
              if (matchLength > bestLength) {
                  if (matchLength > matchEndIdx - matchIndex)
                      matchEndIdx = matchIndex + (U32)matchLength;
-                 if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(current-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
-                     bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + current - matchIndex;
+                 if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((U32)*offBasePtr)) )
+                     bestLength = matchLength, *offBasePtr = OFFSET_TO_OFFBASE(curr - matchIndex);
                  if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
                      if (dictMode == ZSTD_dictMatchState) {
                          nbCompares = 0; /* in addition to avoiding checking any
@@ -356,19 +360,20 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
 
      *smallerPtr = *largerPtr = 0;
 
+     assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
      if (dictMode == ZSTD_dictMatchState && nbCompares) {
          bestLength = ZSTD_DUBT_findBetterDictMatch(
                  ms, ip, iend,
-                 offsetPtr, bestLength, nbCompares,
+                 offBasePtr, bestLength, nbCompares,
                  mls, dictMode);
      }
 
-     assert(matchEndIdx > current+8); /* ensure nextToUpdate is increased */
+     assert(matchEndIdx > curr+8); /* ensure nextToUpdate is increased */
      ms->nextToUpdate = matchEndIdx - 8;   /* skip repetitive patterns */
      if (bestLength >= MINMATCH) {
-         U32 const mIndex = current - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
+         U32 const mIndex = curr - (U32)OFFBASE_TO_OFFSET(*offBasePtr); (void)mIndex;
          DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
-                     current, (U32)bestLength, (U32)*offsetPtr, mIndex);
+                     curr, (U32)bestLength, (U32)*offBasePtr, mIndex);
      }
      return bestLength;
  }
@@ -379,66 +384,232 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
  FORCE_INLINE_TEMPLATE size_t
  ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
                        const BYTE* const ip, const BYTE* const iLimit,
-                       size_t* offsetPtr,
+                       size_t* offBasePtr,
                        const U32 mls /* template */,
                        const ZSTD_dictMode_e dictMode)
  {
      DEBUGLOG(7, "ZSTD_BtFindBestMatch");
      if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
      ZSTD_updateDUBT(ms, ip, iLimit, mls);
-     return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode);
+     return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offBasePtr, mls, dictMode);
  }
 
+ /***********************************
+ *  Dedicated dict search
+ ***********************************/
 
- static size_t
- ZSTD_BtFindBestMatch_selectMLS (  ZSTD_matchState_t* ms,
-                     const BYTE* ip, const BYTE* const iLimit,
-                     size_t* offsetPtr)
+ void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip)
  {
-     switch(ms->cParams.minMatch)
+     const BYTE* const base = ms->window.base;
+     U32 const target = (U32)(ip - base);
+     U32* const hashTable = ms->hashTable;
+     U32* const chainTable = ms->chainTable;
+     U32 const chainSize = 1 << ms->cParams.chainLog;
+     U32 idx = ms->nextToUpdate;
+     U32 const minChain = chainSize < target - idx ? target - chainSize : idx;
+     U32 const bucketSize = 1 << ZSTD_LAZY_DDSS_BUCKET_LOG;
+     U32 const cacheSize = bucketSize - 1;
+     U32 const chainAttempts = (1 << ms->cParams.searchLog) - cacheSize;
+     U32 const chainLimit = chainAttempts > 255 ? 255 : chainAttempts;
+
+     /* We know the hashtable is oversized by a factor of `bucketSize`.
+      * We are going to temporarily pretend `bucketSize == 1`, keeping only a
+      * single entry. We will use the rest of the space to construct a temporary
+      * chaintable.
+      */
+     U32 const hashLog = ms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG;
+     U32* const tmpHashTable = hashTable;
+     U32* const tmpChainTable = hashTable + ((size_t)1 << hashLog);
+     U32 const tmpChainSize = (U32)((1 << ZSTD_LAZY_DDSS_BUCKET_LOG) - 1) << hashLog;
+     U32 const tmpMinChain = tmpChainSize < target ? target - tmpChainSize : idx;
+     U32 hashIdx;
+
+     assert(ms->cParams.chainLog <= 24);
+     assert(ms->cParams.hashLog > ms->cParams.chainLog);
+     assert(idx != 0);
+     assert(tmpMinChain <= minChain);
+
+     /* fill conventional hash table and conventional chain table */
+     for ( ; idx < target; idx++) {
+         U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch);
+         if (idx >= tmpMinChain) {
+             tmpChainTable[idx - tmpMinChain] = hashTable[h];
+         }
+         tmpHashTable[h] = idx;
+     }
+
+     /* sort chains into ddss chain table */
      {
-     default : /* includes case 3 */
-     case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
-     case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
-     case 7 :
-     case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
+         U32 chainPos = 0;
+         for (hashIdx = 0; hashIdx < (1U << hashLog); hashIdx++) {
+             U32 count;
+             U32 countBeyondMinChain = 0;
+             U32 i = tmpHashTable[hashIdx];
+             for (count = 0; i >= tmpMinChain && count < cacheSize; count++) {
+                 /* skip through the chain to the first position that won't be
+                  * in the hash cache bucket */
+                 if (i < minChain) {
+                     countBeyondMinChain++;
+                 }
+                 i = tmpChainTable[i - tmpMinChain];
+             }
+             if (count == cacheSize) {
+                 for (count = 0; count < chainLimit;) {
+                     if (i < minChain) {
+                         if (!i || ++countBeyondMinChain > cacheSize) {
+                             /* only allow pulling `cacheSize` number of entries
+                              * into the cache or chainTable beyond `minChain`,
+                              * to replace the entries pulled out of the
+                              * chainTable into the cache. This lets us reach
+                              * back further without increasing the total number
+                              * of entries in the chainTable, guaranteeing the
+                              * DDSS chain table will fit into the space
+                              * allocated for the regular one. */
+                             break;
+                         }
+                     }
+                     chainTable[chainPos++] = i;
+                     count++;
+                     if (i < tmpMinChain) {
+                         break;
+                     }
+                     i = tmpChainTable[i - tmpMinChain];
+                 }
+             } else {
+                 count = 0;
+             }
+             if (count) {
+                 tmpHashTable[hashIdx] = ((chainPos - count) << 8) + count;
+             } else {
+                 tmpHashTable[hashIdx] = 0;
+             }
+         }
+         assert(chainPos <= chainSize); /* I believe this is guaranteed... */
+     }
+
+     /* move chain pointers into the last entry of each hash bucket */
+     for (hashIdx = (1 << hashLog); hashIdx; ) {
+         U32 const bucketIdx = --hashIdx << ZSTD_LAZY_DDSS_BUCKET_LOG;
+         U32 const chainPackedPointer = tmpHashTable[hashIdx];
+         U32 i;
+         for (i = 0; i < cacheSize; i++) {
+             hashTable[bucketIdx + i] = 0;
+         }
+         hashTable[bucketIdx + bucketSize - 1] = chainPackedPointer;
+     }
+
+     /* fill the buckets of the hash table */
+     for (idx = ms->nextToUpdate; idx < target; idx++) {
+         U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch)
+                     << ZSTD_LAZY_DDSS_BUCKET_LOG;
+         U32 i;
+         /* Shift hash cache down 1. */
+         for (i = cacheSize - 1; i; i--)
+             hashTable[h + i] = hashTable[h + i - 1];
+         hashTable[h] = idx;
      }
+
+     ms->nextToUpdate = target;
  }
 
+ /* Returns the longest match length found in the dedicated dict search structure.
+  * If none are longer than the argument ml, then ml will be returned.
+  */
+ FORCE_INLINE_TEMPLATE
+ size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nbAttempts,
+                                             const ZSTD_matchState_t* const dms,
+                                             const BYTE* const ip, const BYTE* const iLimit,
+                                             const BYTE* const prefixStart, const U32 curr,
+                                             const U32 dictLimit, const size_t ddsIdx) {
+     const U32 ddsLowestIndex  = dms->window.dictLimit;
+     const BYTE* const ddsBase = dms->window.base;
+     const BYTE* const ddsEnd  = dms->window.nextSrc;
+     const U32 ddsSize         = (U32)(ddsEnd - ddsBase);
+     const U32 ddsIndexDelta   = dictLimit - ddsSize;
+     const U32 bucketSize      = (1 << ZSTD_LAZY_DDSS_BUCKET_LOG);
+     const U32 bucketLimit     = nbAttempts < bucketSize - 1 ? nbAttempts : bucketSize - 1;
+     U32 ddsAttempt;
+     U32 matchIndex;
+
+     for (ddsAttempt = 0; ddsAttempt < bucketSize - 1; ddsAttempt++) {
+         PREFETCH_L1(ddsBase + dms->hashTable[ddsIdx + ddsAttempt]);
+     }
 
- static size_t ZSTD_BtFindBestMatch_dictMatchState_selectMLS (
-                     ZSTD_matchState_t* ms,
-                     const BYTE* ip, const BYTE* const iLimit,
-                     size_t* offsetPtr)
- {
-     switch(ms->cParams.minMatch)
      {
-     default : /* includes case 3 */
-     case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
-     case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
-     case 7 :
-     case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
+         U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
+         U32 const chainIndex = chainPackedPointer >> 8;
+
+         PREFETCH_L1(&dms->chainTable[chainIndex]);
      }
- }
 
+     for (ddsAttempt = 0; ddsAttempt < bucketLimit; ddsAttempt++) {
+         size_t currentMl=0;
+         const BYTE* match;
+         matchIndex = dms->hashTable[ddsIdx + ddsAttempt];
+         match = ddsBase + matchIndex;
+
+         if (!matchIndex) {
+             return ml;
+         }
+
+         /* guaranteed by table construction */
+         (void)ddsLowestIndex;
+         assert(matchIndex >= ddsLowestIndex);
+         assert(match+4 <= ddsEnd);
+         if (MEM_read32(match) == MEM_read32(ip)) {
+             /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+             currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
+         }
+
+         /* save best solution */
+         if (currentMl > ml) {
+             ml = currentMl;
+             *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + ddsIndexDelta));
+             if (ip+currentMl == iLimit) {
+                 /* best possible, avoids read overflow on next attempt */
+                 return ml;
+             }
+         }
+     }
 
- static size_t ZSTD_BtFindBestMatch_extDict_selectMLS (
-                     ZSTD_matchState_t* ms,
-                     const BYTE* ip, const BYTE* const iLimit,
-                     size_t* offsetPtr)
- {
-     switch(ms->cParams.minMatch)
      {
-     default : /* includes case 3 */
-     case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
-     case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
-     case 7 :
-     case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
+         U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
+         U32 chainIndex = chainPackedPointer >> 8;
+         U32 const chainLength = chainPackedPointer & 0xFF;
+         U32 const chainAttempts = nbAttempts - ddsAttempt;
+         U32 const chainLimit = chainAttempts > chainLength ? chainLength : chainAttempts;
+         U32 chainAttempt;
+
+         for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++) {
+             PREFETCH_L1(ddsBase + dms->chainTable[chainIndex + chainAttempt]);
+         }
+
+         for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++, chainIndex++) {
+             size_t currentMl=0;
+             const BYTE* match;
+             matchIndex = dms->chainTable[chainIndex];
+             match = ddsBase + matchIndex;
+
+             /* guaranteed by table construction */
+             assert(matchIndex >= ddsLowestIndex);
+             assert(match+4 <= ddsEnd);
+             if (MEM_read32(match) == MEM_read32(ip)) {
+                 /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+                 currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
+             }
+
+             /* save best solution */
+             if (currentMl > ml) {
+                 ml = currentMl;
+                 *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + ddsIndexDelta));
+                 if (ip+currentMl == iLimit) break;   /* best possible, avoids read overflow on next attempt */
+             }
+         }
      }
+     return ml;
  }
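Note: ZSTD_dedicatedDictSearch_lazy_loadDictionary() above packs each hash bucket's overflow chain into a single U32 — `((chainPos - count) << 8) + count` — which ZSTD_dedicatedDictSearch_lazy_search() unpacks with `>> 8` and `& 0xFF` (hence chains are capped at 255 entries). A standalone round-trip of that encoding, with hypothetical helper names, for illustration only:

    #include <assert.h>
    typedef unsigned int U32;

    /* High 24 bits: start position in the DDSS chain table.
     * Low 8 bits:   number of chain entries at that position (<= 255). */
    static U32 dds_packChain(U32 chainStart, U32 chainLength) {
        assert(chainLength <= 255);
        return (chainStart << 8) + chainLength;
    }
    static U32 dds_chainStart(U32 packed)  { return packed >> 8;   }
    static U32 dds_chainLength(U32 packed) { return packed & 0xFF; }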
 
 
-
  /* *********************************
  *  Hash Chain
  ***********************************/
@@ -446,10 +617,10 @@ static size_t ZSTD_BtFindBestMatch_extDict_selectMLS (
 
  /* Update chains up to ip (excluded)
     Assumption : always within prefix (i.e. not within extDict) */
- static U32 ZSTD_insertAndFindFirstIndex_internal(
+ FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal(
                          ZSTD_matchState_t* ms,
                          const ZSTD_compressionParameters* const cParams,
-                         const BYTE* ip, U32 const mls)
+                         const BYTE* ip, U32 const mls, U32 const lazySkipping)
  {
      U32* const hashTable  = ms->hashTable;
      const U32 hashLog = cParams->hashLog;
@@ -464,6 +635,9 @@ static U32 ZSTD_insertAndFindFirstIndex_internal(
          NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
          hashTable[h] = idx;
          idx++;
+         /* Stop inserting every position when in the lazy skipping mode. */
+         if (lazySkipping)
+             break;
      }
 
      ms->nextToUpdate = target;
@@ -472,13 +646,12 @@ static U32 ZSTD_insertAndFindFirstIndex_internal(
 
  U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
      const ZSTD_compressionParameters* const cParams = &ms->cParams;
-     return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch);
+     return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch, /* lazySkipping*/ 0);
  }
 
-
  /* inlining is important to hardwire a hot branch (template emulation) */
  FORCE_INLINE_TEMPLATE
- size_t ZSTD_HcFindBestMatch_generic (
+ size_t ZSTD_HcFindBestMatch(
                          ZSTD_matchState_t* ms,
                          const BYTE* const ip, const BYTE* const iLimit,
                          size_t* offsetPtr,
@@ -493,25 +666,39 @@ size_t ZSTD_HcFindBestMatch_generic (
      const U32 dictLimit = ms->window.dictLimit;
      const BYTE* const prefixStart = base + dictLimit;
      const BYTE* const dictEnd = dictBase + dictLimit;
-     const U32 current = (U32)(ip-base);
+     const U32 curr = (U32)(ip-base);
      const U32 maxDistance = 1U << cParams->windowLog;
      const U32 lowestValid = ms->window.lowLimit;
-     const U32 withinMaxDistance = (current - lowestValid > maxDistance) ? current - maxDistance : lowestValid;
+     const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
      const U32 isDictionary = (ms->loadedDictEnd != 0);
      const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
-     const U32 minChain = current > chainSize ? current - chainSize : 0;
+     const U32 minChain = curr > chainSize ? curr - chainSize : 0;
      U32 nbAttempts = 1U << cParams->searchLog;
      size_t ml=4-1;
 
+     const ZSTD_matchState_t* const dms = ms->dictMatchState;
+     const U32 ddsHashLog = dictMode == ZSTD_dedicatedDictSearch
+                          ? dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
+     const size_t ddsIdx = dictMode == ZSTD_dedicatedDictSearch
+                         ? ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
+
+     U32 matchIndex;
+
+     if (dictMode == ZSTD_dedicatedDictSearch) {
+         const U32* entry = &dms->hashTable[ddsIdx];
+         PREFETCH_L1(entry);
+     }
+
      /* HC4 match finder */
-     U32 matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls);
+     matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls, ms->lazySkipping);
 
-     for ( ; (matchIndex>lowLimit) & (nbAttempts>0) ; nbAttempts--) {
+     for ( ; (matchIndex>=lowLimit) & (nbAttempts>0) ; nbAttempts--) {
          size_t currentMl=0;
          if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
              const BYTE* const match = base + matchIndex;
              assert(matchIndex >= dictLimit);   /* ensures this is true if dictMode != ZSTD_extDict */
-             if (match[ml] == ip[ml])   /* potentially better */
+             /* read 4B starting from (match + ml + 1 - sizeof(U32)) */
+             if (MEM_read32(match + ml - 3) == MEM_read32(ip + ml - 3))   /* potentially better */
                  currentMl = ZSTD_count(ip, match, iLimit);
          } else {
              const BYTE* const match = dictBase + matchIndex;
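Note: the hunk above replaces the one-byte filter `match[ml] == ip[ml]` with a 4-byte compare. A candidate can only improve on the current best length ml if it matches at least ml+1 bytes, so checking the 4 bytes that end at offset ml rejects most candidates with a single unaligned load. A self-contained sketch of the same filter (helper name is hypothetical, not upstream API):

    #include <stdint.h>
    #include <string.h>

    /* Nonzero if `cand` could still beat the current best length `ml` against `ip`:
     * the 4 bytes ending at offset ml must already agree (requires ml >= 3, which
     * holds since the search initializes ml = 4-1). */
    static int could_beat_best(const uint8_t* ip, const uint8_t* cand, size_t ml) {
        uint32_t a, b;
        memcpy(&a, ip   + ml - 3, sizeof(a));   /* bytes ip[ml-3..ml] */
        memcpy(&b, cand + ml - 3, sizeof(b));   /* bytes cand[ml-3..ml] */
        return a == b;
    }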
@@ -523,7 +710,7 @@ size_t ZSTD_HcFindBestMatch_generic (
          /* save best solution */
          if (currentMl > ml) {
              ml = currentMl;
-             *offsetPtr = current - matchIndex + ZSTD_REP_MOVE;
+             *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex);
              if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
          }
 
@@ -531,8 +718,11 @@ size_t ZSTD_HcFindBestMatch_generic (
          matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
      }
 
-     if (dictMode == ZSTD_dictMatchState) {
-         const ZSTD_matchState_t* const dms = ms->dictMatchState;
+     assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
+     if (dictMode == ZSTD_dedicatedDictSearch) {
+         ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts, dms,
+                                                   ip, iLimit, prefixStart, curr, dictLimit, ddsIdx);
+     } else if (dictMode == ZSTD_dictMatchState) {
          const U32* const dmsChainTable = dms->chainTable;
          const U32 dmsChainSize         = (1 << dms->cParams.chainLog);
          const U32 dmsChainMask         = dmsChainSize - 1;
@@ -545,7 +735,7 @@ size_t ZSTD_HcFindBestMatch_generic (
 
          matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)];
 
-         for ( ; (matchIndex>dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) {
+         for ( ; (matchIndex>=dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) {
              size_t currentMl=0;
              const BYTE* const match = dmsBase + matchIndex;
              assert(match+4 <= dmsEnd);
@@ -555,11 +745,13 @@ size_t ZSTD_HcFindBestMatch_generic (
              /* save best solution */
              if (currentMl > ml) {
                  ml = currentMl;
-                 *offsetPtr = current - (matchIndex + dmsIndexDelta) + ZSTD_REP_MOVE;
+                 assert(curr > matchIndex + dmsIndexDelta);
+                 *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + dmsIndexDelta));
                  if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
              }
 
              if (matchIndex <= dmsMinChain) break;
+
              matchIndex = dmsChainTable[matchIndex & dmsChainMask];
          }
      }
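Note: for dictionary matches, `matchIndex` lives in the dictionary's own index space; adding `dmsIndexDelta` (or `ddsIndexDelta` earlier, computed as `dictLimit - ddsSize`) translates it into the current window's index space before the offset `curr - (matchIndex + dmsIndexDelta)` is formed. A worked example with made-up numbers, for illustration only:

    typedef unsigned int U32;

    /* indexDelta = dictLimit - dictSize, as in ddsIndexDelta/dmsIndexDelta. */
    static U32 dict_to_window_index(U32 dictMatchIndex, U32 dictLimit, U32 dictSize) {
        U32 const indexDelta = dictLimit - dictSize;
        return dictMatchIndex + indexDelta;
    }
    /* e.g. dictLimit = 1000, dictSize = 600 -> indexDelta = 400;
     * a dict match at index 550 maps to window index 950, so for
     * curr = 1200 the stored match offset is 1200 - 950 = 250. */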
@@ -567,59 +759,735 @@ size_t ZSTD_HcFindBestMatch_generic (
567
759
  return ml;
568
760
  }
569
761
 
762
+ /* *********************************
763
+ * (SIMD) Row-based matchfinder
764
+ ***********************************/
765
+ /* Constants for row-based hash */
766
+ #define ZSTD_ROW_HASH_TAG_MASK ((1u << ZSTD_ROW_HASH_TAG_BITS) - 1)
767
+ #define ZSTD_ROW_HASH_MAX_ENTRIES 64 /* absolute maximum number of entries per row, for all configurations */
768
+
769
+ #define ZSTD_ROW_HASH_CACHE_MASK (ZSTD_ROW_HASH_CACHE_SIZE - 1)
570
770
 
571
- FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS (
572
- ZSTD_matchState_t* ms,
573
- const BYTE* ip, const BYTE* const iLimit,
574
- size_t* offsetPtr)
771
+ typedef U64 ZSTD_VecMask; /* Clarifies when we are interacting with a U64 representing a mask of matches */
772
+
773
+ /* ZSTD_VecMask_next():
774
+ * Starting from the LSB, returns the idx of the next non-zero bit.
775
+ * Basically counting the nb of trailing zeroes.
776
+ */
777
+ MEM_STATIC U32 ZSTD_VecMask_next(ZSTD_VecMask val) {
778
+ return ZSTD_countTrailingZeros64(val);
779
+ }
780
+
781
+ /* ZSTD_row_nextIndex():
782
+ * Returns the next index to insert at within a tagTable row, and updates the "head"
783
+ * value to reflect the update. Essentially cycles backwards from [1, {entries per row})
784
+ */
785
+ FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextIndex(BYTE* const tagRow, U32 const rowMask) {
786
+ U32 next = (*tagRow-1) & rowMask;
787
+ next += (next == 0) ? rowMask : 0; /* skip first position */
788
+ *tagRow = (BYTE)next;
789
+ return next;
790
+ }
791
+
792
+ /* ZSTD_isAligned():
793
+ * Checks that a pointer is aligned to "align" bytes which must be a power of 2.
794
+ */
795
+ MEM_STATIC int ZSTD_isAligned(void const* ptr, size_t align) {
796
+ assert((align & (align - 1)) == 0);
797
+ return (((size_t)ptr) & (align - 1)) == 0;
798
+ }
799
+
800
+ /* ZSTD_row_prefetch():
801
+ * Performs prefetching for the hashTable and tagTable at a given row.
802
+ */
803
+ FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, BYTE const* tagTable, U32 const relRow, U32 const rowLog) {
804
+ PREFETCH_L1(hashTable + relRow);
805
+ if (rowLog >= 5) {
806
+ PREFETCH_L1(hashTable + relRow + 16);
807
+ /* Note: prefetching more of the hash table does not appear to be beneficial for 128-entry rows */
808
+ }
809
+ PREFETCH_L1(tagTable + relRow);
810
+ if (rowLog == 6) {
811
+ PREFETCH_L1(tagTable + relRow + 32);
812
+ }
813
+ assert(rowLog == 4 || rowLog == 5 || rowLog == 6);
814
+ assert(ZSTD_isAligned(hashTable + relRow, 64)); /* prefetched hash row always 64-byte aligned */
815
+ assert(ZSTD_isAligned(tagTable + relRow, (size_t)1 << rowLog)); /* prefetched tagRow sits on correct multiple of bytes (32,64,128) */
816
+ }
817
+
818
+ /* ZSTD_row_fillHashCache():
819
+ * Fill up the hash cache starting at idx, prefetching up to ZSTD_ROW_HASH_CACHE_SIZE entries,
820
+ * but not beyond iLimit.
821
+ */
822
+ FORCE_INLINE_TEMPLATE void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const BYTE* base,
823
+ U32 const rowLog, U32 const mls,
824
+ U32 idx, const BYTE* const iLimit)
575
825
  {
576
- switch(ms->cParams.minMatch)
577
- {
578
- default : /* includes case 3 */
579
- case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
580
- case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
581
- case 7 :
582
- case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
826
+ U32 const* const hashTable = ms->hashTable;
827
+ BYTE const* const tagTable = ms->tagTable;
828
+ U32 const hashLog = ms->rowHashLog;
829
+ U32 const maxElemsToPrefetch = (base + idx) > iLimit ? 0 : (U32)(iLimit - (base + idx) + 1);
830
+ U32 const lim = idx + MIN(ZSTD_ROW_HASH_CACHE_SIZE, maxElemsToPrefetch);
831
+
832
+ for (; idx < lim; ++idx) {
833
+ U32 const hash = (U32)ZSTD_hashPtrSalted(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt);
834
+ U32 const row = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
835
+ ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
836
+ ms->hashCache[idx & ZSTD_ROW_HASH_CACHE_MASK] = hash;
583
837
  }
838
+
839
+ DEBUGLOG(6, "ZSTD_row_fillHashCache(): [%u %u %u %u %u %u %u %u]", ms->hashCache[0], ms->hashCache[1],
840
+ ms->hashCache[2], ms->hashCache[3], ms->hashCache[4],
841
+ ms->hashCache[5], ms->hashCache[6], ms->hashCache[7]);
584
842
  }
585
843
 
844
+ /* ZSTD_row_nextCachedHash():
845
+ * Returns the hash of base + idx, and replaces the hash in the hash cache with the byte at
846
+ * base + idx + ZSTD_ROW_HASH_CACHE_SIZE. Also prefetches the appropriate rows from hashTable and tagTable.
847
+ */
848
+ FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable,
849
+ BYTE const* tagTable, BYTE const* base,
850
+ U32 idx, U32 const hashLog,
851
+ U32 const rowLog, U32 const mls,
852
+ U64 const hashSalt)
853
+ {
854
+ U32 const newHash = (U32)ZSTD_hashPtrSalted(base+idx+ZSTD_ROW_HASH_CACHE_SIZE, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, hashSalt);
855
+ U32 const row = (newHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
856
+ ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
857
+ { U32 const hash = cache[idx & ZSTD_ROW_HASH_CACHE_MASK];
858
+ cache[idx & ZSTD_ROW_HASH_CACHE_MASK] = newHash;
859
+ return hash;
860
+ }
861
+ }
586
862
 
587
- static size_t ZSTD_HcFindBestMatch_dictMatchState_selectMLS (
588
- ZSTD_matchState_t* ms,
589
- const BYTE* ip, const BYTE* const iLimit,
590
- size_t* offsetPtr)
863
+ /* ZSTD_row_update_internalImpl():
864
+ * Updates the hash table with positions starting from updateStartIdx until updateEndIdx.
865
+ */
866
+ FORCE_INLINE_TEMPLATE void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms,
867
+ U32 updateStartIdx, U32 const updateEndIdx,
868
+ U32 const mls, U32 const rowLog,
869
+ U32 const rowMask, U32 const useCache)
591
870
  {
592
- switch(ms->cParams.minMatch)
593
- {
594
- default : /* includes case 3 */
595
- case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
596
- case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
597
- case 7 :
598
- case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
871
+ U32* const hashTable = ms->hashTable;
872
+ BYTE* const tagTable = ms->tagTable;
873
+ U32 const hashLog = ms->rowHashLog;
874
+ const BYTE* const base = ms->window.base;
875
+
876
+ DEBUGLOG(6, "ZSTD_row_update_internalImpl(): updateStartIdx=%u, updateEndIdx=%u", updateStartIdx, updateEndIdx);
877
+ for (; updateStartIdx < updateEndIdx; ++updateStartIdx) {
878
+ U32 const hash = useCache ? ZSTD_row_nextCachedHash(ms->hashCache, hashTable, tagTable, base, updateStartIdx, hashLog, rowLog, mls, ms->hashSalt)
879
+ : (U32)ZSTD_hashPtrSalted(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt);
880
+ U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
881
+ U32* const row = hashTable + relRow;
882
+ BYTE* tagRow = tagTable + relRow;
883
+ U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask);
884
+
885
+ assert(hash == ZSTD_hashPtrSalted(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt));
886
+ tagRow[pos] = hash & ZSTD_ROW_HASH_TAG_MASK;
887
+ row[pos] = updateStartIdx;
888
+ }
889
+ }
890
+
891
+ /* ZSTD_row_update_internal():
892
+ * Inserts the byte at ip into the appropriate position in the hash table, and updates ms->nextToUpdate.
893
+ * Skips sections of long matches as is necessary.
894
+ */
895
+ FORCE_INLINE_TEMPLATE void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const BYTE* ip,
896
+ U32 const mls, U32 const rowLog,
897
+ U32 const rowMask, U32 const useCache)
898
+ {
899
+ U32 idx = ms->nextToUpdate;
900
+ const BYTE* const base = ms->window.base;
901
+ const U32 target = (U32)(ip - base);
902
+ const U32 kSkipThreshold = 384;
903
+ const U32 kMaxMatchStartPositionsToUpdate = 96;
904
+ const U32 kMaxMatchEndPositionsToUpdate = 32;
905
+
906
+ if (useCache) {
907
+ /* Only skip positions when using hash cache, i.e.
908
+ * if we are loading a dict, don't skip anything.
909
+ * If we decide to skip, then we only update a set number
910
+ * of positions at the beginning and end of the match.
911
+ */
912
+ if (UNLIKELY(target - idx > kSkipThreshold)) {
913
+ U32 const bound = idx + kMaxMatchStartPositionsToUpdate;
914
+ ZSTD_row_update_internalImpl(ms, idx, bound, mls, rowLog, rowMask, useCache);
915
+ idx = target - kMaxMatchEndPositionsToUpdate;
916
+ ZSTD_row_fillHashCache(ms, base, rowLog, mls, idx, ip+1);
917
+ }
918
+ }
919
+ assert(target >= idx);
920
+ ZSTD_row_update_internalImpl(ms, idx, target, mls, rowLog, rowMask, useCache);
921
+ ms->nextToUpdate = target;
922
+ }
923
+
924
+ /* ZSTD_row_update():
925
+ * External wrapper for ZSTD_row_update_internal(). Used for filling the hashtable during dictionary
926
+ * processing.
927
+ */
928
+ void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip) {
929
+ const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);
930
+ const U32 rowMask = (1u << rowLog) - 1;
931
+ const U32 mls = MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */);
932
+
933
+ DEBUGLOG(5, "ZSTD_row_update(), rowLog=%u", rowLog);
934
+ ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 0 /* don't use cache */);
935
+ }
936
+
937
+ /* Returns the mask width of bits group of which will be set to 1. Given not all
938
+ * architectures have easy movemask instruction, this helps to iterate over
939
+ * groups of bits easier and faster.
940
+ */
941
+ FORCE_INLINE_TEMPLATE U32
942
+ ZSTD_row_matchMaskGroupWidth(const U32 rowEntries)
943
+ {
944
+ assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64);
945
+ assert(rowEntries <= ZSTD_ROW_HASH_MAX_ENTRIES);
946
+ (void)rowEntries;
947
+ #if defined(ZSTD_ARCH_ARM_NEON)
948
+ /* NEON path only works for little endian */
949
+ if (!MEM_isLittleEndian()) {
950
+ return 1;
951
+ }
952
+ if (rowEntries == 16) {
953
+ return 4;
954
+ }
955
+ if (rowEntries == 32) {
956
+ return 2;
957
+ }
958
+ if (rowEntries == 64) {
959
+ return 1;
960
+ }
961
+ #endif
962
+ return 1;
963
+ }
964
+
965
+ #if defined(ZSTD_ARCH_X86_SSE2)
966
+ FORCE_INLINE_TEMPLATE ZSTD_VecMask
967
+ ZSTD_row_getSSEMask(int nbChunks, const BYTE* const src, const BYTE tag, const U32 head)
968
+ {
969
+ const __m128i comparisonMask = _mm_set1_epi8((char)tag);
970
+ int matches[4] = {0};
971
+ int i;
972
+ assert(nbChunks == 1 || nbChunks == 2 || nbChunks == 4);
973
+ for (i=0; i<nbChunks; i++) {
974
+ const __m128i chunk = _mm_loadu_si128((const __m128i*)(const void*)(src + 16*i));
975
+ const __m128i equalMask = _mm_cmpeq_epi8(chunk, comparisonMask);
976
+ matches[i] = _mm_movemask_epi8(equalMask);
977
+ }
978
+ if (nbChunks == 1) return ZSTD_rotateRight_U16((U16)matches[0], head);
979
+ if (nbChunks == 2) return ZSTD_rotateRight_U32((U32)matches[1] << 16 | (U32)matches[0], head);
980
+ assert(nbChunks == 4);
981
+ return ZSTD_rotateRight_U64((U64)matches[3] << 48 | (U64)matches[2] << 32 | (U64)matches[1] << 16 | (U64)matches[0], head);
982
+ }
983
+ #endif
984
+
985
+ #if defined(ZSTD_ARCH_ARM_NEON)
986
+ FORCE_INLINE_TEMPLATE ZSTD_VecMask
987
+ ZSTD_row_getNEONMask(const U32 rowEntries, const BYTE* const src, const BYTE tag, const U32 headGrouped)
988
+ {
989
+ assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64);
990
+ if (rowEntries == 16) {
991
+ /* vshrn_n_u16 shifts by 4 every u16 and narrows to 8 lower bits.
992
+ * After that groups of 4 bits represent the equalMask. We lower
993
+ * all bits except the highest in these groups by doing AND with
994
+ * 0x88 = 0b10001000.
995
+ */
996
+ const uint8x16_t chunk = vld1q_u8(src);
997
+ const uint16x8_t equalMask = vreinterpretq_u16_u8(vceqq_u8(chunk, vdupq_n_u8(tag)));
998
+ const uint8x8_t res = vshrn_n_u16(equalMask, 4);
999
+ const U64 matches = vget_lane_u64(vreinterpret_u64_u8(res), 0);
1000
+ return ZSTD_rotateRight_U64(matches, headGrouped) & 0x8888888888888888ull;
1001
+ } else if (rowEntries == 32) {
1002
+ /* Same idea as with rowEntries == 16 but doing AND with
1003
+ * 0x55 = 0b01010101.
1004
+ */
1005
+ const uint16x8x2_t chunk = vld2q_u16((const uint16_t*)(const void*)src);
1006
+ const uint8x16_t chunk0 = vreinterpretq_u8_u16(chunk.val[0]);
1007
+ const uint8x16_t chunk1 = vreinterpretq_u8_u16(chunk.val[1]);
1008
+ const uint8x16_t dup = vdupq_n_u8(tag);
1009
+ const uint8x8_t t0 = vshrn_n_u16(vreinterpretq_u16_u8(vceqq_u8(chunk0, dup)), 6);
1010
+ const uint8x8_t t1 = vshrn_n_u16(vreinterpretq_u16_u8(vceqq_u8(chunk1, dup)), 6);
1011
+ const uint8x8_t res = vsli_n_u8(t0, t1, 4);
1012
+ const U64 matches = vget_lane_u64(vreinterpret_u64_u8(res), 0) ;
1013
+ return ZSTD_rotateRight_U64(matches, headGrouped) & 0x5555555555555555ull;
1014
+ } else { /* rowEntries == 64 */
1015
+ const uint8x16x4_t chunk = vld4q_u8(src);
1016
+ const uint8x16_t dup = vdupq_n_u8(tag);
1017
+ const uint8x16_t cmp0 = vceqq_u8(chunk.val[0], dup);
1018
+ const uint8x16_t cmp1 = vceqq_u8(chunk.val[1], dup);
1019
+ const uint8x16_t cmp2 = vceqq_u8(chunk.val[2], dup);
1020
+ const uint8x16_t cmp3 = vceqq_u8(chunk.val[3], dup);
1021
+
1022
+ const uint8x16_t t0 = vsriq_n_u8(cmp1, cmp0, 1);
1023
+ const uint8x16_t t1 = vsriq_n_u8(cmp3, cmp2, 1);
1024
+ const uint8x16_t t2 = vsriq_n_u8(t1, t0, 2);
1025
+ const uint8x16_t t3 = vsriq_n_u8(t2, t2, 4);
1026
+ const uint8x8_t t4 = vshrn_n_u16(vreinterpretq_u16_u8(t3), 4);
1027
+ const U64 matches = vget_lane_u64(vreinterpret_u64_u8(t4), 0);
1028
+ return ZSTD_rotateRight_U64(matches, headGrouped);
599
1029
  }
600
1030
  }
1031
+ #endif
1032
+
1033
+ /* Returns a ZSTD_VecMask (U64) that has the nth group (determined by
1034
+ * ZSTD_row_matchMaskGroupWidth) of bits set to 1 if the newly-computed "tag"
1035
+ * matches the hash at the nth position in a row of the tagTable.
1036
+ * Each row is a circular buffer beginning at the value of "headGrouped". So we
1037
+ * must rotate the "matches" bitfield to match up with the actual layout of the
1038
+ * entries within the hashTable */
1039
+ FORCE_INLINE_TEMPLATE ZSTD_VecMask
1040
+ ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 headGrouped, const U32 rowEntries)
1041
+ {
1042
+ const BYTE* const src = tagRow;
1043
+ assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64);
1044
+ assert(rowEntries <= ZSTD_ROW_HASH_MAX_ENTRIES);
1045
+ assert(ZSTD_row_matchMaskGroupWidth(rowEntries) * rowEntries <= sizeof(ZSTD_VecMask) * 8);
1046
+
1047
+ #if defined(ZSTD_ARCH_X86_SSE2)
601
1048
 
1049
+ return ZSTD_row_getSSEMask(rowEntries / 16, src, tag, headGrouped);
602
1050
 
603
- FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
1051
+ #else /* SW or NEON-LE */
1052
+
1053
+ # if defined(ZSTD_ARCH_ARM_NEON)
1054
+ /* This NEON path only works for little endian - otherwise use SWAR below */
1055
+ if (MEM_isLittleEndian()) {
1056
+ return ZSTD_row_getNEONMask(rowEntries, src, tag, headGrouped);
1057
+ }
1058
+ # endif /* ZSTD_ARCH_ARM_NEON */
1059
+ /* SWAR */
1060
+ { const int chunkSize = sizeof(size_t);
1061
+ const size_t shiftAmount = ((chunkSize * 8) - chunkSize);
1062
+ const size_t xFF = ~((size_t)0);
1063
+ const size_t x01 = xFF / 0xFF;
1064
+ const size_t x80 = x01 << 7;
1065
+ const size_t splatChar = tag * x01;
1066
+ ZSTD_VecMask matches = 0;
1067
+ int i = rowEntries - chunkSize;
1068
+ assert((sizeof(size_t) == 4) || (sizeof(size_t) == 8));
1069
+ if (MEM_isLittleEndian()) { /* runtime check so have two loops */
1070
+ const size_t extractMagic = (xFF / 0x7F) >> chunkSize;
1071
+ do {
1072
+ size_t chunk = MEM_readST(&src[i]);
1073
+ chunk ^= splatChar;
1074
+ chunk = (((chunk | x80) - x01) | chunk) & x80;
1075
+ matches <<= chunkSize;
1076
+ matches |= (chunk * extractMagic) >> shiftAmount;
1077
+ i -= chunkSize;
1078
+ } while (i >= 0);
1079
+ } else { /* big endian: reverse bits during extraction */
1080
+ const size_t msb = xFF ^ (xFF >> 1);
1081
+ const size_t extractMagic = (msb / 0x1FF) | msb;
1082
+ do {
1083
+ size_t chunk = MEM_readST(&src[i]);
1084
+ chunk ^= splatChar;
1085
+ chunk = (((chunk | x80) - x01) | chunk) & x80;
1086
+ matches <<= chunkSize;
1087
+ matches |= ((chunk >> 7) * extractMagic) >> shiftAmount;
1088
+ i -= chunkSize;
1089
+ } while (i >= 0);
1090
+ }
1091
+ matches = ~matches;
1092
+ if (rowEntries == 16) {
1093
+ return ZSTD_rotateRight_U16((U16)matches, headGrouped);
1094
+ } else if (rowEntries == 32) {
1095
+ return ZSTD_rotateRight_U32((U32)matches, headGrouped);
1096
+ } else {
1097
+ return ZSTD_rotateRight_U64((U64)matches, headGrouped);
1098
+ }
1099
+ }
1100
+ #endif
1101
+ }
1102
+
1103
+ /* The high-level approach of the SIMD row based match finder is as follows:
1104
+ * - Figure out where to insert the new entry:
1105
+ * - Generate a hash from a byte along with an additional 1-byte "short hash". The additional byte is our "tag"
1106
+ * - The hashTable is effectively split into groups or "rows" of 16 or 32 entries of U32, and the hash determines
1107
+ * which row to insert into.
1108
+ * - Determine the correct position within the row to insert the entry into. Each row of 16 or 32 can
1109
+ * be considered as a circular buffer with a "head" index that resides in the tagTable.
1110
+ * - Also insert the "tag" into the equivalent row and position in the tagTable.
1111
+ * - Note: The tagTable has 17 or 33 1-byte entries per row, due to 16 or 32 tags, and 1 "head" entry.
1112
+ * The 17 or 33 entry rows are spaced out to occur every 32 or 64 bytes, respectively,
1113
+ * for alignment/performance reasons, leaving some bytes unused.
1114
+ * - Use SIMD to efficiently compare the tags in the tagTable to the 1-byte "short hash" and
1115
+ * generate a bitfield that we can cycle through to check the collisions in the hash table.
1116
+ * - Pick the longest match.
1117
+ */
+ FORCE_INLINE_TEMPLATE
+ size_t ZSTD_RowFindBestMatch(
  ZSTD_matchState_t* ms,
- const BYTE* ip, const BYTE* const iLimit,
- size_t* offsetPtr)
+ const BYTE* const ip, const BYTE* const iLimit,
+ size_t* offsetPtr,
+ const U32 mls, const ZSTD_dictMode_e dictMode,
+ const U32 rowLog)
  {
- switch(ms->cParams.minMatch)
- {
- default : /* includes case 3 */
- case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
- case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
- case 7 :
- case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
+ U32* const hashTable = ms->hashTable;
+ BYTE* const tagTable = ms->tagTable;
+ U32* const hashCache = ms->hashCache;
+ const U32 hashLog = ms->rowHashLog;
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ const BYTE* const base = ms->window.base;
+ const BYTE* const dictBase = ms->window.dictBase;
+ const U32 dictLimit = ms->window.dictLimit;
+ const BYTE* const prefixStart = base + dictLimit;
+ const BYTE* const dictEnd = dictBase + dictLimit;
+ const U32 curr = (U32)(ip-base);
+ const U32 maxDistance = 1U << cParams->windowLog;
+ const U32 lowestValid = ms->window.lowLimit;
+ const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
+ const U32 isDictionary = (ms->loadedDictEnd != 0);
+ const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
+ const U32 rowEntries = (1U << rowLog);
+ const U32 rowMask = rowEntries - 1;
+ const U32 cappedSearchLog = MIN(cParams->searchLog, rowLog); /* nb of searches is capped at nb entries per row */
+ const U32 groupWidth = ZSTD_row_matchMaskGroupWidth(rowEntries);
+ const U64 hashSalt = ms->hashSalt;
+ U32 nbAttempts = 1U << cappedSearchLog;
+ size_t ml=4-1;
+ U32 hash;
+
+ /* DMS/DDS variables that may be referenced later */
+ const ZSTD_matchState_t* const dms = ms->dictMatchState;
+
+ /* Initialize the following variables to satisfy static analyzer */
+ size_t ddsIdx = 0;
+ U32 ddsExtraAttempts = 0; /* cctx hash tables are limited in searches, but allow extra searches into DDS */
+ U32 dmsTag = 0;
+ U32* dmsRow = NULL;
+ BYTE* dmsTagRow = NULL;
+
+ if (dictMode == ZSTD_dedicatedDictSearch) {
+ const U32 ddsHashLog = dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG;
+ { /* Prefetch DDS hashtable entry */
+ ddsIdx = ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG;
+ PREFETCH_L1(&dms->hashTable[ddsIdx]);
+ }
+ ddsExtraAttempts = cParams->searchLog > rowLog ? 1U << (cParams->searchLog - rowLog) : 0;
  }
+
+ if (dictMode == ZSTD_dictMatchState) {
+ /* Prefetch DMS rows */
+ U32* const dmsHashTable = dms->hashTable;
+ BYTE* const dmsTagTable = dms->tagTable;
+ U32 const dmsHash = (U32)ZSTD_hashPtr(ip, dms->rowHashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
+ U32 const dmsRelRow = (dmsHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
+ dmsTag = dmsHash & ZSTD_ROW_HASH_TAG_MASK;
+ dmsTagRow = (BYTE*)(dmsTagTable + dmsRelRow);
+ dmsRow = dmsHashTable + dmsRelRow;
+ ZSTD_row_prefetch(dmsHashTable, dmsTagTable, dmsRelRow, rowLog);
+ }
+
+ /* Update the hashTable and tagTable up to (but not including) ip */
+ if (!ms->lazySkipping) {
+ ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 1 /* useCache */);
+ hash = ZSTD_row_nextCachedHash(hashCache, hashTable, tagTable, base, curr, hashLog, rowLog, mls, hashSalt);
+ } else {
+ /* Stop inserting every position when in the lazy skipping mode.
+ * The hash cache is also not kept up to date in this mode.
+ */
+ hash = (U32)ZSTD_hashPtrSalted(ip, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, hashSalt);
+ ms->nextToUpdate = curr;
+ }
+ ms->hashSaltEntropy += hash; /* collect salt entropy */
+
+ { /* Get the hash for ip, compute the appropriate row */
+ U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
+ U32 const tag = hash & ZSTD_ROW_HASH_TAG_MASK;
+ U32* const row = hashTable + relRow;
+ BYTE* tagRow = (BYTE*)(tagTable + relRow);
+ U32 const headGrouped = (*tagRow & rowMask) * groupWidth;
+ U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES];
+ size_t numMatches = 0;
+ size_t currMatch = 0;
+ ZSTD_VecMask matches = ZSTD_row_getMatchMask(tagRow, (BYTE)tag, headGrouped, rowEntries);
+
+ /* Cycle through the matches and prefetch */
+ for (; (matches > 0) && (nbAttempts > 0); matches &= (matches - 1)) {
+ U32 const matchPos = ((headGrouped + ZSTD_VecMask_next(matches)) / groupWidth) & rowMask;
+ U32 const matchIndex = row[matchPos];
+ if(matchPos == 0) continue;
+ assert(numMatches < rowEntries);
+ if (matchIndex < lowLimit)
+ break;
+ if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
+ PREFETCH_L1(base + matchIndex);
+ } else {
+ PREFETCH_L1(dictBase + matchIndex);
+ }
+ matchBuffer[numMatches++] = matchIndex;
+ --nbAttempts;
+ }
+
+ /* Speed opt: insert current byte into hashtable too. This allows us to avoid one iteration of the loop
+ in ZSTD_row_update_internal() at the next search. */
+ {
+ U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask);
+ tagRow[pos] = (BYTE)tag;
+ row[pos] = ms->nextToUpdate++;
+ }
+
+ /* Return the longest match */
+ for (; currMatch < numMatches; ++currMatch) {
+ U32 const matchIndex = matchBuffer[currMatch];
+ size_t currentMl=0;
+ assert(matchIndex < curr);
+ assert(matchIndex >= lowLimit);
+
+ if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
+ const BYTE* const match = base + matchIndex;
+ assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */
+ /* read 4B starting from (match + ml + 1 - sizeof(U32)) */
+ if (MEM_read32(match + ml - 3) == MEM_read32(ip + ml - 3)) /* potentially better */
+ currentMl = ZSTD_count(ip, match, iLimit);
+ } else {
+ const BYTE* const match = dictBase + matchIndex;
+ assert(match+4 <= dictEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
+ }
+
+ /* Save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex);
+ if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
+ }
+ }
+ }
+
+ assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
+ if (dictMode == ZSTD_dedicatedDictSearch) {
+ ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts + ddsExtraAttempts, dms,
+ ip, iLimit, prefixStart, curr, dictLimit, ddsIdx);
+ } else if (dictMode == ZSTD_dictMatchState) {
+ /* TODO: Measure and potentially add prefetching to DMS */
+ const U32 dmsLowestIndex = dms->window.dictLimit;
+ const BYTE* const dmsBase = dms->window.base;
+ const BYTE* const dmsEnd = dms->window.nextSrc;
+ const U32 dmsSize = (U32)(dmsEnd - dmsBase);
+ const U32 dmsIndexDelta = dictLimit - dmsSize;
+
+ { U32 const headGrouped = (*dmsTagRow & rowMask) * groupWidth;
+ U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES];
+ size_t numMatches = 0;
+ size_t currMatch = 0;
+ ZSTD_VecMask matches = ZSTD_row_getMatchMask(dmsTagRow, (BYTE)dmsTag, headGrouped, rowEntries);
+
+ for (; (matches > 0) && (nbAttempts > 0); matches &= (matches - 1)) {
+ U32 const matchPos = ((headGrouped + ZSTD_VecMask_next(matches)) / groupWidth) & rowMask;
+ U32 const matchIndex = dmsRow[matchPos];
+ if(matchPos == 0) continue;
+ if (matchIndex < dmsLowestIndex)
+ break;
+ PREFETCH_L1(dmsBase + matchIndex);
+ matchBuffer[numMatches++] = matchIndex;
+ --nbAttempts;
+ }
+
+ /* Return the longest match */
+ for (; currMatch < numMatches; ++currMatch) {
+ U32 const matchIndex = matchBuffer[currMatch];
+ size_t currentMl=0;
+ assert(matchIndex >= dmsLowestIndex);
+ assert(matchIndex < curr);
+
+ { const BYTE* const match = dmsBase + matchIndex;
+ assert(match+4 <= dmsEnd);
+ if (MEM_read32(match) == MEM_read32(ip))
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;
+ }
+
+ if (currentMl > ml) {
+ ml = currentMl;
+ assert(curr > matchIndex + dmsIndexDelta);
+ *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + dmsIndexDelta));
+ if (ip+currentMl == iLimit) break;
+ }
+ }
+ }
+ }
+ return ml;
  }
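A note on the iteration idiom used twice above: `matches &= (matches - 1)` clears the lowest set bit of the candidate mask, while ZSTD_VecMask_next() reports that bit's position, so the loop visits each candidate exactly once, lowest bit first. A standalone sketch of the same pattern, assuming the GCC/Clang count-trailing-zeros builtin:

    #include <stdint.h>
    #include <stdio.h>

    static void visitSetBits(uint64_t mask)
    {
        /* Each pass handles the lowest remaining set bit, then clears it. */
        for (; mask > 0; mask &= (mask - 1)) {
            unsigned const pos = (unsigned)__builtin_ctzll(mask);
            printf("candidate slot %u\n", pos);
        }
    }

    int main(void) { visitSetBits(0x29); return 0; }  /* prints 0, 3, 5 */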


+ /**
+ * Generate search functions templated on (dictMode, mls, rowLog).
+ * These functions are outlined for code size & compilation time.
+ * ZSTD_searchMax() dispatches to the correct implementation function.
+ *
+ * TODO: The start of the search function involves loading and calculating a
+ * bunch of constants from the ZSTD_matchState_t. These computations could be
+ * done in an initialization function, and saved somewhere in the match state.
+ * Then we could pass a pointer to the saved state instead of the match state,
+ * and avoid duplicate computations.
+ *
+ * TODO: Move the match re-winding into searchMax. This improves compression
+ * ratio, and unlocks further simplifications with the next TODO.
+ *
+ * TODO: Try moving the repcode search into searchMax. After the re-winding
+ * and repcode search are in searchMax, there is no more logic in the match
+ * finder loop that requires knowledge about the dictMode. So we should be
+ * able to avoid force inlining it, and we can join the extDict loop with
+ * the single segment loop. It should go in searchMax instead of its own
+ * function to avoid having multiple virtual function calls per search.
+ */
+
+ #define ZSTD_BT_SEARCH_FN(dictMode, mls) ZSTD_BtFindBestMatch_##dictMode##_##mls
+ #define ZSTD_HC_SEARCH_FN(dictMode, mls) ZSTD_HcFindBestMatch_##dictMode##_##mls
+ #define ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog
+
+ #define ZSTD_SEARCH_FN_ATTRS FORCE_NOINLINE
+
+ #define GEN_ZSTD_BT_SEARCH_FN(dictMode, mls) \
+ ZSTD_SEARCH_FN_ATTRS size_t ZSTD_BT_SEARCH_FN(dictMode, mls)( \
+ ZSTD_matchState_t* ms, \
+ const BYTE* ip, const BYTE* const iLimit, \
+ size_t* offBasePtr) \
+ { \
+ assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \
+ return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, mls, ZSTD_##dictMode); \
+ } \
+
+ #define GEN_ZSTD_HC_SEARCH_FN(dictMode, mls) \
+ ZSTD_SEARCH_FN_ATTRS size_t ZSTD_HC_SEARCH_FN(dictMode, mls)( \
+ ZSTD_matchState_t* ms, \
+ const BYTE* ip, const BYTE* const iLimit, \
+ size_t* offsetPtr) \
+ { \
+ assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \
+ return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode); \
+ } \
+
+ #define GEN_ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) \
+ ZSTD_SEARCH_FN_ATTRS size_t ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)( \
+ ZSTD_matchState_t* ms, \
+ const BYTE* ip, const BYTE* const iLimit, \
+ size_t* offsetPtr) \
+ { \
+ assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \
+ assert(MAX(4, MIN(6, ms->cParams.searchLog)) == rowLog); \
+ return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode, rowLog); \
+ } \
+
+ #define ZSTD_FOR_EACH_ROWLOG(X, dictMode, mls) \
+ X(dictMode, mls, 4) \
+ X(dictMode, mls, 5) \
+ X(dictMode, mls, 6)
+
+ #define ZSTD_FOR_EACH_MLS_ROWLOG(X, dictMode) \
+ ZSTD_FOR_EACH_ROWLOG(X, dictMode, 4) \
+ ZSTD_FOR_EACH_ROWLOG(X, dictMode, 5) \
+ ZSTD_FOR_EACH_ROWLOG(X, dictMode, 6)
+
+ #define ZSTD_FOR_EACH_MLS(X, dictMode) \
+ X(dictMode, 4) \
+ X(dictMode, 5) \
+ X(dictMode, 6)
+
+ #define ZSTD_FOR_EACH_DICT_MODE(X, ...) \
+ X(__VA_ARGS__, noDict) \
+ X(__VA_ARGS__, extDict) \
+ X(__VA_ARGS__, dictMatchState) \
+ X(__VA_ARGS__, dedicatedDictSearch)
+
+ /* Generate row search fns for each combination of (dictMode, mls, rowLog) */
+ ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS_ROWLOG, GEN_ZSTD_ROW_SEARCH_FN)
+ /* Generate binary tree search fns for each combination of (dictMode, mls) */
+ ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_BT_SEARCH_FN)
+ /* Generate hash chain search fns for each combination of (dictMode, mls) */
+ ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_HC_SEARCH_FN)
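To illustrate what the generators above emit, expanding GEN_ZSTD_ROW_SEARCH_FN(noDict, 4, 4) by hand (whitespace added) gives the following outlined function; the ZSTD_FOR_EACH_* wrappers then stamp out 4 x 3 x 3 = 36 row variants plus 12 binary-tree and 12 hash-chain variants:

    FORCE_NOINLINE size_t ZSTD_RowFindBestMatch_noDict_4_4(
            ZSTD_matchState_t* ms,
            const BYTE* ip, const BYTE* const iLimit,
            size_t* offsetPtr)
    {
        /* the generated assertions check that the caller clamped the
         * runtime parameters to this instantiation */
        assert(MAX(4, MIN(6, ms->cParams.minMatch)) == 4);
        assert(MAX(4, MIN(6, ms->cParams.searchLog)) == 4);
        return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict, 4);
    }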
+
+ typedef enum { search_hashChain=0, search_binaryTree=1, search_rowHash=2 } searchMethod_e;
+
+ #define GEN_ZSTD_CALL_BT_SEARCH_FN(dictMode, mls) \
+ case mls: \
+ return ZSTD_BT_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr);
+ #define GEN_ZSTD_CALL_HC_SEARCH_FN(dictMode, mls) \
+ case mls: \
+ return ZSTD_HC_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr);
+ #define GEN_ZSTD_CALL_ROW_SEARCH_FN(dictMode, mls, rowLog) \
+ case rowLog: \
+ return ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)(ms, ip, iend, offsetPtr);
+
+ #define ZSTD_SWITCH_MLS(X, dictMode) \
+ switch (mls) { \
+ ZSTD_FOR_EACH_MLS(X, dictMode) \
+ }
+
+ #define ZSTD_SWITCH_ROWLOG(dictMode, mls) \
+ case mls: \
+ switch (rowLog) { \
+ ZSTD_FOR_EACH_ROWLOG(GEN_ZSTD_CALL_ROW_SEARCH_FN, dictMode, mls) \
+ } \
+ ZSTD_UNREACHABLE; \
+ break;
+
+ #define ZSTD_SWITCH_SEARCH_METHOD(dictMode) \
+ switch (searchMethod) { \
+ case search_hashChain: \
+ ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_HC_SEARCH_FN, dictMode) \
+ break; \
+ case search_binaryTree: \
+ ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_BT_SEARCH_FN, dictMode) \
+ break; \
+ case search_rowHash: \
+ ZSTD_SWITCH_MLS(ZSTD_SWITCH_ROWLOG, dictMode) \
+ break; \
+ } \
+ ZSTD_UNREACHABLE;
+
+ /**
+ * Searches for the longest match at @p ip.
+ * Dispatches to the correct implementation function based on the
+ * (searchMethod, dictMode, mls, rowLog). We use switch statements
+ * here instead of using an indirect function call through a function
+ * pointer because after Spectre and Meltdown mitigations, indirect
+ * function calls can be very costly, especially in the kernel.
+ *
+ * NOTE: dictMode and searchMethod should be templated, so those switch
+ * statements should be optimized out. Only the mls & rowLog switches
+ * should be left.
+ *
+ * @param ms The match state.
+ * @param ip The position to search at.
+ * @param iend The end of the input data.
+ * @param[out] offsetPtr Stores the match offset into this pointer.
+ * @param mls The minimum search length, in the range [4, 6].
+ * @param rowLog The row log (if applicable), in the range [4, 6].
+ * @param searchMethod The search method to use (templated).
+ * @param dictMode The dictMode (templated).
+ *
+ * @returns The length of the longest match found, or < mls if no match is found.
+ * If a match is found its offset is stored in @p offsetPtr.
+ */
+ FORCE_INLINE_TEMPLATE size_t ZSTD_searchMax(
+ ZSTD_matchState_t* ms,
+ const BYTE* ip,
+ const BYTE* iend,
+ size_t* offsetPtr,
+ U32 const mls,
+ U32 const rowLog,
+ searchMethod_e const searchMethod,
+ ZSTD_dictMode_e const dictMode)
+ {
+ if (dictMode == ZSTD_noDict) {
+ ZSTD_SWITCH_SEARCH_METHOD(noDict)
+ } else if (dictMode == ZSTD_extDict) {
+ ZSTD_SWITCH_SEARCH_METHOD(extDict)
+ } else if (dictMode == ZSTD_dictMatchState) {
+ ZSTD_SWITCH_SEARCH_METHOD(dictMatchState)
+ } else if (dictMode == ZSTD_dedicatedDictSearch) {
+ ZSTD_SWITCH_SEARCH_METHOD(dedicatedDictSearch)
+ }
+ ZSTD_UNREACHABLE;
+ return 0;
+ }
+
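The typical call shape, mirroring the parsers below (a fragment, not a standalone program): mls and rowLog are clamped into the instantiated [4, 6] range with BOUNDED(), while searchMethod and dictMode are compile-time constants at every inlined call site, so their switches fold away and only the mls/rowLog dispatch survives at runtime:

    /* sketch of a caller, as in ZSTD_compressBlock_lazy_generic below */
    U32 const mls    = BOUNDED(4, ms->cParams.minMatch, 6);
    U32 const rowLog = BOUNDED(4, ms->cParams.searchLog, 6);
    size_t ofbCandidate = 999999999;
    size_t const ml = ZSTD_searchMax(ms, ip, iend, &ofbCandidate,
                                     mls, rowLog, search_rowHash, ZSTD_noDict);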
  /* *******************************
  * Common parser - lazy strategy
  *********************************/
- typedef enum { search_hashChain, search_binaryTree } searchMethod_e;

  FORCE_INLINE_TEMPLATE size_t
  ZSTD_compressBlock_lazy_generic(
@@ -633,59 +1501,69 @@ ZSTD_compressBlock_lazy_generic(
  const BYTE* ip = istart;
  const BYTE* anchor = istart;
  const BYTE* const iend = istart + srcSize;
- const BYTE* const ilimit = iend - 8;
+ const BYTE* const ilimit = (searchMethod == search_rowHash) ? iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8;
  const BYTE* const base = ms->window.base;
  const U32 prefixLowestIndex = ms->window.dictLimit;
  const BYTE* const prefixLowest = base + prefixLowestIndex;
+ const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6);
+ const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);

- typedef size_t (*searchMax_f)(
- ZSTD_matchState_t* ms,
- const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
- searchMax_f const searchMax = dictMode == ZSTD_dictMatchState ?
- (searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_dictMatchState_selectMLS
- : ZSTD_HcFindBestMatch_dictMatchState_selectMLS) :
- (searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_selectMLS
- : ZSTD_HcFindBestMatch_selectMLS);
- U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;
+ U32 offset_1 = rep[0], offset_2 = rep[1];
+ U32 offsetSaved1 = 0, offsetSaved2 = 0;

+ const int isDMS = dictMode == ZSTD_dictMatchState;
+ const int isDDS = dictMode == ZSTD_dedicatedDictSearch;
+ const int isDxS = isDMS || isDDS;
  const ZSTD_matchState_t* const dms = ms->dictMatchState;
- const U32 dictLowestIndex = dictMode == ZSTD_dictMatchState ?
- dms->window.dictLimit : 0;
- const BYTE* const dictBase = dictMode == ZSTD_dictMatchState ?
- dms->window.base : NULL;
- const BYTE* const dictLowest = dictMode == ZSTD_dictMatchState ?
- dictBase + dictLowestIndex : NULL;
- const BYTE* const dictEnd = dictMode == ZSTD_dictMatchState ?
- dms->window.nextSrc : NULL;
- const U32 dictIndexDelta = dictMode == ZSTD_dictMatchState ?
+ const U32 dictLowestIndex = isDxS ? dms->window.dictLimit : 0;
+ const BYTE* const dictBase = isDxS ? dms->window.base : NULL;
+ const BYTE* const dictLowest = isDxS ? dictBase + dictLowestIndex : NULL;
+ const BYTE* const dictEnd = isDxS ? dms->window.nextSrc : NULL;
+ const U32 dictIndexDelta = isDxS ?
  prefixLowestIndex - (U32)(dictEnd - dictBase) :
  0;
- const U32 dictAndPrefixLength = (U32)(ip - prefixLowest + dictEnd - dictLowest);
+ const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictLowest));

- /* init */
+ DEBUGLOG(5, "ZSTD_compressBlock_lazy_generic (dictMode=%u) (searchFunc=%u)", (U32)dictMode, (U32)searchMethod);
  ip += (dictAndPrefixLength == 0);
  if (dictMode == ZSTD_noDict) {
- U32 const maxRep = (U32)(ip - prefixLowest);
- if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
- if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
+ U32 const curr = (U32)(ip - base);
+ U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, ms->cParams.windowLog);
+ U32 const maxRep = curr - windowLow;
+ if (offset_2 > maxRep) offsetSaved2 = offset_2, offset_2 = 0;
+ if (offset_1 > maxRep) offsetSaved1 = offset_1, offset_1 = 0;
  }
- if (dictMode == ZSTD_dictMatchState) {
+ if (isDxS) {
  /* dictMatchState repCode checks don't currently handle repCode == 0
  * disabling. */
  assert(offset_1 <= dictAndPrefixLength);
  assert(offset_2 <= dictAndPrefixLength);
  }

+ /* Reset the lazy skipping state */
+ ms->lazySkipping = 0;
+
+ if (searchMethod == search_rowHash) {
+ ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit);
+ }
+
  /* Match Loop */
+ #if defined(__GNUC__) && defined(__x86_64__)
+ /* I've measured a random 5% speed loss on levels 5 & 6 (greedy) when the
+ * code alignment is perturbed. To fix the instability, align the loop on 32 bytes.
+ */
+ __asm__(".p2align 5");
+ #endif
  while (ip < ilimit) {
  size_t matchLength=0;
- size_t offset=0;
+ size_t offBase = REPCODE1_TO_OFFBASE;
  const BYTE* start=ip+1;
+ DEBUGLOG(7, "search baseline (depth 0)");

  /* check repCode */
- if (dictMode == ZSTD_dictMatchState) {
+ if (isDxS) {
  const U32 repIndex = (U32)(ip - base) + 1 - offset_1;
- const BYTE* repMatch = (dictMode == ZSTD_dictMatchState
+ const BYTE* repMatch = ((dictMode == ZSTD_dictMatchState || dictMode == ZSTD_dedicatedDictSearch)
  && repIndex < prefixLowestIndex) ?
  dictBase + (repIndex - dictIndexDelta) :
  base + repIndex;
@@ -703,30 +1581,40 @@ ZSTD_compressBlock_lazy_generic(
  }

  /* first search (depth 0) */
- { size_t offsetFound = 999999999;
- size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
+ { size_t offbaseFound = 999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offbaseFound, mls, rowLog, searchMethod, dictMode);
  if (ml2 > matchLength)
- matchLength = ml2, start = ip, offset=offsetFound;
+ matchLength = ml2, start = ip, offBase = offbaseFound;
  }

  if (matchLength < 4) {
- ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */
+ size_t const step = ((size_t)(ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */
+ ip += step;
+ /* Enter the lazy skipping mode once we are skipping more than 8 bytes at a time.
+ * In this mode we stop inserting every position into our tables, and only insert
+ * positions that we search, which is one in step positions.
+ * The exact cutoff is flexible, I've just chosen a number that is reasonably high,
+ * so we minimize the compression ratio loss in "normal" scenarios. This mode gets
+ * triggered once we've gone 2KB without finding any matches.
+ */
+ ms->lazySkipping = step > kLazySkippingStep;
  continue;
  }

  /* let's try to find a better solution */
  if (depth>=1)
  while (ip<ilimit) {
+ DEBUGLOG(7, "search depth 1");
  ip ++;
  if ( (dictMode == ZSTD_noDict)
- && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+ && (offBase) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
  size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
  int const gain2 = (int)(mlRep * 3);
- int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offBase) + 1);
  if ((mlRep >= 4) && (gain2 > gain1))
- matchLength = mlRep, offset = 0, start = ip;
+ matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip;
  }
- if (dictMode == ZSTD_dictMatchState) {
+ if (isDxS) {
  const U32 repIndex = (U32)(ip - base) - offset_1;
  const BYTE* repMatch = repIndex < prefixLowestIndex ?
  dictBase + (repIndex - dictIndexDelta) :
@@ -736,32 +1624,33 @@ ZSTD_compressBlock_lazy_generic(
  const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
  size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
  int const gain2 = (int)(mlRep * 3);
- int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offBase) + 1);
  if ((mlRep >= 4) && (gain2 > gain1))
- matchLength = mlRep, offset = 0, start = ip;
+ matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip;
  }
  }
- { size_t offset2=999999999;
- size_t const ml2 = searchMax(ms, ip, iend, &offset2);
- int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
+ { size_t ofbCandidate=999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, dictMode);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 4);
  if ((ml2 >= 4) && (gain2 > gain1)) {
- matchLength = ml2, offset = offset2, start = ip;
+ matchLength = ml2, offBase = ofbCandidate, start = ip;
  continue; /* search a better one */
  } }

  /* let's find an even better one */
  if ((depth==2) && (ip<ilimit)) {
+ DEBUGLOG(7, "search depth 2");
  ip ++;
  if ( (dictMode == ZSTD_noDict)
- && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+ && (offBase) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
  size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
  int const gain2 = (int)(mlRep * 4);
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 1);
  if ((mlRep >= 4) && (gain2 > gain1))
- matchLength = mlRep, offset = 0, start = ip;
+ matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip;
  }
- if (dictMode == ZSTD_dictMatchState) {
+ if (isDxS) {
  const U32 repIndex = (U32)(ip - base) - offset_1;
  const BYTE* repMatch = repIndex < prefixLowestIndex ?
  dictBase + (repIndex - dictIndexDelta) :
@@ -771,64 +1660,69 @@ ZSTD_compressBlock_lazy_generic(
  const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
  size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
  int const gain2 = (int)(mlRep * 4);
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 1);
  if ((mlRep >= 4) && (gain2 > gain1))
- matchLength = mlRep, offset = 0, start = ip;
+ matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip;
  }
  }
- { size_t offset2=999999999;
- size_t const ml2 = searchMax(ms, ip, iend, &offset2);
- int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
+ { size_t ofbCandidate=999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, dictMode);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 7);
  if ((ml2 >= 4) && (gain2 > gain1)) {
- matchLength = ml2, offset = offset2, start = ip;
+ matchLength = ml2, offBase = ofbCandidate, start = ip;
  continue;
  } } }
  break; /* nothing found : store previous solution */
  }

  /* NOTE:
- * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior.
- * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which
- * overflows the pointer, which is undefined behavior.
+ * Pay attention that `start[-value]` can lead to strange undefined behavior
+ * notably if `value` is unsigned, resulting in a large positive `-value`.
  */
  /* catch up */
- if (offset) {
+ if (OFFBASE_IS_OFFSET(offBase)) {
  if (dictMode == ZSTD_noDict) {
- while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > prefixLowest))
- && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) ) /* only search for offset within prefix */
+ while ( ((start > anchor) & (start - OFFBASE_TO_OFFSET(offBase) > prefixLowest))
+ && (start[-1] == (start-OFFBASE_TO_OFFSET(offBase))[-1]) ) /* only search for offset within prefix */
  { start--; matchLength++; }
  }
- if (dictMode == ZSTD_dictMatchState) {
- U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
+ if (isDxS) {
+ U32 const matchIndex = (U32)((size_t)(start-base) - OFFBASE_TO_OFFSET(offBase));
  const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex;
  const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest;
  while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */
  }
- offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
+ offset_2 = offset_1; offset_1 = (U32)OFFBASE_TO_OFFSET(offBase);
  }
  /* store sequence */
 _storeSequence:
- { size_t const litLength = start - anchor;
- ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);
+ { size_t const litLength = (size_t)(start - anchor);
+ ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offBase, matchLength);
  anchor = ip = start + matchLength;
  }
+ if (ms->lazySkipping) {
+ /* We've found a match, disable lazy skipping mode, and refill the hash cache. */
+ if (searchMethod == search_rowHash) {
+ ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit);
+ }
+ ms->lazySkipping = 0;
+ }

  /* check immediate repcode */
- if (dictMode == ZSTD_dictMatchState) {
+ if (isDxS) {
  while (ip <= ilimit) {
  U32 const current2 = (U32)(ip-base);
  U32 const repIndex = current2 - offset_2;
- const BYTE* repMatch = dictMode == ZSTD_dictMatchState
- && repIndex < prefixLowestIndex ?
+ const BYTE* repMatch = repIndex < prefixLowestIndex ?
  dictBase - dictIndexDelta + repIndex :
  base + repIndex;
  if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */)
  && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
  const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend;
  matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;
- offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset_2 <=> offset_1 */
- ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
+ offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase; /* swap offset_2 <=> offset_1 */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength);
  ip += matchLength;
  anchor = ip;
  continue;
@@ -842,16 +1736,20 @@ _storeSequence:
  && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
  /* store sequence */
  matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
- offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap repcodes */
- ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
+ offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase; /* swap repcodes */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength);
  ip += matchLength;
  anchor = ip;
  continue; /* faster when present ... (?) */
  } } }

- /* Save reps for next block */
- rep[0] = offset_1 ? offset_1 : savedOffset;
- rep[1] = offset_2 ? offset_2 : savedOffset;
+ /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0),
+ * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */
+ offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2;
+
+ /* save reps for next block */
+ rep[0] = offset_1 ? offset_1 : offsetSaved1;
+ rep[1] = offset_2 ? offset_2 : offsetSaved2;

  /* Return the last literals size */
  return (size_t)(iend - anchor);
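A worked example of the lazy-skipping threshold used in the loop above, assuming the internal constants kSearchStrength == 8 and kLazySkippingStep == 8 (their values in zstd_compress_internal.h; treat them as assumptions here): step = (distance-since-last-match >> 8) + 1 first exceeds 8 at a distance of 2048 bytes, which matches the "2KB without finding any matches" trigger the comment describes. A runnable check:

    #include <assert.h>
    #include <stddef.h>

    int main(void)
    {
        size_t const kSearchStrength = 8, kLazySkippingStep = 8; /* assumed values */
        size_t gap;
        for (gap = 0; gap < 4096; gap++) {
            size_t const step = (gap >> kSearchStrength) + 1;
            /* lazy skipping engages exactly once 2KB pass without a match */
            assert((step > kLazySkippingStep) == (gap >= 2048));
        }
        return 0;
    }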
@@ -915,6 +1813,92 @@ size_t ZSTD_compressBlock_greedy_dictMatchState(
  }


+ size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch);
+ }
+
+ size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dedicatedDictSearch);
+ }
+
+ size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch);
+ }
+
+ /* Row-based matchfinder */
+ size_t ZSTD_compressBlock_lazy2_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_noDict);
+ }
+
+ size_t ZSTD_compressBlock_lazy_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_noDict);
+ }
+
+ size_t ZSTD_compressBlock_greedy_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_noDict);
+ }
+
+ size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dictMatchState);
+ }
+
+ size_t ZSTD_compressBlock_lazy_dictMatchState_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dictMatchState);
+ }
+
+ size_t ZSTD_compressBlock_greedy_dictMatchState_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dictMatchState);
+ }
+
+
+ size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dedicatedDictSearch);
+ }
+
+ size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dedicatedDictSearch);
+ }
+
+ size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dedicatedDictSearch);
+ }
1899
+ return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dedicatedDictSearch);
1900
+ }
1901
+
918
1902
  FORCE_INLINE_TEMPLATE
919
1903
  size_t ZSTD_compressBlock_lazy_extDict_generic(
920
1904
  ZSTD_matchState_t* ms, seqStore_t* seqStore,
@@ -926,37 +1910,50 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
926
1910
  const BYTE* ip = istart;
927
1911
  const BYTE* anchor = istart;
928
1912
  const BYTE* const iend = istart + srcSize;
929
- const BYTE* const ilimit = iend - 8;
1913
+ const BYTE* const ilimit = searchMethod == search_rowHash ? iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8;
930
1914
  const BYTE* const base = ms->window.base;
931
1915
  const U32 dictLimit = ms->window.dictLimit;
932
- const U32 lowestIndex = ms->window.lowLimit;
933
1916
  const BYTE* const prefixStart = base + dictLimit;
934
1917
  const BYTE* const dictBase = ms->window.dictBase;
935
1918
  const BYTE* const dictEnd = dictBase + dictLimit;
936
- const BYTE* const dictStart = dictBase + lowestIndex;
937
-
938
- typedef size_t (*searchMax_f)(
939
- ZSTD_matchState_t* ms,
940
- const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
941
- searchMax_f searchMax = searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS;
1919
+ const BYTE* const dictStart = dictBase + ms->window.lowLimit;
1920
+ const U32 windowLog = ms->cParams.windowLog;
1921
+ const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6);
1922
+ const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);
942
1923
 
943
1924
  U32 offset_1 = rep[0], offset_2 = rep[1];
944
1925
 
1926
+ DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic (searchFunc=%u)", (U32)searchMethod);
1927
+
1928
+ /* Reset the lazy skipping state */
1929
+ ms->lazySkipping = 0;
1930
+
945
1931
  /* init */
946
1932
  ip += (ip == prefixStart);
1933
+ if (searchMethod == search_rowHash) {
1934
+ ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit);
1935
+ }
947
1936
 
948
1937
  /* Match Loop */
1938
+ #if defined(__GNUC__) && defined(__x86_64__)
1939
+ /* I've measured random a 5% speed loss on levels 5 & 6 (greedy) when the
1940
+ * code alignment is perturbed. To fix the instability align the loop on 32-bytes.
1941
+ */
1942
+ __asm__(".p2align 5");
1943
+ #endif
949
1944
  while (ip < ilimit) {
950
1945
  size_t matchLength=0;
951
- size_t offset=0;
1946
+ size_t offBase = REPCODE1_TO_OFFBASE;
952
1947
  const BYTE* start=ip+1;
953
- U32 current = (U32)(ip-base);
1948
+ U32 curr = (U32)(ip-base);
954
1949
 
955
1950
  /* check repCode */
956
- { const U32 repIndex = (U32)(current+1 - offset_1);
1951
+ { const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr+1, windowLog);
1952
+ const U32 repIndex = (U32)(curr+1 - offset_1);
957
1953
  const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
958
1954
  const BYTE* const repMatch = repBase + repIndex;
959
- if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
1955
+ if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow */
1956
+ & (offset_1 <= curr+1 - windowLow) ) /* note: we are searching at curr+1 */
960
1957
  if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
961
1958
  /* repcode detected we should take it */
962
1959
  const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
@@ -965,14 +1962,23 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
  } }

  /* first search (depth 0) */
- { size_t offsetFound = 999999999;
- size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
+ { size_t ofbCandidate = 999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict);
  if (ml2 > matchLength)
- matchLength = ml2, start = ip, offset=offsetFound;
+ matchLength = ml2, start = ip, offBase = ofbCandidate;
  }

- if (matchLength < 4) {
- ip += ((ip-anchor) >> kSearchStrength) + 1; /* jump faster over incompressible sections */
+ if (matchLength < 4) {
+ size_t const step = ((size_t)(ip-anchor) >> kSearchStrength);
+ ip += step + 1; /* jump faster over incompressible sections */
+ /* Enter the lazy skipping mode once we are skipping more than 8 bytes at a time.
+ * In this mode we stop inserting every position into our tables, and only insert
+ * positions that we search, which is one in step positions.
+ * The exact cutoff is flexible, I've just chosen a number that is reasonably high,
+ * so we minimize the compression ratio loss in "normal" scenarios. This mode gets
+ * triggered once we've gone 2KB without finding any matches.
+ */
+ ms->lazySkipping = step > kLazySkippingStep;
  continue;
  }

@@ -980,93 +1986,107 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
  if (depth>=1)
  while (ip<ilimit) {
  ip ++;
- current++;
+ curr++;
  /* check repCode */
- if (offset) {
- const U32 repIndex = (U32)(current - offset_1);
+ if (offBase) {
+ const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
+ const U32 repIndex = (U32)(curr - offset_1);
  const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
  const BYTE* const repMatch = repBase + repIndex;
- if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
+ if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
+ & (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
  if (MEM_read32(ip) == MEM_read32(repMatch)) {
  /* repcode detected */
  const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
  size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
  int const gain2 = (int)(repLength * 3);
- int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+ int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offBase) + 1);
  if ((repLength >= 4) && (gain2 > gain1))
- matchLength = repLength, offset = 0, start = ip;
+ matchLength = repLength, offBase = REPCODE1_TO_OFFBASE, start = ip;
  } }

  /* search match, depth 1 */
- { size_t offset2=999999999;
- size_t const ml2 = searchMax(ms, ip, iend, &offset2);
- int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
+ { size_t ofbCandidate = 999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 4);
  if ((ml2 >= 4) && (gain2 > gain1)) {
- matchLength = ml2, offset = offset2, start = ip;
+ matchLength = ml2, offBase = ofbCandidate, start = ip;
  continue; /* search a better one */
  } }

  /* let's find an even better one */
  if ((depth==2) && (ip<ilimit)) {
  ip ++;
- current++;
+ curr++;
  /* check repCode */
- if (offset) {
- const U32 repIndex = (U32)(current - offset_1);
+ if (offBase) {
+ const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
+ const U32 repIndex = (U32)(curr - offset_1);
  const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
  const BYTE* const repMatch = repBase + repIndex;
- if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
+ if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
+ & (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
  if (MEM_read32(ip) == MEM_read32(repMatch)) {
  /* repcode detected */
  const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
  size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
  int const gain2 = (int)(repLength * 4);
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 1);
  if ((repLength >= 4) && (gain2 > gain1))
- matchLength = repLength, offset = 0, start = ip;
+ matchLength = repLength, offBase = REPCODE1_TO_OFFBASE, start = ip;
  } }

  /* search match, depth 2 */
- { size_t offset2=999999999;
- size_t const ml2 = searchMax(ms, ip, iend, &offset2);
- int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1)); /* raw approx */
- int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
+ { size_t ofbCandidate = 999999999;
+ size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict);
+ int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate)); /* raw approx */
+ int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 7);
  if ((ml2 >= 4) && (gain2 > gain1)) {
- matchLength = ml2, offset = offset2, start = ip;
+ matchLength = ml2, offBase = ofbCandidate, start = ip;
  continue;
  } } }
  break; /* nothing found : store previous solution */
  }

  /* catch up */
- if (offset) {
- U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
+ if (OFFBASE_IS_OFFSET(offBase)) {
+ U32 const matchIndex = (U32)((size_t)(start-base) - OFFBASE_TO_OFFSET(offBase));
  const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
  const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
  while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; } /* catch up */
- offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
+ offset_2 = offset_1; offset_1 = (U32)OFFBASE_TO_OFFSET(offBase);
  }

  /* store sequence */
 _storeSequence:
- { size_t const litLength = start - anchor;
- ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);
+ { size_t const litLength = (size_t)(start - anchor);
+ ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offBase, matchLength);
  anchor = ip = start + matchLength;
  }
+ if (ms->lazySkipping) {
+ /* We've found a match, disable lazy skipping mode, and refill the hash cache. */
+ if (searchMethod == search_rowHash) {
+ ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit);
+ }
+ ms->lazySkipping = 0;
+ }

  /* check immediate repcode */
  while (ip <= ilimit) {
- const U32 repIndex = (U32)((ip-base) - offset_2);
+ const U32 repCurrent = (U32)(ip-base);
+ const U32 windowLow = ZSTD_getLowestMatchIndex(ms, repCurrent, windowLog);
+ const U32 repIndex = repCurrent - offset_2;
  const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
  const BYTE* const repMatch = repBase + repIndex;
- if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > lowestIndex)) /* intentional overflow */
+ if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
+ & (offset_2 <= repCurrent - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
  if (MEM_read32(ip) == MEM_read32(repMatch)) {
  /* repcode detected we should take it */
  const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
  matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
- offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset; /* swap offset history */
- ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
+ offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase; /* swap offset history */
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength);
  ip += matchLength;
  anchor = ip;
  continue; /* faster when present ... (?) */
@@ -1113,3 +2133,25 @@ size_t ZSTD_compressBlock_btlazy2_extDict(
  {
  return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);
  }
+
+ size_t ZSTD_compressBlock_greedy_extDict_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0);
+ }
+
+ size_t ZSTD_compressBlock_lazy_extDict_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1);
+ }
+
+ size_t ZSTD_compressBlock_lazy2_extDict_row(
+ ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+ void const* src, size_t srcSize)
+ {
+ return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2);
+ }