extzstd 0.3.2 → 0.3.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (108)
  1. checksums.yaml +4 -4
  2. data/README.md +3 -3
  3. data/contrib/zstd/CHANGELOG +188 -1
  4. data/contrib/zstd/CONTRIBUTING.md +157 -74
  5. data/contrib/zstd/LICENSE +4 -4
  6. data/contrib/zstd/Makefile +81 -58
  7. data/contrib/zstd/Package.swift +36 -0
  8. data/contrib/zstd/README.md +59 -35
  9. data/contrib/zstd/TESTING.md +2 -3
  10. data/contrib/zstd/appveyor.yml +49 -136
  11. data/contrib/zstd/lib/BUCK +5 -7
  12. data/contrib/zstd/lib/Makefile +87 -181
  13. data/contrib/zstd/lib/README.md +23 -6
  14. data/contrib/zstd/lib/common/allocations.h +55 -0
  15. data/contrib/zstd/lib/common/bits.h +200 -0
  16. data/contrib/zstd/lib/common/bitstream.h +33 -59
  17. data/contrib/zstd/lib/common/compiler.h +115 -45
  18. data/contrib/zstd/lib/common/cpu.h +1 -1
  19. data/contrib/zstd/lib/common/debug.c +1 -1
  20. data/contrib/zstd/lib/common/debug.h +1 -1
  21. data/contrib/zstd/lib/common/entropy_common.c +15 -37
  22. data/contrib/zstd/lib/common/error_private.c +9 -2
  23. data/contrib/zstd/lib/common/error_private.h +82 -3
  24. data/contrib/zstd/lib/common/fse.h +9 -85
  25. data/contrib/zstd/lib/common/fse_decompress.c +29 -111
  26. data/contrib/zstd/lib/common/huf.h +84 -172
  27. data/contrib/zstd/lib/common/mem.h +58 -49
  28. data/contrib/zstd/lib/common/pool.c +37 -16
  29. data/contrib/zstd/lib/common/pool.h +9 -3
  30. data/contrib/zstd/lib/common/portability_macros.h +156 -0
  31. data/contrib/zstd/lib/common/threading.c +68 -14
  32. data/contrib/zstd/lib/common/threading.h +5 -10
  33. data/contrib/zstd/lib/common/xxhash.c +7 -809
  34. data/contrib/zstd/lib/common/xxhash.h +5568 -167
  35. data/contrib/zstd/lib/common/zstd_common.c +1 -36
  36. data/contrib/zstd/lib/common/zstd_deps.h +1 -1
  37. data/contrib/zstd/lib/common/zstd_internal.h +64 -150
  38. data/contrib/zstd/lib/common/zstd_trace.h +163 -0
  39. data/contrib/zstd/lib/compress/clevels.h +134 -0
  40. data/contrib/zstd/lib/compress/fse_compress.c +69 -150
  41. data/contrib/zstd/lib/compress/hist.c +1 -1
  42. data/contrib/zstd/lib/compress/hist.h +1 -1
  43. data/contrib/zstd/lib/compress/huf_compress.c +773 -251
  44. data/contrib/zstd/lib/compress/zstd_compress.c +2650 -826
  45. data/contrib/zstd/lib/compress/zstd_compress_internal.h +509 -180
  46. data/contrib/zstd/lib/compress/zstd_compress_literals.c +117 -40
  47. data/contrib/zstd/lib/compress/zstd_compress_literals.h +16 -6
  48. data/contrib/zstd/lib/compress/zstd_compress_sequences.c +28 -19
  49. data/contrib/zstd/lib/compress/zstd_compress_sequences.h +1 -1
  50. data/contrib/zstd/lib/compress/zstd_compress_superblock.c +33 -305
  51. data/contrib/zstd/lib/compress/zstd_compress_superblock.h +1 -1
  52. data/contrib/zstd/lib/compress/zstd_cwksp.h +266 -85
  53. data/contrib/zstd/lib/compress/zstd_double_fast.c +369 -132
  54. data/contrib/zstd/lib/compress/zstd_double_fast.h +3 -2
  55. data/contrib/zstd/lib/compress/zstd_fast.c +722 -258
  56. data/contrib/zstd/lib/compress/zstd_fast.h +3 -2
  57. data/contrib/zstd/lib/compress/zstd_lazy.c +1105 -360
  58. data/contrib/zstd/lib/compress/zstd_lazy.h +41 -1
  59. data/contrib/zstd/lib/compress/zstd_ldm.c +272 -208
  60. data/contrib/zstd/lib/compress/zstd_ldm.h +3 -2
  61. data/contrib/zstd/lib/compress/zstd_ldm_geartab.h +106 -0
  62. data/contrib/zstd/lib/compress/zstd_opt.c +324 -197
  63. data/contrib/zstd/lib/compress/zstd_opt.h +1 -1
  64. data/contrib/zstd/lib/compress/zstdmt_compress.c +109 -53
  65. data/contrib/zstd/lib/compress/zstdmt_compress.h +9 -6
  66. data/contrib/zstd/lib/decompress/huf_decompress.c +1071 -539
  67. data/contrib/zstd/lib/decompress/huf_decompress_amd64.S +576 -0
  68. data/contrib/zstd/lib/decompress/zstd_ddict.c +4 -4
  69. data/contrib/zstd/lib/decompress/zstd_ddict.h +1 -1
  70. data/contrib/zstd/lib/decompress/zstd_decompress.c +507 -82
  71. data/contrib/zstd/lib/decompress/zstd_decompress_block.c +962 -310
  72. data/contrib/zstd/lib/decompress/zstd_decompress_block.h +14 -3
  73. data/contrib/zstd/lib/decompress/zstd_decompress_internal.h +54 -6
  74. data/contrib/zstd/lib/deprecated/zbuff.h +1 -1
  75. data/contrib/zstd/lib/deprecated/zbuff_common.c +1 -1
  76. data/contrib/zstd/lib/deprecated/zbuff_compress.c +24 -4
  77. data/contrib/zstd/lib/deprecated/zbuff_decompress.c +3 -1
  78. data/contrib/zstd/lib/dictBuilder/cover.c +44 -32
  79. data/contrib/zstd/lib/dictBuilder/cover.h +6 -5
  80. data/contrib/zstd/lib/dictBuilder/divsufsort.c +1 -1
  81. data/contrib/zstd/lib/dictBuilder/fastcover.c +24 -16
  82. data/contrib/zstd/lib/dictBuilder/zdict.c +88 -95
  83. data/contrib/zstd/lib/legacy/zstd_legacy.h +8 -1
  84. data/contrib/zstd/lib/legacy/zstd_v01.c +16 -53
  85. data/contrib/zstd/lib/legacy/zstd_v01.h +1 -1
  86. data/contrib/zstd/lib/legacy/zstd_v02.c +24 -69
  87. data/contrib/zstd/lib/legacy/zstd_v02.h +1 -1
  88. data/contrib/zstd/lib/legacy/zstd_v03.c +25 -72
  89. data/contrib/zstd/lib/legacy/zstd_v03.h +1 -1
  90. data/contrib/zstd/lib/legacy/zstd_v04.c +23 -69
  91. data/contrib/zstd/lib/legacy/zstd_v04.h +1 -1
  92. data/contrib/zstd/lib/legacy/zstd_v05.c +35 -85
  93. data/contrib/zstd/lib/legacy/zstd_v05.h +1 -1
  94. data/contrib/zstd/lib/legacy/zstd_v06.c +42 -87
  95. data/contrib/zstd/lib/legacy/zstd_v06.h +1 -1
  96. data/contrib/zstd/lib/legacy/zstd_v07.c +35 -82
  97. data/contrib/zstd/lib/legacy/zstd_v07.h +1 -1
  98. data/contrib/zstd/lib/libzstd.mk +214 -0
  99. data/contrib/zstd/lib/libzstd.pc.in +4 -3
  100. data/contrib/zstd/lib/module.modulemap +35 -0
  101. data/contrib/zstd/lib/{dictBuilder/zdict.h → zdict.h} +202 -33
  102. data/contrib/zstd/lib/zstd.h +922 -293
  103. data/contrib/zstd/lib/{common/zstd_errors.h → zstd_errors.h} +27 -8
  104. data/ext/extconf.rb +7 -6
  105. data/ext/extzstd.c +13 -10
  106. data/ext/libzstd_conf.h +0 -1
  107. data/ext/zstd_decompress_asm.S +1 -0
  108. metadata +16 -5
data/contrib/zstd/lib/compress/zstd_lazy.c (item 57 in the list above):

@@ -1,5 +1,5 @@
  /*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -10,6 +10,9 @@
 
  #include "zstd_compress_internal.h"
  #include "zstd_lazy.h"
+ #include "../common/bits.h" /* ZSTD_countTrailingZeros64 */
+
+ #define kLazySkippingStep 8
 
 
  /*-*************************************
@@ -61,7 +64,7 @@ ZSTD_updateDUBT(ZSTD_matchState_t* ms,
  * assumption : curr >= btlow == (curr - btmask)
  * doesn't fail */
  static void
- ZSTD_insertDUBT1(ZSTD_matchState_t* ms,
+ ZSTD_insertDUBT1(const ZSTD_matchState_t* ms,
  U32 curr, const BYTE* inputEnd,
  U32 nbCompares, U32 btLow,
  const ZSTD_dictMode_e dictMode)
@@ -93,7 +96,7 @@ ZSTD_insertDUBT1(ZSTD_matchState_t* ms,
  assert(curr >= btLow);
  assert(ip < iend); /* condition for ZSTD_count */
 
- while (nbCompares-- && (matchIndex > windowLow)) {
+ for (; nbCompares && (matchIndex > windowLow); --nbCompares) {
  U32* const nextPtr = bt + 2*(matchIndex & btMask);
  size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
  assert(matchIndex < curr);
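Note: the `while (nbCompares-- && ...)` to `for (; nbCompares && ...; --nbCompares)` rewrite here (and in the later hunks) is not cosmetic. With the postfix form, `nbCompares--` fires once more on the iteration that exits the loop, so when the counter reaches zero it wraps to UINT_MAX; the leftover value is later handed to ZSTD_DUBT_findBetterDictMatch() as the remaining search budget, and a later hunk adds `assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX))` to check exactly this. A minimal standalone demonstration (not zstd code):

    #include <stdio.h>

    int main(void)
    {
        unsigned nbCompares = 0;
        while (nbCompares-- && 0) { }              /* old pattern: decrements even on exit */
        printf("after while: %u\n", nbCompares);   /* prints 4294967295 */

        nbCompares = 0;
        for (; nbCompares && 0; --nbCompares) { }  /* new pattern: never underflows */
        printf("after for:   %u\n", nbCompares);   /* prints 0 */
        return 0;
    }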
@@ -151,7 +154,7 @@ ZSTD_insertDUBT1(ZSTD_matchState_t* ms,
 
  static size_t
  ZSTD_DUBT_findBetterDictMatch (
- ZSTD_matchState_t* ms,
+ const ZSTD_matchState_t* ms,
  const BYTE* const ip, const BYTE* const iend,
  size_t* offsetPtr,
  size_t bestLength,
@@ -185,7 +188,7 @@ ZSTD_DUBT_findBetterDictMatch (
  (void)dictMode;
  assert(dictMode == ZSTD_dictMatchState);
 
- while (nbCompares-- && (dictMatchIndex > dictLowLimit)) {
+ for (; nbCompares && (dictMatchIndex > dictLowLimit); --nbCompares) {
  U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask);
  size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
  const BYTE* match = dictBase + dictMatchIndex;
@@ -197,8 +200,8 @@ ZSTD_DUBT_findBetterDictMatch (
  U32 matchIndex = dictMatchIndex + dictIndexDelta;
  if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) {
  DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)",
- curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, ZSTD_REP_MOVE + curr - matchIndex, dictMatchIndex, matchIndex);
- bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
+ curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, OFFSET_TO_OFFBASE(curr - matchIndex), dictMatchIndex, matchIndex);
+ bestLength = matchLength, *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex);
  }
  if (ip+matchLength == iend) { /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */
  break; /* drop, to guarantee consistency (miss a little bit of compression) */
@@ -218,7 +221,7 @@ ZSTD_DUBT_findBetterDictMatch (
  }
 
  if (bestLength >= MINMATCH) {
- U32 const mIndex = curr - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
+ U32 const mIndex = curr - (U32)OFFBASE_TO_OFFSET(*offsetPtr); (void)mIndex;
  DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
  curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
  }
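Note: this hunk is part of a cross-cutting change of offset convention in the 1.5.x line. Previously a real offset was stored as `offset + ZSTD_REP_MOVE`, with repcodes occupying the codes 0..2; the new code stores an "offBase" of `offset + ZSTD_REP_NUM`, reserving 1..ZSTD_REP_NUM for repcodes, and converts back with OFFBASE_TO_OFFSET(). A minimal sketch of the convention, assuming ZSTD_REP_NUM == 3 as in zstd_internal.h (the helper names below are illustrative, not the actual macros from zstd_compress_internal.h):

    #include <assert.h>
    #include <stddef.h>

    #define ZSTD_REP_NUM 3  /* number of repcodes zstd tracks */

    /* a real match distance N > 0 is stored as N + ZSTD_REP_NUM ... */
    static size_t offset_to_offbase(size_t offset)
    {
        assert(offset > 0);
        return offset + ZSTD_REP_NUM;
    }

    /* ... so repcode values 1..ZSTD_REP_NUM and real offsets never collide */
    static size_t offbase_to_offset(size_t offbase)
    {
        assert(offbase > ZSTD_REP_NUM);
        return offbase - ZSTD_REP_NUM;
    }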
@@ -230,7 +233,7 @@ ZSTD_DUBT_findBetterDictMatch (
  static size_t
  ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
  const BYTE* const ip, const BYTE* const iend,
- size_t* offsetPtr,
+ size_t* offBasePtr,
  U32 const mls,
  const ZSTD_dictMode_e dictMode)
  {
@@ -309,7 +312,7 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
  matchIndex = hashTable[h];
  hashTable[h] = curr; /* Update Hash Table */
 
- while (nbCompares-- && (matchIndex > windowLow)) {
+ for (; nbCompares && (matchIndex > windowLow); --nbCompares) {
  U32* const nextPtr = bt + 2*(matchIndex & btMask);
  size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger); /* guaranteed minimum nb of common bytes */
  const BYTE* match;
@@ -327,8 +330,8 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
  if (matchLength > bestLength) {
  if (matchLength > matchEndIdx - matchIndex)
  matchEndIdx = matchIndex + (U32)matchLength;
- if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) )
- bestLength = matchLength, *offsetPtr = ZSTD_REP_MOVE + curr - matchIndex;
+ if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((U32)*offBasePtr)) )
+ bestLength = matchLength, *offBasePtr = OFFSET_TO_OFFBASE(curr - matchIndex);
  if (ip+matchLength == iend) { /* equal : no way to know if inf or sup */
  if (dictMode == ZSTD_dictMatchState) {
  nbCompares = 0; /* in addition to avoiding checking any
@@ -357,19 +360,20 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
 
  *smallerPtr = *largerPtr = 0;
 
+ assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
  if (dictMode == ZSTD_dictMatchState && nbCompares) {
  bestLength = ZSTD_DUBT_findBetterDictMatch(
  ms, ip, iend,
- offsetPtr, bestLength, nbCompares,
+ offBasePtr, bestLength, nbCompares,
  mls, dictMode);
  }
 
  assert(matchEndIdx > curr+8); /* ensure nextToUpdate is increased */
  ms->nextToUpdate = matchEndIdx - 8; /* skip repetitive patterns */
  if (bestLength >= MINMATCH) {
- U32 const mIndex = curr - ((U32)*offsetPtr - ZSTD_REP_MOVE); (void)mIndex;
+ U32 const mIndex = curr - (U32)OFFBASE_TO_OFFSET(*offBasePtr); (void)mIndex;
  DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
- curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
+ curr, (U32)bestLength, (U32)*offBasePtr, mIndex);
  }
  return bestLength;
  }
@@ -380,101 +384,19 @@ ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
  FORCE_INLINE_TEMPLATE size_t
  ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
  const BYTE* const ip, const BYTE* const iLimit,
- size_t* offsetPtr,
+ size_t* offBasePtr,
  const U32 mls /* template */,
  const ZSTD_dictMode_e dictMode)
  {
  DEBUGLOG(7, "ZSTD_BtFindBestMatch");
  if (ip < ms->window.base + ms->nextToUpdate) return 0; /* skipped area */
  ZSTD_updateDUBT(ms, ip, iLimit, mls);
- return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offsetPtr, mls, dictMode);
- }
-
-
- static size_t
- ZSTD_BtFindBestMatch_selectMLS ( ZSTD_matchState_t* ms,
- const BYTE* ip, const BYTE* const iLimit,
- size_t* offsetPtr)
- {
- switch(ms->cParams.minMatch)
- {
- default : /* includes case 3 */
- case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
- case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
- case 7 :
- case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
- }
- }
-
-
- static size_t ZSTD_BtFindBestMatch_dictMatchState_selectMLS (
- ZSTD_matchState_t* ms,
- const BYTE* ip, const BYTE* const iLimit,
- size_t* offsetPtr)
- {
- switch(ms->cParams.minMatch)
- {
- default : /* includes case 3 */
- case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
- case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
- case 7 :
- case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
- }
- }
-
-
- static size_t ZSTD_BtFindBestMatch_extDict_selectMLS (
- ZSTD_matchState_t* ms,
- const BYTE* ip, const BYTE* const iLimit,
- size_t* offsetPtr)
- {
- switch(ms->cParams.minMatch)
- {
- default : /* includes case 3 */
- case 4 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
- case 5 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
- case 7 :
- case 6 : return ZSTD_BtFindBestMatch(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
- }
+ return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offBasePtr, mls, dictMode);
  }
 
-
-
- /* *********************************
- * Hash Chain
+ /***********************************
+ * Dedicated dict search
  ***********************************/
- #define NEXT_IN_CHAIN(d, mask) chainTable[(d) & (mask)]
-
- /* Update chains up to ip (excluded)
- Assumption : always within prefix (i.e. not within extDict) */
- FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal(
- ZSTD_matchState_t* ms,
- const ZSTD_compressionParameters* const cParams,
- const BYTE* ip, U32 const mls)
- {
- U32* const hashTable = ms->hashTable;
- const U32 hashLog = cParams->hashLog;
- U32* const chainTable = ms->chainTable;
- const U32 chainMask = (1 << cParams->chainLog) - 1;
- const BYTE* const base = ms->window.base;
- const U32 target = (U32)(ip - base);
- U32 idx = ms->nextToUpdate;
-
- while(idx < target) { /* catch up */
- size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
- NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
- hashTable[h] = idx;
- idx++;
- }
-
- ms->nextToUpdate = target;
- return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
- }
-
- U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
- const ZSTD_compressionParameters* const cParams = &ms->cParams;
- return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch);
- }
 
  void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip)
  {
@@ -484,7 +406,7 @@ void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const B
  U32* const chainTable = ms->chainTable;
  U32 const chainSize = 1 << ms->cParams.chainLog;
  U32 idx = ms->nextToUpdate;
- U32 const minChain = chainSize < target ? target - chainSize : idx;
+ U32 const minChain = chainSize < target - idx ? target - chainSize : idx;
  U32 const bucketSize = 1 << ZSTD_LAZY_DDSS_BUCKET_LOG;
  U32 const cacheSize = bucketSize - 1;
  U32 const chainAttempts = (1 << ms->cParams.searchLog) - cacheSize;
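Note: the one-line `minChain` change above guards the case where only part of the table remains to be built (idx = ms->nextToUpdate > 0). The old condition compared chainSize against target alone and could place minChain below idx; the new condition `chainSize < target - idx` guarantees minChain >= idx. A worked example with hypothetical values:

    #include <stdio.h>

    int main(void)
    {
        unsigned const chainSize = 4, target = 10, idx = 8;  /* hypothetical values */
        unsigned const oldMinChain = chainSize < target       ? target - chainSize : idx;
        unsigned const newMinChain = chainSize < target - idx ? target - chainSize : idx;
        printf("old minChain = %u (below idx = %u)\n", oldMinChain, idx); /* 6 */
        printf("new minChain = %u (clamped to idx)\n", newMinChain);      /* 8 */
        return 0;
    }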
@@ -498,13 +420,12 @@ void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const B
  U32 const hashLog = ms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG;
  U32* const tmpHashTable = hashTable;
  U32* const tmpChainTable = hashTable + ((size_t)1 << hashLog);
- U32 const tmpChainSize = ((1 << ZSTD_LAZY_DDSS_BUCKET_LOG) - 1) << hashLog;
+ U32 const tmpChainSize = (U32)((1 << ZSTD_LAZY_DDSS_BUCKET_LOG) - 1) << hashLog;
  U32 const tmpMinChain = tmpChainSize < target ? target - tmpChainSize : idx;
-
  U32 hashIdx;
 
  assert(ms->cParams.chainLog <= 24);
- assert(ms->cParams.hashLog >= ms->cParams.chainLog);
+ assert(ms->cParams.hashLog > ms->cParams.chainLog);
  assert(idx != 0);
  assert(tmpMinChain <= minChain);
 
@@ -535,7 +456,7 @@ void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const B
  if (count == cacheSize) {
  for (count = 0; count < chainLimit;) {
  if (i < minChain) {
- if (!i || countBeyondMinChain++ > cacheSize) {
+ if (!i || ++countBeyondMinChain > cacheSize) {
  /* only allow pulling `cacheSize` number of entries
  * into the cache or chainTable beyond `minChain`,
  * to replace the entries pulled out of the
@@ -591,10 +512,146 @@ void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const B
  ms->nextToUpdate = target;
  }
 
+ /* Returns the longest match length found in the dedicated dict search structure.
+ * If none are longer than the argument ml, then ml will be returned.
+ */
+ FORCE_INLINE_TEMPLATE
+ size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nbAttempts,
+ const ZSTD_matchState_t* const dms,
+ const BYTE* const ip, const BYTE* const iLimit,
+ const BYTE* const prefixStart, const U32 curr,
+ const U32 dictLimit, const size_t ddsIdx) {
+ const U32 ddsLowestIndex = dms->window.dictLimit;
+ const BYTE* const ddsBase = dms->window.base;
+ const BYTE* const ddsEnd = dms->window.nextSrc;
+ const U32 ddsSize = (U32)(ddsEnd - ddsBase);
+ const U32 ddsIndexDelta = dictLimit - ddsSize;
+ const U32 bucketSize = (1 << ZSTD_LAZY_DDSS_BUCKET_LOG);
+ const U32 bucketLimit = nbAttempts < bucketSize - 1 ? nbAttempts : bucketSize - 1;
+ U32 ddsAttempt;
+ U32 matchIndex;
+
+ for (ddsAttempt = 0; ddsAttempt < bucketSize - 1; ddsAttempt++) {
+ PREFETCH_L1(ddsBase + dms->hashTable[ddsIdx + ddsAttempt]);
+ }
+
+ {
+ U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
+ U32 const chainIndex = chainPackedPointer >> 8;
+
+ PREFETCH_L1(&dms->chainTable[chainIndex]);
+ }
+
+ for (ddsAttempt = 0; ddsAttempt < bucketLimit; ddsAttempt++) {
+ size_t currentMl=0;
+ const BYTE* match;
+ matchIndex = dms->hashTable[ddsIdx + ddsAttempt];
+ match = ddsBase + matchIndex;
+
+ if (!matchIndex) {
+ return ml;
+ }
+
+ /* guaranteed by table construction */
+ (void)ddsLowestIndex;
+ assert(matchIndex >= ddsLowestIndex);
+ assert(match+4 <= ddsEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) {
+ /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
+ }
+
+ /* save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + ddsIndexDelta));
+ if (ip+currentMl == iLimit) {
+ /* best possible, avoids read overflow on next attempt */
+ return ml;
+ }
+ }
+ }
+
+ {
+ U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
+ U32 chainIndex = chainPackedPointer >> 8;
+ U32 const chainLength = chainPackedPointer & 0xFF;
+ U32 const chainAttempts = nbAttempts - ddsAttempt;
+ U32 const chainLimit = chainAttempts > chainLength ? chainLength : chainAttempts;
+ U32 chainAttempt;
+
+ for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++) {
+ PREFETCH_L1(ddsBase + dms->chainTable[chainIndex + chainAttempt]);
+ }
+
+ for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++, chainIndex++) {
+ size_t currentMl=0;
+ const BYTE* match;
+ matchIndex = dms->chainTable[chainIndex];
+ match = ddsBase + matchIndex;
+
+ /* guaranteed by table construction */
+ assert(matchIndex >= ddsLowestIndex);
+ assert(match+4 <= ddsEnd);
+ if (MEM_read32(match) == MEM_read32(ip)) {
+ /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+ currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
+ }
+
+ /* save best solution */
+ if (currentMl > ml) {
+ ml = currentMl;
+ *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + ddsIndexDelta));
+ if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
+ }
+ }
+ }
+ return ml;
+ }
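Note: the bucket layout that ZSTD_dedicatedDictSearch_lazy_search() walks can be read off from the code above: each hash bucket holds (1 << ZSTD_LAZY_DDSS_BUCKET_LOG) U32 slots, the first bucketSize-1 of which cache the most recent positions, while the last slot packs a pointer into the spill-over chain table. A sketch of that packed field, inferred from the `>> 8` and `& 0xFF` reads (the helper names are hypothetical; the writer side lives in ZSTD_dedicatedDictSearch_lazy_loadDictionary() above):

    typedef unsigned int U32;

    /* high 24 bits: index of the chain's first entry in chainTable;
     * low 8 bits: chain length (so a chain holds at most 255 entries) */
    static U32 dds_packChainPointer(U32 chainIndex, U32 chainLength)
    {
        return (chainIndex << 8) | (chainLength & 0xFF);
    }

    static U32 dds_chainIndex(U32 chainPackedPointer)  { return chainPackedPointer >> 8; }
    static U32 dds_chainLength(U32 chainPackedPointer) { return chainPackedPointer & 0xFF; }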
+
+
+ /* *********************************
+ * Hash Chain
+ ***********************************/
+ #define NEXT_IN_CHAIN(d, mask) chainTable[(d) & (mask)]
+
+ /* Update chains up to ip (excluded)
+ Assumption : always within prefix (i.e. not within extDict) */
+ FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal(
+ ZSTD_matchState_t* ms,
+ const ZSTD_compressionParameters* const cParams,
+ const BYTE* ip, U32 const mls, U32 const lazySkipping)
+ {
+ U32* const hashTable = ms->hashTable;
+ const U32 hashLog = cParams->hashLog;
+ U32* const chainTable = ms->chainTable;
+ const U32 chainMask = (1 << cParams->chainLog) - 1;
+ const BYTE* const base = ms->window.base;
+ const U32 target = (U32)(ip - base);
+ U32 idx = ms->nextToUpdate;
+
+ while(idx < target) { /* catch up */
+ size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
+ NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
+ hashTable[h] = idx;
+ idx++;
+ /* Stop inserting every position when in the lazy skipping mode. */
+ if (lazySkipping)
+ break;
+ }
+
+ ms->nextToUpdate = target;
+ return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
+ }
+
+ U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
+ return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch, /* lazySkipping*/ 0);
+ }
 
  /* inlining is important to hardwire a hot branch (template emulation) */
  FORCE_INLINE_TEMPLATE
- size_t ZSTD_HcFindBestMatch_generic (
+ size_t ZSTD_HcFindBestMatch(
  ZSTD_matchState_t* ms,
  const BYTE* const ip, const BYTE* const iLimit,
  size_t* offsetPtr,
@@ -633,14 +690,15 @@ size_t ZSTD_HcFindBestMatch_generic (
  }
 
  /* HC4 match finder */
- matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls);
+ matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls, ms->lazySkipping);
 
  for ( ; (matchIndex>=lowLimit) & (nbAttempts>0) ; nbAttempts--) {
  size_t currentMl=0;
  if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
  const BYTE* const match = base + matchIndex;
  assert(matchIndex >= dictLimit); /* ensures this is true if dictMode != ZSTD_extDict */
- if (match[ml] == ip[ml]) /* potentially better */
+ /* read 4B starting from (match + ml + 1 - sizeof(U32)) */
+ if (MEM_read32(match + ml - 3) == MEM_read32(ip + ml - 3)) /* potentially better */
  currentMl = ZSTD_count(ip, match, iLimit);
  } else {
  const BYTE* const match = dictBase + matchIndex;
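Note: the candidate filter above got stronger. The old code peeked at the single byte `match[ml]` that a longer-than-current-best match would have to share with `ip`; the new code compares the 4 bytes ending at that position in one unaligned 32-bit load, rejecting more losing candidates before the full ZSTD_count(). Since `ml` starts at 3 in these finders (`size_t ml=4-1;`), `match + ml - 3` never underruns the match start. A standalone sketch of the check, using memcpy as a portable stand-in for MEM_read32:

    #include <stddef.h>
    #include <string.h>

    /* a candidate can only beat the current best length ml if it agrees
     * with ip on the 4 bytes at positions [ml-3, ml]; requires ml >= 3 */
    static int could_be_better(const unsigned char* ip, const unsigned char* match, size_t ml)
    {
        unsigned a, b;
        memcpy(&a, ip + ml - 3, sizeof a);
        memcpy(&b, match + ml - 3, sizeof b);
        return a == b;
    }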
@@ -652,7 +710,7 @@ size_t ZSTD_HcFindBestMatch_generic (
  /* save best solution */
  if (currentMl > ml) {
  ml = currentMl;
- *offsetPtr = curr - matchIndex + ZSTD_REP_MOVE;
+ *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex);
  if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
  }
 
@@ -660,91 +718,10 @@ size_t ZSTD_HcFindBestMatch_generic (
  matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
  }
 
+ assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
  if (dictMode == ZSTD_dedicatedDictSearch) {
- const U32 ddsLowestIndex = dms->window.dictLimit;
- const BYTE* const ddsBase = dms->window.base;
- const BYTE* const ddsEnd = dms->window.nextSrc;
- const U32 ddsSize = (U32)(ddsEnd - ddsBase);
- const U32 ddsIndexDelta = dictLimit - ddsSize;
- const U32 bucketSize = (1 << ZSTD_LAZY_DDSS_BUCKET_LOG);
- const U32 bucketLimit = nbAttempts < bucketSize - 1 ? nbAttempts : bucketSize - 1;
- U32 ddsAttempt;
-
- for (ddsAttempt = 0; ddsAttempt < bucketSize - 1; ddsAttempt++) {
- PREFETCH_L1(ddsBase + dms->hashTable[ddsIdx + ddsAttempt]);
- }
-
- {
- U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
- U32 const chainIndex = chainPackedPointer >> 8;
-
- PREFETCH_L1(&dms->chainTable[chainIndex]);
- }
-
- for (ddsAttempt = 0; ddsAttempt < bucketLimit; ddsAttempt++) {
- size_t currentMl=0;
- const BYTE* match;
- matchIndex = dms->hashTable[ddsIdx + ddsAttempt];
- match = ddsBase + matchIndex;
-
- if (!matchIndex) {
- return ml;
- }
-
- /* guaranteed by table construction */
- (void)ddsLowestIndex;
- assert(matchIndex >= ddsLowestIndex);
- assert(match+4 <= ddsEnd);
- if (MEM_read32(match) == MEM_read32(ip)) {
- /* assumption : matchIndex <= dictLimit-4 (by table construction) */
- currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
- }
-
- /* save best solution */
- if (currentMl > ml) {
- ml = currentMl;
- *offsetPtr = curr - (matchIndex + ddsIndexDelta) + ZSTD_REP_MOVE;
- if (ip+currentMl == iLimit) {
- /* best possible, avoids read overflow on next attempt */
- return ml;
- }
- }
- }
-
- {
- U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
- U32 chainIndex = chainPackedPointer >> 8;
- U32 const chainLength = chainPackedPointer & 0xFF;
- U32 const chainAttempts = nbAttempts - ddsAttempt;
- U32 const chainLimit = chainAttempts > chainLength ? chainLength : chainAttempts;
- U32 chainAttempt;
-
- for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++) {
- PREFETCH_L1(ddsBase + dms->chainTable[chainIndex + chainAttempt]);
- }
-
- for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++, chainIndex++) {
- size_t currentMl=0;
- const BYTE* match;
- matchIndex = dms->chainTable[chainIndex];
- match = ddsBase + matchIndex;
-
- /* guaranteed by table construction */
- assert(matchIndex >= ddsLowestIndex);
- assert(match+4 <= ddsEnd);
- if (MEM_read32(match) == MEM_read32(ip)) {
- /* assumption : matchIndex <= dictLimit-4 (by table construction) */
- currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
- }
-
- /* save best solution */
- if (currentMl > ml) {
- ml = currentMl;
- *offsetPtr = curr - (matchIndex + ddsIndexDelta) + ZSTD_REP_MOVE;
- if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
- }
- }
- }
+ ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts, dms,
+ ip, iLimit, prefixStart, curr, dictLimit, ddsIdx);
  } else if (dictMode == ZSTD_dictMatchState) {
  const U32* const dmsChainTable = dms->chainTable;
  const U32 dmsChainSize = (1 << dms->cParams.chainLog);
@@ -768,7 +745,8 @@ size_t ZSTD_HcFindBestMatch_generic (
  /* save best solution */
  if (currentMl > ml) {
  ml = currentMl;
- *offsetPtr = curr - (matchIndex + dmsIndexDelta) + ZSTD_REP_MOVE;
+ assert(curr > matchIndex + dmsIndexDelta);
+ *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + dmsIndexDelta));
  if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
  }
 
@@ -781,75 +759,735 @@ size_t ZSTD_HcFindBestMatch_generic (
  return ml;
  }
 
+ /* *********************************
+ * (SIMD) Row-based matchfinder
+ ***********************************/
+ /* Constants for row-based hash */
+ #define ZSTD_ROW_HASH_TAG_MASK ((1u << ZSTD_ROW_HASH_TAG_BITS) - 1)
+ #define ZSTD_ROW_HASH_MAX_ENTRIES 64 /* absolute maximum number of entries per row, for all configurations */
 
- FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_selectMLS (
- ZSTD_matchState_t* ms,
- const BYTE* ip, const BYTE* const iLimit,
- size_t* offsetPtr)
+ #define ZSTD_ROW_HASH_CACHE_MASK (ZSTD_ROW_HASH_CACHE_SIZE - 1)
+
+ typedef U64 ZSTD_VecMask; /* Clarifies when we are interacting with a U64 representing a mask of matches */
+
+ /* ZSTD_VecMask_next():
+ * Starting from the LSB, returns the idx of the next non-zero bit.
+ * Basically counting the nb of trailing zeroes.
+ */
+ MEM_STATIC U32 ZSTD_VecMask_next(ZSTD_VecMask val) {
+ return ZSTD_countTrailingZeros64(val);
+ }
+
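Note: ZSTD_VecMask_next() pairs with the `matches &= (matches - 1)` idiom used by the search loops further down: count trailing zeros to locate the lowest set bit, then clear that bit and repeat. A minimal standalone illustration (ZSTD_countTrailingZeros64 in bits.h portably wraps compiler builtins like the one used here):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t mask = 0x91;  /* bits 0, 4, 7 set */
        for (; mask > 0; mask &= (mask - 1)) {
            /* __builtin_ctzll: GCC/Clang count-trailing-zeros builtin */
            printf("match at entry %d\n", __builtin_ctzll(mask));
        }
        return 0;
    }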
+ /* ZSTD_row_nextIndex():
+ * Returns the next index to insert at within a tagTable row, and updates the "head"
+ * value to reflect the update. Essentially cycles backwards from [1, {entries per row})
+ */
+ FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextIndex(BYTE* const tagRow, U32 const rowMask) {
+ U32 next = (*tagRow-1) & rowMask;
+ next += (next == 0) ? rowMask : 0; /* skip first position */
+ *tagRow = (BYTE)next;
+ return next;
+ }
+
+ /* ZSTD_isAligned():
+ * Checks that a pointer is aligned to "align" bytes which must be a power of 2.
+ */
+ MEM_STATIC int ZSTD_isAligned(void const* ptr, size_t align) {
+ assert((align & (align - 1)) == 0);
+ return (((size_t)ptr) & (align - 1)) == 0;
+ }
+
+ /* ZSTD_row_prefetch():
+ * Performs prefetching for the hashTable and tagTable at a given row.
+ */
+ FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, BYTE const* tagTable, U32 const relRow, U32 const rowLog) {
+ PREFETCH_L1(hashTable + relRow);
+ if (rowLog >= 5) {
+ PREFETCH_L1(hashTable + relRow + 16);
+ /* Note: prefetching more of the hash table does not appear to be beneficial for 128-entry rows */
+ }
+ PREFETCH_L1(tagTable + relRow);
+ if (rowLog == 6) {
+ PREFETCH_L1(tagTable + relRow + 32);
+ }
+ assert(rowLog == 4 || rowLog == 5 || rowLog == 6);
+ assert(ZSTD_isAligned(hashTable + relRow, 64)); /* prefetched hash row always 64-byte aligned */
+ assert(ZSTD_isAligned(tagTable + relRow, (size_t)1 << rowLog)); /* prefetched tagRow sits on correct multiple of bytes (32,64,128) */
+ }
+
+ /* ZSTD_row_fillHashCache():
+ * Fill up the hash cache starting at idx, prefetching up to ZSTD_ROW_HASH_CACHE_SIZE entries,
+ * but not beyond iLimit.
+ */
+ FORCE_INLINE_TEMPLATE void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const BYTE* base,
+ U32 const rowLog, U32 const mls,
+ U32 idx, const BYTE* const iLimit)
  {
- switch(ms->cParams.minMatch)
- {
- default : /* includes case 3 */
- case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
- case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_noDict);
- case 7 :
- case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_noDict);
+ U32 const* const hashTable = ms->hashTable;
+ BYTE const* const tagTable = ms->tagTable;
+ U32 const hashLog = ms->rowHashLog;
+ U32 const maxElemsToPrefetch = (base + idx) > iLimit ? 0 : (U32)(iLimit - (base + idx) + 1);
+ U32 const lim = idx + MIN(ZSTD_ROW_HASH_CACHE_SIZE, maxElemsToPrefetch);
+
+ for (; idx < lim; ++idx) {
+ U32 const hash = (U32)ZSTD_hashPtrSalted(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt);
+ U32 const row = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
+ ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
+ ms->hashCache[idx & ZSTD_ROW_HASH_CACHE_MASK] = hash;
  }
+
+ DEBUGLOG(6, "ZSTD_row_fillHashCache(): [%u %u %u %u %u %u %u %u]", ms->hashCache[0], ms->hashCache[1],
+ ms->hashCache[2], ms->hashCache[3], ms->hashCache[4],
+ ms->hashCache[5], ms->hashCache[6], ms->hashCache[7]);
  }
 
+ /* ZSTD_row_nextCachedHash():
+ * Returns the hash of base + idx, and replaces the hash in the hash cache with the byte at
+ * base + idx + ZSTD_ROW_HASH_CACHE_SIZE. Also prefetches the appropriate rows from hashTable and tagTable.
+ */
+ FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable,
+ BYTE const* tagTable, BYTE const* base,
+ U32 idx, U32 const hashLog,
+ U32 const rowLog, U32 const mls,
+ U64 const hashSalt)
+ {
+ U32 const newHash = (U32)ZSTD_hashPtrSalted(base+idx+ZSTD_ROW_HASH_CACHE_SIZE, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, hashSalt);
+ U32 const row = (newHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
+ ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
+ { U32 const hash = cache[idx & ZSTD_ROW_HASH_CACHE_MASK];
+ cache[idx & ZSTD_ROW_HASH_CACHE_MASK] = newHash;
+ return hash;
+ }
+ }
 
- static size_t ZSTD_HcFindBestMatch_dictMatchState_selectMLS (
- ZSTD_matchState_t* ms,
- const BYTE* ip, const BYTE* const iLimit,
- size_t* offsetPtr)
+ /* ZSTD_row_update_internalImpl():
+ * Updates the hash table with positions starting from updateStartIdx until updateEndIdx.
+ */
+ FORCE_INLINE_TEMPLATE void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms,
+ U32 updateStartIdx, U32 const updateEndIdx,
+ U32 const mls, U32 const rowLog,
+ U32 const rowMask, U32 const useCache)
  {
- switch(ms->cParams.minMatch)
- {
- default : /* includes case 3 */
- case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dictMatchState);
- case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dictMatchState);
- case 7 :
- case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dictMatchState);
+ U32* const hashTable = ms->hashTable;
+ BYTE* const tagTable = ms->tagTable;
+ U32 const hashLog = ms->rowHashLog;
+ const BYTE* const base = ms->window.base;
+
+ DEBUGLOG(6, "ZSTD_row_update_internalImpl(): updateStartIdx=%u, updateEndIdx=%u", updateStartIdx, updateEndIdx);
+ for (; updateStartIdx < updateEndIdx; ++updateStartIdx) {
+ U32 const hash = useCache ? ZSTD_row_nextCachedHash(ms->hashCache, hashTable, tagTable, base, updateStartIdx, hashLog, rowLog, mls, ms->hashSalt)
+ : (U32)ZSTD_hashPtrSalted(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt);
+ U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
+ U32* const row = hashTable + relRow;
+ BYTE* tagRow = tagTable + relRow;
+ U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask);
+
+ assert(hash == ZSTD_hashPtrSalted(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt));
+ tagRow[pos] = hash & ZSTD_ROW_HASH_TAG_MASK;
+ row[pos] = updateStartIdx;
  }
  }
 
+ /* ZSTD_row_update_internal():
+ * Inserts the byte at ip into the appropriate position in the hash table, and updates ms->nextToUpdate.
+ * Skips sections of long matches as is necessary.
+ */
+ FORCE_INLINE_TEMPLATE void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const BYTE* ip,
+ U32 const mls, U32 const rowLog,
+ U32 const rowMask, U32 const useCache)
+ {
+ U32 idx = ms->nextToUpdate;
+ const BYTE* const base = ms->window.base;
+ const U32 target = (U32)(ip - base);
+ const U32 kSkipThreshold = 384;
+ const U32 kMaxMatchStartPositionsToUpdate = 96;
+ const U32 kMaxMatchEndPositionsToUpdate = 32;
+
+ if (useCache) {
+ /* Only skip positions when using hash cache, i.e.
+ * if we are loading a dict, don't skip anything.
+ * If we decide to skip, then we only update a set number
+ * of positions at the beginning and end of the match.
+ */
+ if (UNLIKELY(target - idx > kSkipThreshold)) {
+ U32 const bound = idx + kMaxMatchStartPositionsToUpdate;
+ ZSTD_row_update_internalImpl(ms, idx, bound, mls, rowLog, rowMask, useCache);
+ idx = target - kMaxMatchEndPositionsToUpdate;
+ ZSTD_row_fillHashCache(ms, base, rowLog, mls, idx, ip+1);
+ }
+ }
+ assert(target >= idx);
+ ZSTD_row_update_internalImpl(ms, idx, target, mls, rowLog, rowMask, useCache);
+ ms->nextToUpdate = target;
+ }
 
- static size_t ZSTD_HcFindBestMatch_dedicatedDictSearch_selectMLS (
- ZSTD_matchState_t* ms,
- const BYTE* ip, const BYTE* const iLimit,
- size_t* offsetPtr)
+ /* ZSTD_row_update():
+ * External wrapper for ZSTD_row_update_internal(). Used for filling the hashtable during dictionary
+ * processing.
+ */
+ void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip) {
+ const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);
+ const U32 rowMask = (1u << rowLog) - 1;
+ const U32 mls = MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */);
+
+ DEBUGLOG(5, "ZSTD_row_update(), rowLog=%u", rowLog);
+ ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 0 /* don't use cache */);
+ }
+
+ /* Returns the mask width of bits group of which will be set to 1. Given not all
+ * architectures have easy movemask instruction, this helps to iterate over
+ * groups of bits easier and faster.
+ */
+ FORCE_INLINE_TEMPLATE U32
+ ZSTD_row_matchMaskGroupWidth(const U32 rowEntries)
  {
- switch(ms->cParams.minMatch)
- {
- default : /* includes case 3 */
- case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_dedicatedDictSearch);
- case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_dedicatedDictSearch);
- case 7 :
- case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_dedicatedDictSearch);
+ assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64);
+ assert(rowEntries <= ZSTD_ROW_HASH_MAX_ENTRIES);
+ (void)rowEntries;
+ #if defined(ZSTD_ARCH_ARM_NEON)
+ /* NEON path only works for little endian */
+ if (!MEM_isLittleEndian()) {
+ return 1;
+ }
+ if (rowEntries == 16) {
+ return 4;
+ }
+ if (rowEntries == 32) {
+ return 2;
+ }
+ if (rowEntries == 64) {
+ return 1;
+ }
+ #endif
+ return 1;
+ }
+
+ #if defined(ZSTD_ARCH_X86_SSE2)
+ FORCE_INLINE_TEMPLATE ZSTD_VecMask
+ ZSTD_row_getSSEMask(int nbChunks, const BYTE* const src, const BYTE tag, const U32 head)
+ {
+ const __m128i comparisonMask = _mm_set1_epi8((char)tag);
+ int matches[4] = {0};
+ int i;
+ assert(nbChunks == 1 || nbChunks == 2 || nbChunks == 4);
+ for (i=0; i<nbChunks; i++) {
+ const __m128i chunk = _mm_loadu_si128((const __m128i*)(const void*)(src + 16*i));
+ const __m128i equalMask = _mm_cmpeq_epi8(chunk, comparisonMask);
+ matches[i] = _mm_movemask_epi8(equalMask);
+ }
+ if (nbChunks == 1) return ZSTD_rotateRight_U16((U16)matches[0], head);
+ if (nbChunks == 2) return ZSTD_rotateRight_U32((U32)matches[1] << 16 | (U32)matches[0], head);
+ assert(nbChunks == 4);
+ return ZSTD_rotateRight_U64((U64)matches[3] << 48 | (U64)matches[2] << 32 | (U64)matches[1] << 16 | (U64)matches[0], head);
+ }
+ #endif
+
+ #if defined(ZSTD_ARCH_ARM_NEON)
+ FORCE_INLINE_TEMPLATE ZSTD_VecMask
+ ZSTD_row_getNEONMask(const U32 rowEntries, const BYTE* const src, const BYTE tag, const U32 headGrouped)
+ {
+ assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64);
+ if (rowEntries == 16) {
+ /* vshrn_n_u16 shifts by 4 every u16 and narrows to 8 lower bits.
+ * After that groups of 4 bits represent the equalMask. We lower
+ * all bits except the highest in these groups by doing AND with
+ * 0x88 = 0b10001000.
+ */
+ const uint8x16_t chunk = vld1q_u8(src);
+ const uint16x8_t equalMask = vreinterpretq_u16_u8(vceqq_u8(chunk, vdupq_n_u8(tag)));
+ const uint8x8_t res = vshrn_n_u16(equalMask, 4);
+ const U64 matches = vget_lane_u64(vreinterpret_u64_u8(res), 0);
+ return ZSTD_rotateRight_U64(matches, headGrouped) & 0x8888888888888888ull;
+ } else if (rowEntries == 32) {
+ /* Same idea as with rowEntries == 16 but doing AND with
+ * 0x55 = 0b01010101.
+ */
+ const uint16x8x2_t chunk = vld2q_u16((const uint16_t*)(const void*)src);
+ const uint8x16_t chunk0 = vreinterpretq_u8_u16(chunk.val[0]);
+ const uint8x16_t chunk1 = vreinterpretq_u8_u16(chunk.val[1]);
+ const uint8x16_t dup = vdupq_n_u8(tag);
+ const uint8x8_t t0 = vshrn_n_u16(vreinterpretq_u16_u8(vceqq_u8(chunk0, dup)), 6);
+ const uint8x8_t t1 = vshrn_n_u16(vreinterpretq_u16_u8(vceqq_u8(chunk1, dup)), 6);
+ const uint8x8_t res = vsli_n_u8(t0, t1, 4);
+ const U64 matches = vget_lane_u64(vreinterpret_u64_u8(res), 0) ;
+ return ZSTD_rotateRight_U64(matches, headGrouped) & 0x5555555555555555ull;
+ } else { /* rowEntries == 64 */
+ const uint8x16x4_t chunk = vld4q_u8(src);
+ const uint8x16_t dup = vdupq_n_u8(tag);
+ const uint8x16_t cmp0 = vceqq_u8(chunk.val[0], dup);
+ const uint8x16_t cmp1 = vceqq_u8(chunk.val[1], dup);
+ const uint8x16_t cmp2 = vceqq_u8(chunk.val[2], dup);
+ const uint8x16_t cmp3 = vceqq_u8(chunk.val[3], dup);
+
+ const uint8x16_t t0 = vsriq_n_u8(cmp1, cmp0, 1);
+ const uint8x16_t t1 = vsriq_n_u8(cmp3, cmp2, 1);
+ const uint8x16_t t2 = vsriq_n_u8(t1, t0, 2);
+ const uint8x16_t t3 = vsriq_n_u8(t2, t2, 4);
+ const uint8x8_t t4 = vshrn_n_u16(vreinterpretq_u16_u8(t3), 4);
+ const U64 matches = vget_lane_u64(vreinterpret_u64_u8(t4), 0);
+ return ZSTD_rotateRight_U64(matches, headGrouped);
  }
  }
+ #endif
+
+ /* Returns a ZSTD_VecMask (U64) that has the nth group (determined by
+ * ZSTD_row_matchMaskGroupWidth) of bits set to 1 if the newly-computed "tag"
+ * matches the hash at the nth position in a row of the tagTable.
+ * Each row is a circular buffer beginning at the value of "headGrouped". So we
+ * must rotate the "matches" bitfield to match up with the actual layout of the
+ * entries within the hashTable */
+ FORCE_INLINE_TEMPLATE ZSTD_VecMask
+ ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 headGrouped, const U32 rowEntries)
+ {
+ const BYTE* const src = tagRow;
+ assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64);
+ assert(rowEntries <= ZSTD_ROW_HASH_MAX_ENTRIES);
+ assert(ZSTD_row_matchMaskGroupWidth(rowEntries) * rowEntries <= sizeof(ZSTD_VecMask) * 8);
+
+ #if defined(ZSTD_ARCH_X86_SSE2)
+
+ return ZSTD_row_getSSEMask(rowEntries / 16, src, tag, headGrouped);
+
+ #else /* SW or NEON-LE */
 
+ # if defined(ZSTD_ARCH_ARM_NEON)
+ /* This NEON path only works for little endian - otherwise use SWAR below */
+ if (MEM_isLittleEndian()) {
+ return ZSTD_row_getNEONMask(rowEntries, src, tag, headGrouped);
+ }
+ # endif /* ZSTD_ARCH_ARM_NEON */
+ /* SWAR */
+ { const int chunkSize = sizeof(size_t);
+ const size_t shiftAmount = ((chunkSize * 8) - chunkSize);
+ const size_t xFF = ~((size_t)0);
+ const size_t x01 = xFF / 0xFF;
+ const size_t x80 = x01 << 7;
+ const size_t splatChar = tag * x01;
+ ZSTD_VecMask matches = 0;
+ int i = rowEntries - chunkSize;
+ assert((sizeof(size_t) == 4) || (sizeof(size_t) == 8));
+ if (MEM_isLittleEndian()) { /* runtime check so have two loops */
+ const size_t extractMagic = (xFF / 0x7F) >> chunkSize;
+ do {
+ size_t chunk = MEM_readST(&src[i]);
+ chunk ^= splatChar;
+ chunk = (((chunk | x80) - x01) | chunk) & x80;
+ matches <<= chunkSize;
+ matches |= (chunk * extractMagic) >> shiftAmount;
+ i -= chunkSize;
+ } while (i >= 0);
+ } else { /* big endian: reverse bits during extraction */
+ const size_t msb = xFF ^ (xFF >> 1);
+ const size_t extractMagic = (msb / 0x1FF) | msb;
+ do {
+ size_t chunk = MEM_readST(&src[i]);
+ chunk ^= splatChar;
+ chunk = (((chunk | x80) - x01) | chunk) & x80;
+ matches <<= chunkSize;
+ matches |= ((chunk >> 7) * extractMagic) >> shiftAmount;
+ i -= chunkSize;
+ } while (i >= 0);
+ }
+ matches = ~matches;
+ if (rowEntries == 16) {
+ return ZSTD_rotateRight_U16((U16)matches, headGrouped);
+ } else if (rowEntries == 32) {
+ return ZSTD_rotateRight_U32((U32)matches, headGrouped);
+ } else {
+ return ZSTD_rotateRight_U64((U64)matches, headGrouped);
+ }
+ }
+ #endif
+ }
 
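Note: the `/* SWAR */` branch above ("SIMD within a register") tests every byte of a size_t-sized chunk of the tag row at once, with no vector instructions: XOR with a splatted tag zeroes the matching bytes, the `(((chunk | x80) - x01) | chunk) & x80` step then leaves 0x80 in each non-matching byte (the subtraction cannot borrow across bytes because every byte has its high bit forced on first), the multiply by extractMagic gathers those high bits into the top byte, and `matches = ~matches` finally flips the sense so that set bits mean hits. A standalone demonstration of the core trick on one 64-bit chunk, with hypothetical row contents:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t chunk = 0x0011004200990042ull;  /* 8 tag bytes, LSB first: 0x42 at entries 0 and 4 */
        const uint64_t x01 = 0x0101010101010101ull;
        const uint64_t x80 = 0x8080808080808080ull;
        const uint8_t  tag = 0x42;

        chunk ^= tag * x01;                            /* bytes equal to tag become 0x00 */
        chunk = (((chunk | x80) - x01) | chunk) & x80; /* 0x80 in every NON-matching byte */

        for (int i = 0; i < 8; i++)
            if (!((chunk >> (8*i + 7)) & 1))
                printf("entry %d matches\n", i);       /* prints entries 0 and 4 */
        return 0;
    }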
833
- FORCE_INLINE_TEMPLATE size_t ZSTD_HcFindBestMatch_extDict_selectMLS (
1103
+ /* The high-level approach of the SIMD row based match finder is as follows:
1104
+ * - Figure out where to insert the new entry:
1105
+ * - Generate a hash from a byte along with an additional 1-byte "short hash". The additional byte is our "tag"
1106
+ * - The hashTable is effectively split into groups or "rows" of 16 or 32 entries of U32, and the hash determines
1107
+ * which row to insert into.
1108
+ * - Determine the correct position within the row to insert the entry into. Each row of 16 or 32 can
1109
+ * be considered as a circular buffer with a "head" index that resides in the tagTable.
1110
+ * - Also insert the "tag" into the equivalent row and position in the tagTable.
1111
+ * - Note: The tagTable has 17 or 33 1-byte entries per row, due to 16 or 32 tags, and 1 "head" entry.
1112
+ * The 17 or 33 entry rows are spaced out to occur every 32 or 64 bytes, respectively,
1113
+ * for alignment/performance reasons, leaving some bytes unused.
1114
+ * - Use SIMD to efficiently compare the tags in the tagTable to the 1-byte "short hash" and
1115
+ * generate a bitfield that we can cycle through to check the collisions in the hash table.
1116
+ * - Pick the longest match.
1117
+ */
1118
+ FORCE_INLINE_TEMPLATE
1119
+ size_t ZSTD_RowFindBestMatch(
834
1120
  ZSTD_matchState_t* ms,
835
- const BYTE* ip, const BYTE* const iLimit,
836
- size_t* offsetPtr)
1121
+ const BYTE* const ip, const BYTE* const iLimit,
1122
+ size_t* offsetPtr,
1123
+ const U32 mls, const ZSTD_dictMode_e dictMode,
1124
+ const U32 rowLog)
837
1125
  {
838
- switch(ms->cParams.minMatch)
839
- {
840
- default : /* includes case 3 */
841
- case 4 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 4, ZSTD_extDict);
842
- case 5 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 5, ZSTD_extDict);
843
- case 7 :
844
- case 6 : return ZSTD_HcFindBestMatch_generic(ms, ip, iLimit, offsetPtr, 6, ZSTD_extDict);
1126
+ U32* const hashTable = ms->hashTable;
1127
+ BYTE* const tagTable = ms->tagTable;
1128
+ U32* const hashCache = ms->hashCache;
1129
+ const U32 hashLog = ms->rowHashLog;
1130
+ const ZSTD_compressionParameters* const cParams = &ms->cParams;
1131
+ const BYTE* const base = ms->window.base;
1132
+ const BYTE* const dictBase = ms->window.dictBase;
1133
+ const U32 dictLimit = ms->window.dictLimit;
1134
+ const BYTE* const prefixStart = base + dictLimit;
1135
+ const BYTE* const dictEnd = dictBase + dictLimit;
1136
+ const U32 curr = (U32)(ip-base);
1137
+ const U32 maxDistance = 1U << cParams->windowLog;
1138
+ const U32 lowestValid = ms->window.lowLimit;
1139
+ const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
1140
+ const U32 isDictionary = (ms->loadedDictEnd != 0);
1141
+ const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
1142
+ const U32 rowEntries = (1U << rowLog);
1143
+ const U32 rowMask = rowEntries - 1;
1144
+ const U32 cappedSearchLog = MIN(cParams->searchLog, rowLog); /* nb of searches is capped at nb entries per row */
1145
+ const U32 groupWidth = ZSTD_row_matchMaskGroupWidth(rowEntries);
1146
+ const U64 hashSalt = ms->hashSalt;
1147
+ U32 nbAttempts = 1U << cappedSearchLog;
1148
+ size_t ml=4-1;
1149
+ U32 hash;
1150
+
1151
+ /* DMS/DDS variables that may be referenced laster */
1152
+ const ZSTD_matchState_t* const dms = ms->dictMatchState;
1153
+
1154
+ /* Initialize the following variables to satisfy static analyzer */
1155
+ size_t ddsIdx = 0;
1156
+ U32 ddsExtraAttempts = 0; /* cctx hash tables are limited in searches, but allow extra searches into DDS */
1157
+ U32 dmsTag = 0;
1158
+ U32* dmsRow = NULL;
1159
+ BYTE* dmsTagRow = NULL;
1160
+
1161
+ if (dictMode == ZSTD_dedicatedDictSearch) {
1162
+ const U32 ddsHashLog = dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG;
1163
+ { /* Prefetch DDS hashtable entry */
1164
+ ddsIdx = ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG;
1165
+ PREFETCH_L1(&dms->hashTable[ddsIdx]);
1166
+ }
1167
+ ddsExtraAttempts = cParams->searchLog > rowLog ? 1U << (cParams->searchLog - rowLog) : 0;
1168
+ }
1169
+
1170
+ if (dictMode == ZSTD_dictMatchState) {
1171
+ /* Prefetch DMS rows */
1172
+ U32* const dmsHashTable = dms->hashTable;
1173
+ BYTE* const dmsTagTable = dms->tagTable;
1174
+ U32 const dmsHash = (U32)ZSTD_hashPtr(ip, dms->rowHashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
1175
+ U32 const dmsRelRow = (dmsHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
1176
+ dmsTag = dmsHash & ZSTD_ROW_HASH_TAG_MASK;
1177
+ dmsTagRow = (BYTE*)(dmsTagTable + dmsRelRow);
1178
+ dmsRow = dmsHashTable + dmsRelRow;
1179
+ ZSTD_row_prefetch(dmsHashTable, dmsTagTable, dmsRelRow, rowLog);
1180
+ }
1181
+
1182
+ /* Update the hashTable and tagTable up to (but not including) ip */
1183
+ if (!ms->lazySkipping) {
1184
+ ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 1 /* useCache */);
1185
+ hash = ZSTD_row_nextCachedHash(hashCache, hashTable, tagTable, base, curr, hashLog, rowLog, mls, hashSalt);
1186
+ } else {
1187
+ /* Stop inserting every position when in the lazy skipping mode.
1188
+ * The hash cache is also not kept up to date in this mode.
1189
+ */
1190
+ hash = (U32)ZSTD_hashPtrSalted(ip, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, hashSalt);
1191
+ ms->nextToUpdate = curr;
1192
+ }
1193
+ ms->hashSaltEntropy += hash; /* collect salt entropy */
1194
+
1195
+ { /* Get the hash for ip, compute the appropriate row */
1196
+ U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
1197
+ U32 const tag = hash & ZSTD_ROW_HASH_TAG_MASK;
1198
+ U32* const row = hashTable + relRow;
1199
+ BYTE* tagRow = (BYTE*)(tagTable + relRow);
1200
+ U32 const headGrouped = (*tagRow & rowMask) * groupWidth;
1201
+ U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES];
1202
+ size_t numMatches = 0;
1203
+ size_t currMatch = 0;
1204
+ ZSTD_VecMask matches = ZSTD_row_getMatchMask(tagRow, (BYTE)tag, headGrouped, rowEntries);
1205
+
1206
+ /* Cycle through the matches and prefetch */
1207
+ for (; (matches > 0) && (nbAttempts > 0); matches &= (matches - 1)) {
1208
+ U32 const matchPos = ((headGrouped + ZSTD_VecMask_next(matches)) / groupWidth) & rowMask;
1209
+ U32 const matchIndex = row[matchPos];
1210
+ if(matchPos == 0) continue;
1211
+ assert(numMatches < rowEntries);
1212
+ if (matchIndex < lowLimit)
1213
+ break;
1214
+ if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
1215
+ PREFETCH_L1(base + matchIndex);
1216
+ } else {
1217
+ PREFETCH_L1(dictBase + matchIndex);
1218
+ }
1219
+ matchBuffer[numMatches++] = matchIndex;
1220
+ --nbAttempts;
1221
+ }
1222
+
+        /* Speed opt: insert current byte into hashtable too. This allows us to avoid one iteration of the loop
+           in ZSTD_row_update_internal() at the next search. */
+        {
+            U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask);
+            tagRow[pos] = (BYTE)tag;
+            row[pos] = ms->nextToUpdate++;
+        }
+
+        /* Return the longest match */
+        for (; currMatch < numMatches; ++currMatch) {
+            U32 const matchIndex = matchBuffer[currMatch];
+            size_t currentMl=0;
+            assert(matchIndex < curr);
+            assert(matchIndex >= lowLimit);
+
+            if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
+                const BYTE* const match = base + matchIndex;
+                assert(matchIndex >= dictLimit);   /* ensures this is true if dictMode != ZSTD_extDict */
+                /* read 4B starting from (match + ml + 1 - sizeof(U32)) */
+                if (MEM_read32(match + ml - 3) == MEM_read32(ip + ml - 3))   /* potentially better */
+                    currentMl = ZSTD_count(ip, match, iLimit);
+            } else {
+                const BYTE* const match = dictBase + matchIndex;
+                assert(match+4 <= dictEnd);
+                if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
+                    currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
+            }
+
+            /* Save best solution */
+            if (currentMl > ml) {
+                ml = currentMl;
+                *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex);
+                if (ip+currentMl == iLimit) break;   /* best possible, avoids read overflow on next attempt */
+            }
+        }
+    }
+
+    assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX));   /* Check we haven't underflowed. */
+    if (dictMode == ZSTD_dedicatedDictSearch) {
+        ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts + ddsExtraAttempts, dms,
+                                                  ip, iLimit, prefixStart, curr, dictLimit, ddsIdx);
+    } else if (dictMode == ZSTD_dictMatchState) {
+        /* TODO: Measure and potentially add prefetching to DMS */
+        const U32 dmsLowestIndex  = dms->window.dictLimit;
+        const BYTE* const dmsBase = dms->window.base;
+        const BYTE* const dmsEnd  = dms->window.nextSrc;
+        const U32 dmsSize         = (U32)(dmsEnd - dmsBase);
+        const U32 dmsIndexDelta   = dictLimit - dmsSize;
+
+        {   U32 const headGrouped = (*dmsTagRow & rowMask) * groupWidth;
+            U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES];
+            size_t numMatches = 0;
+            size_t currMatch = 0;
+            ZSTD_VecMask matches = ZSTD_row_getMatchMask(dmsTagRow, (BYTE)dmsTag, headGrouped, rowEntries);
+
+            for (; (matches > 0) && (nbAttempts > 0); matches &= (matches - 1)) {
+                U32 const matchPos = ((headGrouped + ZSTD_VecMask_next(matches)) / groupWidth) & rowMask;
+                U32 const matchIndex = dmsRow[matchPos];
+                if(matchPos == 0) continue;
+                if (matchIndex < dmsLowestIndex)
+                    break;
+                PREFETCH_L1(dmsBase + matchIndex);
+                matchBuffer[numMatches++] = matchIndex;
+                --nbAttempts;
+            }
+
+            /* Return the longest match */
+            for (; currMatch < numMatches; ++currMatch) {
+                U32 const matchIndex = matchBuffer[currMatch];
+                size_t currentMl=0;
+                assert(matchIndex >= dmsLowestIndex);
+                assert(matchIndex < curr);
+
+                {   const BYTE* const match = dmsBase + matchIndex;
+                    assert(match+4 <= dmsEnd);
+                    if (MEM_read32(match) == MEM_read32(ip))
+                        currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;
+                }
+
+                if (currentMl > ml) {
+                    ml = currentMl;
+                    assert(curr > matchIndex + dmsIndexDelta);
+                    *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + dmsIndexDelta));
+                    if (ip+currentMl == iLimit) break;
+                }
+            }
+        }
     }
+    return ml;
 }
 
 
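A convention the rest of this diff leans on heavily: `offset` variables become `offBase`, ZSTD_REP_MOVE disappears, and ZSTD_storeSeq() now receives the raw matchLength rather than matchLength-MINMATCH. The match finders return a single "offBase" value that packs repcodes and real offsets into one integer. The macros used above suggest the following shape; the actual definitions live in zstd_compress_internal.h, outside this diff, so treat the exact constants as assumptions:

    #define ZSTD_REP_NUM          3                      /* number of tracked repcodes */
    #define REPCODE1_TO_OFFBASE   1                      /* repcodes occupy offBase values 1..3 */
    #define OFFSET_TO_OFFBASE(o)  ((o) + ZSTD_REP_NUM)   /* real offsets are shifted past them */
    #define OFFBASE_TO_OFFSET(ob) ((ob) - ZSTD_REP_NUM)
    #define OFFBASE_IS_OFFSET(ob) ((ob) > ZSTD_REP_NUM)  /* discriminates the two ranges */
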
+/**
+ * Generate search functions templated on (dictMode, mls, rowLog).
+ * These functions are outlined for code size & compilation time.
+ * ZSTD_searchMax() dispatches to the correct implementation function.
+ *
+ * TODO: The start of the search function involves loading and calculating a
+ * bunch of constants from the ZSTD_matchState_t. These computations could be
+ * done in an initialization function, and saved somewhere in the match state.
+ * Then we could pass a pointer to the saved state instead of the match state,
+ * and avoid duplicate computations.
+ *
+ * TODO: Move the match re-winding into searchMax. This improves compression
+ * ratio, and unlocks further simplifications with the next TODO.
+ *
+ * TODO: Try moving the repcode search into searchMax. After the re-winding
+ * and repcode search are in searchMax, there is no more logic in the match
+ * finder loop that requires knowledge about the dictMode. So we should be
+ * able to avoid force inlining it, and we can join the extDict loop with
+ * the single segment loop. It should go in searchMax instead of its own
+ * function to avoid having multiple virtual function calls per search.
+ */
+
+#define ZSTD_BT_SEARCH_FN(dictMode, mls)  ZSTD_BtFindBestMatch_##dictMode##_##mls
+#define ZSTD_HC_SEARCH_FN(dictMode, mls)  ZSTD_HcFindBestMatch_##dictMode##_##mls
+#define ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)  ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog
+
+#define ZSTD_SEARCH_FN_ATTRS FORCE_NOINLINE
+
+#define GEN_ZSTD_BT_SEARCH_FN(dictMode, mls) \
+    ZSTD_SEARCH_FN_ATTRS size_t ZSTD_BT_SEARCH_FN(dictMode, mls)( \
+            ZSTD_matchState_t* ms, \
+            const BYTE* ip, const BYTE* const iLimit, \
+            size_t* offBasePtr) \
+    { \
+        assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \
+        return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, mls, ZSTD_##dictMode); \
+    } \
+
+#define GEN_ZSTD_HC_SEARCH_FN(dictMode, mls) \
+    ZSTD_SEARCH_FN_ATTRS size_t ZSTD_HC_SEARCH_FN(dictMode, mls)( \
+            ZSTD_matchState_t* ms, \
+            const BYTE* ip, const BYTE* const iLimit, \
+            size_t* offsetPtr) \
+    { \
+        assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \
+        return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode); \
+    } \
+
+#define GEN_ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) \
+    ZSTD_SEARCH_FN_ATTRS size_t ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)( \
+            ZSTD_matchState_t* ms, \
+            const BYTE* ip, const BYTE* const iLimit, \
+            size_t* offsetPtr) \
+    { \
+        assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls); \
+        assert(MAX(4, MIN(6, ms->cParams.searchLog)) == rowLog); \
+        return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode, rowLog); \
+    } \
+
+#define ZSTD_FOR_EACH_ROWLOG(X, dictMode, mls) \
+    X(dictMode, mls, 4) \
+    X(dictMode, mls, 5) \
+    X(dictMode, mls, 6)
+
+#define ZSTD_FOR_EACH_MLS_ROWLOG(X, dictMode) \
+    ZSTD_FOR_EACH_ROWLOG(X, dictMode, 4) \
+    ZSTD_FOR_EACH_ROWLOG(X, dictMode, 5) \
+    ZSTD_FOR_EACH_ROWLOG(X, dictMode, 6)
+
+#define ZSTD_FOR_EACH_MLS(X, dictMode) \
+    X(dictMode, 4) \
+    X(dictMode, 5) \
+    X(dictMode, 6)
+
+#define ZSTD_FOR_EACH_DICT_MODE(X, ...) \
+    X(__VA_ARGS__, noDict) \
+    X(__VA_ARGS__, extDict) \
+    X(__VA_ARGS__, dictMatchState) \
+    X(__VA_ARGS__, dedicatedDictSearch)
+
+/* Generate row search fns for each combination of (dictMode, mls, rowLog) */
+ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS_ROWLOG, GEN_ZSTD_ROW_SEARCH_FN)
+/* Generate binary Tree search fns for each combination of (dictMode, mls) */
+ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_BT_SEARCH_FN)
+/* Generate hash chain search fns for each combination of (dictMode, mls) */
+ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_HC_SEARCH_FN)
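
The X-macro stack above is easiest to read by expanding one instance. For dictMode == noDict, the last invocation generates exactly these outlined functions (shown post-expansion; the other three dict modes follow the same pattern):

    FORCE_NOINLINE size_t ZSTD_HcFindBestMatch_noDict_4(
            ZSTD_matchState_t* ms,
            const BYTE* ip, const BYTE* const iLimit,
            size_t* offsetPtr)
    {
        assert(MAX(4, MIN(6, ms->cParams.minMatch)) == 4);
        return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, 4, ZSTD_noDict);
    }
    /* ...plus ZSTD_HcFindBestMatch_noDict_5 and ZSTD_HcFindBestMatch_noDict_6,
     * identical except that the mls constant is 5 or 6. */
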
+
+typedef enum { search_hashChain=0, search_binaryTree=1, search_rowHash=2 } searchMethod_e;
+
+#define GEN_ZSTD_CALL_BT_SEARCH_FN(dictMode, mls) \
+    case mls: \
+        return ZSTD_BT_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr);
+#define GEN_ZSTD_CALL_HC_SEARCH_FN(dictMode, mls) \
+    case mls: \
+        return ZSTD_HC_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr);
+#define GEN_ZSTD_CALL_ROW_SEARCH_FN(dictMode, mls, rowLog) \
+    case rowLog: \
+        return ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)(ms, ip, iend, offsetPtr);
+
+#define ZSTD_SWITCH_MLS(X, dictMode) \
+    switch (mls) { \
+        ZSTD_FOR_EACH_MLS(X, dictMode) \
+    }
+
+#define ZSTD_SWITCH_ROWLOG(dictMode, mls) \
+    case mls: \
+        switch (rowLog) { \
+            ZSTD_FOR_EACH_ROWLOG(GEN_ZSTD_CALL_ROW_SEARCH_FN, dictMode, mls) \
+        } \
+        ZSTD_UNREACHABLE; \
+        break;
+
+#define ZSTD_SWITCH_SEARCH_METHOD(dictMode) \
+    switch (searchMethod) { \
+        case search_hashChain: \
+            ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_HC_SEARCH_FN, dictMode) \
+            break; \
+        case search_binaryTree: \
+            ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_BT_SEARCH_FN, dictMode) \
+            break; \
+        case search_rowHash: \
+            ZSTD_SWITCH_MLS(ZSTD_SWITCH_ROWLOG, dictMode) \
+            break; \
+    } \
+    ZSTD_UNREACHABLE;
+
+/**
+ * Searches for the longest match at @p ip.
+ * Dispatches to the correct implementation function based on the
+ * (searchMethod, dictMode, mls, rowLog). We use switch statements
+ * here instead of using an indirect function call through a function
+ * pointer because after Spectre and Meltdown mitigations, indirect
+ * function calls can be very costly, especially in the kernel.
+ *
+ * NOTE: dictMode and searchMethod should be templated, so those switch
+ * statements should be optimized out. Only the mls & rowLog switches
+ * should be left.
+ *
+ * @param ms The match state.
+ * @param ip The position to search at.
+ * @param iend The end of the input data.
+ * @param[out] offsetPtr Stores the match offset into this pointer.
+ * @param mls The minimum search length, in the range [4, 6].
+ * @param rowLog The row log (if applicable), in the range [4, 6].
+ * @param searchMethod The search method to use (templated).
+ * @param dictMode The dictMode (templated).
+ *
+ * @returns The length of the longest match found, or < mls if no match is found.
+ * If a match is found its offset is stored in @p offsetPtr.
+ */
+FORCE_INLINE_TEMPLATE size_t ZSTD_searchMax(
+    ZSTD_matchState_t* ms,
+    const BYTE* ip,
+    const BYTE* iend,
+    size_t* offsetPtr,
+    U32 const mls,
+    U32 const rowLog,
+    searchMethod_e const searchMethod,
+    ZSTD_dictMode_e const dictMode)
+{
+    if (dictMode == ZSTD_noDict) {
+        ZSTD_SWITCH_SEARCH_METHOD(noDict)
+    } else if (dictMode == ZSTD_extDict) {
+        ZSTD_SWITCH_SEARCH_METHOD(extDict)
+    } else if (dictMode == ZSTD_dictMatchState) {
+        ZSTD_SWITCH_SEARCH_METHOD(dictMatchState)
+    } else if (dictMode == ZSTD_dedicatedDictSearch) {
+        ZSTD_SWITCH_SEARCH_METHOD(dedicatedDictSearch)
+    }
+    ZSTD_UNREACHABLE;
+    return 0;
+}
+
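The dispatch comment above is the heart of this refactor: the removed code (further down in this diff) reached its search function through a table of function pointers, while ZSTD_searchMax() reaches the same concrete function through switches on parameters that are compile-time constants at every call site, so the optimizer collapses them into direct calls. A toy contrast of the two styles — implA/implB are hypothetical stand-ins, not zstd functions:

    typedef size_t (*search_f)(void);

    static size_t implA(void) { return 1; }   /* hypothetical */
    static size_t implB(void) { return 2; }   /* hypothetical */

    /* Old style: one indirect call; costly under Spectre/Meltdown
     * mitigations such as retpolines. */
    static size_t dispatch_indirect(const search_f table[2], int method)
    {
        return table[method]();
    }

    /* New style: when `method` is a compile-time constant at the call
     * site (as searchMethod and dictMode are here), the switch folds
     * away and the call becomes direct. */
    static size_t dispatch_switch(int method)
    {
        switch (method) {
            case 0:  return implA();
            case 1:  return implB();
            default: return 0;
        }
    }
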
  /* *******************************
  * Common parser - lazy strategy
  *********************************/
-typedef enum { search_hashChain, search_binaryTree } searchMethod_e;
 
 FORCE_INLINE_TEMPLATE size_t
 ZSTD_compressBlock_lazy_generic(
@@ -863,42 +1501,15 @@ ZSTD_compressBlock_lazy_generic(
     const BYTE* ip = istart;
     const BYTE* anchor = istart;
     const BYTE* const iend = istart + srcSize;
-    const BYTE* const ilimit = iend - 8;
+    const BYTE* const ilimit = (searchMethod == search_rowHash) ? iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8;
     const BYTE* const base = ms->window.base;
     const U32 prefixLowestIndex = ms->window.dictLimit;
     const BYTE* const prefixLowest = base + prefixLowestIndex;
+    const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6);
+    const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);
 
-    typedef size_t (*searchMax_f)(
-                        ZSTD_matchState_t* ms,
-                        const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
-
-    /**
-     * This table is indexed first by the four ZSTD_dictMode_e values, and then
-     * by the two searchMethod_e values. NULLs are placed for configurations
-     * that should never occur (extDict modes go to the other implementation
-     * below and there is no DDSS for binary tree search yet).
-     */
-    const searchMax_f searchFuncs[4][2] = {
-        {
-            ZSTD_HcFindBestMatch_selectMLS,
-            ZSTD_BtFindBestMatch_selectMLS
-        },
-        {
-            NULL,
-            NULL
-        },
-        {
-            ZSTD_HcFindBestMatch_dictMatchState_selectMLS,
-            ZSTD_BtFindBestMatch_dictMatchState_selectMLS
-        },
-        {
-            ZSTD_HcFindBestMatch_dedicatedDictSearch_selectMLS,
-            NULL
-        }
-    };
-
-    searchMax_f const searchMax = searchFuncs[dictMode][searchMethod == search_binaryTree];
-    U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;
+    U32 offset_1 = rep[0], offset_2 = rep[1];
+    U32 offsetSaved1 = 0, offsetSaved2 = 0;
 
     const int isDMS = dictMode == ZSTD_dictMatchState;
     const int isDDS = dictMode == ZSTD_dedicatedDictSearch;
@@ -913,18 +1524,14 @@ ZSTD_compressBlock_lazy_generic(
                                      0;
     const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictLowest));
 
-    assert(searchMax != NULL);
-
-    DEBUGLOG(5, "ZSTD_compressBlock_lazy_generic (dictMode=%u)", (U32)dictMode);
-
-    /* init */
+    DEBUGLOG(5, "ZSTD_compressBlock_lazy_generic (dictMode=%u) (searchFunc=%u)", (U32)dictMode, (U32)searchMethod);
     ip += (dictAndPrefixLength == 0);
     if (dictMode == ZSTD_noDict) {
         U32 const curr = (U32)(ip - base);
         U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, ms->cParams.windowLog);
         U32 const maxRep = curr - windowLow;
-        if (offset_2 > maxRep) savedOffset = offset_2, offset_2 = 0;
-        if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;
+        if (offset_2 > maxRep) offsetSaved2 = offset_2, offset_2 = 0;
+        if (offset_1 > maxRep) offsetSaved1 = offset_1, offset_1 = 0;
     }
     if (isDxS) {
         /* dictMatchState repCode checks don't currently handle repCode == 0
@@ -933,6 +1540,13 @@ ZSTD_compressBlock_lazy_generic(
         assert(offset_2 <= dictAndPrefixLength);
     }
 
+    /* Reset the lazy skipping state */
+    ms->lazySkipping = 0;
+
+    if (searchMethod == search_rowHash) {
+        ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit);
+    }
+
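Worth noting alongside this hunk: the row-hash matcher keeps a small cache of hashes computed ahead of ip, which is why ilimit was pulled in by ZSTD_ROW_HASH_CACHE_SIZE at the top of this function and why the cache is primed here before entering the match loop — the pre-hashing must never read past the end of the input. (ZSTD_ROW_HASH_CACHE_SIZE is defined in zstd_lazy.h, outside this diff; in this release it appears to be 8, stated here as an assumption.)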
     /* Match Loop */
 #if defined(__GNUC__) && defined(__x86_64__)
     /* I've measured random a 5% speed loss on levels 5 & 6 (greedy) when the
@@ -942,8 +1556,9 @@ ZSTD_compressBlock_lazy_generic(
 #endif
     while (ip < ilimit) {
         size_t matchLength=0;
-        size_t offset=0;
+        size_t offBase = REPCODE1_TO_OFFBASE;
         const BYTE* start=ip+1;
+        DEBUGLOG(7, "search baseline (depth 0)");
 
         /* check repCode */
         if (isDxS) {
@@ -966,28 +1581,38 @@ ZSTD_compressBlock_lazy_generic(
         }
 
         /* first search (depth 0) */
-        { size_t offsetFound = 999999999;
-          size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
+        { size_t offbaseFound = 999999999;
+          size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offbaseFound, mls, rowLog, searchMethod, dictMode);
           if (ml2 > matchLength)
-              matchLength = ml2, start = ip, offset=offsetFound;
+              matchLength = ml2, start = ip, offBase = offbaseFound;
         }
 
         if (matchLength < 4) {
-            ip += ((ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections */
+            size_t const step = ((size_t)(ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections */;
+            ip += step;
+            /* Enter the lazy skipping mode once we are skipping more than 8 bytes at a time.
+             * In this mode we stop inserting every position into our tables, and only insert
+             * positions that we search, which is one in step positions.
+             * The exact cutoff is flexible, I've just chosen a number that is reasonably high,
+             * so we minimize the compression ratio loss in "normal" scenarios. This mode gets
+             * triggered once we've gone 2KB without finding any matches.
+             */
+            ms->lazySkipping = step > kLazySkippingStep;
             continue;
         }
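The "2KB" in the comment above follows directly from the constants, assuming kSearchStrength == 8 and kLazySkippingStep == 8 (both defined outside this hunk): step = ((ip - anchor) >> 8) + 1 first exceeds 8 when ip - anchor reaches 8 << 8 = 2048 bytes, i.e. after 2KB of input without a match.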
 
         /* let's try to find a better solution */
         if (depth>=1)
         while (ip<ilimit) {
+            DEBUGLOG(7, "search depth 1");
             ip ++;
             if ( (dictMode == ZSTD_noDict)
-              && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+              && (offBase) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
                 size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
                 int const gain2 = (int)(mlRep * 3);
-                int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+                int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offBase) + 1);
                 if ((mlRep >= 4) && (gain2 > gain1))
-                    matchLength = mlRep, offset = 0, start = ip;
+                    matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip;
             }
             if (isDxS) {
                 const U32 repIndex = (U32)(ip - base) - offset_1;
@@ -999,30 +1624,31 @@ ZSTD_compressBlock_lazy_generic(
                     const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
                     size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
                     int const gain2 = (int)(mlRep * 3);
-                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offBase) + 1);
                     if ((mlRep >= 4) && (gain2 > gain1))
-                        matchLength = mlRep, offset = 0, start = ip;
+                        matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip;
                 }
             }
-            { size_t offset2=999999999;
-              size_t const ml2 = searchMax(ms, ip, iend, &offset2);
-              int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
-              int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
+            { size_t ofbCandidate=999999999;
+              size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, dictMode);
+              int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate));   /* raw approx */
+              int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 4);
               if ((ml2 >= 4) && (gain2 > gain1)) {
-                  matchLength = ml2, offset = offset2, start = ip;
+                  matchLength = ml2, offBase = ofbCandidate, start = ip;
                   continue;   /* search a better one */
             } }
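The gain arithmetic above weighs match length against the bit-cost of encoding the offset, with a +4 bias protecting the incumbent. A worked example under the offBase convention sketched earlier: an incumbent of matchLength 8 at offBase 515 (real offset 512) scores gain1 = 8*4 - highbit32(515) + 4 = 32 - 9 + 4 = 27. A challenger of the same length at a much closer offBase 67 scores gain2 = 32 - 6 = 26 and is rejected; it takes a longer match (ml2 = 9, gain2 = 36 - 6 = 30) to displace the incumbent. Repcode candidates, checked just before, are cheaper still since their offset cost is implicit.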
 
         /* let's find an even better one */
         if ((depth==2) && (ip<ilimit)) {
+            DEBUGLOG(7, "search depth 2");
             ip ++;
             if ( (dictMode == ZSTD_noDict)
-              && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
+              && (offBase) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
                 size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
                 int const gain2 = (int)(mlRep * 4);
-                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
+                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 1);
                 if ((mlRep >= 4) && (gain2 > gain1))
-                    matchLength = mlRep, offset = 0, start = ip;
+                    matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip;
             }
             if (isDxS) {
                 const U32 repIndex = (U32)(ip - base) - offset_1;
@@ -1034,48 +1660,54 @@ ZSTD_compressBlock_lazy_generic(
                     const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
                     size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
                     int const gain2 = (int)(mlRep * 4);
-                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
+                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 1);
                     if ((mlRep >= 4) && (gain2 > gain1))
-                        matchLength = mlRep, offset = 0, start = ip;
+                        matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip;
                 }
             }
-            { size_t offset2=999999999;
-              size_t const ml2 = searchMax(ms, ip, iend, &offset2);
-              int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
-              int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
+            { size_t ofbCandidate=999999999;
+              size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, dictMode);
+              int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate));   /* raw approx */
+              int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 7);
               if ((ml2 >= 4) && (gain2 > gain1)) {
-                  matchLength = ml2, offset = offset2, start = ip;
+                  matchLength = ml2, offBase = ofbCandidate, start = ip;
                   continue;
             } } }
             break;   /* nothing found : store previous solution */
         }
 
         /* NOTE:
-         * start[-offset+ZSTD_REP_MOVE-1] is undefined behavior.
-         * (-offset+ZSTD_REP_MOVE-1) is unsigned, and is added to start, which
-         * overflows the pointer, which is undefined behavior.
+         * Pay attention that `start[-value]` can lead to strange undefined behavior
+         * notably if `value` is unsigned, resulting in a large positive `-value`.
         */
         /* catch up */
-        if (offset) {
+        if (OFFBASE_IS_OFFSET(offBase)) {
             if (dictMode == ZSTD_noDict) {
-                while ( ((start > anchor) & (start - (offset-ZSTD_REP_MOVE) > prefixLowest))
-                     && (start[-1] == (start-(offset-ZSTD_REP_MOVE))[-1]) )   /* only search for offset within prefix */
+                while ( ((start > anchor) & (start - OFFBASE_TO_OFFSET(offBase) > prefixLowest))
+                     && (start[-1] == (start-OFFBASE_TO_OFFSET(offBase))[-1]) )   /* only search for offset within prefix */
                     { start--; matchLength++; }
             }
             if (isDxS) {
-                U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
+                U32 const matchIndex = (U32)((size_t)(start-base) - OFFBASE_TO_OFFSET(offBase));
                 const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex;
                 const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest;
                 while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }   /* catch up */
             }
-            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
+            offset_2 = offset_1; offset_1 = (U32)OFFBASE_TO_OFFSET(offBase);
         }
         /* store sequence */
 _storeSequence:
-        { size_t const litLength = start - anchor;
-          ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);
+        { size_t const litLength = (size_t)(start - anchor);
+          ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offBase, matchLength);
           anchor = ip = start + matchLength;
         }
+        if (ms->lazySkipping) {
+            /* We've found a match, disable lazy skipping mode, and refill the hash cache. */
+            if (searchMethod == search_rowHash) {
+                ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit);
+            }
+            ms->lazySkipping = 0;
+        }
 
         /* check immediate repcode */
         if (isDxS) {
@@ -1089,8 +1721,8 @@ _storeSequence:
                 && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
                 const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend;
                 matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;
-                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset_2 <=> offset_1 */
-                ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
+                offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase;   /* swap offset_2 <=> offset_1 */
+                ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength);
                 ip += matchLength;
                 anchor = ip;
                 continue;
@@ -1104,16 +1736,20 @@ _storeSequence:
                 && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
                 /* store sequence */
                 matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
-                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap repcodes */
-                ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
+                offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase;   /* swap repcodes */
+                ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength);
                 ip += matchLength;
                 anchor = ip;
                 continue;   /* faster when present ... (?) */
     }   }   }
 
-    /* Save reps for next block */
-    rep[0] = offset_1 ? offset_1 : savedOffset;
-    rep[1] = offset_2 ? offset_2 : savedOffset;
+    /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0),
+     * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */
+    offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2;
+
+    /* save reps for next block */
+    rep[0] = offset_1 ? offset_1 : offsetSaved1;
+    rep[1] = offset_2 ? offset_2 : offsetSaved2;
 
     /* Return the last literals size */
     return (size_t)(iend - anchor);
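The rotation above covers a subtle case: rep offsets that were invalid at block start are parked in offsetSaved1/offsetSaved2 rather than used. If the block then pushed a fresh offset into offset_1, the history effectively shifted by one, so the parked value now belongs in the rep[1] slot — hence copying offsetSaved1 into offsetSaved2 before the final save. (This reading follows the comment's pointer to ZSTD_compressBlock_fast_noDict; the fuller rationale lives there, outside this diff.)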
@@ -1198,6 +1834,70 @@ size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
     return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch);
 }
 
+/* Row-based matchfinder */
+size_t ZSTD_compressBlock_lazy2_row(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_lazy_row(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_greedy_row(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_noDict);
+}
+
+size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_lazy_dictMatchState_row(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dictMatchState);
+}
+
+size_t ZSTD_compressBlock_greedy_dictMatchState_row(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dictMatchState);
+}
+
+
+size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dedicatedDictSearch);
+}
+
+size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dedicatedDictSearch);
+}
+
+size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dedicatedDictSearch);
+}
 
 FORCE_INLINE_TEMPLATE
 size_t ZSTD_compressBlock_lazy_extDict_generic(
@@ -1210,7 +1910,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
     const BYTE* ip = istart;
     const BYTE* anchor = istart;
     const BYTE* const iend = istart + srcSize;
-    const BYTE* const ilimit = iend - 8;
+    const BYTE* const ilimit = searchMethod == search_rowHash ? iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8;
     const BYTE* const base = ms->window.base;
     const U32 dictLimit = ms->window.dictLimit;
     const BYTE* const prefixStart = base + dictLimit;
@@ -1218,18 +1918,21 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
     const BYTE* const dictEnd = dictBase + dictLimit;
     const BYTE* const dictStart = dictBase + ms->window.lowLimit;
     const U32 windowLog = ms->cParams.windowLog;
-
-    typedef size_t (*searchMax_f)(
-                        ZSTD_matchState_t* ms,
-                        const BYTE* ip, const BYTE* iLimit, size_t* offsetPtr);
-    searchMax_f searchMax = searchMethod==search_binaryTree ? ZSTD_BtFindBestMatch_extDict_selectMLS : ZSTD_HcFindBestMatch_extDict_selectMLS;
+    const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6);
+    const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);
 
     U32 offset_1 = rep[0], offset_2 = rep[1];
 
-    DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic");
+    DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic (searchFunc=%u)", (U32)searchMethod);
+
+    /* Reset the lazy skipping state */
+    ms->lazySkipping = 0;
 
     /* init */
     ip += (ip == prefixStart);
+    if (searchMethod == search_rowHash) {
+        ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit);
+    }
 
     /* Match Loop */
 #if defined(__GNUC__) && defined(__x86_64__)
@@ -1240,7 +1943,7 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
 #endif
     while (ip < ilimit) {
         size_t matchLength=0;
-        size_t offset=0;
+        size_t offBase = REPCODE1_TO_OFFBASE;
         const BYTE* start=ip+1;
         U32 curr = (U32)(ip-base);
 
@@ -1249,7 +1952,8 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
         const U32 repIndex = (U32)(curr+1 - offset_1);
         const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
         const BYTE* const repMatch = repBase + repIndex;
-        if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow))   /* intentional overflow */
+        if ( ((U32)((dictLimit-1) - repIndex) >= 3)   /* intentional overflow */
+           & (offset_1 <= curr+1 - windowLow) )   /* note: we are searching at curr+1 */
         if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
             /* repcode detected we should take it */
             const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
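The rewritten guard above is the overflow-safe form of the old one. repIndex = curr+1 - offset_1 wraps when offset_1 exceeds curr+1, so comparing repIndex directly against windowLow could pass spuriously; testing offset_1 <= curr+1 - windowLow checks the same window condition (windowLow <= repIndex < curr+1, as the later hunks' comments phrase it) without ever forming an underflowed index, and it also admits repIndex == windowLow, which the old strict comparison missed.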
@@ -1258,14 +1962,23 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
         } }
 
         /* first search (depth 0) */
-        { size_t offsetFound = 999999999;
-          size_t const ml2 = searchMax(ms, ip, iend, &offsetFound);
+        { size_t ofbCandidate = 999999999;
+          size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict);
           if (ml2 > matchLength)
-              matchLength = ml2, start = ip, offset=offsetFound;
+              matchLength = ml2, start = ip, offBase = ofbCandidate;
         }
 
-        if (matchLength < 4) {
-            ip += ((ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections */
+        if (matchLength < 4) {
+            size_t const step = ((size_t)(ip-anchor) >> kSearchStrength);
+            ip += step + 1;   /* jump faster over incompressible sections */
+            /* Enter the lazy skipping mode once we are skipping more than 8 bytes at a time.
+             * In this mode we stop inserting every position into our tables, and only insert
+             * positions that we search, which is one in step positions.
+             * The exact cutoff is flexible, I've just chosen a number that is reasonably high,
+             * so we minimize the compression ratio loss in "normal" scenarios. This mode gets
+             * triggered once we've gone 2KB without finding any matches.
+             */
+            ms->lazySkipping = step > kLazySkippingStep;
             continue;
         }
 
@@ -1275,29 +1988,30 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
             ip ++;
             curr++;
             /* check repCode */
-            if (offset) {
+            if (offBase) {
                 const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
                 const U32 repIndex = (U32)(curr - offset_1);
                 const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
                 const BYTE* const repMatch = repBase + repIndex;
-                if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow))   /* intentional overflow */
+                if ( ((U32)((dictLimit-1) - repIndex) >= 3)   /* intentional overflow : do not test positions overlapping 2 memory segments */
+                   & (offset_1 <= curr - windowLow) )   /* equivalent to `curr > repIndex >= windowLow` */
                 if (MEM_read32(ip) == MEM_read32(repMatch)) {
                     /* repcode detected */
                     const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
                     size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                     int const gain2 = (int)(repLength * 3);
-                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offset+1) + 1);
+                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offBase) + 1);
                     if ((repLength >= 4) && (gain2 > gain1))
-                        matchLength = repLength, offset = 0, start = ip;
+                        matchLength = repLength, offBase = REPCODE1_TO_OFFBASE, start = ip;
             }   }
 
             /* search match, depth 1 */
-            { size_t offset2=999999999;
-              size_t const ml2 = searchMax(ms, ip, iend, &offset2);
-              int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
-              int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 4);
+            { size_t ofbCandidate = 999999999;
+              size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict);
+              int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate));   /* raw approx */
+              int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 4);
               if ((ml2 >= 4) && (gain2 > gain1)) {
-                  matchLength = ml2, offset = offset2, start = ip;
+                  matchLength = ml2, offBase = ofbCandidate, start = ip;
                   continue;   /* search a better one */
             } }
 
@@ -1306,49 +2020,57 @@ size_t ZSTD_compressBlock_lazy_extDict_generic(
             ip ++;
             curr++;
             /* check repCode */
-            if (offset) {
+            if (offBase) {
                 const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
                 const U32 repIndex = (U32)(curr - offset_1);
                 const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
                 const BYTE* const repMatch = repBase + repIndex;
-                if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow))   /* intentional overflow */
+                if ( ((U32)((dictLimit-1) - repIndex) >= 3)   /* intentional overflow : do not test positions overlapping 2 memory segments */
+                   & (offset_1 <= curr - windowLow) )   /* equivalent to `curr > repIndex >= windowLow` */
                 if (MEM_read32(ip) == MEM_read32(repMatch)) {
                     /* repcode detected */
                     const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
                     size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                     int const gain2 = (int)(repLength * 4);
-                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 1);
+                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 1);
                     if ((repLength >= 4) && (gain2 > gain1))
-                        matchLength = repLength, offset = 0, start = ip;
+                        matchLength = repLength, offBase = REPCODE1_TO_OFFBASE, start = ip;
             }   }
 
             /* search match, depth 2 */
-            { size_t offset2=999999999;
-              size_t const ml2 = searchMax(ms, ip, iend, &offset2);
-              int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)offset2+1));   /* raw approx */
-              int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offset+1) + 7);
+            { size_t ofbCandidate = 999999999;
+              size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict);
+              int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate));   /* raw approx */
+              int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 7);
               if ((ml2 >= 4) && (gain2 > gain1)) {
-                  matchLength = ml2, offset = offset2, start = ip;
+                  matchLength = ml2, offBase = ofbCandidate, start = ip;
                   continue;
             } } }
             break;   /* nothing found : store previous solution */
         }
 
         /* catch up */
-        if (offset) {
-            U32 const matchIndex = (U32)((start-base) - (offset - ZSTD_REP_MOVE));
+        if (OFFBASE_IS_OFFSET(offBase)) {
+            U32 const matchIndex = (U32)((size_t)(start-base) - OFFBASE_TO_OFFSET(offBase));
             const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
             const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
             while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }   /* catch up */
-            offset_2 = offset_1; offset_1 = (U32)(offset - ZSTD_REP_MOVE);
+            offset_2 = offset_1; offset_1 = (U32)OFFBASE_TO_OFFSET(offBase);
         }
 
         /* store sequence */
 _storeSequence:
-        { size_t const litLength = start - anchor;
-          ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offset, matchLength-MINMATCH);
+        { size_t const litLength = (size_t)(start - anchor);
+          ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offBase, matchLength);
           anchor = ip = start + matchLength;
         }
+        if (ms->lazySkipping) {
+            /* We've found a match, disable lazy skipping mode, and refill the hash cache. */
+            if (searchMethod == search_rowHash) {
+                ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit);
+            }
+            ms->lazySkipping = 0;
+        }
 
         /* check immediate repcode */
         while (ip <= ilimit) {
@@ -1357,13 +2079,14 @@ _storeSequence:
             const U32 repIndex = repCurrent - offset_2;
             const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
             const BYTE* const repMatch = repBase + repIndex;
-            if (((U32)((dictLimit-1) - repIndex) >= 3) & (repIndex > windowLow))   /* intentional overflow */
+            if ( ((U32)((dictLimit-1) - repIndex) >= 3)   /* intentional overflow : do not test positions overlapping 2 memory segments */
+               & (offset_2 <= repCurrent - windowLow) )   /* equivalent to `curr > repIndex >= windowLow` */
            if (MEM_read32(ip) == MEM_read32(repMatch)) {
                 /* repcode detected we should take it */
                 const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
                 matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
-                offset = offset_2; offset_2 = offset_1; offset_1 = (U32)offset;   /* swap offset history */
-                ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, matchLength-MINMATCH);
+                offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase;   /* swap offset history */
+                ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength);
                 ip += matchLength;
                 anchor = ip;
                 continue;   /* faster when present ... (?) */
@@ -1410,3 +2133,25 @@ size_t ZSTD_compressBlock_btlazy2_extDict(
 {
     return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);
 }
+
+size_t ZSTD_compressBlock_greedy_extDict_row(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0);
+}
+
+size_t ZSTD_compressBlock_lazy_extDict_row(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize)
+
+{
+    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1);
+}
+
+size_t ZSTD_compressBlock_lazy2_extDict_row(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize)
+{
+    return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2);
+}