zstdlib 0.14.0-x86-mingw32 → 0.15.0-x86-mingw32

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. checksums.yaml +4 -4
  2. data/CHANGES.md +5 -0
  3. data/ext/zstdlib_c/extconf.rb +1 -1
  4. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/common/bits.h +92 -87
  5. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/common/bitstream.h +26 -29
  6. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/common/compiler.h +36 -22
  7. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/common/cpu.h +1 -1
  8. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/common/debug.h +0 -9
  9. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/common/error_private.c +1 -0
  10. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/common/error_private.h +0 -10
  11. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/common/fse.h +2 -17
  12. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/common/fse_decompress.c +2 -0
  13. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/common/huf.h +0 -9
  14. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/common/mem.h +7 -11
  15. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/common/pool.h +0 -9
  16. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/common/portability_macros.h +22 -9
  17. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/common/threading.h +0 -8
  18. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/common/xxhash.h +93 -19
  19. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/common/zstd_deps.h +12 -0
  20. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/common/zstd_internal.h +1 -69
  21. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/common/zstd_trace.h +5 -12
  22. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/hist.c +10 -0
  23. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/hist.h +7 -0
  24. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/zstd_compress.c +1057 -367
  25. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/zstd_compress_internal.h +227 -125
  26. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/zstd_compress_literals.c +1 -1
  27. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/zstd_compress_sequences.c +7 -7
  28. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/zstd_compress_sequences.h +7 -6
  29. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/zstd_compress_superblock.c +17 -17
  30. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/zstd_cwksp.h +41 -24
  31. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/zstd_double_fast.c +58 -50
  32. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/zstd_double_fast.h +4 -12
  33. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/zstd_fast.c +91 -74
  34. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/zstd_fast.h +4 -12
  35. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/zstd_lazy.c +64 -64
  36. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/zstd_lazy.h +30 -39
  37. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/zstd_ldm.c +48 -33
  38. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/zstd_ldm.h +6 -14
  39. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/zstd_opt.c +55 -51
  40. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/zstd_opt.h +8 -16
  41. data/ext/zstdlib_c/zstd-1.5.7/lib/compress/zstd_preSplit.c +238 -0
  42. data/ext/zstdlib_c/zstd-1.5.7/lib/compress/zstd_preSplit.h +33 -0
  43. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/zstdmt_compress.c +134 -93
  44. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/zstdmt_compress.h +4 -15
  45. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/decompress/huf_decompress_amd64.S +10 -3
  46. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/decompress/zstd_decompress.c +14 -11
  47. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/decompress/zstd_decompress_block.c +6 -12
  48. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/decompress/zstd_decompress_internal.h +5 -5
  49. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/zdict.h +15 -8
  50. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/zstd.h +241 -132
  51. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/zstd_errors.h +1 -8
  52. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/zlibWrapper/gzwrite.c +2 -1
  53. data/lib/2.4/zstdlib_c.so +0 -0
  54. data/lib/2.5/zstdlib_c.so +0 -0
  55. data/lib/2.6/zstdlib_c.so +0 -0
  56. data/lib/2.7/zstdlib_c.so +0 -0
  57. data/lib/3.0/zstdlib_c.so +0 -0
  58. data/lib/3.1/zstdlib_c.so +0 -0
  59. data/lib/3.2/zstdlib_c.so +0 -0
  60. data/lib/3.3/zstdlib_c.so +0 -0
  61. data/lib/3.4/zstdlib_c.so +0 -0
  62. metadata +75 -73
  63. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/common/allocations.h +0 -0
  64. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/common/debug.c +0 -0
  65. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/common/entropy_common.c +0 -0
  66. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/common/pool.c +0 -0
  67. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/common/threading.c +0 -0
  68. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/common/xxhash.c +0 -0
  69. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/common/zstd_common.c +0 -0
  70. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/clevels.h +0 -0
  71. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/fse_compress.c +0 -0
  72. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/huf_compress.c +0 -0
  73. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/zstd_compress_literals.h +0 -0
  74. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/zstd_compress_superblock.h +0 -0
  75. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/zstd_ldm_geartab.h +0 -0
  76. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/decompress/huf_decompress.c +0 -0
  77. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/decompress/zstd_ddict.c +0 -0
  78. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/decompress/zstd_ddict.h +0 -0
  79. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/decompress/zstd_decompress_block.h +0 -0
  80. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/zlibWrapper/gzclose.c +0 -0
  81. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/zlibWrapper/gzcompatibility.h +0 -0
  82. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/zlibWrapper/gzguts.h +0 -0
  83. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/zlibWrapper/gzlib.c +0 -0
  84. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/zlibWrapper/gzread.c +0 -0
  85. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/zlibWrapper/zstd_zlibwrapper.c +0 -0
  86. data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/zlibWrapper/zstd_zlibwrapper.h +5 -5
data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/zstd_compress_sequences.c

@@ -153,13 +153,13 @@ size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
     return cost >> 8;
 }
 
-symbolEncodingType_e
+SymbolEncodingType_e
 ZSTD_selectEncodingType(
         FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
         size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
         FSE_CTable const* prevCTable,
         short const* defaultNorm, U32 defaultNormLog,
-        ZSTD_defaultPolicy_e const isDefaultAllowed,
+        ZSTD_DefaultPolicy_e const isDefaultAllowed,
         ZSTD_strategy const strategy)
 {
     ZSTD_STATIC_ASSERT(ZSTD_defaultDisallowed == 0 && ZSTD_defaultAllowed != 0);
@@ -241,7 +241,7 @@ typedef struct {
 
 size_t
 ZSTD_buildCTable(void* dst, size_t dstCapacity,
-                FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
+                FSE_CTable* nextCTable, U32 FSELog, SymbolEncodingType_e type,
                 unsigned* count, U32 max,
                 const BYTE* codeTable, size_t nbSeq,
                 const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
@@ -293,7 +293,7 @@ ZSTD_encodeSequences_body(
             FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
             FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
             FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
-            seqDef const* sequences, size_t nbSeq, int longOffsets)
+            SeqDef const* sequences, size_t nbSeq, int longOffsets)
 {
     BIT_CStream_t blockStream;
     FSE_CState_t  stateMatchLength;
@@ -387,7 +387,7 @@ ZSTD_encodeSequences_default(
             FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
             FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
             FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
-            seqDef const* sequences, size_t nbSeq, int longOffsets)
+            SeqDef const* sequences, size_t nbSeq, int longOffsets)
 {
     return ZSTD_encodeSequences_body(dst, dstCapacity,
                                     CTable_MatchLength, mlCodeTable,
@@ -405,7 +405,7 @@ ZSTD_encodeSequences_bmi2(
             FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
             FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
             FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
-            seqDef const* sequences, size_t nbSeq, int longOffsets)
+            SeqDef const* sequences, size_t nbSeq, int longOffsets)
 {
     return ZSTD_encodeSequences_body(dst, dstCapacity,
                                     CTable_MatchLength, mlCodeTable,
@@ -421,7 +421,7 @@ size_t ZSTD_encodeSequences(
             FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
             FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
             FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
-            seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
+            SeqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2)
 {
     DEBUGLOG(5, "ZSTD_encodeSequences: dstCapacity = %u", (unsigned)dstCapacity);
 #if DYNAMIC_BMI2
data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/zstd_compress_sequences.h

@@ -11,26 +11,27 @@
 #ifndef ZSTD_COMPRESS_SEQUENCES_H
 #define ZSTD_COMPRESS_SEQUENCES_H
 
+#include "zstd_compress_internal.h" /* SeqDef */
 #include "../common/fse.h" /* FSE_repeat, FSE_CTable */
-#include "../common/zstd_internal.h" /* symbolEncodingType_e, ZSTD_strategy */
+#include "../common/zstd_internal.h" /* SymbolEncodingType_e, ZSTD_strategy */
 
 typedef enum {
     ZSTD_defaultDisallowed = 0,
     ZSTD_defaultAllowed = 1
-} ZSTD_defaultPolicy_e;
+} ZSTD_DefaultPolicy_e;
 
-symbolEncodingType_e
+SymbolEncodingType_e
 ZSTD_selectEncodingType(
         FSE_repeat* repeatMode, unsigned const* count, unsigned const max,
         size_t const mostFrequent, size_t nbSeq, unsigned const FSELog,
         FSE_CTable const* prevCTable,
         short const* defaultNorm, U32 defaultNormLog,
-        ZSTD_defaultPolicy_e const isDefaultAllowed,
+        ZSTD_DefaultPolicy_e const isDefaultAllowed,
         ZSTD_strategy const strategy);
 
 size_t
 ZSTD_buildCTable(void* dst, size_t dstCapacity,
-                FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
+                FSE_CTable* nextCTable, U32 FSELog, SymbolEncodingType_e type,
                 unsigned* count, U32 max,
                 const BYTE* codeTable, size_t nbSeq,
                 const S16* defaultNorm, U32 defaultNormLog, U32 defaultMax,
@@ -42,7 +43,7 @@ size_t ZSTD_encodeSequences(
             FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
             FSE_CTable const* CTable_OffsetBits, BYTE const* ofCodeTable,
             FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
-            seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2);
+            SeqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2);
 
 size_t ZSTD_fseBitCost(
     FSE_CTable const* ctable,
data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/zstd_compress_superblock.c

@@ -51,7 +51,7 @@ ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
     BYTE* const oend = ostart + dstSize;
     BYTE* op = ostart + lhSize;
     U32 const singleStream = lhSize == 3;
-    symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
+    SymbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
     size_t cLitSize = 0;
 
     DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy);
@@ -126,15 +126,15 @@ ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
 }
 
 static size_t
-ZSTD_seqDecompressedSize(seqStore_t const* seqStore,
-                         const seqDef* sequences, size_t nbSeqs,
+ZSTD_seqDecompressedSize(SeqStore_t const* seqStore,
+                         const SeqDef* sequences, size_t nbSeqs,
                          size_t litSize, int lastSubBlock)
 {
     size_t matchLengthSum = 0;
     size_t litLengthSum = 0;
     size_t n;
     for (n=0; n<nbSeqs; n++) {
-        const ZSTD_sequenceLength seqLen = ZSTD_getSequenceLength(seqStore, sequences+n);
+        const ZSTD_SequenceLength seqLen = ZSTD_getSequenceLength(seqStore, sequences+n);
         litLengthSum += seqLen.litLength;
         matchLengthSum += seqLen.matchLength;
     }
@@ -162,7 +162,7 @@ ZSTD_seqDecompressedSize(seqStore_t const* seqStore,
 static size_t
 ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables,
                                 const ZSTD_fseCTablesMetadata_t* fseMetadata,
-                                const seqDef* sequences, size_t nbSeq,
+                                const SeqDef* sequences, size_t nbSeq,
                                 const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
                                 const ZSTD_CCtx_params* cctxParams,
                                 void* dst, size_t dstCapacity,
@@ -262,7 +262,7 @@ ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables,
  *  Or 0 if it failed to compress. */
 static size_t ZSTD_compressSubBlock(const ZSTD_entropyCTables_t* entropy,
                                     const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
-                                    const seqDef* sequences, size_t nbSeq,
+                                    const SeqDef* sequences, size_t nbSeq,
                                     const BYTE* literals, size_t litSize,
                                     const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
                                     const ZSTD_CCtx_params* cctxParams,
@@ -327,7 +327,7 @@ static size_t ZSTD_estimateSubBlockSize_literal(const BYTE* literals, size_t lit
     return 0;
 }
 
-static size_t ZSTD_estimateSubBlockSize_symbolType(symbolEncodingType_e type,
+static size_t ZSTD_estimateSubBlockSize_symbolType(SymbolEncodingType_e type,
                         const BYTE* codeTable, unsigned maxCode,
                         size_t nbSeq, const FSE_CTable* fseCTable,
                         const U8* additionalBits,
@@ -426,7 +426,7 @@ static int ZSTD_needSequenceEntropyTables(ZSTD_fseCTablesMetadata_t const* fseMe
     return 0;
 }
 
-static size_t countLiterals(seqStore_t const* seqStore, const seqDef* sp, size_t seqCount)
+static size_t countLiterals(SeqStore_t const* seqStore, const SeqDef* sp, size_t seqCount)
 {
     size_t n, total = 0;
     assert(sp != NULL);
@@ -439,7 +439,7 @@ static size_t countLiterals(seqStore_t const* seqStore, const seqDef* sp, size_t
 
 #define BYTESCALE 256
 
-static size_t sizeBlockSequences(const seqDef* sp, size_t nbSeqs,
+static size_t sizeBlockSequences(const SeqDef* sp, size_t nbSeqs,
                    size_t targetBudget, size_t avgLitCost, size_t avgSeqCost,
                    int firstSubBlock)
 {
@@ -476,7 +476,7 @@ static size_t sizeBlockSequences(const seqDef* sp, size_t nbSeqs,
  *  Sub-blocks are all compressed, except the last one when beneficial.
  *  @return : compressed size of the super block (which features multiple ZSTD blocks)
  *            or 0 if it failed to compress. */
-static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
+static size_t ZSTD_compressSubBlock_multi(const SeqStore_t* seqStorePtr,
                             const ZSTD_compressedBlockState_t* prevCBlock,
                             ZSTD_compressedBlockState_t* nextCBlock,
                             const ZSTD_entropyCTablesMetadata_t* entropyMetadata,
@@ -486,9 +486,9 @@ static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
                             const int bmi2, U32 lastBlock,
                             void* workspace, size_t wkspSize)
 {
-    const seqDef* const sstart = seqStorePtr->sequencesStart;
-    const seqDef* const send = seqStorePtr->sequences;
-    const seqDef* sp = sstart; /* tracks progresses within seqStorePtr->sequences */
+    const SeqDef* const sstart = seqStorePtr->sequencesStart;
+    const SeqDef* const send = seqStorePtr->sequences;
+    const SeqDef* sp = sstart; /* tracks progresses within seqStorePtr->sequences */
     size_t const nbSeqs = (size_t)(send - sstart);
     const BYTE* const lstart = seqStorePtr->litStart;
     const BYTE* const lend = seqStorePtr->lit;
@@ -647,8 +647,8 @@ static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
             op += cSize;
             /* We have to regenerate the repcodes because we've skipped some sequences */
             if (sp < send) {
-                const seqDef* seq;
-                repcodes_t rep;
+                const SeqDef* seq;
+                Repcodes_t rep;
                 ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep));
                 for (seq = sstart; seq < sp; ++seq) {
                     ZSTD_updateRep(rep.rep, seq->offBase, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
@@ -674,7 +674,7 @@ size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
           &zc->blockState.nextCBlock->entropy,
           &zc->appliedParams,
           &entropyMetadata,
-          zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */), "");
+          zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */), "");
 
     return ZSTD_compressSubBlock_multi(&zc->seqStore,
             zc->blockState.prevCBlock,
@@ -684,5 +684,5 @@ size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
             dst, dstCapacity,
             src, srcSize,
             zc->bmi2, lastBlock,
-            zc->entropyWorkspace, ENTROPY_WORKSPACE_SIZE /* statically allocated in resetCCtx */);
+            zc->tmpWorkspace, zc->tmpWkspSize /* statically allocated in resetCCtx */);
 }
data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/zstd_cwksp.h

@@ -17,10 +17,7 @@
 #include "../common/allocations.h"  /* ZSTD_customMalloc, ZSTD_customFree */
 #include "../common/zstd_internal.h"
 #include "../common/portability_macros.h"
-
-#if defined (__cplusplus)
-extern "C" {
-#endif
+#include "../common/compiler.h" /* ZS2_isPower2 */
 
 /*-*************************************
 *  Constants
@@ -206,9 +203,9 @@ MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
 /**
  * Align must be a power of 2.
  */
-MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
+MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t align) {
     size_t const mask = align - 1;
-    assert((align & mask) == 0);
+    assert(ZSTD_isPower2(align));
     return (size + mask) & ~mask;
 }
@@ -222,7 +219,7 @@ MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
  * to figure out how much space you need for the matchState tables. Everything
  * else is though.
  *
- * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size().
+ * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned64_alloc_size().
  */
 MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
     if (size == 0)
@@ -234,12 +231,16 @@ MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
 #endif
 }
 
+MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size, size_t alignment) {
+    return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, alignment));
+}
+
 /**
  * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
  * Used to determine the number of bytes required for a given "aligned".
  */
-MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
-    return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES));
+MEM_STATIC size_t ZSTD_cwksp_aligned64_alloc_size(size_t size) {
+    return ZSTD_cwksp_aligned_alloc_size(size, ZSTD_CWKSP_ALIGNMENT_BYTES);
 }
 
 /**
@@ -262,7 +263,7 @@ MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
 MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
     size_t const alignBytesMask = alignBytes - 1;
     size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
-    assert((alignBytes & alignBytesMask) == 0);
+    assert(ZSTD_isPower2(alignBytes));
     assert(bytes < alignBytes);
     return bytes;
 }
@@ -271,8 +272,12 @@ MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignByt
  * Returns the initial value for allocStart which is used to determine the position from
  * which we can allocate from the end of the workspace.
  */
-MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws) {
-    return (void*)((size_t)ws->workspaceEnd & ~(ZSTD_CWKSP_ALIGNMENT_BYTES-1));
+MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws)
+{
+    char* endPtr = (char*)ws->workspaceEnd;
+    assert(ZSTD_isPower2(ZSTD_CWKSP_ALIGNMENT_BYTES));
+    endPtr = endPtr - ((size_t)endPtr % ZSTD_CWKSP_ALIGNMENT_BYTES);
+    return (void*)endPtr;
 }
 
 /**
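ZSTD_cwksp_initialAllocStart above now rounds the workspace end pointer down with a modulo instead of an integer cast plus mask; for a power-of-2 alignment the two spellings name the same byte. A hedged sketch of the round-down operation (names are illustrative, not zstd API):

    #include <stdint.h>

    /* Round a pointer DOWN to a power-of-2 boundary `a`. For power-of-2 a,
     * p - ((uintptr_t)p % a) and (uintptr_t)p & ~(a-1) compute the same
     * address; the modulo form keeps the arithmetic in pointer space. */
    static char* align_down(char* p, size_t a)
    {
        return p - ((uintptr_t)p % a);
    }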
@@ -287,7 +292,7 @@ ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
 {
     void* const alloc = (BYTE*)ws->allocStart - bytes;
     void* const bottom = ws->tableEnd;
-    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
+    DEBUGLOG(5, "cwksp: reserving [0x%p]:%zd bytes; %zd bytes remaining",
         alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
     ZSTD_cwksp_assert_internal_consistency(ws);
     assert(alloc >= bottom);
@@ -404,7 +409,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_aligned_init_once(ZSTD_cwksp* ws, size_t byt
 {
     size_t const alignedBytes = ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES);
     void* ptr = ZSTD_cwksp_reserve_internal(ws, alignedBytes, ZSTD_cwksp_alloc_aligned_init_once);
-    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
+    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
     if(ptr && ptr < ws->initOnceStart) {
         /* We assume the memory following the current allocation is either:
          * 1. Not usable as initOnce memory (end of workspace)
@@ -424,11 +429,12 @@ MEM_STATIC void* ZSTD_cwksp_reserve_aligned_init_once(ZSTD_cwksp* ws, size_t byt
 /**
  * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
  */
-MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
+MEM_STATIC void* ZSTD_cwksp_reserve_aligned64(ZSTD_cwksp* ws, size_t bytes)
 {
-    void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
-                                            ZSTD_cwksp_alloc_aligned);
-    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
+    void* const ptr = ZSTD_cwksp_reserve_internal(ws,
+                        ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
+                        ZSTD_cwksp_alloc_aligned);
+    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
     return ptr;
 }
 
@@ -474,7 +480,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
 #endif
 
     assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
-    assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
+    assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
     return alloc;
 }
 
@@ -520,6 +526,20 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
 
     return alloc;
 }
+/**
+ * with alignment control
+ * Note : should happen only once, at workspace first initialization
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_object_aligned(ZSTD_cwksp* ws, size_t byteSize, size_t alignment)
+{
+    size_t const mask = alignment - 1;
+    size_t const surplus = (alignment > sizeof(void*)) ? alignment - sizeof(void*) : 0;
+    void* const start = ZSTD_cwksp_reserve_object(ws, byteSize + surplus);
+    if (start == NULL) return NULL;
+    if (surplus == 0) return start;
+    assert(ZSTD_isPower2(alignment));
+    return (void*)(((size_t)start + surplus) & ~mask);
+}
 
 MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
 {
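The new ZSTD_cwksp_reserve_object_aligned gets stricter alignment out of an allocator that only guarantees sizeof(void*) alignment by over-allocating and rounding the start address up. A self-contained sketch of the same trick on top of malloc (illustrative only, not the zstd implementation; a general-purpose caller would also have to keep the original pointer to free it, which cwksp side-steps because workspace objects are never freed individually):

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>

    static void* alloc_aligned_sketch(size_t byteSize, size_t alignment)
    {
        size_t const mask = alignment - 1;
        size_t const surplus = (alignment > sizeof(void*)) ? alignment - sizeof(void*) : 0;
        void* const start = malloc(byteSize + surplus);
        assert((alignment & mask) == 0);  /* alignment must be a power of 2 */
        if (start == NULL) return NULL;
        if (surplus == 0) return start;
        /* start is already sizeof(void*)-aligned, so rounding start+surplus
         * down to an alignment boundary stays inside the allocated block
         * and leaves at least byteSize usable bytes. */
        return (void*)(((uintptr_t)start + surplus) & ~(uintptr_t)mask);
    }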
@@ -577,7 +597,8 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
  * Invalidates table allocations.
  * All other allocations remain valid.
  */
-MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
+MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws)
+{
     DEBUGLOG(4, "cwksp: clearing tables!");
 
 #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
@@ -741,8 +762,4 @@ MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
     }
 }
 
-#if defined (__cplusplus)
-}
-#endif
-
 #endif  /* ZSTD_CWKSP_H */
data/ext/zstdlib_c/{zstd-1.5.6 → zstd-1.5.7}/lib/compress/zstd_double_fast.c

@@ -15,7 +15,7 @@
 
 static
 ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
-void ZSTD_fillDoubleHashTableForCDict(ZSTD_matchState_t* ms,
+void ZSTD_fillDoubleHashTableForCDict(ZSTD_MatchState_t* ms,
                               void const* end, ZSTD_dictTableLoadMethod_e dtlm)
 {
     const ZSTD_compressionParameters* const cParams = &ms->cParams;
@@ -53,7 +53,7 @@ void ZSTD_fillDoubleHashTableForCDict(ZSTD_matchState_t* ms,
 
 static
 ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
-void ZSTD_fillDoubleHashTableForCCtx(ZSTD_matchState_t* ms,
+void ZSTD_fillDoubleHashTableForCCtx(ZSTD_MatchState_t* ms,
                               void const* end, ZSTD_dictTableLoadMethod_e dtlm)
 {
     const ZSTD_compressionParameters* const cParams = &ms->cParams;
@@ -87,7 +87,7 @@ void ZSTD_fillDoubleHashTableForCCtx(ZSTD_matchState_t* ms,
     }   }
 }
 
-void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
+void ZSTD_fillDoubleHashTable(ZSTD_MatchState_t* ms,
                         const void* const end,
                         ZSTD_dictTableLoadMethod_e dtlm,
                         ZSTD_tableFillPurpose_e tfp)
@@ -103,7 +103,7 @@ void ZSTD_fillDoubleHashTable(ZSTD_matchState_t* ms,
 FORCE_INLINE_TEMPLATE
 ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
 size_t ZSTD_compressBlock_doubleFast_noDict_generic(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize, U32 const mls /* template */)
 {
     ZSTD_compressionParameters const* cParams = &ms->cParams;
@@ -142,9 +142,14 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic(
     const BYTE* matchl0; /* the long match for ip */
     const BYTE* matchs0; /* the short match for ip */
     const BYTE* matchl1; /* the long match for ip1 */
+    const BYTE* matchs0_safe; /* matchs0 or safe address */
 
     const BYTE* ip = istart; /* the current position */
     const BYTE* ip1; /* the next position */
+    /* Array of ~random data, should have low probability of matching data
+     * we load from here instead of from tables, if matchl0/matchl1 are
+     * invalid indices. Used to avoid unpredictable branches. */
+    const BYTE dummy[] = {0x12,0x34,0x56,0x78,0x9a,0xbc,0xde,0xf0,0xe2,0xb4};
 
     DEBUGLOG(5, "ZSTD_compressBlock_doubleFast_noDict_generic");
 
@@ -191,24 +196,29 @@
 
 
         hl1 = ZSTD_hashPtr(ip1, hBitsL, 8);
-        if (idxl0 > prefixLowestIndex) {
+        /* idxl0 > prefixLowestIndex is a (somewhat) unpredictable branch.
+         * However expression below compiles into conditional move. Since
+         * match is unlikely and we only *branch* on idxl0 > prefixLowestIndex
+         * if there is a match, all branches become predictable. */
+        {   const BYTE* const matchl0_safe = ZSTD_selectAddr(idxl0, prefixLowestIndex, matchl0, &dummy[0]);
+
             /* check prefix long match */
-            if (MEM_read64(matchl0) == MEM_read64(ip)) {
+            if (MEM_read64(matchl0_safe) == MEM_read64(ip) && matchl0_safe == matchl0) {
                 mLength = ZSTD_count(ip+8, matchl0+8, iend) + 8;
                 offset = (U32)(ip-matchl0);
                 while (((ip>anchor) & (matchl0>prefixLowest)) && (ip[-1] == matchl0[-1])) { ip--; matchl0--; mLength++; } /* catch up */
                 goto _match_found;
-            }
-        }
+        }   }
 
         idxl1 = hashLong[hl1];
         matchl1 = base + idxl1;
 
-        if (idxs0 > prefixLowestIndex) {
-            /* check prefix short match */
-            if (MEM_read32(matchs0) == MEM_read32(ip)) {
-                goto _search_next_long;
-            }
+        /* Same optimization as matchl0 above */
+        matchs0_safe = ZSTD_selectAddr(idxs0, prefixLowestIndex, matchs0, &dummy[0]);
+
+        /* check prefix short match */
+        if(MEM_read32(matchs0_safe) == MEM_read32(ip) && matchs0_safe == matchs0) {
+            goto _search_next_long;
         }
 
         if (ip1 >= nextStep) {
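Both rewritten checks above share one idea: instead of branching on the unpredictable index-range test before loading from the match candidate, the code always loads through a pointer that is guaranteed valid (the real candidate when the index is in range, the local dummy array otherwise) and re-verifies with the cheap matchl0_safe == matchl0 / matchs0_safe == matchs0 comparison. ZSTD_selectAddr itself is defined elsewhere in the package (zstd_compress_internal.h); a portable sketch of the shape such a selector can take, where a ternary over two already-computed pointers is an easy conditional-move target for the compiler (the real helper may use inline asm on some targets):

    /* Illustrative only: pick `candidate` when idx > lowLimit, else `fallback`. */
    static const unsigned char* select_addr(unsigned idx, unsigned lowLimit,
                                            const unsigned char* candidate,
                                            const unsigned char* fallback)
    {
        return (idx > lowLimit) ? candidate : fallback;
    }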
@@ -242,21 +252,23 @@ _cleanup:
 
 _search_next_long:
 
-        /* check prefix long +1 match */
-        if (idxl1 > prefixLowestIndex) {
-            if (MEM_read64(matchl1) == MEM_read64(ip1)) {
+        /* short match found: let's check for a longer one */
+        mLength = ZSTD_count(ip+4, matchs0+4, iend) + 4;
+        offset = (U32)(ip - matchs0);
+
+        /* check long match at +1 position */
+        if ((idxl1 > prefixLowestIndex) && (MEM_read64(matchl1) == MEM_read64(ip1))) {
+            size_t const l1len = ZSTD_count(ip1+8, matchl1+8, iend) + 8;
+            if (l1len > mLength) {
+                /* use the long match instead */
                 ip = ip1;
-                mLength = ZSTD_count(ip+8, matchl1+8, iend) + 8;
+                mLength = l1len;
                 offset = (U32)(ip-matchl1);
-                while (((ip>anchor) & (matchl1>prefixLowest)) && (ip[-1] == matchl1[-1])) { ip--; matchl1--; mLength++; } /* catch up */
-                goto _match_found;
+                matchs0 = matchl1;
             }
         }
 
-        /* if no long +1 match, explore the short match we found */
-        mLength = ZSTD_count(ip+4, matchs0+4, iend) + 4;
-        offset = (U32)(ip - matchs0);
-        while (((ip>anchor) & (matchs0>prefixLowest)) && (ip[-1] == matchs0[-1])) { ip--; matchs0--; mLength++; } /* catch up */
+        while (((ip>anchor) & (matchs0>prefixLowest)) && (ip[-1] == matchs0[-1])) { ip--; matchs0--; mLength++; } /* complete backward */
 
         /* fall-through */
 
@@ -314,7 +326,7 @@ _match_stored:
 FORCE_INLINE_TEMPLATE
 ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
 size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize,
         U32 const mls /* template */)
 {
@@ -335,7 +347,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
     const BYTE* const ilimit = iend - HASH_READ_SIZE;
     U32 offset_1=rep[0], offset_2=rep[1];
 
-    const ZSTD_matchState_t* const dms = ms->dictMatchState;
+    const ZSTD_MatchState_t* const dms = ms->dictMatchState;
     const ZSTD_compressionParameters* const dictCParams = &dms->cParams;
     const U32* const dictHashLong  = dms->hashTable;
     const U32* const dictHashSmall = dms->chainTable;
@@ -392,7 +404,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
         hashLong[h2] = hashSmall[h] = curr;   /* update hash tables */
 
         /* check repcode */
-        if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
+        if ((ZSTD_index_overlap_check(prefixLowestIndex, repIndex))
             && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
             const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
             mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
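The '-' side of this hunk shows exactly what the new macro replaces: (U32)((prefixLowestIndex-1) - repIndex) >= 3, with intentional wraparound. A repIndex in the three positions just below prefixLowestIndex yields 0..2 and fails the test (such a repcode would straddle the dictionary/prefix boundary), while any repIndex >= prefixLowestIndex wraps to a huge value and passes. Since the call is a drop-in replacement, ZSTD_index_overlap_check presumably centralizes that same expression; an inferred sketch (the actual definition lives in zstd_compress_internal.h):

    /* Inferred from the replaced code above; not copied from the zstd sources. */
    #define ZSTD_index_overlap_check(prefixLowestIndex, repIndex) \
        (((U32)((prefixLowestIndex) - 1) - (U32)(repIndex)) >= 3)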
@@ -401,14 +413,12 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
             goto _match_stored;
         }
 
-        if (matchIndexL > prefixLowestIndex) {
+        if ((matchIndexL >= prefixLowestIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
             /* check prefix long match */
-            if (MEM_read64(matchLong) == MEM_read64(ip)) {
-                mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
-                offset = (U32)(ip-matchLong);
-                while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
-                goto _match_found;
-            }
+            mLength = ZSTD_count(ip+8, matchLong+8, iend) + 8;
+            offset = (U32)(ip-matchLong);
+            while (((ip>anchor) & (matchLong>prefixLowest)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
+            goto _match_found;
         } else if (dictTagsMatchL) {
             /* check dictMatchState long match */
             U32 const dictMatchIndexL = dictMatchIndexAndTagL >> ZSTD_SHORT_CACHE_TAG_BITS;
@@ -423,7 +433,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
         }   }
 
         if (matchIndexS > prefixLowestIndex) {
-            /* check prefix short match */
+            /* short match candidate */
             if (MEM_read32(match) == MEM_read32(ip)) {
                 goto _search_next_long;
             }
@@ -453,14 +463,12 @@ _search_next_long:
         hashLong[hl3] = curr + 1;
 
         /* check prefix long +1 match */
-        if (matchIndexL3 > prefixLowestIndex) {
-            if (MEM_read64(matchL3) == MEM_read64(ip+1)) {
-                mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
-                ip++;
-                offset = (U32)(ip-matchL3);
-                while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
-                goto _match_found;
-            }
+        if ((matchIndexL3 >= prefixLowestIndex) && (MEM_read64(matchL3) == MEM_read64(ip+1))) {
+            mLength = ZSTD_count(ip+9, matchL3+8, iend) + 8;
+            ip++;
+            offset = (U32)(ip-matchL3);
+            while (((ip>anchor) & (matchL3>prefixLowest)) && (ip[-1] == matchL3[-1])) { ip--; matchL3--; mLength++; } /* catch up */
+            goto _match_found;
         } else if (dictTagsMatchL3) {
             /* check dict long +1 match */
             U32 const dictMatchIndexL3 = dictMatchIndexAndTagL3 >> ZSTD_SHORT_CACHE_TAG_BITS;
@@ -513,7 +521,7 @@ _match_stored:
             const BYTE* repMatch2 = repIndex2 < prefixLowestIndex ?
                     dictBase + repIndex2 - dictIndexDelta :
                     base + repIndex2;
-            if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
+            if ( (ZSTD_index_overlap_check(prefixLowestIndex, repIndex2))
               && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                 const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
                 size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
@@ -540,7 +548,7 @@ _match_stored:
 
 #define ZSTD_GEN_DFAST_FN(dictMode, mls) \
     static size_t ZSTD_compressBlock_doubleFast_##dictMode##_##mls( \
-            ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \
+            ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM], \
             void const* src, size_t srcSize) \
     { \
         return ZSTD_compressBlock_doubleFast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls); \
@@ -558,7 +566,7 @@ ZSTD_GEN_DFAST_FN(dictMatchState, 7)
 
 
 size_t ZSTD_compressBlock_doubleFast(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize)
 {
     const U32 mls = ms->cParams.minMatch;
@@ -578,7 +586,7 @@ size_t ZSTD_compressBlock_doubleFast(
 
 
 size_t ZSTD_compressBlock_doubleFast_dictMatchState(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
 {
     const U32 mls = ms->cParams.minMatch;
@@ -600,7 +608,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState(
 static
 ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
 size_t ZSTD_compressBlock_doubleFast_extDict_generic(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize,
         U32 const mls /* template */)
 {
@@ -651,7 +659,7 @@ size_t ZSTD_compressBlock_doubleFast_extDict_generic(
         size_t mLength;
         hashSmall[hSmall] = hashLong[hLong] = curr;   /* update hash table */
 
-        if ((((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex doesn't overlap dict + prefix */
+        if (((ZSTD_index_overlap_check(prefixStartIndex, repIndex))
             & (offset_1 <= curr+1 - dictStartIndex)) /* note: we are searching at curr+1 */
            && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
             const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
@@ -719,7 +727,7 @@ size_t ZSTD_compressBlock_doubleFast_extDict_generic(
             U32 const current2 = (U32)(ip-base);
             U32 const repIndex2 = current2 - offset_2;
             const BYTE* repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
-            if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) /* intentional overflow : ensure repIndex2 doesn't overlap dict + prefix */
+            if ( ((ZSTD_index_overlap_check(prefixStartIndex, repIndex2))
               & (offset_2 <= current2 - dictStartIndex))
              && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                 const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
@@ -749,7 +757,7 @@ ZSTD_GEN_DFAST_FN(extDict, 6)
 ZSTD_GEN_DFAST_FN(extDict, 7)
 
 size_t ZSTD_compressBlock_doubleFast_extDict(
-        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_MatchState_t* ms, SeqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize)
 {
     U32 const mls = ms->cParams.minMatch;