zstd-ruby 1.5.1.0 → 1.5.2.1

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (67)
  1. checksums.yaml +4 -4
  2. data/.gitignore +2 -0
  3. data/README.md +56 -3
  4. data/Rakefile +8 -2
  5. data/ext/zstdruby/{zstdruby.h → common.h} +2 -0
  6. data/ext/zstdruby/extconf.rb +1 -1
  7. data/ext/zstdruby/libzstd/common/pool.c +11 -6
  8. data/ext/zstdruby/libzstd/common/pool.h +2 -2
  9. data/ext/zstdruby/libzstd/common/portability_macros.h +6 -0
  10. data/ext/zstdruby/libzstd/common/zstd_internal.h +3 -4
  11. data/ext/zstdruby/libzstd/compress/zstd_compress.c +114 -96
  12. data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +72 -39
  13. data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.c +10 -10
  14. data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.c +1 -1
  15. data/ext/zstdruby/libzstd/compress/zstd_cwksp.h +38 -24
  16. data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +10 -10
  17. data/ext/zstdruby/libzstd/compress/zstd_fast.c +11 -11
  18. data/ext/zstdruby/libzstd/compress/zstd_lazy.c +66 -62
  19. data/ext/zstdruby/libzstd/compress/zstd_ldm.c +5 -3
  20. data/ext/zstdruby/libzstd/compress/zstd_opt.c +66 -43
  21. data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +17 -9
  22. data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +4 -1
  23. data/ext/zstdruby/libzstd/decompress/huf_decompress.c +2 -2
  24. data/ext/zstdruby/libzstd/decompress/huf_decompress_amd64.S +17 -3
  25. data/ext/zstdruby/libzstd/dictBuilder/zdict.c +2 -2
  26. data/ext/zstdruby/libzstd/zstd.h +1 -1
  27. data/ext/zstdruby/main.c +14 -0
  28. data/ext/zstdruby/streaming_compress.c +185 -0
  29. data/ext/zstdruby/streaming_compress.h +5 -0
  30. data/ext/zstdruby/streaming_decompress.c +125 -0
  31. data/ext/zstdruby/zstdruby.c +4 -6
  32. data/lib/zstd-ruby/version.rb +1 -1
  33. data/zstd-ruby.gemspec +1 -1
  34. metadata +11 -40
  35. data/.github/dependabot.yml +0 -8
  36. data/.github/workflows/ruby.yml +0 -35
  37. data/ext/zstdruby/libzstd/.gitignore +0 -3
  38. data/ext/zstdruby/libzstd/BUCK +0 -232
  39. data/ext/zstdruby/libzstd/Makefile +0 -357
  40. data/ext/zstdruby/libzstd/README.md +0 -217
  41. data/ext/zstdruby/libzstd/deprecated/zbuff.h +0 -214
  42. data/ext/zstdruby/libzstd/deprecated/zbuff_common.c +0 -26
  43. data/ext/zstdruby/libzstd/deprecated/zbuff_compress.c +0 -167
  44. data/ext/zstdruby/libzstd/deprecated/zbuff_decompress.c +0 -75
  45. data/ext/zstdruby/libzstd/dll/example/Makefile +0 -48
  46. data/ext/zstdruby/libzstd/dll/example/README.md +0 -63
  47. data/ext/zstdruby/libzstd/dll/example/build_package.bat +0 -20
  48. data/ext/zstdruby/libzstd/dll/example/fullbench-dll.sln +0 -25
  49. data/ext/zstdruby/libzstd/dll/example/fullbench-dll.vcxproj +0 -181
  50. data/ext/zstdruby/libzstd/legacy/zstd_legacy.h +0 -415
  51. data/ext/zstdruby/libzstd/legacy/zstd_v01.c +0 -2158
  52. data/ext/zstdruby/libzstd/legacy/zstd_v01.h +0 -94
  53. data/ext/zstdruby/libzstd/legacy/zstd_v02.c +0 -3518
  54. data/ext/zstdruby/libzstd/legacy/zstd_v02.h +0 -93
  55. data/ext/zstdruby/libzstd/legacy/zstd_v03.c +0 -3160
  56. data/ext/zstdruby/libzstd/legacy/zstd_v03.h +0 -93
  57. data/ext/zstdruby/libzstd/legacy/zstd_v04.c +0 -3647
  58. data/ext/zstdruby/libzstd/legacy/zstd_v04.h +0 -142
  59. data/ext/zstdruby/libzstd/legacy/zstd_v05.c +0 -4050
  60. data/ext/zstdruby/libzstd/legacy/zstd_v05.h +0 -162
  61. data/ext/zstdruby/libzstd/legacy/zstd_v06.c +0 -4154
  62. data/ext/zstdruby/libzstd/legacy/zstd_v06.h +0 -172
  63. data/ext/zstdruby/libzstd/legacy/zstd_v07.c +0 -4541
  64. data/ext/zstdruby/libzstd/legacy/zstd_v07.h +0 -187
  65. data/ext/zstdruby/libzstd/libzstd.mk +0 -185
  66. data/ext/zstdruby/libzstd/libzstd.pc.in +0 -16
  67. data/ext/zstdruby/libzstd/modulemap/module.modulemap +0 -4
data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.c
@@ -539,7 +539,7 @@ static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
  repcodes_t rep;
  ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep));
  for (seq = sstart; seq < sp; ++seq) {
- rep = ZSTD_updateRep(rep.rep, seq->offset - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
+ ZSTD_updateRep(rep.rep, seq->offBase - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
  }
  ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep));
  }
data/ext/zstdruby/libzstd/compress/zstd_cwksp.h
@@ -243,12 +243,14 @@ MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignByt

  /**
  * Internal function. Do not use directly.
- * Reserves the given number of bytes within the aligned/buffer segment of the wksp, which
- * counts from the end of the wksp. (as opposed to the object/table segment)
+ * Reserves the given number of bytes within the aligned/buffer segment of the wksp,
+ * which counts from the end of the wksp (as opposed to the object/table segment).
  *
  * Returns a pointer to the beginning of that space.
  */
- MEM_STATIC void* ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes) {
+ MEM_STATIC void*
+ ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
+ {
  void* const alloc = (BYTE*)ws->allocStart - bytes;
  void* const bottom = ws->tableEnd;
  DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
@@ -260,6 +262,8 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t
  ws->allocFailed = 1;
  return NULL;
  }
+ /* the area is reserved from the end of wksp.
+ * If it overlaps with tableValidEnd, it voids guarantees on values' range */
  if (alloc < ws->tableValidEnd) {
  ws->tableValidEnd = alloc;
  }
@@ -269,10 +273,12 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t

  /**
  * Moves the cwksp to the next phase, and does any necessary allocations.
+ * cwksp initialization must necessarily go through each phase in order.
  * Returns a 0 on success, or zstd error
  */
- MEM_STATIC size_t ZSTD_cwksp_internal_advance_phase(
- ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
+ MEM_STATIC size_t
+ ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
+ {
  assert(phase >= ws->phase);
  if (phase > ws->phase) {
  /* Going from allocating objects to allocating buffers */
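The comment added above notes that cwksp initialization must go through each allocation phase in order. A minimal sketch of that discipline, using hypothetical demo_* names in place of the real ZSTD_cwksp types (the actual phase values ZSTD_cwksp_alloc_objects, ZSTD_cwksp_alloc_buffers and ZSTD_cwksp_alloc_aligned appear in other hunks of this diff):

    #include <assert.h>

    /* Hypothetical stand-ins for ZSTD_cwksp_alloc_phase_e and its values. */
    typedef enum {
        demo_alloc_objects = 0,   /* fixed objects, carved out first            */
        demo_alloc_buffers = 1,   /* unaligned buffers, carved from the end     */
        demo_alloc_aligned = 2    /* 64-byte aligned tables and aligned buffers */
    } demo_alloc_phase_e;

    typedef struct { demo_alloc_phase_e phase; } demo_wksp;

    /* Mirrors the shape of ZSTD_cwksp_internal_advance_phase(): a request may
     * keep the current phase or move it forward, never backward. */
    static void demo_advance_phase(demo_wksp* ws, demo_alloc_phase_e phase)
    {
        assert(phase >= ws->phase);   /* same invariant as the assert in the hunk above */
        if (phase > ws->phase) ws->phase = phase;
    }

    int main(void)
    {
        demo_wksp ws = { demo_alloc_objects };
        demo_advance_phase(&ws, demo_alloc_buffers);   /* ok: forward    */
        demo_advance_phase(&ws, demo_alloc_aligned);   /* ok: forward    */
        demo_advance_phase(&ws, demo_alloc_aligned);   /* ok: same phase */
        return 0;
    }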
@@ -295,15 +301,15 @@ MEM_STATIC size_t ZSTD_cwksp_internal_advance_phase(
  { /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
  void* const alloc = ws->objectEnd;
  size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
- void* const end = (BYTE*)alloc + bytesToAlign;
+ void* const objectEnd = (BYTE*)alloc + bytesToAlign;
  DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
- RETURN_ERROR_IF(end > ws->workspaceEnd, memory_allocation,
+ RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation,
  "table phase - alignment initial allocation failed!");
- ws->objectEnd = end;
- ws->tableEnd = end;
- ws->tableValidEnd = end;
- }
- }
+ ws->objectEnd = objectEnd;
+ ws->tableEnd = objectEnd; /* table area starts being empty */
+ if (ws->tableValidEnd < ws->tableEnd) {
+ ws->tableValidEnd = ws->tableEnd;
+ } } }
  ws->phase = phase;
  ZSTD_cwksp_assert_internal_consistency(ws);
  }
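The alignment step above relies on ZSTD_cwksp_bytes_to_align_ptr(), which is only named in the hunk header and not defined in this diff. A minimal sketch, assuming the helper returns the padding needed to reach the next ZSTD_CWKSP_ALIGNMENT_BYTES (64-byte) boundary; the demo_* names and the standalone main() are illustrative, not the library's code:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define DEMO_ALIGNMENT_BYTES 64   /* stand-in for ZSTD_CWKSP_ALIGNMENT_BYTES */

    /* Returns how many padding bytes are needed so that ptr + result lands on
     * an alignBytes boundary (alignBytes must be a power of two). */
    static size_t demo_bytes_to_align(const void* ptr, size_t alignBytes)
    {
        size_t const mask = alignBytes - 1;
        return (alignBytes - ((uintptr_t)ptr & mask)) & mask;   /* 0 when already aligned */
    }

    int main(void)
    {
        char workspace[256];
        void* const alloc = workspace + 3;                       /* deliberately misaligned */
        size_t const pad  = demo_bytes_to_align(alloc, DEMO_ALIGNMENT_BYTES);
        assert(pad <= DEMO_ALIGNMENT_BYTES - 1);                 /* "Use [0, 63] bytes" */
        assert((((uintptr_t)alloc + pad) & (DEMO_ALIGNMENT_BYTES - 1)) == 0);
        return 0;
    }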
@@ -313,15 +319,17 @@ MEM_STATIC size_t ZSTD_cwksp_internal_advance_phase(
  /**
  * Returns whether this object/buffer/etc was allocated in this workspace.
  */
- MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
+ MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
+ {
  return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
  }

  /**
  * Internal function. Do not use directly.
  */
- MEM_STATIC void* ZSTD_cwksp_reserve_internal(
- ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
+ MEM_STATIC void*
+ ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase)
+ {
  void* alloc;
  if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
  return NULL;
@@ -351,14 +359,16 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal(
  /**
  * Reserves and returns unaligned memory.
  */
- MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
+ MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
+ {
  return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
  }

  /**
  * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
  */
- MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
+ MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
+ {
  void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
  ZSTD_cwksp_alloc_aligned);
  assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
@@ -370,7 +380,8 @@ MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
  * their values remain constrained, allowing us to re-use them without
  * memset()-ing them.
  */
- MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
+ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
+ {
  const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
  void* alloc;
  void* end;
@@ -408,9 +419,11 @@ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {

  /**
  * Aligned on sizeof(void*).
+ * Note : should happen only once, at workspace first initialization
  */
- MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
- size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
+ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
+ {
+ size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
  void* alloc = ws->objectEnd;
  void* end = (BYTE*)alloc + roundedBytes;

@@ -419,7 +432,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
  end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
  #endif

- DEBUGLOG(5,
+ DEBUGLOG(4,
  "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
  alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
  assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
@@ -427,7 +440,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
  ZSTD_cwksp_assert_internal_consistency(ws);
  /* we must be in the first phase, no advance is possible */
  if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
- DEBUGLOG(4, "cwksp: object alloc failed!");
+ DEBUGLOG(3, "cwksp: object alloc failed!");
  ws->allocFailed = 1;
  return NULL;
  }
@@ -438,7 +451,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
  #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
  /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
  * either size. */
- alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+ alloc = (BYTE*)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
  if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
  __asan_unpoison_memory_region(alloc, bytes);
  }
@@ -447,7 +460,8 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
  return alloc;
  }

- MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
+ MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
+ {
  DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");

  #if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
data/ext/zstdruby/libzstd/compress/zstd_double_fast.c
@@ -131,7 +131,7 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic(
  if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
  mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
  ip++;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
  goto _match_stored;
  }

@@ -217,7 +217,7 @@ _match_found: /* requires ip, offset, mLength */
  hashLong[hl1] = (U32)(ip1 - base);
  }

- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);

  _match_stored:
  /* match found */
@@ -243,7 +243,7 @@ _match_stored:
  U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_1 */
  hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
  hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
- ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, rLength-MINMATCH);
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, rLength);
  ip += rLength;
  anchor = ip;
  continue; /* faster when present ... (?) */
@@ -328,7 +328,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
  const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
  mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
  ip++;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
  goto _match_stored;
  }

@@ -419,7 +419,7 @@ _match_found:
  offset_2 = offset_1;
  offset_1 = offset;

- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);

  _match_stored:
  /* match found */
@@ -448,7 +448,7 @@ _match_stored:
  const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
  size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
  U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
- ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2);
  hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
  hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
  ip += repLength2;
@@ -585,7 +585,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
  const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
  mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
  ip++;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
  } else {
  if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
  const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
@@ -596,7 +596,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
  while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; } /* catch up */
  offset_2 = offset_1;
  offset_1 = offset;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);

  } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
  size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
@@ -621,7 +621,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
  }
  offset_2 = offset_1;
  offset_1 = offset;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);

  } else {
  ip += ((ip-anchor) >> kSearchStrength) + 1;
@@ -653,7 +653,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
  const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
  size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
  U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
- ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2);
  hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
  hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
  ip += repLength2;
data/ext/zstdruby/libzstd/compress/zstd_fast.c
@@ -180,7 +180,7 @@ _start: /* Requires: ip0 */
  mLength = ip0[-1] == match0[-1];
  ip0 -= mLength;
  match0 -= mLength;
- offcode = 0;
+ offcode = STORE_REPCODE_1;
  mLength += 4;
  goto _match;
  }
@@ -267,7 +267,7 @@ _offset: /* Requires: ip0, idx */
  match0 = base + idx;
  rep_offset2 = rep_offset1;
  rep_offset1 = (U32)(ip0-match0);
- offcode = rep_offset1 + ZSTD_REP_MOVE;
+ offcode = STORE_OFFSET(rep_offset1);
  mLength = 4;

  /* Count the backwards match length. */
@@ -282,7 +282,7 @@ _match: /* Requires: ip0, match0, offcode */
  /* Count the forward length. */
  mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend);

- ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength - MINMATCH);
+ ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);

  ip0 += mLength;
  anchor = ip0;
@@ -306,7 +306,7 @@ _match: /* Requires: ip0, match0, offcode */
  { U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; } /* swap rep_offset2 <=> rep_offset1 */
  hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
  ip0 += rLength;
- ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0 /*offCode*/, rLength-MINMATCH);
+ ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, STORE_REPCODE_1, rLength);
  anchor = ip0;
  continue; /* faster when present (confirmed on gcc-8) ... (?) */
  } } }
@@ -439,7 +439,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
  const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
  mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
  ip++;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
  } else if ( (matchIndex <= prefixStartIndex) ) {
  size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
  U32 const dictMatchIndex = dictHashTable[dictHash];
@@ -459,7 +459,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
  } /* catch up */
  offset_2 = offset_1;
  offset_1 = offset;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
  }
  } else if (MEM_read32(match) != MEM_read32(ip)) {
  /* it's not a match, and we're not going to check the dictionary */
@@ -474,7 +474,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
  && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
  offset_2 = offset_1;
  offset_1 = offset;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
  }

  /* match found */
@@ -499,7 +499,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
  const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
  size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
  U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset_1 */
- ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
+ ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2);
  hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
  ip += repLength2;
  anchor = ip;
@@ -598,7 +598,7 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
  const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
  size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
  ip++;
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, rLength-MINMATCH);
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, rLength);
  ip += rLength;
  anchor = ip;
  } else {
@@ -614,7 +614,7 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
  size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
  while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
  offset_2 = offset_1; offset_1 = offset; /* update offset history */
- ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+ ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
  ip += mLength;
  anchor = ip;
  } }
@@ -633,7 +633,7 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
  const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
  size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
  { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; } /* swap offset_2 <=> offset_1 */
- ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, 0 /*offcode*/, repLength2-MINMATCH);
+ ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, STORE_REPCODE_1, repLength2);
  hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
  ip += repLength2;
  anchor = ip;
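Every ZSTD_storeSeq() call site changed in the zstd_double_fast.c and zstd_fast.c hunks follows the same pattern: the hand-coded offset code (0 for repcode 1, offset + ZSTD_REP_MOVE for a fresh offset) becomes STORE_REPCODE_1 or STORE_OFFSET(offset), and the length is passed as mLength rather than mLength - MINMATCH. The macro definitions live in zstd_compress_internal.h and are not shown in this diff; the sketch below only restates the correspondence visible on the changed lines, with demo_* stand-ins and assumed constant values:

    #include <assert.h>

    /* Assumed values for the sketch; the real constants are defined in the
     * zstd sources, not reproduced in this diff. */
    #define DEMO_ZSTD_REP_NUM   3
    #define DEMO_ZSTD_REP_MOVE  (DEMO_ZSTD_REP_NUM - 1)

    /* Hypothetical equivalents of the new macros, written to produce the same
     * values the old hand-coded arguments carried on the '-' lines above. */
    #define DEMO_STORE_REPCODE_1  0                                            /* old code passed literal 0          */
    #define DEMO_STORE_OFFSET(o)  (assert((o) > 0), (o) + DEMO_ZSTD_REP_MOVE)  /* old code passed offset + REP_MOVE */

    int main(void)
    {
        unsigned const offset = 123;
        /* old: ZSTD_storeSeq(seqStore, litLen, anchor, iend, offset + ZSTD_REP_MOVE, mLength - MINMATCH); */
        /* new: ZSTD_storeSeq(seqStore, litLen, anchor, iend, STORE_OFFSET(offset),   mLength);            */
        assert(DEMO_STORE_OFFSET(offset) == offset + DEMO_ZSTD_REP_MOVE);
        assert(DEMO_STORE_REPCODE_1 == 0);
        return 0;
    }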