zstd-ruby 1.5.1.1 → 1.5.2.0 (vendored libzstd updated from v1.5.1 to v1.5.2)

@@ -129,7 +129,7 @@ size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr,
  *********************************/
 
 typedef struct {
-    U32 off;            /* Offset code (offset + ZSTD_REP_MOVE) for the match */
+    U32 off;            /* Offset sumtype code for the match, using ZSTD_storeSeq() format */
     U32 len;            /* Raw length of match */
 } ZSTD_match_t;
 
@@ -497,31 +497,6 @@ MEM_STATIC U32 ZSTD_MLcode(U32 mlBase)
     return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase];
 }
 
-typedef struct repcodes_s {
-    U32 rep[3];
-} repcodes_t;
-
-MEM_STATIC repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
-{
-    repcodes_t newReps;
-    if (offset >= ZSTD_REP_NUM) {  /* full offset */
-        newReps.rep[2] = rep[1];
-        newReps.rep[1] = rep[0];
-        newReps.rep[0] = offset - ZSTD_REP_MOVE;
-    } else {   /* repcode */
-        U32 const repCode = offset + ll0;
-        if (repCode > 0) {  /* note : if repCode==0, no change */
-            U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
-            newReps.rep[2] = (repCode >= 2) ? rep[1] : rep[2];
-            newReps.rep[1] = rep[0];
-            newReps.rep[0] = currentOffset;
-        } else {   /* repCode == 0 */
-            ZSTD_memcpy(&newReps, rep, sizeof(newReps));
-        }
-    }
-    return newReps;
-}
-
 /* ZSTD_cParam_withinBounds:
  * @return 1 if value is within cParam bounds,
  * 0 otherwise */
@@ -590,7 +565,9 @@ MEM_STATIC int ZSTD_literalsCompressionIsDisabled(const ZSTD_CCtx_params* cctxPa
  * Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single
  * large copies.
  */
-static void ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w) {
+static void
+ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w)
+{
     assert(iend > ilimit_w);
     if (ip <= ilimit_w) {
         ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap);
@@ -600,14 +577,30 @@ static void ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const ie
     while (ip < iend) *op++ = *ip++;
 }
 
+#define ZSTD_REP_MOVE     (ZSTD_REP_NUM-1)
+#define STORE_REPCODE_1 STORE_REPCODE(1)
+#define STORE_REPCODE_2 STORE_REPCODE(2)
+#define STORE_REPCODE_3 STORE_REPCODE(3)
+#define STORE_REPCODE(r) (assert((r)>=1), assert((r)<=3), (r)-1)
+#define STORE_OFFSET(o)  (assert((o)>0), (o) + ZSTD_REP_MOVE)
+#define STORED_IS_OFFSET(o)  ((o) > ZSTD_REP_MOVE)
+#define STORED_IS_REPCODE(o) ((o) <= ZSTD_REP_MOVE)
+#define STORED_OFFSET(o)  (assert(STORED_IS_OFFSET(o)), (o)-ZSTD_REP_MOVE)
+#define STORED_REPCODE(o) (assert(STORED_IS_REPCODE(o)), (o)+1)   /* returns ID 1,2,3 */
+#define STORED_TO_OFFBASE(o) ((o)+1)
+#define OFFBASE_TO_STORED(o) ((o)-1)
+
 /*! ZSTD_storeSeq() :
- *  Store a sequence (litlen, litPtr, offCode and mlBase) into seqStore_t.
- *  `offCode` : distance to match + ZSTD_REP_MOVE (values <= ZSTD_REP_MOVE are repCodes).
- *  `mlBase` : matchLength - MINMATCH
+ *  Store a sequence (litlen, litPtr, offCode and matchLength) into seqStore_t.
+ *  @offBase_minus1 : Users should use the macros STORE_REPCODE_X and STORE_OFFSET().
+ *  @matchLength : must be >= MINMATCH
  *  Allowed to overread literals up to litLimit.
 */
-HINT_INLINE UNUSED_ATTR
-void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* literals, const BYTE* litLimit, U32 offCode, size_t mlBase)
+HINT_INLINE UNUSED_ATTR void
+ZSTD_storeSeq(seqStore_t* seqStorePtr,
+              size_t litLength, const BYTE* literals, const BYTE* litLimit,
+              U32 offBase_minus1,
+              size_t matchLength)
 {
     BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH;
     BYTE const* const litEnd = literals + litLength;
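The macros above define a small sum-type packed into a single U32: values 0..ZSTD_REP_MOVE encode repcode IDs 1..3 (stored as ID-1), and anything larger encodes a real offset shifted by ZSTD_REP_MOVE. A minimal round-trip sketch (the wrapper function name is hypothetical; it assumes ZSTD_REP_NUM == 3, hence ZSTD_REP_MOVE == 2, as defined in zstd_internal.h):

    /* Hypothetical sketch, not part of the diff: how the stored sum-type round-trips. */
    MEM_STATIC void ZSTD_storedOffsets_example(void)
    {
        U32 const storedRep = STORE_REPCODE_2;     /* == 1 : repcode ID 2, stored as ID-1 */
        U32 const storedOff = STORE_OFFSET(1234);  /* == 1236 : raw offset + ZSTD_REP_MOVE */
        assert(STORED_IS_REPCODE(storedRep) && STORED_REPCODE(storedRep) == 2);
        assert(STORED_IS_OFFSET(storedOff) && STORED_OFFSET(storedOff) == 1234);
        /* seqStore_t keeps the value shifted by one more ("offBase"): */
        assert(OFFBASE_TO_STORED(STORED_TO_OFFBASE(storedOff)) == storedOff);
    }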
@@ -616,7 +609,7 @@ void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* litera
     if (g_start==NULL) g_start = (const BYTE*)literals;  /* note : index only works for compression within a single segment */
     {   U32 const pos = (U32)((const BYTE*)literals - g_start);
         DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u",
-               pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offCode);
+               pos, (U32)litLength, (U32)matchLength, (U32)offBase_minus1);
     }
 #endif
     assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
@@ -647,19 +640,59 @@ void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* litera
     seqStorePtr->sequences[0].litLength = (U16)litLength;
 
     /* match offset */
-    seqStorePtr->sequences[0].offset = offCode + 1;
+    seqStorePtr->sequences[0].offBase = STORED_TO_OFFBASE(offBase_minus1);
 
     /* match Length */
-    if (mlBase>0xFFFF) {
-        assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */
-        seqStorePtr->longLengthType = ZSTD_llt_matchLength;
-        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+    assert(matchLength >= MINMATCH);
+    {   size_t const mlBase = matchLength - MINMATCH;
+        if (mlBase>0xFFFF) {
+            assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */
+            seqStorePtr->longLengthType = ZSTD_llt_matchLength;
+            seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+        }
+        seqStorePtr->sequences[0].mlBase = (U16)mlBase;
     }
-    seqStorePtr->sequences[0].matchLength = (U16)mlBase;
 
     seqStorePtr->sequences++;
 }
 
+/* ZSTD_updateRep() :
+ * updates in-place @rep (array of repeat offsets)
+ * @offBase_minus1 : sum-type, with same numeric representation as ZSTD_storeSeq()
+ */
+MEM_STATIC void
+ZSTD_updateRep(U32 rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0)
+{
+    if (STORED_IS_OFFSET(offBase_minus1)) {  /* full offset */
+        rep[2] = rep[1];
+        rep[1] = rep[0];
+        rep[0] = STORED_OFFSET(offBase_minus1);
+    } else {   /* repcode */
+        U32 const repCode = STORED_REPCODE(offBase_minus1) - 1 + ll0;
+        if (repCode > 0) {  /* note : if repCode==0, no change */
+            U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
+            rep[2] = (repCode >= 2) ? rep[1] : rep[2];
+            rep[1] = rep[0];
+            rep[0] = currentOffset;
+        } else {   /* repCode == 0 */
+            /* nothing to do */
+        }
+    }
+}
+
+typedef struct repcodes_s {
+    U32 rep[3];
+} repcodes_t;
+
+MEM_STATIC repcodes_t
+ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0)
+{
+    repcodes_t newReps;
+    ZSTD_memcpy(&newReps, rep, sizeof(newReps));
+    ZSTD_updateRep(newReps.rep, offBase_minus1, ll0);
+    return newReps;
+}
+
 
 /*-*************************************
 *  Match length counter
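To make the ll0 convention concrete, here is a hedged worked example (the driver function is hypothetical; ZSTD_updateRep() and ZSTD_newRep() are the real entry points). With history {8, 4, 2}, storing repcode 2 while literals precede the match (ll0 == 0) promotes rep[1] to the front; a full offset pushes the whole history down instead:

    /* Hypothetical driver, not part of the diff. */
    MEM_STATIC void ZSTD_updateRep_example(void)
    {
        U32 rep[ZSTD_REP_NUM] = { 8, 4, 2 };
        ZSTD_updateRep(rep, STORE_REPCODE_2, 0 /* ll0 */);   /* repcode 2 -> rep[1] promoted */
        assert(rep[0] == 4 && rep[1] == 8 && rep[2] == 2);
        /* ZSTD_newRep() wraps the same logic when a fresh copy is wanted: */
        {   repcodes_t const r = ZSTD_newRep(rep, STORE_OFFSET(100), 0);
            assert(r.rep[0] == 100 && r.rep[1] == 4 && r.rep[2] == 8);
        }
    }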
@@ -313,19 +313,19 @@ ZSTD_encodeSequences_body(
     FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]);
     BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
     if (MEM_32bits()) BIT_flushBits(&blockStream);
-    BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
+    BIT_addBits(&blockStream, sequences[nbSeq-1].mlBase, ML_bits[mlCodeTable[nbSeq-1]]);
     if (MEM_32bits()) BIT_flushBits(&blockStream);
     if (longOffsets) {
         U32 const ofBits = ofCodeTable[nbSeq-1];
         unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
         if (extraBits) {
-            BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
+            BIT_addBits(&blockStream, sequences[nbSeq-1].offBase, extraBits);
             BIT_flushBits(&blockStream);
         }
-        BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,
+        BIT_addBits(&blockStream, sequences[nbSeq-1].offBase >> extraBits,
                     ofBits - extraBits);
     } else {
-        BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
+        BIT_addBits(&blockStream, sequences[nbSeq-1].offBase, ofCodeTable[nbSeq-1]);
     }
     BIT_flushBits(&blockStream);
 
@@ -339,8 +339,8 @@ ZSTD_encodeSequences_body(
             U32 const mlBits = ML_bits[mlCode];
             DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
                         (unsigned)sequences[n].litLength,
-                        (unsigned)sequences[n].matchLength + MINMATCH,
-                        (unsigned)sequences[n].offset);
+                        (unsigned)sequences[n].mlBase + MINMATCH,
+                        (unsigned)sequences[n].offBase);
                                                                             /* 32b*/  /* 64b*/
                                                                             /* (7)*/  /* (7)*/
             FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode);       /* 15 */  /* 15 */
@@ -351,18 +351,18 @@ ZSTD_encodeSequences_body(
                 BIT_flushBits(&blockStream);                                /* (7)*/
             BIT_addBits(&blockStream, sequences[n].litLength, llBits);
             if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
-            BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
+            BIT_addBits(&blockStream, sequences[n].mlBase, mlBits);
             if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
             if (longOffsets) {
                 unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
                 if (extraBits) {
-                    BIT_addBits(&blockStream, sequences[n].offset, extraBits);
+                    BIT_addBits(&blockStream, sequences[n].offBase, extraBits);
                     BIT_flushBits(&blockStream);                            /* (7)*/
                 }
-                BIT_addBits(&blockStream, sequences[n].offset >> extraBits,
+                BIT_addBits(&blockStream, sequences[n].offBase >> extraBits,
                             ofBits - extraBits);                            /* 31 */
             } else {
-                BIT_addBits(&blockStream, sequences[n].offset, ofBits);     /* 31 */
+                BIT_addBits(&blockStream, sequences[n].offBase, ofBits);    /* 31 */
             }
             BIT_flushBits(&blockStream);                                    /* (7)*/
             DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr));
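These encoder hunks are pure renames tracking the seqDef fields; the emitted bitstream is unchanged. For reference, a hedged sketch of the renamed struct (paraphrased from zstd_internal.h in v1.5.2):

    /* Sketch of the renamed fields; the stored values are identical, only the
     * names now describe what is actually held. */
    typedef struct seqDef_s {
        U32 offBase;    /* offBase == Offset + ZSTD_REP_NUM, or a repcode 1,2,3 */
        U16 litLength;
        U16 mlBase;     /* mlBase == matchLength - MINMATCH */
    } seqDef;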
@@ -539,7 +539,7 @@ static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
         repcodes_t rep;
         ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep));
         for (seq = sstart; seq < sp; ++seq) {
-            rep = ZSTD_updateRep(rep.rep, seq->offset - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
+            ZSTD_updateRep(rep.rep, seq->offBase - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
         }
         ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep));
     }
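Since ZSTD_updateRep() now mutates its first argument instead of returning a repcodes_t, the reassignment disappears; `seq->offBase - 1` is just OFFBASE_TO_STORED() spelled out. An equivalent hedged sketch of this replay loop (the helper name is hypothetical):

    /* Hypothetical helper, not part of the diff: replay a block's sequences
     * over the repcode history using the in-place update. */
    MEM_STATIC void ZSTD_replayRepHistory_example(repcodes_t* rep,
                                                  const seqStore_t* seqStorePtr,
                                                  const seqDef* sstart, const seqDef* send)
    {
        const seqDef* seq;
        for (seq = sstart; seq < send; ++seq) {
            U32 const ll0 = (ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
            ZSTD_updateRep(rep->rep, OFFBASE_TO_STORED(seq->offBase), ll0);
        }
    }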
@@ -243,12 +243,14 @@ MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignByt
 
 /**
  * Internal function. Do not use directly.
- * Reserves the given number of bytes within the aligned/buffer segment of the wksp, which
- * counts from the end of the wksp. (as opposed to the object/table segment)
+ * Reserves the given number of bytes within the aligned/buffer segment of the wksp,
+ * which counts from the end of the wksp (as opposed to the object/table segment).
  *
  * Returns a pointer to the beginning of that space.
  */
-MEM_STATIC void* ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes) {
+MEM_STATIC void*
+ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
+{
     void* const alloc = (BYTE*)ws->allocStart - bytes;
     void* const bottom = ws->tableEnd;
     DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
@@ -260,6 +262,8 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t
         ws->allocFailed = 1;
         return NULL;
     }
+    /* the area is reserved from the end of wksp.
+     * If it overlaps with tableValidEnd, it voids guarantees on values' range */
     if (alloc < ws->tableValidEnd) {
         ws->tableValidEnd = alloc;
     }
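For orientation, a rough sketch of the workspace layout (paraphrased from the banner comment at the top of zstd_cwksp.h, so treat the exact picture as approximate): objects and tables grow up from the start of the workspace, while aligned allocations and buffers are carved down from the end, which is why an allocation dipping below tableValidEnd voids the tables' value-range guarantee.

    /* Approximate layout (paraphrased from zstd_cwksp.h):
     *
     *   workspace .......................................... workspaceEnd
     *   [ objects ][ tables ... -> ]  free  [ <- ... aligned ][ <- buffers ]
     *                    ^tableValidEnd               ^allocStart
     *
     * ZSTD_cwksp_reserve_internal_buffer_space() moves allocStart downward;
     * once it crosses tableValidEnd, table contents may no longer be in-range. */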
@@ -269,10 +273,12 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t
 
 /**
  * Moves the cwksp to the next phase, and does any necessary allocations.
+ * cwksp initialization must necessarily go through each phase in order.
  * Returns a 0 on success, or zstd error
  */
-MEM_STATIC size_t ZSTD_cwksp_internal_advance_phase(
-        ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
+MEM_STATIC size_t
+ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
+{
     assert(phase >= ws->phase);
     if (phase > ws->phase) {
         /* Going from allocating objects to allocating buffers */
@@ -295,15 +301,15 @@ MEM_STATIC size_t ZSTD_cwksp_internal_advance_phase(
         {   /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
             void* const alloc = ws->objectEnd;
             size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
-            void* const end = (BYTE*)alloc + bytesToAlign;
+            void* const objectEnd = (BYTE*)alloc + bytesToAlign;
             DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
-            RETURN_ERROR_IF(end > ws->workspaceEnd, memory_allocation,
+            RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation,
                             "table phase - alignment initial allocation failed!");
-            ws->objectEnd = end;
-            ws->tableEnd = end;
-            ws->tableValidEnd = end;
-        }
-    }
+            ws->objectEnd = objectEnd;
+            ws->tableEnd = objectEnd;  /* table area starts being empty */
+            if (ws->tableValidEnd < ws->tableEnd) {
+                ws->tableValidEnd = ws->tableEnd;
+    }   }   }
     ws->phase = phase;
     ZSTD_cwksp_assert_internal_consistency(ws);
 }
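The new doc line makes the invariant explicit: phases only ever advance. For reference, the phase enum as declared earlier in zstd_cwksp.h (reproduced here as a reminder, hedged to the v1.5.2 layout):

    /* Allocation phases are strictly ordered; each reserve function names the
     * phase it requires, and advance_phase() refuses to move backward. */
    typedef enum {
        ZSTD_cwksp_alloc_objects,
        ZSTD_cwksp_alloc_buffers,
        ZSTD_cwksp_alloc_aligned
    } ZSTD_cwksp_alloc_phase_e;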
@@ -313,15 +319,17 @@ MEM_STATIC size_t ZSTD_cwksp_internal_advance_phase(
 /**
  * Returns whether this object/buffer/etc was allocated in this workspace.
  */
-MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
+MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
+{
     return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
 }
 
 /**
  * Internal function. Do not use directly.
  */
-MEM_STATIC void* ZSTD_cwksp_reserve_internal(
-        ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
+MEM_STATIC void*
+ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase)
+{
     void* alloc;
     if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
         return NULL;
@@ -351,14 +359,16 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal(
 /**
  * Reserves and returns unaligned memory.
  */
-MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
+MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
+{
     return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
 }
 
 /**
  * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
  */
-MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
+MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
+{
     void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
                                             ZSTD_cwksp_alloc_aligned);
     assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
@@ -370,7 +380,8 @@ MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
  * their values remain constrained, allowing us to re-use them without
  * memset()-ing them.
  */
-MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
+MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
+{
     const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
     void* alloc;
     void* end;
@@ -408,9 +419,11 @@ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
 
 /**
  * Aligned on sizeof(void*).
+ * Note : should happen only once, at workspace first initialization
  */
-MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
-    size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
+MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
+{
+    size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
     void* alloc = ws->objectEnd;
     void* end = (BYTE*)alloc + roundedBytes;
 
@@ -419,7 +432,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
     end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
 #endif
 
-    DEBUGLOG(5,
+    DEBUGLOG(4,
         "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
         alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
     assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
@@ -427,7 +440,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
     ZSTD_cwksp_assert_internal_consistency(ws);
     /* we must be in the first phase, no advance is possible */
     if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
-        DEBUGLOG(4, "cwksp: object alloc failed!");
+        DEBUGLOG(3, "cwksp: object alloc failed!");
         ws->allocFailed = 1;
         return NULL;
     }
@@ -438,7 +451,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
 #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
      * either side. */
-    alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+    alloc = (BYTE*)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
     if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
         __asan_unpoison_memory_region(alloc, bytes);
     }
@@ -447,7 +460,8 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
     return alloc;
 }
 
-MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
+MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
+{
     DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");
 
 #if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
@@ -131,7 +131,7 @@ size_t ZSTD_compressBlock_doubleFast_noDict_generic(
     if ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1))) {
         mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
         ip++;
-        ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
+        ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
         goto _match_stored;
     }
 
@@ -217,7 +217,7 @@ _match_found: /* requires ip, offset, mLength */
         hashLong[hl1] = (U32)(ip1 - base);
     }
 
-    ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+    ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
 
 _match_stored:
     /* match found */
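Every call site in the fast and double-fast matchers follows the same mechanical rewrite, shown side by side below (a hedged sketch; litLen stands in for each call's literal-length expression):

    /* Before: callers pre-biased both arguments by hand. */
    ZSTD_storeSeq(seqStore, litLen, anchor, iend, 0 /* repcode 1 */, mLength - MINMATCH);
    ZSTD_storeSeq(seqStore, litLen, anchor, iend, offset + ZSTD_REP_MOVE, mLength - MINMATCH);

    /* After: the macros carry the bias, and matchLength is passed raw. */
    ZSTD_storeSeq(seqStore, litLen, anchor, iend, STORE_REPCODE_1, mLength);
    ZSTD_storeSeq(seqStore, litLen, anchor, iend, STORE_OFFSET(offset), mLength);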
@@ -243,7 +243,7 @@ _match_stored:
                 U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff;  /* swap offset_2 <=> offset_1 */
                 hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = (U32)(ip-base);
                 hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = (U32)(ip-base);
-                ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, rLength-MINMATCH);
+                ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, rLength);
                 ip += rLength;
                 anchor = ip;
                 continue;   /* faster when present ... (?) */
@@ -328,7 +328,7 @@ size_t ZSTD_compressBlock_doubleFast_dictMatchState_generic(
             const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
             mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
             ip++;
-            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
+            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
             goto _match_stored;
         }
 
@@ -419,7 +419,7 @@ _match_found:
     offset_2 = offset_1;
     offset_1 = offset;
 
-    ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+    ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
 
 _match_stored:
     /* match found */
@@ -448,7 +448,7 @@ _match_stored:
                 const BYTE* const repEnd2 = repIndex2 < prefixLowestIndex ? dictEnd : iend;
                 size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixLowest) + 4;
                 U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
-                ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
+                ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2);
                 hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
                 hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
                 ip += repLength2;
@@ -585,7 +585,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
             const BYTE* repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
             mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
             ip++;
-            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
+            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
         } else {
             if ((matchLongIndex > dictStartIndex) && (MEM_read64(matchLong) == MEM_read64(ip))) {
                 const BYTE* const matchEnd = matchLongIndex < prefixStartIndex ? dictEnd : iend;
@@ -596,7 +596,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
                 while (((ip>anchor) & (matchLong>lowMatchPtr)) && (ip[-1] == matchLong[-1])) { ip--; matchLong--; mLength++; }   /* catch up */
                 offset_2 = offset_1;
                 offset_1 = offset;
-                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
 
             } else if ((matchIndex > dictStartIndex) && (MEM_read32(match) == MEM_read32(ip))) {
                 size_t const h3 = ZSTD_hashPtr(ip+1, hBitsL, 8);
@@ -621,7 +621,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
                 }
                 offset_2 = offset_1;
                 offset_1 = offset;
-                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
 
             } else {
                 ip += ((ip-anchor) >> kSearchStrength) + 1;
@@ -653,7 +653,7 @@ static size_t ZSTD_compressBlock_doubleFast_extDict_generic(
                 const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                 size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                 U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
-                ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
+                ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2);
                 hashSmall[ZSTD_hashPtr(ip, hBitsS, mls)] = current2;
                 hashLong[ZSTD_hashPtr(ip, hBitsL, 8)] = current2;
                 ip += repLength2;
@@ -180,7 +180,7 @@ _start: /* Requires: ip0 */
         mLength = ip0[-1] == match0[-1];
         ip0 -= mLength;
         match0 -= mLength;
-        offcode = 0;
+        offcode = STORE_REPCODE_1;
         mLength += 4;
         goto _match;
     }
@@ -267,7 +267,7 @@ _offset: /* Requires: ip0, idx */
     match0 = base + idx;
     rep_offset2 = rep_offset1;
     rep_offset1 = (U32)(ip0-match0);
-    offcode = rep_offset1 + ZSTD_REP_MOVE;
+    offcode = STORE_OFFSET(rep_offset1);
     mLength = 4;
 
     /* Count the backwards match length. */
@@ -282,7 +282,7 @@ _match: /* Requires: ip0, match0, offcode */
     /* Count the forward length. */
     mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend);
 
-    ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength - MINMATCH);
+    ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);
 
     ip0 += mLength;
     anchor = ip0;
@@ -306,7 +306,7 @@ _match: /* Requires: ip0, match0, offcode */
                 { U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; }  /* swap rep_offset2 <=> rep_offset1 */
                 hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
                 ip0 += rLength;
-                ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0 /*offCode*/, rLength-MINMATCH);
+                ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, STORE_REPCODE_1, rLength);
                 anchor = ip0;
                 continue;   /* faster when present (confirmed on gcc-8) ... (?) */
     }   }   }
@@ -439,7 +439,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
             const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
             mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
             ip++;
-            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
+            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, mLength);
         } else if ( (matchIndex <= prefixStartIndex) ) {
             size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
             U32 const dictMatchIndex = dictHashTable[dictHash];
@@ -459,7 +459,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
                 }   /* catch up */
                 offset_2 = offset_1;
                 offset_1 = offset;
-                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
             }
         } else if (MEM_read32(match) != MEM_read32(ip)) {
             /* it's not a match, and we're not going to check the dictionary */
@@ -474,7 +474,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
                  && (ip[-1] == match[-1])) { ip--; match--; mLength++; } /* catch up */
             offset_2 = offset_1;
             offset_1 = offset;
-            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
         }
 
         /* match found */
@@ -499,7 +499,7 @@ size_t ZSTD_compressBlock_fast_dictMatchState_generic(
                 const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                 size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                 U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
-                ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
+                ZSTD_storeSeq(seqStore, 0, anchor, iend, STORE_REPCODE_1, repLength2);
                 hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
                 ip += repLength2;
                 anchor = ip;
@@ -598,7 +598,7 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
             const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
             size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
             ip++;
-            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, rLength-MINMATCH);
+            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_REPCODE_1, rLength);
             ip += rLength;
             anchor = ip;
         } else {
@@ -614,7 +614,7 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
             size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
             while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
             offset_2 = offset_1; offset_1 = offset;  /* update offset history */
-            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
+            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, STORE_OFFSET(offset), mLength);
             ip += mLength;
             anchor = ip;
     }   }
@@ -633,7 +633,7 @@ static size_t ZSTD_compressBlock_fast_extDict_generic(
                 const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                 size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                 { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; }  /* swap offset_2 <=> offset_1 */
-                ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, 0 /*offcode*/, repLength2-MINMATCH);
+                ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, STORE_REPCODE_1, repLength2);
                 hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
                 ip += repLength2;
                 anchor = ip;