zstd-ruby 1.5.1.1 → 1.5.2.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: f544352cc9afa92507052f7c925db620b01307d15a3d6b1c4fedbf5de020a456
-  data.tar.gz: 11f85a814595a6b1f367b119c65d4715da8bd09f73c9aaf87179a38d1c6dec2a
+  metadata.gz: 3c33df57662a35d3eae35eb7b82afecf5e91191152edcef555b88aab4b803a3a
+  data.tar.gz: e3d5d34ff446b9a280bcb4e0fc103b4f6cb3312c37756145a578a05bfc237672
 SHA512:
-  metadata.gz: 9082bbafb9d1c7e405905425ca4dfa684e2eba152002673753748ba35769261a9430ae3ee9fb719b6906f71b3b40eae8f718c134d485e92b6783f015451ceb0c
-  data.tar.gz: 0e02ae6727c942ea699835e383c0433729de3c2eaa8efdb4ec9c9d150b87b112c72be5251fe3818a4c8b9c4ca7f0b4518f75f14f9059159f93c19a53e6198678
+  metadata.gz: 3f0f2cab5053a1bd6ffc3af753aae2ba2d61c20623738e8c4214a9b67dedd6a76df1fdec888b1fe21fbc3334457d8fe4ed03c9ff17dcfbe19f55022846291ea3
+  data.tar.gz: 59ee08ebad25d6c1f21d3b611f7352db6b4cb219a047fe0d9782c9eaae17aa6972bc70e4818c91289e8f7c03cab0dea9db7ac06ce372295c718fa0dae5046c94
data/README.md CHANGED
@@ -10,7 +10,7 @@ See https://github.com/facebook/zstd
 Fork from https://github.com/jarredholman/ruby-zstd.
 
 ## Zstd version
-v1.5.1 (https://github.com/facebook/zstd/tree/v1.5.1)
+v1.5.2 (https://github.com/facebook/zstd/tree/v1.5.2)
 
 ## Installation
 
@@ -212,11 +212,11 @@ $(ZSTD_STATLIB_DIR)/%.o : %.c $(ZSTD_STATLIB_DIR)/%.d | $(ZSTD_STATLIB_DIR)
 
 $(ZSTD_DYNLIB_DIR)/%.o : %.S | $(ZSTD_DYNLIB_DIR)
 	@echo AS $@
-	$(COMPILE.c) $(OUTPUT_OPTION) $<
+	$(COMPILE.S) $(OUTPUT_OPTION) $<
 
 $(ZSTD_STATLIB_DIR)/%.o : %.S | $(ZSTD_STATLIB_DIR)
 	@echo AS $@
-	$(COMPILE.c) $(OUTPUT_OPTION) $<
+	$(COMPILE.S) $(OUTPUT_OPTION) $<
 
 MKDIR ?= mkdir
 $(BUILD_DIR) $(ZSTD_DYNLIB_DIR) $(ZSTD_STATLIB_DIR):
@@ -86,7 +86,7 @@ static void* POOL_thread(void* opaque) {
         {   POOL_job const job = ctx->queue[ctx->queueHead];
             ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize;
             ctx->numThreadsBusy++;
-            ctx->queueEmpty = ctx->queueHead == ctx->queueTail;
+            ctx->queueEmpty = (ctx->queueHead == ctx->queueTail);
             /* Unlock the mutex, signal a pusher, and run the job */
             ZSTD_pthread_cond_signal(&ctx->queuePushCond);
             ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
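
An aside on the surrounding code: the job queue in POOL_ctx is a fixed-size ring buffer, where queueHead chases queueTail modulo queueSize, and an explicit queueEmpty flag disambiguates the head == tail case (which otherwise could mean either "empty" or "full"). A minimal sketch of that bookkeeping, with hypothetical names rather than the zstd API:

    #include <stddef.h>

    /* Illustrative ring-buffer bookkeeping in the style of POOL_ctx.
     * head == tail alone cannot distinguish empty from full, so an
     * explicit flag is refreshed on every pop, as POOL_thread() does. */
    typedef struct {
        size_t head, tail, size;
        int    empty;
    } Ring;

    static void ring_pop(Ring* r)
    {
        r->head  = (r->head + 1) % r->size;  /* consume one slot */
        r->empty = (r->head == r->tail);     /* same test as ctx->queueEmpty */
    }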
@@ -105,6 +105,7 @@ static void* POOL_thread(void* opaque) {
     assert(0);  /* Unreachable */
 }
 
+/* ZSTD_createThreadPool() : public access point */
 POOL_ctx* ZSTD_createThreadPool(size_t numThreads) {
     return POOL_create (numThreads, 0);
 }
@@ -114,7 +115,8 @@ POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
 }
 
 POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
-                               ZSTD_customMem customMem) {
+                               ZSTD_customMem customMem)
+{
     POOL_ctx* ctx;
     /* Check parameters */
     if (!numThreads) { return NULL; }
@@ -192,7 +194,7 @@ void ZSTD_freeThreadPool (ZSTD_threadPool* pool) {
     POOL_free (pool);
 }
 
-size_t POOL_sizeof(POOL_ctx *ctx) {
+size_t POOL_sizeof(const POOL_ctx* ctx) {
     if (ctx==NULL) return 0;  /* supports sizeof NULL */
     return sizeof(*ctx)
         + ctx->queueSize * sizeof(POOL_job)
@@ -257,7 +259,8 @@ static int isQueueFull(POOL_ctx const* ctx) {
 }
 
 
-static void POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque)
+static void
+POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque)
 {
     POOL_job const job = {function, opaque};
     assert(ctx != NULL);
@@ -313,7 +316,9 @@ POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
     return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
 }
 
-POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) {
+POOL_ctx*
+POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem)
+{
     (void)numThreads;
     (void)queueSize;
     (void)customMem;
@@ -341,7 +346,7 @@ int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) {
     return 1;
 }
 
-size_t POOL_sizeof(POOL_ctx* ctx) {
+size_t POOL_sizeof(const POOL_ctx* ctx) {
     if (ctx==NULL) return 0;  /* supports sizeof NULL */
     assert(ctx == &g_poolCtx);
     return sizeof(*ctx);
@@ -53,7 +53,7 @@ int POOL_resize(POOL_ctx* ctx, size_t numThreads);
  * @return threadpool memory usage
  *  note : compatible with NULL (returns 0 in this case)
  */
-size_t POOL_sizeof(POOL_ctx* ctx);
+size_t POOL_sizeof(const POOL_ctx* ctx);
 
 /*! POOL_function :
  *  The function type that can be added to a thread pool.
@@ -70,7 +70,7 @@ void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque);
 
 
 /*! POOL_tryAdd() :
- *  Add the job `function(opaque)` to thread pool _if_ a worker is available.
+ *  Add the job `function(opaque)` to thread pool _if_ a queue slot is available.
  *  Returns immediately even if not (does not block).
  * @return : 1 if successful, 0 if not.
  */
@@ -65,6 +65,12 @@
 # endif
 #endif
 
+/* Mark the internal assembly functions as hidden */
+#ifdef __ELF__
+# define ZSTD_HIDE_ASM_FUNCTION(func) .hidden func
+#else
+# define ZSTD_HIDE_ASM_FUNCTION(func)
+#endif
 
 /* Enable runtime BMI2 dispatch based on the CPU.
  * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default.
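
For context on this addition: on ELF targets, the `.hidden` directive keeps the library's hand-written assembly entry points out of the shared object's dynamic symbol table. The C-level analog, shown here as an illustration only (not part of the diff), is GCC/Clang's visibility attribute:

    /* Hedged illustration: the C counterpart of the `.hidden` directive.
     * A hidden symbol stays usable inside the shared object but is not
     * exported from it, so outside code cannot link against or interpose it. */
    #if defined(__ELF__) && defined(__GNUC__)
    #  define HIDE_INTERNAL __attribute__((visibility("hidden")))
    #else
    #  define HIDE_INTERNAL
    #endif

    HIDE_INTERNAL void internal_helper(void);  /* hypothetical internal symbol */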
@@ -67,7 +67,6 @@ extern "C" {
 #define ZSTD_OPT_NUM    (1<<12)
 
 #define ZSTD_REP_NUM      3                 /* number of repcodes */
-#define ZSTD_REP_MOVE     (ZSTD_REP_NUM-1)
 static UNUSED_ATTR const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };
 
 #define KB *(1 <<10)
@@ -285,9 +284,9 @@ typedef enum {
  *  Private declarations
  *********************************************/
 typedef struct seqDef_s {
-    U32 offset;      /* offset == rawOffset + ZSTD_REP_NUM, or equivalently, offCode + 1 */
+    U32 offBase;     /* offBase == Offset + ZSTD_REP_NUM, or repcode 1,2,3 */
     U16 litLength;
-    U16 matchLength;
+    U16 mlBase;      /* mlBase == matchLength - MINMATCH */
 } seqDef;
 
 /* Controls whether seqStore has a single "long" litLength or matchLength. See seqStore_t. */
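
The renamed fields make the stored encodings explicit: mlBase keeps matchLength - MINMATCH so a U16 covers longer matches, and readers add MINMATCH back (as ZSTD_getSequenceLength() does in the next hunk). A round-trip sketch with hypothetical helper names, not the zstd API:

    /* Illustrative only.  MINMATCH is 3 in zstd_internal.h. */
    #define MINMATCH 3

    static unsigned short storeMlBase(unsigned matchLength)
    {
        return (unsigned short)(matchLength - MINMATCH);  /* what seqDef.mlBase holds */
    }

    static unsigned loadMatchLength(unsigned short mlBase)
    {
        return (unsigned)mlBase + MINMATCH;  /* what readers reconstruct */
    }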
@@ -329,7 +328,7 @@ MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore
 {
     ZSTD_sequenceLength seqLen;
     seqLen.litLength = seq->litLength;
-    seqLen.matchLength = seq->matchLength + MINMATCH;
+    seqLen.matchLength = seq->mlBase + MINMATCH;
     if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) {
         if (seqStore->longLengthType == ZSTD_llt_literalLength) {
             seqLen.litLength += 0xFFFF;
@@ -2396,9 +2396,9 @@ void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
     assert(nbSeq <= seqStorePtr->maxNbSeq);
     for (u=0; u<nbSeq; u++) {
         U32 const llv = sequences[u].litLength;
-        U32 const mlv = sequences[u].matchLength;
+        U32 const mlv = sequences[u].mlBase;
         llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
-        ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
+        ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offBase);
         mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
     }
     if (seqStorePtr->longLengthType==ZSTD_llt_literalLength)
@@ -2910,9 +2910,9 @@ static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
     assert(zc->seqCollector.maxSequences >= seqStoreSeqSize + 1);
     ZSTD_memcpy(updatedRepcodes.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
     for (i = 0; i < seqStoreSeqSize; ++i) {
-        U32 rawOffset = seqStoreSeqs[i].offset - ZSTD_REP_NUM;
+        U32 rawOffset = seqStoreSeqs[i].offBase - ZSTD_REP_NUM;
         outSeqs[i].litLength = seqStoreSeqs[i].litLength;
-        outSeqs[i].matchLength = seqStoreSeqs[i].matchLength + MINMATCH;
+        outSeqs[i].matchLength = seqStoreSeqs[i].mlBase + MINMATCH;
         outSeqs[i].rep = 0;
 
         if (i == seqStore->longLengthPos) {
@@ -2923,9 +2923,9 @@ static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
             }
         }
 
-        if (seqStoreSeqs[i].offset <= ZSTD_REP_NUM) {
+        if (seqStoreSeqs[i].offBase <= ZSTD_REP_NUM) {
             /* Derive the correct offset corresponding to a repcode */
-            outSeqs[i].rep = seqStoreSeqs[i].offset;
+            outSeqs[i].rep = seqStoreSeqs[i].offBase;
             if (outSeqs[i].litLength != 0) {
                 rawOffset = updatedRepcodes.rep[outSeqs[i].rep - 1];
             } else {
@@ -2939,9 +2939,9 @@ static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
         outSeqs[i].offset = rawOffset;
         /* seqStoreSeqs[i].offset == offCode+1, and ZSTD_updateRep() expects offCode
            so we provide seqStoreSeqs[i].offset - 1 */
-        updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep,
-                                         seqStoreSeqs[i].offset - 1,
-                                         seqStoreSeqs[i].litLength == 0);
+        ZSTD_updateRep(updatedRepcodes.rep,
+                       seqStoreSeqs[i].offBase - 1,
+                       seqStoreSeqs[i].litLength == 0);
         literalsRead += outSeqs[i].litLength;
     }
     /* Insert last literals (if any exist) in the block as a sequence with ml == off == 0.
@@ -3385,7 +3385,7 @@ static size_t ZSTD_countSeqStoreMatchBytes(const seqStore_t* const seqStore) {
     size_t i;
     for (i = 0; i < nbSeqs; ++i) {
         seqDef seq = seqStore->sequencesStart[i];
-        matchBytes += seq.matchLength + MINMATCH;
+        matchBytes += seq.mlBase + MINMATCH;
         if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_matchLength) {
             matchBytes += 0x10000;
         }
@@ -3434,11 +3434,13 @@ static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore,
 
 /**
  * Returns the raw offset represented by the combination of offCode, ll0, and repcode history.
- * offCode must be an offCode representing a repcode, therefore in the range of [0, 2].
+ * offCode must represent a repcode in the numeric representation of ZSTD_storeSeq().
  */
-static U32 ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offCode, const U32 ll0) {
-    U32 const adjustedOffCode = offCode + ll0;
-    assert(offCode < ZSTD_REP_NUM);
+static U32
+ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offCode, const U32 ll0)
+{
+    U32 const adjustedOffCode = STORED_REPCODE(offCode) - 1 + ll0;  /* [ 0 - 3 ] */
+    assert(STORED_IS_REPCODE(offCode));
     if (adjustedOffCode == ZSTD_REP_NUM) {
         /* litlength == 0 and offCode == 2 implies selection of first repcode - 1 */
         assert(rep[0] > 0);
@@ -3449,11 +3451,16 @@ static U32 ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32
 
 /**
  * ZSTD_seqStore_resolveOffCodes() reconciles any possible divergences in offset history that may arise
- * due to emission of RLE/raw blocks that disturb the offset history, and replaces any repcodes within
- * the seqStore that may be invalid.
+ * due to emission of RLE/raw blocks that disturb the offset history,
+ * and replaces any repcodes within the seqStore that may be invalid.
+ *
+ * dRepcodes are updated as would be on the decompression side.
+ * cRepcodes are updated exactly in accordance with the seqStore.
  *
- * dRepcodes are updated as would be on the decompression side. cRepcodes are updated exactly in
- * accordance with the seqStore.
+ * Note : this function assumes seq->offBase respects the following numbering scheme :
+ *        0 : invalid
+ *        1-3 : repcode 1-3
+ *        4+ : real_offset+3
  */
 static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_t* const cRepcodes,
                                           seqStore_t* const seqStore, U32 const nbSeq) {
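
The numbering scheme in that new comment is the heart of the offBase rename, so a small decoder sketch may help (the helper name is hypothetical, but the mapping is exactly the one quoted above):

    #include <assert.h>

    /* offBase: 0 invalid, 1-3 repcode 1-3, 4+ real_offset + 3 (ZSTD_REP_NUM) */
    static unsigned offBaseToRawOffset(unsigned offBase, int* isRepcode)
    {
        assert(offBase > 0);              /* 0 is invalid */
        *isRepcode = (offBase <= 3);
        return *isRepcode ? offBase       /* repcode id 1..3 */
                          : offBase - 3;  /* true match offset */
    }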
@@ -3461,9 +3468,9 @@ static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_
     for (; idx < nbSeq; ++idx) {
         seqDef* const seq = seqStore->sequencesStart + idx;
         U32 const ll0 = (seq->litLength == 0);
-        U32 offCode = seq->offset - 1;
-        assert(seq->offset > 0);
-        if (offCode <= ZSTD_REP_MOVE) {
+        U32 const offCode = OFFBASE_TO_STORED(seq->offBase);
+        assert(seq->offBase > 0);
+        if (STORED_IS_REPCODE(offCode)) {
             U32 const dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offCode, ll0);
             U32 const cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offCode, ll0);
             /* Adjust simulated decompression repcode history if we come across a mismatch. Replace
@@ -3471,14 +3478,14 @@ static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_
              * repcode history.
              */
             if (dRawOffset != cRawOffset) {
-                seq->offset = cRawOffset + ZSTD_REP_NUM;
+                seq->offBase = cRawOffset + ZSTD_REP_NUM;
             }
         }
         /* Compression repcode history is always updated with values directly from the unmodified seqStore.
          * Decompression repcode history may use modified seq->offset value taken from compression repcode history.
          */
-        *dRepcodes = ZSTD_updateRep(dRepcodes->rep, seq->offset - 1, ll0);
-        *cRepcodes = ZSTD_updateRep(cRepcodes->rep, offCode, ll0);
+        ZSTD_updateRep(dRepcodes->rep, OFFBASE_TO_STORED(seq->offBase), ll0);
+        ZSTD_updateRep(cRepcodes->rep, offCode, ll0);
     }
 }
 
@@ -3487,11 +3494,13 @@ static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_
  *
  * Returns the total size of that block (including header) or a ZSTD error code.
  */
-static size_t ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore,
-                                                repcodes_t* const dRep, repcodes_t* const cRep,
-                                                void* dst, size_t dstCapacity,
-                                                const void* src, size_t srcSize,
-                                                U32 lastBlock, U32 isPartition) {
+static size_t
+ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore,
+                                  repcodes_t* const dRep, repcodes_t* const cRep,
+                                  void* dst, size_t dstCapacity,
+                                  const void* src, size_t srcSize,
+                                  U32 lastBlock, U32 isPartition)
+{
     const U32 rleMaxLength = 25;
     BYTE* op = (BYTE*)dst;
     const BYTE* ip = (const BYTE*)src;
@@ -3500,6 +3509,7 @@ static size_t ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const
 
     /* In case of an RLE or raw block, the simulated decompression repcode history must be reset */
     repcodes_t const dRepOriginal = *dRep;
+    DEBUGLOG(5, "ZSTD_compressSeqStore_singleBlock");
     if (isPartition)
         ZSTD_seqStore_resolveOffCodes(dRep, cRep, seqStore, (U32)(seqStore->sequences - seqStore->sequencesStart));
 
@@ -3572,8 +3582,10 @@ typedef struct {
  * Furthermore, the number of splits is capped by ZSTD_MAX_NB_BLOCK_SPLITS. At ZSTD_MAX_NB_BLOCK_SPLITS == 196 with the current existing blockSize
  * maximum of 128 KB, this value is actually impossible to reach.
  */
-static void ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t endIdx,
-                                         ZSTD_CCtx* zc, const seqStore_t* origSeqStore) {
+static void
+ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t endIdx,
+                             ZSTD_CCtx* zc, const seqStore_t* origSeqStore)
+{
     seqStore_t* fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk;
     seqStore_t* firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore;
     seqStore_t* secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore;
@@ -3628,8 +3640,10 @@ static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq)
  *
  * Returns combined size of all blocks (which includes headers), or a ZSTD error code.
  */
-static size_t ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity,
-                                                     const void* src, size_t blockSize, U32 lastBlock, U32 nbSeq) {
+static size_t
+ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity,
+                                       const void* src, size_t blockSize, U32 lastBlock, U32 nbSeq)
+{
     size_t cSize = 0;
     const BYTE* ip = (const BYTE*)src;
     BYTE* op = (BYTE*)dst;
@@ -3715,9 +3729,11 @@ static size_t ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, s
     return cSize;
 }
 
-static size_t ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc,
-                                            void* dst, size_t dstCapacity,
-                                            const void* src, size_t srcSize, U32 lastBlock) {
+static size_t
+ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc,
+                              void* dst, size_t dstCapacity,
+                              const void* src, size_t srcSize, U32 lastBlock)
+{
     const BYTE* ip = (const BYTE*)src;
     BYTE* op = (BYTE*)dst;
     U32 nbSeq;
@@ -3743,9 +3759,10 @@ static size_t ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc,
     return cSize;
 }
 
-static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
-                                          void* dst, size_t dstCapacity,
-                                          const void* src, size_t srcSize, U32 frame)
+static size_t
+ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
+                            void* dst, size_t dstCapacity,
+                            const void* src, size_t srcSize, U32 frame)
 {
     /* This the upper bound for the length of an rle block.
      * This isn't the actual upper bound. Finding the real threshold
@@ -5738,39 +5755,39 @@ typedef struct {
     size_t posInSrc;    /* Number of bytes given by sequences provided so far */
 } ZSTD_sequencePosition;
 
-/* Returns a ZSTD error code if sequence is not valid */
-static size_t ZSTD_validateSequence(U32 offCode, U32 matchLength,
-                                    size_t posInSrc, U32 windowLog, size_t dictSize, U32 minMatch) {
-    size_t offsetBound;
-    U32 windowSize = 1 << windowLog;
+/* ZSTD_validateSequence() :
+ * @offCode : is presumed to follow format required by ZSTD_storeSeq()
+ * @returns a ZSTD error code if sequence is not valid
+ */
+static size_t
+ZSTD_validateSequence(U32 offCode, U32 matchLength,
+                      size_t posInSrc, U32 windowLog, size_t dictSize)
+{
+    U32 const windowSize = 1 << windowLog;
     /* posInSrc represents the amount of data the the decoder would decode up to this point.
      * As long as the amount of data decoded is less than or equal to window size, offsets may be
      * larger than the total length of output decoded in order to reference the dict, even larger than
      * window size. After output surpasses windowSize, we're limited to windowSize offsets again.
      */
-    offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize;
-    RETURN_ERROR_IF(offCode > offsetBound + ZSTD_REP_MOVE, corruption_detected, "Offset too large!");
-    RETURN_ERROR_IF(matchLength < minMatch, corruption_detected, "Matchlength too small");
+    size_t const offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize;
+    RETURN_ERROR_IF(offCode > STORE_OFFSET(offsetBound), corruption_detected, "Offset too large!");
+    RETURN_ERROR_IF(matchLength < MINMATCH, corruption_detected, "Matchlength too small");
     return 0;
 }
 
 /* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */
-static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0) {
-    U32 offCode = rawOffset + ZSTD_REP_MOVE;
-    U32 repCode = 0;
+static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0)
+{
+    U32 offCode = STORE_OFFSET(rawOffset);
 
     if (!ll0 && rawOffset == rep[0]) {
-        repCode = 1;
+        offCode = STORE_REPCODE_1;
     } else if (rawOffset == rep[1]) {
-        repCode = 2 - ll0;
+        offCode = STORE_REPCODE(2 - ll0);
     } else if (rawOffset == rep[2]) {
-        repCode = 3 - ll0;
+        offCode = STORE_REPCODE(3 - ll0);
     } else if (ll0 && rawOffset == rep[0] - 1) {
-        repCode = 3;
-    }
-    if (repCode) {
-        /* ZSTD_storeSeq expects a number in the range [0, 2] to represent a repcode */
-        offCode = repCode - 1;
+        offCode = STORE_REPCODE_3;
     }
     return offCode;
 }
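
The repcode selection rule that ZSTD_finalizeOffCode() implements is easier to see stripped of the STORE_* macros. A simplified restatement of the same logic (names hypothetical): with litLength == 0 (ll0), repcode 1 is excluded and the candidates shift down, with rep[0] - 1 taking the repcode-3 slot.

    /* Returns the repcode id (1..3) that represents rawOffset, or 0 if none
     * applies and the raw offset must be stored instead.  Mirrors the branch
     * structure of ZSTD_finalizeOffCode() above. */
    static unsigned pickRepcode(unsigned rawOffset, const unsigned rep[3], unsigned ll0)
    {
        if (!ll0 && rawOffset == rep[0])    return 1;
        if (rawOffset == rep[1])            return 2 - ll0;
        if (rawOffset == rep[2])            return 3 - ll0;
        if (ll0 && rawOffset == rep[0] - 1) return 3;
        return 0;
    }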
@@ -5778,18 +5795,17 @@ static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32
 /* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of
  * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter.
  */
-static size_t ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
-                                                             const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
-                                                             const void* src, size_t blockSize) {
+static size_t
+ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
+                                               ZSTD_sequencePosition* seqPos,
+                                               const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+                                               const void* src, size_t blockSize)
+{
     U32 idx = seqPos->idx;
     BYTE const* ip = (BYTE const*)(src);
     const BYTE* const iend = ip + blockSize;
     repcodes_t updatedRepcodes;
     U32 dictSize;
-    U32 litLength;
-    U32 matchLength;
-    U32 ll0;
-    U32 offCode;
 
     if (cctx->cdict) {
         dictSize = (U32)cctx->cdict->dictContentSize;
@@ -5800,23 +5816,22 @@ static size_t ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, ZS
     }
     ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
     for (; (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0) && idx < inSeqsSize; ++idx) {
-        litLength = inSeqs[idx].litLength;
-        matchLength = inSeqs[idx].matchLength;
-        ll0 = litLength == 0;
-        offCode = ZSTD_finalizeOffCode(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
-        updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
+        U32 const litLength = inSeqs[idx].litLength;
+        U32 const ll0 = (litLength == 0);
+        U32 const matchLength = inSeqs[idx].matchLength;
+        U32 const offCode = ZSTD_finalizeOffCode(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
+        ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
 
         DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
         if (cctx->appliedParams.validateSequences) {
             seqPos->posInSrc += litLength + matchLength;
             FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
-                                                   cctx->appliedParams.cParams.windowLog, dictSize,
-                                                   cctx->appliedParams.cParams.minMatch),
+                                                   cctx->appliedParams.cParams.windowLog, dictSize),
                              "Sequence validation failed");
         }
         RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
                         "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
-        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength - MINMATCH);
+        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength);
         ip += matchLength + litLength;
     }
     ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
@@ -5843,9 +5858,11 @@ static size_t ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, ZS
  * avoid splitting a match, or to avoid splitting a match such that it would produce a match
  * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block.
  */
-static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
-                                                       const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
-                                                       const void* src, size_t blockSize) {
+static size_t
+ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
+                                         const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+                                         const void* src, size_t blockSize)
+{
     U32 idx = seqPos->idx;
     U32 startPosInSequence = seqPos->posInSequence;
     U32 endPosInSequence = seqPos->posInSequence + (U32)blockSize;
@@ -5855,10 +5872,6 @@ static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_seq
     repcodes_t updatedRepcodes;
     U32 bytesAdjustment = 0;
     U32 finalMatchSplit = 0;
-    U32 litLength;
-    U32 matchLength;
-    U32 rawOffset;
-    U32 offCode;
 
     if (cctx->cdict) {
         dictSize = cctx->cdict->dictContentSize;
@@ -5872,9 +5885,10 @@ static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_seq
     ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
     while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) {
         const ZSTD_Sequence currSeq = inSeqs[idx];
-        litLength = currSeq.litLength;
-        matchLength = currSeq.matchLength;
-        rawOffset = currSeq.offset;
+        U32 litLength = currSeq.litLength;
+        U32 matchLength = currSeq.matchLength;
+        U32 const rawOffset = currSeq.offset;
+        U32 offCode;
 
         /* Modify the sequence depending on where endPosInSequence lies */
         if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) {
@@ -5927,22 +5941,21 @@ static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_seq
             }
         }
         /* Check if this offset can be represented with a repcode */
-        {   U32 ll0 = (litLength == 0);
+        {   U32 const ll0 = (litLength == 0);
             offCode = ZSTD_finalizeOffCode(rawOffset, updatedRepcodes.rep, ll0);
-            updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
+            ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
         }
 
         if (cctx->appliedParams.validateSequences) {
             seqPos->posInSrc += litLength + matchLength;
             FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
-                                                   cctx->appliedParams.cParams.windowLog, dictSize,
-                                                   cctx->appliedParams.cParams.minMatch),
+                                                   cctx->appliedParams.cParams.windowLog, dictSize),
                              "Sequence validation failed");
         }
         DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
         RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
                         "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
-        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength - MINMATCH);
+        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength);
         ip += matchLength + litLength;
     }
     DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
@@ -5967,7 +5980,8 @@ static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_seq
 typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
                                        const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
                                        const void* src, size_t blockSize);
-static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) {
+static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode)
+{
     ZSTD_sequenceCopier sequenceCopier = NULL;
     assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, mode));
     if (mode == ZSTD_sf_explicitBlockDelimiters) {
@@ -5981,12 +5995,15 @@ static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode)
 
 /* Compress, block-by-block, all of the sequences given.
  *
- * Returns the cumulative size of all compressed blocks (including their headers), otherwise a ZSTD error.
+ * Returns the cumulative size of all compressed blocks (including their headers),
+ * otherwise a ZSTD error.
  */
-static size_t ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
-                                              void* dst, size_t dstCapacity,
-                                              const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
-                                              const void* src, size_t srcSize) {
+static size_t
+ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
+                                void* dst, size_t dstCapacity,
+                                const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
+                                const void* src, size_t srcSize)
+{
     size_t cSize = 0;
     U32 lastBlock;
     size_t blockSize;
@@ -5996,7 +6013,7 @@ static size_t ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
 
     BYTE const* ip = (BYTE const*)src;
     BYTE* op = (BYTE*)dst;
-    ZSTD_sequenceCopier sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);
+    ZSTD_sequenceCopier const sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);
 
     DEBUGLOG(4, "ZSTD_compressSequences_internal srcSize: %zu, inSeqsSize: %zu", srcSize, inSeqsSize);
     /* Special case: empty frame */
@@ -6096,7 +6113,8 @@ static size_t ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
 size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapacity,
                               const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
-                              const void* src, size_t srcSize) {
+                              const void* src, size_t srcSize)
+{
     BYTE* op = (BYTE*)dst;
     size_t cSize = 0;
     size_t compressedBlocksSize = 0;
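
ZSTD_compressSequences() is the public entry point for this whole code path. A hedged usage sketch, assuming the experimental API from zstd.h (it requires ZSTD_STATIC_LINKING_ONLY and may change between releases); error handling is abbreviated:

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    /* Compress src using caller-provided sequences (e.g. from a custom matcher). */
    static size_t compressWithSequences(void* dst, size_t dstCapacity,
                                        const ZSTD_Sequence* seqs, size_t nbSeqs,
                                        const void* src, size_t srcSize)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        size_t cSize;
        /* Sequences carry explicit block delimiters; ask zstd to validate them. */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_validateSequences, 1);
        cSize = ZSTD_compressSequences(cctx, dst, dstCapacity, seqs, nbSeqs, src, srcSize);
        ZSTD_freeCCtx(cctx);
        return cSize;  /* check with ZSTD_isError() */
    }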