zstd-ruby 1.3.7.0 → 1.3.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. checksums.yaml +4 -4
  2. data/README.md +1 -1
  3. data/ext/zstdruby/libzstd/BUCK +15 -2
  4. data/ext/zstdruby/libzstd/Makefile +37 -2
  5. data/ext/zstdruby/libzstd/README.md +67 -41
  6. data/ext/zstdruby/libzstd/common/bitstream.h +2 -2
  7. data/ext/zstdruby/libzstd/common/compiler.h +19 -12
  8. data/ext/zstdruby/libzstd/common/cpu.h +1 -1
  9. data/ext/zstdruby/libzstd/common/debug.h +22 -11
  10. data/ext/zstdruby/libzstd/common/error_private.c +6 -0
  11. data/ext/zstdruby/libzstd/common/fse.h +2 -2
  12. data/ext/zstdruby/libzstd/common/huf.h +25 -1
  13. data/ext/zstdruby/libzstd/common/pool.c +1 -1
  14. data/ext/zstdruby/libzstd/common/zstd_common.c +3 -1
  15. data/ext/zstdruby/libzstd/common/zstd_errors.h +1 -0
  16. data/ext/zstdruby/libzstd/common/zstd_internal.h +11 -2
  17. data/ext/zstdruby/libzstd/compress/fse_compress.c +3 -3
  18. data/ext/zstdruby/libzstd/compress/hist.c +19 -11
  19. data/ext/zstdruby/libzstd/compress/hist.h +11 -8
  20. data/ext/zstdruby/libzstd/compress/huf_compress.c +33 -31
  21. data/ext/zstdruby/libzstd/compress/zstd_compress.c +621 -371
  22. data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +90 -28
  23. data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +4 -4
  24. data/ext/zstdruby/libzstd/compress/zstd_fast.c +15 -15
  25. data/ext/zstdruby/libzstd/compress/zstd_lazy.c +25 -18
  26. data/ext/zstdruby/libzstd/compress/zstd_ldm.c +18 -67
  27. data/ext/zstdruby/libzstd/compress/zstd_ldm.h +2 -6
  28. data/ext/zstdruby/libzstd/compress/zstd_opt.c +133 -48
  29. data/ext/zstdruby/libzstd/compress/zstd_opt.h +8 -0
  30. data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +229 -73
  31. data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +18 -10
  32. data/ext/zstdruby/libzstd/decompress/huf_decompress.c +178 -42
  33. data/ext/zstdruby/libzstd/decompress/zstd_ddict.c +240 -0
  34. data/ext/zstdruby/libzstd/decompress/zstd_ddict.h +44 -0
  35. data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +244 -1680
  36. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.c +1307 -0
  37. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.h +59 -0
  38. data/ext/zstdruby/libzstd/decompress/zstd_decompress_internal.h +168 -0
  39. data/ext/zstdruby/libzstd/dictBuilder/cover.c +13 -11
  40. data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +15 -15
  41. data/ext/zstdruby/libzstd/dictBuilder/zdict.c +28 -28
  42. data/ext/zstdruby/libzstd/dll/libzstd.def +0 -1
  43. data/ext/zstdruby/libzstd/legacy/zstd_v04.c +0 -10
  44. data/ext/zstdruby/libzstd/legacy/zstd_v05.c +15 -15
  45. data/ext/zstdruby/libzstd/zstd.h +1208 -968
  46. data/lib/zstd-ruby/version.rb +1 -1
  47. metadata +7 -2
@@ -512,7 +512,7 @@ MEM_STATIC void FSE_initCState(FSE_CState_t* statePtr, const FSE_CTable* ct)
512
512
  const U32 tableLog = MEM_read16(ptr);
513
513
  statePtr->value = (ptrdiff_t)1<<tableLog;
514
514
  statePtr->stateTable = u16ptr+2;
515
- statePtr->symbolTT = ((const U32*)ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1));
515
+ statePtr->symbolTT = ct + 1 + (tableLog ? (1<<(tableLog-1)) : 1);
516
516
  statePtr->stateLog = tableLog;
517
517
  }
518
518
 
@@ -531,7 +531,7 @@ MEM_STATIC void FSE_initCState2(FSE_CState_t* statePtr, const FSE_CTable* ct, U3
531
531
  }
532
532
  }
533
533
 
534
- MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, U32 symbol)
534
+ MEM_STATIC void FSE_encodeSymbol(BIT_CStream_t* bitC, FSE_CState_t* statePtr, unsigned symbol)
535
535
  {
536
536
  FSE_symbolCompressionTransform const symbolTT = ((const FSE_symbolCompressionTransform*)(statePtr->symbolTT))[symbol];
537
537
  const U16* const stateTable = (const U16*)(statePtr->stateTable);
@@ -173,15 +173,19 @@ typedef U32 HUF_DTable;
173
173
  * Advanced decompression functions
174
174
  ******************************************/
175
175
  size_t HUF_decompress4X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */
176
+ #ifndef HUF_FORCE_DECOMPRESS_X1
176
177
  size_t HUF_decompress4X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */
178
+ #endif
177
179
 
178
180
  size_t HUF_decompress4X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< decodes RLE and uncompressed */
179
181
  size_t HUF_decompress4X_hufOnly(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< considers RLE and uncompressed as errors */
180
182
  size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< considers RLE and uncompressed as errors */
181
183
  size_t HUF_decompress4X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */
182
184
  size_t HUF_decompress4X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */
185
+ #ifndef HUF_FORCE_DECOMPRESS_X1
183
186
  size_t HUF_decompress4X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */
184
187
  size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */
188
+ #endif
185
189
 
186
190
 
187
191
  /* ****************************************
@@ -228,7 +232,7 @@ size_t HUF_compress4X_repeat(void* dst, size_t dstSize,
228
232
  #define HUF_CTABLE_WORKSPACE_SIZE_U32 (2*HUF_SYMBOLVALUE_MAX +1 +1)
229
233
  #define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
230
234
  size_t HUF_buildCTable_wksp (HUF_CElt* tree,
231
- const U32* count, U32 maxSymbolValue, U32 maxNbBits,
235
+ const unsigned* count, U32 maxSymbolValue, U32 maxNbBits,
232
236
  void* workSpace, size_t wkspSize);
233
237
 
234
238
  /*! HUF_readStats() :
@@ -277,14 +281,22 @@ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);
277
281
  #define HUF_DECOMPRESS_WORKSPACE_SIZE (2 << 10)
278
282
  #define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
279
283
 
284
+ #ifndef HUF_FORCE_DECOMPRESS_X2
280
285
  size_t HUF_readDTableX1 (HUF_DTable* DTable, const void* src, size_t srcSize);
281
286
  size_t HUF_readDTableX1_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
287
+ #endif
288
+ #ifndef HUF_FORCE_DECOMPRESS_X1
282
289
  size_t HUF_readDTableX2 (HUF_DTable* DTable, const void* src, size_t srcSize);
283
290
  size_t HUF_readDTableX2_wksp (HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize);
291
+ #endif
284
292
 
285
293
  size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
294
+ #ifndef HUF_FORCE_DECOMPRESS_X2
286
295
  size_t HUF_decompress4X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
296
+ #endif
297
+ #ifndef HUF_FORCE_DECOMPRESS_X1
287
298
  size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
299
+ #endif
288
300
 
289
301
 
290
302
  /* ====================== */
@@ -306,24 +318,36 @@ size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
306
318
  HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
307
319
 
308
320
  size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* single-symbol decoder */
321
+ #ifndef HUF_FORCE_DECOMPRESS_X1
309
322
  size_t HUF_decompress1X2 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /* double-symbol decoder */
323
+ #endif
310
324
 
311
325
  size_t HUF_decompress1X_DCtx (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);
312
326
  size_t HUF_decompress1X_DCtx_wksp (HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize);
327
+ #ifndef HUF_FORCE_DECOMPRESS_X2
313
328
  size_t HUF_decompress1X1_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< single-symbol decoder */
314
329
  size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< single-symbol decoder */
330
+ #endif
331
+ #ifndef HUF_FORCE_DECOMPRESS_X1
315
332
  size_t HUF_decompress1X2_DCtx(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize); /**< double-symbols decoder */
316
333
  size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize); /**< double-symbols decoder */
334
+ #endif
317
335
 
318
336
  size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable); /**< automatic selection of single or double symbol decoder, based on DTable */
337
+ #ifndef HUF_FORCE_DECOMPRESS_X2
319
338
  size_t HUF_decompress1X1_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
339
+ #endif
340
+ #ifndef HUF_FORCE_DECOMPRESS_X1
320
341
  size_t HUF_decompress1X2_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable);
342
+ #endif
321
343
 
322
344
  /* BMI2 variants.
323
345
  * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
324
346
  */
325
347
  size_t HUF_decompress1X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
348
+ #ifndef HUF_FORCE_DECOMPRESS_X2
326
349
  size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
350
+ #endif
327
351
  size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
328
352
  size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
329
353
 
@@ -88,8 +88,8 @@ static void* POOL_thread(void* opaque) {
88
88
  ctx->numThreadsBusy++;
89
89
  ctx->queueEmpty = ctx->queueHead == ctx->queueTail;
90
90
  /* Unlock the mutex, signal a pusher, and run the job */
91
- ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
92
91
  ZSTD_pthread_cond_signal(&ctx->queuePushCond);
92
+ ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
93
93
 
94
94
  job.function(job.opaque);
95
95
 
@@ -30,8 +30,10 @@ const char* ZSTD_versionString(void) { return ZSTD_VERSION_STRING; }
30
30
  /*-****************************************
31
31
  * ZSTD Error Management
32
32
  ******************************************/
33
+ #undef ZSTD_isError /* defined within zstd_internal.h */
33
34
  /*! ZSTD_isError() :
34
- * tells if a return value is an error code */
35
+ * tells if a return value is an error code
36
+ * symbol is required for external callers */
35
37
  unsigned ZSTD_isError(size_t code) { return ERR_isError(code); }
36
38
 
37
39
  /*! ZSTD_getErrorName() :
@@ -72,6 +72,7 @@ typedef enum {
72
72
  ZSTD_error_workSpace_tooSmall= 66,
73
73
  ZSTD_error_dstSize_tooSmall = 70,
74
74
  ZSTD_error_srcSize_wrong = 72,
75
+ ZSTD_error_dstBuffer_null = 74,
75
76
  /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */
76
77
  ZSTD_error_frameIndex_tooLarge = 100,
77
78
  ZSTD_error_seekableIO = 102,
@@ -41,6 +41,9 @@ extern "C" {
41
41
 
42
42
  /* ---- static assert (debug) --- */
43
43
  #define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)
44
+ #define ZSTD_isError ERR_isError /* for inlining */
45
+ #define FSE_isError ERR_isError
46
+ #define HUF_isError ERR_isError
44
47
 
45
48
 
46
49
  /*-*************************************
@@ -75,7 +78,6 @@ static const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };
75
78
  #define BIT0 1
76
79
 
77
80
  #define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
78
- #define ZSTD_WINDOWLOG_DEFAULTMAX 27 /* Default maximum allowed window log */
79
81
  static const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
80
82
  static const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };
81
83
 
@@ -242,7 +244,7 @@ typedef struct {
242
244
  blockType_e blockType;
243
245
  U32 lastBlock;
244
246
  U32 origSize;
245
- } blockProperties_t;
247
+ } blockProperties_t; /* declared here for decompress and fullbench */
246
248
 
247
249
  /*! ZSTD_getcBlockSize() :
248
250
  * Provides the size of compressed block from block header `src` */
@@ -250,6 +252,13 @@ typedef struct {
250
252
  size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
251
253
  blockProperties_t* bpPtr);
252
254
 
255
+ /*! ZSTD_decodeSeqHeaders() :
256
+ * decode sequence header from src */
257
+ /* Used by: decompress, fullbench (does not get its definition from here) */
258
+ size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
259
+ const void* src, size_t srcSize);
260
+
261
+
253
262
  #if defined (__cplusplus)
254
263
  }
255
264
  #endif
@@ -115,7 +115,7 @@ size_t FSE_buildCTable_wksp(FSE_CTable* ct,
115
115
  /* symbol start positions */
116
116
  { U32 u;
117
117
  cumul[0] = 0;
118
- for (u=1; u<=maxSymbolValue+1; u++) {
118
+ for (u=1; u <= maxSymbolValue+1; u++) {
119
119
  if (normalizedCounter[u-1]==-1) { /* Low proba symbol */
120
120
  cumul[u] = cumul[u-1] + 1;
121
121
  tableSymbol[highThreshold--] = (FSE_FUNCTION_TYPE)(u-1);
@@ -658,7 +658,7 @@ size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t src
658
658
  BYTE* op = ostart;
659
659
  BYTE* const oend = ostart + dstSize;
660
660
 
661
- U32 count[FSE_MAX_SYMBOL_VALUE+1];
661
+ unsigned count[FSE_MAX_SYMBOL_VALUE+1];
662
662
  S16 norm[FSE_MAX_SYMBOL_VALUE+1];
663
663
  FSE_CTable* CTable = (FSE_CTable*)workSpace;
664
664
  size_t const CTableSize = FSE_CTABLE_SIZE_U32(tableLog, maxSymbolValue);
@@ -672,7 +672,7 @@ size_t FSE_compress_wksp (void* dst, size_t dstSize, const void* src, size_t src
672
672
  if (!tableLog) tableLog = FSE_DEFAULT_TABLELOG;
673
673
 
674
674
  /* Scan input and build symbol stats */
675
- { CHECK_V_F(maxCount, HIST_count_wksp(count, &maxSymbolValue, src, srcSize, (unsigned*)scratchBuffer) );
675
+ { CHECK_V_F(maxCount, HIST_count_wksp(count, &maxSymbolValue, src, srcSize, scratchBuffer, scratchBufferSize) );
676
676
  if (maxCount == srcSize) return 1; /* only a single symbol in src : rle */
677
677
  if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */
678
678
  if (maxCount < (srcSize >> 7)) return 0; /* Heuristic : not compressible enough */
@@ -73,6 +73,7 @@ unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
73
73
  return largestCount;
74
74
  }
75
75
 
76
+ typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e;
76
77
 
77
78
  /* HIST_count_parallel_wksp() :
78
79
  * store histogram into 4 intermediate tables, recombined at the end.
@@ -85,8 +86,8 @@ unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
85
86
  static size_t HIST_count_parallel_wksp(
86
87
  unsigned* count, unsigned* maxSymbolValuePtr,
87
88
  const void* source, size_t sourceSize,
88
- unsigned checkMax,
89
- unsigned* const workSpace)
89
+ HIST_checkInput_e check,
90
+ U32* const workSpace)
90
91
  {
91
92
  const BYTE* ip = (const BYTE*)source;
92
93
  const BYTE* const iend = ip+sourceSize;
@@ -137,7 +138,7 @@ static size_t HIST_count_parallel_wksp(
137
138
  /* finish last symbols */
138
139
  while (ip<iend) Counting1[*ip++]++;
139
140
 
140
- if (checkMax) { /* verify stats will fit into destination table */
141
+ if (check) { /* verify stats will fit into destination table */
141
142
  U32 s; for (s=255; s>maxSymbolValue; s--) {
142
143
  Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
143
144
  if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall);
@@ -157,14 +158,18 @@ static size_t HIST_count_parallel_wksp(
157
158
 
158
159
  /* HIST_countFast_wksp() :
159
160
  * Same as HIST_countFast(), but using an externally provided scratch buffer.
160
- * `workSpace` size must be table of >= HIST_WKSP_SIZE_U32 unsigned */
161
+ * `workSpace` is a writable buffer which must be 4-bytes aligned,
162
+ * `workSpaceSize` must be >= HIST_WKSP_SIZE
163
+ */
161
164
  size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
162
165
  const void* source, size_t sourceSize,
163
- unsigned* workSpace)
166
+ void* workSpace, size_t workSpaceSize)
164
167
  {
165
168
  if (sourceSize < 1500) /* heuristic threshold */
166
169
  return HIST_count_simple(count, maxSymbolValuePtr, source, sourceSize);
167
- return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 0, workSpace);
170
+ if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
171
+ if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
172
+ return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace);
168
173
  }
169
174
 
170
175
  /* fast variant (unsafe : won't check if src contains values beyond count[] limit) */
@@ -172,24 +177,27 @@ size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
172
177
  const void* source, size_t sourceSize)
173
178
  {
174
179
  unsigned tmpCounters[HIST_WKSP_SIZE_U32];
175
- return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters);
180
+ return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters, sizeof(tmpCounters));
176
181
  }
177
182
 
178
183
  /* HIST_count_wksp() :
179
184
  * Same as HIST_count(), but using an externally provided scratch buffer.
180
185
  * `workSpace` size must be table of >= HIST_WKSP_SIZE_U32 unsigned */
181
186
  size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
182
- const void* source, size_t sourceSize, unsigned* workSpace)
187
+ const void* source, size_t sourceSize,
188
+ void* workSpace, size_t workSpaceSize)
183
189
  {
190
+ if ((size_t)workSpace & 3) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
191
+ if (workSpaceSize < HIST_WKSP_SIZE) return ERROR(workSpace_tooSmall);
184
192
  if (*maxSymbolValuePtr < 255)
185
- return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, 1, workSpace);
193
+ return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, checkMaxSymbolValue, (U32*)workSpace);
186
194
  *maxSymbolValuePtr = 255;
187
- return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace);
195
+ return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize);
188
196
  }
189
197
 
190
198
  size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
191
199
  const void* src, size_t srcSize)
192
200
  {
193
201
  unsigned tmpCounters[HIST_WKSP_SIZE_U32];
194
- return HIST_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters);
202
+ return HIST_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters, sizeof(tmpCounters));
195
203
  }
@@ -41,11 +41,11 @@
41
41
 
42
42
  /*! HIST_count():
43
43
  * Provides the precise count of each byte within a table 'count'.
44
- * 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1).
44
+ * 'count' is a table of unsigned int, of minimum size (*maxSymbolValuePtr+1).
45
45
  * Updates *maxSymbolValuePtr with actual largest symbol value detected.
46
- * @return : count of the most frequent symbol (which isn't identified).
47
- * or an error code, which can be tested using HIST_isError().
48
- * note : if return == srcSize, there is only one symbol.
46
+ * @return : count of the most frequent symbol (which isn't identified).
47
+ * or an error code, which can be tested using HIST_isError().
48
+ * note : if return == srcSize, there is only one symbol.
49
49
  */
50
50
  size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
51
51
  const void* src, size_t srcSize);
@@ -56,14 +56,16 @@ unsigned HIST_isError(size_t code); /**< tells if a return value is an error co
56
56
  /* --- advanced histogram functions --- */
57
57
 
58
58
  #define HIST_WKSP_SIZE_U32 1024
59
+ #define HIST_WKSP_SIZE (HIST_WKSP_SIZE_U32 * sizeof(unsigned))
59
60
  /** HIST_count_wksp() :
60
61
  * Same as HIST_count(), but using an externally provided scratch buffer.
61
62
  * Benefit is this function will use very little stack space.
62
- * `workSpace` must be a table of unsigned of size >= HIST_WKSP_SIZE_U32
63
+ * `workSpace` is a writable buffer which must be 4-bytes aligned,
64
+ * `workSpaceSize` must be >= HIST_WKSP_SIZE
63
65
  */
64
66
  size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
65
67
  const void* src, size_t srcSize,
66
- unsigned* workSpace);
68
+ void* workSpace, size_t workSpaceSize);
67
69
 
68
70
  /** HIST_countFast() :
69
71
  * same as HIST_count(), but blindly trusts that all byte values within src are <= *maxSymbolValuePtr.
@@ -74,11 +76,12 @@ size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
74
76
 
75
77
  /** HIST_countFast_wksp() :
76
78
  * Same as HIST_countFast(), but using an externally provided scratch buffer.
77
- * `workSpace` must be a table of unsigned of size >= HIST_WKSP_SIZE_U32
79
+ * `workSpace` is a writable buffer which must be 4-bytes aligned,
80
+ * `workSpaceSize` must be >= HIST_WKSP_SIZE
78
81
  */
79
82
  size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
80
83
  const void* src, size_t srcSize,
81
- unsigned* workSpace);
84
+ void* workSpace, size_t workSpaceSize);
82
85
 
83
86
  /*! HIST_count_simple() :
84
87
  * Same as HIST_countFast(), this function is unsafe,
@@ -88,13 +88,13 @@ static size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weight
88
88
  BYTE* op = ostart;
89
89
  BYTE* const oend = ostart + dstSize;
90
90
 
91
- U32 maxSymbolValue = HUF_TABLELOG_MAX;
91
+ unsigned maxSymbolValue = HUF_TABLELOG_MAX;
92
92
  U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;
93
93
 
94
94
  FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
95
95
  BYTE scratchBuffer[1<<MAX_FSE_TABLELOG_FOR_HUFF_HEADER];
96
96
 
97
- U32 count[HUF_TABLELOG_MAX+1];
97
+ unsigned count[HUF_TABLELOG_MAX+1];
98
98
  S16 norm[HUF_TABLELOG_MAX+1];
99
99
 
100
100
  /* init conditions */
@@ -134,7 +134,7 @@ struct HUF_CElt_s {
134
134
  `CTable` : Huffman tree to save, using huf representation.
135
135
  @return : size of saved CTable */
136
136
  size_t HUF_writeCTable (void* dst, size_t maxDstSize,
137
- const HUF_CElt* CTable, U32 maxSymbolValue, U32 huffLog)
137
+ const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog)
138
138
  {
139
139
  BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */
140
140
  BYTE huffWeight[HUF_SYMBOLVALUE_MAX];
@@ -169,7 +169,7 @@ size_t HUF_writeCTable (void* dst, size_t maxDstSize,
169
169
  }
170
170
 
171
171
 
172
- size_t HUF_readCTable (HUF_CElt* CTable, U32* maxSymbolValuePtr, const void* src, size_t srcSize)
172
+ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize)
173
173
  {
174
174
  BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */
175
175
  U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */
@@ -315,7 +315,7 @@ typedef struct {
315
315
  U32 current;
316
316
  } rankPos;
317
317
 
318
- static void HUF_sort(nodeElt* huffNode, const U32* count, U32 maxSymbolValue)
318
+ static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue)
319
319
  {
320
320
  rankPos rank[32];
321
321
  U32 n;
@@ -347,7 +347,7 @@ static void HUF_sort(nodeElt* huffNode, const U32* count, U32 maxSymbolValue)
347
347
  */
348
348
  #define STARTNODE (HUF_SYMBOLVALUE_MAX+1)
349
349
  typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];
350
- size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
350
+ size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
351
351
  {
352
352
  nodeElt* const huffNode0 = (nodeElt*)workSpace;
353
353
  nodeElt* const huffNode = huffNode0+1;
@@ -421,7 +421,7 @@ size_t HUF_buildCTable_wksp (HUF_CElt* tree, const U32* count, U32 maxSymbolValu
421
421
  * @return : maxNbBits
422
422
  * Note : count is used before tree is written, so they can safely overlap
423
423
  */
424
- size_t HUF_buildCTable (HUF_CElt* tree, const U32* count, U32 maxSymbolValue, U32 maxNbBits)
424
+ size_t HUF_buildCTable (HUF_CElt* tree, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits)
425
425
  {
426
426
  huffNodeTable nodeTable;
427
427
  return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, nodeTable, sizeof(nodeTable));
@@ -610,13 +610,14 @@ size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, si
610
610
  return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, /* bmi2 */ 0);
611
611
  }
612
612
 
613
+ typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e;
613
614
 
614
615
  static size_t HUF_compressCTable_internal(
615
616
  BYTE* const ostart, BYTE* op, BYTE* const oend,
616
617
  const void* src, size_t srcSize,
617
- unsigned singleStream, const HUF_CElt* CTable, const int bmi2)
618
+ HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int bmi2)
618
619
  {
619
- size_t const cSize = singleStream ?
620
+ size_t const cSize = (nbStreams==HUF_singleStream) ?
620
621
  HUF_compress1X_usingCTable_internal(op, oend - op, src, srcSize, CTable, bmi2) :
621
622
  HUF_compress4X_usingCTable_internal(op, oend - op, src, srcSize, CTable, bmi2);
622
623
  if (HUF_isError(cSize)) { return cSize; }
@@ -628,21 +629,21 @@ static size_t HUF_compressCTable_internal(
628
629
  }
629
630
 
630
631
  typedef struct {
631
- U32 count[HUF_SYMBOLVALUE_MAX + 1];
632
+ unsigned count[HUF_SYMBOLVALUE_MAX + 1];
632
633
  HUF_CElt CTable[HUF_SYMBOLVALUE_MAX + 1];
633
634
  huffNodeTable nodeTable;
634
635
  } HUF_compress_tables_t;
635
636
 
636
637
  /* HUF_compress_internal() :
637
638
  * `workSpace` must a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
638
- static size_t HUF_compress_internal (
639
- void* dst, size_t dstSize,
640
- const void* src, size_t srcSize,
641
- unsigned maxSymbolValue, unsigned huffLog,
642
- unsigned singleStream,
643
- void* workSpace, size_t wkspSize,
644
- HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
645
- const int bmi2)
639
+ static size_t
640
+ HUF_compress_internal (void* dst, size_t dstSize,
641
+ const void* src, size_t srcSize,
642
+ unsigned maxSymbolValue, unsigned huffLog,
643
+ HUF_nbStreams_e nbStreams,
644
+ void* workSpace, size_t wkspSize,
645
+ HUF_CElt* oldHufTable, HUF_repeat* repeat, int preferRepeat,
646
+ const int bmi2)
646
647
  {
647
648
  HUF_compress_tables_t* const table = (HUF_compress_tables_t*)workSpace;
648
649
  BYTE* const ostart = (BYTE*)dst;
@@ -651,7 +652,7 @@ static size_t HUF_compress_internal (
651
652
 
652
653
  /* checks & inits */
653
654
  if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
654
- if (wkspSize < sizeof(*table)) return ERROR(workSpace_tooSmall);
655
+ if (wkspSize < HUF_WORKSPACE_SIZE) return ERROR(workSpace_tooSmall);
655
656
  if (!srcSize) return 0; /* Uncompressed */
656
657
  if (!dstSize) return 0; /* cannot fit anything within dst budget */
657
658
  if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */
@@ -664,11 +665,11 @@ static size_t HUF_compress_internal (
664
665
  if (preferRepeat && repeat && *repeat == HUF_repeat_valid) {
665
666
  return HUF_compressCTable_internal(ostart, op, oend,
666
667
  src, srcSize,
667
- singleStream, oldHufTable, bmi2);
668
+ nbStreams, oldHufTable, bmi2);
668
669
  }
669
670
 
670
671
  /* Scan input and build symbol stats */
671
- { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->count) );
672
+ { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, workSpace, wkspSize) );
672
673
  if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */
673
674
  if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */
674
675
  }
@@ -683,14 +684,15 @@ static size_t HUF_compress_internal (
683
684
  if (preferRepeat && repeat && *repeat != HUF_repeat_none) {
684
685
  return HUF_compressCTable_internal(ostart, op, oend,
685
686
  src, srcSize,
686
- singleStream, oldHufTable, bmi2);
687
+ nbStreams, oldHufTable, bmi2);
687
688
  }
688
689
 
689
690
  /* Build Huffman Tree */
690
691
  huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
691
- { CHECK_V_F(maxBits, HUF_buildCTable_wksp(table->CTable, table->count,
692
- maxSymbolValue, huffLog,
693
- table->nodeTable, sizeof(table->nodeTable)) );
692
+ { size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count,
693
+ maxSymbolValue, huffLog,
694
+ table->nodeTable, sizeof(table->nodeTable));
695
+ CHECK_F(maxBits);
694
696
  huffLog = (U32)maxBits;
695
697
  /* Zero unused symbols in CTable, so we can check it for validity */
696
698
  memset(table->CTable + (maxSymbolValue + 1), 0,
@@ -706,7 +708,7 @@ static size_t HUF_compress_internal (
706
708
  if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) {
707
709
  return HUF_compressCTable_internal(ostart, op, oend,
708
710
  src, srcSize,
709
- singleStream, oldHufTable, bmi2);
711
+ nbStreams, oldHufTable, bmi2);
710
712
  } }
711
713
 
712
714
  /* Use the new huffman table */
@@ -718,7 +720,7 @@ static size_t HUF_compress_internal (
718
720
  }
719
721
  return HUF_compressCTable_internal(ostart, op, oend,
720
722
  src, srcSize,
721
- singleStream, table->CTable, bmi2);
723
+ nbStreams, table->CTable, bmi2);
722
724
  }
723
725
 
724
726
 
@@ -728,7 +730,7 @@ size_t HUF_compress1X_wksp (void* dst, size_t dstSize,
728
730
  void* workSpace, size_t wkspSize)
729
731
  {
730
732
  return HUF_compress_internal(dst, dstSize, src, srcSize,
731
- maxSymbolValue, huffLog, 1 /*single stream*/,
733
+ maxSymbolValue, huffLog, HUF_singleStream,
732
734
  workSpace, wkspSize,
733
735
  NULL, NULL, 0, 0 /*bmi2*/);
734
736
  }
@@ -740,7 +742,7 @@ size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
740
742
  HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
741
743
  {
742
744
  return HUF_compress_internal(dst, dstSize, src, srcSize,
743
- maxSymbolValue, huffLog, 1 /*single stream*/,
745
+ maxSymbolValue, huffLog, HUF_singleStream,
744
746
  workSpace, wkspSize, hufTable,
745
747
  repeat, preferRepeat, bmi2);
746
748
  }
@@ -762,7 +764,7 @@ size_t HUF_compress4X_wksp (void* dst, size_t dstSize,
762
764
  void* workSpace, size_t wkspSize)
763
765
  {
764
766
  return HUF_compress_internal(dst, dstSize, src, srcSize,
765
- maxSymbolValue, huffLog, 0 /*4 streams*/,
767
+ maxSymbolValue, huffLog, HUF_fourStreams,
766
768
  workSpace, wkspSize,
767
769
  NULL, NULL, 0, 0 /*bmi2*/);
768
770
  }
@@ -777,7 +779,7 @@ size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
777
779
  HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2)
778
780
  {
779
781
  return HUF_compress_internal(dst, dstSize, src, srcSize,
780
- maxSymbolValue, huffLog, 0 /* 4 streams */,
782
+ maxSymbolValue, huffLog, HUF_fourStreams,
781
783
  workSpace, wkspSize,
782
784
  hufTable, repeat, preferRepeat, bmi2);
783
785
  }