zstd-ruby 1.4.5.0 → 1.5.1.1

Files changed (101)
  1. checksums.yaml +4 -4
  2. data/.github/dependabot.yml +8 -0
  3. data/.github/workflows/ruby.yml +35 -0
  4. data/README.md +2 -2
  5. data/ext/zstdruby/extconf.rb +2 -1
  6. data/ext/zstdruby/libzstd/BUCK +5 -7
  7. data/ext/zstdruby/libzstd/Makefile +225 -222
  8. data/ext/zstdruby/libzstd/README.md +43 -5
  9. data/ext/zstdruby/libzstd/common/bitstream.h +46 -22
  10. data/ext/zstdruby/libzstd/common/compiler.h +182 -22
  11. data/ext/zstdruby/libzstd/common/cpu.h +1 -3
  12. data/ext/zstdruby/libzstd/common/debug.c +1 -1
  13. data/ext/zstdruby/libzstd/common/debug.h +12 -19
  14. data/ext/zstdruby/libzstd/common/entropy_common.c +196 -44
  15. data/ext/zstdruby/libzstd/common/error_private.c +2 -1
  16. data/ext/zstdruby/libzstd/common/error_private.h +82 -3
  17. data/ext/zstdruby/libzstd/common/fse.h +41 -12
  18. data/ext/zstdruby/libzstd/common/fse_decompress.c +139 -22
  19. data/ext/zstdruby/libzstd/common/huf.h +47 -23
  20. data/ext/zstdruby/libzstd/common/mem.h +87 -98
  21. data/ext/zstdruby/libzstd/common/pool.c +23 -17
  22. data/ext/zstdruby/libzstd/common/pool.h +2 -2
  23. data/ext/zstdruby/libzstd/common/portability_macros.h +131 -0
  24. data/ext/zstdruby/libzstd/common/threading.c +6 -5
  25. data/ext/zstdruby/libzstd/common/xxhash.c +6 -846
  26. data/ext/zstdruby/libzstd/common/xxhash.h +5568 -167
  27. data/ext/zstdruby/libzstd/common/zstd_common.c +10 -10
  28. data/ext/zstdruby/libzstd/common/zstd_deps.h +111 -0
  29. data/ext/zstdruby/libzstd/common/zstd_internal.h +189 -142
  30. data/ext/zstdruby/libzstd/common/zstd_trace.h +163 -0
  31. data/ext/zstdruby/libzstd/compress/clevels.h +134 -0
  32. data/ext/zstdruby/libzstd/compress/fse_compress.c +89 -46
  33. data/ext/zstdruby/libzstd/compress/hist.c +27 -29
  34. data/ext/zstdruby/libzstd/compress/hist.h +2 -2
  35. data/ext/zstdruby/libzstd/compress/huf_compress.c +770 -198
  36. data/ext/zstdruby/libzstd/compress/zstd_compress.c +2894 -863
  37. data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +390 -90
  38. data/ext/zstdruby/libzstd/compress/zstd_compress_literals.c +12 -11
  39. data/ext/zstdruby/libzstd/compress/zstd_compress_literals.h +4 -2
  40. data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.c +31 -8
  41. data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.h +1 -1
  42. data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.c +25 -297
  43. data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.h +1 -1
  44. data/ext/zstdruby/libzstd/compress/zstd_cwksp.h +206 -69
  45. data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +307 -132
  46. data/ext/zstdruby/libzstd/compress/zstd_double_fast.h +1 -1
  47. data/ext/zstdruby/libzstd/compress/zstd_fast.c +322 -143
  48. data/ext/zstdruby/libzstd/compress/zstd_fast.h +1 -1
  49. data/ext/zstdruby/libzstd/compress/zstd_lazy.c +1136 -174
  50. data/ext/zstdruby/libzstd/compress/zstd_lazy.h +59 -1
  51. data/ext/zstdruby/libzstd/compress/zstd_ldm.c +316 -213
  52. data/ext/zstdruby/libzstd/compress/zstd_ldm.h +9 -2
  53. data/ext/zstdruby/libzstd/compress/zstd_ldm_geartab.h +106 -0
  54. data/ext/zstdruby/libzstd/compress/zstd_opt.c +373 -150
  55. data/ext/zstdruby/libzstd/compress/zstd_opt.h +1 -1
  56. data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +152 -444
  57. data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +31 -113
  58. data/ext/zstdruby/libzstd/decompress/huf_decompress.c +1044 -403
  59. data/ext/zstdruby/libzstd/decompress/huf_decompress_amd64.S +571 -0
  60. data/ext/zstdruby/libzstd/decompress/zstd_ddict.c +9 -9
  61. data/ext/zstdruby/libzstd/decompress/zstd_ddict.h +2 -2
  62. data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +450 -105
  63. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.c +913 -273
  64. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.h +14 -5
  65. data/ext/zstdruby/libzstd/decompress/zstd_decompress_internal.h +59 -12
  66. data/ext/zstdruby/libzstd/deprecated/zbuff.h +1 -1
  67. data/ext/zstdruby/libzstd/deprecated/zbuff_common.c +1 -1
  68. data/ext/zstdruby/libzstd/deprecated/zbuff_compress.c +24 -4
  69. data/ext/zstdruby/libzstd/deprecated/zbuff_decompress.c +1 -1
  70. data/ext/zstdruby/libzstd/dictBuilder/cover.c +55 -38
  71. data/ext/zstdruby/libzstd/dictBuilder/cover.h +7 -6
  72. data/ext/zstdruby/libzstd/dictBuilder/divsufsort.c +1 -1
  73. data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +43 -34
  74. data/ext/zstdruby/libzstd/dictBuilder/zdict.c +128 -58
  75. data/ext/zstdruby/libzstd/dll/example/Makefile +1 -1
  76. data/ext/zstdruby/libzstd/dll/example/README.md +16 -22
  77. data/ext/zstdruby/libzstd/legacy/zstd_legacy.h +1 -1
  78. data/ext/zstdruby/libzstd/legacy/zstd_v01.c +8 -8
  79. data/ext/zstdruby/libzstd/legacy/zstd_v01.h +1 -1
  80. data/ext/zstdruby/libzstd/legacy/zstd_v02.c +9 -9
  81. data/ext/zstdruby/libzstd/legacy/zstd_v02.h +1 -1
  82. data/ext/zstdruby/libzstd/legacy/zstd_v03.c +9 -9
  83. data/ext/zstdruby/libzstd/legacy/zstd_v03.h +1 -1
  84. data/ext/zstdruby/libzstd/legacy/zstd_v04.c +10 -10
  85. data/ext/zstdruby/libzstd/legacy/zstd_v04.h +1 -1
  86. data/ext/zstdruby/libzstd/legacy/zstd_v05.c +13 -13
  87. data/ext/zstdruby/libzstd/legacy/zstd_v05.h +1 -1
  88. data/ext/zstdruby/libzstd/legacy/zstd_v06.c +13 -13
  89. data/ext/zstdruby/libzstd/legacy/zstd_v06.h +1 -1
  90. data/ext/zstdruby/libzstd/legacy/zstd_v07.c +13 -13
  91. data/ext/zstdruby/libzstd/legacy/zstd_v07.h +1 -1
  92. data/ext/zstdruby/libzstd/libzstd.mk +185 -0
  93. data/ext/zstdruby/libzstd/libzstd.pc.in +4 -3
  94. data/ext/zstdruby/libzstd/modulemap/module.modulemap +4 -0
  95. data/ext/zstdruby/libzstd/{dictBuilder/zdict.h → zdict.h} +154 -7
  96. data/ext/zstdruby/libzstd/zstd.h +699 -214
  97. data/ext/zstdruby/libzstd/{common/zstd_errors.h → zstd_errors.h} +2 -1
  98. data/ext/zstdruby/zstdruby.c +2 -2
  99. data/lib/zstd-ruby/version.rb +1 -1
  100. metadata +15 -6
  101. data/.travis.yml +0 -14
data/ext/zstdruby/libzstd/common/fse_decompress.c

@@ -1,6 +1,6 @@
 /* ******************************************************************
  * FSE : Finite State Entropy decoder
- * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
  *
  * You can contact the author at :
  * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -16,13 +16,14 @@
 /* **************************************************************
 *  Includes
 ****************************************************************/
-#include <stdlib.h>     /* malloc, free, qsort */
-#include <string.h>     /* memcpy, memset */
+#include "debug.h"      /* assert */
 #include "bitstream.h"
 #include "compiler.h"
 #define FSE_STATIC_LINKING_ONLY
 #include "fse.h"
 #include "error_private.h"
+#define ZSTD_DEPS_NEED_MALLOC
+#include "zstd_deps.h"


 /* **************************************************************
@@ -59,25 +60,27 @@
 FSE_DTable* FSE_createDTable (unsigned tableLog)
 {
     if (tableLog > FSE_TABLELOG_ABSOLUTE_MAX) tableLog = FSE_TABLELOG_ABSOLUTE_MAX;
-    return (FSE_DTable*)malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
+    return (FSE_DTable*)ZSTD_malloc( FSE_DTABLE_SIZE_U32(tableLog) * sizeof (U32) );
 }

 void FSE_freeDTable (FSE_DTable* dt)
 {
-    free(dt);
+    ZSTD_free(dt);
 }

-size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog)
+static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
 {
     void* const tdPtr = dt+1;   /* because *dt is unsigned, 32-bits aligned on 32-bits */
     FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
-    U16 symbolNext[FSE_MAX_SYMBOL_VALUE+1];
+    U16* symbolNext = (U16*)workSpace;
+    BYTE* spread = (BYTE*)(symbolNext + maxSymbolValue + 1);

     U32 const maxSV1 = maxSymbolValue + 1;
     U32 const tableSize = 1 << tableLog;
     U32 highThreshold = tableSize-1;

     /* Sanity Checks */
+    if (FSE_BUILD_DTABLE_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(maxSymbolValue_tooLarge);
     if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
     if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);

@@ -95,11 +98,57 @@ size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned
                 if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
                 symbolNext[s] = normalizedCounter[s];
         }   }   }
-        memcpy(dt, &DTableH, sizeof(DTableH));
+        ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
     }

     /* Spread symbols */
-    {   U32 const tableMask = tableSize-1;
+    if (highThreshold == tableSize - 1) {
+        size_t const tableMask = tableSize-1;
+        size_t const step = FSE_TABLESTEP(tableSize);
+        /* First lay down the symbols in order.
+         * We use a uint64_t to lay down 8 bytes at a time. This reduces branch
+         * misses since small blocks generally have small table logs, so nearly
+         * all symbols have counts <= 8. We ensure we have 8 bytes at the end of
+         * our buffer to handle the over-write.
+         */
+        {
+            U64 const add = 0x0101010101010101ull;
+            size_t pos = 0;
+            U64 sv = 0;
+            U32 s;
+            for (s=0; s<maxSV1; ++s, sv += add) {
+                int i;
+                int const n = normalizedCounter[s];
+                MEM_write64(spread + pos, sv);
+                for (i = 8; i < n; i += 8) {
+                    MEM_write64(spread + pos + i, sv);
+                }
+                pos += n;
+            }
+        }
+        /* Now we spread those positions across the table.
+         * The benefit of doing it in two stages is that we avoid the
+         * variable size inner loop, which caused lots of branch misses.
+         * Now we can run through all the positions without any branch misses.
+         * We unroll the loop twice, since that is what empirically worked best.
+         */
+        {
+            size_t position = 0;
+            size_t s;
+            size_t const unroll = 2;
+            assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
+            for (s = 0; s < (size_t)tableSize; s += unroll) {
+                size_t u;
+                for (u = 0; u < unroll; ++u) {
+                    size_t const uPosition = (position + (u * step)) & tableMask;
+                    tableDecode[uPosition].symbol = spread[s + u];
+                }
+                position = (position + (unroll * step)) & tableMask;
+            }
+            assert(position == 0);
+        }
+    } else {
+        U32 const tableMask = tableSize-1;
         U32 const step = FSE_TABLESTEP(tableSize);
         U32 s, position = 0;
         for (s=0; s<maxSV1; s++) {
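
The comments in the hunk above describe the new two-stage symbol spread. As a standalone illustration of the idea (not library code: the table size, counts, and all names below are invented for the example, and low-probability symbols, which take the slow path, are ignored):

/* Illustrative sketch of the two-stage spread, with made-up toy inputs.
 * Stage 1 lays each symbol out contiguously, 8 bytes at a time;
 * stage 2 scatters the packed buffer with a fixed odd step, which is
 * coprime with the power-of-two table size, so every slot is hit once. */
#include <assert.h>
#include <stdint.h>
#include <string.h>

#define TABLE_LOG  5
#define TABLE_SIZE (1 << TABLE_LOG)

void spread_example(uint8_t table[TABLE_SIZE])
{
    static const int counts[3] = { 16, 10, 6 };   /* must sum to TABLE_SIZE */
    uint8_t spread[TABLE_SIZE + 8];               /* +8 absorbs the 8-byte over-write */
    size_t pos = 0;

    /* Stage 1: write symbol s counts[s] times, in 8-byte chunks. */
    for (int s = 0; s < 3; s++) {
        uint64_t sv;
        memset(&sv, s, sizeof(sv));               /* 8 copies of the symbol byte */
        for (int i = 0; i < counts[s]; i += 8)
            memcpy(spread + pos + i, &sv, sizeof(sv));
        pos += (size_t)counts[s];
    }
    assert(pos == TABLE_SIZE);

    /* Stage 2: scatter positions with a fixed step; no data-dependent inner loop. */
    {
        size_t const mask = TABLE_SIZE - 1;
        size_t const step = (TABLE_SIZE >> 1) + (TABLE_SIZE >> 3) + 3;  /* same shape as FSE_TABLESTEP */
        size_t position = 0;
        for (size_t s = 0; s < TABLE_SIZE; s++) {
            table[position] = spread[s];
            position = (position + step) & mask;
        }
        assert(position == 0);                    /* the walk closes after TABLE_SIZE steps */
    }
}

Because the step is odd and the table size is a power of two, the stage-2 walk is a full cycle, which is what the final assert checks.
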
@@ -124,6 +173,11 @@ size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned
     return 0;
 }

+size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
+{
+    return FSE_buildDTable_internal(dt, normalizedCounter, maxSymbolValue, tableLog, workSpace, wkspSize);
+}
+

 #ifndef FSE_COMMONDEFS_ONLY

@@ -251,36 +305,99 @@ size_t FSE_decompress_usingDTable(void* dst, size_t originalSize,
 }


-size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, FSE_DTable* workSpace, unsigned maxLog)
+size_t FSE_decompress_wksp(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
+{
+    return FSE_decompress_wksp_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, /* bmi2 */ 0);
+}
+
+typedef struct {
+    short ncount[FSE_MAX_SYMBOL_VALUE + 1];
+    FSE_DTable dtable[1]; /* Dynamically sized */
+} FSE_DecompressWksp;
+
+
+FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body(
+        void* dst, size_t dstCapacity,
+        const void* cSrc, size_t cSrcSize,
+        unsigned maxLog, void* workSpace, size_t wkspSize,
+        int bmi2)
 {
     const BYTE* const istart = (const BYTE*)cSrc;
     const BYTE* ip = istart;
-    short counting[FSE_MAX_SYMBOL_VALUE+1];
     unsigned tableLog;
     unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
+    FSE_DecompressWksp* const wksp = (FSE_DecompressWksp*)workSpace;
+
+    DEBUG_STATIC_ASSERT((FSE_MAX_SYMBOL_VALUE + 1) % 2 == 0);
+    if (wkspSize < sizeof(*wksp)) return ERROR(GENERIC);

     /* normal FSE decoding mode */
-    size_t const NCountLength = FSE_readNCount (counting, &maxSymbolValue, &tableLog, istart, cSrcSize);
-    if (FSE_isError(NCountLength)) return NCountLength;
-    /* if (NCountLength >= cSrcSize) return ERROR(srcSize_wrong); */   /* too small input size; supposed to be already checked in NCountLength, only remaining case : NCountLength==cSrcSize */
-    if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
-    ip += NCountLength;
-    cSrcSize -= NCountLength;
+    {
+        size_t const NCountLength = FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2);
+        if (FSE_isError(NCountLength)) return NCountLength;
+        if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
+        assert(NCountLength <= cSrcSize);
+        ip += NCountLength;
+        cSrcSize -= NCountLength;
+    }
+
+    if (FSE_DECOMPRESS_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(tableLog_tooLarge);
+    workSpace = wksp->dtable + FSE_DTABLE_SIZE_U32(tableLog);
+    wkspSize -= sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);
+
+    CHECK_F( FSE_buildDTable_internal(wksp->dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) );
+
+    {
+        const void* ptr = wksp->dtable;
+        const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
+        const U32 fastMode = DTableH->fastMode;

-    CHECK_F( FSE_buildDTable (workSpace, counting, maxSymbolValue, tableLog) );
+        /* select fast mode (static) */
+        if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 1);
+        return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, wksp->dtable, 0);
+    }
+}
+
+/* Avoids the FORCE_INLINE of the _body() function. */
+static size_t FSE_decompress_wksp_body_default(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
+{
+    return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 0);
+}
+
+#if DYNAMIC_BMI2
+BMI2_TARGET_ATTRIBUTE static size_t FSE_decompress_wksp_body_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
+{
+    return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 1);
+}
+#endif

-    return FSE_decompress_usingDTable (dst, dstCapacity, ip, cSrcSize, workSpace);   /* always return, even if it is an error code */
+size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2)
+{
+#if DYNAMIC_BMI2
+    if (bmi2) {
+        return FSE_decompress_wksp_body_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
+    }
+#endif
+    (void)bmi2;
+    return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
 }


 typedef FSE_DTable DTable_max_t[FSE_DTABLE_SIZE_U32(FSE_MAX_TABLELOG)];

+#ifndef ZSTD_NO_UNUSED_FUNCTIONS
+size_t FSE_buildDTable(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog) {
+    U32 wksp[FSE_BUILD_DTABLE_WKSP_SIZE_U32(FSE_TABLELOG_ABSOLUTE_MAX, FSE_MAX_SYMBOL_VALUE)];
+    return FSE_buildDTable_wksp(dt, normalizedCounter, maxSymbolValue, tableLog, wksp, sizeof(wksp));
+}
+
 size_t FSE_decompress(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize)
 {
-    DTable_max_t dt;   /* Static analyzer seems unable to understand this table will be properly initialized later */
-    return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, dt, FSE_MAX_TABLELOG);
+    /* Static analyzer seems unable to understand this table will be properly initialized later */
+    U32 wksp[FSE_DECOMPRESS_WKSP_SIZE_U32(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE)];
+    return FSE_decompress_wksp(dst, dstCapacity, cSrc, cSrcSize, FSE_MAX_TABLELOG, wksp, sizeof(wksp));
 }
-
+#endif


 #endif   /* FSE_COMMONDEFS_ONLY */
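
FSE_decompress_wksp_bmi2 above follows the pattern libzstd uses for runtime BMI2 dispatch: one force-inlined body parameterized on a constant bmi2 flag, a plain wrapper, a wrapper compiled with BMI2 code generation, and a dispatcher. A minimal standalone sketch of the same pattern, with invented names and with the real DYNAMIC_BMI2 / BMI2_TARGET_ATTRIBUTE definitions (which in libzstd come from its portability/compiler headers) replaced by a simplified stub:

/* Standalone sketch of the BMI2 dispatch pattern (illustrative names only). */
#include <stddef.h>

#ifndef DYNAMIC_BMI2
#  if defined(__GNUC__) && defined(__x86_64__)
#    define DYNAMIC_BMI2 1
#    define BMI2_TARGET_ATTRIBUTE __attribute__((__target__("lzcnt,bmi,bmi2")))
#  else
#    define DYNAMIC_BMI2 0
#    define BMI2_TARGET_ATTRIBUTE
#  endif
#endif

/* The body is force-inlined in libzstd, so each wrapper becomes a
 * specialized copy in which 'bmi2' is a compile-time constant. */
static inline size_t work_body(const unsigned char* src, size_t size, int bmi2)
{
    size_t acc = 0, i;
    for (i = 0; i < size; i++) acc += src[i];   /* placeholder work */
    return acc + (size_t)bmi2;                  /* a real body would pick BMI2 helpers here */
}

static size_t work_default(const unsigned char* src, size_t size)
{
    return work_body(src, size, 0);
}

#if DYNAMIC_BMI2
BMI2_TARGET_ATTRIBUTE static size_t work_bmi2(const unsigned char* src, size_t size)
{
    return work_body(src, size, 1);
}
#endif

size_t work(const unsigned char* src, size_t size, int bmi2)
{
#if DYNAMIC_BMI2
    if (bmi2) return work_bmi2(src, size);
#endif
    (void)bmi2;
    return work_default(src, size);
}
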
data/ext/zstdruby/libzstd/common/huf.h

@@ -1,7 +1,7 @@
 /* ******************************************************************
  * huff0 huffman codec,
  * part of Finite State Entropy library
- * Copyright (c) 2013-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
  *
  * You can contact the author at :
  * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
@@ -20,7 +20,7 @@ extern "C" {
 #define HUF_H_298734234

 /* *** Dependencies *** */
-#include <stddef.h>    /* size_t */
+#include "zstd_deps.h"    /* size_t */


 /* *** library symbols visibility *** */
@@ -89,9 +89,9 @@

 /** HUF_compress4X_wksp() :
  *  Same as HUF_compress2(), but uses externally allocated `workSpace`.
- *  `workspace` must have minimum alignment of 4, and be at least as large as HUF_WORKSPACE_SIZE */
-#define HUF_WORKSPACE_SIZE ((6 << 10) + 256)
-#define HUF_WORKSPACE_SIZE_U32 (HUF_WORKSPACE_SIZE / sizeof(U32))
+ *  `workspace` must be at least as large as HUF_WORKSPACE_SIZE */
+#define HUF_WORKSPACE_SIZE ((8 << 10) + 512 /* sorting scratch space */)
+#define HUF_WORKSPACE_SIZE_U64 (HUF_WORKSPACE_SIZE / sizeof(U64))
 HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
                                            const void* src, size_t srcSize,
                                            unsigned maxSymbolValue, unsigned tableLog,
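
For public-API callers, the practical effect of the HUF_WORKSPACE_SIZE change above is a larger scratch buffer, now sized in U64 units. A hypothetical usage sketch (the wrapper name is invented; HUF_STATIC_LINKING_ONLY is defined only to pull in the U64 typedef and HUF_TABLELOG_DEFAULT):

/* Illustrative wrapper (not part of the library): one-shot 4-stream Huffman
 * compression with a caller-owned workspace sized by the new macro. */
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"

size_t huf_compress_block(void* dst, size_t dstCapacity,
                          const void* src, size_t srcSize)
{
    U64 workspace[HUF_WORKSPACE_SIZE_U64];   /* U64 elements give the required size and alignment */
    return HUF_compress4X_wksp(dst, dstCapacity, src, srcSize,
                               HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT,
                               workspace, sizeof(workspace));
}

The return value is the compressed size, 0 if the input was not compressible, or an error code testable with HUF_isError().
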
@@ -111,14 +111,16 @@ HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,

 /* *** Dependencies *** */
 #include "mem.h"   /* U32 */
+#define FSE_STATIC_LINKING_ONLY
+#include "fse.h"


 /* *** Constants *** */
-#define HUF_TABLELOG_MAX      12      /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_ABSOLUTEMAX_TABLELOG */
+#define HUF_TABLELOG_MAX      12      /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */
 #define HUF_TABLELOG_DEFAULT  11      /* default tableLog value when none specified */
 #define HUF_SYMBOLVALUE_MAX  255

-#define HUF_TABLELOG_ABSOLUTEMAX  15  /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
+#define HUF_TABLELOG_ABSOLUTEMAX  12  /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
 #if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
 #  error "HUF_TABLELOG_MAX is too large !"
 #endif
@@ -133,12 +135,12 @@ HUF_PUBLIC_API size_t HUF_compress4X_wksp (void* dst, size_t dstCapacity,
 #define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size))   /* Macro version, useful for static allocation */

 /* static allocation of HUF's Compression Table */
-#define HUF_CTABLE_SIZE_U32(maxSymbolValue)   ((maxSymbolValue)+1)   /* Use tables of U32, for proper alignment */
-#define HUF_CTABLE_SIZE(maxSymbolValue)       (HUF_CTABLE_SIZE_U32(maxSymbolValue) * sizeof(U32))
+/* this is a private definition, just exposed for allocation and strict aliasing purpose. never EVER access its members directly */
+typedef size_t HUF_CElt;   /* consider it an incomplete type */
+#define HUF_CTABLE_SIZE_ST(maxSymbolValue)   ((maxSymbolValue)+2)   /* Use tables of size_t, for proper alignment */
+#define HUF_CTABLE_SIZE(maxSymbolValue)      (HUF_CTABLE_SIZE_ST(maxSymbolValue) * sizeof(size_t))
 #define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
-    U32 name##hb[HUF_CTABLE_SIZE_U32(maxSymbolValue)]; \
-    void* name##hv = &(name##hb); \
-    HUF_CElt* name = (HUF_CElt*)(name##hv)   /* no final ; */
+    HUF_CElt name[HUF_CTABLE_SIZE_ST(maxSymbolValue)] /* no final ; */

 /* static allocation of HUF's DTable */
 typedef U32 HUF_DTable;
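
With HUF_CElt now a plain size_t, HUF_CREATE_STATIC_CTABLE reduces to a single array declaration. A hedged sketch of building and using a statically allocated CTable (the wrapper name is invented, and the symbol counts are assumed to come from a prior histogram pass):

#define HUF_STATIC_LINKING_ONLY
#include "huf.h"

/* Illustrative only: compress one block with a caller-built Huffman table. */
size_t compress_with_static_ctable(void* dst, size_t dstCapacity,
                                   const void* src, size_t srcSize,
                                   const unsigned* count, unsigned maxSymbolValue)
{
    /* Expands to: HUF_CElt ctable[HUF_CTABLE_SIZE_ST(HUF_SYMBOLVALUE_MAX)]; */
    HUF_CREATE_STATIC_CTABLE(ctable, HUF_SYMBOLVALUE_MAX);

    size_t const maxNbBits = HUF_buildCTable(ctable, count, maxSymbolValue, HUF_TABLELOG_DEFAULT);
    if (HUF_isError(maxNbBits)) return maxNbBits;

    return HUF_compress4X_usingCTable(dst, dstCapacity, src, srcSize, ctable);
}
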
@@ -184,10 +186,11 @@ size_t HUF_decompress4X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize,
  *  or to save and regenerate 'CTable' using external methods.
  */
 unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue);
-typedef struct HUF_CElt_s HUF_CElt;   /* incomplete type */
 size_t HUF_buildCTable (HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits);   /* @return : maxNbBits; CTable and count can overlap. In which case, CTable will overwrite count content */
 size_t HUF_writeCTable (void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog);
+size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize);
 size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
+size_t HUF_compress4X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2);
 size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
 int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);

@@ -200,12 +203,13 @@ typedef enum {
  *  Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
  *  If it uses hufTable it does not modify hufTable or repeat.
  *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
- *  If preferRepeat then the old table will always be used if valid. */
+ *  If preferRepeat then the old table will always be used if valid.
+ *  If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */
 size_t HUF_compress4X_repeat(void* dst, size_t dstSize,
                              const void* src, size_t srcSize,
                              unsigned maxSymbolValue, unsigned tableLog,
                              void* workSpace, size_t wkspSize,   /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
-                             HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
+                             HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible);

 /** HUF_buildCTable_wksp() :
  *  Same as HUF_buildCTable(), but using externally allocated scratch buffer.
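
The repeat variant threads a Huffman table and a HUF_repeat state across consecutive blocks; the new trailing argument only enables the uncompressibility sampling mentioned in the comment. A hedged per-block sketch (wrapper name invented; hufTable is assumed to be a full-size table, e.g. declared with HUF_CREATE_STATIC_CTABLE, and *repeat starts at HUF_repeat_none):

#define HUF_STATIC_LINKING_ONLY
#include "huf.h"

/* Illustrative only: per-block compression that may reuse the previous table. */
size_t compress_block_with_repeat(void* dst, size_t dstCapacity,
                                  const void* src, size_t srcSize,
                                  HUF_CElt* hufTable,     /* persists across blocks */
                                  HUF_repeat* repeat)     /* updated by the call */
{
    U64 wksp[HUF_WORKSPACE_SIZE_U64];
    return HUF_compress4X_repeat(dst, dstCapacity, src, srcSize,
                                 HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT,
                                 wksp, sizeof(wksp),
                                 hufTable, repeat,
                                 /* preferRepeat */ 0,
                                 /* bmi2 */ 0,
                                 /* suspectUncompressible */ 0);
}
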
@@ -226,15 +230,27 @@ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize,
                      U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
                      const void* src, size_t srcSize);

+/*! HUF_readStats_wksp() :
+ *  Same as HUF_readStats() but takes an external workspace which must be
+ *  4-byte aligned and its size must be >= HUF_READ_STATS_WORKSPACE_SIZE.
+ *  If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
+ */
+#define HUF_READ_STATS_WORKSPACE_SIZE_U32 FSE_DECOMPRESS_WKSP_SIZE_U32(6, HUF_TABLELOG_MAX-1)
+#define HUF_READ_STATS_WORKSPACE_SIZE (HUF_READ_STATS_WORKSPACE_SIZE_U32 * sizeof(unsigned))
+size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize,
+                          U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
+                          const void* src, size_t srcSize,
+                          void* workspace, size_t wkspSize,
+                          int bmi2);
+
 /** HUF_readCTable() :
  *  Loading a CTable saved with HUF_writeCTable() */
 size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned *hasZeroWeights);

-/** HUF_getNbBits() :
+/** HUF_getNbBitsFromCTable() :
  *  Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX
- *  Note 1 : is not inlined, as HUF_CElt definition is private
- *  Note 2 : const void* used, so that it can provide a statically allocated table as argument (which uses type U32) */
-U32 HUF_getNbBits(const void* symbolTable, U32 symbolValue);
+ *  Note 1 : is not inlined, as HUF_CElt definition is private */
+U32 HUF_getNbBitsFromCTable(const HUF_CElt* symbolTable, U32 symbolValue);

 /*
  * HUF_decompress() does the following:
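
Per the comment above, the workspace variant only needs a 4-byte-aligned scratch of HUF_READ_STATS_WORKSPACE_SIZE. A hedged sketch (the wrapper name and array sizes are illustrative assumptions, not part of the library):

#define HUF_STATIC_LINKING_ONLY
#include "huf.h"

/* Illustrative only: read Huffman weights with a stack workspace. */
size_t read_huffman_stats(BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1],
                          U32 rankStats[HUF_TABLELOG_MAX + 1],
                          U32* nbSymbols, U32* tableLog,
                          const void* src, size_t srcSize, int cpuHasBmi2)
{
    U32 wksp[HUF_READ_STATS_WORKSPACE_SIZE_U32];   /* U32 elements guarantee 4-byte alignment */
    return HUF_readStats_wksp(huffWeight, HUF_SYMBOLVALUE_MAX + 1,
                              rankStats, nbSymbols, tableLog,
                              src, srcSize,
                              wksp, sizeof(wksp), cpuHasBmi2);
}
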
@@ -260,7 +276,7 @@ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);
  *  a required workspace size greater than that specified in the following
  *  macro.
  */
-#define HUF_DECOMPRESS_WORKSPACE_SIZE (2 << 10)
+#define HUF_DECOMPRESS_WORKSPACE_SIZE ((2 << 10) + (1 << 9))
 #define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))

 #ifndef HUF_FORCE_DECOMPRESS_X2
@@ -286,18 +302,20 @@ size_t HUF_decompress4X2_usingDTable(void* dst, size_t maxDstSize, const void* c
 /* ====================== */

 size_t HUF_compress1X (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog);
-size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);   /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U32 unsigned */
+size_t HUF_compress1X_wksp (void* dst, size_t dstSize, const void* src, size_t srcSize, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize);   /**< `workSpace` must be a table of at least HUF_WORKSPACE_SIZE_U64 U64 */
 size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable);
+size_t HUF_compress1X_usingCTable_bmi2(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int bmi2);
 /** HUF_compress1X_repeat() :
  *  Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
  *  If it uses hufTable it does not modify hufTable or repeat.
  *  If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
- *  If preferRepeat then the old table will always be used if valid. */
+ *  If preferRepeat then the old table will always be used if valid.
+ *  If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */
 size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
                              const void* src, size_t srcSize,
                              unsigned maxSymbolValue, unsigned tableLog,
                              void* workSpace, size_t wkspSize,   /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
-                             HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2);
+                             HUF_CElt* hufTable, HUF_repeat* repeat, int preferRepeat, int bmi2, unsigned suspectUncompressible);

 size_t HUF_decompress1X1 (void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize);   /* single-symbol decoder */
 #ifndef HUF_FORCE_DECOMPRESS_X1
@@ -332,6 +350,12 @@ size_t HUF_decompress1X1_DCtx_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstS
 #endif
 size_t HUF_decompress4X_usingDTable_bmi2(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int bmi2);
 size_t HUF_decompress4X_hufOnly_wksp_bmi2(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int bmi2);
+#ifndef HUF_FORCE_DECOMPRESS_X2
+size_t HUF_readDTableX1_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2);
+#endif
+#ifndef HUF_FORCE_DECOMPRESS_X1
+size_t HUF_readDTableX2_wksp_bmi2(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int bmi2);
+#endif

 #endif /* HUF_STATIC_LINKING_ONLY */
