zstdlib 0.13.0-x86-linux

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
Files changed (129)
  1. checksums.yaml +7 -0
  2. data/.yardopts +6 -0
  3. data/CHANGES.md +107 -0
  4. data/Gemfile +3 -0
  5. data/README.md +107 -0
  6. data/Rakefile +59 -0
  7. data/ext/zstdlib_c/extconf.rb +59 -0
  8. data/ext/zstdlib_c/ruby/zlib-2.2/zstdlib.c +4675 -0
  9. data/ext/zstdlib_c/ruby/zlib-2.3/zstdlib.c +4702 -0
  10. data/ext/zstdlib_c/ruby/zlib-2.4/zstdlib.c +4859 -0
  11. data/ext/zstdlib_c/ruby/zlib-2.5/zstdlib.c +4864 -0
  12. data/ext/zstdlib_c/ruby/zlib-2.6/zstdlib.c +4906 -0
  13. data/ext/zstdlib_c/ruby/zlib-2.7/zstdlib.c +4895 -0
  14. data/ext/zstdlib_c/ruby/zlib-3.0/zstdlib.c +4994 -0
  15. data/ext/zstdlib_c/ruby/zlib-3.1/zstdlib.c +5076 -0
  16. data/ext/zstdlib_c/ruby/zlib-3.2/zstdlib.c +5090 -0
  17. data/ext/zstdlib_c/ruby/zlib-3.3/zstdlib.c +5090 -0
  18. data/ext/zstdlib_c/zlib-1.3.1/adler32.c +164 -0
  19. data/ext/zstdlib_c/zlib-1.3.1/compress.c +75 -0
  20. data/ext/zstdlib_c/zlib-1.3.1/crc32.c +1049 -0
  21. data/ext/zstdlib_c/zlib-1.3.1/crc32.h +9446 -0
  22. data/ext/zstdlib_c/zlib-1.3.1/deflate.c +2139 -0
  23. data/ext/zstdlib_c/zlib-1.3.1/deflate.h +377 -0
  24. data/ext/zstdlib_c/zlib-1.3.1/gzclose.c +23 -0
  25. data/ext/zstdlib_c/zlib-1.3.1/gzguts.h +214 -0
  26. data/ext/zstdlib_c/zlib-1.3.1/gzlib.c +582 -0
  27. data/ext/zstdlib_c/zlib-1.3.1/gzread.c +602 -0
  28. data/ext/zstdlib_c/zlib-1.3.1/gzwrite.c +631 -0
  29. data/ext/zstdlib_c/zlib-1.3.1/infback.c +628 -0
  30. data/ext/zstdlib_c/zlib-1.3.1/inffast.c +320 -0
  31. data/ext/zstdlib_c/zlib-1.3.1/inffast.h +11 -0
  32. data/ext/zstdlib_c/zlib-1.3.1/inffixed.h +94 -0
  33. data/ext/zstdlib_c/zlib-1.3.1/inflate.c +1526 -0
  34. data/ext/zstdlib_c/zlib-1.3.1/inflate.h +126 -0
  35. data/ext/zstdlib_c/zlib-1.3.1/inftrees.c +299 -0
  36. data/ext/zstdlib_c/zlib-1.3.1/inftrees.h +62 -0
  37. data/ext/zstdlib_c/zlib-1.3.1/trees.c +1117 -0
  38. data/ext/zstdlib_c/zlib-1.3.1/trees.h +128 -0
  39. data/ext/zstdlib_c/zlib-1.3.1/uncompr.c +85 -0
  40. data/ext/zstdlib_c/zlib-1.3.1/zconf.h +543 -0
  41. data/ext/zstdlib_c/zlib-1.3.1/zlib.h +1938 -0
  42. data/ext/zstdlib_c/zlib-1.3.1/zutil.c +299 -0
  43. data/ext/zstdlib_c/zlib-1.3.1/zutil.h +254 -0
  44. data/ext/zstdlib_c/zlib.mk +14 -0
  45. data/ext/zstdlib_c/zlibwrapper/zlibwrapper.c +10 -0
  46. data/ext/zstdlib_c/zlibwrapper.mk +14 -0
  47. data/ext/zstdlib_c/zstd-1.5.6/lib/common/allocations.h +55 -0
  48. data/ext/zstdlib_c/zstd-1.5.6/lib/common/bits.h +200 -0
  49. data/ext/zstdlib_c/zstd-1.5.6/lib/common/bitstream.h +457 -0
  50. data/ext/zstdlib_c/zstd-1.5.6/lib/common/compiler.h +450 -0
  51. data/ext/zstdlib_c/zstd-1.5.6/lib/common/cpu.h +249 -0
  52. data/ext/zstdlib_c/zstd-1.5.6/lib/common/debug.c +30 -0
  53. data/ext/zstdlib_c/zstd-1.5.6/lib/common/debug.h +116 -0
  54. data/ext/zstdlib_c/zstd-1.5.6/lib/common/entropy_common.c +340 -0
  55. data/ext/zstdlib_c/zstd-1.5.6/lib/common/error_private.c +63 -0
  56. data/ext/zstdlib_c/zstd-1.5.6/lib/common/error_private.h +168 -0
  57. data/ext/zstdlib_c/zstd-1.5.6/lib/common/fse.h +640 -0
  58. data/ext/zstdlib_c/zstd-1.5.6/lib/common/fse_decompress.c +313 -0
  59. data/ext/zstdlib_c/zstd-1.5.6/lib/common/huf.h +286 -0
  60. data/ext/zstdlib_c/zstd-1.5.6/lib/common/mem.h +426 -0
  61. data/ext/zstdlib_c/zstd-1.5.6/lib/common/pool.c +371 -0
  62. data/ext/zstdlib_c/zstd-1.5.6/lib/common/pool.h +90 -0
  63. data/ext/zstdlib_c/zstd-1.5.6/lib/common/portability_macros.h +158 -0
  64. data/ext/zstdlib_c/zstd-1.5.6/lib/common/threading.c +182 -0
  65. data/ext/zstdlib_c/zstd-1.5.6/lib/common/threading.h +150 -0
  66. data/ext/zstdlib_c/zstd-1.5.6/lib/common/xxhash.c +18 -0
  67. data/ext/zstdlib_c/zstd-1.5.6/lib/common/xxhash.h +7020 -0
  68. data/ext/zstdlib_c/zstd-1.5.6/lib/common/zstd_common.c +48 -0
  69. data/ext/zstdlib_c/zstd-1.5.6/lib/common/zstd_deps.h +111 -0
  70. data/ext/zstdlib_c/zstd-1.5.6/lib/common/zstd_internal.h +392 -0
  71. data/ext/zstdlib_c/zstd-1.5.6/lib/common/zstd_trace.h +163 -0
  72. data/ext/zstdlib_c/zstd-1.5.6/lib/compress/clevels.h +134 -0
  73. data/ext/zstdlib_c/zstd-1.5.6/lib/compress/fse_compress.c +625 -0
  74. data/ext/zstdlib_c/zstd-1.5.6/lib/compress/hist.c +181 -0
  75. data/ext/zstdlib_c/zstd-1.5.6/lib/compress/hist.h +75 -0
  76. data/ext/zstdlib_c/zstd-1.5.6/lib/compress/huf_compress.c +1464 -0
  77. data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_compress.c +7153 -0
  78. data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_compress_internal.h +1534 -0
  79. data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_compress_literals.c +235 -0
  80. data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_compress_literals.h +39 -0
  81. data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_compress_sequences.c +442 -0
  82. data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_compress_sequences.h +54 -0
  83. data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_compress_superblock.c +688 -0
  84. data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_compress_superblock.h +32 -0
  85. data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_cwksp.h +748 -0
  86. data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_double_fast.c +770 -0
  87. data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_double_fast.h +50 -0
  88. data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_fast.c +968 -0
  89. data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_fast.h +38 -0
  90. data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_lazy.c +2199 -0
  91. data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_lazy.h +202 -0
  92. data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_ldm.c +730 -0
  93. data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_ldm.h +117 -0
  94. data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_ldm_geartab.h +106 -0
  95. data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_opt.c +1576 -0
  96. data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_opt.h +80 -0
  97. data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstdmt_compress.c +1882 -0
  98. data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstdmt_compress.h +113 -0
  99. data/ext/zstdlib_c/zstd-1.5.6/lib/decompress/huf_decompress.c +1944 -0
  100. data/ext/zstdlib_c/zstd-1.5.6/lib/decompress/huf_decompress_amd64.S +595 -0
  101. data/ext/zstdlib_c/zstd-1.5.6/lib/decompress/zstd_ddict.c +244 -0
  102. data/ext/zstdlib_c/zstd-1.5.6/lib/decompress/zstd_ddict.h +44 -0
  103. data/ext/zstdlib_c/zstd-1.5.6/lib/decompress/zstd_decompress.c +2407 -0
  104. data/ext/zstdlib_c/zstd-1.5.6/lib/decompress/zstd_decompress_block.c +2215 -0
  105. data/ext/zstdlib_c/zstd-1.5.6/lib/decompress/zstd_decompress_block.h +73 -0
  106. data/ext/zstdlib_c/zstd-1.5.6/lib/decompress/zstd_decompress_internal.h +240 -0
  107. data/ext/zstdlib_c/zstd-1.5.6/lib/zdict.h +474 -0
  108. data/ext/zstdlib_c/zstd-1.5.6/lib/zstd.h +3089 -0
  109. data/ext/zstdlib_c/zstd-1.5.6/lib/zstd_errors.h +114 -0
  110. data/ext/zstdlib_c/zstd-1.5.6/zlibWrapper/gzclose.c +26 -0
  111. data/ext/zstdlib_c/zstd-1.5.6/zlibWrapper/gzcompatibility.h +68 -0
  112. data/ext/zstdlib_c/zstd-1.5.6/zlibWrapper/gzguts.h +229 -0
  113. data/ext/zstdlib_c/zstd-1.5.6/zlibWrapper/gzlib.c +587 -0
  114. data/ext/zstdlib_c/zstd-1.5.6/zlibWrapper/gzread.c +637 -0
  115. data/ext/zstdlib_c/zstd-1.5.6/zlibWrapper/gzwrite.c +631 -0
  116. data/ext/zstdlib_c/zstd-1.5.6/zlibWrapper/zstd_zlibwrapper.c +1200 -0
  117. data/ext/zstdlib_c/zstd-1.5.6/zlibWrapper/zstd_zlibwrapper.h +91 -0
  118. data/ext/zstdlib_c/zstd.mk +15 -0
  119. data/lib/2.4/zstdlib_c.so +0 -0
  120. data/lib/2.5/zstdlib_c.so +0 -0
  121. data/lib/2.6/zstdlib_c.so +0 -0
  122. data/lib/2.7/zstdlib_c.so +0 -0
  123. data/lib/3.0/zstdlib_c.so +0 -0
  124. data/lib/3.1/zstdlib_c.so +0 -0
  125. data/lib/3.2/zstdlib_c.so +0 -0
  126. data/lib/3.3/zstdlib_c.so +0 -0
  127. data/lib/zstdlib.rb +6 -0
  128. data/test/zstdlib_test.rb +21 -0
  129. metadata +243 -0
data/ext/zstdlib_c/zstd-1.5.6/lib/common/fse_decompress.c
@@ -0,0 +1,313 @@
+ /* ******************************************************************
+ * FSE : Finite State Entropy decoder
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * You can contact the author at :
+ * - FSE source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ * - Public forum : https://groups.google.com/forum/#!forum/lz4c
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ ****************************************************************** */
+
+
+ /* **************************************************************
+ * Includes
+ ****************************************************************/
+ #include "debug.h" /* assert */
+ #include "bitstream.h"
+ #include "compiler.h"
+ #define FSE_STATIC_LINKING_ONLY
+ #include "fse.h"
+ #include "error_private.h"
+ #include "zstd_deps.h" /* ZSTD_memcpy */
+ #include "bits.h" /* ZSTD_highbit32 */
+
+
+ /* **************************************************************
+ * Error Management
+ ****************************************************************/
+ #define FSE_isError ERR_isError
+ #define FSE_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */
+
+
+ /* **************************************************************
+ * Templates
+ ****************************************************************/
+ /*
+ designed to be included
+ for type-specific functions (template emulation in C)
+ Objective is to write these functions only once, for improved maintenance
+ */
+
+ /* safety checks */
+ #ifndef FSE_FUNCTION_EXTENSION
+ # error "FSE_FUNCTION_EXTENSION must be defined"
+ #endif
+ #ifndef FSE_FUNCTION_TYPE
+ # error "FSE_FUNCTION_TYPE must be defined"
+ #endif
+
+ /* Function names */
+ #define FSE_CAT(X,Y) X##Y
+ #define FSE_FUNCTION_NAME(X,Y) FSE_CAT(X,Y)
+ #define FSE_TYPE_NAME(X,Y) FSE_CAT(X,Y)
+
+ static size_t FSE_buildDTable_internal(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
+ {
+ void* const tdPtr = dt+1; /* because *dt is unsigned, 32-bits aligned on 32-bits */
+ FSE_DECODE_TYPE* const tableDecode = (FSE_DECODE_TYPE*) (tdPtr);
+ U16* symbolNext = (U16*)workSpace;
+ BYTE* spread = (BYTE*)(symbolNext + maxSymbolValue + 1);
+
+ U32 const maxSV1 = maxSymbolValue + 1;
+ U32 const tableSize = 1 << tableLog;
+ U32 highThreshold = tableSize-1;
+
+ /* Sanity Checks */
+ if (FSE_BUILD_DTABLE_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(maxSymbolValue_tooLarge);
+ if (maxSymbolValue > FSE_MAX_SYMBOL_VALUE) return ERROR(maxSymbolValue_tooLarge);
+ if (tableLog > FSE_MAX_TABLELOG) return ERROR(tableLog_tooLarge);
+
+ /* Init, lay down lowprob symbols */
+ { FSE_DTableHeader DTableH;
+ DTableH.tableLog = (U16)tableLog;
+ DTableH.fastMode = 1;
+ { S16 const largeLimit= (S16)(1 << (tableLog-1));
+ U32 s;
+ for (s=0; s<maxSV1; s++) {
+ if (normalizedCounter[s]==-1) {
+ tableDecode[highThreshold--].symbol = (FSE_FUNCTION_TYPE)s;
+ symbolNext[s] = 1;
+ } else {
+ if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
+ symbolNext[s] = (U16)normalizedCounter[s];
+ } } }
+ ZSTD_memcpy(dt, &DTableH, sizeof(DTableH));
+ }
+
+ /* Spread symbols */
+ if (highThreshold == tableSize - 1) {
+ size_t const tableMask = tableSize-1;
+ size_t const step = FSE_TABLESTEP(tableSize);
+ /* First lay down the symbols in order.
+ * We use a uint64_t to lay down 8 bytes at a time. This reduces branch
+ * misses since small blocks generally have small table logs, so nearly
+ * all symbols have counts <= 8. We ensure we have 8 bytes at the end of
+ * our buffer to handle the over-write.
+ */
+ { U64 const add = 0x0101010101010101ull;
+ size_t pos = 0;
+ U64 sv = 0;
+ U32 s;
+ for (s=0; s<maxSV1; ++s, sv += add) {
+ int i;
+ int const n = normalizedCounter[s];
+ MEM_write64(spread + pos, sv);
+ for (i = 8; i < n; i += 8) {
+ MEM_write64(spread + pos + i, sv);
+ }
+ pos += (size_t)n;
+ } }
+ /* Now we spread those positions across the table.
+ * The benefit of doing it in two stages is that we avoid the
+ * variable size inner loop, which caused lots of branch misses.
+ * Now we can run through all the positions without any branch misses.
+ * We unroll the loop twice, since that is what empirically worked best.
+ */
+ {
+ size_t position = 0;
+ size_t s;
+ size_t const unroll = 2;
+ assert(tableSize % unroll == 0); /* FSE_MIN_TABLELOG is 5 */
+ for (s = 0; s < (size_t)tableSize; s += unroll) {
+ size_t u;
+ for (u = 0; u < unroll; ++u) {
+ size_t const uPosition = (position + (u * step)) & tableMask;
+ tableDecode[uPosition].symbol = spread[s + u];
+ }
+ position = (position + (unroll * step)) & tableMask;
+ }
+ assert(position == 0);
+ }
+ } else {
+ U32 const tableMask = tableSize-1;
+ U32 const step = FSE_TABLESTEP(tableSize);
+ U32 s, position = 0;
+ for (s=0; s<maxSV1; s++) {
+ int i;
+ for (i=0; i<normalizedCounter[s]; i++) {
+ tableDecode[position].symbol = (FSE_FUNCTION_TYPE)s;
+ position = (position + step) & tableMask;
+ while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
+ } }
+ if (position!=0) return ERROR(GENERIC); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
+ }
+
+ /* Build Decoding table */
+ { U32 u;
+ for (u=0; u<tableSize; u++) {
+ FSE_FUNCTION_TYPE const symbol = (FSE_FUNCTION_TYPE)(tableDecode[u].symbol);
+ U32 const nextState = symbolNext[symbol]++;
+ tableDecode[u].nbBits = (BYTE) (tableLog - ZSTD_highbit32(nextState) );
+ tableDecode[u].newState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
+ } }
+
+ return 0;
+ }
+
+ size_t FSE_buildDTable_wksp(FSE_DTable* dt, const short* normalizedCounter, unsigned maxSymbolValue, unsigned tableLog, void* workSpace, size_t wkspSize)
+ {
+ return FSE_buildDTable_internal(dt, normalizedCounter, maxSymbolValue, tableLog, workSpace, wkspSize);
+ }
+
+
+ #ifndef FSE_COMMONDEFS_ONLY
+
+ /*-*******************************************************
+ * Decompression (Byte symbols)
+ *********************************************************/
+
+ FORCE_INLINE_TEMPLATE size_t FSE_decompress_usingDTable_generic(
+ void* dst, size_t maxDstSize,
+ const void* cSrc, size_t cSrcSize,
+ const FSE_DTable* dt, const unsigned fast)
+ {
+ BYTE* const ostart = (BYTE*) dst;
+ BYTE* op = ostart;
+ BYTE* const omax = op + maxDstSize;
+ BYTE* const olimit = omax-3;
+
+ BIT_DStream_t bitD;
+ FSE_DState_t state1;
+ FSE_DState_t state2;
+
+ /* Init */
+ CHECK_F(BIT_initDStream(&bitD, cSrc, cSrcSize));
+
+ FSE_initDState(&state1, &bitD, dt);
+ FSE_initDState(&state2, &bitD, dt);
+
+ #define FSE_GETSYMBOL(statePtr) fast ? FSE_decodeSymbolFast(statePtr, &bitD) : FSE_decodeSymbol(statePtr, &bitD)
+
+ /* 4 symbols per loop */
+ for ( ; (BIT_reloadDStream(&bitD)==BIT_DStream_unfinished) & (op<olimit) ; op+=4) {
+ op[0] = FSE_GETSYMBOL(&state1);
+
+ if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ BIT_reloadDStream(&bitD);
+
+ op[1] = FSE_GETSYMBOL(&state2);
+
+ if (FSE_MAX_TABLELOG*4+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ { if (BIT_reloadDStream(&bitD) > BIT_DStream_unfinished) { op+=2; break; } }
+
+ op[2] = FSE_GETSYMBOL(&state1);
+
+ if (FSE_MAX_TABLELOG*2+7 > sizeof(bitD.bitContainer)*8) /* This test must be static */
+ BIT_reloadDStream(&bitD);
+
+ op[3] = FSE_GETSYMBOL(&state2);
+ }
+
+ /* tail */
+ /* note : BIT_reloadDStream(&bitD) >= FSE_DStream_partiallyFilled; Ends at exactly BIT_DStream_completed */
+ while (1) {
+ if (op>(omax-2)) return ERROR(dstSize_tooSmall);
+ *op++ = FSE_GETSYMBOL(&state1);
+ if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
+ *op++ = FSE_GETSYMBOL(&state2);
+ break;
+ }
+
+ if (op>(omax-2)) return ERROR(dstSize_tooSmall);
+ *op++ = FSE_GETSYMBOL(&state2);
+ if (BIT_reloadDStream(&bitD)==BIT_DStream_overflow) {
+ *op++ = FSE_GETSYMBOL(&state1);
+ break;
+ } }
+
+ assert(op >= ostart);
+ return (size_t)(op-ostart);
+ }
+
+ typedef struct {
+ short ncount[FSE_MAX_SYMBOL_VALUE + 1];
+ } FSE_DecompressWksp;
+
+
+ FORCE_INLINE_TEMPLATE size_t FSE_decompress_wksp_body(
+ void* dst, size_t dstCapacity,
+ const void* cSrc, size_t cSrcSize,
+ unsigned maxLog, void* workSpace, size_t wkspSize,
+ int bmi2)
+ {
+ const BYTE* const istart = (const BYTE*)cSrc;
+ const BYTE* ip = istart;
+ unsigned tableLog;
+ unsigned maxSymbolValue = FSE_MAX_SYMBOL_VALUE;
+ FSE_DecompressWksp* const wksp = (FSE_DecompressWksp*)workSpace;
+ size_t const dtablePos = sizeof(FSE_DecompressWksp) / sizeof(FSE_DTable);
+ FSE_DTable* const dtable = (FSE_DTable*)workSpace + dtablePos;
+
+ FSE_STATIC_ASSERT((FSE_MAX_SYMBOL_VALUE + 1) % 2 == 0);
+ if (wkspSize < sizeof(*wksp)) return ERROR(GENERIC);
+
+ /* correct offset to dtable depends on this property */
+ FSE_STATIC_ASSERT(sizeof(FSE_DecompressWksp) % sizeof(FSE_DTable) == 0);
+
+ /* normal FSE decoding mode */
+ { size_t const NCountLength =
+ FSE_readNCount_bmi2(wksp->ncount, &maxSymbolValue, &tableLog, istart, cSrcSize, bmi2);
+ if (FSE_isError(NCountLength)) return NCountLength;
+ if (tableLog > maxLog) return ERROR(tableLog_tooLarge);
+ assert(NCountLength <= cSrcSize);
+ ip += NCountLength;
+ cSrcSize -= NCountLength;
+ }
+
+ if (FSE_DECOMPRESS_WKSP_SIZE(tableLog, maxSymbolValue) > wkspSize) return ERROR(tableLog_tooLarge);
+ assert(sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog) <= wkspSize);
+ workSpace = (BYTE*)workSpace + sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);
+ wkspSize -= sizeof(*wksp) + FSE_DTABLE_SIZE(tableLog);
+
+ CHECK_F( FSE_buildDTable_internal(dtable, wksp->ncount, maxSymbolValue, tableLog, workSpace, wkspSize) );
+
+ {
+ const void* ptr = dtable;
+ const FSE_DTableHeader* DTableH = (const FSE_DTableHeader*)ptr;
+ const U32 fastMode = DTableH->fastMode;
+
+ /* select fast mode (static) */
+ if (fastMode) return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, dtable, 1);
+ return FSE_decompress_usingDTable_generic(dst, dstCapacity, ip, cSrcSize, dtable, 0);
+ }
+ }
+
+ /* Avoids the FORCE_INLINE of the _body() function. */
+ static size_t FSE_decompress_wksp_body_default(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
+ {
+ return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 0);
+ }
+
+ #if DYNAMIC_BMI2
+ BMI2_TARGET_ATTRIBUTE static size_t FSE_decompress_wksp_body_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize)
+ {
+ return FSE_decompress_wksp_body(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize, 1);
+ }
+ #endif
+
+ size_t FSE_decompress_wksp_bmi2(void* dst, size_t dstCapacity, const void* cSrc, size_t cSrcSize, unsigned maxLog, void* workSpace, size_t wkspSize, int bmi2)
+ {
+ #if DYNAMIC_BMI2
+ if (bmi2) {
+ return FSE_decompress_wksp_body_bmi2(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
+ }
+ #endif
+ (void)bmi2;
+ return FSE_decompress_wksp_body_default(dst, dstCapacity, cSrc, cSrcSize, maxLog, workSpace, wkspSize);
+ }
+
+ #endif /* FSE_COMMONDEFS_ONLY */
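For orientation, the snippet below is a minimal caller-side sketch of the workspace entry point FSE_decompress_wksp_bmi2 defined above. It is illustrative only and not part of the package: the helper name decode_fse_block is invented, and it assumes the caller builds against the bundled zstd sources with FSE_STATIC_LINKING_ONLY defined so that fse.h exposes this declaration together with FSE_DECOMPRESS_WKSP_SIZE, FSE_MAX_TABLELOG and FSE_MAX_SYMBOL_VALUE.

#define FSE_STATIC_LINKING_ONLY
#include "fse.h"      /* FSE_decompress_wksp_bmi2, FSE_DECOMPRESS_WKSP_SIZE, FSE_isError */
#include <stdio.h>
#include <stdlib.h>

/* Decode one FSE-compressed block into dst; returns the regenerated size, or 0 on failure. */
static size_t decode_fse_block(void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize)
{
    /* Workspace sized for the largest table the decoder accepts. */
    size_t const wkspSize = FSE_DECOMPRESS_WKSP_SIZE(FSE_MAX_TABLELOG, FSE_MAX_SYMBOL_VALUE);
    void* const wksp = malloc(wkspSize);
    size_t result;
    if (wksp == NULL) return 0;
    /* bmi2=0 selects the portable body; pass 1 only when the CPU is known to
     * support BMI2 (see the DYNAMIC_BMI2 dispatch above). */
    result = FSE_decompress_wksp_bmi2(dst, dstCapacity, src, srcSize,
                                      FSE_MAX_TABLELOG, wksp, wkspSize, 0);
    free(wksp);
    if (FSE_isError(result)) {
        fprintf(stderr, "FSE decode failed: %s\n", FSE_getErrorName(result));
        return 0;
    }
    return result;
}

The workspace parameter is what keeps this path allocation-free inside the library; the heap allocation here only makes the sketch self-contained.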
data/ext/zstdlib_c/zstd-1.5.6/lib/common/huf.h
@@ -0,0 +1,286 @@
+ /* ******************************************************************
+ * huff0 huffman codec,
+ * part of Finite State Entropy library
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ *
+ * You can contact the author at :
+ * - Source repository : https://github.com/Cyan4973/FiniteStateEntropy
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ ****************************************************************** */
+
+ #if defined (__cplusplus)
+ extern "C" {
+ #endif
+
+ #ifndef HUF_H_298734234
+ #define HUF_H_298734234
+
+ /* *** Dependencies *** */
+ #include "zstd_deps.h" /* size_t */
+ #include "mem.h" /* U32 */
+ #define FSE_STATIC_LINKING_ONLY
+ #include "fse.h"
+
+
+ /* *** Tool functions *** */
+ #define HUF_BLOCKSIZE_MAX (128 * 1024) /**< maximum input size for a single block compressed with HUF_compress */
+ size_t HUF_compressBound(size_t size); /**< maximum compressed size (worst case) */
+
+ /* Error Management */
+ unsigned HUF_isError(size_t code); /**< tells if a return value is an error code */
+ const char* HUF_getErrorName(size_t code); /**< provides error code string (useful for debugging) */
+
+
+ #define HUF_WORKSPACE_SIZE ((8 << 10) + 512 /* sorting scratch space */)
+ #define HUF_WORKSPACE_SIZE_U64 (HUF_WORKSPACE_SIZE / sizeof(U64))
+
+ /* *** Constants *** */
+ #define HUF_TABLELOG_MAX 12 /* max runtime value of tableLog (due to static allocation); can be modified up to HUF_TABLELOG_ABSOLUTEMAX */
+ #define HUF_TABLELOG_DEFAULT 11 /* default tableLog value when none specified */
+ #define HUF_SYMBOLVALUE_MAX 255
+
+ #define HUF_TABLELOG_ABSOLUTEMAX 12 /* absolute limit of HUF_MAX_TABLELOG. Beyond that value, code does not work */
+ #if (HUF_TABLELOG_MAX > HUF_TABLELOG_ABSOLUTEMAX)
+ # error "HUF_TABLELOG_MAX is too large !"
+ #endif
+
+
+ /* ****************************************
+ * Static allocation
+ ******************************************/
+ /* HUF buffer bounds */
+ #define HUF_CTABLEBOUND 129
+ #define HUF_BLOCKBOUND(size) (size + (size>>8) + 8) /* only true when incompressible is pre-filtered with fast heuristic */
+ #define HUF_COMPRESSBOUND(size) (HUF_CTABLEBOUND + HUF_BLOCKBOUND(size)) /* Macro version, useful for static allocation */
+
+ /* static allocation of HUF's Compression Table */
+ /* this is a private definition, just exposed for allocation and strict aliasing purpose. never EVER access its members directly */
+ typedef size_t HUF_CElt; /* consider it an incomplete type */
+ #define HUF_CTABLE_SIZE_ST(maxSymbolValue) ((maxSymbolValue)+2) /* Use tables of size_t, for proper alignment */
+ #define HUF_CTABLE_SIZE(maxSymbolValue) (HUF_CTABLE_SIZE_ST(maxSymbolValue) * sizeof(size_t))
+ #define HUF_CREATE_STATIC_CTABLE(name, maxSymbolValue) \
+ HUF_CElt name[HUF_CTABLE_SIZE_ST(maxSymbolValue)] /* no final ; */
+
+ /* static allocation of HUF's DTable */
+ typedef U32 HUF_DTable;
+ #define HUF_DTABLE_SIZE(maxTableLog) (1 + (1<<(maxTableLog)))
+ #define HUF_CREATE_STATIC_DTABLEX1(DTable, maxTableLog) \
+ HUF_DTable DTable[HUF_DTABLE_SIZE((maxTableLog)-1)] = { ((U32)((maxTableLog)-1) * 0x01000001) }
+ #define HUF_CREATE_STATIC_DTABLEX2(DTable, maxTableLog) \
+ HUF_DTable DTable[HUF_DTABLE_SIZE(maxTableLog)] = { ((U32)(maxTableLog) * 0x01000001) }
+
+
+ /* ****************************************
+ * Advanced decompression functions
+ ******************************************/
+
+ /**
+ * Huffman flags bitset.
+ * For all flags, 0 is the default value.
+ */
+ typedef enum {
+ /**
+ * If compiled with DYNAMIC_BMI2: Set flag only if the CPU supports BMI2 at runtime.
+ * Otherwise: Ignored.
+ */
+ HUF_flags_bmi2 = (1 << 0),
+ /**
+ * If set: Test possible table depths to find the one that produces the smallest header + encoded size.
+ * If unset: Use heuristic to find the table depth.
+ */
+ HUF_flags_optimalDepth = (1 << 1),
+ /**
+ * If set: If the previous table can encode the input, always reuse the previous table.
+ * If unset: If the previous table can encode the input, reuse the previous table if it results in a smaller output.
+ */
+ HUF_flags_preferRepeat = (1 << 2),
+ /**
+ * If set: Sample the input and check if the sample is uncompressible, if it is then don't attempt to compress.
+ * If unset: Always histogram the entire input.
+ */
+ HUF_flags_suspectUncompressible = (1 << 3),
+ /**
+ * If set: Don't use assembly implementations
+ * If unset: Allow using assembly implementations
+ */
+ HUF_flags_disableAsm = (1 << 4),
+ /**
+ * If set: Don't use the fast decoding loop, always use the fallback decoding loop.
+ * If unset: Use the fast decoding loop when possible.
+ */
+ HUF_flags_disableFast = (1 << 5)
+ } HUF_flags_e;
+
+
+ /* ****************************************
+ * HUF detailed API
+ * ****************************************/
+ #define HUF_OPTIMAL_DEPTH_THRESHOLD ZSTD_btultra
+
+ /*! HUF_compress() does the following:
+ * 1. count symbol occurrence from source[] into table count[] using FSE_count() (exposed within "fse.h")
+ * 2. (optional) refine tableLog using HUF_optimalTableLog()
+ * 3. build Huffman table from count using HUF_buildCTable()
+ * 4. save Huffman table to memory buffer using HUF_writeCTable()
+ * 5. encode the data stream using HUF_compress4X_usingCTable()
+ *
+ * The following API allows targeting specific sub-functions for advanced tasks.
+ * For example, it's possible to compress several blocks using the same 'CTable',
+ * or to save and regenerate 'CTable' using external methods.
+ */
+ unsigned HUF_minTableLog(unsigned symbolCardinality);
+ unsigned HUF_cardinality(const unsigned* count, unsigned maxSymbolValue);
+ unsigned HUF_optimalTableLog(unsigned maxTableLog, size_t srcSize, unsigned maxSymbolValue, void* workSpace,
+ size_t wkspSize, HUF_CElt* table, const unsigned* count, int flags); /* table is used as scratch space for building and testing tables, not a return value */
+ size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, void* workspace, size_t workspaceSize);
+ size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags);
+ size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
+ int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue);
+
+ typedef enum {
+ HUF_repeat_none, /**< Cannot use the previous table */
+ HUF_repeat_check, /**< Can use the previous table but it must be checked. Note : The previous table must have been constructed by HUF_compress{1, 4}X_repeat */
+ HUF_repeat_valid /**< Can use the previous table and it is assumed to be valid */
+ } HUF_repeat;
+
+ /** HUF_compress4X_repeat() :
+ * Same as HUF_compress4X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
+ * If it uses hufTable it does not modify hufTable or repeat.
+ * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
+ * If preferRepeat then the old table will always be used if valid.
+ * If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */
+ size_t HUF_compress4X_repeat(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned tableLog,
+ void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
+ HUF_CElt* hufTable, HUF_repeat* repeat, int flags);
+
+ /** HUF_buildCTable_wksp() :
+ * Same as HUF_buildCTable(), but using externally allocated scratch buffer.
+ * `workSpace` must be aligned on 4-bytes boundaries, and its size must be >= HUF_CTABLE_WORKSPACE_SIZE.
+ */
+ #define HUF_CTABLE_WORKSPACE_SIZE_U32 ((4 * (HUF_SYMBOLVALUE_MAX + 1)) + 192)
+ #define HUF_CTABLE_WORKSPACE_SIZE (HUF_CTABLE_WORKSPACE_SIZE_U32 * sizeof(unsigned))
+ size_t HUF_buildCTable_wksp (HUF_CElt* tree,
+ const unsigned* count, U32 maxSymbolValue, U32 maxNbBits,
+ void* workSpace, size_t wkspSize);
+
+ /*! HUF_readStats() :
+ * Read compact Huffman tree, saved by HUF_writeCTable().
+ * `huffWeight` is destination buffer.
+ * @return : size read from `src` , or an error Code .
+ * Note : Needed by HUF_readCTable() and HUF_readDTableXn() . */
+ size_t HUF_readStats(BYTE* huffWeight, size_t hwSize,
+ U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize);
+
+ /*! HUF_readStats_wksp() :
+ * Same as HUF_readStats() but takes an external workspace which must be
+ * 4-byte aligned and its size must be >= HUF_READ_STATS_WORKSPACE_SIZE.
+ * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
+ */
+ #define HUF_READ_STATS_WORKSPACE_SIZE_U32 FSE_DECOMPRESS_WKSP_SIZE_U32(6, HUF_TABLELOG_MAX-1)
+ #define HUF_READ_STATS_WORKSPACE_SIZE (HUF_READ_STATS_WORKSPACE_SIZE_U32 * sizeof(unsigned))
+ size_t HUF_readStats_wksp(BYTE* huffWeight, size_t hwSize,
+ U32* rankStats, U32* nbSymbolsPtr, U32* tableLogPtr,
+ const void* src, size_t srcSize,
+ void* workspace, size_t wkspSize,
+ int flags);
+
+ /** HUF_readCTable() :
+ * Loading a CTable saved with HUF_writeCTable() */
+ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned *hasZeroWeights);
+
+ /** HUF_getNbBitsFromCTable() :
+ * Read nbBits from CTable symbolTable, for symbol `symbolValue` presumed <= HUF_SYMBOLVALUE_MAX
+ * Note 1 : If symbolValue > HUF_readCTableHeader(symbolTable).maxSymbolValue, returns 0
+ * Note 2 : is not inlined, as HUF_CElt definition is private
+ */
+ U32 HUF_getNbBitsFromCTable(const HUF_CElt* symbolTable, U32 symbolValue);
+
+ typedef struct {
+ BYTE tableLog;
+ BYTE maxSymbolValue;
+ BYTE unused[sizeof(size_t) - 2];
+ } HUF_CTableHeader;
+
+ /** HUF_readCTableHeader() :
+ * @returns The header from the CTable specifying the tableLog and the maxSymbolValue.
+ */
+ HUF_CTableHeader HUF_readCTableHeader(HUF_CElt const* ctable);
+
+ /*
+ * HUF_decompress() does the following:
+ * 1. select the decompression algorithm (X1, X2) based on pre-computed heuristics
+ * 2. build Huffman table from save, using HUF_readDTableX?()
+ * 3. decode 1 or 4 segments in parallel using HUF_decompress?X?_usingDTable()
+ */
+
+ /** HUF_selectDecoder() :
+ * Tells which decoder is likely to decode faster,
+ * based on a set of pre-computed metrics.
+ * @return : 0==HUF_decompress4X1, 1==HUF_decompress4X2 .
+ * Assumption : 0 < dstSize <= 128 KB */
+ U32 HUF_selectDecoder (size_t dstSize, size_t cSrcSize);
+
+ /**
+ * The minimum workspace size for the `workSpace` used in
+ * HUF_readDTableX1_wksp() and HUF_readDTableX2_wksp().
+ *
+ * The space used depends on HUF_TABLELOG_MAX, ranging from ~1500 bytes when
+ * HUF_TABLE_LOG_MAX=12 to ~1850 bytes when HUF_TABLE_LOG_MAX=15.
+ * Buffer overflow errors may potentially occur if code modifications result in
+ * a required workspace size greater than that specified in the following
+ * macro.
+ */
+ #define HUF_DECOMPRESS_WORKSPACE_SIZE ((2 << 10) + (1 << 9))
+ #define HUF_DECOMPRESS_WORKSPACE_SIZE_U32 (HUF_DECOMPRESS_WORKSPACE_SIZE / sizeof(U32))
+
+
+ /* ====================== */
+ /* single stream variants */
+ /* ====================== */
+
+ size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags);
+ /** HUF_compress1X_repeat() :
+ * Same as HUF_compress1X_wksp(), but considers using hufTable if *repeat != HUF_repeat_none.
+ * If it uses hufTable it does not modify hufTable or repeat.
+ * If it doesn't, it sets *repeat = HUF_repeat_none, and it sets hufTable to the table used.
+ * If preferRepeat then the old table will always be used if valid.
+ * If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */
+ size_t HUF_compress1X_repeat(void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned tableLog,
+ void* workSpace, size_t wkspSize, /**< `workSpace` must be aligned on 4-bytes boundaries, `wkspSize` must be >= HUF_WORKSPACE_SIZE */
+ HUF_CElt* hufTable, HUF_repeat* repeat, int flags);
+
+ size_t HUF_decompress1X_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags);
+ #ifndef HUF_FORCE_DECOMPRESS_X1
+ size_t HUF_decompress1X2_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags); /**< double-symbols decoder */
+ #endif
+
+ /* BMI2 variants.
+ * If the CPU has BMI2 support, pass bmi2=1, otherwise pass bmi2=0.
+ */
+ size_t HUF_decompress1X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags);
+ #ifndef HUF_FORCE_DECOMPRESS_X2
+ size_t HUF_decompress1X1_DCtx_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags);
+ #endif
+ size_t HUF_decompress4X_usingDTable(void* dst, size_t maxDstSize, const void* cSrc, size_t cSrcSize, const HUF_DTable* DTable, int flags);
+ size_t HUF_decompress4X_hufOnly_wksp(HUF_DTable* dctx, void* dst, size_t dstSize, const void* cSrc, size_t cSrcSize, void* workSpace, size_t wkspSize, int flags);
+ #ifndef HUF_FORCE_DECOMPRESS_X2
+ size_t HUF_readDTableX1_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags);
+ #endif
+ #ifndef HUF_FORCE_DECOMPRESS_X1
+ size_t HUF_readDTableX2_wksp(HUF_DTable* DTable, const void* src, size_t srcSize, void* workSpace, size_t wkspSize, int flags);
+ #endif
+
+ #endif /* HUF_H_298734234 */
+
+ #if defined (__cplusplus)
+ }
+ #endif
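To tie the header together, here is a hedged sketch of the "HUF detailed API" workflow documented above (HUF_optimalTableLog, HUF_buildCTable_wksp, HUF_writeCTable_wksp, HUF_compress4X_usingCTable). It is illustrative only and not part of the package: the helper name huf_compress_block is invented, the histogram count[]/maxSymbolValue pair is assumed to be supplied by the caller (for example via HIST_count() from the bundled hist.h), and flags = 0 keeps the HUF_flags_e defaults.

#include "huf.h"     /* declarations shown in the diff above */
#include "mem.h"     /* BYTE, U64 (also pulled in by huf.h) */

static size_t huf_compress_block(void* dst, size_t dstCapacity,
                                 const void* src, size_t srcSize,
                                 const unsigned* count, unsigned maxSymbolValue)
{
    HUF_CREATE_STATIC_CTABLE(ctable, HUF_SYMBOLVALUE_MAX);  /* CTable storage ("no final ;" macro) */
    U64 wksp[HUF_WORKSPACE_SIZE_U64];                        /* scratch shared by all steps */
    int const flags = 0;                                     /* HUF_flags_e defaults */
    BYTE* const op = (BYTE*)dst;

    /* step 1 (counting src into count[]) is assumed done by the caller */

    /* 2. pick a table depth suited to this input */
    unsigned const tableLog = HUF_optimalTableLog(HUF_TABLELOG_DEFAULT, srcSize, maxSymbolValue,
                                                  wksp, sizeof(wksp), ctable, count, flags);

    /* 3. build the Huffman table from the histogram; returns the max bit length used */
    size_t const maxBits = HUF_buildCTable_wksp(ctable, count, maxSymbolValue, tableLog,
                                                wksp, sizeof(wksp));
    if (HUF_isError(maxBits)) return maxBits;

    /* 4. serialize the table in front of the payload */
    {   size_t const hSize = HUF_writeCTable_wksp(op, dstCapacity, ctable, maxSymbolValue,
                                                  (unsigned)maxBits, wksp, sizeof(wksp));
        if (HUF_isError(hSize)) return hSize;

        /* 5. encode the input as 4 interleaved streams with that table */
        {   size_t const cSize = HUF_compress4X_usingCTable(op + hSize, dstCapacity - hSize,
                                                            src, srcSize, ctable, flags);
            if (HUF_isError(cSize) || cSize == 0) return cSize;  /* defensively treat 0 as "did not compress" */
            return hSize + cSize;
        }
    }
}

In practice the one-shot HUF_compress4X_repeat() declared above wraps this flow and additionally considers reusing a previous table via HUF_repeat.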