extzstd 0.3.1 → 0.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. checksums.yaml +4 -4
  2. data/README.md +28 -14
  3. data/contrib/zstd/CHANGELOG +114 -56
  4. data/contrib/zstd/CONTRIBUTING.md +14 -0
  5. data/contrib/zstd/Makefile +37 -31
  6. data/contrib/zstd/README.md +6 -0
  7. data/contrib/zstd/appveyor.yml +4 -1
  8. data/contrib/zstd/lib/Makefile +231 -134
  9. data/contrib/zstd/lib/README.md +28 -0
  10. data/contrib/zstd/lib/common/bitstream.h +24 -15
  11. data/contrib/zstd/lib/common/compiler.h +116 -3
  12. data/contrib/zstd/lib/common/cpu.h +0 -2
  13. data/contrib/zstd/lib/common/debug.h +11 -18
  14. data/contrib/zstd/lib/common/entropy_common.c +188 -42
  15. data/contrib/zstd/lib/common/error_private.c +1 -0
  16. data/contrib/zstd/lib/common/error_private.h +1 -1
  17. data/contrib/zstd/lib/common/fse.h +38 -11
  18. data/contrib/zstd/lib/common/fse_decompress.c +123 -16
  19. data/contrib/zstd/lib/common/huf.h +26 -5
  20. data/contrib/zstd/lib/common/mem.h +66 -93
  21. data/contrib/zstd/lib/common/pool.c +22 -16
  22. data/contrib/zstd/lib/common/pool.h +1 -1
  23. data/contrib/zstd/lib/common/threading.c +6 -5
  24. data/contrib/zstd/lib/common/xxhash.c +18 -56
  25. data/contrib/zstd/lib/common/xxhash.h +1 -1
  26. data/contrib/zstd/lib/common/zstd_common.c +9 -9
  27. data/contrib/zstd/lib/common/zstd_deps.h +111 -0
  28. data/contrib/zstd/lib/common/zstd_errors.h +1 -0
  29. data/contrib/zstd/lib/common/zstd_internal.h +89 -58
  30. data/contrib/zstd/lib/compress/fse_compress.c +30 -23
  31. data/contrib/zstd/lib/compress/hist.c +26 -28
  32. data/contrib/zstd/lib/compress/hist.h +1 -1
  33. data/contrib/zstd/lib/compress/huf_compress.c +210 -95
  34. data/contrib/zstd/lib/compress/zstd_compress.c +1339 -409
  35. data/contrib/zstd/lib/compress/zstd_compress_internal.h +119 -41
  36. data/contrib/zstd/lib/compress/zstd_compress_literals.c +4 -4
  37. data/contrib/zstd/lib/compress/zstd_compress_sequences.c +17 -3
  38. data/contrib/zstd/lib/compress/zstd_compress_superblock.c +23 -19
  39. data/contrib/zstd/lib/compress/zstd_cwksp.h +60 -24
  40. data/contrib/zstd/lib/compress/zstd_double_fast.c +22 -22
  41. data/contrib/zstd/lib/compress/zstd_fast.c +19 -19
  42. data/contrib/zstd/lib/compress/zstd_lazy.c +351 -77
  43. data/contrib/zstd/lib/compress/zstd_lazy.h +20 -0
  44. data/contrib/zstd/lib/compress/zstd_ldm.c +59 -18
  45. data/contrib/zstd/lib/compress/zstd_ldm.h +6 -0
  46. data/contrib/zstd/lib/compress/zstd_opt.c +190 -45
  47. data/contrib/zstd/lib/compress/zstdmt_compress.c +74 -406
  48. data/contrib/zstd/lib/compress/zstdmt_compress.h +26 -108
  49. data/contrib/zstd/lib/decompress/huf_decompress.c +302 -200
  50. data/contrib/zstd/lib/decompress/zstd_ddict.c +8 -8
  51. data/contrib/zstd/lib/decompress/zstd_ddict.h +1 -1
  52. data/contrib/zstd/lib/decompress/zstd_decompress.c +125 -80
  53. data/contrib/zstd/lib/decompress/zstd_decompress_block.c +145 -37
  54. data/contrib/zstd/lib/decompress/zstd_decompress_block.h +5 -2
  55. data/contrib/zstd/lib/decompress/zstd_decompress_internal.h +11 -10
  56. data/contrib/zstd/lib/dictBuilder/cover.c +29 -20
  57. data/contrib/zstd/lib/dictBuilder/cover.h +1 -1
  58. data/contrib/zstd/lib/dictBuilder/fastcover.c +20 -19
  59. data/contrib/zstd/lib/dictBuilder/zdict.c +15 -16
  60. data/contrib/zstd/lib/dictBuilder/zdict.h +1 -1
  61. data/contrib/zstd/lib/legacy/zstd_v01.c +5 -1
  62. data/contrib/zstd/lib/legacy/zstd_v02.c +5 -1
  63. data/contrib/zstd/lib/legacy/zstd_v03.c +5 -1
  64. data/contrib/zstd/lib/legacy/zstd_v04.c +6 -2
  65. data/contrib/zstd/lib/legacy/zstd_v05.c +5 -1
  66. data/contrib/zstd/lib/legacy/zstd_v06.c +5 -1
  67. data/contrib/zstd/lib/legacy/zstd_v07.c +5 -1
  68. data/contrib/zstd/lib/libzstd.pc.in +3 -3
  69. data/contrib/zstd/lib/zstd.h +348 -47
  70. data/ext/extzstd.c +6 -0
  71. data/ext/extzstd.h +6 -0
  72. data/gemstub.rb +3 -21
  73. data/lib/extzstd.rb +0 -2
  74. data/lib/extzstd/version.rb +6 -1
  75. data/test/test_basic.rb +0 -5
  76. metadata +5 -4
data/contrib/zstd/lib/compress/hist.c

@@ -34,7 +34,7 @@ unsigned HIST_count_simple(unsigned* count, unsigned* maxSymbolValuePtr,
  unsigned maxSymbolValue = *maxSymbolValuePtr;
  unsigned largestCount=0;

- memset(count, 0, (maxSymbolValue+1) * sizeof(*count));
+ ZSTD_memset(count, 0, (maxSymbolValue+1) * sizeof(*count));
  if (srcSize==0) { *maxSymbolValuePtr = 0; return 0; }

  while (ip<end) {
@@ -60,9 +60,9 @@ typedef enum { trustInput, checkMaxSymbolValue } HIST_checkInput_e;
  * this design makes better use of OoO cpus,
  * and is noticeably faster when some values are heavily repeated.
  * But it needs some additional workspace for intermediate tables.
- * `workSpace` size must be a table of size >= HIST_WKSP_SIZE_U32.
+ * `workSpace` must be a U32 table of size >= HIST_WKSP_SIZE_U32.
  * @return : largest histogram frequency,
- * or an error code (notably when histogram would be larger than *maxSymbolValuePtr). */
+ * or an error code (notably when histogram's alphabet is larger than *maxSymbolValuePtr) */
  static size_t HIST_count_parallel_wksp(
  unsigned* count, unsigned* maxSymbolValuePtr,
  const void* source, size_t sourceSize,
@@ -71,22 +71,21 @@ static size_t HIST_count_parallel_wksp(
  {
  const BYTE* ip = (const BYTE*)source;
  const BYTE* const iend = ip+sourceSize;
- unsigned maxSymbolValue = *maxSymbolValuePtr;
+ size_t const countSize = (*maxSymbolValuePtr + 1) * sizeof(*count);
  unsigned max=0;
  U32* const Counting1 = workSpace;
  U32* const Counting2 = Counting1 + 256;
  U32* const Counting3 = Counting2 + 256;
  U32* const Counting4 = Counting3 + 256;

- memset(workSpace, 0, 4*256*sizeof(unsigned));
-
  /* safety checks */
+ assert(*maxSymbolValuePtr <= 255);
  if (!sourceSize) {
- memset(count, 0, maxSymbolValue + 1);
+ ZSTD_memset(count, 0, countSize);
  *maxSymbolValuePtr = 0;
  return 0;
  }
- if (!maxSymbolValue) maxSymbolValue = 255; /* 0 == default */
+ ZSTD_memset(workSpace, 0, 4*256*sizeof(unsigned));

  /* by stripes of 16 bytes */
  { U32 cached = MEM_read32(ip); ip += 4;
@@ -118,21 +117,18 @@ static size_t HIST_count_parallel_wksp(
  /* finish last symbols */
  while (ip<iend) Counting1[*ip++]++;

- if (check) { /* verify stats will fit into destination table */
- U32 s; for (s=255; s>maxSymbolValue; s--) {
- Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
- if (Counting1[s]) return ERROR(maxSymbolValue_tooSmall);
- } }
-
  { U32 s;
- if (maxSymbolValue > 255) maxSymbolValue = 255;
- for (s=0; s<=maxSymbolValue; s++) {
- count[s] = Counting1[s] + Counting2[s] + Counting3[s] + Counting4[s];
- if (count[s] > max) max = count[s];
+ for (s=0; s<256; s++) {
+ Counting1[s] += Counting2[s] + Counting3[s] + Counting4[s];
+ if (Counting1[s] > max) max = Counting1[s];
  } }

- while (!count[maxSymbolValue]) maxSymbolValue--;
- *maxSymbolValuePtr = maxSymbolValue;
+ { unsigned maxSymbolValue = 255;
+ while (!Counting1[maxSymbolValue]) maxSymbolValue--;
+ if (check && maxSymbolValue > *maxSymbolValuePtr) return ERROR(maxSymbolValue_tooSmall);
+ *maxSymbolValuePtr = maxSymbolValue;
+ ZSTD_memmove(count, Counting1, countSize); /* in case count & Counting1 are overlapping */
+ }
  return (size_t)max;
  }
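The counting loop reworked above relies on four independent counting tables that are filled in an interleaved fashion and only folded together at the end, which keeps consecutive increments free of store-to-load dependencies. A stand-alone sketch of that idea (illustrative only, not the library code; the real function also reads input by 16-byte stripes and works on a caller-provided workspace):

    /* 4-lane byte histogram: fill four partial tables, fold them at the end. */
    #include <stddef.h>

    static size_t histogram4(unsigned count[256], const unsigned char* src, size_t srcSize)
    {
        unsigned c1[256] = {0}, c2[256] = {0}, c3[256] = {0}, c4[256] = {0};
        size_t i = 0;
        unsigned largest = 0;
        for (; i + 4 <= srcSize; i += 4) {   /* four independent lanes */
            c1[src[i+0]]++;
            c2[src[i+1]]++;
            c3[src[i+2]]++;
            c4[src[i+3]]++;
        }
        for (; i < srcSize; i++) c1[src[i]]++;   /* tail */
        for (int s = 0; s < 256; s++) {          /* fold the lanes */
            count[s] = c1[s] + c2[s] + c3[s] + c4[s];
            if (count[s] > largest) largest = count[s];
        }
        return largest;   /* largest single frequency, like HIST_count_parallel_wksp() */
    }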

@@ -152,14 +148,6 @@ size_t HIST_countFast_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
  return HIST_count_parallel_wksp(count, maxSymbolValuePtr, source, sourceSize, trustInput, (U32*)workSpace);
  }

- /* fast variant (unsafe : won't check if src contains values beyond count[] limit) */
- size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
- const void* source, size_t sourceSize)
- {
- unsigned tmpCounters[HIST_WKSP_SIZE_U32];
- return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters, sizeof(tmpCounters));
- }
-
  /* HIST_count_wksp() :
  * Same as HIST_count(), but using an externally provided scratch buffer.
  * `workSpace` size must be table of >= HIST_WKSP_SIZE_U32 unsigned */
@@ -175,9 +163,19 @@ size_t HIST_count_wksp(unsigned* count, unsigned* maxSymbolValuePtr,
  return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, workSpace, workSpaceSize);
  }

+ #ifndef ZSTD_NO_UNUSED_FUNCTIONS
+ /* fast variant (unsafe : won't check if src contains values beyond count[] limit) */
+ size_t HIST_countFast(unsigned* count, unsigned* maxSymbolValuePtr,
+ const void* source, size_t sourceSize)
+ {
+ unsigned tmpCounters[HIST_WKSP_SIZE_U32];
+ return HIST_countFast_wksp(count, maxSymbolValuePtr, source, sourceSize, tmpCounters, sizeof(tmpCounters));
+ }
+
  size_t HIST_count(unsigned* count, unsigned* maxSymbolValuePtr,
  const void* src, size_t srcSize)
  {
  unsigned tmpCounters[HIST_WKSP_SIZE_U32];
  return HIST_count_wksp(count, maxSymbolValuePtr, src, srcSize, tmpCounters, sizeof(tmpCounters));
  }
+ #endif
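The no-workspace wrappers HIST_countFast() and HIST_count() are now compiled only when ZSTD_NO_UNUSED_FUNCTIONS is not defined; internally the library calls the _wksp variants with an explicit scratch buffer. A minimal usage sketch mirroring the retained wrapper (assumes the internal header hist.h is reachable on the include path):

    #include <stddef.h>
    #include "hist.h"   /* HIST_count_wksp, HIST_WKSP_SIZE_U32 */

    /* Histogram of the bytes in src, using a caller-owned scratch buffer. */
    static size_t count_bytes(unsigned count[256], const void* src, size_t srcSize)
    {
        unsigned maxSymbolValue = 255;       /* full byte alphabet */
        unsigned wksp[HIST_WKSP_SIZE_U32];   /* scratch required by the _wksp variants */
        return HIST_count_wksp(count, &maxSymbolValue, src, srcSize,
                               wksp, sizeof(wksp));   /* largest frequency, or an error code */
    }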
data/contrib/zstd/lib/compress/hist.h

@@ -14,7 +14,7 @@
  ****************************************************************** */

  /* --- dependencies --- */
- #include <stddef.h> /* size_t */
+ #include "../common/zstd_deps.h" /* size_t */


  /* --- simple histogram functions --- */
data/contrib/zstd/lib/compress/huf_compress.c

@@ -23,8 +23,7 @@
  /* **************************************************************
  * Includes
  ****************************************************************/
- #include <string.h> /* memcpy, memset */
- #include <stdio.h> /* printf (debug) */
+ #include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset */
  #include "../common/compiler.h"
  #include "../common/bitstream.h"
  #include "hist.h"
@@ -70,7 +69,7 @@ static size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weight
  U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER;

  FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)];
- BYTE scratchBuffer[1<<MAX_FSE_TABLELOG_FOR_HUFF_HEADER];
+ BYTE scratchBuffer[FSE_BUILD_CTABLE_WORKSPACE_SIZE(HUF_TABLELOG_MAX, MAX_FSE_TABLELOG_FOR_HUFF_HEADER)];

  unsigned count[HUF_TABLELOG_MAX+1];
  S16 norm[HUF_TABLELOG_MAX+1];
@@ -85,7 +84,7 @@ static size_t HUF_compressWeights (void* dst, size_t dstSize, const void* weight
  }

  tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue);
- CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue) );
+ CHECK_F( FSE_normalizeCount(norm, tableLog, count, wtSize, maxSymbolValue, /* useLowProbCount */ 0) );

  /* Write table description header */
  { CHECK_V_F(hSize, FSE_writeNCount(op, (size_t)(oend-op), norm, maxSymbolValue, tableLog) );
@@ -103,11 +102,6 @@
  }


- struct HUF_CElt_s {
- U16 val;
- BYTE nbBits;
- }; /* typedef'd to HUF_CElt within "huf.h" */
-
  /*! HUF_writeCTable() :
  `CTable` : Huffman tree to save, using huf representation.
  @return : size of saved CTable */
@@ -156,6 +150,7 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void

  /* get symbol weights */
  CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize));
+ *hasZeroWeights = (rankVal[0] > 0);

  /* check result */
  if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge);
@@ -164,16 +159,14 @@ size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void
  /* Prepare base value per rank */
  { U32 n, nextRankStart = 0;
  for (n=1; n<=tableLog; n++) {
- U32 current = nextRankStart;
+ U32 curr = nextRankStart;
  nextRankStart += (rankVal[n] << (n-1));
- rankVal[n] = current;
+ rankVal[n] = curr;
  } }

  /* fill nbBits */
- *hasZeroWeights = 0;
  { U32 n; for (n=0; n<nbSymbols; n++) {
  const U32 w = huffWeight[n];
- *hasZeroWeights |= (w == 0);
  CTable[n].nbBits = (BYTE)(tableLog + 1 - w) & -(w != 0);
  } }
@@ -212,32 +205,63 @@ typedef struct nodeElt_s {
  BYTE nbBits;
  } nodeElt;

+ /**
+ * HUF_setMaxHeight():
+ * Enforces maxNbBits on the Huffman tree described in huffNode.
+ *
+ * It sets all nodes with nbBits > maxNbBits to be maxNbBits. Then it adjusts
+ * the tree to so that it is a valid canonical Huffman tree.
+ *
+ * @pre The sum of the ranks of each symbol == 2^largestBits,
+ * where largestBits == huffNode[lastNonNull].nbBits.
+ * @post The sum of the ranks of each symbol == 2^largestBits,
+ * where largestBits is the return value <= maxNbBits.
+ *
+ * @param huffNode The Huffman tree modified in place to enforce maxNbBits.
+ * @param lastNonNull The symbol with the lowest count in the Huffman tree.
+ * @param maxNbBits The maximum allowed number of bits, which the Huffman tree
+ * may not respect. After this function the Huffman tree will
+ * respect maxNbBits.
+ * @return The maximum number of bits of the Huffman tree after adjustment,
+ * necessarily no more than maxNbBits.
+ */
  static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
  {
  const U32 largestBits = huffNode[lastNonNull].nbBits;
- if (largestBits <= maxNbBits) return largestBits; /* early exit : no elt > maxNbBits */
+ /* early exit : no elt > maxNbBits, so the tree is already valid. */
+ if (largestBits <= maxNbBits) return largestBits;

  /* there are several too large elements (at least >= 2) */
  { int totalCost = 0;
  const U32 baseCost = 1 << (largestBits - maxNbBits);
  int n = (int)lastNonNull;

+ /* Adjust any ranks > maxNbBits to maxNbBits.
+ * Compute totalCost, which is how far the sum of the ranks is
+ * we are over 2^largestBits after adjust the offending ranks.
+ */
  while (huffNode[n].nbBits > maxNbBits) {
  totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits));
  huffNode[n].nbBits = (BYTE)maxNbBits;
- n --;
- } /* n stops at huffNode[n].nbBits <= maxNbBits */
- while (huffNode[n].nbBits == maxNbBits) n--; /* n end at index of smallest symbol using < maxNbBits */
+ n--;
+ }
+ /* n stops at huffNode[n].nbBits <= maxNbBits */
+ assert(huffNode[n].nbBits <= maxNbBits);
+ /* n end at index of smallest symbol using < maxNbBits */
+ while (huffNode[n].nbBits == maxNbBits) --n;

- /* renorm totalCost */
- totalCost >>= (largestBits - maxNbBits); /* note : totalCost is necessarily a multiple of baseCost */
+ /* renorm totalCost from 2^largestBits to 2^maxNbBits
+ * note : totalCost is necessarily a multiple of baseCost */
+ assert((totalCost & (baseCost - 1)) == 0);
+ totalCost >>= (largestBits - maxNbBits);
+ assert(totalCost > 0);

  /* repay normalized cost */
  { U32 const noSymbol = 0xF0F0F0F0;
  U32 rankLast[HUF_TABLELOG_MAX+2];

- /* Get pos of last (smallest) symbol per rank */
- memset(rankLast, 0xF0, sizeof(rankLast));
+ /* Get pos of last (smallest = lowest cum. count) symbol per rank */
+ ZSTD_memset(rankLast, 0xF0, sizeof(rankLast));
  { U32 currentNbBits = maxNbBits;
  int pos;
  for (pos=n ; pos >= 0; pos--) {
@@ -247,34 +271,65 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
  } }

  while (totalCost > 0) {
+ /* Try to reduce the next power of 2 above totalCost because we
+ * gain back half the rank.
+ */
  U32 nBitsToDecrease = BIT_highbit32((U32)totalCost) + 1;
  for ( ; nBitsToDecrease > 1; nBitsToDecrease--) {
  U32 const highPos = rankLast[nBitsToDecrease];
  U32 const lowPos = rankLast[nBitsToDecrease-1];
  if (highPos == noSymbol) continue;
+ /* Decrease highPos if no symbols of lowPos or if it is
+ * not cheaper to remove 2 lowPos than highPos.
+ */
  if (lowPos == noSymbol) break;
  { U32 const highTotal = huffNode[highPos].count;
  U32 const lowTotal = 2 * huffNode[lowPos].count;
  if (highTotal <= lowTotal) break;
  } }
  /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */
+ assert(rankLast[nBitsToDecrease] != noSymbol || nBitsToDecrease == 1);
  /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */
  while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol))
- nBitsToDecrease ++;
+ nBitsToDecrease++;
+ assert(rankLast[nBitsToDecrease] != noSymbol);
+ /* Increase the number of bits to gain back half the rank cost. */
  totalCost -= 1 << (nBitsToDecrease-1);
+ huffNode[rankLast[nBitsToDecrease]].nbBits++;
+
+ /* Fix up the new rank.
+ * If the new rank was empty, this symbol is now its smallest.
+ * Otherwise, this symbol will be the largest in the new rank so no adjustment.
+ */
  if (rankLast[nBitsToDecrease-1] == noSymbol)
- rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]; /* this rank is no longer empty */
- huffNode[rankLast[nBitsToDecrease]].nbBits ++;
+ rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease];
+ /* Fix up the old rank.
+ * If the symbol was at position 0, meaning it was the highest weight symbol in the tree,
+ * it must be the only symbol in its rank, so the old rank now has no symbols.
+ * Otherwise, since the Huffman nodes are sorted by count, the previous position is now
+ * the smallest node in the rank. If the previous position belongs to a different rank,
+ * then the rank is now empty.
+ */
  if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */
  rankLast[nBitsToDecrease] = noSymbol;
  else {
  rankLast[nBitsToDecrease]--;
  if (huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease)
  rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */
- } } /* while (totalCost > 0) */
-
+ }
+ } /* while (totalCost > 0) */
+
+ /* If we've removed too much weight, then we have to add it back.
+ * To avoid overshooting again, we only adjust the smallest rank.
+ * We take the largest nodes from the lowest rank 0 and move them
+ * to rank 1. There's guaranteed to be enough rank 0 symbols because
+ * TODO.
+ */
  while (totalCost < 0) { /* Sometimes, cost correction overshoot */
- if (rankLast[1] == noSymbol) { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
+ /* special case : no rank 1 symbol (using maxNbBits-1);
+ * let's create one from largest rank 0 (using maxNbBits).
+ */
+ if (rankLast[1] == noSymbol) {
  while (huffNode[n].nbBits == maxNbBits) n--;
  huffNode[n+1].nbBits--;
  assert(n >= 0);
@@ -285,14 +340,16 @@ static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 maxNbBits)
  huffNode[ rankLast[1] + 1 ].nbBits--;
  rankLast[1]++;
  totalCost ++;
- } } } /* there are several too large elements (at least >= 2) */
+ }
+ } /* repay normalized cost */
+ } /* there are several too large elements (at least >= 2) */

  return maxNbBits;
  }

  typedef struct {
  U32 base;
- U32 current;
+ U32 curr;
  } rankPos;

  typedef nodeElt huffNodeTable[HUF_CTABLE_WORKSPACE_SIZE_U32];
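The new doc comment above states HUF_setMaxHeight()'s invariant in terms of "the sum of the ranks": at every step the implied Kraft sum of the leaf depths must come back to exactly 2^largestBits, or equivalently the sum of 2^-depth over all leaves must equal 1. A toy check of that bookkeeping, with hand-picked depths and maxNbBits = 3 (illustrative only, not library code):

    #include <assert.h>
    #include <stdio.h>

    /* Kraft sum: sum over leaves of 2^-depth; exactly 1.0 for a full binary tree. */
    static double kraft(const int* depths, int n)
    {
        double s = 0;
        for (int i = 0; i < n; i++) s += 1.0 / (double)(1 << depths[i]);
        return s;
    }

    int main(void)
    {
        int before[5]   = {1, 2, 3, 4, 4};   /* valid tree, largestBits = 4 */
        int clamped[5]  = {1, 2, 3, 3, 3};   /* both depth-4 leaves forced to maxNbBits = 3 */
        int repaired[5] = {1, 3, 3, 3, 3};   /* one depth-2 leaf pushed down, repaying totalCost = 1 */
        assert(kraft(before, 5) == 1.0);
        printf("clamped sum = %.3f\n", kraft(clamped, 5));   /* 1.125 : overshoot of one slot at depth 3 */
        assert(kraft(repaired, 5) == 1.0);
        return 0;
    }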
@@ -304,21 +361,45 @@ typedef struct {
  rankPos rankPosition[RANK_POSITION_TABLE_SIZE];
  } HUF_buildCTable_wksp_tables;

+ /**
+ * HUF_sort():
+ * Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order.
+ *
+ * @param[out] huffNode Sorted symbols by decreasing count. Only members `.count` and `.byte` are filled.
+ * Must have (maxSymbolValue + 1) entries.
+ * @param[in] count Histogram of the symbols.
+ * @param[in] maxSymbolValue Maximum symbol value.
+ * @param rankPosition This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries.
+ */
  static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValue, rankPos* rankPosition)
  {
- U32 n;
-
- memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE);
- for (n=0; n<=maxSymbolValue; n++) {
- U32 r = BIT_highbit32(count[n] + 1);
- rankPosition[r].base ++;
+ int n;
+ int const maxSymbolValue1 = (int)maxSymbolValue + 1;
+
+ /* Compute base and set curr to base.
+ * For symbol s let lowerRank = BIT_highbit32(count[n]+1) and rank = lowerRank + 1.
+ * Then 2^lowerRank <= count[n]+1 <= 2^rank.
+ * We attribute each symbol to lowerRank's base value, because we want to know where
+ * each rank begins in the output, so for rank R we want to count ranks R+1 and above.
+ */
+ ZSTD_memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE);
+ for (n = 0; n < maxSymbolValue1; ++n) {
+ U32 lowerRank = BIT_highbit32(count[n] + 1);
+ rankPosition[lowerRank].base++;
  }
- for (n=30; n>0; n--) rankPosition[n-1].base += rankPosition[n].base;
- for (n=0; n<32; n++) rankPosition[n].current = rankPosition[n].base;
- for (n=0; n<=maxSymbolValue; n++) {
+ assert(rankPosition[RANK_POSITION_TABLE_SIZE - 1].base == 0);
+ for (n = RANK_POSITION_TABLE_SIZE - 1; n > 0; --n) {
+ rankPosition[n-1].base += rankPosition[n].base;
+ rankPosition[n-1].curr = rankPosition[n-1].base;
+ }
+ /* Sort */
+ for (n = 0; n < maxSymbolValue1; ++n) {
  U32 const c = count[n];
  U32 const r = BIT_highbit32(c+1) + 1;
- U32 pos = rankPosition[r].current++;
+ U32 pos = rankPosition[r].curr++;
+ /* Insert into the correct position in the rank.
+ * We have at most 256 symbols, so this insertion should be fine.
+ */
  while ((pos > rankPosition[r].base) && (c > huffNode[pos-1].count)) {
  huffNode[pos] = huffNode[pos-1];
  pos--;
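The new comment in HUF_sort() keys the bucketing off lowerRank = BIT_highbit32(count[n]+1), i.e. floor(log2(count+1)), so symbols with counts of similar magnitude land in the same rank before the short insertion pass. A quick illustration of that mapping, using a local stand-in for BIT_highbit32 (the real one lives in lib/common/bitstream.h):

    #include <stdio.h>

    /* Stand-in for BIT_highbit32(): index of the highest set bit, i.e. floor(log2(v)). */
    static unsigned highbit32(unsigned v)
    {
        unsigned r = 0;
        while (v >>= 1) r++;
        return r;
    }

    int main(void)
    {
        const unsigned counts[] = {0, 1, 2, 3, 4, 7, 8, 1000};
        for (unsigned i = 0; i < sizeof(counts)/sizeof(counts[0]); i++)
            printf("count=%4u -> lowerRank=%u\n", counts[i], highbit32(counts[i] + 1));
        /* prints lowerRank 0,1,1,2,2,3,3,9 : counts of similar magnitude share a bucket */
        return 0;
    }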
@@ -335,28 +416,20 @@ static void HUF_sort(nodeElt* huffNode, const unsigned* count, U32 maxSymbolValu
  */
  #define STARTNODE (HUF_SYMBOLVALUE_MAX+1)

- size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
+ /* HUF_buildTree():
+ * Takes the huffNode array sorted by HUF_sort() and builds an unlimited-depth Huffman tree.
+ *
+ * @param huffNode The array sorted by HUF_sort(). Builds the Huffman tree in this array.
+ * @param maxSymbolValue The maximum symbol value.
+ * @return The smallest node in the Huffman tree (by count).
+ */
+ static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue)
  {
- HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)workSpace;
- nodeElt* const huffNode0 = wksp_tables->huffNodeTbl;
- nodeElt* const huffNode = huffNode0+1;
+ nodeElt* const huffNode0 = huffNode - 1;
  int nonNullRank;
  int lowS, lowN;
  int nodeNb = STARTNODE;
  int n, nodeRoot;
-
- /* safety checks */
- if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
- if (wkspSize < sizeof(HUF_buildCTable_wksp_tables))
- return ERROR(workSpace_tooSmall);
- if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
- if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
- return ERROR(maxSymbolValue_tooLarge);
- memset(huffNode0, 0, sizeof(huffNodeTable));
-
- /* sort, decreasing order */
- HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition);
-
  /* init for parents */
  nonNullRank = (int)maxSymbolValue;
  while(huffNode[nonNullRank].count == 0) nonNullRank--;
@@ -383,42 +456,72 @@ size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbo
  for (n=0; n<=nonNullRank; n++)
  huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1;

+ return nonNullRank;
+ }
+
+ /**
+ * HUF_buildCTableFromTree():
+ * Build the CTable given the Huffman tree in huffNode.
+ *
+ * @param[out] CTable The output Huffman CTable.
+ * @param huffNode The Huffman tree.
+ * @param nonNullRank The last and smallest node in the Huffman tree.
+ * @param maxSymbolValue The maximum symbol value.
+ * @param maxNbBits The exact maximum number of bits used in the Huffman tree.
+ */
+ static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, int nonNullRank, U32 maxSymbolValue, U32 maxNbBits)
+ {
+ /* fill result into ctable (val, nbBits) */
+ int n;
+ U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
+ U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};
+ int const alphabetSize = (int)(maxSymbolValue + 1);
+ for (n=0; n<=nonNullRank; n++)
+ nbPerRank[huffNode[n].nbBits]++;
+ /* determine starting value per rank */
+ { U16 min = 0;
+ for (n=(int)maxNbBits; n>0; n--) {
+ valPerRank[n] = min; /* get starting value within each rank */
+ min += nbPerRank[n];
+ min >>= 1;
+ } }
+ for (n=0; n<alphabetSize; n++)
+ CTable[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */
+ for (n=0; n<alphabetSize; n++)
+ CTable[n].val = valPerRank[CTable[n].nbBits]++; /* assign value within rank, symbol order */
+ }
+
+ size_t HUF_buildCTable_wksp (HUF_CElt* tree, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, void* workSpace, size_t wkspSize)
+ {
+ HUF_buildCTable_wksp_tables* const wksp_tables = (HUF_buildCTable_wksp_tables*)workSpace;
+ nodeElt* const huffNode0 = wksp_tables->huffNodeTbl;
+ nodeElt* const huffNode = huffNode0+1;
+ int nonNullRank;
+
+ /* safety checks */
+ if (((size_t)workSpace & 3) != 0) return ERROR(GENERIC); /* must be aligned on 4-bytes boundaries */
+ if (wkspSize < sizeof(HUF_buildCTable_wksp_tables))
+ return ERROR(workSpace_tooSmall);
+ if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT;
+ if (maxSymbolValue > HUF_SYMBOLVALUE_MAX)
+ return ERROR(maxSymbolValue_tooLarge);
+ ZSTD_memset(huffNode0, 0, sizeof(huffNodeTable));
+
+ /* sort, decreasing order */
+ HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition);
+
+ /* build tree */
+ nonNullRank = HUF_buildTree(huffNode, maxSymbolValue);
+
  /* enforce maxTableLog */
  maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits);
+ if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */

- /* fill result into tree (val, nbBits) */
- { U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0};
- U16 valPerRank[HUF_TABLELOG_MAX+1] = {0};
- int const alphabetSize = (int)(maxSymbolValue + 1);
- if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */
- for (n=0; n<=nonNullRank; n++)
- nbPerRank[huffNode[n].nbBits]++;
- /* determine stating value per rank */
- { U16 min = 0;
- for (n=(int)maxNbBits; n>0; n--) {
- valPerRank[n] = min; /* get starting value within each rank */
- min += nbPerRank[n];
- min >>= 1;
- } }
- for (n=0; n<alphabetSize; n++)
- tree[huffNode[n].byte].nbBits = huffNode[n].nbBits; /* push nbBits per symbol, symbol order */
- for (n=0; n<alphabetSize; n++)
- tree[n].val = valPerRank[tree[n].nbBits]++; /* assign value within rank, symbol order */
- }
+ HUF_buildCTableFromTree(tree, huffNode, nonNullRank, maxSymbolValue, maxNbBits);

  return maxNbBits;
  }

- /** HUF_buildCTable() :
- * @return : maxNbBits
- * Note : count is used before tree is written, so they can safely overlap
- */
- size_t HUF_buildCTable (HUF_CElt* tree, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits)
- {
- HUF_buildCTable_wksp_tables workspace;
- return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, &workspace, sizeof(workspace));
- }
-
  size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue)
  {
  size_t nbBits = 0;
@@ -695,7 +798,7 @@ HUF_compress_internal (void* dst, size_t dstSize,
  CHECK_F(maxBits);
  huffLog = (U32)maxBits;
  /* Zero unused symbols in CTable, so we can check it for validity */
- memset(table->CTable + (maxSymbolValue + 1), 0,
+ ZSTD_memset(table->CTable + (maxSymbolValue + 1), 0,
  sizeof(table->CTable) - ((maxSymbolValue + 1) * sizeof(HUF_CElt)));
  }

@@ -716,7 +819,7 @@ HUF_compress_internal (void* dst, size_t dstSize,
  op += hSize;
  if (repeat) { *repeat = HUF_repeat_none; }
  if (oldHufTable)
- memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */
+ ZSTD_memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */
  }
  return HUF_compressCTable_internal(ostart, op, oend,
  src, srcSize,
@@ -747,14 +850,6 @@ size_t HUF_compress1X_repeat (void* dst, size_t dstSize,
  repeat, preferRepeat, bmi2);
  }

- size_t HUF_compress1X (void* dst, size_t dstSize,
- const void* src, size_t srcSize,
- unsigned maxSymbolValue, unsigned huffLog)
- {
- unsigned workSpace[HUF_WORKSPACE_SIZE_U32];
- return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
- }
-
  /* HUF_compress4X_repeat():
  * compress input using 4 streams.
  * provide workspace to generate compression tables */
@@ -784,6 +879,25 @@ size_t HUF_compress4X_repeat (void* dst, size_t dstSize,
  hufTable, repeat, preferRepeat, bmi2);
  }

+ #ifndef ZSTD_NO_UNUSED_FUNCTIONS
+ /** HUF_buildCTable() :
+ * @return : maxNbBits
+ * Note : count is used before tree is written, so they can safely overlap
+ */
+ size_t HUF_buildCTable (HUF_CElt* tree, const unsigned* count, unsigned maxSymbolValue, unsigned maxNbBits)
+ {
+ HUF_buildCTable_wksp_tables workspace;
+ return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, &workspace, sizeof(workspace));
+ }
+
+ size_t HUF_compress1X (void* dst, size_t dstSize,
+ const void* src, size_t srcSize,
+ unsigned maxSymbolValue, unsigned huffLog)
+ {
+ unsigned workSpace[HUF_WORKSPACE_SIZE_U32];
+ return HUF_compress1X_wksp(dst, dstSize, src, srcSize, maxSymbolValue, huffLog, workSpace, sizeof(workSpace));
+ }
+
  size_t HUF_compress2 (void* dst, size_t dstSize,
  const void* src, size_t srcSize,
  unsigned maxSymbolValue, unsigned huffLog)
@@ -796,3 +910,4 @@ size_t HUF_compress (void* dst, size_t maxDstSize, const void* src, size_t srcSi
  {
  return HUF_compress2(dst, maxDstSize, src, srcSize, 255, HUF_TABLELOG_DEFAULT);
  }
+ #endif
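As with the histogram helpers, the convenience wrappers HUF_buildCTable(), HUF_compress1X(), HUF_compress2() and HUF_compress() are now compiled only when ZSTD_NO_UNUSED_FUNCTIONS is not defined, and each simply forwards to its workspace-taking counterpart. A minimal single-stream usage sketch mirroring the retained HUF_compress1X() wrapper (assumes the internal header huf.h is reachable; error checking via HUF_isError() omitted for brevity):

    #include <stddef.h>
    #include "huf.h"   /* HUF_compress1X_wksp, HUF_WORKSPACE_SIZE_U32, HUF_TABLELOG_DEFAULT */

    /* Single-stream Huffman compression with a caller-owned workspace. */
    static size_t compress1X(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
    {
        unsigned workSpace[HUF_WORKSPACE_SIZE_U32];   /* scratch required by the _wksp variant */
        return HUF_compress1X_wksp(dst, dstCapacity, src, srcSize,
                                   255 /* maxSymbolValue */, HUF_TABLELOG_DEFAULT /* huffLog */,
                                   workSpace, sizeof(workSpace));
    }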