zstd-ruby 1.4.4.0 → 1.5.5.0

Files changed (115)
  1. checksums.yaml +4 -4
  2. data/.gitignore +2 -0
  3. data/README.md +78 -5
  4. data/Rakefile +8 -2
  5. data/ext/zstdruby/common.h +15 -0
  6. data/ext/zstdruby/extconf.rb +3 -2
  7. data/ext/zstdruby/libzstd/common/allocations.h +55 -0
  8. data/ext/zstdruby/libzstd/common/bits.h +200 -0
  9. data/ext/zstdruby/libzstd/common/bitstream.h +74 -97
  10. data/ext/zstdruby/libzstd/common/compiler.h +219 -20
  11. data/ext/zstdruby/libzstd/common/cpu.h +1 -3
  12. data/ext/zstdruby/libzstd/common/debug.c +11 -31
  13. data/ext/zstdruby/libzstd/common/debug.h +22 -49
  14. data/ext/zstdruby/libzstd/common/entropy_common.c +184 -80
  15. data/ext/zstdruby/libzstd/common/error_private.c +11 -2
  16. data/ext/zstdruby/libzstd/common/error_private.h +87 -4
  17. data/ext/zstdruby/libzstd/common/fse.h +47 -116
  18. data/ext/zstdruby/libzstd/common/fse_decompress.c +127 -127
  19. data/ext/zstdruby/libzstd/common/huf.h +112 -197
  20. data/ext/zstdruby/libzstd/common/mem.h +124 -142
  21. data/ext/zstdruby/libzstd/common/pool.c +54 -27
  22. data/ext/zstdruby/libzstd/common/pool.h +11 -5
  23. data/ext/zstdruby/libzstd/common/portability_macros.h +156 -0
  24. data/ext/zstdruby/libzstd/common/threading.c +78 -22
  25. data/ext/zstdruby/libzstd/common/threading.h +9 -13
  26. data/ext/zstdruby/libzstd/common/xxhash.c +15 -873
  27. data/ext/zstdruby/libzstd/common/xxhash.h +5572 -191
  28. data/ext/zstdruby/libzstd/common/zstd_common.c +2 -37
  29. data/ext/zstdruby/libzstd/common/zstd_deps.h +111 -0
  30. data/ext/zstdruby/libzstd/common/zstd_internal.h +186 -144
  31. data/ext/zstdruby/libzstd/common/zstd_trace.h +163 -0
  32. data/ext/zstdruby/libzstd/compress/clevels.h +134 -0
  33. data/ext/zstdruby/libzstd/compress/fse_compress.c +99 -196
  34. data/ext/zstdruby/libzstd/compress/hist.c +41 -63
  35. data/ext/zstdruby/libzstd/compress/hist.h +13 -33
  36. data/ext/zstdruby/libzstd/compress/huf_compress.c +968 -331
  37. data/ext/zstdruby/libzstd/compress/zstd_compress.c +4120 -1191
  38. data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +688 -159
  39. data/ext/zstdruby/libzstd/compress/zstd_compress_literals.c +121 -40
  40. data/ext/zstdruby/libzstd/compress/zstd_compress_literals.h +16 -6
  41. data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.c +62 -35
  42. data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.h +10 -3
  43. data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.c +577 -0
  44. data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.h +32 -0
  45. data/ext/zstdruby/libzstd/compress/zstd_cwksp.h +322 -115
  46. data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +394 -154
  47. data/ext/zstdruby/libzstd/compress/zstd_double_fast.h +4 -3
  48. data/ext/zstdruby/libzstd/compress/zstd_fast.c +729 -253
  49. data/ext/zstdruby/libzstd/compress/zstd_fast.h +4 -3
  50. data/ext/zstdruby/libzstd/compress/zstd_lazy.c +1289 -247
  51. data/ext/zstdruby/libzstd/compress/zstd_lazy.h +61 -1
  52. data/ext/zstdruby/libzstd/compress/zstd_ldm.c +339 -212
  53. data/ext/zstdruby/libzstd/compress/zstd_ldm.h +15 -3
  54. data/ext/zstdruby/libzstd/compress/zstd_ldm_geartab.h +106 -0
  55. data/ext/zstdruby/libzstd/compress/zstd_opt.c +508 -282
  56. data/ext/zstdruby/libzstd/compress/zstd_opt.h +1 -1
  57. data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +217 -466
  58. data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +35 -114
  59. data/ext/zstdruby/libzstd/decompress/huf_decompress.c +1220 -572
  60. data/ext/zstdruby/libzstd/decompress/huf_decompress_amd64.S +576 -0
  61. data/ext/zstdruby/libzstd/decompress/zstd_ddict.c +23 -19
  62. data/ext/zstdruby/libzstd/decompress/zstd_ddict.h +3 -3
  63. data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +859 -273
  64. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.c +1244 -375
  65. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.h +21 -7
  66. data/ext/zstdruby/libzstd/decompress/zstd_decompress_internal.h +74 -11
  67. data/ext/zstdruby/libzstd/dictBuilder/cover.c +75 -54
  68. data/ext/zstdruby/libzstd/dictBuilder/cover.h +20 -9
  69. data/ext/zstdruby/libzstd/dictBuilder/divsufsort.c +1 -1
  70. data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +55 -36
  71. data/ext/zstdruby/libzstd/dictBuilder/zdict.c +126 -110
  72. data/ext/zstdruby/libzstd/{dictBuilder/zdict.h → zdict.h} +248 -56
  73. data/ext/zstdruby/libzstd/zstd.h +1277 -306
  74. data/ext/zstdruby/libzstd/{common/zstd_errors.h → zstd_errors.h} +29 -8
  75. data/ext/zstdruby/main.c +20 -0
  76. data/ext/zstdruby/skippable_frame.c +63 -0
  77. data/ext/zstdruby/streaming_compress.c +177 -0
  78. data/ext/zstdruby/streaming_compress.h +5 -0
  79. data/ext/zstdruby/streaming_decompress.c +123 -0
  80. data/ext/zstdruby/zstdruby.c +114 -32
  81. data/lib/zstd-ruby/version.rb +1 -1
  82. data/lib/zstd-ruby.rb +0 -1
  83. data/zstd-ruby.gemspec +1 -1
  84. metadata +24 -39
  85. data/.travis.yml +0 -14
  86. data/ext/zstdruby/libzstd/.gitignore +0 -3
  87. data/ext/zstdruby/libzstd/BUCK +0 -234
  88. data/ext/zstdruby/libzstd/Makefile +0 -289
  89. data/ext/zstdruby/libzstd/README.md +0 -159
  90. data/ext/zstdruby/libzstd/deprecated/zbuff.h +0 -214
  91. data/ext/zstdruby/libzstd/deprecated/zbuff_common.c +0 -26
  92. data/ext/zstdruby/libzstd/deprecated/zbuff_compress.c +0 -147
  93. data/ext/zstdruby/libzstd/deprecated/zbuff_decompress.c +0 -75
  94. data/ext/zstdruby/libzstd/dll/example/Makefile +0 -47
  95. data/ext/zstdruby/libzstd/dll/example/README.md +0 -69
  96. data/ext/zstdruby/libzstd/dll/example/build_package.bat +0 -20
  97. data/ext/zstdruby/libzstd/dll/example/fullbench-dll.sln +0 -25
  98. data/ext/zstdruby/libzstd/dll/example/fullbench-dll.vcxproj +0 -181
  99. data/ext/zstdruby/libzstd/legacy/zstd_legacy.h +0 -415
  100. data/ext/zstdruby/libzstd/legacy/zstd_v01.c +0 -2152
  101. data/ext/zstdruby/libzstd/legacy/zstd_v01.h +0 -94
  102. data/ext/zstdruby/libzstd/legacy/zstd_v02.c +0 -3514
  103. data/ext/zstdruby/libzstd/legacy/zstd_v02.h +0 -93
  104. data/ext/zstdruby/libzstd/legacy/zstd_v03.c +0 -3156
  105. data/ext/zstdruby/libzstd/legacy/zstd_v03.h +0 -93
  106. data/ext/zstdruby/libzstd/legacy/zstd_v04.c +0 -3641
  107. data/ext/zstdruby/libzstd/legacy/zstd_v04.h +0 -142
  108. data/ext/zstdruby/libzstd/legacy/zstd_v05.c +0 -4046
  109. data/ext/zstdruby/libzstd/legacy/zstd_v05.h +0 -162
  110. data/ext/zstdruby/libzstd/legacy/zstd_v06.c +0 -4150
  111. data/ext/zstdruby/libzstd/legacy/zstd_v06.h +0 -172
  112. data/ext/zstdruby/libzstd/legacy/zstd_v07.c +0 -4533
  113. data/ext/zstdruby/libzstd/legacy/zstd_v07.h +0 -187
  114. data/ext/zstdruby/libzstd/libzstd.pc.in +0 -15
  115. data/ext/zstdruby/zstdruby.h +0 -6
data/ext/zstdruby/libzstd/compress/zstd_compress_literals.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -13,12 +13,37 @@
 ***************************************/
 #include "zstd_compress_literals.h"
 
+
+/* **************************************************************
+ * Debug Traces
+ ****************************************************************/
+#if DEBUGLEVEL >= 2
+
+static size_t showHexa(const void* src, size_t srcSize)
+{
+    const BYTE* const ip = (const BYTE*)src;
+    size_t u;
+    for (u=0; u<srcSize; u++) {
+        RAWLOG(5, " %02X", ip[u]); (void)ip;
+    }
+    RAWLOG(5, " \n");
+    return srcSize;
+}
+
+#endif
+
+
+/* **************************************************************
+ * Literals compression - special cases
+ ****************************************************************/
 size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
 {
-    BYTE* const ostart = (BYTE* const)dst;
+    BYTE* const ostart = (BYTE*)dst;
     U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
 
-    RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall);
+    DEBUGLOG(5, "ZSTD_noCompressLiterals: srcSize=%zu, dstCapacity=%zu", srcSize, dstCapacity);
+
+    RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall, "");
 
     switch(flSize)
     {
@@ -35,16 +60,31 @@ size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src,
         assert(0);
     }
 
-    memcpy(ostart + flSize, src, srcSize);
+    ZSTD_memcpy(ostart + flSize, src, srcSize);
+    DEBUGLOG(5, "Raw (uncompressed) literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize));
     return srcSize + flSize;
 }
 
+static int allBytesIdentical(const void* src, size_t srcSize)
+{
+    assert(srcSize >= 1);
+    assert(src != NULL);
+    {   const BYTE b = ((const BYTE*)src)[0];
+        size_t p;
+        for (p=1; p<srcSize; p++) {
+            if (((const BYTE*)src)[p] != b) return 0;
+        }
+        return 1;
+    }
+}
+
 size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
 {
-    BYTE* const ostart = (BYTE* const)dst;
+    BYTE* const ostart = (BYTE*)dst;
     U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
 
-    (void)dstCapacity;  /* dstCapacity already guaranteed to be >=4, hence large enough */
+    assert(dstCapacity >= 4); (void)dstCapacity;
+    assert(allBytesIdentical(src, srcSize));
 
     switch(flSize)
     {
@@ -62,66 +102,103 @@ size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void*
     }
 
     ostart[flSize] = *(const BYTE*)src;
+    DEBUGLOG(5, "RLE : Repeated Literal (%02X: %u times) -> %u bytes encoded", ((const BYTE*)src)[0], (U32)srcSize, (U32)flSize + 1);
     return flSize+1;
 }
 
-size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
-                              ZSTD_hufCTables_t* nextHuf,
-                              ZSTD_strategy strategy, int disableLiteralCompression,
-                              void* dst, size_t dstCapacity,
-                              const void* src, size_t srcSize,
-                              void* entropyWorkspace, size_t entropyWorkspaceSize,
-                              const int bmi2)
+/* ZSTD_minLiteralsToCompress() :
+ * returns minimal amount of literals
+ * for literal compression to even be attempted.
+ * Minimum is made tighter as compression strategy increases.
+ */
+static size_t
+ZSTD_minLiteralsToCompress(ZSTD_strategy strategy, HUF_repeat huf_repeat)
+{
+    assert((int)strategy >= 0);
+    assert((int)strategy <= 9);
+    /* btultra2 : min 8 bytes;
+     * then 2x larger for each successive compression strategy
+     * max threshold 64 bytes */
+    {   int const shift = MIN(9-(int)strategy, 3);
+        size_t const mintc = (huf_repeat == HUF_repeat_valid) ? 6 : (size_t)8 << shift;
+        DEBUGLOG(7, "minLiteralsToCompress = %zu", mintc);
+        return mintc;
+    }
+}
+
+size_t ZSTD_compressLiterals (
+                  void* dst, size_t dstCapacity,
+            const void* src, size_t srcSize,
+                  void* entropyWorkspace, size_t entropyWorkspaceSize,
+            const ZSTD_hufCTables_t* prevHuf,
+                  ZSTD_hufCTables_t* nextHuf,
+                  ZSTD_strategy strategy,
+                  int disableLiteralCompression,
+                  int suspectUncompressible,
+                  int bmi2)
 {
-    size_t const minGain = ZSTD_minGain(srcSize, strategy);
     size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
     BYTE* const ostart = (BYTE*)dst;
     U32 singleStream = srcSize < 256;
     symbolEncodingType_e hType = set_compressed;
     size_t cLitSize;
 
-    DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i)",
-                disableLiteralCompression);
+    DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i, srcSize=%u, dstCapacity=%zu)",
+                disableLiteralCompression, (U32)srcSize, dstCapacity);
+
+    DEBUGLOG(6, "Completed literals listing (%zu bytes)", showHexa(src, srcSize));
 
     /* Prepare nextEntropy assuming reusing the existing table */
-    memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+    ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
 
     if (disableLiteralCompression)
         return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
 
-    /* small ? don't even attempt compression (speed opt) */
-#   define COMPRESS_LITERALS_SIZE_MIN 63
-    {   size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
-        if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
-    }
+    /* if too small, don't even attempt compression (speed opt) */
+    if (srcSize < ZSTD_minLiteralsToCompress(strategy, prevHuf->repeatMode))
+        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
 
     RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression");
     {   HUF_repeat repeat = prevHuf->repeatMode;
-        int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
+        int const flags = 0
+            | (bmi2 ? HUF_flags_bmi2 : 0)
+            | (strategy < ZSTD_lazy && srcSize <= 1024 ? HUF_flags_preferRepeat : 0)
+            | (strategy >= HUF_OPTIMAL_DEPTH_THRESHOLD ? HUF_flags_optimalDepth : 0)
+            | (suspectUncompressible ? HUF_flags_suspectUncompressible : 0);
+
+        typedef size_t (*huf_compress_f)(void*, size_t, const void*, size_t, unsigned, unsigned, void*, size_t, HUF_CElt*, HUF_repeat*, int);
+        huf_compress_f huf_compress;
        if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
-        cLitSize = singleStream ?
-            HUF_compress1X_repeat(
-                ostart+lhSize, dstCapacity-lhSize, src, srcSize,
-                255, 11, entropyWorkspace, entropyWorkspaceSize,
-                (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2) :
-            HUF_compress4X_repeat(
-                ostart+lhSize, dstCapacity-lhSize, src, srcSize,
-                255, 11, entropyWorkspace, entropyWorkspaceSize,
-                (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2);
+        huf_compress = singleStream ? HUF_compress1X_repeat : HUF_compress4X_repeat;
+        cLitSize = huf_compress(ostart+lhSize, dstCapacity-lhSize,
+                                src, srcSize,
+                                HUF_SYMBOLVALUE_MAX, LitHufLog,
+                                entropyWorkspace, entropyWorkspaceSize,
+                                (HUF_CElt*)nextHuf->CTable,
+                                &repeat, flags);
+        DEBUGLOG(5, "%zu literals compressed into %zu bytes (before header)", srcSize, cLitSize);
        if (repeat != HUF_repeat_none) {
            /* reused the existing table */
+            DEBUGLOG(5, "reusing statistics from previous huffman block");
            hType = set_repeat;
        }
    }
 
-    if ((cLitSize==0) | (cLitSize >= srcSize - minGain) | ERR_isError(cLitSize)) {
-        memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
-        return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
-    }
+    {   size_t const minGain = ZSTD_minGain(srcSize, strategy);
+        if ((cLitSize==0) || (cLitSize >= srcSize - minGain) || ERR_isError(cLitSize)) {
+            ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+            return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+    }   }
    if (cLitSize==1) {
-        memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
-        return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
-    }
+        /* A return value of 1 signals that the alphabet consists of a single symbol.
+         * However, in some rare circumstances, it could be the compressed size (a single byte).
+         * For that outcome to have a chance to happen, it's necessary that `srcSize < 8`.
+         * (it's also necessary to not generate statistics).
+         * Therefore, in such a case, actively check that all bytes are identical. */
+        if ((srcSize >= 8) || allBytesIdentical(src, srcSize)) {
+            ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+            return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
+    }   }
 
    if (hType == set_compressed) {
        /* using a newly constructed table */
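For context, the new ZSTD_minLiteralsToCompress() heuristic above replaces the fixed COMPRESS_LITERALS_SIZE_MIN=63 threshold with a per-strategy one. The standalone sketch below (hypothetical helper name, independent of the zstd headers) reproduces the thresholds implied by the diff: 64 bytes for the weaker strategies, halving down to 8 bytes at btultra2 (strategy 9), or 6 bytes whenever the previous Huffman table is still valid.

```c
#include <stdio.h>
#include <stddef.h>

/* Standalone sketch of the threshold logic shown in the diff above:
 * btultra2 (strategy 9) -> 8 bytes, each weaker strategy doubles the
 * threshold, capped at 64 bytes; a still-valid Huffman table lowers
 * the bar to 6 bytes. Names here are illustrative, not the zstd API. */
static size_t min_literals_to_compress(int strategy, int huf_table_still_valid)
{
    int shift = 9 - strategy;
    if (shift > 3) shift = 3;               /* cap at 8 << 3 == 64 */
    if (huf_table_still_valid) return 6;
    return (size_t)8 << shift;
}

int main(void)
{
    int strategy;
    for (strategy = 1; strategy <= 9; strategy++) {
        printf("strategy %d -> min literals %zu (or 6 with a reusable table)\n",
               strategy, min_literals_to_compress(strategy, 0));
    }
    return 0;
}
```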
@@ -132,16 +209,19 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
     switch(lhSize)
     {
     case 3: /* 2 - 2 - 10 - 10 */
-        {   U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
+        if (!singleStream) assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS);
+        {   U32 const lhc = hType + ((U32)(!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
            MEM_writeLE24(ostart, lhc);
            break;
        }
    case 4: /* 2 - 2 - 14 - 14 */
+        assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS);
        {   U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
            MEM_writeLE32(ostart, lhc);
            break;
        }
    case 5: /* 2 - 2 - 18 - 18 */
+        assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS);
        {   U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
            MEM_writeLE32(ostart, lhc);
            ostart[4] = (BYTE)(cLitSize >> 10);
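The lhc values packed above implement the literals section header: 2 bits of block type, 2 bits of size format, then the regenerated and compressed sizes (10, 14 or 18 bits each, matching the case 3/4/5 comments). A minimal sketch of the 3-byte case with made-up sizes, writing the value little-endian the way MEM_writeLE24 would:

```c
#include <stdio.h>
#include <stdint.h>

/* Hedged sketch of the "case 3: 2 - 2 - 10 - 10" header packing above.
 * Field layout (little-endian bit order, per the zstd format):
 *   bits  0-1 : literals block type (2 = Compressed, 3 = Treeless/repeat)
 *   bits  2-3 : size format (0 = 1 stream, 1 = 4 streams, both 10+10 bits)
 *   bits  4-13: regenerated (decompressed) literals size
 *   bits 14-23: compressed literals size
 * Values below are made-up examples, not taken from a real block. */
int main(void)
{
    uint32_t const hType        = 2;    /* set_compressed */
    uint32_t const singleStream = 1;
    uint32_t const srcSize      = 200;  /* regenerated size, must fit in 10 bits */
    uint32_t const cLitSize     = 120;  /* compressed size, must fit in 10 bits  */

    uint32_t const lhc = hType
                       + ((!singleStream) << 2)
                       + (srcSize  << 4)
                       + (cLitSize << 14);

    /* MEM_writeLE24 equivalent: emit the low 3 bytes, least significant first */
    unsigned char header[3];
    header[0] = (unsigned char)(lhc & 0xFF);
    header[1] = (unsigned char)((lhc >> 8) & 0xFF);
    header[2] = (unsigned char)((lhc >> 16) & 0xFF);

    printf("lhc = 0x%06X -> header bytes %02X %02X %02X\n",
           (unsigned)lhc, header[0], header[1], header[2]);
    return 0;
}
```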
@@ -150,5 +230,6 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
     default:  /* not possible : lhSize is {3,4,5} */
         assert(0);
     }
+    DEBUGLOG(5, "Compressed literals: %u -> %u", (U32)srcSize, (U32)(lhSize+cLitSize));
     return lhSize+cLitSize;
 }
data/ext/zstdruby/libzstd/compress/zstd_compress_literals.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -16,14 +16,24 @@
 
 size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
 
+/* ZSTD_compressRleLiteralsBlock() :
+ * Conditions :
+ * - All bytes in @src are identical
+ * - dstCapacity >= 4 */
 size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
 
-size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
-                              ZSTD_hufCTables_t* nextHuf,
-                              ZSTD_strategy strategy, int disableLiteralCompression,
-                              void* dst, size_t dstCapacity,
+/* ZSTD_compressLiterals():
+ * @entropyWorkspace: must be aligned on 4-bytes boundaries
+ * @entropyWorkspaceSize : must be >= HUF_WORKSPACE_SIZE
+ * @suspectUncompressible: sampling checks, to potentially skip huffman coding
+ */
+size_t ZSTD_compressLiterals (void* dst, size_t dstCapacity,
                               const void* src, size_t srcSize,
                               void* entropyWorkspace, size_t entropyWorkspaceSize,
-                              const int bmi2);
+                              const ZSTD_hufCTables_t* prevHuf,
+                              ZSTD_hufCTables_t* nextHuf,
+                              ZSTD_strategy strategy, int disableLiteralCompression,
+                              int suspectUncompressible,
+                              int bmi2);
 
 #endif /* ZSTD_COMPRESS_LITERALS_H */
data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -50,6 +50,19 @@ static unsigned ZSTD_getFSEMaxSymbolValue(FSE_CTable const* ctable) {
     return maxSymbolValue;
 }
 
+/**
+ * Returns true if we should use ncount=-1 else we should
+ * use ncount=1 for low probability symbols instead.
+ */
+static unsigned ZSTD_useLowProbCount(size_t const nbSeq)
+{
+    /* Heuristic: This should cover most blocks <= 16K and
+     * start to fade out after 16K to about 32K depending on
+     * compressibility.
+     */
+    return nbSeq >= 2048;
+}
+
 /**
  * Returns the cost in bytes of encoding the normalized count header.
  * Returns an error if any of the helper functions return an error.
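The ncount=-1 convention mentioned above is FSE's marker for "low probability" symbols whose normalized count would otherwise round to zero. The toy below (not the FSE algorithm, made-up histogram) shows the underlying problem the heuristic is tuned for: scaling a histogram to a power-of-two table can drop rare-but-present symbols, which must instead be kept at minimum weight or flagged as low probability.

```c
#include <stdio.h>

/* Toy normalization sketch (not the FSE algorithm): scale a histogram so the
 * counts sum to a power-of-two table size. Rare-but-present symbols can round
 * down to 0 and must be kept at a minimum weight -- the situation the
 * ncount=-1 ("low probability") convention mentioned above addresses. */
#define TABLE_SIZE 256   /* 2^tableLog, illustrative value */

int main(void)
{
    unsigned count[4] = { 6000, 2000, 100, 1 };  /* made-up symbol histogram */
    unsigned total = 0, s, norm[4], normTotal = 0;

    for (s = 0; s < 4; s++) total += count[s];

    for (s = 0; s < 4; s++) {
        norm[s] = (unsigned)((unsigned long long)count[s] * TABLE_SIZE / total);
        if (count[s] && norm[s] == 0) norm[s] = 1;   /* never drop a live symbol */
        normTotal += norm[s];
    }
    /* A real normalizer redistributes the rounding error so normTotal == TABLE_SIZE. */

    for (s = 0; s < 4; s++)
        printf("symbol %u: count %u -> normalized %u (ideal %.2f)\n",
               s, count[s], norm[s], (double)count[s] * TABLE_SIZE / total);
    printf("normalized total = %u (target %d)\n", normTotal, TABLE_SIZE);
    return 0;
}
```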
@@ -60,7 +73,7 @@ static size_t ZSTD_NCountCost(unsigned const* count, unsigned const max,
     BYTE wksp[FSE_NCOUNTBOUND];
     S16 norm[MaxSeq + 1];
     const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
-    FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max));
+    FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq, max, ZSTD_useLowProbCount(nbSeq)), "");
     return FSE_writeNCount(wksp, sizeof(wksp), norm, max, tableLog);
 }
 
@@ -72,6 +85,8 @@ static size_t ZSTD_entropyCost(unsigned const* count, unsigned const max, size_t
 {
     unsigned cost = 0;
     unsigned s;
+
+    assert(total > 0);
     for (s = 0; s <= max; ++s) {
         unsigned norm = (unsigned)((256 * count[s]) / total);
         if (count[s] != 0 && norm == 0)
@@ -86,7 +101,7 @@
  * Returns the cost in bits of encoding the distribution in count using ctable.
  * Returns an error if ctable cannot represent all the symbols in count.
  */
-static size_t ZSTD_fseBitCost(
+size_t ZSTD_fseBitCost(
     FSE_CTable const* ctable,
     unsigned const* count,
     unsigned const max)
@@ -96,18 +111,22 @@ static size_t ZSTD_fseBitCost(
     unsigned s;
     FSE_CState_t cstate;
     FSE_initCState(&cstate, ctable);
-    RETURN_ERROR_IF(ZSTD_getFSEMaxSymbolValue(ctable) < max, GENERIC,
-                    "Repeat FSE_CTable has maxSymbolValue %u < %u",
+    if (ZSTD_getFSEMaxSymbolValue(ctable) < max) {
+        DEBUGLOG(5, "Repeat FSE_CTable has maxSymbolValue %u < %u",
                     ZSTD_getFSEMaxSymbolValue(ctable), max);
+        return ERROR(GENERIC);
+    }
     for (s = 0; s <= max; ++s) {
         unsigned const tableLog = cstate.stateLog;
         unsigned const badCost = (tableLog + 1) << kAccuracyLog;
         unsigned const bitCost = FSE_bitCost(cstate.symbolTT, tableLog, s, kAccuracyLog);
         if (count[s] == 0)
             continue;
-        RETURN_ERROR_IF(bitCost >= badCost, GENERIC,
-                        "Repeat FSE_CTable has Prob[%u] == 0", s);
-        cost += count[s] * bitCost;
+        if (bitCost >= badCost) {
+            DEBUGLOG(5, "Repeat FSE_CTable has Prob[%u] == 0", s);
+            return ERROR(GENERIC);
+        }
+        cost += (size_t)count[s] * bitCost;
     }
     return cost >> kAccuracyLog;
 }
@@ -117,15 +136,15 @@ static size_t ZSTD_fseBitCost(
  * table described by norm. The max symbol support by norm is assumed >= max.
  * norm must be valid for every symbol with non-zero probability in count.
  */
-static size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
-                                    unsigned const* count, unsigned const max)
+size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
+                             unsigned const* count, unsigned const max)
 {
     unsigned const shift = 8 - accuracyLog;
     size_t cost = 0;
     unsigned s;
     assert(accuracyLog <= 8);
     for (s = 0; s <= max; ++s) {
-        unsigned const normAcc = norm[s] != -1 ? norm[s] : 1;
+        unsigned const normAcc = (norm[s] != -1) ? (unsigned)norm[s] : 1;
         unsigned const norm256 = normAcc << shift;
         assert(norm256 > 0);
         assert(norm256 < 256);
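ZSTD_crossEntropyCost above estimates, in fixed point, how many bits a histogram `count` costs when encoded with a table sized for the normalized distribution `norm`. A floating-point reference of the same estimate (standalone sketch, made-up data, not the fixed-point tables zstd uses):

```c
#include <math.h>
#include <stdio.h>

/* Floating-point reference for the cross-entropy estimate: encoding symbol s,
 * whose code was sized for probability norm[s] / 2^accuracyLog, costs about
 * -log2(norm[s] / 2^accuracyLog) bits, so the whole block costs roughly
 *   sum_s count[s] * -log2(norm[s] / 2^accuracyLog)  bits.
 * The table-driven code above computes the same quantity in fixed point. */
static double cross_entropy_cost_bits(const short* norm, unsigned accuracyLog,
                                      const unsigned* count, unsigned max)
{
    double const tableSize = (double)(1u << accuracyLog);
    double bits = 0.0;
    unsigned s;
    for (s = 0; s <= max; s++) {
        double p;
        if (count[s] == 0) continue;
        /* norm[s] == -1 marks a "low probability" symbol: treat it as weight 1 */
        p = ((norm[s] == -1) ? 1.0 : (double)norm[s]) / tableSize;
        bits += (double)count[s] * -log2(p);
    }
    return bits;
}

int main(void)
{
    short    norm[3]  = { 200, 55, 1 };   /* made-up normalized counts, sum 256 */
    unsigned count[3] = { 780, 210, 10 }; /* made-up real histogram */
    printf("estimated cost: %.1f bits (%.1f bytes)\n",
           cross_entropy_cost_bits(norm, 8, count, 2),
           cross_entropy_cost_bits(norm, 8, count, 2) / 8.0);
    return 0;
}
```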
@@ -147,7 +166,7 @@ ZSTD_selectEncodingType(
     if (mostFrequent == nbSeq) {
         *repeatMode = FSE_repeat_none;
         if (isDefaultAllowed && nbSeq <= 2) {
-            /* Prefer set_basic over set_rle when there are 2 or less symbols,
+            /* Prefer set_basic over set_rle when there are 2 or fewer symbols,
             * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
             * If basic encoding isn't possible, always choose RLE.
             */
@@ -215,6 +234,11 @@ ZSTD_selectEncodingType(
     return set_compressed;
 }
 
+typedef struct {
+    S16 norm[MaxSeq + 1];
+    U32 wksp[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(MaxSeq, MaxFSELog)];
+} ZSTD_BuildCTableWksp;
+
 size_t
 ZSTD_buildCTable(void* dst, size_t dstCapacity,
                 FSE_CTable* nextCTable, U32 FSELog, symbolEncodingType_e type,
@@ -230,18 +254,18 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
 
     switch (type) {
     case set_rle:
-        FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max));
-        RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall);
+        FORWARD_IF_ERROR(FSE_buildCTable_rle(nextCTable, (BYTE)max), "");
+        RETURN_ERROR_IF(dstCapacity==0, dstSize_tooSmall, "not enough space");
         *op = codeTable[0];
         return 1;
     case set_repeat:
-        memcpy(nextCTable, prevCTable, prevCTableSize);
+        ZSTD_memcpy(nextCTable, prevCTable, prevCTableSize);
         return 0;
     case set_basic:
-        FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize));  /* note : could be pre-calculated */
+        FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, defaultNorm, defaultMax, defaultNormLog, entropyWorkspace, entropyWorkspaceSize), "");  /* note : could be pre-calculated */
         return 0;
     case set_compressed: {
-        S16 norm[MaxSeq + 1];
+        ZSTD_BuildCTableWksp* wksp = (ZSTD_BuildCTableWksp*)entropyWorkspace;
         size_t nbSeq_1 = nbSeq;
         const U32 tableLog = FSE_optimalTableLog(FSELog, nbSeq, max);
         if (count[codeTable[nbSeq-1]] > 1) {
@@ -249,14 +273,17 @@ ZSTD_buildCTable(void* dst, size_t dstCapacity,
            nbSeq_1--;
        }
        assert(nbSeq_1 > 1);
-        FORWARD_IF_ERROR(FSE_normalizeCount(norm, tableLog, count, nbSeq_1, max));
-        {   size_t const NCountSize = FSE_writeNCount(op, oend - op, norm, max, tableLog);   /* overflow protected */
-            FORWARD_IF_ERROR(NCountSize);
-            FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, norm, max, tableLog, entropyWorkspace, entropyWorkspaceSize));
+        assert(entropyWorkspaceSize >= sizeof(ZSTD_BuildCTableWksp));
+        (void)entropyWorkspaceSize;
+        FORWARD_IF_ERROR(FSE_normalizeCount(wksp->norm, tableLog, count, nbSeq_1, max, ZSTD_useLowProbCount(nbSeq_1)), "FSE_normalizeCount failed");
+        assert(oend >= op);
+        {   size_t const NCountSize = FSE_writeNCount(op, (size_t)(oend - op), wksp->norm, max, tableLog);   /* overflow protected */
+            FORWARD_IF_ERROR(NCountSize, "FSE_writeNCount failed");
+            FORWARD_IF_ERROR(FSE_buildCTable_wksp(nextCTable, wksp->norm, max, tableLog, wksp->wksp, sizeof(wksp->wksp)), "FSE_buildCTable_wksp failed");
            return NCountSize;
        }
    }
-    default: assert(0); RETURN_ERROR(GENERIC);
+    default: assert(0); RETURN_ERROR(GENERIC, "impossible to reach");
    }
 }
 
@@ -286,19 +313,19 @@ ZSTD_encodeSequences_body(
     FSE_initCState2(&stateLitLength, CTable_LitLength, llCodeTable[nbSeq-1]);
     BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
     if (MEM_32bits()) BIT_flushBits(&blockStream);
-    BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
+    BIT_addBits(&blockStream, sequences[nbSeq-1].mlBase, ML_bits[mlCodeTable[nbSeq-1]]);
     if (MEM_32bits()) BIT_flushBits(&blockStream);
     if (longOffsets) {
         U32 const ofBits = ofCodeTable[nbSeq-1];
-        int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
+        unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
         if (extraBits) {
-            BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
+            BIT_addBits(&blockStream, sequences[nbSeq-1].offBase, extraBits);
             BIT_flushBits(&blockStream);
         }
-        BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,
+        BIT_addBits(&blockStream, sequences[nbSeq-1].offBase >> extraBits,
                     ofBits - extraBits);
     } else {
-        BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
+        BIT_addBits(&blockStream, sequences[nbSeq-1].offBase, ofCodeTable[nbSeq-1]);
     }
     BIT_flushBits(&blockStream);
 
@@ -312,8 +339,8 @@
         U32 const mlBits = ML_bits[mlCode];
         DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
                     (unsigned)sequences[n].litLength,
-                    (unsigned)sequences[n].matchLength + MINMATCH,
-                    (unsigned)sequences[n].offset);
+                    (unsigned)sequences[n].mlBase + MINMATCH,
+                    (unsigned)sequences[n].offBase);
                                                                         /* 32b*/  /* 64b*/
                                                                         /* (7)*/  /* (7)*/
         FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode);       /* 15 */  /* 15 */
@@ -324,18 +351,18 @@
         BIT_flushBits(&blockStream);                                    /* (7)*/
         BIT_addBits(&blockStream, sequences[n].litLength, llBits);
         if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
-        BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
+        BIT_addBits(&blockStream, sequences[n].mlBase, mlBits);
         if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
         if (longOffsets) {
-            int const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
+            unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
             if (extraBits) {
-                BIT_addBits(&blockStream, sequences[n].offset, extraBits);
+                BIT_addBits(&blockStream, sequences[n].offBase, extraBits);
                 BIT_flushBits(&blockStream);                            /* (7)*/
             }
-            BIT_addBits(&blockStream, sequences[n].offset >> extraBits,
+            BIT_addBits(&blockStream, sequences[n].offBase >> extraBits,
                         ofBits - extraBits);                            /* 31 */
         } else {
-            BIT_addBits(&blockStream, sequences[n].offset, ofBits);     /* 31 */
+            BIT_addBits(&blockStream, sequences[n].offBase, ofBits);    /* 31 */
        }
        BIT_flushBits(&blockStream);                                    /* (7)*/
        DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr));
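The BIT_addBits / BIT_flushBits interleaving above packs variable-width fields into a little-endian bitstream while keeping the 32-bit or 64-bit accumulator from overflowing (hence the "32b / 64b" budget comments). A minimal standalone sketch of that accumulator pattern, not the zstd BIT_* API:

```c
#include <stdint.h>
#include <stdio.h>

/* Minimal bit-writer sketch: collect values LSB-first in a 64-bit accumulator
 * and flush whole bytes to the output buffer. The real zstd writer adds bounds
 * checks and flushes on a bit budget, as the 32b/64b comments above indicate. */
typedef struct {
    uint64_t bitContainer;
    unsigned bitPos;          /* number of valid bits in bitContainer */
    unsigned char* out;
    size_t outPos;
} BitWriter;

static void addBits(BitWriter* bw, uint64_t value, unsigned nbBits)
{
    bw->bitContainer |= (value & ((1ull << nbBits) - 1)) << bw->bitPos;
    bw->bitPos += nbBits;     /* caller must keep bitPos small enough before flushing */
}

static void flushBits(BitWriter* bw)
{
    while (bw->bitPos >= 8) {
        bw->out[bw->outPos++] = (unsigned char)bw->bitContainer;
        bw->bitContainer >>= 8;
        bw->bitPos -= 8;
    }
}

int main(void)
{
    unsigned char buf[16];
    BitWriter bw = { 0, 0, buf, 0 };
    size_t i;

    addBits(&bw, 0x5, 3);     /* e.g. a literal-length extra field */
    addBits(&bw, 0x1F, 5);    /* e.g. a match-length extra field   */
    addBits(&bw, 0x2AB, 10);  /* e.g. offset extra bits            */
    flushBits(&bw);

    printf("wrote %zu full bytes, %u bits pending:", bw.outPos, bw.bitPos);
    for (i = 0; i < bw.outPos; i++) printf(" %02X", buf[i]);
    printf("\n");
    return 0;
}
```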
@@ -372,7 +399,7 @@ ZSTD_encodeSequences_default(
 
 #if DYNAMIC_BMI2
 
-static TARGET_ATTRIBUTE("bmi2") size_t
+static BMI2_TARGET_ATTRIBUTE size_t
 ZSTD_encodeSequences_bmi2(
     void* dst, size_t dstCapacity,
     FSE_CTable const* CTable_MatchLength, BYTE const* mlCodeTable,
data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -11,8 +11,8 @@
 #ifndef ZSTD_COMPRESS_SEQUENCES_H
 #define ZSTD_COMPRESS_SEQUENCES_H
 
-#include "fse.h" /* FSE_repeat, FSE_CTable */
-#include "zstd_internal.h" /* symbolEncodingType_e, ZSTD_strategy */
+#include "../common/fse.h" /* FSE_repeat, FSE_CTable */
+#include "../common/zstd_internal.h" /* symbolEncodingType_e, ZSTD_strategy */
 
 typedef enum {
     ZSTD_defaultDisallowed = 0,
@@ -44,4 +44,11 @@ size_t ZSTD_encodeSequences(
     FSE_CTable const* CTable_LitLength, BYTE const* llCodeTable,
     seqDef const* sequences, size_t nbSeq, int longOffsets, int bmi2);
 
+size_t ZSTD_fseBitCost(
+    FSE_CTable const* ctable,
+    unsigned const* count,
+    unsigned const max);
+
+size_t ZSTD_crossEntropyCost(short const* norm, unsigned accuracyLog,
+                             unsigned const* count, unsigned const max);
 #endif /* ZSTD_COMPRESS_SEQUENCES_H */
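ZSTD_fseBitCost and ZSTD_crossEntropyCost are exported here so other compressor components can weigh the candidate table modes the way ZSTD_selectEncodingType does: reuse the previous table (no new table description), use the predefined distribution (no description either), or build a fresh table and pay for its NCount header. A hedged standalone sketch of that comparison with invented byte costs:

```c
#include <stdio.h>

/* Illustrative selection between the three table modes discussed above.
 * The byte costs are invented for the example; in zstd they come from
 * ZSTD_fseBitCost (repeat), ZSTD_crossEntropyCost (predefined/basic) and
 * ZSTD_entropyCost + ZSTD_NCountCost (newly built table). */
typedef enum { MODE_REPEAT, MODE_BASIC, MODE_COMPRESSED } table_mode;

static table_mode select_mode(double repeatBytes, double basicBytes,
                              double compressedPayloadBytes, double ncountHeaderBytes)
{
    double const compressedBytes = compressedPayloadBytes + ncountHeaderBytes;
    if (repeatBytes <= basicBytes && repeatBytes <= compressedBytes) return MODE_REPEAT;
    if (basicBytes <= compressedBytes) return MODE_BASIC;
    return MODE_COMPRESSED;
}

int main(void)
{
    static const char* const names[] = { "set_repeat", "set_basic", "set_compressed" };
    /* made-up estimates for one block */
    double repeatBytes = 412.0, basicBytes = 455.5, payloadBytes = 396.25, headerBytes = 12.0;
    printf("chosen mode: %s\n",
           names[select_mode(repeatBytes, basicBytes, payloadBytes, headerBytes)]);
    return 0;
}
```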