zstd-ruby 1.5.2.2 → 1.5.4.0

Files changed (75)
  1. checksums.yaml +4 -4
  2. data/README.md +15 -3
  3. data/ext/zstdruby/common.h +7 -0
  4. data/ext/zstdruby/libzstd/common/bits.h +175 -0
  5. data/ext/zstdruby/libzstd/common/bitstream.h +18 -59
  6. data/ext/zstdruby/libzstd/common/compiler.h +22 -3
  7. data/ext/zstdruby/libzstd/common/cpu.h +1 -1
  8. data/ext/zstdruby/libzstd/common/debug.c +1 -1
  9. data/ext/zstdruby/libzstd/common/debug.h +1 -1
  10. data/ext/zstdruby/libzstd/common/entropy_common.c +12 -40
  11. data/ext/zstdruby/libzstd/common/error_private.c +9 -2
  12. data/ext/zstdruby/libzstd/common/error_private.h +1 -1
  13. data/ext/zstdruby/libzstd/common/fse.h +5 -83
  14. data/ext/zstdruby/libzstd/common/fse_decompress.c +7 -99
  15. data/ext/zstdruby/libzstd/common/huf.h +65 -156
  16. data/ext/zstdruby/libzstd/common/mem.h +39 -46
  17. data/ext/zstdruby/libzstd/common/pool.c +26 -10
  18. data/ext/zstdruby/libzstd/common/pool.h +7 -1
  19. data/ext/zstdruby/libzstd/common/portability_macros.h +22 -3
  20. data/ext/zstdruby/libzstd/common/threading.c +68 -14
  21. data/ext/zstdruby/libzstd/common/threading.h +5 -10
  22. data/ext/zstdruby/libzstd/common/xxhash.c +2 -2
  23. data/ext/zstdruby/libzstd/common/xxhash.h +8 -8
  24. data/ext/zstdruby/libzstd/common/zstd_common.c +1 -1
  25. data/ext/zstdruby/libzstd/common/zstd_deps.h +1 -1
  26. data/ext/zstdruby/libzstd/common/zstd_internal.h +17 -113
  27. data/ext/zstdruby/libzstd/common/zstd_trace.h +3 -3
  28. data/ext/zstdruby/libzstd/compress/clevels.h +1 -1
  29. data/ext/zstdruby/libzstd/compress/fse_compress.c +7 -124
  30. data/ext/zstdruby/libzstd/compress/hist.c +1 -1
  31. data/ext/zstdruby/libzstd/compress/hist.h +1 -1
  32. data/ext/zstdruby/libzstd/compress/huf_compress.c +234 -169
  33. data/ext/zstdruby/libzstd/compress/zstd_compress.c +1055 -455
  34. data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +165 -145
  35. data/ext/zstdruby/libzstd/compress/zstd_compress_literals.c +115 -39
  36. data/ext/zstdruby/libzstd/compress/zstd_compress_literals.h +16 -8
  37. data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.c +3 -3
  38. data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.h +1 -1
  39. data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.c +25 -21
  40. data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.h +1 -1
  41. data/ext/zstdruby/libzstd/compress/zstd_cwksp.h +5 -3
  42. data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +95 -33
  43. data/ext/zstdruby/libzstd/compress/zstd_double_fast.h +3 -2
  44. data/ext/zstdruby/libzstd/compress/zstd_fast.c +433 -148
  45. data/ext/zstdruby/libzstd/compress/zstd_fast.h +3 -2
  46. data/ext/zstdruby/libzstd/compress/zstd_lazy.c +306 -283
  47. data/ext/zstdruby/libzstd/compress/zstd_lazy.h +4 -2
  48. data/ext/zstdruby/libzstd/compress/zstd_ldm.c +5 -5
  49. data/ext/zstdruby/libzstd/compress/zstd_ldm.h +1 -1
  50. data/ext/zstdruby/libzstd/compress/zstd_ldm_geartab.h +1 -1
  51. data/ext/zstdruby/libzstd/compress/zstd_opt.c +104 -80
  52. data/ext/zstdruby/libzstd/compress/zstd_opt.h +1 -1
  53. data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +12 -5
  54. data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +1 -1
  55. data/ext/zstdruby/libzstd/decompress/huf_decompress.c +434 -441
  56. data/ext/zstdruby/libzstd/decompress/huf_decompress_amd64.S +30 -39
  57. data/ext/zstdruby/libzstd/decompress/zstd_ddict.c +3 -4
  58. data/ext/zstdruby/libzstd/decompress/zstd_ddict.h +1 -1
  59. data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +164 -42
  60. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.c +186 -65
  61. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.h +1 -1
  62. data/ext/zstdruby/libzstd/decompress/zstd_decompress_internal.h +4 -2
  63. data/ext/zstdruby/libzstd/dictBuilder/cover.c +19 -15
  64. data/ext/zstdruby/libzstd/dictBuilder/cover.h +1 -1
  65. data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +2 -2
  66. data/ext/zstdruby/libzstd/dictBuilder/zdict.c +9 -87
  67. data/ext/zstdruby/libzstd/zdict.h +53 -31
  68. data/ext/zstdruby/libzstd/zstd.h +489 -90
  69. data/ext/zstdruby/libzstd/zstd_errors.h +27 -8
  70. data/ext/zstdruby/main.c +4 -0
  71. data/ext/zstdruby/streaming_compress.c +1 -7
  72. data/ext/zstdruby/zstdruby.c +110 -26
  73. data/lib/zstd-ruby/version.rb +1 -1
  74. data/lib/zstd-ruby.rb +0 -1
  75. metadata +7 -6
data/ext/zstdruby/libzstd/compress/zstd_compress_literals.c
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -13,11 +13,36 @@
  ***************************************/
  #include "zstd_compress_literals.h"
 
+
+ /* **************************************************************
+ *  Debug Traces
+ ****************************************************************/
+ #if DEBUGLEVEL >= 2
+
+ static size_t showHexa(const void* src, size_t srcSize)
+ {
+     const BYTE* const ip = (const BYTE*)src;
+     size_t u;
+     for (u=0; u<srcSize; u++) {
+         RAWLOG(5, " %02X", ip[u]); (void)ip;
+     }
+     RAWLOG(5, " \n");
+     return srcSize;
+ }
+
+ #endif
+
+
+ /* **************************************************************
+ *  Literals compression - special cases
+ ****************************************************************/
  size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
  {
      BYTE* const ostart = (BYTE*)dst;
      U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
 
+     DEBUGLOG(5, "ZSTD_noCompressLiterals: srcSize=%zu, dstCapacity=%zu", srcSize, dstCapacity);
+
      RETURN_ERROR_IF(srcSize + flSize > dstCapacity, dstSize_tooSmall, "");
 
      switch(flSize)
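
For reference, the flSize computed in ZSTD_noCompressLiterals() above is the size of the raw-literals header, which grows with the literal count. A minimal standalone sketch of that arithmetic (the helper name below is made up for illustration, not part of the library):

#include <stdio.h>
#include <stddef.h>

/* Hypothetical illustration: header size for a Raw_Literals_Block grows with
 * srcSize, using the same expression as ZSTD_noCompressLiterals() above. */
static size_t rawLiteralsHeaderSize(size_t srcSize)
{
    return 1 + (srcSize > 31) + (srcSize > 4095);
}

int main(void)
{
    /* 1-byte header up to 31 literals, 2 bytes up to 4095, 3 bytes beyond */
    printf("%zu %zu %zu\n",
           rawLiteralsHeaderSize(20),      /* -> 1 */
           rawLiteralsHeaderSize(1000),    /* -> 2 */
           rawLiteralsHeaderSize(100000)); /* -> 3 */
    return 0;
}
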
@@ -36,16 +61,30 @@ size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src,
      }
 
      ZSTD_memcpy(ostart + flSize, src, srcSize);
-     DEBUGLOG(5, "Raw literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize));
+     DEBUGLOG(5, "Raw (uncompressed) literals: %u -> %u", (U32)srcSize, (U32)(srcSize + flSize));
      return srcSize + flSize;
  }
 
+ static int allBytesIdentical(const void* src, size_t srcSize)
+ {
+     assert(srcSize >= 1);
+     assert(src != NULL);
+     { const BYTE b = ((const BYTE*)src)[0];
+       size_t p;
+       for (p=1; p<srcSize; p++) {
+           if (((const BYTE*)src)[p] != b) return 0;
+       }
+       return 1;
+     }
+ }
+
  size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize)
  {
      BYTE* const ostart = (BYTE*)dst;
      U32 const flSize = 1 + (srcSize>31) + (srcSize>4095);
 
-     (void)dstCapacity; /* dstCapacity already guaranteed to be >=4, hence large enough */
+     assert(dstCapacity >= 4); (void)dstCapacity;
+     assert(allBytesIdentical(src, srcSize));
 
      switch(flSize)
      {
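
The new allBytesIdentical() helper above exists only to back the stricter assert in ZSTD_compressRleLiteralsBlock(); a throwaway sketch of the property it checks, restated locally since the real function is file-static:

#include <assert.h>
#include <string.h>

/* Local restatement of the helper's contract, for illustration only. */
static int allBytesIdentical(const void* src, size_t srcSize)
{
    const unsigned char* const p = (const unsigned char*)src;
    size_t i;
    assert(srcSize >= 1 && src != NULL);
    for (i = 1; i < srcSize; i++)
        if (p[i] != p[0]) return 0;
    return 1;
}

int main(void)
{
    char rle[16]; memset(rle, 'a', sizeof(rle));   /* a run of identical bytes */
    assert(allBytesIdentical(rle, sizeof(rle)) == 1);
    assert(allBytesIdentical("ab", 2) == 0);
    return 0;
}
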
@@ -63,28 +102,51 @@ size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void*
      }
 
      ostart[flSize] = *(const BYTE*)src;
-     DEBUGLOG(5, "RLE literals: %u -> %u", (U32)srcSize, (U32)flSize + 1);
+     DEBUGLOG(5, "RLE : Repeated Literal (%02X: %u times) -> %u bytes encoded", ((const BYTE*)src)[0], (U32)srcSize, (U32)flSize + 1);
      return flSize+1;
  }
 
- size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
-                               ZSTD_hufCTables_t* nextHuf,
-                               ZSTD_strategy strategy, int disableLiteralCompression,
-                               void* dst, size_t dstCapacity,
-                               const void* src, size_t srcSize,
-                               void* entropyWorkspace, size_t entropyWorkspaceSize,
-                               const int bmi2,
-                               unsigned suspectUncompressible)
+ /* ZSTD_minLiteralsToCompress() :
+  * returns minimal amount of literals
+  * for literal compression to even be attempted.
+  * Minimum is made tighter as compression strategy increases.
+  */
+ static size_t
+ ZSTD_minLiteralsToCompress(ZSTD_strategy strategy, HUF_repeat huf_repeat)
+ {
+     assert((int)strategy >= 0);
+     assert((int)strategy <= 9);
+     /* btultra2 : min 8 bytes;
+      * then 2x larger for each successive compression strategy
+      * max threshold 64 bytes */
+     { int const shift = MIN(9-(int)strategy, 3);
+       size_t const mintc = (huf_repeat == HUF_repeat_valid) ? 6 : (size_t)8 << shift;
+       DEBUGLOG(7, "minLiteralsToCompress = %zu", mintc);
+       return mintc;
+     }
+ }
+
+ size_t ZSTD_compressLiterals (
+                   void* dst, size_t dstCapacity,
+                   const void* src, size_t srcSize,
+                   void* entropyWorkspace, size_t entropyWorkspaceSize,
+                   const ZSTD_hufCTables_t* prevHuf,
+                   ZSTD_hufCTables_t* nextHuf,
+                   ZSTD_strategy strategy,
+                   int disableLiteralCompression,
+                   int suspectUncompressible,
+                   int bmi2)
  {
-     size_t const minGain = ZSTD_minGain(srcSize, strategy);
      size_t const lhSize = 3 + (srcSize >= 1 KB) + (srcSize >= 16 KB);
      BYTE* const ostart = (BYTE*)dst;
      U32 singleStream = srcSize < 256;
      symbolEncodingType_e hType = set_compressed;
      size_t cLitSize;
 
-     DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i srcSize=%u)",
-                 disableLiteralCompression, (U32)srcSize);
+     DEBUGLOG(5,"ZSTD_compressLiterals (disableLiteralCompression=%i, srcSize=%u, dstCapacity=%zu)",
+                 disableLiteralCompression, (U32)srcSize, dstCapacity);
+
+     DEBUGLOG(6, "Completed literals listing (%zu bytes)", showHexa(src, srcSize));
 
      /* Prepare nextEntropy assuming reusing the existing table */
      ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
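
The ZSTD_minLiteralsToCompress() heuristic introduced above replaces the flat 63-byte COMPRESS_LITERALS_SIZE_MIN: btultra2 (strategy 9) gets a shift of 0 and an 8-byte minimum, each weaker strategy doubles it up to the 64-byte cap, and a still-valid Huffman table keeps the 6-byte minimum. A small sketch reproducing the same arithmetic (strategy passed as a plain int, tableStillValid standing in for the HUF_repeat_valid check):

#include <stdio.h>
#include <stddef.h>

#define MIN(a,b) ((a) < (b) ? (a) : (b))

/* Same arithmetic as ZSTD_minLiteralsToCompress() above; tableStillValid
 * stands in for (huf_repeat == HUF_repeat_valid). */
static size_t minLiteralsToCompress(int strategy, int tableStillValid)
{
    int const shift = MIN(9 - strategy, 3);
    return tableStillValid ? 6 : (size_t)8 << shift;
}

int main(void)
{
    printf("%zu\n", minLiteralsToCompress(9, 0));  /* btultra2    : 8  */
    printf("%zu\n", minLiteralsToCompress(7, 0));  /* btopt       : 32 */
    printf("%zu\n", minLiteralsToCompress(1, 0));  /* fast        : 64 (capped) */
    printf("%zu\n", minLiteralsToCompress(9, 1));  /* valid table : 6  */
    return 0;
}
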
@@ -92,40 +154,51 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
      if (disableLiteralCompression)
          return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
 
-     /* small ? don't even attempt compression (speed opt) */
- #   define COMPRESS_LITERALS_SIZE_MIN 63
-     { size_t const minLitSize = (prevHuf->repeatMode == HUF_repeat_valid) ? 6 : COMPRESS_LITERALS_SIZE_MIN;
-       if (srcSize <= minLitSize) return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
-     }
+     /* if too small, don't even attempt compression (speed opt) */
+     if (srcSize < ZSTD_minLiteralsToCompress(strategy, prevHuf->repeatMode))
+         return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
 
      RETURN_ERROR_IF(dstCapacity < lhSize+1, dstSize_tooSmall, "not enough space for compression");
      {   HUF_repeat repeat = prevHuf->repeatMode;
-         int const preferRepeat = strategy < ZSTD_lazy ? srcSize <= 1024 : 0;
+         int const flags = 0
+             | (bmi2 ? HUF_flags_bmi2 : 0)
+             | (strategy < ZSTD_lazy && srcSize <= 1024 ? HUF_flags_preferRepeat : 0)
+             | (strategy >= HUF_OPTIMAL_DEPTH_THRESHOLD ? HUF_flags_optimalDepth : 0)
+             | (suspectUncompressible ? HUF_flags_suspectUncompressible : 0);
+
+         typedef size_t (*huf_compress_f)(void*, size_t, const void*, size_t, unsigned, unsigned, void*, size_t, HUF_CElt*, HUF_repeat*, int);
+         huf_compress_f huf_compress;
          if (repeat == HUF_repeat_valid && lhSize == 3) singleStream = 1;
-         cLitSize = singleStream ?
-             HUF_compress1X_repeat(
-                 ostart+lhSize, dstCapacity-lhSize, src, srcSize,
-                 HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
-                 (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2, suspectUncompressible) :
-             HUF_compress4X_repeat(
-                 ostart+lhSize, dstCapacity-lhSize, src, srcSize,
-                 HUF_SYMBOLVALUE_MAX, HUF_TABLELOG_DEFAULT, entropyWorkspace, entropyWorkspaceSize,
-                 (HUF_CElt*)nextHuf->CTable, &repeat, preferRepeat, bmi2, suspectUncompressible);
+         huf_compress = singleStream ? HUF_compress1X_repeat : HUF_compress4X_repeat;
+         cLitSize = huf_compress(ostart+lhSize, dstCapacity-lhSize,
+                                 src, srcSize,
+                                 HUF_SYMBOLVALUE_MAX, LitHufLog,
+                                 entropyWorkspace, entropyWorkspaceSize,
+                                 (HUF_CElt*)nextHuf->CTable,
+                                 &repeat, flags);
+         DEBUGLOG(5, "%zu literals compressed into %zu bytes (before header)", srcSize, cLitSize);
          if (repeat != HUF_repeat_none) {
              /* reused the existing table */
-             DEBUGLOG(5, "Reusing previous huffman table");
+             DEBUGLOG(5, "reusing statistics from previous huffman block");
              hType = set_repeat;
          }
      }
 
-     if ((cLitSize==0) || (cLitSize >= srcSize - minGain) || ERR_isError(cLitSize)) {
-         ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
-         return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
-     }
+     {   size_t const minGain = ZSTD_minGain(srcSize, strategy);
+         if ((cLitSize==0) || (cLitSize >= srcSize - minGain) || ERR_isError(cLitSize)) {
+             ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+             return ZSTD_noCompressLiterals(dst, dstCapacity, src, srcSize);
+     }   }
      if (cLitSize==1) {
-         ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
-         return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
-     }
+         /* A return value of 1 signals that the alphabet consists of a single symbol.
+          * However, in some rare circumstances, it could be the compressed size (a single byte).
+          * For that outcome to have a chance to happen, it's necessary that `srcSize < 8`.
+          * (it's also necessary to not generate statistics).
+          * Therefore, in such a case, actively check that all bytes are identical. */
+         if ((srcSize >= 8) || allBytesIdentical(src, srcSize)) {
+             ZSTD_memcpy(nextHuf, prevHuf, sizeof(*prevHuf));
+             return ZSTD_compressRleLiteralsBlock(dst, dstCapacity, src, srcSize);
+     }   }
 
      if (hType == set_compressed) {
          /* using a newly constructed table */
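
The rewrite above works because the v1.5.4 HUF entry points take a single flags bitfield instead of the old preferRepeat/bmi2/suspectUncompressible trio, so HUF_compress1X_repeat and HUF_compress4X_repeat now share a signature and can be selected through one function pointer. A minimal sketch of the same flag-composition pattern, using stand-in flag values rather than the real HUF_flags_* enumerators from huf.h:

#include <stdio.h>

/* Stand-in flag bits for illustration; the real values are defined in
 * lib/common/huf.h as HUF_flags_bmi2, HUF_flags_preferRepeat, etc. */
enum { FLAG_bmi2 = 1 << 0, FLAG_preferRepeat = 1 << 1,
       FLAG_optimalDepth = 1 << 2, FLAG_suspectUncompressible = 1 << 3 };

static int makeFlags(int bmi2, int smallSrcFastStrategy,
                     int strongStrategy, int suspectUncompressible)
{
    /* OR together only the hints that apply to this block */
    return (bmi2 ? FLAG_bmi2 : 0)
         | (smallSrcFastStrategy ? FLAG_preferRepeat : 0)
         | (strongStrategy ? FLAG_optimalDepth : 0)
         | (suspectUncompressible ? FLAG_suspectUncompressible : 0);
}

int main(void)
{
    int const flags = makeFlags(1, 0, 1, 0);
    printf("flags = 0x%x\n", (unsigned)flags);   /* bmi2 + optimalDepth -> 0x5 */
    return 0;
}
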
@@ -136,16 +209,19 @@ size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
      switch(lhSize)
      {
      case 3: /* 2 - 2 - 10 - 10 */
-         { U32 const lhc = hType + ((!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
+         if (!singleStream) assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS);
+         { U32 const lhc = hType + ((U32)(!singleStream) << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<14);
            MEM_writeLE24(ostart, lhc);
            break;
          }
      case 4: /* 2 - 2 - 14 - 14 */
+         assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS);
          { U32 const lhc = hType + (2 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<18);
            MEM_writeLE32(ostart, lhc);
            break;
          }
      case 5: /* 2 - 2 - 18 - 18 */
+         assert(srcSize >= MIN_LITERALS_FOR_4_STREAMS);
          { U32 const lhc = hType + (3 << 2) + ((U32)srcSize<<4) + ((U32)cLitSize<<22);
            MEM_writeLE32(ostart, lhc);
            ostart[4] = (BYTE)(cLitSize >> 10);
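
The lhSize==3 case above packs four fields into 24 bits, little-endian: 2 bits of literals block type, 2 bits of size format, 10 bits of regenerated size, and 10 bits of compressed size. A standalone sketch of that packing (function and variable names here are illustrative, not the library's):

#include <stdio.h>
#include <stdint.h>

/* Pack the 3-byte Compressed_Literals_Block header used when both sizes
 * fit in 10 bits: 2-2-10-10 layout, written little-endian. */
static uint32_t packLitHeader3(uint32_t blockType, uint32_t sizeFormat,
                               uint32_t regenSize, uint32_t cSize)
{
    return blockType + (sizeFormat << 2) + (regenSize << 4) + (cSize << 14);
}

int main(void)
{
    /* e.g. compressed block type (2), 4-stream size format (1),
     * 300 literals regenerated from 120 compressed bytes */
    uint32_t const lhc = packLitHeader3(2, 1, 300, 120);
    unsigned char hdr[3] = { (unsigned char)lhc,
                             (unsigned char)(lhc >> 8),
                             (unsigned char)(lhc >> 16) };
    printf("%02X %02X %02X\n", hdr[0], hdr[1], hdr[2]);
    return 0;
}
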
data/ext/zstdruby/libzstd/compress/zstd_compress_literals.h
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -16,16 +16,24 @@
 
  size_t ZSTD_noCompressLiterals (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
 
+ /* ZSTD_compressRleLiteralsBlock() :
+  * Conditions :
+  * - All bytes in @src are identical
+  * - dstCapacity >= 4 */
  size_t ZSTD_compressRleLiteralsBlock (void* dst, size_t dstCapacity, const void* src, size_t srcSize);
 
- /* If suspectUncompressible then some sampling checks will be run to potentially skip huffman coding */
- size_t ZSTD_compressLiterals (ZSTD_hufCTables_t const* prevHuf,
-                               ZSTD_hufCTables_t* nextHuf,
-                               ZSTD_strategy strategy, int disableLiteralCompression,
-                               void* dst, size_t dstCapacity,
+ /* ZSTD_compressLiterals():
+  * @entropyWorkspace: must be aligned on 4-bytes boundaries
+  * @entropyWorkspaceSize : must be >= HUF_WORKSPACE_SIZE
+  * @suspectUncompressible: sampling checks, to potentially skip huffman coding
+  */
+ size_t ZSTD_compressLiterals (void* dst, size_t dstCapacity,
                                const void* src, size_t srcSize,
                                void* entropyWorkspace, size_t entropyWorkspaceSize,
-                               const int bmi2,
-                               unsigned suspectUncompressible);
+                               const ZSTD_hufCTables_t* prevHuf,
+                               ZSTD_hufCTables_t* nextHuf,
+                               ZSTD_strategy strategy, int disableLiteralCompression,
+                               int suspectUncompressible,
+                               int bmi2);
 
  #endif /* ZSTD_COMPRESS_LITERALS_H */
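
The new header comment spells out the workspace contract: the scratch buffer handed to ZSTD_compressLiterals() must be at least HUF_WORKSPACE_SIZE bytes and 4-byte aligned. A tiny sketch of one way a caller could guarantee that alignment (WORKSPACE_BYTES is a stand-in constant, not the real HUF_WORKSPACE_SIZE):

#include <stdio.h>
#include <stdint.h>

#define WORKSPACE_BYTES 6144   /* stand-in size; the real minimum is HUF_WORKSPACE_SIZE */

int main(void)
{
    /* Backing the workspace with a uint32_t array guarantees 4-byte alignment. */
    static uint32_t wksp[WORKSPACE_BYTES / sizeof(uint32_t)];
    printf("aligned: %d\n", ((uintptr_t)wksp % 4) == 0);   /* -> 1 */
    return 0;
}
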
data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.c
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -58,7 +58,7 @@ static unsigned ZSTD_useLowProbCount(size_t const nbSeq)
  {
      /* Heuristic: This should cover most blocks <= 16K and
       * start to fade out after 16K to about 32K depending on
-      * comprssibility.
+      * compressibility.
       */
      return nbSeq >= 2048;
  }
@@ -166,7 +166,7 @@ ZSTD_selectEncodingType(
      if (mostFrequent == nbSeq) {
          *repeatMode = FSE_repeat_none;
          if (isDefaultAllowed && nbSeq <= 2) {
-             /* Prefer set_basic over set_rle when there are 2 or less symbols,
+             /* Prefer set_basic over set_rle when there are 2 or fewer symbols,
               * since RLE uses 1 byte, but set_basic uses 5-6 bits per symbol.
               * If basic encoding isn't possible, always choose RLE.
               */
data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.h
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.c
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -36,13 +36,14 @@
   *  If it is set_compressed, first sub-block's literals section will be Treeless_Literals_Block
   *  and the following sub-blocks' literals sections will be Treeless_Literals_Block.
   *  @return : compressed size of literals section of a sub-block
-  *            Or 0 if it unable to compress.
+  *            Or 0 if unable to compress.
   *            Or error code */
- static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
-                                             const ZSTD_hufCTablesMetadata_t* hufMetadata,
-                                             const BYTE* literals, size_t litSize,
-                                             void* dst, size_t dstSize,
-                                             const int bmi2, int writeEntropy, int* entropyWritten)
+ static size_t
+ ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
+                               const ZSTD_hufCTablesMetadata_t* hufMetadata,
+                               const BYTE* literals, size_t litSize,
+                               void* dst, size_t dstSize,
+                               const int bmi2, int writeEntropy, int* entropyWritten)
  {
      size_t const header = writeEntropy ? 200 : 0;
      size_t const lhSize = 3 + (litSize >= (1 KB - header)) + (litSize >= (16 KB - header));
@@ -53,8 +54,6 @@ static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
      symbolEncodingType_e hType = writeEntropy ? hufMetadata->hType : set_repeat;
      size_t cLitSize = 0;
 
-     (void)bmi2; /* TODO bmi2... */
-
      DEBUGLOG(5, "ZSTD_compressSubBlock_literal (litSize=%zu, lhSize=%zu, writeEntropy=%d)", litSize, lhSize, writeEntropy);
 
      *entropyWritten = 0;
@@ -76,9 +75,9 @@ static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
          DEBUGLOG(5, "ZSTD_compressSubBlock_literal (hSize=%zu)", hufMetadata->hufDesSize);
      }
 
-     /* TODO bmi2 */
-     { const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable)
-                                         : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable);
+     {   int const flags = bmi2 ? HUF_flags_bmi2 : 0;
+         const size_t cSize = singleStream ? HUF_compress1X_usingCTable(op, oend-op, literals, litSize, hufTable, flags)
+                                           : HUF_compress4X_usingCTable(op, oend-op, literals, litSize, hufTable, flags);
          op += cSize;
          cLitSize += cSize;
          if (cSize == 0 || ERR_isError(cSize)) {
@@ -126,7 +125,11 @@ static size_t ZSTD_compressSubBlock_literal(const HUF_CElt* hufTable,
      return op-ostart;
  }
 
- static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef* sequences, size_t nbSeq, size_t litSize, int lastSequence) {
+ static size_t
+ ZSTD_seqDecompressedSize(seqStore_t const* seqStore,
+                          const seqDef* sequences, size_t nbSeq,
+                          size_t litSize, int lastSequence)
+ {
      const seqDef* const sstart = sequences;
      const seqDef* const send = sequences + nbSeq;
      const seqDef* sp = sstart;
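
ZSTD_seqDecompressedSize(), reformatted above, totals what the sub-block will regenerate: the literal bytes plus every sequence's match length. A toy model of that accounting with a simplified sequence type (the real seqDef stores biased length fields, so this is illustrative only):

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-in for seqDef; the real structure stores biased fields. */
typedef struct { size_t litLength; size_t matchLength; } ToySeq;

/* Decompressed size of a sub-block = all literal bytes + all match lengths. */
static size_t toySeqDecompressedSize(const ToySeq* seqs, size_t nbSeq, size_t litSize)
{
    size_t total = litSize, i;
    for (i = 0; i < nbSeq; i++)
        total += seqs[i].matchLength;
    return total;
}

int main(void)
{
    ToySeq const seqs[2] = { {5, 10}, {3, 7} };
    /* 8 literal bytes consumed by the sequences, plus 10+7 matched bytes */
    printf("%zu\n", toySeqDecompressedSize(seqs, 2, 8));   /* -> 25 */
    return 0;
}
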
@@ -156,13 +159,14 @@ static size_t ZSTD_seqDecompressedSize(seqStore_t const* seqStore, const seqDef*
   *  @return : compressed size of sequences section of a sub-block
   *            Or 0 if it is unable to compress
   *            Or error code. */
- static size_t ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables,
-                                               const ZSTD_fseCTablesMetadata_t* fseMetadata,
-                                               const seqDef* sequences, size_t nbSeq,
-                                               const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
-                                               const ZSTD_CCtx_params* cctxParams,
-                                               void* dst, size_t dstCapacity,
-                                               const int bmi2, int writeEntropy, int* entropyWritten)
+ static size_t
+ ZSTD_compressSubBlock_sequences(const ZSTD_fseCTables_t* fseTables,
+                                 const ZSTD_fseCTablesMetadata_t* fseMetadata,
+                                 const seqDef* sequences, size_t nbSeq,
+                                 const BYTE* llCode, const BYTE* mlCode, const BYTE* ofCode,
+                                 const ZSTD_CCtx_params* cctxParams,
+                                 void* dst, size_t dstCapacity,
+                                 const int bmi2, int writeEntropy, int* entropyWritten)
  {
      const int longOffsets = cctxParams->cParams.windowLog > STREAM_ACCUMULATOR_MIN;
      BYTE* const ostart = (BYTE*)dst;
@@ -539,7 +543,7 @@ static size_t ZSTD_compressSubBlock_multi(const seqStore_t* seqStorePtr,
          repcodes_t rep;
          ZSTD_memcpy(&rep, prevCBlock->rep, sizeof(rep));
          for (seq = sstart; seq < sp; ++seq) {
-             ZSTD_updateRep(rep.rep, seq->offBase - 1, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
+             ZSTD_updateRep(rep.rep, seq->offBase, ZSTD_getSequenceLength(seqStorePtr, seq).litLength == 0);
          }
          ZSTD_memcpy(nextCBlock->rep, &rep, sizeof(rep));
      }
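
The only functional change in this hunk is that ZSTD_updateRep() now receives seq->offBase directly rather than offBase - 1, following the offset-base convention used elsewhere in 1.5.4. For background, the repcode state it maintains is a short history of recent offsets; a toy rotation for the plain-offset case is sketched below (the library's real update also handles the repcode and ll0 cases encoded in offBase):

#include <stdio.h>

#define REP_NUM 3

/* Toy illustration of a repcode history: pushing a brand-new offset shifts
 * the previous entries down. The real ZSTD_updateRep() additionally maps
 * repcode-valued offBase inputs onto this history instead of pushing. */
static void pushOffset(unsigned rep[REP_NUM], unsigned newOffset)
{
    rep[2] = rep[1];
    rep[1] = rep[0];
    rep[0] = newOffset;
}

int main(void)
{
    unsigned rep[REP_NUM] = { 1, 4, 8 };   /* zstd's initial repcode values */
    pushOffset(rep, 123);
    printf("%u %u %u\n", rep[0], rep[1], rep[2]);   /* -> 123 1 4 */
    return 0;
}
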
data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.h
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
data/ext/zstdruby/libzstd/compress/zstd_cwksp.h
@@ -1,5 +1,5 @@
  /*
- * Copyright (c) Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -348,7 +348,9 @@ ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase
      if (alloc) {
          alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
          if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
-             __asan_unpoison_memory_region(alloc, bytes);
+             /* We need to keep the redzone poisoned while unpoisoning the bytes that
+              * are actually allocated. */
+             __asan_unpoison_memory_region(alloc, bytes - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE);
          }
      }
  #endif
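
The corrected unpoison call reflects the layout the workspace uses under AddressSanitizer: a poisoned redzone on each side of the caller-visible bytes. A small sketch of the size arithmetic, with REDZONE standing in for ZSTD_CWKSP_ASAN_REDZONE_SIZE:

#include <stdio.h>
#include <stddef.h>

#define REDZONE 128   /* stand-in for ZSTD_CWKSP_ASAN_REDZONE_SIZE */

/* Given a reservation of `bytes` that includes a leading and a trailing
 * redzone, only the middle region may be unpoisoned for the caller. */
static size_t usablePayload(size_t bytes)
{
    return bytes - 2 * REDZONE;
}

int main(void)
{
    size_t const reserved = 4096 + 2 * REDZONE;
    /* caller pointer = base + REDZONE; unpoison usablePayload(reserved) bytes */
    printf("usable = %zu\n", usablePayload(reserved));   /* -> 4096 */
    return 0;
}
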
@@ -499,7 +501,7 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
      assert(ws->tableValidEnd >= ws->objectEnd);
      assert(ws->tableValidEnd <= ws->allocStart);
      if (ws->tableValidEnd < ws->tableEnd) {
-         ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
+         ZSTD_memset(ws->tableValidEnd, 0, (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd));
      }
      ZSTD_cwksp_mark_tables_clean(ws);
  }