zstd-ruby 1.4.4.0 → 1.5.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (102)
  1. checksums.yaml +4 -4
  2. data/.github/dependabot.yml +8 -0
  3. data/.github/workflows/ruby.yml +35 -0
  4. data/README.md +2 -2
  5. data/ext/zstdruby/extconf.rb +1 -0
  6. data/ext/zstdruby/libzstd/BUCK +5 -7
  7. data/ext/zstdruby/libzstd/Makefile +241 -173
  8. data/ext/zstdruby/libzstd/README.md +76 -18
  9. data/ext/zstdruby/libzstd/common/bitstream.h +75 -57
  10. data/ext/zstdruby/libzstd/common/compiler.h +196 -20
  11. data/ext/zstdruby/libzstd/common/cpu.h +1 -3
  12. data/ext/zstdruby/libzstd/common/debug.c +11 -31
  13. data/ext/zstdruby/libzstd/common/debug.h +22 -49
  14. data/ext/zstdruby/libzstd/common/entropy_common.c +208 -76
  15. data/ext/zstdruby/libzstd/common/error_private.c +3 -1
  16. data/ext/zstdruby/libzstd/common/error_private.h +87 -4
  17. data/ext/zstdruby/libzstd/common/fse.h +51 -42
  18. data/ext/zstdruby/libzstd/common/fse_decompress.c +149 -57
  19. data/ext/zstdruby/libzstd/common/huf.h +60 -54
  20. data/ext/zstdruby/libzstd/common/mem.h +87 -98
  21. data/ext/zstdruby/libzstd/common/pool.c +23 -17
  22. data/ext/zstdruby/libzstd/common/pool.h +3 -3
  23. data/ext/zstdruby/libzstd/common/portability_macros.h +131 -0
  24. data/ext/zstdruby/libzstd/common/threading.c +10 -8
  25. data/ext/zstdruby/libzstd/common/threading.h +4 -3
  26. data/ext/zstdruby/libzstd/common/xxhash.c +15 -873
  27. data/ext/zstdruby/libzstd/common/xxhash.h +5572 -191
  28. data/ext/zstdruby/libzstd/common/zstd_common.c +10 -10
  29. data/ext/zstdruby/libzstd/common/zstd_deps.h +111 -0
  30. data/ext/zstdruby/libzstd/common/zstd_internal.h +252 -108
  31. data/ext/zstdruby/libzstd/common/zstd_trace.h +163 -0
  32. data/ext/zstdruby/libzstd/compress/clevels.h +134 -0
  33. data/ext/zstdruby/libzstd/compress/fse_compress.c +105 -85
  34. data/ext/zstdruby/libzstd/compress/hist.c +41 -63
  35. data/ext/zstdruby/libzstd/compress/hist.h +13 -33
  36. data/ext/zstdruby/libzstd/compress/huf_compress.c +831 -259
  37. data/ext/zstdruby/libzstd/compress/zstd_compress.c +3213 -1007
  38. data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +493 -71
  39. data/ext/zstdruby/libzstd/compress/zstd_compress_literals.c +21 -16
  40. data/ext/zstdruby/libzstd/compress/zstd_compress_literals.h +4 -2
  41. data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.c +51 -24
  42. data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.h +10 -3
  43. data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.c +573 -0
  44. data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.h +32 -0
  45. data/ext/zstdruby/libzstd/compress/zstd_cwksp.h +208 -81
  46. data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +315 -137
  47. data/ext/zstdruby/libzstd/compress/zstd_double_fast.h +2 -2
  48. data/ext/zstdruby/libzstd/compress/zstd_fast.c +319 -128
  49. data/ext/zstdruby/libzstd/compress/zstd_fast.h +2 -2
  50. data/ext/zstdruby/libzstd/compress/zstd_lazy.c +1156 -171
  51. data/ext/zstdruby/libzstd/compress/zstd_lazy.h +59 -1
  52. data/ext/zstdruby/libzstd/compress/zstd_ldm.c +331 -206
  53. data/ext/zstdruby/libzstd/compress/zstd_ldm.h +15 -3
  54. data/ext/zstdruby/libzstd/compress/zstd_ldm_geartab.h +106 -0
  55. data/ext/zstdruby/libzstd/compress/zstd_opt.c +403 -226
  56. data/ext/zstdruby/libzstd/compress/zstd_opt.h +1 -1
  57. data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +188 -453
  58. data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +32 -114
  59. data/ext/zstdruby/libzstd/decompress/huf_decompress.c +1065 -410
  60. data/ext/zstdruby/libzstd/decompress/huf_decompress_amd64.S +571 -0
  61. data/ext/zstdruby/libzstd/decompress/zstd_ddict.c +20 -16
  62. data/ext/zstdruby/libzstd/decompress/zstd_ddict.h +3 -3
  63. data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +691 -230
  64. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.c +1072 -323
  65. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.h +16 -7
  66. data/ext/zstdruby/libzstd/decompress/zstd_decompress_internal.h +71 -10
  67. data/ext/zstdruby/libzstd/deprecated/zbuff.h +3 -3
  68. data/ext/zstdruby/libzstd/deprecated/zbuff_common.c +2 -2
  69. data/ext/zstdruby/libzstd/deprecated/zbuff_compress.c +24 -4
  70. data/ext/zstdruby/libzstd/deprecated/zbuff_decompress.c +1 -1
  71. data/ext/zstdruby/libzstd/dictBuilder/cover.c +57 -40
  72. data/ext/zstdruby/libzstd/dictBuilder/cover.h +20 -9
  73. data/ext/zstdruby/libzstd/dictBuilder/divsufsort.c +1 -1
  74. data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +54 -35
  75. data/ext/zstdruby/libzstd/dictBuilder/zdict.c +151 -57
  76. data/ext/zstdruby/libzstd/dll/example/Makefile +2 -1
  77. data/ext/zstdruby/libzstd/dll/example/README.md +16 -22
  78. data/ext/zstdruby/libzstd/legacy/zstd_legacy.h +4 -4
  79. data/ext/zstdruby/libzstd/legacy/zstd_v01.c +25 -19
  80. data/ext/zstdruby/libzstd/legacy/zstd_v01.h +1 -1
  81. data/ext/zstdruby/libzstd/legacy/zstd_v02.c +18 -14
  82. data/ext/zstdruby/libzstd/legacy/zstd_v02.h +1 -1
  83. data/ext/zstdruby/libzstd/legacy/zstd_v03.c +18 -14
  84. data/ext/zstdruby/libzstd/legacy/zstd_v03.h +1 -1
  85. data/ext/zstdruby/libzstd/legacy/zstd_v04.c +22 -16
  86. data/ext/zstdruby/libzstd/legacy/zstd_v04.h +1 -1
  87. data/ext/zstdruby/libzstd/legacy/zstd_v05.c +29 -25
  88. data/ext/zstdruby/libzstd/legacy/zstd_v05.h +2 -2
  89. data/ext/zstdruby/libzstd/legacy/zstd_v06.c +29 -25
  90. data/ext/zstdruby/libzstd/legacy/zstd_v06.h +1 -1
  91. data/ext/zstdruby/libzstd/legacy/zstd_v07.c +34 -26
  92. data/ext/zstdruby/libzstd/legacy/zstd_v07.h +1 -1
  93. data/ext/zstdruby/libzstd/libzstd.mk +185 -0
  94. data/ext/zstdruby/libzstd/libzstd.pc.in +4 -3
  95. data/ext/zstdruby/libzstd/modulemap/module.modulemap +4 -0
  96. data/ext/zstdruby/libzstd/{dictBuilder/zdict.h → zdict.h} +201 -31
  97. data/ext/zstdruby/libzstd/zstd.h +760 -234
  98. data/ext/zstdruby/libzstd/{common/zstd_errors.h → zstd_errors.h} +3 -1
  99. data/ext/zstdruby/zstdruby.c +2 -2
  100. data/lib/zstd-ruby/version.rb +1 -1
  101. metadata +20 -9
  102. data/.travis.yml +0 -14
data/ext/zstdruby/libzstd/compress/zstd_cwksp.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -14,7 +14,7 @@
 /*-*************************************
 *  Dependencies
 ***************************************/
-#include "zstd_internal.h"
+#include "../common/zstd_internal.h"

 #if defined (__cplusplus)
 extern "C" {
@@ -24,16 +24,6 @@ extern "C" {
 *  Constants
 ***************************************/

-/* define "workspace is too large" as this number of times larger than needed */
-#define ZSTD_WORKSPACETOOLARGE_FACTOR 3
-
-/* when workspace is continuously too large
- * during at least this number of times,
- * context's memory usage is considered wasteful,
- * because it's sized to handle a worst case scenario which rarely happens.
- * In which case, resize it down to free some memory */
-#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128
-
 /* Since the workspace is effectively its own little malloc implementation /
  * arena, when we run under ASAN, we should similarly insert redzones between
  * each internal element of the workspace, so ASAN will catch overruns that
@@ -45,6 +35,10 @@ extern "C" {
 #define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
 #endif

+
+/* Set our tables and aligneds to align by 64 bytes */
+#define ZSTD_CWKSP_ALIGNMENT_BYTES 64
+
 /*-*************************************
 *  Structures
 ***************************************/
@@ -54,6 +48,16 @@ typedef enum {
     ZSTD_cwksp_alloc_aligned
 } ZSTD_cwksp_alloc_phase_e;

+/**
+ * Used to describe whether the workspace is statically allocated (and will not
+ * necessarily ever be freed), or if it's dynamically allocated and we can
+ * expect a well-formed caller to free this.
+ */
+typedef enum {
+    ZSTD_cwksp_dynamic_alloc,
+    ZSTD_cwksp_static_alloc
+} ZSTD_cwksp_static_alloc_e;
+
 /**
  * Zstd fits all its internal datastructures into a single continuous buffer,
  * so that it only needs to perform a single OS allocation (or so that a buffer
@@ -102,7 +106,7 @@ typedef enum {
  *
  * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
  *   so that literally everything fits in a single buffer. Note: if present,
- *   this must be the first object in the workspace, since ZSTD_free{CCtx,
+ *   this must be the first object in the workspace, since ZSTD_customFree{CCtx,
  *   CDict}() rely on a pointer comparison to see whether one or two frees are
  *   required.
  *
@@ -117,10 +121,11 @@ typedef enum {
  * - Tables: these are any of several different datastructures (hash tables,
  *   chain tables, binary trees) that all respect a common format: they are
  *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
- *   Their sizes depend on the cparams.
+ *   Their sizes depend on the cparams. These tables are 64-byte aligned.
  *
  * - Aligned: these buffers are used for various purposes that require 4 byte
- *   alignment, but don't require any initialization before they're used.
+ *   alignment, but don't require any initialization before they're used. These
+ *   buffers are each aligned to 64 bytes.
  *
  * - Buffers: these buffers are used for various purposes that don't require
  *   any alignment or initialization before they're used. This means they can
133
138
  *
134
139
  * 1. Objects
135
140
  * 2. Buffers
136
- * 3. Aligned
137
- * 4. Tables
141
+ * 3. Aligned/Tables
138
142
  *
139
143
  * Attempts to reserve objects of different types out of order will fail.
140
144
  */
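The ordering above is the load-bearing invariant of the cwksp: reservations must be requested in phase order (objects, then buffers, then aligned/tables), and the phase only ever advances. A minimal sketch of the intended call order, assuming zstd's internal headers (lib/common, lib/compress) are on the include path; this is an internal API, not part of zstd's public interface, so details may differ between releases:

/* Hedged sketch: exercises the cwksp phase order described above.
 * Assumes the zstd source tree is available; NOT public API. */
#include <stdlib.h>
#include "zstd_cwksp.h"

static int demo_cwksp_phases(void)
{
    size_t const wkspSize = 1 << 20;   /* 1 MiB arena, arbitrary for the demo */
    void* const mem = malloc(wkspSize);
    ZSTD_cwksp ws;
    if (mem == NULL) return 1;
    ZSTD_cwksp_init(&ws, mem, wkspSize, ZSTD_cwksp_dynamic_alloc);

    /* Phase 1: objects must come first. */
    {   void* const obj = ZSTD_cwksp_reserve_object(&ws, 64);   (void)obj; }
    /* Phase 2: plain buffers, no alignment guarantee. */
    {   BYTE* const buf = ZSTD_cwksp_reserve_buffer(&ws, 1000); (void)buf; }
    /* Phase 3: tables and aligneds may now interleave; both are 64-byte
     * aligned and sized in 64-byte multiples after this change. */
    {   void* const tbl = ZSTD_cwksp_reserve_table(&ws, 4096);
        void* const al  = ZSTD_cwksp_reserve_aligned(&ws, 256);
        (void)tbl; (void)al;
    }
    /* Reserving another object here would fail: the phase never goes back. */
    {   int const failed = ZSTD_cwksp_reserve_failed(&ws);
        free(mem);
        return failed;
    }
}

int main(void) { return demo_cwksp_phases(); }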
@@ -147,9 +151,10 @@ typedef struct {
     void* tableValidEnd;
     void* allocStart;

-    int allocFailed;
+    BYTE allocFailed;
     int workspaceOversizedDuration;
     ZSTD_cwksp_alloc_phase_e phase;
+    ZSTD_cwksp_static_alloc_e isStatic;
 } ZSTD_cwksp;

 /*-*************************************
@@ -186,39 +191,123 @@ MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
  * Since tables aren't currently redzoned, you don't need to call through this
  * to figure out how much space you need for the matchState tables. Everything
  * else is though.
+ *
+ * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size().
  */
 MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    if (size == 0)
+        return 0;
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
 #else
     return size;
 #endif
 }

-MEM_STATIC void ZSTD_cwksp_internal_advance_phase(
+/**
+ * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
+ * Used to determine the number of bytes required for a given "aligned".
+ */
+MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
+    return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES));
+}
+
+/**
+ * Returns the amount of additional space the cwksp must allocate
+ * for internal purposes (currently only alignment).
+ */
+MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
+    /* For alignment, the wksp will always allocate an additional n_1=[1, 64] bytes
+     * to align the beginning of tables section, as well as another n_2=[0, 63] bytes
+     * to align the beginning of the aligned section.
+     *
+     * n_1 + n_2 == 64 bytes if the cwksp is freshly allocated, due to tables and
+     * aligneds being sized in multiples of 64 bytes.
+     */
+    size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES;
+    return slackSpace;
+}
+
+
+/**
+ * Return the number of additional bytes required to align a pointer to the given number of bytes.
+ * alignBytes must be a power of two.
+ */
+MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
+    size_t const alignBytesMask = alignBytes - 1;
+    size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
+    assert((alignBytes & alignBytesMask) == 0);
+    assert(bytes != ZSTD_CWKSP_ALIGNMENT_BYTES);
+    return bytes;
+}
+
+/**
+ * Internal function. Do not use directly.
+ * Reserves the given number of bytes within the aligned/buffer segment of the wksp, which
+ * counts from the end of the wksp. (as opposed to the object/table segment)
+ *
+ * Returns a pointer to the beginning of that space.
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes) {
+    void* const alloc = (BYTE*)ws->allocStart - bytes;
+    void* const bottom = ws->tableEnd;
+    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
+        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
+    ZSTD_cwksp_assert_internal_consistency(ws);
+    assert(alloc >= bottom);
+    if (alloc < bottom) {
+        DEBUGLOG(4, "cwksp: alloc failed!");
+        ws->allocFailed = 1;
+        return NULL;
+    }
+    if (alloc < ws->tableValidEnd) {
+        ws->tableValidEnd = alloc;
+    }
+    ws->allocStart = alloc;
+    return alloc;
+}
+
+/**
+ * Moves the cwksp to the next phase, and does any necessary allocations.
+ * Returns a 0 on success, or zstd error
+ */
+MEM_STATIC size_t ZSTD_cwksp_internal_advance_phase(
         ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
     assert(phase >= ws->phase);
     if (phase > ws->phase) {
+        /* Going from allocating objects to allocating buffers */
         if (ws->phase < ZSTD_cwksp_alloc_buffers &&
                 phase >= ZSTD_cwksp_alloc_buffers) {
             ws->tableValidEnd = ws->objectEnd;
         }
+
+        /* Going from allocating buffers to allocating aligneds/tables */
         if (ws->phase < ZSTD_cwksp_alloc_aligned &&
                 phase >= ZSTD_cwksp_alloc_aligned) {
-            /* If unaligned allocations down from a too-large top have left us
-             * unaligned, we need to realign our alloc ptr. Technically, this
-             * can consume space that is unaccounted for in the neededSpace
-             * calculation. However, I believe this can only happen when the
-             * workspace is too large, and specifically when it is too large
-             * by a larger margin than the space that will be consumed. */
-            /* TODO: cleaner, compiler warning friendly way to do this??? */
-            ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));
-            if (ws->allocStart < ws->tableValidEnd) {
-                ws->tableValidEnd = ws->allocStart;
+            {   /* Align the start of the "aligned" to 64 bytes. Use [1, 64] bytes. */
+                size_t const bytesToAlign =
+                    ZSTD_CWKSP_ALIGNMENT_BYTES - ZSTD_cwksp_bytes_to_align_ptr(ws->allocStart, ZSTD_CWKSP_ALIGNMENT_BYTES);
+                DEBUGLOG(5, "reserving aligned alignment addtl space: %zu", bytesToAlign);
+                ZSTD_STATIC_ASSERT((ZSTD_CWKSP_ALIGNMENT_BYTES & (ZSTD_CWKSP_ALIGNMENT_BYTES - 1)) == 0); /* power of 2 */
+                RETURN_ERROR_IF(!ZSTD_cwksp_reserve_internal_buffer_space(ws, bytesToAlign),
+                                memory_allocation, "aligned phase - alignment initial allocation failed!");
+            }
+            {   /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
+                void* const alloc = ws->objectEnd;
+                size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
+                void* const end = (BYTE*)alloc + bytesToAlign;
+                DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
+                RETURN_ERROR_IF(end > ws->workspaceEnd, memory_allocation,
+                                "table phase - alignment initial allocation failed!");
+                ws->objectEnd = end;
+                ws->tableEnd = end;
+                ws->tableValidEnd = end;
             }
         }
         ws->phase = phase;
+        ZSTD_cwksp_assert_internal_consistency(ws);
     }
+    return 0;
 }

 /**
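ZSTD_cwksp_bytes_to_align_ptr() is plain power-of-two arithmetic, and the n_1/n_2 ranges quoted in ZSTD_cwksp_slack_space_required() fall out of it directly. A self-contained restatement of the computation (reimplemented here for illustration; not zstd code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as ZSTD_cwksp_bytes_to_align_ptr(): distance from an
 * address up to the next multiple of alignBytes, in [0, alignBytes-1]. */
static size_t bytes_to_align(uintptr_t addr, size_t alignBytes)
{
    size_t const mask = alignBytes - 1;
    assert((alignBytes & mask) == 0);            /* power of two */
    return (alignBytes - (addr & mask)) & mask;
}

int main(void)
{
    printf("%zu\n", bytes_to_align(0x1000, 64));      /* 0: already aligned */
    printf("%zu\n", bytes_to_align(0x1001, 64));      /* 63: one byte past a boundary */
    /* The aligned section reserves 64 - bytes_to_align(allocStart, 64), a
     * value in [1, 64]; the tables section consumes
     * bytes_to_align(objectEnd, 64), a value in [0, 63]. Together they give
     * the fixed 64-byte slack reported by ZSTD_cwksp_slack_space_required(). */
    printf("%zu\n", (size_t)64 - bytes_to_align(0x1001, 64)); /* 1 */
    return 0;
}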
@@ -234,34 +323,26 @@ MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
 MEM_STATIC void* ZSTD_cwksp_reserve_internal(
         ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
     void* alloc;
-    void* bottom = ws->tableEnd;
-    ZSTD_cwksp_internal_advance_phase(ws, phase);
-    alloc = (BYTE *)ws->allocStart - bytes;
+    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
+        return NULL;
+    }

-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     /* over-reserve space */
-    alloc = (BYTE *)alloc - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+    bytes += 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
 #endif

-    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
-        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
-    ZSTD_cwksp_assert_internal_consistency(ws);
-    assert(alloc >= bottom);
-    if (alloc < bottom) {
-        DEBUGLOG(4, "cwksp: alloc failed!");
-        ws->allocFailed = 1;
-        return NULL;
-    }
-    if (alloc < ws->tableValidEnd) {
-        ws->tableValidEnd = alloc;
-    }
-    ws->allocStart = alloc;
+    alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);

-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
      * either size. */
-    alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
-    __asan_unpoison_memory_region(alloc, bytes);
+    if (alloc) {
+        alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+        if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+            __asan_unpoison_memory_region(alloc, bytes);
+        }
+    }
 #endif

     return alloc;
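Note the shape of the ASAN path: the requested size is padded by two redzones before a single reservation, the returned pointer is then advanced past the leading redzone, and only dynamically allocated workspaces are unpoisoned. A standalone sketch of that over-reserve-and-offset layout, with plain malloc standing in for the workspace arena (illustrative only; real ASAN builds keep the flanks poisoned via the sanitizer runtime):

#include <stdio.h>
#include <stdlib.h>

#define REDZONE 128   /* mirrors ZSTD_CWKSP_ASAN_REDZONE_SIZE */

/* Over-reserve by two redzones and hand back an interior pointer, the same
 * layout ZSTD_cwksp_reserve_internal() builds under ASAN. In a real ASAN
 * build the two 128-byte flanks stay poisoned, so accesses running off
 * either end of the usable region are caught; here nothing is poisoned. */
static void* reserve_with_redzones(size_t bytes, void** rawOut)
{
    char* const raw = malloc(bytes + 2 * REDZONE);
    if (raw == NULL) return NULL;
    *rawOut = raw;           /* caller frees the raw pointer, not the interior one */
    return raw + REDZONE;    /* usable region: [raw+128, raw+128+bytes) */
}

int main(void)
{
    void* raw = NULL;
    void* const p = reserve_with_redzones(1000, &raw);
    printf("raw=%p usable=%p\n", raw, p);
    free(raw);
    return 0;
}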
@@ -275,28 +356,36 @@ MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
 }

 /**
- * Reserves and returns memory sized on and aligned on sizeof(unsigned).
+ * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
  */
 MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
-    assert((bytes & (sizeof(U32)-1)) == 0);
-    return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
+    void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
+                                            ZSTD_cwksp_alloc_aligned);
+    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
+    return ptr;
 }

 /**
- * Aligned on sizeof(unsigned). These buffers have the special property that
+ * Aligned on 64 bytes. These buffers have the special property that
  * their values remain constrained, allowing us to re-use them without
  * memset()-ing them.
  */
 MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
     const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
-    void* alloc = ws->tableEnd;
-    void* end = (BYTE *)alloc + bytes;
-    void* top = ws->allocStart;
+    void* alloc;
+    void* end;
+    void* top;
+
+    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
+        return NULL;
+    }
+    alloc = ws->tableEnd;
+    end = (BYTE *)alloc + bytes;
+    top = ws->allocStart;

     DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
         alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
     assert((bytes & (sizeof(U32)-1)) == 0);
-    ZSTD_cwksp_internal_advance_phase(ws, phase);
     ZSTD_cwksp_assert_internal_consistency(ws);
     assert(end <= top);
     if (end > top) {
@@ -306,10 +395,14 @@ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
     }
     ws->tableEnd = end;

-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
-    __asan_unpoison_memory_region(alloc, bytes);
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+        __asan_unpoison_memory_region(alloc, bytes);
+    }
 #endif

+    assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
+    assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
     return alloc;
 }

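Both reservation paths above lean on ZSTD_cwksp_align(), a standard power-of-two round-up, so every aligned or table reservation now consumes a multiple of 64 bytes. A standalone illustration of the rounding (the helper mirrors ZSTD_cwksp_align() but is reimplemented for the example):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Same round-up as ZSTD_cwksp_align(size, align) for power-of-two align. */
static size_t align_up(size_t size, size_t align)
{
    size_t const mask = align - 1;
    assert((align & mask) == 0);
    return (size + mask) & ~mask;
}

int main(void)
{
    /* A 100-byte aligned reservation actually consumes 128 bytes... */
    printf("%zu\n", align_up(100, 64));   /* 128 */
    /* ...which is why sizing code must go through
     * ZSTD_cwksp_aligned_alloc_size() rather than the raw
     * ZSTD_cwksp_alloc_size() for "aligned" buffers. */
    printf("%zu\n", align_up(64, 64));    /* 64: already a multiple */
    return 0;
}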
@@ -321,7 +414,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
     void* alloc = ws->objectEnd;
     void* end = (BYTE*)alloc + roundedBytes;

-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     /* over-reserve space */
     end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
 #endif
@@ -329,8 +422,8 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
     DEBUGLOG(5,
         "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
         alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
-    assert(((size_t)alloc & (sizeof(void*)-1)) == 0);
-    assert((bytes & (sizeof(void*)-1)) == 0);
+    assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
+    assert(bytes % ZSTD_ALIGNOF(void*) == 0);
     ZSTD_cwksp_assert_internal_consistency(ws);
     /* we must be in the first phase, no advance is possible */
     if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
@@ -342,11 +435,13 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
     ws->tableEnd = end;
     ws->tableValidEnd = end;

-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
      * either size. */
     alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
-    __asan_unpoison_memory_region(alloc, bytes);
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+        __asan_unpoison_memory_region(alloc, bytes);
+    }
 #endif

     return alloc;
@@ -355,7 +450,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
 MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
     DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");

-#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
+#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
     /* To validate that the table re-use logic is sound, and that we don't
      * access table space that we haven't cleaned, we re-"poison" the table
      * space every time we mark it dirty. */
@@ -390,7 +485,7 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
     assert(ws->tableValidEnd >= ws->objectEnd);
     assert(ws->tableValidEnd <= ws->allocStart);
     if (ws->tableValidEnd < ws->tableEnd) {
-        memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
+        ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
     }
     ZSTD_cwksp_mark_tables_clean(ws);
 }
@@ -402,8 +497,12 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
 MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
     DEBUGLOG(4, "cwksp: clearing tables!");

-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
-    {
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    /* We don't do this when the workspace is statically allocated, because
+     * when that is the case, we have no capability to hook into the end of the
+     * workspace's lifecycle to unpoison the memory.
+     */
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
         size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
         __asan_poison_memory_region(ws->objectEnd, size);
     }
@@ -420,7 +519,7 @@ MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
 MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
     DEBUGLOG(4, "cwksp: clearing!");

-#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
+#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
     /* To validate that the context re-use logic is sound, and that we don't
      * access stuff that this compression hasn't initialized, we re-"poison"
      * the workspace (or at least the non-static, non-table parts of it)
@@ -431,8 +530,12 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
     }
 #endif

-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
-    {
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    /* We don't do this when the workspace is statically allocated, because
+     * when that is the case, we have no capability to hook into the end of the
+     * workspace's lifecycle to unpoison the memory.
+     */
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
         size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
         __asan_poison_memory_region(ws->objectEnd, size);
     }
@@ -452,7 +555,7 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
  * Any existing values in the workspace are ignored (the previously managed
  * buffer, if present, must be separately freed).
  */
-MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
+MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
     DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
     assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
     ws->workspace = start;
@@ -460,39 +563,45 @@ MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
     ws->objectEnd = ws->workspace;
     ws->tableValidEnd = ws->objectEnd;
     ws->phase = ZSTD_cwksp_alloc_objects;
+    ws->isStatic = isStatic;
     ZSTD_cwksp_clear(ws);
     ws->workspaceOversizedDuration = 0;
     ZSTD_cwksp_assert_internal_consistency(ws);
 }

 MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
-    void* workspace = ZSTD_malloc(size, customMem);
+    void* workspace = ZSTD_customMalloc(size, customMem);
     DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
-    RETURN_ERROR_IF(workspace == NULL, memory_allocation);
-    ZSTD_cwksp_init(ws, workspace, size);
+    RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
+    ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
     return 0;
 }

 MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
     void *ptr = ws->workspace;
     DEBUGLOG(4, "cwksp: freeing workspace");
-    memset(ws, 0, sizeof(ZSTD_cwksp));
-    ZSTD_free(ptr, customMem);
+    ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
+    ZSTD_customFree(ptr, customMem);
 }

 /**
  * Moves the management of a workspace from one cwksp to another. The src cwksp
- * is left in an invalid state (src must be re-init()'ed before its used again).
+ * is left in an invalid state (src must be re-init()'ed before it's used again).
  */
 MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
     *dst = *src;
-    memset(src, 0, sizeof(ZSTD_cwksp));
+    ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
 }

 MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
     return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
 }

+MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
+    return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
+         + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
+}
+
 MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
     return ws->allocFailed;
 }
@@ -501,6 +610,24 @@ MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
 *  Functions Checking Free Space
 ***************************************/

+/* ZSTD_alignmentSpaceWithinBounds() :
+ * Returns if the estimated space needed for a wksp is within an acceptable limit of the
+ * actual amount of space used.
+ */
+MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp* const ws,
+                                                        size_t const estimatedSpace, int resizedWorkspace) {
+    if (resizedWorkspace) {
+        /* Resized/newly allocated wksp should have exact bounds */
+        return ZSTD_cwksp_used(ws) == estimatedSpace;
+    } else {
+        /* Due to alignment, when reusing a workspace, we can actually consume 63 fewer or more bytes
+         * than estimatedSpace. See the comments in zstd_cwksp.h for details.
+         */
+        return (ZSTD_cwksp_used(ws) >= estimatedSpace - 63) && (ZSTD_cwksp_used(ws) <= estimatedSpace + 63);
+    }
+}
+
+
 MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
     return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
 }
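The ±63-byte tolerance is exactly the alignment slack described earlier: a reused workspace can start its tables and aligneds at different offsets than the fresh workspace the estimate assumed. A standalone restatement of the check (illustrative, not zstd code):

#include <stddef.h>
#include <stdio.h>

/* Mirrors ZSTD_cwksp_estimated_space_within_bounds(): exact match for a
 * fresh or resized workspace, +/-63 bytes of alignment slack on reuse.
 * Like the original, this assumes estimated >= 63 (size_t arithmetic). */
static int within_bounds(size_t used, size_t estimated, int resized)
{
    if (resized) return used == estimated;
    return (used >= estimated - 63) && (used <= estimated + 63);
}

int main(void)
{
    printf("%d\n", within_bounds(4096, 4096, 1));  /* 1: exact */
    printf("%d\n", within_bounds(4140, 4096, 0));  /* 1: within slack */
    printf("%d\n", within_bounds(4200, 4096, 0));  /* 0: too far off */
    return 0;
}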