zstdlib 0.7.0-x86-mingw32 → 0.8.0-x86-mingw32

Files changed (81)
  1. checksums.yaml +4 -4
  2. data/CHANGES.md +5 -0
  3. data/ext/zstdlib/extconf.rb +1 -1
  4. data/ext/zstdlib/ruby/zlib-3.0/zstdlib.c +4994 -0
  5. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/bitstream.h +25 -16
  6. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/compiler.h +118 -4
  7. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/cpu.h +1 -3
  8. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/debug.c +1 -1
  9. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/debug.h +12 -19
  10. data/ext/zstdlib/zstd-1.5.0/lib/common/entropy_common.c +362 -0
  11. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/error_private.c +2 -1
  12. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/error_private.h +3 -3
  13. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/fse.h +40 -12
  14. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/fse_decompress.c +139 -22
  15. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/huf.h +29 -7
  16. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/mem.h +69 -98
  17. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/pool.c +23 -17
  18. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/pool.h +2 -2
  19. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/threading.c +6 -5
  20. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/threading.h +0 -0
  21. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/xxhash.c +20 -60
  22. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/xxhash.h +2 -2
  23. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/zstd_common.c +10 -10
  24. data/ext/zstdlib/zstd-1.5.0/lib/common/zstd_deps.h +111 -0
  25. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/zstd_internal.h +105 -62
  26. data/ext/zstdlib/zstd-1.5.0/lib/common/zstd_trace.h +154 -0
  27. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/fse_compress.c +31 -24
  28. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/hist.c +27 -29
  29. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/hist.h +2 -2
  30. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/huf_compress.c +265 -126
  31. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress.c +2843 -728
  32. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_internal.h +305 -63
  33. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_literals.c +8 -8
  34. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_literals.h +1 -1
  35. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_sequences.c +29 -7
  36. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_sequences.h +1 -1
  37. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_superblock.c +22 -295
  38. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_superblock.h +1 -1
  39. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_cwksp.h +204 -67
  40. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_double_fast.c +25 -25
  41. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_double_fast.h +1 -1
  42. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_fast.c +23 -23
  43. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_fast.h +1 -1
  44. data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_lazy.c +2184 -0
  45. data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_lazy.h +125 -0
  46. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_ldm.c +314 -211
  47. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_ldm.h +9 -2
  48. data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_ldm_geartab.h +103 -0
  49. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_opt.c +191 -46
  50. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_opt.h +1 -1
  51. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstdmt_compress.c +93 -415
  52. data/ext/zstdlib/zstd-1.5.0/lib/compress/zstdmt_compress.h +110 -0
  53. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/huf_decompress.c +342 -239
  54. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/zstd_ddict.c +9 -9
  55. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/zstd_ddict.h +2 -2
  56. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/zstd_decompress.c +369 -87
  57. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/zstd_decompress_block.c +191 -75
  58. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/zstd_decompress_block.h +6 -3
  59. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/zstd_decompress_internal.h +27 -11
  60. data/ext/zstdlib/zstd-1.5.0/lib/zdict.h +452 -0
  61. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/zstd.h +568 -126
  62. data/ext/zstdlib/{zstd-1.4.5/lib/common → zstd-1.5.0/lib}/zstd_errors.h +2 -1
  63. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/gzclose.c +0 -0
  64. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/gzcompatibility.h +1 -1
  65. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/gzguts.h +0 -0
  66. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/gzlib.c +0 -0
  67. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/gzread.c +0 -0
  68. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/gzwrite.c +0 -0
  69. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/zstd_zlibwrapper.c +126 -44
  70. data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/zstd_zlibwrapper.h +1 -1
  71. data/lib/2.2/zstdlib.so +0 -0
  72. data/lib/2.3/zstdlib.so +0 -0
  73. data/lib/2.4/zstdlib.so +0 -0
  74. data/lib/2.5/zstdlib.so +0 -0
  75. data/lib/2.6/zstdlib.so +0 -0
  76. data/lib/2.7/zstdlib.so +0 -0
  77. metadata +69 -64
  78. data/ext/zstdlib/zstd-1.4.5/lib/common/entropy_common.c +0 -216
  79. data/ext/zstdlib/zstd-1.4.5/lib/compress/zstd_lazy.c +0 -1138
  80. data/ext/zstdlib/zstd-1.4.5/lib/compress/zstd_lazy.h +0 -67
  81. data/ext/zstdlib/zstd-1.4.5/lib/compress/zstdmt_compress.h +0 -192
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -35,6 +35,10 @@ extern "C" {
 #define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
 #endif
 
+
+/* Set our tables and aligneds to align by 64 bytes */
+#define ZSTD_CWKSP_ALIGNMENT_BYTES 64
+
 /*-*************************************
 *  Structures
 ***************************************/
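
A note on what the new constant feeds into: ZSTD_cwksp_align() (whose body is not shown in this diff) rounds a size up to a multiple of a power-of-two alignment, and 1.5.0 now passes ZSTD_CWKSP_ALIGNMENT_BYTES (64) where 1.4.5 passed sizeof(U32). A standalone sketch of that round-up arithmetic, with illustrative names:

```c
#include <assert.h>
#include <stdio.h>

/* Round size up to the next multiple of align (align must be a power of two).
 * Mirrors the (size + mask) & ~mask arithmetic ZSTD_cwksp_align() is built on;
 * a standalone sketch, not the library function. */
static size_t align_up(size_t size, size_t align) {
    size_t const mask = align - 1;
    assert((align & mask) == 0);   /* power of two */
    return (size + mask) & ~mask;
}

int main(void) {
    /* With ZSTD_CWKSP_ALIGNMENT_BYTES == 64: */
    printf("%zu\n", align_up(1, 64));    /* 64  */
    printf("%zu\n", align_up(64, 64));   /* 64  */
    printf("%zu\n", align_up(65, 64));   /* 128 */
    return 0;
}
```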
@@ -44,6 +48,16 @@ typedef enum {
     ZSTD_cwksp_alloc_aligned
 } ZSTD_cwksp_alloc_phase_e;
 
+/**
+ * Used to describe whether the workspace is statically allocated (and will not
+ * necessarily ever be freed), or if it's dynamically allocated and we can
+ * expect a well-formed caller to free this.
+ */
+typedef enum {
+    ZSTD_cwksp_dynamic_alloc,
+    ZSTD_cwksp_static_alloc
+} ZSTD_cwksp_static_alloc_e;
+
 /**
  * Zstd fits all its internal datastructures into a single continuous buffer,
  * so that it only needs to perform a single OS allocation (or so that a buffer
@@ -92,7 +106,7 @@ typedef enum {
  *
  * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
  *   so that literally everything fits in a single buffer. Note: if present,
- *   this must be the first object in the workspace, since ZSTD_free{CCtx,
+ *   this must be the first object in the workspace, since ZSTD_customFree{CCtx,
  *   CDict}() rely on a pointer comparison to see whether one or two frees are
  *   required.
  *
@@ -107,10 +121,11 @@ typedef enum {
  * - Tables: these are any of several different datastructures (hash tables,
  *   chain tables, binary trees) that all respect a common format: they are
  *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
- *   Their sizes depend on the cparams.
+ *   Their sizes depend on the cparams. These tables are 64-byte aligned.
  *
  * - Aligned: these buffers are used for various purposes that require 4 byte
- *   alignment, but don't require any initialization before they're used.
+ *   alignment, but don't require any initialization before they're used. These
+ *   buffers are each aligned to 64 bytes.
  *
  * - Buffers: these buffers are used for various purposes that don't require
  *   any alignment or initialization before they're used. This means they can
@@ -123,8 +138,7 @@ typedef enum {
  *
  * 1. Objects
  * 2. Buffers
- * 3. Aligned
- * 4. Tables
+ * 3. Aligned/Tables
  *
  * Attempts to reserve objects of different types out of order will fail.
  */
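
Since the Aligned and Tables phases are now merged, the ordering contract is: objects first, then buffers, then aligneds/tables, and a reservation may never move the workspace backwards. A toy model of that monotonic-phase rule (hypothetical names, not the real ZSTD_cwksp API):

```c
#include <assert.h>

/* Simplified sketch of the "phases only move forward" rule enforced by
 * ZSTD_cwksp_internal_advance_phase(): requesting an earlier phase than the
 * workspace is already in is a usage error. */
typedef enum { alloc_objects, alloc_buffers, alloc_aligned } phase_e;
typedef struct { phase_e phase; } wksp_t;

static void advance_phase(wksp_t* ws, phase_e phase) {
    assert(phase >= ws->phase);          /* out-of-order reservation trips this */
    if (phase > ws->phase) ws->phase = phase;
}

int main(void) {
    wksp_t ws = { alloc_objects };
    advance_phase(&ws, alloc_buffers);   /* ok: forward */
    advance_phase(&ws, alloc_aligned);   /* ok: forward */
    /* advance_phase(&ws, alloc_buffers);   would assert: backwards */
    return 0;
}
```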
@@ -137,9 +151,10 @@ typedef struct {
     void* tableValidEnd;
     void* allocStart;
 
-    int allocFailed;
+    BYTE allocFailed;
     int workspaceOversizedDuration;
     ZSTD_cwksp_alloc_phase_e phase;
+    ZSTD_cwksp_static_alloc_e isStatic;
 } ZSTD_cwksp;
 
 /*-*************************************
@@ -176,39 +191,123 @@ MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
  * Since tables aren't currently redzoned, you don't need to call through this
  * to figure out how much space you need for the matchState tables. Everything
  * else is though.
+ *
+ * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size().
  */
 MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    if (size == 0)
+        return 0;
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
 #else
     return size;
 #endif
 }
 
-MEM_STATIC void ZSTD_cwksp_internal_advance_phase(
+/**
+ * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
+ * Used to determine the number of bytes required for a given "aligned".
+ */
+MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
+    return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES));
+}
+
+/**
+ * Returns the amount of additional space the cwksp must allocate
+ * for internal purposes (currently only alignment).
+ */
+MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
+    /* For alignment, the wksp will always allocate an additional n_1=[1, 64] bytes
+     * to align the beginning of the tables section, as well as another n_2=[0, 63] bytes
+     * to align the beginning of the aligned section.
+     *
+     * n_1 + n_2 == 64 bytes if the cwksp is freshly allocated, due to tables and
+     * aligneds being sized in multiples of 64 bytes.
+     */
+    size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES;
+    return slackSpace;
+}
+
+
+/**
+ * Return the number of additional bytes required to align a pointer to the given number of bytes.
+ * alignBytes must be a power of two.
+ */
+MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
+    size_t const alignBytesMask = alignBytes - 1;
+    size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
+    assert((alignBytes & alignBytesMask) == 0);
+    assert(bytes != ZSTD_CWKSP_ALIGNMENT_BYTES);
+    return bytes;
+}
+
+/**
+ * Internal function. Do not use directly.
+ * Reserves the given number of bytes within the aligned/buffer segment of the wksp, which
+ * counts from the end of the wksp (as opposed to the object/table segment).
+ *
+ * Returns a pointer to the beginning of that space.
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes) {
+    void* const alloc = (BYTE*)ws->allocStart - bytes;
+    void* const bottom = ws->tableEnd;
+    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
+        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
+    ZSTD_cwksp_assert_internal_consistency(ws);
+    assert(alloc >= bottom);
+    if (alloc < bottom) {
+        DEBUGLOG(4, "cwksp: alloc failed!");
+        ws->allocFailed = 1;
+        return NULL;
+    }
+    if (alloc < ws->tableValidEnd) {
+        ws->tableValidEnd = alloc;
+    }
+    ws->allocStart = alloc;
+    return alloc;
+}
+
+/**
+ * Moves the cwksp to the next phase, and does any necessary allocations.
+ * Returns 0 on success, or a zstd error code.
+ */
+MEM_STATIC size_t ZSTD_cwksp_internal_advance_phase(
         ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
     assert(phase >= ws->phase);
     if (phase > ws->phase) {
+        /* Going from allocating objects to allocating buffers */
         if (ws->phase < ZSTD_cwksp_alloc_buffers &&
                 phase >= ZSTD_cwksp_alloc_buffers) {
             ws->tableValidEnd = ws->objectEnd;
         }
+
+        /* Going from allocating buffers to allocating aligneds/tables */
         if (ws->phase < ZSTD_cwksp_alloc_aligned &&
                 phase >= ZSTD_cwksp_alloc_aligned) {
-            /* If unaligned allocations down from a too-large top have left us
-             * unaligned, we need to realign our alloc ptr. Technically, this
-             * can consume space that is unaccounted for in the neededSpace
-             * calculation. However, I believe this can only happen when the
-             * workspace is too large, and specifically when it is too large
-             * by a larger margin than the space that will be consumed. */
-            /* TODO: cleaner, compiler warning friendly way to do this??? */
-            ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));
-            if (ws->allocStart < ws->tableValidEnd) {
-                ws->tableValidEnd = ws->allocStart;
+            {   /* Align the start of the "aligned" to 64 bytes. Use [1, 64] bytes. */
+                size_t const bytesToAlign =
+                    ZSTD_CWKSP_ALIGNMENT_BYTES - ZSTD_cwksp_bytes_to_align_ptr(ws->allocStart, ZSTD_CWKSP_ALIGNMENT_BYTES);
+                DEBUGLOG(5, "reserving aligned alignment addtl space: %zu", bytesToAlign);
+                ZSTD_STATIC_ASSERT((ZSTD_CWKSP_ALIGNMENT_BYTES & (ZSTD_CWKSP_ALIGNMENT_BYTES - 1)) == 0); /* power of 2 */
+                RETURN_ERROR_IF(!ZSTD_cwksp_reserve_internal_buffer_space(ws, bytesToAlign),
+                                memory_allocation, "aligned phase - alignment initial allocation failed!");
+            }
+            {   /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
+                void* const alloc = ws->objectEnd;
+                size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
+                void* const end = (BYTE*)alloc + bytesToAlign;
+                DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
+                RETURN_ERROR_IF(end > ws->workspaceEnd, memory_allocation,
+                                "table phase - alignment initial allocation failed!");
+                ws->objectEnd = end;
+                ws->tableEnd = end;
+                ws->tableValidEnd = end;
             }
         }
         ws->phase = phase;
+        ZSTD_cwksp_assert_internal_consistency(ws);
     }
+    return 0;
 }
 
 /**
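
ZSTD_cwksp_bytes_to_align_ptr() above is the core of the new scheme: it computes how many bytes separate a pointer from the next 64-byte boundary. The same arithmetic as a self-contained sketch, using uintptr_t in place of the library's (size_t)ptr cast:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Bytes needed to advance addr to the next multiple of alignBytes
 * (0 if already aligned); alignBytes must be a power of two. */
static size_t bytes_to_align(uintptr_t addr, size_t alignBytes) {
    size_t const mask = alignBytes - 1;
    assert((alignBytes & mask) == 0);
    return (alignBytes - (addr & mask)) & mask;
}

int main(void) {
    printf("%zu\n", bytes_to_align(0x1000, 64)); /* 0: already aligned */
    printf("%zu\n", bytes_to_align(0x1001, 64)); /* 63 */
    printf("%zu\n", bytes_to_align(0x103F, 64)); /* 1  */
    return 0;
}
```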
@@ -224,34 +323,26 @@ MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
 MEM_STATIC void* ZSTD_cwksp_reserve_internal(
         ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
     void* alloc;
-    void* bottom = ws->tableEnd;
-    ZSTD_cwksp_internal_advance_phase(ws, phase);
-    alloc = (BYTE *)ws->allocStart - bytes;
+    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
+        return NULL;
+    }
 
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     /* over-reserve space */
-    alloc = (BYTE *)alloc - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+    bytes += 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
 #endif
 
-    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
-        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
-    ZSTD_cwksp_assert_internal_consistency(ws);
-    assert(alloc >= bottom);
-    if (alloc < bottom) {
-        DEBUGLOG(4, "cwksp: alloc failed!");
-        ws->allocFailed = 1;
-        return NULL;
-    }
-    if (alloc < ws->tableValidEnd) {
-        ws->tableValidEnd = alloc;
-    }
-    ws->allocStart = alloc;
+    alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);
 
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
      * either side. */
-    alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
-    __asan_unpoison_memory_region(alloc, bytes);
+    if (alloc) {
+        alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+        if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+            __asan_unpoison_memory_region(alloc, bytes);
+        }
+    }
 #endif
 
     return alloc;
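
The ASAN branch now over-reserves by growing bytes up front instead of moving alloc down, but the resulting layout is the same: 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE extra bytes, with the returned pointer advanced past the leading redzone so poisoned slack sits on both sides of the caller-visible region. A worked sketch of that arithmetic with hypothetical sizes:

```c
#include <stdio.h>

int main(void) {
    size_t const redzone   = 128;  /* ZSTD_CWKSP_ASAN_REDZONE_SIZE */
    size_t const requested = 512;  /* hypothetical caller request  */

    size_t const reserved = requested + 2 * redzone;  /* 768 bytes actually consumed */
    size_t const userFrom = redzone;                  /* caller sees [128, 640)      */
    size_t const userTo   = redzone + requested;

    printf("reserved %zu bytes; user region at offsets [%zu, %zu)\n",
           reserved, userFrom, userTo);
    return 0;
}
```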
@@ -265,28 +356,36 @@ MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
 }
 
 /**
- * Reserves and returns memory sized on and aligned on sizeof(unsigned).
+ * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
  */
 MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
-    assert((bytes & (sizeof(U32)-1)) == 0);
-    return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
+    void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
+                                            ZSTD_cwksp_alloc_aligned);
+    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
+    return ptr;
 }
 
 /**
- * Aligned on sizeof(unsigned). These buffers have the special property that
+ * Aligned on 64 bytes. These buffers have the special property that
  * their values remain constrained, allowing us to re-use them without
  * memset()-ing them.
  */
 MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
     const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
-    void* alloc = ws->tableEnd;
-    void* end = (BYTE *)alloc + bytes;
-    void* top = ws->allocStart;
+    void* alloc;
+    void* end;
+    void* top;
+
+    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
+        return NULL;
+    }
+    alloc = ws->tableEnd;
+    end = (BYTE *)alloc + bytes;
+    top = ws->allocStart;
 
     DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
         alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
     assert((bytes & (sizeof(U32)-1)) == 0);
-    ZSTD_cwksp_internal_advance_phase(ws, phase);
     ZSTD_cwksp_assert_internal_consistency(ws);
     assert(end <= top);
     if (end > top) {
@@ -296,10 +395,14 @@ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
     }
     ws->tableEnd = end;
 
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
-    __asan_unpoison_memory_region(alloc, bytes);
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+        __asan_unpoison_memory_region(alloc, bytes);
+    }
 #endif
 
+    assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
+    assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
     return alloc;
 }
 
@@ -311,7 +414,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
     void* alloc = ws->objectEnd;
     void* end = (BYTE*)alloc + roundedBytes;
 
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     /* over-reserve space */
     end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
 #endif
@@ -332,11 +435,13 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
     ws->tableEnd = end;
     ws->tableValidEnd = end;
 
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
      * either side. */
     alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
-    __asan_unpoison_memory_region(alloc, bytes);
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+        __asan_unpoison_memory_region(alloc, bytes);
+    }
 #endif
 
     return alloc;
@@ -345,7 +450,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
 MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
     DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");
 
-#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
+#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
     /* To validate that the table re-use logic is sound, and that we don't
      * access table space that we haven't cleaned, we re-"poison" the table
      * space every time we mark it dirty. */
@@ -380,7 +485,7 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
     assert(ws->tableValidEnd >= ws->objectEnd);
     assert(ws->tableValidEnd <= ws->allocStart);
     if (ws->tableValidEnd < ws->tableEnd) {
-        memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
+        ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
     }
     ZSTD_cwksp_mark_tables_clean(ws);
 }
@@ -392,8 +497,12 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
 MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
     DEBUGLOG(4, "cwksp: clearing tables!");
 
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
-    {
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    /* We don't do this when the workspace is statically allocated, because
+     * when that is the case, we have no capability to hook into the end of the
+     * workspace's lifecycle to unpoison the memory.
+     */
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
         size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
         __asan_poison_memory_region(ws->objectEnd, size);
     }
@@ -410,7 +519,7 @@ MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
 MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
     DEBUGLOG(4, "cwksp: clearing!");
 
-#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
+#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
     /* To validate that the context re-use logic is sound, and that we don't
      * access stuff that this compression hasn't initialized, we re-"poison"
      * the workspace (or at least the non-static, non-table parts of it)
@@ -421,8 +530,12 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
     }
 #endif
 
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
-    {
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    /* We don't do this when the workspace is statically allocated, because
+     * when that is the case, we have no capability to hook into the end of the
+     * workspace's lifecycle to unpoison the memory.
+     */
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
         size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
         __asan_poison_memory_region(ws->objectEnd, size);
     }
@@ -442,7 +555,7 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
  * Any existing values in the workspace are ignored (the previously managed
  * buffer, if present, must be separately freed).
  */
-MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
+MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
     DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
     assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
     ws->workspace = start;
@@ -450,39 +563,45 @@ MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
     ws->objectEnd = ws->workspace;
     ws->tableValidEnd = ws->objectEnd;
     ws->phase = ZSTD_cwksp_alloc_objects;
+    ws->isStatic = isStatic;
     ZSTD_cwksp_clear(ws);
     ws->workspaceOversizedDuration = 0;
     ZSTD_cwksp_assert_internal_consistency(ws);
 }
 
 MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
-    void* workspace = ZSTD_malloc(size, customMem);
+    void* workspace = ZSTD_customMalloc(size, customMem);
     DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
     RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
-    ZSTD_cwksp_init(ws, workspace, size);
+    ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
     return 0;
 }
 
 MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
     void *ptr = ws->workspace;
     DEBUGLOG(4, "cwksp: freeing workspace");
-    memset(ws, 0, sizeof(ZSTD_cwksp));
-    ZSTD_free(ptr, customMem);
+    ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
+    ZSTD_customFree(ptr, customMem);
 }
 
 /**
  * Moves the management of a workspace from one cwksp to another. The src cwksp
- * is left in an invalid state (src must be re-init()'ed before its used again).
+ * is left in an invalid state (src must be re-init()'ed before it's used again).
  */
 MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
     *dst = *src;
-    memset(src, 0, sizeof(ZSTD_cwksp));
+    ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
 }
 
 MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
     return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
 }
 
+MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
+    return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
+         + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
+}
+
 MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
     return ws->allocFailed;
 }
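
ZSTD_cwksp_used(), new in this release, reflects the two-ended layout documented earlier in this header: objects and tables grow up from the start of the workspace while buffers and aligneds grow down from the end, so the bytes in use are the sum of both occupied ends. A toy model with hypothetical offsets:

```c
#include <stdio.h>

int main(void) {
    /* Offsets into a hypothetical 1024-byte workspace. */
    size_t const workspace    = 0;
    size_t const tableEnd     = 256;   /* objects + tables grew upward   */
    size_t const allocStart   = 768;   /* buffers/aligneds grew downward */
    size_t const workspaceEnd = 1024;

    /* Same shape as ZSTD_cwksp_used(): both occupied ends, summed. */
    size_t const used = (tableEnd - workspace) + (workspaceEnd - allocStart);
    printf("used = %zu of %zu bytes\n", used, workspaceEnd - workspace); /* 512 of 1024 */
    return 0;
}
```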
@@ -491,6 +610,24 @@ MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
 *  Functions Checking Free Space
 ***************************************/
 
+/* ZSTD_alignmentSpaceWithinBounds() :
+ * Returns whether the estimated space needed for a wksp is within an acceptable limit of the
+ * actual amount of space used.
+ */
+MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp* const ws,
+                                                        size_t const estimatedSpace, int resizedWorkspace) {
+    if (resizedWorkspace) {
+        /* Resized/newly allocated wksp should have exact bounds */
+        return ZSTD_cwksp_used(ws) == estimatedSpace;
+    } else {
+        /* Due to alignment, when reusing a workspace, we can actually consume 63 fewer or more bytes
+         * than estimatedSpace. See the comments in zstd_cwksp.h for details.
+         */
+        return (ZSTD_cwksp_used(ws) >= estimatedSpace - 63) && (ZSTD_cwksp_used(ws) <= estimatedSpace + 63);
+    }
+}
+
+
 MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
     return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
 }
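
The ±63-byte tolerance in the reused-workspace branch follows from the alignment slack described above: on reuse, the padding actually consumed can differ from the estimate by up to ZSTD_CWKSP_ALIGNMENT_BYTES - 1 bytes in either direction. A minimal sketch of the check (hypothetical values; like the real code, it assumes the estimate is at least 63):

```c
#include <stdio.h>

/* Sketch of ZSTD_cwksp_estimated_space_within_bounds(): a freshly sized
 * workspace must match the estimate exactly; a reused one gets +/-63 bytes
 * of alignment slack. */
static int within_bounds(size_t used, size_t estimated, int resized) {
    if (resized) return used == estimated;
    return used >= estimated - 63 && used <= estimated + 63;
}

int main(void) {
    printf("%d\n", within_bounds(1000, 1000, 1)); /* 1: exact match required */
    printf("%d\n", within_bounds(1040, 1000, 0)); /* 1: within +63 slack     */
    printf("%d\n", within_bounds(1100, 1000, 0)); /* 0: outside tolerance    */
    return 0;
}
```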