zstd-ruby 1.4.4.0 → 1.5.5.0

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (115)
  1. checksums.yaml +4 -4
  2. data/.gitignore +2 -0
  3. data/README.md +78 -5
  4. data/Rakefile +8 -2
  5. data/ext/zstdruby/common.h +15 -0
  6. data/ext/zstdruby/extconf.rb +3 -2
  7. data/ext/zstdruby/libzstd/common/allocations.h +55 -0
  8. data/ext/zstdruby/libzstd/common/bits.h +200 -0
  9. data/ext/zstdruby/libzstd/common/bitstream.h +74 -97
  10. data/ext/zstdruby/libzstd/common/compiler.h +219 -20
  11. data/ext/zstdruby/libzstd/common/cpu.h +1 -3
  12. data/ext/zstdruby/libzstd/common/debug.c +11 -31
  13. data/ext/zstdruby/libzstd/common/debug.h +22 -49
  14. data/ext/zstdruby/libzstd/common/entropy_common.c +184 -80
  15. data/ext/zstdruby/libzstd/common/error_private.c +11 -2
  16. data/ext/zstdruby/libzstd/common/error_private.h +87 -4
  17. data/ext/zstdruby/libzstd/common/fse.h +47 -116
  18. data/ext/zstdruby/libzstd/common/fse_decompress.c +127 -127
  19. data/ext/zstdruby/libzstd/common/huf.h +112 -197
  20. data/ext/zstdruby/libzstd/common/mem.h +124 -142
  21. data/ext/zstdruby/libzstd/common/pool.c +54 -27
  22. data/ext/zstdruby/libzstd/common/pool.h +11 -5
  23. data/ext/zstdruby/libzstd/common/portability_macros.h +156 -0
  24. data/ext/zstdruby/libzstd/common/threading.c +78 -22
  25. data/ext/zstdruby/libzstd/common/threading.h +9 -13
  26. data/ext/zstdruby/libzstd/common/xxhash.c +15 -873
  27. data/ext/zstdruby/libzstd/common/xxhash.h +5572 -191
  28. data/ext/zstdruby/libzstd/common/zstd_common.c +2 -37
  29. data/ext/zstdruby/libzstd/common/zstd_deps.h +111 -0
  30. data/ext/zstdruby/libzstd/common/zstd_internal.h +186 -144
  31. data/ext/zstdruby/libzstd/common/zstd_trace.h +163 -0
  32. data/ext/zstdruby/libzstd/compress/clevels.h +134 -0
  33. data/ext/zstdruby/libzstd/compress/fse_compress.c +99 -196
  34. data/ext/zstdruby/libzstd/compress/hist.c +41 -63
  35. data/ext/zstdruby/libzstd/compress/hist.h +13 -33
  36. data/ext/zstdruby/libzstd/compress/huf_compress.c +968 -331
  37. data/ext/zstdruby/libzstd/compress/zstd_compress.c +4120 -1191
  38. data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +688 -159
  39. data/ext/zstdruby/libzstd/compress/zstd_compress_literals.c +121 -40
  40. data/ext/zstdruby/libzstd/compress/zstd_compress_literals.h +16 -6
  41. data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.c +62 -35
  42. data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.h +10 -3
  43. data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.c +577 -0
  44. data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.h +32 -0
  45. data/ext/zstdruby/libzstd/compress/zstd_cwksp.h +322 -115
  46. data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +394 -154
  47. data/ext/zstdruby/libzstd/compress/zstd_double_fast.h +4 -3
  48. data/ext/zstdruby/libzstd/compress/zstd_fast.c +729 -253
  49. data/ext/zstdruby/libzstd/compress/zstd_fast.h +4 -3
  50. data/ext/zstdruby/libzstd/compress/zstd_lazy.c +1289 -247
  51. data/ext/zstdruby/libzstd/compress/zstd_lazy.h +61 -1
  52. data/ext/zstdruby/libzstd/compress/zstd_ldm.c +339 -212
  53. data/ext/zstdruby/libzstd/compress/zstd_ldm.h +15 -3
  54. data/ext/zstdruby/libzstd/compress/zstd_ldm_geartab.h +106 -0
  55. data/ext/zstdruby/libzstd/compress/zstd_opt.c +508 -282
  56. data/ext/zstdruby/libzstd/compress/zstd_opt.h +1 -1
  57. data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +217 -466
  58. data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +35 -114
  59. data/ext/zstdruby/libzstd/decompress/huf_decompress.c +1220 -572
  60. data/ext/zstdruby/libzstd/decompress/huf_decompress_amd64.S +576 -0
  61. data/ext/zstdruby/libzstd/decompress/zstd_ddict.c +23 -19
  62. data/ext/zstdruby/libzstd/decompress/zstd_ddict.h +3 -3
  63. data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +859 -273
  64. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.c +1244 -375
  65. data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.h +21 -7
  66. data/ext/zstdruby/libzstd/decompress/zstd_decompress_internal.h +74 -11
  67. data/ext/zstdruby/libzstd/dictBuilder/cover.c +75 -54
  68. data/ext/zstdruby/libzstd/dictBuilder/cover.h +20 -9
  69. data/ext/zstdruby/libzstd/dictBuilder/divsufsort.c +1 -1
  70. data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +55 -36
  71. data/ext/zstdruby/libzstd/dictBuilder/zdict.c +126 -110
  72. data/ext/zstdruby/libzstd/{dictBuilder/zdict.h → zdict.h} +248 -56
  73. data/ext/zstdruby/libzstd/zstd.h +1277 -306
  74. data/ext/zstdruby/libzstd/{common/zstd_errors.h → zstd_errors.h} +29 -8
  75. data/ext/zstdruby/main.c +20 -0
  76. data/ext/zstdruby/skippable_frame.c +63 -0
  77. data/ext/zstdruby/streaming_compress.c +177 -0
  78. data/ext/zstdruby/streaming_compress.h +5 -0
  79. data/ext/zstdruby/streaming_decompress.c +123 -0
  80. data/ext/zstdruby/zstdruby.c +114 -32
  81. data/lib/zstd-ruby/version.rb +1 -1
  82. data/lib/zstd-ruby.rb +0 -1
  83. data/zstd-ruby.gemspec +1 -1
  84. metadata +24 -39
  85. data/.travis.yml +0 -14
  86. data/ext/zstdruby/libzstd/.gitignore +0 -3
  87. data/ext/zstdruby/libzstd/BUCK +0 -234
  88. data/ext/zstdruby/libzstd/Makefile +0 -289
  89. data/ext/zstdruby/libzstd/README.md +0 -159
  90. data/ext/zstdruby/libzstd/deprecated/zbuff.h +0 -214
  91. data/ext/zstdruby/libzstd/deprecated/zbuff_common.c +0 -26
  92. data/ext/zstdruby/libzstd/deprecated/zbuff_compress.c +0 -147
  93. data/ext/zstdruby/libzstd/deprecated/zbuff_decompress.c +0 -75
  94. data/ext/zstdruby/libzstd/dll/example/Makefile +0 -47
  95. data/ext/zstdruby/libzstd/dll/example/README.md +0 -69
  96. data/ext/zstdruby/libzstd/dll/example/build_package.bat +0 -20
  97. data/ext/zstdruby/libzstd/dll/example/fullbench-dll.sln +0 -25
  98. data/ext/zstdruby/libzstd/dll/example/fullbench-dll.vcxproj +0 -181
  99. data/ext/zstdruby/libzstd/legacy/zstd_legacy.h +0 -415
  100. data/ext/zstdruby/libzstd/legacy/zstd_v01.c +0 -2152
  101. data/ext/zstdruby/libzstd/legacy/zstd_v01.h +0 -94
  102. data/ext/zstdruby/libzstd/legacy/zstd_v02.c +0 -3514
  103. data/ext/zstdruby/libzstd/legacy/zstd_v02.h +0 -93
  104. data/ext/zstdruby/libzstd/legacy/zstd_v03.c +0 -3156
  105. data/ext/zstdruby/libzstd/legacy/zstd_v03.h +0 -93
  106. data/ext/zstdruby/libzstd/legacy/zstd_v04.c +0 -3641
  107. data/ext/zstdruby/libzstd/legacy/zstd_v04.h +0 -142
  108. data/ext/zstdruby/libzstd/legacy/zstd_v05.c +0 -4046
  109. data/ext/zstdruby/libzstd/legacy/zstd_v05.h +0 -162
  110. data/ext/zstdruby/libzstd/legacy/zstd_v06.c +0 -4150
  111. data/ext/zstdruby/libzstd/legacy/zstd_v06.h +0 -172
  112. data/ext/zstdruby/libzstd/legacy/zstd_v07.c +0 -4533
  113. data/ext/zstdruby/libzstd/legacy/zstd_v07.h +0 -187
  114. data/ext/zstdruby/libzstd/libzstd.pc.in +0 -15
  115. data/ext/zstdruby/zstdruby.h +0 -6
data/ext/zstdruby/libzstd/compress/zstd_cwksp.h  +322 -115

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -14,7 +14,9 @@
 /*-*************************************
 *  Dependencies
 ***************************************/
-#include "zstd_internal.h"
+#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customFree */
+#include "../common/zstd_internal.h"
+#include "../common/portability_macros.h"
 
 #if defined (__cplusplus)
 extern "C" {
@@ -24,16 +26,6 @@ extern "C" {
 *  Constants
 ***************************************/
 
-/* define "workspace is too large" as this number of times larger than needed */
-#define ZSTD_WORKSPACETOOLARGE_FACTOR 3
-
-/* when workspace is continuously too large
- * during at least this number of times,
- * context's memory usage is considered wasteful,
- * because it's sized to handle a worst case scenario which rarely happens.
- * In which case, resize it down to free some memory */
-#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128
-
 /* Since the workspace is effectively its own little malloc implementation /
  * arena, when we run under ASAN, we should similarly insert redzones between
  * each internal element of the workspace, so ASAN will catch overruns that
@@ -45,15 +37,30 @@ extern "C" {
 #define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
 #endif
 
+
+/* Set our tables and aligneds to align by 64 bytes */
+#define ZSTD_CWKSP_ALIGNMENT_BYTES 64
+
 /*-*************************************
 *  Structures
 ***************************************/
 typedef enum {
     ZSTD_cwksp_alloc_objects,
-    ZSTD_cwksp_alloc_buffers,
-    ZSTD_cwksp_alloc_aligned
+    ZSTD_cwksp_alloc_aligned_init_once,
+    ZSTD_cwksp_alloc_aligned,
+    ZSTD_cwksp_alloc_buffers
 } ZSTD_cwksp_alloc_phase_e;
 
+/**
+ * Used to describe whether the workspace is statically allocated (and will not
+ * necessarily ever be freed), or if it's dynamically allocated and we can
+ * expect a well-formed caller to free this.
+ */
+typedef enum {
+    ZSTD_cwksp_dynamic_alloc,
+    ZSTD_cwksp_static_alloc
+} ZSTD_cwksp_static_alloc_e;
+
 /**
  * Zstd fits all its internal datastructures into a single continuous buffer,
  * so that it only needs to perform a single OS allocation (or so that a buffer
@@ -94,15 +101,15 @@ typedef enum {
  *
  * Workspace Layout:
  *
- * [                        ... workspace ...                         ]
- * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
+ * [                        ... workspace ...                           ]
+ * [objects][tables ->] free space [<- buffers][<- aligned][<- init once]
  *
  * The various objects that live in the workspace are divided into the
  * following categories, and are allocated separately:
  *
  * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
  *   so that literally everything fits in a single buffer. Note: if present,
- *   this must be the first object in the workspace, since ZSTD_free{CCtx,
+ *   this must be the first object in the workspace, since ZSTD_customFree{CCtx,
  *   CDict}() rely on a pointer comparison to see whether one or two frees are
  *   required.
 *
@@ -117,10 +124,20 @@ typedef enum {
  * - Tables: these are any of several different datastructures (hash tables,
  *   chain tables, binary trees) that all respect a common format: they are
  *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
- *   Their sizes depend on the cparams.
+ *   Their sizes depend on the cparams. These tables are 64-byte aligned.
+ *
+ * - Init once: these buffers require to be initialized at least once before
+ *   use. They should be used when we want to skip memory initialization
+ *   while not triggering memory checkers (like Valgrind) when reading
+ *   from this memory without writing to it first.
+ *   These buffers should be used carefully as they might contain data
+ *   from previous compressions.
+ *   Buffers are aligned to 64 bytes.
  *
- * - Aligned: these buffers are used for various purposes that require 4 byte
- *   alignment, but don't require any initialization before they're used.
+ * - Aligned: these buffers don't require any initialization before they're
+ *   used. The user of the buffer should make sure they write into a buffer
+ *   location before reading from it.
+ *   Buffers are aligned to 64 bytes.
  *
  * - Buffers: these buffers are used for various purposes that don't require
  *   any alignment or initialization before they're used. This means they can
@@ -132,9 +149,9 @@
  *   correctly packed into the workspace buffer. That order is:
  *
  * 1. Objects
- * 2. Buffers
- * 3. Aligned
- * 4. Tables
+ * 2. Init once / Tables
+ * 3. Aligned / Tables
+ * 4. Buffers / Tables
  *
  * Attempts to reserve objects of different types out of order will fail.
  */
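
To make the ordering contract above concrete, here is a minimal hypothetical sketch of a caller driving the phases in the required order. It is not code from this package: it assumes it is compiled inside libzstd's source tree (for zstd_cwksp.h, BYTE, and the reserve functions), and the sizes are arbitrary.

/* Hypothetical sketch of the cwksp phase-ordering contract (illustrative only). */
#include <stdlib.h>
#include "zstd_cwksp.h"

static int demo_cwksp_phases(void)
{
    size_t const wkspSize = 1 << 20;
    void* const mem = malloc(wkspSize);
    ZSTD_cwksp ws;
    if (mem == NULL) return 1;
    /* The caller owns `mem`; the 4th argument is new in this version. */
    ZSTD_cwksp_init(&ws, mem, wkspSize, ZSTD_cwksp_dynamic_alloc);

    {   /* 1. Objects first (pointer-aligned, reserved from the front) */
        void* const obj = ZSTD_cwksp_reserve_object(&ws, 256);
        /* 2. Then tables (size a multiple of 64, values survive re-use) */
        void* const table = ZSTD_cwksp_reserve_table(&ws, 4096);
        /* 3. Then aligned buffers (64-byte aligned, write before read) */
        void* const aligned = ZSTD_cwksp_reserve_aligned(&ws, 1024);
        /* 4. Finally plain buffers (no alignment guarantee) */
        BYTE* const buf = ZSTD_cwksp_reserve_buffer(&ws, 100);
        int const failed = ZSTD_cwksp_reserve_failed(&ws)
                        || !obj || !table || !aligned || !buf;
        free(mem);  /* init() does not take ownership; create() would */
        return failed;
    }
}

Reserving out of order fails: for example, calling ZSTD_cwksp_reserve_object() after any buffer has been reserved returns NULL and sets allocFailed, because the workspace is no longer in the ZSTD_cwksp_alloc_objects phase.
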
@@ -146,10 +163,12 @@ typedef struct {
     void* tableEnd;
     void* tableValidEnd;
     void* allocStart;
+    void* initOnceStart;
 
-    int allocFailed;
+    BYTE allocFailed;
     int workspaceOversizedDuration;
     ZSTD_cwksp_alloc_phase_e phase;
+    ZSTD_cwksp_static_alloc_e isStatic;
 } ZSTD_cwksp;
 
 /*-*************************************
@@ -157,6 +176,7 @@ typedef struct {
 ***************************************/
 
 MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
+MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws);
 
 MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
     (void)ws;
@@ -166,6 +186,20 @@ MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
     assert(ws->tableEnd <= ws->allocStart);
     assert(ws->tableValidEnd <= ws->allocStart);
     assert(ws->allocStart <= ws->workspaceEnd);
+    assert(ws->initOnceStart <= ZSTD_cwksp_initialAllocStart(ws));
+    assert(ws->workspace <= ws->initOnceStart);
+#if ZSTD_MEMORY_SANITIZER
+    {
+        intptr_t const offset = __msan_test_shadow(ws->initOnceStart,
+            (U8*)ZSTD_cwksp_initialAllocStart(ws) - (U8*)ws->initOnceStart);
+#if defined(ZSTD_MSAN_PRINT)
+        if(offset!=-1) {
+            __msan_print_shadow((U8*)ws->initOnceStart + offset - 8, 32);
+        }
+#endif
+        assert(offset==-1);
+    };
+#endif
 }
 
 /**
@@ -186,63 +220,72 @@ MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
  * Since tables aren't currently redzoned, you don't need to call through this
  * to figure out how much space you need for the matchState tables. Everything
  * else is though.
+ *
+ * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size().
  */
 MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    if (size == 0)
+        return 0;
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
 #else
     return size;
 #endif
 }
 
-MEM_STATIC void ZSTD_cwksp_internal_advance_phase(
-        ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
-    assert(phase >= ws->phase);
-    if (phase > ws->phase) {
-        if (ws->phase < ZSTD_cwksp_alloc_buffers &&
-                phase >= ZSTD_cwksp_alloc_buffers) {
-            ws->tableValidEnd = ws->objectEnd;
-        }
-        if (ws->phase < ZSTD_cwksp_alloc_aligned &&
-                phase >= ZSTD_cwksp_alloc_aligned) {
-            /* If unaligned allocations down from a too-large top have left us
-             * unaligned, we need to realign our alloc ptr. Technically, this
-             * can consume space that is unaccounted for in the neededSpace
-             * calculation. However, I believe this can only happen when the
-             * workspace is too large, and specifically when it is too large
-             * by a larger margin than the space that will be consumed. */
-            /* TODO: cleaner, compiler warning friendly way to do this??? */
-            ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));
-            if (ws->allocStart < ws->tableValidEnd) {
-                ws->tableValidEnd = ws->allocStart;
-            }
-        }
-        ws->phase = phase;
-    }
+/**
+ * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
+ * Used to determine the number of bytes required for a given "aligned".
+ */
+MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
+    return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES));
 }
 
 /**
- * Returns whether this object/buffer/etc was allocated in this workspace.
+ * Returns the amount of additional space the cwksp must allocate
+ * for internal purposes (currently only alignment).
  */
-MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
-    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
+MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
+    /* For alignment, the wksp will always allocate an additional 2*ZSTD_CWKSP_ALIGNMENT_BYTES
+     * bytes to align the beginning of tables section and end of buffers;
+     */
+    size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES * 2;
+    return slackSpace;
 }
 
+
 /**
- * Internal function. Do not use directly.
+ * Return the number of additional bytes required to align a pointer to the given number of bytes.
+ * alignBytes must be a power of two.
  */
-MEM_STATIC void* ZSTD_cwksp_reserve_internal(
-        ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
-    void* alloc;
-    void* bottom = ws->tableEnd;
-    ZSTD_cwksp_internal_advance_phase(ws, phase);
-    alloc = (BYTE *)ws->allocStart - bytes;
+MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
+    size_t const alignBytesMask = alignBytes - 1;
+    size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
+    assert((alignBytes & alignBytesMask) == 0);
+    assert(bytes < alignBytes);
+    return bytes;
+}
 
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
-    /* over-reserve space */
-    alloc = (BYTE *)alloc - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
-#endif
+/**
+ * Returns the initial value for allocStart which is used to determine the position from
+ * which we can allocate from the end of the workspace.
+ */
+MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws) {
+    return (void*)((size_t)ws->workspaceEnd & ~(ZSTD_CWKSP_ALIGNMENT_BYTES-1));
+}
 
+/**
+ * Internal function. Do not use directly.
+ * Reserves the given number of bytes within the aligned/buffer segment of the wksp,
+ * which counts from the end of the wksp (as opposed to the object/table segment).
+ *
+ * Returns a pointer to the beginning of that space.
+ */
+MEM_STATIC void*
+ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
+{
+    void* const alloc = (BYTE*)ws->allocStart - bytes;
+    void* const bottom = ws->tableEnd;
     DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
         alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
     ZSTD_cwksp_assert_internal_consistency(ws);
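
The alignment arithmetic introduced above can be checked in isolation. The following standalone program re-derives the two helpers with invented names (align_up mirrors ZSTD_cwksp_align at a 64-byte alignment; bytes_to_align mirrors ZSTD_cwksp_bytes_to_align_ptr) and walks through a few worked values; it does not depend on zstd's headers.

/* Standalone re-derivation of the alignment math used above (illustrative only). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGNMENT 64  /* mirrors ZSTD_CWKSP_ALIGNMENT_BYTES */

static size_t align_up(size_t size) {            /* cf. ZSTD_cwksp_align */
    return (size + ALIGNMENT - 1) & ~(size_t)(ALIGNMENT - 1);
}

static size_t bytes_to_align(uintptr_t ptr) {    /* cf. ZSTD_cwksp_bytes_to_align_ptr */
    size_t const mask = ALIGNMENT - 1;
    return (ALIGNMENT - (ptr & mask)) & mask;
}

int main(void) {
    assert(align_up(100) == 128);          /* 100 rounds up to the next multiple of 64 */
    assert(align_up(128) == 128);          /* already-aligned sizes are unchanged */
    assert(bytes_to_align(0x1000) == 0);   /* aligned pointer needs 0 filler bytes */
    assert(bytes_to_align(0x1001) == 63);  /* one byte past a boundary needs 63 */
    printf("alignment math checks out\n");
    return 0;
}
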
@@ -252,16 +295,88 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal(
         ws->allocFailed = 1;
         return NULL;
     }
+    /* the area is reserved from the end of wksp.
+     * If it overlaps with tableValidEnd, it voids guarantees on values' range */
     if (alloc < ws->tableValidEnd) {
         ws->tableValidEnd = alloc;
     }
     ws->allocStart = alloc;
+    return alloc;
+}
+
+/**
+ * Moves the cwksp to the next phase, and does any necessary allocations.
+ * cwksp initialization must necessarily go through each phase in order.
+ * Returns a 0 on success, or zstd error
+ */
+MEM_STATIC size_t
+ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
+{
+    assert(phase >= ws->phase);
+    if (phase > ws->phase) {
+        /* Going from allocating objects to allocating initOnce / tables */
+        if (ws->phase < ZSTD_cwksp_alloc_aligned_init_once &&
+            phase >= ZSTD_cwksp_alloc_aligned_init_once) {
+            ws->tableValidEnd = ws->objectEnd;
+            ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);
+
+            {   /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
+                void *const alloc = ws->objectEnd;
+                size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
+                void *const objectEnd = (BYTE *) alloc + bytesToAlign;
+                DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
+                RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation,
+                                "table phase - alignment initial allocation failed!");
+                ws->objectEnd = objectEnd;
+                ws->tableEnd = objectEnd;  /* table area starts being empty */
+                if (ws->tableValidEnd < ws->tableEnd) {
+                    ws->tableValidEnd = ws->tableEnd;
+                }
+            }
+        }
+        ws->phase = phase;
+        ZSTD_cwksp_assert_internal_consistency(ws);
+    }
+    return 0;
+}
 
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+/**
+ * Returns whether this object/buffer/etc was allocated in this workspace.
+ */
+MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
+{
+    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr < ws->workspaceEnd);
+}
+
+/**
+ * Internal function. Do not use directly.
+ */
+MEM_STATIC void*
+ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase)
+{
+    void* alloc;
+    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
+        return NULL;
+    }
+
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    /* over-reserve space */
+    bytes += 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+#endif
+
+    alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);
+
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
      * either size. */
-    alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
-    __asan_unpoison_memory_region(alloc, bytes);
+    if (alloc) {
+        alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+        if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+            /* We need to keep the redzone poisoned while unpoisoning the bytes that
+             * are actually allocated. */
+            __asan_unpoison_memory_region(alloc, bytes - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE);
+        }
+    }
 #endif
 
     return alloc;
@@ -270,33 +385,78 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal(
 /**
  * Reserves and returns unaligned memory.
  */
-MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
+MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
+{
     return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
 }
 
 /**
- * Reserves and returns memory sized on and aligned on sizeof(unsigned).
+ * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
+ * This memory has been initialized at least once in the past.
+ * This doesn't mean it has been initialized this time, and it might contain data from previous
+ * operations.
+ * The main usage is for algorithms that might need read access into uninitialized memory.
+ * The algorithm must maintain safety under these conditions and must make sure it doesn't
+ * leak any of the past data (directly or in side channels).
  */
-MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
-    assert((bytes & (sizeof(U32)-1)) == 0);
-    return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
+MEM_STATIC void* ZSTD_cwksp_reserve_aligned_init_once(ZSTD_cwksp* ws, size_t bytes)
+{
+    size_t const alignedBytes = ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES);
+    void* ptr = ZSTD_cwksp_reserve_internal(ws, alignedBytes, ZSTD_cwksp_alloc_aligned_init_once);
+    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
+    if(ptr && ptr < ws->initOnceStart) {
+        /* We assume the memory following the current allocation is either:
+         * 1. Not usable as initOnce memory (end of workspace)
+         * 2. Another initOnce buffer that has been allocated before (and so was previously memset)
+         * 3. An ASAN redzone, in which case we don't want to write on it
+         * For these reasons it should be fine to not explicitly zero every byte up to ws->initOnceStart.
+         * Note that we assume here that MSAN and ASAN cannot run in the same time. */
+        ZSTD_memset(ptr, 0, MIN((size_t)((U8*)ws->initOnceStart - (U8*)ptr), alignedBytes));
+        ws->initOnceStart = ptr;
+    }
+#if ZSTD_MEMORY_SANITIZER
+    assert(__msan_test_shadow(ptr, bytes) == -1);
+#endif
+    return ptr;
+}
+
+/**
+ * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
+{
+    void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
+                                            ZSTD_cwksp_alloc_aligned);
+    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
+    return ptr;
 }
 
 /**
- * Aligned on sizeof(unsigned). These buffers have the special property that
+ * Aligned on 64 bytes. These buffers have the special property that
  * their values remain constrained, allowing us to re-use them without
  * memset()-ing them.
  */
-MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
-    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
-    void* alloc = ws->tableEnd;
-    void* end = (BYTE *)alloc + bytes;
-    void* top = ws->allocStart;
+MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
+{
+    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned_init_once;
+    void* alloc;
+    void* end;
+    void* top;
+
+    /* We can only start allocating tables after we are done reserving space for objects at the
+     * start of the workspace */
+    if(ws->phase < phase) {
+        if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
+            return NULL;
+        }
+    }
+    alloc = ws->tableEnd;
+    end = (BYTE *)alloc + bytes;
+    top = ws->allocStart;
 
     DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
         alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
     assert((bytes & (sizeof(U32)-1)) == 0);
-    ZSTD_cwksp_internal_advance_phase(ws, phase);
     ZSTD_cwksp_assert_internal_consistency(ws);
     assert(end <= top);
     if (end > top) {
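
The init-once contract documented above can be restated with a hypothetical consumer: reads are always defined (the region was zeroed the first time it was handed out), but a read may return a stale value left over from an earlier compression, so entries must be validated rather than trusted. A minimal sketch with invented names:

/* Hypothetical consumer of an init-once buffer (illustrative; not from this diff). */
#include <stddef.h>

typedef unsigned U32;

/* The guarantee relied on: every byte is initialized (reads are defined),
 * but the contents may be stale values from a previous compression. */
static U32 lookup_candidate(const U32* initOnceTable, size_t tableSize,
                            size_t hash, U32 lowestValidIndex)
{
    U32 const candidate = initOnceTable[hash % tableSize];  /* defined read, maybe stale */
    /* A stale entry must be filtered by a range check, never trusted directly,
     * so no data from an earlier compression can leak through. */
    if (candidate < lowestValidIndex) return 0;  /* treat as "no match" */
    return candidate;
}
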
@@ -306,35 +466,41 @@ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
     }
     ws->tableEnd = end;
 
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
-    __asan_unpoison_memory_region(alloc, bytes);
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+        __asan_unpoison_memory_region(alloc, bytes);
+    }
 #endif
 
+    assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
+    assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
     return alloc;
 }
 
 /**
  * Aligned on sizeof(void*).
+ * Note : should happen only once, at workspace first initialization
  */
-MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
-    size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
+MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
+{
+    size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
     void* alloc = ws->objectEnd;
     void* end = (BYTE*)alloc + roundedBytes;
 
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     /* over-reserve space */
     end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
 #endif
 
-    DEBUGLOG(5,
+    DEBUGLOG(4,
         "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
         alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
-    assert(((size_t)alloc & (sizeof(void*)-1)) == 0);
-    assert((bytes & (sizeof(void*)-1)) == 0);
+    assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
+    assert(bytes % ZSTD_ALIGNOF(void*) == 0);
     ZSTD_cwksp_assert_internal_consistency(ws);
     /* we must be in the first phase, no advance is possible */
     if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
-        DEBUGLOG(4, "cwksp: object alloc failed!");
+        DEBUGLOG(3, "cwksp: object alloc failed!");
         ws->allocFailed = 1;
         return NULL;
     }
@@ -342,27 +508,38 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
     ws->tableEnd = end;
     ws->tableValidEnd = end;
 
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
      * either size. */
-    alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
-    __asan_unpoison_memory_region(alloc, bytes);
+    alloc = (BYTE*)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+        __asan_unpoison_memory_region(alloc, bytes);
+    }
 #endif
 
     return alloc;
 }
 
-MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
+MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
+{
     DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");
 
-#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
+#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
     /* To validate that the table re-use logic is sound, and that we don't
      * access table space that we haven't cleaned, we re-"poison" the table
-     * space every time we mark it dirty. */
+     * space every time we mark it dirty.
+     * Since tableValidEnd space and initOnce space may overlap we don't poison
+     * the initOnce portion as it break its promise. This means that this poisoning
+     * check isn't always applied fully. */
     {
         size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
         assert(__msan_test_shadow(ws->objectEnd, size) == -1);
-        __msan_poison(ws->objectEnd, size);
+        if((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) {
+            __msan_poison(ws->objectEnd, size);
+        } else {
+            assert(ws->initOnceStart >= ws->objectEnd);
+            __msan_poison(ws->objectEnd, (BYTE*)ws->initOnceStart - (BYTE*)ws->objectEnd);
+        }
     }
 #endif
 
@@ -390,7 +567,7 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
     assert(ws->tableValidEnd >= ws->objectEnd);
     assert(ws->tableValidEnd <= ws->allocStart);
     if (ws->tableValidEnd < ws->tableEnd) {
-        memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
+        ZSTD_memset(ws->tableValidEnd, 0, (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd));
     }
     ZSTD_cwksp_mark_tables_clean(ws);
 }
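
Several hunks above lean on __msan_test_shadow(), which reports the offset of the first poisoned (uninitialized) byte in a range, or -1 if the whole range is initialized. The standalone snippet below illustrates that contract as we read the MSan interface; it is only meaningful when built with clang -fsanitize=memory.

/* Illustration of the __msan_test_shadow() contract relied on above. */
#include <sanitizer/msan_interface.h>
#include <assert.h>
#include <string.h>

int main(void) {
    char buf[64];
    /* A fresh stack buffer is fully poisoned: the first bad byte is at offset 0. */
    assert(__msan_test_shadow(buf, sizeof(buf)) == 0);
    buf[0] = 'x';                       /* initialize only the first byte */
    assert(__msan_test_shadow(buf, sizeof(buf)) == 1);
    memset(buf, 0, sizeof(buf));        /* initialize everything */
    assert(__msan_test_shadow(buf, sizeof(buf)) == -1);  /* clean range: -1 */
    return 0;
}
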
@@ -402,8 +579,12 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
 MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
     DEBUGLOG(4, "cwksp: clearing tables!");
 
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
-    {
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    /* We don't do this when the workspace is statically allocated, because
+     * when that is the case, we have no capability to hook into the end of the
+     * workspace's lifecycle to unpoison the memory.
+     */
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
         size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
         __asan_poison_memory_region(ws->objectEnd, size);
     }
@@ -420,29 +601,36 @@ MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
 MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
     DEBUGLOG(4, "cwksp: clearing!");
 
-#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
+#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
     /* To validate that the context re-use logic is sound, and that we don't
      * access stuff that this compression hasn't initialized, we re-"poison"
-     * the workspace (or at least the non-static, non-table parts of it)
-     * every time we start a new compression. */
+     * the workspace except for the areas in which we expect memory re-use
+     * without initialization (objects, valid tables area and init once
+     * memory). */
     {
-        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->tableValidEnd;
-        __msan_poison(ws->tableValidEnd, size);
+        if((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) {
+            size_t size = (BYTE*)ws->initOnceStart - (BYTE*)ws->tableValidEnd;
+            __msan_poison(ws->tableValidEnd, size);
+        }
     }
 #endif
 
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
-    {
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    /* We don't do this when the workspace is statically allocated, because
+     * when that is the case, we have no capability to hook into the end of the
+     * workspace's lifecycle to unpoison the memory.
+     */
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
         size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
         __asan_poison_memory_region(ws->objectEnd, size);
     }
 #endif
 
     ws->tableEnd = ws->objectEnd;
-    ws->allocStart = ws->workspaceEnd;
+    ws->allocStart = ZSTD_cwksp_initialAllocStart(ws);
     ws->allocFailed = 0;
-    if (ws->phase > ZSTD_cwksp_alloc_buffers) {
-        ws->phase = ZSTD_cwksp_alloc_buffers;
+    if (ws->phase > ZSTD_cwksp_alloc_aligned_init_once) {
+        ws->phase = ZSTD_cwksp_alloc_aligned_init_once;
     }
     ZSTD_cwksp_assert_internal_consistency(ws);
 }
@@ -452,47 +640,54 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
  * Any existing values in the workspace are ignored (the previously managed
  * buffer, if present, must be separately freed).
  */
-MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
+MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
     DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
     assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
     ws->workspace = start;
     ws->workspaceEnd = (BYTE*)start + size;
     ws->objectEnd = ws->workspace;
     ws->tableValidEnd = ws->objectEnd;
+    ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);
     ws->phase = ZSTD_cwksp_alloc_objects;
+    ws->isStatic = isStatic;
     ZSTD_cwksp_clear(ws);
     ws->workspaceOversizedDuration = 0;
     ZSTD_cwksp_assert_internal_consistency(ws);
 }
 
 MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
-    void* workspace = ZSTD_malloc(size, customMem);
+    void* workspace = ZSTD_customMalloc(size, customMem);
     DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
-    RETURN_ERROR_IF(workspace == NULL, memory_allocation);
-    ZSTD_cwksp_init(ws, workspace, size);
+    RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
+    ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
     return 0;
 }
 
 MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
     void *ptr = ws->workspace;
     DEBUGLOG(4, "cwksp: freeing workspace");
-    memset(ws, 0, sizeof(ZSTD_cwksp));
-    ZSTD_free(ptr, customMem);
+    ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
+    ZSTD_customFree(ptr, customMem);
 }
 
 /**
  * Moves the management of a workspace from one cwksp to another. The src cwksp
- * is left in an invalid state (src must be re-init()'ed before its used again).
+ * is left in an invalid state (src must be re-init()'ed before it's used again).
  */
 MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
     *dst = *src;
-    memset(src, 0, sizeof(ZSTD_cwksp));
+    ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
 }
 
 MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
     return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
 }
 
+MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
+    return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
+         + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
+}
+
 MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
     return ws->allocFailed;
 }
@@ -501,6 +696,18 @@ MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
 *  Functions Checking Free Space
 ***************************************/
 
+/* ZSTD_alignmentSpaceWithinBounds() :
+ * Returns if the estimated space needed for a wksp is within an acceptable limit of the
+ * actual amount of space used.
+ */
+MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp *const ws, size_t const estimatedSpace) {
+    /* We have an alignment space between objects and tables between tables and buffers, so we can have up to twice
+     * the alignment bytes difference between estimation and actual usage */
+    return (estimatedSpace - ZSTD_cwksp_slack_space_required()) <= ZSTD_cwksp_used(ws) &&
+           ZSTD_cwksp_used(ws) <= estimatedSpace;
+}
+
+
 MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
     return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);