zstd-ruby 1.4.5.0 → 1.5.1.1
- checksums.yaml +4 -4
- data/.github/dependabot.yml +8 -0
- data/.github/workflows/ruby.yml +35 -0
- data/README.md +2 -2
- data/ext/zstdruby/extconf.rb +2 -1
- data/ext/zstdruby/libzstd/BUCK +5 -7
- data/ext/zstdruby/libzstd/Makefile +225 -222
- data/ext/zstdruby/libzstd/README.md +43 -5
- data/ext/zstdruby/libzstd/common/bitstream.h +46 -22
- data/ext/zstdruby/libzstd/common/compiler.h +182 -22
- data/ext/zstdruby/libzstd/common/cpu.h +1 -3
- data/ext/zstdruby/libzstd/common/debug.c +1 -1
- data/ext/zstdruby/libzstd/common/debug.h +12 -19
- data/ext/zstdruby/libzstd/common/entropy_common.c +196 -44
- data/ext/zstdruby/libzstd/common/error_private.c +2 -1
- data/ext/zstdruby/libzstd/common/error_private.h +82 -3
- data/ext/zstdruby/libzstd/common/fse.h +41 -12
- data/ext/zstdruby/libzstd/common/fse_decompress.c +139 -22
- data/ext/zstdruby/libzstd/common/huf.h +47 -23
- data/ext/zstdruby/libzstd/common/mem.h +87 -98
- data/ext/zstdruby/libzstd/common/pool.c +23 -17
- data/ext/zstdruby/libzstd/common/pool.h +2 -2
- data/ext/zstdruby/libzstd/common/portability_macros.h +131 -0
- data/ext/zstdruby/libzstd/common/threading.c +6 -5
- data/ext/zstdruby/libzstd/common/xxhash.c +6 -846
- data/ext/zstdruby/libzstd/common/xxhash.h +5568 -167
- data/ext/zstdruby/libzstd/common/zstd_common.c +10 -10
- data/ext/zstdruby/libzstd/common/zstd_deps.h +111 -0
- data/ext/zstdruby/libzstd/common/zstd_internal.h +189 -142
- data/ext/zstdruby/libzstd/common/zstd_trace.h +163 -0
- data/ext/zstdruby/libzstd/compress/clevels.h +134 -0
- data/ext/zstdruby/libzstd/compress/fse_compress.c +89 -46
- data/ext/zstdruby/libzstd/compress/hist.c +27 -29
- data/ext/zstdruby/libzstd/compress/hist.h +2 -2
- data/ext/zstdruby/libzstd/compress/huf_compress.c +770 -198
- data/ext/zstdruby/libzstd/compress/zstd_compress.c +2894 -863
- data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +390 -90
- data/ext/zstdruby/libzstd/compress/zstd_compress_literals.c +12 -11
- data/ext/zstdruby/libzstd/compress/zstd_compress_literals.h +4 -2
- data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.c +31 -8
- data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.c +25 -297
- data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_cwksp.h +206 -69
- data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +307 -132
- data/ext/zstdruby/libzstd/compress/zstd_double_fast.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_fast.c +322 -143
- data/ext/zstdruby/libzstd/compress/zstd_fast.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_lazy.c +1136 -174
- data/ext/zstdruby/libzstd/compress/zstd_lazy.h +59 -1
- data/ext/zstdruby/libzstd/compress/zstd_ldm.c +316 -213
- data/ext/zstdruby/libzstd/compress/zstd_ldm.h +9 -2
- data/ext/zstdruby/libzstd/compress/zstd_ldm_geartab.h +106 -0
- data/ext/zstdruby/libzstd/compress/zstd_opt.c +373 -150
- data/ext/zstdruby/libzstd/compress/zstd_opt.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +152 -444
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +31 -113
- data/ext/zstdruby/libzstd/decompress/huf_decompress.c +1044 -403
- data/ext/zstdruby/libzstd/decompress/huf_decompress_amd64.S +571 -0
- data/ext/zstdruby/libzstd/decompress/zstd_ddict.c +9 -9
- data/ext/zstdruby/libzstd/decompress/zstd_ddict.h +2 -2
- data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +450 -105
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.c +913 -273
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.h +14 -5
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_internal.h +59 -12
- data/ext/zstdruby/libzstd/deprecated/zbuff.h +1 -1
- data/ext/zstdruby/libzstd/deprecated/zbuff_common.c +1 -1
- data/ext/zstdruby/libzstd/deprecated/zbuff_compress.c +24 -4
- data/ext/zstdruby/libzstd/deprecated/zbuff_decompress.c +1 -1
- data/ext/zstdruby/libzstd/dictBuilder/cover.c +55 -38
- data/ext/zstdruby/libzstd/dictBuilder/cover.h +7 -6
- data/ext/zstdruby/libzstd/dictBuilder/divsufsort.c +1 -1
- data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +43 -34
- data/ext/zstdruby/libzstd/dictBuilder/zdict.c +128 -58
- data/ext/zstdruby/libzstd/dll/example/Makefile +1 -1
- data/ext/zstdruby/libzstd/dll/example/README.md +16 -22
- data/ext/zstdruby/libzstd/legacy/zstd_legacy.h +1 -1
- data/ext/zstdruby/libzstd/legacy/zstd_v01.c +8 -8
- data/ext/zstdruby/libzstd/legacy/zstd_v01.h +1 -1
- data/ext/zstdruby/libzstd/legacy/zstd_v02.c +9 -9
- data/ext/zstdruby/libzstd/legacy/zstd_v02.h +1 -1
- data/ext/zstdruby/libzstd/legacy/zstd_v03.c +9 -9
- data/ext/zstdruby/libzstd/legacy/zstd_v03.h +1 -1
- data/ext/zstdruby/libzstd/legacy/zstd_v04.c +10 -10
- data/ext/zstdruby/libzstd/legacy/zstd_v04.h +1 -1
- data/ext/zstdruby/libzstd/legacy/zstd_v05.c +13 -13
- data/ext/zstdruby/libzstd/legacy/zstd_v05.h +1 -1
- data/ext/zstdruby/libzstd/legacy/zstd_v06.c +13 -13
- data/ext/zstdruby/libzstd/legacy/zstd_v06.h +1 -1
- data/ext/zstdruby/libzstd/legacy/zstd_v07.c +13 -13
- data/ext/zstdruby/libzstd/legacy/zstd_v07.h +1 -1
- data/ext/zstdruby/libzstd/libzstd.mk +185 -0
- data/ext/zstdruby/libzstd/libzstd.pc.in +4 -3
- data/ext/zstdruby/libzstd/modulemap/module.modulemap +4 -0
- data/ext/zstdruby/libzstd/{dictBuilder/zdict.h → zdict.h} +154 -7
- data/ext/zstdruby/libzstd/zstd.h +699 -214
- data/ext/zstdruby/libzstd/{common/zstd_errors.h → zstd_errors.h} +2 -1
- data/ext/zstdruby/zstdruby.c +2 -2
- data/lib/zstd-ruby/version.rb +1 -1
- metadata +15 -6
- data/.travis.yml +0 -14
data/ext/zstdruby/libzstd/compress/zstd_cwksp.h:

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * Copyright (c) Yann Collet, Facebook, Inc.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -35,6 +35,10 @@ extern "C" {
 #define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
 #endif
 
+
+/* Set our tables and aligneds to align by 64 bytes */
+#define ZSTD_CWKSP_ALIGNMENT_BYTES 64
+
 /*-*************************************
 *  Structures
 ***************************************/
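The new ZSTD_CWKSP_ALIGNMENT_BYTES constant feeds the rounding done by ZSTD_cwksp_align(). As a minimal standalone sketch of that power-of-two rounding (illustrative code, not the library's):

```c
#include <assert.h>
#include <stddef.h>

/* Round size up to the next multiple of align (a power of two),
 * the same mask arithmetic ZSTD_cwksp_align() uses. */
static size_t align_up(size_t size, size_t align)
{
    size_t const mask = align - 1;
    assert((align & mask) == 0);  /* power of two */
    return (size + mask) & ~mask;
}

int main(void)
{
    assert(align_up(1, 64)  == 64);
    assert(align_up(64, 64) == 64);
    assert(align_up(65, 64) == 128);
    return 0;
}
```

The mask trick only works because 64 is a power of two, which the library asserts at compile time.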
@@ -44,6 +48,16 @@ typedef enum {
     ZSTD_cwksp_alloc_aligned
 } ZSTD_cwksp_alloc_phase_e;
 
+/**
+ * Used to describe whether the workspace is statically allocated (and will not
+ * necessarily ever be freed), or if it's dynamically allocated and we can
+ * expect a well-formed caller to free this.
+ */
+typedef enum {
+    ZSTD_cwksp_dynamic_alloc,
+    ZSTD_cwksp_static_alloc
+} ZSTD_cwksp_static_alloc_e;
+
 /**
  * Zstd fits all its internal datastructures into a single continuous buffer,
  * so that it only needs to perform a single OS allocation (or so that a buffer
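The new ZSTD_cwksp_static_alloc_e flag records whether the workspace memory is caller-owned. This matters for the sanitizer hooks later in this diff: the library never frees a static workspace, so it has no chance to unpoison that memory at the end of its lifecycle. A sketch of the caller-owned case via zstd's public static-context API (the helper name and error handling here are illustrative):

```c
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_initStaticCCtx() lives in the advanced API */
#include <zstd.h>
#include <stdlib.h>

/* Hypothetical helper: compress using a caller-owned workspace. */
static int compress_with_static_workspace(void)
{
    size_t const wkspSize = ZSTD_estimateCCtxSize(3);   /* level 3 */
    void* const wksp = malloc(wkspSize);
    ZSTD_CCtx* const cctx = wksp ? ZSTD_initStaticCCtx(wksp, wkspSize) : NULL;
    if (cctx == NULL) { free(wksp); return -1; }
    /* ... use cctx with ZSTD_compressCCtx() ... */
    /* No ZSTD_freeCCtx(): the library never frees a static cctx,
     * which is why the cwksp must know it is "static". */
    free(wksp);
    return 0;
}
```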
@@ -92,7 +106,7 @@ typedef enum {
  *
  * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
  *   so that literally everything fits in a single buffer. Note: if present,
- *   this must be the first object in the workspace, since ZSTD_free{CCtx,
+ *   this must be the first object in the workspace, since ZSTD_customFree{CCtx,
  *   CDict}() rely on a pointer comparison to see whether one or two frees are
  *   required.
  *
@@ -107,10 +121,11 @@ typedef enum {
  * - Tables: these are any of several different datastructures (hash tables,
  *   chain tables, binary trees) that all respect a common format: they are
  *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
- *   Their sizes depend on the cparams.
+ *   Their sizes depend on the cparams. These tables are 64-byte aligned.
  *
  * - Aligned: these buffers are used for various purposes that require 4 byte
- *   alignment, but don't require any initialization before they're used.
+ *   alignment, but don't require any initialization before they're used. These
+ *   buffers are each aligned to 64 bytes.
  *
  * - Buffers: these buffers are used for various purposes that don't require
  *   any alignment or initialization before they're used. This means they can
@@ -123,8 +138,7 @@ typedef enum {
  *
  * 1. Objects
  * 2. Buffers
- * 3. Aligned
- * 4. Tables
+ * 3. Aligned/Tables
  *
  * Attempts to reserve objects of different types out of order will fail.
  */
@@ -137,9 +151,10 @@ typedef struct {
     void* tableValidEnd;
     void* allocStart;
 
-    int allocFailed;
+    BYTE allocFailed;
     int workspaceOversizedDuration;
     ZSTD_cwksp_alloc_phase_e phase;
+    ZSTD_cwksp_static_alloc_e isStatic;
 } ZSTD_cwksp;
 
 /*-*************************************
@@ -176,39 +191,123 @@ MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
  * Since tables aren't currently redzoned, you don't need to call through this
  * to figure out how much space you need for the matchState tables. Everything
  * else is though.
+ *
+ * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size().
  */
 MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    if (size == 0)
+        return 0;
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
 #else
     return size;
 #endif
 }
 
-MEM_STATIC void ZSTD_cwksp_internal_advance_phase(
+/**
+ * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
+ * Used to determine the number of bytes required for a given "aligned".
+ */
+MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
+    return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES));
+}
+
+/**
+ * Returns the amount of additional space the cwksp must allocate
+ * for internal purposes (currently only alignment).
+ */
+MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
+    /* For alignment, the wksp will always allocate an additional n_1=[1, 64] bytes
+     * to align the beginning of tables section, as well as another n_2=[0, 63] bytes
+     * to align the beginning of the aligned section.
+     *
+     * n_1 + n_2 == 64 bytes if the cwksp is freshly allocated, due to tables and
+     * aligneds being sized in multiples of 64 bytes.
+     */
+    size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES;
+    return slackSpace;
+}
+
+
+/**
+ * Return the number of additional bytes required to align a pointer to the given number of bytes.
+ * alignBytes must be a power of two.
+ */
+MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
+    size_t const alignBytesMask = alignBytes - 1;
+    size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
+    assert((alignBytes & alignBytesMask) == 0);
+    assert(bytes != ZSTD_CWKSP_ALIGNMENT_BYTES);
+    return bytes;
+}
+
+/**
+ * Internal function. Do not use directly.
+ * Reserves the given number of bytes within the aligned/buffer segment of the wksp, which
+ * counts from the end of the wksp. (as opposed to the object/table segment)
+ *
+ * Returns a pointer to the beginning of that space.
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes) {
+    void* const alloc = (BYTE*)ws->allocStart - bytes;
+    void* const bottom = ws->tableEnd;
+    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
+        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
+    ZSTD_cwksp_assert_internal_consistency(ws);
+    assert(alloc >= bottom);
+    if (alloc < bottom) {
+        DEBUGLOG(4, "cwksp: alloc failed!");
+        ws->allocFailed = 1;
+        return NULL;
+    }
+    if (alloc < ws->tableValidEnd) {
+        ws->tableValidEnd = alloc;
+    }
+    ws->allocStart = alloc;
+    return alloc;
+}
+
+/**
+ * Moves the cwksp to the next phase, and does any necessary allocations.
+ * Returns a 0 on success, or zstd error
+ */
+MEM_STATIC size_t ZSTD_cwksp_internal_advance_phase(
         ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
     assert(phase >= ws->phase);
     if (phase > ws->phase) {
+        /* Going from allocating objects to allocating buffers */
         if (ws->phase < ZSTD_cwksp_alloc_buffers &&
                 phase >= ZSTD_cwksp_alloc_buffers) {
             ws->tableValidEnd = ws->objectEnd;
         }
+
+        /* Going from allocating buffers to allocating aligneds/tables */
        if (ws->phase < ZSTD_cwksp_alloc_aligned &&
                 phase >= ZSTD_cwksp_alloc_aligned) {
-            /* If unaligned allocations down from a too-large top have left us
-             * unaligned, we need to realign our alloc ptr. Technically, this
-             * can consume space that is unaccounted for in the neededSpace
-             * calculation. However, I believe this can only happen when the
-             * workspace is too large, and specifically when it is too large
-             * by a larger margin than the space that will be consumed. */
-            /* TODO: cleaner, compiler warning friendly way to do this??? */
-            ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));
-            if (ws->allocStart < ws->tableValidEnd) {
-                ws->tableValidEnd = ws->allocStart;
+            { /* Align the start of the "aligned" to 64 bytes. Use [1, 64] bytes. */
+                size_t const bytesToAlign =
+                    ZSTD_CWKSP_ALIGNMENT_BYTES - ZSTD_cwksp_bytes_to_align_ptr(ws->allocStart, ZSTD_CWKSP_ALIGNMENT_BYTES);
+                DEBUGLOG(5, "reserving aligned alignment addtl space: %zu", bytesToAlign);
+                ZSTD_STATIC_ASSERT((ZSTD_CWKSP_ALIGNMENT_BYTES & (ZSTD_CWKSP_ALIGNMENT_BYTES - 1)) == 0); /* power of 2 */
+                RETURN_ERROR_IF(!ZSTD_cwksp_reserve_internal_buffer_space(ws, bytesToAlign),
+                    memory_allocation, "aligned phase - alignment initial allocation failed!");
+            }
+            { /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
+                void* const alloc = ws->objectEnd;
+                size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
+                void* const end = (BYTE*)alloc + bytesToAlign;
+                DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
+                RETURN_ERROR_IF(end > ws->workspaceEnd, memory_allocation,
+                    "table phase - alignment initial allocation failed!");
+                ws->objectEnd = end;
+                ws->tableEnd = end;
+                ws->tableValidEnd = end;
             }
         }
         ws->phase = phase;
+        ZSTD_cwksp_assert_internal_consistency(ws);
     }
+    return 0;
 }
 
 /**
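ZSTD_cwksp_bytes_to_align_ptr() computes the padding needed to reach the next 64-byte boundary; the aligned section then reserves 64 minus that padding, a value in [1, 64], which is exactly the n_1/n_2 bookkeeping described in ZSTD_cwksp_slack_space_required(). A standalone restatement of the arithmetic (illustrative, not the library's code):

```c
#include <assert.h>
#include <stdint.h>
#include <stddef.h>

/* Standalone copy of the ZSTD_cwksp_bytes_to_align_ptr() arithmetic:
 * bytes to add to ptr to reach the next 64-byte boundary. */
static size_t bytes_to_align(uintptr_t ptr, size_t alignBytes)
{
    size_t const mask = alignBytes - 1;
    return (alignBytes - (ptr & mask)) & mask;
}

int main(void)
{
    assert(bytes_to_align(64, 64) == 0);   /* already aligned */
    assert(bytes_to_align(65, 64) == 63);  /* 65 + 63 == 128 */
    assert(bytes_to_align(127, 64) == 1);
    /* The aligned section reserves 64 - bytes_to_align(...), a value in
     * [1, 64]; the table section reserves bytes_to_align(...), in [0, 63].
     * On a fresh workspace the two sum to exactly 64. */
    return 0;
}
```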
@@ -224,34 +323,26 @@ MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
 MEM_STATIC void* ZSTD_cwksp_reserve_internal(
         ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
     void* alloc;
-    void* bottom = ws->tableEnd;
-    ZSTD_cwksp_internal_advance_phase(ws, phase);
-    alloc = (BYTE *)ws->allocStart - bytes;
+    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
+        return NULL;
+    }
 
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     /* over-reserve space */
-    alloc = (BYTE *)alloc - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+    bytes += 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
 #endif
 
-    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
-        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
-    ZSTD_cwksp_assert_internal_consistency(ws);
-    assert(alloc >= bottom);
-    if (alloc < bottom) {
-        DEBUGLOG(4, "cwksp: alloc failed!");
-        ws->allocFailed = 1;
-        return NULL;
-    }
-    if (alloc < ws->tableValidEnd) {
-        ws->tableValidEnd = alloc;
-    }
-    ws->allocStart = alloc;
+    alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);
 
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
      * either size. */
-    alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
-    __asan_unpoison_memory_region(alloc, bytes);
+    if (alloc) {
+        alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+        if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+            __asan_unpoison_memory_region(alloc, bytes);
+        }
+    }
 #endif
 
     return alloc;
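ZSTD_cwksp_internal_advance_phase() now returns a size_t error code instead of void, so the reserve functions can propagate failure as NULL instead of only asserting. The same size_t-as-error convention applies at zstd's public API; a minimal caller-side check (the check() helper is hypothetical):

```c
#include <stdio.h>
#include <zstd.h>

/* Hypothetical caller-side helper: a size_t return from zstd is either a
 * valid size or an error code, distinguished by ZSTD_isError(). */
static int check(size_t ret)
{
    if (ZSTD_isError(ret)) {
        fprintf(stderr, "zstd: %s\n", ZSTD_getErrorName(ret));
        return -1;
    }
    return 0;
}
```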
@@ -265,28 +356,36 @@ MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
 }
 
 /**
- * Reserves and returns memory sized on and aligned on sizeof(unsigned).
+ * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
  */
 MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
-    assert((bytes & (sizeof(U32)-1)) == 0);
-    return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
+    void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
+                                            ZSTD_cwksp_alloc_aligned);
+    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
+    return ptr;
 }
 
 /**
- * Aligned on sizeof(unsigned). These buffers have the special property that
+ * Aligned on 64 bytes. These buffers have the special property that
  * their values remain constrained, allowing us to re-use them without
  * memset()-ing them.
  */
 MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
     const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
-    void* alloc = ws->tableEnd;
-    void* end = (BYTE *)alloc + bytes;
-    void* top = ws->allocStart;
+    void* alloc;
+    void* end;
+    void* top;
+
+    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
+        return NULL;
+    }
+    alloc = ws->tableEnd;
+    end = (BYTE *)alloc + bytes;
+    top = ws->allocStart;
 
     DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
         alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
     assert((bytes & (sizeof(U32)-1)) == 0);
-    ZSTD_cwksp_internal_advance_phase(ws, phase);
     ZSTD_cwksp_assert_internal_consistency(ws);
     assert(end <= top);
     if (end > top) {
@@ -296,10 +395,14 @@ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
     }
     ws->tableEnd = end;
 
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
-    __asan_unpoison_memory_region(alloc, bytes);
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+        __asan_unpoison_memory_region(alloc, bytes);
+    }
 #endif
 
+    assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
+    assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
     return alloc;
 }
 
@@ -311,7 +414,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
     void* alloc = ws->objectEnd;
     void* end = (BYTE*)alloc + roundedBytes;
 
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     /* over-reserve space */
     end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
 #endif
@@ -319,8 +422,8 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
     DEBUGLOG(5,
         "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
         alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
-    assert(((size_t)alloc & (sizeof(void*)-1)) == 0);
-    assert((bytes & (sizeof(void*)-1)) == 0);
+    assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
+    assert(bytes % ZSTD_ALIGNOF(void*) == 0);
     ZSTD_cwksp_assert_internal_consistency(ws);
     /* we must be in the first phase, no advance is possible */
     if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
@@ -332,11 +435,13 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
     ws->tableEnd = end;
     ws->tableValidEnd = end;
 
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
      * either size. */
     alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
-    __asan_unpoison_memory_region(alloc, bytes);
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+        __asan_unpoison_memory_region(alloc, bytes);
+    }
 #endif
 
     return alloc;
@@ -345,7 +450,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
 MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
     DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");
 
-#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
+#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
     /* To validate that the table re-use logic is sound, and that we don't
      * access table space that we haven't cleaned, we re-"poison" the table
      * space every time we mark it dirty. */
@@ -380,7 +485,7 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
     assert(ws->tableValidEnd >= ws->objectEnd);
     assert(ws->tableValidEnd <= ws->allocStart);
     if (ws->tableValidEnd < ws->tableEnd) {
-        memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
+        ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
     }
     ZSTD_cwksp_mark_tables_clean(ws);
 }
@@ -392,8 +497,12 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
 MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
     DEBUGLOG(4, "cwksp: clearing tables!");
 
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
-    {
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    /* We don't do this when the workspace is statically allocated, because
+     * when that is the case, we have no capability to hook into the end of the
+     * workspace's lifecycle to unpoison the memory.
+     */
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
         size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
         __asan_poison_memory_region(ws->objectEnd, size);
     }
@@ -410,7 +519,7 @@ MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
 MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
     DEBUGLOG(4, "cwksp: clearing!");
 
-#if defined (MEMORY_SANITIZER) && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
+#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
     /* To validate that the context re-use logic is sound, and that we don't
      * access stuff that this compression hasn't initialized, we re-"poison"
      * the workspace (or at least the non-static, non-table parts of it)
@@ -421,8 +530,12 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
     }
 #endif
 
-#if defined (ADDRESS_SANITIZER) && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
-    {
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    /* We don't do this when the workspace is statically allocated, because
+     * when that is the case, we have no capability to hook into the end of the
+     * workspace's lifecycle to unpoison the memory.
+     */
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
         size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
         __asan_poison_memory_region(ws->objectEnd, size);
     }
@@ -442,7 +555,7 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
  * Any existing values in the workspace are ignored (the previously managed
  * buffer, if present, must be separately freed).
  */
-MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
+MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
     DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
     assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
     ws->workspace = start;
@@ -450,39 +563,45 @@ MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
     ws->objectEnd = ws->workspace;
     ws->tableValidEnd = ws->objectEnd;
     ws->phase = ZSTD_cwksp_alloc_objects;
+    ws->isStatic = isStatic;
     ZSTD_cwksp_clear(ws);
     ws->workspaceOversizedDuration = 0;
     ZSTD_cwksp_assert_internal_consistency(ws);
 }
 
 MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
-    void* workspace = ZSTD_malloc(size, customMem);
+    void* workspace = ZSTD_customMalloc(size, customMem);
     DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
     RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
-    ZSTD_cwksp_init(ws, workspace, size);
+    ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
     return 0;
 }
 
 MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
     void *ptr = ws->workspace;
     DEBUGLOG(4, "cwksp: freeing workspace");
-    memset(ws, 0, sizeof(ZSTD_cwksp));
-    ZSTD_free(ptr, customMem);
+    ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
+    ZSTD_customFree(ptr, customMem);
 }
 
 /**
  * Moves the management of a workspace from one cwksp to another. The src cwksp
- * is left in an invalid state (src must be re-init()'ed before its used again).
+ * is left in an invalid state (src must be re-init()'ed before it's used again).
  */
 MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
     *dst = *src;
-    memset(src, 0, sizeof(ZSTD_cwksp));
+    ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
 }
 
 MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
     return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
 }
 
+MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
+    return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
+         + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
+}
+
 MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
     return ws->allocFailed;
 }
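The new ZSTD_cwksp_used() sums both consumed ends of the workspace: objects and tables grow up from the bottom while buffers and aligneds grow down from the top. A toy model of that two-ended accounting (all names illustrative, not the library's):

```c
#include <assert.h>
#include <stddef.h>

/* Toy model of the cwksp's two-ended allocation: tables/objects grow up from
 * the bottom, buffers/aligneds grow down from the top. */
typedef struct {
    char*  base;
    size_t size;
    size_t bottomUsed;  /* objects + tables, like tableEnd - workspace        */
    size_t topUsed;     /* buffers + aligneds, like workspaceEnd - allocStart */
} toy_wksp;

static size_t toy_used(const toy_wksp* w) {
    return w->bottomUsed + w->topUsed;  /* mirrors ZSTD_cwksp_used() */
}

static size_t toy_available(const toy_wksp* w) {
    return w->size - toy_used(w);       /* mirrors ZSTD_cwksp_available_space() */
}

int main(void)
{
    toy_wksp w = { 0, 1000, 0, 0 };
    w.bottomUsed += 200;   /* reserve a table  */
    w.topUsed    += 300;   /* reserve a buffer */
    assert(toy_used(&w) == 500);
    assert(toy_available(&w) == 500);
    return 0;
}
```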
@@ -491,6 +610,24 @@ MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
  * Functions Checking Free Space
  ***************************************/
 
+/* ZSTD_alignmentSpaceWithinBounds() :
+ * Returns if the estimated space needed for a wksp is within an acceptable limit of the
+ * actual amount of space used.
+ */
+MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp* const ws,
+                                                        size_t const estimatedSpace, int resizedWorkspace) {
+    if (resizedWorkspace) {
+        /* Resized/newly allocated wksp should have exact bounds */
+        return ZSTD_cwksp_used(ws) == estimatedSpace;
+    } else {
+        /* Due to alignment, when reusing a workspace, we can actually consume 63 fewer or more bytes
+         * than estimatedSpace. See the comments in zstd_cwksp.h for details.
+         */
+        return (ZSTD_cwksp_used(ws) >= estimatedSpace - 63) && (ZSTD_cwksp_used(ws) <= estimatedSpace + 63);
+    }
+}
+
+
 MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
     return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
 }
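The 63-byte tolerance follows from the alignment slack introduced earlier in this diff: on a reused workspace the two paddings n_1 in [1, 64] and n_2 in [0, 63] need not sum to exactly 64, so actual usage can drift up to 63 bytes either way from the estimate. A standalone restatement of the bound (illustrative only):

```c
#include <assert.h>
#include <stddef.h>

/* Restates the check ZSTD_cwksp_estimated_space_within_bounds() applies to a
 * reused workspace: usage may differ from the estimate by at most 63 bytes
 * in either direction. */
static int within_bounds(size_t used, size_t estimated)
{
    return (used >= estimated - 63) && (used <= estimated + 63);
}

int main(void)
{
    assert(within_bounds(1000, 1000));
    assert(within_bounds(1063, 1000));   /* 63 over  */
    assert(within_bounds(937,  1000));   /* 63 under */
    assert(!within_bounds(1064, 1000));
    return 0;
}
```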