zstd-ruby 1.5.1.1 → 1.5.5.0
- checksums.yaml +4 -4
- data/.gitignore +2 -0
- data/README.md +78 -5
- data/Rakefile +8 -2
- data/ext/zstdruby/common.h +15 -0
- data/ext/zstdruby/extconf.rb +1 -1
- data/ext/zstdruby/libzstd/common/allocations.h +55 -0
- data/ext/zstdruby/libzstd/common/bits.h +200 -0
- data/ext/zstdruby/libzstd/common/bitstream.h +19 -60
- data/ext/zstdruby/libzstd/common/compiler.h +26 -3
- data/ext/zstdruby/libzstd/common/cpu.h +1 -1
- data/ext/zstdruby/libzstd/common/debug.c +1 -1
- data/ext/zstdruby/libzstd/common/debug.h +1 -1
- data/ext/zstdruby/libzstd/common/entropy_common.c +12 -40
- data/ext/zstdruby/libzstd/common/error_private.c +9 -2
- data/ext/zstdruby/libzstd/common/error_private.h +1 -1
- data/ext/zstdruby/libzstd/common/fse.h +5 -83
- data/ext/zstdruby/libzstd/common/fse_decompress.c +7 -99
- data/ext/zstdruby/libzstd/common/huf.h +65 -156
- data/ext/zstdruby/libzstd/common/mem.h +39 -46
- data/ext/zstdruby/libzstd/common/pool.c +37 -16
- data/ext/zstdruby/libzstd/common/pool.h +9 -3
- data/ext/zstdruby/libzstd/common/portability_macros.h +28 -3
- data/ext/zstdruby/libzstd/common/threading.c +68 -14
- data/ext/zstdruby/libzstd/common/threading.h +5 -10
- data/ext/zstdruby/libzstd/common/xxhash.c +2 -2
- data/ext/zstdruby/libzstd/common/xxhash.h +8 -8
- data/ext/zstdruby/libzstd/common/zstd_common.c +1 -36
- data/ext/zstdruby/libzstd/common/zstd_deps.h +1 -1
- data/ext/zstdruby/libzstd/common/zstd_internal.h +20 -122
- data/ext/zstdruby/libzstd/common/zstd_trace.h +3 -3
- data/ext/zstdruby/libzstd/compress/clevels.h +1 -1
- data/ext/zstdruby/libzstd/compress/fse_compress.c +7 -124
- data/ext/zstdruby/libzstd/compress/hist.c +1 -1
- data/ext/zstdruby/libzstd/compress/hist.h +1 -1
- data/ext/zstdruby/libzstd/compress/huf_compress.c +234 -169
- data/ext/zstdruby/libzstd/compress/zstd_compress.c +1317 -594
- data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +272 -165
- data/ext/zstdruby/libzstd/compress/zstd_compress_literals.c +115 -39
- data/ext/zstdruby/libzstd/compress/zstd_compress_literals.h +16 -8
- data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.c +13 -13
- data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.c +25 -21
- data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_cwksp.h +162 -82
- data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +95 -33
- data/ext/zstdruby/libzstd/compress/zstd_double_fast.h +3 -2
- data/ext/zstdruby/libzstd/compress/zstd_fast.c +434 -149
- data/ext/zstdruby/libzstd/compress/zstd_fast.h +3 -2
- data/ext/zstdruby/libzstd/compress/zstd_lazy.c +405 -348
- data/ext/zstdruby/libzstd/compress/zstd_lazy.h +4 -2
- data/ext/zstdruby/libzstd/compress/zstd_ldm.c +9 -7
- data/ext/zstdruby/libzstd/compress/zstd_ldm.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_ldm_geartab.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_opt.c +149 -100
- data/ext/zstdruby/libzstd/compress/zstd_opt.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +32 -16
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +5 -2
- data/ext/zstdruby/libzstd/decompress/huf_decompress.c +434 -441
- data/ext/zstdruby/libzstd/decompress/huf_decompress_amd64.S +42 -37
- data/ext/zstdruby/libzstd/decompress/zstd_ddict.c +4 -4
- data/ext/zstdruby/libzstd/decompress/zstd_ddict.h +1 -1
- data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +205 -80
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.c +201 -81
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.h +6 -1
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_internal.h +4 -2
- data/ext/zstdruby/libzstd/dictBuilder/cover.c +19 -15
- data/ext/zstdruby/libzstd/dictBuilder/cover.h +1 -1
- data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +2 -2
- data/ext/zstdruby/libzstd/dictBuilder/zdict.c +13 -91
- data/ext/zstdruby/libzstd/zdict.h +53 -31
- data/ext/zstdruby/libzstd/zstd.h +580 -135
- data/ext/zstdruby/libzstd/zstd_errors.h +27 -8
- data/ext/zstdruby/main.c +20 -0
- data/ext/zstdruby/skippable_frame.c +63 -0
- data/ext/zstdruby/streaming_compress.c +177 -0
- data/ext/zstdruby/streaming_compress.h +5 -0
- data/ext/zstdruby/streaming_decompress.c +123 -0
- data/ext/zstdruby/zstdruby.c +113 -31
- data/lib/zstd-ruby/version.rb +1 -1
- data/lib/zstd-ruby.rb +0 -1
- data/zstd-ruby.gemspec +1 -1
- metadata +11 -37
- data/.github/dependabot.yml +0 -8
- data/.github/workflows/ruby.yml +0 -35
- data/ext/zstdruby/libzstd/.gitignore +0 -3
- data/ext/zstdruby/libzstd/BUCK +0 -232
- data/ext/zstdruby/libzstd/Makefile +0 -357
- data/ext/zstdruby/libzstd/README.md +0 -217
- data/ext/zstdruby/libzstd/deprecated/zbuff.h +0 -214
- data/ext/zstdruby/libzstd/deprecated/zbuff_common.c +0 -26
- data/ext/zstdruby/libzstd/deprecated/zbuff_compress.c +0 -167
- data/ext/zstdruby/libzstd/deprecated/zbuff_decompress.c +0 -75
- data/ext/zstdruby/libzstd/dll/example/Makefile +0 -48
- data/ext/zstdruby/libzstd/dll/example/README.md +0 -63
- data/ext/zstdruby/libzstd/dll/example/build_package.bat +0 -20
- data/ext/zstdruby/libzstd/dll/example/fullbench-dll.sln +0 -25
- data/ext/zstdruby/libzstd/dll/example/fullbench-dll.vcxproj +0 -181
- data/ext/zstdruby/libzstd/legacy/zstd_legacy.h +0 -415
- data/ext/zstdruby/libzstd/legacy/zstd_v01.c +0 -2158
- data/ext/zstdruby/libzstd/legacy/zstd_v01.h +0 -94
- data/ext/zstdruby/libzstd/legacy/zstd_v02.c +0 -3518
- data/ext/zstdruby/libzstd/legacy/zstd_v02.h +0 -93
- data/ext/zstdruby/libzstd/legacy/zstd_v03.c +0 -3160
- data/ext/zstdruby/libzstd/legacy/zstd_v03.h +0 -93
- data/ext/zstdruby/libzstd/legacy/zstd_v04.c +0 -3647
- data/ext/zstdruby/libzstd/legacy/zstd_v04.h +0 -142
- data/ext/zstdruby/libzstd/legacy/zstd_v05.c +0 -4050
- data/ext/zstdruby/libzstd/legacy/zstd_v05.h +0 -162
- data/ext/zstdruby/libzstd/legacy/zstd_v06.c +0 -4154
- data/ext/zstdruby/libzstd/legacy/zstd_v06.h +0 -172
- data/ext/zstdruby/libzstd/legacy/zstd_v07.c +0 -4541
- data/ext/zstdruby/libzstd/legacy/zstd_v07.h +0 -187
- data/ext/zstdruby/libzstd/libzstd.mk +0 -185
- data/ext/zstdruby/libzstd/libzstd.pc.in +0 -16
- data/ext/zstdruby/libzstd/modulemap/module.modulemap +0 -4
- data/ext/zstdruby/zstdruby.h +0 -6
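
Alongside the vendored libzstd bump (1.5.1 → 1.5.5), the gem gains new extension sources: `skippable_frame.c`, `streaming_compress.c`, and `streaming_decompress.c`, which wrap libzstd's streaming interface, while the `legacy/`, `deprecated/`, and `dll/` trees are dropped from the packaged copy. The hunks below cover `data/ext/zstdruby/libzstd/compress/zstd_cwksp.h`. For orientation, here is a minimal sketch of the public streaming-compression API those new files build on (plain `zstd.h` calls; buffer sizes and input are illustrative):

```c
#include <stdio.h>
#include <zstd.h>

/* Compress `src` in one streaming pass and print the compressed size.
 * Error handling is collapsed for brevity. */
int main(void) {
    const char src[] = "hello hello hello hello";
    char dst[256];

    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3);

    ZSTD_inBuffer in = { src, sizeof(src), 0 };
    ZSTD_outBuffer out = { dst, sizeof(dst), 0 };
    /* ZSTD_e_end flushes and writes the frame epilogue; returns 0 when done */
    size_t const remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
    if (ZSTD_isError(remaining) || remaining != 0) {
        ZSTD_freeCCtx(cctx);
        return 1;
    }
    printf("compressed %zu -> %zu bytes\n", in.size, out.pos);
    ZSTD_freeCCtx(cctx);
    return 0;
}
```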
data/ext/zstdruby/libzstd/compress/zstd_cwksp.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c)
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -14,7 +14,9 @@
 /*-*************************************
 *  Dependencies
 ***************************************/
+#include "../common/allocations.h"  /* ZSTD_customMalloc, ZSTD_customFree */
 #include "../common/zstd_internal.h"
+#include "../common/portability_macros.h"
 
 #if defined (__cplusplus)
 extern "C" {
@@ -44,8 +46,9 @@ extern "C" {
 ***************************************/
 typedef enum {
     ZSTD_cwksp_alloc_objects,
-    ZSTD_cwksp_alloc_buffers,
-    ZSTD_cwksp_alloc_aligned
+    ZSTD_cwksp_alloc_aligned_init_once,
+    ZSTD_cwksp_alloc_aligned,
+    ZSTD_cwksp_alloc_buffers
 } ZSTD_cwksp_alloc_phase_e;
 
 /**
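
The allocation-phase enum is reordered: a new `ZSTD_cwksp_alloc_aligned_init_once` phase follows `objects`, and `buffers` moves to the end. Later hunks compare phases with `<` and `>=`, so the declaration order itself is load-bearing. A toy restatement of that invariant (names are mine, not zstd's):

```c
/* Toy model of the phase invariant; names are illustrative, not zstd's. */
typedef enum {
    alloc_objects,            /* must come first */
    alloc_aligned_init_once,
    alloc_aligned,
    alloc_buffers             /* now last; carved from the end of the workspace */
} phase_e;

/* Phases may only move forward; requesting an earlier phase is an error. */
static int advance_phase(phase_e* current, phase_e requested) {
    if (requested < *current) return -1;
    *current = requested;
    return 0;
}
```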
@@ -98,8 +101,8 @@ typedef enum {
 *
 * Workspace Layout:
 *
- * [                        ... workspace ...
- * [objects][tables
+ * [                        ... workspace ...                           ]
+ * [objects][tables ->] free space [<- buffers][<- aligned][<- init once]
 *
 * The various objects that live in the workspace are divided into the
 * following categories, and are allocated separately:
@@ -123,9 +126,18 @@ typedef enum {
 * uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
 * Their sizes depend on the cparams. These tables are 64-byte aligned.
 *
- * -
- *
- *
+ * - Init once: these buffers require to be initialized at least once before
+ *   use. They should be used when we want to skip memory initialization
+ *   while not triggering memory checkers (like Valgrind) when reading from
+ *   from this memory without writing to it first.
+ *   These buffers should be used carefully as they might contain data
+ *   from previous compressions.
+ *   Buffers are aligned to 64 bytes.
+ *
+ * - Aligned: these buffers don't require any initialization before they're
+ *   used. The user of the buffer should make sure they write into a buffer
+ *   location before reading from it.
+ *   Buffers are aligned to 64 bytes.
 *
 * - Buffers: these buffers are used for various purposes that don't require
 *   any alignment or initialization before they're used. This means they can
@@ -137,8 +149,9 @@ typedef enum {
 * correctly packed into the workspace buffer. That order is:
 *
 * 1. Objects
- * 2.
- * 3. Aligned/Tables
+ * 2. Init once / Tables
+ * 3. Aligned / Tables
+ * 4. Buffers / Tables
 *
 * Attempts to reserve objects of different types out of order will fail.
 */
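
Given the documented order, a conforming caller reserves objects first, then tables/init-once memory, then aligned memory, then buffers. A hypothetical sequence (illustrative sizes, using the reserve functions defined later in this header) that respects the phases:

```c
/* Hypothetical reservation order against a ZSTD_cwksp* ws; an object
 * reservation after any of the later calls would fail, since the phase
 * only ever advances. */
static void example_reservation_order(ZSTD_cwksp* ws) {
    void* obj  = ZSTD_cwksp_reserve_object(ws, 64);    /* 1. objects            */
    void* tbl  = ZSTD_cwksp_reserve_table(ws, 4096);   /* 2. init once / tables */
    void* algn = ZSTD_cwksp_reserve_aligned(ws, 1024); /* 3. aligned            */
    BYTE* buf  = ZSTD_cwksp_reserve_buffer(ws, 300);   /* 4. buffers            */
    (void)obj; (void)tbl; (void)algn; (void)buf;
}
```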
@@ -150,6 +163,7 @@ typedef struct {
     void* tableEnd;
     void* tableValidEnd;
     void* allocStart;
+    void* initOnceStart;
 
     BYTE allocFailed;
     int workspaceOversizedDuration;
@@ -162,6 +176,7 @@ typedef struct {
 ***************************************/
 
 MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
+MEM_STATIC void*  ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws);
 
 MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
     (void)ws;
@@ -171,6 +186,20 @@ MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
     assert(ws->tableEnd <= ws->allocStart);
     assert(ws->tableValidEnd <= ws->allocStart);
     assert(ws->allocStart <= ws->workspaceEnd);
+    assert(ws->initOnceStart <= ZSTD_cwksp_initialAllocStart(ws));
+    assert(ws->workspace <= ws->initOnceStart);
+#if ZSTD_MEMORY_SANITIZER
+    {
+        intptr_t const offset = __msan_test_shadow(ws->initOnceStart,
+            (U8*)ZSTD_cwksp_initialAllocStart(ws) - (U8*)ws->initOnceStart);
+#if defined(ZSTD_MSAN_PRINT)
+        if(offset!=-1) {
+            __msan_print_shadow((U8*)ws->initOnceStart + offset - 8, 32);
+        }
+#endif
+        assert(offset==-1);
+    };
+#endif
 }
 
 /**
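
The new consistency check relies on MemorySanitizer's shadow API: `__msan_test_shadow(addr, size)` (declared in `<sanitizer/msan_interface.h>`) returns -1 when the whole range is initialized, otherwise the offset of the first poisoned byte — exactly the invariant asserted for the init-once region. A guarded usage pattern, as a sketch (the macro name is mine):

```c
/* Only meaningful under -fsanitize=memory; compiles away otherwise. */
#if defined(__has_feature)
#  if __has_feature(memory_sanitizer)
#    include <sanitizer/msan_interface.h>
#    define RANGE_IS_INITIALIZED(p, n) (__msan_test_shadow((p), (n)) == -1)
#  endif
#endif
#ifndef RANGE_IS_INITIALIZED
#  define RANGE_IS_INITIALIZED(p, n) 1  /* no MSAN: vacuously true */
#endif
```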
@@ -217,14 +246,10 @@ MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
 * for internal purposes (currently only alignment).
 */
 MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
-    /* For alignment, the wksp will always allocate an additional
-     * to align the beginning of tables section
-     * to align the beginning of the aligned section.
-     *
-     * n_1 + n_2 == 64 bytes if the cwksp is freshly allocated, due to tables and
-     * aligneds being sized in multiples of 64 bytes.
+    /* For alignment, the wksp will always allocate an additional 2*ZSTD_CWKSP_ALIGNMENT_BYTES
+     * bytes to align the beginning of tables section and end of buffers;
      */
-    size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES;
+    size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES * 2;
     return slackSpace;
 }
 
@@ -237,18 +262,28 @@ MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignByt
     size_t const alignBytesMask = alignBytes - 1;
     size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
     assert((alignBytes & alignBytesMask) == 0);
-    assert(bytes
+    assert(bytes < alignBytes);
     return bytes;
 }
 
+/**
+ * Returns the initial value for allocStart which is used to determine the position from
+ * which we can allocate from the end of the workspace.
+ */
+MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws) {
+    return (void*)((size_t)ws->workspaceEnd & ~(ZSTD_CWKSP_ALIGNMENT_BYTES-1));
+}
+
 /**
  * Internal function. Do not use directly.
- * Reserves the given number of bytes within the aligned/buffer segment of the wksp,
- * counts from the end of the wksp
+ * Reserves the given number of bytes within the aligned/buffer segment of the wksp,
+ * which counts from the end of the wksp (as opposed to the object/table segment).
  *
  * Returns a pointer to the beginning of that space.
  */
-MEM_STATIC void*
+MEM_STATIC void*
+ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
+{
     void* const alloc = (BYTE*)ws->allocStart - bytes;
     void* const bottom = ws->tableEnd;
     DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
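
The tightened assertion matches what the mask arithmetic can produce: `(alignBytes - (ptr & mask)) & mask` always lands in [0, alignBytes-1], because the trailing `& mask` folds the already-aligned case back to 0; likewise the new `ZSTD_cwksp_initialAllocStart()` simply rounds `workspaceEnd` down to a 64-byte boundary. A standalone, compilable restatement of the alignment helper (assumes a power-of-two alignment, as the assert above enforces):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* How many bytes to advance `ptr` so it lands on an `align`-byte boundary
 * (align must be a power of two). */
static size_t bytes_to_align(uintptr_t ptr, size_t align) {
    size_t const mask = align - 1;
    return (align - (ptr & mask)) & mask;   /* final mask maps align -> 0 */
}

int main(void) {
    assert(bytes_to_align(0x1000, 64) == 0);   /* already aligned */
    assert(bytes_to_align(0x1001, 64) == 63);  /* worst case: align-1 */
    assert(bytes_to_align(0x103F, 64) == 1);
    printf("ok\n");
    return 0;
}
```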
@@ -260,6 +295,8 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t
         ws->allocFailed = 1;
         return NULL;
     }
+    /* the area is reserved from the end of wksp.
+     * If it overlaps with tableValidEnd, it voids guarantees on values' range */
     if (alloc < ws->tableValidEnd) {
         ws->tableValidEnd = alloc;
     }
@@ -269,39 +306,32 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t
 
 /**
  * Moves the cwksp to the next phase, and does any necessary allocations.
+ * cwksp initialization must necessarily go through each phase in order.
  * Returns a 0 on success, or zstd error
  */
-MEM_STATIC size_t
-
+MEM_STATIC size_t
+ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
+{
     assert(phase >= ws->phase);
     if (phase > ws->phase) {
-        /* Going from allocating objects to allocating
-        if (ws->phase <
-
+        /* Going from allocating objects to allocating initOnce / tables */
+        if (ws->phase < ZSTD_cwksp_alloc_aligned_init_once &&
+            phase >= ZSTD_cwksp_alloc_aligned_init_once) {
             ws->tableValidEnd = ws->objectEnd;
-
+            ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);
 
-        /* Going from allocating buffers to allocating aligneds/tables */
-        if (ws->phase < ZSTD_cwksp_alloc_aligned &&
-            phase >= ZSTD_cwksp_alloc_aligned) {
-            { /* Align the start of the "aligned" to 64 bytes. Use [1, 64] bytes. */
-                size_t const bytesToAlign =
-                    ZSTD_CWKSP_ALIGNMENT_BYTES - ZSTD_cwksp_bytes_to_align_ptr(ws->allocStart, ZSTD_CWKSP_ALIGNMENT_BYTES);
-                DEBUGLOG(5, "reserving aligned alignment addtl space: %zu", bytesToAlign);
-                ZSTD_STATIC_ASSERT((ZSTD_CWKSP_ALIGNMENT_BYTES & (ZSTD_CWKSP_ALIGNMENT_BYTES - 1)) == 0); /* power of 2 */
-                RETURN_ERROR_IF(!ZSTD_cwksp_reserve_internal_buffer_space(ws, bytesToAlign),
-                                memory_allocation, "aligned phase - alignment initial allocation failed!");
-            }
             { /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
-                void*
+                void *const alloc = ws->objectEnd;
                 size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
-                void*
+                void *const objectEnd = (BYTE *) alloc + bytesToAlign;
                 DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
-                RETURN_ERROR_IF(
+                RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation,
                                 "table phase - alignment initial allocation failed!");
-                ws->objectEnd =
-                ws->tableEnd =
-                ws->tableValidEnd
+                ws->objectEnd = objectEnd;
+                ws->tableEnd = objectEnd;  /* table area starts being empty */
+                if (ws->tableValidEnd < ws->tableEnd) {
+                    ws->tableValidEnd = ws->tableEnd;
+                }
             }
         }
     ws->phase = phase;
@@ -313,15 +343,17 @@ MEM_STATIC size_t ZSTD_cwksp_internal_advance_phase(
 /**
  * Returns whether this object/buffer/etc was allocated in this workspace.
  */
-MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
-
+MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
+{
+    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr < ws->workspaceEnd);
 }
 
 /**
  * Internal function. Do not use directly.
 */
-MEM_STATIC void*
-
+MEM_STATIC void*
+ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase)
+{
     void* alloc;
     if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
         return NULL;
@@ -340,7 +372,9 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal(
     if (alloc) {
         alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
         if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
-
+            /* We need to keep the redzone poisoned while unpoisoning the bytes that
+             * are actually allocated. */
+            __asan_unpoison_memory_region(alloc, bytes - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE);
         }
     }
 #endif
@@ -351,14 +385,46 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal(
 /**
  * Reserves and returns unaligned memory.
  */
-MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
+MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
+{
     return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
 }
 
+/**
+ * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
+ * This memory has been initialized at least once in the past.
+ * This doesn't mean it has been initialized this time, and it might contain data from previous
+ * operations.
+ * The main usage is for algorithms that might need read access into uninitialized memory.
+ * The algorithm must maintain safety under these conditions and must make sure it doesn't
+ * leak any of the past data (directly or in side channels).
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_aligned_init_once(ZSTD_cwksp* ws, size_t bytes)
+{
+    size_t const alignedBytes = ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES);
+    void* ptr = ZSTD_cwksp_reserve_internal(ws, alignedBytes, ZSTD_cwksp_alloc_aligned_init_once);
+    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
+    if(ptr && ptr < ws->initOnceStart) {
+        /* We assume the memory following the current allocation is either:
+         * 1. Not usable as initOnce memory (end of workspace)
+         * 2. Another initOnce buffer that has been allocated before (and so was previously memset)
+         * 3. An ASAN redzone, in which case we don't want to write on it
+         * For these reasons it should be fine to not explicitly zero every byte up to ws->initOnceStart.
+         * Note that we assume here that MSAN and ASAN cannot run in the same time. */
+        ZSTD_memset(ptr, 0, MIN((size_t)((U8*)ws->initOnceStart - (U8*)ptr), alignedBytes));
+        ws->initOnceStart = ptr;
+    }
+#if ZSTD_MEMORY_SANITIZER
+    assert(__msan_test_shadow(ptr, bytes) == -1);
+#endif
+    return ptr;
+}
+
 /**
  * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
  */
-MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
+MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
+{
     void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
                                             ZSTD_cwksp_alloc_aligned);
     assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
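
`ZSTD_cwksp_reserve_aligned_init_once()` maintains `initOnceStart` as a high-water mark: everything from that pointer to the end of the workspace has been zeroed at least once, so a new reservation only needs to zero the bytes that fall below the current mark (hence the `MIN(...)` bound) before lowering it. A simplified model of that bookkeeping (not the zstd code):

```c
#include <string.h>

/* Everything in [initOnceStart, workspaceEnd) has been zeroed at least once. */
typedef struct { unsigned char* initOnceStart; } mini_wksp;

static void* reserve_init_once(mini_wksp* w, unsigned char* ptr, size_t bytes) {
    if (ptr < w->initOnceStart) {
        /* Only the part of the allocation below the watermark is new territory. */
        size_t const newlyCovered = (size_t)(w->initOnceStart - ptr);
        memset(ptr, 0, newlyCovered < bytes ? newlyCovered : bytes);
        w->initOnceStart = ptr;   /* lower the watermark */
    }
    return ptr;
}
```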
@@ -370,14 +436,19 @@ MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
 * their values remain constrained, allowing us to re-use them without
 * memset()-ing them.
 */
-MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
-
+MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
+{
+    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned_init_once;
     void* alloc;
     void* end;
     void* top;
 
-
-
+    /* We can only start allocating tables after we are done reserving space for objects at the
+     * start of the workspace */
+    if(ws->phase < phase) {
+        if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
+            return NULL;
+        }
     }
     alloc = ws->tableEnd;
     end = (BYTE *)alloc + bytes;
@@ -408,9 +479,11 @@ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
 
 /**
  * Aligned on sizeof(void*).
+ * Note : should happen only once, at workspace first initialization
 */
-MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
-
+MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
+{
+    size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
     void* alloc = ws->objectEnd;
     void* end = (BYTE*)alloc + roundedBytes;
 
@@ -419,7 +492,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
     end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
 #endif
 
-    DEBUGLOG(
+    DEBUGLOG(4,
         "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
         alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
     assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
@@ -427,7 +500,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
     ZSTD_cwksp_assert_internal_consistency(ws);
     /* we must be in the first phase, no advance is possible */
     if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
-        DEBUGLOG(
+        DEBUGLOG(3, "cwksp: object alloc failed!");
         ws->allocFailed = 1;
         return NULL;
     }
@@ -438,7 +511,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
 #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
      * either size. */
-    alloc = (BYTE
+    alloc = (BYTE*)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
     if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
         __asan_unpoison_memory_region(alloc, bytes);
     }
@@ -447,17 +520,26 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
     return alloc;
 }
 
-MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
+MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
+{
     DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");
 
 #if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
     /* To validate that the table re-use logic is sound, and that we don't
     * access table space that we haven't cleaned, we re-"poison" the table
-     * space every time we mark it dirty.
+     * space every time we mark it dirty.
+     * Since tableValidEnd space and initOnce space may overlap we don't poison
+     * the initOnce portion as it break its promise. This means that this poisoning
+     * check isn't always applied fully. */
     {
         size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
         assert(__msan_test_shadow(ws->objectEnd, size) == -1);
-
+        if((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) {
+            __msan_poison(ws->objectEnd, size);
+        } else {
+            assert(ws->initOnceStart >= ws->objectEnd);
+            __msan_poison(ws->objectEnd, (BYTE*)ws->initOnceStart - (BYTE*)ws->objectEnd);
+        }
     }
 #endif
 
@@ -485,7 +567,7 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
     assert(ws->tableValidEnd >= ws->objectEnd);
     assert(ws->tableValidEnd <= ws->allocStart);
     if (ws->tableValidEnd < ws->tableEnd) {
-        ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
+        ZSTD_memset(ws->tableValidEnd, 0, (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd));
     }
     ZSTD_cwksp_mark_tables_clean(ws);
 }
@@ -522,11 +604,14 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
 #if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
     /* To validate that the context re-use logic is sound, and that we don't
      * access stuff that this compression hasn't initialized, we re-"poison"
-     * the workspace
-     *
+     * the workspace except for the areas in which we expect memory re-use
+     * without initialization (objects, valid tables area and init once
+     * memory). */
     {
-
-
+        if((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) {
+            size_t size = (BYTE*)ws->initOnceStart - (BYTE*)ws->tableValidEnd;
+            __msan_poison(ws->tableValidEnd, size);
+        }
     }
 #endif
 
@@ -542,10 +627,10 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
 #endif
 
     ws->tableEnd = ws->objectEnd;
-    ws->allocStart = ws
+    ws->allocStart = ZSTD_cwksp_initialAllocStart(ws);
     ws->allocFailed = 0;
-    if (ws->phase >
-        ws->phase =
+    if (ws->phase > ZSTD_cwksp_alloc_aligned_init_once) {
+        ws->phase = ZSTD_cwksp_alloc_aligned_init_once;
     }
     ZSTD_cwksp_assert_internal_consistency(ws);
 }
@@ -562,6 +647,7 @@ MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_c
     ws->workspaceEnd = (BYTE*)start + size;
     ws->objectEnd = ws->workspace;
     ws->tableValidEnd = ws->objectEnd;
+    ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);
     ws->phase = ZSTD_cwksp_alloc_objects;
    ws->isStatic = isStatic;
     ZSTD_cwksp_clear(ws);
@@ -614,17 +700,11 @@ MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
 * Returns if the estimated space needed for a wksp is within an acceptable limit of the
 * actual amount of space used.
 */
-MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp*
-
-
-
-
-    } else {
-        /* Due to alignment, when reusing a workspace, we can actually consume 63 fewer or more bytes
-         * than estimatedSpace. See the comments in zstd_cwksp.h for details.
-         */
-        return (ZSTD_cwksp_used(ws) >= estimatedSpace - 63) && (ZSTD_cwksp_used(ws) <= estimatedSpace + 63);
-    }
+MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp *const ws, size_t const estimatedSpace) {
+    /* We have an alignment space between objects and tables between tables and buffers, so we can have up to twice
+     * the alignment bytes difference between estimation and actual usage */
+    return (estimatedSpace - ZSTD_cwksp_slack_space_required()) <= ZSTD_cwksp_used(ws) &&
+           ZSTD_cwksp_used(ws) <= estimatedSpace;
 }
 
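
The rewritten bounds check accepts any actual usage that is at most `ZSTD_cwksp_slack_space_required()` (now 2 × 64 = 128 bytes) below the estimate, replacing the old symmetric ±63-byte window. Worked with illustrative numbers:

```c
#include <assert.h>
#include <stddef.h>

/* Same shape as the new check, with the slack passed in explicitly. */
static int within_bounds(size_t used, size_t estimated, size_t slack) {
    return (estimated - slack) <= used && used <= estimated;
}

int main(void) {
    /* estimate 10 000 bytes, slack 128: usage of 9 872..10 000 passes */
    assert(within_bounds(10000, 10000, 128));
    assert(within_bounds(9872, 10000, 128));
    assert(!within_bounds(9871, 10000, 128));
    assert(!within_bounds(10001, 10000, 128));
    return 0;
}
```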