zstd-ruby 1.4.5.0 → 1.5.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.gitignore +2 -0
- data/README.md +78 -5
- data/Rakefile +8 -2
- data/ext/zstdruby/common.h +15 -0
- data/ext/zstdruby/extconf.rb +3 -2
- data/ext/zstdruby/libzstd/common/allocations.h +55 -0
- data/ext/zstdruby/libzstd/common/bits.h +200 -0
- data/ext/zstdruby/libzstd/common/bitstream.h +45 -62
- data/ext/zstdruby/libzstd/common/compiler.h +205 -22
- data/ext/zstdruby/libzstd/common/cpu.h +1 -3
- data/ext/zstdruby/libzstd/common/debug.c +1 -1
- data/ext/zstdruby/libzstd/common/debug.h +12 -19
- data/ext/zstdruby/libzstd/common/entropy_common.c +172 -48
- data/ext/zstdruby/libzstd/common/error_private.c +10 -2
- data/ext/zstdruby/libzstd/common/error_private.h +82 -3
- data/ext/zstdruby/libzstd/common/fse.h +37 -86
- data/ext/zstdruby/libzstd/common/fse_decompress.c +117 -92
- data/ext/zstdruby/libzstd/common/huf.h +99 -166
- data/ext/zstdruby/libzstd/common/mem.h +124 -142
- data/ext/zstdruby/libzstd/common/pool.c +54 -27
- data/ext/zstdruby/libzstd/common/pool.h +10 -4
- data/ext/zstdruby/libzstd/common/portability_macros.h +156 -0
- data/ext/zstdruby/libzstd/common/threading.c +74 -19
- data/ext/zstdruby/libzstd/common/threading.h +5 -10
- data/ext/zstdruby/libzstd/common/xxhash.c +7 -847
- data/ext/zstdruby/libzstd/common/xxhash.h +5568 -167
- data/ext/zstdruby/libzstd/common/zstd_common.c +2 -37
- data/ext/zstdruby/libzstd/common/zstd_deps.h +111 -0
- data/ext/zstdruby/libzstd/common/zstd_internal.h +132 -187
- data/ext/zstdruby/libzstd/common/zstd_trace.h +163 -0
- data/ext/zstdruby/libzstd/compress/clevels.h +134 -0
- data/ext/zstdruby/libzstd/compress/fse_compress.c +83 -157
- data/ext/zstdruby/libzstd/compress/hist.c +27 -29
- data/ext/zstdruby/libzstd/compress/hist.h +2 -2
- data/ext/zstdruby/libzstd/compress/huf_compress.c +916 -279
- data/ext/zstdruby/libzstd/compress/zstd_compress.c +3773 -1019
- data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +610 -203
- data/ext/zstdruby/libzstd/compress/zstd_compress_literals.c +119 -42
- data/ext/zstdruby/libzstd/compress/zstd_compress_literals.h +16 -6
- data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.c +42 -19
- data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.c +49 -317
- data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_cwksp.h +320 -103
- data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +388 -151
- data/ext/zstdruby/libzstd/compress/zstd_double_fast.h +3 -2
- data/ext/zstdruby/libzstd/compress/zstd_fast.c +729 -265
- data/ext/zstdruby/libzstd/compress/zstd_fast.h +3 -2
- data/ext/zstdruby/libzstd/compress/zstd_lazy.c +1270 -251
- data/ext/zstdruby/libzstd/compress/zstd_lazy.h +61 -1
- data/ext/zstdruby/libzstd/compress/zstd_ldm.c +324 -219
- data/ext/zstdruby/libzstd/compress/zstd_ldm.h +9 -2
- data/ext/zstdruby/libzstd/compress/zstd_ldm_geartab.h +106 -0
- data/ext/zstdruby/libzstd/compress/zstd_opt.c +481 -209
- data/ext/zstdruby/libzstd/compress/zstd_opt.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +181 -457
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +34 -113
- data/ext/zstdruby/libzstd/decompress/huf_decompress.c +1199 -565
- data/ext/zstdruby/libzstd/decompress/huf_decompress_amd64.S +576 -0
- data/ext/zstdruby/libzstd/decompress/zstd_ddict.c +12 -12
- data/ext/zstdruby/libzstd/decompress/zstd_ddict.h +2 -2
- data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +627 -157
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.c +1086 -326
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.h +19 -5
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_internal.h +62 -13
- data/ext/zstdruby/libzstd/dictBuilder/cover.c +73 -52
- data/ext/zstdruby/libzstd/dictBuilder/cover.h +7 -6
- data/ext/zstdruby/libzstd/dictBuilder/divsufsort.c +1 -1
- data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +44 -35
- data/ext/zstdruby/libzstd/dictBuilder/zdict.c +103 -111
- data/ext/zstdruby/libzstd/{dictBuilder/zdict.h → zdict.h} +203 -34
- data/ext/zstdruby/libzstd/zstd.h +1217 -287
- data/ext/zstdruby/libzstd/{common/zstd_errors.h → zstd_errors.h} +28 -8
- data/ext/zstdruby/main.c +20 -0
- data/ext/zstdruby/skippable_frame.c +63 -0
- data/ext/zstdruby/streaming_compress.c +177 -0
- data/ext/zstdruby/streaming_compress.h +5 -0
- data/ext/zstdruby/streaming_decompress.c +123 -0
- data/ext/zstdruby/zstdruby.c +114 -32
- data/lib/zstd-ruby/version.rb +1 -1
- data/lib/zstd-ruby.rb +0 -1
- data/zstd-ruby.gemspec +1 -1
- metadata +19 -36
- data/.travis.yml +0 -14
- data/ext/zstdruby/libzstd/.gitignore +0 -3
- data/ext/zstdruby/libzstd/BUCK +0 -234
- data/ext/zstdruby/libzstd/Makefile +0 -354
- data/ext/zstdruby/libzstd/README.md +0 -179
- data/ext/zstdruby/libzstd/deprecated/zbuff.h +0 -214
- data/ext/zstdruby/libzstd/deprecated/zbuff_common.c +0 -26
- data/ext/zstdruby/libzstd/deprecated/zbuff_compress.c +0 -147
- data/ext/zstdruby/libzstd/deprecated/zbuff_decompress.c +0 -75
- data/ext/zstdruby/libzstd/dll/example/Makefile +0 -48
- data/ext/zstdruby/libzstd/dll/example/README.md +0 -69
- data/ext/zstdruby/libzstd/dll/example/build_package.bat +0 -20
- data/ext/zstdruby/libzstd/dll/example/fullbench-dll.sln +0 -25
- data/ext/zstdruby/libzstd/dll/example/fullbench-dll.vcxproj +0 -181
- data/ext/zstdruby/libzstd/legacy/zstd_legacy.h +0 -415
- data/ext/zstdruby/libzstd/legacy/zstd_v01.c +0 -2158
- data/ext/zstdruby/libzstd/legacy/zstd_v01.h +0 -94
- data/ext/zstdruby/libzstd/legacy/zstd_v02.c +0 -3518
- data/ext/zstdruby/libzstd/legacy/zstd_v02.h +0 -93
- data/ext/zstdruby/libzstd/legacy/zstd_v03.c +0 -3160
- data/ext/zstdruby/libzstd/legacy/zstd_v03.h +0 -93
- data/ext/zstdruby/libzstd/legacy/zstd_v04.c +0 -3647
- data/ext/zstdruby/libzstd/legacy/zstd_v04.h +0 -142
- data/ext/zstdruby/libzstd/legacy/zstd_v05.c +0 -4050
- data/ext/zstdruby/libzstd/legacy/zstd_v05.h +0 -162
- data/ext/zstdruby/libzstd/legacy/zstd_v06.c +0 -4154
- data/ext/zstdruby/libzstd/legacy/zstd_v06.h +0 -172
- data/ext/zstdruby/libzstd/legacy/zstd_v07.c +0 -4541
- data/ext/zstdruby/libzstd/legacy/zstd_v07.h +0 -187
- data/ext/zstdruby/libzstd/libzstd.pc.in +0 -15
- data/ext/zstdruby/zstdruby.h +0 -6
The diff body below covers data/ext/zstdruby/libzstd/compress/zstd_cwksp.h (+320 -103), zstd's single-buffer compression workspace allocator.

--- a/data/ext/zstdruby/libzstd/compress/zstd_cwksp.h
+++ b/data/ext/zstdruby/libzstd/compress/zstd_cwksp.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c)
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -14,7 +14,9 @@
 /*-*************************************
 *  Dependencies
 ***************************************/
+#include "../common/allocations.h"  /* ZSTD_customMalloc, ZSTD_customFree */
 #include "../common/zstd_internal.h"
+#include "../common/portability_macros.h"
 
 #if defined (__cplusplus)
 extern "C" {
@@ -35,15 +37,30 @@ extern "C" {
 #define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
 #endif
 
+
+/* Set our tables and aligneds to align by 64 bytes */
+#define ZSTD_CWKSP_ALIGNMENT_BYTES 64
+
 /*-*************************************
 *  Structures
 ***************************************/
 typedef enum {
     ZSTD_cwksp_alloc_objects,
-
-    ZSTD_cwksp_alloc_aligned
+    ZSTD_cwksp_alloc_aligned_init_once,
+    ZSTD_cwksp_alloc_aligned,
+    ZSTD_cwksp_alloc_buffers
 } ZSTD_cwksp_alloc_phase_e;
 
+/**
+ * Used to describe whether the workspace is statically allocated (and will not
+ * necessarily ever be freed), or if it's dynamically allocated and we can
+ * expect a well-formed caller to free this.
+ */
+typedef enum {
+    ZSTD_cwksp_dynamic_alloc,
+    ZSTD_cwksp_static_alloc
+} ZSTD_cwksp_static_alloc_e;
+
 /**
  * Zstd fits all its internal datastructures into a single continuous buffer,
  * so that it only needs to perform a single OS allocation (or so that a buffer
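The new ZSTD_cwksp_static_alloc_e flag records whether the workspace memory is owned by the library (dynamic) or by the caller (static). That distinction surfaces in zstd's public static-allocation API: a context built with ZSTD_initStaticCCtx() lives entirely inside a caller-provided buffer, and the workspace is tracked as statically allocated, so the library never frees it. A minimal sketch of that usage (error handling abbreviated; the estimate/init functions require ZSTD_STATIC_LINKING_ONLY):

```c
#include <stdio.h>
#include <stdlib.h>
#define ZSTD_STATIC_LINKING_ONLY   /* exposes the static-allocation API */
#include <zstd.h>

int main(void) {
    /* Ask zstd how large a workspace a level-3 compression context needs. */
    size_t const wkspSize = ZSTD_estimateCCtxSize(3);
    void* const wksp = malloc(wkspSize);
    if (wksp == NULL) return 1;

    /* The CCtx and its internal cwksp are carved out of wksp; the workspace
     * is tagged as statically allocated, so zstd itself never frees it. */
    ZSTD_CCtx* const cctx = ZSTD_initStaticCCtx(wksp, wkspSize);
    if (cctx == NULL) { free(wksp); return 1; }

    printf("static CCtx built inside a %zu-byte caller-owned buffer\n", wkspSize);
    free(wksp);   /* the caller, not zstd, owns the memory */
    return 0;
}
```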
@@ -84,15 +101,15 @@
  *
  * Workspace Layout:
  *
- * [ ... workspace ...
- * [objects][tables
+ * [ ... workspace ... ]
+ * [objects][tables ->] free space [<- buffers][<- aligned][<- init once]
  *
  * The various objects that live in the workspace are divided into the
  * following categories, and are allocated separately:
  *
  * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
  *   so that literally everything fits in a single buffer. Note: if present,
- *   this must be the first object in the workspace, since
+ *   this must be the first object in the workspace, since ZSTD_customFree{CCtx,
  *   CDict}() rely on a pointer comparison to see whether one or two frees are
  *   required.
  *
@@ -107,10 +124,20 @@
  * - Tables: these are any of several different datastructures (hash tables,
  *   chain tables, binary trees) that all respect a common format: they are
  *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
- *   Their sizes depend on the cparams.
+ *   Their sizes depend on the cparams. These tables are 64-byte aligned.
  *
- * -
- *
+ * - Init once: these buffers require to be initialized at least once before
+ *   use. They should be used when we want to skip memory initialization
+ *   while not triggering memory checkers (like Valgrind) when reading from
+ *   from this memory without writing to it first.
+ *   These buffers should be used carefully as they might contain data
+ *   from previous compressions.
+ *   Buffers are aligned to 64 bytes.
+ *
+ * - Aligned: these buffers don't require any initialization before they're
+ *   used. The user of the buffer should make sure they write into a buffer
+ *   location before reading from it.
+ *   Buffers are aligned to 64 bytes.
  *
  * - Buffers: these buffers are used for various purposes that don't require
  *   any alignment or initialization before they're used. This means they can
@@ -122,9 +149,9 @@
  *   correctly packed into the workspace buffer. That order is:
  *
  * 1. Objects
- * 2.
- * 3. Aligned
- * 4. Tables
+ * 2. Init once / Tables
+ * 3. Aligned / Tables
+ * 4. Buffers / Tables
  *
  * Attempts to reserve objects of different types out of order will fail.
  */
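The reserve order above is enforced by a one-way phase machine: each allocation category maps to a ZSTD_cwksp_alloc_phase_e value, and the workspace only moves forward through them. The toy model below (illustrative names, not the library's; the real ZSTD_cwksp_internal_advance_phase() also realigns pointers when entering the table phase) shows how an out-of-order reservation fails:

```c
#include <stdio.h>

/* Mirrors the ordering of ZSTD_cwksp_alloc_phase_e after this change. */
typedef enum { alloc_objects, alloc_aligned_init_once, alloc_aligned, alloc_buffers } phase_e;

/* Phases only move forward; asking for an earlier phase is an error. */
static int advance_phase(phase_e* current, phase_e wanted) {
    if (wanted < *current) return -1;
    *current = wanted;
    return 0;
}

int main(void) {
    phase_e phase = alloc_objects;
    printf("reserve table:  %s\n", advance_phase(&phase, alloc_aligned_init_once) == 0 ? "ok" : "fail");
    printf("reserve buffer: %s\n", advance_phase(&phase, alloc_buffers) == 0 ? "ok" : "fail");
    printf("reserve object: %s\n", advance_phase(&phase, alloc_objects) == 0 ? "ok" : "fail");
    return 0;
}
```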
@@ -136,10 +163,12 @@ typedef struct {
     void* tableEnd;
     void* tableValidEnd;
     void* allocStart;
+    void* initOnceStart;
 
-
+    BYTE allocFailed;
     int workspaceOversizedDuration;
     ZSTD_cwksp_alloc_phase_e phase;
+    ZSTD_cwksp_static_alloc_e isStatic;
 } ZSTD_cwksp;
 
 /*-*************************************
@@ -147,6 +176,7 @@ typedef struct {
 ***************************************/
 
 MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
+MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws);
 
 MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
     (void)ws;
@@ -156,6 +186,20 @@ MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
     assert(ws->tableEnd <= ws->allocStart);
     assert(ws->tableValidEnd <= ws->allocStart);
     assert(ws->allocStart <= ws->workspaceEnd);
+    assert(ws->initOnceStart <= ZSTD_cwksp_initialAllocStart(ws));
+    assert(ws->workspace <= ws->initOnceStart);
+#if ZSTD_MEMORY_SANITIZER
+    {
+        intptr_t const offset = __msan_test_shadow(ws->initOnceStart,
+            (U8*)ZSTD_cwksp_initialAllocStart(ws) - (U8*)ws->initOnceStart);
+#if defined(ZSTD_MSAN_PRINT)
+        if(offset!=-1) {
+            __msan_print_shadow((U8*)ws->initOnceStart + offset - 8, 32);
+        }
+#endif
+        assert(offset==-1);
+    };
+#endif
 }
 
 /**
@@ -176,63 +220,72 @@ MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
  * Since tables aren't currently redzoned, you don't need to call through this
  * to figure out how much space you need for the matchState tables. Everything
  * else is though.
+ *
+ * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size().
  */
 MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
-
+    if (size == 0)
+        return 0;
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
 #else
     return size;
 #endif
 }
 
-
-
-
-
-
-
-        ws->tableValidEnd = ws->objectEnd;
-    }
-    if (ws->phase < ZSTD_cwksp_alloc_aligned &&
-            phase >= ZSTD_cwksp_alloc_aligned) {
-        /* If unaligned allocations down from a too-large top have left us
-         * unaligned, we need to realign our alloc ptr. Technically, this
-         * can consume space that is unaccounted for in the neededSpace
-         * calculation. However, I believe this can only happen when the
-         * workspace is too large, and specifically when it is too large
-         * by a larger margin than the space that will be consumed. */
-        /* TODO: cleaner, compiler warning friendly way to do this??? */
-        ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));
-        if (ws->allocStart < ws->tableValidEnd) {
-            ws->tableValidEnd = ws->allocStart;
-        }
-    }
-    ws->phase = phase;
-}
+/**
+ * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
+ * Used to determine the number of bytes required for a given "aligned".
+ */
+MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
+    return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES));
 }
 
 /**
- * Returns
+ * Returns the amount of additional space the cwksp must allocate
+ * for internal purposes (currently only alignment).
  */
-MEM_STATIC
-
+MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
+    /* For alignment, the wksp will always allocate an additional 2*ZSTD_CWKSP_ALIGNMENT_BYTES
+     * bytes to align the beginning of tables section and end of buffers;
+     */
+    size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES * 2;
+    return slackSpace;
 }
 
+
 /**
- *
+ * Return the number of additional bytes required to align a pointer to the given number of bytes.
+ * alignBytes must be a power of two.
  */
-MEM_STATIC void*
-
-
-
-
-
+MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
+    size_t const alignBytesMask = alignBytes - 1;
+    size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
+    assert((alignBytes & alignBytesMask) == 0);
+    assert(bytes < alignBytes);
+    return bytes;
+}
 
-
-
-
-
+/**
+ * Returns the initial value for allocStart which is used to determine the position from
+ * which we can allocate from the end of the workspace.
+ */
+MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws) {
+    return (void*)((size_t)ws->workspaceEnd & ~(ZSTD_CWKSP_ALIGNMENT_BYTES-1));
+}
 
+/**
+ * Internal function. Do not use directly.
+ * Reserves the given number of bytes within the aligned/buffer segment of the wksp,
+ * which counts from the end of the wksp (as opposed to the object/table segment).
+ *
+ * Returns a pointer to the beginning of that space.
+ */
+MEM_STATIC void*
+ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
+{
+    void* const alloc = (BYTE*)ws->allocStart - bytes;
+    void* const bottom = ws->tableEnd;
     DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
         alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
     ZSTD_cwksp_assert_internal_consistency(ws);
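The two alignment helpers introduced here are plain bit arithmetic on power-of-two boundaries: ZSTD_cwksp_align() rounds a size up to the next multiple of the alignment, and ZSTD_cwksp_bytes_to_align_ptr() measures how far a pointer is from the next boundary. A standalone illustration of the same masks (helper names are ours, not the library's):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGNMENT_BYTES 64   /* mirrors ZSTD_CWKSP_ALIGNMENT_BYTES */

/* Round size up to the next multiple of align (align must be a power of
 * two), the same arithmetic as ZSTD_cwksp_align(). */
static size_t align_up(size_t size, size_t align) {
    size_t const mask = align - 1;
    assert((align & mask) == 0);
    return (size + mask) & ~mask;
}

/* Bytes needed to advance ptr to the next aligned boundary, the same
 * arithmetic as ZSTD_cwksp_bytes_to_align_ptr(); 0 if already aligned. */
static size_t bytes_to_align_ptr(void* ptr, size_t align) {
    size_t const mask = align - 1;
    return (align - ((size_t)ptr & mask)) & mask;
}

int main(void) {
    printf("align_up(100) = %zu\n", align_up(100, ALIGNMENT_BYTES));  /* 128 */
    printf("align_up(64)  = %zu\n", align_up(64, ALIGNMENT_BYTES));   /*  64 */
    printf("to align 0x1041: %zu bytes\n",
           bytes_to_align_ptr((void*)(uintptr_t)0x1041, ALIGNMENT_BYTES)); /* 63 */
    return 0;
}
```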
@@ -242,16 +295,88 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal(
         ws->allocFailed = 1;
         return NULL;
     }
+    /* the area is reserved from the end of wksp.
+     * If it overlaps with tableValidEnd, it voids guarantees on values' range */
     if (alloc < ws->tableValidEnd) {
         ws->tableValidEnd = alloc;
     }
     ws->allocStart = alloc;
+    return alloc;
+}
+
+/**
+ * Moves the cwksp to the next phase, and does any necessary allocations.
+ * cwksp initialization must necessarily go through each phase in order.
+ * Returns a 0 on success, or zstd error
+ */
+MEM_STATIC size_t
+ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
+{
+    assert(phase >= ws->phase);
+    if (phase > ws->phase) {
+        /* Going from allocating objects to allocating initOnce / tables */
+        if (ws->phase < ZSTD_cwksp_alloc_aligned_init_once &&
+            phase >= ZSTD_cwksp_alloc_aligned_init_once) {
+            ws->tableValidEnd = ws->objectEnd;
+            ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);
+
+            { /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
+                void *const alloc = ws->objectEnd;
+                size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
+                void *const objectEnd = (BYTE *) alloc + bytesToAlign;
+                DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
+                RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation,
+                    "table phase - alignment initial allocation failed!");
+                ws->objectEnd = objectEnd;
+                ws->tableEnd = objectEnd;  /* table area starts being empty */
+                if (ws->tableValidEnd < ws->tableEnd) {
+                    ws->tableValidEnd = ws->tableEnd;
+                }
+            }
+        }
+        ws->phase = phase;
+        ZSTD_cwksp_assert_internal_consistency(ws);
+    }
+    return 0;
+}
+
+/**
+ * Returns whether this object/buffer/etc was allocated in this workspace.
+ */
+MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
+{
+    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr < ws->workspaceEnd);
+}
 
-
+/**
+ * Internal function. Do not use directly.
+ */
+MEM_STATIC void*
+ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase)
+{
+    void* alloc;
+    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
+        return NULL;
+    }
+
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    /* over-reserve space */
+    bytes += 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+#endif
+
+    alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);
+
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
      * either size. */
-
-
+    if (alloc) {
+        alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+        if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+            /* We need to keep the redzone poisoned while unpoisoning the bytes that
+             * are actually allocated. */
+            __asan_unpoison_memory_region(alloc, bytes - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE);
+        }
+    }
 #endif
 
     return alloc;
@@ -260,33 +385,78 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal(
 /**
  * Reserves and returns unaligned memory.
  */
-MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
+MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
+{
     return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
 }
 
 /**
- * Reserves and returns memory sized on and aligned on
+ * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
+ * This memory has been initialized at least once in the past.
+ * This doesn't mean it has been initialized this time, and it might contain data from previous
+ * operations.
+ * The main usage is for algorithms that might need read access into uninitialized memory.
+ * The algorithm must maintain safety under these conditions and must make sure it doesn't
+ * leak any of the past data (directly or in side channels).
  */
-MEM_STATIC void*
-
-
+MEM_STATIC void* ZSTD_cwksp_reserve_aligned_init_once(ZSTD_cwksp* ws, size_t bytes)
+{
+    size_t const alignedBytes = ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES);
+    void* ptr = ZSTD_cwksp_reserve_internal(ws, alignedBytes, ZSTD_cwksp_alloc_aligned_init_once);
+    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
+    if(ptr && ptr < ws->initOnceStart) {
+        /* We assume the memory following the current allocation is either:
+         * 1. Not usable as initOnce memory (end of workspace)
+         * 2. Another initOnce buffer that has been allocated before (and so was previously memset)
+         * 3. An ASAN redzone, in which case we don't want to write on it
+         * For these reasons it should be fine to not explicitly zero every byte up to ws->initOnceStart.
+         * Note that we assume here that MSAN and ASAN cannot run in the same time. */
+        ZSTD_memset(ptr, 0, MIN((size_t)((U8*)ws->initOnceStart - (U8*)ptr), alignedBytes));
+        ws->initOnceStart = ptr;
+    }
+#if ZSTD_MEMORY_SANITIZER
+    assert(__msan_test_shadow(ptr, bytes) == -1);
+#endif
+    return ptr;
 }
 
 /**
- *
+ * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
+ */
+MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
+{
+    void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
+                                            ZSTD_cwksp_alloc_aligned);
+    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
+    return ptr;
+}
+
+/**
+ * Aligned on 64 bytes. These buffers have the special property that
  * their values remain constrained, allowing us to re-use them without
  * memset()-ing them.
  */
-MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
-
-
-    void*
-    void*
+MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
+{
+    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned_init_once;
+    void* alloc;
+    void* end;
+    void* top;
+
+    /* We can only start allocating tables after we are done reserving space for objects at the
+     * start of the workspace */
+    if(ws->phase < phase) {
+        if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
+            return NULL;
+        }
+    }
+    alloc = ws->tableEnd;
+    end = (BYTE *)alloc + bytes;
+    top = ws->allocStart;
 
     DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
         alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
     assert((bytes & (sizeof(U32)-1)) == 0);
-    ZSTD_cwksp_internal_advance_phase(ws, phase);
     ZSTD_cwksp_assert_internal_consistency(ws);
     assert(end <= top);
     if (end > top) {
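ZSTD_cwksp_reserve_aligned_init_once() keeps initOnceStart pointing at the lowest init-once allocation seen so far, and zeroes only the bytes that fall below it; everything at or above initOnceStart was already memset by an earlier reservation. A simplified model of that bookkeeping (names and layout are illustrative):

```c
#include <stdio.h>
#include <string.h>

/* Zero only the prefix of the new allocation that was never part of the
 * init-once region, then extend the region downward -- the same rule as the
 * ZSTD_memset(ptr, 0, MIN(...)) call in the real function. */
static void reserve_init_once(char* ptr, size_t bytes, char** initOnceStart) {
    if (ptr < *initOnceStart) {
        size_t const newlyExposed = (size_t)(*initOnceStart - ptr);
        memset(ptr, 0, newlyExposed < bytes ? newlyExposed : bytes);
        *initOnceStart = ptr;
    }
}

int main(void) {
    static char wksp[256];
    char* initOnceStart = wksp + 256;                   /* initially: end of workspace */
    reserve_init_once(wksp + 192, 64, &initOnceStart);  /* zeroes [192, 256) */
    reserve_init_once(wksp + 128, 64, &initOnceStart);  /* zeroes [128, 192) only */
    printf("initOnceStart offset: %td\n", initOnceStart - wksp);  /* 128 */
    return 0;
}
```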
@@ -296,35 +466,41 @@ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
     }
     ws->tableEnd = end;
 
-#if
-
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+        __asan_unpoison_memory_region(alloc, bytes);
+    }
 #endif
 
+    assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
+    assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
     return alloc;
 }
 
 /**
  * Aligned on sizeof(void*).
+ * Note : should happen only once, at workspace first initialization
  */
-MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
-
+MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
+{
+    size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
     void* alloc = ws->objectEnd;
     void* end = (BYTE*)alloc + roundedBytes;
 
-#if
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     /* over-reserve space */
     end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
 #endif
 
-    DEBUGLOG(
+    DEBUGLOG(4,
         "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
         alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
-    assert((
-    assert(
+    assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
+    assert(bytes % ZSTD_ALIGNOF(void*) == 0);
     ZSTD_cwksp_assert_internal_consistency(ws);
     /* we must be in the first phase, no advance is possible */
     if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
-        DEBUGLOG(
+        DEBUGLOG(3, "cwksp: object alloc failed!");
         ws->allocFailed = 1;
         return NULL;
     }
@@ -332,27 +508,38 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
     ws->tableEnd = end;
     ws->tableValidEnd = end;
 
-#if
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
      * either size. */
-    alloc = (BYTE
-
+    alloc = (BYTE*)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+        __asan_unpoison_memory_region(alloc, bytes);
+    }
 #endif
 
     return alloc;
 }
 
-MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
+MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
+{
     DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");
 
-#if
+#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
     /* To validate that the table re-use logic is sound, and that we don't
      * access table space that we haven't cleaned, we re-"poison" the table
-     * space every time we mark it dirty.
+     * space every time we mark it dirty.
+     * Since tableValidEnd space and initOnce space may overlap we don't poison
+     * the initOnce portion as it break its promise. This means that this poisoning
+     * check isn't always applied fully. */
     {
         size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
         assert(__msan_test_shadow(ws->objectEnd, size) == -1);
-
+        if((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) {
+            __msan_poison(ws->objectEnd, size);
+        } else {
+            assert(ws->initOnceStart >= ws->objectEnd);
+            __msan_poison(ws->objectEnd, (BYTE*)ws->initOnceStart - (BYTE*)ws->objectEnd);
+        }
     }
 #endif
 
@@ -380,7 +567,7 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
     assert(ws->tableValidEnd >= ws->objectEnd);
     assert(ws->tableValidEnd <= ws->allocStart);
     if (ws->tableValidEnd < ws->tableEnd) {
-
+        ZSTD_memset(ws->tableValidEnd, 0, (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd));
     }
     ZSTD_cwksp_mark_tables_clean(ws);
 }
@@ -392,8 +579,12 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
 MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
     DEBUGLOG(4, "cwksp: clearing tables!");
 
-#if
-
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    /* We don't do this when the workspace is statically allocated, because
+     * when that is the case, we have no capability to hook into the end of the
+     * workspace's lifecycle to unpoison the memory.
+     */
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
         size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
         __asan_poison_memory_region(ws->objectEnd, size);
     }
@@ -410,29 +601,36 @@ MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
 MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
     DEBUGLOG(4, "cwksp: clearing!");
 
-#if
+#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
     /* To validate that the context re-use logic is sound, and that we don't
     * access stuff that this compression hasn't initialized, we re-"poison"
-     * the workspace
-     *
+     * the workspace except for the areas in which we expect memory re-use
+     * without initialization (objects, valid tables area and init once
+     * memory). */
     {
-
-
+        if((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) {
+            size_t size = (BYTE*)ws->initOnceStart - (BYTE*)ws->tableValidEnd;
+            __msan_poison(ws->tableValidEnd, size);
+        }
     }
 #endif
 
-#if
-
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    /* We don't do this when the workspace is statically allocated, because
+     * when that is the case, we have no capability to hook into the end of the
+     * workspace's lifecycle to unpoison the memory.
+     */
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
         size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
         __asan_poison_memory_region(ws->objectEnd, size);
     }
 #endif
 
     ws->tableEnd = ws->objectEnd;
-    ws->allocStart = ws
+    ws->allocStart = ZSTD_cwksp_initialAllocStart(ws);
     ws->allocFailed = 0;
-    if (ws->phase >
-        ws->phase =
+    if (ws->phase > ZSTD_cwksp_alloc_aligned_init_once) {
+        ws->phase = ZSTD_cwksp_alloc_aligned_init_once;
     }
     ZSTD_cwksp_assert_internal_consistency(ws);
 }
@@ -442,47 +640,54 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
  * Any existing values in the workspace are ignored (the previously managed
  * buffer, if present, must be separately freed).
  */
-MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
+MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
     DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
     assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
     ws->workspace = start;
     ws->workspaceEnd = (BYTE*)start + size;
     ws->objectEnd = ws->workspace;
     ws->tableValidEnd = ws->objectEnd;
+    ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);
     ws->phase = ZSTD_cwksp_alloc_objects;
+    ws->isStatic = isStatic;
     ZSTD_cwksp_clear(ws);
     ws->workspaceOversizedDuration = 0;
     ZSTD_cwksp_assert_internal_consistency(ws);
 }
 
 MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
-    void* workspace =
+    void* workspace = ZSTD_customMalloc(size, customMem);
     DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
     RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
-    ZSTD_cwksp_init(ws, workspace, size);
+    ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
     return 0;
 }
 
 MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
     void *ptr = ws->workspace;
     DEBUGLOG(4, "cwksp: freeing workspace");
-
-
+    ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
+    ZSTD_customFree(ptr, customMem);
 }
 
 /**
  * Moves the management of a workspace from one cwksp to another. The src cwksp
- * is left in an invalid state (src must be re-init()'ed before
+ * is left in an invalid state (src must be re-init()'ed before it's used again).
  */
 MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
     *dst = *src;
-
+    ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
 }
 
 MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
     return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
 }
 
+MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
+    return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
+         + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
+}
+
 MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
     return ws->allocFailed;
 }
@@ -491,6 +696,18 @@ MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
 * Functions Checking Free Space
 ***************************************/
 
+/* ZSTD_alignmentSpaceWithinBounds() :
+ * Returns if the estimated space needed for a wksp is within an acceptable limit of the
+ * actual amount of space used.
+ */
+MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp *const ws, size_t const estimatedSpace) {
+    /* We have an alignment space between objects and tables between tables and buffers, so we can have up to twice
+     * the alignment bytes difference between estimation and actual usage */
+    return (estimatedSpace - ZSTD_cwksp_slack_space_required()) <= ZSTD_cwksp_used(ws) &&
+           ZSTD_cwksp_used(ws) <= estimatedSpace;
+}
+
+
 MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
     return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
 }