zstdlib 0.7.0-x86-mingw32 → 0.10.0-x86-mingw32
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGES.md +20 -0
- data/README.md +7 -1
- data/Rakefile +38 -8
- data/ext/{zstdlib → zstdlib_c}/extconf.rb +11 -6
- data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.2/zstdlib.c +2 -2
- data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.3/zstdlib.c +2 -2
- data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.4/zstdlib.c +2 -2
- data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.5/zstdlib.c +2 -2
- data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.6/zstdlib.c +2 -2
- data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.7/zstdlib.c +2 -2
- data/ext/zstdlib_c/ruby/zlib-3.0/zstdlib.c +4994 -0
- data/ext/zstdlib_c/ruby/zlib-3.1/zstdlib.c +5076 -0
- data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/adler32.c +0 -0
- data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/compress.c +0 -0
- data/ext/zstdlib_c/zlib-1.2.12/crc32.c +1116 -0
- data/ext/zstdlib_c/zlib-1.2.12/crc32.h +9446 -0
- data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/deflate.c +78 -30
- data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/deflate.h +12 -15
- data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/gzclose.c +0 -0
- data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/gzguts.h +3 -2
- data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/gzlib.c +5 -3
- data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/gzread.c +5 -7
- data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/gzwrite.c +25 -13
- data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/infback.c +2 -1
- data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/inffast.c +14 -14
- data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/inffast.h +0 -0
- data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/inffixed.h +0 -0
- data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/inflate.c +39 -8
- data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/inflate.h +3 -2
- data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/inftrees.c +3 -3
- data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/inftrees.h +0 -0
- data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/trees.c +27 -48
- data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/trees.h +0 -0
- data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/uncompr.c +0 -0
- data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/zconf.h +0 -0
- data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/zlib.h +123 -100
- data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/zutil.c +2 -2
- data/ext/{zstdlib/zlib-1.2.11 → zstdlib_c/zlib-1.2.12}/zutil.h +12 -9
- data/ext/{zstdlib → zstdlib_c}/zlib.mk +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlibwrapper/zlibwrapper.c +1 -5
- data/ext/{zstdlib → zstdlib_c}/zlibwrapper.mk +0 -0
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/bitstream.h +46 -22
- data/ext/zstdlib_c/zstd-1.5.2/lib/common/compiler.h +335 -0
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/cpu.h +1 -3
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/debug.c +1 -1
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/debug.h +12 -19
- data/ext/zstdlib_c/zstd-1.5.2/lib/common/entropy_common.c +368 -0
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/error_private.c +2 -1
- data/ext/zstdlib_c/zstd-1.5.2/lib/common/error_private.h +159 -0
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/fse.h +41 -12
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/fse_decompress.c +139 -22
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/huf.h +47 -23
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/mem.h +87 -98
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/pool.c +34 -23
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/pool.h +4 -4
- data/ext/zstdlib_c/zstd-1.5.2/lib/common/portability_macros.h +137 -0
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/threading.c +6 -5
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/threading.h +0 -0
- data/ext/zstdlib_c/zstd-1.5.2/lib/common/xxhash.c +24 -0
- data/ext/zstdlib_c/zstd-1.5.2/lib/common/xxhash.h +5686 -0
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/zstd_common.c +10 -10
- data/ext/zstdlib_c/zstd-1.5.2/lib/common/zstd_deps.h +111 -0
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/common/zstd_internal.h +191 -145
- data/ext/zstdlib_c/zstd-1.5.2/lib/common/zstd_trace.h +163 -0
- data/ext/zstdlib_c/zstd-1.5.2/lib/compress/clevels.h +134 -0
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/fse_compress.c +89 -46
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/hist.c +27 -29
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/hist.h +2 -2
- data/ext/zstdlib_c/zstd-1.5.2/lib/compress/huf_compress.c +1370 -0
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress.c +2917 -868
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_internal.h +458 -125
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_literals.c +12 -11
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_literals.h +4 -2
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_sequences.c +41 -18
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_sequences.h +1 -1
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_superblock.c +26 -298
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_superblock.h +1 -1
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_cwksp.h +234 -83
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_double_fast.c +313 -138
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_double_fast.h +1 -1
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_fast.c +329 -150
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_fast.h +1 -1
- data/ext/zstdlib_c/zstd-1.5.2/lib/compress/zstd_lazy.c +2104 -0
- data/ext/zstdlib_c/zstd-1.5.2/lib/compress/zstd_lazy.h +125 -0
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_ldm.c +321 -216
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_ldm.h +9 -2
- data/ext/zstdlib_c/zstd-1.5.2/lib/compress/zstd_ldm_geartab.h +106 -0
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_opt.c +412 -166
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_opt.h +1 -1
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstdmt_compress.c +169 -453
- data/ext/zstdlib_c/zstd-1.5.2/lib/compress/zstdmt_compress.h +113 -0
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/decompress/huf_decompress.c +1044 -403
- data/ext/zstdlib_c/zstd-1.5.2/lib/decompress/huf_decompress_amd64.S +585 -0
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_ddict.c +9 -9
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_ddict.h +2 -2
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_decompress.c +450 -105
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_decompress_block.c +913 -273
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_decompress_block.h +14 -5
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_decompress_internal.h +59 -12
- data/ext/zstdlib_c/zstd-1.5.2/lib/zdict.h +452 -0
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/zstd.h +699 -214
- data/ext/{zstdlib/zstd-1.4.5/lib/common → zstdlib_c/zstd-1.5.2/lib}/zstd_errors.h +2 -1
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzclose.c +0 -0
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzcompatibility.h +1 -1
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzguts.h +0 -0
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzlib.c +0 -0
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzread.c +0 -0
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzwrite.c +0 -0
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/zlibWrapper/zstd_zlibwrapper.c +133 -44
- data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/zlibWrapper/zstd_zlibwrapper.h +1 -1
- data/ext/zstdlib_c/zstd.mk +15 -0
- data/lib/2.4/zstdlib_c.so +0 -0
- data/lib/2.5/zstdlib_c.so +0 -0
- data/lib/2.6/zstdlib_c.so +0 -0
- data/lib/2.7/zstdlib_c.so +0 -0
- data/lib/3.0/zstdlib_c.so +0 -0
- data/lib/3.1/zstdlib_c.so +0 -0
- data/lib/zstdlib.rb +2 -2
- metadata +125 -116
- data/ext/zstdlib/zlib-1.2.11/crc32.c +0 -442
- data/ext/zstdlib/zlib-1.2.11/crc32.h +0 -441
- data/ext/zstdlib/zstd-1.4.5/lib/common/compiler.h +0 -175
- data/ext/zstdlib/zstd-1.4.5/lib/common/entropy_common.c +0 -216
- data/ext/zstdlib/zstd-1.4.5/lib/common/error_private.h +0 -80
- data/ext/zstdlib/zstd-1.4.5/lib/common/xxhash.c +0 -864
- data/ext/zstdlib/zstd-1.4.5/lib/common/xxhash.h +0 -285
- data/ext/zstdlib/zstd-1.4.5/lib/compress/huf_compress.c +0 -798
- data/ext/zstdlib/zstd-1.4.5/lib/compress/zstd_lazy.c +0 -1138
- data/ext/zstdlib/zstd-1.4.5/lib/compress/zstd_lazy.h +0 -67
- data/ext/zstdlib/zstd-1.4.5/lib/compress/zstdmt_compress.h +0 -192
- data/ext/zstdlib/zstd.mk +0 -14
- data/lib/2.2/zstdlib.so +0 -0
- data/lib/2.3/zstdlib.so +0 -0
- data/lib/2.4/zstdlib.so +0 -0
- data/lib/2.5/zstdlib.so +0 -0
- data/lib/2.6/zstdlib.so +0 -0
- data/lib/2.7/zstdlib.so +0 -0
data/ext/{zstdlib/zstd-1.4.5 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_cwksp.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c)
+ * Copyright (c) Yann Collet, Facebook, Inc.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -35,6 +35,10 @@ extern "C" {
 #define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
 #endif
 
+
+/* Set our tables and aligneds to align by 64 bytes */
+#define ZSTD_CWKSP_ALIGNMENT_BYTES 64
+
 /*-*************************************
 *  Structures
 ***************************************/
@@ -44,6 +48,16 @@ typedef enum {
     ZSTD_cwksp_alloc_aligned
 } ZSTD_cwksp_alloc_phase_e;
 
+/**
+ * Used to describe whether the workspace is statically allocated (and will not
+ * necessarily ever be freed), or if it's dynamically allocated and we can
+ * expect a well-formed caller to free this.
+ */
+typedef enum {
+    ZSTD_cwksp_dynamic_alloc,
+    ZSTD_cwksp_static_alloc
+} ZSTD_cwksp_static_alloc_e;
+
 /**
  * Zstd fits all its internal datastructures into a single continuous buffer,
  * so that it only needs to perform a single OS allocation (or so that a buffer
@@ -92,7 +106,7 @@ typedef enum {
  *
  * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
  *   so that literally everything fits in a single buffer. Note: if present,
- *   this must be the first object in the workspace, since
+ *   this must be the first object in the workspace, since ZSTD_customFree{CCtx,
  *   CDict}() rely on a pointer comparison to see whether one or two frees are
  *   required.
  *
@@ -107,10 +121,11 @@ typedef enum {
  * - Tables: these are any of several different datastructures (hash tables,
  *   chain tables, binary trees) that all respect a common format: they are
  *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
- *   Their sizes depend on the cparams.
+ *   Their sizes depend on the cparams. These tables are 64-byte aligned.
  *
  * - Aligned: these buffers are used for various purposes that require 4 byte
- *   alignment, but don't require any initialization before they're used.
+ *   alignment, but don't require any initialization before they're used. These
+ *   buffers are each aligned to 64 bytes.
  *
  * - Buffers: these buffers are used for various purposes that don't require
  *   any alignment or initialization before they're used. This means they can
@@ -123,8 +138,7 @@ typedef enum {
  *
  * 1. Objects
  * 2. Buffers
- * 3. Aligned
- * 4. Tables
+ * 3. Aligned/Tables
  *
  * Attempts to reserve objects of different types out of order will fail.
  */
@@ -137,9 +151,10 @@ typedef struct {
     void* tableValidEnd;
     void* allocStart;
 
-
+    BYTE allocFailed;
     int workspaceOversizedDuration;
     ZSTD_cwksp_alloc_phase_e phase;
+    ZSTD_cwksp_static_alloc_e isStatic;
 } ZSTD_cwksp;
 
 /*-*************************************
@@ -176,82 +191,166 @@ MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
  * Since tables aren't currently redzoned, you don't need to call through this
  * to figure out how much space you need for the matchState tables. Everything
  * else is though.
+ *
+ * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size().
  */
 MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
-
+    if (size == 0)
+        return 0;
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
 #else
     return size;
 #endif
 }
 
-
-
+/**
+ * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
+ * Used to determine the number of bytes required for a given "aligned".
+ */
+MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
+    return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES));
+}
+
+/**
+ * Returns the amount of additional space the cwksp must allocate
+ * for internal purposes (currently only alignment).
+ */
+MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
+    /* For alignment, the wksp will always allocate an additional n_1=[1, 64] bytes
+     * to align the beginning of tables section, as well as another n_2=[0, 63] bytes
+     * to align the beginning of the aligned section.
+     *
+     * n_1 + n_2 == 64 bytes if the cwksp is freshly allocated, due to tables and
+     * aligneds being sized in multiples of 64 bytes.
+     */
+    size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES;
+    return slackSpace;
+}
+
+
+/**
+ * Return the number of additional bytes required to align a pointer to the given number of bytes.
+ * alignBytes must be a power of two.
+ */
+MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
+    size_t const alignBytesMask = alignBytes - 1;
+    size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
+    assert((alignBytes & alignBytesMask) == 0);
+    assert(bytes != ZSTD_CWKSP_ALIGNMENT_BYTES);
+    return bytes;
+}
+
+/**
+ * Internal function. Do not use directly.
+ * Reserves the given number of bytes within the aligned/buffer segment of the wksp,
+ * which counts from the end of the wksp (as opposed to the object/table segment).
+ *
+ * Returns a pointer to the beginning of that space.
+ */
+MEM_STATIC void*
+ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
+{
+    void* const alloc = (BYTE*)ws->allocStart - bytes;
+    void* const bottom = ws->tableEnd;
+    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
+        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
+    ZSTD_cwksp_assert_internal_consistency(ws);
+    assert(alloc >= bottom);
+    if (alloc < bottom) {
+        DEBUGLOG(4, "cwksp: alloc failed!");
+        ws->allocFailed = 1;
+        return NULL;
+    }
+    /* the area is reserved from the end of wksp.
+     * If it overlaps with tableValidEnd, it voids guarantees on values' range */
+    if (alloc < ws->tableValidEnd) {
+        ws->tableValidEnd = alloc;
+    }
+    ws->allocStart = alloc;
+    return alloc;
+}
+
+/**
+ * Moves the cwksp to the next phase, and does any necessary allocations.
+ * cwksp initialization must necessarily go through each phase in order.
+ * Returns a 0 on success, or zstd error
+ */
+MEM_STATIC size_t
+ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
+{
     assert(phase >= ws->phase);
     if (phase > ws->phase) {
+        /* Going from allocating objects to allocating buffers */
         if (ws->phase < ZSTD_cwksp_alloc_buffers &&
                 phase >= ZSTD_cwksp_alloc_buffers) {
             ws->tableValidEnd = ws->objectEnd;
         }
+
+        /* Going from allocating buffers to allocating aligneds/tables */
         if (ws->phase < ZSTD_cwksp_alloc_aligned &&
                 phase >= ZSTD_cwksp_alloc_aligned) {
-            /*
-
-
-
-
-
-
-            ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));
-            if (ws->allocStart < ws->tableValidEnd) {
-                ws->tableValidEnd = ws->allocStart;
+            {   /* Align the start of the "aligned" to 64 bytes. Use [1, 64] bytes. */
+                size_t const bytesToAlign =
+                    ZSTD_CWKSP_ALIGNMENT_BYTES - ZSTD_cwksp_bytes_to_align_ptr(ws->allocStart, ZSTD_CWKSP_ALIGNMENT_BYTES);
+                DEBUGLOG(5, "reserving aligned alignment addtl space: %zu", bytesToAlign);
+                ZSTD_STATIC_ASSERT((ZSTD_CWKSP_ALIGNMENT_BYTES & (ZSTD_CWKSP_ALIGNMENT_BYTES - 1)) == 0); /* power of 2 */
+                RETURN_ERROR_IF(!ZSTD_cwksp_reserve_internal_buffer_space(ws, bytesToAlign),
+                                memory_allocation, "aligned phase - alignment initial allocation failed!");
             }
-
+            {   /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
+                void* const alloc = ws->objectEnd;
+                size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
+                void* const objectEnd = (BYTE*)alloc + bytesToAlign;
+                DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
+                RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation,
+                                "table phase - alignment initial allocation failed!");
+                ws->objectEnd = objectEnd;
+                ws->tableEnd = objectEnd;  /* table area starts being empty */
+                if (ws->tableValidEnd < ws->tableEnd) {
+                    ws->tableValidEnd = ws->tableEnd;
+        } } }
         ws->phase = phase;
+        ZSTD_cwksp_assert_internal_consistency(ws);
     }
+    return 0;
 }
 
 /**
  * Returns whether this object/buffer/etc was allocated in this workspace.
  */
-MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
+MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
+{
     return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
 }
 
 /**
  * Internal function. Do not use directly.
  */
-MEM_STATIC void*
-
+MEM_STATIC void*
+ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase)
+{
     void* alloc;
-
-
-
+    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
+        return NULL;
+    }
 
-#if
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     /* over-reserve space */
-
+    bytes += 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
 #endif
 
-
-        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
-    ZSTD_cwksp_assert_internal_consistency(ws);
-    assert(alloc >= bottom);
-    if (alloc < bottom) {
-        DEBUGLOG(4, "cwksp: alloc failed!");
-        ws->allocFailed = 1;
-        return NULL;
-    }
-    if (alloc < ws->tableValidEnd) {
-        ws->tableValidEnd = alloc;
-    }
-    ws->allocStart = alloc;
+    alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);
 
-#if
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
      * either size. */
-
-
+    if (alloc) {
+        alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+        if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+            __asan_unpoison_memory_region(alloc, bytes);
+        }
+    }
 #endif
 
     return alloc;
@@ -260,33 +359,44 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal(
 /**
  * Reserves and returns unaligned memory.
  */
-MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
+MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
+{
     return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
 }
 
 /**
- * Reserves and returns memory sized on and aligned on
+ * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
  */
-MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
-
-
+MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
+{
+    void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
+                                            ZSTD_cwksp_alloc_aligned);
+    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
+    return ptr;
 }
 
 /**
- * Aligned on
+ * Aligned on 64 bytes. These buffers have the special property that
  * their values remain constrained, allowing us to re-use them without
  * memset()-ing them.
  */
-MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
+MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
+{
     const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
-    void* alloc
-    void* end
-    void* top
+    void* alloc;
+    void* end;
+    void* top;
+
+    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
+        return NULL;
+    }
+    alloc = ws->tableEnd;
+    end = (BYTE *)alloc + bytes;
+    top = ws->allocStart;
 
     DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
         alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
     assert((bytes & (sizeof(U32)-1)) == 0);
-    ZSTD_cwksp_internal_advance_phase(ws, phase);
     ZSTD_cwksp_assert_internal_consistency(ws);
     assert(end <= top);
     if (end > top) {
@@ -296,35 +406,41 @@ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
     }
     ws->tableEnd = end;
 
-#if
-
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+        __asan_unpoison_memory_region(alloc, bytes);
+    }
 #endif
 
+    assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
+    assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
     return alloc;
 }
 
 /**
  * Aligned on sizeof(void*).
+ * Note : should happen only once, at workspace first initialization
  */
-MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
-
+MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
+{
+    size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
     void* alloc = ws->objectEnd;
     void* end = (BYTE*)alloc + roundedBytes;
 
-#if
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     /* over-reserve space */
     end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
 #endif
 
-    DEBUGLOG(
+    DEBUGLOG(4,
         "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
         alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
-    assert((
-    assert(
+    assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
+    assert(bytes % ZSTD_ALIGNOF(void*) == 0);
     ZSTD_cwksp_assert_internal_consistency(ws);
     /* we must be in the first phase, no advance is possible */
     if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
-        DEBUGLOG(
+        DEBUGLOG(3, "cwksp: object alloc failed!");
         ws->allocFailed = 1;
         return NULL;
     }
@@ -332,20 +448,23 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
     ws->tableEnd = end;
     ws->tableValidEnd = end;
 
-#if
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
      * either size. */
-    alloc = (BYTE
-
+    alloc = (BYTE*)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+        __asan_unpoison_memory_region(alloc, bytes);
+    }
 #endif
 
     return alloc;
 }
 
-MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
+MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
+{
     DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");
 
-#if
+#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
     /* To validate that the table re-use logic is sound, and that we don't
      * access table space that we haven't cleaned, we re-"poison" the table
      * space every time we mark it dirty. */
@@ -380,7 +499,7 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
     assert(ws->tableValidEnd >= ws->objectEnd);
     assert(ws->tableValidEnd <= ws->allocStart);
     if (ws->tableValidEnd < ws->tableEnd) {
-
+        ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
     }
     ZSTD_cwksp_mark_tables_clean(ws);
 }
@@ -392,8 +511,12 @@ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
 MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
     DEBUGLOG(4, "cwksp: clearing tables!");
 
-#if
-
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    /* We don't do this when the workspace is statically allocated, because
+     * when that is the case, we have no capability to hook into the end of the
+     * workspace's lifecycle to unpoison the memory.
+     */
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
         size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
         __asan_poison_memory_region(ws->objectEnd, size);
     }
@@ -410,7 +533,7 @@ MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
 MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
     DEBUGLOG(4, "cwksp: clearing!");
 
-#if
+#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
     /* To validate that the context re-use logic is sound, and that we don't
      * access stuff that this compression hasn't initialized, we re-"poison"
     * the workspace (or at least the non-static, non-table parts of it)
@@ -421,8 +544,12 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
     }
 #endif
 
-#if
-
+#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+    /* We don't do this when the workspace is statically allocated, because
+     * when that is the case, we have no capability to hook into the end of the
+     * workspace's lifecycle to unpoison the memory.
+     */
+    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
         size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
         __asan_poison_memory_region(ws->objectEnd, size);
     }
@@ -442,7 +569,7 @@ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
  * Any existing values in the workspace are ignored (the previously managed
  * buffer, if present, must be separately freed).
  */
-MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
+MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
     DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
     assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
     ws->workspace = start;
@@ -450,39 +577,45 @@ MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size) {
     ws->objectEnd = ws->workspace;
     ws->tableValidEnd = ws->objectEnd;
     ws->phase = ZSTD_cwksp_alloc_objects;
+    ws->isStatic = isStatic;
     ZSTD_cwksp_clear(ws);
     ws->workspaceOversizedDuration = 0;
     ZSTD_cwksp_assert_internal_consistency(ws);
 }
 
 MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
-    void* workspace =
+    void* workspace = ZSTD_customMalloc(size, customMem);
     DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
     RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
-    ZSTD_cwksp_init(ws, workspace, size);
+    ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
     return 0;
 }
 
 MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
     void *ptr = ws->workspace;
     DEBUGLOG(4, "cwksp: freeing workspace");
-
-
+    ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
+    ZSTD_customFree(ptr, customMem);
 }
 
 /**
  * Moves the management of a workspace from one cwksp to another. The src cwksp
- * is left in an invalid state (src must be re-init()'ed before
+ * is left in an invalid state (src must be re-init()'ed before it's used again).
  */
 MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
     *dst = *src;
-
+    ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
 }
 
 MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
     return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
 }
 
+MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
+    return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
+         + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
+}
+
 MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
     return ws->allocFailed;
 }
@@ -491,6 +624,24 @@ MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
 *  Functions Checking Free Space
 ***************************************/
 
+/* ZSTD_alignmentSpaceWithinBounds() :
+ * Returns if the estimated space needed for a wksp is within an acceptable limit of the
+ * actual amount of space used.
+ */
+MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp* const ws,
+                                                        size_t const estimatedSpace, int resizedWorkspace) {
+    if (resizedWorkspace) {
+        /* Resized/newly allocated wksp should have exact bounds */
+        return ZSTD_cwksp_used(ws) == estimatedSpace;
+    } else {
+        /* Due to alignment, when reusing a workspace, we can actually consume 63 fewer or more bytes
+         * than estimatedSpace. See the comments in zstd_cwksp.h for details.
+         */
+        return (ZSTD_cwksp_used(ws) >= estimatedSpace - 63) && (ZSTD_cwksp_used(ws) <= estimatedSpace + 63);
+    }
+}
+
+
 MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
     return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
 }
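Most of the cwksp changes above revolve around the new 64-byte alignment rules (`ZSTD_CWKSP_ALIGNMENT_BYTES`, `ZSTD_cwksp_bytes_to_align_ptr()`, `ZSTD_cwksp_slack_space_required()`). The snippet below is a minimal standalone sketch, not part of the gem or of upstream zstd, that copies the padding arithmetic shown in the diff so the [0, 63] table padding and [1, 64] aligned-section padding are easier to follow; the workspace buffer and offsets are hypothetical values chosen only for illustration.

```c
#include <assert.h>
#include <stdio.h>
#include <stddef.h>

/* Mirrors ZSTD_CWKSP_ALIGNMENT_BYTES from the hunk above (illustrative copy). */
#define CWKSP_ALIGNMENT_BYTES 64

/* Standalone copy of the ZSTD_cwksp_bytes_to_align_ptr() arithmetic shown in the
 * diff: how many bytes are needed to round `ptr` up to a multiple of `alignBytes`
 * (alignBytes must be a power of two); the result is always in [0, alignBytes - 1]. */
static size_t bytes_to_align_ptr(void* ptr, size_t const alignBytes)
{
    size_t const mask = alignBytes - 1;
    assert((alignBytes & mask) == 0);   /* power of two */
    return (alignBytes - ((size_t)ptr & mask)) & mask;
}

int main(void)
{
    /* Hypothetical workspace addresses, chosen only for this example. */
    char workspace[256];
    void* objectEnd  = workspace + 10;   /* where the table section would begin */
    void* allocStart = workspace + 200;  /* where the aligned section would end */

    /* "Table phase": pad the start of the tables by [0, 63] bytes. */
    size_t const n2 = bytes_to_align_ptr(objectEnd, CWKSP_ALIGNMENT_BYTES);

    /* "Aligned phase": reserve ALIGNMENT - bytes_to_align_ptr() bytes from the
     * end of the workspace, i.e. [1, 64] bytes. */
    size_t const n1 = CWKSP_ALIGNMENT_BYTES
                    - bytes_to_align_ptr(allocStart, CWKSP_ALIGNMENT_BYTES);

    printf("aligned-section padding n1 = %zu, table padding n2 = %zu\n", n1, n2);
    printf("slack reserved up front by ZSTD_cwksp_slack_space_required(): %d bytes\n",
           CWKSP_ALIGNMENT_BYTES);
    return 0;
}
```

As the comment in the diff notes, n_1 + n_2 comes out to exactly 64 bytes when the cwksp is freshly allocated, because tables and aligneds are sized in multiples of 64 bytes.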