extzstd 0.1 → 0.3.2

Files changed (134)
  1. checksums.yaml +5 -5
  2. data/HISTORY.ja.md +39 -0
  3. data/README.md +38 -56
  4. data/contrib/zstd/CHANGELOG +613 -0
  5. data/contrib/zstd/CODE_OF_CONDUCT.md +5 -0
  6. data/contrib/zstd/CONTRIBUTING.md +406 -0
  7. data/contrib/zstd/COPYING +339 -0
  8. data/contrib/zstd/Makefile +420 -0
  9. data/contrib/zstd/README.md +179 -41
  10. data/contrib/zstd/TESTING.md +44 -0
  11. data/contrib/zstd/appveyor.yml +292 -0
  12. data/contrib/zstd/lib/BUCK +234 -0
  13. data/contrib/zstd/lib/Makefile +451 -0
  14. data/contrib/zstd/lib/README.md +207 -0
  15. data/contrib/zstd/{common → lib/common}/bitstream.h +187 -138
  16. data/contrib/zstd/lib/common/compiler.h +288 -0
  17. data/contrib/zstd/lib/common/cpu.h +213 -0
  18. data/contrib/zstd/lib/common/debug.c +24 -0
  19. data/contrib/zstd/lib/common/debug.h +107 -0
  20. data/contrib/zstd/lib/common/entropy_common.c +362 -0
  21. data/contrib/zstd/{common → lib/common}/error_private.c +25 -12
  22. data/contrib/zstd/{common → lib/common}/error_private.h +14 -10
  23. data/contrib/zstd/{common → lib/common}/fse.h +173 -92
  24. data/contrib/zstd/{common → lib/common}/fse_decompress.c +149 -85
  25. data/contrib/zstd/lib/common/huf.h +361 -0
  26. data/contrib/zstd/{common → lib/common}/mem.h +115 -59
  27. data/contrib/zstd/lib/common/pool.c +350 -0
  28. data/contrib/zstd/lib/common/pool.h +84 -0
  29. data/contrib/zstd/lib/common/threading.c +122 -0
  30. data/contrib/zstd/lib/common/threading.h +155 -0
  31. data/contrib/zstd/{common → lib/common}/xxhash.c +55 -96
  32. data/contrib/zstd/{common → lib/common}/xxhash.h +23 -47
  33. data/contrib/zstd/lib/common/zstd_common.c +83 -0
  34. data/contrib/zstd/lib/common/zstd_deps.h +111 -0
  35. data/contrib/zstd/lib/common/zstd_errors.h +95 -0
  36. data/contrib/zstd/lib/common/zstd_internal.h +478 -0
  37. data/contrib/zstd/{compress → lib/compress}/fse_compress.c +214 -319
  38. data/contrib/zstd/lib/compress/hist.c +181 -0
  39. data/contrib/zstd/lib/compress/hist.h +75 -0
  40. data/contrib/zstd/lib/compress/huf_compress.c +913 -0
  41. data/contrib/zstd/lib/compress/zstd_compress.c +5208 -0
  42. data/contrib/zstd/lib/compress/zstd_compress_internal.h +1203 -0
  43. data/contrib/zstd/lib/compress/zstd_compress_literals.c +158 -0
  44. data/contrib/zstd/lib/compress/zstd_compress_literals.h +29 -0
  45. data/contrib/zstd/lib/compress/zstd_compress_sequences.c +433 -0
  46. data/contrib/zstd/lib/compress/zstd_compress_sequences.h +54 -0
  47. data/contrib/zstd/lib/compress/zstd_compress_superblock.c +849 -0
  48. data/contrib/zstd/lib/compress/zstd_compress_superblock.h +32 -0
  49. data/contrib/zstd/lib/compress/zstd_cwksp.h +561 -0
  50. data/contrib/zstd/lib/compress/zstd_double_fast.c +521 -0
  51. data/contrib/zstd/lib/compress/zstd_double_fast.h +38 -0
  52. data/contrib/zstd/lib/compress/zstd_fast.c +496 -0
  53. data/contrib/zstd/lib/compress/zstd_fast.h +37 -0
  54. data/contrib/zstd/lib/compress/zstd_lazy.c +1412 -0
  55. data/contrib/zstd/lib/compress/zstd_lazy.h +87 -0
  56. data/contrib/zstd/lib/compress/zstd_ldm.c +660 -0
  57. data/contrib/zstd/lib/compress/zstd_ldm.h +116 -0
  58. data/contrib/zstd/lib/compress/zstd_opt.c +1345 -0
  59. data/contrib/zstd/lib/compress/zstd_opt.h +56 -0
  60. data/contrib/zstd/lib/compress/zstdmt_compress.c +1811 -0
  61. data/contrib/zstd/lib/compress/zstdmt_compress.h +110 -0
  62. data/contrib/zstd/lib/decompress/huf_decompress.c +1350 -0
  63. data/contrib/zstd/lib/decompress/zstd_ddict.c +244 -0
  64. data/contrib/zstd/lib/decompress/zstd_ddict.h +44 -0
  65. data/contrib/zstd/lib/decompress/zstd_decompress.c +1930 -0
  66. data/contrib/zstd/lib/decompress/zstd_decompress_block.c +1540 -0
  67. data/contrib/zstd/lib/decompress/zstd_decompress_block.h +62 -0
  68. data/contrib/zstd/lib/decompress/zstd_decompress_internal.h +190 -0
  69. data/contrib/zstd/{common → lib/deprecated}/zbuff.h +68 -45
  70. data/contrib/zstd/lib/deprecated/zbuff_common.c +26 -0
  71. data/contrib/zstd/lib/deprecated/zbuff_compress.c +147 -0
  72. data/contrib/zstd/lib/deprecated/zbuff_decompress.c +75 -0
  73. data/contrib/zstd/lib/dictBuilder/cover.c +1245 -0
  74. data/contrib/zstd/lib/dictBuilder/cover.h +157 -0
  75. data/contrib/zstd/{dictBuilder → lib/dictBuilder}/divsufsort.c +3 -3
  76. data/contrib/zstd/{dictBuilder → lib/dictBuilder}/divsufsort.h +0 -0
  77. data/contrib/zstd/lib/dictBuilder/fastcover.c +758 -0
  78. data/contrib/zstd/{dictBuilder → lib/dictBuilder}/zdict.c +318 -194
  79. data/contrib/zstd/lib/dictBuilder/zdict.h +305 -0
  80. data/contrib/zstd/{legacy → lib/legacy}/zstd_legacy.h +171 -15
  81. data/contrib/zstd/{legacy → lib/legacy}/zstd_v01.c +191 -124
  82. data/contrib/zstd/{legacy → lib/legacy}/zstd_v01.h +19 -5
  83. data/contrib/zstd/{legacy → lib/legacy}/zstd_v02.c +125 -125
  84. data/contrib/zstd/{legacy → lib/legacy}/zstd_v02.h +19 -5
  85. data/contrib/zstd/{legacy → lib/legacy}/zstd_v03.c +125 -124
  86. data/contrib/zstd/{legacy → lib/legacy}/zstd_v03.h +20 -6
  87. data/contrib/zstd/{legacy → lib/legacy}/zstd_v04.c +151 -299
  88. data/contrib/zstd/{legacy → lib/legacy}/zstd_v04.h +19 -5
  89. data/contrib/zstd/{legacy → lib/legacy}/zstd_v05.c +237 -243
  90. data/contrib/zstd/{legacy → lib/legacy}/zstd_v05.h +19 -6
  91. data/contrib/zstd/{legacy → lib/legacy}/zstd_v06.c +130 -143
  92. data/contrib/zstd/{legacy → lib/legacy}/zstd_v06.h +18 -5
  93. data/contrib/zstd/{legacy → lib/legacy}/zstd_v07.c +158 -157
  94. data/contrib/zstd/{legacy → lib/legacy}/zstd_v07.h +19 -5
  95. data/contrib/zstd/lib/libzstd.pc.in +15 -0
  96. data/contrib/zstd/lib/zstd.h +2391 -0
  97. data/ext/depend +2 -0
  98. data/ext/extconf.rb +15 -6
  99. data/ext/extzstd.c +76 -145
  100. data/ext/extzstd.h +80 -31
  101. data/ext/extzstd_stream.c +417 -142
  102. data/ext/libzstd_conf.h +8 -0
  103. data/ext/zstd_common.c +10 -7
  104. data/ext/zstd_compress.c +14 -5
  105. data/ext/zstd_decompress.c +5 -4
  106. data/ext/zstd_dictbuilder.c +9 -4
  107. data/ext/zstd_dictbuilder_fastcover.c +3 -0
  108. data/ext/zstd_legacy_v01.c +3 -1
  109. data/ext/zstd_legacy_v02.c +3 -1
  110. data/ext/zstd_legacy_v03.c +3 -1
  111. data/ext/zstd_legacy_v04.c +3 -1
  112. data/ext/zstd_legacy_v05.c +3 -1
  113. data/ext/zstd_legacy_v06.c +3 -1
  114. data/ext/zstd_legacy_v07.c +3 -1
  115. data/gemstub.rb +10 -24
  116. data/lib/extzstd.rb +64 -179
  117. data/lib/extzstd/version.rb +6 -1
  118. data/test/test_basic.rb +9 -6
  119. metadata +113 -57
  120. data/HISTORY.ja +0 -5
  121. data/contrib/zstd/common/entropy_common.c +0 -225
  122. data/contrib/zstd/common/huf.h +0 -228
  123. data/contrib/zstd/common/zstd_common.c +0 -83
  124. data/contrib/zstd/common/zstd_errors.h +0 -60
  125. data/contrib/zstd/common/zstd_internal.h +0 -267
  126. data/contrib/zstd/compress/huf_compress.c +0 -533
  127. data/contrib/zstd/compress/zbuff_compress.c +0 -319
  128. data/contrib/zstd/compress/zstd_compress.c +0 -3264
  129. data/contrib/zstd/compress/zstd_opt.h +0 -900
  130. data/contrib/zstd/decompress/huf_decompress.c +0 -883
  131. data/contrib/zstd/decompress/zbuff_decompress.c +0 -252
  132. data/contrib/zstd/decompress/zstd_decompress.c +0 -1842
  133. data/contrib/zstd/dictBuilder/zdict.h +0 -111
  134. data/contrib/zstd/zstd.h +0 -640
data/contrib/zstd/lib/compress/zstd_compress_superblock.h
@@ -0,0 +1,32 @@
+ /*
+ * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+ #ifndef ZSTD_COMPRESS_ADVANCED_H
+ #define ZSTD_COMPRESS_ADVANCED_H
+
+ /*-*************************************
+ * Dependencies
+ ***************************************/
+
+ #include "../zstd.h" /* ZSTD_CCtx */
+
+ /*-*************************************
+ * Target Compressed Block Size
+ ***************************************/
+
+ /* ZSTD_compressSuperBlock() :
+ * Used to compress a super block when targetCBlockSize is being used.
+ * The given block will be compressed into multiple sub blocks that are around targetCBlockSize. */
+ size_t ZSTD_compressSuperBlock(ZSTD_CCtx* zc,
+ void* dst, size_t dstCapacity,
+ void const* src, size_t srcSize,
+ unsigned lastBlock);
+
+ #endif /* ZSTD_COMPRESS_ADVANCED_H */
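For context: ZSTD_compressSuperBlock() is an internal entry point; from the public API, the super-block path is enabled by setting a target compressed block size on the compression context. A minimal sketch, not part of this diff, assuming the bundled zstd exposes the upstream ZSTD_c_targetCBlockSize parameter (still advanced/experimental in this version, hence ZSTD_STATIC_LINKING_ONLY):

#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_c_targetCBlockSize is advanced/experimental here */
#include <zstd.h>

/* Sketch: compress src into dst while asking the encoder for compressed blocks
 * of roughly 1 KiB, which routes block compression through the super-block
 * path declared above. Returns 0 on any error. */
static size_t compress_with_target_cblock(const void* src, size_t srcSize,
                                           void* dst, size_t dstCapacity)
{
    size_t written = 0;
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    if (cctx == NULL) return 0;
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetCBlockSize, 1024);  /* assumed parameter name */
    written = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
    ZSTD_freeCCtx(cctx);
    return ZSTD_isError(written) ? 0 : written;
}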
data/contrib/zstd/lib/compress/zstd_cwksp.h
@@ -0,0 +1,561 @@
+ /*
+ * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+ #ifndef ZSTD_CWKSP_H
+ #define ZSTD_CWKSP_H
+
+ /*-*************************************
+ * Dependencies
+ ***************************************/
+ #include "../common/zstd_internal.h"
+
+ #if defined (__cplusplus)
+ extern "C" {
+ #endif
+
+ /*-*************************************
+ * Constants
+ ***************************************/
+
+ /* Since the workspace is effectively its own little malloc implementation /
+ * arena, when we run under ASAN, we should similarly insert redzones between
+ * each internal element of the workspace, so ASAN will catch overruns that
+ * reach outside an object but that stay inside the workspace.
+ *
+ * This defines the size of that redzone.
+ */
+ #ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
+ #define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
+ #endif
+
+ /*-*************************************
+ * Structures
+ ***************************************/
+ typedef enum {
+ ZSTD_cwksp_alloc_objects,
+ ZSTD_cwksp_alloc_buffers,
+ ZSTD_cwksp_alloc_aligned
+ } ZSTD_cwksp_alloc_phase_e;
+
+ /**
+ * Used to describe whether the workspace is statically allocated (and will not
+ * necessarily ever be freed), or if it's dynamically allocated and we can
+ * expect a well-formed caller to free this.
+ */
+ typedef enum {
+ ZSTD_cwksp_dynamic_alloc,
+ ZSTD_cwksp_static_alloc
+ } ZSTD_cwksp_static_alloc_e;
+
+ /**
+ * Zstd fits all its internal datastructures into a single continuous buffer,
+ * so that it only needs to perform a single OS allocation (or so that a buffer
+ * can be provided to it and it can perform no allocations at all). This buffer
+ * is called the workspace.
+ *
+ * Several optimizations complicate that process of allocating memory ranges
+ * from this workspace for each internal datastructure:
+ *
+ * - These different internal datastructures have different setup requirements:
+ *
+ * - The static objects need to be cleared once and can then be trivially
+ * reused for each compression.
+ *
+ * - Various buffers don't need to be initialized at all--they are always
+ * written into before they're read.
+ *
+ * - The matchstate tables have a unique requirement that they don't need
+ * their memory to be totally cleared, but they do need the memory to have
+ * some bound, i.e., a guarantee that all values in the memory they've been
+ * allocated is less than some maximum value (which is the starting value
+ * for the indices that they will then use for compression). When this
+ * guarantee is provided to them, they can use the memory without any setup
+ * work. When it can't, they have to clear the area.
+ *
+ * - These buffers also have different alignment requirements.
+ *
+ * - We would like to reuse the objects in the workspace for multiple
+ * compressions without having to perform any expensive reallocation or
+ * reinitialization work.
+ *
+ * - We would like to be able to efficiently reuse the workspace across
+ * multiple compressions **even when the compression parameters change** and
+ * we need to resize some of the objects (where possible).
+ *
+ * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
+ * abstraction was created. It works as follows:
+ *
+ * Workspace Layout:
+ *
+ * [ ... workspace ... ]
+ * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
+ *
+ * The various objects that live in the workspace are divided into the
+ * following categories, and are allocated separately:
+ *
+ * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
+ * so that literally everything fits in a single buffer. Note: if present,
+ * this must be the first object in the workspace, since ZSTD_customFree{CCtx,
+ * CDict}() rely on a pointer comparison to see whether one or two frees are
+ * required.
+ *
+ * - Fixed size objects: these are fixed-size, fixed-count objects that are
+ * nonetheless "dynamically" allocated in the workspace so that we can
+ * control how they're initialized separately from the broader ZSTD_CCtx.
+ * Examples:
+ * - Entropy Workspace
+ * - 2 x ZSTD_compressedBlockState_t
+ * - CDict dictionary contents
+ *
+ * - Tables: these are any of several different datastructures (hash tables,
+ * chain tables, binary trees) that all respect a common format: they are
+ * uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
+ * Their sizes depend on the cparams.
+ *
+ * - Aligned: these buffers are used for various purposes that require 4 byte
+ * alignment, but don't require any initialization before they're used.
+ *
+ * - Buffers: these buffers are used for various purposes that don't require
+ * any alignment or initialization before they're used. This means they can
+ * be moved around at no cost for a new compression.
+ *
+ * Allocating Memory:
+ *
+ * The various types of objects must be allocated in order, so they can be
+ * correctly packed into the workspace buffer. That order is:
+ *
+ * 1. Objects
+ * 2. Buffers
+ * 3. Aligned
+ * 4. Tables
+ *
+ * Attempts to reserve objects of different types out of order will fail.
+ */
+ typedef struct {
+ void* workspace;
+ void* workspaceEnd;
+
+ void* objectEnd;
+ void* tableEnd;
+ void* tableValidEnd;
+ void* allocStart;
+
+ BYTE allocFailed;
+ int workspaceOversizedDuration;
+ ZSTD_cwksp_alloc_phase_e phase;
+ ZSTD_cwksp_static_alloc_e isStatic;
+ } ZSTD_cwksp;
+
+ /*-*************************************
+ * Functions
+ ***************************************/
+
+ MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
+
+ MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
+ (void)ws;
+ assert(ws->workspace <= ws->objectEnd);
+ assert(ws->objectEnd <= ws->tableEnd);
+ assert(ws->objectEnd <= ws->tableValidEnd);
+ assert(ws->tableEnd <= ws->allocStart);
+ assert(ws->tableValidEnd <= ws->allocStart);
+ assert(ws->allocStart <= ws->workspaceEnd);
+ }
+
+ /**
+ * Align must be a power of 2.
+ */
+ MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
+ size_t const mask = align - 1;
+ assert((align & mask) == 0);
+ return (size + mask) & ~mask;
+ }
+
+ /**
+ * Use this to determine how much space in the workspace we will consume to
+ * allocate this object. (Normally it should be exactly the size of the object,
+ * but under special conditions, like ASAN, where we pad each object, it might
+ * be larger.)
+ *
+ * Since tables aren't currently redzoned, you don't need to call through this
+ * to figure out how much space you need for the matchState tables. Everything
+ * else is though.
+ */
+ MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
+ if (size == 0)
+ return 0;
+ #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+ #else
+ return size;
+ #endif
+ }
+
+ MEM_STATIC void ZSTD_cwksp_internal_advance_phase(
+ ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
+ assert(phase >= ws->phase);
+ if (phase > ws->phase) {
+ if (ws->phase < ZSTD_cwksp_alloc_buffers &&
+ phase >= ZSTD_cwksp_alloc_buffers) {
+ ws->tableValidEnd = ws->objectEnd;
+ }
+ if (ws->phase < ZSTD_cwksp_alloc_aligned &&
+ phase >= ZSTD_cwksp_alloc_aligned) {
+ /* If unaligned allocations down from a too-large top have left us
+ * unaligned, we need to realign our alloc ptr. Technically, this
+ * can consume space that is unaccounted for in the neededSpace
+ * calculation. However, I believe this can only happen when the
+ * workspace is too large, and specifically when it is too large
+ * by a larger margin than the space that will be consumed. */
+ /* TODO: cleaner, compiler warning friendly way to do this??? */
+ ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));
+ if (ws->allocStart < ws->tableValidEnd) {
+ ws->tableValidEnd = ws->allocStart;
+ }
+ }
+ ws->phase = phase;
+ }
+ }
+
+ /**
+ * Returns whether this object/buffer/etc was allocated in this workspace.
+ */
+ MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
+ return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
+ }
+
+ /**
+ * Internal function. Do not use directly.
+ */
+ MEM_STATIC void* ZSTD_cwksp_reserve_internal(
+ ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
+ void* alloc;
+ void* bottom = ws->tableEnd;
+ ZSTD_cwksp_internal_advance_phase(ws, phase);
+ alloc = (BYTE *)ws->allocStart - bytes;
+
+ if (bytes == 0)
+ return NULL;
+
+ #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ /* over-reserve space */
+ alloc = (BYTE *)alloc - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+ #endif
+
+ DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
+ alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
+ ZSTD_cwksp_assert_internal_consistency(ws);
+ assert(alloc >= bottom);
+ if (alloc < bottom) {
+ DEBUGLOG(4, "cwksp: alloc failed!");
+ ws->allocFailed = 1;
+ return NULL;
+ }
+ if (alloc < ws->tableValidEnd) {
+ ws->tableValidEnd = alloc;
+ }
+ ws->allocStart = alloc;
+
+ #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
+ * either size. */
+ alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+ if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+ __asan_unpoison_memory_region(alloc, bytes);
+ }
+ #endif
+
+ return alloc;
+ }
+
+ /**
+ * Reserves and returns unaligned memory.
+ */
+ MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
+ return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
+ }
+
+ /**
+ * Reserves and returns memory sized on and aligned on sizeof(unsigned).
+ */
+ MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
+ assert((bytes & (sizeof(U32)-1)) == 0);
+ return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
+ }
+
+ /**
+ * Aligned on sizeof(unsigned). These buffers have the special property that
+ * their values remain constrained, allowing us to re-use them without
+ * memset()-ing them.
+ */
+ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
+ const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
+ void* alloc = ws->tableEnd;
+ void* end = (BYTE *)alloc + bytes;
+ void* top = ws->allocStart;
+
+ DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
+ alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
+ assert((bytes & (sizeof(U32)-1)) == 0);
+ ZSTD_cwksp_internal_advance_phase(ws, phase);
+ ZSTD_cwksp_assert_internal_consistency(ws);
+ assert(end <= top);
+ if (end > top) {
+ DEBUGLOG(4, "cwksp: table alloc failed!");
+ ws->allocFailed = 1;
+ return NULL;
+ }
+ ws->tableEnd = end;
+
+ #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+ __asan_unpoison_memory_region(alloc, bytes);
+ }
+ #endif
+
+ return alloc;
+ }
+
+ /**
+ * Aligned on sizeof(void*).
+ */
+ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
+ size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
+ void* alloc = ws->objectEnd;
+ void* end = (BYTE*)alloc + roundedBytes;
+
+ #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ /* over-reserve space */
+ end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+ #endif
+
+ DEBUGLOG(5,
+ "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
+ alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
+ assert(((size_t)alloc & (sizeof(void*)-1)) == 0);
+ assert((bytes & (sizeof(void*)-1)) == 0);
+ ZSTD_cwksp_assert_internal_consistency(ws);
+ /* we must be in the first phase, no advance is possible */
+ if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
+ DEBUGLOG(4, "cwksp: object alloc failed!");
+ ws->allocFailed = 1;
+ return NULL;
+ }
+ ws->objectEnd = end;
+ ws->tableEnd = end;
+ ws->tableValidEnd = end;
+
+ #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
+ * either size. */
+ alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+ if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+ __asan_unpoison_memory_region(alloc, bytes);
+ }
+ #endif
+
+ return alloc;
+ }
+
+ MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
+ DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");
+
+ #if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
+ /* To validate that the table re-use logic is sound, and that we don't
+ * access table space that we haven't cleaned, we re-"poison" the table
+ * space every time we mark it dirty. */
+ {
+ size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
+ assert(__msan_test_shadow(ws->objectEnd, size) == -1);
+ __msan_poison(ws->objectEnd, size);
+ }
+ #endif
+
+ assert(ws->tableValidEnd >= ws->objectEnd);
+ assert(ws->tableValidEnd <= ws->allocStart);
+ ws->tableValidEnd = ws->objectEnd;
+ ZSTD_cwksp_assert_internal_consistency(ws);
+ }
+
+ MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
+ DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
+ assert(ws->tableValidEnd >= ws->objectEnd);
+ assert(ws->tableValidEnd <= ws->allocStart);
+ if (ws->tableValidEnd < ws->tableEnd) {
+ ws->tableValidEnd = ws->tableEnd;
+ }
+ ZSTD_cwksp_assert_internal_consistency(ws);
+ }
+
+ /**
+ * Zero the part of the allocated tables not already marked clean.
+ */
+ MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
+ DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
+ assert(ws->tableValidEnd >= ws->objectEnd);
+ assert(ws->tableValidEnd <= ws->allocStart);
+ if (ws->tableValidEnd < ws->tableEnd) {
+ ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
+ }
+ ZSTD_cwksp_mark_tables_clean(ws);
+ }
+
+ /**
+ * Invalidates table allocations.
+ * All other allocations remain valid.
+ */
+ MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
+ DEBUGLOG(4, "cwksp: clearing tables!");
+
+ #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ /* We don't do this when the workspace is statically allocated, because
+ * when that is the case, we have no capability to hook into the end of the
+ * workspace's lifecycle to unpoison the memory.
+ */
+ if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+ size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
+ __asan_poison_memory_region(ws->objectEnd, size);
+ }
+ #endif
+
+ ws->tableEnd = ws->objectEnd;
+ ZSTD_cwksp_assert_internal_consistency(ws);
+ }
+
+ /**
+ * Invalidates all buffer, aligned, and table allocations.
+ * Object allocations remain valid.
+ */
+ MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
+ DEBUGLOG(4, "cwksp: clearing!");
+
+ #if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
+ /* To validate that the context re-use logic is sound, and that we don't
+ * access stuff that this compression hasn't initialized, we re-"poison"
+ * the workspace (or at least the non-static, non-table parts of it)
+ * every time we start a new compression. */
+ {
+ size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->tableValidEnd;
+ __msan_poison(ws->tableValidEnd, size);
+ }
+ #endif
+
+ #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
+ /* We don't do this when the workspace is statically allocated, because
+ * when that is the case, we have no capability to hook into the end of the
+ * workspace's lifecycle to unpoison the memory.
+ */
+ if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
+ size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
+ __asan_poison_memory_region(ws->objectEnd, size);
+ }
+ #endif
+
+ ws->tableEnd = ws->objectEnd;
+ ws->allocStart = ws->workspaceEnd;
+ ws->allocFailed = 0;
+ if (ws->phase > ZSTD_cwksp_alloc_buffers) {
+ ws->phase = ZSTD_cwksp_alloc_buffers;
+ }
+ ZSTD_cwksp_assert_internal_consistency(ws);
+ }
+
+ /**
+ * The provided workspace takes ownership of the buffer [start, start+size).
+ * Any existing values in the workspace are ignored (the previously managed
+ * buffer, if present, must be separately freed).
+ */
+ MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
+ DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
+ assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
+ ws->workspace = start;
+ ws->workspaceEnd = (BYTE*)start + size;
+ ws->objectEnd = ws->workspace;
+ ws->tableValidEnd = ws->objectEnd;
+ ws->phase = ZSTD_cwksp_alloc_objects;
+ ws->isStatic = isStatic;
+ ZSTD_cwksp_clear(ws);
+ ws->workspaceOversizedDuration = 0;
+ ZSTD_cwksp_assert_internal_consistency(ws);
+ }
+
+ MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
+ void* workspace = ZSTD_customMalloc(size, customMem);
+ DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
+ RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
+ ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
+ return 0;
+ }
+
+ MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
+ void *ptr = ws->workspace;
+ DEBUGLOG(4, "cwksp: freeing workspace");
+ ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
+ ZSTD_customFree(ptr, customMem);
+ }
+
+ /**
+ * Moves the management of a workspace from one cwksp to another. The src cwksp
+ * is left in an invalid state (src must be re-init()'ed before its used again).
+ */
+ MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
+ *dst = *src;
+ ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
+ }
+
+ MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
+ return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
+ }
+
+ MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
+ return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
+ + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
+ }
+
+ MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
+ return ws->allocFailed;
+ }
+
+ /*-*************************************
+ * Functions Checking Free Space
+ ***************************************/
+
+ MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
+ return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
+ }
+
+ MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+ return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
+ }
+
+ MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+ return ZSTD_cwksp_check_available(
+ ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
+ }
+
+ MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+ return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
+ && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
+ }
+
+ MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
+ ZSTD_cwksp* ws, size_t additionalNeededSpace) {
+ if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
+ ws->workspaceOversizedDuration++;
+ } else {
+ ws->workspaceOversizedDuration = 0;
+ }
+ }
+
+ #if defined (__cplusplus)
+ }
+ #endif
+
+ #endif /* ZSTD_CWKSP_H */
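For context: the cwksp functions above are internal to libzstd, but the allocation discipline the header documents (objects, then buffers, then aligned scratch, then tables) can be shown with a small sketch. This is not part of the diff; it assumes compilation inside the zstd lib/ tree, where the internal headers and the BYTE/U32 types are available, and it uses a hypothetical MyObject type and arbitrary sizes purely for illustration.

/* Sketch of the cwksp phase order. The caller must supply a sizeof(void*)-aligned
 * buffer large enough for all reservations (a few tens of KiB for these sizes). */
#include "compress/zstd_cwksp.h"   /* assumed include path, relative to lib/ */

typedef struct { void* opaque; } MyObject;  /* hypothetical pointer-sized object */

static int demo_cwksp_order(void* mem, size_t memSize)
{
    ZSTD_cwksp ws;
    ZSTD_cwksp_init(&ws, mem, memSize, ZSTD_cwksp_static_alloc);

    {   /* 1. Objects: sizeof(void*)-aligned, packed at the front of the workspace */
        MyObject* obj = (MyObject*)ZSTD_cwksp_reserve_object(&ws, sizeof(MyObject));
        /* 2. Buffers: no alignment or initialization guarantee, packed at the back */
        BYTE* literalsBuf = ZSTD_cwksp_reserve_buffer(&ws, 1 << 10);
        /* 3. Aligned: sizeof(U32)-aligned scratch, also from the back */
        void* scratch = ZSTD_cwksp_reserve_aligned(&ws, 64 * sizeof(U32));
        /* 4. Tables: U32 arrays whose stale contents may be reused, grown from the front */
        void* hashTable = ZSTD_cwksp_reserve_table(&ws, (1 << 12) * sizeof(U32));
        (void)obj; (void)literalsBuf; (void)scratch; (void)hashTable;
    }
    return ZSTD_cwksp_reserve_failed(&ws) ? -1 : 0;
}

Running out of room does not yield partially valid pointers; it sets the workspace's allocFailed flag, checked here via ZSTD_cwksp_reserve_failed(). Reserving out of phase order trips the assert in ZSTD_cwksp_internal_advance_phase() in debug builds.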