zstd-ruby 1.5.0.0 → 1.5.2.0
- checksums.yaml +4 -4
- data/.github/workflows/ruby.yml +2 -2
- data/README.md +1 -1
- data/ext/zstdruby/extconf.rb +2 -1
- data/ext/zstdruby/libzstd/Makefile +50 -175
- data/ext/zstdruby/libzstd/README.md +7 -1
- data/ext/zstdruby/libzstd/common/bitstream.h +24 -9
- data/ext/zstdruby/libzstd/common/compiler.h +89 -43
- data/ext/zstdruby/libzstd/common/entropy_common.c +11 -5
- data/ext/zstdruby/libzstd/common/error_private.h +79 -0
- data/ext/zstdruby/libzstd/common/fse.h +2 -1
- data/ext/zstdruby/libzstd/common/fse_decompress.c +1 -1
- data/ext/zstdruby/libzstd/common/huf.h +24 -22
- data/ext/zstdruby/libzstd/common/mem.h +18 -0
- data/ext/zstdruby/libzstd/common/pool.c +11 -6
- data/ext/zstdruby/libzstd/common/pool.h +2 -2
- data/ext/zstdruby/libzstd/common/portability_macros.h +137 -0
- data/ext/zstdruby/libzstd/common/xxhash.c +5 -805
- data/ext/zstdruby/libzstd/common/xxhash.h +5568 -167
- data/ext/zstdruby/libzstd/common/zstd_internal.h +95 -92
- data/ext/zstdruby/libzstd/common/zstd_trace.h +12 -3
- data/ext/zstdruby/libzstd/compress/clevels.h +134 -0
- data/ext/zstdruby/libzstd/compress/fse_compress.c +63 -27
- data/ext/zstdruby/libzstd/compress/huf_compress.c +537 -104
- data/ext/zstdruby/libzstd/compress/zstd_compress.c +307 -373
- data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +174 -83
- data/ext/zstdruby/libzstd/compress/zstd_compress_literals.c +4 -3
- data/ext/zstdruby/libzstd/compress/zstd_compress_literals.h +3 -1
- data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.c +15 -14
- data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.c +4 -3
- data/ext/zstdruby/libzstd/compress/zstd_cwksp.h +41 -27
- data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +295 -120
- data/ext/zstdruby/libzstd/compress/zstd_fast.c +309 -130
- data/ext/zstdruby/libzstd/compress/zstd_lazy.c +482 -562
- data/ext/zstdruby/libzstd/compress/zstd_ldm.c +9 -7
- data/ext/zstdruby/libzstd/compress/zstd_ldm.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_ldm_geartab.h +4 -1
- data/ext/zstdruby/libzstd/compress/zstd_opt.c +249 -148
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +76 -38
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +4 -1
- data/ext/zstdruby/libzstd/decompress/huf_decompress.c +727 -189
- data/ext/zstdruby/libzstd/decompress/huf_decompress_amd64.S +585 -0
- data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +85 -22
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.c +744 -220
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.h +8 -2
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_internal.h +34 -3
- data/ext/zstdruby/libzstd/deprecated/zbuff_compress.c +23 -3
- data/ext/zstdruby/libzstd/dictBuilder/cover.c +9 -2
- data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +11 -4
- data/ext/zstdruby/libzstd/dictBuilder/zdict.c +101 -30
- data/ext/zstdruby/libzstd/legacy/zstd_v01.c +2 -6
- data/ext/zstdruby/libzstd/legacy/zstd_v02.c +3 -7
- data/ext/zstdruby/libzstd/legacy/zstd_v03.c +3 -7
- data/ext/zstdruby/libzstd/legacy/zstd_v04.c +3 -7
- data/ext/zstdruby/libzstd/legacy/zstd_v05.c +3 -7
- data/ext/zstdruby/libzstd/legacy/zstd_v06.c +3 -7
- data/ext/zstdruby/libzstd/legacy/zstd_v07.c +3 -7
- data/ext/zstdruby/libzstd/libzstd.mk +203 -0
- data/ext/zstdruby/libzstd/libzstd.pc.in +1 -0
- data/ext/zstdruby/libzstd/module.modulemap +25 -0
- data/ext/zstdruby/libzstd/zdict.h +4 -4
- data/ext/zstdruby/libzstd/zstd.h +179 -136
- data/ext/zstdruby/zstdruby.c +2 -2
- data/lib/zstd-ruby/version.rb +1 -1
- metadata +11 -6
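
The rest of this diff is the vendored libzstd bump from upstream v1.5.0 to v1.5.2 under `data/ext/zstdruby/libzstd/`; the gem-side changes (README.md, extconf.rb, zstdruby.c, version.rb) are a few lines each. The hunks excerpted below come from `data/ext/zstdruby/libzstd/compress/zstd_cwksp.h` (+41 -27), the compression workspace allocator. The bump does not change libzstd's stable one-shot API; as a quick orientation, a minimal round-trip against the bundled `zstd.h` looks like this (a sketch: the buffer sizes and compression level are arbitrary choices for the example):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <zstd.h>

int main(void)
{
    const char src[] = "zstd round-trip smoke test, repeated: zstd zstd zstd";
    size_t const srcSize = sizeof(src);

    /* worst-case compressed size for a one-shot ZSTD_compress() call */
    size_t const dstCapacity = ZSTD_compressBound(srcSize);
    void* const dst = malloc(dstCapacity);
    if (dst == NULL) return 1;

    size_t const cSize = ZSTD_compress(dst, dstCapacity, src, srcSize, 3);
    if (ZSTD_isError(cSize)) { fprintf(stderr, "%s\n", ZSTD_getErrorName(cSize)); return 1; }

    char out[sizeof(src)];
    size_t const dSize = ZSTD_decompress(out, sizeof(out), dst, cSize);
    if (ZSTD_isError(dSize) || dSize != srcSize || memcmp(src, out, srcSize) != 0) return 1;

    printf("ok: %zu -> %zu bytes\n", srcSize, cSize);
    free(dst);
    return 0;
}
```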
@@ -219,7 +219,7 @@ MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
 MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
     /* For alignment, the wksp will always allocate an additional n_1=[1, 64] bytes
      * to align the beginning of tables section, as well as another n_2=[0, 63] bytes
-     * to align the beginning of the aligned secion.
+     * to align the beginning of the aligned section.
      *
      * n_1 + n_2 == 64 bytes if the cwksp is freshly allocated, due to tables and
      * aligneds being sized in multiples of 64 bytes.
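
The hunk above is only a comment typo fix, but the comment it touches documents the workspace's alignment budget: one align-up for the tables section plus one for the aligned section never costs more than 64 bytes each, which is where `ZSTD_cwksp_slack_space_required()`'s 2 * 64 bound comes from. A standalone sketch of the align-up arithmetic being budgeted (hypothetical names, not the library's internals):

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Advancing an address to the next 64-byte boundary costs [0, 63] bytes. */
static size_t bytes_to_align_up(uintptr_t addr, size_t alignBytes)
{
    size_t const mask = alignBytes - 1;          /* alignBytes: power of two */
    return (alignBytes - (addr & mask)) & mask;  /* 0 when already aligned */
}

int main(void)
{
    assert(bytes_to_align_up(0x1000, 64) == 0);   /* already 64-byte aligned */
    assert(bytes_to_align_up(0x1001, 64) == 63);  /* worst case */
    assert(bytes_to_align_up(0x103F, 64) == 1);   /* one byte short */
    return 0;
}
```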
@@ -243,12 +243,14 @@ MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignByt
 
 /**
  * Internal function. Do not use directly.
- * Reserves the given number of bytes within the aligned/buffer segment of the wksp, which
- * counts from the end of the wksp. (as opposed to the object/table segment)
+ * Reserves the given number of bytes within the aligned/buffer segment of the wksp,
+ * which counts from the end of the wksp (as opposed to the object/table segment).
  *
  * Returns a pointer to the beginning of that space.
  */
-MEM_STATIC void* ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t bytes) {
+MEM_STATIC void*
+ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
+{
     void* const alloc = (BYTE*)ws->allocStart - bytes;
     void* const bottom = ws->tableEnd;
     DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
@@ -260,6 +262,8 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t
         ws->allocFailed = 1;
         return NULL;
     }
+    /* the area is reserved from the end of wksp.
+     * If it overlaps with tableValidEnd, it voids guarantees on values' range */
     if (alloc < ws->tableValidEnd) {
         ws->tableValidEnd = alloc;
     }
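
The added comment spells out the workspace geometry: objects and tables grow upward from the bottom of the arena while `ZSTD_cwksp_reserve_internal_buffer_space()` carves buffers downward from `allocStart`, and a downward reservation that crosses `tableValidEnd` voids the clean-table guarantee. A toy model of that two-ended layout (the struct and function names are illustrative, not the real `ZSTD_cwksp` fields):

```c
#include <stdio.h>
#include <stddef.h>

typedef struct {
    char* tableEnd;    /* top of the upward-growing table/object area */
    char* allocStart;  /* bottom of the downward-growing buffer area  */
} toy_wksp;

static void* toy_reserve_from_end(toy_wksp* ws, size_t bytes)
{
    if ((size_t)(ws->allocStart - ws->tableEnd) < bytes)
        return NULL;               /* the two segments would collide */
    ws->allocStart -= bytes;       /* shrink the free gap from the top */
    return ws->allocStart;
}

int main(void)
{
    static char arena[256];
    toy_wksp ws = { arena, arena + sizeof(arena) };
    void* a = toy_reserve_from_end(&ws, 64);   /* ok: top 64 bytes of the arena */
    void* b = toy_reserve_from_end(&ws, 200);  /* NULL: only 192 bytes remain */
    printf("a=%p b=%p\n", a, b);
    return 0;
}
```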
@@ -269,10 +273,12 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t
 
 /**
  * Moves the cwksp to the next phase, and does any necessary allocations.
+ * cwksp initialization must necessarily go through each phase in order.
  * Returns a 0 on success, or zstd error
  */
-MEM_STATIC size_t ZSTD_cwksp_internal_advance_phase(
-        ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
+MEM_STATIC size_t
+ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
+{
     assert(phase >= ws->phase);
     if (phase > ws->phase) {
         /* Going from allocating objects to allocating buffers */
@@ -295,15 +301,15 @@ MEM_STATIC size_t ZSTD_cwksp_internal_advance_phase(
             { /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
                 void* const alloc = ws->objectEnd;
                 size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
-                void* const end = (BYTE*)alloc + bytesToAlign;
+                void* const objectEnd = (BYTE*)alloc + bytesToAlign;
                 DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
-                RETURN_ERROR_IF(end > ws->workspaceEnd, memory_allocation,
+                RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation,
                                 "table phase - alignment initial allocation failed!");
-                ws->objectEnd = end;
-                ws->tableEnd = end;
-                ws->tableValidEnd = end;
-            }
-        }
+                ws->objectEnd = objectEnd;
+                ws->tableEnd = objectEnd;  /* table area starts being empty */
+                if (ws->tableValidEnd < ws->tableEnd) {
+                    ws->tableValidEnd = ws->tableEnd;
+        }   }   }
         ws->phase = phase;
         ZSTD_cwksp_assert_internal_consistency(ws);
     }
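
Besides renaming `end` to `objectEnd`, this hunk stops unconditionally rewinding `tableValidEnd`, so knowledge of already-initialized table bytes survives the phase transition; the new doc line pins down the ordering invariant: a workspace only ratchets forward through its allocation phases. A schematic of that invariant (toy enum and names, not the real `ZSTD_cwksp_alloc_phase_e` members):

```c
#include <assert.h>

/* Ordered enum values make the "no going back" rule a plain comparison. */
typedef enum {
    toy_alloc_objects = 0,  /* long-lived objects, reserved once at init */
    toy_alloc_buffers = 1,  /* unaligned scratch buffers                 */
    toy_alloc_aligned = 2   /* 64-byte-aligned tables and vectors        */
} toy_phase;

static void toy_advance_phase(toy_phase* current, toy_phase requested)
{
    assert(requested >= *current);  /* never move backwards */
    if (requested > *current) {
        /* the real code re-aligns segment boundaries here, once per transition */
        *current = requested;
    }
}

int main(void)
{
    toy_phase p = toy_alloc_objects;
    toy_advance_phase(&p, toy_alloc_buffers);  /* ok: forward */
    toy_advance_phase(&p, toy_alloc_aligned);  /* ok: forward */
    /* toy_advance_phase(&p, toy_alloc_objects); would trip the assert */
    return p == toy_alloc_aligned ? 0 : 1;
}
```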
@@ -313,15 +319,17 @@ MEM_STATIC size_t ZSTD_cwksp_internal_advance_phase(
 /**
  * Returns whether this object/buffer/etc was allocated in this workspace.
  */
-MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
+MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
+{
     return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
 }
 
 /**
  * Internal function. Do not use directly.
  */
-MEM_STATIC void* ZSTD_cwksp_reserve_internal(
-        ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
+MEM_STATIC void*
+ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase)
+{
     void* alloc;
     if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
         return NULL;
@@ -351,14 +359,16 @@ MEM_STATIC void* ZSTD_cwksp_reserve_internal(
 /**
  * Reserves and returns unaligned memory.
  */
-MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
+MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
+{
     return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
 }
 
 /**
  * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
  */
-MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
+MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
+{
     void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
                                             ZSTD_cwksp_alloc_aligned);
     assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
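
`ZSTD_cwksp_reserve_aligned()` first rounds the request up with `ZSTD_cwksp_align()`, so the returned block is both 64-byte aligned and a multiple of 64 bytes long, which keeps the next reservation aligned as well. The rounding is the standard power-of-two trick; a small self-checking sketch (stand-in names):

```c
#include <assert.h>
#include <stddef.h>

#define TOY_ALIGNMENT 64  /* stand-in for ZSTD_CWKSP_ALIGNMENT_BYTES */

/* Round a request up to the next multiple of a power-of-two alignment,
 * as ZSTD_cwksp_align() does before the internal reserve. */
static size_t toy_align_up(size_t size, size_t align)
{
    size_t const mask = align - 1;
    return (size + mask) & ~mask;
}

int main(void)
{
    assert(toy_align_up(1,  TOY_ALIGNMENT) == 64);
    assert(toy_align_up(64, TOY_ALIGNMENT) == 64);
    assert(toy_align_up(65, TOY_ALIGNMENT) == 128);
    return 0;
}
```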
@@ -370,7 +380,8 @@ MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
  * their values remain constrained, allowing us to re-use them without
  * memset()-ing them.
  */
-MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
+MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
+{
     const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
     void* alloc;
     void* end;
@@ -408,9 +419,11 @@ MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
 
 /**
  * Aligned on sizeof(void*).
+ * Note : should happen only once, at workspace first initialization
  */
-MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
-    size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
+MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
+{
+    size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
     void* alloc = ws->objectEnd;
     void* end = (BYTE*)alloc + roundedBytes;
 
@@ -419,15 +432,15 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
     end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
 #endif
 
-    DEBUGLOG(5,
+    DEBUGLOG(4,
         "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
         alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
-    assert(((size_t)alloc & (sizeof(void*)-1)) == 0);
-    assert((bytes & (sizeof(void*)-1)) == 0);
+    assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
+    assert(bytes % ZSTD_ALIGNOF(void*) == 0);
     ZSTD_cwksp_assert_internal_consistency(ws);
     /* we must be in the first phase, no advance is possible */
     if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
-        DEBUGLOG(4, "cwksp: object alloc failed!");
+        DEBUGLOG(3, "cwksp: object alloc failed!");
         ws->allocFailed = 1;
         return NULL;
     }
@@ -438,7 +451,7 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
 #if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
     /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
      * either size. */
-    alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
+    alloc = (BYTE*)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
     if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
         __asan_unpoison_memory_region(alloc, bytes);
     }
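
Context for the one-character change above: under ASAN, `ZSTD_cwksp_reserve_object()` over-reserves by two redzones (the `end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;` line in the earlier hunk) and returns a pointer just past the leading one, so out-of-bounds accesses land on poisoned guard bytes. A toy version of that bookkeeping with the poisoning reduced to a comment (all names illustrative):

```c
#include <stddef.h>
#include <string.h>

#define TOY_REDZONE 8  /* stand-in for ZSTD_CWKSP_ASAN_REDZONE_SIZE */

/* Grow the request by two redzones, hand back the middle, and leave guard
 * bytes on either side. With ASAN enabled the real code poisons the guards
 * and unpoisons the middle region. */
static void* toy_reserve_with_redzones(char** cursor, char* const end, size_t bytes)
{
    size_t const total = bytes + 2 * TOY_REDZONE;
    if ((size_t)(end - *cursor) < total) return NULL;
    {   char* const alloc = *cursor;
        *cursor = alloc + total;
        return alloc + TOY_REDZONE;  /* usable region of exactly `bytes` bytes */
    }
}

int main(void)
{
    static char arena[128];
    char* cursor = arena;
    void* obj = toy_reserve_with_redzones(&cursor, arena + sizeof(arena), 32);
    if (obj == NULL) return 1;
    memset(obj, 0, 32);  /* safe: the guard bytes remain untouched */
    return 0;
}
```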
@@ -447,7 +460,8 @@ MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
     return alloc;
 }
 
-MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
+MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
+{
     DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");
 
 #if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)