zstd-ruby 1.4.5.0 → 1.4.9.0
This diff shows the changes between publicly released package versions as they appear in their respective public registries; it is provided for informational purposes only.
- checksums.yaml +4 -4
- data/.github/workflows/ruby.yml +35 -0
- data/README.md +2 -2
- data/ext/zstdruby/libzstd/Makefile +237 -138
- data/ext/zstdruby/libzstd/README.md +28 -0
- data/ext/zstdruby/libzstd/common/bitstream.h +25 -16
- data/ext/zstdruby/libzstd/common/compiler.h +118 -4
- data/ext/zstdruby/libzstd/common/cpu.h +1 -3
- data/ext/zstdruby/libzstd/common/debug.c +1 -1
- data/ext/zstdruby/libzstd/common/debug.h +12 -19
- data/ext/zstdruby/libzstd/common/entropy_common.c +189 -43
- data/ext/zstdruby/libzstd/common/error_private.c +2 -1
- data/ext/zstdruby/libzstd/common/error_private.h +2 -2
- data/ext/zstdruby/libzstd/common/fse.h +40 -12
- data/ext/zstdruby/libzstd/common/fse_decompress.c +124 -17
- data/ext/zstdruby/libzstd/common/huf.h +27 -6
- data/ext/zstdruby/libzstd/common/mem.h +67 -94
- data/ext/zstdruby/libzstd/common/pool.c +23 -17
- data/ext/zstdruby/libzstd/common/pool.h +2 -2
- data/ext/zstdruby/libzstd/common/threading.c +6 -5
- data/ext/zstdruby/libzstd/common/xxhash.c +19 -57
- data/ext/zstdruby/libzstd/common/xxhash.h +2 -2
- data/ext/zstdruby/libzstd/common/zstd_common.c +10 -10
- data/ext/zstdruby/libzstd/common/zstd_deps.h +111 -0
- data/ext/zstdruby/libzstd/common/zstd_errors.h +2 -1
- data/ext/zstdruby/libzstd/common/zstd_internal.h +90 -59
- data/ext/zstdruby/libzstd/common/zstd_trace.c +42 -0
- data/ext/zstdruby/libzstd/common/zstd_trace.h +152 -0
- data/ext/zstdruby/libzstd/compress/fse_compress.c +31 -24
- data/ext/zstdruby/libzstd/compress/hist.c +27 -29
- data/ext/zstdruby/libzstd/compress/hist.h +2 -2
- data/ext/zstdruby/libzstd/compress/huf_compress.c +217 -101
- data/ext/zstdruby/libzstd/compress/zstd_compress.c +1495 -478
- data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +143 -44
- data/ext/zstdruby/libzstd/compress/zstd_compress_literals.c +7 -7
- data/ext/zstdruby/libzstd/compress/zstd_compress_literals.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.c +18 -4
- data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.c +25 -21
- data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_cwksp.h +62 -26
- data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +23 -23
- data/ext/zstdruby/libzstd/compress/zstd_double_fast.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_fast.c +21 -21
- data/ext/zstdruby/libzstd/compress/zstd_fast.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_lazy.c +352 -78
- data/ext/zstdruby/libzstd/compress/zstd_lazy.h +21 -1
- data/ext/zstdruby/libzstd/compress/zstd_ldm.c +276 -209
- data/ext/zstdruby/libzstd/compress/zstd_ldm.h +8 -2
- data/ext/zstdruby/libzstd/compress/zstd_ldm_geartab.h +103 -0
- data/ext/zstdruby/libzstd/compress/zstd_opt.c +191 -46
- data/ext/zstdruby/libzstd/compress/zstd_opt.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +79 -410
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +27 -109
- data/ext/zstdruby/libzstd/decompress/huf_decompress.c +303 -201
- data/ext/zstdruby/libzstd/decompress/zstd_ddict.c +9 -9
- data/ext/zstdruby/libzstd/decompress/zstd_ddict.h +2 -2
- data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +370 -87
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.c +153 -45
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.h +6 -3
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_internal.h +28 -11
- data/ext/zstdruby/libzstd/deprecated/zbuff.h +1 -1
- data/ext/zstdruby/libzstd/deprecated/zbuff_common.c +1 -1
- data/ext/zstdruby/libzstd/deprecated/zbuff_compress.c +1 -1
- data/ext/zstdruby/libzstd/deprecated/zbuff_decompress.c +1 -1
- data/ext/zstdruby/libzstd/dictBuilder/cover.c +40 -31
- data/ext/zstdruby/libzstd/dictBuilder/cover.h +2 -2
- data/ext/zstdruby/libzstd/dictBuilder/divsufsort.c +1 -1
- data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +26 -25
- data/ext/zstdruby/libzstd/dictBuilder/zdict.c +22 -24
- data/ext/zstdruby/libzstd/dictBuilder/zdict.h +5 -4
- data/ext/zstdruby/libzstd/dll/example/Makefile +1 -1
- data/ext/zstdruby/libzstd/dll/example/README.md +16 -22
- data/ext/zstdruby/libzstd/legacy/zstd_legacy.h +1 -1
- data/ext/zstdruby/libzstd/legacy/zstd_v01.c +6 -2
- data/ext/zstdruby/libzstd/legacy/zstd_v01.h +1 -1
- data/ext/zstdruby/libzstd/legacy/zstd_v02.c +6 -2
- data/ext/zstdruby/libzstd/legacy/zstd_v02.h +1 -1
- data/ext/zstdruby/libzstd/legacy/zstd_v03.c +6 -2
- data/ext/zstdruby/libzstd/legacy/zstd_v03.h +1 -1
- data/ext/zstdruby/libzstd/legacy/zstd_v04.c +7 -3
- data/ext/zstdruby/libzstd/legacy/zstd_v04.h +1 -1
- data/ext/zstdruby/libzstd/legacy/zstd_v05.c +10 -6
- data/ext/zstdruby/libzstd/legacy/zstd_v05.h +1 -1
- data/ext/zstdruby/libzstd/legacy/zstd_v06.c +10 -6
- data/ext/zstdruby/libzstd/legacy/zstd_v06.h +1 -1
- data/ext/zstdruby/libzstd/legacy/zstd_v07.c +10 -6
- data/ext/zstdruby/libzstd/legacy/zstd_v07.h +1 -1
- data/ext/zstdruby/libzstd/libzstd.pc.in +3 -3
- data/ext/zstdruby/libzstd/zstd.h +414 -54
- data/lib/zstd-ruby/version.rb +1 -1
- metadata +7 -3
- data/.travis.yml +0 -14
data/ext/zstdruby/libzstd/common/mem.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-
+ * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -18,8 +18,10 @@ extern "C" {
 /*-****************************************
 *  Dependencies
 ******************************************/
-#include <stddef.h>
-#include
+#include <stddef.h>     /* size_t, ptrdiff_t */
+#include "compiler.h"   /* __has_builtin */
+#include "debug.h"      /* DEBUG_STATIC_ASSERT */
+#include "zstd_deps.h"  /* ZSTD_memcpy */


 /*-****************************************
@@ -39,93 +41,15 @@ extern "C" {
 #  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
 #endif

-#ifndef __has_builtin
-#  define __has_builtin(x) 0  /* compat. with non-clang compilers */
-#endif
-
-/* code only tested on 32 and 64 bits systems */
-#define MEM_STATIC_ASSERT(c)   { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
-MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
-
-/* detects whether we are being compiled under msan */
-#if defined (__has_feature)
-#  if __has_feature(memory_sanitizer)
-#    define MEMORY_SANITIZER 1
-#  endif
-#endif
-
-#if defined (MEMORY_SANITIZER)
-/* Not all platforms that support msan provide sanitizers/msan_interface.h.
- * We therefore declare the functions we need ourselves, rather than trying to
- * include the header file... */
-
-#include <stdint.h> /* intptr_t */
-
-/* Make memory region fully initialized (without changing its contents). */
-void __msan_unpoison(const volatile void *a, size_t size);
-
-/* Make memory region fully uninitialized (without changing its contents).
-   This is a legacy interface that does not update origin information. Use
-   __msan_allocated_memory() instead. */
-void __msan_poison(const volatile void *a, size_t size);
-
-/* Returns the offset of the first (at least partially) poisoned byte in the
-   memory range, or -1 if the whole range is good. */
-intptr_t __msan_test_shadow(const volatile void *x, size_t size);
-#endif
-
-/* detects whether we are being compiled under asan */
-#if defined (__has_feature)
-#  if __has_feature(address_sanitizer)
-#    define ADDRESS_SANITIZER 1
-#  endif
-#elif defined(__SANITIZE_ADDRESS__)
-#  define ADDRESS_SANITIZER 1
-#endif
-
-#if defined (ADDRESS_SANITIZER)
-/* Not all platforms that support asan provide sanitizers/asan_interface.h.
- * We therefore declare the functions we need ourselves, rather than trying to
- * include the header file... */
-
-/**
- * Marks a memory region (<c>[addr, addr+size)</c>) as unaddressable.
- *
- * This memory must be previously allocated by your program. Instrumented
- * code is forbidden from accessing addresses in this region until it is
- * unpoisoned. This function is not guaranteed to poison the entire region -
- * it could poison only a subregion of <c>[addr, addr+size)</c> due to ASan
- * alignment restrictions.
- *
- * \note This function is not thread-safe because no two threads can poison or
- *       unpoison memory in the same memory region simultaneously.
- *
- * \param addr Start of memory region.
- * \param size Size of memory region. */
-void __asan_poison_memory_region(void const volatile *addr, size_t size);
-
-/**
- * Marks a memory region (<c>[addr, addr+size)</c>) as addressable.
- *
- * This memory must be previously allocated by your program. Accessing
- * addresses in this region is allowed until this region is poisoned again.
- * This function could unpoison a super-region of <c>[addr, addr+size)</c> due
- * to ASan alignment restrictions.
- *
- * \note This function is not thread-safe because no two threads can
- *       poison or unpoison memory in the same memory region simultaneously.
- *
- * \param addr Start of memory region.
- * \param size Size of memory region. */
-void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
-#endif
-
-
 /*-**************************************************************
 *  Basic Types
 *****************************************************************/
 #if  !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
-#
+#  if defined(_AIX)
+#    include <inttypes.h>
+#  else
+#    include <stdint.h>  /* intptr_t */
+#  endif
   typedef  uint8_t BYTE;
   typedef uint16_t U16;
   typedef  int16_t S16;
@@ -157,7 +81,53 @@ void __asan_unpoison_memory_region(void const volatile *addr, size_t size);


 /*-**************************************************************
-*  Memory I/O
+*  Memory I/O API
+*****************************************************************/
+/*=== Static platform detection ===*/
+MEM_STATIC unsigned MEM_32bits(void);
+MEM_STATIC unsigned MEM_64bits(void);
+MEM_STATIC unsigned MEM_isLittleEndian(void);
+
+/*=== Native unaligned read/write ===*/
+MEM_STATIC U16 MEM_read16(const void* memPtr);
+MEM_STATIC U32 MEM_read32(const void* memPtr);
+MEM_STATIC U64 MEM_read64(const void* memPtr);
+MEM_STATIC size_t MEM_readST(const void* memPtr);
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value);
+MEM_STATIC void MEM_write32(void* memPtr, U32 value);
+MEM_STATIC void MEM_write64(void* memPtr, U64 value);
+
+/*=== Little endian unaligned read/write ===*/
+MEM_STATIC U16 MEM_readLE16(const void* memPtr);
+MEM_STATIC U32 MEM_readLE24(const void* memPtr);
+MEM_STATIC U32 MEM_readLE32(const void* memPtr);
+MEM_STATIC U64 MEM_readLE64(const void* memPtr);
+MEM_STATIC size_t MEM_readLEST(const void* memPtr);
+
+MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val);
+MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val);
+MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32);
+MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64);
+MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val);
+
+/*=== Big endian unaligned read/write ===*/
+MEM_STATIC U32 MEM_readBE32(const void* memPtr);
+MEM_STATIC U64 MEM_readBE64(const void* memPtr);
+MEM_STATIC size_t MEM_readBEST(const void* memPtr);
+
+MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32);
+MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64);
+MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val);
+
+/*=== Byteswap ===*/
+MEM_STATIC U32 MEM_swap32(U32 in);
+MEM_STATIC U64 MEM_swap64(U64 in);
+MEM_STATIC size_t MEM_swapST(size_t in);
+
+
+/*-**************************************************************
+*  Memory I/O Implementation
 *****************************************************************/
 /* MEM_FORCE_MEMORY_ACCESS :
  * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
@@ -236,37 +206,37 @@ MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v =

 MEM_STATIC U16 MEM_read16(const void* memPtr)
 {
-    U16 val;
+    U16 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
 }

 MEM_STATIC U32 MEM_read32(const void* memPtr)
 {
-    U32 val;
+    U32 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
 }

 MEM_STATIC U64 MEM_read64(const void* memPtr)
 {
-    U64 val;
+    U64 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
 }

 MEM_STATIC size_t MEM_readST(const void* memPtr)
 {
-    size_t val;
+    size_t val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
 }

 MEM_STATIC void MEM_write16(void* memPtr, U16 value)
 {
-
+    ZSTD_memcpy(memPtr, &value, sizeof(value));
 }

 MEM_STATIC void MEM_write32(void* memPtr, U32 value)
 {
-
+    ZSTD_memcpy(memPtr, &value, sizeof(value));
 }

 MEM_STATIC void MEM_write64(void* memPtr, U64 value)
 {
-
+    ZSTD_memcpy(memPtr, &value, sizeof(value));
 }

 #endif /* MEM_FORCE_MEMORY_ACCESS */
@@ -445,6 +415,9 @@ MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
     MEM_writeBE64(memPtr, (U64)val);
 }

+/* code only tested on 32 and 64 bits systems */
+MEM_STATIC void MEM_check(void) { DEBUG_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
+

 #if defined (__cplusplus)
 }
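In the mem.h hunks above, the unaligned-access helpers switch from plain memcpy() to the ZSTD_memcpy() wrapper supplied by the new zstd_deps.h, but the underlying pattern is unchanged: copy through a local variable so the load or store is standards-safe regardless of alignment. A minimal standalone sketch of that pattern (the function names here are illustrative, not part of the library):

    #include <stdint.h>
    #include <string.h>

    /* Portable unaligned 32-bit load: the fixed-size memcpy is compiled
     * down to a single move on architectures that allow unaligned access. */
    static uint32_t read32_unaligned(const void* ptr)
    {
        uint32_t val;
        memcpy(&val, ptr, sizeof(val));
        return val;
    }

    /* Portable unaligned 32-bit store, the same idea in the other direction. */
    static void write32_unaligned(void* ptr, uint32_t val)
    {
        memcpy(ptr, &val, sizeof(val));
    }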
data/ext/zstdruby/libzstd/common/pool.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-
+ * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -10,9 +10,9 @@


 /* ======   Dependencies   ======= */
-#include
+#include "zstd_deps.h"     /* size_t */
 #include "debug.h"         /* assert */
-#include "zstd_internal.h" /*
+#include "zstd_internal.h" /* ZSTD_customMalloc, ZSTD_customFree */
 #include "pool.h"

 /* ======   Compiler specifics   ====== */
@@ -105,6 +105,10 @@ static void* POOL_thread(void* opaque) {
     assert(0);  /* Unreachable */
 }

+POOL_ctx* ZSTD_createThreadPool(size_t numThreads) {
+    return POOL_create (numThreads, 0);
+}
+
 POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
     return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
 }
@@ -115,14 +119,14 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
     /* Check parameters */
     if (!numThreads) { return NULL; }
     /* Allocate the context and zero initialize */
-    ctx = (POOL_ctx*)
+    ctx = (POOL_ctx*)ZSTD_customCalloc(sizeof(POOL_ctx), customMem);
     if (!ctx) { return NULL; }
     /* Initialize the job queue.
      * It needs one extra space since one space is wasted to differentiate
      * empty and full queues.
      */
     ctx->queueSize = queueSize + 1;
-    ctx->queue = (POOL_job*)
+    ctx->queue = (POOL_job*)ZSTD_customMalloc(ctx->queueSize * sizeof(POOL_job), customMem);
     ctx->queueHead = 0;
     ctx->queueTail = 0;
     ctx->numThreadsBusy = 0;
@@ -136,7 +140,7 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
     }
     ctx->shutdown = 0;
     /* Allocate space for the thread handles */
-    ctx->threads = (ZSTD_pthread_t*)
+    ctx->threads = (ZSTD_pthread_t*)ZSTD_customMalloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
     ctx->threadCapacity = 0;
     ctx->customMem = customMem;
     /* Check for errors */
@@ -179,12 +183,14 @@ void POOL_free(POOL_ctx *ctx) {
     ZSTD_pthread_mutex_destroy(&ctx->queueMutex);
     ZSTD_pthread_cond_destroy(&ctx->queuePushCond);
     ZSTD_pthread_cond_destroy(&ctx->queuePopCond);
-
-
-
+    ZSTD_customFree(ctx->queue, ctx->customMem);
+    ZSTD_customFree(ctx->threads, ctx->customMem);
+    ZSTD_customFree(ctx, ctx->customMem);
 }

-
+void ZSTD_freeThreadPool (ZSTD_threadPool* pool) {
+  POOL_free (pool);
+}

 size_t POOL_sizeof(POOL_ctx *ctx) {
     if (ctx==NULL) return 0;  /* supports sizeof NULL */
@@ -203,11 +209,11 @@ static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads)
         return 0;
     }
     /* numThreads > threadCapacity */
-    {   ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)
+    {   ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_customMalloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem);
         if (!threadPool) return 1;
         /* replace existing thread pool */
-
-
+        ZSTD_memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool));
+        ZSTD_customFree(ctx->threads, ctx->customMem);
         ctx->threads = threadPool;
         /* Initialize additional threads */
         {   size_t threadId;
@@ -301,7 +307,7 @@ int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque)
 struct POOL_ctx_s {
     int dummy;
 };
-static POOL_ctx
+static POOL_ctx g_poolCtx;

 POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
     return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
@@ -311,11 +317,11 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customM
     (void)numThreads;
     (void)queueSize;
     (void)customMem;
-    return &
+    return &g_poolCtx;
 }

 void POOL_free(POOL_ctx* ctx) {
-    assert(!ctx || ctx == &
+    assert(!ctx || ctx == &g_poolCtx);
     (void)ctx;
 }

@@ -337,7 +343,7 @@ int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) {

 size_t POOL_sizeof(POOL_ctx* ctx) {
     if (ctx==NULL) return 0;  /* supports sizeof NULL */
-    assert(ctx == &
+    assert(ctx == &g_poolCtx);
     return sizeof(*ctx);
 }
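pool.c now exports the worker pool through ZSTD_createThreadPool() and ZSTD_freeThreadPool(), so several compression contexts can share one set of threads instead of each spawning its own. A hedged sketch of how that is typically wired up, assuming the experimental, static-linking-only declarations in zstd.h (including ZSTD_CCtx_refThreadPool()); error handling is omitted:

    #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_threadPool is an experimental API */
    #include <zstd.h>

    /* Two multi-threaded compression contexts borrowing the same 4 workers. */
    static void compress_with_shared_pool(void)
    {
        ZSTD_threadPool* const pool = ZSTD_createThreadPool(4);
        ZSTD_CCtx* const cctx1 = ZSTD_createCCtx();
        ZSTD_CCtx* const cctx2 = ZSTD_createCCtx();

        ZSTD_CCtx_setParameter(cctx1, ZSTD_c_nbWorkers, 4);
        ZSTD_CCtx_setParameter(cctx2, ZSTD_c_nbWorkers, 4);
        ZSTD_CCtx_refThreadPool(cctx1, pool);   /* both contexts reference the shared pool */
        ZSTD_CCtx_refThreadPool(cctx2, pool);

        /* ... run ZSTD_compress2() / ZSTD_compressStream2() on each context ... */

        ZSTD_freeCCtx(cctx1);
        ZSTD_freeCCtx(cctx2);
        ZSTD_freeThreadPool(pool);              /* free the pool only after its users */
    }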
data/ext/zstdruby/libzstd/common/threading.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-
+ * Copyright (c) 2016-2021, Yann Collet, Facebook, Inc.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -16,7 +16,7 @@ extern "C" {
 #endif


-#include
+#include "zstd_deps.h"
 #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_customMem */
 #include "../zstd.h"

@@ -78,11 +78,12 @@ int ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr)

 #if defined(ZSTD_MULTITHREAD) && DEBUGLEVEL >= 1 && !defined(_WIN32)

-#
+#define ZSTD_DEPS_NEED_MALLOC
+#include "zstd_deps.h"

 int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr)
 {
-    *mutex = (pthread_mutex_t*)
+    *mutex = (pthread_mutex_t*)ZSTD_malloc(sizeof(pthread_mutex_t));
     if (!*mutex)
         return 1;
     return pthread_mutex_init(*mutex, attr);
@@ -94,14 +95,14 @@ int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex)
         return 0;
     {
         int const ret = pthread_mutex_destroy(*mutex);
-
+        ZSTD_free(*mutex);
         return ret;
     }
 }

 int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr)
 {
-    *cond = (pthread_cond_t*)
+    *cond = (pthread_cond_t*)ZSTD_malloc(sizeof(pthread_cond_t));
     if (!*cond)
         return 1;
     return pthread_cond_init(*cond, attr);
@@ -113,7 +114,7 @@ int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond)
         return 0;
     {
         int const ret = pthread_cond_destroy(*cond);
-
+        ZSTD_free(*cond);
         return ret;
     }
 }
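threading.c, like pool.c, mem.h and xxhash.c, now routes its libc usage through the new common/zstd_deps.h (listed above with +111 lines), and pulls in heap functions only when ZSTD_DEPS_NEED_MALLOC is defined before inclusion. A rough sketch of what such a wrapper header looks like; the ZSTD_* names match the diff, but the guard macros and exact definitions here are illustrative rather than copied from the release:

    /* zstd_deps-style shim: funnel libc usage through one header so a
     * freestanding or custom-allocator build can replace it in one place. */
    #ifndef EXAMPLE_DEPS_COMMON
    #define EXAMPLE_DEPS_COMMON
    #include <string.h>                              /* memcpy, memmove, memset */
    #define ZSTD_memcpy(d, s, n)  memcpy((d), (s), (n))
    #define ZSTD_memmove(d, s, n) memmove((d), (s), (n))
    #define ZSTD_memset(p, v, n)  memset((p), (v), (n))
    #endif /* EXAMPLE_DEPS_COMMON */

    /* Heap helpers are only exposed to files that ask for them. */
    #if defined(ZSTD_DEPS_NEED_MALLOC) && !defined(EXAMPLE_DEPS_MALLOC)
    #define EXAMPLE_DEPS_MALLOC
    #include <stdlib.h>                              /* malloc, calloc, free */
    #define ZSTD_malloc(s)     malloc(s)
    #define ZSTD_calloc(n, s)  calloc((n), (s))
    #define ZSTD_free(p)       free(p)
    #endif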
data/ext/zstdruby/libzstd/common/xxhash.c

@@ -1,6 +1,6 @@
 /*
  *  xxHash - Fast Hash algorithm
- *  Copyright (c) 2012-
+ *  Copyright (c) 2012-2021, Yann Collet, Facebook, Inc.
  *
  *  You can contact the author at :
  *  - xxHash homepage: http://www.xxhash.com
@@ -77,14 +77,12 @@
 *  Includes & Memory related functions
 ***************************************/
 /* Modify the local functions below should you wish to use some other memory routines */
-/* for
-#
-#include
-static void* XXH_malloc(size_t s) { return
-static void  XXH_free  (void* p)  {
-
-#include <string.h>
-static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
+/* for ZSTD_malloc(), ZSTD_free() */
+#define ZSTD_DEPS_NEED_MALLOC
+#include "zstd_deps.h"  /* size_t, ZSTD_malloc, ZSTD_free, ZSTD_memcpy */
+static void* XXH_malloc(size_t s) { return ZSTD_malloc(s); }
+static void  XXH_free  (void* p)  { ZSTD_free(p); }
+static void* XXH_memcpy(void* dest, const void* src, size_t size) { return ZSTD_memcpy(dest,src,size); }

 #ifndef XXH_STATIC_LINKING_ONLY
 #  define XXH_STATIC_LINKING_ONLY
@@ -95,49 +93,13 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcp
 /* *************************************
 *  Compiler Specific Options
 ***************************************/
-#
-#  define INLINE_KEYWORD inline
-#else
-#  define INLINE_KEYWORD
-#endif
-
-#if defined(__GNUC__) || defined(__ICCARM__)
-#  define FORCE_INLINE_ATTR __attribute__((always_inline))
-#elif defined(_MSC_VER)
-#  define FORCE_INLINE_ATTR __forceinline
-#else
-#  define FORCE_INLINE_ATTR
-#endif
-
-#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
-
-
-#ifdef _MSC_VER
-#  pragma warning(disable : 4127)      /* disable: C4127: conditional expression is constant */
-#endif
+#include "compiler.h"


 /* *************************************
 *  Basic Types
 ***************************************/
-#
-#  define MEM_MODULE
-#  if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
-#    include <stdint.h>
-    typedef uint8_t  BYTE;
-    typedef uint16_t U16;
-    typedef uint32_t U32;
-    typedef  int32_t S32;
-    typedef uint64_t U64;
-#  else
-    typedef unsigned char      BYTE;
-    typedef unsigned short     U16;
-    typedef unsigned int       U32;
-    typedef   signed int       S32;
-    typedef unsigned long long U64;   /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */
-#  endif
-#endif
-
+#include "mem.h"  /* BYTE, U32, U64, size_t */

 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

@@ -163,14 +125,14 @@ static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
 static U32 XXH_read32(const void* memPtr)
 {
     U32 val;
-
+    ZSTD_memcpy(&val, memPtr, sizeof(val));
     return val;
 }

 static U64 XXH_read64(const void* memPtr)
 {
     U64 val;
-
+    ZSTD_memcpy(&val, memPtr, sizeof(val));
     return val;
 }

@@ -307,12 +269,12 @@ XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
 ****************************/
 XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const XXH32_state_t* restrict srcState)
 {
-
+    ZSTD_memcpy(dstState, srcState, sizeof(*dstState));
 }

 XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState)
 {
-
+    ZSTD_memcpy(dstState, srcState, sizeof(*dstState));
 }


@@ -554,12 +516,12 @@ XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
 XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
 {
     XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
-
+    ZSTD_memset(&state, 0, sizeof(state)-4);   /* do not write into reserved, for future removal */
     state.v1 = seed + PRIME32_1 + PRIME32_2;
     state.v2 = seed + PRIME32_2;
     state.v3 = seed + 0;
     state.v4 = seed - PRIME32_1;
-
+    ZSTD_memcpy(statePtr, &state, sizeof(state));
     return XXH_OK;
 }

@@ -567,12 +529,12 @@ XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int s
 XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
 {
     XXH64_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
-
+    ZSTD_memset(&state, 0, sizeof(state)-8);   /* do not write into reserved, for future removal */
     state.v1 = seed + PRIME64_1 + PRIME64_2;
     state.v2 = seed + PRIME64_2;
     state.v3 = seed + 0;
     state.v4 = seed - PRIME64_1;
-
+    ZSTD_memcpy(statePtr, &state, sizeof(state));
     return XXH_OK;
 }

@@ -843,14 +805,14 @@ XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t
 {
     XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
     if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
-
+    ZSTD_memcpy(dst, &hash, sizeof(*dst));
 }

 XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
 {
     XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
     if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
-
+    ZSTD_memcpy(dst, &hash, sizeof(*dst));
 }

 XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)