zstdlib 0.7.0-x64-mingw32 → 0.8.0-x64-mingw32
- checksums.yaml +4 -4
- data/CHANGES.md +5 -0
- data/ext/zstdlib/extconf.rb +1 -1
- data/ext/zstdlib/ruby/zlib-3.0/zstdlib.c +4994 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/bitstream.h +25 -16
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/compiler.h +118 -4
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/cpu.h +1 -3
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/debug.c +1 -1
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/debug.h +12 -19
- data/ext/zstdlib/zstd-1.5.0/lib/common/entropy_common.c +362 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/error_private.c +2 -1
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/error_private.h +3 -3
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/fse.h +40 -12
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/fse_decompress.c +139 -22
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/huf.h +29 -7
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/mem.h +69 -98
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/pool.c +23 -17
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/pool.h +2 -2
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/threading.c +6 -5
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/threading.h +0 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/xxhash.c +20 -60
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/xxhash.h +2 -2
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/zstd_common.c +10 -10
- data/ext/zstdlib/zstd-1.5.0/lib/common/zstd_deps.h +111 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/zstd_internal.h +105 -62
- data/ext/zstdlib/zstd-1.5.0/lib/common/zstd_trace.h +154 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/fse_compress.c +31 -24
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/hist.c +27 -29
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/hist.h +2 -2
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/huf_compress.c +265 -126
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress.c +2843 -728
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_internal.h +305 -63
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_literals.c +8 -8
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_literals.h +1 -1
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_sequences.c +29 -7
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_sequences.h +1 -1
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_superblock.c +22 -295
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_superblock.h +1 -1
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_cwksp.h +204 -67
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_double_fast.c +25 -25
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_double_fast.h +1 -1
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_fast.c +23 -23
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_fast.h +1 -1
- data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_lazy.c +2184 -0
- data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_lazy.h +125 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_ldm.c +314 -211
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_ldm.h +9 -2
- data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_ldm_geartab.h +103 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_opt.c +191 -46
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_opt.h +1 -1
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstdmt_compress.c +93 -415
- data/ext/zstdlib/zstd-1.5.0/lib/compress/zstdmt_compress.h +110 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/huf_decompress.c +342 -239
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/zstd_ddict.c +9 -9
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/zstd_ddict.h +2 -2
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/zstd_decompress.c +369 -87
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/zstd_decompress_block.c +191 -75
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/zstd_decompress_block.h +6 -3
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/zstd_decompress_internal.h +27 -11
- data/ext/zstdlib/zstd-1.5.0/lib/zdict.h +452 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/zstd.h +568 -126
- data/ext/zstdlib/{zstd-1.4.5/lib/common → zstd-1.5.0/lib}/zstd_errors.h +2 -1
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/gzclose.c +0 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/gzcompatibility.h +1 -1
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/gzguts.h +0 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/gzlib.c +0 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/gzread.c +0 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/gzwrite.c +0 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/zstd_zlibwrapper.c +126 -44
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/zstd_zlibwrapper.h +1 -1
- data/lib/2.2/zstdlib.so +0 -0
- data/lib/2.3/zstdlib.so +0 -0
- data/lib/2.4/zstdlib.so +0 -0
- data/lib/2.5/zstdlib.so +0 -0
- data/lib/2.6/zstdlib.so +0 -0
- data/lib/2.7/zstdlib.so +0 -0
- metadata +69 -64
- data/ext/zstdlib/zstd-1.4.5/lib/common/entropy_common.c +0 -216
- data/ext/zstdlib/zstd-1.4.5/lib/compress/zstd_lazy.c +0 -1138
- data/ext/zstdlib/zstd-1.4.5/lib/compress/zstd_lazy.h +0 -67
- data/ext/zstdlib/zstd-1.4.5/lib/compress/zstdmt_compress.h +0 -192
@@ -1,5 +1,5 @@
 /*
- * Copyright (c)
+ * Copyright (c) Yann Collet, Facebook, Inc.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -18,8 +18,10 @@ extern "C" {
 /*-****************************************
 *  Dependencies
 ******************************************/
-#include <stddef.h>
-#include
+#include <stddef.h>     /* size_t, ptrdiff_t */
+#include "compiler.h"   /* __has_builtin */
+#include "debug.h"      /* DEBUG_STATIC_ASSERT */
+#include "zstd_deps.h"  /* ZSTD_memcpy */


 /*-****************************************
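The new zstd_deps.h header is not shown in this diff, but the pattern it introduces is a thin indirection over the C standard library, so that freestanding or customized builds can substitute their own routines. A rough sketch of that pattern (an illustration of the idea, not the verbatim header):

/* Hypothetical sketch of the zstd_deps.h pattern: route libc calls through
 * ZSTD_-prefixed names; a freestanding build can redefine these instead. */
#include <string.h>              /* memcpy, memmove, memset */
#define ZSTD_memcpy(d, s, n)     memcpy((d), (s), (n))
#define ZSTD_memmove(d, s, n)    memmove((d), (s), (n))
#define ZSTD_memset(p, v, n)     memset((p), (v), (n))

#ifdef ZSTD_DEPS_NEED_MALLOC     /* only pulled in by files that opt in */
#  include <stdlib.h>            /* malloc, calloc, free */
#  define ZSTD_malloc(s)         malloc(s)
#  define ZSTD_calloc(n, s)      calloc((n), (s))
#  define ZSTD_free(p)           free(p)
#endif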
@@ -39,93 +41,15 @@ extern "C" {
 #  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
 #endif

-#ifndef __has_builtin
-#  define __has_builtin(x) 0  /* compat. with non-clang compilers */
-#endif
-
-/* code only tested on 32 and 64 bits systems */
-#define MEM_STATIC_ASSERT(c)   { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
-MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
-
-/* detects whether we are being compiled under msan */
-#if defined (__has_feature)
-#  if __has_feature(memory_sanitizer)
-#    define MEMORY_SANITIZER 1
-#  endif
-#endif
-
-#if defined (MEMORY_SANITIZER)
-/* Not all platforms that support msan provide sanitizers/msan_interface.h.
- * We therefore declare the functions we need ourselves, rather than trying to
- * include the header file... */
-
-#include <stdint.h> /* intptr_t */
-
-/* Make memory region fully initialized (without changing its contents). */
-void __msan_unpoison(const volatile void *a, size_t size);
-
-/* Make memory region fully uninitialized (without changing its contents).
-   This is a legacy interface that does not update origin information. Use
-   __msan_allocated_memory() instead. */
-void __msan_poison(const volatile void *a, size_t size);
-
-/* Returns the offset of the first (at least partially) poisoned byte in the
-   memory range, or -1 if the whole range is good. */
-intptr_t __msan_test_shadow(const volatile void *x, size_t size);
-#endif
-
-/* detects whether we are being compiled under asan */
-#if defined (__has_feature)
-#  if __has_feature(address_sanitizer)
-#    define ADDRESS_SANITIZER 1
-#  endif
-#elif defined(__SANITIZE_ADDRESS__)
-#  define ADDRESS_SANITIZER 1
-#endif
-
-#if defined (ADDRESS_SANITIZER)
-/* Not all platforms that support asan provide sanitizers/asan_interface.h.
- * We therefore declare the functions we need ourselves, rather than trying to
- * include the header file... */
-
-/**
- * Marks a memory region (<c>[addr, addr+size)</c>) as unaddressable.
- *
- * This memory must be previously allocated by your program. Instrumented
- * code is forbidden from accessing addresses in this region until it is
- * unpoisoned. This function is not guaranteed to poison the entire region -
- * it could poison only a subregion of <c>[addr, addr+size)</c> due to ASan
- * alignment restrictions.
- *
- * \note This function is not thread-safe because no two threads can poison or
- * unpoison memory in the same memory region simultaneously.
- *
- * \param addr Start of memory region.
- * \param size Size of memory region. */
-void __asan_poison_memory_region(void const volatile *addr, size_t size);
-
-/**
- * Marks a memory region (<c>[addr, addr+size)</c>) as addressable.
- *
- * This memory must be previously allocated by your program. Accessing
- * addresses in this region is allowed until this region is poisoned again.
- * This function could unpoison a super-region of <c>[addr, addr+size)</c> due
- * to ASan alignment restrictions.
- *
- * \note This function is not thread-safe because no two threads can
- * poison or unpoison memory in the same memory region simultaneously.
- *
- * \param addr Start of memory region.
- * \param size Size of memory region. */
-void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
-#endif
-
-
 /*-**************************************************************
 *  Basic Types
 *****************************************************************/
 #if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
-#
+#  if defined(_AIX)
+#    include <inttypes.h>
+#  else
+#    include <stdint.h> /* intptr_t */
+#  endif
   typedef  uint8_t BYTE;
   typedef uint16_t U16;
   typedef  int16_t S16;
@@ -157,7 +81,53 @@ void __asan_unpoison_memory_region(void const volatile *addr, size_t size);


 /*-**************************************************************
-*  Memory I/O
+*  Memory I/O API
+*****************************************************************/
+/*=== Static platform detection ===*/
+MEM_STATIC unsigned MEM_32bits(void);
+MEM_STATIC unsigned MEM_64bits(void);
+MEM_STATIC unsigned MEM_isLittleEndian(void);
+
+/*=== Native unaligned read/write ===*/
+MEM_STATIC U16 MEM_read16(const void* memPtr);
+MEM_STATIC U32 MEM_read32(const void* memPtr);
+MEM_STATIC U64 MEM_read64(const void* memPtr);
+MEM_STATIC size_t MEM_readST(const void* memPtr);
+
+MEM_STATIC void MEM_write16(void* memPtr, U16 value);
+MEM_STATIC void MEM_write32(void* memPtr, U32 value);
+MEM_STATIC void MEM_write64(void* memPtr, U64 value);
+
+/*=== Little endian unaligned read/write ===*/
+MEM_STATIC U16 MEM_readLE16(const void* memPtr);
+MEM_STATIC U32 MEM_readLE24(const void* memPtr);
+MEM_STATIC U32 MEM_readLE32(const void* memPtr);
+MEM_STATIC U64 MEM_readLE64(const void* memPtr);
+MEM_STATIC size_t MEM_readLEST(const void* memPtr);
+
+MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val);
+MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val);
+MEM_STATIC void MEM_writeLE32(void* memPtr, U32 val32);
+MEM_STATIC void MEM_writeLE64(void* memPtr, U64 val64);
+MEM_STATIC void MEM_writeLEST(void* memPtr, size_t val);
+
+/*=== Big endian unaligned read/write ===*/
+MEM_STATIC U32 MEM_readBE32(const void* memPtr);
+MEM_STATIC U64 MEM_readBE64(const void* memPtr);
+MEM_STATIC size_t MEM_readBEST(const void* memPtr);
+
+MEM_STATIC void MEM_writeBE32(void* memPtr, U32 val32);
+MEM_STATIC void MEM_writeBE64(void* memPtr, U64 val64);
+MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val);
+
+/*=== Byteswap ===*/
+MEM_STATIC U32 MEM_swap32(U32 in);
+MEM_STATIC U64 MEM_swap64(U64 in);
+MEM_STATIC size_t MEM_swapST(size_t in);
+
+
+/*-**************************************************************
+*  Memory I/O Implementation
 *****************************************************************/
 /* MEM_FORCE_MEMORY_ACCESS :
  * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
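For reference, the default strategy behind these declarations (MEM_FORCE_MEMORY_ACCESS left unset) is the plain memcpy idiom. A minimal standalone illustration of an unaligned little-endian read using only standard C plus a GCC/Clang byteswap builtin (names here are illustrative, not zstd's):

#include <stdint.h>
#include <string.h>

/* Copy into a local, then byte-swap if the host is big-endian.
 * memcpy is safe for unaligned pointers; compilers typically collapse
 * a fixed-size memcpy like this into a single load. */
static uint32_t read_le32(const void* ptr)
{
    uint32_t v;
    memcpy(&v, ptr, sizeof(v));
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    v = __builtin_bswap32(v);   /* stored little-endian, host is big-endian */
#endif
    return v;
}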
@@ -173,9 +143,7 @@ void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
  *            Prefer these methods in priority order (0 > 1 > 2)
  */
 #ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
-#  if defined(
-#    define MEM_FORCE_MEMORY_ACCESS 2
-#  elif defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__)
+#  if defined(__INTEL_COMPILER) || defined(__GNUC__) || defined(__ICCARM__)
 #    define MEM_FORCE_MEMORY_ACCESS 1
 #  endif
 #endif
@@ -236,37 +204,37 @@ MEM_STATIC void MEM_write64(void* memPtr, U64 value) { ((unalign64*)memPtr)->v =

 MEM_STATIC U16 MEM_read16(const void* memPtr)
 {
-    U16 val;
+    U16 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
 }

 MEM_STATIC U32 MEM_read32(const void* memPtr)
 {
-    U32 val;
+    U32 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
 }

 MEM_STATIC U64 MEM_read64(const void* memPtr)
 {
-    U64 val;
+    U64 val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
 }

 MEM_STATIC size_t MEM_readST(const void* memPtr)
 {
-    size_t val;
+    size_t val; ZSTD_memcpy(&val, memPtr, sizeof(val)); return val;
 }

 MEM_STATIC void MEM_write16(void* memPtr, U16 value)
 {
-
+    ZSTD_memcpy(memPtr, &value, sizeof(value));
 }

 MEM_STATIC void MEM_write32(void* memPtr, U32 value)
 {
-
+    ZSTD_memcpy(memPtr, &value, sizeof(value));
 }

 MEM_STATIC void MEM_write64(void* memPtr, U64 value)
 {
-
+    ZSTD_memcpy(memPtr, &value, sizeof(value));
 }

 #endif /* MEM_FORCE_MEMORY_ACCESS */
@@ -338,7 +306,7 @@ MEM_STATIC void MEM_writeLE16(void* memPtr, U16 val)

 MEM_STATIC U32 MEM_readLE24(const void* memPtr)
 {
-    return MEM_readLE16(memPtr) + (((const BYTE*)memPtr)[2] << 16);
+    return (U32)MEM_readLE16(memPtr) + ((U32)(((const BYTE*)memPtr)[2]) << 16);
 }

 MEM_STATIC void MEM_writeLE24(void* memPtr, U32 val)
@@ -445,6 +413,9 @@ MEM_STATIC void MEM_writeBEST(void* memPtr, size_t val)
     MEM_writeBE64(memPtr, (U64)val);
 }

+/* code only tested on 32 and 64 bits systems */
+MEM_STATIC void MEM_check(void) { DEBUG_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
+

 #if defined (__cplusplus)
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c)
+ * Copyright (c) Yann Collet, Facebook, Inc.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -10,9 +10,9 @@


 /* ======   Dependencies   ======= */
-#include
+#include "zstd_deps.h" /* size_t */
 #include "debug.h"     /* assert */
-#include "zstd_internal.h"  /*
+#include "zstd_internal.h"  /* ZSTD_customMalloc, ZSTD_customFree */
 #include "pool.h"

 /* ======   Compiler specifics   ====== */
@@ -105,6 +105,10 @@ static void* POOL_thread(void* opaque) {
     assert(0);  /* Unreachable */
 }

+POOL_ctx* ZSTD_createThreadPool(size_t numThreads) {
+  return POOL_create (numThreads, 0);
+}
+
 POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
     return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
 }
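ZSTD_createThreadPool() backs the shared thread-pool API that zstd 1.5.0 exposes behind ZSTD_STATIC_LINKING_ONLY, letting several compression contexts reuse one set of workers. A rough usage sketch, assuming a multithread-enabled libzstd 1.5.0 or later and with error handling omitted:

#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_threadPool, ZSTD_CCtx_refThreadPool */
#include <zstd.h>

/* Share one worker pool between contexts instead of letting each
 * CCtx spawn its own threads. */
static void compress_with_shared_pool(void)
{
    ZSTD_threadPool* const pool = ZSTD_createThreadPool(4);   /* 4 worker threads */
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();

    ZSTD_CCtx_refThreadPool(cctx, pool);                      /* borrow, not own */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);

    /* ... ZSTD_compress2() / ZSTD_compressStream2() calls go here ... */

    ZSTD_freeCCtx(cctx);
    ZSTD_freeThreadPool(pool);   /* free only after every referencing CCtx is gone */
}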
@@ -115,14 +119,14 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
     /* Check parameters */
     if (!numThreads) { return NULL; }
     /* Allocate the context and zero initialize */
-    ctx = (POOL_ctx*)
+    ctx = (POOL_ctx*)ZSTD_customCalloc(sizeof(POOL_ctx), customMem);
     if (!ctx) { return NULL; }
     /* Initialize the job queue.
      * It needs one extra space since one space is wasted to differentiate
      * empty and full queues.
      */
     ctx->queueSize = queueSize + 1;
-    ctx->queue = (POOL_job*)
+    ctx->queue = (POOL_job*)ZSTD_customMalloc(ctx->queueSize * sizeof(POOL_job), customMem);
     ctx->queueHead = 0;
     ctx->queueTail = 0;
     ctx->numThreadsBusy = 0;
@@ -136,7 +140,7 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
     }
     ctx->shutdown = 0;
     /* Allocate space for the thread handles */
-    ctx->threads = (ZSTD_pthread_t*)
+    ctx->threads = (ZSTD_pthread_t*)ZSTD_customMalloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
     ctx->threadCapacity = 0;
     ctx->customMem = customMem;
     /* Check for errors */
@@ -179,12 +183,14 @@ void POOL_free(POOL_ctx *ctx) {
     ZSTD_pthread_mutex_destroy(&ctx->queueMutex);
     ZSTD_pthread_cond_destroy(&ctx->queuePushCond);
     ZSTD_pthread_cond_destroy(&ctx->queuePopCond);
-
-
-
+    ZSTD_customFree(ctx->queue, ctx->customMem);
+    ZSTD_customFree(ctx->threads, ctx->customMem);
+    ZSTD_customFree(ctx, ctx->customMem);
 }

-
+void ZSTD_freeThreadPool (ZSTD_threadPool* pool) {
+  POOL_free (pool);
+}

 size_t POOL_sizeof(POOL_ctx *ctx) {
     if (ctx==NULL) return 0;  /* supports sizeof NULL */
@@ -203,11 +209,11 @@ static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads)
         return 0;
     }
     /* numThreads > threadCapacity */
-    {   ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)
+    {   ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_customMalloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem);
         if (!threadPool) return 1;
         /* replace existing thread pool */
-
-
+        ZSTD_memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool));
+        ZSTD_customFree(ctx->threads, ctx->customMem);
         ctx->threads = threadPool;
         /* Initialize additional threads */
         {   size_t threadId;
@@ -301,7 +307,7 @@ int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque)
 struct POOL_ctx_s {
     int dummy;
 };
-static POOL_ctx
+static POOL_ctx g_poolCtx;

 POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
     return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
@@ -311,11 +317,11 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customM
     (void)numThreads;
     (void)queueSize;
     (void)customMem;
-    return &
+    return &g_poolCtx;
 }

 void POOL_free(POOL_ctx* ctx) {
-    assert(!ctx || ctx == &
+    assert(!ctx || ctx == &g_poolCtx);
     (void)ctx;
 }

@@ -337,7 +343,7 @@ int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) {

 size_t POOL_sizeof(POOL_ctx* ctx) {
     if (ctx==NULL) return 0;  /* supports sizeof NULL */
-    assert(ctx == &
+    assert(ctx == &g_poolCtx);
     return sizeof(*ctx);
 }

@@ -1,5 +1,5 @@
 /*
- * Copyright (c)
+ * Copyright (c) Yann Collet, Facebook, Inc.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -16,7 +16,7 @@ extern "C" {
 #endif


-#include
+#include "zstd_deps.h"
 #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_customMem */
 #include "../zstd.h"

@@ -78,11 +78,12 @@ int ZSTD_pthread_join(ZSTD_pthread_t thread, void **value_ptr)

 #if defined(ZSTD_MULTITHREAD) && DEBUGLEVEL >= 1 && !defined(_WIN32)

-#
+#define ZSTD_DEPS_NEED_MALLOC
+#include "zstd_deps.h"

 int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr)
 {
-    *mutex = (pthread_mutex_t*)
+    *mutex = (pthread_mutex_t*)ZSTD_malloc(sizeof(pthread_mutex_t));
     if (!*mutex)
         return 1;
     return pthread_mutex_init(*mutex, attr);
@@ -94,14 +95,14 @@ int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex)
         return 0;
     {
         int const ret = pthread_mutex_destroy(*mutex);
-
+        ZSTD_free(*mutex);
         return ret;
     }
 }

 int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr)
 {
-    *cond = (pthread_cond_t*)
+    *cond = (pthread_cond_t*)ZSTD_malloc(sizeof(pthread_cond_t));
     if (!*cond)
         return 1;
     return pthread_cond_init(*cond, attr);
@@ -113,7 +114,7 @@ int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond)
         return 0;
     {
         int const ret = pthread_cond_destroy(*cond);
-
+        ZSTD_free(*cond);
         return ret;
     }
 }
File without changes
@@ -1,6 +1,6 @@
 /*
 *  xxHash - Fast Hash algorithm
-*  Copyright (c)
+*  Copyright (c) Yann Collet, Facebook, Inc.
 *
 *  You can contact the author at :
 *  - xxHash homepage: http://www.xxhash.com
@@ -30,9 +30,7 @@
  *            Prefer these methods in priority order (0 > 1 > 2)
  */
 #ifndef XXH_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
-#  if defined(
-#    define XXH_FORCE_MEMORY_ACCESS 2
-#  elif (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
+#  if (defined(__INTEL_COMPILER) && !defined(WIN32)) || \
   (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) )) || \
   defined(__ICCARM__)
 #    define XXH_FORCE_MEMORY_ACCESS 1
@@ -77,14 +75,12 @@
 *  Includes & Memory related functions
 ***************************************/
 /* Modify the local functions below should you wish to use some other memory routines */
-/* for
-#
-#include
-static void* XXH_malloc(size_t s) { return
-static void  XXH_free  (void* p)  {
-
-#include <string.h>
-static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
+/* for ZSTD_malloc(), ZSTD_free() */
+#define ZSTD_DEPS_NEED_MALLOC
+#include "zstd_deps.h"  /* size_t, ZSTD_malloc, ZSTD_free, ZSTD_memcpy */
+static void* XXH_malloc(size_t s) { return ZSTD_malloc(s); }
+static void  XXH_free  (void* p)  { ZSTD_free(p); }
+static void* XXH_memcpy(void* dest, const void* src, size_t size) { return ZSTD_memcpy(dest,src,size); }

 #ifndef XXH_STATIC_LINKING_ONLY
 #  define XXH_STATIC_LINKING_ONLY
@@ -95,49 +91,13 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcp
 /* *************************************
 *  Compiler Specific Options
 ***************************************/
-#
-#  define INLINE_KEYWORD inline
-#else
-#  define INLINE_KEYWORD
-#endif
-
-#if defined(__GNUC__) || defined(__ICCARM__)
-#  define FORCE_INLINE_ATTR __attribute__((always_inline))
-#elif defined(_MSC_VER)
-#  define FORCE_INLINE_ATTR __forceinline
-#else
-#  define FORCE_INLINE_ATTR
-#endif
-
-#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
-
-
-#ifdef _MSC_VER
-#  pragma warning(disable : 4127)      /* disable: C4127: conditional expression is constant */
-#endif
+#include "compiler.h"


 /* *************************************
 *  Basic Types
 ***************************************/
-#
-# define MEM_MODULE
-# if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
-#   include <stdint.h>
-    typedef uint8_t  BYTE;
-    typedef uint16_t U16;
-    typedef uint32_t U32;
-    typedef  int32_t S32;
-    typedef uint64_t U64;
-#  else
-    typedef unsigned char      BYTE;
-    typedef unsigned short     U16;
-    typedef unsigned int       U32;
-    typedef   signed int       S32;
-    typedef unsigned long long U64;   /* if your compiler doesn't support unsigned long long, replace by another 64-bit type here. Note that xxhash.h will also need to be updated. */
-#  endif
-#endif
-
+#include "mem.h"  /* BYTE, U32, U64, size_t */

 #if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))

@@ -163,14 +123,14 @@ static U64 XXH_read64(const void* ptr) { return ((const unalign*)ptr)->u64; }
 static U32 XXH_read32(const void* memPtr)
 {
     U32 val;
-
+    ZSTD_memcpy(&val, memPtr, sizeof(val));
     return val;
 }

 static U64 XXH_read64(const void* memPtr)
 {
     U64 val;
-
+    ZSTD_memcpy(&val, memPtr, sizeof(val));
     return val;
 }

@@ -307,12 +267,12 @@ XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
 ****************************/
 XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* restrict dstState, const XXH32_state_t* restrict srcState)
 {
-
+    ZSTD_memcpy(dstState, srcState, sizeof(*dstState));
 }

 XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* restrict dstState, const XXH64_state_t* restrict srcState)
 {
-
+    ZSTD_memcpy(dstState, srcState, sizeof(*dstState));
 }


@@ -554,12 +514,12 @@ XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
 XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int seed)
 {
     XXH32_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
-
+    ZSTD_memset(&state, 0, sizeof(state)-4);   /* do not write into reserved, for future removal */
     state.v1 = seed + PRIME32_1 + PRIME32_2;
     state.v2 = seed + PRIME32_2;
     state.v3 = seed + 0;
     state.v4 = seed - PRIME32_1;
-
+    ZSTD_memcpy(statePtr, &state, sizeof(state));
     return XXH_OK;
 }

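XXH32_reset() starts xxhash's streaming cycle (reset, update in chunks, digest). A minimal sketch of that cycle against the bundled header; note that the include path is build-dependent and that zstd builds usually namespace these symbols via XXH_NAMESPACE, so the linked names may carry a ZSTD_ prefix:

#include <stdio.h>
#include <string.h>
#include "xxhash.h"   /* bundled header; exact path depends on the build */

/* Hash data that arrives in pieces without concatenating it first. */
int main(void)
{
    const char part1[] = "hello ";
    const char part2[] = "world";

    XXH32_state_t* const st = XXH32_createState();
    if (st == NULL) return 1;

    XXH32_reset(st, 0);                         /* seed = 0 */
    XXH32_update(st, part1, strlen(part1));     /* feed chunk 1 */
    XXH32_update(st, part2, strlen(part2));     /* feed chunk 2 */
    printf("xxh32 = %08x\n", (unsigned)XXH32_digest(st));

    XXH32_freeState(st);
    return 0;
}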
@@ -567,12 +527,12 @@ XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, unsigned int s
 XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, unsigned long long seed)
 {
     XXH64_state_t state;   /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
-
+    ZSTD_memset(&state, 0, sizeof(state)-8);   /* do not write into reserved, for future removal */
     state.v1 = seed + PRIME64_1 + PRIME64_2;
     state.v2 = seed + PRIME64_2;
     state.v3 = seed + 0;
     state.v4 = seed - PRIME64_1;
-
+    ZSTD_memcpy(statePtr, &state, sizeof(state));
     return XXH_OK;
 }

@@ -843,14 +803,14 @@ XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t
 {
     XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
     if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
-
+    ZSTD_memcpy(dst, &hash, sizeof(*dst));
 }

 XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
 {
     XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
     if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
-
+    ZSTD_memcpy(dst, &hash, sizeof(*dst));
 }

 XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)