zstdlib 0.10.0-arm64-darwin → 0.11.0-arm64-darwin
- checksums.yaml +4 -4
- data/CHANGES.md +8 -0
- data/ext/zstdlib_c/extconf.rb +2 -2
- data/ext/zstdlib_c/ruby/zlib-3.2/zstdlib.c +5090 -0
- data/ext/zstdlib_c/zstd-1.5.5/lib/common/allocations.h +55 -0
- data/ext/zstdlib_c/zstd-1.5.5/lib/common/bits.h +200 -0
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/bitstream.h +19 -60
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/compiler.h +26 -3
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/cpu.h +1 -1
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/debug.c +1 -1
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/debug.h +1 -1
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/entropy_common.c +12 -40
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/error_private.c +9 -2
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/error_private.h +1 -1
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/fse.h +5 -83
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/fse_decompress.c +7 -99
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/huf.h +65 -156
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/mem.h +39 -46
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/pool.c +26 -10
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/pool.h +7 -1
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/portability_macros.h +22 -3
- data/ext/zstdlib_c/zstd-1.5.5/lib/common/threading.c +176 -0
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/threading.h +5 -10
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/xxhash.c +2 -2
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/xxhash.h +8 -8
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/zstd_common.c +1 -36
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/zstd_deps.h +1 -1
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/zstd_internal.h +17 -118
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/zstd_trace.h +3 -3
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/clevels.h +1 -1
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/fse_compress.c +7 -124
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/hist.c +1 -1
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/hist.h +1 -1
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/huf_compress.c +234 -169
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_compress.c +1243 -538
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_compress_internal.h +225 -151
- data/ext/zstdlib_c/zstd-1.5.5/lib/compress/zstd_compress_literals.c +235 -0
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_compress_literals.h +16 -8
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_compress_sequences.c +3 -3
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_compress_sequences.h +1 -1
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_compress_superblock.c +25 -21
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_compress_superblock.h +1 -1
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_cwksp.h +128 -62
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_double_fast.c +95 -33
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_double_fast.h +3 -2
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_fast.c +433 -148
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_fast.h +3 -2
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_lazy.c +398 -345
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_lazy.h +4 -2
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_ldm.c +5 -5
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_ldm.h +1 -1
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_ldm_geartab.h +1 -1
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_opt.c +106 -80
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstd_opt.h +1 -1
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstdmt_compress.c +17 -9
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/compress/zstdmt_compress.h +1 -1
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/decompress/huf_decompress.c +434 -441
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/decompress/huf_decompress_amd64.S +30 -39
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/decompress/zstd_ddict.c +4 -4
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/decompress/zstd_ddict.h +1 -1
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/decompress/zstd_decompress.c +205 -80
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/decompress/zstd_decompress_block.c +201 -81
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/decompress/zstd_decompress_block.h +6 -1
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/decompress/zstd_decompress_internal.h +4 -2
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/zdict.h +53 -31
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/zstd.h +580 -135
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/zstd_errors.h +27 -8
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/zlibWrapper/gzclose.c +1 -1
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/zlibWrapper/gzcompatibility.h +8 -8
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/zlibWrapper/gzguts.h +10 -10
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/zlibWrapper/gzlib.c +3 -3
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/zlibWrapper/gzread.c +10 -10
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/zlibWrapper/gzwrite.c +5 -5
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/zlibWrapper/zstd_zlibwrapper.c +46 -44
- data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/zlibWrapper/zstd_zlibwrapper.h +4 -1
- data/lib/2.4/zstdlib_c.bundle +0 -0
- data/lib/2.5/zstdlib_c.bundle +0 -0
- data/lib/2.6/zstdlib_c.bundle +0 -0
- data/lib/2.7/zstdlib_c.bundle +0 -0
- data/lib/3.0/zstdlib_c.bundle +0 -0
- data/lib/3.1/zstdlib_c.bundle +0 -0
- data/lib/3.2/zstdlib_c.bundle +0 -0
- metadata +82 -78
- data/ext/zstdlib_c/zstd-1.5.2/lib/common/threading.c +0 -122
- data/ext/zstdlib_c/zstd-1.5.2/lib/compress/zstd_compress_literals.c +0 -159

data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/mem.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c)
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -133,21 +133,15 @@ MEM_STATIC size_t MEM_swapST(size_t in);
 /*-**************************************************************
 *  Memory I/O Implementation
 *****************************************************************/
-/* MEM_FORCE_MEMORY_ACCESS :
- *
- *
- * The below switch allow to select different access method for improved performance.
- * Method 0 (default) : use `memcpy()`. Safe and portable.
- * Method 1 : `__packed` statement. It depends on compiler extension (i.e., not portable).
- *            This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+/* MEM_FORCE_MEMORY_ACCESS : For accessing unaligned memory:
+ * Method 0 : always use `memcpy()`. Safe and portable.
+ * Method 1 : Use compiler extension to set unaligned access.
  * Method 2 : direct access. This method is portable but violate C standard.
  *            It can generate buggy code on targets depending on alignment.
- *
- * See http://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
- * Prefer these methods in priority order (0 > 1 > 2)
+ * Default : method 1 if supported, else method 0
  */
 #ifndef MEM_FORCE_MEMORY_ACCESS   /* can be defined externally, on command line for example */
-#
+#  ifdef __GNUC__
 #    define MEM_FORCE_MEMORY_ACCESS 1
 #  endif
 #endif
@@ -190,30 +184,19 @@ MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(U64*)memPtr = value; }

 #elif defined(MEM_FORCE_MEMORY_ACCESS) && (MEM_FORCE_MEMORY_ACCESS==1)

-
-
-
-
-    typedef struct { U16 v; } unalign16;
-    typedef struct { U32 v; } unalign32;
-    typedef struct { U64 v; } unalign64;
-    typedef struct { size_t v; } unalignArch;
-    __pragma( pack(pop) )
-#else
-    typedef struct { U16 v; } __attribute__((packed)) unalign16;
-    typedef struct { U32 v; } __attribute__((packed)) unalign32;
-    typedef struct { U64 v; } __attribute__((packed)) unalign64;
-    typedef struct { size_t v; } __attribute__((packed)) unalignArch;
-#endif
+typedef __attribute__((aligned(1))) U16 unalign16;
+typedef __attribute__((aligned(1))) U32 unalign32;
+typedef __attribute__((aligned(1))) U64 unalign64;
+typedef __attribute__((aligned(1))) size_t unalignArch;

-MEM_STATIC U16 MEM_read16(const void* ptr) { return (
-MEM_STATIC U32 MEM_read32(const void* ptr) { return (
-MEM_STATIC U64 MEM_read64(const void* ptr) { return (
-MEM_STATIC size_t MEM_readST(const void* ptr) { return (
+MEM_STATIC U16 MEM_read16(const void* ptr) { return *(const unalign16*)ptr; }
+MEM_STATIC U32 MEM_read32(const void* ptr) { return *(const unalign32*)ptr; }
+MEM_STATIC U64 MEM_read64(const void* ptr) { return *(const unalign64*)ptr; }
+MEM_STATIC size_t MEM_readST(const void* ptr) { return *(const unalignArch*)ptr; }

-MEM_STATIC void MEM_write16(void* memPtr, U16 value) { (
-MEM_STATIC void MEM_write32(void* memPtr, U32 value) { (
-MEM_STATIC void MEM_write64(void* memPtr, U64 value) { (
+MEM_STATIC void MEM_write16(void* memPtr, U16 value) { *(unalign16*)memPtr = value; }
+MEM_STATIC void MEM_write32(void* memPtr, U32 value) { *(unalign32*)memPtr = value; }
+MEM_STATIC void MEM_write64(void* memPtr, U64 value) { *(unalign64*)memPtr = value; }

 #else

@@ -257,6 +240,14 @@ MEM_STATIC void MEM_write64(void* memPtr, U64 value)

 #endif /* MEM_FORCE_MEMORY_ACCESS */

+MEM_STATIC U32 MEM_swap32_fallback(U32 in)
+{
+    return  ((in << 24) & 0xff000000 ) |
+            ((in <<  8) & 0x00ff0000 ) |
+            ((in >>  8) & 0x0000ff00 ) |
+            ((in >> 24) & 0x000000ff );
+}
+
 MEM_STATIC U32 MEM_swap32(U32 in)
 {
 #if defined(_MSC_VER)     /* Visual Studio */
@@ -265,22 +256,13 @@ MEM_STATIC U32 MEM_swap32(U32 in)
     || (defined(__clang__) && __has_builtin(__builtin_bswap32))
     return __builtin_bswap32(in);
 #else
-    return
-           ((in <<  8) & 0x00ff0000 ) |
-           ((in >>  8) & 0x0000ff00 ) |
-           ((in >> 24) & 0x000000ff );
+    return MEM_swap32_fallback(in);
 #endif
 }

-MEM_STATIC U64
+MEM_STATIC U64 MEM_swap64_fallback(U64 in)
 {
-
-    return _byteswap_uint64(in);
-#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
-    || (defined(__clang__) && __has_builtin(__builtin_bswap64))
-    return __builtin_bswap64(in);
-#else
-    return ((in << 56) & 0xff00000000000000ULL) |
+    return ((in << 56) & 0xff00000000000000ULL) |
            ((in << 40) & 0x00ff000000000000ULL) |
            ((in << 24) & 0x0000ff0000000000ULL) |
            ((in <<  8) & 0x000000ff00000000ULL) |
@@ -288,6 +270,17 @@ MEM_STATIC U64 MEM_swap64(U64 in)
            ((in >> 24) & 0x0000000000ff0000ULL) |
           ((in >> 40) & 0x000000000000ff00ULL) |
           ((in >> 56) & 0x00000000000000ffULL);
+}
+
+MEM_STATIC U64 MEM_swap64(U64 in)
+{
+#if defined(_MSC_VER)     /* Visual Studio */
+    return _byteswap_uint64(in);
+#elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
+    || (defined(__clang__) && __has_builtin(__builtin_bswap64))
+    return __builtin_bswap64(in);
+#else
+    return MEM_swap64_fallback(in);
 #endif
 }

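The mem.h hunks above drop the old packed-struct unaligned-access types in favour of `__attribute__((aligned(1)))` typedefs, and factor the portable byte-swaps into MEM_swap32_fallback / MEM_swap64_fallback helpers that the builtin-based MEM_swap32 / MEM_swap64 now fall back to. Below is a minimal standalone sketch of those two patterns; the names (unalign32_demo, read32_unaligned, swap32_fallback) are illustrative only and assume a GCC/clang-compatible compiler:

#include <stdint.h>
#include <stdio.h>

/* aligned(1) typedef: tells the compiler the pointee may be unaligned,
 * so it emits a safe (possibly byte-wise) load on strict-alignment targets. */
typedef __attribute__((aligned(1))) uint32_t unalign32_demo;

static uint32_t read32_unaligned(const void* ptr) { return *(const unalign32_demo*)ptr; }

/* Portable byte-swap, mirroring the shape of MEM_swap32_fallback(). */
static uint32_t swap32_fallback(uint32_t in)
{
    return ((in << 24) & 0xff000000) |
           ((in <<  8) & 0x00ff0000) |
           ((in >>  8) & 0x0000ff00) |
           ((in >> 24) & 0x000000ff);
}

int main(void)
{
    unsigned char buf[5] = { 0x00, 0x11, 0x22, 0x33, 0x44 };
    uint32_t v = read32_unaligned(buf + 1);   /* deliberately misaligned read */
    printf("0x%08x -> 0x%08x\n", (unsigned)v, (unsigned)swap32_fallback(v));
    return 0;
}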

data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/pool.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c)
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -10,9 +10,9 @@


 /* ======   Dependencies   ======= */
+#include "../common/allocations.h"  /* ZSTD_customCalloc, ZSTD_customFree */
 #include "zstd_deps.h" /* size_t */
 #include "debug.h"     /* assert */
-#include "zstd_internal.h"  /* ZSTD_customMalloc, ZSTD_customFree */
 #include "pool.h"

 /* ======   Compiler specifics   ====== */
@@ -96,9 +96,7 @@ static void* POOL_thread(void* opaque) {
             /* If the intended queue size was 0, signal after finishing job */
             ZSTD_pthread_mutex_lock(&ctx->queueMutex);
             ctx->numThreadsBusy--;
-
-                ZSTD_pthread_cond_signal(&ctx->queuePushCond);
-            }
+            ZSTD_pthread_cond_signal(&ctx->queuePushCond);
             ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
         }
     }  /* for (;;) */
@@ -128,7 +126,7 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
      * empty and full queues.
      */
     ctx->queueSize = queueSize + 1;
-    ctx->queue = (POOL_job*)
+    ctx->queue = (POOL_job*)ZSTD_customCalloc(ctx->queueSize * sizeof(POOL_job), customMem);
     ctx->queueHead = 0;
     ctx->queueTail = 0;
     ctx->numThreadsBusy = 0;
@@ -142,7 +140,7 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
     }
     ctx->shutdown = 0;
     /* Allocate space for the thread handles */
-    ctx->threads = (ZSTD_pthread_t*)
+    ctx->threads = (ZSTD_pthread_t*)ZSTD_customCalloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
     ctx->threadCapacity = 0;
     ctx->customMem = customMem;
     /* Check for errors */
@@ -175,7 +173,7 @@ static void POOL_join(POOL_ctx* ctx) {
     /* Join all of the threads */
     {   size_t i;
         for (i = 0; i < ctx->threadCapacity; ++i) {
-            ZSTD_pthread_join(ctx->threads[i]
+            ZSTD_pthread_join(ctx->threads[i]);  /* note : could fail */
     }   }
 }

@@ -190,6 +188,17 @@ void POOL_free(POOL_ctx *ctx) {
     ZSTD_customFree(ctx, ctx->customMem);
 }

+/*! POOL_joinJobs() :
+ *  Waits for all queued jobs to finish executing.
+ */
+void POOL_joinJobs(POOL_ctx* ctx) {
+    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+    while(!ctx->queueEmpty || ctx->numThreadsBusy > 0) {
+        ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
+    }
+    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+}
+
 void ZSTD_freeThreadPool (ZSTD_threadPool* pool) {
     POOL_free (pool);
 }
@@ -211,7 +220,7 @@ static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads)
         return 0;
     }
     /* numThreads > threadCapacity */
-    {   ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)
+    {   ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_customCalloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem);
         if (!threadPool) return 1;
         /* replace existing thread pool */
         ZSTD_memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool));
@@ -262,7 +271,9 @@ static int isQueueFull(POOL_ctx const* ctx) {
 static void
 POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque)
 {
-    POOL_job
+    POOL_job job;
+    job.function = function;
+    job.opaque = opaque;
     assert(ctx != NULL);
     if (ctx->shutdown) return;

@@ -330,6 +341,11 @@ void POOL_free(POOL_ctx* ctx) {
     (void)ctx;
 }

+void POOL_joinJobs(POOL_ctx* ctx){
+    assert(!ctx || ctx == &g_poolCtx);
+    (void)ctx;
+}
+
 int POOL_resize(POOL_ctx* ctx, size_t numThreads) {
     (void)ctx; (void)numThreads;
     return 0;
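pool.c picks up POOL_joinJobs(), which blocks until the job queue is empty and every worker has gone idle, and its allocations now go through ZSTD_customCalloc from the new common/allocations.h. The wait itself is a standard condition-variable predicate loop; here is a self-contained pthreads sketch of that pattern (demo_pool and demo_pool_joinJobs are illustrative names, not the gem's types):

#include <pthread.h>

typedef struct {
    pthread_mutex_t queueMutex;
    pthread_cond_t  queuePushCond;
    int queueEmpty;
    int numThreadsBusy;
} demo_pool;

/* Sleep on the queue's condition variable until the queue is empty
 * and no worker is busy, re-checking the predicate after every wakeup. */
static void demo_pool_joinJobs(demo_pool* ctx)
{
    pthread_mutex_lock(&ctx->queueMutex);
    while (!ctx->queueEmpty || ctx->numThreadsBusy > 0)
        pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
    pthread_mutex_unlock(&ctx->queueMutex);
}

int main(void)
{
    /* With an empty queue and no busy workers the call returns immediately. */
    demo_pool ctx = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 1, 0 };
    demo_pool_joinJobs(&ctx);
    return 0;
}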

data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/pool.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c)
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -38,6 +38,12 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
  */
 void POOL_free(POOL_ctx* ctx);

+
+/*! POOL_joinJobs() :
+ *  Waits for all queued jobs to finish executing.
+ */
+void POOL_joinJobs(POOL_ctx* ctx);
+
 /*! POOL_resize() :
  *  Expands or shrinks pool's number of threads.
  *  This is more efficient than releasing + creating a new context,

data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/portability_macros.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c)
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -12,7 +12,7 @@
 #define ZSTD_PORTABILITY_MACROS_H

 /**
- * This header file contains macro
+ * This header file contains macro definitions to support portability.
  * This header is shared between C and ASM code, so it MUST only
  * contain macro definitions. It MUST not contain any C code.
  *
@@ -88,7 +88,7 @@
 #endif

 /**
- * Only enable assembly for GNUC
+ * Only enable assembly for GNUC compatible compilers,
  * because other platforms may not support GAS assembly syntax.
  *
  * Only enable assembly for Linux / MacOS, other platforms may
@@ -134,4 +134,23 @@
 # define ZSTD_ENABLE_ASM_X86_64_BMI2 0
 #endif

+/*
+ * For x86 ELF targets, add .note.gnu.property section for Intel CET in
+ * assembly sources when CET is enabled.
+ *
+ * Additionally, any function that may be called indirectly must begin
+ * with ZSTD_CET_ENDBRANCH.
+ */
+#if defined(__ELF__) && (defined(__x86_64__) || defined(__i386__)) \
+    && defined(__has_include)
+# if __has_include(<cet.h>)
+#  include <cet.h>
+#  define ZSTD_CET_ENDBRANCH _CET_ENDBR
+# endif
+#endif
+
+#ifndef ZSTD_CET_ENDBRANCH
+# define ZSTD_CET_ENDBRANCH
+#endif
+
 #endif /* ZSTD_PORTABILITY_MACROS_H */
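portability_macros.h now defines ZSTD_CET_ENDBRANCH so the x86-64 assembly can carry Intel CET end-branch markers when <cet.h> is available and expand to nothing otherwise. The probe-and-fallback shape is plain preprocessor; a hedged sketch using a hypothetical DEMO_ENDBRANCH macro (not a zstd identifier):

/* Probe for <cet.h> with __has_include; otherwise leave the marker empty,
 * so call sites can use it unconditionally. */
#if defined(__ELF__) && (defined(__x86_64__) || defined(__i386__)) && defined(__has_include)
#  if __has_include(<cet.h>)
#    include <cet.h>
#    define DEMO_ENDBRANCH _CET_ENDBR
#  endif
#endif

#ifndef DEMO_ENDBRANCH
#  define DEMO_ENDBRANCH   /* expands to nothing when CET support is absent */
#endif

int main(void) { return 0; }   /* compiles whether or not <cet.h> exists */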

data/ext/zstdlib_c/zstd-1.5.5/lib/common/threading.c

@@ -0,0 +1,176 @@
+/**
+ * Copyright (c) 2016 Tino Reichardt
+ * All rights reserved.
+ *
+ * You can contact the author at:
+ * - zstdmt source repository: https://github.com/mcmilk/zstdmt
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+/**
+ * This file will hold wrapper for systems, which do not support pthreads
+ */
+
+#include "threading.h"
+
+/* create fake symbol to avoid empty translation unit warning */
+int g_ZSTD_threading_useless_symbol;
+
+#if defined(ZSTD_MULTITHREAD) && defined(_WIN32)
+
+/**
+ * Windows minimalist Pthread Wrapper
+ */
+
+
+/* ===  Dependencies  === */
+#include <process.h>
+#include <errno.h>
+
+
+/* ===  Implementation  === */
+
+typedef struct {
+    void* (*start_routine)(void*);
+    void* arg;
+    int initialized;
+    ZSTD_pthread_cond_t initialized_cond;
+    ZSTD_pthread_mutex_t initialized_mutex;
+} ZSTD_thread_params_t;
+
+static unsigned __stdcall worker(void *arg)
+{
+    void* (*start_routine)(void*);
+    void* thread_arg;
+
+    /* Initialized thread_arg and start_routine and signal main thread that we don't need it
+     * to wait any longer.
+     */
+    {
+        ZSTD_thread_params_t*  thread_param = (ZSTD_thread_params_t*)arg;
+        thread_arg = thread_param->arg;
+        start_routine = thread_param->start_routine;
+
+        /* Signal main thread that we are running and do not depend on its memory anymore */
+        ZSTD_pthread_mutex_lock(&thread_param->initialized_mutex);
+        thread_param->initialized = 1;
+        ZSTD_pthread_cond_signal(&thread_param->initialized_cond);
+        ZSTD_pthread_mutex_unlock(&thread_param->initialized_mutex);
+    }
+
+    start_routine(thread_arg);
+
+    return 0;
+}
+
+int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
+            void* (*start_routine) (void*), void* arg)
+{
+    ZSTD_thread_params_t thread_param;
+    (void)unused;
+
+    thread_param.start_routine = start_routine;
+    thread_param.arg = arg;
+    thread_param.initialized = 0;
+    *thread = NULL;
+
+    /* Setup thread initialization synchronization */
+    if(ZSTD_pthread_cond_init(&thread_param.initialized_cond, NULL)) {
+        /* Should never happen on Windows */
+        return -1;
+    }
+    if(ZSTD_pthread_mutex_init(&thread_param.initialized_mutex, NULL)) {
+        /* Should never happen on Windows */
+        ZSTD_pthread_cond_destroy(&thread_param.initialized_cond);
+        return -1;
+    }
+
+    /* Spawn thread */
+    *thread = (HANDLE)_beginthreadex(NULL, 0, worker, &thread_param, 0, NULL);
+    if (!thread) {
+        ZSTD_pthread_mutex_destroy(&thread_param.initialized_mutex);
+        ZSTD_pthread_cond_destroy(&thread_param.initialized_cond);
+        return errno;
+    }
+
+    /* Wait for thread to be initialized */
+    ZSTD_pthread_mutex_lock(&thread_param.initialized_mutex);
+    while(!thread_param.initialized) {
+        ZSTD_pthread_cond_wait(&thread_param.initialized_cond, &thread_param.initialized_mutex);
+    }
+    ZSTD_pthread_mutex_unlock(&thread_param.initialized_mutex);
+    ZSTD_pthread_mutex_destroy(&thread_param.initialized_mutex);
+    ZSTD_pthread_cond_destroy(&thread_param.initialized_cond);
+
+    return 0;
+}
+
+int ZSTD_pthread_join(ZSTD_pthread_t thread)
+{
+    DWORD result;
+
+    if (!thread) return 0;
+
+    result = WaitForSingleObject(thread, INFINITE);
+    CloseHandle(thread);
+
+    switch (result) {
+    case WAIT_OBJECT_0:
+        return 0;
+    case WAIT_ABANDONED:
+        return EINVAL;
+    default:
+        return GetLastError();
+    }
+}
+
+#endif   /* ZSTD_MULTITHREAD */
+
+#if defined(ZSTD_MULTITHREAD) && DEBUGLEVEL >= 1 && !defined(_WIN32)
+
+#define ZSTD_DEPS_NEED_MALLOC
+#include "zstd_deps.h"
+
+int ZSTD_pthread_mutex_init(ZSTD_pthread_mutex_t* mutex, pthread_mutexattr_t const* attr)
+{
+    *mutex = (pthread_mutex_t*)ZSTD_malloc(sizeof(pthread_mutex_t));
+    if (!*mutex)
+        return 1;
+    return pthread_mutex_init(*mutex, attr);
+}
+
+int ZSTD_pthread_mutex_destroy(ZSTD_pthread_mutex_t* mutex)
+{
+    if (!*mutex)
+        return 0;
+    {
+        int const ret = pthread_mutex_destroy(*mutex);
+        ZSTD_free(*mutex);
+        return ret;
+    }
+}
+
+int ZSTD_pthread_cond_init(ZSTD_pthread_cond_t* cond, pthread_condattr_t const* attr)
+{
+    *cond = (pthread_cond_t*)ZSTD_malloc(sizeof(pthread_cond_t));
+    if (!*cond)
+        return 1;
+    return pthread_cond_init(*cond, attr);
+}
+
+int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond)
+{
+    if (!*cond)
+        return 0;
+    {
+        int const ret = pthread_cond_destroy(*cond);
+        ZSTD_free(*cond);
+        return ret;
+    }
+}
+
+#endif
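The new threading.c (the zstdmt-derived Windows wrapper) is built around a start-up handshake: ZSTD_pthread_create hands the worker a stack-allocated ZSTD_thread_params_t, and the worker copies start_routine/arg out of it and signals a condition variable before the creator is allowed to return, so no heap allocation is needed for the bootstrap data. A generic pthreads sketch of that handshake, with illustrative names rather than the Windows code above:

#include <pthread.h>
#include <stdio.h>

typedef struct {
    int value;                       /* stand-in for start_routine/arg */
    int initialized;
    pthread_mutex_t mutex;
    pthread_cond_t  cond;
} params_t;

static void* worker(void* arg)
{
    params_t* p = (params_t*)arg;
    int const local = p->value;          /* copy out before signalling */
    pthread_mutex_lock(&p->mutex);
    p->initialized = 1;
    pthread_cond_signal(&p->cond);
    pthread_mutex_unlock(&p->mutex);     /* p must not be touched after this */
    printf("worker got %d\n", local);
    return NULL;
}

int main(void)
{
    pthread_t t;
    params_t p = { 7, 0, PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER };
    if (pthread_create(&t, NULL, worker, &p) != 0) return 1;
    pthread_mutex_lock(&p.mutex);
    while (!p.initialized)
        pthread_cond_wait(&p.cond, &p.mutex);
    pthread_mutex_unlock(&p.mutex);      /* now safe to reuse or discard p */
    pthread_join(t, NULL);
    return 0;
}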

data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/threading.h

@@ -23,8 +23,7 @@ extern "C" {
 #if defined(ZSTD_MULTITHREAD) && defined(_WIN32)

 /**
- * Windows minimalist Pthread Wrapper
- * http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
+ * Windows minimalist Pthread Wrapper
  */
 #ifdef WINVER
 #  undef WINVER
@@ -62,16 +61,12 @@ extern "C" {
 #define ZSTD_pthread_cond_broadcast(a)  WakeAllConditionVariable((a))

 /* ZSTD_pthread_create() and ZSTD_pthread_join() */
-typedef
-    HANDLE handle;
-    void* (*start_routine)(void*);
-    void* arg;
-} ZSTD_pthread_t;
+typedef HANDLE ZSTD_pthread_t;

 int ZSTD_pthread_create(ZSTD_pthread_t* thread, const void* unused,
                    void* (*start_routine) (void*), void* arg);

-int ZSTD_pthread_join(ZSTD_pthread_t thread
+int ZSTD_pthread_join(ZSTD_pthread_t thread);

 /**
  * add here more wrappers as required
@@ -99,7 +94,7 @@ int ZSTD_pthread_join(ZSTD_pthread_t thread, void** value_ptr);

 #define ZSTD_pthread_t             pthread_t
 #define ZSTD_pthread_create(a, b, c, d)      pthread_create((a), (b), (c), (d))
-#define ZSTD_pthread_join(a
+#define ZSTD_pthread_join(a)                 pthread_join((a),NULL)

 #else /* DEBUGLEVEL >= 1 */

@@ -124,7 +119,7 @@ int ZSTD_pthread_cond_destroy(ZSTD_pthread_cond_t* cond);

 #define ZSTD_pthread_t             pthread_t
 #define ZSTD_pthread_create(a, b, c, d)      pthread_create((a), (b), (c), (d))
-#define ZSTD_pthread_join(a
+#define ZSTD_pthread_join(a)                 pthread_join((a),NULL)

 #endif


data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/xxhash.c

@@ -1,9 +1,9 @@
 /*
  *  xxHash - Fast Hash algorithm
- *  Copyright (c)
+ *  Copyright (c) Meta Platforms, Inc. and affiliates.
  *
  *  You can contact the author at :
- *  - xxHash homepage:
+ *  - xxHash homepage: https://cyan4973.github.io/xxHash/
  *  - xxHash source repository : https://github.com/Cyan4973/xxHash
  *
  * This source code is licensed under both the BSD-style license (found in the

data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/xxhash.h

@@ -1,9 +1,9 @@
 /*
  *  xxHash - Fast Hash algorithm
- *  Copyright (c)
+ *  Copyright (c) Meta Platforms, Inc. and affiliates.
  *
  *  You can contact the author at :
- *  - xxHash homepage:
+ *  - xxHash homepage: https://cyan4973.github.io/xxHash/
  *  - xxHash source repository : https://github.com/Cyan4973/xxHash
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -1314,7 +1314,7 @@ XXH3_128bits_reset_withSecretandSeed(XXH3_state_t* statePtr,
  * care, as what works on one compiler/platform/optimization level may cause
  * another to read garbage data or even crash.
  *
- * See
+ * See https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html for details.
  *
  * Prefer these methods in priority order (0 > 3 > 1 > 2)
  */
@@ -1534,7 +1534,7 @@ static void* XXH_memcpy(void* dest, const void* src, size_t size) { return ZSTD_
  * @brief Used to prevent unwanted optimizations for @p var.
  *
  * It uses an empty GCC inline assembly statement with a register constraint
- * which forces @p var into a general purpose register (
+ * which forces @p var into a general purpose register (e.g. eax, ebx, ecx
  * on x86) and marks it as modified.
  *
  * This is used in a few places to avoid unwanted autovectorization (e.g.
@@ -1655,7 +1655,7 @@ static xxh_u32 XXH_read32(const void* ptr)

 /*
  * Portable and safe solution. Generally efficient.
- * see:
+ * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
  */
 static xxh_u32 XXH_read32(const void* memPtr)
 {
@@ -2296,7 +2296,7 @@ static xxh_u64 XXH_read64(const void* ptr)

 /*
  * Portable and safe solution. Generally efficient.
- * see:
+ * see: https://fastcompression.blogspot.com/2015/08/accessing-unaligned-memory.html
  */
 static xxh_u64 XXH_read64(const void* memPtr)
 {
@@ -2809,7 +2809,7 @@ enum XXH_VECTOR_TYPE /* fake enum */ {
  * @ingroup tuning
  * @brief Selects the minimum alignment for XXH3's accumulators.
  *
- * When using SIMD, this should match the alignment
+ * When using SIMD, this should match the alignment required for said vector
  * type, so, for example, 32 for AVX2.
  *
  * Default: Auto detected.
@@ -3026,7 +3026,7 @@ enum XXH_VECTOR_TYPE /* fake enum */ {
  * have more than 2 NEON (F0/F1) micro-ops. If you are only using NEON instructions,
  * you are only using 2/3 of the CPU bandwidth.
  *
- * This is even more
+ * This is even more noticeable on the more advanced cores like the A76 which
  * can dispatch 8 micro-ops per cycle, but still only 2 NEON micro-ops at once.
  *
  * Therefore, @ref XXH3_NEON_LANES lanes will be processed using NEON, and the

data/ext/zstdlib_c/{zstd-1.5.2 → zstd-1.5.5}/lib/common/zstd_common.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c)
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -14,7 +14,6 @@
 *  Dependencies
 ***************************************/
 #define ZSTD_DEPS_NEED_MALLOC
-#include "zstd_deps.h"   /* ZSTD_malloc, ZSTD_calloc, ZSTD_free, ZSTD_memset */
 #include "error_private.h"
 #include "zstd_internal.h"

@@ -47,37 +46,3 @@ ZSTD_ErrorCode ZSTD_getErrorCode(size_t code) { return ERR_getErrorCode(code); }
 /*! ZSTD_getErrorString() :
  *  provides error code string from enum */
 const char* ZSTD_getErrorString(ZSTD_ErrorCode code) { return ERR_getErrorString(code); }
-
-
-
-/*=**************************************************************
-*  Custom allocator
-****************************************************************/
-void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem)
-{
-    if (customMem.customAlloc)
-        return customMem.customAlloc(customMem.opaque, size);
-    return ZSTD_malloc(size);
-}
-
-void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem)
-{
-    if (customMem.customAlloc) {
-        /* calloc implemented as malloc+memset;
-         * not as efficient as calloc, but next best guess for custom malloc */
-        void* const ptr = customMem.customAlloc(customMem.opaque, size);
-        ZSTD_memset(ptr, 0, size);
-        return ptr;
-    }
-    return ZSTD_calloc(1, size);
-}
-
-void ZSTD_customFree(void* ptr, ZSTD_customMem customMem)
-{
-    if (ptr!=NULL) {
-        if (customMem.customFree)
-            customMem.customFree(customMem.opaque, ptr);
-        else
-            ZSTD_free(ptr);
-    }
-}