zstdlib 0.13.0-x86-linux
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.yardopts +6 -0
- data/CHANGES.md +107 -0
- data/Gemfile +3 -0
- data/README.md +107 -0
- data/Rakefile +59 -0
- data/ext/zstdlib_c/extconf.rb +59 -0
- data/ext/zstdlib_c/ruby/zlib-2.2/zstdlib.c +4675 -0
- data/ext/zstdlib_c/ruby/zlib-2.3/zstdlib.c +4702 -0
- data/ext/zstdlib_c/ruby/zlib-2.4/zstdlib.c +4859 -0
- data/ext/zstdlib_c/ruby/zlib-2.5/zstdlib.c +4864 -0
- data/ext/zstdlib_c/ruby/zlib-2.6/zstdlib.c +4906 -0
- data/ext/zstdlib_c/ruby/zlib-2.7/zstdlib.c +4895 -0
- data/ext/zstdlib_c/ruby/zlib-3.0/zstdlib.c +4994 -0
- data/ext/zstdlib_c/ruby/zlib-3.1/zstdlib.c +5076 -0
- data/ext/zstdlib_c/ruby/zlib-3.2/zstdlib.c +5090 -0
- data/ext/zstdlib_c/ruby/zlib-3.3/zstdlib.c +5090 -0
- data/ext/zstdlib_c/zlib-1.3.1/adler32.c +164 -0
- data/ext/zstdlib_c/zlib-1.3.1/compress.c +75 -0
- data/ext/zstdlib_c/zlib-1.3.1/crc32.c +1049 -0
- data/ext/zstdlib_c/zlib-1.3.1/crc32.h +9446 -0
- data/ext/zstdlib_c/zlib-1.3.1/deflate.c +2139 -0
- data/ext/zstdlib_c/zlib-1.3.1/deflate.h +377 -0
- data/ext/zstdlib_c/zlib-1.3.1/gzclose.c +23 -0
- data/ext/zstdlib_c/zlib-1.3.1/gzguts.h +214 -0
- data/ext/zstdlib_c/zlib-1.3.1/gzlib.c +582 -0
- data/ext/zstdlib_c/zlib-1.3.1/gzread.c +602 -0
- data/ext/zstdlib_c/zlib-1.3.1/gzwrite.c +631 -0
- data/ext/zstdlib_c/zlib-1.3.1/infback.c +628 -0
- data/ext/zstdlib_c/zlib-1.3.1/inffast.c +320 -0
- data/ext/zstdlib_c/zlib-1.3.1/inffast.h +11 -0
- data/ext/zstdlib_c/zlib-1.3.1/inffixed.h +94 -0
- data/ext/zstdlib_c/zlib-1.3.1/inflate.c +1526 -0
- data/ext/zstdlib_c/zlib-1.3.1/inflate.h +126 -0
- data/ext/zstdlib_c/zlib-1.3.1/inftrees.c +299 -0
- data/ext/zstdlib_c/zlib-1.3.1/inftrees.h +62 -0
- data/ext/zstdlib_c/zlib-1.3.1/trees.c +1117 -0
- data/ext/zstdlib_c/zlib-1.3.1/trees.h +128 -0
- data/ext/zstdlib_c/zlib-1.3.1/uncompr.c +85 -0
- data/ext/zstdlib_c/zlib-1.3.1/zconf.h +543 -0
- data/ext/zstdlib_c/zlib-1.3.1/zlib.h +1938 -0
- data/ext/zstdlib_c/zlib-1.3.1/zutil.c +299 -0
- data/ext/zstdlib_c/zlib-1.3.1/zutil.h +254 -0
- data/ext/zstdlib_c/zlib.mk +14 -0
- data/ext/zstdlib_c/zlibwrapper/zlibwrapper.c +10 -0
- data/ext/zstdlib_c/zlibwrapper.mk +14 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/common/allocations.h +55 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/common/bits.h +200 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/common/bitstream.h +457 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/common/compiler.h +450 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/common/cpu.h +249 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/common/debug.c +30 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/common/debug.h +116 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/common/entropy_common.c +340 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/common/error_private.c +63 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/common/error_private.h +168 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/common/fse.h +640 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/common/fse_decompress.c +313 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/common/huf.h +286 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/common/mem.h +426 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/common/pool.c +371 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/common/pool.h +90 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/common/portability_macros.h +158 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/common/threading.c +182 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/common/threading.h +150 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/common/xxhash.c +18 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/common/xxhash.h +7020 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/common/zstd_common.c +48 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/common/zstd_deps.h +111 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/common/zstd_internal.h +392 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/common/zstd_trace.h +163 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/compress/clevels.h +134 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/compress/fse_compress.c +625 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/compress/hist.c +181 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/compress/hist.h +75 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/compress/huf_compress.c +1464 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_compress.c +7153 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_compress_internal.h +1534 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_compress_literals.c +235 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_compress_literals.h +39 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_compress_sequences.c +442 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_compress_sequences.h +54 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_compress_superblock.c +688 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_compress_superblock.h +32 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_cwksp.h +748 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_double_fast.c +770 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_double_fast.h +50 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_fast.c +968 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_fast.h +38 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_lazy.c +2199 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_lazy.h +202 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_ldm.c +730 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_ldm.h +117 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_ldm_geartab.h +106 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_opt.c +1576 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstd_opt.h +80 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstdmt_compress.c +1882 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/compress/zstdmt_compress.h +113 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/decompress/huf_decompress.c +1944 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/decompress/huf_decompress_amd64.S +595 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/decompress/zstd_ddict.c +244 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/decompress/zstd_ddict.h +44 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/decompress/zstd_decompress.c +2407 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/decompress/zstd_decompress_block.c +2215 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/decompress/zstd_decompress_block.h +73 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/decompress/zstd_decompress_internal.h +240 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/zdict.h +474 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/zstd.h +3089 -0
- data/ext/zstdlib_c/zstd-1.5.6/lib/zstd_errors.h +114 -0
- data/ext/zstdlib_c/zstd-1.5.6/zlibWrapper/gzclose.c +26 -0
- data/ext/zstdlib_c/zstd-1.5.6/zlibWrapper/gzcompatibility.h +68 -0
- data/ext/zstdlib_c/zstd-1.5.6/zlibWrapper/gzguts.h +229 -0
- data/ext/zstdlib_c/zstd-1.5.6/zlibWrapper/gzlib.c +587 -0
- data/ext/zstdlib_c/zstd-1.5.6/zlibWrapper/gzread.c +637 -0
- data/ext/zstdlib_c/zstd-1.5.6/zlibWrapper/gzwrite.c +631 -0
- data/ext/zstdlib_c/zstd-1.5.6/zlibWrapper/zstd_zlibwrapper.c +1200 -0
- data/ext/zstdlib_c/zstd-1.5.6/zlibWrapper/zstd_zlibwrapper.h +91 -0
- data/ext/zstdlib_c/zstd.mk +15 -0
- data/lib/2.4/zstdlib_c.so +0 -0
- data/lib/2.5/zstdlib_c.so +0 -0
- data/lib/2.6/zstdlib_c.so +0 -0
- data/lib/2.7/zstdlib_c.so +0 -0
- data/lib/3.0/zstdlib_c.so +0 -0
- data/lib/3.1/zstdlib_c.so +0 -0
- data/lib/3.2/zstdlib_c.so +0 -0
- data/lib/3.3/zstdlib_c.so +0 -0
- data/lib/zstdlib.rb +6 -0
- data/test/zstdlib_test.rb +21 -0
- metadata +243 -0
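
The listing above is the gem's manifest: the zlib 1.3.1 and zstd 1.5.6 C sources, zstd's zlibWrapper shim, an extconf.rb plus make fragments to build them, prebuilt `zstdlib_c.so` binaries for Ruby 2.4 through 3.3, and a small `lib/zstdlib.rb` loader. For orientation only, here is a minimal, hypothetical C sketch of how the bundled zlibWrapper is typically driven; it assumes the `ZWRAP_*` controls declared in `zstd_zlibwrapper.h` and is not code shipped in this package.

```c
/* Hypothetical sketch, not shipped in the gem: driving the bundled zlibWrapper
 * directly from C. Assumes the ZWRAP_* controls declared in
 * zstd-1.5.6/zlibWrapper/zstd_zlibwrapper.h and a build that compiles the
 * wrapper together with zlib and libzstd, as the make fragments above do. */
#include <stdio.h>
#include <string.h>
#include "zstd_zlibwrapper.h"   /* zlib API plus the ZWRAP_* switches */

int main(void)
{
    const char input[] = "zstdlib exposes zlib and zstd behind one deflate API";
    unsigned char output[256];
    z_stream s;
    memset(&s, 0, sizeof(s));

    ZWRAP_useZSTDcompression(1);               /* 1 = emit zstd frames, 0 = plain zlib */
    if (deflateInit(&s, 6) != Z_OK) return 1;  /* level 6, routed to zstd here */

    s.next_in   = (unsigned char*)input;
    s.avail_in  = (unsigned int)sizeof(input);
    s.next_out  = output;
    s.avail_out = (unsigned int)sizeof(output);
    if (deflate(&s, Z_FINISH) != Z_STREAM_END) { deflateEnd(&s); return 1; }

    printf("zstd mode: %d, %u bytes in -> %lu bytes out\n",
           ZWRAP_isUsingZSTDcompression(), (unsigned)sizeof(input), s.total_out);
    return deflateEnd(&s) == Z_OK ? 0 : 1;
}
```

Passing 0 to `ZWRAP_useZSTDcompression()` routes the same calls back to plain zlib deflate.
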
data/ext/zstdlib_c/zstd-1.5.6/lib/common/pool.c
@@ -0,0 +1,371 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+
+/* ====== Dependencies ======= */
+#include "../common/allocations.h"  /* ZSTD_customCalloc, ZSTD_customFree */
+#include "zstd_deps.h"  /* size_t */
+#include "debug.h"      /* assert */
+#include "pool.h"
+
+/* ====== Compiler specifics ====== */
+#if defined(_MSC_VER)
+#  pragma warning(disable : 4204)  /* disable: C4204: non-constant aggregate initializer */
+#endif
+
+
+#ifdef ZSTD_MULTITHREAD
+
+#include "threading.h"  /* pthread adaptation */
+
+/* A job is a function and an opaque argument */
+typedef struct POOL_job_s {
+    POOL_function function;
+    void *opaque;
+} POOL_job;
+
+struct POOL_ctx_s {
+    ZSTD_customMem customMem;
+    /* Keep track of the threads */
+    ZSTD_pthread_t* threads;
+    size_t threadCapacity;
+    size_t threadLimit;
+
+    /* The queue is a circular buffer */
+    POOL_job *queue;
+    size_t queueHead;
+    size_t queueTail;
+    size_t queueSize;
+
+    /* The number of threads working on jobs */
+    size_t numThreadsBusy;
+    /* Indicates if the queue is empty */
+    int queueEmpty;
+
+    /* The mutex protects the queue */
+    ZSTD_pthread_mutex_t queueMutex;
+    /* Condition variable for pushers to wait on when the queue is full */
+    ZSTD_pthread_cond_t queuePushCond;
+    /* Condition variables for poppers to wait on when the queue is empty */
+    ZSTD_pthread_cond_t queuePopCond;
+    /* Indicates if the queue is shutting down */
+    int shutdown;
+};
+
+/* POOL_thread() :
+ * Work thread for the thread pool.
+ * Waits for jobs and executes them.
+ * @returns : NULL on failure else non-null.
+ */
+static void* POOL_thread(void* opaque) {
+    POOL_ctx* const ctx = (POOL_ctx*)opaque;
+    if (!ctx) { return NULL; }
+    for (;;) {
+        /* Lock the mutex and wait for a non-empty queue or until shutdown */
+        ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+
+        while ( ctx->queueEmpty
+            || (ctx->numThreadsBusy >= ctx->threadLimit) ) {
+            if (ctx->shutdown) {
+                /* even if !queueEmpty, (possible if numThreadsBusy >= threadLimit),
+                 * a few threads will be shutdown while !queueEmpty,
+                 * but enough threads will remain active to finish the queue */
+                ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+                return opaque;
+            }
+            ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex);
+        }
+        /* Pop a job off the queue */
+        {   POOL_job const job = ctx->queue[ctx->queueHead];
+            ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize;
+            ctx->numThreadsBusy++;
+            ctx->queueEmpty = (ctx->queueHead == ctx->queueTail);
+            /* Unlock the mutex, signal a pusher, and run the job */
+            ZSTD_pthread_cond_signal(&ctx->queuePushCond);
+            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+
+            job.function(job.opaque);
+
+            /* If the intended queue size was 0, signal after finishing job */
+            ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+            ctx->numThreadsBusy--;
+            ZSTD_pthread_cond_signal(&ctx->queuePushCond);
+            ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+        }
+    }  /* for (;;) */
+    assert(0);  /* Unreachable */
+}
+
+/* ZSTD_createThreadPool() : public access point */
+POOL_ctx* ZSTD_createThreadPool(size_t numThreads) {
+    return POOL_create (numThreads, 0);
+}
+
+POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
+    return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
+}
+
+POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
+                               ZSTD_customMem customMem)
+{
+    POOL_ctx* ctx;
+    /* Check parameters */
+    if (!numThreads) { return NULL; }
+    /* Allocate the context and zero initialize */
+    ctx = (POOL_ctx*)ZSTD_customCalloc(sizeof(POOL_ctx), customMem);
+    if (!ctx) { return NULL; }
+    /* Initialize the job queue.
+     * It needs one extra space since one space is wasted to differentiate
+     * empty and full queues.
+     */
+    ctx->queueSize = queueSize + 1;
+    ctx->queue = (POOL_job*)ZSTD_customCalloc(ctx->queueSize * sizeof(POOL_job), customMem);
+    ctx->queueHead = 0;
+    ctx->queueTail = 0;
+    ctx->numThreadsBusy = 0;
+    ctx->queueEmpty = 1;
+    {
+        int error = 0;
+        error |= ZSTD_pthread_mutex_init(&ctx->queueMutex, NULL);
+        error |= ZSTD_pthread_cond_init(&ctx->queuePushCond, NULL);
+        error |= ZSTD_pthread_cond_init(&ctx->queuePopCond, NULL);
+        if (error) { POOL_free(ctx); return NULL; }
+    }
+    ctx->shutdown = 0;
+    /* Allocate space for the thread handles */
+    ctx->threads = (ZSTD_pthread_t*)ZSTD_customCalloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
+    ctx->threadCapacity = 0;
+    ctx->customMem = customMem;
+    /* Check for errors */
+    if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; }
+    /* Initialize the threads */
+    {   size_t i;
+        for (i = 0; i < numThreads; ++i) {
+            if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) {
+                ctx->threadCapacity = i;
+                POOL_free(ctx);
+                return NULL;
+        }   }
+        ctx->threadCapacity = numThreads;
+        ctx->threadLimit = numThreads;
+    }
+    return ctx;
+}
+
+/*! POOL_join() :
+    Shutdown the queue, wake any sleeping threads, and join all of the threads.
+*/
+static void POOL_join(POOL_ctx* ctx) {
+    /* Shut down the queue */
+    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+    ctx->shutdown = 1;
+    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+    /* Wake up sleeping threads */
+    ZSTD_pthread_cond_broadcast(&ctx->queuePushCond);
+    ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
+    /* Join all of the threads */
+    {   size_t i;
+        for (i = 0; i < ctx->threadCapacity; ++i) {
+            ZSTD_pthread_join(ctx->threads[i]);  /* note : could fail */
+    }   }
+}
+
+void POOL_free(POOL_ctx *ctx) {
+    if (!ctx) { return; }
+    POOL_join(ctx);
+    ZSTD_pthread_mutex_destroy(&ctx->queueMutex);
+    ZSTD_pthread_cond_destroy(&ctx->queuePushCond);
+    ZSTD_pthread_cond_destroy(&ctx->queuePopCond);
+    ZSTD_customFree(ctx->queue, ctx->customMem);
+    ZSTD_customFree(ctx->threads, ctx->customMem);
+    ZSTD_customFree(ctx, ctx->customMem);
+}
+
+/*! POOL_joinJobs() :
+ *  Waits for all queued jobs to finish executing.
+ */
+void POOL_joinJobs(POOL_ctx* ctx) {
+    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+    while(!ctx->queueEmpty || ctx->numThreadsBusy > 0) {
+        ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
+    }
+    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+}
+
+void ZSTD_freeThreadPool (ZSTD_threadPool* pool) {
+    POOL_free (pool);
+}
+
+size_t POOL_sizeof(const POOL_ctx* ctx) {
+    if (ctx==NULL) return 0;  /* supports sizeof NULL */
+    return sizeof(*ctx)
+        + ctx->queueSize * sizeof(POOL_job)
+        + ctx->threadCapacity * sizeof(ZSTD_pthread_t);
+}
+
+
+/* @return : 0 on success, 1 on error */
+static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads)
+{
+    if (numThreads <= ctx->threadCapacity) {
+        if (!numThreads) return 1;
+        ctx->threadLimit = numThreads;
+        return 0;
+    }
+    /* numThreads > threadCapacity */
+    {   ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_customCalloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem);
+        if (!threadPool) return 1;
+        /* replace existing thread pool */
+        ZSTD_memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(ZSTD_pthread_t));
+        ZSTD_customFree(ctx->threads, ctx->customMem);
+        ctx->threads = threadPool;
+        /* Initialize additional threads */
+        {   size_t threadId;
+            for (threadId = ctx->threadCapacity; threadId < numThreads; ++threadId) {
+                if (ZSTD_pthread_create(&threadPool[threadId], NULL, &POOL_thread, ctx)) {
+                    ctx->threadCapacity = threadId;
+                    return 1;
+            }   }
+    }   }
+    /* successfully expanded */
+    ctx->threadCapacity = numThreads;
+    ctx->threadLimit = numThreads;
+    return 0;
+}
+
+/* @return : 0 on success, 1 on error */
+int POOL_resize(POOL_ctx* ctx, size_t numThreads)
+{
+    int result;
+    if (ctx==NULL) return 1;
+    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+    result = POOL_resize_internal(ctx, numThreads);
+    ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
+    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+    return result;
+}
+
+/**
+ * Returns 1 if the queue is full and 0 otherwise.
+ *
+ * When queueSize is 1 (pool was created with an intended queueSize of 0),
+ * then a queue is empty if there is a thread free _and_ no job is waiting.
+ */
+static int isQueueFull(POOL_ctx const* ctx) {
+    if (ctx->queueSize > 1) {
+        return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize);
+    } else {
+        return (ctx->numThreadsBusy == ctx->threadLimit) ||
+               !ctx->queueEmpty;
+    }
+}
+
+
+static void
+POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque)
+{
+    POOL_job job;
+    job.function = function;
+    job.opaque = opaque;
+    assert(ctx != NULL);
+    if (ctx->shutdown) return;
+
+    ctx->queueEmpty = 0;
+    ctx->queue[ctx->queueTail] = job;
+    ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize;
+    ZSTD_pthread_cond_signal(&ctx->queuePopCond);
+}
+
+void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque)
+{
+    assert(ctx != NULL);
+    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+    /* Wait until there is space in the queue for the new job */
+    while (isQueueFull(ctx) && (!ctx->shutdown)) {
+        ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
+    }
+    POOL_add_internal(ctx, function, opaque);
+    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+}
+
+
+int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque)
+{
+    assert(ctx != NULL);
+    ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+    if (isQueueFull(ctx)) {
+        ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+        return 0;
+    }
+    POOL_add_internal(ctx, function, opaque);
+    ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+    return 1;
+}
+
+
+#else  /* ZSTD_MULTITHREAD not defined */
+
+/* ========================== */
+/* No multi-threading support */
+/* ========================== */
+
+
+/* We don't need any data, but if it is empty, malloc() might return NULL. */
+struct POOL_ctx_s {
+    int dummy;
+};
+static POOL_ctx g_poolCtx;
+
+POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
+    return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
+}
+
+POOL_ctx*
+POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem)
+{
+    (void)numThreads;
+    (void)queueSize;
+    (void)customMem;
+    return &g_poolCtx;
+}
+
+void POOL_free(POOL_ctx* ctx) {
+    assert(!ctx || ctx == &g_poolCtx);
+    (void)ctx;
+}
+
+void POOL_joinJobs(POOL_ctx* ctx){
+    assert(!ctx || ctx == &g_poolCtx);
+    (void)ctx;
+}
+
+int POOL_resize(POOL_ctx* ctx, size_t numThreads) {
+    (void)ctx; (void)numThreads;
+    return 0;
+}
+
+void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) {
+    (void)ctx;
+    function(opaque);
+}
+
+int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) {
+    (void)ctx;
+    function(opaque);
+    return 1;
+}
+
+size_t POOL_sizeof(const POOL_ctx* ctx) {
+    if (ctx==NULL) return 0;  /* supports sizeof NULL */
+    assert(ctx == &g_poolCtx);
+    return sizeof(*ctx);
+}
+
+#endif  /* ZSTD_MULTITHREAD */
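
pool.c above is zstd's internal thread pool: a fixed circular job queue guarded by one mutex and two condition variables (queuePushCond for producers, queuePopCond for the workers), with a pass-through single-threaded fallback when ZSTD_MULTITHREAD is not defined. The following hypothetical driver is not part of zstd or of this gem; it is a sketch assuming the POOL_* entry points are linked in from a libzstd built with -DZSTD_MULTITHREAD and -pthread, and the Task type and runTask callback are illustrative only.

```c
/* Hypothetical driver, not shipped anywhere in this package. */
#include <stdio.h>
#include "pool.h"   /* POOL_create, POOL_add, POOL_joinJobs, POOL_free */

typedef struct { int id; } Task;    /* illustrative payload */

static void runTask(void* opaque)   /* matches POOL_function: void (*)(void*) */
{
    Task* const t = (Task*)opaque;
    printf("job %d done\n", t->id);
}

int main(void)
{
    Task tasks[8];
    int i;
    POOL_ctx* const pool = POOL_create(4, 8);   /* 4 workers, queue of 8 jobs */
    if (pool == NULL) return 1;
    for (i = 0; i < 8; ++i) {
        tasks[i].id = i;
        POOL_add(pool, runTask, &tasks[i]);     /* blocks only if the queue is full */
    }
    POOL_joinJobs(pool);    /* wait until the queue is drained and workers are idle */
    POOL_free(pool);        /* shuts the queue down and joins the worker threads */
    return 0;
}
```

POOL_add() blocks only while isQueueFull() holds; with an intended queueSize of 0 that also covers the case where every worker is busy, which is why the workers re-signal queuePushCond after finishing each job.
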
data/ext/zstdlib_c/zstd-1.5.6/lib/common/pool.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef POOL_H
+#define POOL_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+#include "zstd_deps.h"
+#define ZSTD_STATIC_LINKING_ONLY  /* ZSTD_customMem */
+#include "../zstd.h"
+
+typedef struct POOL_ctx_s POOL_ctx;
+
+/*! POOL_create() :
+ *  Create a thread pool with at most `numThreads` threads.
+ * `numThreads` must be at least 1.
+ *  The maximum number of queued jobs before blocking is `queueSize`.
+ * @return : POOL_ctx pointer on success, else NULL.
+ */
+POOL_ctx* POOL_create(size_t numThreads, size_t queueSize);
+
+POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
+                               ZSTD_customMem customMem);
+
+/*! POOL_free() :
+ *  Free a thread pool returned by POOL_create().
+ */
+void POOL_free(POOL_ctx* ctx);
+
+
+/*! POOL_joinJobs() :
+ *  Waits for all queued jobs to finish executing.
+ */
+void POOL_joinJobs(POOL_ctx* ctx);
+
+/*! POOL_resize() :
+ *  Expands or shrinks pool's number of threads.
+ *  This is more efficient than releasing + creating a new context,
+ *  since it tries to preserve and reuse existing threads.
+ * `numThreads` must be at least 1.
+ * @return : 0 when resize was successful,
+ *           !0 (typically 1) if there is an error.
+ *    note : only numThreads can be resized, queueSize remains unchanged.
+ */
+int POOL_resize(POOL_ctx* ctx, size_t numThreads);
+
+/*! POOL_sizeof() :
+ * @return threadpool memory usage
+ *  note : compatible with NULL (returns 0 in this case)
+ */
+size_t POOL_sizeof(const POOL_ctx* ctx);
+
+/*! POOL_function :
+ *  The function type that can be added to a thread pool.
+ */
+typedef void (*POOL_function)(void*);
+
+/*! POOL_add() :
+ *  Add the job `function(opaque)` to the thread pool. `ctx` must be valid.
+ *  Possibly blocks until there is room in the queue.
+ *  Note : The function may be executed asynchronously,
+ *         therefore, `opaque` must live until function has been completed.
+ */
+void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque);
+
+
+/*! POOL_tryAdd() :
+ *  Add the job `function(opaque)` to thread pool _if_ a queue slot is available.
+ *  Returns immediately even if not (does not block).
+ * @return : 1 if successful, 0 if not.
+ */
+int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque);
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
data/ext/zstdlib_c/zstd-1.5.6/lib/common/portability_macros.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_PORTABILITY_MACROS_H
+#define ZSTD_PORTABILITY_MACROS_H
+
+/**
+ * This header file contains macro definitions to support portability.
+ * This header is shared between C and ASM code, so it MUST only
+ * contain macro definitions. It MUST not contain any C code.
+ *
+ * This header ONLY defines macros to detect platforms/feature support.
+ *
+ */
+
+
+/* compat. with non-clang compilers */
+#ifndef __has_attribute
+#  define __has_attribute(x) 0
+#endif
+
+/* compat. with non-clang compilers */
+#ifndef __has_builtin
+#  define __has_builtin(x) 0
+#endif
+
+/* compat. with non-clang compilers */
+#ifndef __has_feature
+#  define __has_feature(x) 0
+#endif
+
+/* detects whether we are being compiled under msan */
+#ifndef ZSTD_MEMORY_SANITIZER
+#  if __has_feature(memory_sanitizer)
+#    define ZSTD_MEMORY_SANITIZER 1
+#  else
+#    define ZSTD_MEMORY_SANITIZER 0
+#  endif
+#endif
+
+/* detects whether we are being compiled under asan */
+#ifndef ZSTD_ADDRESS_SANITIZER
+#  if __has_feature(address_sanitizer)
+#    define ZSTD_ADDRESS_SANITIZER 1
+#  elif defined(__SANITIZE_ADDRESS__)
+#    define ZSTD_ADDRESS_SANITIZER 1
+#  else
+#    define ZSTD_ADDRESS_SANITIZER 0
+#  endif
+#endif
+
+/* detects whether we are being compiled under dfsan */
+#ifndef ZSTD_DATAFLOW_SANITIZER
+#  if __has_feature(dataflow_sanitizer)
+#    define ZSTD_DATAFLOW_SANITIZER 1
+#  else
+#    define ZSTD_DATAFLOW_SANITIZER 0
+#  endif
+#endif
+
+/* Mark the internal assembly functions as hidden */
+#ifdef __ELF__
+#  define ZSTD_HIDE_ASM_FUNCTION(func) .hidden func
+#elif defined(__APPLE__)
+#  define ZSTD_HIDE_ASM_FUNCTION(func) .private_extern func
+#else
+#  define ZSTD_HIDE_ASM_FUNCTION(func)
+#endif
+
+/* Enable runtime BMI2 dispatch based on the CPU.
+ * Enabled for clang & gcc >=4.8 on x86 when BMI2 isn't enabled by default.
+ */
+#ifndef DYNAMIC_BMI2
+#  if ((defined(__clang__) && __has_attribute(__target__)) \
+      || (defined(__GNUC__) \
+          && (__GNUC__ >= 5 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8)))) \
+    && (defined(__x86_64__) || defined(_M_X64)) \
+    && !defined(__BMI2__)
+#    define DYNAMIC_BMI2 1
+#  else
+#    define DYNAMIC_BMI2 0
+#  endif
+#endif
+
+/**
+ * Only enable assembly for GNUC compatible compilers,
+ * because other platforms may not support GAS assembly syntax.
+ *
+ * Only enable assembly for Linux / MacOS, other platforms may
+ * work, but they haven't been tested. This could likely be
+ * extended to BSD systems.
+ *
+ * Disable assembly when MSAN is enabled, because MSAN requires
+ * 100% of code to be instrumented to work.
+ */
+#if defined(__GNUC__)
+#  if defined(__linux__) || defined(__linux) || defined(__APPLE__)
+#    if ZSTD_MEMORY_SANITIZER
+#      define ZSTD_ASM_SUPPORTED 0
+#    elif ZSTD_DATAFLOW_SANITIZER
+#      define ZSTD_ASM_SUPPORTED 0
+#    else
+#      define ZSTD_ASM_SUPPORTED 1
+#    endif
+#  else
+#    define ZSTD_ASM_SUPPORTED 0
+#  endif
+#else
+#  define ZSTD_ASM_SUPPORTED 0
+#endif
+
+/**
+ * Determines whether we should enable assembly for x86-64
+ * with BMI2.
+ *
+ * Enable if all of the following conditions hold:
+ * - ASM hasn't been explicitly disabled by defining ZSTD_DISABLE_ASM
+ * - Assembly is supported
+ * - We are compiling for x86-64 and either:
+ *   - DYNAMIC_BMI2 is enabled
+ *   - BMI2 is supported at compile time
+ */
+#if !defined(ZSTD_DISABLE_ASM) && \
+    ZSTD_ASM_SUPPORTED && \
+    defined(__x86_64__) && \
+    (DYNAMIC_BMI2 || defined(__BMI2__))
+#  define ZSTD_ENABLE_ASM_X86_64_BMI2 1
+#else
+#  define ZSTD_ENABLE_ASM_X86_64_BMI2 0
+#endif
+
+/*
+ * For x86 ELF targets, add .note.gnu.property section for Intel CET in
+ * assembly sources when CET is enabled.
+ *
+ * Additionally, any function that may be called indirectly must begin
+ * with ZSTD_CET_ENDBRANCH.
+ */
+#if defined(__ELF__) && (defined(__x86_64__) || defined(__i386__)) \
+    && defined(__has_include)
+#  if __has_include(<cet.h>)
+#    include <cet.h>
+#    define ZSTD_CET_ENDBRANCH _CET_ENDBR
+#  endif
+#endif
+
+#ifndef ZSTD_CET_ENDBRANCH
+#  define ZSTD_CET_ENDBRANCH
+#endif
+
+#endif /* ZSTD_PORTABILITY_MACROS_H */
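
portability_macros.h only detects toolchain and target features; it deliberately contains no C code because it is shared with assembly sources such as the huf_decompress_amd64.S listed in the manifest. A small hypothetical probe (not part of the package) prints what the detection resolves to for the current compiler and target; it is a sketch assuming it is compiled from within lib/common so the include path resolves.

```c
/* Hypothetical probe, not shipped in this package. */
#include <stdio.h>
#include "portability_macros.h"

int main(void)
{
    printf("DYNAMIC_BMI2                = %d\n", DYNAMIC_BMI2);
    printf("ZSTD_ASM_SUPPORTED          = %d\n", ZSTD_ASM_SUPPORTED);
    printf("ZSTD_ENABLE_ASM_X86_64_BMI2 = %d\n", ZSTD_ENABLE_ASM_X86_64_BMI2);
    printf("ZSTD_MEMORY_SANITIZER       = %d\n", ZSTD_MEMORY_SANITIZER);
    printf("ZSTD_ADDRESS_SANITIZER      = %d\n", ZSTD_ADDRESS_SANITIZER);
    printf("ZSTD_DATAFLOW_SANITIZER     = %d\n", ZSTD_DATAFLOW_SANITIZER);
    return 0;
}
```

Defining ZSTD_DISABLE_ASM, or building under MSAN or DFSAN, forces ZSTD_ENABLE_ASM_X86_64_BMI2 back to 0.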