extzstd 0.2 → 0.3

Files changed (88)
  1. checksums.yaml +4 -4
  2. data/HISTORY.ja.md +13 -0
  3. data/README.md +17 -14
  4. data/contrib/zstd/{NEWS → CHANGELOG} +115 -2
  5. data/contrib/zstd/CODE_OF_CONDUCT.md +5 -0
  6. data/contrib/zstd/Makefile +99 -53
  7. data/contrib/zstd/README.md +59 -39
  8. data/contrib/zstd/TESTING.md +1 -1
  9. data/contrib/zstd/appveyor.yml +17 -6
  10. data/contrib/zstd/lib/BUCK +29 -2
  11. data/contrib/zstd/lib/Makefile +118 -21
  12. data/contrib/zstd/lib/README.md +84 -44
  13. data/contrib/zstd/lib/common/bitstream.h +17 -33
  14. data/contrib/zstd/lib/common/compiler.h +62 -8
  15. data/contrib/zstd/lib/common/cpu.h +215 -0
  16. data/contrib/zstd/lib/common/debug.c +44 -0
  17. data/contrib/zstd/lib/common/debug.h +134 -0
  18. data/contrib/zstd/lib/common/entropy_common.c +16 -1
  19. data/contrib/zstd/lib/common/error_private.c +7 -0
  20. data/contrib/zstd/lib/common/fse.h +48 -44
  21. data/contrib/zstd/lib/common/fse_decompress.c +3 -3
  22. data/contrib/zstd/lib/common/huf.h +169 -113
  23. data/contrib/zstd/lib/common/mem.h +20 -2
  24. data/contrib/zstd/lib/common/pool.c +135 -49
  25. data/contrib/zstd/lib/common/pool.h +40 -21
  26. data/contrib/zstd/lib/common/threading.c +2 -2
  27. data/contrib/zstd/lib/common/threading.h +12 -12
  28. data/contrib/zstd/lib/common/xxhash.c +3 -2
  29. data/contrib/zstd/lib/common/zstd_common.c +3 -6
  30. data/contrib/zstd/lib/common/zstd_errors.h +17 -7
  31. data/contrib/zstd/lib/common/zstd_internal.h +76 -48
  32. data/contrib/zstd/lib/compress/fse_compress.c +89 -209
  33. data/contrib/zstd/lib/compress/hist.c +203 -0
  34. data/contrib/zstd/lib/compress/hist.h +95 -0
  35. data/contrib/zstd/lib/compress/huf_compress.c +188 -80
  36. data/contrib/zstd/lib/compress/zstd_compress.c +2500 -1203
  37. data/contrib/zstd/lib/compress/zstd_compress_internal.h +463 -62
  38. data/contrib/zstd/lib/compress/zstd_double_fast.c +321 -131
  39. data/contrib/zstd/lib/compress/zstd_double_fast.h +13 -4
  40. data/contrib/zstd/lib/compress/zstd_fast.c +335 -108
  41. data/contrib/zstd/lib/compress/zstd_fast.h +12 -6
  42. data/contrib/zstd/lib/compress/zstd_lazy.c +654 -313
  43. data/contrib/zstd/lib/compress/zstd_lazy.h +44 -16
  44. data/contrib/zstd/lib/compress/zstd_ldm.c +310 -420
  45. data/contrib/zstd/lib/compress/zstd_ldm.h +63 -26
  46. data/contrib/zstd/lib/compress/zstd_opt.c +773 -325
  47. data/contrib/zstd/lib/compress/zstd_opt.h +31 -5
  48. data/contrib/zstd/lib/compress/zstdmt_compress.c +1468 -518
  49. data/contrib/zstd/lib/compress/zstdmt_compress.h +96 -45
  50. data/contrib/zstd/lib/decompress/huf_decompress.c +518 -282
  51. data/contrib/zstd/lib/decompress/zstd_ddict.c +240 -0
  52. data/contrib/zstd/lib/decompress/zstd_ddict.h +44 -0
  53. data/contrib/zstd/lib/decompress/zstd_decompress.c +613 -1513
  54. data/contrib/zstd/lib/decompress/zstd_decompress_block.c +1311 -0
  55. data/contrib/zstd/lib/decompress/zstd_decompress_block.h +59 -0
  56. data/contrib/zstd/lib/decompress/zstd_decompress_internal.h +175 -0
  57. data/contrib/zstd/lib/dictBuilder/cover.c +194 -113
  58. data/contrib/zstd/lib/dictBuilder/cover.h +112 -0
  59. data/contrib/zstd/lib/dictBuilder/divsufsort.c +3 -3
  60. data/contrib/zstd/lib/dictBuilder/fastcover.c +740 -0
  61. data/contrib/zstd/lib/dictBuilder/zdict.c +142 -106
  62. data/contrib/zstd/lib/dictBuilder/zdict.h +115 -49
  63. data/contrib/zstd/lib/legacy/zstd_legacy.h +44 -12
  64. data/contrib/zstd/lib/legacy/zstd_v01.c +41 -10
  65. data/contrib/zstd/lib/legacy/zstd_v01.h +12 -7
  66. data/contrib/zstd/lib/legacy/zstd_v02.c +37 -12
  67. data/contrib/zstd/lib/legacy/zstd_v02.h +12 -7
  68. data/contrib/zstd/lib/legacy/zstd_v03.c +38 -12
  69. data/contrib/zstd/lib/legacy/zstd_v03.h +12 -7
  70. data/contrib/zstd/lib/legacy/zstd_v04.c +55 -174
  71. data/contrib/zstd/lib/legacy/zstd_v04.h +12 -7
  72. data/contrib/zstd/lib/legacy/zstd_v05.c +59 -31
  73. data/contrib/zstd/lib/legacy/zstd_v05.h +12 -7
  74. data/contrib/zstd/lib/legacy/zstd_v06.c +48 -20
  75. data/contrib/zstd/lib/legacy/zstd_v06.h +10 -5
  76. data/contrib/zstd/lib/legacy/zstd_v07.c +62 -29
  77. data/contrib/zstd/lib/legacy/zstd_v07.h +10 -5
  78. data/contrib/zstd/lib/zstd.h +1346 -832
  79. data/ext/extzstd.c +27 -19
  80. data/ext/extzstd_stream.c +20 -4
  81. data/ext/zstd_compress.c +1 -0
  82. data/ext/zstd_decompress.c +4 -0
  83. data/ext/zstd_dictbuilder.c +4 -0
  84. data/ext/zstd_dictbuilder_fastcover.c +5 -0
  85. data/lib/extzstd.rb +52 -220
  86. data/lib/extzstd/version.rb +1 -1
  87. metadata +21 -7
  88. data/contrib/zstd/circle.yml +0 -63
data/contrib/zstd/lib/common/mem.h
@@ -39,6 +39,10 @@ extern "C" {
  #  define MEM_STATIC static  /* this version may generate warnings for unused static functions; disable the relevant warning */
  #endif
 
+ #ifndef __has_builtin
+ #  define __has_builtin(x) 0  /* compat. with non-clang compilers */
+ #endif
+
  /* code only tested on 32 and 64 bits systems */
  #define MEM_STATIC_ASSERT(c)   { enum { MEM_static_assert = 1/(int)(!!(c)) }; }
  MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (sizeof(size_t)==8)); }
@@ -57,11 +61,23 @@ MEM_STATIC void MEM_check(void) { MEM_STATIC_ASSERT((sizeof(size_t)==4) || (size
  typedef uint64_t U64;
  typedef int64_t S64;
  #else
+ # include <limits.h>
+ #if CHAR_BIT != 8
+ #  error "this implementation requires char to be exactly 8-bit type"
+ #endif
  typedef unsigned char BYTE;
+ #if USHRT_MAX != 65535
+ #  error "this implementation requires short to be exactly 16-bit type"
+ #endif
  typedef unsigned short U16;
  typedef signed short S16;
+ #if UINT_MAX != 4294967295
+ #  error "this implementation requires int to be exactly 32-bit type"
+ #endif
  typedef unsigned int U32;
  typedef signed int S32;
+ /* note : there are no limits defined for long long type in C90.
+  * limits exist in C99, however, in such case, <stdint.h> is preferred */
  typedef unsigned long long U64;
  typedef signed long long S64;
  #endif
@@ -186,7 +202,8 @@ MEM_STATIC U32 MEM_swap32(U32 in)
  {
  #if defined(_MSC_VER)     /* Visual Studio */
      return _byteswap_ulong(in);
- #elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
+ #elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
+   || (defined(__clang__) && __has_builtin(__builtin_bswap32))
      return __builtin_bswap32(in);
  #else
      return  ((in << 24) & 0xff000000 ) |
@@ -200,7 +217,8 @@ MEM_STATIC U64 MEM_swap64(U64 in)
  {
  #if defined(_MSC_VER)     /* Visual Studio */
      return _byteswap_uint64(in);
- #elif defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)
+ #elif (defined (__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
+   || (defined(__clang__) && __has_builtin(__builtin_bswap64))
      return __builtin_bswap64(in);
  #else
      return  ((in << 56) & 0xff00000000000000ULL) |
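
The mem.h hunks above first define a fallback `__has_builtin(x) 0` for compilers that do not provide the macro, then use it (together with the existing GCC version check) to decide whether `__builtin_bswap32`/`__builtin_bswap64` can be used. A minimal standalone sketch of the same detection pattern, using a hypothetical helper named swap32 that is not part of zstd:

    #include <stdint.h>
    #include <stdlib.h>     /* _byteswap_ulong on MSVC */

    #ifndef __has_builtin
    #  define __has_builtin(x) 0   /* non-clang compilers fall through to the other checks */
    #endif

    static uint32_t swap32(uint32_t in)
    {
    #if defined(_MSC_VER)       /* Visual Studio intrinsic */
        return _byteswap_ulong(in);
    #elif (defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__ >= 403)) \
       || (defined(__clang__) && __has_builtin(__builtin_bswap32))
        return __builtin_bswap32(in);
    #else                       /* portable byte-by-byte fallback */
        return ((in << 24) & 0xff000000) | ((in <<  8) & 0x00ff0000)
             | ((in >>  8) & 0x0000ff00) | ((in >> 24) & 0x000000ff);
    #endif
    }

Because `__has_builtin` expands to 0 wherever it is not natively defined, the `#elif` condition stays well-formed on GCC and MSVC instead of triggering a preprocessor error.
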
data/contrib/zstd/lib/common/pool.c
@@ -10,7 +10,9 @@
 
 
  /* ======   Dependencies   ======= */
- #include <stddef.h>    /* size_t */
+ #include <stddef.h>    /* size_t */
+ #include "debug.h"     /* assert */
+ #include "zstd_internal.h"  /* ZSTD_malloc, ZSTD_free */
  #include "pool.h"
 
  /* ======   Compiler specifics   ====== */
@@ -32,8 +34,9 @@ typedef struct POOL_job_s {
  struct POOL_ctx_s {
      ZSTD_customMem customMem;
      /* Keep track of the threads */
-     ZSTD_pthread_t *threads;
-     size_t numThreads;
+     ZSTD_pthread_t* threads;
+     size_t threadCapacity;
+     size_t threadLimit;
 
      /* The queue is a circular buffer */
      POOL_job *queue;
@@ -57,10 +60,10 @@ struct POOL_ctx_s {
  };
 
  /* POOL_thread() :
-    Work thread for the thread pool.
-    Waits for jobs and executes them.
-    @returns : NULL on failure else non-null.
- */
+  * Work thread for the thread pool.
+  * Waits for jobs and executes them.
+  * @returns : NULL on failure else non-null.
+  */
  static void* POOL_thread(void* opaque) {
      POOL_ctx* const ctx = (POOL_ctx*)opaque;
      if (!ctx) { return NULL; }
@@ -68,50 +71,55 @@ static void* POOL_thread(void* opaque) {
          /* Lock the mutex and wait for a non-empty queue or until shutdown */
          ZSTD_pthread_mutex_lock(&ctx->queueMutex);
 
-         while (ctx->queueEmpty && !ctx->shutdown) {
+         while ( ctx->queueEmpty
+             || (ctx->numThreadsBusy >= ctx->threadLimit) ) {
+             if (ctx->shutdown) {
+                 /* even if !queueEmpty, (possible if numThreadsBusy >= threadLimit),
+                  * a few threads will be shutdown while !queueEmpty,
+                  * but enough threads will remain active to finish the queue */
+                 ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+                 return opaque;
+             }
              ZSTD_pthread_cond_wait(&ctx->queuePopCond, &ctx->queueMutex);
          }
-         /* empty => shutting down: so stop */
-         if (ctx->queueEmpty) {
-             ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
-             return opaque;
-         }
          /* Pop a job off the queue */
          {   POOL_job const job = ctx->queue[ctx->queueHead];
              ctx->queueHead = (ctx->queueHead + 1) % ctx->queueSize;
              ctx->numThreadsBusy++;
              ctx->queueEmpty = ctx->queueHead == ctx->queueTail;
              /* Unlock the mutex, signal a pusher, and run the job */
-             ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
              ZSTD_pthread_cond_signal(&ctx->queuePushCond);
+             ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
 
              job.function(job.opaque);
 
              /* If the intended queue size was 0, signal after finishing job */
+             ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+             ctx->numThreadsBusy--;
              if (ctx->queueSize == 1) {
-                 ZSTD_pthread_mutex_lock(&ctx->queueMutex);
-                 ctx->numThreadsBusy--;
-                 ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
                  ZSTD_pthread_cond_signal(&ctx->queuePushCond);
-         }   }
+             }
+             ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+         }
      }  /* for (;;) */
-     /* Unreachable */
+     assert(0);  /* Unreachable */
  }
 
  POOL_ctx* POOL_create(size_t numThreads, size_t queueSize) {
      return POOL_create_advanced(numThreads, queueSize, ZSTD_defaultCMem);
  }
 
- POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem) {
+ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
+                                ZSTD_customMem customMem) {
      POOL_ctx* ctx;
-     /* Check the parameters */
+     /* Check parameters */
      if (!numThreads) { return NULL; }
      /* Allocate the context and zero initialize */
      ctx = (POOL_ctx*)ZSTD_calloc(sizeof(POOL_ctx), customMem);
      if (!ctx) { return NULL; }
      /* Initialize the job queue.
-      * It needs one extra space since one space is wasted to differentiate empty
-      * and full queues.
+      * It needs one extra space since one space is wasted to differentiate
+      * empty and full queues.
       */
      ctx->queueSize = queueSize + 1;
      ctx->queue = (POOL_job*)ZSTD_malloc(ctx->queueSize * sizeof(POOL_job), customMem);
@@ -125,7 +133,7 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customM
      ctx->shutdown = 0;
      /* Allocate space for the thread handles */
      ctx->threads = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), customMem);
-     ctx->numThreads = 0;
+     ctx->threadCapacity = 0;
      ctx->customMem = customMem;
      /* Check for errors */
      if (!ctx->threads || !ctx->queue) { POOL_free(ctx); return NULL; }
@@ -133,11 +141,12 @@ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customM
      { size_t i;
        for (i = 0; i < numThreads; ++i) {
            if (ZSTD_pthread_create(&ctx->threads[i], NULL, &POOL_thread, ctx)) {
-               ctx->numThreads = i;
+               ctx->threadCapacity = i;
                POOL_free(ctx);
                return NULL;
        }   }
-       ctx->numThreads = numThreads;
+       ctx->threadCapacity = numThreads;
+       ctx->threadLimit = numThreads;
      }
      return ctx;
  }
@@ -155,8 +164,8 @@ static void POOL_join(POOL_ctx* ctx) {
      ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
      /* Join all of the threads */
      { size_t i;
-       for (i = 0; i < ctx->numThreads; ++i) {
-           ZSTD_pthread_join(ctx->threads[i], NULL);
+       for (i = 0; i < ctx->threadCapacity; ++i) {
+           ZSTD_pthread_join(ctx->threads[i], NULL);  /* note : could fail */
      } }
  }
 
@@ -171,54 +180,120 @@ void POOL_free(POOL_ctx *ctx) {
      ZSTD_free(ctx, ctx->customMem);
  }
 
+
+
  size_t POOL_sizeof(POOL_ctx *ctx) {
      if (ctx==NULL) return 0;  /* supports sizeof NULL */
      return sizeof(*ctx)
          + ctx->queueSize * sizeof(POOL_job)
-         + ctx->numThreads * sizeof(ZSTD_pthread_t);
+         + ctx->threadCapacity * sizeof(ZSTD_pthread_t);
+ }
+
+
+ /* @return : 0 on success, 1 on error */
+ static int POOL_resize_internal(POOL_ctx* ctx, size_t numThreads)
+ {
+     if (numThreads <= ctx->threadCapacity) {
+         if (!numThreads) return 1;
+         ctx->threadLimit = numThreads;
+         return 0;
+     }
+     /* numThreads > threadCapacity */
+     {   ZSTD_pthread_t* const threadPool = (ZSTD_pthread_t*)ZSTD_malloc(numThreads * sizeof(ZSTD_pthread_t), ctx->customMem);
+         if (!threadPool) return 1;
+         /* replace existing thread pool */
+         memcpy(threadPool, ctx->threads, ctx->threadCapacity * sizeof(*threadPool));
+         ZSTD_free(ctx->threads, ctx->customMem);
+         ctx->threads = threadPool;
+         /* Initialize additional threads */
+         {   size_t threadId;
+             for (threadId = ctx->threadCapacity; threadId < numThreads; ++threadId) {
+                 if (ZSTD_pthread_create(&threadPool[threadId], NULL, &POOL_thread, ctx)) {
+                     ctx->threadCapacity = threadId;
+                     return 1;
+             }   }
+     }   }
+     /* successfully expanded */
+     ctx->threadCapacity = numThreads;
+     ctx->threadLimit = numThreads;
+     return 0;
+ }
+
+ /* @return : 0 on success, 1 on error */
+ int POOL_resize(POOL_ctx* ctx, size_t numThreads)
+ {
+     int result;
+     if (ctx==NULL) return 1;
+     ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+     result = POOL_resize_internal(ctx, numThreads);
+     ZSTD_pthread_cond_broadcast(&ctx->queuePopCond);
+     ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+     return result;
  }
 
  /**
   * Returns 1 if the queue is full and 0 otherwise.
   *
-  * If the queueSize is 1 (the pool was created with an intended queueSize of 0),
-  * then a queue is empty if there is a thread free and no job is waiting.
+  * When queueSize is 1 (pool was created with an intended queueSize of 0),
+  * then a queue is empty if there is a thread free _and_ no job is waiting.
   */
  static int isQueueFull(POOL_ctx const* ctx) {
      if (ctx->queueSize > 1) {
          return ctx->queueHead == ((ctx->queueTail + 1) % ctx->queueSize);
      } else {
-         return ctx->numThreadsBusy == ctx->numThreads ||
+         return (ctx->numThreadsBusy == ctx->threadLimit) ||
                 !ctx->queueEmpty;
      }
  }
 
- void POOL_add(void* ctxVoid, POOL_function function, void *opaque) {
-     POOL_ctx* const ctx = (POOL_ctx*)ctxVoid;
-     if (!ctx) { return; }
 
+ static void POOL_add_internal(POOL_ctx* ctx, POOL_function function, void *opaque)
+ {
+     POOL_job const job = {function, opaque};
+     assert(ctx != NULL);
+     if (ctx->shutdown) return;
+
+     ctx->queueEmpty = 0;
+     ctx->queue[ctx->queueTail] = job;
+     ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize;
+     ZSTD_pthread_cond_signal(&ctx->queuePopCond);
+ }
+
+ void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque)
+ {
+     assert(ctx != NULL);
      ZSTD_pthread_mutex_lock(&ctx->queueMutex);
-     {   POOL_job const job = {function, opaque};
+     /* Wait until there is space in the queue for the new job */
+     while (isQueueFull(ctx) && (!ctx->shutdown)) {
+         ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
+     }
+     POOL_add_internal(ctx, function, opaque);
+     ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+ }
 
-         /* Wait until there is space in the queue for the new job */
-         while (isQueueFull(ctx) && !ctx->shutdown) {
-             ZSTD_pthread_cond_wait(&ctx->queuePushCond, &ctx->queueMutex);
-         }
-         /* The queue is still going => there is space */
-         if (!ctx->shutdown) {
-             ctx->queueEmpty = 0;
-             ctx->queue[ctx->queueTail] = job;
-             ctx->queueTail = (ctx->queueTail + 1) % ctx->queueSize;
-         }
+
+ int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque)
+ {
+     assert(ctx != NULL);
+     ZSTD_pthread_mutex_lock(&ctx->queueMutex);
+     if (isQueueFull(ctx)) {
+         ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
+         return 0;
      }
+     POOL_add_internal(ctx, function, opaque);
      ZSTD_pthread_mutex_unlock(&ctx->queueMutex);
-     ZSTD_pthread_cond_signal(&ctx->queuePopCond);
+     return 1;
  }
 
+
  #else  /* ZSTD_MULTITHREAD not defined */
+
+ /* ========================== */
  /* No multi-threading support */
+ /* ========================== */
+
 
- /* We don't need any data, but if it is empty malloc() might return NULL. */
+ /* We don't need any data, but if it is empty, malloc() might return NULL. */
  struct POOL_ctx_s {
      int dummy;
  };
@@ -240,9 +315,20 @@ void POOL_free(POOL_ctx* ctx) {
      (void)ctx;
  }
 
- void POOL_add(void* ctx, POOL_function function, void* opaque) {
+ int POOL_resize(POOL_ctx* ctx, size_t numThreads) {
+     (void)ctx; (void)numThreads;
+     return 0;
+ }
+
+ void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque) {
+     (void)ctx;
+     function(opaque);
+ }
+
+ int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque) {
      (void)ctx;
      function(opaque);
+     return 1;
  }
 
  size_t POOL_sizeof(POOL_ctx* ctx) {
data/contrib/zstd/lib/common/pool.h
@@ -17,7 +17,8 @@ extern "C" {
 
 
  #include <stddef.h>   /* size_t */
- #include "zstd_internal.h"   /* ZSTD_customMem */
+ #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_customMem */
+ #include "zstd.h"
 
  typedef struct POOL_ctx_s POOL_ctx;
 
@@ -27,35 +28,53 @@ typedef struct POOL_ctx_s POOL_ctx;
   * The maximum number of queued jobs before blocking is `queueSize`.
   * @return : POOL_ctx pointer on success, else NULL.
   */
- POOL_ctx *POOL_create(size_t numThreads, size_t queueSize);
+ POOL_ctx* POOL_create(size_t numThreads, size_t queueSize);
 
- POOL_ctx *POOL_create_advanced(size_t numThreads, size_t queueSize, ZSTD_customMem customMem);
+ POOL_ctx* POOL_create_advanced(size_t numThreads, size_t queueSize,
+                                ZSTD_customMem customMem);
 
  /*! POOL_free() :
-     Free a thread pool returned by POOL_create().
- */
- void POOL_free(POOL_ctx *ctx);
+  * Free a thread pool returned by POOL_create().
+  */
+ void POOL_free(POOL_ctx* ctx);
+
+ /*! POOL_resize() :
+  * Expands or shrinks pool's number of threads.
+  * This is more efficient than releasing + creating a new context,
+  * since it tries to preserve and re-use existing threads.
+  * `numThreads` must be at least 1.
+  * @return : 0 when resize was successful,
+  *           !0 (typically 1) if there is an error.
+  * note : only numThreads can be resized, queueSize remains unchanged.
+  */
+ int POOL_resize(POOL_ctx* ctx, size_t numThreads);
 
  /*! POOL_sizeof() :
-     return memory usage of pool returned by POOL_create().
- */
- size_t POOL_sizeof(POOL_ctx *ctx);
+  * @return threadpool memory usage
+  * note : compatible with NULL (returns 0 in this case)
+  */
+ size_t POOL_sizeof(POOL_ctx* ctx);
 
  /*! POOL_function :
-     The function type that can be added to a thread pool.
- */
- typedef void (*POOL_function)(void *);
- /*! POOL_add_function :
-     The function type for a generic thread pool add function.
- */
- typedef void (*POOL_add_function)(void *, POOL_function, void *);
+  * The function type that can be added to a thread pool.
+  */
+ typedef void (*POOL_function)(void*);
 
  /*! POOL_add() :
-     Add the job `function(opaque)` to the thread pool.
-     Possibly blocks until there is room in the queue.
-     Note : The function may be executed asynchronously, so `opaque` must live until the function has been completed.
- */
- void POOL_add(void *ctx, POOL_function function, void *opaque);
+  * Add the job `function(opaque)` to the thread pool. `ctx` must be valid.
+  * Possibly blocks until there is room in the queue.
+  * Note : The function may be executed asynchronously,
+  *        therefore, `opaque` must live until function has been completed.
+  */
+ void POOL_add(POOL_ctx* ctx, POOL_function function, void* opaque);
+
+
+ /*! POOL_tryAdd() :
+  * Add the job `function(opaque)` to thread pool _if_ a worker is available.
+  * Returns immediately even if not (does not block).
+  * @return : 1 if successful, 0 if not.
+  */
+ int POOL_tryAdd(POOL_ctx* ctx, POOL_function function, void* opaque);
 
 
  #if defined (__cplusplus)
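
Taken together, the updated pool.h declarations form a small internal thread-pool API: create a pool, submit jobs with POOL_add() (blocking when the queue is full) or POOL_tryAdd() (non-blocking), optionally change the worker count with POOL_resize(), and tear everything down with POOL_free(). A rough usage sketch against these declarations — the job function and its payload are invented for illustration, and the header is internal to the zstd source tree rather than part of the installed public API:

    #include <stdio.h>
    #include "pool.h"   /* internal zstd header */

    typedef struct { int id; } JobArg;   /* hypothetical job payload */

    static void printJob(void* opaque)   /* matches the POOL_function signature */
    {
        JobArg* const arg = (JobArg*)opaque;
        printf("running job %d\n", arg->id);
    }

    int main(void)
    {
        JobArg args[4] = { {0}, {1}, {2}, {3} };
        POOL_ctx* const pool = POOL_create(2 /* numThreads */, 2 /* queueSize */);
        if (pool == NULL) return 1;

        POOL_add(pool, printJob, &args[0]);            /* blocks if the queue is full */
        POOL_add(pool, printJob, &args[1]);
        if (!POOL_tryAdd(pool, printJob, &args[2]))    /* returns 0 instead of blocking */
            printf("no worker available for job 2 right now\n");

        if (POOL_resize(pool, 4) != 0)                 /* grow the worker count; 0 on success */
            printf("resize failed\n");
        POOL_add(pool, printJob, &args[3]);

        printf("pool footprint: %u bytes\n", (unsigned)POOL_sizeof(pool));
        POOL_free(pool);    /* drains queued jobs and joins the workers */
        return 0;
    }

Each `opaque` pointer (here, an entry of `args`) must stay alive until its job has run, which holds here because the array outlives POOL_free(). Note also that without ZSTD_MULTITHREAD the stub implementations shown earlier simply run each job synchronously inside POOL_add()/POOL_tryAdd().
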
data/contrib/zstd/lib/common/threading.c
@@ -14,8 +14,8 @@
   * This file will hold wrapper for systems, which do not support pthreads
   */
 
- /* create fake symbol to avoid empty trnaslation unit warning */
- int g_ZSTD_threading_useles_symbol;
+ /* create fake symbol to avoid empty translation unit warning */
+ int g_ZSTD_threading_useless_symbol;
 
  #if defined(ZSTD_MULTITHREAD) && defined(_WIN32)