zstd-ruby 1.3.3.0 → 1.3.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +5 -5
- data/README.md +1 -1
- data/ext/zstdruby/libzstd/BUCK +13 -0
- data/ext/zstdruby/libzstd/README.md +32 -25
- data/ext/zstdruby/libzstd/common/bitstream.h +1 -1
- data/ext/zstdruby/libzstd/common/compiler.h +25 -0
- data/ext/zstdruby/libzstd/common/cpu.h +216 -0
- data/ext/zstdruby/libzstd/common/error_private.c +1 -0
- data/ext/zstdruby/libzstd/common/fse.h +1 -1
- data/ext/zstdruby/libzstd/common/fse_decompress.c +2 -2
- data/ext/zstdruby/libzstd/common/huf.h +114 -89
- data/ext/zstdruby/libzstd/common/pool.c +46 -17
- data/ext/zstdruby/libzstd/common/pool.h +18 -9
- data/ext/zstdruby/libzstd/common/threading.h +12 -12
- data/ext/zstdruby/libzstd/common/zstd_errors.h +16 -7
- data/ext/zstdruby/libzstd/common/zstd_internal.h +4 -5
- data/ext/zstdruby/libzstd/compress/fse_compress.c +19 -11
- data/ext/zstdruby/libzstd/compress/huf_compress.c +160 -62
- data/ext/zstdruby/libzstd/compress/zstd_compress.c +973 -644
- data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +281 -34
- data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +80 -62
- data/ext/zstdruby/libzstd/compress/zstd_double_fast.h +11 -4
- data/ext/zstdruby/libzstd/compress/zstd_fast.c +87 -71
- data/ext/zstdruby/libzstd/compress/zstd_fast.h +10 -6
- data/ext/zstdruby/libzstd/compress/zstd_lazy.c +333 -274
- data/ext/zstdruby/libzstd/compress/zstd_lazy.h +33 -16
- data/ext/zstdruby/libzstd/compress/zstd_ldm.c +305 -359
- data/ext/zstdruby/libzstd/compress/zstd_ldm.h +64 -21
- data/ext/zstdruby/libzstd/compress/zstd_opt.c +194 -56
- data/ext/zstdruby/libzstd/compress/zstd_opt.h +17 -5
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +1131 -449
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +32 -16
- data/ext/zstdruby/libzstd/decompress/huf_decompress.c +390 -290
- data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +777 -439
- data/ext/zstdruby/libzstd/dictBuilder/cover.c +11 -8
- data/ext/zstdruby/libzstd/dictBuilder/zdict.c +83 -50
- data/ext/zstdruby/libzstd/dictBuilder/zdict.h +44 -43
- data/ext/zstdruby/libzstd/legacy/zstd_legacy.h +2 -0
- data/ext/zstdruby/libzstd/legacy/zstd_v04.c +42 -118
- data/ext/zstdruby/libzstd/legacy/zstd_v06.c +2 -2
- data/ext/zstdruby/libzstd/legacy/zstd_v07.c +2 -2
- data/ext/zstdruby/libzstd/zstd.h +254 -254
- data/lib/zstd-ruby/version.rb +1 -1
- metadata +4 -3
data/ext/zstdruby/libzstd/compress/zstd_opt.h

@@ -15,13 +15,25 @@
 extern "C" {
 #endif
 
-#include "
+#include "zstd_compress_internal.h"
 
-
-
+void ZSTD_updateTree(
+        ZSTD_matchState_t* ms, ZSTD_compressionParameters const* cParams,
+        const BYTE* ip, const BYTE* iend);  /* used in ZSTD_loadDictionaryContent() */
 
-size_t
-
+size_t ZSTD_compressBlock_btopt(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btultra(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_btopt_extDict(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btultra_extDict(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        ZSTD_compressionParameters const* cParams, void const* src, size_t srcSize);
 
 #if defined (__cplusplus)
 }
data/ext/zstdruby/libzstd/compress/zstdmt_compress.c

@@ -10,7 +10,8 @@
 
 
 /* ======   Tuning parameters   ====== */
-#define
+#define ZSTDMT_NBWORKERS_MAX 200
+#define ZSTDMT_JOBSIZE_MAX  (MEM_32bits() ? (512 MB) : (2 GB))  /* note : limited by `jobSize` type, which is `unsigned` */
 #define ZSTDMT_OVERLAPLOG_DEFAULT 6
 
 
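Note: the `(512 MB)` and `(2 GB)` expressions in the new `ZSTDMT_JOBSIZE_MAX` line parse because zstd defines size suffixes as multiplier macros (`KB`, `MB`, `GB` in its internal headers). A minimal standalone sketch of the same trick:

    #include <stdio.h>

    /* Same idea as zstd's internal size-suffix macros: the suffix is a
     * macro that multiplies whatever number precedes it. */
    #define KB *(1 <<10)
    #define MB *(1 <<20)
    #define GB *(1U<<30)

    int main(void)
    {
        unsigned const jobSizeMax32 = 512 MB;  /* expands to 512 *(1 <<20) */
        unsigned const jobSizeMax64 = 2 GB;    /* expands to 2 *(1U<<30), still fits in unsigned */
        printf("%u %u\n", jobSizeMax32, jobSizeMax64);
        return 0;
    }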
@@ -22,11 +23,18 @@
 
 /* ======   Dependencies   ====== */
 #include <string.h>      /* memcpy, memset */
+#include <limits.h>      /* INT_MAX */
 #include "pool.h"        /* threadpool */
 #include "threading.h"   /* mutex */
 #include "zstd_compress_internal.h"  /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */
+#include "zstd_ldm.h"
 #include "zstdmt_compress.h"
 
+/* Guards code to support resizing the SeqPool.
+ * We will want to resize the SeqPool to save memory in the future.
+ * Until then, comment the code out since it is unused.
+ */
+#define ZSTD_RESIZE_SEQPOOL 0
 
 /* ======   Debug   ====== */
 #if defined(ZSTD_DEBUG) && (ZSTD_DEBUG>=2)
@@ -81,7 +89,7 @@ static unsigned long long GetCurrentClockTimeMicroseconds(void)
 
 typedef struct buffer_s {
     void* start;
-    size_t
+    size_t capacity;
 } buffer_t;
 
 static const buffer_t g_nullBuffer = { NULL, 0 };
@@ -95,9 +103,9 @@ typedef struct ZSTDMT_bufferPool_s {
     buffer_t bTable[1];   /* variable size */
 } ZSTDMT_bufferPool;
 
-static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned
+static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbWorkers, ZSTD_customMem cMem)
 {
-    unsigned const maxNbBuffers = 2*
+    unsigned const maxNbBuffers = 2*nbWorkers + 3;
     ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_calloc(
         sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem);
     if (bufPool==NULL) return NULL;
@@ -129,17 +137,21 @@ static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)
 static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool)
 {
     size_t const poolSize = sizeof(*bufPool)
-
+                          + (bufPool->totalBuffers - 1) * sizeof(buffer_t);
     unsigned u;
     size_t totalBufferSize = 0;
     ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
     for (u=0; u<bufPool->totalBuffers; u++)
-        totalBufferSize += bufPool->bTable[u].
+        totalBufferSize += bufPool->bTable[u].capacity;
     ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
 
     return poolSize + totalBufferSize;
 }
 
+/* ZSTDMT_setBufferSize() :
+ * all future buffers provided by this buffer pool will have _at least_ this size
+ * note : it's better for all buffers to have same size,
+ * as they become freely interchangeable, reducing malloc/free usages and memory fragmentation */
 static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const bSize)
 {
     ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
@@ -149,7 +161,9 @@ static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const
 }
 
 /** ZSTDMT_getBuffer() :
- *  assumption : bufPool must be valid
+ *  assumption : bufPool must be valid
+ * @return : a buffer, with start pointer and size
+ *  note: allocation may fail, in this case, start==NULL and size==0 */
 static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
 {
     size_t const bSize = bufPool->bufferSize;
@@ -157,12 +171,12 @@ static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
     ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
     if (bufPool->nbBuffers) {   /* try to use an existing buffer */
         buffer_t const buf = bufPool->bTable[--(bufPool->nbBuffers)];
-        size_t const availBufferSize = buf.
+        size_t const availBufferSize = buf.capacity;
         bufPool->bTable[bufPool->nbBuffers] = g_nullBuffer;
         if ((availBufferSize >= bSize) & ((availBufferSize>>3) <= bSize)) {
             /* large enough, but not too much */
             DEBUGLOG(5, "ZSTDMT_getBuffer: provide buffer %u of size %u",
-                        bufPool->nbBuffers, (U32)buf.
+                        bufPool->nbBuffers, (U32)buf.capacity);
             ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
             return buf;
         }
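The reuse test in ZSTDMT_getBuffer() above, `(availBufferSize >= bSize) & ((availBufferSize>>3) <= bSize)`, encodes the policy "large enough, but not too much": a cached buffer is recycled only when its capacity is at least the requested size and at most 8x it, so pool memory tracks the current job size instead of growing monotonically. A restated sketch (my paraphrase, not upstream code):

    #include <stddef.h>

    /* Reuse rule: capacity >= bSize, and capacity/8 <= bSize,
     * i.e. never keep a buffer more than 8x too large. */
    static int bufferIsReusable(size_t capacity, size_t bSize)
    {
        return (capacity >= bSize) && ((capacity >> 3) <= bSize);
    }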
@@ -176,12 +190,42 @@ static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
     {   buffer_t buffer;
         void* const start = ZSTD_malloc(bSize, bufPool->cMem);
         buffer.start = start;   /* note : start can be NULL if malloc fails ! */
-        buffer.
-
+        buffer.capacity = (start==NULL) ? 0 : bSize;
+        if (start==NULL) {
+            DEBUGLOG(5, "ZSTDMT_getBuffer: buffer allocation failure !!");
+        } else {
+            DEBUGLOG(5, "ZSTDMT_getBuffer: created buffer of size %u", (U32)bSize);
+        }
         return buffer;
     }
 }
 
+#if ZSTD_RESIZE_SEQPOOL
+/** ZSTDMT_resizeBuffer() :
+ * assumption : bufPool must be valid
+ * @return : a buffer that is at least the buffer pool buffer size.
+ *           If a reallocation happens, the data in the input buffer is copied.
+ */
+static buffer_t ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buffer)
+{
+    size_t const bSize = bufPool->bufferSize;
+    if (buffer.capacity < bSize) {
+        void* const start = ZSTD_malloc(bSize, bufPool->cMem);
+        buffer_t newBuffer;
+        newBuffer.start = start;
+        newBuffer.capacity = start == NULL ? 0 : bSize;
+        if (start != NULL) {
+            assert(newBuffer.capacity >= buffer.capacity);
+            memcpy(newBuffer.start, buffer.start, buffer.capacity);
+            DEBUGLOG(5, "ZSTDMT_resizeBuffer: created buffer of size %u", (U32)bSize);
+            return newBuffer;
+        }
+        DEBUGLOG(5, "ZSTDMT_resizeBuffer: buffer allocation failure !!");
+    }
+    return buffer;
+}
+#endif
+
 /* store buffer for later re-use, up to pool capacity */
 static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
 {
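ZSTDMT_resizeBuffer() above (compiled out while ZSTD_RESIZE_SEQPOOL is 0) follows a grow-and-copy contract: on allocation failure the caller gets the original buffer back untouched, so data is never lost. A standalone analogue under plain malloc/free; the upstream version uses ZSTD_malloc with a custom allocator and leaves release of the old storage to pool logic, which this sketch does not reproduce:

    #include <stdlib.h>
    #include <string.h>

    typedef struct { void* start; size_t capacity; } buf_t;

    /* On allocation failure the caller keeps the original, still-valid buffer. */
    static buf_t growBuffer(buf_t buffer, size_t newCapacity)
    {
        if (buffer.capacity < newCapacity) {
            void* const start = malloc(newCapacity);
            if (start != NULL) {
                buf_t newBuffer = { start, newCapacity };
                if (buffer.capacity > 0)
                    memcpy(newBuffer.start, buffer.start, buffer.capacity);
                free(buffer.start);   /* hand back the old storage */
                return newBuffer;
            }
        }
        return buffer;   /* already large enough, or allocation failed */
    }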
@@ -191,7 +235,7 @@ static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
     if (bufPool->nbBuffers < bufPool->totalBuffers) {
         bufPool->bTable[bufPool->nbBuffers++] = buf;   /* stored for later use */
         DEBUGLOG(5, "ZSTDMT_releaseBuffer: stored buffer of size %u in slot %u",
-                    (U32)buf.
+                    (U32)buf.capacity, (U32)(bufPool->nbBuffers-1));
         ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
         return;
     }
@@ -201,21 +245,73 @@ static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
     ZSTD_free(buf.start, bufPool->cMem);
 }
 
-
-
-
+
+/* =====   Seq Pool Wrapper   ====== */
+
+static rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0};
+
+typedef ZSTDMT_bufferPool ZSTDMT_seqPool;
+
+static size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool)
+{
+    return ZSTDMT_sizeof_bufferPool(seqPool);
+}
+
+static rawSeqStore_t bufferToSeq(buffer_t buffer)
 {
-
-
+    rawSeqStore_t seq = {NULL, 0, 0, 0};
+    seq.seq = (rawSeq*)buffer.start;
+    seq.capacity = buffer.capacity / sizeof(rawSeq);
+    return seq;
+}
 
-
-
-
+static buffer_t seqToBuffer(rawSeqStore_t seq)
+{
+    buffer_t buffer;
+    buffer.start = seq.seq;
+    buffer.capacity = seq.capacity * sizeof(rawSeq);
+    return buffer;
+}
 
-
-
+static rawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool)
+{
+    if (seqPool->bufferSize == 0) {
+        return kNullRawSeqStore;
+    }
+    return bufferToSeq(ZSTDMT_getBuffer(seqPool));
 }
 
+#if ZSTD_RESIZE_SEQPOOL
+static rawSeqStore_t ZSTDMT_resizeSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq)
+{
+    return bufferToSeq(ZSTDMT_resizeBuffer(seqPool, seqToBuffer(seq)));
+}
+#endif
+
+static void ZSTDMT_releaseSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq)
+{
+    ZSTDMT_releaseBuffer(seqPool, seqToBuffer(seq));
+}
+
+static void ZSTDMT_setNbSeq(ZSTDMT_seqPool* const seqPool, size_t const nbSeq)
+{
+    ZSTDMT_setBufferSize(seqPool, nbSeq * sizeof(rawSeq));
+}
+
+static ZSTDMT_seqPool* ZSTDMT_createSeqPool(unsigned nbWorkers, ZSTD_customMem cMem)
+{
+    ZSTDMT_seqPool* seqPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
+    ZSTDMT_setNbSeq(seqPool, 0);
+    return seqPool;
+}
+
+static void ZSTDMT_freeSeqPool(ZSTDMT_seqPool* seqPool)
+{
+    ZSTDMT_freeBufferPool(seqPool);
+}
+
+
+
 /* =====   CCtx Pool   ===== */
 /* a single CCtx Pool can be invoked from multiple threads in parallel */
 
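The Seq Pool added above is a thin view over the byte-oriented buffer pool: capacities convert between bytes and whole rawSeq records, rounding down on the byte-to-sequence side. A simplified sketch of that round trip (types reduced for illustration; upstream's rawSeqStore_t also tracks pos and size):

    #include <stddef.h>

    typedef struct { unsigned offset, litLength, matchLength; } rawSeq_;
    typedef struct { void* start; size_t capacity; } byteBuf_;   /* capacity in bytes */
    typedef struct { rawSeq_* seq; size_t capacity; } seqBuf_;   /* capacity in sequences */

    static seqBuf_ toSeq(byteBuf_ b)
    {   /* byte capacity rounds down to a whole number of sequences */
        seqBuf_ s = { (rawSeq_*)b.start, b.capacity / sizeof(rawSeq_) };
        return s;
    }

    static byteBuf_ toBuffer(seqBuf_ s)
    {
        byteBuf_ b = { s.seq, s.capacity * sizeof(rawSeq_) };
        return b;
    }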
@@ -238,23 +334,24 @@ static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
 }
 
 /* ZSTDMT_createCCtxPool() :
- * implies
-static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(unsigned
+ * implies nbWorkers >= 1 , checked by caller ZSTDMT_createCCtx() */
+static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(unsigned nbWorkers,
                                               ZSTD_customMem cMem)
 {
     ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_calloc(
-        sizeof(ZSTDMT_CCtxPool) + (
+        sizeof(ZSTDMT_CCtxPool) + (nbWorkers-1)*sizeof(ZSTD_CCtx*), cMem);
+    assert(nbWorkers > 0);
     if (!cctxPool) return NULL;
     if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) {
         ZSTD_free(cctxPool, cMem);
         return NULL;
     }
     cctxPool->cMem = cMem;
-    cctxPool->totalCCtx =
+    cctxPool->totalCCtx = nbWorkers;
     cctxPool->availCCtx = 1;   /* at least one cctx for single-thread mode */
     cctxPool->cctx[0] = ZSTD_createCCtx_advanced(cMem);
     if (!cctxPool->cctx[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; }
-    DEBUGLOG(3, "cctxPool created, with %u
+    DEBUGLOG(3, "cctxPool created, with %u workers", nbWorkers);
     return cctxPool;
 }
 
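ZSTDMT_createCCtxPool() allocates only cctx[0] eagerly ("at least one cctx for single-thread mode"); as I read the surrounding code, further contexts are created on demand when the pool runs empty and recycled afterwards, up to totalCCtx. A hypothetical standalone sketch of that lazy-pool shape (no mutex, unlike the real pool; names and sizes are illustrative):

    #include <stdlib.h>

    typedef struct { int id; } Ctx;

    enum { POOL_CAP = 8 };
    typedef struct {
        Ctx* slots[POOL_CAP];
        int  avail;   /* number of cached, ready-to-use contexts */
    } LazyPool;

    static Ctx* poolGet(LazyPool* p)
    {
        if (p->avail > 0) return p->slots[--p->avail];
        return (Ctx*)calloc(1, sizeof(Ctx));   /* pool empty : create on demand */
    }

    static void poolPut(LazyPool* p, Ctx* c)
    {
        if (p->avail < POOL_CAP) p->slots[p->avail++] = c;
        else free(c);   /* overflow : should not happen when sized to the worker count */
    }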
@@ -262,15 +359,16 @@ static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(unsigned nbThreads,
 static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool)
 {
     ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
-    {   unsigned const
+    {   unsigned const nbWorkers = cctxPool->totalCCtx;
         size_t const poolSize = sizeof(*cctxPool)
-                                + (
+                                + (nbWorkers-1) * sizeof(ZSTD_CCtx*);
         unsigned u;
         size_t totalCCtxSize = 0;
-        for (u=0; u<
+        for (u=0; u<nbWorkers; u++) {
             totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctx[u]);
         }
         ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
+        assert(nbWorkers > 0);
         return poolSize + totalCCtxSize;
     }
 }
@@ -297,111 +395,318 @@ static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx)
     if (pool->availCCtx < pool->totalCCtx)
         pool->cctx[pool->availCCtx++] = cctx;
     else {
-        /* pool overflow : should not happen, since totalCCtx==
-        DEBUGLOG(
+        /* pool overflow : should not happen, since totalCCtx==nbWorkers */
+        DEBUGLOG(4, "CCtx pool overflow : free cctx");
         ZSTD_freeCCtx(cctx);
     }
     ZSTD_pthread_mutex_unlock(&pool->poolMutex);
 }
 
+/* ====   Serial State   ==== */
 
-
+typedef struct {
+    void const* start;
+    size_t size;
+} range_t;
 
 typedef struct {
-
-
-
-    size_t   srcSize;
-    buffer_t dstBuff;
-    size_t   cSize;
-    size_t   dstFlushed;
-    unsigned firstChunk;
-    unsigned lastChunk;
-    unsigned jobCompleted;
-    unsigned jobScanned;
-    ZSTD_pthread_mutex_t* jobCompleted_mutex;
-    ZSTD_pthread_cond_t* jobCompleted_cond;
+    /* All variables in the struct are protected by mutex. */
+    ZSTD_pthread_mutex_t mutex;
+    ZSTD_pthread_cond_t cond;
     ZSTD_CCtx_params params;
-
-
-
-
+    ldmState_t ldmState;
+    XXH64_state_t xxhState;
+    unsigned nextJobID;
+    /* Protects ldmWindow.
+     * Must be acquired after the main mutex when acquiring both.
+     */
+    ZSTD_pthread_mutex_t ldmWindowMutex;
+    ZSTD_pthread_cond_t ldmWindowCond;  /* Signaled when ldmWindow is udpated */
+    ZSTD_window_t ldmWindow;  /* A thread-safe copy of ldmState.window */
+} serialState_t;
+
+static int ZSTDMT_serialState_reset(serialState_t* serialState, ZSTDMT_seqPool* seqPool, ZSTD_CCtx_params params)
+{
+    /* Adjust parameters */
+    if (params.ldmParams.enableLdm) {
+        DEBUGLOG(4, "LDM window size = %u KB", (1U << params.cParams.windowLog) >> 10);
+        params.ldmParams.windowLog = params.cParams.windowLog;
+        ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
+        assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
+        assert(params.ldmParams.hashEveryLog < 32);
+        serialState->ldmState.hashPower =
+                ZSTD_ldm_getHashPower(params.ldmParams.minMatchLength);
+    } else {
+        memset(&params.ldmParams, 0, sizeof(params.ldmParams));
+    }
+    serialState->nextJobID = 0;
+    if (params.fParams.checksumFlag)
+        XXH64_reset(&serialState->xxhState, 0);
+    if (params.ldmParams.enableLdm) {
+        ZSTD_customMem cMem = params.customMem;
+        unsigned const hashLog = params.ldmParams.hashLog;
+        size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t);
+        unsigned const bucketLog =
+            params.ldmParams.hashLog - params.ldmParams.bucketSizeLog;
+        size_t const bucketSize = (size_t)1 << bucketLog;
+        unsigned const prevBucketLog =
+            serialState->params.ldmParams.hashLog -
+            serialState->params.ldmParams.bucketSizeLog;
+        /* Size the seq pool tables */
+        ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, params.jobSize));
+        /* Reset the window */
+        ZSTD_window_clear(&serialState->ldmState.window);
+        serialState->ldmWindow = serialState->ldmState.window;
+        /* Resize tables and output space if necessary. */
+        if (serialState->ldmState.hashTable == NULL || serialState->params.ldmParams.hashLog < hashLog) {
+            ZSTD_free(serialState->ldmState.hashTable, cMem);
+            serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_malloc(hashSize, cMem);
+        }
+        if (serialState->ldmState.bucketOffsets == NULL || prevBucketLog < bucketLog) {
+            ZSTD_free(serialState->ldmState.bucketOffsets, cMem);
+            serialState->ldmState.bucketOffsets = (BYTE*)ZSTD_malloc(bucketSize, cMem);
+        }
+        if (!serialState->ldmState.hashTable || !serialState->ldmState.bucketOffsets)
+            return 1;
+        /* Zero the tables */
+        memset(serialState->ldmState.hashTable, 0, hashSize);
+        memset(serialState->ldmState.bucketOffsets, 0, bucketSize);
+    }
+    serialState->params = params;
+    return 0;
+}
+
+static int ZSTDMT_serialState_init(serialState_t* serialState)
+{
+    int initError = 0;
+    memset(serialState, 0, sizeof(*serialState));
+    initError |= ZSTD_pthread_mutex_init(&serialState->mutex, NULL);
+    initError |= ZSTD_pthread_cond_init(&serialState->cond, NULL);
+    initError |= ZSTD_pthread_mutex_init(&serialState->ldmWindowMutex, NULL);
+    initError |= ZSTD_pthread_cond_init(&serialState->ldmWindowCond, NULL);
+    return initError;
+}
+
+static void ZSTDMT_serialState_free(serialState_t* serialState)
+{
+    ZSTD_customMem cMem = serialState->params.customMem;
+    ZSTD_pthread_mutex_destroy(&serialState->mutex);
+    ZSTD_pthread_cond_destroy(&serialState->cond);
+    ZSTD_pthread_mutex_destroy(&serialState->ldmWindowMutex);
+    ZSTD_pthread_cond_destroy(&serialState->ldmWindowCond);
+    ZSTD_free(serialState->ldmState.hashTable, cMem);
+    ZSTD_free(serialState->ldmState.bucketOffsets, cMem);
+}
+
+static void ZSTDMT_serialState_update(serialState_t* serialState,
+                                      ZSTD_CCtx* jobCCtx, rawSeqStore_t seqStore,
+                                      range_t src, unsigned jobID)
+{
+    /* Wait for our turn */
+    ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
+    while (serialState->nextJobID < jobID) {
+        ZSTD_pthread_cond_wait(&serialState->cond, &serialState->mutex);
+    }
+    /* A future job may error and skip our job */
+    if (serialState->nextJobID == jobID) {
+        /* It is now our turn, do any processing necessary */
+        if (serialState->params.ldmParams.enableLdm) {
+            size_t error;
+            assert(seqStore.seq != NULL && seqStore.pos == 0 &&
+                   seqStore.size == 0 && seqStore.capacity > 0);
+            ZSTD_window_update(&serialState->ldmState.window, src.start, src.size);
+            error = ZSTD_ldm_generateSequences(
+                &serialState->ldmState, &seqStore,
+                &serialState->params.ldmParams, src.start, src.size);
+            /* We provide a large enough buffer to never fail. */
+            assert(!ZSTD_isError(error)); (void)error;
+            /* Update ldmWindow to match the ldmState.window and signal the main
+             * thread if it is waiting for a buffer.
+             */
+            ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);
+            serialState->ldmWindow = serialState->ldmState.window;
+            ZSTD_pthread_cond_signal(&serialState->ldmWindowCond);
+            ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
+        }
+        if (serialState->params.fParams.checksumFlag && src.size > 0)
+            XXH64_update(&serialState->xxhState, src.start, src.size);
+    }
+    /* Now it is the next jobs turn */
+    serialState->nextJobID++;
+    ZSTD_pthread_cond_broadcast(&serialState->cond);
+    ZSTD_pthread_mutex_unlock(&serialState->mutex);
+
+    if (seqStore.size > 0) {
+        size_t const err = ZSTD_referenceExternalSequences(
+            jobCCtx, seqStore.seq, seqStore.size);
+        assert(serialState->params.ldmParams.enableLdm);
+        assert(!ZSTD_isError(err));
+        (void)err;
+    }
+}
+
+static void ZSTDMT_serialState_ensureFinished(serialState_t* serialState,
+                                              unsigned jobID, size_t cSize)
+{
+    ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
+    if (serialState->nextJobID <= jobID) {
+        assert(ZSTD_isError(cSize)); (void)cSize;
+        DEBUGLOG(5, "Skipping past job %u because of error", jobID);
+        serialState->nextJobID = jobID + 1;
+        ZSTD_pthread_cond_broadcast(&serialState->cond);
+
+        ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);
+        ZSTD_window_clear(&serialState->ldmWindow);
+        ZSTD_pthread_cond_signal(&serialState->ldmWindowCond);
+        ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
+    }
+    ZSTD_pthread_mutex_unlock(&serialState->mutex);
+
+}
+
+
+/* ------------------------------------------ */
+/* =====          Worker thread         ===== */
+/* ------------------------------------------ */
+
+static const range_t kNullRange = { NULL, 0 };
+
+typedef struct {
+    size_t   consumed;                 /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */
+    size_t   cSize;                    /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */
+    ZSTD_pthread_mutex_t job_mutex;    /* Thread-safe - used by mtctx and worker */
+    ZSTD_pthread_cond_t job_cond;      /* Thread-safe - used by mtctx and worker */
+    ZSTDMT_CCtxPool* cctxPool;         /* Thread-safe - used by mtctx and (all) workers */
+    ZSTDMT_bufferPool* bufPool;        /* Thread-safe - used by mtctx and (all) workers */
+    ZSTDMT_seqPool* seqPool;           /* Thread-safe - used by mtctx and (all) workers */
+    serialState_t* serial;             /* Thread-safe - used by mtctx and (all) workers */
+    buffer_t dstBuff;                  /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */
+    range_t prefix;                    /* set by mtctx, then read by worker & mtctx => no barrier */
+    range_t src;                       /* set by mtctx, then read by worker & mtctx => no barrier */
+    unsigned jobID;                    /* set by mtctx, then read by worker => no barrier */
+    unsigned firstJob;                 /* set by mtctx, then read by worker => no barrier */
+    unsigned lastJob;                  /* set by mtctx, then read by worker => no barrier */
+    ZSTD_CCtx_params params;           /* set by mtctx, then read by worker => no barrier */
+    const ZSTD_CDict* cdict;           /* set by mtctx, then read by worker => no barrier */
+    unsigned long long fullFrameSize;  /* set by mtctx, then read by worker => no barrier */
+    size_t   dstFlushed;               /* used only by mtctx */
+    unsigned frameChecksumNeeded;      /* used only by mtctx */
 } ZSTDMT_jobDescription;
 
-/*
-void
+/* ZSTDMT_compressionJob() is a POOL_function type */
+void ZSTDMT_compressionJob(void* jobDescription)
 {
     ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription;
+    ZSTD_CCtx_params jobParams = job->params;   /* do not modify job->params ! copy it, modify the copy */
     ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool);
-
+    rawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool);
     buffer_t dstBuff = job->dstBuff;
-    DEBUGLOG(5, "ZSTDMT_compressChunk: job (first:%u) (last:%u) : prefixSize %u, srcSize %u ",
-                 job->firstChunk, job->lastChunk, (U32)job->prefixSize, (U32)job->srcSize);
 
+    /* Don't compute the checksum for chunks, since we compute it externally,
+     * but write it in the header.
+     */
+    if (job->jobID != 0) jobParams.fParams.checksumFlag = 0;
+    /* Don't run LDM for the chunks, since we handle it externally */
+    jobParams.ldmParams.enableLdm = 0;
+
+    /* ressources */
     if (cctx==NULL) {
         job->cSize = ERROR(memory_allocation);
         goto _endJob;
     }
-
-    if (dstBuff.start == NULL) {
+    if (dstBuff.start == NULL) {   /* streaming job : doesn't provide a dstBuffer */
         dstBuff = ZSTDMT_getBuffer(job->bufPool);
         if (dstBuff.start==NULL) {
             job->cSize = ERROR(memory_allocation);
             goto _endJob;
         }
-        job->dstBuff = dstBuff;
-        DEBUGLOG(5, "ZSTDMT_compressChunk: received dstBuff of size %u", (U32)dstBuff.size);
+        job->dstBuff = dstBuff;   /* this value can be read in ZSTDMT_flush, when it copies the whole job */
     }
 
+    /* init */
     if (job->cdict) {
-        size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0,
-
-        assert(job->firstChunk);  /* only allowed for first job */
+        size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, job->cdict, jobParams, job->fullFrameSize);
+        assert(job->firstJob);  /* only allowed for first job */
         if (ZSTD_isError(initError)) { job->cSize = initError; goto _endJob; }
     } else {  /* srcStart points at reloaded section */
-        U64 const pledgedSrcSize = job->
-
-
-
-
-
-            goto _endJob;
-        }
-        DEBUGLOG(5, "ZSTDMT_compressChunk: invoking ZSTD_compressBegin_advanced_internal with windowLog = %u ", jobParams.cParams.windowLog);
+        U64 const pledgedSrcSize = job->firstJob ? job->fullFrameSize : job->src.size;
+        {   size_t const forceWindowError = ZSTD_CCtxParam_setParameter(&jobParams, ZSTD_p_forceMaxWindow, !job->firstJob);
+            if (ZSTD_isError(forceWindowError)) {
+                job->cSize = forceWindowError;
+                goto _endJob;
+        }   }
         {   size_t const initError = ZSTD_compressBegin_advanced_internal(cctx,
-                job->
-                NULL,
+                job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in "content-only" mode (no header analysis) */
+                NULL, /*cdict*/
                 jobParams, pledgedSrcSize);
             if (ZSTD_isError(initError)) {
-                DEBUGLOG(5, "ZSTD_compressBegin_advanced_internal error : %s ", ZSTD_getErrorName(initError));
                 job->cSize = initError;
                 goto _endJob;
-
-
-
-
+    }   }   }
+
+    /* Perform serial step as early as possible, but after CCtx initialization */
+    ZSTDMT_serialState_update(job->serial, cctx, rawSeqStore, job->src, job->jobID);
+
+    if (!job->firstJob) {  /* flush and overwrite frame header when it's not first job */
+        size_t const hSize = ZSTD_compressContinue(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0);
         if (ZSTD_isError(hSize)) { job->cSize = hSize; /* save error code */ goto _endJob; }
+        DEBUGLOG(5, "ZSTDMT_compressionJob: flush and overwrite %u bytes of frame header (not first job)", (U32)hSize);
         ZSTD_invalidateRepCodes(cctx);
     }
 
-
-
-
-
-
-
-
-
+    /* compress */
+    {   size_t const chunkSize = 4*ZSTD_BLOCKSIZE_MAX;
+        int const nbChunks = (int)((job->src.size + (chunkSize-1)) / chunkSize);
+        const BYTE* ip = (const BYTE*) job->src.start;
+        BYTE* const ostart = (BYTE*)dstBuff.start;
+        BYTE* op = ostart;
+        BYTE* oend = op + dstBuff.capacity;
+        int chunkNb;
+        if (sizeof(size_t) > sizeof(int)) assert(job->src.size < ((size_t)INT_MAX) * chunkSize);   /* check overflow */
+        DEBUGLOG(5, "ZSTDMT_compressionJob: compress %u bytes in %i blocks", (U32)job->src.size, nbChunks);
+        assert(job->cSize == 0);
+        for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) {
+            size_t const cSize = ZSTD_compressContinue(cctx, op, oend-op, ip, chunkSize);
+            if (ZSTD_isError(cSize)) { job->cSize = cSize; goto _endJob; }
+            ip += chunkSize;
+            op += cSize; assert(op < oend);
+            /* stats */
+            ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
+            job->cSize += cSize;
+            job->consumed = chunkSize * chunkNb;
+            DEBUGLOG(5, "ZSTDMT_compressionJob: compress new block : cSize==%u bytes (total: %u)",
+                        (U32)cSize, (U32)job->cSize);
+            ZSTD_pthread_cond_signal(&job->job_cond);   /* warns some more data is ready to be flushed */
+            ZSTD_pthread_mutex_unlock(&job->job_mutex);
+        }
+        /* last block */
+        assert(chunkSize > 0); assert((chunkSize & (chunkSize - 1)) == 0);  /* chunkSize must be power of 2 for mask==(chunkSize-1) to work */
+        if ((nbChunks > 0) | job->lastJob /*must output a "last block" flag*/ ) {
+            size_t const lastBlockSize1 = job->src.size & (chunkSize-1);
+            size_t const lastBlockSize = ((lastBlockSize1==0) & (job->src.size>=chunkSize)) ? chunkSize : lastBlockSize1;
+            size_t const cSize = (job->lastJob) ?
+                 ZSTD_compressEnd     (cctx, op, oend-op, ip, lastBlockSize) :
+                 ZSTD_compressContinue(cctx, op, oend-op, ip, lastBlockSize);
+            if (ZSTD_isError(cSize)) { job->cSize = cSize; goto _endJob; }
+            /* stats */
+            ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
+            job->cSize += cSize;
+            ZSTD_pthread_mutex_unlock(&job->job_mutex);
+    }   }
 
 _endJob:
+    ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize);
+    if (job->prefix.size > 0)
+        DEBUGLOG(5, "Finished with prefix: %zx", (size_t)job->prefix.start);
+    DEBUGLOG(5, "Finished with source: %zx", (size_t)job->src.start);
+    /* release resources */
+    ZSTDMT_releaseSeq(job->seqPool, rawSeqStore);
     ZSTDMT_releaseCCtx(job->cctxPool, cctx);
-
-    job->
-
-    job->
-    job->
-    ZSTD_pthread_cond_signal(job->jobCompleted_cond);
-    ZSTD_pthread_mutex_unlock(job->jobCompleted_mutex);
+    /* report */
+    ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
+    job->consumed = job->src.size;
+    ZSTD_pthread_cond_signal(&job->job_cond);
+    ZSTD_pthread_mutex_unlock(&job->job_mutex);
 }
 
 
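The serialState_t machinery above is a ticket turnstile: each worker blocks until nextJobID reaches its own jobID, performs the order-sensitive work (XXH64 checksum, LDM sequence generation), then advances the ticket and broadcasts. A minimal pthread sketch of the same pattern (the error-skip path of ZSTDMT_serialState_ensureFinished() is omitted):

    #include <pthread.h>

    typedef struct {
        pthread_mutex_t mutex;
        pthread_cond_t  cond;
        unsigned nextJobID;   /* the ticket currently allowed through */
    } Turnstile;

    static void serialStep(Turnstile* t, unsigned jobID, void (*work)(void*), void* arg)
    {
        pthread_mutex_lock(&t->mutex);
        while (t->nextJobID < jobID)
            pthread_cond_wait(&t->cond, &t->mutex);   /* not our turn yet */
        if (t->nextJobID == jobID)                    /* an errored job may have skipped us */
            work(arg);                                /* serial section, runs in job order */
        t->nextJobID++;
        pthread_cond_broadcast(&t->cond);             /* wake whoever holds the next ticket */
        pthread_mutex_unlock(&t->mutex);
    }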
@@ -410,109 +715,141 @@ _endJob:
 /* ------------------------------------------ */
 
 typedef struct {
+    range_t prefix;      /* read-only non-owned prefix buffer */
     buffer_t buffer;
     size_t filled;
 } inBuff_t;
 
+typedef struct {
+  BYTE* buffer;     /* The round input buffer. All jobs get references
+                     * to pieces of the buffer. ZSTDMT_tryGetInputRange()
+                     * handles handing out job input buffers, and makes
+                     * sure it doesn't overlap with any pieces still in use.
+                     */
+  size_t capacity;  /* The capacity of buffer. */
+  size_t pos;       /* The position of the current inBuff in the round
+                     * buffer. Updated past the end if the inBuff once
+                     * the inBuff is sent to the worker thread.
+                     * pos <= capacity.
+                     */
+} roundBuff_t;
+
+static const roundBuff_t kNullRoundBuff = {NULL, 0, 0};
+
 struct ZSTDMT_CCtx_s {
     POOL_ctx* factory;
     ZSTDMT_jobDescription* jobs;
     ZSTDMT_bufferPool* bufPool;
     ZSTDMT_CCtxPool* cctxPool;
-
-    ZSTD_pthread_cond_t jobCompleted_cond;
+    ZSTDMT_seqPool* seqPool;
     ZSTD_CCtx_params params;
     size_t targetSectionSize;
-    size_t
-
-    size_t targetDictSize;
+    size_t targetPrefixSize;
+    roundBuff_t roundBuff;
     inBuff_t inBuff;
-
-
+    int jobReady;        /* 1 => one job is already prepared, but pool has shortage of workers. Don't create another one. */
+    serialState_t serial;
+    unsigned singleBlockingThread;
     unsigned jobIDMask;
     unsigned doneJobID;
     unsigned nextJobID;
     unsigned frameEnded;
     unsigned allJobsCompleted;
     unsigned long long frameContentSize;
+    unsigned long long consumed;
+    unsigned long long produced;
     ZSTD_customMem cMem;
     ZSTD_CDict* cdictLocal;
     const ZSTD_CDict* cdict;
 };
 
-static ZSTDMT_jobDescription*
+static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZSTD_customMem cMem)
+{
+    U32 jobNb;
+    if (jobTable == NULL) return;
+    for (jobNb=0; jobNb<nbJobs; jobNb++) {
+        ZSTD_pthread_mutex_destroy(&jobTable[jobNb].job_mutex);
+        ZSTD_pthread_cond_destroy(&jobTable[jobNb].job_cond);
+    }
+    ZSTD_free(jobTable, cMem);
+}
+
+/* ZSTDMT_allocJobsTable()
+ * allocate and init a job table.
+ * update *nbJobsPtr to next power of 2 value, as size of table */
+static ZSTDMT_jobDescription* ZSTDMT_createJobsTable(U32* nbJobsPtr, ZSTD_customMem cMem)
 {
     U32 const nbJobsLog2 = ZSTD_highbit32(*nbJobsPtr) + 1;
     U32 const nbJobs = 1 << nbJobsLog2;
+    U32 jobNb;
+    ZSTDMT_jobDescription* const jobTable = (ZSTDMT_jobDescription*)
+        ZSTD_calloc(nbJobs * sizeof(ZSTDMT_jobDescription), cMem);
+    int initError = 0;
+    if (jobTable==NULL) return NULL;
     *nbJobsPtr = nbJobs;
-
-
+    for (jobNb=0; jobNb<nbJobs; jobNb++) {
+        initError |= ZSTD_pthread_mutex_init(&jobTable[jobNb].job_mutex, NULL);
+        initError |= ZSTD_pthread_cond_init(&jobTable[jobNb].job_cond, NULL);
+    }
+    if (initError != 0) {
+        ZSTDMT_freeJobsTable(jobTable, nbJobs, cMem);
+        return NULL;
+    }
+    return jobTable;
 }
 
-/*
+/* ZSTDMT_CCtxParam_setNbWorkers():
  * Internal use only */
-size_t
+size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers)
 {
-    if (
-
-    params->nbThreads = nbThreads;
+    if (nbWorkers > ZSTDMT_NBWORKERS_MAX) nbWorkers = ZSTDMT_NBWORKERS_MAX;
+    params->nbWorkers = nbWorkers;
     params->overlapSizeLog = ZSTDMT_OVERLAPLOG_DEFAULT;
     params->jobSize = 0;
-    return
-}
-
-/* ZSTDMT_getNbThreads():
- * @return nb threads currently active in mtctx.
- * mtctx must be valid */
-size_t ZSTDMT_getNbThreads(const ZSTDMT_CCtx* mtctx)
-{
-    assert(mtctx != NULL);
-    return mtctx->params.nbThreads;
+    return nbWorkers;
 }
 
-ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned
+ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem)
 {
     ZSTDMT_CCtx* mtctx;
-    U32 nbJobs =
-
+    U32 nbJobs = nbWorkers + 2;
+    int initError;
+    DEBUGLOG(3, "ZSTDMT_createCCtx_advanced (nbWorkers = %u)", nbWorkers);
 
-    if (
-
+    if (nbWorkers < 1) return NULL;
+    nbWorkers = MIN(nbWorkers , ZSTDMT_NBWORKERS_MAX);
     if ((cMem.customAlloc!=NULL) ^ (cMem.customFree!=NULL))
         /* invalid custom allocator */
         return NULL;
 
     mtctx = (ZSTDMT_CCtx*) ZSTD_calloc(sizeof(ZSTDMT_CCtx), cMem);
     if (!mtctx) return NULL;
-
+    ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
     mtctx->cMem = cMem;
     mtctx->allJobsCompleted = 1;
-    mtctx->factory = POOL_create_advanced(
-    mtctx->jobs =
+    mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem);
+    mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem);
+    assert(nbJobs > 0); assert((nbJobs & (nbJobs - 1)) == 0);  /* ensure nbJobs is a power of 2 */
     mtctx->jobIDMask = nbJobs - 1;
-    mtctx->bufPool = ZSTDMT_createBufferPool(
-    mtctx->cctxPool = ZSTDMT_createCCtxPool(
-
+    mtctx->bufPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
+    mtctx->cctxPool = ZSTDMT_createCCtxPool(nbWorkers, cMem);
+    mtctx->seqPool = ZSTDMT_createSeqPool(nbWorkers, cMem);
+    initError = ZSTDMT_serialState_init(&mtctx->serial);
+    mtctx->roundBuff = kNullRoundBuff;
+    if (!mtctx->factory | !mtctx->jobs | !mtctx->bufPool | !mtctx->cctxPool | !mtctx->seqPool | initError) {
        ZSTDMT_freeCCtx(mtctx);
        return NULL;
    }
-
-        ZSTDMT_freeCCtx(mtctx);
-        return NULL;
-    }
-    if (ZSTD_pthread_cond_init(&mtctx->jobCompleted_cond, NULL)) {
-        ZSTDMT_freeCCtx(mtctx);
-        return NULL;
-    }
-    DEBUGLOG(3, "mt_cctx created, for %u threads", nbThreads);
+    DEBUGLOG(3, "mt_cctx created, for %u threads", nbWorkers);
     return mtctx;
 }
 
-ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned
+ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers)
 {
-    return ZSTDMT_createCCtx_advanced(
+    return ZSTDMT_createCCtx_advanced(nbWorkers, ZSTD_defaultCMem);
 }
 
+
 /* ZSTDMT_releaseAllJobResources() :
  * note : ensure all workers are killed first ! */
 static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx)
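ZSTDMT_createJobsTable() rounds the table size up to a power of 2 so that mtctx->jobIDMask can map the ever-growing doneJobID/nextJobID counters onto slots with a single AND, with wrap-around handled automatically. A sketch of that indexing:

    #include <assert.h>

    /* jobIDMask == nbJobs-1 with nbJobs a power of 2, so one AND replaces a
     * modulo and the counters can grow without bound. */
    static unsigned jobSlot(unsigned jobID, unsigned jobIDMask)
    {
        assert(((jobIDMask + 1) & jobIDMask) == 0);   /* mask+1 is a power of 2 */
        return jobID & jobIDMask;
    }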
@@ -523,29 +860,26 @@ static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx)
         DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start);
         ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);
         mtctx->jobs[jobID].dstBuff = g_nullBuffer;
-
-        ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].src);
-        mtctx->jobs[jobID].src = g_nullBuffer;
+        mtctx->jobs[jobID].cSize = 0;
     }
     memset(mtctx->jobs, 0, (mtctx->jobIDMask+1)*sizeof(ZSTDMT_jobDescription));
-    DEBUGLOG(4, "input: release address %08X", (U32)(size_t)mtctx->inBuff.buffer.start);
-    ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->inBuff.buffer);
     mtctx->inBuff.buffer = g_nullBuffer;
+    mtctx->inBuff.filled = 0;
     mtctx->allJobsCompleted = 1;
 }
 
-static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx*
+static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* mtctx)
 {
     DEBUGLOG(4, "ZSTDMT_waitForAllJobsCompleted");
-    while (
-        unsigned const jobID =
-        ZSTD_PTHREAD_MUTEX_LOCK(&
-        while (
-            DEBUGLOG(5, "waiting for jobCompleted signal from
-            ZSTD_pthread_cond_wait(&
+    while (mtctx->doneJobID < mtctx->nextJobID) {
+        unsigned const jobID = mtctx->doneJobID & mtctx->jobIDMask;
+        ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex);
+        while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) {
+            DEBUGLOG(5, "waiting for jobCompleted signal from job %u", mtctx->doneJobID);   /* we want to block when waiting for data to flush */
+            ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex);
         }
-        ZSTD_pthread_mutex_unlock(&
-
+        ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex);
+        mtctx->doneJobID++;
     }
 }
 
@@ -554,12 +888,14 @@ size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx)
     if (mtctx==NULL) return 0;   /* compatible with free on NULL */
     POOL_free(mtctx->factory);   /* stop and free worker threads */
     ZSTDMT_releaseAllJobResources(mtctx);  /* release job resources into pools first */
-
+    ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
     ZSTDMT_freeBufferPool(mtctx->bufPool);
     ZSTDMT_freeCCtxPool(mtctx->cctxPool);
+    ZSTDMT_freeSeqPool(mtctx->seqPool);
+    ZSTDMT_serialState_free(&mtctx->serial);
     ZSTD_freeCDict(mtctx->cdictLocal);
-
-
+    if (mtctx->roundBuff.buffer)
+        ZSTD_free(mtctx->roundBuff.buffer, mtctx->cMem);
     ZSTD_free(mtctx, mtctx->cMem);
     return 0;
 }
@@ -572,7 +908,9 @@ size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx)
             + ZSTDMT_sizeof_bufferPool(mtctx->bufPool)
             + (mtctx->jobIDMask+1) * sizeof(ZSTDMT_jobDescription)
             + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool)
-            +
+            + ZSTDMT_sizeof_seqPool(mtctx->seqPool)
+            + ZSTD_sizeof_CDict(mtctx->cdictLocal)
+            + mtctx->roundBuff.capacity;
 }
 
 /* Internal only */
@@ -612,133 +950,224 @@ size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter,
|
|
612
950
|
}
|
613
951
|
}
|
614
952
|
|
953
|
+
/* Sets parameters relevant to the compression job,
|
954
|
+
* initializing others to default values. */
|
955
|
+
static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(ZSTD_CCtx_params const params)
|
956
|
+
{
|
957
|
+
ZSTD_CCtx_params jobParams;
|
958
|
+
memset(&jobParams, 0, sizeof(jobParams));
|
959
|
+
|
960
|
+
jobParams.cParams = params.cParams;
|
961
|
+
jobParams.fParams = params.fParams;
|
962
|
+
jobParams.compressionLevel = params.compressionLevel;
|
963
|
+
jobParams.disableLiteralCompression = params.disableLiteralCompression;
|
964
|
+
|
965
|
+
return jobParams;
|
966
|
+
}
|
967
|
+
|
968
|
+
/*! ZSTDMT_updateCParams_whileCompressing() :
|
969
|
+
* Updates only a selected set of compression parameters, to remain compatible with current frame.
|
970
|
+
* New parameters will be applied to next compression job. */
|
971
|
+
void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams)
|
972
|
+
{
|
973
|
+
U32 const saved_wlog = mtctx->params.cParams.windowLog; /* Do not modify windowLog while compressing */
|
974
|
+
int const compressionLevel = cctxParams->compressionLevel;
|
975
|
+
DEBUGLOG(5, "ZSTDMT_updateCParams_whileCompressing (level:%i)",
|
976
|
+
compressionLevel);
|
977
|
+
mtctx->params.compressionLevel = compressionLevel;
|
978
|
+
{ ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, 0, 0);
|
979
|
+
cParams.windowLog = saved_wlog;
|
980
|
+
mtctx->params.cParams = cParams;
|
981
|
+
}
|
982
|
+
}
|
983
|
+
|
984
|
+
/* ZSTDMT_getNbWorkers():
|
985
|
+
* @return nb threads currently active in mtctx.
|
986
|
+
* mtctx must be valid */
|
987
|
+
unsigned ZSTDMT_getNbWorkers(const ZSTDMT_CCtx* mtctx)
|
988
|
+
{
|
989
|
+
assert(mtctx != NULL);
|
990
|
+
return mtctx->params.nbWorkers;
|
991
|
+
}
|
992
|
+
|
993
|
+
/* ZSTDMT_getFrameProgression():
|
994
|
+
* tells how much data has been consumed (input) and produced (output) for current frame.
|
995
|
+
* able to count progression inside worker threads.
|
996
|
+
* Note : mutex will be acquired during statistics collection. */
|
997
|
+
ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx)
|
998
|
+
{
|
999
|
+
ZSTD_frameProgression fps;
|
1000
|
+
DEBUGLOG(6, "ZSTDMT_getFrameProgression");
|
1001
|
+
fps.consumed = mtctx->consumed;
|
1002
|
+
fps.produced = mtctx->produced;
|
1003
|
+
fps.ingested = mtctx->consumed + mtctx->inBuff.filled;
|
1004
|
+
{ unsigned jobNb;
|
1005
|
+
unsigned lastJobNb = mtctx->nextJobID + mtctx->jobReady; assert(mtctx->jobReady <= 1);
|
1006
|
+
DEBUGLOG(6, "ZSTDMT_getFrameProgression: jobs: from %u to <%u (jobReady:%u)",
|
1007
|
+
mtctx->doneJobID, lastJobNb, mtctx->jobReady)
|
1008
|
+
for (jobNb = mtctx->doneJobID ; jobNb < lastJobNb ; jobNb++) {
|
1009
|
+
unsigned const wJobID = jobNb & mtctx->jobIDMask;
|
1010
|
+
ZSTD_pthread_mutex_lock(&mtctx->jobs[wJobID].job_mutex);
|
1011
|
+
{ size_t const cResult = mtctx->jobs[wJobID].cSize;
|
1012
|
+
size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
|
1013
|
+
fps.consumed += mtctx->jobs[wJobID].consumed;
|
1014
|
+
fps.ingested += mtctx->jobs[wJobID].src.size;
|
1015
|
+
fps.produced += produced;
|
1016
|
+
}
|
1017
|
+
ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
|
1018
|
+
}
|
1019
|
+
}
|
1020
|
+
return fps;
|
1021
|
+
}
|
1022
|
+
|
1023
|
+
|
615
1024
|
/* ------------------------------------------ */
|
616
1025
|
/* ===== Multi-threaded compression ===== */
|
617
1026
|
/* ------------------------------------------ */
|
618
1027
|
|
619
|
-
static
|
620
|
-
|
621
|
-
|
622
|
-
|
623
|
-
|
624
|
-
|
625
|
-
|
626
|
-
|
627
|
-
|
1028
|
+
static size_t ZSTDMT_computeTargetJobLog(ZSTD_CCtx_params const params)
|
1029
|
+
{
|
1030
|
+
if (params.ldmParams.enableLdm)
|
1031
|
+
return MAX(21, params.cParams.chainLog + 4);
|
1032
|
+
return MAX(20, params.cParams.windowLog + 2);
|
1033
|
+
}
|
1034
|
+
|
1035
|
+
static size_t ZSTDMT_computeOverlapLog(ZSTD_CCtx_params const params)
|
1036
|
+
{
|
1037
|
+
unsigned const overlapRLog = (params.overlapSizeLog>9) ? 0 : 9-params.overlapSizeLog;
|
1038
|
+
if (params.ldmParams.enableLdm)
|
1039
|
+
return (MIN(params.cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2) - overlapRLog);
|
1040
|
+
return overlapRLog >= 9 ? 0 : (params.cParams.windowLog - overlapRLog);
|
628
1041
|
}
|
629
1042
|
|
1043
|
+
static unsigned ZSTDMT_computeNbJobs(ZSTD_CCtx_params params, size_t srcSize, unsigned nbWorkers) {
|
1044
|
+
assert(nbWorkers>0);
|
1045
|
+
{ size_t const jobSizeTarget = (size_t)1 << ZSTDMT_computeTargetJobLog(params);
|
1046
|
+
size_t const jobMaxSize = jobSizeTarget << 2;
|
1047
|
+
size_t const passSizeMax = jobMaxSize * nbWorkers;
|
1048
|
+
unsigned const multiplier = (unsigned)(srcSize / passSizeMax) + 1;
|
1049
|
+
unsigned const nbJobsLarge = multiplier * nbWorkers;
|
1050
|
+
unsigned const nbJobsMax = (unsigned)(srcSize / jobSizeTarget) + 1;
|
1051
|
+
unsigned const nbJobsSmall = MIN(nbJobsMax, nbWorkers);
|
1052
|
+
return (multiplier>1) ? nbJobsLarge : nbJobsSmall;
|
1053
|
+
} }
|
1054
|
+
|
1055
|
+
/* ZSTDMT_compress_advanced_internal() :
|
1056
|
+
* This is a blocking function : it will only give back control to caller after finishing its compression job.
|
1057
|
+
*/
|
630
1058
|
static size_t ZSTDMT_compress_advanced_internal(
|
631
1059
|
ZSTDMT_CCtx* mtctx,
|
632
1060
|
void* dst, size_t dstCapacity,
|
633
1061
|
const void* src, size_t srcSize,
|
634
1062
|
const ZSTD_CDict* cdict,
|
635
|
-
ZSTD_CCtx_params
|
1063
|
+
ZSTD_CCtx_params params)
|
636
1064
|
{
|
637
|
-
ZSTD_CCtx_params const jobParams =
|
638
|
-
|
639
|
-
|
640
|
-
|
641
|
-
size_t const
|
642
|
-
size_t const avgChunkSize = (((proposedChunkSize-1) & 0x1FFFF) < 0x7FFF) ? proposedChunkSize + 0xFFFF : proposedChunkSize; /* avoid too small last block */
|
1065
|
+
ZSTD_CCtx_params const jobParams = ZSTDMT_initJobCCtxParams(params);
|
1066
|
+
size_t const overlapSize = (size_t)1 << ZSTDMT_computeOverlapLog(params);
|
1067
|
+
unsigned const nbJobs = ZSTDMT_computeNbJobs(params, srcSize, params.nbWorkers);
|
1068
|
+
size_t const proposedJobSize = (srcSize + (nbJobs-1)) / nbJobs;
|
1069
|
+
size_t const avgJobSize = (((proposedJobSize-1) & 0x1FFFF) < 0x7FFF) ? proposedJobSize + 0xFFFF : proposedJobSize; /* avoid too small last block */
|
643
1070
|
const char* const srcStart = (const char*)src;
|
644
1071
|
size_t remainingSrcSize = srcSize;
|
645
|
-
unsigned const compressWithinDst = (dstCapacity >= ZSTD_compressBound(srcSize)) ?
|
1072
|
+
unsigned const compressWithinDst = (dstCapacity >= ZSTD_compressBound(srcSize)) ? nbJobs : (unsigned)(dstCapacity / ZSTD_compressBound(avgJobSize)); /* presumes avgJobSize >= 256 KB, which should be the case */
|
646
1073
|
size_t frameStartPos = 0, dstBufferPos = 0;
|
647
|
-
|
648
|
-
assert(
|
649
|
-
assert(mtctx->cctxPool->totalCCtx == params.nbThreads);
|
1074
|
+
assert(jobParams.nbWorkers == 0);
|
1075
|
+
assert(mtctx->cctxPool->totalCCtx == params.nbWorkers);
|
650
1076
|
|
651
|
-
|
652
|
-
|
653
|
-
|
1077
|
+
params.jobSize = (U32)avgJobSize;
|
1078
|
+
DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: nbJobs=%2u (rawSize=%u bytes; fixedSize=%u) ",
|
1079
|
+
nbJobs, (U32)proposedJobSize, (U32)avgJobSize);
|
1080
|
+
|
1081
|
+
if ((nbJobs==1) | (params.nbWorkers<=1)) { /* fallback to single-thread mode : this is a blocking invocation anyway */
|
654
1082
|
ZSTD_CCtx* const cctx = mtctx->cctxPool->cctx[0];
|
1083
|
+
DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: fallback to single-thread mode");
|
655
1084
|
if (cdict) return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, jobParams.fParams);
|
656
1085
|
return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, NULL, 0, jobParams);
|
657
1086
|
}
|
658
|
-
assert(avgChunkSize >= 256 KB); /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), which is required for compressWithinDst */
|
659
|
-
ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(avgChunkSize) );
|
660
|
-
XXH64_reset(&xxh64, 0);
|
661
1087
|
|
662
|
-
|
663
|
-
|
664
|
-
|
1088
|
+
assert(avgJobSize >= 256 KB); /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), required to compress directly into Dst (no additional buffer) */
|
1089
|
+
ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(avgJobSize) );
|
1090
|
+
+        if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params))
+            return ERROR(memory_allocation);
+
+        if (nbJobs > mtctx->jobIDMask+1) {  /* enlarge job table */
+            U32 jobsTableSize = nbJobs;
+            ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
             mtctx->jobIDMask = 0;
-            mtctx->jobs =
+            mtctx->jobs = ZSTDMT_createJobsTable(&jobsTableSize, mtctx->cMem);
             if (mtctx->jobs==NULL) return ERROR(memory_allocation);
-
+            assert((jobsTableSize != 0) && ((jobsTableSize & (jobsTableSize - 1)) == 0));  /* ensure jobsTableSize is a power of 2 */
+            mtctx->jobIDMask = jobsTableSize - 1;
         }
 
         {   unsigned u;
-            for (u=0; u<
-                size_t const
-                size_t const dstBufferCapacity = ZSTD_compressBound(
+            for (u=0; u<nbJobs; u++) {
+                size_t const jobSize = MIN(remainingSrcSize, avgJobSize);
+                size_t const dstBufferCapacity = ZSTD_compressBound(jobSize);
                 buffer_t const dstAsBuffer = { (char*)dst + dstBufferPos, dstBufferCapacity };
                 buffer_t const dstBuffer = u < compressWithinDst ? dstAsBuffer : g_nullBuffer;
                 size_t dictSize = u ? overlapSize : 0;
 
-                mtctx->jobs[u].
-                mtctx->jobs[u].
-                mtctx->jobs[u].
-                mtctx->jobs[u].
+                mtctx->jobs[u].prefix.start = srcStart + frameStartPos - dictSize;
+                mtctx->jobs[u].prefix.size = dictSize;
+                mtctx->jobs[u].src.start = srcStart + frameStartPos;
+                mtctx->jobs[u].src.size = jobSize; assert(jobSize > 0);  /* avoid job.src.size == 0 */
+                mtctx->jobs[u].consumed = 0;
+                mtctx->jobs[u].cSize = 0;
                 mtctx->jobs[u].cdict = (u==0) ? cdict : NULL;
                 mtctx->jobs[u].fullFrameSize = srcSize;
                 mtctx->jobs[u].params = jobParams;
                 /* do not calculate checksum within sections, but write it in header for first section */
-                if (u!=0) mtctx->jobs[u].params.fParams.checksumFlag = 0;
                 mtctx->jobs[u].dstBuff = dstBuffer;
                 mtctx->jobs[u].cctxPool = mtctx->cctxPool;
                 mtctx->jobs[u].bufPool = mtctx->bufPool;
-                mtctx->jobs[u].
-                mtctx->jobs[u].
-                mtctx->jobs[u].
-                mtctx->jobs[u].
-                mtctx->jobs[u].
-
-                if (params.fParams.checksumFlag) {
-                    XXH64_update(&xxh64, srcStart + frameStartPos, chunkSize);
-                }
+                mtctx->jobs[u].seqPool = mtctx->seqPool;
+                mtctx->jobs[u].serial = &mtctx->serial;
+                mtctx->jobs[u].jobID = u;
+                mtctx->jobs[u].firstJob = (u==0);
+                mtctx->jobs[u].lastJob = (u==nbJobs-1);
 
-                DEBUGLOG(5, "ZSTDMT_compress_advanced_internal: posting job %u  (%u bytes)", u, (U32)
-                DEBUG_PRINTHEX(6, mtctx->jobs[u].
-                POOL_add(mtctx->factory,
+                DEBUGLOG(5, "ZSTDMT_compress_advanced_internal: posting job %u  (%u bytes)", u, (U32)jobSize);
+                DEBUG_PRINTHEX(6, mtctx->jobs[u].prefix.start, 12);
+                POOL_add(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[u]);
 
-                frameStartPos +=
+                frameStartPos += jobSize;
                 dstBufferPos += dstBufferCapacity;
-                remainingSrcSize -=
+                remainingSrcSize -= jobSize;
         }   }
 
         /* collect result */
         {   size_t error = 0, dstPos = 0;
-            unsigned
-            for (
-                DEBUGLOG(5, "waiting for
-                ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->
-                while (mtctx->jobs[
-                    DEBUGLOG(5, "waiting for jobCompleted signal from
-                    ZSTD_pthread_cond_wait(&mtctx->
+            unsigned jobID;
+            for (jobID=0; jobID<nbJobs; jobID++) {
+                DEBUGLOG(5, "waiting for job %u ", jobID);
+                ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex);
+                while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) {
+                    DEBUGLOG(5, "waiting for jobCompleted signal from job %u", jobID);
+                    ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex);
                 }
-                ZSTD_pthread_mutex_unlock(&mtctx->
-                DEBUGLOG(5, "ready to write
+                ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex);
+                DEBUGLOG(5, "ready to write job %u ", jobID);
 
-                mtctx->jobs[
-                {   size_t const cSize = mtctx->jobs[chunkID].cSize;
+                {   size_t const cSize = mtctx->jobs[jobID].cSize;
                     if (ZSTD_isError(cSize)) error = cSize;
                     if ((!error) && (dstPos + cSize > dstCapacity)) error = ERROR(dstSize_tooSmall);
-                    if (
+                    if (jobID) {   /* note : job 0 is written directly at dst, which is correct position */
                         if (!error)
-                            memmove((char*)dst + dstPos, mtctx->jobs[
-                        if (
-                            DEBUGLOG(5, "releasing buffer %u>=%u",
-                            ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[
+                            memmove((char*)dst + dstPos, mtctx->jobs[jobID].dstBuff.start, cSize);  /* may overlap when job compressed within dst */
+                        if (jobID >= compressWithinDst) {  /* job compressed into its own buffer, which must be released */
+                            DEBUGLOG(5, "releasing buffer %u>=%u", jobID, compressWithinDst);
+                            ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);
                     }   }
-                    mtctx->jobs[
+                    mtctx->jobs[jobID].dstBuff = g_nullBuffer;
+                    mtctx->jobs[jobID].cSize = 0;
                     dstPos += cSize ;
                 }
-        }  /* for (
+        }  /* for (jobID=0; jobID<nbJobs; jobID++) */
 
         DEBUGLOG(4, "checksumFlag : %u ", params.fParams.checksumFlag);
         if (params.fParams.checksumFlag) {
-            U32 const checksum = (U32)XXH64_digest(&
+            U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState);
             if (dstPos + 4 > dstCapacity) {
                 error = ERROR(dstSize_tooSmall);
             } else {
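The enlarged job table is kept at a power-of-2 size so that a job's slot can be computed with a bitwise AND (`id & jobIDMask`) instead of a modulo, which is what the new assert on `jobsTableSize` guarantees. A minimal standalone sketch of the same ring-indexing idiom (illustrative code, not the library's):

    #include <assert.h>
    #include <stdio.h>

    /* Round a requested size up to the next power of 2 : a power-of-2
     * table lets `id & mask` replace the costlier `id % size`. */
    static unsigned nextPow2(unsigned v)
    {
        unsigned p = 1;
        while (p < v) p <<= 1;
        return p;
    }

    int main(void)
    {
        unsigned const tableSize = nextPow2(6);   /* -> 8 */
        unsigned const mask = tableSize - 1;      /* -> 7, usable as a jobIDMask */
        unsigned id;
        assert((tableSize & (tableSize - 1)) == 0);   /* same power-of-2 check as the diff's assert */
        for (id = 0; id < 20; id++)
            printf("job %u -> slot %u\n", id, id & mask);
        return 0;
    }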
@@ -756,7 +1185,7 @@ size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
                void* dst, size_t dstCapacity,
                const void* src, size_t srcSize,
                const ZSTD_CDict* cdict,
-               ZSTD_parameters
+               ZSTD_parameters params,
                unsigned overlapLog)
 {
     ZSTD_CCtx_params cctxParams = mtctx->params;
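`ZSTDMT_compress_advanced()` now receives its `ZSTD_parameters` by value. Callers who do not need that level of control can go through `ZSTDMT_compressCCtx()` instead; a hedged usage sketch, assuming a libzstd build with multithreading enabled and access to the private `zstdmt_compress.h` header:

    #include "zstd.h"             /* ZSTD_isError */
    #include "zstdmt_compress.h"  /* ZSTDMT_* (private API) */

    /* Compress src into dst with 4 worker threads at level 3.
     * Returns a compressed size, or an error code checkable with ZSTD_isError(). */
    static size_t compress_mt(void* dst, size_t dstCapacity,
                              const void* src, size_t srcSize)
    {
        ZSTDMT_CCtx* const mtctx = ZSTDMT_createCCtx(4 /* nbWorkers */);
        size_t cSize;
        if (mtctx == NULL) return (size_t)-1;   /* illustrative failure value */
        cSize = ZSTDMT_compressCCtx(mtctx, dst, dstCapacity, src, srcSize, 3 /* level */);
        ZSTDMT_freeCCtx(mtctx);
        return cSize;
    }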
@@ -787,66 +1216,104 @@ size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
 /* ====================================== */
 
 size_t ZSTDMT_initCStream_internal(
-        ZSTDMT_CCtx*
-        const void* dict, size_t dictSize,
+        ZSTDMT_CCtx* mtctx,
+        const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType,
         const ZSTD_CDict* cdict, ZSTD_CCtx_params params,
         unsigned long long pledgedSrcSize)
 {
-    DEBUGLOG(4, "ZSTDMT_initCStream_internal (pledgedSrcSize=%u)",
+    DEBUGLOG(4, "ZSTDMT_initCStream_internal (pledgedSrcSize=%u, nbWorkers=%u, cctxPool=%u, disableLiteralCompression=%i)",
+             (U32)pledgedSrcSize, params.nbWorkers, mtctx->cctxPool->totalCCtx, params.disableLiteralCompression);
     /* params are supposed to be fully validated at this point */
     assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
     assert(!((dict) && (cdict)));  /* either dict or cdict, not both */
-    assert(
-
-
-    if (
-
-
-
-
+    assert(mtctx->cctxPool->totalCCtx == params.nbWorkers);
+
+    /* init */
+    if (params.jobSize == 0) {
+        params.jobSize = 1U << ZSTDMT_computeTargetJobLog(params);
+    }
+    if (params.jobSize > ZSTDMT_JOBSIZE_MAX) params.jobSize = ZSTDMT_JOBSIZE_MAX;
+
+    mtctx->singleBlockingThread = (pledgedSrcSize <= ZSTDMT_JOBSIZE_MIN);  /* do not trigger multi-threading when srcSize is too small */
+    if (mtctx->singleBlockingThread) {
+        ZSTD_CCtx_params const singleThreadParams = ZSTDMT_initJobCCtxParams(params);
+        DEBUGLOG(5, "ZSTDMT_initCStream_internal: switch to single blocking thread mode");
+        assert(singleThreadParams.nbWorkers == 0);
+        return ZSTD_initCStream_internal(mtctx->cctxPool->cctx[0],
                                          dict, dictSize, cdict,
                                          singleThreadParams, pledgedSrcSize);
     }
-    DEBUGLOG(4, "multi-threading mode (%u threads)", params.nbThreads);
 
-
-
-
-
+    DEBUGLOG(4, "ZSTDMT_initCStream_internal: %u workers", params.nbWorkers);
+
+    if (mtctx->allJobsCompleted == 0) {   /* previous compression not correctly finished */
+        ZSTDMT_waitForAllJobsCompleted(mtctx);
+        ZSTDMT_releaseAllJobResources(mtctx);
+        mtctx->allJobsCompleted = 1;
     }
 
-
-
+    mtctx->params = params;
+    mtctx->frameContentSize = pledgedSrcSize;
     if (dict) {
-        ZSTD_freeCDict(
-
-        ZSTD_dlm_byCopy,
-        params.cParams,
-
-        if (
+        ZSTD_freeCDict(mtctx->cdictLocal);
+        mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
+                                                      ZSTD_dlm_byCopy, dictContentType,    /* note : a loadPrefix becomes an internal CDict */
+                                                      params.cParams, mtctx->cMem);
+        mtctx->cdict = mtctx->cdictLocal;
+        if (mtctx->cdictLocal == NULL) return ERROR(memory_allocation);
     } else {
-        ZSTD_freeCDict(
-
-
+        ZSTD_freeCDict(mtctx->cdictLocal);
+        mtctx->cdictLocal = NULL;
+        mtctx->cdict = cdict;
     }
 
-
-
-
-
-    if (
-
-    DEBUGLOG(4, "
-
-
-
-
-
-
-
-
-
+    mtctx->targetPrefixSize = (size_t)1 << ZSTDMT_computeOverlapLog(params);
+    DEBUGLOG(4, "overlapLog=%u => %u KB", params.overlapSizeLog, (U32)(mtctx->targetPrefixSize>>10));
+    mtctx->targetSectionSize = params.jobSize;
+    if (mtctx->targetSectionSize < ZSTDMT_JOBSIZE_MIN) mtctx->targetSectionSize = ZSTDMT_JOBSIZE_MIN;
+    if (mtctx->targetSectionSize < mtctx->targetPrefixSize) mtctx->targetSectionSize = mtctx->targetPrefixSize;  /* job size must be >= overlap size */
+    DEBUGLOG(4, "Job Size : %u KB (note : set to %u)", (U32)(mtctx->targetSectionSize>>10), params.jobSize);
+    DEBUGLOG(4, "inBuff Size : %u KB", (U32)(mtctx->targetSectionSize>>10));
+    ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize));
+    {
+        /* If ldm is enabled we need windowSize space. */
+        size_t const windowSize = mtctx->params.ldmParams.enableLdm ? (1U << mtctx->params.cParams.windowLog) : 0;
+        /* Two buffers of slack, plus extra space for the overlap
+         * This is the minimum slack that LDM works with. One extra because
+         * flush might waste up to targetSectionSize-1 bytes. Another extra
+         * for the overlap (if > 0), then one to fill which doesn't overlap
+         * with the LDM window.
+         */
+        size_t const nbSlackBuffers = 2 + (mtctx->targetPrefixSize > 0);
+        size_t const slackSize = mtctx->targetSectionSize * nbSlackBuffers;
+        /* Compute the total size, and always have enough slack */
+        size_t const nbWorkers = MAX(mtctx->params.nbWorkers, 1);
+        size_t const sectionsSize = mtctx->targetSectionSize * nbWorkers;
+        size_t const capacity = MAX(windowSize, sectionsSize) + slackSize;
+        if (mtctx->roundBuff.capacity < capacity) {
+            if (mtctx->roundBuff.buffer)
+                ZSTD_free(mtctx->roundBuff.buffer, mtctx->cMem);
+            mtctx->roundBuff.buffer = (BYTE*)ZSTD_malloc(capacity, mtctx->cMem);
+            if (mtctx->roundBuff.buffer == NULL) {
+                mtctx->roundBuff.capacity = 0;
+                return ERROR(memory_allocation);
+            }
+            mtctx->roundBuff.capacity = capacity;
+        }
+    }
+    DEBUGLOG(4, "roundBuff capacity : %u KB", (U32)(mtctx->roundBuff.capacity>>10));
+    mtctx->roundBuff.pos = 0;
+    mtctx->inBuff.buffer = g_nullBuffer;
+    mtctx->inBuff.filled = 0;
+    mtctx->inBuff.prefix = kNullRange;
+    mtctx->doneJobID = 0;
+    mtctx->nextJobID = 0;
+    mtctx->frameEnded = 0;
+    mtctx->allJobsCompleted = 0;
+    mtctx->consumed = 0;
+    mtctx->produced = 0;
+    if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params))
+        return ERROR(memory_allocation);
     return 0;
 }
 
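The round buffer sizing in this hunk reduces to `capacity = MAX(windowSize, nbWorkers * targetSectionSize) + slackSize`, with two slack sections plus one more when an overlap prefix is in use. A standalone program that works through the arithmetic with sample values (the numbers are illustrative, not the library's defaults):

    #include <stdio.h>

    #define MAX(a,b) ((a)>(b) ? (a) : (b))

    int main(void)
    {
        size_t const targetSectionSize = (size_t)2 << 20;  /* 2 MB job size */
        size_t const targetPrefixSize  = (size_t)1 << 20;  /* 1 MB overlap */
        size_t const windowSize        = 0;                /* LDM disabled */
        size_t const nbWorkers         = 4;

        size_t const nbSlackBuffers = 2 + (targetPrefixSize > 0);           /* -> 3 */
        size_t const slackSize    = targetSectionSize * nbSlackBuffers;     /* -> 6 MB */
        size_t const sectionsSize = targetSectionSize * nbWorkers;          /* -> 8 MB */
        size_t const capacity = MAX(windowSize, sectionsSize) + slackSize;  /* -> 14 MB */

        printf("roundBuff capacity : %zu KB\n", capacity >> 10);
        return 0;
    }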
@@ -855,11 +1322,11 @@ size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
                                    ZSTD_parameters params,
                                    unsigned long long pledgedSrcSize)
 {
-    ZSTD_CCtx_params cctxParams = mtctx->params;
-    DEBUGLOG(
+    ZSTD_CCtx_params cctxParams = mtctx->params;  /* retrieve sticky params */
+    DEBUGLOG(4, "ZSTDMT_initCStream_advanced (pledgedSrcSize=%u)", (U32)pledgedSrcSize);
     cctxParams.cParams = params.cParams;
     cctxParams.fParams = params.fParams;
-    return ZSTDMT_initCStream_internal(mtctx, dict, dictSize,
+    return ZSTDMT_initCStream_internal(mtctx, dict, dictSize, ZSTD_dct_auto, NULL,
                                        cctxParams, pledgedSrcSize);
 }
 
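The `/* retrieve sticky params */` comment above names the pattern shared by these init variants: start from the parameters already stored in the context, then overwrite only the fields the caller supplies, so context-level settings such as the number of workers survive re-initialization. A toy illustration of the pattern (hypothetical struct, not the library's types):

    /* 'sticky' holds whatever was configured on the context earlier;
     * only the caller-supplied level is overridden. */
    typedef struct { int nbWorkers; int jobSize; int level; } params_t;

    static params_t merge_params(params_t sticky, int newLevel)
    {
        params_t merged = sticky;   /* nbWorkers, jobSize ... are preserved */
        merged.level = newLevel;    /* only this field comes from the caller */
        return merged;
    }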
@@ -869,10 +1336,10 @@ size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
                                      unsigned long long pledgedSrcSize)
 {
     ZSTD_CCtx_params cctxParams = mtctx->params;
+    if (cdict==NULL) return ERROR(dictionary_wrong);   /* method incompatible with NULL cdict */
     cctxParams.cParams = ZSTD_getCParamsFromCDict(cdict);
     cctxParams.fParams = fParams;
-
-    return ZSTDMT_initCStream_internal(mtctx, NULL, 0 /*dictSize*/, ZSTD_dm_auto, cdict,
+    return ZSTDMT_initCStream_internal(mtctx, NULL, 0 /*dictSize*/, ZSTD_dct_auto, cdict,
                                        cctxParams, pledgedSrcSize);
 }
 
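With the new guard, a NULL `cdict` now fails cleanly with `dictionary_wrong` instead of being dereferenced in `ZSTD_getCParamsFromCDict()`. A hedged sketch of the intended call pattern, assuming the same private header; note the CDict is referenced rather than copied, so it must outlive the compression:

    #include "zstd.h"
    #include "zstdmt_compress.h"

    /* Initialize a multi-threaded stream from a pre-digested dictionary.
     * The caller keeps ownership of cdict and must free it only after
     * the whole frame has been compressed. */
    static size_t init_with_cdict(ZSTDMT_CCtx* mtctx, const ZSTD_CDict* cdict,
                                  unsigned long long pledgedSrcSize)
    {
        ZSTD_frameParameters const fParams = { 1 /* contentSizeFlag */,
                                               1 /* checksumFlag */,
                                               0 /* noDictIDFlag */ };
        /* per the diff above, cdict==NULL returns ERROR(dictionary_wrong) */
        return ZSTDMT_initCStream_usingCDict(mtctx, cdict, fParams, pledgedSrcSize);
    }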
@@ -881,148 +1348,358 @@ size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
  * pledgedSrcSize can be zero == unknown (for the time being)
  * prefer using ZSTD_CONTENTSIZE_UNKNOWN,
  * as `0` might mean "empty" in the future */
-size_t ZSTDMT_resetCStream(ZSTDMT_CCtx*
+size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize)
 {
     if (!pledgedSrcSize) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
-
-        return ZSTD_resetCStream(zcs->cctxPool->cctx[0], pledgedSrcSize);
-    return ZSTDMT_initCStream_internal(zcs, NULL, 0, ZSTD_dm_auto, 0, zcs->params,
+    return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dct_auto, 0, mtctx->params,
                                        pledgedSrcSize);
 }
 
-size_t ZSTDMT_initCStream(ZSTDMT_CCtx*
-    ZSTD_parameters const params = ZSTD_getParams(compressionLevel,
-    ZSTD_CCtx_params cctxParams =
+size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel) {
+    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0);
+    ZSTD_CCtx_params cctxParams = mtctx->params;   /* retrieve sticky params */
+    DEBUGLOG(4, "ZSTDMT_initCStream (cLevel=%i)", compressionLevel);
     cctxParams.cParams = params.cParams;
     cctxParams.fParams = params.fParams;
-    return ZSTDMT_initCStream_internal(
-}
-
-
-
-
-
-
-
-
-
-
-
-
-
-    assert(
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dct_auto, NULL, cctxParams, ZSTD_CONTENTSIZE_UNKNOWN);
+}
+
+
+/* ZSTDMT_writeLastEmptyBlock()
+ * Write a single empty block with an end-of-frame to finish a frame.
+ * Job must be created from streaming variant.
+ * This function is always successfull if expected conditions are fulfilled.
+ */
+static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job)
+{
+    assert(job->lastJob == 1);
+    assert(job->src.size == 0);   /* last job is empty -> will be simplified into a last empty block */
+    assert(job->firstJob == 0);   /* cannot be first job, as it also needs to create frame header */
+    assert(job->dstBuff.start == NULL);   /* invoked from streaming variant only (otherwise, dstBuff might be user's output) */
+    job->dstBuff = ZSTDMT_getBuffer(job->bufPool);
+    if (job->dstBuff.start == NULL) {
+        job->cSize = ERROR(memory_allocation);
+        return;
+    }
+    assert(job->dstBuff.capacity >= ZSTD_blockHeaderSize);   /* no buffer should ever be that small */
+    job->src = kNullRange;
+    job->cSize = ZSTD_writeLastEmptyBlock(job->dstBuff.start, job->dstBuff.capacity);
+    assert(!ZSTD_isError(job->cSize));
+    assert(job->consumed == 0);
+}
+
+static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* mtctx, size_t srcSize, ZSTD_EndDirective endOp)
+{
+    unsigned const jobID = mtctx->nextJobID & mtctx->jobIDMask;
+    int const endFrame = (endOp == ZSTD_e_end);
+
+    if (mtctx->nextJobID > mtctx->doneJobID + mtctx->jobIDMask) {
+        DEBUGLOG(5, "ZSTDMT_createCompressionJob: will not create new job : table is full");
+        assert((mtctx->nextJobID & mtctx->jobIDMask) == (mtctx->doneJobID & mtctx->jobIDMask));
+        return 0;
+    }
+
+    if (!mtctx->jobReady) {
+        BYTE const* src = (BYTE const*)mtctx->inBuff.buffer.start;
+        DEBUGLOG(5, "ZSTDMT_createCompressionJob: preparing job %u to compress %u bytes with %u preload ",
+                    mtctx->nextJobID, (U32)srcSize, (U32)mtctx->inBuff.prefix.size);
+        mtctx->jobs[jobID].src.start = src;
+        mtctx->jobs[jobID].src.size = srcSize;
+        assert(mtctx->inBuff.filled >= srcSize);
+        mtctx->jobs[jobID].prefix = mtctx->inBuff.prefix;
+        mtctx->jobs[jobID].consumed = 0;
+        mtctx->jobs[jobID].cSize = 0;
+        mtctx->jobs[jobID].params = mtctx->params;
+        mtctx->jobs[jobID].cdict = mtctx->nextJobID==0 ? mtctx->cdict : NULL;
+        mtctx->jobs[jobID].fullFrameSize = mtctx->frameContentSize;
+        mtctx->jobs[jobID].dstBuff = g_nullBuffer;
+        mtctx->jobs[jobID].cctxPool = mtctx->cctxPool;
+        mtctx->jobs[jobID].bufPool = mtctx->bufPool;
+        mtctx->jobs[jobID].seqPool = mtctx->seqPool;
+        mtctx->jobs[jobID].serial = &mtctx->serial;
+        mtctx->jobs[jobID].jobID = mtctx->nextJobID;
+        mtctx->jobs[jobID].firstJob = (mtctx->nextJobID==0);
+        mtctx->jobs[jobID].lastJob = endFrame;
+        mtctx->jobs[jobID].frameChecksumNeeded = endFrame && (mtctx->nextJobID>0) && mtctx->params.fParams.checksumFlag;
+        mtctx->jobs[jobID].dstFlushed = 0;
+
+        /* Update the round buffer pos and clear the input buffer to be reset */
+        mtctx->roundBuff.pos += srcSize;
+        mtctx->inBuff.buffer = g_nullBuffer;
+        mtctx->inBuff.filled = 0;
+        /* Set the prefix */
+        if (!endFrame) {
+            size_t const newPrefixSize = MIN(srcSize, mtctx->targetPrefixSize);
+            mtctx->inBuff.prefix.start = src + srcSize - newPrefixSize;
+            mtctx->inBuff.prefix.size = newPrefixSize;
+        } else {   /* endFrame==1 => no need for another input buffer */
+            mtctx->inBuff.prefix = kNullRange;
+            mtctx->frameEnded = endFrame;
+            if (mtctx->nextJobID == 0) {
+                /* single job exception : checksum is already calculated directly within worker thread */
+                mtctx->params.fParams.checksumFlag = 0;
+        }   }
+
+        if ( (srcSize == 0)
+          && (mtctx->nextJobID>0)/*single job must also write frame header*/ ) {
+            DEBUGLOG(5, "ZSTDMT_createCompressionJob: creating a last empty block to end frame");
+            assert(endOp == ZSTD_e_end);  /* only possible case : need to end the frame with an empty last block */
+            ZSTDMT_writeLastEmptyBlock(mtctx->jobs + jobID);
+            mtctx->nextJobID++;
+            return 0;
         }
-
-        memmove(zcs->inBuff.buffer.start,
-                (const char*)zcs->jobs[jobID].srcStart + zcs->dictSize + srcSize - newDictSize,
-                zcs->inBuff.filled);
-        zcs->dictSize = newDictSize;
-    } else {   /* if (endFrame==1) */
-        zcs->inBuff.buffer = g_nullBuffer;
-        zcs->inBuff.filled = 0;
-        zcs->dictSize = 0;
-        zcs->frameEnded = 1;
-        if (zcs->nextJobID == 0) {
-            /* single chunk exception : checksum is calculated directly within worker thread */
-            zcs->params.fParams.checksumFlag = 0;
-    }   }
+    }
 
-    DEBUGLOG(5, "ZSTDMT_createCompressionJob: posting job %u : %u bytes  (end:%u
-
-            (U32)
-
-
-
-
-
+    DEBUGLOG(5, "ZSTDMT_createCompressionJob: posting job %u : %u bytes  (end:%u, jobNb == %u (mod:%u))",
+                mtctx->nextJobID,
+                (U32)mtctx->jobs[jobID].src.size,
+                mtctx->jobs[jobID].lastJob,
+                mtctx->nextJobID,
+                jobID);
+    if (POOL_tryAdd(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[jobID])) {
+        mtctx->nextJobID++;
+        mtctx->jobReady = 0;
+    } else {
+        DEBUGLOG(5, "ZSTDMT_createCompressionJob: no worker available for job %u", mtctx->nextJobID);
+        mtctx->jobReady = 1;
+    }
     return 0;
 }
 
 
-
- * output : will be updated with amount of data flushed .
- * blockToFlush : if >0, the function will block and wait if there is no data available to flush .
- * @return : amount of data remaining within internal buffer, 1 if unknown but > 0,
-static size_t
+/*! ZSTDMT_flushProduced() :
+ *  `output` : `pos` will be updated with amount of data flushed .
+ *  `blockToFlush` : if >0, the function will block and wait if there is no data available to flush .
+ *  @return : amount of data remaining within internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */
+static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, unsigned blockToFlush, ZSTD_EndDirective end)
 {
-    unsigned const wJobID =
-    DEBUGLOG(5, "
-
-
-
-
-
-
+    unsigned const wJobID = mtctx->doneJobID & mtctx->jobIDMask;
+    DEBUGLOG(5, "ZSTDMT_flushProduced (blocking:%u , job %u <= %u)",
+                blockToFlush, mtctx->doneJobID, mtctx->nextJobID);
+    assert(output->size >= output->pos);
+
+    ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex);
+    if (  blockToFlush
+      && (mtctx->doneJobID < mtctx->nextJobID) ) {
+        assert(mtctx->jobs[wJobID].dstFlushed <= mtctx->jobs[wJobID].cSize);
+        while (mtctx->jobs[wJobID].dstFlushed == mtctx->jobs[wJobID].cSize) {  /* nothing to flush */
+            if (mtctx->jobs[wJobID].consumed == mtctx->jobs[wJobID].src.size) {
+                DEBUGLOG(5, "job %u is completely consumed (%u == %u) => don't wait for cond, there will be none",
+                            mtctx->doneJobID, (U32)mtctx->jobs[wJobID].consumed, (U32)mtctx->jobs[wJobID].src.size);
+                break;
+            }
+            DEBUGLOG(5, "waiting for something to flush from job %u (currently flushed: %u bytes)",
+                        mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
+            ZSTD_pthread_cond_wait(&mtctx->jobs[wJobID].job_cond, &mtctx->jobs[wJobID].job_mutex);  /* block when nothing to flush but some to come */
+    }   }
+
+    /* try to flush something */
+    {   size_t cSize = mtctx->jobs[wJobID].cSize;                  /* shared */
+        size_t const srcConsumed = mtctx->jobs[wJobID].consumed;   /* shared */
+        size_t const srcSize = mtctx->jobs[wJobID].src.size;       /* read-only, could be done after mutex lock, but no-declaration-after-statement */
+        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
+        if (ZSTD_isError(cSize)) {
+            DEBUGLOG(5, "ZSTDMT_flushProduced: job %u : compression error detected : %s",
+                        mtctx->doneJobID, ZSTD_getErrorName(cSize));
+            ZSTDMT_waitForAllJobsCompleted(mtctx);
+            ZSTDMT_releaseAllJobResources(mtctx);
+            return cSize;
+        }
+        /* add frame checksum if necessary (can only happen once) */
+        assert(srcConsumed <= srcSize);
+        if ( (srcConsumed == srcSize)   /* job completed -> worker no longer active */
+          && mtctx->jobs[wJobID].frameChecksumNeeded ) {
+            U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState);
+            DEBUGLOG(4, "ZSTDMT_flushProduced: writing checksum : %08X \n", checksum);
+            MEM_writeLE32((char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].cSize, checksum);
+            cSize += 4;
+            mtctx->jobs[wJobID].cSize += 4;  /* can write this shared value, as worker is no longer active */
+            mtctx->jobs[wJobID].frameChecksumNeeded = 0;
+        }
+        if (cSize > 0) {   /* compression is ongoing or completed */
+            size_t const toFlush = MIN(cSize - mtctx->jobs[wJobID].dstFlushed, output->size - output->pos);
+            DEBUGLOG(5, "ZSTDMT_flushProduced: Flushing %u bytes from job %u (completion:%u/%u, generated:%u)",
+                        (U32)toFlush, mtctx->doneJobID, (U32)srcConsumed, (U32)srcSize, (U32)cSize);
+            assert(mtctx->doneJobID < mtctx->nextJobID);
+            assert(cSize >= mtctx->jobs[wJobID].dstFlushed);
+            assert(mtctx->jobs[wJobID].dstBuff.start != NULL);
+            memcpy((char*)output->dst + output->pos,
+                   (const char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed,
+                   toFlush);
+            output->pos += toFlush;
+            mtctx->jobs[wJobID].dstFlushed += toFlush;  /* can write : this value is only used by mtctx */
+
+            if ( (srcConsumed == srcSize)    /* job completed */
+              && (mtctx->jobs[wJobID].dstFlushed == cSize) ) {   /* output buffer fully flushed => free this job position */
+                DEBUGLOG(5, "Job %u completed (%u bytes), moving to next one",
+                            mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
+                ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[wJobID].dstBuff);
+                mtctx->jobs[wJobID].dstBuff = g_nullBuffer;
+                mtctx->jobs[wJobID].cSize = 0;   /* ensure this job slot is considered "not started" in future check */
+                mtctx->consumed += srcSize;
+                mtctx->produced += cSize;
+                mtctx->doneJobID++;
+        }   }
+
+        /* return value : how many bytes left in buffer ; fake it to 1 when unknown but >0 */
+        if (cSize > mtctx->jobs[wJobID].dstFlushed) return (cSize - mtctx->jobs[wJobID].dstFlushed);
+        if (srcSize > srcConsumed) return 1;   /* current job not completely compressed */
     }
-
-    /*
-
-
-
-
-
-
-
-
+    if (mtctx->doneJobID < mtctx->nextJobID) return 1;   /* some more jobs ongoing */
+    if (mtctx->jobReady) return 1;   /* one job is ready to push, just not yet in the list */
+    if (mtctx->inBuff.filled > 0) return 1;   /* input is not empty, and still needs to be converted into a job */
+    mtctx->allJobsCompleted = mtctx->frameEnded;   /* all jobs are entirely flushed => if this one is last one, frame is completed */
+    if (end == ZSTD_e_end) return !mtctx->frameEnded;  /* for ZSTD_e_end, question becomes : is frame completed ? instead of : are internal buffers fully flushed ? */
+    return 0;   /* internal buffers fully flushed */
+}
+
+/**
+ * Returns the range of data used by the earliest job that is not yet complete.
+ * If the data of the first job is broken up into two segments, we cover both
+ * sections.
+ */
+static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx)
+{
+    unsigned const firstJobID = mtctx->doneJobID;
+    unsigned const lastJobID = mtctx->nextJobID;
+    unsigned jobID;
+
+    for (jobID = firstJobID; jobID < lastJobID; ++jobID) {
+        unsigned const wJobID = jobID & mtctx->jobIDMask;
+        size_t consumed;
+
+        ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex);
+        consumed = mtctx->jobs[wJobID].consumed;
+        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
+
+        if (consumed < mtctx->jobs[wJobID].src.size) {
+            range_t range = mtctx->jobs[wJobID].prefix;
+            if (range.size == 0) {
+                /* Empty prefix */
+                range = mtctx->jobs[wJobID].src;
             }
-
-
-
-            U32 const checksum = (U32)XXH64_digest(&zcs->xxhState);
-            DEBUGLOG(5, "writing checksum : %08X \n", checksum);
-            MEM_writeLE32((char*)job.dstBuff.start + job.cSize, checksum);
-            job.cSize += 4;
-            zcs->jobs[wJobID].cSize += 4;
-    }   }
-    zcs->jobs[wJobID].jobScanned = 1;
+            /* Job source in multiple segments not supported yet */
+            assert(range.start <= mtctx->jobs[wJobID].src.start);
+            return range;
         }
-
-
-
-
-
+    }
+    return kNullRange;
+}
+
+/**
+ * Returns non-zero iff buffer and range overlap.
+ */
+static int ZSTDMT_isOverlapped(buffer_t buffer, range_t range)
+{
+    BYTE const* const bufferStart = (BYTE const*)buffer.start;
+    BYTE const* const bufferEnd = bufferStart + buffer.capacity;
+    BYTE const* const rangeStart = (BYTE const*)range.start;
+    BYTE const* const rangeEnd = rangeStart + range.size;
+
+    if (rangeStart == NULL || bufferStart == NULL)
+        return 0;
+    /* Empty ranges cannot overlap */
+    if (bufferStart == bufferEnd || rangeStart == rangeEnd)
+        return 0;
+
+    return bufferStart < rangeEnd && rangeStart < bufferEnd;
+}
+
+static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window)
+{
+    range_t extDict;
+    range_t prefix;
+
+    extDict.start = window.dictBase + window.lowLimit;
+    extDict.size = window.dictLimit - window.lowLimit;
+
+    prefix.start = window.base + window.dictLimit;
+    prefix.size = window.nextSrc - (window.base + window.dictLimit);
+    DEBUGLOG(5, "extDict [0x%zx, 0x%zx)",
+                (size_t)extDict.start,
+                (size_t)extDict.start + extDict.size);
+    DEBUGLOG(5, "prefix  [0x%zx, 0x%zx)",
+                (size_t)prefix.start,
+                (size_t)prefix.start + prefix.size);
+
+    return ZSTDMT_isOverlapped(buffer, extDict)
+        || ZSTDMT_isOverlapped(buffer, prefix);
+}
+
+static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer)
+{
+    if (mtctx->params.ldmParams.enableLdm) {
+        ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex;
+        DEBUGLOG(5, "source  [0x%zx, 0x%zx)",
+                    (size_t)buffer.start,
+                    (size_t)buffer.start + buffer.capacity);
+        ZSTD_PTHREAD_MUTEX_LOCK(mutex);
+        while (ZSTDMT_doesOverlapWindow(buffer, mtctx->serial.ldmWindow)) {
+            DEBUGLOG(6, "Waiting for LDM to finish...");
+            ZSTD_pthread_cond_wait(&mtctx->serial.ldmWindowCond, mutex);
         }
-
-
-
-
-
-
-
+        DEBUGLOG(6, "Done waiting for LDM to finish");
+        ZSTD_pthread_mutex_unlock(mutex);
+    }
+}
+
+/**
+ * Attempts to set the inBuff to the next section to fill.
+ * If any part of the new section is still in use we give up.
+ * Returns non-zero if the buffer is filled.
+ */
+static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx)
+{
+    range_t const inUse = ZSTDMT_getInputDataInUse(mtctx);
+    size_t const spaceLeft = mtctx->roundBuff.capacity - mtctx->roundBuff.pos;
+    size_t const target = mtctx->targetSectionSize;
+    buffer_t buffer;
+
+    assert(mtctx->inBuff.buffer.start == NULL);
+    assert(mtctx->roundBuff.capacity >= target);
+
+    if (spaceLeft < target) {
+        /* ZSTD_invalidateRepCodes() doesn't work for extDict variants.
+         * Simply copy the prefix to the beginning in that case.
+         */
+        BYTE* const start = (BYTE*)mtctx->roundBuff.buffer;
+        size_t const prefixSize = mtctx->inBuff.prefix.size;
+
+        buffer.start = start;
+        buffer.capacity = prefixSize;
+        if (ZSTDMT_isOverlapped(buffer, inUse)) {
+            DEBUGLOG(6, "Waiting for buffer...");
+            return 0;
         }
-
-
-
-
-
-
+        ZSTDMT_waitForLdmComplete(mtctx, buffer);
+        memmove(start, mtctx->inBuff.prefix.start, prefixSize);
+        mtctx->inBuff.prefix.start = start;
+        mtctx->roundBuff.pos = prefixSize;
+    }
+    buffer.start = mtctx->roundBuff.buffer + mtctx->roundBuff.pos;
+    buffer.capacity = target;
+
+    if (ZSTDMT_isOverlapped(buffer, inUse)) {
+        DEBUGLOG(6, "Waiting for buffer...");
+        return 0;
+    }
+    assert(!ZSTDMT_isOverlapped(buffer, mtctx->inBuff.prefix));
+
+    ZSTDMT_waitForLdmComplete(mtctx, buffer);
+
+    DEBUGLOG(5, "Using prefix range [%zx, %zx)",
+                (size_t)mtctx->inBuff.prefix.start,
+                (size_t)mtctx->inBuff.prefix.start + mtctx->inBuff.prefix.size);
+    DEBUGLOG(5, "Using source range [%zx, %zx)",
+                (size_t)buffer.start,
+                (size_t)buffer.start + buffer.capacity);
+
+
+    mtctx->inBuff.buffer = buffer;
+    mtctx->inBuff.filled = 0;
+    assert(mtctx->roundBuff.pos + buffer.capacity <= mtctx->roundBuff.capacity);
+    return 1;
+}
 
 
 /** ZSTDMT_compressStream_generic() :
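Much of this hunk is round-buffer safety machinery. Its core predicate, `ZSTDMT_isOverlapped()`, treats buffer and range as half-open intervals, which intersect exactly when each one starts before the other ends, and never when either is empty. A standalone restatement of the test with a few spot checks:

    #include <assert.h>

    /* Half-open interval overlap, as in ZSTDMT_isOverlapped() above :
     * [bStart,bEnd) and [rStart,rEnd) intersect iff each starts before
     * the other ends. */
    static int overlaps(const char* bStart, const char* bEnd,
                        const char* rStart, const char* rEnd)
    {
        if (bStart == bEnd || rStart == rEnd) return 0;  /* empty ranges never overlap */
        return bStart < rEnd && rStart < bEnd;
    }

    int main(void)
    {
        char buf[100];
        assert( overlaps(buf, buf+10, buf+5,  buf+15));  /* partial overlap */
        assert(!overlaps(buf, buf+10, buf+10, buf+20));  /* adjacent : disjoint under half-open bounds */
        assert(!overlaps(buf, buf,    buf,    buf+20));  /* empty buffer */
        return 0;
    }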
@@ -1034,13 +1711,13 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
                                      ZSTD_inBuffer* input,
                                      ZSTD_EndDirective endOp)
 {
-    size_t const newJobThreshold = mtctx->dictSize + mtctx->targetSectionSize;
     unsigned forwardInputProgress = 0;
-    DEBUGLOG(5, "ZSTDMT_compressStream_generic "
+    DEBUGLOG(5, "ZSTDMT_compressStream_generic (endOp=%u, srcSize=%u)",
+             (U32)endOp, (U32)(input->size - input->pos));
     assert(output->pos <= output->size);
     assert(input->pos  <= input->size);
 
-    if (mtctx->
+    if (mtctx->singleBlockingThread) {  /* delegate to single-thread (synchronous) */
         return ZSTD_compressStream_generic(mtctx->cctxPool->cctx[0], output, input, endOp);
     }
 
@@ -1050,10 +1727,11 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
     }
 
     /* single-pass shortcut (note : synchronous-mode) */
-    if ( (mtctx->nextJobID == 0)
-      && (mtctx->inBuff.filled == 0)
-      && (
-      && (
+    if ( (mtctx->nextJobID == 0)       /* just started */
+      && (mtctx->inBuff.filled == 0)   /* nothing buffered */
+      && (!mtctx->jobReady)            /* no job already created */
+      && (endOp == ZSTD_e_end)         /* end order */
+      && (output->size - output->pos >= ZSTD_compressBound(input->size - input->pos)) ) {  /* enough space in dst */
         size_t const cSize = ZSTDMT_compress_advanced_internal(mtctx,
                 (char*)output->dst + output->pos, output->size - output->pos,
                 (const char*)input->src + input->pos, input->size - input->pos,
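The single-pass shortcut can only fire when the destination is provably large enough for any input of the given size, which is exactly the guarantee `ZSTD_compressBound()` provides. A sketch of how a caller can size the output so the shortcut is reachable:

    #include <stdlib.h>
    #include "zstd.h"   /* ZSTD_compressBound */

    /* Allocate a destination that satisfies the shortcut's
     * `output space >= ZSTD_compressBound(input size)` condition. */
    static void* alloc_dst_for_single_pass(size_t srcSize, size_t* dstCapacity)
    {
        *dstCapacity = ZSTD_compressBound(srcSize);   /* worst-case compressed size */
        return malloc(*dstCapacity);                  /* caller checks for NULL */
    }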
@@ -1061,89 +1739,93 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
         if (ZSTD_isError(cSize)) return cSize;
         input->pos = input->size;
         output->pos += cSize;
-        ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->inBuff.buffer);  /* was allocated in initStream */
         mtctx->allJobsCompleted = 1;
         mtctx->frameEnded = 1;
         return 0;
     }
 
     /* fill input buffer */
-    if (
+    if ( (!mtctx->jobReady)
+      && (input->size > input->pos) ) {   /* support NULL input */
         if (mtctx->inBuff.buffer.start == NULL) {
-            mtctx->inBuff.
-            mtctx
+            assert(mtctx->inBuff.filled == 0); /* Can't fill an empty buffer */
+            if (!ZSTDMT_tryGetInputRange(mtctx)) {
+                /* It is only possible for this operation to fail if there are
+                 * still compression jobs ongoing.
+                 */
+                assert(mtctx->doneJobID != mtctx->nextJobID);
+            }
         }
-        if (mtctx->inBuff.buffer.start) {
-            size_t const toLoad = MIN(input->size - input->pos, mtctx->
-
+        if (mtctx->inBuff.buffer.start != NULL) {
+            size_t const toLoad = MIN(input->size - input->pos, mtctx->targetSectionSize - mtctx->inBuff.filled);
+            assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize);
+            DEBUGLOG(5, "ZSTDMT_compressStream_generic: adding %u bytes on top of %u to buffer of size %u",
+                        (U32)toLoad, (U32)mtctx->inBuff.filled, (U32)mtctx->targetSectionSize);
             memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, toLoad);
             input->pos += toLoad;
             mtctx->inBuff.filled += toLoad;
             forwardInputProgress = toLoad>0;
-
+        }
+        if ((input->pos < input->size) && (endOp == ZSTD_e_end))
+            endOp = ZSTD_e_flush;   /* can't end now : not all input consumed */
+    }
 
-    if ( (mtctx->
-
-
+    if ( (mtctx->jobReady)
+      || (mtctx->inBuff.filled >= mtctx->targetSectionSize)  /* filled enough : let's compress */
+      || ((endOp != ZSTD_e_continue) && (mtctx->inBuff.filled > 0))  /* something to flush : let's go */
+      || ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) {   /* must finish the frame with a zero-size block */
+        size_t const jobSize = mtctx->inBuff.filled;
+        assert(mtctx->inBuff.filled <= mtctx->targetSectionSize);
+        CHECK_F( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) );
     }
 
     /* check for potential compressed data ready to be flushed */
-
-
-
-    endOp = ZSTD_e_continue;
-
-    switch(endOp)
-    {
-        case ZSTD_e_flush:
-            return ZSTDMT_flushStream(mtctx, output);
-        case ZSTD_e_end:
-            return ZSTDMT_endStream(mtctx, output);
-        case ZSTD_e_continue:
-            return 1;
-        default:
-            return ERROR(GENERIC);   /* invalid endDirective */
+    {   size_t const remainingToFlush = ZSTDMT_flushProduced(mtctx, output, !forwardInputProgress, endOp);   /* block if there was no forward input progress */
+        if (input->pos < input->size) return MAX(remainingToFlush, 1);   /* input not consumed : do not end flush yet */
+        return remainingToFlush;
     }
 }
 
 
-size_t ZSTDMT_compressStream(ZSTDMT_CCtx*
+size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
 {
-    CHECK_F( ZSTDMT_compressStream_generic(
+    CHECK_F( ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_continue) );
 
     /* recommended next input size : fill current input buffer */
-    return
+    return mtctx->targetSectionSize - mtctx->inBuff.filled;   /* note : could be zero when input buffer is fully filled and no more availability to create new job */
 }
 
 
-static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output,
+static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_EndDirective endFrame)
 {
-    size_t const srcSize = mtctx->inBuff.filled
+    size_t const srcSize = mtctx->inBuff.filled;
     DEBUGLOG(5, "ZSTDMT_flushStream_internal");
 
-    if (
-
-
+    if ( mtctx->jobReady     /* one job ready for a worker to pick up */
+      || (srcSize > 0)       /* still some data within input buffer */
+      || ((endFrame==ZSTD_e_end) && !mtctx->frameEnded)) {  /* need a last 0-size block to end frame */
+        DEBUGLOG(5, "ZSTDMT_flushStream_internal : create a new job (%u bytes, end:%u)",
+                    (U32)srcSize, (U32)endFrame);
         CHECK_F( ZSTDMT_createCompressionJob(mtctx, srcSize, endFrame) );
     }
 
     /* check if there is any data available to flush */
-    return
+    return ZSTDMT_flushProduced(mtctx, output, 1 /* blockToFlush */, endFrame);
 }
 
 
 size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output)
 {
     DEBUGLOG(5, "ZSTDMT_flushStream");
-    if (mtctx->
+    if (mtctx->singleBlockingThread)
         return ZSTD_flushStream(mtctx->cctxPool->cctx[0], output);
-    return ZSTDMT_flushStream_internal(mtctx, output,
+    return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_flush);
 }
 
 size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output)
 {
     DEBUGLOG(4, "ZSTDMT_endStream");
-    if (mtctx->
+    if (mtctx->singleBlockingThread)
         return ZSTD_endStream(mtctx->cctxPool->cctx[0], output);
-    return ZSTDMT_flushStream_internal(mtctx, output,
+    return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_end);
 }
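Taken together, `ZSTDMT_initCStream()`, `ZSTDMT_compressStream()` and `ZSTDMT_endStream()` form the streaming loop this file serves. An end-to-end sketch, again assuming a multithreaded libzstd build and the private `zstdmt_compress.h` header; error paths are trimmed to the essentials and leak the context on failure:

    #include <stdio.h>
    #include "zstd.h"
    #include "zstdmt_compress.h"

    static size_t stream_compress(FILE* fin, FILE* fout,
                                  void* inBuf, size_t inCap,
                                  void* outBuf, size_t outCap)
    {
        ZSTDMT_CCtx* const mtctx = ZSTDMT_createCCtx(2 /* nbWorkers */);
        size_t r;
        if (mtctx == NULL) return (size_t)-1;   /* illustrative failure value */
        r = ZSTDMT_initCStream(mtctx, 3 /* level */);
        if (ZSTD_isError(r)) return r;
        for (;;) {   /* push input, writing whatever gets produced */
            size_t const readSize = fread(inBuf, 1, inCap, fin);
            ZSTD_inBuffer input = { inBuf, readSize, 0 };
            while (input.pos < input.size) {
                ZSTD_outBuffer output = { outBuf, outCap, 0 };
                r = ZSTDMT_compressStream(mtctx, &output, &input);
                if (ZSTD_isError(r)) return r;
                fwrite(outBuf, 1, output.pos, fout);
            }
            if (readSize < inCap) break;   /* end of input */
        }
        {   size_t remaining;
            do {   /* ZSTDMT_endStream() returns how much is still buffered */
                ZSTD_outBuffer output = { outBuf, outCap, 0 };
                remaining = ZSTDMT_endStream(mtctx, &output);
                if (ZSTD_isError(remaining)) return remaining;
                fwrite(outBuf, 1, output.pos, fout);
            } while (remaining != 0);
        }
        ZSTDMT_freeCCtx(mtctx);
        return 0;
    }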