zstdlib 0.3.0-x86-mingw32 → 0.8.0-x86-mingw32
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGES.md +30 -1
- data/README.md +2 -2
- data/Rakefile +1 -1
- data/ext/zstdlib/extconf.rb +3 -3
- data/ext/zstdlib/ruby/zlib-2.7/zstdlib.c +4895 -0
- data/ext/zstdlib/ruby/zlib-3.0/zstdlib.c +4994 -0
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/bitstream.h +59 -51
- data/ext/zstdlib/zstd-1.5.0/lib/common/compiler.h +289 -0
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/cpu.h +1 -3
- data/ext/zstdlib/zstd-1.5.0/lib/common/debug.c +24 -0
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/debug.h +22 -49
- data/ext/zstdlib/zstd-1.5.0/lib/common/entropy_common.c +362 -0
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/error_private.c +3 -1
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/error_private.h +8 -4
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/fse.h +50 -42
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/fse_decompress.c +149 -55
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/huf.h +43 -39
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/mem.h +69 -25
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/pool.c +30 -20
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/pool.h +3 -3
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/threading.c +51 -4
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/threading.h +36 -4
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/xxhash.c +40 -92
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/xxhash.h +12 -32
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/common/zstd_common.c +10 -10
- data/ext/zstdlib/zstd-1.5.0/lib/common/zstd_deps.h +111 -0
- data/ext/zstdlib/zstd-1.5.0/lib/common/zstd_internal.h +490 -0
- data/ext/zstdlib/zstd-1.5.0/lib/common/zstd_trace.h +154 -0
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/fse_compress.c +47 -63
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/hist.c +41 -63
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/hist.h +13 -33
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/huf_compress.c +332 -193
- data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_compress.c +6393 -0
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_compress_internal.h +522 -86
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_compress_literals.c +25 -16
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_compress_literals.h +2 -2
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_compress_sequences.c +50 -24
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_compress_sequences.h +11 -4
- data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_compress_superblock.c +572 -0
- data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_compress_superblock.h +32 -0
- data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_cwksp.h +662 -0
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_double_fast.c +43 -41
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_double_fast.h +2 -2
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_fast.c +85 -80
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_fast.h +2 -2
- data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_lazy.c +2184 -0
- data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_lazy.h +125 -0
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_ldm.c +333 -208
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_ldm.h +15 -3
- data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_ldm_geartab.h +103 -0
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_opt.c +228 -129
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstd_opt.h +1 -1
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstdmt_compress.c +151 -440
- data/ext/zstdlib/zstd-1.5.0/lib/compress/zstdmt_compress.h +110 -0
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/decompress/huf_decompress.c +395 -276
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/decompress/zstd_ddict.c +20 -16
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/decompress/zstd_ddict.h +3 -3
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/decompress/zstd_decompress.c +628 -231
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/decompress/zstd_decompress_block.c +606 -380
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/decompress/zstd_decompress_block.h +8 -5
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/decompress/zstd_decompress_internal.h +39 -9
- data/ext/zstdlib/zstd-1.5.0/lib/zdict.h +452 -0
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/zstd.h +740 -153
- data/ext/zstdlib/{zstd-1.4.2/lib/common → zstd-1.5.0/lib}/zstd_errors.h +3 -1
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/zlibWrapper/gzclose.c +1 -1
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/zlibWrapper/gzcompatibility.h +1 -1
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/zlibWrapper/gzguts.h +0 -0
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/zlibWrapper/gzlib.c +9 -9
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/zlibWrapper/gzread.c +16 -8
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/zlibWrapper/gzwrite.c +8 -8
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/zlibWrapper/zstd_zlibwrapper.c +131 -45
- data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/zlibWrapper/zstd_zlibwrapper.h +1 -1
- data/lib/2.2/zstdlib.so +0 -0
- data/lib/2.3/zstdlib.so +0 -0
- data/lib/2.4/zstdlib.so +0 -0
- data/lib/2.5/zstdlib.so +0 -0
- data/lib/2.6/zstdlib.so +0 -0
- data/lib/2.7/zstdlib.so +0 -0
- metadata +76 -67
- data/ext/zstdlib/zstd-1.4.2/lib/common/compiler.h +0 -147
- data/ext/zstdlib/zstd-1.4.2/lib/common/debug.c +0 -44
- data/ext/zstdlib/zstd-1.4.2/lib/common/entropy_common.c +0 -236
- data/ext/zstdlib/zstd-1.4.2/lib/common/zstd_internal.h +0 -371
- data/ext/zstdlib/zstd-1.4.2/lib/compress/zstd_compress.c +0 -3904
- data/ext/zstdlib/zstd-1.4.2/lib/compress/zstd_lazy.c +0 -1111
- data/ext/zstdlib/zstd-1.4.2/lib/compress/zstd_lazy.h +0 -67
- data/ext/zstdlib/zstd-1.4.2/lib/compress/zstdmt_compress.h +0 -192
data/ext/zstdlib/{zstd-1.4.2 → zstd-1.5.0}/lib/compress/zstdmt_compress.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c)
+ * Copyright (c) Yann Collet, Facebook, Inc.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -20,11 +20,10 @@
 
 
 /* ====== Dependencies ====== */
-#include
-#include
-#include "
-#include "
-#include "threading.h" /* mutex */
+#include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset, INT_MAX, UINT_MAX */
+#include "../common/mem.h" /* MEM_STATIC */
+#include "../common/pool.h" /* threadpool */
+#include "../common/threading.h" /* mutex */
 #include "zstd_compress_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */
 #include "zstd_ldm.h"
 #include "zstdmt_compress.h"
@@ -106,11 +105,11 @@ typedef struct ZSTDMT_bufferPool_s {
 static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbWorkers, ZSTD_customMem cMem)
 {
     unsigned const maxNbBuffers = 2*nbWorkers + 3;
-    ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)
+    ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_customCalloc(
         sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem);
     if (bufPool==NULL) return NULL;
     if (ZSTD_pthread_mutex_init(&bufPool->poolMutex, NULL)) {
-
+        ZSTD_customFree(bufPool, cMem);
         return NULL;
     }
     bufPool->bufferSize = 64 KB;
@@ -127,10 +126,10 @@ static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)
     if (!bufPool) return; /* compatibility with free on NULL */
     for (u=0; u<bufPool->totalBuffers; u++) {
         DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->bTable[u].start);
-
+        ZSTD_customFree(bufPool->bTable[u].start, bufPool->cMem);
     }
     ZSTD_pthread_mutex_destroy(&bufPool->poolMutex);
-
+    ZSTD_customFree(bufPool, bufPool->cMem);
 }
 
 /* only works at initialization, not during compression */
@@ -201,13 +200,13 @@ static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
         }
         /* size conditions not respected : scratch this buffer, create new one */
         DEBUGLOG(5, "ZSTDMT_getBuffer: existing buffer does not meet size conditions => freeing");
-
+        ZSTD_customFree(buf.start, bufPool->cMem);
     }
     ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
     /* create new buffer */
     DEBUGLOG(5, "ZSTDMT_getBuffer: create a new buffer");
     {   buffer_t buffer;
-        void* const start =
+        void* const start = ZSTD_customMalloc(bSize, bufPool->cMem);
         buffer.start = start; /* note : start can be NULL if malloc fails ! */
         buffer.capacity = (start==NULL) ? 0 : bSize;
         if (start==NULL) {
@@ -229,13 +228,13 @@ static buffer_t ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buffer)
 {
     size_t const bSize = bufPool->bufferSize;
     if (buffer.capacity < bSize) {
-        void* const start =
+        void* const start = ZSTD_customMalloc(bSize, bufPool->cMem);
         buffer_t newBuffer;
         newBuffer.start = start;
         newBuffer.capacity = start == NULL ? 0 : bSize;
         if (start != NULL) {
             assert(newBuffer.capacity >= buffer.capacity);
-
+            ZSTD_memcpy(newBuffer.start, buffer.start, buffer.capacity);
             DEBUGLOG(5, "ZSTDMT_resizeBuffer: created buffer of size %u", (U32)bSize);
             return newBuffer;
         }
@@ -261,14 +260,12 @@ static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
     ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
     /* Reached bufferPool capacity (should not happen) */
     DEBUGLOG(5, "ZSTDMT_releaseBuffer: pool capacity reached => freeing ");
-
+    ZSTD_customFree(buf.start, bufPool->cMem);
 }
 
 
 /* ===== Seq Pool Wrapper ====== */
 
-static rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0};
-
 typedef ZSTDMT_bufferPool ZSTDMT_seqPool;
 
 static size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool)
@@ -278,7 +275,7 @@ static size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool)
 
 static rawSeqStore_t bufferToSeq(buffer_t buffer)
 {
-    rawSeqStore_t seq =
+    rawSeqStore_t seq = kNullRawSeqStore;
     seq.seq = (rawSeq*)buffer.start;
     seq.capacity = buffer.capacity / sizeof(rawSeq);
     return seq;
@@ -354,7 +351,7 @@ static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
     for (cid=0; cid<pool->totalCCtx; cid++)
         ZSTD_freeCCtx(pool->cctx[cid]); /* note : compatible with free on NULL */
     ZSTD_pthread_mutex_destroy(&pool->poolMutex);
-
+    ZSTD_customFree(pool, pool->cMem);
 }
 
 /* ZSTDMT_createCCtxPool() :
@@ -362,12 +359,12 @@ static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
 static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(int nbWorkers,
                                               ZSTD_customMem cMem)
 {
-    ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*)
+    ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_customCalloc(
         sizeof(ZSTDMT_CCtxPool) + (nbWorkers-1)*sizeof(ZSTD_CCtx*), cMem);
     assert(nbWorkers > 0);
     if (!cctxPool) return NULL;
     if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) {
-
+        ZSTD_customFree(cctxPool, cMem);
         return NULL;
     }
     cctxPool->cMem = cMem;
@@ -461,7 +458,13 @@ typedef struct {
     ZSTD_window_t ldmWindow; /* A thread-safe copy of ldmState.window */
 } serialState_t;
 
-static int
+static int
+ZSTDMT_serialState_reset(serialState_t* serialState,
+                         ZSTDMT_seqPool* seqPool,
+                         ZSTD_CCtx_params params,
+                         size_t jobSize,
+                         const void* dict, size_t const dictSize,
+                         ZSTD_dictContentType_e dictContentType)
 {
     /* Adjust parameters */
     if (params.ldmParams.enableLdm) {
@@ -469,10 +472,8 @@ static int ZSTDMT_serialState_reset(serialState_t* serialState, ZSTDMT_seqPool*
         ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
         assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
         assert(params.ldmParams.hashRateLog < 32);
-        serialState->ldmState.hashPower =
-            ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength);
     } else {
-
+        ZSTD_memset(&params.ldmParams, 0, sizeof(params.ldmParams));
     }
     serialState->nextJobID = 0;
     if (params.fParams.checksumFlag)
@@ -483,30 +484,46 @@ static int ZSTDMT_serialState_reset(serialState_t* serialState, ZSTDMT_seqPool*
         size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t);
         unsigned const bucketLog =
             params.ldmParams.hashLog - params.ldmParams.bucketSizeLog;
-        size_t const bucketSize = (size_t)1 << bucketLog;
         unsigned const prevBucketLog =
             serialState->params.ldmParams.hashLog -
             serialState->params.ldmParams.bucketSizeLog;
+        size_t const numBuckets = (size_t)1 << bucketLog;
         /* Size the seq pool tables */
         ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, jobSize));
         /* Reset the window */
-
-        serialState->ldmWindow = serialState->ldmState.window;
+        ZSTD_window_init(&serialState->ldmState.window);
         /* Resize tables and output space if necessary. */
         if (serialState->ldmState.hashTable == NULL || serialState->params.ldmParams.hashLog < hashLog) {
-
-            serialState->ldmState.hashTable = (ldmEntry_t*)
+            ZSTD_customFree(serialState->ldmState.hashTable, cMem);
+            serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_customMalloc(hashSize, cMem);
         }
         if (serialState->ldmState.bucketOffsets == NULL || prevBucketLog < bucketLog) {
-
-            serialState->ldmState.bucketOffsets = (BYTE*)
+            ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem);
+            serialState->ldmState.bucketOffsets = (BYTE*)ZSTD_customMalloc(numBuckets, cMem);
         }
         if (!serialState->ldmState.hashTable || !serialState->ldmState.bucketOffsets)
             return 1;
         /* Zero the tables */
-
-
+        ZSTD_memset(serialState->ldmState.hashTable, 0, hashSize);
+        ZSTD_memset(serialState->ldmState.bucketOffsets, 0, numBuckets);
+
+        /* Update window state and fill hash table with dict */
+        serialState->ldmState.loadedDictEnd = 0;
+        if (dictSize > 0) {
+            if (dictContentType == ZSTD_dct_rawContent) {
+                BYTE const* const dictEnd = (const BYTE*)dict + dictSize;
+                ZSTD_window_update(&serialState->ldmState.window, dict, dictSize, /* forceNonContiguous */ 0);
+                ZSTD_ldm_fillHashTable(&serialState->ldmState, (const BYTE*)dict, dictEnd, &params.ldmParams);
+                serialState->ldmState.loadedDictEnd = params.forceWindow ? 0 : (U32)(dictEnd - serialState->ldmState.window.base);
+            } else {
+                /* don't even load anything */
+            }
+        }
+
+        /* Initialize serialState's copy of ldmWindow. */
+        serialState->ldmWindow = serialState->ldmState.window;
     }
+
     serialState->params = params;
     serialState->params.jobSize = (U32)jobSize;
     return 0;
@@ -515,7 +532,7 @@ static int ZSTDMT_serialState_reset(serialState_t* serialState, ZSTDMT_seqPool*
 static int ZSTDMT_serialState_init(serialState_t* serialState)
 {
     int initError = 0;
-
+    ZSTD_memset(serialState, 0, sizeof(*serialState));
     initError |= ZSTD_pthread_mutex_init(&serialState->mutex, NULL);
     initError |= ZSTD_pthread_cond_init(&serialState->cond, NULL);
     initError |= ZSTD_pthread_mutex_init(&serialState->ldmWindowMutex, NULL);
@@ -530,8 +547,8 @@ static void ZSTDMT_serialState_free(serialState_t* serialState)
     ZSTD_pthread_cond_destroy(&serialState->cond);
     ZSTD_pthread_mutex_destroy(&serialState->ldmWindowMutex);
     ZSTD_pthread_cond_destroy(&serialState->ldmWindowCond);
-
-
+    ZSTD_customFree(serialState->ldmState.hashTable, cMem);
+    ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem);
 }
 
 static void ZSTDMT_serialState_update(serialState_t* serialState,
@@ -552,7 +569,7 @@ static void ZSTDMT_serialState_update(serialState_t* serialState,
         assert(seqStore.seq != NULL && seqStore.pos == 0 &&
                seqStore.size == 0 && seqStore.capacity > 0);
         assert(src.size <= serialState->params.jobSize);
-        ZSTD_window_update(&serialState->ldmState.window, src.start, src.size);
+        ZSTD_window_update(&serialState->ldmState.window, src.start, src.size, /* forceNonContiguous */ 0);
         error = ZSTD_ldm_generateSequences(
             &serialState->ldmState, &seqStore,
             &serialState->params.ldmParams, src.start, src.size);
@@ -664,11 +681,13 @@ static void ZSTDMT_compressionJob(void* jobDescription)
     if (job->jobID != 0) jobParams.fParams.checksumFlag = 0;
     /* Don't run LDM for the chunks, since we handle it externally */
     jobParams.ldmParams.enableLdm = 0;
+    /* Correct nbWorkers to 0. */
+    jobParams.nbWorkers = 0;
 
 
     /* init */
     if (job->cdict) {
-        size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, jobParams, job->fullFrameSize);
+        size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, &jobParams, job->fullFrameSize);
         assert(job->firstJob); /* only allowed for first job */
         if (ZSTD_isError(initError)) JOB_ERROR(initError);
     } else { /* srcStart points at reloaded section */
@@ -676,11 +695,15 @@ static void ZSTDMT_compressionJob(void* jobDescription)
         {   size_t const forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob);
             if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError);
         }
+        if (!job->firstJob) {
+            size_t const err = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_deterministicRefPrefix, 0);
+            if (ZSTD_isError(err)) JOB_ERROR(err);
+        }
         {   size_t const initError = ZSTD_compressBegin_advanced_internal(cctx,
                         job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in "content-only" mode (no header analysis) */
                         ZSTD_dtlm_fast,
                         NULL, /*cdict*/
-                        jobParams, pledgedSrcSize);
+                        &jobParams, pledgedSrcSize);
             if (ZSTD_isError(initError)) JOB_ERROR(initError);
     }   }
 
@@ -731,6 +754,13 @@ static void ZSTDMT_compressionJob(void* jobDescription)
         if (ZSTD_isError(cSize)) JOB_ERROR(cSize);
         lastCBlockSize = cSize;
     }   }
+    if (!job->firstJob) {
+        /* Double check that we don't have an ext-dict, because then our
+         * repcode invalidation doesn't work.
+         */
+        assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
+    }
+    ZSTD_CCtx_trace(cctx, 0);
 
 _endJob:
     ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize);
@@ -798,7 +828,6 @@ struct ZSTDMT_CCtx_s {
     roundBuff_t roundBuff;
     serialState_t serial;
     rsyncState_t rsync;
-    unsigned singleBlockingThread;
     unsigned jobIDMask;
     unsigned doneJobID;
     unsigned nextJobID;
@@ -810,6 +839,7 @@ struct ZSTDMT_CCtx_s {
     ZSTD_customMem cMem;
     ZSTD_CDict* cdictLocal;
     const ZSTD_CDict* cdict;
+    unsigned providedFactory: 1;
 };
 
 static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZSTD_customMem cMem)
@@ -820,7 +850,7 @@ static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZS
         ZSTD_pthread_mutex_destroy(&jobTable[jobNb].job_mutex);
         ZSTD_pthread_cond_destroy(&jobTable[jobNb].job_cond);
     }
-
+    ZSTD_customFree(jobTable, cMem);
 }
 
 /* ZSTDMT_allocJobsTable()
@@ -832,7 +862,7 @@ static ZSTDMT_jobDescription* ZSTDMT_createJobsTable(U32* nbJobsPtr, ZSTD_custom
     U32 const nbJobs = 1 << nbJobsLog2;
     U32 jobNb;
     ZSTDMT_jobDescription* const jobTable = (ZSTDMT_jobDescription*)
-
+        ZSTD_customCalloc(nbJobs * sizeof(ZSTDMT_jobDescription), cMem);
     int initError = 0;
     if (jobTable==NULL) return NULL;
     *nbJobsPtr = nbJobs;
@@ -863,12 +893,12 @@ static size_t ZSTDMT_expandJobsTable (ZSTDMT_CCtx* mtctx, U32 nbWorkers) {
 
 /* ZSTDMT_CCtxParam_setNbWorkers():
  * Internal use only */
-size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers)
+static size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers)
 {
     return ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, (int)nbWorkers);
 }
 
-MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem)
+MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool* pool)
 {
     ZSTDMT_CCtx* mtctx;
     U32 nbJobs = nbWorkers + 2;
@@ -881,12 +911,19 @@ MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers,
         /* invalid custom allocator */
         return NULL;
 
-    mtctx = (ZSTDMT_CCtx*)
+    mtctx = (ZSTDMT_CCtx*) ZSTD_customCalloc(sizeof(ZSTDMT_CCtx), cMem);
     if (!mtctx) return NULL;
     ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
     mtctx->cMem = cMem;
     mtctx->allJobsCompleted = 1;
-
+    if (pool != NULL) {
+        mtctx->factory = pool;
+        mtctx->providedFactory = 1;
+    }
+    else {
+        mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem);
+        mtctx->providedFactory = 0;
+    }
     mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem);
     assert(nbJobs > 0); assert((nbJobs & (nbJobs - 1)) == 0); /* ensure nbJobs is a power of 2 */
     mtctx->jobIDMask = nbJobs - 1;
@@ -903,22 +940,18 @@ MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers,
     return mtctx;
 }
 
-ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem)
+ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool* pool)
 {
 #ifdef ZSTD_MULTITHREAD
-    return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem);
+    return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem, pool);
 #else
     (void)nbWorkers;
     (void)cMem;
+    (void)pool;
     return NULL;
 #endif
 }
 
-ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers)
-{
-    return ZSTDMT_createCCtx_advanced(nbWorkers, ZSTD_defaultCMem);
-}
-
 
 /* ZSTDMT_releaseAllJobResources() :
  * note : ensure all workers are killed first ! */
@@ -927,12 +960,18 @@ static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx)
     unsigned jobID;
     DEBUGLOG(3, "ZSTDMT_releaseAllJobResources");
     for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) {
+        /* Copy the mutex/cond out */
+        ZSTD_pthread_mutex_t const mutex = mtctx->jobs[jobID].job_mutex;
+        ZSTD_pthread_cond_t const cond = mtctx->jobs[jobID].job_cond;
+
         DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start);
         ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);
-
-
+
+        /* Clear the job description, but keep the mutex/cond */
+        ZSTD_memset(&mtctx->jobs[jobID], 0, sizeof(mtctx->jobs[jobID]));
+        mtctx->jobs[jobID].job_mutex = mutex;
+        mtctx->jobs[jobID].job_cond = cond;
     }
-    memset(mtctx->jobs, 0, (mtctx->jobIDMask+1)*sizeof(ZSTDMT_jobDescription));
     mtctx->inBuff.buffer = g_nullBuffer;
     mtctx->inBuff.filled = 0;
     mtctx->allJobsCompleted = 1;
@@ -956,7 +995,8 @@ static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* mtctx)
 size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx)
 {
     if (mtctx==NULL) return 0; /* compatible with free on NULL */
-
+    if (!mtctx->providedFactory)
+        POOL_free(mtctx->factory); /* stop and free worker threads */
     ZSTDMT_releaseAllJobResources(mtctx); /* release job resources into pools first */
     ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
     ZSTDMT_freeBufferPool(mtctx->bufPool);
@@ -965,8 +1005,8 @@ size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx)
     ZSTDMT_serialState_free(&mtctx->serial);
     ZSTD_freeCDict(mtctx->cdictLocal);
     if (mtctx->roundBuff.buffer)
-
-
+        ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem);
+    ZSTD_customFree(mtctx, mtctx->cMem);
     return 0;
 }
 
@@ -983,72 +1023,13 @@ size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx)
             + mtctx->roundBuff.capacity;
 }
 
-/* Internal only */
-size_t
-ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params,
-                                   ZSTDMT_parameter parameter,
-                                   int value)
-{
-    DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter");
-    switch(parameter)
-    {
-    case ZSTDMT_p_jobSize :
-        DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter : set jobSize to %i", value);
-        return ZSTD_CCtxParams_setParameter(params, ZSTD_c_jobSize, value);
-    case ZSTDMT_p_overlapLog :
-        DEBUGLOG(4, "ZSTDMT_p_overlapLog : %i", value);
-        return ZSTD_CCtxParams_setParameter(params, ZSTD_c_overlapLog, value);
-    case ZSTDMT_p_rsyncable :
-        DEBUGLOG(4, "ZSTD_p_rsyncable : %i", value);
-        return ZSTD_CCtxParams_setParameter(params, ZSTD_c_rsyncable, value);
-    default :
-        return ERROR(parameter_unsupported);
-    }
-}
-
-size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value)
-{
-    DEBUGLOG(4, "ZSTDMT_setMTCtxParameter");
-    return ZSTDMT_CCtxParam_setMTCtxParameter(&mtctx->params, parameter, value);
-}
-
-size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value)
-{
-    switch (parameter) {
-    case ZSTDMT_p_jobSize:
-        return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_jobSize, value);
-    case ZSTDMT_p_overlapLog:
-        return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_overlapLog, value);
-    case ZSTDMT_p_rsyncable:
-        return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_rsyncable, value);
-    default:
-        return ERROR(parameter_unsupported);
-    }
-}
-
-/* Sets parameters relevant to the compression job,
- * initializing others to default values. */
-static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(ZSTD_CCtx_params const params)
-{
-    ZSTD_CCtx_params jobParams = params;
-    /* Clear parameters related to multithreading */
-    jobParams.forceWindow = 0;
-    jobParams.nbWorkers = 0;
-    jobParams.jobSize = 0;
-    jobParams.overlapLog = 0;
-    jobParams.rsyncable = 0;
-    memset(&jobParams.ldmParams, 0, sizeof(ldmParams_t));
-    memset(&jobParams.customMem, 0, sizeof(ZSTD_customMem));
-    return jobParams;
-}
-
 
 /* ZSTDMT_resize() :
  * @return : error code if fails, 0 on success */
 static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers)
 {
     if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation);
-    FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) );
+    FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) , "");
     mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, nbWorkers);
     if (mtctx->bufPool == NULL) return ERROR(memory_allocation);
     mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers);
@@ -1070,7 +1051,7 @@ void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_p
     DEBUGLOG(5, "ZSTDMT_updateCParams_whileCompressing (level:%i)",
                 compressionLevel);
     mtctx->params.compressionLevel = compressionLevel;
-    {   ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, 0,
+    {   ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
         cParams.windowLog = saved_wlog;
         mtctx->params.cParams = cParams;
     }
@@ -1151,16 +1132,16 @@ size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx)
 /* ===== Multi-threaded compression ===== */
 /* ------------------------------------------ */
 
-static unsigned ZSTDMT_computeTargetJobLog(ZSTD_CCtx_params
+static unsigned ZSTDMT_computeTargetJobLog(const ZSTD_CCtx_params* params)
 {
     unsigned jobLog;
-    if (params
+    if (params->ldmParams.enableLdm) {
         /* In Long Range Mode, the windowLog is typically oversized.
          * In which case, it's preferable to determine the jobSize
-         * based on
-        jobLog = MAX(21, params
+         * based on cycleLog instead. */
+        jobLog = MAX(21, ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy) + 3);
     } else {
-        jobLog = MAX(20, params
+        jobLog = MAX(20, params->cParams.windowLog + 2);
     }
     return MIN(jobLog, (unsigned)ZSTDMT_JOBLOG_MAX);
 }
@@ -1193,191 +1174,25 @@ static int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat)
     return ovlog;
 }
 
-static size_t ZSTDMT_computeOverlapSize(ZSTD_CCtx_params
+static size_t ZSTDMT_computeOverlapSize(const ZSTD_CCtx_params* params)
 {
-    int const overlapRLog = 9 - ZSTDMT_overlapLog(params
-    int ovLog = (overlapRLog >= 8) ? 0 : (params
+    int const overlapRLog = 9 - ZSTDMT_overlapLog(params->overlapLog, params->cParams.strategy);
+    int ovLog = (overlapRLog >= 8) ? 0 : (params->cParams.windowLog - overlapRLog);
     assert(0 <= overlapRLog && overlapRLog <= 8);
-    if (params
+    if (params->ldmParams.enableLdm) {
         /* In Long Range Mode, the windowLog is typically oversized.
          * In which case, it's preferable to determine the jobSize
          * based on chainLog instead.
          * Then, ovLog becomes a fraction of the jobSize, rather than windowSize */
-        ovLog = MIN(params
+        ovLog = MIN(params->cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2)
                 - overlapRLog;
     }
     assert(0 <= ovLog && ovLog <= ZSTD_WINDOWLOG_MAX);
-    DEBUGLOG(4, "overlapLog : %i", params
+    DEBUGLOG(4, "overlapLog : %i", params->overlapLog);
     DEBUGLOG(4, "overlap size : %i", 1 << ovLog);
     return (ovLog==0) ? 0 : (size_t)1 << ovLog;
 }
 
-static unsigned
-ZSTDMT_computeNbJobs(ZSTD_CCtx_params params, size_t srcSize, unsigned nbWorkers)
-{
-    assert(nbWorkers>0);
-    {   size_t const jobSizeTarget = (size_t)1 << ZSTDMT_computeTargetJobLog(params);
-        size_t const jobMaxSize = jobSizeTarget << 2;
-        size_t const passSizeMax = jobMaxSize * nbWorkers;
-        unsigned const multiplier = (unsigned)(srcSize / passSizeMax) + 1;
-        unsigned const nbJobsLarge = multiplier * nbWorkers;
-        unsigned const nbJobsMax = (unsigned)(srcSize / jobSizeTarget) + 1;
-        unsigned const nbJobsSmall = MIN(nbJobsMax, nbWorkers);
-        return (multiplier>1) ? nbJobsLarge : nbJobsSmall;
-}   }
-
-/* ZSTDMT_compress_advanced_internal() :
- * This is a blocking function : it will only give back control to caller after finishing its compression job.
- */
-static size_t ZSTDMT_compress_advanced_internal(
-                ZSTDMT_CCtx* mtctx,
-                void* dst, size_t dstCapacity,
-          const void* src, size_t srcSize,
-          const ZSTD_CDict* cdict,
-                ZSTD_CCtx_params params)
-{
-    ZSTD_CCtx_params const jobParams = ZSTDMT_initJobCCtxParams(params);
-    size_t const overlapSize = ZSTDMT_computeOverlapSize(params);
-    unsigned const nbJobs = ZSTDMT_computeNbJobs(params, srcSize, params.nbWorkers);
-    size_t const proposedJobSize = (srcSize + (nbJobs-1)) / nbJobs;
-    size_t const avgJobSize = (((proposedJobSize-1) & 0x1FFFF) < 0x7FFF) ? proposedJobSize + 0xFFFF : proposedJobSize;   /* avoid too small last block */
-    const char* const srcStart = (const char*)src;
-    size_t remainingSrcSize = srcSize;
-    unsigned const compressWithinDst = (dstCapacity >= ZSTD_compressBound(srcSize)) ? nbJobs : (unsigned)(dstCapacity / ZSTD_compressBound(avgJobSize));  /* presumes avgJobSize >= 256 KB, which should be the case */
-    size_t frameStartPos = 0, dstBufferPos = 0;
-    assert(jobParams.nbWorkers == 0);
-    assert(mtctx->cctxPool->totalCCtx == params.nbWorkers);
-
-    params.jobSize = (U32)avgJobSize;
-    DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: nbJobs=%2u (rawSize=%u bytes; fixedSize=%u) ",
-                nbJobs, (U32)proposedJobSize, (U32)avgJobSize);
-
-    if ((nbJobs==1) | (params.nbWorkers<=1)) {   /* fallback to single-thread mode : this is a blocking invocation anyway */
-        ZSTD_CCtx* const cctx = mtctx->cctxPool->cctx[0];
-        DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: fallback to single-thread mode");
-        if (cdict) return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, jobParams.fParams);
-        return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, NULL, 0, jobParams);
-    }
-
-    assert(avgJobSize >= 256 KB);  /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), required to compress directly into Dst (no additional buffer) */
-    ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(avgJobSize) );
-    if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, avgJobSize))
-        return ERROR(memory_allocation);
-
-    FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbJobs) );  /* only expands if necessary */
-
-    {   unsigned u;
-        for (u=0; u<nbJobs; u++) {
-            size_t const jobSize = MIN(remainingSrcSize, avgJobSize);
-            size_t const dstBufferCapacity = ZSTD_compressBound(jobSize);
-            buffer_t const dstAsBuffer = { (char*)dst + dstBufferPos, dstBufferCapacity };
-            buffer_t const dstBuffer = u < compressWithinDst ? dstAsBuffer : g_nullBuffer;
-            size_t dictSize = u ? overlapSize : 0;
-
-            mtctx->jobs[u].prefix.start = srcStart + frameStartPos - dictSize;
-            mtctx->jobs[u].prefix.size = dictSize;
-            mtctx->jobs[u].src.start = srcStart + frameStartPos;
-            mtctx->jobs[u].src.size = jobSize; assert(jobSize > 0);  /* avoid job.src.size == 0 */
-            mtctx->jobs[u].consumed = 0;
-            mtctx->jobs[u].cSize = 0;
-            mtctx->jobs[u].cdict = (u==0) ? cdict : NULL;
-            mtctx->jobs[u].fullFrameSize = srcSize;
-            mtctx->jobs[u].params = jobParams;
-            /* do not calculate checksum within sections, but write it in header for first section */
-            mtctx->jobs[u].dstBuff = dstBuffer;
-            mtctx->jobs[u].cctxPool = mtctx->cctxPool;
-            mtctx->jobs[u].bufPool = mtctx->bufPool;
-            mtctx->jobs[u].seqPool = mtctx->seqPool;
-            mtctx->jobs[u].serial = &mtctx->serial;
-            mtctx->jobs[u].jobID = u;
-            mtctx->jobs[u].firstJob = (u==0);
-            mtctx->jobs[u].lastJob = (u==nbJobs-1);
-
-            DEBUGLOG(5, "ZSTDMT_compress_advanced_internal: posting job %u  (%u bytes)", u, (U32)jobSize);
-            DEBUG_PRINTHEX(6, mtctx->jobs[u].prefix.start, 12);
-            POOL_add(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[u]);
-
-            frameStartPos += jobSize;
-            dstBufferPos += dstBufferCapacity;
-            remainingSrcSize -= jobSize;
-    }   }
-
-    /* collect result */
-    {   size_t error = 0, dstPos = 0;
-        unsigned jobID;
-        for (jobID=0; jobID<nbJobs; jobID++) {
-            DEBUGLOG(5, "waiting for job %u ", jobID);
-            ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex);
-            while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) {
-                DEBUGLOG(5, "waiting for jobCompleted signal from job %u", jobID);
-                ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex);
-            }
-            ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex);
-            DEBUGLOG(5, "ready to write job %u ", jobID);
-
-            {   size_t const cSize = mtctx->jobs[jobID].cSize;
-                if (ZSTD_isError(cSize)) error = cSize;
-                if ((!error) && (dstPos + cSize > dstCapacity)) error = ERROR(dstSize_tooSmall);
-                if (jobID) {   /* note : job 0 is written directly at dst, which is correct position */
-                    if (!error)
-                        memmove((char*)dst + dstPos, mtctx->jobs[jobID].dstBuff.start, cSize);  /* may overlap when job compressed within dst */
-                    if (jobID >= compressWithinDst) {  /* job compressed into its own buffer, which must be released */
-                        DEBUGLOG(5, "releasing buffer %u>=%u", jobID, compressWithinDst);
-                        ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);
-                }   }
-                mtctx->jobs[jobID].dstBuff = g_nullBuffer;
-                mtctx->jobs[jobID].cSize = 0;
-                dstPos += cSize ;
-            }
-        }  /* for (jobID=0; jobID<nbJobs; jobID++) */
-
-        DEBUGLOG(4, "checksumFlag : %u ", params.fParams.checksumFlag);
-        if (params.fParams.checksumFlag) {
-            U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState);
-            if (dstPos + 4 > dstCapacity) {
-                error = ERROR(dstSize_tooSmall);
-            } else {
-                DEBUGLOG(4, "writing checksum : %08X \n", checksum);
-                MEM_writeLE32((char*)dst + dstPos, checksum);
-                dstPos += 4;
-        }   }
-
-        if (!error) DEBUGLOG(4, "compressed size : %u  ", (U32)dstPos);
-        return error ? error : dstPos;
-    }
-}
-
-size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
-                                void* dst, size_t dstCapacity,
-                          const void* src, size_t srcSize,
-                          const ZSTD_CDict* cdict,
-                                ZSTD_parameters params,
-                                int overlapLog)
-{
-    ZSTD_CCtx_params cctxParams = mtctx->params;
-    cctxParams.cParams = params.cParams;
-    cctxParams.fParams = params.fParams;
-    assert(ZSTD_OVERLAPLOG_MIN <= overlapLog && overlapLog <= ZSTD_OVERLAPLOG_MAX);
-    cctxParams.overlapLog = overlapLog;
-    return ZSTDMT_compress_advanced_internal(mtctx,
-                                             dst, dstCapacity,
-                                             src, srcSize,
-                                             cdict, cctxParams);
-}
-
-
-size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
-                           void* dst, size_t dstCapacity,
-                     const void* src, size_t srcSize,
-                           int compressionLevel)
-{
-    ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, 0);
-    int const overlapLog = ZSTDMT_overlapLog_default(params.cParams.strategy);
-    params.fParams.contentSizeFlag = 1;
-    return ZSTDMT_compress_advanced(mtctx, dst, dstCapacity, src, srcSize, NULL, params, overlapLog);
-}
-
-
 /* ====================================== */
 /* ======= Streaming API ======= */
 /* ====================================== */
@@ -1397,21 +1212,11 @@ size_t ZSTDMT_initCStream_internal(
 
     /* init */
     if (params.nbWorkers != mtctx->params.nbWorkers)
-        FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, params.nbWorkers) );
+        FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, params.nbWorkers) , "");
 
     if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN;
     if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = (size_t)ZSTDMT_JOBSIZE_MAX;
 
-    mtctx->singleBlockingThread = (pledgedSrcSize <= ZSTDMT_JOBSIZE_MIN);  /* do not trigger multi-threading when srcSize is too small */
-    if (mtctx->singleBlockingThread) {
-        ZSTD_CCtx_params const singleThreadParams = ZSTDMT_initJobCCtxParams(params);
-        DEBUGLOG(5, "ZSTDMT_initCStream_internal: switch to single blocking thread mode");
-        assert(singleThreadParams.nbWorkers == 0);
-        return ZSTD_initCStream_internal(mtctx->cctxPool->cctx[0],
-                                         dict, dictSize, cdict,
-                                         singleThreadParams, pledgedSrcSize);
-    }
-
     DEBUGLOG(4, "ZSTDMT_initCStream_internal: %u workers", params.nbWorkers);
 
     if (mtctx->allJobsCompleted == 0) {   /* previous compression not correctly finished */
@@ -1435,19 +1240,18 @@ size_t ZSTDMT_initCStream_internal(
         mtctx->cdict = cdict;
     }
 
-    mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(params);
+    mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(&params);
     DEBUGLOG(4, "overlapLog=%i => %u KB", params.overlapLog, (U32)(mtctx->targetPrefixSize>>10));
     mtctx->targetSectionSize = params.jobSize;
     if (mtctx->targetSectionSize == 0) {
-        mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(params);
+        mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(&params);
     }
     assert(mtctx->targetSectionSize <= (size_t)ZSTDMT_JOBSIZE_MAX);
 
     if (params.rsyncable) {
         /* Aim for the targetsectionSize as the average job size. */
-        U32 const
-        U32 const rsyncBits = ZSTD_highbit32(
-        assert(jobSizeMB >= 1);
+        U32 const jobSizeKB = (U32)(mtctx->targetSectionSize >> 10);
+        U32 const rsyncBits = (assert(jobSizeKB >= 1), ZSTD_highbit32(jobSizeKB) + 10);
         DEBUGLOG(4, "rsyncLog = %u", rsyncBits);
         mtctx->rsync.hash = 0;
         mtctx->rsync.hitMask = (1ULL << rsyncBits) - 1;
@@ -1474,8 +1278,8 @@ size_t ZSTDMT_initCStream_internal(
         size_t const capacity = MAX(windowSize, sectionsSize) + slackSize;
         if (mtctx->roundBuff.capacity < capacity) {
             if (mtctx->roundBuff.buffer)
-
-            mtctx->roundBuff.buffer = (BYTE*)
+                ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem);
+            mtctx->roundBuff.buffer = (BYTE*)ZSTD_customMalloc(capacity, mtctx->cMem);
             if (mtctx->roundBuff.buffer == NULL) {
                 mtctx->roundBuff.capacity = 0;
                 return ERROR(memory_allocation);
@@ -1494,58 +1298,12 @@ size_t ZSTDMT_initCStream_internal(
     mtctx->allJobsCompleted = 0;
     mtctx->consumed = 0;
     mtctx->produced = 0;
-    if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, mtctx->targetSectionSize
+    if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, mtctx->targetSectionSize,
+                                 dict, dictSize, dictContentType))
         return ERROR(memory_allocation);
     return 0;
 }
 
-size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
-                             const void* dict, size_t dictSize,
-                                   ZSTD_parameters params,
-                                   unsigned long long pledgedSrcSize)
-{
-    ZSTD_CCtx_params cctxParams = mtctx->params;  /* retrieve sticky params */
-    DEBUGLOG(4, "ZSTDMT_initCStream_advanced (pledgedSrcSize=%u)", (U32)pledgedSrcSize);
-    cctxParams.cParams = params.cParams;
-    cctxParams.fParams = params.fParams;
-    return ZSTDMT_initCStream_internal(mtctx, dict, dictSize, ZSTD_dct_auto, NULL,
-                                       cctxParams, pledgedSrcSize);
-}
-
-size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
-                               const ZSTD_CDict* cdict,
-                                     ZSTD_frameParameters fParams,
-                                     unsigned long long pledgedSrcSize)
-{
-    ZSTD_CCtx_params cctxParams = mtctx->params;
-    if (cdict==NULL) return ERROR(dictionary_wrong);   /* method incompatible with NULL cdict */
-    cctxParams.cParams = ZSTD_getCParamsFromCDict(cdict);
-    cctxParams.fParams = fParams;
-    return ZSTDMT_initCStream_internal(mtctx, NULL, 0 /*dictSize*/, ZSTD_dct_auto, cdict,
-                                       cctxParams, pledgedSrcSize);
-}
-
-
-/* ZSTDMT_resetCStream() :
- * pledgedSrcSize can be zero == unknown (for the time being)
- * prefer using ZSTD_CONTENTSIZE_UNKNOWN,
- * as `0` might mean "empty" in the future */
-size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize)
-{
-    if (!pledgedSrcSize) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
-    return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dct_auto, 0, mtctx->params,
-                                       pledgedSrcSize);
-}
-
-size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel) {
-    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0);
-    ZSTD_CCtx_params cctxParams = mtctx->params;   /* retrieve sticky params */
-    DEBUGLOG(4, "ZSTDMT_initCStream (cLevel=%i)", compressionLevel);
-    cctxParams.cParams = params.cParams;
-    cctxParams.fParams = params.fParams;
-    return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dct_auto, NULL, cctxParams, ZSTD_CONTENTSIZE_UNKNOWN);
-}
-
 
 /* ZSTDMT_writeLastEmptyBlock()
  * Write a single empty block with an end-of-frame to finish a frame.
@@ -1708,9 +1466,11 @@ static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, u
         assert(mtctx->doneJobID < mtctx->nextJobID);
         assert(cSize >= mtctx->jobs[wJobID].dstFlushed);
         assert(mtctx->jobs[wJobID].dstBuff.start != NULL);
-
-
-
+        if (toFlush > 0) {
+            ZSTD_memcpy((char*)output->dst + output->pos,
+                (const char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed,
+                toFlush);
+        }
         output->pos += toFlush;
         mtctx->jobs[wJobID].dstFlushed += toFlush;   /* can write : this value is only used by mtctx */
 
@@ -1780,7 +1540,7 @@ static int ZSTDMT_isOverlapped(buffer_t buffer, range_t range)
     BYTE const* const bufferStart = (BYTE const*)buffer.start;
     BYTE const* const bufferEnd = bufferStart + buffer.capacity;
    BYTE const* const rangeStart = (BYTE const*)range.start;
-    BYTE const* const rangeEnd = rangeStart + range.size;
+    BYTE const* const rangeEnd = range.size != 0 ? rangeStart + range.size : rangeStart;
 
     if (rangeStart == NULL || bufferStart == NULL)
         return 0;
@@ -1861,7 +1621,7 @@ static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx)
             return 0;
         }
         ZSTDMT_waitForLdmComplete(mtctx, buffer);
-
+        ZSTD_memmove(start, mtctx->inBuff.prefix.start, prefixSize);
         mtctx->inBuff.prefix.start = start;
         mtctx->roundBuff.pos = prefixSize;
     }
@@ -1935,6 +1695,16 @@ findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input)
         pos = 0;
         prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH;
         hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH);
+        if ((hash & hitMask) == hitMask) {
+            /* We're already at a sync point so don't load any more until
+             * we're able to flush this sync point.
+             * This likely happened because the job table was full so we
+             * couldn't add our job.
+             */
+            syncPoint.toLoad = 0;
+            syncPoint.flush = 1;
+            return syncPoint;
+        }
     } else {
         /* We don't have enough bytes buffered to initialize the hash, but
          * we know we have at least RSYNC_LENGTH bytes total.
@@ -1989,34 +1759,11 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
     assert(output->pos <= output->size);
     assert(input->pos <= input->size);
 
-    if (mtctx->singleBlockingThread) {  /* delegate to single-thread (synchronous) */
-        return ZSTD_compressStream2(mtctx->cctxPool->cctx[0], output, input, endOp);
-    }
-
     if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) {
         /* current frame being ended. Only flush/end are allowed */
         return ERROR(stage_wrong);
     }
 
-    /* single-pass shortcut (note : synchronous-mode) */
-    if ( (!mtctx->params.rsyncable)   /* rsyncable mode is disabled */
-      && (mtctx->nextJobID == 0)      /* just started */
-      && (mtctx->inBuff.filled == 0)  /* nothing buffered */
-      && (!mtctx->jobReady)           /* no job already created */
-      && (endOp == ZSTD_e_end)        /* end order */
-      && (output->size - output->pos >= ZSTD_compressBound(input->size - input->pos)) ) { /* enough space in dst */
-        size_t const cSize = ZSTDMT_compress_advanced_internal(mtctx,
-                (char*)output->dst + output->pos, output->size - output->pos,
-                (const char*)input->src + input->pos, input->size - input->pos,
-                mtctx->cdict, mtctx->params);
-        if (ZSTD_isError(cSize)) return cSize;
-        input->pos = input->size;
-        output->pos += cSize;
-        mtctx->allJobsCompleted = 1;
-        mtctx->frameEnded = 1;
-        return 0;
-    }
-
     /* fill input buffer */
     if ( (!mtctx->jobReady)
       && (input->size > input->pos) ) {   /* support NULL input */
@@ -2039,13 +1786,21 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
             assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize);
             DEBUGLOG(5, "ZSTDMT_compressStream_generic: adding %u bytes on top of %u to buffer of size %u",
                         (U32)syncPoint.toLoad, (U32)mtctx->inBuff.filled, (U32)mtctx->targetSectionSize);
-
+            ZSTD_memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, syncPoint.toLoad);
             input->pos += syncPoint.toLoad;
             mtctx->inBuff.filled += syncPoint.toLoad;
            forwardInputProgress = syncPoint.toLoad>0;
         }
-
-
+    }
+    if ((input->pos < input->size) && (endOp == ZSTD_e_end)) {
+        /* Can't end yet because the input is not fully consumed.
+         * We are in one of these cases:
+         * - mtctx->inBuff is NULL & empty: we couldn't get an input buffer so don't create a new job.
+         * - We filled the input buffer: flush this job but don't end the frame.
+         * - We hit a synchronization point: flush this job but don't end the frame.
+         */
+        assert(mtctx->inBuff.filled == 0 || mtctx->inBuff.filled == mtctx->targetSectionSize || mtctx->params.rsyncable);
+        endOp = ZSTD_e_flush;
     }
 
     if ( (mtctx->jobReady)
@@ -2054,7 +1809,7 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
       || ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) {   /* must finish the frame with a zero-size block */
         size_t const jobSize = mtctx->inBuff.filled;
         assert(mtctx->inBuff.filled <= mtctx->targetSectionSize);
-        FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) );
+        FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) , "");
     }
 
     /* check for potential compressed data ready to be flushed */
@@ -2064,47 +1819,3 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
         return remainingToFlush;
     }
 }
-
-
-size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
-{
-    FORWARD_IF_ERROR( ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_continue) );
-
-    /* recommended next input size : fill current input buffer */
-    return mtctx->targetSectionSize - mtctx->inBuff.filled;   /* note : could be zero when input buffer is fully filled and no more availability to create new job */
-}
-
-
-static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_EndDirective endFrame)
-{
-    size_t const srcSize = mtctx->inBuff.filled;
-    DEBUGLOG(5, "ZSTDMT_flushStream_internal");
-
-    if ( mtctx->jobReady     /* one job ready for a worker to pick up */
-      || (srcSize > 0)       /* still some data within input buffer */
-      || ((endFrame==ZSTD_e_end) && !mtctx->frameEnded)) {  /* need a last 0-size block to end frame */
-        DEBUGLOG(5, "ZSTDMT_flushStream_internal : create a new job (%u bytes, end:%u)",
-                    (U32)srcSize, (U32)endFrame);
-        FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, srcSize, endFrame) );
-    }
-
-    /* check if there is any data available to flush */
-    return ZSTDMT_flushProduced(mtctx, output, 1 /* blockToFlush */, endFrame);
-}
-
-
-size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output)
-{
-    DEBUGLOG(5, "ZSTDMT_flushStream");
-    if (mtctx->singleBlockingThread)
-        return ZSTD_flushStream(mtctx->cctxPool->cctx[0], output);
-    return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_flush);
-}
-
-size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output)
-{
-    DEBUGLOG(4, "ZSTDMT_endStream");
-    if (mtctx->singleBlockingThread)
-        return ZSTD_endStream(mtctx->cctxPool->cctx[0], output);
-    return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_end);
-}