extzstd 0.3.1 → 0.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +28 -14
- data/contrib/zstd/CHANGELOG +114 -56
- data/contrib/zstd/CONTRIBUTING.md +14 -0
- data/contrib/zstd/Makefile +37 -31
- data/contrib/zstd/README.md +6 -0
- data/contrib/zstd/appveyor.yml +4 -1
- data/contrib/zstd/lib/Makefile +231 -134
- data/contrib/zstd/lib/README.md +28 -0
- data/contrib/zstd/lib/common/bitstream.h +24 -15
- data/contrib/zstd/lib/common/compiler.h +116 -3
- data/contrib/zstd/lib/common/cpu.h +0 -2
- data/contrib/zstd/lib/common/debug.h +11 -18
- data/contrib/zstd/lib/common/entropy_common.c +188 -42
- data/contrib/zstd/lib/common/error_private.c +1 -0
- data/contrib/zstd/lib/common/error_private.h +1 -1
- data/contrib/zstd/lib/common/fse.h +38 -11
- data/contrib/zstd/lib/common/fse_decompress.c +123 -16
- data/contrib/zstd/lib/common/huf.h +26 -5
- data/contrib/zstd/lib/common/mem.h +66 -93
- data/contrib/zstd/lib/common/pool.c +22 -16
- data/contrib/zstd/lib/common/pool.h +1 -1
- data/contrib/zstd/lib/common/threading.c +6 -5
- data/contrib/zstd/lib/common/xxhash.c +18 -56
- data/contrib/zstd/lib/common/xxhash.h +1 -1
- data/contrib/zstd/lib/common/zstd_common.c +9 -9
- data/contrib/zstd/lib/common/zstd_deps.h +111 -0
- data/contrib/zstd/lib/common/zstd_errors.h +1 -0
- data/contrib/zstd/lib/common/zstd_internal.h +89 -58
- data/contrib/zstd/lib/compress/fse_compress.c +30 -23
- data/contrib/zstd/lib/compress/hist.c +26 -28
- data/contrib/zstd/lib/compress/hist.h +1 -1
- data/contrib/zstd/lib/compress/huf_compress.c +210 -95
- data/contrib/zstd/lib/compress/zstd_compress.c +1339 -409
- data/contrib/zstd/lib/compress/zstd_compress_internal.h +119 -41
- data/contrib/zstd/lib/compress/zstd_compress_literals.c +4 -4
- data/contrib/zstd/lib/compress/zstd_compress_sequences.c +17 -3
- data/contrib/zstd/lib/compress/zstd_compress_superblock.c +23 -19
- data/contrib/zstd/lib/compress/zstd_cwksp.h +60 -24
- data/contrib/zstd/lib/compress/zstd_double_fast.c +22 -22
- data/contrib/zstd/lib/compress/zstd_fast.c +19 -19
- data/contrib/zstd/lib/compress/zstd_lazy.c +351 -77
- data/contrib/zstd/lib/compress/zstd_lazy.h +20 -0
- data/contrib/zstd/lib/compress/zstd_ldm.c +59 -18
- data/contrib/zstd/lib/compress/zstd_ldm.h +6 -0
- data/contrib/zstd/lib/compress/zstd_opt.c +190 -45
- data/contrib/zstd/lib/compress/zstdmt_compress.c +74 -406
- data/contrib/zstd/lib/compress/zstdmt_compress.h +26 -108
- data/contrib/zstd/lib/decompress/huf_decompress.c +302 -200
- data/contrib/zstd/lib/decompress/zstd_ddict.c +8 -8
- data/contrib/zstd/lib/decompress/zstd_ddict.h +1 -1
- data/contrib/zstd/lib/decompress/zstd_decompress.c +125 -80
- data/contrib/zstd/lib/decompress/zstd_decompress_block.c +145 -37
- data/contrib/zstd/lib/decompress/zstd_decompress_block.h +5 -2
- data/contrib/zstd/lib/decompress/zstd_decompress_internal.h +11 -10
- data/contrib/zstd/lib/dictBuilder/cover.c +29 -20
- data/contrib/zstd/lib/dictBuilder/cover.h +1 -1
- data/contrib/zstd/lib/dictBuilder/fastcover.c +20 -19
- data/contrib/zstd/lib/dictBuilder/zdict.c +15 -16
- data/contrib/zstd/lib/dictBuilder/zdict.h +1 -1
- data/contrib/zstd/lib/legacy/zstd_v01.c +5 -1
- data/contrib/zstd/lib/legacy/zstd_v02.c +5 -1
- data/contrib/zstd/lib/legacy/zstd_v03.c +5 -1
- data/contrib/zstd/lib/legacy/zstd_v04.c +6 -2
- data/contrib/zstd/lib/legacy/zstd_v05.c +5 -1
- data/contrib/zstd/lib/legacy/zstd_v06.c +5 -1
- data/contrib/zstd/lib/legacy/zstd_v07.c +5 -1
- data/contrib/zstd/lib/libzstd.pc.in +3 -3
- data/contrib/zstd/lib/zstd.h +348 -47
- data/ext/extzstd.c +6 -0
- data/ext/extzstd.h +6 -0
- data/gemstub.rb +3 -21
- data/lib/extzstd.rb +0 -2
- data/lib/extzstd/version.rb +6 -1
- data/test/test_basic.rb +0 -5
- metadata +5 -4
The hunks below are all from `data/contrib/zstd/lib/compress/zstdmt_compress.c` (+74 -406):

```diff
@@ -20,8 +20,7 @@


 /* ====== Dependencies ====== */
-#include <string.h>      /* memcpy, memset */
-#include <limits.h>      /* INT_MAX, UINT_MAX */
+#include "../common/zstd_deps.h"   /* ZSTD_memcpy, ZSTD_memset, INT_MAX, UINT_MAX */
 #include "../common/mem.h"         /* MEM_STATIC */
 #include "../common/pool.h"        /* threadpool */
 #include "../common/threading.h"   /* mutex */
```
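The new `zstd_deps.h` header (+111 lines in this release) centralizes libc dependencies behind `ZSTD_*` wrappers so freestanding builds can substitute their own. A rough sketch of the default mapping, assuming no user override (the bundled header also gates some groups behind `ZSTD_DEPS_NEED_*` macros and may prefer compiler builtins; this is illustrative, not a verbatim copy):

```c
/* Sketch of the default definitions in lib/common/zstd_deps.h. */
#include <string.h>   /* memcpy, memmove, memset */
#include <limits.h>   /* INT_MAX, UINT_MAX */

#define ZSTD_memcpy(d, s, n)  memcpy((d), (s), (n))
#define ZSTD_memmove(d, s, n) memmove((d), (s), (n))
#define ZSTD_memset(p, v, n)  memset((p), (v), (n))
```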
```diff
@@ -106,11 +105,11 @@ typedef struct ZSTDMT_bufferPool_s {
 static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbWorkers, ZSTD_customMem cMem)
 {
     unsigned const maxNbBuffers = 2*nbWorkers + 3;
-    ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_calloc(
+    ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_customCalloc(
         sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem);
     if (bufPool==NULL) return NULL;
     if (ZSTD_pthread_mutex_init(&bufPool->poolMutex, NULL)) {
-        ZSTD_free(bufPool, cMem);
+        ZSTD_customFree(bufPool, cMem);
         return NULL;
     }
     bufPool->bufferSize = 64 KB;
```
```diff
@@ -127,10 +126,10 @@ static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)
     if (!bufPool) return;   /* compatibility with free on NULL */
     for (u=0; u<bufPool->totalBuffers; u++) {
         DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->bTable[u].start);
-        ZSTD_free(bufPool->bTable[u].start, bufPool->cMem);
+        ZSTD_customFree(bufPool->bTable[u].start, bufPool->cMem);
     }
     ZSTD_pthread_mutex_destroy(&bufPool->poolMutex);
-    ZSTD_free(bufPool, bufPool->cMem);
+    ZSTD_customFree(bufPool, bufPool->cMem);
 }

 /* only works at initialization, not during compression */
```
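Every `ZSTD_customCalloc`/`ZSTD_customFree` call in these pool routines dispatches through the `ZSTD_customMem` table the pool was created with. A minimal sketch of wiring in a custom allocator from application code via the public advanced API (`countingAlloc`, `countingFree`, and the counter are illustrative names, not part of zstd):

```c
#include <stdio.h>
#include <stdlib.h>
#define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_customMem lives in the advanced API */
#include <zstd.h>

static size_t nbAllocs = 0;

static void* countingAlloc(void* opaque, size_t size)
{
    (void)opaque;
    nbAllocs++;           /* observe every internal allocation */
    return malloc(size);  /* zstd zeroes memory itself where it needs calloc semantics */
}

static void countingFree(void* opaque, void* address)
{
    (void)opaque;
    free(address);
}

int main(void)
{
    ZSTD_customMem const cMem = { countingAlloc, countingFree, NULL };
    ZSTD_CCtx* const cctx = ZSTD_createCCtx_advanced(cMem);
    printf("allocations during context creation: %zu\n", nbAllocs);
    ZSTD_freeCCtx(cctx);   /* releases through countingFree */
    return 0;
}
```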
```diff
@@ -201,13 +200,13 @@ static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
         }
         /* size conditions not respected : scratch this buffer, create new one */
         DEBUGLOG(5, "ZSTDMT_getBuffer: existing buffer does not meet size conditions => freeing");
-        ZSTD_free(buf.start, bufPool->cMem);
+        ZSTD_customFree(buf.start, bufPool->cMem);
     }
     ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
     /* create new buffer */
     DEBUGLOG(5, "ZSTDMT_getBuffer: create a new buffer");
     {   buffer_t buffer;
-        void* const start = ZSTD_malloc(bSize, bufPool->cMem);
+        void* const start = ZSTD_customMalloc(bSize, bufPool->cMem);
         buffer.start = start;   /* note : start can be NULL if malloc fails ! */
         buffer.capacity = (start==NULL) ? 0 : bSize;
         if (start==NULL) {
```
```diff
@@ -229,13 +228,13 @@ static buffer_t ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buffer)
 {
     size_t const bSize = bufPool->bufferSize;
     if (buffer.capacity < bSize) {
-        void* const start = ZSTD_malloc(bSize, bufPool->cMem);
+        void* const start = ZSTD_customMalloc(bSize, bufPool->cMem);
         buffer_t newBuffer;
         newBuffer.start = start;
         newBuffer.capacity = start == NULL ? 0 : bSize;
         if (start != NULL) {
             assert(newBuffer.capacity >= buffer.capacity);
-            memcpy(newBuffer.start, buffer.start, buffer.capacity);
+            ZSTD_memcpy(newBuffer.start, buffer.start, buffer.capacity);
             DEBUGLOG(5, "ZSTDMT_resizeBuffer: created buffer of size %u", (U32)bSize);
             return newBuffer;
         }
```
```diff
@@ -261,14 +260,12 @@ static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
     ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
     /* Reached bufferPool capacity (should not happen) */
     DEBUGLOG(5, "ZSTDMT_releaseBuffer: pool capacity reached => freeing ");
-    ZSTD_free(buf.start, bufPool->cMem);
+    ZSTD_customFree(buf.start, bufPool->cMem);
 }


 /* ===== Seq Pool Wrapper ====== */

-static rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0};
-
 typedef ZSTDMT_bufferPool ZSTDMT_seqPool;

 static size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool)
```
```diff
@@ -278,7 +275,7 @@ static size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool)

 static rawSeqStore_t bufferToSeq(buffer_t buffer)
 {
-    rawSeqStore_t seq = {NULL, 0, 0, 0};
+    rawSeqStore_t seq = kNullRawSeqStore;
     seq.seq = (rawSeq*)buffer.start;
     seq.capacity = buffer.capacity / sizeof(rawSeq);
     return seq;
```
```diff
@@ -354,7 +351,7 @@ static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
     for (cid=0; cid<pool->totalCCtx; cid++)
         ZSTD_freeCCtx(pool->cctx[cid]);  /* note : compatible with free on NULL */
     ZSTD_pthread_mutex_destroy(&pool->poolMutex);
-    ZSTD_free(pool, pool->cMem);
+    ZSTD_customFree(pool, pool->cMem);
 }

 /* ZSTDMT_createCCtxPool() :
```
```diff
@@ -362,12 +359,12 @@ static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
 static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(int nbWorkers,
                                               ZSTD_customMem cMem)
 {
-    ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_calloc(
+    ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_customCalloc(
         sizeof(ZSTDMT_CCtxPool) + (nbWorkers-1)*sizeof(ZSTD_CCtx*), cMem);
     assert(nbWorkers > 0);
     if (!cctxPool) return NULL;
     if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) {
-        ZSTD_free(cctxPool, cMem);
+        ZSTD_customFree(cctxPool, cMem);
         return NULL;
     }
     cctxPool->cMem = cMem;
```
```diff
@@ -478,7 +475,7 @@ ZSTDMT_serialState_reset(serialState_t* serialState,
         serialState->ldmState.hashPower =
                 ZSTD_rollingHash_primePower(params.ldmParams.minMatchLength);
     } else {
-        memset(&params.ldmParams, 0, sizeof(params.ldmParams));
+        ZSTD_memset(&params.ldmParams, 0, sizeof(params.ldmParams));
     }
     serialState->nextJobID = 0;
     if (params.fParams.checksumFlag)
```
```diff
@@ -499,18 +496,18 @@ ZSTDMT_serialState_reset(serialState_t* serialState,
         ZSTD_window_init(&serialState->ldmState.window);
         /* Resize tables and output space if necessary. */
         if (serialState->ldmState.hashTable == NULL || serialState->params.ldmParams.hashLog < hashLog) {
-            ZSTD_free(serialState->ldmState.hashTable, cMem);
-            serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_malloc(hashSize, cMem);
+            ZSTD_customFree(serialState->ldmState.hashTable, cMem);
+            serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_customMalloc(hashSize, cMem);
         }
         if (serialState->ldmState.bucketOffsets == NULL || prevBucketLog < bucketLog) {
-            ZSTD_free(serialState->ldmState.bucketOffsets, cMem);
-            serialState->ldmState.bucketOffsets = (BYTE*)ZSTD_malloc(bucketSize, cMem);
+            ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem);
+            serialState->ldmState.bucketOffsets = (BYTE*)ZSTD_customMalloc(bucketSize, cMem);
         }
         if (!serialState->ldmState.hashTable || !serialState->ldmState.bucketOffsets)
             return 1;
         /* Zero the tables */
-        memset(serialState->ldmState.hashTable, 0, hashSize);
-        memset(serialState->ldmState.bucketOffsets, 0, bucketSize);
+        ZSTD_memset(serialState->ldmState.hashTable, 0, hashSize);
+        ZSTD_memset(serialState->ldmState.bucketOffsets, 0, bucketSize);

         /* Update window state and fill hash table with dict */
         serialState->ldmState.loadedDictEnd = 0;
```
```diff
@@ -537,7 +534,7 @@ ZSTDMT_serialState_reset(serialState_t* serialState,
 static int ZSTDMT_serialState_init(serialState_t* serialState)
 {
     int initError = 0;
-    memset(serialState, 0, sizeof(*serialState));
+    ZSTD_memset(serialState, 0, sizeof(*serialState));
     initError |= ZSTD_pthread_mutex_init(&serialState->mutex, NULL);
     initError |= ZSTD_pthread_cond_init(&serialState->cond, NULL);
     initError |= ZSTD_pthread_mutex_init(&serialState->ldmWindowMutex, NULL);
```
```diff
@@ -552,8 +549,8 @@ static void ZSTDMT_serialState_free(serialState_t* serialState)
     ZSTD_pthread_cond_destroy(&serialState->cond);
     ZSTD_pthread_mutex_destroy(&serialState->ldmWindowMutex);
     ZSTD_pthread_cond_destroy(&serialState->ldmWindowCond);
-    ZSTD_free(serialState->ldmState.hashTable, cMem);
-    ZSTD_free(serialState->ldmState.bucketOffsets, cMem);
+    ZSTD_customFree(serialState->ldmState.hashTable, cMem);
+    ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem);
 }

 static void ZSTDMT_serialState_update(serialState_t* serialState,
```
```diff
@@ -820,7 +817,6 @@ struct ZSTDMT_CCtx_s {
     roundBuff_t roundBuff;
     serialState_t serial;
     rsyncState_t rsync;
-    unsigned singleBlockingThread;
     unsigned jobIDMask;
     unsigned doneJobID;
     unsigned nextJobID;
```
```diff
@@ -832,6 +828,7 @@ struct ZSTDMT_CCtx_s {
     ZSTD_customMem cMem;
     ZSTD_CDict* cdictLocal;
     const ZSTD_CDict* cdict;
+    unsigned providedFactory: 1;
 };

 static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZSTD_customMem cMem)
```
```diff
@@ -842,7 +839,7 @@ static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZSTD_customMem cMem)
         ZSTD_pthread_mutex_destroy(&jobTable[jobNb].job_mutex);
         ZSTD_pthread_cond_destroy(&jobTable[jobNb].job_cond);
     }
-    ZSTD_free(jobTable, cMem);
+    ZSTD_customFree(jobTable, cMem);
 }

 /* ZSTDMT_allocJobsTable()
```
```diff
@@ -854,7 +851,7 @@ static ZSTDMT_jobDescription* ZSTDMT_createJobsTable(U32* nbJobsPtr, ZSTD_customMem cMem)
     U32 const nbJobs = 1 << nbJobsLog2;
     U32 jobNb;
     ZSTDMT_jobDescription* const jobTable = (ZSTDMT_jobDescription*)
-                ZSTD_calloc(nbJobs * sizeof(ZSTDMT_jobDescription), cMem);
+                ZSTD_customCalloc(nbJobs * sizeof(ZSTDMT_jobDescription), cMem);
     int initError = 0;
     if (jobTable==NULL) return NULL;
     *nbJobsPtr = nbJobs;
```
```diff
@@ -885,12 +882,12 @@ static size_t ZSTDMT_expandJobsTable (ZSTDMT_CCtx* mtctx, U32 nbWorkers) {

 /* ZSTDMT_CCtxParam_setNbWorkers():
  * Internal use only */
-size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers)
+static size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers)
 {
     return ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, (int)nbWorkers);
 }

-MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem)
+MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool* pool)
 {
     ZSTDMT_CCtx* mtctx;
     U32 nbJobs = nbWorkers + 2;
```
```diff
@@ -903,12 +900,19 @@ MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem)
         /* invalid custom allocator */
         return NULL;

-    mtctx = (ZSTDMT_CCtx*) ZSTD_calloc(sizeof(ZSTDMT_CCtx), cMem);
+    mtctx = (ZSTDMT_CCtx*) ZSTD_customCalloc(sizeof(ZSTDMT_CCtx), cMem);
     if (!mtctx) return NULL;
     ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
     mtctx->cMem = cMem;
     mtctx->allJobsCompleted = 1;
-    mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem);
+    if (pool != NULL) {
+      mtctx->factory = pool;
+      mtctx->providedFactory = 1;
+    }
+    else {
+      mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem);
+      mtctx->providedFactory = 0;
+    }
     mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem);
     assert(nbJobs > 0); assert((nbJobs & (nbJobs - 1)) == 0);  /* ensure nbJobs is a power of 2 */
     mtctx->jobIDMask = nbJobs - 1;
```
```diff
@@ -925,22 +929,18 @@ MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem)
     return mtctx;
 }

-ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem)
+ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool* pool)
 {
 #ifdef ZSTD_MULTITHREAD
-    return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem);
+    return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem, pool);
 #else
     (void)nbWorkers;
     (void)cMem;
+    (void)pool;
     return NULL;
 #endif
 }

-ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers)
-{
-    return ZSTDMT_createCCtx_advanced(nbWorkers, ZSTD_defaultCMem);
-}
-

 /* ZSTDMT_releaseAllJobResources() :
  * note : ensure all workers are killed first ! */
```
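The `pool` argument threaded through `ZSTDMT_createCCtx_advanced_internal` and the new `providedFactory` bit let several contexts share one set of worker threads instead of each creating a private factory; when the factory is provided externally, `ZSTDMT_freeCCtx` must leave it alone. At the public level this corresponds to the experimental thread-pool sharing API added in zstd v1.4.7; a sketch, assuming zstd is statically linked:

```c
#define ZSTD_STATIC_LINKING_ONLY   /* thread-pool sharing is experimental API */
#include <zstd.h>

/* Two contexts enqueueing jobs on one shared pool of 4 workers. */
int shareWorkers(ZSTD_CCtx* c1, ZSTD_CCtx* c2)
{
    ZSTD_threadPool* const pool = ZSTD_createThreadPool(4);
    if (pool == NULL) return 1;
    /* Neither context owns the pool (providedFactory==1 internally),
     * so freeing the contexts later leaves the pool alive. */
    ZSTD_CCtx_refThreadPool(c1, pool);
    ZSTD_CCtx_refThreadPool(c2, pool);
    /* ... set nbWorkers > 0 on each context and compress ... */
    ZSTD_freeThreadPool(pool);   /* only after both contexts are done with it */
    return 0;
}
```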
```diff
@@ -957,7 +957,7 @@ static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx)
         ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);

         /* Clear the job description, but keep the mutex/cond */
-        memset(&mtctx->jobs[jobID], 0, sizeof(mtctx->jobs[jobID]));
+        ZSTD_memset(&mtctx->jobs[jobID], 0, sizeof(mtctx->jobs[jobID]));
         mtctx->jobs[jobID].job_mutex = mutex;
         mtctx->jobs[jobID].job_cond = cond;
     }
```
```diff
@@ -984,7 +984,8 @@ static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* mtctx)
 size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx)
 {
     if (mtctx==NULL) return 0;   /* compatible with free on NULL */
-    POOL_free(mtctx->factory);   /* stop and free worker threads */
+    if (!mtctx->providedFactory)
+        POOL_free(mtctx->factory);   /* stop and free worker threads */
     ZSTDMT_releaseAllJobResources(mtctx);  /* release job resources into pools first */
     ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
     ZSTDMT_freeBufferPool(mtctx->bufPool);
```
```diff
@@ -993,8 +994,8 @@ size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx)
     ZSTDMT_serialState_free(&mtctx->serial);
     ZSTD_freeCDict(mtctx->cdictLocal);
     if (mtctx->roundBuff.buffer)
-        ZSTD_free(mtctx->roundBuff.buffer, mtctx->cMem);
-    ZSTD_free(mtctx, mtctx->cMem);
+        ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem);
+    ZSTD_customFree(mtctx, mtctx->cMem);
     return 0;
 }

```
```diff
@@ -1011,65 +1012,6 @@ size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx)
         + mtctx->roundBuff.capacity;
 }

-/* Internal only */
-size_t
-ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params,
-                                   ZSTDMT_parameter parameter,
-                                   int value)
-{
-    DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter");
-    switch(parameter)
-    {
-    case ZSTDMT_p_jobSize :
-        DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter : set jobSize to %i", value);
-        return ZSTD_CCtxParams_setParameter(params, ZSTD_c_jobSize, value);
-    case ZSTDMT_p_overlapLog :
-        DEBUGLOG(4, "ZSTDMT_p_overlapLog : %i", value);
-        return ZSTD_CCtxParams_setParameter(params, ZSTD_c_overlapLog, value);
-    case ZSTDMT_p_rsyncable :
-        DEBUGLOG(4, "ZSTD_p_rsyncable : %i", value);
-        return ZSTD_CCtxParams_setParameter(params, ZSTD_c_rsyncable, value);
-    default :
-        return ERROR(parameter_unsupported);
-    }
-}
-
-size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value)
-{
-    DEBUGLOG(4, "ZSTDMT_setMTCtxParameter");
-    return ZSTDMT_CCtxParam_setMTCtxParameter(&mtctx->params, parameter, value);
-}
-
-size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value)
-{
-    switch (parameter) {
-    case ZSTDMT_p_jobSize:
-        return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_jobSize, value);
-    case ZSTDMT_p_overlapLog:
-        return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_overlapLog, value);
-    case ZSTDMT_p_rsyncable:
-        return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_rsyncable, value);
-    default:
-        return ERROR(parameter_unsupported);
-    }
-}
-
-/* Sets parameters relevant to the compression job,
- * initializing others to default values. */
-static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(const ZSTD_CCtx_params* params)
-{
-    ZSTD_CCtx_params jobParams = *params;
-    /* Clear parameters related to multithreading */
-    jobParams.forceWindow = 0;
-    jobParams.nbWorkers = 0;
-    jobParams.jobSize = 0;
-    jobParams.overlapLog = 0;
-    jobParams.rsyncable = 0;
-    memset(&jobParams.ldmParams, 0, sizeof(ldmParams_t));
-    memset(&jobParams.customMem, 0, sizeof(ZSTD_customMem));
-    return jobParams;
-}
-

 /* ZSTDMT_resize() :
  * @return : error code if fails, 0 on success */
```
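The deleted accessors were thin shims: each `ZSTDMT_p_*` case simply forwarded to `ZSTD_CCtxParams_setParameter`, so callers lose nothing by using the regular parameter interface on a `ZSTD_CCtx`. A sketch (note `ZSTD_c_rsyncable` is still defined in zstd.h's experimental section):

```c
#define ZSTD_STATIC_LINKING_ONLY   /* for ZSTD_c_rsyncable */
#include <zstd.h>

void configureMT(ZSTD_CCtx* cctx)
{
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);      /* enable MT */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_jobSize, 1 << 20);  /* ~1 MiB per job */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_overlapLog, 6);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_rsyncable, 1);      /* rsync-friendly framing */
}
```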
```diff
@@ -1098,7 +1040,7 @@ void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams)
     DEBUGLOG(5, "ZSTDMT_updateCParams_whileCompressing (level:%i)",
                 compressionLevel);
     mtctx->params.compressionLevel = compressionLevel;
-    {   ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, 0);
+    {   ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
         cParams.windowLog = saved_wlog;
         mtctx->params.cParams = cParams;
     }
```
```diff
@@ -1185,8 +1127,8 @@ static unsigned ZSTDMT_computeTargetJobLog(const ZSTD_CCtx_params* params)
     if (params->ldmParams.enableLdm) {
         /* In Long Range Mode, the windowLog is typically oversized.
          * In which case, it's preferable to determine the jobSize
-         * based on chainLog instead. */
-        jobLog = MAX(21, params->cParams.chainLog + 4);
+         * based on cycleLog instead. */
+        jobLog = MAX(21, ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy) + 3);
     } else {
         jobLog = MAX(20, params->cParams.windowLog + 2);
     }
```
```diff
@@ -1240,174 +1182,6 @@ static size_t ZSTDMT_computeOverlapSize(const ZSTD_CCtx_params* params)
     return (ovLog==0) ? 0 : (size_t)1 << ovLog;
 }

-static unsigned
-ZSTDMT_computeNbJobs(const ZSTD_CCtx_params* params, size_t srcSize, unsigned nbWorkers)
-{
-    assert(nbWorkers>0);
-    {   size_t const jobSizeTarget = (size_t)1 << ZSTDMT_computeTargetJobLog(params);
-        size_t const jobMaxSize = jobSizeTarget << 2;
-        size_t const passSizeMax = jobMaxSize * nbWorkers;
-        unsigned const multiplier = (unsigned)(srcSize / passSizeMax) + 1;
-        unsigned const nbJobsLarge = multiplier * nbWorkers;
-        unsigned const nbJobsMax = (unsigned)(srcSize / jobSizeTarget) + 1;
-        unsigned const nbJobsSmall = MIN(nbJobsMax, nbWorkers);
-        return (multiplier>1) ? nbJobsLarge : nbJobsSmall;
-}   }
-
-/* ZSTDMT_compress_advanced_internal() :
- * This is a blocking function : it will only give back control to caller after finishing its compression job.
- */
-static size_t
-ZSTDMT_compress_advanced_internal(
-                ZSTDMT_CCtx* mtctx,
-                void* dst, size_t dstCapacity,
-          const void* src, size_t srcSize,
-          const ZSTD_CDict* cdict,
-                ZSTD_CCtx_params params)
-{
-    ZSTD_CCtx_params const jobParams = ZSTDMT_initJobCCtxParams(&params);
-    size_t const overlapSize = ZSTDMT_computeOverlapSize(&params);
-    unsigned const nbJobs = ZSTDMT_computeNbJobs(&params, srcSize, params.nbWorkers);
-    size_t const proposedJobSize = (srcSize + (nbJobs-1)) / nbJobs;
-    size_t const avgJobSize = (((proposedJobSize-1) & 0x1FFFF) < 0x7FFF) ? proposedJobSize + 0xFFFF : proposedJobSize;   /* avoid too small last block */
-    const char* const srcStart = (const char*)src;
-    size_t remainingSrcSize = srcSize;
-    unsigned const compressWithinDst = (dstCapacity >= ZSTD_compressBound(srcSize)) ? nbJobs : (unsigned)(dstCapacity / ZSTD_compressBound(avgJobSize));  /* presumes avgJobSize >= 256 KB, which should be the case */
-    size_t frameStartPos = 0, dstBufferPos = 0;
-    assert(jobParams.nbWorkers == 0);
-    assert(mtctx->cctxPool->totalCCtx == params.nbWorkers);
-
-    params.jobSize = (U32)avgJobSize;
-    DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: nbJobs=%2u (rawSize=%u bytes; fixedSize=%u) ",
-                nbJobs, (U32)proposedJobSize, (U32)avgJobSize);
-
-    if ((nbJobs==1) | (params.nbWorkers<=1)) {   /* fallback to single-thread mode : this is a blocking invocation anyway */
-        ZSTD_CCtx* const cctx = mtctx->cctxPool->cctx[0];
-        DEBUGLOG(4, "ZSTDMT_compress_advanced_internal: fallback to single-thread mode");
-        if (cdict) return ZSTD_compress_usingCDict_advanced(cctx, dst, dstCapacity, src, srcSize, cdict, jobParams.fParams);
-        return ZSTD_compress_advanced_internal(cctx, dst, dstCapacity, src, srcSize, NULL, 0, &jobParams);
-    }
-
-    assert(avgJobSize >= 256 KB);  /* condition for ZSTD_compressBound(A) + ZSTD_compressBound(B) <= ZSTD_compressBound(A+B), required to compress directly into Dst (no additional buffer) */
-    ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(avgJobSize) );
-    /* LDM doesn't even try to load the dictionary in single-ingestion mode */
-    if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, avgJobSize, NULL, 0, ZSTD_dct_auto))
-        return ERROR(memory_allocation);
-
-    FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbJobs) , "");  /* only expands if necessary */
-
-    {   unsigned u;
-        for (u=0; u<nbJobs; u++) {
-            size_t const jobSize = MIN(remainingSrcSize, avgJobSize);
-            size_t const dstBufferCapacity = ZSTD_compressBound(jobSize);
-            buffer_t const dstAsBuffer = { (char*)dst + dstBufferPos, dstBufferCapacity };
-            buffer_t const dstBuffer = u < compressWithinDst ? dstAsBuffer : g_nullBuffer;
-            size_t dictSize = u ? overlapSize : 0;
-
-            mtctx->jobs[u].prefix.start = srcStart + frameStartPos - dictSize;
-            mtctx->jobs[u].prefix.size = dictSize;
-            mtctx->jobs[u].src.start = srcStart + frameStartPos;
-            mtctx->jobs[u].src.size = jobSize; assert(jobSize > 0);  /* avoid job.src.size == 0 */
-            mtctx->jobs[u].consumed = 0;
-            mtctx->jobs[u].cSize = 0;
-            mtctx->jobs[u].cdict = (u==0) ? cdict : NULL;
-            mtctx->jobs[u].fullFrameSize = srcSize;
-            mtctx->jobs[u].params = jobParams;
-            /* do not calculate checksum within sections, but write it in header for first section */
-            mtctx->jobs[u].dstBuff = dstBuffer;
-            mtctx->jobs[u].cctxPool = mtctx->cctxPool;
-            mtctx->jobs[u].bufPool = mtctx->bufPool;
-            mtctx->jobs[u].seqPool = mtctx->seqPool;
-            mtctx->jobs[u].serial = &mtctx->serial;
-            mtctx->jobs[u].jobID = u;
-            mtctx->jobs[u].firstJob = (u==0);
-            mtctx->jobs[u].lastJob = (u==nbJobs-1);
-
-            DEBUGLOG(5, "ZSTDMT_compress_advanced_internal: posting job %u (%u bytes)", u, (U32)jobSize);
-            DEBUG_PRINTHEX(6, mtctx->jobs[u].prefix.start, 12);
-            POOL_add(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[u]);
-
-            frameStartPos += jobSize;
-            dstBufferPos += dstBufferCapacity;
-            remainingSrcSize -= jobSize;
-    }   }
-
-    /* collect result */
-    {   size_t error = 0, dstPos = 0;
-        unsigned jobID;
-        for (jobID=0; jobID<nbJobs; jobID++) {
-            DEBUGLOG(5, "waiting for job %u ", jobID);
-            ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex);
-            while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) {
-                DEBUGLOG(5, "waiting for jobCompleted signal from job %u", jobID);
-                ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex);
-            }
-            ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex);
-            DEBUGLOG(5, "ready to write job %u ", jobID);
-
-            {   size_t const cSize = mtctx->jobs[jobID].cSize;
-                if (ZSTD_isError(cSize)) error = cSize;
-                if ((!error) && (dstPos + cSize > dstCapacity)) error = ERROR(dstSize_tooSmall);
-                if (jobID) {   /* note : job 0 is written directly at dst, which is correct position */
-                    if (!error)
-                        memmove((char*)dst + dstPos, mtctx->jobs[jobID].dstBuff.start, cSize);  /* may overlap when job compressed within dst */
-                    if (jobID >= compressWithinDst) {  /* job compressed into its own buffer, which must be released */
-                        DEBUGLOG(5, "releasing buffer %u>=%u", jobID, compressWithinDst);
-                        ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);
-                }   }
-                mtctx->jobs[jobID].dstBuff = g_nullBuffer;
-                mtctx->jobs[jobID].cSize = 0;
-                dstPos += cSize ;
-            }
-        }  /* for (jobID=0; jobID<nbJobs; jobID++) */
-
-        DEBUGLOG(4, "checksumFlag : %u ", params.fParams.checksumFlag);
-        if (params.fParams.checksumFlag) {
-            U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState);
-            if (dstPos + 4 > dstCapacity) {
-                error = ERROR(dstSize_tooSmall);
-            } else {
-                DEBUGLOG(4, "writing checksum : %08X \n", checksum);
-                MEM_writeLE32((char*)dst + dstPos, checksum);
-                dstPos += 4;
-        }   }
-
-        if (!error) DEBUGLOG(4, "compressed size : %u ", (U32)dstPos);
-        return error ? error : dstPos;
-    }
-}
-
-size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
-                                void* dst, size_t dstCapacity,
-                          const void* src, size_t srcSize,
-                          const ZSTD_CDict* cdict,
-                                ZSTD_parameters params,
-                                int overlapLog)
-{
-    ZSTD_CCtx_params cctxParams = mtctx->params;
-    cctxParams.cParams = params.cParams;
-    cctxParams.fParams = params.fParams;
-    assert(ZSTD_OVERLAPLOG_MIN <= overlapLog && overlapLog <= ZSTD_OVERLAPLOG_MAX);
-    cctxParams.overlapLog = overlapLog;
-    return ZSTDMT_compress_advanced_internal(mtctx,
-                                             dst, dstCapacity,
-                                             src, srcSize,
-                                             cdict, cctxParams);
-}
-
-
-size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
-                           void* dst, size_t dstCapacity,
-                     const void* src, size_t srcSize,
-                           int compressionLevel)
-{
-    ZSTD_parameters params = ZSTD_getParams(compressionLevel, srcSize, 0);
-    int const overlapLog = ZSTDMT_overlapLog_default(params.cParams.strategy);
-    params.fParams.contentSizeFlag = 1;
-    return ZSTDMT_compress_advanced(mtctx, dst, dstCapacity, src, srcSize, NULL, params, overlapLog);
-}
-
-
 /* ====================================== */
 /* ======= Streaming API ======= */
 /* ====================================== */
```
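This removal deletes the entire synchronous multi-job path: `ZSTDMT_compressCCtx()` and `ZSTDMT_compress_advanced()` are gone, and one-shot multithreaded compression now flows through the stable `ZSTD_compress2()` entry point, which routes into the ZSTDMT code internally once `ZSTD_c_nbWorkers` is nonzero. A minimal sketch of the replacement call pattern:

```c
#include <zstd.h>

/* One-shot multithreaded compression via the stable API. */
size_t compressMT(void* dst, size_t dstCapacity,
                  const void* src, size_t srcSize, int level)
{
    size_t cSize = 0;
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    if (cctx != NULL) {
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);  /* errors if built without ZSTD_MULTITHREAD */
        cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);  /* check with ZSTD_isError() */
    }
    ZSTD_freeCCtx(cctx);   /* accepts NULL */
    return cSize;
}
```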
```diff
@@ -1432,16 +1206,6 @@ size_t ZSTDMT_initCStream_internal(
     if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN;
     if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = (size_t)ZSTDMT_JOBSIZE_MAX;

-    mtctx->singleBlockingThread = (pledgedSrcSize <= ZSTDMT_JOBSIZE_MIN);  /* do not trigger multi-threading when srcSize is too small */
-    if (mtctx->singleBlockingThread) {
-        ZSTD_CCtx_params const singleThreadParams = ZSTDMT_initJobCCtxParams(&params);
-        DEBUGLOG(5, "ZSTDMT_initCStream_internal: switch to single blocking thread mode");
-        assert(singleThreadParams.nbWorkers == 0);
-        return ZSTD_initCStream_internal(mtctx->cctxPool->cctx[0],
-                                         dict, dictSize, cdict,
-                                         &singleThreadParams, pledgedSrcSize);
-    }
-
     DEBUGLOG(4, "ZSTDMT_initCStream_internal: %u workers", params.nbWorkers);

     if (mtctx->allJobsCompleted == 0) {   /* previous compression not correctly finished */
```
```diff
@@ -1504,8 +1268,8 @@ size_t ZSTDMT_initCStream_internal(
         size_t const capacity = MAX(windowSize, sectionsSize) + slackSize;
         if (mtctx->roundBuff.capacity < capacity) {
             if (mtctx->roundBuff.buffer)
-                ZSTD_free(mtctx->roundBuff.buffer, mtctx->cMem);
-            mtctx->roundBuff.buffer = (BYTE*)ZSTD_malloc(capacity, mtctx->cMem);
+                ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem);
+            mtctx->roundBuff.buffer = (BYTE*)ZSTD_customMalloc(capacity, mtctx->cMem);
             if (mtctx->roundBuff.buffer == NULL) {
                 mtctx->roundBuff.capacity = 0;
                 return ERROR(memory_allocation);
```
```diff
@@ -1530,53 +1294,6 @@ size_t ZSTDMT_initCStream_internal(
     return 0;
 }

-size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
-                             const void* dict, size_t dictSize,
-                                   ZSTD_parameters params,
-                                   unsigned long long pledgedSrcSize)
-{
-    ZSTD_CCtx_params cctxParams = mtctx->params;  /* retrieve sticky params */
-    DEBUGLOG(4, "ZSTDMT_initCStream_advanced (pledgedSrcSize=%u)", (U32)pledgedSrcSize);
-    cctxParams.cParams = params.cParams;
-    cctxParams.fParams = params.fParams;
-    return ZSTDMT_initCStream_internal(mtctx, dict, dictSize, ZSTD_dct_auto, NULL,
-                                       cctxParams, pledgedSrcSize);
-}
-
-size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
-                               const ZSTD_CDict* cdict,
-                                     ZSTD_frameParameters fParams,
-                                     unsigned long long pledgedSrcSize)
-{
-    ZSTD_CCtx_params cctxParams = mtctx->params;
-    if (cdict==NULL) return ERROR(dictionary_wrong);   /* method incompatible with NULL cdict */
-    cctxParams.cParams = ZSTD_getCParamsFromCDict(cdict);
-    cctxParams.fParams = fParams;
-    return ZSTDMT_initCStream_internal(mtctx, NULL, 0 /*dictSize*/, ZSTD_dct_auto, cdict,
-                                       cctxParams, pledgedSrcSize);
-}
-
-
-/* ZSTDMT_resetCStream() :
- * pledgedSrcSize can be zero == unknown (for the time being)
- * prefer using ZSTD_CONTENTSIZE_UNKNOWN,
- * as `0` might mean "empty" in the future */
-size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize)
-{
-    if (!pledgedSrcSize) pledgedSrcSize = ZSTD_CONTENTSIZE_UNKNOWN;
-    return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dct_auto, 0, mtctx->params,
-                                       pledgedSrcSize);
-}
-
-size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel) {
-    ZSTD_parameters const params = ZSTD_getParams(compressionLevel, ZSTD_CONTENTSIZE_UNKNOWN, 0);
-    ZSTD_CCtx_params cctxParams = mtctx->params;   /* retrieve sticky params */
-    DEBUGLOG(4, "ZSTDMT_initCStream (cLevel=%i)", compressionLevel);
-    cctxParams.cParams = params.cParams;
-    cctxParams.fParams = params.fParams;
-    return ZSTDMT_initCStream_internal(mtctx, NULL, 0, ZSTD_dct_auto, NULL, cctxParams, ZSTD_CONTENTSIZE_UNKNOWN);
-}
-

 /* ZSTDMT_writeLastEmptyBlock()
  * Write a single empty block with an end-of-frame to finish a frame.
```
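Similarly, the `ZSTDMT_initCStream*` family is gone; streaming callers set sticky parameters once on a `ZSTD_CCtx` and drive `ZSTD_compressStream2()`, which owns the MT dispatch. A sketch of the per-chunk loop (output draining between calls elided):

```c
#include <zstd.h>

/* Feed one chunk; pass lastChunk!=0 on the final call so the frame is ended. */
size_t streamChunk(ZSTD_CCtx* cctx, ZSTD_outBuffer* out, ZSTD_inBuffer* in, int lastChunk)
{
    ZSTD_EndDirective const mode = lastChunk ? ZSTD_e_end : ZSTD_e_continue;
    size_t remaining;
    do {
        remaining = ZSTD_compressStream2(cctx, out, in, mode);
        if (ZSTD_isError(remaining)) return remaining;
        /* a full `out` must be flushed by the caller here */
    } while (lastChunk ? (remaining != 0) : (in->pos != in->size));
    return 0;
}
```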
```diff
@@ -1740,7 +1457,7 @@ static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, unsigned blockToFlush, ZSTD_EndDirective end)
     assert(cSize >= mtctx->jobs[wJobID].dstFlushed);
     assert(mtctx->jobs[wJobID].dstBuff.start != NULL);
     if (toFlush > 0) {
-        memcpy((char*)output->dst + output->pos,
+        ZSTD_memcpy((char*)output->dst + output->pos,
             (const char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed,
             toFlush);
     }
```
```diff
@@ -1894,7 +1611,7 @@ static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx)
             return 0;
         }
         ZSTDMT_waitForLdmComplete(mtctx, buffer);
-        memmove(start, mtctx->inBuff.prefix.start, prefixSize);
+        ZSTD_memmove(start, mtctx->inBuff.prefix.start, prefixSize);
         mtctx->inBuff.prefix.start = start;
         mtctx->roundBuff.pos = prefixSize;
     }
```
```diff
@@ -1968,6 +1685,16 @@ findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input)
         pos = 0;
         prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH;
         hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH);
+        if ((hash & hitMask) == hitMask) {
+            /* We're already at a sync point so don't load any more until
+             * we're able to flush this sync point.
+             * This likely happened because the job table was full so we
+             * couldn't add our job.
+             */
+            syncPoint.toLoad = 0;
+            syncPoint.flush = 1;
+            return syncPoint;
+        }
     } else {
         /* We don't have enough bytes buffered to initialize the hash, but
          * we know we have at least RSYNC_LENGTH bytes total.
```
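Background for the inserted early return: in rsyncable mode a job boundary is declared wherever the rolling hash of the last RSYNC_LENGTH input bytes matches `hitMask`; the bug fixed here is that the bytes already buffered could themselves end on such a boundary, and the loader previously kept appending past it. Schematically (illustrative names, not the actual zstd internals):

```c
/* With a target job size of 2^k bytes, hitMask = (1<<k)-1, so the low k bits
 * of the rolling hash are all 1s roughly once every 2^k positions --
 * independent of how the input happens to be split across calls. */
static int isSyncPoint(unsigned long long rollingHash, unsigned jobSizeLog)
{
    unsigned long long const hitMask = (1ULL << jobSizeLog) - 1;
    return (rollingHash & hitMask) == hitMask;
}
```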
```diff
@@ -2022,34 +1749,11 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
     assert(output->pos <= output->size);
     assert(input->pos <= input->size);

-    if (mtctx->singleBlockingThread) {   /* delegate to single-thread (synchronous) */
-        return ZSTD_compressStream2(mtctx->cctxPool->cctx[0], output, input, endOp);
-    }
-
     if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) {
         /* current frame being ended. Only flush/end are allowed */
         return ERROR(stage_wrong);
     }

-    /* single-pass shortcut (note : synchronous-mode) */
-    if ( (!mtctx->params.rsyncable)   /* rsyncable mode is disabled */
-      && (mtctx->nextJobID == 0)      /* just started */
-      && (mtctx->inBuff.filled == 0)  /* nothing buffered */
-      && (!mtctx->jobReady)           /* no job already created */
-      && (endOp == ZSTD_e_end)        /* end order */
-      && (output->size - output->pos >= ZSTD_compressBound(input->size - input->pos)) ) {  /* enough space in dst */
-        size_t const cSize = ZSTDMT_compress_advanced_internal(mtctx,
-                (char*)output->dst + output->pos, output->size - output->pos,
-                (const char*)input->src + input->pos, input->size - input->pos,
-                mtctx->cdict, mtctx->params);
-        if (ZSTD_isError(cSize)) return cSize;
-        input->pos = input->size;
-        output->pos += cSize;
-        mtctx->allJobsCompleted = 1;
-        mtctx->frameEnded = 1;
-        return 0;
-    }
-
     /* fill input buffer */
     if ( (!mtctx->jobReady)
       && (input->size > input->pos) ) {   /* support NULL input */
```
```diff
@@ -2072,13 +1776,21 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
             assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize);
             DEBUGLOG(5, "ZSTDMT_compressStream_generic: adding %u bytes on top of %u to buffer of size %u",
                         (U32)syncPoint.toLoad, (U32)mtctx->inBuff.filled, (U32)mtctx->targetSectionSize);
-            memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, syncPoint.toLoad);
+            ZSTD_memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, syncPoint.toLoad);
             input->pos += syncPoint.toLoad;
             mtctx->inBuff.filled += syncPoint.toLoad;
             forwardInputProgress = syncPoint.toLoad>0;
         }
-        if ((input->pos < input->size) && (endOp == ZSTD_e_end))
-            endOp = ZSTD_e_flush;   /* can't end now : not all input consumed */
+    }
+    if ((input->pos < input->size) && (endOp == ZSTD_e_end)) {
+        /* Can't end yet because the input is not fully consumed.
+         * We are in one of these cases:
+         * - mtctx->inBuff is NULL & empty: we couldn't get an input buffer so don't create a new job.
+         * - We filled the input buffer: flush this job but don't end the frame.
+         * - We hit a synchronization point: flush this job but don't end the frame.
+         */
+        assert(mtctx->inBuff.filled == 0 || mtctx->inBuff.filled == mtctx->targetSectionSize || mtctx->params.rsyncable);
+        endOp = ZSTD_e_flush;
     }

     if ( (mtctx->jobReady)
```
```diff
@@ -2097,47 +1809,3 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
         return remainingToFlush;
     }
 }
-
-
-size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
-{
-    FORWARD_IF_ERROR( ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_continue) , "");
-
-    /* recommended next input size : fill current input buffer */
-    return mtctx->targetSectionSize - mtctx->inBuff.filled;   /* note : could be zero when input buffer is fully filled and no more availability to create new job */
-}
-
-
-static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_EndDirective endFrame)
-{
-    size_t const srcSize = mtctx->inBuff.filled;
-    DEBUGLOG(5, "ZSTDMT_flushStream_internal");
-
-    if ( mtctx->jobReady     /* one job ready for a worker to pick up */
-      || (srcSize > 0)       /* still some data within input buffer */
-      || ((endFrame==ZSTD_e_end) && !mtctx->frameEnded)) {  /* need a last 0-size block to end frame */
-        DEBUGLOG(5, "ZSTDMT_flushStream_internal : create a new job (%u bytes, end:%u)",
-                    (U32)srcSize, (U32)endFrame);
-        FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, srcSize, endFrame) , "");
-    }
-
-    /* check if there is any data available to flush */
-    return ZSTDMT_flushProduced(mtctx, output, 1 /* blockToFlush */, endFrame);
-}
-
-
-size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output)
-{
-    DEBUGLOG(5, "ZSTDMT_flushStream");
-    if (mtctx->singleBlockingThread)
-        return ZSTD_flushStream(mtctx->cctxPool->cctx[0], output);
-    return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_flush);
-}
-
-size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output)
-{
-    DEBUGLOG(4, "ZSTDMT_endStream");
-    if (mtctx->singleBlockingThread)
-        return ZSTD_endStream(mtctx->cctxPool->cctx[0], output);
-    return ZSTDMT_flushStream_internal(mtctx, output, ZSTD_e_end);
-}
```