zstdlib 0.8.0-x86-mingw32 → 0.9.0-x86-mingw32
- checksums.yaml +4 -4
- data/CHANGES.md +10 -0
- data/README.md +7 -1
- data/Rakefile +38 -8
- data/ext/{zstdlib → zstdlib_c}/extconf.rb +10 -5
- data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.2/zstdlib.c +2 -2
- data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.3/zstdlib.c +2 -2
- data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.4/zstdlib.c +2 -2
- data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.5/zstdlib.c +2 -2
- data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.6/zstdlib.c +2 -2
- data/ext/{zstdlib → zstdlib_c}/ruby/zlib-2.7/zstdlib.c +2 -2
- data/ext/{zstdlib → zstdlib_c}/ruby/zlib-3.0/zstdlib.c +2 -2
- data/ext/zstdlib_c/ruby/zlib-3.1/zstdlib.c +5076 -0
- data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/adler32.c +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/compress.c +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/crc32.c +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/crc32.h +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/deflate.c +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/deflate.h +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/gzclose.c +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/gzguts.h +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/gzlib.c +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/gzread.c +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/gzwrite.c +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/infback.c +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/inffast.c +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/inffast.h +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/inffixed.h +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/inflate.c +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/inflate.h +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/inftrees.c +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/inftrees.h +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/trees.c +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/trees.h +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/uncompr.c +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/zconf.h +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/zlib.h +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/zutil.c +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlib-1.2.11/zutil.h +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlib.mk +0 -0
- data/ext/{zstdlib → zstdlib_c}/zlibwrapper/zlibwrapper.c +1 -5
- data/ext/{zstdlib → zstdlib_c}/zlibwrapper.mk +0 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/common/bitstream.h +24 -9
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/common/compiler.h +89 -43
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/common/cpu.h +0 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/common/debug.c +0 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/common/debug.h +0 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/common/entropy_common.c +11 -5
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/common/error_private.c +0 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/common/error_private.h +79 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/common/fse.h +2 -1
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/common/fse_decompress.c +1 -1
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/common/huf.h +24 -22
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/common/mem.h +18 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/common/pool.c +11 -6
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/common/pool.h +2 -2
- data/ext/zstdlib_c/zstd-1.5.2/lib/common/portability_macros.h +137 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/common/threading.c +0 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/common/threading.h +0 -0
- data/ext/zstdlib_c/zstd-1.5.2/lib/common/xxhash.c +24 -0
- data/ext/zstdlib_c/zstd-1.5.2/lib/common/xxhash.h +5686 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/common/zstd_common.c +0 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/common/zstd_deps.h +0 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/common/zstd_internal.h +95 -92
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/common/zstd_trace.h +12 -3
- data/ext/zstdlib_c/zstd-1.5.2/lib/compress/clevels.h +134 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/fse_compress.c +63 -27
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/hist.c +0 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/hist.h +0 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/huf_compress.c +537 -104
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress.c +307 -373
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_internal.h +174 -83
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_literals.c +4 -3
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_literals.h +3 -1
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_sequences.c +15 -14
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_sequences.h +0 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_superblock.c +4 -3
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_compress_superblock.h +0 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_cwksp.h +41 -27
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_double_fast.c +295 -120
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_double_fast.h +0 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_fast.c +309 -130
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_fast.h +0 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_lazy.c +482 -562
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_lazy.h +0 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_ldm.c +9 -7
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_ldm.h +1 -1
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_ldm_geartab.h +4 -1
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_opt.c +249 -148
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/zstd_opt.h +0 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/zstdmt_compress.c +76 -38
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/zstdmt_compress.h +4 -1
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/decompress/huf_decompress.c +727 -189
- data/ext/zstdlib_c/zstd-1.5.2/lib/decompress/huf_decompress_amd64.S +585 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_ddict.c +0 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_ddict.h +0 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_decompress.c +85 -22
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_decompress_block.c +744 -220
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_decompress_block.h +8 -2
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/decompress/zstd_decompress_internal.h +34 -3
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/zdict.h +4 -4
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/zstd.h +179 -136
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/zstd_errors.h +0 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzclose.c +0 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzcompatibility.h +0 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzguts.h +0 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzlib.c +0 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzread.c +0 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/zlibWrapper/gzwrite.c +0 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/zlibWrapper/zstd_zlibwrapper.c +7 -0
- data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/zlibWrapper/zstd_zlibwrapper.h +0 -0
- data/ext/zstdlib_c/zstd.mk +15 -0
- data/lib/2.4/zstdlib_c.so +0 -0
- data/lib/2.5/zstdlib_c.so +0 -0
- data/lib/2.6/zstdlib_c.so +0 -0
- data/lib/2.7/zstdlib_c.so +0 -0
- data/lib/3.0/zstdlib_c.so +0 -0
- data/lib/3.1/zstdlib_c.so +0 -0
- data/lib/zstdlib.rb +2 -2
- metadata +125 -121
- data/ext/zstdlib/zstd-1.5.0/lib/common/xxhash.c +0 -824
- data/ext/zstdlib/zstd-1.5.0/lib/common/xxhash.h +0 -285
- data/ext/zstdlib/zstd.mk +0 -14
- data/lib/2.2/zstdlib.so +0 -0
- data/lib/2.3/zstdlib.so +0 -0
- data/lib/2.4/zstdlib.so +0 -0
- data/lib/2.5/zstdlib.so +0 -0
- data/lib/2.6/zstdlib.so +0 -0
- data/lib/2.7/zstdlib.so +0 -0
data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/zstdmt_compress.c

@@ -102,9 +102,8 @@ typedef struct ZSTDMT_bufferPool_s {
     buffer_t bTable[1];   /* variable size */
 } ZSTDMT_bufferPool;
 
-static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned nbWorkers, ZSTD_customMem cMem)
+static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned maxNbBuffers, ZSTD_customMem cMem)
 {
-    unsigned const maxNbBuffers = 2*nbWorkers + 3;
     ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_customCalloc(
         sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem);
     if (bufPool==NULL) return NULL;
@@ -160,9 +159,8 @@ static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const
 }
 
 
-static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, U32 nbWorkers)
+static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, unsigned maxNbBuffers)
 {
-    unsigned const maxNbBuffers = 2*nbWorkers + 3;
     if (srcBufPool==NULL) return NULL;
     if (srcBufPool->totalBuffers >= maxNbBuffers) /* good enough */
         return srcBufPool;
@@ -171,7 +169,7 @@ static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool,
         size_t const bSize = srcBufPool->bufferSize;   /* forward parameters */
         ZSTDMT_bufferPool* newBufPool;
         ZSTDMT_freeBufferPool(srcBufPool);
-        newBufPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
+        newBufPool = ZSTDMT_createBufferPool(maxNbBuffers, cMem);
         if (newBufPool==NULL) return newBufPool;
         ZSTDMT_setBufferSize(newBufPool, bSize);
         return newBufPool;
@@ -263,6 +261,16 @@ static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
     ZSTD_customFree(buf.start, bufPool->cMem);
 }
 
+/* We need 2 output buffers per worker since each dstBuff must be flushed after it is released.
+ * The 3 additional buffers are as follows:
+ *   1 buffer for input loading
+ *   1 buffer for "next input" when submitting current one
+ *   1 buffer stuck in queue */
+#define BUF_POOL_MAX_NB_BUFFERS(nbWorkers) 2*nbWorkers + 3
+
+/* After a worker releases its rawSeqStore, it is immediately ready for reuse.
+ * So we only need one seq buffer per worker. */
+#define SEQ_POOL_MAX_NB_BUFFERS(nbWorkers) nbWorkers
 
 /* =====   Seq Pool Wrapper   ====== */
 
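The hunks above move the `2*nbWorkers + 3` buffer arithmetic out of ZSTDMT_createBufferPool/ZSTDMT_expandBufferPool and give it a name at the call sites. The standalone sketch below (not gem code) simply tabulates what the two new macros yield for a few worker counts; the argument parentheses are added here for macro hygiene and are not present in the upstream definitions.

/* Standalone sketch, not gem code: pool sizes implied by the macros added above. */
#include <stdio.h>

#define BUF_POOL_MAX_NB_BUFFERS(nbWorkers) (2*(nbWorkers) + 3)  /* 2 dst buffers per worker + 3 staging buffers */
#define SEQ_POOL_MAX_NB_BUFFERS(nbWorkers) (nbWorkers)          /* one rawSeqStore per worker */

int main(void)
{
    unsigned nbWorkers;
    for (nbWorkers = 1; nbWorkers <= 8; nbWorkers *= 2) {
        printf("workers=%u  bufPool=%u buffers  seqPool=%u buffers\n",
               nbWorkers,
               BUF_POOL_MAX_NB_BUFFERS(nbWorkers),
               SEQ_POOL_MAX_NB_BUFFERS(nbWorkers));
    }
    return 0;
}

For four workers, for example, the buffer pool holds 11 buffers (8 output slots plus the 3 staging buffers described in the comment), while the seq pool holds 4.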
@@ -316,7 +324,7 @@ static void ZSTDMT_setNbSeq(ZSTDMT_seqPool* const seqPool, size_t const nbSeq)
 
 static ZSTDMT_seqPool* ZSTDMT_createSeqPool(unsigned nbWorkers, ZSTD_customMem cMem)
 {
-    ZSTDMT_seqPool* const seqPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
+    ZSTDMT_seqPool* const seqPool = ZSTDMT_createBufferPool(SEQ_POOL_MAX_NB_BUFFERS(nbWorkers), cMem);
     if (seqPool == NULL) return NULL;
     ZSTDMT_setNbSeq(seqPool, 0);
     return seqPool;
@@ -329,7 +337,7 @@ static void ZSTDMT_freeSeqPool(ZSTDMT_seqPool* seqPool)
 
 static ZSTDMT_seqPool* ZSTDMT_expandSeqPool(ZSTDMT_seqPool* pool, U32 nbWorkers)
 {
-    return ZSTDMT_expandBufferPool(pool, nbWorkers);
+    return ZSTDMT_expandBufferPool(pool, SEQ_POOL_MAX_NB_BUFFERS(nbWorkers));
 }
 
 
@@ -467,7 +475,7 @@ ZSTDMT_serialState_reset(serialState_t* serialState,
                          ZSTD_dictContentType_e dictContentType)
 {
     /* Adjust parameters */
-    if (params.ldmParams.enableLdm) {
+    if (params.ldmParams.enableLdm == ZSTD_ps_enable) {
         DEBUGLOG(4, "LDM window size = %u KB", (1U << params.cParams.windowLog) >> 10);
         ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
         assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
@@ -478,7 +486,7 @@ ZSTDMT_serialState_reset(serialState_t* serialState,
     serialState->nextJobID = 0;
     if (params.fParams.checksumFlag)
         XXH64_reset(&serialState->xxhState, 0);
-    if (params.ldmParams.enableLdm) {
+    if (params.ldmParams.enableLdm == ZSTD_ps_enable) {
         ZSTD_customMem cMem = params.customMem;
         unsigned const hashLog = params.ldmParams.hashLog;
         size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t);
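From here on, every LDM test changes from a truthiness check on `ldmParams.enableLdm` to an explicit comparison with `ZSTD_ps_enable`, because zstd 1.5.2 turns the field into a three-state parameter switch declared in the bundled zstd.h. The sketch below is illustrative only; the local enum values are assumed to mirror that header (auto = 0, enable = 1, disable = 2). It shows why the old style of check would misread the new "disable" state.

/* Illustrative only: a local stand-in for the three-state switch. */
#include <stdio.h>

typedef enum { ps_auto = 0, ps_enable = 1, ps_disable = 2 } param_switch_e;

static int ldm_on_truthy(param_switch_e s)   { return s != 0; }         /* pre-1.5.2 style check */
static int ldm_on_explicit(param_switch_e s) { return s == ps_enable; } /* what the hunks above do */

int main(void)
{
    /* A truthiness test reads "disable" (2) as enabled, which is why every
     * call site now compares against ZSTD_ps_enable explicitly. */
    printf("disable: truthy=%d explicit=%d\n", ldm_on_truthy(ps_disable), ldm_on_explicit(ps_disable));
    printf("auto   : truthy=%d explicit=%d\n", ldm_on_truthy(ps_auto),    ldm_on_explicit(ps_auto));
    return 0;
}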
@@ -564,7 +572,7 @@ static void ZSTDMT_serialState_update(serialState_t* serialState,
     /* A future job may error and skip our job */
     if (serialState->nextJobID == jobID) {
         /* It is now our turn, do any processing necessary */
-        if (serialState->params.ldmParams.enableLdm) {
+        if (serialState->params.ldmParams.enableLdm == ZSTD_ps_enable) {
             size_t error;
             assert(seqStore.seq != NULL && seqStore.pos == 0 &&
                    seqStore.size == 0 && seqStore.capacity > 0);
@@ -594,7 +602,7 @@ static void ZSTDMT_serialState_update(serialState_t* serialState,
         if (seqStore.size > 0) {
             size_t const err = ZSTD_referenceExternalSequences(
                 jobCCtx, seqStore.seq, seqStore.size);
-            assert(serialState->params.ldmParams.enableLdm);
+            assert(serialState->params.ldmParams.enableLdm == ZSTD_ps_enable);
             assert(!ZSTD_isError(err));
             (void)err;
         }
@@ -672,7 +680,7 @@ static void ZSTDMT_compressionJob(void* jobDescription)
         if (dstBuff.start==NULL) JOB_ERROR(ERROR(memory_allocation));
         job->dstBuff = dstBuff;   /* this value can be read in ZSTDMT_flush, when it copies the whole job */
     }
-    if (jobParams.ldmParams.enableLdm && rawSeqStore.seq == NULL)
+    if (jobParams.ldmParams.enableLdm == ZSTD_ps_enable && rawSeqStore.seq == NULL)
         JOB_ERROR(ERROR(memory_allocation));
 
     /* Don't compute the checksum for chunks, since we compute it externally,
@@ -680,7 +688,7 @@ static void ZSTDMT_compressionJob(void* jobDescription)
      */
     if (job->jobID != 0) jobParams.fParams.checksumFlag = 0;
     /* Don't run LDM for the chunks, since we handle it externally */
-    jobParams.ldmParams.enableLdm = 0;
+    jobParams.ldmParams.enableLdm = ZSTD_ps_disable;
     /* Correct nbWorkers to 0. */
     jobParams.nbWorkers = 0;
 
@@ -807,6 +815,15 @@ typedef struct {
 static const roundBuff_t kNullRoundBuff = {NULL, 0, 0};
 
 #define RSYNC_LENGTH 32
+/* Don't create chunks smaller than the zstd block size.
+ * This stops us from regressing compression ratio too much,
+ * and ensures our output fits in ZSTD_compressBound().
+ *
+ * If this is shrunk < ZSTD_BLOCKSIZELOG_MIN then
+ * ZSTD_COMPRESSBOUND() will need to be updated.
+ */
+#define RSYNC_MIN_BLOCK_LOG ZSTD_BLOCKSIZELOG_MAX
+#define RSYNC_MIN_BLOCK_SIZE (1<<RSYNC_MIN_BLOCK_LOG)
 
 typedef struct {
   U64 hash;
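RSYNC_MIN_BLOCK_LOG is defined in terms of ZSTD_BLOCKSIZELOG_MAX, which is 17 in the bundled zstd headers (an assumption of the sketch below, not spelled out in the hunk), so rsyncable mode now refuses to cut chunks smaller than 128 KiB. A quick standalone check of that arithmetic:

/* Standalone arithmetic check (illustrative): ZSTD_BLOCKSIZELOG_MAX is assumed to be 17. */
#include <stdio.h>

#define ZSTD_BLOCKSIZELOG_MAX 17                       /* assumed value */
#define RSYNC_MIN_BLOCK_LOG   ZSTD_BLOCKSIZELOG_MAX
#define RSYNC_MIN_BLOCK_SIZE  (1 << RSYNC_MIN_BLOCK_LOG)

int main(void)
{
    printf("rsyncable chunks are never cut below %d bytes (%d KiB)\n",
           RSYNC_MIN_BLOCK_SIZE, RSYNC_MIN_BLOCK_SIZE >> 10);
    return 0;
}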
@@ -927,7 +944,7 @@ MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers,
     mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem);
     assert(nbJobs > 0); assert((nbJobs & (nbJobs - 1)) == 0);  /* ensure nbJobs is a power of 2 */
     mtctx->jobIDMask = nbJobs - 1;
-    mtctx->bufPool = ZSTDMT_createBufferPool(nbWorkers, cMem);
+    mtctx->bufPool = ZSTDMT_createBufferPool(BUF_POOL_MAX_NB_BUFFERS(nbWorkers), cMem);
     mtctx->cctxPool = ZSTDMT_createCCtxPool(nbWorkers, cMem);
     mtctx->seqPool = ZSTDMT_createSeqPool(nbWorkers, cMem);
     initError = ZSTDMT_serialState_init(&mtctx->serial);
@@ -1030,7 +1047,7 @@ static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers)
 {
     if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation);
     FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) , "");
-    mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, nbWorkers);
+    mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, BUF_POOL_MAX_NB_BUFFERS(nbWorkers));
     if (mtctx->bufPool == NULL) return ERROR(memory_allocation);
     mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers);
     if (mtctx->cctxPool == NULL) return ERROR(memory_allocation);
@@ -1135,7 +1152,7 @@ size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx)
 static unsigned ZSTDMT_computeTargetJobLog(const ZSTD_CCtx_params* params)
 {
     unsigned jobLog;
-    if (params->ldmParams.enableLdm) {
+    if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
         /* In Long Range Mode, the windowLog is typically oversized.
          * In which case, it's preferable to determine the jobSize
          * based on cycleLog instead. */
@@ -1179,7 +1196,7 @@ static size_t ZSTDMT_computeOverlapSize(const ZSTD_CCtx_params* params)
     int const overlapRLog = 9 - ZSTDMT_overlapLog(params->overlapLog, params->cParams.strategy);
     int ovLog = (overlapRLog >= 8) ? 0 : (params->cParams.windowLog - overlapRLog);
     assert(0 <= overlapRLog && overlapRLog <= 8);
-    if (params->ldmParams.enableLdm) {
+    if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
         /* In Long Range Mode, the windowLog is typically oversized.
          * In which case, it's preferable to determine the jobSize
          * based on chainLog instead.
@@ -1252,6 +1269,9 @@ size_t ZSTDMT_initCStream_internal(
         /* Aim for the targetsectionSize as the average job size. */
         U32 const jobSizeKB = (U32)(mtctx->targetSectionSize >> 10);
         U32 const rsyncBits = (assert(jobSizeKB >= 1), ZSTD_highbit32(jobSizeKB) + 10);
+        /* We refuse to create jobs < RSYNC_MIN_BLOCK_SIZE bytes, so make sure our
+         * expected job size is at least 4x larger. */
+        assert(rsyncBits >= RSYNC_MIN_BLOCK_LOG + 2);
         DEBUGLOG(4, "rsyncLog = %u", rsyncBits);
         mtctx->rsync.hash = 0;
         mtctx->rsync.hitMask = (1ULL << rsyncBits) - 1;
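The new assertion ties the rsyncable hash target back to that floor: rsyncBits is ZSTD_highbit32(jobSizeKB) + 10 and must be at least RSYNC_MIN_BLOCK_LOG + 2, i.e. the average chunk (roughly 2^rsyncBits bytes) must be at least four times the minimum block. The sketch below reruns that arithmetic for a hypothetical 2 MiB job size; highbit32() here is a plain loop, not zstd's ZSTD_highbit32(), and RSYNC_MIN_BLOCK_LOG = 17 is assumed as above.

/* Illustrative rerun of the new assertion for a hypothetical 2 MiB job size. */
#include <assert.h>
#include <stdio.h>

#define RSYNC_MIN_BLOCK_LOG 17   /* assumed, see sketch above */

static unsigned highbit32(unsigned v) { unsigned b = 0; while (v >>= 1) b++; return b; }

int main(void)
{
    unsigned const jobSizeKB = 2048;                        /* 2 MiB target section size */
    unsigned const rsyncBits = highbit32(jobSizeKB) + 10;   /* 11 + 10 = 21 */
    printf("average rsync chunk ~ 2^%u bytes\n", rsyncBits);
    /* 2^21 = 2 MiB is 16x the 128 KiB floor, comfortably above the required 4x. */
    assert(rsyncBits >= RSYNC_MIN_BLOCK_LOG + 2);
    return 0;
}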
@@ -1263,7 +1283,7 @@ size_t ZSTDMT_initCStream_internal(
     ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize));
     {
         /* If ldm is enabled we need windowSize space. */
-        size_t const windowSize = mtctx->params.ldmParams.enableLdm ? (1U << mtctx->params.cParams.windowLog) : 0;
+        size_t const windowSize = mtctx->params.ldmParams.enableLdm == ZSTD_ps_enable ? (1U << mtctx->params.cParams.windowLog) : 0;
         /* Two buffers of slack, plus extra space for the overlap
          * This is the minimum slack that LDM works with. One extra because
          * flush might waste up to targetSectionSize-1 bytes. Another extra
@@ -1538,17 +1558,21 @@ static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx)
 static int ZSTDMT_isOverlapped(buffer_t buffer, range_t range)
 {
     BYTE const* const bufferStart = (BYTE const*)buffer.start;
-    BYTE const* const bufferEnd = bufferStart + buffer.capacity;
     BYTE const* const rangeStart = (BYTE const*)range.start;
-    BYTE const* const rangeEnd = range.size != 0 ? rangeStart + range.size : rangeStart;
 
     if (rangeStart == NULL || bufferStart == NULL)
         return 0;
-    /* Empty ranges cannot overlap */
-    if (bufferStart == bufferEnd || rangeStart == rangeEnd)
-        return 0;
 
-    return bufferStart < rangeEnd && rangeStart < bufferEnd;
+    {
+        BYTE const* const bufferEnd = bufferStart + buffer.capacity;
+        BYTE const* const rangeEnd = rangeStart + range.size;
+
+        /* Empty ranges cannot overlap */
+        if (bufferStart == bufferEnd || rangeStart == rangeEnd)
+            return 0;
+
+        return bufferStart < rangeEnd && rangeStart < bufferEnd;
+    }
 }
 
 static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window)
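The rewritten ZSTDMT_isOverlapped defers computing bufferEnd/rangeEnd until both start pointers are known to be non-NULL, and treats both spans as half-open intervals. The same predicate on plain offsets, as a self-contained illustration (not gem code):

/* Self-contained version of the half-open overlap test, on offsets rather than BYTE pointers. */
#include <assert.h>
#include <stddef.h>

static int intervals_overlap(size_t aStart, size_t aEnd, size_t bStart, size_t bEnd)
{
    /* Empty intervals cannot overlap */
    if (aStart == aEnd || bStart == bEnd) return 0;
    return aStart < bEnd && bStart < aEnd;
}

int main(void)
{
    assert( intervals_overlap(0, 10,  5, 15));  /* partial overlap */
    assert(!intervals_overlap(0, 10, 10, 20));  /* adjacent, half-open: no overlap */
    assert(!intervals_overlap(3,  3,  0, 20));  /* empty interval never overlaps */
    return 0;
}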
@@ -1575,7 +1599,7 @@ static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window)
 
 static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer)
 {
-    if (mtctx->params.ldmParams.enableLdm) {
+    if (mtctx->params.ldmParams.enableLdm == ZSTD_ps_enable) {
         ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex;
         DEBUGLOG(5, "ZSTDMT_waitForLdmComplete");
         DEBUGLOG(5, "source  [0x%zx, 0x%zx)",
@@ -1678,6 +1702,11 @@ findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input)
     if (!mtctx->params.rsyncable)
         /* Rsync is disabled. */
         return syncPoint;
+    if (mtctx->inBuff.filled + input.size - input.pos < RSYNC_MIN_BLOCK_SIZE)
+        /* We don't emit synchronization points if it would produce too small blocks.
+         * We don't have enough input to find a synchronization point, so don't look.
+         */
+        return syncPoint;
     if (mtctx->inBuff.filled + syncPoint.toLoad < RSYNC_LENGTH)
         /* Not enough to compute the hash.
          * We will miss any synchronization points in this RSYNC_LENGTH byte
@@ -1688,10 +1717,28 @@ findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input)
          */
         return syncPoint;
     /* Initialize the loop variables. */
-    if (mtctx->inBuff.filled >= RSYNC_LENGTH) {
-        /* We have enough bytes buffered to initialize the hash,
+    if (mtctx->inBuff.filled < RSYNC_MIN_BLOCK_SIZE) {
+        /* We don't need to scan the first RSYNC_MIN_BLOCK_SIZE positions
+         * because they can't possibly be a sync point. So we can start
+         * part way through the input buffer.
+         */
+        pos = RSYNC_MIN_BLOCK_SIZE - mtctx->inBuff.filled;
+        if (pos >= RSYNC_LENGTH) {
+            prev = istart + pos - RSYNC_LENGTH;
+            hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH);
+        } else {
+            assert(mtctx->inBuff.filled >= RSYNC_LENGTH);
+            prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH;
+            hash = ZSTD_rollingHash_compute(prev + pos, (RSYNC_LENGTH - pos));
+            hash = ZSTD_rollingHash_append(hash, istart, pos);
+        }
+    } else {
+        /* We have enough bytes buffered to initialize the hash,
+         * and are have processed enough bytes to find a sync point.
          * Start scanning at the beginning of the input.
          */
+        assert(mtctx->inBuff.filled >= RSYNC_MIN_BLOCK_SIZE);
+        assert(RSYNC_MIN_BLOCK_SIZE >= RSYNC_LENGTH);
         pos = 0;
         prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH;
         hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH);
@@ -1705,16 +1752,6 @@ findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input)
             syncPoint.flush = 1;
             return syncPoint;
         }
-    } else {
-        /* We don't have enough bytes buffered to initialize the hash, but
-         * we know we have at least RSYNC_LENGTH bytes total.
-         * Start scanning after the first RSYNC_LENGTH bytes less the bytes
-         * already buffered.
-         */
-        pos = RSYNC_LENGTH - mtctx->inBuff.filled;
-        prev = (BYTE const*)mtctx->inBuff.buffer.start - pos;
-        hash = ZSTD_rollingHash_compute(mtctx->inBuff.buffer.start, mtctx->inBuff.filled);
-        hash = ZSTD_rollingHash_append(hash, istart, pos);
     }
     /* Starting with the hash of the previous RSYNC_LENGTH bytes, roll
      * through the input. If we hit a synchronization point, then cut the
@@ -1726,8 +1763,9 @@ findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input)
      */
     for (; pos < syncPoint.toLoad; ++pos) {
         BYTE const toRemove = pos < RSYNC_LENGTH ? prev[pos] : istart[pos - RSYNC_LENGTH];
-
+        assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash);
         hash = ZSTD_rollingHash_rotate(hash, toRemove, istart[pos], primePower);
+        assert(mtctx->inBuff.filled + pos >= RSYNC_MIN_BLOCK_SIZE);
         if ((hash & hitMask) == hitMask) {
             syncPoint.toLoad = pos + 1;
             syncPoint.flush = 1;
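These last hunks thread RSYNC_MIN_BLOCK_SIZE through findSynchronizationPoint: no synchronization point is emitted until enough input is available, and the scan may start part-way into the buffer since earlier positions cannot qualify. The scan itself still works the same way: keep a rolling hash of the last RSYNC_LENGTH (32) bytes and cut whenever it matches hitMask. The sketch below shows the remove-oldest/append-newest update that such a scan relies on; the multiplier and helper names are invented and do not match zstd's ZSTD_rollingHash_* implementation.

/* Generic rolling-hash sketch (illustrative only). */
#include <assert.h>
#include <stdint.h>

#define WIN 32  /* window length, playing the role of RSYNC_LENGTH */

static const uint64_t PRIME = 0x9E3779B185EBCA87ULL;  /* arbitrary odd multiplier */

/* hash of buf[0..WIN-1] = sum of buf[i] * PRIME^(WIN-1-i), mod 2^64 */
static uint64_t hash_window(const uint8_t* buf)
{
    uint64_t h = 0;
    int i;
    for (i = 0; i < WIN; ++i) h = h * PRIME + buf[i];
    return h;
}

/* slide the window one byte: drop `out`, append `in`; primePow = PRIME^(WIN-1) */
static uint64_t hash_rotate(uint64_t h, uint8_t out, uint8_t in, uint64_t primePow)
{
    return (h - out * primePow) * PRIME + in;
}

int main(void)
{
    uint8_t  data[WIN + 1];
    uint64_t primePow = 1;
    uint64_t h;
    int i;
    for (i = 0; i < WIN - 1; ++i) primePow *= PRIME;   /* PRIME^(WIN-1) */
    for (i = 0; i < WIN + 1; ++i) data[i] = (uint8_t)(i * 7 + 3);
    h = hash_window(data);                             /* hash of bytes 0..31    */
    h = hash_rotate(h, data[0], data[WIN], primePow);  /* now hash of bytes 1..32 */
    assert(h == hash_window(data + 1));                /* same as recomputing from scratch */
    return 0;
}

Recomputing the window hash from scratch and comparing it with the rolled value, as the sketch's final assert does, is the same consistency check that the new assert added inside the scan loop above performs against ZSTD_rollingHash_compute().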
data/ext/{zstdlib/zstd-1.5.0 → zstdlib_c/zstd-1.5.2}/lib/compress/zstdmt_compress.h

@@ -65,8 +65,11 @@ size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx);
  * Private use only. Init streaming operation.
  * expects params to be valid.
  * must receive dict, or cdict, or none, but not both.
+ * mtctx can be freshly constructed or reused from a prior compression.
+ * If mtctx is reused, memory allocations from the prior compression may not be freed,
+ * even if they are not needed for the current compression.
  * @return : 0, or an error code */
-size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* zcs,
+size_t ZSTDMT_initCStream_internal(ZSTDMT_CCtx* mtctx,
                     const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType,
                     const ZSTD_CDict* cdict,
                     ZSTD_CCtx_params params, unsigned long long pledgedSrcSize);
|