zstd-ruby 1.3.8.0 → 1.4.0.0
- checksums.yaml +4 -4
- data/.travis.yml +6 -5
- data/README.md +1 -1
- data/ext/zstdruby/libzstd/Makefile +7 -3
- data/ext/zstdruby/libzstd/README.md +4 -2
- data/ext/zstdruby/libzstd/common/compiler.h +1 -1
- data/ext/zstdruby/libzstd/common/fse.h +1 -1
- data/ext/zstdruby/libzstd/common/threading.c +2 -2
- data/ext/zstdruby/libzstd/common/xxhash.c +2 -2
- data/ext/zstdruby/libzstd/common/zstd_internal.h +55 -2
- data/ext/zstdruby/libzstd/compress/fse_compress.c +2 -2
- data/ext/zstdruby/libzstd/compress/zstd_compress.c +423 -296
- data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +14 -11
- data/ext/zstdruby/libzstd/compress/zstd_fast.c +203 -124
- data/ext/zstdruby/libzstd/compress/zstd_lazy.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_ldm.c +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_opt.c +27 -11
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +41 -49
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +43 -26
- data/ext/zstdruby/libzstd/decompress/zstd_ddict.c +4 -4
- data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +257 -164
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.c +51 -47
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_internal.h +7 -0
- data/ext/zstdruby/libzstd/dictBuilder/cover.c +58 -13
- data/ext/zstdruby/libzstd/dictBuilder/cover.h +29 -0
- data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +25 -13
- data/ext/zstdruby/libzstd/dictBuilder/zdict.h +18 -8
- data/ext/zstdruby/libzstd/dll/example/build_package.bat +3 -2
- data/ext/zstdruby/libzstd/legacy/zstd_legacy.h +42 -12
- data/ext/zstdruby/libzstd/legacy/zstd_v01.c +32 -7
- data/ext/zstdruby/libzstd/legacy/zstd_v01.h +12 -7
- data/ext/zstdruby/libzstd/legacy/zstd_v02.c +31 -12
- data/ext/zstdruby/libzstd/legacy/zstd_v02.h +12 -7
- data/ext/zstdruby/libzstd/legacy/zstd_v03.c +32 -12
- data/ext/zstdruby/libzstd/legacy/zstd_v03.h +12 -7
- data/ext/zstdruby/libzstd/legacy/zstd_v04.c +32 -12
- data/ext/zstdruby/libzstd/legacy/zstd_v04.h +12 -7
- data/ext/zstdruby/libzstd/legacy/zstd_v05.c +32 -7
- data/ext/zstdruby/libzstd/legacy/zstd_v05.h +12 -7
- data/ext/zstdruby/libzstd/legacy/zstd_v06.c +36 -8
- data/ext/zstdruby/libzstd/legacy/zstd_v06.h +10 -5
- data/ext/zstdruby/libzstd/legacy/zstd_v07.c +40 -9
- data/ext/zstdruby/libzstd/legacy/zstd_v07.h +10 -5
- data/ext/zstdruby/libzstd/zstd.h +689 -542
- data/lib/zstd-ruby/version.rb +1 -1
- data/zstd-ruby.gemspec +1 -1
- metadata +6 -7
- data/ext/zstdruby/libzstd/dll/libzstd.def +0 -87
data/ext/zstdruby/libzstd/compress/zstd_lazy.h
@@ -19,7 +19,7 @@ extern "C" {
 
 U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip);
 
-void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue); /*! used in ZSTD_reduceIndex().
+void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue); /*! used in ZSTD_reduceIndex(). preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */
 
 size_t ZSTD_compressBlock_btlazy2(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
data/ext/zstdruby/libzstd/compress/zstd_ldm.c
@@ -429,7 +429,7 @@ size_t ZSTD_ldm_generateSequences(
      */
     assert(ldmState->window.nextSrc >= (BYTE const*)src + srcSize);
     /* The input could be very large (in zstdmt), so it must be broken up into
-     * chunks to enforce the
+     * chunks to enforce the maximum distance and handle overflow correction.
      */
     assert(sequences->pos <= sequences->size);
     assert(sequences->size <= sequences->capacity);
data/ext/zstdruby/libzstd/compress/zstd_opt.c
@@ -64,9 +64,15 @@ MEM_STATIC double ZSTD_fCost(U32 price)
 }
 #endif
 
+static int ZSTD_compressedLiterals(optState_t const* const optPtr)
+{
+    return optPtr->literalCompressionMode != ZSTD_lcm_uncompressed;
+}
+
 static void ZSTD_setBasePrices(optState_t* optPtr, int optLevel)
 {
-
+    if (ZSTD_compressedLiterals(optPtr))
+        optPtr->litSumBasePrice = WEIGHT(optPtr->litSum, optLevel);
     optPtr->litLengthSumBasePrice = WEIGHT(optPtr->litLengthSum, optLevel);
     optPtr->matchLengthSumBasePrice = WEIGHT(optPtr->matchLengthSum, optLevel);
     optPtr->offCodeSumBasePrice = WEIGHT(optPtr->offCodeSum, optLevel);
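The new ZSTD_compressedLiterals() helper above lets the optimal parser skip literal statistics whenever literal compression is disabled. For context, a minimal sketch of how a caller would disable literal compression through the cctx parameter API, assuming ZSTD_c_literalCompressionMode and ZSTD_lcm_uncompressed are exposed by this zstd version (both are experimental and sit behind ZSTD_STATIC_LINKING_ONLY):

```c
#define ZSTD_STATIC_LINKING_ONLY  /* experimental parameters */
#include <zstd.h>

/* Sketch only: store literals raw so the litFreq/litSum statistics above are
 * never consulted. ZSTD_c_literalCompressionMode / ZSTD_lcm_uncompressed are
 * assumed to be available in this version; error checks omitted. */
static size_t compress_with_raw_literals(void* dst, size_t dstCapacity,
                                         const void* src, size_t srcSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t cSize;
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 19);  /* high levels use zstd_opt.c */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_literalCompressionMode, ZSTD_lcm_uncompressed);
    cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
    ZSTD_freeCCtx(cctx);
    return cSize;
}
```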
@@ -99,6 +105,7 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
                   const BYTE* const src, size_t const srcSize,
                   int const optLevel)
 {
+    int const compressedLiterals = ZSTD_compressedLiterals(optPtr);
     DEBUGLOG(5, "ZSTD_rescaleFreqs (srcSize=%u)", (unsigned)srcSize);
     optPtr->priceType = zop_dynamic;
 
@@ -113,9 +120,10 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
             /* huffman table presumed generated by dictionary */
             optPtr->priceType = zop_dynamic;
 
-
-
-
+            if (compressedLiterals) {
+                unsigned lit;
+                assert(optPtr->litFreq != NULL);
+                optPtr->litSum = 0;
                 for (lit=0; lit<=MaxLit; lit++) {
                     U32 const scaleLog = 11; /* scale to 2K */
                     U32 const bitCost = HUF_getNbBits(optPtr->symbolCosts->huf.CTable, lit);
@@ -163,10 +171,11 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
     } else {  /* not a dictionary */
 
         assert(optPtr->litFreq != NULL);
-
+        if (compressedLiterals) {
+            unsigned lit = MaxLit;
             HIST_count_simple(optPtr->litFreq, &lit, src, srcSize); /* use raw first block to init statistics */
+            optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
         }
-        optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
 
         { unsigned ll;
           for (ll=0; ll<=MaxLL; ll++)
@@ -190,7 +199,8 @@ ZSTD_rescaleFreqs(optState_t* const optPtr,
 
     } else {   /* new block : re-use previous statistics, scaled down */
 
-
+        if (compressedLiterals)
+            optPtr->litSum = ZSTD_downscaleStat(optPtr->litFreq, MaxLit, 1);
        optPtr->litLengthSum = ZSTD_downscaleStat(optPtr->litLengthFreq, MaxLL, 0);
        optPtr->matchLengthSum = ZSTD_downscaleStat(optPtr->matchLengthFreq, MaxML, 0);
        optPtr->offCodeSum = ZSTD_downscaleStat(optPtr->offCodeFreq, MaxOff, 0);
@@ -207,6 +217,10 @@ static U32 ZSTD_rawLiteralsCost(const BYTE* const literals, U32 const litLength,
                                 int optLevel)
 {
     if (litLength == 0) return 0;
+
+    if (!ZSTD_compressedLiterals(optPtr))
+        return (litLength << 3) * BITCOST_MULTIPLIER; /* Uncompressed - 8 bytes per literal. */
+
     if (optPtr->priceType == zop_predef)
         return (litLength*6) * BITCOST_MULTIPLIER; /* 6 bit per literal - no statistic used */
 
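In the two early returns just above, prices are fixed-point bit costs. Assuming BITCOST_MULTIPLIER is (1 << BITCOST_ACCURACY) with an accuracy of 8 fractional bits, as defined in upstream zstd_compress_internal.h (treat the exact constant as an assumption), the raw-literals branch charges litLength * 8 in the same units as the flat 6-bit predef branch:

```c
#include <stdio.h>

/* Worked example of the flat literal prices above, in fixed-point units.
 * The constants are assumed to match upstream zstd_compress_internal.h. */
#define BITCOST_ACCURACY   8
#define BITCOST_MULTIPLIER (1 << BITCOST_ACCURACY)

int main(void)
{
    unsigned const litLength  = 100;
    unsigned const rawCost    = (litLength << 3) * BITCOST_MULTIPLIER; /* raw-literals branch */
    unsigned const predefCost = (litLength * 6) * BITCOST_MULTIPLIER;  /* zop_predef branch */
    printf("raw    literals: %u fixed-point = %u bits\n", rawCost,    rawCost    >> BITCOST_ACCURACY);
    printf("predef literals: %u fixed-point = %u bits\n", predefCost, predefCost >> BITCOST_ACCURACY);
    return 0;
}
```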
@@ -310,7 +324,8 @@ static void ZSTD_updateStats(optState_t* const optPtr,
                              U32 offsetCode, U32 matchLength)
 {
     /* literals */
-    {
+    if (ZSTD_compressedLiterals(optPtr)) {
+        U32 u;
         for (u=0; u < litLength; u++)
             optPtr->litFreq[literals[u]] += ZSTD_LITFREQ_ADD;
         optPtr->litSum += litLength*ZSTD_LITFREQ_ADD;
@@ -870,7 +885,7 @@ ZSTD_compressBlock_opt_generic(ZSTD_matchState_t* ms,
             /* large match -> immediate encoding */
             {   U32 const maxML = matches[nbMatches-1].len;
                 U32 const maxOffset = matches[nbMatches-1].off;
-                DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new
+                DEBUGLOG(6, "found %u matches of maxLength=%u and maxOffCode=%u at cPos=%u => start new series",
                             nbMatches, maxML, maxOffset, (U32)(ip-prefixStart));
 
                 if (maxML > sufficient_len) {
@@ -1108,7 +1123,8 @@ static U32 ZSTD_upscaleStat(unsigned* table, U32 lastEltIndex, int bonus)
 /* used in 2-pass strategy */
 MEM_STATIC void ZSTD_upscaleStats(optState_t* optPtr)
 {
-
+    if (ZSTD_compressedLiterals(optPtr))
+        optPtr->litSum = ZSTD_upscaleStat(optPtr->litFreq, MaxLit, 0);
     optPtr->litLengthSum = ZSTD_upscaleStat(optPtr->litLengthFreq, MaxLL, 0);
     optPtr->matchLengthSum = ZSTD_upscaleStat(optPtr->matchLengthFreq, MaxML, 0);
     optPtr->offCodeSum = ZSTD_upscaleStat(optPtr->offCodeFreq, MaxOff, 0);
@@ -1117,7 +1133,7 @@ MEM_STATIC void ZSTD_upscaleStats(optState_t* optPtr)
 /* ZSTD_initStats_ultra():
  * make a first compression pass, just to seed stats with more accurate starting values.
  * only works on first block, with no dictionary and no ldm.
- * this function cannot error, hence its
+ * this function cannot error, hence its contract must be respected.
  */
 static void
 ZSTD_initStats_ultra(ZSTD_matchState_t* ms,
data/ext/zstdruby/libzstd/compress/zstdmt_compress.c
@@ -22,6 +22,7 @@
 /* ====== Dependencies ====== */
 #include <string.h> /* memcpy, memset */
 #include <limits.h> /* INT_MAX, UINT_MAX */
+#include "mem.h" /* MEM_STATIC */
 #include "pool.h" /* threadpool */
 #include "threading.h" /* mutex */
 #include "zstd_compress_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */
@@ -456,7 +457,7 @@ typedef struct {
      * Must be acquired after the main mutex when acquiring both.
      */
     ZSTD_pthread_mutex_t ldmWindowMutex;
-    ZSTD_pthread_cond_t ldmWindowCond; /* Signaled when ldmWindow is
+    ZSTD_pthread_cond_t ldmWindowCond; /* Signaled when ldmWindow is updated */
     ZSTD_window_t ldmWindow; /* A thread-safe copy of ldmState.window */
 } serialState_t;
 
@@ -647,7 +648,7 @@ static void ZSTDMT_compressionJob(void* jobDescription)
     buffer_t dstBuff = job->dstBuff;
     size_t lastCBlockSize = 0;
 
-    /*
+    /* resources */
     if (cctx==NULL) JOB_ERROR(ERROR(memory_allocation));
     if (dstBuff.start == NULL) { /* streaming job : doesn't provide a dstBuffer */
         dstBuff = ZSTDMT_getBuffer(job->bufPool);
@@ -672,7 +673,7 @@ static void ZSTDMT_compressionJob(void* jobDescription)
         if (ZSTD_isError(initError)) JOB_ERROR(initError);
     } else {  /* srcStart points at reloaded section */
         U64 const pledgedSrcSize = job->firstJob ? job->fullFrameSize : job->src.size;
-        {   size_t const forceWindowError =
+        {   size_t const forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob);
             if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError);
         }
         {   size_t const initError = ZSTD_compressBegin_advanced_internal(cctx,
@@ -864,14 +865,10 @@ static size_t ZSTDMT_expandJobsTable (ZSTDMT_CCtx* mtctx, U32 nbWorkers) {
  * Internal use only */
 size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers)
 {
-
-    params->nbWorkers = nbWorkers;
-    params->overlapLog = ZSTDMT_OVERLAPLOG_DEFAULT;
-    params->jobSize = 0;
-    return nbWorkers;
+    return ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, (int)nbWorkers);
 }
 
-ZSTDMT_CCtx*
+MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem)
 {
     ZSTDMT_CCtx* mtctx;
     U32 nbJobs = nbWorkers + 2;
@@ -906,6 +903,17 @@ ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem)
     return mtctx;
 }
 
+ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem)
+{
+#ifdef ZSTD_MULTITHREAD
+    return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem);
+#else
+    (void)nbWorkers;
+    (void)cMem;
+    return NULL;
+#endif
+}
+
 ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers)
 {
     return ZSTDMT_createCCtx_advanced(nbWorkers, ZSTD_defaultCMem);
@@ -986,26 +994,13 @@ ZSTDMT_CCtxParam_setMTCtxParameter(ZSTD_CCtx_params* params,
     {
     case ZSTDMT_p_jobSize :
         DEBUGLOG(4, "ZSTDMT_CCtxParam_setMTCtxParameter : set jobSize to %i", value);
-
-            && value < ZSTDMT_JOBSIZE_MIN)
-            value = ZSTDMT_JOBSIZE_MIN;
-        assert(value >= 0);
-        if (value > ZSTDMT_JOBSIZE_MAX) value = ZSTDMT_JOBSIZE_MAX;
-        params->jobSize = value;
-        return value;
-
+        return ZSTD_CCtxParams_setParameter(params, ZSTD_c_jobSize, value);
     case ZSTDMT_p_overlapLog :
         DEBUGLOG(4, "ZSTDMT_p_overlapLog : %i", value);
-
-        if (value > ZSTD_OVERLAPLOG_MAX) value = ZSTD_OVERLAPLOG_MAX;
-        params->overlapLog = value;
-        return value;
-
+        return ZSTD_CCtxParams_setParameter(params, ZSTD_c_overlapLog, value);
     case ZSTDMT_p_rsyncable :
-
-        params
-        return value;
-
+        DEBUGLOG(4, "ZSTD_p_rsyncable : %i", value);
+        return ZSTD_CCtxParams_setParameter(params, ZSTD_c_rsyncable, value);
     default :
         return ERROR(parameter_unsupported);
     }
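As the hunk above shows, the ZSTDMT_* setters are now thin wrappers over ZSTD_CCtxParams_setParameter() with ZSTD_c_jobSize, ZSTD_c_overlapLog and ZSTD_c_rsyncable. The same knobs are reachable through the public ZSTD_CCtx_setParameter() on a regular compression context; a minimal sketch with illustrative values (ZSTD_c_rsyncable is experimental and sits behind ZSTD_STATIC_LINKING_ONLY):

```c
#define ZSTD_STATIC_LINKING_ONLY  /* only needed for ZSTD_c_rsyncable */
#include <zstd.h>

/* Sketch: configure the multithreading knobs that the ZSTDMT_* wrappers above
 * now delegate to. Values are illustrative; return codes should be checked
 * with ZSTD_isError() in real code. */
static void configure_mt(ZSTD_CCtx* cctx)
{
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);      /* enable multithreaded compression */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_jobSize, 1 << 20);  /* ~1 MiB per job */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_overlapLog, 6);     /* default overlap: 1/8th of window */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_rsyncable, 1);      /* experimental parameter */
}
```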
@@ -1021,32 +1016,29 @@ size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter,
 {
     switch (parameter) {
     case ZSTDMT_p_jobSize:
-
-        *value = (int)(mtctx->params.jobSize);
-        break;
+        return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_jobSize, value);
     case ZSTDMT_p_overlapLog:
-
-        break;
+        return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_overlapLog, value);
     case ZSTDMT_p_rsyncable:
-
-        break;
+        return ZSTD_CCtxParams_getParameter(&mtctx->params, ZSTD_c_rsyncable, value);
     default:
         return ERROR(parameter_unsupported);
     }
-    return 0;
 }
 
 /* Sets parameters relevant to the compression job,
  * initializing others to default values. */
 static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(ZSTD_CCtx_params const params)
 {
-    ZSTD_CCtx_params jobParams;
-
-
-    jobParams.
-    jobParams.
-    jobParams.
-
+    ZSTD_CCtx_params jobParams = params;
+    /* Clear parameters related to multithreading */
+    jobParams.forceWindow = 0;
+    jobParams.nbWorkers = 0;
+    jobParams.jobSize = 0;
+    jobParams.overlapLog = 0;
+    jobParams.rsyncable = 0;
+    memset(&jobParams.ldmParams, 0, sizeof(ldmParams_t));
+    memset(&jobParams.customMem, 0, sizeof(ZSTD_customMem));
     return jobParams;
 }
 
@@ -1056,7 +1048,7 @@ static ZSTD_CCtx_params ZSTDMT_initJobCCtxParams(ZSTD_CCtx_params const params)
 static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers)
 {
     if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation);
-
+    FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) );
     mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, nbWorkers);
     if (mtctx->bufPool == NULL) return ERROR(memory_allocation);
     mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers);
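The FORWARD_IF_ERROR() calls introduced throughout these hunks come from the error-handling helpers added in this release (part of the zstd_internal.h changes listed at the top of this page). Roughly, the macro evaluates a size_t-returning expression and propagates any zstd error code to the caller; a sketch of the idea, not the exact upstream definition:

```c
#include <zstd.h>  /* ZSTD_isError */

/* Sketch of the FORWARD_IF_ERROR idea: evaluate an expression returning a
 * zstd size_t result, and return early from the enclosing function if the
 * result is an error code. The upstream macro (zstd_internal.h) may differ. */
#define FORWARD_IF_ERROR_SKETCH(expr)                   \
    do {                                                \
        size_t const err_code_ = (expr);                \
        if (ZSTD_isError(err_code_)) return err_code_;  \
    } while (0)
```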
@@ -1263,7 +1255,7 @@ static size_t ZSTDMT_compress_advanced_internal(
     if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, avgJobSize))
         return ERROR(memory_allocation);
 
-
+    FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbJobs) );  /* only expands if necessary */
 
     {   unsigned u;
         for (u=0; u<nbJobs; u++) {
@@ -1396,7 +1388,7 @@ size_t ZSTDMT_initCStream_internal(
 
     /* init */
     if (params.nbWorkers != mtctx->params.nbWorkers)
-
+        FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, params.nbWorkers) );
 
     if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN;
     if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = ZSTDMT_JOBSIZE_MAX;
@@ -1547,7 +1539,7 @@ size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel) {
 /* ZSTDMT_writeLastEmptyBlock()
  * Write a single empty block with an end-of-frame to finish a frame.
  * Job must be created from streaming variant.
- * This function is always
+ * This function is always successful if expected conditions are fulfilled.
  */
 static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job)
 {
@@ -1987,7 +1979,7 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
     assert(input->pos <= input->size);
 
     if (mtctx->singleBlockingThread) {  /* delegate to single-thread (synchronous) */
-        return
+        return ZSTD_compressStream2(mtctx->cctxPool->cctx[0], output, input, endOp);
     }
 
     if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) {
@@ -2051,7 +2043,7 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
             || ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) {  /* must finish the frame with a zero-size block */
         size_t const jobSize = mtctx->inBuff.filled;
         assert(mtctx->inBuff.filled <= mtctx->targetSectionSize);
-
+        FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) );
     }
 
     /* check for potential compressed data ready to be flushed */
@@ -2065,7 +2057,7 @@ size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
 
 size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
 {
-
+    FORWARD_IF_ERROR( ZSTDMT_compressStream_generic(mtctx, output, input, ZSTD_e_continue) );
 
     /* recommended next input size : fill current input buffer */
     return mtctx->targetSectionSize - mtctx->inBuff.filled; /* note : could be zero when input buffer is fully filled and no more availability to create new job */
@@ -2082,7 +2074,7 @@ static size_t ZSTDMT_flushStream_internal(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* ou
             || ((endFrame==ZSTD_e_end) && !mtctx->frameEnded)) {  /* need a last 0-size block to end frame */
         DEBUGLOG(5, "ZSTDMT_flushStream_internal : create a new job (%u bytes, end:%u)",
                     (U32)srcSize, (U32)endFrame);
-
+        FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, srcSize, endFrame) );
     }
 
     /* check if there is any data available to flush */
data/ext/zstdruby/libzstd/compress/zstdmt_compress.h
@@ -17,10 +17,25 @@
 
 
 /* Note : This is an internal API.
- *
+ *        These APIs used to be exposed with ZSTDLIB_API,
  *        because it used to be the only way to invoke MT compression.
- *        Now, it's recommended to use
- *
+ *        Now, it's recommended to use ZSTD_compress2 and ZSTD_compressStream2()
+ *        instead.
+ *
+ *        If you depend on these APIs and can't switch, then define
+ *        ZSTD_LEGACY_MULTITHREADED_API when making the dynamic library.
+ *        However, we may completely remove these functions in a future
+ *        release, so please switch soon.
+ *
+ *        This API requires ZSTD_MULTITHREAD to be defined during compilation,
+ *        otherwise ZSTDMT_createCCtx*() will fail.
+ */
+
+#ifdef ZSTD_LEGACY_MULTITHREADED_API
+# define ZSTDMT_API ZSTDLIB_API
+#else
+# define ZSTDMT_API
+#endif
 
 /* === Dependencies === */
 #include <stddef.h> /* size_t */
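The header note above steers users away from the ZSTDMT_* entry points toward the regular advanced API. A minimal sketch of that recommended path using only stable 1.4.0 calls (ZSTD_createCCtx, ZSTD_CCtx_setParameter, ZSTD_compress2); worker count and level are illustrative and error handling is trimmed:

```c
#include <zstd.h>

/* Sketch: multithreaded one-pass compression through the regular API, as the
 * note above recommends instead of ZSTDMT_compressCCtx(). nbWorkers > 0 only
 * takes effect in a ZSTD_MULTITHREAD build. */
static size_t compress_mt(void* dst, size_t dstCapacity,
                          const void* src, size_t srcSize)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t cSize;
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3);
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, 4);
    cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
    ZSTD_freeCCtx(cctx);
    return cSize;  /* check with ZSTD_isError() */
}
```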
@@ -40,17 +55,19 @@
 
 /* === Memory management === */
 typedef struct ZSTDMT_CCtx_s ZSTDMT_CCtx;
-
-
+/* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */
+ZSTDMT_API ZSTDMT_CCtx* ZSTDMT_createCCtx(unsigned nbWorkers);
+/* Requires ZSTD_MULTITHREAD to be defined during compilation, otherwise it will return NULL. */
+ZSTDMT_API ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers,
                                                    ZSTD_customMem cMem);
-
+ZSTDMT_API size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx);
 
-
+ZSTDMT_API size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx);
 
 
 /* === Simple one-pass compression function === */
 
-
+ZSTDMT_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
                                       void* dst, size_t dstCapacity,
                                       const void* src, size_t srcSize,
                                       int compressionLevel);
@@ -59,31 +76,31 @@ ZSTDLIB_API size_t ZSTDMT_compressCCtx(ZSTDMT_CCtx* mtctx,
 
 /* === Streaming functions === */
 
-
-
+ZSTDMT_API size_t ZSTDMT_initCStream(ZSTDMT_CCtx* mtctx, int compressionLevel);
+ZSTDMT_API size_t ZSTDMT_resetCStream(ZSTDMT_CCtx* mtctx, unsigned long long pledgedSrcSize); /**< if srcSize is not known at reset time, use ZSTD_CONTENTSIZE_UNKNOWN. Note: for compatibility with older programs, 0 means the same as ZSTD_CONTENTSIZE_UNKNOWN, but it will change in the future to mean "empty" */
 
-
-
+ZSTDMT_API size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx);
+ZSTDMT_API size_t ZSTDMT_compressStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
 
-
-
+ZSTDMT_API size_t ZSTDMT_flushStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output); /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
+ZSTDMT_API size_t ZSTDMT_endStream(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output); /**< @return : 0 == all flushed; >0 : still some data to be flushed; or an error code (ZSTD_isError()) */
 
 
 /* === Advanced functions and parameters === */
 
-
-
-
-
-
-
+ZSTDMT_API size_t ZSTDMT_compress_advanced(ZSTDMT_CCtx* mtctx,
+                                           void* dst, size_t dstCapacity,
+                                           const void* src, size_t srcSize,
+                                           const ZSTD_CDict* cdict,
+                                           ZSTD_parameters params,
+                                           int overlapLog);
 
-
+ZSTDMT_API size_t ZSTDMT_initCStream_advanced(ZSTDMT_CCtx* mtctx,
                                               const void* dict, size_t dictSize, /* dict can be released after init, a local copy is preserved within zcs */
                                               ZSTD_parameters params,
                                               unsigned long long pledgedSrcSize); /* pledgedSrcSize is optional and can be zero == unknown */
 
-
+ZSTDMT_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
                                                 const ZSTD_CDict* cdict,
                                                 ZSTD_frameParameters fparams,
                                                 unsigned long long pledgedSrcSize); /* note : zero means empty */
@@ -92,7 +109,7 @@ ZSTDLIB_API size_t ZSTDMT_initCStream_usingCDict(ZSTDMT_CCtx* mtctx,
  * List of parameters that can be set using ZSTDMT_setMTCtxParameter() */
 typedef enum {
     ZSTDMT_p_jobSize,    /* Each job is compressed in parallel. By default, this value is dynamically determined depending on compression parameters. Can be set explicitly here. */
-    ZSTDMT_p_overlapLog, /* Each job may reload a part of previous job to enhance
+    ZSTDMT_p_overlapLog, /* Each job may reload a part of previous job to enhance compression ratio; 0 == no overlap, 6(default) == use 1/8th of window, >=9 == use full window. This is a "sticky" parameter : its value will be re-used on next compression job */
     ZSTDMT_p_rsyncable   /* Enables rsyncable mode. */
 } ZSTDMT_parameter;
 
@@ -101,12 +118,12 @@ typedef enum {
  * The function must be called typically after ZSTD_createCCtx() but __before ZSTDMT_init*() !__
 * Parameters not explicitly reset by ZSTDMT_init*() remain the same in consecutive compression sessions.
 * @return : 0, or an error code (which can be tested using ZSTD_isError()) */
-
+ZSTDMT_API size_t ZSTDMT_setMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int value);
 
 /* ZSTDMT_getMTCtxParameter() :
  * Query the ZSTDMT_CCtx for a parameter value.
  * @return : 0, or an error code (which can be tested using ZSTD_isError()) */
-
+ZSTDMT_API size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter parameter, int* value);
 
 
 /*! ZSTDMT_compressStream_generic() :
@@ -116,7 +133,7 @@ ZSTDLIB_API size_t ZSTDMT_getMTCtxParameter(ZSTDMT_CCtx* mtctx, ZSTDMT_parameter
 * 0 if fully flushed
 * or an error code
 * note : needs to be init using any ZSTD_initCStream*() variant */
-
+ZSTDMT_API size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
                                                 ZSTD_outBuffer* output,
                                                 ZSTD_inBuffer* input,
                                                 ZSTD_EndDirective endOp);
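Similarly, the streaming prototypes above map onto the regular streaming API: create a ZSTD_CCtx, set ZSTD_c_nbWorkers, and drive ZSTD_compressStream2(). A minimal sketch of that replacement loop (buffer handling and error checks reduced to the essentials; the helper name is illustrative):

```c
#include <stdio.h>
#include <stdlib.h>
#include <zstd.h>

/* Sketch: streaming compression via ZSTD_compressStream2(), the recommended
 * replacement for the ZSTDMT_* streaming entry points above. nbWorkers > 0
 * only takes effect in a ZSTD_MULTITHREAD build. */
static int stream_compress(FILE* fin, FILE* fout, int nbWorkers)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    size_t const inCap  = ZSTD_CStreamInSize();
    size_t const outCap = ZSTD_CStreamOutSize();
    void* const inBuf  = malloc(inCap);
    void* const outBuf = malloc(outCap);
    int lastChunk = 0;

    ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, nbWorkers);

    while (!lastChunk) {
        size_t const readSize = fread(inBuf, 1, inCap, fin);
        ZSTD_EndDirective const mode = (readSize < inCap) ? ZSTD_e_end : ZSTD_e_continue;
        ZSTD_inBuffer input = { inBuf, readSize, 0 };
        int finished = 0;
        lastChunk = (mode == ZSTD_e_end);
        while (!finished) {
            ZSTD_outBuffer output = { outBuf, outCap, 0 };
            size_t const remaining = ZSTD_compressStream2(cctx, &output, &input, mode);
            if (ZSTD_isError(remaining)) {
                fprintf(stderr, "zstd: %s\n", ZSTD_getErrorName(remaining));
                free(inBuf); free(outBuf); ZSTD_freeCCtx(cctx);
                return 1;
            }
            fwrite(outBuf, 1, output.pos, fout);
            /* ZSTD_e_end: done when remaining == 0 (frame fully flushed);
             * ZSTD_e_continue: done once the whole input chunk is consumed */
            finished = lastChunk ? (remaining == 0) : (input.pos == input.size);
        }
    }

    free(inBuf); free(outBuf); ZSTD_freeCCtx(cctx);
    return 0;
}
```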