zstd-ruby 1.5.5.1 → 1.5.6.0
- checksums.yaml +4 -4
- data/README.md +1 -1
- data/ext/zstdruby/libzstd/common/allocations.h +1 -1
- data/ext/zstdruby/libzstd/common/bitstream.h +49 -29
- data/ext/zstdruby/libzstd/common/compiler.h +114 -22
- data/ext/zstdruby/libzstd/common/cpu.h +36 -0
- data/ext/zstdruby/libzstd/common/debug.c +6 -0
- data/ext/zstdruby/libzstd/common/debug.h +20 -11
- data/ext/zstdruby/libzstd/common/error_private.h +45 -36
- data/ext/zstdruby/libzstd/common/fse.h +3 -2
- data/ext/zstdruby/libzstd/common/fse_decompress.c +19 -17
- data/ext/zstdruby/libzstd/common/huf.h +14 -1
- data/ext/zstdruby/libzstd/common/mem.h +0 -9
- data/ext/zstdruby/libzstd/common/pool.c +1 -1
- data/ext/zstdruby/libzstd/common/pool.h +1 -1
- data/ext/zstdruby/libzstd/common/portability_macros.h +2 -0
- data/ext/zstdruby/libzstd/common/threading.c +8 -2
- data/ext/zstdruby/libzstd/common/xxhash.c +5 -11
- data/ext/zstdruby/libzstd/common/xxhash.h +2341 -1007
- data/ext/zstdruby/libzstd/common/zstd_internal.h +5 -5
- data/ext/zstdruby/libzstd/compress/fse_compress.c +8 -7
- data/ext/zstdruby/libzstd/compress/huf_compress.c +54 -25
- data/ext/zstdruby/libzstd/compress/zstd_compress.c +282 -161
- data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +29 -27
- data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.c +224 -113
- data/ext/zstdruby/libzstd/compress/zstd_cwksp.h +19 -13
- data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +17 -5
- data/ext/zstdruby/libzstd/compress/zstd_double_fast.h +11 -0
- data/ext/zstdruby/libzstd/compress/zstd_fast.c +14 -6
- data/ext/zstdruby/libzstd/compress/zstd_lazy.c +129 -87
- data/ext/zstdruby/libzstd/compress/zstd_lazy.h +103 -28
- data/ext/zstdruby/libzstd/compress/zstd_ldm.c +8 -2
- data/ext/zstdruby/libzstd/compress/zstd_opt.c +216 -112
- data/ext/zstdruby/libzstd/compress/zstd_opt.h +31 -7
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +94 -79
- data/ext/zstdruby/libzstd/decompress/huf_decompress.c +188 -126
- data/ext/zstdruby/libzstd/decompress/huf_decompress_amd64.S +38 -19
- data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +84 -32
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.c +231 -208
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.h +1 -1
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_internal.h +2 -0
- data/ext/zstdruby/libzstd/dictBuilder/cover.c +16 -12
- data/ext/zstdruby/libzstd/dictBuilder/cover.h +2 -8
- data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +2 -2
- data/ext/zstdruby/libzstd/dictBuilder/zdict.c +12 -6
- data/ext/zstdruby/libzstd/zstd.h +129 -60
- data/lib/zstd-ruby/version.rb +1 -1
- metadata +1 -1
Excerpt of the changes in data/ext/zstdruby/libzstd/compress/zstd_compress.c:

```diff
@@ -178,6 +178,7 @@ static void ZSTD_freeCCtxContent(ZSTD_CCtx* cctx)
 
 size_t ZSTD_freeCCtx(ZSTD_CCtx* cctx)
 {
+    DEBUGLOG(3, "ZSTD_freeCCtx (address: %p)", (void*)cctx);
     if (cctx==NULL) return 0;   /* support free on NULL */
     RETURN_ERROR_IF(cctx->staticSize, memory_allocation,
                     "not compatible with static CCtx");
```
```diff
@@ -649,10 +650,11 @@ static size_t ZSTD_cParam_clampBounds(ZSTD_cParameter cParam, int* value)
     return 0;
 }
 
-#define BOUNDCHECK(cParam, val)
-
-
-
+#define BOUNDCHECK(cParam, val)                                   \
+    do {                                                          \
+        RETURN_ERROR_IF(!ZSTD_cParam_withinBounds(cParam,val),    \
+                        parameter_outOfBound, "Param out of bounds"); \
+    } while (0)
 
 
 static int ZSTD_isUpdateAuthorized(ZSTD_cParameter param)
```
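The BOUNDCHECK rewrite above is part of a broader 1.5.6 cleanup that wraps multi-statement helper macros in do { ... } while (0). A minimal standalone sketch of why that wrapper matters; the CHECK macros below are illustrative and not taken from libzstd:

```c
#include <stdio.h>

/* Unwrapped two-statement macro: used as the body of an un-braced if/else,
 * only the first statement ends up guarded, and a following `else` no longer
 * parses at all. */
#define CHECK_BAD(x)  fprintf(stderr, "checking %d\n", (x)); if ((x) < 0) return 1

/* Wrapped form: expands to exactly one statement, so it composes safely with
 * un-braced if/else and always requires the trailing semicolon. */
#define CHECK_GOOD(x)                              \
    do {                                           \
        fprintf(stderr, "checking %d\n", (x));     \
        if ((x) < 0) return 1;                     \
    } while (0)

int main(void)
{
    int v = 42;
    if (v != 0) CHECK_GOOD(v); else puts("zero");   /* would not compile with CHECK_BAD */
    return 0;
}
```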
```diff
@@ -868,7 +870,7 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
 #else
         FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(param, &value), "");
         CCtxParams->nbWorkers = value;
-        return CCtxParams->nbWorkers;
+        return (size_t)(CCtxParams->nbWorkers);
 #endif
 
     case ZSTD_c_jobSize :
@@ -892,7 +894,7 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
 #else
         FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value), "");
         CCtxParams->overlapLog = value;
-        return CCtxParams->overlapLog;
+        return (size_t)CCtxParams->overlapLog;
 #endif
 
     case ZSTD_c_rsyncable :
@@ -902,7 +904,7 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
 #else
         FORWARD_IF_ERROR(ZSTD_cParam_clampBounds(ZSTD_c_overlapLog, &value), "");
         CCtxParams->rsyncable = value;
-        return CCtxParams->rsyncable;
+        return (size_t)CCtxParams->rsyncable;
 #endif
 
     case ZSTD_c_enableDedicatedDictSearch :
```
```diff
@@ -939,8 +941,10 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
         return CCtxParams->ldmParams.hashRateLog;
 
     case ZSTD_c_targetCBlockSize :
-        if (value!=0)
+        if (value!=0) {   /* 0 ==> default */
+            value = MAX(value, ZSTD_TARGETCBLOCKSIZE_MIN);
             BOUNDCHECK(ZSTD_c_targetCBlockSize, value);
+        }
         CCtxParams->targetCBlockSize = (U32)value;
         return CCtxParams->targetCBlockSize;
 
```
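For callers, the practical effect of the hunk above is that a non-zero ZSTD_c_targetCBlockSize below ZSTD_TARGETCBLOCKSIZE_MIN is now raised to the minimum instead of tripping the bounds check. A hedged usage sketch; the check helper and the 1300-byte target are only for the example:

```c
#include <stdio.h>
#include <stdlib.h>
#include <zstd.h>

static void check(size_t code, const char* what)
{
    if (ZSTD_isError(code)) {
        fprintf(stderr, "%s: %s\n", what, ZSTD_getErrorName(code));
        exit(1);
    }
}

int main(void)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    if (cctx == NULL) return 1;

    /* 0 keeps the default (no target). Non-zero values below
     * ZSTD_TARGETCBLOCKSIZE_MIN are raised to the minimum (per the hunk above);
     * values above ZSTD_TARGETCBLOCKSIZE_MAX are still rejected. */
    check(ZSTD_CCtx_setParameter(cctx, ZSTD_c_targetCBlockSize, 1300),
          "set targetCBlockSize");
    check(ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3),
          "set compressionLevel");

    ZSTD_freeCCtx(cctx);
    return 0;
}
```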
```diff
@@ -968,7 +972,7 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
     case ZSTD_c_validateSequences:
         BOUNDCHECK(ZSTD_c_validateSequences, value);
         CCtxParams->validateSequences = value;
-        return CCtxParams->validateSequences;
+        return (size_t)CCtxParams->validateSequences;
 
     case ZSTD_c_useBlockSplitter:
         BOUNDCHECK(ZSTD_c_useBlockSplitter, value);
@@ -983,7 +987,7 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
     case ZSTD_c_deterministicRefPrefix:
         BOUNDCHECK(ZSTD_c_deterministicRefPrefix, value);
         CCtxParams->deterministicRefPrefix = !!value;
-        return CCtxParams->deterministicRefPrefix;
+        return (size_t)CCtxParams->deterministicRefPrefix;
 
     case ZSTD_c_prefetchCDictTables:
         BOUNDCHECK(ZSTD_c_prefetchCDictTables, value);
@@ -993,7 +997,7 @@ size_t ZSTD_CCtxParams_setParameter(ZSTD_CCtx_params* CCtxParams,
     case ZSTD_c_enableSeqProducerFallback:
         BOUNDCHECK(ZSTD_c_enableSeqProducerFallback, value);
         CCtxParams->enableMatchFinderFallback = value;
-        return CCtxParams->enableMatchFinderFallback;
+        return (size_t)CCtxParams->enableMatchFinderFallback;
 
     case ZSTD_c_maxBlockSize:
         if (value!=0)    /* 0 ==> default */
```
```diff
@@ -1363,7 +1367,6 @@ size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset)
         RETURN_ERROR_IF(cctx->streamStage != zcss_init, stage_wrong,
                         "Reset parameters is only possible during init stage.");
         ZSTD_clearAllDicts(cctx);
-        ZSTD_memset(&cctx->externalMatchCtx, 0, sizeof(cctx->externalMatchCtx));
         return ZSTD_CCtxParams_reset(&cctx->requestedParams);
     }
     return 0;
```
```diff
@@ -1391,11 +1394,12 @@ size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams)
 static ZSTD_compressionParameters
 ZSTD_clampCParams(ZSTD_compressionParameters cParams)
 {
-#   define CLAMP_TYPE(cParam, val, type)
-
-
-
-
+#   define CLAMP_TYPE(cParam, val, type)                                      \
+        do {                                                                  \
+            ZSTD_bounds const bounds = ZSTD_cParam_getBounds(cParam);         \
+            if ((int)val<bounds.lowerBound) val=(type)bounds.lowerBound;      \
+            else if ((int)val>bounds.upperBound) val=(type)bounds.upperBound; \
+        } while (0)
 #   define CLAMP(cParam, val) CLAMP_TYPE(cParam, val, unsigned)
     CLAMP(ZSTD_c_windowLog, cParams.windowLog);
     CLAMP(ZSTD_c_chainLog,  cParams.chainLog);
```
```diff
@@ -1467,6 +1471,48 @@ ZSTD_adjustCParams_internal(ZSTD_compressionParameters cPar,
     const U64 maxWindowResize = 1ULL << (ZSTD_WINDOWLOG_MAX-1);
     assert(ZSTD_checkCParams(cPar)==0);
 
+    /* Cascade the selected strategy down to the next-highest one built into
+     * this binary. */
+#ifdef ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR
+    if (cPar.strategy == ZSTD_btultra2) {
+        cPar.strategy = ZSTD_btultra;
+    }
+    if (cPar.strategy == ZSTD_btultra) {
+        cPar.strategy = ZSTD_btopt;
+    }
+#endif
+#ifdef ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR
+    if (cPar.strategy == ZSTD_btopt) {
+        cPar.strategy = ZSTD_btlazy2;
+    }
+#endif
+#ifdef ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR
+    if (cPar.strategy == ZSTD_btlazy2) {
+        cPar.strategy = ZSTD_lazy2;
+    }
+#endif
+#ifdef ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR
+    if (cPar.strategy == ZSTD_lazy2) {
+        cPar.strategy = ZSTD_lazy;
+    }
+#endif
+#ifdef ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR
+    if (cPar.strategy == ZSTD_lazy) {
+        cPar.strategy = ZSTD_greedy;
+    }
+#endif
+#ifdef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR
+    if (cPar.strategy == ZSTD_greedy) {
+        cPar.strategy = ZSTD_dfast;
+    }
+#endif
+#ifdef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR
+    if (cPar.strategy == ZSTD_dfast) {
+        cPar.strategy = ZSTD_fast;
+        cPar.targetLength = 0;
+    }
+#endif
+
     switch (mode) {
     case ZSTD_cpm_unknown:
     case ZSTD_cpm_noAttachDict:
```
```diff
@@ -1617,8 +1663,8 @@ ZSTD_sizeof_matchState(const ZSTD_compressionParameters* const cParams,
                           + ZSTD_cwksp_aligned_alloc_size((MaxLL+1) * sizeof(U32))
                           + ZSTD_cwksp_aligned_alloc_size((MaxOff+1) * sizeof(U32))
                           + ZSTD_cwksp_aligned_alloc_size((1<<Litbits) * sizeof(U32))
-                          + ZSTD_cwksp_aligned_alloc_size(
-                          + ZSTD_cwksp_aligned_alloc_size(
+                          + ZSTD_cwksp_aligned_alloc_size(ZSTD_OPT_SIZE * sizeof(ZSTD_match_t))
+                          + ZSTD_cwksp_aligned_alloc_size(ZSTD_OPT_SIZE * sizeof(ZSTD_optimal_t));
     size_t const lazyAdditionalSpace = ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)
                                             ? ZSTD_cwksp_aligned_alloc_size(hSize)
                                             : 0;
@@ -1707,7 +1753,7 @@ size_t ZSTD_estimateCCtxSize_usingCCtxParams(const ZSTD_CCtx_params* params)
      * be needed. However, we still allocate two 0-sized buffers, which can
      * take space under ASAN. */
     return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
-        &cParams, &params->ldmParams, 1, useRowMatchFinder, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN, params
+        &cParams, &params->ldmParams, 1, useRowMatchFinder, 0, 0, ZSTD_CONTENTSIZE_UNKNOWN, ZSTD_hasExtSeqProd(params), params->maxBlockSize);
 }
 
 size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams)
@@ -1768,7 +1814,7 @@ size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params)
 
         return ZSTD_estimateCCtxSize_usingCCtxParams_internal(
             &cParams, &params->ldmParams, 1, useRowMatchFinder, inBuffSize, outBuffSize,
-            ZSTD_CONTENTSIZE_UNKNOWN, params
+            ZSTD_CONTENTSIZE_UNKNOWN, ZSTD_hasExtSeqProd(params), params->maxBlockSize);
     }
 }
 
```
```diff
@@ -2001,8 +2047,8 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms,
         ms->opt.litLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxLL+1) * sizeof(unsigned));
         ms->opt.matchLengthFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxML+1) * sizeof(unsigned));
         ms->opt.offCodeFreq = (unsigned*)ZSTD_cwksp_reserve_aligned(ws, (MaxOff+1) * sizeof(unsigned));
-        ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws,
-        ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws,
+        ms->opt.matchTable = (ZSTD_match_t*)ZSTD_cwksp_reserve_aligned(ws, ZSTD_OPT_SIZE * sizeof(ZSTD_match_t));
+        ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, ZSTD_OPT_SIZE * sizeof(ZSTD_optimal_t));
     }
 
     ms->cParams = *cParams;
@@ -2074,7 +2120,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
 
     {   size_t const windowSize = MAX(1, (size_t)MIN(((U64)1 << params->cParams.windowLog), pledgedSrcSize));
         size_t const blockSize = MIN(params->maxBlockSize, windowSize);
-        size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, params->cParams.minMatch, params
+        size_t const maxNbSeq = ZSTD_maxNbSeq(blockSize, params->cParams.minMatch, ZSTD_hasExtSeqProd(params));
         size_t const buffOutSize = (zbuff == ZSTDb_buffered && params->outBufferMode == ZSTD_bm_buffered)
                 ? ZSTD_compressBound(blockSize) + 1
                 : 0;
@@ -2091,8 +2137,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
         size_t const neededSpace =
             ZSTD_estimateCCtxSize_usingCCtxParams_internal(
                 &params->cParams, &params->ldmParams, zc->staticSize != 0, params->useRowMatchFinder,
-                buffInSize, buffOutSize, pledgedSrcSize, params
-        int resizeWorkspace;
+                buffInSize, buffOutSize, pledgedSrcSize, ZSTD_hasExtSeqProd(params), params->maxBlockSize);
 
         FORWARD_IF_ERROR(neededSpace, "cctx size estimate failed!");
 
```
```diff
@@ -2101,7 +2146,7 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
         {   /* Check if workspace is large enough, alloc a new one if needed */
             int const workspaceTooSmall = ZSTD_cwksp_sizeof(ws) < neededSpace;
             int const workspaceWasteful = ZSTD_cwksp_check_wasteful(ws, neededSpace);
-            resizeWorkspace = workspaceTooSmall || workspaceWasteful;
+            int resizeWorkspace = workspaceTooSmall || workspaceWasteful;
             DEBUGLOG(4, "Need %zu B workspace", neededSpace);
             DEBUGLOG(4, "windowSize: %zu - blockSize: %zu", windowSize, blockSize);
 
@@ -2176,10 +2221,10 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
         }
 
         /* reserve space for block-level external sequences */
-        if (params
+        if (ZSTD_hasExtSeqProd(params)) {
             size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize);
-            zc->
-            zc->
+            zc->extSeqBufCapacity = maxNbExternalSeq;
+            zc->extSeqBuf =
                 (ZSTD_Sequence*)ZSTD_cwksp_reserve_aligned(ws, maxNbExternalSeq * sizeof(ZSTD_Sequence));
         }
 
```
```diff
@@ -2564,7 +2609,7 @@ ZSTD_reduceTable_internal (U32* const table, U32 const size, U32 const reducerVa
     assert(size < (1U<<31));   /* can be casted to int */
 
 #if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
-    /* To validate that the table
+    /* To validate that the table reuse logic is sound, and that we don't
      * access table space that we haven't cleaned, we re-"poison" the table
      * space every time we mark it dirty.
      *
```
```diff
@@ -2992,40 +3037,43 @@ ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramS
     static const ZSTD_blockCompressor blockCompressor[4][ZSTD_STRATEGY_MAX+1] = {
         { ZSTD_compressBlock_fast  /* default for 0 */,
           ZSTD_compressBlock_fast,
-
-
-
-
-
-
-
-
+          ZSTD_COMPRESSBLOCK_DOUBLEFAST,
+          ZSTD_COMPRESSBLOCK_GREEDY,
+          ZSTD_COMPRESSBLOCK_LAZY,
+          ZSTD_COMPRESSBLOCK_LAZY2,
+          ZSTD_COMPRESSBLOCK_BTLAZY2,
+          ZSTD_COMPRESSBLOCK_BTOPT,
+          ZSTD_COMPRESSBLOCK_BTULTRA,
+          ZSTD_COMPRESSBLOCK_BTULTRA2
+        },
         { ZSTD_compressBlock_fast_extDict  /* default for 0 */,
           ZSTD_compressBlock_fast_extDict,
-
-
-
-
-
-
-
-
+          ZSTD_COMPRESSBLOCK_DOUBLEFAST_EXTDICT,
+          ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT,
+          ZSTD_COMPRESSBLOCK_LAZY_EXTDICT,
+          ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT,
+          ZSTD_COMPRESSBLOCK_BTLAZY2_EXTDICT,
+          ZSTD_COMPRESSBLOCK_BTOPT_EXTDICT,
+          ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT,
+          ZSTD_COMPRESSBLOCK_BTULTRA_EXTDICT
+        },
         { ZSTD_compressBlock_fast_dictMatchState  /* default for 0 */,
           ZSTD_compressBlock_fast_dictMatchState,
-
-
-
-
-
-
-
-
+          ZSTD_COMPRESSBLOCK_DOUBLEFAST_DICTMATCHSTATE,
+          ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE,
+          ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE,
+          ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE,
+          ZSTD_COMPRESSBLOCK_BTLAZY2_DICTMATCHSTATE,
+          ZSTD_COMPRESSBLOCK_BTOPT_DICTMATCHSTATE,
+          ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE,
+          ZSTD_COMPRESSBLOCK_BTULTRA_DICTMATCHSTATE
+        },
         { NULL  /* default for 0 */,
           NULL,
           NULL,
-
-
-
+          ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH,
+          ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH,
+          ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH,
           NULL,
           NULL,
           NULL,
```
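The dispatch table above switches from naming block compressor functions directly to ZSTD_COMPRESSBLOCK_* macros. Judging from the matching header changes listed at the top of this diff (zstd_lazy.h, zstd_opt.h, zstd_double_fast.h), each macro presumably resolves to the real function when that strategy is compiled in and to NULL when it is excluded, roughly along these lines (illustrative sketch, not copied from the headers):

```c
/* Presumed pattern behind ZSTD_COMPRESSBLOCK_GREEDY and friends: keep the
 * dispatch table well-formed whether or not the strategy is built in. */
#ifndef ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR
#  define ZSTD_COMPRESSBLOCK_GREEDY ZSTD_compressBlock_greedy
#else
#  define ZSTD_COMPRESSBLOCK_GREEDY NULL
#endif
```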
```diff
@@ -3038,18 +3086,26 @@ ZSTD_blockCompressor ZSTD_selectBlockCompressor(ZSTD_strategy strat, ZSTD_paramS
     DEBUGLOG(4, "Selected block compressor: dictMode=%d strat=%d rowMatchfinder=%d", (int)dictMode, (int)strat, (int)useRowMatchFinder);
     if (ZSTD_rowMatchFinderUsed(strat, useRowMatchFinder)) {
         static const ZSTD_blockCompressor rowBasedBlockCompressors[4][3] = {
-            {
-
-
-
-
-
-
-
-
-
-
-
+            {
+                ZSTD_COMPRESSBLOCK_GREEDY_ROW,
+                ZSTD_COMPRESSBLOCK_LAZY_ROW,
+                ZSTD_COMPRESSBLOCK_LAZY2_ROW
+            },
+            {
+                ZSTD_COMPRESSBLOCK_GREEDY_EXTDICT_ROW,
+                ZSTD_COMPRESSBLOCK_LAZY_EXTDICT_ROW,
+                ZSTD_COMPRESSBLOCK_LAZY2_EXTDICT_ROW
+            },
+            {
+                ZSTD_COMPRESSBLOCK_GREEDY_DICTMATCHSTATE_ROW,
+                ZSTD_COMPRESSBLOCK_LAZY_DICTMATCHSTATE_ROW,
+                ZSTD_COMPRESSBLOCK_LAZY2_DICTMATCHSTATE_ROW
+            },
+            {
+                ZSTD_COMPRESSBLOCK_GREEDY_DEDICATEDDICTSEARCH_ROW,
+                ZSTD_COMPRESSBLOCK_LAZY_DEDICATEDDICTSEARCH_ROW,
+                ZSTD_COMPRESSBLOCK_LAZY2_DEDICATEDDICTSEARCH_ROW
+            }
         };
         DEBUGLOG(4, "Selecting a row-based matchfinder");
         assert(useRowMatchFinder != ZSTD_ps_auto);
```
```diff
@@ -3192,7 +3248,7 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
         /* External matchfinder + LDM is technically possible, just not implemented yet.
          * We need to revisit soon and implement it. */
         RETURN_ERROR_IF(
-            zc->appliedParams
+            ZSTD_hasExtSeqProd(&zc->appliedParams),
             parameter_combination_unsupported,
             "Long-distance matching with external sequence producer enabled is not currently supported."
         );
@@ -3211,7 +3267,7 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
         /* External matchfinder + LDM is technically possible, just not implemented yet.
          * We need to revisit soon and implement it. */
         RETURN_ERROR_IF(
-            zc->appliedParams
+            ZSTD_hasExtSeqProd(&zc->appliedParams),
             parameter_combination_unsupported,
             "Long-distance matching with external sequence producer enabled is not currently supported."
         );
```
```diff
@@ -3230,18 +3286,18 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
                 zc->appliedParams.useRowMatchFinder,
                 src, srcSize);
             assert(ldmSeqStore.pos == ldmSeqStore.size);
-        } else if (zc->appliedParams
+        } else if (ZSTD_hasExtSeqProd(&zc->appliedParams)) {
             assert(
-                zc->
+                zc->extSeqBufCapacity >= ZSTD_sequenceBound(srcSize)
             );
-            assert(zc->
+            assert(zc->appliedParams.extSeqProdFunc != NULL);
 
             {   U32 const windowSize = (U32)1 << zc->appliedParams.cParams.windowLog;
 
-                size_t const nbExternalSeqs = (zc->
-                    zc->
-                    zc->
-                    zc->
+                size_t const nbExternalSeqs = (zc->appliedParams.extSeqProdFunc)(
+                    zc->appliedParams.extSeqProdState,
+                    zc->extSeqBuf,
+                    zc->extSeqBufCapacity,
                     src, srcSize,
                     NULL, 0,  /* dict and dictSize, currently not supported */
                     zc->appliedParams.compressionLevel,
@@ -3249,21 +3305,21 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
                 );
 
                 size_t const nbPostProcessedSeqs = ZSTD_postProcessSequenceProducerResult(
-                    zc->
+                    zc->extSeqBuf,
                     nbExternalSeqs,
-                    zc->
+                    zc->extSeqBufCapacity,
                     srcSize
                 );
 
                 /* Return early if there is no error, since we don't need to worry about last literals */
                 if (!ZSTD_isError(nbPostProcessedSeqs)) {
                     ZSTD_sequencePosition seqPos = {0,0,0};
-                    size_t const seqLenSum = ZSTD_fastSequenceLengthSum(zc->
+                    size_t const seqLenSum = ZSTD_fastSequenceLengthSum(zc->extSeqBuf, nbPostProcessedSeqs);
                     RETURN_ERROR_IF(seqLenSum > srcSize, externalSequences_invalid, "External sequences imply too large a block!");
                     FORWARD_IF_ERROR(
                         ZSTD_copySequencesToSeqStoreExplicitBlockDelim(
                             zc, &seqPos,
-                            zc->
+                            zc->extSeqBuf, nbPostProcessedSeqs,
                             src, srcSize,
                             zc->appliedParams.searchForExternalRepcodes
                         ),
```
```diff
@@ -3280,9 +3336,11 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
             }
 
             /* Fallback to software matchfinder */
-            {   ZSTD_blockCompressor const blockCompressor =
-
-
+            {   ZSTD_blockCompressor const blockCompressor =
+                    ZSTD_selectBlockCompressor(
+                        zc->appliedParams.cParams.strategy,
+                        zc->appliedParams.useRowMatchFinder,
+                        dictMode);
                 ms->ldmSeqStore = NULL;
                 DEBUGLOG(
                     5,
@@ -3292,9 +3350,10 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
                 lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
             }   }
         } else {   /* not long range mode and no external matchfinder */
-            ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(
-
-
+            ZSTD_blockCompressor const blockCompressor = ZSTD_selectBlockCompressor(
+                    zc->appliedParams.cParams.strategy,
+                    zc->appliedParams.useRowMatchFinder,
+                    dictMode);
             ms->ldmSeqStore = NULL;
             lastLLSize = blockCompressor(ms, &zc->seqStore, zc->blockState.nextCBlock->rep, src, srcSize);
         }
```
```diff
@@ -3304,29 +3363,38 @@ static size_t ZSTD_buildSeqStore(ZSTD_CCtx* zc, const void* src, size_t srcSize)
     return ZSTDbss_compress;
 }
 
-static
+static size_t ZSTD_copyBlockSequences(SeqCollector* seqCollector, const seqStore_t* seqStore, const U32 prevRepcodes[ZSTD_REP_NUM])
 {
-    const
-    const
-    size_t
-    size_t seqStoreLiteralsSize = (size_t)(seqStore->lit - seqStore->litStart);
-    size_t literalsRead = 0;
-    size_t lastLLSize;
+    const seqDef* inSeqs = seqStore->sequencesStart;
+    const size_t nbInSequences = seqStore->sequences - inSeqs;
+    const size_t nbInLiterals = (size_t)(seqStore->lit - seqStore->litStart);
 
-    ZSTD_Sequence* outSeqs =
+    ZSTD_Sequence* outSeqs = seqCollector->seqIndex == 0 ? seqCollector->seqStart : seqCollector->seqStart + seqCollector->seqIndex;
+    const size_t nbOutSequences = nbInSequences + 1;
+    size_t nbOutLiterals = 0;
+    repcodes_t repcodes;
     size_t i;
-    repcodes_t updatedRepcodes;
 
-
-
-
-
-
-
-
-
+    /* Bounds check that we have enough space for every input sequence
+     * and the block delimiter
+     */
+    assert(seqCollector->seqIndex <= seqCollector->maxSequences);
+    RETURN_ERROR_IF(
+        nbOutSequences > (size_t)(seqCollector->maxSequences - seqCollector->seqIndex),
+        dstSize_tooSmall,
+        "Not enough space to copy sequences");
+
+    ZSTD_memcpy(&repcodes, prevRepcodes, sizeof(repcodes));
+    for (i = 0; i < nbInSequences; ++i) {
+        U32 rawOffset;
+        outSeqs[i].litLength = inSeqs[i].litLength;
+        outSeqs[i].matchLength = inSeqs[i].mlBase + MINMATCH;
         outSeqs[i].rep = 0;
 
+        /* Handle the possible single length >= 64K
+         * There can only be one because we add MINMATCH to every match length,
+         * and blocks are at most 128K.
+         */
         if (i == seqStore->longLengthPos) {
             if (seqStore->longLengthType == ZSTD_llt_literalLength) {
                 outSeqs[i].litLength += 0x10000;
```
```diff
@@ -3335,41 +3403,55 @@ static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
             }
         }
 
-
-
-
+        /* Determine the raw offset given the offBase, which may be a repcode. */
+        if (OFFBASE_IS_REPCODE(inSeqs[i].offBase)) {
+            const U32 repcode = OFFBASE_TO_REPCODE(inSeqs[i].offBase);
+            assert(repcode > 0);
+            outSeqs[i].rep = repcode;
             if (outSeqs[i].litLength != 0) {
-                rawOffset =
+                rawOffset = repcodes.rep[repcode - 1];
             } else {
-                if (
-
+                if (repcode == 3) {
+                    assert(repcodes.rep[0] > 1);
+                    rawOffset = repcodes.rep[0] - 1;
                 } else {
-                    rawOffset =
+                    rawOffset = repcodes.rep[repcode];
                 }
             }
+        } else {
+            rawOffset = OFFBASE_TO_OFFSET(inSeqs[i].offBase);
         }
         outSeqs[i].offset = rawOffset;
-
-
-        ZSTD_updateRep(
-
-
-
+
+        /* Update repcode history for the sequence */
+        ZSTD_updateRep(repcodes.rep,
+                       inSeqs[i].offBase,
+                       inSeqs[i].litLength == 0);
+
+        nbOutLiterals += outSeqs[i].litLength;
     }
     /* Insert last literals (if any exist) in the block as a sequence with ml == off == 0.
      * If there are no last literals, then we'll emit (of: 0, ml: 0, ll: 0), which is a marker
      * for the block boundary, according to the API.
      */
-    assert(
-
-
-
-
-
+    assert(nbInLiterals >= nbOutLiterals);
+    {
+        const size_t lastLLSize = nbInLiterals - nbOutLiterals;
+        outSeqs[nbInSequences].litLength = (U32)lastLLSize;
+        outSeqs[nbInSequences].matchLength = 0;
+        outSeqs[nbInSequences].offset = 0;
+        assert(nbOutSequences == nbInSequences + 1);
+    }
+    seqCollector->seqIndex += nbOutSequences;
+    assert(seqCollector->seqIndex <= seqCollector->maxSequences);
+
+    return 0;
 }
 
 size_t ZSTD_sequenceBound(size_t srcSize) {
-
+    const size_t maxNbSeq = (srcSize / ZSTD_MINMATCH_MIN) + 1;
+    const size_t maxNbDelims = (srcSize / ZSTD_BLOCKSIZE_MAX_MIN) + 1;
+    return maxNbSeq + maxNbDelims;
 }
 
 size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
```
```diff
@@ -3378,6 +3460,16 @@ size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
     const size_t dstCapacity = ZSTD_compressBound(srcSize);
     void* dst = ZSTD_customMalloc(dstCapacity, ZSTD_defaultCMem);
     SeqCollector seqCollector;
+    {
+        int targetCBlockSize;
+        FORWARD_IF_ERROR(ZSTD_CCtx_getParameter(zc, ZSTD_c_targetCBlockSize, &targetCBlockSize), "");
+        RETURN_ERROR_IF(targetCBlockSize != 0, parameter_unsupported, "targetCBlockSize != 0");
+    }
+    {
+        int nbWorkers;
+        FORWARD_IF_ERROR(ZSTD_CCtx_getParameter(zc, ZSTD_c_nbWorkers, &nbWorkers), "");
+        RETURN_ERROR_IF(nbWorkers != 0, parameter_unsupported, "nbWorkers != 0");
+    }
 
     RETURN_ERROR_IF(dst == NULL, memory_allocation, "NULL pointer!");
 
@@ -3387,8 +3479,12 @@ size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
     seqCollector.maxSequences = outSeqsSize;
     zc->seqCollector = seqCollector;
 
-
-
+    {
+        const size_t ret = ZSTD_compress2(zc, dst, dstCapacity, src, srcSize);
+        ZSTD_customFree(dst, ZSTD_defaultCMem);
+        FORWARD_IF_ERROR(ret, "ZSTD_compress2 failed");
+    }
+    assert(zc->seqCollector.seqIndex <= ZSTD_sequenceBound(srcSize));
     return zc->seqCollector.seqIndex;
 }
 
```
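ZSTD_copyBlockSequences and ZSTD_generateSequences above back the experimental sequence-collection API declared in zstd.h (ZSTD_sequenceBound / ZSTD_generateSequences, available behind ZSTD_STATIC_LINKING_ONLY). A hedged sketch of calling it, with error handling kept minimal:

```c
#define ZSTD_STATIC_LINKING_ONLY   /* sequence-collection API is experimental */
#include <zstd.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const char src[] = "abcabcabcabcabcabc-abcabcabcabcabcabc";
    size_t const srcSize = sizeof(src) - 1;

    /* Upper bound on sequences (including block delimiters) for this input. */
    size_t const maxSeqs = ZSTD_sequenceBound(srcSize);
    ZSTD_Sequence* const seqs = malloc(maxSeqs * sizeof(ZSTD_Sequence));
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    if (seqs == NULL || cctx == NULL) return 1;

    {   size_t const nbSeqs = ZSTD_generateSequences(cctx, seqs, maxSeqs, src, srcSize);
        if (ZSTD_isError(nbSeqs)) {
            fprintf(stderr, "generateSequences: %s\n", ZSTD_getErrorName(nbSeqs));
            return 1;
        }
        {   size_t i;
            for (i = 0; i < nbSeqs; i++)
                printf("ll=%u ml=%u off=%u rep=%u\n",
                       seqs[i].litLength, seqs[i].matchLength,
                       seqs[i].offset, seqs[i].rep);
        }
    }

    ZSTD_freeCCtx(cctx);
    free(seqs);
    return 0;
}
```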
```diff
@@ -3981,8 +4077,9 @@ ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc,
         cSeqsSize = 1;
     }
 
+    /* Sequence collection not supported when block splitting */
     if (zc->seqCollector.collectSequences) {
-        ZSTD_copyBlockSequences(zc);
+        FORWARD_IF_ERROR(ZSTD_copyBlockSequences(&zc->seqCollector, seqStore, dRepOriginal.rep), "copyBlockSequences failed");
         ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
         return 0;
     }
@@ -4204,6 +4301,7 @@ ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc,
     if (bss == ZSTDbss_noCompress) {
         if (zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode == FSE_repeat_valid)
             zc->blockState.prevCBlock->entropy.fse.offcode_repeatMode = FSE_repeat_check;
+        RETURN_ERROR_IF(zc->seqCollector.collectSequences, sequenceProducer_failed, "Uncompressible block");
         cSize = ZSTD_noCompressBlock(dst, dstCapacity, src, srcSize, lastBlock);
         FORWARD_IF_ERROR(cSize, "ZSTD_noCompressBlock failed");
         DEBUGLOG(4, "ZSTD_compressBlock_splitBlock: Nocompress block");
```
```diff
@@ -4236,11 +4334,15 @@ ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
 
     { const size_t bss = ZSTD_buildSeqStore(zc, src, srcSize);
         FORWARD_IF_ERROR(bss, "ZSTD_buildSeqStore failed");
-        if (bss == ZSTDbss_noCompress) {
+        if (bss == ZSTDbss_noCompress) {
+            RETURN_ERROR_IF(zc->seqCollector.collectSequences, sequenceProducer_failed, "Uncompressible block");
+            cSize = 0;
+            goto out;
+        }
     }
 
     if (zc->seqCollector.collectSequences) {
-        ZSTD_copyBlockSequences(zc);
+        FORWARD_IF_ERROR(ZSTD_copyBlockSequences(&zc->seqCollector, ZSTD_getSeqStore(zc), zc->blockState.prevCBlock->rep), "copyBlockSequences failed");
         ZSTD_blockState_confirmRepcodesAndEntropyTables(&zc->blockState);
         return 0;
     }
```
```diff
@@ -4553,19 +4655,15 @@ size_t ZSTD_writeLastEmptyBlock(void* dst, size_t dstCapacity)
     }
 }
 
-
+void ZSTD_referenceExternalSequences(ZSTD_CCtx* cctx, rawSeq* seq, size_t nbSeq)
 {
-
-
-    RETURN_ERROR_IF(cctx->appliedParams.ldmParams.enableLdm == ZSTD_ps_enable,
-                    parameter_unsupported,
-                    "incompatible with ldm");
+    assert(cctx->stage == ZSTDcs_init);
+    assert(nbSeq == 0 || cctx->appliedParams.ldmParams.enableLdm != ZSTD_ps_enable);
     cctx->externSeqStore.seq = seq;
     cctx->externSeqStore.size = nbSeq;
     cctx->externSeqStore.capacity = nbSeq;
     cctx->externSeqStore.pos = 0;
     cctx->externSeqStore.posInSequence = 0;
-    return 0;
 }
 
 
```
```diff
@@ -4760,12 +4858,19 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
         ZSTD_fillHashTable(ms, iend, dtlm, tfp);
         break;
     case ZSTD_dfast:
+#ifndef ZSTD_EXCLUDE_DFAST_BLOCK_COMPRESSOR
         ZSTD_fillDoubleHashTable(ms, iend, dtlm, tfp);
+#else
+        assert(0); /* shouldn't be called: cparams should've been adjusted. */
+#endif
         break;
 
     case ZSTD_greedy:
     case ZSTD_lazy:
     case ZSTD_lazy2:
+#if !defined(ZSTD_EXCLUDE_GREEDY_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_LAZY_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_LAZY2_BLOCK_COMPRESSOR)
         assert(srcSize >= HASH_READ_SIZE);
         if (ms->dedicatedDictSearch) {
             assert(ms->chainTable != NULL);
@@ -4782,14 +4887,23 @@ static size_t ZSTD_loadDictionaryContent(ZSTD_matchState_t* ms,
             DEBUGLOG(4, "Using chain-based hash table for lazy dict");
         }
     }
+#else
+        assert(0); /* shouldn't be called: cparams should've been adjusted. */
+#endif
         break;
 
     case ZSTD_btlazy2:   /* we want the dictionary table fully sorted */
     case ZSTD_btopt:
     case ZSTD_btultra:
     case ZSTD_btultra2:
+#if !defined(ZSTD_EXCLUDE_BTLAZY2_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_BTOPT_BLOCK_COMPRESSOR) \
+ || !defined(ZSTD_EXCLUDE_BTULTRA_BLOCK_COMPRESSOR)
         assert(srcSize >= HASH_READ_SIZE);
         ZSTD_updateTree(ms, iend-HASH_READ_SIZE, iend);
+#else
+        assert(0); /* shouldn't be called: cparams should've been adjusted. */
+#endif
         break;
 
     default:
```
```diff
@@ -4836,11 +4950,10 @@ size_t ZSTD_loadCEntropy(ZSTD_compressedBlockState_t* bs, void* workspace,
 
         /* We only set the loaded table as valid if it contains all non-zero
          * weights. Otherwise, we set it to check */
-        if (!hasZeroWeights)
+        if (!hasZeroWeights && maxSymbolValue == 255)
             bs->entropy.huf.repeatMode = HUF_repeat_valid;
 
         RETURN_ERROR_IF(HUF_isError(hufHeaderSize), dictionary_corrupted, "");
-        RETURN_ERROR_IF(maxSymbolValue < 255, dictionary_corrupted, "");
         dictPtr += hufHeaderSize;
     }
 
@@ -5107,14 +5220,13 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
 {
     BYTE* const ostart = (BYTE*)dst;
     BYTE* op = ostart;
-    size_t fhSize = 0;
 
     DEBUGLOG(4, "ZSTD_writeEpilogue");
     RETURN_ERROR_IF(cctx->stage == ZSTDcs_created, stage_wrong, "init missing");
 
     /* special case : empty frame */
     if (cctx->stage == ZSTDcs_init) {
-        fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
+        size_t fhSize = ZSTD_writeFrameHeader(dst, dstCapacity, &cctx->appliedParams, 0, 0);
         FORWARD_IF_ERROR(fhSize, "ZSTD_writeFrameHeader failed");
         dstCapacity -= fhSize;
         op += fhSize;
```
```diff
@@ -5124,8 +5236,9 @@ static size_t ZSTD_writeEpilogue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity)
     if (cctx->stage != ZSTDcs_ending) {
         /* write one last empty block, make it the "last" block */
         U32 const cBlockHeader24 = 1 /* last block */ + (((U32)bt_raw)<<1) + 0;
-
-
+        ZSTD_STATIC_ASSERT(ZSTD_BLOCKHEADERSIZE == 3);
+        RETURN_ERROR_IF(dstCapacity<3, dstSize_tooSmall, "no room for epilogue");
+        MEM_writeLE24(op, cBlockHeader24);
         op += ZSTD_blockHeaderSize;
         dstCapacity -= ZSTD_blockHeaderSize;
     }
```
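The epilogue hunk above now checks capacity and asserts the 3-byte header size before MEM_writeLE24 writes the closing block header. For context, a zstd block header packs, from the lowest bit up, a 1-bit last-block flag, a 2-bit block type, and a 21-bit block size (RFC 8878). A standalone sketch of composing that header; write_le24 is a local stand-in for the library's MEM_writeLE24:

```c
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for MEM_writeLE24: store the low 24 bits little-endian. */
static void write_le24(unsigned char* p, uint32_t v)
{
    p[0] = (unsigned char)(v & 0xFF);
    p[1] = (unsigned char)((v >> 8) & 0xFF);
    p[2] = (unsigned char)((v >> 16) & 0xFF);
}

int main(void)
{
    /* Last block, raw (uncompressed) type, content size 0: the same value the
     * `cBlockHeader24 = 1 + (bt_raw << 1) + 0` expression above produces,
     * since bt_raw is 0. */
    uint32_t const lastBlock = 1;
    uint32_t const blockType = 0;   /* raw */
    uint32_t const blockSize = 0;
    uint32_t const header = lastBlock | (blockType << 1) | (blockSize << 3);

    unsigned char buf[3];
    write_le24(buf, header);
    printf("%02x %02x %02x\n", buf[0], buf[1], buf[2]);   /* 01 00 00 */
    return 0;
}
```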
```diff
@@ -5455,7 +5568,7 @@ ZSTD_CDict* ZSTD_createCDict_advanced2(
                                 cctxParams.useRowMatchFinder, cctxParams.enableDedicatedDictSearch,
                                 customMem);
 
-    if (ZSTD_isError( ZSTD_initCDict_internal(cdict,
+    if (!cdict || ZSTD_isError( ZSTD_initCDict_internal(cdict,
                                     dict, dictSize,
                                     dictLoadMethod, dictContentType,
                                     cctxParams) )) {
```
```diff
@@ -5879,7 +5992,7 @@ static size_t ZSTD_compressStream_generic(ZSTD_CStream* zcs,
             if (zcs->appliedParams.inBufferMode == ZSTD_bm_stable) {
                 assert(input->pos >= zcs->stableIn_notConsumed);
                 input->pos -= zcs->stableIn_notConsumed;
-                ip -= zcs->stableIn_notConsumed;
+                if (ip) ip -= zcs->stableIn_notConsumed;
                 zcs->stableIn_notConsumed = 0;
             }
             if (zcs->appliedParams.inBufferMode == ZSTD_bm_buffered) {
@@ -6138,7 +6251,7 @@ static size_t ZSTD_CCtx_init_compressStream2(ZSTD_CCtx* cctx,
 #ifdef ZSTD_MULTITHREAD
     /* If external matchfinder is enabled, make sure to fail before checking job size (for consistency) */
     RETURN_ERROR_IF(
-        params
+        ZSTD_hasExtSeqProd(&params) && params.nbWorkers >= 1,
         parameter_combination_unsupported,
         "External sequence producer isn't supported with nbWorkers >= 1"
     );
```
```diff
@@ -6430,7 +6543,7 @@ ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
         if (cctx->appliedParams.validateSequences) {
             seqPos->posInSrc += litLength + matchLength;
             FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, seqPos->posInSrc,
-                                                cctx->appliedParams.cParams.windowLog, dictSize, cctx->appliedParams
+                                                cctx->appliedParams.cParams.windowLog, dictSize, ZSTD_hasExtSeqProd(&cctx->appliedParams)),
                                                 "Sequence validation failed");
         }
         RETURN_ERROR_IF(idx - seqPos->idx >= cctx->seqStore.maxNbSeq, externalSequences_invalid,
@@ -6568,7 +6681,7 @@ ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition*
         if (cctx->appliedParams.validateSequences) {
             seqPos->posInSrc += litLength + matchLength;
             FORWARD_IF_ERROR(ZSTD_validateSequence(offBase, matchLength, cctx->appliedParams.cParams.minMatch, seqPos->posInSrc,
-                                                   cctx->appliedParams.cParams.windowLog, dictSize, cctx->appliedParams
+                                                   cctx->appliedParams.cParams.windowLog, dictSize, ZSTD_hasExtSeqProd(&cctx->appliedParams)),
                                                    "Sequence validation failed");
         }
         DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offBase, matchLength, litLength);
```
```diff
@@ -7014,19 +7127,27 @@ ZSTD_parameters ZSTD_getParams(int compressionLevel, unsigned long long srcSizeH
 }
 
 void ZSTD_registerSequenceProducer(
-    ZSTD_CCtx* zc,
-
+    ZSTD_CCtx* zc,
+    void* extSeqProdState,
+    ZSTD_sequenceProducer_F extSeqProdFunc
+) {
+    assert(zc != NULL);
+    ZSTD_CCtxParams_registerSequenceProducer(
+        &zc->requestedParams, extSeqProdState, extSeqProdFunc
+    );
+}
+
+void ZSTD_CCtxParams_registerSequenceProducer(
+    ZSTD_CCtx_params* params,
+    void* extSeqProdState,
+    ZSTD_sequenceProducer_F extSeqProdFunc
 ) {
-
-
-
-
-    emctx.seqBuffer = NULL;
-    emctx.seqBufferCapacity = 0;
-    zc->externalMatchCtx = emctx;
-    zc->requestedParams.useSequenceProducer = 1;
+    assert(params != NULL);
+    if (extSeqProdFunc != NULL) {
+        params->extSeqProdFunc = extSeqProdFunc;
+        params->extSeqProdState = extSeqProdState;
     } else {
-
-
+        params->extSeqProdFunc = NULL;
+        params->extSeqProdState = NULL;
     }
 }
```