zstd-ruby 1.5.1.1 → 1.5.2.2
- checksums.yaml +4 -4
- data/.gitignore +2 -0
- data/README.md +57 -4
- data/Rakefile +8 -2
- data/ext/zstdruby/{zstdruby.h → common.h} +2 -0
- data/ext/zstdruby/libzstd/common/pool.c +11 -6
- data/ext/zstdruby/libzstd/common/pool.h +2 -2
- data/ext/zstdruby/libzstd/common/portability_macros.h +6 -0
- data/ext/zstdruby/libzstd/common/zstd_internal.h +3 -4
- data/ext/zstdruby/libzstd/compress/zstd_compress.c +114 -96
- data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +72 -39
- data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.c +10 -10
- data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.c +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_cwksp.h +38 -24
- data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +10 -10
- data/ext/zstdruby/libzstd/compress/zstd_fast.c +11 -11
- data/ext/zstdruby/libzstd/compress/zstd_lazy.c +66 -62
- data/ext/zstdruby/libzstd/compress/zstd_ldm.c +5 -3
- data/ext/zstdruby/libzstd/compress/zstd_opt.c +66 -43
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +17 -9
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +4 -1
- data/ext/zstdruby/libzstd/decompress/huf_decompress.c +2 -2
- data/ext/zstdruby/libzstd/decompress/huf_decompress_amd64.S +17 -3
- data/ext/zstdruby/libzstd/dictBuilder/zdict.c +2 -2
- data/ext/zstdruby/libzstd/zstd.h +1 -1
- data/ext/zstdruby/main.c +14 -0
- data/ext/zstdruby/streaming_compress.c +183 -0
- data/ext/zstdruby/streaming_compress.h +5 -0
- data/ext/zstdruby/streaming_decompress.c +123 -0
- data/ext/zstdruby/zstdruby.c +4 -6
- data/lib/zstd-ruby/version.rb +1 -1
- data/zstd-ruby.gemspec +1 -1
- metadata +11 -40
- data/.github/dependabot.yml +0 -8
- data/.github/workflows/ruby.yml +0 -35
- data/ext/zstdruby/libzstd/.gitignore +0 -3
- data/ext/zstdruby/libzstd/BUCK +0 -232
- data/ext/zstdruby/libzstd/Makefile +0 -357
- data/ext/zstdruby/libzstd/README.md +0 -217
- data/ext/zstdruby/libzstd/deprecated/zbuff.h +0 -214
- data/ext/zstdruby/libzstd/deprecated/zbuff_common.c +0 -26
- data/ext/zstdruby/libzstd/deprecated/zbuff_compress.c +0 -167
- data/ext/zstdruby/libzstd/deprecated/zbuff_decompress.c +0 -75
- data/ext/zstdruby/libzstd/dll/example/Makefile +0 -48
- data/ext/zstdruby/libzstd/dll/example/README.md +0 -63
- data/ext/zstdruby/libzstd/dll/example/build_package.bat +0 -20
- data/ext/zstdruby/libzstd/dll/example/fullbench-dll.sln +0 -25
- data/ext/zstdruby/libzstd/dll/example/fullbench-dll.vcxproj +0 -181
- data/ext/zstdruby/libzstd/legacy/zstd_legacy.h +0 -415
- data/ext/zstdruby/libzstd/legacy/zstd_v01.c +0 -2158
- data/ext/zstdruby/libzstd/legacy/zstd_v01.h +0 -94
- data/ext/zstdruby/libzstd/legacy/zstd_v02.c +0 -3518
- data/ext/zstdruby/libzstd/legacy/zstd_v02.h +0 -93
- data/ext/zstdruby/libzstd/legacy/zstd_v03.c +0 -3160
- data/ext/zstdruby/libzstd/legacy/zstd_v03.h +0 -93
- data/ext/zstdruby/libzstd/legacy/zstd_v04.c +0 -3647
- data/ext/zstdruby/libzstd/legacy/zstd_v04.h +0 -142
- data/ext/zstdruby/libzstd/legacy/zstd_v05.c +0 -4050
- data/ext/zstdruby/libzstd/legacy/zstd_v05.h +0 -162
- data/ext/zstdruby/libzstd/legacy/zstd_v06.c +0 -4154
- data/ext/zstdruby/libzstd/legacy/zstd_v06.h +0 -172
- data/ext/zstdruby/libzstd/legacy/zstd_v07.c +0 -4541
- data/ext/zstdruby/libzstd/legacy/zstd_v07.h +0 -187
- data/ext/zstdruby/libzstd/libzstd.mk +0 -185
- data/ext/zstdruby/libzstd/libzstd.pc.in +0 -16
- data/ext/zstdruby/libzstd/modulemap/module.modulemap +0 -4
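
The headline change in the extension itself is the pair of new streaming sources (`streaming_compress.c`, `streaming_decompress.c`). Bindings of this kind drive libzstd's standard streaming loop; the sketch below shows that loop against the public `zstd.h` API. It is a minimal illustration, not the gem's actual extension code:

```c
#include <stddef.h>
#include <zstd.h>

/* Compress `src` into `dst` as one zstd frame via the streaming API.
 * Returns the compressed size, or 0 on error. */
static size_t stream_compress(void* dst, size_t dstCapacity,
                              const void* src, size_t srcSize, int level)
{
    ZSTD_CCtx* const cctx = ZSTD_createCCtx();
    ZSTD_inBuffer  in  = { src, srcSize, 0 };
    ZSTD_outBuffer out = { dst, dstCapacity, 0 };
    size_t remaining;

    if (cctx == NULL) return 0;
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, level);
    do {
        /* ZSTD_e_end flushes all buffered data and writes the frame epilogue;
         * a nonzero return value means another call is needed to finish. */
        remaining = ZSTD_compressStream2(cctx, &out, &in, ZSTD_e_end);
        if (ZSTD_isError(remaining)) { ZSTD_freeCCtx(cctx); return 0; }
    } while (remaining != 0);
    ZSTD_freeCCtx(cctx);
    return out.pos;
}
```

Feeding input piecewise with `ZSTD_e_continue` and `ZSTD_e_flush` directives, instead of a single `ZSTD_e_end` call, is what a streaming binding exposes to Ruby callers.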

data/ext/zstdruby/libzstd/compress/zstd_compress.c
@@ -2396,9 +2396,9 @@ void ZSTD_seqToCodes(const seqStore_t* seqStorePtr)
     assert(nbSeq <= seqStorePtr->maxNbSeq);
     for (u=0; u<nbSeq; u++) {
         U32 const llv = sequences[u].litLength;
-        U32 const mlv = sequences[u].matchLength;
+        U32 const mlv = sequences[u].mlBase;
         llCodeTable[u] = (BYTE)ZSTD_LLcode(llv);
-        ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offset);
+        ofCodeTable[u] = (BYTE)ZSTD_highbit32(sequences[u].offBase);
         mlCodeTable[u] = (BYTE)ZSTD_MLcode(mlv);
     }
     if (seqStorePtr->longLengthType==ZSTD_llt_literalLength)
@@ -2910,9 +2910,9 @@ static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
     assert(zc->seqCollector.maxSequences >= seqStoreSeqSize + 1);
     ZSTD_memcpy(updatedRepcodes.rep, zc->blockState.prevCBlock->rep, sizeof(repcodes_t));
     for (i = 0; i < seqStoreSeqSize; ++i) {
-        U32 rawOffset = seqStoreSeqs[i].offset - ZSTD_REP_NUM;
+        U32 rawOffset = seqStoreSeqs[i].offBase - ZSTD_REP_NUM;
         outSeqs[i].litLength = seqStoreSeqs[i].litLength;
-        outSeqs[i].matchLength = seqStoreSeqs[i].matchLength + MINMATCH;
+        outSeqs[i].matchLength = seqStoreSeqs[i].mlBase + MINMATCH;
         outSeqs[i].rep = 0;
 
         if (i == seqStore->longLengthPos) {
@@ -2923,9 +2923,9 @@ static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
             }
         }
 
-        if (seqStoreSeqs[i].offset <= ZSTD_REP_NUM) {
+        if (seqStoreSeqs[i].offBase <= ZSTD_REP_NUM) {
             /* Derive the correct offset corresponding to a repcode */
-            outSeqs[i].rep = seqStoreSeqs[i].offset;
+            outSeqs[i].rep = seqStoreSeqs[i].offBase;
             if (outSeqs[i].litLength != 0) {
                 rawOffset = updatedRepcodes.rep[outSeqs[i].rep - 1];
             } else {
@@ -2939,9 +2939,9 @@ static void ZSTD_copyBlockSequences(ZSTD_CCtx* zc)
         outSeqs[i].offset = rawOffset;
         /* seqStoreSeqs[i].offset == offCode+1, and ZSTD_updateRep() expects offCode
            so we provide seqStoreSeqs[i].offset - 1 */
-        updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep,
-                                         seqStoreSeqs[i].offset - 1,
-                                         seqStoreSeqs[i].litLength == 0);
+        ZSTD_updateRep(updatedRepcodes.rep,
+                       seqStoreSeqs[i].offBase - 1,
+                       seqStoreSeqs[i].litLength == 0);
         literalsRead += outSeqs[i].litLength;
     }
     /* Insert last literals (if any exist) in the block as a sequence with ml == off == 0.
@@ -3385,7 +3385,7 @@ static size_t ZSTD_countSeqStoreMatchBytes(const seqStore_t* const seqStore) {
     size_t i;
     for (i = 0; i < nbSeqs; ++i) {
         seqDef seq = seqStore->sequencesStart[i];
-        matchBytes += seq.matchLength + MINMATCH;
+        matchBytes += seq.mlBase + MINMATCH;
         if (i == seqStore->longLengthPos && seqStore->longLengthType == ZSTD_llt_matchLength) {
             matchBytes += 0x10000;
         }
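
The `seq.mlBase + MINMATCH` term and the `0x10000` correction above reflect how a sequence stores its match length: a U16 holding `matchLength - MINMATCH`, plus at most one per-block overflow flagged via `longLengthPos`. A hypothetical standalone illustration (the struct and helper are invented for the example; only the constants mirror zstd):

```c
#include <assert.h>
#include <stdint.h>

#define MINMATCH 3   /* zstd's minimum match length */

typedef struct { uint16_t mlBase; } miniSeq;   /* stand-in for seqDef */

/* Recover the real match length from the stored U16, adding 0x10000 back
 * for the single sequence a block may flag as "long". */
static uint32_t match_length(miniSeq s, int isLongLengthPos)
{
    return (uint32_t)s.mlBase + MINMATCH + (isLongLengthPos ? 0x10000 : 0);
}

int main(void)
{
    miniSeq s = { 0xFFFF };                       /* largest in-band value */
    assert(match_length(s, 0) == 0xFFFF + MINMATCH);
    assert(match_length(s, 1) == 0xFFFF + MINMATCH + 0x10000);
    return 0;
}
```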
@@ -3434,11 +3434,13 @@ static void ZSTD_deriveSeqStoreChunk(seqStore_t* resultSeqStore,
 
 /**
  * Returns the raw offset represented by the combination of offCode, ll0, and repcode history.
- * offCode must be an offCode representing a repcode, therefore in the range of [0, 2].
+ * offCode must represent a repcode in the numeric representation of ZSTD_storeSeq().
  */
-static U32 ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offCode, const U32 ll0) {
-    U32 const adjustedOffCode = offCode + ll0;
-    assert(offCode < ZSTD_REP_NUM);
+static U32
+ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32 offCode, const U32 ll0)
+{
+    U32 const adjustedOffCode = STORED_REPCODE(offCode) - 1 + ll0;  /* [ 0 - 3 ] */
+    assert(STORED_IS_REPCODE(offCode));
     if (adjustedOffCode == ZSTD_REP_NUM) {
         /* litlength == 0 and offCode == 2 implies selection of first repcode - 1 */
         assert(rep[0] > 0);
@@ -3449,11 +3451,16 @@ static U32 ZSTD_resolveRepcodeToRawOffset(const U32 rep[ZSTD_REP_NUM], const U32
 
 /**
  * ZSTD_seqStore_resolveOffCodes() reconciles any possible divergences in offset history that may arise
- * due to emission of RLE/raw blocks that disturb the offset history, and replaces any repcodes within
- * the seqStore that may be invalid.
+ * due to emission of RLE/raw blocks that disturb the offset history,
+ * and replaces any repcodes within the seqStore that may be invalid.
+ *
+ * dRepcodes are updated as would be on the decompression side.
+ * cRepcodes are updated exactly in accordance with the seqStore.
  *
- * dRepcodes are updated as would be on the decompression side. cRepcodes are updated exactly in
- * accordance with the seqStore.
+ * Note : this function assumes seq->offBase respects the following numbering scheme :
+ *        0 : invalid
+ *        1-3 : repcode 1-3
+ *        4+ : real_offset+3
  */
 static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_t* const cRepcodes,
                                           seqStore_t* const seqStore, U32 const nbSeq) {
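
The numbering note added above (0 invalid, 1-3 repcode, 4+ real_offset+3) is easy to verify mechanically. A self-contained sketch, with the macros re-declared locally since their real definitions live in `zstd_compress_internal.h` (see the hunks further down):

```c
#include <assert.h>

#define ZSTD_REP_NUM 3
#define ZSTD_REP_MOVE (ZSTD_REP_NUM-1)
#define STORE_REPCODE(r) (assert((r)>=1), assert((r)<=3), (r)-1)
#define STORE_OFFSET(o)  (assert((o)>0), (o) + ZSTD_REP_MOVE)
#define STORED_TO_OFFBASE(o) ((o)+1)

int main(void)
{
    /* repcodes 1-3 land on offBase 1-3 */
    assert(STORED_TO_OFFBASE(STORE_REPCODE(1)) == 1);
    assert(STORED_TO_OFFBASE(STORE_REPCODE(3)) == 3);
    /* a real offset o lands on offBase o+3, i.e. the 4+ range */
    assert(STORED_TO_OFFBASE(STORE_OFFSET(1)) == 1 + 3);
    assert(STORED_TO_OFFBASE(STORE_OFFSET(1000)) == 1000 + 3);
    return 0;
}
```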
@@ -3461,9 +3468,9 @@ static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_
     for (; idx < nbSeq; ++idx) {
         seqDef* const seq = seqStore->sequencesStart + idx;
         U32 const ll0 = (seq->litLength == 0);
-        U32 offCode = seq->offset - 1;
-        assert(seq->offset > 0);
-        if (offCode <= ZSTD_REP_MOVE) {
+        U32 const offCode = OFFBASE_TO_STORED(seq->offBase);
+        assert(seq->offBase > 0);
+        if (STORED_IS_REPCODE(offCode)) {
             U32 const dRawOffset = ZSTD_resolveRepcodeToRawOffset(dRepcodes->rep, offCode, ll0);
             U32 const cRawOffset = ZSTD_resolveRepcodeToRawOffset(cRepcodes->rep, offCode, ll0);
             /* Adjust simulated decompression repcode history if we come across a mismatch. Replace
@@ -3471,14 +3478,14 @@ static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_
              * repcode history.
              */
             if (dRawOffset != cRawOffset) {
-                seq->offset = cRawOffset + ZSTD_REP_NUM;
+                seq->offBase = cRawOffset + ZSTD_REP_NUM;
             }
         }
         /* Compression repcode history is always updated with values directly from the unmodified seqStore.
          * Decompression repcode history may use modified seq->offset value taken from compression repcode history.
          */
-        *dRepcodes = ZSTD_updateRep(dRepcodes->rep, seq->offset - 1, ll0);
-        *cRepcodes = ZSTD_updateRep(cRepcodes->rep, offCode, ll0);
+        ZSTD_updateRep(dRepcodes->rep, OFFBASE_TO_STORED(seq->offBase), ll0);
+        ZSTD_updateRep(cRepcodes->rep, offCode, ll0);
     }
 }
 
@@ -3487,11 +3494,13 @@ static void ZSTD_seqStore_resolveOffCodes(repcodes_t* const dRepcodes, repcodes_
  *
  * Returns the total size of that block (including header) or a ZSTD error code.
  */
-static size_t ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore,
-                                                repcodes_t* const dRep, repcodes_t* const cRep,
-                                                void* dst, size_t dstCapacity,
-                                                const void* src, size_t srcSize,
-                                                U32 lastBlock, U32 isPartition) {
+static size_t
+ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const seqStore,
+                                  repcodes_t* const dRep, repcodes_t* const cRep,
+                                  void* dst, size_t dstCapacity,
+                                  const void* src, size_t srcSize,
+                                  U32 lastBlock, U32 isPartition)
+{
     const U32 rleMaxLength = 25;
     BYTE* op = (BYTE*)dst;
     const BYTE* ip = (const BYTE*)src;
@@ -3500,6 +3509,7 @@ static size_t ZSTD_compressSeqStore_singleBlock(ZSTD_CCtx* zc, seqStore_t* const
 
     /* In case of an RLE or raw block, the simulated decompression repcode history must be reset */
     repcodes_t const dRepOriginal = *dRep;
+    DEBUGLOG(5, "ZSTD_compressSeqStore_singleBlock");
     if (isPartition)
         ZSTD_seqStore_resolveOffCodes(dRep, cRep, seqStore, (U32)(seqStore->sequences - seqStore->sequencesStart));
 
@@ -3572,8 +3582,10 @@ typedef struct {
  * Furthermore, the number of splits is capped by ZSTD_MAX_NB_BLOCK_SPLITS. At ZSTD_MAX_NB_BLOCK_SPLITS == 196 with the current existing blockSize
  * maximum of 128 KB, this value is actually impossible to reach.
  */
-static void ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t endIdx,
-                                         ZSTD_CCtx* zc, const seqStore_t* origSeqStore) {
+static void
+ZSTD_deriveBlockSplitsHelper(seqStoreSplits* splits, size_t startIdx, size_t endIdx,
+                             ZSTD_CCtx* zc, const seqStore_t* origSeqStore)
+{
     seqStore_t* fullSeqStoreChunk = &zc->blockSplitCtx.fullSeqStoreChunk;
     seqStore_t* firstHalfSeqStore = &zc->blockSplitCtx.firstHalfSeqStore;
     seqStore_t* secondHalfSeqStore = &zc->blockSplitCtx.secondHalfSeqStore;
@@ -3628,8 +3640,10 @@ static size_t ZSTD_deriveBlockSplits(ZSTD_CCtx* zc, U32 partitions[], U32 nbSeq)
  *
  * Returns combined size of all blocks (which includes headers), or a ZSTD error code.
  */
-static size_t ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity,
-                                                     const void* src, size_t blockSize, U32 lastBlock, U32 nbSeq) {
+static size_t
+ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, size_t dstCapacity,
+                                       const void* src, size_t blockSize, U32 lastBlock, U32 nbSeq)
+{
     size_t cSize = 0;
     const BYTE* ip = (const BYTE*)src;
     BYTE* op = (BYTE*)dst;
@@ -3715,9 +3729,11 @@ static size_t ZSTD_compressBlock_splitBlock_internal(ZSTD_CCtx* zc, void* dst, s
     return cSize;
 }
 
-static size_t ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc,
-                                            void* dst, size_t dstCapacity,
-                                            const void* src, size_t srcSize, U32 lastBlock) {
+static size_t
+ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc,
+                              void* dst, size_t dstCapacity,
+                              const void* src, size_t srcSize, U32 lastBlock)
+{
     const BYTE* ip = (const BYTE*)src;
     BYTE* op = (BYTE*)dst;
     U32 nbSeq;
@@ -3743,9 +3759,10 @@ static size_t ZSTD_compressBlock_splitBlock(ZSTD_CCtx* zc,
     return cSize;
 }
 
-static size_t ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
-                                          void* dst, size_t dstCapacity,
-                                          const void* src, size_t srcSize, U32 frame)
+static size_t
+ZSTD_compressBlock_internal(ZSTD_CCtx* zc,
+                            void* dst, size_t dstCapacity,
+                            const void* src, size_t srcSize, U32 frame)
 {
     /* This the upper bound for the length of an rle block.
      * This isn't the actual upper bound. Finding the real threshold
@@ -5738,39 +5755,39 @@ typedef struct {
     size_t posInSrc;       /* Number of bytes given by sequences provided so far */
 } ZSTD_sequencePosition;
 
-/* Returns a ZSTD error code if sequence is not valid */
-static size_t ZSTD_validateSequence(U32 offCode, U32 matchLength,
-                                    size_t posInSrc, U32 windowLog, size_t dictSize, U32 minMatch) {
-    size_t offsetBound;
-    U32 windowSize = 1 << windowLog;
+/* ZSTD_validateSequence() :
+ * @offCode : is presumed to follow format required by ZSTD_storeSeq()
+ * @returns a ZSTD error code if sequence is not valid
+ */
+static size_t
+ZSTD_validateSequence(U32 offCode, U32 matchLength,
+                      size_t posInSrc, U32 windowLog, size_t dictSize)
+{
+    U32 const windowSize = 1 << windowLog;
     /* posInSrc represents the amount of data the the decoder would decode up to this point.
      * As long as the amount of data decoded is less than or equal to window size, offsets may be
      * larger than the total length of output decoded in order to reference the dict, even larger than
      * window size. After output surpasses windowSize, we're limited to windowSize offsets again.
      */
-    offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize;
-    RETURN_ERROR_IF(offCode > offsetBound + ZSTD_REP_MOVE, corruption_detected, "Offset too large!");
-    RETURN_ERROR_IF(matchLength < minMatch, corruption_detected, "Matchlength too small");
+    size_t const offsetBound = posInSrc > windowSize ? (size_t)windowSize : posInSrc + (size_t)dictSize;
+    RETURN_ERROR_IF(offCode > STORE_OFFSET(offsetBound), corruption_detected, "Offset too large!");
+    RETURN_ERROR_IF(matchLength < MINMATCH, corruption_detected, "Matchlength too small");
     return 0;
 }
 
 /* Returns an offset code, given a sequence's raw offset, the ongoing repcode array, and whether litLength == 0 */
-static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0) {
-    U32 offCode = rawOffset + ZSTD_REP_MOVE;
-    U32 repCode = 0;
+static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32 ll0)
+{
+    U32 offCode = STORE_OFFSET(rawOffset);
 
     if (!ll0 && rawOffset == rep[0]) {
-        repCode = 1;
+        offCode = STORE_REPCODE_1;
     } else if (rawOffset == rep[1]) {
-        repCode = 2 - ll0;
+        offCode = STORE_REPCODE(2 - ll0);
     } else if (rawOffset == rep[2]) {
-        repCode = 3 - ll0;
+        offCode = STORE_REPCODE(3 - ll0);
     } else if (ll0 && rawOffset == rep[0] - 1) {
-        repCode = 3;
-    }
-    if (repCode) {
-        /* ZSTD_storeSeq expects a number in the range [0, 2] to represent a repcode */
-        offCode = repCode - 1;
+        offCode = STORE_REPCODE_3;
     }
     return offCode;
 }
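
`ZSTD_finalizeOffCode()` prefers a repcode over a literal offset whenever the raw offset matches the current history, with the repcode ID shifted by one when `litLength == 0`. A standalone sketch of that selection logic (macros simplified locally; this mirrors, but is not, the library code):

```c
#include <assert.h>
#include <stdint.h>

#define ZSTD_REP_NUM 3
#define ZSTD_REP_MOVE (ZSTD_REP_NUM-1)
#define STORE_REPCODE(r) ((uint32_t)((r)-1))
#define STORE_OFFSET(o)  ((uint32_t)((o) + ZSTD_REP_MOVE))

static uint32_t finalize_offcode(uint32_t rawOffset, const uint32_t rep[ZSTD_REP_NUM], uint32_t ll0)
{
    uint32_t offCode = STORE_OFFSET(rawOffset);        /* default: literal offset */
    if (!ll0 && rawOffset == rep[0])         offCode = STORE_REPCODE(1);
    else if (rawOffset == rep[1])            offCode = STORE_REPCODE(2 - ll0);
    else if (rawOffset == rep[2])            offCode = STORE_REPCODE(3 - ll0);
    else if (ll0 && rawOffset == rep[0] - 1) offCode = STORE_REPCODE(3);
    return offCode;
}

int main(void)
{
    const uint32_t rep[ZSTD_REP_NUM] = { 100, 200, 300 };
    assert(finalize_offcode(100, rep, 0) == STORE_REPCODE(1));  /* matches rep[0] */
    assert(finalize_offcode(200, rep, 1) == STORE_REPCODE(1));  /* ll0 shifts the ID */
    assert(finalize_offcode( 99, rep, 1) == STORE_REPCODE(3));  /* rep[0]-1 special case */
    assert(finalize_offcode(999, rep, 0) == STORE_OFFSET(999)); /* no repcode match */
    return 0;
}
```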
@@ -5778,18 +5795,17 @@ static U32 ZSTD_finalizeOffCode(U32 rawOffset, const U32 rep[ZSTD_REP_NUM], U32
 
 /* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of
  * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter.
  */
-static size_t ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
-                                                             const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
-                                                             const void* src, size_t blockSize) {
+static size_t
+ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
+                                               ZSTD_sequencePosition* seqPos,
+                                               const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+                                               const void* src, size_t blockSize)
+{
     U32 idx = seqPos->idx;
     BYTE const* ip = (BYTE const*)(src);
     const BYTE* const iend = ip + blockSize;
     repcodes_t updatedRepcodes;
     U32 dictSize;
-    U32 litLength;
-    U32 matchLength;
-    U32 ll0;
-    U32 offCode;
 
     if (cctx->cdict) {
         dictSize = (U32)cctx->cdict->dictContentSize;
@@ -5800,23 +5816,22 @@ static size_t ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, ZS
     }
     ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
     for (; (inSeqs[idx].matchLength != 0 || inSeqs[idx].offset != 0) && idx < inSeqsSize; ++idx) {
-        litLength = inSeqs[idx].litLength;
-        matchLength = inSeqs[idx].matchLength;
-        ll0 = litLength == 0;
-        offCode = ZSTD_finalizeOffCode(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
-        updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
+        U32 const litLength = inSeqs[idx].litLength;
+        U32 const ll0 = (litLength == 0);
+        U32 const matchLength = inSeqs[idx].matchLength;
+        U32 const offCode = ZSTD_finalizeOffCode(inSeqs[idx].offset, updatedRepcodes.rep, ll0);
+        ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
 
         DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
         if (cctx->appliedParams.validateSequences) {
             seqPos->posInSrc += litLength + matchLength;
             FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
-                                                   cctx->appliedParams.cParams.windowLog, dictSize,
-                                                   cctx->appliedParams.cParams.minMatch),
+                                                   cctx->appliedParams.cParams.windowLog, dictSize),
                              "Sequence validation failed");
         }
         RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
                         "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
-        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength - MINMATCH);
+        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength);
         ip += matchLength + litLength;
     }
     ZSTD_memcpy(cctx->blockState.nextCBlock->rep, updatedRepcodes.rep, sizeof(repcodes_t));
@@ -5843,9 +5858,11 @@ static size_t ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx, ZS
  * avoid splitting a match, or to avoid splitting a match such that it would produce a match
  * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block.
  */
-static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
-                                                       const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
-                                                       const void* src, size_t blockSize) {
+static size_t
+ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
+                                         const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+                                         const void* src, size_t blockSize)
+{
     U32 idx = seqPos->idx;
     U32 startPosInSequence = seqPos->posInSequence;
     U32 endPosInSequence = seqPos->posInSequence + (U32)blockSize;
@@ -5855,10 +5872,6 @@ static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_seq
     repcodes_t updatedRepcodes;
     U32 bytesAdjustment = 0;
     U32 finalMatchSplit = 0;
-    U32 litLength;
-    U32 matchLength;
-    U32 rawOffset;
-    U32 offCode;
 
     if (cctx->cdict) {
         dictSize = cctx->cdict->dictContentSize;
@@ -5872,9 +5885,10 @@ static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_seq
     ZSTD_memcpy(updatedRepcodes.rep, cctx->blockState.prevCBlock->rep, sizeof(repcodes_t));
     while (endPosInSequence && idx < inSeqsSize && !finalMatchSplit) {
         const ZSTD_Sequence currSeq = inSeqs[idx];
-        litLength = currSeq.litLength;
-        matchLength = currSeq.matchLength;
-        rawOffset = currSeq.offset;
+        U32 litLength = currSeq.litLength;
+        U32 matchLength = currSeq.matchLength;
+        U32 const rawOffset = currSeq.offset;
+        U32 offCode;
 
         /* Modify the sequence depending on where endPosInSequence lies */
         if (endPosInSequence >= currSeq.litLength + currSeq.matchLength) {
@@ -5927,22 +5941,21 @@ static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_seq
             }
         }
         /* Check if this offset can be represented with a repcode */
-        {   U32 ll0 = (litLength == 0);
+        {   U32 const ll0 = (litLength == 0);
             offCode = ZSTD_finalizeOffCode(rawOffset, updatedRepcodes.rep, ll0);
-            updatedRepcodes = ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
+            ZSTD_updateRep(updatedRepcodes.rep, offCode, ll0);
         }
 
         if (cctx->appliedParams.validateSequences) {
             seqPos->posInSrc += litLength + matchLength;
             FORWARD_IF_ERROR(ZSTD_validateSequence(offCode, matchLength, seqPos->posInSrc,
-                                                   cctx->appliedParams.cParams.windowLog, dictSize,
-                                                   cctx->appliedParams.cParams.minMatch),
+                                                   cctx->appliedParams.cParams.windowLog, dictSize),
                              "Sequence validation failed");
         }
         DEBUGLOG(6, "Storing sequence: (of: %u, ml: %u, ll: %u)", offCode, matchLength, litLength);
         RETURN_ERROR_IF(idx - seqPos->idx > cctx->seqStore.maxNbSeq, memory_allocation,
                         "Not enough memory allocated. Try adjusting ZSTD_c_minMatch.");
-        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength - MINMATCH);
+        ZSTD_storeSeq(&cctx->seqStore, litLength, ip, iend, offCode, matchLength);
         ip += matchLength + litLength;
     }
     DEBUGLOG(5, "Ending seq: idx: %u (of: %u ml: %u ll: %u)", idx, inSeqs[idx].offset, inSeqs[idx].matchLength, inSeqs[idx].litLength);
@@ -5967,7 +5980,8 @@ static size_t ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_seq
 typedef size_t (*ZSTD_sequenceCopier) (ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
                                        const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
                                        const void* src, size_t blockSize);
-static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode) {
+static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode)
+{
     ZSTD_sequenceCopier sequenceCopier = NULL;
     assert(ZSTD_cParam_withinBounds(ZSTD_c_blockDelimiters, mode));
     if (mode == ZSTD_sf_explicitBlockDelimiters) {
@@ -5981,12 +5995,15 @@ static ZSTD_sequenceCopier ZSTD_selectSequenceCopier(ZSTD_sequenceFormat_e mode)
 
 /* Compress, block-by-block, all of the sequences given.
  *
- * Returns the cumulative size of all compressed blocks (including their headers), otherwise a ZSTD error.
+ * Returns the cumulative size of all compressed blocks (including their headers),
+ * otherwise a ZSTD error.
  */
-static size_t ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
-                                              void* dst, size_t dstCapacity,
-                                              const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
-                                              const void* src, size_t srcSize) {
+static size_t
+ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
+                                void* dst, size_t dstCapacity,
+                                const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
+                                const void* src, size_t srcSize)
+{
     size_t cSize = 0;
     U32 lastBlock;
     size_t blockSize;
@@ -5996,7 +6013,7 @@ static size_t ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
 
     BYTE const* ip = (BYTE const*)src;
     BYTE* op = (BYTE*)dst;
-    ZSTD_sequenceCopier sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);
+    ZSTD_sequenceCopier const sequenceCopier = ZSTD_selectSequenceCopier(cctx->appliedParams.blockDelimiters);
 
     DEBUGLOG(4, "ZSTD_compressSequences_internal srcSize: %zu, inSeqsSize: %zu", srcSize, inSeqsSize);
     /* Special case: empty frame */
@@ -6096,7 +6113,8 @@ static size_t ZSTD_compressSequences_internal(ZSTD_CCtx* cctx,
 
 size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstCapacity,
                               const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
-                              const void* src, size_t srcSize) {
+                              const void* src, size_t srcSize)
+{
     BYTE* op = (BYTE*)dst;
     size_t cSize = 0;
     size_t compressedBlocksSize = 0;

data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h
@@ -129,7 +129,7 @@ size_t ZSTD_buildBlockEntropyStats(seqStore_t* seqStorePtr,
 *********************************/
 
 typedef struct {
-    U32 off;            /* Offset code (offset + ZSTD_REP_MOVE) for the match */
+    U32 off;            /* Offset sumtype code for the match, using ZSTD_storeSeq() format */
     U32 len;            /* Raw length of match */
 } ZSTD_match_t;
 
@@ -497,31 +497,6 @@ MEM_STATIC U32 ZSTD_MLcode(U32 mlBase)
     return (mlBase > 127) ? ZSTD_highbit32(mlBase) + ML_deltaCode : ML_Code[mlBase];
 }
 
-typedef struct repcodes_s {
-    U32 rep[3];
-} repcodes_t;
-
-MEM_STATIC repcodes_t ZSTD_updateRep(U32 const rep[3], U32 const offset, U32 const ll0)
-{
-    repcodes_t newReps;
-    if (offset >= ZSTD_REP_NUM) {  /* full offset */
-        newReps.rep[2] = rep[1];
-        newReps.rep[1] = rep[0];
-        newReps.rep[0] = offset - ZSTD_REP_MOVE;
-    } else {   /* repcode */
-        U32 const repCode = offset + ll0;
-        if (repCode > 0) {  /* note : if repCode==0, no change */
-            U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
-            newReps.rep[2] = (repCode >= 2) ? rep[1] : rep[2];
-            newReps.rep[1] = rep[0];
-            newReps.rep[0] = currentOffset;
-        } else {   /* repCode == 0 */
-            ZSTD_memcpy(&newReps, rep, sizeof(newReps));
-        }
-    }
-    return newReps;
-}
-
 /* ZSTD_cParam_withinBounds:
  * @return 1 if value is within cParam bounds,
  * 0 otherwise */
@@ -590,7 +565,9 @@ MEM_STATIC int ZSTD_literalsCompressionIsDisabled(const ZSTD_CCtx_params* cctxPa
  * Only called when the sequence ends past ilimit_w, so it only needs to be optimized for single
  * large copies.
  */
-static void ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w) {
+static void
+ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE const* ilimit_w)
+{
     assert(iend > ilimit_w);
     if (ip <= ilimit_w) {
         ZSTD_wildcopy(op, ip, ilimit_w - ip, ZSTD_no_overlap);
@@ -600,14 +577,30 @@ static void ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const ie
     while (ip < iend) *op++ = *ip++;
 }
 
+#define ZSTD_REP_MOVE     (ZSTD_REP_NUM-1)
+#define STORE_REPCODE_1 STORE_REPCODE(1)
+#define STORE_REPCODE_2 STORE_REPCODE(2)
+#define STORE_REPCODE_3 STORE_REPCODE(3)
+#define STORE_REPCODE(r) (assert((r)>=1), assert((r)<=3), (r)-1)
+#define STORE_OFFSET(o)  (assert((o)>0), o + ZSTD_REP_MOVE)
+#define STORED_IS_OFFSET(o)  ((o) > ZSTD_REP_MOVE)
+#define STORED_IS_REPCODE(o) ((o) <= ZSTD_REP_MOVE)
+#define STORED_OFFSET(o)  (assert(STORED_IS_OFFSET(o)), (o)-ZSTD_REP_MOVE)
+#define STORED_REPCODE(o) (assert(STORED_IS_REPCODE(o)), (o)+1)  /* returns ID 1,2,3 */
+#define STORED_TO_OFFBASE(o) ((o)+1)
+#define OFFBASE_TO_STORED(o) ((o)-1)
+
 /*! ZSTD_storeSeq() :
- *  Store a sequence (litlen, litPtr, offCode and mlBase) into seqStore_t.
- *  `offCode` : distance to match + ZSTD_REP_MOVE (values <= ZSTD_REP_MOVE are repCodes).
- *  `mlBase` : matchLength - MINMATCH
+ *  Store a sequence (litlen, litPtr, offCode and matchLength) into seqStore_t.
+ *  @offBase_minus1 : Users should use employ macros STORE_REPCODE_X and STORE_OFFSET().
+ *  @matchLength : must be >= MINMATCH
  *  Allowed to overread literals up to litLimit.
 */
-HINT_INLINE UNUSED_ATTR
-void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* literals, const BYTE* litLimit, U32 offCode, size_t mlBase)
+HINT_INLINE UNUSED_ATTR void
+ZSTD_storeSeq(seqStore_t* seqStorePtr,
+              size_t litLength, const BYTE* literals, const BYTE* litLimit,
+              U32 offBase_minus1,
+              size_t matchLength)
 {
     BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH;
     BYTE const* const litEnd = literals + litLength;
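
The `STORE_*`/`STORED_*` macros introduced above encode a small sum type: stored values 0-2 are repcodes 1-3, and values above `ZSTD_REP_MOVE` are literal offsets shifted up by it. The two halves never overlap and each direction round-trips; a quick self-check, assuming only `ZSTD_REP_NUM == 3`:

```c
#include <assert.h>

#define ZSTD_REP_NUM 3
#define ZSTD_REP_MOVE     (ZSTD_REP_NUM-1)
#define STORE_REPCODE(r) (assert((r)>=1), assert((r)<=3), (r)-1)
#define STORE_OFFSET(o)  (assert((o)>0), (o) + ZSTD_REP_MOVE)
#define STORED_IS_OFFSET(o)  ((o) > ZSTD_REP_MOVE)
#define STORED_IS_REPCODE(o) ((o) <= ZSTD_REP_MOVE)
#define STORED_OFFSET(o)  (assert(STORED_IS_OFFSET(o)), (o)-ZSTD_REP_MOVE)
#define STORED_REPCODE(o) (assert(STORED_IS_REPCODE(o)), (o)+1)

int main(void)
{
    /* each constructor round-trips through its destructor */
    assert(STORED_REPCODE(STORE_REPCODE(2)) == 2);
    assert(STORED_OFFSET(STORE_OFFSET(12345)) == 12345);
    /* the two halves of the sum type are disjoint */
    assert(STORED_IS_REPCODE(STORE_REPCODE(3)));
    assert(STORED_IS_OFFSET(STORE_OFFSET(1)));
    return 0;
}
```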
@@ -616,7 +609,7 @@ void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* litera
     if (g_start==NULL) g_start = (const BYTE*)literals;  /* note : index only works for compression within a single segment */
     {   U32 const pos = (U32)((const BYTE*)literals - g_start);
         DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offCode%7u",
-               pos, (U32)litLength, (U32)mlBase+MINMATCH, (U32)offCode);
+               pos, (U32)litLength, (U32)matchLength, (U32)offBase_minus1);
     }
 #endif
     assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
@@ -647,19 +640,59 @@ void ZSTD_storeSeq(seqStore_t* seqStorePtr, size_t litLength, const BYTE* litera
     seqStorePtr->sequences[0].litLength = (U16)litLength;
 
     /* match offset */
-    seqStorePtr->sequences[0].offset = offCode + 1;
+    seqStorePtr->sequences[0].offBase = STORED_TO_OFFBASE(offBase_minus1);
 
     /* match Length */
-    if (mlBase>0xFFFF) {
-        assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */
-        seqStorePtr->longLengthType = ZSTD_llt_matchLength;
-        seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+    assert(matchLength >= MINMATCH);
+    {   size_t const mlBase = matchLength - MINMATCH;
+        if (mlBase>0xFFFF) {
+            assert(seqStorePtr->longLengthType == ZSTD_llt_none); /* there can only be a single long length */
+            seqStorePtr->longLengthType = ZSTD_llt_matchLength;
+            seqStorePtr->longLengthPos = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);
+        }
+        seqStorePtr->sequences[0].mlBase = (U16)mlBase;
     }
-    seqStorePtr->sequences[0].matchLength = (U16)mlBase;
 
     seqStorePtr->sequences++;
 }
 
+/* ZSTD_updateRep() :
+ * updates in-place @rep (array of repeat offsets)
+ * @offBase_minus1 : sum-type, with same numeric representation as ZSTD_storeSeq()
+ */
+MEM_STATIC void
+ZSTD_updateRep(U32 rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0)
+{
+    if (STORED_IS_OFFSET(offBase_minus1)) {  /* full offset */
+        rep[2] = rep[1];
+        rep[1] = rep[0];
+        rep[0] = STORED_OFFSET(offBase_minus1);
+    } else {   /* repcode */
+        U32 const repCode = STORED_REPCODE(offBase_minus1) - 1 + ll0;
+        if (repCode > 0) {  /* note : if repCode==0, no change */
+            U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
+            rep[2] = (repCode >= 2) ? rep[1] : rep[2];
+            rep[1] = rep[0];
+            rep[0] = currentOffset;
+        } else {   /* repCode == 0 */
+            /* nothing to do */
+        }
+    }
+}
+
+typedef struct repcodes_s {
+    U32 rep[3];
+} repcodes_t;
+
+MEM_STATIC repcodes_t
+ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0)
+{
+    repcodes_t newReps;
+    ZSTD_memcpy(&newReps, rep, sizeof(newReps));
+    ZSTD_updateRep(newReps.rep, offBase_minus1, ll0);
+    return newReps;
+}
+
 
 /*-*************************************
  * Match length counter
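
`ZSTD_updateRep()` now mutates the repeat-offset history in place, and `ZSTD_newRep()` recovers the old value-returning behavior on top of it. A standalone sketch of the update semantics on a sample history (macros simplified; asserts double as documentation):

```c
#include <assert.h>
#include <stdint.h>

#define ZSTD_REP_NUM 3
#define ZSTD_REP_MOVE (ZSTD_REP_NUM-1)
#define STORED_IS_OFFSET(o) ((o) > ZSTD_REP_MOVE)
#define STORED_OFFSET(o)    ((o) - ZSTD_REP_MOVE)
#define STORED_REPCODE(o)   ((o) + 1)

static void update_rep(uint32_t rep[ZSTD_REP_NUM], uint32_t offBase_minus1, uint32_t ll0)
{
    if (STORED_IS_OFFSET(offBase_minus1)) {        /* literal offset: push to front */
        rep[2] = rep[1];
        rep[1] = rep[0];
        rep[0] = STORED_OFFSET(offBase_minus1);
    } else {                                       /* repcode: rotate to front */
        uint32_t const repCode = STORED_REPCODE(offBase_minus1) - 1 + ll0;
        if (repCode > 0) {                         /* repCode == 0 leaves history untouched */
            uint32_t const cur = (repCode == ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
            rep[2] = (repCode >= 2) ? rep[1] : rep[2];
            rep[1] = rep[0];
            rep[0] = cur;
        }
    }
}

int main(void)
{
    uint32_t rep[ZSTD_REP_NUM] = { 4, 8, 16 };
    update_rep(rep, 100 + ZSTD_REP_MOVE, 0);       /* i.e. STORE_OFFSET(100) */
    assert(rep[0] == 100 && rep[1] == 4 && rep[2] == 8);
    update_rep(rep, 1, 0);                         /* i.e. STORE_REPCODE(2) */
    assert(rep[0] == 4 && rep[1] == 100 && rep[2] == 8);
    return 0;
}
```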

data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.c
@@ -313,19 +313,19 @@ ZSTD_encodeSequences_body(
     FSE_initCState2(&stateLitLength,  CTable_LitLength,  llCodeTable[nbSeq-1]);
     BIT_addBits(&blockStream, sequences[nbSeq-1].litLength, LL_bits[llCodeTable[nbSeq-1]]);
     if (MEM_32bits()) BIT_flushBits(&blockStream);
-    BIT_addBits(&blockStream, sequences[nbSeq-1].matchLength, ML_bits[mlCodeTable[nbSeq-1]]);
+    BIT_addBits(&blockStream, sequences[nbSeq-1].mlBase, ML_bits[mlCodeTable[nbSeq-1]]);
     if (MEM_32bits()) BIT_flushBits(&blockStream);
     if (longOffsets) {
         U32 const ofBits = ofCodeTable[nbSeq-1];
         unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
         if (extraBits) {
-            BIT_addBits(&blockStream, sequences[nbSeq-1].offset, extraBits);
+            BIT_addBits(&blockStream, sequences[nbSeq-1].offBase, extraBits);
             BIT_flushBits(&blockStream);
         }
-        BIT_addBits(&blockStream, sequences[nbSeq-1].offset >> extraBits,
+        BIT_addBits(&blockStream, sequences[nbSeq-1].offBase >> extraBits,
                     ofBits - extraBits);
     } else {
-        BIT_addBits(&blockStream, sequences[nbSeq-1].offset, ofCodeTable[nbSeq-1]);
+        BIT_addBits(&blockStream, sequences[nbSeq-1].offBase, ofCodeTable[nbSeq-1]);
     }
     BIT_flushBits(&blockStream);
 
@@ -339,8 +339,8 @@ ZSTD_encodeSequences_body(
             U32 const mlBits = ML_bits[mlCode];
             DEBUGLOG(6, "encoding: litlen:%2u - matchlen:%2u - offCode:%7u",
                         (unsigned)sequences[n].litLength,
-                        (unsigned)sequences[n].matchLength + MINMATCH,
-                        (unsigned)sequences[n].offset);
+                        (unsigned)sequences[n].mlBase + MINMATCH,
+                        (unsigned)sequences[n].offBase);
                                                                             /* 32b*/  /* 64b*/
                                                                             /* (7)*/  /* (7)*/
             FSE_encodeSymbol(&blockStream, &stateOffsetBits, ofCode);       /* 15 */  /* 15 */
@@ -351,18 +351,18 @@ ZSTD_encodeSequences_body(
                 BIT_flushBits(&blockStream);                                /* (7)*/
             BIT_addBits(&blockStream, sequences[n].litLength, llBits);
             if (MEM_32bits() && ((llBits+mlBits)>24)) BIT_flushBits(&blockStream);
-            BIT_addBits(&blockStream, sequences[n].matchLength, mlBits);
+            BIT_addBits(&blockStream, sequences[n].mlBase, mlBits);
             if (MEM_32bits() || (ofBits+mlBits+llBits > 56)) BIT_flushBits(&blockStream);
             if (longOffsets) {
                 unsigned const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN-1);
                 if (extraBits) {
-                    BIT_addBits(&blockStream, sequences[n].offset, extraBits);
+                    BIT_addBits(&blockStream, sequences[n].offBase, extraBits);
                     BIT_flushBits(&blockStream);                            /* (7)*/
                 }
-                BIT_addBits(&blockStream, sequences[n].offset >> extraBits,
+                BIT_addBits(&blockStream, sequences[n].offBase >> extraBits,
                             ofBits - extraBits);                            /* 31 */
             } else {
-                BIT_addBits(&blockStream, sequences[n].offset, ofBits);     /* 31 */
+                BIT_addBits(&blockStream, sequences[n].offBase, ofBits);    /* 31 */
             }
             BIT_flushBits(&blockStream);                                    /* (7)*/
             DEBUGLOG(7, "remaining space : %i", (int)(blockStream.endPtr - blockStream.ptr));