zstd-ruby 1.5.2.3 → 1.5.5.0
- checksums.yaml +4 -4
- data/README.md +13 -5
- data/ext/zstdruby/extconf.rb +1 -1
- data/ext/zstdruby/libzstd/common/allocations.h +55 -0
- data/ext/zstdruby/libzstd/common/bits.h +200 -0
- data/ext/zstdruby/libzstd/common/bitstream.h +19 -60
- data/ext/zstdruby/libzstd/common/compiler.h +26 -3
- data/ext/zstdruby/libzstd/common/cpu.h +1 -1
- data/ext/zstdruby/libzstd/common/debug.c +1 -1
- data/ext/zstdruby/libzstd/common/debug.h +1 -1
- data/ext/zstdruby/libzstd/common/entropy_common.c +12 -40
- data/ext/zstdruby/libzstd/common/error_private.c +9 -2
- data/ext/zstdruby/libzstd/common/error_private.h +1 -1
- data/ext/zstdruby/libzstd/common/fse.h +5 -83
- data/ext/zstdruby/libzstd/common/fse_decompress.c +7 -99
- data/ext/zstdruby/libzstd/common/huf.h +65 -156
- data/ext/zstdruby/libzstd/common/mem.h +39 -46
- data/ext/zstdruby/libzstd/common/pool.c +26 -10
- data/ext/zstdruby/libzstd/common/pool.h +7 -1
- data/ext/zstdruby/libzstd/common/portability_macros.h +22 -3
- data/ext/zstdruby/libzstd/common/threading.c +68 -14
- data/ext/zstdruby/libzstd/common/threading.h +5 -10
- data/ext/zstdruby/libzstd/common/xxhash.c +2 -2
- data/ext/zstdruby/libzstd/common/xxhash.h +8 -8
- data/ext/zstdruby/libzstd/common/zstd_common.c +1 -36
- data/ext/zstdruby/libzstd/common/zstd_deps.h +1 -1
- data/ext/zstdruby/libzstd/common/zstd_internal.h +17 -118
- data/ext/zstdruby/libzstd/common/zstd_trace.h +3 -3
- data/ext/zstdruby/libzstd/compress/clevels.h +1 -1
- data/ext/zstdruby/libzstd/compress/fse_compress.c +7 -124
- data/ext/zstdruby/libzstd/compress/hist.c +1 -1
- data/ext/zstdruby/libzstd/compress/hist.h +1 -1
- data/ext/zstdruby/libzstd/compress/huf_compress.c +234 -169
- data/ext/zstdruby/libzstd/compress/zstd_compress.c +1243 -538
- data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h +225 -151
- data/ext/zstdruby/libzstd/compress/zstd_compress_literals.c +115 -39
- data/ext/zstdruby/libzstd/compress/zstd_compress_literals.h +16 -8
- data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.c +3 -3
- data/ext/zstdruby/libzstd/compress/zstd_compress_sequences.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.c +25 -21
- data/ext/zstdruby/libzstd/compress/zstd_compress_superblock.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_cwksp.h +128 -62
- data/ext/zstdruby/libzstd/compress/zstd_double_fast.c +95 -33
- data/ext/zstdruby/libzstd/compress/zstd_double_fast.h +3 -2
- data/ext/zstdruby/libzstd/compress/zstd_fast.c +433 -148
- data/ext/zstdruby/libzstd/compress/zstd_fast.h +3 -2
- data/ext/zstdruby/libzstd/compress/zstd_lazy.c +398 -345
- data/ext/zstdruby/libzstd/compress/zstd_lazy.h +4 -2
- data/ext/zstdruby/libzstd/compress/zstd_ldm.c +5 -5
- data/ext/zstdruby/libzstd/compress/zstd_ldm.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_ldm_geartab.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstd_opt.c +106 -80
- data/ext/zstdruby/libzstd/compress/zstd_opt.h +1 -1
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.c +17 -9
- data/ext/zstdruby/libzstd/compress/zstdmt_compress.h +1 -1
- data/ext/zstdruby/libzstd/decompress/huf_decompress.c +434 -441
- data/ext/zstdruby/libzstd/decompress/huf_decompress_amd64.S +30 -39
- data/ext/zstdruby/libzstd/decompress/zstd_ddict.c +4 -4
- data/ext/zstdruby/libzstd/decompress/zstd_ddict.h +1 -1
- data/ext/zstdruby/libzstd/decompress/zstd_decompress.c +205 -80
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.c +201 -81
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_block.h +6 -1
- data/ext/zstdruby/libzstd/decompress/zstd_decompress_internal.h +4 -2
- data/ext/zstdruby/libzstd/dictBuilder/cover.c +19 -15
- data/ext/zstdruby/libzstd/dictBuilder/cover.h +1 -1
- data/ext/zstdruby/libzstd/dictBuilder/fastcover.c +2 -2
- data/ext/zstdruby/libzstd/dictBuilder/zdict.c +11 -89
- data/ext/zstdruby/libzstd/zdict.h +53 -31
- data/ext/zstdruby/libzstd/zstd.h +580 -135
- data/ext/zstdruby/libzstd/zstd_errors.h +27 -8
- data/ext/zstdruby/main.c +6 -0
- data/ext/zstdruby/skippable_frame.c +63 -0
- data/lib/zstd-ruby/version.rb +1 -1
- metadata +9 -6
--- data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h (1.5.2.3)
+++ data/ext/zstdruby/libzstd/compress/zstd_compress_internal.h (1.5.5.0)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c)
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -23,6 +23,7 @@
 #ifdef ZSTD_MULTITHREAD
 # include "zstdmt_compress.h"
 #endif
+#include "../common/bits.h" /* ZSTD_highbit32, ZSTD_NbCommonBytes */
 
 #if defined (__cplusplus)
 extern "C" {
@@ -117,12 +118,13 @@ typedef struct {
 /** ZSTD_buildBlockEntropyStats() :
  *  Builds entropy for the block.
  *  @return : 0 on success or error code */
-size_t ZSTD_buildBlockEntropyStats(
-
-
-
-
-
+size_t ZSTD_buildBlockEntropyStats(
+                    const seqStore_t* seqStorePtr,
+                    const ZSTD_entropyCTables_t* prevEntropy,
+                          ZSTD_entropyCTables_t* nextEntropy,
+                    const ZSTD_CCtx_params* cctxParams,
+                          ZSTD_entropyCTablesMetadata_t* entropyMetadata,
+                          void* workspace, size_t wkspSize);
 
 /*********************************
 *  Compression internals structs *
@@ -148,6 +150,12 @@ typedef struct {
     size_t capacity;   /* The capacity starting from `seq` pointer */
 } rawSeqStore_t;
 
+typedef struct {
+    U32 idx;             /* Index in array of ZSTD_Sequence */
+    U32 posInSequence;   /* Position within sequence at idx */
+    size_t posInSrc;     /* Number of bytes given by sequences provided so far */
+} ZSTD_sequencePosition;
+
 UNUSED_ATTR static const rawSeqStore_t kNullRawSeqStore = {NULL, 0, 0, 0, 0};
 
 typedef struct {
@@ -218,8 +226,10 @@ struct ZSTD_matchState_t {
     U32 hashLog3;            /* dispatch table for matches of len==3 : larger == faster, more memory */
 
     U32 rowHashLog;          /* For row-based matchfinder: Hashlog based on nb of rows in the hashTable.*/
-
+    BYTE* tagTable;          /* For row-based matchFinder: A row-based table containing the hashes and head index. */
     U32 hashCache[ZSTD_ROW_HASH_CACHE_SIZE]; /* For row-based matchFinder: a cache of hashes to improve speed */
+    U64 hashSalt;            /* For row-based matchFinder: salts the hash for re-use of tag table */
+    U32 hashSaltEntropy;     /* For row-based matchFinder: collects entropy for salt generation */
 
     U32* hashTable;
     U32* hashTable3;
@@ -234,6 +244,18 @@ struct ZSTD_matchState_t {
     const ZSTD_matchState_t* dictMatchState;
     ZSTD_compressionParameters cParams;
     const rawSeqStore_t* ldmSeqStore;
+
+    /* Controls prefetching in some dictMatchState matchfinders.
+     * This behavior is controlled from the cctx ms.
+     * This parameter has no effect in the cdict ms. */
+    int prefetchCDictTables;
+
+    /* When == 0, lazy match finders insert every position.
+     * When != 0, lazy match finders only insert positions they search.
+     * This allows them to skip much faster over incompressible data,
+     * at a small cost to compression ratio.
+     */
+    int lazySkipping;
 };
 
 typedef struct {
@@ -330,6 +352,24 @@ struct ZSTD_CCtx_params_s {
 
     /* Internal use, for createCCtxParams() and freeCCtxParams() only */
     ZSTD_customMem customMem;
+
+    /* Controls prefetching in some dictMatchState matchfinders */
+    ZSTD_paramSwitch_e prefetchCDictTables;
+
+    /* Controls whether zstd will fall back to an internal matchfinder
+     * if the external matchfinder returns an error code. */
+    int enableMatchFinderFallback;
+
+    /* Indicates whether an external matchfinder has been referenced.
+     * Users can't set this externally.
+     * It is set internally in ZSTD_registerSequenceProducer(). */
+    int useSequenceProducer;
+
+    /* Adjust the max block size*/
+    size_t maxBlockSize;
+
+    /* Controls repcode search in external sequence parsing */
+    ZSTD_paramSwitch_e searchForExternalRepcodes;
 };  /* typedef'd to ZSTD_CCtx_params within "zstd.h" */
 
 #define COMPRESS_SEQUENCES_WORKSPACE_SIZE (sizeof(unsigned) * (MaxSeq + 2))
@@ -361,6 +401,14 @@ typedef struct {
     ZSTD_entropyCTablesMetadata_t entropyMetadata;
 } ZSTD_blockSplitCtx;
 
+/* Context for block-level external matchfinder API */
+typedef struct {
+    void* mState;
+    ZSTD_sequenceProducer_F* mFinder;
+    ZSTD_Sequence* seqBuffer;
+    size_t seqBufferCapacity;
+} ZSTD_externalMatchCtx;
+
 struct ZSTD_CCtx_s {
     ZSTD_compressionStage_e stage;
     int cParamsChanged;      /* == 1 if cParams(except wlog) or compression level are changed in requestedParams. Triggers transmission of new params to ZSTDMT (if available) then reset to 0. */
@@ -410,6 +458,7 @@ struct ZSTD_CCtx_s {
 
     /* Stable in/out buffer verification */
     ZSTD_inBuffer expectedInBuffer;
+    size_t stableIn_notConsumed;  /* nb bytes within stable input buffer that are said to be consumed but are not */
     size_t expectedOutBufferSize;
 
     /* Dictionary */
@@ -429,9 +478,13 @@ struct ZSTD_CCtx_s {
 
     /* Workspace for block splitter */
     ZSTD_blockSplitCtx blockSplitCtx;
+
+    /* Workspace for external matchfinder */
+    ZSTD_externalMatchCtx externalMatchCtx;
 };
 
 typedef enum { ZSTD_dtlm_fast, ZSTD_dtlm_full } ZSTD_dictTableLoadMethod_e;
+typedef enum { ZSTD_tfp_forCCtx, ZSTD_tfp_forCDict } ZSTD_tableFillPurpose_e;
 
 typedef enum {
     ZSTD_noDict = 0,
@@ -453,7 +506,7 @@ typedef enum {
      * In this mode we take both the source size and the dictionary size
      * into account when selecting and adjusting the parameters.
      */
-    ZSTD_cpm_unknown = 3
+    ZSTD_cpm_unknown = 3       /* ZSTD_getCParams, ZSTD_getParams, ZSTD_adjustParams.
                                 * We don't know what these parameters are for. We default to the legacy
                                 * behavior of taking both the source size and the dict size into account
                                 * when selecting and adjusting parameters.
@@ -512,9 +565,11 @@ MEM_STATIC int ZSTD_cParam_withinBounds(ZSTD_cParameter cParam, int value)
 /* ZSTD_noCompressBlock() :
  * Writes uncompressed block to dst buffer from given src.
  * Returns the size of the block */
-MEM_STATIC size_t
+MEM_STATIC size_t
+ZSTD_noCompressBlock(void* dst, size_t dstCapacity, const void* src, size_t srcSize, U32 lastBlock)
 {
     U32 const cBlockHeader24 = lastBlock + (((U32)bt_raw)<<1) + (U32)(srcSize << 3);
+    DEBUGLOG(5, "ZSTD_noCompressBlock (srcSize=%zu, dstCapacity=%zu)", srcSize, dstCapacity);
     RETURN_ERROR_IF(srcSize + ZSTD_blockHeaderSize > dstCapacity,
                     dstSize_tooSmall, "dst buf too small for uncompressed block");
     MEM_writeLE24(dst, cBlockHeader24);
@@ -522,7 +577,8 @@ MEM_STATIC size_t ZSTD_noCompressBlock (void* dst, size_t dstCapacity, const voi
     return ZSTD_blockHeaderSize + srcSize;
 }
 
-MEM_STATIC size_t
+MEM_STATIC size_t
+ZSTD_rleCompressBlock(void* dst, size_t dstCapacity, BYTE src, size_t srcSize, U32 lastBlock)
 {
     BYTE* const op = (BYTE*)dst;
     U32 const cBlockHeader = lastBlock + (((U32)bt_rle)<<1) + (U32)(srcSize << 3);
@@ -541,7 +597,7 @@ MEM_STATIC size_t ZSTD_minGain(size_t srcSize, ZSTD_strategy strat)
 {
     U32 const minlog = (strat>=ZSTD_btultra) ? (U32)(strat) - 1 : 6;
     ZSTD_STATIC_ASSERT(ZSTD_btultra == 8);
-    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, strat));
+    assert(ZSTD_cParam_withinBounds(ZSTD_c_strategy, (int)strat));
     return (srcSize >> minlog) + 2;
 }
 
@@ -577,29 +633,27 @@ ZSTD_safecopyLiterals(BYTE* op, BYTE const* ip, BYTE const* const iend, BYTE con
     while (ip < iend) *op++ = *ip++;
 }
 
-
-#define
-#define
-#define
-#define
-#define
-#define
-#define
-#define
-#define STORED_TO_OFFBASE(o)  ((o)+1)
-#define OFFBASE_TO_STORED(o)  ((o)-1)
+
+#define REPCODE1_TO_OFFBASE   REPCODE_TO_OFFBASE(1)
+#define REPCODE2_TO_OFFBASE   REPCODE_TO_OFFBASE(2)
+#define REPCODE3_TO_OFFBASE   REPCODE_TO_OFFBASE(3)
+#define REPCODE_TO_OFFBASE(r) (assert((r)>=1), assert((r)<=ZSTD_REP_NUM), (r)) /* accepts IDs 1,2,3 */
+#define OFFSET_TO_OFFBASE(o)  (assert((o)>0), o + ZSTD_REP_NUM)
+#define OFFBASE_IS_OFFSET(o)  ((o) > ZSTD_REP_NUM)
+#define OFFBASE_IS_REPCODE(o) ( 1 <= (o) && (o) <= ZSTD_REP_NUM)
+#define OFFBASE_TO_OFFSET(o)  (assert(OFFBASE_IS_OFFSET(o)), (o) - ZSTD_REP_NUM)
+#define OFFBASE_TO_REPCODE(o) (assert(OFFBASE_IS_REPCODE(o)), (o))  /* returns ID 1,2,3 */
 
 /*! ZSTD_storeSeq() :
- *  Store a sequence (litlen, litPtr,
- * @
+ *  Store a sequence (litlen, litPtr, offBase and matchLength) into seqStore_t.
+ *  @offBase : Users should employ macros REPCODE_TO_OFFBASE() and OFFSET_TO_OFFBASE().
  *  @matchLength : must be >= MINMATCH
- *  Allowed to
+ *  Allowed to over-read literals up to litLimit.
 */
 HINT_INLINE UNUSED_ATTR void
 ZSTD_storeSeq(seqStore_t* seqStorePtr,
               size_t litLength, const BYTE* literals, const BYTE* litLimit,
-              U32
+              U32 offBase,
               size_t matchLength)
 {
     BYTE const* const litLimit_w = litLimit - WILDCOPY_OVERLENGTH;
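The macros in the hunk above fold repcodes and real match offsets into one `offBase` value: repcode IDs keep their values 1..3, while a real offset is shifted up by ZSTD_REP_NUM. Below is a minimal standalone sketch of that encoding, with the macros copied locally so it compiles on its own (illustration only, not part of the gem diff):

```c
#include <assert.h>
#include <stdio.h>

#define ZSTD_REP_NUM 3   /* number of repcode slots, as in zstd */

/* Local copies of the offBase "sum type" macros introduced in the hunk above */
#define REPCODE_TO_OFFBASE(r) (assert((r)>=1), assert((r)<=ZSTD_REP_NUM), (r))  /* repcode IDs 1..3 map to 1..3 */
#define OFFSET_TO_OFFBASE(o)  (assert((o)>0), (o) + ZSTD_REP_NUM)               /* real offsets map to offset+3 */
#define OFFBASE_IS_OFFSET(o)  ((o) > ZSTD_REP_NUM)
#define OFFBASE_TO_OFFSET(o)  (assert(OFFBASE_IS_OFFSET(o)), (o) - ZSTD_REP_NUM)

int main(void)
{
    unsigned const fromRep    = REPCODE_TO_OFFBASE(2);    /* repcode ID 2   -> offBase 2 */
    unsigned const fromOffset = OFFSET_TO_OFFBASE(1000);  /* offset 1000    -> offBase 1003 */

    printf("repcode 2   -> offBase %u (treated as offset? %d)\n",
           fromRep, OFFBASE_IS_OFFSET(fromRep));
    printf("offset 1000 -> offBase %u -> decodes back to offset %u\n",
           fromOffset, OFFBASE_TO_OFFSET(fromOffset));
    return 0;
}
```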
@@ -608,8 +662,8 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr,
         static const BYTE* g_start = NULL;
         if (g_start==NULL) g_start = (const BYTE*)literals;  /* note : index only works for compression within a single segment */
         {   U32 const pos = (U32)((const BYTE*)literals - g_start);
-            DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at
-                pos, (U32)litLength, (U32)matchLength, (U32)
+            DEBUGLOG(6, "Cpos%7u :%3u literals, match%4u bytes at offBase%7u",
+                        pos, (U32)litLength, (U32)matchLength, (U32)offBase);
     }
 #endif
     assert((size_t)(seqStorePtr->sequences - seqStorePtr->sequencesStart) < seqStorePtr->maxNbSeq);
@@ -619,9 +673,9 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr,
     assert(literals + litLength <= litLimit);
     if (litEnd <= litLimit_w) {
         /* Common case we can use wildcopy.
-
-
-
+         * First copy 16 bytes, because literals are likely short.
+         */
+        ZSTD_STATIC_ASSERT(WILDCOPY_OVERLENGTH >= 16);
         ZSTD_copy16(seqStorePtr->lit, literals);
         if (litLength > 16) {
             ZSTD_wildcopy(seqStorePtr->lit+16, literals+16, (ptrdiff_t)litLength-16, ZSTD_no_overlap);
@@ -640,7 +694,7 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr,
     seqStorePtr->sequences[0].litLength = (U16)litLength;
 
     /* match offset */
-    seqStorePtr->sequences[0].offBase =
+    seqStorePtr->sequences[0].offBase = offBase;
 
     /* match Length */
     assert(matchLength >= MINMATCH);
@@ -658,17 +712,17 @@ ZSTD_storeSeq(seqStore_t* seqStorePtr,
 
 /* ZSTD_updateRep() :
  * updates in-place @rep (array of repeat offsets)
- * @
+ * @offBase : sum-type, using numeric representation of ZSTD_storeSeq()
 */
 MEM_STATIC void
-ZSTD_updateRep(U32 rep[ZSTD_REP_NUM], U32 const
+ZSTD_updateRep(U32 rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0)
 {
-    if (
+    if (OFFBASE_IS_OFFSET(offBase)) {  /* full offset */
         rep[2] = rep[1];
         rep[1] = rep[0];
-        rep[0] =
+        rep[0] = OFFBASE_TO_OFFSET(offBase);
     } else {   /* repcode */
-        U32 const repCode =
+        U32 const repCode = OFFBASE_TO_REPCODE(offBase) - 1 + ll0;
         if (repCode > 0) {  /* note : if repCode==0, no change */
             U32 const currentOffset = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
             rep[2] = (repCode >= 2) ? rep[1] : rep[2];
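ZSTD_updateRep() above maintains the three-slot repeat-offset history: a real offset pushes the history down, a repcode promotes the selected slot to the front. The sketch below mirrors that update rule against the same offBase convention, as a standalone illustration (not library code):

```c
#include <stdio.h>

#define ZSTD_REP_NUM 3
#define OFFBASE_IS_OFFSET(o)  ((o) > ZSTD_REP_NUM)
#define OFFBASE_TO_OFFSET(o)  ((o) - ZSTD_REP_NUM)
#define OFFBASE_TO_REPCODE(o) (o)   /* asserts omitted in this sketch */

/* Mirrors the update rule shown in the hunk above.
 * ll0 is 1 when the sequence has zero literals, which shifts repcode meaning by one. */
static void updateRep(unsigned rep[ZSTD_REP_NUM], unsigned offBase, unsigned ll0)
{
    if (OFFBASE_IS_OFFSET(offBase)) {          /* full offset */
        rep[2] = rep[1];
        rep[1] = rep[0];
        rep[0] = OFFBASE_TO_OFFSET(offBase);
    } else {                                   /* repcode */
        unsigned const repCode = OFFBASE_TO_REPCODE(offBase) - 1 + ll0;
        if (repCode > 0) {                     /* repCode==0 leaves the history unchanged */
            unsigned const current = (repCode==ZSTD_REP_NUM) ? (rep[0] - 1) : rep[repCode];
            rep[2] = (repCode >= 2) ? rep[1] : rep[2];
            rep[1] = rep[0];
            rep[0] = current;
        }
    }
}

int main(void)
{
    unsigned rep[ZSTD_REP_NUM] = { 1, 4, 8 };  /* arbitrary starting history */
    updateRep(rep, 100 + ZSTD_REP_NUM, 0);     /* store a real offset of 100 */
    printf("after offset 100 : %u %u %u\n", rep[0], rep[1], rep[2]);
    updateRep(rep, 2, 0);                      /* re-use repcode 2 (second slot) */
    printf("after repcode 2  : %u %u %u\n", rep[0], rep[1], rep[2]);
    return 0;
}
```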
@@ -685,11 +739,11 @@ typedef struct repcodes_s {
 } repcodes_t;
 
 MEM_STATIC repcodes_t
-ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const
+ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase, U32 const ll0)
 {
     repcodes_t newReps;
     ZSTD_memcpy(&newReps, rep, sizeof(newReps));
-    ZSTD_updateRep(newReps.rep,
+    ZSTD_updateRep(newReps.rep, offBase, ll0);
     return newReps;
 }
 
@@ -697,103 +751,6 @@ ZSTD_newRep(U32 const rep[ZSTD_REP_NUM], U32 const offBase_minus1, U32 const ll0
 /*-*************************************
 *  Match length counter
 ***************************************/
-static unsigned ZSTD_NbCommonBytes (size_t val)
-{
-    if (MEM_isLittleEndian()) {
-        if (MEM_64bits()) {
-#       if defined(_MSC_VER) && defined(_WIN64)
-#           if STATIC_BMI2
-                return _tzcnt_u64(val) >> 3;
-#           else
-                if (val != 0) {
-                    unsigned long r;
-                    _BitScanForward64(&r, (U64)val);
-                    return (unsigned)(r >> 3);
-                } else {
-                    /* Should not reach this code path */
-                    __assume(0);
-                }
-#           endif
-#       elif defined(__GNUC__) && (__GNUC__ >= 4)
-            return (__builtin_ctzll((U64)val) >> 3);
-#       else
-            static const int DeBruijnBytePos[64] = { 0, 0, 0, 0, 0, 1, 1, 2,
-                                                     0, 3, 1, 3, 1, 4, 2, 7,
-                                                     0, 2, 3, 6, 1, 5, 3, 5,
-                                                     1, 3, 4, 4, 2, 5, 6, 7,
-                                                     7, 0, 1, 2, 3, 3, 4, 6,
-                                                     2, 6, 5, 5, 3, 4, 5, 6,
-                                                     7, 1, 2, 4, 6, 4, 4, 5,
-                                                     7, 2, 6, 5, 7, 6, 7, 7 };
-            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
-#       endif
-        } else { /* 32 bits */
-#       if defined(_MSC_VER)
-            if (val != 0) {
-                unsigned long r;
-                _BitScanForward(&r, (U32)val);
-                return (unsigned)(r >> 3);
-            } else {
-                /* Should not reach this code path */
-                __assume(0);
-            }
-#       elif defined(__GNUC__) && (__GNUC__ >= 3)
-            return (__builtin_ctz((U32)val) >> 3);
-#       else
-            static const int DeBruijnBytePos[32] = { 0, 0, 3, 0, 3, 1, 3, 0,
-                                                     3, 2, 2, 1, 3, 2, 0, 1,
-                                                     3, 3, 1, 2, 2, 2, 2, 0,
-                                                     3, 1, 2, 0, 1, 0, 1, 1 };
-            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
-#       endif
-        }
-    } else {  /* Big Endian CPU */
-        if (MEM_64bits()) {
-#       if defined(_MSC_VER) && defined(_WIN64)
-#           if STATIC_BMI2
-                return _lzcnt_u64(val) >> 3;
-#           else
-                if (val != 0) {
-                    unsigned long r;
-                    _BitScanReverse64(&r, (U64)val);
-                    return (unsigned)(r >> 3);
-                } else {
-                    /* Should not reach this code path */
-                    __assume(0);
-                }
-#           endif
-#       elif defined(__GNUC__) && (__GNUC__ >= 4)
-            return (__builtin_clzll(val) >> 3);
-#       else
-            unsigned r;
-            const unsigned n32 = sizeof(size_t)*4;   /* calculate this way due to compiler complaining in 32-bits mode */
-            if (!(val>>n32)) { r=4; } else { r=0; val>>=n32; }
-            if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
-            r += (!val);
-            return r;
-#       endif
-        } else { /* 32 bits */
-#       if defined(_MSC_VER)
-            if (val != 0) {
-                unsigned long r;
-                _BitScanReverse(&r, (unsigned long)val);
-                return (unsigned)(r >> 3);
-            } else {
-                /* Should not reach this code path */
-                __assume(0);
-            }
-#       elif defined(__GNUC__) && (__GNUC__ >= 3)
-            return (__builtin_clz((U32)val) >> 3);
-#       else
-            unsigned r;
-            if (!(val>>16)) { r=2; val>>=8; } else { r=0; val>>=24; }
-            r += (!val);
-            return r;
-#       endif
-    }   }
-}
-
-
 MEM_STATIC size_t ZSTD_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* const pInLimit)
 {
     const BYTE* const pStart = pIn;
@@ -839,32 +796,43 @@ ZSTD_count_2segments(const BYTE* ip, const BYTE* match,
 *  Hashes
 ***************************************/
 static const U32 prime3bytes = 506832829U;
-static U32 ZSTD_hash3(U32 u, U32 h) { return ((u << (32-24)) * prime3bytes)  >> (32-h) ; }
-MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h); } /* only in zstd_opt.h */
+static U32 ZSTD_hash3(U32 u, U32 h, U32 s) { assert(h <= 32); return (((u << (32-24)) * prime3bytes) ^ s)  >> (32-h) ; }
+MEM_STATIC size_t ZSTD_hash3Ptr(const void* ptr, U32 h) { return ZSTD_hash3(MEM_readLE32(ptr), h, 0); } /* only in zstd_opt.h */
+MEM_STATIC size_t ZSTD_hash3PtrS(const void* ptr, U32 h, U32 s) { return ZSTD_hash3(MEM_readLE32(ptr), h, s); }
 
 static const U32 prime4bytes = 2654435761U;
-static U32 ZSTD_hash4(U32 u, U32 h) { return (u * prime4bytes) >> (32-h) ; }
-static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(
+static U32 ZSTD_hash4(U32 u, U32 h, U32 s) { assert(h <= 32); return ((u * prime4bytes) ^ s) >> (32-h) ; }
+static size_t ZSTD_hash4Ptr(const void* ptr, U32 h) { return ZSTD_hash4(MEM_readLE32(ptr), h, 0); }
+static size_t ZSTD_hash4PtrS(const void* ptr, U32 h, U32 s) { return ZSTD_hash4(MEM_readLE32(ptr), h, s); }
 
 static const U64 prime5bytes = 889523592379ULL;
-static size_t ZSTD_hash5(U64 u, U32 h) { return (size_t)(((u  << (64-40)) * prime5bytes) >> (64-h)) ; }
-static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h); }
+static size_t ZSTD_hash5(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u  << (64-40)) * prime5bytes) ^ s) >> (64-h)) ; }
+static size_t ZSTD_hash5Ptr(const void* p, U32 h) { return ZSTD_hash5(MEM_readLE64(p), h, 0); }
+static size_t ZSTD_hash5PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash5(MEM_readLE64(p), h, s); }
 
 static const U64 prime6bytes = 227718039650203ULL;
-static size_t ZSTD_hash6(U64 u, U32 h) { return (size_t)(((u  << (64-48)) * prime6bytes) >> (64-h)) ; }
-static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h); }
+static size_t ZSTD_hash6(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u  << (64-48)) * prime6bytes) ^ s) >> (64-h)) ; }
+static size_t ZSTD_hash6Ptr(const void* p, U32 h) { return ZSTD_hash6(MEM_readLE64(p), h, 0); }
+static size_t ZSTD_hash6PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash6(MEM_readLE64(p), h, s); }
 
 static const U64 prime7bytes = 58295818150454627ULL;
-static size_t ZSTD_hash7(U64 u, U32 h) { return (size_t)(((u  << (64-56)) * prime7bytes) >> (64-h)) ; }
-static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h); }
+static size_t ZSTD_hash7(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u  << (64-56)) * prime7bytes) ^ s) >> (64-h)) ; }
+static size_t ZSTD_hash7Ptr(const void* p, U32 h) { return ZSTD_hash7(MEM_readLE64(p), h, 0); }
+static size_t ZSTD_hash7PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash7(MEM_readLE64(p), h, s); }
 
 static const U64 prime8bytes = 0xCF1BBCDCB7A56463ULL;
-static size_t ZSTD_hash8(U64 u, U32 h) { return (size_t)(((u) * prime8bytes) >> (64-h)) ; }
-static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h); }
+static size_t ZSTD_hash8(U64 u, U32 h, U64 s) { assert(h <= 64); return (size_t)((((u) * prime8bytes) ^ s) >> (64-h)) ; }
+static size_t ZSTD_hash8Ptr(const void* p, U32 h) { return ZSTD_hash8(MEM_readLE64(p), h, 0); }
+static size_t ZSTD_hash8PtrS(const void* p, U32 h, U64 s) { return ZSTD_hash8(MEM_readLE64(p), h, s); }
+
 
 MEM_STATIC FORCE_INLINE_ATTR
 size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
 {
+    /* Although some of these hashes do support hBits up to 64, some do not.
+     * To be on the safe side, always avoid hBits > 32. */
+    assert(hBits <= 32);
+
     switch(mls)
     {
     default:
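The salted hash variants above XOR a salt into the multiplicative hash before keeping the top hBits, which lets the row-based matchfinder re-map its tag table for reuse instead of clearing it. Below is a standalone sketch of that scheme; the prime and shift mirror ZSTD_hash4 from the hunk above, while the salt value here is an arbitrary example:

```c
#include <stdint.h>
#include <stdio.h>

static const uint32_t prime4bytes = 2654435761U;

/* Same scheme as ZSTD_hash4 above: multiply, XOR in the salt, keep the top hBits.
 * A salt of 0 reproduces the unsalted hash. */
static uint32_t hash4(uint32_t u, uint32_t hBits, uint32_t salt)
{
    return ((u * prime4bytes) ^ salt) >> (32 - hBits);
}

int main(void)
{
    uint32_t const word  = 0x12345678u;
    uint32_t const hBits = 17;
    printf("unsalted hash : %u\n", hash4(word, hBits, 0));
    printf("salted hash   : %u\n", hash4(word, hBits, 0x9E3779B9u)); /* arbitrary example salt */
    return 0;
}
```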
@@ -876,6 +844,24 @@ size_t ZSTD_hashPtr(const void* p, U32 hBits, U32 mls)
     }
 }
 
+MEM_STATIC FORCE_INLINE_ATTR
+size_t ZSTD_hashPtrSalted(const void* p, U32 hBits, U32 mls, const U64 hashSalt) {
+    /* Although some of these hashes do support hBits up to 64, some do not.
+     * To be on the safe side, always avoid hBits > 32. */
+    assert(hBits <= 32);
+
+    switch(mls)
+    {
+    default:
+    case 4: return ZSTD_hash4PtrS(p, hBits, (U32)hashSalt);
+    case 5: return ZSTD_hash5PtrS(p, hBits, hashSalt);
+    case 6: return ZSTD_hash6PtrS(p, hBits, hashSalt);
+    case 7: return ZSTD_hash7PtrS(p, hBits, hashSalt);
+    case 8: return ZSTD_hash8PtrS(p, hBits, hashSalt);
+    }
+}
+
+
 /** ZSTD_ipow() :
  * Return base^exponent.
  */
@@ -1223,10 +1209,15 @@ ZSTD_checkDictValidity(const ZSTD_window_t* window,
                   (unsigned)blockEndIdx, (unsigned)maxDist, (unsigned)loadedDictEnd);
         assert(blockEndIdx >= loadedDictEnd);
 
-        if (blockEndIdx > loadedDictEnd + maxDist) {
+        if (blockEndIdx > loadedDictEnd + maxDist || loadedDictEnd != window->dictLimit) {
             /* On reaching window size, dictionaries are invalidated.
              * For simplification, if window size is reached anywhere within next block,
              * the dictionary is invalidated for the full block.
+             *
+             * We also have to invalidate the dictionary if ZSTD_window_update() has detected
+             * non-contiguous segments, which means that loadedDictEnd != window->dictLimit.
+             * loadedDictEnd may be 0, if forceWindow is true, but in that case we never use
+             * dictMatchState, so setting it to NULL is not a problem.
              */
             DEBUGLOG(6, "invalidating dictionary for current block (distance > windowSize)");
             *loadedDictEndPtr = 0;
@@ -1358,6 +1349,42 @@ MEM_STATIC void ZSTD_debugTable(const U32* table, U32 max)
 
 #endif
 
+/* Short Cache */
+
+/* Normally, zstd matchfinders follow this flow:
+ *     1. Compute hash at ip
+ *     2. Load index from hashTable[hash]
+ *     3. Check if *ip == *(base + index)
+ * In dictionary compression, loading *(base + index) is often an L2 or even L3 miss.
+ *
+ * Short cache is an optimization which allows us to avoid step 3 most of the time
+ * when the data doesn't actually match. With short cache, the flow becomes:
+ *     1. Compute (hash, currentTag) at ip. currentTag is an 8-bit independent hash at ip.
+ *     2. Load (index, matchTag) from hashTable[hash]. See ZSTD_writeTaggedIndex to understand how this works.
+ *     3. Only if currentTag == matchTag, check *ip == *(base + index). Otherwise, continue.
+ *
+ * Currently, short cache is only implemented in CDict hashtables. Thus, its use is limited to
+ * dictMatchState matchfinders.
+ */
+#define ZSTD_SHORT_CACHE_TAG_BITS 8
+#define ZSTD_SHORT_CACHE_TAG_MASK ((1u << ZSTD_SHORT_CACHE_TAG_BITS) - 1)
+
+/* Helper function for ZSTD_fillHashTable and ZSTD_fillDoubleHashTable.
+ * Unpacks hashAndTag into (hash, tag), then packs (index, tag) into hashTable[hash]. */
+MEM_STATIC void ZSTD_writeTaggedIndex(U32* const hashTable, size_t hashAndTag, U32 index) {
+    size_t const hash = hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS;
+    U32 const tag = (U32)(hashAndTag & ZSTD_SHORT_CACHE_TAG_MASK);
+    assert(index >> (32 - ZSTD_SHORT_CACHE_TAG_BITS) == 0);
+    hashTable[hash] = (index << ZSTD_SHORT_CACHE_TAG_BITS) | tag;
+}
+
+/* Helper function for short cache matchfinders.
+ * Unpacks tag1 and tag2 from lower bits of packedTag1 and packedTag2, then checks if the tags match. */
+MEM_STATIC int ZSTD_comparePackedTags(size_t packedTag1, size_t packedTag2) {
+    U32 const tag1 = packedTag1 & ZSTD_SHORT_CACHE_TAG_MASK;
+    U32 const tag2 = packedTag2 & ZSTD_SHORT_CACHE_TAG_MASK;
+    return tag1 == tag2;
+}
 
 #if defined (__cplusplus)
 }
@@ -1455,4 +1482,51 @@ U32 ZSTD_cycleLog(U32 hashLog, ZSTD_strategy strat);
  */
 void ZSTD_CCtx_trace(ZSTD_CCtx* cctx, size_t extraCSize);
 
+/* Returns 0 on success, and a ZSTD_error otherwise. This function scans through an array of
+ * ZSTD_Sequence, storing the sequences it finds, until it reaches a block delimiter.
+ * Note that the block delimiter must include the last literals of the block.
+ */
+size_t
+ZSTD_copySequencesToSeqStoreExplicitBlockDelim(ZSTD_CCtx* cctx,
+                                               ZSTD_sequencePosition* seqPos,
+                                               const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+                                               const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch);
+
+/* Returns the number of bytes to move the current read position back by.
+ * Only non-zero if we ended up splitting a sequence.
+ * Otherwise, it may return a ZSTD error if something went wrong.
+ *
+ * This function will attempt to scan through blockSize bytes
+ * represented by the sequences in @inSeqs,
+ * storing any (partial) sequences.
+ *
+ * Occasionally, we may want to change the actual number of bytes we consumed from inSeqs to
+ * avoid splitting a match, or to avoid splitting a match such that it would produce a match
+ * smaller than MINMATCH. In this case, we return the number of bytes that we didn't read from this block.
+ */
+size_t
+ZSTD_copySequencesToSeqStoreNoBlockDelim(ZSTD_CCtx* cctx, ZSTD_sequencePosition* seqPos,
+                                         const ZSTD_Sequence* const inSeqs, size_t inSeqsSize,
+                                         const void* src, size_t blockSize, ZSTD_paramSwitch_e externalRepSearch);
+
+
+/* ===============================================================
+ * Deprecated definitions that are still used internally to avoid
+ * deprecation warnings. These functions are exactly equivalent to
+ * their public variants, but avoid the deprecation warnings.
+ * =============================================================== */
+
+size_t ZSTD_compressBegin_usingCDict_deprecated(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
+
+size_t ZSTD_compressContinue_public(ZSTD_CCtx* cctx,
+                                    void* dst, size_t dstCapacity,
+                                    const void* src, size_t srcSize);
+
+size_t ZSTD_compressEnd_public(ZSTD_CCtx* cctx,
+                               void* dst, size_t dstCapacity,
+                               const void* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_deprecated(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
+
+
 #endif  /* ZSTD_COMPRESS_H */
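Both declarations above walk an application-supplied array of ZSTD_Sequence; in the explicit-block-delimiter mode the array ends with a delimiter entry whose litLength covers the block's trailing literals. The delimiter marker (offset == 0 and matchLength == 0) is taken from zstd's public sequence API rather than from this diff, so treat the sketch below as an assumption-laden illustration only; it uses a local struct that mirrors ZSTD_Sequence's field order and does not call the library:

```c
#include <stdio.h>

/* Field layout mirrors zstd's ZSTD_Sequence (offset, litLength, matchLength, rep);
 * defined locally so this sketch stands alone. */
typedef struct { unsigned offset, litLength, matchLength, rep; } Seq;

int main(void)
{
    /* Source: "abcabcabcXYZ" (12 bytes). One match of length 6 at distance 3
     * after 3 literals, then a block delimiter carrying the last 3 literals.
     * offset==0 && matchLength==0 marks the delimiter, and its litLength must
     * cover the block's trailing literals, as the comment above requires. */
    Seq const seqs[] = {
        { 3, 3, 6, 0 },   /* 3 literals "abc", then copy 6 bytes from distance 3 */
        { 0, 3, 0, 0 },   /* block delimiter: last 3 literals "XYZ" */
    };
    unsigned total = 0;
    for (unsigned i = 0; i < sizeof(seqs)/sizeof(seqs[0]); i++)
        total += seqs[i].litLength + seqs[i].matchLength;
    printf("bytes described by the sequence array: %u\n", total);  /* 12 */
    return 0;
}
```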