zstdlib 0.7.0-x86-mingw32 → 0.8.0-x86-mingw32
- checksums.yaml +4 -4
- data/CHANGES.md +5 -0
- data/ext/zstdlib/extconf.rb +1 -1
- data/ext/zstdlib/ruby/zlib-3.0/zstdlib.c +4994 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/bitstream.h +25 -16
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/compiler.h +118 -4
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/cpu.h +1 -3
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/debug.c +1 -1
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/debug.h +12 -19
- data/ext/zstdlib/zstd-1.5.0/lib/common/entropy_common.c +362 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/error_private.c +2 -1
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/error_private.h +3 -3
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/fse.h +40 -12
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/fse_decompress.c +139 -22
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/huf.h +29 -7
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/mem.h +69 -98
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/pool.c +23 -17
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/pool.h +2 -2
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/threading.c +6 -5
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/threading.h +0 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/xxhash.c +20 -60
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/xxhash.h +2 -2
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/zstd_common.c +10 -10
- data/ext/zstdlib/zstd-1.5.0/lib/common/zstd_deps.h +111 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/common/zstd_internal.h +105 -62
- data/ext/zstdlib/zstd-1.5.0/lib/common/zstd_trace.h +154 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/fse_compress.c +31 -24
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/hist.c +27 -29
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/hist.h +2 -2
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/huf_compress.c +265 -126
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress.c +2843 -728
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_internal.h +305 -63
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_literals.c +8 -8
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_literals.h +1 -1
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_sequences.c +29 -7
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_sequences.h +1 -1
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_superblock.c +22 -295
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_compress_superblock.h +1 -1
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_cwksp.h +204 -67
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_double_fast.c +25 -25
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_double_fast.h +1 -1
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_fast.c +23 -23
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_fast.h +1 -1
- data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_lazy.c +2184 -0
- data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_lazy.h +125 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_ldm.c +314 -211
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_ldm.h +9 -2
- data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_ldm_geartab.h +103 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_opt.c +191 -46
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_opt.h +1 -1
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstdmt_compress.c +93 -415
- data/ext/zstdlib/zstd-1.5.0/lib/compress/zstdmt_compress.h +110 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/huf_decompress.c +342 -239
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/zstd_ddict.c +9 -9
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/zstd_ddict.h +2 -2
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/zstd_decompress.c +369 -87
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/zstd_decompress_block.c +191 -75
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/zstd_decompress_block.h +6 -3
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/decompress/zstd_decompress_internal.h +27 -11
- data/ext/zstdlib/zstd-1.5.0/lib/zdict.h +452 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/zstd.h +568 -126
- data/ext/zstdlib/{zstd-1.4.5/lib/common → zstd-1.5.0/lib}/zstd_errors.h +2 -1
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/gzclose.c +0 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/gzcompatibility.h +1 -1
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/gzguts.h +0 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/gzlib.c +0 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/gzread.c +0 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/gzwrite.c +0 -0
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/zstd_zlibwrapper.c +126 -44
- data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/zlibWrapper/zstd_zlibwrapper.h +1 -1
- data/lib/2.2/zstdlib.so +0 -0
- data/lib/2.3/zstdlib.so +0 -0
- data/lib/2.4/zstdlib.so +0 -0
- data/lib/2.5/zstdlib.so +0 -0
- data/lib/2.6/zstdlib.so +0 -0
- data/lib/2.7/zstdlib.so +0 -0
- metadata +69 -64
- data/ext/zstdlib/zstd-1.4.5/lib/common/entropy_common.c +0 -216
- data/ext/zstdlib/zstd-1.4.5/lib/compress/zstd_lazy.c +0 -1138
- data/ext/zstdlib/zstd-1.4.5/lib/compress/zstd_lazy.h +0 -67
- data/ext/zstdlib/zstd-1.4.5/lib/compress/zstdmt_compress.h +0 -192
data/ext/zstdlib/zstd-1.5.0/lib/compress/zstd_lazy.h (new file)
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) Yann Collet, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under both the BSD-style license (found in the
+ * LICENSE file in the root directory of this source tree) and the GPLv2 (found
+ * in the COPYING file in the root directory of this source tree).
+ * You may select, at your option, one of the above-listed licenses.
+ */
+
+#ifndef ZSTD_LAZY_H
+#define ZSTD_LAZY_H
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "zstd_compress_internal.h"
+
+/**
+ * Dedicated Dictionary Search Structure bucket log. In the
+ * ZSTD_dedicatedDictSearch mode, the hashTable has
+ * 2 ** ZSTD_LAZY_DDSS_BUCKET_LOG entries in each bucket, rather than just
+ * one.
+ */
+#define ZSTD_LAZY_DDSS_BUCKET_LOG 2
+
+U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip);
+void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip);
+
+void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip);
+
+void ZSTD_preserveUnsortedMark (U32* const table, U32 const size, U32 const reducerValue);  /*! used in ZSTD_reduceIndex(). preemptively increase value of ZSTD_DUBT_UNSORTED_MARK */
+
+size_t ZSTD_compressBlock_btlazy2(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_row(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_row(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_row(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_btlazy2_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_dictMatchState(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_dictMatchState_row(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_dictMatchState_row(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+
+size_t ZSTD_compressBlock_greedy_extDict(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_extDict(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_extDict(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_greedy_extDict_row(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy_extDict_row(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_lazy2_extDict_row(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+size_t ZSTD_compressBlock_btlazy2_extDict(
+        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+        void const* src, size_t srcSize);
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* ZSTD_LAZY_H */
data/ext/zstdlib/{zstd-1.4.5 → zstd-1.5.0}/lib/compress/zstd_ldm.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c)
+ * Copyright (c) Yann Collet, Facebook, Inc.
  * All rights reserved.
  *
  * This source code is licensed under both the BSD-style license (found in the
@@ -11,13 +11,126 @@
 #include "zstd_ldm.h"

 #include "../common/debug.h"
+#include "../common/xxhash.h"
 #include "zstd_fast.h" /* ZSTD_fillHashTable() */
 #include "zstd_double_fast.h" /* ZSTD_fillDoubleHashTable() */
+#include "zstd_ldm_geartab.h"

 #define LDM_BUCKET_SIZE_LOG 3
 #define LDM_MIN_MATCH_LENGTH 64
 #define LDM_HASH_RLOG 7
-
+
+typedef struct {
+    U64 rolling;
+    U64 stopMask;
+} ldmRollingHashState_t;
+
+/** ZSTD_ldm_gear_init():
+ *
+ * Initializes the rolling hash state such that it will honor the
+ * settings in params. */
+static void ZSTD_ldm_gear_init(ldmRollingHashState_t* state, ldmParams_t const* params)
+{
+    unsigned maxBitsInMask = MIN(params->minMatchLength, 64);
+    unsigned hashRateLog = params->hashRateLog;
+
+    state->rolling = ~(U32)0;
+
+    /* The choice of the splitting criterion is subject to two conditions:
+     *   1. it has to trigger on average every 2^(hashRateLog) bytes;
+     *   2. ideally, it has to depend on a window of minMatchLength bytes.
+     *
+     * In the gear hash algorithm, bit n depends on the last n bytes;
+     * so in order to obtain a good quality splitting criterion it is
+     * preferable to use bits with high weight.
+     *
+     * To match condition 1 we use a mask with hashRateLog bits set
+     * and, because of the previous remark, we make sure these bits
+     * have the highest possible weight while still respecting
+     * condition 2.
+     */
+    if (hashRateLog > 0 && hashRateLog <= maxBitsInMask) {
+        state->stopMask = (((U64)1 << hashRateLog) - 1) << (maxBitsInMask - hashRateLog);
+    } else {
+        /* In this degenerate case we simply honor the hash rate. */
+        state->stopMask = ((U64)1 << hashRateLog) - 1;
+    }
+}
+
+/** ZSTD_ldm_gear_reset()
+ * Feeds [data, data + minMatchLength) into the hash without registering any
+ * splits. This effectively resets the hash state. This is used when skipping
+ * over data, either at the beginning of a block, or skipping sections.
+ */
+static void ZSTD_ldm_gear_reset(ldmRollingHashState_t* state,
+                                BYTE const* data, size_t minMatchLength)
+{
+    U64 hash = state->rolling;
+    size_t n = 0;
+
+#define GEAR_ITER_ONCE() do { \
+        hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
+        n += 1; \
+    } while (0)
+    while (n + 3 < minMatchLength) {
+        GEAR_ITER_ONCE();
+        GEAR_ITER_ONCE();
+        GEAR_ITER_ONCE();
+        GEAR_ITER_ONCE();
+    }
+    while (n < minMatchLength) {
+        GEAR_ITER_ONCE();
+    }
+#undef GEAR_ITER_ONCE
+}
+
+/** ZSTD_ldm_gear_feed():
+ *
+ * Registers in the splits array all the split points found in the first
+ * size bytes following the data pointer. This function terminates when
+ * either all the data has been processed or LDM_BATCH_SIZE splits are
+ * present in the splits array.
+ *
+ * Precondition: The splits array must not be full.
+ * Returns: The number of bytes processed. */
+static size_t ZSTD_ldm_gear_feed(ldmRollingHashState_t* state,
+                                 BYTE const* data, size_t size,
+                                 size_t* splits, unsigned* numSplits)
+{
+    size_t n;
+    U64 hash, mask;
+
+    hash = state->rolling;
+    mask = state->stopMask;
+    n = 0;
+
+#define GEAR_ITER_ONCE() do { \
+        hash = (hash << 1) + ZSTD_ldm_gearTab[data[n] & 0xff]; \
+        n += 1; \
+        if (UNLIKELY((hash & mask) == 0)) { \
+            splits[*numSplits] = n; \
+            *numSplits += 1; \
+            if (*numSplits == LDM_BATCH_SIZE) \
+                goto done; \
+        } \
+    } while (0)
+
+    while (n + 3 < size) {
+        GEAR_ITER_ONCE();
+        GEAR_ITER_ONCE();
+        GEAR_ITER_ONCE();
+        GEAR_ITER_ONCE();
+    }
+    while (n < size) {
+        GEAR_ITER_ONCE();
+    }
+
+#undef GEAR_ITER_ONCE
+
+done:
+    state->rolling = hash;
+    return n;
+}

 void ZSTD_ldm_adjustParameters(ldmParams_t* params,
                                ZSTD_compressionParameters const* cParams)
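Note: zstd 1.5.0 replaces the 1.4.5 multiplicative rolling hash with this gear hash for choosing LDM split points. As a rough illustration only (not the upstream code), a self-contained sketch of the same split-point criterion might look as follows; the gear table values here are made-up placeholders (zstd ships a fixed table in zstd_ldm_geartab.h) and the helper names are hypothetical.

#include <stddef.h>
#include <stdint.h>

static uint64_t gearTab[256];           /* placeholder for ZSTD_ldm_gearTab */

static void gear_tab_init(void)         /* any fixed pseudo-random constants work */
{
    uint64_t x = 0;
    for (int i = 0; i < 256; i++) {     /* splitmix64-style generator */
        x += 0x9E3779B97F4A7C15ULL;
        uint64_t z = x;
        z = (z ^ (z >> 30)) * 0xBF58476D1CE4E5B9ULL;
        z = (z ^ (z >> 27)) * 0x94D049BB133111EBULL;
        gearTab[i] = z ^ (z >> 31);
    }
}

/* Scan `data` and record the positions where the rolling gear hash hits the
 * stop mask, i.e. where the hashRateLog highest bits of the last
 * min(minMatchLength, 64)-bit window are all zero -- the same criterion built
 * by ZSTD_ldm_gear_init() and tested in ZSTD_ldm_gear_feed() above.
 * Assumes 0 < hashRateLog <= min(minMatchLength, 64). Call gear_tab_init() once first. */
static size_t gear_find_splits(const uint8_t* data, size_t size,
                               unsigned minMatchLength, unsigned hashRateLog,
                               size_t* splits, size_t maxSplits)
{
    unsigned const maxBits = minMatchLength < 64 ? minMatchLength : 64;
    uint64_t const stopMask =
        (((uint64_t)1 << hashRateLog) - 1) << (maxBits - hashRateLog);
    uint64_t hash = 0;
    size_t numSplits = 0;

    for (size_t n = 0; n < size && numSplits < maxSplits; n++) {
        hash = (hash << 1) + gearTab[data[n]];
        if ((hash & stopMask) == 0)     /* triggers on average every 2^hashRateLog bytes */
            splits[numSplits++] = n + 1;
    }
    return numSplits;
}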
@@ -27,13 +140,6 @@ void ZSTD_ldm_adjustParameters(ldmParams_t* params,
     DEBUGLOG(4, "ZSTD_ldm_adjustParameters");
     if (!params->bucketSizeLog) params->bucketSizeLog = LDM_BUCKET_SIZE_LOG;
     if (!params->minMatchLength) params->minMatchLength = LDM_MIN_MATCH_LENGTH;
-    if (cParams->strategy >= ZSTD_btopt) {
-      /* Get out of the way of the optimal parser */
-      U32 const minMatch = MAX(cParams->targetLength, params->minMatchLength);
-      assert(minMatch >= ZSTD_LDM_MINMATCH_MIN);
-      assert(minMatch <= ZSTD_LDM_MINMATCH_MAX);
-      params->minMatchLength = minMatch;
-    }
     if (params->hashLog == 0) {
         params->hashLog = MAX(ZSTD_HASHLOG_MIN, params->windowLog - LDM_HASH_RLOG);
         assert(params->hashLog <= ZSTD_HASHLOG_MAX);
@@ -61,41 +167,6 @@ size_t ZSTD_ldm_getMaxNbSeq(ldmParams_t params, size_t maxChunkSize)
     return params.enableLdm ? (maxChunkSize / params.minMatchLength) : 0;
 }

-/** ZSTD_ldm_getSmallHash() :
- *  numBits should be <= 32
- *  If numBits==0, returns 0.
- *  @return : the most significant numBits of value. */
-static U32 ZSTD_ldm_getSmallHash(U64 value, U32 numBits)
-{
-    assert(numBits <= 32);
-    return numBits == 0 ? 0 : (U32)(value >> (64 - numBits));
-}
-
-/** ZSTD_ldm_getChecksum() :
- *  numBitsToDiscard should be <= 32
- *  @return : the next most significant 32 bits after numBitsToDiscard */
-static U32 ZSTD_ldm_getChecksum(U64 hash, U32 numBitsToDiscard)
-{
-    assert(numBitsToDiscard <= 32);
-    return (hash >> (64 - 32 - numBitsToDiscard)) & 0xFFFFFFFF;
-}
-
-/** ZSTD_ldm_getTag() ;
- *  Given the hash, returns the most significant numTagBits bits
- *  after (32 + hbits) bits.
- *
- *  If there are not enough bits remaining, return the last
- *  numTagBits bits. */
-static U32 ZSTD_ldm_getTag(U64 hash, U32 hbits, U32 numTagBits)
-{
-    assert(numTagBits < 32 && hbits <= 32);
-    if (32 - hbits < numTagBits) {
-        return hash & (((U32)1 << numTagBits) - 1);
-    } else {
-        return (hash >> (32 - hbits - numTagBits)) & (((U32)1 << numTagBits) - 1);
-    }
-}
-
 /** ZSTD_ldm_getBucket() :
  *  Returns a pointer to the start of the bucket associated with hash. */
 static ldmEntry_t* ZSTD_ldm_getBucket(
@@ -110,38 +181,12 @@ static void ZSTD_ldm_insertEntry(ldmState_t* ldmState,
                                  size_t const hash, const ldmEntry_t entry,
                                  ldmParams_t const ldmParams)
 {
-    BYTE* const
-
-
-
-
+    BYTE* const pOffset = ldmState->bucketOffsets + hash;
+    unsigned const offset = *pOffset;
+
+    *(ZSTD_ldm_getBucket(ldmState, hash, ldmParams) + offset) = entry;
+    *pOffset = (BYTE)((offset + 1) & ((1u << ldmParams.bucketSizeLog) - 1));

-/** ZSTD_ldm_makeEntryAndInsertByTag() :
- *
- *  Gets the small hash, checksum, and tag from the rollingHash.
- *
- *  If the tag matches (1 << ldmParams.hashRateLog)-1, then
- *  creates an ldmEntry from the offset, and inserts it into the hash table.
- *
- *  hBits is the length of the small hash, which is the most significant hBits
- *  of rollingHash. The checksum is the next 32 most significant bits, followed
- *  by ldmParams.hashRateLog bits that make up the tag. */
-static void ZSTD_ldm_makeEntryAndInsertByTag(ldmState_t* ldmState,
-                                             U64 const rollingHash,
-                                             U32 const hBits,
-                                             U32 const offset,
-                                             ldmParams_t const ldmParams)
-{
-    U32 const tag = ZSTD_ldm_getTag(rollingHash, hBits, ldmParams.hashRateLog);
-    U32 const tagMask = ((U32)1 << ldmParams.hashRateLog) - 1;
-    if (tag == tagMask) {
-        U32 const hash = ZSTD_ldm_getSmallHash(rollingHash, hBits);
-        U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
-        ldmEntry_t entry;
-        entry.offset = offset;
-        entry.checksum = checksum;
-        ZSTD_ldm_insertEntry(ldmState, hash, entry, ldmParams);
-    }
 }

 /** ZSTD_ldm_countBackwardsMatch() :
@@ -150,10 +195,10 @@ static void ZSTD_ldm_makeEntryAndInsertByTag(ldmState_t* ldmState,
  *  We count only bytes where pMatch >= pBase and pIn >= pAnchor. */
 static size_t ZSTD_ldm_countBackwardsMatch(
             const BYTE* pIn, const BYTE* pAnchor,
-            const BYTE* pMatch, const BYTE*
+            const BYTE* pMatch, const BYTE* pMatchBase)
 {
     size_t matchLength = 0;
-    while (pIn > pAnchor && pMatch >
+    while (pIn > pAnchor && pMatch > pMatchBase && pIn[-1] == pMatch[-1]) {
         pIn--;
         pMatch--;
         matchLength++;
@@ -161,6 +206,27 @@ static size_t ZSTD_ldm_countBackwardsMatch(
     return matchLength;
 }

+/** ZSTD_ldm_countBackwardsMatch_2segments() :
+ *  Returns the number of bytes that match backwards from pMatch,
+ *  even with the backwards match spanning 2 different segments.
+ *
+ *  On reaching `pMatchBase`, start counting from mEnd */
+static size_t ZSTD_ldm_countBackwardsMatch_2segments(
+                    const BYTE* pIn, const BYTE* pAnchor,
+                    const BYTE* pMatch, const BYTE* pMatchBase,
+                    const BYTE* pExtDictStart, const BYTE* pExtDictEnd)
+{
+    size_t matchLength = ZSTD_ldm_countBackwardsMatch(pIn, pAnchor, pMatch, pMatchBase);
+    if (pMatch - matchLength != pMatchBase || pMatchBase == pExtDictStart) {
+        /* If backwards match is entirely in the extDict or prefix, immediately return */
+        return matchLength;
+    }
+    DEBUGLOG(7, "ZSTD_ldm_countBackwardsMatch_2segments: found 2-parts backwards match (length in prefix==%zu)", matchLength);
+    matchLength += ZSTD_ldm_countBackwardsMatch(pIn - matchLength, pAnchor, pExtDictEnd, pExtDictStart);
+    DEBUGLOG(7, "final backwards match length = %zu", matchLength);
+    return matchLength;
+}
+
 /** ZSTD_ldm_fillFastTables() :
  *
  *  Fills the relevant tables for the ZSTD_fast and ZSTD_dfast strategies.
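Note: the helper added above extends the backward extension of a long-distance match across the extDict/prefix boundary. As a minimal illustration with simplified types and a hypothetical name, the single-segment count it builds on is just:

#include <stddef.h>

/* Count how many bytes immediately before `in` also match immediately before
 * `match`, without walking past `anchor` (data already emitted) or `matchBase`
 * (start of the segment the match lives in). Mirrors the loop in
 * ZSTD_ldm_countBackwardsMatch above. */
static size_t count_backwards(const unsigned char* in, const unsigned char* anchor,
                              const unsigned char* match, const unsigned char* matchBase)
{
    size_t len = 0;
    while (in > anchor && match > matchBase && in[-1] == match[-1]) {
        in--;
        match--;
        len++;
    }
    return len;
}

The 2-segment variant first runs this count against the prefix; if the count stopped exactly because it reached the prefix start (and the match was not already in the extDict), it resumes counting from the end of the extDict segment.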
@@ -198,43 +264,42 @@ static size_t ZSTD_ldm_fillFastTables(ZSTD_matchState_t* ms,
     return 0;
 }

-/** ZSTD_ldm_fillLdmHashTable() :
- *
- *  Fills hashTable from (lastHashed + 1) to iend (non-inclusive).
- *  lastHash is the rolling hash that corresponds to lastHashed.
- *
- *  Returns the rolling hash corresponding to position iend-1. */
-static U64 ZSTD_ldm_fillLdmHashTable(ldmState_t* state,
-                                     U64 lastHash, const BYTE* lastHashed,
-                                     const BYTE* iend, const BYTE* base,
-                                     U32 hBits, ldmParams_t const ldmParams)
-{
-    U64 rollingHash = lastHash;
-    const BYTE* cur = lastHashed + 1;
-
-    while (cur < iend) {
-        rollingHash = ZSTD_rollingHash_rotate(rollingHash, cur[-1],
-                                              cur[ldmParams.minMatchLength-1],
-                                              state->hashPower);
-        ZSTD_ldm_makeEntryAndInsertByTag(state,
-                                         rollingHash, hBits,
-                                         (U32)(cur - base), ldmParams);
-        ++cur;
-    }
-    return rollingHash;
-}
-
 void ZSTD_ldm_fillHashTable(
-            ldmState_t*
+            ldmState_t* ldmState, const BYTE* ip,
             const BYTE* iend, ldmParams_t const* params)
 {
+    U32 const minMatchLength = params->minMatchLength;
+    U32 const hBits = params->hashLog - params->bucketSizeLog;
+    BYTE const* const base = ldmState->window.base;
+    BYTE const* const istart = ip;
+    ldmRollingHashState_t hashState;
+    size_t* const splits = ldmState->splitIndices;
+    unsigned numSplits;
+
     DEBUGLOG(5, "ZSTD_ldm_fillHashTable");
-
-
-
-
-
-
+
+    ZSTD_ldm_gear_init(&hashState, params);
+    while (ip < iend) {
+        size_t hashed;
+        unsigned n;
+
+        numSplits = 0;
+        hashed = ZSTD_ldm_gear_feed(&hashState, ip, iend - ip, splits, &numSplits);
+
+        for (n = 0; n < numSplits; n++) {
+            if (ip + splits[n] >= istart + minMatchLength) {
+                BYTE const* const split = ip + splits[n] - minMatchLength;
+                U64 const xxhash = XXH64(split, minMatchLength, 0);
+                U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
+                ldmEntry_t entry;
+
+                entry.offset = (U32)(split - base);
+                entry.checksum = (U32)(xxhash >> 32);
+                ZSTD_ldm_insertEntry(ldmState, hash, entry, *params);
+            }
+        }
+
+        ip += hashed;
     }
 }

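Note: in the new fillHashTable above, each candidate position is hashed with XXH64 over a minMatchLength-byte window; the low hBits of that value index the hash table and the top 32 bits become the entry checksum, and ZSTD_ldm_insertEntry() (earlier hunk) writes entries into a small round-robin bucket. A self-contained sketch of just that bookkeeping, with made-up sizes and simplified types, could look like this:

#include <stddef.h>
#include <stdint.h>

typedef struct { uint32_t offset; uint32_t checksum; } entry_t;

/* Hypothetical fixed sizes for illustration; zstd derives them at runtime
 * from ldmParams_t (hashLog, bucketSizeLog). */
#define HASH_LOG        16
#define BUCKET_SIZE_LOG 3
static entry_t hashTable[(size_t)1 << HASH_LOG];                     /* 2^(HASH_LOG-3) buckets of 8 entries */
static uint8_t bucketOffsets[(size_t)1 << (HASH_LOG - BUCKET_SIZE_LOG)];

static void insert_candidate(uint64_t xxhash, uint32_t offset)
{
    unsigned const hBits    = HASH_LOG - BUCKET_SIZE_LOG;
    uint32_t const hash     = (uint32_t)(xxhash & (((uint32_t)1 << hBits) - 1));
    uint32_t const checksum = (uint32_t)(xxhash >> 32);   /* cheap filter before full match check */
    entry_t* const bucket   = hashTable + ((size_t)hash << BUCKET_SIZE_LOG);
    uint8_t  const slot     = bucketOffsets[hash];

    bucket[slot].offset   = offset;
    bucket[slot].checksum = checksum;
    /* round-robin: next insert into this bucket overwrites the oldest slot */
    bucketOffsets[hash] = (uint8_t)((slot + 1) & ((1u << BUCKET_SIZE_LOG) - 1));
}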
@@ -246,10 +311,10 @@ void ZSTD_ldm_fillHashTable(
  *  (after a long match, only update tables a limited amount). */
 static void ZSTD_ldm_limitTableUpdate(ZSTD_matchState_t* ms, const BYTE* anchor)
 {
-    U32 const
-    if (
+    U32 const curr = (U32)(anchor - ms->window.base);
+    if (curr > ms->nextToUpdate + 1024) {
         ms->nextToUpdate =
-
+            curr - MIN(512, curr - ms->nextToUpdate - 1024);
     }
 }

@@ -260,11 +325,8 @@ static size_t ZSTD_ldm_generateSequences_internal(
     /* LDM parameters */
     int const extDict = ZSTD_window_hasExtDict(ldmState->window);
     U32 const minMatchLength = params->minMatchLength;
-
+    U32 const entsPerBucket = 1U << params->bucketSizeLog;
     U32 const hBits = params->hashLog - params->bucketSizeLog;
-    U32 const ldmBucketSize = 1U << params->bucketSizeLog;
-    U32 const hashRateLog = params->hashRateLog;
-    U32 const ldmTagMask = (1U << params->hashRateLog) - 1;
     /* Prefix and extDict parameters */
     U32 const dictLimit = ldmState->window.dictLimit;
     U32 const lowestIndex = extDict ? ldmState->window.lowLimit : dictLimit;
@@ -276,45 +338,69 @@ static size_t ZSTD_ldm_generateSequences_internal(
     /* Input bounds */
     BYTE const* const istart = (BYTE const*)src;
     BYTE const* const iend = istart + srcSize;
-    BYTE const* const ilimit = iend -
+    BYTE const* const ilimit = iend - HASH_READ_SIZE;
     /* Input positions */
     BYTE const* anchor = istart;
     BYTE const* ip = istart;
-    /* Rolling hash */
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    /* Rolling hash state */
+    ldmRollingHashState_t hashState;
+    /* Arrays for staged-processing */
+    size_t* const splits = ldmState->splitIndices;
+    ldmMatchCandidate_t* const candidates = ldmState->matchCandidates;
+    unsigned numSplits;
+
+    if (srcSize < minMatchLength)
+        return iend - anchor;
+
+    /* Initialize the rolling hash state with the first minMatchLength bytes */
+    ZSTD_ldm_gear_init(&hashState, params);
+    ZSTD_ldm_gear_reset(&hashState, ip, minMatchLength);
+    ip += minMatchLength;
+
+    while (ip < ilimit) {
+        size_t hashed;
+        unsigned n;
+
+        numSplits = 0;
+        hashed = ZSTD_ldm_gear_feed(&hashState, ip, ilimit - ip,
+                                    splits, &numSplits);
+
+        for (n = 0; n < numSplits; n++) {
+            BYTE const* const split = ip + splits[n] - minMatchLength;
+            U64 const xxhash = XXH64(split, minMatchLength, 0);
+            U32 const hash = (U32)(xxhash & (((U32)1 << hBits) - 1));
+
+            candidates[n].split = split;
+            candidates[n].hash = hash;
+            candidates[n].checksum = (U32)(xxhash >> 32);
+            candidates[n].bucket = ZSTD_ldm_getBucket(ldmState, hash, *params);
+            PREFETCH_L1(candidates[n].bucket);
         }
-        lastHashed = ip;

-
-
-
-
-
+        for (n = 0; n < numSplits; n++) {
+            size_t forwardMatchLength = 0, backwardMatchLength = 0,
+                   bestMatchLength = 0, mLength;
+            U32 offset;
+            BYTE const* const split = candidates[n].split;
+            U32 const checksum = candidates[n].checksum;
+            U32 const hash = candidates[n].hash;
+            ldmEntry_t* const bucket = candidates[n].bucket;
+            ldmEntry_t const* cur;
+            ldmEntry_t const* bestEntry = NULL;
+            ldmEntry_t newEntry;
+
+            newEntry.offset = (U32)(split - base);
+            newEntry.checksum = checksum;
+
+            /* If a split point would generate a sequence overlapping with
+             * the previous one, we merely register it in the hash table and
+             * move on */
+            if (split < anchor) {
+                ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
+                continue;
+            }

-
-        {
-            ldmEntry_t* const bucket =
-                ZSTD_ldm_getBucket(ldmState,
-                                   ZSTD_ldm_getSmallHash(rollingHash, hBits),
-                                   *params);
-            ldmEntry_t* cur;
-            size_t bestMatchLength = 0;
-            U32 const checksum = ZSTD_ldm_getChecksum(rollingHash, hBits);
-
-            for (cur = bucket; cur < bucket + ldmBucketSize; ++cur) {
+            for (cur = bucket; cur < bucket + entsPerBucket; cur++) {
                 size_t curForwardMatchLength, curBackwardMatchLength,
                     curTotalMatchLength;
                 if (cur->checksum != checksum || cur->offset <= lowestIndex) {
@@ -328,30 +414,23 @@ static size_t ZSTD_ldm_generateSequences_internal(
                     cur->offset < dictLimit ? dictEnd : iend;
                 BYTE const* const lowMatchPtr =
                     cur->offset < dictLimit ? dictStart : lowPrefixPtr;
-
-
-                    ip, pMatch, iend,
-                    matchEnd, lowPrefixPtr);
+                curForwardMatchLength =
+                    ZSTD_count_2segments(split, pMatch, iend, matchEnd, lowPrefixPtr);
                 if (curForwardMatchLength < minMatchLength) {
                     continue;
                 }
-                curBackwardMatchLength =
-
-                    lowMatchPtr);
-                curTotalMatchLength = curForwardMatchLength +
-                    curBackwardMatchLength;
+                curBackwardMatchLength = ZSTD_ldm_countBackwardsMatch_2segments(
+                        split, anchor, pMatch, lowMatchPtr, dictStart, dictEnd);
             } else { /* !extDict */
                 BYTE const* const pMatch = base + cur->offset;
-                curForwardMatchLength = ZSTD_count(
+                curForwardMatchLength = ZSTD_count(split, pMatch, iend);
                 if (curForwardMatchLength < minMatchLength) {
                     continue;
                 }
                 curBackwardMatchLength =
-                    ZSTD_ldm_countBackwardsMatch(
-                        lowPrefixPtr);
-                curTotalMatchLength = curForwardMatchLength +
-                    curBackwardMatchLength;
+                    ZSTD_ldm_countBackwardsMatch(split, anchor, pMatch, lowPrefixPtr);
             }
+            curTotalMatchLength = curForwardMatchLength + curBackwardMatchLength;

             if (curTotalMatchLength > bestMatchLength) {
                 bestMatchLength = curTotalMatchLength;
@@ -360,57 +439,54 @@ static size_t ZSTD_ldm_generateSequences_internal(
                 bestEntry = cur;
             }
         }
-        }
-
-        /* No match found -- continue searching */
-        if (bestEntry == NULL) {
-            ZSTD_ldm_makeEntryAndInsertByTag(ldmState, rollingHash,
-                                             hBits, current,
-                                             *params);
-            ip++;
-            continue;
-        }

-
-
-
+            /* No match found -- insert an entry into the hash table
+             * and process the next candidate match */
+            if (bestEntry == NULL) {
+                ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);
+                continue;
+            }

-
-
-
-
-
-
-
-
-
-
-
-
-
-            seq->offset = offset;
-            rawSeqStore->size++;
-        }
+            /* Match found */
+            offset = (U32)(split - base) - bestEntry->offset;
+            mLength = forwardMatchLength + backwardMatchLength;
+            {
+                rawSeq* const seq = rawSeqStore->seq + rawSeqStore->size;
+
+                /* Out of sequence storage */
+                if (rawSeqStore->size == rawSeqStore->capacity)
+                    return ERROR(dstSize_tooSmall);
+                seq->litLength = (U32)(split - backwardMatchLength - anchor);
+                seq->matchLength = (U32)mLength;
+                seq->offset = offset;
+                rawSeqStore->size++;
+            }

-
-
-
-                                         *params);
+            /* Insert the current entry into the hash table --- it must be
+             * done after the previous block to avoid clobbering bestEntry */
+            ZSTD_ldm_insertEntry(ldmState, hash, newEntry, *params);

-
+            anchor = split + forwardMatchLength;

-
-
-
-
-
-
-
+            /* If we find a match that ends after the data that we've hashed
+             * then we have a repeating, overlapping, pattern. E.g. all zeros.
+             * If one repetition of the pattern matches our `stopMask` then all
+             * repetitions will. We don't need to insert them all into out table,
+             * only the first one. So skip over overlapping matches.
+             * This is a major speed boost (20x) for compressing a single byte
+             * repeated, when that byte ends up in the table.
+             */
+            if (anchor > ip + hashed) {
+                ZSTD_ldm_gear_reset(&hashState, anchor - minMatchLength, minMatchLength);
+                /* Continue the outter loop at anchor (ip + hashed == anchor). */
+                ip = anchor - hashed;
+                break;
+            }
         }
-
-
+
+        ip += hashed;
     }
+
     return iend - anchor;
 }

@@ -459,7 +535,7 @@ size_t ZSTD_ldm_generateSequences(

         assert(chunkStart < iend);
         /* 1. Perform overflow correction if necessary. */
-        if (ZSTD_window_needOverflowCorrection(ldmState->window, chunkEnd)) {
+        if (ZSTD_window_needOverflowCorrection(ldmState->window, 0, maxDist, ldmState->loadedDictEnd, chunkStart, chunkEnd)) {
             U32 const ldmHSize = 1U << params->hashLog;
             U32 const correction = ZSTD_window_correctOverflow(
                 &ldmState->window, /* cycleLog */ 0, maxDist, chunkStart);
@@ -562,14 +638,32 @@ static rawSeq maybeSplitSequence(rawSeqStore_t* rawSeqStore,
     return sequence;
 }

+void ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore_t* rawSeqStore, size_t nbBytes) {
+    U32 currPos = (U32)(rawSeqStore->posInSequence + nbBytes);
+    while (currPos && rawSeqStore->pos < rawSeqStore->size) {
+        rawSeq currSeq = rawSeqStore->seq[rawSeqStore->pos];
+        if (currPos >= currSeq.litLength + currSeq.matchLength) {
+            currPos -= currSeq.litLength + currSeq.matchLength;
+            rawSeqStore->pos++;
+        } else {
+            rawSeqStore->posInSequence = currPos;
+            break;
+        }
+    }
+    if (currPos == 0 || rawSeqStore->pos == rawSeqStore->size) {
+        rawSeqStore->posInSequence = 0;
+    }
+}
+
 size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
     ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
+    ZSTD_useRowMatchFinderMode_e useRowMatchFinder,
     void const* src, size_t srcSize)
 {
     const ZSTD_compressionParameters* const cParams = &ms->cParams;
     unsigned const minMatch = cParams->minMatch;
     ZSTD_blockCompressor const blockCompressor =
-        ZSTD_selectBlockCompressor(cParams->strategy, ZSTD_matchState_dictMode(ms));
+        ZSTD_selectBlockCompressor(cParams->strategy, useRowMatchFinder, ZSTD_matchState_dictMode(ms));
     /* Input bounds */
     BYTE const* const istart = (BYTE const*)src;
     BYTE const* const iend = istart + srcSize;
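Note: ZSTD_ldm_skipRawSeqStoreBytes() above advances the raw sequence store by a byte count without emitting anything; the btopt path in the next hunk uses it to mark a whole block's worth of LDM output as consumed. As a worked example with hypothetical numbers: starting from pos = 0 and posInSequence = 0 with two stored sequences of (litLength 10, matchLength 90) and (litLength 5, matchLength 95), skipping 120 bytes consumes the first sequence entirely (100 bytes, so pos becomes 1) and leaves posInSequence = 20 inside the second; skipping 200 bytes or more consumes both and resets posInSequence to 0.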
@@ -577,9 +671,18 @@ size_t ZSTD_ldm_blockCompress(rawSeqStore_t* rawSeqStore,
     BYTE const* ip = istart;

     DEBUGLOG(5, "ZSTD_ldm_blockCompress: srcSize=%zu", srcSize);
+    /* If using opt parser, use LDMs only as candidates rather than always accepting them */
+    if (cParams->strategy >= ZSTD_btopt) {
+        size_t lastLLSize;
+        ms->ldmSeqStore = rawSeqStore;
+        lastLLSize = blockCompressor(ms, seqStore, rep, src, srcSize);
+        ZSTD_ldm_skipRawSeqStoreBytes(rawSeqStore, srcSize);
+        return lastLLSize;
+    }
+
     assert(rawSeqStore->pos <= rawSeqStore->size);
     assert(rawSeqStore->size <= rawSeqStore->capacity);
-    /* Loop through each sequence and apply the block compressor to the
+    /* Loop through each sequence and apply the block compressor to the literals */
     while (rawSeqStore->pos < rawSeqStore->size && ip < iend) {
         /* maybeSplitSequence updates rawSeqStore->pos */
         rawSeq const sequence = maybeSplitSequence(rawSeqStore,