deflate-ruby 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CLAUDE.md +138 -0
- data/LICENSE.txt +21 -0
- data/README.md +117 -0
- data/ext/deflate_ruby/deflate_ruby.c +301 -0
- data/ext/deflate_ruby/extconf.rb +34 -0
- data/ext/deflate_ruby/libdeflate/CMakeLists.txt +270 -0
- data/ext/deflate_ruby/libdeflate/COPYING +22 -0
- data/ext/deflate_ruby/libdeflate/NEWS.md +494 -0
- data/ext/deflate_ruby/libdeflate/README.md +228 -0
- data/ext/deflate_ruby/libdeflate/common_defs.h +747 -0
- data/ext/deflate_ruby/libdeflate/lib/adler32.c +162 -0
- data/ext/deflate_ruby/libdeflate/lib/arm/adler32_impl.h +358 -0
- data/ext/deflate_ruby/libdeflate/lib/arm/cpu_features.c +230 -0
- data/ext/deflate_ruby/libdeflate/lib/arm/cpu_features.h +214 -0
- data/ext/deflate_ruby/libdeflate/lib/arm/crc32_impl.h +600 -0
- data/ext/deflate_ruby/libdeflate/lib/arm/crc32_pmull_helpers.h +156 -0
- data/ext/deflate_ruby/libdeflate/lib/arm/crc32_pmull_wide.h +226 -0
- data/ext/deflate_ruby/libdeflate/lib/arm/matchfinder_impl.h +78 -0
- data/ext/deflate_ruby/libdeflate/lib/bt_matchfinder.h +342 -0
- data/ext/deflate_ruby/libdeflate/lib/cpu_features_common.h +93 -0
- data/ext/deflate_ruby/libdeflate/lib/crc32.c +262 -0
- data/ext/deflate_ruby/libdeflate/lib/crc32_multipliers.h +377 -0
- data/ext/deflate_ruby/libdeflate/lib/crc32_tables.h +587 -0
- data/ext/deflate_ruby/libdeflate/lib/decompress_template.h +777 -0
- data/ext/deflate_ruby/libdeflate/lib/deflate_compress.c +4129 -0
- data/ext/deflate_ruby/libdeflate/lib/deflate_compress.h +15 -0
- data/ext/deflate_ruby/libdeflate/lib/deflate_constants.h +56 -0
- data/ext/deflate_ruby/libdeflate/lib/deflate_decompress.c +1208 -0
- data/ext/deflate_ruby/libdeflate/lib/gzip_compress.c +90 -0
- data/ext/deflate_ruby/libdeflate/lib/gzip_constants.h +45 -0
- data/ext/deflate_ruby/libdeflate/lib/gzip_decompress.c +144 -0
- data/ext/deflate_ruby/libdeflate/lib/hc_matchfinder.h +401 -0
- data/ext/deflate_ruby/libdeflate/lib/ht_matchfinder.h +234 -0
- data/ext/deflate_ruby/libdeflate/lib/lib_common.h +106 -0
- data/ext/deflate_ruby/libdeflate/lib/matchfinder_common.h +224 -0
- data/ext/deflate_ruby/libdeflate/lib/riscv/matchfinder_impl.h +97 -0
- data/ext/deflate_ruby/libdeflate/lib/utils.c +141 -0
- data/ext/deflate_ruby/libdeflate/lib/x86/adler32_impl.h +134 -0
- data/ext/deflate_ruby/libdeflate/lib/x86/adler32_template.h +518 -0
- data/ext/deflate_ruby/libdeflate/lib/x86/cpu_features.c +183 -0
- data/ext/deflate_ruby/libdeflate/lib/x86/cpu_features.h +169 -0
- data/ext/deflate_ruby/libdeflate/lib/x86/crc32_impl.h +160 -0
- data/ext/deflate_ruby/libdeflate/lib/x86/crc32_pclmul_template.h +495 -0
- data/ext/deflate_ruby/libdeflate/lib/x86/decompress_impl.h +57 -0
- data/ext/deflate_ruby/libdeflate/lib/x86/matchfinder_impl.h +122 -0
- data/ext/deflate_ruby/libdeflate/lib/zlib_compress.c +82 -0
- data/ext/deflate_ruby/libdeflate/lib/zlib_constants.h +21 -0
- data/ext/deflate_ruby/libdeflate/lib/zlib_decompress.c +104 -0
- data/ext/deflate_ruby/libdeflate/libdeflate-config.cmake.in +3 -0
- data/ext/deflate_ruby/libdeflate/libdeflate.h +411 -0
- data/ext/deflate_ruby/libdeflate/libdeflate.pc.in +18 -0
- data/ext/deflate_ruby/libdeflate/programs/CMakeLists.txt +105 -0
- data/ext/deflate_ruby/libdeflate/programs/benchmark.c +696 -0
- data/ext/deflate_ruby/libdeflate/programs/checksum.c +218 -0
- data/ext/deflate_ruby/libdeflate/programs/config.h.in +19 -0
- data/ext/deflate_ruby/libdeflate/programs/gzip.c +688 -0
- data/ext/deflate_ruby/libdeflate/programs/prog_util.c +521 -0
- data/ext/deflate_ruby/libdeflate/programs/prog_util.h +225 -0
- data/ext/deflate_ruby/libdeflate/programs/test_checksums.c +200 -0
- data/ext/deflate_ruby/libdeflate/programs/test_custom_malloc.c +155 -0
- data/ext/deflate_ruby/libdeflate/programs/test_incomplete_codes.c +385 -0
- data/ext/deflate_ruby/libdeflate/programs/test_invalid_streams.c +130 -0
- data/ext/deflate_ruby/libdeflate/programs/test_litrunlen_overflow.c +72 -0
- data/ext/deflate_ruby/libdeflate/programs/test_overread.c +95 -0
- data/ext/deflate_ruby/libdeflate/programs/test_slow_decompression.c +472 -0
- data/ext/deflate_ruby/libdeflate/programs/test_trailing_bytes.c +151 -0
- data/ext/deflate_ruby/libdeflate/programs/test_util.c +237 -0
- data/ext/deflate_ruby/libdeflate/programs/test_util.h +61 -0
- data/ext/deflate_ruby/libdeflate/programs/tgetopt.c +118 -0
- data/ext/deflate_ruby/libdeflate/scripts/android_build.sh +118 -0
- data/ext/deflate_ruby/libdeflate/scripts/android_tests.sh +69 -0
- data/ext/deflate_ruby/libdeflate/scripts/benchmark.sh +10 -0
- data/ext/deflate_ruby/libdeflate/scripts/checksum.sh +10 -0
- data/ext/deflate_ruby/libdeflate/scripts/checksum_benchmarks.sh +253 -0
- data/ext/deflate_ruby/libdeflate/scripts/cmake-helper.sh +17 -0
- data/ext/deflate_ruby/libdeflate/scripts/deflate_benchmarks.sh +119 -0
- data/ext/deflate_ruby/libdeflate/scripts/exec_tests.sh +38 -0
- data/ext/deflate_ruby/libdeflate/scripts/gen-release-archives.sh +37 -0
- data/ext/deflate_ruby/libdeflate/scripts/gen_bitreverse_tab.py +19 -0
- data/ext/deflate_ruby/libdeflate/scripts/gen_crc32_multipliers.c +199 -0
- data/ext/deflate_ruby/libdeflate/scripts/gen_crc32_tables.c +105 -0
- data/ext/deflate_ruby/libdeflate/scripts/gen_default_litlen_costs.py +44 -0
- data/ext/deflate_ruby/libdeflate/scripts/gen_offset_slot_map.py +29 -0
- data/ext/deflate_ruby/libdeflate/scripts/gzip_tests.sh +523 -0
- data/ext/deflate_ruby/libdeflate/scripts/libFuzzer/deflate_compress/corpus/0 +0 -0
- data/ext/deflate_ruby/libdeflate/scripts/libFuzzer/deflate_compress/fuzz.c +95 -0
- data/ext/deflate_ruby/libdeflate/scripts/libFuzzer/deflate_decompress/corpus/0 +3 -0
- data/ext/deflate_ruby/libdeflate/scripts/libFuzzer/deflate_decompress/fuzz.c +62 -0
- data/ext/deflate_ruby/libdeflate/scripts/libFuzzer/fuzz.sh +108 -0
- data/ext/deflate_ruby/libdeflate/scripts/libFuzzer/gzip_decompress/corpus/0 +0 -0
- data/ext/deflate_ruby/libdeflate/scripts/libFuzzer/gzip_decompress/fuzz.c +19 -0
- data/ext/deflate_ruby/libdeflate/scripts/libFuzzer/zlib_decompress/corpus/0 +3 -0
- data/ext/deflate_ruby/libdeflate/scripts/libFuzzer/zlib_decompress/fuzz.c +19 -0
- data/ext/deflate_ruby/libdeflate/scripts/run_tests.sh +416 -0
- data/ext/deflate_ruby/libdeflate/scripts/toolchain-i686-w64-mingw32.cmake +8 -0
- data/ext/deflate_ruby/libdeflate/scripts/toolchain-x86_64-w64-mingw32.cmake +8 -0
- data/lib/deflate_ruby/version.rb +5 -0
- data/lib/deflate_ruby.rb +71 -0
- metadata +191 -0
|
@@ -0,0 +1,342 @@
|
|
|
1
|
+
/*
|
|
2
|
+
* bt_matchfinder.h - Lempel-Ziv matchfinding with a hash table of binary trees
|
|
3
|
+
*
|
|
4
|
+
* Copyright 2016 Eric Biggers
|
|
5
|
+
*
|
|
6
|
+
* Permission is hereby granted, free of charge, to any person
|
|
7
|
+
* obtaining a copy of this software and associated documentation
|
|
8
|
+
* files (the "Software"), to deal in the Software without
|
|
9
|
+
* restriction, including without limitation the rights to use,
|
|
10
|
+
* copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
11
|
+
* copies of the Software, and to permit persons to whom the
|
|
12
|
+
* Software is furnished to do so, subject to the following
|
|
13
|
+
* conditions:
|
|
14
|
+
*
|
|
15
|
+
* The above copyright notice and this permission notice shall be
|
|
16
|
+
* included in all copies or substantial portions of the Software.
|
|
17
|
+
*
|
|
18
|
+
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
19
|
+
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
|
20
|
+
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
21
|
+
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
|
22
|
+
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
|
23
|
+
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
24
|
+
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
|
25
|
+
* OTHER DEALINGS IN THE SOFTWARE.
|
|
26
|
+
*
|
|
27
|
+
* ----------------------------------------------------------------------------
|
|
28
|
+
*
|
|
29
|
+
* This is a Binary Trees (bt) based matchfinder.
|
|
30
|
+
*
|
|
31
|
+
* The main data structure is a hash table where each hash bucket contains a
|
|
32
|
+
* binary tree of sequences whose first 4 bytes share the same hash code. Each
|
|
33
|
+
* sequence is identified by its starting position in the input buffer. Each
|
|
34
|
+
* binary tree is always sorted such that each left child represents a sequence
|
|
35
|
+
* lexicographically lesser than its parent and each right child represents a
|
|
36
|
+
* sequence lexicographically greater than its parent.
|
|
37
|
+
*
|
|
38
|
+
* The algorithm processes the input buffer sequentially. At each byte
|
|
39
|
+
* position, the hash code of the first 4 bytes of the sequence beginning at
|
|
40
|
+
* that position (the sequence being matched against) is computed. This
|
|
41
|
+
* identifies the hash bucket to use for that position. Then, a new binary tree
|
|
42
|
+
* node is created to represent the current sequence. Then, in a single tree
|
|
43
|
+
* traversal, the hash bucket's binary tree is searched for matches and is
|
|
44
|
+
* re-rooted at the new node.
|
|
45
|
+
*
|
|
46
|
+
* Compared to the simpler algorithm that uses linked lists instead of binary
|
|
47
|
+
* trees (see hc_matchfinder.h), the binary tree version gains more information
|
|
48
|
+
* at each node visitation. Ideally, the binary tree version will examine only
|
|
49
|
+
* 'log(n)' nodes to find the same matches that the linked list version will
|
|
50
|
+
* find by examining 'n' nodes. In addition, the binary tree version can
|
|
51
|
+
* examine fewer bytes at each node by taking advantage of the common prefixes
|
|
52
|
+
* that result from the sort order, whereas the linked list version may have to
|
|
53
|
+
* examine up to the full length of the match at each node.
|
|
54
|
+
*
|
|
55
|
+
* However, it is not always best to use the binary tree version. It requires
|
|
56
|
+
* nearly twice as much memory as the linked list version, and it takes time to
|
|
57
|
+
* keep the binary trees sorted, even at positions where the compressor does not
|
|
58
|
+
* need matches. Generally, when doing fast compression on small buffers,
|
|
59
|
+
* binary trees are the wrong approach. They are best suited for thorough
|
|
60
|
+
* compression and/or large buffers.
|
|
61
|
+
*
|
|
62
|
+
* ----------------------------------------------------------------------------
|
|
63
|
+
*/
|
|
64
|
+
|
|
65
|
+
#ifndef LIB_BT_MATCHFINDER_H
|
|
66
|
+
#define LIB_BT_MATCHFINDER_H
|
|
67
|
+
|
|
68
|
+
#include "matchfinder_common.h"
|
|
69
|
+
|
|
70
|
+
#define BT_MATCHFINDER_HASH3_ORDER 16
|
|
71
|
+
#define BT_MATCHFINDER_HASH3_WAYS 2
|
|
72
|
+
#define BT_MATCHFINDER_HASH4_ORDER 16
|
|
73
|
+
|
|
74
|
+
#define BT_MATCHFINDER_TOTAL_HASH_SIZE \
|
|
75
|
+
(((1UL << BT_MATCHFINDER_HASH3_ORDER) * BT_MATCHFINDER_HASH3_WAYS + \
|
|
76
|
+
(1UL << BT_MATCHFINDER_HASH4_ORDER)) * sizeof(mf_pos_t))
|
|
77
|
+
|
|
78
|
+
/* Representation of a match found by the bt_matchfinder */
|
|
79
|
+
struct lz_match {
|
|
80
|
+
|
|
81
|
+
/* The number of bytes matched. */
|
|
82
|
+
u16 length;
|
|
83
|
+
|
|
84
|
+
/* The offset back from the current position that was matched. */
|
|
85
|
+
u16 offset;
|
|
86
|
+
};
|
|
87
|
+
|
|
88
|
+
struct MATCHFINDER_ALIGNED bt_matchfinder {
|
|
89
|
+
|
|
90
|
+
/* The hash table for finding length 3 matches */
|
|
91
|
+
mf_pos_t hash3_tab[1UL << BT_MATCHFINDER_HASH3_ORDER][BT_MATCHFINDER_HASH3_WAYS];
|
|
92
|
+
|
|
93
|
+
/* The hash table which contains the roots of the binary trees for
|
|
94
|
+
* finding length 4+ matches */
|
|
95
|
+
mf_pos_t hash4_tab[1UL << BT_MATCHFINDER_HASH4_ORDER];
|
|
96
|
+
|
|
97
|
+
/* The child node references for the binary trees. The left and right
|
|
98
|
+
* children of the node for the sequence with position 'pos' are
|
|
99
|
+
* 'child_tab[pos * 2]' and 'child_tab[pos * 2 + 1]', respectively. */
|
|
100
|
+
mf_pos_t child_tab[2UL * MATCHFINDER_WINDOW_SIZE];
|
|
101
|
+
};
|
|
102
|
+
|
|
103
|
+
/* Prepare the matchfinder for a new input buffer. */
|
|
104
|
+
static forceinline void
|
|
105
|
+
bt_matchfinder_init(struct bt_matchfinder *mf)
|
|
106
|
+
{
|
|
107
|
+
STATIC_ASSERT(BT_MATCHFINDER_TOTAL_HASH_SIZE %
|
|
108
|
+
MATCHFINDER_SIZE_ALIGNMENT == 0);
|
|
109
|
+
|
|
110
|
+
matchfinder_init((mf_pos_t *)mf, BT_MATCHFINDER_TOTAL_HASH_SIZE);
|
|
111
|
+
}
|
|
112
|
+
|
|
113
|
+
static forceinline void
|
|
114
|
+
bt_matchfinder_slide_window(struct bt_matchfinder *mf)
|
|
115
|
+
{
|
|
116
|
+
STATIC_ASSERT(sizeof(*mf) % MATCHFINDER_SIZE_ALIGNMENT == 0);
|
|
117
|
+
|
|
118
|
+
matchfinder_rebase((mf_pos_t *)mf, sizeof(*mf));
|
|
119
|
+
}
|
|
120
|
+
|
|
121
|
+
static forceinline mf_pos_t *
|
|
122
|
+
bt_left_child(struct bt_matchfinder *mf, s32 node)
|
|
123
|
+
{
|
|
124
|
+
return &mf->child_tab[2 * (node & (MATCHFINDER_WINDOW_SIZE - 1)) + 0];
|
|
125
|
+
}
|
|
126
|
+
|
|
127
|
+
static forceinline mf_pos_t *
|
|
128
|
+
bt_right_child(struct bt_matchfinder *mf, s32 node)
|
|
129
|
+
{
|
|
130
|
+
return &mf->child_tab[2 * (node & (MATCHFINDER_WINDOW_SIZE - 1)) + 1];
|
|
131
|
+
}
|
|
132
|
+
|
|
133
|
+
/* The minimum permissible value of 'max_len' for bt_matchfinder_get_matches()
|
|
134
|
+
* and bt_matchfinder_skip_byte(). There must be sufficiently many bytes
|
|
135
|
+
* remaining to load a 32-bit integer from the *next* position. */
|
|
136
|
+
#define BT_MATCHFINDER_REQUIRED_NBYTES 5
|
|
137
|
+
|
|
138
|
+
/* Advance the binary tree matchfinder by one byte, optionally recording
|
|
139
|
+
* matches. @record_matches should be a compile-time constant. */
|
|
140
|
+
static forceinline struct lz_match *
|
|
141
|
+
bt_matchfinder_advance_one_byte(struct bt_matchfinder * const mf,
|
|
142
|
+
const u8 * const in_base,
|
|
143
|
+
const ptrdiff_t cur_pos,
|
|
144
|
+
const u32 max_len,
|
|
145
|
+
const u32 nice_len,
|
|
146
|
+
const u32 max_search_depth,
|
|
147
|
+
u32 * const next_hashes,
|
|
148
|
+
struct lz_match *lz_matchptr,
|
|
149
|
+
const bool record_matches)
|
|
150
|
+
{
|
|
151
|
+
const u8 *in_next = in_base + cur_pos;
|
|
152
|
+
u32 depth_remaining = max_search_depth;
|
|
153
|
+
const s32 cutoff = cur_pos - MATCHFINDER_WINDOW_SIZE;
|
|
154
|
+
u32 next_hashseq;
|
|
155
|
+
u32 hash3;
|
|
156
|
+
u32 hash4;
|
|
157
|
+
s32 cur_node;
|
|
158
|
+
#if BT_MATCHFINDER_HASH3_WAYS >= 2
|
|
159
|
+
s32 cur_node_2;
|
|
160
|
+
#endif
|
|
161
|
+
const u8 *matchptr;
|
|
162
|
+
mf_pos_t *pending_lt_ptr, *pending_gt_ptr;
|
|
163
|
+
u32 best_lt_len, best_gt_len;
|
|
164
|
+
u32 len;
|
|
165
|
+
u32 best_len = 3;
|
|
166
|
+
|
|
167
|
+
STATIC_ASSERT(BT_MATCHFINDER_HASH3_WAYS >= 1 &&
|
|
168
|
+
BT_MATCHFINDER_HASH3_WAYS <= 2);
|
|
169
|
+
|
|
170
|
+
next_hashseq = get_unaligned_le32(in_next + 1);
|
|
171
|
+
|
|
172
|
+
hash3 = next_hashes[0];
|
|
173
|
+
hash4 = next_hashes[1];
|
|
174
|
+
|
|
175
|
+
next_hashes[0] = lz_hash(next_hashseq & 0xFFFFFF, BT_MATCHFINDER_HASH3_ORDER);
|
|
176
|
+
next_hashes[1] = lz_hash(next_hashseq, BT_MATCHFINDER_HASH4_ORDER);
|
|
177
|
+
prefetchw(&mf->hash3_tab[next_hashes[0]]);
|
|
178
|
+
prefetchw(&mf->hash4_tab[next_hashes[1]]);
|
|
179
|
+
|
|
180
|
+
cur_node = mf->hash3_tab[hash3][0];
|
|
181
|
+
mf->hash3_tab[hash3][0] = cur_pos;
|
|
182
|
+
#if BT_MATCHFINDER_HASH3_WAYS >= 2
|
|
183
|
+
cur_node_2 = mf->hash3_tab[hash3][1];
|
|
184
|
+
mf->hash3_tab[hash3][1] = cur_node;
|
|
185
|
+
#endif
|
|
186
|
+
if (record_matches && cur_node > cutoff) {
|
|
187
|
+
u32 seq3 = load_u24_unaligned(in_next);
|
|
188
|
+
if (seq3 == load_u24_unaligned(&in_base[cur_node])) {
|
|
189
|
+
lz_matchptr->length = 3;
|
|
190
|
+
lz_matchptr->offset = in_next - &in_base[cur_node];
|
|
191
|
+
lz_matchptr++;
|
|
192
|
+
}
|
|
193
|
+
#if BT_MATCHFINDER_HASH3_WAYS >= 2
|
|
194
|
+
else if (cur_node_2 > cutoff &&
|
|
195
|
+
seq3 == load_u24_unaligned(&in_base[cur_node_2]))
|
|
196
|
+
{
|
|
197
|
+
lz_matchptr->length = 3;
|
|
198
|
+
lz_matchptr->offset = in_next - &in_base[cur_node_2];
|
|
199
|
+
lz_matchptr++;
|
|
200
|
+
}
|
|
201
|
+
#endif
|
|
202
|
+
}
|
|
203
|
+
|
|
204
|
+
cur_node = mf->hash4_tab[hash4];
|
|
205
|
+
mf->hash4_tab[hash4] = cur_pos;
|
|
206
|
+
|
|
207
|
+
pending_lt_ptr = bt_left_child(mf, cur_pos);
|
|
208
|
+
pending_gt_ptr = bt_right_child(mf, cur_pos);
|
|
209
|
+
|
|
210
|
+
if (cur_node <= cutoff) {
|
|
211
|
+
*pending_lt_ptr = MATCHFINDER_INITVAL;
|
|
212
|
+
*pending_gt_ptr = MATCHFINDER_INITVAL;
|
|
213
|
+
return lz_matchptr;
|
|
214
|
+
}
|
|
215
|
+
|
|
216
|
+
best_lt_len = 0;
|
|
217
|
+
best_gt_len = 0;
|
|
218
|
+
len = 0;
|
|
219
|
+
|
|
220
|
+
for (;;) {
|
|
221
|
+
matchptr = &in_base[cur_node];
|
|
222
|
+
|
|
223
|
+
if (matchptr[len] == in_next[len]) {
|
|
224
|
+
len = lz_extend(in_next, matchptr, len + 1, max_len);
|
|
225
|
+
if (!record_matches || len > best_len) {
|
|
226
|
+
if (record_matches) {
|
|
227
|
+
best_len = len;
|
|
228
|
+
lz_matchptr->length = len;
|
|
229
|
+
lz_matchptr->offset = in_next - matchptr;
|
|
230
|
+
lz_matchptr++;
|
|
231
|
+
}
|
|
232
|
+
if (len >= nice_len) {
|
|
233
|
+
*pending_lt_ptr = *bt_left_child(mf, cur_node);
|
|
234
|
+
*pending_gt_ptr = *bt_right_child(mf, cur_node);
|
|
235
|
+
return lz_matchptr;
|
|
236
|
+
}
|
|
237
|
+
}
|
|
238
|
+
}
|
|
239
|
+
|
|
240
|
+
if (matchptr[len] < in_next[len]) {
|
|
241
|
+
*pending_lt_ptr = cur_node;
|
|
242
|
+
pending_lt_ptr = bt_right_child(mf, cur_node);
|
|
243
|
+
cur_node = *pending_lt_ptr;
|
|
244
|
+
best_lt_len = len;
|
|
245
|
+
if (best_gt_len < len)
|
|
246
|
+
len = best_gt_len;
|
|
247
|
+
} else {
|
|
248
|
+
*pending_gt_ptr = cur_node;
|
|
249
|
+
pending_gt_ptr = bt_left_child(mf, cur_node);
|
|
250
|
+
cur_node = *pending_gt_ptr;
|
|
251
|
+
best_gt_len = len;
|
|
252
|
+
if (best_lt_len < len)
|
|
253
|
+
len = best_lt_len;
|
|
254
|
+
}
|
|
255
|
+
|
|
256
|
+
if (cur_node <= cutoff || !--depth_remaining) {
|
|
257
|
+
*pending_lt_ptr = MATCHFINDER_INITVAL;
|
|
258
|
+
*pending_gt_ptr = MATCHFINDER_INITVAL;
|
|
259
|
+
return lz_matchptr;
|
|
260
|
+
}
|
|
261
|
+
}
|
|
262
|
+
}
|
|
263
|
+
|
|
264
|
+
/*
|
|
265
|
+
* Retrieve a list of matches with the current position.
|
|
266
|
+
*
|
|
267
|
+
* @mf
|
|
268
|
+
* The matchfinder structure.
|
|
269
|
+
* @in_base
|
|
270
|
+
* Pointer to the next byte in the input buffer to process _at the last
|
|
271
|
+
* time bt_matchfinder_init() or bt_matchfinder_slide_window() was called_.
|
|
272
|
+
* @cur_pos
|
|
273
|
+
* The current position in the input buffer relative to @in_base (the
|
|
274
|
+
* position of the sequence being matched against).
|
|
275
|
+
* @max_len
|
|
276
|
+
* The maximum permissible match length at this position. Must be >=
|
|
277
|
+
* BT_MATCHFINDER_REQUIRED_NBYTES.
|
|
278
|
+
* @nice_len
|
|
279
|
+
* Stop searching if a match of at least this length is found.
|
|
280
|
+
* Must be <= @max_len.
|
|
281
|
+
* @max_search_depth
|
|
282
|
+
* Limit on the number of potential matches to consider. Must be >= 1.
|
|
283
|
+
* @next_hashes
|
|
284
|
+
* The precomputed hash codes for the sequence beginning at @in_next.
|
|
285
|
+
* These will be used and then updated with the precomputed hashcodes for
|
|
286
|
+
* the sequence beginning at @in_next + 1.
|
|
287
|
+
* @lz_matchptr
|
|
288
|
+
* An array in which this function will record the matches. The recorded
|
|
289
|
+
* matches will be sorted by strictly increasing length and (non-strictly)
|
|
290
|
+
* increasing offset. The maximum number of matches that may be found is
|
|
291
|
+
* 'nice_len - 2'.
|
|
292
|
+
*
|
|
293
|
+
* The return value is a pointer to the next available slot in the @lz_matchptr
|
|
294
|
+
* array. (If no matches were found, this will be the same as @lz_matchptr.)
|
|
295
|
+
*/
|
|
296
|
+
static forceinline struct lz_match *
|
|
297
|
+
bt_matchfinder_get_matches(struct bt_matchfinder *mf,
|
|
298
|
+
const u8 *in_base,
|
|
299
|
+
ptrdiff_t cur_pos,
|
|
300
|
+
u32 max_len,
|
|
301
|
+
u32 nice_len,
|
|
302
|
+
u32 max_search_depth,
|
|
303
|
+
u32 next_hashes[2],
|
|
304
|
+
struct lz_match *lz_matchptr)
|
|
305
|
+
{
|
|
306
|
+
return bt_matchfinder_advance_one_byte(mf,
|
|
307
|
+
in_base,
|
|
308
|
+
cur_pos,
|
|
309
|
+
max_len,
|
|
310
|
+
nice_len,
|
|
311
|
+
max_search_depth,
|
|
312
|
+
next_hashes,
|
|
313
|
+
lz_matchptr,
|
|
314
|
+
true);
|
|
315
|
+
}
|
|
316
|
+
|
|
317
|
+
/*
|
|
318
|
+
* Advance the matchfinder, but don't record any matches.
|
|
319
|
+
*
|
|
320
|
+
* This is very similar to bt_matchfinder_get_matches() because both functions
|
|
321
|
+
* must do hashing and tree re-rooting.
|
|
322
|
+
*/
|
|
323
|
+
static forceinline void
|
|
324
|
+
bt_matchfinder_skip_byte(struct bt_matchfinder *mf,
|
|
325
|
+
const u8 *in_base,
|
|
326
|
+
ptrdiff_t cur_pos,
|
|
327
|
+
u32 nice_len,
|
|
328
|
+
u32 max_search_depth,
|
|
329
|
+
u32 next_hashes[2])
|
|
330
|
+
{
|
|
331
|
+
bt_matchfinder_advance_one_byte(mf,
|
|
332
|
+
in_base,
|
|
333
|
+
cur_pos,
|
|
334
|
+
nice_len,
|
|
335
|
+
nice_len,
|
|
336
|
+
max_search_depth,
|
|
337
|
+
next_hashes,
|
|
338
|
+
NULL,
|
|
339
|
+
false);
|
|
340
|
+
}
|
|
341
|
+
|
|
342
|
+
#endif /* LIB_BT_MATCHFINDER_H */
|
|
@@ -0,0 +1,93 @@
|
|
|
1
|
+
/*
|
|
2
|
+
* cpu_features_common.h - code shared by all lib/$arch/cpu_features.c
|
|
3
|
+
*
|
|
4
|
+
* Copyright 2020 Eric Biggers
|
|
5
|
+
*
|
|
6
|
+
* Permission is hereby granted, free of charge, to any person
|
|
7
|
+
* obtaining a copy of this software and associated documentation
|
|
8
|
+
* files (the "Software"), to deal in the Software without
|
|
9
|
+
* restriction, including without limitation the rights to use,
|
|
10
|
+
* copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
11
|
+
* copies of the Software, and to permit persons to whom the
|
|
12
|
+
* Software is furnished to do so, subject to the following
|
|
13
|
+
* conditions:
|
|
14
|
+
*
|
|
15
|
+
* The above copyright notice and this permission notice shall be
|
|
16
|
+
* included in all copies or substantial portions of the Software.
|
|
17
|
+
*
|
|
18
|
+
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
19
|
+
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
|
20
|
+
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
21
|
+
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
|
22
|
+
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
|
23
|
+
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
24
|
+
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
|
25
|
+
* OTHER DEALINGS IN THE SOFTWARE.
|
|
26
|
+
*/
|
|
27
|
+
|
|
28
|
+
#ifndef LIB_CPU_FEATURES_COMMON_H
|
|
29
|
+
#define LIB_CPU_FEATURES_COMMON_H
|
|
30
|
+
|
|
31
|
+
#if defined(TEST_SUPPORT__DO_NOT_USE) && !defined(FREESTANDING)
|
|
32
|
+
/* for strdup() and strtok_r() */
|
|
33
|
+
# undef _ANSI_SOURCE
|
|
34
|
+
# ifndef __APPLE__
|
|
35
|
+
# undef _GNU_SOURCE
|
|
36
|
+
# define _GNU_SOURCE
|
|
37
|
+
# endif
|
|
38
|
+
# include <stdio.h>
|
|
39
|
+
# include <stdlib.h>
|
|
40
|
+
# include <string.h>
|
|
41
|
+
#endif
|
|
42
|
+
|
|
43
|
+
#include "lib_common.h"
|
|
44
|
+
|
|
45
|
+
struct cpu_feature {
|
|
46
|
+
u32 bit;
|
|
47
|
+
const char *name;
|
|
48
|
+
};
|
|
49
|
+
|
|
50
|
+
#if defined(TEST_SUPPORT__DO_NOT_USE) && !defined(FREESTANDING)
|
|
51
|
+
/* Disable any features that are listed in $LIBDEFLATE_DISABLE_CPU_FEATURES. */
|
|
52
|
+
static inline void
|
|
53
|
+
disable_cpu_features_for_testing(u32 *features,
|
|
54
|
+
const struct cpu_feature *feature_table,
|
|
55
|
+
size_t feature_table_length)
|
|
56
|
+
{
|
|
57
|
+
char *env_value, *strbuf, *p, *saveptr = NULL;
|
|
58
|
+
size_t i;
|
|
59
|
+
|
|
60
|
+
env_value = getenv("LIBDEFLATE_DISABLE_CPU_FEATURES");
|
|
61
|
+
if (!env_value)
|
|
62
|
+
return;
|
|
63
|
+
strbuf = strdup(env_value);
|
|
64
|
+
if (!strbuf)
|
|
65
|
+
abort();
|
|
66
|
+
p = strtok_r(strbuf, ",", &saveptr);
|
|
67
|
+
while (p) {
|
|
68
|
+
for (i = 0; i < feature_table_length; i++) {
|
|
69
|
+
if (strcmp(p, feature_table[i].name) == 0) {
|
|
70
|
+
*features &= ~feature_table[i].bit;
|
|
71
|
+
break;
|
|
72
|
+
}
|
|
73
|
+
}
|
|
74
|
+
if (i == feature_table_length) {
|
|
75
|
+
fprintf(stderr,
|
|
76
|
+
"unrecognized feature in LIBDEFLATE_DISABLE_CPU_FEATURES: \"%s\"\n",
|
|
77
|
+
p);
|
|
78
|
+
abort();
|
|
79
|
+
}
|
|
80
|
+
p = strtok_r(NULL, ",", &saveptr);
|
|
81
|
+
}
|
|
82
|
+
free(strbuf);
|
|
83
|
+
}
|
|
84
|
+
#else /* TEST_SUPPORT__DO_NOT_USE */
|
|
85
|
+
static inline void
|
|
86
|
+
disable_cpu_features_for_testing(u32 *features,
|
|
87
|
+
const struct cpu_feature *feature_table,
|
|
88
|
+
size_t feature_table_length)
|
|
89
|
+
{
|
|
90
|
+
}
|
|
91
|
+
#endif /* !TEST_SUPPORT__DO_NOT_USE */
|
|
92
|
+
|
|
93
|
+
#endif /* LIB_CPU_FEATURES_COMMON_H */
|
|
@@ -0,0 +1,262 @@
|
|
|
1
|
+
/*
|
|
2
|
+
* crc32.c - CRC-32 checksum algorithm for the gzip format
|
|
3
|
+
*
|
|
4
|
+
* Copyright 2016 Eric Biggers
|
|
5
|
+
*
|
|
6
|
+
* Permission is hereby granted, free of charge, to any person
|
|
7
|
+
* obtaining a copy of this software and associated documentation
|
|
8
|
+
* files (the "Software"), to deal in the Software without
|
|
9
|
+
* restriction, including without limitation the rights to use,
|
|
10
|
+
* copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
11
|
+
* copies of the Software, and to permit persons to whom the
|
|
12
|
+
* Software is furnished to do so, subject to the following
|
|
13
|
+
* conditions:
|
|
14
|
+
*
|
|
15
|
+
* The above copyright notice and this permission notice shall be
|
|
16
|
+
* included in all copies or substantial portions of the Software.
|
|
17
|
+
*
|
|
18
|
+
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
19
|
+
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
|
20
|
+
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
|
21
|
+
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
|
22
|
+
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
|
23
|
+
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
|
24
|
+
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
|
25
|
+
* OTHER DEALINGS IN THE SOFTWARE.
|
|
26
|
+
*/
|
|
27
|
+
|
|
28
|
+
/*
|
|
29
|
+
* High-level description of CRC
|
|
30
|
+
* =============================
|
|
31
|
+
*
|
|
32
|
+
* Consider a bit sequence 'bits[1...len]'. Interpret 'bits' as the "message"
|
|
33
|
+
* polynomial M(x) with coefficients in GF(2) (the field of integers modulo 2),
|
|
34
|
+
* where the coefficient of 'x^i' is 'bits[len - i]'. Then, compute:
|
|
35
|
+
*
|
|
36
|
+
* R(x) = M(x)*x^n mod G(x)
|
|
37
|
+
*
|
|
38
|
+
* where G(x) is a selected "generator" polynomial of degree 'n'. The remainder
|
|
39
|
+
* R(x) is a polynomial of max degree 'n - 1'. The CRC of 'bits' is R(x)
|
|
40
|
+
* interpreted as a bitstring of length 'n'.
|
|
41
|
+
*
|
|
42
|
+
* CRC used in gzip
|
|
43
|
+
* ================
|
|
44
|
+
*
|
|
45
|
+
* In the gzip format (RFC 1952):
|
|
46
|
+
*
|
|
47
|
+
* - The bitstring to checksum is formed from the bytes of the uncompressed
|
|
48
|
+
* data by concatenating the bits from the bytes in order, proceeding
|
|
49
|
+
* from the low-order bit to the high-order bit within each byte.
|
|
50
|
+
*
|
|
51
|
+
* - The generator polynomial G(x) is: x^32 + x^26 + x^23 + x^22 + x^16 +
|
|
52
|
+
* x^12 + x^11 + x^10 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1.
|
|
53
|
+
* Consequently, the CRC length is 32 bits ("CRC-32").
|
|
54
|
+
*
|
|
55
|
+
* - The highest order 32 coefficients of M(x)*x^n are inverted.
|
|
56
|
+
*
|
|
57
|
+
* - All 32 coefficients of R(x) are inverted.
|
|
58
|
+
*
|
|
59
|
+
* The two inversions cause added leading and trailing zero bits to affect the
|
|
60
|
+
* resulting CRC, whereas with a regular CRC such bits would have no effect on
|
|
61
|
+
* the CRC.
|
|
62
|
+
*
|
|
63
|
+
* Computation and optimizations
|
|
64
|
+
* =============================
|
|
65
|
+
*
|
|
66
|
+
* We can compute R(x) through "long division", maintaining only 32 bits of
|
|
67
|
+
* state at any given time. Multiplication by 'x' can be implemented as
|
|
68
|
+
* right-shifting by 1 (assuming the polynomial<=>bitstring mapping where the
|
|
69
|
+
* highest order bit represents the coefficient of x^0), and both addition and
|
|
70
|
+
* subtraction can be implemented as bitwise exclusive OR (since we are working
|
|
71
|
+
* in GF(2)). Here is an unoptimized implementation:
|
|
72
|
+
*
|
|
73
|
+
* static u32 crc32_gzip(const u8 *p, size_t len)
|
|
74
|
+
* {
|
|
75
|
+
* u32 crc = 0;
|
|
76
|
+
* const u32 divisor = 0xEDB88320;
|
|
77
|
+
*
|
|
78
|
+
* for (size_t i = 0; i < len * 8 + 32; i++) {
|
|
79
|
+
* int bit;
|
|
80
|
+
* u32 multiple;
|
|
81
|
+
*
|
|
82
|
+
* if (i < len * 8)
|
|
83
|
+
* bit = (p[i / 8] >> (i % 8)) & 1;
|
|
84
|
+
* else
|
|
85
|
+
* bit = 0; // one of the 32 appended 0 bits
|
|
86
|
+
*
|
|
87
|
+
* if (i < 32) // the first 32 bits are inverted
|
|
88
|
+
* bit ^= 1;
|
|
89
|
+
*
|
|
90
|
+
* if (crc & 1)
|
|
91
|
+
* multiple = divisor;
|
|
92
|
+
* else
|
|
93
|
+
* multiple = 0;
|
|
94
|
+
*
|
|
95
|
+
* crc >>= 1;
|
|
96
|
+
* crc |= (u32)bit << 31;
|
|
97
|
+
* crc ^= multiple;
|
|
98
|
+
* }
|
|
99
|
+
*
|
|
100
|
+
* return ~crc;
|
|
101
|
+
* }
|
|
102
|
+
*
|
|
103
|
+
* In this implementation, the 32-bit integer 'crc' maintains the remainder of
|
|
104
|
+
* the currently processed portion of the message (with 32 zero bits appended)
|
|
105
|
+
* when divided by the generator polynomial. 'crc' is the representation of
|
|
106
|
+
* R(x), and 'divisor' is the representation of G(x) excluding the x^32
|
|
107
|
+
* coefficient. For each bit to process, we multiply R(x) by 'x^1', then add
|
|
108
|
+
* 'x^0' if the new bit is a 1. If this causes R(x) to gain a nonzero x^32
|
|
109
|
+
* term, then we subtract G(x) from R(x).
|
|
110
|
+
*
|
|
111
|
+
* We can speed this up by taking advantage of the fact that XOR is commutative
|
|
112
|
+
* and associative, so the order in which we combine the inputs into 'crc' is
|
|
113
|
+
* unimportant. And since each message bit we add doesn't affect the choice of
|
|
114
|
+
* 'multiple' until 32 bits later, we need not actually add each message bit
|
|
115
|
+
* until that point:
|
|
116
|
+
*
|
|
117
|
+
* static u32 crc32_gzip(const u8 *p, size_t len)
|
|
118
|
+
* {
|
|
119
|
+
* u32 crc = ~0;
|
|
120
|
+
* const u32 divisor = 0xEDB88320;
|
|
121
|
+
*
|
|
122
|
+
* for (size_t i = 0; i < len * 8; i++) {
|
|
123
|
+
* int bit;
|
|
124
|
+
* u32 multiple;
|
|
125
|
+
*
|
|
126
|
+
* bit = (p[i / 8] >> (i % 8)) & 1;
|
|
127
|
+
* crc ^= bit;
|
|
128
|
+
* if (crc & 1)
|
|
129
|
+
* multiple = divisor;
|
|
130
|
+
* else
|
|
131
|
+
* multiple = 0;
|
|
132
|
+
* crc >>= 1;
|
|
133
|
+
* crc ^= multiple;
|
|
134
|
+
* }
|
|
135
|
+
*
|
|
136
|
+
* return ~crc;
|
|
137
|
+
* }
|
|
138
|
+
*
|
|
139
|
+
* With the above implementation we get the effect of 32 appended 0 bits for
|
|
140
|
+
* free; they never affect the choice of a divisor, nor would they change the
|
|
141
|
+
* value of 'crc' if they were to be actually XOR'ed in. And by starting with a
|
|
142
|
+
* remainder of all 1 bits, we get the effect of complementing the first 32
|
|
143
|
+
* message bits.
|
|
144
|
+
*
|
|
145
|
+
* The next optimization is to process the input in multi-bit units. Suppose
|
|
146
|
+
* that we insert the next 'n' message bits into the remainder. Then we get an
|
|
147
|
+
* intermediate remainder of length '32 + n' bits, and the CRC of the extra 'n'
|
|
148
|
+
* bits is the amount by which the low 32 bits of the remainder will change as a
|
|
149
|
+
* result of cancelling out those 'n' bits. Taking n=8 (one byte) and
|
|
150
|
+
* precomputing a table containing the CRC of each possible byte, we get
|
|
151
|
+
* crc32_slice1() defined below.
|
|
152
|
+
*
|
|
153
|
+
* As a further optimization, we could increase the multi-bit unit size to 16.
|
|
154
|
+
* However, that is inefficient because the table size explodes from 256 entries
|
|
155
|
+
* (1024 bytes) to 65536 entries (262144 bytes), which wastes memory and won't
|
|
156
|
+
* fit in L1 cache on typical processors.
|
|
157
|
+
*
|
|
158
|
+
* However, we can actually process 4 bytes at a time using 4 different tables
|
|
159
|
+
* with 256 entries each. Logically, we form a 64-bit intermediate remainder
|
|
160
|
+
* and cancel out the high 32 bits in 8-bit chunks. Bits 32-39 are cancelled
|
|
161
|
+
* out by the CRC of those bits, whereas bits 40-47 are be cancelled out by the
|
|
162
|
+
* CRC of those bits with 8 zero bits appended, and so on.
|
|
163
|
+
*
|
|
164
|
+
* In crc32_slice8(), this method is extended to 8 bytes at a time. The
|
|
165
|
+
* intermediate remainder (which we never actually store explicitly) is 96 bits.
|
|
166
|
+
*
|
|
167
|
+
* On CPUs that support fast carryless multiplication, CRCs can be computed even
|
|
168
|
+
* more quickly via "folding". See e.g. the x86 PCLMUL implementations.
|
|
169
|
+
*/
|
|
170
|
+
|
|
171
|
+
#include "lib_common.h"
|
|
172
|
+
#include "crc32_multipliers.h"
|
|
173
|
+
#include "crc32_tables.h"
|
|
174
|
+
|
|
175
|
+
/* This is the default implementation. It uses the slice-by-8 method. */
static u32 MAYBE_UNUSED
crc32_slice8(u32 crc, const u8 *p, size_t len)
{
	const u8 * const end = p + len;
	const u8 *end64;

	/*
	 * Process bytes one at a time until 'p' is 8-byte aligned (or the
	 * buffer is exhausted), so the 32-bit loads below are aligned.
	 */
	for (; ((uintptr_t)p & 7) && p != end; p++)
		crc = (crc >> 8) ^ crc32_slice8_table[(u8)crc ^ *p];

	/* 'end64' is the end of the largest 8-byte-multiple region from 'p'. */
	end64 = p + ((end - p) & ~7);
	for (; p != end64; p += 8) {
		/* Load the next 8 bytes as two little-endian 32-bit words. */
		u32 v1 = le32_bswap(*(const u32 *)(p + 0));
		u32 v2 = le32_bswap(*(const u32 *)(p + 4));

		/*
		 * XOR the current CRC into the first word, then cancel out all
		 * 8 bytes at once: each byte is looked up in its own 256-entry
		 * subtable (offsets 0x700 down to 0x000, one per byte
		 * position) and the results are XOR'ed together.
		 */
		crc = crc32_slice8_table[0x700 + (u8)((crc ^ v1) >> 0)] ^
		      crc32_slice8_table[0x600 + (u8)((crc ^ v1) >> 8)] ^
		      crc32_slice8_table[0x500 + (u8)((crc ^ v1) >> 16)] ^
		      crc32_slice8_table[0x400 + (u8)((crc ^ v1) >> 24)] ^
		      crc32_slice8_table[0x300 + (u8)(v2 >> 0)] ^
		      crc32_slice8_table[0x200 + (u8)(v2 >> 8)] ^
		      crc32_slice8_table[0x100 + (u8)(v2 >> 16)] ^
		      crc32_slice8_table[0x000 + (u8)(v2 >> 24)];
	}

	/* Process any remaining bytes (fewer than 8) one at a time. */
	for (; p != end; p++)
		crc = (crc >> 8) ^ crc32_slice8_table[(u8)crc ^ *p];

	return crc;
}
|
|
205
|
+
|
|
206
|
+
/*
|
|
207
|
+
* This is a more lightweight generic implementation, which can be used as a
|
|
208
|
+
* subroutine by architecture-specific implementations to process small amounts
|
|
209
|
+
* of unaligned data at the beginning and/or end of the buffer.
|
|
210
|
+
*/
|
|
211
|
+
static forceinline u32 MAYBE_UNUSED
|
|
212
|
+
crc32_slice1(u32 crc, const u8 *p, size_t len)
|
|
213
|
+
{
|
|
214
|
+
size_t i;
|
|
215
|
+
|
|
216
|
+
for (i = 0; i < len; i++)
|
|
217
|
+
crc = (crc >> 8) ^ crc32_slice1_table[(u8)crc ^ p[i]];
|
|
218
|
+
return crc;
|
|
219
|
+
}
|
|
220
|
+
|
|
221
|
+
/* Include architecture-specific implementation(s) if available. */
#undef DEFAULT_IMPL
#undef arch_select_crc32_func
typedef u32 (*crc32_func_t)(u32 crc, const u8 *p, size_t len);
#if defined(ARCH_ARM32) || defined(ARCH_ARM64)
#  include "arm/crc32_impl.h"
#elif defined(ARCH_X86_32) || defined(ARCH_X86_64)
#  include "x86/crc32_impl.h"
#endif

/* If no arch-specific header claimed a default, fall back to slice-by-8. */
#ifndef DEFAULT_IMPL
#  define DEFAULT_IMPL crc32_slice8
#endif

#ifdef arch_select_crc32_func
static u32 dispatch_crc32(u32 crc, const u8 *p, size_t len);

/*
 * Function pointer through which the CRC is computed.  It starts out
 * pointing at the dispatcher and is overwritten with the selected
 * implementation on the first call.
 */
static volatile crc32_func_t crc32_impl = dispatch_crc32;

/* Choose the best implementation at runtime. */
static u32 dispatch_crc32(u32 crc, const u8 *p, size_t len)
{
	crc32_func_t f = arch_select_crc32_func();

	/* No suitable arch-specific implementation; use the generic one. */
	if (f == NULL)
		f = DEFAULT_IMPL;

	/* Cache the choice so later calls skip the dispatcher. */
	crc32_impl = f;
	return f(crc, p, len);
}
#else
/* The best implementation is statically known, so call it directly. */
#define crc32_impl DEFAULT_IMPL
#endif
|
|
255
|
+
|
|
256
|
+
LIBDEFLATEAPI u32
|
|
257
|
+
libdeflate_crc32(u32 crc, const void *p, size_t len)
|
|
258
|
+
{
|
|
259
|
+
if (p == NULL) /* Return initial value. */
|
|
260
|
+
return 0;
|
|
261
|
+
return ~crc32_impl(~crc, p, len);
|
|
262
|
+
}
|