deflate-ruby 1.0.1 → 1.0.2

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (138)
  1. checksums.yaml +4 -4
  2. data/CLAUDE.md +95 -92
  3. data/LICENSE.txt +6 -6
  4. data/README.md +87 -65
  5. data/Rakefile +23 -0
  6. data/ext/deflate_ruby/{libdeflate/lib/x86/adler32_impl.h → adler32_impl.h} +8 -7
  7. data/ext/deflate_ruby/common_defs.h +748 -0
  8. data/ext/deflate_ruby/{libdeflate/lib/x86/cpu_features.c → cpu_features.c} +46 -16
  9. data/ext/deflate_ruby/{libdeflate/lib/x86/cpu_features.h → cpu_features.h} +2 -1
  10. data/ext/deflate_ruby/{libdeflate/lib/x86/crc32_impl.h → crc32_impl.h} +22 -23
  11. data/ext/deflate_ruby/{libdeflate/lib/crc32_multipliers.h → crc32_multipliers.h} +2 -4
  12. data/ext/deflate_ruby/{libdeflate/lib/x86/crc32_pclmul_template.h → crc32_pclmul_template.h} +23 -94
  13. data/ext/deflate_ruby/{libdeflate/lib/crc32_tables.h → crc32_tables.h} +1 -1
  14. data/ext/deflate_ruby/{libdeflate/lib/deflate_compress.c → deflate_compress.c} +59 -60
  15. data/ext/deflate_ruby/deflate_ruby.c +392 -218
  16. data/ext/deflate_ruby/deflate_ruby.h +6 -0
  17. data/ext/deflate_ruby/extconf.rb +35 -25
  18. data/ext/deflate_ruby/libdeflate/adler32.c +162 -0
  19. data/ext/deflate_ruby/libdeflate/{lib/arm → arm}/adler32_impl.h +14 -7
  20. data/ext/deflate_ruby/libdeflate/{lib/arm → arm}/crc32_impl.h +25 -31
  21. data/ext/deflate_ruby/libdeflate/arm/crc32_pmull_helpers.h +156 -0
  22. data/ext/deflate_ruby/libdeflate/arm/crc32_pmull_wide.h +226 -0
  23. data/ext/deflate_ruby/libdeflate/bt_matchfinder.h +342 -0
  24. data/ext/deflate_ruby/libdeflate/common_defs.h +2 -1
  25. data/ext/deflate_ruby/libdeflate/cpu_features_common.h +93 -0
  26. data/ext/deflate_ruby/libdeflate/crc32.c +262 -0
  27. data/ext/deflate_ruby/libdeflate/crc32_multipliers.h +375 -0
  28. data/ext/deflate_ruby/libdeflate/crc32_tables.h +587 -0
  29. data/ext/deflate_ruby/libdeflate/decompress_template.h +777 -0
  30. data/ext/deflate_ruby/libdeflate/deflate_compress.c +4128 -0
  31. data/ext/deflate_ruby/libdeflate/deflate_compress.h +15 -0
  32. data/ext/deflate_ruby/libdeflate/deflate_constants.h +56 -0
  33. data/ext/deflate_ruby/libdeflate/deflate_decompress.c +1208 -0
  34. data/ext/deflate_ruby/libdeflate/gzip_compress.c +90 -0
  35. data/ext/deflate_ruby/libdeflate/gzip_constants.h +45 -0
  36. data/ext/deflate_ruby/libdeflate/gzip_decompress.c +144 -0
  37. data/ext/deflate_ruby/libdeflate/hc_matchfinder.h +401 -0
  38. data/ext/deflate_ruby/libdeflate/ht_matchfinder.h +234 -0
  39. data/ext/deflate_ruby/libdeflate/lib_common.h +106 -0
  40. data/ext/deflate_ruby/libdeflate/libdeflate.h +2 -2
  41. data/ext/deflate_ruby/libdeflate/{lib/matchfinder_common.h → matchfinder_common.h} +3 -3
  42. data/ext/deflate_ruby/libdeflate/x86/adler32_impl.h +135 -0
  43. data/ext/deflate_ruby/libdeflate/x86/adler32_template.h +518 -0
  44. data/ext/deflate_ruby/libdeflate/x86/cpu_features.c +213 -0
  45. data/ext/deflate_ruby/libdeflate/x86/cpu_features.h +170 -0
  46. data/ext/deflate_ruby/libdeflate/x86/crc32_impl.h +159 -0
  47. data/ext/deflate_ruby/libdeflate/x86/crc32_pclmul_template.h +424 -0
  48. data/ext/deflate_ruby/libdeflate/x86/decompress_impl.h +57 -0
  49. data/ext/deflate_ruby/libdeflate.h +411 -0
  50. data/ext/deflate_ruby/matchfinder_common.h +224 -0
  51. data/ext/deflate_ruby/matchfinder_impl.h +122 -0
  52. data/ext/deflate_ruby/utils.c +141 -0
  53. data/ext/deflate_ruby/zlib_compress.c +82 -0
  54. data/ext/deflate_ruby/zlib_constants.h +21 -0
  55. data/ext/deflate_ruby/zlib_decompress.c +104 -0
  56. data/lib/deflate_ruby/version.rb +1 -1
  57. data/lib/deflate_ruby.rb +1 -63
  58. data/sig/deflate_ruby.rbs +4 -0
  59. data/test/test_deflate_ruby.rb +220 -0
  60. data/test/test_helper.rb +6 -0
  61. metadata +89 -144
  62. data/ext/deflate_ruby/libdeflate/CMakeLists.txt +0 -270
  63. data/ext/deflate_ruby/libdeflate/NEWS.md +0 -494
  64. data/ext/deflate_ruby/libdeflate/README.md +0 -228
  65. data/ext/deflate_ruby/libdeflate/libdeflate-config.cmake.in +0 -3
  66. data/ext/deflate_ruby/libdeflate/libdeflate.pc.in +0 -18
  67. data/ext/deflate_ruby/libdeflate/programs/CMakeLists.txt +0 -105
  68. data/ext/deflate_ruby/libdeflate/programs/benchmark.c +0 -696
  69. data/ext/deflate_ruby/libdeflate/programs/checksum.c +0 -218
  70. data/ext/deflate_ruby/libdeflate/programs/config.h.in +0 -19
  71. data/ext/deflate_ruby/libdeflate/programs/gzip.c +0 -688
  72. data/ext/deflate_ruby/libdeflate/programs/prog_util.c +0 -521
  73. data/ext/deflate_ruby/libdeflate/programs/prog_util.h +0 -225
  74. data/ext/deflate_ruby/libdeflate/programs/test_checksums.c +0 -200
  75. data/ext/deflate_ruby/libdeflate/programs/test_custom_malloc.c +0 -155
  76. data/ext/deflate_ruby/libdeflate/programs/test_incomplete_codes.c +0 -385
  77. data/ext/deflate_ruby/libdeflate/programs/test_invalid_streams.c +0 -130
  78. data/ext/deflate_ruby/libdeflate/programs/test_litrunlen_overflow.c +0 -72
  79. data/ext/deflate_ruby/libdeflate/programs/test_overread.c +0 -95
  80. data/ext/deflate_ruby/libdeflate/programs/test_slow_decompression.c +0 -472
  81. data/ext/deflate_ruby/libdeflate/programs/test_trailing_bytes.c +0 -151
  82. data/ext/deflate_ruby/libdeflate/programs/test_util.c +0 -237
  83. data/ext/deflate_ruby/libdeflate/programs/test_util.h +0 -61
  84. data/ext/deflate_ruby/libdeflate/programs/tgetopt.c +0 -118
  85. data/ext/deflate_ruby/libdeflate/scripts/android_build.sh +0 -118
  86. data/ext/deflate_ruby/libdeflate/scripts/android_tests.sh +0 -69
  87. data/ext/deflate_ruby/libdeflate/scripts/benchmark.sh +0 -10
  88. data/ext/deflate_ruby/libdeflate/scripts/checksum.sh +0 -10
  89. data/ext/deflate_ruby/libdeflate/scripts/checksum_benchmarks.sh +0 -253
  90. data/ext/deflate_ruby/libdeflate/scripts/cmake-helper.sh +0 -17
  91. data/ext/deflate_ruby/libdeflate/scripts/deflate_benchmarks.sh +0 -119
  92. data/ext/deflate_ruby/libdeflate/scripts/exec_tests.sh +0 -38
  93. data/ext/deflate_ruby/libdeflate/scripts/gen-release-archives.sh +0 -37
  94. data/ext/deflate_ruby/libdeflate/scripts/gen_bitreverse_tab.py +0 -19
  95. data/ext/deflate_ruby/libdeflate/scripts/gen_crc32_multipliers.c +0 -199
  96. data/ext/deflate_ruby/libdeflate/scripts/gen_crc32_tables.c +0 -105
  97. data/ext/deflate_ruby/libdeflate/scripts/gen_default_litlen_costs.py +0 -44
  98. data/ext/deflate_ruby/libdeflate/scripts/gen_offset_slot_map.py +0 -29
  99. data/ext/deflate_ruby/libdeflate/scripts/gzip_tests.sh +0 -523
  100. data/ext/deflate_ruby/libdeflate/scripts/libFuzzer/deflate_compress/corpus/0 +0 -0
  101. data/ext/deflate_ruby/libdeflate/scripts/libFuzzer/deflate_compress/fuzz.c +0 -95
  102. data/ext/deflate_ruby/libdeflate/scripts/libFuzzer/deflate_decompress/corpus/0 +0 -3
  103. data/ext/deflate_ruby/libdeflate/scripts/libFuzzer/deflate_decompress/fuzz.c +0 -62
  104. data/ext/deflate_ruby/libdeflate/scripts/libFuzzer/fuzz.sh +0 -108
  105. data/ext/deflate_ruby/libdeflate/scripts/libFuzzer/gzip_decompress/corpus/0 +0 -0
  106. data/ext/deflate_ruby/libdeflate/scripts/libFuzzer/gzip_decompress/fuzz.c +0 -19
  107. data/ext/deflate_ruby/libdeflate/scripts/libFuzzer/zlib_decompress/corpus/0 +0 -3
  108. data/ext/deflate_ruby/libdeflate/scripts/libFuzzer/zlib_decompress/fuzz.c +0 -19
  109. data/ext/deflate_ruby/libdeflate/scripts/run_tests.sh +0 -416
  110. data/ext/deflate_ruby/libdeflate/scripts/toolchain-i686-w64-mingw32.cmake +0 -8
  111. data/ext/deflate_ruby/libdeflate/scripts/toolchain-x86_64-w64-mingw32.cmake +0 -8
  112. /data/ext/deflate_ruby/{libdeflate/lib/adler32.c → adler32.c} +0 -0
  113. /data/ext/deflate_ruby/{libdeflate/lib/x86/adler32_template.h → adler32_template.h} +0 -0
  114. /data/ext/deflate_ruby/{libdeflate/lib/bt_matchfinder.h → bt_matchfinder.h} +0 -0
  115. /data/ext/deflate_ruby/{libdeflate/lib/cpu_features_common.h → cpu_features_common.h} +0 -0
  116. /data/ext/deflate_ruby/{libdeflate/lib/crc32.c → crc32.c} +0 -0
  117. /data/ext/deflate_ruby/{libdeflate/lib/arm/crc32_pmull_helpers.h → crc32_pmull_helpers.h} +0 -0
  118. /data/ext/deflate_ruby/{libdeflate/lib/arm/crc32_pmull_wide.h → crc32_pmull_wide.h} +0 -0
  119. /data/ext/deflate_ruby/{libdeflate/lib/x86/decompress_impl.h → decompress_impl.h} +0 -0
  120. /data/ext/deflate_ruby/{libdeflate/lib/decompress_template.h → decompress_template.h} +0 -0
  121. /data/ext/deflate_ruby/{libdeflate/lib/deflate_compress.h → deflate_compress.h} +0 -0
  122. /data/ext/deflate_ruby/{libdeflate/lib/deflate_constants.h → deflate_constants.h} +0 -0
  123. /data/ext/deflate_ruby/{libdeflate/lib/deflate_decompress.c → deflate_decompress.c} +0 -0
  124. /data/ext/deflate_ruby/{libdeflate/lib/gzip_compress.c → gzip_compress.c} +0 -0
  125. /data/ext/deflate_ruby/{libdeflate/lib/gzip_constants.h → gzip_constants.h} +0 -0
  126. /data/ext/deflate_ruby/{libdeflate/lib/gzip_decompress.c → gzip_decompress.c} +0 -0
  127. /data/ext/deflate_ruby/{libdeflate/lib/hc_matchfinder.h → hc_matchfinder.h} +0 -0
  128. /data/ext/deflate_ruby/{libdeflate/lib/ht_matchfinder.h → ht_matchfinder.h} +0 -0
  129. /data/ext/deflate_ruby/{libdeflate/lib/lib_common.h → lib_common.h} +0 -0
  130. /data/ext/deflate_ruby/libdeflate/{lib/arm → arm}/cpu_features.c +0 -0
  131. /data/ext/deflate_ruby/libdeflate/{lib/arm → arm}/cpu_features.h +0 -0
  132. /data/ext/deflate_ruby/libdeflate/{lib/arm → arm}/matchfinder_impl.h +0 -0
  133. /data/ext/deflate_ruby/libdeflate/{lib/riscv → riscv}/matchfinder_impl.h +0 -0
  134. /data/ext/deflate_ruby/libdeflate/{lib/utils.c → utils.c} +0 -0
  135. /data/ext/deflate_ruby/libdeflate/{lib/x86 → x86}/matchfinder_impl.h +0 -0
  136. /data/ext/deflate_ruby/libdeflate/{lib/zlib_compress.c → zlib_compress.c} +0 -0
  137. /data/ext/deflate_ruby/libdeflate/{lib/zlib_constants.h → zlib_constants.h} +0 -0
  138. /data/ext/deflate_ruby/libdeflate/{lib/zlib_decompress.c → zlib_decompress.c} +0 -0
data/ext/deflate_ruby/libdeflate/arm/crc32_pmull_wide.h
@@ -0,0 +1,226 @@
+ /*
+  * arm/crc32_pmull_wide.h - gzip CRC-32 with PMULL (extra-wide version)
+  *
+  * Copyright 2022 Eric Biggers
+  *
+  * Permission is hereby granted, free of charge, to any person
+  * obtaining a copy of this software and associated documentation
+  * files (the "Software"), to deal in the Software without
+  * restriction, including without limitation the rights to use,
+  * copy, modify, merge, publish, distribute, sublicense, and/or sell
+  * copies of the Software, and to permit persons to whom the
+  * Software is furnished to do so, subject to the following
+  * conditions:
+  *
+  * The above copyright notice and this permission notice shall be
+  * included in all copies or substantial portions of the Software.
+  *
+  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+  * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+  * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+  * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+  * OTHER DEALINGS IN THE SOFTWARE.
+  */
+
+ /*
+  * This file is a "template" for instantiating PMULL-based crc32_arm functions.
+  * The "parameters" are:
+  *
+  * SUFFIX:
+  *	Name suffix to append to all instantiated functions.
+  * ATTRIBUTES:
+  *	Target function attributes to use.
+  * ENABLE_EOR3:
+  *	Use the eor3 instruction (from the sha3 extension).
+  *
+  * This is the extra-wide version; it uses an unusually large stride length of
+  * 12, and it assumes that crc32 instructions are available too. It's intended
+  * for powerful CPUs that support both pmull and crc32 instructions, but where
+  * throughput of pmull and xor (given enough instructions issued in parallel) is
+  * significantly higher than that of crc32, thus making the crc32 instructions
+  * (counterintuitively) not actually the fastest way to compute the CRC-32. The
+  * Apple M1 processor is an example of such a CPU.
+  */
+
+ #include "crc32_pmull_helpers.h"
+
+ static ATTRIBUTES u32
+ ADD_SUFFIX(crc32_arm)(u32 crc, const u8 *p, size_t len)
+ {
+ 	uint8x16_t v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11;
+
+ 	if (len < 3 * 192) {
+ 		static const u64 _aligned_attribute(16) mults[3][2] = {
+ 			{ CRC32_X543_MODG, CRC32_X479_MODG }, /* 4 vecs */
+ 			{ CRC32_X287_MODG, CRC32_X223_MODG }, /* 2 vecs */
+ 			{ CRC32_X159_MODG, CRC32_X95_MODG }, /* 1 vecs */
+ 		};
+ 		poly64x2_t multipliers_4, multipliers_2, multipliers_1;
+
+ 		if (len < 64)
+ 			goto tail;
+ 		multipliers_4 = load_multipliers(mults[0]);
+ 		multipliers_2 = load_multipliers(mults[1]);
+ 		multipliers_1 = load_multipliers(mults[2]);
+ 		/*
+ 		 * Short length; don't bother aligning the pointer, and fold
+ 		 * 64 bytes (4 vectors) at a time, at most.
+ 		 */
+ 		v0 = veorq_u8(vld1q_u8(p + 0), u32_to_bytevec(crc));
+ 		v1 = vld1q_u8(p + 16);
+ 		v2 = vld1q_u8(p + 32);
+ 		v3 = vld1q_u8(p + 48);
+ 		p += 64;
+ 		len -= 64;
+ 		while (len >= 64) {
+ 			v0 = fold_vec(v0, vld1q_u8(p + 0), multipliers_4);
+ 			v1 = fold_vec(v1, vld1q_u8(p + 16), multipliers_4);
+ 			v2 = fold_vec(v2, vld1q_u8(p + 32), multipliers_4);
+ 			v3 = fold_vec(v3, vld1q_u8(p + 48), multipliers_4);
+ 			p += 64;
+ 			len -= 64;
+ 		}
+ 		v0 = fold_vec(v0, v2, multipliers_2);
+ 		v1 = fold_vec(v1, v3, multipliers_2);
+ 		if (len >= 32) {
+ 			v0 = fold_vec(v0, vld1q_u8(p + 0), multipliers_2);
+ 			v1 = fold_vec(v1, vld1q_u8(p + 16), multipliers_2);
+ 			p += 32;
+ 			len -= 32;
+ 		}
+ 		v0 = fold_vec(v0, v1, multipliers_1);
+ 	} else {
+ 		static const u64 _aligned_attribute(16) mults[4][2] = {
+ 			{ CRC32_X1567_MODG, CRC32_X1503_MODG }, /* 12 vecs */
+ 			{ CRC32_X799_MODG, CRC32_X735_MODG }, /* 6 vecs */
+ 			{ CRC32_X415_MODG, CRC32_X351_MODG }, /* 3 vecs */
+ 			{ CRC32_X159_MODG, CRC32_X95_MODG }, /* 1 vecs */
+ 		};
+ 		const poly64x2_t multipliers_12 = load_multipliers(mults[0]);
+ 		const poly64x2_t multipliers_6 = load_multipliers(mults[1]);
+ 		const poly64x2_t multipliers_3 = load_multipliers(mults[2]);
+ 		const poly64x2_t multipliers_1 = load_multipliers(mults[3]);
+ 		const size_t align = -(uintptr_t)p & 15;
+ 		const uint8x16_t *vp;
+
+ 		/* Align p to the next 16-byte boundary. */
+ 		if (align) {
+ 			if (align & 1)
+ 				crc = __crc32b(crc, *p++);
+ 			if (align & 2) {
+ 				crc = __crc32h(crc, le16_bswap(*(u16 *)p));
+ 				p += 2;
+ 			}
+ 			if (align & 4) {
+ 				crc = __crc32w(crc, le32_bswap(*(u32 *)p));
+ 				p += 4;
+ 			}
+ 			if (align & 8) {
+ 				crc = __crc32d(crc, le64_bswap(*(u64 *)p));
+ 				p += 8;
+ 			}
+ 			len -= align;
+ 		}
+ 		vp = (const uint8x16_t *)p;
+ 		v0 = veorq_u8(*vp++, u32_to_bytevec(crc));
+ 		v1 = *vp++;
+ 		v2 = *vp++;
+ 		v3 = *vp++;
+ 		v4 = *vp++;
+ 		v5 = *vp++;
+ 		v6 = *vp++;
+ 		v7 = *vp++;
+ 		v8 = *vp++;
+ 		v9 = *vp++;
+ 		v10 = *vp++;
+ 		v11 = *vp++;
+ 		len -= 192;
+ 		/* Fold 192 bytes (12 vectors) at a time. */
+ 		do {
+ 			v0 = fold_vec(v0, *vp++, multipliers_12);
+ 			v1 = fold_vec(v1, *vp++, multipliers_12);
+ 			v2 = fold_vec(v2, *vp++, multipliers_12);
+ 			v3 = fold_vec(v3, *vp++, multipliers_12);
+ 			v4 = fold_vec(v4, *vp++, multipliers_12);
+ 			v5 = fold_vec(v5, *vp++, multipliers_12);
+ 			v6 = fold_vec(v6, *vp++, multipliers_12);
+ 			v7 = fold_vec(v7, *vp++, multipliers_12);
+ 			v8 = fold_vec(v8, *vp++, multipliers_12);
+ 			v9 = fold_vec(v9, *vp++, multipliers_12);
+ 			v10 = fold_vec(v10, *vp++, multipliers_12);
+ 			v11 = fold_vec(v11, *vp++, multipliers_12);
+ 			len -= 192;
+ 		} while (len >= 192);
+
+ 		/*
+ 		 * Fewer than 192 bytes left. Fold v0-v11 down to just v0,
+ 		 * while processing up to 144 more bytes.
+ 		 */
+ 		v0 = fold_vec(v0, v6, multipliers_6);
+ 		v1 = fold_vec(v1, v7, multipliers_6);
+ 		v2 = fold_vec(v2, v8, multipliers_6);
+ 		v3 = fold_vec(v3, v9, multipliers_6);
+ 		v4 = fold_vec(v4, v10, multipliers_6);
+ 		v5 = fold_vec(v5, v11, multipliers_6);
+ 		if (len >= 96) {
+ 			v0 = fold_vec(v0, *vp++, multipliers_6);
+ 			v1 = fold_vec(v1, *vp++, multipliers_6);
+ 			v2 = fold_vec(v2, *vp++, multipliers_6);
+ 			v3 = fold_vec(v3, *vp++, multipliers_6);
+ 			v4 = fold_vec(v4, *vp++, multipliers_6);
+ 			v5 = fold_vec(v5, *vp++, multipliers_6);
+ 			len -= 96;
+ 		}
+ 		v0 = fold_vec(v0, v3, multipliers_3);
+ 		v1 = fold_vec(v1, v4, multipliers_3);
+ 		v2 = fold_vec(v2, v5, multipliers_3);
+ 		if (len >= 48) {
+ 			v0 = fold_vec(v0, *vp++, multipliers_3);
+ 			v1 = fold_vec(v1, *vp++, multipliers_3);
+ 			v2 = fold_vec(v2, *vp++, multipliers_3);
+ 			len -= 48;
+ 		}
+ 		v0 = fold_vec(v0, v1, multipliers_1);
+ 		v0 = fold_vec(v0, v2, multipliers_1);
+ 		p = (const u8 *)vp;
+ 	}
+ 	/* Reduce 128 to 32 bits using crc32 instructions. */
+ 	crc = __crc32d(0, vgetq_lane_u64(vreinterpretq_u64_u8(v0), 0));
+ 	crc = __crc32d(crc, vgetq_lane_u64(vreinterpretq_u64_u8(v0), 1));
+ tail:
+ 	/* Finish up the remainder using crc32 instructions. */
+ 	if (len & 32) {
+ 		crc = __crc32d(crc, get_unaligned_le64(p + 0));
+ 		crc = __crc32d(crc, get_unaligned_le64(p + 8));
+ 		crc = __crc32d(crc, get_unaligned_le64(p + 16));
+ 		crc = __crc32d(crc, get_unaligned_le64(p + 24));
+ 		p += 32;
+ 	}
+ 	if (len & 16) {
+ 		crc = __crc32d(crc, get_unaligned_le64(p + 0));
+ 		crc = __crc32d(crc, get_unaligned_le64(p + 8));
+ 		p += 16;
+ 	}
+ 	if (len & 8) {
+ 		crc = __crc32d(crc, get_unaligned_le64(p));
+ 		p += 8;
+ 	}
+ 	if (len & 4) {
+ 		crc = __crc32w(crc, get_unaligned_le32(p));
+ 		p += 4;
+ 	}
+ 	if (len & 2) {
+ 		crc = __crc32h(crc, get_unaligned_le16(p));
+ 		p += 2;
+ 	}
+ 	if (len & 1)
+ 		crc = __crc32b(crc, *p);
+ 	return crc;
+ }
+
+ #undef SUFFIX
+ #undef ATTRIBUTES
+ #undef ENABLE_EOR3
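
Note: the header comment above describes this file as a template. The includer defines SUFFIX, ATTRIBUTES, and ENABLE_EOR3, includes the file, and gets a crc32_arm variant whose name is built via ADD_SUFFIX; the trailing #undefs allow the file to be included more than once. A minimal sketch of an instantiation, with an illustrative suffix and attribute string (the gem's real instantiations live in its arm/crc32_impl.h):

	/* Hypothetical instantiation -- suffix and target attributes are
	 * illustrative, not copied from the gem. */
	#define SUFFIX      _pmullx12_crc
	#define ATTRIBUTES  _target_attribute("aes,crc")
	#define ENABLE_EOR3 0
	#include "crc32_pmull_wide.h"
	/* ADD_SUFFIX(crc32_arm) expands to crc32_arm_pmullx12_crc, which the
	 * includer can now dispatch to on CPUs with both pmull and crc32. */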
data/ext/deflate_ruby/libdeflate/bt_matchfinder.h
@@ -0,0 +1,342 @@
+ /*
+  * bt_matchfinder.h - Lempel-Ziv matchfinding with a hash table of binary trees
+  *
+  * Copyright 2016 Eric Biggers
+  *
+  * Permission is hereby granted, free of charge, to any person
+  * obtaining a copy of this software and associated documentation
+  * files (the "Software"), to deal in the Software without
+  * restriction, including without limitation the rights to use,
+  * copy, modify, merge, publish, distribute, sublicense, and/or sell
+  * copies of the Software, and to permit persons to whom the
+  * Software is furnished to do so, subject to the following
+  * conditions:
+  *
+  * The above copyright notice and this permission notice shall be
+  * included in all copies or substantial portions of the Software.
+  *
+  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+  * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+  * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+  * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+  * OTHER DEALINGS IN THE SOFTWARE.
+  *
+  * ----------------------------------------------------------------------------
+  *
+  * This is a Binary Trees (bt) based matchfinder.
+  *
+  * The main data structure is a hash table where each hash bucket contains a
+  * binary tree of sequences whose first 4 bytes share the same hash code. Each
+  * sequence is identified by its starting position in the input buffer. Each
+  * binary tree is always sorted such that each left child represents a sequence
+  * lexicographically lesser than its parent and each right child represents a
+  * sequence lexicographically greater than its parent.
+  *
+  * The algorithm processes the input buffer sequentially. At each byte
+  * position, the hash code of the first 4 bytes of the sequence beginning at
+  * that position (the sequence being matched against) is computed. This
+  * identifies the hash bucket to use for that position. Then, a new binary tree
+  * node is created to represent the current sequence. Then, in a single tree
+  * traversal, the hash bucket's binary tree is searched for matches and is
+  * re-rooted at the new node.
+  *
+  * Compared to the simpler algorithm that uses linked lists instead of binary
+  * trees (see hc_matchfinder.h), the binary tree version gains more information
+  * at each node visitation. Ideally, the binary tree version will examine only
+  * 'log(n)' nodes to find the same matches that the linked list version will
+  * find by examining 'n' nodes. In addition, the binary tree version can
+  * examine fewer bytes at each node by taking advantage of the common prefixes
+  * that result from the sort order, whereas the linked list version may have to
+  * examine up to the full length of the match at each node.
+  *
+  * However, it is not always best to use the binary tree version. It requires
+  * nearly twice as much memory as the linked list version, and it takes time to
+  * keep the binary trees sorted, even at positions where the compressor does not
+  * need matches. Generally, when doing fast compression on small buffers,
+  * binary trees are the wrong approach. They are best suited for thorough
+  * compression and/or large buffers.
+  *
+  * ----------------------------------------------------------------------------
+  */
+
+ #ifndef LIB_BT_MATCHFINDER_H
+ #define LIB_BT_MATCHFINDER_H
+
+ #include "matchfinder_common.h"
+
+ #define BT_MATCHFINDER_HASH3_ORDER 16
+ #define BT_MATCHFINDER_HASH3_WAYS 2
+ #define BT_MATCHFINDER_HASH4_ORDER 16
+
+ #define BT_MATCHFINDER_TOTAL_HASH_SIZE \
+ 	(((1UL << BT_MATCHFINDER_HASH3_ORDER) * BT_MATCHFINDER_HASH3_WAYS + \
+ 	  (1UL << BT_MATCHFINDER_HASH4_ORDER)) * sizeof(mf_pos_t))
+
+ /* Representation of a match found by the bt_matchfinder */
+ struct lz_match {
+
+ 	/* The number of bytes matched. */
+ 	u16 length;
+
+ 	/* The offset back from the current position that was matched. */
+ 	u16 offset;
+ };
+
+ struct MATCHFINDER_ALIGNED bt_matchfinder {
+
+ 	/* The hash table for finding length 3 matches */
+ 	mf_pos_t hash3_tab[1UL << BT_MATCHFINDER_HASH3_ORDER][BT_MATCHFINDER_HASH3_WAYS];
+
+ 	/* The hash table which contains the roots of the binary trees for
+ 	 * finding length 4+ matches */
+ 	mf_pos_t hash4_tab[1UL << BT_MATCHFINDER_HASH4_ORDER];
+
+ 	/* The child node references for the binary trees. The left and right
+ 	 * children of the node for the sequence with position 'pos' are
+ 	 * 'child_tab[pos * 2]' and 'child_tab[pos * 2 + 1]', respectively. */
+ 	mf_pos_t child_tab[2UL * MATCHFINDER_WINDOW_SIZE];
+ };
+
+ /* Prepare the matchfinder for a new input buffer. */
+ static forceinline void
+ bt_matchfinder_init(struct bt_matchfinder *mf)
+ {
+ 	STATIC_ASSERT(BT_MATCHFINDER_TOTAL_HASH_SIZE %
+ 		      MATCHFINDER_SIZE_ALIGNMENT == 0);
+
+ 	matchfinder_init((mf_pos_t *)mf, BT_MATCHFINDER_TOTAL_HASH_SIZE);
+ }
+
+ static forceinline void
+ bt_matchfinder_slide_window(struct bt_matchfinder *mf)
+ {
+ 	STATIC_ASSERT(sizeof(*mf) % MATCHFINDER_SIZE_ALIGNMENT == 0);
+
+ 	matchfinder_rebase((mf_pos_t *)mf, sizeof(*mf));
+ }
+
+ static forceinline mf_pos_t *
+ bt_left_child(struct bt_matchfinder *mf, s32 node)
+ {
+ 	return &mf->child_tab[2 * (node & (MATCHFINDER_WINDOW_SIZE - 1)) + 0];
+ }
+
+ static forceinline mf_pos_t *
+ bt_right_child(struct bt_matchfinder *mf, s32 node)
+ {
+ 	return &mf->child_tab[2 * (node & (MATCHFINDER_WINDOW_SIZE - 1)) + 1];
+ }
+
+ /* The minimum permissible value of 'max_len' for bt_matchfinder_get_matches()
+  * and bt_matchfinder_skip_byte(). There must be sufficiently many bytes
+  * remaining to load a 32-bit integer from the *next* position. */
+ #define BT_MATCHFINDER_REQUIRED_NBYTES 5
+
+ /* Advance the binary tree matchfinder by one byte, optionally recording
+  * matches. @record_matches should be a compile-time constant. */
+ static forceinline struct lz_match *
+ bt_matchfinder_advance_one_byte(struct bt_matchfinder * const mf,
+ 				const u8 * const in_base,
+ 				const ptrdiff_t cur_pos,
+ 				const u32 max_len,
+ 				const u32 nice_len,
+ 				const u32 max_search_depth,
+ 				u32 * const next_hashes,
+ 				struct lz_match *lz_matchptr,
+ 				const bool record_matches)
+ {
+ 	const u8 *in_next = in_base + cur_pos;
+ 	u32 depth_remaining = max_search_depth;
+ 	const s32 cutoff = cur_pos - MATCHFINDER_WINDOW_SIZE;
+ 	u32 next_hashseq;
+ 	u32 hash3;
+ 	u32 hash4;
+ 	s32 cur_node;
+ #if BT_MATCHFINDER_HASH3_WAYS >= 2
+ 	s32 cur_node_2;
+ #endif
+ 	const u8 *matchptr;
+ 	mf_pos_t *pending_lt_ptr, *pending_gt_ptr;
+ 	u32 best_lt_len, best_gt_len;
+ 	u32 len;
+ 	u32 best_len = 3;
+
+ 	STATIC_ASSERT(BT_MATCHFINDER_HASH3_WAYS >= 1 &&
+ 		      BT_MATCHFINDER_HASH3_WAYS <= 2);
+
+ 	next_hashseq = get_unaligned_le32(in_next + 1);
+
+ 	hash3 = next_hashes[0];
+ 	hash4 = next_hashes[1];
+
+ 	next_hashes[0] = lz_hash(next_hashseq & 0xFFFFFF, BT_MATCHFINDER_HASH3_ORDER);
+ 	next_hashes[1] = lz_hash(next_hashseq, BT_MATCHFINDER_HASH4_ORDER);
+ 	prefetchw(&mf->hash3_tab[next_hashes[0]]);
+ 	prefetchw(&mf->hash4_tab[next_hashes[1]]);
+
+ 	cur_node = mf->hash3_tab[hash3][0];
+ 	mf->hash3_tab[hash3][0] = cur_pos;
+ #if BT_MATCHFINDER_HASH3_WAYS >= 2
+ 	cur_node_2 = mf->hash3_tab[hash3][1];
+ 	mf->hash3_tab[hash3][1] = cur_node;
+ #endif
+ 	if (record_matches && cur_node > cutoff) {
+ 		u32 seq3 = load_u24_unaligned(in_next);
+ 		if (seq3 == load_u24_unaligned(&in_base[cur_node])) {
+ 			lz_matchptr->length = 3;
+ 			lz_matchptr->offset = in_next - &in_base[cur_node];
+ 			lz_matchptr++;
+ 		}
+ #if BT_MATCHFINDER_HASH3_WAYS >= 2
+ 		else if (cur_node_2 > cutoff &&
+ 			 seq3 == load_u24_unaligned(&in_base[cur_node_2]))
+ 		{
+ 			lz_matchptr->length = 3;
+ 			lz_matchptr->offset = in_next - &in_base[cur_node_2];
+ 			lz_matchptr++;
+ 		}
+ #endif
+ 	}
+
+ 	cur_node = mf->hash4_tab[hash4];
+ 	mf->hash4_tab[hash4] = cur_pos;
+
+ 	pending_lt_ptr = bt_left_child(mf, cur_pos);
+ 	pending_gt_ptr = bt_right_child(mf, cur_pos);
+
+ 	if (cur_node <= cutoff) {
+ 		*pending_lt_ptr = MATCHFINDER_INITVAL;
+ 		*pending_gt_ptr = MATCHFINDER_INITVAL;
+ 		return lz_matchptr;
+ 	}
+
+ 	best_lt_len = 0;
+ 	best_gt_len = 0;
+ 	len = 0;
+
+ 	for (;;) {
+ 		matchptr = &in_base[cur_node];
+
+ 		if (matchptr[len] == in_next[len]) {
+ 			len = lz_extend(in_next, matchptr, len + 1, max_len);
+ 			if (!record_matches || len > best_len) {
+ 				if (record_matches) {
+ 					best_len = len;
+ 					lz_matchptr->length = len;
+ 					lz_matchptr->offset = in_next - matchptr;
+ 					lz_matchptr++;
+ 				}
+ 				if (len >= nice_len) {
+ 					*pending_lt_ptr = *bt_left_child(mf, cur_node);
+ 					*pending_gt_ptr = *bt_right_child(mf, cur_node);
+ 					return lz_matchptr;
+ 				}
+ 			}
+ 		}
+
+ 		if (matchptr[len] < in_next[len]) {
+ 			*pending_lt_ptr = cur_node;
+ 			pending_lt_ptr = bt_right_child(mf, cur_node);
+ 			cur_node = *pending_lt_ptr;
+ 			best_lt_len = len;
+ 			if (best_gt_len < len)
+ 				len = best_gt_len;
+ 		} else {
+ 			*pending_gt_ptr = cur_node;
+ 			pending_gt_ptr = bt_left_child(mf, cur_node);
+ 			cur_node = *pending_gt_ptr;
+ 			best_gt_len = len;
+ 			if (best_lt_len < len)
+ 				len = best_lt_len;
+ 		}
+
+ 		if (cur_node <= cutoff || !--depth_remaining) {
+ 			*pending_lt_ptr = MATCHFINDER_INITVAL;
+ 			*pending_gt_ptr = MATCHFINDER_INITVAL;
+ 			return lz_matchptr;
+ 		}
+ 	}
+ }
+
+ /*
+  * Retrieve a list of matches with the current position.
+  *
+  * @mf
+  *	The matchfinder structure.
+  * @in_base
+  *	Pointer to the next byte in the input buffer to process _at the last
+  *	time bt_matchfinder_init() or bt_matchfinder_slide_window() was called_.
+  * @cur_pos
+  *	The current position in the input buffer relative to @in_base (the
+  *	position of the sequence being matched against).
+  * @max_len
+  *	The maximum permissible match length at this position. Must be >=
+  *	BT_MATCHFINDER_REQUIRED_NBYTES.
+  * @nice_len
+  *	Stop searching if a match of at least this length is found.
+  *	Must be <= @max_len.
+  * @max_search_depth
+  *	Limit on the number of potential matches to consider. Must be >= 1.
+  * @next_hashes
+  *	The precomputed hash codes for the sequence beginning at @in_next.
+  *	These will be used and then updated with the precomputed hashcodes for
+  *	the sequence beginning at @in_next + 1.
+  * @lz_matchptr
+  *	An array in which this function will record the matches. The recorded
+  *	matches will be sorted by strictly increasing length and (non-strictly)
+  *	increasing offset. The maximum number of matches that may be found is
+  *	'nice_len - 2'.
+  *
+  * The return value is a pointer to the next available slot in the @lz_matchptr
+  * array. (If no matches were found, this will be the same as @lz_matchptr.)
+  */
+ static forceinline struct lz_match *
+ bt_matchfinder_get_matches(struct bt_matchfinder *mf,
+ 			   const u8 *in_base,
+ 			   ptrdiff_t cur_pos,
+ 			   u32 max_len,
+ 			   u32 nice_len,
+ 			   u32 max_search_depth,
+ 			   u32 next_hashes[2],
+ 			   struct lz_match *lz_matchptr)
+ {
+ 	return bt_matchfinder_advance_one_byte(mf,
+ 					       in_base,
+ 					       cur_pos,
+ 					       max_len,
+ 					       nice_len,
+ 					       max_search_depth,
+ 					       next_hashes,
+ 					       lz_matchptr,
+ 					       true);
+ }
+
+ /*
+  * Advance the matchfinder, but don't record any matches.
+  *
+  * This is very similar to bt_matchfinder_get_matches() because both functions
+  * must do hashing and tree re-rooting.
+  */
+ static forceinline void
+ bt_matchfinder_skip_byte(struct bt_matchfinder *mf,
+ 			 const u8 *in_base,
+ 			 ptrdiff_t cur_pos,
+ 			 u32 nice_len,
+ 			 u32 max_search_depth,
+ 			 u32 next_hashes[2])
+ {
+ 	bt_matchfinder_advance_one_byte(mf,
+ 					in_base,
+ 					cur_pos,
+ 					nice_len,
+ 					nice_len,
+ 					max_search_depth,
+ 					next_hashes,
+ 					NULL,
+ 					false);
+ }
+
+ #endif /* LIB_BT_MATCHFINDER_H */
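
Note: the doc comment for bt_matchfinder_get_matches() pins down the calling convention: the caller supplies precomputed next_hashes, an output array with at least 'nice_len - 2' slots, and gets back a pointer one past the last recorded match. A rough sketch of a scan loop under those rules (the constants, the MIN macro, and the assumption that MATCHFINDER_WINDOW_SIZE was defined before including matchfinder_common.h are ours; the gem's real caller is deflate_compress.c):

	/* Sketch only, not the gem's compressor code. */
	#define NICE_LEN 64

	static void
	scan_buffer(struct bt_matchfinder *mf, const u8 *in, size_t in_nbytes)
	{
		struct lz_match matches[NICE_LEN - 2]; /* documented worst case */
		u32 next_hashes[2] = { 0, 0 };
		ptrdiff_t pos = 0;

		bt_matchfinder_init(mf);
		/* Stop BT_MATCHFINDER_REQUIRED_NBYTES early so the 32-bit hash
		 * load at pos + 1 stays inside the buffer. */
		while (pos + BT_MATCHFINDER_REQUIRED_NBYTES <= (ptrdiff_t)in_nbytes) {
			u32 max_len = MIN(in_nbytes - pos, 258); /* DEFLATE cap */
			struct lz_match *end;

			end = bt_matchfinder_get_matches(mf, in, pos, max_len,
							 NICE_LEN, 16,
							 next_hashes, matches);
			/* matches .. end-1 are sorted by strictly increasing
			 * length; a compressor would pick among them here. */
			pos++;
		}
	}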
data/ext/deflate_ruby/libdeflate/common_defs.h
@@ -67,7 +67,8 @@
  #undef ARCH_ARM32
  #undef ARCH_RISCV
  #ifdef _MSC_VER
- # if defined(_M_X64)
+ /* Way too many things are broken in ARM64EC to pretend that it is x86_64. */
+ # if defined(_M_X64) && !defined(_M_ARM64EC)
  # define ARCH_X86_64
  # elif defined(_M_IX86)
  # define ARCH_X86_32
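
Note on this hunk: MSVC's ARM64EC target defines _M_X64 (for x64 compatibility) alongside _M_ARM64EC, so before this change the header would have defined ARCH_X86_64 there and later selected x86 intrinsic code paths that don't build on ARM64EC. A hedged illustration of the detection logic:

	/* Under cl.exe targeting ARM64EC, both macros are defined: */
	#if defined(_M_X64) && defined(_M_ARM64EC)
	/* ...so the added !defined(_M_ARM64EC) check is what keeps
	 * ARCH_X86_64 from being defined on that target. */
	#endif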
data/ext/deflate_ruby/libdeflate/cpu_features_common.h
@@ -0,0 +1,93 @@
+ /*
+  * cpu_features_common.h - code shared by all lib/$arch/cpu_features.c
+  *
+  * Copyright 2020 Eric Biggers
+  *
+  * Permission is hereby granted, free of charge, to any person
+  * obtaining a copy of this software and associated documentation
+  * files (the "Software"), to deal in the Software without
+  * restriction, including without limitation the rights to use,
+  * copy, modify, merge, publish, distribute, sublicense, and/or sell
+  * copies of the Software, and to permit persons to whom the
+  * Software is furnished to do so, subject to the following
+  * conditions:
+  *
+  * The above copyright notice and this permission notice shall be
+  * included in all copies or substantial portions of the Software.
+  *
+  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+  * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+  * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+  * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+  * OTHER DEALINGS IN THE SOFTWARE.
+  */
+
+ #ifndef LIB_CPU_FEATURES_COMMON_H
+ #define LIB_CPU_FEATURES_COMMON_H
+
+ #if defined(TEST_SUPPORT__DO_NOT_USE) && !defined(FREESTANDING)
+ /* for strdup() and strtok_r() */
+ #  undef _ANSI_SOURCE
+ #  ifndef __APPLE__
+ #    undef _GNU_SOURCE
+ #    define _GNU_SOURCE
+ #  endif
+ #  include <stdio.h>
+ #  include <stdlib.h>
+ #  include <string.h>
+ #endif
+
+ #include "lib_common.h"
+
+ struct cpu_feature {
+ 	u32 bit;
+ 	const char *name;
+ };
+
+ #if defined(TEST_SUPPORT__DO_NOT_USE) && !defined(FREESTANDING)
+ /* Disable any features that are listed in $LIBDEFLATE_DISABLE_CPU_FEATURES. */
+ static inline void
+ disable_cpu_features_for_testing(u32 *features,
+ 				 const struct cpu_feature *feature_table,
+ 				 size_t feature_table_length)
+ {
+ 	char *env_value, *strbuf, *p, *saveptr = NULL;
+ 	size_t i;
+
+ 	env_value = getenv("LIBDEFLATE_DISABLE_CPU_FEATURES");
+ 	if (!env_value)
+ 		return;
+ 	strbuf = strdup(env_value);
+ 	if (!strbuf)
+ 		abort();
+ 	p = strtok_r(strbuf, ",", &saveptr);
+ 	while (p) {
+ 		for (i = 0; i < feature_table_length; i++) {
+ 			if (strcmp(p, feature_table[i].name) == 0) {
+ 				*features &= ~feature_table[i].bit;
+ 				break;
+ 			}
+ 		}
+ 		if (i == feature_table_length) {
+ 			fprintf(stderr,
+ 				"unrecognized feature in LIBDEFLATE_DISABLE_CPU_FEATURES: \"%s\"\n",
+ 				p);
+ 			abort();
+ 		}
+ 		p = strtok_r(NULL, ",", &saveptr);
+ 	}
+ 	free(strbuf);
+ }
+ #else /* TEST_SUPPORT__DO_NOT_USE */
+ static inline void
+ disable_cpu_features_for_testing(u32 *features,
+ 				 const struct cpu_feature *feature_table,
+ 				 size_t feature_table_length)
+ {
+ }
+ #endif /* !TEST_SUPPORT__DO_NOT_USE */
+
+ #endif /* LIB_CPU_FEATURES_COMMON_H */
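
Note: disable_cpu_features_for_testing() splits the LIBDEFLATE_DISABLE_CPU_FEATURES environment variable on commas and clears the matching bits, aborting on an unrecognized name. A hedged sketch of how an arch-specific cpu_features.c might call it; the feature names and bit values below are made up for illustration, not the gem's actual tables (those live in x86/cpu_features.c and arm/cpu_features.c):

	/* Illustrative feature table and detection stub. */
	#define EXAMPLE_CPU_FEATURE_AVX2 0x01
	#define EXAMPLE_CPU_FEATURE_BMI2 0x02

	static const struct cpu_feature example_feature_table[] = {
		{ EXAMPLE_CPU_FEATURE_AVX2, "avx2" },
		{ EXAMPLE_CPU_FEATURE_BMI2, "bmi2" },
	};

	static u32
	detect_features(void)
	{
		/* Pretend CPUID reported both features. */
		u32 features = EXAMPLE_CPU_FEATURE_AVX2 | EXAMPLE_CPU_FEATURE_BMI2;

		/* With TEST_SUPPORT__DO_NOT_USE defined and
		 * LIBDEFLATE_DISABLE_CPU_FEATURES="avx2,bmi2" in the
		 * environment, both bits get cleared here. */
		disable_cpu_features_for_testing(&features, example_feature_table,
						 sizeof(example_feature_table) /
						 sizeof(example_feature_table[0]));
		return features;
	}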