deflate-ruby 1.0.0 → 1.0.2

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registries.
Files changed (138)
  1. checksums.yaml +4 -4
  2. data/CLAUDE.md +95 -92
  3. data/LICENSE.txt +6 -6
  4. data/README.md +87 -65
  5. data/Rakefile +23 -0
  6. data/ext/deflate_ruby/{libdeflate/lib/x86/adler32_impl.h → adler32_impl.h} +8 -7
  7. data/ext/deflate_ruby/common_defs.h +748 -0
  8. data/ext/deflate_ruby/{libdeflate/lib/x86/cpu_features.c → cpu_features.c} +46 -16
  9. data/ext/deflate_ruby/{libdeflate/lib/x86/cpu_features.h → cpu_features.h} +2 -1
  10. data/ext/deflate_ruby/{libdeflate/lib/x86/crc32_impl.h → crc32_impl.h} +22 -23
  11. data/ext/deflate_ruby/{libdeflate/lib/crc32_multipliers.h → crc32_multipliers.h} +2 -4
  12. data/ext/deflate_ruby/{libdeflate/lib/x86/crc32_pclmul_template.h → crc32_pclmul_template.h} +23 -94
  13. data/ext/deflate_ruby/{libdeflate/lib/crc32_tables.h → crc32_tables.h} +1 -1
  14. data/ext/deflate_ruby/{libdeflate/lib/deflate_compress.c → deflate_compress.c} +59 -60
  15. data/ext/deflate_ruby/deflate_ruby.c +392 -218
  16. data/ext/deflate_ruby/deflate_ruby.h +6 -0
  17. data/ext/deflate_ruby/extconf.rb +35 -25
  18. data/ext/deflate_ruby/libdeflate/adler32.c +162 -0
  19. data/ext/deflate_ruby/libdeflate/{lib/arm → arm}/adler32_impl.h +14 -7
  20. data/ext/deflate_ruby/libdeflate/{lib/arm → arm}/crc32_impl.h +25 -31
  21. data/ext/deflate_ruby/libdeflate/arm/crc32_pmull_helpers.h +156 -0
  22. data/ext/deflate_ruby/libdeflate/arm/crc32_pmull_wide.h +226 -0
  23. data/ext/deflate_ruby/libdeflate/bt_matchfinder.h +342 -0
  24. data/ext/deflate_ruby/libdeflate/common_defs.h +2 -1
  25. data/ext/deflate_ruby/libdeflate/cpu_features_common.h +93 -0
  26. data/ext/deflate_ruby/libdeflate/crc32.c +262 -0
  27. data/ext/deflate_ruby/libdeflate/crc32_multipliers.h +375 -0
  28. data/ext/deflate_ruby/libdeflate/crc32_tables.h +587 -0
  29. data/ext/deflate_ruby/libdeflate/decompress_template.h +777 -0
  30. data/ext/deflate_ruby/libdeflate/deflate_compress.c +4128 -0
  31. data/ext/deflate_ruby/libdeflate/deflate_compress.h +15 -0
  32. data/ext/deflate_ruby/libdeflate/deflate_constants.h +56 -0
  33. data/ext/deflate_ruby/libdeflate/deflate_decompress.c +1208 -0
  34. data/ext/deflate_ruby/libdeflate/gzip_compress.c +90 -0
  35. data/ext/deflate_ruby/libdeflate/gzip_constants.h +45 -0
  36. data/ext/deflate_ruby/libdeflate/gzip_decompress.c +144 -0
  37. data/ext/deflate_ruby/libdeflate/hc_matchfinder.h +401 -0
  38. data/ext/deflate_ruby/libdeflate/ht_matchfinder.h +234 -0
  39. data/ext/deflate_ruby/libdeflate/lib_common.h +106 -0
  40. data/ext/deflate_ruby/libdeflate/libdeflate.h +2 -2
  41. data/ext/deflate_ruby/libdeflate/{lib/matchfinder_common.h → matchfinder_common.h} +3 -3
  42. data/ext/deflate_ruby/libdeflate/x86/adler32_impl.h +135 -0
  43. data/ext/deflate_ruby/libdeflate/x86/adler32_template.h +518 -0
  44. data/ext/deflate_ruby/libdeflate/x86/cpu_features.c +213 -0
  45. data/ext/deflate_ruby/libdeflate/x86/cpu_features.h +170 -0
  46. data/ext/deflate_ruby/libdeflate/x86/crc32_impl.h +159 -0
  47. data/ext/deflate_ruby/libdeflate/x86/crc32_pclmul_template.h +424 -0
  48. data/ext/deflate_ruby/libdeflate/x86/decompress_impl.h +57 -0
  49. data/ext/deflate_ruby/libdeflate.h +411 -0
  50. data/ext/deflate_ruby/matchfinder_common.h +224 -0
  51. data/ext/deflate_ruby/matchfinder_impl.h +122 -0
  52. data/ext/deflate_ruby/utils.c +141 -0
  53. data/ext/deflate_ruby/zlib_compress.c +82 -0
  54. data/ext/deflate_ruby/zlib_constants.h +21 -0
  55. data/ext/deflate_ruby/zlib_decompress.c +104 -0
  56. data/lib/deflate_ruby/version.rb +1 -1
  57. data/lib/deflate_ruby.rb +1 -63
  58. data/sig/deflate_ruby.rbs +4 -0
  59. data/test/test_deflate_ruby.rb +220 -0
  60. data/test/test_helper.rb +6 -0
  61. metadata +89 -144
  62. data/ext/deflate_ruby/libdeflate/CMakeLists.txt +0 -270
  63. data/ext/deflate_ruby/libdeflate/NEWS.md +0 -494
  64. data/ext/deflate_ruby/libdeflate/README.md +0 -228
  65. data/ext/deflate_ruby/libdeflate/libdeflate-config.cmake.in +0 -3
  66. data/ext/deflate_ruby/libdeflate/libdeflate.pc.in +0 -18
  67. data/ext/deflate_ruby/libdeflate/programs/CMakeLists.txt +0 -105
  68. data/ext/deflate_ruby/libdeflate/programs/benchmark.c +0 -696
  69. data/ext/deflate_ruby/libdeflate/programs/checksum.c +0 -218
  70. data/ext/deflate_ruby/libdeflate/programs/config.h.in +0 -19
  71. data/ext/deflate_ruby/libdeflate/programs/gzip.c +0 -688
  72. data/ext/deflate_ruby/libdeflate/programs/prog_util.c +0 -521
  73. data/ext/deflate_ruby/libdeflate/programs/prog_util.h +0 -225
  74. data/ext/deflate_ruby/libdeflate/programs/test_checksums.c +0 -200
  75. data/ext/deflate_ruby/libdeflate/programs/test_custom_malloc.c +0 -155
  76. data/ext/deflate_ruby/libdeflate/programs/test_incomplete_codes.c +0 -385
  77. data/ext/deflate_ruby/libdeflate/programs/test_invalid_streams.c +0 -130
  78. data/ext/deflate_ruby/libdeflate/programs/test_litrunlen_overflow.c +0 -72
  79. data/ext/deflate_ruby/libdeflate/programs/test_overread.c +0 -95
  80. data/ext/deflate_ruby/libdeflate/programs/test_slow_decompression.c +0 -472
  81. data/ext/deflate_ruby/libdeflate/programs/test_trailing_bytes.c +0 -151
  82. data/ext/deflate_ruby/libdeflate/programs/test_util.c +0 -237
  83. data/ext/deflate_ruby/libdeflate/programs/test_util.h +0 -61
  84. data/ext/deflate_ruby/libdeflate/programs/tgetopt.c +0 -118
  85. data/ext/deflate_ruby/libdeflate/scripts/android_build.sh +0 -118
  86. data/ext/deflate_ruby/libdeflate/scripts/android_tests.sh +0 -69
  87. data/ext/deflate_ruby/libdeflate/scripts/benchmark.sh +0 -10
  88. data/ext/deflate_ruby/libdeflate/scripts/checksum.sh +0 -10
  89. data/ext/deflate_ruby/libdeflate/scripts/checksum_benchmarks.sh +0 -253
  90. data/ext/deflate_ruby/libdeflate/scripts/cmake-helper.sh +0 -17
  91. data/ext/deflate_ruby/libdeflate/scripts/deflate_benchmarks.sh +0 -119
  92. data/ext/deflate_ruby/libdeflate/scripts/exec_tests.sh +0 -38
  93. data/ext/deflate_ruby/libdeflate/scripts/gen-release-archives.sh +0 -37
  94. data/ext/deflate_ruby/libdeflate/scripts/gen_bitreverse_tab.py +0 -19
  95. data/ext/deflate_ruby/libdeflate/scripts/gen_crc32_multipliers.c +0 -199
  96. data/ext/deflate_ruby/libdeflate/scripts/gen_crc32_tables.c +0 -105
  97. data/ext/deflate_ruby/libdeflate/scripts/gen_default_litlen_costs.py +0 -44
  98. data/ext/deflate_ruby/libdeflate/scripts/gen_offset_slot_map.py +0 -29
  99. data/ext/deflate_ruby/libdeflate/scripts/gzip_tests.sh +0 -523
  100. data/ext/deflate_ruby/libdeflate/scripts/libFuzzer/deflate_compress/corpus/0 +0 -0
  101. data/ext/deflate_ruby/libdeflate/scripts/libFuzzer/deflate_compress/fuzz.c +0 -95
  102. data/ext/deflate_ruby/libdeflate/scripts/libFuzzer/deflate_decompress/corpus/0 +0 -3
  103. data/ext/deflate_ruby/libdeflate/scripts/libFuzzer/deflate_decompress/fuzz.c +0 -62
  104. data/ext/deflate_ruby/libdeflate/scripts/libFuzzer/fuzz.sh +0 -108
  105. data/ext/deflate_ruby/libdeflate/scripts/libFuzzer/gzip_decompress/corpus/0 +0 -0
  106. data/ext/deflate_ruby/libdeflate/scripts/libFuzzer/gzip_decompress/fuzz.c +0 -19
  107. data/ext/deflate_ruby/libdeflate/scripts/libFuzzer/zlib_decompress/corpus/0 +0 -3
  108. data/ext/deflate_ruby/libdeflate/scripts/libFuzzer/zlib_decompress/fuzz.c +0 -19
  109. data/ext/deflate_ruby/libdeflate/scripts/run_tests.sh +0 -416
  110. data/ext/deflate_ruby/libdeflate/scripts/toolchain-i686-w64-mingw32.cmake +0 -8
  111. data/ext/deflate_ruby/libdeflate/scripts/toolchain-x86_64-w64-mingw32.cmake +0 -8
  112. data/ext/deflate_ruby/{libdeflate/lib/adler32.c → adler32.c} +0 -0
  113. data/ext/deflate_ruby/{libdeflate/lib/x86/adler32_template.h → adler32_template.h} +0 -0
  114. data/ext/deflate_ruby/{libdeflate/lib/bt_matchfinder.h → bt_matchfinder.h} +0 -0
  115. data/ext/deflate_ruby/{libdeflate/lib/cpu_features_common.h → cpu_features_common.h} +0 -0
  116. data/ext/deflate_ruby/{libdeflate/lib/crc32.c → crc32.c} +0 -0
  117. data/ext/deflate_ruby/{libdeflate/lib/arm/crc32_pmull_helpers.h → crc32_pmull_helpers.h} +0 -0
  118. data/ext/deflate_ruby/{libdeflate/lib/arm/crc32_pmull_wide.h → crc32_pmull_wide.h} +0 -0
  119. data/ext/deflate_ruby/{libdeflate/lib/x86/decompress_impl.h → decompress_impl.h} +0 -0
  120. data/ext/deflate_ruby/{libdeflate/lib/decompress_template.h → decompress_template.h} +0 -0
  121. data/ext/deflate_ruby/{libdeflate/lib/deflate_compress.h → deflate_compress.h} +0 -0
  122. data/ext/deflate_ruby/{libdeflate/lib/deflate_constants.h → deflate_constants.h} +0 -0
  123. data/ext/deflate_ruby/{libdeflate/lib/deflate_decompress.c → deflate_decompress.c} +0 -0
  124. data/ext/deflate_ruby/{libdeflate/lib/gzip_compress.c → gzip_compress.c} +0 -0
  125. data/ext/deflate_ruby/{libdeflate/lib/gzip_constants.h → gzip_constants.h} +0 -0
  126. data/ext/deflate_ruby/{libdeflate/lib/gzip_decompress.c → gzip_decompress.c} +0 -0
  127. data/ext/deflate_ruby/{libdeflate/lib/hc_matchfinder.h → hc_matchfinder.h} +0 -0
  128. data/ext/deflate_ruby/{libdeflate/lib/ht_matchfinder.h → ht_matchfinder.h} +0 -0
  129. data/ext/deflate_ruby/{libdeflate/lib/lib_common.h → lib_common.h} +0 -0
  130. data/ext/deflate_ruby/libdeflate/{lib/arm → arm}/cpu_features.c +0 -0
  131. data/ext/deflate_ruby/libdeflate/{lib/arm → arm}/cpu_features.h +0 -0
  132. data/ext/deflate_ruby/libdeflate/{lib/arm → arm}/matchfinder_impl.h +0 -0
  133. data/ext/deflate_ruby/libdeflate/{lib/riscv → riscv}/matchfinder_impl.h +0 -0
  134. data/ext/deflate_ruby/libdeflate/{lib/utils.c → utils.c} +0 -0
  135. data/ext/deflate_ruby/libdeflate/{lib/x86 → x86}/matchfinder_impl.h +0 -0
  136. data/ext/deflate_ruby/libdeflate/{lib/zlib_compress.c → zlib_compress.c} +0 -0
  137. data/ext/deflate_ruby/libdeflate/{lib/zlib_constants.h → zlib_constants.h} +0 -0
  138. data/ext/deflate_ruby/libdeflate/{lib/zlib_decompress.c → zlib_decompress.c} +0 -0
data/ext/deflate_ruby/libdeflate/decompress_template.h (new file)
@@ -0,0 +1,777 @@
+ /*
+  * decompress_template.h
+  *
+  * Copyright 2016 Eric Biggers
+  *
+  * Permission is hereby granted, free of charge, to any person
+  * obtaining a copy of this software and associated documentation
+  * files (the "Software"), to deal in the Software without
+  * restriction, including without limitation the rights to use,
+  * copy, modify, merge, publish, distribute, sublicense, and/or sell
+  * copies of the Software, and to permit persons to whom the
+  * Software is furnished to do so, subject to the following
+  * conditions:
+  *
+  * The above copyright notice and this permission notice shall be
+  * included in all copies or substantial portions of the Software.
+  *
+  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+  * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+  * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+  * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+  * OTHER DEALINGS IN THE SOFTWARE.
+  */
+
+ /*
+  * This is the actual DEFLATE decompression routine, lifted out of
+  * deflate_decompress.c so that it can be compiled multiple times with different
+  * target instruction sets.
+  */
+
+ #ifndef ATTRIBUTES
+ #  define ATTRIBUTES
+ #endif
+ #ifndef EXTRACT_VARBITS
+ #  define EXTRACT_VARBITS(word, count) ((word) & BITMASK(count))
+ #endif
+ #ifndef EXTRACT_VARBITS8
+ #  define EXTRACT_VARBITS8(word, count) ((word) & BITMASK((u8)(count)))
+ #endif
+
+ static ATTRIBUTES MAYBE_UNUSED enum libdeflate_result
+ FUNCNAME(struct libdeflate_decompressor * restrict d,
+ 	 const void * restrict in, size_t in_nbytes,
+ 	 void * restrict out, size_t out_nbytes_avail,
+ 	 size_t *actual_in_nbytes_ret, size_t *actual_out_nbytes_ret)
+ {
+ 	u8 *out_next = out;
+ 	u8 * const out_end = out_next + out_nbytes_avail;
+ 	u8 * const out_fastloop_end =
+ 		out_end - MIN(out_nbytes_avail, FASTLOOP_MAX_BYTES_WRITTEN);
+
+ 	/* Input bitstream state; see deflate_decompress.c for documentation */
+ 	const u8 *in_next = in;
+ 	const u8 * const in_end = in_next + in_nbytes;
+ 	const u8 * const in_fastloop_end =
+ 		in_end - MIN(in_nbytes, FASTLOOP_MAX_BYTES_READ);
+ 	bitbuf_t bitbuf = 0;
+ 	bitbuf_t saved_bitbuf;
+ 	u32 bitsleft = 0;
+ 	size_t overread_count = 0;
+
+ 	bool is_final_block;
+ 	unsigned block_type;
+ 	unsigned num_litlen_syms;
+ 	unsigned num_offset_syms;
+ 	bitbuf_t litlen_tablemask;
+ 	u32 entry;
+
+ next_block:
+ 	/* Starting to read the next block */
+ 	;
+
+ 	STATIC_ASSERT(CAN_CONSUME(1 + 2 + 5 + 5 + 4 + 3));
+ 	REFILL_BITS();
+
+ 	/* BFINAL: 1 bit */
+ 	is_final_block = bitbuf & BITMASK(1);
+
+ 	/* BTYPE: 2 bits */
+ 	block_type = (bitbuf >> 1) & BITMASK(2);
+
+ 	if (block_type == DEFLATE_BLOCKTYPE_DYNAMIC_HUFFMAN) {
+
+ 		/* Dynamic Huffman block */
+
+ 		/* The order in which precode lengths are stored */
+ 		static const u8 deflate_precode_lens_permutation[DEFLATE_NUM_PRECODE_SYMS] = {
+ 			16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15
+ 		};
+
+ 		unsigned num_explicit_precode_lens;
+ 		unsigned i;
+
+ 		/* Read the codeword length counts. */
+
+ 		STATIC_ASSERT(DEFLATE_NUM_LITLEN_SYMS == 257 + BITMASK(5));
+ 		num_litlen_syms = 257 + ((bitbuf >> 3) & BITMASK(5));
+
+ 		STATIC_ASSERT(DEFLATE_NUM_OFFSET_SYMS == 1 + BITMASK(5));
+ 		num_offset_syms = 1 + ((bitbuf >> 8) & BITMASK(5));
+
+ 		STATIC_ASSERT(DEFLATE_NUM_PRECODE_SYMS == 4 + BITMASK(4));
+ 		num_explicit_precode_lens = 4 + ((bitbuf >> 13) & BITMASK(4));
+
+ 		d->static_codes_loaded = false;
+
+ 		/*
+ 		 * Read the precode codeword lengths.
+ 		 *
+ 		 * A 64-bit bitbuffer is just one bit too small to hold the
+ 		 * maximum number of precode lens, so to minimize branches we
+ 		 * merge one len with the previous fields.
+ 		 */
+ 		STATIC_ASSERT(DEFLATE_MAX_PRE_CODEWORD_LEN == (1 << 3) - 1);
+ 		if (CAN_CONSUME(3 * (DEFLATE_NUM_PRECODE_SYMS - 1))) {
+ 			d->u.precode_lens[deflate_precode_lens_permutation[0]] =
+ 				(bitbuf >> 17) & BITMASK(3);
+ 			bitbuf >>= 20;
+ 			bitsleft -= 20;
+ 			REFILL_BITS();
+ 			i = 1;
+ 			do {
+ 				d->u.precode_lens[deflate_precode_lens_permutation[i]] =
+ 					bitbuf & BITMASK(3);
+ 				bitbuf >>= 3;
+ 				bitsleft -= 3;
+ 			} while (++i < num_explicit_precode_lens);
+ 		} else {
+ 			bitbuf >>= 17;
+ 			bitsleft -= 17;
+ 			i = 0;
+ 			do {
+ 				if ((u8)bitsleft < 3)
+ 					REFILL_BITS();
+ 				d->u.precode_lens[deflate_precode_lens_permutation[i]] =
+ 					bitbuf & BITMASK(3);
+ 				bitbuf >>= 3;
+ 				bitsleft -= 3;
+ 			} while (++i < num_explicit_precode_lens);
+ 		}
+ 		for (; i < DEFLATE_NUM_PRECODE_SYMS; i++)
+ 			d->u.precode_lens[deflate_precode_lens_permutation[i]] = 0;
+
+ 		/* Build the decode table for the precode. */
+ 		SAFETY_CHECK(build_precode_decode_table(d));
+
+ 		/* Decode the litlen and offset codeword lengths. */
+ 		i = 0;
+ 		do {
+ 			unsigned presym;
+ 			u8 rep_val;
+ 			unsigned rep_count;
+
+ 			if ((u8)bitsleft < DEFLATE_MAX_PRE_CODEWORD_LEN + 7)
+ 				REFILL_BITS();
+
+ 			/*
+ 			 * The code below assumes that the precode decode table
+ 			 * doesn't have any subtables.
+ 			 */
+ 			STATIC_ASSERT(PRECODE_TABLEBITS == DEFLATE_MAX_PRE_CODEWORD_LEN);
+
+ 			/* Decode the next precode symbol. */
+ 			entry = d->u.l.precode_decode_table[
+ 				bitbuf & BITMASK(DEFLATE_MAX_PRE_CODEWORD_LEN)];
+ 			bitbuf >>= (u8)entry;
+ 			bitsleft -= entry; /* optimization: subtract full entry */
+ 			presym = entry >> 16;
+
+ 			if (presym < 16) {
+ 				/* Explicit codeword length */
+ 				d->u.l.lens[i++] = presym;
+ 				continue;
+ 			}
+
+ 			/* Run-length encoded codeword lengths */
+
+ 			/*
+ 			 * Note: we don't need to immediately verify that the
+ 			 * repeat count doesn't overflow the number of elements,
+ 			 * since we've sized the lens array to have enough extra
+ 			 * space to allow for the worst-case overrun (138 zeroes
+ 			 * when only 1 length was remaining).
+ 			 *
+ 			 * In the case of the small repeat counts (presyms 16
+ 			 * and 17), it is fastest to always write the maximum
+ 			 * number of entries. That gets rid of branches that
+ 			 * would otherwise be required.
+ 			 *
+ 			 * It is not just because of the numerical order that
+ 			 * our checks go in the order 'presym < 16', 'presym ==
+ 			 * 16', and 'presym == 17'. For typical data this is
+ 			 * ordered from most frequent to least frequent case.
+ 			 */
+ 			STATIC_ASSERT(DEFLATE_MAX_LENS_OVERRUN == 138 - 1);
+
+ 			if (presym == 16) {
+ 				/* Repeat the previous length 3 - 6 times. */
+ 				SAFETY_CHECK(i != 0);
+ 				rep_val = d->u.l.lens[i - 1];
+ 				STATIC_ASSERT(3 + BITMASK(2) == 6);
+ 				rep_count = 3 + (bitbuf & BITMASK(2));
+ 				bitbuf >>= 2;
+ 				bitsleft -= 2;
+ 				d->u.l.lens[i + 0] = rep_val;
+ 				d->u.l.lens[i + 1] = rep_val;
+ 				d->u.l.lens[i + 2] = rep_val;
+ 				d->u.l.lens[i + 3] = rep_val;
+ 				d->u.l.lens[i + 4] = rep_val;
+ 				d->u.l.lens[i + 5] = rep_val;
+ 				i += rep_count;
+ 			} else if (presym == 17) {
+ 				/* Repeat zero 3 - 10 times. */
+ 				STATIC_ASSERT(3 + BITMASK(3) == 10);
+ 				rep_count = 3 + (bitbuf & BITMASK(3));
+ 				bitbuf >>= 3;
+ 				bitsleft -= 3;
+ 				d->u.l.lens[i + 0] = 0;
+ 				d->u.l.lens[i + 1] = 0;
+ 				d->u.l.lens[i + 2] = 0;
+ 				d->u.l.lens[i + 3] = 0;
+ 				d->u.l.lens[i + 4] = 0;
+ 				d->u.l.lens[i + 5] = 0;
+ 				d->u.l.lens[i + 6] = 0;
+ 				d->u.l.lens[i + 7] = 0;
+ 				d->u.l.lens[i + 8] = 0;
+ 				d->u.l.lens[i + 9] = 0;
+ 				i += rep_count;
+ 			} else {
+ 				/* Repeat zero 11 - 138 times. */
+ 				STATIC_ASSERT(11 + BITMASK(7) == 138);
+ 				rep_count = 11 + (bitbuf & BITMASK(7));
+ 				bitbuf >>= 7;
+ 				bitsleft -= 7;
+ 				memset(&d->u.l.lens[i], 0,
+ 				       rep_count * sizeof(d->u.l.lens[i]));
+ 				i += rep_count;
+ 			}
+ 		} while (i < num_litlen_syms + num_offset_syms);
+
+ 		/* Unnecessary, but check this for consistency with zlib. */
+ 		SAFETY_CHECK(i == num_litlen_syms + num_offset_syms);
+
+ 	} else if (block_type == DEFLATE_BLOCKTYPE_UNCOMPRESSED) {
+ 		u16 len, nlen;
+
+ 		/*
+ 		 * Uncompressed block: copy 'len' bytes literally from the input
+ 		 * buffer to the output buffer.
+ 		 */
+
+ 		bitsleft -= 3; /* for BTYPE and BFINAL */
+
+ 		/*
+ 		 * Align the bitstream to the next byte boundary. This means
+ 		 * the next byte boundary as if we were reading a byte at a
+ 		 * time. Therefore, we have to rewind 'in_next' by any bytes
+ 		 * that have been refilled but not actually consumed yet (not
+ 		 * counting overread bytes, which don't increment 'in_next').
+ 		 */
+ 		bitsleft = (u8)bitsleft;
+ 		SAFETY_CHECK(overread_count <= (bitsleft >> 3));
+ 		in_next -= (bitsleft >> 3) - overread_count;
+ 		overread_count = 0;
+ 		bitbuf = 0;
+ 		bitsleft = 0;
+
+ 		SAFETY_CHECK(in_end - in_next >= 4);
+ 		len = get_unaligned_le16(in_next);
+ 		nlen = get_unaligned_le16(in_next + 2);
+ 		in_next += 4;
+
+ 		SAFETY_CHECK(len == (u16)~nlen);
+ 		if (unlikely(len > out_end - out_next))
+ 			return LIBDEFLATE_INSUFFICIENT_SPACE;
+ 		SAFETY_CHECK(len <= in_end - in_next);
+
+ 		memcpy(out_next, in_next, len);
+ 		in_next += len;
+ 		out_next += len;
+
+ 		goto block_done;
+
+ 	} else {
+ 		unsigned i;
+
+ 		SAFETY_CHECK(block_type == DEFLATE_BLOCKTYPE_STATIC_HUFFMAN);
+
+ 		/*
+ 		 * Static Huffman block: build the decode tables for the static
+ 		 * codes. Skip doing so if the tables are already set up from
+ 		 * an earlier static block; this speeds up decompression of
+ 		 * degenerate input of many empty or very short static blocks.
+ 		 *
+ 		 * Afterwards, the remainder is the same as decompressing a
+ 		 * dynamic Huffman block.
+ 		 */
+
+ 		bitbuf >>= 3; /* for BTYPE and BFINAL */
+ 		bitsleft -= 3;
+
+ 		if (d->static_codes_loaded)
+ 			goto have_decode_tables;
+
+ 		d->static_codes_loaded = true;
+
+ 		STATIC_ASSERT(DEFLATE_NUM_LITLEN_SYMS == 288);
+ 		STATIC_ASSERT(DEFLATE_NUM_OFFSET_SYMS == 32);
+
+ 		for (i = 0; i < 144; i++)
+ 			d->u.l.lens[i] = 8;
+ 		for (; i < 256; i++)
+ 			d->u.l.lens[i] = 9;
+ 		for (; i < 280; i++)
+ 			d->u.l.lens[i] = 7;
+ 		for (; i < 288; i++)
+ 			d->u.l.lens[i] = 8;
+
+ 		for (; i < 288 + 32; i++)
+ 			d->u.l.lens[i] = 5;
+
+ 		num_litlen_syms = 288;
+ 		num_offset_syms = 32;
+ 	}
+
+ 	/* Decompressing a Huffman block (either dynamic or static) */
+
+ 	SAFETY_CHECK(build_offset_decode_table(d, num_litlen_syms, num_offset_syms));
+ 	SAFETY_CHECK(build_litlen_decode_table(d, num_litlen_syms, num_offset_syms));
+ have_decode_tables:
+ 	litlen_tablemask = BITMASK(d->litlen_tablebits);
+
+ 	/*
+ 	 * This is the "fastloop" for decoding literals and matches. It does
+ 	 * bounds checks on in_next and out_next in the loop conditions so that
+ 	 * additional bounds checks aren't needed inside the loop body.
+ 	 *
+ 	 * To reduce latency, the bitbuffer is refilled and the next litlen
+ 	 * decode table entry is preloaded before each loop iteration.
+ 	 */
+ 	if (in_next >= in_fastloop_end || out_next >= out_fastloop_end)
+ 		goto generic_loop;
+ 	REFILL_BITS_IN_FASTLOOP();
+ 	entry = d->u.litlen_decode_table[bitbuf & litlen_tablemask];
+ 	do {
+ 		u32 length, offset, lit;
+ 		const u8 *src;
+ 		u8 *dst;
+
+ 		/*
+ 		 * Consume the bits for the litlen decode table entry. Save the
+ 		 * original bitbuf for later, in case the extra match length
+ 		 * bits need to be extracted from it.
+ 		 */
+ 		saved_bitbuf = bitbuf;
+ 		bitbuf >>= (u8)entry;
+ 		bitsleft -= entry; /* optimization: subtract full entry */
+
+ 		/*
+ 		 * Begin by checking for a "fast" literal, i.e. a literal that
+ 		 * doesn't need a subtable.
+ 		 */
+ 		if (entry & HUFFDEC_LITERAL) {
+ 			/*
+ 			 * On 64-bit platforms, we decode up to 2 extra fast
+ 			 * literals in addition to the primary item, as this
+ 			 * increases performance and still leaves enough bits
+ 			 * remaining for what follows. We could actually do 3,
+ 			 * assuming LITLEN_TABLEBITS=11, but that actually
+ 			 * decreases performance slightly (perhaps by messing
+ 			 * with the branch prediction of the conditional refill
+ 			 * that happens later while decoding the match offset).
+ 			 *
+ 			 * Note: the definitions of FASTLOOP_MAX_BYTES_WRITTEN
+ 			 * and FASTLOOP_MAX_BYTES_READ need to be updated if the
+ 			 * number of extra literals decoded here is changed.
+ 			 */
+ 			if (/* enough bits for 2 fast literals + length + offset preload? */
+ 			    CAN_CONSUME_AND_THEN_PRELOAD(2 * LITLEN_TABLEBITS +
+ 							 LENGTH_MAXBITS,
+ 							 OFFSET_TABLEBITS) &&
+ 			    /* enough bits for 2 fast literals + slow literal + litlen preload? */
+ 			    CAN_CONSUME_AND_THEN_PRELOAD(2 * LITLEN_TABLEBITS +
+ 							 DEFLATE_MAX_LITLEN_CODEWORD_LEN,
+ 							 LITLEN_TABLEBITS)) {
+ 				/* 1st extra fast literal */
+ 				lit = entry >> 16;
+ 				entry = d->u.litlen_decode_table[bitbuf & litlen_tablemask];
+ 				saved_bitbuf = bitbuf;
+ 				bitbuf >>= (u8)entry;
+ 				bitsleft -= entry;
+ 				*out_next++ = lit;
+ 				if (entry & HUFFDEC_LITERAL) {
+ 					/* 2nd extra fast literal */
+ 					lit = entry >> 16;
+ 					entry = d->u.litlen_decode_table[bitbuf & litlen_tablemask];
+ 					saved_bitbuf = bitbuf;
+ 					bitbuf >>= (u8)entry;
+ 					bitsleft -= entry;
+ 					*out_next++ = lit;
+ 					if (entry & HUFFDEC_LITERAL) {
+ 						/*
+ 						 * Another fast literal, but
+ 						 * this one is in lieu of the
+ 						 * primary item, so it doesn't
+ 						 * count as one of the extras.
+ 						 */
+ 						lit = entry >> 16;
+ 						entry = d->u.litlen_decode_table[bitbuf & litlen_tablemask];
+ 						REFILL_BITS_IN_FASTLOOP();
+ 						*out_next++ = lit;
+ 						continue;
+ 					}
+ 				}
+ 			} else {
+ 				/*
+ 				 * Decode a literal. While doing so, preload
+ 				 * the next litlen decode table entry and refill
+ 				 * the bitbuffer. To reduce latency, we've
+ 				 * arranged for there to be enough "preloadable"
+ 				 * bits remaining to do the table preload
+ 				 * independently of the refill.
+ 				 */
+ 				STATIC_ASSERT(CAN_CONSUME_AND_THEN_PRELOAD(
+ 						LITLEN_TABLEBITS, LITLEN_TABLEBITS));
+ 				lit = entry >> 16;
+ 				entry = d->u.litlen_decode_table[bitbuf & litlen_tablemask];
+ 				REFILL_BITS_IN_FASTLOOP();
+ 				*out_next++ = lit;
+ 				continue;
+ 			}
+ 		}
+
+ 		/*
+ 		 * It's not a literal entry, so it can be a length entry, a
+ 		 * subtable pointer entry, or an end-of-block entry. Detect the
+ 		 * two unlikely cases by testing the HUFFDEC_EXCEPTIONAL flag.
+ 		 */
+ 		if (unlikely(entry & HUFFDEC_EXCEPTIONAL)) {
+ 			/* Subtable pointer or end-of-block entry */
+
+ 			if (unlikely(entry & HUFFDEC_END_OF_BLOCK))
+ 				goto block_done;
+
+ 			/*
+ 			 * A subtable is required. Load and consume the
+ 			 * subtable entry. The subtable entry can be of any
+ 			 * type: literal, length, or end-of-block.
+ 			 */
+ 			entry = d->u.litlen_decode_table[(entry >> 16) +
+ 				EXTRACT_VARBITS(bitbuf, (entry >> 8) & 0x3F)];
+ 			saved_bitbuf = bitbuf;
+ 			bitbuf >>= (u8)entry;
+ 			bitsleft -= entry;
+
+ 			/*
+ 			 * 32-bit platforms that use the byte-at-a-time refill
+ 			 * method have to do a refill here for there to always
+ 			 * be enough bits to decode a literal that requires a
+ 			 * subtable, then preload the next litlen decode table
+ 			 * entry; or to decode a match length that requires a
+ 			 * subtable, then preload the offset decode table entry.
+ 			 */
+ 			if (!CAN_CONSUME_AND_THEN_PRELOAD(DEFLATE_MAX_LITLEN_CODEWORD_LEN,
+ 							  LITLEN_TABLEBITS) ||
+ 			    !CAN_CONSUME_AND_THEN_PRELOAD(LENGTH_MAXBITS,
+ 							  OFFSET_TABLEBITS))
+ 				REFILL_BITS_IN_FASTLOOP();
+ 			if (entry & HUFFDEC_LITERAL) {
+ 				/* Decode a literal that required a subtable. */
+ 				lit = entry >> 16;
+ 				entry = d->u.litlen_decode_table[bitbuf & litlen_tablemask];
+ 				REFILL_BITS_IN_FASTLOOP();
+ 				*out_next++ = lit;
+ 				continue;
+ 			}
+ 			if (unlikely(entry & HUFFDEC_END_OF_BLOCK))
+ 				goto block_done;
+ 			/* Else, it's a length that required a subtable. */
+ 		}
+
+ 		/*
+ 		 * Decode the match length: the length base value associated
+ 		 * with the litlen symbol (which we extract from the decode
+ 		 * table entry), plus the extra length bits. We don't need to
+ 		 * consume the extra length bits here, as they were included in
+ 		 * the bits consumed by the entry earlier. We also don't need
+ 		 * to check for too-long matches here, as this is inside the
+ 		 * fastloop where it's already been verified that the output
+ 		 * buffer has enough space remaining to copy a max-length match.
+ 		 */
+ 		length = entry >> 16;
+ 		length += EXTRACT_VARBITS8(saved_bitbuf, entry) >> (u8)(entry >> 8);
+
+ 		/*
+ 		 * Decode the match offset. There are enough "preloadable" bits
+ 		 * remaining to preload the offset decode table entry, but a
+ 		 * refill might be needed before consuming it.
+ 		 */
+ 		STATIC_ASSERT(CAN_CONSUME_AND_THEN_PRELOAD(LENGTH_MAXFASTBITS,
+ 							   OFFSET_TABLEBITS));
+ 		entry = d->offset_decode_table[bitbuf & BITMASK(OFFSET_TABLEBITS)];
+ 		if (CAN_CONSUME_AND_THEN_PRELOAD(OFFSET_MAXBITS,
+ 						 LITLEN_TABLEBITS)) {
+ 			/*
+ 			 * Decoding a match offset on a 64-bit platform. We may
+ 			 * need to refill once, but then we can decode the whole
+ 			 * offset and preload the next litlen table entry.
+ 			 */
+ 			if (unlikely(entry & HUFFDEC_EXCEPTIONAL)) {
+ 				/* Offset codeword requires a subtable */
+ 				if (unlikely((u8)bitsleft < OFFSET_MAXBITS +
+ 					     LITLEN_TABLEBITS - PRELOAD_SLACK))
+ 					REFILL_BITS_IN_FASTLOOP();
+ 				bitbuf >>= OFFSET_TABLEBITS;
+ 				bitsleft -= OFFSET_TABLEBITS;
+ 				entry = d->offset_decode_table[(entry >> 16) +
+ 					EXTRACT_VARBITS(bitbuf, (entry >> 8) & 0x3F)];
+ 			} else if (unlikely((u8)bitsleft < OFFSET_MAXFASTBITS +
+ 					    LITLEN_TABLEBITS - PRELOAD_SLACK))
+ 				REFILL_BITS_IN_FASTLOOP();
+ 		} else {
+ 			/* Decoding a match offset on a 32-bit platform */
+ 			REFILL_BITS_IN_FASTLOOP();
+ 			if (unlikely(entry & HUFFDEC_EXCEPTIONAL)) {
+ 				/* Offset codeword requires a subtable */
+ 				bitbuf >>= OFFSET_TABLEBITS;
+ 				bitsleft -= OFFSET_TABLEBITS;
+ 				entry = d->offset_decode_table[(entry >> 16) +
+ 					EXTRACT_VARBITS(bitbuf, (entry >> 8) & 0x3F)];
+ 				REFILL_BITS_IN_FASTLOOP();
+ 				/* No further refill needed before extra bits */
+ 				STATIC_ASSERT(CAN_CONSUME(
+ 					OFFSET_MAXBITS - OFFSET_TABLEBITS));
+ 			} else {
+ 				/* No refill needed before extra bits */
+ 				STATIC_ASSERT(CAN_CONSUME(OFFSET_MAXFASTBITS));
+ 			}
+ 		}
+ 		saved_bitbuf = bitbuf;
+ 		bitbuf >>= (u8)entry;
+ 		bitsleft -= entry; /* optimization: subtract full entry */
+ 		offset = entry >> 16;
+ 		offset += EXTRACT_VARBITS8(saved_bitbuf, entry) >> (u8)(entry >> 8);
+
+ 		/* Validate the match offset; needed even in the fastloop. */
+ 		SAFETY_CHECK(offset <= out_next - (const u8 *)out);
+ 		src = out_next - offset;
+ 		dst = out_next;
+ 		out_next += length;
+
+ 		/*
+ 		 * Before starting to issue the instructions to copy the match,
+ 		 * refill the bitbuffer and preload the litlen decode table
+ 		 * entry for the next loop iteration. This can increase
+ 		 * performance by allowing the latency of the match copy to
+ 		 * overlap with these other operations. To further reduce
+ 		 * latency, we've arranged for there to be enough bits remaining
+ 		 * to do the table preload independently of the refill, except
+ 		 * on 32-bit platforms using the byte-at-a-time refill method.
+ 		 */
+ 		if (!CAN_CONSUME_AND_THEN_PRELOAD(
+ 			MAX(OFFSET_MAXBITS - OFFSET_TABLEBITS,
+ 			    OFFSET_MAXFASTBITS),
+ 			LITLEN_TABLEBITS) &&
+ 		    unlikely((u8)bitsleft < LITLEN_TABLEBITS - PRELOAD_SLACK))
+ 			REFILL_BITS_IN_FASTLOOP();
+ 		entry = d->u.litlen_decode_table[bitbuf & litlen_tablemask];
+ 		REFILL_BITS_IN_FASTLOOP();
+
+ 		/*
+ 		 * Copy the match. On most CPUs the fastest method is a
+ 		 * word-at-a-time copy, unconditionally copying about 5 words
+ 		 * since this is enough for most matches without being too much.
+ 		 *
+ 		 * The normal word-at-a-time copy works for offset >= WORDBYTES,
+ 		 * which is most cases. The case of offset == 1 is also common
+ 		 * and is worth optimizing for, since it is just RLE encoding of
+ 		 * the previous byte, which is the result of compressing long
+ 		 * runs of the same byte.
+ 		 *
+ 		 * Writing past the match 'length' is allowed here, since it's
+ 		 * been ensured there is enough output space left for a slight
+ 		 * overrun. FASTLOOP_MAX_BYTES_WRITTEN needs to be updated if
+ 		 * the maximum possible overrun here is changed.
+ 		 */
+ 		if (UNALIGNED_ACCESS_IS_FAST && offset >= WORDBYTES) {
+ 			store_word_unaligned(load_word_unaligned(src), dst);
+ 			src += WORDBYTES;
+ 			dst += WORDBYTES;
+ 			store_word_unaligned(load_word_unaligned(src), dst);
+ 			src += WORDBYTES;
+ 			dst += WORDBYTES;
+ 			store_word_unaligned(load_word_unaligned(src), dst);
+ 			src += WORDBYTES;
+ 			dst += WORDBYTES;
+ 			store_word_unaligned(load_word_unaligned(src), dst);
+ 			src += WORDBYTES;
+ 			dst += WORDBYTES;
+ 			store_word_unaligned(load_word_unaligned(src), dst);
+ 			src += WORDBYTES;
+ 			dst += WORDBYTES;
+ 			while (dst < out_next) {
+ 				store_word_unaligned(load_word_unaligned(src), dst);
+ 				src += WORDBYTES;
+ 				dst += WORDBYTES;
+ 				store_word_unaligned(load_word_unaligned(src), dst);
+ 				src += WORDBYTES;
+ 				dst += WORDBYTES;
+ 				store_word_unaligned(load_word_unaligned(src), dst);
+ 				src += WORDBYTES;
+ 				dst += WORDBYTES;
+ 				store_word_unaligned(load_word_unaligned(src), dst);
+ 				src += WORDBYTES;
+ 				dst += WORDBYTES;
+ 				store_word_unaligned(load_word_unaligned(src), dst);
+ 				src += WORDBYTES;
+ 				dst += WORDBYTES;
+ 			}
+ 		} else if (UNALIGNED_ACCESS_IS_FAST && offset == 1) {
+ 			machine_word_t v;
+
+ 			/*
+ 			 * This part tends to get auto-vectorized, so keep it
+ 			 * copying a multiple of 16 bytes at a time.
+ 			 */
+ 			v = (machine_word_t)0x0101010101010101 * src[0];
+ 			store_word_unaligned(v, dst);
+ 			dst += WORDBYTES;
+ 			store_word_unaligned(v, dst);
+ 			dst += WORDBYTES;
+ 			store_word_unaligned(v, dst);
+ 			dst += WORDBYTES;
+ 			store_word_unaligned(v, dst);
+ 			dst += WORDBYTES;
+ 			while (dst < out_next) {
+ 				store_word_unaligned(v, dst);
+ 				dst += WORDBYTES;
+ 				store_word_unaligned(v, dst);
+ 				dst += WORDBYTES;
+ 				store_word_unaligned(v, dst);
+ 				dst += WORDBYTES;
+ 				store_word_unaligned(v, dst);
+ 				dst += WORDBYTES;
+ 			}
+ 		} else if (UNALIGNED_ACCESS_IS_FAST) {
+ 			store_word_unaligned(load_word_unaligned(src), dst);
+ 			src += offset;
+ 			dst += offset;
+ 			store_word_unaligned(load_word_unaligned(src), dst);
+ 			src += offset;
+ 			dst += offset;
+ 			do {
+ 				store_word_unaligned(load_word_unaligned(src), dst);
+ 				src += offset;
+ 				dst += offset;
+ 				store_word_unaligned(load_word_unaligned(src), dst);
+ 				src += offset;
+ 				dst += offset;
+ 			} while (dst < out_next);
+ 		} else {
+ 			*dst++ = *src++;
+ 			*dst++ = *src++;
+ 			do {
+ 				*dst++ = *src++;
+ 			} while (dst < out_next);
+ 		}
+ 	} while (in_next < in_fastloop_end && out_next < out_fastloop_end);
+
+ 	/*
+ 	 * This is the generic loop for decoding literals and matches. This
+ 	 * handles cases where in_next and out_next are close to the end of
+ 	 * their respective buffers. Usually this loop isn't performance-
+ 	 * critical, as most time is spent in the fastloop above instead. We
+ 	 * therefore omit some optimizations here in favor of smaller code.
+ 	 */
+ generic_loop:
+ 	for (;;) {
+ 		u32 length, offset;
+ 		const u8 *src;
+ 		u8 *dst;
+
+ 		REFILL_BITS();
+ 		entry = d->u.litlen_decode_table[bitbuf & litlen_tablemask];
+ 		saved_bitbuf = bitbuf;
+ 		bitbuf >>= (u8)entry;
+ 		bitsleft -= entry;
+ 		if (unlikely(entry & HUFFDEC_SUBTABLE_POINTER)) {
+ 			entry = d->u.litlen_decode_table[(entry >> 16) +
+ 				EXTRACT_VARBITS(bitbuf, (entry >> 8) & 0x3F)];
+ 			saved_bitbuf = bitbuf;
+ 			bitbuf >>= (u8)entry;
+ 			bitsleft -= entry;
+ 		}
+ 		length = entry >> 16;
+ 		if (entry & HUFFDEC_LITERAL) {
+ 			if (unlikely(out_next == out_end))
+ 				return LIBDEFLATE_INSUFFICIENT_SPACE;
+ 			*out_next++ = length;
+ 			continue;
+ 		}
+ 		if (unlikely(entry & HUFFDEC_END_OF_BLOCK))
+ 			goto block_done;
+ 		length += EXTRACT_VARBITS8(saved_bitbuf, entry) >> (u8)(entry >> 8);
+ 		if (unlikely(length > out_end - out_next))
+ 			return LIBDEFLATE_INSUFFICIENT_SPACE;
+
+ 		if (!CAN_CONSUME(LENGTH_MAXBITS + OFFSET_MAXBITS))
+ 			REFILL_BITS();
+ 		entry = d->offset_decode_table[bitbuf & BITMASK(OFFSET_TABLEBITS)];
+ 		if (unlikely(entry & HUFFDEC_EXCEPTIONAL)) {
+ 			bitbuf >>= OFFSET_TABLEBITS;
+ 			bitsleft -= OFFSET_TABLEBITS;
+ 			entry = d->offset_decode_table[(entry >> 16) +
+ 				EXTRACT_VARBITS(bitbuf, (entry >> 8) & 0x3F)];
+ 			if (!CAN_CONSUME(OFFSET_MAXBITS))
+ 				REFILL_BITS();
+ 		}
+ 		offset = entry >> 16;
+ 		offset += EXTRACT_VARBITS8(bitbuf, entry) >> (u8)(entry >> 8);
+ 		bitbuf >>= (u8)entry;
+ 		bitsleft -= entry;
+
+ 		SAFETY_CHECK(offset <= out_next - (const u8 *)out);
+ 		src = out_next - offset;
+ 		dst = out_next;
+ 		out_next += length;
+
+ 		STATIC_ASSERT(DEFLATE_MIN_MATCH_LEN == 3);
+ 		*dst++ = *src++;
+ 		*dst++ = *src++;
+ 		do {
+ 			*dst++ = *src++;
+ 		} while (dst < out_next);
+ 	}
+
+ block_done:
+ 	/* Finished decoding a block */
+
+ 	if (!is_final_block)
+ 		goto next_block;
+
+ 	/* That was the last block. */
+
+ 	bitsleft = (u8)bitsleft;
+
+ 	/*
+ 	 * If any of the implicit appended zero bytes were consumed (not just
+ 	 * refilled) before hitting end of stream, then the data is bad.
+ 	 */
+ 	SAFETY_CHECK(overread_count <= (bitsleft >> 3));
+
+ 	/* Optionally return the actual number of bytes consumed. */
+ 	if (actual_in_nbytes_ret) {
+ 		/* Don't count bytes that were refilled but not consumed. */
+ 		in_next -= (bitsleft >> 3) - overread_count;
+
+ 		*actual_in_nbytes_ret = in_next - (u8 *)in;
+ 	}
+
+ 	/* Optionally return the actual number of bytes written. */
+ 	if (actual_out_nbytes_ret) {
+ 		*actual_out_nbytes_ret = out_next - (u8 *)out;
+ 	} else {
+ 		if (out_next != out_end)
+ 			return LIBDEFLATE_SHORT_OUTPUT;
+ 	}
+ 	return LIBDEFLATE_SUCCESS;
+ }
+
+ #undef FUNCNAME
+ #undef ATTRIBUTES
+ #undef EXTRACT_VARBITS
+ #undef EXTRACT_VARBITS8
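
DEFLATE block headers are read least-significant-bit first: BFINAL is bit 0 and BTYPE is bits 1-2, which is why the routine above masks with BITMASK(1) and then shifts right by one before masking with BITMASK(2). A minimal standalone sketch of the same extraction, using a plain uint64_t in place of the template's bitbuf_t (the names here are illustrative, not part of the package):

    #include <stdint.h>
    #include <stdio.h>

    /* Same idea as libdeflate's BITMASK(n): the n lowest bits set. */
    #define BITMASK(n) (((uint64_t)1 << (n)) - 1)

    int main(void)
    {
        /* 0x03 = binary 011: BFINAL = 1 (last block), BTYPE = 01 (static Huffman) */
        uint64_t bitbuf = 0x03;
        int is_final_block = bitbuf & BITMASK(1);    /* bit 0 */
        int block_type = (bitbuf >> 1) & BITMASK(2); /* bits 1-2 */

        printf("BFINAL=%d BTYPE=%d\n", is_final_block, block_type); /* prints 1 and 1 */
        return 0;
    }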
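
The uncompressed-block path above byte-aligns the stream and then checks that the 16-bit little-endian LEN field equals the one's complement of NLEN (SAFETY_CHECK(len == (u16)~nlen)), which is the stored-block integrity check from RFC 1951. A standalone sketch of that check, with an explicit little-endian read standing in for get_unaligned_le16() (helper names are illustrative, not the package's API):

    #include <stdint.h>

    /* Read a 16-bit little-endian value from a possibly unaligned pointer. */
    static uint16_t read_le16(const uint8_t *p)
    {
        return (uint16_t)((uint16_t)p[0] | ((uint16_t)p[1] << 8));
    }

    /*
     * Validate the 4-byte LEN/NLEN header of a DEFLATE stored block.
     * Returns the block length (0..65535), or -1 if the header is corrupt.
     */
    static int stored_block_len(const uint8_t hdr[4])
    {
        uint16_t len = read_le16(hdr);
        uint16_t nlen = read_le16(hdr + 2);

        return (len == (uint16_t)~nlen) ? (int)len : -1;
    }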
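
The fastloop's match copy separates offset >= WORDBYTES, where whole words can be copied without source and destination overlapping inside one word, from offset == 1, which is just run-length encoding of the previous byte and is handled by broadcasting that byte across a machine word via multiplication by 0x0101...01. A minimal sketch of the broadcast fill, with memcpy standing in for store_word_unaligned() and with the caller expected to provide the same kind of overshoot slack that FASTLOOP_MAX_BYTES_WRITTEN guarantees above:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /*
     * Fill dst[0..n) with byte b one 64-bit word at a time, possibly
     * overshooting by up to 7 bytes; the buffer must include that slack.
     */
    static void broadcast_fill(uint8_t *dst, uint8_t b, size_t n)
    {
        uint64_t v = UINT64_C(0x0101010101010101) * b; /* b replicated in every byte */
        uint8_t *end = dst + n;

        while (dst < end) {
            memcpy(dst, &v, sizeof(v)); /* unaligned word-sized store */
            dst += sizeof(v);
        }
    }

    int main(void)
    {
        uint8_t buf[20 + 8]; /* 8 bytes of slack for the overshoot */

        broadcast_fill(buf, 'x', 20);
        printf("%.20s\n", buf); /* twenty 'x' characters */
        return 0;
    }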