snappy 0.0.17 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. checksums.yaml +5 -5
  2. data/.dockerignore +2 -0
  3. data/.github/workflows/main.yml +34 -0
  4. data/.github/workflows/publish.yml +34 -0
  5. data/.gitignore +2 -1
  6. data/.gitmodules +1 -1
  7. data/Dockerfile +13 -0
  8. data/Gemfile +4 -0
  9. data/README.md +45 -5
  10. data/Rakefile +32 -29
  11. data/ext/api.c +6 -1
  12. data/ext/extconf.rb +31 -22
  13. data/lib/snappy/hadoop/reader.rb +62 -0
  14. data/lib/snappy/hadoop/writer.rb +51 -0
  15. data/lib/snappy/hadoop.rb +22 -0
  16. data/lib/snappy/reader.rb +14 -10
  17. data/lib/snappy/shim.rb +1 -1
  18. data/lib/snappy/version.rb +1 -1
  19. data/lib/snappy.rb +5 -4
  20. data/snappy.gemspec +14 -13
  21. data/test/hadoop/snappy_hadoop_reader_test.rb +115 -0
  22. data/test/hadoop/snappy_hadoop_writer_test.rb +48 -0
  23. data/test/snappy_hadoop_test.rb +26 -0
  24. data/test/snappy_reader_test.rb +148 -0
  25. data/test/snappy_test.rb +95 -0
  26. data/test/snappy_writer_test.rb +55 -0
  27. data/test/test_helper.rb +7 -0
  28. data/test.sh +3 -0
  29. data/vendor/snappy/CMakeLists.txt +420 -0
  30. data/vendor/snappy/CONTRIBUTING.md +31 -0
  31. data/vendor/snappy/NEWS +52 -0
  32. data/vendor/snappy/{README → README.md} +75 -49
  33. data/vendor/snappy/cmake/SnappyConfig.cmake.in +33 -0
  34. data/vendor/snappy/cmake/config.h.in +66 -0
  35. data/vendor/snappy/docs/README.md +72 -0
  36. data/vendor/snappy/snappy-internal.h +200 -32
  37. data/vendor/snappy/snappy-sinksource.cc +26 -9
  38. data/vendor/snappy/snappy-sinksource.h +11 -11
  39. data/vendor/snappy/snappy-stubs-internal.cc +1 -1
  40. data/vendor/snappy/snappy-stubs-internal.h +299 -302
  41. data/vendor/snappy/snappy-stubs-public.h.in +10 -47
  42. data/vendor/snappy/snappy-test.cc +94 -200
  43. data/vendor/snappy/snappy-test.h +101 -358
  44. data/vendor/snappy/snappy.cc +1437 -474
  45. data/vendor/snappy/snappy.h +31 -12
  46. data/vendor/snappy/snappy_benchmark.cc +378 -0
  47. data/vendor/snappy/snappy_compress_fuzzer.cc +60 -0
  48. data/vendor/snappy/snappy_test_data.cc +57 -0
  49. data/vendor/snappy/snappy_test_data.h +68 -0
  50. data/vendor/snappy/snappy_test_tool.cc +471 -0
  51. data/vendor/snappy/snappy_uncompress_fuzzer.cc +58 -0
  52. data/vendor/snappy/snappy_unittest.cc +271 -792
  53. metadata +42 -92
  54. data/.travis.yml +0 -26
  55. data/smoke.sh +0 -8
  56. data/test/test-snappy-reader.rb +0 -129
  57. data/test/test-snappy-writer.rb +0 -55
  58. data/test/test-snappy.rb +0 -58
  59. data/vendor/snappy/ChangeLog +0 -2468
  60. data/vendor/snappy/INSTALL +0 -370
  61. data/vendor/snappy/Makefile +0 -982
  62. data/vendor/snappy/Makefile.am +0 -26
  63. data/vendor/snappy/Makefile.in +0 -982
  64. data/vendor/snappy/aclocal.m4 +0 -9738
  65. data/vendor/snappy/autogen.sh +0 -12
  66. data/vendor/snappy/autom4te.cache/output.0 +0 -18856
  67. data/vendor/snappy/autom4te.cache/output.1 +0 -18852
  68. data/vendor/snappy/autom4te.cache/requests +0 -297
  69. data/vendor/snappy/autom4te.cache/traces.0 +0 -2689
  70. data/vendor/snappy/autom4te.cache/traces.1 +0 -714
  71. data/vendor/snappy/config.guess +0 -1530
  72. data/vendor/snappy/config.h +0 -135
  73. data/vendor/snappy/config.h.in +0 -134
  74. data/vendor/snappy/config.log +0 -1640
  75. data/vendor/snappy/config.status +0 -2318
  76. data/vendor/snappy/config.sub +0 -1773
  77. data/vendor/snappy/configure +0 -18852
  78. data/vendor/snappy/configure.ac +0 -134
  79. data/vendor/snappy/depcomp +0 -688
  80. data/vendor/snappy/install-sh +0 -527
  81. data/vendor/snappy/libtool +0 -10246
  82. data/vendor/snappy/ltmain.sh +0 -9661
  83. data/vendor/snappy/m4/gtest.m4 +0 -74
  84. data/vendor/snappy/m4/libtool.m4 +0 -8001
  85. data/vendor/snappy/m4/ltoptions.m4 +0 -384
  86. data/vendor/snappy/m4/ltsugar.m4 +0 -123
  87. data/vendor/snappy/m4/ltversion.m4 +0 -23
  88. data/vendor/snappy/m4/lt~obsolete.m4 +0 -98
  89. data/vendor/snappy/missing +0 -331
  90. data/vendor/snappy/snappy-stubs-public.h +0 -100
  91. data/vendor/snappy/snappy.pc +0 -10
  92. data/vendor/snappy/snappy.pc.in +0 -10
  93. data/vendor/snappy/stamp-h1 +0 -1
data/vendor/snappy/snappy.cc
@@ -26,43 +26,163 @@
  // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
- #include "snappy.h"
  #include "snappy-internal.h"
  #include "snappy-sinksource.h"
+ #include "snappy.h"
+ #if !defined(SNAPPY_HAVE_BMI2)
+ // __BMI2__ is defined by GCC and Clang. Visual Studio doesn't target BMI2
+ // specifically, but it does define __AVX2__ when AVX2 support is available.
+ // Fortunately, AVX2 was introduced in Haswell, just like BMI2.
+ //
+ // BMI2 is not defined as a subset of AVX2 (unlike SSSE3 and AVX above). So,
+ // GCC and Clang can build code with AVX2 enabled but BMI2 disabled, in which
+ // case issuing BMI2 instructions results in a compiler error.
+ #if defined(__BMI2__) || (defined(_MSC_VER) && defined(__AVX2__))
+ #define SNAPPY_HAVE_BMI2 1
+ #else
+ #define SNAPPY_HAVE_BMI2 0
+ #endif
+ #endif  // !defined(SNAPPY_HAVE_BMI2)
+
+ #if !defined(SNAPPY_HAVE_X86_CRC32)
+ #if defined(__SSE4_2__)
+ #define SNAPPY_HAVE_X86_CRC32 1
+ #else
+ #define SNAPPY_HAVE_X86_CRC32 0
+ #endif
+ #endif  // !defined(SNAPPY_HAVE_X86_CRC32)
+
+ #if !defined(SNAPPY_HAVE_NEON_CRC32)
+ #if SNAPPY_HAVE_NEON && defined(__ARM_FEATURE_CRC32)
+ #define SNAPPY_HAVE_NEON_CRC32 1
+ #else
+ #define SNAPPY_HAVE_NEON_CRC32 0
+ #endif
+ #endif  // !defined(SNAPPY_HAVE_NEON_CRC32)
+
+ #if SNAPPY_HAVE_BMI2 || SNAPPY_HAVE_X86_CRC32
+ // Please do not replace with <x86intrin.h>, or with headers that assume more
+ // advanced SSE versions without checking with all the OWNERS.
+ #include <immintrin.h>
+ #elif SNAPPY_HAVE_NEON_CRC32
+ #include <arm_acle.h>
+ #endif
 
- #if defined(__x86_64__) || defined(_M_X64)
- #include <emmintrin.h>
+ #if defined(__GNUC__)
+ #define SNAPPY_PREFETCH(ptr) __builtin_prefetch(ptr, 0, 3)
+ #else
+ #define SNAPPY_PREFETCH(ptr) (void)(ptr)
  #endif
- #include <stdio.h>
 
  #include <algorithm>
+ #include <array>
+ #include <cstddef>
+ #include <cstdint>
+ #include <cstdio>
+ #include <cstring>
  #include <string>
+ #include <utility>
  #include <vector>
 
-
  namespace snappy {
 
+ namespace {
+
+ // The number of slop bytes writers use for unconditional copies.
+ constexpr int kSlopBytes = 64;
+
+ using internal::char_table;
  using internal::COPY_1_BYTE_OFFSET;
  using internal::COPY_2_BYTE_OFFSET;
- using internal::LITERAL;
- using internal::char_table;
+ using internal::COPY_4_BYTE_OFFSET;
  using internal::kMaximumTagLength;
- using internal::wordmask;
-
- // Any hash function will produce a valid compressed bitstream, but a good
- // hash function reduces the number of collisions and thus yields better
- // compression for compressible input, and more speed for incompressible
- // input. Of course, it doesn't hurt if the hash function is reasonably fast
- // either, as it gets called a lot.
- static inline uint32 HashBytes(uint32 bytes, int shift) {
-   uint32 kMul = 0x1e35a7bd;
-   return (bytes * kMul) >> shift;
+ using internal::LITERAL;
+ #if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
+ using internal::V128;
+ using internal::V128_Load;
+ using internal::V128_LoadU;
+ using internal::V128_Shuffle;
+ using internal::V128_StoreU;
+ using internal::V128_DupChar;
+ #endif
+
+ // We translate the information encoded in a tag through a lookup table to a
+ // format that requires fewer instructions to decode. Effectively we store
+ // the length minus the tag part of the offset. The least significant byte
+ // thus stores the length, while length - offset is given by
+ // entry - ExtractOffset(type). The nice thing is that the subtraction
+ // immediately sets the flags for the necessary check that offset >= length.
+ // This folds the cmp with sub. We engineer the long literals and copy-4 to
+ // always fail this check, so their presence doesn't affect the fast path.
+ // To prevent literals from triggering the guard against offset < length
+ // (offset does not apply to literals), the table gives them a spurious
+ // offset of 256.
+ inline constexpr int16_t MakeEntry(int16_t len, int16_t offset) {
+   return len - (offset << 8);
+ }
+
+ inline constexpr int16_t LengthMinusOffset(int data, int type) {
+   return type == 3   ? 0xFF                    // copy-4 (or type == 3)
+          : type == 2 ? MakeEntry(data + 1, 0)  // copy-2
+          : type == 1 ? MakeEntry((data & 7) + 4, data >> 3)  // copy-1
+          : data < 60 ? MakeEntry(data + 1, 1)  // note spurious offset.
+                      : 0xFF;                   // long literal
+ }
+
+ inline constexpr int16_t LengthMinusOffset(uint8_t tag) {
+   return LengthMinusOffset(tag >> 2, tag & 3);
  }
- static inline uint32 Hash(const char* p, int shift) {
-   return HashBytes(UNALIGNED_LOAD32(p), shift);
+
+ template <size_t... Ints>
+ struct index_sequence {};
+
+ template <std::size_t N, size_t... Is>
+ struct make_index_sequence : make_index_sequence<N - 1, N - 1, Is...> {};
+
+ template <size_t... Is>
+ struct make_index_sequence<0, Is...> : index_sequence<Is...> {};
+
+ template <size_t... seq>
+ constexpr std::array<int16_t, 256> MakeTable(index_sequence<seq...>) {
+   return std::array<int16_t, 256>{LengthMinusOffset(seq)...};
  }
 
- size_t MaxCompressedLength(size_t source_len) {
+ alignas(64) const std::array<int16_t, 256> kLengthMinusOffset =
+     MakeTable(make_index_sequence<256>{});
+
+ // Given a table of uint16_t whose size is mask / 2 + 1, return a pointer to
+ // the relevant entry, if any, for the given bytes. Any hash function will do,
+ // but a good hash function reduces the number of collisions and thus yields
+ // better compression for compressible input.
+ //
+ // REQUIRES: mask is 2 * (table_size - 1), and table_size is a power of two.
+ inline uint16_t* TableEntry(uint16_t* table, uint32_t bytes, uint32_t mask) {
+   // Our choice is quicker-and-dirtier than the typical hash function;
+   // empirically, that seems beneficial. The upper bits of kMagic * bytes are a
+   // higher-quality hash than the lower bits, so when using kMagic * bytes we
+   // also shift right to get a higher-quality end result. There's no similar
+   // issue with a CRC because all of the output bits of a CRC are equally good
+   // "hashes." So, a CPU instruction for CRC, if available, tends to be a good
+   // choice.
+ #if SNAPPY_HAVE_NEON_CRC32
+   // We use mask as the second arg to the CRC function, as it's about to
+   // be used anyway; it'd be equally correct to use 0 or some constant.
+   // Mathematically, _mm_crc32_u32 (or similar) is a function of the
+   // xor of its arguments.
+   const uint32_t hash = __crc32cw(bytes, mask);
+ #elif SNAPPY_HAVE_X86_CRC32
+   const uint32_t hash = _mm_crc32_u32(bytes, mask);
+ #else
+   constexpr uint32_t kMagic = 0x1e35a7bd;
+   const uint32_t hash = (kMagic * bytes) >> (31 - kMaxHashTableBits);
+ #endif
+   return reinterpret_cast<uint16_t*>(reinterpret_cast<uintptr_t>(table) +
+                                      (hash & mask));
+ }
+
+ }  // namespace
+
+ size_t MaxCompressedLength(size_t source_bytes) {
    // Compressed data can be defined as:
    //    compressed := item* literal*
    //    item       := literal* copy
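
To make the new tag table concrete: for the copy-1 tag byte 0x29 (type = 0x29 & 3 = 1, data = 0x29 >> 2 = 10), the entry is MakeEntry((10 & 7) + 4, 10 >> 3) = 6 - (1 << 8), so the low byte carries the copy length 6 and the high byte carries the tag part of the offset. A minimal standalone sketch of that arithmetic, mirroring MakeEntry()/LengthMinusOffset() from the hunk above (not part of the package):

    #include <cstdint>

    // Same arithmetic as the diff's MakeEntry()/LengthMinusOffset().
    constexpr int16_t MakeEntry(int16_t len, int16_t offset) {
      return len - (offset << 8);
    }

    constexpr int16_t LengthMinusOffset(int data, int type) {
      return type == 3   ? 0xFF                                  // copy-4
             : type == 2 ? MakeEntry(data + 1, 0)                // copy-2
             : type == 1 ? MakeEntry((data & 7) + 4, data >> 3)  // copy-1
             : data < 60 ? MakeEntry(data + 1, 1)                // short literal
                         : 0xFF;                                 // long literal
    }

    // Copy-1 tag 0x29: length (10 & 7) + 4 = 6, offset high bits 10 >> 3 = 1.
    static_assert(LengthMinusOffset(0x29 >> 2, 0x29 & 3) == 6 - (1 << 8), "");
    // Short literal tag 0x08: length 3, plus the spurious offset of 1 (256).
    static_assert(LengthMinusOffset(0x08 >> 2, 0x08 & 3) == 3 - (1 << 8), "");

    int main() { return 0; }
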
@@ -83,24 +203,34 @@ size_t MaxCompressedLength(size_t source_len) {
    // I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
    //
    // This last factor dominates the blowup, so the final estimate is:
-   return 32 + source_len + source_len/6;
+   return 32 + source_bytes + source_bytes / 6;
  }
 
  namespace {
 
  void UnalignedCopy64(const void* src, void* dst) {
-   memcpy(dst, src, 8);
+   char tmp[8];
+   std::memcpy(tmp, src, 8);
+   std::memcpy(dst, tmp, 8);
  }
 
  void UnalignedCopy128(const void* src, void* dst) {
-   // TODO(alkis): Remove this when we upgrade to a recent compiler that emits
-   // SSE2 moves for memcpy(dst, src, 16).
- #ifdef __SSE2__
-   __m128i x = _mm_loadu_si128(static_cast<const __m128i*>(src));
-   _mm_storeu_si128(static_cast<__m128i*>(dst), x);
- #else
-   memcpy(dst, src, 16);
- #endif
+   // std::memcpy() gets vectorized when the appropriate compiler options are
+   // used. For example, x86 compilers targeting SSE2+ will optimize to an SSE2
+   // load and store.
+   char tmp[16];
+   std::memcpy(tmp, src, 16);
+   std::memcpy(dst, tmp, 16);
+ }
+
+ template <bool use_16bytes_chunk>
+ inline void ConditionalUnalignedCopy128(const char* src, char* dst) {
+   if (use_16bytes_chunk) {
+     UnalignedCopy128(src, dst);
+   } else {
+     UnalignedCopy64(src, dst);
+     UnalignedCopy64(src + 8, dst + 8);
+   }
  }
 
  // Copy [src, src+(op_limit-op)) to [op, (op_limit-op)) a byte at a time. Used
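
As a worked example of the bound above: a 1 MiB input gets 32 + 1048576 + 1048576 / 6 = 1,223,370 bytes of output budget, i.e. roughly 16.7% of slack for the worst (incompressible) case. A standalone check, mirroring the function in the hunk above:

    #include <cstddef>
    #include <cstdio>

    // Same formula as the diff's MaxCompressedLength().
    size_t MaxCompressedLength(size_t source_bytes) {
      return 32 + source_bytes + source_bytes / 6;
    }

    int main() {
      // 32 + 1048576 + 174762 = 1223370.
      std::printf("%zu\n", MaxCompressedLength(1 << 20));  // prints 1223370
      return 0;
    }
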
@@ -112,30 +242,195 @@ void UnalignedCopy128(const void* src, void* dst) {
  // After IncrementalCopySlow(src, op, op_limit), the result will have eleven
  // copies of "ab"
  //    ababababababababababab
- // Note that this does not match the semantics of either memcpy() or memmove().
+ // Note that this does not match the semantics of either std::memcpy() or
+ // std::memmove().
  inline char* IncrementalCopySlow(const char* src, char* op,
                                   char* const op_limit) {
+   // TODO: Remove pragma when LLVM is aware this
+   // function is only called in cold regions and when cold regions don't get
+   // vectorized or unrolled.
+ #ifdef __clang__
+ #pragma clang loop unroll(disable)
+ #endif
    while (op < op_limit) {
      *op++ = *src++;
    }
    return op_limit;
  }
 
- // Copy [src, src+(op_limit-op)) to [op, (op_limit-op)) but faster than
+ #if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
+
+ // Computes the bytes for shuffle control mask (please read comments on
+ // 'pattern_generation_masks' as well) for the given index_offset and
+ // pattern_size. For example, when the 'offset' is 6, it will generate a
+ // repeating pattern of size 6. So, the first 16 byte indexes will correspond
+ // to the pattern-bytes {0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3} and
+ // the next 16 byte indexes will correspond to the pattern-bytes {4, 5, 0, 1,
+ // 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1}. These byte index sequences are
+ // generated by calling MakePatternMaskBytes(0, 6, index_sequence<16>()) and
+ // MakePatternMaskBytes(16, 6, index_sequence<16>()) respectively.
+ template <size_t... indexes>
+ inline constexpr std::array<char, sizeof...(indexes)> MakePatternMaskBytes(
+     int index_offset, int pattern_size, index_sequence<indexes...>) {
+   return {static_cast<char>((index_offset + indexes) % pattern_size)...};
+ }
+
+ // Computes the shuffle control mask bytes array for given pattern-sizes and
+ // returns an array.
+ template <size_t... pattern_sizes_minus_one>
+ inline constexpr std::array<std::array<char, sizeof(V128)>,
+                             sizeof...(pattern_sizes_minus_one)>
+ MakePatternMaskBytesTable(int index_offset,
+                           index_sequence<pattern_sizes_minus_one...>) {
+   return {
+       MakePatternMaskBytes(index_offset, pattern_sizes_minus_one + 1,
+                            make_index_sequence</*indexes=*/sizeof(V128)>())...};
+ }
+
+ // This is an array of shuffle control masks that can be used as the source
+ // operand for PSHUFB to permute the contents of the destination XMM register
+ // into a repeating byte pattern.
+ alignas(16) constexpr std::array<std::array<char, sizeof(V128)>,
+                                  16> pattern_generation_masks =
+     MakePatternMaskBytesTable(
+         /*index_offset=*/0,
+         /*pattern_sizes_minus_one=*/make_index_sequence<16>());
+
+ // Similar to 'pattern_generation_masks', this table is used to "rotate" the
+ // pattern so that we can copy the *next 16 bytes* consistent with the
+ // pattern. Basically, pattern_reshuffle_masks is a continuation of
+ // pattern_generation_masks. It follows that pattern_reshuffle_masks is the
+ // same as pattern_generation_masks for offsets 1, 2, 4, 8 and 16.
+ alignas(16) constexpr std::array<std::array<char, sizeof(V128)>,
+                                  16> pattern_reshuffle_masks =
+     MakePatternMaskBytesTable(
+         /*index_offset=*/16,
+         /*pattern_sizes_minus_one=*/make_index_sequence<16>());
+
+ SNAPPY_ATTRIBUTE_ALWAYS_INLINE
+ static inline V128 LoadPattern(const char* src, const size_t pattern_size) {
+   V128 generation_mask = V128_Load(reinterpret_cast<const V128*>(
+       pattern_generation_masks[pattern_size - 1].data()));
+   // Uninitialized bytes are masked out by the shuffle mask.
+   // TODO: remove annotation and macro defs once MSan is fixed.
+   SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(src + pattern_size, 16 - pattern_size);
+   return V128_Shuffle(V128_LoadU(reinterpret_cast<const V128*>(src)),
+                       generation_mask);
+ }
+
+ SNAPPY_ATTRIBUTE_ALWAYS_INLINE
+ static inline std::pair<V128 /* pattern */, V128 /* reshuffle_mask */>
+ LoadPatternAndReshuffleMask(const char* src, const size_t pattern_size) {
+   V128 pattern = LoadPattern(src, pattern_size);
+
+   // This mask will generate the next 16 bytes in-place. Doing so enables us
+   // to write data by at most 4 V128_StoreU.
+   //
+   // For example, suppose pattern is:        abcdefabcdefabcd
+   // Shuffling with this mask will generate: efabcdefabcdefab
+   // Shuffling again will generate:          cdefabcdefabcdef
+   V128 reshuffle_mask = V128_Load(reinterpret_cast<const V128*>(
+       pattern_reshuffle_masks[pattern_size - 1].data()));
+   return {pattern, reshuffle_mask};
+ }
+
+ #endif  // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
+
+ // Fallback for when we need to copy while extending the pattern, for example
+ // copying 10 bytes from 3 positions back abc -> abcabcabcabca.
+ //
+ // REQUIRES: [dst - offset, dst + 64) is a valid address range.
+ SNAPPY_ATTRIBUTE_ALWAYS_INLINE
+ static inline bool Copy64BytesWithPatternExtension(char* dst, size_t offset) {
+ #if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
+   if (SNAPPY_PREDICT_TRUE(offset <= 16)) {
+     switch (offset) {
+       case 0:
+         return false;
+       case 1: {
+         // TODO: Ideally we should memset, move back once the
+         // codegen issues are fixed.
+         V128 pattern = V128_DupChar(dst[-1]);
+         for (int i = 0; i < 4; i++) {
+           V128_StoreU(reinterpret_cast<V128*>(dst + 16 * i), pattern);
+         }
+         return true;
+       }
+       case 2:
+       case 4:
+       case 8:
+       case 16: {
+         V128 pattern = LoadPattern(dst - offset, offset);
+         for (int i = 0; i < 4; i++) {
+           V128_StoreU(reinterpret_cast<V128*>(dst + 16 * i), pattern);
+         }
+         return true;
+       }
+       default: {
+         auto pattern_and_reshuffle_mask =
+             LoadPatternAndReshuffleMask(dst - offset, offset);
+         V128 pattern = pattern_and_reshuffle_mask.first;
+         V128 reshuffle_mask = pattern_and_reshuffle_mask.second;
+         for (int i = 0; i < 4; i++) {
+           V128_StoreU(reinterpret_cast<V128*>(dst + 16 * i), pattern);
+           pattern = V128_Shuffle(pattern, reshuffle_mask);
+         }
+         return true;
+       }
+     }
+   }
+ #else
+   if (SNAPPY_PREDICT_TRUE(offset < 16)) {
+     if (SNAPPY_PREDICT_FALSE(offset == 0)) return false;
+     // Extend the pattern to the first 16 bytes.
+     // The simpler formulation of `dst[i - offset]` induces undefined behavior.
+     for (int i = 0; i < 16; i++) dst[i] = (dst - offset)[i];
+     // Find a multiple of pattern >= 16.
+     static std::array<uint8_t, 16> pattern_sizes = []() {
+       std::array<uint8_t, 16> res;
+       for (int i = 1; i < 16; i++) res[i] = (16 / i + 1) * i;
+       return res;
+     }();
+     offset = pattern_sizes[offset];
+     for (int i = 1; i < 4; i++) {
+       std::memcpy(dst + i * 16, dst + i * 16 - offset, 16);
+     }
+     return true;
+   }
+ #endif  // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
+
+   // Very rare.
+   for (int i = 0; i < 4; i++) {
+     std::memcpy(dst + i * 16, dst + i * 16 - offset, 16);
+   }
+   return true;
+ }
+
+ // Copy [src, src+(op_limit-op)) to [op, op_limit) but faster than
  // IncrementalCopySlow. buf_limit is the address past the end of the writable
  // region of the buffer.
  inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
                               char* const buf_limit) {
+ #if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
+   constexpr int big_pattern_size_lower_bound = 16;
+ #else
+   constexpr int big_pattern_size_lower_bound = 8;
+ #endif
+
    // Terminology:
    //
    // slop = buf_limit - op
    // pat  = op - src
-   // len  = limit - op
+   // len  = op_limit - op
    assert(src < op);
+   assert(op < op_limit);
    assert(op_limit <= buf_limit);
-   // NOTE: The compressor always emits 4 <= len <= 64. It is ok to assume that
-   // to optimize this function but we have to also handle these cases in case
-   // the input does not satisfy these conditions.
+   // NOTE: The copy tags use 3 or 6 bits to store the copy length, so len <= 64.
+   assert(op_limit - op <= 64);
+   // NOTE: In practice the compressor always emits len >= 4, so it is ok to
+   // assume that to optimize this function, but this is not guaranteed by the
+   // compression format, so we have to also handle len < 4 in case the input
+   // does not satisfy these conditions.
 
    size_t pattern_size = op - src;
    // The cases are split into different branches to allow the branch predictor,
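
The comment's pattern_size == 6 example can be checked with the same modular arithmetic the tables above are built from: (index_offset + i) % pattern_size yields the generation row for index_offset 0 and the reshuffle row for index_offset 16. A standalone sketch, no SIMD needed:

    #include <array>
    #include <cstdio>

    // Computes one 16-byte shuffle-control row the same way the diff's
    // MakePatternMaskBytes() does: (index_offset + i) % pattern_size.
    std::array<char, 16> MakeRow(int index_offset, int pattern_size) {
      std::array<char, 16> row{};
      for (int i = 0; i < 16; ++i) {
        row[i] = static_cast<char>((index_offset + i) % pattern_size);
      }
      return row;
    }

    int main() {
      // pattern_generation_masks[5]: 0 1 2 3 4 5 0 1 2 3 4 5 0 1 2 3
      // pattern_reshuffle_masks[5]:  4 5 0 1 2 3 4 5 0 1 2 3 4 5 0 1
      for (char b : MakeRow(0, 6)) std::printf("%d ", b);
      std::printf("\n");
      for (char b : MakeRow(16, 6)) std::printf("%d ", b);
      std::printf("\n");
      return 0;
    }
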
@@ -159,47 +454,141 @@ inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
    // input. In general if we always predict len <= 16 it would be an ok
    // prediction.
    //
-   // In order to be fast we want a pattern >= 8 bytes and an unrolled loop
-   // copying 2x 8 bytes at a time.
-
-   // Handle the uncommon case where pattern is less than 8 bytes.
-   if (PREDICT_FALSE(pattern_size < 8)) {
-     // Expand pattern to at least 8 bytes. The worse case scenario in terms of
-     // buffer usage is when the pattern is size 3. ^ is the original position
-     // of op. x are irrelevant bytes copied by the last UnalignedCopy64.
+   // In order to be fast we want a pattern >= 16 bytes (or 8 bytes in non-SSE)
+   // and an unrolled loop copying 1x 16 bytes (or 2x 8 bytes in non-SSE) at a
+   // time.
+
+   // Handle the uncommon case where pattern is less than 16 (or 8 in non-SSE)
+   // bytes.
+   if (pattern_size < big_pattern_size_lower_bound) {
+ #if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
+     // Load the first eight bytes into an 128-bit XMM register, then use PSHUFB
+     // to permute the register's contents in-place into a repeating sequence of
+     // the first "pattern_size" bytes.
+     // For example, suppose:
+     //    src == "abc"
+     //    op  == op + 3
+     // After V128_Shuffle(), "pattern" will have five copies of "abc"
+     // followed by one byte of slop: abcabcabcabcabca.
      //
-     //    abc
-     //    abcabcxxxxx
-     //    abcabcabcabcxxxxx
-     //    ^
-     // The last x is 14 bytes after ^.
-     if (PREDICT_TRUE(op <= buf_limit - 14)) {
+     // The non-SSE fallback implementation suffers from store-forwarding stalls
+     // because its loads and stores partly overlap. By expanding the pattern
+     // in-place, we avoid the penalty.
+
+     // Typically, the op_limit is the gating factor so try to simplify the loop
+     // based on that.
+     if (SNAPPY_PREDICT_TRUE(op_limit <= buf_limit - 15)) {
+       auto pattern_and_reshuffle_mask =
+           LoadPatternAndReshuffleMask(src, pattern_size);
+       V128 pattern = pattern_and_reshuffle_mask.first;
+       V128 reshuffle_mask = pattern_and_reshuffle_mask.second;
+
+       // There is at least one, and at most four 16-byte blocks. Writing four
+       // conditionals instead of a loop allows FDO to layout the code with
+       // respect to the actual probabilities of each length.
+       // TODO: Replace with loop with trip count hint.
+       V128_StoreU(reinterpret_cast<V128*>(op), pattern);
+
+       if (op + 16 < op_limit) {
+         pattern = V128_Shuffle(pattern, reshuffle_mask);
+         V128_StoreU(reinterpret_cast<V128*>(op + 16), pattern);
+       }
+       if (op + 32 < op_limit) {
+         pattern = V128_Shuffle(pattern, reshuffle_mask);
+         V128_StoreU(reinterpret_cast<V128*>(op + 32), pattern);
+       }
+       if (op + 48 < op_limit) {
+         pattern = V128_Shuffle(pattern, reshuffle_mask);
+         V128_StoreU(reinterpret_cast<V128*>(op + 48), pattern);
+       }
+       return op_limit;
+     }
+     char* const op_end = buf_limit - 15;
+     if (SNAPPY_PREDICT_TRUE(op < op_end)) {
+       auto pattern_and_reshuffle_mask =
+           LoadPatternAndReshuffleMask(src, pattern_size);
+       V128 pattern = pattern_and_reshuffle_mask.first;
+       V128 reshuffle_mask = pattern_and_reshuffle_mask.second;
+
+       // This code path is relatively cold however so we save code size
+       // by avoiding unrolling and vectorizing.
+       //
+       // TODO: Remove pragma when cold regions don't get
+       // vectorized or unrolled.
+ #ifdef __clang__
+ #pragma clang loop unroll(disable)
+ #endif
+       do {
+         V128_StoreU(reinterpret_cast<V128*>(op), pattern);
+         pattern = V128_Shuffle(pattern, reshuffle_mask);
+         op += 16;
+       } while (SNAPPY_PREDICT_TRUE(op < op_end));
+     }
+     return IncrementalCopySlow(op - pattern_size, op, op_limit);
+ #else   // !SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
+     // If plenty of buffer space remains, expand the pattern to at least 8
+     // bytes. The way the following loop is written, we need 8 bytes of buffer
+     // space if pattern_size >= 4, 11 bytes if pattern_size is 1 or 3, and 10
+     // bytes if pattern_size is 2. Precisely encoding that is probably not
+     // worthwhile; instead, invoke the slow path if we cannot write 11 bytes
+     // (because 11 are required in the worst case).
+     if (SNAPPY_PREDICT_TRUE(op <= buf_limit - 11)) {
        while (pattern_size < 8) {
          UnalignedCopy64(src, op);
          op += pattern_size;
          pattern_size *= 2;
        }
-       if (PREDICT_TRUE(op >= op_limit)) return op_limit;
+       if (SNAPPY_PREDICT_TRUE(op >= op_limit)) return op_limit;
      } else {
        return IncrementalCopySlow(src, op, op_limit);
      }
+ #endif  // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
    }
-   assert(pattern_size >= 8);
+   assert(pattern_size >= big_pattern_size_lower_bound);
+   constexpr bool use_16bytes_chunk = big_pattern_size_lower_bound == 16;
 
-   // Copy 2x 8 bytes at a time. Because op - src can be < 16, a single
-   // UnalignedCopy128 might overwrite data in op. UnalignedCopy64 is safe
-   // because expanding the pattern to at least 8 bytes guarantees that
-   // op - src >= 8.
-   while (op <= buf_limit - 16) {
-     UnalignedCopy64(src, op);
-     UnalignedCopy64(src + 8, op + 8);
-     src += 16;
-     op += 16;
-     if (PREDICT_TRUE(op >= op_limit)) return op_limit;
+   // Copy 1x 16 bytes (or 2x 8 bytes in non-SSE) at a time. Because op - src
+   // can be < 16 in non-SSE, a single UnalignedCopy128 might overwrite data in
+   // op. UnalignedCopy64 is safe because expanding the pattern to at least 8
+   // bytes guarantees that op - src >= 8.
+   //
+   // Typically, the op_limit is the gating factor so try to simplify the loop
+   // based on that.
+   if (SNAPPY_PREDICT_TRUE(op_limit <= buf_limit - 15)) {
+     // There is at least one, and at most four 16-byte blocks. Writing four
+     // conditionals instead of a loop allows FDO to layout the code with
+     // respect to the actual probabilities of each length.
+     // TODO: Replace with loop with trip count hint.
+     ConditionalUnalignedCopy128<use_16bytes_chunk>(src, op);
+     if (op + 16 < op_limit) {
+       ConditionalUnalignedCopy128<use_16bytes_chunk>(src + 16, op + 16);
+     }
+     if (op + 32 < op_limit) {
+       ConditionalUnalignedCopy128<use_16bytes_chunk>(src + 32, op + 32);
+     }
+     if (op + 48 < op_limit) {
+       ConditionalUnalignedCopy128<use_16bytes_chunk>(src + 48, op + 48);
+     }
+     return op_limit;
    }
+
+   // Fall back to doing as much as we can with the available slop in the
+   // buffer. This code path is relatively cold however so we save code size by
+   // avoiding unrolling and vectorizing.
+   //
+   // TODO: Remove pragma when cold regions don't get vectorized
+   // or unrolled.
+ #ifdef __clang__
+ #pragma clang loop unroll(disable)
+ #endif
+   for (char* op_end = buf_limit - 16; op < op_end; op += 16, src += 16) {
+     ConditionalUnalignedCopy128<use_16bytes_chunk>(src, op);
+   }
+   if (op >= op_limit) return op_limit;
+
    // We only take this branch if we didn't have enough slop and we can do a
    // single 8 byte copy.
-   if (PREDICT_FALSE(op <= buf_limit - 8)) {
+   if (SNAPPY_PREDICT_FALSE(op <= buf_limit - 8)) {
      UnalignedCopy64(src, op);
      src += 8;
      op += 8;
  op += 8;
@@ -209,12 +598,10 @@ inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
 
  }  // namespace
 
- static inline char* EmitLiteral(char* op,
-                                 const char* literal,
-                                 int len,
-                                 bool allow_fast_path) {
+ template <bool allow_fast_path>
+ static inline char* EmitLiteral(char* op, const char* literal, int len) {
    // The vast majority of copies are below 16 bytes, for which a
-   // call to memcpy is overkill. This fast path can sometimes
+   // call to std::memcpy() is overkill. This fast path can sometimes
    // copy up to 15 bytes too much, but that is okay in the
    // main loop, since we have a bit to go on for both sides:
    //
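
The signature change above turns a runtime bool into a template parameter, so every call site instantiates a copy of EmitLiteral with the flag folded away at compile time and the dead branch removed. A generic sketch of the pattern (not snappy code):

    #include <cstdio>

    // Runtime flag: the branch is evaluated on every call.
    int emit_runtime(int n, bool fast) { return fast ? n * 2 : n + 1; }

    // Template flag: each instantiation compiles a branch-free body, like
    // EmitLiteral</*allow_fast_path=*/true> vs. <false> in the diff.
    template <bool fast>
    int emit_template(int n) {
      if (fast) return n * 2;  // resolved per instantiation
      return n + 1;
    }

    int main() {
      std::printf("%d %d %d\n", emit_runtime(21, true),
                  emit_template<true>(21), emit_template<false>(41));
      return 0;
    }
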
@@ -223,7 +610,7 @@ static inline char* EmitLiteral(char* op,
    //   if not, allow_fast_path = false.
    // - The output will always have 32 spare bytes (see
    //   MaxCompressedLength).
-   assert(len > 0);      // Zero-length literals are disallowed
+   assert(len > 0);  // Zero-length literals are disallowed
    int n = len - 1;
    if (allow_fast_path && len <= 16) {
      // Fits in tag byte
@@ -237,74 +624,95 @@ static inline char* EmitLiteral(char* op,
      // Fits in tag byte
      *op++ = LITERAL | (n << 2);
    } else {
-     // Encode in upcoming bytes
-     char* base = op;
-     int count = 0;
-     op++;
-     while (n > 0) {
-       *op++ = n & 0xff;
-       n >>= 8;
-       count++;
-     }
+     int count = (Bits::Log2Floor(n) >> 3) + 1;
      assert(count >= 1);
      assert(count <= 4);
-     *base = LITERAL | ((59+count) << 2);
+     *op++ = LITERAL | ((59 + count) << 2);
+     // Encode in upcoming bytes.
+     // Write 4 bytes, though we may care about only 1 of them. The output
+     // buffer is guaranteed to have at least 3 more spaces left as 'len >= 61'
+     // holds here and there is a std::memcpy() of size 'len' below.
+     LittleEndian::Store32(op, n);
+     op += count;
+   }
+   // When allow_fast_path is true, we can overwrite up to 16 bytes.
+   if (allow_fast_path) {
+     char* destination = op;
+     const char* source = literal;
+     const char* end = destination + len;
+     do {
+       std::memcpy(destination, source, 16);
+       destination += 16;
+       source += 16;
+     } while (destination < end);
+   } else {
+     std::memcpy(op, literal, len);
    }
-   memcpy(op, literal, len);
    return op + len;
  }
 
- static inline char* EmitCopyAtMost64(char* op, size_t offset, size_t len,
-                                      bool len_less_than_12) {
+ template <bool len_less_than_12>
+ static inline char* EmitCopyAtMost64(char* op, size_t offset, size_t len) {
    assert(len <= 64);
    assert(len >= 4);
    assert(offset < 65536);
    assert(len_less_than_12 == (len < 12));
 
-   if (len_less_than_12 && PREDICT_TRUE(offset < 2048)) {
-     // offset fits in 11 bits. The 3 highest go in the top of the first byte,
-     // and the rest go in the second byte.
-     *op++ = COPY_1_BYTE_OFFSET + ((len - 4) << 2) + ((offset >> 3) & 0xe0);
-     *op++ = offset & 0xff;
+   if (len_less_than_12) {
+     uint32_t u = (len << 2) + (offset << 8);
+     uint32_t copy1 = COPY_1_BYTE_OFFSET - (4 << 2) + ((offset >> 3) & 0xe0);
+     uint32_t copy2 = COPY_2_BYTE_OFFSET - (1 << 2);
+     // It turns out that offset < 2048 is a difficult-to-predict branch;
+     // `perf record` shows it has the highest percentage of branch misses in
+     // benchmarks. This produces branch-free code: the data dependency
+     // chain that bottlenecks the throughput is so long that a few extra
+     // instructions are completely free (IPC << 6 because of data deps).
+     u += offset < 2048 ? copy1 : copy2;
+     LittleEndian::Store32(op, u);
+     op += offset < 2048 ? 2 : 3;
    } else {
      // Write 4 bytes, though we only care about 3 of them. The output buffer
      // is required to have some slack, so the extra byte won't overrun it.
-     uint32 u = COPY_2_BYTE_OFFSET + ((len - 1) << 2) + (offset << 8);
+     uint32_t u = COPY_2_BYTE_OFFSET + ((len - 1) << 2) + (offset << 8);
      LittleEndian::Store32(op, u);
      op += 3;
    }
    return op;
  }
 
- static inline char* EmitCopy(char* op, size_t offset, size_t len,
-                              bool len_less_than_12) {
+ template <bool len_less_than_12>
+ static inline char* EmitCopy(char* op, size_t offset, size_t len) {
    assert(len_less_than_12 == (len < 12));
    if (len_less_than_12) {
-     return EmitCopyAtMost64(op, offset, len, true);
+     return EmitCopyAtMost64</*len_less_than_12=*/true>(op, offset, len);
    } else {
      // A special case for len <= 64 might help, but so far measurements suggest
      // it's in the noise.
 
      // Emit 64 byte copies but make sure to keep at least four bytes reserved.
-     while (PREDICT_FALSE(len >= 68)) {
-       op = EmitCopyAtMost64(op, offset, 64, false);
+     while (SNAPPY_PREDICT_FALSE(len >= 68)) {
+       op = EmitCopyAtMost64</*len_less_than_12=*/false>(op, offset, 64);
        len -= 64;
      }
 
      // One or two copies will now finish the job.
      if (len > 64) {
-       op = EmitCopyAtMost64(op, offset, 60, false);
+       op = EmitCopyAtMost64</*len_less_than_12=*/false>(op, offset, 60);
        len -= 60;
      }
 
      // Emit remainder.
-     op = EmitCopyAtMost64(op, offset, len, len < 12);
+     if (len < 12) {
+       op = EmitCopyAtMost64</*len_less_than_12=*/true>(op, offset, len);
+     } else {
+       op = EmitCopyAtMost64</*len_less_than_12=*/false>(op, offset, len);
+     }
      return op;
    }
  }
 
  bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
-   uint32 v = 0;
+   uint32_t v = 0;
    const char* limit = start + n;
    if (Varint::Parse32WithLimit(start, limit, &v) != NULL) {
      *result = v;
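
Concretely, for a literal of length 100 the new emitter computes count = (Log2Floor(99) >> 3) + 1 = 1 and writes the tag LITERAL | ((59 + 1) << 2) = 240 followed by the length byte 99; for a copy of length 10 at offset 700, the branch-free path produces the same two bytes 0x59, 0xBC as the old explicit copy-1 encoding. A standalone sketch (tag constants LITERAL = 0 and COPY_1_BYTE_OFFSET = 1 are assumed from snappy-internal.h):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Literal, length 100: n = 99 needs one extra length byte (count == 1),
      // so the tag is LITERAL | ((59 + 1) << 2), then the byte 99.
      int n = 100 - 1;
      int count = 1;  // == (Bits::Log2Floor(99) >> 3) + 1
      uint8_t literal_tag = 0 | ((59 + count) << 2);
      assert(literal_tag == 240 && (n & 0xff) == 99);

      // Copy, length 10 at offset 700 (copy-1: len < 12, offset < 2048).
      // Branch-free form from the diff: u + copy1, stored little-endian.
      uint32_t len = 10, offset = 700;
      uint32_t u = (len << 2) + (offset << 8);
      uint32_t copy1 =
          1 /*COPY_1_BYTE_OFFSET*/ - (4 << 2) + ((offset >> 3) & 0xe0);
      uint32_t word = u + copy1;
      assert((word & 0xff) == 0x59);         // tag byte
      assert(((word >> 8) & 0xff) == 0xbc);  // low offset byte
      // Matches the old explicit encoding:
      assert((1 + ((len - 4) << 2) + ((offset >> 3) & 0xe0)) == 0x59);
      return 0;
    }
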
@@ -314,76 +722,47 @@ bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
    }
  }
 
- namespace internal {
- uint16* WorkingMemory::GetHashTable(size_t input_size, int* table_size) {
-   // Use smaller hash table when input.size() is smaller, since we
-   // fill the table, incurring O(hash table size) overhead for
-   // compression, and if the input is short, we won't need that
-   // many hash table entries anyway.
-   assert(kMaxHashTableSize >= 256);
-   size_t htsize = 256;
-   while (htsize < kMaxHashTableSize && htsize < input_size) {
-     htsize <<= 1;
-   }
-
-   uint16* table;
-   if (htsize <= ARRAYSIZE(small_table_)) {
-     table = small_table_;
-   } else {
-     if (large_table_ == NULL) {
-       large_table_ = new uint16[kMaxHashTableSize];
-     }
-     table = large_table_;
+ namespace {
+ uint32_t CalculateTableSize(uint32_t input_size) {
+   static_assert(
+       kMaxHashTableSize >= kMinHashTableSize,
+       "kMaxHashTableSize should be greater or equal to kMinHashTableSize.");
+   if (input_size > kMaxHashTableSize) {
+     return kMaxHashTableSize;
    }
-
-   *table_size = htsize;
-   memset(table, 0, htsize * sizeof(*table));
-   return table;
- }
- }  // end namespace internal
-
- // For 0 <= offset <= 4, GetUint32AtOffset(GetEightBytesAt(p), offset) will
- // equal UNALIGNED_LOAD32(p + offset). Motivation: On x86-64 hardware we have
- // empirically found that overlapping loads such as
- // UNALIGNED_LOAD32(p) ... UNALIGNED_LOAD32(p+1) ... UNALIGNED_LOAD32(p+2)
- // are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to uint32.
- //
- // We have different versions for 64- and 32-bit; ideally we would avoid the
- // two functions and just inline the UNALIGNED_LOAD64 call into
- // GetUint32AtOffset, but GCC (at least not as of 4.6) is seemingly not clever
- // enough to avoid loading the value multiple times then. For 64-bit, the load
- // is done when GetEightBytesAt() is called, whereas for 32-bit, the load is
- // done at GetUint32AtOffset() time.
-
- #ifdef ARCH_K8
-
- typedef uint64 EightBytesReference;
-
- static inline EightBytesReference GetEightBytesAt(const char* ptr) {
-   return UNALIGNED_LOAD64(ptr);
+   if (input_size < kMinHashTableSize) {
+     return kMinHashTableSize;
+   }
+   // This is equivalent to Log2Ceiling(input_size), assuming input_size > 1.
+   // 2 << Log2Floor(x - 1) is equivalent to 1 << (1 + Log2Floor(x - 1)).
+   return 2u << Bits::Log2Floor(input_size - 1);
  }
+ }  // namespace
 
- static inline uint32 GetUint32AtOffset(uint64 v, int offset) {
-   assert(offset >= 0);
-   assert(offset <= 4);
-   return v >> (LittleEndian::IsLittleEndian() ? 8 * offset : 32 - 8 * offset);
+ namespace internal {
+ WorkingMemory::WorkingMemory(size_t input_size) {
+   const size_t max_fragment_size = std::min(input_size, kBlockSize);
+   const size_t table_size = CalculateTableSize(max_fragment_size);
+   size_ = table_size * sizeof(*table_) + max_fragment_size +
+           MaxCompressedLength(max_fragment_size);
+   mem_ = std::allocator<char>().allocate(size_);
+   table_ = reinterpret_cast<uint16_t*>(mem_);
+   input_ = mem_ + table_size * sizeof(*table_);
+   output_ = input_ + max_fragment_size;
  }
 
- #else
-
- typedef const char* EightBytesReference;
-
- static inline EightBytesReference GetEightBytesAt(const char* ptr) {
-   return ptr;
+ WorkingMemory::~WorkingMemory() {
+   std::allocator<char>().deallocate(mem_, size_);
  }
 
- static inline uint32 GetUint32AtOffset(const char* v, int offset) {
-   assert(offset >= 0);
-   assert(offset <= 4);
-   return UNALIGNED_LOAD32(v + offset);
+ uint16_t* WorkingMemory::GetHashTable(size_t fragment_size,
+                                       int* table_size) const {
+   const size_t htsize = CalculateTableSize(fragment_size);
+   memset(table_, 0, htsize * sizeof(*table_));
+   *table_size = htsize;
+   return table_;
  }
-
- #endif
+ }  // end namespace internal
 
  // Flat array compression that does not emit the "uncompressed length"
  // prefix. Compresses "input" string to the "*op" buffer.
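
The rewritten WorkingMemory carves one allocation into three regions: the hash table, a fragment-sized input scratch area, and a worst-case output area. A sketch of the size arithmetic with assumed snappy constants (kBlockSize = 64 KiB, table clamped between 1 << 8 and 1 << 14 entries; check the real headers):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Assumed constants; the real values live in snappy's headers.
    constexpr size_t kBlockSize = 1 << 16;
    constexpr uint32_t kMinHashTableSize = 1 << 8;
    constexpr uint32_t kMaxHashTableSize = 1 << 14;

    uint32_t Log2Floor(uint32_t n) { return 31 - __builtin_clz(n); }  // GCC/Clang

    // Mirrors the diff's CalculateTableSize(): clamp, then round up to a
    // power of two via 2 << Log2Floor(x - 1).
    uint32_t CalculateTableSize(uint32_t input_size) {
      if (input_size > kMaxHashTableSize) return kMaxHashTableSize;
      if (input_size < kMinHashTableSize) return kMinHashTableSize;
      return 2u << Log2Floor(input_size - 1);
    }

    int main() {
      std::printf("%u\n", CalculateTableSize(5000));  // 8192 (next power of two)
      std::printf("%u\n", CalculateTableSize(8192));  // 8192 (already a power)
      // WorkingMemory arena for a 64 KiB fragment:
      size_t frag = std::min<size_t>(1 << 20, kBlockSize);               // 65536
      size_t table_bytes = CalculateTableSize(frag) * sizeof(uint16_t);  // 32768
      size_t total = table_bytes + frag + (32 + frag + frag / 6);
      std::printf("%zu\n", total);  // 32768 + 65536 + 76490 = 174794
      return 0;
    }
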
@@ -397,29 +776,25 @@ static inline uint32 GetUint32AtOffset(const char* v, int offset) {
  // Returns an "end" pointer into "op" buffer.
  // "end - op" is the compressed size of "input".
  namespace internal {
- char* CompressFragment(const char* input,
-                        size_t input_size,
-                        char* op,
-                        uint16* table,
-                        const int table_size) {
+ char* CompressFragment(const char* input, size_t input_size, char* op,
+                        uint16_t* table, const int table_size) {
    // "ip" is the input pointer, and "op" is the output pointer.
    const char* ip = input;
    assert(input_size <= kBlockSize);
-   assert((table_size & (table_size - 1)) == 0); // table must be power of two
-   const int shift = 32 - Bits::Log2Floor(table_size);
-   assert(static_cast<int>(kuint32max >> shift) == table_size - 1);
+   assert((table_size & (table_size - 1)) == 0);  // table must be power of two
+   const uint32_t mask = 2 * (table_size - 1);
    const char* ip_end = input + input_size;
    const char* base_ip = ip;
-   // Bytes in [next_emit, ip) will be emitted as literal bytes. Or
-   // [next_emit, ip_end) after the main loop.
-   const char* next_emit = ip;
 
    const size_t kInputMarginBytes = 15;
-   if (PREDICT_TRUE(input_size >= kInputMarginBytes)) {
+   if (SNAPPY_PREDICT_TRUE(input_size >= kInputMarginBytes)) {
      const char* ip_limit = input + input_size - kInputMarginBytes;
 
-     for (uint32 next_hash = Hash(++ip, shift); ; ) {
-       assert(next_emit < ip);
+     for (uint32_t preload = LittleEndian::Load32(ip + 1);;) {
+       // Bytes in [next_emit, ip) will be emitted as literal bytes. Or
+       // [next_emit, ip_end) after the main loop.
+       const char* next_emit = ip++;
+       uint64_t data = LittleEndian::Load64(ip);
        // The body of this loop calls EmitLiteral once and then EmitCopy one or
        // more times. (The exception is that when we're close to exhausting
        // the input we goto emit_remainder.)
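
Note how mask = 2 * (table_size - 1) pairs with TableEntry() from the first hunk: hash & mask is always an even byte offset into the uint16_t table, so indexing needs no extra shift on the critical path. A standalone sketch of the equivalence (kMaxHashTableBits = 14 assumed):

    #include <cassert>
    #include <cstdint>

    constexpr int kMaxHashTableBits = 14;  // assumed, from snappy's headers

    // Mirrors the diff's TableEntry() (multiplicative-hash fallback).
    uint16_t* TableEntry(uint16_t* table, uint32_t bytes, uint32_t mask) {
      constexpr uint32_t kMagic = 0x1e35a7bd;
      const uint32_t hash = (kMagic * bytes) >> (31 - kMaxHashTableBits);
      // mask == 2 * (table_size - 1): (hash & mask) is an even byte offset.
      return reinterpret_cast<uint16_t*>(reinterpret_cast<uintptr_t>(table) +
                                         (hash & mask));
    }

    int main() {
      uint16_t table[256] = {};
      const uint32_t mask = 2 * (256 - 1);
      uint16_t* entry = TableEntry(table, 0x12345678u, mask);
      // Equivalent plain-index form: the byte-offset trick hits the same slot.
      constexpr uint32_t kMagic = 0x1e35a7bd;
      uint32_t hash = (kMagic * 0x12345678u) >> (31 - kMaxHashTableBits);
      assert(entry == &table[(hash & mask) / 2]);
      return 0;
    }
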
@@ -445,34 +820,66 @@ char* CompressFragment(const char* input,
        // The "skip" variable keeps track of how many bytes there are since the
        // last match; dividing it by 32 (ie. right-shifting by five) gives the
        // number of bytes to move ahead for each iteration.
-       uint32 skip = 32;
+       uint32_t skip = 32;
 
-       const char* next_ip = ip;
        const char* candidate;
-       do {
-         ip = next_ip;
-         uint32 hash = next_hash;
-         assert(hash == Hash(ip, shift));
-         uint32 bytes_between_hash_lookups = skip >> 5;
+       if (ip_limit - ip >= 16) {
+         auto delta = ip - base_ip;
+         for (int j = 0; j < 4; ++j) {
+           for (int k = 0; k < 4; ++k) {
+             int i = 4 * j + k;
+             // These for-loops are meant to be unrolled. So we can freely
+             // special case the first iteration to use the value already
+             // loaded in preload.
+             uint32_t dword = i == 0 ? preload : static_cast<uint32_t>(data);
+             assert(dword == LittleEndian::Load32(ip + i));
+             uint16_t* table_entry = TableEntry(table, dword, mask);
+             candidate = base_ip + *table_entry;
+             assert(candidate >= base_ip);
+             assert(candidate < ip + i);
+             *table_entry = delta + i;
+             if (SNAPPY_PREDICT_FALSE(LittleEndian::Load32(candidate) == dword)) {
+               *op = LITERAL | (i << 2);
+               UnalignedCopy128(next_emit, op + 1);
+               ip += i;
+               op = op + i + 2;
+               goto emit_match;
+             }
+             data >>= 8;
+           }
+           data = LittleEndian::Load64(ip + 4 * j + 4);
+         }
+         ip += 16;
+         skip += 16;
+       }
+       while (true) {
+         assert(static_cast<uint32_t>(data) == LittleEndian::Load32(ip));
+         uint16_t* table_entry = TableEntry(table, data, mask);
+         uint32_t bytes_between_hash_lookups = skip >> 5;
          skip += bytes_between_hash_lookups;
-         next_ip = ip + bytes_between_hash_lookups;
-         if (PREDICT_FALSE(next_ip > ip_limit)) {
+         const char* next_ip = ip + bytes_between_hash_lookups;
+         if (SNAPPY_PREDICT_FALSE(next_ip > ip_limit)) {
+           ip = next_emit;
            goto emit_remainder;
          }
-         next_hash = Hash(next_ip, shift);
-         candidate = base_ip + table[hash];
+         candidate = base_ip + *table_entry;
          assert(candidate >= base_ip);
         assert(candidate < ip);
 
-         table[hash] = ip - base_ip;
-       } while (PREDICT_TRUE(UNALIGNED_LOAD32(ip) !=
-                             UNALIGNED_LOAD32(candidate)));
+         *table_entry = ip - base_ip;
+         if (SNAPPY_PREDICT_FALSE(static_cast<uint32_t>(data) ==
+                                  LittleEndian::Load32(candidate))) {
+           break;
+         }
+         data = LittleEndian::Load32(next_ip);
+         ip = next_ip;
+       }
 
        // Step 2: A 4-byte match has been found. We'll later see if more
        // than 4 bytes match. But, prior to the match, input
        // bytes [next_emit, ip) are unmatched. Emit them as "literal bytes."
        assert(next_emit + 16 <= ip_end);
-       op = EmitLiteral(op, next_emit, ip - next_emit, true);
+       op = EmitLiteral</*allow_fast_path=*/true>(op, next_emit, ip - next_emit);
 
        // Step 3: Call EmitCopy, and then see if another EmitCopy could
        // be our next move. Repeat until we find no match for the
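
The skip heuristic above throttles probing in incompressible regions: each miss advances by skip >> 5 bytes, so the first 32 probes step 1 byte, the next 32 step 2 bytes, and so on. A standalone sketch of the cadence:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Mirrors the diff's scan loop: bytes_between_hash_lookups = skip >> 5.
      uint32_t skip = 32;
      uint32_t pos = 0;
      for (int probe = 1; probe <= 96; ++probe) {
        uint32_t step = skip >> 5;
        skip += step;
        pos += step;
        if (probe % 32 == 0) {
          std::printf("after %3d probes: %u bytes scanned\n", probe, pos);
        }
      }
      // Probes get sparser the longer no match is found, bounding the cost
      // of incompressible input.
      return 0;
    }
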
@@ -482,51 +889,72 @@ char* CompressFragment(const char* input,
        // though we don't yet know how big the literal will be. We handle that
        // by proceeding to the next iteration of the main loop. We also can exit
        // this loop via goto if we get close to exhausting the input.
-       EightBytesReference input_bytes;
-       uint32 candidate_bytes = 0;
-
+     emit_match:
        do {
          // We have a 4-byte match at ip, and no need to emit any
          // "literal bytes" prior to ip.
          const char* base = ip;
          std::pair<size_t, bool> p =
-             FindMatchLength(candidate + 4, ip + 4, ip_end);
+             FindMatchLength(candidate + 4, ip + 4, ip_end, &data);
          size_t matched = 4 + p.first;
          ip += matched;
          size_t offset = base - candidate;
          assert(0 == memcmp(base, candidate, matched));
-         op = EmitCopy(op, offset, matched, p.second);
-         next_emit = ip;
-         if (PREDICT_FALSE(ip >= ip_limit)) {
+         if (p.second) {
+           op = EmitCopy</*len_less_than_12=*/true>(op, offset, matched);
+         } else {
+           op = EmitCopy</*len_less_than_12=*/false>(op, offset, matched);
+         }
+         if (SNAPPY_PREDICT_FALSE(ip >= ip_limit)) {
            goto emit_remainder;
          }
+         // Expect 5 bytes to match
+         assert((data & 0xFFFFFFFFFF) ==
+                (LittleEndian::Load64(ip) & 0xFFFFFFFFFF));
          // We are now looking for a 4-byte match again. We read
-         // table[Hash(ip, shift)] for that. To improve compression,
-         // we also update table[Hash(ip - 1, shift)] and table[Hash(ip, shift)].
-         input_bytes = GetEightBytesAt(ip - 1);
-         uint32 prev_hash = HashBytes(GetUint32AtOffset(input_bytes, 0), shift);
-         table[prev_hash] = ip - base_ip - 1;
-         uint32 cur_hash = HashBytes(GetUint32AtOffset(input_bytes, 1), shift);
-         candidate = base_ip + table[cur_hash];
-         candidate_bytes = UNALIGNED_LOAD32(candidate);
-         table[cur_hash] = ip - base_ip;
-       } while (GetUint32AtOffset(input_bytes, 1) == candidate_bytes);
-
-       next_hash = HashBytes(GetUint32AtOffset(input_bytes, 2), shift);
-       ++ip;
+         // table[Hash(ip, mask)] for that. To improve compression,
+         // we also update table[Hash(ip - 1, mask)] and table[Hash(ip, mask)].
+         *TableEntry(table, LittleEndian::Load32(ip - 1), mask) =
+             ip - base_ip - 1;
+         uint16_t* table_entry = TableEntry(table, data, mask);
+         candidate = base_ip + *table_entry;
+         *table_entry = ip - base_ip;
+         // Measurements on the benchmarks have shown the following probabilities
+         // for the loop to exit (ie. avg. number of iterations is reciprocal).
+         // BM_Flat/6  txt1    p = 0.3-0.4
+         // BM_Flat/7  txt2    p = 0.35
+         // BM_Flat/8  txt3    p = 0.3-0.4
+         // BM_Flat/9  txt3    p = 0.34-0.4
+         // BM_Flat/10 pb      p = 0.4
+         // BM_Flat/11 gaviota p = 0.1
+         // BM_Flat/12 cp      p = 0.5
+         // BM_Flat/13 c       p = 0.3
+       } while (static_cast<uint32_t>(data) == LittleEndian::Load32(candidate));
+       // Because the least significant 5 bytes matched, we can utilize data
+       // for the next iteration.
+       preload = data >> 8;
      }
    }
 
-  emit_remainder:
+ emit_remainder:
    // Emit the remaining bytes as a literal
-   if (next_emit < ip_end) {
-     op = EmitLiteral(op, next_emit, ip_end - next_emit, false);
+   if (ip < ip_end) {
+     op = EmitLiteral</*allow_fast_path=*/false>(op, ip, ip_end - ip);
    }
 
    return op;
  }
  }  // end namespace internal
 
+ // Called back at every compression call to trace parameters and sizes.
+ static inline void Report(const char *algorithm, size_t compressed_size,
+                           size_t uncompressed_size) {
+   // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+   (void)algorithm;
+   (void)compressed_size;
+   (void)uncompressed_size;
+ }
+
  // Signature of output types needed by decompression code.
  // The decompression code is templatized on a type that obeys this
  // signature so that we do not pay virtual function call overhead in
@@ -537,12 +965,28 @@ char* CompressFragment(const char* input,
  //   // Called before decompression
  //   void SetExpectedLength(size_t length);
  //
+ //   // For performance a writer may choose to donate the cursor variable to
+ //   // the decompression function. The decompression will inject it in all
+ //   // its function calls to the writer. Keeping the important output cursor
+ //   // as a function local stack variable allows the compiler to keep it in
+ //   // a register, which greatly aids performance by avoiding loads and
+ //   // stores of this variable in the fast path loop iterations.
+ //   T GetOutputPtr() const;
+ //
+ //   // At end of decompression the loop donates the ownership of the cursor
+ //   // variable back to the writer by calling this function.
+ //   void SetOutputPtr(T op);
+ //
  //   // Called after decompression
  //   bool CheckLength() const;
  //
  //   // Called repeatedly during decompression
- //   bool Append(const char* ip, size_t length);
- //   bool AppendFromSelf(uint32 offset, size_t length);
+ //   // Each function gets a pointer to the op (output pointer), that the
+ //   // writer can use and update. Note it's important that these functions
+ //   // get fully inlined so that no actual address of the local variable
+ //   // needs to be taken.
+ //   bool Append(const char* ip, size_t length, T* op);
+ //   bool AppendFromSelf(uint32_t offset, size_t length, T* op);
  //
  //   // The rules for how TryFastAppend differs from Append are somewhat
  //   // convoluted:
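
A skeletal writer showing the documented shape, including the donated output cursor (illustrative only; the real writers such as SnappyArrayWriter implement this interface elsewhere in this file, with full bounds checking):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Minimal sketch of the writer concept above with T = char*. It trusts
    // its input; real writers validate every length and offset.
    class FlatWriter {
      char* base_;
      char* op_;  // authoritative cursor while no decompression is running
      size_t expected_ = 0;

     public:
      explicit FlatWriter(char* dst) : base_(dst), op_(dst) {}

      void SetExpectedLength(size_t length) { expected_ = length; }
      bool CheckLength() const {
        return static_cast<size_t>(op_ - base_) == expected_;
      }

      // The decompression loop borrows the cursor (keeps it in a register)...
      char* GetOutputPtr() const { return op_; }
      // ...and donates it back when it finishes.
      void SetOutputPtr(char* op) { op_ = op; }

      bool Append(const char* ip, size_t length, char** op) {
        std::memcpy(*op, ip, length);
        *op += length;
        return true;
      }
      bool AppendFromSelf(uint32_t offset, size_t length, char** op) {
        // Byte-at-a-time so overlapping runs replicate the pattern, as in
        // IncrementalCopySlow(); (*op)[i - offset] would be undefined behavior.
        for (size_t i = 0; i < length; ++i) (*op)[i] = (*op - offset)[i];
        *op += length;
        return true;
      }
    };
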
@@ -564,19 +1008,315 @@ char* CompressFragment(const char* input,
564
1008
  // // as it is unlikely that one would implement a fast path accepting
565
1009
  // // this much data.
566
1010
  // //
567
- // bool TryFastAppend(const char* ip, size_t available, size_t length);
1011
+ // bool TryFastAppend(const char* ip, size_t available, size_t length, T* op);
568
1012
  // };
569
1013
 
1014
+ static inline uint32_t ExtractLowBytes(const uint32_t& v, int n) {
1015
+ assert(n >= 0);
1016
+ assert(n <= 4);
1017
+ #if SNAPPY_HAVE_BMI2
1018
+ return _bzhi_u32(v, 8 * n);
1019
+ #else
1020
+ // This needs to be wider than uint32_t otherwise `mask << 32` will be
1021
+ // undefined.
1022
+ uint64_t mask = 0xffffffff;
1023
+ return v & ~(mask << (8 * n));
1024
+ #endif
1025
+ }
1026
+
1027
+ static inline bool LeftShiftOverflows(uint8_t value, uint32_t shift) {
1028
+ assert(shift < 32);
1029
+ static const uint8_t masks[] = {
1030
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
1031
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
1032
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
1033
+ 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe};
1034
+ return (value & masks[shift]) != 0;
1035
+ }
1036
+
1037
+ inline bool Copy64BytesWithPatternExtension(ptrdiff_t dst, size_t offset) {
1038
+ // TODO: Switch to [[maybe_unused]] when we can assume C++17.
1039
+ (void)dst;
1040
+ return offset != 0;
1041
+ }
1042
+
1043
+ // Copies between size bytes and 64 bytes from src to dest. size cannot exceed
1044
+ // 64. More than size bytes, but never exceeding 64, might be copied if doing
1045
+ // so gives better performance. [src, src + size) must not overlap with
1046
+ // [dst, dst + size), but [src, src + 64) may overlap with [dst, dst + 64).
1047
+ void MemCopy64(char* dst, const void* src, size_t size) {
1048
+ // Always copy this many bytes. If that's below size then copy the full 64.
1049
+ constexpr int kShortMemCopy = 32;
1050
+
1051
+ assert(size <= 64);
1052
+ assert(std::less_equal<const void*>()(static_cast<const char*>(src) + size,
1053
+ dst) ||
1054
+ std::less_equal<const void*>()(dst + size, src));
1055
+
1056
+ // We know that src and dst are at least size bytes apart. However, because we
1057
+ // might copy more than size bytes the copy still might overlap past size.
1058
+ // E.g. if src and dst appear consecutively in memory (src + size >= dst).
1059
+ // TODO: Investigate wider copies on other platforms.
1060
+ #if defined(__x86_64__) && defined(__AVX__)
1061
+ assert(kShortMemCopy <= 32);
1062
+ __m256i data = _mm256_lddqu_si256(static_cast<const __m256i *>(src));
1063
+ _mm256_storeu_si256(reinterpret_cast<__m256i *>(dst), data);
1064
+ // Profiling shows that nearly all copies are short.
1065
+ if (SNAPPY_PREDICT_FALSE(size > kShortMemCopy)) {
1066
+ data = _mm256_lddqu_si256(static_cast<const __m256i *>(src) + 1);
1067
+ _mm256_storeu_si256(reinterpret_cast<__m256i *>(dst) + 1, data);
1068
+ }
1069
+ #else
1070
+ std::memmove(dst, src, kShortMemCopy);
1071
+ // Profiling shows that nearly all copies are short.
1072
+ if (SNAPPY_PREDICT_FALSE(size > kShortMemCopy)) {
1073
+ std::memmove(dst + kShortMemCopy,
1074
+ static_cast<const uint8_t*>(src) + kShortMemCopy,
1075
+ 64 - kShortMemCopy);
1076
+ }
1077
+ #endif
1078
+ }
1079
+
1080
+ void MemCopy64(ptrdiff_t dst, const void* src, size_t size) {
1081
+ // TODO: Switch to [[maybe_unused]] when we can assume C++17.
1082
+ (void)dst;
1083
+ (void)src;
1084
+ (void)size;
1085
+ }
1086
+
1087
+ void ClearDeferred(const void** deferred_src, size_t* deferred_length,
1088
+ uint8_t* safe_source) {
1089
+ *deferred_src = safe_source;
1090
+ *deferred_length = 0;
1091
+ }
1092
+
1093
+ void DeferMemCopy(const void** deferred_src, size_t* deferred_length,
1094
+ const void* src, size_t length) {
1095
+ *deferred_src = src;
1096
+ *deferred_length = length;
1097
+ }
1098
+
1099
+ SNAPPY_ATTRIBUTE_ALWAYS_INLINE
1100
+ inline size_t AdvanceToNextTagARMOptimized(const uint8_t** ip_p, size_t* tag) {
1101
+ const uint8_t*& ip = *ip_p;
1102
+ // This section is crucial for the throughput of the decompression loop.
1103
+ // The latency of an iteration is fundamentally constrained by the
1104
+ // following data chain on ip.
1105
+ // ip -> c = Load(ip) -> delta1 = (c & 3) -> ip += delta1 or delta2
1106
+ // delta2 = ((c >> 2) + 1) ip++
1107
+ // This is different from X86 optimizations because ARM has conditional add
1108
+ // instruction (csinc) and it removes several register moves.
1109
+ const size_t tag_type = *tag & 3;
1110
+ const bool is_literal = (tag_type == 0);
1111
+ if (is_literal) {
1112
+ size_t next_literal_tag = (*tag >> 2) + 1;
1113
+ *tag = ip[next_literal_tag];
1114
+ ip += next_literal_tag + 1;
1115
+ } else {
1116
+ *tag = ip[tag_type];
1117
+ ip += tag_type + 1;
1118
+ }
1119
+ return tag_type;
1120
+ }
1121
+
1122
+ SNAPPY_ATTRIBUTE_ALWAYS_INLINE
+ inline size_t AdvanceToNextTagX86Optimized(const uint8_t** ip_p, size_t* tag) {
+   const uint8_t*& ip = *ip_p;
+   // This section is crucial for the throughput of the decompression loop.
+   // The latency of an iteration is fundamentally constrained by the
+   // following data chain on ip.
+   // ip -> c = Load(ip) -> ip1 = ip + 1 + (c & 3) -> ip = ip1 or ip2
+   //                       ip2 = ip + 2 + (c >> 2)
+   // This amounts to 8 cycles.
+   // 5 (load) + 1 (c & 3) + 1 (lea ip1, [ip + (c & 3) + 1]) + 1 (cmov)
+   size_t literal_len = *tag >> 2;
+   size_t tag_type = *tag;
+   bool is_literal;
+ #if defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(__x86_64__)
+   // TODO: clang misses the fact that the (c & 3) already correctly
+   // sets the zero flag.
+   asm("and $3, %k[tag_type]\n\t"
+       : [tag_type] "+r"(tag_type), "=@ccz"(is_literal)
+       :: "cc");
+ #else
+   tag_type &= 3;
+   is_literal = (tag_type == 0);
+ #endif
+   // TODO
+   // This code is subtle. Loading the values first and then cmov'ing has less
+   // latency than cmov'ing ip and then loading. However clang would move the
+   // loads in an optimization phase; volatile prevents this transformation.
+   // Note that we have enough slop bytes (64) that the loads are always valid.
+   size_t tag_literal =
+       static_cast<const volatile uint8_t*>(ip)[1 + literal_len];
+   size_t tag_copy = static_cast<const volatile uint8_t*>(ip)[tag_type];
+   *tag = is_literal ? tag_literal : tag_copy;
+   const uint8_t* ip_copy = ip + 1 + tag_type;
+   const uint8_t* ip_literal = ip + 2 + literal_len;
+   ip = is_literal ? ip_literal : ip_copy;
+ #if defined(__GNUC__) && defined(__x86_64__)
+   // TODO: Clang is "optimizing" away zero-extension (a totally free
+   // operation); this means that after the cmov of tag, it emits another
+   // movzb tag, byte(tag). It really matters as it's on the core chain. This
+   // dummy asm persuades clang to do the zero-extension at the load (it's
+   // automatic), removing the expensive movzb.
+   asm("" ::"r"(tag_copy));
+ #endif
+   return tag_type;
+ }
+
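Both variants compute the same next-tag address; only the instruction scheduling differs. A standalone restatement of the address arithmetic, as a sketch covering only the fast-path tags (long literals and copy-4 are handled outside this path):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Distance from the current tag byte to the next tag byte.
    size_t TagStride(uint8_t c) {
      size_t tag_type = c & 3;              // 0 = literal, 1 = copy-1, 2 = copy-2
      if (tag_type == 0) {
        size_t literal_len = (c >> 2) + 1;  // short literals only (1..60 bytes)
        return 1 + literal_len;             // tag byte + inlined literal bytes
      }
      return 1 + tag_type;                  // tag byte + 1 or 2 offset bytes
    }

    int main() {
      assert(TagStride(0x00) == 2);  // 1-byte literal
      assert(TagStride(0x01) == 2);  // copy-1: tag + 1 offset byte
      assert(TagStride(0x02) == 3);  // copy-2: tag + 2 offset bytes
    }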
+ // Extracts the offset for copy-1 and copy-2; returns 0 for literals or copy-4.
+ inline uint32_t ExtractOffset(uint32_t val, size_t tag_type) {
+   // For x86 non-static storage works better. For ARM static storage is better.
+   // TODO: Once the array is recognized as a register, improve the
+   // readability for x86.
+ #if defined(__x86_64__)
+   constexpr uint64_t kExtractMasksCombined = 0x0000FFFF00FF0000ull;
+   uint16_t result;
+   memcpy(&result,
+          reinterpret_cast<const char*>(&kExtractMasksCombined) + 2 * tag_type,
+          sizeof(result));
+   return val & result;
+ #elif defined(__aarch64__)
+   constexpr uint64_t kExtractMasksCombined = 0x0000FFFF00FF0000ull;
+   return val & static_cast<uint32_t>(
+                    (kExtractMasksCombined >> (tag_type * 16)) & 0xFFFF);
+ #else
+   static constexpr uint32_t kExtractMasks[4] = {0, 0xFF, 0xFFFF, 0};
+   return val & kExtractMasks[tag_type];
+ #endif
+ };
+
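All three branches implement the same mask table {0, 0xFF, 0xFFFF, 0} indexed by tag type; the x86 and aarch64 variants merely pack it into one 64-bit constant. A portable sketch with a couple of worked values:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    uint32_t ExtractOffsetPortable(uint32_t val, size_t tag_type) {
      static constexpr uint32_t kExtractMasks[4] = {0, 0xFF, 0xFFFF, 0};
      return val & kExtractMasks[tag_type];
    }

    int main() {
      // val holds the 4 bytes following the tag, loaded little-endian.
      assert(ExtractOffsetPortable(0xAABBCCDD, 1) == 0xDD);    // copy-1: 1 offset byte
      assert(ExtractOffsetPortable(0xAABBCCDD, 2) == 0xCCDD);  // copy-2: 2 offset bytes
      assert(ExtractOffsetPortable(0xAABBCCDD, 0) == 0);       // literal: no offset
    }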
+ // Core decompression loop, when there is enough data available.
+ // Decompresses the input buffer [ip, ip_limit) into the output buffer
+ // [op, op_limit_min_slop). Returns when we are too close to the end of the
+ // input buffer, when we exceed op_limit_min_slop, or when an exceptional tag
+ // is encountered (a literal of length > 60, or a copy-4).
+ // Returns {ip, op} at the points it stopped decoding.
+ // TODO: This function probably does not need to be inlined, as it
+ // should decode large chunks at a time. This allows runtime dispatch to
+ // implementations based on CPU capability (BMI2 / perhaps 32 / 64 byte memcpy).
+ template <typename T>
+ std::pair<const uint8_t*, ptrdiff_t> DecompressBranchless(
+     const uint8_t* ip, const uint8_t* ip_limit, ptrdiff_t op, T op_base,
+     ptrdiff_t op_limit_min_slop) {
+   // If deferred_src is invalid point it here.
+   uint8_t safe_source[64];
+   const void* deferred_src;
+   size_t deferred_length;
+   ClearDeferred(&deferred_src, &deferred_length, safe_source);
+
+   // We unroll the inner loop twice so we need twice the spare room.
+   op_limit_min_slop -= kSlopBytes;
+   if (2 * (kSlopBytes + 1) < ip_limit - ip && op < op_limit_min_slop) {
+     const uint8_t* const ip_limit_min_slop = ip_limit - 2 * kSlopBytes - 1;
+     ip++;
+     // ip points just past the tag and we are touching at maximum kSlopBytes
+     // in an iteration.
+     size_t tag = ip[-1];
+ #if defined(__clang__) && defined(__aarch64__)
+     // Workaround for https://bugs.llvm.org/show_bug.cgi?id=51317
+     // when loading 1 byte, clang for aarch64 doesn't realize that it (ldrb)
+     // comes with free zero-extension, so clang generates another
+     // 'and xn, xm, 0xff' before it uses that as the offset. This 'and' is
+     // redundant and can be removed by adding this dummy asm, which gives
+     // clang a hint that we're doing the zero-extension at the load.
+     asm("" ::"r"(tag));
+ #endif
+     do {
+       // The throughput is limited by instructions; unrolling the inner loop
+       // twice reduces the number of instructions checking limits and also
+       // leads to fewer mov's.
+
+       SNAPPY_PREFETCH(ip + 128);
+       for (int i = 0; i < 2; i++) {
+         const uint8_t* old_ip = ip;
+         assert(tag == ip[-1]);
+         // For literals tag_type = 0, hence we will always obtain 0 from
+         // ExtractLowBytes. For literals offset will thus be kLiteralOffset.
+         ptrdiff_t len_min_offset = kLengthMinusOffset[tag];
+ #if defined(__aarch64__)
+         size_t tag_type = AdvanceToNextTagARMOptimized(&ip, &tag);
+ #else
+         size_t tag_type = AdvanceToNextTagX86Optimized(&ip, &tag);
+ #endif
+         uint32_t next = LittleEndian::Load32(old_ip);
+         size_t len = len_min_offset & 0xFF;
+         len_min_offset -= ExtractOffset(next, tag_type);
+         if (SNAPPY_PREDICT_FALSE(len_min_offset > 0)) {
+           if (SNAPPY_PREDICT_FALSE(len & 0x80)) {
+             // Exceptional case (long literal or copy 4).
+             // Actually doing the copy here is negatively impacting the main
+             // loop due to the compiler incorrectly allocating a register for
+             // this fallback. Hence we just break.
+ break_loop:
+             ip = old_ip;
+             goto exit;
+           }
+           // Only copy-1 or copy-2 tags can get here.
+           assert(tag_type == 1 || tag_type == 2);
+           std::ptrdiff_t delta = (op + deferred_length) + len_min_offset - len;
+           // Guard against copies before the buffer start.
+           // Execute any deferred MemCopy since we write to dst here.
+           MemCopy64(op_base + op, deferred_src, deferred_length);
+           op += deferred_length;
+           ClearDeferred(&deferred_src, &deferred_length, safe_source);
+           if (SNAPPY_PREDICT_FALSE(delta < 0 ||
+                                    !Copy64BytesWithPatternExtension(
+                                        op_base + op, len - len_min_offset))) {
+             goto break_loop;
+           }
+           // We aren't deferring this copy so add length right away.
+           op += len;
+           continue;
+         }
+         std::ptrdiff_t delta = (op + deferred_length) + len_min_offset - len;
+         if (SNAPPY_PREDICT_FALSE(delta < 0)) {
+           // Due to the spurious offset in literals, this will trigger
+           // at the start of a block when op is still smaller than 256.
+           if (tag_type != 0) goto break_loop;
+           MemCopy64(op_base + op, deferred_src, deferred_length);
+           op += deferred_length;
+           DeferMemCopy(&deferred_src, &deferred_length, old_ip, len);
+           continue;
+         }
+
+         // For copies we need to copy from op_base + delta, for literals
+         // we need to copy from ip instead of from the stream.
+         const void* from =
+             tag_type ? reinterpret_cast<void*>(op_base + delta) : old_ip;
+         MemCopy64(op_base + op, deferred_src, deferred_length);
+         op += deferred_length;
+         DeferMemCopy(&deferred_src, &deferred_length, from, len);
+       }
+     } while (ip < ip_limit_min_slop &&
+              (op + deferred_length) < op_limit_min_slop);
+   exit:
+     ip--;
+     assert(ip <= ip_limit);
+   }
+   // If we deferred a copy then we can perform it now. If we are up to date
+   // then we might not have enough slop bytes and could run past the end.
+   if (deferred_length) {
+     MemCopy64(op_base + op, deferred_src, deferred_length);
+     op += deferred_length;
+     ClearDeferred(&deferred_src, &deferred_length, safe_source);
+   }
+   return {ip, op};
+ }
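The deferral trick above is the heart of the branchless loop: instead of copying each element immediately, the loop remembers one pending (src, length) pair and flushes it with a single wide MemCopy64 right before the next write to the output. A stripped-down sketch of the pattern, with hypothetical names and no snappy internals:

    #include <cstdio>
    #include <cstring>

    // One pending copy that will be executed later, in a single wide memcpy.
    struct Deferred {
      const char* src = nullptr;
      size_t len = 0;
    };

    void FlushDeferred(char** dst, Deferred* d) {
      std::memcpy(*dst, d->src, d->len);  // one wide copy instead of many small ones
      *dst += d->len;
      d->len = 0;
    }

    int main() {
      char out[64] = {};
      char* op = out;
      Deferred pending;
      const char* chunks[] = {"abc", "defg", "hi"};
      for (const char* c : chunks) {
        if (pending.len) FlushDeferred(&op, &pending);  // execute the previous copy
        pending = {c, std::strlen(c)};                  // defer the current one
      }
      if (pending.len) FlushDeferred(&op, &pending);    // final flush
      std::printf("%s\n", out);                         // abcdefghi
    }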
 
   // Helper class for decompression
   class SnappyDecompressor {
    private:
 -  Source* reader_;        // Underlying source of bytes to decompress
 -  const char* ip_;        // Points to next buffered byte
 -  const char* ip_limit_;  // Points just past buffered bytes
 -  uint32 peeked_;         // Bytes peeked from reader (need to skip)
 -  bool eof_;              // Hit end of input without an error?
 -  char scratch_[kMaximumTagLength];  // See RefillTag().
 +  Source* reader_;        // Underlying source of bytes to decompress
 +  const char* ip_;        // Points to next buffered byte
 +  const char* ip_limit_;  // Points just past buffered bytes
 +  // If ip < ip_limit_min_maxtaglen_ it's safe to read kMaxTagLength from
 +  // buffer.
 +  const char* ip_limit_min_maxtaglen_;
 +  uint32_t peeked_;       // Bytes peeked from reader (need to skip)
 +  bool eof_;              // Hit end of input without an error?
 +  char scratch_[kMaximumTagLength];  // See RefillTag().
 
   // Ensure that all of the tag metadata for the next tag is available
   // in [ip_..ip_limit_-1]. Also ensures that [ip,ip+4] is readable even
@@ -585,14 +1325,14 @@ class SnappyDecompressor {
   // Returns true on success, false on error or end of input.
   bool RefillTag();
 
 + void ResetLimit(const char* ip) {
 +   ip_limit_min_maxtaglen_ =
 +       ip_limit_ - std::min<ptrdiff_t>(ip_limit_ - ip, kMaximumTagLength - 1);
 + }
 +
   public:
   explicit SnappyDecompressor(Source* reader)
 -     : reader_(reader),
 -       ip_(NULL),
 -       ip_limit_(NULL),
 -       peeked_(0),
 -       eof_(false) {
 - }
 +     : reader_(reader), ip_(NULL), ip_limit_(NULL), peeked_(0), eof_(false) {}
 
   ~SnappyDecompressor() {
     // Advance past any bytes we peeked at from the reader
@@ -600,18 +1340,16 @@ class SnappyDecompressor {
   }
 
   // Returns true iff we have hit the end of the input without an error.
 - bool eof() const {
 -   return eof_;
 - }
 + bool eof() const { return eof_; }
 
   // Read the uncompressed length stored at the start of the compressed data.
 - // On succcess, stores the length in *result and returns true.
 + // On success, stores the length in *result and returns true.
   // On failure, returns false.
 - bool ReadUncompressedLength(uint32* result) {
 -   assert(ip_ == NULL);  // Must not have read anything yet
 + bool ReadUncompressedLength(uint32_t* result) {
 +   assert(ip_ == NULL);  // Must not have read anything yet
     // Length is encoded in 1..5 bytes
     *result = 0;
 -   uint32 shift = 0;
 +   uint32_t shift = 0;
     while (true) {
       if (shift >= 32) return false;
       size_t n;
@@ -619,8 +1357,8 @@ class SnappyDecompressor {
       if (n == 0) return false;
       const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
       reader_->Skip(1);
 -     uint32 val = c & 0x7f;
 -     if (((val << shift) >> shift) != val) return false;
 +     uint32_t val = c & 0x7f;
 +     if (LeftShiftOverflows(static_cast<uint8_t>(val), shift)) return false;
       *result |= val << shift;
       if (c < 128) {
         break;
  // Process the next item found in the input.
634
1372
  // Returns true if successful, false on error or end of input.
635
1373
  template <class Writer>
636
- void DecompressAllTags(Writer* writer) {
1374
+ #if defined(__GNUC__) && defined(__x86_64__)
1375
+ __attribute__((aligned(32)))
1376
+ #endif
1377
+ void
1378
+ DecompressAllTags(Writer* writer) {
637
1379
  const char* ip = ip_;
638
- // For position-independent executables, accessing global arrays can be
639
- // slow. Move wordmask array onto the stack to mitigate this.
640
- uint32 wordmask[sizeof(internal::wordmask)/sizeof(uint32)];
641
- memcpy(wordmask, internal::wordmask, sizeof(wordmask));
642
-
1380
+ ResetLimit(ip);
1381
+ auto op = writer->GetOutputPtr();
643
1382
  // We could have put this refill fragment only at the beginning of the loop.
644
1383
  // However, duplicating it at the end of each branch gives the compiler more
645
1384
  // scope to optimize the <ip_limit_ - ip> expression based on the local
646
1385
  // context, which overall increases speed.
647
- #define MAYBE_REFILL() \
648
- if (ip_limit_ - ip < kMaximumTagLength) { \
649
- ip_ = ip; \
650
- if (!RefillTag()) return; \
651
- ip = ip_; \
652
- }
653
-
1386
+ #define MAYBE_REFILL() \
1387
+ if (SNAPPY_PREDICT_FALSE(ip >= ip_limit_min_maxtaglen_)) { \
1388
+ ip_ = ip; \
1389
+ if (SNAPPY_PREDICT_FALSE(!RefillTag())) goto exit; \
1390
+ ip = ip_; \
1391
+ ResetLimit(ip); \
1392
+ } \
1393
+ preload = static_cast<uint8_t>(*ip)
1394
+
1395
+ // At the start of the for loop below the least significant byte of preload
1396
+ // contains the tag.
1397
+ uint32_t preload;
654
1398
  MAYBE_REFILL();
655
- for ( ;; ) {
656
- const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip++));
1399
+ for (;;) {
1400
+ {
1401
+ ptrdiff_t op_limit_min_slop;
1402
+ auto op_base = writer->GetBase(&op_limit_min_slop);
1403
+ if (op_base) {
1404
+ auto res =
1405
+ DecompressBranchless(reinterpret_cast<const uint8_t*>(ip),
1406
+ reinterpret_cast<const uint8_t*>(ip_limit_),
1407
+ op - op_base, op_base, op_limit_min_slop);
1408
+ ip = reinterpret_cast<const char*>(res.first);
1409
+ op = op_base + res.second;
1410
+ MAYBE_REFILL();
1411
+ }
1412
+ }
1413
+ const uint8_t c = static_cast<uint8_t>(preload);
1414
+ ip++;
657
1415
 
658
1416
  // Ratio of iterations that have LITERAL vs non-LITERAL for different
659
1417
  // inputs.
@@ -667,67 +1425,101 @@ class SnappyDecompressor {
       // txt[1-4]  25%  75%
       // pb        24%  76%
       // bin       24%  76%
 -     if (PREDICT_FALSE((c & 0x3) == LITERAL)) {
 +     if (SNAPPY_PREDICT_FALSE((c & 0x3) == LITERAL)) {
         size_t literal_length = (c >> 2) + 1u;
 -       if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length)) {
 +       if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length, &op)) {
           assert(literal_length < 61);
           ip += literal_length;
 -         // NOTE(user): There is no MAYBE_REFILL() here, as TryFastAppend()
 +         // NOTE: There is no MAYBE_REFILL() here, as TryFastAppend()
           // will not return true unless there's already at least five spare
           // bytes in addition to the literal.
 +         preload = static_cast<uint8_t>(*ip);
           continue;
         }
 -       if (PREDICT_FALSE(literal_length >= 61)) {
 +       if (SNAPPY_PREDICT_FALSE(literal_length >= 61)) {
           // Long literal.
           const size_t literal_length_length = literal_length - 60;
           literal_length =
 -             (LittleEndian::Load32(ip) & wordmask[literal_length_length]) + 1;
 +             ExtractLowBytes(LittleEndian::Load32(ip), literal_length_length) +
 +             1;
           ip += literal_length_length;
         }
 
         size_t avail = ip_limit_ - ip;
         while (avail < literal_length) {
 -         if (!writer->Append(ip, avail)) return;
 +         if (!writer->Append(ip, avail, &op)) goto exit;
           literal_length -= avail;
           reader_->Skip(peeked_);
           size_t n;
           ip = reader_->Peek(&n);
           avail = n;
           peeked_ = avail;
 -         if (avail == 0) return;  // Premature end of input
 +         if (avail == 0) goto exit;
           ip_limit_ = ip + avail;
 +         ResetLimit(ip);
         }
 -       if (!writer->Append(ip, literal_length)) {
 -         return;
 -       }
 +       if (!writer->Append(ip, literal_length, &op)) goto exit;
         ip += literal_length;
         MAYBE_REFILL();
       } else {
 -       const size_t entry = char_table[c];
 -       const size_t trailer = LittleEndian::Load32(ip) & wordmask[entry >> 11];
 -       const size_t length = entry & 0xff;
 -       ip += entry >> 11;
 -
 -       // copy_offset/256 is encoded in bits 8..10. By just fetching
 -       // those bits, we get copy_offset (since the bit-field starts at
 -       // bit 8).
 -       const size_t copy_offset = entry & 0x700;
 -       if (!writer->AppendFromSelf(copy_offset + trailer, length)) {
 -         return;
 +       if (SNAPPY_PREDICT_FALSE((c & 3) == COPY_4_BYTE_OFFSET)) {
 +         const size_t copy_offset = LittleEndian::Load32(ip);
 +         const size_t length = (c >> 2) + 1;
 +         ip += 4;
 +
 +         if (!writer->AppendFromSelf(copy_offset, length, &op)) goto exit;
 +       } else {
 +         const ptrdiff_t entry = kLengthMinusOffset[c];
 +         preload = LittleEndian::Load32(ip);
 +         const uint32_t trailer = ExtractLowBytes(preload, c & 3);
 +         const uint32_t length = entry & 0xff;
 +         assert(length > 0);
 +
 +         // copy_offset/256 is encoded in bits 8..10. By just fetching
 +         // those bits, we get copy_offset (since the bit-field starts at
 +         // bit 8).
 +         const uint32_t copy_offset = trailer - entry + length;
 +         if (!writer->AppendFromSelf(copy_offset, length, &op)) goto exit;
 +
 +         ip += (c & 3);
 +         // By using the result of the previous load we reduce the critical
 +         // dependency chain of ip to 4 cycles.
 +         preload >>= (c & 3) * 8;
 +         if (ip < ip_limit_min_maxtaglen_) continue;
       }
       MAYBE_REFILL();
     }
   }
 -
 #undef MAYBE_REFILL
 + exit:
 +   writer->SetOutputPtr(op);
   }
  };
 
+ constexpr uint32_t CalculateNeeded(uint8_t tag) {
+   return ((tag & 3) == 0 && tag >= (60 * 4))
+              ? (tag >> 2) - 58
+              : (0x05030201 >> ((tag * 8) & 31)) & 0xFF;
+ }
+
+ #if __cplusplus >= 201402L
+ constexpr bool VerifyCalculateNeeded() {
+   for (int i = 0; i < 1; i++) {
+     if (CalculateNeeded(i) != (char_table[i] >> 11) + 1) return false;
+   }
+   return true;
+ }
+
+ // Make sure CalculateNeeded is correct by verifying it against the established
+ // table encoding the number of added bytes needed.
+ static_assert(VerifyCalculateNeeded(), "");
+ #endif  // c++14
+
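A few worked values for CalculateNeeded, checked in a throwaway sketch (note the constexpr check above only exercises tag 0; these asserts cover each tag class):

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t CalculateNeeded(uint8_t tag) {
      return ((tag & 3) == 0 && tag >= (60 * 4))
                 ? (tag >> 2) - 58
                 : (0x05030201 >> ((tag * 8) & 31)) & 0xFF;
    }

    int main() {
      assert(CalculateNeeded(0x00) == 1);      // short literal: tag byte only
      assert(CalculateNeeded(0x01) == 2);      // copy-1: tag + 1 offset byte
      assert(CalculateNeeded(0x02) == 3);      // copy-2: tag + 2 offset bytes
      assert(CalculateNeeded(0x03) == 5);      // copy-4: tag + 4 offset bytes
      assert(CalculateNeeded(60 << 2) == 2);   // literal 60: 1 extra length byte
      assert(CalculateNeeded(63 << 2) == 5);   // literal 63: 4 extra length bytes
    }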
  bool SnappyDecompressor::RefillTag() {
    const char* ip = ip_;
    if (ip == ip_limit_) {
      // Fetch a new fragment from the reader
 -    reader_->Skip(peeked_);  // All peeked bytes are used up
 +    reader_->Skip(peeked_);  // All peeked bytes are used up
      size_t n;
      ip = reader_->Peek(&n);
      peeked_ = n;
@@ -739,26 +1531,31 @@ bool SnappyDecompressor::RefillTag() {
    // Read the tag character
    assert(ip < ip_limit_);
    const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
 -  const uint32 entry = char_table[c];
 -  const uint32 needed = (entry >> 11) + 1;  // +1 byte for 'c'
 +  // At this point make sure that the data for the next tag is consecutive.
 +  // For copy 1 this means the next 2 bytes (tag and 1 byte offset)
 +  // For copy 2 the next 3 bytes (tag and 2 byte offset)
 +  // For copy 4 the next 5 bytes (tag and 4 byte offset)
 +  // For all small literals we only need 1 byte, but for literals 60...63 the
 +  // length is encoded in 1...4 extra bytes.
 +  const uint32_t needed = CalculateNeeded(c);
    assert(needed <= sizeof(scratch_));
 
    // Read more bytes from reader if needed
 -  uint32 nbuf = ip_limit_ - ip;
 +  uint32_t nbuf = ip_limit_ - ip;
    if (nbuf < needed) {
      // Stitch together bytes from ip and reader to form the word
      // contents. We store the needed bytes in "scratch_". They
      // will be consumed immediately by the caller since we do not
      // read more than we need.
 -    memmove(scratch_, ip, nbuf);
 +    std::memmove(scratch_, ip, nbuf);
      reader_->Skip(peeked_);  // All peeked bytes are used up
      peeked_ = 0;
      while (nbuf < needed) {
        size_t length;
        const char* src = reader_->Peek(&length);
        if (length == 0) return false;
 -      uint32 to_add = min<uint32>(needed - nbuf, length);
 -      memcpy(scratch_ + nbuf, src, to_add);
 +      uint32_t to_add = std::min<uint32_t>(needed - nbuf, length);
 +      std::memcpy(scratch_ + nbuf, src, to_add);
        nbuf += to_add;
        reader_->Skip(to_add);
      }
@@ -768,7 +1565,7 @@ bool SnappyDecompressor::RefillTag() {
  } else if (nbuf < kMaximumTagLength) {
    // Have enough bytes, but move into scratch_ so that we do not
    // read past end of input
 -  memmove(scratch_, ip, nbuf);
 +  std::memmove(scratch_, ip, nbuf);
    reader_->Skip(peeked_);  // All peeked bytes are used up
    peeked_ = 0;
    ip_ = scratch_;
@@ -784,15 +1581,19 @@ template <typename Writer>
  static bool InternalUncompress(Source* r, Writer* writer) {
    // Read the uncompressed length from the front of the compressed input
    SnappyDecompressor decompressor(r);
 -  uint32 uncompressed_len = 0;
 +  uint32_t uncompressed_len = 0;
    if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
 -  return InternalUncompressAllTags(&decompressor, writer, uncompressed_len);
 +
 +  return InternalUncompressAllTags(&decompressor, writer, r->Available(),
 +                                   uncompressed_len);
  }
 
  template <typename Writer>
  static bool InternalUncompressAllTags(SnappyDecompressor* decompressor,
 -                                      Writer* writer,
 -                                      uint32 uncompressed_len) {
 +                                      Writer* writer, uint32_t compressed_len,
 +                                      uint32_t uncompressed_len) {
 +  Report("snappy_uncompress", compressed_len, uncompressed_len);
 +
    writer->SetExpectedLength(uncompressed_len);
 
    // Process the entire input
@@ -801,7 +1602,7 @@ static bool InternalUncompressAllTags(SnappyDecompressor* decompressor,
    return (decompressor->eof() && writer->CheckLength());
  }
 
 - bool GetUncompressedLength(Source* source, uint32* result) {
 + bool GetUncompressedLength(Source* source, uint32_t* result) {
    SnappyDecompressor decompressor(source);
    return decompressor.ReadUncompressedLength(result);
  }
@@ -809,21 +1610,20 @@ bool GetUncompressedLength(Source* source, uint32* result) {
  size_t Compress(Source* reader, Sink* writer) {
    size_t written = 0;
    size_t N = reader->Available();
 +  const size_t uncompressed_size = N;
    char ulength[Varint::kMax32];
    char* p = Varint::Encode32(ulength, N);
 -  writer->Append(ulength, p-ulength);
 +  writer->Append(ulength, p - ulength);
    written += (p - ulength);
 
 -  internal::WorkingMemory wmem;
 -  char* scratch = NULL;
 -  char* scratch_output = NULL;
 +  internal::WorkingMemory wmem(N);
 
    while (N > 0) {
      // Get next block to compress (without copying if possible)
      size_t fragment_size;
      const char* fragment = reader->Peek(&fragment_size);
      assert(fragment_size != 0);  // premature end of input
 -    const size_t num_to_read = min(N, kBlockSize);
 +    const size_t num_to_read = std::min(N, kBlockSize);
      size_t bytes_read = fragment_size;
 
      size_t pending_advance = 0;
@@ -832,20 +1632,14 @@ size_t Compress(Source* reader, Sink* writer) {
        pending_advance = num_to_read;
        fragment_size = num_to_read;
      } else {
 -      // Read into scratch buffer
 -      if (scratch == NULL) {
 -        // If this is the last iteration, we want to allocate N bytes
 -        // of space, otherwise the max possible kBlockSize space.
 -        // num_to_read contains exactly the correct value
 -        scratch = new char[num_to_read];
 -      }
 -      memcpy(scratch, fragment, bytes_read);
 +      char* scratch = wmem.GetScratchInput();
 +      std::memcpy(scratch, fragment, bytes_read);
        reader->Skip(bytes_read);
 
        while (bytes_read < num_to_read) {
          fragment = reader->Peek(&fragment_size);
 -        size_t n = min<size_t>(fragment_size, num_to_read - bytes_read);
 -        memcpy(scratch + bytes_read, fragment, n);
 +        size_t n = std::min<size_t>(fragment_size, num_to_read - bytes_read);
 +        std::memcpy(scratch + bytes_read, fragment, n);
          bytes_read += n;
          reader->Skip(n);
        }
@@ -857,23 +1651,20 @@ size_t Compress(Source* reader, Sink* writer) {
 
      // Get encoding table for compression
      int table_size;
 -    uint16* table = wmem.GetHashTable(num_to_read, &table_size);
 +    uint16_t* table = wmem.GetHashTable(num_to_read, &table_size);
 
      // Compress input_fragment and append to dest
      const int max_output = MaxCompressedLength(num_to_read);
 
      // Need a scratch buffer for the output, in case the byte sink doesn't
      // have room for us directly.
 -    if (scratch_output == NULL) {
 -      scratch_output = new char[max_output];
 -    } else {
 -      // Since we encode kBlockSize regions followed by a region
 -      // which is <= kBlockSize in length, a previously allocated
 -      // scratch_output[] region is big enough for this iteration.
 -    }
 -    char* dest = writer->GetAppendBuffer(max_output, scratch_output);
 -    char* end = internal::CompressFragment(fragment, fragment_size,
 -                                           dest, table, table_size);
 +
 +    // Since we encode kBlockSize regions followed by a region
 +    // which is <= kBlockSize in length, a previously allocated
 +    // scratch_output[] region is big enough for this iteration.
 +    char* dest = writer->GetAppendBuffer(max_output, wmem.GetScratchOutput());
 +    char* end = internal::CompressFragment(fragment, fragment_size, dest, table,
 +                                           table_size);
      writer->Append(dest, end - dest);
      written += (end - dest);
 
@@ -881,8 +1672,7 @@ size_t Compress(Source* reader, Sink* writer) {
      reader->Skip(pending_advance);
    }
 
 -  delete[] scratch;
 -  delete[] scratch_output;
 +  Report("snappy_compress", written, uncompressed_size);
 
    return written;
  }
@@ -891,19 +1681,88 @@ size_t Compress(Source* reader, Sink* writer) {
  // IOVec interfaces
  // -----------------------------------------------------------------------
 
 + // A `Source` implementation that yields the contents of an `iovec` array. Note
 + // that `total_size` is the total number of bytes to be read from the elements
 + // of `iov` (_not_ the total number of elements in `iov`).
 + class SnappyIOVecReader : public Source {
 +  public:
 +   SnappyIOVecReader(const struct iovec* iov, size_t total_size)
 +       : curr_iov_(iov),
 +         curr_pos_(total_size > 0 ? reinterpret_cast<const char*>(iov->iov_base)
 +                                  : nullptr),
 +         curr_size_remaining_(total_size > 0 ? iov->iov_len : 0),
 +         total_size_remaining_(total_size) {
 +     // Skip empty leading `iovec`s.
 +     if (total_size > 0 && curr_size_remaining_ == 0) Advance();
 +   }
 +
 +   ~SnappyIOVecReader() = default;
 +
 +   size_t Available() const { return total_size_remaining_; }
 +
 +   const char* Peek(size_t* len) {
 +     *len = curr_size_remaining_;
 +     return curr_pos_;
 +   }
 +
 +   void Skip(size_t n) {
 +     while (n >= curr_size_remaining_ && n > 0) {
 +       n -= curr_size_remaining_;
 +       Advance();
 +     }
 +     curr_size_remaining_ -= n;
 +     total_size_remaining_ -= n;
 +     curr_pos_ += n;
 +   }
 +
 +  private:
 +   // Advances to the next nonempty `iovec` and updates related variables.
 +   void Advance() {
 +     do {
 +       assert(total_size_remaining_ >= curr_size_remaining_);
 +       total_size_remaining_ -= curr_size_remaining_;
 +       if (total_size_remaining_ == 0) {
 +         curr_pos_ = nullptr;
 +         curr_size_remaining_ = 0;
 +         return;
 +       }
 +       ++curr_iov_;
 +       curr_pos_ = reinterpret_cast<const char*>(curr_iov_->iov_base);
 +       curr_size_remaining_ = curr_iov_->iov_len;
 +     } while (curr_size_remaining_ == 0);
 +   }
 +
 +   // The `iovec` currently being read.
 +   const struct iovec* curr_iov_;
 +   // The location in `curr_iov_` currently being read.
 +   const char* curr_pos_;
 +   // The amount of unread data in `curr_iov_`.
 +   size_t curr_size_remaining_;
 +   // The amount of unread data in the entire input array.
 +   size_t total_size_remaining_;
 + };
 +
  // A type that writes to an iovec.
  // Note that this is not a "ByteSink", but a type that matches the
  // Writer template argument to SnappyDecompressor::DecompressAllTags().
  class SnappyIOVecWriter {
   private:
 +  // output_iov_end_ is set to iov + count and used to determine when
 +  // the end of the iovs is reached.
 +  const struct iovec* output_iov_end_;
 +
 + #if !defined(NDEBUG)
    const struct iovec* output_iov_;
 -  const size_t output_iov_count_;
 + #endif  // !defined(NDEBUG)
 +
 +  // Current iov that is being written into.
 +  const struct iovec* curr_iov_;
 
 -  // We are currently writing into output_iov_[curr_iov_index_].
 -  size_t curr_iov_index_;
 +  // Pointer to current iov's write location.
 +  char* curr_iov_output_;
 
 -  // Bytes written to output_iov_[curr_iov_index_] so far.
 -  size_t curr_iov_written_;
 +  // Remaining bytes to write into curr_iov_output.
 +  size_t curr_iov_remaining_;
 
    // Total bytes decompressed into output_iov_ so far.
    size_t total_written_;
@@ -911,53 +1770,61 @@ class SnappyIOVecWriter {
    // Maximum number of bytes that will be decompressed into output_iov_.
    size_t output_limit_;
 
 -  inline char* GetIOVecPointer(size_t index, size_t offset) {
 -    return reinterpret_cast<char*>(output_iov_[index].iov_base) +
 -        offset;
 +  static inline char* GetIOVecPointer(const struct iovec* iov, size_t offset) {
 +    return reinterpret_cast<char*>(iov->iov_base) + offset;
    }
 
   public:
    // Does not take ownership of iov. iov must be valid during the
    // entire lifetime of the SnappyIOVecWriter.
    inline SnappyIOVecWriter(const struct iovec* iov, size_t iov_count)
 -      : output_iov_(iov),
 -        output_iov_count_(iov_count),
 -        curr_iov_index_(0),
 -        curr_iov_written_(0),
 +      : output_iov_end_(iov + iov_count),
 + #if !defined(NDEBUG)
 +        output_iov_(iov),
 + #endif  // !defined(NDEBUG)
 +        curr_iov_(iov),
 +        curr_iov_output_(iov_count ? reinterpret_cast<char*>(iov->iov_base)
 +                                   : nullptr),
 +        curr_iov_remaining_(iov_count ? iov->iov_len : 0),
          total_written_(0),
          output_limit_(-1) {
    }
 
 -  inline void SetExpectedLength(size_t len) {
 -    output_limit_ = len;
 -  }
 +  inline void SetExpectedLength(size_t len) { output_limit_ = len; }
 
 -  inline bool CheckLength() const {
 -    return total_written_ == output_limit_;
 -  }
 +  inline bool CheckLength() const { return total_written_ == output_limit_; }
 
 -  inline bool Append(const char* ip, size_t len) {
 +  inline bool Append(const char* ip, size_t len, char**) {
      if (total_written_ + len > output_limit_) {
        return false;
      }
 
 +    return AppendNoCheck(ip, len);
 +  }
 +
 +  char* GetOutputPtr() { return nullptr; }
 +  char* GetBase(ptrdiff_t*) { return nullptr; }
 +  void SetOutputPtr(char* op) {
 +    // TODO: Switch to [[maybe_unused]] when we can assume C++17.
 +    (void)op;
 +  }
 +
 +  inline bool AppendNoCheck(const char* ip, size_t len) {
      while (len > 0) {
 -      assert(curr_iov_written_ <= output_iov_[curr_iov_index_].iov_len);
 -      if (curr_iov_written_ >= output_iov_[curr_iov_index_].iov_len) {
 +      if (curr_iov_remaining_ == 0) {
          // This iovec is full. Go to the next one.
 -        if (curr_iov_index_ + 1 >= output_iov_count_) {
 +        if (curr_iov_ + 1 >= output_iov_end_) {
            return false;
          }
 -        curr_iov_written_ = 0;
 -        ++curr_iov_index_;
 +        ++curr_iov_;
 +        curr_iov_output_ = reinterpret_cast<char*>(curr_iov_->iov_base);
 +        curr_iov_remaining_ = curr_iov_->iov_len;
        }
 
 -      const size_t to_write = std::min(
 -          len, output_iov_[curr_iov_index_].iov_len - curr_iov_written_);
 -      memcpy(GetIOVecPointer(curr_iov_index_, curr_iov_written_),
 -             ip,
 -             to_write);
 -      curr_iov_written_ += to_write;
 +      const size_t to_write = std::min(len, curr_iov_remaining_);
 +      std::memcpy(curr_iov_output_, ip, to_write);
 +      curr_iov_output_ += to_write;
 +      curr_iov_remaining_ -= to_write;
        total_written_ += to_write;
        ip += to_write;
        len -= to_write;
@@ -966,14 +1833,15 @@ class SnappyIOVecWriter {
      return true;
    }
 
 -  inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
 +  inline bool TryFastAppend(const char* ip, size_t available, size_t len,
 +                            char**) {
      const size_t space_left = output_limit_ - total_written_;
      if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16 &&
 -        output_iov_[curr_iov_index_].iov_len - curr_iov_written_ >= 16) {
 +        curr_iov_remaining_ >= 16) {
        // Fast path, used for the majority (about 95%) of invocations.
 -      char* ptr = GetIOVecPointer(curr_iov_index_, curr_iov_written_);
 -      UnalignedCopy128(ip, ptr);
 -      curr_iov_written_ += len;
 +      UnalignedCopy128(ip, curr_iov_output_);
 +      curr_iov_output_ += len;
 +      curr_iov_remaining_ -= len;
        total_written_ += len;
        return true;
      }
@@ -981,8 +1849,10 @@ class SnappyIOVecWriter {
      return false;
    }
 
 -  inline bool AppendFromSelf(size_t offset, size_t len) {
 -    if (offset > total_written_ || offset == 0) {
 +  inline bool AppendFromSelf(size_t offset, size_t len, char**) {
 +    // See SnappyArrayWriter::AppendFromSelf for an explanation of
 +    // the "offset - 1u" trick.
 +    if (offset - 1u >= total_written_) {
        return false;
      }
      const size_t space_left = output_limit_ - total_written_;
1861
  }
992
1862
 
993
1863
  // Locate the iovec from which we need to start the copy.
994
- size_t from_iov_index = curr_iov_index_;
995
- size_t from_iov_offset = curr_iov_written_;
1864
+ const iovec* from_iov = curr_iov_;
1865
+ size_t from_iov_offset = curr_iov_->iov_len - curr_iov_remaining_;
996
1866
  while (offset > 0) {
997
1867
  if (from_iov_offset >= offset) {
998
1868
  from_iov_offset -= offset;
@@ -1000,47 +1870,48 @@ class SnappyIOVecWriter {
1000
1870
  }
1001
1871
 
1002
1872
  offset -= from_iov_offset;
1003
- assert(from_iov_index > 0);
1004
- --from_iov_index;
1005
- from_iov_offset = output_iov_[from_iov_index].iov_len;
1873
+ --from_iov;
1874
+ #if !defined(NDEBUG)
1875
+ assert(from_iov >= output_iov_);
1876
+ #endif // !defined(NDEBUG)
1877
+ from_iov_offset = from_iov->iov_len;
1006
1878
  }
1007
1879
 
1008
1880
  // Copy <len> bytes starting from the iovec pointed to by from_iov_index to
1009
1881
  // the current iovec.
1010
1882
  while (len > 0) {
1011
- assert(from_iov_index <= curr_iov_index_);
1012
- if (from_iov_index != curr_iov_index_) {
1013
- const size_t to_copy = std::min(
1014
- output_iov_[from_iov_index].iov_len - from_iov_offset,
1015
- len);
1016
- Append(GetIOVecPointer(from_iov_index, from_iov_offset), to_copy);
1883
+ assert(from_iov <= curr_iov_);
1884
+ if (from_iov != curr_iov_) {
1885
+ const size_t to_copy =
1886
+ std::min(from_iov->iov_len - from_iov_offset, len);
1887
+ AppendNoCheck(GetIOVecPointer(from_iov, from_iov_offset), to_copy);
1017
1888
  len -= to_copy;
1018
1889
  if (len > 0) {
1019
- ++from_iov_index;
1890
+ ++from_iov;
1020
1891
  from_iov_offset = 0;
1021
1892
  }
1022
1893
  } else {
1023
- assert(curr_iov_written_ <= output_iov_[curr_iov_index_].iov_len);
1024
- size_t to_copy = std::min(output_iov_[curr_iov_index_].iov_len -
1025
- curr_iov_written_,
1026
- len);
1894
+ size_t to_copy = curr_iov_remaining_;
1027
1895
  if (to_copy == 0) {
1028
1896
  // This iovec is full. Go to the next one.
1029
- if (curr_iov_index_ + 1 >= output_iov_count_) {
1897
+ if (curr_iov_ + 1 >= output_iov_end_) {
1030
1898
  return false;
1031
1899
  }
1032
- ++curr_iov_index_;
1033
- curr_iov_written_ = 0;
1900
+ ++curr_iov_;
1901
+ curr_iov_output_ = reinterpret_cast<char*>(curr_iov_->iov_base);
1902
+ curr_iov_remaining_ = curr_iov_->iov_len;
1034
1903
  continue;
1035
1904
  }
1036
1905
  if (to_copy > len) {
1037
1906
  to_copy = len;
1038
1907
  }
1039
- IncrementalCopySlow(
1040
- GetIOVecPointer(from_iov_index, from_iov_offset),
1041
- GetIOVecPointer(curr_iov_index_, curr_iov_written_),
1042
- GetIOVecPointer(curr_iov_index_, curr_iov_written_) + to_copy);
1043
- curr_iov_written_ += to_copy;
1908
+ assert(to_copy > 0);
1909
+
1910
+ IncrementalCopy(GetIOVecPointer(from_iov, from_iov_offset),
1911
+ curr_iov_output_, curr_iov_output_ + to_copy,
1912
+ curr_iov_output_ + curr_iov_remaining_);
1913
+ curr_iov_output_ += to_copy;
1914
+ curr_iov_remaining_ -= to_copy;
1044
1915
  from_iov_offset += to_copy;
1045
1916
  total_written_ += to_copy;
1046
1917
  len -= to_copy;
@@ -1077,59 +1948,74 @@ class SnappyArrayWriter {
    char* base_;
    char* op_;
    char* op_limit_;
 +  // If op < op_limit_min_slop_ then it's safe to unconditionally write
 +  // kSlopBytes starting at op.
 +  char* op_limit_min_slop_;
 
   public:
    inline explicit SnappyArrayWriter(char* dst)
        : base_(dst),
          op_(dst),
 -        op_limit_(dst) {
 -  }
 +        op_limit_(dst),
 +        op_limit_min_slop_(dst) {}  // Safe default, see invariant.
 
    inline void SetExpectedLength(size_t len) {
      op_limit_ = op_ + len;
 +    // Prevent pointer from being past the buffer.
 +    op_limit_min_slop_ = op_limit_ - std::min<size_t>(kSlopBytes - 1, len);
    }
 
 -  inline bool CheckLength() const {
 -    return op_ == op_limit_;
 +  inline bool CheckLength() const { return op_ == op_limit_; }
 +
 +  char* GetOutputPtr() { return op_; }
 +  char* GetBase(ptrdiff_t* op_limit_min_slop) {
 +    *op_limit_min_slop = op_limit_min_slop_ - base_;
 +    return base_;
    }
 +  void SetOutputPtr(char* op) { op_ = op; }
 
 -  inline bool Append(const char* ip, size_t len) {
 -    char* op = op_;
 +  inline bool Append(const char* ip, size_t len, char** op_p) {
 +    char* op = *op_p;
      const size_t space_left = op_limit_ - op;
 -    if (space_left < len) {
 -      return false;
 -    }
 -    memcpy(op, ip, len);
 -    op_ = op + len;
 +    if (space_left < len) return false;
 +    std::memcpy(op, ip, len);
 +    *op_p = op + len;
      return true;
    }
 
 -  inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
 -    char* op = op_;
 +  inline bool TryFastAppend(const char* ip, size_t available, size_t len,
 +                            char** op_p) {
 +    char* op = *op_p;
      const size_t space_left = op_limit_ - op;
      if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16) {
        // Fast path, used for the majority (about 95%) of invocations.
        UnalignedCopy128(ip, op);
 -      op_ = op + len;
 +      *op_p = op + len;
        return true;
      } else {
        return false;
      }
    }
 
 -  inline bool AppendFromSelf(size_t offset, size_t len) {
 -    char* const op_end = op_ + len;
 +  SNAPPY_ATTRIBUTE_ALWAYS_INLINE
 +  inline bool AppendFromSelf(size_t offset, size_t len, char** op_p) {
 +    assert(len > 0);
 +    char* const op = *op_p;
 +    assert(op >= base_);
 +    char* const op_end = op + len;
 
      // Check if we try to append from before the start of the buffer.
 -    // Normally this would just be a check for "produced < offset",
 -    // but "produced <= offset - 1u" is equivalent for every case
 -    // except the one where offset==0, where the right side will wrap around
 -    // to a very big number. This is convenient, as offset==0 is another
 -    // invalid case that we also want to catch, so that we do not go
 -    // into an infinite loop.
 -    if (Produced() <= offset - 1u || op_end > op_limit_) return false;
 -    op_ = IncrementalCopy(op_ - offset, op_, op_end, op_limit_);
 +    if (SNAPPY_PREDICT_FALSE(static_cast<size_t>(op - base_) < offset))
 +      return false;
 
 +    if (SNAPPY_PREDICT_FALSE((kSlopBytes < 64 && len > kSlopBytes) ||
 +                             op >= op_limit_min_slop_ || offset < len)) {
 +      if (op_end > op_limit_ || offset == 0) return false;
 +      *op_p = IncrementalCopy(op - offset, op, op_end, op_limit_);
 +      return true;
 +    }
 +    std::memmove(op, op - offset, kSlopBytes);
 +    *op_p = op_end;
      return true;
    }
    inline size_t Produced() const {
@@ -1139,8 +2025,9 @@ class SnappyArrayWriter {
    inline void Flush() {}
  };
 
 - bool RawUncompress(const char* compressed, size_t n, char* uncompressed) {
 -   ByteArraySource reader(compressed, n);
 + bool RawUncompress(const char* compressed, size_t compressed_length,
 +                    char* uncompressed) {
 +   ByteArraySource reader(compressed, compressed_length);
    return RawUncompress(&reader, uncompressed);
  }
 
1149
2036
  return InternalUncompress(compressed, &output);
1150
2037
  }
1151
2038
 
1152
- bool Uncompress(const char* compressed, size_t n, string* uncompressed) {
2039
+ bool Uncompress(const char* compressed, size_t compressed_length,
2040
+ std::string* uncompressed) {
1153
2041
  size_t ulength;
1154
- if (!GetUncompressedLength(compressed, n, &ulength)) {
2042
+ if (!GetUncompressedLength(compressed, compressed_length, &ulength)) {
1155
2043
  return false;
1156
2044
  }
1157
2045
  // On 32-bit builds: max_size() < kuint32max. Check for that instead
@@ -1160,7 +2048,8 @@ bool Uncompress(const char* compressed, size_t n, string* uncompressed) {
1160
2048
  return false;
1161
2049
  }
1162
2050
  STLStringResizeUninitialized(uncompressed, ulength);
1163
- return RawUncompress(compressed, n, string_as_array(uncompressed));
2051
+ return RawUncompress(compressed, compressed_length,
2052
+ string_as_array(uncompressed));
1164
2053
  }
1165
2054
 
1166
2055
  // A Writer that drops everything on the floor and just does validation
@@ -1170,32 +2059,44 @@ class SnappyDecompressionValidator {
    size_t produced_;
 
   public:
 -  inline SnappyDecompressionValidator() : expected_(0), produced_(0) { }
 -  inline void SetExpectedLength(size_t len) {
 -    expected_ = len;
 +  inline SnappyDecompressionValidator() : expected_(0), produced_(0) {}
 +  inline void SetExpectedLength(size_t len) { expected_ = len; }
 +  size_t GetOutputPtr() { return produced_; }
 +  size_t GetBase(ptrdiff_t* op_limit_min_slop) {
 +    *op_limit_min_slop = std::numeric_limits<ptrdiff_t>::max() - kSlopBytes + 1;
 +    return 1;
    }
 -  inline bool CheckLength() const {
 -    return expected_ == produced_;
 +  void SetOutputPtr(size_t op) { produced_ = op; }
 +  inline bool CheckLength() const { return expected_ == produced_; }
 +  inline bool Append(const char* ip, size_t len, size_t* produced) {
 +    // TODO: Switch to [[maybe_unused]] when we can assume C++17.
 +    (void)ip;
 +
 +    *produced += len;
 +    return *produced <= expected_;
    }
 -  inline bool Append(const char* ip, size_t len) {
 -    produced_ += len;
 -    return produced_ <= expected_;
 -  }
 -  inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
 +  inline bool TryFastAppend(const char* ip, size_t available, size_t length,
 +                            size_t* produced) {
 +    // TODO: Switch to [[maybe_unused]] when we can assume C++17.
 +    (void)ip;
 +    (void)available;
 +    (void)length;
 +    (void)produced;
 +
      return false;
    }
 -  inline bool AppendFromSelf(size_t offset, size_t len) {
 +  inline bool AppendFromSelf(size_t offset, size_t len, size_t* produced) {
      // See SnappyArrayWriter::AppendFromSelf for an explanation of
      // the "offset - 1u" trick.
 -    if (produced_ <= offset - 1u) return false;
 -    produced_ += len;
 -    return produced_ <= expected_;
 +    if (*produced <= offset - 1u) return false;
 +    *produced += len;
 +    return *produced <= expected_;
    }
    inline void Flush() {}
  };
 
 - bool IsValidCompressedBuffer(const char* compressed, size_t n) {
 -   ByteArraySource reader(compressed, n);
 + bool IsValidCompressedBuffer(const char* compressed, size_t compressed_length) {
 +   ByteArraySource reader(compressed, compressed_length);
    SnappyDecompressionValidator writer;
    return InternalUncompress(&reader, &writer);
  }
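Both AppendFromSelf implementations above lean on the "offset - 1u" trick whose explanatory comment was dropped from SnappyArrayWriter in this version: with unsigned arithmetic, offset == 0 wraps to the maximum value, so a single comparison rejects both a zero offset (which would loop forever) and an offset pointing before the start of the output. A sketch:

    #include <cassert>
    #include <cstddef>

    bool ValidOffset(size_t produced, size_t offset) {
      // False for offset == 0 (wraps to SIZE_MAX) and for offset > produced.
      return offset - 1u < produced;
    }

    int main() {
      assert(!ValidOffset(10, 0));   // zero offset would loop forever
      assert(ValidOffset(10, 10));   // may copy from the first produced byte
      assert(!ValidOffset(10, 11));  // before the start of the buffer
    }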
@@ -1205,9 +2106,7 @@ bool IsValidCompressed(Source* compressed) {
    return InternalUncompress(compressed, &writer);
  }
 
 - void RawCompress(const char* input,
 -                  size_t input_length,
 -                  char* compressed,
 + void RawCompress(const char* input, size_t input_length, char* compressed,
                    size_t* compressed_length) {
    ByteArraySource reader(input, input_length);
    UncheckedByteArraySink writer(compressed);
@@ -1217,14 +2116,44 @@ void RawCompress(const char* input,
    *compressed_length = (writer.CurrentDestination() - compressed);
  }
 
 - size_t Compress(const char* input, size_t input_length, string* compressed) {
 + void RawCompressFromIOVec(const struct iovec* iov, size_t uncompressed_length,
 +                           char* compressed, size_t* compressed_length) {
 +   SnappyIOVecReader reader(iov, uncompressed_length);
 +   UncheckedByteArraySink writer(compressed);
 +   Compress(&reader, &writer);
 +
 +   // Compute how many bytes were added.
 +   *compressed_length = writer.CurrentDestination() - compressed;
 + }
 +
 + size_t Compress(const char* input, size_t input_length,
 +                 std::string* compressed) {
    // Pre-grow the buffer to the max length of the compressed output
    STLStringResizeUninitialized(compressed, MaxCompressedLength(input_length));
 
    size_t compressed_length;
    RawCompress(input, input_length, string_as_array(compressed),
                &compressed_length);
 -  compressed->resize(compressed_length);
 +  compressed->erase(compressed_length);
 +  return compressed_length;
 + }
 +
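For reference, the round trip these functions support, as a minimal sketch against the public API only:

    #include <cassert>
    #include <string>
    #include "snappy.h"

    int main() {
      const std::string input(1000, 'a');  // highly compressible
      std::string compressed, output;
      snappy::Compress(input.data(), input.size(), &compressed);
      assert(snappy::IsValidCompressedBuffer(compressed.data(), compressed.size()));
      assert(snappy::Uncompress(compressed.data(), compressed.size(), &output));
      assert(output == input);
    }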
 + size_t CompressFromIOVec(const struct iovec* iov, size_t iov_cnt,
 +                          std::string* compressed) {
 +   // Compute the number of bytes to be compressed.
 +   size_t uncompressed_length = 0;
 +   for (size_t i = 0; i < iov_cnt; ++i) {
 +     uncompressed_length += iov[i].iov_len;
 +   }
 +
 +   // Pre-grow the buffer to the max length of the compressed output.
 +   STLStringResizeUninitialized(compressed,
 +                                MaxCompressedLength(uncompressed_length));
 +
 +   size_t compressed_length;
 +   RawCompressFromIOVec(iov, uncompressed_length, string_as_array(compressed),
 +                        &compressed_length);
 +   compressed->erase(compressed_length);
    return compressed_length;
  }
 
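A usage sketch for the new gather-style entry point, compressing two non-contiguous pieces in one call and round-tripping the result:

    #include <string>
    #include <sys/uio.h>
    #include "snappy.h"

    int main() {
      std::string part1 = "hello ", part2 = "snappy";
      struct iovec iov[2] = {
          {const_cast<char*>(part1.data()), part1.size()},
          {const_cast<char*>(part2.data()), part2.size()},
      };
      std::string compressed;
      snappy::CompressFromIOVec(iov, 2, &compressed);  // gathers both pieces
      std::string round_trip;
      snappy::Uncompress(compressed.data(), compressed.size(), &round_trip);
      // round_trip == "hello snappy"
    }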
@@ -1249,13 +2178,14 @@ class SnappyScatteredWriter {
    size_t full_size_;
 
    // Pointer into current output block
 -  char* op_base_;   // Base of output block
 -  char* op_ptr_;    // Pointer to next unfilled byte in block
 -  char* op_limit_;  // Pointer just past block
 +  char* op_base_;   // Base of output block
 +  char* op_ptr_;    // Pointer to next unfilled byte in block
 +  char* op_limit_;  // Pointer just past block
 +  // If op < op_limit_min_slop_ then it's safe to unconditionally write
 +  // kSlopBytes starting at op.
 +  char* op_limit_min_slop_;
 
 -  inline size_t Size() const {
 -    return full_size_ + (op_ptr_ - op_base_);
 -  }
 +  inline size_t Size() const { return full_size_ + (op_ptr_ - op_base_); }
 
    bool SlowAppend(const char* ip, size_t len);
    bool SlowAppendFromSelf(size_t offset, size_t len);
@@ -1266,59 +2196,79 @@ class SnappyScatteredWriter {
          full_size_(0),
          op_base_(NULL),
          op_ptr_(NULL),
 -        op_limit_(NULL) {
 +        op_limit_(NULL),
 +        op_limit_min_slop_(NULL) {}
 +  char* GetOutputPtr() { return op_ptr_; }
 +  char* GetBase(ptrdiff_t* op_limit_min_slop) {
 +    *op_limit_min_slop = op_limit_min_slop_ - op_base_;
 +    return op_base_;
    }
 +  void SetOutputPtr(char* op) { op_ptr_ = op; }
 
    inline void SetExpectedLength(size_t len) {
      assert(blocks_.empty());
      expected_ = len;
    }
 
 -  inline bool CheckLength() const {
 -    return Size() == expected_;
 -  }
 +  inline bool CheckLength() const { return Size() == expected_; }
 
    // Return the number of bytes actually uncompressed so far
 -  inline size_t Produced() const {
 -    return Size();
 -  }
 +  inline size_t Produced() const { return Size(); }
 
 -  inline bool Append(const char* ip, size_t len) {
 -    size_t avail = op_limit_ - op_ptr_;
 +  inline bool Append(const char* ip, size_t len, char** op_p) {
 +    char* op = *op_p;
 +    size_t avail = op_limit_ - op;
      if (len <= avail) {
        // Fast path
 -      memcpy(op_ptr_, ip, len);
 -      op_ptr_ += len;
 +      std::memcpy(op, ip, len);
 +      *op_p = op + len;
        return true;
      } else {
 -      return SlowAppend(ip, len);
 +      op_ptr_ = op;
 +      bool res = SlowAppend(ip, len);
 +      *op_p = op_ptr_;
 +      return res;
      }
    }
 
 -  inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
 -    char* op = op_ptr_;
 +  inline bool TryFastAppend(const char* ip, size_t available, size_t length,
 +                            char** op_p) {
 +    char* op = *op_p;
      const int space_left = op_limit_ - op;
      if (length <= 16 && available >= 16 + kMaximumTagLength &&
          space_left >= 16) {
        // Fast path, used for the majority (about 95%) of invocations.
        UnalignedCopy128(ip, op);
 -      op_ptr_ = op + length;
 +      *op_p = op + length;
        return true;
      } else {
        return false;
      }
    }
 
 -  inline bool AppendFromSelf(size_t offset, size_t len) {
 -    char* const op_end = op_ptr_ + len;
 -    // See SnappyArrayWriter::AppendFromSelf for an explanation of
 -    // the "offset - 1u" trick.
 -    if (PREDICT_TRUE(offset - 1u < op_ptr_ - op_base_ && op_end <= op_limit_)) {
 -      // Fast path: src and dst in current block.
 -      op_ptr_ = IncrementalCopy(op_ptr_ - offset, op_ptr_, op_end, op_limit_);
 +  inline bool AppendFromSelf(size_t offset, size_t len, char** op_p) {
 +    char* op = *op_p;
 +    assert(op >= op_base_);
 +    // Check if we try to append from before the start of the buffer.
 +    if (SNAPPY_PREDICT_FALSE((kSlopBytes < 64 && len > kSlopBytes) ||
 +                             static_cast<size_t>(op - op_base_) < offset ||
 +                             op >= op_limit_min_slop_ || offset < len)) {
 +      if (offset == 0) return false;
 +      if (SNAPPY_PREDICT_FALSE(static_cast<size_t>(op - op_base_) < offset ||
 +                               op + len > op_limit_)) {
 +        op_ptr_ = op;
 +        bool res = SlowAppendFromSelf(offset, len);
 +        *op_p = op_ptr_;
 +        return res;
 +      }
 +      *op_p = IncrementalCopy(op - offset, op, op + len, op_limit_);
        return true;
      }
 -    return SlowAppendFromSelf(offset, len);
 +    // Fast path
 +    char* const op_end = op + len;
 +    std::memmove(op, op - offset, kSlopBytes);
 +    *op_p = op_end;
 +    return true;
    }
 
    // Called at the end of the decompress. We ask the allocator
2274
  // Called at the end of the decompress. We ask the allocator
@@ -1326,12 +2276,12 @@ class SnappyScatteredWriter {
1326
2276
  inline void Flush() { allocator_.Flush(Produced()); }
1327
2277
  };
1328
2278
 
1329
- template<typename Allocator>
2279
+ template <typename Allocator>
1330
2280
  bool SnappyScatteredWriter<Allocator>::SlowAppend(const char* ip, size_t len) {
1331
2281
  size_t avail = op_limit_ - op_ptr_;
1332
2282
  while (len > avail) {
1333
2283
  // Completely fill this block
1334
- memcpy(op_ptr_, ip, avail);
2284
+ std::memcpy(op_ptr_, ip, avail);
1335
2285
  op_ptr_ += avail;
1336
2286
  assert(op_limit_ - op_ptr_ == 0);
1337
2287
  full_size_ += (op_ptr_ - op_base_);
@@ -1339,25 +2289,25 @@ bool SnappyScatteredWriter<Allocator>::SlowAppend(const char* ip, size_t len) {
    ip += avail;
 
    // Bounds check
 -  if (full_size_ + len > expected_) {
 -    return false;
 -  }
 +  if (full_size_ + len > expected_) return false;
 
    // Make new block
 -  size_t bsize = min<size_t>(kBlockSize, expected_ - full_size_);
 +  size_t bsize = std::min<size_t>(kBlockSize, expected_ - full_size_);
    op_base_ = allocator_.Allocate(bsize);
    op_ptr_ = op_base_;
    op_limit_ = op_base_ + bsize;
 +  op_limit_min_slop_ = op_limit_ - std::min<size_t>(kSlopBytes - 1, bsize);
 +
    blocks_.push_back(op_base_);
    avail = bsize;
  }
 
 - memcpy(op_ptr_, ip, len);
 + std::memcpy(op_ptr_, ip, len);
  op_ptr_ += len;
  return true;
 }
 
 - template<typename Allocator>
 + template <typename Allocator>
  bool SnappyScatteredWriter<Allocator>::SlowAppendFromSelf(size_t offset,
                                                            size_t len) {
    // Overflow check
@@ -1372,18 +2322,26 @@ bool SnappyScatteredWriter<Allocator>::SlowAppendFromSelf(size_t offset,
    // nice if we do not rely on that, since we can get better compression if we
    // allow cross-block copies and thus might want to change the compressor in
    // the future.
 +  // TODO: Replace this with a properly optimized path. This is not
 +  // triggered right now. But it is so slow that it would regress
 +  // performance unacceptably if triggered.
    size_t src = cur - offset;
 +  char* op = op_ptr_;
    while (len-- > 0) {
 -    char c = blocks_[src >> kBlockLog][src & (kBlockSize-1)];
 -    Append(&c, 1);
 +    char c = blocks_[src >> kBlockLog][src & (kBlockSize - 1)];
 +    if (!Append(&c, 1, &op)) {
 +      op_ptr_ = op;
 +      return false;
 +    }
      src++;
    }
 +  op_ptr_ = op;
    return true;
  }
 
  class SnappySinkAllocator {
   public:
 -  explicit SnappySinkAllocator(Sink* dest): dest_(dest) {}
 +  explicit SnappySinkAllocator(Sink* dest) : dest_(dest) {}
    ~SnappySinkAllocator() {}
 
    char* Allocate(int size) {
@@ -1399,10 +2357,9 @@ class SnappySinkAllocator {
    // to the blocks.
    void Flush(size_t size) {
      size_t size_written = 0;
 -    size_t block_size;
 -    for (int i = 0; i < blocks_.size(); ++i) {
 -      block_size = min<size_t>(blocks_[i].size, size - size_written);
 -      dest_->AppendAndTakeOwnership(blocks_[i].data, block_size,
 +    for (Datablock& block : blocks_) {
 +      size_t block_size = std::min<size_t>(block.size, size - size_written);
 +      dest_->AppendAndTakeOwnership(block.data, block_size,
                                      &SnappySinkAllocator::Deleter, NULL);
        size_written += block_size;
      }
@@ -1417,6 +2374,10 @@ class SnappySinkAllocator {
  };
 
  static void Deleter(void* arg, const char* bytes, size_t size) {
 +  // TODO: Switch to [[maybe_unused]] when we can assume C++17.
 +  (void)arg;
 +  (void)size;
 +
    delete[] bytes;
  }
 
1436
2397
  bool Uncompress(Source* compressed, Sink* uncompressed) {
1437
2398
  // Read the uncompressed length from the front of the compressed input
1438
2399
  SnappyDecompressor decompressor(compressed);
1439
- uint32 uncompressed_len = 0;
2400
+ uint32_t uncompressed_len = 0;
1440
2401
  if (!decompressor.ReadUncompressedLength(&uncompressed_len)) {
1441
2402
  return false;
1442
2403
  }
1443
2404
 
1444
2405
  char c;
1445
2406
  size_t allocated_size;
1446
- char* buf = uncompressed->GetAppendBufferVariable(
1447
- 1, uncompressed_len, &c, 1, &allocated_size);
2407
+ char* buf = uncompressed->GetAppendBufferVariable(1, uncompressed_len, &c, 1,
2408
+ &allocated_size);
1448
2409
 
2410
+ const size_t compressed_len = compressed->Available();
1449
2411
  // If we can get a flat buffer, then use it, otherwise do block by block
1450
2412
  // uncompression
1451
2413
  if (allocated_size >= uncompressed_len) {
1452
2414
  SnappyArrayWriter writer(buf);
1453
- bool result = InternalUncompressAllTags(
1454
- &decompressor, &writer, uncompressed_len);
2415
+ bool result = InternalUncompressAllTags(&decompressor, &writer,
2416
+ compressed_len, uncompressed_len);
1455
2417
  uncompressed->Append(buf, writer.Produced());
1456
2418
  return result;
1457
2419
  } else {
1458
2420
  SnappySinkAllocator allocator(uncompressed);
1459
2421
  SnappyScatteredWriter<SnappySinkAllocator> writer(allocator);
1460
- return InternalUncompressAllTags(&decompressor, &writer, uncompressed_len);
2422
+ return InternalUncompressAllTags(&decompressor, &writer, compressed_len,
2423
+ uncompressed_len);
1461
2424
  }
1462
2425
  }
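A sketch of driving this Source/Sink path from a flat buffer, using only public types; the UncheckedByteArraySink assumes its destination is pre-sized to the uncompressed length:

    #include <string>
    #include "snappy.h"
    #include "snappy-sinksource.h"

    bool UncompressToSink(const std::string& compressed, std::string* out) {
      size_t ulen;
      if (!snappy::GetUncompressedLength(compressed.data(), compressed.size(),
                                         &ulen)) {
        return false;
      }
      out->resize(ulen);  // UncheckedByteArraySink needs the full buffer up front
      snappy::ByteArraySource source(compressed.data(), compressed.size());
      snappy::UncheckedByteArraySink sink(&(*out)[0]);
      return snappy::Uncompress(&source, &sink);
    }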
 
 - }  // end namespace snappy
 + }  // namespace snappy