couchbase 3.4.1 → 3.4.2

Files changed (65)
  1. checksums.yaml +4 -4
  2. data/README.md +2 -2
  3. data/ext/couchbase/CMakeLists.txt +2 -0
  4. data/ext/couchbase/cmake/ThirdPartyDependencies.cmake +4 -0
  5. data/ext/couchbase/core/cluster_options.hxx +0 -1
  6. data/ext/couchbase/core/config_profile.cxx +23 -1
  7. data/ext/couchbase/core/config_profile.hxx +2 -12
  8. data/ext/couchbase/core/impl/analytics.cxx +236 -0
  9. data/ext/couchbase/core/impl/cluster.cxx +0 -1
  10. data/ext/couchbase/core/impl/dns_srv_tracker.cxx +5 -3
  11. data/ext/couchbase/core/impl/query.cxx +5 -5
  12. data/ext/couchbase/core/io/dns_client.cxx +225 -0
  13. data/ext/couchbase/core/io/dns_client.hxx +19 -188
  14. data/ext/couchbase/core/transactions/active_transaction_record.hxx +2 -2
  15. data/ext/couchbase/core/transactions/attempt_context_impl.cxx +3 -0
  16. data/ext/couchbase/core/transactions/attempt_context_impl.hxx +1 -1
  17. data/ext/couchbase/core/transactions/internal/transaction_context.hxx +12 -12
  18. data/ext/couchbase/core/transactions/internal/transactions_cleanup.hxx +7 -1
  19. data/ext/couchbase/core/transactions/transaction_context.cxx +1 -0
  20. data/ext/couchbase/core/transactions/transactions_cleanup.cxx +144 -155
  21. data/ext/couchbase/core/utils/connection_string.cxx +10 -3
  22. data/ext/couchbase/core/utils/connection_string.hxx +3 -3
  23. data/ext/couchbase/couchbase/analytics_error_context.hxx +143 -0
  24. data/ext/couchbase/couchbase/analytics_meta_data.hxx +155 -0
  25. data/ext/couchbase/couchbase/analytics_metrics.hxx +163 -0
  26. data/ext/couchbase/couchbase/analytics_options.hxx +359 -0
  27. data/ext/couchbase/couchbase/analytics_result.hxx +102 -0
  28. data/ext/couchbase/couchbase/analytics_scan_consistency.hxx +46 -0
  29. data/ext/couchbase/couchbase/analytics_status.hxx +41 -0
  30. data/ext/couchbase/couchbase/analytics_warning.hxx +85 -0
  31. data/ext/couchbase/couchbase/cluster.hxx +33 -0
  32. data/ext/couchbase/couchbase/fmt/analytics_status.hxx +76 -0
  33. data/ext/couchbase/couchbase/query_options.hxx +0 -1
  34. data/ext/couchbase/couchbase/scope.hxx +33 -0
  35. data/ext/couchbase/couchbase/transactions/attempt_context.hxx +1 -1
  36. data/ext/couchbase/test/CMakeLists.txt +1 -2
  37. data/ext/couchbase/test/test_helper.hxx +1 -1
  38. data/ext/couchbase/test/test_integration_analytics.cxx +289 -13
  39. data/ext/couchbase/test/test_integration_crud.cxx +8 -1
  40. data/ext/couchbase/test/test_integration_examples.cxx +41 -0
  41. data/ext/couchbase/test/test_integration_management.cxx +15 -3
  42. data/ext/couchbase/test/test_integration_search.cxx +601 -0
  43. data/ext/couchbase/test/test_transaction_transaction_simple.cxx +73 -0
  44. data/ext/couchbase/test/test_unit_config_profiles.cxx +12 -12
  45. data/ext/couchbase/test/test_unit_connection_string.cxx +35 -0
  46. data/ext/couchbase/third_party/snappy/CMakeLists.txt +150 -27
  47. data/ext/couchbase/third_party/snappy/cmake/config.h.in +28 -24
  48. data/ext/couchbase/third_party/snappy/snappy-internal.h +189 -25
  49. data/ext/couchbase/third_party/snappy/snappy-sinksource.cc +26 -9
  50. data/ext/couchbase/third_party/snappy/snappy-sinksource.h +11 -11
  51. data/ext/couchbase/third_party/snappy/snappy-stubs-internal.cc +1 -1
  52. data/ext/couchbase/third_party/snappy/snappy-stubs-internal.h +227 -308
  53. data/ext/couchbase/third_party/snappy/snappy-stubs-public.h.in +0 -11
  54. data/ext/couchbase/third_party/snappy/snappy.cc +1176 -410
  55. data/ext/couchbase/third_party/snappy/snappy.h +19 -4
  56. data/ext/couchbase.cxx +27 -6
  57. data/ext/revisions.rb +3 -3
  58. data/lib/couchbase/cluster.rb +13 -9
  59. data/lib/couchbase/cluster_registry.rb +7 -2
  60. data/lib/couchbase/configuration.rb +3 -4
  61. data/lib/couchbase/options.rb +85 -2
  62. data/lib/couchbase/search_options.rb +158 -240
  63. data/lib/couchbase/version.rb +1 -1
  64. metadata +17 -6
  65. data/ext/couchbase/core/CMakeLists.txt +0 -0
data/ext/couchbase/third_party/snappy/snappy.cc
@@ -26,21 +26,9 @@
  // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

- #include "snappy.h"
  #include "snappy-internal.h"
  #include "snappy-sinksource.h"
-
- #if !defined(SNAPPY_HAVE_SSSE3)
- // __SSSE3__ is defined by GCC and Clang. Visual Studio doesn't target SIMD
- // support between SSE2 and AVX (so SSSE3 instructions require AVX support), and
- // defines __AVX__ when AVX support is available.
- #if defined(__SSSE3__) || defined(__AVX__)
- #define SNAPPY_HAVE_SSSE3 1
- #else
- #define SNAPPY_HAVE_SSSE3 0
- #endif
- #endif // !defined(SNAPPY_HAVE_SSSE3)
-
+ #include "snappy.h"
  #if !defined(SNAPPY_HAVE_BMI2)
  // __BMI2__ is defined by GCC and Clang. Visual Studio doesn't target BMI2
  // specifically, but it does define __AVX2__ when AVX2 support is available.
@@ -56,46 +44,145 @@
  #endif
  #endif // !defined(SNAPPY_HAVE_BMI2)

- #if SNAPPY_HAVE_SSSE3
- // Please do not replace with <x86intrin.h>. or with headers that assume more
- // advanced SSE versions without checking with all the OWNERS.
- #include <tmmintrin.h>
+ #if !defined(SNAPPY_HAVE_X86_CRC32)
+ #if defined(__SSE4_2__)
+ #define SNAPPY_HAVE_X86_CRC32 1
+ #else
+ #define SNAPPY_HAVE_X86_CRC32 0
  #endif
+ #endif // !defined(SNAPPY_HAVE_X86_CRC32)

- #if SNAPPY_HAVE_BMI2
+ #if !defined(SNAPPY_HAVE_NEON_CRC32)
+ #if SNAPPY_HAVE_NEON && defined(__ARM_FEATURE_CRC32)
+ #define SNAPPY_HAVE_NEON_CRC32 1
+ #else
+ #define SNAPPY_HAVE_NEON_CRC32 0
+ #endif
+ #endif // !defined(SNAPPY_HAVE_NEON_CRC32)
+
+ #if SNAPPY_HAVE_BMI2 || SNAPPY_HAVE_X86_CRC32
  // Please do not replace with <x86intrin.h>. or with headers that assume more
  // advanced SSE versions without checking with all the OWNERS.
  #include <immintrin.h>
+ #elif SNAPPY_HAVE_NEON_CRC32
+ #include <arm_acle.h>
  #endif

- #include <stdio.h>
+ #if defined(__GNUC__)
+ #define SNAPPY_PREFETCH(ptr) __builtin_prefetch(ptr, 0, 3)
+ #else
+ #define SNAPPY_PREFETCH(ptr) (void)(ptr)
+ #endif

  #include <algorithm>
+ #include <array>
+ #include <cstddef>
+ #include <cstdint>
+ #include <cstdio>
+ #include <cstring>
  #include <string>
+ #include <utility>
  #include <vector>

  namespace snappy {

+ namespace {
+
+ // The amount of slop bytes writers are using for unconditional copies.
+ constexpr int kSlopBytes = 64;
+
+ using internal::char_table;
  using internal::COPY_1_BYTE_OFFSET;
  using internal::COPY_2_BYTE_OFFSET;
- using internal::LITERAL;
- using internal::char_table;
+ using internal::COPY_4_BYTE_OFFSET;
  using internal::kMaximumTagLength;
+ using internal::LITERAL;
+ #if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
+ using internal::V128;
+ using internal::V128_Load;
+ using internal::V128_LoadU;
+ using internal::V128_Shuffle;
+ using internal::V128_StoreU;
+ using internal::V128_DupChar;
+ #endif
+
+ // We translate the information encoded in a tag through a lookup table to a
+ // format that requires fewer instructions to decode. Effectively we store
+ // the length minus the tag part of the offset. The lowest significant byte
+ // thus stores the length. While total length - offset is given by
+ // entry - ExtractOffset(type). The nice thing is that the subtraction
+ // immediately sets the flags for the necessary check that offset >= length.
+ // This folds the cmp with sub. We engineer the long literals and copy-4 to
+ // always fail this check, so their presence doesn't affect the fast path.
+ // To prevent literals from triggering the guard against offset < length (offset
+ // does not apply to literals) the table is giving them a spurious offset of
+ // 256.
+ inline constexpr int16_t MakeEntry(int16_t len, int16_t offset) {
+ return len - (offset << 8);
+ }
+
+ inline constexpr int16_t LengthMinusOffset(int data, int type) {
+ return type == 3 ? 0xFF // copy-4 (or type == 3)
+ : type == 2 ? MakeEntry(data + 1, 0) // copy-2
+ : type == 1 ? MakeEntry((data & 7) + 4, data >> 3) // copy-1
+ : data < 60 ? MakeEntry(data + 1, 1) // note spurious offset.
+ : 0xFF; // long literal
+ }
+
+ inline constexpr int16_t LengthMinusOffset(uint8_t tag) {
+ return LengthMinusOffset(tag >> 2, tag & 3);
+ }
+
+ template <size_t... Ints>
+ struct index_sequence {};
+
+ template <std::size_t N, size_t... Is>
+ struct make_index_sequence : make_index_sequence<N - 1, N - 1, Is...> {};
+
+ template <size_t... Is>
+ struct make_index_sequence<0, Is...> : index_sequence<Is...> {};

- // Any hash function will produce a valid compressed bitstream, but a good
- // hash function reduces the number of collisions and thus yields better
- // compression for compressible input, and more speed for incompressible
- // input. Of course, it doesn't hurt if the hash function is reasonably fast
- // either, as it gets called a lot.
- static inline uint32 HashBytes(uint32 bytes, int shift) {
- uint32 kMul = 0x1e35a7bd;
- return (bytes * kMul) >> shift;
+ template <size_t... seq>
+ constexpr std::array<int16_t, 256> MakeTable(index_sequence<seq...>) {
+ return std::array<int16_t, 256>{LengthMinusOffset(seq)...};
  }
- static inline uint32 Hash(const char* p, int shift) {
- return HashBytes(UNALIGNED_LOAD32(p), shift);
+
+ alignas(64) const std::array<int16_t, 256> kLengthMinusOffset =
+ MakeTable(make_index_sequence<256>{});
+
+ // Given a table of uint16_t whose size is mask / 2 + 1, return a pointer to the
+ // relevant entry, if any, for the given bytes. Any hash function will do,
+ // but a good hash function reduces the number of collisions and thus yields
+ // better compression for compressible input.
+ //
+ // REQUIRES: mask is 2 * (table_size - 1), and table_size is a power of two.
+ inline uint16_t* TableEntry(uint16_t* table, uint32_t bytes, uint32_t mask) {
+ // Our choice is quicker-and-dirtier than the typical hash function;
+ // empirically, that seems beneficial. The upper bits of kMagic * bytes are a
+ // higher-quality hash than the lower bits, so when using kMagic * bytes we
+ // also shift right to get a higher-quality end result. There's no similar
+ // issue with a CRC because all of the output bits of a CRC are equally good
+ // "hashes." So, a CPU instruction for CRC, if available, tends to be a good
+ // choice.
+ #if SNAPPY_HAVE_NEON_CRC32
+ // We use mask as the second arg to the CRC function, as it's about to
+ // be used anyway; it'd be equally correct to use 0 or some constant.
+ // Mathematically, _mm_crc32_u32 (or similar) is a function of the
+ // xor of its arguments.
+ const uint32_t hash = __crc32cw(bytes, mask);
+ #elif SNAPPY_HAVE_X86_CRC32
+ const uint32_t hash = _mm_crc32_u32(bytes, mask);
+ #else
+ constexpr uint32_t kMagic = 0x1e35a7bd;
+ const uint32_t hash = (kMagic * bytes) >> (31 - kMaxHashTableBits);
+ #endif
+ return reinterpret_cast<uint16_t*>(reinterpret_cast<uintptr_t>(table) +
+ (hash & mask));
  }

- size_t MaxCompressedLength(size_t source_len) {
+ } // namespace
+
+ size_t MaxCompressedLength(size_t source_bytes) {
  // Compressed data can be defined as:
  // compressed := item* literal*
  // item := literal* copy
@@ -116,24 +203,34 @@ size_t MaxCompressedLength(size_t source_len) {
  // I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
  //
  // This last factor dominates the blowup, so the final estimate is:
- return 32 + source_len + source_len/6;
+ return 32 + source_bytes + source_bytes / 6;
  }

  namespace {

  void UnalignedCopy64(const void* src, void* dst) {
  char tmp[8];
- memcpy(tmp, src, 8);
- memcpy(dst, tmp, 8);
+ std::memcpy(tmp, src, 8);
+ std::memcpy(dst, tmp, 8);
  }

  void UnalignedCopy128(const void* src, void* dst) {
- // memcpy gets vectorized when the appropriate compiler options are used.
- // For example, x86 compilers targeting SSE2+ will optimize to an SSE2 load
- // and store.
+ // std::memcpy() gets vectorized when the appropriate compiler options are
+ // used. For example, x86 compilers targeting SSE2+ will optimize to an SSE2
+ // load and store.
  char tmp[16];
- memcpy(tmp, src, 16);
- memcpy(dst, tmp, 16);
+ std::memcpy(tmp, src, 16);
+ std::memcpy(dst, tmp, 16);
+ }
+
+ template <bool use_16bytes_chunk>
+ inline void ConditionalUnalignedCopy128(const char* src, char* dst) {
+ if (use_16bytes_chunk) {
+ UnalignedCopy128(src, dst);
+ } else {
+ UnalignedCopy64(src, dst);
+ UnalignedCopy64(src + 8, dst + 8);
+ }
  }

  // Copy [src, src+(op_limit-op)) to [op, (op_limit-op)) a byte at a time. Used
@@ -145,7 +242,8 @@ void UnalignedCopy128(const void* src, void* dst) {
  // After IncrementalCopySlow(src, op, op_limit), the result will have eleven
  // copies of "ab"
  // ababababababababababab
- // Note that this does not match the semantics of either memcpy() or memmove().
+ // Note that this does not match the semantics of either std::memcpy() or
+ // std::memmove().
  inline char* IncrementalCopySlow(const char* src, char* op,
  char* const op_limit) {
  // TODO: Remove pragma when LLVM is aware this
@@ -160,39 +258,179 @@ inline char* IncrementalCopySlow(const char* src, char* op,
  return op_limit;
  }

- #if SNAPPY_HAVE_SSSE3
+ #if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
+
+ // Computes the bytes for shuffle control mask (please read comments on
+ // 'pattern_generation_masks' as well) for the given index_offset and
+ // pattern_size. For example, when the 'offset' is 6, it will generate a
+ // repeating pattern of size 6. So, the first 16 byte indexes will correspond to
+ // the pattern-bytes {0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3} and the
+ // next 16 byte indexes will correspond to the pattern-bytes {4, 5, 0, 1, 2, 3,
+ // 4, 5, 0, 1, 2, 3, 4, 5, 0, 1}. These byte index sequences are generated by
+ // calling MakePatternMaskBytes(0, 6, index_sequence<16>()) and
+ // MakePatternMaskBytes(16, 6, index_sequence<16>()) respectively.
+ template <size_t... indexes>
+ inline constexpr std::array<char, sizeof...(indexes)> MakePatternMaskBytes(
+ int index_offset, int pattern_size, index_sequence<indexes...>) {
+ return {static_cast<char>((index_offset + indexes) % pattern_size)...};
+ }
+
+ // Computes the shuffle control mask bytes array for given pattern-sizes and
+ // returns an array.
+ template <size_t... pattern_sizes_minus_one>
+ inline constexpr std::array<std::array<char, sizeof(V128)>,
+ sizeof...(pattern_sizes_minus_one)>
+ MakePatternMaskBytesTable(int index_offset,
+ index_sequence<pattern_sizes_minus_one...>) {
+ return {
+ MakePatternMaskBytes(index_offset, pattern_sizes_minus_one + 1,
+ make_index_sequence</*indexes=*/sizeof(V128)>())...};
+ }

- // This is a table of shuffle control masks that can be used as the source
+ // This is an array of shuffle control masks that can be used as the source
  // operand for PSHUFB to permute the contents of the destination XMM register
  // into a repeating byte pattern.
- alignas(16) const char pshufb_fill_patterns[7][16] = {
- {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
- {0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1},
- {0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0},
- {0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3},
- {0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0},
- {0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3},
- {0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0, 1},
- };
+ alignas(16) constexpr std::array<std::array<char, sizeof(V128)>,
+ 16> pattern_generation_masks =
+ MakePatternMaskBytesTable(
+ /*index_offset=*/0,
+ /*pattern_sizes_minus_one=*/make_index_sequence<16>());
+
+ // Similar to 'pattern_generation_masks', this table is used to "rotate" the
+ // pattern so that we can copy the *next 16 bytes* consistent with the pattern.
+ // Basically, pattern_reshuffle_masks is a continuation of
+ // pattern_generation_masks. It follows that, pattern_reshuffle_masks is same as
+ // pattern_generation_masks for offsets 1, 2, 4, 8 and 16.
+ alignas(16) constexpr std::array<std::array<char, sizeof(V128)>,
+ 16> pattern_reshuffle_masks =
+ MakePatternMaskBytesTable(
+ /*index_offset=*/16,
+ /*pattern_sizes_minus_one=*/make_index_sequence<16>());
+
+ SNAPPY_ATTRIBUTE_ALWAYS_INLINE
+ static inline V128 LoadPattern(const char* src, const size_t pattern_size) {
+ V128 generation_mask = V128_Load(reinterpret_cast<const V128*>(
+ pattern_generation_masks[pattern_size - 1].data()));
+ // Uninitialized bytes are masked out by the shuffle mask.
+ // TODO: remove annotation and macro defs once MSan is fixed.
+ SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(src + pattern_size, 16 - pattern_size);
+ return V128_Shuffle(V128_LoadU(reinterpret_cast<const V128*>(src)),
+ generation_mask);
+ }

- #endif // SNAPPY_HAVE_SSSE3
+ SNAPPY_ATTRIBUTE_ALWAYS_INLINE
+ static inline std::pair<V128 /* pattern */, V128 /* reshuffle_mask */>
+ LoadPatternAndReshuffleMask(const char* src, const size_t pattern_size) {
+ V128 pattern = LoadPattern(src, pattern_size);

- // Copy [src, src+(op_limit-op)) to [op, (op_limit-op)) but faster than
+ // This mask will generate the next 16 bytes in-place. Doing so enables us to
+ // write data by at most 4 V128_StoreU.
+ //
+ // For example, suppose pattern is: abcdefabcdefabcd
+ // Shuffling with this mask will generate: efabcdefabcdefab
+ // Shuffling again will generate: cdefabcdefabcdef
+ V128 reshuffle_mask = V128_Load(reinterpret_cast<const V128*>(
+ pattern_reshuffle_masks[pattern_size - 1].data()));
+ return {pattern, reshuffle_mask};
+ }
+
+ #endif // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
+
+ // Fallback for when we need to copy while extending the pattern, for example
+ // copying 10 bytes from 3 positions back abc -> abcabcabcabca.
+ //
+ // REQUIRES: [dst - offset, dst + 64) is a valid address range.
+ SNAPPY_ATTRIBUTE_ALWAYS_INLINE
+ static inline bool Copy64BytesWithPatternExtension(char* dst, size_t offset) {
+ #if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
+ if (SNAPPY_PREDICT_TRUE(offset <= 16)) {
+ switch (offset) {
+ case 0:
+ return false;
+ case 1: {
+ // TODO: Ideally we should memset, move back once the
+ // codegen issues are fixed.
+ V128 pattern = V128_DupChar(dst[-1]);
+ for (int i = 0; i < 4; i++) {
+ V128_StoreU(reinterpret_cast<V128*>(dst + 16 * i), pattern);
+ }
+ return true;
+ }
+ case 2:
+ case 4:
+ case 8:
+ case 16: {
+ V128 pattern = LoadPattern(dst - offset, offset);
+ for (int i = 0; i < 4; i++) {
+ V128_StoreU(reinterpret_cast<V128*>(dst + 16 * i), pattern);
+ }
+ return true;
+ }
+ default: {
+ auto pattern_and_reshuffle_mask =
+ LoadPatternAndReshuffleMask(dst - offset, offset);
+ V128 pattern = pattern_and_reshuffle_mask.first;
+ V128 reshuffle_mask = pattern_and_reshuffle_mask.second;
+ for (int i = 0; i < 4; i++) {
+ V128_StoreU(reinterpret_cast<V128*>(dst + 16 * i), pattern);
+ pattern = V128_Shuffle(pattern, reshuffle_mask);
+ }
+ return true;
+ }
+ }
+ }
+ #else
+ if (SNAPPY_PREDICT_TRUE(offset < 16)) {
+ if (SNAPPY_PREDICT_FALSE(offset == 0)) return false;
+ // Extend the pattern to the first 16 bytes.
+ // The simpler formulation of `dst[i - offset]` induces undefined behavior.
+ for (int i = 0; i < 16; i++) dst[i] = (dst - offset)[i];
+ // Find a multiple of pattern >= 16.
+ static std::array<uint8_t, 16> pattern_sizes = []() {
+ std::array<uint8_t, 16> res;
+ for (int i = 1; i < 16; i++) res[i] = (16 / i + 1) * i;
+ return res;
+ }();
+ offset = pattern_sizes[offset];
+ for (int i = 1; i < 4; i++) {
+ std::memcpy(dst + i * 16, dst + i * 16 - offset, 16);
+ }
+ return true;
+ }
+ #endif // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
+
+ // Very rare.
+ for (int i = 0; i < 4; i++) {
+ std::memcpy(dst + i * 16, dst + i * 16 - offset, 16);
+ }
+ return true;
+ }
+
+ // Copy [src, src+(op_limit-op)) to [op, op_limit) but faster than
  // IncrementalCopySlow. buf_limit is the address past the end of the writable
  // region of the buffer.
  inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
  char* const buf_limit) {
+ #if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
+ constexpr int big_pattern_size_lower_bound = 16;
+ #else
+ constexpr int big_pattern_size_lower_bound = 8;
+ #endif
+
  // Terminology:
  //
  // slop = buf_limit - op
  // pat = op - src
- // len = limit - op
+ // len = op_limit - op
  assert(src < op);
- assert(op <= op_limit);
+ assert(op < op_limit);
  assert(op_limit <= buf_limit);
- // NOTE: The compressor always emits 4 <= len <= 64. It is ok to assume that
- // to optimize this function but we have to also handle other cases in case
- // the input does not satisfy these conditions.
+ // NOTE: The copy tags use 3 or 6 bits to store the copy length, so len <= 64.
+ assert(op_limit - op <= 64);
+ // NOTE: In practice the compressor always emits len >= 4, so it is ok to
+ // assume that to optimize this function, but this is not guaranteed by the
+ // compression format, so we have to also handle len < 4 in case the input
+ // does not satisfy these conditions.

  size_t pattern_size = op - src;
  // The cases are split into different branches to allow the branch predictor,
@@ -216,43 +454,78 @@ inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
  // input. In general if we always predict len <= 16 it would be an ok
  // prediction.
  //
- // In order to be fast we want a pattern >= 8 bytes and an unrolled loop
- // copying 2x 8 bytes at a time.
-
- // Handle the uncommon case where pattern is less than 8 bytes.
- if (SNAPPY_PREDICT_FALSE(pattern_size < 8)) {
- #if SNAPPY_HAVE_SSSE3
+ // In order to be fast we want a pattern >= 16 bytes (or 8 bytes in non-SSE)
+ // and an unrolled loop copying 1x 16 bytes (or 2x 8 bytes in non-SSE) at a
+ // time.
+
+ // Handle the uncommon case where pattern is less than 16 (or 8 in non-SSE)
+ // bytes.
+ if (pattern_size < big_pattern_size_lower_bound) {
+ #if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
  // Load the first eight bytes into an 128-bit XMM register, then use PSHUFB
  // to permute the register's contents in-place into a repeating sequence of
  // the first "pattern_size" bytes.
  // For example, suppose:
  // src == "abc"
  // op == op + 3
- // After _mm_shuffle_epi8(), "pattern" will have five copies of "abc"
+ // After V128_Shuffle(), "pattern" will have five copies of "abc"
  // followed by one byte of slop: abcabcabcabcabca.
  //
  // The non-SSE fallback implementation suffers from store-forwarding stalls
  // because its loads and stores partly overlap. By expanding the pattern
  // in-place, we avoid the penalty.
- if (SNAPPY_PREDICT_TRUE(op <= buf_limit - 16)) {
- const __m128i shuffle_mask = _mm_load_si128(
- reinterpret_cast<const __m128i*>(pshufb_fill_patterns)
- + pattern_size - 1);
- const __m128i pattern = _mm_shuffle_epi8(
- _mm_loadl_epi64(reinterpret_cast<const __m128i*>(src)), shuffle_mask);
- // Uninitialized bytes are masked out by the shuffle mask.
- // TODO: remove annotation and macro defs once MSan is fixed.
- SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(&pattern, sizeof(pattern));
- pattern_size *= 16 / pattern_size;
- char* op_end = std::min(op_limit, buf_limit - 15);
- while (op < op_end) {
- _mm_storeu_si128(reinterpret_cast<__m128i*>(op), pattern);
- op += pattern_size;
+
+ // Typically, the op_limit is the gating factor so try to simplify the loop
+ // based on that.
+ if (SNAPPY_PREDICT_TRUE(op_limit <= buf_limit - 15)) {
+ auto pattern_and_reshuffle_mask =
+ LoadPatternAndReshuffleMask(src, pattern_size);
+ V128 pattern = pattern_and_reshuffle_mask.first;
+ V128 reshuffle_mask = pattern_and_reshuffle_mask.second;
+
+ // There is at least one, and at most four 16-byte blocks. Writing four
+ // conditionals instead of a loop allows FDO to layout the code with
+ // respect to the actual probabilities of each length.
+ // TODO: Replace with loop with trip count hint.
+ V128_StoreU(reinterpret_cast<V128*>(op), pattern);
+
+ if (op + 16 < op_limit) {
+ pattern = V128_Shuffle(pattern, reshuffle_mask);
+ V128_StoreU(reinterpret_cast<V128*>(op + 16), pattern);
  }
- if (SNAPPY_PREDICT_TRUE(op >= op_limit)) return op_limit;
+ if (op + 32 < op_limit) {
+ pattern = V128_Shuffle(pattern, reshuffle_mask);
+ V128_StoreU(reinterpret_cast<V128*>(op + 32), pattern);
+ }
+ if (op + 48 < op_limit) {
+ pattern = V128_Shuffle(pattern, reshuffle_mask);
+ V128_StoreU(reinterpret_cast<V128*>(op + 48), pattern);
+ }
+ return op_limit;
+ }
+ char* const op_end = buf_limit - 15;
+ if (SNAPPY_PREDICT_TRUE(op < op_end)) {
+ auto pattern_and_reshuffle_mask =
+ LoadPatternAndReshuffleMask(src, pattern_size);
+ V128 pattern = pattern_and_reshuffle_mask.first;
+ V128 reshuffle_mask = pattern_and_reshuffle_mask.second;
+
+ // This code path is relatively cold however so we save code size
+ // by avoiding unrolling and vectorizing.
+ //
+ // TODO: Remove pragma when when cold regions don't get
+ // vectorized or unrolled.
+ #ifdef __clang__
+ #pragma clang loop unroll(disable)
+ #endif
+ do {
+ V128_StoreU(reinterpret_cast<V128*>(op), pattern);
+ pattern = V128_Shuffle(pattern, reshuffle_mask);
+ op += 16;
+ } while (SNAPPY_PREDICT_TRUE(op < op_end));
  }
- return IncrementalCopySlow(src, op, op_limit);
- #else // !SNAPPY_HAVE_SSSE3
+ return IncrementalCopySlow(op - pattern_size, op, op_limit);
+ #else // !SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
  // If plenty of buffer space remains, expand the pattern to at least 8
  // bytes. The way the following loop is written, we need 8 bytes of buffer
  // space if pattern_size >= 4, 11 bytes if pattern_size is 1 or 3, and 10
@@ -269,36 +542,32 @@ inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
  } else {
  return IncrementalCopySlow(src, op, op_limit);
  }
- #endif // SNAPPY_HAVE_SSSE3
+ #endif // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
  }
- assert(pattern_size >= 8);
+ assert(pattern_size >= big_pattern_size_lower_bound);
+ constexpr bool use_16bytes_chunk = big_pattern_size_lower_bound == 16;

- // Copy 2x 8 bytes at a time. Because op - src can be < 16, a single
- // UnalignedCopy128 might overwrite data in op. UnalignedCopy64 is safe
- // because expanding the pattern to at least 8 bytes guarantees that
- // op - src >= 8.
+ // Copy 1x 16 bytes (or 2x 8 bytes in non-SSE) at a time. Because op - src can
+ // be < 16 in non-SSE, a single UnalignedCopy128 might overwrite data in op.
+ // UnalignedCopy64 is safe because expanding the pattern to at least 8 bytes
+ // guarantees that op - src >= 8.
  //
  // Typically, the op_limit is the gating factor so try to simplify the loop
  // based on that.
- if (SNAPPY_PREDICT_TRUE(op_limit <= buf_limit - 16)) {
+ if (SNAPPY_PREDICT_TRUE(op_limit <= buf_limit - 15)) {
  // There is at least one, and at most four 16-byte blocks. Writing four
  // conditionals instead of a loop allows FDO to layout the code with respect
  // to the actual probabilities of each length.
  // TODO: Replace with loop with trip count hint.
- UnalignedCopy64(src, op);
- UnalignedCopy64(src + 8, op + 8);
-
+ ConditionalUnalignedCopy128<use_16bytes_chunk>(src, op);
  if (op + 16 < op_limit) {
- UnalignedCopy64(src + 16, op + 16);
- UnalignedCopy64(src + 24, op + 24);
+ ConditionalUnalignedCopy128<use_16bytes_chunk>(src + 16, op + 16);
  }
  if (op + 32 < op_limit) {
- UnalignedCopy64(src + 32, op + 32);
- UnalignedCopy64(src + 40, op + 40);
+ ConditionalUnalignedCopy128<use_16bytes_chunk>(src + 32, op + 32);
  }
  if (op + 48 < op_limit) {
- UnalignedCopy64(src + 48, op + 48);
- UnalignedCopy64(src + 56, op + 56);
+ ConditionalUnalignedCopy128<use_16bytes_chunk>(src + 48, op + 48);
  }
  return op_limit;
  }
@@ -312,12 +581,10 @@ inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
  #ifdef __clang__
  #pragma clang loop unroll(disable)
  #endif
- for (char *op_end = buf_limit - 16; op < op_end; op += 16, src += 16) {
- UnalignedCopy64(src, op);
- UnalignedCopy64(src + 8, op + 8);
+ for (char* op_end = buf_limit - 16; op < op_end; op += 16, src += 16) {
+ ConditionalUnalignedCopy128<use_16bytes_chunk>(src, op);
  }
- if (op >= op_limit)
- return op_limit;
+ if (op >= op_limit) return op_limit;

  // We only take this branch if we didn't have enough slop and we can do a
  // single 8 byte copy.
@@ -332,11 +599,9 @@ inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
  } // namespace

  template <bool allow_fast_path>
- static inline char* EmitLiteral(char* op,
- const char* literal,
- int len) {
+ static inline char* EmitLiteral(char* op, const char* literal, int len) {
  // The vast majority of copies are below 16 bytes, for which a
- // call to memcpy is overkill. This fast path can sometimes
+ // call to std::memcpy() is overkill. This fast path can sometimes
  // copy up to 15 bytes too much, but that is okay in the
  // main loop, since we have a bit to go on for both sides:
  //
@@ -345,7 +610,7 @@ static inline char* EmitLiteral(char* op,
  // if not, allow_fast_path = false.
  // - The output will always have 32 spare bytes (see
  // MaxCompressedLength).
- assert(len > 0); // Zero-length literals are disallowed
+ assert(len > 0); // Zero-length literals are disallowed
  int n = len - 1;
  if (allow_fast_path && len <= 16) {
  // Fits in tag byte
@@ -366,11 +631,23 @@ static inline char* EmitLiteral(char* op,
  // Encode in upcoming bytes.
  // Write 4 bytes, though we may care about only 1 of them. The output buffer
  // is guaranteed to have at least 3 more spaces left as 'len >= 61' holds
- // here and there is a memcpy of size 'len' below.
+ // here and there is a std::memcpy() of size 'len' below.
  LittleEndian::Store32(op, n);
  op += count;
  }
- memcpy(op, literal, len);
+ // When allow_fast_path is true, we can overwrite up to 16 bytes.
+ if (allow_fast_path) {
+ char* destination = op;
+ const char* source = literal;
+ const char* end = destination + len;
+ do {
+ std::memcpy(destination, source, 16);
+ destination += 16;
+ source += 16;
+ } while (destination < end);
+ } else {
+ std::memcpy(op, literal, len);
+ }
  return op + len;
  }

@@ -381,15 +658,22 @@ static inline char* EmitCopyAtMost64(char* op, size_t offset, size_t len) {
  assert(offset < 65536);
  assert(len_less_than_12 == (len < 12));

- if (len_less_than_12 && SNAPPY_PREDICT_TRUE(offset < 2048)) {
- // offset fits in 11 bits. The 3 highest go in the top of the first byte,
- // and the rest go in the second byte.
- *op++ = COPY_1_BYTE_OFFSET + ((len - 4) << 2) + ((offset >> 3) & 0xe0);
- *op++ = offset & 0xff;
+ if (len_less_than_12) {
+ uint32_t u = (len << 2) + (offset << 8);
+ uint32_t copy1 = COPY_1_BYTE_OFFSET - (4 << 2) + ((offset >> 3) & 0xe0);
+ uint32_t copy2 = COPY_2_BYTE_OFFSET - (1 << 2);
+ // It turns out that offset < 2048 is a difficult to predict branch.
+ // `perf record` shows this is the highest percentage of branch misses in
+ // benchmarks. This code produces branch free code, the data dependency
+ // chain that bottlenecks the throughput is so long that a few extra
+ // instructions are completely free (IPC << 6 because of data deps).
+ u += offset < 2048 ? copy1 : copy2;
+ LittleEndian::Store32(op, u);
+ op += offset < 2048 ? 2 : 3;
  } else {
  // Write 4 bytes, though we only care about 3 of them. The output buffer
  // is required to have some slack, so the extra byte won't overrun it.
- uint32 u = COPY_2_BYTE_OFFSET + ((len - 1) << 2) + (offset << 8);
+ uint32_t u = COPY_2_BYTE_OFFSET + ((len - 1) << 2) + (offset << 8);
  LittleEndian::Store32(op, u);
  op += 3;
  }
@@ -428,7 +712,7 @@ static inline char* EmitCopy(char* op, size_t offset, size_t len) {
  }

  bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
- uint32 v = 0;
+ uint32_t v = 0;
  const char* limit = start + n;
  if (Varint::Parse32WithLimit(start, limit, &v) != NULL) {
  *result = v;
@@ -439,7 +723,7 @@ bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
  }

  namespace {
- uint32 CalculateTableSize(uint32 input_size) {
+ uint32_t CalculateTableSize(uint32_t input_size) {
  static_assert(
  kMaxHashTableSize >= kMinHashTableSize,
  "kMaxHashTableSize should be greater or equal to kMinHashTableSize.");
@@ -462,7 +746,7 @@ WorkingMemory::WorkingMemory(size_t input_size) {
  size_ = table_size * sizeof(*table_) + max_fragment_size +
  MaxCompressedLength(max_fragment_size);
  mem_ = std::allocator<char>().allocate(size_);
- table_ = reinterpret_cast<uint16*>(mem_);
+ table_ = reinterpret_cast<uint16_t*>(mem_);
  input_ = mem_ + table_size * sizeof(*table_);
  output_ = input_ + max_fragment_size;
  }
@@ -471,8 +755,8 @@ WorkingMemory::~WorkingMemory() {
  std::allocator<char>().deallocate(mem_, size_);
  }

- uint16* WorkingMemory::GetHashTable(size_t fragment_size,
- int* table_size) const {
+ uint16_t* WorkingMemory::GetHashTable(size_t fragment_size,
+ int* table_size) const {
  const size_t htsize = CalculateTableSize(fragment_size);
  memset(table_, 0, htsize * sizeof(*table_));
  *table_size = htsize;
@@ -480,49 +764,6 @@ uint16* WorkingMemory::GetHashTable(size_t fragment_size,
  }
  } // end namespace internal

- // For 0 <= offset <= 4, GetUint32AtOffset(GetEightBytesAt(p), offset) will
- // equal UNALIGNED_LOAD32(p + offset). Motivation: On x86-64 hardware we have
- // empirically found that overlapping loads such as
- // UNALIGNED_LOAD32(p) ... UNALIGNED_LOAD32(p+1) ... UNALIGNED_LOAD32(p+2)
- // are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to uint32.
- //
- // We have different versions for 64- and 32-bit; ideally we would avoid the
- // two functions and just inline the UNALIGNED_LOAD64 call into
- // GetUint32AtOffset, but GCC (at least not as of 4.6) is seemingly not clever
- // enough to avoid loading the value multiple times then. For 64-bit, the load
- // is done when GetEightBytesAt() is called, whereas for 32-bit, the load is
- // done at GetUint32AtOffset() time.
-
- #ifdef ARCH_K8
-
- typedef uint64 EightBytesReference;
-
- static inline EightBytesReference GetEightBytesAt(const char* ptr) {
- return UNALIGNED_LOAD64(ptr);
- }
-
- static inline uint32 GetUint32AtOffset(uint64 v, int offset) {
- assert(offset >= 0);
- assert(offset <= 4);
- return v >> (LittleEndian::IsLittleEndian() ? 8 * offset : 32 - 8 * offset);
- }
-
- #else
-
- typedef const char* EightBytesReference;
-
- static inline EightBytesReference GetEightBytesAt(const char* ptr) {
- return ptr;
- }
-
- static inline uint32 GetUint32AtOffset(const char* v, int offset) {
- assert(offset >= 0);
- assert(offset <= 4);
- return UNALIGNED_LOAD32(v + offset);
- }
-
- #endif
-
  // Flat array compression that does not emit the "uncompressed length"
  // prefix. Compresses "input" string to the "*op" buffer.
  //
@@ -535,29 +776,25 @@ static inline uint32 GetUint32AtOffset(const char* v, int offset) {
  // Returns an "end" pointer into "op" buffer.
  // "end - op" is the compressed size of "input".
  namespace internal {
- char* CompressFragment(const char* input,
- size_t input_size,
- char* op,
- uint16* table,
- const int table_size) {
+ char* CompressFragment(const char* input, size_t input_size, char* op,
+ uint16_t* table, const int table_size) {
  // "ip" is the input pointer, and "op" is the output pointer.
  const char* ip = input;
  assert(input_size <= kBlockSize);
  assert((table_size & (table_size - 1)) == 0); // table must be power of two
- const int shift = 32 - Bits::Log2Floor(table_size);
- assert(static_cast<int>(kuint32max >> shift) == table_size - 1);
+ const uint32_t mask = 2 * (table_size - 1);
  const char* ip_end = input + input_size;
  const char* base_ip = ip;
- // Bytes in [next_emit, ip) will be emitted as literal bytes. Or
- // [next_emit, ip_end) after the main loop.
- const char* next_emit = ip;

  const size_t kInputMarginBytes = 15;
  if (SNAPPY_PREDICT_TRUE(input_size >= kInputMarginBytes)) {
  const char* ip_limit = input + input_size - kInputMarginBytes;

- for (uint32 next_hash = Hash(++ip, shift); ; ) {
- assert(next_emit < ip);
+ for (uint32_t preload = LittleEndian::Load32(ip + 1);;) {
+ // Bytes in [next_emit, ip) will be emitted as literal bytes. Or
+ // [next_emit, ip_end) after the main loop.
+ const char* next_emit = ip++;
+ uint64_t data = LittleEndian::Load64(ip);
  // The body of this loop calls EmitLiteral once and then EmitCopy one or
  // more times. (The exception is that when we're close to exhausting
  // the input we goto emit_remainder.)
@@ -583,28 +820,60 @@ char* CompressFragment(const char* input,
  // The "skip" variable keeps track of how many bytes there are since the
  // last match; dividing it by 32 (ie. right-shifting by five) gives the
  // number of bytes to move ahead for each iteration.
- uint32 skip = 32;
+ uint32_t skip = 32;

- const char* next_ip = ip;
  const char* candidate;
- do {
- ip = next_ip;
- uint32 hash = next_hash;
- assert(hash == Hash(ip, shift));
- uint32 bytes_between_hash_lookups = skip >> 5;
+ if (ip_limit - ip >= 16) {
+ auto delta = ip - base_ip;
+ for (int j = 0; j < 4; ++j) {
+ for (int k = 0; k < 4; ++k) {
+ int i = 4 * j + k;
+ // These for-loops are meant to be unrolled. So we can freely
+ // special case the first iteration to use the value already
+ // loaded in preload.
+ uint32_t dword = i == 0 ? preload : static_cast<uint32_t>(data);
+ assert(dword == LittleEndian::Load32(ip + i));
+ uint16_t* table_entry = TableEntry(table, dword, mask);
+ candidate = base_ip + *table_entry;
+ assert(candidate >= base_ip);
+ assert(candidate < ip + i);
+ *table_entry = delta + i;
+ if (SNAPPY_PREDICT_FALSE(LittleEndian::Load32(candidate) == dword)) {
+ *op = LITERAL | (i << 2);
+ UnalignedCopy128(next_emit, op + 1);
+ ip += i;
+ op = op + i + 2;
+ goto emit_match;
+ }
+ data >>= 8;
+ }
+ data = LittleEndian::Load64(ip + 4 * j + 4);
+ }
+ ip += 16;
+ skip += 16;
+ }
+ while (true) {
+ assert(static_cast<uint32_t>(data) == LittleEndian::Load32(ip));
+ uint16_t* table_entry = TableEntry(table, data, mask);
+ uint32_t bytes_between_hash_lookups = skip >> 5;
  skip += bytes_between_hash_lookups;
- next_ip = ip + bytes_between_hash_lookups;
+ const char* next_ip = ip + bytes_between_hash_lookups;
  if (SNAPPY_PREDICT_FALSE(next_ip > ip_limit)) {
+ ip = next_emit;
  goto emit_remainder;
  }
- next_hash = Hash(next_ip, shift);
- candidate = base_ip + table[hash];
+ candidate = base_ip + *table_entry;
  assert(candidate >= base_ip);
  assert(candidate < ip);

- table[hash] = ip - base_ip;
- } while (SNAPPY_PREDICT_TRUE(UNALIGNED_LOAD32(ip) !=
- UNALIGNED_LOAD32(candidate)));
+ *table_entry = ip - base_ip;
+ if (SNAPPY_PREDICT_FALSE(static_cast<uint32_t>(data) ==
+ LittleEndian::Load32(candidate))) {
+ break;
+ }
+ data = LittleEndian::Load32(next_ip);
+ ip = next_ip;
+ }

  // Step 2: A 4-byte match has been found. We'll later see if more
  // than 4 bytes match. But, prior to the match, input
@@ -620,15 +889,13 @@ char* CompressFragment(const char* input,
  // though we don't yet know how big the literal will be. We handle that
  // by proceeding to the next iteration of the main loop. We also can exit
  // this loop via goto if we get close to exhausting the input.
- EightBytesReference input_bytes;
- uint32 candidate_bytes = 0;
-
+ emit_match:
  do {
  // We have a 4-byte match at ip, and no need to emit any
  // "literal bytes" prior to ip.
  const char* base = ip;
  std::pair<size_t, bool> p =
- FindMatchLength(candidate + 4, ip + 4, ip_end);
+ FindMatchLength(candidate + 4, ip + 4, ip_end, &data);
  size_t matched = 4 + p.first;
  ip += matched;
  size_t offset = base - candidate;
@@ -638,32 +905,41 @@ char* CompressFragment(const char* input,
  } else {
  op = EmitCopy</*len_less_than_12=*/false>(op, offset, matched);
  }
- next_emit = ip;
  if (SNAPPY_PREDICT_FALSE(ip >= ip_limit)) {
  goto emit_remainder;
  }
+ // Expect 5 bytes to match
+ assert((data & 0xFFFFFFFFFF) ==
+ (LittleEndian::Load64(ip) & 0xFFFFFFFFFF));
  // We are now looking for a 4-byte match again. We read
- // table[Hash(ip, shift)] for that. To improve compression,
- // we also update table[Hash(ip - 1, shift)] and table[Hash(ip, shift)].
- input_bytes = GetEightBytesAt(ip - 1);
- uint32 prev_hash = HashBytes(GetUint32AtOffset(input_bytes, 0), shift);
- table[prev_hash] = ip - base_ip - 1;
- uint32 cur_hash = HashBytes(GetUint32AtOffset(input_bytes, 1), shift);
- candidate = base_ip + table[cur_hash];
- candidate_bytes = UNALIGNED_LOAD32(candidate);
- table[cur_hash] = ip - base_ip;
- } while (GetUint32AtOffset(input_bytes, 1) == candidate_bytes);
-
- next_hash = HashBytes(GetUint32AtOffset(input_bytes, 2), shift);
- ++ip;
+ // table[Hash(ip, mask)] for that. To improve compression,
+ // we also update table[Hash(ip - 1, mask)] and table[Hash(ip, mask)].
+ *TableEntry(table, LittleEndian::Load32(ip - 1), mask) =
+ ip - base_ip - 1;
+ uint16_t* table_entry = TableEntry(table, data, mask);
+ candidate = base_ip + *table_entry;
+ *table_entry = ip - base_ip;
+ // Measurements on the benchmarks have shown the following probabilities
+ // for the loop to exit (ie. avg. number of iterations is reciprocal).
+ // BM_Flat/6 txt1 p = 0.3-0.4
+ // BM_Flat/7 txt2 p = 0.35
+ // BM_Flat/8 txt3 p = 0.3-0.4
+ // BM_Flat/9 txt3 p = 0.34-0.4
+ // BM_Flat/10 pb p = 0.4
+ // BM_Flat/11 gaviota p = 0.1
+ // BM_Flat/12 cp p = 0.5
+ // BM_Flat/13 c p = 0.3
+ } while (static_cast<uint32_t>(data) == LittleEndian::Load32(candidate));
+ // Because the least significant 5 bytes matched, we can utilize data
+ // for the next iteration.
+ preload = data >> 8;
  }
  }

- emit_remainder:
+ emit_remainder:
  // Emit the remaining bytes as a literal
- if (next_emit < ip_end) {
- op = EmitLiteral</*allow_fast_path=*/false>(op, next_emit,
- ip_end - next_emit);
+ if (ip < ip_end) {
+ op = EmitLiteral</*allow_fast_path=*/false>(op, ip, ip_end - ip);
  }

  return op;
@@ -672,7 +948,12 @@
  // Called back at avery compression call to trace parameters and sizes.
  static inline void Report(const char *algorithm, size_t compressed_size,
- size_t uncompressed_size) {}
+ size_t uncompressed_size) {
+ // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+ (void)algorithm;
+ (void)compressed_size;
+ (void)uncompressed_size;
+ }

  // Signature of output types needed by decompression code.
  // The decompression code is templatized on a type that obeys this
@@ -684,12 +965,28 @@ static inline void Report(const char *algorithm, size_t compressed_size,
  // // Called before decompression
  // void SetExpectedLength(size_t length);
  //
+ // // For performance a writer may choose to donate the cursor variable to the
+ // // decompression function. The decompression will inject it in all its
+ // // function calls to the writer. Keeping the important output cursor as a
+ // // function local stack variable allows the compiler to keep it in
+ // // register, which greatly aids performance by avoiding loads and stores of
+ // // this variable in the fast path loop iterations.
+ // T GetOutputPtr() const;
+ //
+ // // At end of decompression the loop donates the ownership of the cursor
+ // // variable back to the writer by calling this function.
+ // void SetOutputPtr(T op);
+ //
  // // Called after decompression
  // bool CheckLength() const;
  //
  // // Called repeatedly during decompression
- // bool Append(const char* ip, size_t length);
- // bool AppendFromSelf(uint32 offset, size_t length);
+ // // Each function get a pointer to the op (output pointer), that the writer
+ // // can use and update. Note it's important that these functions get fully
+ // // inlined so that no actual address of the local variable needs to be
+ // // taken.
+ // bool Append(const char* ip, size_t length, T* op);
+ // bool AppendFromSelf(uint32_t offset, size_t length, T* op);
  //
  // // The rules for how TryFastAppend differs from Append are somewhat
  // // convoluted:
@@ -711,25 +1008,25 @@ static inline void Report(const char *algorithm, size_t compressed_size,
  // // as it is unlikely that one would implement a fast path accepting
  // // this much data.
  // //
- // bool TryFastAppend(const char* ip, size_t available, size_t length);
+ // bool TryFastAppend(const char* ip, size_t available, size_t length, T* op);
  // };

- static inline uint32 ExtractLowBytes(uint32 v, int n) {
+ static inline uint32_t ExtractLowBytes(const uint32_t& v, int n) {
  assert(n >= 0);
  assert(n <= 4);
  #if SNAPPY_HAVE_BMI2
  return _bzhi_u32(v, 8 * n);
  #else
- // This needs to be wider than uint32 otherwise `mask << 32` will be
+ // This needs to be wider than uint32_t otherwise `mask << 32` will be
  // undefined.
- uint64 mask = 0xffffffff;
+ uint64_t mask = 0xffffffff;
  return v & ~(mask << (8 * n));
  #endif
  }

- static inline bool LeftShiftOverflows(uint8 value, uint32 shift) {
+ static inline bool LeftShiftOverflows(uint8_t value, uint32_t shift) {
  assert(shift < 32);
- static const uint8 masks[] = {
+ static const uint8_t masks[] = {
  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
@@ -737,15 +1034,289 @@ static inline bool LeftShiftOverflows(uint8 value, uint32 shift) {
737
1034
  return (value & masks[shift]) != 0;
738
1035
  }
739
1036
 
1037
+ inline bool Copy64BytesWithPatternExtension(ptrdiff_t dst, size_t offset) {
1038
+ // TODO: Switch to [[maybe_unused]] when we can assume C++17.
1039
+ (void)dst;
1040
+ return offset != 0;
1041
+ }
1042
+
1043
+ // Copies between size bytes and 64 bytes from src to dest. size cannot exceed
1044
+ // 64. More than size bytes, but never exceeding 64, might be copied if doing
1045
+ // so gives better performance. [src, src + size) must not overlap with
1046
+ // [dst, dst + size), but [src, src + 64) may overlap with [dst, dst + 64).
1047
+ void MemCopy64(char* dst, const void* src, size_t size) {
1048
+ // Always copy this many bytes. If that's below size then copy the full 64.
1049
+ constexpr int kShortMemCopy = 32;
1050
+
1051
+ assert(size <= 64);
1052
+ assert(std::less_equal<const void*>()(static_cast<const char*>(src) + size,
1053
+ dst) ||
1054
+ std::less_equal<const void*>()(dst + size, src));
1055
+
1056
+ // We know that src and dst are at least size bytes apart. However, because we
1057
+ // might copy more than size bytes the copy still might overlap past size.
1058
+ // E.g. if src and dst appear consecutively in memory (src + size >= dst).
1059
+ // TODO: Investigate wider copies on other platforms.
1060
+ #if defined(__x86_64__) && defined(__AVX__)
1061
+ assert(kShortMemCopy <= 32);
1062
+ __m256i data = _mm256_lddqu_si256(static_cast<const __m256i *>(src));
1063
+ _mm256_storeu_si256(reinterpret_cast<__m256i *>(dst), data);
1064
+ // Profiling shows that nearly all copies are short.
1065
+ if (SNAPPY_PREDICT_FALSE(size > kShortMemCopy)) {
1066
+ data = _mm256_lddqu_si256(static_cast<const __m256i *>(src) + 1);
1067
+ _mm256_storeu_si256(reinterpret_cast<__m256i *>(dst) + 1, data);
1068
+ }
1069
+ #else
1070
+ std::memmove(dst, src, kShortMemCopy);
1071
+ // Profiling shows that nearly all copies are short.
1072
+ if (SNAPPY_PREDICT_FALSE(size > kShortMemCopy)) {
1073
+ std::memmove(dst + kShortMemCopy,
1074
+ static_cast<const uint8_t*>(src) + kShortMemCopy,
1075
+ 64 - kShortMemCopy);
1076
+ }
1077
+ #endif
1078
+ }
1079
+
1080
+ void MemCopy64(ptrdiff_t dst, const void* src, size_t size) {
1081
+ // TODO: Switch to [[maybe_unused]] when we can assume C++17.
1082
+ (void)dst;
1083
+ (void)src;
1084
+ (void)size;
1085
+ }
1086
+
1087
+ void ClearDeferred(const void** deferred_src, size_t* deferred_length,
1088
+ uint8_t* safe_source) {
1089
+ *deferred_src = safe_source;
1090
+ *deferred_length = 0;
1091
+ }
1092
+
1093
+ void DeferMemCopy(const void** deferred_src, size_t* deferred_length,
1094
+ const void* src, size_t length) {
1095
+ *deferred_src = src;
1096
+ *deferred_length = length;
1097
+ }
1098
+
1099
+ SNAPPY_ATTRIBUTE_ALWAYS_INLINE
1100
+ inline size_t AdvanceToNextTagARMOptimized(const uint8_t** ip_p, size_t* tag) {
1101
+ const uint8_t*& ip = *ip_p;
1102
+ // This section is crucial for the throughput of the decompression loop.
1103
+ // The latency of an iteration is fundamentally constrained by the
1104
+ // following data chain on ip.
1105
+ // ip -> c = Load(ip) -> delta1 = (c & 3) -> ip += delta1 or delta2
1106
+ // delta2 = ((c >> 2) + 1) ip++
1107
+ // This is different from X86 optimizations because ARM has conditional add
1108
+ // instruction (csinc) and it removes several register moves.
1109
+ const size_t tag_type = *tag & 3;
1110
+ const bool is_literal = (tag_type == 0);
1111
+ if (is_literal) {
1112
+ size_t next_literal_tag = (*tag >> 2) + 1;
1113
+ *tag = ip[next_literal_tag];
1114
+ ip += next_literal_tag + 1;
1115
+ } else {
1116
+ *tag = ip[tag_type];
1117
+ ip += tag_type + 1;
1118
+ }
1119
+ return tag_type;
1120
+ }
1121
+
1122
+ SNAPPY_ATTRIBUTE_ALWAYS_INLINE
1123
+ inline size_t AdvanceToNextTagX86Optimized(const uint8_t** ip_p, size_t* tag) {
1124
+ const uint8_t*& ip = *ip_p;
1125
+ // This section is crucial for the throughput of the decompression loop.
1126
+ // The latency of an iteration is fundamentally constrained by the
1127
+ // following data chain on ip.
1128
+ // ip -> c = Load(ip) -> ip1 = ip + 1 + (c & 3) -> ip = ip1 or ip2
1129
+ // ip2 = ip + 2 + (c >> 2)
1130
+ // This amounts to 8 cycles.
1131
+ // 5 (load) + 1 (c & 3) + 1 (lea ip1, [ip + (c & 3) + 1]) + 1 (cmov)
1132
+ size_t literal_len = *tag >> 2;
1133
+ size_t tag_type = *tag;
1134
+ bool is_literal;
1135
+ #if defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(__x86_64__)
1136
+ // TODO clang misses the fact that the (c & 3) already correctly
1137
+ // sets the zero flag.
1138
+ asm("and $3, %k[tag_type]\n\t"
1139
+ : [tag_type] "+r"(tag_type), "=@ccz"(is_literal)
1140
+ :: "cc");
1141
+ #else
1142
+ tag_type &= 3;
1143
+ is_literal = (tag_type == 0);
1144
+ #endif
1145
+ // TODO
1146
+ // This is code is subtle. Loading the values first and then cmov has less
1147
+ // latency then cmov ip and then load. However clang would move the loads
1148
+ // in an optimization phase, volatile prevents this transformation.
1149
+ // Note that we have enough slop bytes (64) that the loads are always valid.
1150
+ size_t tag_literal =
1151
+ static_cast<const volatile uint8_t*>(ip)[1 + literal_len];
1152
+ size_t tag_copy = static_cast<const volatile uint8_t*>(ip)[tag_type];
1153
+ *tag = is_literal ? tag_literal : tag_copy;
1154
+ const uint8_t* ip_copy = ip + 1 + tag_type;
1155
+ const uint8_t* ip_literal = ip + 2 + literal_len;
1156
+ ip = is_literal ? ip_literal : ip_copy;
1157
+ #if defined(__GNUC__) && defined(__x86_64__)
1158
+ // TODO Clang is "optimizing" zero-extension (a totally free
1159
+ // operation) this means that after the cmov of tag, it emits another movzb
1160
+ // tag, byte(tag). It really matters as it's on the core chain. This dummy
1161
+ // asm, persuades clang to do the zero-extension at the load (it's automatic)
1162
+ // removing the expensive movzb.
1163
+ asm("" ::"r"(tag_copy));
1164
+ #endif
1165
+ return tag_type;
1166
+ }
1167
+
1168
+ // Extract the offset for copy-1 and copy-2 returns 0 for literals or copy-4.
1169
+ inline uint32_t ExtractOffset(uint32_t val, size_t tag_type) {
1170
+ // For x86 non-static storage works better. For ARM static storage is better.
1171
+ // TODO: Once the array is recognized as a register, improve the
1172
+ // readability for x86.
1173
+ #if defined(__x86_64__)
1174
+ constexpr uint64_t kExtractMasksCombined = 0x0000FFFF00FF0000ull;
1175
+ uint16_t result;
1176
+ memcpy(&result,
1177
+ reinterpret_cast<const char*>(&kExtractMasksCombined) + 2 * tag_type,
1178
+ sizeof(result));
1179
+ return val & result;
1180
+ #elif defined(__aarch64__)
1181
+ constexpr uint64_t kExtractMasksCombined = 0x0000FFFF00FF0000ull;
1182
+ return val & static_cast<uint32_t>(
1183
+ (kExtractMasksCombined >> (tag_type * 16)) & 0xFFFF);
1184
+ #else
1185
+ static constexpr uint32_t kExtractMasks[4] = {0, 0xFF, 0xFFFF, 0};
1186
+ return val & kExtractMasks[tag_type];
1187
+ #endif
1188
+ };
1189
+
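// [Editor's note -- not part of the snappy sources or of this diff.] The
// packed constant above encodes the same four 16-bit masks as the portable
// kExtractMasks table: literals and copy-4 get 0x0000 (copy-4 offsets are
// decoded outside this fast path), copy-1 gets 0x00FF (one offset byte) and
// copy-2 gets 0xFFFF (two offset bytes). The x86 memcpy of two bytes at
// offset 2 * tag_type reads the same values on a little-endian target.
static_assert(((0x0000FFFF00FF0000ull >> (16 * 0)) & 0xFFFF) == 0x0000, "");
static_assert(((0x0000FFFF00FF0000ull >> (16 * 1)) & 0xFFFF) == 0x00FF, "");
static_assert(((0x0000FFFF00FF0000ull >> (16 * 2)) & 0xFFFF) == 0xFFFF, "");
static_assert(((0x0000FFFF00FF0000ull >> (16 * 3)) & 0xFFFF) == 0x0000, "");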
1190
+ // Core decompression loop, when there is enough data available.
1191
+ // Decompresses the input buffer [ip, ip_limit) into the output buffer
1192
+ // [op, op_limit_min_slop), returning when we are either too close to the end
1193
+ // of the input buffer, or we exceed op_limit_min_slop, or an exceptional
1194
+ // tag is encountered (literal of length > 60) or a copy-4.
1195
+ // Returns {ip, op} at the point where decoding stopped.
1196
+ // TODO This function probably does not need to be inlined, as it
1197
+ // should decode large chunks at a time. This allows runtime dispatch to
1198
+ // implementations based on CPU capability (BMI2 / perhaps 32 / 64 byte memcpy).
1199
+ template <typename T>
1200
+ std::pair<const uint8_t*, ptrdiff_t> DecompressBranchless(
1201
+ const uint8_t* ip, const uint8_t* ip_limit, ptrdiff_t op, T op_base,
1202
+ ptrdiff_t op_limit_min_slop) {
1203
+ // If deferred_src is invalid, point it here.
1204
+ uint8_t safe_source[64];
1205
+ const void* deferred_src;
1206
+ size_t deferred_length;
1207
+ ClearDeferred(&deferred_src, &deferred_length, safe_source);
1208
+
1209
+ // We unroll the inner loop twice so we need twice the spare room.
1210
+ op_limit_min_slop -= kSlopBytes;
1211
+ if (2 * (kSlopBytes + 1) < ip_limit - ip && op < op_limit_min_slop) {
1212
+ const uint8_t* const ip_limit_min_slop = ip_limit - 2 * kSlopBytes - 1;
1213
+ ip++;
1214
+ // ip points just past the tag, and we touch at most kSlopBytes
1215
+ // in an iteration.
1216
+ size_t tag = ip[-1];
1217
+ #if defined(__clang__) && defined(__aarch64__)
1218
+ // Workaround for https://bugs.llvm.org/show_bug.cgi?id=51317
1219
+ // when loading 1 byte, clang for aarch64 doesn't realize that it (ldrb)
1220
+ // comes with free zero-extension, so clang generates another
1221
+ // 'and xn, xm, 0xff' before it uses that as the offset. This 'and' is
1222
+ // redundant and can be removed by adding this dummy asm, which gives
1223
+ // clang a hint that we're doing the zero-extension at the load.
1224
+ asm("" ::"r"(tag));
1225
+ #endif
1226
+ do {
1227
+ // The throughput is limited by instructions; unrolling the inner loop
1228
+ // twice reduces the number of instructions checking limits and also
1229
+ // leads to fewer mov's.
1230
+
1231
+ SNAPPY_PREFETCH(ip + 128);
1232
+ for (int i = 0; i < 2; i++) {
1233
+ const uint8_t* old_ip = ip;
1234
+ assert(tag == ip[-1]);
1235
+ // For literals tag_type = 0, hence we will always obtain 0 from
1236
+ // ExtractLowBytes. For literals offset will thus be kLiteralOffset.
1237
+ ptrdiff_t len_min_offset = kLengthMinusOffset[tag];
1238
+ #if defined(__aarch64__)
1239
+ size_t tag_type = AdvanceToNextTagARMOptimized(&ip, &tag);
1240
+ #else
1241
+ size_t tag_type = AdvanceToNextTagX86Optimized(&ip, &tag);
1242
+ #endif
1243
+ uint32_t next = LittleEndian::Load32(old_ip);
1244
+ size_t len = len_min_offset & 0xFF;
1245
+ len_min_offset -= ExtractOffset(next, tag_type);
1246
+ if (SNAPPY_PREDICT_FALSE(len_min_offset > 0)) {
1247
+ if (SNAPPY_PREDICT_FALSE(len & 0x80)) {
1248
+ // Exceptional case (long literal or copy 4).
1249
+ // Actually doing the copy here is negatively impacting the main
1250
+ // loop due to the compiler incorrectly allocating a register for
1251
+ // this fallback. Hence we just break.
1252
+ break_loop:
1253
+ ip = old_ip;
1254
+ goto exit;
1255
+ }
1256
+ // Only copy-1 or copy-2 tags can get here.
1257
+ assert(tag_type == 1 || tag_type == 2);
1258
+ std::ptrdiff_t delta = (op + deferred_length) + len_min_offset - len;
1259
+ // Guard against copies before the buffer start.
1260
+ // Execute any deferred MemCopy since we write to dst here.
1261
+ MemCopy64(op_base + op, deferred_src, deferred_length);
1262
+ op += deferred_length;
1263
+ ClearDeferred(&deferred_src, &deferred_length, safe_source);
1264
+ if (SNAPPY_PREDICT_FALSE(delta < 0 ||
1265
+ !Copy64BytesWithPatternExtension(
1266
+ op_base + op, len - len_min_offset))) {
1267
+ goto break_loop;
1268
+ }
1269
+ // We aren't deferring this copy so add length right away.
1270
+ op += len;
1271
+ continue;
1272
+ }
1273
+ std::ptrdiff_t delta = (op + deferred_length) + len_min_offset - len;
1274
+ if (SNAPPY_PREDICT_FALSE(delta < 0)) {
1275
+ // Due to the spurious offset that literals have, this will trigger
1276
+ // at the start of a block when op is still smaller than 256.
1277
+ if (tag_type != 0) goto break_loop;
1278
+ MemCopy64(op_base + op, deferred_src, deferred_length);
1279
+ op += deferred_length;
1280
+ DeferMemCopy(&deferred_src, &deferred_length, old_ip, len);
1281
+ continue;
1282
+ }
1283
+
1284
+ // For copies we need to copy from op_base + delta, for literals
1285
+ // we need to copy from old_ip (the input stream) instead.
1286
+ const void* from =
1287
+ tag_type ? reinterpret_cast<void*>(op_base + delta) : old_ip;
1288
+ MemCopy64(op_base + op, deferred_src, deferred_length);
1289
+ op += deferred_length;
1290
+ DeferMemCopy(&deferred_src, &deferred_length, from, len);
1291
+ }
1292
+ } while (ip < ip_limit_min_slop &&
1293
+ (op + deferred_length) < op_limit_min_slop);
1294
+ exit:
1295
+ ip--;
1296
+ assert(ip <= ip_limit);
1297
+ }
1298
+ // If we deferred a copy then we can perform it now. If we are up to date then we
1299
+ // might not have enough slop bytes and could run past the end.
1300
+ if (deferred_length) {
1301
+ MemCopy64(op_base + op, deferred_src, deferred_length);
1302
+ op += deferred_length;
1303
+ ClearDeferred(&deferred_src, &deferred_length, safe_source);
1304
+ }
1305
+ return {ip, op};
1306
+ }
1307
+
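// [Editor's sketch -- not part of the snappy sources or of this diff.]
// Mechanically, the ClearDeferred / DeferMemCopy / MemCopy64 calls above
// implement a "remember now, write later" pattern: a decoded element is not
// written immediately; its source and length are recorded, the pending bytes
// are flushed right before the next write to the output, and any leftover is
// flushed once more after the loop. The helper names below are illustrative
// only; the real code flushes with MemCopy64, which copies wide fixed-size
// blocks into slop space rather than exactly `len` bytes.
struct PendingCopy {
  const void* src = nullptr;
  size_t len = 0;
};

// Record an element to be written on the next flush.
inline void RecordPending(PendingCopy* pending, const void* src, size_t len) {
  pending->src = src;
  pending->len = len;
}

// Write out whatever was recorded and reset the record.
inline void FlushPending(char* dst, PendingCopy* pending) {
  if (pending->len != 0) {
    std::memcpy(dst, pending->src, pending->len);
    pending->src = nullptr;
    pending->len = 0;
  }
}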
740
1308
  // Helper class for decompression
741
1309
  class SnappyDecompressor {
742
1310
  private:
743
- Source* reader_; // Underlying source of bytes to decompress
744
- const char* ip_; // Points to next buffered byte
745
- const char* ip_limit_; // Points just past buffered bytes
746
- uint32 peeked_; // Bytes peeked from reader (need to skip)
747
- bool eof_; // Hit end of input without an error?
748
- char scratch_[kMaximumTagLength]; // See RefillTag().
1311
+ Source* reader_; // Underlying source of bytes to decompress
1312
+ const char* ip_; // Points to next buffered byte
1313
+ const char* ip_limit_; // Points just past buffered bytes
1314
+ // If ip < ip_limit_min_maxtaglen_ it's safe to read kMaximumTagLength from the
1315
+ // buffer.
1316
+ const char* ip_limit_min_maxtaglen_;
1317
+ uint32_t peeked_; // Bytes peeked from reader (need to skip)
1318
+ bool eof_; // Hit end of input without an error?
1319
+ char scratch_[kMaximumTagLength]; // See RefillTag().
749
1320
 
750
1321
  // Ensure that all of the tag metadata for the next tag is available
751
1322
  // in [ip_..ip_limit_-1]. Also ensures that [ip,ip+4] is readable even
@@ -754,14 +1325,14 @@ class SnappyDecompressor {
754
1325
  // Returns true on success, false on error or end of input.
755
1326
  bool RefillTag();
756
1327
 
1328
+ void ResetLimit(const char* ip) {
1329
+ ip_limit_min_maxtaglen_ =
1330
+ ip_limit_ - std::min<ptrdiff_t>(ip_limit_ - ip, kMaximumTagLength - 1);
1331
+ }
1332
+
757
1333
  public:
758
1334
  explicit SnappyDecompressor(Source* reader)
759
- : reader_(reader),
760
- ip_(NULL),
761
- ip_limit_(NULL),
762
- peeked_(0),
763
- eof_(false) {
764
- }
1335
+ : reader_(reader), ip_(NULL), ip_limit_(NULL), peeked_(0), eof_(false) {}
765
1336
 
766
1337
  ~SnappyDecompressor() {
767
1338
  // Advance past any bytes we peeked at from the reader
@@ -769,18 +1340,16 @@ class SnappyDecompressor {
769
1340
  }
770
1341
 
771
1342
  // Returns true iff we have hit the end of the input without an error.
772
- bool eof() const {
773
- return eof_;
774
- }
1343
+ bool eof() const { return eof_; }
775
1344
 
776
1345
  // Read the uncompressed length stored at the start of the compressed data.
777
1346
  // On success, stores the length in *result and returns true.
778
1347
  // On failure, returns false.
779
- bool ReadUncompressedLength(uint32* result) {
780
- assert(ip_ == NULL); // Must not have read anything yet
1348
+ bool ReadUncompressedLength(uint32_t* result) {
1349
+ assert(ip_ == NULL); // Must not have read anything yet
781
1350
  // Length is encoded in 1..5 bytes
782
1351
  *result = 0;
783
- uint32 shift = 0;
1352
+ uint32_t shift = 0;
784
1353
  while (true) {
785
1354
  if (shift >= 32) return false;
786
1355
  size_t n;
@@ -788,8 +1357,8 @@ class SnappyDecompressor {
788
1357
  if (n == 0) return false;
789
1358
  const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
790
1359
  reader_->Skip(1);
791
- uint32 val = c & 0x7f;
792
- if (LeftShiftOverflows(static_cast<uint8>(val), shift)) return false;
1360
+ uint32_t val = c & 0x7f;
1361
+ if (LeftShiftOverflows(static_cast<uint8_t>(val), shift)) return false;
793
1362
  *result |= val << shift;
794
1363
  if (c < 128) {
795
1364
  break;
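// [Editor's illustration -- not part of the snappy sources or of this diff.]
// The length preamble parsed above is a little-endian base-128 varint: each
// byte carries 7 payload bits and the high bit means "more bytes follow".
// A minimal reference encoder (hypothetical name; the library's own
// Varint::Encode32, used by Compress() further down, produces this format):
inline size_t EncodeLengthPreamble(uint32_t value, char* dst) {
  size_t n = 0;
  while (value >= 0x80) {
    dst[n++] = static_cast<char>((value & 0x7F) | 0x80);  // 7 bits + "more" flag
    value >>= 7;
  }
  dst[n++] = static_cast<char>(value);  // final byte, high bit clear
  return n;
}
// Example: an uncompressed length of 65536 is stored as 0x80 0x80 0x04, and
// the decoding loop above rebuilds it as 0 + (0 << 7) + (4 << 14) = 65536.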
@@ -805,38 +1374,44 @@ class SnappyDecompressor {
805
1374
  #if defined(__GNUC__) && defined(__x86_64__)
806
1375
  __attribute__((aligned(32)))
807
1376
  #endif
808
- void DecompressAllTags(Writer* writer) {
809
- // In x86, pad the function body to start 16 bytes later. This function has
810
- // a couple of hotspots that are highly sensitive to alignment: we have
811
- // observed regressions by more than 20% in some metrics just by moving the
812
- // exact same code to a different position in the benchmark binary.
813
- //
814
- // Putting this code on a 32-byte-aligned boundary + 16 bytes makes us hit
815
- // the "lucky" case consistently. Unfortunately, this is a very brittle
816
- // workaround, and future differences in code generation may reintroduce
817
- // this regression. If you experience a big, difficult to explain, benchmark
818
- // performance regression here, first try removing this hack.
819
- #if defined(__GNUC__) && defined(__x86_64__)
820
- // Two 8-byte "NOP DWORD ptr [EAX + EAX*1 + 00000000H]" instructions.
821
- asm(".byte 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00");
822
- asm(".byte 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00");
823
- #endif
824
-
1377
+ void
1378
+ DecompressAllTags(Writer* writer) {
825
1379
  const char* ip = ip_;
1380
+ ResetLimit(ip);
1381
+ auto op = writer->GetOutputPtr();
826
1382
  // We could have put this refill fragment only at the beginning of the loop.
827
1383
  // However, duplicating it at the end of each branch gives the compiler more
828
1384
  // scope to optimize the <ip_limit_ - ip> expression based on the local
829
1385
  // context, which overall increases speed.
830
- #define MAYBE_REFILL() \
831
- if (ip_limit_ - ip < kMaximumTagLength) { \
832
- ip_ = ip; \
833
- if (!RefillTag()) return; \
834
- ip = ip_; \
835
- }
836
-
1386
+ #define MAYBE_REFILL() \
1387
+ if (SNAPPY_PREDICT_FALSE(ip >= ip_limit_min_maxtaglen_)) { \
1388
+ ip_ = ip; \
1389
+ if (SNAPPY_PREDICT_FALSE(!RefillTag())) goto exit; \
1390
+ ip = ip_; \
1391
+ ResetLimit(ip); \
1392
+ } \
1393
+ preload = static_cast<uint8_t>(*ip)
1394
+
1395
+ // At the start of the for loop below the least significant byte of preload
1396
+ // contains the tag.
1397
+ uint32_t preload;
837
1398
  MAYBE_REFILL();
838
- for ( ;; ) {
839
- const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip++));
1399
+ for (;;) {
1400
+ {
1401
+ ptrdiff_t op_limit_min_slop;
1402
+ auto op_base = writer->GetBase(&op_limit_min_slop);
1403
+ if (op_base) {
1404
+ auto res =
1405
+ DecompressBranchless(reinterpret_cast<const uint8_t*>(ip),
1406
+ reinterpret_cast<const uint8_t*>(ip_limit_),
1407
+ op - op_base, op_base, op_limit_min_slop);
1408
+ ip = reinterpret_cast<const char*>(res.first);
1409
+ op = op_base + res.second;
1410
+ MAYBE_REFILL();
1411
+ }
1412
+ }
1413
+ const uint8_t c = static_cast<uint8_t>(preload);
1414
+ ip++;
840
1415
 
841
1416
  // Ratio of iterations that have LITERAL vs non-LITERAL for different
842
1417
  // inputs.
@@ -852,12 +1427,13 @@ class SnappyDecompressor {
852
1427
  // bin 24% 76%
853
1428
  if (SNAPPY_PREDICT_FALSE((c & 0x3) == LITERAL)) {
854
1429
  size_t literal_length = (c >> 2) + 1u;
855
- if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length)) {
1430
+ if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length, &op)) {
856
1431
  assert(literal_length < 61);
857
1432
  ip += literal_length;
858
1433
  // NOTE: There is no MAYBE_REFILL() here, as TryFastAppend()
859
1434
  // will not return true unless there's already at least five spare
860
1435
  // bytes in addition to the literal.
1436
+ preload = static_cast<uint8_t>(*ip);
861
1437
  continue;
862
1438
  }
863
1439
  if (SNAPPY_PREDICT_FALSE(literal_length >= 61)) {
@@ -871,48 +1447,79 @@ class SnappyDecompressor {
871
1447
 
872
1448
  size_t avail = ip_limit_ - ip;
873
1449
  while (avail < literal_length) {
874
- if (!writer->Append(ip, avail)) return;
1450
+ if (!writer->Append(ip, avail, &op)) goto exit;
875
1451
  literal_length -= avail;
876
1452
  reader_->Skip(peeked_);
877
1453
  size_t n;
878
1454
  ip = reader_->Peek(&n);
879
1455
  avail = n;
880
1456
  peeked_ = avail;
881
- if (avail == 0) return; // Premature end of input
1457
+ if (avail == 0) goto exit;
882
1458
  ip_limit_ = ip + avail;
1459
+ ResetLimit(ip);
883
1460
  }
884
- if (!writer->Append(ip, literal_length)) {
885
- return;
886
- }
1461
+ if (!writer->Append(ip, literal_length, &op)) goto exit;
887
1462
  ip += literal_length;
888
1463
  MAYBE_REFILL();
889
1464
  } else {
890
- const size_t entry = char_table[c];
891
- const size_t trailer =
892
- ExtractLowBytes(LittleEndian::Load32(ip), entry >> 11);
893
- const size_t length = entry & 0xff;
894
- ip += entry >> 11;
895
-
896
- // copy_offset/256 is encoded in bits 8..10. By just fetching
897
- // those bits, we get copy_offset (since the bit-field starts at
898
- // bit 8).
899
- const size_t copy_offset = entry & 0x700;
900
- if (!writer->AppendFromSelf(copy_offset + trailer, length)) {
901
- return;
1465
+ if (SNAPPY_PREDICT_FALSE((c & 3) == COPY_4_BYTE_OFFSET)) {
1466
+ const size_t copy_offset = LittleEndian::Load32(ip);
1467
+ const size_t length = (c >> 2) + 1;
1468
+ ip += 4;
1469
+
1470
+ if (!writer->AppendFromSelf(copy_offset, length, &op)) goto exit;
1471
+ } else {
1472
+ const ptrdiff_t entry = kLengthMinusOffset[c];
1473
+ preload = LittleEndian::Load32(ip);
1474
+ const uint32_t trailer = ExtractLowBytes(preload, c & 3);
1475
+ const uint32_t length = entry & 0xff;
1476
+ assert(length > 0);
1477
+
1478
+ // copy_offset/256 is encoded in bits 8..10. By just fetching
1479
+ // those bits, we get copy_offset (since the bit-field starts at
1480
+ // bit 8).
1481
+ const uint32_t copy_offset = trailer - entry + length;
1482
+ if (!writer->AppendFromSelf(copy_offset, length, &op)) goto exit;
1483
+
1484
+ ip += (c & 3);
1485
+ // By using the result of the previous load we reduce the critical
1486
+ // dependency chain of ip to 4 cycles.
1487
+ preload >>= (c & 3) * 8;
1488
+ if (ip < ip_limit_min_maxtaglen_) continue;
902
1489
  }
903
1490
  MAYBE_REFILL();
904
1491
  }
905
1492
  }
906
-
907
1493
  #undef MAYBE_REFILL
1494
+ exit:
1495
+ writer->SetOutputPtr(op);
908
1496
  }
909
1497
  };
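// [Editor's illustration -- not part of the snappy sources or of this diff.]
// The copy branches in DecompressAllTags above, written out plainly per the
// snappy format (hypothetical helper; the real code folds copy-1/copy-2 into
// one table-driven path and uses LittleEndian::Load32 for the offsets):
inline void DecodeCopyTagReference(const uint8_t* p, uint32_t* length,
                                   uint32_t* offset) {
  const uint8_t tag = p[0];
  switch (tag & 3) {
    case 1:  // copy with 1-byte offset: 3 length bits, 11 offset bits
      *length = 4 + ((tag >> 2) & 0x7);
      *offset = ((tag & 0xE0) << 3) | p[1];
      break;
    case 2:  // copy with 2-byte little-endian offset
      *length = (tag >> 2) + 1;
      *offset = p[1] | (p[2] << 8);
      break;
    case 3:  // copy with 4-byte little-endian offset (COPY_4_BYTE_OFFSET above)
      *length = (tag >> 2) + 1;
      *offset = p[1] | (p[2] << 8) | (p[3] << 16) |
                (static_cast<uint32_t>(p[4]) << 24);
      break;
    default:  // tag & 3 == 0 is a literal, not a copy
      *length = 0;
      *offset = 0;
      break;
  }
}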
910
1498
 
1499
+ constexpr uint32_t CalculateNeeded(uint8_t tag) {
1500
+ return ((tag & 3) == 0 && tag >= (60 * 4))
1501
+ ? (tag >> 2) - 58
1502
+ : (0x05030201 >> ((tag * 8) & 31)) & 0xFF;
1503
+ }
1504
+
1505
+ #if __cplusplus >= 201402L
1506
+ constexpr bool VerifyCalculateNeeded() {
1507
+ for (int i = 0; i < 1; i++) {
1508
+ if (CalculateNeeded(i) != (char_table[i] >> 11) + 1) return false;
1509
+ }
1510
+ return true;
1511
+ }
1512
+
1513
+ // Make sure CalculateNeeded is correct by verifying it against the established
1514
+ // table encoding the number of added bytes needed.
1515
+ static_assert(VerifyCalculateNeeded(), "");
1516
+ #endif // c++14
1517
+
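// [Editor's note -- not part of the snappy sources or of this diff.] Worked
// values for CalculateNeeded(): it returns the number of input bytes occupied
// by the tag itself plus any inline length/offset bytes, i.e. how many bytes
// must be contiguous before the tag can be decoded.
static_assert(CalculateNeeded(0x00) == 1, "short literal: just the tag byte");
static_assert(CalculateNeeded(60 * 4) == 2, "long literal: tag + 1 length byte");
static_assert(CalculateNeeded(0x01) == 2, "copy-1: tag + 1 offset byte");
static_assert(CalculateNeeded(0x02) == 3, "copy-2: tag + 2 offset bytes");
static_assert(CalculateNeeded(0x03) == 5, "copy-4: tag + 4 offset bytes");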
911
1518
  bool SnappyDecompressor::RefillTag() {
912
1519
  const char* ip = ip_;
913
1520
  if (ip == ip_limit_) {
914
1521
  // Fetch a new fragment from the reader
915
- reader_->Skip(peeked_); // All peeked bytes are used up
1522
+ reader_->Skip(peeked_); // All peeked bytes are used up
916
1523
  size_t n;
917
1524
  ip = reader_->Peek(&n);
918
1525
  peeked_ = n;
@@ -924,26 +1531,31 @@ bool SnappyDecompressor::RefillTag() {
924
1531
  // Read the tag character
925
1532
  assert(ip < ip_limit_);
926
1533
  const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
927
- const uint32 entry = char_table[c];
928
- const uint32 needed = (entry >> 11) + 1; // +1 byte for 'c'
1534
+ // At this point make sure that the data for the next tag is consecutive.
1535
+ // For copy 1 this means the next 2 bytes (tag and 1 byte offset)
1536
+ // For copy 2 the next 3 bytes (tag and 2 byte offset)
1537
+ // For copy 4 the next 5 bytes (tag and 4 byte offset)
1538
+ // For all small literals we only need 1 byte, but for literals 60...63 the
1539
+ // length is encoded in 1...4 extra bytes.
1540
+ const uint32_t needed = CalculateNeeded(c);
929
1541
  assert(needed <= sizeof(scratch_));
930
1542
 
931
1543
  // Read more bytes from reader if needed
932
- uint32 nbuf = ip_limit_ - ip;
1544
+ uint32_t nbuf = ip_limit_ - ip;
933
1545
  if (nbuf < needed) {
934
1546
  // Stitch together bytes from ip and reader to form the word
935
1547
  // contents. We store the needed bytes in "scratch_". They
936
1548
  // will be consumed immediately by the caller since we do not
937
1549
  // read more than we need.
938
- memmove(scratch_, ip, nbuf);
1550
+ std::memmove(scratch_, ip, nbuf);
939
1551
  reader_->Skip(peeked_); // All peeked bytes are used up
940
1552
  peeked_ = 0;
941
1553
  while (nbuf < needed) {
942
1554
  size_t length;
943
1555
  const char* src = reader_->Peek(&length);
944
1556
  if (length == 0) return false;
945
- uint32 to_add = std::min<uint32>(needed - nbuf, length);
946
- memcpy(scratch_ + nbuf, src, to_add);
1557
+ uint32_t to_add = std::min<uint32_t>(needed - nbuf, length);
1558
+ std::memcpy(scratch_ + nbuf, src, to_add);
947
1559
  nbuf += to_add;
948
1560
  reader_->Skip(to_add);
949
1561
  }
@@ -953,7 +1565,7 @@ bool SnappyDecompressor::RefillTag() {
953
1565
  } else if (nbuf < kMaximumTagLength) {
954
1566
  // Have enough bytes, but move into scratch_ so that we do not
955
1567
  // read past end of input
956
- memmove(scratch_, ip, nbuf);
1568
+ std::memmove(scratch_, ip, nbuf);
957
1569
  reader_->Skip(peeked_); // All peeked bytes are used up
958
1570
  peeked_ = 0;
959
1571
  ip_ = scratch_;
@@ -969,7 +1581,7 @@ template <typename Writer>
969
1581
  static bool InternalUncompress(Source* r, Writer* writer) {
970
1582
  // Read the uncompressed length from the front of the compressed input
971
1583
  SnappyDecompressor decompressor(r);
972
- uint32 uncompressed_len = 0;
1584
+ uint32_t uncompressed_len = 0;
973
1585
  if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
974
1586
 
975
1587
  return InternalUncompressAllTags(&decompressor, writer, r->Available(),
@@ -978,9 +1590,8 @@ static bool InternalUncompress(Source* r, Writer* writer) {
978
1590
 
979
1591
  template <typename Writer>
980
1592
  static bool InternalUncompressAllTags(SnappyDecompressor* decompressor,
981
- Writer* writer,
982
- uint32 compressed_len,
983
- uint32 uncompressed_len) {
1593
+ Writer* writer, uint32_t compressed_len,
1594
+ uint32_t uncompressed_len) {
984
1595
  Report("snappy_uncompress", compressed_len, uncompressed_len);
985
1596
 
986
1597
  writer->SetExpectedLength(uncompressed_len);
@@ -991,7 +1602,7 @@ static bool InternalUncompressAllTags(SnappyDecompressor* decompressor,
991
1602
  return (decompressor->eof() && writer->CheckLength());
992
1603
  }
993
1604
 
994
- bool GetUncompressedLength(Source* source, uint32* result) {
1605
+ bool GetUncompressedLength(Source* source, uint32_t* result) {
995
1606
  SnappyDecompressor decompressor(source);
996
1607
  return decompressor.ReadUncompressedLength(result);
997
1608
  }
@@ -1002,7 +1613,7 @@ size_t Compress(Source* reader, Sink* writer) {
1002
1613
  const size_t uncompressed_size = N;
1003
1614
  char ulength[Varint::kMax32];
1004
1615
  char* p = Varint::Encode32(ulength, N);
1005
- writer->Append(ulength, p-ulength);
1616
+ writer->Append(ulength, p - ulength);
1006
1617
  written += (p - ulength);
1007
1618
 
1008
1619
  internal::WorkingMemory wmem(N);
@@ -1022,13 +1633,13 @@ size_t Compress(Source* reader, Sink* writer) {
1022
1633
  fragment_size = num_to_read;
1023
1634
  } else {
1024
1635
  char* scratch = wmem.GetScratchInput();
1025
- memcpy(scratch, fragment, bytes_read);
1636
+ std::memcpy(scratch, fragment, bytes_read);
1026
1637
  reader->Skip(bytes_read);
1027
1638
 
1028
1639
  while (bytes_read < num_to_read) {
1029
1640
  fragment = reader->Peek(&fragment_size);
1030
1641
  size_t n = std::min<size_t>(fragment_size, num_to_read - bytes_read);
1031
- memcpy(scratch + bytes_read, fragment, n);
1642
+ std::memcpy(scratch + bytes_read, fragment, n);
1032
1643
  bytes_read += n;
1033
1644
  reader->Skip(n);
1034
1645
  }
@@ -1040,7 +1651,7 @@ size_t Compress(Source* reader, Sink* writer) {
1040
1651
 
1041
1652
  // Get encoding table for compression
1042
1653
  int table_size;
1043
- uint16* table = wmem.GetHashTable(num_to_read, &table_size);
1654
+ uint16_t* table = wmem.GetHashTable(num_to_read, &table_size);
1044
1655
 
1045
1656
  // Compress input_fragment and append to dest
1046
1657
  const int max_output = MaxCompressedLength(num_to_read);
@@ -1070,6 +1681,67 @@ size_t Compress(Source* reader, Sink* writer) {
1070
1681
  // IOVec interfaces
1071
1682
  // -----------------------------------------------------------------------
1072
1683
 
1684
+ // A `Source` implementation that yields the contents of an `iovec` array. Note
1685
+ // that `total_size` is the total number of bytes to be read from the elements
1686
+ // of `iov` (_not_ the total number of elements in `iov`).
1687
+ class SnappyIOVecReader : public Source {
1688
+ public:
1689
+ SnappyIOVecReader(const struct iovec* iov, size_t total_size)
1690
+ : curr_iov_(iov),
1691
+ curr_pos_(total_size > 0 ? reinterpret_cast<const char*>(iov->iov_base)
1692
+ : nullptr),
1693
+ curr_size_remaining_(total_size > 0 ? iov->iov_len : 0),
1694
+ total_size_remaining_(total_size) {
1695
+ // Skip empty leading `iovec`s.
1696
+ if (total_size > 0 && curr_size_remaining_ == 0) Advance();
1697
+ }
1698
+
1699
+ ~SnappyIOVecReader() = default;
1700
+
1701
+ size_t Available() const { return total_size_remaining_; }
1702
+
1703
+ const char* Peek(size_t* len) {
1704
+ *len = curr_size_remaining_;
1705
+ return curr_pos_;
1706
+ }
1707
+
1708
+ void Skip(size_t n) {
1709
+ while (n >= curr_size_remaining_ && n > 0) {
1710
+ n -= curr_size_remaining_;
1711
+ Advance();
1712
+ }
1713
+ curr_size_remaining_ -= n;
1714
+ total_size_remaining_ -= n;
1715
+ curr_pos_ += n;
1716
+ }
1717
+
1718
+ private:
1719
+ // Advances to the next nonempty `iovec` and updates related variables.
1720
+ void Advance() {
1721
+ do {
1722
+ assert(total_size_remaining_ >= curr_size_remaining_);
1723
+ total_size_remaining_ -= curr_size_remaining_;
1724
+ if (total_size_remaining_ == 0) {
1725
+ curr_pos_ = nullptr;
1726
+ curr_size_remaining_ = 0;
1727
+ return;
1728
+ }
1729
+ ++curr_iov_;
1730
+ curr_pos_ = reinterpret_cast<const char*>(curr_iov_->iov_base);
1731
+ curr_size_remaining_ = curr_iov_->iov_len;
1732
+ } while (curr_size_remaining_ == 0);
1733
+ }
1734
+
1735
+ // The `iovec` currently being read.
1736
+ const struct iovec* curr_iov_;
1737
+ // The location in `curr_iov_` currently being read.
1738
+ const char* curr_pos_;
1739
+ // The amount of unread data in `curr_iov_`.
1740
+ size_t curr_size_remaining_;
1741
+ // The amount of unread data in the entire input array.
1742
+ size_t total_size_remaining_;
1743
+ };
1744
+
1073
1745
  // A type that writes to an iovec.
1074
1746
  // Note that this is not a "ByteSink", but a type that matches the
1075
1747
  // Writer template argument to SnappyDecompressor::DecompressAllTags().
@@ -1115,17 +1787,14 @@ class SnappyIOVecWriter {
1115
1787
  : nullptr),
1116
1788
  curr_iov_remaining_(iov_count ? iov->iov_len : 0),
1117
1789
  total_written_(0),
1118
- output_limit_(-1) {}
1119
-
1120
- inline void SetExpectedLength(size_t len) {
1121
- output_limit_ = len;
1790
+ output_limit_(-1) {
1122
1791
  }
1123
1792
 
1124
- inline bool CheckLength() const {
1125
- return total_written_ == output_limit_;
1126
- }
1793
+ inline void SetExpectedLength(size_t len) { output_limit_ = len; }
1127
1794
 
1128
- inline bool Append(const char* ip, size_t len) {
1795
+ inline bool CheckLength() const { return total_written_ == output_limit_; }
1796
+
1797
+ inline bool Append(const char* ip, size_t len, char**) {
1129
1798
  if (total_written_ + len > output_limit_) {
1130
1799
  return false;
1131
1800
  }
@@ -1133,6 +1802,13 @@ class SnappyIOVecWriter {
1133
1802
  return AppendNoCheck(ip, len);
1134
1803
  }
1135
1804
 
1805
+ char* GetOutputPtr() { return nullptr; }
1806
+ char* GetBase(ptrdiff_t*) { return nullptr; }
1807
+ void SetOutputPtr(char* op) {
1808
+ // TODO: Switch to [[maybe_unused]] when we can assume C++17.
1809
+ (void)op;
1810
+ }
1811
+
1136
1812
  inline bool AppendNoCheck(const char* ip, size_t len) {
1137
1813
  while (len > 0) {
1138
1814
  if (curr_iov_remaining_ == 0) {
@@ -1146,7 +1822,7 @@ class SnappyIOVecWriter {
1146
1822
  }
1147
1823
 
1148
1824
  const size_t to_write = std::min(len, curr_iov_remaining_);
1149
- memcpy(curr_iov_output_, ip, to_write);
1825
+ std::memcpy(curr_iov_output_, ip, to_write);
1150
1826
  curr_iov_output_ += to_write;
1151
1827
  curr_iov_remaining_ -= to_write;
1152
1828
  total_written_ += to_write;
@@ -1157,7 +1833,8 @@ class SnappyIOVecWriter {
1157
1833
  return true;
1158
1834
  }
1159
1835
 
1160
- inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
1836
+ inline bool TryFastAppend(const char* ip, size_t available, size_t len,
1837
+ char**) {
1161
1838
  const size_t space_left = output_limit_ - total_written_;
1162
1839
  if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16 &&
1163
1840
  curr_iov_remaining_ >= 16) {
@@ -1172,7 +1849,7 @@ class SnappyIOVecWriter {
1172
1849
  return false;
1173
1850
  }
1174
1851
 
1175
- inline bool AppendFromSelf(size_t offset, size_t len) {
1852
+ inline bool AppendFromSelf(size_t offset, size_t len, char**) {
1176
1853
  // See SnappyArrayWriter::AppendFromSelf for an explanation of
1177
1854
  // the "offset - 1u" trick.
1178
1855
  if (offset - 1u >= total_written_) {
@@ -1228,6 +1905,7 @@ class SnappyIOVecWriter {
1228
1905
  if (to_copy > len) {
1229
1906
  to_copy = len;
1230
1907
  }
1908
+ assert(to_copy > 0);
1231
1909
 
1232
1910
  IncrementalCopy(GetIOVecPointer(from_iov, from_iov_offset),
1233
1911
  curr_iov_output_, curr_iov_output_ + to_copy,
@@ -1270,59 +1948,74 @@ class SnappyArrayWriter {
1270
1948
  char* base_;
1271
1949
  char* op_;
1272
1950
  char* op_limit_;
1951
+ // If op < op_limit_min_slop_ then it's safe to unconditionally write
1952
+ // kSlopBytes starting at op.
1953
+ char* op_limit_min_slop_;
1273
1954
 
1274
1955
  public:
1275
1956
  inline explicit SnappyArrayWriter(char* dst)
1276
1957
  : base_(dst),
1277
1958
  op_(dst),
1278
- op_limit_(dst) {
1279
- }
1959
+ op_limit_(dst),
1960
+ op_limit_min_slop_(dst) {} // Safe default; see invariant.
1280
1961
 
1281
1962
  inline void SetExpectedLength(size_t len) {
1282
1963
  op_limit_ = op_ + len;
1964
+ // Prevent the pointer from going past the buffer.
1965
+ op_limit_min_slop_ = op_limit_ - std::min<size_t>(kSlopBytes - 1, len);
1283
1966
  }
1284
1967
 
1285
- inline bool CheckLength() const {
1286
- return op_ == op_limit_;
1968
+ inline bool CheckLength() const { return op_ == op_limit_; }
1969
+
1970
+ char* GetOutputPtr() { return op_; }
1971
+ char* GetBase(ptrdiff_t* op_limit_min_slop) {
1972
+ *op_limit_min_slop = op_limit_min_slop_ - base_;
1973
+ return base_;
1287
1974
  }
1975
+ void SetOutputPtr(char* op) { op_ = op; }
1288
1976
 
1289
- inline bool Append(const char* ip, size_t len) {
1290
- char* op = op_;
1977
+ inline bool Append(const char* ip, size_t len, char** op_p) {
1978
+ char* op = *op_p;
1291
1979
  const size_t space_left = op_limit_ - op;
1292
- if (space_left < len) {
1293
- return false;
1294
- }
1295
- memcpy(op, ip, len);
1296
- op_ = op + len;
1980
+ if (space_left < len) return false;
1981
+ std::memcpy(op, ip, len);
1982
+ *op_p = op + len;
1297
1983
  return true;
1298
1984
  }
1299
1985
 
1300
- inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
1301
- char* op = op_;
1986
+ inline bool TryFastAppend(const char* ip, size_t available, size_t len,
1987
+ char** op_p) {
1988
+ char* op = *op_p;
1302
1989
  const size_t space_left = op_limit_ - op;
1303
1990
  if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16) {
1304
1991
  // Fast path, used for the majority (about 95%) of invocations.
1305
1992
  UnalignedCopy128(ip, op);
1306
- op_ = op + len;
1993
+ *op_p = op + len;
1307
1994
  return true;
1308
1995
  } else {
1309
1996
  return false;
1310
1997
  }
1311
1998
  }
1312
1999
 
1313
- inline bool AppendFromSelf(size_t offset, size_t len) {
1314
- char* const op_end = op_ + len;
2000
+ SNAPPY_ATTRIBUTE_ALWAYS_INLINE
2001
+ inline bool AppendFromSelf(size_t offset, size_t len, char** op_p) {
2002
+ assert(len > 0);
2003
+ char* const op = *op_p;
2004
+ assert(op >= base_);
2005
+ char* const op_end = op + len;
1315
2006
 
1316
2007
  // Check if we try to append from before the start of the buffer.
1317
- // Normally this would just be a check for "produced < offset",
1318
- // but "produced <= offset - 1u" is equivalent for every case
1319
- // except the one where offset==0, where the right side will wrap around
1320
- // to a very big number. This is convenient, as offset==0 is another
1321
- // invalid case that we also want to catch, so that we do not go
1322
- // into an infinite loop.
1323
- if (Produced() <= offset - 1u || op_end > op_limit_) return false;
1324
- op_ = IncrementalCopy(op_ - offset, op_, op_end, op_limit_);
2008
+ if (SNAPPY_PREDICT_FALSE(static_cast<size_t>(op - base_) < offset))
2009
+ return false;
1325
2010
 
2011
+ if (SNAPPY_PREDICT_FALSE((kSlopBytes < 64 && len > kSlopBytes) ||
2012
+ op >= op_limit_min_slop_ || offset < len)) {
2013
+ if (op_end > op_limit_ || offset == 0) return false;
2014
+ *op_p = IncrementalCopy(op - offset, op, op_end, op_limit_);
2015
+ return true;
2016
+ }
2017
+ std::memmove(op, op - offset, kSlopBytes);
2018
+ *op_p = op_end;
1326
2019
  return true;
1327
2020
  }
1328
2021
  inline size_t Produced() const {
@@ -1332,8 +2025,9 @@ class SnappyArrayWriter {
1332
2025
  inline void Flush() {}
1333
2026
  };
1334
2027
 
1335
- bool RawUncompress(const char* compressed, size_t n, char* uncompressed) {
1336
- ByteArraySource reader(compressed, n);
2028
+ bool RawUncompress(const char* compressed, size_t compressed_length,
2029
+ char* uncompressed) {
2030
+ ByteArraySource reader(compressed, compressed_length);
1337
2031
  return RawUncompress(&reader, uncompressed);
1338
2032
  }
1339
2033
 
@@ -1342,9 +2036,10 @@ bool RawUncompress(Source* compressed, char* uncompressed) {
1342
2036
  return InternalUncompress(compressed, &output);
1343
2037
  }
1344
2038
 
1345
- bool Uncompress(const char* compressed, size_t n, std::string* uncompressed) {
2039
+ bool Uncompress(const char* compressed, size_t compressed_length,
2040
+ std::string* uncompressed) {
1346
2041
  size_t ulength;
1347
- if (!GetUncompressedLength(compressed, n, &ulength)) {
2042
+ if (!GetUncompressedLength(compressed, compressed_length, &ulength)) {
1348
2043
  return false;
1349
2044
  }
1350
2045
  // On 32-bit builds: max_size() < kuint32max. Check for that instead
@@ -1353,7 +2048,8 @@ bool Uncompress(const char* compressed, size_t n, std::string* uncompressed) {
1353
2048
  return false;
1354
2049
  }
1355
2050
  STLStringResizeUninitialized(uncompressed, ulength);
1356
- return RawUncompress(compressed, n, string_as_array(uncompressed));
2051
+ return RawUncompress(compressed, compressed_length,
2052
+ string_as_array(uncompressed));
1357
2053
  }
1358
2054
 
1359
2055
  // A Writer that drops everything on the floor and just does validation
@@ -1363,32 +2059,44 @@ class SnappyDecompressionValidator {
1363
2059
  size_t produced_;
1364
2060
 
1365
2061
  public:
1366
- inline SnappyDecompressionValidator() : expected_(0), produced_(0) { }
1367
- inline void SetExpectedLength(size_t len) {
1368
- expected_ = len;
1369
- }
1370
- inline bool CheckLength() const {
1371
- return expected_ == produced_;
2062
+ inline SnappyDecompressionValidator() : expected_(0), produced_(0) {}
2063
+ inline void SetExpectedLength(size_t len) { expected_ = len; }
2064
+ size_t GetOutputPtr() { return produced_; }
2065
+ size_t GetBase(ptrdiff_t* op_limit_min_slop) {
2066
+ *op_limit_min_slop = std::numeric_limits<ptrdiff_t>::max() - kSlopBytes + 1;
2067
+ return 1;
1372
2068
  }
1373
- inline bool Append(const char* ip, size_t len) {
1374
- produced_ += len;
1375
- return produced_ <= expected_;
2069
+ void SetOutputPtr(size_t op) { produced_ = op; }
2070
+ inline bool CheckLength() const { return expected_ == produced_; }
2071
+ inline bool Append(const char* ip, size_t len, size_t* produced) {
2072
+ // TODO: Switch to [[maybe_unused]] when we can assume C++17.
2073
+ (void)ip;
2074
+
2075
+ *produced += len;
2076
+ return *produced <= expected_;
1376
2077
  }
1377
- inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
2078
+ inline bool TryFastAppend(const char* ip, size_t available, size_t length,
2079
+ size_t* produced) {
2080
+ // TODO: Switch to [[maybe_unused]] when we can assume C++17.
2081
+ (void)ip;
2082
+ (void)available;
2083
+ (void)length;
2084
+ (void)produced;
2085
+
1378
2086
  return false;
1379
2087
  }
1380
- inline bool AppendFromSelf(size_t offset, size_t len) {
2088
+ inline bool AppendFromSelf(size_t offset, size_t len, size_t* produced) {
1381
2089
  // See SnappyArrayWriter::AppendFromSelf for an explanation of
1382
2090
  // the "offset - 1u" trick.
1383
- if (produced_ <= offset - 1u) return false;
1384
- produced_ += len;
1385
- return produced_ <= expected_;
2091
+ if (*produced <= offset - 1u) return false;
2092
+ *produced += len;
2093
+ return *produced <= expected_;
1386
2094
  }
1387
2095
  inline void Flush() {}
1388
2096
  };
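// [Editor's note -- not part of the snappy sources or of this diff.] The
// "offset - 1u" trick referenced above, spelled out: in unsigned arithmetic a
// zero offset wraps around to the maximum value, so a single comparison
// rejects both offset == 0 and offsets reaching before the produced output.
// Hypothetical helper, equivalent to the check in AppendFromSelf():
inline bool RejectCopySource(size_t produced, size_t offset) {
  return produced <= offset - 1u;  // same as: offset == 0 || offset > produced
}
static_assert(static_cast<size_t>(0) - 1u == std::numeric_limits<size_t>::max(),
              "offset == 0 wraps to the largest value, so it is always rejected");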
1389
2097
 
1390
- bool IsValidCompressedBuffer(const char* compressed, size_t n) {
1391
- ByteArraySource reader(compressed, n);
2098
+ bool IsValidCompressedBuffer(const char* compressed, size_t compressed_length) {
2099
+ ByteArraySource reader(compressed, compressed_length);
1392
2100
  SnappyDecompressionValidator writer;
1393
2101
  return InternalUncompress(&reader, &writer);
1394
2102
  }
@@ -1398,9 +2106,7 @@ bool IsValidCompressed(Source* compressed) {
1398
2106
  return InternalUncompress(compressed, &writer);
1399
2107
  }
1400
2108
 
1401
- void RawCompress(const char* input,
1402
- size_t input_length,
1403
- char* compressed,
2109
+ void RawCompress(const char* input, size_t input_length, char* compressed,
1404
2110
  size_t* compressed_length) {
1405
2111
  ByteArraySource reader(input, input_length);
1406
2112
  UncheckedByteArraySink writer(compressed);
@@ -1410,6 +2116,16 @@ void RawCompress(const char* input,
1410
2116
  *compressed_length = (writer.CurrentDestination() - compressed);
1411
2117
  }
1412
2118
 
2119
+ void RawCompressFromIOVec(const struct iovec* iov, size_t uncompressed_length,
2120
+ char* compressed, size_t* compressed_length) {
2121
+ SnappyIOVecReader reader(iov, uncompressed_length);
2122
+ UncheckedByteArraySink writer(compressed);
2123
+ Compress(&reader, &writer);
2124
+
2125
+ // Compute how many bytes were added.
2126
+ *compressed_length = writer.CurrentDestination() - compressed;
2127
+ }
2128
+
1413
2129
  size_t Compress(const char* input, size_t input_length,
1414
2130
  std::string* compressed) {
1415
2131
  // Pre-grow the buffer to the max length of the compressed output
@@ -1418,7 +2134,26 @@ size_t Compress(const char* input, size_t input_length,
1418
2134
  size_t compressed_length;
1419
2135
  RawCompress(input, input_length, string_as_array(compressed),
1420
2136
  &compressed_length);
1421
- compressed->resize(compressed_length);
2137
+ compressed->erase(compressed_length);
2138
+ return compressed_length;
2139
+ }
2140
+
2141
+ size_t CompressFromIOVec(const struct iovec* iov, size_t iov_cnt,
2142
+ std::string* compressed) {
2143
+ // Compute the number of bytes to be compressed.
2144
+ size_t uncompressed_length = 0;
2145
+ for (size_t i = 0; i < iov_cnt; ++i) {
2146
+ uncompressed_length += iov[i].iov_len;
2147
+ }
2148
+
2149
+ // Pre-grow the buffer to the max length of the compressed output.
2150
+ STLStringResizeUninitialized(compressed, MaxCompressedLength(
2151
+ uncompressed_length));
2152
+
2153
+ size_t compressed_length;
2154
+ RawCompressFromIOVec(iov, uncompressed_length, string_as_array(compressed),
2155
+ &compressed_length);
2156
+ compressed->erase(compressed_length);
1422
2157
  return compressed_length;
1423
2158
  }
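// [Editor's sketch -- not part of the snappy sources or of this diff.]
// Intended usage of the CompressFromIOVec() entry point added above; the
// buffers and the helper name are illustrative only.
static std::string CompressTwoFragments(const char* a, size_t a_len,
                                        const char* b, size_t b_len) {
  struct iovec parts[2];
  parts[0].iov_base = const_cast<char*>(a);
  parts[0].iov_len = a_len;
  parts[1].iov_base = const_cast<char*>(b);
  parts[1].iov_len = b_len;
  std::string compressed;
  // Second argument is the number of iovec elements; RawCompressFromIOVec()
  // above takes the total byte count instead.
  CompressFromIOVec(parts, 2, &compressed);
  return compressed;
}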
1424
2159
 
@@ -1443,13 +2178,14 @@ class SnappyScatteredWriter {
1443
2178
  size_t full_size_;
1444
2179
 
1445
2180
  // Pointer into current output block
1446
- char* op_base_; // Base of output block
1447
- char* op_ptr_; // Pointer to next unfilled byte in block
1448
- char* op_limit_; // Pointer just past block
2181
+ char* op_base_; // Base of output block
2182
+ char* op_ptr_; // Pointer to next unfilled byte in block
2183
+ char* op_limit_; // Pointer just past block
2184
+ // If op < op_limit_min_slop_ then it's safe to unconditionally write
2185
+ // kSlopBytes starting at op.
2186
+ char* op_limit_min_slop_;
1449
2187
 
1450
- inline size_t Size() const {
1451
- return full_size_ + (op_ptr_ - op_base_);
1452
- }
2188
+ inline size_t Size() const { return full_size_ + (op_ptr_ - op_base_); }
1453
2189
 
1454
2190
  bool SlowAppend(const char* ip, size_t len);
1455
2191
  bool SlowAppendFromSelf(size_t offset, size_t len);
@@ -1460,60 +2196,79 @@ class SnappyScatteredWriter {
1460
2196
  full_size_(0),
1461
2197
  op_base_(NULL),
1462
2198
  op_ptr_(NULL),
1463
- op_limit_(NULL) {
2199
+ op_limit_(NULL),
2200
+ op_limit_min_slop_(NULL) {}
2201
+ char* GetOutputPtr() { return op_ptr_; }
2202
+ char* GetBase(ptrdiff_t* op_limit_min_slop) {
2203
+ *op_limit_min_slop = op_limit_min_slop_ - op_base_;
2204
+ return op_base_;
1464
2205
  }
2206
+ void SetOutputPtr(char* op) { op_ptr_ = op; }
1465
2207
 
1466
2208
  inline void SetExpectedLength(size_t len) {
1467
2209
  assert(blocks_.empty());
1468
2210
  expected_ = len;
1469
2211
  }
1470
2212
 
1471
- inline bool CheckLength() const {
1472
- return Size() == expected_;
1473
- }
2213
+ inline bool CheckLength() const { return Size() == expected_; }
1474
2214
 
1475
2215
  // Return the number of bytes actually uncompressed so far
1476
- inline size_t Produced() const {
1477
- return Size();
1478
- }
2216
+ inline size_t Produced() const { return Size(); }
1479
2217
 
1480
- inline bool Append(const char* ip, size_t len) {
1481
- size_t avail = op_limit_ - op_ptr_;
2218
+ inline bool Append(const char* ip, size_t len, char** op_p) {
2219
+ char* op = *op_p;
2220
+ size_t avail = op_limit_ - op;
1482
2221
  if (len <= avail) {
1483
2222
  // Fast path
1484
- memcpy(op_ptr_, ip, len);
1485
- op_ptr_ += len;
2223
+ std::memcpy(op, ip, len);
2224
+ *op_p = op + len;
1486
2225
  return true;
1487
2226
  } else {
1488
- return SlowAppend(ip, len);
2227
+ op_ptr_ = op;
2228
+ bool res = SlowAppend(ip, len);
2229
+ *op_p = op_ptr_;
2230
+ return res;
1489
2231
  }
1490
2232
  }
1491
2233
 
1492
- inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
1493
- char* op = op_ptr_;
2234
+ inline bool TryFastAppend(const char* ip, size_t available, size_t length,
2235
+ char** op_p) {
2236
+ char* op = *op_p;
1494
2237
  const int space_left = op_limit_ - op;
1495
2238
  if (length <= 16 && available >= 16 + kMaximumTagLength &&
1496
2239
  space_left >= 16) {
1497
2240
  // Fast path, used for the majority (about 95%) of invocations.
1498
2241
  UnalignedCopy128(ip, op);
1499
- op_ptr_ = op + length;
2242
+ *op_p = op + length;
1500
2243
  return true;
1501
2244
  } else {
1502
2245
  return false;
1503
2246
  }
1504
2247
  }
1505
2248
 
1506
- inline bool AppendFromSelf(size_t offset, size_t len) {
1507
- char* const op_end = op_ptr_ + len;
1508
- // See SnappyArrayWriter::AppendFromSelf for an explanation of
1509
- // the "offset - 1u" trick.
1510
- if (SNAPPY_PREDICT_TRUE(offset - 1u < op_ptr_ - op_base_ &&
1511
- op_end <= op_limit_)) {
1512
- // Fast path: src and dst in current block.
1513
- op_ptr_ = IncrementalCopy(op_ptr_ - offset, op_ptr_, op_end, op_limit_);
2249
+ inline bool AppendFromSelf(size_t offset, size_t len, char** op_p) {
2250
+ char* op = *op_p;
2251
+ assert(op >= op_base_);
2252
+ // Check if we try to append from before the start of the buffer.
2253
+ if (SNAPPY_PREDICT_FALSE((kSlopBytes < 64 && len > kSlopBytes) ||
2254
+ static_cast<size_t>(op - op_base_) < offset ||
2255
+ op >= op_limit_min_slop_ || offset < len)) {
2256
+ if (offset == 0) return false;
2257
+ if (SNAPPY_PREDICT_FALSE(static_cast<size_t>(op - op_base_) < offset ||
2258
+ op + len > op_limit_)) {
2259
+ op_ptr_ = op;
2260
+ bool res = SlowAppendFromSelf(offset, len);
2261
+ *op_p = op_ptr_;
2262
+ return res;
2263
+ }
2264
+ *op_p = IncrementalCopy(op - offset, op, op + len, op_limit_);
1514
2265
  return true;
1515
2266
  }
1516
- return SlowAppendFromSelf(offset, len);
2267
+ // Fast path
2268
+ char* const op_end = op + len;
2269
+ std::memmove(op, op - offset, kSlopBytes);
2270
+ *op_p = op_end;
2271
+ return true;
1517
2272
  }
1518
2273
 
1519
2274
  // Called at the end of the decompress. We ask the allocator
@@ -1521,12 +2276,12 @@ class SnappyScatteredWriter {
1521
2276
  inline void Flush() { allocator_.Flush(Produced()); }
1522
2277
  };
1523
2278
 
1524
- template<typename Allocator>
2279
+ template <typename Allocator>
1525
2280
  bool SnappyScatteredWriter<Allocator>::SlowAppend(const char* ip, size_t len) {
1526
2281
  size_t avail = op_limit_ - op_ptr_;
1527
2282
  while (len > avail) {
1528
2283
  // Completely fill this block
1529
- memcpy(op_ptr_, ip, avail);
2284
+ std::memcpy(op_ptr_, ip, avail);
1530
2285
  op_ptr_ += avail;
1531
2286
  assert(op_limit_ - op_ptr_ == 0);
1532
2287
  full_size_ += (op_ptr_ - op_base_);
@@ -1534,25 +2289,25 @@ bool SnappyScatteredWriter<Allocator>::SlowAppend(const char* ip, size_t len) {
1534
2289
  ip += avail;
1535
2290
 
1536
2291
  // Bounds check
1537
- if (full_size_ + len > expected_) {
1538
- return false;
1539
- }
2292
+ if (full_size_ + len > expected_) return false;
1540
2293
 
1541
2294
  // Make new block
1542
2295
  size_t bsize = std::min<size_t>(kBlockSize, expected_ - full_size_);
1543
2296
  op_base_ = allocator_.Allocate(bsize);
1544
2297
  op_ptr_ = op_base_;
1545
2298
  op_limit_ = op_base_ + bsize;
2299
+ op_limit_min_slop_ = op_limit_ - std::min<size_t>(kSlopBytes - 1, bsize);
2300
+
1546
2301
  blocks_.push_back(op_base_);
1547
2302
  avail = bsize;
1548
2303
  }
1549
2304
 
1550
- memcpy(op_ptr_, ip, len);
2305
+ std::memcpy(op_ptr_, ip, len);
1551
2306
  op_ptr_ += len;
1552
2307
  return true;
1553
2308
  }
1554
2309
 
1555
- template<typename Allocator>
2310
+ template <typename Allocator>
1556
2311
  bool SnappyScatteredWriter<Allocator>::SlowAppendFromSelf(size_t offset,
1557
2312
  size_t len) {
1558
2313
  // Overflow check
@@ -1567,18 +2322,26 @@ bool SnappyScatteredWriter<Allocator>::SlowAppendFromSelf(size_t offset,
1567
2322
  // nice if we do not rely on that, since we can get better compression if we
1568
2323
  // allow cross-block copies and thus might want to change the compressor in
1569
2324
  // the future.
2325
+ // TODO Replace this with a properly optimized path. This is not
2326
+ // triggered right now. But this is so super slow, that it would regress
2327
+ // performance unacceptably if triggered.
1570
2328
  size_t src = cur - offset;
2329
+ char* op = op_ptr_;
1571
2330
  while (len-- > 0) {
1572
- char c = blocks_[src >> kBlockLog][src & (kBlockSize-1)];
1573
- Append(&c, 1);
2331
+ char c = blocks_[src >> kBlockLog][src & (kBlockSize - 1)];
2332
+ if (!Append(&c, 1, &op)) {
2333
+ op_ptr_ = op;
2334
+ return false;
2335
+ }
1574
2336
  src++;
1575
2337
  }
2338
+ op_ptr_ = op;
1576
2339
  return true;
1577
2340
  }
1578
2341
 
1579
2342
  class SnappySinkAllocator {
1580
2343
  public:
1581
- explicit SnappySinkAllocator(Sink* dest): dest_(dest) {}
2344
+ explicit SnappySinkAllocator(Sink* dest) : dest_(dest) {}
1582
2345
  ~SnappySinkAllocator() {}
1583
2346
 
1584
2347
  char* Allocate(int size) {
@@ -1594,10 +2357,9 @@ class SnappySinkAllocator {
1594
2357
  // to the blocks.
1595
2358
  void Flush(size_t size) {
1596
2359
  size_t size_written = 0;
1597
- size_t block_size;
1598
- for (int i = 0; i < blocks_.size(); ++i) {
1599
- block_size = std::min<size_t>(blocks_[i].size, size - size_written);
1600
- dest_->AppendAndTakeOwnership(blocks_[i].data, block_size,
2360
+ for (Datablock& block : blocks_) {
2361
+ size_t block_size = std::min<size_t>(block.size, size - size_written);
2362
+ dest_->AppendAndTakeOwnership(block.data, block_size,
1601
2363
  &SnappySinkAllocator::Deleter, NULL);
1602
2364
  size_written += block_size;
1603
2365
  }
@@ -1612,6 +2374,10 @@ class SnappySinkAllocator {
1612
2374
  };
1613
2375
 
1614
2376
  static void Deleter(void* arg, const char* bytes, size_t size) {
2377
+ // TODO: Switch to [[maybe_unused]] when we can assume C++17.
2378
+ (void)arg;
2379
+ (void)size;
2380
+
1615
2381
  delete[] bytes;
1616
2382
  }
1617
2383
 
@@ -1631,15 +2397,15 @@ size_t UncompressAsMuchAsPossible(Source* compressed, Sink* uncompressed) {
1631
2397
  bool Uncompress(Source* compressed, Sink* uncompressed) {
1632
2398
  // Read the uncompressed length from the front of the compressed input
1633
2399
  SnappyDecompressor decompressor(compressed);
1634
- uint32 uncompressed_len = 0;
2400
+ uint32_t uncompressed_len = 0;
1635
2401
  if (!decompressor.ReadUncompressedLength(&uncompressed_len)) {
1636
2402
  return false;
1637
2403
  }
1638
2404
 
1639
2405
  char c;
1640
2406
  size_t allocated_size;
1641
- char* buf = uncompressed->GetAppendBufferVariable(
1642
- 1, uncompressed_len, &c, 1, &allocated_size);
2407
+ char* buf = uncompressed->GetAppendBufferVariable(1, uncompressed_len, &c, 1,
2408
+ &allocated_size);
1643
2409
 
1644
2410
  const size_t compressed_len = compressed->Available();
1645
2411
  // If we can get a flat buffer, then use it, otherwise do block by block