snappy 0.3.0 → 0.5.0

This diff shows the content of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
Files changed (40)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/main.yml +2 -2
  3. data/.github/workflows/publish.yml +7 -13
  4. data/Dockerfile +1 -1
  5. data/Gemfile +1 -0
  6. data/README.md +20 -1
  7. data/Rakefile +1 -1
  8. data/ext/extconf.rb +13 -11
  9. data/lib/snappy/shim.rb +3 -23
  10. data/lib/snappy/version.rb +1 -1
  11. data/lib/snappy/writer.rb +1 -1
  12. data/snappy.gemspec +1 -0
  13. data/test/snappy_test.rb +29 -4
  14. data/vendor/snappy/BUILD.bazel +211 -0
  15. data/vendor/snappy/CMakeLists.txt +176 -31
  16. data/vendor/snappy/CONTRIBUTING.md +9 -4
  17. data/vendor/snappy/MODULE.bazel +23 -0
  18. data/vendor/snappy/NEWS +27 -0
  19. data/vendor/snappy/README.md +52 -35
  20. data/vendor/snappy/WORKSPACE +27 -0
  21. data/vendor/snappy/WORKSPACE.bzlmod +0 -0
  22. data/vendor/snappy/cmake/config.h.in +30 -23
  23. data/vendor/snappy/snappy-internal.h +218 -25
  24. data/vendor/snappy/snappy-sinksource.cc +26 -9
  25. data/vendor/snappy/snappy-sinksource.h +11 -11
  26. data/vendor/snappy/snappy-stubs-internal.cc +1 -1
  27. data/vendor/snappy/snappy-stubs-internal.h +231 -306
  28. data/vendor/snappy/snappy-stubs-public.h.in +0 -11
  29. data/vendor/snappy/snappy-test.cc +88 -198
  30. data/vendor/snappy/snappy-test.h +102 -285
  31. data/vendor/snappy/snappy.cc +1412 -425
  32. data/vendor/snappy/snappy.h +60 -10
  33. data/vendor/snappy/snappy_benchmark.cc +398 -0
  34. data/vendor/snappy/snappy_compress_fuzzer.cc +21 -16
  35. data/vendor/snappy/snappy_test_data.cc +57 -0
  36. data/vendor/snappy/snappy_test_data.h +68 -0
  37. data/vendor/snappy/snappy_test_tool.cc +471 -0
  38. data/vendor/snappy/snappy_uncompress_fuzzer.cc +3 -2
  39. data/vendor/snappy/snappy_unittest.cc +183 -666
  40. metadata +12 -6
data/vendor/snappy/snappy-internal.h
@@ -31,11 +31,88 @@
  #ifndef THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_
  #define THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_

+ #include <utility>
+
  #include "snappy-stubs-internal.h"

+ #if SNAPPY_HAVE_SSSE3
+ // Please do not replace with <x86intrin.h> or with headers that assume more
+ // advanced SSE versions without checking with all the OWNERS.
+ #include <emmintrin.h>
+ #include <tmmintrin.h>
+ #endif
+
+ #if SNAPPY_HAVE_NEON
+ #include <arm_neon.h>
+ #endif
+
+ #if SNAPPY_HAVE_SSSE3 || SNAPPY_HAVE_NEON
+ #define SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE 1
+ #else
+ #define SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE 0
+ #endif
+
  namespace snappy {
  namespace internal {

+ #if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
+ #if SNAPPY_HAVE_SSSE3
+ using V128 = __m128i;
+ #elif SNAPPY_HAVE_NEON
+ using V128 = uint8x16_t;
+ #endif
+
+ // Load 128 bits of integer data. `src` must be 16-byte aligned.
+ inline V128 V128_Load(const V128* src);
+
+ // Load 128 bits of integer data. `src` does not need to be aligned.
+ inline V128 V128_LoadU(const V128* src);
+
+ // Store 128 bits of integer data. `dst` does not need to be aligned.
+ inline void V128_StoreU(V128* dst, V128 val);
+
+ // Shuffle packed 8-bit integers using a shuffle mask.
+ // Each packed integer in the shuffle mask must be in [0,16).
+ inline V128 V128_Shuffle(V128 input, V128 shuffle_mask);
+
+ // Constructs V128 with 16 chars |c|.
+ inline V128 V128_DupChar(char c);
+
+ #if SNAPPY_HAVE_SSSE3
+ inline V128 V128_Load(const V128* src) { return _mm_load_si128(src); }
+
+ inline V128 V128_LoadU(const V128* src) { return _mm_loadu_si128(src); }
+
+ inline void V128_StoreU(V128* dst, V128 val) { _mm_storeu_si128(dst, val); }
+
+ inline V128 V128_Shuffle(V128 input, V128 shuffle_mask) {
+ return _mm_shuffle_epi8(input, shuffle_mask);
+ }
+
+ inline V128 V128_DupChar(char c) { return _mm_set1_epi8(c); }
+
+ #elif SNAPPY_HAVE_NEON
+ inline V128 V128_Load(const V128* src) {
+ return vld1q_u8(reinterpret_cast<const uint8_t*>(src));
+ }
+
+ inline V128 V128_LoadU(const V128* src) {
+ return vld1q_u8(reinterpret_cast<const uint8_t*>(src));
+ }
+
+ inline void V128_StoreU(V128* dst, V128 val) {
+ vst1q_u8(reinterpret_cast<uint8_t*>(dst), val);
+ }
+
+ inline V128 V128_Shuffle(V128 input, V128 shuffle_mask) {
+ assert(vminvq_u8(shuffle_mask) >= 0 && vmaxvq_u8(shuffle_mask) <= 15);
+ return vqtbl1q_u8(input, shuffle_mask);
+ }
+
+ inline V128 V128_DupChar(char c) { return vdupq_n_u8(c); }
+ #endif
+ #endif // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
+
  // Working memory performs a single allocation to hold all scratch space
  // required for compression.
  class WorkingMemory {
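
The block above introduces a small `V128` abstraction so that one byte-shuffle primitive can back both the SSSE3 and NEON fast paths. As a hedged illustration only (this is not code from the diff), the sketch below exercises the SSSE3 intrinsics those helpers wrap; it assumes an x86-64 toolchain building with `-mssse3`.

```cpp
// Illustrative sketch, not part of the diff: the SSSE3 intrinsics wrapped by
// the new V128_Load / V128_Shuffle / V128_DupChar helpers.
#include <tmmintrin.h>  // SSSE3: _mm_shuffle_epi8

#include <cstdint>
#include <cstdio>

int main() {
  alignas(16) uint8_t bytes[16];
  alignas(16) uint8_t mask[16];
  for (int i = 0; i < 16; ++i) {
    bytes[i] = static_cast<uint8_t>(i);      // 0, 1, ..., 15
    mask[i] = static_cast<uint8_t>(15 - i);  // reverse permutation, all in [0,16)
  }

  __m128i v = _mm_load_si128(reinterpret_cast<const __m128i*>(bytes));  // V128_Load
  __m128i m = _mm_load_si128(reinterpret_cast<const __m128i*>(mask));
  __m128i r = _mm_shuffle_epi8(v, m);  // V128_Shuffle
  __m128i pad = _mm_set1_epi8(' ');    // V128_DupChar(' ')
  (void)pad;

  alignas(16) uint8_t out[16];
  _mm_store_si128(reinterpret_cast<__m128i*>(out), r);
  std::printf("%u %u\n", out[0], out[15]);  // prints "15 0": the block was reversed
  return 0;
}
```

On AArch64 the same operations map to the NEON branch shown in the hunk (`vld1q_u8`, `vqtbl1q_u8`, `vdupq_n_u8`).
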
@@ -46,16 +123,16 @@ class WorkingMemory {
  // Allocates and clears a hash table using memory in "*this",
  // stores the number of buckets in "*table_size" and returns a pointer to
  // the base of the hash table.
- uint16* GetHashTable(size_t fragment_size, int* table_size) const;
+ uint16_t* GetHashTable(size_t fragment_size, int* table_size) const;
  char* GetScratchInput() const { return input_; }
  char* GetScratchOutput() const { return output_; }

  private:
- char* mem_; // the allocated memory, never nullptr
- size_t size_; // the size of the allocated memory, never 0
- uint16* table_; // the pointer to the hashtable
- char* input_; // the pointer to the input scratch buffer
- char* output_; // the pointer to the output scratch buffer
+ char* mem_; // the allocated memory, never nullptr
+ size_t size_; // the size of the allocated memory, never 0
+ uint16_t* table_; // the pointer to the hashtable
+ char* input_; // the pointer to the input scratch buffer
+ char* output_; // the pointer to the output scratch buffer

  // No copying
  WorkingMemory(const WorkingMemory&);
@@ -76,7 +153,7 @@ class WorkingMemory {
  char* CompressFragment(const char* input,
  size_t input_length,
  char* op,
- uint16* table,
+ uint16_t* table,
  const int table_size);

  // Find the largest n such that
@@ -89,12 +166,19 @@ char* CompressFragment(const char* input,
  // Does not read *(s1 + (s2_limit - s2)) or beyond.
  // Requires that s2_limit >= s2.
  //
+ // In addition populate *data with the next 5 bytes from the end of the match.
+ // This is only done if 8 bytes are available (s2_limit - s2 >= 8). The point is
+ // that on some arch's this can be done faster in this routine than subsequent
+ // loading from s2 + n.
+ //
  // Separate implementation for 64-bit, little-endian cpus.
- #if !defined(SNAPPY_IS_BIG_ENDIAN) && \
- (defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM))
+ #if !SNAPPY_IS_BIG_ENDIAN && \
+ (defined(__x86_64__) || defined(_M_X64) || defined(ARCH_PPC) || \
+ defined(ARCH_ARM))
  static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
  const char* s2,
- const char* s2_limit) {
+ const char* s2_limit,
+ uint64_t* data) {
  assert(s2_limit >= s2);
  size_t matched = 0;

@@ -103,30 +187,106 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
  // uncommon code paths that determine, without extra effort, whether the match
  // length is less than 8. In short, we are hoping to avoid a conditional
  // branch, and perhaps get better code layout from the C++ compiler.
- if (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 8)) {
- uint64 a1 = UNALIGNED_LOAD64(s1);
- uint64 a2 = UNALIGNED_LOAD64(s2);
- if (a1 != a2) {
- return std::pair<size_t, bool>(Bits::FindLSBSetNonZero64(a1 ^ a2) >> 3,
- true);
+ if (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 16)) {
+ uint64_t a1 = UNALIGNED_LOAD64(s1);
+ uint64_t a2 = UNALIGNED_LOAD64(s2);
+ if (SNAPPY_PREDICT_TRUE(a1 != a2)) {
+ // This code is critical for performance. The reason is that it determines
+ // how much to advance `ip` (s2). This obviously depends on both the loads
+ // from the `candidate` (s1) and `ip`. Furthermore the next `candidate`
+ // depends on the advanced `ip` calculated here through a load, hash and
+ // new candidate hash lookup (a lot of cycles). This makes s1 (ie.
+ // `candidate`) the variable that limits throughput. This is the reason we
+ // go through hoops to have this function update `data` for the next iter.
+ // The straightforward code would use *data, given by
+ //
+ // *data = UNALIGNED_LOAD64(s2 + matched_bytes) (Latency of 5 cycles),
+ //
+ // as input for the hash table lookup to find next candidate. However
+ // this forces the load on the data dependency chain of s1, because
+ // matched_bytes directly depends on s1. However matched_bytes is 0..7, so
+ // we can also calculate *data by
+ //
+ // *data = AlignRight(UNALIGNED_LOAD64(s2), UNALIGNED_LOAD64(s2 + 8),
+ // matched_bytes);
+ //
+ // The loads do not depend on s1 anymore and are thus off the bottleneck.
+ // The straightforward implementation on x86_64 would be to use
+ //
+ // shrd rax, rdx, cl (cl being matched_bytes * 8)
+ //
+ // unfortunately shrd with a variable shift has a 4 cycle latency. So this
+ // only wins 1 cycle. The BMI2 shrx instruction is a 1 cycle variable
+ // shift instruction but can only shift 64 bits. If we focus on just
+ // obtaining the least significant 4 bytes, we can obtain this by
+ //
+ // *data = ConditionalMove(matched_bytes < 4, UNALIGNED_LOAD64(s2),
+ // UNALIGNED_LOAD64(s2 + 4) >> ((matched_bytes & 3) * 8);
+ //
+ // Writen like above this is not a big win, the conditional move would be
+ // a cmp followed by a cmov (2 cycles) followed by a shift (1 cycle).
+ // However matched_bytes < 4 is equal to
+ // static_cast<uint32_t>(xorval) != 0. Writen that way, the conditional
+ // move (2 cycles) can execute in parallel with FindLSBSetNonZero64
+ // (tzcnt), which takes 3 cycles.
+ uint64_t xorval = a1 ^ a2;
+ int shift = Bits::FindLSBSetNonZero64(xorval);
+ size_t matched_bytes = shift >> 3;
+ uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
+ #ifndef __x86_64__
+ a2 = static_cast<uint32_t>(xorval) == 0 ? a3 : a2;
+ #else
+ // Ideally this would just be
+ //
+ // a2 = static_cast<uint32_t>(xorval) == 0 ? a3 : a2;
+ //
+ // However clang correctly infers that the above statement participates on
+ // a critical data dependency chain and thus, unfortunately, refuses to
+ // use a conditional move (it's tuned to cut data dependencies). In this
+ // case there is a longer parallel chain anyway AND this will be fairly
+ // unpredictable.
+ asm("testl %k2, %k2\n\t"
+ "cmovzq %1, %0\n\t"
+ : "+r"(a2)
+ : "r"(a3), "r"(xorval)
+ : "cc");
+ #endif
+ *data = a2 >> (shift & (3 * 8));
+ return std::pair<size_t, bool>(matched_bytes, true);
  } else {
  matched = 8;
  s2 += 8;
  }
  }
+ SNAPPY_PREFETCH(s1 + 64);
+ SNAPPY_PREFETCH(s2 + 64);

  // Find out how long the match is. We loop over the data 64 bits at a
  // time until we find a 64-bit block that doesn't match; then we find
  // the first non-matching bit and use that to calculate the total
  // length of the match.
- while (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 8)) {
- if (UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched)) {
+ while (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 16)) {
+ uint64_t a1 = UNALIGNED_LOAD64(s1 + matched);
+ uint64_t a2 = UNALIGNED_LOAD64(s2);
+ if (a1 == a2) {
  s2 += 8;
  matched += 8;
  } else {
- uint64 x = UNALIGNED_LOAD64(s2) ^ UNALIGNED_LOAD64(s1 + matched);
- int matching_bits = Bits::FindLSBSetNonZero64(x);
- matched += matching_bits >> 3;
+ uint64_t xorval = a1 ^ a2;
+ int shift = Bits::FindLSBSetNonZero64(xorval);
+ size_t matched_bytes = shift >> 3;
+ uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
+ #ifndef __x86_64__
+ a2 = static_cast<uint32_t>(xorval) == 0 ? a3 : a2;
+ #else
+ asm("testl %k2, %k2\n\t"
+ "cmovzq %1, %0\n\t"
+ : "+r"(a2)
+ : "r"(a3), "r"(xorval)
+ : "cc");
+ #endif
+ *data = a2 >> (shift & (3 * 8));
+ matched += matched_bytes;
  assert(matched >= 8);
  return std::pair<size_t, bool>(matched, false);
  }
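
The long comment in this hunk carries the core idea: rather than reloading from `s2 + matched_bytes` (which would sit on the `s1`-dependent critical chain), `*data` is derived from two loads that were already issued, selecting between them with a conditional move. Purely as an illustration (not code from the diff), here is a portable scalar restatement of that arithmetic; `Load64` and `FindLSBSetNonZero64` are stand-ins for `UNALIGNED_LOAD64` and `Bits::FindLSBSetNonZero64`, and a little-endian target is assumed.

```cpp
// Illustrative sketch, not part of the diff: the matched-bytes / *data
// computation from the hunk above, without the x86 cmov inline asm.
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstring>

static uint64_t Load64(const char* p) {  // stand-in for UNALIGNED_LOAD64
  uint64_t v;
  std::memcpy(&v, p, sizeof(v));
  return v;
}

static int FindLSBSetNonZero64(uint64_t x) {  // index of lowest set bit, x != 0
  int n = 0;
  while ((x & 1) == 0) {
    x >>= 1;
    ++n;
  }
  return n;
}

// Given that the first 8 bytes of s1 and s2 differ, return how many leading
// bytes match (0..7) and fill *data with bytes starting at s2 + matched_bytes,
// computed only from loads that do not depend on s1.
static size_t MatchedBytes(const char* s1, const char* s2, uint64_t* data) {
  uint64_t a1 = Load64(s1);
  uint64_t a2 = Load64(s2);
  uint64_t xorval = a1 ^ a2;
  assert(xorval != 0);
  int shift = FindLSBSetNonZero64(xorval);
  size_t matched_bytes = shift >> 3;  // index of the first differing byte
  uint64_t a3 = Load64(s2 + 4);
  // matched_bytes >= 4 exactly when the low 32 bits of xorval are zero, so
  // select the load at s2 + 4 in that case (the cmov in the real code).
  a2 = static_cast<uint32_t>(xorval) == 0 ? a3 : a2;
  *data = a2 >> (shift & (3 * 8));  // at least 4 valid bytes from s2 + matched_bytes
  return matched_bytes;
}

int main() {
  const char s1[16] = "abcdefgh";
  const char s2[16] = "abcdXfgh";  // differs at byte index 4
  uint64_t data = 0;
  size_t matched = MatchedBytes(s1, s2, &data);
  std::printf("matched=%zu first_data_byte=%c\n", matched,
              static_cast<char>(data & 0xff));  // matched=4, first byte 'X'
  return 0;
}
```
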
@@ -136,6 +296,9 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
  ++s2;
  ++matched;
  } else {
+ if (s2 <= s2_limit - 8) {
+ *data = UNALIGNED_LOAD64(s2);
+ }
  return std::pair<size_t, bool>(matched, matched < 8);
  }
  }
@@ -144,7 +307,8 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
  #else
  static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
  const char* s2,
- const char* s2_limit) {
+ const char* s2_limit,
+ uint64_t* data) {
  // Implementation based on the x86-64 version, above.
  assert(s2_limit >= s2);
  int matched = 0;
@@ -155,19 +319,46 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
  matched += 4;
  }
  if (LittleEndian::IsLittleEndian() && s2 <= s2_limit - 4) {
- uint32 x = UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
+ uint32_t x = UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
  int matching_bits = Bits::FindLSBSetNonZero(x);
  matched += matching_bits >> 3;
+ s2 += matching_bits >> 3;
  } else {
  while ((s2 < s2_limit) && (s1[matched] == *s2)) {
  ++s2;
  ++matched;
  }
  }
+ if (s2 <= s2_limit - 8) *data = LittleEndian::Load64(s2);
  return std::pair<size_t, bool>(matched, matched < 8);
  }
  #endif

+ static inline size_t FindMatchLengthPlain(const char* s1, const char* s2,
+ const char* s2_limit) {
+ // Implementation based on the x86-64 version, above.
+ assert(s2_limit >= s2);
+ int matched = 0;
+
+ while (s2 <= s2_limit - 8 &&
+ UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched)) {
+ s2 += 8;
+ matched += 8;
+ }
+ if (LittleEndian::IsLittleEndian() && s2 <= s2_limit - 8) {
+ uint64_t x = UNALIGNED_LOAD64(s2) ^ UNALIGNED_LOAD64(s1 + matched);
+ int matching_bits = Bits::FindLSBSetNonZero64(x);
+ matched += matching_bits >> 3;
+ s2 += matching_bits >> 3;
+ } else {
+ while ((s2 < s2_limit) && (s1[matched] == *s2)) {
+ ++s2;
+ ++matched;
+ }
+ }
+ return matched;
+ }
+
  // Lookup tables for decompression code. Give --snappy_dump_decompression_table
  // to the unit test to recompute char_table.

@@ -190,7 +381,8 @@ static const int kMaximumTagLength = 5; // COPY_4_BYTE_OFFSET plus the actual o
  // because of efficiency reasons:
  // (1) Extracting a byte is faster than a bit-field
  // (2) It properly aligns copy offset so we do not need a <<8
- static const uint16 char_table[256] = {
+ static constexpr uint16_t char_table[256] = {
+ // clang-format off
  0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002,
  0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004,
  0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006,
@@ -222,7 +414,8 @@ static const uint16 char_table[256] = {
  0x0039, 0x0f04, 0x1039, 0x2039, 0x003a, 0x0f05, 0x103a, 0x203a,
  0x003b, 0x0f06, 0x103b, 0x203b, 0x003c, 0x0f07, 0x103c, 0x203c,
  0x0801, 0x0f08, 0x103d, 0x203d, 0x1001, 0x0f09, 0x103e, 0x203e,
- 0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040
+ 0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040,
+ // clang-format on
  };

  } // end namespace internal
data/vendor/snappy/snappy-sinksource.cc
@@ -26,23 +26,31 @@
  // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

- #include <string.h>
+ #include <stddef.h>
+ #include <cstring>

  #include "snappy-sinksource.h"

  namespace snappy {

- Source::~Source() { }
+ Source::~Source() = default;

- Sink::~Sink() { }
+ Sink::~Sink() = default;

  char* Sink::GetAppendBuffer(size_t length, char* scratch) {
+ // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+ (void)length;
+
  return scratch;
  }

  char* Sink::GetAppendBufferVariable(
  size_t min_size, size_t desired_size_hint, char* scratch,
  size_t scratch_size, size_t* allocated_size) {
+ // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+ (void)min_size;
+ (void)desired_size_hint;
+
  *allocated_size = scratch_size;
  return scratch;
  }
@@ -55,7 +63,7 @@ void Sink::AppendAndTakeOwnership(
  (*deleter)(deleter_arg, bytes, n);
  }

- ByteArraySource::~ByteArraySource() { }
+ ByteArraySource::~ByteArraySource() = default;

  size_t ByteArraySource::Available() const { return left_; }

@@ -74,22 +82,26 @@ UncheckedByteArraySink::~UncheckedByteArraySink() { }
  void UncheckedByteArraySink::Append(const char* data, size_t n) {
  // Do no copying if the caller filled in the result of GetAppendBuffer()
  if (data != dest_) {
- memcpy(dest_, data, n);
+ std::memcpy(dest_, data, n);
  }
  dest_ += n;
  }

  char* UncheckedByteArraySink::GetAppendBuffer(size_t len, char* scratch) {
+ // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+ (void)len;
+ (void)scratch;
+
  return dest_;
  }

  void UncheckedByteArraySink::AppendAndTakeOwnership(
- char* data, size_t n,
+ char* bytes, size_t n,
  void (*deleter)(void*, const char*, size_t),
  void *deleter_arg) {
- if (data != dest_) {
- memcpy(dest_, data, n);
- (*deleter)(deleter_arg, data, n);
+ if (bytes != dest_) {
+ std::memcpy(dest_, bytes, n);
+ (*deleter)(deleter_arg, bytes, n);
  }
  dest_ += n;
  }
@@ -97,6 +109,11 @@ void UncheckedByteArraySink::AppendAndTakeOwnership(
  char* UncheckedByteArraySink::GetAppendBufferVariable(
  size_t min_size, size_t desired_size_hint, char* scratch,
  size_t scratch_size, size_t* allocated_size) {
+ // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+ (void)min_size;
+ (void)scratch;
+ (void)scratch_size;
+
  *allocated_size = desired_size_hint;
  return dest_;
  }
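
The `(void)` casts above silence unused-parameter warnings while the library still supports pre-C++17 compilers; the TODOs name the attribute that would replace them. As a small illustration only (not code from the diff), the C++17 form the TODO points at looks like this:

```cpp
// Illustrative sketch, not part of the diff: the [[maybe_unused]] attribute
// the TODO comments refer to, usable once C++17 can be assumed.
#include <cstddef>

char* GetAppendBuffer([[maybe_unused]] size_t length, char* scratch) {
  return scratch;  // no (void)length cast needed
}
```
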
data/vendor/snappy/snappy-sinksource.h
@@ -146,10 +146,10 @@ class Source {
  class ByteArraySource : public Source {
  public:
  ByteArraySource(const char* p, size_t n) : ptr_(p), left_(n) { }
- virtual ~ByteArraySource();
- virtual size_t Available() const;
- virtual const char* Peek(size_t* len);
- virtual void Skip(size_t n);
+ ~ByteArraySource() override;
+ size_t Available() const override;
+ const char* Peek(size_t* len) override;
+ void Skip(size_t n) override;
  private:
  const char* ptr_;
  size_t left_;
@@ -159,15 +159,15 @@ class ByteArraySource : public Source {
  class UncheckedByteArraySink : public Sink {
  public:
  explicit UncheckedByteArraySink(char* dest) : dest_(dest) { }
- virtual ~UncheckedByteArraySink();
- virtual void Append(const char* data, size_t n);
- virtual char* GetAppendBuffer(size_t len, char* scratch);
- virtual char* GetAppendBufferVariable(
+ ~UncheckedByteArraySink() override;
+ void Append(const char* data, size_t n) override;
+ char* GetAppendBuffer(size_t len, char* scratch) override;
+ char* GetAppendBufferVariable(
  size_t min_size, size_t desired_size_hint, char* scratch,
- size_t scratch_size, size_t* allocated_size);
- virtual void AppendAndTakeOwnership(
+ size_t scratch_size, size_t* allocated_size) override;
+ void AppendAndTakeOwnership(
  char* bytes, size_t n, void (*deleter)(void*, const char*, size_t),
- void *deleter_arg);
+ void *deleter_arg) override;

  // Return the current output pointer so that a caller can see how
  // many bytes were produced.
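
These declarations drop the redundant `virtual` keyword in favour of `override`; the interface itself is unchanged. As a hedged sketch (not code from the diff), a user-defined sink written against the updated header might look like the following; it assumes the vendored `snappy-sinksource.h` is on the include path, and `StringSink` is an illustrative name, not part of the library.

```cpp
// Illustrative sketch, not part of the diff: a minimal Sink subclass in the
// same override style the updated header now uses.
#include <cstddef>
#include <string>

#include "snappy-sinksource.h"  // vendored header, assumed to be on the include path

class StringSink : public snappy::Sink {
 public:
  explicit StringSink(std::string* out) : out_(out) {}
  ~StringSink() override = default;

  // The one pure-virtual member: append n bytes to the backing string. The
  // base class supplies default GetAppendBuffer()/GetAppendBufferVariable().
  void Append(const char* bytes, size_t n) override { out_->append(bytes, n); }

 private:
  std::string* out_;
};
```
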
data/vendor/snappy/snappy-stubs-internal.cc
@@ -33,7 +33,7 @@

  namespace snappy {

- void Varint::Append32(std::string* s, uint32 value) {
+ void Varint::Append32(std::string* s, uint32_t value) {
  char buf[Varint::kMax32];
  const char* p = Varint::Encode32(buf, value);
  s->append(buf, p - buf);
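
The only change in this last hunk is the `uint32` to `uint32_t` spelling in `Varint::Append32`, which appends a base-128 varint (the encoding snappy uses for the uncompressed-length prefix). For illustration only (not code from the diff), a standalone version of that encoding can be sketched as:

```cpp
// Illustrative sketch, not part of the diff: base-128 varint append,
// mirroring what Varint::Encode32 + Append32 do for the length prefix.
#include <cstdint>
#include <cstdio>
#include <string>

static void AppendVarint32(std::string* s, uint32_t value) {
  char buf[5];  // a 32-bit value needs at most 5 varint bytes (Varint::kMax32)
  int n = 0;
  while (value >= 0x80) {
    buf[n++] = static_cast<char>((value & 0x7f) | 0x80);  // 7 payload bits + continuation bit
    value >>= 7;
  }
  buf[n++] = static_cast<char>(value);  // final byte, continuation bit clear
  s->append(buf, n);
}

int main() {
  std::string s;
  AppendVarint32(&s, 300);  // encodes as 0xac 0x02
  std::printf("%zu bytes: %02x %02x\n", s.size(),
              static_cast<unsigned char>(s[0]), static_cast<unsigned char>(s[1]));
  return 0;
}
```
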