snappy 0.2.0-java → 0.4.0-java

@@ -1,62 +1,66 @@
  #ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_CMAKE_CONFIG_H_
  #define THIRD_PARTY_SNAPPY_OPENSOURCE_CMAKE_CONFIG_H_

+ /* Define to 1 if the compiler supports __attribute__((always_inline)). */
+ #cmakedefine01 HAVE_ATTRIBUTE_ALWAYS_INLINE
+
  /* Define to 1 if the compiler supports __builtin_ctz and friends. */
- #cmakedefine HAVE_BUILTIN_CTZ 1
+ #cmakedefine01 HAVE_BUILTIN_CTZ

  /* Define to 1 if the compiler supports __builtin_expect. */
- #cmakedefine HAVE_BUILTIN_EXPECT 1
-
- /* Define to 1 if you have the <byteswap.h> header file. */
- #cmakedefine HAVE_BYTESWAP_H 1
+ #cmakedefine01 HAVE_BUILTIN_EXPECT

  /* Define to 1 if you have a definition for mmap() in <sys/mman.h>. */
- #cmakedefine HAVE_FUNC_MMAP 1
+ #cmakedefine01 HAVE_FUNC_MMAP

  /* Define to 1 if you have a definition for sysconf() in <unistd.h>. */
- #cmakedefine HAVE_FUNC_SYSCONF 1
-
- /* Define to 1 to use the gflags package for command-line parsing. */
- #cmakedefine HAVE_GFLAGS 1
-
- /* Define to 1 if you have Google Test. */
- #cmakedefine HAVE_GTEST 1
+ #cmakedefine01 HAVE_FUNC_SYSCONF

  /* Define to 1 if you have the `lzo2' library (-llzo2). */
- #cmakedefine HAVE_LIBLZO2 1
+ #cmakedefine01 HAVE_LIBLZO2

  /* Define to 1 if you have the `z' library (-lz). */
- #cmakedefine HAVE_LIBZ 1
+ #cmakedefine01 HAVE_LIBZ

- /* Define to 1 if you have the <sys/endian.h> header file. */
- #cmakedefine HAVE_SYS_ENDIAN_H 1
+ /* Define to 1 if you have the `lz4' library (-llz4). */
+ #cmakedefine01 HAVE_LIBLZ4

  /* Define to 1 if you have the <sys/mman.h> header file. */
- #cmakedefine HAVE_SYS_MMAN_H 1
+ #cmakedefine01 HAVE_SYS_MMAN_H

  /* Define to 1 if you have the <sys/resource.h> header file. */
- #cmakedefine HAVE_SYS_RESOURCE_H 1
+ #cmakedefine01 HAVE_SYS_RESOURCE_H

  /* Define to 1 if you have the <sys/time.h> header file. */
- #cmakedefine HAVE_SYS_TIME_H 1
+ #cmakedefine01 HAVE_SYS_TIME_H

  /* Define to 1 if you have the <sys/uio.h> header file. */
- #cmakedefine HAVE_SYS_UIO_H 1
+ #cmakedefine01 HAVE_SYS_UIO_H

  /* Define to 1 if you have the <unistd.h> header file. */
- #cmakedefine HAVE_UNISTD_H 1
+ #cmakedefine01 HAVE_UNISTD_H

  /* Define to 1 if you have the <windows.h> header file. */
- #cmakedefine HAVE_WINDOWS_H 1
+ #cmakedefine01 HAVE_WINDOWS_H

  /* Define to 1 if you target processors with SSSE3+ and have <tmmintrin.h>. */
  #cmakedefine01 SNAPPY_HAVE_SSSE3

+ /* Define to 1 if you target processors with SSE4.2 and have <crc32intrin.h>. */
+ #cmakedefine01 SNAPPY_HAVE_X86_CRC32
+
  /* Define to 1 if you target processors with BMI2+ and have <bmi2intrin.h>. */
  #cmakedefine01 SNAPPY_HAVE_BMI2

+ /* Define to 1 if you target processors with NEON and have <arm_neon.h>. */
+ #cmakedefine01 SNAPPY_HAVE_NEON
+
+ /* Define to 1 if you have <arm_neon.h> and <arm_acle.h> and want to optimize
+    compression speed by using __crc32cw from <arm_acle.h>. */
+ #cmakedefine01 SNAPPY_HAVE_NEON_CRC32
+
  /* Define to 1 if your processor stores words with the most significant byte
     first (like Motorola and SPARC, unlike Intel and VAX). */
- #cmakedefine SNAPPY_IS_BIG_ENDIAN 1
+ #cmakedefine01 SNAPPY_IS_BIG_ENDIAN

  #endif // THIRD_PARTY_SNAPPY_OPENSOURCE_CMAKE_CONFIG_H_
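Note on the config change above: with #cmakedefine01, CMake's configure_file() always emits a definition of either 0 or 1, so the sources can test features with #if rather than #ifdef (compare the #if !SNAPPY_IS_BIG_ENDIAN change further down). A minimal sketch of the generated header and a consumer, assuming the HAVE_BUILTIN_CTZ name from the hunk above; the fallback function is purely illustrative and not part of snappy:

/* What configure_file() emits for "#cmakedefine01 HAVE_BUILTIN_CTZ" when the
   compile check succeeds -- illustrative generated output: */
#define HAVE_BUILTIN_CTZ 1

/* Consumers can now test with #if; with the old "#cmakedefine ... 1" form the
   macro could be left entirely undefined, forcing #ifdef-style checks. */
#if HAVE_BUILTIN_CTZ
inline int CountTrailingZeros32(unsigned x) { return __builtin_ctz(x); }  // x must be non-zero
#else
inline int CountTrailingZeros32(unsigned x) {  // portable fallback, x must be non-zero
  int n = 0;
  while ((x & 1u) == 0) {
    x >>= 1;
    ++n;
  }
  return n;
}
#endif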
@@ -33,9 +33,84 @@

  #include "snappy-stubs-internal.h"

+ #if SNAPPY_HAVE_SSSE3
+ // Please do not replace with <x86intrin.h> or with headers that assume more
+ // advanced SSE versions without checking with all the OWNERS.
+ #include <emmintrin.h>
+ #include <tmmintrin.h>
+ #endif
+
+ #if SNAPPY_HAVE_NEON
+ #include <arm_neon.h>
+ #endif
+
+ #if SNAPPY_HAVE_SSSE3 || SNAPPY_HAVE_NEON
+ #define SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE 1
+ #else
+ #define SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE 0
+ #endif
+
  namespace snappy {
  namespace internal {

+ #if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
+ #if SNAPPY_HAVE_SSSE3
+ using V128 = __m128i;
+ #elif SNAPPY_HAVE_NEON
+ using V128 = uint8x16_t;
+ #endif
+
+ // Load 128 bits of integer data. `src` must be 16-byte aligned.
+ inline V128 V128_Load(const V128* src);
+
+ // Load 128 bits of integer data. `src` does not need to be aligned.
+ inline V128 V128_LoadU(const V128* src);
+
+ // Store 128 bits of integer data. `dst` does not need to be aligned.
+ inline void V128_StoreU(V128* dst, V128 val);
+
+ // Shuffle packed 8-bit integers using a shuffle mask.
+ // Each packed integer in the shuffle mask must be in [0,16).
+ inline V128 V128_Shuffle(V128 input, V128 shuffle_mask);
+
+ // Constructs V128 with 16 chars |c|.
+ inline V128 V128_DupChar(char c);
+
+ #if SNAPPY_HAVE_SSSE3
+ inline V128 V128_Load(const V128* src) { return _mm_load_si128(src); }
+
+ inline V128 V128_LoadU(const V128* src) { return _mm_loadu_si128(src); }
+
+ inline void V128_StoreU(V128* dst, V128 val) { _mm_storeu_si128(dst, val); }
+
+ inline V128 V128_Shuffle(V128 input, V128 shuffle_mask) {
+   return _mm_shuffle_epi8(input, shuffle_mask);
+ }
+
+ inline V128 V128_DupChar(char c) { return _mm_set1_epi8(c); }
+
+ #elif SNAPPY_HAVE_NEON
+ inline V128 V128_Load(const V128* src) {
+   return vld1q_u8(reinterpret_cast<const uint8_t*>(src));
+ }
+
+ inline V128 V128_LoadU(const V128* src) {
+   return vld1q_u8(reinterpret_cast<const uint8_t*>(src));
+ }
+
+ inline void V128_StoreU(V128* dst, V128 val) {
+   vst1q_u8(reinterpret_cast<uint8_t*>(dst), val);
+ }
+
+ inline V128 V128_Shuffle(V128 input, V128 shuffle_mask) {
+   assert(vminvq_u8(shuffle_mask) >= 0 && vmaxvq_u8(shuffle_mask) <= 15);
+   return vqtbl1q_u8(input, shuffle_mask);
+ }
+
+ inline V128 V128_DupChar(char c) { return vdupq_n_u8(c); }
+ #endif
+ #endif // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
+
  // Working memory performs a single allocation to hold all scratch space
  // required for compression.
  class WorkingMemory {
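The new V128 wrappers above give the SSSE3 and NEON builds one shared name for a 16-byte vector and its load/store/shuffle/broadcast operations. A hedged usage sketch built only on those declarations; ReverseBytes is a hypothetical helper (not upstream code) and it assumes SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE evaluates to 1:

#include <cstdint>

namespace snappy {
namespace internal {

// Reverses the 16 bytes of v with a single table-lookup shuffle; the same call
// compiles to _mm_shuffle_epi8 on SSSE3 and vqtbl1q_u8 on NEON via the wrappers.
inline V128 ReverseBytes(V128 v) {
  alignas(16) static constexpr uint8_t kReverse[16] = {
      15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0};
  const V128 mask = V128_Load(reinterpret_cast<const V128*>(kReverse));
  return V128_Shuffle(v, mask);
}

}  // namespace internal
}  // namespace snappy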
@@ -46,16 +121,16 @@ class WorkingMemory {
    // Allocates and clears a hash table using memory in "*this",
    // stores the number of buckets in "*table_size" and returns a pointer to
    // the base of the hash table.
-   uint16* GetHashTable(size_t fragment_size, int* table_size) const;
+   uint16_t* GetHashTable(size_t fragment_size, int* table_size) const;
    char* GetScratchInput() const { return input_; }
    char* GetScratchOutput() const { return output_; }

   private:
-   char* mem_;      // the allocated memory, never nullptr
-   size_t size_;    // the size of the allocated memory, never 0
-   uint16* table_;  // the pointer to the hashtable
-   char* input_;    // the pointer to the input scratch buffer
-   char* output_;   // the pointer to the output scratch buffer
+   char* mem_;        // the allocated memory, never nullptr
+   size_t size_;      // the size of the allocated memory, never 0
+   uint16_t* table_;  // the pointer to the hashtable
+   char* input_;      // the pointer to the input scratch buffer
+   char* output_;     // the pointer to the output scratch buffer

    // No copying
    WorkingMemory(const WorkingMemory&);
@@ -76,7 +151,7 @@ class WorkingMemory {
  char* CompressFragment(const char* input,
                         size_t input_length,
                         char* op,
-                        uint16* table,
+                        uint16_t* table,
                         const int table_size);

  // Find the largest n such that
@@ -89,12 +164,19 @@ char* CompressFragment(const char* input,
  // Does not read *(s1 + (s2_limit - s2)) or beyond.
  // Requires that s2_limit >= s2.
  //
+ // In addition populate *data with the next 5 bytes from the end of the match.
+ // This is only done if 8 bytes are available (s2_limit - s2 >= 8). The point is
+ // that on some arch's this can be done faster in this routine than subsequent
+ // loading from s2 + n.
+ //
  // Separate implementation for 64-bit, little-endian cpus.
- #if !defined(SNAPPY_IS_BIG_ENDIAN) && \
-     (defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM))
+ #if !SNAPPY_IS_BIG_ENDIAN && \
+     (defined(__x86_64__) || defined(_M_X64) || defined(ARCH_PPC) || \
+      defined(ARCH_ARM))
  static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
                                                        const char* s2,
-                                                       const char* s2_limit) {
+                                                       const char* s2_limit,
+                                                       uint64_t* data) {
    assert(s2_limit >= s2);
    size_t matched = 0;

@@ -103,12 +185,72 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
    // uncommon code paths that determine, without extra effort, whether the match
    // length is less than 8. In short, we are hoping to avoid a conditional
    // branch, and perhaps get better code layout from the C++ compiler.
-   if (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 8)) {
-     uint64 a1 = UNALIGNED_LOAD64(s1);
-     uint64 a2 = UNALIGNED_LOAD64(s2);
-     if (a1 != a2) {
-       return std::pair<size_t, bool>(Bits::FindLSBSetNonZero64(a1 ^ a2) >> 3,
-                                      true);
+   if (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 16)) {
+     uint64_t a1 = UNALIGNED_LOAD64(s1);
+     uint64_t a2 = UNALIGNED_LOAD64(s2);
+     if (SNAPPY_PREDICT_TRUE(a1 != a2)) {
+       // This code is critical for performance. The reason is that it determines
+       // how much to advance `ip` (s2). This obviously depends on both the loads
+       // from the `candidate` (s1) and `ip`. Furthermore the next `candidate`
+       // depends on the advanced `ip` calculated here through a load, hash and
+       // new candidate hash lookup (a lot of cycles). This makes s1 (ie.
+       // `candidate`) the variable that limits throughput. This is the reason we
+       // go through hoops to have this function update `data` for the next iter.
+       // The straightforward code would use *data, given by
+       //
+       // *data = UNALIGNED_LOAD64(s2 + matched_bytes) (Latency of 5 cycles),
+       //
+       // as input for the hash table lookup to find next candidate. However
+       // this forces the load on the data dependency chain of s1, because
+       // matched_bytes directly depends on s1. However matched_bytes is 0..7, so
+       // we can also calculate *data by
+       //
+       // *data = AlignRight(UNALIGNED_LOAD64(s2), UNALIGNED_LOAD64(s2 + 8),
+       //                    matched_bytes);
+       //
+       // The loads do not depend on s1 anymore and are thus off the bottleneck.
+       // The straightforward implementation on x86_64 would be to use
+       //
+       // shrd rax, rdx, cl (cl being matched_bytes * 8)
+       //
+       // unfortunately shrd with a variable shift has a 4 cycle latency. So this
+       // only wins 1 cycle. The BMI2 shrx instruction is a 1 cycle variable
+       // shift instruction but can only shift 64 bits. If we focus on just
+       // obtaining the least significant 4 bytes, we can obtain this by
+       //
+       // *data = ConditionalMove(matched_bytes < 4, UNALIGNED_LOAD64(s2),
+       //                         UNALIGNED_LOAD64(s2 + 4) >> ((matched_bytes & 3) * 8);
+       //
+       // Writen like above this is not a big win, the conditional move would be
+       // a cmp followed by a cmov (2 cycles) followed by a shift (1 cycle).
+       // However matched_bytes < 4 is equal to
+       // static_cast<uint32_t>(xorval) != 0. Writen that way, the conditional
+       // move (2 cycles) can execute in parallel with FindLSBSetNonZero64
+       // (tzcnt), which takes 3 cycles.
+       uint64_t xorval = a1 ^ a2;
+       int shift = Bits::FindLSBSetNonZero64(xorval);
+       size_t matched_bytes = shift >> 3;
+       uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
+ #ifndef __x86_64__
+       a2 = static_cast<uint32_t>(xorval) == 0 ? a3 : a2;
+ #else
+       // Ideally this would just be
+       //
+       // a2 = static_cast<uint32_t>(xorval) == 0 ? a3 : a2;
+       //
+       // However clang correctly infers that the above statement participates on
+       // a critical data dependency chain and thus, unfortunately, refuses to
+       // use a conditional move (it's tuned to cut data dependencies). In this
+       // case there is a longer parallel chain anyway AND this will be fairly
+       // unpredictable.
+       asm("testl %k2, %k2\n\t"
+           "cmovzq %1, %0\n\t"
+           : "+r"(a2)
+           : "r"(a3), "r"(xorval)
+           : "cc");
+ #endif
+       *data = a2 >> (shift & (3 * 8));
+       return std::pair<size_t, bool>(matched_bytes, true);
      } else {
        matched = 8;
        s2 += 8;
@@ -119,14 +261,28 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
    // time until we find a 64-bit block that doesn't match; then we find
    // the first non-matching bit and use that to calculate the total
    // length of the match.
-   while (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 8)) {
-     if (UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched)) {
+   while (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 16)) {
+     uint64_t a1 = UNALIGNED_LOAD64(s1 + matched);
+     uint64_t a2 = UNALIGNED_LOAD64(s2);
+     if (a1 == a2) {
        s2 += 8;
        matched += 8;
      } else {
-       uint64 x = UNALIGNED_LOAD64(s2) ^ UNALIGNED_LOAD64(s1 + matched);
-       int matching_bits = Bits::FindLSBSetNonZero64(x);
-       matched += matching_bits >> 3;
+       uint64_t xorval = a1 ^ a2;
+       int shift = Bits::FindLSBSetNonZero64(xorval);
+       size_t matched_bytes = shift >> 3;
+       uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
+ #ifndef __x86_64__
+       a2 = static_cast<uint32_t>(xorval) == 0 ? a3 : a2;
+ #else
+       asm("testl %k2, %k2\n\t"
+           "cmovzq %1, %0\n\t"
+           : "+r"(a2)
+           : "r"(a3), "r"(xorval)
+           : "cc");
+ #endif
+       *data = a2 >> (shift & (3 * 8));
+       matched += matched_bytes;
        assert(matched >= 8);
        return std::pair<size_t, bool>(matched, false);
      }
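The long comment in the previous hunk is easier to follow with the data flow in one place. The standalone sketch below is not upstream code: Load64 and FirstMismatchData are made-up names, it assumes a little-endian host (as the #if guard does) and at least 12 readable bytes at s2, which the s2 <= s2_limit - 16 guard provides. It shows that the value written to *data is the run of bytes starting at the first mismatching position, computed from loads that never depend on s1:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>

// Unaligned 64-bit load; on a little-endian host byte i of p lands in bits 8*i.
inline uint64_t Load64(const char* p) {
  uint64_t v;
  std::memcpy(&v, p, sizeof(v));
  return v;
}

// Returns {matched_bytes, data}. The low 4+ bytes of data equal the bytes
// starting at s2 + matched_bytes (the first disagreement), which is what the
// next hash lookup consumes -- without a load at an address derived from s1.
inline std::pair<size_t, uint64_t> FirstMismatchData(const char* s1,
                                                     const char* s2) {
  uint64_t a1 = Load64(s1);
  uint64_t a2 = Load64(s2);
  assert(a1 != a2);
  uint64_t xorval = a1 ^ a2;
  int shift = __builtin_ctzll(xorval);  // GCC/Clang builtin; stands in for Bits::FindLSBSetNonZero64
  size_t matched_bytes = shift >> 3;    // 0..7
  uint64_t a3 = Load64(s2 + 4);
  // matched_bytes < 4 exactly when the low 32 bits of xorval are non-zero, so
  // this select is the cmov the inline asm forces on x86_64.
  uint64_t selected = (static_cast<uint32_t>(xorval) == 0) ? a3 : a2;
  uint64_t data = selected >> (shift & (3 * 8));  // bytes at s2 + matched_bytes
  return {matched_bytes, data};
}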
@@ -136,6 +292,9 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
        ++s2;
        ++matched;
      } else {
+       if (s2 <= s2_limit - 8) {
+         *data = UNALIGNED_LOAD64(s2);
+       }
        return std::pair<size_t, bool>(matched, matched < 8);
      }
    }
@@ -144,7 +303,8 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
  #else
  static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
                                                        const char* s2,
-                                                       const char* s2_limit) {
+                                                       const char* s2_limit,
+                                                       uint64_t* data) {
    // Implementation based on the x86-64 version, above.
    assert(s2_limit >= s2);
    int matched = 0;
@@ -155,15 +315,17 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
      matched += 4;
    }
    if (LittleEndian::IsLittleEndian() && s2 <= s2_limit - 4) {
-     uint32 x = UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
+     uint32_t x = UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
      int matching_bits = Bits::FindLSBSetNonZero(x);
      matched += matching_bits >> 3;
+     s2 += matching_bits >> 3;
    } else {
      while ((s2 < s2_limit) && (s1[matched] == *s2)) {
        ++s2;
        ++matched;
      }
    }
+   if (s2 <= s2_limit - 8) *data = LittleEndian::Load64(s2);
    return std::pair<size_t, bool>(matched, matched < 8);
  }
  #endif
@@ -190,7 +352,8 @@ static const int kMaximumTagLength = 5; // COPY_4_BYTE_OFFSET plus the actual o
  // because of efficiency reasons:
  // (1) Extracting a byte is faster than a bit-field
  // (2) It properly aligns copy offset so we do not need a <<8
- static const uint16 char_table[256] = {
+ static constexpr uint16_t char_table[256] = {
+   // clang-format off
    0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002,
    0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004,
    0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006,
@@ -222,7 +385,8 @@ static const uint16 char_table[256] = {
    0x0039, 0x0f04, 0x1039, 0x2039, 0x003a, 0x0f05, 0x103a, 0x203a,
    0x003b, 0x0f06, 0x103b, 0x203b, 0x003c, 0x0f07, 0x103c, 0x203c,
    0x0801, 0x0f08, 0x103d, 0x203d, 0x1001, 0x0f09, 0x103e, 0x203e,
-   0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040
+   0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040,
+   // clang-format on
  };

  } // end namespace internal
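The entry layout behind the two efficiency notes can be read off the table values: the low byte looks like the length, bits 8-10 hold the copy offset's high bits already shifted into place, and the bits above that give the number of trailer bytes following the tag. A hedged decoding sketch; TagInfo and DecodeTag are hypothetical names and the field layout is inferred here, not quoted from upstream:

#include <cstdint>

struct TagInfo {
  uint32_t length;         // literal length or copy length
  uint32_t copy_offset;    // high offset bits, already aligned at bit 8
  uint32_t trailer_bytes;  // bytes to read after the tag byte itself
};

inline TagInfo DecodeTag(uint16_t entry) {
  // (1) whole-byte extracts instead of bit-fields;
  // (2) the offset bits stay at bits 8-10, so the trailing offset byte can be
  //     combined in directly with no "<< 8".
  return {entry & 0xFFu, entry & 0x700u, static_cast<uint32_t>(entry) >> 11};
}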
@@ -26,23 +26,31 @@
  // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

- #include <string.h>
+ #include <stddef.h>
+ #include <cstring>

  #include "snappy-sinksource.h"

  namespace snappy {

- Source::~Source() { }
+ Source::~Source() = default;

- Sink::~Sink() { }
+ Sink::~Sink() = default;

  char* Sink::GetAppendBuffer(size_t length, char* scratch) {
+   // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+   (void)length;
+
    return scratch;
  }

  char* Sink::GetAppendBufferVariable(
      size_t min_size, size_t desired_size_hint, char* scratch,
      size_t scratch_size, size_t* allocated_size) {
+   // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+   (void)min_size;
+   (void)desired_size_hint;
+
    *allocated_size = scratch_size;
    return scratch;
  }
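For reference, the TODO in these stubs points at the C++17 replacement for the casts to void: annotate the parameter itself with [[maybe_unused]]. A standalone illustration (not the snappy source, and it requires C++17):

#include <cstddef>

char* GetAppendBufferSketch([[maybe_unused]] std::size_t length, char* scratch) {
  return scratch;  // no (void)length needed; the attribute silences the warning
}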
@@ -55,7 +63,7 @@ void Sink::AppendAndTakeOwnership(
    (*deleter)(deleter_arg, bytes, n);
  }

- ByteArraySource::~ByteArraySource() { }
+ ByteArraySource::~ByteArraySource() = default;

  size_t ByteArraySource::Available() const { return left_; }

@@ -74,22 +82,26 @@ UncheckedByteArraySink::~UncheckedByteArraySink() { }
  void UncheckedByteArraySink::Append(const char* data, size_t n) {
    // Do no copying if the caller filled in the result of GetAppendBuffer()
    if (data != dest_) {
-     memcpy(dest_, data, n);
+     std::memcpy(dest_, data, n);
    }
    dest_ += n;
  }

  char* UncheckedByteArraySink::GetAppendBuffer(size_t len, char* scratch) {
+   // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+   (void)len;
+   (void)scratch;
+
    return dest_;
  }

  void UncheckedByteArraySink::AppendAndTakeOwnership(
-     char* data, size_t n,
+     char* bytes, size_t n,
      void (*deleter)(void*, const char*, size_t),
      void *deleter_arg) {
-   if (data != dest_) {
-     memcpy(dest_, data, n);
-     (*deleter)(deleter_arg, data, n);
+   if (bytes != dest_) {
+     std::memcpy(dest_, bytes, n);
+     (*deleter)(deleter_arg, bytes, n);
    }
    dest_ += n;
  }
@@ -97,6 +109,11 @@ void UncheckedByteArraySink::AppendAndTakeOwnership(
  char* UncheckedByteArraySink::GetAppendBufferVariable(
      size_t min_size, size_t desired_size_hint, char* scratch,
      size_t scratch_size, size_t* allocated_size) {
+   // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+   (void)min_size;
+   (void)scratch;
+   (void)scratch_size;
+
    *allocated_size = desired_size_hint;
    return dest_;
  }
@@ -146,10 +146,10 @@ class Source {
  class ByteArraySource : public Source {
   public:
    ByteArraySource(const char* p, size_t n) : ptr_(p), left_(n) { }
-   virtual ~ByteArraySource();
-   virtual size_t Available() const;
-   virtual const char* Peek(size_t* len);
-   virtual void Skip(size_t n);
+   ~ByteArraySource() override;
+   size_t Available() const override;
+   const char* Peek(size_t* len) override;
+   void Skip(size_t n) override;
   private:
    const char* ptr_;
    size_t left_;
@@ -159,15 +159,15 @@ class ByteArraySource : public Source {
  class UncheckedByteArraySink : public Sink {
   public:
    explicit UncheckedByteArraySink(char* dest) : dest_(dest) { }
-   virtual ~UncheckedByteArraySink();
-   virtual void Append(const char* data, size_t n);
-   virtual char* GetAppendBuffer(size_t len, char* scratch);
-   virtual char* GetAppendBufferVariable(
+   ~UncheckedByteArraySink() override;
+   void Append(const char* data, size_t n) override;
+   char* GetAppendBuffer(size_t len, char* scratch) override;
+   char* GetAppendBufferVariable(
        size_t min_size, size_t desired_size_hint, char* scratch,
-       size_t scratch_size, size_t* allocated_size);
-   virtual void AppendAndTakeOwnership(
+       size_t scratch_size, size_t* allocated_size) override;
+   void AppendAndTakeOwnership(
        char* bytes, size_t n, void (*deleter)(void*, const char*, size_t),
-       void *deleter_arg);
+       void *deleter_arg) override;

    // Return the current output pointer so that a caller can see how
    // many bytes were produced.
@@ -33,7 +33,7 @@

  namespace snappy {

- void Varint::Append32(std::string* s, uint32 value) {
+ void Varint::Append32(std::string* s, uint32_t value) {
    char buf[Varint::kMax32];
    const char* p = Varint::Encode32(buf, value);
    s->append(buf, p - buf);
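Varint::Encode32 writes the little-endian base-128 varint snappy uses for the uncompressed-length prefix; kMax32 is 5 because 32 bits need at most five 7-bit groups. A standalone sketch of that encoding, where EncodeVarint32 is an illustrative name rather than the upstream function:

#include <cstdint>

inline char* EncodeVarint32(char* dst, uint32_t value) {
  while (value >= 0x80) {
    *dst++ = static_cast<char>((value & 0x7F) | 0x80);  // 7 data bits + continuation bit
    value >>= 7;
  }
  *dst++ = static_cast<char>(value);  // final byte, continuation bit clear
  return dst;  // one past the last byte written, matching how Append32 uses Encode32
}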