snappy 0.3.0-java → 0.5.0-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/main.yml +2 -2
  3. data/.github/workflows/publish.yml +7 -13
  4. data/Dockerfile +1 -1
  5. data/Gemfile +1 -0
  6. data/README.md +20 -1
  7. data/Rakefile +1 -1
  8. data/ext/extconf.rb +13 -11
  9. data/lib/snappy/shim.rb +3 -23
  10. data/lib/snappy/version.rb +1 -1
  11. data/lib/snappy/writer.rb +1 -1
  12. data/lib/snappy_ext.jar +0 -0
  13. data/snappy.gemspec +1 -0
  14. data/test/snappy_test.rb +29 -4
  15. data/vendor/snappy/BUILD.bazel +211 -0
  16. data/vendor/snappy/CMakeLists.txt +176 -31
  17. data/vendor/snappy/CONTRIBUTING.md +9 -4
  18. data/vendor/snappy/MODULE.bazel +23 -0
  19. data/vendor/snappy/NEWS +27 -0
  20. data/vendor/snappy/README.md +52 -35
  21. data/vendor/snappy/WORKSPACE +27 -0
  22. data/vendor/snappy/WORKSPACE.bzlmod +0 -0
  23. data/vendor/snappy/cmake/config.h.in +30 -23
  24. data/vendor/snappy/snappy-internal.h +218 -25
  25. data/vendor/snappy/snappy-sinksource.cc +26 -9
  26. data/vendor/snappy/snappy-sinksource.h +11 -11
  27. data/vendor/snappy/snappy-stubs-internal.cc +1 -1
  28. data/vendor/snappy/snappy-stubs-internal.h +231 -306
  29. data/vendor/snappy/snappy-stubs-public.h.in +0 -11
  30. data/vendor/snappy/snappy-test.cc +88 -198
  31. data/vendor/snappy/snappy-test.h +102 -285
  32. data/vendor/snappy/snappy.cc +1412 -425
  33. data/vendor/snappy/snappy.h +60 -10
  34. data/vendor/snappy/snappy_benchmark.cc +398 -0
  35. data/vendor/snappy/snappy_compress_fuzzer.cc +21 -16
  36. data/vendor/snappy/snappy_test_data.cc +57 -0
  37. data/vendor/snappy/snappy_test_data.h +68 -0
  38. data/vendor/snappy/snappy_test_tool.cc +471 -0
  39. data/vendor/snappy/snappy_uncompress_fuzzer.cc +3 -2
  40. data/vendor/snappy/snappy_unittest.cc +183 -666
  41. metadata +13 -8
data/vendor/snappy/snappy-stubs-internal.h
@@ -31,21 +31,23 @@
  #ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
  #define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_

- #ifdef HAVE_CONFIG_H
+ #if HAVE_CONFIG_H
  #include "config.h"
  #endif

- #include <string>
+ #include <stdint.h>

- #include <assert.h>
- #include <stdlib.h>
- #include <string.h>
+ #include <cassert>
+ #include <cstdlib>
+ #include <cstring>
+ #include <limits>
+ #include <string>

- #ifdef HAVE_SYS_MMAN_H
+ #if HAVE_SYS_MMAN_H
  #include <sys/mman.h>
  #endif

- #ifdef HAVE_UNISTD_H
+ #if HAVE_UNISTD_H
  #include <unistd.h>
  #endif

@@ -67,19 +69,11 @@

  #include "snappy-stubs-public.h"

- #if defined(__x86_64__)
-
- // Enable 64-bit optimized versions of some routines.
- #define ARCH_K8 1
-
- #elif defined(__ppc64__)
-
+ // Used to enable 64-bit optimized versions of some routines.
+ #if defined(__PPC64__) || defined(__powerpc64__)
  #define ARCH_PPC 1
-
- #elif defined(__aarch64__)
-
+ #elif defined(__aarch64__) || defined(_M_ARM64)
  #define ARCH_ARM 1
-
  #endif

  // Needed by OS X, among others.
@@ -93,223 +87,83 @@
  #ifdef ARRAYSIZE
  #undef ARRAYSIZE
  #endif
- #define ARRAYSIZE(a) (sizeof(a) / sizeof(*(a)))
+ #define ARRAYSIZE(a) int{sizeof(a) / sizeof(*(a))}

  // Static prediction hints.
- #ifdef HAVE_BUILTIN_EXPECT
+ #if HAVE_BUILTIN_EXPECT
  #define SNAPPY_PREDICT_FALSE(x) (__builtin_expect(x, 0))
  #define SNAPPY_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
  #else
  #define SNAPPY_PREDICT_FALSE(x) x
  #define SNAPPY_PREDICT_TRUE(x) x
- #endif
-
- // This is only used for recomputing the tag byte table used during
- // decompression; for simplicity we just remove it from the open-source
- // version (anyone who wants to regenerate it can just do the call
- // themselves within main()).
- #define DEFINE_bool(flag_name, default_value, description) \
- bool FLAGS_ ## flag_name = default_value
- #define DECLARE_bool(flag_name) \
- extern bool FLAGS_ ## flag_name
-
- namespace snappy {
-
- static const uint32 kuint32max = static_cast<uint32>(0xFFFFFFFF);
- static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
+ #endif // HAVE_BUILTIN_EXPECT

- // Potentially unaligned loads and stores.
-
- // x86, PowerPC, and ARM64 can simply do these loads and stores native.
-
- #if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
- defined(__aarch64__)
-
- #define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
- #define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
- #define UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))
-
- #define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
- #define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
- #define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))
+ // Inlining hints.
+ #if HAVE_ATTRIBUTE_ALWAYS_INLINE
+ #define SNAPPY_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
+ #else
+ #define SNAPPY_ATTRIBUTE_ALWAYS_INLINE
+ #endif // HAVE_ATTRIBUTE_ALWAYS_INLINE

- // ARMv7 and newer support native unaligned accesses, but only of 16-bit
- // and 32-bit values (not 64-bit); older versions either raise a fatal signal,
- // do an unaligned read and rotate the words around a bit, or do the reads very
- // slowly (trip through kernel mode). There's no simple #define that says just
- // “ARMv7 or higher”, so we have to filter away all ARMv5 and ARMv6
- // sub-architectures.
- //
- // This is a mess, but there's not much we can do about it.
- //
- // To further complicate matters, only LDR instructions (single reads) are
- // allowed to be unaligned, not LDRD (two reads) or LDM (many reads). Unless we
- // explicitly tell the compiler that these accesses can be unaligned, it can and
- // will combine accesses. On armcc, the way to signal this is done by accessing
- // through the type (uint32 __packed *), but GCC has no such attribute
- // (it ignores __attribute__((packed)) on individual variables). However,
- // we can tell it that a _struct_ is unaligned, which has the same effect,
- // so we do that.
-
- #elif defined(__arm__) && \
- !defined(__ARM_ARCH_4__) && \
- !defined(__ARM_ARCH_4T__) && \
- !defined(__ARM_ARCH_5__) && \
- !defined(__ARM_ARCH_5T__) && \
- !defined(__ARM_ARCH_5TE__) && \
- !defined(__ARM_ARCH_5TEJ__) && \
- !defined(__ARM_ARCH_6__) && \
- !defined(__ARM_ARCH_6J__) && \
- !defined(__ARM_ARCH_6K__) && \
- !defined(__ARM_ARCH_6Z__) && \
- !defined(__ARM_ARCH_6ZK__) && \
- !defined(__ARM_ARCH_6T2__)
-
- #if __GNUC__
- #define ATTRIBUTE_PACKED __attribute__((__packed__))
+ #if HAVE_BUILTIN_PREFETCH
+ #define SNAPPY_PREFETCH(ptr) __builtin_prefetch(ptr, 0, 3)
  #else
- #define ATTRIBUTE_PACKED
+ #define SNAPPY_PREFETCH(ptr) (void)(ptr)
  #endif

- namespace base {
- namespace internal {
-
- struct Unaligned16Struct {
- uint16 value;
- uint8 dummy; // To make the size non-power-of-two.
- } ATTRIBUTE_PACKED;
-
- struct Unaligned32Struct {
- uint32 value;
- uint8 dummy; // To make the size non-power-of-two.
- } ATTRIBUTE_PACKED;
-
- } // namespace internal
- } // namespace base
-
- #define UNALIGNED_LOAD16(_p) \
- ((reinterpret_cast<const ::snappy::base::internal::Unaligned16Struct *>(_p))->value)
- #define UNALIGNED_LOAD32(_p) \
- ((reinterpret_cast<const ::snappy::base::internal::Unaligned32Struct *>(_p))->value)
-
- #define UNALIGNED_STORE16(_p, _val) \
- ((reinterpret_cast< ::snappy::base::internal::Unaligned16Struct *>(_p))->value = \
- (_val))
- #define UNALIGNED_STORE32(_p, _val) \
- ((reinterpret_cast< ::snappy::base::internal::Unaligned32Struct *>(_p))->value = \
- (_val))
-
- // TODO: NEON supports unaligned 64-bit loads and stores.
- // See if that would be more efficient on platforms supporting it,
- // at least for copies.
-
- inline uint64 UNALIGNED_LOAD64(const void *p) {
- uint64 t;
- memcpy(&t, p, sizeof t);
- return t;
- }
-
- inline void UNALIGNED_STORE64(void *p, uint64 v) {
- memcpy(p, &v, sizeof v);
- }
-
- #else
+ // Stubbed version of ABSL_FLAG.
+ //
+ // In the open source version, flags can only be changed at compile time.
+ #define SNAPPY_FLAG(flag_type, flag_name, default_value, help) \
+ flag_type FLAGS_ ## flag_name = default_value

- // These functions are provided for architectures that don't support
- // unaligned loads and stores.
+ namespace snappy {

- inline uint16 UNALIGNED_LOAD16(const void *p) {
- uint16 t;
- memcpy(&t, p, sizeof t);
- return t;
- }
+ // Stubbed version of absl::GetFlag().
+ template <typename T>
+ inline T GetFlag(T flag) { return flag; }

- inline uint32 UNALIGNED_LOAD32(const void *p) {
- uint32 t;
- memcpy(&t, p, sizeof t);
- return t;
- }
+ static const uint32_t kuint32max = std::numeric_limits<uint32_t>::max();
+ static const int64_t kint64max = std::numeric_limits<int64_t>::max();

- inline uint64 UNALIGNED_LOAD64(const void *p) {
- uint64 t;
- memcpy(&t, p, sizeof t);
- return t;
- }
+ // Potentially unaligned loads and stores.

- inline void UNALIGNED_STORE16(void *p, uint16 v) {
- memcpy(p, &v, sizeof v);
+ inline uint16_t UNALIGNED_LOAD16(const void *p) {
+ // Compiles to a single movzx/ldrh on clang/gcc/msvc.
+ uint16_t v;
+ std::memcpy(&v, p, sizeof(v));
+ return v;
  }

- inline void UNALIGNED_STORE32(void *p, uint32 v) {
- memcpy(p, &v, sizeof v);
+ inline uint32_t UNALIGNED_LOAD32(const void *p) {
+ // Compiles to a single mov/ldr on clang/gcc/msvc.
+ uint32_t v;
+ std::memcpy(&v, p, sizeof(v));
+ return v;
  }

- inline void UNALIGNED_STORE64(void *p, uint64 v) {
- memcpy(p, &v, sizeof v);
+ inline uint64_t UNALIGNED_LOAD64(const void *p) {
+ // Compiles to a single mov/ldr on clang/gcc/msvc.
+ uint64_t v;
+ std::memcpy(&v, p, sizeof(v));
+ return v;
  }

- #endif
-
- // The following guarantees declaration of the byte swap functions.
- #if defined(SNAPPY_IS_BIG_ENDIAN)
-
- #ifdef HAVE_SYS_BYTEORDER_H
- #include <sys/byteorder.h>
- #endif
-
- #ifdef HAVE_SYS_ENDIAN_H
- #include <sys/endian.h>
- #endif
-
- #ifdef _MSC_VER
- #include <stdlib.h>
- #define bswap_16(x) _byteswap_ushort(x)
- #define bswap_32(x) _byteswap_ulong(x)
- #define bswap_64(x) _byteswap_uint64(x)
-
- #elif defined(__APPLE__)
- // Mac OS X / Darwin features
- #include <libkern/OSByteOrder.h>
- #define bswap_16(x) OSSwapInt16(x)
- #define bswap_32(x) OSSwapInt32(x)
- #define bswap_64(x) OSSwapInt64(x)
-
- #elif defined(HAVE_BYTESWAP_H)
- #include <byteswap.h>
-
- #elif defined(bswap32)
- // FreeBSD defines bswap{16,32,64} in <sys/endian.h> (already #included).
- #define bswap_16(x) bswap16(x)
- #define bswap_32(x) bswap32(x)
- #define bswap_64(x) bswap64(x)
-
- #elif defined(BSWAP_64)
- // Solaris 10 defines BSWAP_{16,32,64} in <sys/byteorder.h> (already #included).
- #define bswap_16(x) BSWAP_16(x)
- #define bswap_32(x) BSWAP_32(x)
- #define bswap_64(x) BSWAP_64(x)
-
- #else
-
- inline uint16 bswap_16(uint16 x) {
- return (x << 8) | (x >> 8);
+ inline void UNALIGNED_STORE16(void *p, uint16_t v) {
+ // Compiles to a single mov/strh on clang/gcc/msvc.
+ std::memcpy(p, &v, sizeof(v));
  }

- inline uint32 bswap_32(uint32 x) {
- x = ((x & 0xff00ff00UL) >> 8) | ((x & 0x00ff00ffUL) << 8);
- return (x >> 16) | (x << 16);
+ inline void UNALIGNED_STORE32(void *p, uint32_t v) {
+ // Compiles to a single mov/str on clang/gcc/msvc.
+ std::memcpy(p, &v, sizeof(v));
  }

- inline uint64 bswap_64(uint64 x) {
- x = ((x & 0xff00ff00ff00ff00ULL) >> 8) | ((x & 0x00ff00ff00ff00ffULL) << 8);
- x = ((x & 0xffff0000ffff0000ULL) >> 16) | ((x & 0x0000ffff0000ffffULL) << 16);
- return (x >> 32) | (x << 32);
+ inline void UNALIGNED_STORE64(void *p, uint64_t v) {
+ // Compiles to a single mov/str on clang/gcc/msvc.
+ std::memcpy(p, &v, sizeof(v));
  }

- #endif
-
- #endif // defined(SNAPPY_IS_BIG_ENDIAN)
-
  // Convert to little-endian storage, opposite of network format.
  // Convert x from host to little endian: x = LittleEndian.FromHost(x);
  // convert x from little endian to host: x = LittleEndian.ToHost(x);
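Note on the hunk above: the new header drops the per-architecture type-punning and packed-struct tricks and expresses every unaligned access as a std::memcpy into a local variable, which is the portable idiom in C++ and still compiles to a single load or store on current compilers. The following is a minimal standalone sketch of that idiom, not taken from the package; LoadUnaligned32 is a hypothetical name used only for illustration.

    #include <cstdint>
    #include <cstring>
    #include <iostream>

    // Mirrors the shape of the new UNALIGNED_LOAD32: copy bytes into a local
    // instead of dereferencing a possibly misaligned pointer.
    inline uint32_t LoadUnaligned32(const void* p) {
      uint32_t v;
      std::memcpy(&v, p, sizeof(v));  // no alignment requirement on p
      return v;
    }

    int main() {
      unsigned char buf[] = {0x00, 0x78, 0x56, 0x34, 0x12};
      // Read from an odd (misaligned) offset; on a little-endian host this
      // prints 12345678.
      std::cout << std::hex << LoadUnaligned32(buf + 1) << '\n';
      return 0;
    }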
@@ -321,44 +175,110 @@ inline uint64 bswap_64(uint64 x) {
  // x = LittleEndian.Load16(p);
  class LittleEndian {
  public:
- // Conversion functions.
- #if defined(SNAPPY_IS_BIG_ENDIAN)
-
- static uint16 FromHost16(uint16 x) { return bswap_16(x); }
- static uint16 ToHost16(uint16 x) { return bswap_16(x); }
-
- static uint32 FromHost32(uint32 x) { return bswap_32(x); }
- static uint32 ToHost32(uint32 x) { return bswap_32(x); }
-
- static bool IsLittleEndian() { return false; }
-
- #else // !defined(SNAPPY_IS_BIG_ENDIAN)
-
- static uint16 FromHost16(uint16 x) { return x; }
- static uint16 ToHost16(uint16 x) { return x; }
-
- static uint32 FromHost32(uint32 x) { return x; }
- static uint32 ToHost32(uint32 x) { return x; }
+ // Functions to do unaligned loads and stores in little-endian order.
+ static inline uint16_t Load16(const void *ptr) {
+ // Compiles to a single mov/str on recent clang and gcc.
+ #if SNAPPY_IS_BIG_ENDIAN
+ const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
+ return (static_cast<uint16_t>(buffer[0])) |
+ (static_cast<uint16_t>(buffer[1]) << 8);
+ #else
+ // memcpy() turns into a single instruction early in the optimization
+ // pipeline (relatively to a series of byte accesses). So, using memcpy
+ // instead of byte accesses may lead to better decisions in more stages of
+ // the optimization pipeline.
+ uint16_t value;
+ std::memcpy(&value, ptr, 2);
+ return value;
+ #endif
+ }

- static bool IsLittleEndian() { return true; }
+ static inline uint32_t Load32(const void *ptr) {
+ // Compiles to a single mov/str on recent clang and gcc.
+ #if SNAPPY_IS_BIG_ENDIAN
+ const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
+ return (static_cast<uint32_t>(buffer[0])) |
+ (static_cast<uint32_t>(buffer[1]) << 8) |
+ (static_cast<uint32_t>(buffer[2]) << 16) |
+ (static_cast<uint32_t>(buffer[3]) << 24);
+ #else
+ // See Load16() for the rationale of using memcpy().
+ uint32_t value;
+ std::memcpy(&value, ptr, 4);
+ return value;
+ #endif
+ }

- #endif // !defined(SNAPPY_IS_BIG_ENDIAN)
+ static inline uint64_t Load64(const void *ptr) {
+ // Compiles to a single mov/str on recent clang and gcc.
+ #if SNAPPY_IS_BIG_ENDIAN
+ const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
+ return (static_cast<uint64_t>(buffer[0])) |
+ (static_cast<uint64_t>(buffer[1]) << 8) |
+ (static_cast<uint64_t>(buffer[2]) << 16) |
+ (static_cast<uint64_t>(buffer[3]) << 24) |
+ (static_cast<uint64_t>(buffer[4]) << 32) |
+ (static_cast<uint64_t>(buffer[5]) << 40) |
+ (static_cast<uint64_t>(buffer[6]) << 48) |
+ (static_cast<uint64_t>(buffer[7]) << 56);
+ #else
+ // See Load16() for the rationale of using memcpy().
+ uint64_t value;
+ std::memcpy(&value, ptr, 8);
+ return value;
+ #endif
+ }

- // Functions to do unaligned loads and stores in little-endian order.
- static uint16 Load16(const void *p) {
- return ToHost16(UNALIGNED_LOAD16(p));
+ static inline void Store16(void *dst, uint16_t value) {
+ // Compiles to a single mov/str on recent clang and gcc.
+ #if SNAPPY_IS_BIG_ENDIAN
+ uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
+ buffer[0] = static_cast<uint8_t>(value);
+ buffer[1] = static_cast<uint8_t>(value >> 8);
+ #else
+ // See Load16() for the rationale of using memcpy().
+ std::memcpy(dst, &value, 2);
+ #endif
  }

- static void Store16(void *p, uint16 v) {
- UNALIGNED_STORE16(p, FromHost16(v));
+ static void Store32(void *dst, uint32_t value) {
+ // Compiles to a single mov/str on recent clang and gcc.
+ #if SNAPPY_IS_BIG_ENDIAN
+ uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
+ buffer[0] = static_cast<uint8_t>(value);
+ buffer[1] = static_cast<uint8_t>(value >> 8);
+ buffer[2] = static_cast<uint8_t>(value >> 16);
+ buffer[3] = static_cast<uint8_t>(value >> 24);
+ #else
+ // See Load16() for the rationale of using memcpy().
+ std::memcpy(dst, &value, 4);
+ #endif
  }

- static uint32 Load32(const void *p) {
- return ToHost32(UNALIGNED_LOAD32(p));
+ static void Store64(void* dst, uint64_t value) {
+ // Compiles to a single mov/str on recent clang and gcc.
+ #if SNAPPY_IS_BIG_ENDIAN
+ uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
+ buffer[0] = static_cast<uint8_t>(value);
+ buffer[1] = static_cast<uint8_t>(value >> 8);
+ buffer[2] = static_cast<uint8_t>(value >> 16);
+ buffer[3] = static_cast<uint8_t>(value >> 24);
+ buffer[4] = static_cast<uint8_t>(value >> 32);
+ buffer[5] = static_cast<uint8_t>(value >> 40);
+ buffer[6] = static_cast<uint8_t>(value >> 48);
+ buffer[7] = static_cast<uint8_t>(value >> 56);
+ #else
+ // See Load16() for the rationale of using memcpy().
+ std::memcpy(dst, &value, 8);
+ #endif
  }

- static void Store32(void *p, uint32 v) {
- UNALIGNED_STORE32(p, FromHost32(v));
+ static inline constexpr bool IsLittleEndian() {
+ #if SNAPPY_IS_BIG_ENDIAN
+ return false;
+ #else
+ return true;
+ #endif // SNAPPY_IS_BIG_ENDIAN
  }
  };

@@ -366,19 +286,17 @@ class LittleEndian {
  class Bits {
  public:
  // Return floor(log2(n)) for positive integer n.
- static int Log2FloorNonZero(uint32 n);
+ static int Log2FloorNonZero(uint32_t n);

  // Return floor(log2(n)) for positive integer n. Returns -1 iff n == 0.
- static int Log2Floor(uint32 n);
+ static int Log2Floor(uint32_t n);

  // Return the first set least / most significant bit, 0-indexed. Returns an
  // undefined value if n == 0. FindLSBSetNonZero() is similar to ffs() except
  // that it's 0-indexed.
- static int FindLSBSetNonZero(uint32 n);
+ static int FindLSBSetNonZero(uint32_t n);

- #if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
- static int FindLSBSetNonZero64(uint64 n);
- #endif // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+ static int FindLSBSetNonZero64(uint64_t n);

  private:
  // No copying
@@ -386,9 +304,9 @@ class Bits {
  void operator=(const Bits&);
  };

- #ifdef HAVE_BUILTIN_CTZ
+ #if HAVE_BUILTIN_CTZ

- inline int Bits::Log2FloorNonZero(uint32 n) {
+ inline int Bits::Log2FloorNonZero(uint32_t n) {
  assert(n != 0);
  // (31 ^ x) is equivalent to (31 - x) for x in [0, 31]. An easy proof
  // represents subtraction in base 2 and observes that there's no carry.
@@ -399,66 +317,52 @@ inline int Bits::Log2FloorNonZero(uint32 n) {
  return 31 ^ __builtin_clz(n);
  }

- inline int Bits::Log2Floor(uint32 n) {
+ inline int Bits::Log2Floor(uint32_t n) {
  return (n == 0) ? -1 : Bits::Log2FloorNonZero(n);
  }

- inline int Bits::FindLSBSetNonZero(uint32 n) {
+ inline int Bits::FindLSBSetNonZero(uint32_t n) {
  assert(n != 0);
  return __builtin_ctz(n);
  }

- #if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
- inline int Bits::FindLSBSetNonZero64(uint64 n) {
- assert(n != 0);
- return __builtin_ctzll(n);
- }
- #endif // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
-
  #elif defined(_MSC_VER)

- inline int Bits::Log2FloorNonZero(uint32 n) {
+ inline int Bits::Log2FloorNonZero(uint32_t n) {
  assert(n != 0);
+ // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
  unsigned long where;
  _BitScanReverse(&where, n);
  return static_cast<int>(where);
  }

- inline int Bits::Log2Floor(uint32 n) {
+ inline int Bits::Log2Floor(uint32_t n) {
+ // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
  unsigned long where;
  if (_BitScanReverse(&where, n))
  return static_cast<int>(where);
  return -1;
  }

- inline int Bits::FindLSBSetNonZero(uint32 n) {
+ inline int Bits::FindLSBSetNonZero(uint32_t n) {
  assert(n != 0);
+ // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
  unsigned long where;
  if (_BitScanForward(&where, n))
  return static_cast<int>(where);
  return 32;
  }

- #if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
- inline int Bits::FindLSBSetNonZero64(uint64 n) {
- assert(n != 0);
- unsigned long where;
- if (_BitScanForward64(&where, n))
- return static_cast<int>(where);
- return 64;
- }
- #endif // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
-
  #else // Portable versions.

- inline int Bits::Log2FloorNonZero(uint32 n) {
+ inline int Bits::Log2FloorNonZero(uint32_t n) {
  assert(n != 0);

  int log = 0;
- uint32 value = n;
+ uint32_t value = n;
  for (int i = 4; i >= 0; --i) {
  int shift = (1 << i);
- uint32 x = value >> shift;
+ uint32_t x = value >> shift;
  if (x != 0) {
  value = x;
  log += shift;
@@ -468,16 +372,16 @@ inline int Bits::Log2FloorNonZero(uint32 n) {
  return log;
  }

- inline int Bits::Log2Floor(uint32 n) {
+ inline int Bits::Log2Floor(uint32_t n) {
  return (n == 0) ? -1 : Bits::Log2FloorNonZero(n);
  }

- inline int Bits::FindLSBSetNonZero(uint32 n) {
+ inline int Bits::FindLSBSetNonZero(uint32_t n) {
  assert(n != 0);

  int rc = 31;
  for (int i = 4, shift = 1 << 4; i >= 0; --i) {
- const uint32 x = n << shift;
+ const uint32_t x = n << shift;
  if (x != 0) {
  n = x;
  rc -= shift;
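Note on the Bits hunks above: Log2FloorNonZero() returns the index of the highest set bit (31 ^ __builtin_clz(n) equals 31 - clz(n)), and FindLSBSetNonZero() returns the 0-indexed position of the lowest set bit; the builtin, MSVC, and portable branches all agree on those semantics, and the next hunk does the same for the 64-bit variant. The following standalone spot-check is not part of the package and assumes a GCC or Clang toolchain where the builtins used by the HAVE_BUILTIN_CTZ branch are available.

    #include <cassert>

    int main() {
      // 31 ^ __builtin_clz(n) is 31 - clz(n): the index of the highest set
      // bit, i.e. floor(log2(n)) for n > 0.
      assert((31 ^ __builtin_clz(1u)) == 0);
      assert((31 ^ __builtin_clz(16u)) == 4);
      assert((31 ^ __builtin_clz(0x80000000u)) == 31);

      // __builtin_ctz(n) counts trailing zeros: the 0-indexed position of
      // the least significant set bit, matching FindLSBSetNonZero().
      assert(__builtin_ctz(1u) == 0);
      assert(__builtin_ctz(0x28u) == 3);  // 0b101000 -> lowest set bit is bit 3
      return 0;
    }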
@@ -487,27 +391,48 @@ inline int Bits::FindLSBSetNonZero(uint32 n) {
  return rc;
  }

- #if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+ #endif // End portable versions.
+
+ #if HAVE_BUILTIN_CTZ
+
+ inline int Bits::FindLSBSetNonZero64(uint64_t n) {
+ assert(n != 0);
+ return __builtin_ctzll(n);
+ }
+
+ #elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64))
+ // _BitScanForward64() is only available on x64 and ARM64.
+
+ inline int Bits::FindLSBSetNonZero64(uint64_t n) {
+ assert(n != 0);
+ // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
+ unsigned long where;
+ if (_BitScanForward64(&where, n))
+ return static_cast<int>(where);
+ return 64;
+ }
+
+ #else // Portable version.
+
  // FindLSBSetNonZero64() is defined in terms of FindLSBSetNonZero().
- inline int Bits::FindLSBSetNonZero64(uint64 n) {
+ inline int Bits::FindLSBSetNonZero64(uint64_t n) {
  assert(n != 0);

- const uint32 bottombits = static_cast<uint32>(n);
+ const uint32_t bottombits = static_cast<uint32_t>(n);
  if (bottombits == 0) {
- // Bottom bits are zero, so scan in top bits
- return 32 + FindLSBSetNonZero(static_cast<uint32>(n >> 32));
+ // Bottom bits are zero, so scan the top bits.
+ return 32 + FindLSBSetNonZero(static_cast<uint32_t>(n >> 32));
  } else {
  return FindLSBSetNonZero(bottombits);
  }
  }
- #endif // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)

- #endif // End portable versions.
+ #endif // HAVE_BUILTIN_CTZ

  // Variable-length integer encoding.
  class Varint {
  public:
- // Maximum lengths of varint encoding of uint32.
+ // Maximum lengths of varint encoding of uint32_t.
  static const int kMax32 = 5;

  // Attempts to parse a varint32 from a prefix of the bytes in [ptr,limit-1].
@@ -516,23 +441,23 @@ class Varint {
  // past the last byte of the varint32. Else returns NULL. On success,
  // "result <= limit".
  static const char* Parse32WithLimit(const char* ptr, const char* limit,
- uint32* OUTPUT);
+ uint32_t* OUTPUT);

  // REQUIRES "ptr" points to a buffer of length sufficient to hold "v".
  // EFFECTS Encodes "v" into "ptr" and returns a pointer to the
  // byte just past the last encoded byte.
- static char* Encode32(char* ptr, uint32 v);
+ static char* Encode32(char* ptr, uint32_t v);

  // EFFECTS Appends the varint representation of "value" to "*s".
- static void Append32(std::string* s, uint32 value);
+ static void Append32(std::string* s, uint32_t value);
  };

  inline const char* Varint::Parse32WithLimit(const char* p,
  const char* l,
- uint32* OUTPUT) {
+ uint32_t* OUTPUT) {
  const unsigned char* ptr = reinterpret_cast<const unsigned char*>(p);
  const unsigned char* limit = reinterpret_cast<const unsigned char*>(l);
- uint32 b, result;
+ uint32_t b, result;
  if (ptr >= limit) return NULL;
  b = *(ptr++); result = b & 127; if (b < 128) goto done;
  if (ptr >= limit) return NULL;
@@ -549,30 +474,30 @@ inline const char* Varint::Parse32WithLimit(const char* p,
  return reinterpret_cast<const char*>(ptr);
  }

- inline char* Varint::Encode32(char* sptr, uint32 v) {
+ inline char* Varint::Encode32(char* sptr, uint32_t v) {
  // Operate on characters as unsigneds
- unsigned char* ptr = reinterpret_cast<unsigned char*>(sptr);
- static const int B = 128;
- if (v < (1<<7)) {
- *(ptr++) = v;
- } else if (v < (1<<14)) {
- *(ptr++) = v | B;
- *(ptr++) = v>>7;
- } else if (v < (1<<21)) {
- *(ptr++) = v | B;
- *(ptr++) = (v>>7) | B;
- *(ptr++) = v>>14;
- } else if (v < (1<<28)) {
- *(ptr++) = v | B;
- *(ptr++) = (v>>7) | B;
- *(ptr++) = (v>>14) | B;
- *(ptr++) = v>>21;
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(sptr);
+ static const uint8_t B = 128;
+ if (v < (1 << 7)) {
+ *(ptr++) = static_cast<uint8_t>(v);
+ } else if (v < (1 << 14)) {
+ *(ptr++) = static_cast<uint8_t>(v | B);
+ *(ptr++) = static_cast<uint8_t>(v >> 7);
+ } else if (v < (1 << 21)) {
+ *(ptr++) = static_cast<uint8_t>(v | B);
+ *(ptr++) = static_cast<uint8_t>((v >> 7) | B);
+ *(ptr++) = static_cast<uint8_t>(v >> 14);
+ } else if (v < (1 << 28)) {
+ *(ptr++) = static_cast<uint8_t>(v | B);
+ *(ptr++) = static_cast<uint8_t>((v >> 7) | B);
+ *(ptr++) = static_cast<uint8_t>((v >> 14) | B);
+ *(ptr++) = static_cast<uint8_t>(v >> 21);
  } else {
- *(ptr++) = v | B;
- *(ptr++) = (v>>7) | B;
- *(ptr++) = (v>>14) | B;
- *(ptr++) = (v>>21) | B;
- *(ptr++) = v>>28;
+ *(ptr++) = static_cast<uint8_t>(v | B);
+ *(ptr++) = static_cast<uint8_t>((v>>7) | B);
+ *(ptr++) = static_cast<uint8_t>((v>>14) | B);
+ *(ptr++) = static_cast<uint8_t>((v>>21) | B);
+ *(ptr++) = static_cast<uint8_t>(v >> 28);
  }
  return reinterpret_cast<char*>(ptr);
  }
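Note on the Varint hunk above: Encode32() and Parse32WithLimit() agree on the usual base-128 varint layout, seven payload bits per byte, least-significant group first, with the top bit (0x80) set on every byte except the last. The following standalone worked example is not part of the package; it just encodes and decodes the value 300 by hand to show the byte layout.

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t v = 300;  // binary 1'0010'1100
      // Low 7 bits first, continuation bit (0x80) on every byte but the last.
      const uint8_t encoded[2] = {
          static_cast<uint8_t>((v & 0x7f) | 0x80),  // 0xAC
          static_cast<uint8_t>(v >> 7),             // 0x02
      };
      assert(encoded[0] == 0xAC && encoded[1] == 0x02);

      // Decoding reverses the process, as Parse32WithLimit() does byte by byte.
      const uint32_t decoded =
          (encoded[0] & 0x7fu) | (static_cast<uint32_t>(encoded[1]) << 7);
      assert(decoded == v);
      return 0;
    }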