snappy 0.0.17 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. checksums.yaml +5 -5
  2. data/.dockerignore +2 -0
  3. data/.github/workflows/main.yml +34 -0
  4. data/.github/workflows/publish.yml +34 -0
  5. data/.gitignore +2 -1
  6. data/.gitmodules +1 -1
  7. data/Dockerfile +13 -0
  8. data/Gemfile +4 -0
  9. data/README.md +45 -5
  10. data/Rakefile +32 -29
  11. data/ext/api.c +6 -1
  12. data/ext/extconf.rb +31 -22
  13. data/lib/snappy/hadoop/reader.rb +62 -0
  14. data/lib/snappy/hadoop/writer.rb +51 -0
  15. data/lib/snappy/hadoop.rb +22 -0
  16. data/lib/snappy/reader.rb +14 -10
  17. data/lib/snappy/shim.rb +1 -1
  18. data/lib/snappy/version.rb +1 -1
  19. data/lib/snappy.rb +5 -4
  20. data/snappy.gemspec +14 -13
  21. data/test/hadoop/snappy_hadoop_reader_test.rb +115 -0
  22. data/test/hadoop/snappy_hadoop_writer_test.rb +48 -0
  23. data/test/snappy_hadoop_test.rb +26 -0
  24. data/test/snappy_reader_test.rb +148 -0
  25. data/test/snappy_test.rb +95 -0
  26. data/test/snappy_writer_test.rb +55 -0
  27. data/test/test_helper.rb +7 -0
  28. data/test.sh +3 -0
  29. data/vendor/snappy/CMakeLists.txt +420 -0
  30. data/vendor/snappy/CONTRIBUTING.md +31 -0
  31. data/vendor/snappy/NEWS +52 -0
  32. data/vendor/snappy/{README → README.md} +75 -49
  33. data/vendor/snappy/cmake/SnappyConfig.cmake.in +33 -0
  34. data/vendor/snappy/cmake/config.h.in +66 -0
  35. data/vendor/snappy/docs/README.md +72 -0
  36. data/vendor/snappy/snappy-internal.h +200 -32
  37. data/vendor/snappy/snappy-sinksource.cc +26 -9
  38. data/vendor/snappy/snappy-sinksource.h +11 -11
  39. data/vendor/snappy/snappy-stubs-internal.cc +1 -1
  40. data/vendor/snappy/snappy-stubs-internal.h +299 -302
  41. data/vendor/snappy/snappy-stubs-public.h.in +10 -47
  42. data/vendor/snappy/snappy-test.cc +94 -200
  43. data/vendor/snappy/snappy-test.h +101 -358
  44. data/vendor/snappy/snappy.cc +1437 -474
  45. data/vendor/snappy/snappy.h +31 -12
  46. data/vendor/snappy/snappy_benchmark.cc +378 -0
  47. data/vendor/snappy/snappy_compress_fuzzer.cc +60 -0
  48. data/vendor/snappy/snappy_test_data.cc +57 -0
  49. data/vendor/snappy/snappy_test_data.h +68 -0
  50. data/vendor/snappy/snappy_test_tool.cc +471 -0
  51. data/vendor/snappy/snappy_uncompress_fuzzer.cc +58 -0
  52. data/vendor/snappy/snappy_unittest.cc +271 -792
  53. metadata +42 -92
  54. data/.travis.yml +0 -26
  55. data/smoke.sh +0 -8
  56. data/test/test-snappy-reader.rb +0 -129
  57. data/test/test-snappy-writer.rb +0 -55
  58. data/test/test-snappy.rb +0 -58
  59. data/vendor/snappy/ChangeLog +0 -2468
  60. data/vendor/snappy/INSTALL +0 -370
  61. data/vendor/snappy/Makefile +0 -982
  62. data/vendor/snappy/Makefile.am +0 -26
  63. data/vendor/snappy/Makefile.in +0 -982
  64. data/vendor/snappy/aclocal.m4 +0 -9738
  65. data/vendor/snappy/autogen.sh +0 -12
  66. data/vendor/snappy/autom4te.cache/output.0 +0 -18856
  67. data/vendor/snappy/autom4te.cache/output.1 +0 -18852
  68. data/vendor/snappy/autom4te.cache/requests +0 -297
  69. data/vendor/snappy/autom4te.cache/traces.0 +0 -2689
  70. data/vendor/snappy/autom4te.cache/traces.1 +0 -714
  71. data/vendor/snappy/config.guess +0 -1530
  72. data/vendor/snappy/config.h +0 -135
  73. data/vendor/snappy/config.h.in +0 -134
  74. data/vendor/snappy/config.log +0 -1640
  75. data/vendor/snappy/config.status +0 -2318
  76. data/vendor/snappy/config.sub +0 -1773
  77. data/vendor/snappy/configure +0 -18852
  78. data/vendor/snappy/configure.ac +0 -134
  79. data/vendor/snappy/depcomp +0 -688
  80. data/vendor/snappy/install-sh +0 -527
  81. data/vendor/snappy/libtool +0 -10246
  82. data/vendor/snappy/ltmain.sh +0 -9661
  83. data/vendor/snappy/m4/gtest.m4 +0 -74
  84. data/vendor/snappy/m4/libtool.m4 +0 -8001
  85. data/vendor/snappy/m4/ltoptions.m4 +0 -384
  86. data/vendor/snappy/m4/ltsugar.m4 +0 -123
  87. data/vendor/snappy/m4/ltversion.m4 +0 -23
  88. data/vendor/snappy/m4/lt~obsolete.m4 +0 -98
  89. data/vendor/snappy/missing +0 -331
  90. data/vendor/snappy/snappy-stubs-public.h +0 -100
  91. data/vendor/snappy/snappy.pc +0 -10
  92. data/vendor/snappy/snappy.pc.in +0 -10
  93. data/vendor/snappy/stamp-h1 +0 -1
data/vendor/snappy/snappy-stubs-internal.h

@@ -31,27 +31,49 @@
  #ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
  #define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_

- #ifdef HAVE_CONFIG_H
+ #if HAVE_CONFIG_H
  #include "config.h"
  #endif

- #include <string>
+ #include <stdint.h>

- #include <assert.h>
- #include <stdlib.h>
- #include <string.h>
+ #include <cassert>
+ #include <cstdlib>
+ #include <cstring>
+ #include <limits>
+ #include <string>

- #ifdef HAVE_SYS_MMAN_H
+ #if HAVE_SYS_MMAN_H
  #include <sys/mman.h>
  #endif

- #include "snappy-stubs-public.h"
+ #if HAVE_UNISTD_H
+ #include <unistd.h>
+ #endif

- #if defined(__x86_64__)
+ #if defined(_MSC_VER)
+ #include <intrin.h>
+ #endif // defined(_MSC_VER)

- // Enable 64-bit optimized versions of some routines.
- #define ARCH_K8 1
+ #ifndef __has_feature
+ #define __has_feature(x) 0
+ #endif
+
+ #if __has_feature(memory_sanitizer)
+ #include <sanitizer/msan_interface.h>
+ #define SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
+ __msan_unpoison((address), (size))
+ #else
+ #define SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) /* empty */
+ #endif // __has_feature(memory_sanitizer)

+ #include "snappy-stubs-public.h"
+
+ // Used to enable 64-bit optimized versions of some routines.
+ #if defined(__PPC64__) || defined(__powerpc64__)
+ #define ARCH_PPC 1
+ #elif defined(__aarch64__) || defined(_M_ARM64)
+ #define ARCH_ARM 1
  #endif

  // Needed by OS X, among others.
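Note on the __has_feature fallback introduced above: compilers that lack the Clang extension would otherwise fail to parse a feature check, so the header defines the macro to always expand to 0 and every check quietly degrades to "not available". A minimal, self-contained sketch of the same pattern (illustrative only, not taken from the diff):

    // Non-Clang compilers: make feature checks compile and evaluate to false.
    #ifndef __has_feature
    #define __has_feature(x) 0
    #endif

    #if __has_feature(address_sanitizer)
    // Sanitizer-specific annotations would go here.
    #endif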
@@ -59,246 +81,83 @@
  #define MAP_ANONYMOUS MAP_ANON
  #endif

- // Pull in std::min, std::ostream, and the likes. This is safe because this
- // header file is never used from any public header files.
- using namespace std;
-
  // The size of an array, if known at compile-time.
  // Will give unexpected results if used on a pointer.
  // We undefine it first, since some compilers already have a definition.
  #ifdef ARRAYSIZE
  #undef ARRAYSIZE
  #endif
- #define ARRAYSIZE(a) (sizeof(a) / sizeof(*(a)))
+ #define ARRAYSIZE(a) int{sizeof(a) / sizeof(*(a))}

  // Static prediction hints.
- #ifdef HAVE_BUILTIN_EXPECT
- #define PREDICT_FALSE(x) (__builtin_expect(x, 0))
- #define PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
+ #if HAVE_BUILTIN_EXPECT
+ #define SNAPPY_PREDICT_FALSE(x) (__builtin_expect(x, 0))
+ #define SNAPPY_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
  #else
- #define PREDICT_FALSE(x) x
- #define PREDICT_TRUE(x) x
- #endif
-
- // This is only used for recomputing the tag byte table used during
- // decompression; for simplicity we just remove it from the open-source
- // version (anyone who wants to regenerate it can just do the call
- // themselves within main()).
- #define DEFINE_bool(flag_name, default_value, description) \
- bool FLAGS_ ## flag_name = default_value
- #define DECLARE_bool(flag_name) \
- extern bool FLAGS_ ## flag_name
-
- namespace snappy {
-
- static const uint32 kuint32max = static_cast<uint32>(0xFFFFFFFF);
- static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
-
- // Potentially unaligned loads and stores.
-
- // x86 and PowerPC can simply do these loads and stores native.
-
- #if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
-
- #define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
- #define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
- #define UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))
+ #define SNAPPY_PREDICT_FALSE(x) x
+ #define SNAPPY_PREDICT_TRUE(x) x
+ #endif // HAVE_BUILTIN_EXPECT

- #define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
- #define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
- #define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))
-
- // ARMv7 and newer support native unaligned accesses, but only of 16-bit
- // and 32-bit values (not 64-bit); older versions either raise a fatal signal,
- // do an unaligned read and rotate the words around a bit, or do the reads very
- // slowly (trip through kernel mode). There's no simple #define that says just
- // “ARMv7 or higher”, so we have to filter away all ARMv5 and ARMv6
- // sub-architectures.
- //
- // This is a mess, but there's not much we can do about it.
- //
- // To further complicate matters, only LDR instructions (single reads) are
- // allowed to be unaligned, not LDRD (two reads) or LDM (many reads). Unless we
- // explicitly tell the compiler that these accesses can be unaligned, it can and
- // will combine accesses. On armcc, the way to signal this is done by accessing
- // through the type (uint32 __packed *), but GCC has no such attribute
- // (it ignores __attribute__((packed)) on individual variables). However,
- // we can tell it that a _struct_ is unaligned, which has the same effect,
- // so we do that.
-
- #elif defined(__arm__) && \
- !defined(__ARM_ARCH_4__) && \
- !defined(__ARM_ARCH_4T__) && \
- !defined(__ARM_ARCH_5__) && \
- !defined(__ARM_ARCH_5T__) && \
- !defined(__ARM_ARCH_5TE__) && \
- !defined(__ARM_ARCH_5TEJ__) && \
- !defined(__ARM_ARCH_6__) && \
- !defined(__ARM_ARCH_6J__) && \
- !defined(__ARM_ARCH_6K__) && \
- !defined(__ARM_ARCH_6Z__) && \
- !defined(__ARM_ARCH_6ZK__) && \
- !defined(__ARM_ARCH_6T2__)
-
- #if __GNUC__
- #define ATTRIBUTE_PACKED __attribute__((__packed__))
+ // Inlining hints.
+ #if HAVE_ATTRIBUTE_ALWAYS_INLINE
+ #define SNAPPY_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
  #else
- #define ATTRIBUTE_PACKED
- #endif
-
- namespace base {
- namespace internal {
-
- struct Unaligned16Struct {
- uint16 value;
- uint8 dummy; // To make the size non-power-of-two.
- } ATTRIBUTE_PACKED;
-
- struct Unaligned32Struct {
- uint32 value;
- uint8 dummy; // To make the size non-power-of-two.
- } ATTRIBUTE_PACKED;
-
- } // namespace internal
- } // namespace base
-
- #define UNALIGNED_LOAD16(_p) \
- ((reinterpret_cast<const ::snappy::base::internal::Unaligned16Struct *>(_p))->value)
- #define UNALIGNED_LOAD32(_p) \
- ((reinterpret_cast<const ::snappy::base::internal::Unaligned32Struct *>(_p))->value)
-
- #define UNALIGNED_STORE16(_p, _val) \
- ((reinterpret_cast< ::snappy::base::internal::Unaligned16Struct *>(_p))->value = \
- (_val))
- #define UNALIGNED_STORE32(_p, _val) \
- ((reinterpret_cast< ::snappy::base::internal::Unaligned32Struct *>(_p))->value = \
- (_val))
-
- // TODO(user): NEON supports unaligned 64-bit loads and stores.
- // See if that would be more efficient on platforms supporting it,
- // at least for copies.
-
- inline uint64 UNALIGNED_LOAD64(const void *p) {
- uint64 t;
- memcpy(&t, p, sizeof t);
- return t;
- }
+ #define SNAPPY_ATTRIBUTE_ALWAYS_INLINE
+ #endif // HAVE_ATTRIBUTE_ALWAYS_INLINE

- inline void UNALIGNED_STORE64(void *p, uint64 v) {
- memcpy(p, &v, sizeof v);
- }
+ // Stubbed version of ABSL_FLAG.
+ //
+ // In the open source version, flags can only be changed at compile time.
+ #define SNAPPY_FLAG(flag_type, flag_name, default_value, help) \
+ flag_type FLAGS_ ## flag_name = default_value

- #else
+ namespace snappy {

- // These functions are provided for architectures that don't support
- // unaligned loads and stores.
+ // Stubbed version of absl::GetFlag().
+ template <typename T>
+ inline T GetFlag(T flag) { return flag; }

- inline uint16 UNALIGNED_LOAD16(const void *p) {
- uint16 t;
- memcpy(&t, p, sizeof t);
- return t;
- }
+ static const uint32_t kuint32max = std::numeric_limits<uint32_t>::max();
+ static const int64_t kint64max = std::numeric_limits<int64_t>::max();

- inline uint32 UNALIGNED_LOAD32(const void *p) {
- uint32 t;
- memcpy(&t, p, sizeof t);
- return t;
- }
+ // Potentially unaligned loads and stores.

- inline uint64 UNALIGNED_LOAD64(const void *p) {
- uint64 t;
- memcpy(&t, p, sizeof t);
- return t;
+ inline uint16_t UNALIGNED_LOAD16(const void *p) {
+ // Compiles to a single movzx/ldrh on clang/gcc/msvc.
+ uint16_t v;
+ std::memcpy(&v, p, sizeof(v));
+ return v;
  }

- inline void UNALIGNED_STORE16(void *p, uint16 v) {
- memcpy(p, &v, sizeof v);
+ inline uint32_t UNALIGNED_LOAD32(const void *p) {
+ // Compiles to a single mov/ldr on clang/gcc/msvc.
+ uint32_t v;
+ std::memcpy(&v, p, sizeof(v));
+ return v;
  }

- inline void UNALIGNED_STORE32(void *p, uint32 v) {
- memcpy(p, &v, sizeof v);
+ inline uint64_t UNALIGNED_LOAD64(const void *p) {
+ // Compiles to a single mov/ldr on clang/gcc/msvc.
+ uint64_t v;
+ std::memcpy(&v, p, sizeof(v));
+ return v;
  }

- inline void UNALIGNED_STORE64(void *p, uint64 v) {
- memcpy(p, &v, sizeof v);
+ inline void UNALIGNED_STORE16(void *p, uint16_t v) {
+ // Compiles to a single mov/strh on clang/gcc/msvc.
+ std::memcpy(p, &v, sizeof(v));
  }

- #endif
-
- // This can be more efficient than UNALIGNED_LOAD64 + UNALIGNED_STORE64
- // on some platforms, in particular ARM.
- inline void UnalignedCopy64(const void *src, void *dst) {
- if (sizeof(void *) == 8) {
- UNALIGNED_STORE64(dst, UNALIGNED_LOAD64(src));
- } else {
- const char *src_char = reinterpret_cast<const char *>(src);
- char *dst_char = reinterpret_cast<char *>(dst);
-
- UNALIGNED_STORE32(dst_char, UNALIGNED_LOAD32(src_char));
- UNALIGNED_STORE32(dst_char + 4, UNALIGNED_LOAD32(src_char + 4));
- }
+ inline void UNALIGNED_STORE32(void *p, uint32_t v) {
+ // Compiles to a single mov/str on clang/gcc/msvc.
+ std::memcpy(p, &v, sizeof(v));
  }

- // The following guarantees declaration of the byte swap functions.
- #ifdef WORDS_BIGENDIAN
-
- #ifdef HAVE_SYS_BYTEORDER_H
- #include <sys/byteorder.h>
- #endif
-
- #ifdef HAVE_SYS_ENDIAN_H
- #include <sys/endian.h>
- #endif
-
- #ifdef _MSC_VER
- #include <stdlib.h>
- #define bswap_16(x) _byteswap_ushort(x)
- #define bswap_32(x) _byteswap_ulong(x)
- #define bswap_64(x) _byteswap_uint64(x)
-
- #elif defined(__APPLE__)
- // Mac OS X / Darwin features
- #include <libkern/OSByteOrder.h>
- #define bswap_16(x) OSSwapInt16(x)
- #define bswap_32(x) OSSwapInt32(x)
- #define bswap_64(x) OSSwapInt64(x)
-
- #elif defined(HAVE_BYTESWAP_H)
- #include <byteswap.h>
-
- #elif defined(bswap32)
- // FreeBSD defines bswap{16,32,64} in <sys/endian.h> (already #included).
- #define bswap_16(x) bswap16(x)
- #define bswap_32(x) bswap32(x)
- #define bswap_64(x) bswap64(x)
-
- #elif defined(BSWAP_64)
- // Solaris 10 defines BSWAP_{16,32,64} in <sys/byteorder.h> (already #included).
- #define bswap_16(x) BSWAP_16(x)
- #define bswap_32(x) BSWAP_32(x)
- #define bswap_64(x) BSWAP_64(x)
-
- #else
-
- inline uint16 bswap_16(uint16 x) {
- return (x << 8) | (x >> 8);
- }
-
- inline uint32 bswap_32(uint32 x) {
- x = ((x & 0xff00ff00UL) >> 8) | ((x & 0x00ff00ffUL) << 8);
- return (x >> 16) | (x << 16);
+ inline void UNALIGNED_STORE64(void *p, uint64_t v) {
+ // Compiles to a single mov/str on clang/gcc/msvc.
+ std::memcpy(p, &v, sizeof(v));
  }

- inline uint64 bswap_64(uint64 x) {
- x = ((x & 0xff00ff00ff00ff00ULL) >> 8) | ((x & 0x00ff00ff00ff00ffULL) << 8);
- x = ((x & 0xffff0000ffff0000ULL) >> 16) | ((x & 0x0000ffff0000ffffULL) << 16);
- return (x >> 32) | (x << 32);
- }
-
- #endif
-
- #endif // WORDS_BIGENDIAN
-
  // Convert to little-endian storage, opposite of network format.
  // Convert x from host to little endian: x = LittleEndian.FromHost(x);
  // convert x from little endian to host: x = LittleEndian.ToHost(x);
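The UNALIGNED_LOAD*/UNALIGNED_STORE* replacements above all follow one idiom: copy through a local with memcpy instead of dereferencing a cast pointer, which sidesteps undefined behaviour on misaligned addresses while still compiling down to a single load or store. A stand-alone sketch of that idiom (illustrative only; the helper name LoadU32 is not part of snappy):

    #include <cstdint>
    #include <cstring>

    // Reads a 32-bit value from p even if p is not 4-byte aligned.
    inline uint32_t LoadU32(const void* p) {
      uint32_t v;
      std::memcpy(&v, p, sizeof(v));  // optimizers lower this to one mov/ldr
      return v;
    }

    // Usage: read a word starting at an odd offset into a byte buffer.
    // const unsigned char buf[8] = {0, 1, 2, 3, 4, 5, 6, 7};
    // uint32_t w = LoadU32(buf + 1);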
@@ -310,87 +169,194 @@ inline uint64 bswap_64(uint64 x) {
  // x = LittleEndian.Load16(p);
  class LittleEndian {
  public:
- // Conversion functions.
- #ifdef WORDS_BIGENDIAN
-
- static uint16 FromHost16(uint16 x) { return bswap_16(x); }
- static uint16 ToHost16(uint16 x) { return bswap_16(x); }
-
- static uint32 FromHost32(uint32 x) { return bswap_32(x); }
- static uint32 ToHost32(uint32 x) { return bswap_32(x); }
-
- static bool IsLittleEndian() { return false; }
-
- #else // !defined(WORDS_BIGENDIAN)
-
- static uint16 FromHost16(uint16 x) { return x; }
- static uint16 ToHost16(uint16 x) { return x; }
-
- static uint32 FromHost32(uint32 x) { return x; }
- static uint32 ToHost32(uint32 x) { return x; }
+ // Functions to do unaligned loads and stores in little-endian order.
+ static inline uint16_t Load16(const void *ptr) {
+ // Compiles to a single mov/str on recent clang and gcc.
+ #if SNAPPY_IS_BIG_ENDIAN
+ const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
+ return (static_cast<uint16_t>(buffer[0])) |
+ (static_cast<uint16_t>(buffer[1]) << 8);
+ #else
+ // memcpy() turns into a single instruction early in the optimization
+ // pipeline (relatively to a series of byte accesses). So, using memcpy
+ // instead of byte accesses may lead to better decisions in more stages of
+ // the optimization pipeline.
+ uint16_t value;
+ std::memcpy(&value, ptr, 2);
+ return value;
+ #endif
+ }

- static bool IsLittleEndian() { return true; }
+ static inline uint32_t Load32(const void *ptr) {
+ // Compiles to a single mov/str on recent clang and gcc.
+ #if SNAPPY_IS_BIG_ENDIAN
+ const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
+ return (static_cast<uint32_t>(buffer[0])) |
+ (static_cast<uint32_t>(buffer[1]) << 8) |
+ (static_cast<uint32_t>(buffer[2]) << 16) |
+ (static_cast<uint32_t>(buffer[3]) << 24);
+ #else
+ // See Load16() for the rationale of using memcpy().
+ uint32_t value;
+ std::memcpy(&value, ptr, 4);
+ return value;
+ #endif
+ }

- #endif // !defined(WORDS_BIGENDIAN)
+ static inline uint64_t Load64(const void *ptr) {
+ // Compiles to a single mov/str on recent clang and gcc.
+ #if SNAPPY_IS_BIG_ENDIAN
+ const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
+ return (static_cast<uint64_t>(buffer[0])) |
+ (static_cast<uint64_t>(buffer[1]) << 8) |
+ (static_cast<uint64_t>(buffer[2]) << 16) |
+ (static_cast<uint64_t>(buffer[3]) << 24) |
+ (static_cast<uint64_t>(buffer[4]) << 32) |
+ (static_cast<uint64_t>(buffer[5]) << 40) |
+ (static_cast<uint64_t>(buffer[6]) << 48) |
+ (static_cast<uint64_t>(buffer[7]) << 56);
+ #else
+ // See Load16() for the rationale of using memcpy().
+ uint64_t value;
+ std::memcpy(&value, ptr, 8);
+ return value;
+ #endif
+ }

- // Functions to do unaligned loads and stores in little-endian order.
- static uint16 Load16(const void *p) {
- return ToHost16(UNALIGNED_LOAD16(p));
+ static inline void Store16(void *dst, uint16_t value) {
+ // Compiles to a single mov/str on recent clang and gcc.
+ #if SNAPPY_IS_BIG_ENDIAN
+ uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
+ buffer[0] = static_cast<uint8_t>(value);
+ buffer[1] = static_cast<uint8_t>(value >> 8);
+ #else
+ // See Load16() for the rationale of using memcpy().
+ std::memcpy(dst, &value, 2);
+ #endif
  }

- static void Store16(void *p, uint16 v) {
- UNALIGNED_STORE16(p, FromHost16(v));
+ static void Store32(void *dst, uint32_t value) {
+ // Compiles to a single mov/str on recent clang and gcc.
+ #if SNAPPY_IS_BIG_ENDIAN
+ uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
+ buffer[0] = static_cast<uint8_t>(value);
+ buffer[1] = static_cast<uint8_t>(value >> 8);
+ buffer[2] = static_cast<uint8_t>(value >> 16);
+ buffer[3] = static_cast<uint8_t>(value >> 24);
+ #else
+ // See Load16() for the rationale of using memcpy().
+ std::memcpy(dst, &value, 4);
+ #endif
  }

- static uint32 Load32(const void *p) {
- return ToHost32(UNALIGNED_LOAD32(p));
+ static void Store64(void* dst, uint64_t value) {
+ // Compiles to a single mov/str on recent clang and gcc.
+ #if SNAPPY_IS_BIG_ENDIAN
+ uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
+ buffer[0] = static_cast<uint8_t>(value);
+ buffer[1] = static_cast<uint8_t>(value >> 8);
+ buffer[2] = static_cast<uint8_t>(value >> 16);
+ buffer[3] = static_cast<uint8_t>(value >> 24);
+ buffer[4] = static_cast<uint8_t>(value >> 32);
+ buffer[5] = static_cast<uint8_t>(value >> 40);
+ buffer[6] = static_cast<uint8_t>(value >> 48);
+ buffer[7] = static_cast<uint8_t>(value >> 56);
+ #else
+ // See Load16() for the rationale of using memcpy().
+ std::memcpy(dst, &value, 8);
+ #endif
  }

- static void Store32(void *p, uint32 v) {
- UNALIGNED_STORE32(p, FromHost32(v));
+ static inline constexpr bool IsLittleEndian() {
+ #if SNAPPY_IS_BIG_ENDIAN
+ return false;
+ #else
+ return true;
+ #endif // SNAPPY_IS_BIG_ENDIAN
  }
  };

  // Some bit-manipulation functions.
  class Bits {
  public:
+ // Return floor(log2(n)) for positive integer n.
+ static int Log2FloorNonZero(uint32_t n);
+
  // Return floor(log2(n)) for positive integer n. Returns -1 iff n == 0.
- static int Log2Floor(uint32 n);
+ static int Log2Floor(uint32_t n);

  // Return the first set least / most significant bit, 0-indexed. Returns an
  // undefined value if n == 0. FindLSBSetNonZero() is similar to ffs() except
  // that it's 0-indexed.
- static int FindLSBSetNonZero(uint32 n);
- static int FindLSBSetNonZero64(uint64 n);
+ static int FindLSBSetNonZero(uint32_t n);
+
+ static int FindLSBSetNonZero64(uint64_t n);

  private:
- DISALLOW_COPY_AND_ASSIGN(Bits);
+ // No copying
+ Bits(const Bits&);
+ void operator=(const Bits&);
  };

- #ifdef HAVE_BUILTIN_CTZ
+ #if HAVE_BUILTIN_CTZ
+
+ inline int Bits::Log2FloorNonZero(uint32_t n) {
+ assert(n != 0);
+ // (31 ^ x) is equivalent to (31 - x) for x in [0, 31]. An easy proof
+ // represents subtraction in base 2 and observes that there's no carry.
+ //
+ // GCC and Clang represent __builtin_clz on x86 as 31 ^ _bit_scan_reverse(x).
+ // Using "31 ^" here instead of "31 -" allows the optimizer to strip the
+ // function body down to _bit_scan_reverse(x).
+ return 31 ^ __builtin_clz(n);
+ }

- inline int Bits::Log2Floor(uint32 n) {
- return n == 0 ? -1 : 31 ^ __builtin_clz(n);
+ inline int Bits::Log2Floor(uint32_t n) {
+ return (n == 0) ? -1 : Bits::Log2FloorNonZero(n);
  }

- inline int Bits::FindLSBSetNonZero(uint32 n) {
+ inline int Bits::FindLSBSetNonZero(uint32_t n) {
+ assert(n != 0);
  return __builtin_ctz(n);
  }

- inline int Bits::FindLSBSetNonZero64(uint64 n) {
- return __builtin_ctzll(n);
+ #elif defined(_MSC_VER)
+
+ inline int Bits::Log2FloorNonZero(uint32_t n) {
+ assert(n != 0);
+ // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
+ unsigned long where;
+ _BitScanReverse(&where, n);
+ return static_cast<int>(where);
+ }
+
+ inline int Bits::Log2Floor(uint32_t n) {
+ // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
+ unsigned long where;
+ if (_BitScanReverse(&where, n))
+ return static_cast<int>(where);
+ return -1;
+ }
+
+ inline int Bits::FindLSBSetNonZero(uint32_t n) {
+ assert(n != 0);
+ // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
+ unsigned long where;
+ if (_BitScanForward(&where, n))
+ return static_cast<int>(where);
+ return 32;
  }

  #else // Portable versions.

- inline int Bits::Log2Floor(uint32 n) {
- if (n == 0)
- return -1;
+ inline int Bits::Log2FloorNonZero(uint32_t n) {
+ assert(n != 0);
+
  int log = 0;
- uint32 value = n;
+ uint32_t value = n;
  for (int i = 4; i >= 0; --i) {
  int shift = (1 << i);
- uint32 x = value >> shift;
+ uint32_t x = value >> shift;
  if (x != 0) {
  value = x;
  log += shift;
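The Log2FloorNonZero() comment above relies on the identity 31 ^ clz(n) == 31 - clz(n) for 32-bit values. A small worked check (GCC/Clang only, since it uses __builtin_clz; not code from the diff):

    #include <cassert>
    #include <cstdint>

    // For n = 20 (binary 10100) the highest set bit is bit 4, so
    // __builtin_clz(20) == 27 and 31 ^ 27 == 31 - 27 == 4 == floor(log2(20)).
    inline int Log2FloorSketch(uint32_t n) {
      assert(n != 0);
      return 31 ^ __builtin_clz(n);
    }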
@@ -400,10 +366,16 @@ inline int Bits::Log2Floor(uint32 n) {
  return log;
  }

- inline int Bits::FindLSBSetNonZero(uint32 n) {
+ inline int Bits::Log2Floor(uint32_t n) {
+ return (n == 0) ? -1 : Bits::Log2FloorNonZero(n);
+ }
+
+ inline int Bits::FindLSBSetNonZero(uint32_t n) {
+ assert(n != 0);
+
  int rc = 31;
  for (int i = 4, shift = 1 << 4; i >= 0; --i) {
- const uint32 x = n << shift;
+ const uint32_t x = n << shift;
  if (x != 0) {
  n = x;
  rc -= shift;
@@ -413,23 +385,48 @@ inline int Bits::FindLSBSetNonZero(uint32 n) {
  return rc;
  }

+ #endif // End portable versions.
+
+ #if HAVE_BUILTIN_CTZ
+
+ inline int Bits::FindLSBSetNonZero64(uint64_t n) {
+ assert(n != 0);
+ return __builtin_ctzll(n);
+ }
+
+ #elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64))
+ // _BitScanForward64() is only available on x64 and ARM64.
+
+ inline int Bits::FindLSBSetNonZero64(uint64_t n) {
+ assert(n != 0);
+ // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
+ unsigned long where;
+ if (_BitScanForward64(&where, n))
+ return static_cast<int>(where);
+ return 64;
+ }
+
+ #else // Portable version.
+
  // FindLSBSetNonZero64() is defined in terms of FindLSBSetNonZero().
- inline int Bits::FindLSBSetNonZero64(uint64 n) {
- const uint32 bottombits = static_cast<uint32>(n);
+ inline int Bits::FindLSBSetNonZero64(uint64_t n) {
+ assert(n != 0);
+
+ const uint32_t bottombits = static_cast<uint32_t>(n);
  if (bottombits == 0) {
- // Bottom bits are zero, so scan in top bits
- return 32 + FindLSBSetNonZero(static_cast<uint32>(n >> 32));
+ // Bottom bits are zero, so scan the top bits.
+ return 32 + FindLSBSetNonZero(static_cast<uint32_t>(n >> 32));
  } else {
  return FindLSBSetNonZero(bottombits);
  }
  }

- #endif // End portable versions.
+ #endif // HAVE_BUILTIN_CTZ

  // Variable-length integer encoding.
  class Varint {
  public:
- // Maximum lengths of varint encoding of uint32.
+ // Maximum lengths of varint encoding of uint32_t.
  static const int kMax32 = 5;

  // Attempts to parse a varint32 from a prefix of the bytes in [ptr,limit-1].
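As a quick sanity check of the portable FindLSBSetNonZero64() above: for n = 0x100000000 the low 32 bits are zero, so the function returns 32 + FindLSBSetNonZero(1) = 32 + 0 = 32, i.e. the lowest set bit is bit 32 (0-indexed), matching what __builtin_ctzll() or _BitScanForward64() report on the other branches.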
@@ -438,23 +435,23 @@ class Varint {
  // past the last byte of the varint32. Else returns NULL. On success,
  // "result <= limit".
  static const char* Parse32WithLimit(const char* ptr, const char* limit,
- uint32* OUTPUT);
+ uint32_t* OUTPUT);

  // REQUIRES "ptr" points to a buffer of length sufficient to hold "v".
  // EFFECTS Encodes "v" into "ptr" and returns a pointer to the
  // byte just past the last encoded byte.
- static char* Encode32(char* ptr, uint32 v);
+ static char* Encode32(char* ptr, uint32_t v);

  // EFFECTS Appends the varint representation of "value" to "*s".
- static void Append32(string* s, uint32 value);
+ static void Append32(std::string* s, uint32_t value);
  };

  inline const char* Varint::Parse32WithLimit(const char* p,
  const char* l,
- uint32* OUTPUT) {
+ uint32_t* OUTPUT) {
  const unsigned char* ptr = reinterpret_cast<const unsigned char*>(p);
  const unsigned char* limit = reinterpret_cast<const unsigned char*>(l);
- uint32 b, result;
+ uint32_t b, result;
  if (ptr >= limit) return NULL;
  b = *(ptr++); result = b & 127; if (b < 128) goto done;
  if (ptr >= limit) return NULL;
@@ -471,30 +468,30 @@ inline const char* Varint::Parse32WithLimit(const char* p,
  return reinterpret_cast<const char*>(ptr);
  }

- inline char* Varint::Encode32(char* sptr, uint32 v) {
+ inline char* Varint::Encode32(char* sptr, uint32_t v) {
  // Operate on characters as unsigneds
- unsigned char* ptr = reinterpret_cast<unsigned char*>(sptr);
- static const int B = 128;
- if (v < (1<<7)) {
- *(ptr++) = v;
- } else if (v < (1<<14)) {
- *(ptr++) = v | B;
- *(ptr++) = v>>7;
- } else if (v < (1<<21)) {
- *(ptr++) = v | B;
- *(ptr++) = (v>>7) | B;
- *(ptr++) = v>>14;
- } else if (v < (1<<28)) {
- *(ptr++) = v | B;
- *(ptr++) = (v>>7) | B;
- *(ptr++) = (v>>14) | B;
- *(ptr++) = v>>21;
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(sptr);
+ static const uint8_t B = 128;
+ if (v < (1 << 7)) {
+ *(ptr++) = static_cast<uint8_t>(v);
+ } else if (v < (1 << 14)) {
+ *(ptr++) = static_cast<uint8_t>(v | B);
+ *(ptr++) = static_cast<uint8_t>(v >> 7);
+ } else if (v < (1 << 21)) {
+ *(ptr++) = static_cast<uint8_t>(v | B);
+ *(ptr++) = static_cast<uint8_t>((v >> 7) | B);
+ *(ptr++) = static_cast<uint8_t>(v >> 14);
+ } else if (v < (1 << 28)) {
+ *(ptr++) = static_cast<uint8_t>(v | B);
+ *(ptr++) = static_cast<uint8_t>((v >> 7) | B);
+ *(ptr++) = static_cast<uint8_t>((v >> 14) | B);
+ *(ptr++) = static_cast<uint8_t>(v >> 21);
  } else {
- *(ptr++) = v | B;
- *(ptr++) = (v>>7) | B;
- *(ptr++) = (v>>14) | B;
- *(ptr++) = (v>>21) | B;
- *(ptr++) = v>>28;
+ *(ptr++) = static_cast<uint8_t>(v | B);
+ *(ptr++) = static_cast<uint8_t>((v>>7) | B);
+ *(ptr++) = static_cast<uint8_t>((v>>14) | B);
+ *(ptr++) = static_cast<uint8_t>((v>>21) | B);
+ *(ptr++) = static_cast<uint8_t>(v >> 28);
  }
  return reinterpret_cast<char*>(ptr);
  }
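Worked example for Encode32() above: v = 300 is binary 1 0010 1100, so the low 7 bits are 0x2C; OR-ing in the continuation bit B = 128 makes the first output byte 0xAC, and the remaining bits (300 >> 7 == 2) fit in the second byte 0x02, giving the two-byte varint AC 02. Values below 128 encode as a single byte, and kMax32 = 5 bytes covers the full uint32_t range (5 * 7 = 35 >= 32 bits).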
@@ -503,7 +500,7 @@ inline char* Varint::Encode32(char* sptr, uint32 v) {
  // replace this function with one that resizes the string without
  // filling the new space with zeros (if applicable) --
  // it will be non-portable but faster.
- inline void STLStringResizeUninitialized(string* s, size_t new_size) {
+ inline void STLStringResizeUninitialized(std::string* s, size_t new_size) {
  s->resize(new_size);
  }

@@ -519,7 +516,7 @@ inline void STLStringResizeUninitialized(string* s, size_t new_size) {
  // (http://www.open-std.org/JTC1/SC22/WG21/docs/lwg-defects.html#530)
  // proposes this as the method. It will officially be part of the standard
  // for C++0x. This should already work on all current implementations.
- inline char* string_as_array(string* str) {
+ inline char* string_as_array(std::string* str) {
  return str->empty() ? NULL : &*str->begin();
  }
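The two helpers in the last hunks are typically paired: size the destination string first, then hand its raw buffer to code that writes bytes directly. A hedged sketch of that pattern (assumes snappy-stubs-internal.h is in scope; FillWithZeros is a made-up illustration, not snappy code):

    #include <cstddef>
    #include <cstring>
    #include <string>

    // Resize, then write straight into the string's contiguous storage.
    inline void FillWithZeros(std::string* out, std::size_t n) {
      if (n == 0) return;                       // string_as_array() returns NULL for empty strings
      STLStringResizeUninitialized(out, n);     // the stub above still zero-fills via resize()
      std::memset(string_as_array(out), 0, n);  // write directly into the buffer
    }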