snappy 0.0.14-java → 0.2.0-java

This diff compares the contents of two publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
Files changed (70)
  1. checksums.yaml +5 -5
  2. data/.github/workflows/main.yml +34 -0
  3. data/.github/workflows/publish.yml +34 -0
  4. data/Gemfile +4 -0
  5. data/README.md +28 -4
  6. data/Rakefile +32 -29
  7. data/ext/api.c +6 -1
  8. data/ext/extconf.rb +21 -24
  9. data/lib/snappy.rb +6 -4
  10. data/lib/snappy/hadoop.rb +22 -0
  11. data/lib/snappy/hadoop/reader.rb +62 -0
  12. data/lib/snappy/hadoop/writer.rb +51 -0
  13. data/lib/snappy/reader.rb +19 -11
  14. data/lib/snappy/shim.rb +30 -0
  15. data/lib/snappy/version.rb +3 -1
  16. data/lib/snappy/writer.rb +8 -9
  17. data/snappy.gemspec +17 -37
  18. data/test/hadoop/snappy_hadoop_reader_test.rb +115 -0
  19. data/test/hadoop/snappy_hadoop_writer_test.rb +48 -0
  20. data/test/snappy_hadoop_test.rb +26 -0
  21. data/test/snappy_reader_test.rb +148 -0
  22. data/test/snappy_test.rb +95 -0
  23. data/test/snappy_writer_test.rb +55 -0
  24. data/test/test_helper.rb +7 -0
  25. data/vendor/snappy/CMakeLists.txt +297 -0
  26. data/vendor/snappy/CONTRIBUTING.md +26 -0
  27. data/vendor/snappy/COPYING +1 -1
  28. data/vendor/snappy/NEWS +60 -0
  29. data/vendor/snappy/{README → README.md} +29 -16
  30. data/vendor/snappy/cmake/SnappyConfig.cmake.in +33 -0
  31. data/vendor/snappy/cmake/config.h.in +62 -0
  32. data/vendor/snappy/docs/README.md +72 -0
  33. data/vendor/snappy/snappy-c.h +3 -3
  34. data/vendor/snappy/snappy-internal.h +113 -32
  35. data/vendor/snappy/snappy-sinksource.cc +33 -0
  36. data/vendor/snappy/snappy-sinksource.h +51 -6
  37. data/vendor/snappy/snappy-stubs-internal.cc +1 -1
  38. data/vendor/snappy/snappy-stubs-internal.h +160 -45
  39. data/vendor/snappy/snappy-stubs-public.h.in +23 -47
  40. data/vendor/snappy/snappy-test.cc +31 -24
  41. data/vendor/snappy/snappy-test.h +46 -103
  42. data/vendor/snappy/snappy.cc +786 -431
  43. data/vendor/snappy/snappy.h +37 -14
  44. data/vendor/snappy/snappy_compress_fuzzer.cc +59 -0
  45. data/vendor/snappy/snappy_uncompress_fuzzer.cc +57 -0
  46. data/vendor/snappy/snappy_unittest.cc +441 -290
  47. metadata +35 -75
  48. data/.travis.yml +0 -4
  49. data/test/test-snappy-reader.rb +0 -129
  50. data/test/test-snappy-writer.rb +0 -55
  51. data/test/test-snappy.rb +0 -58
  52. data/vendor/snappy/ChangeLog +0 -1916
  53. data/vendor/snappy/Makefile.am +0 -23
  54. data/vendor/snappy/autogen.sh +0 -7
  55. data/vendor/snappy/configure.ac +0 -133
  56. data/vendor/snappy/m4/gtest.m4 +0 -74
  57. data/vendor/snappy/testdata/alice29.txt +0 -3609
  58. data/vendor/snappy/testdata/asyoulik.txt +0 -4122
  59. data/vendor/snappy/testdata/baddata1.snappy +0 -0
  60. data/vendor/snappy/testdata/baddata2.snappy +0 -0
  61. data/vendor/snappy/testdata/baddata3.snappy +0 -0
  62. data/vendor/snappy/testdata/fireworks.jpeg +0 -0
  63. data/vendor/snappy/testdata/geo.protodata +0 -0
  64. data/vendor/snappy/testdata/html +0 -1
  65. data/vendor/snappy/testdata/html_x_4 +0 -1
  66. data/vendor/snappy/testdata/kppkn.gtb +0 -0
  67. data/vendor/snappy/testdata/lcet10.txt +0 -7519
  68. data/vendor/snappy/testdata/paper-100k.pdf +2 -600
  69. data/vendor/snappy/testdata/plrabn12.txt +0 -10699
  70. data/vendor/snappy/testdata/urls.10K +0 -10000
data/vendor/snappy/snappy-sinksource.cc

@@ -40,6 +40,21 @@ char* Sink::GetAppendBuffer(size_t length, char* scratch) {
   return scratch;
 }
 
+char* Sink::GetAppendBufferVariable(
+      size_t min_size, size_t desired_size_hint, char* scratch,
+      size_t scratch_size, size_t* allocated_size) {
+  *allocated_size = scratch_size;
+  return scratch;
+}
+
+void Sink::AppendAndTakeOwnership(
+    char* bytes, size_t n,
+    void (*deleter)(void*, const char*, size_t),
+    void *deleter_arg) {
+  Append(bytes, n);
+  (*deleter)(deleter_arg, bytes, n);
+}
+
 ByteArraySource::~ByteArraySource() { }
 
 size_t ByteArraySource::Available() const { return left_; }
@@ -68,4 +83,22 @@ char* UncheckedByteArraySink::GetAppendBuffer(size_t len, char* scratch) {
   return dest_;
 }
 
+void UncheckedByteArraySink::AppendAndTakeOwnership(
+    char* data, size_t n,
+    void (*deleter)(void*, const char*, size_t),
+    void *deleter_arg) {
+  if (data != dest_) {
+    memcpy(dest_, data, n);
+    (*deleter)(deleter_arg, data, n);
+  }
+  dest_ += n;
+}
+
+char* UncheckedByteArraySink::GetAppendBufferVariable(
+      size_t min_size, size_t desired_size_hint, char* scratch,
+      size_t scratch_size, size_t* allocated_size) {
+  *allocated_size = desired_size_hint;
+  return dest_;
 }
+
+}  // namespace snappy
data/vendor/snappy/snappy-sinksource.h

@@ -26,12 +26,11 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#ifndef UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
-#define UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
+#ifndef THIRD_PARTY_SNAPPY_SNAPPY_SINKSOURCE_H_
+#define THIRD_PARTY_SNAPPY_SNAPPY_SINKSOURCE_H_
 
 #include <stddef.h>
 
-
 namespace snappy {
 
 // A Sink is an interface that consumes a sequence of bytes.
@@ -60,6 +59,47 @@ class Sink {
   // The default implementation always returns the scratch buffer.
   virtual char* GetAppendBuffer(size_t length, char* scratch);
 
+  // For higher performance, Sink implementations can provide custom
+  // AppendAndTakeOwnership() and GetAppendBufferVariable() methods.
+  // These methods can reduce the number of copies done during
+  // compression/decompression.
+
+  // Append "bytes[0,n-1] to the sink. Takes ownership of "bytes"
+  // and calls the deleter function as (*deleter)(deleter_arg, bytes, n)
+  // to free the buffer. deleter function must be non NULL.
+  //
+  // The default implementation just calls Append and frees "bytes".
+  // Other implementations may avoid a copy while appending the buffer.
+  virtual void AppendAndTakeOwnership(
+      char* bytes, size_t n, void (*deleter)(void*, const char*, size_t),
+      void *deleter_arg);
+
+  // Returns a writable buffer for appending and writes the buffer's capacity to
+  // *allocated_size. Guarantees *allocated_size >= min_size.
+  // May return a pointer to the caller-owned scratch buffer which must have
+  // scratch_size >= min_size.
+  //
+  // The returned buffer is only valid until the next operation
+  // on this ByteSink.
+  //
+  // After writing at most *allocated_size bytes, call Append() with the
+  // pointer returned from this function and the number of bytes written.
+  // Many Append() implementations will avoid copying bytes if this function
+  // returned an internal buffer.
+  //
+  // If the sink implementation allocates or reallocates an internal buffer,
+  // it should use the desired_size_hint if appropriate. If a caller cannot
+  // provide a reasonable guess at the desired capacity, it should set
+  // desired_size_hint = 0.
+  //
+  // If a non-scratch buffer is returned, the caller may only pass
+  // a prefix to it to Append(). That is, it is not correct to pass an
+  // interior pointer to Append().
+  //
+  // The default implementation always returns the scratch buffer.
+  virtual char* GetAppendBufferVariable(
+      size_t min_size, size_t desired_size_hint, char* scratch,
+      size_t scratch_size, size_t* allocated_size);
 
  private:
   // No copying
@@ -122,6 +162,12 @@ class UncheckedByteArraySink : public Sink {
   virtual ~UncheckedByteArraySink();
   virtual void Append(const char* data, size_t n);
   virtual char* GetAppendBuffer(size_t len, char* scratch);
+  virtual char* GetAppendBufferVariable(
+      size_t min_size, size_t desired_size_hint, char* scratch,
+      size_t scratch_size, size_t* allocated_size);
+  virtual void AppendAndTakeOwnership(
+      char* bytes, size_t n, void (*deleter)(void*, const char*, size_t),
+      void *deleter_arg);
 
   // Return the current output pointer so that a caller can see how
   // many bytes were produced.
@@ -131,7 +177,6 @@ class UncheckedByteArraySink : public Sink {
   char* dest_;
 };
 
+}  // namespace snappy
 
-}
-
-#endif  // UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
+#endif  // THIRD_PARTY_SNAPPY_SNAPPY_SINKSOURCE_H_
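For illustration, here is a sketch (not part of the diff; `EmitBlock` is an invented name) of how a caller is expected to drive the two new Sink methods under the contract documented above, assuming n fits in the caller's scratch buffer so that scratch_size >= min_size holds:

#include <cassert>
#include <cstddef>
#include <cstring>

#include "snappy-sinksource.h"

void EmitBlock(snappy::Sink* sink, const char* data, size_t n) {
  char scratch[4096];
  assert(n <= sizeof(scratch));  // precondition: scratch_size >= min_size
  size_t allocated = 0;
  // Ask for at least n writable bytes; n also serves as the size hint.
  char* buf = sink->GetAppendBufferVariable(n, n, scratch, sizeof(scratch),
                                            &allocated);
  assert(allocated >= n);      // guaranteed: *allocated_size >= min_size
  std::memcpy(buf, data, n);   // write at most `allocated` bytes...
  sink->Append(buf, n);        // ...then append a prefix of the same buffer
}

If the sink returned an internal buffer rather than the scratch space, a well-behaved Append() can skip the copy entirely, which is the point of the new API.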
data/vendor/snappy/snappy-stubs-internal.cc

@@ -33,7 +33,7 @@
 
 namespace snappy {
 
-void Varint::Append32(string* s, uint32 value) {
+void Varint::Append32(std::string* s, uint32 value) {
   char buf[Varint::kMax32];
   const char* p = Varint::Encode32(buf, value);
   s->append(buf, p - buf);
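For context, Append32 emits the little-endian base-128 varint encoding that snappy uses, for example for the uncompressed-length preamble of a compressed stream. A minimal standalone equivalent (an illustrative sketch, not code from the diff):

#include <cstdint>
#include <string>

// Emit v as a little-endian base-128 varint: seven payload bits per byte,
// with the high bit set on every byte except the last.
void AppendVarint32(std::string* s, uint32_t v) {
  while (v >= 0x80) {
    s->push_back(static_cast<char>((v & 0x7f) | 0x80));
    v >>= 7;
  }
  s->push_back(static_cast<char>(v));
}
// Example: 300 (0b1'0010'1100) encodes as 0xAC 0x02: the low seven bits
// 0101100 with the continuation bit set, then the remaining bits 10.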
data/vendor/snappy/snappy-stubs-internal.h

@@ -28,8 +28,8 @@
 //
 // Various stubs for the open-source version of Snappy.
 
-#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
-#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
+#ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
+#define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
 
 #ifdef HAVE_CONFIG_H
 #include "config.h"
@@ -45,6 +45,26 @@
 #include <sys/mman.h>
 #endif
 
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#if defined(_MSC_VER)
+#include <intrin.h>
+#endif  // defined(_MSC_VER)
+
+#ifndef __has_feature
+#define __has_feature(x) 0
+#endif
+
+#if __has_feature(memory_sanitizer)
+#include <sanitizer/msan_interface.h>
+#define SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
+    __msan_unpoison((address), (size))
+#else
+#define SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) /* empty */
+#endif  // __has_feature(memory_sanitizer)
+
 #include "snappy-stubs-public.h"
 
 #if defined(__x86_64__)
@@ -52,6 +72,14 @@
 
 // Enable 64-bit optimized versions of some routines.
 #define ARCH_K8 1
+
+#elif defined(__ppc64__)
+
+#define ARCH_PPC 1
+
+#elif defined(__aarch64__)
+
+#define ARCH_ARM 1
 
 #endif
 
 // Needed by OS X, among others.
@@ -59,10 +87,6 @@
 #define MAP_ANONYMOUS MAP_ANON
 #endif
 
-// Pull in std::min, std::ostream, and the likes. This is safe because this
-// header file is never used from any public header files.
-using namespace std;
-
 // The size of an array, if known at compile-time.
 // Will give unexpected results if used on a pointer.
 // We undefine it first, since some compilers already have a definition.
@@ -73,11 +97,11 @@ using namespace std;
 
 // Static prediction hints.
 #ifdef HAVE_BUILTIN_EXPECT
-#define PREDICT_FALSE(x) (__builtin_expect(x, 0))
-#define PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
+#define SNAPPY_PREDICT_FALSE(x) (__builtin_expect(x, 0))
+#define SNAPPY_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
 #else
-#define PREDICT_FALSE(x) x
-#define PREDICT_TRUE(x) x
+#define SNAPPY_PREDICT_FALSE(x) x
+#define SNAPPY_PREDICT_TRUE(x) x
 #endif
 
 // This is only used for recomputing the tag byte table used during
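The rename adds a SNAPPY_ prefix, presumably to avoid colliding with PREDICT_* macros defined by other libraries in the same translation unit; usage is unchanged. A minimal sketch of how such hints are used (assumes a compiler providing __builtin_expect; the function is invented for illustration):

#define SNAPPY_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
#define SNAPPY_PREDICT_FALSE(x) (__builtin_expect(x, 0))

// The hint tells the compiler which branch to lay out as the fall-through
// (hot) path; it changes code layout, not semantics.
int checked_div(int a, int b) {
  if (SNAPPY_PREDICT_FALSE(b == 0)) return 0;  // cold error path
  return a / b;                                // hot path, straight-line code
}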
@@ -96,9 +120,10 @@ static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
 
 // Potentially unaligned loads and stores.
 
-// x86 and PowerPC can simply do these loads and stores native.
+// x86, PowerPC, and ARM64 can simply do these loads and stores native.
 
-#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
+#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
+    defined(__aarch64__)
 
 #define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
 #define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
@@ -116,6 +141,15 @@ static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
 // sub-architectures.
 //
 // This is a mess, but there's not much we can do about it.
+//
+// To further complicate matters, only LDR instructions (single reads) are
+// allowed to be unaligned, not LDRD (two reads) or LDM (many reads). Unless we
+// explicitly tell the compiler that these accesses can be unaligned, it can and
+// will combine accesses. On armcc, the way to signal this is done by accessing
+// through the type (uint32 __packed *), but GCC has no such attribute
+// (it ignores __attribute__((packed)) on individual variables). However,
+// we can tell it that a _struct_ is unaligned, which has the same effect,
+// so we do that.
 
 #elif defined(__arm__) && \
     !defined(__ARM_ARCH_4__) && \
@@ -131,13 +165,41 @@ static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
     !defined(__ARM_ARCH_6ZK__) && \
     !defined(__ARM_ARCH_6T2__)
 
-#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
-#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
+#if __GNUC__
+#define ATTRIBUTE_PACKED __attribute__((__packed__))
+#else
+#define ATTRIBUTE_PACKED
+#endif
 
-#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
-#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
+namespace base {
+namespace internal {
+
+struct Unaligned16Struct {
+  uint16 value;
+  uint8 dummy;  // To make the size non-power-of-two.
+} ATTRIBUTE_PACKED;
 
-// TODO(user): NEON supports unaligned 64-bit loads and stores.
+struct Unaligned32Struct {
+  uint32 value;
+  uint8 dummy;  // To make the size non-power-of-two.
+} ATTRIBUTE_PACKED;
+
+}  // namespace internal
+}  // namespace base
+
+#define UNALIGNED_LOAD16(_p) \
+    ((reinterpret_cast<const ::snappy::base::internal::Unaligned16Struct *>(_p))->value)
+#define UNALIGNED_LOAD32(_p) \
+    ((reinterpret_cast<const ::snappy::base::internal::Unaligned32Struct *>(_p))->value)
+
+#define UNALIGNED_STORE16(_p, _val) \
+    ((reinterpret_cast< ::snappy::base::internal::Unaligned16Struct *>(_p))->value = \
+     (_val))
+#define UNALIGNED_STORE32(_p, _val) \
+    ((reinterpret_cast< ::snappy::base::internal::Unaligned32Struct *>(_p))->value = \
+     (_val))
+
+// TODO: NEON supports unaligned 64-bit loads and stores.
 // See if that would be more efficient on platforms supporting it,
 // at least for copies.
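A standalone sketch of the packed-struct technique used above (GCC/Clang only; the real code also pads each struct with a dummy byte so its size is not a power of two, which this simplified version omits):

#include <stdint.h>

// Marking the struct packed tells GCC the address may be unaligned, so it
// emits only single-register loads (LDR), which ARM permits to be unaligned,
// instead of combining adjacent accesses into alignment-sensitive LDRD/LDM.
struct Unaligned32 {
  uint32_t value;
} __attribute__((__packed__));

inline uint32_t UnalignedLoad32(const void* p) {
  return reinterpret_cast<const Unaligned32*>(p)->value;
}

inline void UnalignedStore32(void* p, uint32_t v) {
  reinterpret_cast<Unaligned32*>(p)->value = v;
}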
@@ -188,22 +250,8 @@ inline void UNALIGNED_STORE64(void *p, uint64 v) {
 
 #endif
 
-// This can be more efficient than UNALIGNED_LOAD64 + UNALIGNED_STORE64
-// on some platforms, in particular ARM.
-inline void UnalignedCopy64(const void *src, void *dst) {
-  if (sizeof(void *) == 8) {
-    UNALIGNED_STORE64(dst, UNALIGNED_LOAD64(src));
-  } else {
-    const char *src_char = reinterpret_cast<const char *>(src);
-    char *dst_char = reinterpret_cast<char *>(dst);
-
-    UNALIGNED_STORE32(dst_char, UNALIGNED_LOAD32(src_char));
-    UNALIGNED_STORE32(dst_char + 4, UNALIGNED_LOAD32(src_char + 4));
-  }
-}
-
 // The following guarantees declaration of the byte swap functions.
-#ifdef WORDS_BIGENDIAN
+#if defined(SNAPPY_IS_BIG_ENDIAN)
 
 #ifdef HAVE_SYS_BYTEORDER_H
 #include <sys/byteorder.h>
@@ -260,7 +308,7 @@ inline uint64 bswap_64(uint64 x) {
 
 #endif
 
-#endif  // WORDS_BIGENDIAN
+#endif  // defined(SNAPPY_IS_BIG_ENDIAN)
 
 // Convert to little-endian storage, opposite of network format.
 // Convert x from host to little endian: x = LittleEndian.FromHost(x);
@@ -274,7 +322,7 @@ inline uint64 bswap_64(uint64 x) {
 class LittleEndian {
  public:
   // Conversion functions.
-#ifdef WORDS_BIGENDIAN
+#if defined(SNAPPY_IS_BIG_ENDIAN)
 
   static uint16 FromHost16(uint16 x) { return bswap_16(x); }
   static uint16 ToHost16(uint16 x) { return bswap_16(x); }
@@ -284,7 +332,7 @@ class LittleEndian {
 
   static bool IsLittleEndian() { return false; }
 
-#else  // !defined(WORDS_BIGENDIAN)
+#else  // !defined(SNAPPY_IS_BIG_ENDIAN)
 
   static uint16 FromHost16(uint16 x) { return x; }
   static uint16 ToHost16(uint16 x) { return x; }
@@ -294,7 +342,7 @@ class LittleEndian {
 
   static bool IsLittleEndian() { return true; }
 
-#endif  // !defined(WORDS_BIGENDIAN)
+#endif  // !defined(SNAPPY_IS_BIG_ENDIAN)
 
   // Functions to do unaligned loads and stores in little-endian order.
   static uint16 Load16(const void *p) {
@@ -317,6 +365,9 @@ class LittleEndian {
 // Some bit-manipulation functions.
 class Bits {
  public:
+  // Return floor(log2(n)) for positive integer n.
+  static int Log2FloorNonZero(uint32 n);
+
   // Return floor(log2(n)) for positive integer n.  Returns -1 iff n == 0.
   static int Log2Floor(uint32 n);
 
@@ -324,31 +375,85 @@ class Bits {
   // undefined value if n == 0. FindLSBSetNonZero() is similar to ffs() except
   // that it's 0-indexed.
   static int FindLSBSetNonZero(uint32 n);
+
+#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
   static int FindLSBSetNonZero64(uint64 n);
+#endif  // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
 
  private:
-  DISALLOW_COPY_AND_ASSIGN(Bits);
+  // No copying
+  Bits(const Bits&);
+  void operator=(const Bits&);
 };
 
 #ifdef HAVE_BUILTIN_CTZ
 
+inline int Bits::Log2FloorNonZero(uint32 n) {
+  assert(n != 0);
+  // (31 ^ x) is equivalent to (31 - x) for x in [0, 31]. An easy proof
+  // represents subtraction in base 2 and observes that there's no carry.
+  //
+  // GCC and Clang represent __builtin_clz on x86 as 31 ^ _bit_scan_reverse(x).
+  // Using "31 ^" here instead of "31 -" allows the optimizer to strip the
+  // function body down to _bit_scan_reverse(x).
+  return 31 ^ __builtin_clz(n);
+}
+
 inline int Bits::Log2Floor(uint32 n) {
-  return n == 0 ? -1 : 31 ^ __builtin_clz(n);
+  return (n == 0) ? -1 : Bits::Log2FloorNonZero(n);
 }
 
 inline int Bits::FindLSBSetNonZero(uint32 n) {
+  assert(n != 0);
   return __builtin_ctz(n);
 }
 
+#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
 inline int Bits::FindLSBSetNonZero64(uint64 n) {
+  assert(n != 0);
   return __builtin_ctzll(n);
 }
+#endif  // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
 
-#else  // Portable versions.
+#elif defined(_MSC_VER)
+
+inline int Bits::Log2FloorNonZero(uint32 n) {
+  assert(n != 0);
+  unsigned long where;
+  _BitScanReverse(&where, n);
+  return static_cast<int>(where);
+}
 
 inline int Bits::Log2Floor(uint32 n) {
-  if (n == 0)
-    return -1;
+  unsigned long where;
+  if (_BitScanReverse(&where, n))
+    return static_cast<int>(where);
+  return -1;
+}
+
+inline int Bits::FindLSBSetNonZero(uint32 n) {
+  assert(n != 0);
+  unsigned long where;
+  if (_BitScanForward(&where, n))
+    return static_cast<int>(where);
+  return 32;
+}
+
+#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+inline int Bits::FindLSBSetNonZero64(uint64 n) {
+  assert(n != 0);
+  unsigned long where;
+  if (_BitScanForward64(&where, n))
+    return static_cast<int>(where);
+  return 64;
+}
+#endif  // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+
+#else  // Portable versions.
+
+inline int Bits::Log2FloorNonZero(uint32 n) {
+  assert(n != 0);
+
   int log = 0;
   uint32 value = n;
   for (int i = 4; i >= 0; --i) {
@@ -363,7 +468,13 @@ inline int Bits::Log2Floor(uint32 n) {
   return log;
 }
 
+inline int Bits::Log2Floor(uint32 n) {
+  return (n == 0) ? -1 : Bits::Log2FloorNonZero(n);
+}
+
 inline int Bits::FindLSBSetNonZero(uint32 n) {
+  assert(n != 0);
+
   int rc = 31;
   for (int i = 4, shift = 1 << 4; i >= 0; --i) {
     const uint32 x = n << shift;
@@ -376,8 +487,11 @@ inline int Bits::FindLSBSetNonZero(uint32 n) {
   return rc;
 }
 
+#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
 // FindLSBSetNonZero64() is defined in terms of FindLSBSetNonZero().
 inline int Bits::FindLSBSetNonZero64(uint64 n) {
+  assert(n != 0);
+
   const uint32 bottombits = static_cast<uint32>(n);
   if (bottombits == 0) {
     // Bottom bits are zero, so scan in top bits
@@ -386,6 +500,7 @@ inline int Bits::FindLSBSetNonZero64(uint64 n) {
     return FindLSBSetNonZero(bottombits);
   }
 }
+#endif  // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
 
 #endif  // End portable versions.
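The identity behind Log2FloorNonZero is easy to verify: for nonzero uint32 n, __builtin_clz(n) lies in [0, 31], and for any x in that range 31 ^ x equals 31 - x because 31 is 0b11111, so the subtraction never borrows. A standalone check (an illustrative sketch assuming a compiler with __builtin_clz, not code from the diff):

#include <cassert>
#include <cstdint>

int Log2FloorNonZero(uint32_t n) {
  assert(n != 0);
  return 31 ^ __builtin_clz(n);  // same as 31 - clz(n) for clz(n) in [0, 31]
}

int main() {
  // XOR and subtraction agree everywhere clz can land for nonzero input.
  for (int x = 0; x <= 31; ++x) assert((31 ^ x) == (31 - x));
  assert(Log2FloorNonZero(1) == 0);             // clz = 31
  assert(Log2FloorNonZero(8) == 3);             // clz = 28
  assert(Log2FloorNonZero(0x80000000u) == 31);  // clz = 0
}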
@@ -409,7 +524,7 @@ class Varint {
   static char* Encode32(char* ptr, uint32 v);
 
   // EFFECTS    Appends the varint representation of "value" to "*s".
-  static void Append32(string* s, uint32 value);
+  static void Append32(std::string* s, uint32 value);
 };
 
 inline const char* Varint::Parse32WithLimit(const char* p,
@@ -466,7 +581,7 @@ inline char* Varint::Encode32(char* sptr, uint32 v) {
 // replace this function with one that resizes the string without
 // filling the new space with zeros (if applicable) --
 // it will be non-portable but faster.
-inline void STLStringResizeUninitialized(string* s, size_t new_size) {
+inline void STLStringResizeUninitialized(std::string* s, size_t new_size) {
   s->resize(new_size);
 }
 
@@ -482,10 +597,10 @@ inline void STLStringResizeUninitialized(string* s, size_t new_size) {
 // (http://www.open-std.org/JTC1/SC22/WG21/docs/lwg-defects.html#530)
 // proposes this as the method. It will officially be part of the standard
 // for C++0x. This should already work on all current implementations.
-inline char* string_as_array(string* str) {
+inline char* string_as_array(std::string* str) {
   return str->empty() ? NULL : &*str->begin();
 }
 
 }  // namespace snappy
 
-#endif  // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
+#endif  // THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_