snappy 0.0.17 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. checksums.yaml +5 -5
  2. data/.dockerignore +2 -0
  3. data/.github/workflows/main.yml +34 -0
  4. data/.github/workflows/publish.yml +34 -0
  5. data/.gitignore +2 -1
  6. data/.gitmodules +1 -1
  7. data/Dockerfile +13 -0
  8. data/Gemfile +4 -0
  9. data/README.md +29 -5
  10. data/Rakefile +32 -29
  11. data/ext/api.c +6 -1
  12. data/ext/extconf.rb +23 -16
  13. data/lib/snappy/hadoop/reader.rb +62 -0
  14. data/lib/snappy/hadoop/writer.rb +51 -0
  15. data/lib/snappy/hadoop.rb +22 -0
  16. data/lib/snappy/reader.rb +14 -10
  17. data/lib/snappy/shim.rb +1 -1
  18. data/lib/snappy/version.rb +1 -1
  19. data/lib/snappy.rb +5 -4
  20. data/snappy.gemspec +13 -13
  21. data/test/hadoop/snappy_hadoop_reader_test.rb +115 -0
  22. data/test/hadoop/snappy_hadoop_writer_test.rb +48 -0
  23. data/test/snappy_hadoop_test.rb +26 -0
  24. data/test/snappy_reader_test.rb +148 -0
  25. data/test/snappy_test.rb +95 -0
  26. data/test/snappy_writer_test.rb +55 -0
  27. data/test/test_helper.rb +7 -0
  28. data/test.sh +3 -0
  29. data/vendor/snappy/CMakeLists.txt +297 -0
  30. data/vendor/snappy/CONTRIBUTING.md +26 -0
  31. data/vendor/snappy/NEWS +40 -0
  32. data/vendor/snappy/{README → README.md} +27 -18
  33. data/vendor/snappy/cmake/SnappyConfig.cmake.in +33 -0
  34. data/vendor/snappy/cmake/config.h.in +62 -0
  35. data/vendor/snappy/docs/README.md +72 -0
  36. data/vendor/snappy/snappy-internal.h +22 -18
  37. data/vendor/snappy/snappy-stubs-internal.cc +1 -1
  38. data/vendor/snappy/snappy-stubs-internal.h +116 -38
  39. data/vendor/snappy/snappy-stubs-public.h.in +20 -46
  40. data/vendor/snappy/snappy-test.cc +26 -22
  41. data/vendor/snappy/snappy-test.h +24 -98
  42. data/vendor/snappy/snappy.cc +380 -183
  43. data/vendor/snappy/snappy.h +14 -10
  44. data/vendor/snappy/snappy_compress_fuzzer.cc +59 -0
  45. data/vendor/snappy/snappy_uncompress_fuzzer.cc +57 -0
  46. data/vendor/snappy/snappy_unittest.cc +236 -261
  47. metadata +37 -92
  48. data/.travis.yml +0 -26
  49. data/smoke.sh +0 -8
  50. data/test/test-snappy-reader.rb +0 -129
  51. data/test/test-snappy-writer.rb +0 -55
  52. data/test/test-snappy.rb +0 -58
  53. data/vendor/snappy/ChangeLog +0 -2468
  54. data/vendor/snappy/INSTALL +0 -370
  55. data/vendor/snappy/Makefile +0 -982
  56. data/vendor/snappy/Makefile.am +0 -26
  57. data/vendor/snappy/Makefile.in +0 -982
  58. data/vendor/snappy/aclocal.m4 +0 -9738
  59. data/vendor/snappy/autogen.sh +0 -12
  60. data/vendor/snappy/autom4te.cache/output.0 +0 -18856
  61. data/vendor/snappy/autom4te.cache/output.1 +0 -18852
  62. data/vendor/snappy/autom4te.cache/requests +0 -297
  63. data/vendor/snappy/autom4te.cache/traces.0 +0 -2689
  64. data/vendor/snappy/autom4te.cache/traces.1 +0 -714
  65. data/vendor/snappy/config.guess +0 -1530
  66. data/vendor/snappy/config.h +0 -135
  67. data/vendor/snappy/config.h.in +0 -134
  68. data/vendor/snappy/config.log +0 -1640
  69. data/vendor/snappy/config.status +0 -2318
  70. data/vendor/snappy/config.sub +0 -1773
  71. data/vendor/snappy/configure +0 -18852
  72. data/vendor/snappy/configure.ac +0 -134
  73. data/vendor/snappy/depcomp +0 -688
  74. data/vendor/snappy/install-sh +0 -527
  75. data/vendor/snappy/libtool +0 -10246
  76. data/vendor/snappy/ltmain.sh +0 -9661
  77. data/vendor/snappy/m4/gtest.m4 +0 -74
  78. data/vendor/snappy/m4/libtool.m4 +0 -8001
  79. data/vendor/snappy/m4/ltoptions.m4 +0 -384
  80. data/vendor/snappy/m4/ltsugar.m4 +0 -123
  81. data/vendor/snappy/m4/ltversion.m4 +0 -23
  82. data/vendor/snappy/m4/lt~obsolete.m4 +0 -98
  83. data/vendor/snappy/missing +0 -331
  84. data/vendor/snappy/snappy-stubs-public.h +0 -100
  85. data/vendor/snappy/snappy.pc +0 -10
  86. data/vendor/snappy/snappy.pc.in +0 -10
  87. data/vendor/snappy/stamp-h1 +0 -1
data/vendor/snappy/snappy-internal.h
@@ -36,21 +36,30 @@
 namespace snappy {
 namespace internal {
 
+// Working memory performs a single allocation to hold all scratch space
+// required for compression.
 class WorkingMemory {
  public:
-  WorkingMemory() : large_table_(NULL) { }
-  ~WorkingMemory() { delete[] large_table_; }
+  explicit WorkingMemory(size_t input_size);
+  ~WorkingMemory();
 
   // Allocates and clears a hash table using memory in "*this",
   // stores the number of buckets in "*table_size" and returns a pointer to
   // the base of the hash table.
-  uint16* GetHashTable(size_t input_size, int* table_size);
+  uint16* GetHashTable(size_t fragment_size, int* table_size) const;
+  char* GetScratchInput() const { return input_; }
+  char* GetScratchOutput() const { return output_; }
 
  private:
-  uint16 small_table_[1<<10];    // 2KB
-  uint16* large_table_;          // Allocated only when needed
-
-  DISALLOW_COPY_AND_ASSIGN(WorkingMemory);
+  char* mem_;      // the allocated memory, never nullptr
+  size_t size_;    // the size of the allocated memory, never 0
+  uint16* table_;  // the pointer to the hashtable
+  char* input_;    // the pointer to the input scratch buffer
+  char* output_;   // the pointer to the output scratch buffer
+
+  // No copying
+  WorkingMemory(const WorkingMemory&);
+  void operator=(const WorkingMemory&);
 };
 
 // Flat array compression that does not emit the "uncompressed length"
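Note on the hunk above: the rewritten WorkingMemory trades the old fixed 2KB table plus a lazily allocated large table for a single upfront allocation that is carved into the hash table and two scratch buffers, so each compression call costs at most one allocate/free pair. A minimal sketch of that pattern, with invented sizes (the vendored constructor derives the real ones from input_size):

    #include <cstdlib>

    // Sketch only; kTableBytes and kScratchBytes are illustrative
    // placeholders, not the sizes snappy computes.
    class SingleAllocSketch {
     public:
      static const size_t kTableBytes = (1 << 14) * sizeof(unsigned short);
      static const size_t kScratchBytes = 1 << 16;

      SingleAllocSketch() {
        size_ = kTableBytes + 2 * kScratchBytes;
        mem_ = static_cast<char*>(std::malloc(size_));
        table_ = reinterpret_cast<unsigned short*>(mem_);  // table at the front
        input_ = mem_ + kTableBytes;                       // then input scratch
        output_ = input_ + kScratchBytes;                  // then output scratch
      }
      ~SingleAllocSketch() { std::free(mem_); }  // one free releases everything

     private:
      char* mem_;
      size_t size_;
      unsigned short* table_;
      char* input_;
      char* output_;
    };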
@@ -80,9 +89,9 @@ char* CompressFragment(const char* input,
 // Does not read *(s1 + (s2_limit - s2)) or beyond.
 // Requires that s2_limit >= s2.
 //
-// Separate implementation for x86_64, for speed. Uses the fact that
-// x86_64 is little endian.
-#if defined(ARCH_K8)
+// Separate implementation for 64-bit, little-endian cpus.
+#if !defined(SNAPPY_IS_BIG_ENDIAN) && \
+    (defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM))
 static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
                                                       const char* s2,
                                                       const char* s2_limit) {
@@ -94,7 +103,7 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
   // uncommon code paths that determine, without extra effort, whether the match
   // length is less than 8. In short, we are hoping to avoid a conditional
   // branch, and perhaps get better code layout from the C++ compiler.
-  if (PREDICT_TRUE(s2 <= s2_limit - 8)) {
+  if (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 8)) {
     uint64 a1 = UNALIGNED_LOAD64(s1);
     uint64 a2 = UNALIGNED_LOAD64(s2);
     if (a1 != a2) {
@@ -110,7 +119,7 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
   // time until we find a 64-bit block that doesn't match; then we find
   // the first non-matching bit and use that to calculate the total
   // length of the match.
-  while (PREDICT_TRUE(s2 <= s2_limit - 8)) {
+  while (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 8)) {
     if (UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched)) {
       s2 += 8;
       matched += 8;
@@ -122,7 +131,7 @@ static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
       return std::pair<size_t, bool>(matched, false);
     }
   }
-  while (PREDICT_TRUE(s2 < s2_limit)) {
+  while (SNAPPY_PREDICT_TRUE(s2 < s2_limit)) {
    if (s1[matched] == *s2) {
      ++s2;
      ++matched;
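Note: the loop above is the heart of the 64-bit FindMatchLength. Once an 8-byte block differs, the position of the lowest set bit of the XOR gives the match length in bytes, which is why this fast path is now gated on little-endian rather than on x86_64 alone. A standalone sketch of that step (assumes a little-endian host and GCC/Clang builtins):

    #include <cstdint>
    #include <cstring>

    // Number of leading bytes that match between two 8-byte blocks.
    // On a little-endian host the low-order bits of x correspond to the
    // earlier bytes in memory, so trailing zero bits / 8 = equal bytes.
    size_t MatchedBytes(const char* s1, const char* s2) {
      std::uint64_t a1, a2;
      std::memcpy(&a1, s1, 8);  // stand-in for UNALIGNED_LOAD64
      std::memcpy(&a2, s2, 8);
      std::uint64_t x = a1 ^ a2;       // nonzero iff the blocks differ
      if (x == 0) return 8;            // whole block matches
      return __builtin_ctzll(x) >> 3;  // first differing bit, in bytes
    }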
@@ -170,11 +179,6 @@ enum {
 };
 static const int kMaximumTagLength = 5;  // COPY_4_BYTE_OFFSET plus the actual offset.
 
-// Mapping from i in range [0,4] to a mask to extract the bottom 8*i bits
-static const uint32 wordmask[] = {
-  0u, 0xffu, 0xffffu, 0xffffffu, 0xffffffffu
-};
-
 // Data stored per entry in lookup table:
 //      Range   Bits-used       Description
 //      ------------------------------------
data/vendor/snappy/snappy-stubs-internal.cc
@@ -33,7 +33,7 @@
 
 namespace snappy {
 
-void Varint::Append32(string* s, uint32 value) {
+void Varint::Append32(std::string* s, uint32 value) {
   char buf[Varint::kMax32];
   const char* p = Varint::Encode32(buf, value);
   s->append(buf, p - buf);
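Note: Append32 defers to Varint::Encode32, whose body is not shown in this diff. For reference, a base-128 varint emits seven payload bits per byte, least-significant group first, with the high bit set on every byte except the last; for example, 300 encodes as 0xAC 0x02. An illustrative encoder (a sketch, not the vendored Encode32):

    #include <cstdint>

    // Writes at most five bytes for a 32-bit value and returns one past
    // the end, the calling convention Append32 relies on.
    char* EncodeVarint32(char* ptr, std::uint32_t v) {
      while (v >= 0x80) {
        *ptr++ = static_cast<char>(v | 0x80);  // 7 payload bits + continuation bit
        v >>= 7;
      }
      *ptr++ = static_cast<char>(v);           // final byte, high bit clear
      return ptr;
    }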
data/vendor/snappy/snappy-stubs-internal.h
@@ -45,6 +45,26 @@
 #include <sys/mman.h>
 #endif
 
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#if defined(_MSC_VER)
+#include <intrin.h>
+#endif  // defined(_MSC_VER)
+
+#ifndef __has_feature
+#define __has_feature(x) 0
+#endif
+
+#if __has_feature(memory_sanitizer)
+#include <sanitizer/msan_interface.h>
+#define SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
+    __msan_unpoison((address), (size))
+#else
+#define SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) /* empty */
+#endif  // __has_feature(memory_sanitizer)
+
 #include "snappy-stubs-public.h"
 
 #if defined(__x86_64__)
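Note: the new SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED macro expands to __msan_unpoison under MemorySanitizer and to nothing otherwise, so annotated call sites cost nothing in normal builds. A hypothetical call site (the helper name is invented for illustration):

    // Hypothetical: after a buffer is filled through a path MSan cannot
    // track (e.g. uninstrumented code), mark its bytes as initialized so
    // later reads are not flagged as use-of-uninitialized-value.
    char scratch[64];
    FillFromUninstrumentedCode(scratch, sizeof(scratch));  // invented helper
    SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(scratch, sizeof(scratch));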
@@ -52,6 +72,14 @@
 // Enable 64-bit optimized versions of some routines.
 #define ARCH_K8 1
 
+#elif defined(__ppc64__)
+
+#define ARCH_PPC 1
+
+#elif defined(__aarch64__)
+
+#define ARCH_ARM 1
+
 #endif
 
 // Needed by OS X, among others.
@@ -59,10 +87,6 @@
 #define MAP_ANONYMOUS MAP_ANON
 #endif
 
-// Pull in std::min, std::ostream, and the likes. This is safe because this
-// header file is never used from any public header files.
-using namespace std;
-
 // The size of an array, if known at compile-time.
 // Will give unexpected results if used on a pointer.
 // We undefine it first, since some compilers already have a definition.
@@ -73,11 +97,11 @@ using namespace std;
 
 // Static prediction hints.
 #ifdef HAVE_BUILTIN_EXPECT
-#define PREDICT_FALSE(x) (__builtin_expect(x, 0))
-#define PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
+#define SNAPPY_PREDICT_FALSE(x) (__builtin_expect(x, 0))
+#define SNAPPY_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
 #else
-#define PREDICT_FALSE(x) x
-#define PREDICT_TRUE(x) x
+#define SNAPPY_PREDICT_FALSE(x) x
+#define SNAPPY_PREDICT_TRUE(x) x
 #endif
 
 // This is only used for recomputing the tag byte table used during
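Note: the prediction macros gain a SNAPPY_ prefix to avoid colliding with identically named macros from other libraries; their expansion is unchanged. A small usage sketch (the function and names are invented):

    // Hint that the bounds check almost never fails, so the compiler can
    // keep the failure path out of the hot code layout.
    bool CopyOneByte(const char* ip, const char* ip_limit, char* op) {
      if (SNAPPY_PREDICT_FALSE(ip >= ip_limit)) {
        return false;  // cold path
      }
      *op = *ip;  // hot path
      return true;
    }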
@@ -96,9 +120,10 @@ static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
 
 // Potentially unaligned loads and stores.
 
-// x86 and PowerPC can simply do these loads and stores native.
+// x86, PowerPC, and ARM64 can simply do these loads and stores native.
 
-#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
+#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
+    defined(__aarch64__)
 
 #define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
 #define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
@@ -174,7 +199,7 @@ struct Unaligned32Struct {
     ((reinterpret_cast< ::snappy::base::internal::Unaligned32Struct *>(_p))->value = \
      (_val))
 
-// TODO(user): NEON supports unaligned 64-bit loads and stores.
+// TODO: NEON supports unaligned 64-bit loads and stores.
 // See if that would be more efficient on platforms supporting it,
 // at least for copies.
 
@@ -225,22 +250,8 @@ inline void UNALIGNED_STORE64(void *p, uint64 v) {
 
 #endif
 
-// This can be more efficient than UNALIGNED_LOAD64 + UNALIGNED_STORE64
-// on some platforms, in particular ARM.
-inline void UnalignedCopy64(const void *src, void *dst) {
-  if (sizeof(void *) == 8) {
-    UNALIGNED_STORE64(dst, UNALIGNED_LOAD64(src));
-  } else {
-    const char *src_char = reinterpret_cast<const char *>(src);
-    char *dst_char = reinterpret_cast<char *>(dst);
-
-    UNALIGNED_STORE32(dst_char, UNALIGNED_LOAD32(src_char));
-    UNALIGNED_STORE32(dst_char + 4, UNALIGNED_LOAD32(src_char + 4));
-  }
-}
-
 // The following guarantees declaration of the byte swap functions.
-#ifdef WORDS_BIGENDIAN
+#if defined(SNAPPY_IS_BIG_ENDIAN)
 
 #ifdef HAVE_SYS_BYTEORDER_H
 #include <sys/byteorder.h>
@@ -297,7 +308,7 @@ inline uint64 bswap_64(uint64 x) {
 
 #endif
 
-#endif  // WORDS_BIGENDIAN
+#endif  // defined(SNAPPY_IS_BIG_ENDIAN)
 
 // Convert to little-endian storage, opposite of network format.
 // Convert x from host to little endian: x = LittleEndian.FromHost(x);
@@ -311,7 +322,7 @@ inline uint64 bswap_64(uint64 x) {
 class LittleEndian {
  public:
   // Conversion functions.
-#ifdef WORDS_BIGENDIAN
+#if defined(SNAPPY_IS_BIG_ENDIAN)
 
   static uint16 FromHost16(uint16 x) { return bswap_16(x); }
   static uint16 ToHost16(uint16 x) { return bswap_16(x); }
@@ -321,7 +332,7 @@ class LittleEndian {
 
   static bool IsLittleEndian() { return false; }
 
-#else  // !defined(WORDS_BIGENDIAN)
+#else  // !defined(SNAPPY_IS_BIG_ENDIAN)
 
   static uint16 FromHost16(uint16 x) { return x; }
   static uint16 ToHost16(uint16 x) { return x; }
@@ -331,7 +342,7 @@ class LittleEndian {
 
   static bool IsLittleEndian() { return true; }
 
-#endif  // !defined(WORDS_BIGENDIAN)
+#endif  // !defined(SNAPPY_IS_BIG_ENDIAN)
 
   // Functions to do unaligned loads and stores in little-endian order.
   static uint16 Load16(const void *p) {
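Note: only the endianness macro changes in this class; LittleEndian still pins the stored byte order to little-endian regardless of host. An illustration of that guarantee, sketched against the header's own helpers:

    // FromHost32 is the identity on a little-endian host and a byte swap on
    // a big-endian one, so this writes bytes 04 03 02 01 on any machine.
    void WriteLittleEndianExample(char* buf) {
      UNALIGNED_STORE32(buf, LittleEndian::FromHost32(0x01020304));
    }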
@@ -354,6 +365,9 @@ class LittleEndian {
 // Some bit-manipulation functions.
 class Bits {
  public:
+  // Return floor(log2(n)) for positive integer n.
+  static int Log2FloorNonZero(uint32 n);
+
   // Return floor(log2(n)) for positive integer n.  Returns -1 iff n == 0.
   static int Log2Floor(uint32 n);
 
@@ -361,31 +375,85 @@ class Bits {
   // undefined value if n == 0.  FindLSBSetNonZero() is similar to ffs() except
   // that it's 0-indexed.
   static int FindLSBSetNonZero(uint32 n);
+
+#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
   static int FindLSBSetNonZero64(uint64 n);
+#endif  // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
 
  private:
-  DISALLOW_COPY_AND_ASSIGN(Bits);
+  // No copying
+  Bits(const Bits&);
+  void operator=(const Bits&);
 };
 
 #ifdef HAVE_BUILTIN_CTZ
 
+inline int Bits::Log2FloorNonZero(uint32 n) {
+  assert(n != 0);
+  // (31 ^ x) is equivalent to (31 - x) for x in [0, 31]. An easy proof
+  // represents subtraction in base 2 and observes that there's no carry.
+  //
+  // GCC and Clang represent __builtin_clz on x86 as 31 ^ _bit_scan_reverse(x).
+  // Using "31 ^" here instead of "31 -" allows the optimizer to strip the
+  // function body down to _bit_scan_reverse(x).
+  return 31 ^ __builtin_clz(n);
+}
+
 inline int Bits::Log2Floor(uint32 n) {
-  return n == 0 ? -1 : 31 ^ __builtin_clz(n);
+  return (n == 0) ? -1 : Bits::Log2FloorNonZero(n);
 }
 
 inline int Bits::FindLSBSetNonZero(uint32 n) {
+  assert(n != 0);
   return __builtin_ctz(n);
 }
 
+#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
 inline int Bits::FindLSBSetNonZero64(uint64 n) {
+  assert(n != 0);
   return __builtin_ctzll(n);
 }
+#endif  // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
 
-#else  // Portable versions.
+#elif defined(_MSC_VER)
+
+inline int Bits::Log2FloorNonZero(uint32 n) {
+  assert(n != 0);
+  unsigned long where;
+  _BitScanReverse(&where, n);
+  return static_cast<int>(where);
+}
 
 inline int Bits::Log2Floor(uint32 n) {
-  if (n == 0)
-    return -1;
+  unsigned long where;
+  if (_BitScanReverse(&where, n))
+    return static_cast<int>(where);
+  return -1;
+}
+
+inline int Bits::FindLSBSetNonZero(uint32 n) {
+  assert(n != 0);
+  unsigned long where;
+  if (_BitScanForward(&where, n))
+    return static_cast<int>(where);
+  return 32;
+}
+
+#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+inline int Bits::FindLSBSetNonZero64(uint64 n) {
+  assert(n != 0);
+  unsigned long where;
+  if (_BitScanForward64(&where, n))
+    return static_cast<int>(where);
+  return 64;
+}
+#endif  // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+
+#else  // Portable versions.
+
+inline int Bits::Log2FloorNonZero(uint32 n) {
+  assert(n != 0);
+
   int log = 0;
   uint32 value = n;
   for (int i = 4; i >= 0; --i) {
@@ -400,7 +468,13 @@ inline int Bits::Log2Floor(uint32 n) {
   return log;
 }
 
+inline int Bits::Log2Floor(uint32 n) {
+  return (n == 0) ? -1 : Bits::Log2FloorNonZero(n);
+}
+
 inline int Bits::FindLSBSetNonZero(uint32 n) {
+  assert(n != 0);
+
   int rc = 31;
   for (int i = 4, shift = 1 << 4; i >= 0; --i) {
     const uint32 x = n << shift;
@@ -413,8 +487,11 @@ inline int Bits::FindLSBSetNonZero(uint32 n) {
   return rc;
 }
 
+#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
 // FindLSBSetNonZero64() is defined in terms of FindLSBSetNonZero().
 inline int Bits::FindLSBSetNonZero64(uint64 n) {
+  assert(n != 0);
+
   const uint32 bottombits = static_cast<uint32>(n);
   if (bottombits == 0) {
     // Bottom bits are zero, so scan in top bits
@@ -423,6 +500,7 @@ inline int Bits::FindLSBSetNonZero64(uint64 n) {
     return FindLSBSetNonZero(bottombits);
   }
 }
+#endif  // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
 
 #endif  // End portable versions.
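Note: the 31 ^ __builtin_clz(n) form introduced above works because for x in [0, 31] the set bits of x are a subset of those of 31 (binary 11111), so the XOR performs the subtraction without borrows. A quick standalone check (assumes the GCC/Clang builtin):

    #include <cassert>

    int main() {
      for (int x = 0; x <= 31; ++x) assert((31 ^ x) == 31 - x);
      assert((31 ^ __builtin_clz(1u)) == 0);      // log2(1)    = 0
      assert((31 ^ __builtin_clz(1024u)) == 10);  // log2(1024) = 10
      return 0;
    }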
@@ -446,7 +524,7 @@ class Varint {
   static char* Encode32(char* ptr, uint32 v);
 
   // EFFECTS    Appends the varint representation of "value" to "*s".
-  static void Append32(string* s, uint32 value);
+  static void Append32(std::string* s, uint32 value);
 };
 
 inline const char* Varint::Parse32WithLimit(const char* p,
@@ -503,7 +581,7 @@ inline char* Varint::Encode32(char* sptr, uint32 v) {
 // replace this function with one that resizes the string without
 // filling the new space with zeros (if applicable) --
 // it will be non-portable but faster.
-inline void STLStringResizeUninitialized(string* s, size_t new_size) {
+inline void STLStringResizeUninitialized(std::string* s, size_t new_size) {
   s->resize(new_size);
 }
 
@@ -519,7 +597,7 @@ inline void STLStringResizeUninitialized(string* s, size_t new_size) {
 // (http://www.open-std.org/JTC1/SC22/WG21/docs/lwg-defects.html#530)
 // proposes this as the method.  It will officially be part of the standard
 // for C++0x.  This should already work on all current implementations.
-inline char* string_as_array(string* str) {
+inline char* string_as_array(std::string* str) {
   return str->empty() ? NULL : &*str->begin();
 }
 
data/vendor/snappy/snappy-stubs-public.h.in
@@ -1,5 +1,4 @@
 // Copyright 2011 Google Inc. All Rights Reserved.
-// Author: sesse@google.com (Steinar H. Gunderson)
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
@@ -36,64 +35,39 @@
 #ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
 #define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
 
-#if @ac_cv_have_stdint_h@
-#include <stdint.h>
-#endif
-
-#if @ac_cv_have_stddef_h@
-#include <stddef.h>
-#endif
+#include <cstddef>
+#include <cstdint>
+#include <string>
 
-#if @ac_cv_have_sys_uio_h@
+#if ${HAVE_SYS_UIO_H_01}  // HAVE_SYS_UIO_H
 #include <sys/uio.h>
-#endif
+#endif  // HAVE_SYS_UIO_H
 
-#define SNAPPY_MAJOR @SNAPPY_MAJOR@
-#define SNAPPY_MINOR @SNAPPY_MINOR@
-#define SNAPPY_PATCHLEVEL @SNAPPY_PATCHLEVEL@
+#define SNAPPY_MAJOR ${PROJECT_VERSION_MAJOR}
+#define SNAPPY_MINOR ${PROJECT_VERSION_MINOR}
+#define SNAPPY_PATCHLEVEL ${PROJECT_VERSION_PATCH}
 #define SNAPPY_VERSION \
     ((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL)
 
-#include <string>
-
 namespace snappy {
 
-#if @ac_cv_have_stdint_h@
-typedef int8_t int8;
-typedef uint8_t uint8;
-typedef int16_t int16;
-typedef uint16_t uint16;
-typedef int32_t int32;
-typedef uint32_t uint32;
-typedef int64_t int64;
-typedef uint64_t uint64;
-#else
-typedef signed char int8;
-typedef unsigned char uint8;
-typedef short int16;
-typedef unsigned short uint16;
-typedef int int32;
-typedef unsigned int uint32;
-typedef long long int64;
-typedef unsigned long long uint64;
-#endif
-
-typedef std::string string;
-
-#ifndef DISALLOW_COPY_AND_ASSIGN
-#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
-  TypeName(const TypeName&); \
-  void operator=(const TypeName&)
-#endif
+using int8 = std::int8_t;
+using uint8 = std::uint8_t;
+using int16 = std::int16_t;
+using uint16 = std::uint16_t;
+using int32 = std::int32_t;
+using uint32 = std::uint32_t;
+using int64 = std::int64_t;
+using uint64 = std::uint64_t;
 
-#if !@ac_cv_have_sys_uio_h@
+#if !${HAVE_SYS_UIO_H_01}  // !HAVE_SYS_UIO_H
 // Windows does not have an iovec type, yet the concept is universally useful.
 // It is simple to define it ourselves, so we put it inside our own namespace.
 struct iovec {
-    void* iov_base;
-    size_t iov_len;
+  void* iov_base;
+  size_t iov_len;
 };
-#endif
+#endif  // !HAVE_SYS_UIO_H
 
 } // namespace snappy
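Note: the version macros now come from CMake's PROJECT_VERSION_* variables, but SNAPPY_VERSION packs them exactly as before, one byte per component. A worked example with hypothetical values major=1, minor=1, patch=8:

    // (1 << 16) | (1 << 8) | 8  ==  65536 + 256 + 8  ==  0x010108
    static_assert(((1 << 16) | (1 << 8) | 8) == 0x010108,
                  "one byte per version component");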
data/vendor/snappy/snappy-test.cc
@@ -33,6 +33,9 @@
 #endif
 
 #ifdef HAVE_WINDOWS_H
+// Needed to be able to use std::max without workarounds in the source code.
+// https://support.microsoft.com/en-us/help/143208/prb-using-stl-in-windows-program-can-cause-min-max-conflicts
+#define NOMINMAX
 #include <windows.h>
 #endif
 
@@ -45,12 +48,12 @@ DEFINE_bool(run_microbenchmarks, true,
 
 namespace snappy {
 
-string ReadTestDataFile(const string& base, size_t size_limit) {
-  string contents;
+std::string ReadTestDataFile(const std::string& base, size_t size_limit) {
+  std::string contents;
   const char* srcdir = getenv("srcdir");  // This is set by Automake.
-  string prefix;
+  std::string prefix;
   if (srcdir) {
-    prefix = string(srcdir) + "/";
+    prefix = std::string(srcdir) + "/";
   }
   file::GetContents(prefix + "testdata/" + base, &contents, file::Defaults()
       ).CheckSuccess();
@@ -60,11 +63,11 @@ string ReadTestDataFile(const string& base, size_t size_limit) {
   return contents;
 }
 
-string ReadTestDataFile(const string& base) {
+std::string ReadTestDataFile(const std::string& base) {
   return ReadTestDataFile(base, 0);
 }
 
-string StringPrintf(const char* format, ...) {
+std::string StrFormat(const char* format, ...) {
   char buf[4096];
   va_list ap;
   va_start(ap, format);
@@ -76,7 +79,7 @@ string StringPrintf(const char* format, ...) {
 bool benchmark_running = false;
 int64 benchmark_real_time_us = 0;
 int64 benchmark_cpu_time_us = 0;
-string *benchmark_label = NULL;
+std::string* benchmark_label = nullptr;
 int64 benchmark_bytes_processed = 0;
 
 void ResetBenchmarkTiming() {
@@ -160,11 +163,11 @@ void StopBenchmarkTiming() {
   benchmark_running = false;
 }
 
-void SetBenchmarkLabel(const string& str) {
+void SetBenchmarkLabel(const std::string& str) {
   if (benchmark_label) {
     delete benchmark_label;
   }
-  benchmark_label = new string(str);
+  benchmark_label = new std::string(str);
 }
 
 void SetBenchmarkBytesProcessed(int64 bytes) {
@@ -201,7 +204,7 @@ void Benchmark::Run() {
   if (benchmark_real_time_us > 0) {
     num_iterations = 200000 * kCalibrateIterations / benchmark_real_time_us;
   }
-  num_iterations = max(num_iterations, kCalibrateIterations);
+  num_iterations = std::max(num_iterations, kCalibrateIterations);
   BenchmarkRun benchmark_runs[kNumRuns];
 
   for (int run = 0; run < kNumRuns; ++run) {
@@ -214,13 +217,13 @@ void Benchmark::Run() {
     benchmark_runs[run].cpu_time_us = benchmark_cpu_time_us;
   }
 
-  string heading = StringPrintf("%s/%d", name_.c_str(), test_case_num);
-  string human_readable_speed;
+  std::string heading = StrFormat("%s/%d", name_.c_str(), test_case_num);
+  std::string human_readable_speed;
 
-  nth_element(benchmark_runs,
-              benchmark_runs + kMedianPos,
-              benchmark_runs + kNumRuns,
-              BenchmarkCompareCPUTime());
+  std::nth_element(benchmark_runs,
+                   benchmark_runs + kMedianPos,
+                   benchmark_runs + kNumRuns,
+                   BenchmarkCompareCPUTime());
   int64 real_time_us = benchmark_runs[kMedianPos].real_time_us;
   int64 cpu_time_us = benchmark_runs[kMedianPos].cpu_time_us;
   if (cpu_time_us <= 0) {
@@ -229,15 +232,16 @@ void Benchmark::Run() {
     int64 bytes_per_second =
         benchmark_bytes_processed * 1000000 / cpu_time_us;
     if (bytes_per_second < 1024) {
-      human_readable_speed = StringPrintf("%dB/s", bytes_per_second);
+      human_readable_speed =
+          StrFormat("%dB/s", static_cast<int>(bytes_per_second));
     } else if (bytes_per_second < 1024 * 1024) {
-      human_readable_speed = StringPrintf(
+      human_readable_speed = StrFormat(
           "%.1fkB/s", bytes_per_second / 1024.0f);
     } else if (bytes_per_second < 1024 * 1024 * 1024) {
-      human_readable_speed = StringPrintf(
+      human_readable_speed = StrFormat(
           "%.1fMB/s", bytes_per_second / (1024.0f * 1024.0f));
     } else {
-      human_readable_speed = StringPrintf(
+      human_readable_speed = StrFormat(
           "%.1fGB/s", bytes_per_second / (1024.0f * 1024.0f * 1024.0f));
     }
   }
@@ -523,8 +527,8 @@ int ZLib::UncompressAtMostOrAll(Bytef *dest, uLongf *destLen,
     LOG(WARNING)
         << "UncompressChunkOrAll: Received some extra data, bytes total: "
        << uncomp_stream_.avail_in << " bytes: "
-        << string(reinterpret_cast<const char *>(uncomp_stream_.next_in),
-                  min(int(uncomp_stream_.avail_in), 20));
+        << std::string(reinterpret_cast<const char *>(uncomp_stream_.next_in),
+                       std::min(int(uncomp_stream_.avail_in), 20));
     UncompressErrorInit();
     return Z_DATA_ERROR;  // what's the extra data for?
   } else if (err != Z_OK && err != Z_STREAM_END && err != Z_BUF_ERROR) {