snappy_ext 0.1.2

Files changed (39)
  1. data/ext/snappy/extconf.rb +36 -0
  2. data/ext/snappy/snappy_ext.cc +131 -0
  3. data/ext/snappy/vendor/snappy-1.0.0/AUTHORS +1 -0
  4. data/ext/snappy/vendor/snappy-1.0.0/COPYING +28 -0
  5. data/ext/snappy/vendor/snappy-1.0.0/ChangeLog +3 -0
  6. data/ext/snappy/vendor/snappy-1.0.0/INSTALL +230 -0
  7. data/ext/snappy/vendor/snappy-1.0.0/Makefile.am +24 -0
  8. data/ext/snappy/vendor/snappy-1.0.0/Makefile.in +926 -0
  9. data/ext/snappy/vendor/snappy-1.0.0/NEWS +3 -0
  10. data/ext/snappy/vendor/snappy-1.0.0/README +132 -0
  11. data/ext/snappy/vendor/snappy-1.0.0/aclocal.m4 +9076 -0
  12. data/ext/snappy/vendor/snappy-1.0.0/autogen.sh +8 -0
  13. data/ext/snappy/vendor/snappy-1.0.0/compile +99 -0
  14. data/ext/snappy/vendor/snappy-1.0.0/config.guess +1466 -0
  15. data/ext/snappy/vendor/snappy-1.0.0/config.h.in +107 -0
  16. data/ext/snappy/vendor/snappy-1.0.0/config.sub +1579 -0
  17. data/ext/snappy/vendor/snappy-1.0.0/configure +17962 -0
  18. data/ext/snappy/vendor/snappy-1.0.0/configure.ac +99 -0
  19. data/ext/snappy/vendor/snappy-1.0.0/depcomp +530 -0
  20. data/ext/snappy/vendor/snappy-1.0.0/install-sh +323 -0
  21. data/ext/snappy/vendor/snappy-1.0.0/ltmain.sh +8413 -0
  22. data/ext/snappy/vendor/snappy-1.0.0/m4/gtest.m4 +74 -0
  23. data/ext/snappy/vendor/snappy-1.0.0/missing +360 -0
  24. data/ext/snappy/vendor/snappy-1.0.0/mkinstalldirs +158 -0
  25. data/ext/snappy/vendor/snappy-1.0.0/snappy-internal.h +136 -0
  26. data/ext/snappy/vendor/snappy-1.0.0/snappy-sinksource.cc +46 -0
  27. data/ext/snappy/vendor/snappy-1.0.0/snappy-sinksource.h +110 -0
  28. data/ext/snappy/vendor/snappy-1.0.0/snappy-stubs-internal.cc +28 -0
  29. data/ext/snappy/vendor/snappy-1.0.0/snappy-stubs-internal.h +457 -0
  30. data/ext/snappy/vendor/snappy-1.0.0/snappy-stubs-public.h +59 -0
  31. data/ext/snappy/vendor/snappy-1.0.0/snappy-stubs-public.h.in +59 -0
  32. data/ext/snappy/vendor/snappy-1.0.0/snappy-test.cc +523 -0
  33. data/ext/snappy/vendor/snappy-1.0.0/snappy-test.h +458 -0
  34. data/ext/snappy/vendor/snappy-1.0.0/snappy.cc +1001 -0
  35. data/ext/snappy/vendor/snappy-1.0.0/snappy.h +141 -0
  36. data/ext/snappy/vendor/snappy-1.0.0/snappy_unittest.cc +1073 -0
  37. data/ext/snappy/version.h +4 -0
  38. data/snappy_ext.gemspec +58 -0
  39. metadata +99 -0
data/ext/snappy/vendor/snappy-1.0.0/snappy-internal.h
@@ -0,0 +1,136 @@
+ // Copyright 2008 Google Inc. All Rights Reserved.
+ //
+ // Licensed under the Apache License, Version 2.0 (the "License");
+ // you may not use this file except in compliance with the License.
+ // You may obtain a copy of the License at
+ //
+ //      http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing, software
+ // distributed under the License is distributed on an "AS IS" BASIS,
+ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ // See the License for the specific language governing permissions and
+ // limitations under the License.
+ //
+ // Internals shared between the snappy implementation and its unittest.
+
+ #ifndef UTIL_SNAPPY_SNAPPY_INTERNAL_H_
+ #define UTIL_SNAPPY_SNAPPY_INTERNAL_H_
+
+ #include "snappy-stubs-internal.h"
+
+ namespace snappy {
+ namespace internal {
+
+ class WorkingMemory {
+  public:
+   WorkingMemory() : large_table_(NULL) { }
+   ~WorkingMemory() { delete[] large_table_; }
+
+   // Allocates and clears a hash table using memory in "*this",
+   // stores the number of buckets in "*table_size" and returns a pointer to
+   // the base of the hash table.
+   uint16* GetHashTable(size_t input_size, int* table_size);
+
+  private:
+   uint16 small_table_[1<<10];    // 2KB
+   uint16* large_table_;          // Allocated only when needed
+
+   DISALLOW_COPY_AND_ASSIGN(WorkingMemory);
+ };
+
+ // Flat array compression that does not emit the "uncompressed length"
+ // prefix. Compresses "input" string to the "*op" buffer.
+ //
+ // REQUIRES: "input_length <= kBlockSize"
+ // REQUIRES: "op" points to an array of memory that is at least
+ // "MaxCompressedLength(input_length)" in size.
+ // REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
+ // REQUIRES: "table_size" is a power of two
+ //
+ // Returns an "end" pointer into "op" buffer.
+ // "end - op" is the compressed size of "input".
+ char* CompressFragment(const char* input,
+                        size_t input_length,
+                        char* op,
+                        uint16* table,
+                        const int table_size);
+
+ // Return the largest n such that
+ //
+ //   s1[0,n-1] == s2[0,n-1]
+ //   and n <= (s2_limit - s2).
+ //
+ // Does not read *s2_limit or beyond.
+ // Does not read *(s1 + (s2_limit - s2)) or beyond.
+ // Requires that s2_limit >= s2.
+ //
+ // Separate implementation for x86_64, for speed.  Uses the fact that
+ // x86_64 is little endian.
+ #if defined(ARCH_K8)
+ static inline int FindMatchLength(const char* s1,
+                                   const char* s2,
+                                   const char* s2_limit) {
+   DCHECK_GE(s2_limit, s2);
+   int matched = 0;
+
+   // Find out how long the match is. We loop over the data 64 bits at a
+   // time until we find a 64-bit block that doesn't match; then we find
+   // the first non-matching bit and use that to calculate the total
+   // length of the match.
+   while (PREDICT_TRUE(s2 <= s2_limit - 8)) {
+     if (PREDICT_FALSE(UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched))) {
+       s2 += 8;
+       matched += 8;
+     } else {
+       // On current (mid-2008) Opteron models there is a 3% more
+       // efficient code sequence to find the first non-matching byte.
+       // However, what follows is ~10% better on Intel Core 2 and newer,
+       // and we expect AMD's bsf instruction to improve.
+       uint64 x = UNALIGNED_LOAD64(s2) ^ UNALIGNED_LOAD64(s1 + matched);
+       int matching_bits = Bits::FindLSBSetNonZero64(x);
+       matched += matching_bits >> 3;
+       return matched;
+     }
+   }
+   while (PREDICT_TRUE(s2 < s2_limit)) {
+     if (PREDICT_TRUE(s1[matched] == *s2)) {
+       ++s2;
+       ++matched;
+     } else {
+       return matched;
+     }
+   }
+   return matched;
+ }
+ #else
+ static inline int FindMatchLength(const char* s1,
+                                   const char* s2,
+                                   const char* s2_limit) {
+   // Implementation based on the x86-64 version, above.
+   DCHECK_GE(s2_limit, s2);
+   int matched = 0;
+
+   while (s2 <= s2_limit - 4 &&
+          UNALIGNED_LOAD32(s2) == UNALIGNED_LOAD32(s1 + matched)) {
+     s2 += 4;
+     matched += 4;
+   }
+   if (LittleEndian::IsLittleEndian() && s2 <= s2_limit - 4) {
+     uint32 x = UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
+     int matching_bits = Bits::FindLSBSetNonZero(x);
+     matched += matching_bits >> 3;
+   } else {
+     while ((s2 < s2_limit) && (s1[matched] == *s2)) {
+       ++s2;
+       ++matched;
+     }
+   }
+   return matched;
+ }
+ #endif
+
+ }  // end namespace internal
+ }  // end namespace snappy
+
+ #endif  // UTIL_SNAPPY_SNAPPY_INTERNAL_H_
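
Both FindMatchLength variants above implement the same contract; the word-at-a-time loads and the FindLSBSetNonZero bit trick are only speedups. A minimal standalone sketch of that contract (MatchLengthSketch is a hypothetical name, not part of the diff):

    #include <cassert>

    // Largest n such that s1[0,n-1] == s2[0,n-1] and n <= s2_limit - s2,
    // found by plain byte comparison. The vendored versions compare 8
    // (or 4) bytes per load and then locate the first differing byte
    // inside the final word via the XOR's lowest set bit.
    static int MatchLengthSketch(const char* s1, const char* s2,
                                 const char* s2_limit) {
      int matched = 0;
      while (s2 < s2_limit && s1[matched] == *s2) {
        ++s2;
        ++matched;
      }
      return matched;
    }

    int main() {
      const char a[] = "snappy1";
      const char b[] = "snappy2";
      assert(MatchLengthSketch(a, b, b + 7) == 6);  // bytes 0-5 agree
      return 0;
    }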
data/ext/snappy/vendor/snappy-1.0.0/snappy-sinksource.cc
@@ -0,0 +1,46 @@
+ // Copyright 2011 Google Inc. All Rights Reserved.
+
+ #include <string.h>
+
+ #include "snappy-sinksource.h"
+
+ namespace snappy {
+
+ Source::~Source() { }
+
+ Sink::~Sink() { }
+
+ char* Sink::GetAppendBuffer(size_t length, char* scratch) {
+   return scratch;
+ }
+
+ ByteArraySource::~ByteArraySource() { }
+
+ size_t ByteArraySource::Available() const { return left_; }
+
+ const char* ByteArraySource::Peek(size_t* len) {
+   *len = left_;
+   return ptr_;
+ }
+
+ void ByteArraySource::Skip(size_t n) {
+   left_ -= n;
+   ptr_ += n;
+ }
+
+ UncheckedByteArraySink::~UncheckedByteArraySink() { }
+
+ void UncheckedByteArraySink::Append(const char* data, size_t n) {
+   // Do no copying if the caller filled in the result of GetAppendBuffer()
+   if (data != dest_) {
+     memcpy(dest_, data, n);
+   }
+   dest_ += n;
+ }
+
+ char* UncheckedByteArraySink::GetAppendBuffer(size_t len, char* scratch) {
+   return dest_;
+ }
+
+
+ }
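
The data != dest_ test in Append() is what makes the GetAppendBuffer() protocol zero-copy: a caller that writes directly into the buffer the sink hands out and then passes that same pointer back skips the memcpy. A sketch of a caller using the protocol (EmitByte is a hypothetical helper; it assumes snappy-sinksource.h is on the include path):

    #include "snappy-sinksource.h"

    // Write one byte through the zero-copy path where the sink allows it.
    static void EmitByte(snappy::Sink* sink, char b) {
      char scratch[1];
      char* buf = sink->GetAppendBuffer(1, scratch);  // may just return scratch
      buf[0] = b;
      sink->Append(buf, 1);  // buf == dest_ for UncheckedByteArraySink: no copy
    }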
data/ext/snappy/vendor/snappy-1.0.0/snappy-sinksource.h
@@ -0,0 +1,110 @@
+ // Copyright 2011 Google Inc. All Rights Reserved.
+
+ #ifndef UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
+ #define UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
+
+ #include <stddef.h>
+
+
+ namespace snappy {
+
+ // A Sink is an interface that consumes a sequence of bytes.
+ class Sink {
+  public:
+   Sink() { }
+   virtual ~Sink();
+
+   // Append "bytes[0,n-1]" to this.
+   virtual void Append(const char* bytes, size_t n) = 0;
+
+   // Returns a writable buffer of the specified length for appending.
+   // May return a pointer to the caller-owned scratch buffer which
+   // must have at least the indicated length.  The returned buffer is
+   // only valid until the next operation on this Sink.
+   //
+   // After writing at most "length" bytes, call Append() with the
+   // pointer returned from this function and the number of bytes
+   // written.  Many Append() implementations will avoid copying
+   // bytes if this function returned an internal buffer.
+   //
+   // If a non-scratch buffer is returned, the caller may only pass a
+   // prefix of it to Append().  That is, it is not correct to pass an
+   // interior pointer of the returned array to Append().
+   //
+   // The default implementation always returns the scratch buffer.
+   virtual char* GetAppendBuffer(size_t length, char* scratch);
+
+  private:
+   // No copying
+   Sink(const Sink&);
+   void operator=(const Sink&);
+ };
+
+ // A Source is an interface that yields a sequence of bytes
+ class Source {
+  public:
+   Source() { }
+   virtual ~Source();
+
+   // Return the number of bytes left to read from the source
+   virtual size_t Available() const = 0;
+
+   // Peek at the next flat region of the source.  Does not reposition
+   // the source.  The returned region is empty iff Available()==0.
+   //
+   // Returns a pointer to the beginning of the region and store its
+   // length in *len.
+   //
+   // The returned region is valid until the next call to Skip() or
+   // until this object is destroyed, whichever occurs first.
+   //
+   // The returned region may be larger than Available() (for example
+   // if this ByteSource is a view on a substring of a larger source).
+   // The caller is responsible for ensuring that it only reads the
+   // Available() bytes.
+   virtual const char* Peek(size_t* len) = 0;
+
+   // Skip the next n bytes.  Invalidates any buffer returned by
+   // a previous call to Peek().
+   // REQUIRES: Available() >= n
+   virtual void Skip(size_t n) = 0;
+
+  private:
+   // No copying
+   Source(const Source&);
+   void operator=(const Source&);
+ };
+
+ // A Source implementation that yields the contents of a flat array
+ class ByteArraySource : public Source {
+  public:
+   ByteArraySource(const char* p, size_t n) : ptr_(p), left_(n) { }
+   virtual ~ByteArraySource();
+   virtual size_t Available() const;
+   virtual const char* Peek(size_t* len);
+   virtual void Skip(size_t n);
+  private:
+   const char* ptr_;
+   size_t left_;
+ };
+
+ // A Sink implementation that writes to a flat array without any bound checks.
+ class UncheckedByteArraySink : public Sink {
+  public:
+   explicit UncheckedByteArraySink(char* dest) : dest_(dest) { }
+   virtual ~UncheckedByteArraySink();
+   virtual void Append(const char* data, size_t n);
+   virtual char* GetAppendBuffer(size_t len, char* scratch);
+
+   // Return the current output pointer so that a caller can see how
+   // many bytes were produced.
+   // Note: this is not a Sink method.
+   char* CurrentDestination() const { return dest_; }
+  private:
+   char* dest_;
+ };
+
+
+ }
+
+ #endif  // UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
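
Together these two interfaces describe a pull side and a push side, with ByteArraySource and UncheckedByteArraySink as their flat-array implementations. A sketch of draining a Source into a Sink with Peek/Skip (CopyAll is a hypothetical helper, and the final comparison just checks the round trip):

    #include <cstring>
    #include "snappy-sinksource.h"

    // Move every available byte from src to sink, one flat region at a time.
    static void CopyAll(snappy::Source* src, snappy::Sink* sink) {
      while (src->Available() > 0) {
        size_t len;
        const char* region = src->Peek(&len);
        if (len > src->Available()) len = src->Available();  // Peek may over-report
        sink->Append(region, len);
        src->Skip(len);
      }
    }

    int main() {
      const char input[] = "hello, sink/source";
      char output[sizeof(input)];
      snappy::ByteArraySource src(input, sizeof(input));
      snappy::UncheckedByteArraySink sink(output);
      CopyAll(&src, &sink);
      return std::memcmp(input, output, sizeof(input)) == 0 ? 0 : 1;
    }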
data/ext/snappy/vendor/snappy-1.0.0/snappy-stubs-internal.cc
@@ -0,0 +1,28 @@
+ // Copyright 2011 Google Inc. All Rights Reserved.
+ //
+ // Licensed under the Apache License, Version 2.0 (the "License");
+ // you may not use this file except in compliance with the License.
+ // You may obtain a copy of the License at
+ //
+ //      http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing, software
+ // distributed under the License is distributed on an "AS IS" BASIS,
+ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ // See the License for the specific language governing permissions and
+ // limitations under the License.
+
+ #include <algorithm>
+ #include <string>
+
+ #include "snappy-stubs-internal.h"
+
+ namespace snappy {
+
+ void Varint::Append32(string* s, uint32 value) {
+   char buf[Varint::kMax32];
+   const char* p = Varint::Encode32(buf, value);
+   s->append(buf, p - buf);
+ }
+
+ }  // namespace snappy
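
Append32 emits the standard base-128 varint: seven payload bits per byte, least-significant group first, high bit set on every byte except the last. For example, 300 = 0b100101100, so the low seven bits 0101100 go out first with the continuation bit (0xAC), followed by 0b10 (0x02). A sketch of that worked example, assuming the vendored sources are compiled and linked into the test:

    #include <cassert>
    #include <string>
    #include "snappy-stubs-internal.h"

    int main() {
      std::string s;
      snappy::Varint::Append32(&s, 300);  // defined in snappy-stubs-internal.cc
      assert(s.size() == 2);
      assert(static_cast<unsigned char>(s[0]) == 0xAC);  // 0x2C | 0x80
      assert(static_cast<unsigned char>(s[1]) == 0x02);  // high bits, no continuation
      return 0;
    }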
data/ext/snappy/vendor/snappy-1.0.0/snappy-stubs-internal.h
@@ -0,0 +1,457 @@
+ // Copyright 2011 Google Inc. All Rights Reserved.
+ //
+ // Licensed under the Apache License, Version 2.0 (the "License");
+ // you may not use this file except in compliance with the License.
+ // You may obtain a copy of the License at
+ //
+ //      http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing, software
+ // distributed under the License is distributed on an "AS IS" BASIS,
+ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ // See the License for the specific language governing permissions and
+ // limitations under the License.
+ //
+ // Various stubs for the open-source version of Snappy.
+
+ #ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
+ #define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
+
+ #include <iostream>
+ #include <string>
+
+ #include <assert.h>
+ #include <stdlib.h>
+ #include <string.h>
+ #include <sys/mman.h>
+
+ #include "config.h"
+ #include "snappy-stubs-public.h"
+
+ #if defined(__x86_64__)
+
+ // Enable 64-bit optimized versions of some routines.
+ #define ARCH_K8 1
+
+ #endif
+
+ // Needed by OS X, among others.
+ #ifndef MAP_ANONYMOUS
+ #define MAP_ANONYMOUS MAP_ANON
+ #endif
+
+ // Pull in std::min, std::ostream, and the likes. This is safe because this
+ // header file is never used from any public header files.
+ using namespace std;
+
+ // The size of an array, if known at compile-time.
+ // Will give unexpected results if used on a pointer.
+ // We undefine it first, since some compilers already have a definition.
+ #ifdef ARRAYSIZE
+ #undef ARRAYSIZE
+ #endif
+ #define ARRAYSIZE(a) (sizeof(a) / sizeof(*(a)))
+
+ // Static prediction hints.
+ #ifdef HAVE_BUILTIN_EXPECT
+ #define PREDICT_FALSE(x) (__builtin_expect(x, 0))
+ #define PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
+ #else
+ #define PREDICT_FALSE(x) x
+ #define PREDICT_TRUE(x) x
+ #endif
+
+ // This is only used for recomputing the tag byte table used during
+ // decompression; for simplicity we just remove it from the open-source
+ // version (anyone who wants to regenerate it can just do the call
+ // themselves within main()).
+ #define DEFINE_bool(flag_name, default_value, description) \
+   bool FLAGS_ ## flag_name = default_value;
+ #define DECLARE_bool(flag_name) \
+   extern bool FLAGS_ ## flag_name;
+ #define REGISTER_MODULE_INITIALIZER(name, code)
+
+ namespace snappy {
+
+ static const uint32 kuint32max = static_cast<uint32>(0xFFFFFFFF);
+ static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
+
+ // Logging.
+
+ #define LOG(level) LogMessage()
+ #define VLOG(level) true ? (void)0 : \
+     snappy::LogMessageVoidify() & snappy::LogMessage()
+
+ class LogMessage {
+  public:
+   LogMessage() { }
+   ~LogMessage() {
+     cerr << endl;
+   }
+
+   LogMessage& operator<<(const std::string& msg) {
+     cerr << msg;
+     return *this;
+   }
+   LogMessage& operator<<(int x) {
+     cerr << x;
+     return *this;
+   }
+ };
+
+ // Asserts, both versions activated in debug mode only,
+ // and ones that are always active.
+
+ #define CRASH_UNLESS(condition) \
+     PREDICT_TRUE(condition) ? (void)0 : \
+     snappy::LogMessageVoidify() & snappy::LogMessageCrash()
+
+ class LogMessageCrash : public LogMessage {
+  public:
+   LogMessageCrash() { }
+   ~LogMessageCrash() {
+     cerr << endl;
+     abort();
+   }
+ };
+
+ // This class is used to explicitly ignore values in the conditional
+ // logging macros.  This avoids compiler warnings like "value computed
+ // is not used" and "statement has no effect".
+
+ class LogMessageVoidify {
+  public:
+   LogMessageVoidify() { }
+   // This has to be an operator with a precedence lower than << but
+   // higher than ?:
+   void operator&(const LogMessage&) { }
+ };
+
+ #define CHECK(cond) CRASH_UNLESS(cond)
+ #define CHECK_LE(a, b) CRASH_UNLESS((a) <= (b))
+ #define CHECK_GE(a, b) CRASH_UNLESS((a) >= (b))
+ #define CHECK_EQ(a, b) CRASH_UNLESS((a) == (b))
+ #define CHECK_NE(a, b) CRASH_UNLESS((a) != (b))
+ #define CHECK_LT(a, b) CRASH_UNLESS((a) < (b))
+ #define CHECK_GT(a, b) CRASH_UNLESS((a) > (b))
+
+ #ifdef NDEBUG
+
+ #define DCHECK(cond) CRASH_UNLESS(true)
+ #define DCHECK_LE(a, b) CRASH_UNLESS(true)
+ #define DCHECK_GE(a, b) CRASH_UNLESS(true)
+ #define DCHECK_EQ(a, b) CRASH_UNLESS(true)
+ #define DCHECK_NE(a, b) CRASH_UNLESS(true)
+ #define DCHECK_LT(a, b) CRASH_UNLESS(true)
+ #define DCHECK_GT(a, b) CRASH_UNLESS(true)
+
+ #else
+
+ #define DCHECK(cond) CHECK(cond)
+ #define DCHECK_LE(a, b) CHECK_LE(a, b)
+ #define DCHECK_GE(a, b) CHECK_GE(a, b)
+ #define DCHECK_EQ(a, b) CHECK_EQ(a, b)
+ #define DCHECK_NE(a, b) CHECK_NE(a, b)
+ #define DCHECK_LT(a, b) CHECK_LT(a, b)
+ #define DCHECK_GT(a, b) CHECK_GT(a, b)
+
+ #endif
+
+ // Potentially unaligned loads and stores.
+
+ #if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
+
+ #define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
+ #define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
+ #define UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))
+
+ #define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
+ #define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
+ #define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))
+
+ #else
+
+ // These functions are provided for architectures that don't support
+ // unaligned loads and stores.
+
+ inline uint16 UNALIGNED_LOAD16(const void *p) {
+   uint16 t;
+   memcpy(&t, p, sizeof t);
+   return t;
+ }
+
+ inline uint32 UNALIGNED_LOAD32(const void *p) {
+   uint32 t;
+   memcpy(&t, p, sizeof t);
+   return t;
+ }
+
+ inline uint64 UNALIGNED_LOAD64(const void *p) {
+   uint64 t;
+   memcpy(&t, p, sizeof t);
+   return t;
+ }
+
+ inline void UNALIGNED_STORE16(void *p, uint16 v) {
+   memcpy(p, &v, sizeof v);
+ }
+
+ inline void UNALIGNED_STORE32(void *p, uint32 v) {
+   memcpy(p, &v, sizeof v);
+ }
+
+ inline void UNALIGNED_STORE64(void *p, uint64 v) {
+   memcpy(p, &v, sizeof v);
+ }
+
+ #endif
+
+ // The following guarantees declaration of the byte swap functions.
+ #ifdef WORDS_BIGENDIAN
+
+ #ifdef _MSC_VER
+ #include <stdlib.h>
+ #define bswap_16(x) _byteswap_ushort(x)
+ #define bswap_32(x) _byteswap_ulong(x)
+ #define bswap_64(x) _byteswap_uint64(x)
+
+ #elif defined(__APPLE__)
+ // Mac OS X / Darwin features
+ #include <libkern/OSByteOrder.h>
+ #define bswap_16(x) OSSwapInt16(x)
+ #define bswap_32(x) OSSwapInt32(x)
+ #define bswap_64(x) OSSwapInt64(x)
+
+ #else
+ #include <byteswap.h>
+ #endif
+
+ #endif  // WORDS_BIGENDIAN
+
+ // Convert to little-endian storage, opposite of network format.
+ // Convert x from host to little endian: x = LittleEndian.FromHost(x);
+ // convert x from little endian to host: x = LittleEndian.ToHost(x);
+ //
+ //  Store values into unaligned memory converting to little endian order:
+ //    LittleEndian.Store16(p, x);
+ //
+ //  Load unaligned values stored in little endian converting to host order:
+ //    x = LittleEndian.Load16(p);
+ class LittleEndian {
+  public:
+   // Conversion functions.
+ #ifdef WORDS_BIGENDIAN
+
+   static uint16 FromHost16(uint16 x) { return bswap_16(x); }
+   static uint16 ToHost16(uint16 x) { return bswap_16(x); }
+
+   static uint32 FromHost32(uint32 x) { return bswap_32(x); }
+   static uint32 ToHost32(uint32 x) { return bswap_32(x); }
+
+   static bool IsLittleEndian() { return false; }
+
+ #else  // !defined(WORDS_BIGENDIAN)
+
+   static uint16 FromHost16(uint16 x) { return x; }
+   static uint16 ToHost16(uint16 x) { return x; }
+
+   static uint32 FromHost32(uint32 x) { return x; }
+   static uint32 ToHost32(uint32 x) { return x; }
+
+   static bool IsLittleEndian() { return true; }
+
+ #endif  // !defined(WORDS_BIGENDIAN)
+
+   // Functions to do unaligned loads and stores in little-endian order.
+   static uint16 Load16(const void *p) {
+     return ToHost16(UNALIGNED_LOAD16(p));
+   }
+
+   static void Store16(void *p, uint16 v) {
+     UNALIGNED_STORE16(p, FromHost16(v));
+   }
+
+   static uint32 Load32(const void *p) {
+     return ToHost32(UNALIGNED_LOAD32(p));
+   }
+
+   static void Store32(void *p, uint32 v) {
+     UNALIGNED_STORE32(p, FromHost32(v));
+   }
+ };
+
+ // Some bit-manipulation functions.
+ class Bits {
+  public:
+   // Return floor(log2(n)) for positive integer n.  Returns -1 iff n == 0.
+   static int Log2Floor(uint32 n);
+
+   // Return the first set least / most significant bit, 0-indexed.  Returns an
+   // undefined value if n == 0.  FindLSBSetNonZero() is similar to ffs() except
+   // that it's 0-indexed.
+   static int FindLSBSetNonZero(uint32 n);
+   static int FindLSBSetNonZero64(uint64 n);
+
+  private:
+   DISALLOW_COPY_AND_ASSIGN(Bits);
+ };
+
+ #ifdef HAVE_BUILTIN_CTZ
+
+ inline int Bits::Log2Floor(uint32 n) {
+   return n == 0 ? -1 : 31 ^ __builtin_clz(n);
+ }
+
+ inline int Bits::FindLSBSetNonZero(uint32 n) {
+   return __builtin_ctz(n);
+ }
+
+ inline int Bits::FindLSBSetNonZero64(uint64 n) {
+   return __builtin_ctzll(n);
+ }
+
+ #else  // Portable versions.
+
+ inline int Bits::Log2Floor(uint32 n) {
+   if (n == 0)
+     return -1;
+   int log = 0;
+   uint32 value = n;
+   for (int i = 4; i >= 0; --i) {
+     int shift = (1 << i);
+     uint32 x = value >> shift;
+     if (x != 0) {
+       value = x;
+       log += shift;
+     }
+   }
+   assert(value == 1);
+   return log;
+ }
+
+ inline int Bits::FindLSBSetNonZero(uint32 n) {
+   int rc = 31;
+   for (int i = 4, shift = 1 << 4; i >= 0; --i) {
+     const uint32 x = n << shift;
+     if (x != 0) {
+       n = x;
+       rc -= shift;
+     }
+     shift >>= 1;
+   }
+   return rc;
+ }
+
+ // FindLSBSetNonZero64() is defined in terms of FindLSBSetNonZero().
+ inline int Bits::FindLSBSetNonZero64(uint64 n) {
+   const uint32 bottombits = static_cast<uint32>(n);
+   if (bottombits == 0) {
+     // Bottom bits are zero, so scan in top bits
+     return 32 + FindLSBSetNonZero(static_cast<uint32>(n >> 32));
+   } else {
+     return FindLSBSetNonZero(bottombits);
+   }
+ }
+
+ #endif  // End portable versions.
+
+ // Variable-length integer encoding.
+ class Varint {
+  public:
+   // Maximum lengths of varint encoding of uint32.
+   static const int kMax32 = 5;
+
+   // Attempts to parse a varint32 from a prefix of the bytes in [ptr,limit-1].
+   // Never reads a character at or beyond limit.  If a valid/terminated varint32
+   // was found in the range, stores it in *OUTPUT and returns a pointer just
+   // past the last byte of the varint32. Else returns NULL.  On success,
+   // "result <= limit".
+   static const char* Parse32WithLimit(const char* ptr, const char* limit,
+                                       uint32* OUTPUT);
+
+   // REQUIRES   "ptr" points to a buffer of length sufficient to hold "v".
+   // EFFECTS    Encodes "v" into "ptr" and returns a pointer to the
+   //            byte just past the last encoded byte.
+   static char* Encode32(char* ptr, uint32 v);
+
+   // EFFECTS    Appends the varint representation of "value" to "*s".
+   static void Append32(string* s, uint32 value);
+ };
+
+ inline const char* Varint::Parse32WithLimit(const char* p,
+                                             const char* l,
+                                             uint32* OUTPUT) {
+   const unsigned char* ptr = reinterpret_cast<const unsigned char*>(p);
+   const unsigned char* limit = reinterpret_cast<const unsigned char*>(l);
+   uint32 b, result;
+   if (ptr >= limit) return NULL;
+   b = *(ptr++); result = b & 127;          if (b < 128) goto done;
+   if (ptr >= limit) return NULL;
+   b = *(ptr++); result |= (b & 127) <<  7; if (b < 128) goto done;
+   if (ptr >= limit) return NULL;
+   b = *(ptr++); result |= (b & 127) << 14; if (b < 128) goto done;
+   if (ptr >= limit) return NULL;
+   b = *(ptr++); result |= (b & 127) << 21; if (b < 128) goto done;
+   if (ptr >= limit) return NULL;
+   b = *(ptr++); result |= (b & 127) << 28; if (b < 16) goto done;
+   return NULL;       // Value is too long to be a varint32
+  done:
+   *OUTPUT = result;
+   return reinterpret_cast<const char*>(ptr);
+ }
+
+ inline char* Varint::Encode32(char* sptr, uint32 v) {
+   // Operate on characters as unsigneds
+   unsigned char* ptr = reinterpret_cast<unsigned char*>(sptr);
+   static const int B = 128;
+   if (v < (1<<7)) {
+     *(ptr++) = v;
+   } else if (v < (1<<14)) {
+     *(ptr++) = v | B;
+     *(ptr++) = v>>7;
+   } else if (v < (1<<21)) {
+     *(ptr++) = v | B;
+     *(ptr++) = (v>>7) | B;
+     *(ptr++) = v>>14;
+   } else if (v < (1<<28)) {
+     *(ptr++) = v | B;
+     *(ptr++) = (v>>7) | B;
+     *(ptr++) = (v>>14) | B;
+     *(ptr++) = v>>21;
+   } else {
+     *(ptr++) = v | B;
+     *(ptr++) = (v>>7) | B;
+     *(ptr++) = (v>>14) | B;
+     *(ptr++) = (v>>21) | B;
+     *(ptr++) = v>>28;
+   }
+   return reinterpret_cast<char*>(ptr);
+ }
+
+ // If you know the internal layout of the std::string in use, you can
+ // replace this function with one that resizes the string without
+ // filling the new space with zeros (if applicable) --
+ // it will be non-portable but faster.
+ inline void STLStringResizeUninitialized(string* s, size_t new_size) {
+   s->resize(new_size);
+ }
+
+ // Return a mutable char* pointing to a string's internal buffer,
+ // which may not be null-terminated. Writing through this pointer will
+ // modify the string.
+ //
+ // string_as_array(&str)[i] is valid for 0 <= i < str.size() until the
+ // next call to a string method that invalidates iterators.
+ //
+ // As of 2006-04, there is no standard-blessed way of getting a
+ // mutable reference to a string's internal buffer. However, issue 530
+ // (http://www.open-std.org/JTC1/SC22/WG21/docs/lwg-defects.html#530)
+ // proposes this as the method. It will officially be part of the standard
+ // for C++0x. This should already work on all current implementations.
+ inline char* string_as_array(string* str) {
+   return str->empty() ? NULL : &*str->begin();
+ }
+
+ }  // namespace snappy
+
+ #endif  // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
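
Encode32 and Parse32WithLimit above are exact inverses, and kMax32 = 5 because 32 payload bits at 7 bits per byte round up to five bytes. A round-trip sketch, assuming the vendored header builds standalone in this environment (both functions are inline, so no .cc needs to be linked):

    #include <cassert>
    #include "snappy-stubs-internal.h"

    int main() {
      char buf[snappy::Varint::kMax32];
      // 16384 <= 1000000 < 2097152, so Encode32 emits exactly three bytes.
      char* end = snappy::Varint::Encode32(buf, 1000000);
      snappy::uint32 decoded = 0;
      const char* p = snappy::Varint::Parse32WithLimit(buf, end, &decoded);
      assert(p == end);            // consumed exactly the bytes Encode32 wrote
      assert(decoded == 1000000);
      return 0;
    }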