snappy 0.0.13 → 0.0.14

Files changed (42)
  1. checksums.yaml +4 -4
  2. data/Gemfile +1 -1
  3. data/lib/snappy/version.rb +1 -1
  4. data/vendor/snappy/AUTHORS +1 -0
  5. data/vendor/snappy/COPYING +54 -0
  6. data/vendor/snappy/ChangeLog +1916 -0
  7. data/vendor/snappy/Makefile.am +23 -0
  8. data/vendor/snappy/NEWS +128 -0
  9. data/vendor/snappy/README +135 -0
  10. data/vendor/snappy/autogen.sh +7 -0
  11. data/vendor/snappy/configure.ac +133 -0
  12. data/vendor/snappy/format_description.txt +110 -0
  13. data/vendor/snappy/framing_format.txt +135 -0
  14. data/vendor/snappy/m4/gtest.m4 +74 -0
  15. data/vendor/snappy/snappy-c.cc +90 -0
  16. data/vendor/snappy/snappy-c.h +138 -0
  17. data/vendor/snappy/snappy-internal.h +150 -0
  18. data/vendor/snappy/snappy-sinksource.cc +71 -0
  19. data/vendor/snappy/snappy-sinksource.h +137 -0
  20. data/vendor/snappy/snappy-stubs-internal.cc +42 -0
  21. data/vendor/snappy/snappy-stubs-internal.h +491 -0
  22. data/vendor/snappy/snappy-stubs-public.h.in +98 -0
  23. data/vendor/snappy/snappy-test.cc +606 -0
  24. data/vendor/snappy/snappy-test.h +582 -0
  25. data/vendor/snappy/snappy.cc +1306 -0
  26. data/vendor/snappy/snappy.h +184 -0
  27. data/vendor/snappy/snappy_unittest.cc +1355 -0
  28. data/vendor/snappy/testdata/alice29.txt +3609 -0
  29. data/vendor/snappy/testdata/asyoulik.txt +4122 -0
  30. data/vendor/snappy/testdata/baddata1.snappy +0 -0
  31. data/vendor/snappy/testdata/baddata2.snappy +0 -0
  32. data/vendor/snappy/testdata/baddata3.snappy +0 -0
  33. data/vendor/snappy/testdata/fireworks.jpeg +0 -0
  34. data/vendor/snappy/testdata/geo.protodata +0 -0
  35. data/vendor/snappy/testdata/html +1 -0
  36. data/vendor/snappy/testdata/html_x_4 +1 -0
  37. data/vendor/snappy/testdata/kppkn.gtb +0 -0
  38. data/vendor/snappy/testdata/lcet10.txt +7519 -0
  39. data/vendor/snappy/testdata/paper-100k.pdf +600 -2
  40. data/vendor/snappy/testdata/plrabn12.txt +10699 -0
  41. data/vendor/snappy/testdata/urls.10K +10000 -0
  42. metadata +40 -2
data/vendor/snappy/snappy-sinksource.cc
@@ -0,0 +1,71 @@
+ // Copyright 2011 Google Inc. All Rights Reserved.
+ //
+ // Redistribution and use in source and binary forms, with or without
+ // modification, are permitted provided that the following conditions are
+ // met:
+ //
+ // * Redistributions of source code must retain the above copyright
+ // notice, this list of conditions and the following disclaimer.
+ // * Redistributions in binary form must reproduce the above
+ // copyright notice, this list of conditions and the following disclaimer
+ // in the documentation and/or other materials provided with the
+ // distribution.
+ // * Neither the name of Google Inc. nor the names of its
+ // contributors may be used to endorse or promote products derived from
+ // this software without specific prior written permission.
+ //
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ #include <string.h>
+
+ #include "snappy-sinksource.h"
+
+ namespace snappy {
+
+ Source::~Source() { }
+
+ Sink::~Sink() { }
+
+ char* Sink::GetAppendBuffer(size_t length, char* scratch) {
+   return scratch;
+ }
+
+ ByteArraySource::~ByteArraySource() { }
+
+ size_t ByteArraySource::Available() const { return left_; }
+
+ const char* ByteArraySource::Peek(size_t* len) {
+   *len = left_;
+   return ptr_;
+ }
+
+ void ByteArraySource::Skip(size_t n) {
+   left_ -= n;
+   ptr_ += n;
+ }
+
+ UncheckedByteArraySink::~UncheckedByteArraySink() { }
+
+ void UncheckedByteArraySink::Append(const char* data, size_t n) {
+   // Do no copying if the caller filled in the result of GetAppendBuffer()
+   if (data != dest_) {
+     memcpy(dest_, data, n);
+   }
+   dest_ += n;
+ }
+
+ char* UncheckedByteArraySink::GetAppendBuffer(size_t len, char* scratch) {
+   return dest_;
+ }
+
+ }
data/vendor/snappy/snappy-sinksource.h
@@ -0,0 +1,137 @@
+ // Copyright 2011 Google Inc. All Rights Reserved.
+ //
+ // Redistribution and use in source and binary forms, with or without
+ // modification, are permitted provided that the following conditions are
+ // met:
+ //
+ // * Redistributions of source code must retain the above copyright
+ // notice, this list of conditions and the following disclaimer.
+ // * Redistributions in binary form must reproduce the above
+ // copyright notice, this list of conditions and the following disclaimer
+ // in the documentation and/or other materials provided with the
+ // distribution.
+ // * Neither the name of Google Inc. nor the names of its
+ // contributors may be used to endorse or promote products derived from
+ // this software without specific prior written permission.
+ //
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ #ifndef UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
+ #define UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
+
+ #include <stddef.h>
+
+
+ namespace snappy {
+
+ // A Sink is an interface that consumes a sequence of bytes.
+ class Sink {
+  public:
+   Sink() { }
+   virtual ~Sink();
+
+   // Append "bytes[0,n-1]" to this.
+   virtual void Append(const char* bytes, size_t n) = 0;
+
+   // Returns a writable buffer of the specified length for appending.
+   // May return a pointer to the caller-owned scratch buffer which
+   // must have at least the indicated length. The returned buffer is
+   // only valid until the next operation on this Sink.
+   //
+   // After writing at most "length" bytes, call Append() with the
+   // pointer returned from this function and the number of bytes
+   // written. Many Append() implementations will avoid copying
+   // bytes if this function returned an internal buffer.
+   //
+   // If a non-scratch buffer is returned, the caller may only pass a
+   // prefix of it to Append(). That is, it is not correct to pass an
+   // interior pointer of the returned array to Append().
+   //
+   // The default implementation always returns the scratch buffer.
+   virtual char* GetAppendBuffer(size_t length, char* scratch);
+
+
+  private:
+   // No copying
+   Sink(const Sink&);
+   void operator=(const Sink&);
+ };
+
+ // A Source is an interface that yields a sequence of bytes
+ class Source {
+  public:
+   Source() { }
+   virtual ~Source();
+
+   // Return the number of bytes left to read from the source
+   virtual size_t Available() const = 0;
+
+   // Peek at the next flat region of the source. Does not reposition
+   // the source. The returned region is empty iff Available()==0.
+   //
+   // Returns a pointer to the beginning of the region and store its
+   // length in *len.
+   //
+   // The returned region is valid until the next call to Skip() or
+   // until this object is destroyed, whichever occurs first.
+   //
+   // The returned region may be larger than Available() (for example
+   // if this ByteSource is a view on a substring of a larger source).
+   // The caller is responsible for ensuring that it only reads the
+   // Available() bytes.
+   virtual const char* Peek(size_t* len) = 0;
+
+   // Skip the next n bytes. Invalidates any buffer returned by
+   // a previous call to Peek().
+   // REQUIRES: Available() >= n
+   virtual void Skip(size_t n) = 0;
+
+  private:
+   // No copying
+   Source(const Source&);
+   void operator=(const Source&);
+ };
+
+ // A Source implementation that yields the contents of a flat array
+ class ByteArraySource : public Source {
+  public:
+   ByteArraySource(const char* p, size_t n) : ptr_(p), left_(n) { }
+   virtual ~ByteArraySource();
+   virtual size_t Available() const;
+   virtual const char* Peek(size_t* len);
+   virtual void Skip(size_t n);
+  private:
+   const char* ptr_;
+   size_t left_;
+ };
+
+ // A Sink implementation that writes to a flat array without any bound checks.
+ class UncheckedByteArraySink : public Sink {
+  public:
+   explicit UncheckedByteArraySink(char* dest) : dest_(dest) { }
+   virtual ~UncheckedByteArraySink();
+   virtual void Append(const char* data, size_t n);
+   virtual char* GetAppendBuffer(size_t len, char* scratch);
+
+   // Return the current output pointer so that a caller can see how
+   // many bytes were produced.
+   // Note: this is not a Sink method.
+   char* CurrentDestination() const { return dest_; }
+  private:
+   char* dest_;
+ };
+
+
+ }
+
+ #endif // UTIL_SNAPPY_SNAPPY_SINKSOURCE_H_
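
The Sink/Source pair above is the I/O abstraction the rest of the library reads from and writes into. A minimal usage sketch (not part of this gem's changes), assuming the vendored snappy is built and linked and that snappy.h in this release declares Compress(Source*, Sink*) and MaxCompressedLength():

// Sketch: compress a flat buffer through the Source/Sink interfaces.
#include <cstdio>
#include <vector>

#include "snappy.h"
#include "snappy-sinksource.h"

int main() {
  const char input[] = "hello hello hello hello";
  snappy::ByteArraySource source(input, sizeof(input) - 1);

  // UncheckedByteArraySink does no bounds checking, so the destination
  // buffer must be at least MaxCompressedLength(input size) bytes long.
  std::vector<char> dest(snappy::MaxCompressedLength(sizeof(input) - 1));
  snappy::UncheckedByteArraySink sink(&dest[0]);

  const size_t n = snappy::Compress(&source, &sink);
  std::printf("compressed %zu bytes into %zu\n", sizeof(input) - 1, n);
  return 0;
}

CurrentDestination() minus the start of the buffer gives the same byte count, which is why UncheckedByteArraySink exposes it even though it is not part of the Sink interface.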
data/vendor/snappy/snappy-stubs-internal.cc
@@ -0,0 +1,42 @@
+ // Copyright 2011 Google Inc. All Rights Reserved.
+ //
+ // Redistribution and use in source and binary forms, with or without
+ // modification, are permitted provided that the following conditions are
+ // met:
+ //
+ // * Redistributions of source code must retain the above copyright
+ // notice, this list of conditions and the following disclaimer.
+ // * Redistributions in binary form must reproduce the above
+ // copyright notice, this list of conditions and the following disclaimer
+ // in the documentation and/or other materials provided with the
+ // distribution.
+ // * Neither the name of Google Inc. nor the names of its
+ // contributors may be used to endorse or promote products derived from
+ // this software without specific prior written permission.
+ //
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ #include <algorithm>
+ #include <string>
+
+ #include "snappy-stubs-internal.h"
+
+ namespace snappy {
+
+ void Varint::Append32(string* s, uint32 value) {
+   char buf[Varint::kMax32];
+   const char* p = Varint::Encode32(buf, value);
+   s->append(buf, p - buf);
+ }
+
+ } // namespace snappy
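
Append32 above is how the compressor prepends the uncompressed length to a block: Encode32 writes the value into a small stack buffer (at most kMax32 = 5 bytes) and only the used prefix is appended to the string. A standalone sketch of the same little-endian base-128 wire format; AppendVarint32 here is an illustrative re-implementation, not the library's function:

// Sketch: varint32 encoding as produced by Varint::Append32 --
// low 7 bits first, continuation bit 0x80 on every byte but the last.
#include <cassert>
#include <cstdint>
#include <string>

static void AppendVarint32(std::string* s, uint32_t v) {
  while (v >= 0x80) {
    s->push_back(static_cast<char>((v & 0x7f) | 0x80));
    v >>= 7;
  }
  s->push_back(static_cast<char>(v));
}

int main() {
  std::string s;
  AppendVarint32(&s, 300);  // 300 = 0x12C
  assert(s.size() == 2);
  assert(static_cast<unsigned char>(s[0]) == 0xAC);  // 0x2C | 0x80
  assert(static_cast<unsigned char>(s[1]) == 0x02);  // 0x12C >> 7
  return 0;
}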
data/vendor/snappy/snappy-stubs-internal.h
@@ -0,0 +1,491 @@
+ // Copyright 2011 Google Inc. All Rights Reserved.
+ //
+ // Redistribution and use in source and binary forms, with or without
+ // modification, are permitted provided that the following conditions are
+ // met:
+ //
+ // * Redistributions of source code must retain the above copyright
+ // notice, this list of conditions and the following disclaimer.
+ // * Redistributions in binary form must reproduce the above
+ // copyright notice, this list of conditions and the following disclaimer
+ // in the documentation and/or other materials provided with the
+ // distribution.
+ // * Neither the name of Google Inc. nor the names of its
+ // contributors may be used to endorse or promote products derived from
+ // this software without specific prior written permission.
+ //
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ //
+ // Various stubs for the open-source version of Snappy.
+
+ #ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
+ #define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
+
+ #ifdef HAVE_CONFIG_H
+ #include "config.h"
+ #endif
+
+ #include <string>
+
+ #include <assert.h>
+ #include <stdlib.h>
+ #include <string.h>
+
+ #ifdef HAVE_SYS_MMAN_H
+ #include <sys/mman.h>
+ #endif
+
+ #include "snappy-stubs-public.h"
+
+ #if defined(__x86_64__)
+
+ // Enable 64-bit optimized versions of some routines.
+ #define ARCH_K8 1
+
+ #endif
+
+ // Needed by OS X, among others.
+ #ifndef MAP_ANONYMOUS
+ #define MAP_ANONYMOUS MAP_ANON
+ #endif
+
+ // Pull in std::min, std::ostream, and the likes. This is safe because this
+ // header file is never used from any public header files.
+ using namespace std;
+
+ // The size of an array, if known at compile-time.
+ // Will give unexpected results if used on a pointer.
+ // We undefine it first, since some compilers already have a definition.
+ #ifdef ARRAYSIZE
+ #undef ARRAYSIZE
+ #endif
+ #define ARRAYSIZE(a) (sizeof(a) / sizeof(*(a)))
+
+ // Static prediction hints.
+ #ifdef HAVE_BUILTIN_EXPECT
+ #define PREDICT_FALSE(x) (__builtin_expect(x, 0))
+ #define PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
+ #else
+ #define PREDICT_FALSE(x) x
+ #define PREDICT_TRUE(x) x
+ #endif
+
+ // This is only used for recomputing the tag byte table used during
+ // decompression; for simplicity we just remove it from the open-source
+ // version (anyone who wants to regenerate it can just do the call
+ // themselves within main()).
+ #define DEFINE_bool(flag_name, default_value, description) \
+   bool FLAGS_ ## flag_name = default_value
+ #define DECLARE_bool(flag_name) \
+   extern bool FLAGS_ ## flag_name
+
+ namespace snappy {
+
+ static const uint32 kuint32max = static_cast<uint32>(0xFFFFFFFF);
+ static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
+
+ // Potentially unaligned loads and stores.
+
+ // x86 and PowerPC can simply do these loads and stores native.
+
+ #if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__)
+
+ #define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
+ #define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
+ #define UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))
+
+ #define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
+ #define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
+ #define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))
+
+ // ARMv7 and newer support native unaligned accesses, but only of 16-bit
+ // and 32-bit values (not 64-bit); older versions either raise a fatal signal,
+ // do an unaligned read and rotate the words around a bit, or do the reads very
+ // slowly (trip through kernel mode). There's no simple #define that says just
+ // "ARMv7 or higher", so we have to filter away all ARMv5 and ARMv6
+ // sub-architectures.
+ //
+ // This is a mess, but there's not much we can do about it.
+
+ #elif defined(__arm__) && \
+       !defined(__ARM_ARCH_4__) && \
+       !defined(__ARM_ARCH_4T__) && \
+       !defined(__ARM_ARCH_5__) && \
+       !defined(__ARM_ARCH_5T__) && \
+       !defined(__ARM_ARCH_5TE__) && \
+       !defined(__ARM_ARCH_5TEJ__) && \
+       !defined(__ARM_ARCH_6__) && \
+       !defined(__ARM_ARCH_6J__) && \
+       !defined(__ARM_ARCH_6K__) && \
+       !defined(__ARM_ARCH_6Z__) && \
+       !defined(__ARM_ARCH_6ZK__) && \
+       !defined(__ARM_ARCH_6T2__)
+
+ #define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
+ #define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
+
+ #define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
+ #define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
+
+ // TODO(user): NEON supports unaligned 64-bit loads and stores.
+ // See if that would be more efficient on platforms supporting it,
+ // at least for copies.
+
+ inline uint64 UNALIGNED_LOAD64(const void *p) {
+   uint64 t;
+   memcpy(&t, p, sizeof t);
+   return t;
+ }
+
+ inline void UNALIGNED_STORE64(void *p, uint64 v) {
+   memcpy(p, &v, sizeof v);
+ }
+
+ #else
+
+ // These functions are provided for architectures that don't support
+ // unaligned loads and stores.
+
+ inline uint16 UNALIGNED_LOAD16(const void *p) {
+   uint16 t;
+   memcpy(&t, p, sizeof t);
+   return t;
+ }
+
+ inline uint32 UNALIGNED_LOAD32(const void *p) {
+   uint32 t;
+   memcpy(&t, p, sizeof t);
+   return t;
+ }
+
+ inline uint64 UNALIGNED_LOAD64(const void *p) {
+   uint64 t;
+   memcpy(&t, p, sizeof t);
+   return t;
+ }
+
+ inline void UNALIGNED_STORE16(void *p, uint16 v) {
+   memcpy(p, &v, sizeof v);
+ }
+
+ inline void UNALIGNED_STORE32(void *p, uint32 v) {
+   memcpy(p, &v, sizeof v);
+ }
+
+ inline void UNALIGNED_STORE64(void *p, uint64 v) {
+   memcpy(p, &v, sizeof v);
+ }
+
+ #endif
+
+ // This can be more efficient than UNALIGNED_LOAD64 + UNALIGNED_STORE64
+ // on some platforms, in particular ARM.
+ inline void UnalignedCopy64(const void *src, void *dst) {
+   if (sizeof(void *) == 8) {
+     UNALIGNED_STORE64(dst, UNALIGNED_LOAD64(src));
+   } else {
+     const char *src_char = reinterpret_cast<const char *>(src);
+     char *dst_char = reinterpret_cast<char *>(dst);
+
+     UNALIGNED_STORE32(dst_char, UNALIGNED_LOAD32(src_char));
+     UNALIGNED_STORE32(dst_char + 4, UNALIGNED_LOAD32(src_char + 4));
+   }
+ }
+
+ // The following guarantees declaration of the byte swap functions.
+ #ifdef WORDS_BIGENDIAN
+
+ #ifdef HAVE_SYS_BYTEORDER_H
+ #include <sys/byteorder.h>
+ #endif
+
+ #ifdef HAVE_SYS_ENDIAN_H
+ #include <sys/endian.h>
+ #endif
+
+ #ifdef _MSC_VER
+ #include <stdlib.h>
+ #define bswap_16(x) _byteswap_ushort(x)
+ #define bswap_32(x) _byteswap_ulong(x)
+ #define bswap_64(x) _byteswap_uint64(x)
+
+ #elif defined(__APPLE__)
+ // Mac OS X / Darwin features
+ #include <libkern/OSByteOrder.h>
+ #define bswap_16(x) OSSwapInt16(x)
+ #define bswap_32(x) OSSwapInt32(x)
+ #define bswap_64(x) OSSwapInt64(x)
+
+ #elif defined(HAVE_BYTESWAP_H)
+ #include <byteswap.h>
+
+ #elif defined(bswap32)
+ // FreeBSD defines bswap{16,32,64} in <sys/endian.h> (already #included).
+ #define bswap_16(x) bswap16(x)
+ #define bswap_32(x) bswap32(x)
+ #define bswap_64(x) bswap64(x)
+
+ #elif defined(BSWAP_64)
+ // Solaris 10 defines BSWAP_{16,32,64} in <sys/byteorder.h> (already #included).
+ #define bswap_16(x) BSWAP_16(x)
+ #define bswap_32(x) BSWAP_32(x)
+ #define bswap_64(x) BSWAP_64(x)
+
+ #else
+
+ inline uint16 bswap_16(uint16 x) {
+   return (x << 8) | (x >> 8);
+ }
+
+ inline uint32 bswap_32(uint32 x) {
+   x = ((x & 0xff00ff00UL) >> 8) | ((x & 0x00ff00ffUL) << 8);
+   return (x >> 16) | (x << 16);
+ }
+
+ inline uint64 bswap_64(uint64 x) {
+   x = ((x & 0xff00ff00ff00ff00ULL) >> 8) | ((x & 0x00ff00ff00ff00ffULL) << 8);
+   x = ((x & 0xffff0000ffff0000ULL) >> 16) | ((x & 0x0000ffff0000ffffULL) << 16);
+   return (x >> 32) | (x << 32);
+ }
+
+ #endif
+
+ #endif // WORDS_BIGENDIAN
+
+ // Convert to little-endian storage, opposite of network format.
+ // Convert x from host to little endian: x = LittleEndian.FromHost(x);
+ // convert x from little endian to host: x = LittleEndian.ToHost(x);
+ //
+ // Store values into unaligned memory converting to little endian order:
+ //   LittleEndian.Store16(p, x);
+ //
+ // Load unaligned values stored in little endian converting to host order:
+ //   x = LittleEndian.Load16(p);
+ class LittleEndian {
+  public:
+   // Conversion functions.
+ #ifdef WORDS_BIGENDIAN
+
+   static uint16 FromHost16(uint16 x) { return bswap_16(x); }
+   static uint16 ToHost16(uint16 x) { return bswap_16(x); }
+
+   static uint32 FromHost32(uint32 x) { return bswap_32(x); }
+   static uint32 ToHost32(uint32 x) { return bswap_32(x); }
+
+   static bool IsLittleEndian() { return false; }
+
+ #else // !defined(WORDS_BIGENDIAN)
+
+   static uint16 FromHost16(uint16 x) { return x; }
+   static uint16 ToHost16(uint16 x) { return x; }
+
+   static uint32 FromHost32(uint32 x) { return x; }
+   static uint32 ToHost32(uint32 x) { return x; }
+
+   static bool IsLittleEndian() { return true; }
+
+ #endif // !defined(WORDS_BIGENDIAN)
+
+   // Functions to do unaligned loads and stores in little-endian order.
+   static uint16 Load16(const void *p) {
+     return ToHost16(UNALIGNED_LOAD16(p));
+   }
+
+   static void Store16(void *p, uint16 v) {
+     UNALIGNED_STORE16(p, FromHost16(v));
+   }
+
+   static uint32 Load32(const void *p) {
+     return ToHost32(UNALIGNED_LOAD32(p));
+   }
+
+   static void Store32(void *p, uint32 v) {
+     UNALIGNED_STORE32(p, FromHost32(v));
+   }
+ };
+
+ // Some bit-manipulation functions.
+ class Bits {
+  public:
+   // Return floor(log2(n)) for positive integer n. Returns -1 iff n == 0.
+   static int Log2Floor(uint32 n);
+
+   // Return the first set least / most significant bit, 0-indexed. Returns an
+   // undefined value if n == 0. FindLSBSetNonZero() is similar to ffs() except
+   // that it's 0-indexed.
+   static int FindLSBSetNonZero(uint32 n);
+   static int FindLSBSetNonZero64(uint64 n);
+
+  private:
+   DISALLOW_COPY_AND_ASSIGN(Bits);
+ };
+
+ #ifdef HAVE_BUILTIN_CTZ
+
+ inline int Bits::Log2Floor(uint32 n) {
+   return n == 0 ? -1 : 31 ^ __builtin_clz(n);
+ }
+
+ inline int Bits::FindLSBSetNonZero(uint32 n) {
+   return __builtin_ctz(n);
+ }
+
+ inline int Bits::FindLSBSetNonZero64(uint64 n) {
+   return __builtin_ctzll(n);
+ }
+
+ #else // Portable versions.
+
+ inline int Bits::Log2Floor(uint32 n) {
+   if (n == 0)
+     return -1;
+   int log = 0;
+   uint32 value = n;
+   for (int i = 4; i >= 0; --i) {
+     int shift = (1 << i);
+     uint32 x = value >> shift;
+     if (x != 0) {
+       value = x;
+       log += shift;
+     }
+   }
+   assert(value == 1);
+   return log;
+ }
+
+ inline int Bits::FindLSBSetNonZero(uint32 n) {
+   int rc = 31;
+   for (int i = 4, shift = 1 << 4; i >= 0; --i) {
+     const uint32 x = n << shift;
+     if (x != 0) {
+       n = x;
+       rc -= shift;
+     }
+     shift >>= 1;
+   }
+   return rc;
+ }
+
+ // FindLSBSetNonZero64() is defined in terms of FindLSBSetNonZero().
+ inline int Bits::FindLSBSetNonZero64(uint64 n) {
+   const uint32 bottombits = static_cast<uint32>(n);
+   if (bottombits == 0) {
+     // Bottom bits are zero, so scan in top bits
+     return 32 + FindLSBSetNonZero(static_cast<uint32>(n >> 32));
+   } else {
+     return FindLSBSetNonZero(bottombits);
+   }
+ }
+
+ #endif // End portable versions.
+
+ // Variable-length integer encoding.
+ class Varint {
+  public:
+   // Maximum lengths of varint encoding of uint32.
+   static const int kMax32 = 5;
+
+   // Attempts to parse a varint32 from a prefix of the bytes in [ptr,limit-1].
+   // Never reads a character at or beyond limit. If a valid/terminated varint32
+   // was found in the range, stores it in *OUTPUT and returns a pointer just
+   // past the last byte of the varint32. Else returns NULL. On success,
+   // "result <= limit".
+   static const char* Parse32WithLimit(const char* ptr, const char* limit,
+                                       uint32* OUTPUT);
+
+   // REQUIRES "ptr" points to a buffer of length sufficient to hold "v".
+   // EFFECTS Encodes "v" into "ptr" and returns a pointer to the
+   //         byte just past the last encoded byte.
+   static char* Encode32(char* ptr, uint32 v);
+
+   // EFFECTS Appends the varint representation of "value" to "*s".
+   static void Append32(string* s, uint32 value);
+ };
+
+ inline const char* Varint::Parse32WithLimit(const char* p,
+                                             const char* l,
+                                             uint32* OUTPUT) {
+   const unsigned char* ptr = reinterpret_cast<const unsigned char*>(p);
+   const unsigned char* limit = reinterpret_cast<const unsigned char*>(l);
+   uint32 b, result;
+   if (ptr >= limit) return NULL;
+   b = *(ptr++); result = b & 127; if (b < 128) goto done;
+   if (ptr >= limit) return NULL;
+   b = *(ptr++); result |= (b & 127) << 7; if (b < 128) goto done;
+   if (ptr >= limit) return NULL;
+   b = *(ptr++); result |= (b & 127) << 14; if (b < 128) goto done;
+   if (ptr >= limit) return NULL;
+   b = *(ptr++); result |= (b & 127) << 21; if (b < 128) goto done;
+   if (ptr >= limit) return NULL;
+   b = *(ptr++); result |= (b & 127) << 28; if (b < 16) goto done;
+   return NULL; // Value is too long to be a varint32
+  done:
+   *OUTPUT = result;
+   return reinterpret_cast<const char*>(ptr);
+ }
+
+ inline char* Varint::Encode32(char* sptr, uint32 v) {
+   // Operate on characters as unsigneds
+   unsigned char* ptr = reinterpret_cast<unsigned char*>(sptr);
+   static const int B = 128;
+   if (v < (1<<7)) {
+     *(ptr++) = v;
+   } else if (v < (1<<14)) {
+     *(ptr++) = v | B;
+     *(ptr++) = v>>7;
+   } else if (v < (1<<21)) {
+     *(ptr++) = v | B;
+     *(ptr++) = (v>>7) | B;
+     *(ptr++) = v>>14;
+   } else if (v < (1<<28)) {
+     *(ptr++) = v | B;
+     *(ptr++) = (v>>7) | B;
+     *(ptr++) = (v>>14) | B;
+     *(ptr++) = v>>21;
+   } else {
+     *(ptr++) = v | B;
+     *(ptr++) = (v>>7) | B;
+     *(ptr++) = (v>>14) | B;
+     *(ptr++) = (v>>21) | B;
+     *(ptr++) = v>>28;
+   }
+   return reinterpret_cast<char*>(ptr);
+ }
+
+ // If you know the internal layout of the std::string in use, you can
+ // replace this function with one that resizes the string without
+ // filling the new space with zeros (if applicable) --
+ // it will be non-portable but faster.
+ inline void STLStringResizeUninitialized(string* s, size_t new_size) {
+   s->resize(new_size);
+ }
+
+ // Return a mutable char* pointing to a string's internal buffer,
+ // which may not be null-terminated. Writing through this pointer will
+ // modify the string.
+ //
+ // string_as_array(&str)[i] is valid for 0 <= i < str.size() until the
+ // next call to a string method that invalidates iterators.
+ //
+ // As of 2006-04, there is no standard-blessed way of getting a
+ // mutable reference to a string's internal buffer. However, issue 530
+ // (http://www.open-std.org/JTC1/SC22/WG21/docs/lwg-defects.html#530)
+ // proposes this as the method. It will officially be part of the standard
+ // for C++0x. This should already work on all current implementations.
+ inline char* string_as_array(string* str) {
+   return str->empty() ? NULL : &*str->begin();
+ }
+
+ } // namespace snappy
+
+ #endif // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
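
When HAVE_BUILTIN_CTZ is not defined, Log2Floor above falls back to a shift-halving loop: it peels off 16-, 8-, 4-, 2- and 1-bit shifts, keeping the top half whenever it is non-zero and accumulating the exponent. A standalone mirror of that portable branch with a few sanity checks (Log2FloorPortable is an illustrative copy, not the library symbol):

// Sketch: the portable Log2Floor strategy, checked against small cases.
#include <cassert>
#include <cstdint>

static int Log2FloorPortable(uint32_t n) {
  if (n == 0) return -1;
  int log = 0;
  for (int i = 4; i >= 0; --i) {
    const int shift = 1 << i;        // 16, 8, 4, 2, 1
    const uint32_t x = n >> shift;
    if (x != 0) {                    // top part non-empty: keep it
      n = x;
      log += shift;
    }
  }
  return log;                        // n has been reduced to 1 here
}

int main() {
  assert(Log2FloorPortable(0) == -1);
  assert(Log2FloorPortable(1) == 0);
  assert(Log2FloorPortable(3) == 1);
  assert(Log2FloorPortable(1u << 20) == 20);
  assert(Log2FloorPortable(0x80000000u) == 31);
  return 0;
}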