sereal 0.0.2
- data/ext/sereal/buffer.h +89 -0
- data/ext/sereal/decode.c +238 -0
- data/ext/sereal/decode.h +282 -0
- data/ext/sereal/encode.c +269 -0
- data/ext/sereal/encode.h +1 -0
- data/ext/sereal/extconf.rb +8 -0
- data/ext/sereal/proto.h +73 -0
- data/ext/sereal/sereal.c +12 -0
- data/ext/sereal/sereal.h +73 -0
- data/ext/sereal/snappy/csnappy.h +129 -0
- data/ext/sereal/snappy/csnappy_compress.c +659 -0
- data/ext/sereal/snappy/csnappy_decompress.c +414 -0
- data/ext/sereal/snappy/csnappy_internal.h +147 -0
- data/ext/sereal/snappy/csnappy_internal_userspace.h +301 -0
- metadata +75 -0
data/ext/sereal/snappy/csnappy_decompress.c
@@ -0,0 +1,414 @@
/*
Copyright 2011, Google Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
    * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

File modified for the Linux Kernel by
Zeev Tarantov <zeev.tarantov@gmail.com>
*/

#include "csnappy_internal.h"
#ifdef __KERNEL__
#include <linux/kernel.h>
#include <linux/module.h>
#endif
#include "csnappy.h"

int
csnappy_get_uncompressed_length(
    const char *src,
    uint32_t src_len,
    uint32_t *result)
{
    const char *src_base = src;
    uint32_t shift = 0;
    uint8_t c;
    /* Length is encoded in 1..5 bytes */
    *result = 0;
    for (;;) {
        if (shift >= 32)
            goto err_out;
        if (src_len == 0)
            goto err_out;
        c = *(const uint8_t *)src++;
        src_len -= 1;
        *result |= (uint32_t)(c & 0x7f) << shift;
        if (c < 128)
            break;
        shift += 7;
    }
    return src - src_base;
err_out:
    return CSNAPPY_E_HEADER_BAD;
}
#if defined(__KERNEL__) && !defined(STATIC)
EXPORT_SYMBOL(csnappy_get_uncompressed_length);
#endif

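An editorial aside, not part of the diff: the header parsed above is a little-endian base-128 varint, seven payload bits per byte with the high bit as a continuation flag. A minimal sketch of a call, with byte values chosen for illustration:

    /* 300 == 0x12C is encoded as 0xAC 0x02:
     *   0xAC -> payload 0x2C (44), continuation bit set
     *   0x02 -> payload 2, shifted left by 7 -> 256; 256 + 44 == 300 */
    const char hdr[2] = { (char)0xAC, 0x02 };
    uint32_t olen;
    int n = csnappy_get_uncompressed_length(hdr, 2, &olen);
    /* n == 2 (header bytes consumed), olen == 300 */
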
#if defined(__arm__) && !(ARCH_ARM_HAVE_UNALIGNED)
int csnappy_decompress_noheader(
    const char *src_,
    uint32_t src_remaining,
    char *dst,
    uint32_t *dst_len)
{
    const uint8_t *src = (const uint8_t *)src_;
    const uint8_t * const src_end = src + src_remaining;
    char * const dst_base = dst;
    char * const dst_end = dst + *dst_len;
    while (src < src_end) {
        uint32_t opcode = *src++;
        uint32_t length = (opcode >> 2) + 1;
        const uint8_t *copy_src;
        if (likely((opcode & 3) == 0)) {
            if (unlikely(length > 60)) {
                uint32_t extra_bytes = length - 60;
                int shift, max_shift;
                if (unlikely(src + extra_bytes > src_end))
                    return CSNAPPY_E_DATA_MALFORMED;
                length = 0;
                for (shift = 0, max_shift = extra_bytes*8;
                     shift < max_shift;
                     shift += 8)
                    length |= *src++ << shift;
                ++length;
            }
            if (unlikely(src + length > src_end))
                return CSNAPPY_E_DATA_MALFORMED;
            copy_src = src;
            src += length;
        } else {
            uint32_t offset;
            if (likely((opcode & 3) == 1)) {
                if (unlikely(src + 1 > src_end))
                    return CSNAPPY_E_DATA_MALFORMED;
                length = ((length - 1) & 7) + 4;
                offset = ((opcode >> 5) << 8) + *src++;
            } else if (likely((opcode & 3) == 2)) {
                if (unlikely(src + 2 > src_end))
                    return CSNAPPY_E_DATA_MALFORMED;
                offset = src[0] | (src[1] << 8);
                src += 2;
            } else {
                if (unlikely(src + 4 > src_end))
                    return CSNAPPY_E_DATA_MALFORMED;
                offset = src[0] | (src[1] << 8) |
                         (src[2] << 16) | (src[3] << 24);
                src += 4;
            }
            if (unlikely(!offset || (offset > dst - dst_base)))
                return CSNAPPY_E_DATA_MALFORMED;
            copy_src = (const uint8_t *)dst - offset;
        }
        if (unlikely(dst + length > dst_end))
            return CSNAPPY_E_OUTPUT_OVERRUN;
        do *dst++ = *copy_src++; while (--length);
    }
    *dst_len = dst - dst_base;
    return CSNAPPY_E_OK;
}
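To make the two tag branches above concrete, here is a hand-decoded five-byte stream (an editorial walkthrough; the byte values follow the format exactly as implemented by the loop above):

    /* Input (no length header): 0x04 'a' 'b' 0x09 0x02
     * 0x04: tag 00 (literal), length = (0x04 >> 2) + 1 = 2 -> emit "ab"
     * 0x09: tag 01 (copy, 1-byte offset),
     *   initial length = (0x09 >> 2) + 1 = 3, then ((3 - 1) & 7) + 4 = 6
     *   offset = ((0x09 >> 5) << 8) + 0x02 = 2
     *   -> copy 6 bytes starting at dst - 2, re-reading bytes just written
     * Output: "abababab" (8 bytes) */

The byte-at-a-time copy at the end of the function is what makes overlapping copies like this one come out right.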
#else /* !(arm with no unaligned access) */
/*
 * Data stored per entry in lookup table:
 *   Range    Bits-used    Description
 *   ------------------------------------
 *   1..64    0..7         Literal/copy length encoded in opcode byte
 *   0..7     8..10        Copy offset encoded in opcode byte / 256
 *   0..4     11..13       Extra bytes after opcode
 *
 * We use eight bits for the length even though 7 would have sufficed
 * because of efficiency reasons:
 *   (1) Extracting a byte is faster than a bit-field
 *   (2) It properly aligns copy offset so we do not need a <<8
 */
static const uint16_t char_table[256] = {
    0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002,
    0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004,
    0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006,
    0x0007, 0x080a, 0x1007, 0x2007, 0x0008, 0x080b, 0x1008, 0x2008,
    0x0009, 0x0904, 0x1009, 0x2009, 0x000a, 0x0905, 0x100a, 0x200a,
    0x000b, 0x0906, 0x100b, 0x200b, 0x000c, 0x0907, 0x100c, 0x200c,
    0x000d, 0x0908, 0x100d, 0x200d, 0x000e, 0x0909, 0x100e, 0x200e,
    0x000f, 0x090a, 0x100f, 0x200f, 0x0010, 0x090b, 0x1010, 0x2010,
    0x0011, 0x0a04, 0x1011, 0x2011, 0x0012, 0x0a05, 0x1012, 0x2012,
    0x0013, 0x0a06, 0x1013, 0x2013, 0x0014, 0x0a07, 0x1014, 0x2014,
    0x0015, 0x0a08, 0x1015, 0x2015, 0x0016, 0x0a09, 0x1016, 0x2016,
    0x0017, 0x0a0a, 0x1017, 0x2017, 0x0018, 0x0a0b, 0x1018, 0x2018,
    0x0019, 0x0b04, 0x1019, 0x2019, 0x001a, 0x0b05, 0x101a, 0x201a,
    0x001b, 0x0b06, 0x101b, 0x201b, 0x001c, 0x0b07, 0x101c, 0x201c,
    0x001d, 0x0b08, 0x101d, 0x201d, 0x001e, 0x0b09, 0x101e, 0x201e,
    0x001f, 0x0b0a, 0x101f, 0x201f, 0x0020, 0x0b0b, 0x1020, 0x2020,
    0x0021, 0x0c04, 0x1021, 0x2021, 0x0022, 0x0c05, 0x1022, 0x2022,
    0x0023, 0x0c06, 0x1023, 0x2023, 0x0024, 0x0c07, 0x1024, 0x2024,
    0x0025, 0x0c08, 0x1025, 0x2025, 0x0026, 0x0c09, 0x1026, 0x2026,
    0x0027, 0x0c0a, 0x1027, 0x2027, 0x0028, 0x0c0b, 0x1028, 0x2028,
    0x0029, 0x0d04, 0x1029, 0x2029, 0x002a, 0x0d05, 0x102a, 0x202a,
    0x002b, 0x0d06, 0x102b, 0x202b, 0x002c, 0x0d07, 0x102c, 0x202c,
    0x002d, 0x0d08, 0x102d, 0x202d, 0x002e, 0x0d09, 0x102e, 0x202e,
    0x002f, 0x0d0a, 0x102f, 0x202f, 0x0030, 0x0d0b, 0x1030, 0x2030,
    0x0031, 0x0e04, 0x1031, 0x2031, 0x0032, 0x0e05, 0x1032, 0x2032,
    0x0033, 0x0e06, 0x1033, 0x2033, 0x0034, 0x0e07, 0x1034, 0x2034,
    0x0035, 0x0e08, 0x1035, 0x2035, 0x0036, 0x0e09, 0x1036, 0x2036,
    0x0037, 0x0e0a, 0x1037, 0x2037, 0x0038, 0x0e0b, 0x1038, 0x2038,
    0x0039, 0x0f04, 0x1039, 0x2039, 0x003a, 0x0f05, 0x103a, 0x203a,
    0x003b, 0x0f06, 0x103b, 0x203b, 0x003c, 0x0f07, 0x103c, 0x203c,
    0x0801, 0x0f08, 0x103d, 0x203d, 0x1001, 0x0f09, 0x103e, 0x203e,
    0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040
};

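Unpacking one entry shows the layout described in the comment above (editorial sketch, not part of the diff):

    /* char_table[0x09] == 0x0806:
     *   extra bytes after opcode: 0x0806 >> 11          == 1
     *   copy offset / 256:        (0x0806 & 0x700) >> 8 == 0
     *   length:                   0x0806 & 0xff         == 6
     * i.e. opcode 0x09 is a 1-byte-offset copy of length 6, matching the
     * hand decode shown earlier for the byte-at-a-time path. */
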
/*
 * Copy "len" bytes from "src" to "op", one byte at a time. Used for
 * handling COPY operations where the input and output regions may
 * overlap. For example, suppose:
 *   src == "ab"
 *   op  == src + 2
 *   len == 20
 * After IncrementalCopy(src, op, len), the result will have
 * eleven copies of "ab"
 *   ababababababababababab
 * Note that this does not match the semantics of either memcpy()
 * or memmove().
 */
static inline void IncrementalCopy(const char *src, char *op, int len)
{
    DCHECK_GT(len, 0);
    do {
        *op++ = *src++;
    } while (--len > 0);
}

/*
 * Equivalent to IncrementalCopy except that it can write up to ten extra
 * bytes after the end of the copy, and that it is faster.
 *
 * The main part of this loop is a simple copy of eight bytes at a time until
 * we've copied (at least) the requested amount of bytes. However, if op and
 * src are less than eight bytes apart (indicating a repeating pattern of
 * length < 8), we first need to expand the pattern in order to get the correct
 * results. For instance, if the buffer looks like this, with the eight-byte
 * <src> and <op> patterns marked as intervals:
 *
 *   abxxxxxxxxxxxx
 *   [------]          src
 *     [------]        op
 *
 * a single eight-byte copy from <src> to <op> will repeat the pattern once,
 * after which we can move <op> two bytes without moving <src>:
 *
 *   ababxxxxxxxxxx
 *   [------]          src
 *       [------]      op
 *
 * and repeat the exercise until the two no longer overlap.
 *
 * This allows us to do very well in the special case of one single byte
 * repeated many times, without taking a big hit for more general cases.
 *
 * The worst case of extra writing past the end of the match occurs when
 * op - src == 1 and len == 1; the last copy will read from byte positions
 * [0..7] and write to [4..11], whereas it was only supposed to write to
 * position 1. Thus, ten excess bytes.
 */
static const int kMaxIncrementCopyOverflow = 10;
static inline void IncrementalCopyFastPath(const char *src, char *op, int len)
{
    while (op - src < 8) {
        UnalignedCopy64(src, op);
        len -= op - src;
        op += op - src;
    }
    while (len > 0) {
        UnalignedCopy64(src, op);
        src += 8;
        op += 8;
        len -= 8;
    }
}

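A quick editorial demonstration of the overlapping-copy semantics (hypothetical test code, not part of the gem), mirroring the example in the comment above IncrementalCopy:

    char buf[32] = "ab";
    IncrementalCopy(buf, buf + 2, 20);  /* reads bytes it has just written */
    /* buf now holds eleven copies of "ab": "ababababababababababab" */
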
/* A type that writes to a flat array. */
struct SnappyArrayWriter {
    char *base;
    char *op;
    char *op_limit;
};

static inline int
SAW__AppendFastPath(struct SnappyArrayWriter *this,
                    const char *ip, uint32_t len)
{
    char *op = this->op;
    const int space_left = this->op_limit - op;
    if (likely(space_left >= 16)) {
        UnalignedCopy64(ip, op);
        UnalignedCopy64(ip + 8, op + 8);
    } else {
        if (unlikely(space_left < (int32_t)len))
            return CSNAPPY_E_OUTPUT_OVERRUN;
        memcpy(op, ip, len);
    }
    this->op = op + len;
    return CSNAPPY_E_OK;
}

static inline int
SAW__Append(struct SnappyArrayWriter *this,
            const char *ip, uint32_t len)
{
    char *op = this->op;
    const int space_left = this->op_limit - op;
    if (unlikely(space_left < (int32_t)len))
        return CSNAPPY_E_OUTPUT_OVERRUN;
    memcpy(op, ip, len);
    this->op = op + len;
    return CSNAPPY_E_OK;
}

static inline int
SAW__AppendFromSelf(struct SnappyArrayWriter *this,
                    uint32_t offset, uint32_t len)
{
    char *op = this->op;
    const int space_left = this->op_limit - op;
    /* -1u catches offset==0 */
    if (op - this->base <= offset - 1u)
        return CSNAPPY_E_DATA_MALFORMED;
    /* Fast path, used for the majority (70-80%) of dynamic invocations. */
    if (len <= 16 && offset >= 8 && space_left >= 16) {
        UnalignedCopy64(op - offset, op);
        UnalignedCopy64(op - offset + 8, op + 8);
    } else if (space_left >= (int32_t)(len + kMaxIncrementCopyOverflow)) {
        IncrementalCopyFastPath(op - offset, op, len);
    } else {
        if (space_left < (int32_t)len)
            return CSNAPPY_E_OUTPUT_OVERRUN;
        IncrementalCopy(op - offset, op, len);
    }
    this->op = op + len;
    return CSNAPPY_E_OK;
}

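One subtlety in SAW__AppendFromSelf: the "-1u catches offset==0" test relies on unsigned wraparound. An editorial sketch of the same idiom in isolation (op and base stand for the writer fields):

    /* When offset == 0, offset - 1u wraps to UINT32_MAX, so
     * produced <= UINT32_MAX is always true and the bogus zero-offset
     * copy is rejected; the same comparison also rejects offsets that
     * reach before the start of the output buffer. */
    uint32_t produced = (uint32_t)(op - base);
    if (produced <= offset - 1u)
        return CSNAPPY_E_DATA_MALFORMED;
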
int
csnappy_decompress_noheader(
    const char *src,
    uint32_t src_remaining,
    char *dst,
    uint32_t *dst_len)
{
    struct SnappyArrayWriter writer;
    const char *end_minus5 = src + src_remaining - 5;
    uint32_t length, trailer, opword, extra_bytes;
    int ret, available;
    uint8_t opcode;
    char scratch[5];
    writer.op = writer.base = dst;
    writer.op_limit = writer.op + *dst_len;
#define LOOP_COND() \
    if (unlikely(src >= end_minus5)) { \
        available = end_minus5 + 5 - src; \
        if (unlikely(available <= 0)) \
            goto out; \
        memmove(scratch, src, available); \
        src = scratch; \
        end_minus5 = scratch + available - 5; \
    }

    LOOP_COND();
    for (;;) {
        opcode = *(const uint8_t *)src++;
        if (opcode & 0x3) {
            opword = char_table[opcode];
            extra_bytes = opword >> 11;
            trailer = get_unaligned_le(src, extra_bytes);
            length = opword & 0xff;
            src += extra_bytes;
            trailer += opword & 0x700;
            ret = SAW__AppendFromSelf(&writer, trailer, length);
            if (ret < 0)
                return ret;
            LOOP_COND();
        } else {
            length = (opcode >> 2) + 1;
            available = end_minus5 + 5 - src;
            if (length <= 16 && available >= 16) {
                if ((ret = SAW__AppendFastPath(&writer, src, length)) < 0)
                    return ret;
                src += length;
                LOOP_COND();
                continue;
            }
            if (unlikely(length > 60)) {
                extra_bytes = length - 60;
                length = get_unaligned_le(src, extra_bytes) + 1;
                src += extra_bytes;
                available = end_minus5 + 5 - src;
            }
            if (unlikely(available < (int32_t)length))
                return CSNAPPY_E_DATA_MALFORMED;
            ret = SAW__Append(&writer, src, length);
            if (ret < 0)
                return ret;
            src += length;
            LOOP_COND();
        }
    }
#undef LOOP_COND
out:
    *dst_len = writer.op - writer.base;
    return CSNAPPY_E_OK;
}
#endif /* optimized for unaligned arch */

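Why the five-byte scratch buffer (an editorial reading of the code, not a comment from the source): a single tag consumes at most one opcode byte plus four trailer bytes, and get_unaligned_le (see csnappy_internal.h below) always loads a full four bytes before masking. The LOOP_COND bookkeeping above keeps src at least five bytes away from the true end of the input, copying the final stub into scratch[] when necessary, so those four-byte loads can never run past memory the decoder does not own.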
#if defined(__KERNEL__) && !defined(STATIC)
EXPORT_SYMBOL(csnappy_decompress_noheader);
#endif

int
csnappy_decompress(
    const char *src,
    uint32_t src_len,
    char *dst,
    uint32_t dst_len)
{
    int n;
    uint32_t olen = 0;
    /* Read uncompressed length from the front of the compressed input */
    n = csnappy_get_uncompressed_length(src, src_len, &olen);
    if (unlikely(n < CSNAPPY_E_OK))
        return n;
    /* Protect against possible DoS attack */
    if (unlikely(olen > dst_len))
        return CSNAPPY_E_OUTPUT_INSUF;
    return csnappy_decompress_noheader(src + n, src_len - n, dst, &olen);
}
#if defined(__KERNEL__) && !defined(STATIC)
EXPORT_SYMBOL(csnappy_decompress);

MODULE_LICENSE("BSD");
MODULE_DESCRIPTION("Snappy Decompressor");
#endif
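Putting the pieces together, a hypothetical userspace caller (editorial sketch, not part of the gem; it assumes csnappy.h declares the functions and CSNAPPY_E_* codes used above):

    #include <stdio.h>
    #include "csnappy.h"

    int main(void)
    {
        /* varint length 8, literal "ab", then a 6-byte copy at offset 2 */
        static const char comp[] = { 0x08, 0x04, 'a', 'b', 0x09, 0x02 };
        char out[8];
        if (csnappy_decompress(comp, sizeof(comp), out, sizeof(out)) != CSNAPPY_E_OK)
            return 1;
        printf("%.8s\n", out);  /* prints "abababab" */
        return 0;
    }
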
data/ext/sereal/snappy/csnappy_internal.h
@@ -0,0 +1,147 @@
/*
Copyright 2011 Google Inc. All Rights Reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
    * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Various stubs for the open-source version of Snappy.

File modified for the Linux Kernel by
Zeev Tarantov <zeev.tarantov@gmail.com>
*/

#ifndef CSNAPPY_INTERNAL_H_
#define CSNAPPY_INTERNAL_H_

#ifndef __KERNEL__
#include "csnappy_internal_userspace.h"
#include <string.h>
#else

#include <linux/types.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>

#if (defined(__LITTLE_ENDIAN) && defined(__BIG_ENDIAN)) || \
    (!defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN))
#error either __LITTLE_ENDIAN or __BIG_ENDIAN must be defined
#endif
#if defined(__LITTLE_ENDIAN)
#define __BYTE_ORDER __LITTLE_ENDIAN
#else
#define __BYTE_ORDER __BIG_ENDIAN
#endif

#ifdef DEBUG
#define DCHECK(cond) if (!(cond)) \
    printk(KERN_DEBUG "assert failed @ %s:%i\n", \
           __FILE__, __LINE__)
#else
#define DCHECK(cond)
#endif

#define UNALIGNED_LOAD16(_p) get_unaligned((const uint16_t *)(_p))
#define UNALIGNED_LOAD32(_p) get_unaligned((const uint32_t *)(_p))
#define UNALIGNED_LOAD64(_p) get_unaligned((const uint64_t *)(_p))
#define UNALIGNED_STORE16(_p, _val) put_unaligned((_val), (uint16_t *)(_p))
#define UNALIGNED_STORE32(_p, _val) put_unaligned((_val), (uint32_t *)(_p))
#define UNALIGNED_STORE64(_p, _val) put_unaligned((_val), (uint64_t *)(_p))

#define FindLSBSetNonZero(n) __builtin_ctz(n)
#define FindLSBSetNonZero64(n) __builtin_ctzll(n)

#endif /* __KERNEL__ */

#define ARCH_ARM_HAVE_UNALIGNED \
    defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) || defined(__ARMV6__) || \
    defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__)

static inline void UnalignedCopy64(const void *src, void *dst) {
#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || ARCH_ARM_HAVE_UNALIGNED
    if ((sizeof(void *) == 8) || (sizeof(long) == 8)) {
        UNALIGNED_STORE64(dst, UNALIGNED_LOAD64(src));
    } else {
        /* This can be more efficient than UNALIGNED_LOAD64 + UNALIGNED_STORE64
           on some platforms, in particular ARM. */
        const uint8_t *src_bytep = (const uint8_t *)src;
        uint8_t *dst_bytep = (uint8_t *)dst;

        UNALIGNED_STORE32(dst_bytep, UNALIGNED_LOAD32(src_bytep));
        UNALIGNED_STORE32(dst_bytep + 4, UNALIGNED_LOAD32(src_bytep + 4));
    }
#else
    const uint8_t *src_bytep = (const uint8_t *)src;
    uint8_t *dst_bytep = (uint8_t *)dst;
    dst_bytep[0] = src_bytep[0];
    dst_bytep[1] = src_bytep[1];
    dst_bytep[2] = src_bytep[2];
    dst_bytep[3] = src_bytep[3];
    dst_bytep[4] = src_bytep[4];
    dst_bytep[5] = src_bytep[5];
    dst_bytep[6] = src_bytep[6];
    dst_bytep[7] = src_bytep[7];
#endif
}

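A reviewer's aside on ARCH_ARM_HAVE_UNALIGNED: the C standard (C99 §6.10.1) leaves the behavior undefined when defined() appears in an #if line as the result of macro expansion; GCC accepts the idiom, which is presumably why it works here. A more portable formulation (hypothetical, not what the gem ships) would evaluate defined() once and set the macro to a plain constant:

    #if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_7A__) /* ... etc ... */
    #define ARCH_ARM_HAVE_UNALIGNED 1
    #else
    #define ARCH_ARM_HAVE_UNALIGNED 0
    #endif
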
#if defined(__arm__)
#if ARCH_ARM_HAVE_UNALIGNED
static inline uint32_t get_unaligned_le(const void *p, uint32_t n)
{
    uint32_t wordmask = (1U << (8 * n)) - 1;
    return get_unaligned_le32(p) & wordmask;
}
#else
extern uint32_t get_unaligned_le_armv5(const void *p, uint32_t n);
#define get_unaligned_le get_unaligned_le_armv5
#endif
#else
static inline uint32_t get_unaligned_le(const void *p, uint32_t n)
{
    /* Mapping from i in range [0,4] to a mask to extract the bottom 8*i bits */
    static const uint32_t wordmask[] = {
        0u, 0xffu, 0xffffu, 0xffffffu, 0xffffffffu
    };
    return get_unaligned_le32(p) & wordmask[n];
}
#endif

#define DCHECK_EQ(a, b) DCHECK(((a) == (b)))
#define DCHECK_NE(a, b) DCHECK(((a) != (b)))
#define DCHECK_GT(a, b) DCHECK(((a) > (b)))
#define DCHECK_GE(a, b) DCHECK(((a) >= (b)))
#define DCHECK_LT(a, b) DCHECK(((a) < (b)))
#define DCHECK_LE(a, b) DCHECK(((a) <= (b)))

enum {
    LITERAL = 0,
    COPY_1_BYTE_OFFSET = 1, /* 3 bit length + 3 bits of offset in opcode */
    COPY_2_BYTE_OFFSET = 2,
    COPY_4_BYTE_OFFSET = 3
};

#endif /* CSNAPPY_INTERNAL_H_ */
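A final editorial illustration of get_unaligned_le (the generic variant above):

    /* Loads a full 4 bytes little-endian, then masks down to n bytes;
     * callers must therefore guarantee 4 readable bytes at p, hence the
     * decoder's five-byte slack in csnappy_decompress.c. */
    uint8_t buf[4] = { 0x34, 0x12, 0xff, 0xff };
    uint32_t v = get_unaligned_le(buf, 2);  /* 0xffff1234 & 0xffff == 0x1234 */

Worth noting: the ARMv6+ variant computes its mask as (1U << (8 * n)) - 1, which is formally undefined C for n == 4 (a shift by the full 32-bit width); ARM's register-shift semantics happen to yield the intended all-ones mask, and the table-based variant avoids the question entirely.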