snappy_ext 0.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. data/ext/snappy/extconf.rb +36 -0
  2. data/ext/snappy/snappy_ext.cc +131 -0
  3. data/ext/snappy/vendor/snappy-1.0.0/AUTHORS +1 -0
  4. data/ext/snappy/vendor/snappy-1.0.0/COPYING +28 -0
  5. data/ext/snappy/vendor/snappy-1.0.0/ChangeLog +3 -0
  6. data/ext/snappy/vendor/snappy-1.0.0/INSTALL +230 -0
  7. data/ext/snappy/vendor/snappy-1.0.0/Makefile.am +24 -0
  8. data/ext/snappy/vendor/snappy-1.0.0/Makefile.in +926 -0
  9. data/ext/snappy/vendor/snappy-1.0.0/NEWS +3 -0
  10. data/ext/snappy/vendor/snappy-1.0.0/README +132 -0
  11. data/ext/snappy/vendor/snappy-1.0.0/aclocal.m4 +9076 -0
  12. data/ext/snappy/vendor/snappy-1.0.0/autogen.sh +8 -0
  13. data/ext/snappy/vendor/snappy-1.0.0/compile +99 -0
  14. data/ext/snappy/vendor/snappy-1.0.0/config.guess +1466 -0
  15. data/ext/snappy/vendor/snappy-1.0.0/config.h.in +107 -0
  16. data/ext/snappy/vendor/snappy-1.0.0/config.sub +1579 -0
  17. data/ext/snappy/vendor/snappy-1.0.0/configure +17962 -0
  18. data/ext/snappy/vendor/snappy-1.0.0/configure.ac +99 -0
  19. data/ext/snappy/vendor/snappy-1.0.0/depcomp +530 -0
  20. data/ext/snappy/vendor/snappy-1.0.0/install-sh +323 -0
  21. data/ext/snappy/vendor/snappy-1.0.0/ltmain.sh +8413 -0
  22. data/ext/snappy/vendor/snappy-1.0.0/m4/gtest.m4 +74 -0
  23. data/ext/snappy/vendor/snappy-1.0.0/missing +360 -0
  24. data/ext/snappy/vendor/snappy-1.0.0/mkinstalldirs +158 -0
  25. data/ext/snappy/vendor/snappy-1.0.0/snappy-internal.h +136 -0
  26. data/ext/snappy/vendor/snappy-1.0.0/snappy-sinksource.cc +46 -0
  27. data/ext/snappy/vendor/snappy-1.0.0/snappy-sinksource.h +110 -0
  28. data/ext/snappy/vendor/snappy-1.0.0/snappy-stubs-internal.cc +28 -0
  29. data/ext/snappy/vendor/snappy-1.0.0/snappy-stubs-internal.h +457 -0
  30. data/ext/snappy/vendor/snappy-1.0.0/snappy-stubs-public.h +59 -0
  31. data/ext/snappy/vendor/snappy-1.0.0/snappy-stubs-public.h.in +59 -0
  32. data/ext/snappy/vendor/snappy-1.0.0/snappy-test.cc +523 -0
  33. data/ext/snappy/vendor/snappy-1.0.0/snappy-test.h +458 -0
  34. data/ext/snappy/vendor/snappy-1.0.0/snappy.cc +1001 -0
  35. data/ext/snappy/vendor/snappy-1.0.0/snappy.h +141 -0
  36. data/ext/snappy/vendor/snappy-1.0.0/snappy_unittest.cc +1073 -0
  37. data/ext/snappy/version.h +4 -0
  38. data/snappy_ext.gemspec +58 -0
  39. metadata +99 -0
data/ext/snappy/vendor/snappy-1.0.0/snappy.cc
@@ -0,0 +1,1001 @@
+ // Copyright 2005 Google Inc. All Rights Reserved.
+ //
+ // Licensed under the Apache License, Version 2.0 (the "License");
+ // you may not use this file except in compliance with the License.
+ // You may obtain a copy of the License at
+ //
+ // http://www.apache.org/licenses/LICENSE-2.0
+ //
+ // Unless required by applicable law or agreed to in writing, software
+ // distributed under the License is distributed on an "AS IS" BASIS,
+ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ // See the License for the specific language governing permissions and
+ // limitations under the License.
+
+ #include "snappy.h"
+ #include "snappy-internal.h"
+ #include "snappy-sinksource.h"
+
+ #include <stdio.h>
+
+ #include <algorithm>
+ #include <string>
+ #include <vector>
+
+
+ namespace snappy {
+
+ // Any hash function will produce a valid compressed bitstream, but a good
+ // hash function reduces the number of collisions and thus yields better
+ // compression for compressible input, and more speed for incompressible
+ // input. Of course, it doesn't hurt if the hash function is reasonably fast
+ // either, as it gets called a lot.
+ static inline uint32 HashBytes(uint32 bytes, int shift) {
+ uint32 kMul = 0x1e35a7bd;
+ return (bytes * kMul) >> shift;
+ }
+ static inline uint32 Hash(const char* p, int shift) {
+ return HashBytes(UNALIGNED_LOAD32(p), shift);
+ }
+
+ size_t MaxCompressedLength(size_t source_len) {
+ // Compressed data can be defined as:
+ // compressed := item* literal*
+ // item := literal* copy
+ //
+ // The trailing literal sequence has a space blowup of at most 62/60
+ // since a literal of length 60 needs one tag byte + one extra byte
+ // for length information.
+ //
+ // Item blowup is trickier to measure. Suppose the "copy" op copies
+ // 4 bytes of data. Because of a special check in the encoding code,
+ // we produce a 4-byte copy only if the offset is < 65536. Therefore
+ // the copy op takes 3 bytes to encode, and this type of item leads
+ // to at most the 62/60 blowup for representing literals.
+ //
+ // Suppose the "copy" op copies 5 bytes of data. If the offset is big
+ // enough, it will take 5 bytes to encode the copy op. Therefore the
+ // worst case here is a one-byte literal followed by a five-byte copy.
+ // I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
+ //
+ // This last factor dominates the blowup, so the final estimate is:
+ return 32 + source_len + source_len/6;
+ }
+
+ enum {
+ LITERAL = 0,
+ COPY_1_BYTE_OFFSET = 1, // 3 bit length + 3 bits of offset in opcode
+ COPY_2_BYTE_OFFSET = 2,
+ COPY_4_BYTE_OFFSET = 3
+ };
+
+ // Copy "len" bytes from "src" to "op", one byte at a time. Used for
+ // handling COPY operations where the input and output regions may
+ // overlap. For example, suppose:
+ // src == "ab"
+ // op == src + 2
+ // len == 20
+ // After IncrementalCopy(src, op, len), the result will have
+ // eleven copies of "ab"
+ // ababababababababababab
+ // Note that this does not match the semantics of either memcpy()
+ // or memmove().
+ static inline void IncrementalCopy(const char* src, char* op, int len) {
+ DCHECK_GT(len, 0);
+ do {
+ *op++ = *src++;
+ } while (--len > 0);
+ }
+
+ // Equivalent to IncrementalCopy except that it can write up to ten extra
+ // bytes after the end of the copy, and that it is faster.
+ //
+ // The main part of this loop is a simple copy of eight bytes at a time until
+ // we've copied (at least) the requested amount of bytes. However, if op and
+ // src are less than eight bytes apart (indicating a repeating pattern of
+ // length < 8), we first need to expand the pattern in order to get the correct
+ // results. For instance, if the buffer looks like this, with the eight-byte
+ // <src> and <op> patterns marked as intervals:
+ //
+ // abxxxxxxxxxxxx
+ [------] src
+ [------] op
+ //
+ // a single eight-byte copy from <src> to <op> will repeat the pattern once,
+ // after which we can move <op> two bytes without moving <src>:
+ //
+ // ababxxxxxxxxxx
+ // [------] src
+ // [------] op
+ //
+ // and repeat the exercise until the two no longer overlap.
+ //
+ // This allows us to do very well in the special case of one single byte
+ // repeated many times, without taking a big hit for more general cases.
+ //
+ // The worst case of extra writing past the end of the match occurs when
+ // op - src == 1 and len == 1; the last copy will read from byte positions
+ // [0..7] and write to [4..11], whereas it was only supposed to write to
+ // position 1. Thus, ten excess bytes.
+
+ namespace {
+
+ const int kMaxIncrementCopyOverflow = 10;
+
+ } // namespace
+
+ static inline void IncrementalCopyFastPath(const char* src, char* op, int len) {
+ while (op - src < 8) {
+ UNALIGNED_STORE64(op, UNALIGNED_LOAD64(src));
+ len -= op - src;
+ op += op - src;
+ }
+ while (len > 0) {
+ UNALIGNED_STORE64(op, UNALIGNED_LOAD64(src));
+ src += 8;
+ op += 8;
+ len -= 8;
+ }
+ }
+
+ static inline char* EmitLiteral(char* op,
+ const char* literal,
+ int len,
+ bool allow_fast_path) {
+ int n = len - 1; // Zero-length literals are disallowed
+ if (n < 60) {
+ // Fits in tag byte
+ *op++ = LITERAL | (n << 2);
+
+ // The vast majority of copies are below 16 bytes, for which a
+ // call to memcpy is overkill. This fast path can sometimes
+ // copy up to 15 bytes too much, but that is okay in the
+ // main loop, since we have a bit to go on for both sides:
+ //
+ // - The input will always have kInputMarginBytes = 15 extra
+ // available bytes, as long as we're in the main loop, and
+ // if not, allow_fast_path = false.
+ // - The output will always have 32 spare bytes (see
+ // MaxCompressedLength).
+ if (allow_fast_path && len <= 16) {
+ UNALIGNED_STORE64(op, UNALIGNED_LOAD64(literal));
+ UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(literal + 8));
+ return op + len;
+ }
+ } else {
+ // Encode in upcoming bytes
+ char* base = op;
+ int count = 0;
+ op++;
+ while (n > 0) {
+ *op++ = n & 0xff;
+ n >>= 8;
+ count++;
+ }
+ assert(count >= 1);
+ assert(count <= 4);
+ *base = LITERAL | ((59+count) << 2);
+ }
+ memcpy(op, literal, len);
+ return op + len;
+ }
+
+ static inline char* EmitCopyLessThan64(char* op, int offset, int len) {
+ DCHECK_LE(len, 64);
+ DCHECK_GE(len, 4);
+ DCHECK_LT(offset, 65536);
+
+ if ((len < 12) && (offset < 2048)) {
+ int len_minus_4 = len - 4;
+ assert(len_minus_4 < 8); // Must fit in 3 bits
+ *op++ = COPY_1_BYTE_OFFSET | ((len_minus_4) << 2) | ((offset >> 8) << 5);
+ *op++ = offset & 0xff;
+ } else {
+ *op++ = COPY_2_BYTE_OFFSET | ((len-1) << 2);
+ LittleEndian::Store16(op, offset);
+ op += 2;
+ }
+ return op;
+ }
+
+ static inline char* EmitCopy(char* op, int offset, int len) {
+ // Emit 64 byte copies but make sure to keep at least four bytes reserved
+ while (len >= 68) {
+ op = EmitCopyLessThan64(op, offset, 64);
+ len -= 64;
+ }
+
+ // Emit an extra 60 byte copy if have too much data to fit in one copy
+ if (len > 64) {
+ op = EmitCopyLessThan64(op, offset, 60);
+ len -= 60;
+ }
+
+ // Emit remainder
+ op = EmitCopyLessThan64(op, offset, len);
+ return op;
+ }
+
+
+ bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
+ uint32 v = 0;
+ const char* limit = start + n;
+ if (Varint::Parse32WithLimit(start, limit, &v) != NULL) {
+ *result = v;
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ namespace internal {
+ uint16* WorkingMemory::GetHashTable(size_t input_size, int* table_size) {
+ // Use smaller hash table when input.size() is smaller, since we
+ // fill the table, incurring O(hash table size) overhead for
+ // compression, and if the input is short, we won't need that
+ // many hash table entries anyway.
+ assert(kMaxHashTableSize >= 256);
+ int htsize = 256;
+ while (htsize < kMaxHashTableSize && htsize < input_size) {
+ htsize <<= 1;
+ }
+ CHECK_EQ(0, htsize & (htsize - 1)) << ": must be power of two";
+ CHECK_LE(htsize, kMaxHashTableSize) << ": hash table too large";
+
+ uint16* table;
+ if (htsize <= ARRAYSIZE(small_table_)) {
+ table = small_table_;
+ } else {
+ if (large_table_ == NULL) {
+ large_table_ = new uint16[kMaxHashTableSize];
+ }
+ table = large_table_;
+ }
+
+ *table_size = htsize;
+ memset(table, 0, htsize * sizeof(*table));
+ return table;
+ }
+ } // end namespace internal
+
+ // For 0 <= offset <= 4, GetUint32AtOffset(UNALIGNED_LOAD64(p), offset) will
+ // equal UNALIGNED_LOAD32(p + offset). Motivation: On x86-64 hardware we have
+ // empirically found that overlapping loads such as
+ // UNALIGNED_LOAD32(p) ... UNALIGNED_LOAD32(p+1) ... UNALIGNED_LOAD32(p+2)
+ // are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to uint32.
+ static inline uint32 GetUint32AtOffset(uint64 v, int offset) {
+ DCHECK(0 <= offset && offset <= 4) << offset;
+ return v >> (LittleEndian::IsLittleEndian() ? 8 * offset : 32 - 8 * offset);
+ }
+
+ // Flat array compression that does not emit the "uncompressed length"
+ // prefix. Compresses "input" string to the "*op" buffer.
+ //
+ // REQUIRES: "input" is at most "kBlockSize" bytes long.
+ // REQUIRES: "op" points to an array of memory that is at least
+ // "MaxCompressedLength(input.size())" in size.
+ // REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
+ // REQUIRES: "table_size" is a power of two
+ //
+ // Returns an "end" pointer into "op" buffer.
+ // "end - op" is the compressed size of "input".
+ namespace internal {
+ char* CompressFragment(const char* const input,
+ const size_t input_size,
+ char* op,
+ uint16* table,
+ const int table_size) {
+ // "ip" is the input pointer, and "op" is the output pointer.
+ const char* ip = input;
+ CHECK_LE(input_size, kBlockSize);
+ CHECK_EQ(table_size & (table_size - 1), 0) << ": table must be power of two";
+ const int shift = 32 - Bits::Log2Floor(table_size);
+ DCHECK_EQ(kuint32max >> shift, table_size - 1);
+ const char* ip_end = input + input_size;
+ const char* base_ip = ip;
+ // Bytes in [next_emit, ip) will be emitted as literal bytes. Or
+ // [next_emit, ip_end) after the main loop.
+ const char* next_emit = ip;
+
+ const int kInputMarginBytes = 15;
+ if (PREDICT_TRUE(input_size >= kInputMarginBytes)) {
+ const char* ip_limit = input + input_size - kInputMarginBytes;
+
+ for (uint32 next_hash = Hash(++ip, shift); ; ) {
+ DCHECK_LT(next_emit, ip);
+ // The body of this loop calls EmitLiteral once and then EmitCopy one or
+ // more times. (The exception is that when we're close to exhausting
+ // the input we goto emit_remainder.)
+ //
+ // In the first iteration of this loop we're just starting, so
+ // there's nothing to copy, so calling EmitLiteral once is
+ // necessary. And we only start a new iteration when the
+ // current iteration has determined that a call to EmitLiteral will
+ // precede the next call to EmitCopy (if any).
+ //
+ // Step 1: Scan forward in the input looking for a 4-byte-long match.
+ // If we get close to exhausting the input then goto emit_remainder.
+ //
+ // Heuristic match skipping: If 32 bytes are scanned with no matches
+ // found, start looking only at every other byte. If 32 more bytes are
+ // scanned, look at every third byte, etc.. When a match is found,
+ // immediately go back to looking at every byte. This is a small loss
+ // (~5% performance, ~0.1% density) for compressible data due to more
+ // bookkeeping, but for non-compressible data (such as JPEG) it's a huge
+ // win since the compressor quickly "realizes" the data is incompressible
+ // and doesn't bother looking for matches everywhere.
+ //
+ // The "skip" variable keeps track of how many bytes there are since the
+ // last match; dividing it by 32 (ie. right-shifting by five) gives the
+ // number of bytes to move ahead for each iteration.
+ uint32 skip = 32;
+
+ const char* next_ip = ip;
+ const char* candidate;
+ do {
+ ip = next_ip;
+ uint32 hash = next_hash;
+ DCHECK_EQ(hash, Hash(ip, shift));
+ uint32 bytes_between_hash_lookups = skip++ >> 5;
+ next_ip = ip + bytes_between_hash_lookups;
+ if (PREDICT_FALSE(next_ip > ip_limit)) {
+ goto emit_remainder;
+ }
+ next_hash = Hash(next_ip, shift);
+ candidate = base_ip + table[hash];
+ DCHECK_GE(candidate, base_ip);
+ DCHECK_LT(candidate, ip);
+
+ table[hash] = ip - base_ip;
+ } while (PREDICT_TRUE(UNALIGNED_LOAD32(ip) !=
+ UNALIGNED_LOAD32(candidate)));
+
+ // Step 2: A 4-byte match has been found. We'll later see if more
+ // than 4 bytes match. But, prior to the match, input
+ // bytes [next_emit, ip) are unmatched. Emit them as "literal bytes."
+ DCHECK_LE(next_emit + 16, ip_end);
+ op = EmitLiteral(op, next_emit, ip - next_emit, true);
+
+ // Step 3: Call EmitCopy, and then see if another EmitCopy could
+ // be our next move. Repeat until we find no match for the
+ // input immediately after what was consumed by the last EmitCopy call.
+ //
+ // If we exit this loop normally then we need to call EmitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can exit
+ // this loop via goto if we get close to exhausting the input.
+ uint64 input_bytes = 0;
+ uint32 candidate_bytes = 0;
+
+ do {
+ // We have a 4-byte match at ip, and no need to emit any
+ // "literal bytes" prior to ip.
+ const char* base = ip;
+ int matched = 4 + FindMatchLength(candidate + 4, ip + 4, ip_end);
+ ip += matched;
+ int offset = base - candidate;
+ DCHECK_EQ(0, memcmp(base, candidate, matched));
+ op = EmitCopy(op, offset, matched);
+ // We could immediately start working at ip now, but to improve
+ // compression we first update table[Hash(ip - 1, ...)].
+ const char* insert_tail = ip - 1;
+ next_emit = ip;
+ if (PREDICT_FALSE(ip >= ip_limit)) {
+ goto emit_remainder;
+ }
+ input_bytes = UNALIGNED_LOAD64(insert_tail);
+ uint32 prev_hash = HashBytes(GetUint32AtOffset(input_bytes, 0), shift);
+ table[prev_hash] = ip - base_ip - 1;
+ uint32 cur_hash = HashBytes(GetUint32AtOffset(input_bytes, 1), shift);
+ candidate = base_ip + table[cur_hash];
+ candidate_bytes = UNALIGNED_LOAD32(candidate);
+ table[cur_hash] = ip - base_ip;
+ } while (GetUint32AtOffset(input_bytes, 1) == candidate_bytes);
+
+ next_hash = HashBytes(GetUint32AtOffset(input_bytes, 2), shift);
+ ++ip;
+ }
+ }
+
+ emit_remainder:
+ // Emit the remaining bytes as a literal
+ if (next_emit < ip_end) {
+ op = EmitLiteral(op, next_emit, ip_end - next_emit, false);
+ }
+
+ return op;
+ }
+ } // end namespace internal
+
+ // Signature of output types needed by decompression code.
+ // The decompression code is templatized on a type that obeys this
+ // signature so that we do not pay virtual function call overhead in
+ // the middle of a tight decompression loop.
+ //
+ // class DecompressionWriter {
+ // public:
+ // // Called before decompression
+ // void SetExpectedLength(size_t length);
+ //
+ // // Called after decompression
+ // bool CheckLength() const;
+ //
+ // // Called repeatedly during decompression
+ // bool Append(const char* ip, uint32 length, bool allow_fast_path);
+ // bool AppendFromSelf(uint32 offset, uint32 length);
+ // };
+ //
+ // "allow_fast_path" is a parameter that says if there is at least 16
+ // readable bytes in "ip". It is currently only used by SnappyArrayWriter.
+
+ // -----------------------------------------------------------------------
+ // Lookup table for decompression code. Generated by ComputeTable() below.
+ // -----------------------------------------------------------------------
+
+ // Mapping from i in range [0,4] to a mask to extract the bottom 8*i bits
+ static const uint32 wordmask[] = {
+ 0u, 0xffu, 0xffffu, 0xffffffu, 0xffffffffu
+ };
+
+ // Data stored per entry in lookup table:
+ // Range Bits-used Description
+ // ------------------------------------
+ // 1..64 0..7 Literal/copy length encoded in opcode byte
+ // 0..7 8..10 Copy offset encoded in opcode byte / 256
+ // 0..4 11..13 Extra bytes after opcode
+ //
+ // We use eight bits for the length even though 7 would have sufficed
+ // because of efficiency reasons:
+ // (1) Extracting a byte is faster than a bit-field
+ // (2) It properly aligns copy offset so we do not need a <<8
+ static const uint16 char_table[256] = {
+ 0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002,
+ 0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004,
+ 0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006,
+ 0x0007, 0x080a, 0x1007, 0x2007, 0x0008, 0x080b, 0x1008, 0x2008,
+ 0x0009, 0x0904, 0x1009, 0x2009, 0x000a, 0x0905, 0x100a, 0x200a,
+ 0x000b, 0x0906, 0x100b, 0x200b, 0x000c, 0x0907, 0x100c, 0x200c,
+ 0x000d, 0x0908, 0x100d, 0x200d, 0x000e, 0x0909, 0x100e, 0x200e,
+ 0x000f, 0x090a, 0x100f, 0x200f, 0x0010, 0x090b, 0x1010, 0x2010,
+ 0x0011, 0x0a04, 0x1011, 0x2011, 0x0012, 0x0a05, 0x1012, 0x2012,
+ 0x0013, 0x0a06, 0x1013, 0x2013, 0x0014, 0x0a07, 0x1014, 0x2014,
+ 0x0015, 0x0a08, 0x1015, 0x2015, 0x0016, 0x0a09, 0x1016, 0x2016,
+ 0x0017, 0x0a0a, 0x1017, 0x2017, 0x0018, 0x0a0b, 0x1018, 0x2018,
+ 0x0019, 0x0b04, 0x1019, 0x2019, 0x001a, 0x0b05, 0x101a, 0x201a,
+ 0x001b, 0x0b06, 0x101b, 0x201b, 0x001c, 0x0b07, 0x101c, 0x201c,
+ 0x001d, 0x0b08, 0x101d, 0x201d, 0x001e, 0x0b09, 0x101e, 0x201e,
+ 0x001f, 0x0b0a, 0x101f, 0x201f, 0x0020, 0x0b0b, 0x1020, 0x2020,
+ 0x0021, 0x0c04, 0x1021, 0x2021, 0x0022, 0x0c05, 0x1022, 0x2022,
+ 0x0023, 0x0c06, 0x1023, 0x2023, 0x0024, 0x0c07, 0x1024, 0x2024,
+ 0x0025, 0x0c08, 0x1025, 0x2025, 0x0026, 0x0c09, 0x1026, 0x2026,
+ 0x0027, 0x0c0a, 0x1027, 0x2027, 0x0028, 0x0c0b, 0x1028, 0x2028,
+ 0x0029, 0x0d04, 0x1029, 0x2029, 0x002a, 0x0d05, 0x102a, 0x202a,
+ 0x002b, 0x0d06, 0x102b, 0x202b, 0x002c, 0x0d07, 0x102c, 0x202c,
+ 0x002d, 0x0d08, 0x102d, 0x202d, 0x002e, 0x0d09, 0x102e, 0x202e,
+ 0x002f, 0x0d0a, 0x102f, 0x202f, 0x0030, 0x0d0b, 0x1030, 0x2030,
+ 0x0031, 0x0e04, 0x1031, 0x2031, 0x0032, 0x0e05, 0x1032, 0x2032,
+ 0x0033, 0x0e06, 0x1033, 0x2033, 0x0034, 0x0e07, 0x1034, 0x2034,
+ 0x0035, 0x0e08, 0x1035, 0x2035, 0x0036, 0x0e09, 0x1036, 0x2036,
+ 0x0037, 0x0e0a, 0x1037, 0x2037, 0x0038, 0x0e0b, 0x1038, 0x2038,
+ 0x0039, 0x0f04, 0x1039, 0x2039, 0x003a, 0x0f05, 0x103a, 0x203a,
+ 0x003b, 0x0f06, 0x103b, 0x203b, 0x003c, 0x0f07, 0x103c, 0x203c,
+ 0x0801, 0x0f08, 0x103d, 0x203d, 0x1001, 0x0f09, 0x103e, 0x203e,
+ 0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040
+ };
+
+ // In debug mode, allow optional computation of the table at startup.
+ // Also, check that the decompression table is correct.
+ #ifndef NDEBUG
+ DEFINE_bool(snappy_dump_decompression_table, false,
+ "If true, we print the decompression table at startup.");
+
+ static uint16 MakeEntry(unsigned int extra,
+ unsigned int len,
+ unsigned int copy_offset) {
+ // Check that all of the fields fit within the allocated space
+ DCHECK_EQ(extra, extra & 0x7); // At most 3 bits
+ DCHECK_EQ(copy_offset, copy_offset & 0x7); // At most 3 bits
+ DCHECK_EQ(len, len & 0x7f); // At most 7 bits
+ return len | (copy_offset << 8) | (extra << 11);
+ }
+
+ static void ComputeTable() {
+ uint16 dst[256];
+
+ // Place invalid entries in all places to detect missing initialization
+ int assigned = 0;
+ for (int i = 0; i < 256; i++) {
+ dst[i] = 0xffff;
+ }
+
+ // Small LITERAL entries. We store (len-1) in the top 6 bits.
+ for (unsigned int len = 1; len <= 60; len++) {
+ dst[LITERAL | ((len-1) << 2)] = MakeEntry(0, len, 0);
+ assigned++;
+ }
+
+ // Large LITERAL entries. We use 60..63 in the high 6 bits to
+ // encode the number of bytes of length info that follow the opcode.
+ for (unsigned int extra_bytes = 1; extra_bytes <= 4; extra_bytes++) {
+ // We set the length field in the lookup table to 1 because extra
+ // bytes encode len-1.
+ dst[LITERAL | ((extra_bytes+59) << 2)] = MakeEntry(extra_bytes, 1, 0);
+ assigned++;
+ }
+
+ // COPY_1_BYTE_OFFSET.
+ //
+ // The tag byte in the compressed data stores len-4 in 3 bits, and
+ // offset/256 in 5 bits. offset%256 is stored in the next byte.
+ //
+ // This format is used for length in range [4..11] and offset in
+ // range [0..2047]
+ for (unsigned int len = 4; len < 12; len++) {
+ for (unsigned int offset = 0; offset < 2048; offset += 256) {
+ dst[COPY_1_BYTE_OFFSET | ((len-4)<<2) | ((offset>>8)<<5)] =
+ MakeEntry(1, len, offset>>8);
+ assigned++;
+ }
+ }
+
+ // COPY_2_BYTE_OFFSET.
+ // Tag contains len-1 in top 6 bits, and offset in next two bytes.
+ for (unsigned int len = 1; len <= 64; len++) {
+ dst[COPY_2_BYTE_OFFSET | ((len-1)<<2)] = MakeEntry(2, len, 0);
+ assigned++;
+ }
+
+ // COPY_4_BYTE_OFFSET.
+ // Tag contents len-1 in top 6 bits, and offset in next four bytes.
+ for (unsigned int len = 1; len <= 64; len++) {
+ dst[COPY_4_BYTE_OFFSET | ((len-1)<<2)] = MakeEntry(4, len, 0);
+ assigned++;
+ }
+
+ // Check that each entry was initialized exactly once.
+ CHECK_EQ(assigned, 256);
+ for (int i = 0; i < 256; i++) {
+ CHECK_NE(dst[i], 0xffff);
+ }
+
+ if (FLAGS_snappy_dump_decompression_table) {
+ printf("static const uint16 char_table[256] = {\n ");
+ for (int i = 0; i < 256; i++) {
+ printf("0x%04x%s",
+ dst[i],
+ ((i == 255) ? "\n" : (((i%8) == 7) ? ",\n " : ", ")));
+ }
+ printf("};\n");
+ }
+
+ // Check that computed table matched recorded table
+ for (int i = 0; i < 256; i++) {
+ CHECK_EQ(dst[i], char_table[i]);
+ }
+ }
+ REGISTER_MODULE_INITIALIZER(snappy, ComputeTable());
+ #endif /* !NDEBUG */
+
+ // Helper class for decompression
+ class SnappyDecompressor {
+ private:
+ Source* reader_; // Underlying source of bytes to decompress
+ const char* ip_; // Points to next buffered byte
+ const char* ip_limit_; // Points just past buffered bytes
+ uint32 peeked_; // Bytes peeked from reader (need to skip)
+ bool eof_; // Hit end of input without an error?
+ char scratch_[5]; // Temporary buffer for PeekFast() boundaries
+
+ // Ensure that all of the tag metadata for the next tag is available
+ // in [ip_..ip_limit_-1]. Also ensures that [ip,ip+4] is readable even
+ // if (ip_limit_ - ip_ < 5).
+ //
+ // Returns true on success, false on error or end of input.
+ bool RefillTag();
+
+ public:
+ explicit SnappyDecompressor(Source* reader)
+ : reader_(reader),
+ ip_(NULL),
+ ip_limit_(NULL),
+ peeked_(0),
+ eof_(false) {
+ }
+
+ ~SnappyDecompressor() {
+ // Advance past any bytes we peeked at from the reader
+ reader_->Skip(peeked_);
+ }
+
+ // Returns true iff we have hit the end of the input without an error.
+ bool eof() const {
+ return eof_;
+ }
+
+ // Read the uncompressed length stored at the start of the compressed data.
+ // On succcess, stores the length in *result and returns true.
+ // On failure, returns false.
+ bool ReadUncompressedLength(uint32* result) {
+ DCHECK(ip_ == NULL); // Must not have read anything yet
+ // Length is encoded in 1..5 bytes
+ *result = 0;
+ uint32 shift = 0;
+ while (true) {
+ if (shift >= 32) return false;
+ size_t n;
+ const char* ip = reader_->Peek(&n);
+ if (n == 0) return false;
+ const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
+ reader_->Skip(1);
+ *result |= static_cast<uint32>(c & 0x7f) << shift;
+ if (c < 128) {
+ break;
+ }
+ shift += 7;
+ }
+ return true;
+ }
+
+ // Process the next item found in the input.
+ // Returns true if successful, false on error or end of input.
+ template <class Writer>
+ bool Step(Writer* writer) {
+ const char* ip = ip_;
+ if (ip_limit_ - ip < 5) {
+ if (!RefillTag()) return false;
+ ip = ip_;
+ }
+
+ const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip++));
+ const uint32 entry = char_table[c];
+ const uint32 trailer = LittleEndian::Load32(ip) & wordmask[entry >> 11];
+ ip += entry >> 11;
+ const uint32 length = entry & 0xff;
+
+ if ((c & 0x3) == LITERAL) {
+ uint32 literal_length = length + trailer;
+ uint32 avail = ip_limit_ - ip;
+ while (avail < literal_length) {
+ bool allow_fast_path = (avail >= 16);
+ if (!writer->Append(ip, avail, allow_fast_path)) return false;
+ literal_length -= avail;
+ reader_->Skip(peeked_);
+ size_t n;
+ ip = reader_->Peek(&n);
+ avail = n;
+ peeked_ = avail;
+ if (avail == 0) return false; // Premature end of input
+ ip_limit_ = ip + avail;
+ }
+ ip_ = ip + literal_length;
+ bool allow_fast_path = (avail >= 16);
+ return writer->Append(ip, literal_length, allow_fast_path);
+ } else {
+ ip_ = ip;
+ // copy_offset/256 is encoded in bits 8..10. By just fetching
+ // those bits, we get copy_offset (since the bit-field starts at
+ // bit 8).
+ const uint32 copy_offset = entry & 0x700;
+ return writer->AppendFromSelf(copy_offset + trailer, length);
+ }
+ }
+ };
+
+ bool SnappyDecompressor::RefillTag() {
+ const char* ip = ip_;
+ if (ip == ip_limit_) {
+ // Fetch a new fragment from the reader
+ reader_->Skip(peeked_); // All peeked bytes are used up
+ size_t n;
+ ip = reader_->Peek(&n);
+ peeked_ = n;
+ if (n == 0) {
+ eof_ = true;
+ return false;
+ }
+ ip_limit_ = ip + n;
+ }
+
+ // Read the tag character
+ DCHECK_LT(ip, ip_limit_);
+ const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
+ const uint32 entry = char_table[c];
+ const uint32 needed = (entry >> 11) + 1; // +1 byte for 'c'
+ DCHECK_LE(needed, sizeof(scratch_));
+
+ // Read more bytes from reader if needed
+ uint32 nbuf = ip_limit_ - ip;
+ if (nbuf < needed) {
+ // Stitch together bytes from ip and reader to form the word
+ // contents. We store the needed bytes in "scratch_". They
+ // will be consumed immediately by the caller since we do not
+ // read more than we need.
+ memmove(scratch_, ip, nbuf);
+ reader_->Skip(peeked_); // All peeked bytes are used up
+ peeked_ = 0;
+ while (nbuf < needed) {
+ size_t length;
+ const char* src = reader_->Peek(&length);
+ if (length == 0) return false;
+ uint32 to_add = min<uint32>(needed - nbuf, length);
+ memcpy(scratch_ + nbuf, src, to_add);
+ nbuf += to_add;
+ reader_->Skip(to_add);
+ }
+ DCHECK_EQ(nbuf, needed);
+ ip_ = scratch_;
+ ip_limit_ = scratch_ + needed;
+ } else if (nbuf < 5) {
+ // Have enough bytes, but move into scratch_ so that we do not
+ // read past end of input
+ memmove(scratch_, ip, nbuf);
+ reader_->Skip(peeked_); // All peeked bytes are used up
+ peeked_ = 0;
+ ip_ = scratch_;
+ ip_limit_ = scratch_ + nbuf;
+ } else {
+ // Pass pointer to buffer returned by reader_.
+ ip_ = ip;
+ }
+ return true;
+ }
+
+ template <typename Writer>
+ static bool InternalUncompress(Source* r,
+ Writer* writer,
+ uint32 max_len) {
+ // Read the uncompressed length from the front of the compressed input
+ SnappyDecompressor decompressor(r);
+ uint32 uncompressed_len = 0;
+ if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
+ // Protect against possible DoS attack
+ if (static_cast<uint64>(uncompressed_len) > max_len) {
+ return false;
+ }
+
+ writer->SetExpectedLength(uncompressed_len);
+
+ // Process the entire input
+ while (decompressor.Step(writer)) { }
+ return (decompressor.eof() && writer->CheckLength());
+ }
+
+ bool GetUncompressedLength(Source* source, uint32* result) {
+ SnappyDecompressor decompressor(source);
+ return decompressor.ReadUncompressedLength(result);
+ }
+
+ size_t Compress(Source* reader, Sink* writer) {
+ size_t written = 0;
+ int N = reader->Available();
+ char ulength[Varint::kMax32];
+ char* p = Varint::Encode32(ulength, N);
+ writer->Append(ulength, p-ulength);
+ written += (p - ulength);
+
+ internal::WorkingMemory wmem;
+ char* scratch = NULL;
+ char* scratch_output = NULL;
+
+ while (N > 0) {
+ // Get next block to compress (without copying if possible)
+ size_t fragment_size;
+ const char* fragment = reader->Peek(&fragment_size);
+ DCHECK_NE(fragment_size, 0) << ": premature end of input";
+ const int num_to_read = min(N, kBlockSize);
+ size_t bytes_read = fragment_size;
+
+ int pending_advance = 0;
+ if (bytes_read >= num_to_read) {
+ // Buffer returned by reader is large enough
+ pending_advance = num_to_read;
+ fragment_size = num_to_read;
+ } else {
+ // Read into scratch buffer
+ if (scratch == NULL) {
+ // If this is the last iteration, we want to allocate N bytes
+ // of space, otherwise the max possible kBlockSize space.
+ // num_to_read contains exactly the correct value
+ scratch = new char[num_to_read];
+ }
+ memcpy(scratch, fragment, bytes_read);
+ reader->Skip(bytes_read);
+
+ while (bytes_read < num_to_read) {
+ fragment = reader->Peek(&fragment_size);
+ size_t n = min<size_t>(fragment_size, num_to_read - bytes_read);
+ memcpy(scratch + bytes_read, fragment, n);
+ bytes_read += n;
+ reader->Skip(n);
+ }
+ DCHECK_EQ(bytes_read, num_to_read);
+ fragment = scratch;
+ fragment_size = num_to_read;
+ }
+ DCHECK_EQ(fragment_size, num_to_read);
+
+ // Get encoding table for compression
+ int table_size;
+ uint16* table = wmem.GetHashTable(num_to_read, &table_size);
+
+ // Compress input_fragment and append to dest
+ const int max_output = MaxCompressedLength(num_to_read);
+
+ // Need a scratch buffer for the output, in case the byte sink doesn't
+ // have room for us directly.
+ if (scratch_output == NULL) {
+ scratch_output = new char[max_output];
+ } else {
+ // Since we encode kBlockSize regions followed by a region
+ // which is <= kBlockSize in length, a previously allocated
+ // scratch_output[] region is big enough for this iteration.
+ }
+ char* dest = writer->GetAppendBuffer(max_output, scratch_output);
+ char* end = internal::CompressFragment(fragment, fragment_size,
+ dest, table, table_size);
+ writer->Append(dest, end - dest);
+ written += (end - dest);
+
+ N -= num_to_read;
+ reader->Skip(pending_advance);
+ }
+
+ delete[] scratch;
+ delete[] scratch_output;
+
+ return written;
+ }
+
+ // -----------------------------------------------------------------------
+ // Flat array interfaces
+ // -----------------------------------------------------------------------
+
+ // A type that writes to a flat array.
+ // Note that this is not a "ByteSink", but a type that matches the
+ // Writer template argument to SnappyDecompressor::Step().
+ class SnappyArrayWriter {
+ private:
+ char* base_;
+ char* op_;
+ char* op_limit_;
+
+ public:
+ inline explicit SnappyArrayWriter(char* dst)
+ : base_(dst),
+ op_(dst) {
+ }
+
+ inline void SetExpectedLength(size_t len) {
+ op_limit_ = op_ + len;
+ }
+
+ inline bool CheckLength() const {
+ return op_ == op_limit_;
+ }
+
+ inline bool Append(const char* ip, uint32 len, bool allow_fast_path) {
+ char* op = op_;
+ const int space_left = op_limit_ - op;
+ if (allow_fast_path && len <= 16 && space_left >= 16) {
+ // Fast path, used for the majority (about 90%) of dynamic invocations.
+ UNALIGNED_STORE64(op, UNALIGNED_LOAD64(ip));
+ UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(ip + 8));
+ } else {
+ if (space_left < len) {
+ return false;
+ }
+ memcpy(op, ip, len);
+ }
+ op_ = op + len;
+ return true;
+ }
+
+ inline bool AppendFromSelf(uint32 offset, uint32 len) {
+ char* op = op_;
+ const int space_left = op_limit_ - op;
+
+ if (op - base_ <= offset - 1u) { // -1u catches offset==0
+ return false;
+ }
+ if (len <= 16 && offset >= 8 && space_left >= 16) {
+ // Fast path, used for the majority (70-80%) of dynamic invocations.
+ UNALIGNED_STORE64(op, UNALIGNED_LOAD64(op - offset));
+ UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(op - offset + 8));
+ } else {
+ if (space_left >= len + kMaxIncrementCopyOverflow) {
+ IncrementalCopyFastPath(op - offset, op, len);
+ } else {
+ if (space_left < len) {
+ return false;
+ }
+ IncrementalCopy(op - offset, op, len);
+ }
+ }
+
+ op_ = op + len;
+ return true;
+ }
+ };
+
+ bool RawUncompress(const char* compressed, size_t n, char* uncompressed) {
+ ByteArraySource reader(compressed, n);
+ return RawUncompress(&reader, uncompressed);
+ }
+
+ bool RawUncompress(Source* compressed, char* uncompressed) {
+ SnappyArrayWriter output(uncompressed);
+ return InternalUncompress(compressed, &output, kuint32max);
+ }
+
+ bool Uncompress(const char* compressed, size_t n, string* uncompressed) {
+ size_t ulength;
+ if (!GetUncompressedLength(compressed, n, &ulength)) {
+ return false;
+ }
+ // Protect against possible DoS attack
+ if ((static_cast<uint64>(ulength) + uncompressed->size()) >
+ uncompressed->max_size()) {
+ return false;
+ }
+ STLStringResizeUninitialized(uncompressed, ulength);
+ return RawUncompress(compressed, n, string_as_array(uncompressed));
+ }
+
+
+ // A Writer that drops everything on the floor and just does validation
+ class SnappyDecompressionValidator {
+ private:
+ size_t expected_;
+ size_t produced_;
+
+ public:
+ inline SnappyDecompressionValidator() : produced_(0) { }
+ inline void SetExpectedLength(size_t len) {
+ expected_ = len;
+ }
+ inline bool CheckLength() const {
+ return expected_ == produced_;
+ }
+ inline bool Append(const char* ip, uint32 len, bool allow_fast_path) {
+ produced_ += len;
+ return produced_ <= expected_;
+ }
+ inline bool AppendFromSelf(uint32 offset, uint32 len) {
+ if (produced_ <= offset - 1u) return false; // -1u catches offset==0
+ produced_ += len;
+ return produced_ <= expected_;
+ }
+ };
+
+ bool IsValidCompressedBuffer(const char* compressed, size_t n) {
+ ByteArraySource reader(compressed, n);
+ SnappyDecompressionValidator writer;
+ return InternalUncompress(&reader, &writer, kuint32max);
+ }
+
+ void RawCompress(const char* input,
+ size_t input_length,
+ char* compressed,
+ size_t* compressed_length) {
+ ByteArraySource reader(input, input_length);
+ UncheckedByteArraySink writer(compressed);
+ Compress(&reader, &writer);
+
+ // Compute how many bytes were added
+ *compressed_length = (writer.CurrentDestination() - compressed);
+ }
+
+ size_t Compress(const char* input, size_t input_length, string* compressed) {
+ // Pre-grow the buffer to the max length of the compressed output
+ compressed->resize(MaxCompressedLength(input_length));
+
+ size_t compressed_length;
+ RawCompress(input, input_length, string_as_array(compressed),
+ &compressed_length);
+ compressed->resize(compressed_length);
+ return compressed_length;
+ }
+
+
+ } // end namespace snappy
+
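
The hunk above is the complete snappy.cc vendored by the gem. For orientation, here is a minimal round-trip sketch (not part of the packaged files) of how the flat-array entry points implemented above, Compress(), IsValidCompressedBuffer(), and Uncompress(), are typically called; it assumes only the snappy.h declarations whose definitions appear in this diff.

  #include <cassert>
  #include <string>
  #include "snappy.h"

  int main() {
    // Highly compressible sample input.
    std::string input(10000, 'a');

    // Compress() pre-grows the buffer to MaxCompressedLength() and then
    // shrinks it to the actual compressed size.
    std::string compressed;
    snappy::Compress(input.data(), input.size(), &compressed);

    // IsValidCompressedBuffer() runs the decompressor against the
    // validating writer defined above, discarding the output.
    bool valid = snappy::IsValidCompressedBuffer(compressed.data(),
                                                 compressed.size());

    // Uncompress() reads the varint length prefix, resizes the output
    // string, and decodes into it through SnappyArrayWriter.
    std::string output;
    bool ok = snappy::Uncompress(compressed.data(), compressed.size(), &output);

    assert(valid && ok && output == input);
    return 0;
  }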
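The length prefix parsed by ReadUncompressedLength() above (and written by Compress() through Varint::Encode32) is an ordinary little-endian base-128 varint of one to five bytes. The following standalone sketch mirrors that decoding loop; it uses standard <cstdint> types instead of the project's internal uint32 typedef, and the helper name is illustrative, not part of the library.

  #include <cstddef>
  #include <cstdint>

  // Mirrors SnappyDecompressor::ReadUncompressedLength(): each byte carries
  // seven payload bits, lowest group first; a clear high bit marks the last
  // byte. Returns the number of bytes consumed, or 0 on truncated/over-long
  // input.
  static size_t DecodeLengthPrefix(const unsigned char* buf, size_t n,
                                   uint32_t* value) {
    *value = 0;
    uint32_t shift = 0;
    for (size_t i = 0; i < n; ++i) {
      if (shift >= 32) return 0;        // More than 5 bytes: reject.
      *value |= static_cast<uint32_t>(buf[i] & 0x7f) << shift;
      if (buf[i] < 128) return i + 1;   // High bit clear: last byte.
      shift += 7;
    }
    return 0;                           // Truncated input.
  }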