snappy-ruby 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. checksums.yaml +7 -0
  2. data/LICENSE +21 -0
  3. data/README.md +77 -0
  4. data/Rakefile +12 -0
  5. data/ext/snappy/extconf.rb +83 -0
  6. data/ext/snappy/snappy-src/AUTHORS +1 -0
  7. data/ext/snappy/snappy-src/BUILD.bazel +211 -0
  8. data/ext/snappy/snappy-src/CMakeLists.txt +467 -0
  9. data/ext/snappy/snappy-src/CONTRIBUTING.md +31 -0
  10. data/ext/snappy/snappy-src/COPYING +54 -0
  11. data/ext/snappy/snappy-src/MODULE.bazel +23 -0
  12. data/ext/snappy/snappy-src/NEWS +215 -0
  13. data/ext/snappy/snappy-src/README.md +165 -0
  14. data/ext/snappy/snappy-src/WORKSPACE +27 -0
  15. data/ext/snappy/snappy-src/WORKSPACE.bzlmod +0 -0
  16. data/ext/snappy/snappy-src/cmake/SnappyConfig.cmake.in +33 -0
  17. data/ext/snappy/snappy-src/cmake/config.h.in +75 -0
  18. data/ext/snappy/snappy-src/config.h +78 -0
  19. data/ext/snappy/snappy-src/docs/README.md +72 -0
  20. data/ext/snappy/snappy-src/format_description.txt +110 -0
  21. data/ext/snappy/snappy-src/framing_format.txt +135 -0
  22. data/ext/snappy/snappy-src/snappy-c.cc +90 -0
  23. data/ext/snappy/snappy-src/snappy-c.h +138 -0
  24. data/ext/snappy/snappy-src/snappy-internal.h +444 -0
  25. data/ext/snappy/snappy-src/snappy-sinksource.cc +121 -0
  26. data/ext/snappy/snappy-src/snappy-sinksource.h +182 -0
  27. data/ext/snappy/snappy-src/snappy-stubs-internal.cc +42 -0
  28. data/ext/snappy/snappy-src/snappy-stubs-internal.h +531 -0
  29. data/ext/snappy/snappy-src/snappy-stubs-public.h +60 -0
  30. data/ext/snappy/snappy-src/snappy-stubs-public.h.in +63 -0
  31. data/ext/snappy/snappy-src/snappy-test.cc +503 -0
  32. data/ext/snappy/snappy-src/snappy-test.h +342 -0
  33. data/ext/snappy/snappy-src/snappy.cc +2666 -0
  34. data/ext/snappy/snappy-src/snappy.h +257 -0
  35. data/ext/snappy/snappy-src/snappy_test_data.cc +57 -0
  36. data/ext/snappy/snappy-src/snappy_test_data.h +68 -0
  37. data/ext/snappy/snappy-src/snappy_test_tool.cc +471 -0
  38. data/ext/snappy/snappy-src/snappy_unittest.cc +1023 -0
  39. data/ext/snappy/snappy.c +282 -0
  40. data/lib/snappy/snappy.so +0 -0
  41. data/lib/snappy.rb +5 -0
  42. metadata +142 -0
data/ext/snappy/snappy-src/snappy-internal.h
@@ -0,0 +1,444 @@
+// Copyright 2008 Google Inc. All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Internals shared between the Snappy implementation and its unittest.
+
+#ifndef THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_
+#define THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_
+
+#include <utility>
+
+#include "snappy-stubs-internal.h"
+
+#if SNAPPY_HAVE_SSSE3
+// Please do not replace with <x86intrin.h> or with headers that assume more
+// advanced SSE versions without checking with all the OWNERS.
+#include <emmintrin.h>
+#include <tmmintrin.h>
+#endif
+
+#if SNAPPY_HAVE_NEON
+#include <arm_neon.h>
+#endif
+
+#if SNAPPY_RVV_1 || SNAPPY_RVV_0_7
+#define SNAPPY_HAVE_RVV 1
+#include <riscv_vector.h>
+#else
+#define SNAPPY_HAVE_RVV 0
+#endif
+
+#ifdef SNAPPY_RVV_1
+#define VSETVL_E8M2 __riscv_vsetvl_e8m2
+#define VLE8_V_U8M2 __riscv_vle8_v_u8m2
+#define VSE8_V_U8M2 __riscv_vse8_v_u8m2
+#elif SNAPPY_RVV_0_7
+#define VSETVL_E8M2 vsetvl_e8m2
+#define VLE8_V_U8M2 vle8_v_u8m2
+#define VSE8_V_U8M2 vse8_v_u8m2
+#endif
+
+#if SNAPPY_HAVE_SSSE3 || SNAPPY_HAVE_NEON
+#define SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE 1
+#else
+#define SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE 0
+#endif
+
+namespace snappy {
+namespace internal {
+
+#if SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
+#if SNAPPY_HAVE_SSSE3
+using V128 = __m128i;
+#elif SNAPPY_HAVE_NEON
+using V128 = uint8x16_t;
+#endif
+
+// Load 128 bits of integer data. `src` must be 16-byte aligned.
+inline V128 V128_Load(const V128* src);
+
+// Load 128 bits of integer data. `src` does not need to be aligned.
+inline V128 V128_LoadU(const V128* src);
+
+// Store 128 bits of integer data. `dst` does not need to be aligned.
+inline void V128_StoreU(V128* dst, V128 val);
+
+// Shuffle packed 8-bit integers using a shuffle mask.
+// Each packed integer in the shuffle mask must be in [0,16).
+inline V128 V128_Shuffle(V128 input, V128 shuffle_mask);
+
+// Constructs V128 with 16 chars |c|.
+inline V128 V128_DupChar(char c);
+
+#if SNAPPY_HAVE_SSSE3
+inline V128 V128_Load(const V128* src) { return _mm_load_si128(src); }
+
+inline V128 V128_LoadU(const V128* src) { return _mm_loadu_si128(src); }
+
+inline void V128_StoreU(V128* dst, V128 val) { _mm_storeu_si128(dst, val); }
+
+inline V128 V128_Shuffle(V128 input, V128 shuffle_mask) {
+  return _mm_shuffle_epi8(input, shuffle_mask);
+}
+
+inline V128 V128_DupChar(char c) { return _mm_set1_epi8(c); }
+
+#elif SNAPPY_HAVE_NEON
+inline V128 V128_Load(const V128* src) {
+  return vld1q_u8(reinterpret_cast<const uint8_t*>(src));
+}
+
+inline V128 V128_LoadU(const V128* src) {
+  return vld1q_u8(reinterpret_cast<const uint8_t*>(src));
+}
+
+inline void V128_StoreU(V128* dst, V128 val) {
+  vst1q_u8(reinterpret_cast<uint8_t*>(dst), val);
+}
+
+inline V128 V128_Shuffle(V128 input, V128 shuffle_mask) {
+  assert(vminvq_u8(shuffle_mask) >= 0 && vmaxvq_u8(shuffle_mask) <= 15);
+  return vqtbl1q_u8(input, shuffle_mask);
+}
+
+inline V128 V128_DupChar(char c) { return vdupq_n_u8(c); }
+
+
+#endif
+#endif  // SNAPPY_HAVE_VECTOR_BYTE_SHUFFLE
+
+// Working memory performs a single allocation to hold all scratch space
+// required for compression.
+class WorkingMemory {
+ public:
+  explicit WorkingMemory(size_t input_size);
+  ~WorkingMemory();
+
+  // Allocates and clears a hash table using memory in "*this",
+  // stores the number of buckets in "*table_size" and returns a pointer to
+  // the base of the hash table.
+  uint16_t* GetHashTable(size_t fragment_size, int* table_size) const;
+  char* GetScratchInput() const { return input_; }
+  char* GetScratchOutput() const { return output_; }
+
+ private:
+  char* mem_;        // the allocated memory, never nullptr
+  size_t size_;      // the size of the allocated memory, never 0
+  uint16_t* table_;  // the pointer to the hashtable
+  char* input_;      // the pointer to the input scratch buffer
+  char* output_;     // the pointer to the output scratch buffer
+
+  // No copying
+  WorkingMemory(const WorkingMemory&);
+  void operator=(const WorkingMemory&);
+};
+
+// Flat array compression that does not emit the "uncompressed length"
+// prefix. Compresses "input" string to the "*op" buffer.
+//
+// REQUIRES: "input_length <= kBlockSize"
+// REQUIRES: "op" points to an array of memory that is at least
+// "MaxCompressedLength(input_length)" in size.
+// REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
+// REQUIRES: "table_size" is a power of two
+//
+// Returns an "end" pointer into "op" buffer.
+// "end - op" is the compressed size of "input".
+char* CompressFragment(const char* input,
+                       size_t input_length,
+                       char* op,
+                       uint16_t* table,
+                       const int table_size);
+
+// Find the largest n such that
+//
+//   s1[0,n-1] == s2[0,n-1]
+//   and n <= (s2_limit - s2).
+//
+// Return make_pair(n, n < 8).
+// Does not read *s2_limit or beyond.
+// Does not read *(s1 + (s2_limit - s2)) or beyond.
+// Requires that s2_limit >= s2.
+//
+// In addition populate *data with the next 5 bytes from the end of the match.
+// This is only done if 8 bytes are available (s2_limit - s2 >= 8). The point is
+// that on some arch's this can be done faster in this routine than subsequent
+// loading from s2 + n.
+//
+// Separate implementation for 64-bit, little-endian cpus.
+// Little-endian RISC-V CPUs are also faster with this routine, so they use it too.
+#if !SNAPPY_IS_BIG_ENDIAN && \
+    (defined(__x86_64__) || defined(_M_X64) || defined(ARCH_PPC) || \
+     defined(ARCH_ARM) || defined(__riscv))
+static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
+                                                       const char* s2,
+                                                       const char* s2_limit,
+                                                       uint64_t* data) {
+  assert(s2_limit >= s2);
+  size_t matched = 0;
+
+  // This block isn't necessary for correctness; we could just start looping
+  // immediately. As an optimization though, it is useful. It creates some not
+  // uncommon code paths that determine, without extra effort, whether the match
+  // length is less than 8. In short, we are hoping to avoid a conditional
+  // branch, and perhaps get better code layout from the C++ compiler.
+  if (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 16)) {
+    uint64_t a1 = UNALIGNED_LOAD64(s1);
+    uint64_t a2 = UNALIGNED_LOAD64(s2);
+    if (SNAPPY_PREDICT_TRUE(a1 != a2)) {
+      // This code is critical for performance. The reason is that it determines
+      // how much to advance `ip` (s2). This obviously depends on both the loads
+      // from the `candidate` (s1) and `ip`. Furthermore the next `candidate`
+      // depends on the advanced `ip` calculated here through a load, hash and
+      // new candidate hash lookup (a lot of cycles). This makes s1 (i.e.
+      // `candidate`) the variable that limits throughput. This is the reason we
+      // go through hoops to have this function update `data` for the next iter.
+      // The straightforward code would use *data, given by
+      //
+      //   *data = UNALIGNED_LOAD64(s2 + matched_bytes)  (Latency of 5 cycles),
+      //
+      // as input for the hash table lookup to find next candidate. However
+      // this forces the load on the data dependency chain of s1, because
+      // matched_bytes directly depends on s1. However matched_bytes is 0..7, so
+      // we can also calculate *data by
+      //
+      //   *data = AlignRight(UNALIGNED_LOAD64(s2), UNALIGNED_LOAD64(s2 + 8),
+      //                      matched_bytes);
+      //
+      // The loads do not depend on s1 anymore and are thus off the bottleneck.
+      // The straightforward implementation on x86_64 would be to use
+      //
+      //   shrd rax, rdx, cl  (cl being matched_bytes * 8)
+      //
+      // unfortunately shrd with a variable shift has a 4 cycle latency. So this
+      // only wins 1 cycle. The BMI2 shrx instruction is a 1 cycle variable
+      // shift instruction but can only shift 64 bits. If we focus on just
+      // obtaining the least significant 4 bytes, we can obtain this by
+      //
+      //   *data = ConditionalMove(matched_bytes < 4, UNALIGNED_LOAD64(s2),
+      //       UNALIGNED_LOAD64(s2 + 4) >> ((matched_bytes & 3) * 8);
+      //
+      // Written like above this is not a big win, the conditional move would be
+      // a cmp followed by a cmov (2 cycles) followed by a shift (1 cycle).
+      // However matched_bytes < 4 is equal to
+      // static_cast<uint32_t>(xorval) != 0. Written that way, the conditional
+      // move (2 cycles) can execute in parallel with FindLSBSetNonZero64
+      // (tzcnt), which takes 3 cycles.
+      uint64_t xorval = a1 ^ a2;
+      int shift = Bits::FindLSBSetNonZero64(xorval);
+      size_t matched_bytes = shift >> 3;
+      uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
+#ifndef __x86_64__
+      a2 = static_cast<uint32_t>(xorval) == 0 ? a3 : a2;
+#else
+      // Ideally this would just be
+      //
+      //   a2 = static_cast<uint32_t>(xorval) == 0 ? a3 : a2;
+      //
+      // However clang correctly infers that the above statement participates on
+      // a critical data dependency chain and thus, unfortunately, refuses to
+      // use a conditional move (it's tuned to cut data dependencies). In this
+      // case there is a longer parallel chain anyway AND this will be fairly
+      // unpredictable.
+      asm("testl %k2, %k2\n\t"
+          "cmovzq %1, %0\n\t"
+          : "+r"(a2)
+          : "r"(a3), "r"(xorval)
+          : "cc");
+#endif
+      *data = a2 >> (shift & (3 * 8));
+      return std::pair<size_t, bool>(matched_bytes, true);
+    } else {
+      matched = 8;
+      s2 += 8;
+    }
+  }
+  SNAPPY_PREFETCH(s1 + 64);
+  SNAPPY_PREFETCH(s2 + 64);
+
+  // Find out how long the match is. We loop over the data 64 bits at a
+  // time until we find a 64-bit block that doesn't match; then we find
+  // the first non-matching bit and use that to calculate the total
+  // length of the match.
+  while (SNAPPY_PREDICT_TRUE(s2 <= s2_limit - 16)) {
+    uint64_t a1 = UNALIGNED_LOAD64(s1 + matched);
+    uint64_t a2 = UNALIGNED_LOAD64(s2);
+    if (a1 == a2) {
+      s2 += 8;
+      matched += 8;
+    } else {
+      uint64_t xorval = a1 ^ a2;
+      int shift = Bits::FindLSBSetNonZero64(xorval);
+      size_t matched_bytes = shift >> 3;
+      uint64_t a3 = UNALIGNED_LOAD64(s2 + 4);
+#ifndef __x86_64__
+      a2 = static_cast<uint32_t>(xorval) == 0 ? a3 : a2;
+#else
+      asm("testl %k2, %k2\n\t"
+          "cmovzq %1, %0\n\t"
+          : "+r"(a2)
+          : "r"(a3), "r"(xorval)
+          : "cc");
+#endif
+      *data = a2 >> (shift & (3 * 8));
+      matched += matched_bytes;
+      assert(matched >= 8);
+      return std::pair<size_t, bool>(matched, false);
+    }
+  }
+  while (SNAPPY_PREDICT_TRUE(s2 < s2_limit)) {
+    if (s1[matched] == *s2) {
+      ++s2;
+      ++matched;
+    } else {
+      if (s2 <= s2_limit - 8) {
+        *data = UNALIGNED_LOAD64(s2);
+      }
+      return std::pair<size_t, bool>(matched, matched < 8);
+    }
+  }
+  return std::pair<size_t, bool>(matched, matched < 8);
+}
+#else
+static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
+                                                       const char* s2,
+                                                       const char* s2_limit,
+                                                       uint64_t* data) {
+  // Implementation based on the x86-64 version, above.
+  assert(s2_limit >= s2);
+  int matched = 0;
+
+  while (s2 <= s2_limit - 4 &&
+         UNALIGNED_LOAD32(s2) == UNALIGNED_LOAD32(s1 + matched)) {
+    s2 += 4;
+    matched += 4;
+  }
+  if (LittleEndian::IsLittleEndian() && s2 <= s2_limit - 4) {
+    uint32_t x = UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
+    int matching_bits = Bits::FindLSBSetNonZero(x);
+    matched += matching_bits >> 3;
+    s2 += matching_bits >> 3;
+  } else {
+    while ((s2 < s2_limit) && (s1[matched] == *s2)) {
+      ++s2;
+      ++matched;
+    }
+  }
+  if (s2 <= s2_limit - 8) *data = LittleEndian::Load64(s2);
+  return std::pair<size_t, bool>(matched, matched < 8);
+}
+#endif
+
+static inline size_t FindMatchLengthPlain(const char* s1, const char* s2,
+                                          const char* s2_limit) {
+  // Implementation based on the x86-64 version, above.
+  assert(s2_limit >= s2);
+  int matched = 0;
+
+  while (s2 <= s2_limit - 8 &&
+         UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched)) {
+    s2 += 8;
+    matched += 8;
+  }
+  if (LittleEndian::IsLittleEndian() && s2 <= s2_limit - 8) {
+    uint64_t x = UNALIGNED_LOAD64(s2) ^ UNALIGNED_LOAD64(s1 + matched);
+    int matching_bits = Bits::FindLSBSetNonZero64(x);
+    matched += matching_bits >> 3;
+    s2 += matching_bits >> 3;
+  } else {
+    while ((s2 < s2_limit) && (s1[matched] == *s2)) {
+      ++s2;
+      ++matched;
+    }
+  }
+  return matched;
+}
+
+// Lookup tables for decompression code. Give --snappy_dump_decompression_table
+// to the unit test to recompute char_table.
+
+enum {
+  LITERAL = 0,
+  COPY_1_BYTE_OFFSET = 1,  // 3 bit length + 3 bits of offset in opcode
+  COPY_2_BYTE_OFFSET = 2,
+  COPY_4_BYTE_OFFSET = 3
+};
+static const int kMaximumTagLength = 5;  // COPY_4_BYTE_OFFSET plus the actual offset.
+
+// Data stored per entry in lookup table:
+//      Range   Bits-used       Description
+//      ------------------------------------
+//      1..64   0..7            Literal/copy length encoded in opcode byte
+//      0..7    8..10           Copy offset encoded in opcode byte / 256
+//      0..4    11..13          Extra bytes after opcode
+//
+// We use eight bits for the length even though 7 would have sufficed
+// because of efficiency reasons:
+//      (1) Extracting a byte is faster than a bit-field
+//      (2) It properly aligns copy offset so we do not need a <<8
+static constexpr uint16_t char_table[256] = {
+    // clang-format off
+    0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002,
+    0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004,
+    0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006,
+    0x0007, 0x080a, 0x1007, 0x2007, 0x0008, 0x080b, 0x1008, 0x2008,
+    0x0009, 0x0904, 0x1009, 0x2009, 0x000a, 0x0905, 0x100a, 0x200a,
+    0x000b, 0x0906, 0x100b, 0x200b, 0x000c, 0x0907, 0x100c, 0x200c,
+    0x000d, 0x0908, 0x100d, 0x200d, 0x000e, 0x0909, 0x100e, 0x200e,
+    0x000f, 0x090a, 0x100f, 0x200f, 0x0010, 0x090b, 0x1010, 0x2010,
+    0x0011, 0x0a04, 0x1011, 0x2011, 0x0012, 0x0a05, 0x1012, 0x2012,
+    0x0013, 0x0a06, 0x1013, 0x2013, 0x0014, 0x0a07, 0x1014, 0x2014,
+    0x0015, 0x0a08, 0x1015, 0x2015, 0x0016, 0x0a09, 0x1016, 0x2016,
+    0x0017, 0x0a0a, 0x1017, 0x2017, 0x0018, 0x0a0b, 0x1018, 0x2018,
+    0x0019, 0x0b04, 0x1019, 0x2019, 0x001a, 0x0b05, 0x101a, 0x201a,
+    0x001b, 0x0b06, 0x101b, 0x201b, 0x001c, 0x0b07, 0x101c, 0x201c,
+    0x001d, 0x0b08, 0x101d, 0x201d, 0x001e, 0x0b09, 0x101e, 0x201e,
+    0x001f, 0x0b0a, 0x101f, 0x201f, 0x0020, 0x0b0b, 0x1020, 0x2020,
+    0x0021, 0x0c04, 0x1021, 0x2021, 0x0022, 0x0c05, 0x1022, 0x2022,
+    0x0023, 0x0c06, 0x1023, 0x2023, 0x0024, 0x0c07, 0x1024, 0x2024,
+    0x0025, 0x0c08, 0x1025, 0x2025, 0x0026, 0x0c09, 0x1026, 0x2026,
+    0x0027, 0x0c0a, 0x1027, 0x2027, 0x0028, 0x0c0b, 0x1028, 0x2028,
+    0x0029, 0x0d04, 0x1029, 0x2029, 0x002a, 0x0d05, 0x102a, 0x202a,
+    0x002b, 0x0d06, 0x102b, 0x202b, 0x002c, 0x0d07, 0x102c, 0x202c,
+    0x002d, 0x0d08, 0x102d, 0x202d, 0x002e, 0x0d09, 0x102e, 0x202e,
+    0x002f, 0x0d0a, 0x102f, 0x202f, 0x0030, 0x0d0b, 0x1030, 0x2030,
+    0x0031, 0x0e04, 0x1031, 0x2031, 0x0032, 0x0e05, 0x1032, 0x2032,
+    0x0033, 0x0e06, 0x1033, 0x2033, 0x0034, 0x0e07, 0x1034, 0x2034,
+    0x0035, 0x0e08, 0x1035, 0x2035, 0x0036, 0x0e09, 0x1036, 0x2036,
+    0x0037, 0x0e0a, 0x1037, 0x2037, 0x0038, 0x0e0b, 0x1038, 0x2038,
+    0x0039, 0x0f04, 0x1039, 0x2039, 0x003a, 0x0f05, 0x103a, 0x203a,
+    0x003b, 0x0f06, 0x103b, 0x203b, 0x003c, 0x0f07, 0x103c, 0x203c,
+    0x0801, 0x0f08, 0x103d, 0x203d, 0x1001, 0x0f09, 0x103e, 0x203e,
+    0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040,
+    // clang-format on
+};
+
+}  // end namespace internal
+}  // end namespace snappy
+
+#endif  // THIRD_PARTY_SNAPPY_SNAPPY_INTERNAL_H_
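
A side note on the FindMatchLength fast path above: the comments describe recovering the number of matching leading bytes from a single XOR of two unaligned 8-byte loads. The sketch below is not part of the package; it restates just that step in portable C++, using std::memcpy in place of UNALIGNED_LOAD64 and the GCC/Clang builtin __builtin_ctzll in place of Bits::FindLSBSetNonZero64, and it assumes a little-endian CPU, the same condition the header's #if guard checks.

#include <cassert>
#include <cstdint>
#include <cstring>

// Count how many leading bytes of two 8-byte windows agree, as described in
// the FindMatchLength comments: on little-endian machines the lowest set bit
// of the XOR lies in the first differing byte, so dividing its bit index by 8
// gives the number of equal bytes (0..7).
inline size_t MatchedLeadingBytes(const char* s1, const char* s2) {
  uint64_t a1, a2;
  std::memcpy(&a1, s1, sizeof(a1));      // stand-in for UNALIGNED_LOAD64(s1)
  std::memcpy(&a2, s2, sizeof(a2));      // stand-in for UNALIGNED_LOAD64(s2)
  assert(a1 != a2);                      // the caller handles the all-equal case
  uint64_t xorval = a1 ^ a2;
  int shift = __builtin_ctzll(xorval);   // stand-in for Bits::FindLSBSetNonZero64
  return static_cast<size_t>(shift) >> 3;
}

The same shift value also feeds *data = a2 >> (shift & (3 * 8)) in the real code, which is how the function hands the next hash input to its caller without putting another load on the s1 dependency chain, as the long comment block explains.
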
data/ext/snappy/snappy-src/snappy-sinksource.cc
@@ -0,0 +1,121 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stddef.h>
+#include <cstring>
+
+#include "snappy-sinksource.h"
+
+namespace snappy {
+
+Source::~Source() = default;
+
+Sink::~Sink() = default;
+
+char* Sink::GetAppendBuffer(size_t length, char* scratch) {
+  // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+  (void)length;
+
+  return scratch;
+}
+
+char* Sink::GetAppendBufferVariable(
+    size_t min_size, size_t desired_size_hint, char* scratch,
+    size_t scratch_size, size_t* allocated_size) {
+  // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+  (void)min_size;
+  (void)desired_size_hint;
+
+  *allocated_size = scratch_size;
+  return scratch;
+}
+
+void Sink::AppendAndTakeOwnership(
+    char* bytes, size_t n,
+    void (*deleter)(void*, const char*, size_t),
+    void *deleter_arg) {
+  Append(bytes, n);
+  (*deleter)(deleter_arg, bytes, n);
+}
+
+ByteArraySource::~ByteArraySource() = default;
+
+size_t ByteArraySource::Available() const { return left_; }
+
+const char* ByteArraySource::Peek(size_t* len) {
+  *len = left_;
+  return ptr_;
+}
+
+void ByteArraySource::Skip(size_t n) {
+  left_ -= n;
+  ptr_ += n;
+}
+
+UncheckedByteArraySink::~UncheckedByteArraySink() { }
+
+void UncheckedByteArraySink::Append(const char* data, size_t n) {
+  // Do no copying if the caller filled in the result of GetAppendBuffer()
+  if (data != dest_) {
+    std::memcpy(dest_, data, n);
+  }
+  dest_ += n;
+}
+
+char* UncheckedByteArraySink::GetAppendBuffer(size_t len, char* scratch) {
+  // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+  (void)len;
+  (void)scratch;
+
+  return dest_;
+}
+
+void UncheckedByteArraySink::AppendAndTakeOwnership(
+    char* bytes, size_t n,
+    void (*deleter)(void*, const char*, size_t),
+    void *deleter_arg) {
+  if (bytes != dest_) {
+    std::memcpy(dest_, bytes, n);
+    (*deleter)(deleter_arg, bytes, n);
+  }
+  dest_ += n;
+}
+
+char* UncheckedByteArraySink::GetAppendBufferVariable(
+    size_t min_size, size_t desired_size_hint, char* scratch,
+    size_t scratch_size, size_t* allocated_size) {
+  // TODO: Switch to [[maybe_unused]] when we can assume C++17.
+  (void)min_size;
+  (void)scratch;
+  (void)scratch_size;
+
+  *allocated_size = desired_size_hint;
+  return dest_;
+}
+
+}  // namespace snappy
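
The file above implements the byte-array flavors of the library's Source/Sink streaming interface. A minimal usage sketch follows; it is not part of the package, assumes the class declarations from snappy-sinksource.h, and assumes the caller has already sized the output buffer, since UncheckedByteArraySink performs no bounds checking.

#include <cstddef>

#include "snappy-sinksource.h"

// Drain a ByteArraySource into an UncheckedByteArraySink using only the
// methods defined above: Available/Peek/Skip on the source, Append on the sink.
void CopyAll(snappy::ByteArraySource* source,
             snappy::UncheckedByteArraySink* sink) {
  while (source->Available() > 0) {
    size_t len = 0;
    const char* chunk = source->Peek(&len);  // contiguous view of the remaining input
    sink->Append(chunk, len);                // copies into dest_ and advances it
    source->Skip(len);
  }
}

Callers that want to avoid the copy can instead write directly into the pointer returned by GetAppendBuffer() and pass that same pointer to Append(); that is the case the `data != dest_` check in Append() is there to detect.
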