snappy_ext 0.1.2

Sign up to get free protection for your applications and to get access to all the features.
Files changed (39) hide show
  1. data/ext/snappy/extconf.rb +36 -0
  2. data/ext/snappy/snappy_ext.cc +131 -0
  3. data/ext/snappy/vendor/snappy-1.0.0/AUTHORS +1 -0
  4. data/ext/snappy/vendor/snappy-1.0.0/COPYING +28 -0
  5. data/ext/snappy/vendor/snappy-1.0.0/ChangeLog +3 -0
  6. data/ext/snappy/vendor/snappy-1.0.0/INSTALL +230 -0
  7. data/ext/snappy/vendor/snappy-1.0.0/Makefile.am +24 -0
  8. data/ext/snappy/vendor/snappy-1.0.0/Makefile.in +926 -0
  9. data/ext/snappy/vendor/snappy-1.0.0/NEWS +3 -0
  10. data/ext/snappy/vendor/snappy-1.0.0/README +132 -0
  11. data/ext/snappy/vendor/snappy-1.0.0/aclocal.m4 +9076 -0
  12. data/ext/snappy/vendor/snappy-1.0.0/autogen.sh +8 -0
  13. data/ext/snappy/vendor/snappy-1.0.0/compile +99 -0
  14. data/ext/snappy/vendor/snappy-1.0.0/config.guess +1466 -0
  15. data/ext/snappy/vendor/snappy-1.0.0/config.h.in +107 -0
  16. data/ext/snappy/vendor/snappy-1.0.0/config.sub +1579 -0
  17. data/ext/snappy/vendor/snappy-1.0.0/configure +17962 -0
  18. data/ext/snappy/vendor/snappy-1.0.0/configure.ac +99 -0
  19. data/ext/snappy/vendor/snappy-1.0.0/depcomp +530 -0
  20. data/ext/snappy/vendor/snappy-1.0.0/install-sh +323 -0
  21. data/ext/snappy/vendor/snappy-1.0.0/ltmain.sh +8413 -0
  22. data/ext/snappy/vendor/snappy-1.0.0/m4/gtest.m4 +74 -0
  23. data/ext/snappy/vendor/snappy-1.0.0/missing +360 -0
  24. data/ext/snappy/vendor/snappy-1.0.0/mkinstalldirs +158 -0
  25. data/ext/snappy/vendor/snappy-1.0.0/snappy-internal.h +136 -0
  26. data/ext/snappy/vendor/snappy-1.0.0/snappy-sinksource.cc +46 -0
  27. data/ext/snappy/vendor/snappy-1.0.0/snappy-sinksource.h +110 -0
  28. data/ext/snappy/vendor/snappy-1.0.0/snappy-stubs-internal.cc +28 -0
  29. data/ext/snappy/vendor/snappy-1.0.0/snappy-stubs-internal.h +457 -0
  30. data/ext/snappy/vendor/snappy-1.0.0/snappy-stubs-public.h +59 -0
  31. data/ext/snappy/vendor/snappy-1.0.0/snappy-stubs-public.h.in +59 -0
  32. data/ext/snappy/vendor/snappy-1.0.0/snappy-test.cc +523 -0
  33. data/ext/snappy/vendor/snappy-1.0.0/snappy-test.h +458 -0
  34. data/ext/snappy/vendor/snappy-1.0.0/snappy.cc +1001 -0
  35. data/ext/snappy/vendor/snappy-1.0.0/snappy.h +141 -0
  36. data/ext/snappy/vendor/snappy-1.0.0/snappy_unittest.cc +1073 -0
  37. data/ext/snappy/version.h +4 -0
  38. data/snappy_ext.gemspec +58 -0
  39. metadata +99 -0
@@ -0,0 +1,59 @@
1
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: sesse@google.com (Steinar H. Gunderson)
//
// Various type stubs for the open-source version of Snappy.
//
// This file cannot include config.h, as it is included from snappy.h,
// which is a public header. Instead, snappy-stubs-public.h is generated
// from snappy-stubs-public.h.in at configure time.

#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_

// The "#if 1" guards below are configure's substitution of
// @ac_cv_have_stdint_h@ / @ac_cv_have_stddef_h@ — both headers were
// detected on the platform this file was generated for.
#if 1
#include <stdint.h>
#endif

#if 1
#include <stddef.h>
#endif

// Library version (1.0.0), packed into one integer as 0xMMmmpp.
#define SNAPPY_MAJOR 1
#define SNAPPY_MINOR 0
#define SNAPPY_PATCHLEVEL 0
#define SNAPPY_VERSION \
    ((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL)

#include <string>

namespace snappy {

// Fixed-width integer aliases: from <stdint.h> when available (the
// generated case here), otherwise best-guess built-in types.
#if 1
typedef int8_t int8;
typedef uint8_t uint8;
typedef int16_t int16;
typedef uint16_t uint16;
typedef int32_t int32;
typedef uint32_t uint32;
typedef int64_t int64;
typedef uint64_t uint64;
#else
typedef signed char int8;
typedef unsigned char uint8;
typedef short int16;
typedef unsigned short uint16;
typedef int int32;
typedef unsigned int uint32;
typedef long long int64;
typedef unsigned long long uint64;
#endif

typedef std::string string;

// Declares (without defining) the copy constructor and assignment
// operator, making accidental copies of TypeName a link-time error.
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
  TypeName(const TypeName&);               \
  void operator=(const TypeName&)

}  // namespace snappy

#endif  // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
@@ -0,0 +1,59 @@
1
// Copyright 2011 Google Inc. All Rights Reserved.
// Author: sesse@google.com (Steinar H. Gunderson)
//
// Various type stubs for the open-source version of Snappy.
//
// This file cannot include config.h, as it is included from snappy.h,
// which is a public header. Instead, snappy-stubs-public.h is generated
// from snappy-stubs-public.h.in at configure time.
//
// NOTE: this is the autoconf template; the @...@ markers are replaced
// by configure (AC_SUBST) to produce snappy-stubs-public.h.

#ifndef UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
#define UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_

// Expands to 1 when configure found <stdint.h>, 0 otherwise.
#if @ac_cv_have_stdint_h@
#include <stdint.h>
#endif

// Expands to 1 when configure found <stddef.h>, 0 otherwise.
#if @ac_cv_have_stddef_h@
#include <stddef.h>
#endif

// Library version, substituted from configure.ac, packed as 0xMMmmpp.
#define SNAPPY_MAJOR @SNAPPY_MAJOR@
#define SNAPPY_MINOR @SNAPPY_MINOR@
#define SNAPPY_PATCHLEVEL @SNAPPY_PATCHLEVEL@
#define SNAPPY_VERSION \
    ((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL)

#include <string>

namespace snappy {

// Fixed-width integer aliases: from <stdint.h> when available,
// otherwise best-guess built-in types.
#if @ac_cv_have_stdint_h@
typedef int8_t int8;
typedef uint8_t uint8;
typedef int16_t int16;
typedef uint16_t uint16;
typedef int32_t int32;
typedef uint32_t uint32;
typedef int64_t int64;
typedef uint64_t uint64;
#else
typedef signed char int8;
typedef unsigned char uint8;
typedef short int16;
typedef unsigned short uint16;
typedef int int32;
typedef unsigned int uint32;
typedef long long int64;
typedef unsigned long long uint64;
#endif

typedef std::string string;

// Declares (without defining) the copy constructor and assignment
// operator, making accidental copies of TypeName a link-time error.
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
  TypeName(const TypeName&);               \
  void operator=(const TypeName&)

}  // namespace snappy

#endif  // UTIL_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
@@ -0,0 +1,523 @@
1
+ // Copyright 2011 Google Inc. All Rights Reserved.
2
+ //
3
+ // Licensed under the Apache License, Version 2.0 (the "License");
4
+ // you may not use this file except in compliance with the License.
5
+ // You may obtain a copy of the License at
6
+ //
7
+ // http://www.apache.org/licenses/LICENSE-2.0
8
+ //
9
+ // Unless required by applicable law or agreed to in writing, software
10
+ // distributed under the License is distributed on an "AS IS" BASIS,
11
+ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ // See the License for the specific language governing permissions and
13
+ // limitations under the License.
14
+ //
15
+ // Various stubs for the unit tests for the open-source version of Snappy.
16
+
17
+ #include "snappy-test.h"
18
+
19
+ #include <algorithm>
20
+
21
+ DEFINE_bool(run_microbenchmarks, true,
22
+ "Run microbenchmarks before doing anything else.");
23
+
24
+ namespace snappy {
25
+
26
+ string ReadTestDataFile(const string& base) {
27
+ string contents;
28
+ const char* srcdir = getenv("srcdir"); // This is set by Automake.
29
+ if (srcdir) {
30
+ File::ReadFileToStringOrDie(
31
+ string(srcdir) + "/testdata/" + base, &contents);
32
+ } else {
33
+ File::ReadFileToStringOrDie("testdata/" + base, &contents);
34
+ }
35
+ return contents;
36
+ }
37
+
38
+ string StringPrintf(const char* format, ...) {
39
+ char buf[4096];
40
+ va_list ap;
41
+ va_start(ap, format);
42
+ vsnprintf(buf, sizeof(buf), format, ap);
43
+ va_end(ap);
44
+ return buf;
45
+ }
46
+
47
// ---- Benchmark timing state (file-global, single-threaded use) ----
bool benchmark_running = false;       // true between Start and Stop
int64 benchmark_real_time_us = 0;     // accumulated wall-clock time, usec
int64 benchmark_cpu_time_us = 0;      // accumulated user CPU time, usec
string *benchmark_label = NULL;       // heap label set by SetBenchmarkLabel()
int64 benchmark_bytes_processed = 0;  // bytes per run, for throughput output

struct timeval benchmark_start_real;  // wall-clock snapshot at Start
struct rusage benchmark_start_cpu;    // CPU-usage snapshot at Start

// Clears the accumulated wall-clock and CPU counters.
void ResetBenchmarkTiming() {
  benchmark_real_time_us = 0;
  benchmark_cpu_time_us = 0;
}

// Snapshots the current wall-clock time and CPU usage as the start of a
// timed region. Exits the process if getrusage() fails.
void StartBenchmarkTiming() {
  gettimeofday(&benchmark_start_real, NULL);
  if (getrusage(RUSAGE_SELF, &benchmark_start_cpu) == -1) {
    perror("getrusage(RUSAGE_SELF)");
    exit(1);
  }
  benchmark_running = true;
}
69
+
70
+ void StopBenchmarkTiming() {
71
+ if (!benchmark_running) {
72
+ return;
73
+ }
74
+ struct timeval benchmark_stop_real;
75
+ gettimeofday(&benchmark_stop_real, NULL);
76
+ benchmark_real_time_us +=
77
+ 1000000 * (benchmark_stop_real.tv_sec - benchmark_start_real.tv_sec);
78
+ benchmark_real_time_us +=
79
+ (benchmark_stop_real.tv_usec - benchmark_start_real.tv_usec);
80
+
81
+ struct rusage benchmark_stop_cpu;
82
+ if (getrusage(RUSAGE_SELF, &benchmark_stop_cpu) == -1) {
83
+ perror("getrusage(RUSAGE_SELF)");
84
+ exit(1);
85
+ }
86
+ benchmark_cpu_time_us += 1000000 * (benchmark_stop_cpu.ru_utime.tv_sec -
87
+ benchmark_start_cpu.ru_utime.tv_sec);
88
+ benchmark_cpu_time_us += (benchmark_stop_cpu.ru_utime.tv_usec -
89
+ benchmark_start_cpu.ru_utime.tv_usec);
90
+ benchmark_running = false;
91
+ }
92
+
93
+ void SetBenchmarkLabel(const string& str) {
94
+ if (benchmark_label) {
95
+ delete benchmark_label;
96
+ }
97
+ benchmark_label = new string(str);
98
+ }
99
+
100
+ void SetBenchmarkBytesProcessed(int64 bytes) {
101
+ benchmark_bytes_processed = bytes;
102
+ }
103
+
104
+ struct BenchmarkRun {
105
+ int64 real_time_us;
106
+ int64 cpu_time_us;
107
+ };
108
+
109
+ struct BenchmarkCompareCPUTime {
110
+ bool operator() (const BenchmarkRun& a, const BenchmarkRun& b) const {
111
+ return a.real_time_us < b.real_time_us;
112
+ }
113
+ };
114
+
115
+ void Benchmark::Run() {
116
+ for (int test_case_num = start_; test_case_num <= stop_; ++test_case_num) {
117
+ // Run a few iterations first to find out approximately how fast
118
+ // the benchmark is.
119
+ const int kCalibrateIterations = 100;
120
+ ResetBenchmarkTiming();
121
+ StartBenchmarkTiming();
122
+ (*function_)(kCalibrateIterations, test_case_num);
123
+ StopBenchmarkTiming();
124
+
125
+ // Let each test case run for about 200ms.
126
+ // Run five times and pick the median.
127
+ const int kNumRuns = 5;
128
+ const int kMedianPos = kNumRuns / 2;
129
+ int num_iterations = 200000 * kCalibrateIterations / benchmark_real_time_us;
130
+ BenchmarkRun benchmark_runs[kNumRuns];
131
+
132
+ for (int run = 0; run < kNumRuns; ++run) {
133
+ ResetBenchmarkTiming();
134
+ StartBenchmarkTiming();
135
+ (*function_)(num_iterations, test_case_num);
136
+ StopBenchmarkTiming();
137
+
138
+ benchmark_runs[run].real_time_us = benchmark_real_time_us;
139
+ benchmark_runs[run].cpu_time_us = benchmark_cpu_time_us;
140
+ }
141
+
142
+ sort(benchmark_runs, benchmark_runs + kNumRuns, BenchmarkCompareCPUTime());
143
+ int64 real_time_us = benchmark_runs[kMedianPos].real_time_us;
144
+ int64 cpu_time_us = benchmark_runs[kMedianPos].cpu_time_us;
145
+ int64 bytes_per_second = benchmark_bytes_processed * 1000000 / cpu_time_us;
146
+
147
+ string heading = StringPrintf("%s/%d", name_.c_str(), test_case_num);
148
+ string human_readable_speed;
149
+ if (bytes_per_second < 1024) {
150
+ human_readable_speed = StringPrintf("%dB/s", bytes_per_second);
151
+ } else if (bytes_per_second < 1024 * 1024) {
152
+ human_readable_speed = StringPrintf(
153
+ "%.1fkB/s", bytes_per_second / 1024.0f);
154
+ } else if (bytes_per_second < 1024 * 1024 * 1024) {
155
+ human_readable_speed = StringPrintf(
156
+ "%.1fMB/s", bytes_per_second / (1024.0f * 1024.0f));
157
+ } else {
158
+ human_readable_speed = StringPrintf(
159
+ "%.1fGB/s", bytes_per_second / (1024.0f * 1024.0f * 1024.0f));
160
+ }
161
+
162
+ fprintf(stderr, "%-18s %10lld %10lld %10d %s %s\n",
163
+ heading.c_str(),
164
+ static_cast<long long>(real_time_us * 1000 / num_iterations),
165
+ static_cast<long long>(cpu_time_us * 1000 / num_iterations),
166
+ num_iterations,
167
+ human_readable_speed.c_str(),
168
+ benchmark_label->c_str());
169
+ }
170
+ }
171
+
172
+ #ifdef HAVE_LIBZ
173
+
174
// Constructs a codec with both zlib streams uninitialized; Reinit()
// installs the default parameters. Streams are created lazily on first
// use (see CompressInit / UncompressInit).
ZLib::ZLib()
    : comp_init_(false),
      uncomp_init_(false) {
  Reinit();
}

// Tears down whichever zlib streams were actually initialized.
ZLib::~ZLib() {
  if (comp_init_) { deflateEnd(&comp_stream_); }
  if (uncomp_init_) { inflateEnd(&uncomp_stream_); }
}

// Restores default compression parameters and destroys any live
// streams so they are rebuilt (with the new parameters) on next use.
void ZLib::Reinit() {
  compression_level_ = Z_DEFAULT_COMPRESSION;
  window_bits_ = MAX_WBITS;
  mem_level_ = 8;  // DEF_MEM_LEVEL
  if (comp_init_) {
    deflateEnd(&comp_stream_);
    comp_init_ = false;
  }
  if (uncomp_init_) {
    inflateEnd(&uncomp_stream_);
    uncomp_init_ = false;
  }
  first_chunk_ = true;
}

// Begins a new chunked (de)compression run; unlike Reinit(), the
// underlying zlib streams are kept and merely reset lazily.
void ZLib::Reset() {
  first_chunk_ = true;
}
203
+
204
// --------- COMPRESS MODE

// Initialization method to be called if we hit an error while
// compressing. On hitting an error, call this method before returning
// the error.
void ZLib::CompressErrorInit() {
  // Destroy the deflate stream entirely; it is rebuilt lazily on the
  // next CompressInit() call.
  deflateEnd(&comp_stream_);
  comp_init_ = false;
  Reset();
}

// Creates the deflate stream with the currently configured level,
// window size and memory level.
int ZLib::DeflateInit() {
  return deflateInit2(&comp_stream_,
                      compression_level_,
                      Z_DEFLATED,
                      window_bits_,
                      mem_level_,
                      Z_DEFAULT_STRATEGY);
}

// Points the deflate stream at the caller's input/output buffers and,
// on the first chunk of a run only, resets or creates the stream.
// Returns Z_OK on success, Z_BUF_ERROR if a buffer length does not fit
// in zlib's uInt fields, or a zlib error from stream creation.
int ZLib::CompressInit(Bytef *dest, uLongf *destLen,
                       const Bytef *source, uLong *sourceLen) {
  int err;

  comp_stream_.next_in = (Bytef*)source;
  comp_stream_.avail_in = (uInt)*sourceLen;
  // Guard against uLong -> uInt truncation (e.g. >4GB on LP64):
  if ((uLong)comp_stream_.avail_in != *sourceLen) return Z_BUF_ERROR;
  comp_stream_.next_out = dest;
  comp_stream_.avail_out = (uInt)*destLen;
  if ((uLong)comp_stream_.avail_out != *destLen) return Z_BUF_ERROR;

  if ( !first_chunk_ )  // only need to set up stream the first time through
    return Z_OK;

  if (comp_init_) {     // we've already initted it
    err = deflateReset(&comp_stream_);
    if (err != Z_OK) {
      LOG(WARNING) << "ERROR: Can't reset compress object; creating a new one";
      deflateEnd(&comp_stream_);
      comp_init_ = false;
    }
  }
  if (!comp_init_) {    // first use
    comp_stream_.zalloc = (alloc_func)0;
    comp_stream_.zfree = (free_func)0;
    comp_stream_.opaque = (voidpf)0;
    err = DeflateInit();
    if (err != Z_OK) return err;
    comp_init_ = true;
  }
  return Z_OK;
}
256
+
257
// In a perfect world we'd always have the full buffer to compress
// when the time came, and we could just call Compress(). Alas, we
// want to do chunked compression on our webserver. In this
// application, we compress the header, send it off, then compress the
// results, send them off, then compress the footer. Thus we need to
// use the chunked compression features of zlib.
//
// On return, *sourceLen holds the number of UNCONSUMED input bytes and
// *destLen the number of bytes written to dest by this call.
int ZLib::CompressAtMostOrAll(Bytef *dest, uLongf *destLen,
                              const Bytef *source, uLong *sourceLen,
                              int flush_mode) {  // Z_FULL_FLUSH or Z_FINISH
  int err;

  if ( (err=CompressInit(dest, destLen, source, sourceLen)) != Z_OK )
    return err;

  // This is used to figure out how many bytes we wrote *this chunk*
  int compressed_size = comp_stream_.total_out;

  // Some setup happens only for the first chunk we compress in a run
  if ( first_chunk_ ) {
    first_chunk_ = false;
  }

  // flush_mode is Z_FINISH for all mode, Z_SYNC_FLUSH for incremental
  // compression.
  err = deflate(&comp_stream_, flush_mode);

  // NOTE(review): source_bytes_consumed is computed but never used
  // below; kept as-is for parity with the original.
  const uLong source_bytes_consumed = *sourceLen - comp_stream_.avail_in;
  *sourceLen = comp_stream_.avail_in;

  if ((err == Z_STREAM_END || err == Z_OK)
      && comp_stream_.avail_in == 0
      && comp_stream_.avail_out != 0 ) {
    // we processed everything ok and the output buffer was large enough.
    ;
  } else if (err == Z_STREAM_END && comp_stream_.avail_in > 0) {
    return Z_BUF_ERROR;  // should never happen
  } else if (err != Z_OK && err != Z_STREAM_END && err != Z_BUF_ERROR) {
    // an error happened
    CompressErrorInit();
    return err;
  } else if (comp_stream_.avail_out == 0) {  // not enough space
    err = Z_BUF_ERROR;
  }

  assert(err == Z_OK || err == Z_STREAM_END || err == Z_BUF_ERROR);
  // Z_STREAM_END just means the Z_FINISH flush completed; report Z_OK.
  if (err == Z_STREAM_END)
    err = Z_OK;

  // update the crc and other metadata
  compressed_size = comp_stream_.total_out - compressed_size;  // delta
  *destLen = compressed_size;

  return err;
}
311
+
312
+ int ZLib::CompressChunkOrAll(Bytef *dest, uLongf *destLen,
313
+ const Bytef *source, uLong sourceLen,
314
+ int flush_mode) { // Z_FULL_FLUSH or Z_FINISH
315
+ const int ret =
316
+ CompressAtMostOrAll(dest, destLen, source, &sourceLen, flush_mode);
317
+ if (ret == Z_BUF_ERROR)
318
+ CompressErrorInit();
319
+ return ret;
320
+ }
321
+
322
+ // This routine only initializes the compression stream once. Thereafter, it
323
+ // just does a deflateReset on the stream, which should be faster.
324
+ int ZLib::Compress(Bytef *dest, uLongf *destLen,
325
+ const Bytef *source, uLong sourceLen) {
326
+ int err;
327
+ const uLongf orig_destLen = *destLen;
328
+ if ( (err=CompressChunkOrAll(dest, destLen, source, sourceLen,
329
+ Z_FINISH)) != Z_OK )
330
+ return err;
331
+ Reset(); // reset for next call to Compress
332
+
333
+ return Z_OK;
334
+ }
335
+
336
+
337
// --------- UNCOMPRESS MODE

// Creates the inflate stream with the maximum window size.
int ZLib::InflateInit() {
  return inflateInit2(&uncomp_stream_, MAX_WBITS);
}

// Initialization method to be called if we hit an error while
// uncompressing. On hitting an error, call this method before
// returning the error.
void ZLib::UncompressErrorInit() {
  inflateEnd(&uncomp_stream_);
  uncomp_init_ = false;
  Reset();
}

// Points the inflate stream at the caller's input/output buffers and,
// on the first chunk of a run only, resets or creates the stream.
// Returns Z_OK on success, Z_BUF_ERROR if a buffer length does not fit
// in zlib's uInt fields, or a zlib error from stream creation.
int ZLib::UncompressInit(Bytef *dest, uLongf *destLen,
                         const Bytef *source, uLong *sourceLen) {
  int err;

  uncomp_stream_.next_in = (Bytef*)source;
  uncomp_stream_.avail_in = (uInt)*sourceLen;
  // Check for source > 64K on 16-bit machine:
  if ((uLong)uncomp_stream_.avail_in != *sourceLen) return Z_BUF_ERROR;

  uncomp_stream_.next_out = dest;
  uncomp_stream_.avail_out = (uInt)*destLen;
  if ((uLong)uncomp_stream_.avail_out != *destLen) return Z_BUF_ERROR;

  if ( !first_chunk_ )  // only need to set up stream the first time through
    return Z_OK;

  if (uncomp_init_) {   // we've already initted it
    err = inflateReset(&uncomp_stream_);
    if (err != Z_OK) {
      LOG(WARNING)
        << "ERROR: Can't reset uncompress object; creating a new one";
      UncompressErrorInit();
    }
  }
  if (!uncomp_init_) {
    uncomp_stream_.zalloc = (alloc_func)0;
    uncomp_stream_.zfree = (free_func)0;
    uncomp_stream_.opaque = (voidpf)0;
    err = InflateInit();
    if (err != Z_OK) return err;
    uncomp_init_ = true;
  }
  return Z_OK;
}
386
+
387
// If you compressed your data a chunk at a time, with CompressChunk,
// you can uncompress it a chunk at a time with UncompressChunk.
// Only difference between chunked and unchunked uncompression
// is the flush mode we use: Z_SYNC_FLUSH (chunked) or Z_FINISH (unchunked).
//
// On return, *sourceLen holds the number of UNCONSUMED input bytes and
// *destLen the number of bytes written to dest by this call.
int ZLib::UncompressAtMostOrAll(Bytef *dest, uLongf *destLen,
                                const Bytef *source, uLong *sourceLen,
                                int flush_mode) {  // Z_SYNC_FLUSH or Z_FINISH
  int err = Z_OK;

  if ( (err=UncompressInit(dest, destLen, source, sourceLen)) != Z_OK ) {
    LOG(WARNING) << "UncompressInit: Error: " << err << " SourceLen: "
                 << *sourceLen;
    return err;
  }

  // This is used to figure out how many output bytes we wrote *this chunk*:
  const uLong old_total_out = uncomp_stream_.total_out;

  // This is used to figure out how many input bytes we read *this chunk*:
  const uLong old_total_in = uncomp_stream_.total_in;

  // Some setup happens only for the first chunk we compress in a run
  if ( first_chunk_ ) {
    first_chunk_ = false;  // so we don't do this again

    // For the first chunk *only* (to avoid infinite troubles), we let
    // there be no actual data to uncompress. This sometimes triggers
    // when the input is only the gzip header, say.
    if ( *sourceLen == 0 ) {
      *destLen = 0;
      return Z_OK;
    }
  }

  // We'll uncompress as much as we can. If we end OK great, otherwise
  // if we get an error that seems to be the gzip footer, we store the
  // gzip footer and return OK, otherwise we return the error.

  // flush_mode is Z_SYNC_FLUSH for chunked mode, Z_FINISH for all mode.
  err = inflate(&uncomp_stream_, flush_mode);

  // Figure out how many bytes of the input zlib slurped up:
  const uLong bytes_read = uncomp_stream_.total_in - old_total_in;
  CHECK_LE(source + bytes_read, source + *sourceLen);
  *sourceLen = uncomp_stream_.avail_in;

  if ((err == Z_STREAM_END || err == Z_OK)  // everything went ok
      && uncomp_stream_.avail_in == 0) {    // and we read it all
    ;
  } else if (err == Z_STREAM_END && uncomp_stream_.avail_in > 0) {
    // Trailing garbage after the compressed stream is a data error.
    LOG(WARNING)
      << "UncompressChunkOrAll: Received some extra data, bytes total: "
      << uncomp_stream_.avail_in << " bytes: "
      << string(reinterpret_cast<const char *>(uncomp_stream_.next_in),
                min(int(uncomp_stream_.avail_in), 20));
    UncompressErrorInit();
    return Z_DATA_ERROR;       // what's the extra data for?
  } else if (err != Z_OK && err != Z_STREAM_END && err != Z_BUF_ERROR) {
    // an error happened
    LOG(WARNING) << "UncompressChunkOrAll: Error: " << err
                 << " avail_out: " << uncomp_stream_.avail_out;
    UncompressErrorInit();
    return err;
  } else if (uncomp_stream_.avail_out == 0) {
    err = Z_BUF_ERROR;
  }

  assert(err == Z_OK || err == Z_BUF_ERROR || err == Z_STREAM_END);
  // Z_STREAM_END just means the stream finished cleanly; report Z_OK.
  if (err == Z_STREAM_END)
    err = Z_OK;

  *destLen = uncomp_stream_.total_out - old_total_out;  // size for this call

  return err;
}
462
+
463
// Uncompresses one chunk (or everything, per flush_mode), taking the
// input length by value. On Z_BUF_ERROR the uncompression state is
// torn down so the next call starts from a clean stream.
int ZLib::UncompressChunkOrAll(Bytef *dest, uLongf *destLen,
                               const Bytef *source, uLong sourceLen,
                               int flush_mode) {  // Z_SYNC_FLUSH or Z_FINISH
  const int ret =
      UncompressAtMostOrAll(dest, destLen, source, &sourceLen, flush_mode);
  if (ret == Z_BUF_ERROR)
    UncompressErrorInit();
  return ret;
}

// Chunked-mode wrapper: uncompresses as much as fits, updating
// *sourceLen to the unconsumed input count.
int ZLib::UncompressAtMost(Bytef *dest, uLongf *destLen,
                           const Bytef *source, uLong *sourceLen) {
  return UncompressAtMostOrAll(dest, destLen, source, sourceLen, Z_SYNC_FLUSH);
}

// We make sure we've uncompressed everything, that is, the current
// uncompress stream is at a compressed-buffer-EOF boundary. In gzip
// mode, we also check the gzip footer to make sure we pass the gzip
// consistency checks. We RETURN true iff both types of checks pass.
bool ZLib::UncompressChunkDone() {
  assert(!first_chunk_ && uncomp_init_);
  // Make sure we're at the end-of-compressed-data point. This means
  // if we call inflate with Z_FINISH we won't consume any input or
  // write any output
  Bytef dummyin, dummyout;
  uLongf dummylen = 0;
  if ( UncompressChunkOrAll(&dummyout, &dummylen, &dummyin, 0, Z_FINISH)
       != Z_OK ) {
    return false;
  }

  // Make sure that when we exit, we can start a new round of chunks later
  Reset();

  return true;
}
499
+
500
+ // Uncompresses the source buffer into the destination buffer.
501
+ // The destination buffer must be long enough to hold the entire
502
+ // decompressed contents.
503
+ //
504
+ // We only initialize the uncomp_stream once. Thereafter, we use
505
+ // inflateReset, which should be faster.
506
+ //
507
+ // Returns Z_OK on success, otherwise, it returns a zlib error code.
508
+ int ZLib::Uncompress(Bytef *dest, uLongf *destLen,
509
+ const Bytef *source, uLong sourceLen) {
510
+ int err;
511
+ if ( (err=UncompressChunkOrAll(dest, destLen, source, sourceLen,
512
+ Z_FINISH)) != Z_OK ) {
513
+ Reset(); // let us try to compress again
514
+ return err;
515
+ }
516
+ if ( !UncompressChunkDone() ) // calls Reset()
517
+ return Z_DATA_ERROR;
518
+ return Z_OK; // stream_end is ok
519
+ }
520
+
521
+ #endif // HAVE_LIBZ
522
+
523
+ } // namespace snappy