couchbase 3.0.0.alpha.1 → 3.0.0.alpha.2

Files changed (176)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/tests-6.0.3.yml +49 -0
  3. data/.github/workflows/tests.yml +47 -0
  4. data/.gitmodules +3 -0
  5. data/.idea/dictionaries/gem_terms.xml +5 -0
  6. data/.idea/inspectionProfiles/Project_Default.xml +1 -0
  7. data/.idea/vcs.xml +1 -0
  8. data/Gemfile +1 -0
  9. data/README.md +55 -2
  10. data/Rakefile +18 -0
  11. data/bin/init-cluster +62 -0
  12. data/bin/setup +1 -0
  13. data/couchbase.gemspec +3 -2
  14. data/examples/crud.rb +1 -2
  15. data/examples/managing_buckets.rb +47 -0
  16. data/examples/managing_collections.rb +58 -0
  17. data/examples/managing_query_indexes.rb +63 -0
  18. data/examples/query.rb +3 -2
  19. data/examples/query_with_consistency.rb +76 -0
  20. data/examples/subdocument.rb +23 -1
  21. data/ext/.clang-format +1 -1
  22. data/ext/.idea/dictionaries/couchbase_terms.xml +2 -0
  23. data/ext/.idea/vcs.xml +1 -0
  24. data/ext/CMakeLists.txt +30 -12
  25. data/ext/build_version.hxx.in +26 -0
  26. data/ext/couchbase/bucket.hxx +69 -8
  27. data/ext/couchbase/cluster.hxx +70 -54
  28. data/ext/couchbase/collections_manifest.hxx +3 -3
  29. data/ext/couchbase/configuration.hxx +14 -0
  30. data/ext/couchbase/couchbase.cxx +2044 -383
  31. data/ext/couchbase/{operations/document_id.hxx → document_id.hxx} +5 -4
  32. data/ext/couchbase/io/http_message.hxx +5 -1
  33. data/ext/couchbase/io/http_parser.hxx +2 -1
  34. data/ext/couchbase/io/http_session.hxx +6 -3
  35. data/ext/couchbase/io/{binary_message.hxx → mcbp_message.hxx} +15 -12
  36. data/ext/couchbase/io/mcbp_parser.hxx +99 -0
  37. data/ext/couchbase/io/{key_value_session.hxx → mcbp_session.hxx} +200 -95
  38. data/ext/couchbase/io/session_manager.hxx +37 -22
  39. data/ext/couchbase/mutation_token.hxx +2 -1
  40. data/ext/couchbase/operations.hxx +38 -8
  41. data/ext/couchbase/operations/bucket_create.hxx +138 -0
  42. data/ext/couchbase/operations/bucket_drop.hxx +65 -0
  43. data/ext/couchbase/operations/bucket_flush.hxx +65 -0
  44. data/ext/couchbase/operations/bucket_get.hxx +69 -0
  45. data/ext/couchbase/operations/bucket_get_all.hxx +62 -0
  46. data/ext/couchbase/operations/bucket_settings.hxx +111 -0
  47. data/ext/couchbase/operations/bucket_update.hxx +115 -0
  48. data/ext/couchbase/operations/cluster_developer_preview_enable.hxx +60 -0
  49. data/ext/couchbase/operations/collection_create.hxx +86 -0
  50. data/ext/couchbase/operations/collection_drop.hxx +82 -0
  51. data/ext/couchbase/operations/command.hxx +10 -10
  52. data/ext/couchbase/operations/document_decrement.hxx +80 -0
  53. data/ext/couchbase/operations/document_exists.hxx +80 -0
  54. data/ext/couchbase/operations/{get.hxx → document_get.hxx} +4 -2
  55. data/ext/couchbase/operations/document_get_and_lock.hxx +64 -0
  56. data/ext/couchbase/operations/document_get_and_touch.hxx +64 -0
  57. data/ext/couchbase/operations/document_increment.hxx +80 -0
  58. data/ext/couchbase/operations/document_insert.hxx +74 -0
  59. data/ext/couchbase/operations/{lookup_in.hxx → document_lookup_in.hxx} +2 -2
  60. data/ext/couchbase/operations/{mutate_in.hxx → document_mutate_in.hxx} +11 -2
  61. data/ext/couchbase/operations/{query.hxx → document_query.hxx} +101 -6
  62. data/ext/couchbase/operations/document_remove.hxx +67 -0
  63. data/ext/couchbase/operations/document_replace.hxx +76 -0
  64. data/ext/couchbase/operations/{upsert.hxx → document_touch.hxx} +14 -14
  65. data/ext/couchbase/operations/{remove.hxx → document_unlock.hxx} +12 -10
  66. data/ext/couchbase/operations/document_upsert.hxx +74 -0
  67. data/ext/couchbase/operations/query_index_build_deferred.hxx +85 -0
  68. data/ext/couchbase/operations/query_index_create.hxx +134 -0
  69. data/ext/couchbase/operations/query_index_drop.hxx +108 -0
  70. data/ext/couchbase/operations/query_index_get_all.hxx +106 -0
  71. data/ext/couchbase/operations/scope_create.hxx +81 -0
  72. data/ext/couchbase/operations/scope_drop.hxx +79 -0
  73. data/ext/couchbase/operations/scope_get_all.hxx +72 -0
  74. data/ext/couchbase/protocol/client_opcode.hxx +35 -0
  75. data/ext/couchbase/protocol/client_request.hxx +56 -9
  76. data/ext/couchbase/protocol/client_response.hxx +52 -15
  77. data/ext/couchbase/protocol/cmd_cluster_map_change_notification.hxx +81 -0
  78. data/ext/couchbase/protocol/cmd_decrement.hxx +187 -0
  79. data/ext/couchbase/protocol/cmd_exists.hxx +171 -0
  80. data/ext/couchbase/protocol/cmd_get.hxx +31 -8
  81. data/ext/couchbase/protocol/cmd_get_and_lock.hxx +142 -0
  82. data/ext/couchbase/protocol/cmd_get_and_touch.hxx +142 -0
  83. data/ext/couchbase/protocol/cmd_get_cluster_config.hxx +16 -3
  84. data/ext/couchbase/protocol/cmd_get_collections_manifest.hxx +16 -3
  85. data/ext/couchbase/protocol/cmd_get_error_map.hxx +16 -3
  86. data/ext/couchbase/protocol/cmd_hello.hxx +24 -8
  87. data/ext/couchbase/protocol/cmd_increment.hxx +187 -0
  88. data/ext/couchbase/protocol/cmd_info.hxx +1 -0
  89. data/ext/couchbase/protocol/cmd_insert.hxx +172 -0
  90. data/ext/couchbase/protocol/cmd_lookup_in.hxx +28 -13
  91. data/ext/couchbase/protocol/cmd_mutate_in.hxx +65 -13
  92. data/ext/couchbase/protocol/cmd_remove.hxx +59 -4
  93. data/ext/couchbase/protocol/cmd_replace.hxx +172 -0
  94. data/ext/couchbase/protocol/cmd_sasl_auth.hxx +15 -3
  95. data/ext/couchbase/protocol/cmd_sasl_list_mechs.hxx +15 -3
  96. data/ext/couchbase/protocol/cmd_sasl_step.hxx +15 -3
  97. data/ext/couchbase/protocol/cmd_select_bucket.hxx +14 -2
  98. data/ext/couchbase/protocol/cmd_touch.hxx +102 -0
  99. data/ext/couchbase/protocol/cmd_unlock.hxx +95 -0
  100. data/ext/couchbase/protocol/cmd_upsert.hxx +50 -14
  101. data/ext/couchbase/protocol/durability_level.hxx +67 -0
  102. data/ext/couchbase/protocol/frame_info_id.hxx +187 -0
  103. data/ext/couchbase/protocol/hello_feature.hxx +137 -0
  104. data/ext/couchbase/protocol/server_opcode.hxx +57 -0
  105. data/ext/couchbase/protocol/server_request.hxx +122 -0
  106. data/ext/couchbase/protocol/unsigned_leb128.h +15 -15
  107. data/ext/couchbase/utils/byteswap.hxx +1 -2
  108. data/ext/couchbase/utils/url_codec.hxx +225 -0
  109. data/ext/couchbase/version.hxx +3 -1
  110. data/ext/extconf.rb +4 -1
  111. data/ext/test/main.cxx +37 -113
  112. data/ext/third_party/snappy/.appveyor.yml +36 -0
  113. data/ext/third_party/snappy/.gitignore +8 -0
  114. data/ext/third_party/snappy/.travis.yml +98 -0
  115. data/ext/third_party/snappy/AUTHORS +1 -0
  116. data/ext/third_party/snappy/CMakeLists.txt +345 -0
  117. data/ext/third_party/snappy/CONTRIBUTING.md +26 -0
  118. data/ext/third_party/snappy/COPYING +54 -0
  119. data/ext/third_party/snappy/NEWS +188 -0
  120. data/ext/third_party/snappy/README.md +148 -0
  121. data/ext/third_party/snappy/cmake/SnappyConfig.cmake.in +33 -0
  122. data/ext/third_party/snappy/cmake/config.h.in +59 -0
  123. data/ext/third_party/snappy/docs/README.md +72 -0
  124. data/ext/third_party/snappy/format_description.txt +110 -0
  125. data/ext/third_party/snappy/framing_format.txt +135 -0
  126. data/ext/third_party/snappy/snappy-c.cc +90 -0
  127. data/ext/third_party/snappy/snappy-c.h +138 -0
  128. data/ext/third_party/snappy/snappy-internal.h +315 -0
  129. data/ext/third_party/snappy/snappy-sinksource.cc +121 -0
  130. data/ext/third_party/snappy/snappy-sinksource.h +182 -0
  131. data/ext/third_party/snappy/snappy-stubs-internal.cc +42 -0
  132. data/ext/third_party/snappy/snappy-stubs-internal.h +493 -0
  133. data/ext/third_party/snappy/snappy-stubs-public.h.in +63 -0
  134. data/ext/third_party/snappy/snappy-test.cc +613 -0
  135. data/ext/third_party/snappy/snappy-test.h +526 -0
  136. data/ext/third_party/snappy/snappy.cc +1770 -0
  137. data/ext/third_party/snappy/snappy.h +209 -0
  138. data/ext/third_party/snappy/snappy_compress_fuzzer.cc +60 -0
  139. data/ext/third_party/snappy/snappy_uncompress_fuzzer.cc +58 -0
  140. data/ext/third_party/snappy/snappy_unittest.cc +1512 -0
  141. data/ext/third_party/snappy/testdata/alice29.txt +3609 -0
  142. data/ext/third_party/snappy/testdata/asyoulik.txt +4122 -0
  143. data/ext/third_party/snappy/testdata/baddata1.snappy +0 -0
  144. data/ext/third_party/snappy/testdata/baddata2.snappy +0 -0
  145. data/ext/third_party/snappy/testdata/baddata3.snappy +0 -0
  146. data/ext/third_party/snappy/testdata/fireworks.jpeg +0 -0
  147. data/ext/third_party/snappy/testdata/geo.protodata +0 -0
  148. data/ext/third_party/snappy/testdata/html +1 -0
  149. data/ext/third_party/snappy/testdata/html_x_4 +1 -0
  150. data/ext/third_party/snappy/testdata/kppkn.gtb +0 -0
  151. data/ext/third_party/snappy/testdata/lcet10.txt +7519 -0
  152. data/ext/third_party/snappy/testdata/paper-100k.pdf +600 -2
  153. data/ext/third_party/snappy/testdata/plrabn12.txt +10699 -0
  154. data/ext/third_party/snappy/testdata/urls.10K +10000 -0
  155. data/lib/couchbase/binary_collection.rb +33 -76
  156. data/lib/couchbase/binary_collection_options.rb +94 -0
  157. data/lib/couchbase/bucket.rb +9 -3
  158. data/lib/couchbase/cluster.rb +161 -23
  159. data/lib/couchbase/collection.rb +108 -191
  160. data/lib/couchbase/collection_options.rb +430 -0
  161. data/lib/couchbase/errors.rb +136 -134
  162. data/lib/couchbase/json_transcoder.rb +32 -0
  163. data/lib/couchbase/management/analytics_index_manager.rb +185 -9
  164. data/lib/couchbase/management/bucket_manager.rb +84 -33
  165. data/lib/couchbase/management/collection_manager.rb +166 -1
  166. data/lib/couchbase/management/query_index_manager.rb +261 -0
  167. data/lib/couchbase/management/search_index_manager.rb +291 -0
  168. data/lib/couchbase/management/user_manager.rb +12 -10
  169. data/lib/couchbase/management/view_index_manager.rb +151 -1
  170. data/lib/couchbase/mutation_state.rb +11 -1
  171. data/lib/couchbase/scope.rb +4 -4
  172. data/lib/couchbase/version.rb +1 -1
  173. metadata +113 -18
  174. data/.travis.yml +0 -7
  175. data/ext/couchbase/io/binary_parser.hxx +0 -64
  176. data/lib/couchbase/results.rb +0 -307
data/ext/third_party/snappy/snappy-stubs-public.h.in
@@ -0,0 +1,63 @@
+ // Copyright 2011 Google Inc. All Rights Reserved.
+ //
+ // Redistribution and use in source and binary forms, with or without
+ // modification, are permitted provided that the following conditions are
+ // met:
+ //
+ // * Redistributions of source code must retain the above copyright
+ // notice, this list of conditions and the following disclaimer.
+ // * Redistributions in binary form must reproduce the above
+ // copyright notice, this list of conditions and the following disclaimer
+ // in the documentation and/or other materials provided with the
+ // distribution.
+ // * Neither the name of Google Inc. nor the names of its
+ // contributors may be used to endorse or promote products derived from
+ // this software without specific prior written permission.
+ //
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ //
+ // Various type stubs for the open-source version of Snappy.
+ //
+ // This file cannot include config.h, as it is included from snappy.h,
+ // which is a public header. Instead, snappy-stubs-public.h is generated by
+ // from snappy-stubs-public.h.in at configure time.
+
+ #ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+ #define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
+
+ #include <cstddef>
+
+ #if ${HAVE_SYS_UIO_H_01} // HAVE_SYS_UIO_H
+ #include <sys/uio.h>
+ #endif // HAVE_SYS_UIO_H
+
+ #define SNAPPY_MAJOR ${PROJECT_VERSION_MAJOR}
+ #define SNAPPY_MINOR ${PROJECT_VERSION_MINOR}
+ #define SNAPPY_PATCHLEVEL ${PROJECT_VERSION_PATCH}
+ #define SNAPPY_VERSION \
+ ((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL)
+
+ namespace snappy {
+
+ #if !${HAVE_SYS_UIO_H_01} // !HAVE_SYS_UIO_H
+ // Windows does not have an iovec type, yet the concept is universally useful.
+ // It is simple to define it ourselves, so we put it inside our own namespace.
+ struct iovec {
+ void* iov_base;
+ size_t iov_len;
+ };
+ #endif // !HAVE_SYS_UIO_H
+
+ } // namespace snappy
+
+ #endif // THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
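The ${HAVE_SYS_UIO_H_01} and ${PROJECT_VERSION_*} placeholders in this template are substituted by CMake when it generates snappy-stubs-public.h at configure time. Purely as an illustration (the platform flag and version numbers below are assumptions, not values taken from this release), the generated header on a Linux host with sys/uio.h available would look roughly like this:

// Illustrative output only: assumes HAVE_SYS_UIO_H_01 expanded to 1 and a
// hypothetical snappy version of 1.1.8; the real values come from CMake.
#ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_
#define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_

#include <cstddef>

#if 1  // HAVE_SYS_UIO_H
#include <sys/uio.h>
#endif  // HAVE_SYS_UIO_H

#define SNAPPY_MAJOR 1
#define SNAPPY_MINOR 1
#define SNAPPY_PATCHLEVEL 8
#define SNAPPY_VERSION \
    ((SNAPPY_MAJOR << 16) | (SNAPPY_MINOR << 8) | SNAPPY_PATCHLEVEL)  // 0x010108

namespace snappy {
// iovec comes from <sys/uio.h> in this configuration, so the
// !HAVE_SYS_UIO_H fallback definition is not emitted.
}  // namespace snappy

#endif  // THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_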
data/ext/third_party/snappy/snappy-test.cc
@@ -0,0 +1,613 @@
+ // Copyright 2011 Google Inc. All Rights Reserved.
+ //
+ // Redistribution and use in source and binary forms, with or without
+ // modification, are permitted provided that the following conditions are
+ // met:
+ //
+ // * Redistributions of source code must retain the above copyright
+ // notice, this list of conditions and the following disclaimer.
+ // * Redistributions in binary form must reproduce the above
+ // copyright notice, this list of conditions and the following disclaimer
+ // in the documentation and/or other materials provided with the
+ // distribution.
+ // * Neither the name of Google Inc. nor the names of its
+ // contributors may be used to endorse or promote products derived from
+ // this software without specific prior written permission.
+ //
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ //
+ // Various stubs for the unit tests for the open-source version of Snappy.
+
+ #ifdef HAVE_CONFIG_H
+ #include "config.h"
+ #endif
+
+ #ifdef HAVE_WINDOWS_H
+ // Needed to be able to use std::max without workarounds in the source code.
+ // https://support.microsoft.com/en-us/help/143208/prb-using-stl-in-windows-program-can-cause-min-max-conflicts
+ #define NOMINMAX
+ #include <windows.h>
+ #endif
+
+ #include "snappy-test.h"
+
+ #include <algorithm>
+
+ DEFINE_bool(run_microbenchmarks, true,
+ "Run microbenchmarks before doing anything else.");
+
+ namespace snappy {
+
+ std::string ReadTestDataFile(const std::string& base, size_t size_limit) {
+ std::string contents;
+ const char* srcdir = getenv("srcdir"); // This is set by Automake.
+ std::string prefix;
+ if (srcdir) {
+ prefix = std::string(srcdir) + "/";
+ }
+ file::GetContents(prefix + "testdata/" + base, &contents, file::Defaults()
+ ).CheckSuccess();
+ if (size_limit > 0) {
+ contents = contents.substr(0, size_limit);
+ }
+ return contents;
+ }
+
+ std::string ReadTestDataFile(const std::string& base) {
+ return ReadTestDataFile(base, 0);
+ }
+
+ std::string StrFormat(const char* format, ...) {
+ char buf[4096];
+ std::va_list ap;
+ va_start(ap, format);
+ std::vsnprintf(buf, sizeof(buf), format, ap);
+ va_end(ap);
+ return buf;
+ }
+
+ bool benchmark_running = false;
+ int64_t benchmark_real_time_us = 0;
+ int64_t benchmark_cpu_time_us = 0;
+ std::string* benchmark_label = nullptr;
+ int64_t benchmark_bytes_processed = 0;
+
+ void ResetBenchmarkTiming() {
+ benchmark_real_time_us = 0;
+ benchmark_cpu_time_us = 0;
+ }
+
+ #ifdef WIN32
+ LARGE_INTEGER benchmark_start_real;
+ FILETIME benchmark_start_cpu;
+ #else // WIN32
+ struct timeval benchmark_start_real;
+ struct rusage benchmark_start_cpu;
+ #endif // WIN32
+
+ void StartBenchmarkTiming() {
+ #ifdef WIN32
+ QueryPerformanceCounter(&benchmark_start_real);
+ FILETIME dummy;
+ CHECK(GetProcessTimes(
+ GetCurrentProcess(), &dummy, &dummy, &dummy, &benchmark_start_cpu));
+ #else
+ gettimeofday(&benchmark_start_real, NULL);
+ if (getrusage(RUSAGE_SELF, &benchmark_start_cpu) == -1) {
+ std::perror("getrusage(RUSAGE_SELF)");
+ std::exit(1);
+ }
+ #endif
+ benchmark_running = true;
+ }
+
+ void StopBenchmarkTiming() {
+ if (!benchmark_running) {
+ return;
+ }
+
+ #ifdef WIN32
+ LARGE_INTEGER benchmark_stop_real;
+ LARGE_INTEGER benchmark_frequency;
+ QueryPerformanceCounter(&benchmark_stop_real);
+ QueryPerformanceFrequency(&benchmark_frequency);
+
+ double elapsed_real = static_cast<double>(
+ benchmark_stop_real.QuadPart - benchmark_start_real.QuadPart) /
+ benchmark_frequency.QuadPart;
+ benchmark_real_time_us += elapsed_real * 1e6 + 0.5;
+
+ FILETIME benchmark_stop_cpu, dummy;
+ CHECK(GetProcessTimes(
+ GetCurrentProcess(), &dummy, &dummy, &dummy, &benchmark_stop_cpu));
+
+ ULARGE_INTEGER start_ulargeint;
+ start_ulargeint.LowPart = benchmark_start_cpu.dwLowDateTime;
+ start_ulargeint.HighPart = benchmark_start_cpu.dwHighDateTime;
+
+ ULARGE_INTEGER stop_ulargeint;
+ stop_ulargeint.LowPart = benchmark_stop_cpu.dwLowDateTime;
+ stop_ulargeint.HighPart = benchmark_stop_cpu.dwHighDateTime;
+
+ benchmark_cpu_time_us +=
+ (stop_ulargeint.QuadPart - start_ulargeint.QuadPart + 5) / 10;
+ #else // WIN32
+ struct timeval benchmark_stop_real;
+ gettimeofday(&benchmark_stop_real, NULL);
+ benchmark_real_time_us +=
+ 1000000 * (benchmark_stop_real.tv_sec - benchmark_start_real.tv_sec);
+ benchmark_real_time_us +=
+ (benchmark_stop_real.tv_usec - benchmark_start_real.tv_usec);
+
+ struct rusage benchmark_stop_cpu;
+ if (getrusage(RUSAGE_SELF, &benchmark_stop_cpu) == -1) {
+ std::perror("getrusage(RUSAGE_SELF)");
+ std::exit(1);
+ }
+ benchmark_cpu_time_us += 1000000 * (benchmark_stop_cpu.ru_utime.tv_sec -
+ benchmark_start_cpu.ru_utime.tv_sec);
+ benchmark_cpu_time_us += (benchmark_stop_cpu.ru_utime.tv_usec -
+ benchmark_start_cpu.ru_utime.tv_usec);
+ #endif // WIN32
+
+ benchmark_running = false;
+ }
+
+ void SetBenchmarkLabel(const std::string& str) {
+ if (benchmark_label) {
+ delete benchmark_label;
+ }
+ benchmark_label = new std::string(str);
+ }
+
+ void SetBenchmarkBytesProcessed(int64_t bytes) {
+ benchmark_bytes_processed = bytes;
+ }
+
+ struct BenchmarkRun {
+ int64_t real_time_us;
+ int64_t cpu_time_us;
+ };
+
+ struct BenchmarkCompareCPUTime {
+ bool operator() (const BenchmarkRun& a, const BenchmarkRun& b) const {
+ return a.cpu_time_us < b.cpu_time_us;
+ }
+ };
+
+ void Benchmark::Run() {
+ for (int test_case_num = start_; test_case_num <= stop_; ++test_case_num) {
+ // Run a few iterations first to find out approximately how fast
+ // the benchmark is.
+ const int kCalibrateIterations = 100;
+ ResetBenchmarkTiming();
+ StartBenchmarkTiming();
+ (*function_)(kCalibrateIterations, test_case_num);
+ StopBenchmarkTiming();
+
+ // Let each test case run for about 200ms, but at least as many
+ // as we used to calibrate.
+ // Run five times and pick the median.
+ const int kNumRuns = 5;
+ const int kMedianPos = kNumRuns / 2;
+ int num_iterations = 0;
+ if (benchmark_real_time_us > 0) {
+ num_iterations = 200000 * kCalibrateIterations / benchmark_real_time_us;
+ }
+ num_iterations = std::max(num_iterations, kCalibrateIterations);
+ BenchmarkRun benchmark_runs[kNumRuns];
+
+ for (int run = 0; run < kNumRuns; ++run) {
+ ResetBenchmarkTiming();
+ StartBenchmarkTiming();
+ (*function_)(num_iterations, test_case_num);
+ StopBenchmarkTiming();
+
+ benchmark_runs[run].real_time_us = benchmark_real_time_us;
+ benchmark_runs[run].cpu_time_us = benchmark_cpu_time_us;
+ }
+
+ std::string heading = StrFormat("%s/%d", name_.c_str(), test_case_num);
+ std::string human_readable_speed;
+
+ std::nth_element(benchmark_runs,
+ benchmark_runs + kMedianPos,
+ benchmark_runs + kNumRuns,
+ BenchmarkCompareCPUTime());
+ int64_t real_time_us = benchmark_runs[kMedianPos].real_time_us;
+ int64_t cpu_time_us = benchmark_runs[kMedianPos].cpu_time_us;
+ if (cpu_time_us <= 0) {
+ human_readable_speed = "?";
+ } else {
+ int64_t bytes_per_second =
+ benchmark_bytes_processed * 1000000 / cpu_time_us;
+ if (bytes_per_second < 1024) {
+ human_readable_speed =
+ StrFormat("%dB/s", static_cast<int>(bytes_per_second));
+ } else if (bytes_per_second < 1024 * 1024) {
+ human_readable_speed = StrFormat(
+ "%.1fkB/s", bytes_per_second / 1024.0f);
+ } else if (bytes_per_second < 1024 * 1024 * 1024) {
+ human_readable_speed = StrFormat(
+ "%.1fMB/s", bytes_per_second / (1024.0f * 1024.0f));
+ } else {
+ human_readable_speed = StrFormat(
+ "%.1fGB/s", bytes_per_second / (1024.0f * 1024.0f * 1024.0f));
+ }
+ }
+
+ std::fprintf(stderr,
+ #ifdef WIN32
+ "%-18s %10I64d %10I64d %10d %s %s\n",
+ #else
+ "%-18s %10lld %10lld %10d %s %s\n",
+ #endif
+ heading.c_str(),
+ static_cast<long long>(real_time_us * 1000 / num_iterations),
+ static_cast<long long>(cpu_time_us * 1000 / num_iterations),
+ num_iterations,
+ human_readable_speed.c_str(),
+ benchmark_label->c_str());
+ }
+ }
+
+ #ifdef HAVE_LIBZ
+
+ ZLib::ZLib()
+ : comp_init_(false),
+ uncomp_init_(false) {
+ Reinit();
+ }
+
+ ZLib::~ZLib() {
+ if (comp_init_) { deflateEnd(&comp_stream_); }
+ if (uncomp_init_) { inflateEnd(&uncomp_stream_); }
+ }
+
+ void ZLib::Reinit() {
+ compression_level_ = Z_DEFAULT_COMPRESSION;
+ window_bits_ = MAX_WBITS;
+ mem_level_ = 8; // DEF_MEM_LEVEL
+ if (comp_init_) {
+ deflateEnd(&comp_stream_);
+ comp_init_ = false;
+ }
+ if (uncomp_init_) {
+ inflateEnd(&uncomp_stream_);
+ uncomp_init_ = false;
+ }
+ first_chunk_ = true;
+ }
+
+ void ZLib::Reset() {
+ first_chunk_ = true;
+ }
+
+ // --------- COMPRESS MODE
+
+ // Initialization method to be called if we hit an error while
+ // compressing. On hitting an error, call this method before returning
+ // the error.
+ void ZLib::CompressErrorInit() {
+ deflateEnd(&comp_stream_);
+ comp_init_ = false;
+ Reset();
+ }
+
+ int ZLib::DeflateInit() {
+ return deflateInit2(&comp_stream_,
+ compression_level_,
+ Z_DEFLATED,
+ window_bits_,
+ mem_level_,
+ Z_DEFAULT_STRATEGY);
+ }
+
+ int ZLib::CompressInit(Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong *sourceLen) {
+ int err;
+
+ comp_stream_.next_in = (Bytef*)source;
+ comp_stream_.avail_in = (uInt)*sourceLen;
+ if ((uLong)comp_stream_.avail_in != *sourceLen) return Z_BUF_ERROR;
+ comp_stream_.next_out = dest;
+ comp_stream_.avail_out = (uInt)*destLen;
+ if ((uLong)comp_stream_.avail_out != *destLen) return Z_BUF_ERROR;
+
+ if ( !first_chunk_ ) // only need to set up stream the first time through
+ return Z_OK;
+
+ if (comp_init_) { // we've already initted it
+ err = deflateReset(&comp_stream_);
+ if (err != Z_OK) {
+ LOG(WARNING) << "ERROR: Can't reset compress object; creating a new one";
+ deflateEnd(&comp_stream_);
+ comp_init_ = false;
+ }
+ }
+ if (!comp_init_) { // first use
+ comp_stream_.zalloc = (alloc_func)0;
+ comp_stream_.zfree = (free_func)0;
+ comp_stream_.opaque = (voidpf)0;
+ err = DeflateInit();
+ if (err != Z_OK) return err;
+ comp_init_ = true;
+ }
+ return Z_OK;
+ }
+
+ // In a perfect world we'd always have the full buffer to compress
+ // when the time came, and we could just call Compress(). Alas, we
+ // want to do chunked compression on our webserver. In this
+ // application, we compress the header, send it off, then compress the
+ // results, send them off, then compress the footer. Thus we need to
+ // use the chunked compression features of zlib.
+ int ZLib::CompressAtMostOrAll(Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong *sourceLen,
+ int flush_mode) { // Z_FULL_FLUSH or Z_FINISH
+ int err;
+
+ if ( (err=CompressInit(dest, destLen, source, sourceLen)) != Z_OK )
+ return err;
+
+ // This is used to figure out how many bytes we wrote *this chunk*
+ int compressed_size = comp_stream_.total_out;
+
+ // Some setup happens only for the first chunk we compress in a run
+ if ( first_chunk_ ) {
+ first_chunk_ = false;
+ }
+
+ // flush_mode is Z_FINISH for all mode, Z_SYNC_FLUSH for incremental
+ // compression.
+ err = deflate(&comp_stream_, flush_mode);
+
+ *sourceLen = comp_stream_.avail_in;
+
+ if ((err == Z_STREAM_END || err == Z_OK)
+ && comp_stream_.avail_in == 0
+ && comp_stream_.avail_out != 0 ) {
+ // we processed everything ok and the output buffer was large enough.
+ ;
+ } else if (err == Z_STREAM_END && comp_stream_.avail_in > 0) {
+ return Z_BUF_ERROR; // should never happen
+ } else if (err != Z_OK && err != Z_STREAM_END && err != Z_BUF_ERROR) {
+ // an error happened
+ CompressErrorInit();
+ return err;
+ } else if (comp_stream_.avail_out == 0) { // not enough space
+ err = Z_BUF_ERROR;
+ }
+
+ assert(err == Z_OK || err == Z_STREAM_END || err == Z_BUF_ERROR);
+ if (err == Z_STREAM_END)
+ err = Z_OK;
+
+ // update the crc and other metadata
+ compressed_size = comp_stream_.total_out - compressed_size; // delta
+ *destLen = compressed_size;
+
+ return err;
+ }
+
+ int ZLib::CompressChunkOrAll(Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong sourceLen,
+ int flush_mode) { // Z_FULL_FLUSH or Z_FINISH
+ const int ret =
+ CompressAtMostOrAll(dest, destLen, source, &sourceLen, flush_mode);
+ if (ret == Z_BUF_ERROR)
+ CompressErrorInit();
+ return ret;
+ }
+
+ // This routine only initializes the compression stream once. Thereafter, it
+ // just does a deflateReset on the stream, which should be faster.
+ int ZLib::Compress(Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong sourceLen) {
+ int err;
+ if ( (err=CompressChunkOrAll(dest, destLen, source, sourceLen,
+ Z_FINISH)) != Z_OK )
+ return err;
+ Reset(); // reset for next call to Compress
+
+ return Z_OK;
+ }
+
+
+ // --------- UNCOMPRESS MODE
+
+ int ZLib::InflateInit() {
+ return inflateInit2(&uncomp_stream_, MAX_WBITS);
+ }
+
+ // Initialization method to be called if we hit an error while
+ // uncompressing. On hitting an error, call this method before
+ // returning the error.
+ void ZLib::UncompressErrorInit() {
+ inflateEnd(&uncomp_stream_);
+ uncomp_init_ = false;
+ Reset();
+ }
+
+ int ZLib::UncompressInit(Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong *sourceLen) {
+ int err;
+
+ uncomp_stream_.next_in = (Bytef*)source;
+ uncomp_stream_.avail_in = (uInt)*sourceLen;
+ // Check for source > 64K on 16-bit machine:
+ if ((uLong)uncomp_stream_.avail_in != *sourceLen) return Z_BUF_ERROR;
+
+ uncomp_stream_.next_out = dest;
+ uncomp_stream_.avail_out = (uInt)*destLen;
+ if ((uLong)uncomp_stream_.avail_out != *destLen) return Z_BUF_ERROR;
+
+ if ( !first_chunk_ ) // only need to set up stream the first time through
+ return Z_OK;
+
+ if (uncomp_init_) { // we've already initted it
+ err = inflateReset(&uncomp_stream_);
+ if (err != Z_OK) {
+ LOG(WARNING)
+ << "ERROR: Can't reset uncompress object; creating a new one";
+ UncompressErrorInit();
+ }
+ }
+ if (!uncomp_init_) {
+ uncomp_stream_.zalloc = (alloc_func)0;
+ uncomp_stream_.zfree = (free_func)0;
+ uncomp_stream_.opaque = (voidpf)0;
+ err = InflateInit();
+ if (err != Z_OK) return err;
+ uncomp_init_ = true;
+ }
+ return Z_OK;
+ }
+
+ // If you compressed your data a chunk at a time, with CompressChunk,
+ // you can uncompress it a chunk at a time with UncompressChunk.
+ // Only difference bewteen chunked and unchunked uncompression
+ // is the flush mode we use: Z_SYNC_FLUSH (chunked) or Z_FINISH (unchunked).
+ int ZLib::UncompressAtMostOrAll(Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong *sourceLen,
+ int flush_mode) { // Z_SYNC_FLUSH or Z_FINISH
+ int err = Z_OK;
+
+ if ( (err=UncompressInit(dest, destLen, source, sourceLen)) != Z_OK ) {
+ LOG(WARNING) << "UncompressInit: Error: " << err << " SourceLen: "
+ << *sourceLen;
+ return err;
+ }
+
+ // This is used to figure out how many output bytes we wrote *this chunk*:
+ const uLong old_total_out = uncomp_stream_.total_out;
+
+ // This is used to figure out how many input bytes we read *this chunk*:
+ const uLong old_total_in = uncomp_stream_.total_in;
+
+ // Some setup happens only for the first chunk we compress in a run
+ if ( first_chunk_ ) {
+ first_chunk_ = false; // so we don't do this again
+
+ // For the first chunk *only* (to avoid infinite troubles), we let
+ // there be no actual data to uncompress. This sometimes triggers
+ // when the input is only the gzip header, say.
+ if ( *sourceLen == 0 ) {
+ *destLen = 0;
+ return Z_OK;
+ }
+ }
+
+ // We'll uncompress as much as we can. If we end OK great, otherwise
+ // if we get an error that seems to be the gzip footer, we store the
+ // gzip footer and return OK, otherwise we return the error.
+
+ // flush_mode is Z_SYNC_FLUSH for chunked mode, Z_FINISH for all mode.
+ err = inflate(&uncomp_stream_, flush_mode);
+
+ // Figure out how many bytes of the input zlib slurped up:
+ const uLong bytes_read = uncomp_stream_.total_in - old_total_in;
+ CHECK_LE(source + bytes_read, source + *sourceLen);
+ *sourceLen = uncomp_stream_.avail_in;
+
+ if ((err == Z_STREAM_END || err == Z_OK) // everything went ok
+ && uncomp_stream_.avail_in == 0) { // and we read it all
+ ;
+ } else if (err == Z_STREAM_END && uncomp_stream_.avail_in > 0) {
+ LOG(WARNING)
+ << "UncompressChunkOrAll: Received some extra data, bytes total: "
+ << uncomp_stream_.avail_in << " bytes: "
+ << std::string(reinterpret_cast<const char *>(uncomp_stream_.next_in),
+ std::min(int(uncomp_stream_.avail_in), 20));
+ UncompressErrorInit();
+ return Z_DATA_ERROR; // what's the extra data for?
+ } else if (err != Z_OK && err != Z_STREAM_END && err != Z_BUF_ERROR) {
+ // an error happened
+ LOG(WARNING) << "UncompressChunkOrAll: Error: " << err
+ << " avail_out: " << uncomp_stream_.avail_out;
+ UncompressErrorInit();
+ return err;
+ } else if (uncomp_stream_.avail_out == 0) {
+ err = Z_BUF_ERROR;
+ }
+
+ assert(err == Z_OK || err == Z_BUF_ERROR || err == Z_STREAM_END);
+ if (err == Z_STREAM_END)
+ err = Z_OK;
+
+ *destLen = uncomp_stream_.total_out - old_total_out; // size for this call
+
+ return err;
+ }
+
+ int ZLib::UncompressChunkOrAll(Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong sourceLen,
+ int flush_mode) { // Z_SYNC_FLUSH or Z_FINISH
+ const int ret =
+ UncompressAtMostOrAll(dest, destLen, source, &sourceLen, flush_mode);
+ if (ret == Z_BUF_ERROR)
+ UncompressErrorInit();
+ return ret;
+ }
+
+ int ZLib::UncompressAtMost(Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong *sourceLen) {
+ return UncompressAtMostOrAll(dest, destLen, source, sourceLen, Z_SYNC_FLUSH);
+ }
+
+ // We make sure we've uncompressed everything, that is, the current
+ // uncompress stream is at a compressed-buffer-EOF boundary. In gzip
+ // mode, we also check the gzip footer to make sure we pass the gzip
+ // consistency checks. We RETURN true iff both types of checks pass.
+ bool ZLib::UncompressChunkDone() {
+ assert(!first_chunk_ && uncomp_init_);
+ // Make sure we're at the end-of-compressed-data point. This means
+ // if we call inflate with Z_FINISH we won't consume any input or
+ // write any output
+ Bytef dummyin, dummyout;
+ uLongf dummylen = 0;
+ if ( UncompressChunkOrAll(&dummyout, &dummylen, &dummyin, 0, Z_FINISH)
+ != Z_OK ) {
+ return false;
+ }
+
+ // Make sure that when we exit, we can start a new round of chunks later
+ Reset();
+
+ return true;
+ }
+
+ // Uncompresses the source buffer into the destination buffer.
+ // The destination buffer must be long enough to hold the entire
+ // decompressed contents.
+ //
+ // We only initialize the uncomp_stream once. Thereafter, we use
+ // inflateReset, which should be faster.
+ //
+ // Returns Z_OK on success, otherwise, it returns a zlib error code.
+ int ZLib::Uncompress(Bytef *dest, uLongf *destLen,
+ const Bytef *source, uLong sourceLen) {
+ int err;
+ if ( (err=UncompressChunkOrAll(dest, destLen, source, sourceLen,
+ Z_FINISH)) != Z_OK ) {
+ Reset(); // let us try to compress again
+ return err;
+ }
+ if ( !UncompressChunkDone() ) // calls Reset()
+ return Z_DATA_ERROR;
+ return Z_OK; // stream_end is ok
+ }
+
+ #endif // HAVE_LIBZ
+
+ } // namespace snappy
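The ZLib helper above exists only so the snappy unit tests and benchmarks can compare against zlib. As a minimal sketch of how its Compress/Uncompress entry points are meant to be called (assuming the test tree is built with HAVE_LIBZ and snappy-test.h is on the include path; the buffer sizing via compressBound is an assumption of this sketch, not part of the vendored code):

// Sketch only: round-trips a buffer through ZLib::Compress and
// ZLib::Uncompress as declared in snappy-test.h. compressBound() gives a
// safe upper bound on the deflate output size for `input`.
#include <cstring>
#include <string>
#include <zlib.h>
#include "snappy-test.h"

bool ZlibRoundTrips(const std::string& input) {
  snappy::ZLib zlib;

  // Compress into a buffer large enough for the worst case.
  uLongf compressed_len = compressBound(input.size());
  std::string compressed(compressed_len, '\0');
  if (zlib.Compress(reinterpret_cast<Bytef*>(&compressed[0]), &compressed_len,
                    reinterpret_cast<const Bytef*>(input.data()),
                    input.size()) != Z_OK) {
    return false;
  }

  // Uncompress back into a buffer of the original size and compare.
  uLongf uncompressed_len = input.size();
  std::string uncompressed(uncompressed_len, '\0');
  if (zlib.Uncompress(reinterpret_cast<Bytef*>(&uncompressed[0]),
                      &uncompressed_len,
                      reinterpret_cast<const Bytef*>(compressed.data()),
                      compressed_len) != Z_OK) {
    return false;
  }

  return uncompressed_len == input.size() &&
         std::memcmp(uncompressed.data(), input.data(), input.size()) == 0;
}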