couchbase 3.4.0 → 3.4.2
- checksums.yaml +4 -4
- data/README.md +2 -2
- data/ext/couchbase/CMakeLists.txt +10 -3
- data/ext/couchbase/cmake/CompilerWarnings.cmake +12 -4
- data/ext/couchbase/cmake/Documentation.cmake +4 -3
- data/ext/couchbase/cmake/OpenSSL.cmake +52 -7
- data/ext/couchbase/cmake/ThirdPartyDependencies.cmake +4 -0
- data/ext/couchbase/cmake/VersionInfo.cmake +39 -3
- data/ext/couchbase/cmake/test_openssl.cxx +7 -0
- data/ext/couchbase/core/cluster_options.hxx +0 -1
- data/ext/couchbase/core/config_profile.cxx +23 -1
- data/ext/couchbase/core/config_profile.hxx +2 -12
- data/ext/couchbase/core/crypto/CMakeLists.txt +5 -1
- data/ext/couchbase/core/impl/analytics.cxx +236 -0
- data/ext/couchbase/core/impl/cluster.cxx +0 -1
- data/ext/couchbase/core/impl/collection_query_index_manager.cxx +3 -3
- data/ext/couchbase/core/impl/dns_srv_tracker.cxx +5 -3
- data/ext/couchbase/core/impl/get_all_query_indexes.cxx +3 -3
- data/ext/couchbase/core/impl/query.cxx +5 -5
- data/ext/couchbase/core/impl/transaction_get_result.cxx +54 -0
- data/ext/couchbase/core/io/dns_client.cxx +225 -0
- data/ext/couchbase/core/io/dns_client.hxx +19 -188
- data/ext/couchbase/core/meta/CMakeLists.txt +7 -5
- data/ext/couchbase/core/meta/version.cxx +19 -0
- data/ext/couchbase/core/operations/document_search.cxx +5 -2
- data/ext/couchbase/core/operations/document_search.hxx +0 -1
- data/ext/couchbase/core/transactions/active_transaction_record.hxx +2 -2
- data/ext/couchbase/core/transactions/atr_cleanup_entry.cxx +1 -0
- data/ext/couchbase/core/transactions/attempt_context_impl.cxx +65 -31
- data/ext/couchbase/core/transactions/attempt_context_impl.hxx +44 -23
- data/ext/couchbase/core/transactions/forward_compat.hxx +2 -2
- data/ext/couchbase/core/transactions/internal/transaction_context.hxx +13 -13
- data/ext/couchbase/core/transactions/internal/transaction_fields.hxx +1 -0
- data/ext/couchbase/core/transactions/internal/transactions_cleanup.hxx +7 -1
- data/ext/couchbase/core/transactions/staged_mutation.cxx +1 -1
- data/ext/couchbase/core/transactions/staged_mutation.hxx +12 -2
- data/ext/couchbase/core/transactions/transaction_context.cxx +9 -11
- data/ext/couchbase/core/transactions/transaction_get_result.cxx +41 -31
- data/ext/couchbase/core/transactions/transaction_get_result.hxx +7 -3
- data/ext/couchbase/core/transactions/transaction_links.hxx +13 -1
- data/ext/couchbase/core/transactions/transactions_cleanup.cxx +144 -155
- data/ext/couchbase/core/transactions/waitable_op_list.hxx +1 -0
- data/ext/couchbase/core/utils/connection_string.cxx +10 -3
- data/ext/couchbase/core/utils/connection_string.hxx +3 -3
- data/ext/couchbase/couchbase/analytics_error_context.hxx +143 -0
- data/ext/couchbase/couchbase/analytics_meta_data.hxx +155 -0
- data/ext/couchbase/couchbase/analytics_metrics.hxx +163 -0
- data/ext/couchbase/couchbase/analytics_options.hxx +359 -0
- data/ext/couchbase/couchbase/analytics_result.hxx +102 -0
- data/ext/couchbase/couchbase/analytics_scan_consistency.hxx +46 -0
- data/ext/couchbase/couchbase/analytics_status.hxx +41 -0
- data/ext/couchbase/couchbase/analytics_warning.hxx +85 -0
- data/ext/couchbase/couchbase/cluster.hxx +35 -2
- data/ext/couchbase/couchbase/cluster_options.hxx +10 -10
- data/ext/couchbase/couchbase/collection.hxx +22 -17
- data/ext/couchbase/couchbase/collection_query_index_manager.hxx +1 -1
- data/ext/couchbase/couchbase/common_options.hxx +1 -1
- data/ext/couchbase/couchbase/configuration_profile.hxx +1 -1
- data/ext/couchbase/couchbase/configuration_profiles_registry.hxx +0 -1
- data/ext/couchbase/couchbase/create_primary_query_index_options.hxx +1 -1
- data/ext/couchbase/couchbase/drop_primary_query_index_options.hxx +1 -1
- data/ext/couchbase/couchbase/drop_query_index_options.hxx +1 -1
- data/ext/couchbase/couchbase/fmt/analytics_status.hxx +76 -0
- data/ext/couchbase/couchbase/fmt/cas.hxx +12 -0
- data/ext/couchbase/couchbase/fmt/durability_level.hxx +6 -0
- data/ext/couchbase/couchbase/fmt/key_value_extended_error_info.hxx +6 -0
- data/ext/couchbase/couchbase/fmt/key_value_status_code.hxx +6 -0
- data/ext/couchbase/couchbase/fmt/mutation_token.hxx +6 -0
- data/ext/couchbase/couchbase/fmt/query_scan_consistency.hxx +6 -0
- data/ext/couchbase/couchbase/fmt/query_status.hxx +6 -0
- data/ext/couchbase/couchbase/fmt/retry_reason.hxx +6 -0
- data/ext/couchbase/couchbase/fmt/tls_verify_mode.hxx +6 -0
- data/ext/couchbase/couchbase/get_all_query_indexes_options.hxx +5 -4
- data/ext/couchbase/couchbase/query_index_manager.hxx +4 -2
- data/ext/couchbase/couchbase/query_options.hxx +0 -1
- data/ext/couchbase/couchbase/scope.hxx +34 -1
- data/ext/couchbase/couchbase/subdoc/array_add_unique.hxx +2 -0
- data/ext/couchbase/couchbase/subdoc/array_append.hxx +2 -0
- data/ext/couchbase/couchbase/subdoc/array_insert.hxx +2 -0
- data/ext/couchbase/couchbase/subdoc/array_prepend.hxx +2 -0
- data/ext/couchbase/couchbase/subdoc/count.hxx +2 -0
- data/ext/couchbase/couchbase/subdoc/counter.hxx +2 -0
- data/ext/couchbase/couchbase/subdoc/exists.hxx +2 -0
- data/ext/couchbase/couchbase/subdoc/get.hxx +2 -0
- data/ext/couchbase/couchbase/subdoc/insert.hxx +2 -0
- data/ext/couchbase/couchbase/subdoc/remove.hxx +2 -0
- data/ext/couchbase/couchbase/subdoc/replace.hxx +3 -1
- data/ext/couchbase/couchbase/subdoc/upsert.hxx +2 -0
- data/ext/couchbase/couchbase/transaction_op_error_context.hxx +4 -4
- data/ext/couchbase/couchbase/transactions/attempt_context.hxx +1 -1
- data/ext/couchbase/couchbase/transactions/transaction_get_result.hxx +36 -51
- data/ext/couchbase/couchbase/transactions/transactions_config.hxx +1 -1
- data/ext/couchbase/test/CMakeLists.txt +3 -2
- data/ext/couchbase/test/test_helper.hxx +1 -1
- data/ext/couchbase/test/test_integration_analytics.cxx +289 -13
- data/ext/couchbase/test/test_integration_crud.cxx +8 -1
- data/ext/couchbase/test/test_integration_examples.cxx +182 -0
- data/ext/couchbase/test/test_integration_management.cxx +15 -3
- data/ext/couchbase/test/test_integration_search.cxx +601 -0
- data/ext/couchbase/test/test_transaction_transaction_simple.cxx +73 -0
- data/ext/couchbase/test/test_unit_config_profiles.cxx +12 -12
- data/ext/couchbase/test/test_unit_connection_string.cxx +35 -0
- data/ext/couchbase/test/test_unit_transaction_utils.cxx +76 -19
- data/ext/couchbase/third_party/snappy/CMakeLists.txt +150 -27
- data/ext/couchbase/third_party/snappy/cmake/config.h.in +28 -24
- data/ext/couchbase/third_party/snappy/snappy-internal.h +189 -25
- data/ext/couchbase/third_party/snappy/snappy-sinksource.cc +26 -9
- data/ext/couchbase/third_party/snappy/snappy-sinksource.h +11 -11
- data/ext/couchbase/third_party/snappy/snappy-stubs-internal.cc +1 -1
- data/ext/couchbase/third_party/snappy/snappy-stubs-internal.h +227 -308
- data/ext/couchbase/third_party/snappy/snappy-stubs-public.h.in +0 -11
- data/ext/couchbase/third_party/snappy/snappy.cc +1176 -410
- data/ext/couchbase/third_party/snappy/snappy.h +19 -4
- data/ext/couchbase.cxx +506 -26
- data/ext/extconf.rb +2 -1
- data/ext/revisions.rb +3 -2
- data/lib/couchbase/binary_collection.rb +4 -4
- data/lib/couchbase/cluster.rb +13 -9
- data/lib/couchbase/cluster_registry.rb +7 -2
- data/lib/couchbase/collection.rb +5 -0
- data/lib/couchbase/configuration.rb +3 -4
- data/lib/couchbase/errors.rb +10 -0
- data/lib/couchbase/management/collection_query_index_manager.rb +183 -0
- data/lib/couchbase/management/query_index_manager.rb +35 -3
- data/lib/couchbase/management.rb +1 -0
- data/lib/couchbase/options.rb +87 -5
- data/lib/couchbase/search_options.rb +158 -240
- data/lib/couchbase/version.rb +1 -1
- metadata +21 -6
- data/ext/couchbase/core/CMakeLists.txt +0 -0
--- a/data/ext/couchbase/third_party/snappy/snappy-stubs-internal.h
+++ b/data/ext/couchbase/third_party/snappy/snappy-stubs-internal.h
@@ -31,21 +31,23 @@
 #ifndef THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
 #define THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_INTERNAL_H_
 
-#ifdef HAVE_CONFIG_H
+#if HAVE_CONFIG_H
 #include "config.h"
 #endif
 
-#include <string>
+#include <stdint.h>
 
-#include <assert.h>
-#include <stdlib.h>
-#include <string.h>
+#include <cassert>
+#include <cstdlib>
+#include <cstring>
+#include <limits>
+#include <string>
 
-#ifdef HAVE_SYS_MMAN_H
+#if HAVE_SYS_MMAN_H
 #include <sys/mman.h>
 #endif
 
-#ifdef HAVE_UNISTD_H
+#if HAVE_UNISTD_H
 #include <unistd.h>
 #endif
 
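The `#ifdef HAVE_CONFIG_H` → `#if HAVE_CONFIG_H` change above (repeated for the `HAVE_SYS_MMAN_H` and `HAVE_UNISTD_H` guards) relies on the build system defining every feature macro to 0 or 1, CMake `#cmakedefine01` style, instead of defining it only when the feature exists. A minimal standalone sketch of the preprocessor semantics involved, with made-up `HAVE_*` names:

```cpp
// Illustrative only: how #if treats 0/1-valued feature macros.
// With #if, an undefined macro evaluates to 0 and a macro defined
// as 0 is falsy, so the generator must emit HAVE_* as 0 or 1.
#define HAVE_FEATURE_A 1  // e.g. produced by CMake's #cmakedefine01
#define HAVE_FEATURE_B 0

#if HAVE_FEATURE_A
static const bool kFeatureA = true;
#else
static const bool kFeatureA = false;
#endif

#if HAVE_FEATURE_B  // defined, but 0: branch not taken
static const bool kFeatureB = true;
#else
static const bool kFeatureB = false;
#endif

int main() { return (kFeatureA && !kFeatureB) ? 0 : 1; }
```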
@@ -67,19 +69,11 @@
 
 #include "snappy-stubs-public.h"
 
-#if defined(__x86_64__)
-
-// Enable 64-bit optimized versions of some routines.
-#define ARCH_K8 1
-
-#elif defined(__ppc64__)
-
+// Used to enable 64-bit optimized versions of some routines.
+#if defined(__PPC64__) || defined(__powerpc64__)
 #define ARCH_PPC 1
-
-#elif defined(__aarch64__)
-
+#elif defined(__aarch64__) || defined(_M_ARM64)
 #define ARCH_ARM 1
-
 #endif
 
 // Needed by OS X, among others.
@@ -93,223 +87,77 @@
 #ifdef ARRAYSIZE
 #undef ARRAYSIZE
 #endif
-#define ARRAYSIZE(a) (sizeof(a) / sizeof(*(a)))
+#define ARRAYSIZE(a) int{sizeof(a) / sizeof(*(a))}
 
 // Static prediction hints.
-#ifdef HAVE_BUILTIN_EXPECT
+#if HAVE_BUILTIN_EXPECT
 #define SNAPPY_PREDICT_FALSE(x) (__builtin_expect(x, 0))
 #define SNAPPY_PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
 #else
 #define SNAPPY_PREDICT_FALSE(x) x
 #define SNAPPY_PREDICT_TRUE(x) x
-#endif
-
-// This is only used for recomputing the tag byte table used during
-// decompression; for simplicity we just remove it from the open-source
-// version (anyone who wants to regenerate it can just do the call
-// themselves within main()).
-#define DEFINE_bool(flag_name, default_value, description) \
-  bool FLAGS_ ## flag_name = default_value
-#define DECLARE_bool(flag_name) \
-  extern bool FLAGS_ ## flag_name
-
-namespace snappy {
-
-static const uint32 kuint32max = static_cast<uint32>(0xFFFFFFFF);
-static const int64 kint64max = static_cast<int64>(0x7FFFFFFFFFFFFFFFLL);
+#endif  // HAVE_BUILTIN_EXPECT
 
-// Potentially unaligned loads and stores.
-
-// x86, PowerPC, and ARM64 can simply do these loads and stores native.
-
-#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
-    defined(__aarch64__)
-
-#define UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
-#define UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
-#define UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))
-
-#define UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
-#define UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
-#define UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))
-
-// ARMv7 and newer support native unaligned accesses, but only of 16-bit
-// and 32-bit values (not 64-bit); older versions either raise a fatal signal,
-// do an unaligned read and rotate the words around a bit, or do the reads very
-// slowly (trip through kernel mode). There's no simple #define that says just
-// “ARMv7 or higher”, so we have to filter away all ARMv5 and ARMv6
-// sub-architectures.
-//
-// This is a mess, but there's not much we can do about it.
-//
-// To further complicate matters, only LDR instructions (single reads) are
-// allowed to be unaligned, not LDRD (two reads) or LDM (many reads). Unless we
-// explicitly tell the compiler that these accesses can be unaligned, it can and
-// will combine accesses. On armcc, the way to signal this is done by accessing
-// through the type (uint32 __packed *), but GCC has no such attribute
-// (it ignores __attribute__((packed)) on individual variables). However,
-// we can tell it that a _struct_ is unaligned, which has the same effect,
-// so we do that.
-
-#elif defined(__arm__) && \
-      !defined(__ARM_ARCH_4__) && \
-      !defined(__ARM_ARCH_4T__) && \
-      !defined(__ARM_ARCH_5__) && \
-      !defined(__ARM_ARCH_5T__) && \
-      !defined(__ARM_ARCH_5TE__) && \
-      !defined(__ARM_ARCH_5TEJ__) && \
-      !defined(__ARM_ARCH_6__) && \
-      !defined(__ARM_ARCH_6J__) && \
-      !defined(__ARM_ARCH_6K__) && \
-      !defined(__ARM_ARCH_6Z__) && \
-      !defined(__ARM_ARCH_6ZK__) && \
-      !defined(__ARM_ARCH_6T2__)
-
-#if __GNUC__
-#define ATTRIBUTE_PACKED __attribute__((__packed__))
+// Inlining hints.
+#if HAVE_ATTRIBUTE_ALWAYS_INLINE
+#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE __attribute__((always_inline))
 #else
-#define ATTRIBUTE_PACKED
-#endif
+#define SNAPPY_ATTRIBUTE_ALWAYS_INLINE
+#endif  // HAVE_ATTRIBUTE_ALWAYS_INLINE
 
-namespace base {
-namespace internal {
-
-struct Unaligned16Struct {
-  uint16 value;
-  uint8 dummy;  // To make the size non-power-of-two.
-} ATTRIBUTE_PACKED;
-
-struct Unaligned32Struct {
-  uint32 value;
-  uint8 dummy;  // To make the size non-power-of-two.
-} ATTRIBUTE_PACKED;
-
-}  // namespace internal
-}  // namespace base
-
-#define UNALIGNED_LOAD16(_p) \
-    ((reinterpret_cast<const ::snappy::base::internal::Unaligned16Struct *>(_p))->value)
-#define UNALIGNED_LOAD32(_p) \
-    ((reinterpret_cast<const ::snappy::base::internal::Unaligned32Struct *>(_p))->value)
-
-#define UNALIGNED_STORE16(_p, _val) \
-    ((reinterpret_cast< ::snappy::base::internal::Unaligned16Struct *>(_p))->value = \
-         (_val))
-#define UNALIGNED_STORE32(_p, _val) \
-    ((reinterpret_cast< ::snappy::base::internal::Unaligned32Struct *>(_p))->value = \
-         (_val))
-
-// TODO: NEON supports unaligned 64-bit loads and stores.
-// See if that would be more efficient on platforms supporting it,
-// at least for copies.
-
-inline uint64 UNALIGNED_LOAD64(const void *p) {
-  uint64 t;
-  memcpy(&t, p, sizeof t);
-  return t;
-}
-
-inline void UNALIGNED_STORE64(void *p, uint64 v) {
-  memcpy(p, &v, sizeof v);
-}
-
-#else
+// Stubbed version of ABSL_FLAG.
+//
+// In the open source version, flags can only be changed at compile time.
+#define SNAPPY_FLAG(flag_type, flag_name, default_value, help) \
+  flag_type FLAGS_ ## flag_name = default_value
 
-// These functions are provided for architectures that don't support
-// unaligned loads and stores.
+namespace snappy {
 
-inline uint16 UNALIGNED_LOAD16(const void *p) {
-  uint16 t;
-  memcpy(&t, p, sizeof t);
-  return t;
-}
+// Stubbed version of absl::GetFlag().
+template <typename T>
+inline T GetFlag(T flag) { return flag; }
 
-inline uint32 UNALIGNED_LOAD32(const void *p) {
-  uint32 t;
-  memcpy(&t, p, sizeof t);
-  return t;
-}
+static const uint32_t kuint32max = std::numeric_limits<uint32_t>::max();
+static const int64_t kint64max = std::numeric_limits<int64_t>::max();
 
-inline uint64 UNALIGNED_LOAD64(const void *p) {
-  uint64 t;
-  memcpy(&t, p, sizeof t);
-  return t;
-}
+// Potentially unaligned loads and stores.
 
-inline void UNALIGNED_STORE16(void *p, uint16 v) {
-  memcpy(p, &v, sizeof v);
+inline uint16_t UNALIGNED_LOAD16(const void *p) {
+  // Compiles to a single movzx/ldrh on clang/gcc/msvc.
+  uint16_t v;
+  std::memcpy(&v, p, sizeof(v));
+  return v;
 }
 
-inline void UNALIGNED_STORE32(void *p, uint32 v) {
-  memcpy(p, &v, sizeof v);
+inline uint32_t UNALIGNED_LOAD32(const void *p) {
+  // Compiles to a single mov/ldr on clang/gcc/msvc.
+  uint32_t v;
+  std::memcpy(&v, p, sizeof(v));
+  return v;
 }
 
-inline void UNALIGNED_STORE64(void *p, uint64 v) {
-  memcpy(p, &v, sizeof v);
+inline uint64_t UNALIGNED_LOAD64(const void *p) {
+  // Compiles to a single mov/ldr on clang/gcc/msvc.
+  uint64_t v;
+  std::memcpy(&v, p, sizeof(v));
+  return v;
 }
 
-#endif
-
-// The following guarantees declaration of the byte swap functions.
-#if defined(SNAPPY_IS_BIG_ENDIAN)
-
-#ifdef HAVE_SYS_BYTEORDER_H
-#include <sys/byteorder.h>
-#endif
-
-#ifdef HAVE_SYS_ENDIAN_H
-#include <sys/endian.h>
-#endif
-
-#ifdef _MSC_VER
-#include <stdlib.h>
-#define bswap_16(x) _byteswap_ushort(x)
-#define bswap_32(x) _byteswap_ulong(x)
-#define bswap_64(x) _byteswap_uint64(x)
-
-#elif defined(__APPLE__)
-// Mac OS X / Darwin features
-#include <libkern/OSByteOrder.h>
-#define bswap_16(x) OSSwapInt16(x)
-#define bswap_32(x) OSSwapInt32(x)
-#define bswap_64(x) OSSwapInt64(x)
-
-#elif defined(HAVE_BYTESWAP_H)
-#include <byteswap.h>
-
-#elif defined(bswap32)
-// FreeBSD defines bswap{16,32,64} in <sys/endian.h> (already #included).
-#define bswap_16(x) bswap16(x)
-#define bswap_32(x) bswap32(x)
-#define bswap_64(x) bswap64(x)
-
-#elif defined(BSWAP_64)
-// Solaris 10 defines BSWAP_{16,32,64} in <sys/byteorder.h> (already #included).
-#define bswap_16(x) BSWAP_16(x)
-#define bswap_32(x) BSWAP_32(x)
-#define bswap_64(x) BSWAP_64(x)
-
-#else
-
-inline uint16 bswap_16(uint16 x) {
-  return (x << 8) | (x >> 8);
+inline void UNALIGNED_STORE16(void *p, uint16_t v) {
+  // Compiles to a single mov/strh on clang/gcc/msvc.
+  std::memcpy(p, &v, sizeof(v));
 }
 
-inline uint32 bswap_32(uint32 x) {
-  x = ((x & 0xff00ff00UL) >> 8) | ((x & 0x00ff00ffUL) << 8);
-  return (x >> 16) | (x << 16);
+inline void UNALIGNED_STORE32(void *p, uint32_t v) {
+  // Compiles to a single mov/str on clang/gcc/msvc.
+  std::memcpy(p, &v, sizeof(v));
 }
 
-inline uint64 bswap_64(uint64 x) {
-  x = ((x & 0xff00ff00ff00ff00ULL) >> 8) | ((x & 0x00ff00ff00ff00ffULL) << 8);
-  x = ((x & 0xffff0000ffff0000ULL) >> 16) | ((x & 0x0000ffff0000ffffULL) << 16);
-  return (x >> 32) | (x << 32);
+inline void UNALIGNED_STORE64(void *p, uint64_t v) {
+  // Compiles to a single mov/str on clang/gcc/msvc.
+  std::memcpy(p, &v, sizeof(v));
 }
 
-#endif
-
-#endif  // defined(SNAPPY_IS_BIG_ENDIAN)
-
 // Convert to little-endian storage, opposite of network format.
 // Convert x from host to little endian: x = LittleEndian.FromHost(x);
 // convert x from little endian to host: x = LittleEndian.ToHost(x);
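The hunk above replaces the old per-architecture `reinterpret_cast` and packed-struct tricks with a single `std::memcpy` idiom: a fixed-size `memcpy` into a local variable is defined behavior for any source alignment, and modern compilers lower it to one plain load or store. A self-contained sketch of the pattern (not couchbase code; names are illustrative):

```cpp
// Sketch of the memcpy idiom the new UNALIGNED_LOAD*/STORE* helpers use.
#include <cstdint>
#include <cstring>

inline uint32_t load_u32(const void* p) {
  uint32_t v;
  std::memcpy(&v, p, sizeof(v));  // defined behavior for any alignment
  return v;
}

inline void store_u32(void* p, uint32_t v) {
  std::memcpy(p, &v, sizeof(v));
}

int main() {
  unsigned char buf[8] = {0};
  store_u32(buf + 1, 0xdeadbeef);  // deliberately misaligned offset
  return load_u32(buf + 1) == 0xdeadbeef ? 0 : 1;
}
```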
@@ -321,44 +169,110 @@ inline uint64 bswap_64(uint64 x) {
 // x = LittleEndian.Load16(p);
 class LittleEndian {
  public:
-  // Conversion functions.
-#if defined(SNAPPY_IS_BIG_ENDIAN)
-
-  static uint16 FromHost16(uint16 x) { return bswap_16(x); }
-  static uint16 ToHost16(uint16 x) { return bswap_16(x); }
-
-  static uint32 FromHost32(uint32 x) { return bswap_32(x); }
-  static uint32 ToHost32(uint32 x) { return bswap_32(x); }
-
-  static bool IsLittleEndian() { return false; }
-
-#else  // !defined(SNAPPY_IS_BIG_ENDIAN)
-
-  static uint16 FromHost16(uint16 x) { return x; }
-  static uint16 ToHost16(uint16 x) { return x; }
-
-  static uint32 FromHost32(uint32 x) { return x; }
-  static uint32 ToHost32(uint32 x) { return x; }
+  // Functions to do unaligned loads and stores in little-endian order.
+  static inline uint16_t Load16(const void *ptr) {
+    // Compiles to a single mov/str on recent clang and gcc.
+#if SNAPPY_IS_BIG_ENDIAN
+    const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
+    return (static_cast<uint16_t>(buffer[0])) |
+           (static_cast<uint16_t>(buffer[1]) << 8);
+#else
+    // memcpy() turns into a single instruction early in the optimization
+    // pipeline (relatively to a series of byte accesses). So, using memcpy
+    // instead of byte accesses may lead to better decisions in more stages of
+    // the optimization pipeline.
+    uint16_t value;
+    std::memcpy(&value, ptr, 2);
+    return value;
+#endif
+  }
 
-  static bool IsLittleEndian() { return true; }
+  static inline uint32_t Load32(const void *ptr) {
+    // Compiles to a single mov/str on recent clang and gcc.
+#if SNAPPY_IS_BIG_ENDIAN
+    const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
+    return (static_cast<uint32_t>(buffer[0])) |
+           (static_cast<uint32_t>(buffer[1]) << 8) |
+           (static_cast<uint32_t>(buffer[2]) << 16) |
+           (static_cast<uint32_t>(buffer[3]) << 24);
+#else
+    // See Load16() for the rationale of using memcpy().
+    uint32_t value;
+    std::memcpy(&value, ptr, 4);
+    return value;
+#endif
+  }
 
-#endif  // !defined(SNAPPY_IS_BIG_ENDIAN)
+  static inline uint64_t Load64(const void *ptr) {
+    // Compiles to a single mov/str on recent clang and gcc.
+#if SNAPPY_IS_BIG_ENDIAN
+    const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
+    return (static_cast<uint64_t>(buffer[0])) |
+           (static_cast<uint64_t>(buffer[1]) << 8) |
+           (static_cast<uint64_t>(buffer[2]) << 16) |
+           (static_cast<uint64_t>(buffer[3]) << 24) |
+           (static_cast<uint64_t>(buffer[4]) << 32) |
+           (static_cast<uint64_t>(buffer[5]) << 40) |
+           (static_cast<uint64_t>(buffer[6]) << 48) |
+           (static_cast<uint64_t>(buffer[7]) << 56);
+#else
+    // See Load16() for the rationale of using memcpy().
+    uint64_t value;
+    std::memcpy(&value, ptr, 8);
+    return value;
+#endif
+  }
 
-  // Functions to do unaligned loads and stores in little-endian order.
-  static uint16 Load16(const void *p) {
-    return ToHost16(UNALIGNED_LOAD16(p));
+  static inline void Store16(void *dst, uint16_t value) {
+    // Compiles to a single mov/str on recent clang and gcc.
+#if SNAPPY_IS_BIG_ENDIAN
+    uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
+    buffer[0] = static_cast<uint8_t>(value);
+    buffer[1] = static_cast<uint8_t>(value >> 8);
+#else
+    // See Load16() for the rationale of using memcpy().
+    std::memcpy(dst, &value, 2);
+#endif
   }
 
-  static void Store16(void *p, uint16 v) {
-    UNALIGNED_STORE16(p, FromHost16(v));
+  static void Store32(void *dst, uint32_t value) {
+    // Compiles to a single mov/str on recent clang and gcc.
+#if SNAPPY_IS_BIG_ENDIAN
+    uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
+    buffer[0] = static_cast<uint8_t>(value);
+    buffer[1] = static_cast<uint8_t>(value >> 8);
+    buffer[2] = static_cast<uint8_t>(value >> 16);
+    buffer[3] = static_cast<uint8_t>(value >> 24);
+#else
+    // See Load16() for the rationale of using memcpy().
+    std::memcpy(dst, &value, 4);
+#endif
   }
 
-  static uint32 Load32(const void *p) {
-    return ToHost32(UNALIGNED_LOAD32(p));
+  static void Store64(void* dst, uint64_t value) {
+    // Compiles to a single mov/str on recent clang and gcc.
+#if SNAPPY_IS_BIG_ENDIAN
+    uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
+    buffer[0] = static_cast<uint8_t>(value);
+    buffer[1] = static_cast<uint8_t>(value >> 8);
+    buffer[2] = static_cast<uint8_t>(value >> 16);
+    buffer[3] = static_cast<uint8_t>(value >> 24);
+    buffer[4] = static_cast<uint8_t>(value >> 32);
+    buffer[5] = static_cast<uint8_t>(value >> 40);
+    buffer[6] = static_cast<uint8_t>(value >> 48);
+    buffer[7] = static_cast<uint8_t>(value >> 56);
+#else
+    // See Load16() for the rationale of using memcpy().
+    std::memcpy(dst, &value, 8);
+#endif
   }
 
-  static void Store32(void *p, uint32 v) {
-    UNALIGNED_STORE32(p, FromHost32(v));
+  static inline constexpr bool IsLittleEndian() {
+#if SNAPPY_IS_BIG_ENDIAN
+    return false;
+#else
+    return true;
+#endif  // SNAPPY_IS_BIG_ENDIAN
   }
 };
 
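The rewritten `LittleEndian` class drops the `bswap_*` machinery entirely: on big-endian builds it composes or decomposes the value byte by byte with explicit shifts, and on little-endian builds it falls back to the `memcpy` idiom, so both paths produce the same little-endian wire bytes. A standalone sketch of the byte-composition half (illustrative names, not the gem's code):

```cpp
// Reading four little-endian bytes with explicit shifts gives the same
// result on any host byte order.
#include <cstdint>

uint32_t le_load32(const uint8_t* b) {
  return static_cast<uint32_t>(b[0]) |
         (static_cast<uint32_t>(b[1]) << 8) |
         (static_cast<uint32_t>(b[2]) << 16) |
         (static_cast<uint32_t>(b[3]) << 24);
}

int main() {
  const uint8_t bytes[4] = {0x78, 0x56, 0x34, 0x12};
  return le_load32(bytes) == 0x12345678 ? 0 : 1;  // independent of host order
}
```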
@@ -366,19 +280,17 @@ class LittleEndian {
 class Bits {
  public:
   // Return floor(log2(n)) for positive integer n.
-  static int Log2FloorNonZero(uint32 n);
+  static int Log2FloorNonZero(uint32_t n);
 
   // Return floor(log2(n)) for positive integer n.  Returns -1 iff n == 0.
-  static int Log2Floor(uint32 n);
+  static int Log2Floor(uint32_t n);
 
   // Return the first set least / most significant bit, 0-indexed.  Returns an
   // undefined value if n == 0.  FindLSBSetNonZero() is similar to ffs() except
   // that it's 0-indexed.
-  static int FindLSBSetNonZero(uint32 n);
+  static int FindLSBSetNonZero(uint32_t n);
 
-#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
-  static int FindLSBSetNonZero64(uint64 n);
-#endif  // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+  static int FindLSBSetNonZero64(uint64_t n);
 
  private:
   // No copying
@@ -386,9 +298,9 @@ class Bits {
   void operator=(const Bits&);
 };
 
-#ifdef HAVE_BUILTIN_CTZ
+#if HAVE_BUILTIN_CTZ
 
-inline int Bits::Log2FloorNonZero(uint32 n) {
+inline int Bits::Log2FloorNonZero(uint32_t n) {
   assert(n != 0);
   // (31 ^ x) is equivalent to (31 - x) for x in [0, 31]. An easy proof
   // represents subtraction in base 2 and observes that there's no carry.
@@ -399,66 +311,52 @@ inline int Bits::Log2FloorNonZero(uint32 n) {
   return 31 ^ __builtin_clz(n);
 }
 
-inline int Bits::Log2Floor(uint32 n) {
+inline int Bits::Log2Floor(uint32_t n) {
   return (n == 0) ? -1 : Bits::Log2FloorNonZero(n);
 }
 
-inline int Bits::FindLSBSetNonZero(uint32 n) {
+inline int Bits::FindLSBSetNonZero(uint32_t n) {
   assert(n != 0);
   return __builtin_ctz(n);
 }
 
-#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
-inline int Bits::FindLSBSetNonZero64(uint64 n) {
-  assert(n != 0);
-  return __builtin_ctzll(n);
-}
-#endif  // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
-
 #elif defined(_MSC_VER)
 
-inline int Bits::Log2FloorNonZero(uint32 n) {
+inline int Bits::Log2FloorNonZero(uint32_t n) {
   assert(n != 0);
+  // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
   unsigned long where;
   _BitScanReverse(&where, n);
   return static_cast<int>(where);
 }
 
-inline int Bits::Log2Floor(uint32 n) {
+inline int Bits::Log2Floor(uint32_t n) {
+  // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
   unsigned long where;
   if (_BitScanReverse(&where, n))
     return static_cast<int>(where);
   return -1;
 }
 
-inline int Bits::FindLSBSetNonZero(uint32 n) {
+inline int Bits::FindLSBSetNonZero(uint32_t n) {
   assert(n != 0);
+  // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
   unsigned long where;
   if (_BitScanForward(&where, n))
     return static_cast<int>(where);
   return 32;
 }
 
-#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
-inline int Bits::FindLSBSetNonZero64(uint64 n) {
-  assert(n != 0);
-  unsigned long where;
-  if (_BitScanForward64(&where, n))
-    return static_cast<int>(where);
-  return 64;
-}
-#endif  // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
-
 #else  // Portable versions.
 
-inline int Bits::Log2FloorNonZero(uint32 n) {
+inline int Bits::Log2FloorNonZero(uint32_t n) {
   assert(n != 0);
 
   int log = 0;
-  uint32 value = n;
+  uint32_t value = n;
   for (int i = 4; i >= 0; --i) {
     int shift = (1 << i);
-    uint32 x = value >> shift;
+    uint32_t x = value >> shift;
     if (x != 0) {
       value = x;
       log += shift;
@@ -468,16 +366,16 @@ inline int Bits::Log2FloorNonZero(uint32 n) {
   return log;
 }
 
-inline int Bits::Log2Floor(uint32 n) {
+inline int Bits::Log2Floor(uint32_t n) {
   return (n == 0) ? -1 : Bits::Log2FloorNonZero(n);
 }
 
-inline int Bits::FindLSBSetNonZero(uint32 n) {
+inline int Bits::FindLSBSetNonZero(uint32_t n) {
   assert(n != 0);
 
   int rc = 31;
   for (int i = 4, shift = 1 << 4; i >= 0; --i) {
-    const uint32 x = n << shift;
+    const uint32_t x = n << shift;
     if (x != 0) {
       n = x;
       rc -= shift;
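Both the `__builtin_clz` path and the portable loop compute floor(log2(n)); the `31 ^ __builtin_clz(n)` form works because `clz(n)` lies in [0, 31], where XOR with 31 equals subtraction from 31 (no borrow can occur). A small check of that identity (GCC/Clang only; illustrative, not part of the gem):

```cpp
// Verifies the identity behind Log2FloorNonZero: for nonzero 32-bit n,
// 31 ^ __builtin_clz(n) == 31 - __builtin_clz(n) == floor(log2(n)).
#include <cassert>
#include <cstdint>

int log2_floor_nonzero(uint32_t n) {
  assert(n != 0);
  return 31 ^ __builtin_clz(n);  // XOR is safe: clz(n) is in [0, 31]
}

int main() {
  assert(log2_floor_nonzero(1) == 0);
  assert(log2_floor_nonzero(1024) == 10);
  assert(log2_floor_nonzero(0xFFFFFFFFu) == 31);
  return 0;
}
```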
@@ -487,27 +385,48 @@ inline int Bits::FindLSBSetNonZero(uint32 n) {
   return rc;
 }
 
-#if defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
+#endif  // End portable versions.
+
+#if HAVE_BUILTIN_CTZ
+
+inline int Bits::FindLSBSetNonZero64(uint64_t n) {
+  assert(n != 0);
+  return __builtin_ctzll(n);
+}
+
+#elif defined(_MSC_VER) && (defined(_M_X64) || defined(_M_ARM64))
+// _BitScanForward64() is only available on x64 and ARM64.
+
+inline int Bits::FindLSBSetNonZero64(uint64_t n) {
+  assert(n != 0);
+  // NOLINTNEXTLINE(runtime/int): The MSVC intrinsic demands unsigned long.
+  unsigned long where;
+  if (_BitScanForward64(&where, n))
+    return static_cast<int>(where);
+  return 64;
+}
+
+#else  // Portable version.
+
 // FindLSBSetNonZero64() is defined in terms of FindLSBSetNonZero().
-inline int Bits::FindLSBSetNonZero64(uint64 n) {
+inline int Bits::FindLSBSetNonZero64(uint64_t n) {
   assert(n != 0);
 
-  const uint32 bottombits = static_cast<uint32>(n);
+  const uint32_t bottombits = static_cast<uint32_t>(n);
   if (bottombits == 0) {
-    // Bottom bits are zero, so scan in bigger quantities.
-    return 32 + FindLSBSetNonZero(static_cast<uint32>(n >> 32));
+    // Bottom bits are zero, so scan the top bits.
+    return 32 + FindLSBSetNonZero(static_cast<uint32_t>(n >> 32));
   } else {
     return FindLSBSetNonZero(bottombits);
   }
 }
-#endif  // defined(ARCH_K8) || defined(ARCH_PPC) || defined(ARCH_ARM)
 
-#endif  // End portable versions.
+#endif  // HAVE_BUILTIN_CTZ
 
 // Variable-length integer encoding.
 class Varint {
  public:
-  // Maximum lengths of varint encoding of uint32.
+  // Maximum lengths of varint encoding of uint32_t.
   static const int kMax32 = 5;
 
   // Attempts to parse a varint32 from a prefix of the bytes in [ptr,limit-1].
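The portable `FindLSBSetNonZero64()` fallback in this hunk reduces the 64-bit scan to the 32-bit one: scan the low half, and only when it is entirely zero scan the high half and add 32. A standalone sketch of the same strategy, with a naive 32-bit scan standing in for `FindLSBSetNonZero()` (illustrative, not the gem's code):

```cpp
// Portable LSB scan over 64 bits, composed from a 32-bit scan.
#include <cassert>
#include <cstdint>

int find_lsb_set_nonzero32(uint32_t n) {  // stand-in for the 32-bit scan
  int i = 0;
  while ((n & 1) == 0) { n >>= 1; ++i; }
  return i;
}

int find_lsb_set_nonzero64(uint64_t n) {
  const uint32_t bottom = static_cast<uint32_t>(n);
  if (bottom == 0)  // low half empty: answer lives in the high half
    return 32 + find_lsb_set_nonzero32(static_cast<uint32_t>(n >> 32));
  return find_lsb_set_nonzero32(bottom);
}

int main() {
  assert(find_lsb_set_nonzero64(0x8000000000000000ULL) == 63);
  assert(find_lsb_set_nonzero64(0x10ULL) == 4);
  return 0;
}
```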
@@ -516,23 +435,23 @@ class Varint {
   // past the last byte of the varint32. Else returns NULL.  On success,
   // "result <= limit".
   static const char* Parse32WithLimit(const char* ptr, const char* limit,
-                                      uint32* OUTPUT);
+                                      uint32_t* OUTPUT);
 
   // REQUIRES   "ptr" points to a buffer of length sufficient to hold "v".
   // EFFECTS    Encodes "v" into "ptr" and returns a pointer to the
   //            byte just past the last encoded byte.
-  static char* Encode32(char* ptr, uint32 v);
+  static char* Encode32(char* ptr, uint32_t v);
 
   // EFFECTS    Appends the varint representation of "value" to "*s".
-  static void Append32(std::string* s, uint32 value);
+  static void Append32(std::string* s, uint32_t value);
 };
 
 inline const char* Varint::Parse32WithLimit(const char* p,
                                             const char* l,
-                                            uint32* OUTPUT) {
+                                            uint32_t* OUTPUT) {
   const unsigned char* ptr = reinterpret_cast<const unsigned char*>(p);
   const unsigned char* limit = reinterpret_cast<const unsigned char*>(l);
-  uint32 b, result;
+  uint32_t b, result;
   if (ptr >= limit) return NULL;
   b = *(ptr++); result = b & 127;          if (b < 128) goto done;
   if (ptr >= limit) return NULL;
@@ -549,30 +468,30 @@ inline const char* Varint::Parse32WithLimit(const char* p,
   return reinterpret_cast<const char*>(ptr);
 }
 
-inline char* Varint::Encode32(char* sptr, uint32 v) {
+inline char* Varint::Encode32(char* sptr, uint32_t v) {
   // Operate on characters as unsigneds
-  unsigned char* ptr = reinterpret_cast<unsigned char*>(sptr);
-  static const int B = 128;
-  if (v < (1<<7)) {
-    *(ptr++) = v;
-  } else if (v < (1<<14)) {
-    *(ptr++) = v | B;
-    *(ptr++) = v>>7;
-  } else if (v < (1<<21)) {
-    *(ptr++) = v | B;
-    *(ptr++) = (v>>7) | B;
-    *(ptr++) = v>>14;
-  } else if (v < (1<<28)) {
-    *(ptr++) = v | B;
-    *(ptr++) = (v>>7) | B;
-    *(ptr++) = (v>>14) | B;
-    *(ptr++) = v>>21;
+  uint8_t* ptr = reinterpret_cast<uint8_t*>(sptr);
+  static const uint8_t B = 128;
+  if (v < (1 << 7)) {
+    *(ptr++) = static_cast<uint8_t>(v);
+  } else if (v < (1 << 14)) {
+    *(ptr++) = static_cast<uint8_t>(v | B);
+    *(ptr++) = static_cast<uint8_t>(v >> 7);
+  } else if (v < (1 << 21)) {
+    *(ptr++) = static_cast<uint8_t>(v | B);
+    *(ptr++) = static_cast<uint8_t>((v >> 7) | B);
+    *(ptr++) = static_cast<uint8_t>(v >> 14);
+  } else if (v < (1 << 28)) {
+    *(ptr++) = static_cast<uint8_t>(v | B);
+    *(ptr++) = static_cast<uint8_t>((v >> 7) | B);
+    *(ptr++) = static_cast<uint8_t>((v >> 14) | B);
+    *(ptr++) = static_cast<uint8_t>(v >> 21);
   } else {
-    *(ptr++) = v | B;
-    *(ptr++) = (v>>7) | B;
-    *(ptr++) = (v>>14) | B;
-    *(ptr++) = (v>>21) | B;
-    *(ptr++) = v>>28;
+    *(ptr++) = static_cast<uint8_t>(v | B);
+    *(ptr++) = static_cast<uint8_t>((v>>7) | B);
+    *(ptr++) = static_cast<uint8_t>((v>>14) | B);
+    *(ptr++) = static_cast<uint8_t>((v>>21) | B);
+    *(ptr++) = static_cast<uint8_t>(v >> 28);
   }
   return reinterpret_cast<char*>(ptr);
 }