couchbase 3.0.0.alpha.1-universal-darwin-19 → 3.0.0.alpha.2-universal-darwin-19

Files changed (176)
  1. checksums.yaml +4 -4
  2. data/.github/workflows/tests-6.0.3.yml +49 -0
  3. data/.github/workflows/tests.yml +47 -0
  4. data/.gitmodules +3 -0
  5. data/.idea/dictionaries/gem_terms.xml +5 -0
  6. data/.idea/inspectionProfiles/Project_Default.xml +1 -0
  7. data/.idea/vcs.xml +1 -0
  8. data/Gemfile +1 -0
  9. data/README.md +55 -2
  10. data/Rakefile +18 -0
  11. data/bin/init-cluster +62 -0
  12. data/bin/setup +1 -0
  13. data/couchbase.gemspec +3 -2
  14. data/examples/crud.rb +1 -2
  15. data/examples/managing_buckets.rb +47 -0
  16. data/examples/managing_collections.rb +58 -0
  17. data/examples/managing_query_indexes.rb +63 -0
  18. data/examples/query.rb +3 -2
  19. data/examples/query_with_consistency.rb +76 -0
  20. data/examples/subdocument.rb +23 -1
  21. data/ext/.clang-format +1 -1
  22. data/ext/.idea/dictionaries/couchbase_terms.xml +2 -0
  23. data/ext/.idea/vcs.xml +1 -0
  24. data/ext/CMakeLists.txt +30 -12
  25. data/ext/build_version.hxx.in +26 -0
  26. data/ext/couchbase/bucket.hxx +69 -8
  27. data/ext/couchbase/cluster.hxx +70 -54
  28. data/ext/couchbase/collections_manifest.hxx +3 -3
  29. data/ext/couchbase/configuration.hxx +14 -0
  30. data/ext/couchbase/couchbase.cxx +2044 -383
  31. data/ext/couchbase/{operations/document_id.hxx → document_id.hxx} +5 -4
  32. data/ext/couchbase/io/http_message.hxx +5 -1
  33. data/ext/couchbase/io/http_parser.hxx +2 -1
  34. data/ext/couchbase/io/http_session.hxx +6 -3
  35. data/ext/couchbase/io/{binary_message.hxx → mcbp_message.hxx} +15 -12
  36. data/ext/couchbase/io/mcbp_parser.hxx +99 -0
  37. data/ext/couchbase/io/{key_value_session.hxx → mcbp_session.hxx} +200 -95
  38. data/ext/couchbase/io/session_manager.hxx +37 -22
  39. data/ext/couchbase/mutation_token.hxx +2 -1
  40. data/ext/couchbase/operations.hxx +38 -8
  41. data/ext/couchbase/operations/bucket_create.hxx +138 -0
  42. data/ext/couchbase/operations/bucket_drop.hxx +65 -0
  43. data/ext/couchbase/operations/bucket_flush.hxx +65 -0
  44. data/ext/couchbase/operations/bucket_get.hxx +69 -0
  45. data/ext/couchbase/operations/bucket_get_all.hxx +62 -0
  46. data/ext/couchbase/operations/bucket_settings.hxx +111 -0
  47. data/ext/couchbase/operations/bucket_update.hxx +115 -0
  48. data/ext/couchbase/operations/cluster_developer_preview_enable.hxx +60 -0
  49. data/ext/couchbase/operations/collection_create.hxx +86 -0
  50. data/ext/couchbase/operations/collection_drop.hxx +82 -0
  51. data/ext/couchbase/operations/command.hxx +10 -10
  52. data/ext/couchbase/operations/document_decrement.hxx +80 -0
  53. data/ext/couchbase/operations/document_exists.hxx +80 -0
  54. data/ext/couchbase/operations/{get.hxx → document_get.hxx} +4 -2
  55. data/ext/couchbase/operations/document_get_and_lock.hxx +64 -0
  56. data/ext/couchbase/operations/document_get_and_touch.hxx +64 -0
  57. data/ext/couchbase/operations/document_increment.hxx +80 -0
  58. data/ext/couchbase/operations/document_insert.hxx +74 -0
  59. data/ext/couchbase/operations/{lookup_in.hxx → document_lookup_in.hxx} +2 -2
  60. data/ext/couchbase/operations/{mutate_in.hxx → document_mutate_in.hxx} +11 -2
  61. data/ext/couchbase/operations/{query.hxx → document_query.hxx} +101 -6
  62. data/ext/couchbase/operations/document_remove.hxx +67 -0
  63. data/ext/couchbase/operations/document_replace.hxx +76 -0
  64. data/ext/couchbase/operations/{upsert.hxx → document_touch.hxx} +14 -14
  65. data/ext/couchbase/operations/{remove.hxx → document_unlock.hxx} +12 -10
  66. data/ext/couchbase/operations/document_upsert.hxx +74 -0
  67. data/ext/couchbase/operations/query_index_build_deferred.hxx +85 -0
  68. data/ext/couchbase/operations/query_index_create.hxx +134 -0
  69. data/ext/couchbase/operations/query_index_drop.hxx +108 -0
  70. data/ext/couchbase/operations/query_index_get_all.hxx +106 -0
  71. data/ext/couchbase/operations/scope_create.hxx +81 -0
  72. data/ext/couchbase/operations/scope_drop.hxx +79 -0
  73. data/ext/couchbase/operations/scope_get_all.hxx +72 -0
  74. data/ext/couchbase/protocol/client_opcode.hxx +35 -0
  75. data/ext/couchbase/protocol/client_request.hxx +56 -9
  76. data/ext/couchbase/protocol/client_response.hxx +52 -15
  77. data/ext/couchbase/protocol/cmd_cluster_map_change_notification.hxx +81 -0
  78. data/ext/couchbase/protocol/cmd_decrement.hxx +187 -0
  79. data/ext/couchbase/protocol/cmd_exists.hxx +171 -0
  80. data/ext/couchbase/protocol/cmd_get.hxx +31 -8
  81. data/ext/couchbase/protocol/cmd_get_and_lock.hxx +142 -0
  82. data/ext/couchbase/protocol/cmd_get_and_touch.hxx +142 -0
  83. data/ext/couchbase/protocol/cmd_get_cluster_config.hxx +16 -3
  84. data/ext/couchbase/protocol/cmd_get_collections_manifest.hxx +16 -3
  85. data/ext/couchbase/protocol/cmd_get_error_map.hxx +16 -3
  86. data/ext/couchbase/protocol/cmd_hello.hxx +24 -8
  87. data/ext/couchbase/protocol/cmd_increment.hxx +187 -0
  88. data/ext/couchbase/protocol/cmd_info.hxx +1 -0
  89. data/ext/couchbase/protocol/cmd_insert.hxx +172 -0
  90. data/ext/couchbase/protocol/cmd_lookup_in.hxx +28 -13
  91. data/ext/couchbase/protocol/cmd_mutate_in.hxx +65 -13
  92. data/ext/couchbase/protocol/cmd_remove.hxx +59 -4
  93. data/ext/couchbase/protocol/cmd_replace.hxx +172 -0
  94. data/ext/couchbase/protocol/cmd_sasl_auth.hxx +15 -3
  95. data/ext/couchbase/protocol/cmd_sasl_list_mechs.hxx +15 -3
  96. data/ext/couchbase/protocol/cmd_sasl_step.hxx +15 -3
  97. data/ext/couchbase/protocol/cmd_select_bucket.hxx +14 -2
  98. data/ext/couchbase/protocol/cmd_touch.hxx +102 -0
  99. data/ext/couchbase/protocol/cmd_unlock.hxx +95 -0
  100. data/ext/couchbase/protocol/cmd_upsert.hxx +50 -14
  101. data/ext/couchbase/protocol/durability_level.hxx +67 -0
  102. data/ext/couchbase/protocol/frame_info_id.hxx +187 -0
  103. data/ext/couchbase/protocol/hello_feature.hxx +137 -0
  104. data/ext/couchbase/protocol/server_opcode.hxx +57 -0
  105. data/ext/couchbase/protocol/server_request.hxx +122 -0
  106. data/ext/couchbase/protocol/unsigned_leb128.h +15 -15
  107. data/ext/couchbase/utils/byteswap.hxx +1 -2
  108. data/ext/couchbase/utils/url_codec.hxx +225 -0
  109. data/ext/couchbase/version.hxx +3 -1
  110. data/ext/extconf.rb +4 -1
  111. data/ext/test/main.cxx +37 -113
  112. data/ext/third_party/snappy/.appveyor.yml +36 -0
  113. data/ext/third_party/snappy/.gitignore +8 -0
  114. data/ext/third_party/snappy/.travis.yml +98 -0
  115. data/ext/third_party/snappy/AUTHORS +1 -0
  116. data/ext/third_party/snappy/CMakeLists.txt +345 -0
  117. data/ext/third_party/snappy/CONTRIBUTING.md +26 -0
  118. data/ext/third_party/snappy/COPYING +54 -0
  119. data/ext/third_party/snappy/NEWS +188 -0
  120. data/ext/third_party/snappy/README.md +148 -0
  121. data/ext/third_party/snappy/cmake/SnappyConfig.cmake.in +33 -0
  122. data/ext/third_party/snappy/cmake/config.h.in +59 -0
  123. data/ext/third_party/snappy/docs/README.md +72 -0
  124. data/ext/third_party/snappy/format_description.txt +110 -0
  125. data/ext/third_party/snappy/framing_format.txt +135 -0
  126. data/ext/third_party/snappy/snappy-c.cc +90 -0
  127. data/ext/third_party/snappy/snappy-c.h +138 -0
  128. data/ext/third_party/snappy/snappy-internal.h +315 -0
  129. data/ext/third_party/snappy/snappy-sinksource.cc +121 -0
  130. data/ext/third_party/snappy/snappy-sinksource.h +182 -0
  131. data/ext/third_party/snappy/snappy-stubs-internal.cc +42 -0
  132. data/ext/third_party/snappy/snappy-stubs-internal.h +493 -0
  133. data/ext/third_party/snappy/snappy-stubs-public.h.in +63 -0
  134. data/ext/third_party/snappy/snappy-test.cc +613 -0
  135. data/ext/third_party/snappy/snappy-test.h +526 -0
  136. data/ext/third_party/snappy/snappy.cc +1770 -0
  137. data/ext/third_party/snappy/snappy.h +209 -0
  138. data/ext/third_party/snappy/snappy_compress_fuzzer.cc +60 -0
  139. data/ext/third_party/snappy/snappy_uncompress_fuzzer.cc +58 -0
  140. data/ext/third_party/snappy/snappy_unittest.cc +1512 -0
  141. data/ext/third_party/snappy/testdata/alice29.txt +3609 -0
  142. data/ext/third_party/snappy/testdata/asyoulik.txt +4122 -0
  143. data/ext/third_party/snappy/testdata/baddata1.snappy +0 -0
  144. data/ext/third_party/snappy/testdata/baddata2.snappy +0 -0
  145. data/ext/third_party/snappy/testdata/baddata3.snappy +0 -0
  146. data/ext/third_party/snappy/testdata/fireworks.jpeg +0 -0
  147. data/ext/third_party/snappy/testdata/geo.protodata +0 -0
  148. data/ext/third_party/snappy/testdata/html +1 -0
  149. data/ext/third_party/snappy/testdata/html_x_4 +1 -0
  150. data/ext/third_party/snappy/testdata/kppkn.gtb +0 -0
  151. data/ext/third_party/snappy/testdata/lcet10.txt +7519 -0
  152. data/ext/third_party/snappy/testdata/paper-100k.pdf +600 -2
  153. data/ext/third_party/snappy/testdata/plrabn12.txt +10699 -0
  154. data/ext/third_party/snappy/testdata/urls.10K +10000 -0
  155. data/lib/couchbase/binary_collection.rb +33 -76
  156. data/lib/couchbase/binary_collection_options.rb +94 -0
  157. data/lib/couchbase/bucket.rb +9 -3
  158. data/lib/couchbase/cluster.rb +161 -23
  159. data/lib/couchbase/collection.rb +108 -191
  160. data/lib/couchbase/collection_options.rb +430 -0
  161. data/lib/couchbase/errors.rb +136 -134
  162. data/lib/couchbase/json_transcoder.rb +32 -0
  163. data/lib/couchbase/management/analytics_index_manager.rb +185 -9
  164. data/lib/couchbase/management/bucket_manager.rb +84 -33
  165. data/lib/couchbase/management/collection_manager.rb +166 -1
  166. data/lib/couchbase/management/query_index_manager.rb +261 -0
  167. data/lib/couchbase/management/search_index_manager.rb +291 -0
  168. data/lib/couchbase/management/user_manager.rb +12 -10
  169. data/lib/couchbase/management/view_index_manager.rb +151 -1
  170. data/lib/couchbase/mutation_state.rb +11 -1
  171. data/lib/couchbase/scope.rb +4 -4
  172. data/lib/couchbase/version.rb +1 -1
  173. metadata +113 -18
  174. data/.travis.yml +0 -7
  175. data/ext/couchbase/io/binary_parser.hxx +0 -64
  176. data/lib/couchbase/results.rb +0 -307
@@ -69,14 +69,14 @@ struct traits {
  (void)v;
  couchbase::collections_manifest result;
  result.id = couchbase::uuid::random();
- result.uid = std::stoull(v.at("uid").get_string());
+ result.uid = std::stoull(v.at("uid").get_string(), 0, 16);
  for (const auto& s : v.at("scopes").get_array()) {
  couchbase::collections_manifest::scope scope;
- scope.uid = std::stoull(s.at("uid").get_string());
+ scope.uid = std::stoull(s.at("uid").get_string(), 0, 16);
  scope.name = s.at("name").get_string();
  for (const auto& c : s.at("collections").get_array()) {
  couchbase::collections_manifest::collection collection;
- collection.uid = std::stoull(c.at("uid").get_string());
+ collection.uid = std::stoull(c.at("uid").get_string(), 0, 16);
  collection.name = c.at("name").get_string();
  scope.collections.emplace_back(collection);
  }
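Note on the hunk above: the manifest, scope, and collection uid strings are now parsed with an explicit base 16, whereas std::stoull's default base 10 would silently stop at the first non-decimal character. A minimal sketch of the difference, using a made-up uid value:

#include <cassert>
#include <string>

int main()
{
    std::string uid = "1a"; // hypothetical uid string from a collections manifest
    assert(std::stoull(uid) == 1);         // default base 10 stops at 'a' and silently truncates
    assert(std::stoull(uid, 0, 16) == 26); // explicit base 16, as in the patch, parses the whole value
    return 0;
}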
@@ -84,6 +84,20 @@ struct configuration {
  return std::make_pair(vbucket, static_cast<std::size_t>(vbmap->at(vbucket)[0]));
  }
  };
+
+ configuration
+ make_blank_configuration(const std::string& hostname, std::uint16_t plain_port, std::uint16_t tls_port)
+ {
+ configuration result;
+ result.id = couchbase::uuid::random();
+ result.rev = 0;
+ result.nodes.resize(1);
+ result.nodes[0].hostname = hostname;
+ result.nodes[0].this_node = true;
+ result.nodes[0].services_plain.key_value = plain_port;
+ result.nodes[0].services_tls.key_value = tls_port;
+ return result;
+ }
  } // namespace couchbase

  template<>
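The new make_blank_configuration helper builds a minimal single-node configuration from a hostname and the plain/TLS key-value ports. A hypothetical usage sketch; the address and ports below are placeholders, not values taken from this diff:

// build a one-node bootstrap configuration
auto config = couchbase::make_blank_configuration("127.0.0.1", 11210, 11207);
// the result has a fresh random id, rev == 0, and a single node marked as this_node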
@@ -23,11 +23,16 @@

  #include <http_parser.h>

+ #include <snappy.h>
+
  #include <version.hxx>
  #include <cluster.hxx>
  #include <operations.hxx>

  #include <ruby.h>
+ #if defined(HAVE_RUBY_VERSION_H)
+ #include <ruby/version.h>
+ #endif

  #if !defined(RB_METHOD_DEFINITION_DECL)
  #define VALUE_FUNC(f) reinterpret_cast<VALUE (*)(ANYARGS)>(f)
@@ -50,14 +55,26 @@ init_versions(VALUE mCouchbase)
  #define VERSION_SPLIT_(VER) (VER) / 100000, (VER) / 100 % 1000, (VER) % 100

  std::string ver;
+ ver = fmt::format("{}.{}.{}", BACKEND_VERSION_MAJOR, BACKEND_VERSION_MINOR, BACKEND_VERSION_PATCH);
+ rb_hash_aset(cb_Version, rb_id2sym(rb_intern("backend")), rb_str_freeze(rb_str_new(ver.c_str(), static_cast<long>(ver.size()))));
+ rb_hash_aset(cb_Version, rb_id2sym(rb_intern("build_timestamp")), rb_str_freeze(rb_str_new_cstr(BACKEND_BUILD_TIMESTAMP)));
+ rb_hash_aset(cb_Version, rb_id2sym(rb_intern("revision")), rb_str_freeze(rb_str_new_cstr(BACKEND_GIT_REVISION)));
+ rb_hash_aset(cb_Version, rb_id2sym(rb_intern("platform")), rb_str_freeze(rb_str_new_cstr(BACKEND_SYSTEM)));
+ rb_hash_aset(cb_Version, rb_id2sym(rb_intern("cpu")), rb_str_freeze(rb_str_new_cstr(BACKEND_SYSTEM_PROCESSOR)));
+ rb_hash_aset(cb_Version, rb_id2sym(rb_intern("cc")), rb_str_freeze(rb_str_new_cstr(BACKEND_C_COMPILER)));
+ rb_hash_aset(cb_Version, rb_id2sym(rb_intern("cxx")), rb_str_freeze(rb_str_new_cstr(BACKEND_CXX_COMPILER)));
+ #if defined(HAVE_RUBY_VERSION_H)
+ ver = fmt::format("{}.{}.{}", RUBY_API_VERSION_MAJOR, RUBY_API_VERSION_MINOR, RUBY_API_VERSION_TEENY);
+ rb_hash_aset(cb_Version, rb_id2sym(rb_intern("ruby")), rb_str_freeze(rb_str_new(ver.c_str(), static_cast<long>(ver.size()))));
+ #endif
  ver = fmt::format("{}.{}.{}", SPDLOG_VER_MAJOR, SPDLOG_VER_MINOR, SPDLOG_VER_PATCH);
  rb_hash_aset(cb_Version, rb_id2sym(rb_intern("spdlog")), rb_str_freeze(rb_str_new(ver.c_str(), static_cast<long>(ver.size()))));
  ver = fmt::format("{}.{}.{}", VERSION_SPLIT_(ASIO_VERSION));
  rb_hash_aset(cb_Version, rb_id2sym(rb_intern("asio")), rb_str_freeze(rb_str_new(ver.c_str(), static_cast<long>(ver.size()))));
+ ver = fmt::format("{}.{}.{}", SNAPPY_MAJOR, SNAPPY_MINOR, SNAPPY_PATCHLEVEL);
+ rb_hash_aset(cb_Version, rb_id2sym(rb_intern("snappy")), rb_str_freeze(rb_str_new(ver.c_str(), static_cast<long>(ver.size()))));
  ver = fmt::format("{}.{}.{}", HTTP_PARSER_VERSION_MAJOR, HTTP_PARSER_VERSION_MINOR, HTTP_PARSER_VERSION_PATCH);
  rb_hash_aset(cb_Version, rb_id2sym(rb_intern("http_parser")), rb_str_freeze(rb_str_new(ver.c_str(), static_cast<long>(ver.size()))));
- ver = fmt::format("{}.{}.{}", BACKEND_VERSION_MAJOR, BACKEND_VERSION_MINOR, BACKEND_VERSION_PATCH);
- rb_hash_aset(cb_Version, rb_id2sym(rb_intern("backend")), rb_str_freeze(rb_str_new(ver.c_str(), static_cast<long>(ver.size()))));
  rb_hash_aset(cb_Version, rb_id2sym(rb_intern("openssl_headers")), rb_str_freeze(rb_str_new_cstr(OPENSSL_VERSION_TEXT)));
  #if defined(OPENSSL_VERSION)
  rb_hash_aset(cb_Version, rb_id2sym(rb_intern("openssl_runtime")), rb_str_freeze(rb_str_new_cstr(OpenSSL_version(OPENSSL_VERSION))));
@@ -78,6 +95,10 @@ static void
  cb__backend_close(cb_backend_data* backend)
  {
  if (backend->cluster) {
+ auto barrier = std::make_shared<std::promise<void>>();
+ auto f = barrier->get_future();
+ backend->cluster->close([barrier]() { barrier->set_value(); });
+ f.wait();
  backend->cluster.reset(nullptr);
  if (backend->worker.joinable()) {
  backend->worker.join();
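cb__backend_close now blocks until the asynchronous cluster->close() has completed before resetting the cluster and joining the worker thread, using the same shared promise/future barrier seen in the other binding functions in this diff. A stripped-down sketch of that idiom, where async_operation is a stand-in for any callback-based call such as cluster->close():

#include <functional>
#include <future>
#include <memory>

// stand-in for a callback-based asynchronous API
void async_operation(std::function<void()> handler)
{
    handler(); // in real code the handler would run later, on an I/O thread
}

int main()
{
    auto barrier = std::make_shared<std::promise<void>>();
    auto f = barrier->get_future();
    async_operation([barrier]() { barrier->set_value(); }); // completion fulfils the promise
    f.wait();                                               // the caller blocks until the callback has run
    return 0;
}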
@@ -202,70 +223,71 @@ static VALUE eXattrUnknownVirtualAttribute;
  static void
  init_exceptions(VALUE mCouchbase)
  {
- eBackendError = rb_define_class_under(mCouchbase, "BackendError", rb_eStandardError);
- eAmbiguousTimeout = rb_define_class_under(mCouchbase, "AmbiguousTimeout", rb_eStandardError);
- eAuthenticationFailure = rb_define_class_under(mCouchbase, "AuthenticationFailure", rb_eStandardError);
- eBucketExists = rb_define_class_under(mCouchbase, "BucketExists", rb_eStandardError);
- eBucketNotFlushable = rb_define_class_under(mCouchbase, "BucketNotFlushable", rb_eStandardError);
- eBucketNotFound = rb_define_class_under(mCouchbase, "BucketNotFound", rb_eStandardError);
- eCasMismatch = rb_define_class_under(mCouchbase, "CasMismatch", rb_eStandardError);
- eCollectionExists = rb_define_class_under(mCouchbase, "CollectionExists", rb_eStandardError);
- eCollectionNotFound = rb_define_class_under(mCouchbase, "CollectionNotFound", rb_eStandardError);
- eCompilationFailure = rb_define_class_under(mCouchbase, "CompilationFailure", rb_eStandardError);
- eDatasetExists = rb_define_class_under(mCouchbase, "DatasetExists", rb_eStandardError);
- eDatasetNotFound = rb_define_class_under(mCouchbase, "DatasetNotFound", rb_eStandardError);
- eDataverseExists = rb_define_class_under(mCouchbase, "DataverseExists", rb_eStandardError);
- eDataverseNotFound = rb_define_class_under(mCouchbase, "DataverseNotFound", rb_eStandardError);
- eDecodingFailure = rb_define_class_under(mCouchbase, "DecodingFailure", rb_eStandardError);
- eDeltaInvalid = rb_define_class_under(mCouchbase, "DeltaInvalid", rb_eStandardError);
- eDesignDocumentNotFound = rb_define_class_under(mCouchbase, "DesignDocumentNotFound", rb_eStandardError);
- eDocumentExists = rb_define_class_under(mCouchbase, "DocumentExists", rb_eStandardError);
- eDocumentIrretrievable = rb_define_class_under(mCouchbase, "DocumentIrretrievable", rb_eStandardError);
- eDocumentLocked = rb_define_class_under(mCouchbase, "DocumentLocked", rb_eStandardError);
- eDocumentNotFound = rb_define_class_under(mCouchbase, "DocumentNotFound", rb_eStandardError);
- eDocumentNotJson = rb_define_class_under(mCouchbase, "DocumentNotJson", rb_eStandardError);
- eDurabilityAmbiguous = rb_define_class_under(mCouchbase, "DurabilityAmbiguous", rb_eStandardError);
- eDurabilityImpossible = rb_define_class_under(mCouchbase, "DurabilityImpossible", rb_eStandardError);
- eDurabilityLevelNotAvailable = rb_define_class_under(mCouchbase, "DurabilityLevelNotAvailable", rb_eStandardError);
- eDurableWriteInProgress = rb_define_class_under(mCouchbase, "DurableWriteInProgress", rb_eStandardError);
- eDurableWriteReCommitInProgress = rb_define_class_under(mCouchbase, "DurableWriteReCommitInProgress", rb_eStandardError);
- eEncodingFailure = rb_define_class_under(mCouchbase, "EncodingFailure", rb_eStandardError);
- eFeatureNotAvailable = rb_define_class_under(mCouchbase, "FeatureNotAvailable", rb_eStandardError);
- eGroupNotFound = rb_define_class_under(mCouchbase, "GroupNotFound", rb_eStandardError);
- eIndexExists = rb_define_class_under(mCouchbase, "IndexExists", rb_eStandardError);
- eIndexFailure = rb_define_class_under(mCouchbase, "IndexFailure", rb_eStandardError);
- eIndexNotFound = rb_define_class_under(mCouchbase, "IndexNotFound", rb_eStandardError);
- eInternalServerFailure = rb_define_class_under(mCouchbase, "InternalServerFailure", rb_eStandardError);
- eInvalidArgument = rb_define_class_under(mCouchbase, "InvalidArgument", rb_eStandardError);
- eJobQueueFull = rb_define_class_under(mCouchbase, "JobQueueFull", rb_eStandardError);
- eLinkNotFound = rb_define_class_under(mCouchbase, "LinkNotFound", rb_eStandardError);
- eNumberTooBig = rb_define_class_under(mCouchbase, "NumberTooBig", rb_eStandardError);
- eParsingFailure = rb_define_class_under(mCouchbase, "ParsingFailure", rb_eStandardError);
- ePathExists = rb_define_class_under(mCouchbase, "PathExists", rb_eStandardError);
- ePathInvalid = rb_define_class_under(mCouchbase, "PathInvalid", rb_eStandardError);
- ePathMismatch = rb_define_class_under(mCouchbase, "PathMismatch", rb_eStandardError);
- ePathNotFound = rb_define_class_under(mCouchbase, "PathNotFound", rb_eStandardError);
- ePathTooBig = rb_define_class_under(mCouchbase, "PathTooBig", rb_eStandardError);
- ePathTooDeep = rb_define_class_under(mCouchbase, "PathTooDeep", rb_eStandardError);
- ePlanningFailure = rb_define_class_under(mCouchbase, "PlanningFailure", rb_eStandardError);
- ePreparedStatementFailure = rb_define_class_under(mCouchbase, "PreparedStatementFailure", rb_eStandardError);
- eRequestCanceled = rb_define_class_under(mCouchbase, "RequestCanceled", rb_eStandardError);
- eScopeExists = rb_define_class_under(mCouchbase, "ScopeExists", rb_eStandardError);
- eScopeNotFound = rb_define_class_under(mCouchbase, "ScopeNotFound", rb_eStandardError);
- eServiceNotAvailable = rb_define_class_under(mCouchbase, "ServiceNotAvailable", rb_eStandardError);
- eTemporaryFailure = rb_define_class_under(mCouchbase, "TemporaryFailure", rb_eStandardError);
- eUnambiguousTimeout = rb_define_class_under(mCouchbase, "UnambiguousTimeout", rb_eStandardError);
- eUnsupportedOperation = rb_define_class_under(mCouchbase, "UnsupportedOperation", rb_eStandardError);
- eUserNotFound = rb_define_class_under(mCouchbase, "UserNotFound", rb_eStandardError);
- eUserExists = rb_define_class_under(mCouchbase, "UserExists", rb_eStandardError);
- eValueInvalid = rb_define_class_under(mCouchbase, "ValueInvalid", rb_eStandardError);
- eValueTooDeep = rb_define_class_under(mCouchbase, "ValueTooDeep", rb_eStandardError);
- eValueTooLarge = rb_define_class_under(mCouchbase, "ValueTooLarge", rb_eStandardError);
- eViewNotFound = rb_define_class_under(mCouchbase, "ViewNotFound", rb_eStandardError);
- eXattrCannotModifyVirtualAttribute = rb_define_class_under(mCouchbase, "XattrCannotModifyVirtualAttribute", rb_eStandardError);
- eXattrInvalidKeyCombo = rb_define_class_under(mCouchbase, "XattrInvalidKeyCombo", rb_eStandardError);
- eXattrUnknownMacro = rb_define_class_under(mCouchbase, "XattrUnknownMacro", rb_eStandardError);
- eXattrUnknownVirtualAttribute = rb_define_class_under(mCouchbase, "XattrUnknownVirtualAttribute", rb_eStandardError);
+ VALUE mError = rb_define_module_under(mCouchbase, "Error");
+ eBackendError = rb_define_class_under(mError, "BackendError", rb_eStandardError);
+ eAmbiguousTimeout = rb_define_class_under(mError, "AmbiguousTimeout", rb_eStandardError);
+ eAuthenticationFailure = rb_define_class_under(mError, "AuthenticationFailure", rb_eStandardError);
+ eBucketExists = rb_define_class_under(mError, "BucketExists", rb_eStandardError);
+ eBucketNotFlushable = rb_define_class_under(mError, "BucketNotFlushable", rb_eStandardError);
+ eBucketNotFound = rb_define_class_under(mError, "BucketNotFound", rb_eStandardError);
+ eCasMismatch = rb_define_class_under(mError, "CasMismatch", rb_eStandardError);
+ eCollectionExists = rb_define_class_under(mError, "CollectionExists", rb_eStandardError);
+ eCollectionNotFound = rb_define_class_under(mError, "CollectionNotFound", rb_eStandardError);
+ eCompilationFailure = rb_define_class_under(mError, "CompilationFailure", rb_eStandardError);
+ eDatasetExists = rb_define_class_under(mError, "DatasetExists", rb_eStandardError);
+ eDatasetNotFound = rb_define_class_under(mError, "DatasetNotFound", rb_eStandardError);
+ eDataverseExists = rb_define_class_under(mError, "DataverseExists", rb_eStandardError);
+ eDataverseNotFound = rb_define_class_under(mError, "DataverseNotFound", rb_eStandardError);
+ eDecodingFailure = rb_define_class_under(mError, "DecodingFailure", rb_eStandardError);
+ eDeltaInvalid = rb_define_class_under(mError, "DeltaInvalid", rb_eStandardError);
+ eDesignDocumentNotFound = rb_define_class_under(mError, "DesignDocumentNotFound", rb_eStandardError);
+ eDocumentExists = rb_define_class_under(mError, "DocumentExists", rb_eStandardError);
+ eDocumentIrretrievable = rb_define_class_under(mError, "DocumentIrretrievable", rb_eStandardError);
+ eDocumentLocked = rb_define_class_under(mError, "DocumentLocked", rb_eStandardError);
+ eDocumentNotFound = rb_define_class_under(mError, "DocumentNotFound", rb_eStandardError);
+ eDocumentNotJson = rb_define_class_under(mError, "DocumentNotJson", rb_eStandardError);
+ eDurabilityAmbiguous = rb_define_class_under(mError, "DurabilityAmbiguous", rb_eStandardError);
+ eDurabilityImpossible = rb_define_class_under(mError, "DurabilityImpossible", rb_eStandardError);
+ eDurabilityLevelNotAvailable = rb_define_class_under(mError, "DurabilityLevelNotAvailable", rb_eStandardError);
+ eDurableWriteInProgress = rb_define_class_under(mError, "DurableWriteInProgress", rb_eStandardError);
+ eDurableWriteReCommitInProgress = rb_define_class_under(mError, "DurableWriteReCommitInProgress", rb_eStandardError);
+ eEncodingFailure = rb_define_class_under(mError, "EncodingFailure", rb_eStandardError);
+ eFeatureNotAvailable = rb_define_class_under(mError, "FeatureNotAvailable", rb_eStandardError);
+ eGroupNotFound = rb_define_class_under(mError, "GroupNotFound", rb_eStandardError);
+ eIndexExists = rb_define_class_under(mError, "IndexExists", rb_eStandardError);
+ eIndexFailure = rb_define_class_under(mError, "IndexFailure", rb_eStandardError);
+ eIndexNotFound = rb_define_class_under(mError, "IndexNotFound", rb_eStandardError);
+ eInternalServerFailure = rb_define_class_under(mError, "InternalServerFailure", rb_eStandardError);
+ eInvalidArgument = rb_define_class_under(mError, "InvalidArgument", rb_eStandardError);
+ eJobQueueFull = rb_define_class_under(mError, "JobQueueFull", rb_eStandardError);
+ eLinkNotFound = rb_define_class_under(mError, "LinkNotFound", rb_eStandardError);
+ eNumberTooBig = rb_define_class_under(mError, "NumberTooBig", rb_eStandardError);
+ eParsingFailure = rb_define_class_under(mError, "ParsingFailure", rb_eStandardError);
+ ePathExists = rb_define_class_under(mError, "PathExists", rb_eStandardError);
+ ePathInvalid = rb_define_class_under(mError, "PathInvalid", rb_eStandardError);
+ ePathMismatch = rb_define_class_under(mError, "PathMismatch", rb_eStandardError);
+ ePathNotFound = rb_define_class_under(mError, "PathNotFound", rb_eStandardError);
+ ePathTooBig = rb_define_class_under(mError, "PathTooBig", rb_eStandardError);
+ ePathTooDeep = rb_define_class_under(mError, "PathTooDeep", rb_eStandardError);
+ ePlanningFailure = rb_define_class_under(mError, "PlanningFailure", rb_eStandardError);
+ ePreparedStatementFailure = rb_define_class_under(mError, "PreparedStatementFailure", rb_eStandardError);
+ eRequestCanceled = rb_define_class_under(mError, "RequestCanceled", rb_eStandardError);
+ eScopeExists = rb_define_class_under(mError, "ScopeExists", rb_eStandardError);
+ eScopeNotFound = rb_define_class_under(mError, "ScopeNotFound", rb_eStandardError);
+ eServiceNotAvailable = rb_define_class_under(mError, "ServiceNotAvailable", rb_eStandardError);
+ eTemporaryFailure = rb_define_class_under(mError, "TemporaryFailure", rb_eStandardError);
+ eUnambiguousTimeout = rb_define_class_under(mError, "UnambiguousTimeout", rb_eStandardError);
+ eUnsupportedOperation = rb_define_class_under(mError, "UnsupportedOperation", rb_eStandardError);
+ eUserNotFound = rb_define_class_under(mError, "UserNotFound", rb_eStandardError);
+ eUserExists = rb_define_class_under(mError, "UserExists", rb_eStandardError);
+ eValueInvalid = rb_define_class_under(mError, "ValueInvalid", rb_eStandardError);
+ eValueTooDeep = rb_define_class_under(mError, "ValueTooDeep", rb_eStandardError);
+ eValueTooLarge = rb_define_class_under(mError, "ValueTooLarge", rb_eStandardError);
+ eViewNotFound = rb_define_class_under(mError, "ViewNotFound", rb_eStandardError);
+ eXattrCannotModifyVirtualAttribute = rb_define_class_under(mError, "XattrCannotModifyVirtualAttribute", rb_eStandardError);
+ eXattrInvalidKeyCombo = rb_define_class_under(mError, "XattrInvalidKeyCombo", rb_eStandardError);
+ eXattrUnknownMacro = rb_define_class_under(mError, "XattrUnknownMacro", rb_eStandardError);
+ eXattrUnknownVirtualAttribute = rb_define_class_under(mError, "XattrUnknownVirtualAttribute", rb_eStandardError);
  }

  static NORETURN(void cb_raise_error_code(std::error_code ec, const std::string& message))
@@ -498,7 +520,7 @@ cb_Backend_open(VALUE self, VALUE hostname, VALUE username, VALUE password)
  auto f = barrier->get_future();
  backend->cluster->open(options, [barrier](std::error_code ec) mutable { barrier->set_value(ec); });
  if (auto ec = f.get()) {
- cb_raise_error_code(ec, "unable open cluster");
+ cb_raise_error_code(ec, fmt::format("unable open cluster at {}", options.hostname));
  }

  return Qnil;
@@ -530,14 +552,14 @@ cb_Backend_open_bucket(VALUE self, VALUE bucket)
  auto f = barrier->get_future();
  backend->cluster->open_bucket(name, [barrier](std::error_code ec) mutable { barrier->set_value(ec); });
  if (auto ec = f.get()) {
- cb_raise_error_code(ec, "unable open cluster");
+ cb_raise_error_code(ec, fmt::format("unable open bucket \"{}\"", name));
  }

  return Qtrue;
  }

  static VALUE
- cb_Backend_get(VALUE self, VALUE bucket, VALUE collection, VALUE id)
+ cb_Backend_document_get(VALUE self, VALUE bucket, VALUE collection, VALUE id)
  {
  cb_backend_data* backend = nullptr;
  TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
@@ -550,15 +572,15 @@ cb_Backend_get(VALUE self, VALUE bucket, VALUE collection, VALUE id)
  Check_Type(collection, T_STRING);
  Check_Type(id, T_STRING);

- couchbase::operations::document_id doc_id;
+ couchbase::document_id doc_id;
  doc_id.bucket.assign(RSTRING_PTR(bucket), static_cast<size_t>(RSTRING_LEN(bucket)));
  doc_id.collection.assign(RSTRING_PTR(collection), static_cast<size_t>(RSTRING_LEN(collection)));
  doc_id.key.assign(RSTRING_PTR(id), static_cast<size_t>(RSTRING_LEN(id)));

+ couchbase::operations::get_request req{ doc_id };
  auto barrier = std::make_shared<std::promise<couchbase::operations::get_response>>();
  auto f = barrier->get_future();
- backend->cluster->execute(couchbase::operations::get_request{ doc_id },
- [barrier](couchbase::operations::get_response resp) mutable { barrier->set_value(resp); });
+ backend->cluster->execute(req, [barrier](couchbase::operations::get_response resp) mutable { barrier->set_value(resp); });
  auto resp = f.get();
  if (resp.ec) {
  cb_raise_error_code(resp.ec, fmt::format("unable fetch {}", doc_id));
566
588
 
567
589
  VALUE res = rb_hash_new();
568
590
  rb_hash_aset(res, rb_id2sym(rb_intern("content")), rb_str_new(resp.value.data(), static_cast<long>(resp.value.size())));
569
- rb_hash_aset(res, rb_id2sym(rb_intern("cas")), ULONG2NUM(resp.cas));
591
+ rb_hash_aset(res, rb_id2sym(rb_intern("cas")), ULL2NUM(resp.cas));
592
+ rb_hash_aset(res, rb_id2sym(rb_intern("flags")), UINT2NUM(resp.flags));
570
593
  return res;
571
594
  }
572
595
 
573
596
  static VALUE
574
- cb_Backend_upsert(VALUE self, VALUE bucket, VALUE collection, VALUE id, VALUE content)
597
+ cb_Backend_document_get_and_lock(VALUE self, VALUE bucket, VALUE collection, VALUE id, VALUE lock_time)
575
598
  {
576
599
  cb_backend_data* backend = nullptr;
577
600
  TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
@@ -583,29 +606,33 @@ cb_Backend_upsert(VALUE self, VALUE bucket, VALUE collection, VALUE id, VALUE co
583
606
  Check_Type(bucket, T_STRING);
584
607
  Check_Type(collection, T_STRING);
585
608
  Check_Type(id, T_STRING);
609
+ Check_Type(lock_time, T_FIXNUM);
586
610
 
587
- couchbase::operations::document_id doc_id;
611
+ couchbase::document_id doc_id;
588
612
  doc_id.bucket.assign(RSTRING_PTR(bucket), static_cast<size_t>(RSTRING_LEN(bucket)));
589
613
  doc_id.collection.assign(RSTRING_PTR(collection), static_cast<size_t>(RSTRING_LEN(collection)));
590
614
  doc_id.key.assign(RSTRING_PTR(id), static_cast<size_t>(RSTRING_LEN(id)));
591
- std::string value(RSTRING_PTR(content), static_cast<size_t>(RSTRING_LEN(content)));
592
615
 
593
- auto barrier = std::make_shared<std::promise<couchbase::operations::upsert_response>>();
616
+ couchbase::operations::get_and_lock_request req{ doc_id };
617
+ req.lock_time = NUM2UINT(lock_time);
618
+
619
+ auto barrier = std::make_shared<std::promise<couchbase::operations::get_and_lock_response>>();
594
620
  auto f = barrier->get_future();
595
- backend->cluster->execute(couchbase::operations::upsert_request{ doc_id, value },
596
- [barrier](couchbase::operations::upsert_response resp) mutable { barrier->set_value(resp); });
621
+ backend->cluster->execute(req, [barrier](couchbase::operations::get_and_lock_response resp) mutable { barrier->set_value(resp); });
597
622
  auto resp = f.get();
598
623
  if (resp.ec) {
599
- cb_raise_error_code(resp.ec, fmt::format("unable upsert {}", doc_id));
624
+ cb_raise_error_code(resp.ec, fmt::format("unable lock and fetch {}", doc_id));
600
625
  }
601
626
 
602
627
  VALUE res = rb_hash_new();
603
- rb_hash_aset(res, rb_id2sym(rb_intern("cas")), ULONG2NUM(resp.cas));
628
+ rb_hash_aset(res, rb_id2sym(rb_intern("content")), rb_str_new(resp.value.data(), static_cast<long>(resp.value.size())));
629
+ rb_hash_aset(res, rb_id2sym(rb_intern("cas")), ULL2NUM(resp.cas));
630
+ rb_hash_aset(res, rb_id2sym(rb_intern("flags")), UINT2NUM(resp.flags));
604
631
  return res;
605
632
  }
606
633
 
607
634
  static VALUE
608
- cb_Backend_remove(VALUE self, VALUE bucket, VALUE collection, VALUE id)
635
+ cb_Backend_document_get_and_touch(VALUE self, VALUE bucket, VALUE collection, VALUE id, VALUE expiration)
609
636
  {
610
637
  cb_backend_data* backend = nullptr;
611
638
  TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
@@ -617,128 +644,319 @@ cb_Backend_remove(VALUE self, VALUE bucket, VALUE collection, VALUE id)
617
644
  Check_Type(bucket, T_STRING);
618
645
  Check_Type(collection, T_STRING);
619
646
  Check_Type(id, T_STRING);
647
+ Check_Type(expiration, T_FIXNUM);
620
648
 
621
- couchbase::operations::document_id doc_id;
649
+ couchbase::document_id doc_id;
622
650
  doc_id.bucket.assign(RSTRING_PTR(bucket), static_cast<size_t>(RSTRING_LEN(bucket)));
623
651
  doc_id.collection.assign(RSTRING_PTR(collection), static_cast<size_t>(RSTRING_LEN(collection)));
624
652
  doc_id.key.assign(RSTRING_PTR(id), static_cast<size_t>(RSTRING_LEN(id)));
625
653
 
626
- auto barrier = std::make_shared<std::promise<couchbase::operations::remove_response>>();
654
+ couchbase::operations::get_and_touch_request req{ doc_id };
655
+ req.expiration = NUM2UINT(expiration);
656
+
657
+ auto barrier = std::make_shared<std::promise<couchbase::operations::get_and_touch_response>>();
627
658
  auto f = barrier->get_future();
628
- backend->cluster->execute(couchbase::operations::remove_request{ doc_id },
629
- [barrier](couchbase::operations::remove_response resp) mutable { barrier->set_value(resp); });
659
+ backend->cluster->execute(req, [barrier](couchbase::operations::get_and_touch_response resp) mutable { barrier->set_value(resp); });
630
660
  auto resp = f.get();
631
661
  if (resp.ec) {
632
- cb_raise_error_code(resp.ec, fmt::format("unable to remove {}", doc_id));
662
+ cb_raise_error_code(resp.ec, fmt::format("unable fetch and touch {}", doc_id));
633
663
  }
634
664
 
635
665
  VALUE res = rb_hash_new();
636
- rb_hash_aset(res, rb_id2sym(rb_intern("cas")), ULONG2NUM(resp.cas));
666
+ rb_hash_aset(res, rb_id2sym(rb_intern("content")), rb_str_new(resp.value.data(), static_cast<long>(resp.value.size())));
667
+ rb_hash_aset(res, rb_id2sym(rb_intern("cas")), ULL2NUM(resp.cas));
668
+ rb_hash_aset(res, rb_id2sym(rb_intern("flags")), UINT2NUM(resp.flags));
637
669
  return res;
638
670
  }
639
671
 
672
+ template<typename Response>
640
673
  static VALUE
641
- cb__map_subdoc_opcode(couchbase::protocol::subdoc_opcode opcode)
674
+ cb__extract_mutation_result(Response resp)
642
675
  {
643
- switch (opcode) {
644
- case couchbase::protocol::subdoc_opcode::get:
645
- return rb_id2sym(rb_intern("get"));
676
+ VALUE res = rb_hash_new();
677
+ rb_hash_aset(res, rb_id2sym(rb_intern("cas")), ULL2NUM(resp.cas));
678
+ VALUE token = rb_hash_new();
679
+ rb_hash_aset(token, rb_id2sym(rb_intern("partition_uuid")), ULL2NUM(resp.token.partition_uuid));
680
+ rb_hash_aset(token, rb_id2sym(rb_intern("sequence_number")), ULONG2NUM(resp.token.sequence_number));
681
+ rb_hash_aset(token, rb_id2sym(rb_intern("partition_id")), UINT2NUM(resp.token.partition_id));
682
+ rb_hash_aset(token,
683
+ rb_id2sym(rb_intern("bucket_name")),
684
+ rb_str_new(resp.token.bucket_name.c_str(), static_cast<long>(resp.token.bucket_name.size())));
685
+ rb_hash_aset(res, rb_id2sym(rb_intern("mutation_token")), token);
686
+ return res;
687
+ }
646
688
 
647
- case couchbase::protocol::subdoc_opcode::exists:
648
- return rb_id2sym(rb_intern("exists"));
689
+ static VALUE
690
+ cb_Backend_document_touch(VALUE self, VALUE bucket, VALUE collection, VALUE id, VALUE expiration)
691
+ {
692
+ cb_backend_data* backend = nullptr;
693
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
649
694
 
650
- case couchbase::protocol::subdoc_opcode::dict_add:
651
- return rb_id2sym(rb_intern("dict_add"));
695
+ if (!backend->cluster) {
696
+ rb_raise(rb_eArgError, "Cluster has been closed already");
697
+ }
652
698
 
653
- case couchbase::protocol::subdoc_opcode::dict_upsert:
654
- return rb_id2sym(rb_intern("dict_upsert"));
699
+ Check_Type(bucket, T_STRING);
700
+ Check_Type(collection, T_STRING);
701
+ Check_Type(id, T_STRING);
702
+ Check_Type(expiration, T_FIXNUM);
655
703
 
656
- case couchbase::protocol::subdoc_opcode::remove:
657
- return rb_id2sym(rb_intern("remove"));
704
+ couchbase::document_id doc_id;
705
+ doc_id.bucket.assign(RSTRING_PTR(bucket), static_cast<size_t>(RSTRING_LEN(bucket)));
706
+ doc_id.collection.assign(RSTRING_PTR(collection), static_cast<size_t>(RSTRING_LEN(collection)));
707
+ doc_id.key.assign(RSTRING_PTR(id), static_cast<size_t>(RSTRING_LEN(id)));
658
708
 
659
- case couchbase::protocol::subdoc_opcode::replace:
660
- return rb_id2sym(rb_intern("replace"));
709
+ couchbase::operations::touch_request req{ doc_id };
710
+ req.expiration = NUM2UINT(expiration);
661
711
 
662
- case couchbase::protocol::subdoc_opcode::array_push_last:
663
- return rb_id2sym(rb_intern("array_push_last"));
712
+ auto barrier = std::make_shared<std::promise<couchbase::operations::touch_response>>();
713
+ auto f = barrier->get_future();
714
+ backend->cluster->execute(req, [barrier](couchbase::operations::touch_response resp) mutable { barrier->set_value(resp); });
715
+ auto resp = f.get();
716
+ if (resp.ec) {
717
+ cb_raise_error_code(resp.ec, fmt::format("unable to touch {}", doc_id));
718
+ }
664
719
 
665
- case couchbase::protocol::subdoc_opcode::array_push_first:
666
- return rb_id2sym(rb_intern("array_push_first"));
720
+ VALUE res = rb_hash_new();
721
+ rb_hash_aset(res, rb_id2sym(rb_intern("cas")), ULL2NUM(resp.cas));
722
+ return res;
723
+ }
667
724
 
668
- case couchbase::protocol::subdoc_opcode::array_insert:
669
- return rb_id2sym(rb_intern("array_insert"));
725
+ static VALUE
726
+ cb_Backend_document_exists(VALUE self, VALUE bucket, VALUE collection, VALUE id)
727
+ {
728
+ cb_backend_data* backend = nullptr;
729
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
670
730
 
671
- case couchbase::protocol::subdoc_opcode::array_add_unique:
672
- return rb_id2sym(rb_intern("array_add_unique"));
731
+ if (!backend->cluster) {
732
+ rb_raise(rb_eArgError, "Cluster has been closed already");
733
+ }
673
734
 
674
- case couchbase::protocol::subdoc_opcode::counter:
675
- return rb_id2sym(rb_intern("counter"));
735
+ Check_Type(bucket, T_STRING);
736
+ Check_Type(collection, T_STRING);
737
+ Check_Type(id, T_STRING);
676
738
 
677
- case couchbase::protocol::subdoc_opcode::get_count:
678
- return rb_id2sym(rb_intern("count"));
739
+ couchbase::document_id doc_id;
740
+ doc_id.bucket.assign(RSTRING_PTR(bucket), static_cast<size_t>(RSTRING_LEN(bucket)));
741
+ doc_id.collection.assign(RSTRING_PTR(collection), static_cast<size_t>(RSTRING_LEN(collection)));
742
+ doc_id.key.assign(RSTRING_PTR(id), static_cast<size_t>(RSTRING_LEN(id)));
743
+
744
+ couchbase::operations::exists_request req{ doc_id };
745
+
746
+ auto barrier = std::make_shared<std::promise<couchbase::operations::exists_response>>();
747
+ auto f = barrier->get_future();
748
+ backend->cluster->execute(req, [barrier](couchbase::operations::exists_response resp) mutable { barrier->set_value(resp); });
749
+ auto resp = f.get();
750
+ if (resp.ec) {
751
+ cb_raise_error_code(resp.ec, fmt::format("unable to exists {}", doc_id));
679
752
  }
680
- return rb_id2sym(rb_intern("unknown"));
753
+
754
+ VALUE res = rb_hash_new();
755
+ rb_hash_aset(res, rb_id2sym(rb_intern("cas")), ULL2NUM(resp.cas));
756
+ rb_hash_aset(res, rb_id2sym(rb_intern("partition_id")), UINT2NUM(resp.partition_id));
757
+ switch (resp.status) {
758
+ case couchbase::operations::exists_response::observe_status::invalid:
759
+ rb_hash_aset(res, rb_id2sym(rb_intern("status")), rb_id2sym(rb_intern("invalid")));
760
+ break;
761
+ case couchbase::operations::exists_response::observe_status::found:
762
+ rb_hash_aset(res, rb_id2sym(rb_intern("status")), rb_id2sym(rb_intern("found")));
763
+ break;
764
+ case couchbase::operations::exists_response::observe_status::not_found:
765
+ rb_hash_aset(res, rb_id2sym(rb_intern("status")), rb_id2sym(rb_intern("not_found")));
766
+ break;
767
+ case couchbase::operations::exists_response::observe_status::persisted:
768
+ rb_hash_aset(res, rb_id2sym(rb_intern("status")), rb_id2sym(rb_intern("persisted")));
769
+ break;
770
+ case couchbase::operations::exists_response::observe_status::logically_deleted:
771
+ rb_hash_aset(res, rb_id2sym(rb_intern("status")), rb_id2sym(rb_intern("logically_deleted")));
772
+ break;
773
+ }
774
+ return res;
681
775
  }
682
776
 
683
777
  static VALUE
684
- cb__map_subdoc_status(couchbase::protocol::status status)
778
+ cb_Backend_document_unlock(VALUE self, VALUE bucket, VALUE collection, VALUE id, VALUE cas)
685
779
  {
686
- switch (status) {
687
- case couchbase::protocol::status::success:
688
- return rb_id2sym(rb_intern("success"));
780
+ cb_backend_data* backend = nullptr;
781
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
689
782
 
690
- case couchbase::protocol::status::subdoc_path_mismatch:
691
- return rb_id2sym(rb_intern("path_mismatch"));
783
+ if (!backend->cluster) {
784
+ rb_raise(rb_eArgError, "Cluster has been closed already");
785
+ }
692
786
 
693
- case couchbase::protocol::status::subdoc_path_invalid:
694
- return rb_id2sym(rb_intern("path_invalid"));
787
+ Check_Type(bucket, T_STRING);
788
+ Check_Type(collection, T_STRING);
789
+ Check_Type(id, T_STRING);
695
790
 
696
- case couchbase::protocol::status::subdoc_path_too_big:
697
- return rb_id2sym(rb_intern("path_too_big"));
791
+ couchbase::document_id doc_id;
792
+ doc_id.bucket.assign(RSTRING_PTR(bucket), static_cast<size_t>(RSTRING_LEN(bucket)));
793
+ doc_id.collection.assign(RSTRING_PTR(collection), static_cast<size_t>(RSTRING_LEN(collection)));
794
+ doc_id.key.assign(RSTRING_PTR(id), static_cast<size_t>(RSTRING_LEN(id)));
698
795
 
699
- case couchbase::protocol::status::subdoc_value_cannot_insert:
700
- return rb_id2sym(rb_intern("value_cannot_insert"));
796
+ couchbase::operations::unlock_request req{ doc_id };
797
+ switch (TYPE(cas)) {
798
+ case T_FIXNUM:
799
+ case T_BIGNUM:
800
+ req.cas = NUM2ULL(cas);
801
+ break;
802
+ default:
803
+ rb_raise(rb_eArgError, "CAS must be an Integer");
804
+ }
701
805
 
702
- case couchbase::protocol::status::subdoc_doc_not_json:
703
- return rb_id2sym(rb_intern("doc_not_json"));
806
+ auto barrier = std::make_shared<std::promise<couchbase::operations::unlock_response>>();
807
+ auto f = barrier->get_future();
808
+ backend->cluster->execute(req, [barrier](couchbase::operations::unlock_response resp) mutable { barrier->set_value(resp); });
809
+ auto resp = f.get();
810
+ if (resp.ec) {
811
+ cb_raise_error_code(resp.ec, fmt::format("unable to unlock {}", doc_id));
812
+ }
704
813
 
705
- case couchbase::protocol::status::subdoc_num_range_error:
706
- return rb_id2sym(rb_intern("num_range"));
814
+ VALUE res = rb_hash_new();
815
+ rb_hash_aset(res, rb_id2sym(rb_intern("cas")), ULL2NUM(resp.cas));
816
+ return res;
817
+ }
707
818
 
708
- case couchbase::protocol::status::subdoc_delta_invalid:
709
- return rb_id2sym(rb_intern("delta_invalid"));
819
+ static VALUE
820
+ cb_Backend_document_upsert(VALUE self, VALUE bucket, VALUE collection, VALUE id, VALUE content, VALUE flags, VALUE options)
821
+ {
822
+ cb_backend_data* backend = nullptr;
823
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
710
824
 
711
- case couchbase::protocol::status::subdoc_path_exists:
712
- return rb_id2sym(rb_intern("path_exists"));
825
+ if (!backend->cluster) {
826
+ rb_raise(rb_eArgError, "Cluster has been closed already");
827
+ }
713
828
 
714
- case couchbase::protocol::status::subdoc_value_too_deep:
715
- return rb_id2sym(rb_intern("value_too_deep"));
829
+ Check_Type(bucket, T_STRING);
830
+ Check_Type(collection, T_STRING);
831
+ Check_Type(id, T_STRING);
832
+ Check_Type(content, T_STRING);
833
+ Check_Type(flags, T_FIXNUM);
716
834
 
717
- case couchbase::protocol::status::subdoc_invalid_combo:
718
- return rb_id2sym(rb_intern("invalid_combo"));
835
+ couchbase::document_id doc_id;
836
+ doc_id.bucket.assign(RSTRING_PTR(bucket), static_cast<size_t>(RSTRING_LEN(bucket)));
837
+ doc_id.collection.assign(RSTRING_PTR(collection), static_cast<size_t>(RSTRING_LEN(collection)));
838
+ doc_id.key.assign(RSTRING_PTR(id), static_cast<size_t>(RSTRING_LEN(id)));
839
+ std::string value(RSTRING_PTR(content), static_cast<size_t>(RSTRING_LEN(content)));
719
840
 
720
- case couchbase::protocol::status::subdoc_xattr_invalid_flag_combo:
721
- return rb_id2sym(rb_intern("xattr_invalid_flag_combo"));
841
+ couchbase::operations::upsert_request req{ doc_id, value };
842
+ req.flags = FIX2UINT(flags);
843
+
844
+ if (!NIL_P(options)) {
845
+ Check_Type(options, T_HASH);
846
+ VALUE durability_level = rb_hash_aref(options, rb_id2sym(rb_intern("durability_level")));
847
+ if (!NIL_P(durability_level)) {
848
+ Check_Type(durability_level, T_SYMBOL);
849
+ ID level = rb_sym2id(durability_level);
850
+ if (level == rb_intern("none")) {
851
+ req.durability_level = couchbase::protocol::durability_level::none;
852
+ } else if (level == rb_intern("majority_and_persist_to_active")) {
853
+ req.durability_level = couchbase::protocol::durability_level::majority_and_persist_to_active;
854
+ } else if (level == rb_intern("persist_to_majority")) {
855
+ req.durability_level = couchbase::protocol::durability_level::persist_to_majority;
856
+ } else {
857
+ rb_raise(rb_eArgError, "Unknown durability level");
858
+ }
859
+ VALUE durability_timeout = rb_hash_aref(options, rb_id2sym(rb_intern("durability_timeout")));
860
+ if (!NIL_P(durability_timeout)) {
861
+ Check_Type(durability_timeout, T_FIXNUM);
862
+ req.durability_timeout = FIX2UINT(durability_timeout);
863
+ }
864
+ }
865
+ VALUE expiration = rb_hash_aref(options, rb_id2sym(rb_intern("expiration")));
866
+ if (!NIL_P(expiration)) {
867
+ Check_Type(expiration, T_FIXNUM);
868
+ req.expiration = FIX2UINT(expiration);
869
+ }
870
+ }
722
871
 
723
- case couchbase::protocol::status::subdoc_xattr_invalid_key_combo:
724
- return rb_id2sym(rb_intern("xattr_invalid_key_combo"));
872
+ auto barrier = std::make_shared<std::promise<couchbase::operations::upsert_response>>();
873
+ auto f = barrier->get_future();
874
+ backend->cluster->execute(req, [barrier](couchbase::operations::upsert_response resp) mutable { barrier->set_value(resp); });
875
+ auto resp = f.get();
876
+ if (resp.ec) {
877
+ cb_raise_error_code(resp.ec, fmt::format("unable to upsert {}", doc_id));
878
+ }
725
879
 
726
- case couchbase::protocol::status::subdoc_xattr_unknown_macro:
727
- return rb_id2sym(rb_intern("xattr_unknown_macro"));
880
+ return cb__extract_mutation_result(resp);
881
+ }
728
882
 
729
- case couchbase::protocol::status::subdoc_xattr_unknown_vattr:
730
- return rb_id2sym(rb_intern("xattr_unknown_vattr"));
883
+ static VALUE
884
+ cb_Backend_document_replace(VALUE self, VALUE bucket, VALUE collection, VALUE id, VALUE content, VALUE flags, VALUE options)
885
+ {
886
+ cb_backend_data* backend = nullptr;
887
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
731
888
 
732
- case couchbase::protocol::status::subdoc_xattr_cannot_modify_vattr:
733
- return rb_id2sym(rb_intern("xattr_cannot_modify_vattr"));
889
+ if (!backend->cluster) {
890
+ rb_raise(rb_eArgError, "Cluster has been closed already");
891
+ }
734
892
 
735
- default:
736
- return rb_id2sym(rb_intern("unknown"));
893
+ Check_Type(bucket, T_STRING);
894
+ Check_Type(collection, T_STRING);
895
+ Check_Type(id, T_STRING);
896
+ Check_Type(content, T_STRING);
897
+ Check_Type(flags, T_FIXNUM);
898
+
899
+ couchbase::document_id doc_id;
900
+ doc_id.bucket.assign(RSTRING_PTR(bucket), static_cast<size_t>(RSTRING_LEN(bucket)));
901
+ doc_id.collection.assign(RSTRING_PTR(collection), static_cast<size_t>(RSTRING_LEN(collection)));
902
+ doc_id.key.assign(RSTRING_PTR(id), static_cast<size_t>(RSTRING_LEN(id)));
903
+ std::string value(RSTRING_PTR(content), static_cast<size_t>(RSTRING_LEN(content)));
904
+
905
+ couchbase::operations::replace_request req{ doc_id, value };
906
+ req.flags = FIX2UINT(flags);
907
+
908
+ if (!NIL_P(options)) {
909
+ Check_Type(options, T_HASH);
910
+ VALUE durability_level = rb_hash_aref(options, rb_id2sym(rb_intern("durability_level")));
911
+ if (!NIL_P(durability_level)) {
912
+ Check_Type(durability_level, T_SYMBOL);
913
+ ID level = rb_sym2id(durability_level);
914
+ if (level == rb_intern("none")) {
915
+ req.durability_level = couchbase::protocol::durability_level::none;
916
+ } else if (level == rb_intern("majority_and_persist_to_active")) {
917
+ req.durability_level = couchbase::protocol::durability_level::majority_and_persist_to_active;
918
+ } else if (level == rb_intern("persist_to_majority")) {
919
+ req.durability_level = couchbase::protocol::durability_level::persist_to_majority;
920
+ } else {
921
+ rb_raise(rb_eArgError, "Unknown durability level");
922
+ }
923
+ VALUE durability_timeout = rb_hash_aref(options, rb_id2sym(rb_intern("durability_timeout")));
924
+ if (!NIL_P(durability_timeout)) {
925
+ Check_Type(durability_timeout, T_FIXNUM);
926
+ req.durability_timeout = FIX2UINT(durability_timeout);
927
+ }
928
+ }
929
+ VALUE expiration = rb_hash_aref(options, rb_id2sym(rb_intern("expiration")));
930
+ if (!NIL_P(expiration)) {
931
+ Check_Type(expiration, T_FIXNUM);
932
+ req.expiration = FIX2UINT(expiration);
933
+ }
934
+ VALUE cas = rb_hash_aref(options, rb_id2sym(rb_intern("cas")));
935
+ if (!NIL_P(cas)) {
936
+ switch (TYPE(cas)) {
937
+ case T_FIXNUM:
938
+ case T_BIGNUM:
939
+ req.cas = NUM2ULL(cas);
940
+ break;
941
+ default:
942
+ rb_raise(rb_eArgError, "CAS must be an Integer");
943
+ }
944
+ }
945
+ }
946
+
947
+ auto barrier = std::make_shared<std::promise<couchbase::operations::replace_response>>();
948
+ auto f = barrier->get_future();
949
+ backend->cluster->execute(req, [barrier](couchbase::operations::replace_response resp) mutable { barrier->set_value(resp); });
950
+ auto resp = f.get();
951
+ if (resp.ec) {
952
+ cb_raise_error_code(resp.ec, fmt::format("unable to replace {}", doc_id));
737
953
  }
954
+
955
+ return cb__extract_mutation_result(resp);
738
956
  }
739
957
 
740
958
  static VALUE
741
- cb_Backend_lookup_in(VALUE self, VALUE bucket, VALUE collection, VALUE id, VALUE access_deleted, VALUE specs)
959
+ cb_Backend_document_insert(VALUE self, VALUE bucket, VALUE collection, VALUE id, VALUE content, VALUE flags, VALUE options)
742
960
  {
743
961
  cb_backend_data* backend = nullptr;
744
962
  TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
@@ -750,74 +968,59 @@ cb_Backend_lookup_in(VALUE self, VALUE bucket, VALUE collection, VALUE id, VALUE
750
968
  Check_Type(bucket, T_STRING);
751
969
  Check_Type(collection, T_STRING);
752
970
  Check_Type(id, T_STRING);
753
- Check_Type(specs, T_ARRAY);
754
- if (RARRAY_LEN(specs) <= 0) {
755
- rb_raise(rb_eArgError, "Array with specs cannot be empty");
756
- }
971
+ Check_Type(content, T_STRING);
972
+ Check_Type(flags, T_FIXNUM);
757
973
 
758
- couchbase::operations::document_id doc_id;
974
+ couchbase::document_id doc_id;
759
975
  doc_id.bucket.assign(RSTRING_PTR(bucket), static_cast<size_t>(RSTRING_LEN(bucket)));
760
976
  doc_id.collection.assign(RSTRING_PTR(collection), static_cast<size_t>(RSTRING_LEN(collection)));
761
977
  doc_id.key.assign(RSTRING_PTR(id), static_cast<size_t>(RSTRING_LEN(id)));
978
+ std::string value(RSTRING_PTR(content), static_cast<size_t>(RSTRING_LEN(content)));
762
979
 
763
- couchbase::operations::lookup_in_request req{ doc_id };
764
- req.access_deleted = RTEST(access_deleted);
765
- auto entries_size = static_cast<size_t>(RARRAY_LEN(specs));
766
- req.specs.entries.reserve(entries_size);
767
- for (size_t i = 0; i < entries_size; ++i) {
768
- VALUE entry = rb_ary_entry(specs, static_cast<long>(i));
769
- Check_Type(entry, T_HASH);
770
- VALUE operation = rb_hash_aref(entry, rb_id2sym(rb_intern("opcode")));
771
- Check_Type(operation, T_SYMBOL);
772
- ID operation_id = rb_sym2id(operation);
773
- couchbase::protocol::subdoc_opcode opcode;
774
- if (operation_id == rb_intern("get") || operation_id == rb_intern("get_doc")) {
775
- opcode = couchbase::protocol::subdoc_opcode::get;
776
- } else if (operation_id == rb_intern("exists")) {
777
- opcode = couchbase::protocol::subdoc_opcode::exists;
778
- } else if (operation_id == rb_intern("count")) {
779
- opcode = couchbase::protocol::subdoc_opcode::get_count;
780
- } else {
781
- rb_raise(rb_eArgError, "Unsupported operation for subdocument lookup");
980
+ couchbase::operations::insert_request req{ doc_id, value };
981
+ req.flags = FIX2UINT(flags);
982
+
983
+ if (!NIL_P(options)) {
984
+ Check_Type(options, T_HASH);
985
+ VALUE durability_level = rb_hash_aref(options, rb_id2sym(rb_intern("durability_level")));
986
+ if (!NIL_P(durability_level)) {
987
+ Check_Type(durability_level, T_SYMBOL);
988
+ ID level = rb_sym2id(durability_level);
989
+ if (level == rb_intern("none")) {
990
+ req.durability_level = couchbase::protocol::durability_level::none;
991
+ } else if (level == rb_intern("majority_and_persist_to_active")) {
992
+ req.durability_level = couchbase::protocol::durability_level::majority_and_persist_to_active;
993
+ } else if (level == rb_intern("persist_to_majority")) {
994
+ req.durability_level = couchbase::protocol::durability_level::persist_to_majority;
995
+ } else {
996
+ rb_raise(rb_eArgError, "Unknown durability level");
997
+ }
998
+ VALUE durability_timeout = rb_hash_aref(options, rb_id2sym(rb_intern("durability_timeout")));
999
+ if (!NIL_P(durability_timeout)) {
1000
+ Check_Type(durability_timeout, T_FIXNUM);
1001
+ req.durability_timeout = FIX2UINT(durability_timeout);
1002
+ }
1003
+ }
1004
+ VALUE expiration = rb_hash_aref(options, rb_id2sym(rb_intern("expiration")));
1005
+ if (!NIL_P(expiration)) {
1006
+ Check_Type(expiration, T_FIXNUM);
1007
+ req.expiration = FIX2UINT(expiration);
782
1008
  }
783
- bool xattr = RTEST(rb_hash_aref(entry, rb_id2sym(rb_intern("xattr"))));
784
- VALUE path = rb_hash_aref(entry, rb_id2sym(rb_intern("path")));
785
- Check_Type(path, T_STRING);
786
- req.specs.add_spec(opcode, xattr, std::string(RSTRING_PTR(path), static_cast<size_t>(RSTRING_LEN(path))));
787
1009
  }
788
1010
 
789
- auto barrier = std::make_shared<std::promise<couchbase::operations::lookup_in_response>>();
1011
+ auto barrier = std::make_shared<std::promise<couchbase::operations::insert_response>>();
790
1012
  auto f = barrier->get_future();
791
- backend->cluster->execute(req, [barrier](couchbase::operations::lookup_in_response resp) mutable { barrier->set_value(resp); });
1013
+ backend->cluster->execute(req, [barrier](couchbase::operations::insert_response resp) mutable { barrier->set_value(resp); });
792
1014
  auto resp = f.get();
793
1015
  if (resp.ec) {
794
- cb_raise_error_code(resp.ec, fmt::format("unable fetch {}", doc_id));
1016
+ cb_raise_error_code(resp.ec, fmt::format("unable to insert {}", doc_id));
795
1017
  }
796
1018
 
797
- VALUE res = rb_hash_new();
798
- rb_hash_aset(res, rb_id2sym(rb_intern("cas")), ULONG2NUM(resp.cas));
799
- VALUE fields = rb_ary_new_capa(static_cast<long>(resp.fields.size()));
800
- rb_hash_aset(res, rb_id2sym(rb_intern("fields")), fields);
801
- for (size_t i = 0; i < resp.fields.size(); ++i) {
802
- VALUE entry = rb_hash_new();
803
- rb_hash_aset(entry, rb_id2sym(rb_intern("exists")), resp.fields[i].exists ? Qtrue : Qfalse);
804
- rb_hash_aset(
805
- entry, rb_id2sym(rb_intern("path")), rb_str_new(resp.fields[i].path.data(), static_cast<long>(resp.fields[i].path.size())));
806
- rb_hash_aset(
807
- entry, rb_id2sym(rb_intern("value")), rb_str_new(resp.fields[i].value.data(), static_cast<long>(resp.fields[i].value.size())));
808
- rb_hash_aset(entry, rb_id2sym(rb_intern("status")), cb__map_subdoc_status(resp.fields[i].status));
809
- if (resp.fields[i].opcode == couchbase::protocol::subdoc_opcode::get && resp.fields[i].path.empty()) {
810
- rb_hash_aset(entry, rb_id2sym(rb_intern("type")), rb_id2sym(rb_intern("get_doc")));
811
- } else {
812
- rb_hash_aset(entry, rb_id2sym(rb_intern("type")), cb__map_subdoc_opcode(resp.fields[i].opcode));
813
- }
814
- rb_ary_store(fields, static_cast<long>(i), entry);
815
- }
816
- return res;
1019
+ return cb__extract_mutation_result(resp);
817
1020
  }
818
1021
 
819
1022
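For reference, the only option keys this insert binding reads are the ones checked above (:durability_level, :durability_timeout and :expiration, with :durability_timeout consulted only when :durability_level is present). A minimal Ruby-side sketch, assuming the backend exposes this function as `document_insert` (the Ruby method name and the `backend` receiver are assumptions, not shown in this hunk):

    # Hypothetical call shape; only these keys are read by the binding above.
    options = {
      durability_level: :persist_to_majority,  # :none, :majority_and_persist_to_active or :persist_to_majority
      durability_timeout: 10_000,              # Integer, read via FIX2UINT
      expiration: 3600                         # Integer TTL in seconds, read via FIX2UINT
    }
    backend.document_insert("travel-sample", "_default", "airline_10",
                            '{"name": "40-Mile Air"}', 0, options)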
  static VALUE
820
- cb_Backend_mutate_in(VALUE self, VALUE bucket, VALUE collection, VALUE id, VALUE access_deleted, VALUE specs)
1023
+ cb_Backend_document_remove(VALUE self, VALUE bucket, VALUE collection, VALUE id, VALUE options)
821
1024
  {
822
1025
  cb_backend_data* backend = nullptr;
823
1026
  TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
@@ -829,122 +1032,1414 @@ cb_Backend_mutate_in(VALUE self, VALUE bucket, VALUE collection, VALUE id, VALUE
829
1032
  Check_Type(bucket, T_STRING);
830
1033
  Check_Type(collection, T_STRING);
831
1034
  Check_Type(id, T_STRING);
832
- Check_Type(specs, T_ARRAY);
833
- if (RARRAY_LEN(specs) <= 0) {
834
- rb_raise(rb_eArgError, "Array with specs cannot be empty");
835
- }
836
1035
 
837
- couchbase::operations::document_id doc_id;
1036
+ couchbase::document_id doc_id;
838
1037
  doc_id.bucket.assign(RSTRING_PTR(bucket), static_cast<size_t>(RSTRING_LEN(bucket)));
839
1038
  doc_id.collection.assign(RSTRING_PTR(collection), static_cast<size_t>(RSTRING_LEN(collection)));
840
1039
  doc_id.key.assign(RSTRING_PTR(id), static_cast<size_t>(RSTRING_LEN(id)));
841
1040
 
842
- couchbase::operations::mutate_in_request req{ doc_id };
843
- req.access_deleted = RTEST(access_deleted);
844
- auto entries_size = static_cast<size_t>(RARRAY_LEN(specs));
845
- req.specs.entries.reserve(entries_size);
846
- for (size_t i = 0; i < entries_size; ++i) {
847
- VALUE entry = rb_ary_entry(specs, static_cast<long>(i));
848
- Check_Type(entry, T_HASH);
849
- VALUE operation = rb_hash_aref(entry, rb_id2sym(rb_intern("opcode")));
850
- Check_Type(operation, T_SYMBOL);
851
- ID operation_id = rb_sym2id(operation);
1041
+ couchbase::operations::remove_request req{ doc_id };
1042
+ if (!NIL_P(options)) {
1043
+ Check_Type(options, T_HASH);
1044
+ VALUE durability_level = rb_hash_aref(options, rb_id2sym(rb_intern("durability_level")));
1045
+ if (!NIL_P(durability_level)) {
1046
+ Check_Type(durability_level, T_SYMBOL);
1047
+ ID level = rb_sym2id(durability_level);
1048
+ if (level == rb_intern("none")) {
1049
+ req.durability_level = couchbase::protocol::durability_level::none;
1050
+ } else if (level == rb_intern("majority_and_persist_to_active")) {
1051
+ req.durability_level = couchbase::protocol::durability_level::majority_and_persist_to_active;
1052
+ } else if (level == rb_intern("persist_to_majority")) {
1053
+ req.durability_level = couchbase::protocol::durability_level::persist_to_majority;
1054
+ } else {
1055
+ rb_raise(rb_eArgError, "Unknown durability level");
1056
+ }
1057
+ VALUE durability_timeout = rb_hash_aref(options, rb_id2sym(rb_intern("durability_timeout")));
1058
+ if (!NIL_P(durability_timeout)) {
1059
+ Check_Type(durability_timeout, T_FIXNUM);
1060
+ req.durability_timeout = FIX2UINT(durability_timeout);
1061
+ }
1062
+ }
1063
+ }
1064
+
1065
+ auto barrier = std::make_shared<std::promise<couchbase::operations::remove_response>>();
1066
+ auto f = barrier->get_future();
1067
+ backend->cluster->execute(req, [barrier](couchbase::operations::remove_response resp) mutable { barrier->set_value(resp); });
1068
+ auto resp = f.get();
1069
+ if (resp.ec) {
1070
+ cb_raise_error_code(resp.ec, fmt::format("unable to remove {}", doc_id));
1071
+ }
1072
+ return cb__extract_mutation_result(resp);
1073
+ }
1074
+
1075
+ static VALUE
1076
+ cb_Backend_document_increment(VALUE self, VALUE bucket, VALUE collection, VALUE id, VALUE options)
1077
+ {
1078
+ cb_backend_data* backend = nullptr;
1079
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
1080
+
1081
+ if (!backend->cluster) {
1082
+ rb_raise(rb_eArgError, "Cluster has been closed already");
1083
+ }
1084
+
1085
+ Check_Type(bucket, T_STRING);
1086
+ Check_Type(collection, T_STRING);
1087
+ Check_Type(id, T_STRING);
1088
+
1089
+ couchbase::document_id doc_id;
1090
+ doc_id.bucket.assign(RSTRING_PTR(bucket), static_cast<size_t>(RSTRING_LEN(bucket)));
1091
+ doc_id.collection.assign(RSTRING_PTR(collection), static_cast<size_t>(RSTRING_LEN(collection)));
1092
+ doc_id.key.assign(RSTRING_PTR(id), static_cast<size_t>(RSTRING_LEN(id)));
1093
+
1094
+ couchbase::operations::increment_request req{ doc_id };
1095
+ if (!NIL_P(options)) {
1096
+ Check_Type(options, T_HASH);
1097
+ VALUE durability_level = rb_hash_aref(options, rb_id2sym(rb_intern("durability_level")));
1098
+ if (!NIL_P(durability_level)) {
1099
+ Check_Type(durability_level, T_SYMBOL);
1100
+ ID level = rb_sym2id(durability_level);
1101
+ if (level == rb_intern("none")) {
1102
+ req.durability_level = couchbase::protocol::durability_level::none;
1103
+ } else if (level == rb_intern("majority_and_persist_to_active")) {
1104
+ req.durability_level = couchbase::protocol::durability_level::majority_and_persist_to_active;
1105
+ } else if (level == rb_intern("persist_to_majority")) {
1106
+ req.durability_level = couchbase::protocol::durability_level::persist_to_majority;
1107
+ } else {
1108
+ rb_raise(rb_eArgError, "Unknown durability level");
1109
+ }
1110
+ VALUE durability_timeout = rb_hash_aref(options, rb_id2sym(rb_intern("durability_timeout")));
1111
+ if (!NIL_P(durability_timeout)) {
1112
+ Check_Type(durability_timeout, T_FIXNUM);
1113
+ req.durability_timeout = FIX2UINT(durability_timeout);
1114
+ }
1115
+ }
1116
+ VALUE delta = rb_hash_aref(options, rb_id2sym(rb_intern("delta")));
1117
+ if (!NIL_P(delta)) {
1118
+ switch (TYPE(delta)) {
1119
+ case T_FIXNUM:
1120
+ case T_BIGNUM:
1121
+ req.delta = NUM2ULL(delta);
1122
+ break;
1123
+ default:
1124
+ rb_raise(rb_eArgError, "delta must be an Integer");
1125
+ }
1126
+ }
1127
+ VALUE initial_value = rb_hash_aref(options, rb_id2sym(rb_intern("initial_value")));
1128
+ if (!NIL_P(initial_value)) {
1129
+ switch (TYPE(initial_value)) {
1130
+ case T_FIXNUM:
1131
+ case T_BIGNUM:
1132
+ req.initial_value = NUM2ULL(initial_value);
1133
+ break;
1134
+ default:
1135
+ rb_raise(rb_eArgError, "initial_value must be an Integer");
1136
+ }
1137
+ }
1138
+ VALUE expiration = rb_hash_aref(options, rb_id2sym(rb_intern("expiration")));
1139
+ if (!NIL_P(expiration)) {
1140
+ Check_Type(expiration, T_FIXNUM);
1141
+ req.expiration = FIX2UINT(expiration);
1142
+ }
1143
+ }
1144
+
1145
+ auto barrier = std::make_shared<std::promise<couchbase::operations::increment_response>>();
1146
+ auto f = barrier->get_future();
1147
+ backend->cluster->execute(req, [barrier](couchbase::operations::increment_response resp) mutable { barrier->set_value(resp); });
1148
+ auto resp = f.get();
1149
+ if (resp.ec) {
1150
+ cb_raise_error_code(resp.ec, fmt::format("unable to increment {} by {}", doc_id, req.delta));
1151
+ }
1152
+ VALUE res = cb__extract_mutation_result(resp);
1153
+ rb_hash_aset(res, rb_id2sym(rb_intern("content")), ULL2NUM(resp.content));
1154
+ return res;
1155
+ }
1156
+
1157
+ static VALUE
1158
+ cb_Backend_document_decrement(VALUE self, VALUE bucket, VALUE collection, VALUE id, VALUE options)
1159
+ {
1160
+ cb_backend_data* backend = nullptr;
1161
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
1162
+
1163
+ if (!backend->cluster) {
1164
+ rb_raise(rb_eArgError, "Cluster has been closed already");
1165
+ }
1166
+
1167
+ Check_Type(bucket, T_STRING);
1168
+ Check_Type(collection, T_STRING);
1169
+ Check_Type(id, T_STRING);
1170
1171
+
1172
+ couchbase::document_id doc_id;
1173
+ doc_id.bucket.assign(RSTRING_PTR(bucket), static_cast<size_t>(RSTRING_LEN(bucket)));
1174
+ doc_id.collection.assign(RSTRING_PTR(collection), static_cast<size_t>(RSTRING_LEN(collection)));
1175
+ doc_id.key.assign(RSTRING_PTR(id), static_cast<size_t>(RSTRING_LEN(id)));
1176
+
1177
+ couchbase::operations::decrement_request req{ doc_id };
1178
+ if (!NIL_P(options)) {
1179
+ Check_Type(options, T_HASH);
1180
+ VALUE durability_level = rb_hash_aref(options, rb_id2sym(rb_intern("durability_level")));
1181
+ if (!NIL_P(durability_level)) {
1182
+ Check_Type(durability_level, T_SYMBOL);
1183
+ ID level = rb_sym2id(durability_level);
1184
+ if (level == rb_intern("none")) {
1185
+ req.durability_level = couchbase::protocol::durability_level::none;
1186
+ } else if (level == rb_intern("majority_and_persist_to_active")) {
1187
+ req.durability_level = couchbase::protocol::durability_level::majority_and_persist_to_active;
1188
+ } else if (level == rb_intern("persist_to_majority")) {
1189
+ req.durability_level = couchbase::protocol::durability_level::persist_to_majority;
1190
+ } else {
1191
+ rb_raise(rb_eArgError, "Unknown durability level");
1192
+ }
1193
+ VALUE durability_timeout = rb_hash_aref(options, rb_id2sym(rb_intern("durability_timeout")));
1194
+ if (!NIL_P(durability_timeout)) {
1195
+ Check_Type(durability_timeout, T_FIXNUM);
1196
+ req.durability_timeout = FIX2UINT(durability_timeout);
1197
+ }
1198
+ }
1199
+ VALUE delta = rb_hash_aref(options, rb_id2sym(rb_intern("delta")));
1200
+ if (!NIL_P(delta)) {
1201
+ switch (TYPE(delta)) {
1202
+ case T_FIXNUM:
1203
+ case T_BIGNUM:
1204
+ req.delta = NUM2ULL(delta);
1205
+ break;
1206
+ default:
1207
+ rb_raise(rb_eArgError, "delta must be an Integer");
1208
+ }
1209
+ }
1210
+ VALUE initial_value = rb_hash_aref(options, rb_id2sym(rb_intern("initial_value")));
1211
+ if (!NIL_P(initial_value)) {
1212
+ switch (TYPE(initial_value)) {
1213
+ case T_FIXNUM:
1214
+ case T_BIGNUM:
1215
+ req.initial_value = NUM2ULL(initial_value);
1216
+ break;
1217
+ default:
1218
+ rb_raise(rb_eArgError, "initial_value must be an Integer");
1219
+ }
1220
+ }
1221
+ VALUE expiration = rb_hash_aref(options, rb_id2sym(rb_intern("expiration")));
1222
+ if (!NIL_P(expiration)) {
1223
+ Check_Type(expiration, T_FIXNUM);
1224
+ req.expiration = FIX2UINT(expiration);
1225
+ }
1226
+ }
1227
+
1228
+ auto barrier = std::make_shared<std::promise<couchbase::operations::decrement_response>>();
1229
+ auto f = barrier->get_future();
1230
+ backend->cluster->execute(req, [barrier](couchbase::operations::decrement_response resp) mutable { barrier->set_value(resp); });
1231
+ auto resp = f.get();
1232
+ if (resp.ec) {
1233
+ cb_raise_error_code(resp.ec, fmt::format("unable to decrement {} by {}", doc_id, req.delta));
1234
+ }
1235
+ VALUE res = cb__extract_mutation_result(resp);
1236
+ rb_hash_aset(res, rb_id2sym(rb_intern("content")), ULL2NUM(resp.content));
1237
+ return res;
1238
+ }
1239
+
1240
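The increment and decrement bindings above read the same durability options plus :delta, :initial_value and :expiration, and expose the resulting counter value under :content. A hedged Ruby sketch (the method name and `backend` receiver are assumptions):

    counter_options = {
      delta: 1,                 # any Integer (FIXNUM or BIGNUM), read via NUM2ULL
      initial_value: 0,         # value used when the document does not exist yet
      expiration: 60,           # Integer TTL, read via FIX2UINT
      durability_level: :none   # same symbols as the other mutation bindings
    }
    res = backend.document_increment("travel-sample", "_default", "visit_counter", counter_options)
    res[:content]               # => counter value after the operation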
+ static VALUE
1241
+ cb__map_subdoc_opcode(couchbase::protocol::subdoc_opcode opcode)
1242
+ {
1243
+ switch (opcode) {
1244
+ case couchbase::protocol::subdoc_opcode::get:
1245
+ return rb_id2sym(rb_intern("get"));
1246
+
1247
+ case couchbase::protocol::subdoc_opcode::exists:
1248
+ return rb_id2sym(rb_intern("exists"));
1249
+
1250
+ case couchbase::protocol::subdoc_opcode::dict_add:
1251
+ return rb_id2sym(rb_intern("dict_add"));
1252
+
1253
+ case couchbase::protocol::subdoc_opcode::dict_upsert:
1254
+ return rb_id2sym(rb_intern("dict_upsert"));
1255
+
1256
+ case couchbase::protocol::subdoc_opcode::remove:
1257
+ return rb_id2sym(rb_intern("remove"));
1258
+
1259
+ case couchbase::protocol::subdoc_opcode::replace:
1260
+ return rb_id2sym(rb_intern("replace"));
1261
+
1262
+ case couchbase::protocol::subdoc_opcode::array_push_last:
1263
+ return rb_id2sym(rb_intern("array_push_last"));
1264
+
1265
+ case couchbase::protocol::subdoc_opcode::array_push_first:
1266
+ return rb_id2sym(rb_intern("array_push_first"));
1267
+
1268
+ case couchbase::protocol::subdoc_opcode::array_insert:
1269
+ return rb_id2sym(rb_intern("array_insert"));
1270
+
1271
+ case couchbase::protocol::subdoc_opcode::array_add_unique:
1272
+ return rb_id2sym(rb_intern("array_add_unique"));
1273
+
1274
+ case couchbase::protocol::subdoc_opcode::counter:
1275
+ return rb_id2sym(rb_intern("counter"));
1276
+
1277
+ case couchbase::protocol::subdoc_opcode::get_count:
1278
+ return rb_id2sym(rb_intern("count"));
1279
+ }
1280
+ return rb_id2sym(rb_intern("unknown"));
1281
+ }
1282
+
1283
+ static VALUE
1284
+ cb__map_subdoc_status(couchbase::protocol::status status)
1285
+ {
1286
+ switch (status) {
1287
+ case couchbase::protocol::status::success:
1288
+ return rb_id2sym(rb_intern("success"));
1289
+
1290
+ case couchbase::protocol::status::subdoc_path_mismatch:
1291
+ return rb_id2sym(rb_intern("path_mismatch"));
1292
+
1293
+ case couchbase::protocol::status::subdoc_path_invalid:
1294
+ return rb_id2sym(rb_intern("path_invalid"));
1295
+
1296
+ case couchbase::protocol::status::subdoc_path_too_big:
1297
+ return rb_id2sym(rb_intern("path_too_big"));
1298
+
1299
+ case couchbase::protocol::status::subdoc_value_cannot_insert:
1300
+ return rb_id2sym(rb_intern("value_cannot_insert"));
1301
+
1302
+ case couchbase::protocol::status::subdoc_doc_not_json:
1303
+ return rb_id2sym(rb_intern("doc_not_json"));
1304
+
1305
+ case couchbase::protocol::status::subdoc_num_range_error:
1306
+ return rb_id2sym(rb_intern("num_range"));
1307
+
1308
+ case couchbase::protocol::status::subdoc_delta_invalid:
1309
+ return rb_id2sym(rb_intern("delta_invalid"));
1310
+
1311
+ case couchbase::protocol::status::subdoc_path_exists:
1312
+ return rb_id2sym(rb_intern("path_exists"));
1313
+
1314
+ case couchbase::protocol::status::subdoc_value_too_deep:
1315
+ return rb_id2sym(rb_intern("value_too_deep"));
1316
+
1317
+ case couchbase::protocol::status::subdoc_invalid_combo:
1318
+ return rb_id2sym(rb_intern("invalid_combo"));
1319
+
1320
+ case couchbase::protocol::status::subdoc_xattr_invalid_flag_combo:
1321
+ return rb_id2sym(rb_intern("xattr_invalid_flag_combo"));
1322
+
1323
+ case couchbase::protocol::status::subdoc_xattr_invalid_key_combo:
1324
+ return rb_id2sym(rb_intern("xattr_invalid_key_combo"));
1325
+
1326
+ case couchbase::protocol::status::subdoc_xattr_unknown_macro:
1327
+ return rb_id2sym(rb_intern("xattr_unknown_macro"));
1328
+
1329
+ case couchbase::protocol::status::subdoc_xattr_unknown_vattr:
1330
+ return rb_id2sym(rb_intern("xattr_unknown_vattr"));
1331
+
1332
+ case couchbase::protocol::status::subdoc_xattr_cannot_modify_vattr:
1333
+ return rb_id2sym(rb_intern("xattr_cannot_modify_vattr"));
1334
+
1335
+ default:
1336
+ return rb_id2sym(rb_intern("unknown"));
1337
+ }
1338
+ }
1339
+
1340
+ static VALUE
1341
+ cb_Backend_document_lookup_in(VALUE self, VALUE bucket, VALUE collection, VALUE id, VALUE access_deleted, VALUE specs)
1342
+ {
1343
+ cb_backend_data* backend = nullptr;
1344
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
1345
+
1346
+ if (!backend->cluster) {
1347
+ rb_raise(rb_eArgError, "Cluster has been closed already");
1348
+ }
1349
+
1350
+ Check_Type(bucket, T_STRING);
1351
+ Check_Type(collection, T_STRING);
1352
+ Check_Type(id, T_STRING);
1353
+ Check_Type(specs, T_ARRAY);
1354
+ if (RARRAY_LEN(specs) <= 0) {
1355
+ rb_raise(rb_eArgError, "Array with specs cannot be empty");
1356
+ }
1357
+
1358
+ couchbase::document_id doc_id;
1359
+ doc_id.bucket.assign(RSTRING_PTR(bucket), static_cast<size_t>(RSTRING_LEN(bucket)));
1360
+ doc_id.collection.assign(RSTRING_PTR(collection), static_cast<size_t>(RSTRING_LEN(collection)));
1361
+ doc_id.key.assign(RSTRING_PTR(id), static_cast<size_t>(RSTRING_LEN(id)));
1362
+
1363
+ couchbase::operations::lookup_in_request req{ doc_id };
1364
+ req.access_deleted = RTEST(access_deleted);
1365
+ auto entries_size = static_cast<size_t>(RARRAY_LEN(specs));
1366
+ req.specs.entries.reserve(entries_size);
1367
+ for (size_t i = 0; i < entries_size; ++i) {
1368
+ VALUE entry = rb_ary_entry(specs, static_cast<long>(i));
1369
+ Check_Type(entry, T_HASH);
1370
+ VALUE operation = rb_hash_aref(entry, rb_id2sym(rb_intern("opcode")));
1371
+ Check_Type(operation, T_SYMBOL);
1372
+ ID operation_id = rb_sym2id(operation);
852
1373
  couchbase::protocol::subdoc_opcode opcode;
853
- if (operation_id == rb_intern("dict_add")) {
854
- opcode = couchbase::protocol::subdoc_opcode::dict_add;
855
- } else if (operation_id == rb_intern("dict_upsert")) {
856
- opcode = couchbase::protocol::subdoc_opcode::dict_upsert;
857
- } else if (operation_id == rb_intern("remove")) {
858
- opcode = couchbase::protocol::subdoc_opcode::remove;
859
- } else if (operation_id == rb_intern("replace")) {
860
- opcode = couchbase::protocol::subdoc_opcode::replace;
861
- } else if (operation_id == rb_intern("array_push_last")) {
862
- opcode = couchbase::protocol::subdoc_opcode::array_push_last;
863
- } else if (operation_id == rb_intern("array_push_first")) {
864
- opcode = couchbase::protocol::subdoc_opcode::array_push_first;
865
- } else if (operation_id == rb_intern("array_insert")) {
866
- opcode = couchbase::protocol::subdoc_opcode::array_insert;
867
- } else if (operation_id == rb_intern("array_add_unique")) {
868
- opcode = couchbase::protocol::subdoc_opcode::array_add_unique;
869
- } else if (operation_id == rb_intern("counter")) {
870
- opcode = couchbase::protocol::subdoc_opcode::counter;
1374
+ if (operation_id == rb_intern("get") || operation_id == rb_intern("get_doc")) {
1375
+ opcode = couchbase::protocol::subdoc_opcode::get;
1376
+ } else if (operation_id == rb_intern("exists")) {
1377
+ opcode = couchbase::protocol::subdoc_opcode::exists;
1378
+ } else if (operation_id == rb_intern("count")) {
1379
+ opcode = couchbase::protocol::subdoc_opcode::get_count;
871
1380
  } else {
872
- rb_raise(rb_eArgError, "Unsupported operation for subdocument mutation: %+" PRIsVALUE, operation);
1381
+ rb_raise(rb_eArgError, "Unsupported operation for subdocument lookup");
873
1382
  }
874
1383
  bool xattr = RTEST(rb_hash_aref(entry, rb_id2sym(rb_intern("xattr"))));
875
- bool create_parents = RTEST(rb_hash_aref(entry, rb_id2sym(rb_intern("create_parents"))));
876
- bool expand_macros = RTEST(rb_hash_aref(entry, rb_id2sym(rb_intern("expand_macros"))));
877
1384
  VALUE path = rb_hash_aref(entry, rb_id2sym(rb_intern("path")));
878
1385
  Check_Type(path, T_STRING);
879
- VALUE param = rb_hash_aref(entry, rb_id2sym(rb_intern("param")));
880
- if (NIL_P(param)) {
881
- req.specs.add_spec(opcode, xattr, std::string(RSTRING_PTR(path), static_cast<size_t>(RSTRING_LEN(path))));
882
- } else if (opcode == couchbase::protocol::subdoc_opcode::counter) {
883
- Check_Type(param, T_FIXNUM);
884
- req.specs.add_spec(opcode,
885
- xattr,
886
- create_parents,
887
- expand_macros,
888
- std::string(RSTRING_PTR(path), static_cast<size_t>(RSTRING_LEN(path))),
889
- FIX2LONG(param));
890
- } else {
891
- Check_Type(param, T_STRING);
892
- req.specs.add_spec(opcode,
893
- xattr,
894
- create_parents,
895
- expand_macros,
896
- std::string(RSTRING_PTR(path), static_cast<size_t>(RSTRING_LEN(path))),
897
- std::string(RSTRING_PTR(param), static_cast<size_t>(RSTRING_LEN(param))));
898
- }
1386
+ req.specs.add_spec(opcode, xattr, std::string(RSTRING_PTR(path), static_cast<size_t>(RSTRING_LEN(path))));
1387
+ }
1388
+
1389
+ auto barrier = std::make_shared<std::promise<couchbase::operations::lookup_in_response>>();
1390
+ auto f = barrier->get_future();
1391
+ backend->cluster->execute(req, [barrier](couchbase::operations::lookup_in_response resp) mutable { barrier->set_value(resp); });
1392
+ auto resp = f.get();
1393
+ if (resp.ec) {
1394
+ cb_raise_error_code(resp.ec, fmt::format("unable fetch {}", doc_id));
1395
+ }
1396
+
1397
+ VALUE res = rb_hash_new();
1398
+ rb_hash_aset(res, rb_id2sym(rb_intern("cas")), ULL2NUM(resp.cas));
1399
+ VALUE fields = rb_ary_new_capa(static_cast<long>(resp.fields.size()));
1400
+ rb_hash_aset(res, rb_id2sym(rb_intern("fields")), fields);
1401
+ for (size_t i = 0; i < resp.fields.size(); ++i) {
1402
+ VALUE entry = rb_hash_new();
1403
+ rb_hash_aset(entry, rb_id2sym(rb_intern("exists")), resp.fields[i].exists ? Qtrue : Qfalse);
1404
+ rb_hash_aset(
1405
+ entry, rb_id2sym(rb_intern("path")), rb_str_new(resp.fields[i].path.data(), static_cast<long>(resp.fields[i].path.size())));
1406
+ rb_hash_aset(
1407
+ entry, rb_id2sym(rb_intern("value")), rb_str_new(resp.fields[i].value.data(), static_cast<long>(resp.fields[i].value.size())));
1408
+ rb_hash_aset(entry, rb_id2sym(rb_intern("status")), cb__map_subdoc_status(resp.fields[i].status));
1409
+ if (resp.fields[i].opcode == couchbase::protocol::subdoc_opcode::get && resp.fields[i].path.empty()) {
1410
+ rb_hash_aset(entry, rb_id2sym(rb_intern("type")), rb_id2sym(rb_intern("get_doc")));
1411
+ } else {
1412
+ rb_hash_aset(entry, rb_id2sym(rb_intern("type")), cb__map_subdoc_opcode(resp.fields[i].opcode));
1413
+ }
1414
+ rb_ary_store(fields, static_cast<long>(i), entry);
1415
+ }
1416
+ return res;
1417
+ }
1418
+
1419
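The lookup binding above requires `specs` to be a non-empty Array of Hashes and reads only :opcode, :path and :xattr from each entry; the response is a Hash with :cas and a :fields Array (each field carrying :exists, :path, :value, :status and :type). A sketch of the expected Ruby-side shape (paths are illustrative):

    specs = [
      { opcode: :get,    path: "name",    xattr: false },
      { opcode: :exists, path: "geo.lat", xattr: false },
      { opcode: :count,  path: "reviews", xattr: false },
      { opcode: :get,    path: "",        xattr: false }  # empty path with :get is reported back as :get_doc
    ]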
+ static VALUE
1420
+ cb_Backend_document_mutate_in(VALUE self, VALUE bucket, VALUE collection, VALUE id, VALUE access_deleted, VALUE specs, VALUE options)
1421
+ {
1422
+ cb_backend_data* backend = nullptr;
1423
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
1424
+
1425
+ if (!backend->cluster) {
1426
+ rb_raise(rb_eArgError, "Cluster has been closed already");
1427
+ }
1428
+
1429
+ Check_Type(bucket, T_STRING);
1430
+ Check_Type(collection, T_STRING);
1431
+ Check_Type(id, T_STRING);
1432
+ Check_Type(specs, T_ARRAY);
1433
+ if (RARRAY_LEN(specs) <= 0) {
1434
+ rb_raise(rb_eArgError, "Array with specs cannot be empty");
1435
+ }
1436
+
1437
+ couchbase::document_id doc_id;
1438
+ doc_id.bucket.assign(RSTRING_PTR(bucket), static_cast<size_t>(RSTRING_LEN(bucket)));
1439
+ doc_id.collection.assign(RSTRING_PTR(collection), static_cast<size_t>(RSTRING_LEN(collection)));
1440
+ doc_id.key.assign(RSTRING_PTR(id), static_cast<size_t>(RSTRING_LEN(id)));
1441
+
1442
+ couchbase::operations::mutate_in_request req{ doc_id };
1443
+ if (!NIL_P(options)) {
1444
+ Check_Type(options, T_HASH);
1445
+ VALUE durability_level = rb_hash_aref(options, rb_id2sym(rb_intern("durability_level")));
1446
+ if (!NIL_P(durability_level)) {
1447
+ Check_Type(durability_level, T_SYMBOL);
1448
+ ID level = rb_sym2id(durability_level);
1449
+ if (level == rb_intern("none")) {
1450
+ req.durability_level = couchbase::protocol::durability_level::none;
1451
+ } else if (level == rb_intern("majority_and_persist_to_active")) {
1452
+ req.durability_level = couchbase::protocol::durability_level::majority_and_persist_to_active;
1453
+ } else if (level == rb_intern("persist_to_majority")) {
1454
+ req.durability_level = couchbase::protocol::durability_level::persist_to_majority;
1455
+ } else {
1456
+ rb_raise(rb_eArgError, "Unknown durability level");
1457
+ }
1458
+ VALUE durability_timeout = rb_hash_aref(options, rb_id2sym(rb_intern("durability_timeout")));
1459
+ if (!NIL_P(durability_timeout)) {
1460
+ Check_Type(durability_timeout, T_FIXNUM);
1461
+ req.durability_timeout = FIX2UINT(durability_timeout);
1462
+ }
1463
+ }
1464
+ }
1465
+ req.access_deleted = RTEST(access_deleted);
1466
+ auto entries_size = static_cast<size_t>(RARRAY_LEN(specs));
1467
+ req.specs.entries.reserve(entries_size);
1468
+ for (size_t i = 0; i < entries_size; ++i) {
1469
+ VALUE entry = rb_ary_entry(specs, static_cast<long>(i));
1470
+ Check_Type(entry, T_HASH);
1471
+ VALUE operation = rb_hash_aref(entry, rb_id2sym(rb_intern("opcode")));
1472
+ Check_Type(operation, T_SYMBOL);
1473
+ ID operation_id = rb_sym2id(operation);
1474
+ couchbase::protocol::subdoc_opcode opcode;
1475
+ if (operation_id == rb_intern("dict_add")) {
1476
+ opcode = couchbase::protocol::subdoc_opcode::dict_add;
1477
+ } else if (operation_id == rb_intern("dict_upsert")) {
1478
+ opcode = couchbase::protocol::subdoc_opcode::dict_upsert;
1479
+ } else if (operation_id == rb_intern("remove")) {
1480
+ opcode = couchbase::protocol::subdoc_opcode::remove;
1481
+ } else if (operation_id == rb_intern("replace")) {
1482
+ opcode = couchbase::protocol::subdoc_opcode::replace;
1483
+ } else if (operation_id == rb_intern("array_push_last")) {
1484
+ opcode = couchbase::protocol::subdoc_opcode::array_push_last;
1485
+ } else if (operation_id == rb_intern("array_push_first")) {
1486
+ opcode = couchbase::protocol::subdoc_opcode::array_push_first;
1487
+ } else if (operation_id == rb_intern("array_insert")) {
1488
+ opcode = couchbase::protocol::subdoc_opcode::array_insert;
1489
+ } else if (operation_id == rb_intern("array_add_unique")) {
1490
+ opcode = couchbase::protocol::subdoc_opcode::array_add_unique;
1491
+ } else if (operation_id == rb_intern("counter")) {
1492
+ opcode = couchbase::protocol::subdoc_opcode::counter;
1493
+ } else {
1494
+ rb_raise(rb_eArgError, "Unsupported operation for subdocument mutation: %+" PRIsVALUE, operation);
1495
+ }
1496
+ bool xattr = RTEST(rb_hash_aref(entry, rb_id2sym(rb_intern("xattr"))));
1497
+ bool create_parents = RTEST(rb_hash_aref(entry, rb_id2sym(rb_intern("create_parents"))));
1498
+ bool expand_macros = RTEST(rb_hash_aref(entry, rb_id2sym(rb_intern("expand_macros"))));
1499
+ VALUE path = rb_hash_aref(entry, rb_id2sym(rb_intern("path")));
1500
+ Check_Type(path, T_STRING);
1501
+ VALUE param = rb_hash_aref(entry, rb_id2sym(rb_intern("param")));
1502
+ if (NIL_P(param)) {
1503
+ req.specs.add_spec(opcode, xattr, std::string(RSTRING_PTR(path), static_cast<size_t>(RSTRING_LEN(path))));
1504
+ } else if (opcode == couchbase::protocol::subdoc_opcode::counter) {
1505
+ Check_Type(param, T_FIXNUM);
1506
+ req.specs.add_spec(opcode,
1507
+ xattr,
1508
+ create_parents,
1509
+ expand_macros,
1510
+ std::string(RSTRING_PTR(path), static_cast<size_t>(RSTRING_LEN(path))),
1511
+ FIX2LONG(param));
1512
+ } else {
1513
+ Check_Type(param, T_STRING);
1514
+ req.specs.add_spec(opcode,
1515
+ xattr,
1516
+ create_parents,
1517
+ expand_macros,
1518
+ std::string(RSTRING_PTR(path), static_cast<size_t>(RSTRING_LEN(path))),
1519
+ std::string(RSTRING_PTR(param), static_cast<size_t>(RSTRING_LEN(param))));
1520
+ }
1521
+ }
1522
+
1523
+ auto barrier = std::make_shared<std::promise<couchbase::operations::mutate_in_response>>();
1524
+ auto f = barrier->get_future();
1525
+ backend->cluster->execute(req, [barrier](couchbase::operations::mutate_in_response resp) mutable { barrier->set_value(resp); });
1526
+ auto resp = f.get();
1527
+ if (resp.ec) {
1528
+ cb_raise_error_code(resp.ec, fmt::format("unable to mutate {}", doc_id));
1529
+ }
1530
+
1531
+ VALUE res = cb__extract_mutation_result(resp);
1532
+ if (resp.first_error_index) {
1533
+ rb_hash_aset(res, rb_id2sym(rb_intern("first_error_index")), ULONG2NUM(resp.first_error_index.value()));
1534
+ }
1535
+ VALUE fields = rb_ary_new_capa(static_cast<long>(resp.fields.size()));
1536
+ rb_hash_aset(res, rb_id2sym(rb_intern("fields")), fields);
1537
+ for (size_t i = 0; i < resp.fields.size(); ++i) {
1538
+ VALUE entry = rb_hash_new();
1539
+ rb_hash_aset(
1540
+ entry, rb_id2sym(rb_intern("path")), rb_str_new(resp.fields[i].path.data(), static_cast<long>(resp.fields[i].path.size())));
1541
+ if (resp.fields[i].opcode == couchbase::protocol::subdoc_opcode::counter) {
1542
+ rb_hash_aset(entry, rb_id2sym(rb_intern("value")), LONG2NUM(std::stoll(resp.fields[i].value)));
1543
+ } else {
1544
+ rb_hash_aset(entry,
1545
+ rb_id2sym(rb_intern("value")),
1546
+ rb_str_new(resp.fields[i].value.data(), static_cast<long>(resp.fields[i].value.size())));
1547
+ }
1548
+ rb_hash_aset(entry, rb_id2sym(rb_intern("status")), cb__map_subdoc_status(resp.fields[i].status));
1549
+ rb_hash_aset(entry, rb_id2sym(rb_intern("type")), cb__map_subdoc_opcode(resp.fields[i].opcode));
1550
+ rb_ary_store(fields, static_cast<long>(i), entry);
1551
+ }
1552
+ return res;
1553
+ }
1554
+
1555
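Each mutation spec consumed above is read for :opcode, :path, :xattr, :create_parents, :expand_macros and an optional :param, which must be an Integer for :counter and a (JSON-encoded) String for the other opcodes. A sketch under those assumptions:

    specs = [
      { opcode: :dict_upsert, path: "name",      param: '"40-Mile Air"', create_parents: true },
      { opcode: :counter,     path: "updates",   param: 1 },
      { opcode: :remove,      path: "old_field" }
    ]
    options = { durability_level: :majority_and_persist_to_active }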
+ static int
1556
+ cb__for_each_named_param(VALUE key, VALUE value, VALUE arg)
1557
+ {
1558
+ auto* preq = reinterpret_cast<couchbase::operations::query_request*>(arg);
1559
+ Check_Type(key, T_STRING);
1560
+ Check_Type(value, T_STRING);
1561
+ preq->named_parameters.emplace(
1562
+ std::string_view(RSTRING_PTR(key), static_cast<std::size_t>(RSTRING_LEN(key))),
1563
+ tao::json::from_string(std::string_view(RSTRING_PTR(value), static_cast<std::size_t>(RSTRING_LEN(value)))));
1564
+ return ST_CONTINUE;
1565
+ }
1566
+
1567
+ static VALUE
1568
+ cb_Backend_document_query(VALUE self, VALUE statement, VALUE options)
1569
+ {
1570
+ cb_backend_data* backend = nullptr;
1571
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
1572
+
1573
+ if (!backend->cluster) {
1574
+ rb_raise(rb_eArgError, "Cluster has been closed already");
1575
+ }
1576
+
1577
+ Check_Type(statement, T_STRING);
1578
+ Check_Type(options, T_HASH);
1579
+
1580
+ couchbase::operations::query_request req;
1581
+ req.statement.assign(RSTRING_PTR(statement), static_cast<size_t>(RSTRING_LEN(statement)));
1582
+ VALUE client_context_id = rb_hash_aref(options, rb_id2sym(rb_intern("client_context_id")));
1583
+ if (!NIL_P(client_context_id)) {
1584
+ Check_Type(client_context_id, T_STRING);
1585
+ req.client_context_id.assign(RSTRING_PTR(client_context_id), static_cast<size_t>(RSTRING_LEN(client_context_id)));
1586
+ }
1587
+ VALUE timeout = rb_hash_aref(options, rb_id2sym(rb_intern("timeout")));
1588
+ if (!NIL_P(timeout)) {
1589
+ switch (TYPE(timeout)) {
1590
+ case T_FIXNUM:
1591
+ case T_BIGNUM:
1592
+ break;
1593
+ default:
1594
+ rb_raise(rb_eArgError, "timeout must be an Integer");
1595
+ }
1596
+ req.timeout = NUM2ULL(timeout);
1597
+ }
1598
+ VALUE adhoc = rb_hash_aref(options, rb_id2sym(rb_intern("adhoc")));
1599
+ if (!NIL_P(adhoc)) {
1600
+ req.adhoc = RTEST(adhoc);
1601
+ }
1602
+ VALUE metrics = rb_hash_aref(options, rb_id2sym(rb_intern("metrics")));
1603
+ if (!NIL_P(metrics)) {
1604
+ req.metrics = RTEST(metrics);
1605
+ }
1606
+ VALUE readonly = rb_hash_aref(options, rb_id2sym(rb_intern("readonly")));
1607
+ if (!NIL_P(readonly)) {
1608
+ req.readonly = RTEST(readonly);
1609
+ }
1610
+ VALUE scan_cap = rb_hash_aref(options, rb_id2sym(rb_intern("scan_cap")));
1611
+ if (!NIL_P(scan_cap)) {
1612
+ req.scan_cap = NUM2ULONG(scan_cap);
1613
+ }
1614
+ VALUE scan_wait = rb_hash_aref(options, rb_id2sym(rb_intern("scan_wait")));
1615
+ if (!NIL_P(scan_wait)) {
1616
+ req.scan_wait = NUM2ULONG(scan_wait);
1617
+ }
1618
+ VALUE max_parallelism = rb_hash_aref(options, rb_id2sym(rb_intern("max_parallelism")));
1619
+ if (!NIL_P(max_parallelism)) {
1620
+ req.max_parallelism = NUM2ULONG(max_parallelism);
1621
+ }
1622
+ VALUE pipeline_cap = rb_hash_aref(options, rb_id2sym(rb_intern("pipeline_cap")));
1623
+ if (!NIL_P(pipeline_cap)) {
1624
+ req.pipeline_cap = NUM2ULONG(pipeline_cap);
1625
+ }
1626
+ VALUE pipeline_batch = rb_hash_aref(options, rb_id2sym(rb_intern("pipeline_batch")));
1627
+ if (!NIL_P(pipeline_batch)) {
1628
+ req.pipeline_batch = NUM2ULONG(pipeline_batch);
1629
+ }
1630
+ VALUE profile = rb_hash_aref(options, rb_id2sym(rb_intern("profile")));
1631
+ if (!NIL_P(profile)) {
1632
+ Check_Type(profile, T_SYMBOL);
1633
+ ID mode = rb_sym2id(profile);
1634
+ if (mode == rb_intern("phases")) {
1635
+ req.profile = couchbase::operations::query_request::profile_mode::phases;
1636
+ } else if (mode == rb_intern("timings")) {
1637
+ req.profile = couchbase::operations::query_request::profile_mode::timings;
1638
+ } else if (mode == rb_intern("off")) {
1639
+ req.profile = couchbase::operations::query_request::profile_mode::off;
1640
+ }
1641
+ }
1642
+ VALUE positional_params = rb_hash_aref(options, rb_id2sym(rb_intern("positional_parameters")));
1643
+ if (!NIL_P(positional_params)) {
1644
+ Check_Type(positional_params, T_ARRAY);
1645
+ auto entries_num = static_cast<size_t>(RARRAY_LEN(positional_params));
1646
+ req.positional_parameters.reserve(entries_num);
1647
+ for (size_t i = 0; i < entries_num; ++i) {
1648
+ VALUE entry = rb_ary_entry(positional_params, static_cast<long>(i));
1649
+ Check_Type(entry, T_STRING);
1650
+ req.positional_parameters.emplace_back(
1651
+ tao::json::from_string(std::string_view(RSTRING_PTR(entry), static_cast<std::size_t>(RSTRING_LEN(entry)))));
1652
+ }
1653
+ }
1654
+ VALUE named_params = rb_hash_aref(options, rb_id2sym(rb_intern("named_parameters")));
1655
+ if (!NIL_P(named_params)) {
1656
+ Check_Type(named_params, T_HASH);
1657
+ rb_hash_foreach(named_params, INT_FUNC(cb__for_each_named_param), reinterpret_cast<VALUE>(&req));
1658
+ }
1659
+ VALUE scan_consistency = rb_hash_aref(options, rb_id2sym(rb_intern("scan_consistency")));
1660
+ if (!NIL_P(scan_consistency)) {
1661
+ Check_Type(scan_consistency, T_SYMBOL);
1662
+ ID type = rb_sym2id(scan_consistency);
1663
+ if (type == rb_intern("not_bounded")) {
1664
+ req.scan_consistency = couchbase::operations::query_request::scan_consistency_type::not_bounded;
1665
+ } else if (type == rb_intern("request_plus")) {
1666
+ req.scan_consistency = couchbase::operations::query_request::scan_consistency_type::request_plus;
1667
+ }
1668
+ }
1669
+ VALUE mutation_state = rb_hash_aref(options, rb_id2sym(rb_intern("mutation_state")));
1670
+ if (!NIL_P(mutation_state)) {
1671
+ Check_Type(mutation_state, T_ARRAY);
1672
+ auto state_size = static_cast<size_t>(RARRAY_LEN(mutation_state));
1673
+ req.mutation_state.reserve(state_size);
1674
+ for (size_t i = 0; i < state_size; ++i) {
1675
+ VALUE token = rb_ary_entry(mutation_state, static_cast<long>(i));
1676
+ Check_Type(token, T_HASH);
1677
+ VALUE bucket_name = rb_hash_aref(token, rb_id2sym(rb_intern("bucket_name")));
1678
+ Check_Type(bucket_name, T_STRING);
1679
+ VALUE partition_id = rb_hash_aref(token, rb_id2sym(rb_intern("partition_id")));
1680
+ Check_Type(partition_id, T_FIXNUM);
1681
+ VALUE partition_uuid = rb_hash_aref(token, rb_id2sym(rb_intern("partition_uuid")));
1682
+ switch (TYPE(partition_uuid)) {
1683
+ case T_FIXNUM:
1684
+ case T_BIGNUM:
1685
+ break;
1686
+ default:
1687
+ rb_raise(rb_eArgError, "partition_uuid must be an Integer");
1688
+ }
1689
+ VALUE sequence_number = rb_hash_aref(token, rb_id2sym(rb_intern("sequence_number")));
1690
+ switch (TYPE(sequence_number)) {
1691
+ case T_FIXNUM:
1692
+ case T_BIGNUM:
1693
+ break;
1694
+ default:
1695
+ rb_raise(rb_eArgError, "sequence_number must be an Integer");
1696
+ }
1697
+ req.mutation_state.emplace_back(
1698
+ couchbase::mutation_token{ NUM2ULL(partition_uuid),
1699
+ NUM2ULL(sequence_number),
1700
+ gsl::narrow_cast<std::uint16_t>(NUM2UINT(partition_id)),
1701
+ std::string(RSTRING_PTR(bucket_name), static_cast<std::size_t>(RSTRING_LEN(bucket_name))) });
1702
+ }
1703
+ }
1704
+
1705
+ VALUE raw_params = rb_hash_aref(options, rb_id2sym(rb_intern("raw_parameters")));
1706
+ if (!NIL_P(raw_params)) {
1707
+ Check_Type(raw_params, T_HASH);
1708
+ rb_hash_foreach(raw_params, INT_FUNC(cb__for_each_named_param), reinterpret_cast<VALUE>(&req));
1709
+ }
1710
+
1711
+ auto barrier = std::make_shared<std::promise<couchbase::operations::query_response>>();
1712
+ auto f = barrier->get_future();
1713
+ backend->cluster->execute_http(req, [barrier](couchbase::operations::query_response resp) mutable { barrier->set_value(resp); });
1714
+ auto resp = f.get();
1715
+ if (resp.ec) {
1716
+ if (resp.payload.meta_data.errors && !resp.payload.meta_data.errors->empty()) {
1717
+ const auto& first_error = resp.payload.meta_data.errors->front();
1718
+ cb_raise_error_code(resp.ec,
1719
+ fmt::format("unable to query: \"{}{}\" ({}: {})",
1720
+ req.statement.substr(0, 50),
1721
+ req.statement.size() > 50 ? "..." : "",
1722
+ first_error.code,
1723
+ first_error.message));
1724
+ } else {
1725
+ cb_raise_error_code(
1726
+ resp.ec, fmt::format("unable to query: \"{}{}\"", req.statement.substr(0, 50), req.statement.size() > 50 ? "..." : ""));
1727
+ }
1728
+ }
1729
+ VALUE res = rb_hash_new();
1730
+ VALUE rows = rb_ary_new_capa(static_cast<long>(resp.payload.rows.size()));
1731
+ rb_hash_aset(res, rb_id2sym(rb_intern("rows")), rows);
1732
+ for (auto& row : resp.payload.rows) {
1733
+ rb_ary_push(rows, rb_str_new(row.data(), static_cast<long>(row.size())));
1734
+ }
1735
+ VALUE meta = rb_hash_new();
1736
+ rb_hash_aset(res, rb_id2sym(rb_intern("meta")), meta);
1737
+ rb_hash_aset(meta,
1738
+ rb_id2sym(rb_intern("status")),
1739
+ rb_id2sym(rb_intern2(resp.payload.meta_data.status.data(), static_cast<long>(resp.payload.meta_data.status.size()))));
1740
+ rb_hash_aset(meta,
1741
+ rb_id2sym(rb_intern("request_id")),
1742
+ rb_str_new(resp.payload.meta_data.request_id.data(), static_cast<long>(resp.payload.meta_data.request_id.size())));
1743
+ rb_hash_aset(
1744
+ meta,
1745
+ rb_id2sym(rb_intern("client_context_id")),
1746
+ rb_str_new(resp.payload.meta_data.client_context_id.data(), static_cast<long>(resp.payload.meta_data.client_context_id.size())));
1747
+ if (resp.payload.meta_data.signature) {
1748
+ rb_hash_aset(meta,
1749
+ rb_id2sym(rb_intern("signature")),
1750
+ rb_str_new(resp.payload.meta_data.signature->data(), static_cast<long>(resp.payload.meta_data.signature->size())));
1751
+ }
1752
+ if (resp.payload.meta_data.profile) {
1753
+ rb_hash_aset(meta,
1754
+ rb_id2sym(rb_intern("profile")),
1755
+ rb_str_new(resp.payload.meta_data.profile->data(), static_cast<long>(resp.payload.meta_data.profile->size())));
1756
+ }
1757
+ metrics = rb_hash_new();
1758
+ rb_hash_aset(meta, rb_id2sym(rb_intern("metrics")), metrics);
1759
+ rb_hash_aset(metrics,
1760
+ rb_id2sym(rb_intern("elapsed_time")),
1761
+ rb_str_new(resp.payload.meta_data.metrics.elapsed_time.data(),
1762
+ static_cast<long>(resp.payload.meta_data.metrics.elapsed_time.size())));
1763
+ rb_hash_aset(metrics,
1764
+ rb_id2sym(rb_intern("execution_time")),
1765
+ rb_str_new(resp.payload.meta_data.metrics.execution_time.data(),
1766
+ static_cast<long>(resp.payload.meta_data.metrics.execution_time.size())));
1767
+ rb_hash_aset(metrics, rb_id2sym(rb_intern("result_count")), ULL2NUM(resp.payload.meta_data.metrics.result_count));
1768
+ rb_hash_aset(metrics, rb_id2sym(rb_intern("result_size")), ULL2NUM(resp.payload.meta_data.metrics.result_count));
1769
+ if (resp.payload.meta_data.metrics.sort_count) {
1770
+ rb_hash_aset(metrics, rb_id2sym(rb_intern("sort_count")), ULL2NUM(*resp.payload.meta_data.metrics.sort_count));
1771
+ }
1772
+ if (resp.payload.meta_data.metrics.mutation_count) {
1773
+ rb_hash_aset(metrics, rb_id2sym(rb_intern("mutation_count")), ULL2NUM(*resp.payload.meta_data.metrics.mutation_count));
1774
+ }
1775
+ if (resp.payload.meta_data.metrics.error_count) {
1776
+ rb_hash_aset(metrics, rb_id2sym(rb_intern("error_count")), ULL2NUM(*resp.payload.meta_data.metrics.error_count));
1777
+ }
1778
+ if (resp.payload.meta_data.metrics.warning_count) {
1779
+ rb_hash_aset(metrics, rb_id2sym(rb_intern("warning_count")), ULL2NUM(*resp.payload.meta_data.metrics.warning_count));
1780
+ }
1781
+
1782
+ return res;
1783
+ }
1784
+
1785
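The query binding reads a fairly wide options hash; note that positional and named parameter values must already be JSON-encoded strings, because they are parsed with tao::json::from_string before being attached to the request. A hedged Ruby-side sketch (the method name and `backend` receiver are assumptions):

    query_options = {
      timeout: 75_000,                     # Integer, read via NUM2ULL
      adhoc: false,
      metrics: true,
      readonly: true,
      profile: :off,                       # :off, :phases or :timings
      scan_consistency: :request_plus,     # or :not_bounded
      named_parameters: { "country" => '"France"' },  # values are JSON-encoded strings
      mutation_state: [                    # consistency via mutation tokens
        { bucket_name: "travel-sample", partition_id: 42,
          partition_uuid: 123_456, sequence_number: 7 }
      ],
      client_context_id: "query-example-1"
    }
    res = backend.document_query('SELECT name FROM `travel-sample` WHERE country = $country', query_options)
    res[:rows]            # Array of JSON strings, one per row
    res[:meta][:metrics]  # :elapsed_time, :execution_time, :result_count, :result_size, ...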
+ static void
1786
+ cb__generate_bucket_settings(VALUE bucket, couchbase::operations::bucket_settings& entry, bool is_create)
1787
+ {
1788
+ {
1789
+ VALUE bucket_type = rb_hash_aref(bucket, rb_id2sym(rb_intern("bucket_type")));
1790
+ Check_Type(bucket_type, T_SYMBOL);
1791
+ if (bucket_type == rb_id2sym(rb_intern("couchbase")) || bucket_type == rb_id2sym(rb_intern("membase"))) {
1792
+ entry.bucket_type = couchbase::operations::bucket_settings::bucket_type::couchbase;
1793
+ } else if (bucket_type == rb_id2sym(rb_intern("memcached"))) {
1794
+ entry.bucket_type = couchbase::operations::bucket_settings::bucket_type::memcached;
1795
+ } else if (bucket_type == rb_id2sym(rb_intern("ephemeral"))) {
1796
+ entry.bucket_type = couchbase::operations::bucket_settings::bucket_type::ephemeral;
1797
+ } else {
1798
+ rb_raise(rb_eArgError, "unknown bucket type");
1799
+ }
1800
+ }
1801
+ {
1802
+ VALUE name = rb_hash_aref(bucket, rb_id2sym(rb_intern("name")));
1803
+ Check_Type(name, T_STRING);
1804
+ entry.name.assign(RSTRING_PTR(name), static_cast<size_t>(RSTRING_LEN(name)));
1805
+ }
1806
+ {
1807
+ VALUE quota = rb_hash_aref(bucket, rb_id2sym(rb_intern("ram_quota_mb")));
1808
+ Check_Type(quota, T_FIXNUM);
1809
+ entry.ram_quota_mb = FIX2ULONG(quota);
1810
+ }
1811
+ {
1812
+ VALUE expiry = rb_hash_aref(bucket, rb_id2sym(rb_intern("max_expiry")));
1813
+ if (!NIL_P(expiry)) {
1814
+ Check_Type(expiry, T_FIXNUM);
1815
+ entry.max_expiry = FIX2UINT(expiry);
1816
+ }
1817
+ }
1818
+ {
1819
+ VALUE num_replicas = rb_hash_aref(bucket, rb_id2sym(rb_intern("num_replicas")));
1820
+ if (!NIL_P(num_replicas)) {
1821
+ Check_Type(num_replicas, T_FIXNUM);
1822
+ entry.num_replicas = FIX2UINT(num_replicas);
1823
+ }
1824
+ }
1825
+ {
1826
+ VALUE replica_indexes = rb_hash_aref(bucket, rb_id2sym(rb_intern("replica_indexes")));
1827
+ if (!NIL_P(replica_indexes)) {
1828
+ entry.replica_indexes = RTEST(replica_indexes);
1829
+ }
1830
+ }
1831
+ {
1832
+ VALUE flush_enabled = rb_hash_aref(bucket, rb_id2sym(rb_intern("flush_enabled")));
1833
+ if (!NIL_P(flush_enabled)) {
1834
+ entry.flush_enabled = RTEST(flush_enabled);
1835
+ }
1836
+ }
1837
+ {
1838
+ VALUE compression_mode = rb_hash_aref(bucket, rb_id2sym(rb_intern("compression_mode")));
1839
+ if (!NIL_P(compression_mode)) {
1840
+ Check_Type(compression_mode, T_SYMBOL);
1841
+ if (compression_mode == rb_id2sym(rb_intern("active"))) {
1842
+ entry.compression_mode = couchbase::operations::bucket_settings::compression_mode::active;
1843
+ } else if (compression_mode == rb_id2sym(rb_intern("passive"))) {
1844
+ entry.compression_mode = couchbase::operations::bucket_settings::compression_mode::passive;
1845
+ } else if (compression_mode == rb_id2sym(rb_intern("off"))) {
1846
+ entry.compression_mode = couchbase::operations::bucket_settings::compression_mode::off;
1847
+ } else {
1848
+ rb_raise(rb_eArgError, "unknown compression mode");
1849
+ }
1850
+ }
1851
+ }
1852
+ {
1853
+ VALUE ejection_policy = rb_hash_aref(bucket, rb_id2sym(rb_intern("ejection_policy")));
1854
+ if (!NIL_P(ejection_policy)) {
1855
+ Check_Type(ejection_policy, T_SYMBOL);
1856
+ if (ejection_policy == rb_id2sym(rb_intern("full"))) {
1857
+ entry.ejection_policy = couchbase::operations::bucket_settings::ejection_policy::full;
1858
+ } else if (ejection_policy == rb_id2sym(rb_intern("value_only"))) {
1859
+ entry.ejection_policy = couchbase::operations::bucket_settings::ejection_policy::value_only;
1860
+ } else {
1861
+ rb_raise(rb_eArgError, "unknown ejection policy");
1862
+ }
1863
+ }
1864
+ }
1865
+ if (is_create) {
1866
+ VALUE conflict_resolution_type = rb_hash_aref(bucket, rb_id2sym(rb_intern("conflict_resolution_type")));
1867
+ if (!NIL_P(conflict_resolution_type)) {
1868
+ Check_Type(conflict_resolution_type, T_SYMBOL);
1869
+ if (conflict_resolution_type == rb_id2sym(rb_intern("timestamp"))) {
1870
+ entry.conflict_resolution_type = couchbase::operations::bucket_settings::conflict_resolution_type::timestamp;
1871
+ } else if (conflict_resolution_type == rb_id2sym(rb_intern("sequence_number"))) {
1872
+ entry.conflict_resolution_type = couchbase::operations::bucket_settings::conflict_resolution_type::sequence_number;
1873
+ } else {
1874
+ rb_raise(rb_eArgError, "unknown conflict resolution type");
1875
+ }
1876
+ }
1877
+ }
1878
+ }
1879
+
1880
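cb__generate_bucket_settings above shows exactly which keys are read from the Ruby hash (:bucket_type, :name and :ram_quota_mb are mandatory, the rest optional, and :conflict_resolution_type is honoured only on create). A sketch of a create payload, assuming the Ruby management API forwards the hash as-is:

    bucket_settings = {
      name: "new_bucket",                         # required String
      bucket_type: :couchbase,                    # :couchbase/:membase, :memcached or :ephemeral
      ram_quota_mb: 256,                          # required Integer
      num_replicas: 1,
      max_expiry: 0,
      flush_enabled: false,
      replica_indexes: true,
      compression_mode: :passive,                 # :active, :passive or :off
      ejection_policy: :value_only,               # :full or :value_only
      conflict_resolution_type: :sequence_number  # or :timestamp; create-only
    }
    backend.bucket_create(bucket_settings)        # hypothetical Ruby-side entry point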
+ static VALUE
1881
+ cb_Backend_bucket_create(VALUE self, VALUE bucket_settings)
1882
+ {
1883
+ cb_backend_data* backend = nullptr;
1884
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
1885
+
1886
+ if (!backend->cluster) {
1887
+ rb_raise(rb_eArgError, "Cluster has been closed already");
1888
+ }
1889
+
1890
+ Check_Type(bucket_settings, T_HASH);
1891
+ couchbase::operations::bucket_create_request req{};
1892
+ cb__generate_bucket_settings(bucket_settings, req.bucket, true);
1893
+ auto barrier = std::make_shared<std::promise<couchbase::operations::bucket_create_response>>();
1894
+ auto f = barrier->get_future();
1895
+ backend->cluster->execute_http(req,
1896
+ [barrier](couchbase::operations::bucket_create_response resp) mutable { barrier->set_value(resp); });
1897
+ auto resp = f.get();
1898
+ if (resp.ec) {
1899
+ cb_raise_error_code(resp.ec,
1900
+ fmt::format("unable to create bucket \"{}\" on the cluster ({})", req.bucket.name, resp.error_message));
1901
+ }
1902
+
1903
+ return Qtrue;
1904
+ }
1905
+
1906
+ static VALUE
1907
+ cb_Backend_bucket_update(VALUE self, VALUE bucket_settings)
1908
+ {
1909
+ cb_backend_data* backend = nullptr;
1910
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
1911
+
1912
+ if (!backend->cluster) {
1913
+ rb_raise(rb_eArgError, "Cluster has been closed already");
1914
+ }
1915
+
1916
+ Check_Type(bucket_settings, T_HASH);
1917
+ couchbase::operations::bucket_update_request req{};
1918
+ cb__generate_bucket_settings(bucket_settings, req.bucket, false);
1919
+ auto barrier = std::make_shared<std::promise<couchbase::operations::bucket_update_response>>();
1920
+ auto f = barrier->get_future();
1921
+ backend->cluster->execute_http(req,
1922
+ [barrier](couchbase::operations::bucket_update_response resp) mutable { barrier->set_value(resp); });
1923
+ auto resp = f.get();
1924
+ if (resp.ec) {
1925
+ cb_raise_error_code(resp.ec,
1926
+ fmt::format("unable to update bucket \"{}\" on the cluster ({})", req.bucket.name, resp.error_message));
1927
+ }
1928
+
1929
+ return Qtrue;
1930
+ }
1931
+
1932
+ static VALUE
1933
+ cb_Backend_bucket_drop(VALUE self, VALUE bucket_name)
1934
+ {
1935
+ cb_backend_data* backend = nullptr;
1936
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
1937
+
1938
+ if (!backend->cluster) {
1939
+ rb_raise(rb_eArgError, "Cluster has been closed already");
1940
+ }
1941
+
1942
+ Check_Type(bucket_name, T_STRING);
1943
+
1944
+ couchbase::operations::bucket_drop_request req{};
1945
+ req.name.assign(RSTRING_PTR(bucket_name), static_cast<size_t>(RSTRING_LEN(bucket_name)));
1946
+ auto barrier = std::make_shared<std::promise<couchbase::operations::bucket_drop_response>>();
1947
+ auto f = barrier->get_future();
1948
+ backend->cluster->execute_http(req, [barrier](couchbase::operations::bucket_drop_response resp) mutable { barrier->set_value(resp); });
1949
+ auto resp = f.get();
1950
+ if (resp.ec) {
1951
+ cb_raise_error_code(resp.ec, fmt::format("unable to remove bucket \"{}\" on the cluster", req.name));
1952
+ }
1953
+
1954
+ return Qtrue;
1955
+ }
1956
+
1957
+ static VALUE
1958
+ cb_Backend_bucket_flush(VALUE self, VALUE bucket_name)
1959
+ {
1960
+ cb_backend_data* backend = nullptr;
1961
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
1962
+
1963
+ if (!backend->cluster) {
1964
+ rb_raise(rb_eArgError, "Cluster has been closed already");
1965
+ }
1966
+
1967
+ Check_Type(bucket_name, T_STRING);
1968
+
1969
+ couchbase::operations::bucket_flush_request req{};
1970
+ req.name.assign(RSTRING_PTR(bucket_name), static_cast<size_t>(RSTRING_LEN(bucket_name)));
1971
+ auto barrier = std::make_shared<std::promise<couchbase::operations::bucket_flush_response>>();
1972
+ auto f = barrier->get_future();
1973
+ backend->cluster->execute_http(req, [barrier](couchbase::operations::bucket_flush_response resp) mutable { barrier->set_value(resp); });
1974
+ auto resp = f.get();
1975
+ if (resp.ec) {
1976
+ cb_raise_error_code(resp.ec, fmt::format("unable to remove bucket \"{}\" on the cluster", req.name));
1977
+ }
1978
+
1979
+ return Qtrue;
1980
+ }
1981
+
1982
+ static void
1983
+ cb__extract_bucket_settings(const couchbase::operations::bucket_settings& entry, VALUE bucket)
1984
+ {
1985
+ switch (entry.bucket_type) {
1986
+ case couchbase::operations::bucket_settings::bucket_type::couchbase:
1987
+ rb_hash_aset(bucket, rb_id2sym(rb_intern("bucket_type")), rb_id2sym(rb_intern("couchbase")));
1988
+ break;
1989
+ case couchbase::operations::bucket_settings::bucket_type::memcached:
1990
+ rb_hash_aset(bucket, rb_id2sym(rb_intern("bucket_type")), rb_id2sym(rb_intern("memcached")));
1991
+ break;
1992
+ case couchbase::operations::bucket_settings::bucket_type::ephemeral:
1993
+ rb_hash_aset(bucket, rb_id2sym(rb_intern("bucket_type")), rb_id2sym(rb_intern("ephemeral")));
1994
+ break;
1995
+ case couchbase::operations::bucket_settings::bucket_type::unknown:
1996
+ rb_hash_aset(bucket, rb_id2sym(rb_intern("bucket_type")), Qnil);
1997
+ break;
1998
+ }
1999
+ rb_hash_aset(bucket, rb_id2sym(rb_intern("name")), rb_str_new(entry.name.data(), static_cast<long>(entry.name.size())));
2000
+ rb_hash_aset(bucket, rb_id2sym(rb_intern("uuid")), rb_str_new(entry.uuid.data(), static_cast<long>(entry.uuid.size())));
2001
+ rb_hash_aset(bucket, rb_id2sym(rb_intern("ram_quota_mb")), ULL2NUM(entry.ram_quota_mb));
2002
+ rb_hash_aset(bucket, rb_id2sym(rb_intern("max_expiry")), ULONG2NUM(entry.max_expiry));
2003
+ switch (entry.compression_mode) {
2004
+ case couchbase::operations::bucket_settings::compression_mode::off:
2005
+ rb_hash_aset(bucket, rb_id2sym(rb_intern("compression_mode")), rb_id2sym(rb_intern("off")));
2006
+ break;
2007
+ case couchbase::operations::bucket_settings::compression_mode::active:
2008
+ rb_hash_aset(bucket, rb_id2sym(rb_intern("compression_mode")), rb_id2sym(rb_intern("active")));
2009
+ break;
2010
+ case couchbase::operations::bucket_settings::compression_mode::passive:
2011
+ rb_hash_aset(bucket, rb_id2sym(rb_intern("compression_mode")), rb_id2sym(rb_intern("passive")));
2012
+ break;
2013
+ case couchbase::operations::bucket_settings::compression_mode::unknown:
2014
+ rb_hash_aset(bucket, rb_id2sym(rb_intern("compression_mode")), Qnil);
2015
+ break;
2016
+ }
2017
+ rb_hash_aset(bucket, rb_id2sym(rb_intern("num_replicas")), ULONG2NUM(entry.num_replicas));
2018
+ rb_hash_aset(bucket, rb_id2sym(rb_intern("replica_indexes")), entry.replica_indexes ? Qtrue : Qfalse);
2019
+ rb_hash_aset(bucket, rb_id2sym(rb_intern("flush_enabled")), entry.flush_enabled ? Qtrue : Qfalse);
2020
+ switch (entry.ejection_policy) {
2021
+ case couchbase::operations::bucket_settings::ejection_policy::full:
2022
+ rb_hash_aset(bucket, rb_id2sym(rb_intern("ejection_policy")), rb_id2sym(rb_intern("full")));
2023
+ break;
2024
+ case couchbase::operations::bucket_settings::ejection_policy::value_only:
2025
+ rb_hash_aset(bucket, rb_id2sym(rb_intern("ejection_policy")), rb_id2sym(rb_intern("value_only")));
2026
+ break;
2027
+ case couchbase::operations::bucket_settings::ejection_policy::unknown:
2028
+ rb_hash_aset(bucket, rb_id2sym(rb_intern("ejection_policy")), Qnil);
2029
+ break;
2030
+ }
2031
+ switch (entry.conflict_resolution_type) {
2032
+ case couchbase::operations::bucket_settings::conflict_resolution_type::timestamp:
2033
+ rb_hash_aset(bucket, rb_id2sym(rb_intern("conflict_resolution_type")), rb_id2sym(rb_intern("timestamp")));
2034
+ break;
2035
+ case couchbase::operations::bucket_settings::conflict_resolution_type::sequence_number:
2036
+ rb_hash_aset(bucket, rb_id2sym(rb_intern("conflict_resolution_type")), rb_id2sym(rb_intern("sequence_number")));
2037
+ break;
2038
+ case couchbase::operations::bucket_settings::conflict_resolution_type::unknown:
2039
+ rb_hash_aset(bucket, rb_id2sym(rb_intern("conflict_resolution_type")), Qnil);
2040
+ break;
2041
+ }
2042
+ VALUE capabilities = rb_ary_new_capa(static_cast<long>(entry.capabilities.size()));
2043
+ for (const auto& capa : entry.capabilities) {
2044
+ rb_ary_push(capabilities, rb_str_new(capa.data(), static_cast<long>(capa.size())));
2045
+ }
2046
+ rb_hash_aset(bucket, rb_id2sym(rb_intern("capabilities")), capabilities);
2047
+ VALUE nodes = rb_ary_new_capa(static_cast<long>(entry.nodes.size()));
2048
+ for (const auto& n : entry.nodes) {
2049
+ VALUE node = rb_hash_new();
2050
+ rb_hash_aset(node, rb_id2sym(rb_intern("status")), rb_str_new(n.status.data(), static_cast<long>(n.status.size())));
2051
+ rb_hash_aset(node, rb_id2sym(rb_intern("hostname")), rb_str_new(n.hostname.data(), static_cast<long>(n.hostname.size())));
2052
+ rb_hash_aset(node, rb_id2sym(rb_intern("version")), rb_str_new(n.version.data(), static_cast<long>(n.version.size())));
2053
+ rb_ary_push(nodes, node);
2054
+ }
2055
+ rb_hash_aset(bucket, rb_id2sym(rb_intern("nodes")), nodes);
2056
+ }
2057
+
2058
+ static VALUE
2059
+ cb_Backend_bucket_get_all(VALUE self)
2060
+ {
2061
+ cb_backend_data* backend = nullptr;
2062
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
2063
+
2064
+ if (!backend->cluster) {
2065
+ rb_raise(rb_eArgError, "Cluster has been closed already");
2066
+ }
2067
+
2068
+ couchbase::operations::bucket_get_all_request req{};
2069
+ auto barrier = std::make_shared<std::promise<couchbase::operations::bucket_get_all_response>>();
2070
+ auto f = barrier->get_future();
2071
+ backend->cluster->execute_http(req,
2072
+ [barrier](couchbase::operations::bucket_get_all_response resp) mutable { barrier->set_value(resp); });
2073
+ auto resp = f.get();
2074
+ if (resp.ec) {
2075
+ cb_raise_error_code(resp.ec, "unable to get list of the buckets of the cluster");
2076
+ }
2077
+
2078
+ VALUE res = rb_ary_new_capa(static_cast<long>(resp.buckets.size()));
2079
+ for (const auto& entry : resp.buckets) {
2080
+ VALUE bucket = rb_hash_new();
2081
+ cb__extract_bucket_settings(entry, bucket);
2082
+ rb_ary_push(res, bucket);
2083
+ }
2084
+
2085
+ return res;
2086
+ }
2087
+
2088
+ static VALUE
2089
+ cb_Backend_bucket_get(VALUE self, VALUE bucket_name)
2090
+ {
2091
+ cb_backend_data* backend = nullptr;
2092
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
2093
+
2094
+ if (!backend->cluster) {
2095
+ rb_raise(rb_eArgError, "Cluster has been closed already");
2096
+ }
2097
+
2098
+ Check_Type(bucket_name, T_STRING);
2099
+
2100
+ couchbase::operations::bucket_get_request req{};
2101
+ req.name.assign(RSTRING_PTR(bucket_name), static_cast<size_t>(RSTRING_LEN(bucket_name)));
2102
+ auto barrier = std::make_shared<std::promise<couchbase::operations::bucket_get_response>>();
2103
+ auto f = barrier->get_future();
2104
+ backend->cluster->execute_http(req, [barrier](couchbase::operations::bucket_get_response resp) mutable { barrier->set_value(resp); });
2105
+ auto resp = f.get();
2106
+ if (resp.ec) {
2107
+ cb_raise_error_code(resp.ec, fmt::format("unable to locate bucket \"{}\" on the cluster", req.name));
2108
+ }
2109
+
2110
+ VALUE res = rb_hash_new();
2111
+ cb__extract_bucket_settings(resp.bucket, res);
2112
+
2113
+ return res;
2114
+ }
2115
+
2116
+ static VALUE
2117
+ cb_Backend_cluster_enable_developer_preview(VALUE self)
2118
+ {
2119
+ cb_backend_data* backend = nullptr;
2120
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
2121
+
2122
+ if (!backend->cluster) {
2123
+ rb_raise(rb_eArgError, "Cluster has been closed already");
2124
+ }
2125
+
2126
+ couchbase::operations::cluster_developer_preview_enable_request req{};
2127
+ auto barrier = std::make_shared<std::promise<couchbase::operations::cluster_developer_preview_enable_response>>();
2128
+ auto f = barrier->get_future();
2129
+ backend->cluster->execute_http(
2130
+ req, [barrier](couchbase::operations::cluster_developer_preview_enable_response resp) mutable { barrier->set_value(resp); });
2131
+ auto resp = f.get();
2132
+ if (resp.ec) {
2133
+ cb_raise_error_code(resp.ec, fmt::format("unable to enable developer preview for this cluster"));
2134
+ }
2135
+ spdlog::critical("Developer preview cannot be disabled once it is enabled. If you enter developer preview mode you will not be able to "
2136
+ "upgrade. DO NOT USE IN PRODUCTION.");
2137
+ return Qtrue;
2138
+ }
2139
+
2140
+ static VALUE
2141
+ cb_Backend_scope_get_all(VALUE self, VALUE bucket_name)
2142
+ {
2143
+ cb_backend_data* backend = nullptr;
2144
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
2145
+
2146
+ if (!backend->cluster) {
2147
+ rb_raise(rb_eArgError, "Cluster has been closed already");
2148
+ }
2149
+
2150
+ Check_Type(bucket_name, T_STRING);
2151
+
2152
+ couchbase::operations::scope_get_all_request req{};
2153
+ req.bucket_name.assign(RSTRING_PTR(bucket_name), static_cast<size_t>(RSTRING_LEN(bucket_name)));
2154
+ auto barrier = std::make_shared<std::promise<couchbase::operations::scope_get_all_response>>();
2155
+ auto f = barrier->get_future();
2156
+ backend->cluster->execute_http(req,
2157
+ [barrier](couchbase::operations::scope_get_all_response resp) mutable { barrier->set_value(resp); });
2158
+ auto resp = f.get();
2159
+ if (resp.ec) {
2160
+ cb_raise_error_code(resp.ec, fmt::format("unable to get list of the scopes of the bucket \"{}\"", req.bucket_name));
2161
+ }
2162
+
2163
+ VALUE res = rb_hash_new();
2164
+ rb_hash_aset(res, rb_id2sym(rb_intern("uid")), ULL2NUM(resp.manifest.uid));
2165
+ VALUE scopes = rb_ary_new_capa(static_cast<long>(resp.manifest.scopes.size()));
2166
+ for (const auto& s : resp.manifest.scopes) {
2167
+ VALUE scope = rb_hash_new();
2168
+ rb_hash_aset(scope, rb_id2sym(rb_intern("uid")), ULL2NUM(s.uid));
2169
+ rb_hash_aset(scope, rb_id2sym(rb_intern("name")), rb_str_new(s.name.data(), static_cast<long>(s.name.size())));
2170
+ VALUE collections = rb_ary_new_capa(static_cast<long>(s.collections.size()));
2171
+ for (const auto& c : s.collections) {
2172
+ VALUE collection = rb_hash_new();
2173
+ rb_hash_aset(collection, rb_id2sym(rb_intern("uid")), ULL2NUM(c.uid));
2174
+ rb_hash_aset(collection, rb_id2sym(rb_intern("name")), rb_str_new(c.name.data(), static_cast<long>(c.name.size())));
2175
+ rb_ary_push(collections, collection);
2176
+ }
2177
+ rb_hash_aset(scope, rb_id2sym(rb_intern("collections")), collections);
2178
+ rb_ary_push(scopes, scope);
2179
+ }
2180
+ rb_hash_aset(res, rb_id2sym(rb_intern("scopes")), scopes);
2181
+
2182
+ return res;
2183
+ }
2184
+
2185
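The returned manifest is a nested Hash mirroring the loop above: a top-level :uid plus :scopes, each scope carrying its own :uid, :name and :collections. A short sketch, assuming `backend` is an opened Couchbase::Backend and "default" is an existing bucket (illustrative name):

    manifest = backend.scope_get_all("default")
    manifest[:scopes].each do |scope|
      names = scope[:collections].map { |c| c[:name] }.join(", ")
      puts "#{scope[:name]} (uid=#{scope[:uid]}): #{names}"
    end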
+ static VALUE
2186
+ cb_Backend_scope_create(VALUE self, VALUE bucket_name, VALUE scope_name)
2187
+ {
2188
+ cb_backend_data* backend = nullptr;
2189
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
2190
+
2191
+ if (!backend->cluster) {
2192
+ rb_raise(rb_eArgError, "Cluster has been closed already");
2193
+ }
2194
+
2195
+ Check_Type(bucket_name, T_STRING);
2196
+ Check_Type(scope_name, T_STRING);
2197
+
2198
+ couchbase::operations::scope_create_request req{};
2199
+ req.bucket_name.assign(RSTRING_PTR(bucket_name), static_cast<size_t>(RSTRING_LEN(bucket_name)));
2200
+ req.scope_name.assign(RSTRING_PTR(scope_name), static_cast<size_t>(RSTRING_LEN(scope_name)));
2201
+ auto barrier = std::make_shared<std::promise<couchbase::operations::scope_create_response>>();
2202
+ auto f = barrier->get_future();
2203
+ backend->cluster->execute_http(req, [barrier](couchbase::operations::scope_create_response resp) mutable { barrier->set_value(resp); });
2204
+ auto resp = f.get();
2205
+ if (resp.ec) {
2206
+ cb_raise_error_code(resp.ec, fmt::format("unable to create the scope on the bucket \"{}\"", req.bucket_name));
2207
+ }
2208
+ return ULL2NUM(resp.uid);
2209
+ }
2210
+
2211
+ static VALUE
2212
+ cb_Backend_scope_drop(VALUE self, VALUE bucket_name, VALUE scope_name)
2213
+ {
2214
+ cb_backend_data* backend = nullptr;
2215
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
2216
+
2217
+ if (!backend->cluster) {
2218
+ rb_raise(rb_eArgError, "Cluster has been closed already");
2219
+ }
2220
+
2221
+ Check_Type(bucket_name, T_STRING);
2222
+ Check_Type(scope_name, T_STRING);
2223
+
2224
+ couchbase::operations::scope_drop_request req{};
2225
+ req.bucket_name.assign(RSTRING_PTR(bucket_name), static_cast<size_t>(RSTRING_LEN(bucket_name)));
2226
+ req.scope_name.assign(RSTRING_PTR(scope_name), static_cast<size_t>(RSTRING_LEN(scope_name)));
2227
+ auto barrier = std::make_shared<std::promise<couchbase::operations::scope_drop_response>>();
2228
+ auto f = barrier->get_future();
2229
+ backend->cluster->execute_http(req, [barrier](couchbase::operations::scope_drop_response resp) mutable { barrier->set_value(resp); });
2230
+ auto resp = f.get();
2231
+ if (resp.ec) {
2232
+ cb_raise_error_code(resp.ec, fmt::format("unable to drop the scope \"{}\" on the bucket \"{}\"", req.scope_name, req.bucket_name));
2233
+ }
2234
+ return ULL2NUM(resp.uid);
2235
+ }
2236
+
2237
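Both scope calls take the bucket name and the scope name and return the new collections-manifest uid as an Integer. Sketch with illustrative names, assuming an opened `backend`:

    uid = backend.scope_create("default", "tenant_a")
    # ... create collections inside the scope, use it ...
    uid = backend.scope_drop("default", "tenant_a")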
+ static VALUE
2238
+ cb_Backend_collection_create(VALUE self, VALUE bucket_name, VALUE scope_name, VALUE collection_name, VALUE max_expiry)
2239
+ {
2240
+ cb_backend_data* backend = nullptr;
2241
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
2242
+
2243
+ if (!backend->cluster) {
2244
+ rb_raise(rb_eArgError, "Cluster has been closed already");
899
2245
  }
900
2246
 
901
- auto barrier = std::make_shared<std::promise<couchbase::operations::mutate_in_response>>();
2247
+ Check_Type(bucket_name, T_STRING);
2248
+ Check_Type(scope_name, T_STRING);
2249
+ Check_Type(collection_name, T_STRING);
2250
+
2251
+ couchbase::operations::collection_create_request req{};
2252
+ req.bucket_name.assign(RSTRING_PTR(bucket_name), static_cast<size_t>(RSTRING_LEN(bucket_name)));
2253
+ req.scope_name.assign(RSTRING_PTR(scope_name), static_cast<size_t>(RSTRING_LEN(scope_name)));
2254
+ req.collection_name.assign(RSTRING_PTR(collection_name), static_cast<size_t>(RSTRING_LEN(collection_name)));
2255
+
2256
+ if (!NIL_P(max_expiry)) {
2257
+ Check_Type(max_expiry, T_FIXNUM);
2258
+ req.max_expiry = FIX2UINT(max_expiry);
2259
+ }
2260
+ auto barrier = std::make_shared<std::promise<couchbase::operations::collection_create_response>>();
902
2261
  auto f = barrier->get_future();
903
- backend->cluster->execute(req, [barrier](couchbase::operations::mutate_in_response resp) mutable { barrier->set_value(resp); });
2262
+ backend->cluster->execute_http(req,
2263
+ [barrier](couchbase::operations::collection_create_response resp) mutable { barrier->set_value(resp); });
904
2264
  auto resp = f.get();
905
2265
  if (resp.ec) {
906
- cb_raise_error_code(resp.ec, fmt::format("unable to mutate {}", doc_id));
2266
+ cb_raise_error_code(resp.ec, fmt::format("unable to create the collection on the bucket \"{}\"", req.bucket_name));
907
2267
  }
2268
+ return ULL2NUM(resp.uid);
2269
+ }
908
2270
 
909
- VALUE res = rb_hash_new();
910
- rb_hash_aset(res, rb_id2sym(rb_intern("cas")), ULONG2NUM(resp.cas));
911
- if (resp.first_error_index) {
912
- rb_hash_aset(res, rb_id2sym(rb_intern("first_error_index")), ULONG2NUM(resp.first_error_index.value()));
2271
+ static VALUE
2272
+ cb_Backend_collection_drop(VALUE self, VALUE bucket_name, VALUE scope_name, VALUE collection_name)
2273
+ {
2274
+ cb_backend_data* backend = nullptr;
2275
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
2276
+
2277
+ if (!backend->cluster) {
2278
+ rb_raise(rb_eArgError, "Cluster has been closed already");
913
2279
  }
914
- VALUE fields = rb_ary_new_capa(static_cast<long>(resp.fields.size()));
915
- rb_hash_aset(res, rb_id2sym(rb_intern("fields")), fields);
916
- for (size_t i = 0; i < resp.fields.size(); ++i) {
917
- VALUE entry = rb_hash_new();
2280
+
2281
+ Check_Type(bucket_name, T_STRING);
2282
+ Check_Type(scope_name, T_STRING);
2283
+ Check_Type(collection_name, T_STRING);
2284
+
2285
+ couchbase::operations::collection_drop_request req{};
2286
+ req.bucket_name.assign(RSTRING_PTR(bucket_name), static_cast<size_t>(RSTRING_LEN(bucket_name)));
2287
+ req.scope_name.assign(RSTRING_PTR(scope_name), static_cast<size_t>(RSTRING_LEN(scope_name)));
2288
+ req.collection_name.assign(RSTRING_PTR(collection_name), static_cast<size_t>(RSTRING_LEN(collection_name)));
2289
+
2290
+ auto barrier = std::make_shared<std::promise<couchbase::operations::collection_drop_response>>();
2291
+ auto f = barrier->get_future();
2292
+ backend->cluster->execute_http(req,
2293
+ [barrier](couchbase::operations::collection_drop_response resp) mutable { barrier->set_value(resp); });
2294
+ auto resp = f.get();
2295
+ if (resp.ec) {
2296
+ cb_raise_error_code(
2297
+ resp.ec,
2298
+ fmt::format(
2299
+ R"(unable to drop the collection "{}.{}" on the bucket "{}")", req.scope_name, req.collection_name, req.bucket_name));
2300
+ }
2301
+ return ULL2NUM(resp.uid);
2302
+ }
2303
+
2304
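collection_create takes the bucket, scope and collection names plus an optional max_expiry (seconds as an Integer, or nil to keep the server default); collection_drop omits the expiry. Illustrative sketch, assuming an opened `backend`:

    backend.collection_create("default", "tenant_a", "orders", 3600)
    backend.collection_drop("default", "tenant_a", "orders")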
+ static VALUE
2305
+ cb_Backend_query_index_get_all(VALUE self, VALUE bucket_name)
2306
+ {
2307
+ cb_backend_data* backend = nullptr;
2308
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
2309
+
2310
+ if (!backend->cluster) {
2311
+ rb_raise(rb_eArgError, "Cluster has been closed already");
2312
+ }
2313
+
2314
+ Check_Type(bucket_name, T_STRING);
2315
+
2316
+ couchbase::operations::query_index_get_all_request req{};
2317
+ req.bucket_name.assign(RSTRING_PTR(bucket_name), static_cast<size_t>(RSTRING_LEN(bucket_name)));
2318
+ auto barrier = std::make_shared<std::promise<couchbase::operations::query_index_get_all_response>>();
2319
+ auto f = barrier->get_future();
2320
+ backend->cluster->execute_http(
2321
+ req, [barrier](couchbase::operations::query_index_get_all_response resp) mutable { barrier->set_value(resp); });
2322
+ auto resp = f.get();
2323
+ if (resp.ec) {
2324
+ cb_raise_error_code(resp.ec, fmt::format("unable to get list of the indexes of the bucket \"{}\"", req.bucket_name));
2325
+ }
2326
+
2327
+ VALUE res = rb_hash_new();
2328
+ rb_hash_aset(res, rb_id2sym(rb_intern("status")), rb_str_new(resp.status.data(), static_cast<long>(resp.status.size())));
2329
+ VALUE indexes = rb_ary_new_capa(static_cast<long>(resp.indexes.size()));
2330
+ for (const auto& idx : resp.indexes) {
2331
+ VALUE index = rb_hash_new();
2332
+ rb_hash_aset(index, rb_id2sym(rb_intern("id")), rb_str_new(idx.id.data(), static_cast<long>(idx.id.size())));
2333
+ rb_hash_aset(index, rb_id2sym(rb_intern("state")), rb_str_new(idx.state.data(), static_cast<long>(idx.state.size())));
2334
+ rb_hash_aset(index, rb_id2sym(rb_intern("name")), rb_str_new(idx.name.data(), static_cast<long>(idx.name.size())));
918
2335
  rb_hash_aset(
919
- entry, rb_id2sym(rb_intern("path")), rb_str_new(resp.fields[i].path.data(), static_cast<long>(resp.fields[i].path.size())));
920
- if (resp.fields[i].opcode == couchbase::protocol::subdoc_opcode::counter) {
921
- rb_hash_aset(entry, rb_id2sym(rb_intern("value")), LONG2NUM(std::stoll(resp.fields[i].value)));
922
- } else {
923
- rb_hash_aset(entry,
924
- rb_id2sym(rb_intern("value")),
925
- rb_str_new(resp.fields[i].value.data(), static_cast<long>(resp.fields[i].value.size())));
2336
+ index, rb_id2sym(rb_intern("datastore_id")), rb_str_new(idx.datastore_id.data(), static_cast<long>(idx.datastore_id.size())));
2337
+ rb_hash_aset(
2338
+ index, rb_id2sym(rb_intern("keyspace_id")), rb_str_new(idx.keyspace_id.data(), static_cast<long>(idx.keyspace_id.size())));
2339
+ rb_hash_aset(
2340
+ index, rb_id2sym(rb_intern("namespace_id")), rb_str_new(idx.namespace_id.data(), static_cast<long>(idx.namespace_id.size())));
2341
+ rb_hash_aset(index, rb_id2sym(rb_intern("type")), rb_str_new(idx.type.data(), static_cast<long>(idx.type.size())));
2342
+ rb_hash_aset(index, rb_id2sym(rb_intern("is_primary")), idx.is_primary ? Qtrue : Qfalse);
2343
+ VALUE index_key = rb_ary_new_capa(static_cast<long>(idx.index_key.size()));
2344
+ for (const auto& key : idx.index_key) {
2345
+ rb_ary_push(index_key, rb_str_new(key.data(), static_cast<long>(key.size())));
926
2346
  }
927
- rb_hash_aset(entry, rb_id2sym(rb_intern("status")), cb__map_subdoc_status(resp.fields[i].status));
928
- rb_hash_aset(entry, rb_id2sym(rb_intern("type")), cb__map_subdoc_opcode(resp.fields[i].opcode));
929
- rb_ary_store(fields, static_cast<long>(i), entry);
2347
+ rb_hash_aset(index, rb_id2sym(rb_intern("index_key")), index_key);
2348
+ if (idx.condition) {
2349
+ rb_hash_aset(
2350
+ index, rb_id2sym(rb_intern("condition")), rb_str_new(idx.condition->data(), static_cast<long>(idx.condition->size())));
2351
+ }
2352
+ rb_ary_push(indexes, index);
930
2353
  }
2354
+
2355
+ rb_hash_aset(res, rb_id2sym(rb_intern("indexes")), indexes);
2356
+
931
2357
  return res;
932
2358
  }
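Each index entry in the result carries the fields copied above (:id, :state, :name, :type, :is_primary, :index_key and, when present, :condition). A short sketch with an illustrative bucket name, assuming an opened `backend`:

    result = backend.query_index_get_all("travel-sample")
    result[:indexes].each do |idx|
      puts "#{idx[:name]} primary=#{idx[:is_primary]} keys=#{idx[:index_key].inspect}"
    end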
933
2359
 
934
- static int
935
- cb__for_each_named_param(VALUE key, VALUE value, VALUE arg)
2360
+ static VALUE
2361
+ cb_Backend_query_index_create(VALUE self, VALUE bucket_name, VALUE index_name, VALUE fields, VALUE options)
936
2362
  {
937
- auto* preq = reinterpret_cast<couchbase::operations::query_request*>(arg);
938
- Check_Type(key, T_STRING);
939
- Check_Type(value, T_STRING);
940
- preq->named_parameters.emplace(
941
- std::string_view(RSTRING_PTR(key), static_cast<std::size_t>(RSTRING_LEN(key))),
942
- tao::json::from_string(std::string_view(RSTRING_PTR(value), static_cast<std::size_t>(RSTRING_LEN(value)))));
943
- return ST_CONTINUE;
2363
+ cb_backend_data* backend = nullptr;
2364
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
2365
+
2366
+ if (!backend->cluster) {
2367
+ rb_raise(rb_eArgError, "Cluster has been closed already");
2368
+ }
2369
+
2370
+ Check_Type(bucket_name, T_STRING);
2371
+ Check_Type(index_name, T_STRING);
2372
+ Check_Type(fields, T_ARRAY);
2373
+
2374
+ couchbase::operations::query_index_create_request req{};
2375
+ req.bucket_name.assign(RSTRING_PTR(bucket_name), static_cast<size_t>(RSTRING_LEN(bucket_name)));
2376
+ req.index_name.assign(RSTRING_PTR(index_name), static_cast<size_t>(RSTRING_LEN(index_name)));
2377
+ auto fields_num = static_cast<size_t>(RARRAY_LEN(fields));
2378
+ req.fields.reserve(fields_num);
2379
+ for (size_t i = 0; i < fields_num; ++i) {
2380
+ VALUE entry = rb_ary_entry(fields, static_cast<long>(i));
2381
+ Check_Type(entry, T_STRING);
2382
+ req.fields.emplace_back(RSTRING_PTR(entry), static_cast<std::size_t>(RSTRING_LEN(entry)));
2383
+ }
2384
+ if (!NIL_P(options)) {
2385
+ Check_Type(options, T_HASH);
2386
+ VALUE ignore_if_exists = rb_hash_aref(options, rb_id2sym(rb_intern("ignore_if_exists")));
2387
+ if (ignore_if_exists == Qtrue) {
2388
+ req.ignore_if_exists = true;
2389
+ } else if (ignore_if_exists == Qfalse) {
2390
+ req.ignore_if_exists = false;
2391
+ } /* else use backend default */
2392
+ VALUE deferred = rb_hash_aref(options, rb_id2sym(rb_intern("deferred")));
2393
+ if (deferred == Qtrue) {
2394
+ req.deferred = true;
2395
+ } else if (deferred == Qfalse) {
2396
+ req.deferred = false;
2397
+ } /* else use backend default */
2398
+ VALUE num_replicas = rb_hash_aref(options, rb_id2sym(rb_intern("num_replicas")));
2399
+ if (!NIL_P(num_replicas)) {
2400
+ req.num_replicas = NUM2UINT(num_replicas);
2401
+ } /* else use backend default */
2402
+ VALUE condition = rb_hash_aref(options, rb_id2sym(rb_intern("condition")));
2403
+ if (!NIL_P(condition)) {
2404
+ req.condition.emplace(std::string(RSTRING_PTR(condition), static_cast<std::size_t>(RSTRING_LEN(condition))));
2405
+ } /* else use backend default */
2406
+ }
2407
+
2408
+ auto barrier = std::make_shared<std::promise<couchbase::operations::query_index_create_response>>();
2409
+ auto f = barrier->get_future();
2410
+ backend->cluster->execute_http(
2411
+ req, [barrier](couchbase::operations::query_index_create_response resp) mutable { barrier->set_value(resp); });
2412
+ auto resp = f.get();
2413
+ if (resp.ec) {
2414
+ if (!resp.errors.empty()) {
2415
+ const auto& first_error = resp.errors.front();
2416
+ cb_raise_error_code(resp.ec,
2417
+ fmt::format(R"(unable to create index "{}" on the bucket "{}" ({}: {}))",
2418
+ req.index_name,
2419
+ req.bucket_name,
2420
+ first_error.code,
2421
+ first_error.message));
2422
+ } else {
2423
+ cb_raise_error_code(resp.ec, fmt::format(R"(unable to create index "{}" on the bucket "{}")", req.index_name, req.bucket_name));
2424
+ }
2425
+ }
2426
+ VALUE res = rb_hash_new();
2427
+ rb_hash_aset(res, rb_id2sym(rb_intern("status")), rb_str_new(resp.status.data(), static_cast<long>(resp.status.size())));
2428
+ if (!resp.errors.empty()) {
2429
+ VALUE errors = rb_ary_new_capa(static_cast<long>(resp.errors.size()));
2430
+ for (const auto& err : resp.errors) {
2431
+ VALUE error = rb_hash_new();
2432
+ rb_hash_aset(error, rb_id2sym(rb_intern("code")), ULL2NUM(err.code));
2433
+ rb_hash_aset(error, rb_id2sym(rb_intern("message")), rb_str_new(err.message.data(), static_cast<long>(err.message.size())));
2434
+ rb_ary_push(errors, error);
2435
+ }
2436
+ rb_hash_aset(res, rb_id2sym(rb_intern("errors")), errors);
2437
+ }
2438
+ return res;
944
2439
  }
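query_index_create takes the bucket name, the index name, an Array of field strings and an optional options Hash whose keys (:ignore_if_exists, :deferred, :num_replicas, :condition) come straight from the code above. Illustrative sketch, assuming an opened `backend`:

    res = backend.query_index_create("travel-sample", "idx_city", ["city"],
                                     { ignore_if_exists: true, deferred: true })
    puts res[:status]                             # index service status string
    puts res[:errors].inspect if res[:errors]     # only set when the service reported errors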
945
2440
 
946
2441
  static VALUE
947
- cb_Backend_query(VALUE self, VALUE statement, VALUE options)
2442
+ cb_Backend_query_index_drop(VALUE self, VALUE bucket_name, VALUE index_name, VALUE options)
948
2443
  {
949
2444
  cb_backend_data* backend = nullptr;
950
2445
  TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
@@ -953,114 +2448,247 @@ cb_Backend_query(VALUE self, VALUE statement, VALUE options)
953
2448
  rb_raise(rb_eArgError, "Cluster has been closed already");
954
2449
  }
955
2450
 
956
- Check_Type(statement, T_STRING);
957
- Check_Type(options, T_HASH);
2451
+ Check_Type(bucket_name, T_STRING);
2452
+ Check_Type(index_name, T_STRING);
2453
+
2454
+ couchbase::operations::query_index_drop_request req{};
2455
+ req.bucket_name.assign(RSTRING_PTR(bucket_name), static_cast<size_t>(RSTRING_LEN(bucket_name)));
2456
+ req.index_name.assign(RSTRING_PTR(index_name), static_cast<size_t>(RSTRING_LEN(index_name)));
2457
+ if (!NIL_P(options)) {
2458
+ Check_Type(options, T_HASH);
2459
+ VALUE ignore_if_does_not_exist = rb_hash_aref(options, rb_id2sym(rb_intern("ignore_if_does_not_exist")));
2460
+ if (ignore_if_does_not_exist == Qtrue) {
2461
+ req.ignore_if_does_not_exist = true;
2462
+ } else if (ignore_if_does_not_exist == Qfalse) {
2463
+ req.ignore_if_does_not_exist = false;
2464
+ } /* else use backend default */
2465
+ }
958
2466
 
959
- couchbase::operations::query_request req;
960
- req.statement.assign(RSTRING_PTR(statement), static_cast<size_t>(RSTRING_LEN(statement)));
961
- VALUE adhoc = rb_hash_aref(options, rb_id2sym(rb_intern("adhoc")));
962
- if (!NIL_P(adhoc)) {
963
- req.adhoc = RTEST(adhoc);
2467
+ auto barrier = std::make_shared<std::promise<couchbase::operations::query_index_drop_response>>();
2468
+ auto f = barrier->get_future();
2469
+ backend->cluster->execute_http(req,
2470
+ [barrier](couchbase::operations::query_index_drop_response resp) mutable { barrier->set_value(resp); });
2471
+ auto resp = f.get();
2472
+ if (resp.ec) {
2473
+ if (!resp.errors.empty()) {
2474
+ const auto& first_error = resp.errors.front();
2475
+ cb_raise_error_code(resp.ec,
2476
+ fmt::format(R"(unable to drop index "{}" on the bucket "{}" ({}: {}))",
2477
+ req.index_name,
2478
+ req.bucket_name,
2479
+ first_error.code,
2480
+ first_error.message));
2481
+ } else {
2482
+ cb_raise_error_code(resp.ec, fmt::format(R"(unable to drop index "{}" on the bucket "{}")", req.index_name, req.bucket_name));
2483
+ }
964
2484
  }
965
- VALUE metrics = rb_hash_aref(options, rb_id2sym(rb_intern("metrics")));
966
- if (!NIL_P(metrics)) {
967
- req.metrics = RTEST(metrics);
2485
+ VALUE res = rb_hash_new();
2486
+ rb_hash_aset(res, rb_id2sym(rb_intern("status")), rb_str_new(resp.status.data(), static_cast<long>(resp.status.size())));
2487
+ if (!resp.errors.empty()) {
2488
+ VALUE errors = rb_ary_new_capa(static_cast<long>(resp.errors.size()));
2489
+ for (const auto& err : resp.errors) {
2490
+ VALUE error = rb_hash_new();
2491
+ rb_hash_aset(error, rb_id2sym(rb_intern("code")), ULL2NUM(err.code));
2492
+ rb_hash_aset(error, rb_id2sym(rb_intern("message")), rb_str_new(err.message.data(), static_cast<long>(err.message.size())));
2493
+ rb_ary_push(errors, error);
2494
+ }
2495
+ rb_hash_aset(res, rb_id2sym(rb_intern("errors")), errors);
968
2496
  }
969
- VALUE readonly = rb_hash_aref(options, rb_id2sym(rb_intern("readonly")));
970
- if (!NIL_P(readonly)) {
971
- req.readonly = RTEST(readonly);
2497
+ return res;
2498
+ }
2499
+
2500
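The matching drop call takes the bucket name, the index name and an optional Hash with :ignore_if_does_not_exist. Illustrative sketch, assuming an opened `backend`:

    backend.query_index_drop("travel-sample", "idx_city", { ignore_if_does_not_exist: true })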
+ static VALUE
2501
+ cb_Backend_query_index_create_primary(VALUE self, VALUE bucket_name, VALUE options)
2502
+ {
2503
+ cb_backend_data* backend = nullptr;
2504
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
2505
+
2506
+ if (!backend->cluster) {
2507
+ rb_raise(rb_eArgError, "Cluster has been closed already");
972
2508
  }
973
- VALUE profile = rb_hash_aref(options, rb_id2sym(rb_intern("profile")));
974
- if (!NIL_P(profile)) {
975
- Check_Type(profile, T_SYMBOL);
976
- ID mode = rb_sym2id(profile);
977
- if (mode == rb_intern("phases")) {
978
- req.profile = couchbase::operations::query_request::profile_mode::phases;
979
- } else if (mode == rb_intern("timings")) {
980
- req.profile = couchbase::operations::query_request::profile_mode::timings;
981
- } else if (mode == rb_intern("off")) {
982
- req.profile = couchbase::operations::query_request::profile_mode::off;
2509
+
2510
+ Check_Type(bucket_name, T_STRING);
2511
+ if (!NIL_P(options)) {
2512
+ Check_Type(options, T_HASH);
2513
+ }
2514
+
2515
+ couchbase::operations::query_index_create_request req{};
2516
+ req.is_primary = true;
2517
+ req.bucket_name.assign(RSTRING_PTR(bucket_name), static_cast<size_t>(RSTRING_LEN(bucket_name)));
2518
+ if (!NIL_P(options)) {
2519
+ Check_Type(options, T_HASH);
2520
+ VALUE ignore_if_exists = rb_hash_aref(options, rb_id2sym(rb_intern("ignore_if_exists")));
2521
+ if (ignore_if_exists == Qtrue) {
2522
+ req.ignore_if_exists = true;
2523
+ } else if (ignore_if_exists == Qfalse) {
2524
+ req.ignore_if_exists = false;
2525
+ } /* else use backend default */
2526
+ VALUE deferred = rb_hash_aref(options, rb_id2sym(rb_intern("deferred")));
2527
+ if (deferred == Qtrue) {
2528
+ req.deferred = true;
2529
+ } else if (deferred == Qfalse) {
2530
+ req.deferred = false;
2531
+ } /* else use backend default */
2532
+ VALUE num_replicas = rb_hash_aref(options, rb_id2sym(rb_intern("num_replicas")));
2533
+ if (!NIL_P(num_replicas)) {
2534
+ req.num_replicas = NUM2UINT(num_replicas);
2535
+ } /* else use backend default */
2536
+ VALUE index_name = rb_hash_aref(options, rb_id2sym(rb_intern("index_name")));
2537
+ if (!NIL_P(index_name)) {
2538
+ req.index_name.assign(RSTRING_PTR(index_name), static_cast<size_t>(RSTRING_LEN(index_name)));
2539
+ } /* else use backend default */
2540
+ }
2541
+
2542
+ auto barrier = std::make_shared<std::promise<couchbase::operations::query_index_create_response>>();
2543
+ auto f = barrier->get_future();
2544
+ backend->cluster->execute_http(
2545
+ req, [barrier](couchbase::operations::query_index_create_response resp) mutable { barrier->set_value(resp); });
2546
+ auto resp = f.get();
2547
+ if (resp.ec) {
2548
+ if (!resp.errors.empty()) {
2549
+ const auto& first_error = resp.errors.front();
2550
+ cb_raise_error_code(
2551
+ resp.ec,
2552
+ fmt::format(
2553
+ R"(unable to create primary index on the bucket "{}" ({}: {}))", req.bucket_name, first_error.code, first_error.message));
2554
+ } else {
2555
+ cb_raise_error_code(resp.ec,
2556
+ fmt::format(R"(unable to create primary index on the bucket "{}")", req.bucket_name));
983
2557
  }
984
2558
  }
985
- VALUE positional_params = rb_hash_aref(options, rb_id2sym(rb_intern("positional_parameters")));
986
- if (!NIL_P(positional_params)) {
987
- Check_Type(positional_params, T_ARRAY);
988
- auto entries_num = static_cast<size_t>(RARRAY_LEN(positional_params));
989
- req.positional_parameters.reserve(entries_num);
990
- for (size_t i = 0; i < entries_num; ++i) {
991
- VALUE entry = rb_ary_entry(positional_params, static_cast<long>(i));
992
- Check_Type(entry, T_STRING);
993
- req.positional_parameters.emplace_back(
994
- tao::json::from_string(std::string_view(RSTRING_PTR(entry), static_cast<std::size_t>(RSTRING_LEN(entry)))));
2559
+ VALUE res = rb_hash_new();
2560
+ rb_hash_aset(res, rb_id2sym(rb_intern("status")), rb_str_new(resp.status.data(), static_cast<long>(resp.status.size())));
2561
+ if (!resp.errors.empty()) {
2562
+ VALUE errors = rb_ary_new_capa(static_cast<long>(resp.errors.size()));
2563
+ for (const auto& err : resp.errors) {
2564
+ VALUE error = rb_hash_new();
2565
+ rb_hash_aset(error, rb_id2sym(rb_intern("code")), ULL2NUM(err.code));
2566
+ rb_hash_aset(error, rb_id2sym(rb_intern("message")), rb_str_new(err.message.data(), static_cast<long>(err.message.size())));
2567
+ rb_ary_push(errors, error);
995
2568
  }
2569
+ rb_hash_aset(res, rb_id2sym(rb_intern("errors")), errors);
996
2570
  }
997
- VALUE named_params = rb_hash_aref(options, rb_id2sym(rb_intern("named_parameters")));
998
- if (!NIL_P(named_params)) {
999
- Check_Type(named_params, T_HASH);
1000
- rb_hash_foreach(named_params, INT_FUNC(cb__for_each_named_param), reinterpret_cast<VALUE>(&req));
2571
+ return res;
2572
+ }
2573
+
2574
+ static VALUE
2575
+ cb_Backend_query_index_drop_primary(VALUE self, VALUE bucket_name, VALUE options)
2576
+ {
2577
+ cb_backend_data* backend = nullptr;
2578
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
2579
+
2580
+ if (!backend->cluster) {
2581
+ rb_raise(rb_eArgError, "Cluster has been closed already");
1001
2582
  }
1002
2583
 
1003
- auto barrier = std::make_shared<std::promise<couchbase::operations::query_response>>();
2584
+ Check_Type(bucket_name, T_STRING);
2585
+
2586
+ couchbase::operations::query_index_drop_request req{};
2587
+ req.is_primary = true;
2588
+ req.bucket_name.assign(RSTRING_PTR(bucket_name), static_cast<size_t>(RSTRING_LEN(bucket_name)));
2589
+ if (!NIL_P(options)) {
2590
+ Check_Type(options, T_HASH);
2591
+ VALUE ignore_if_does_not_exist = rb_hash_aref(options, rb_id2sym(rb_intern("ignore_if_does_not_exist")));
2592
+ if (ignore_if_does_not_exist == Qtrue) {
2593
+ req.ignore_if_does_not_exist = true;
2594
+ } else if (ignore_if_does_not_exist == Qfalse) {
2595
+ req.ignore_if_does_not_exist = false;
2596
+ } /* else use backend default */
2597
+ VALUE index_name = rb_hash_aref(options, rb_id2sym(rb_intern("index_name")));
2598
+ if (!NIL_P(index_name)) {
2599
+ Check_Type(index_name, T_STRING);
2600
+ req.is_primary = false;
2601
+ req.index_name.assign(RSTRING_PTR(index_name), static_cast<size_t>(RSTRING_LEN(index_name)));
2602
+ }
2603
+ }
2604
+
2605
+ auto barrier = std::make_shared<std::promise<couchbase::operations::query_index_drop_response>>();
1004
2606
  auto f = barrier->get_future();
1005
- backend->cluster->execute(req, [barrier](couchbase::operations::query_response resp) mutable { barrier->set_value(resp); });
2607
+ backend->cluster->execute_http(req,
2608
+ [barrier](couchbase::operations::query_index_drop_response resp) mutable { barrier->set_value(resp); });
1006
2609
  auto resp = f.get();
1007
2610
  if (resp.ec) {
1008
- cb_raise_error_code(resp.ec, fmt::format("unable to query: {}", req.statement.substr(0, 50)));
2611
+ if (!resp.errors.empty()) {
2612
+ const auto& first_error = resp.errors.front();
2613
+ cb_raise_error_code(
2614
+ resp.ec,
2615
+ fmt::format(
2616
+ R"(unable to drop primary index on the bucket "{}" ({}: {}))", req.bucket_name, first_error.code, first_error.message));
2617
+ } else {
2618
+ cb_raise_error_code(resp.ec, fmt::format(R"(unable to drop primary index on the bucket "{}")", req.bucket_name));
2619
+ }
1009
2620
  }
1010
2621
  VALUE res = rb_hash_new();
1011
- VALUE rows = rb_ary_new_capa(static_cast<long>(resp.payload.rows.size()));
1012
- rb_hash_aset(res, rb_id2sym(rb_intern("rows")), rows);
1013
- for (auto& row : resp.payload.rows) {
1014
- rb_ary_push(rows, rb_str_new(row.data(), static_cast<long>(row.size())));
1015
- }
1016
- VALUE meta = rb_hash_new();
1017
- rb_hash_aset(res, rb_id2sym(rb_intern("meta")), meta);
1018
- rb_hash_aset(meta,
1019
- rb_id2sym(rb_intern("status")),
1020
- rb_id2sym(rb_intern2(resp.payload.meta_data.status.data(), static_cast<long>(resp.payload.meta_data.status.size()))));
1021
- rb_hash_aset(meta,
1022
- rb_id2sym(rb_intern("request_id")),
1023
- rb_str_new(resp.payload.meta_data.request_id.data(), static_cast<long>(resp.payload.meta_data.request_id.size())));
1024
- rb_hash_aset(
1025
- meta,
1026
- rb_id2sym(rb_intern("client_context_id")),
1027
- rb_str_new(resp.payload.meta_data.client_context_id.data(), static_cast<long>(resp.payload.meta_data.client_context_id.size())));
1028
- if (resp.payload.meta_data.signature) {
1029
- rb_hash_aset(meta,
1030
- rb_id2sym(rb_intern("signature")),
1031
- rb_str_new(resp.payload.meta_data.signature->data(), static_cast<long>(resp.payload.meta_data.signature->size())));
2622
+ rb_hash_aset(res, rb_id2sym(rb_intern("status")), rb_str_new(resp.status.data(), static_cast<long>(resp.status.size())));
2623
+ if (!resp.errors.empty()) {
2624
+ VALUE errors = rb_ary_new_capa(static_cast<long>(resp.errors.size()));
2625
+ for (const auto& err : resp.errors) {
2626
+ VALUE error = rb_hash_new();
2627
+ rb_hash_aset(error, rb_id2sym(rb_intern("code")), ULL2NUM(err.code));
2628
+ rb_hash_aset(error, rb_id2sym(rb_intern("message")), rb_str_new(err.message.data(), static_cast<long>(err.message.size())));
2629
+ rb_ary_push(errors, error);
2630
+ }
2631
+ rb_hash_aset(res, rb_id2sym(rb_intern("errors")), errors);
1032
2632
  }
1033
- if (resp.payload.meta_data.profile) {
1034
- rb_hash_aset(meta,
1035
- rb_id2sym(rb_intern("profile")),
1036
- rb_str_new(resp.payload.meta_data.profile->data(), static_cast<long>(resp.payload.meta_data.profile->size())));
2633
+ return res;
2634
+ }
2635
+
2636
+ static VALUE
2637
+ cb_Backend_query_index_build_deferred(VALUE self, VALUE bucket_name, VALUE options)
2638
+ {
2639
+ cb_backend_data* backend = nullptr;
2640
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
2641
+
2642
+ if (!backend->cluster) {
2643
+ rb_raise(rb_eArgError, "Cluster has been closed already");
1037
2644
  }
1038
- metrics = rb_hash_new();
1039
- rb_hash_aset(meta, rb_id2sym(rb_intern("metrics")), metrics);
1040
- rb_hash_aset(metrics,
1041
- rb_id2sym(rb_intern("elapsed_time")),
1042
- rb_str_new(resp.payload.meta_data.metrics.elapsed_time.data(),
1043
- static_cast<long>(resp.payload.meta_data.metrics.elapsed_time.size())));
1044
- rb_hash_aset(metrics,
1045
- rb_id2sym(rb_intern("execution_time")),
1046
- rb_str_new(resp.payload.meta_data.metrics.execution_time.data(),
1047
- static_cast<long>(resp.payload.meta_data.metrics.execution_time.size())));
1048
- rb_hash_aset(metrics, rb_id2sym(rb_intern("result_count")), ULL2NUM(resp.payload.meta_data.metrics.result_count));
1049
- rb_hash_aset(metrics, rb_id2sym(rb_intern("result_size")), ULL2NUM(resp.payload.meta_data.metrics.result_count));
1050
- if (resp.payload.meta_data.metrics.sort_count) {
1051
- rb_hash_aset(metrics, rb_id2sym(rb_intern("sort_count")), ULL2NUM(*resp.payload.meta_data.metrics.sort_count));
2645
+
2646
+ Check_Type(bucket_name, T_STRING);
2647
+ if (!NIL_P(options)) {
2648
+ Check_Type(options, T_HASH);
1052
2649
  }
1053
- if (resp.payload.meta_data.metrics.mutation_count) {
1054
- rb_hash_aset(metrics, rb_id2sym(rb_intern("mutation_count")), ULL2NUM(*resp.payload.meta_data.metrics.mutation_count));
2650
+
2651
+ couchbase::operations::query_index_build_deferred_request req{};
2652
+ req.bucket_name.assign(RSTRING_PTR(bucket_name), static_cast<size_t>(RSTRING_LEN(bucket_name)));
2653
+ auto barrier = std::make_shared<std::promise<couchbase::operations::query_index_build_deferred_response>>();
2654
+ auto f = barrier->get_future();
2655
+ backend->cluster->execute_http(
2656
+ req, [barrier](couchbase::operations::query_index_build_deferred_response resp) mutable { barrier->set_value(resp); });
2657
+ auto resp = f.get();
2658
+ if (resp.ec) {
2659
+ if (!resp.errors.empty()) {
2660
+ const auto& first_error = resp.errors.front();
2661
+ cb_raise_error_code(
2662
+ resp.ec,
2663
+ fmt::format(
2664
+ R"(unable to trigger build for deferred indexes for the bucket "{}" ({}: {}))", req.bucket_name, first_error.code, first_error.message));
2665
+
2666
+ } else {
2667
+ cb_raise_error_code(resp.ec,
2668
+ fmt::format("unable to trigger build for deferred indexes for the bucket \"{}\"", req.bucket_name));
2669
+ }
1055
2670
  }
1056
- if (resp.payload.meta_data.metrics.error_count) {
1057
- rb_hash_aset(metrics, rb_id2sym(rb_intern("error_count")), ULL2NUM(*resp.payload.meta_data.metrics.error_count));
2671
+ return Qtrue;
2672
+ }
2673
+
2674
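The primary-index helpers mirror the calls above, and query_index_build_deferred kicks off the build of any indexes created with :deferred. Illustrative sketch, assuming an opened `backend`:

    backend.query_index_create_primary("travel-sample", { deferred: true })
    backend.query_index_build_deferred("travel-sample", nil)                        # => true
    backend.query_index_drop_primary("travel-sample", { ignore_if_does_not_exist: true })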
+ static VALUE
2675
+ cb_Backend_query_index_watch(VALUE self, VALUE bucket_name, VALUE index_names, VALUE timeout, VALUE options)
2676
+ {
2677
+ cb_backend_data* backend = nullptr;
2678
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
2679
+
2680
+ if (!backend->cluster) {
2681
+ rb_raise(rb_eArgError, "Cluster has been closed already");
1058
2682
  }
1059
- if (resp.payload.meta_data.metrics.warning_count) {
1060
- rb_hash_aset(metrics, rb_id2sym(rb_intern("warning_count")), ULL2NUM(*resp.payload.meta_data.metrics.warning_count));
2683
+
2684
+ Check_Type(bucket_name, T_STRING);
2685
+ Check_Type(index_names, T_ARRAY);
2686
+ Check_Type(timeout, T_FIXNUM);
2687
+ if (!NIL_P(options)) {
2688
+ Check_Type(options, T_HASH);
1061
2689
  }
1062
2690
 
1063
- return res;
2691
+ return Qtrue;
1064
2692
  }
1065
2693
 
1066
2694
  static void
@@ -1071,12 +2699,45 @@ init_backend(VALUE mCouchbase)
1071
2699
  rb_define_method(cBackend, "open", VALUE_FUNC(cb_Backend_open), 3);
1072
2700
  rb_define_method(cBackend, "close", VALUE_FUNC(cb_Backend_close), 0);
1073
2701
  rb_define_method(cBackend, "open_bucket", VALUE_FUNC(cb_Backend_open_bucket), 1);
1074
- rb_define_method(cBackend, "get", VALUE_FUNC(cb_Backend_get), 3);
1075
- rb_define_method(cBackend, "upsert", VALUE_FUNC(cb_Backend_upsert), 4);
1076
- rb_define_method(cBackend, "remove", VALUE_FUNC(cb_Backend_remove), 3);
1077
- rb_define_method(cBackend, "lookup_in", VALUE_FUNC(cb_Backend_lookup_in), 5);
1078
- rb_define_method(cBackend, "mutate_in", VALUE_FUNC(cb_Backend_mutate_in), 5);
1079
- rb_define_method(cBackend, "query", VALUE_FUNC(cb_Backend_query), 2);
2702
+
2703
+ rb_define_method(cBackend, "document_get", VALUE_FUNC(cb_Backend_document_get), 3);
2704
+ rb_define_method(cBackend, "document_get_and_lock", VALUE_FUNC(cb_Backend_document_get_and_lock), 4);
2705
+ rb_define_method(cBackend, "document_get_and_touch", VALUE_FUNC(cb_Backend_document_get_and_touch), 4);
2706
+ rb_define_method(cBackend, "document_insert", VALUE_FUNC(cb_Backend_document_insert), 6);
2707
+ rb_define_method(cBackend, "document_replace", VALUE_FUNC(cb_Backend_document_replace), 6);
2708
+ rb_define_method(cBackend, "document_upsert", VALUE_FUNC(cb_Backend_document_upsert), 6);
2709
+ rb_define_method(cBackend, "document_remove", VALUE_FUNC(cb_Backend_document_remove), 4);
2710
+ rb_define_method(cBackend, "document_lookup_in", VALUE_FUNC(cb_Backend_document_lookup_in), 5);
2711
+ rb_define_method(cBackend, "document_mutate_in", VALUE_FUNC(cb_Backend_document_mutate_in), 6);
2712
+ rb_define_method(cBackend, "document_query", VALUE_FUNC(cb_Backend_document_query), 2);
2713
+ rb_define_method(cBackend, "document_touch", VALUE_FUNC(cb_Backend_document_touch), 4);
2714
+ rb_define_method(cBackend, "document_exists", VALUE_FUNC(cb_Backend_document_exists), 3);
2715
+ rb_define_method(cBackend, "document_unlock", VALUE_FUNC(cb_Backend_document_unlock), 4);
2716
+ rb_define_method(cBackend, "document_increment", VALUE_FUNC(cb_Backend_document_increment), 4);
2717
+ rb_define_method(cBackend, "document_decrement", VALUE_FUNC(cb_Backend_document_decrement), 4);
2718
+
2719
+ rb_define_method(cBackend, "bucket_create", VALUE_FUNC(cb_Backend_bucket_create), 1);
2720
+ rb_define_method(cBackend, "bucket_update", VALUE_FUNC(cb_Backend_bucket_update), 1);
2721
+ rb_define_method(cBackend, "bucket_drop", VALUE_FUNC(cb_Backend_bucket_drop), 1);
2722
+ rb_define_method(cBackend, "bucket_flush", VALUE_FUNC(cb_Backend_bucket_flush), 1);
2723
+ rb_define_method(cBackend, "bucket_get_all", VALUE_FUNC(cb_Backend_bucket_get_all), 0);
2724
+ rb_define_method(cBackend, "bucket_get", VALUE_FUNC(cb_Backend_bucket_get), 1);
2725
+
2726
+ rb_define_method(cBackend, "cluster_enable_developer_preview!", VALUE_FUNC(cb_Backend_cluster_enable_developer_preview), 0);
2727
+
2728
+ rb_define_method(cBackend, "scope_get_all", VALUE_FUNC(cb_Backend_scope_get_all), 1);
2729
+ rb_define_method(cBackend, "scope_create", VALUE_FUNC(cb_Backend_scope_create), 2);
2730
+ rb_define_method(cBackend, "scope_drop", VALUE_FUNC(cb_Backend_scope_drop), 2);
2731
+ rb_define_method(cBackend, "collection_create", VALUE_FUNC(cb_Backend_collection_create), 4);
2732
+ rb_define_method(cBackend, "collection_drop", VALUE_FUNC(cb_Backend_collection_drop), 3);
2733
+
2734
+ rb_define_method(cBackend, "query_index_get_all", VALUE_FUNC(cb_Backend_query_index_get_all), 1);
2735
+ rb_define_method(cBackend, "query_index_create", VALUE_FUNC(cb_Backend_query_index_create), 4);
2736
+ rb_define_method(cBackend, "query_index_create_primary", VALUE_FUNC(cb_Backend_query_index_create_primary), 2);
2737
+ rb_define_method(cBackend, "query_index_drop", VALUE_FUNC(cb_Backend_query_index_drop), 3);
2738
+ rb_define_method(cBackend, "query_index_drop_primary", VALUE_FUNC(cb_Backend_query_index_drop_primary), 2);
2739
+ rb_define_method(cBackend, "query_index_build_deferred", VALUE_FUNC(cb_Backend_query_index_build_deferred), 2);
2740
+ rb_define_method(cBackend, "query_index_watch", VALUE_FUNC(cb_Backend_query_index_watch), 4);
1080
2741
  }
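The registrations above rename the key/value entry points with a document_ prefix (the old get/upsert/remove/lookup_in/mutate_in/query bindings are removed) and expose the bucket, scope, collection and query-index management methods implemented earlier in this diff; note that query_index_watch is registered but, as implemented above, only validates its arguments and returns true. A tiny sketch to confirm the new surface at runtime, assuming an opened `backend`:

    %i[document_get document_query bucket_get_all scope_create query_index_create].each do |name|
      puts "#{name}: #{backend.respond_to?(name)}"
    end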
1081
2742
 
1082
2743
  extern "C" {