couchbase 3.0.0.alpha.3-universal-darwin-19 → 3.0.0.alpha.4-universal-darwin-19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61) hide show
  1. checksums.yaml +4 -4
  2. data/.github/workflows/tests-6.0.3.yml +4 -1
  3. data/.github/workflows/tests-dev-preview.yml +4 -1
  4. data/.github/workflows/tests.yml +4 -1
  5. data/README.md +1 -1
  6. data/bin/check-cluster +31 -0
  7. data/bin/init-cluster +16 -4
  8. data/examples/analytics.rb +221 -0
  9. data/examples/managing_analytics_indexes.rb +72 -0
  10. data/examples/managing_view_indexes.rb +54 -0
  11. data/examples/search_with_consistency.rb +84 -0
  12. data/examples/view.rb +50 -0
  13. data/ext/.clang-tidy +1 -0
  14. data/ext/build_version.hxx.in +1 -1
  15. data/ext/couchbase/bucket.hxx +0 -1
  16. data/ext/couchbase/couchbase.cxx +1421 -55
  17. data/ext/couchbase/io/dns_client.hxx +215 -0
  18. data/ext/couchbase/io/dns_codec.hxx +207 -0
  19. data/ext/couchbase/io/dns_config.hxx +116 -0
  20. data/ext/couchbase/io/dns_message.hxx +558 -0
  21. data/ext/couchbase/io/http_session.hxx +16 -4
  22. data/ext/couchbase/io/mcbp_session.hxx +2 -1
  23. data/ext/couchbase/mutation_token.hxx +1 -1
  24. data/ext/couchbase/operations.hxx +19 -0
  25. data/ext/couchbase/operations/analytics_dataset_create.hxx +117 -0
  26. data/ext/couchbase/operations/analytics_dataset_drop.hxx +103 -0
  27. data/ext/couchbase/operations/analytics_dataset_get_all.hxx +107 -0
  28. data/ext/couchbase/operations/analytics_dataverse_create.hxx +104 -0
  29. data/ext/couchbase/operations/analytics_dataverse_drop.hxx +104 -0
  30. data/ext/couchbase/operations/analytics_get_pending_mutations.hxx +91 -0
  31. data/ext/couchbase/operations/analytics_index_create.hxx +128 -0
  32. data/ext/couchbase/operations/analytics_index_drop.hxx +110 -0
  33. data/ext/couchbase/operations/analytics_index_get_all.hxx +106 -0
  34. data/ext/couchbase/operations/analytics_link_connect.hxx +102 -0
  35. data/ext/couchbase/operations/analytics_link_disconnect.hxx +101 -0
  36. data/ext/couchbase/operations/design_document.hxx +59 -0
  37. data/ext/couchbase/operations/document_analytics.hxx +293 -0
  38. data/ext/couchbase/operations/document_query.hxx +2 -2
  39. data/ext/couchbase/operations/document_search.hxx +19 -1
  40. data/ext/couchbase/operations/document_view.hxx +227 -0
  41. data/ext/couchbase/operations/search_index.hxx +17 -0
  42. data/ext/couchbase/operations/search_index_control_ingest.hxx +3 -1
  43. data/ext/couchbase/operations/view_index_drop.hxx +67 -0
  44. data/ext/couchbase/operations/view_index_get.hxx +90 -0
  45. data/ext/couchbase/operations/view_index_get_all.hxx +125 -0
  46. data/ext/couchbase/operations/view_index_upsert.hxx +87 -0
  47. data/ext/couchbase/service_type.hxx +38 -1
  48. data/ext/couchbase/timeout_defaults.hxx +3 -1
  49. data/ext/couchbase/utils/connection_string.hxx +231 -0
  50. data/ext/couchbase/version.hxx +1 -1
  51. data/ext/test/main.cxx +3 -12
  52. data/lib/couchbase/analytics_options.rb +165 -0
  53. data/lib/couchbase/bucket.rb +49 -0
  54. data/lib/couchbase/cluster.rb +46 -207
  55. data/lib/couchbase/management/analytics_index_manager.rb +138 -24
  56. data/lib/couchbase/management/view_index_manager.rb +63 -10
  57. data/lib/couchbase/query_options.rb +219 -0
  58. data/lib/couchbase/search_options.rb +6 -6
  59. data/lib/couchbase/version.rb +1 -1
  60. data/lib/couchbase/view_options.rb +155 -0
  61. metadata +34 -2
@@ -0,0 +1,54 @@
1
+ require "couchbase"
2
+ include Couchbase
3
+
4
+ def measure(msg)
5
+ start = Time.now
6
+ yield
7
+ printf "%s in %.2f seconds\n", msg, Time.now - start
8
+ end
9
+
10
+ def display_indexes(manager, namespace)
11
+ indexes = manager.get_all_design_documents(namespace)
12
+ puts "\"#{namespace}\" namespace of the bucket \"#{manager.bucket_name}\" contains #{indexes.size} design documents:"
13
+ indexes.each do |index|
14
+ puts " * #{index.name} (#{index.views.size} views)"
15
+ index.views.each do |name, view|
16
+ puts " - #{name}"
17
+ puts " map:\n#{view.map.strip.gsub(/^/, " | ")}" if view.has_map?
18
+ puts " reduce:\n#{view.reduce.strip.gsub(/^/, " | ")}" if view.has_reduce?
19
+ end
20
+ end
21
+ end
22
+
23
+ bucket_name = "beer-sample"
24
+
25
+ options = Cluster::ClusterOptions.new
26
+ options.authenticate("Administrator", "password")
27
+ cluster = Cluster.connect("couchbase://localhost", options)
28
+
29
+ bucket = cluster.bucket(bucket_name)
30
+ manager = bucket.view_indexes
31
+
32
+ display_indexes(manager, :production)
33
+
34
+ design_document_name = "test"
35
+ begin
36
+ manager.drop_design_document(design_document_name, :development)
37
+ rescue Error::DesignDocumentNotFound
38
+ # ignore
39
+ end
40
+
41
+ view = Management::View.new
42
+ view.map_function = "function (doc, meta) { emit(meta.id, null) }"
43
+
44
+ design_document = Management::DesignDocument.new
45
+ design_document.name = design_document_name
46
+ design_document.views["get_all"] = view
47
+
48
+ manager.upsert_design_document(design_document, :development)
49
+
50
+ display_indexes(manager, :development)
51
+
52
+ # copy design document from development namespace to production
53
+ manager.publish_design_document(design_document_name)
54
+ display_indexes(manager, :production)
@@ -0,0 +1,84 @@
1
+ require 'couchbase'
2
+
3
+ include Couchbase
4
+
5
+ options = Cluster::ClusterOptions.new
6
+ options.authenticate("Administrator", "password")
7
+ cluster = Cluster.connect("couchbase://localhost", options)
8
+
9
+ bucket_name = "default"
10
+ # create index definition, if it does not exist already
11
+ search_index_name = "knob_search"
12
+ begin
13
+ cluster.search_indexes.get_index(search_index_name)
14
+ rescue Error::IndexNotFound
15
+ index = Management::SearchIndex.new
16
+ index.type = "fulltext-index"
17
+ index.name = search_index_name
18
+ index.source_type = "couchbase"
19
+ index.source_name = bucket_name
20
+ index.params = {
21
+ mapping: {
22
+ types: {
23
+ "knob" => {
24
+ properties: {
25
+ "name" => {
26
+ fields: [
27
+ {
28
+ name: "name",
29
+ type: "text",
30
+ include_in_all: true,
31
+ include_term_vectors: true,
32
+ index: true,
33
+ store: true,
34
+ docvalues: true,
35
+ }
36
+ ]
37
+ },
38
+ }
39
+ }
40
+ }
41
+ }
42
+ }
43
+
44
+ cluster.search_indexes.upsert_index(index)
45
+
46
+ num_indexed = 0
47
+ loop do
48
+ sleep(1)
49
+ num = cluster.search_indexes.get_indexed_documents_count(search_index_name)
50
+ break if num_indexed == num
51
+ num_indexed = num
52
+ puts "indexing #{search_index_name.inspect}: #{num_indexed} documents"
53
+ end
54
+ end
55
+
56
+ collection = cluster.bucket(bucket_name).default_collection
57
+
58
+ # The application need to to know exactly which mutation affect the result,
59
+ # and supply mutation tokens from those operations.
60
+ random_string = ("a".."z").to_a.sample(10).join
61
+ res = collection.upsert("user:#{random_string}", {
62
+ "name" => "Brass Doorknob",
63
+ "email" => "brass.doorknob@example.com",
64
+ "data" => random_string,
65
+ })
66
+
67
+ state = MutationState.new(res.mutation_token)
68
+ # state.add(*tokens) could be used to add more tokens
69
+
70
+ query = Cluster::SearchQuery.term("doorknob")
71
+ options = Cluster::SearchOptions.new
72
+ options.timeout = 10_000
73
+ options.consistent_with(state)
74
+ res = cluster.search_query(search_index_name, query, options)
75
+
76
+ res.rows.each do |row|
77
+ if row.id == "user:#{random_string}"
78
+ puts "--- Found our newly created document!"
79
+ end
80
+ if ENV['REMOVE_DOOR_KNOBS']
81
+ puts "Removing #{row.id} (requested via env)"
82
+ collection.remove(row.id)
83
+ end
84
+ end
data/examples/view.rb ADDED
@@ -0,0 +1,50 @@
1
+ # coding: utf-8
2
+ #
3
+ require 'couchbase'
4
+
5
+ include Couchbase
6
+
7
+ options = Cluster::ClusterOptions.new
8
+ options.authenticate("Administrator", "password")
9
+ cluster = Cluster.connect("couchbase://localhost", options)
10
+
11
+ bucket = cluster.bucket("beer-sample")
12
+ collection = bucket.default_collection
13
+
14
+ options = Bucket::ViewOptions.new
15
+ options.reduce = true
16
+ options.group_level = 1
17
+ res = bucket.view_query("beer", "by_location", options)
18
+ puts "Breweries by country:"
19
+ res.rows.each do |row|
20
+ puts "#{row.key.first}: #{row.value} breweries"
21
+ end
22
+
23
+ options = Bucket::ViewOptions.new
24
+ options.limit = 10
25
+ options.order = :descending
26
+ res = bucket.view_query("beer", "brewery_beers", options)
27
+ puts "\nTotal documents in 'beer/brewery_beers' index: #{res.meta_data.total_rows}"
28
+ puts "Last #{options.limit} documents:"
29
+ res.rows.each_with_index do |row|
30
+ doc = collection.get(row.id)
31
+ puts "#{row.id} (type: #{doc.content['type']}, key: #{row.key})"
32
+ end
33
+
34
+
35
+ random_number = rand(0..1_000_000_000)
36
+ unique_brewery_id = "random_brewery:#{random_number}"
37
+ collection.upsert("random_brewery:#{random_number}", {
38
+ "name" => "Random brewery: #{random_number}",
39
+ "type" => "brewery"
40
+ })
41
+ puts "\nRequest with consistency. Generated brewery name: #{unique_brewery_id}"
42
+ options = Bucket::ViewOptions.new
43
+ options.start_key = ["random_brewery:"]
44
+ options.scan_consistency = :request_plus
45
+ res = bucket.view_query("beer", "brewery_beers", options)
46
+ res.rows.each do |row|
47
+ if row.id == unique_brewery_id
48
+ puts "Found newly created document: #{collection.get(row.id).content}"
49
+ end
50
+ end
data/ext/.clang-tidy CHANGED
@@ -18,4 +18,5 @@ Checks: |-
18
18
  -cppcoreguidelines-pro-bounds-pointer-arithmetic,
19
19
  -modernize-use-trailing-return-type,
20
20
  -llvmlibc-callee-namespace,
21
+ -abseil-*,
21
22
  -fuchsia-*
@@ -22,5 +22,5 @@
22
22
  #define BACKEND_C_COMPILER "@CMAKE_C_COMPILER_ID@ @CMAKE_C_COMPILER_VERSION@"
23
23
  #define BACKEND_SYSTEM "@CMAKE_SYSTEM@"
24
24
  #define BACKEND_SYSTEM_PROCESSOR "@CMAKE_SYSTEM_PROCESSOR@"
25
- #define BACKEND_GIT_REVISION "86b887957baf7230bfbe853de48b4507b9c2c18d"
25
+ #define BACKEND_GIT_REVISION "bb2110dbfe863e98cd246a07a3ae54f87a7a20b1"
26
26
 
@@ -86,7 +86,6 @@ class bucket
86
86
  }
87
87
 
88
88
  private:
89
-
90
89
  asio::io_context& ctx_;
91
90
  std::string name_;
92
91
  configuration config_;
@@ -29,6 +29,9 @@
29
29
  #include <cluster.hxx>
30
30
  #include <operations.hxx>
31
31
 
32
+ #include <io/dns_client.hxx>
33
+ #include <utils/connection_string.hxx>
34
+
32
35
  #include <ruby.h>
33
36
  #if defined(HAVE_RUBY_VERSION_H)
34
37
  #include <ruby/version.h>
@@ -989,6 +992,8 @@ cb_Backend_document_upsert(VALUE self, VALUE bucket, VALUE collection, VALUE id,
989
992
  ID level = rb_sym2id(durability_level);
990
993
  if (level == rb_intern("none")) {
991
994
  req.durability_level = couchbase::protocol::durability_level::none;
995
+ } else if (level == rb_intern("majority")) {
996
+ req.durability_level = couchbase::protocol::durability_level::majority;
992
997
  } else if (level == rb_intern("majority_and_persist_to_active")) {
993
998
  req.durability_level = couchbase::protocol::durability_level::majority_and_persist_to_active;
994
999
  } else if (level == rb_intern("persist_to_majority")) {
@@ -1060,6 +1065,8 @@ cb_Backend_document_replace(VALUE self, VALUE bucket, VALUE collection, VALUE id
1060
1065
  ID level = rb_sym2id(durability_level);
1061
1066
  if (level == rb_intern("none")) {
1062
1067
  req.durability_level = couchbase::protocol::durability_level::none;
1068
+ } else if (level == rb_intern("majority")) {
1069
+ req.durability_level = couchbase::protocol::durability_level::majority;
1063
1070
  } else if (level == rb_intern("majority_and_persist_to_active")) {
1064
1071
  req.durability_level = couchbase::protocol::durability_level::majority_and_persist_to_active;
1065
1072
  } else if (level == rb_intern("persist_to_majority")) {
@@ -1142,6 +1149,8 @@ cb_Backend_document_insert(VALUE self, VALUE bucket, VALUE collection, VALUE id,
1142
1149
  ID level = rb_sym2id(durability_level);
1143
1150
  if (level == rb_intern("none")) {
1144
1151
  req.durability_level = couchbase::protocol::durability_level::none;
1152
+ } else if (level == rb_intern("majority")) {
1153
+ req.durability_level = couchbase::protocol::durability_level::majority;
1145
1154
  } else if (level == rb_intern("majority_and_persist_to_active")) {
1146
1155
  req.durability_level = couchbase::protocol::durability_level::majority_and_persist_to_active;
1147
1156
  } else if (level == rb_intern("persist_to_majority")) {
@@ -1208,6 +1217,8 @@ cb_Backend_document_remove(VALUE self, VALUE bucket, VALUE collection, VALUE id,
1208
1217
  ID level = rb_sym2id(durability_level);
1209
1218
  if (level == rb_intern("none")) {
1210
1219
  req.durability_level = couchbase::protocol::durability_level::none;
1220
+ } else if (level == rb_intern("majority")) {
1221
+ req.durability_level = couchbase::protocol::durability_level::majority;
1211
1222
  } else if (level == rb_intern("majority_and_persist_to_active")) {
1212
1223
  req.durability_level = couchbase::protocol::durability_level::majority_and_persist_to_active;
1213
1224
  } else if (level == rb_intern("persist_to_majority")) {
@@ -1268,6 +1279,8 @@ cb_Backend_document_increment(VALUE self, VALUE bucket, VALUE collection, VALUE
1268
1279
  ID level = rb_sym2id(durability_level);
1269
1280
  if (level == rb_intern("none")) {
1270
1281
  req.durability_level = couchbase::protocol::durability_level::none;
1282
+ } else if (level == rb_intern("majority")) {
1283
+ req.durability_level = couchbase::protocol::durability_level::majority;
1271
1284
  } else if (level == rb_intern("majority_and_persist_to_active")) {
1272
1285
  req.durability_level = couchbase::protocol::durability_level::majority_and_persist_to_active;
1273
1286
  } else if (level == rb_intern("persist_to_majority")) {
@@ -1358,6 +1371,8 @@ cb_Backend_document_decrement(VALUE self, VALUE bucket, VALUE collection, VALUE
1358
1371
  ID level = rb_sym2id(durability_level);
1359
1372
  if (level == rb_intern("none")) {
1360
1373
  req.durability_level = couchbase::protocol::durability_level::none;
1374
+ } else if (level == rb_intern("majority")) {
1375
+ req.durability_level = couchbase::protocol::durability_level::majority;
1361
1376
  } else if (level == rb_intern("majority_and_persist_to_active")) {
1362
1377
  req.durability_level = couchbase::protocol::durability_level::majority_and_persist_to_active;
1363
1378
  } else if (level == rb_intern("persist_to_majority")) {
@@ -1650,6 +1665,8 @@ cb_Backend_document_mutate_in(VALUE self,
1650
1665
  ID level = rb_sym2id(durability_level);
1651
1666
  if (level == rb_intern("none")) {
1652
1667
  req.durability_level = couchbase::protocol::durability_level::none;
1668
+ } else if (level == rb_intern("majority")) {
1669
+ req.durability_level = couchbase::protocol::durability_level::majority;
1653
1670
  } else if (level == rb_intern("majority_and_persist_to_active")) {
1654
1671
  req.durability_level = couchbase::protocol::durability_level::majority_and_persist_to_active;
1655
1672
  } else if (level == rb_intern("persist_to_majority")) {
@@ -3886,68 +3903,1417 @@ cb_Backend_document_search(VALUE self, VALUE index_name, VALUE query, VALUE opti
3886
3903
  return Qnil;
3887
3904
  }
3888
3905
 
3889
- static void
3890
- init_backend(VALUE mCouchbase)
3906
+ static VALUE
3907
+ cb_Backend_dns_srv(VALUE self, VALUE hostname, VALUE service)
3891
3908
  {
3892
- VALUE cBackend = rb_define_class_under(mCouchbase, "Backend", rb_cBasicObject);
3893
- rb_define_alloc_func(cBackend, cb_Backend_allocate);
3894
- rb_define_method(cBackend, "open", VALUE_FUNC(cb_Backend_open), 3);
3895
- rb_define_method(cBackend, "close", VALUE_FUNC(cb_Backend_close), 0);
3896
- rb_define_method(cBackend, "open_bucket", VALUE_FUNC(cb_Backend_open_bucket), 1);
3909
+ (void)self;
3910
+ Check_Type(hostname, T_STRING);
3911
+ Check_Type(service, T_SYMBOL);
3897
3912
 
3898
- rb_define_method(cBackend, "document_get", VALUE_FUNC(cb_Backend_document_get), 4);
3899
- rb_define_method(cBackend, "document_get_projected", VALUE_FUNC(cb_Backend_document_get_projected), 7);
3900
- rb_define_method(cBackend, "document_get_and_lock", VALUE_FUNC(cb_Backend_document_get_and_lock), 5);
3901
- rb_define_method(cBackend, "document_get_and_touch", VALUE_FUNC(cb_Backend_document_get_and_touch), 5);
3902
- rb_define_method(cBackend, "document_insert", VALUE_FUNC(cb_Backend_document_insert), 7);
3903
- rb_define_method(cBackend, "document_replace", VALUE_FUNC(cb_Backend_document_replace), 7);
3904
- rb_define_method(cBackend, "document_upsert", VALUE_FUNC(cb_Backend_document_upsert), 7);
3905
- rb_define_method(cBackend, "document_remove", VALUE_FUNC(cb_Backend_document_remove), 5);
3906
- rb_define_method(cBackend, "document_lookup_in", VALUE_FUNC(cb_Backend_document_lookup_in), 6);
3907
- rb_define_method(cBackend, "document_mutate_in", VALUE_FUNC(cb_Backend_document_mutate_in), 7);
3908
- rb_define_method(cBackend, "document_query", VALUE_FUNC(cb_Backend_document_query), 2);
3909
- rb_define_method(cBackend, "document_touch", VALUE_FUNC(cb_Backend_document_touch), 5);
3910
- rb_define_method(cBackend, "document_exists", VALUE_FUNC(cb_Backend_document_exists), 4);
3911
- rb_define_method(cBackend, "document_unlock", VALUE_FUNC(cb_Backend_document_unlock), 5);
3912
- rb_define_method(cBackend, "document_increment", VALUE_FUNC(cb_Backend_document_increment), 5);
3913
- rb_define_method(cBackend, "document_decrement", VALUE_FUNC(cb_Backend_document_decrement), 5);
3914
- rb_define_method(cBackend, "document_search", VALUE_FUNC(cb_Backend_document_search), 3);
3913
+ bool tls = false;
3915
3914
 
3916
- rb_define_method(cBackend, "bucket_create", VALUE_FUNC(cb_Backend_bucket_create), 2);
3917
- rb_define_method(cBackend, "bucket_update", VALUE_FUNC(cb_Backend_bucket_update), 2);
3918
- rb_define_method(cBackend, "bucket_drop", VALUE_FUNC(cb_Backend_bucket_drop), 2);
3919
- rb_define_method(cBackend, "bucket_flush", VALUE_FUNC(cb_Backend_bucket_flush), 2);
3920
- rb_define_method(cBackend, "bucket_get_all", VALUE_FUNC(cb_Backend_bucket_get_all), 1);
3921
- rb_define_method(cBackend, "bucket_get", VALUE_FUNC(cb_Backend_bucket_get), 2);
3915
+ ID type = rb_sym2id(service);
3916
+ if (type == rb_intern("couchbase")) {
3917
+ tls = false;
3918
+ } else if (type == rb_intern("couchbases")) {
3919
+ tls = true;
3920
+ } else {
3921
+ rb_raise(rb_eArgError, "Unsupported service type: %+" PRIsVALUE, service);
3922
+ }
3923
+ VALUE exc = Qnil;
3924
+ do {
3925
+ asio::io_context ctx;
3922
3926
 
3923
- rb_define_method(cBackend, "cluster_enable_developer_preview!", VALUE_FUNC(cb_Backend_cluster_enable_developer_preview), 0);
3927
+ couchbase::io::dns::dns_client client(ctx);
3928
+ std::string host_name(RSTRING_PTR(hostname), static_cast<size_t>(RSTRING_LEN(hostname)));
3929
+ std::string service_name("_couchbase");
3930
+ if (tls) {
3931
+ service_name = "_couchbases";
3932
+ }
3933
+ auto barrier = std::make_shared<std::promise<couchbase::io::dns::dns_client::dns_srv_response>>();
3934
+ auto f = barrier->get_future();
3935
+ client.query_srv(
3936
+ host_name, service_name, [barrier](couchbase::io::dns::dns_client::dns_srv_response resp) mutable { barrier->set_value(resp); });
3937
+ ctx.run();
3938
+ auto resp = f.get();
3939
+ if (resp.ec) {
3940
+ exc = cb__map_error_code(resp.ec, fmt::format("DNS SRV query failure for name \"{}\" (service: {})", host_name, service_name));
3941
+ break;
3942
+ }
3924
3943
 
3925
- rb_define_method(cBackend, "scope_get_all", VALUE_FUNC(cb_Backend_scope_get_all), 2);
3926
- rb_define_method(cBackend, "scope_create", VALUE_FUNC(cb_Backend_scope_create), 3);
3927
- rb_define_method(cBackend, "scope_drop", VALUE_FUNC(cb_Backend_scope_drop), 3);
3928
- rb_define_method(cBackend, "collection_create", VALUE_FUNC(cb_Backend_collection_create), 5);
3929
- rb_define_method(cBackend, "collection_drop", VALUE_FUNC(cb_Backend_collection_drop), 4);
3944
+ VALUE res = rb_ary_new();
3945
+ for (const auto& target : resp.targets) {
3946
+ VALUE addr = rb_hash_new();
3947
+ rb_hash_aset(
3948
+ addr, rb_id2sym(rb_intern("hostname")), rb_str_new(target.hostname.data(), static_cast<long>(target.hostname.size())));
3949
+ rb_hash_aset(addr, rb_id2sym(rb_intern("port")), UINT2NUM(target.port));
3950
+ rb_ary_push(res, addr);
3951
+ }
3952
+ return res;
3953
+ } while (false);
3954
+ rb_exc_raise(exc);
3955
+ return Qnil;
3956
+ }
3930
3957
 
3931
- rb_define_method(cBackend, "query_index_get_all", VALUE_FUNC(cb_Backend_query_index_get_all), 2);
3932
- rb_define_method(cBackend, "query_index_create", VALUE_FUNC(cb_Backend_query_index_create), 5);
3933
- rb_define_method(cBackend, "query_index_create_primary", VALUE_FUNC(cb_Backend_query_index_create_primary), 3);
3934
- rb_define_method(cBackend, "query_index_drop", VALUE_FUNC(cb_Backend_query_index_drop), 4);
3935
- rb_define_method(cBackend, "query_index_drop_primary", VALUE_FUNC(cb_Backend_query_index_drop_primary), 3);
3936
- rb_define_method(cBackend, "query_index_build_deferred", VALUE_FUNC(cb_Backend_query_index_build_deferred), 2);
3937
- rb_define_method(cBackend, "query_index_watch", VALUE_FUNC(cb_Backend_query_index_watch), 4);
3958
+ static VALUE
3959
+ cb_Backend_analytics_get_pending_mutations(VALUE self, VALUE timeout)
3960
+ {
3961
+ cb_backend_data* backend = nullptr;
3962
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
3938
3963
 
3939
- rb_define_method(cBackend, "search_index_get_all", VALUE_FUNC(cb_Backend_search_index_get_all), 1);
3940
- rb_define_method(cBackend, "search_index_get", VALUE_FUNC(cb_Backend_search_index_get), 2);
3941
- rb_define_method(cBackend, "search_index_upsert", VALUE_FUNC(cb_Backend_search_index_upsert), 2);
3942
- rb_define_method(cBackend, "search_index_drop", VALUE_FUNC(cb_Backend_search_index_drop), 2);
3943
- rb_define_method(cBackend, "search_index_get_documents_count", VALUE_FUNC(cb_Backend_search_index_get_documents_count), 2);
3944
- rb_define_method(cBackend, "search_index_pause_ingest", VALUE_FUNC(cb_Backend_search_index_pause_ingest), 2);
3945
- rb_define_method(cBackend, "search_index_resume_ingest", VALUE_FUNC(cb_Backend_search_index_resume_ingest), 2);
3946
- rb_define_method(cBackend, "search_index_allow_querying", VALUE_FUNC(cb_Backend_search_index_allow_querying), 2);
3947
- rb_define_method(cBackend, "search_index_disallow_querying", VALUE_FUNC(cb_Backend_search_index_disallow_querying), 2);
3948
- rb_define_method(cBackend, "search_index_freeze_plan", VALUE_FUNC(cb_Backend_search_index_freeze_plan), 2);
3949
- rb_define_method(cBackend, "search_index_unfreeze_plan", VALUE_FUNC(cb_Backend_search_index_unfreeze_plan), 2);
3950
- rb_define_method(cBackend, "search_index_analyze_document", VALUE_FUNC(cb_Backend_search_index_analyze_document), 3);
3964
+ if (!backend->cluster) {
3965
+ rb_raise(rb_eArgError, "Cluster has been closed already");
3966
+ }
3967
+
3968
+ VALUE exc = Qnil;
3969
+ do {
3970
+ couchbase::operations::analytics_get_pending_mutations_request req{};
3971
+ cb__extract_timeout(req, timeout);
3972
+ auto barrier = std::make_shared<std::promise<couchbase::operations::analytics_get_pending_mutations_response>>();
3973
+ auto f = barrier->get_future();
3974
+ backend->cluster->execute_http(
3975
+ req, [barrier](couchbase::operations::analytics_get_pending_mutations_response resp) mutable { barrier->set_value(resp); });
3976
+ auto resp = f.get();
3977
+ if (resp.ec) {
3978
+ if (resp.errors.empty()) {
3979
+ exc = cb__map_error_code(resp.ec, "unable to get pending mutations for the analytics service");
3980
+ } else {
3981
+ const auto& first_error = resp.errors.front();
3982
+ exc = cb__map_error_code(
3983
+ resp.ec,
3984
+ fmt::format("unable to get pending mutations for the analytics service ({}: {})", first_error.code, first_error.message));
3985
+ }
3986
+ break;
3987
+ }
3988
+ VALUE res = rb_hash_new();
3989
+ for (const auto& entry : resp.stats) {
3990
+ rb_hash_aset(res, rb_str_new(entry.first.data(), static_cast<long>(entry.first.size())), ULL2NUM(entry.second));
3991
+ }
3992
+ return res;
3993
+ } while (false);
3994
+ rb_exc_raise(exc);
3995
+ return Qnil;
3996
+ }
3997
+
3998
+ static VALUE
3999
+ cb_Backend_analytics_dataset_get_all(VALUE self, VALUE timeout)
4000
+ {
4001
+ cb_backend_data* backend = nullptr;
4002
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
4003
+
4004
+ if (!backend->cluster) {
4005
+ rb_raise(rb_eArgError, "Cluster has been closed already");
4006
+ }
4007
+
4008
+ VALUE exc = Qnil;
4009
+ do {
4010
+ couchbase::operations::analytics_dataset_get_all_request req{};
4011
+ cb__extract_timeout(req, timeout);
4012
+ auto barrier = std::make_shared<std::promise<couchbase::operations::analytics_dataset_get_all_response>>();
4013
+ auto f = barrier->get_future();
4014
+ backend->cluster->execute_http(
4015
+ req, [barrier](couchbase::operations::analytics_dataset_get_all_response resp) mutable { barrier->set_value(resp); });
4016
+ auto resp = f.get();
4017
+ if (resp.ec) {
4018
+ if (resp.errors.empty()) {
4019
+ exc = cb__map_error_code(resp.ec, "unable to fetch all datasets");
4020
+ } else {
4021
+ const auto& first_error = resp.errors.front();
4022
+ exc =
4023
+ cb__map_error_code(resp.ec, fmt::format("unable to fetch all datasets ({}: {})", first_error.code, first_error.message));
4024
+ }
4025
+ break;
4026
+ }
4027
+ VALUE res = rb_ary_new_capa(static_cast<long>(resp.datasets.size()));
4028
+ for (const auto& ds : resp.datasets) {
4029
+ VALUE dataset = rb_hash_new();
4030
+ rb_hash_aset(dataset, rb_id2sym(rb_intern("name")), rb_str_new(ds.name.data(), static_cast<long>(ds.name.size())));
4031
+ rb_hash_aset(dataset,
4032
+ rb_id2sym(rb_intern("dataverse_name")),
4033
+ rb_str_new(ds.dataverse_name.data(), static_cast<long>(ds.dataverse_name.size())));
4034
+ rb_hash_aset(
4035
+ dataset, rb_id2sym(rb_intern("link_name")), rb_str_new(ds.link_name.data(), static_cast<long>(ds.link_name.size())));
4036
+ rb_hash_aset(
4037
+ dataset, rb_id2sym(rb_intern("bucket_name")), rb_str_new(ds.bucket_name.data(), static_cast<long>(ds.bucket_name.size())));
4038
+ rb_ary_push(res, dataset);
4039
+ }
4040
+ return res;
4041
+ } while (false);
4042
+ rb_exc_raise(exc);
4043
+ return Qnil;
4044
+ }
4045
+
4046
+ static VALUE
4047
+ cb_Backend_analytics_dataset_drop(VALUE self, VALUE dataset_name, VALUE dataverse_name, VALUE ignore_if_does_not_exist, VALUE timeout)
4048
+ {
4049
+ cb_backend_data* backend = nullptr;
4050
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
4051
+
4052
+ if (!backend->cluster) {
4053
+ rb_raise(rb_eArgError, "Cluster has been closed already");
4054
+ }
4055
+
4056
+ Check_Type(dataset_name, T_STRING);
4057
+ if (!NIL_P(dataverse_name)) {
4058
+ Check_Type(dataverse_name, T_STRING);
4059
+ }
4060
+
4061
+ VALUE exc = Qnil;
4062
+ do {
4063
+ couchbase::operations::analytics_dataset_drop_request req{};
4064
+ cb__extract_timeout(req, timeout);
4065
+ req.dataset_name.assign(RSTRING_PTR(dataset_name), static_cast<size_t>(RSTRING_LEN(dataset_name)));
4066
+ if (!NIL_P(dataverse_name)) {
4067
+ req.dataverse_name.assign(RSTRING_PTR(dataverse_name), static_cast<size_t>(RSTRING_LEN(dataverse_name)));
4068
+ }
4069
+ if (!NIL_P(ignore_if_does_not_exist)) {
4070
+ req.ignore_if_does_not_exist = RTEST(ignore_if_does_not_exist);
4071
+ }
4072
+ auto barrier = std::make_shared<std::promise<couchbase::operations::analytics_dataset_drop_response>>();
4073
+ auto f = barrier->get_future();
4074
+ backend->cluster->execute_http(
4075
+ req, [barrier](couchbase::operations::analytics_dataset_drop_response resp) mutable { barrier->set_value(resp); });
4076
+ auto resp = f.get();
4077
+ if (resp.ec) {
4078
+ if (resp.errors.empty()) {
4079
+ exc = cb__map_error_code(resp.ec, fmt::format("unable to drop dataset `{}`.`{}`", req.dataverse_name, req.dataset_name));
4080
+ } else {
4081
+ const auto& first_error = resp.errors.front();
4082
+ exc = cb__map_error_code(resp.ec,
4083
+ fmt::format("unable to drop dataset `{}`.`{}` ({}: {})",
4084
+ req.dataverse_name,
4085
+ req.dataset_name,
4086
+ first_error.code,
4087
+ first_error.message));
4088
+ }
4089
+ break;
4090
+ }
4091
+ return Qtrue;
4092
+ } while (false);
4093
+ rb_exc_raise(exc);
4094
+ return Qnil;
4095
+ }
4096
+
4097
+ static VALUE
4098
+ cb_Backend_analytics_dataset_create(VALUE self,
4099
+ VALUE dataset_name,
4100
+ VALUE bucket_name,
4101
+ VALUE condition,
4102
+ VALUE dataverse_name,
4103
+ VALUE ignore_if_exists,
4104
+ VALUE timeout)
4105
+ {
4106
+ cb_backend_data* backend = nullptr;
4107
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
4108
+
4109
+ if (!backend->cluster) {
4110
+ rb_raise(rb_eArgError, "Cluster has been closed already");
4111
+ }
4112
+
4113
+ Check_Type(dataset_name, T_STRING);
4114
+ Check_Type(bucket_name, T_STRING);
4115
+ if (!NIL_P(condition)) {
4116
+ Check_Type(condition, T_STRING);
4117
+ }
4118
+ if (!NIL_P(dataverse_name)) {
4119
+ Check_Type(dataverse_name, T_STRING);
4120
+ }
4121
+
4122
+ VALUE exc = Qnil;
4123
+ do {
4124
+ couchbase::operations::analytics_dataset_create_request req{};
4125
+ cb__extract_timeout(req, timeout);
4126
+ req.dataset_name.assign(RSTRING_PTR(dataset_name), static_cast<size_t>(RSTRING_LEN(dataset_name)));
4127
+ req.bucket_name.assign(RSTRING_PTR(bucket_name), static_cast<size_t>(RSTRING_LEN(bucket_name)));
4128
+ if (!NIL_P(condition)) {
4129
+ req.condition.emplace(std::string(RSTRING_PTR(condition), static_cast<size_t>(RSTRING_LEN(condition))));
4130
+ }
4131
+ if (!NIL_P(dataverse_name)) {
4132
+ req.dataverse_name.assign(RSTRING_PTR(dataverse_name), static_cast<size_t>(RSTRING_LEN(dataverse_name)));
4133
+ }
4134
+ if (!NIL_P(ignore_if_exists)) {
4135
+ req.ignore_if_exists = RTEST(ignore_if_exists);
4136
+ }
4137
+ auto barrier = std::make_shared<std::promise<couchbase::operations::analytics_dataset_create_response>>();
4138
+ auto f = barrier->get_future();
4139
+ backend->cluster->execute_http(
4140
+ req, [barrier](couchbase::operations::analytics_dataset_create_response resp) mutable { barrier->set_value(resp); });
4141
+ auto resp = f.get();
4142
+ if (resp.ec) {
4143
+ if (resp.errors.empty()) {
4144
+ exc = cb__map_error_code(resp.ec, fmt::format("unable to create dataset `{}`.`{}`", req.dataverse_name, req.dataset_name));
4145
+ } else {
4146
+ const auto& first_error = resp.errors.front();
4147
+ exc = cb__map_error_code(resp.ec,
4148
+ fmt::format("unable to create dataset `{}`.`{}` ({}: {})",
4149
+ req.dataverse_name,
4150
+ req.dataset_name,
4151
+ first_error.code,
4152
+ first_error.message));
4153
+ }
4154
+ break;
4155
+ }
4156
+ return Qtrue;
4157
+ } while (false);
4158
+ rb_exc_raise(exc);
4159
+ return Qnil;
4160
+ }
4161
+
4162
+ static VALUE
4163
+ cb_Backend_analytics_dataverse_drop(VALUE self, VALUE dataverse_name, VALUE ignore_if_does_not_exist, VALUE timeout)
4164
+ {
4165
+ cb_backend_data* backend = nullptr;
4166
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
4167
+
4168
+ if (!backend->cluster) {
4169
+ rb_raise(rb_eArgError, "Cluster has been closed already");
4170
+ }
4171
+
4172
+ Check_Type(dataverse_name, T_STRING);
4173
+
4174
+ VALUE exc = Qnil;
4175
+ do {
4176
+ couchbase::operations::analytics_dataverse_drop_request req{};
4177
+ cb__extract_timeout(req, timeout);
4178
+ req.dataverse_name.assign(RSTRING_PTR(dataverse_name), static_cast<size_t>(RSTRING_LEN(dataverse_name)));
4179
+ if (!NIL_P(ignore_if_does_not_exist)) {
4180
+ req.ignore_if_does_not_exist = RTEST(ignore_if_does_not_exist);
4181
+ }
4182
+ auto barrier = std::make_shared<std::promise<couchbase::operations::analytics_dataverse_drop_response>>();
4183
+ auto f = barrier->get_future();
4184
+ backend->cluster->execute_http(
4185
+ req, [barrier](couchbase::operations::analytics_dataverse_drop_response resp) mutable { barrier->set_value(resp); });
4186
+ auto resp = f.get();
4187
+ if (resp.ec) {
4188
+ if (resp.errors.empty()) {
4189
+ exc = cb__map_error_code(resp.ec, fmt::format("unable to drop dataverse `{}`", req.dataverse_name));
4190
+ } else {
4191
+ const auto& first_error = resp.errors.front();
4192
+ exc = cb__map_error_code(
4193
+ resp.ec,
4194
+ fmt::format("unable to drop dataverse `{}` ({}: {})", req.dataverse_name, first_error.code, first_error.message));
4195
+ }
4196
+ break;
4197
+ }
4198
+ return Qtrue;
4199
+ } while (false);
4200
+ rb_exc_raise(exc);
4201
+ return Qnil;
4202
+ }
4203
+
4204
+ static VALUE
4205
+ cb_Backend_analytics_dataverse_create(VALUE self, VALUE dataverse_name, VALUE ignore_if_exists, VALUE timeout)
4206
+ {
4207
+ cb_backend_data* backend = nullptr;
4208
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
4209
+
4210
+ if (!backend->cluster) {
4211
+ rb_raise(rb_eArgError, "Cluster has been closed already");
4212
+ }
4213
+
4214
+ Check_Type(dataverse_name, T_STRING);
4215
+ if (!NIL_P(dataverse_name)) {
4216
+ Check_Type(dataverse_name, T_STRING);
4217
+ }
4218
+
4219
+ VALUE exc = Qnil;
4220
+ do {
4221
+ couchbase::operations::analytics_dataverse_create_request req{};
4222
+ cb__extract_timeout(req, timeout);
4223
+ req.dataverse_name.assign(RSTRING_PTR(dataverse_name), static_cast<size_t>(RSTRING_LEN(dataverse_name)));
4224
+ if (!NIL_P(ignore_if_exists)) {
4225
+ req.ignore_if_exists = RTEST(ignore_if_exists);
4226
+ }
4227
+ auto barrier = std::make_shared<std::promise<couchbase::operations::analytics_dataverse_create_response>>();
4228
+ auto f = barrier->get_future();
4229
+ backend->cluster->execute_http(
4230
+ req, [barrier](couchbase::operations::analytics_dataverse_create_response resp) mutable { barrier->set_value(resp); });
4231
+ auto resp = f.get();
4232
+ if (resp.ec) {
4233
+ if (resp.errors.empty()) {
4234
+ exc = cb__map_error_code(resp.ec, fmt::format("unable to create dataverse `{}`", req.dataverse_name));
4235
+ } else {
4236
+ const auto& first_error = resp.errors.front();
4237
+ exc = cb__map_error_code(
4238
+ resp.ec,
4239
+ fmt::format("unable to create dataverse `{}` ({}: {})", req.dataverse_name, first_error.code, first_error.message));
4240
+ }
4241
+ break;
4242
+ }
4243
+ return Qtrue;
4244
+ } while (false);
4245
+ rb_exc_raise(exc);
4246
+ return Qnil;
4247
+ }
4248
+
4249
+ static VALUE
4250
+ cb_Backend_analytics_index_get_all(VALUE self, VALUE timeout)
4251
+ {
4252
+ cb_backend_data* backend = nullptr;
4253
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
4254
+
4255
+ if (!backend->cluster) {
4256
+ rb_raise(rb_eArgError, "Cluster has been closed already");
4257
+ }
4258
+
4259
+ VALUE exc = Qnil;
4260
+ do {
4261
+ couchbase::operations::analytics_index_get_all_request req{};
4262
+ cb__extract_timeout(req, timeout);
4263
+ auto barrier = std::make_shared<std::promise<couchbase::operations::analytics_index_get_all_response>>();
4264
+ auto f = barrier->get_future();
4265
+ backend->cluster->execute_http(
4266
+ req, [barrier](couchbase::operations::analytics_index_get_all_response resp) mutable { barrier->set_value(resp); });
4267
+ auto resp = f.get();
4268
+ if (resp.ec) {
4269
+ if (resp.errors.empty()) {
4270
+ exc = cb__map_error_code(resp.ec, "unable to fetch all indexes");
4271
+ } else {
4272
+ const auto& first_error = resp.errors.front();
4273
+ exc =
4274
+ cb__map_error_code(resp.ec, fmt::format("unable to fetch all indexes ({}: {})", first_error.code, first_error.message));
4275
+ }
4276
+ break;
4277
+ }
4278
+ VALUE res = rb_ary_new_capa(static_cast<long>(resp.indexes.size()));
4279
+ for (const auto& idx : resp.indexes) {
4280
+ VALUE index = rb_hash_new();
4281
+ rb_hash_aset(index, rb_id2sym(rb_intern("name")), rb_str_new(idx.name.data(), static_cast<long>(idx.name.size())));
4282
+ rb_hash_aset(
4283
+ index, rb_id2sym(rb_intern("dataset_name")), rb_str_new(idx.dataset_name.data(), static_cast<long>(idx.dataset_name.size())));
4284
+ rb_hash_aset(index,
4285
+ rb_id2sym(rb_intern("dataverse_name")),
4286
+ rb_str_new(idx.dataverse_name.data(), static_cast<long>(idx.dataverse_name.size())));
4287
+ rb_hash_aset(index, rb_id2sym(rb_intern("is_primary")), idx.is_primary ? Qtrue : Qfalse);
4288
+ rb_ary_push(res, index);
4289
+ }
4290
+ return res;
4291
+ } while (false);
4292
+ rb_exc_raise(exc);
4293
+ return Qnil;
4294
+ }
4295
+
4296
+ static VALUE
4297
+ cb_Backend_analytics_index_create(VALUE self,
4298
+ VALUE index_name,
4299
+ VALUE dataset_name,
4300
+ VALUE fields,
4301
+ VALUE dataverse_name,
4302
+ VALUE ignore_if_exists,
4303
+ VALUE timeout)
4304
+ {
4305
+ cb_backend_data* backend = nullptr;
4306
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
4307
+
4308
+ if (!backend->cluster) {
4309
+ rb_raise(rb_eArgError, "Cluster has been closed already");
4310
+ }
4311
+
4312
+ Check_Type(index_name, T_STRING);
4313
+ Check_Type(dataset_name, T_STRING);
4314
+ Check_Type(fields, T_ARRAY);
4315
+ if (!NIL_P(dataverse_name)) {
4316
+ Check_Type(dataverse_name, T_STRING);
4317
+ }
4318
+
4319
+ VALUE exc = Qnil;
4320
+ do {
4321
+ couchbase::operations::analytics_index_create_request req{};
4322
+ cb__extract_timeout(req, timeout);
4323
+ req.index_name.assign(RSTRING_PTR(index_name), static_cast<size_t>(RSTRING_LEN(index_name)));
4324
+ req.dataset_name.assign(RSTRING_PTR(dataset_name), static_cast<size_t>(RSTRING_LEN(dataset_name)));
4325
+ auto fields_num = static_cast<size_t>(RARRAY_LEN(fields));
4326
+ for (size_t i = 0; i < fields_num; ++i) {
4327
+ VALUE entry = rb_ary_entry(fields, static_cast<long>(i));
4328
+ Check_Type(entry, T_ARRAY);
4329
+ if (RARRAY_LEN(entry) == 2) {
4330
+ VALUE field = rb_ary_entry(entry, 0);
4331
+ VALUE type = rb_ary_entry(entry, 1);
4332
+ req.fields.emplace(std::string(RSTRING_PTR(field), static_cast<std::size_t>(RSTRING_LEN(field))),
4333
+ std::string(RSTRING_PTR(type), static_cast<std::size_t>(RSTRING_LEN(type))));
4334
+ }
4335
+ }
4336
+ if (!NIL_P(dataverse_name)) {
4337
+ req.dataverse_name.assign(RSTRING_PTR(dataverse_name), static_cast<size_t>(RSTRING_LEN(dataverse_name)));
4338
+ }
4339
+ if (!NIL_P(ignore_if_exists)) {
4340
+ req.ignore_if_exists = RTEST(ignore_if_exists);
4341
+ }
4342
+ auto barrier = std::make_shared<std::promise<couchbase::operations::analytics_index_create_response>>();
4343
+ auto f = barrier->get_future();
4344
+ backend->cluster->execute_http(
4345
+ req, [barrier](couchbase::operations::analytics_index_create_response resp) mutable { barrier->set_value(resp); });
4346
+ auto resp = f.get();
4347
+ if (resp.ec) {
4348
+ if (resp.errors.empty()) {
4349
+ exc = cb__map_error_code(
4350
+ resp.ec, fmt::format("unable to create index `{}` on `{}`.`{}`", req.index_name, req.dataverse_name, req.dataset_name));
4351
+ } else {
4352
+ const auto& first_error = resp.errors.front();
4353
+ exc = cb__map_error_code(resp.ec,
4354
+ fmt::format("unable to create index `{}` on `{}`.`{}` ({}: {})",
4355
+ req.index_name,
4356
+ req.dataverse_name,
4357
+ req.dataset_name,
4358
+ first_error.code,
4359
+ first_error.message));
4360
+ }
4361
+ break;
4362
+ }
4363
+ return Qtrue;
4364
+ } while (false);
4365
+ rb_exc_raise(exc);
4366
+ return Qnil;
4367
+ }
4368
+
4369
+ static VALUE
4370
+ cb_Backend_analytics_index_drop(VALUE self,
4371
+ VALUE index_name,
4372
+ VALUE dataset_name,
4373
+ VALUE dataverse_name,
4374
+ VALUE ignore_if_does_not_exist,
4375
+ VALUE timeout)
4376
+ {
4377
+ cb_backend_data* backend = nullptr;
4378
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
4379
+
4380
+ if (!backend->cluster) {
4381
+ rb_raise(rb_eArgError, "Cluster has been closed already");
4382
+ }
4383
+
4384
+ Check_Type(index_name, T_STRING);
4385
+ Check_Type(dataset_name, T_STRING);
4386
+ if (!NIL_P(dataverse_name)) {
4387
+ Check_Type(dataverse_name, T_STRING);
4388
+ }
4389
+
4390
+ VALUE exc = Qnil;
4391
+ do {
4392
+ couchbase::operations::analytics_index_drop_request req{};
4393
+ cb__extract_timeout(req, timeout);
4394
+ req.index_name.assign(RSTRING_PTR(index_name), static_cast<size_t>(RSTRING_LEN(index_name)));
4395
+ req.dataset_name.assign(RSTRING_PTR(dataset_name), static_cast<size_t>(RSTRING_LEN(dataset_name)));
4396
+ if (!NIL_P(dataverse_name)) {
4397
+ req.dataverse_name.assign(RSTRING_PTR(dataverse_name), static_cast<size_t>(RSTRING_LEN(dataverse_name)));
4398
+ }
4399
+ if (!NIL_P(ignore_if_does_not_exist)) {
4400
+ req.ignore_if_does_not_exist = RTEST(ignore_if_does_not_exist);
4401
+ }
4402
+ auto barrier = std::make_shared<std::promise<couchbase::operations::analytics_index_drop_response>>();
4403
+ auto f = barrier->get_future();
4404
+ backend->cluster->execute_http(
4405
+ req, [barrier](couchbase::operations::analytics_index_drop_response resp) mutable { barrier->set_value(resp); });
4406
+ auto resp = f.get();
4407
+ if (resp.ec) {
4408
+ if (resp.errors.empty()) {
4409
+ exc = cb__map_error_code(
4410
+ resp.ec, fmt::format("unable to drop index `{}`.`{}`.`{}`", req.dataverse_name, req.dataset_name, req.index_name));
4411
+ } else {
4412
+ const auto& first_error = resp.errors.front();
4413
+ exc = cb__map_error_code(resp.ec,
4414
+ fmt::format("unable to drop index `{}`.`{}`.`{}` ({}: {})",
4415
+ req.dataverse_name,
4416
+ req.dataset_name,
4417
+ req.index_name,
4418
+ first_error.code,
4419
+ first_error.message));
4420
+ }
4421
+ break;
4422
+ }
4423
+ return Qtrue;
4424
+ } while (false);
4425
+ rb_exc_raise(exc);
4426
+ return Qnil;
4427
+ }
4428
+
4429
+ static VALUE
4430
+ cb_Backend_analytics_link_connect(VALUE self, VALUE link_name, VALUE force, VALUE dataverse_name, VALUE timeout)
4431
+ {
4432
+ cb_backend_data* backend = nullptr;
4433
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
4434
+
4435
+ if (!backend->cluster) {
4436
+ rb_raise(rb_eArgError, "Cluster has been closed already");
4437
+ }
4438
+
4439
+ Check_Type(link_name, T_STRING);
4440
+ if (!NIL_P(dataverse_name)) {
4441
+ Check_Type(dataverse_name, T_STRING);
4442
+ }
4443
+
4444
+ VALUE exc = Qnil;
4445
+ do {
4446
+ couchbase::operations::analytics_link_connect_request req{};
4447
+ cb__extract_timeout(req, timeout);
4448
+ req.link_name.assign(RSTRING_PTR(link_name), static_cast<size_t>(RSTRING_LEN(link_name)));
4449
+ if (!NIL_P(dataverse_name)) {
4450
+ req.dataverse_name.assign(RSTRING_PTR(dataverse_name), static_cast<size_t>(RSTRING_LEN(dataverse_name)));
4451
+ }
4452
+ if (!NIL_P(force)) {
4453
+ req.force = RTEST(force);
4454
+ }
4455
+ auto barrier = std::make_shared<std::promise<couchbase::operations::analytics_link_connect_response>>();
4456
+ auto f = barrier->get_future();
4457
+ backend->cluster->execute_http(
4458
+ req, [barrier](couchbase::operations::analytics_link_connect_response resp) mutable { barrier->set_value(resp); });
4459
+ auto resp = f.get();
4460
+ if (resp.ec) {
4461
+ if (resp.errors.empty()) {
4462
+ exc = cb__map_error_code(resp.ec, fmt::format("unable to connect link `{}` on `{}`", req.link_name, req.dataverse_name));
4463
+ } else {
4464
+ const auto& first_error = resp.errors.front();
4465
+ exc = cb__map_error_code(resp.ec,
4466
+ fmt::format("unable to connect link `{}` on `{}` ({}: {})",
4467
+ req.link_name,
4468
+ req.dataverse_name,
4469
+ first_error.code,
4470
+ first_error.message));
4471
+ }
4472
+ break;
4473
+ }
4474
+ return Qtrue;
4475
+ } while (false);
4476
+ rb_exc_raise(exc);
4477
+ return Qnil;
4478
+ }
4479
+
4480
+ static VALUE
4481
+ cb_Backend_analytics_link_disconnect(VALUE self, VALUE link_name, VALUE dataverse_name, VALUE timeout)
4482
+ {
4483
+ cb_backend_data* backend = nullptr;
4484
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
4485
+
4486
+ if (!backend->cluster) {
4487
+ rb_raise(rb_eArgError, "Cluster has been closed already");
4488
+ }
4489
+
4490
+ Check_Type(link_name, T_STRING);
4491
+ if (!NIL_P(dataverse_name)) {
4492
+ Check_Type(dataverse_name, T_STRING);
4493
+ }
4494
+
4495
+ VALUE exc = Qnil;
4496
+ do {
4497
+ couchbase::operations::analytics_link_disconnect_request req{};
4498
+ cb__extract_timeout(req, timeout);
4499
+ req.link_name.assign(RSTRING_PTR(link_name), static_cast<size_t>(RSTRING_LEN(link_name)));
4500
+ if (!NIL_P(dataverse_name)) {
4501
+ req.dataverse_name.assign(RSTRING_PTR(dataverse_name), static_cast<size_t>(RSTRING_LEN(dataverse_name)));
4502
+ }
4503
+ auto barrier = std::make_shared<std::promise<couchbase::operations::analytics_link_disconnect_response>>();
4504
+ auto f = barrier->get_future();
4505
+ backend->cluster->execute_http(
4506
+ req, [barrier](couchbase::operations::analytics_link_disconnect_response resp) mutable { barrier->set_value(resp); });
4507
+ auto resp = f.get();
4508
+ if (resp.ec) {
4509
+ if (resp.errors.empty()) {
4510
+ exc = cb__map_error_code(resp.ec, fmt::format("unable to disconnect link `{}` on `{}`", req.link_name, req.dataverse_name));
4511
+ } else {
4512
+ const auto& first_error = resp.errors.front();
4513
+ exc = cb__map_error_code(resp.ec,
4514
+ fmt::format("unable to disconnect link `{}` on `{}` ({}: {})",
4515
+ req.link_name,
4516
+ req.dataverse_name,
4517
+ first_error.code,
4518
+ first_error.message));
4519
+ }
4520
+ break;
4521
+ }
4522
+ return Qtrue;
4523
+ } while (false);
4524
+ rb_exc_raise(exc);
4525
+ return Qnil;
4526
+ }
4527
+
4528
+ static int
4529
+ cb__for_each_named_param__analytics(VALUE key, VALUE value, VALUE arg)
4530
+ {
4531
+ auto* preq = reinterpret_cast<couchbase::operations::analytics_request*>(arg);
4532
+ Check_Type(key, T_STRING);
4533
+ Check_Type(value, T_STRING);
4534
+ preq->named_parameters.emplace(
4535
+ std::string_view(RSTRING_PTR(key), static_cast<std::size_t>(RSTRING_LEN(key))),
4536
+ tao::json::from_string(std::string_view(RSTRING_PTR(value), static_cast<std::size_t>(RSTRING_LEN(value)))));
4537
+ return ST_CONTINUE;
4538
+ }
4539
+
4540
+ static VALUE
4541
+ cb_Backend_document_analytics(VALUE self, VALUE statement, VALUE options)
4542
+ {
4543
+ cb_backend_data* backend = nullptr;
4544
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
4545
+
4546
+ if (!backend->cluster) {
4547
+ rb_raise(rb_eArgError, "Cluster has been closed already");
4548
+ }
4549
+
4550
+ Check_Type(statement, T_STRING);
4551
+ Check_Type(options, T_HASH);
4552
+
4553
+ VALUE exc = Qnil;
4554
+ do {
4555
+ couchbase::operations::analytics_request req;
4556
+ req.statement.assign(RSTRING_PTR(statement), static_cast<size_t>(RSTRING_LEN(statement)));
4557
+ VALUE client_context_id = rb_hash_aref(options, rb_id2sym(rb_intern("client_context_id")));
4558
+ if (!NIL_P(client_context_id)) {
4559
+ Check_Type(client_context_id, T_STRING);
4560
+ req.client_context_id.assign(RSTRING_PTR(client_context_id), static_cast<size_t>(RSTRING_LEN(client_context_id)));
4561
+ }
4562
+ cb__extract_timeout(req, rb_hash_aref(options, rb_id2sym(rb_intern("timeout"))));
4563
+ VALUE readonly = rb_hash_aref(options, rb_id2sym(rb_intern("readonly")));
4564
+ if (!NIL_P(readonly)) {
4565
+ req.readonly = RTEST(readonly);
4566
+ }
4567
+ VALUE priority = rb_hash_aref(options, rb_id2sym(rb_intern("priority")));
4568
+ if (!NIL_P(priority)) {
4569
+ req.priority = RTEST(priority);
4570
+ }
4571
+ VALUE scan_wait = rb_hash_aref(options, rb_id2sym(rb_intern("scan_wait")));
4572
+ if (!NIL_P(scan_wait)) {
4573
+ req.scan_wait = NUM2ULONG(scan_wait);
4574
+ }
4575
+ VALUE positional_params = rb_hash_aref(options, rb_id2sym(rb_intern("positional_parameters")));
4576
+ if (!NIL_P(positional_params)) {
4577
+ Check_Type(positional_params, T_ARRAY);
4578
+ auto entries_num = static_cast<size_t>(RARRAY_LEN(positional_params));
4579
+ req.positional_parameters.reserve(entries_num);
4580
+ for (size_t i = 0; i < entries_num; ++i) {
4581
+ VALUE entry = rb_ary_entry(positional_params, static_cast<long>(i));
4582
+ Check_Type(entry, T_STRING);
4583
+ req.positional_parameters.emplace_back(
4584
+ tao::json::from_string(std::string_view(RSTRING_PTR(entry), static_cast<std::size_t>(RSTRING_LEN(entry)))));
4585
+ }
4586
+ }
4587
+ VALUE named_params = rb_hash_aref(options, rb_id2sym(rb_intern("named_parameters")));
4588
+ if (!NIL_P(named_params)) {
4589
+ Check_Type(named_params, T_HASH);
4590
+ rb_hash_foreach(named_params, INT_FUNC(cb__for_each_named_param__analytics), reinterpret_cast<VALUE>(&req));
4591
+ }
4592
+ VALUE scan_consistency = rb_hash_aref(options, rb_id2sym(rb_intern("scan_consistency")));
4593
+ if (!NIL_P(scan_consistency)) {
4594
+ Check_Type(scan_consistency, T_SYMBOL);
4595
+ ID type = rb_sym2id(scan_consistency);
4596
+ if (type == rb_intern("not_bounded")) {
4597
+ req.scan_consistency = couchbase::operations::analytics_request::scan_consistency_type::not_bounded;
4598
+ } else if (type == rb_intern("request_plus")) {
4599
+ req.scan_consistency = couchbase::operations::analytics_request::scan_consistency_type::request_plus;
4600
+ }
4601
+ }
4602
+
4603
+ VALUE raw_params = rb_hash_aref(options, rb_id2sym(rb_intern("raw_parameters")));
4604
+ if (!NIL_P(raw_params)) {
4605
+ Check_Type(raw_params, T_HASH);
4606
+ rb_hash_foreach(raw_params, INT_FUNC(cb__for_each_named_param__analytics), reinterpret_cast<VALUE>(&req));
4607
+ }
4608
+
4609
+ auto barrier = std::make_shared<std::promise<couchbase::operations::analytics_response>>();
4610
+ auto f = barrier->get_future();
4611
+ backend->cluster->execute_http(req,
4612
+ [barrier](couchbase::operations::analytics_response resp) mutable { barrier->set_value(resp); });
4613
+ auto resp = f.get();
4614
+ if (resp.ec) {
4615
+ if (resp.payload.meta_data.errors && !resp.payload.meta_data.errors->empty()) {
4616
+ const auto& first_error = resp.payload.meta_data.errors->front();
4617
+ exc = cb__map_error_code(resp.ec,
4618
+ fmt::format("unable to execute analytics query: \"{}{}\" ({}: {})",
4619
+ req.statement.substr(0, 50),
4620
+ req.statement.size() > 50 ? "..." : "",
4621
+ first_error.code,
4622
+ first_error.message));
4623
+ } else {
4624
+ exc = cb__map_error_code(resp.ec,
4625
+ fmt::format("unable to execute analytics query: \"{}{}\"",
4626
+ req.statement.substr(0, 50),
4627
+ req.statement.size() > 50 ? "..." : ""));
4628
+ }
4629
+ break;
4630
+ }
4631
+ VALUE res = rb_hash_new();
4632
+ VALUE rows = rb_ary_new_capa(static_cast<long>(resp.payload.rows.size()));
4633
+ rb_hash_aset(res, rb_id2sym(rb_intern("rows")), rows);
4634
+ for (auto& row : resp.payload.rows) {
4635
+ rb_ary_push(rows, rb_str_new(row.data(), static_cast<long>(row.size())));
4636
+ }
4637
+ VALUE meta = rb_hash_new();
4638
+ rb_hash_aset(res, rb_id2sym(rb_intern("meta")), meta);
4639
+ rb_hash_aset(meta,
4640
+ rb_id2sym(rb_intern("status")),
4641
+ rb_id2sym(rb_intern2(resp.payload.meta_data.status.data(), static_cast<long>(resp.payload.meta_data.status.size()))));
4642
+ rb_hash_aset(meta,
4643
+ rb_id2sym(rb_intern("request_id")),
4644
+ rb_str_new(resp.payload.meta_data.request_id.data(), static_cast<long>(resp.payload.meta_data.request_id.size())));
4645
+ rb_hash_aset(
4646
+ meta,
4647
+ rb_id2sym(rb_intern("client_context_id")),
4648
+ rb_str_new(resp.payload.meta_data.client_context_id.data(), static_cast<long>(resp.payload.meta_data.client_context_id.size())));
4649
+ if (resp.payload.meta_data.signature) {
4650
+ rb_hash_aset(meta,
4651
+ rb_id2sym(rb_intern("signature")),
4652
+ rb_str_new(resp.payload.meta_data.signature->data(), static_cast<long>(resp.payload.meta_data.signature->size())));
4653
+ }
4654
+ if (resp.payload.meta_data.profile) {
4655
+ rb_hash_aset(meta,
4656
+ rb_id2sym(rb_intern("profile")),
4657
+ rb_str_new(resp.payload.meta_data.profile->data(), static_cast<long>(resp.payload.meta_data.profile->size())));
4658
+ }
4659
+ VALUE metrics = rb_hash_new();
4660
+ rb_hash_aset(meta, rb_id2sym(rb_intern("metrics")), metrics);
4661
+ rb_hash_aset(metrics,
4662
+ rb_id2sym(rb_intern("elapsed_time")),
4663
+ rb_str_new(resp.payload.meta_data.metrics.elapsed_time.data(),
4664
+ static_cast<long>(resp.payload.meta_data.metrics.elapsed_time.size())));
4665
+ rb_hash_aset(metrics,
4666
+ rb_id2sym(rb_intern("execution_time")),
4667
+ rb_str_new(resp.payload.meta_data.metrics.execution_time.data(),
4668
+ static_cast<long>(resp.payload.meta_data.metrics.execution_time.size())));
4669
+ rb_hash_aset(metrics, rb_id2sym(rb_intern("result_count")), ULL2NUM(resp.payload.meta_data.metrics.result_count));
4670
+ rb_hash_aset(metrics, rb_id2sym(rb_intern("result_size")), ULL2NUM(resp.payload.meta_data.metrics.result_count));
4671
+ if (resp.payload.meta_data.metrics.sort_count) {
4672
+ rb_hash_aset(metrics, rb_id2sym(rb_intern("sort_count")), ULL2NUM(*resp.payload.meta_data.metrics.sort_count));
4673
+ }
4674
+ if (resp.payload.meta_data.metrics.mutation_count) {
4675
+ rb_hash_aset(metrics, rb_id2sym(rb_intern("mutation_count")), ULL2NUM(*resp.payload.meta_data.metrics.mutation_count));
4676
+ }
4677
+ if (resp.payload.meta_data.metrics.error_count) {
4678
+ rb_hash_aset(metrics, rb_id2sym(rb_intern("error_count")), ULL2NUM(*resp.payload.meta_data.metrics.error_count));
4679
+ }
4680
+ if (resp.payload.meta_data.metrics.warning_count) {
4681
+ rb_hash_aset(metrics, rb_id2sym(rb_intern("warning_count")), ULL2NUM(*resp.payload.meta_data.metrics.warning_count));
4682
+ }
4683
+
4684
+ return res;
4685
+ } while (false);
4686
+ rb_exc_raise(exc);
4687
+ return Qnil;
4688
+ }
4689
+
4690
+ static VALUE
+ cb_Backend_parse_connection_string(VALUE self, VALUE connection_string)
+ {
+ (void)self;
+ Check_Type(connection_string, T_STRING);
+
+ std::string input(RSTRING_PTR(connection_string), static_cast<size_t>(RSTRING_LEN(connection_string)));
+ auto connstr = couchbase::utils::parse_connection_string(input);
+
+ VALUE res = rb_hash_new();
+ if (!connstr.scheme.empty()) {
+ rb_hash_aset(res, rb_id2sym(rb_intern("scheme")), rb_str_new(connstr.scheme.data(), static_cast<long>(connstr.scheme.size())));
+ rb_hash_aset(res, rb_id2sym(rb_intern("tls")), connstr.tls ? Qtrue : Qfalse);
+ }
+
+ VALUE nodes = rb_ary_new_capa(static_cast<long>(connstr.bootstrap_nodes.size()));
+ for (const auto& entry : connstr.bootstrap_nodes) {
+ VALUE node = rb_hash_new();
+ rb_hash_aset(node, rb_id2sym(rb_intern("address")), rb_str_new(entry.address.data(), static_cast<long>(entry.address.size())));
+ if (entry.port > 0) {
+ rb_hash_aset(node, rb_id2sym(rb_intern("port")), UINT2NUM(entry.port));
+ }
+ switch (entry.mode) {
+ case couchbase::utils::connection_string::bootstrap_mode::gcccp:
+ rb_hash_aset(node, rb_id2sym(rb_intern("mode")), rb_id2sym(rb_intern("gcccp")));
+ break;
+ case couchbase::utils::connection_string::bootstrap_mode::http:
+ rb_hash_aset(node, rb_id2sym(rb_intern("mode")), rb_id2sym(rb_intern("http")));
+ break;
+ case couchbase::utils::connection_string::bootstrap_mode::unspecified:
+ break;
+ }
+ switch (entry.type) {
+ case couchbase::utils::connection_string::address_type::ipv4:
+ rb_hash_aset(node, rb_id2sym(rb_intern("type")), rb_id2sym(rb_intern("ipv4")));
+ break;
+ case couchbase::utils::connection_string::address_type::ipv6:
+ rb_hash_aset(node, rb_id2sym(rb_intern("type")), rb_id2sym(rb_intern("ipv6")));
+ break;
+ case couchbase::utils::connection_string::address_type::dns:
+ rb_hash_aset(node, rb_id2sym(rb_intern("type")), rb_id2sym(rb_intern("dns")));
+ break;
+ }
+ rb_ary_push(nodes, node);
+ }
+ rb_hash_aset(res, rb_id2sym(rb_intern("nodes")), nodes);
+
+ VALUE params = rb_hash_new();
+ for (const auto& param : connstr.params) {
+ rb_hash_aset(params,
+ rb_str_new(param.first.data(), static_cast<long>(param.first.size())),
+ rb_str_new(param.second.data(), static_cast<long>(param.second.size())));
+ }
+ rb_hash_aset(res, rb_id2sym(rb_intern("params")), params);
+
+ if (connstr.default_bucket_name) {
+ rb_hash_aset(res,
+ rb_id2sym(rb_intern("default_bucket_name")),
+ rb_str_new(connstr.default_bucket_name->data(), static_cast<long>(connstr.default_bucket_name->size())));
+ }
+ if (connstr.default_port > 0) {
+ rb_hash_aset(res, rb_id2sym(rb_intern("default_port")), UINT2NUM(connstr.default_port));
+ }
+ switch (connstr.default_mode) {
+ case couchbase::utils::connection_string::bootstrap_mode::gcccp:
+ rb_hash_aset(res, rb_id2sym(rb_intern("default_mode")), rb_id2sym(rb_intern("gcccp")));
+ break;
+ case couchbase::utils::connection_string::bootstrap_mode::http:
+ rb_hash_aset(res, rb_id2sym(rb_intern("default_mode")), rb_id2sym(rb_intern("http")));
+ break;
+ case couchbase::utils::connection_string::bootstrap_mode::unspecified:
+ break;
+ }
+ if (connstr.error) {
+ rb_hash_aset(res, rb_id2sym(rb_intern("error")), rb_str_new(connstr.error->data(), static_cast<long>(connstr.error->size())));
+ }
+ return res;
+ }
+
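Editor's note: parse_connection_string is exposed as a singleton method taking a single string (see init_backend at the end of this hunk). A hedged Ruby sketch of the hash it builds; every key below comes straight from the rb_hash_aset calls above, while the connection string itself is only an example:

    info = Couchbase::Backend.parse_connection_string("couchbases://cb1.example.com,cb2.example.com?network=external")
    info[:scheme]  # => "couchbases"  (set only when a scheme was present)
    info[:tls]     # => true
    info[:nodes]   # => [{address: "cb1.example.com", type: :dns, ...}, ...] with optional :port and :mode
    info[:params]  # => {"network" => "external"}
    info[:error]   # set only when the parser reported a problem
    # :default_bucket_name, :default_port and :default_mode are filled in the same conditional way.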
+ static VALUE
+ cb_Backend_view_index_get_all(VALUE self, VALUE bucket_name, VALUE name_space, VALUE timeout)
+ {
+ cb_backend_data* backend = nullptr;
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
+
+ if (!backend->cluster) {
+ rb_raise(rb_eArgError, "Cluster has been closed already");
+ }
+
+ Check_Type(bucket_name, T_STRING);
+ Check_Type(name_space, T_SYMBOL);
+
+ couchbase::operations::design_document::name_space ns;
+ ID type = rb_sym2id(name_space);
+ if (type == rb_intern("development")) {
+ ns = couchbase::operations::design_document::name_space::development;
+ } else if (type == rb_intern("production")) {
+ ns = couchbase::operations::design_document::name_space::production;
+ } else {
+ rb_raise(rb_eArgError, "Unknown design document namespace: %+" PRIsVALUE, name_space);
+ }
+
+ VALUE exc = Qnil;
+ do {
+ couchbase::operations::view_index_get_all_request req{};
+ req.bucket_name.assign(RSTRING_PTR(bucket_name), static_cast<size_t>(RSTRING_LEN(bucket_name)));
+ req.name_space = ns;
+ cb__extract_timeout(req, timeout);
+ auto barrier = std::make_shared<std::promise<couchbase::operations::view_index_get_all_response>>();
+ auto f = barrier->get_future();
+ backend->cluster->execute_http(
+ req, [barrier](couchbase::operations::view_index_get_all_response resp) mutable { barrier->set_value(resp); });
+ auto resp = f.get();
+ if (resp.ec) {
+ exc = cb__map_error_code(resp.ec, "unable to get list of the design documents");
+ break;
+ }
+ VALUE res = rb_ary_new_capa(static_cast<long>(resp.design_documents.size()));
+ for (const auto& entry : resp.design_documents) {
+ VALUE dd = rb_hash_new();
+ rb_hash_aset(dd, rb_id2sym(rb_intern("name")), rb_str_new(entry.name.data(), static_cast<long>(entry.name.size())));
+ rb_hash_aset(dd, rb_id2sym(rb_intern("rev")), rb_str_new(entry.rev.data(), static_cast<long>(entry.rev.size())));
+ switch (entry.ns) {
+ case couchbase::operations::design_document::name_space::development:
+ rb_hash_aset(dd, rb_id2sym(rb_intern("namespace")), rb_id2sym(rb_intern("development")));
+ break;
+ case couchbase::operations::design_document::name_space::production:
+ rb_hash_aset(dd, rb_id2sym(rb_intern("namespace")), rb_id2sym(rb_intern("production")));
+ break;
+ }
+ VALUE views = rb_hash_new();
+ for (const auto& view_entry : entry.views) {
+ VALUE view_name = rb_str_new(view_entry.first.data(), static_cast<long>(view_entry.first.size()));
+ VALUE view = rb_hash_new();
+ rb_hash_aset(view, rb_id2sym(rb_intern("name")), view_name);
+ if (view_entry.second.map) {
+ rb_hash_aset(view,
+ rb_id2sym(rb_intern("map")),
+ rb_str_new(view_entry.second.map->data(), static_cast<long>(view_entry.second.map->size())));
+ }
+ if (view_entry.second.reduce) {
+ rb_hash_aset(view,
+ rb_id2sym(rb_intern("reduce")),
+ rb_str_new(view_entry.second.reduce->data(), static_cast<long>(view_entry.second.reduce->size())));
+ }
+ rb_hash_aset(views, view_name, view);
+ }
+ rb_hash_aset(dd, rb_id2sym(rb_intern("views")), views);
+ rb_ary_push(res, dd);
+ }
+ return res;
+ } while (false);
+ rb_exc_raise(exc);
+ return Qnil;
+ }
+
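Editor's note: a hedged Ruby sketch of calling the binding above; the method name and arity (bucket name, namespace symbol, timeout) come from init_backend below, while the bucket name is a placeholder and the nil timeout is assumed to mean "use the default":

    design_docs = backend.view_index_get_all("travel-sample", :production, nil)
    design_docs.each do |dd|
      # each entry carries :name, :rev, :namespace and a :views hash keyed by view name
      dd[:views].each_value do |view|
        puts "#{dd[:name]}/#{view[:name]} map=#{view[:map] ? 'yes' : 'no'} reduce=#{view[:reduce] ? 'yes' : 'no'}"
      end
    end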
+ static VALUE
+ cb_Backend_view_index_get(VALUE self, VALUE bucket_name, VALUE document_name, VALUE name_space, VALUE timeout)
+ {
+ cb_backend_data* backend = nullptr;
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
+
+ if (!backend->cluster) {
+ rb_raise(rb_eArgError, "Cluster has been closed already");
+ }
+
+ Check_Type(bucket_name, T_STRING);
+ Check_Type(document_name, T_STRING);
+ Check_Type(name_space, T_SYMBOL);
+
+ couchbase::operations::design_document::name_space ns;
+ ID type = rb_sym2id(name_space);
+ if (type == rb_intern("development")) {
+ ns = couchbase::operations::design_document::name_space::development;
+ } else if (type == rb_intern("production")) {
+ ns = couchbase::operations::design_document::name_space::production;
+ } else {
+ rb_raise(rb_eArgError, "Unknown design document namespace: %+" PRIsVALUE, name_space);
+ }
+
+ VALUE exc = Qnil;
+ do {
+ couchbase::operations::view_index_get_request req{};
+ req.bucket_name.assign(RSTRING_PTR(bucket_name), static_cast<size_t>(RSTRING_LEN(bucket_name)));
+ req.document_name.assign(RSTRING_PTR(document_name), static_cast<size_t>(RSTRING_LEN(document_name)));
+ req.name_space = ns;
+ cb__extract_timeout(req, timeout);
+ auto barrier = std::make_shared<std::promise<couchbase::operations::view_index_get_response>>();
+ auto f = barrier->get_future();
+ backend->cluster->execute_http(
+ req, [barrier](couchbase::operations::view_index_get_response resp) mutable { barrier->set_value(resp); });
+ auto resp = f.get();
+ if (resp.ec) {
+ exc = cb__map_error_code(
+ resp.ec,
+ fmt::format(R"(unable to get design document "{}" ({}) on bucket "{}")", req.document_name, req.name_space, req.bucket_name));
+ break;
+ }
+ VALUE res = rb_hash_new();
+ rb_hash_aset(
+ res, rb_id2sym(rb_intern("name")), rb_str_new(resp.document.name.data(), static_cast<long>(resp.document.name.size())));
+ rb_hash_aset(res, rb_id2sym(rb_intern("rev")), rb_str_new(resp.document.rev.data(), static_cast<long>(resp.document.rev.size())));
+ switch (resp.document.ns) {
+ case couchbase::operations::design_document::name_space::development:
+ rb_hash_aset(res, rb_id2sym(rb_intern("namespace")), rb_id2sym(rb_intern("development")));
+ break;
+ case couchbase::operations::design_document::name_space::production:
+ rb_hash_aset(res, rb_id2sym(rb_intern("namespace")), rb_id2sym(rb_intern("production")));
+ break;
+ }
+ VALUE views = rb_hash_new();
+ for (const auto& view_entry : resp.document.views) {
+ VALUE view_name = rb_str_new(view_entry.first.data(), static_cast<long>(view_entry.first.size()));
+ VALUE view = rb_hash_new();
+ rb_hash_aset(view, rb_id2sym(rb_intern("name")), view_name);
+ if (view_entry.second.map) {
+ rb_hash_aset(view,
+ rb_id2sym(rb_intern("map")),
+ rb_str_new(view_entry.second.map->data(), static_cast<long>(view_entry.second.map->size())));
+ }
+ if (view_entry.second.reduce) {
+ rb_hash_aset(view,
+ rb_id2sym(rb_intern("reduce")),
+ rb_str_new(view_entry.second.reduce->data(), static_cast<long>(view_entry.second.reduce->size())));
+ }
+ rb_hash_aset(views, view_name, view);
+ }
+ rb_hash_aset(res, rb_id2sym(rb_intern("views")), views);
+ return res;
+ } while (false);
+ rb_exc_raise(exc);
+ return Qnil;
+ }
+
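Editor's note: a short Ruby sketch of fetching a single design document through the binding above; the bucket and design document names are placeholders, only the hash keys are taken from the code:

    dd = backend.view_index_get("travel-sample", "blog", :development, nil)
    dd[:name]       # => "blog"
    dd[:namespace]  # => :development
    dd[:views]      # => {"by_date" => {name: "by_date", map: "...", reduce: "..."}, ...}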
+ static VALUE
+ cb_Backend_view_index_drop(VALUE self, VALUE bucket_name, VALUE document_name, VALUE name_space, VALUE timeout)
+ {
+ cb_backend_data* backend = nullptr;
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
+
+ if (!backend->cluster) {
+ rb_raise(rb_eArgError, "Cluster has been closed already");
+ }
+
+ Check_Type(bucket_name, T_STRING);
+ Check_Type(document_name, T_STRING);
+ Check_Type(name_space, T_SYMBOL);
+
+ couchbase::operations::design_document::name_space ns;
+ ID type = rb_sym2id(name_space);
+ if (type == rb_intern("development")) {
+ ns = couchbase::operations::design_document::name_space::development;
+ } else if (type == rb_intern("production")) {
+ ns = couchbase::operations::design_document::name_space::production;
+ } else {
+ rb_raise(rb_eArgError, "Unknown design document namespace: %+" PRIsVALUE, name_space);
+ }
+
+ VALUE exc = Qnil;
+ do {
+ couchbase::operations::view_index_drop_request req{};
+ req.bucket_name.assign(RSTRING_PTR(bucket_name), static_cast<size_t>(RSTRING_LEN(bucket_name)));
+ req.document_name.assign(RSTRING_PTR(document_name), static_cast<size_t>(RSTRING_LEN(document_name)));
+ req.name_space = ns;
+ cb__extract_timeout(req, timeout);
+ auto barrier = std::make_shared<std::promise<couchbase::operations::view_index_drop_response>>();
+ auto f = barrier->get_future();
+ backend->cluster->execute_http(
+ req, [barrier](couchbase::operations::view_index_drop_response resp) mutable { barrier->set_value(resp); });
+ auto resp = f.get();
+ if (resp.ec) {
+ exc = cb__map_error_code(
+ resp.ec,
+ fmt::format(
+ R"(unable to drop design document "{}" ({}) on bucket "{}")", req.document_name, req.name_space, req.bucket_name));
+ break;
+ }
+ return Qtrue;
+ } while (false);
+ rb_exc_raise(exc);
+ return Qnil;
+ }
+
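Editor's note: for completeness, a one-line Ruby sketch of the drop binding above; the names are placeholders, and on failure the exception produced by cb__map_error_code is raised instead of a return value:

    backend.view_index_drop("travel-sample", "blog", :development, nil)  # => true when the design document was removed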
+ static VALUE
+ cb_Backend_view_index_upsert(VALUE self, VALUE bucket_name, VALUE document, VALUE name_space, VALUE timeout)
+ {
+ cb_backend_data* backend = nullptr;
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
+
+ if (!backend->cluster) {
+ rb_raise(rb_eArgError, "Cluster has been closed already");
+ }
+
+ Check_Type(bucket_name, T_STRING);
+ Check_Type(document, T_HASH);
+ Check_Type(name_space, T_SYMBOL);
+
+ couchbase::operations::design_document::name_space ns;
+ ID type = rb_sym2id(name_space);
+ if (type == rb_intern("development")) {
+ ns = couchbase::operations::design_document::name_space::development;
+ } else if (type == rb_intern("production")) {
+ ns = couchbase::operations::design_document::name_space::production;
+ } else {
+ rb_raise(rb_eArgError, "Unknown design document namespace: %+" PRIsVALUE, name_space);
+ }
+
+ VALUE exc = Qnil;
+ do {
+ couchbase::operations::view_index_upsert_request req{};
+ req.bucket_name.assign(RSTRING_PTR(bucket_name), static_cast<size_t>(RSTRING_LEN(bucket_name)));
+ req.document.ns = ns;
+ VALUE document_name = rb_hash_aref(document, rb_id2sym(rb_intern("name")));
+ if (!NIL_P(document_name)) {
+ Check_Type(document_name, T_STRING);
+ req.document.name.assign(RSTRING_PTR(document_name), static_cast<size_t>(RSTRING_LEN(document_name)));
+ }
+ VALUE views = rb_hash_aref(document, rb_id2sym(rb_intern("views")));
+ if (!NIL_P(views)) {
+ Check_Type(views, T_ARRAY);
+ auto entries_num = static_cast<size_t>(RARRAY_LEN(views));
+ for (size_t i = 0; i < entries_num; ++i) {
+ VALUE entry = rb_ary_entry(views, static_cast<long>(i));
+ Check_Type(entry, T_HASH);
+ couchbase::operations::design_document::view view;
+ VALUE name = rb_hash_aref(entry, rb_id2sym(rb_intern("name")));
+ Check_Type(name, T_STRING);
+ view.name.assign(RSTRING_PTR(name), static_cast<std::size_t>(RSTRING_LEN(name)));
+ VALUE map = rb_hash_aref(entry, rb_id2sym(rb_intern("map")));
+ if (!NIL_P(map)) {
+ view.map.emplace(std::string(RSTRING_PTR(map), static_cast<std::size_t>(RSTRING_LEN(map))));
+ }
+ VALUE reduce = rb_hash_aref(entry, rb_id2sym(rb_intern("reduce")));
+ if (!NIL_P(reduce)) {
+ view.reduce.emplace(std::string(RSTRING_PTR(reduce), static_cast<std::size_t>(RSTRING_LEN(reduce))));
+ }
+ req.document.views[view.name] = view;
+ }
+ }
+
+ cb__extract_timeout(req, timeout);
+ auto barrier = std::make_shared<std::promise<couchbase::operations::view_index_upsert_response>>();
+ auto f = barrier->get_future();
+ backend->cluster->execute_http(
+ req, [barrier](couchbase::operations::view_index_upsert_response resp) mutable { barrier->set_value(resp); });
+ auto resp = f.get();
+ if (resp.ec) {
+ exc = cb__map_error_code(
+ resp.ec,
+ fmt::format(
+ R"(unable to store design document "{}" ({}) on bucket "{}")", req.document.name, req.document.ns, req.bucket_name));
+ break;
+ }
+ return Qtrue;
+ } while (false);
+ rb_exc_raise(exc);
+ return Qnil;
+ }
+
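Editor's note: the upsert binding above expects the design document as a hash with an optional :name and a :views array of hashes carrying :name plus optional :map and :reduce, which a Ruby caller might build roughly like this (names and view bodies are invented examples):

    document = {
      name: "blog",
      views: [
        { name: "by_date", map: "function(doc, meta) { emit(doc.date, null); }" },
        { name: "count",   map: "function(doc, meta) { emit(meta.id, null); }", reduce: "_count" }
      ]
    }
    backend.view_index_upsert("travel-sample", document, :development, nil)  # => true on success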
+ static VALUE
+ cb_Backend_document_view(VALUE self, VALUE bucket_name, VALUE design_document_name, VALUE view_name, VALUE name_space, VALUE options)
+ {
+ cb_backend_data* backend = nullptr;
+ TypedData_Get_Struct(self, cb_backend_data, &cb_backend_type, backend);
+
+ if (!backend->cluster) {
+ rb_raise(rb_eArgError, "Cluster has been closed already");
+ }
+
+ Check_Type(bucket_name, T_STRING);
+ Check_Type(design_document_name, T_STRING);
+ Check_Type(view_name, T_STRING);
+ Check_Type(name_space, T_SYMBOL);
+ couchbase::operations::design_document::name_space ns;
+ ID type = rb_sym2id(name_space);
+ if (type == rb_intern("development")) {
+ ns = couchbase::operations::design_document::name_space::development;
+ } else if (type == rb_intern("production")) {
+ ns = couchbase::operations::design_document::name_space::production;
+ } else {
+ rb_raise(rb_eArgError, "Unknown design document namespace: %+" PRIsVALUE, name_space);
+ }
+ if (!NIL_P(options)) {
+ Check_Type(options, T_HASH);
+ }
+
+ VALUE exc = Qnil;
+ do {
+ couchbase::operations::document_view_request req{};
+ req.bucket_name.assign(RSTRING_PTR(bucket_name), static_cast<size_t>(RSTRING_LEN(bucket_name)));
+ req.document_name.assign(RSTRING_PTR(design_document_name), static_cast<size_t>(RSTRING_LEN(design_document_name)));
+ req.view_name.assign(RSTRING_PTR(view_name), static_cast<size_t>(RSTRING_LEN(view_name)));
+ req.name_space = ns;
+ if (!NIL_P(options)) {
+ cb__extract_timeout(req, rb_hash_aref(options, rb_id2sym(rb_intern("timeout"))));
+ VALUE debug = rb_hash_aref(options, rb_id2sym(rb_intern("debug")));
+ if (!NIL_P(debug)) {
+ req.debug = RTEST(debug);
+ }
+ VALUE limit = rb_hash_aref(options, rb_id2sym(rb_intern("limit")));
+ if (!NIL_P(limit)) {
+ Check_Type(limit, T_FIXNUM);
+ req.limit = FIX2ULONG(limit);
+ }
+ VALUE skip = rb_hash_aref(options, rb_id2sym(rb_intern("skip")));
+ if (!NIL_P(skip)) {
+ Check_Type(skip, T_FIXNUM);
+ req.skip = FIX2ULONG(skip);
+ }
+ VALUE scan_consistency = rb_hash_aref(options, rb_id2sym(rb_intern("scan_consistency")));
+ if (!NIL_P(scan_consistency)) {
+ Check_Type(scan_consistency, T_SYMBOL);
+ ID consistency = rb_sym2id(scan_consistency);
+ if (consistency == rb_intern("request_plus")) {
+ req.consistency = couchbase::operations::document_view_request::scan_consistency::request_plus;
+ } else if (consistency == rb_intern("update_after")) {
+ req.consistency = couchbase::operations::document_view_request::scan_consistency::update_after;
+ } else if (consistency == rb_intern("not_bounded")) {
+ req.consistency = couchbase::operations::document_view_request::scan_consistency::not_bounded;
+ }
+ }
+ VALUE key = rb_hash_aref(options, rb_id2sym(rb_intern("key")));
+ if (!NIL_P(key)) {
+ Check_Type(key, T_STRING);
+ req.key.emplace(RSTRING_PTR(key), static_cast<size_t>(RSTRING_LEN(key)));
+ }
+ VALUE start_key = rb_hash_aref(options, rb_id2sym(rb_intern("start_key")));
+ if (!NIL_P(start_key)) {
+ Check_Type(start_key, T_STRING);
+ req.start_key.emplace(RSTRING_PTR(start_key), static_cast<size_t>(RSTRING_LEN(start_key)));
+ }
+ VALUE end_key = rb_hash_aref(options, rb_id2sym(rb_intern("end_key")));
+ if (!NIL_P(end_key)) {
+ Check_Type(end_key, T_STRING);
+ req.end_key.emplace(RSTRING_PTR(end_key), static_cast<size_t>(RSTRING_LEN(end_key)));
+ }
+ VALUE start_key_doc_id = rb_hash_aref(options, rb_id2sym(rb_intern("start_key_doc_id")));
+ if (!NIL_P(start_key_doc_id)) {
+ Check_Type(start_key_doc_id, T_STRING);
+ req.start_key_doc_id.emplace(RSTRING_PTR(start_key_doc_id), static_cast<size_t>(RSTRING_LEN(start_key_doc_id)));
+ }
+ VALUE end_key_doc_id = rb_hash_aref(options, rb_id2sym(rb_intern("end_key_doc_id")));
+ if (!NIL_P(end_key_doc_id)) {
+ Check_Type(end_key_doc_id, T_STRING);
+ req.end_key_doc_id.emplace(RSTRING_PTR(end_key_doc_id), static_cast<size_t>(RSTRING_LEN(end_key_doc_id)));
+ }
+ VALUE inclusive_end = rb_hash_aref(options, rb_id2sym(rb_intern("inclusive_end")));
+ if (!NIL_P(inclusive_end)) {
+ req.inclusive_end = RTEST(inclusive_end);
+ }
+ VALUE reduce = rb_hash_aref(options, rb_id2sym(rb_intern("reduce")));
+ if (!NIL_P(reduce)) {
+ req.reduce = RTEST(reduce);
+ }
+ VALUE group = rb_hash_aref(options, rb_id2sym(rb_intern("group")));
+ if (!NIL_P(group)) {
+ req.group = RTEST(group);
+ }
+ VALUE group_level = rb_hash_aref(options, rb_id2sym(rb_intern("group_level")));
+ if (!NIL_P(group_level)) {
+ Check_Type(group_level, T_FIXNUM);
+ req.group_level = FIX2ULONG(group_level);
+ }
+ VALUE sort_order = rb_hash_aref(options, rb_id2sym(rb_intern("order")));
+ if (!NIL_P(sort_order)) {
+ Check_Type(sort_order, T_SYMBOL);
+ ID order = rb_sym2id(sort_order);
+ if (order == rb_intern("ascending")) {
+ req.order = couchbase::operations::document_view_request::sort_order::ascending;
+ } else if (order == rb_intern("descending")) {
+ req.order = couchbase::operations::document_view_request::sort_order::descending;
+ }
+ }
+ VALUE keys = rb_hash_aref(options, rb_id2sym(rb_intern("keys")));
+ if (!NIL_P(keys)) {
+ Check_Type(keys, T_ARRAY);
+ auto entries_num = static_cast<size_t>(RARRAY_LEN(keys));
+ req.keys.reserve(entries_num);
+ for (size_t i = 0; i < entries_num; ++i) {
+ VALUE entry = rb_ary_entry(keys, static_cast<long>(i));
+ Check_Type(entry, T_STRING);
+ req.keys.emplace_back(std::string(RSTRING_PTR(entry), static_cast<std::size_t>(RSTRING_LEN(entry))));
+ }
+ }
+ }
+
+ auto barrier = std::make_shared<std::promise<couchbase::operations::document_view_response>>();
+ auto f = barrier->get_future();
+ backend->cluster->execute_http(req,
+ [barrier](couchbase::operations::document_view_response resp) mutable { barrier->set_value(resp); });
+ auto resp = f.get();
+ if (resp.ec) {
+ if (resp.error) {
+ exc = cb__map_error_code(
+ resp.ec,
+ fmt::format(R"(unable to execute query for view "{}" of design document "{}" ({}) on bucket "{}": {} ({}))",
+ req.view_name,
+ req.document_name,
+ req.name_space,
+ req.bucket_name,
+ resp.error->code,
+ resp.error->message));
+ } else {
+ exc = cb__map_error_code(resp.ec,
+ fmt::format(R"(unable to execute query for view "{}" of design document "{}" ({}) on bucket "{}")",
+ req.view_name,
+ req.document_name,
+ req.name_space,
+ req.bucket_name));
+ }
+ break;
+ }
+ VALUE res = rb_hash_new();
+
+ VALUE meta = rb_hash_new();
+ if (resp.meta_data.total_rows) {
+ rb_hash_aset(meta, rb_id2sym(rb_intern("total_rows")), ULL2NUM(*resp.meta_data.total_rows));
+ }
+ if (resp.meta_data.debug_info) {
+ rb_hash_aset(meta,
+ rb_id2sym(rb_intern("debug_info")),
+ rb_str_new(resp.meta_data.debug_info->data(), static_cast<long>(resp.meta_data.debug_info->size())));
+ }
+ rb_hash_aset(res, rb_id2sym(rb_intern("meta")), meta);
+
+ VALUE rows = rb_ary_new_capa(static_cast<long>(resp.rows.size()));
+ for (const auto& entry : resp.rows) {
+ VALUE row = rb_hash_new();
+ if (entry.id) {
+ rb_hash_aset(row, rb_id2sym(rb_intern("id")), rb_str_new(entry.id->data(), static_cast<long>(entry.id->size())));
+ }
+ rb_hash_aset(row, rb_id2sym(rb_intern("key")), rb_str_new(entry.key.data(), static_cast<long>(entry.key.size())));
+ rb_hash_aset(row, rb_id2sym(rb_intern("value")), rb_str_new(entry.value.data(), static_cast<long>(entry.value.size())));
+ rb_ary_push(rows, row);
+ }
+ rb_hash_aset(res, rb_id2sym(rb_intern("rows")), rows);
+
+ return res;
+ } while (false);
+ rb_exc_raise(exc);
+ return Qnil;
+ }
+
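Editor's note: a hedged Ruby sketch of running a view query through the binding above. The option names (:timeout, :debug, :limit, :skip, :scan_consistency, :key, :start_key, :end_key, :start_key_doc_id, :end_key_doc_id, :inclusive_end, :reduce, :group, :group_level, :order, :keys) and the result shape are taken from the code; the bucket, design document, and view names are placeholders, and the binding passes key strings and row values through verbatim, so any JSON encoding is assumed to happen in the Ruby layer:

    res = backend.document_view("travel-sample", "blog", "by_date", :production,
                                { limit: 10, order: :descending, scan_consistency: :request_plus })
    res[:meta][:total_rows]  # present only when the server reported it
    res[:rows].each do |row|
      # :key and :value arrive as raw strings exactly as returned by the server; :id is set for non-reduced rows
      puts "#{row[:id]} #{row[:key]} => #{row[:value]}"
    end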
+ static void
+ init_backend(VALUE mCouchbase)
+ {
+ VALUE cBackend = rb_define_class_under(mCouchbase, "Backend", rb_cBasicObject);
+ rb_define_alloc_func(cBackend, cb_Backend_allocate);
+ rb_define_method(cBackend, "open", VALUE_FUNC(cb_Backend_open), 3);
+ rb_define_method(cBackend, "close", VALUE_FUNC(cb_Backend_close), 0);
+ rb_define_method(cBackend, "open_bucket", VALUE_FUNC(cb_Backend_open_bucket), 1);
+
+ rb_define_method(cBackend, "document_get", VALUE_FUNC(cb_Backend_document_get), 4);
+ rb_define_method(cBackend, "document_get_projected", VALUE_FUNC(cb_Backend_document_get_projected), 7);
+ rb_define_method(cBackend, "document_get_and_lock", VALUE_FUNC(cb_Backend_document_get_and_lock), 5);
+ rb_define_method(cBackend, "document_get_and_touch", VALUE_FUNC(cb_Backend_document_get_and_touch), 5);
+ rb_define_method(cBackend, "document_insert", VALUE_FUNC(cb_Backend_document_insert), 7);
+ rb_define_method(cBackend, "document_replace", VALUE_FUNC(cb_Backend_document_replace), 7);
+ rb_define_method(cBackend, "document_upsert", VALUE_FUNC(cb_Backend_document_upsert), 7);
+ rb_define_method(cBackend, "document_remove", VALUE_FUNC(cb_Backend_document_remove), 5);
+ rb_define_method(cBackend, "document_lookup_in", VALUE_FUNC(cb_Backend_document_lookup_in), 6);
+ rb_define_method(cBackend, "document_mutate_in", VALUE_FUNC(cb_Backend_document_mutate_in), 7);
+ rb_define_method(cBackend, "document_query", VALUE_FUNC(cb_Backend_document_query), 2);
+ rb_define_method(cBackend, "document_touch", VALUE_FUNC(cb_Backend_document_touch), 5);
+ rb_define_method(cBackend, "document_exists", VALUE_FUNC(cb_Backend_document_exists), 4);
+ rb_define_method(cBackend, "document_unlock", VALUE_FUNC(cb_Backend_document_unlock), 5);
+ rb_define_method(cBackend, "document_increment", VALUE_FUNC(cb_Backend_document_increment), 5);
+ rb_define_method(cBackend, "document_decrement", VALUE_FUNC(cb_Backend_document_decrement), 5);
+ rb_define_method(cBackend, "document_search", VALUE_FUNC(cb_Backend_document_search), 3);
+ rb_define_method(cBackend, "document_analytics", VALUE_FUNC(cb_Backend_document_analytics), 2);
+ rb_define_method(cBackend, "document_view", VALUE_FUNC(cb_Backend_document_view), 5);
+
+ rb_define_method(cBackend, "bucket_create", VALUE_FUNC(cb_Backend_bucket_create), 2);
+ rb_define_method(cBackend, "bucket_update", VALUE_FUNC(cb_Backend_bucket_update), 2);
+ rb_define_method(cBackend, "bucket_drop", VALUE_FUNC(cb_Backend_bucket_drop), 2);
+ rb_define_method(cBackend, "bucket_flush", VALUE_FUNC(cb_Backend_bucket_flush), 2);
+ rb_define_method(cBackend, "bucket_get_all", VALUE_FUNC(cb_Backend_bucket_get_all), 1);
+ rb_define_method(cBackend, "bucket_get", VALUE_FUNC(cb_Backend_bucket_get), 2);
+
+ rb_define_method(cBackend, "cluster_enable_developer_preview!", VALUE_FUNC(cb_Backend_cluster_enable_developer_preview), 0);
+
+ rb_define_method(cBackend, "scope_get_all", VALUE_FUNC(cb_Backend_scope_get_all), 2);
+ rb_define_method(cBackend, "scope_create", VALUE_FUNC(cb_Backend_scope_create), 3);
+ rb_define_method(cBackend, "scope_drop", VALUE_FUNC(cb_Backend_scope_drop), 3);
+ rb_define_method(cBackend, "collection_create", VALUE_FUNC(cb_Backend_collection_create), 5);
+ rb_define_method(cBackend, "collection_drop", VALUE_FUNC(cb_Backend_collection_drop), 4);
+
+ rb_define_method(cBackend, "query_index_get_all", VALUE_FUNC(cb_Backend_query_index_get_all), 2);
+ rb_define_method(cBackend, "query_index_create", VALUE_FUNC(cb_Backend_query_index_create), 5);
+ rb_define_method(cBackend, "query_index_create_primary", VALUE_FUNC(cb_Backend_query_index_create_primary), 3);
+ rb_define_method(cBackend, "query_index_drop", VALUE_FUNC(cb_Backend_query_index_drop), 4);
+ rb_define_method(cBackend, "query_index_drop_primary", VALUE_FUNC(cb_Backend_query_index_drop_primary), 3);
+ rb_define_method(cBackend, "query_index_build_deferred", VALUE_FUNC(cb_Backend_query_index_build_deferred), 2);
+ rb_define_method(cBackend, "query_index_watch", VALUE_FUNC(cb_Backend_query_index_watch), 4);
+
+ rb_define_method(cBackend, "search_index_get_all", VALUE_FUNC(cb_Backend_search_index_get_all), 1);
+ rb_define_method(cBackend, "search_index_get", VALUE_FUNC(cb_Backend_search_index_get), 2);
+ rb_define_method(cBackend, "search_index_upsert", VALUE_FUNC(cb_Backend_search_index_upsert), 2);
+ rb_define_method(cBackend, "search_index_drop", VALUE_FUNC(cb_Backend_search_index_drop), 2);
+ rb_define_method(cBackend, "search_index_get_documents_count", VALUE_FUNC(cb_Backend_search_index_get_documents_count), 2);
+ rb_define_method(cBackend, "search_index_pause_ingest", VALUE_FUNC(cb_Backend_search_index_pause_ingest), 2);
+ rb_define_method(cBackend, "search_index_resume_ingest", VALUE_FUNC(cb_Backend_search_index_resume_ingest), 2);
+ rb_define_method(cBackend, "search_index_allow_querying", VALUE_FUNC(cb_Backend_search_index_allow_querying), 2);
+ rb_define_method(cBackend, "search_index_disallow_querying", VALUE_FUNC(cb_Backend_search_index_disallow_querying), 2);
+ rb_define_method(cBackend, "search_index_freeze_plan", VALUE_FUNC(cb_Backend_search_index_freeze_plan), 2);
+ rb_define_method(cBackend, "search_index_unfreeze_plan", VALUE_FUNC(cb_Backend_search_index_unfreeze_plan), 2);
+ rb_define_method(cBackend, "search_index_analyze_document", VALUE_FUNC(cb_Backend_search_index_analyze_document), 3);
+
+ rb_define_method(cBackend, "analytics_get_pending_mutations", VALUE_FUNC(cb_Backend_analytics_get_pending_mutations), 1);
+ rb_define_method(cBackend, "analytics_dataverse_drop", VALUE_FUNC(cb_Backend_analytics_dataverse_drop), 3);
+ rb_define_method(cBackend, "analytics_dataverse_create", VALUE_FUNC(cb_Backend_analytics_dataverse_create), 3);
+ rb_define_method(cBackend, "analytics_dataset_create", VALUE_FUNC(cb_Backend_analytics_dataset_create), 6);
+ rb_define_method(cBackend, "analytics_dataset_drop", VALUE_FUNC(cb_Backend_analytics_dataset_drop), 4);
+ rb_define_method(cBackend, "analytics_dataset_get_all", VALUE_FUNC(cb_Backend_analytics_dataset_get_all), 1);
+ rb_define_method(cBackend, "analytics_index_get_all", VALUE_FUNC(cb_Backend_analytics_index_get_all), 1);
+ rb_define_method(cBackend, "analytics_index_create", VALUE_FUNC(cb_Backend_analytics_index_create), 6);
+ rb_define_method(cBackend, "analytics_index_drop", VALUE_FUNC(cb_Backend_analytics_index_drop), 5);
+ rb_define_method(cBackend, "analytics_link_connect", VALUE_FUNC(cb_Backend_analytics_link_connect), 4);
+ rb_define_method(cBackend, "analytics_link_disconnect", VALUE_FUNC(cb_Backend_analytics_link_disconnect), 3);
+
+ rb_define_method(cBackend, "view_index_get_all", VALUE_FUNC(cb_Backend_view_index_get_all), 3);
+ rb_define_method(cBackend, "view_index_get", VALUE_FUNC(cb_Backend_view_index_get), 4);
+ rb_define_method(cBackend, "view_index_drop", VALUE_FUNC(cb_Backend_view_index_drop), 4);
+ rb_define_method(cBackend, "view_index_upsert", VALUE_FUNC(cb_Backend_view_index_upsert), 4);
+
+ rb_define_singleton_method(cBackend, "dns_srv", VALUE_FUNC(cb_Backend_dns_srv), 2);
+ rb_define_singleton_method(cBackend, "parse_connection_string", VALUE_FUNC(cb_Backend_parse_connection_string), 1);
  }
 
  extern "C" {