couchbase 1.2.1-x86-mingw32 → 1.2.2-x86-mingw32
- data/README.markdown +50 -5
- data/RELEASE_NOTES.markdown +256 -145
- data/couchbase.gemspec +2 -4
- data/examples/chat-em/Gemfile +7 -0
- data/examples/chat-em/README.markdown +45 -0
- data/examples/chat-em/server.rb +82 -0
- data/ext/couchbase_ext/arguments.c +18 -17
- data/ext/couchbase_ext/arithmetic.c +17 -25
- data/ext/couchbase_ext/bucket.c +227 -32
- data/ext/couchbase_ext/context.c +64 -0
- data/ext/couchbase_ext/couchbase_ext.c +106 -14
- data/ext/couchbase_ext/couchbase_ext.h +81 -12
- data/ext/couchbase_ext/delete.c +18 -25
- data/ext/couchbase_ext/eventmachine_plugin.c +452 -0
- data/ext/couchbase_ext/extconf.rb +2 -0
- data/ext/couchbase_ext/get.c +18 -31
- data/ext/couchbase_ext/http.c +40 -31
- data/ext/couchbase_ext/multithread_plugin.c +38 -201
- data/ext/couchbase_ext/observe.c +17 -25
- data/ext/couchbase_ext/plugin_common.c +171 -0
- data/ext/couchbase_ext/result.c +18 -12
- data/ext/couchbase_ext/stats.c +17 -25
- data/ext/couchbase_ext/store.c +43 -47
- data/ext/couchbase_ext/touch.c +18 -25
- data/ext/couchbase_ext/unlock.c +18 -25
- data/ext/couchbase_ext/utils.c +23 -8
- data/ext/couchbase_ext/version.c +16 -24
- data/lib/couchbase.rb +1 -0
- data/lib/couchbase/bucket.rb +1 -1
- data/lib/couchbase/constants.rb +12 -0
- data/lib/couchbase/version.rb +1 -1
- data/lib/couchbase/view.rb +210 -60
- data/lib/couchbase/view_row.rb +103 -61
- data/tasks/compile.rake +1 -1
- data/test/test_async.rb +63 -0
- data/test/test_eventmachine.rb +70 -0
- metadata +24 -49
- data/tasks/doc.rake +0 -27
data/ext/couchbase_ext/store.c
CHANGED
@@ -31,51 +31,52 @@ storage_observe_callback(VALUE args, VALUE cookie)
     if (!RTEST(ctx->observe_options)) {
         ctx->nqueries--;
         if (ctx->nqueries == 0) {
-
+            ctx->proc = Qnil;
             if (bucket->async) {
-
+                cb_context_free(ctx);
             }
         }
     }
     return Qnil;
 }

+VALUE
+storage_opcode_to_sym(lcb_storage_t operation)
+{
+    switch(operation) {
+    case LCB_ADD:
+        return cb_sym_add;
+    case LCB_REPLACE:
+        return cb_sym_replace;
+    case LCB_SET:
+        return cb_sym_set;
+    case LCB_APPEND:
+        return cb_sym_append;
+    case LCB_PREPEND:
+        return cb_sym_prepend;
+    default:
+        return Qnil;
+    }
+}
+
 void
 cb_storage_callback(lcb_t handle, const void *cookie, lcb_storage_t operation,
         lcb_error_t error, const lcb_store_resp_t *resp)
 {
     struct cb_context_st *ctx = (struct cb_context_st *)cookie;
     struct cb_bucket_st *bucket = ctx->bucket;
-    VALUE key, cas,
+    VALUE key, cas, exc, res;

     key = STR_NEW((const char*)resp->v.v0.key, resp->v.v0.nkey);
     cb_strip_key_prefix(bucket, key);

     cas = resp->v.v0.cas > 0 ? ULL2NUM(resp->v.v0.cas) : Qnil;
-
-    case LCB_ADD:
-        ctx->operation = cb_sym_add;
-        break;
-    case LCB_REPLACE:
-        ctx->operation = cb_sym_replace;
-        break;
-    case LCB_SET:
-        ctx->operation = cb_sym_set;
-        break;
-    case LCB_APPEND:
-        ctx->operation = cb_sym_append;
-        break;
-    case LCB_PREPEND:
-        ctx->operation = cb_sym_prepend;
-        break;
-    default:
-        ctx->operation = Qnil;
-    }
+    ctx->operation = storage_opcode_to_sym(operation);
     exc = cb_check_error(error, "failed to store value", key);
     if (exc != Qnil) {
         rb_ivar_set(exc, cb_id_iv_cas, cas);
         rb_ivar_set(exc, cb_id_iv_operation, ctx->operation);
-        ctx->exception =
+        ctx->exception = exc;
     }

     if (bucket->async) { /* asynchronous */
@@ -86,7 +87,7 @@ cb_storage_callback(lcb_t handle, const void *cookie, lcb_storage_t operation,
             args[1] = ctx->observe_options;
             rb_block_call(bucket->self, cb_id_observe_and_wait, 2, args,
                     storage_observe_callback, (VALUE)ctx);
-
+            ctx->observe_options = Qnil;
         } else if (ctx->proc != Qnil) {
             res = rb_class_new_instance(0, NULL, cb_cResult);
             rb_ivar_set(res, cb_id_iv_error, exc);
@@ -96,15 +97,15 @@ cb_storage_callback(lcb_t handle, const void *cookie, lcb_storage_t operation,
             cb_proc_call(bucket, ctx->proc, 1, res);
         }
     } else { /* synchronous */
-        rb_hash_aset(
+        rb_hash_aset(ctx->rv, key, cas);
     }

     if (!RTEST(ctx->observe_options)) {
         ctx->nqueries--;
         if (ctx->nqueries == 0) {
-
+            ctx->proc = Qnil;
             if (bucket->async) {
-
+                cb_context_free(ctx);
             }
         }
     }
@@ -116,40 +117,36 @@ cb_bucket_store(lcb_storage_t cmd, int argc, VALUE *argv, VALUE self)
 {
     struct cb_bucket_st *bucket = DATA_PTR(self);
     struct cb_context_st *ctx;
-    VALUE
+    VALUE rv, proc, exc, obs = Qnil;
     lcb_error_t err;
     struct cb_params_st params;

-    if (bucket
-
+    if (!cb_bucket_connected_bang(bucket, storage_opcode_to_sym(cmd))) {
+        return Qnil;
     }
-
+    memset(&params, 0, sizeof(struct cb_params_st));
+    rb_scan_args(argc, argv, "0*&", &params.args, &proc);
     if (!bucket->async && proc != Qnil) {
         rb_raise(rb_eArgError, "synchronous mode doesn't support callbacks");
     }
-    memset(&params, 0, sizeof(struct cb_params_st));
     params.type = cb_cmd_store;
     params.bucket = bucket;
     params.cmd.store.operation = cmd;
-    cb_params_build(&params
-    ctx = calloc(1, sizeof(struct cb_context_st));
-    if (ctx == NULL) {
-        rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for context");
-    }
-    rv = rb_hash_new();
-    ctx->rv = &rv;
-    ctx->bucket = bucket;
-    ctx->proc = cb_gc_protect(bucket, proc);
-    ctx->observe_options = cb_gc_protect(bucket, obs);
-    ctx->exception = Qnil;
+    cb_params_build(&params);
     obs = params.cmd.store.observe;
+    ctx = cb_context_alloc(bucket);
+    if (!bucket->async) {
+        ctx->rv = rb_hash_new();
+        ctx->observe_options = obs;
+    }
+    ctx->proc = proc;
     ctx->nqueries = params.cmd.store.num;
     err = lcb_store(bucket->handle, (const void *)ctx,
             params.cmd.store.num, params.cmd.store.ptr);
     cb_params_destroy(&params);
     exc = cb_check_error(err, "failed to schedule set request", Qnil);
     if (exc != Qnil) {
-
+        cb_context_free(ctx);
         rb_exc_raise(exc);
     }
     bucket->nbytes += params.npayload;
@@ -162,9 +159,9 @@ cb_bucket_store(lcb_storage_t cmd, int argc, VALUE *argv, VALUE self)
         lcb_wait(bucket->handle);
     }
     exc = ctx->exception;
-
+    rv = ctx->rv;
+    cb_context_free(ctx);
     if (exc != Qnil) {
-        cb_gc_unprotect(bucket, exc);
         rb_exc_raise(exc);
     }
     exc = bucket->exception;
@@ -173,7 +170,6 @@ cb_bucket_store(lcb_storage_t cmd, int argc, VALUE *argv, VALUE self)
         rb_exc_raise(exc);
     }
     if (RTEST(obs)) {
-        cb_gc_unprotect(bucket, obs);
         rv = rb_funcall(bucket->self, cb_id_observe_and_wait, 2, rv, obs);
     }
     if (params.cmd.store.num > 1) {
data/ext/couchbase_ext/touch.c
CHANGED
@@ -22,7 +22,7 @@ cb_touch_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb
 {
     struct cb_context_st *ctx = (struct cb_context_st *)cookie;
     struct cb_bucket_st *bucket = ctx->bucket;
-    VALUE key,
+    VALUE key, exc = Qnil, res;

     ctx->nqueries--;
     key = STR_NEW((const char*)resp->v.v0.key, resp->v.v0.nkey);
@@ -32,7 +32,7 @@ cb_touch_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb
         exc = cb_check_error(error, "failed to touch value", key);
         if (exc != Qnil) {
             rb_ivar_set(exc, cb_id_iv_operation, cb_sym_touch);
-            ctx->exception =
+            ctx->exception = exc;
         }
     }

@@ -45,12 +45,12 @@ cb_touch_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb
             cb_proc_call(bucket, ctx->proc, 1, res);
         }
     } else { /* synchronous */
-        rb_hash_aset(
+        rb_hash_aset(ctx->rv, key, (error == LCB_SUCCESS) ? Qtrue : Qfalse);
     }
     if (ctx->nqueries == 0) {
-
+        ctx->proc = Qnil;
         if (bucket->async) {
-
+            cb_context_free(ctx);
         }
     }
     (void)handle;
@@ -126,39 +126,31 @@ cb_bucket_touch(int argc, VALUE *argv, VALUE self)
 {
     struct cb_bucket_st *bucket = DATA_PTR(self);
     struct cb_context_st *ctx;
-    VALUE
+    VALUE rv, proc, exc;
     lcb_error_t err;
     struct cb_params_st params;

-    if (bucket
-
+    if (!cb_bucket_connected_bang(bucket, cb_sym_touch)) {
+        return Qnil;
     }
-
+
+    memset(&params, 0, sizeof(struct cb_params_st));
+    rb_scan_args(argc, argv, "0*&", &params.args, &proc);
     if (!bucket->async && proc != Qnil) {
         rb_raise(rb_eArgError, "synchronous mode doesn't support callbacks");
     }
-    rb_funcall(args, cb_id_flatten_bang, 0);
-    memset(&params, 0, sizeof(struct cb_params_st));
+    rb_funcall(params.args, cb_id_flatten_bang, 0);
     params.type = cb_cmd_touch;
     params.bucket = bucket;
-    cb_params_build(&params
-    ctx =
-    if (ctx == NULL) {
-        rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for context");
-    }
-    ctx->proc = cb_gc_protect(bucket, proc);
-    ctx->bucket = bucket;
-    rv = rb_hash_new();
-    ctx->rv = &rv;
-    ctx->exception = Qnil;
+    cb_params_build(&params);
+    ctx = cb_context_alloc_common(bucket, proc, params.cmd.touch.num);
     ctx->quiet = params.cmd.touch.quiet;
-    ctx->nqueries = params.cmd.touch.num;
     err = lcb_touch(bucket->handle, (const void *)ctx,
             params.cmd.touch.num, params.cmd.touch.ptr);
     cb_params_destroy(&params);
     exc = cb_check_error(err, "failed to schedule touch request", Qnil);
     if (exc != Qnil) {
-
+        cb_context_free(ctx);
         rb_exc_raise(exc);
     }
     bucket->nbytes += params.npayload;
@@ -171,9 +163,10 @@ cb_bucket_touch(int argc, VALUE *argv, VALUE self)
         lcb_wait(bucket->handle);
     }
     exc = ctx->exception;
-
+    rv = ctx->rv;
+    cb_context_free(ctx);
     if (exc != Qnil) {
-        rb_exc_raise(
+        rb_exc_raise(exc);
     }
     exc = bucket->exception;
     if (exc != Qnil) {
data/ext/couchbase_ext/unlock.c
CHANGED
@@ -22,7 +22,7 @@ cb_unlock_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lc
 {
     struct cb_context_st *ctx = (struct cb_context_st *)cookie;
     struct cb_bucket_st *bucket = ctx->bucket;
-    VALUE key,
+    VALUE key, exc = Qnil, res;

     ctx->nqueries--;
     key = STR_NEW((const char*)resp->v.v0.key, resp->v.v0.nkey);
@@ -32,7 +32,7 @@ cb_unlock_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lc
         exc = cb_check_error(error, "failed to unlock value", key);
         if (exc != Qnil) {
             rb_ivar_set(exc, cb_id_iv_operation, cb_sym_unlock);
-            ctx->exception =
+            ctx->exception = exc;
         }
     }

@@ -45,12 +45,12 @@ cb_unlock_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lc
             cb_proc_call(bucket, ctx->proc, 1, res);
         }
     } else { /* synchronous */
-        rb_hash_aset(
+        rb_hash_aset(ctx->rv, key, (error == LCB_SUCCESS) ? Qtrue : Qfalse);
     }
     if (ctx->nqueries == 0) {
-
+        ctx->proc = Qnil;
         if (bucket->async) {
-
+            cb_context_free(ctx);
         }
     }
     (void)handle;
@@ -116,39 +116,31 @@ cb_bucket_unlock(int argc, VALUE *argv, VALUE self)
 {
     struct cb_bucket_st *bucket = DATA_PTR(self);
     struct cb_context_st *ctx;
-    VALUE
+    VALUE rv, proc, exc;
     lcb_error_t err;
     struct cb_params_st params;

-    if (bucket
-
+    if (!cb_bucket_connected_bang(bucket, cb_sym_unlock)) {
+        return Qnil;
     }
-
+
+    memset(&params, 0, sizeof(struct cb_params_st));
+    rb_scan_args(argc, argv, "0*&", &params.args, &proc);
     if (!bucket->async && proc != Qnil) {
         rb_raise(rb_eArgError, "synchronous mode doesn't support callbacks");
     }
-    rb_funcall(args, cb_id_flatten_bang, 0);
-    memset(&params, 0, sizeof(struct cb_params_st));
+    rb_funcall(params.args, cb_id_flatten_bang, 0);
     params.type = cb_cmd_unlock;
     params.bucket = bucket;
-    cb_params_build(&params
-    ctx =
-    if (ctx == NULL) {
-        rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for context");
-    }
-    ctx->proc = cb_gc_protect(bucket, proc);
-    ctx->bucket = bucket;
-    rv = rb_hash_new();
-    ctx->rv = &rv;
-    ctx->exception = Qnil;
+    cb_params_build(&params);
+    ctx = cb_context_alloc_common(bucket, proc, params.cmd.unlock.num);
     ctx->quiet = params.cmd.unlock.quiet;
-    ctx->nqueries = params.cmd.unlock.num;
     err = lcb_unlock(bucket->handle, (const void *)ctx,
             params.cmd.unlock.num, params.cmd.unlock.ptr);
     cb_params_destroy(&params);
     exc = cb_check_error(err, "failed to schedule unlock request", Qnil);
     if (exc != Qnil) {
-
+        cb_context_free(ctx);
         rb_exc_raise(exc);
     }
     bucket->nbytes += params.npayload;
@@ -161,9 +153,10 @@ cb_bucket_unlock(int argc, VALUE *argv, VALUE self)
         lcb_wait(bucket->handle);
     }
     exc = ctx->exception;
-
+    rv = ctx->rv;
+    cb_context_free(ctx);
     if (exc != Qnil) {
-        rb_exc_raise(
+        rb_exc_raise(exc);
     }
     exc = bucket->exception;
     if (exc != Qnil) {
data/ext/couchbase_ext/utils.c
CHANGED
@@ -17,18 +17,16 @@

 #include "couchbase_ext.h"

-
-
+void
+cb_gc_protect_ptr(struct cb_bucket_st *bucket, void *ptr, mark_f mark_func)
 {
-
-    return val;
+    st_insert(bucket->object_space, (st_index_t)ptr, (st_data_t)mark_func);
 }

-
-
+void
+cb_gc_unprotect_ptr(struct cb_bucket_st *bucket, void *ptr)
 {
-
-    return val;
+    st_delete(bucket->object_space, (st_index_t*)&ptr, NULL);
 }

 struct proc_params_st
@@ -67,6 +65,23 @@ cb_async_error_notify(struct cb_bucket_st *bucket, VALUE exc)
     }
 }

+int
+cb_bucket_connected_bang(struct cb_bucket_st *bucket, VALUE operation)
+{
+    if (bucket->handle == NULL || !bucket->connected) {
+        VALUE exc = rb_exc_new2(cb_eConnectError, "not connected to the server");
+        rb_ivar_set(exc, cb_id_iv_operation, operation);
+        rb_ivar_set(exc, cb_id_iv_value, bucket->self);
+        if (bucket->async) {
+            cb_async_error_notify(bucket, exc);
+        } else {
+            rb_exc_raise(exc);
+        }
+        return 0;
+    }
+    return 1;
+}
+
 static VALUE
 func_call_failed(VALUE ptr, VALUE exc)
 {
data/ext/couchbase_ext/version.c
CHANGED
@@ -22,13 +22,13 @@ cb_version_callback(lcb_t handle, const void *cookie, lcb_error_t error, const l
 {
     struct cb_context_st *ctx = (struct cb_context_st *)cookie;
     struct cb_bucket_st *bucket = ctx->bucket;
-    VALUE node, val,
+    VALUE node, val, exc, res;

     node = resp->v.v0.server_endpoint ? STR_NEW_CSTR(resp->v.v0.server_endpoint) : Qnil;
     exc = cb_check_error(error, "failed to get version", node);
     if (exc != Qnil) {
         rb_ivar_set(exc, cb_id_iv_operation, cb_sym_version);
-        ctx->exception =
+        ctx->exception = exc;
     }

     if (node != Qnil) {
@@ -44,14 +44,14 @@ cb_version_callback(lcb_t handle, const void *cookie, lcb_error_t error, const l
             }
         } else { /* synchronous */
             if (NIL_P(exc)) {
-                rb_hash_aset(
+                rb_hash_aset(ctx->rv, node, val);
             }
         }
     } else {
         ctx->nqueries--;
-
+        ctx->proc = Qnil;
         if (bucket->async) {
-
+            cb_context_free(ctx);
         }
     }

@@ -90,37 +90,29 @@ cb_bucket_version(int argc, VALUE *argv, VALUE self)
 {
     struct cb_bucket_st *bucket = DATA_PTR(self);
     struct cb_context_st *ctx;
-    VALUE rv, exc,
+    VALUE rv, exc, proc;
     lcb_error_t err;
     struct cb_params_st params;

-    if (bucket
-
+    if (!cb_bucket_connected_bang(bucket, cb_sym_version)) {
+        return Qnil;
     }
-
+
+    memset(&params, 0, sizeof(struct cb_params_st));
+    rb_scan_args(argc, argv, "0*&", &params.args, &proc);
     if (!bucket->async && proc != Qnil) {
         rb_raise(rb_eArgError, "synchronous mode doesn't support callbacks");
     }
-    memset(&params, 0, sizeof(struct cb_params_st));
     params.type = cb_cmd_version;
     params.bucket = bucket;
-    cb_params_build(&params
-    ctx =
-    if (ctx == NULL) {
-        rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for context");
-    }
-    rv = rb_hash_new();
-    ctx->rv = &rv;
-    ctx->bucket = bucket;
-    ctx->exception = Qnil;
-    ctx->proc = cb_gc_protect(bucket, proc);
-    ctx->nqueries = params.cmd.version.num;
+    cb_params_build(&params);
+    ctx = cb_context_alloc_common(bucket, proc, params.cmd.version.num);
     err = lcb_server_versions(bucket->handle, (const void *)ctx,
             params.cmd.version.num, params.cmd.version.ptr);
     exc = cb_check_error(err, "failed to schedule version request", Qnil);
     cb_params_destroy(&params);
     if (exc != Qnil) {
-
+        cb_context_free(ctx);
         rb_exc_raise(exc);
     }
     bucket->nbytes += params.npayload;
@@ -133,9 +125,9 @@ cb_bucket_version(int argc, VALUE *argv, VALUE self)
         lcb_wait(bucket->handle);
     }
     exc = ctx->exception;
-
+    rv = ctx->rv;
+    cb_context_free(ctx);
     if (exc != Qnil) {
-        cb_gc_unprotect(bucket, exc);
         rb_exc_raise(exc);
     }
     exc = bucket->exception;