couchbase 1.2.0.beta-x86-mingw32 → 1.2.0-x86-mingw32
- data/.travis.yml +1 -1
- data/Makefile +3 -0
- data/README.markdown +15 -4
- data/RELEASE_NOTES.markdown +513 -0
- data/couchbase.gemspec +0 -1
- data/ext/couchbase_ext/arguments.c +161 -244
- data/ext/couchbase_ext/arithmetic.c +29 -37
- data/ext/couchbase_ext/bucket.c +252 -219
- data/ext/couchbase_ext/couchbase_ext.c +540 -417
- data/ext/couchbase_ext/couchbase_ext.h +218 -191
- data/ext/couchbase_ext/delete.c +30 -27
- data/ext/couchbase_ext/extconf.rb +15 -3
- data/ext/couchbase_ext/get.c +45 -37
- data/ext/couchbase_ext/http.c +95 -74
- data/ext/couchbase_ext/multithread_plugin.c +1201 -0
- data/ext/couchbase_ext/observe.c +42 -37
- data/ext/couchbase_ext/result.c +17 -20
- data/ext/couchbase_ext/stats.c +30 -28
- data/ext/couchbase_ext/store.c +46 -39
- data/ext/couchbase_ext/timer.c +11 -11
- data/ext/couchbase_ext/touch.c +30 -27
- data/ext/couchbase_ext/unlock.c +30 -27
- data/ext/couchbase_ext/utils.c +166 -89
- data/ext/couchbase_ext/version.c +29 -26
- data/lib/action_dispatch/middleware/session/couchbase_store.rb +2 -2
- data/lib/active_support/cache/couchbase_store.rb +6 -6
- data/lib/couchbase.rb +1 -0
- data/lib/couchbase/bucket.rb +6 -11
- data/lib/couchbase/cluster.rb +105 -0
- data/lib/couchbase/utils.rb +8 -5
- data/lib/couchbase/version.rb +1 -1
- data/lib/couchbase/view.rb +51 -5
- data/lib/couchbase/view_row.rb +1 -1
- data/lib/ext/multi_json_fix.rb +13 -9
- data/lib/rack/session/couchbase.rb +11 -7
- data/tasks/compile.rake +1 -1
- data/tasks/test.rake +40 -34
- data/tasks/util.rake +1 -1
- data/test/setup.rb +9 -2
- data/test/test_arithmetic.rb +37 -0
- data/test/test_async.rb +22 -18
- data/test/test_unlock.rb +0 -1
- data/test/test_utils.rb +32 -0
- metadata +13 -23
- data/HISTORY.markdown +0 -215
data/ext/couchbase_ext/observe.c CHANGED
@@ -18,43 +18,43 @@
 #include "couchbase_ext.h"

 void
-
+cb_observe_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_observe_resp_t *resp)
 {
-    struct
-    struct
-    VALUE key, res, *rv = ctx->rv;
+    struct cb_context_st *ctx = (struct cb_context_st *)cookie;
+    struct cb_bucket_st *bucket = ctx->bucket;
+    VALUE key, res, *rv = ctx->rv, exc;

     if (resp->v.v0.key) {
         key = STR_NEW((const char*)resp->v.v0.key, resp->v.v0.nkey);
-
-        if (
-            cb_gc_protect(bucket,
+        exc = cb_check_error(error, "failed to execute observe request", key);
+        if (exc != Qnil) {
+            ctx->exception = cb_gc_protect(bucket, exc);
         }
-        res = rb_class_new_instance(0, NULL,
-        rb_ivar_set(res,
-        rb_ivar_set(res,
-        rb_ivar_set(res,
-        rb_ivar_set(res,
-        rb_ivar_set(res,
-        rb_ivar_set(res,
-        rb_ivar_set(res,
-        rb_ivar_set(res,
+        res = rb_class_new_instance(0, NULL, cb_cResult);
+        rb_ivar_set(res, cb_id_iv_completed, Qfalse);
+        rb_ivar_set(res, cb_id_iv_error, ctx->exception);
+        rb_ivar_set(res, cb_id_iv_operation, cb_sym_observe);
+        rb_ivar_set(res, cb_id_iv_key, key);
+        rb_ivar_set(res, cb_id_iv_cas, ULL2NUM(resp->v.v0.cas));
+        rb_ivar_set(res, cb_id_iv_from_master, resp->v.v0.from_master ? Qtrue : Qfalse);
+        rb_ivar_set(res, cb_id_iv_time_to_persist, ULONG2NUM(resp->v.v0.ttp));
+        rb_ivar_set(res, cb_id_iv_time_to_replicate, ULONG2NUM(resp->v.v0.ttr));
         switch (resp->v.v0.status) {
         case LCB_OBSERVE_FOUND:
-            rb_ivar_set(res,
+            rb_ivar_set(res, cb_id_iv_status, cb_sym_found);
             break;
         case LCB_OBSERVE_PERSISTED:
-            rb_ivar_set(res,
+            rb_ivar_set(res, cb_id_iv_status, cb_sym_persisted);
             break;
         case LCB_OBSERVE_NOT_FOUND:
-            rb_ivar_set(res,
+            rb_ivar_set(res, cb_id_iv_status, cb_sym_not_found);
             break;
         default:
-            rb_ivar_set(res,
+            rb_ivar_set(res, cb_id_iv_status, Qnil);
         }
         if (bucket->async) { /* asynchronous */
             if (ctx->proc != Qnil) {
-                cb_proc_call(ctx->proc, 1, res);
+                cb_proc_call(bucket, ctx->proc, 1, res);
             }
         } else { /* synchronous */
             if (NIL_P(ctx->exception)) {
@@ -68,12 +68,15 @@ observe_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_
         }
     } else {
         if (bucket->async && ctx->proc != Qnil) {
-            res = rb_class_new_instance(0, NULL,
-            rb_ivar_set(res,
-            cb_proc_call(ctx->proc, 1, res);
+            res = rb_class_new_instance(0, NULL, cb_cResult);
+            rb_ivar_set(res, cb_id_iv_completed, Qtrue);
+            cb_proc_call(bucket, ctx->proc, 1, res);
         }
         ctx->nqueries--;
         cb_gc_unprotect(bucket, ctx->proc);
+        if (bucket->async) {
+            free(ctx);
+        }
     }
     (void)handle;
 }
@@ -110,26 +113,26 @@ observe_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_
 VALUE
 cb_bucket_observe(int argc, VALUE *argv, VALUE self)
 {
-    struct
-    struct
+    struct cb_bucket_st *bucket = DATA_PTR(self);
+    struct cb_context_st *ctx;
     VALUE args, rv, proc, exc;
     lcb_error_t err;
-    struct
+    struct cb_params_st params;

     if (bucket->handle == NULL) {
-        rb_raise(
+        rb_raise(cb_eConnectError, "closed connection");
     }
     rb_scan_args(argc, argv, "0*&", &args, &proc);
     if (!bucket->async && proc != Qnil) {
         rb_raise(rb_eArgError, "synchronous mode doesn't support callbacks");
     }
-    memset(&params, 0, sizeof(struct
-    params.type =
+    memset(&params, 0, sizeof(struct cb_params_st));
+    params.type = cb_cmd_observe;
     params.bucket = bucket;
     cb_params_build(&params, RARRAY_LEN(args), args);
-    ctx =
+    ctx = calloc(1, sizeof(struct cb_context_st));
     if (ctx == NULL) {
-        rb_raise(
+        rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for context");
     }
     ctx->proc = cb_gc_protect(bucket, proc);
     ctx->bucket = bucket;
@@ -142,12 +145,12 @@ cb_bucket_observe(int argc, VALUE *argv, VALUE self)
     cb_params_destroy(&params);
     exc = cb_check_error(err, "failed to schedule observe request", Qnil);
     if (exc != Qnil) {
-
+        free(ctx);
         rb_exc_raise(exc);
     }
     bucket->nbytes += params.npayload;
     if (bucket->async) {
-
+        cb_maybe_do_loop(bucket);
         return Qnil;
     } else {
         if (ctx->nqueries > 0) {
@@ -155,13 +158,15 @@ cb_bucket_observe(int argc, VALUE *argv, VALUE self)
             lcb_wait(bucket->handle);
         }
         exc = ctx->exception;
-
+        free(ctx);
         if (exc != Qnil) {
             cb_gc_unprotect(bucket, exc);
             rb_exc_raise(exc);
         }
-
-
+        exc = bucket->exception;
+        if (exc != Qnil) {
+            bucket->exception = Qnil;
+            rb_exc_raise(exc);
         }
         if (params.cmd.observe.num > 1 || params.cmd.observe.array) {
             return rv; /* return as a hash {key => {}, ...} */
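Note: the recurring change in this callback (and in stats.c and store.c below) is that the per-request context is now released by the callback itself when the bucket runs asynchronously, since the scheduling call has already returned and no synchronous caller will reach the free(ctx) that follows lcb_wait(). A minimal standalone sketch of that ownership rule follows; the struct and function names here are hypothetical, not the gem's (the real context is struct cb_context_st):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical, simplified context: the real struct cb_context_st carries
 * many more fields (proc, rv, exception, ...). */
struct demo_context {
    int nqueries; /* outstanding operations sharing this context */
    int async;    /* non-zero when the bucket runs asynchronously */
};

/* Called once per completed operation. */
static void demo_callback(struct demo_context *ctx)
{
    ctx->nqueries--;
    if (ctx->nqueries == 0 && ctx->async) {
        /* Asynchronous mode: the last callback owns the context, because
         * the scheduling call returned immediately after queuing work. */
        free(ctx);
        return;
    }
    /* Synchronous mode: the caller frees ctx after lcb_wait(). */
}

int main(void)
{
    struct demo_context *ctx = calloc(1, sizeof(*ctx));
    if (ctx == NULL) {
        return 1;
    }
    ctx->nqueries = 2;
    ctx->async = 1;
    demo_callback(ctx); /* one operation still pending, ctx stays alive */
    demo_callback(ctx); /* last operation: the callback frees ctx */
    puts("context released by the final callback");
    return 0;
}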
data/ext/couchbase_ext/result.c CHANGED
@@ -28,7 +28,7 @@
 VALUE
 cb_result_success_p(VALUE self)
 {
-    return RTEST(rb_ivar_get(self,
+    return RTEST(rb_ivar_get(self, cb_id_iv_error)) ? Qfalse : Qtrue;
 }

 /*
@@ -41,7 +41,7 @@ cb_result_success_p(VALUE self)
 VALUE
 cb_result_inspect(VALUE self)
 {
-    VALUE str, attr
+    VALUE str, attr;
     char buf[100];

     str = rb_str_buf_new2("#<");
@@ -49,70 +49,67 @@ cb_result_inspect(VALUE self)
     snprintf(buf, 100, ":%p", (void *)self);
     rb_str_buf_cat2(str, buf);

-    attr = rb_ivar_get(self,
+    attr = rb_ivar_get(self, cb_id_iv_operation);
     if (RTEST(attr)) {
-
-
-        error = INT2FIX(0);
+        rb_str_buf_cat2(str, " operation=");
+        rb_str_append(str, rb_inspect(attr));
     }
-    rb_str_buf_cat2(str, " error=0x");
-    rb_str_append(str, rb_funcall(error, id_to_s, 1, INT2FIX(16)));

-    attr = rb_ivar_get(self,
+    attr = rb_ivar_get(self, cb_id_iv_error);
     if (RTEST(attr)) {
-        rb_str_buf_cat2(str, "
+        rb_str_buf_cat2(str, " error=");
         rb_str_append(str, rb_inspect(attr));
     }

-    attr = rb_ivar_get(self,
+    attr = rb_ivar_get(self, cb_id_iv_key);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " key=");
         rb_str_append(str, rb_inspect(attr));
     }

-    attr = rb_ivar_get(self,
+    attr = rb_ivar_get(self, cb_id_iv_status);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " status=");
         rb_str_append(str, rb_inspect(attr));
     }

-    attr = rb_ivar_get(self,
+    attr = rb_ivar_get(self, cb_id_iv_cas);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " cas=");
         rb_str_append(str, rb_inspect(attr));
     }

-    attr = rb_ivar_get(self,
+    attr = rb_ivar_get(self, cb_id_iv_flags);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " flags=0x");
-        rb_str_append(str, rb_funcall(attr,
+        rb_str_append(str, rb_funcall(attr, cb_id_to_s, 1, INT2FIX(16)));
     }

-    attr = rb_ivar_get(self,
+    attr = rb_ivar_get(self, cb_id_iv_node);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " node=");
         rb_str_append(str, rb_inspect(attr));
     }

-    attr = rb_ivar_get(self,
+    attr = rb_ivar_get(self, cb_id_iv_from_master);
     if (attr != Qnil) {
         rb_str_buf_cat2(str, " from_master=");
         rb_str_append(str, rb_inspect(attr));
     }

-    attr = rb_ivar_get(self,
+    attr = rb_ivar_get(self, cb_id_iv_time_to_persist);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " time_to_persist=");
         rb_str_append(str, rb_inspect(attr));
     }

-    attr = rb_ivar_get(self,
+    attr = rb_ivar_get(self, cb_id_iv_time_to_replicate);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " time_to_replicate=");
         rb_str_append(str, rb_inspect(attr));
     }

-    attr = rb_ivar_get(self,
+    attr = rb_ivar_get(self, cb_id_iv_headers);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " headers=");
         rb_str_append(str, rb_inspect(attr));
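Note: the cb_result_inspect() rewrite above drops the unconditional "error=0x..." segment and instead appends each attribute only when it is present. A rough standalone illustration of that append-if-set pattern, using plain C strings in place of Ruby VALUEs (names and output are illustrative only, not the gem's exact inspect string):

#include <stdio.h>
#include <string.h>

/* Append " name=value" only when the attribute is set, mirroring the
 * RTEST(attr) guards in cb_result_inspect(). */
static void append_attr(char *buf, size_t len, const char *name, const char *value)
{
    if (value == NULL) {
        return; /* unset attribute: print nothing, like a false RTEST() */
    }
    snprintf(buf + strlen(buf), len - strlen(buf), " %s=%s", name, value);
}

int main(void)
{
    char buf[256] = "#<Couchbase::Result";
    append_attr(buf, sizeof(buf), "operation", ":set");
    append_attr(buf, sizeof(buf), "error", NULL);   /* skipped */
    append_attr(buf, sizeof(buf), "key", "\"foo\"");
    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ">");
    puts(buf); /* e.g. #<Couchbase::Result operation=:set key="foo"> */
    return 0;
}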
data/ext/couchbase_ext/stats.c CHANGED
@@ -18,32 +18,30 @@
 #include "couchbase_ext.h"

 void
-
+cb_stat_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_server_stat_resp_t *resp)
 {
-    struct
-    struct
+    struct cb_context_st *ctx = (struct cb_context_st *)cookie;
+    struct cb_bucket_st *bucket = ctx->bucket;
     VALUE stats, node, key, val, *rv = ctx->rv, exc = Qnil, res;

     node = resp->v.v0.server_endpoint ? STR_NEW_CSTR(resp->v.v0.server_endpoint) : Qnil;
     exc = cb_check_error(error, "failed to fetch stats", node);
     if (exc != Qnil) {
-        rb_ivar_set(exc,
-
-        ctx->exception = cb_gc_protect(bucket, exc);
-    }
+        rb_ivar_set(exc, cb_id_iv_operation, cb_sym_stats);
+        ctx->exception = cb_gc_protect(bucket, exc);
     }
     if (node != Qnil) {
         key = STR_NEW((const char*)resp->v.v0.key, resp->v.v0.nkey);
         val = STR_NEW((const char*)resp->v.v0.bytes, resp->v.v0.nbytes);
         if (bucket->async) { /* asynchronous */
             if (ctx->proc != Qnil) {
-                res = rb_class_new_instance(0, NULL,
-                rb_ivar_set(res,
-                rb_ivar_set(res,
-                rb_ivar_set(res,
-                rb_ivar_set(res,
-                rb_ivar_set(res,
-                cb_proc_call(ctx->proc, 1, res);
+                res = rb_class_new_instance(0, NULL, cb_cResult);
+                rb_ivar_set(res, cb_id_iv_error, exc);
+                rb_ivar_set(res, cb_id_iv_operation, cb_sym_stats);
+                rb_ivar_set(res, cb_id_iv_node, node);
+                rb_ivar_set(res, cb_id_iv_key, key);
+                rb_ivar_set(res, cb_id_iv_value, val);
+                cb_proc_call(bucket, ctx->proc, 1, res);
             }
         } else { /* synchronous */
             if (NIL_P(exc)) {
@@ -56,8 +54,10 @@ stat_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_ser
             }
         }
     } else {
-        ctx->nqueries--;
         cb_gc_unprotect(bucket, ctx->proc);
+        if (bucket->async) {
+            free(ctx);
+        }
     }
     (void)handle;
 }
@@ -107,26 +107,26 @@ stat_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_ser
 VALUE
 cb_bucket_stats(int argc, VALUE *argv, VALUE self)
 {
-    struct
-    struct
+    struct cb_bucket_st *bucket = DATA_PTR(self);
+    struct cb_context_st *ctx;
     VALUE rv, exc, args, proc;
     lcb_error_t err;
-    struct
+    struct cb_params_st params;

     if (bucket->handle == NULL) {
-        rb_raise(
+        rb_raise(cb_eConnectError, "closed connection");
     }
     rb_scan_args(argc, argv, "0*&", &args, &proc);
     if (!bucket->async && proc != Qnil) {
         rb_raise(rb_eArgError, "synchronous mode doesn't support callbacks");
     }
-    memset(&params, 0, sizeof(struct
-    params.type =
+    memset(&params, 0, sizeof(struct cb_params_st));
+    params.type = cb_cmd_stats;
     params.bucket = bucket;
     cb_params_build(&params, RARRAY_LEN(args), args);
-    ctx =
+    ctx = calloc(1, sizeof(struct cb_context_st));
     if (ctx == NULL) {
-        rb_raise(
+        rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for context");
     }
     rv = rb_hash_new();
     ctx->rv = &rv;
@@ -139,12 +139,12 @@ cb_bucket_stats(int argc, VALUE *argv, VALUE self)
     exc = cb_check_error(err, "failed to schedule stat request", Qnil);
     cb_params_destroy(&params);
     if (exc != Qnil) {
-
+        free(ctx);
         rb_exc_raise(exc);
     }
     bucket->nbytes += params.npayload;
     if (bucket->async) {
-
+        cb_maybe_do_loop(bucket);
         return Qnil;
     } else {
         if (ctx->nqueries > 0) {
@@ -152,13 +152,15 @@ cb_bucket_stats(int argc, VALUE *argv, VALUE self)
             lcb_wait(bucket->handle);
         }
         exc = ctx->exception;
-
+        free(ctx);
         if (exc != Qnil) {
             cb_gc_unprotect(bucket, exc);
             rb_exc_raise(exc);
         }
-
-
+        exc = bucket->exception;
+        if (exc != Qnil) {
+            bucket->exception = Qnil;
+            rb_exc_raise(exc);
         }
         return rv;
 }
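Note: the synchronous paths in observe.c, stats.c and store.c now also check bucket->exception after lcb_wait() returns, clearing the slot before re-raising, so an error recorded while the event loop was running is raised exactly once. A hedged sketch of that stash, clear, then re-raise shape, with made-up names standing in for the gem's Ruby VALUEs:

#include <stddef.h>
#include <stdio.h>

/* Made-up stand-in for the bucket's exception slot. The shape matters:
 * an error recorded while the event loop ran is taken out of the shared
 * slot (cleared) before it is propagated, so it cannot fire twice. */
struct demo_bucket {
    const char *exception; /* NULL means "no pending error" */
};

/* Return the pending error, if any, and clear the slot. */
static const char *take_pending_error(struct demo_bucket *bucket)
{
    const char *exc = bucket->exception;
    bucket->exception = NULL; /* clear before propagating, like bucket->exception = Qnil */
    return exc;
}

int main(void)
{
    struct demo_bucket bucket = { "failed to fetch stats" };
    const char *exc = take_pending_error(&bucket);
    if (exc != NULL) {
        fprintf(stderr, "re-raising: %s\n", exc); /* rb_exc_raise(exc) in the gem */
    }
    /* The next wait starts with a clean slot. */
    printf("pending after re-raise: %s\n", bucket.exception ? bucket.exception : "(none)");
    return 0;
}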
data/ext/couchbase_ext/store.c CHANGED
@@ -20,60 +20,62 @@
 static VALUE
 storage_observe_callback(VALUE args, VALUE cookie)
 {
-    struct
+    struct cb_context_st *ctx = (struct cb_context_st *)cookie;
+    struct cb_bucket_st *bucket = ctx->bucket;
     VALUE res = rb_ary_shift(args);

     if (ctx->proc != Qnil) {
-        rb_ivar_set(res,
-        cb_proc_call(ctx->proc, 1, res);
+        rb_ivar_set(res, cb_id_iv_operation, ctx->operation);
+        cb_proc_call(bucket, ctx->proc, 1, res);
     }
     if (!RTEST(ctx->observe_options)) {
         ctx->nqueries--;
         if (ctx->nqueries == 0) {
-            cb_gc_unprotect(
+            cb_gc_unprotect(bucket, ctx->proc);
+            if (bucket->async) {
+                free(ctx);
+            }
         }
     }
     return Qnil;
 }

 void
-
+cb_storage_callback(lcb_t handle, const void *cookie, lcb_storage_t operation,
         lcb_error_t error, const lcb_store_resp_t *resp)
 {
-    struct
-    struct
+    struct cb_context_st *ctx = (struct cb_context_st *)cookie;
+    struct cb_bucket_st *bucket = ctx->bucket;
     VALUE key, cas, *rv = ctx->rv, exc, res;

     key = STR_NEW((const char*)resp->v.v0.key, resp->v.v0.nkey);
-
+    cb_strip_key_prefix(bucket, key);

     cas = resp->v.v0.cas > 0 ? ULL2NUM(resp->v.v0.cas) : Qnil;
     switch(operation) {
     case LCB_ADD:
-        ctx->operation =
+        ctx->operation = cb_sym_add;
         break;
     case LCB_REPLACE:
-        ctx->operation =
+        ctx->operation = cb_sym_replace;
         break;
     case LCB_SET:
-        ctx->operation =
+        ctx->operation = cb_sym_set;
         break;
     case LCB_APPEND:
-        ctx->operation =
+        ctx->operation = cb_sym_append;
         break;
     case LCB_PREPEND:
-        ctx->operation =
+        ctx->operation = cb_sym_prepend;
         break;
     default:
         ctx->operation = Qnil;
     }
     exc = cb_check_error(error, "failed to store value", key);
     if (exc != Qnil) {
-        rb_ivar_set(exc,
-        rb_ivar_set(exc,
-
-        ctx->exception = cb_gc_protect(bucket, exc);
-    }
+        rb_ivar_set(exc, cb_id_iv_cas, cas);
+        rb_ivar_set(exc, cb_id_iv_operation, ctx->operation);
+        ctx->exception = cb_gc_protect(bucket, exc);
     }

     if (bucket->async) { /* asynchronous */
@@ -82,16 +84,16 @@ storage_callback(lcb_t handle, const void *cookie, lcb_storage_t operation,
             args[0] = rb_hash_new();
             rb_hash_aset(args[0], key, cas);
             args[1] = ctx->observe_options;
-            rb_block_call(bucket->self,
+            rb_block_call(bucket->self, cb_id_observe_and_wait, 2, args,
                     storage_observe_callback, (VALUE)ctx);
             cb_gc_unprotect(bucket, ctx->observe_options);
         } else if (ctx->proc != Qnil) {
-            res = rb_class_new_instance(0, NULL,
-            rb_ivar_set(res,
-            rb_ivar_set(res,
-            rb_ivar_set(res,
-            rb_ivar_set(res,
-            cb_proc_call(ctx->proc, 1, res);
+            res = rb_class_new_instance(0, NULL, cb_cResult);
+            rb_ivar_set(res, cb_id_iv_error, exc);
+            rb_ivar_set(res, cb_id_iv_key, key);
+            rb_ivar_set(res, cb_id_iv_operation, ctx->operation);
+            rb_ivar_set(res, cb_id_iv_cas, cas);
+            cb_proc_call(bucket, ctx->proc, 1, res);
         }
     } else { /* synchronous */
         rb_hash_aset(*rv, key, cas);
@@ -101,6 +103,9 @@ storage_callback(lcb_t handle, const void *cookie, lcb_storage_t operation,
     ctx->nqueries--;
     if (ctx->nqueries == 0) {
         cb_gc_unprotect(bucket, ctx->proc);
+        if (bucket->async) {
+            free(ctx);
+        }
     }
 }
 (void)handle;
@@ -109,27 +114,27 @@ storage_callback(lcb_t handle, const void *cookie, lcb_storage_t operation,
 static inline VALUE
 cb_bucket_store(lcb_storage_t cmd, int argc, VALUE *argv, VALUE self)
 {
-    struct
-    struct
+    struct cb_bucket_st *bucket = DATA_PTR(self);
+    struct cb_context_st *ctx;
     VALUE args, rv, proc, exc, obs = Qnil;
     lcb_error_t err;
-    struct
+    struct cb_params_st params;

     if (bucket->handle == NULL) {
-        rb_raise(
+        rb_raise(cb_eConnectError, "closed connection");
     }
     rb_scan_args(argc, argv, "0*&", &args, &proc);
     if (!bucket->async && proc != Qnil) {
         rb_raise(rb_eArgError, "synchronous mode doesn't support callbacks");
     }
-    memset(&params, 0, sizeof(struct
-    params.type =
+    memset(&params, 0, sizeof(struct cb_params_st));
+    params.type = cb_cmd_store;
     params.bucket = bucket;
     params.cmd.store.operation = cmd;
     cb_params_build(&params, RARRAY_LEN(args), args);
-    ctx =
+    ctx = calloc(1, sizeof(struct cb_context_st));
     if (ctx == NULL) {
-        rb_raise(
+        rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for context");
     }
     rv = rb_hash_new();
     ctx->rv = &rv;
@@ -143,12 +148,12 @@ cb_bucket_store(lcb_storage_t cmd, int argc, VALUE *argv, VALUE self)
     cb_params_destroy(&params);
     exc = cb_check_error(err, "failed to schedule set request", Qnil);
     if (exc != Qnil) {
-
+        free(ctx);
         rb_exc_raise(exc);
     }
     bucket->nbytes += params.npayload;
     if (bucket->async) {
-
+        cb_maybe_do_loop(bucket);
         return Qnil;
     } else {
         if (ctx->nqueries > 0) {
@@ -156,17 +161,19 @@ cb_bucket_store(lcb_storage_t cmd, int argc, VALUE *argv, VALUE self)
             lcb_wait(bucket->handle);
         }
         exc = ctx->exception;
-
+        free(ctx);
         if (exc != Qnil) {
             cb_gc_unprotect(bucket, exc);
             rb_exc_raise(exc);
         }
-
-
+        exc = bucket->exception;
+        if (exc != Qnil) {
+            bucket->exception = Qnil;
+            rb_exc_raise(exc);
         }
         if (RTEST(obs)) {
             cb_gc_unprotect(bucket, obs);
-            return rb_funcall(bucket->self,
+            return rb_funcall(bucket->self, cb_id_observe_and_wait, 2, rv, obs);
         }
         if (params.cmd.store.num > 1) {
             return rv; /* return as a hash {key => cas, ...} */
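Note: cb_storage_callback() above maps libcouchbase's lcb_storage_t operation onto a Ruby symbol (cb_sym_add, cb_sym_replace, cb_sym_set, cb_sym_append, cb_sym_prepend) before attaching it to the result or exception. A small self-contained sketch of that mapping, using a local enum and plain strings so it compiles without libcouchbase or Ruby headers (names are illustrative):

#include <stdio.h>

/* Local enum instead of lcb_storage_t; the real callback assigns Ruby
 * symbols to ctx->operation instead of returning strings. */
enum demo_storage {
    DEMO_ADD,
    DEMO_REPLACE,
    DEMO_SET,
    DEMO_APPEND,
    DEMO_PREPEND,
    DEMO_UNKNOWN
};

static const char *demo_operation_name(enum demo_storage op)
{
    switch (op) {
    case DEMO_ADD:     return "add";
    case DEMO_REPLACE: return "replace";
    case DEMO_SET:     return "set";
    case DEMO_APPEND:  return "append";
    case DEMO_PREPEND: return "prepend";
    default:           return "(nil)"; /* mirrors ctx->operation = Qnil */
    }
}

int main(void)
{
    printf("LCB_SET maps to :%s\n", demo_operation_name(DEMO_SET));
    printf("unknown maps to %s\n", demo_operation_name(DEMO_UNKNOWN));
    return 0;
}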