couchbase 1.2.0.z.beta-x86-mingw32 → 1.2.1-x86-mingw32

Files changed (45)
  1. data/.travis.yml +1 -1
  2. data/Makefile +3 -0
  3. data/README.markdown +15 -4
  4. data/RELEASE_NOTES.markdown +526 -0
  5. data/couchbase.gemspec +0 -1
  6. data/ext/couchbase_ext/arguments.c +161 -244
  7. data/ext/couchbase_ext/arithmetic.c +29 -37
  8. data/ext/couchbase_ext/bucket.c +252 -219
  9. data/ext/couchbase_ext/couchbase_ext.c +540 -417
  10. data/ext/couchbase_ext/couchbase_ext.h +218 -191
  11. data/ext/couchbase_ext/delete.c +30 -27
  12. data/ext/couchbase_ext/extconf.rb +15 -3
  13. data/ext/couchbase_ext/get.c +45 -37
  14. data/ext/couchbase_ext/http.c +95 -74
  15. data/ext/couchbase_ext/multithread_plugin.c +1238 -0
  16. data/ext/couchbase_ext/observe.c +42 -37
  17. data/ext/couchbase_ext/result.c +17 -20
  18. data/ext/couchbase_ext/stats.c +30 -28
  19. data/ext/couchbase_ext/store.c +47 -39
  20. data/ext/couchbase_ext/timer.c +11 -11
  21. data/ext/couchbase_ext/touch.c +30 -27
  22. data/ext/couchbase_ext/unlock.c +30 -27
  23. data/ext/couchbase_ext/utils.c +166 -89
  24. data/ext/couchbase_ext/version.c +29 -26
  25. data/lib/action_dispatch/middleware/session/couchbase_store.rb +2 -2
  26. data/lib/active_support/cache/couchbase_store.rb +6 -6
  27. data/lib/couchbase.rb +1 -0
  28. data/lib/couchbase/bucket.rb +6 -11
  29. data/lib/couchbase/cluster.rb +105 -0
  30. data/lib/couchbase/utils.rb +8 -5
  31. data/lib/couchbase/version.rb +1 -1
  32. data/lib/couchbase/view.rb +51 -5
  33. data/lib/couchbase/view_row.rb +1 -1
  34. data/lib/ext/multi_json_fix.rb +13 -9
  35. data/lib/rack/session/couchbase.rb +11 -7
  36. data/tasks/compile.rake +1 -1
  37. data/tasks/test.rake +40 -34
  38. data/tasks/util.rake +1 -1
  39. data/test/setup.rb +9 -2
  40. data/test/test_arithmetic.rb +37 -0
  41. data/test/test_async.rb +22 -18
  42. data/test/test_unlock.rb +0 -1
  43. data/test/test_utils.rb +32 -0
  44. metadata +13 -23
  45. data/HISTORY.markdown +0 -219
data/ext/couchbase_ext/observe.c

@@ -18,43 +18,43 @@
  #include "couchbase_ext.h"
 
  void
- observe_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_observe_resp_t *resp)
+ cb_observe_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_observe_resp_t *resp)
  {
- struct context_st *ctx = (struct context_st *)cookie;
- struct bucket_st *bucket = ctx->bucket;
- VALUE key, res, *rv = ctx->rv;
+ struct cb_context_st *ctx = (struct cb_context_st *)cookie;
+ struct cb_bucket_st *bucket = ctx->bucket;
+ VALUE key, res, *rv = ctx->rv, exc;
 
  if (resp->v.v0.key) {
  key = STR_NEW((const char*)resp->v.v0.key, resp->v.v0.nkey);
- ctx->exception = cb_check_error(error, "failed to execute observe request", key);
- if (ctx->exception) {
- cb_gc_protect(bucket, ctx->exception);
+ exc = cb_check_error(error, "failed to execute observe request", key);
+ if (exc != Qnil) {
+ ctx->exception = cb_gc_protect(bucket, exc);
  }
- res = rb_class_new_instance(0, NULL, cResult);
- rb_ivar_set(res, id_iv_completed, Qfalse);
- rb_ivar_set(res, id_iv_error, ctx->exception);
- rb_ivar_set(res, id_iv_operation, sym_observe);
- rb_ivar_set(res, id_iv_key, key);
- rb_ivar_set(res, id_iv_cas, ULL2NUM(resp->v.v0.cas));
- rb_ivar_set(res, id_iv_from_master, resp->v.v0.from_master ? Qtrue : Qfalse);
- rb_ivar_set(res, id_iv_time_to_persist, ULONG2NUM(resp->v.v0.ttp));
- rb_ivar_set(res, id_iv_time_to_replicate, ULONG2NUM(resp->v.v0.ttr));
+ res = rb_class_new_instance(0, NULL, cb_cResult);
+ rb_ivar_set(res, cb_id_iv_completed, Qfalse);
+ rb_ivar_set(res, cb_id_iv_error, ctx->exception);
+ rb_ivar_set(res, cb_id_iv_operation, cb_sym_observe);
+ rb_ivar_set(res, cb_id_iv_key, key);
+ rb_ivar_set(res, cb_id_iv_cas, ULL2NUM(resp->v.v0.cas));
+ rb_ivar_set(res, cb_id_iv_from_master, resp->v.v0.from_master ? Qtrue : Qfalse);
+ rb_ivar_set(res, cb_id_iv_time_to_persist, ULONG2NUM(resp->v.v0.ttp));
+ rb_ivar_set(res, cb_id_iv_time_to_replicate, ULONG2NUM(resp->v.v0.ttr));
 
  switch (resp->v.v0.status) {
  case LCB_OBSERVE_FOUND:
- rb_ivar_set(res, id_iv_status, sym_found);
+ rb_ivar_set(res, cb_id_iv_status, cb_sym_found);
  break;
  case LCB_OBSERVE_PERSISTED:
- rb_ivar_set(res, id_iv_status, sym_persisted);
+ rb_ivar_set(res, cb_id_iv_status, cb_sym_persisted);
  break;
  case LCB_OBSERVE_NOT_FOUND:
- rb_ivar_set(res, id_iv_status, sym_not_found);
+ rb_ivar_set(res, cb_id_iv_status, cb_sym_not_found);
  break;
  default:
- rb_ivar_set(res, id_iv_status, Qnil);
+ rb_ivar_set(res, cb_id_iv_status, Qnil);
  }
  if (bucket->async) { /* asynchronous */
  if (ctx->proc != Qnil) {
- cb_proc_call(ctx->proc, 1, res);
+ cb_proc_call(bucket, ctx->proc, 1, res);
  }
  } else { /* synchronous */
  if (NIL_P(ctx->exception)) {

@@ -68,12 +68,15 @@ observe_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_
  }
  } else {
  if (bucket->async && ctx->proc != Qnil) {
- res = rb_class_new_instance(0, NULL, cResult);
- rb_ivar_set(res, id_iv_completed, Qtrue);
- cb_proc_call(ctx->proc, 1, res);
+ res = rb_class_new_instance(0, NULL, cb_cResult);
+ rb_ivar_set(res, cb_id_iv_completed, Qtrue);
+ cb_proc_call(bucket, ctx->proc, 1, res);
  }
  ctx->nqueries--;
  cb_gc_unprotect(bucket, ctx->proc);
+ if (bucket->async) {
+ free(ctx);
+ }
  }
  (void)handle;
  }

@@ -110,26 +113,26 @@ observe_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_
  VALUE
  cb_bucket_observe(int argc, VALUE *argv, VALUE self)
  {
- struct bucket_st *bucket = DATA_PTR(self);
- struct context_st *ctx;
+ struct cb_bucket_st *bucket = DATA_PTR(self);
+ struct cb_context_st *ctx;
  VALUE args, rv, proc, exc;
  lcb_error_t err;
- struct params_st params;
+ struct cb_params_st params;
 
  if (bucket->handle == NULL) {
- rb_raise(eConnectError, "closed connection");
+ rb_raise(cb_eConnectError, "closed connection");
  }
  rb_scan_args(argc, argv, "0*&", &args, &proc);
  if (!bucket->async && proc != Qnil) {
  rb_raise(rb_eArgError, "synchronous mode doesn't support callbacks");
  }
- memset(&params, 0, sizeof(struct params_st));
- params.type = cmd_observe;
+ memset(&params, 0, sizeof(struct cb_params_st));
+ params.type = cb_cmd_observe;
  params.bucket = bucket;
  cb_params_build(&params, RARRAY_LEN(args), args);
- ctx = xcalloc(1, sizeof(struct context_st));
+ ctx = calloc(1, sizeof(struct cb_context_st));
  if (ctx == NULL) {
- rb_raise(eClientNoMemoryError, "failed to allocate memory for context");
+ rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for context");
  }
  ctx->proc = cb_gc_protect(bucket, proc);
  ctx->bucket = bucket;

@@ -142,12 +145,12 @@ cb_bucket_observe(int argc, VALUE *argv, VALUE self)
  cb_params_destroy(&params);
  exc = cb_check_error(err, "failed to schedule observe request", Qnil);
  if (exc != Qnil) {
- xfree(ctx);
+ free(ctx);
  rb_exc_raise(exc);
  }
  bucket->nbytes += params.npayload;
  if (bucket->async) {
- maybe_do_loop(bucket);
+ cb_maybe_do_loop(bucket);
  return Qnil;
  } else {
  if (ctx->nqueries > 0) {

@@ -155,13 +158,15 @@ cb_bucket_observe(int argc, VALUE *argv, VALUE self)
  lcb_wait(bucket->handle);
  }
  exc = ctx->exception;
- xfree(ctx);
+ free(ctx);
  if (exc != Qnil) {
  cb_gc_unprotect(bucket, exc);
  rb_exc_raise(exc);
  }
- if (bucket->exception != Qnil) {
- rb_exc_raise(bucket->exception);
+ exc = bucket->exception;
+ if (exc != Qnil) {
+ bucket->exception = Qnil;
+ rb_exc_raise(exc);
  }
  if (params.cmd.observe.num > 1 || params.cmd.observe.array) {
  return rv; /* return as a hash {key => {}, ...} */
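
For orientation (not part of the diff): the renamed cb_bucket_observe still backs Bucket#observe, which refuses a callback block in synchronous mode, returns nil in asynchronous mode, and now frees its context inside the callback when running asynchronously. A rough usage sketch, assuming a local cluster, the default bucket, and the Result readers (key, status, from_master) implied by the ivars set above:

    require 'couchbase'

    conn = Couchbase.connect(:hostname => "localhost", :bucket => "default")
    conn.set("foo", "bar")

    # Synchronous observe: results are collected into the hash built by cb_bucket_observe.
    conn.observe("foo")

    # Asynchronous observe: one Result per replica, delivered through cb_proc_call.
    conn.run do |c|
      c.observe("foo") do |r|
        puts "#{r.key}: #{r.status} from_master=#{r.from_master}" if r.success?
      end
    end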
data/ext/couchbase_ext/result.c

@@ -28,7 +28,7 @@
  VALUE
  cb_result_success_p(VALUE self)
  {
- return RTEST(rb_ivar_get(self, id_iv_error)) ? Qfalse : Qtrue;
+ return RTEST(rb_ivar_get(self, cb_id_iv_error)) ? Qfalse : Qtrue;
  }
 
  /*

@@ -41,7 +41,7 @@ cb_result_success_p(VALUE self)
  VALUE
  cb_result_inspect(VALUE self)
  {
- VALUE str, attr, error;
+ VALUE str, attr;
  char buf[100];
 
  str = rb_str_buf_new2("#<");

@@ -49,70 +49,67 @@ cb_result_inspect(VALUE self)
  snprintf(buf, 100, ":%p", (void *)self);
  rb_str_buf_cat2(str, buf);
 
- attr = rb_ivar_get(self, id_iv_error);
+ attr = rb_ivar_get(self, cb_id_iv_operation);
  if (RTEST(attr)) {
- error = rb_ivar_get(attr, id_iv_error);
- } else {
- error = INT2FIX(0);
+ rb_str_buf_cat2(str, " operation=");
+ rb_str_append(str, rb_inspect(attr));
  }
- rb_str_buf_cat2(str, " error=0x");
- rb_str_append(str, rb_funcall(error, id_to_s, 1, INT2FIX(16)));
 
- attr = rb_ivar_get(self, id_iv_operation);
+ attr = rb_ivar_get(self, cb_id_iv_error);
  if (RTEST(attr)) {
- rb_str_buf_cat2(str, " operation=");
+ rb_str_buf_cat2(str, " error=");
  rb_str_append(str, rb_inspect(attr));
  }
 
- attr = rb_ivar_get(self, id_iv_key);
+ attr = rb_ivar_get(self, cb_id_iv_key);
  if (RTEST(attr)) {
  rb_str_buf_cat2(str, " key=");
  rb_str_append(str, rb_inspect(attr));
  }
 
- attr = rb_ivar_get(self, id_iv_status);
+ attr = rb_ivar_get(self, cb_id_iv_status);
  if (RTEST(attr)) {
  rb_str_buf_cat2(str, " status=");
  rb_str_append(str, rb_inspect(attr));
  }
 
- attr = rb_ivar_get(self, id_iv_cas);
+ attr = rb_ivar_get(self, cb_id_iv_cas);
  if (RTEST(attr)) {
  rb_str_buf_cat2(str, " cas=");
  rb_str_append(str, rb_inspect(attr));
  }
 
- attr = rb_ivar_get(self, id_iv_flags);
+ attr = rb_ivar_get(self, cb_id_iv_flags);
  if (RTEST(attr)) {
  rb_str_buf_cat2(str, " flags=0x");
- rb_str_append(str, rb_funcall(attr, id_to_s, 1, INT2FIX(16)));
+ rb_str_append(str, rb_funcall(attr, cb_id_to_s, 1, INT2FIX(16)));
  }
 
- attr = rb_ivar_get(self, id_iv_node);
+ attr = rb_ivar_get(self, cb_id_iv_node);
  if (RTEST(attr)) {
  rb_str_buf_cat2(str, " node=");
  rb_str_append(str, rb_inspect(attr));
  }
 
- attr = rb_ivar_get(self, id_iv_from_master);
+ attr = rb_ivar_get(self, cb_id_iv_from_master);
  if (attr != Qnil) {
  rb_str_buf_cat2(str, " from_master=");
  rb_str_append(str, rb_inspect(attr));
  }
 
- attr = rb_ivar_get(self, id_iv_time_to_persist);
+ attr = rb_ivar_get(self, cb_id_iv_time_to_persist);
  if (RTEST(attr)) {
  rb_str_buf_cat2(str, " time_to_persist=");
  rb_str_append(str, rb_inspect(attr));
  }
 
- attr = rb_ivar_get(self, id_iv_time_to_replicate);
+ attr = rb_ivar_get(self, cb_id_iv_time_to_replicate);
  if (RTEST(attr)) {
  rb_str_buf_cat2(str, " time_to_replicate=");
  rb_str_append(str, rb_inspect(attr));
  }
 
- attr = rb_ivar_get(self, id_iv_headers);
+ attr = rb_ivar_get(self, cb_id_iv_headers);
  if (RTEST(attr)) {
  rb_str_buf_cat2(str, " headers=");
  rb_str_append(str, rb_inspect(attr));
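
The cb_result_inspect change above drops the raw error=0x<code> field in favour of inspecting the error object itself (with operation printed first), while success? keeps reporting simply whether the error ivar is set. A small, hedged illustration of how that reads from an asynchronous callback (exact output format assumed):

    conn.run do |c|
      c.get("missing-key") do |r|
        # r.inspect now shows e.g. operation=:get error=#<Couchbase::Error::NotFound ...>
        puts r.inspect unless r.success?
      end
    end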
data/ext/couchbase_ext/stats.c

@@ -18,32 +18,30 @@
  #include "couchbase_ext.h"
 
  void
- stat_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_server_stat_resp_t *resp)
+ cb_stat_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_server_stat_resp_t *resp)
  {
- struct context_st *ctx = (struct context_st *)cookie;
- struct bucket_st *bucket = ctx->bucket;
+ struct cb_context_st *ctx = (struct cb_context_st *)cookie;
+ struct cb_bucket_st *bucket = ctx->bucket;
  VALUE stats, node, key, val, *rv = ctx->rv, exc = Qnil, res;
 
  node = resp->v.v0.server_endpoint ? STR_NEW_CSTR(resp->v.v0.server_endpoint) : Qnil;
  exc = cb_check_error(error, "failed to fetch stats", node);
  if (exc != Qnil) {
- rb_ivar_set(exc, id_iv_operation, sym_stats);
- if (NIL_P(ctx->exception)) {
- ctx->exception = cb_gc_protect(bucket, exc);
- }
+ rb_ivar_set(exc, cb_id_iv_operation, cb_sym_stats);
+ ctx->exception = cb_gc_protect(bucket, exc);
  }
  if (node != Qnil) {
  key = STR_NEW((const char*)resp->v.v0.key, resp->v.v0.nkey);
  val = STR_NEW((const char*)resp->v.v0.bytes, resp->v.v0.nbytes);
  if (bucket->async) { /* asynchronous */
  if (ctx->proc != Qnil) {
- res = rb_class_new_instance(0, NULL, cResult);
- rb_ivar_set(res, id_iv_error, exc);
- rb_ivar_set(res, id_iv_operation, sym_stats);
- rb_ivar_set(res, id_iv_node, node);
- rb_ivar_set(res, id_iv_key, key);
- rb_ivar_set(res, id_iv_value, val);
- cb_proc_call(ctx->proc, 1, res);
+ res = rb_class_new_instance(0, NULL, cb_cResult);
+ rb_ivar_set(res, cb_id_iv_error, exc);
+ rb_ivar_set(res, cb_id_iv_operation, cb_sym_stats);
+ rb_ivar_set(res, cb_id_iv_node, node);
+ rb_ivar_set(res, cb_id_iv_key, key);
+ rb_ivar_set(res, cb_id_iv_value, val);
+ cb_proc_call(bucket, ctx->proc, 1, res);
  }
  } else { /* synchronous */
  if (NIL_P(exc)) {

@@ -56,8 +54,10 @@ stat_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_ser
  }
  }
  } else {
- ctx->nqueries--;
  cb_gc_unprotect(bucket, ctx->proc);
+ if (bucket->async) {
+ free(ctx);
+ }
  }
  (void)handle;
  }

@@ -107,26 +107,26 @@ stat_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_ser
  VALUE
  cb_bucket_stats(int argc, VALUE *argv, VALUE self)
  {
- struct bucket_st *bucket = DATA_PTR(self);
- struct context_st *ctx;
+ struct cb_bucket_st *bucket = DATA_PTR(self);
+ struct cb_context_st *ctx;
  VALUE rv, exc, args, proc;
  lcb_error_t err;
- struct params_st params;
+ struct cb_params_st params;
 
  if (bucket->handle == NULL) {
- rb_raise(eConnectError, "closed connection");
+ rb_raise(cb_eConnectError, "closed connection");
  }
  rb_scan_args(argc, argv, "0*&", &args, &proc);
  if (!bucket->async && proc != Qnil) {
  rb_raise(rb_eArgError, "synchronous mode doesn't support callbacks");
  }
- memset(&params, 0, sizeof(struct params_st));
- params.type = cmd_stats;
+ memset(&params, 0, sizeof(struct cb_params_st));
+ params.type = cb_cmd_stats;
  params.bucket = bucket;
  cb_params_build(&params, RARRAY_LEN(args), args);
- ctx = xcalloc(1, sizeof(struct context_st));
+ ctx = calloc(1, sizeof(struct cb_context_st));
  if (ctx == NULL) {
- rb_raise(eClientNoMemoryError, "failed to allocate memory for context");
+ rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for context");
  }
  rv = rb_hash_new();
  ctx->rv = &rv;

@@ -139,12 +139,12 @@ cb_bucket_stats(int argc, VALUE *argv, VALUE self)
  exc = cb_check_error(err, "failed to schedule stat request", Qnil);
  cb_params_destroy(&params);
  if (exc != Qnil) {
- xfree(ctx);
+ free(ctx);
  rb_exc_raise(exc);
  }
  bucket->nbytes += params.npayload;
  if (bucket->async) {
- maybe_do_loop(bucket);
+ cb_maybe_do_loop(bucket);
  return Qnil;
  } else {
  if (ctx->nqueries > 0) {

@@ -152,13 +152,15 @@ cb_bucket_stats(int argc, VALUE *argv, VALUE self)
  lcb_wait(bucket->handle);
  }
  exc = ctx->exception;
- xfree(ctx);
+ free(ctx);
  if (exc != Qnil) {
  cb_gc_unprotect(bucket, exc);
  rb_exc_raise(exc);
  }
- if (bucket->exception != Qnil) {
- rb_exc_raise(bucket->exception);
+ exc = bucket->exception;
+ if (exc != Qnil) {
+ bucket->exception = Qnil;
+ rb_exc_raise(exc);
  }
  return rv;
  }
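
Bucket#stats, backed by cb_bucket_stats above, keeps the same calling convention: synchronous calls build and return the rv hash, asynchronous calls require a block and yield one Result per node and statistic, carrying the node, key, and value attributes set in cb_stat_callback. A hedged sketch (the nesting of the synchronous hash, statistic name to endpoint to value, is assumed from the 1.2 API):

    # Synchronous: Hash of statistic name => { "host:port" => value }.
    conn.stats.each do |name, per_node|
      puts "#{name}: #{per_node.values.first}"
    end

    # Asynchronous: one callback per (node, statistic) pair.
    conn.run do |c|
      c.stats("memory") do |r|
        puts "#{r.node} #{r.key} = #{r.value}"
      end
    end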
data/ext/couchbase_ext/store.c

@@ -20,60 +20,62 @@
  static VALUE
  storage_observe_callback(VALUE args, VALUE cookie)
  {
- struct context_st *ctx = (struct context_st *)cookie;
+ struct cb_context_st *ctx = (struct cb_context_st *)cookie;
+ struct cb_bucket_st *bucket = ctx->bucket;
  VALUE res = rb_ary_shift(args);
 
  if (ctx->proc != Qnil) {
- rb_ivar_set(res, id_iv_operation, ctx->operation);
- cb_proc_call(ctx->proc, 1, res);
+ rb_ivar_set(res, cb_id_iv_operation, ctx->operation);
+ cb_proc_call(bucket, ctx->proc, 1, res);
  }
  if (!RTEST(ctx->observe_options)) {
  ctx->nqueries--;
  if (ctx->nqueries == 0) {
- cb_gc_unprotect(ctx->bucket, ctx->proc);
+ cb_gc_unprotect(bucket, ctx->proc);
+ if (bucket->async) {
+ free(ctx);
+ }
  }
  }
  return Qnil;
  }
 
  void
- storage_callback(lcb_t handle, const void *cookie, lcb_storage_t operation,
+ cb_storage_callback(lcb_t handle, const void *cookie, lcb_storage_t operation,
  lcb_error_t error, const lcb_store_resp_t *resp)
  {
- struct context_st *ctx = (struct context_st *)cookie;
- struct bucket_st *bucket = ctx->bucket;
+ struct cb_context_st *ctx = (struct cb_context_st *)cookie;
+ struct cb_bucket_st *bucket = ctx->bucket;
  VALUE key, cas, *rv = ctx->rv, exc, res;
 
  key = STR_NEW((const char*)resp->v.v0.key, resp->v.v0.nkey);
- strip_key_prefix(bucket, key);
+ cb_strip_key_prefix(bucket, key);
 
  cas = resp->v.v0.cas > 0 ? ULL2NUM(resp->v.v0.cas) : Qnil;
  switch(operation) {
  case LCB_ADD:
- ctx->operation = sym_add;
+ ctx->operation = cb_sym_add;
  break;
  case LCB_REPLACE:
- ctx->operation = sym_replace;
+ ctx->operation = cb_sym_replace;
  break;
  case LCB_SET:
- ctx->operation = sym_set;
+ ctx->operation = cb_sym_set;
  break;
  case LCB_APPEND:
- ctx->operation = sym_append;
+ ctx->operation = cb_sym_append;
  break;
  case LCB_PREPEND:
- ctx->operation = sym_prepend;
+ ctx->operation = cb_sym_prepend;
  break;
  default:
  ctx->operation = Qnil;
  }
  exc = cb_check_error(error, "failed to store value", key);
  if (exc != Qnil) {
- rb_ivar_set(exc, id_iv_cas, cas);
- rb_ivar_set(exc, id_iv_operation, ctx->operation);
- if (NIL_P(ctx->exception)) {
- ctx->exception = cb_gc_protect(bucket, exc);
- }
+ rb_ivar_set(exc, cb_id_iv_cas, cas);
+ rb_ivar_set(exc, cb_id_iv_operation, ctx->operation);
+ ctx->exception = cb_gc_protect(bucket, exc);
  }
 
  if (bucket->async) { /* asynchronous */

@@ -82,16 +84,16 @@ storage_callback(lcb_t handle, const void *cookie, lcb_storage_t operation,
  args[0] = rb_hash_new();
  rb_hash_aset(args[0], key, cas);
  args[1] = ctx->observe_options;
- rb_block_call(bucket->self, id_observe_and_wait, 2, args,
+ rb_block_call(bucket->self, cb_id_observe_and_wait, 2, args,
  storage_observe_callback, (VALUE)ctx);
  cb_gc_unprotect(bucket, ctx->observe_options);
  } else if (ctx->proc != Qnil) {
- res = rb_class_new_instance(0, NULL, cResult);
- rb_ivar_set(res, id_iv_error, exc);
- rb_ivar_set(res, id_iv_key, key);
- rb_ivar_set(res, id_iv_operation, ctx->operation);
- rb_ivar_set(res, id_iv_cas, cas);
- cb_proc_call(ctx->proc, 1, res);
+ res = rb_class_new_instance(0, NULL, cb_cResult);
+ rb_ivar_set(res, cb_id_iv_error, exc);
+ rb_ivar_set(res, cb_id_iv_key, key);
+ rb_ivar_set(res, cb_id_iv_operation, ctx->operation);
+ rb_ivar_set(res, cb_id_iv_cas, cas);
+ cb_proc_call(bucket, ctx->proc, 1, res);
  }
  } else { /* synchronous */
  rb_hash_aset(*rv, key, cas);

@@ -101,6 +103,9 @@ storage_callback(lcb_t handle, const void *cookie, lcb_storage_t operation,
  ctx->nqueries--;
  if (ctx->nqueries == 0) {
  cb_gc_unprotect(bucket, ctx->proc);
+ if (bucket->async) {
+ free(ctx);
+ }
  }
  }
  (void)handle;

@@ -109,27 +114,27 @@ storage_callback(lcb_t handle, const void *cookie, lcb_storage_t operation,
  static inline VALUE
  cb_bucket_store(lcb_storage_t cmd, int argc, VALUE *argv, VALUE self)
  {
- struct bucket_st *bucket = DATA_PTR(self);
- struct context_st *ctx;
+ struct cb_bucket_st *bucket = DATA_PTR(self);
+ struct cb_context_st *ctx;
  VALUE args, rv, proc, exc, obs = Qnil;
  lcb_error_t err;
- struct params_st params;
+ struct cb_params_st params;
 
  if (bucket->handle == NULL) {
- rb_raise(eConnectError, "closed connection");
+ rb_raise(cb_eConnectError, "closed connection");
  }
  rb_scan_args(argc, argv, "0*&", &args, &proc);
  if (!bucket->async && proc != Qnil) {
  rb_raise(rb_eArgError, "synchronous mode doesn't support callbacks");
  }
- memset(&params, 0, sizeof(struct params_st));
- params.type = cmd_store;
+ memset(&params, 0, sizeof(struct cb_params_st));
+ params.type = cb_cmd_store;
  params.bucket = bucket;
  params.cmd.store.operation = cmd;
  cb_params_build(&params, RARRAY_LEN(args), args);
- ctx = xcalloc(1, sizeof(struct context_st));
+ ctx = calloc(1, sizeof(struct cb_context_st));
  if (ctx == NULL) {
- rb_raise(eClientNoMemoryError, "failed to allocate memory for context");
+ rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for context");
  }
  rv = rb_hash_new();
  ctx->rv = &rv;

@@ -137,18 +142,19 @@ cb_bucket_store(lcb_storage_t cmd, int argc, VALUE *argv, VALUE self)
  ctx->proc = cb_gc_protect(bucket, proc);
  ctx->observe_options = cb_gc_protect(bucket, obs);
  ctx->exception = Qnil;
+ obs = params.cmd.store.observe;
  ctx->nqueries = params.cmd.store.num;
  err = lcb_store(bucket->handle, (const void *)ctx,
  params.cmd.store.num, params.cmd.store.ptr);
  cb_params_destroy(&params);
  exc = cb_check_error(err, "failed to schedule set request", Qnil);
  if (exc != Qnil) {
- xfree(ctx);
+ free(ctx);
  rb_exc_raise(exc);
  }
  bucket->nbytes += params.npayload;
  if (bucket->async) {
- maybe_do_loop(bucket);
+ cb_maybe_do_loop(bucket);
  return Qnil;
  } else {
  if (ctx->nqueries > 0) {

@@ -156,17 +162,19 @@ cb_bucket_store(lcb_storage_t cmd, int argc, VALUE *argv, VALUE self)
  lcb_wait(bucket->handle);
  }
  exc = ctx->exception;
- xfree(ctx);
+ free(ctx);
  if (exc != Qnil) {
  cb_gc_unprotect(bucket, exc);
  rb_exc_raise(exc);
  }
- if (bucket->exception != Qnil) {
- rb_exc_raise(bucket->exception);
+ exc = bucket->exception;
+ if (exc != Qnil) {
+ bucket->exception = Qnil;
+ rb_exc_raise(exc);
  }
  if (RTEST(obs)) {
  cb_gc_unprotect(bucket, obs);
- return rb_funcall(bucket->self, id_observe_and_wait, 2, rv, obs);
+ rv = rb_funcall(bucket->self, cb_id_observe_and_wait, 2, rv, obs);
  }
  if (params.cmd.store.num > 1) {
  return rv; /* return as a hash {key => cas, ...} */
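
cb_bucket_store above is the shared backend for Bucket#set/#add/#replace/#append/#prepend. The added obs = params.cmd.store.observe assignment and the switch from returning observe_and_wait's value directly to assigning it to rv keep the :observe durability options flowing through the normal return-shape logic. A hedged usage sketch (durability option names assumed from the 1.2 API):

    # Plain store: returns the CAS of the new revision.
    cas = conn.set("foo", "bar")

    # With durability constraints the call goes through observe_and_wait and
    # only returns once the mutation is persisted/replicated as requested.
    conn.set("foo", "baz", :observe => {:persisted => 1, :replicated => 1})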