couchbase 1.2.0.z.beta4 → 1.2.0.z.beta5

@@ -18,10 +18,10 @@
 #include "couchbase_ext.h"

 void
-observe_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_observe_resp_t *resp)
+cb_observe_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_observe_resp_t *resp)
 {
-    struct context_st *ctx = (struct context_st *)cookie;
-    struct bucket_st *bucket = ctx->bucket;
+    struct cb_context_st *ctx = (struct cb_context_st *)cookie;
+    struct cb_bucket_st *bucket = ctx->bucket;
     VALUE key, res, *rv = ctx->rv;

     if (resp->v.v0.key) {

@@ -30,27 +30,27 @@ observe_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_
         if (ctx->exception) {
             cb_gc_protect(bucket, ctx->exception);
         }
-        res = rb_class_new_instance(0, NULL, cResult);
-        rb_ivar_set(res, id_iv_completed, Qfalse);
-        rb_ivar_set(res, id_iv_error, ctx->exception);
-        rb_ivar_set(res, id_iv_operation, sym_observe);
-        rb_ivar_set(res, id_iv_key, key);
-        rb_ivar_set(res, id_iv_cas, ULL2NUM(resp->v.v0.cas));
-        rb_ivar_set(res, id_iv_from_master, resp->v.v0.from_master ? Qtrue : Qfalse);
-        rb_ivar_set(res, id_iv_time_to_persist, ULONG2NUM(resp->v.v0.ttp));
-        rb_ivar_set(res, id_iv_time_to_replicate, ULONG2NUM(resp->v.v0.ttr));
+        res = rb_class_new_instance(0, NULL, cb_cResult);
+        rb_ivar_set(res, cb_id_iv_completed, Qfalse);
+        rb_ivar_set(res, cb_id_iv_error, ctx->exception);
+        rb_ivar_set(res, cb_id_iv_operation, cb_sym_observe);
+        rb_ivar_set(res, cb_id_iv_key, key);
+        rb_ivar_set(res, cb_id_iv_cas, ULL2NUM(resp->v.v0.cas));
+        rb_ivar_set(res, cb_id_iv_from_master, resp->v.v0.from_master ? Qtrue : Qfalse);
+        rb_ivar_set(res, cb_id_iv_time_to_persist, ULONG2NUM(resp->v.v0.ttp));
+        rb_ivar_set(res, cb_id_iv_time_to_replicate, ULONG2NUM(resp->v.v0.ttr));
         switch (resp->v.v0.status) {
         case LCB_OBSERVE_FOUND:
-            rb_ivar_set(res, id_iv_status, sym_found);
+            rb_ivar_set(res, cb_id_iv_status, cb_sym_found);
             break;
         case LCB_OBSERVE_PERSISTED:
-            rb_ivar_set(res, id_iv_status, sym_persisted);
+            rb_ivar_set(res, cb_id_iv_status, cb_sym_persisted);
             break;
         case LCB_OBSERVE_NOT_FOUND:
-            rb_ivar_set(res, id_iv_status, sym_not_found);
+            rb_ivar_set(res, cb_id_iv_status, cb_sym_not_found);
             break;
         default:
-            rb_ivar_set(res, id_iv_status, Qnil);
+            rb_ivar_set(res, cb_id_iv_status, Qnil);
         }
         if (bucket->async) { /* asynchronous */
             if (ctx->proc != Qnil) {

@@ -68,12 +68,15 @@ observe_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_
         }
     } else {
         if (bucket->async && ctx->proc != Qnil) {
-            res = rb_class_new_instance(0, NULL, cResult);
-            rb_ivar_set(res, id_iv_completed, Qtrue);
+            res = rb_class_new_instance(0, NULL, cb_cResult);
+            rb_ivar_set(res, cb_id_iv_completed, Qtrue);
             cb_proc_call(ctx->proc, 1, res);
         }
         ctx->nqueries--;
         cb_gc_unprotect(bucket, ctx->proc);
+        if (bucket->async) {
+            xfree(ctx);
+        }
     }
     (void)handle;
 }
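
The xfree(ctx) guard added above (and repeated in the stats and store callbacks further down) releases the per-request context in asynchronous mode: the command function returns to Ruby right after scheduling the operation, so only the callback can release the context once the final response has arrived, while in synchronous mode the waiting command function still owns the pointer and cleans up itself. A minimal sketch of that ownership rule, using invented names rather than the gem's code:

/* Sketch only: illustrative names, not the extension's real API. */
#include <stdlib.h>

struct demo_context {
    int nqueries; /* responses still expected */
    int async;    /* was the request issued in asynchronous mode? */
};

static void
demo_callback(struct demo_context *ctx)
{
    if (--ctx->nqueries == 0 && ctx->async) {
        free(ctx); /* async: the caller returned long ago, callback cleans up */
    }
}

static struct demo_context *
demo_schedule(int async, int nqueries)
{
    struct demo_context *ctx = calloc(1, sizeof(*ctx));
    if (ctx == NULL) {
        return NULL;
    }
    ctx->async = async;
    ctx->nqueries = nqueries;
    /* ... enqueue nqueries operations that will call demo_callback(ctx) ... */
    return ctx; /* synchronous callers keep the pointer and free it after waiting */
}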

@@ -110,26 +113,26 @@ observe_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_
 VALUE
 cb_bucket_observe(int argc, VALUE *argv, VALUE self)
 {
-    struct bucket_st *bucket = DATA_PTR(self);
-    struct context_st *ctx;
+    struct cb_bucket_st *bucket = DATA_PTR(self);
+    struct cb_context_st *ctx;
     VALUE args, rv, proc, exc;
     lcb_error_t err;
-    struct params_st params;
+    struct cb_params_st params;

     if (bucket->handle == NULL) {
-        rb_raise(eConnectError, "closed connection");
+        rb_raise(cb_eConnectError, "closed connection");
     }
     rb_scan_args(argc, argv, "0*&", &args, &proc);
     if (!bucket->async && proc != Qnil) {
         rb_raise(rb_eArgError, "synchronous mode doesn't support callbacks");
     }
-    memset(&params, 0, sizeof(struct params_st));
-    params.type = cmd_observe;
+    memset(&params, 0, sizeof(struct cb_params_st));
+    params.type = cb_cmd_observe;
     params.bucket = bucket;
     cb_params_build(&params, RARRAY_LEN(args), args);
-    ctx = xcalloc(1, sizeof(struct context_st));
+    ctx = xcalloc(1, sizeof(struct cb_context_st));
     if (ctx == NULL) {
-        rb_raise(eClientNoMemoryError, "failed to allocate memory for context");
+        rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for context");
     }
     ctx->proc = cb_gc_protect(bucket, proc);
     ctx->bucket = bucket;

@@ -147,7 +150,7 @@ cb_bucket_observe(int argc, VALUE *argv, VALUE self)
     }
     bucket->nbytes += params.npayload;
     if (bucket->async) {
-        maybe_do_loop(bucket);
+        cb_maybe_do_loop(bucket);
         return Qnil;
     } else {
         if (ctx->nqueries > 0) {

@@ -28,7 +28,7 @@
 VALUE
 cb_result_success_p(VALUE self)
 {
-    return RTEST(rb_ivar_get(self, id_iv_error)) ? Qfalse : Qtrue;
+    return RTEST(rb_ivar_get(self, cb_id_iv_error)) ? Qfalse : Qtrue;
 }

 /*

@@ -49,67 +49,67 @@ cb_result_inspect(VALUE self)
     snprintf(buf, 100, ":%p", (void *)self);
     rb_str_buf_cat2(str, buf);

-    attr = rb_ivar_get(self, id_iv_operation);
+    attr = rb_ivar_get(self, cb_id_iv_operation);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " operation=");
         rb_str_append(str, rb_inspect(attr));
     }

-    attr = rb_ivar_get(self, id_iv_error);
+    attr = rb_ivar_get(self, cb_id_iv_error);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " error=");
         rb_str_append(str, rb_inspect(attr));
     }

-    attr = rb_ivar_get(self, id_iv_key);
+    attr = rb_ivar_get(self, cb_id_iv_key);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " key=");
         rb_str_append(str, rb_inspect(attr));
     }

-    attr = rb_ivar_get(self, id_iv_status);
+    attr = rb_ivar_get(self, cb_id_iv_status);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " status=");
         rb_str_append(str, rb_inspect(attr));
     }

-    attr = rb_ivar_get(self, id_iv_cas);
+    attr = rb_ivar_get(self, cb_id_iv_cas);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " cas=");
         rb_str_append(str, rb_inspect(attr));
     }

-    attr = rb_ivar_get(self, id_iv_flags);
+    attr = rb_ivar_get(self, cb_id_iv_flags);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " flags=0x");
-        rb_str_append(str, rb_funcall(attr, id_to_s, 1, INT2FIX(16)));
+        rb_str_append(str, rb_funcall(attr, cb_id_to_s, 1, INT2FIX(16)));
     }

-    attr = rb_ivar_get(self, id_iv_node);
+    attr = rb_ivar_get(self, cb_id_iv_node);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " node=");
         rb_str_append(str, rb_inspect(attr));
     }

-    attr = rb_ivar_get(self, id_iv_from_master);
+    attr = rb_ivar_get(self, cb_id_iv_from_master);
     if (attr != Qnil) {
         rb_str_buf_cat2(str, " from_master=");
         rb_str_append(str, rb_inspect(attr));
     }

-    attr = rb_ivar_get(self, id_iv_time_to_persist);
+    attr = rb_ivar_get(self, cb_id_iv_time_to_persist);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " time_to_persist=");
         rb_str_append(str, rb_inspect(attr));
     }

-    attr = rb_ivar_get(self, id_iv_time_to_replicate);
+    attr = rb_ivar_get(self, cb_id_iv_time_to_replicate);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " time_to_replicate=");
         rb_str_append(str, rb_inspect(attr));
     }

-    attr = rb_ivar_get(self, id_iv_headers);
+    attr = rb_ivar_get(self, cb_id_iv_headers);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " headers=");
         rb_str_append(str, rb_inspect(attr));
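
For a populated result, the string assembled here comes out as a single line, roughly of the following shape (the values are invented for illustration, and only attributes that are actually set get appended):

#<Couchbase::Result:0x007fd3a5 operation=:set key="foo" status=:persisted cas=712383912741 from_master=true time_to_persist=0 time_to_replicate=0>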

@@ -18,16 +18,16 @@
 #include "couchbase_ext.h"

 void
-stat_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_server_stat_resp_t *resp)
+cb_stat_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_server_stat_resp_t *resp)
 {
-    struct context_st *ctx = (struct context_st *)cookie;
-    struct bucket_st *bucket = ctx->bucket;
+    struct cb_context_st *ctx = (struct cb_context_st *)cookie;
+    struct cb_bucket_st *bucket = ctx->bucket;
     VALUE stats, node, key, val, *rv = ctx->rv, exc = Qnil, res;

     node = resp->v.v0.server_endpoint ? STR_NEW_CSTR(resp->v.v0.server_endpoint) : Qnil;
     exc = cb_check_error(error, "failed to fetch stats", node);
     if (exc != Qnil) {
-        rb_ivar_set(exc, id_iv_operation, sym_stats);
+        rb_ivar_set(exc, cb_id_iv_operation, cb_sym_stats);
         if (NIL_P(ctx->exception)) {
             ctx->exception = cb_gc_protect(bucket, exc);
         }

@@ -37,12 +37,12 @@ stat_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_ser
         val = STR_NEW((const char*)resp->v.v0.bytes, resp->v.v0.nbytes);
         if (bucket->async) { /* asynchronous */
             if (ctx->proc != Qnil) {
-                res = rb_class_new_instance(0, NULL, cResult);
-                rb_ivar_set(res, id_iv_error, exc);
-                rb_ivar_set(res, id_iv_operation, sym_stats);
-                rb_ivar_set(res, id_iv_node, node);
-                rb_ivar_set(res, id_iv_key, key);
-                rb_ivar_set(res, id_iv_value, val);
+                res = rb_class_new_instance(0, NULL, cb_cResult);
+                rb_ivar_set(res, cb_id_iv_error, exc);
+                rb_ivar_set(res, cb_id_iv_operation, cb_sym_stats);
+                rb_ivar_set(res, cb_id_iv_node, node);
+                rb_ivar_set(res, cb_id_iv_key, key);
+                rb_ivar_set(res, cb_id_iv_value, val);
                 cb_proc_call(ctx->proc, 1, res);
             }
         } else { /* synchronous */

@@ -56,8 +56,10 @@ stat_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_ser
             }
         }
     } else {
-        ctx->nqueries--;
         cb_gc_unprotect(bucket, ctx->proc);
+        if (bucket->async) {
+            xfree(ctx);
+        }
     }
     (void)handle;
 }
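
Between the last two hunks the synchronous branch is unchanged and therefore elided; it is the part that folds each callback hit into the hash returned by Bucket#stats, apparently keyed by stat name and then by server endpoint. A sketch of that shape, stated as an assumption to make the key/node/val variables above concrete rather than as a copy of the gem's code:

/* Assumed shape of the elided synchronous branch; not taken from the diff. */
#include <ruby.h>

static void
demo_fold_stat(VALUE rv, VALUE key, VALUE node, VALUE val)
{
    VALUE per_key = rb_hash_aref(rv, key); /* e.g. "curr_connections" => {...} */
    if (NIL_P(per_key)) {
        per_key = rb_hash_new();
        rb_hash_aset(rv, key, per_key);
    }
    rb_hash_aset(per_key, node, val);      /* e.g. "127.0.0.1:11210" => "42" */
}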

@@ -107,26 +109,26 @@ stat_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_ser
 VALUE
 cb_bucket_stats(int argc, VALUE *argv, VALUE self)
 {
-    struct bucket_st *bucket = DATA_PTR(self);
-    struct context_st *ctx;
+    struct cb_bucket_st *bucket = DATA_PTR(self);
+    struct cb_context_st *ctx;
     VALUE rv, exc, args, proc;
     lcb_error_t err;
-    struct params_st params;
+    struct cb_params_st params;

     if (bucket->handle == NULL) {
-        rb_raise(eConnectError, "closed connection");
+        rb_raise(cb_eConnectError, "closed connection");
     }
     rb_scan_args(argc, argv, "0*&", &args, &proc);
     if (!bucket->async && proc != Qnil) {
         rb_raise(rb_eArgError, "synchronous mode doesn't support callbacks");
     }
-    memset(&params, 0, sizeof(struct params_st));
-    params.type = cmd_stats;
+    memset(&params, 0, sizeof(struct cb_params_st));
+    params.type = cb_cmd_stats;
     params.bucket = bucket;
     cb_params_build(&params, RARRAY_LEN(args), args);
-    ctx = xcalloc(1, sizeof(struct context_st));
+    ctx = xcalloc(1, sizeof(struct cb_context_st));
     if (ctx == NULL) {
-        rb_raise(eClientNoMemoryError, "failed to allocate memory for context");
+        rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for context");
     }
     rv = rb_hash_new();
     ctx->rv = &rv;

@@ -144,7 +146,7 @@ cb_bucket_stats(int argc, VALUE *argv, VALUE self)
     }
     bucket->nbytes += params.npayload;
     if (bucket->async) {
-        maybe_do_loop(bucket);
+        cb_maybe_do_loop(bucket);
         return Qnil;
     } else {
         if (ctx->nqueries > 0) {

@@ -20,57 +20,61 @@
 static VALUE
 storage_observe_callback(VALUE args, VALUE cookie)
 {
-    struct context_st *ctx = (struct context_st *)cookie;
+    struct cb_context_st *ctx = (struct cb_context_st *)cookie;
+    struct cb_bucket_st *bucket = ctx->bucket;
     VALUE res = rb_ary_shift(args);

     if (ctx->proc != Qnil) {
-        rb_ivar_set(res, id_iv_operation, ctx->operation);
+        rb_ivar_set(res, cb_id_iv_operation, ctx->operation);
         cb_proc_call(ctx->proc, 1, res);
     }
     if (!RTEST(ctx->observe_options)) {
         ctx->nqueries--;
         if (ctx->nqueries == 0) {
-            cb_gc_unprotect(ctx->bucket, ctx->proc);
+            cb_gc_unprotect(bucket, ctx->proc);
+            if (bucket->async) {
+                xfree(ctx);
+            }
         }
     }
     return Qnil;
 }

 void
-storage_callback(lcb_t handle, const void *cookie, lcb_storage_t operation,
+cb_storage_callback(lcb_t handle, const void *cookie, lcb_storage_t operation,
         lcb_error_t error, const lcb_store_resp_t *resp)
 {
-    struct context_st *ctx = (struct context_st *)cookie;
-    struct bucket_st *bucket = ctx->bucket;
+    struct cb_context_st *ctx = (struct cb_context_st *)cookie;
+    struct cb_bucket_st *bucket = ctx->bucket;
     VALUE key, cas, *rv = ctx->rv, exc, res;

     key = STR_NEW((const char*)resp->v.v0.key, resp->v.v0.nkey);
-    strip_key_prefix(bucket, key);
+    cb_strip_key_prefix(bucket, key);

     cas = resp->v.v0.cas > 0 ? ULL2NUM(resp->v.v0.cas) : Qnil;
     switch(operation) {
     case LCB_ADD:
-        ctx->operation = sym_add;
+        ctx->operation = cb_sym_add;
         break;
     case LCB_REPLACE:
-        ctx->operation = sym_replace;
+        ctx->operation = cb_sym_replace;
         break;
     case LCB_SET:
-        ctx->operation = sym_set;
+        ctx->operation = cb_sym_set;
         break;
     case LCB_APPEND:
-        ctx->operation = sym_append;
+        ctx->operation = cb_sym_append;
         break;
     case LCB_PREPEND:
-        ctx->operation = sym_prepend;
+        ctx->operation = cb_sym_prepend;
         break;
     default:
         ctx->operation = Qnil;
     }
     exc = cb_check_error(error, "failed to store value", key);
     if (exc != Qnil) {
-        rb_ivar_set(exc, id_iv_cas, cas);
-        rb_ivar_set(exc, id_iv_operation, ctx->operation);
+        rb_ivar_set(exc, cb_id_iv_cas, cas);
+        rb_ivar_set(exc, cb_id_iv_operation, ctx->operation);
         if (NIL_P(ctx->exception)) {
             ctx->exception = cb_gc_protect(bucket, exc);
         }

@@ -82,15 +86,15 @@ storage_callback(lcb_t handle, const void *cookie, lcb_storage_t operation,
             args[0] = rb_hash_new();
             rb_hash_aset(args[0], key, cas);
             args[1] = ctx->observe_options;
-            rb_block_call(bucket->self, id_observe_and_wait, 2, args,
+            rb_block_call(bucket->self, cb_id_observe_and_wait, 2, args,
                     storage_observe_callback, (VALUE)ctx);
             cb_gc_unprotect(bucket, ctx->observe_options);
         } else if (ctx->proc != Qnil) {
-            res = rb_class_new_instance(0, NULL, cResult);
-            rb_ivar_set(res, id_iv_error, exc);
-            rb_ivar_set(res, id_iv_key, key);
-            rb_ivar_set(res, id_iv_operation, ctx->operation);
-            rb_ivar_set(res, id_iv_cas, cas);
+            res = rb_class_new_instance(0, NULL, cb_cResult);
+            rb_ivar_set(res, cb_id_iv_error, exc);
+            rb_ivar_set(res, cb_id_iv_key, key);
+            rb_ivar_set(res, cb_id_iv_operation, ctx->operation);
+            rb_ivar_set(res, cb_id_iv_cas, cas);
             cb_proc_call(ctx->proc, 1, res);
         }
     } else { /* synchronous */

@@ -101,6 +105,9 @@ storage_callback(lcb_t handle, const void *cookie, lcb_storage_t operation,
         ctx->nqueries--;
         if (ctx->nqueries == 0) {
             cb_gc_unprotect(bucket, ctx->proc);
+            if (bucket->async) {
+                xfree(ctx);
+            }
         }
     }
     (void)handle;

@@ -109,27 +116,27 @@ storage_callback(lcb_t handle, const void *cookie, lcb_storage_t operation,
 static inline VALUE
 cb_bucket_store(lcb_storage_t cmd, int argc, VALUE *argv, VALUE self)
 {
-    struct bucket_st *bucket = DATA_PTR(self);
-    struct context_st *ctx;
+    struct cb_bucket_st *bucket = DATA_PTR(self);
+    struct cb_context_st *ctx;
     VALUE args, rv, proc, exc, obs = Qnil;
     lcb_error_t err;
-    struct params_st params;
+    struct cb_params_st params;

     if (bucket->handle == NULL) {
-        rb_raise(eConnectError, "closed connection");
+        rb_raise(cb_eConnectError, "closed connection");
     }
     rb_scan_args(argc, argv, "0*&", &args, &proc);
     if (!bucket->async && proc != Qnil) {
         rb_raise(rb_eArgError, "synchronous mode doesn't support callbacks");
     }
-    memset(&params, 0, sizeof(struct params_st));
-    params.type = cmd_store;
+    memset(&params, 0, sizeof(struct cb_params_st));
+    params.type = cb_cmd_store;
     params.bucket = bucket;
     params.cmd.store.operation = cmd;
     cb_params_build(&params, RARRAY_LEN(args), args);
-    ctx = xcalloc(1, sizeof(struct context_st));
+    ctx = xcalloc(1, sizeof(struct cb_context_st));
     if (ctx == NULL) {
-        rb_raise(eClientNoMemoryError, "failed to allocate memory for context");
+        rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for context");
     }
     rv = rb_hash_new();
     ctx->rv = &rv;

@@ -148,7 +155,7 @@ cb_bucket_store(lcb_storage_t cmd, int argc, VALUE *argv, VALUE self)
     }
     bucket->nbytes += params.npayload;
     if (bucket->async) {
-        maybe_do_loop(bucket);
+        cb_maybe_do_loop(bucket);
         return Qnil;
     } else {
         if (ctx->nqueries > 0) {

@@ -168,7 +175,7 @@ cb_bucket_store(lcb_storage_t cmd, int argc, VALUE *argv, VALUE self)
         }
         if (RTEST(obs)) {
             cb_gc_unprotect(bucket, obs);
-            return rb_funcall(bucket->self, id_observe_and_wait, 2, rv, obs);
+            return rb_funcall(bucket->self, cb_id_observe_and_wait, 2, rv, obs);
         }
         if (params.cmd.store.num > 1) {
             return rv; /* return as a hash {key => cas, ...} */
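
None of the hunks above shows where the renamed handlers get registered with libcouchbase; for context, a hedged sketch of how that wiring would look with the libcouchbase 2.x setters. Only the callback names come from this diff; the helper name and call site are assumptions, as is the declaration of the callbacks in couchbase_ext.h:

/* Sketch: registering the cb_-prefixed handlers; the helper name is invented. */
#include <libcouchbase/couchbase.h>
#include "couchbase_ext.h"

static void
demo_setup_callbacks(lcb_t handle)
{
    (void)lcb_set_observe_callback(handle, cb_observe_callback);
    (void)lcb_set_stat_callback(handle, cb_stat_callback);
    (void)lcb_set_store_callback(handle, cb_storage_callback);
}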