couchbase 1.2.0.z.beta4-x86-mingw32 → 1.2.0.z.beta5-x86-mingw32

@@ -26,7 +26,7 @@ cb_timer_free(void *ptr)
 void
 cb_timer_mark(void *ptr)
 {
-    struct timer_st *timer = ptr;
+    struct cb_timer_st *timer = ptr;
     if (timer) {
        rb_gc_mark(timer->callback);
     }
@@ -36,10 +36,10 @@ cb_timer_mark(void *ptr)
 cb_timer_alloc(VALUE klass)
 {
     VALUE obj;
-    struct timer_st *timer;
+    struct cb_timer_st *timer;
 
     /* allocate new bucket struct and set it to zero */
-    obj = Data_Make_Struct(klass, struct timer_st, cb_timer_mark,
+    obj = Data_Make_Struct(klass, struct cb_timer_st, cb_timer_mark,
            cb_timer_free, timer);
     return obj;
 }
@@ -56,7 +56,7 @@ cb_timer_alloc(VALUE klass)
 cb_timer_inspect(VALUE self)
 {
     VALUE str;
-    struct timer_st *tm = DATA_PTR(self);
+    struct cb_timer_st *tm = DATA_PTR(self);
     char buf[200];
 
     str = rb_str_buf_new2("#<");
@@ -97,7 +97,7 @@ cb_timer_inspect(VALUE self)
 VALUE
 cb_timer_cancel(VALUE self)
 {
-    struct timer_st *tm = DATA_PTR(self);
+    struct cb_timer_st *tm = DATA_PTR(self);
     lcb_timer_destroy(tm->bucket->handle, tm->timer);
     return self;
 }
@@ -105,7 +105,7 @@ cb_timer_cancel(VALUE self)
 static VALUE
 trigger_timer(VALUE timer)
 {
-    struct timer_st *tm = DATA_PTR(timer);
+    struct cb_timer_st *tm = DATA_PTR(timer);
     return cb_proc_call(tm->callback, 1, timer);
 }
 
@@ -113,7 +113,7 @@ trigger_timer(VALUE timer)
 timer_callback(lcb_timer_t timer, lcb_t instance,
        const void *cookie)
 {
-    struct timer_st *tm = (struct timer_st *)cookie;
+    struct cb_timer_st *tm = (struct cb_timer_st *)cookie;
     int error = 0;
 
     rb_protect(trigger_timer, tm->self, &error);
@@ -162,14 +162,14 @@ timer_callback(lcb_timer_t timer, lcb_t instance,
 VALUE
 cb_timer_init(int argc, VALUE *argv, VALUE self)
 {
-    struct timer_st *tm = DATA_PTR(self);
+    struct cb_timer_st *tm = DATA_PTR(self);
     VALUE bucket, opts, timeout, exc, cb;
     lcb_error_t err;
 
     rb_need_block();
     rb_scan_args(argc, argv, "21&", &bucket, &timeout, &opts, &cb);
 
-    if (CLASS_OF(bucket) != cBucket) {
+    if (CLASS_OF(bucket) != cb_cBucket) {
        rb_raise(rb_eTypeError, "wrong argument type (expected Couchbase::Bucket)");
     }
     tm->self = self;
@@ -178,7 +178,7 @@ cb_timer_init(int argc, VALUE *argv, VALUE self)
     tm->bucket = DATA_PTR(bucket);
     if (opts != Qnil) {
        Check_Type(opts, T_HASH);
-       tm->periodic = RTEST(rb_hash_aref(opts, sym_periodic));
+       tm->periodic = RTEST(rb_hash_aref(opts, cb_sym_periodic));
     }
     tm->timer = lcb_timer_create(tm->bucket->handle, tm, tm->usec,
            tm->periodic, timer_callback, &err);
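
Note: every hunk in this file belongs to the same mechanical rename (timer_st to cb_timer_st, cBucket to cb_cBucket, sym_periodic to cb_sym_periodic); the struct layout itself is untouched. For orientation only, the fields referenced above suggest a shape roughly like the sketch below. It is inferred from usage, not copied from couchbase_ext.h, and the typedefs are placeholders for the real Ruby and libcouchbase types.

#include <stdint.h>

/* Hypothetical sketch only, reconstructed from the fields used above. */
typedef unsigned long VALUE;     /* stand-in for Ruby's VALUE */
typedef void *lcb_timer_t;       /* stand-in for libcouchbase's timer handle */
struct cb_bucket_st;             /* opaque here; defined elsewhere in the extension */

struct cb_timer_st {
    struct cb_bucket_st *bucket; /* connection owning the timer (tm->bucket->handle) */
    lcb_timer_t timer;           /* handle returned by lcb_timer_create() */
    VALUE self;                  /* the Ruby timer object passed to rb_protect() */
    VALUE callback;              /* block given to #initialize, marked in cb_timer_mark() */
    uint32_t usec;               /* interval handed to lcb_timer_create() */
    int periodic;                /* set from the :periodic option */
};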
@@ -18,20 +18,20 @@
 #include "couchbase_ext.h"
 
 void
-touch_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_touch_resp_t *resp)
+cb_touch_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_touch_resp_t *resp)
 {
-    struct context_st *ctx = (struct context_st *)cookie;
-    struct bucket_st *bucket = ctx->bucket;
+    struct cb_context_st *ctx = (struct cb_context_st *)cookie;
+    struct cb_bucket_st *bucket = ctx->bucket;
     VALUE key, *rv = ctx->rv, exc = Qnil, res;
 
     ctx->nqueries--;
     key = STR_NEW((const char*)resp->v.v0.key, resp->v.v0.nkey);
-    strip_key_prefix(bucket, key);
+    cb_strip_key_prefix(bucket, key);
 
     if (error != LCB_KEY_ENOENT || !ctx->quiet) {
        exc = cb_check_error(error, "failed to touch value", key);
        if (exc != Qnil) {
-           rb_ivar_set(exc, id_iv_operation, sym_touch);
+           rb_ivar_set(exc, cb_id_iv_operation, cb_sym_touch);
            if (NIL_P(ctx->exception)) {
                ctx->exception = cb_gc_protect(bucket, exc);
            }
@@ -40,10 +40,10 @@ touch_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_to
 
     if (bucket->async) { /* asynchronous */
        if (ctx->proc != Qnil) {
-           res = rb_class_new_instance(0, NULL, cResult);
-           rb_ivar_set(res, id_iv_error, exc);
-           rb_ivar_set(res, id_iv_operation, sym_touch);
-           rb_ivar_set(res, id_iv_key, key);
+           res = rb_class_new_instance(0, NULL, cb_cResult);
+           rb_ivar_set(res, cb_id_iv_error, exc);
+           rb_ivar_set(res, cb_id_iv_operation, cb_sym_touch);
+           rb_ivar_set(res, cb_id_iv_key, key);
            cb_proc_call(ctx->proc, 1, res);
        }
     } else { /* synchronous */
@@ -51,6 +51,9 @@ touch_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_to
     }
     if (ctx->nqueries == 0) {
        cb_gc_unprotect(bucket, ctx->proc);
+       if (bucket->async) {
+           xfree(ctx);
+       }
     }
     (void)handle;
 }
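
Note: the three added lines above are the only behavioural change in this file; everything else is the cb_ prefix rename. In asynchronous mode the per-command context is heap-allocated (see cb_bucket_touch below) and is now released by the callback itself once the last pending key has been answered. A minimal stand-alone illustration of that ownership pattern, using plain calloc/free and a fake completion loop instead of Ruby's xcalloc/xfree and libcouchbase:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for the extension's per-command context. */
struct demo_context {
    int nqueries;   /* keys still awaiting a callback */
    int async;      /* mirrors bucket->async */
};

/* Fake completion callback: the last invocation frees the context,
 * just like the xfree(ctx) added above for the async path. */
static void on_complete(struct demo_context *ctx, const char *key)
{
    ctx->nqueries--;
    printf("completed %s, %d left\n", key, ctx->nqueries);
    if (ctx->nqueries == 0 && ctx->async) {
        free(ctx);   /* the callback owns the context in async mode */
    }
}

int main(void)
{
    const char *keys[] = { "foo", "bar", "baz" };
    struct demo_context *ctx = calloc(1, sizeof(*ctx));
    if (ctx == NULL) {
        return 1;
    }
    ctx->nqueries = 3;
    ctx->async = 1;
    for (int i = 0; i < 3; i++) {
        on_complete(ctx, keys[i]);  /* in the gem this is driven by libcouchbase */
    }
    return 0;   /* ctx was already released by the final callback */
}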
@@ -123,27 +126,27 @@ touch_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_to
 VALUE
 cb_bucket_touch(int argc, VALUE *argv, VALUE self)
 {
-    struct bucket_st *bucket = DATA_PTR(self);
-    struct context_st *ctx;
+    struct cb_bucket_st *bucket = DATA_PTR(self);
+    struct cb_context_st *ctx;
     VALUE args, rv, proc, exc;
     lcb_error_t err;
-    struct params_st params;
+    struct cb_params_st params;
 
     if (bucket->handle == NULL) {
-       rb_raise(eConnectError, "closed connection");
+       rb_raise(cb_eConnectError, "closed connection");
     }
     rb_scan_args(argc, argv, "0*&", &args, &proc);
     if (!bucket->async && proc != Qnil) {
        rb_raise(rb_eArgError, "synchronous mode doesn't support callbacks");
     }
-    rb_funcall(args, id_flatten_bang, 0);
-    memset(&params, 0, sizeof(struct params_st));
-    params.type = cmd_touch;
+    rb_funcall(args, cb_id_flatten_bang, 0);
+    memset(&params, 0, sizeof(struct cb_params_st));
+    params.type = cb_cmd_touch;
     params.bucket = bucket;
     cb_params_build(&params, RARRAY_LEN(args), args);
-    ctx = xcalloc(1, sizeof(struct context_st));
+    ctx = xcalloc(1, sizeof(struct cb_context_st));
     if (ctx == NULL) {
-       rb_raise(eClientNoMemoryError, "failed to allocate memory for context");
+       rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for context");
     }
     ctx->proc = cb_gc_protect(bucket, proc);
     ctx->bucket = bucket;
@@ -162,7 +165,7 @@ cb_bucket_touch(int argc, VALUE *argv, VALUE self)
     }
     bucket->nbytes += params.npayload;
     if (bucket->async) {
-       maybe_do_loop(bucket);
+       cb_maybe_do_loop(bucket);
        return Qnil;
     } else {
        if (ctx->nqueries > 0) {
@@ -18,20 +18,20 @@
 #include "couchbase_ext.h"
 
 void
-unlock_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_unlock_resp_t *resp)
+cb_unlock_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_unlock_resp_t *resp)
 {
-    struct context_st *ctx = (struct context_st *)cookie;
-    struct bucket_st *bucket = ctx->bucket;
+    struct cb_context_st *ctx = (struct cb_context_st *)cookie;
+    struct cb_bucket_st *bucket = ctx->bucket;
     VALUE key, *rv = ctx->rv, exc = Qnil, res;
 
     ctx->nqueries--;
     key = STR_NEW((const char*)resp->v.v0.key, resp->v.v0.nkey);
-    strip_key_prefix(bucket, key);
+    cb_strip_key_prefix(bucket, key);
 
     if (error != LCB_KEY_ENOENT || !ctx->quiet) {
        exc = cb_check_error(error, "failed to unlock value", key);
        if (exc != Qnil) {
-           rb_ivar_set(exc, id_iv_operation, sym_unlock);
+           rb_ivar_set(exc, cb_id_iv_operation, cb_sym_unlock);
            if (NIL_P(ctx->exception)) {
                ctx->exception = cb_gc_protect(bucket, exc);
            }
@@ -40,10 +40,10 @@ unlock_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_u
 
     if (bucket->async) { /* asynchronous */
        if (ctx->proc != Qnil) {
-           res = rb_class_new_instance(0, NULL, cResult);
-           rb_ivar_set(res, id_iv_error, exc);
-           rb_ivar_set(res, id_iv_operation, sym_unlock);
-           rb_ivar_set(res, id_iv_key, key);
+           res = rb_class_new_instance(0, NULL, cb_cResult);
+           rb_ivar_set(res, cb_id_iv_error, exc);
+           rb_ivar_set(res, cb_id_iv_operation, cb_sym_unlock);
+           rb_ivar_set(res, cb_id_iv_key, key);
            cb_proc_call(ctx->proc, 1, res);
        }
     } else { /* synchronous */
@@ -51,6 +51,9 @@ unlock_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_u
     }
     if (ctx->nqueries == 0) {
        cb_gc_unprotect(bucket, ctx->proc);
+       if (bucket->async) {
+           xfree(ctx);
+       }
     }
     (void)handle;
 }
@@ -113,27 +116,27 @@ unlock_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_u
 VALUE
 cb_bucket_unlock(int argc, VALUE *argv, VALUE self)
 {
-    struct bucket_st *bucket = DATA_PTR(self);
-    struct context_st *ctx;
+    struct cb_bucket_st *bucket = DATA_PTR(self);
+    struct cb_context_st *ctx;
     VALUE args, rv, proc, exc;
     lcb_error_t err;
-    struct params_st params;
+    struct cb_params_st params;
 
     if (bucket->handle == NULL) {
-       rb_raise(eConnectError, "closed connection");
+       rb_raise(cb_eConnectError, "closed connection");
     }
     rb_scan_args(argc, argv, "0*&", &args, &proc);
     if (!bucket->async && proc != Qnil) {
        rb_raise(rb_eArgError, "synchronous mode doesn't support callbacks");
     }
-    rb_funcall(args, id_flatten_bang, 0);
-    memset(&params, 0, sizeof(struct params_st));
-    params.type = cmd_unlock;
+    rb_funcall(args, cb_id_flatten_bang, 0);
+    memset(&params, 0, sizeof(struct cb_params_st));
+    params.type = cb_cmd_unlock;
     params.bucket = bucket;
     cb_params_build(&params, RARRAY_LEN(args), args);
-    ctx = xcalloc(1, sizeof(struct context_st));
+    ctx = xcalloc(1, sizeof(struct cb_context_st));
     if (ctx == NULL) {
-       rb_raise(eClientNoMemoryError, "failed to allocate memory for context");
+       rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for context");
     }
     ctx->proc = cb_gc_protect(bucket, proc);
     ctx->bucket = bucket;
@@ -152,7 +155,7 @@ cb_bucket_unlock(int argc, VALUE *argv, VALUE self)
     }
     bucket->nbytes += params.npayload;
     if (bucket->async) {
-       maybe_do_loop(bucket);
+       cb_maybe_do_loop(bucket);
        return Qnil;
     } else {
        if (ctx->nqueries > 0) {
@@ -18,16 +18,16 @@
 #include "couchbase_ext.h"
 
 VALUE
-cb_gc_protect(struct bucket_st *bucket, VALUE val)
+cb_gc_protect(struct cb_bucket_st *bucket, VALUE val)
 {
     rb_hash_aset(bucket->object_space, val|1, val);
     return val;
 }
 
 VALUE
-cb_gc_unprotect(struct bucket_st *bucket, VALUE val)
+cb_gc_unprotect(struct cb_bucket_st *bucket, VALUE val)
 {
-    rb_funcall(bucket->object_space, id_delete, 1, val|1);
+    rb_funcall(bucket->object_space, cb_id_delete, 1, val|1);
     return val;
 }
 
@@ -39,7 +39,7 @@ cb_proc_call(VALUE recv, int argc, ...)
     int arity;
     int ii;
 
-    arity = FIX2INT(rb_funcall(recv, id_arity, 0));
+    arity = FIX2INT(rb_funcall(recv, cb_id_arity, 0));
     if (arity < 0) {
        arity = argc;
     }
@@ -57,13 +57,13 @@ cb_proc_call(VALUE recv, int argc, ...)
     } else {
        argv = NULL;
     }
-    return rb_funcall2(recv, id_call, arity, argv);
+    return rb_funcall2(recv, cb_id_call, arity, argv);
 }
 
 VALUE
 cb_hash_delete(VALUE hash, VALUE key)
 {
-    return rb_funcall(hash, id_delete, 1, key);
+    return rb_funcall(hash, cb_id_delete, 1, key);
 }
 
 /* Helper to convert return code from libcouchbase to meaningful exception.
@@ -83,87 +83,87 @@ cb_check_error_with_status(lcb_error_t rc, const char *msg, VALUE key,
     }
     switch (rc) {
        case LCB_AUTH_ERROR:
-           klass = eAuthError;
+           klass = cb_eAuthError;
            break;
        case LCB_DELTA_BADVAL:
-           klass = eDeltaBadvalError;
+           klass = cb_eDeltaBadvalError;
            break;
        case LCB_E2BIG:
-           klass = eTooBigError;
+           klass = cb_eTooBigError;
            break;
        case LCB_EBUSY:
-           klass = eBusyError;
+           klass = cb_eBusyError;
            break;
        case LCB_EINTERNAL:
-           klass = eInternalError;
+           klass = cb_eInternalError;
            break;
        case LCB_EINVAL:
-           klass = eInvalidError;
+           klass = cb_eInvalidError;
            break;
        case LCB_ENOMEM:
-           klass = eNoMemoryError;
+           klass = cb_eNoMemoryError;
            break;
        case LCB_ERANGE:
-           klass = eRangeError;
+           klass = cb_eRangeError;
            break;
        case LCB_ETMPFAIL:
-           klass = eTmpFailError;
+           klass = cb_eTmpFailError;
            break;
        case LCB_KEY_EEXISTS:
-           klass = eKeyExistsError;
+           klass = cb_eKeyExistsError;
            break;
        case LCB_KEY_ENOENT:
-           klass = eNotFoundError;
+           klass = cb_eNotFoundError;
            break;
        case LCB_DLOPEN_FAILED:
-           klass = eDlopenFailedError;
+           klass = cb_eDlopenFailedError;
            break;
        case LCB_DLSYM_FAILED:
-           klass = eDlsymFailedError;
+           klass = cb_eDlsymFailedError;
            break;
        case LCB_NETWORK_ERROR:
-           klass = eNetworkError;
+           klass = cb_eNetworkError;
            break;
        case LCB_NOT_MY_VBUCKET:
-           klass = eNotMyVbucketError;
+           klass = cb_eNotMyVbucketError;
            break;
        case LCB_NOT_STORED:
-           klass = eNotStoredError;
+           klass = cb_eNotStoredError;
            break;
        case LCB_NOT_SUPPORTED:
-           klass = eNotSupportedError;
+           klass = cb_eNotSupportedError;
            break;
        case LCB_UNKNOWN_COMMAND:
-           klass = eUnknownCommandError;
+           klass = cb_eUnknownCommandError;
            break;
        case LCB_UNKNOWN_HOST:
-           klass = eUnknownHostError;
+           klass = cb_eUnknownHostError;
            break;
        case LCB_PROTOCOL_ERROR:
-           klass = eProtocolError;
+           klass = cb_eProtocolError;
            break;
        case LCB_ETIMEDOUT:
-           klass = eTimeoutError;
+           klass = cb_eTimeoutError;
            break;
        case LCB_CONNECT_ERROR:
-           klass = eConnectError;
+           klass = cb_eConnectError;
            break;
        case LCB_BUCKET_ENOENT:
-           klass = eBucketNotFoundError;
+           klass = cb_eBucketNotFoundError;
            break;
        case LCB_CLIENT_ENOMEM:
-           klass = eClientNoMemoryError;
+           klass = cb_eClientNoMemoryError;
            break;
        case LCB_CLIENT_ETMPFAIL:
-           klass = eClientTmpFailError;
+           klass = cb_eClientTmpFailError;
            break;
        case LCB_EBADHANDLE:
-           klass = eBadHandleError;
+           klass = cb_eBadHandleError;
            break;
        case LCB_ERROR:
            /* fall through */
        default:
-           klass = eLibcouchbaseError;
+           klass = cb_eLibcouchbaseError;
     }
 
     str = rb_str_buf_new2(msg ? msg : "");
@@ -174,7 +174,7 @@ cb_check_error_with_status(lcb_error_t rc, const char *msg, VALUE key,
     }
     if (status > 0) {
        const char *reason = NULL;
-       klass = eHTTPError;
+       klass = cb_eHTTPError;
        snprintf(buf, 300, "status=\"%d\"", status);
        rb_str_buf_cat2(str, buf);
        switch (status) {
@@ -272,11 +272,11 @@ cb_check_error_with_status(lcb_error_t rc, const char *msg, VALUE key,
     snprintf(buf, 300, "error=0x%02x)", rc);
     rb_str_buf_cat2(str, buf);
     exc = rb_exc_new3(klass, str);
-    rb_ivar_set(exc, id_iv_error, INT2FIX(rc));
-    rb_ivar_set(exc, id_iv_key, key);
-    rb_ivar_set(exc, id_iv_cas, Qnil);
-    rb_ivar_set(exc, id_iv_operation, Qnil);
-    rb_ivar_set(exc, id_iv_status, status ? INT2FIX(status) : Qnil);
+    rb_ivar_set(exc, cb_id_iv_error, INT2FIX(rc));
+    rb_ivar_set(exc, cb_id_iv_key, key);
+    rb_ivar_set(exc, cb_id_iv_cas, Qnil);
+    rb_ivar_set(exc, cb_id_iv_operation, Qnil);
+    rb_ivar_set(exc, cb_id_iv_status, status ? INT2FIX(status) : Qnil);
     return exc;
 }
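
Note: these hunks only rename the exception classes and instance-variable IDs; the contract of cb_check_error_with_status()/cb_check_error() is unchanged: return Qnil on success, otherwise return (not raise) an exception object carrying @error, @key, @cas, @operation and @status. A hedged usage sketch, written here purely for illustration (it assumes couchbase_ext.h is in scope and is not code from the gem):

/* Illustrative helper, not part of the gem: raise immediately in the
 * synchronous case, mirroring how the callbacks above stash the same
 * exception in ctx->exception instead. */
static void
raise_if_failed(lcb_error_t err, VALUE key)
{
    VALUE exc = cb_check_error(err, "operation failed", key);
    if (exc != Qnil) {
        rb_exc_raise(exc);
    }
}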
 
@@ -288,35 +288,35 @@ cb_check_error(lcb_error_t rc, const char *msg, VALUE key)
 
 
 uint32_t
-flags_set_format(uint32_t flags, ID format)
+cb_flags_set_format(uint32_t flags, ID format)
 {
-    flags &= ~((uint32_t)FMT_MASK); /* clear format bits */
-
-    if (format == sym_document) {
-       return flags | FMT_DOCUMENT;
-    } else if (format == sym_marshal) {
-       return flags | FMT_MARSHAL;
-    } else if (format == sym_plain) {
-       return flags | FMT_PLAIN;
+    flags &= ~((uint32_t)CB_FMT_MASK); /* clear format bits */
+
+    if (format == cb_sym_document) {
+       return flags | CB_FMT_DOCUMENT;
+    } else if (format == cb_sym_marshal) {
+       return flags | CB_FMT_MARSHAL;
+    } else if (format == cb_sym_plain) {
+       return flags | CB_FMT_PLAIN;
     }
     return flags; /* document is the default */
 }
 
 ID
-flags_get_format(uint32_t flags)
+cb_flags_get_format(uint32_t flags)
 {
-    flags &= FMT_MASK; /* select format bits */
+    flags &= CB_FMT_MASK; /* select format bits */
 
     switch (flags) {
-       case FMT_DOCUMENT:
-           return sym_document;
-       case FMT_MARSHAL:
-           return sym_marshal;
-       case FMT_PLAIN:
+       case CB_FMT_DOCUMENT:
+           return cb_sym_document;
+       case CB_FMT_MARSHAL:
+           return cb_sym_marshal;
+       case CB_FMT_PLAIN:
            /* fall through */
        default:
            /* all other formats treated as plain */
-           return sym_plain;
+           return cb_sym_plain;
     }
 }
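
Note: cb_flags_set_format() and cb_flags_get_format() simply partition the 32-bit flags word: the bits selected by CB_FMT_MASK carry the value format and all other bits pass through untouched. The self-contained sketch below shows that round trip with made-up constants; the real CB_FMT_* values are defined in couchbase_ext.h and do not appear in this diff.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical values, chosen only for this demo. */
#define DEMO_FMT_MASK     0x3u
#define DEMO_FMT_DOCUMENT 0x0u
#define DEMO_FMT_MARSHAL  0x1u
#define DEMO_FMT_PLAIN    0x2u

static uint32_t demo_set_format(uint32_t flags, uint32_t fmt)
{
    flags &= ~DEMO_FMT_MASK;      /* clear the format bits */
    return flags | fmt;           /* install the new format */
}

static uint32_t demo_get_format(uint32_t flags)
{
    return flags & DEMO_FMT_MASK; /* select the format bits */
}

int main(void)
{
    uint32_t flags = 0xABCD0000u;                     /* unrelated bits stay intact */
    flags = demo_set_format(flags, DEMO_FMT_MARSHAL);
    printf("format bits: %u, other bits: 0x%08x\n",
            demo_get_format(flags), flags & ~DEMO_FMT_MASK);
    return 0;
}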
 
@@ -325,14 +325,14 @@ flags_get_format(uint32_t flags)
 do_encode(VALUE *args)
 {
     VALUE val = args[0];
-    uint32_t flags = ((uint32_t)args[1] & FMT_MASK);
+    uint32_t flags = ((uint32_t)args[1] & CB_FMT_MASK);
 
     switch (flags) {
-       case FMT_DOCUMENT:
-           return rb_funcall(mMultiJson, id_dump, 1, val);
-       case FMT_MARSHAL:
-           return rb_funcall(mMarshal, id_dump, 1, val);
-       case FMT_PLAIN:
+       case CB_FMT_DOCUMENT:
+           return rb_funcall(cb_mMultiJson, cb_id_dump, 1, val);
+       case CB_FMT_MARSHAL:
+           return rb_funcall(cb_mMarshal, cb_id_dump, 1, val);
+       case CB_FMT_PLAIN:
            /* fall through */
        default:
            /* all other formats treated as plain */
@@ -347,22 +347,22 @@ do_decode(VALUE *args)
     VALUE force_format = args[2];
 
     if (TYPE(force_format) == T_SYMBOL) {
-       if (force_format == sym_document) {
-           return rb_funcall(mMultiJson, id_load, 1, blob);
-       } else if (force_format == sym_marshal) {
-           return rb_funcall(mMarshal, id_load, 1, blob);
-       } else { /* sym_plain and any other symbol */
+       if (force_format == cb_sym_document) {
+           return rb_funcall(cb_mMultiJson, cb_id_load, 1, blob);
+       } else if (force_format == cb_sym_marshal) {
+           return rb_funcall(cb_mMarshal, cb_id_load, 1, blob);
+       } else { /* cb_sym_plain and any other cb_symbol */
            return blob;
        }
     } else {
-       uint32_t flags = ((uint32_t)args[1] & FMT_MASK);
+       uint32_t flags = ((uint32_t)args[1] & CB_FMT_MASK);
 
        switch (flags) {
-           case FMT_DOCUMENT:
-               return rb_funcall(mMultiJson, id_load, 1, blob);
-           case FMT_MARSHAL:
-               return rb_funcall(mMarshal, id_load, 1, blob);
-           case FMT_PLAIN:
+           case CB_FMT_DOCUMENT:
+               return rb_funcall(cb_mMultiJson, cb_id_load, 1, blob);
+           case CB_FMT_MARSHAL:
+               return rb_funcall(cb_mMarshal, cb_id_load, 1, blob);
+           case CB_FMT_PLAIN:
               /* fall through */
            default:
               /* all other formats treated as plain */
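
Note: do_encode() and do_decode() are the workers behind the cb_encode_value()/cb_decode_value() wrappers renamed further down; with the document format they delegate to MultiJson, with marshal to Marshal, and plain data passes through untouched. A hedged usage sketch from inside the extension, written for illustration only (assumes couchbase_ext.h; not code from the gem):

/* Illustrative only: round-trip a Ruby value through the document
 * (JSON) format using the renamed helpers. */
static VALUE
demo_roundtrip(VALUE val)
{
    uint32_t flags = cb_flags_set_format(0, cb_sym_document);
    VALUE blob = cb_encode_value(val, flags);   /* MultiJson.dump under the hood */
    return cb_decode_value(blob, flags, Qnil);  /* MultiJson.load under the hood */
}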
@@ -379,7 +379,7 @@ coding_failed(VALUE unused, VALUE exc)
 }
 
 VALUE
-encode_value(VALUE val, uint32_t flags)
+cb_encode_value(VALUE val, uint32_t flags)
 {
     VALUE blob, args[2];
 
@@ -390,7 +390,7 @@ encode_value(VALUE val, uint32_t flags)
 }
 
 VALUE
-decode_value(VALUE blob, uint32_t flags, VALUE force_format)
+cb_decode_value(VALUE blob, uint32_t flags, VALUE force_format)
 {
     VALUE val, args[3];
 
@@ -406,7 +406,7 @@ decode_value(VALUE blob, uint32_t flags, VALUE force_format)
 }
 
 void
-strip_key_prefix(struct bucket_st *bucket, VALUE key)
+cb_strip_key_prefix(struct cb_bucket_st *bucket, VALUE key)
 {
     if (bucket->key_prefix) {
        rb_str_update(key, 0, RSTRING_LEN(bucket->key_prefix_val), STR_NEW_CSTR(""));
@@ -414,7 +414,7 @@ strip_key_prefix(struct bucket_st *bucket, VALUE key)
 }
 
 VALUE
-unify_key(struct bucket_st *bucket, VALUE key, int apply_prefix)
+cb_unify_key(struct cb_bucket_st *bucket, VALUE key, int apply_prefix)
 {
     VALUE ret = Qnil, tmp;
 
@@ -434,7 +434,7 @@ unify_key(struct bucket_st *bucket, VALUE key, int apply_prefix)
 }
 
 void
-cb_build_headers(struct context_st *ctx, const char * const *headers)
+cb_build_headers(struct cb_context_st *ctx, const char * const *headers)
 {
     if (!ctx->headers_built) {
        VALUE key = Qnil, val;