couchbase 1.2.1-x86-mingw32 → 1.2.2-x86-mingw32

@@ -22,13 +22,13 @@ cb_observe_callback(lcb_t handle, const void *cookie, lcb_error_t error, const l
 {
     struct cb_context_st *ctx = (struct cb_context_st *)cookie;
     struct cb_bucket_st *bucket = ctx->bucket;
-    VALUE key, res, *rv = ctx->rv, exc;
+    VALUE key, res, exc;
 
     if (resp->v.v0.key) {
         key = STR_NEW((const char*)resp->v.v0.key, resp->v.v0.nkey);
         exc = cb_check_error(error, "failed to execute observe request", key);
         if (exc != Qnil) {
-            ctx->exception = cb_gc_protect(bucket, exc);
+            ctx->exception = exc;
         }
         res = rb_class_new_instance(0, NULL, cb_cResult);
         rb_ivar_set(res, cb_id_iv_completed, Qfalse);
@@ -58,10 +58,10 @@ cb_observe_callback(lcb_t handle, const void *cookie, lcb_error_t error, const l
             }
         } else { /* synchronous */
             if (NIL_P(ctx->exception)) {
-                VALUE stats = rb_hash_aref(*rv, key);
+                VALUE stats = rb_hash_aref(ctx->rv, key);
                 if (NIL_P(stats)) {
                     stats = rb_ary_new();
-                    rb_hash_aset(*rv, key, stats);
+                    rb_hash_aset(ctx->rv, key, stats);
                 }
                 rb_ary_push(stats, res);
             }
@@ -73,9 +73,9 @@ cb_observe_callback(lcb_t handle, const void *cookie, lcb_error_t error, const l
             cb_proc_call(bucket, ctx->proc, 1, res);
         }
         ctx->nqueries--;
-        cb_gc_unprotect(bucket, ctx->proc);
+        ctx->proc = Qnil;
         if (bucket->async) {
-            free(ctx);
+            cb_context_free(ctx);
         }
     }
     (void)handle;
@@ -115,37 +115,29 @@ cb_bucket_observe(int argc, VALUE *argv, VALUE self)
 {
     struct cb_bucket_st *bucket = DATA_PTR(self);
     struct cb_context_st *ctx;
-    VALUE args, rv, proc, exc;
+    VALUE rv, proc, exc;
     lcb_error_t err;
     struct cb_params_st params;
 
-    if (bucket->handle == NULL) {
-        rb_raise(cb_eConnectError, "closed connection");
+    if (!cb_bucket_connected_bang(bucket, cb_sym_observe)) {
+        return Qnil;
     }
-    rb_scan_args(argc, argv, "0*&", &args, &proc);
+
+    memset(&params, 0, sizeof(struct cb_params_st));
+    rb_scan_args(argc, argv, "0*&", &params.args, &proc);
     if (!bucket->async && proc != Qnil) {
         rb_raise(rb_eArgError, "synchronous mode doesn't support callbacks");
     }
-    memset(&params, 0, sizeof(struct cb_params_st));
     params.type = cb_cmd_observe;
     params.bucket = bucket;
-    cb_params_build(&params, RARRAY_LEN(args), args);
-    ctx = calloc(1, sizeof(struct cb_context_st));
-    if (ctx == NULL) {
-        rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for context");
-    }
-    ctx->proc = cb_gc_protect(bucket, proc);
-    ctx->bucket = bucket;
-    rv = rb_hash_new();
-    ctx->rv = &rv;
-    ctx->exception = Qnil;
-    ctx->nqueries = params.cmd.observe.num;
+    cb_params_build(&params);
+    ctx = cb_context_alloc_common(bucket, proc, params.cmd.observe.num);
     err = lcb_observe(bucket->handle, (const void *)ctx,
             params.cmd.observe.num, params.cmd.observe.ptr);
     cb_params_destroy(&params);
     exc = cb_check_error(err, "failed to schedule observe request", Qnil);
     if (exc != Qnil) {
-        free(ctx);
+        cb_context_free(ctx);
         rb_exc_raise(exc);
     }
     bucket->nbytes += params.npayload;
@@ -158,9 +150,9 @@ cb_bucket_observe(int argc, VALUE *argv, VALUE self)
         lcb_wait(bucket->handle);
     }
     exc = ctx->exception;
-    free(ctx);
+    rv = ctx->rv;
+    cb_context_free(ctx);
     if (exc != Qnil) {
-        cb_gc_unprotect(bucket, exc);
         rb_exc_raise(exc);
     }
     exc = bucket->exception;
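
The hunks above swap the hand-rolled context bookkeeping (calloc plus cb_gc_protect/cb_gc_unprotect) for shared helpers, and change ctx->rv from a pointer into the caller's stack to a VALUE owned by the context itself. The helpers' bodies are not part of this diff; the sketch below is a hypothetical reconstruction inferred only from how the hunks use them. The real implementation must also keep ctx->rv and ctx->proc visible to Ruby's GC (e.g. by marking them from the bucket), since they no longer live on the stack or in the old protection table.

    /* Hypothetical reconstruction, not the gem's actual source. */
    struct cb_context_st *
    cb_context_alloc_common(struct cb_bucket_st *bucket, VALUE proc, size_t nqueries)
    {
        struct cb_context_st *ctx = calloc(1, sizeof(*ctx));
        if (ctx == NULL) {
            rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for context");
        }
        ctx->bucket = bucket;
        ctx->proc = proc;
        ctx->rv = rb_hash_new();    /* a plain VALUE now, not a VALUE* into the stack */
        ctx->exception = Qnil;
        ctx->nqueries = nqueries;
        return ctx;
    }

    void
    cb_context_free(struct cb_context_st *ctx)
    {
        free(ctx);    /* plus, presumably, dropping any GC registration */
    }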
@@ -0,0 +1,171 @@
+/* vim: ft=c et ts=8 sts=4 sw=4 cino=
+ *
+ *     Copyright 2012 Couchbase, Inc.
+ *
+ *   Licensed under the Apache License, Version 2.0 (the "License");
+ *   you may not use this file except in compliance with the License.
+ *   You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *   Unless required by applicable law or agreed to in writing, software
+ *   distributed under the License is distributed on an "AS IS" BASIS,
+ *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *   See the License for the specific language governing permissions and
+ *   limitations under the License.
+ */
+
+#include "couchbase_ext.h"
+
+#ifndef _WIN32
+
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#ifndef RUBY_WIN32_H
+# include <unistd.h>
+#ifdef HAVE_FCNTL_H
+# include <fcntl.h>
+#endif
+#define INVALID_SOCKET (-1)
+#else /* RUBY_WIN32_h */
+static st_table *socket_2_fd = NULL;
+#endif
+
+/* Copied from libev plugin */
+lcb_ssize_t
+cb_io_recv(struct lcb_io_opt_st *iops, lcb_socket_t sock,
+        void *buffer, lcb_size_t len, int flags)
+{
+    lcb_ssize_t ret = recv(sock, buffer, len, flags);
+    if (ret < 0) {
+        iops->v.v0.error = errno;
+    }
+    return ret;
+}
+
+lcb_ssize_t
+cb_io_recvv(struct lcb_io_opt_st *iops, lcb_socket_t sock,
+        struct lcb_iovec_st *iov, lcb_size_t niov)
+{
+    struct msghdr msg;
+    struct iovec vec[2];
+    lcb_ssize_t ret;
+
+    if (niov != 2) {
+        return -1;
+    }
+    memset(&msg, 0, sizeof(msg));
+    msg.msg_iov = vec;
+    msg.msg_iovlen = iov[1].iov_len ? (lcb_size_t)2 : (lcb_size_t)1;
+    msg.msg_iov[0].iov_base = iov[0].iov_base;
+    msg.msg_iov[0].iov_len = iov[0].iov_len;
+    msg.msg_iov[1].iov_base = iov[1].iov_base;
+    msg.msg_iov[1].iov_len = iov[1].iov_len;
+    ret = recvmsg(sock, &msg, 0);
+
+    if (ret < 0) {
+        iops->v.v0.error = errno;
+    }
+
+    return ret;
+}
+
+lcb_ssize_t
+cb_io_send(struct lcb_io_opt_st *iops, lcb_socket_t sock,
+        const void *msg, lcb_size_t len, int flags)
+{
+    lcb_ssize_t ret = send(sock, msg, len, flags);
+    if (ret < 0) {
+        iops->v.v0.error = errno;
+    }
+    return ret;
+}
+
+lcb_ssize_t
+cb_io_sendv(struct lcb_io_opt_st *iops, lcb_socket_t sock,
+        struct lcb_iovec_st *iov, lcb_size_t niov)
+{
+    struct msghdr msg;
+    struct iovec vec[2];
+    lcb_ssize_t ret;
+
+    if (niov != 2) {
+        return -1;
+    }
+    memset(&msg, 0, sizeof(msg));
+    msg.msg_iov = vec;
+    msg.msg_iovlen = iov[1].iov_len ? (lcb_size_t)2 : (lcb_size_t)1;
+    msg.msg_iov[0].iov_base = iov[0].iov_base;
+    msg.msg_iov[0].iov_len = iov[0].iov_len;
+    msg.msg_iov[1].iov_base = iov[1].iov_base;
+    msg.msg_iov[1].iov_len = iov[1].iov_len;
+    ret = sendmsg(sock, &msg, 0);
+
+    if (ret < 0) {
+        iops->v.v0.error = errno;
+    }
+    return ret;
+}
+
+static int
+make_socket_nonblocking(lcb_socket_t sock)
+{
+    int flags = 0;
+#ifdef F_GETFL
+    if ((flags = fcntl(sock, F_GETFL, NULL)) < 0) {
+        return -1;
+    }
+#endif
+    if (fcntl(sock, F_SETFL, flags | O_NONBLOCK) == -1) {
+        return -1;
+    }
+
+    return 0;
+}
+
+static int
+close_socket(lcb_socket_t sock)
+{
+    return close(sock);
+}
+
+lcb_socket_t
+cb_io_socket(struct lcb_io_opt_st *iops, int domain, int type,
+        int protocol)
+{
+    lcb_socket_t sock = socket(domain, type, protocol);
+    if (sock == INVALID_SOCKET) {
+        iops->v.v0.error = errno;
+    } else {
+        if (make_socket_nonblocking(sock) != 0) {
+            int error = errno;
+            iops->v.v0.close(iops, sock);
+            iops->v.v0.error = error;
+            sock = INVALID_SOCKET;
+        }
+    }
+
+    return sock;
+}
+
+void
+cb_io_close(struct lcb_io_opt_st *iops, lcb_socket_t sock)
+{
+    close_socket(sock);
+    (void)iops;
+}
+
+int
+cb_io_connect(struct lcb_io_opt_st *iops, lcb_socket_t sock,
+        const struct sockaddr *name, unsigned int namelen)
+{
+    int ret = connect(sock, name, (socklen_t)namelen);
+    if (ret < 0) {
+        iops->v.v0.error = errno;
+    }
+    return ret;
+}
+
+#endif
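
This new file adds BSD-socket IO primitives for the non-Windows build; each reports failure through iops->v.v0.error instead of raising, which is the shape libcouchbase expects from a version-0 IO-operations table (cb_io_socket already calls through the table's own close slot). The wiring code itself is outside this hunk; the sketch below shows how such a table is typically populated, with field names per the v0 iops layout in libcouchbase 2.x.

    /* Illustrative only; the gem's actual plugin setup is not shown in this diff. */
    static void
    wire_io_table(struct lcb_io_opt_st *iops)
    {
        iops->version = 0;
        iops->v.v0.recv    = cb_io_recv;
        iops->v.v0.recvv   = cb_io_recvv;
        iops->v.v0.send    = cb_io_send;
        iops->v.v0.sendv   = cb_io_sendv;
        iops->v.v0.socket  = cb_io_socket;
        iops->v.v0.close   = cb_io_close;
        iops->v.v0.connect = cb_io_connect;
        /* event-loop hooks (events, timers, run/stop) are supplied elsewhere */
    }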
@@ -28,7 +28,7 @@
 VALUE
 cb_result_success_p(VALUE self)
 {
-    return RTEST(rb_ivar_get(self, cb_id_iv_error)) ? Qfalse : Qtrue;
+    return RTEST(rb_attr_get(self, cb_id_iv_error)) ? Qfalse : Qtrue;
 }
 
 /*
@@ -49,67 +49,73 @@ cb_result_inspect(VALUE self)
     snprintf(buf, 100, ":%p", (void *)self);
     rb_str_buf_cat2(str, buf);
 
-    attr = rb_ivar_get(self, cb_id_iv_operation);
+    attr = rb_attr_get(self, cb_id_iv_operation);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " operation=");
         rb_str_append(str, rb_inspect(attr));
     }
 
-    attr = rb_ivar_get(self, cb_id_iv_error);
+    attr = rb_attr_get(self, cb_id_iv_error);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " error=");
         rb_str_append(str, rb_inspect(attr));
     }
 
-    attr = rb_ivar_get(self, cb_id_iv_key);
+    attr = rb_attr_get(self, cb_id_iv_value);
+    if (RTEST(attr) && rb_obj_is_kind_of(attr, cb_cBucket)) {
+        rb_str_buf_cat2(str, " bucket="); /* value also accessible using alias #bucket */
+        rb_str_append(str, rb_inspect(attr));
+    }
+
+    attr = rb_attr_get(self, cb_id_iv_key);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " key=");
         rb_str_append(str, rb_inspect(attr));
     }
 
-    attr = rb_ivar_get(self, cb_id_iv_status);
+    attr = rb_attr_get(self, cb_id_iv_status);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " status=");
         rb_str_append(str, rb_inspect(attr));
     }
 
-    attr = rb_ivar_get(self, cb_id_iv_cas);
+    attr = rb_attr_get(self, cb_id_iv_cas);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " cas=");
         rb_str_append(str, rb_inspect(attr));
     }
 
-    attr = rb_ivar_get(self, cb_id_iv_flags);
+    attr = rb_attr_get(self, cb_id_iv_flags);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " flags=0x");
         rb_str_append(str, rb_funcall(attr, cb_id_to_s, 1, INT2FIX(16)));
     }
 
-    attr = rb_ivar_get(self, cb_id_iv_node);
+    attr = rb_attr_get(self, cb_id_iv_node);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " node=");
         rb_str_append(str, rb_inspect(attr));
     }
 
-    attr = rb_ivar_get(self, cb_id_iv_from_master);
+    attr = rb_attr_get(self, cb_id_iv_from_master);
     if (attr != Qnil) {
         rb_str_buf_cat2(str, " from_master=");
         rb_str_append(str, rb_inspect(attr));
     }
 
-    attr = rb_ivar_get(self, cb_id_iv_time_to_persist);
+    attr = rb_attr_get(self, cb_id_iv_time_to_persist);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " time_to_persist=");
         rb_str_append(str, rb_inspect(attr));
     }
 
-    attr = rb_ivar_get(self, cb_id_iv_time_to_replicate);
+    attr = rb_attr_get(self, cb_id_iv_time_to_replicate);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " time_to_replicate=");
         rb_str_append(str, rb_inspect(attr));
     }
 
-    attr = rb_ivar_get(self, cb_id_iv_headers);
+    attr = rb_attr_get(self, cb_id_iv_headers);
     if (RTEST(attr)) {
         rb_str_buf_cat2(str, " headers=");
         rb_str_append(str, rb_inspect(attr));
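
Every rb_ivar_get in this inspect path becomes rb_attr_get. The two behave identically except that rb_ivar_get emits an "instance variable not initialized" warning under ruby -w when the variable was never assigned, while rb_attr_get returns Qnil silently. Since #inspect probes many optional attributes, the quiet accessor avoids spurious warnings. The repeated probe-and-append pattern could be condensed into a helper along these lines (hypothetical, not part of the gem):

    static void
    append_attr(VALUE str, VALUE self, ID iv, const char *label)
    {
        VALUE attr = rb_attr_get(self, iv); /* Qnil, without a -w warning, when unset */
        if (RTEST(attr)) {
            rb_str_buf_cat2(str, label);
            rb_str_append(str, rb_inspect(attr));
        }
    }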
@@ -22,13 +22,13 @@ cb_stat_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_
 {
     struct cb_context_st *ctx = (struct cb_context_st *)cookie;
     struct cb_bucket_st *bucket = ctx->bucket;
-    VALUE stats, node, key, val, *rv = ctx->rv, exc = Qnil, res;
+    VALUE stats, node, key, val, exc = Qnil, res;
 
     node = resp->v.v0.server_endpoint ? STR_NEW_CSTR(resp->v.v0.server_endpoint) : Qnil;
     exc = cb_check_error(error, "failed to fetch stats", node);
     if (exc != Qnil) {
         rb_ivar_set(exc, cb_id_iv_operation, cb_sym_stats);
-        ctx->exception = cb_gc_protect(bucket, exc);
+        ctx->exception = exc;
     }
     if (node != Qnil) {
         key = STR_NEW((const char*)resp->v.v0.key, resp->v.v0.nkey);
@@ -45,18 +45,18 @@ cb_stat_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_
             }
         } else { /* synchronous */
             if (NIL_P(exc)) {
-                stats = rb_hash_aref(*rv, key);
+                stats = rb_hash_aref(ctx->rv, key);
                 if (NIL_P(stats)) {
                     stats = rb_hash_new();
-                    rb_hash_aset(*rv, key, stats);
+                    rb_hash_aset(ctx->rv, key, stats);
                 }
                 rb_hash_aset(stats, node, val);
             }
         }
     } else {
-        cb_gc_unprotect(bucket, ctx->proc);
+        ctx->proc = Qnil;
         if (bucket->async) {
-            free(ctx);
+            cb_context_free(ctx);
         }
     }
     (void)handle;
@@ -109,37 +109,29 @@ cb_bucket_stats(int argc, VALUE *argv, VALUE self)
 {
     struct cb_bucket_st *bucket = DATA_PTR(self);
     struct cb_context_st *ctx;
-    VALUE rv, exc, args, proc;
+    VALUE rv, exc, proc;
     lcb_error_t err;
     struct cb_params_st params;
 
-    if (bucket->handle == NULL) {
-        rb_raise(cb_eConnectError, "closed connection");
+    if (!cb_bucket_connected_bang(bucket, cb_sym_stats)) {
+        return Qnil;
     }
-    rb_scan_args(argc, argv, "0*&", &args, &proc);
+
+    memset(&params, 0, sizeof(struct cb_params_st));
+    rb_scan_args(argc, argv, "0*&", &params.args, &proc);
     if (!bucket->async && proc != Qnil) {
         rb_raise(rb_eArgError, "synchronous mode doesn't support callbacks");
     }
-    memset(&params, 0, sizeof(struct cb_params_st));
     params.type = cb_cmd_stats;
     params.bucket = bucket;
-    cb_params_build(&params, RARRAY_LEN(args), args);
-    ctx = calloc(1, sizeof(struct cb_context_st));
-    if (ctx == NULL) {
-        rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for context");
-    }
-    rv = rb_hash_new();
-    ctx->rv = &rv;
-    ctx->bucket = bucket;
-    ctx->proc = cb_gc_protect(bucket, proc);
-    ctx->exception = Qnil;
-    ctx->nqueries = params.cmd.stats.num;
+    cb_params_build(&params);
+    ctx = cb_context_alloc_common(bucket, proc, params.cmd.stats.num);
     err = lcb_server_stats(bucket->handle, (const void *)ctx,
             params.cmd.stats.num, params.cmd.stats.ptr);
     exc = cb_check_error(err, "failed to schedule stat request", Qnil);
     cb_params_destroy(&params);
     if (exc != Qnil) {
-        free(ctx);
+        cb_context_free(ctx);
         rb_exc_raise(exc);
     }
     bucket->nbytes += params.npayload;
@@ -152,9 +144,9 @@ cb_bucket_stats(int argc, VALUE *argv, VALUE self)
         lcb_wait(bucket->handle);
     }
     exc = ctx->exception;
-    free(ctx);
+    rv = ctx->rv;
+    cb_context_free(ctx);
     if (exc != Qnil) {
-        cb_gc_unprotect(bucket, exc);
         rb_exc_raise(exc);
     }
     exc = bucket->exception;