couchbase 1.2.0.z.beta5 → 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/RELEASE_NOTES.markdown +513 -0
- data/couchbase.gemspec +0 -1
- data/ext/couchbase_ext/arguments.c +50 -147
- data/ext/couchbase_ext/arithmetic.c +6 -17
- data/ext/couchbase_ext/bucket.c +97 -111
- data/ext/couchbase_ext/couchbase_ext.c +10 -0
- data/ext/couchbase_ext/couchbase_ext.h +20 -9
- data/ext/couchbase_ext/delete.c +6 -8
- data/ext/couchbase_ext/extconf.rb +6 -0
- data/ext/couchbase_ext/get.c +7 -9
- data/ext/couchbase_ext/http.c +20 -19
- data/ext/couchbase_ext/multithread_plugin.c +1201 -0
- data/ext/couchbase_ext/observe.c +10 -10
- data/ext/couchbase_ext/stats.c +6 -8
- data/ext/couchbase_ext/store.c +8 -10
- data/ext/couchbase_ext/timer.c +1 -1
- data/ext/couchbase_ext/touch.c +6 -8
- data/ext/couchbase_ext/unlock.c +6 -8
- data/ext/couchbase_ext/utils.c +75 -5
- data/ext/couchbase_ext/version.c +6 -8
- data/lib/couchbase/version.rb +1 -1
- data/lib/couchbase/view.rb +6 -2
- data/lib/ext/multi_json_fix.rb +3 -0
- data/lib/rack/session/couchbase.rb +8 -5
- data/tasks/compile.rake +1 -1
- data/tasks/test.rake +0 -1
- data/tasks/util.rake +1 -1
- data/test/setup.rb +5 -2
- data/test/test_async.rb +2 -2
- metadata +11 -29
- data/HISTORY.markdown +0 -268
@@ -167,6 +167,9 @@ VALUE cb_eClientNoMemoryError; /* LCB_CLIENT_ENOMEM = 0x1a */
|
|
167
167
|
VALUE cb_eClientTmpFailError; /* LCB_CLIENT_ETMPFAIL = 0x1b */
|
168
168
|
VALUE cb_eBadHandleError; /* LCB_EBADHANDLE = 0x1c */
|
169
169
|
|
170
|
+
/* Default Strings */
|
171
|
+
VALUE cb_vStrDefault;
|
172
|
+
VALUE cb_vStrEmpty;
|
170
173
|
|
171
174
|
/* Ruby Extension initializer */
|
172
175
|
void
|
@@ -1075,4 +1078,11 @@ Init_couchbase_ext(void)
|
|
1075
1078
|
cb_sym_username = ID2SYM(rb_intern("username"));
|
1076
1079
|
cb_sym_version = ID2SYM(rb_intern("version"));
|
1077
1080
|
cb_sym_view = ID2SYM(rb_intern("view"));
|
1081
|
+
|
1082
|
+
cb_vStrDefault = STR_NEW_CSTR("default");
|
1083
|
+
rb_str_freeze(cb_vStrDefault);
|
1084
|
+
rb_const_set(cb_mCouchbase, rb_intern("_STR_DEFAULT"), cb_vStrDefault);
|
1085
|
+
cb_vStrEmpty = STR_NEW_CSTR("");
|
1086
|
+
rb_str_freeze(cb_vStrEmpty);
|
1087
|
+
rb_const_set(cb_mCouchbase, rb_intern("_STR_EMPTY"), cb_vStrEmpty);
|
1078
1088
|
}
|
@@ -51,6 +51,10 @@ extern hrtime_t gethrtime(void);
|
|
51
51
|
#define va_init_list(a,b) va_start(a)
|
52
52
|
#endif
|
53
53
|
|
54
|
+
#ifndef HAVE_RB_HASH_LOOKUP2
|
55
|
+
VALUE rb_hash_lookup2(VALUE, VALUE, VALUE);
|
56
|
+
#endif
|
57
|
+
|
54
58
|
#define cb_debug_object(OBJ) \
|
55
59
|
rb_funcall(rb_stderr, rb_intern("print"), 1, rb_funcall(OBJ, rb_intern("object_id"), 0)); \
|
56
60
|
rb_funcall(rb_stderr, rb_intern("print"), 1, STR_NEW_CSTR(" ")); \
|
@@ -71,12 +75,12 @@ struct cb_bucket_st
|
|
71
75
|
lcb_type_t type;
|
72
76
|
struct lcb_io_opt_st *io;
|
73
77
|
uint16_t port;
|
74
|
-
|
75
|
-
|
76
|
-
|
77
|
-
|
78
|
-
|
79
|
-
|
78
|
+
VALUE authority;
|
79
|
+
VALUE hostname;
|
80
|
+
VALUE pool;
|
81
|
+
VALUE bucket;
|
82
|
+
VALUE username;
|
83
|
+
VALUE password;
|
80
84
|
int async;
|
81
85
|
int quiet;
|
82
86
|
VALUE default_format; /* should update +default_flags+ on change */
|
@@ -91,9 +95,8 @@ struct cb_bucket_st
|
|
91
95
|
VALUE exception; /* error delivered by error_callback */
|
92
96
|
VALUE on_error_proc; /* is using to deliver errors in async mode */
|
93
97
|
VALUE environment; /* sym_development or sym_production */
|
94
|
-
char *key_prefix;
|
95
98
|
VALUE key_prefix_val;
|
96
|
-
|
99
|
+
VALUE node_list;
|
97
100
|
VALUE object_space;
|
98
101
|
VALUE self; /* the pointer to bucket representation in ruby land */
|
99
102
|
};
|
@@ -289,12 +292,16 @@ extern VALUE cb_eClientNoMemoryError; /* LCB_CLIENT_ENOMEM = 0x1a */
|
|
289
292
|
extern VALUE cb_eClientTmpFailError; /* LCB_CLIENT_ETMPFAIL = 0x1b */
|
290
293
|
extern VALUE cb_eBadHandleError; /* LCB_EBADHANDLE = 0x1c */
|
291
294
|
|
295
|
+
/* Default Strings */
|
296
|
+
extern VALUE cb_vStrDefault;
|
297
|
+
extern VALUE cb_vStrEmpty;
|
298
|
+
|
292
299
|
void cb_strip_key_prefix(struct cb_bucket_st *bucket, VALUE key);
|
293
300
|
VALUE cb_check_error(lcb_error_t rc, const char *msg, VALUE key);
|
294
301
|
VALUE cb_check_error_with_status(lcb_error_t rc, const char *msg, VALUE key, lcb_http_status_t status);
|
295
302
|
VALUE cb_gc_protect(struct cb_bucket_st *bucket, VALUE val);
|
296
303
|
VALUE cb_gc_unprotect(struct cb_bucket_st *bucket, VALUE val);
|
297
|
-
VALUE cb_proc_call(VALUE recv, int argc, ...);
|
304
|
+
VALUE cb_proc_call(struct cb_bucket_st *bucket, VALUE recv, int argc, ...);
|
298
305
|
int cb_first_value_i(VALUE key, VALUE value, VALUE arg);
|
299
306
|
void cb_build_headers(struct cb_context_st *ctx, const char * const *headers);
|
300
307
|
void cb_maybe_do_loop(struct cb_bucket_st *bucket);
|
@@ -303,6 +310,8 @@ VALUE cb_encode_value(VALUE val, uint32_t flags);
|
|
303
310
|
VALUE cb_decode_value(VALUE blob, uint32_t flags, VALUE force_format);
|
304
311
|
uint32_t cb_flags_set_format(uint32_t flags, ID format);
|
305
312
|
ID cb_flags_get_format(uint32_t flags);
|
313
|
+
void cb_async_error_notify(struct cb_bucket_st *bucket, VALUE exc);
|
314
|
+
|
306
315
|
|
307
316
|
void cb_storage_callback(lcb_t handle, const void *cookie, lcb_storage_t operation, lcb_error_t error, const lcb_store_resp_t *resp);
|
308
317
|
void cb_get_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_get_resp_t *resp);
|
@@ -531,5 +540,7 @@ struct cb_params_st
|
|
531
540
|
void cb_params_destroy(struct cb_params_st *params);
|
532
541
|
void cb_params_build(struct cb_params_st *params, int argc, VALUE argv);
|
533
542
|
|
543
|
+
LIBCOUCHBASE_API
|
544
|
+
lcb_error_t cb_create_ruby_mt_io_opts(int version, lcb_io_opt_t *io, void *arg);
|
534
545
|
#endif
|
535
546
|
|
data/ext/couchbase_ext/delete.c
CHANGED
@@ -32,9 +32,7 @@ cb_delete_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lc
|
|
32
32
|
exc = cb_check_error(error, "failed to remove value", key);
|
33
33
|
if (exc != Qnil) {
|
34
34
|
rb_ivar_set(exc, cb_id_iv_operation, cb_sym_delete);
|
35
|
-
|
36
|
-
ctx->exception = cb_gc_protect(bucket, exc);
|
37
|
-
}
|
35
|
+
ctx->exception = cb_gc_protect(bucket, exc);
|
38
36
|
}
|
39
37
|
}
|
40
38
|
if (bucket->async) { /* asynchronous */
|
@@ -43,7 +41,7 @@ cb_delete_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lc
|
|
43
41
|
rb_ivar_set(res, cb_id_iv_error, exc);
|
44
42
|
rb_ivar_set(res, cb_id_iv_operation, cb_sym_delete);
|
45
43
|
rb_ivar_set(res, cb_id_iv_key, key);
|
46
|
-
cb_proc_call(ctx->proc, 1, res);
|
44
|
+
cb_proc_call(bucket, ctx->proc, 1, res);
|
47
45
|
}
|
48
46
|
} else { /* synchronous */
|
49
47
|
rb_hash_aset(*rv, key, (error == LCB_SUCCESS) ? Qtrue : Qfalse);
|
@@ -51,7 +49,7 @@ cb_delete_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lc
|
|
51
49
|
if (ctx->nqueries == 0) {
|
52
50
|
cb_gc_unprotect(bucket, ctx->proc);
|
53
51
|
if (bucket->async) {
|
54
|
-
|
52
|
+
free(ctx);
|
55
53
|
}
|
56
54
|
}
|
57
55
|
(void)handle;
|
@@ -122,7 +120,7 @@ cb_bucket_delete(int argc, VALUE *argv, VALUE self)
|
|
122
120
|
params.bucket = bucket;
|
123
121
|
cb_params_build(¶ms, RARRAY_LEN(args), args);
|
124
122
|
|
125
|
-
ctx =
|
123
|
+
ctx = calloc(1, sizeof(struct cb_context_st));
|
126
124
|
if (ctx == NULL) {
|
127
125
|
rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for context");
|
128
126
|
}
|
@@ -138,7 +136,7 @@ cb_bucket_delete(int argc, VALUE *argv, VALUE self)
|
|
138
136
|
cb_params_destroy(¶ms);
|
139
137
|
exc = cb_check_error(err, "failed to schedule delete request", Qnil);
|
140
138
|
if (exc != Qnil) {
|
141
|
-
|
139
|
+
free(ctx);
|
142
140
|
rb_exc_raise(exc);
|
143
141
|
}
|
144
142
|
bucket->nbytes += params.npayload;
|
@@ -151,7 +149,7 @@ cb_bucket_delete(int argc, VALUE *argv, VALUE self)
|
|
151
149
|
lcb_wait(bucket->handle);
|
152
150
|
}
|
153
151
|
exc = ctx->exception;
|
154
|
-
|
152
|
+
free(ctx);
|
155
153
|
if (exc != Qnil) {
|
156
154
|
rb_exc_raise(cb_gc_unprotect(bucket, exc));
|
157
155
|
}
|
@@ -130,9 +130,15 @@ have_library("couchbase", "lcb_verify_compiler_setup", "libcouchbase/couchbase.h
|
|
130
130
|
have_header("mach/mach_time.h")
|
131
131
|
have_header("stdint.h") or die("Failed to locate stdint.h")
|
132
132
|
have_header("sys/time.h")
|
133
|
+
have_header("fcntl.h")
|
133
134
|
have_func("clock_gettime")
|
134
135
|
have_func("gettimeofday")
|
135
136
|
have_func("QueryPerformanceCounter")
|
137
|
+
have_func("rb_hash_lookup2")
|
138
|
+
have_func("rb_thread_fd_select")
|
139
|
+
have_func("rb_thread_blocking_region")
|
140
|
+
have_func("poll", "poll.h")
|
141
|
+
have_func("ppoll", "poll.h")
|
136
142
|
define("_GNU_SOURCE")
|
137
143
|
create_header("couchbase_config.h")
|
138
144
|
create_makefile("couchbase_ext")
|
data/ext/couchbase_ext/get.c
CHANGED
@@ -32,9 +32,7 @@ cb_get_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_g
|
|
32
32
|
exc = cb_check_error(error, "failed to get value", key);
|
33
33
|
if (exc != Qnil) {
|
34
34
|
rb_ivar_set(exc, cb_id_iv_operation, cb_sym_get);
|
35
|
-
|
36
|
-
ctx->exception = cb_gc_protect(bucket, exc);
|
37
|
-
}
|
35
|
+
ctx->exception = cb_gc_protect(bucket, exc);
|
38
36
|
}
|
39
37
|
}
|
40
38
|
|
@@ -59,7 +57,7 @@ cb_get_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_g
|
|
59
57
|
val = raw;
|
60
58
|
}
|
61
59
|
} else if (cb_flags_get_format(resp->v.v0.flags) == cb_sym_plain) {
|
62
|
-
val =
|
60
|
+
val = cb_vStrEmpty;
|
63
61
|
}
|
64
62
|
if (bucket->async) { /* asynchronous */
|
65
63
|
if (ctx->proc != Qnil) {
|
@@ -70,7 +68,7 @@ cb_get_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_g
|
|
70
68
|
rb_ivar_set(res, cb_id_iv_value, val);
|
71
69
|
rb_ivar_set(res, cb_id_iv_flags, flags);
|
72
70
|
rb_ivar_set(res, cb_id_iv_cas, cas);
|
73
|
-
cb_proc_call(ctx->proc, 1, res);
|
71
|
+
cb_proc_call(bucket, ctx->proc, 1, res);
|
74
72
|
}
|
75
73
|
} else { /* synchronous */
|
76
74
|
if (NIL_P(exc) && error != LCB_KEY_ENOENT) {
|
@@ -85,7 +83,7 @@ cb_get_callback(lcb_t handle, const void *cookie, lcb_error_t error, const lcb_g
|
|
85
83
|
if (ctx->nqueries == 0) {
|
86
84
|
cb_gc_unprotect(bucket, ctx->proc);
|
87
85
|
if (bucket->async) {
|
88
|
-
|
86
|
+
free(ctx);
|
89
87
|
}
|
90
88
|
}
|
91
89
|
(void)handle;
|
@@ -241,7 +239,7 @@ cb_bucket_get(int argc, VALUE *argv, VALUE self)
|
|
241
239
|
params.bucket = bucket;
|
242
240
|
params.cmd.get.keys_ary = cb_gc_protect(bucket, rb_ary_new());
|
243
241
|
cb_params_build(¶ms, RARRAY_LEN(args), args);
|
244
|
-
ctx =
|
242
|
+
ctx = calloc(1, sizeof(struct cb_context_st));
|
245
243
|
if (ctx == NULL) {
|
246
244
|
rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for context");
|
247
245
|
}
|
@@ -265,7 +263,7 @@ cb_bucket_get(int argc, VALUE *argv, VALUE self)
|
|
265
263
|
cb_gc_unprotect(bucket, params.cmd.get.keys_ary);
|
266
264
|
exc = cb_check_error(err, "failed to schedule get request", Qnil);
|
267
265
|
if (exc != Qnil) {
|
268
|
-
|
266
|
+
free(ctx);
|
269
267
|
rb_exc_raise(exc);
|
270
268
|
}
|
271
269
|
bucket->nbytes += params.npayload;
|
@@ -278,7 +276,7 @@ cb_bucket_get(int argc, VALUE *argv, VALUE self)
|
|
278
276
|
lcb_wait(bucket->handle);
|
279
277
|
}
|
280
278
|
exc = ctx->exception;
|
281
|
-
|
279
|
+
free(ctx);
|
282
280
|
if (exc != Qnil) {
|
283
281
|
cb_gc_unprotect(bucket, exc);
|
284
282
|
rb_exc_raise(exc);
|
data/ext/couchbase_ext/http.c
CHANGED
@@ -22,20 +22,19 @@ cb_http_complete_callback(lcb_http_request_t request, lcb_t handle, const void *
|
|
22
22
|
{
|
23
23
|
struct cb_context_st *ctx = (struct cb_context_st *)cookie;
|
24
24
|
struct cb_bucket_st *bucket = ctx->bucket;
|
25
|
-
VALUE *rv = ctx->rv, key, val, res;
|
25
|
+
VALUE *rv = ctx->rv, key, val, res, exc;
|
26
26
|
lcb_http_status_t status;
|
27
27
|
|
28
28
|
ctx->request->completed = 1;
|
29
29
|
key = STR_NEW((const char*)resp->v.v0.path, resp->v.v0.npath);
|
30
30
|
val = resp->v.v0.nbytes ? STR_NEW((const char*)resp->v.v0.bytes, resp->v.v0.nbytes) : Qnil;
|
31
|
-
|
32
|
-
|
33
|
-
if (ctx->exception != Qnil) {
|
31
|
+
exc = cb_check_error_with_status(error, "failed to execute HTTP request", key, resp->v.v0.status);
|
32
|
+
if (exc != Qnil) {
|
34
33
|
if (val != Qnil) {
|
35
|
-
rb_ivar_set(
|
34
|
+
rb_ivar_set(exc, cb_id_iv_body, val);
|
36
35
|
}
|
37
|
-
rb_funcall(
|
38
|
-
cb_gc_protect(bucket,
|
36
|
+
rb_funcall(exc, cb_id_parse_body_bang, 0);
|
37
|
+
ctx->exception = cb_gc_protect(bucket, exc);
|
39
38
|
}
|
40
39
|
status = resp->v.v0.status;
|
41
40
|
if (resp->v.v0.headers) {
|
@@ -55,7 +54,8 @@ cb_http_complete_callback(lcb_http_request_t request, lcb_t handle, const void *
|
|
55
54
|
res = val;
|
56
55
|
}
|
57
56
|
if (ctx->proc != Qnil) {
|
58
|
-
cb_proc_call(ctx->proc, 1, res);
|
57
|
+
cb_proc_call(bucket, ctx->proc, 1, res);
|
58
|
+
cb_gc_unprotect(bucket, ctx->proc);
|
59
59
|
}
|
60
60
|
if (!bucket->async && ctx->exception == Qnil) {
|
61
61
|
*rv = res;
|
@@ -107,7 +107,7 @@ cb_http_data_callback(lcb_http_request_t request, lcb_t handle, const void *cook
|
|
107
107
|
} else {
|
108
108
|
res = val;
|
109
109
|
}
|
110
|
-
cb_proc_call(ctx->proc, 1, res);
|
110
|
+
cb_proc_call(bucket, ctx->proc, 1, res);
|
111
111
|
}
|
112
112
|
(void)handle;
|
113
113
|
}
|
@@ -123,11 +123,11 @@ cb_http_request_free(void *ptr)
|
|
123
123
|
&& !request->completed) {
|
124
124
|
lcb_cancel_http_request(request->bucket->handle, request->request);
|
125
125
|
}
|
126
|
-
|
127
|
-
|
128
|
-
|
129
|
-
xfree(request);
|
126
|
+
free((char *)request->cmd.v.v0.content_type);
|
127
|
+
free((char *)request->cmd.v.v0.path);
|
128
|
+
free((char *)request->cmd.v.v0.body);
|
130
129
|
}
|
130
|
+
xfree(request);
|
131
131
|
}
|
132
132
|
|
133
133
|
void
|
@@ -242,7 +242,7 @@ cb_http_request_init(int argc, VALUE *argv, VALUE self)
|
|
242
242
|
}
|
243
243
|
if ((arg = rb_hash_aref(opts, cb_sym_content_type)) != Qnil) {
|
244
244
|
Check_Type(arg, T_STRING);
|
245
|
-
|
245
|
+
free((char *)request->cmd.v.v0.content_type);
|
246
246
|
request->cmd.v.v0.content_type = strdup(RSTRING_PTR(arg));
|
247
247
|
}
|
248
248
|
}
|
@@ -280,24 +280,25 @@ cb_http_request_perform(VALUE self)
|
|
280
280
|
lcb_error_t err;
|
281
281
|
struct cb_bucket_st *bucket;
|
282
282
|
|
283
|
-
ctx =
|
283
|
+
ctx = calloc(1, sizeof(struct cb_context_st));
|
284
284
|
if (ctx == NULL) {
|
285
285
|
rb_raise(cb_eClientNoMemoryError, "failed to allocate memory");
|
286
286
|
}
|
287
287
|
rv = Qnil;
|
288
288
|
ctx->rv = &rv;
|
289
289
|
ctx->bucket = bucket = req->bucket;
|
290
|
-
ctx->proc = rb_block_given_p() ? rb_block_proc() : req->on_body_callback;
|
290
|
+
ctx->proc = rb_block_given_p() ? cb_gc_protect(bucket, rb_block_proc()) : req->on_body_callback;
|
291
291
|
ctx->extended = req->extended;
|
292
292
|
ctx->request = req;
|
293
293
|
ctx->headers_val = cb_gc_protect(bucket, rb_hash_new());
|
294
|
+
ctx->exception = Qnil;
|
294
295
|
|
295
296
|
err = lcb_make_http_request(bucket->handle, (const void *)ctx,
|
296
297
|
req->type, &req->cmd, &req->request);
|
297
298
|
exc = cb_check_error(err, "failed to schedule document request",
|
298
299
|
STR_NEW(req->cmd.v.v0.path, req->cmd.v.v0.npath));
|
299
300
|
if (exc != Qnil) {
|
300
|
-
|
301
|
+
free(ctx);
|
301
302
|
rb_exc_raise(exc);
|
302
303
|
}
|
303
304
|
req->running = 1;
|
@@ -308,7 +309,7 @@ cb_http_request_perform(VALUE self)
|
|
308
309
|
lcb_wait(bucket->handle);
|
309
310
|
if (req->completed) {
|
310
311
|
exc = ctx->exception;
|
311
|
-
|
312
|
+
free(ctx);
|
312
313
|
if (exc != Qnil) {
|
313
314
|
cb_gc_unprotect(bucket, exc);
|
314
315
|
rb_exc_raise(exc);
|
@@ -340,7 +341,7 @@ cb_http_request_continue(VALUE self)
|
|
340
341
|
if (req->completed) {
|
341
342
|
exc = req->ctx->exception;
|
342
343
|
rv = req->ctx->rv;
|
343
|
-
|
344
|
+
free(req->ctx);
|
344
345
|
if (exc != Qnil) {
|
345
346
|
cb_gc_unprotect(req->bucket, exc);
|
346
347
|
rb_exc_raise(exc);
|
@@ -0,0 +1,1201 @@
|
|
1
|
+
/* vim: ft=c et ts=8 sts=4 sw=4 cino=
|
2
|
+
*
|
3
|
+
* Copyright 2012 Couchbase, Inc.
|
4
|
+
*
|
5
|
+
* Licensed under the Apache License, Version 2.0 (the "License");
|
6
|
+
* you may not use this file except in compliance with the License.
|
7
|
+
* You may obtain a copy of the License at
|
8
|
+
*
|
9
|
+
* http://www.apache.org/licenses/LICENSE-2.0
|
10
|
+
*
|
11
|
+
* Unless required by applicable law or agreed to in writing, software
|
12
|
+
* distributed under the License is distributed on an "AS IS" BASIS,
|
13
|
+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14
|
+
* See the License for the specific language governing permissions and
|
15
|
+
* limitations under the License.
|
16
|
+
*/
|
17
|
+
|
18
|
+
#include "couchbase_ext.h"
|
19
|
+
|
20
|
+
|
21
|
+
#ifndef _WIN32
|
22
|
+
|
23
|
+
#ifndef HAVE_RB_THREAD_BLOCKING_REGION
|
24
|
+
#include <rubysig.h>
|
25
|
+
#endif
|
26
|
+
#include <errno.h>
|
27
|
+
#include <sys/types.h>
|
28
|
+
#include <sys/socket.h>
|
29
|
+
#include <unistd.h>
|
30
|
+
#ifdef HAVE_FCNTL_H
|
31
|
+
#include <fcntl.h>
|
32
|
+
#endif
|
33
|
+
#ifdef HAVE_POLL
|
34
|
+
#include <poll.h>
|
35
|
+
#endif
|
36
|
+
#define INVALID_SOCKET (-1)
|
37
|
+
|
38
|
+
/* Copied from libev plugin */
|
39
|
+
static lcb_ssize_t
|
40
|
+
lcb_io_recv(struct lcb_io_opt_st *iops, lcb_socket_t sock,
|
41
|
+
void *buffer, lcb_size_t len, int flags)
|
42
|
+
{
|
43
|
+
lcb_ssize_t ret = recv(sock, buffer, len, flags);
|
44
|
+
if (ret < 0) {
|
45
|
+
iops->v.v0.error = errno;
|
46
|
+
}
|
47
|
+
return ret;
|
48
|
+
}
|
49
|
+
|
50
|
+
static lcb_ssize_t
|
51
|
+
lcb_io_recvv(struct lcb_io_opt_st *iops, lcb_socket_t sock,
|
52
|
+
struct lcb_iovec_st *iov, lcb_size_t niov)
|
53
|
+
{
|
54
|
+
struct msghdr msg;
|
55
|
+
struct iovec vec[2];
|
56
|
+
lcb_ssize_t ret;
|
57
|
+
|
58
|
+
if (niov != 2) {
|
59
|
+
return -1;
|
60
|
+
}
|
61
|
+
memset(&msg, 0, sizeof(msg));
|
62
|
+
msg.msg_iov = vec;
|
63
|
+
msg.msg_iovlen = iov[1].iov_len ? (lcb_size_t)2 : (lcb_size_t)1;
|
64
|
+
msg.msg_iov[0].iov_base = iov[0].iov_base;
|
65
|
+
msg.msg_iov[0].iov_len = iov[0].iov_len;
|
66
|
+
msg.msg_iov[1].iov_base = iov[1].iov_base;
|
67
|
+
msg.msg_iov[1].iov_len = iov[1].iov_len;
|
68
|
+
ret = recvmsg(sock, &msg, 0);
|
69
|
+
|
70
|
+
if (ret < 0) {
|
71
|
+
iops->v.v0.error = errno;
|
72
|
+
}
|
73
|
+
|
74
|
+
return ret;
|
75
|
+
}
|
76
|
+
|
77
|
+
static lcb_ssize_t
|
78
|
+
lcb_io_send(struct lcb_io_opt_st *iops, lcb_socket_t sock,
|
79
|
+
const void *msg, lcb_size_t len, int flags)
|
80
|
+
{
|
81
|
+
lcb_ssize_t ret = send(sock, msg, len, flags);
|
82
|
+
if (ret < 0) {
|
83
|
+
iops->v.v0.error = errno;
|
84
|
+
}
|
85
|
+
return ret;
|
86
|
+
}
|
87
|
+
|
88
|
+
static lcb_ssize_t
|
89
|
+
lcb_io_sendv(struct lcb_io_opt_st *iops, lcb_socket_t sock,
|
90
|
+
struct lcb_iovec_st *iov, lcb_size_t niov)
|
91
|
+
{
|
92
|
+
struct msghdr msg;
|
93
|
+
struct iovec vec[2];
|
94
|
+
lcb_ssize_t ret;
|
95
|
+
|
96
|
+
if (niov != 2) {
|
97
|
+
return -1;
|
98
|
+
}
|
99
|
+
memset(&msg, 0, sizeof(msg));
|
100
|
+
msg.msg_iov = vec;
|
101
|
+
msg.msg_iovlen = iov[1].iov_len ? (lcb_size_t)2 : (lcb_size_t)1;
|
102
|
+
msg.msg_iov[0].iov_base = iov[0].iov_base;
|
103
|
+
msg.msg_iov[0].iov_len = iov[0].iov_len;
|
104
|
+
msg.msg_iov[1].iov_base = iov[1].iov_base;
|
105
|
+
msg.msg_iov[1].iov_len = iov[1].iov_len;
|
106
|
+
ret = sendmsg(sock, &msg, 0);
|
107
|
+
|
108
|
+
if (ret < 0) {
|
109
|
+
iops->v.v0.error = errno;
|
110
|
+
}
|
111
|
+
return ret;
|
112
|
+
}
|
113
|
+
|
114
|
+
static int
|
115
|
+
make_socket_nonblocking(lcb_socket_t sock)
|
116
|
+
{
|
117
|
+
int flags;
|
118
|
+
if ((flags = fcntl(sock, F_GETFL, NULL)) < 0) {
|
119
|
+
return -1;
|
120
|
+
}
|
121
|
+
if (fcntl(sock, F_SETFL, flags | O_NONBLOCK) == -1) {
|
122
|
+
return -1;
|
123
|
+
}
|
124
|
+
|
125
|
+
return 0;
|
126
|
+
}
|
127
|
+
|
128
|
+
static int
|
129
|
+
close_socket(lcb_socket_t sock)
|
130
|
+
{
|
131
|
+
return close(sock);
|
132
|
+
}
|
133
|
+
|
134
|
+
static lcb_socket_t
|
135
|
+
lcb_io_socket(struct lcb_io_opt_st *iops, int domain, int type,
|
136
|
+
int protocol)
|
137
|
+
{
|
138
|
+
lcb_socket_t sock = socket(domain, type, protocol);
|
139
|
+
if (sock == INVALID_SOCKET) {
|
140
|
+
iops->v.v0.error = errno;
|
141
|
+
} else {
|
142
|
+
if (make_socket_nonblocking(sock) != 0) {
|
143
|
+
int error = errno;
|
144
|
+
iops->v.v0.close(iops, sock);
|
145
|
+
iops->v.v0.error = error;
|
146
|
+
sock = INVALID_SOCKET;
|
147
|
+
}
|
148
|
+
}
|
149
|
+
|
150
|
+
return sock;
|
151
|
+
}
|
152
|
+
|
153
|
+
static void
|
154
|
+
lcb_io_close(struct lcb_io_opt_st *iops, lcb_socket_t sock)
|
155
|
+
{
|
156
|
+
close_socket(sock);
|
157
|
+
(void)iops;
|
158
|
+
}
|
159
|
+
|
160
|
+
static int
|
161
|
+
lcb_io_connect(struct lcb_io_opt_st *iops, lcb_socket_t sock,
|
162
|
+
const struct sockaddr *name, unsigned int namelen)
|
163
|
+
{
|
164
|
+
int ret = connect(sock, name, (socklen_t)namelen);
|
165
|
+
if (ret < 0) {
|
166
|
+
iops->v.v0.error = errno;
|
167
|
+
}
|
168
|
+
return ret;
|
169
|
+
}
|
170
|
+
|
171
|
+
/* events sorted array */
|
172
|
+
typedef struct rb_mt_event rb_mt_event;
|
173
|
+
struct rb_mt_event {
|
174
|
+
void *cb_data;
|
175
|
+
void (*handler)(lcb_socket_t sock, short which, void *cb_data);
|
176
|
+
lcb_socket_t socket;
|
177
|
+
int loop_index;
|
178
|
+
short flags;
|
179
|
+
short actual_flags;
|
180
|
+
short inserted;
|
181
|
+
rb_mt_event *next;
|
182
|
+
};
|
183
|
+
|
184
|
+
typedef struct rb_mt_socket_list rb_mt_socket_list;
|
185
|
+
struct rb_mt_socket_list {
|
186
|
+
lcb_socket_t socket;
|
187
|
+
short flags;
|
188
|
+
rb_mt_event *first;
|
189
|
+
};
|
190
|
+
|
191
|
+
typedef struct rb_mt_events rb_mt_events;
|
192
|
+
struct rb_mt_events {
|
193
|
+
uint32_t capa;
|
194
|
+
uint32_t count;
|
195
|
+
rb_mt_socket_list *sockets;
|
196
|
+
};
|
197
|
+
|
198
|
+
static int
|
199
|
+
events_init(rb_mt_events *events)
|
200
|
+
{
|
201
|
+
rb_mt_socket_list *new_socks = malloc(4 * sizeof(*new_socks));
|
202
|
+
if (new_socks == NULL) {
|
203
|
+
return 0;
|
204
|
+
}
|
205
|
+
events->capa = 4;
|
206
|
+
events->count = 0;
|
207
|
+
events->sockets = new_socks;
|
208
|
+
return 1;
|
209
|
+
}
|
210
|
+
|
211
|
+
static void
|
212
|
+
events_finalize(rb_mt_events *events)
|
213
|
+
{
|
214
|
+
if (events->sockets) {
|
215
|
+
uint32_t i;
|
216
|
+
for(i = 0; i < events->count; i++) {
|
217
|
+
rb_mt_socket_list *list = &events->sockets[i];
|
218
|
+
while(list->first) {
|
219
|
+
rb_mt_event *next = list->first->next;
|
220
|
+
free(list->first);
|
221
|
+
list->first = next;
|
222
|
+
}
|
223
|
+
}
|
224
|
+
free(events->sockets);
|
225
|
+
events->sockets = NULL;
|
226
|
+
}
|
227
|
+
events->capa = 0;
|
228
|
+
events->count = 0;
|
229
|
+
}
|
230
|
+
|
231
|
+
static uint32_t
|
232
|
+
events_index(rb_mt_events *events, lcb_socket_t socket)
|
233
|
+
{
|
234
|
+
uint32_t m, l = 0, r = events->count;
|
235
|
+
while(l < r) {
|
236
|
+
m = l + (r - l) / 2;
|
237
|
+
if (events->sockets[m].socket >= socket) {
|
238
|
+
r = m;
|
239
|
+
} else {
|
240
|
+
l = m + 1;
|
241
|
+
}
|
242
|
+
}
|
243
|
+
return l;
|
244
|
+
}
|
245
|
+
|
246
|
+
static void
|
247
|
+
events_insert(rb_mt_events *events, rb_mt_event *event)
|
248
|
+
{
|
249
|
+
uint32_t i = events_index(events, event->socket);
|
250
|
+
rb_mt_socket_list *list = &events->sockets[i];
|
251
|
+
if (i == events->count || list->socket != event->socket) {
|
252
|
+
if (events->capa == events->count) {
|
253
|
+
uint32_t new_capa = events->capa << 1;
|
254
|
+
rb_mt_socket_list *new_socks = realloc(events->sockets, new_capa * sizeof(*new_socks));
|
255
|
+
if (new_socks == NULL) {
|
256
|
+
rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for events array");
|
257
|
+
}
|
258
|
+
events->sockets = new_socks;
|
259
|
+
events->capa = new_capa;
|
260
|
+
list = &events->sockets[i];
|
261
|
+
}
|
262
|
+
if (i < events->count) {
|
263
|
+
MEMMOVE(events->sockets+i+1, events->sockets+i, rb_mt_socket_list, events->count - i);
|
264
|
+
}
|
265
|
+
events->count++;
|
266
|
+
list->socket = event->socket;
|
267
|
+
list->flags = event->flags;
|
268
|
+
list->first = event;
|
269
|
+
event->next = NULL;
|
270
|
+
} else {
|
271
|
+
list->flags |= event->flags;
|
272
|
+
event->next = list->first;
|
273
|
+
list->first = event;
|
274
|
+
}
|
275
|
+
event->inserted = 1;
|
276
|
+
}
|
277
|
+
|
278
|
+
static void
|
279
|
+
event_list_fix_flags(rb_mt_socket_list *list)
|
280
|
+
{
|
281
|
+
short flags = 0;
|
282
|
+
rb_mt_event *event = list->first;
|
283
|
+
while (event) {
|
284
|
+
flags |= event->flags;
|
285
|
+
event = event->next;
|
286
|
+
}
|
287
|
+
list->flags = flags;
|
288
|
+
}
|
289
|
+
|
290
|
+
static void
|
291
|
+
events_remove(rb_mt_events *events, rb_mt_event *event)
|
292
|
+
{
|
293
|
+
uint32_t i = events_index(events, event->socket);
|
294
|
+
rb_mt_socket_list *list = &events->sockets[i];
|
295
|
+
rb_mt_event **next;
|
296
|
+
if (list->socket != event->socket) {
|
297
|
+
rb_raise(rb_eIndexError, "There is no socket in event loop");
|
298
|
+
}
|
299
|
+
next = &list->first;
|
300
|
+
for(;;) {
|
301
|
+
if (*next == NULL) {
|
302
|
+
rb_raise(rb_eIndexError, "There is no event in event loop");
|
303
|
+
}
|
304
|
+
if (*next == event) {
|
305
|
+
*next = event->next;
|
306
|
+
event->next = NULL;
|
307
|
+
event->inserted = 0;
|
308
|
+
break;
|
309
|
+
}
|
310
|
+
next = &event->next;
|
311
|
+
}
|
312
|
+
if (list->first == NULL) {
|
313
|
+
MEMMOVE(events->sockets + i, events->sockets + i + 1, rb_mt_socket_list, events->count - i - 1);
|
314
|
+
events->count--;
|
315
|
+
} else {
|
316
|
+
event_list_fix_flags(list);
|
317
|
+
}
|
318
|
+
}
|
319
|
+
|
320
|
+
static void
|
321
|
+
events_fix_flags(rb_mt_events *events, lcb_socket_t socket)
|
322
|
+
{
|
323
|
+
uint32_t i = events_index(events, socket);
|
324
|
+
rb_mt_socket_list *list = &events->sockets[i];
|
325
|
+
if (list->socket != socket) {
|
326
|
+
rb_raise(rb_eIndexError, "There is no socket in event loop");
|
327
|
+
}
|
328
|
+
event_list_fix_flags(list);
|
329
|
+
}
|
330
|
+
|
331
|
+
static inline lcb_socket_t
|
332
|
+
events_max_fd(rb_mt_events *events)
|
333
|
+
{
|
334
|
+
if (events->count) {
|
335
|
+
return events->sockets[events->count - 1].socket;
|
336
|
+
} else {
|
337
|
+
return -1;
|
338
|
+
}
|
339
|
+
}
|
340
|
+
|
341
|
+
/* events sorted array end */
|
342
|
+
|
343
|
+
/* timers heap */
|
344
|
+
typedef struct rb_mt_timer rb_mt_timer;
|
345
|
+
struct rb_mt_timer {
|
346
|
+
void *cb_data;
|
347
|
+
void (*handler)(lcb_socket_t sock, short which, void *cb_data);
|
348
|
+
int index;
|
349
|
+
hrtime_t ts;
|
350
|
+
hrtime_t period;
|
351
|
+
};
|
352
|
+
|
353
|
+
typedef struct rb_mt_timers rb_mt_timers;
|
354
|
+
struct rb_mt_timers {
|
355
|
+
uint32_t capa;
|
356
|
+
uint32_t count;
|
357
|
+
rb_mt_timer **timers;
|
358
|
+
};
|
359
|
+
|
360
|
+
static int
|
361
|
+
timers_init(rb_mt_timers *timers)
|
362
|
+
{
|
363
|
+
rb_mt_timer **new_timers = malloc(4 * sizeof(*new_timers));
|
364
|
+
if (new_timers == NULL) {
|
365
|
+
return 0;
|
366
|
+
}
|
367
|
+
timers->capa = 4;
|
368
|
+
timers->count = 0;
|
369
|
+
timers->timers = new_timers;
|
370
|
+
return 1;
|
371
|
+
}
|
372
|
+
|
373
|
+
static void
|
374
|
+
timers_finalize(rb_mt_timers *timers)
|
375
|
+
{
|
376
|
+
if (timers->timers) {
|
377
|
+
uint32_t i;
|
378
|
+
for(i = 0; i < timers->count; i++) {
|
379
|
+
free(timers->timers[i]);
|
380
|
+
}
|
381
|
+
free(timers->timers);
|
382
|
+
timers->timers = NULL;
|
383
|
+
}
|
384
|
+
timers->count = 0;
|
385
|
+
timers->capa = 0;
|
386
|
+
}
|
387
|
+
|
388
|
+
#define tms_at(_timers, at) (_timers)->timers[(at)]
|
389
|
+
#define tms_ts_at(timers, at) tms_at((timers), (at))->ts
|
390
|
+
|
391
|
+
static void
|
392
|
+
timers_move_last(rb_mt_timers *timers, uint32_t to)
|
393
|
+
{
|
394
|
+
if (to < timers->count - 1) {
|
395
|
+
rb_mt_timer *last = tms_at(timers, timers->count - 1);
|
396
|
+
tms_at(timers, to) = last;
|
397
|
+
last->index = to;
|
398
|
+
}
|
399
|
+
timers->count--;
|
400
|
+
}
|
401
|
+
|
402
|
+
static inline void
|
403
|
+
timers_swap(rb_mt_timers *timers, uint32_t i, uint32_t j)
|
404
|
+
{
|
405
|
+
rb_mt_timer *itmp = tms_at(timers, j);
|
406
|
+
rb_mt_timer *jtmp = tms_at(timers, i);
|
407
|
+
tms_at(timers, i) = itmp;
|
408
|
+
tms_at(timers, j) = jtmp;
|
409
|
+
itmp->index = i;
|
410
|
+
jtmp->index = j;
|
411
|
+
}
|
412
|
+
|
413
|
+
static void timers_heapify_up(rb_mt_timers *timers, uint32_t pos);
|
414
|
+
|
415
|
+
static void
|
416
|
+
timers_insert(rb_mt_timers *timers, rb_mt_timer *timer)
|
417
|
+
{
|
418
|
+
if (timers->count == timers->capa) {
|
419
|
+
rb_mt_timer **new_timers;
|
420
|
+
size_t new_capa = timers->capa << 1;
|
421
|
+
new_timers = realloc(timers->timers, new_capa * sizeof(rb_mt_timer*));
|
422
|
+
if (new_timers == NULL) {
|
423
|
+
rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for timers heap");
|
424
|
+
}
|
425
|
+
timers->timers = new_timers;
|
426
|
+
timers->capa = new_capa;
|
427
|
+
}
|
428
|
+
tms_at(timers, timers->count) = timer;
|
429
|
+
timer->index = timers->count;
|
430
|
+
timers->count++;
|
431
|
+
timers_heapify_up(timers, timer->index);
|
432
|
+
}
|
433
|
+
|
434
|
+
static void
|
435
|
+
timers_heapify_up(rb_mt_timers *timers, uint32_t pos)
|
436
|
+
{
|
437
|
+
hrtime_t cur_ts = tms_ts_at(timers, pos);
|
438
|
+
uint32_t higher = (pos - 1) / 2;
|
439
|
+
while (pos && tms_ts_at(timers, higher) > cur_ts) {
|
440
|
+
timers_swap(timers, higher, pos);
|
441
|
+
pos = higher;
|
442
|
+
higher = (pos - 1) / 2;
|
443
|
+
}
|
444
|
+
}
|
445
|
+
|
446
|
+
static void
|
447
|
+
timers_heapify_down(rb_mt_timers *timers, uint32_t pos)
|
448
|
+
{
|
449
|
+
uint32_t count = timers->count;
|
450
|
+
uint32_t middle = (timers->count - 2) / 2;
|
451
|
+
hrtime_t cur_ts = tms_ts_at(timers, pos);
|
452
|
+
if (count == 1) return;
|
453
|
+
while (pos <= middle) {
|
454
|
+
uint32_t min_pos = pos;
|
455
|
+
hrtime_t ch_ts, min_ts = cur_ts;
|
456
|
+
|
457
|
+
if ((ch_ts = tms_ts_at(timers, pos * 2 + 1)) < min_ts) {
|
458
|
+
min_pos = pos * 2 + 1;
|
459
|
+
min_ts = ch_ts;
|
460
|
+
}
|
461
|
+
|
462
|
+
if (pos * 2 + 2 < count && tms_ts_at(timers, pos * 2 + 2) < min_ts) {
|
463
|
+
min_pos = pos * 2 + 2;
|
464
|
+
}
|
465
|
+
|
466
|
+
if (min_pos == pos) break;
|
467
|
+
timers_swap(timers, pos, min_pos);
|
468
|
+
pos = min_pos;
|
469
|
+
}
|
470
|
+
}
|
471
|
+
|
472
|
+
static void
|
473
|
+
timers_heapify_item(rb_mt_timers *timers, uint32_t pos)
|
474
|
+
{
|
475
|
+
if (pos && tms_ts_at(timers, pos) < tms_ts_at(timers, (pos - 1) / 2)) {
|
476
|
+
timers_heapify_up(timers, pos);
|
477
|
+
} else {
|
478
|
+
timers_heapify_down(timers, pos);
|
479
|
+
}
|
480
|
+
}
|
481
|
+
|
482
|
+
static inline hrtime_t
|
483
|
+
timers_minimum(rb_mt_timers *timers)
|
484
|
+
{
|
485
|
+
if (timers->count) {
|
486
|
+
return tms_ts_at(timers, 0);
|
487
|
+
} else {
|
488
|
+
return 0;
|
489
|
+
}
|
490
|
+
}
|
491
|
+
|
492
|
+
static inline rb_mt_timer *
|
493
|
+
timers_first(rb_mt_timers *timers)
|
494
|
+
{
|
495
|
+
if (timers->count) {
|
496
|
+
return tms_at(timers, 0);
|
497
|
+
} else {
|
498
|
+
return 0;
|
499
|
+
}
|
500
|
+
}
|
501
|
+
|
502
|
+
/* Remove `timer` from the heap: replace it with the last element and
 * re-heapify that slot, unless it already was the last element. */
static void
timers_remove_timer(rb_mt_timers *timers, rb_mt_timer *timer)
{
    uint32_t at = timer->index;
    /* mark as detached; -1 is the "not scheduled" sentinel */
    timer->index = -1;
    if (at < timers->count - 1) {
        timers_move_last(timers, at);
        timers_heapify_item(timers, at);
    } else {
        timers->count--;
    }
}
|
514
|
+
|
515
|
+
/* Fire every timer whose deadline lies before `now`.  Each expired timer
 * is rescheduled one full period past `now` and re-heapified *before* its
 * handler runs, so the heap stays consistent even if the handler
 * manipulates the timer list.
 * NOTE(review): timers appear to repeat until explicitly removed via
 * timers_remove_timer — confirm against libcouchbase's timer contract. */
static void
timers_run(rb_mt_timers *timers, hrtime_t now)
{
    hrtime_t next_time = timers_minimum(timers);
    while (next_time && next_time < now) {
        rb_mt_timer *first = timers_first(timers);

        first->ts = now + first->period;
        timers_heapify_item(timers, 0);

        /* -1 / 0: timer callbacks get no socket and no event flags */
        first->handler(-1, 0, first->cb_data);

        next_time = timers_minimum(timers);
    }
}
|
530
|
+
/* timers heap end */
|
531
|
+
|
532
|
+
/* callbacks array */
|
533
|
+
/* Growable queue of events whose handlers are ready to run after a
 * select/poll pass.  Slots may be NULLed out (by callbacks_remove) while
 * the queue is being drained. */
typedef struct rb_mt_callbacks rb_mt_callbacks;
struct rb_mt_callbacks {
    uint32_t capa;          /* allocated slots in `events` */
    uint32_t count;         /* queued entries (including NULLed slots) */
    rb_mt_event **events;   /* heap-allocated array of queued events */
};
|
539
|
+
|
540
|
+
static int
|
541
|
+
callbacks_init(rb_mt_callbacks *callbacks)
|
542
|
+
{
|
543
|
+
rb_mt_event **new_events = calloc(4, sizeof(*new_events));
|
544
|
+
if (new_events == NULL) {
|
545
|
+
return 0;
|
546
|
+
}
|
547
|
+
callbacks->events = new_events;
|
548
|
+
callbacks->capa = 4;
|
549
|
+
callbacks->count = 0;
|
550
|
+
return 1;
|
551
|
+
}
|
552
|
+
|
553
|
+
static void
|
554
|
+
callbacks_finalize(rb_mt_callbacks *callbacks)
|
555
|
+
{
|
556
|
+
if (callbacks->events) {
|
557
|
+
free(callbacks->events);
|
558
|
+
callbacks->events = NULL;
|
559
|
+
}
|
560
|
+
callbacks->capa = 0;
|
561
|
+
callbacks->count = 0;
|
562
|
+
}
|
563
|
+
|
564
|
+
/* Append `event` to the pending-callbacks queue, doubling the backing
 * array when full.  On realloc failure it raises (longjmps) via
 * rb_raise; that is safe here because the old array is still intact and
 * no queue state has been modified yet.
 * NOTE(review): this does not record the slot index in event->loop_index,
 * although callbacks_remove reads it as a queue index — confirm where
 * loop_index is assigned elsewhere in the file. */
static void
callbacks_push(rb_mt_callbacks *callbacks, rb_mt_event *event)
{
    if (callbacks->count == callbacks->capa) {
        uint32_t new_capa = callbacks->capa << 1;
        rb_mt_event **new_events = realloc(callbacks->events, new_capa * sizeof(*new_events));
        if (new_events == NULL) {
            rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for callbacks array");
        }
        callbacks->capa = new_capa;
        callbacks->events = new_events;
    }
    callbacks->events[callbacks->count] = event;
    callbacks->count++;
}
|
579
|
+
|
580
|
+
/* Detach `event` from the pending queue (if queued) so a deleted or
 * re-registered event cannot fire from a stale slot; the slot is NULLed
 * rather than compacted so indices of other entries stay valid. */
static void
callbacks_remove(rb_mt_callbacks *callbacks, rb_mt_event *event)
{
    int i = event->loop_index;
    if (i >= 0) {
        /* sanity check: the recorded index must point back at us */
        if (callbacks->events[i] != event) {
            rb_raise(rb_eIndexError, "callback index belongs to different callback");
        }
        event->loop_index = -1;
        callbacks->events[i] = NULL;
    }
}
|
592
|
+
|
593
|
+
/* Drain the queue, invoking each event's handler with the flags that
 * actually fired.  NULL slots (removed mid-drain by callbacks_remove,
 * possibly from inside a handler) are skipped. */
static void
callbacks_run(rb_mt_callbacks *callbacks)
{
    uint32_t i;
    for(i = 0; i < callbacks->count; i++) {
        rb_mt_event *cb = callbacks->events[i];
        if (cb) {
            cb->handler(cb->socket, cb->actual_flags, cb->cb_data);
        }
    }
    callbacks->count = 0;
}
|
605
|
+
|
606
|
+
static void
|
607
|
+
callbacks_clean(rb_mt_callbacks *callbacks)
|
608
|
+
{
|
609
|
+
uint32_t i;
|
610
|
+
for(i = 0; i < callbacks->count; i++) {
|
611
|
+
if (callbacks->events[i]) {
|
612
|
+
callbacks->events[i]->loop_index = -1;
|
613
|
+
callbacks->events[i] = NULL;
|
614
|
+
}
|
615
|
+
}
|
616
|
+
callbacks->count = 0;
|
617
|
+
}
|
618
|
+
/* callbacks array end */
|
619
|
+
|
620
|
+
/* One event loop instance: registered socket events, the timer min-heap,
 * the per-iteration ready-callback queue, and the run flag that
 * lcb_io_stop_event_loop clears to break out of loop_run. */
typedef struct rb_mt_loop rb_mt_loop;
struct rb_mt_loop {
    rb_mt_events events;        /* registered socket watchers */
    rb_mt_timers timers;        /* pending timers (min-heap) */
    rb_mt_callbacks callbacks;  /* handlers ready to run this iteration */
    short run;                  /* loop keeps iterating while non-zero */
};
|
627
|
+
|
628
|
+
static rb_mt_loop*
|
629
|
+
loop_create()
|
630
|
+
{
|
631
|
+
rb_mt_loop *loop = calloc(1, sizeof(*loop));
|
632
|
+
if (loop == NULL) return NULL;
|
633
|
+
if (!events_init(&loop->events)) goto free_loop;
|
634
|
+
if (!timers_init(&loop->timers)) goto free_events;
|
635
|
+
if (!callbacks_init(&loop->callbacks)) goto free_timers;
|
636
|
+
return loop;
|
637
|
+
|
638
|
+
free_timers:
|
639
|
+
timers_finalize(&loop->timers);
|
640
|
+
free_events:
|
641
|
+
events_finalize(&loop->events);
|
642
|
+
free_loop:
|
643
|
+
free(loop);
|
644
|
+
return NULL;
|
645
|
+
}
|
646
|
+
|
647
|
+
/* Tear down all three subsystems of the loop and free the loop itself.
 * Counterpart of loop_create. */
static void
loop_destroy(rb_mt_loop *loop)
{
    events_finalize(&loop->events);
    timers_finalize(&loop->timers);
    callbacks_finalize(&loop->callbacks);
    free(loop);
}
|
655
|
+
|
656
|
+
/* Unregister `event` from the loop: drop it from the socket watcher list
 * (if registered) and from the pending-callback queue, so it can neither
 * be polled nor fired after removal. */
static void
loop_remove_event(rb_mt_loop *loop, rb_mt_event *event)
{
    if (event->inserted) {
        events_remove(&loop->events, event);
    }
    callbacks_remove(&loop->callbacks, event);
}
|
664
|
+
|
665
|
+
static void
|
666
|
+
loop_enque_events(rb_mt_callbacks *callbacks, rb_mt_event *sock, short flags)
|
667
|
+
{
|
668
|
+
while (sock) {
|
669
|
+
short actual = sock->flags & flags;
|
670
|
+
if (actual) {
|
671
|
+
sock->actual_flags = actual;
|
672
|
+
callbacks_push(callbacks, (rb_mt_event*)sock);
|
673
|
+
}
|
674
|
+
sock = sock->next;
|
675
|
+
}
|
676
|
+
}
|
677
|
+
|
678
|
+
/* loop select implementation */
|
679
|
+
#ifndef HAVE_RB_THREAD_FD_SELECT
|
680
|
+
typedef fd_set rb_fdset_t;
|
681
|
+
#define rb_fd_init FD_ZERO
|
682
|
+
#define rb_fd_set FD_SET
|
683
|
+
#define rb_fd_isset FD_ISSET
|
684
|
+
#define rb_fd_term(set) (void)0
|
685
|
+
#define rb_thread_fd_select rb_thread_select
|
686
|
+
#endif
|
687
|
+
|
688
|
+
/* Argument bundle for the select-based driver, passed through rb_ensure
 * so loop_select_cleanup can release the fd sets on any exit path. */
typedef struct loop_select_arg {
    rb_mt_loop *loop;
    rb_fdset_t in, out;  /* read/write fd sets for rb_thread_fd_select */
} ls_arg;
|
692
|
+
|
693
|
+
/* One iteration of the event loop using rb_thread_fd_select (releases
 * the GVL while blocked).  Computes the timeout from the nearest timer
 * deadline, builds read/write fd sets from the registered watchers,
 * waits, queues and runs socket callbacks for ready descriptors, then
 * fires expired timers.  Clears loop->run when nothing is left to wait
 * for.  Always returns Qnil (rb_ensure body signature). */
static VALUE
loop_run_select(VALUE argp)
{
    ls_arg *args = (ls_arg*) argp;
    rb_mt_loop *loop = args->loop;
    rb_fdset_t *in = NULL, *out = NULL;
    struct timeval timeout;
    struct timeval *timeoutp = NULL;
    int result, max = 0;
    hrtime_t now, next_time;

    /* translate the nearest timer deadline (nanoseconds) into a
     * struct timeval timeout; no timers => block indefinitely (NULL) */
    next_time = timers_minimum(&loop->timers);
    if (next_time) {
        now = gethrtime();
        if (next_time <= now) {
            timeout.tv_sec = 0;
            timeout.tv_usec = 0;
        } else {
            hrtime_t hrto = (next_time - now) / 1000; /* ns -> us */
            timeout.tv_sec = (long)(hrto / 1000000);
            timeout.tv_usec = (long)(hrto % 1000000);
        }
        timeoutp = &timeout;
    }

    if (loop->events.count) {
        uint32_t i;
        rb_fd_init(&args->in);
        rb_fd_init(&args->out);
        /* `in`/`out` stay NULL unless at least one watcher wants them */
        for(i = 0; i < loop->events.count; i++) {
            rb_mt_socket_list *list = &loop->events.sockets[i];
            if (list->flags & LCB_READ_EVENT) {
                in = &args->in;
                rb_fd_set(list->socket, in);
            }
            if (list->flags & LCB_WRITE_EVENT) {
                out = &args->out;
                rb_fd_set(list->socket, out);
            }
        }
        max = events_max_fd(&loop->events) + 1;
    }

    result = rb_thread_fd_select(max, in, out, NULL, timeoutp);

    if (result < 0) {
        rb_sys_fail("rb_thread_fd_select");
    }
    /* fix current time so that socket callbacks will not cause timers timeouts */
    if (next_time) {
        now = gethrtime();
    }

    if (result > 0) {
        uint32_t i;
        /* `result` counts ready descriptors; stop early once all seen */
        for(i = 0; i < loop->events.count && result; i++) {
            rb_mt_socket_list *list = loop->events.sockets + i;
            rb_mt_event *sock = list->first;
            short flags = 0;
            if (in && rb_fd_isset(list->socket, in)) {
                flags |= LCB_READ_EVENT;
                result--;
            }
            if (out && rb_fd_isset(list->socket, out)) {
                flags |= LCB_WRITE_EVENT;
                result--;
            }
            if (flags) {
                loop_enque_events(&loop->callbacks, sock, flags);
            }
        }
        callbacks_run(&loop->callbacks);
    }

    if (next_time) {
        timers_run(&loop->timers, now);
    }
    /* nothing left to watch or wait on: stop the loop */
    if (loop->events.count == 0 && loop->timers.count == 0) {
        loop->run = 0;
    }
    return Qnil;
}
|
775
|
+
|
776
|
+
/* rb_ensure() cleanup for loop_run_select: release both fd sets and drop
 * any callbacks that were queued but not executed.
 * NOTE(review): rb_fd_term is called even when loop_run_select skipped
 * rb_fd_init (no registered events); with native rb_fdset_t that terms
 * an uninitialized set — confirm args is zeroed by the caller. */
static VALUE
loop_select_cleanup(VALUE argp)
{
    ls_arg *args = (ls_arg*) argp;
    rb_fd_term(&args->in);
    rb_fd_term(&args->out);
    callbacks_clean(&args->loop->callbacks);
    return Qnil;
}
|
785
|
+
/* loop select implementaion end */
|
786
|
+
|
787
|
+
/* loop poll implementation */
|
788
|
+
#ifdef HAVE_POLL
|
789
|
+
/* code influenced by ruby's source and cool.io */
|
790
|
+
#define POLLIN_SET (POLLIN | POLLHUP | POLLERR)
|
791
|
+
#define POLLOUT_SET (POLLOUT | POLLHUP | POLLERR)
|
792
|
+
|
793
|
+
#ifndef HAVE_PPOLL
|
794
|
+
#if SIZEOF_TIME_T == SIZEOF_LONG
|
795
|
+
typedef unsigned long unsigned_time_t;
|
796
|
+
#elif SIZEOF_TIME_T == SIZEOF_INT
|
797
|
+
typedef unsigned int unsigned_time_t;
|
798
|
+
#elif SIZEOF_TIME_T == SIZEOF_LONG_LONG
|
799
|
+
typedef unsigned LONG_LONG unsigned_time_t;
|
800
|
+
#else
|
801
|
+
# error cannot find integer type which size is same as time_t.
|
802
|
+
#endif
|
803
|
+
#define TIMET_MAX (~(time_t)0 <= 0 ? (time_t)((~(unsigned_time_t)0) >> 1) : (time_t)(~(unsigned_time_t)0))
|
804
|
+
/* Fallback ppoll() built on poll() for platforms without the real one.
 * Converts the timespec to milliseconds (rounding nanoseconds up) and
 * saturates to -1 (infinite wait) on overflow; `sigmask` is ignored, so
 * this is not async-signal-equivalent to genuine ppoll. */
static int
ppoll(struct pollfd *fds, nfds_t nfds,
        const struct timespec *ts, const sigset_t *sigmask)
{
    int timeout_ms;

    if (ts) {
        int tmp, tmp2;

        if (ts->tv_sec > TIMET_MAX/1000) {
            timeout_ms = -1;
        } else {
            tmp = ts->tv_sec * 1000;
            /* round nanoseconds up to whole milliseconds */
            tmp2 = (ts->tv_nsec + 999999) / (1000 * 1000);
            if (TIMET_MAX - tmp < tmp2) {
                timeout_ms = -1;
            } else {
                timeout_ms = tmp + tmp2;
            }
        }
    } else {
        timeout_ms = -1;
    }

    (void)sigmask;

    return poll(fds, nfds, timeout_ms);
}
|
832
|
+
#endif
|
833
|
+
|
834
|
+
/* Argument bundle for the poll-based driver, shared between
 * loop_run_poll, the GVL-releasing loop_blocking_poll, and the
 * rb_ensure cleanup loop_poll_cleanup. */
typedef struct poll_args lp_arg;
struct poll_args {
    rb_mt_loop *loop;
    struct pollfd *fds;   /* heap-allocated pollfd array (owned here) */
    nfds_t nfd;           /* number of entries in `fds` */
    struct timespec *ts;  /* poll timeout, NULL => block indefinitely */
    int result;           /* ppoll return value */
    int lerrno;           /* errno captured right after a failed ppoll */
};
|
843
|
+
|
844
|
+
#ifdef HAVE_RB_THREAD_BLOCKING_REGION
|
845
|
+
/* Body executed without the GVL via rb_thread_blocking_region: run ppoll
 * and capture errno immediately, since later Ruby API calls may clobber
 * it.  Results are passed back through the shared lp_arg. */
static VALUE
loop_blocking_poll(void *argp)
{
    lp_arg *args = argp;
    args->result = ppoll(args->fds, args->nfd, args->ts, NULL);
    if (args->result < 0) args->lerrno = errno;
    return Qnil;
}
|
853
|
+
#endif
|
854
|
+
|
855
|
+
/* One iteration of the event loop using poll(2), preferred over select
 * for large descriptor counts.  Builds a pollfd array from the watcher
 * list (which is kept sorted by socket — see the merge walk below),
 * waits with a timeout derived from the nearest timer, dispatches socket
 * callbacks and expired timers, and clears loop->run when idle.
 * Retries transparently on EINTR/ERESTART.  Always returns Qnil
 * (rb_ensure body signature); args->fds is freed by loop_poll_cleanup. */
static VALUE
loop_run_poll(VALUE argp)
{
    lp_arg *args = (struct poll_args *)argp;
    rb_mt_loop *loop = args->loop;
    struct timespec ts;
    hrtime_t now, next_time;

    if (loop->events.count) {
        uint32_t i;
        args->fds = calloc(loop->events.count, sizeof(struct pollfd));
        if (args->fds == NULL) {
            rb_raise(cb_eClientNoMemoryError, "failed to allocate memory for pollfd");
        }
        for(i = 0; i < loop->events.count; i++) {
            rb_mt_socket_list *list = &loop->events.sockets[i];
            args->fds[i].fd = list->socket;
            args->fds[i].events =
                (list->flags & LCB_READ_EVENT ? POLLIN : 0) |
                (list->flags & LCB_WRITE_EVENT ? POLLOUT : 0);
        }
        args->nfd = loop->events.count;
    }

retry:
    /* recompute the timeout on every retry: time has passed */
    next_time = timers_minimum(&loop->timers);
    if (next_time) {
        now = gethrtime();
        if (next_time <= now) {
            ts.tv_sec = 0;
            ts.tv_nsec = 0;
        } else {
            hrtime_t hrto = next_time - now; /* nanoseconds */
            ts.tv_sec = (long)(hrto / 1000000000);
            ts.tv_nsec = (long)(hrto % 1000000000);
        }
        args->ts = &ts;
    } else {
        args->ts = NULL; /* no timers: block until I/O */
    }

#ifdef HAVE_RB_THREAD_BLOCKING_REGION
    /* Ruby 1.9+: release the GVL for the blocking wait */
    rb_thread_blocking_region(loop_blocking_poll, args, RUBY_UBF_PROCESS, NULL);
#else
    /* Ruby 1.8 green threads: cannot block the whole VM */
    if (rb_thread_alone()) {
        TRAP_BEG;
        args->result = ppoll(args->fds, args->nfd, args->ts, NULL);
        if (args->result < 0) args->lerrno = errno;
        TRAP_END;
    } else {
        /* other green threads must run: poll in short slices and fake
         * EINTR so the retry path re-enters after a scheduler pass */
        struct timespec mini_pause;
        int exact = 0;
        mini_pause.tv_sec = 0;
        /* 5 millisecond pause */
        mini_pause.tv_nsec = 5000000;
        if (args->ts && ts.tv_sec == 0 && ts.tv_nsec < 5000000) {
            mini_pause.tv_nsec = ts.tv_nsec;
            exact = 1; /* real deadline is inside this slice */
        }
        TRAP_BEG;
        args->result = ppoll(args->fds, args->nfd, &mini_pause, NULL);
        if (args->result < 0) args->lerrno = errno;
        TRAP_END;
        if (args->result == 0 && !exact) {
            args->result = -1;
            args->lerrno = EINTR;
        }
    }
#endif

    if (args->result < 0) {
        errno = args->lerrno;
        switch (errno) {
        case EINTR:
#ifdef ERESTART
        case ERESTART:
#endif
#ifndef HAVE_RB_THREAD_BLOCKING_REGION
            rb_thread_schedule(); /* let green threads run before retry */
#endif
            goto retry;
        }
        rb_sys_fail("poll");
        return Qnil;
    }

    /* fix current time so socket callbacks do not trigger timer runs */
    if (next_time) {
        now = gethrtime();
    }

    if (args->result > 0) {
        uint32_t cnt = args->result;
        uint32_t fd_n = 0, ev_n = 0;
        /* merge-walk: both args->fds and events.sockets are ordered by
         * socket, so advance whichever side is behind */
        while (cnt && fd_n < args->nfd && ev_n < loop->events.count) {
            struct pollfd *res = args->fds + fd_n;
            rb_mt_socket_list *list = loop->events.sockets + ev_n;
            rb_mt_event *sock = list->first;

            /* if plugin used correctly, this checks are noop */
            if (res->fd < list->socket) {
                fd_n++;
                continue;
            } else if (res->fd > list->socket) {
                ev_n++;
                continue;
            }

            if (res->revents) {
                /* HUP/ERR are reported as both readable and writable */
                short flags =
                    ((res->revents & POLLIN_SET) ? LCB_READ_EVENT : 0) |
                    ((res->revents & POLLOUT_SET) ? LCB_WRITE_EVENT : 0);
                cnt--;
                loop_enque_events(&loop->callbacks, sock, flags);
            }
            fd_n++;
            ev_n++;
        }
        callbacks_run(&loop->callbacks);
    }

    if (next_time) {
        timers_run(&loop->timers, now);
    }
    /* nothing left to watch or wait on: stop the loop */
    if (loop->events.count == 0 && loop->timers.count == 0) {
        loop->run = 0;
    }
    return Qnil;
}
|
983
|
+
|
984
|
+
static VALUE
|
985
|
+
loop_poll_cleanup(VALUE argp)
|
986
|
+
{
|
987
|
+
lp_arg *args = (struct poll_args *)argp;
|
988
|
+
if (args->fds) {
|
989
|
+
free(args->fds);
|
990
|
+
}
|
991
|
+
callbacks_clean(&args->loop->callbacks);
|
992
|
+
return Qnil;
|
993
|
+
}
|
994
|
+
#endif
|
995
|
+
/* loop poll implementation end */
|
996
|
+
|
997
|
+
/* Drive the event loop until loop->run is cleared (by
 * lcb_io_stop_event_loop or by running out of events and timers).
 * Each iteration picks a driver: poll when the largest fd is >= 128
 * (select's fd_set becomes unwieldy there), otherwise
 * rb_thread_fd_select.  rb_ensure guarantees per-iteration cleanup even
 * if a callback raises. */
static void
loop_run(rb_mt_loop *loop)
{

    loop->run = 1;

    while(loop->run) {
#ifdef HAVE_POLL
        /* prefer use of poll when it gives some benefits, but use rb_thread_fd_select when it is sufficient */
        lcb_socket_t max = events_max_fd(&loop->events);
        int use_poll = max >= 128;
        if (use_poll) {
            lp_arg args;
            memset(&args, 0, sizeof(args));
            args.loop = loop;
            rb_ensure(loop_run_poll, (VALUE)&args, loop_poll_cleanup, (VALUE)&args);
        } else
#endif
        {
            ls_arg args;
            memset(&args, 0, sizeof(args));
            args.loop = loop;
            rb_ensure(loop_run_select, (VALUE)&args, loop_select_cleanup, (VALUE)&args);
        }
    }
}
|
1023
|
+
|
1024
|
+
static void *
|
1025
|
+
lcb_io_create_event(struct lcb_io_opt_st *iops)
|
1026
|
+
{
|
1027
|
+
rb_mt_event *event = calloc(1, sizeof(*event));
|
1028
|
+
(void)iops;
|
1029
|
+
event->loop_index = -1;
|
1030
|
+
return event;
|
1031
|
+
}
|
1032
|
+
|
1033
|
+
/* libcouchbase IO-plugin hook: (re)register `event` for `flags` on
 * `sock` with the given handler.  No-op when nothing changed; otherwise
 * the event is detached, updated, re-inserted if needed, and the
 * per-socket aggregate flags are recomputed when interest narrowed.
 * Always returns 0 (success). */
static int
lcb_io_update_event(struct lcb_io_opt_st *iops,
        lcb_socket_t sock,
        void *eventp,
        short flags,
        void *cb_data,
        void (*handler)(lcb_socket_t sock,
            short which,
            void *cb_data))
{
    rb_mt_loop *loop = iops->v.v0.cookie;
    rb_mt_event *event = eventp;
    short old_flags = event->flags;

    /* fast path: registration is already exactly what was requested */
    if (event->inserted && old_flags == flags &&
            cb_data == event->cb_data && handler == event->handler)
    {
        return 0;
    }
    loop_remove_event(loop, event);
    event->flags = flags;
    event->cb_data = cb_data;
    event->handler = handler;
    event->socket = sock;
    if (!event->inserted) {
        events_insert(&loop->events, event);
    }
    /* old interest not a subset of new: recompute the socket's flags */
    if ((old_flags & flags) != old_flags) {
        events_fix_flags(&loop->events, sock);
    }
    return 0;
}
|
1065
|
+
|
1066
|
+
/* libcouchbase IO-plugin hook: unregister an event from the loop.
 * The socket argument is unused — the event carries its own socket. */
static void
lcb_io_delete_event(struct lcb_io_opt_st *iops,
        lcb_socket_t sock,
        void *event)
{
    loop_remove_event((rb_mt_loop*)iops->v.v0.cookie, (rb_mt_event*)event);
    (void)sock;
}
|
1074
|
+
|
1075
|
+
/* libcouchbase IO-plugin hook: unregister and free an event handle. */
static void
lcb_io_destroy_event(struct lcb_io_opt_st *iops,
        void *event)
{
    /* -1: no meaningful socket; delete_event ignores it anyway */
    lcb_io_delete_event(iops, -1, event);
    free(event);
}
|
1082
|
+
|
1083
|
+
static void *
|
1084
|
+
lcb_io_create_timer(struct lcb_io_opt_st *iops)
|
1085
|
+
{
|
1086
|
+
rb_mt_timer *timer = calloc(1, sizeof(*timer));
|
1087
|
+
timer->index = -1;
|
1088
|
+
(void)iops;
|
1089
|
+
return timer;
|
1090
|
+
}
|
1091
|
+
|
1092
|
+
/* libcouchbase IO-plugin hook: (re)arm `event` to fire `usec`
 * microseconds from now.  A timer already in the heap (index != -1) is
 * re-heapified in place; otherwise it is inserted.  Always returns 0. */
static int
lcb_io_update_timer(struct lcb_io_opt_st *iops, void *event,
        lcb_uint32_t usec, void *cb_data,
        void (*handler)(lcb_socket_t sock, short which, void *cb_data))
{
    rb_mt_loop *loop = iops->v.v0.cookie;
    rb_mt_timer *timer = event;

    /* microseconds -> nanoseconds (hrtime_t resolution) */
    timer->period = usec * (hrtime_t)1000;
    timer->ts = gethrtime() + timer->period;
    timer->cb_data = cb_data;
    timer->handler = handler;
    if (timer->index != -1) {
        timers_heapify_item(&loop->timers, timer->index);
    } else {
        timers_insert(&loop->timers, timer);
    }
    return 0;
}
|
1111
|
+
|
1112
|
+
/* libcouchbase IO-plugin hook: cancel a timer if it is currently
 * scheduled (index == -1 means it is not in the heap). */
static void
lcb_io_delete_timer(struct lcb_io_opt_st *iops, void *event)
{
    rb_mt_loop *loop = iops->v.v0.cookie;
    rb_mt_timer *timer = event;
    if (timer->index != -1) {
        timers_remove_timer(&loop->timers, timer);
    }
}
|
1121
|
+
|
1122
|
+
/* libcouchbase IO-plugin hook: cancel and free a timer handle. */
static void
lcb_io_destroy_timer(struct lcb_io_opt_st *iops, void *timer)
{
    lcb_io_delete_timer(iops, timer);
    free(timer);
}
|
1128
|
+
|
1129
|
+
/* libcouchbase IO-plugin hook: request loop_run to exit after the
 * current iteration by clearing the run flag. */
static void
lcb_io_stop_event_loop(struct lcb_io_opt_st *iops)
{
    rb_mt_loop *loop = iops->v.v0.cookie;
    loop->run = 0;
}
|
1135
|
+
|
1136
|
+
/* libcouchbase IO-plugin hook: run the loop until stopped or idle. */
static void
lcb_io_run_event_loop(struct lcb_io_opt_st *iops)
{
    rb_mt_loop *loop = iops->v.v0.cookie;
    loop_run(loop);
}
|
1142
|
+
|
1143
|
+
/* Destructor installed in the iops table: free the loop stored in the
 * cookie, then the iops structure itself (allocated by
 * cb_create_ruby_mt_io_opts, not by libcouchbase). */
static void
lcb_destroy_io_opts(struct lcb_io_opt_st *iops)
{
    rb_mt_loop *loop = iops->v.v0.cookie;
    loop_destroy(loop);
    free(iops);
}
|
1150
|
+
|
1151
|
+
LIBCOUCHBASE_API lcb_error_t
|
1152
|
+
cb_create_ruby_mt_io_opts(int version, lcb_io_opt_t *io, void *arg)
|
1153
|
+
{
|
1154
|
+
struct lcb_io_opt_st *ret;
|
1155
|
+
rb_mt_loop *loop;
|
1156
|
+
(void)arg;
|
1157
|
+
if (version != 0) {
|
1158
|
+
return LCB_PLUGIN_VERSION_MISMATCH;
|
1159
|
+
}
|
1160
|
+
ret = calloc(1, sizeof(*ret));
|
1161
|
+
if (ret == NULL) {
|
1162
|
+
free(ret);
|
1163
|
+
return LCB_CLIENT_ENOMEM;
|
1164
|
+
}
|
1165
|
+
|
1166
|
+
ret->version = 0;
|
1167
|
+
ret->dlhandle = NULL;
|
1168
|
+
ret->destructor = lcb_destroy_io_opts;
|
1169
|
+
/* consider that struct isn't allocated by the library,
|
1170
|
+
* `need_cleanup' flag might be set in lcb_create() */
|
1171
|
+
ret->v.v0.need_cleanup = 0;
|
1172
|
+
ret->v.v0.recv = lcb_io_recv;
|
1173
|
+
ret->v.v0.send = lcb_io_send;
|
1174
|
+
ret->v.v0.recvv = lcb_io_recvv;
|
1175
|
+
ret->v.v0.sendv = lcb_io_sendv;
|
1176
|
+
ret->v.v0.socket = lcb_io_socket;
|
1177
|
+
ret->v.v0.close = lcb_io_close;
|
1178
|
+
ret->v.v0.connect = lcb_io_connect;
|
1179
|
+
ret->v.v0.delete_event = lcb_io_delete_event;
|
1180
|
+
ret->v.v0.destroy_event = lcb_io_destroy_event;
|
1181
|
+
ret->v.v0.create_event = lcb_io_create_event;
|
1182
|
+
ret->v.v0.update_event = lcb_io_update_event;
|
1183
|
+
|
1184
|
+
ret->v.v0.delete_timer = lcb_io_delete_timer;
|
1185
|
+
ret->v.v0.destroy_timer = lcb_io_destroy_timer;
|
1186
|
+
ret->v.v0.create_timer = lcb_io_create_timer;
|
1187
|
+
ret->v.v0.update_timer = lcb_io_update_timer;
|
1188
|
+
|
1189
|
+
ret->v.v0.run_event_loop = lcb_io_run_event_loop;
|
1190
|
+
ret->v.v0.stop_event_loop = lcb_io_stop_event_loop;
|
1191
|
+
|
1192
|
+
loop = loop_create();
|
1193
|
+
if (loop == NULL) {
|
1194
|
+
free(ret);
|
1195
|
+
return LCB_CLIENT_ENOMEM;
|
1196
|
+
}
|
1197
|
+
ret->v.v0.cookie = loop;
|
1198
|
+
*io = ret;
|
1199
|
+
return LCB_SUCCESS;
|
1200
|
+
}
|
1201
|
+
#endif /* _WIN32 */
|