event 0.1.1 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 5039784abaf0f999187afbed8ca930bf99cf66cc089136da3b3e42ea6831b729
4
- data.tar.gz: 712d0693031af7bd93c1da59e5ad716d8be81a895d1d3ea23dca6cf80e42f5d0
3
+ metadata.gz: 0e30a246aa5197439b7c1d9511cd158b56b7f4965ef18a5eaee6057832d5bf8f
4
+ data.tar.gz: d579312da337ed20caf61f0fd6be719874a90b2f43860cc7315244abc59f11e0
5
5
  SHA512:
6
- metadata.gz: 3de44f33a4410d1cf5b52186ac606d1d03f33326f3124e69b50b705ac8840c2f7facc5924378353b2fce55324cbf353fc407bf60f041e6b0b0990d889eaad7dc
7
- data.tar.gz: e34bc430b17351ba2210e89c2d3cec4049eafd3e2e8c3297d777c6dfae6cdc9dc9a823b83bf503c4c1a9e7463f74064d0632f1e641cdac9d5034012f76bf50c9
6
+ metadata.gz: b183345fc94fd11872fd10b0401f87330b5334a016c43f4ffe6113c8a4c1f43c89a7eaba6319ff1530dd0f5364cb0a47843781abb45c233f4f9e619f837cdb8e
7
+ data.tar.gz: d702a3aa9ae7792a4d689a9afe23b5c99e93823b59b50de7058b1ff1ffd983b7bba30931b5581397deca8bc11726248c46516e7c933331fdb8c25feaeed7793e
@@ -0,0 +1,29 @@
1
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
2
+ //
3
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
4
+ // of this software and associated documentation files (the "Software"), to deal
5
+ // in the Software without restriction, including without limitation the rights
6
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7
+ // copies of the Software, and to permit persons to whom the Software is
8
+ // furnished to do so, subject to the following conditions:
9
+ //
10
+ // The above copyright notice and this permission notice shall be included in
11
+ // all copies or substantial portions of the Software.
12
+ //
13
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19
+ // THE SOFTWARE.
20
+
21
// Event readiness bitmask shared by all backends.
// Values are disjoint bits so they can be OR-ed together
// (e.g. READABLE | WRITABLE) and tested individually with `&`.
enum Event {
	READABLE = 1,
	PRIORITY = 2,
	WRITABLE = 4,
	ERROR = 8,
	HANGUP = 16
};
28
+
29
+ #include <ruby/thread.h>
@@ -0,0 +1,309 @@
1
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
2
+ //
3
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
4
+ // of this software and associated documentation files (the "Software"), to deal
5
+ // in the Software without restriction, including without limitation the rights
6
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7
+ // copies of the Software, and to permit persons to whom the Software is
8
+ // furnished to do so, subject to the following conditions:
9
+ //
10
+ // The above copyright notice and this permission notice shall be included in
11
+ // all copies or substantial portions of the Software.
12
+ //
13
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19
+ // THE SOFTWARE.
20
+
21
// NOTE(review): epoll.c including "kqueue.h" looks like a copy/paste slip —
// presumably this should be "epoll.h"; verify against the build.
#include "kqueue.h"
#include "backend.h"

#include <sys/epoll.h>

#include <errno.h>
#include <limits.h>
#include <time.h>
27
+
28
// The Event::Backend::EPoll class object, assigned during Init.
static VALUE Event_Backend_EPoll = Qnil;
// Cached Ruby method-name IDs, interned once in Init:
static ID id_fileno, id_transfer;

// Maximum number of events fetched per epoll_wait(2) call.
enum {EPOLL_MAX_EVENTS = 64};

// Per-instance state: the owning loop (an object responding to #transfer)
// and the epoll(7) file descriptor (-1 when closed).
struct Event_Backend_EPoll {
	VALUE loop;
	int descriptor;
};
37
+
38
// GC mark callback: keep the loop object alive while the backend lives.
void Event_Backend_EPoll_Type_mark(void *_data)
{
	struct Event_Backend_EPoll *data = _data;
	rb_gc_mark(data->loop);
}

// Close the epoll descriptor if it is open; idempotent.
static
void close_internal(struct Event_Backend_EPoll *data) {
	if (data->descriptor >= 0) {
		close(data->descriptor);
		data->descriptor = -1;
	}
}

// GC free callback: release the descriptor, then the struct itself.
void Event_Backend_EPoll_Type_free(void *_data)
{
	struct Event_Backend_EPoll *data = _data;
	
	close_internal(data);
	
	free(data);
}

// GC size callback: report the memory footprint of the wrapped struct.
size_t Event_Backend_EPoll_Type_size(const void *data)
{
	return sizeof(struct Event_Backend_EPoll);
}

// TypedData registration tying the GC callbacks above to wrapped instances.
static const rb_data_type_t Event_Backend_EPoll_Type = {
	.wrap_struct_name = "Event::Backend::EPoll",
	.function = {
		.dmark = Event_Backend_EPoll_Type_mark,
		.dfree = Event_Backend_EPoll_Type_free,
		.dsize = Event_Backend_EPoll_Type_size,
	},
	.data = NULL,
	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
};
76
+
77
// Allocate a new instance with no loop and no descriptor yet;
// the epoll descriptor is created later, in #initialize.
VALUE Event_Backend_EPoll_allocate(VALUE self) {
	struct Event_Backend_EPoll *data = NULL;
	VALUE instance = TypedData_Make_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
	
	data->loop = Qnil;
	data->descriptor = -1;
	
	return instance;
}
86
+
87
+ VALUE Event_Backend_EPoll_initialize(VALUE self, VALUE loop) {
88
+ struct Event_Backend_EPoll *data = NULL;
89
+ TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
90
+
91
+ data->loop = loop;
92
+ int result = epoll_create1(EPOLL_CLOEXEC);
93
+
94
+ if (result == -1) {
95
+ rb_sys_fail("epoll_create");
96
+ } else {
97
+ data->descriptor = result;
98
+
99
+ rb_update_max_fd(data->descriptor);
100
+ }
101
+
102
+ return self;
103
+ }
104
+
105
+ VALUE Event_Backend_EPoll_close(VALUE self) {
106
+ struct Event_Backend_EPoll *data = NULL;
107
+ TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
108
+
109
+ close_internal(data);
110
+
111
+ return Qnil;
112
+ }
113
+
114
+ static inline
115
+ uint32_t epoll_flags_from_events(int events) {
116
+ uint32_t flags = 0;
117
+
118
+ if (events & READABLE) flags |= EPOLLIN;
119
+ if (events & PRIORITY) flags |= EPOLLPRI;
120
+ if (events & WRITABLE) flags |= EPOLLOUT;
121
+
122
+ flags |= EPOLLRDHUP;
123
+ flags |= EPOLLONESHOT;
124
+
125
+ return flags;
126
+ }
127
+
128
+ static inline
129
+ int events_from_epoll_flags(uint32_t flags) {
130
+ int events = 0;
131
+
132
+ if (flags & EPOLLIN) events |= READABLE;
133
+ if (flags & EPOLLPRI) events |= PRIORITY;
134
+ if (flags & EPOLLOUT) events |= WRITABLE;
135
+
136
+ return events;
137
+ }
138
+
139
// Arguments threaded through rb_ensure for io_wait. `descriptor` is the fd
// actually registered with epoll; `duplicate` is the dup(2)'d fd (>= 0 only
// when the original fd was already registered), which must be closed after
// deregistration.
struct io_wait_arguments {
	struct Event_Backend_EPoll *data;
	int descriptor;
	int duplicate;
};

// Ensure-block: always deregister (and close any duplicate) when the wait
// finishes, whether it returns normally or raises.
static
VALUE io_wait_ensure(VALUE _arguments) {
	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
	
	if (arguments->duplicate >= 0) {
		epoll_ctl(arguments->data->descriptor, EPOLL_CTL_DEL, arguments->duplicate, NULL);
		
		close(arguments->duplicate);
	} else {
		epoll_ctl(arguments->data->descriptor, EPOLL_CTL_DEL, arguments->descriptor, NULL);
	}
	
	return Qnil;
};

// Transfer control to the loop; it resumes us with the raw epoll flags,
// which we translate back into the Event bitmask for the caller.
static
VALUE io_wait_transfer(VALUE _arguments) {
	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
	
	VALUE result = rb_funcall(arguments->data->loop, id_transfer, 0);
	
	return INT2NUM(events_from_epoll_flags(NUM2INT(result)));
};
168
+
169
+ VALUE Event_Backend_EPoll_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
170
+ struct Event_Backend_EPoll *data = NULL;
171
+ TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
172
+
173
+ struct epoll_event event = {0};
174
+
175
+ int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
176
+ int duplicate = -1;
177
+
178
+ event.events = epoll_flags_from_events(NUM2INT(events));
179
+ event.data.ptr = (void*)fiber;
180
+
181
+ // fprintf(stderr, "<- fiber=%p descriptor=%d\n", (void*)fiber, descriptor);
182
+
183
+ // A better approach is to batch all changes:
184
+ int result = epoll_ctl(data->descriptor, EPOLL_CTL_ADD, descriptor, &event);
185
+
186
+ if (result == -1 && errno == EEXIST) {
187
+ // The file descriptor was already inserted into epoll.
188
+ duplicate = descriptor = dup(descriptor);
189
+
190
+ rb_update_max_fd(duplicate);
191
+
192
+ if (descriptor == -1)
193
+ rb_sys_fail("dup");
194
+
195
+ result = epoll_ctl(data->descriptor, EPOLL_CTL_ADD, descriptor, &event);
196
+ }
197
+
198
+ if (result == -1) {
199
+ rb_sys_fail("epoll_ctl");
200
+ }
201
+
202
+ struct io_wait_arguments io_wait_arguments = {
203
+ .data = data,
204
+ .descriptor = descriptor,
205
+ .duplicate = duplicate
206
+ };
207
+
208
+ return rb_ensure(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_ensure, (VALUE)&io_wait_arguments);
209
+ }
210
+
211
+ static
212
+ int make_timeout(VALUE duration) {
213
+ if (duration == Qnil) {
214
+ return -1;
215
+ }
216
+
217
+ if (FIXNUM_P(duration)) {
218
+ return NUM2LONG(duration) * 1000L;
219
+ }
220
+
221
+ else if (RB_FLOAT_TYPE_P(duration)) {
222
+ double value = RFLOAT_VALUE(duration);
223
+
224
+ return value * 1000;
225
+ }
226
+
227
+ rb_raise(rb_eRuntimeError, "unable to convert timeout");
228
+ }
229
+
230
// Arguments for epoll_wait, shared between the GVL and no-GVL paths.
struct select_arguments {
	struct Event_Backend_EPoll *data;
	
	// Number of events returned by epoll_wait (-1 on error).
	int count;
	struct epoll_event events[EPOLL_MAX_EVENTS];
	
	// Timeout in milliseconds; 0 polls, -1 blocks indefinitely.
	int timeout;
};

// Raw epoll_wait call; signature matches rb_thread_call_without_gvl.
static
void * select_internal(void *_arguments) {
	struct select_arguments * arguments = (struct select_arguments *)_arguments;
	
	arguments->count = epoll_wait(arguments->data->descriptor, arguments->events, EPOLL_MAX_EVENTS, arguments->timeout);
	
	return NULL;
}

// Wait with the GVL released, so other Ruby threads can run meanwhile.
static
void select_internal_without_gvl(struct select_arguments *arguments) {
	rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
	
	if (arguments->count == -1) {
		rb_sys_fail("select_internal_without_gvl:epoll_wait");
	}
}

// Wait while holding the GVL.
static
void select_internal_with_gvl(struct select_arguments *arguments) {
	select_internal((void *)arguments);
	
	if (arguments->count == -1) {
		rb_sys_fail("select_internal_with_gvl:epoll_wait");
	}
}
265
+
266
+ VALUE Event_Backend_EPoll_select(VALUE self, VALUE duration) {
267
+ struct Event_Backend_EPoll *data = NULL;
268
+ TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
269
+
270
+ struct select_arguments arguments = {
271
+ .data = data,
272
+ .timeout = 0
273
+ };
274
+
275
+ select_internal_without_gvl(&arguments);
276
+
277
+ if (arguments.count == 0) {
278
+ arguments.timeout = make_timeout(duration);
279
+
280
+ if (arguments.timeout != 0) {
281
+ select_internal_with_gvl(&arguments);
282
+ }
283
+ }
284
+
285
+ for (int i = 0; i < arguments.count; i += 1) {
286
+ VALUE fiber = (VALUE)arguments.events[i].data.ptr;
287
+ VALUE result = INT2NUM(arguments.events[i].events);
288
+
289
+ // fprintf(stderr, "-> fiber=%p descriptor=%d\n", (void*)fiber, events[i].data.fd);
290
+
291
+ rb_funcall(fiber, id_transfer, 1, result);
292
+ }
293
+
294
+ return INT2NUM(arguments.count);
295
+ }
296
+
297
+ void Init_Event_Backend_EPoll(VALUE Event_Backend) {
298
+ id_fileno = rb_intern("fileno");
299
+ id_transfer = rb_intern("transfer");
300
+
301
+ Event_Backend_EPoll = rb_define_class_under(Event_Backend, "EPoll", rb_cObject);
302
+
303
+ rb_define_alloc_func(Event_Backend_EPoll, Event_Backend_EPoll_allocate);
304
+ rb_define_method(Event_Backend_EPoll, "initialize", Event_Backend_EPoll_initialize, 1);
305
+ rb_define_method(Event_Backend_EPoll, "close", Event_Backend_EPoll_close, 0);
306
+
307
+ rb_define_method(Event_Backend_EPoll, "io_wait", Event_Backend_EPoll_io_wait, 3);
308
+ rb_define_method(Event_Backend_EPoll, "select", Event_Backend_EPoll_select, 1);
309
+ }
@@ -0,0 +1,25 @@
1
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
2
+ //
3
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
4
+ // of this software and associated documentation files (the "Software"), to deal
5
+ // in the Software without restriction, including without limitation the rights
6
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7
+ // copies of the Software, and to permit persons to whom the Software is
8
+ // furnished to do so, subject to the following conditions:
9
+ //
10
+ // The above copyright notice and this permission notice shall be included in
11
+ // all copies or substantial portions of the Software.
12
+ //
13
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19
+ // THE SOFTWARE.
20
+
21
+ #pragma once
22
+
23
+ #define EVENT_BACKEND_EPOLL
24
+
25
+ void Init_Event_Backend_EPoll(VALUE Event_Backend);
@@ -0,0 +1,346 @@
1
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
2
+ //
3
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
4
+ // of this software and associated documentation files (the "Software"), to deal
5
+ // in the Software without restriction, including without limitation the rights
6
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7
+ // copies of the Software, and to permit persons to whom the Software is
8
+ // furnished to do so, subject to the following conditions:
9
+ //
10
+ // The above copyright notice and this permission notice shall be included in
11
+ // all copies or substantial portions of the Software.
12
+ //
13
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19
+ // THE SOFTWARE.
20
+
21
+ #include "kqueue.h"
22
+ #include "backend.h"
23
+
24
+ #include <sys/event.h>
25
+ #include <sys/ioctl.h>
26
+ #include <time.h>
27
+
28
// The Event::Backend::KQueue class object, assigned during Init.
static VALUE Event_Backend_KQueue = Qnil;
// Cached Ruby method-name IDs, interned once in Init:
static ID id_fileno, id_transfer;

// Maximum number of events fetched per kevent(2) call.
enum {KQUEUE_MAX_EVENTS = 64};

// Per-instance state: the owning loop (an object responding to #transfer)
// and the kqueue(2) file descriptor (-1 when closed).
struct Event_Backend_KQueue {
	VALUE loop;
	int descriptor;
};
37
+
38
// GC mark callback: keep the loop object alive while the backend lives.
void Event_Backend_KQueue_Type_mark(void *_data)
{
	struct Event_Backend_KQueue *data = _data;
	rb_gc_mark(data->loop);
}

// Close the kqueue descriptor if it is open; idempotent.
static
void close_internal(struct Event_Backend_KQueue *data) {
	if (data->descriptor >= 0) {
		close(data->descriptor);
		data->descriptor = -1;
	}
}

// GC free callback: release the descriptor, then the struct itself.
void Event_Backend_KQueue_Type_free(void *_data)
{
	struct Event_Backend_KQueue *data = _data;
	
	close_internal(data);
	
	free(data);
}

// GC size callback: report the memory footprint of the wrapped struct.
size_t Event_Backend_KQueue_Type_size(const void *data)
{
	return sizeof(struct Event_Backend_KQueue);
}

// TypedData registration tying the GC callbacks above to wrapped instances.
static const rb_data_type_t Event_Backend_KQueue_Type = {
	.wrap_struct_name = "Event::Backend::KQueue",
	.function = {
		.dmark = Event_Backend_KQueue_Type_mark,
		.dfree = Event_Backend_KQueue_Type_free,
		.dsize = Event_Backend_KQueue_Type_size,
	},
	.data = NULL,
	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
};
76
+
77
// Allocate a new instance with no loop and no descriptor yet;
// the kqueue descriptor is created later, in #initialize.
VALUE Event_Backend_KQueue_allocate(VALUE self) {
	struct Event_Backend_KQueue *data = NULL;
	VALUE instance = TypedData_Make_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
	
	data->loop = Qnil;
	data->descriptor = -1;
	
	return instance;
}

// Initialize the backend: store the loop and create the kqueue descriptor.
// Raises SystemCallError if kqueue() fails.
VALUE Event_Backend_KQueue_initialize(VALUE self, VALUE loop) {
	struct Event_Backend_KQueue *data = NULL;
	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
	
	data->loop = loop;
	int result = kqueue();
	
	if (result == -1) {
		rb_sys_fail("kqueue");
	} else {
		// kqueue() has no CLOEXEC variant; set close-on-exec via ioctl:
		ioctl(result, FIOCLEX);
		data->descriptor = result;
		
		rb_update_max_fd(data->descriptor);
	}
	
	return self;
}
105
+
106
+ VALUE Event_Backend_KQueue_close(VALUE self) {
107
+ struct Event_Backend_KQueue *data = NULL;
108
+ TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
109
+
110
+ close_internal(data);
111
+
112
+ return Qnil;
113
+ }
114
+
115
// Register one-shot read/write filters for `ident` on the kqueue
// `descriptor`, tagging each filter with the waiting fiber. Returns the
// events bitmask that was registered so the caller can remove the filters
// on error paths. Raises SystemCallError if registration fails.
static
int io_add_filters(int descriptor, int ident, int events, VALUE fiber) {
	int count = 0;
	struct kevent kevents[2] = {0};
	
	if (events & READABLE) {
		kevents[count].ident = ident;
		kevents[count].filter = EVFILT_READ;
		kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
		kevents[count].udata = (void*)fiber;
		
		// #ifdef EV_OOBAND
		// if (events & PRIORITY) {
		// 	kevents[count].flags |= EV_OOBAND;
		// }
		// #endif
		
		count++;
	}
	
	if (events & WRITABLE) {
		kevents[count].ident = ident;
		kevents[count].filter = EVFILT_WRITE;
		kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
		kevents[count].udata = (void*)fiber;
		count++;
	}
	
	// Submit the changelist only; we are not draining events here:
	int result = kevent(descriptor, kevents, count, NULL, 0, NULL);
	
	if (result == -1) {
		rb_sys_fail("kevent(register)");
	}
	
	return events;
}

// Best-effort removal of previously registered filters. Errors are ignored
// deliberately — one-shot filters may already have fired and auto-deleted.
static
void io_remove_filters(int descriptor, int ident, int events) {
	int count = 0;
	struct kevent kevents[2] = {0};
	
	if (events & READABLE) {
		kevents[count].ident = ident;
		kevents[count].filter = EVFILT_READ;
		kevents[count].flags = EV_DELETE;
		
		count++;
	}
	
	if (events & WRITABLE) {
		kevents[count].ident = ident;
		kevents[count].filter = EVFILT_WRITE;
		kevents[count].flags = EV_DELETE;
		count++;
	}
	
	// Ignore the result.
	kevent(descriptor, kevents, count, NULL, 0, NULL);
}
175
+
176
// Arguments threaded through rb_rescue for io_wait. `events` records the
// bitmask that was actually registered, so the rescue handler can remove
// exactly those filters.
struct io_wait_arguments {
	struct Event_Backend_KQueue *data;
	int events;
	int descriptor;
};

// Rescue handler: deregister our filters, then re-raise the exception.
static
VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
	
	io_remove_filters(arguments->data->descriptor, arguments->descriptor, arguments->events);
	
	rb_exc_raise(exception);
};

// Map a fired kqueue filter back to the Event bitmask; unknown filters
// map to 0.
static inline
int events_from_kqueue_filter(int filter) {
	if (filter == EVFILT_READ) return READABLE;
	if (filter == EVFILT_WRITE) return WRITABLE;
	
	return 0;
}

// Transfer control to the loop; it resumes us with the fired filter, which
// we translate into the Event bitmask for the caller.
static
VALUE io_wait_transfer(VALUE _arguments) {
	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
	
	VALUE result = rb_funcall(arguments->data->loop, id_transfer, 0);
	
	return INT2NUM(events_from_kqueue_filter(NUM2INT(result)));
};
207
+
208
// Suspend the current fiber until `io` is ready for `events`.
// Registers one-shot kqueue filters tagged with the fiber, transfers to
// the loop, and returns the fired Event bitmask. On exception the filters
// are removed by the rescue handler before re-raising.
VALUE Event_Backend_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
	struct Event_Backend_KQueue *data = NULL;
	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
	
	int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
	
	struct io_wait_arguments io_wait_arguments = {
		.events = io_add_filters(data->descriptor, descriptor, NUM2INT(events), fiber),
		.data = data,
		.descriptor = descriptor,
	};
	
	return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
}
222
+
223
// Convert a Ruby duration (nil, Integer seconds, or Float seconds) into a
// timespec for kevent. nil returns NULL, which means block indefinitely;
// otherwise `storage` is filled in and returned.
// Raises RuntimeError for any other type.
static
struct timespec * make_timeout(VALUE duration, struct timespec * storage) {
	if (duration == Qnil) {
		return NULL;
	}
	
	if (FIXNUM_P(duration)) {
		storage->tv_sec = NUM2TIMET(duration);
		storage->tv_nsec = 0;
		
		return storage;
	}
	
	else if (RB_FLOAT_TYPE_P(duration)) {
		double value = RFLOAT_VALUE(duration);
		time_t seconds = value;
		
		storage->tv_sec = seconds;
		// Fractional part, converted to nanoseconds:
		storage->tv_nsec = (value - seconds) * 1000000000L;
		
		return storage;
	}
	
	rb_raise(rb_eRuntimeError, "unable to convert timeout");
}

// True when the timeout is exactly zero (a non-blocking poll).
// A NULL timespec means "block indefinitely", which is NOT non-blocking.
static
int timeout_nonblocking(struct timespec * timespec) {
	return timespec && timespec->tv_sec == 0 && timespec->tv_nsec == 0;
}
253
+
254
// Arguments for kevent, shared between the GVL and no-GVL paths.
struct select_arguments {
	struct Event_Backend_KQueue *data;
	
	// In: capacity of `events`; out: number of events returned (-1 on error).
	int count;
	struct kevent events[KQUEUE_MAX_EVENTS];
	
	// `timeout` points at `storage` (or is NULL to block indefinitely).
	struct timespec storage;
	struct timespec *timeout;
};

// Raw kevent call (no changelist, only event retrieval); signature matches
// rb_thread_call_without_gvl.
static
void * select_internal(void *_arguments) {
	struct select_arguments * arguments = (struct select_arguments *)_arguments;
	
	arguments->count = kevent(arguments->data->descriptor, NULL, 0, arguments->events, arguments->count, arguments->timeout);
	
	return NULL;
}

// Wait with the GVL released, so other Ruby threads can run meanwhile.
static
void select_internal_without_gvl(struct select_arguments *arguments) {
	rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
	
	if (arguments->count == -1) {
		rb_sys_fail("select_internal_without_gvl:kevent");
	}
}

// Wait while holding the GVL; intended for non-blocking (zero timeout) polls.
static
void select_internal_with_gvl(struct select_arguments *arguments) {
	select_internal((void *)arguments);
	
	if (arguments->count == -1) {
		rb_sys_fail("select_internal_with_gvl:kevent");
	}
}
290
+
291
// Wait for events and resume each ready fiber with its fired filter.
// Returns the number of events processed.
VALUE Event_Backend_KQueue_select(VALUE self, VALUE duration) {
	struct Event_Backend_KQueue *data = NULL;
	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
	
	struct select_arguments arguments = {
		.data = data,
		.count = KQUEUE_MAX_EVENTS,
		.storage = {
			.tv_sec = 0,
			.tv_nsec = 0
		}
	};
	
	// We break this implementation into two parts.
	// (1) count = kevent(..., timeout = 0)
	// (2) without gvl: kevent(..., timeout = 0) if count == 0 and timeout != 0
	// This allows us to avoid releasing and reacquiring the GVL.
	// Non-comprehensive testing shows this gives a 1.5x speedup.
	arguments.timeout = &arguments.storage;
	
	// First do the syscall with no timeout to get any immediately available events:
	select_internal_with_gvl(&arguments);
	
	// If there were no pending events, if we have a timeout, wait for more events:
	if (arguments.count == 0) {
		arguments.timeout = make_timeout(duration, &arguments.storage);
		
		if (!timeout_nonblocking(arguments.timeout)) {
			// Reset the capacity; the probe above overwrote it with 0:
			arguments.count = KQUEUE_MAX_EVENTS;
			
			select_internal_without_gvl(&arguments);
		}
	}
	
	// Resume each fiber with the filter that fired for it:
	for (int i = 0; i < arguments.count; i += 1) {
		VALUE fiber = (VALUE)arguments.events[i].udata;
		VALUE result = INT2NUM(arguments.events[i].filter);
		rb_funcall(fiber, id_transfer, 1, result);
	}
	
	return INT2NUM(arguments.count);
}
333
+
334
+ void Init_Event_Backend_KQueue(VALUE Event_Backend) {
335
+ id_fileno = rb_intern("fileno");
336
+ id_transfer = rb_intern("transfer");
337
+
338
+ Event_Backend_KQueue = rb_define_class_under(Event_Backend, "KQueue", rb_cObject);
339
+
340
+ rb_define_alloc_func(Event_Backend_KQueue, Event_Backend_KQueue_allocate);
341
+ rb_define_method(Event_Backend_KQueue, "initialize", Event_Backend_KQueue_initialize, 1);
342
+ rb_define_method(Event_Backend_KQueue, "initialize", Event_Backend_KQueue_close, 0);
343
+
344
+ rb_define_method(Event_Backend_KQueue, "io_wait", Event_Backend_KQueue_io_wait, 3);
345
+ rb_define_method(Event_Backend_KQueue, "select", Event_Backend_KQueue_select, 1);
346
+ }