event 0.1.0 → 0.2.3

@@ -0,0 +1,25 @@
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to deal
+ // in the Software without restriction, including without limitation the rights
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ // copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in
+ // all copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ // THE SOFTWARE.
+
+ #pragma once
+
+ #define EVENT_BACKEND_EPOLL
+
+ void Init_Event_Backend_EPoll(VALUE Event_Backend);
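Note: this header only declares the backend's entry point; the extension's main `event.c` is not part of this diff. The following is therefore only a hedged sketch (assumed configuration macros and wiring, not taken from this release) of how such `Init_Event_Backend_*` hooks are typically registered, assuming `Event::Backend` is a Ruby module and that `extconf.rb` only compiles the backends available on the platform.

// Hypothetical sketch only: register whichever backends were compiled in.
// The HAVE_* macros and the Event::Backend module layout are assumptions,
// not shown in this diff.
#include <ruby.h>

void Init_Event_Backend_EPoll(VALUE Event_Backend);
void Init_Event_Backend_KQueue(VALUE Event_Backend);

void Init_event(void)
{
    VALUE Event = rb_define_module("Event");
    VALUE Event_Backend = rb_define_module_under(Event, "Backend");

#ifdef HAVE_SYS_EPOLL_H
    Init_Event_Backend_EPoll(Event_Backend);
#endif

#ifdef HAVE_SYS_EVENT_H
    Init_Event_Backend_KQueue(Event_Backend);
#endif
}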
@@ -0,0 +1,330 @@
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to deal
+ // in the Software without restriction, including without limitation the rights
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ // copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in
+ // all copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ // THE SOFTWARE.
+
+ #include "kqueue.h"
+ #include "backend.h"
+
+ #include <sys/event.h>
+ #include <sys/ioctl.h>
+ #include <time.h>
+
+ static VALUE Event_Backend_KQueue = Qnil;
+ static ID id_fileno, id_transfer;
+
+ enum {KQUEUE_MAX_EVENTS = 64};
+
+ struct Event_Backend_KQueue {
+     VALUE loop;
+     int descriptor;
+ };
+
+ void Event_Backend_KQueue_Type_mark(void *_data)
+ {
+     struct Event_Backend_KQueue *data = _data;
+     rb_gc_mark(data->loop);
+ }
+
+ void Event_Backend_KQueue_Type_free(void *_data)
+ {
+     struct Event_Backend_KQueue *data = _data;
+
+     if (data->descriptor >= 0) {
+         close(data->descriptor);
+     }
+
+     free(data);
+ }
+
+ size_t Event_Backend_KQueue_Type_size(const void *data)
+ {
+     return sizeof(struct Event_Backend_KQueue);
+ }
+
+ static const rb_data_type_t Event_Backend_KQueue_Type = {
+     .wrap_struct_name = "Event::Backend::KQueue",
+     .function = {
+         .dmark = Event_Backend_KQueue_Type_mark,
+         .dfree = Event_Backend_KQueue_Type_free,
+         .dsize = Event_Backend_KQueue_Type_size,
+     },
+     .data = NULL,
+     .flags = RUBY_TYPED_FREE_IMMEDIATELY,
+ };
+
+ VALUE Event_Backend_KQueue_allocate(VALUE self) {
+     struct Event_Backend_KQueue *data = NULL;
+     VALUE instance = TypedData_Make_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+
+     data->loop = Qnil;
+     data->descriptor = -1;
+
+     return instance;
+ }
+
+ VALUE Event_Backend_KQueue_initialize(VALUE self, VALUE loop) {
+     struct Event_Backend_KQueue *data = NULL;
+     TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+
+     data->loop = loop;
+     int result = kqueue();
+
+     if (result == -1) {
+         rb_sys_fail("kqueue");
+     } else {
+         ioctl(result, FIOCLEX);
+         data->descriptor = result;
+
+         rb_update_max_fd(data->descriptor);
+     }
+
+     return self;
+ }
+
+ static
+ int io_add_filters(int descriptor, int ident, int events, VALUE fiber) {
+     int count = 0;
+     struct kevent kevents[2] = {0};
+
+     if (events & READABLE) {
+         kevents[count].ident = ident;
+         kevents[count].filter = EVFILT_READ;
+         kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
+         kevents[count].udata = (void*)fiber;
+
+         // #ifdef EV_OOBAND
+         // if (events & PRIORITY) {
+         //     kevents[count].flags |= EV_OOBAND;
+         // }
+         // #endif
+
+         count++;
+     }
+
+     if (events & WRITABLE) {
+         kevents[count].ident = ident;
+         kevents[count].filter = EVFILT_WRITE;
+         kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
+         kevents[count].udata = (void*)fiber;
+         count++;
+     }
+
+     int result = kevent(descriptor, kevents, count, NULL, 0, NULL);
+
+     if (result == -1) {
+         rb_sys_fail("kevent(register)");
+     }
+
+     return events;
+ }
+
+ static
+ void io_remove_filters(int descriptor, int ident, int events) {
+     int count = 0;
+     struct kevent kevents[2] = {0};
+
+     if (events & READABLE) {
+         kevents[count].ident = ident;
+         kevents[count].filter = EVFILT_READ;
+         kevents[count].flags = EV_DELETE;
+
+         count++;
+     }
+
+     if (events & WRITABLE) {
+         kevents[count].ident = ident;
+         kevents[count].filter = EVFILT_WRITE;
+         kevents[count].flags = EV_DELETE;
+         count++;
+     }
+
+     // Ignore the result.
+     kevent(descriptor, kevents, count, NULL, 0, NULL);
+ }
+
+ struct io_wait_arguments {
+     struct Event_Backend_KQueue *data;
+     int events;
+     int descriptor;
+ };
+
+ static
+ VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
+     struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
+
+     io_remove_filters(arguments->data->descriptor, arguments->descriptor, arguments->events);
+
+     rb_exc_raise(exception);
+ }
+
+ static inline
+ int events_from_kqueue_filter(int filter) {
+     if (filter == EVFILT_READ) return READABLE;
+     if (filter == EVFILT_WRITE) return WRITABLE;
+
+     return 0;
+ }
+
+ static
+ VALUE io_wait_transfer(VALUE _arguments) {
+     struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
+
+     VALUE result = rb_funcall(arguments->data->loop, id_transfer, 0);
+
+     return INT2NUM(events_from_kqueue_filter(NUM2INT(result)));
+ }
+
+ VALUE Event_Backend_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
+     struct Event_Backend_KQueue *data = NULL;
+     TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+
+     int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
+
+     struct io_wait_arguments io_wait_arguments = {
+         .events = io_add_filters(data->descriptor, descriptor, NUM2INT(events), fiber),
+         .data = data,
+         .descriptor = descriptor,
+     };
+
+     return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
+ }
+
+ static
+ struct timespec * make_timeout(VALUE duration, struct timespec * storage) {
+     if (duration == Qnil) {
+         return NULL;
+     }
+
+     if (FIXNUM_P(duration)) {
+         storage->tv_sec = NUM2TIMET(duration);
+         storage->tv_nsec = 0;
+
+         return storage;
+     }
+
+     else if (RB_FLOAT_TYPE_P(duration)) {
+         double value = RFLOAT_VALUE(duration);
+         time_t seconds = value;
+
+         storage->tv_sec = seconds;
+         storage->tv_nsec = (value - seconds) * 1000000000L;
+
+         return storage;
+     }
+
+     rb_raise(rb_eRuntimeError, "unable to convert timeout");
+ }
+
+ static
+ int timeout_nonblocking(struct timespec * timespec) {
+     return timespec && timespec->tv_sec == 0 && timespec->tv_nsec == 0;
+ }
+
+ struct select_arguments {
+     struct Event_Backend_KQueue *data;
+
+     int count;
+     struct kevent events[KQUEUE_MAX_EVENTS];
+
+     struct timespec storage;
+     struct timespec *timeout;
+ };
+
+ static
+ void * select_internal(void *_arguments) {
+     struct select_arguments * arguments = (struct select_arguments *)_arguments;
+
+     arguments->count = kevent(arguments->data->descriptor, NULL, 0, arguments->events, arguments->count, arguments->timeout);
+
+     return NULL;
+ }
+
+ static
+ void select_internal_without_gvl(struct select_arguments *arguments) {
+     rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
+
+     if (arguments->count == -1) {
+         rb_sys_fail("select_internal_without_gvl:kevent");
+     }
+ }
+
+ static
+ void select_internal_with_gvl(struct select_arguments *arguments) {
+     select_internal((void *)arguments);
+
+     if (arguments->count == -1) {
+         rb_sys_fail("select_internal_with_gvl:kevent");
+     }
+ }
+
+ VALUE Event_Backend_KQueue_select(VALUE self, VALUE duration) {
+     struct Event_Backend_KQueue *data = NULL;
+     TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+
+     struct select_arguments arguments = {
+         .data = data,
+         .count = KQUEUE_MAX_EVENTS,
+         .storage = {
+             .tv_sec = 0,
+             .tv_nsec = 0
+         }
+     };
+
+     // We break this implementation into two parts:
+     // (1) with the GVL held: count = kevent(..., timeout = 0)
+     // (2) without the GVL: kevent(..., timeout), but only if count == 0 and the timeout is non-zero.
+     // This allows us to avoid releasing and reacquiring the GVL when events are already pending.
+     // Non-comprehensive testing shows this gives a 1.5x speedup.
+     arguments.timeout = &arguments.storage;
+
+     // First do the syscall with no timeout to get any immediately available events:
+     select_internal_with_gvl(&arguments);
+
+     // If there were no pending events and we have a timeout, wait for more events:
+     if (arguments.count == 0) {
+         arguments.timeout = make_timeout(duration, &arguments.storage);
+
+         if (!timeout_nonblocking(arguments.timeout)) {
+             arguments.count = KQUEUE_MAX_EVENTS;
+
+             select_internal_without_gvl(&arguments);
+         }
+     }
+
+     for (int i = 0; i < arguments.count; i += 1) {
+         VALUE fiber = (VALUE)arguments.events[i].udata;
+         VALUE result = INT2NUM(arguments.events[i].filter);
+         rb_funcall(fiber, id_transfer, 1, result);
+     }
+
+     return INT2NUM(arguments.count);
+ }
+
+ void Init_Event_Backend_KQueue(VALUE Event_Backend) {
+     id_fileno = rb_intern("fileno");
+     id_transfer = rb_intern("transfer");
+
+     Event_Backend_KQueue = rb_define_class_under(Event_Backend, "KQueue", rb_cObject);
+
+     rb_define_alloc_func(Event_Backend_KQueue, Event_Backend_KQueue_allocate);
+     rb_define_method(Event_Backend_KQueue, "initialize", Event_Backend_KQueue_initialize, 1);
+
+     rb_define_method(Event_Backend_KQueue, "io_wait", Event_Backend_KQueue_io_wait, 3);
+     rb_define_method(Event_Backend_KQueue, "select", Event_Backend_KQueue_select, 1);
+ }
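The backend registers each interest with `EV_ADD | EV_ENABLE | EV_ONESHOT`, so a filter fires at most once and then disarms, and it stashes the waiting fiber in `udata` so `select` can hand the triggered filter back via `transfer`. For readers less familiar with kqueue, here is a self-contained C sketch (BSD/macOS only, not part of the gem) of that same register-then-wait pattern applied to a pipe:

// Standalone illustration of the EV_ONESHOT register/wait pattern used by
// io_add_filters and select_internal above.
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <stdio.h>
#include <unistd.h>

int main(void) {
    int fds[2];
    pipe(fds);

    int kq = kqueue();

    // Register interest in readability, delivered at most once:
    struct kevent change;
    EV_SET(&change, fds[0], EVFILT_READ, EV_ADD | EV_ENABLE | EV_ONESHOT, 0, 0, NULL);
    kevent(kq, &change, 1, NULL, 0, NULL);

    // Make the descriptor readable:
    write(fds[1], "x", 1);

    // Wait for the event; the backend would resume the fiber stored in udata here:
    struct kevent event;
    int count = kevent(kq, NULL, 0, &event, 1, NULL);

    printf("count=%d readable=%d\n", count, event.filter == EVFILT_READ);

    close(kq);
    close(fds[0]);
    close(fds[1]);
    return 0;
}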
@@ -0,0 +1,27 @@
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to deal
+ // in the Software without restriction, including without limitation the rights
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ // copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in
+ // all copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ // THE SOFTWARE.
+
+ #pragma once
+
+ #include <ruby.h>
+
+ #define EVENT_BACKEND_KQUEUE
+
+ void Init_Event_Backend_KQueue(VALUE Event_Backend);
@@ -0,0 +1,407 @@
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to deal
+ // in the Software without restriction, including without limitation the rights
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ // copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in
+ // all copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ // THE SOFTWARE.
+
+ #include "uring.h"
+ #include "backend.h"
+
+ #include <liburing.h>
+ #include <poll.h>
+ #include <time.h>
+
+ static VALUE Event_Backend_URing = Qnil;
+ static ID id_fileno, id_transfer;
+
+ enum {URING_ENTRIES = 64};
+ enum {URING_MAX_EVENTS = 64};
+
+ struct Event_Backend_URing {
+     VALUE loop;
+     struct io_uring ring;
+ };
+
+ void Event_Backend_URing_Type_mark(void *_data)
+ {
+     struct Event_Backend_URing *data = _data;
+     rb_gc_mark(data->loop);
+ }
+
+ void Event_Backend_URing_Type_free(void *_data)
+ {
+     struct Event_Backend_URing *data = _data;
+
+     if (data->ring.ring_fd >= 0) {
+         io_uring_queue_exit(&data->ring);
+         data->ring.ring_fd = -1;
+     }
+
+     free(data);
+ }
+
+ size_t Event_Backend_URing_Type_size(const void *data)
+ {
+     return sizeof(struct Event_Backend_URing);
+ }
+
+ static const rb_data_type_t Event_Backend_URing_Type = {
+     .wrap_struct_name = "Event::Backend::URing",
+     .function = {
+         .dmark = Event_Backend_URing_Type_mark,
+         .dfree = Event_Backend_URing_Type_free,
+         .dsize = Event_Backend_URing_Type_size,
+     },
+     .data = NULL,
+     .flags = RUBY_TYPED_FREE_IMMEDIATELY,
+ };
+
+ VALUE Event_Backend_URing_allocate(VALUE self) {
+     struct Event_Backend_URing *data = NULL;
+     VALUE instance = TypedData_Make_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+
+     data->loop = Qnil;
+     data->ring.ring_fd = -1;
+
+     return instance;
+ }
+
+ VALUE Event_Backend_URing_initialize(VALUE self, VALUE loop) {
+     struct Event_Backend_URing *data = NULL;
+     TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+
+     data->loop = loop;
+
+     int result = io_uring_queue_init(URING_ENTRIES, &data->ring, 0);
+
+     if (result < 0) {
+         rb_syserr_fail(-result, "io_uring_queue_init");
+     }
+
+     rb_update_max_fd(data->ring.ring_fd);
+
+     return self;
+ }
+
+ static inline
+ short poll_flags_from_events(int events) {
+     short flags = 0;
+
+     if (events & READABLE) flags |= POLLIN;
+     if (events & PRIORITY) flags |= POLLPRI;
+     if (events & WRITABLE) flags |= POLLOUT;
+
+     flags |= POLLERR;
+     flags |= POLLHUP;
+
+     return flags;
+ }
+
+ static inline
+ int events_from_poll_flags(short flags) {
+     int events = 0;
+
+     if (flags & POLLIN) events |= READABLE;
+     if (flags & POLLPRI) events |= PRIORITY;
+     if (flags & POLLOUT) events |= WRITABLE;
+
+     return events;
+ }
+
+ struct io_uring_sqe *Event_Backend_URing_io_uring_get_sqe(struct Event_Backend_URing *data) {
+     struct io_uring_sqe *sqe = io_uring_get_sqe(&data->ring);
+
+     while (sqe == NULL) {
+         // The submission queue is full: flush any pending submissions to the
+         // kernel to free up entries, then try again.
+         io_uring_submit(&data->ring);
+         sqe = io_uring_get_sqe(&data->ring);
+     }
+
+     return sqe;
+ }
+
+ struct io_wait_arguments {
+     struct Event_Backend_URing *data;
+     VALUE fiber;
+     short flags;
+ };
+
+ static
+ VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
+     struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
+     struct Event_Backend_URing *data = arguments->data;
+
+     struct io_uring_sqe *sqe = Event_Backend_URing_io_uring_get_sqe(data);
+
+     // fprintf(stderr, "poll_remove(%p, %p)\n", sqe, (void*)arguments->fiber);
+
+     io_uring_prep_poll_remove(sqe, (void*)arguments->fiber);
+     io_uring_submit(&data->ring);
+
+     rb_exc_raise(exception);
+ }
+
+ static
+ VALUE io_wait_transfer(VALUE _arguments) {
+     struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
+     struct Event_Backend_URing *data = arguments->data;
+
+     VALUE result = rb_funcall(data->loop, id_transfer, 0);
+
+     // We explicitly filter the resulting events based on the requested events.
+     // In some cases, poll will report events we didn't ask for.
+     short flags = arguments->flags & NUM2INT(result);
+
+     return INT2NUM(events_from_poll_flags(flags));
+ }
+
+ VALUE Event_Backend_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
+     struct Event_Backend_URing *data = NULL;
+     TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+
+     int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
+     struct io_uring_sqe *sqe = Event_Backend_URing_io_uring_get_sqe(data);
+
+     short flags = poll_flags_from_events(NUM2INT(events));
+
+     // fprintf(stderr, "poll_add(%p, %d, %d, %p)\n", sqe, descriptor, flags, (void*)fiber);
+
+     io_uring_prep_poll_add(sqe, descriptor, flags);
+     io_uring_sqe_set_data(sqe, (void*)fiber);
+     io_uring_submit(&data->ring);
+
+     struct io_wait_arguments io_wait_arguments = {
+         .data = data,
+         .fiber = fiber,
+         .flags = flags
+     };
+
+     return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
+ }
+
+ inline static
+ void resize_to_capacity(VALUE string, size_t offset, size_t length) {
+     size_t current_length = RSTRING_LEN(string);
+     long difference = (long)(offset + length) - (long)current_length;
+
+     difference += 1;
+
+     if (difference > 0) {
+         rb_str_modify_expand(string, difference);
+     } else {
+         rb_str_modify(string);
+     }
+ }
+
+ inline static
+ void resize_to_fit(VALUE string, size_t offset, size_t length) {
+     size_t current_length = RSTRING_LEN(string);
+
+     if (current_length < (offset + length)) {
+         rb_str_set_len(string, offset + length);
+     }
+ }
+
+ VALUE Event_Backend_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE offset, VALUE length) {
+     struct Event_Backend_URing *data = NULL;
+     TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+
+     resize_to_capacity(buffer, NUM2SIZET(offset), NUM2SIZET(length));
+
+     int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
+     struct io_uring_sqe *sqe = Event_Backend_URing_io_uring_get_sqe(data);
+
+     struct iovec iovecs[1];
+     iovecs[0].iov_base = RSTRING_PTR(buffer) + NUM2SIZET(offset);
+     iovecs[0].iov_len = NUM2SIZET(length);
+
+     io_uring_prep_readv(sqe, descriptor, iovecs, 1, 0);
+     io_uring_sqe_set_data(sqe, (void*)fiber);
+     io_uring_submit(&data->ring);
+
+     // fprintf(stderr, "prep_readv(%p, %d, %ld)\n", sqe, descriptor, iovecs[0].iov_len);
+
+     int result = NUM2INT(rb_funcall(data->loop, id_transfer, 0));
+
+     if (result < 0) {
+         rb_syserr_fail(-result, strerror(-result));
+     }
+
+     resize_to_fit(buffer, NUM2SIZET(offset), (size_t)result);
+
+     return INT2NUM(result);
+ }
+
+ VALUE Event_Backend_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE offset, VALUE length) {
+     struct Event_Backend_URing *data = NULL;
+     TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+
+     if ((size_t)RSTRING_LEN(buffer) < NUM2SIZET(offset) + NUM2SIZET(length)) {
+         rb_raise(rb_eRuntimeError, "invalid offset/length exceeds bounds of buffer");
+     }
+
+     int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
+     struct io_uring_sqe *sqe = Event_Backend_URing_io_uring_get_sqe(data);
+
+     struct iovec iovecs[1];
+     iovecs[0].iov_base = RSTRING_PTR(buffer) + NUM2SIZET(offset);
+     iovecs[0].iov_len = NUM2SIZET(length);
+
+     io_uring_prep_writev(sqe, descriptor, iovecs, 1, 0);
+     io_uring_sqe_set_data(sqe, (void*)fiber);
+     io_uring_submit(&data->ring);
+
+     // fprintf(stderr, "prep_writev(%p, %d, %ld)\n", sqe, descriptor, iovecs[0].iov_len);
+
+     int result = NUM2INT(rb_funcall(data->loop, id_transfer, 0));
+
+     if (result < 0) {
+         rb_syserr_fail(-result, strerror(-result));
+     }
+
+     return INT2NUM(result);
+ }
+
+ static
+ struct __kernel_timespec * make_timeout(VALUE duration, struct __kernel_timespec *storage) {
+     if (duration == Qnil) {
+         return NULL;
+     }
+
+     if (FIXNUM_P(duration)) {
+         storage->tv_sec = NUM2TIMET(duration);
+         storage->tv_nsec = 0;
+
+         return storage;
+     }
+
+     else if (RB_FLOAT_TYPE_P(duration)) {
+         double value = RFLOAT_VALUE(duration);
+         time_t seconds = value;
+
+         storage->tv_sec = seconds;
+         storage->tv_nsec = (value - seconds) * 1000000000L;
+
+         return storage;
+     }
+
+     rb_raise(rb_eRuntimeError, "unable to convert timeout");
+ }
+
+ static
+ int timeout_nonblocking(struct __kernel_timespec *timespec) {
+     return timespec && timespec->tv_sec == 0 && timespec->tv_nsec == 0;
+ }
+
+ struct select_arguments {
+     struct Event_Backend_URing *data;
+
+     int count;
+     struct io_uring_cqe **cqes;
+
+     struct __kernel_timespec storage;
+     struct __kernel_timespec *timeout;
+ };
+
+ static
+ void * select_internal(void *_arguments) {
+     struct select_arguments * arguments = (struct select_arguments *)_arguments;
+
+     arguments->count = io_uring_wait_cqes(&arguments->data->ring, arguments->cqes, 1, arguments->timeout, NULL);
+
+     // If waiting resulted in a timeout, there are 0 events.
+     if (arguments->count == -ETIME) {
+         arguments->count = 0;
+     }
+
+     return NULL;
+ }
+
+ static
+ int select_internal_without_gvl(struct select_arguments *arguments) {
+     rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
+
+     if (arguments->count < 0) {
+         rb_syserr_fail(-arguments->count, "select_internal_without_gvl:io_uring_wait_cqes");
+     }
+
+     return arguments->count;
+ }
+
+ VALUE Event_Backend_URing_select(VALUE self, VALUE duration) {
+     struct Event_Backend_URing *data = NULL;
+     TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+
+     struct io_uring_cqe *cqes[URING_MAX_EVENTS];
+
+     // This is a non-blocking operation:
+     int result = io_uring_peek_batch_cqe(&data->ring, cqes, URING_MAX_EVENTS);
+
+     if (result < 0) {
+         rb_syserr_fail(-result, strerror(-result));
+     } else if (result == 0) {
+         // We might need to wait for events:
+         struct select_arguments arguments = {
+             .data = data,
+             .cqes = cqes,
+             .timeout = NULL,
+         };
+
+         arguments.timeout = make_timeout(duration, &arguments.storage);
+
+         if (!timeout_nonblocking(arguments.timeout)) {
+             result = select_internal_without_gvl(&arguments);
+         }
+     }
+
+     // fprintf(stderr, "cqes count=%d\n", result);
+
+     for (int i = 0; i < result; i += 1) {
+         // If the operation was cancelled, or the operation has no user data (fiber):
+         if (cqes[i]->res == -ECANCELED || cqes[i]->user_data == 0) {
+             continue;
+         }
+
+         VALUE fiber = (VALUE)io_uring_cqe_get_data(cqes[i]);
+         VALUE result = INT2NUM(cqes[i]->res);
+
+         // fprintf(stderr, "cqes[i] res=%d user_data=%p\n", cqes[i]->res, (void*)cqes[i]->user_data);
+
+         io_uring_cqe_seen(&data->ring, cqes[i]);
+
+         rb_funcall(fiber, id_transfer, 1, result);
+     }
+
+     return INT2NUM(result);
+ }
+
+ void Init_Event_Backend_URing(VALUE Event_Backend) {
+     id_fileno = rb_intern("fileno");
+     id_transfer = rb_intern("transfer");
+
+     Event_Backend_URing = rb_define_class_under(Event_Backend, "URing", rb_cObject);
+
+     rb_define_alloc_func(Event_Backend_URing, Event_Backend_URing_allocate);
+     rb_define_method(Event_Backend_URing, "initialize", Event_Backend_URing_initialize, 1);
+
+     rb_define_method(Event_Backend_URing, "io_wait", Event_Backend_URing_io_wait, 3);
+     rb_define_method(Event_Backend_URing, "select", Event_Backend_URing_select, 1);
+
+     rb_define_method(Event_Backend_URing, "io_read", Event_Backend_URing_io_read, 5);
+     rb_define_method(Event_Backend_URing, "io_write", Event_Backend_URing_io_write, 5);
+ }
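`io_read` and `io_write` above follow the canonical liburing flow: get an SQE, prep a readv/writev, attach the fiber as user data, submit, and later match the CQE's `res` (byte count or negative errno) back to the waiting fiber. A minimal standalone equivalent of that flow (plain C, no Ruby, Linux only, file path chosen only for illustration, not part of the gem) looks like this:

// Minimal liburing readv round-trip mirroring the prep/submit/wait/res steps
// used by Event_Backend_URing_io_read.
#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/uio.h>

int main(void) {
    struct io_uring ring;
    io_uring_queue_init(64, &ring, 0);

    int fd = open("/etc/hostname", O_RDONLY);

    char buffer[256];
    struct iovec iovecs[1] = {{.iov_base = buffer, .iov_len = sizeof(buffer)}};

    // Prepare and submit the read, tagging it with user data
    // (the backend stores the fiber here):
    struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
    io_uring_prep_readv(sqe, fd, iovecs, 1, 0);
    io_uring_sqe_set_data(sqe, (void*)0x1);
    io_uring_submit(&ring);

    // Wait for the completion; cqe->res is the byte count or a negative errno:
    struct io_uring_cqe *cqe = NULL;
    io_uring_wait_cqe(&ring, &cqe);
    printf("res=%d user_data=%llu\n", cqe->res, (unsigned long long)cqe->user_data);
    io_uring_cqe_seen(&ring, cqe);

    close(fd);
    io_uring_queue_exit(&ring);
    return 0;
}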