event 0.5.0 → 0.8.1

@@ -1,428 +0,0 @@
- // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
- //
- // Permission is hereby granted, free of charge, to any person obtaining a copy
- // of this software and associated documentation files (the "Software"), to deal
- // in the Software without restriction, including without limitation the rights
- // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- // copies of the Software, and to permit persons to whom the Software is
- // furnished to do so, subject to the following conditions:
- //
- // The above copyright notice and this permission notice shall be included in
- // all copies or substantial portions of the Software.
- //
- // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- // THE SOFTWARE.
-
- #include "kqueue.h"
- #include "backend.h"
-
- #include <sys/event.h>
- #include <sys/ioctl.h>
- #include <time.h>
- #include <errno.h>
-
- static VALUE Event_Backend_KQueue = Qnil;
- static ID id_fileno;
-
- enum {KQUEUE_MAX_EVENTS = 64};
-
- struct Event_Backend_KQueue {
- 	VALUE loop;
- 	int descriptor;
- };
-
- void Event_Backend_KQueue_Type_mark(void *_data)
- {
- 	struct Event_Backend_KQueue *data = _data;
- 	rb_gc_mark(data->loop);
- }
-
- static
- void close_internal(struct Event_Backend_KQueue *data) {
- 	if (data->descriptor >= 0) {
- 		close(data->descriptor);
- 		data->descriptor = -1;
- 	}
- }
-
- void Event_Backend_KQueue_Type_free(void *_data)
- {
- 	struct Event_Backend_KQueue *data = _data;
-
- 	close_internal(data);
-
- 	free(data);
- }
-
- size_t Event_Backend_KQueue_Type_size(const void *data)
- {
- 	return sizeof(struct Event_Backend_KQueue);
- }
-
- static const rb_data_type_t Event_Backend_KQueue_Type = {
- 	.wrap_struct_name = "Event::Backend::KQueue",
- 	.function = {
- 		.dmark = Event_Backend_KQueue_Type_mark,
- 		.dfree = Event_Backend_KQueue_Type_free,
- 		.dsize = Event_Backend_KQueue_Type_size,
- 	},
- 	.data = NULL,
- 	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
- };
-
- VALUE Event_Backend_KQueue_allocate(VALUE self) {
- 	struct Event_Backend_KQueue *data = NULL;
- 	VALUE instance = TypedData_Make_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
-
- 	data->loop = Qnil;
- 	data->descriptor = -1;
-
- 	return instance;
- }
-
- VALUE Event_Backend_KQueue_initialize(VALUE self, VALUE loop) {
- 	struct Event_Backend_KQueue *data = NULL;
- 	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
-
- 	data->loop = loop;
- 	int result = kqueue();
-
- 	if (result == -1) {
- 		rb_sys_fail("kqueue");
- 	} else {
- 		ioctl(result, FIOCLEX);
- 		data->descriptor = result;
-
- 		rb_update_max_fd(data->descriptor);
- 	}
-
- 	return self;
- }
-
- VALUE Event_Backend_KQueue_close(VALUE self) {
- 	struct Event_Backend_KQueue *data = NULL;
- 	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
-
- 	close_internal(data);
-
- 	return Qnil;
- }
-
- struct process_wait_arguments {
- 	struct Event_Backend_KQueue *data;
- 	pid_t pid;
- 	int flags;
- };
-
- static
- int process_add_filters(int descriptor, int ident, VALUE fiber) {
- 	struct kevent event = {0};
-
- 	event.ident = ident;
- 	event.filter = EVFILT_PROC;
- 	event.flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
- 	event.fflags = NOTE_EXIT;
- 	event.udata = (void*)fiber;
-
- 	int result = kevent(descriptor, &event, 1, NULL, 0, NULL);
-
- 	if (result == -1) {
- 		// No such process - the process has probably already terminated:
- 		if (errno == ESRCH) {
- 			return 0;
- 		}
-
- 		rb_sys_fail("kevent(process_add_filters)");
- 	}
-
- 	return 1;
- }
-
- static
- void process_remove_filters(int descriptor, int ident) {
- 	struct kevent event = {0};
-
- 	event.ident = ident;
- 	event.filter = EVFILT_PROC;
- 	event.flags = EV_DELETE;
- 	event.fflags = NOTE_EXIT;
-
- 	// Ignore the result.
- 	kevent(descriptor, &event, 1, NULL, 0, NULL);
- }
-
- static
- VALUE process_wait_transfer(VALUE _arguments) {
- 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
-
- 	Event_Backend_transfer(arguments->data->loop);
-
- 	return Event_Backend_process_status_wait(arguments->pid);
- }
-
- static
- VALUE process_wait_rescue(VALUE _arguments, VALUE exception) {
- 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
-
- 	process_remove_filters(arguments->data->descriptor, arguments->pid);
-
- 	rb_exc_raise(exception);
- }
-
- VALUE Event_Backend_KQueue_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
- 	struct Event_Backend_KQueue *data = NULL;
- 	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
-
- 	struct process_wait_arguments process_wait_arguments = {
- 		.data = data,
- 		.pid = NUM2PIDT(pid),
- 		.flags = NUM2INT(flags),
- 	};
-
- 	int waiting = process_add_filters(data->descriptor, process_wait_arguments.pid, fiber);
-
- 	if (waiting) {
- 		return rb_rescue(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_rescue, (VALUE)&process_wait_arguments);
- 	} else {
- 		return Event_Backend_process_status_wait(process_wait_arguments.pid);
- 	}
- }
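
Editor's note: the ESRCH branch above matters because the child can exit before the filter is registered, in which case the backend falls back to reaping the status directly. A minimal standalone sketch of the same oneshot EVFILT_PROC/NOTE_EXIT technique (BSD/macOS only; this program and its names are illustrative, not part of the gem):

#include <sys/types.h>
#include <sys/event.h>
#include <sys/wait.h>
#include <unistd.h>
#include <stdio.h>

int main(void) {
	pid_t pid = fork();
	if (pid == 0) {
		sleep(1);
		_exit(42); // Child exits after one second.
	}
	
	int kq = kqueue();
	if (kq == -1) {perror("kqueue"); return 1;}
	
	// EV_ONESHOT means the filter deletes itself after firing, so an
	// explicit EV_DELETE is only needed on the exceptional path:
	struct kevent event;
	EV_SET(&event, pid, EVFILT_PROC, EV_ADD | EV_ENABLE | EV_ONESHOT, NOTE_EXIT, 0, NULL);
	if (kevent(kq, &event, 1, NULL, 0, NULL) == -1) {perror("kevent"); return 1;}
	
	// Block until the child exits:
	struct kevent result;
	if (kevent(kq, NULL, 0, &result, 1, NULL) == -1) {perror("kevent"); return 1;}
	
	int status = 0;
	waitpid((pid_t)result.ident, &status, 0);
	printf("pid %d exited with status %d\n", (int)result.ident, WEXITSTATUS(status));
	
	return 0;
}
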
-
- static
- int io_add_filters(int descriptor, int ident, int events, VALUE fiber) {
- 	int count = 0;
- 	struct kevent kevents[2] = {0};
-
- 	if (events & READABLE) {
- 		kevents[count].ident = ident;
- 		kevents[count].filter = EVFILT_READ;
- 		kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
- 		kevents[count].udata = (void*)fiber;
-
- 		// #ifdef EV_OOBAND
- 		// if (events & PRIORITY) {
- 		// 	kevents[count].flags |= EV_OOBAND;
- 		// }
- 		// #endif
-
- 		count++;
- 	}
-
- 	if (events & WRITABLE) {
- 		kevents[count].ident = ident;
- 		kevents[count].filter = EVFILT_WRITE;
- 		kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
- 		kevents[count].udata = (void*)fiber;
- 		count++;
- 	}
-
- 	int result = kevent(descriptor, kevents, count, NULL, 0, NULL);
-
- 	if (result == -1) {
- 		rb_sys_fail("kevent(io_add_filters)");
- 	}
-
- 	return events;
- }
-
- static
- void io_remove_filters(int descriptor, int ident, int events) {
- 	int count = 0;
- 	struct kevent kevents[2] = {0};
-
- 	if (events & READABLE) {
- 		kevents[count].ident = ident;
- 		kevents[count].filter = EVFILT_READ;
- 		kevents[count].flags = EV_DELETE;
-
- 		count++;
- 	}
-
- 	if (events & WRITABLE) {
- 		kevents[count].ident = ident;
- 		kevents[count].filter = EVFILT_WRITE;
- 		kevents[count].flags = EV_DELETE;
- 		count++;
- 	}
-
- 	// Ignore the result.
- 	kevent(descriptor, kevents, count, NULL, 0, NULL);
- }
-
- struct io_wait_arguments {
- 	struct Event_Backend_KQueue *data;
- 	int events;
- 	int descriptor;
- };
-
- static
- VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
- 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
-
- 	io_remove_filters(arguments->data->descriptor, arguments->descriptor, arguments->events);
-
- 	rb_exc_raise(exception);
- }
-
- static inline
- int events_from_kqueue_filter(int filter) {
- 	if (filter == EVFILT_READ) return READABLE;
- 	if (filter == EVFILT_WRITE) return WRITABLE;
-
- 	return 0;
- }
-
- static
- VALUE io_wait_transfer(VALUE _arguments) {
- 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
-
- 	VALUE result = Event_Backend_transfer(arguments->data->loop);
-
- 	return INT2NUM(events_from_kqueue_filter(NUM2INT(result)));
- }
-
- VALUE Event_Backend_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
- 	struct Event_Backend_KQueue *data = NULL;
- 	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
-
- 	int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
-
- 	struct io_wait_arguments io_wait_arguments = {
- 		.events = io_add_filters(data->descriptor, descriptor, NUM2INT(events), fiber),
- 		.data = data,
- 		.descriptor = descriptor,
- 	};
-
- 	return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
- }
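
Editor's note: io_wait registers up to two oneshot filters keyed by the fiber pointer in udata, yields to the event loop, and is resumed with the filter that fired, which events_from_kqueue_filter maps back to READABLE or WRITABLE. A stripped-down sketch of the same registration and wait, without the fiber plumbing (illustrative, BSD/macOS):

#include <sys/types.h>
#include <sys/event.h>
#include <unistd.h>
#include <stdio.h>

int main(void) {
	int pipefd[2];
	pipe(pipefd);
	write(pipefd[1], "x", 1); // Make the read end immediately readable.
	
	int kq = kqueue();
	
	struct kevent change;
	EV_SET(&change, pipefd[0], EVFILT_READ, EV_ADD | EV_ENABLE | EV_ONESHOT, 0, 0, NULL);
	
	// Register the filter and wait for it in a single kevent() call:
	struct kevent result;
	int count = kevent(kq, &change, 1, &result, 1, NULL);
	
	if (count == 1 && result.filter == EVFILT_READ) {
		// result.data reports the number of bytes available to read.
		printf("fd %d readable, %ld bytes pending\n", (int)result.ident, (long)result.data);
	}
	
	return 0;
}
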
-
- static
- struct timespec * make_timeout(VALUE duration, struct timespec * storage) {
- 	if (duration == Qnil) {
- 		return NULL;
- 	}
-
- 	if (FIXNUM_P(duration)) {
- 		storage->tv_sec = NUM2TIMET(duration);
- 		storage->tv_nsec = 0;
-
- 		return storage;
- 	}
-
- 	else if (RB_FLOAT_TYPE_P(duration)) {
- 		double value = RFLOAT_VALUE(duration);
- 		time_t seconds = value;
-
- 		storage->tv_sec = seconds;
- 		storage->tv_nsec = (value - seconds) * 1000000000L;
-
- 		return storage;
- 	}
-
- 	rb_raise(rb_eRuntimeError, "unable to convert timeout");
- }
-
- static
- int timeout_nonblocking(struct timespec * timespec) {
- 	return timespec && timespec->tv_sec == 0 && timespec->tv_nsec == 0;
- }
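
Editor's note: make_timeout accepts a nil, integer, or float duration; the float branch splits the value into whole seconds plus nanoseconds, since struct timespec has no fractional-second field of its own. The same arithmetic as a hypothetical standalone helper:

#include <time.h>

// Sketch of make_timeout's float branch (the helper name is invented):
static struct timespec timespec_from_double(double value) {
	struct timespec storage;
	
	time_t seconds = value; // Truncates toward zero.
	
	storage.tv_sec = seconds;
	storage.tv_nsec = (value - seconds) * 1000000000L;
	
	return storage;
}

// For example, 1.5 becomes {tv_sec = 1, tv_nsec = 500000000}.
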
-
- struct select_arguments {
- 	struct Event_Backend_KQueue *data;
-
- 	int count;
- 	struct kevent events[KQUEUE_MAX_EVENTS];
-
- 	struct timespec storage;
- 	struct timespec *timeout;
- };
-
- static
- void * select_internal(void *_arguments) {
- 	struct select_arguments * arguments = (struct select_arguments *)_arguments;
-
- 	arguments->count = kevent(arguments->data->descriptor, NULL, 0, arguments->events, arguments->count, arguments->timeout);
-
- 	return NULL;
- }
-
- static
- void select_internal_without_gvl(struct select_arguments *arguments) {
- 	rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
-
- 	if (arguments->count == -1) {
- 		rb_sys_fail("select_internal_without_gvl:kevent");
- 	}
- }
-
- static
- void select_internal_with_gvl(struct select_arguments *arguments) {
- 	select_internal((void *)arguments);
-
- 	if (arguments->count == -1) {
- 		rb_sys_fail("select_internal_with_gvl:kevent");
- 	}
- }
-
- VALUE Event_Backend_KQueue_select(VALUE self, VALUE duration) {
- 	struct Event_Backend_KQueue *data = NULL;
- 	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
-
- 	struct select_arguments arguments = {
- 		.data = data,
- 		.count = KQUEUE_MAX_EVENTS,
- 		.storage = {
- 			.tv_sec = 0,
- 			.tv_nsec = 0
- 		}
- 	};
-
- 	// We break this implementation into two parts.
- 	// (1) count = kevent(..., timeout = 0)
- 	// (2) without gvl: kevent(..., timeout) if count == 0 and timeout != 0
- 	// This allows us to avoid releasing and reacquiring the GVL.
- 	// Non-comprehensive testing shows this gives a 1.5x speedup.
- 	arguments.timeout = &arguments.storage;
-
- 	// First do the syscall with no timeout to get any immediately available events:
- 	select_internal_with_gvl(&arguments);
-
- 	// If there were no pending events and we have a timeout, wait for more events:
- 	if (arguments.count == 0) {
- 		arguments.timeout = make_timeout(duration, &arguments.storage);
-
- 		if (!timeout_nonblocking(arguments.timeout)) {
- 			arguments.count = KQUEUE_MAX_EVENTS;
-
- 			select_internal_without_gvl(&arguments);
- 		}
- 	}
-
- 	for (int i = 0; i < arguments.count; i += 1) {
- 		VALUE fiber = (VALUE)arguments.events[i].udata;
- 		VALUE result = INT2NUM(arguments.events[i].filter);
-
- 		Event_Backend_transfer_result(fiber, result);
- 	}
-
- 	return INT2NUM(arguments.count);
- }
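
Editor's note: the select above polls twice, once with a zero timeout while still holding the GVL, and only when nothing is pending does it pay to release the GVL and block. A condensed sketch of that control flow (the helper is hypothetical; in the extension the blocking call runs under rb_thread_call_without_gvl):

#include <sys/event.h>
#include <stddef.h>

// Two-phase kevent: cheap non-blocking poll first, blocking wait second.
static int two_phase_kevent(int kq, struct kevent *events, int capacity, struct timespec *timeout) {
	struct timespec zero = {0, 0};
	
	// Phase 1: collect already-pending events without blocking (GVL held).
	int count = kevent(kq, NULL, 0, events, capacity, &zero);
	
	// Phase 2: nothing pending and a non-zero (or infinite, NULL) timeout,
	// so block; this is where the extension releases the GVL.
	if (count == 0 && !(timeout && timeout->tv_sec == 0 && timeout->tv_nsec == 0)) {
		count = kevent(kq, NULL, 0, events, capacity, timeout);
	}
	
	return count;
}
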
-
- void Init_Event_Backend_KQueue(VALUE Event_Backend) {
- 	id_fileno = rb_intern("fileno");
-
- 	Event_Backend_KQueue = rb_define_class_under(Event_Backend, "KQueue", rb_cObject);
-
- 	rb_define_alloc_func(Event_Backend_KQueue, Event_Backend_KQueue_allocate);
- 	rb_define_method(Event_Backend_KQueue, "initialize", Event_Backend_KQueue_initialize, 1);
- 	rb_define_method(Event_Backend_KQueue, "close", Event_Backend_KQueue_close, 0);
-
- 	rb_define_method(Event_Backend_KQueue, "io_wait", Event_Backend_KQueue_io_wait, 3);
- 	rb_define_method(Event_Backend_KQueue, "select", Event_Backend_KQueue_select, 1);
- 	rb_define_method(Event_Backend_KQueue, "process_wait", Event_Backend_KQueue_process_wait, 3);
- }
@@ -1,488 +0,0 @@
- // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
- //
- // Permission is hereby granted, free of charge, to any person obtaining a copy
- // of this software and associated documentation files (the "Software"), to deal
- // in the Software without restriction, including without limitation the rights
- // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- // copies of the Software, and to permit persons to whom the Software is
- // furnished to do so, subject to the following conditions:
- //
- // The above copyright notice and this permission notice shall be included in
- // all copies or substantial portions of the Software.
- //
- // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- // THE SOFTWARE.
-
- #include "uring.h"
- #include "backend.h"
-
- #include <liburing.h>
- #include <poll.h>
- #include <time.h>
-
- #include "pidfd.c"
-
- static VALUE Event_Backend_URing = Qnil;
- static ID id_fileno;
-
- enum {URING_ENTRIES = 128};
- enum {URING_MAX_EVENTS = 128};
-
- struct Event_Backend_URing {
- 	VALUE loop;
- 	struct io_uring ring;
- };
-
- void Event_Backend_URing_Type_mark(void *_data)
- {
- 	struct Event_Backend_URing *data = _data;
- 	rb_gc_mark(data->loop);
- }
-
- static
- void close_internal(struct Event_Backend_URing *data) {
- 	if (data->ring.ring_fd >= 0) {
- 		io_uring_queue_exit(&data->ring);
- 		data->ring.ring_fd = -1;
- 	}
- }
-
- void Event_Backend_URing_Type_free(void *_data)
- {
- 	struct Event_Backend_URing *data = _data;
-
- 	close_internal(data);
-
- 	free(data);
- }
-
- size_t Event_Backend_URing_Type_size(const void *data)
- {
- 	return sizeof(struct Event_Backend_URing);
- }
-
- static const rb_data_type_t Event_Backend_URing_Type = {
- 	.wrap_struct_name = "Event::Backend::URing",
- 	.function = {
- 		.dmark = Event_Backend_URing_Type_mark,
- 		.dfree = Event_Backend_URing_Type_free,
- 		.dsize = Event_Backend_URing_Type_size,
- 	},
- 	.data = NULL,
- 	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
- };
-
- VALUE Event_Backend_URing_allocate(VALUE self) {
- 	struct Event_Backend_URing *data = NULL;
- 	VALUE instance = TypedData_Make_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
-
- 	data->loop = Qnil;
- 	data->ring.ring_fd = -1;
-
- 	return instance;
- }
-
- VALUE Event_Backend_URing_initialize(VALUE self, VALUE loop) {
- 	struct Event_Backend_URing *data = NULL;
- 	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
-
- 	data->loop = loop;
-
- 	int result = io_uring_queue_init(URING_ENTRIES, &data->ring, 0);
-
- 	if (result < 0) {
- 		rb_syserr_fail(-result, "io_uring_queue_init");
- 	}
-
- 	rb_update_max_fd(data->ring.ring_fd);
-
- 	return self;
- }
-
- VALUE Event_Backend_URing_close(VALUE self) {
- 	struct Event_Backend_URing *data = NULL;
- 	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
-
- 	close_internal(data);
-
- 	return Qnil;
- }
-
- struct io_uring_sqe * io_get_sqe(struct Event_Backend_URing *data) {
- 	struct io_uring_sqe *sqe = io_uring_get_sqe(&data->ring);
-
- 	while (sqe == NULL) {
- 		io_uring_submit(&data->ring);
- 		sqe = io_uring_get_sqe(&data->ring);
- 	}
-
- 	// fprintf(stderr, "io_get_sqe -> %p\n", sqe);
-
- 	return sqe;
- }
-
- struct process_wait_arguments {
- 	struct Event_Backend_URing *data;
- 	pid_t pid;
- 	int flags;
- 	int descriptor;
- };
-
- static
- VALUE process_wait_transfer(VALUE _arguments) {
- 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
-
- 	Event_Backend_transfer(arguments->data->loop);
-
- 	return Event_Backend_process_status_wait(arguments->pid);
- }
-
- static
- VALUE process_wait_ensure(VALUE _arguments) {
- 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
-
- 	close(arguments->descriptor);
-
- 	return Qnil;
- }
-
- VALUE Event_Backend_URing_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
- 	struct Event_Backend_URing *data = NULL;
- 	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
-
- 	struct process_wait_arguments process_wait_arguments = {
- 		.data = data,
- 		.pid = NUM2PIDT(pid),
- 		.flags = NUM2INT(flags),
- 	};
-
- 	process_wait_arguments.descriptor = pidfd_open(process_wait_arguments.pid, 0);
- 	rb_update_max_fd(process_wait_arguments.descriptor);
-
- 	struct io_uring_sqe *sqe = io_get_sqe(data);
- 	assert(sqe);
-
- 	io_uring_prep_poll_add(sqe, process_wait_arguments.descriptor, POLLIN|POLLHUP|POLLERR);
- 	io_uring_sqe_set_data(sqe, (void*)fiber);
-
- 	return rb_ensure(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_ensure, (VALUE)&process_wait_arguments);
- }
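
Editor's note: rather than a process filter, this backend waits on a pidfd, which polls readable once the process has exited; the bundled pidfd.c supplies the pidfd_open wrapper. A minimal standalone sketch of the same idea using plain poll (Linux 5.3+; pidfd_open is invoked via syscall() here because older libcs ship no wrapper):

#define _GNU_SOURCE
#include <sys/syscall.h>
#include <sys/wait.h>
#include <poll.h>
#include <unistd.h>
#include <stdio.h>

int main(void) {
	pid_t pid = fork();
	if (pid == 0) {
		sleep(1);
		_exit(0); // Child exits after one second.
	}
	
	int fd = (int)syscall(SYS_pidfd_open, pid, 0);
	if (fd == -1) {perror("pidfd_open"); return 1;}
	
	// The descriptor becomes readable when the process exits:
	struct pollfd pfd = {.fd = fd, .events = POLLIN};
	poll(&pfd, 1, -1);
	
	int status = 0;
	waitpid(pid, &status, 0);
	printf("child exited with status %d\n", WEXITSTATUS(status));
	
	close(fd);
	return 0;
}
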
-
- static inline
- short poll_flags_from_events(int events) {
- 	short flags = 0;
-
- 	if (events & READABLE) flags |= POLLIN;
- 	if (events & PRIORITY) flags |= POLLPRI;
- 	if (events & WRITABLE) flags |= POLLOUT;
-
- 	flags |= POLLERR;
- 	flags |= POLLHUP;
-
- 	return flags;
- }
-
- static inline
- int events_from_poll_flags(short flags) {
- 	int events = 0;
-
- 	if (flags & POLLIN) events |= READABLE;
- 	if (flags & POLLPRI) events |= PRIORITY;
- 	if (flags & POLLOUT) events |= WRITABLE;
-
- 	return events;
- }
-
- struct io_wait_arguments {
- 	struct Event_Backend_URing *data;
- 	VALUE fiber;
- 	short flags;
- };
-
- static
- VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
- 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
- 	struct Event_Backend_URing *data = arguments->data;
-
- 	struct io_uring_sqe *sqe = io_get_sqe(data);
- 	assert(sqe);
-
- 	// fprintf(stderr, "poll_remove(%p, %p)\n", sqe, (void*)arguments->fiber);
-
- 	io_uring_prep_poll_remove(sqe, (void*)arguments->fiber);
- 	io_uring_submit(&data->ring);
-
- 	rb_exc_raise(exception);
- }
-
- static
- VALUE io_wait_transfer(VALUE _arguments) {
- 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
- 	struct Event_Backend_URing *data = arguments->data;
-
- 	VALUE result = Event_Backend_transfer(data->loop);
-
- 	// We explicitly filter the resulting events based on the requested events.
- 	// In some cases, poll will report events we didn't ask for.
- 	short flags = arguments->flags & NUM2INT(result);
-
- 	return INT2NUM(events_from_poll_flags(flags));
- }
-
- VALUE Event_Backend_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
- 	struct Event_Backend_URing *data = NULL;
- 	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
-
- 	int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
- 	struct io_uring_sqe *sqe = io_get_sqe(data);
- 	assert(sqe);
-
- 	short flags = poll_flags_from_events(NUM2INT(events));
-
- 	// fprintf(stderr, "poll_add(%p, %d, %d, %p)\n", sqe, descriptor, flags, (void*)fiber);
-
- 	io_uring_prep_poll_add(sqe, descriptor, flags);
- 	io_uring_sqe_set_data(sqe, (void*)fiber);
- 	// fprintf(stderr, "io_uring_submit\n");
- 	// io_uring_submit(&data->ring);
-
- 	struct io_wait_arguments io_wait_arguments = {
- 		.data = data,
- 		.fiber = fiber,
- 		.flags = flags
- 	};
-
- 	return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
- }
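
Editor's note: io_wait only prepares the poll SQE; submission is deferred to select, which batches it with any other pending SQEs (hence the commented-out io_uring_submit above). A minimal liburing sketch of the single-shot poll itself, without the deferred submission or fiber plumbing (illustrative only):

#include <liburing.h>
#include <poll.h>
#include <unistd.h>
#include <stdio.h>

int main(void) {
	int pipefd[2];
	pipe(pipefd);
	write(pipefd[1], "x", 1); // Make the read end immediately readable.
	
	struct io_uring ring;
	io_uring_queue_init(8, &ring, 0);
	
	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_add(sqe, pipefd[0], POLLIN);
	io_uring_sqe_set_data(sqe, (void*)1); // The backend stores the fiber here.
	io_uring_submit(&ring);
	
	struct io_uring_cqe *cqe = NULL;
	io_uring_wait_cqe(&ring, &cqe);
	
	// cqe->res carries the returned poll flags (or a negative errno):
	printf("poll result: 0x%x\n", cqe->res);
	
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}
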
-
- inline static
- void resize_to_capacity(VALUE string, size_t offset, size_t length) {
- 	size_t current_length = RSTRING_LEN(string);
- 	long difference = (long)(offset + length) - (long)current_length;
-
- 	difference += 1;
-
- 	if (difference > 0) {
- 		rb_str_modify_expand(string, difference);
- 	} else {
- 		rb_str_modify(string);
- 	}
- }
-
- inline static
- void resize_to_fit(VALUE string, size_t offset, size_t length) {
- 	size_t current_length = RSTRING_LEN(string);
-
- 	if (current_length < (offset + length)) {
- 		rb_str_set_len(string, offset + length);
- 	}
- }
-
- VALUE Event_Backend_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE offset, VALUE length) {
- 	struct Event_Backend_URing *data = NULL;
- 	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
-
- 	resize_to_capacity(buffer, NUM2SIZET(offset), NUM2SIZET(length));
-
- 	int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
- 	struct io_uring_sqe *sqe = io_get_sqe(data);
- 	assert(sqe);
-
- 	struct iovec iovecs[1];
- 	iovecs[0].iov_base = RSTRING_PTR(buffer) + NUM2SIZET(offset);
- 	iovecs[0].iov_len = NUM2SIZET(length);
-
- 	io_uring_prep_readv(sqe, descriptor, iovecs, 1, 0);
- 	io_uring_sqe_set_data(sqe, (void*)fiber);
- 	io_uring_submit(&data->ring);
-
- 	// fprintf(stderr, "prep_readv(%p, %d, %ld)\n", sqe, descriptor, iovecs[0].iov_len);
-
- 	int result = NUM2INT(Event_Backend_transfer(data->loop));
-
- 	if (result < 0) {
- 		rb_syserr_fail(-result, strerror(-result));
- 	}
-
- 	resize_to_fit(buffer, NUM2SIZET(offset), (size_t)result);
-
- 	return INT2NUM(result);
- }
-
- VALUE Event_Backend_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE offset, VALUE length) {
- 	struct Event_Backend_URing *data = NULL;
- 	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
-
- 	if ((size_t)RSTRING_LEN(buffer) < NUM2SIZET(offset) + NUM2SIZET(length)) {
- 		rb_raise(rb_eRuntimeError, "invalid offset/length exceeds bounds of buffer");
- 	}
-
- 	int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
- 	struct io_uring_sqe *sqe = io_get_sqe(data);
-
- 	struct iovec iovecs[1];
- 	iovecs[0].iov_base = RSTRING_PTR(buffer) + NUM2SIZET(offset);
- 	iovecs[0].iov_len = NUM2SIZET(length);
-
- 	io_uring_prep_writev(sqe, descriptor, iovecs, 1, 0);
- 	io_uring_sqe_set_data(sqe, (void*)fiber);
- 	io_uring_submit(&data->ring);
-
- 	// fprintf(stderr, "prep_writev(%p, %d, %ld)\n", sqe, descriptor, iovecs[0].iov_len);
-
- 	int result = NUM2INT(Event_Backend_transfer(data->loop));
-
- 	if (result < 0) {
- 		rb_syserr_fail(-result, strerror(-result));
- 	}
-
- 	return INT2NUM(result);
- }
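
Editor's note: io_read and io_write point a single iovec at the Ruby string's storage, submit a readv/writev SQE, and suspend the fiber until the completion delivers the byte count (or a negated errno) in cqe->res. A stripped-down liburing sketch of the read path against a plain stack buffer (illustrative only):

#include <liburing.h>
#include <sys/uio.h>
#include <unistd.h>
#include <stdio.h>

int main(void) {
	int pipefd[2];
	pipe(pipefd);
	write(pipefd[1], "hello", 5);
	
	struct io_uring ring;
	io_uring_queue_init(8, &ring, 0);
	
	char buffer[16];
	struct iovec iovecs[1] = {{.iov_base = buffer, .iov_len = sizeof(buffer)}};
	
	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
	io_uring_prep_readv(sqe, pipefd[0], iovecs, 1, 0);
	io_uring_submit(&ring);
	
	struct io_uring_cqe *cqe = NULL;
	io_uring_wait_cqe(&ring, &cqe);
	
	// As in the backend, a negative cqe->res is -errno; otherwise it is
	// the number of bytes read:
	printf("read %d bytes\n", cqe->res);
	
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}
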
-
- static
- struct __kernel_timespec * make_timeout(VALUE duration, struct __kernel_timespec *storage) {
- 	if (duration == Qnil) {
- 		return NULL;
- 	}
-
- 	if (FIXNUM_P(duration)) {
- 		storage->tv_sec = NUM2TIMET(duration);
- 		storage->tv_nsec = 0;
-
- 		return storage;
- 	}
-
- 	else if (RB_FLOAT_TYPE_P(duration)) {
- 		double value = RFLOAT_VALUE(duration);
- 		time_t seconds = value;
-
- 		storage->tv_sec = seconds;
- 		storage->tv_nsec = (value - seconds) * 1000000000L;
-
- 		return storage;
- 	}
-
- 	rb_raise(rb_eRuntimeError, "unable to convert timeout");
- }
-
- static
- int timeout_nonblocking(struct __kernel_timespec *timespec) {
- 	return timespec && timespec->tv_sec == 0 && timespec->tv_nsec == 0;
- }
-
- struct select_arguments {
- 	struct Event_Backend_URing *data;
-
- 	int result;
-
- 	struct __kernel_timespec storage;
- 	struct __kernel_timespec *timeout;
- };
-
- static
- void * select_internal(void *_arguments) {
- 	struct select_arguments * arguments = (struct select_arguments *)_arguments;
-
- 	io_uring_submit(&arguments->data->ring);
-
- 	struct io_uring_cqe *cqe = NULL;
- 	arguments->result = io_uring_wait_cqe_timeout(&arguments->data->ring, &cqe, arguments->timeout);
-
- 	return NULL;
- }
-
- static
- int select_internal_without_gvl(struct select_arguments *arguments) {
- 	rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
-
- 	if (arguments->result == -ETIME) {
- 		arguments->result = 0;
- 	} else if (arguments->result < 0) {
- 		rb_syserr_fail(-arguments->result, "select_internal_without_gvl:io_uring_wait_cqe_timeout");
- 	} else {
- 		// At least 1 event is waiting:
- 		arguments->result = 1;
- 	}
-
- 	return arguments->result;
- }
-
- static inline
- unsigned select_process_completions(struct io_uring *ring) {
- 	unsigned completed = 0;
- 	unsigned head;
- 	struct io_uring_cqe *cqe;
-
- 	io_uring_for_each_cqe(ring, head, cqe) {
- 		++completed;
-
- 		// If the operation was cancelled, or the operation has no user data (fiber):
- 		if (cqe->res == -ECANCELED || cqe->user_data == 0 || cqe->user_data == LIBURING_UDATA_TIMEOUT) {
- 			continue;
- 		}
-
- 		VALUE fiber = (VALUE)cqe->user_data;
- 		VALUE result = INT2NUM(cqe->res);
-
- 		// fprintf(stderr, "cqe res=%d user_data=%p\n", cqe->res, (void*)cqe->user_data);
-
- 		Event_Backend_transfer_result(fiber, result);
- 	}
-
- 	if (completed) {
- 		io_uring_cq_advance(ring, completed);
- 	}
-
- 	return completed;
- }
-
- VALUE Event_Backend_URing_select(VALUE self, VALUE duration) {
- 	struct Event_Backend_URing *data = NULL;
- 	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
-
- 	int result = select_process_completions(&data->ring);
-
- 	if (result < 0) {
- 		rb_syserr_fail(-result, strerror(-result));
- 	} else if (result == 0) {
- 		// We might need to wait for events:
- 		struct select_arguments arguments = {
- 			.data = data,
- 			.timeout = NULL,
- 		};
-
- 		arguments.timeout = make_timeout(duration, &arguments.storage);
-
- 		if (!timeout_nonblocking(arguments.timeout)) {
- 			result = select_internal_without_gvl(&arguments);
- 		} else {
- 			io_uring_submit(&data->ring);
- 		}
- 	}
-
- 	result = select_process_completions(&data->ring);
-
- 	return INT2NUM(result);
- }
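
Editor's note: select drains completions eagerly and only falls into io_uring_wait_cqe_timeout when nothing was pending; -ETIME there means the timeout elapsed, not an error. A minimal sketch of that timed wait in isolation (illustrative only):

#include <liburing.h>
#include <errno.h>
#include <stdio.h>

int main(void) {
	struct io_uring ring;
	io_uring_queue_init(8, &ring, 0);
	
	struct __kernel_timespec timeout = {.tv_sec = 0, .tv_nsec = 100000000}; // 100ms.
	struct io_uring_cqe *cqe = NULL;
	
	int result = io_uring_wait_cqe_timeout(&ring, &cqe, &timeout);
	
	if (result == -ETIME) {
		// Expected here, since nothing was submitted:
		printf("timed out with no completions\n");
	} else if (result < 0) {
		fprintf(stderr, "wait failed: %d\n", result);
	}
	
	io_uring_queue_exit(&ring);
	return 0;
}
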
-
- void Init_Event_Backend_URing(VALUE Event_Backend) {
- 	id_fileno = rb_intern("fileno");
-
- 	Event_Backend_URing = rb_define_class_under(Event_Backend, "URing", rb_cObject);
-
- 	rb_define_alloc_func(Event_Backend_URing, Event_Backend_URing_allocate);
- 	rb_define_method(Event_Backend_URing, "initialize", Event_Backend_URing_initialize, 1);
- 	rb_define_method(Event_Backend_URing, "close", Event_Backend_URing_close, 0);
-
- 	rb_define_method(Event_Backend_URing, "io_wait", Event_Backend_URing_io_wait, 3);
- 	rb_define_method(Event_Backend_URing, "select", Event_Backend_URing_select, 1);
-
- 	rb_define_method(Event_Backend_URing, "io_read", Event_Backend_URing_io_read, 5);
- 	rb_define_method(Event_Backend_URing, "io_write", Event_Backend_URing_io_write, 5);
- 	rb_define_method(Event_Backend_URing, "process_wait", Event_Backend_URing_process_wait, 3);
- }