io-event-machty 1.0.1

// Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

#include "uring.h"
#include "selector.h"

#include <liburing.h>
#include <poll.h>
#include <time.h>

#include "pidfd.c"

#include <linux/version.h>

enum {
	DEBUG = 0,
	DEBUG_IO_READ = 0,
};

static VALUE IO_Event_Selector_URing = Qnil;

enum {URING_ENTRIES = 64};

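// Per-selector state: the generic selector backend, the io_uring instance, the
// number of prepared-but-unsubmitted SQEs, and whether the selector is currently
// blocked in io_uring_wait_cqe_timeout (used by wakeup).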
struct IO_Event_Selector_URing {
	struct IO_Event_Selector backend;
	struct io_uring ring;
	size_t pending;
	int blocked;
};

void IO_Event_Selector_URing_Type_mark(void *_data)
{
	struct IO_Event_Selector_URing *data = _data;
	IO_Event_Selector_mark(&data->backend);
}

static
void close_internal(struct IO_Event_Selector_URing *data) {
	if (data->ring.ring_fd >= 0) {
		io_uring_queue_exit(&data->ring);
		data->ring.ring_fd = -1;
	}
}

void IO_Event_Selector_URing_Type_free(void *_data)
{
	struct IO_Event_Selector_URing *data = _data;

	close_internal(data);

	free(data);
}

size_t IO_Event_Selector_URing_Type_size(const void *data)
{
	return sizeof(struct IO_Event_Selector_URing);
}

static const rb_data_type_t IO_Event_Selector_URing_Type = {
	.wrap_struct_name = "IO_Event::Backend::URing",
	.function = {
		.dmark = IO_Event_Selector_URing_Type_mark,
		.dfree = IO_Event_Selector_URing_Type_free,
		.dsize = IO_Event_Selector_URing_Type_size,
	},
	.data = NULL,
	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
};

VALUE IO_Event_Selector_URing_allocate(VALUE self) {
	struct IO_Event_Selector_URing *data = NULL;
	VALUE instance = TypedData_Make_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);

	IO_Event_Selector_initialize(&data->backend, Qnil);
	data->ring.ring_fd = -1;

	data->pending = 0;
	data->blocked = 0;

	return instance;
}

VALUE IO_Event_Selector_URing_initialize(VALUE self, VALUE loop) {
	struct IO_Event_Selector_URing *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);

	IO_Event_Selector_initialize(&data->backend, loop);
	int result = io_uring_queue_init(URING_ENTRIES, &data->ring, 0);

	if (result < 0) {
		rb_syserr_fail(-result, "IO_Event_Selector_URing_initialize:io_uring_queue_init");
	}

	rb_update_max_fd(data->ring.ring_fd);

	return self;
}

VALUE IO_Event_Selector_URing_loop(VALUE self) {
	struct IO_Event_Selector_URing *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);

	return data->backend.loop;
}

VALUE IO_Event_Selector_URing_close(VALUE self) {
	struct IO_Event_Selector_URing *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);

	close_internal(data);

	return Qnil;
}

VALUE IO_Event_Selector_URing_transfer(VALUE self)
{
	struct IO_Event_Selector_URing *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);

	return IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
}

VALUE IO_Event_Selector_URing_resume(int argc, VALUE *argv, VALUE self)
{
	struct IO_Event_Selector_URing *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);

	return IO_Event_Selector_resume(&data->backend, argc, argv);
}

VALUE IO_Event_Selector_URing_yield(VALUE self)
{
	struct IO_Event_Selector_URing *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);

	return IO_Event_Selector_yield(&data->backend);
}

VALUE IO_Event_Selector_URing_push(VALUE self, VALUE fiber)
{
	struct IO_Event_Selector_URing *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);

	IO_Event_Selector_queue_push(&data->backend, fiber);

	return Qnil;
}

VALUE IO_Event_Selector_URing_raise(int argc, VALUE *argv, VALUE self)
{
	struct IO_Event_Selector_URing *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);

	return IO_Event_Selector_raise(&data->backend, argc, argv);
}

VALUE IO_Event_Selector_URing_ready_p(VALUE self) {
	struct IO_Event_Selector_URing *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);

	return data->backend.ready ? Qtrue : Qfalse;
}

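// Submission strategy: most operations only increment the pending count via
// io_uring_submit_pending and are flushed in batches by io_uring_submit_flush;
// io_uring_submit_now submits immediately, yielding to the event loop while the
// kernel reports -EBUSY or -EAGAIN until the submission succeeds.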
static
int io_uring_submit_flush(struct IO_Event_Selector_URing *data) {
	if (data->pending) {
		if (DEBUG) fprintf(stderr, "io_uring_submit_flush(pending=%zu)\n", data->pending);

		// Try to submit:
		int result = io_uring_submit(&data->ring);

		if (result >= 0) {
			// If it was submitted, reset pending count:
			data->pending = 0;
		} else if (result != -EBUSY && result != -EAGAIN) {
			rb_syserr_fail(-result, "io_uring_submit_flush:io_uring_submit");
		}

		return result;
	}

	return 0;
}

static
int io_uring_submit_now(struct IO_Event_Selector_URing *data) {
	while (true) {
		int result = io_uring_submit(&data->ring);

		if (result >= 0) {
			data->pending = 0;
			return result;
		}

		if (result == -EBUSY || result == -EAGAIN) {
			IO_Event_Selector_yield(&data->backend);
		} else {
			rb_syserr_fail(-result, "io_uring_submit_now:io_uring_submit");
		}
	}
}

static
void io_uring_submit_pending(struct IO_Event_Selector_URing *data) {
	data->pending += 1;
}

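// Acquire a submission queue entry, submitting the current batch to drain the
// queue whenever it is full.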
struct io_uring_sqe * io_get_sqe(struct IO_Event_Selector_URing *data) {
	struct io_uring_sqe *sqe = io_uring_get_sqe(&data->ring);

	while (sqe == NULL) {
		// The submit queue is full, we need to drain it:
		io_uring_submit_now(data);

		sqe = io_uring_get_sqe(&data->ring);
	}

	return sqe;
}

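// Process wait is implemented by opening a pidfd for the child process and
// polling it for readability; once the pidfd becomes readable, the process has
// exited and its status can be reaped without blocking.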
struct process_wait_arguments {
	struct IO_Event_Selector_URing *data;
	pid_t pid;
	int flags;
	int descriptor;
};

static
VALUE process_wait_transfer(VALUE _arguments) {
	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;

	IO_Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);

	return IO_Event_Selector_process_status_wait(arguments->pid);
}

static
VALUE process_wait_ensure(VALUE _arguments) {
	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;

	close(arguments->descriptor);

	return Qnil;
}

VALUE IO_Event_Selector_URing_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
	struct IO_Event_Selector_URing *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);

	struct process_wait_arguments process_wait_arguments = {
		.data = data,
		.pid = NUM2PIDT(pid),
		.flags = NUM2INT(flags),
	};

	process_wait_arguments.descriptor = pidfd_open(process_wait_arguments.pid, 0);

	// Fail early if the pidfd could not be opened:
	if (process_wait_arguments.descriptor == -1) {
		rb_syserr_fail(errno, "IO_Event_Selector_URing_process_wait:pidfd_open");
	}

	rb_update_max_fd(process_wait_arguments.descriptor);

	struct io_uring_sqe *sqe = io_get_sqe(data);

	if (DEBUG) fprintf(stderr, "IO_Event_Selector_URing_process_wait:io_uring_prep_poll_add(%p)\n", (void*)fiber);
	io_uring_prep_poll_add(sqe, process_wait_arguments.descriptor, POLLIN|POLLHUP|POLLERR);
	io_uring_sqe_set_data(sqe, (void*)fiber);
	io_uring_submit_pending(data);

	return rb_ensure(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_ensure, (VALUE)&process_wait_arguments);
}

static inline
short poll_flags_from_events(int events) {
	short flags = 0;

	if (events & IO_EVENT_READABLE) flags |= POLLIN;
	if (events & IO_EVENT_PRIORITY) flags |= POLLPRI;
	if (events & IO_EVENT_WRITABLE) flags |= POLLOUT;

	flags |= POLLERR;
	flags |= POLLHUP;

	return flags;
}

static inline
int events_from_poll_flags(short flags) {
	int events = 0;

	if (flags & POLLIN) events |= IO_EVENT_READABLE;
	if (flags & POLLPRI) events |= IO_EVENT_PRIORITY;
	if (flags & POLLOUT) events |= IO_EVENT_WRITABLE;

	return events;
}

struct io_wait_arguments {
	struct IO_Event_Selector_URing *data;
	VALUE fiber;
	short flags;
};

static
VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
	struct IO_Event_Selector_URing *data = arguments->data;

	struct io_uring_sqe *sqe = io_get_sqe(data);

	if (DEBUG) fprintf(stderr, "io_wait_rescue:io_uring_prep_poll_remove(%p)\n", (void*)arguments->fiber);

	io_uring_prep_poll_remove(sqe, (void*)arguments->fiber);
	io_uring_submit_now(data);

	rb_exc_raise(exception);
}

static
VALUE io_wait_transfer(VALUE _arguments) {
	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
	struct IO_Event_Selector_URing *data = arguments->data;

	VALUE result = IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
	if (DEBUG) fprintf(stderr, "io_wait:IO_Event_Selector_fiber_transfer -> %d\n", RB_NUM2INT(result));

	if (!RTEST(result)) {
		return Qfalse;
	}

	// We explicitly filter the resulting events based on the requested events.
	// In some cases, poll will report events we didn't ask for.
	short flags = arguments->flags & NUM2INT(result);

	return INT2NUM(events_from_poll_flags(flags));
}

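// Wait for the given events on the IO's file descriptor using a one-shot poll
// operation. If the waiting fiber is interrupted by an exception, the rescue
// handler cancels the outstanding poll before re-raising.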
VALUE IO_Event_Selector_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
	struct IO_Event_Selector_URing *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);

	int descriptor = IO_Event_Selector_io_descriptor(io);
	struct io_uring_sqe *sqe = io_get_sqe(data);

	short flags = poll_flags_from_events(NUM2INT(events));

	if (DEBUG) fprintf(stderr, "IO_Event_Selector_URing_io_wait:io_uring_prep_poll_add(descriptor=%d, flags=%d, fiber=%p)\n", descriptor, flags, (void*)fiber);

	io_uring_prep_poll_add(sqe, descriptor, flags);
	io_uring_sqe_set_data(sqe, (void*)fiber);

	// If we are going to wait, we assume that we are waiting for a while:
	io_uring_submit_pending(data);

	struct io_wait_arguments io_wait_arguments = {
		.data = data,
		.fiber = fiber,
		.flags = flags
	};

	return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
}

#ifdef HAVE_RUBY_IO_BUFFER_H

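// io_seekable selects the offset passed to io_uring_prep_read/io_uring_prep_write:
// -1 means "use (and advance) the current file position". On kernels older than
// 5.16, non-seekable descriptors (pipes, sockets) are given an explicit offset of
// 0 to work around io_uring bugs, hence the lseek probe below.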
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,16,0)
static inline off_t io_seekable(int descriptor) {
	return -1;
}
#else
#warning Upgrade your kernel to 5.16! io_uring bugs prevent efficient io_read/io_write hooks.
static inline off_t io_seekable(int descriptor)
{
	if (lseek(descriptor, 0, SEEK_CUR) == -1) {
		return 0;
	} else {
		return -1;
	}
}
#endif

static int io_read(struct IO_Event_Selector_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length) {
	struct io_uring_sqe *sqe = io_get_sqe(data);

	if (DEBUG) fprintf(stderr, "io_read:io_uring_prep_read(fiber=%p)\n", (void*)fiber);

	io_uring_prep_read(sqe, descriptor, buffer, length, io_seekable(descriptor));
	io_uring_sqe_set_data(sqe, (void*)fiber);
	io_uring_submit_now(data);

	VALUE result = IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
	if (DEBUG) fprintf(stderr, "io_read:IO_Event_Selector_fiber_transfer -> %d\n", RB_NUM2INT(result));

	return RB_NUM2INT(result);
}

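// Read up to `length` bytes from the IO into the buffer. Short reads are retried
// until `length` bytes have been read or EOF is reached; EAGAIN is handled by
// waiting for readability. The result is packed with rb_fiber_scheduler_io_result:
// the total number of bytes read on success, or a negative errno on failure.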
VALUE IO_Event_Selector_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
	struct IO_Event_Selector_URing *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);

	int descriptor = IO_Event_Selector_io_descriptor(io);

	void *base;
	size_t size;
	rb_io_buffer_get_bytes_for_writing(buffer, &base, &size);

	size_t offset = 0;
	size_t length = NUM2SIZET(_length);

	while (true) {
		size_t maximum_size = size - offset;
		if (DEBUG_IO_READ) fprintf(stderr, "io_read(%d, +%zu, %zu)\n", descriptor, offset, maximum_size);
		int result = io_read(data, fiber, descriptor, (char*)base+offset, maximum_size);
		if (DEBUG_IO_READ) fprintf(stderr, "io_read(%d, +%zu, %zu) -> %d\n", descriptor, offset, maximum_size, result);

		if (result > 0) {
			offset += result;
			if ((size_t)result >= length) break;
			length -= result;
		} else if (result == 0) {
			break;
		} else if (length > 0 && IO_Event_try_again(-result)) {
			IO_Event_Selector_URing_io_wait(self, fiber, io, RB_INT2NUM(IO_EVENT_READABLE));
		} else {
			return rb_fiber_scheduler_io_result(-1, -result);
		}
	}

	return rb_fiber_scheduler_io_result(offset, 0);
}

static
int io_write(struct IO_Event_Selector_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length) {
	struct io_uring_sqe *sqe = io_get_sqe(data);

	if (DEBUG) fprintf(stderr, "io_write:io_uring_prep_write(fiber=%p)\n", (void*)fiber);

	io_uring_prep_write(sqe, descriptor, buffer, length, io_seekable(descriptor));
	io_uring_sqe_set_data(sqe, (void*)fiber);
	io_uring_submit_pending(data);

	int result = RB_NUM2INT(IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL));
	if (DEBUG) fprintf(stderr, "io_write:IO_Event_Selector_fiber_transfer -> %d\n", result);

	return result;
}

VALUE IO_Event_Selector_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
	struct IO_Event_Selector_URing *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);

	int descriptor = IO_Event_Selector_io_descriptor(io);

	const void *base;
	size_t size;
	rb_io_buffer_get_bytes_for_reading(buffer, &base, &size);

	size_t offset = 0;
	size_t length = NUM2SIZET(_length);

	if (length > size) {
		rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
	}

	while (true) {
		size_t maximum_size = size - offset;
		int result = io_write(data, fiber, descriptor, (char*)base+offset, maximum_size);

		if (result > 0) {
			offset += result;
			if ((size_t)result >= length) break;
			length -= result;
		} else if (result == 0) {
			break;
		} else if (length > 0 && IO_Event_try_again(-result)) {
			IO_Event_Selector_URing_io_wait(self, fiber, io, RB_INT2NUM(IO_EVENT_WRITABLE));
		} else {
			return rb_fiber_scheduler_io_result(-1, -result);
		}
	}

	return rb_fiber_scheduler_io_result(offset, 0);
}

#endif

static const int ASYNC_CLOSE = 1;

VALUE IO_Event_Selector_URing_io_close(VALUE self, VALUE io) {
	struct IO_Event_Selector_URing *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);

	int descriptor = IO_Event_Selector_io_descriptor(io);

	if (ASYNC_CLOSE) {
		struct io_uring_sqe *sqe = io_get_sqe(data);

		io_uring_prep_close(sqe, descriptor);
		io_uring_sqe_set_data(sqe, NULL);
		io_uring_submit_now(data);
	} else {
		close(descriptor);
	}

	// We don't wait for the result of close since it has no use in practice:
	return Qtrue;
}

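// Convert a Ruby duration (nil, Integer or Float seconds) into a
// struct __kernel_timespec. Returns NULL for nil, meaning "wait indefinitely".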
static
struct __kernel_timespec * make_timeout(VALUE duration, struct __kernel_timespec *storage) {
	if (duration == Qnil) {
		return NULL;
	}

	if (FIXNUM_P(duration)) {
		storage->tv_sec = NUM2TIMET(duration);
		storage->tv_nsec = 0;

		return storage;
	} else if (RB_FLOAT_TYPE_P(duration)) {
		double value = RFLOAT_VALUE(duration);
		time_t seconds = value;

		storage->tv_sec = seconds;
		storage->tv_nsec = (value - seconds) * 1000000000L;

		return storage;
	}

	rb_raise(rb_eRuntimeError, "unable to convert timeout");
}

static
int timeout_nonblocking(struct __kernel_timespec *timespec) {
	return timespec && timespec->tv_sec == 0 && timespec->tv_nsec == 0;
}

struct select_arguments {
	struct IO_Event_Selector_URing *data;

	int result;

	struct __kernel_timespec storage;
	struct __kernel_timespec *timeout;
};

static
void * select_internal(void *_arguments) {
	struct select_arguments * arguments = (struct select_arguments *)_arguments;
	struct io_uring_cqe *cqe = NULL;

	arguments->result = io_uring_wait_cqe_timeout(&arguments->data->ring, &cqe, arguments->timeout);

	return NULL;
}

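// Wait for at least one completion (or the timeout) with the GVL released, so
// that other Ruby threads can run while the selector is blocked. The `blocked`
// flag allows wakeup to detect this state and submit a NOP to interrupt the wait.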
static
int select_internal_without_gvl(struct select_arguments *arguments) {
	io_uring_submit_flush(arguments->data);

	arguments->data->blocked = 1;
	rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
	arguments->data->blocked = 0;

	if (arguments->result == -ETIME) {
		arguments->result = 0;
	} else if (arguments->result == -EINTR) {
		arguments->result = 0;
	} else if (arguments->result < 0) {
		rb_syserr_fail(-arguments->result, "select_internal_without_gvl:io_uring_wait_cqe_timeout");
	} else {
		// At least 1 event is waiting:
		arguments->result = 1;
	}

	return arguments->result;
}

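// Drain the completion queue: resume the fiber stored in each CQE's user data
// with the operation's result. Cancelled operations and entries without a fiber
// (e.g. async close, NOP wakeups) are simply skipped.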
static inline
unsigned select_process_completions(struct io_uring *ring) {
	unsigned completed = 0;
	unsigned head;
	struct io_uring_cqe *cqe;

	io_uring_for_each_cqe(ring, head, cqe) {
		++completed;

		// If the operation was cancelled, or the operation has no user data (fiber):
		if (cqe->res == -ECANCELED || cqe->user_data == 0 || cqe->user_data == LIBURING_UDATA_TIMEOUT) {
			io_uring_cq_advance(ring, 1);
			continue;
		}

		VALUE fiber = (VALUE)cqe->user_data;
		VALUE result = RB_INT2NUM(cqe->res);

		if (DEBUG) fprintf(stderr, "cqe res=%d user_data=%p\n", cqe->res, (void*)cqe->user_data);

		io_uring_cq_advance(ring, 1);

		IO_Event_Selector_fiber_transfer(fiber, 1, &result);
	}

	// io_uring_cq_advance(ring, completed);

	if (DEBUG) fprintf(stderr, "select_process_completions(completed=%d)\n", completed);

	return completed;
}

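// The main selection loop: flush the ready queue, process any completions that
// are already available, and only block in io_uring_wait_cqe_timeout when there
// is nothing else to do.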
VALUE IO_Event_Selector_URing_select(VALUE self, VALUE duration) {
	struct IO_Event_Selector_URing *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);

	int ready = IO_Event_Selector_queue_flush(&data->backend);

	int result = select_process_completions(&data->ring);

	// If we:
	// 1. Didn't process any ready fibers, and
	// 2. Didn't process any completions from the non-blocking check (above), and
	// 3. There are no items in the ready list,
	// then we can perform a blocking select.
	if (!ready && !result && !data->backend.ready) {
		// We might need to wait for events:
		struct select_arguments arguments = {
			.data = data,
			.timeout = NULL,
		};

		arguments.timeout = make_timeout(duration, &arguments.storage);

		if (!data->backend.ready && !timeout_nonblocking(arguments.timeout)) {
			// This is a blocking operation, we wait for events:
			result = select_internal_without_gvl(&arguments);
		} else {
			// The specified timeout requires non-blocking behaviour, so we just flush the submission queue if required:
			io_uring_submit_flush(data);
		}

		// After waiting/flushing the submission queue, check if there are any completions:
		result = select_process_completions(&data->ring);
	}

	return RB_INT2NUM(result);
}

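// Wake up a selector that is blocked in io_uring_wait_cqe_timeout by submitting
// a NOP operation, which produces a completion and unblocks the wait. This is
// used to interrupt a blocked selector, e.g. from another thread.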
VALUE IO_Event_Selector_URing_wakeup(VALUE self) {
	struct IO_Event_Selector_URing *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);

	// If we are blocking, we can schedule a nop event to wake up the selector:
	if (data->blocked) {
		struct io_uring_sqe *sqe = NULL;

		while (true) {
			sqe = io_uring_get_sqe(&data->ring);
			if (sqe) break;

			rb_thread_schedule();

			// It's possible we became unblocked already, so we can assume the selector has already cycled at least once:
			if (!data->blocked) return Qfalse;
		}

		io_uring_prep_nop(sqe);
		io_uring_submit(&data->ring);

		return Qtrue;
	}

	return Qfalse;
}

void Init_IO_Event_Selector_URing(VALUE IO_Event_Selector) {
	IO_Event_Selector_URing = rb_define_class_under(IO_Event_Selector, "URing", rb_cObject);
	rb_gc_register_mark_object(IO_Event_Selector_URing);

	rb_define_alloc_func(IO_Event_Selector_URing, IO_Event_Selector_URing_allocate);
	rb_define_method(IO_Event_Selector_URing, "initialize", IO_Event_Selector_URing_initialize, 1);

	rb_define_method(IO_Event_Selector_URing, "loop", IO_Event_Selector_URing_loop, 0);

	rb_define_method(IO_Event_Selector_URing, "transfer", IO_Event_Selector_URing_transfer, 0);
	rb_define_method(IO_Event_Selector_URing, "resume", IO_Event_Selector_URing_resume, -1);
	rb_define_method(IO_Event_Selector_URing, "yield", IO_Event_Selector_URing_yield, 0);
	rb_define_method(IO_Event_Selector_URing, "push", IO_Event_Selector_URing_push, 1);
	rb_define_method(IO_Event_Selector_URing, "raise", IO_Event_Selector_URing_raise, -1);

	rb_define_method(IO_Event_Selector_URing, "ready?", IO_Event_Selector_URing_ready_p, 0);

	rb_define_method(IO_Event_Selector_URing, "select", IO_Event_Selector_URing_select, 1);
	rb_define_method(IO_Event_Selector_URing, "wakeup", IO_Event_Selector_URing_wakeup, 0);
	rb_define_method(IO_Event_Selector_URing, "close", IO_Event_Selector_URing_close, 0);

	rb_define_method(IO_Event_Selector_URing, "io_wait", IO_Event_Selector_URing_io_wait, 3);

	#ifdef HAVE_RUBY_IO_BUFFER_H
	rb_define_method(IO_Event_Selector_URing, "io_read", IO_Event_Selector_URing_io_read, 4);
	rb_define_method(IO_Event_Selector_URing, "io_write", IO_Event_Selector_URing_io_write, 4);
	#endif

	rb_define_method(IO_Event_Selector_URing, "io_close", IO_Event_Selector_URing_io_close, 1);

	rb_define_method(IO_Event_Selector_URing, "process_wait", IO_Event_Selector_URing_process_wait, 3);
}