io-event 0.1.0

@@ -0,0 +1,644 @@
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to deal
+ // in the Software without restriction, including without limitation the rights
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ // copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in
+ // all copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ // THE SOFTWARE.
+
+ #include "uring.h"
+ #include "selector.h"
+
+ #include <liburing.h>
+ #include <poll.h>
+ #include <time.h>
+
+ #include "pidfd.c"
+
+ static const int DEBUG = 0;
+
+ static VALUE IO_Event_Selector_URing = Qnil;
+
+ enum {URING_ENTRIES = 64};
+
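+ // Selector state: `backend` holds the generic selector fields (event loop fiber and
+ // ready queue), `ring` is the io_uring instance, and `pending` counts submission queue
+ // entries that have been prepared but not yet submitted to the kernel.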
+ struct IO_Event_Selector_URing {
+ 	struct IO_Event_Selector backend;
+ 	struct io_uring ring;
+ 	size_t pending;
+ };
+
+ void IO_Event_Selector_URing_Type_mark(void *_data)
+ {
+ 	struct IO_Event_Selector_URing *data = _data;
+ 	IO_Event_Selector_mark(&data->backend);
+ }
+
+ static
+ void close_internal(struct IO_Event_Selector_URing *data) {
+ 	if (data->ring.ring_fd >= 0) {
+ 		io_uring_queue_exit(&data->ring);
+ 		data->ring.ring_fd = -1;
+ 	}
+ }
+
+ void IO_Event_Selector_URing_Type_free(void *_data)
+ {
+ 	struct IO_Event_Selector_URing *data = _data;
+
+ 	close_internal(data);
+
+ 	free(data);
+ }
+
+ size_t IO_Event_Selector_URing_Type_size(const void *data)
+ {
+ 	return sizeof(struct IO_Event_Selector_URing);
+ }
+
+ static const rb_data_type_t IO_Event_Selector_URing_Type = {
+ 	.wrap_struct_name = "IO_Event::Backend::URing",
+ 	.function = {
+ 		.dmark = IO_Event_Selector_URing_Type_mark,
+ 		.dfree = IO_Event_Selector_URing_Type_free,
+ 		.dsize = IO_Event_Selector_URing_Type_size,
+ 	},
+ 	.data = NULL,
+ 	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
+ };
+
+ VALUE IO_Event_Selector_URing_allocate(VALUE self) {
+ 	struct IO_Event_Selector_URing *data = NULL;
+ 	VALUE instance = TypedData_Make_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+
+ 	IO_Event_Selector_initialize(&data->backend, Qnil);
+ 	data->ring.ring_fd = -1;
+
+ 	data->pending = 0;
+
+ 	return instance;
+ }
+
+ VALUE IO_Event_Selector_URing_initialize(VALUE self, VALUE loop) {
+ 	struct IO_Event_Selector_URing *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+
+ 	IO_Event_Selector_initialize(&data->backend, loop);
+ 	int result = io_uring_queue_init(URING_ENTRIES, &data->ring, 0);
+
+ 	if (result < 0) {
+ 		rb_syserr_fail(-result, "io_uring_queue_init");
+ 	}
+
+ 	rb_update_max_fd(data->ring.ring_fd);
+
+ 	return self;
+ }
+
+ VALUE IO_Event_Selector_URing_close(VALUE self) {
+ 	struct IO_Event_Selector_URing *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+
+ 	close_internal(data);
+
+ 	return Qnil;
+ }
+
+ VALUE IO_Event_Selector_URing_transfer(VALUE self)
+ {
+ 	struct IO_Event_Selector_URing *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+
+ 	return IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
+ }
+
+ VALUE IO_Event_Selector_URing_resume(int argc, VALUE *argv, VALUE self)
+ {
+ 	struct IO_Event_Selector_URing *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+
+ 	return IO_Event_Selector_resume(&data->backend, argc, argv);
+ }
+
+ VALUE IO_Event_Selector_URing_yield(VALUE self)
+ {
+ 	struct IO_Event_Selector_URing *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+
+ 	return IO_Event_Selector_yield(&data->backend);
+ }
+
+ VALUE IO_Event_Selector_URing_push(VALUE self, VALUE fiber)
+ {
+ 	struct IO_Event_Selector_URing *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+
+ 	IO_Event_Selector_queue_push(&data->backend, fiber);
+
+ 	return Qnil;
+ }
+
+ VALUE IO_Event_Selector_URing_raise(int argc, VALUE *argv, VALUE self)
+ {
+ 	struct IO_Event_Selector_URing *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+
+ 	return IO_Event_Selector_raise(&data->backend, argc, argv);
+ }
+
+ VALUE IO_Event_Selector_URing_ready_p(VALUE self) {
+ 	struct IO_Event_Selector_URing *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+
+ 	return data->backend.ready ? Qtrue : Qfalse;
+ }
+
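+ // Submission strategy: some operations only mark their SQE as pending
+ // (io_uring_submit_pending) and rely on the next `select` call to flush the submission
+ // queue (io_uring_submit_flush); others use io_uring_submit_now, which submits
+ // immediately and yields to the event loop, retrying while the kernel reports
+ // -EBUSY or -EAGAIN.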
+ static
+ int io_uring_submit_flush(struct IO_Event_Selector_URing *data) {
+ 	if (data->pending) {
+ 		if (DEBUG) fprintf(stderr, "io_uring_submit_flush(pending=%zu)\n", data->pending);
+
+ 		// Try to submit:
+ 		int result = io_uring_submit(&data->ring);
+
+ 		if (result >= 0) {
+ 			// If it was submitted, reset pending count:
+ 			data->pending = 0;
+ 		} else if (result != -EBUSY && result != -EAGAIN) {
+ 			rb_syserr_fail(-result, "io_uring_submit_flush");
+ 		}
+
+ 		return result;
+ 	}
+
+ 	return 0;
+ }
+
+ static
+ int io_uring_submit_now(struct IO_Event_Selector_URing *data) {
+ 	while (true) {
+ 		int result = io_uring_submit(&data->ring);
+
+ 		if (result >= 0) {
+ 			data->pending = 0;
+ 			return result;
+ 		}
+
+ 		if (result == -EBUSY || result == -EAGAIN) {
+ 			IO_Event_Selector_yield(&data->backend);
+ 		} else {
+ 			rb_syserr_fail(-result, "io_uring_submit_now");
+ 		}
+ 	}
+ }
+
+ static
+ void io_uring_submit_pending(struct IO_Event_Selector_URing *data) {
+ 	data->pending += 1;
+ }
+
+ struct io_uring_sqe * io_get_sqe(struct IO_Event_Selector_URing *data) {
+ 	struct io_uring_sqe *sqe = io_uring_get_sqe(&data->ring);
+
+ 	while (sqe == NULL) {
+ 		// The submission queue is full; submit it now and try again:
+ 		io_uring_submit_now(data);
+
+ 		sqe = io_uring_get_sqe(&data->ring);
+ 	}
+
+ 	return sqe;
+ }
+
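+ // process_wait: open a pollable pidfd for the child process, poll it for readability via
+ // io_uring, then reap the exit status once the completion wakes the fiber. The pidfd is
+ // closed in the ensure block regardless of how the wait finishes.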
+ struct process_wait_arguments {
+ 	struct IO_Event_Selector_URing *data;
+ 	pid_t pid;
+ 	int flags;
+ 	int descriptor;
+ };
+
+ static
+ VALUE process_wait_transfer(VALUE _arguments) {
+ 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
+
+ 	IO_Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
+
+ 	return IO_Event_Selector_process_status_wait(arguments->pid);
+ }
+
+ static
+ VALUE process_wait_ensure(VALUE _arguments) {
+ 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
+
+ 	close(arguments->descriptor);
+
+ 	return Qnil;
+ }
+
+ VALUE IO_Event_Selector_URing_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
+ 	struct IO_Event_Selector_URing *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+
+ 	struct process_wait_arguments process_wait_arguments = {
+ 		.data = data,
+ 		.pid = NUM2PIDT(pid),
+ 		.flags = NUM2INT(flags),
+ 	};
+
+ 	process_wait_arguments.descriptor = pidfd_open(process_wait_arguments.pid, 0);
+ 	rb_update_max_fd(process_wait_arguments.descriptor);
+
+ 	struct io_uring_sqe *sqe = io_get_sqe(data);
+
+ 	if (DEBUG) fprintf(stderr, "IO_Event_Selector_URing_process_wait:io_uring_prep_poll_add(%p)\n", (void*)fiber);
+ 	io_uring_prep_poll_add(sqe, process_wait_arguments.descriptor, POLLIN|POLLHUP|POLLERR);
+ 	io_uring_sqe_set_data(sqe, (void*)fiber);
+ 	io_uring_submit_pending(data);
+
+ 	return rb_ensure(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_ensure, (VALUE)&process_wait_arguments);
+ }
+
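+ // Translate between the selector's IO_EVENT_* event mask and poll(2) flags.
+ // POLLERR and POLLHUP are always included in the requested flags.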
+ static inline
+ short poll_flags_from_events(int events) {
+ 	short flags = 0;
+
+ 	if (events & IO_EVENT_READABLE) flags |= POLLIN;
+ 	if (events & IO_EVENT_PRIORITY) flags |= POLLPRI;
+ 	if (events & IO_EVENT_WRITABLE) flags |= POLLOUT;
+
+ 	flags |= POLLERR;
+ 	flags |= POLLHUP;
+
+ 	return flags;
+ }
+
+ static inline
+ int events_from_poll_flags(short flags) {
+ 	int events = 0;
+
+ 	if (flags & POLLIN) events |= IO_EVENT_READABLE;
+ 	if (flags & POLLPRI) events |= IO_EVENT_PRIORITY;
+ 	if (flags & POLLOUT) events |= IO_EVENT_WRITABLE;
+
+ 	return events;
+ }
+
+ struct io_wait_arguments {
+ 	struct IO_Event_Selector_URing *data;
+ 	VALUE fiber;
+ 	short flags;
+ };
+
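+ // io_wait submits a one-shot poll for the requested events and suspends the fiber. If the
+ // fiber is resumed with an exception, the rescue handler cancels the outstanding poll with
+ // io_uring_prep_poll_remove before re-raising.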
+ static
+ VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
+ 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
+ 	struct IO_Event_Selector_URing *data = arguments->data;
+
+ 	struct io_uring_sqe *sqe = io_get_sqe(data);
+
+ 	if (DEBUG) fprintf(stderr, "io_wait_rescue:io_uring_prep_poll_remove(%p)\n", (void*)arguments->fiber);
+
+ 	io_uring_prep_poll_remove(sqe, (void*)arguments->fiber);
+ 	io_uring_submit_now(data);
+
+ 	rb_exc_raise(exception);
+ }
+
+ static
+ VALUE io_wait_transfer(VALUE _arguments) {
+ 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
+ 	struct IO_Event_Selector_URing *data = arguments->data;
+
+ 	VALUE result = IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
+ 	if (DEBUG) fprintf(stderr, "io_wait:IO_Event_Selector_fiber_transfer -> %d\n", RB_NUM2INT(result));
+
+ 	// We explicitly filter the resulting events based on the requested events.
+ 	// In some cases, poll will report events we didn't ask for.
+ 	short flags = arguments->flags & NUM2INT(result);
+
+ 	return INT2NUM(events_from_poll_flags(flags));
+ }
+
+ VALUE IO_Event_Selector_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
+ 	struct IO_Event_Selector_URing *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+
+ 	int descriptor = IO_Event_Selector_io_descriptor(io);
+ 	struct io_uring_sqe *sqe = io_get_sqe(data);
+
+ 	short flags = poll_flags_from_events(NUM2INT(events));
+
+ 	if (DEBUG) fprintf(stderr, "IO_Event_Selector_URing_io_wait:io_uring_prep_poll_add(descriptor=%d, flags=%d, fiber=%p)\n", descriptor, flags, (void*)fiber);
+
+ 	io_uring_prep_poll_add(sqe, descriptor, flags);
+ 	io_uring_sqe_set_data(sqe, (void*)fiber);
+
+ 	// If we are going to wait, we assume that we are waiting for a while:
+ 	io_uring_submit_pending(data);
+
+ 	struct io_wait_arguments io_wait_arguments = {
+ 		.data = data,
+ 		.fiber = fiber,
+ 		.flags = flags
+ 	};
+
+ 	return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
+ }
+
+ #ifdef HAVE_RUBY_IO_BUFFER_H
+
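+ // io_read submits a read SQE and suspends the calling fiber; the completion's result is
+ // the number of bytes read or a negated errno. The Ruby-visible io_read below retries
+ // until the requested length is satisfied or the read returns 0, falling back to io_wait
+ // on EAGAIN/EWOULDBLOCK.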
+ static int io_read(struct IO_Event_Selector_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length) {
+ 	struct io_uring_sqe *sqe = io_get_sqe(data);
+
+ 	if (DEBUG) fprintf(stderr, "io_read:io_uring_prep_read(fiber=%p)\n", (void*)fiber);
+
+ 	io_uring_prep_read(sqe, descriptor, buffer, length, 0);
+ 	io_uring_sqe_set_data(sqe, (void*)fiber);
+ 	io_uring_submit_now(data);
+
+ 	VALUE result = IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
+ 	if (DEBUG) fprintf(stderr, "io_read:IO_Event_Selector_fiber_transfer -> %d\n", RB_NUM2INT(result));
+
+ 	return RB_NUM2INT(result);
+ }
+
+ VALUE IO_Event_Selector_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+ 	struct IO_Event_Selector_URing *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+
+ 	int descriptor = IO_Event_Selector_io_descriptor(io);
+
+ 	void *base;
+ 	size_t size;
+ 	rb_io_buffer_get_mutable(buffer, &base, &size);
+
+ 	size_t offset = 0;
+ 	size_t length = NUM2SIZET(_length);
+
+ 	while (length > 0) {
+ 		size_t maximum_size = size - offset;
+ 		int result = io_read(data, fiber, descriptor, (char*)base+offset, maximum_size);
+
+ 		if (result == 0) {
+ 			break;
+ 		} else if (result > 0) {
+ 			offset += result;
+ 			if ((size_t)result >= length) break;
+ 			length -= result;
+ 		} else if (-result == EAGAIN || -result == EWOULDBLOCK) {
+ 			IO_Event_Selector_URing_io_wait(self, fiber, io, RB_INT2NUM(IO_EVENT_READABLE));
+ 		} else {
+ 			rb_syserr_fail(-result, strerror(-result));
+ 		}
+ 	}
+
+ 	return SIZET2NUM(offset);
+ }
+
+ static
+ int io_write(struct IO_Event_Selector_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length) {
+ 	struct io_uring_sqe *sqe = io_get_sqe(data);
+
+ 	if (DEBUG) fprintf(stderr, "io_write:io_uring_prep_write(fiber=%p)\n", (void*)fiber);
+
+ 	io_uring_prep_write(sqe, descriptor, buffer, length, 0);
+ 	io_uring_sqe_set_data(sqe, (void*)fiber);
+ 	io_uring_submit_pending(data);
+
+ 	int result = RB_NUM2INT(IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL));
+ 	if (DEBUG) fprintf(stderr, "io_write:IO_Event_Selector_fiber_transfer -> %d\n", result);
+
+ 	return result;
+ }
+
+ VALUE IO_Event_Selector_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+ 	struct IO_Event_Selector_URing *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+
+ 	int descriptor = IO_Event_Selector_io_descriptor(io);
+
+ 	const void *base;
+ 	size_t size;
+ 	rb_io_buffer_get_immutable(buffer, &base, &size);
+
+ 	size_t offset = 0;
+ 	size_t length = NUM2SIZET(_length);
+
+ 	if (length > size) {
+ 		rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
+ 	}
+
+ 	while (length > 0) {
+ 		int result = io_write(data, fiber, descriptor, (char*)base+offset, length);
+
+ 		if (result >= 0) {
+ 			offset += result;
+ 			if ((size_t)result >= length) break;
+ 			length -= result;
+ 		} else if (-result == EAGAIN || -result == EWOULDBLOCK) {
+ 			IO_Event_Selector_URing_io_wait(self, fiber, io, RB_INT2NUM(IO_EVENT_WRITABLE));
+ 		} else {
+ 			rb_syserr_fail(-result, strerror(-result));
+ 		}
+ 	}
+
+ 	return SIZET2NUM(offset);
+ }
+
+ #endif
+
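+ // Close the descriptor asynchronously via io_uring (ASYNC_CLOSE), or synchronously with
+ // close(2) otherwise. The completion carries no user data, so it is ignored by
+ // select_process_completions.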
+ static const int ASYNC_CLOSE = 1;
+
+ VALUE IO_Event_Selector_URing_io_close(VALUE self, VALUE io) {
+ 	struct IO_Event_Selector_URing *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+
+ 	int descriptor = IO_Event_Selector_io_descriptor(io);
+
+ 	if (ASYNC_CLOSE) {
+ 		struct io_uring_sqe *sqe = io_get_sqe(data);
+
+ 		io_uring_prep_close(sqe, descriptor);
+ 		io_uring_sqe_set_data(sqe, NULL);
+ 		io_uring_submit_now(data);
+ 	} else {
+ 		close(descriptor);
+ 	}
+
+ 	// We don't wait for the result of close, since it has no use in practice:
+ 	return Qtrue;
+ }
+
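+ // Convert a Ruby duration (nil, Integer seconds, or Float seconds) into a
+ // struct __kernel_timespec for io_uring_wait_cqe_timeout. nil means "wait indefinitely"
+ // and is represented by a NULL timeout.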
+ static
+ struct __kernel_timespec * make_timeout(VALUE duration, struct __kernel_timespec *storage) {
+ 	if (duration == Qnil) {
+ 		return NULL;
+ 	}
+
+ 	if (FIXNUM_P(duration)) {
+ 		storage->tv_sec = NUM2TIMET(duration);
+ 		storage->tv_nsec = 0;
+
+ 		return storage;
+ 	}
+
+ 	else if (RB_FLOAT_TYPE_P(duration)) {
+ 		double value = RFLOAT_VALUE(duration);
+ 		time_t seconds = value;
+
+ 		storage->tv_sec = seconds;
+ 		storage->tv_nsec = (value - seconds) * 1000000000L;
+
+ 		return storage;
+ 	}
+
+ 	rb_raise(rb_eRuntimeError, "unable to convert timeout");
+ }
+
+ static
+ int timeout_nonblocking(struct __kernel_timespec *timespec) {
+ 	return timespec && timespec->tv_sec == 0 && timespec->tv_nsec == 0;
+ }
+
+ struct select_arguments {
+ 	struct IO_Event_Selector_URing *data;
+
+ 	int result;
+
+ 	struct __kernel_timespec storage;
+ 	struct __kernel_timespec *timeout;
+ };
+
+ static
+ void * select_internal(void *_arguments) {
+ 	struct select_arguments * arguments = (struct select_arguments *)_arguments;
+
+ 	io_uring_submit_flush(arguments->data);
+
+ 	struct io_uring_cqe *cqe = NULL;
+ 	arguments->result = io_uring_wait_cqe_timeout(&arguments->data->ring, &cqe, arguments->timeout);
+
+ 	return NULL;
+ }
+
+ static
+ int select_internal_without_gvl(struct select_arguments *arguments) {
+ 	rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
+
+ 	if (arguments->result == -ETIME) {
+ 		arguments->result = 0;
+ 	} else if (arguments->result < 0) {
+ 		rb_syserr_fail(-arguments->result, "select_internal_without_gvl:io_uring_wait_cqe_timeout");
+ 	} else {
+ 		// At least 1 event is waiting:
+ 		arguments->result = 1;
+ 	}
+
+ 	return arguments->result;
+ }
+
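+ // Drain the completion queue: each CQE with user data resumes the corresponding fiber with
+ // the CQE result. The queue head is advanced one entry at a time, before transferring to
+ // the fiber, so the entry is already consumed if the transfer re-enters the selector.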
+ static inline
+ unsigned select_process_completions(struct io_uring *ring) {
+ 	unsigned completed = 0;
+ 	unsigned head;
+ 	struct io_uring_cqe *cqe;
+
+ 	io_uring_for_each_cqe(ring, head, cqe) {
+ 		++completed;
+
+ 		// If the operation was cancelled, or the operation has no user data (fiber):
+ 		if (cqe->res == -ECANCELED || cqe->user_data == 0 || cqe->user_data == LIBURING_UDATA_TIMEOUT) {
+ 			io_uring_cq_advance(ring, 1);
+ 			continue;
+ 		}
+
+ 		VALUE fiber = (VALUE)cqe->user_data;
+ 		VALUE result = RB_INT2NUM(cqe->res);
+
+ 		if (DEBUG) fprintf(stderr, "cqe res=%d user_data=%p\n", cqe->res, (void*)cqe->user_data);
+
+ 		io_uring_cq_advance(ring, 1);
+
+ 		IO_Event_Selector_fiber_transfer(fiber, 1, &result);
+ 	}
+
+ 	// io_uring_cq_advance(ring, completed);
+
+ 	if (DEBUG) fprintf(stderr, "select_process_completions(completed=%u)\n", completed);
+
+ 	return completed;
+ }
+
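+ // select: first run any fibers on the ready queue and process completions that have
+ // already arrived. Only if both were empty do we (optionally) block, releasing the GVL
+ // while waiting for a completion or the timeout, and then process completions again.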
+ VALUE IO_Event_Selector_URing_select(VALUE self, VALUE duration) {
+ 	struct IO_Event_Selector_URing *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+
+ 	int ready = IO_Event_Selector_queue_flush(&data->backend);
+
+ 	int result = select_process_completions(&data->ring);
+
+ 	// If the ready list was empty and we didn't process any completions:
+ 	if (!ready && result == 0) {
+ 		// We might need to wait for events:
+ 		struct select_arguments arguments = {
+ 			.data = data,
+ 			.timeout = NULL,
+ 		};
+
+ 		arguments.timeout = make_timeout(duration, &arguments.storage);
+
+ 		if (!data->backend.ready && !timeout_nonblocking(arguments.timeout)) {
+ 			// This is a blocking operation, so we wait for events:
+ 			result = select_internal_without_gvl(&arguments);
+ 		} else {
+ 			// The timeout requires non-blocking behaviour, so we just flush the submission queue if required:
+ 			io_uring_submit_flush(data);
+ 		}
+
+ 		// After waiting/flushing the submission queue, check if there are any completions:
+ 		result = select_process_completions(&data->ring);
+ 	}
+
+ 	return RB_INT2NUM(result);
+ }
+
+ void Init_IO_Event_Selector_URing(VALUE IO_Event_Selector) {
+ 	IO_Event_Selector_URing = rb_define_class_under(IO_Event_Selector, "URing", rb_cObject);
+ 	rb_gc_register_mark_object(IO_Event_Selector_URing);
+
+ 	rb_define_alloc_func(IO_Event_Selector_URing, IO_Event_Selector_URing_allocate);
+ 	rb_define_method(IO_Event_Selector_URing, "initialize", IO_Event_Selector_URing_initialize, 1);
+
+ 	rb_define_method(IO_Event_Selector_URing, "transfer", IO_Event_Selector_URing_transfer, 0);
+ 	rb_define_method(IO_Event_Selector_URing, "resume", IO_Event_Selector_URing_resume, -1);
+ 	rb_define_method(IO_Event_Selector_URing, "yield", IO_Event_Selector_URing_yield, 0);
+ 	rb_define_method(IO_Event_Selector_URing, "push", IO_Event_Selector_URing_push, 1);
+ 	rb_define_method(IO_Event_Selector_URing, "raise", IO_Event_Selector_URing_raise, -1);
+
+ 	rb_define_method(IO_Event_Selector_URing, "ready?", IO_Event_Selector_URing_ready_p, 0);
+
+ 	rb_define_method(IO_Event_Selector_URing, "select", IO_Event_Selector_URing_select, 1);
+ 	rb_define_method(IO_Event_Selector_URing, "close", IO_Event_Selector_URing_close, 0);
+
+ 	rb_define_method(IO_Event_Selector_URing, "io_wait", IO_Event_Selector_URing_io_wait, 3);
+
+ 	#ifdef HAVE_RUBY_IO_BUFFER_H
+ 	rb_define_method(IO_Event_Selector_URing, "io_read", IO_Event_Selector_URing_io_read, 4);
+ 	rb_define_method(IO_Event_Selector_URing, "io_write", IO_Event_Selector_URing_io_write, 4);
+ 	#endif
+
+ 	rb_define_method(IO_Event_Selector_URing, "io_close", IO_Event_Selector_URing_io_close, 1);
+
+ 	rb_define_method(IO_Event_Selector_URing, "process_wait", IO_Event_Selector_URing_process_wait, 3);
+ }
@@ -0,0 +1,28 @@
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to deal
+ // in the Software without restriction, including without limitation the rights
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ // copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in
+ // all copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ // THE SOFTWARE.
+
+ #pragma once
+
+ #include <ruby.h>
+
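+ // This macro signals that the io_uring selector was compiled in, so the rest of the
+ // extension can detect its availability.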
+ #define IO_EVENT_SELECTOR_URING
+
+ void Init_IO_Event_Selector_URing(VALUE IO_Event_Selector);
+ VALUE IO_Event_Selector_URing_select(VALUE self, VALUE duration);
data/ext/kqueue.o ADDED
Binary file