io-event 0.1.0

@@ -0,0 +1,652 @@
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to deal
+ // in the Software without restriction, including without limitation the rights
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ // copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in
+ // all copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ // THE SOFTWARE.
+
+ #include "kqueue.h"
+ #include "selector.h"
+
+ #include <sys/event.h>
+ #include <sys/ioctl.h>
+ #include <time.h>
+ #include <errno.h>
+
+ static VALUE IO_Event_Selector_KQueue = Qnil;
+
+ enum {KQUEUE_MAX_EVENTS = 64};
+
+ struct IO_Event_Selector_KQueue {
+ 	struct IO_Event_Selector backend;
+ 	int descriptor;
+ };
+
+ void IO_Event_Selector_KQueue_Type_mark(void *_data)
+ {
+ 	struct IO_Event_Selector_KQueue *data = _data;
+ 	IO_Event_Selector_mark(&data->backend);
+ }
+
+ static
+ void close_internal(struct IO_Event_Selector_KQueue *data) {
+ 	if (data->descriptor >= 0) {
+ 		close(data->descriptor);
+ 		data->descriptor = -1;
+ 	}
+ }
+
+ void IO_Event_Selector_KQueue_Type_free(void *_data)
+ {
+ 	struct IO_Event_Selector_KQueue *data = _data;
+
+ 	close_internal(data);
+
+ 	free(data);
+ }
+
+ size_t IO_Event_Selector_KQueue_Type_size(const void *data)
+ {
+ 	return sizeof(struct IO_Event_Selector_KQueue);
+ }
+
+ static const rb_data_type_t IO_Event_Selector_KQueue_Type = {
+ 	.wrap_struct_name = "IO_Event::Backend::KQueue",
+ 	.function = {
+ 		.dmark = IO_Event_Selector_KQueue_Type_mark,
+ 		.dfree = IO_Event_Selector_KQueue_Type_free,
+ 		.dsize = IO_Event_Selector_KQueue_Type_size,
+ 	},
+ 	.data = NULL,
+ 	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
+ };
+
+ VALUE IO_Event_Selector_KQueue_allocate(VALUE self) {
+ 	struct IO_Event_Selector_KQueue *data = NULL;
+ 	VALUE instance = TypedData_Make_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+ 	IO_Event_Selector_initialize(&data->backend, Qnil);
+ 	data->descriptor = -1;
+
+ 	return instance;
+ }
+
+ VALUE IO_Event_Selector_KQueue_initialize(VALUE self, VALUE loop) {
+ 	struct IO_Event_Selector_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+ 	IO_Event_Selector_initialize(&data->backend, loop);
+ 	int result = kqueue();
+
+ 	if (result == -1) {
+ 		rb_sys_fail("kqueue");
+ 	} else {
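+ 		// Mark the kqueue descriptor as close-on-exec, so it is not inherited by child processes: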
+ 		ioctl(result, FIOCLEX);
+ 		data->descriptor = result;
+
+ 		rb_update_max_fd(data->descriptor);
+ 	}
+
+ 	return self;
+ }
+
+ VALUE IO_Event_Selector_KQueue_close(VALUE self) {
+ 	struct IO_Event_Selector_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+ 	close_internal(data);
+
+ 	return Qnil;
+ }
+
+ VALUE IO_Event_Selector_KQueue_transfer(VALUE self)
+ {
+ 	struct IO_Event_Selector_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+ 	return IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
+ }
+
+ VALUE IO_Event_Selector_KQueue_resume(int argc, VALUE *argv, VALUE self)
+ {
+ 	struct IO_Event_Selector_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+ 	return IO_Event_Selector_resume(&data->backend, argc, argv);
+ }
+
+ VALUE IO_Event_Selector_KQueue_yield(VALUE self)
+ {
+ 	struct IO_Event_Selector_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+ 	return IO_Event_Selector_yield(&data->backend);
+ }
+
+ VALUE IO_Event_Selector_KQueue_push(VALUE self, VALUE fiber)
+ {
+ 	struct IO_Event_Selector_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+ 	IO_Event_Selector_queue_push(&data->backend, fiber);
+
+ 	return Qnil;
+ }
+
+ VALUE IO_Event_Selector_KQueue_raise(int argc, VALUE *argv, VALUE self)
+ {
+ 	struct IO_Event_Selector_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+ 	return IO_Event_Selector_raise(&data->backend, argc, argv);
+ }
+
+ VALUE IO_Event_Selector_KQueue_ready_p(VALUE self) {
+ 	struct IO_Event_Selector_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+ 	return data->backend.ready ? Qtrue : Qfalse;
+ }
+
+ struct process_wait_arguments {
+ 	struct IO_Event_Selector_KQueue *data;
+ 	pid_t pid;
+ 	int flags;
+ };
+
+ static
+ int process_add_filters(int descriptor, int ident, VALUE fiber) {
+ 	struct kevent event = {0};
+
+ 	event.ident = ident;
+ 	event.filter = EVFILT_PROC;
+ 	event.flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
+ 	event.fflags = NOTE_EXIT;
+ 	event.udata = (void*)fiber;
+
+ 	int result = kevent(descriptor, &event, 1, NULL, 0, NULL);
+
+ 	if (result == -1) {
+ 		// No such process - the process has probably already terminated:
+ 		if (errno == ESRCH) {
+ 			return 0;
+ 		}
+
+ 		rb_sys_fail("kevent(process_add_filters)");
+ 	}
+
+ 	return 1;
+ }
+
+ static
+ void process_remove_filters(int descriptor, int ident) {
+ 	struct kevent event = {0};
+
+ 	event.ident = ident;
+ 	event.filter = EVFILT_PROC;
+ 	event.flags = EV_DELETE;
+ 	event.fflags = NOTE_EXIT;
+
+ 	// Ignore the result.
+ 	kevent(descriptor, &event, 1, NULL, 0, NULL);
+ }
+
+ static
+ VALUE process_wait_transfer(VALUE _arguments) {
+ 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
+
+ 	IO_Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
+
+ 	return IO_Event_Selector_process_status_wait(arguments->pid);
+ }
+
+ static
+ VALUE process_wait_rescue(VALUE _arguments, VALUE exception) {
+ 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
+
+ 	process_remove_filters(arguments->data->descriptor, arguments->pid);
+
+ 	rb_exc_raise(exception);
+ }
+
+ VALUE IO_Event_Selector_KQueue_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
+ 	struct IO_Event_Selector_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+ 	struct process_wait_arguments process_wait_arguments = {
+ 		.data = data,
+ 		.pid = NUM2PIDT(pid),
+ 		.flags = RB_NUM2INT(flags),
+ 	};
+
+ 	VALUE result = Qnil;
+
+ 	// This loop should not be needed, but I have seen a race condition between NOTE_EXIT and `waitpid`, which makes the result (unexpectedly) nil. So we retry in a loop if the race condition shows up:
+ 	while (NIL_P(result)) {
+ 		int waiting = process_add_filters(data->descriptor, process_wait_arguments.pid, fiber);
+
+ 		if (waiting) {
+ 			result = rb_rescue(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_rescue, (VALUE)&process_wait_arguments);
+ 		} else {
+ 			result = IO_Event_Selector_process_status_wait(process_wait_arguments.pid);
+ 		}
+ 	}
+
+ 	return result;
+ }
+
+ static
+ int io_add_filters(int descriptor, int ident, int events, VALUE fiber) {
+ 	int count = 0;
+ 	struct kevent kevents[2] = {0};
+
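+ 	// Filters are registered EV_ONESHOT, so the kernel deletes them automatically once they fire; explicit removal is only needed when the wait is interrupted: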
+ 	if (events & IO_EVENT_READABLE) {
+ 		kevents[count].ident = ident;
+ 		kevents[count].filter = EVFILT_READ;
+ 		kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
+ 		kevents[count].udata = (void*)fiber;
+
+ 		// #ifdef EV_OOBAND
+ 		// if (events & PRIORITY) {
+ 		// 	kevents[count].flags |= EV_OOBAND;
+ 		// }
+ 		// #endif
+
+ 		count++;
+ 	}
+
+ 	if (events & IO_EVENT_WRITABLE) {
+ 		kevents[count].ident = ident;
+ 		kevents[count].filter = EVFILT_WRITE;
+ 		kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
+ 		kevents[count].udata = (void*)fiber;
+ 		count++;
+ 	}
+
+ 	int result = kevent(descriptor, kevents, count, NULL, 0, NULL);
+
+ 	if (result == -1) {
+ 		rb_sys_fail("kevent(io_add_filters)");
+ 	}
+
+ 	return events;
+ }
+
+ static
+ void io_remove_filters(int descriptor, int ident, int events) {
+ 	int count = 0;
+ 	struct kevent kevents[2] = {0};
+
+ 	if (events & IO_EVENT_READABLE) {
+ 		kevents[count].ident = ident;
+ 		kevents[count].filter = EVFILT_READ;
+ 		kevents[count].flags = EV_DELETE;
+
+ 		count++;
+ 	}
+
+ 	if (events & IO_EVENT_WRITABLE) {
+ 		kevents[count].ident = ident;
+ 		kevents[count].filter = EVFILT_WRITE;
+ 		kevents[count].flags = EV_DELETE;
+ 		count++;
+ 	}
+
+ 	// Ignore the result.
+ 	kevent(descriptor, kevents, count, NULL, 0, NULL);
+ }
+
+ struct io_wait_arguments {
+ 	struct IO_Event_Selector_KQueue *data;
+ 	int events;
+ 	int descriptor;
+ };
+
+ static
+ VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
+ 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
+
+ 	io_remove_filters(arguments->data->descriptor, arguments->descriptor, arguments->events);
+
+ 	rb_exc_raise(exception);
+ }
+
+ static inline
+ int events_from_kqueue_filter(int filter) {
+ 	if (filter == EVFILT_READ) return IO_EVENT_READABLE;
+ 	if (filter == EVFILT_WRITE) return IO_EVENT_WRITABLE;
+
+ 	return 0;
+ }
+
+ static
+ VALUE io_wait_transfer(VALUE _arguments) {
+ 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
+
+ 	VALUE result = IO_Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
+
+ 	return INT2NUM(events_from_kqueue_filter(RB_NUM2INT(result)));
+ }
+
+ VALUE IO_Event_Selector_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
+ 	struct IO_Event_Selector_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+ 	int descriptor = IO_Event_Selector_io_descriptor(io);
+
+ 	struct io_wait_arguments io_wait_arguments = {
+ 		.events = io_add_filters(data->descriptor, descriptor, RB_NUM2INT(events), fiber),
+ 		.data = data,
+ 		.descriptor = descriptor,
+ 	};
+
+ 	return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
+ }
+
+ #ifdef HAVE_RUBY_IO_BUFFER_H
+
+ struct io_read_arguments {
+ 	VALUE self;
+ 	VALUE fiber;
+ 	VALUE io;
+
+ 	int flags;
+
+ 	int descriptor;
+
+ 	VALUE buffer;
+ 	size_t length;
+ };
+
+ static
+ VALUE io_read_loop(VALUE _arguments) {
+ 	struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
+
+ 	void *base;
+ 	size_t size;
+ 	rb_io_buffer_get_mutable(arguments->buffer, &base, &size);
+
+ 	size_t offset = 0;
+ 	size_t length = arguments->length;
+
+ 	while (length > 0) {
+ 		size_t maximum_size = size - offset;
+ 		ssize_t result = read(arguments->descriptor, (char*)base+offset, maximum_size);
+
+ 		if (result == 0) {
+ 			break;
+ 		} else if (result > 0) {
+ 			offset += result;
+ 			length -= result;
+ 		} else if (errno == EAGAIN || errno == EWOULDBLOCK) {
+ 			IO_Event_Selector_KQueue_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(IO_EVENT_READABLE));
+ 		} else {
+ 			rb_sys_fail("IO_Event_Selector_KQueue_io_read");
+ 		}
+ 	}
+
+ 	return SIZET2NUM(offset);
+ }
+
+ static
+ VALUE io_read_ensure(VALUE _arguments) {
+ 	struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
+
+ 	IO_Event_Selector_nonblock_restore(arguments->descriptor, arguments->flags);
+
+ 	return Qnil;
+ }
+
+ VALUE IO_Event_Selector_KQueue_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+ 	struct IO_Event_Selector_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+ 	int descriptor = IO_Event_Selector_io_descriptor(io);
+
+ 	size_t length = NUM2SIZET(_length);
+
+ 	struct io_read_arguments io_read_arguments = {
+ 		.self = self,
+ 		.fiber = fiber,
+ 		.io = io,
+
+ 		.flags = IO_Event_Selector_nonblock_set(descriptor),
+ 		.descriptor = descriptor,
+ 		.buffer = buffer,
+ 		.length = length,
+ 	};
+
+ 	return rb_ensure(io_read_loop, (VALUE)&io_read_arguments, io_read_ensure, (VALUE)&io_read_arguments);
+ }
+
+ struct io_write_arguments {
+ 	VALUE self;
+ 	VALUE fiber;
+ 	VALUE io;
+
+ 	int flags;
+
+ 	int descriptor;
+
+ 	VALUE buffer;
+ 	size_t length;
+ };
+
+ static
+ VALUE io_write_loop(VALUE _arguments) {
+ 	struct io_write_arguments *arguments = (struct io_write_arguments *)_arguments;
+
+ 	const void *base;
+ 	size_t size;
+ 	rb_io_buffer_get_immutable(arguments->buffer, &base, &size);
+
+ 	size_t offset = 0;
+ 	size_t length = arguments->length;
+
+ 	if (length > size) {
+ 		rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
+ 	}
+
+ 	while (length > 0) {
+ 		ssize_t result = write(arguments->descriptor, (char*)base+offset, length);
+
+ 		if (result >= 0) {
+ 			offset += result;
+ 			length -= result;
+ 		} else if (errno == EAGAIN || errno == EWOULDBLOCK) {
+ 			IO_Event_Selector_KQueue_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(IO_EVENT_WRITABLE));
+ 		} else {
+ 			rb_sys_fail("IO_Event_Selector_KQueue_io_write");
+ 		}
+ 	}
+
+ 	return SIZET2NUM(offset);
+ }
+
+ static
+ VALUE io_write_ensure(VALUE _arguments) {
+ 	struct io_write_arguments *arguments = (struct io_write_arguments *)_arguments;
+
+ 	IO_Event_Selector_nonblock_restore(arguments->descriptor, arguments->flags);
+
+ 	return Qnil;
+ }
+
+ VALUE IO_Event_Selector_KQueue_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+ 	struct IO_Event_Selector_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+ 	int descriptor = IO_Event_Selector_io_descriptor(io);
+
+ 	size_t length = NUM2SIZET(_length);
+
+ 	struct io_write_arguments io_write_arguments = {
+ 		.self = self,
+ 		.fiber = fiber,
+ 		.io = io,
+
+ 		.flags = IO_Event_Selector_nonblock_set(descriptor),
+ 		.descriptor = descriptor,
+ 		.buffer = buffer,
+ 		.length = length,
+ 	};
+
+ 	return rb_ensure(io_write_loop, (VALUE)&io_write_arguments, io_write_ensure, (VALUE)&io_write_arguments);
+ }
+
+ #endif
+
+ static
+ struct timespec * make_timeout(VALUE duration, struct timespec * storage) {
+ 	if (duration == Qnil) {
+ 		return NULL;
+ 	}
+
+ 	if (FIXNUM_P(duration)) {
+ 		storage->tv_sec = NUM2TIMET(duration);
+ 		storage->tv_nsec = 0;
+
+ 		return storage;
+ 	}
+
+ 	else if (RB_FLOAT_TYPE_P(duration)) {
+ 		double value = RFLOAT_VALUE(duration);
+ 		time_t seconds = value;
+
+ 		storage->tv_sec = seconds;
+ 		storage->tv_nsec = (value - seconds) * 1000000000L;
+
+ 		return storage;
+ 	}
+
+ 	rb_raise(rb_eRuntimeError, "unable to convert timeout");
+ }
+
+ static
+ int timeout_nonblocking(struct timespec * timespec) {
+ 	return timespec && timespec->tv_sec == 0 && timespec->tv_nsec == 0;
+ }
+
+ struct select_arguments {
+ 	struct IO_Event_Selector_KQueue *data;
+
+ 	int count;
+ 	struct kevent events[KQUEUE_MAX_EVENTS];
+
+ 	struct timespec storage;
+ 	struct timespec *timeout;
+ };
+
+ static
+ void * select_internal(void *_arguments) {
+ 	struct select_arguments * arguments = (struct select_arguments *)_arguments;
+
+ 	arguments->count = kevent(arguments->data->descriptor, NULL, 0, arguments->events, arguments->count, arguments->timeout);
+
+ 	return NULL;
+ }
+
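+ // Run the blocking kevent call with the GVL released, so other Ruby threads can make progress; RUBY_UBF_IO lets the VM interrupt the call: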
+ static
+ void select_internal_without_gvl(struct select_arguments *arguments) {
+ 	rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
+
+ 	if (arguments->count == -1) {
+ 		rb_sys_fail("select_internal_without_gvl:kevent");
+ 	}
+ }
+
+ static
+ void select_internal_with_gvl(struct select_arguments *arguments) {
+ 	select_internal((void *)arguments);
+
+ 	if (arguments->count == -1) {
+ 		rb_sys_fail("select_internal_with_gvl:kevent");
+ 	}
+ }
+
+ VALUE IO_Event_Selector_KQueue_select(VALUE self, VALUE duration) {
+ 	struct IO_Event_Selector_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+
+ 	int ready = IO_Event_Selector_queue_flush(&data->backend);
+
+ 	struct select_arguments arguments = {
+ 		.data = data,
+ 		.count = KQUEUE_MAX_EVENTS,
+ 		.storage = {
+ 			.tv_sec = 0,
+ 			.tv_nsec = 0
+ 		}
+ 	};
+
+ 	// We break this implementation into two parts:
+ 	// (1) with gvl: count = kevent(..., timeout = 0)
+ 	// (2) without gvl: kevent(..., timeout = duration) if count == 0 and duration != 0
+ 	// This allows us to avoid releasing and reacquiring the GVL when events are already available.
+ 	// Non-comprehensive testing shows this gives roughly a 1.5x speedup.
+ 	arguments.timeout = &arguments.storage;
+
+ 	// First do the syscall with no timeout to get any immediately available events:
+ 	select_internal_with_gvl(&arguments);
+
+ 	// If there were no pending events and we have a timeout, wait for more events:
+ 	if (!ready && arguments.count == 0) {
+ 		arguments.timeout = make_timeout(duration, &arguments.storage);
+
+ 		if (!timeout_nonblocking(arguments.timeout)) {
+ 			arguments.count = KQUEUE_MAX_EVENTS;
+
+ 			select_internal_without_gvl(&arguments);
+ 		}
+ 	}
+
+ 	for (int i = 0; i < arguments.count; i += 1) {
+ 		VALUE fiber = (VALUE)arguments.events[i].udata;
+ 		VALUE result = INT2NUM(arguments.events[i].filter);
+
+ 		IO_Event_Selector_fiber_transfer(fiber, 1, &result);
+ 	}
+
+ 	return INT2NUM(arguments.count);
+ }
+
+ void Init_IO_Event_Selector_KQueue(VALUE IO_Event_Selector) {
+ 	IO_Event_Selector_KQueue = rb_define_class_under(IO_Event_Selector, "KQueue", rb_cObject);
+ 	rb_gc_register_mark_object(IO_Event_Selector_KQueue);
+
+ 	rb_define_alloc_func(IO_Event_Selector_KQueue, IO_Event_Selector_KQueue_allocate);
+ 	rb_define_method(IO_Event_Selector_KQueue, "initialize", IO_Event_Selector_KQueue_initialize, 1);
+
+ 	rb_define_method(IO_Event_Selector_KQueue, "transfer", IO_Event_Selector_KQueue_transfer, 0);
+ 	rb_define_method(IO_Event_Selector_KQueue, "resume", IO_Event_Selector_KQueue_resume, -1);
+ 	rb_define_method(IO_Event_Selector_KQueue, "yield", IO_Event_Selector_KQueue_yield, 0);
+ 	rb_define_method(IO_Event_Selector_KQueue, "push", IO_Event_Selector_KQueue_push, 1);
+ 	rb_define_method(IO_Event_Selector_KQueue, "raise", IO_Event_Selector_KQueue_raise, -1);
+
+ 	rb_define_method(IO_Event_Selector_KQueue, "ready?", IO_Event_Selector_KQueue_ready_p, 0);
+
+ 	rb_define_method(IO_Event_Selector_KQueue, "select", IO_Event_Selector_KQueue_select, 1);
+ 	rb_define_method(IO_Event_Selector_KQueue, "close", IO_Event_Selector_KQueue_close, 0);
+
+ 	rb_define_method(IO_Event_Selector_KQueue, "io_wait", IO_Event_Selector_KQueue_io_wait, 3);
+
+ 	#ifdef HAVE_RUBY_IO_BUFFER_H
+ 	rb_define_method(IO_Event_Selector_KQueue, "io_read", IO_Event_Selector_KQueue_io_read, 4);
+ 	rb_define_method(IO_Event_Selector_KQueue, "io_write", IO_Event_Selector_KQueue_io_write, 4);
+ 	#endif
+
+ 	rb_define_method(IO_Event_Selector_KQueue, "process_wait", IO_Event_Selector_KQueue_process_wait, 3);
+ }
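For reference, the oneshot registration pattern used by io_add_filters and process_add_filters above can be exercised outside of Ruby. The following minimal standalone C sketch (not part of this gem; the pipe scaffolding is purely illustrative) registers a oneshot EVFILT_READ filter and collects it with a single kevent call, mirroring what select_internal does:

#include <sys/event.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>

int main(void) {
	int fds[2];
	if (pipe(fds) == -1) {perror("pipe"); return EXIT_FAILURE;}

	int kq = kqueue();
	if (kq == -1) {perror("kqueue"); return EXIT_FAILURE;}

	// Register a oneshot read filter, analogous to io_add_filters:
	struct kevent change;
	EV_SET(&change, fds[0], EVFILT_READ, EV_ADD | EV_ENABLE | EV_ONESHOT, 0, 0, NULL);
	if (kevent(kq, &change, 1, NULL, 0, NULL) == -1) {perror("kevent(register)"); return EXIT_FAILURE;}

	// Make the descriptor readable:
	write(fds[1], "x", 1);

	// Wait for the event, analogous to select_internal:
	struct kevent event;
	int count = kevent(kq, NULL, 0, &event, 1, NULL);
	if (count == 1 && event.filter == EVFILT_READ) {
		printf("readable: ident=%d, %ld byte(s) available\n", (int)event.ident, (long)event.data);
	}

	// Because of EV_ONESHOT, the filter is already deleted; no EV_DELETE is required.
	close(fds[0]); close(fds[1]); close(kq);
	return EXIT_SUCCESS;
}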
@@ -0,0 +1,27 @@
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to deal
+ // in the Software without restriction, including without limitation the rights
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ // copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in
+ // all copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ // THE SOFTWARE.
+
+ #pragma once
+
+ #include <ruby.h>
+
+ #define IO_EVENT_SELECTOR_KQUEUE
+
+ void Init_IO_Event_Selector_KQueue(VALUE IO_Event_Selector);
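The IO_EVENT_SELECTOR_KQUEUE macro lets other translation units detect that this selector was compiled in. A hypothetical entry point (not shown in this diff; the module layout here is an assumption, not confirmed by the source) might consume the header like this:

// Hypothetical wiring, for illustration only:
#include <ruby.h>
#include "kqueue.h"

void Init_io_event(void) {
	VALUE IO_Event = rb_define_module_under(rb_cIO, "Event");
	VALUE IO_Event_Selector = rb_define_module_under(IO_Event, "Selector");

	// Defined by kqueue.h, so this branch is taken whenever the header was included:
	#ifdef IO_EVENT_SELECTOR_KQUEUE
	Init_IO_Event_Selector_KQueue(IO_Event_Selector);
	#endif
}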
@@ -0,0 +1,36 @@
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to deal
+ // in the Software without restriction, including without limitation the rights
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ // copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in
+ // all copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ // THE SOFTWARE.
+
+ #include <sys/types.h>
+ #include <sys/syscall.h>
+ #include <unistd.h>
+ #include <poll.h>
+ #include <stdlib.h>
+ #include <stdio.h>
+
+ #ifndef __NR_pidfd_open
+ #define __NR_pidfd_open 434 /* System call # on most architectures */
+ #endif
+
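+ // C libraries do not generally provide a wrapper for pidfd_open(2) yet, so invoke the raw system call directly: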
+ static int
+ pidfd_open(pid_t pid, unsigned int flags)
+ {
+ 	return syscall(__NR_pidfd_open, pid, flags);
+ }
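For context, pidfd_open is the Linux counterpart to the EVFILT_PROC/NOTE_EXIT path in the first file: the returned descriptor polls as readable once the target process exits, which is why this helper pulls in poll.h. A minimal standalone sketch (assuming Linux 5.3 or later; not part of this diff) combining it with poll(2):

#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>

#ifndef __NR_pidfd_open
#define __NR_pidfd_open 434
#endif

static int pidfd_open(pid_t pid, unsigned int flags) {
	return syscall(__NR_pidfd_open, pid, flags);
}

int main(void) {
	pid_t pid = fork();
	if (pid == 0) {
		// Child: exit after a short sleep.
		sleep(1);
		_exit(0);
	}

	int fd = pidfd_open(pid, 0);
	if (fd == -1) {perror("pidfd_open"); return EXIT_FAILURE;}

	// A pidfd becomes readable once the process has exited:
	struct pollfd pollfd = {.fd = fd, .events = POLLIN};
	poll(&pollfd, 1, -1);

	// The exit status can now be reaped without blocking:
	int status = 0;
	waitpid(pid, &status, 0);
	printf("child exited with status %d\n", WEXITSTATUS(status));

	close(fd);
	return EXIT_SUCCESS;
}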