io-event-machty 1.0.1

@@ -0,0 +1,657 @@
// Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

#include "epoll.h"
#include "selector.h"

#include <sys/epoll.h>
#include <time.h>
#include <errno.h>

#include "pidfd.c"
#include "../interrupt.h"

static const int DEBUG = 0;

static VALUE IO_Event_Selector_EPoll = Qnil;

enum {EPOLL_MAX_EVENTS = 64};

struct IO_Event_Selector_EPoll {
	struct IO_Event_Selector backend;
	int descriptor;
	int blocked;
	struct IO_Event_Interrupt interrupt;
};

void IO_Event_Selector_EPoll_Type_mark(void *_data)
{
	struct IO_Event_Selector_EPoll *data = _data;
	IO_Event_Selector_mark(&data->backend);
}

static
void close_internal(struct IO_Event_Selector_EPoll *data) {
	if (data->descriptor >= 0) {
		close(data->descriptor);
		data->descriptor = -1;
		
		IO_Event_Interrupt_close(&data->interrupt);
	}
}

void IO_Event_Selector_EPoll_Type_free(void *_data)
{
	struct IO_Event_Selector_EPoll *data = _data;
	
	close_internal(data);
	
	free(data);
}

size_t IO_Event_Selector_EPoll_Type_size(const void *data)
{
	return sizeof(struct IO_Event_Selector_EPoll);
}

static const rb_data_type_t IO_Event_Selector_EPoll_Type = {
	.wrap_struct_name = "IO_Event::Backend::EPoll",
	.function = {
		.dmark = IO_Event_Selector_EPoll_Type_mark,
		.dfree = IO_Event_Selector_EPoll_Type_free,
		.dsize = IO_Event_Selector_EPoll_Type_size,
	},
	.data = NULL,
	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
};

VALUE IO_Event_Selector_EPoll_allocate(VALUE self) {
	struct IO_Event_Selector_EPoll *data = NULL;
	VALUE instance = TypedData_Make_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
	
	IO_Event_Selector_initialize(&data->backend, Qnil);
	data->descriptor = -1;
	
	return instance;
}

void IO_Event_Interrupt_add(struct IO_Event_Interrupt *interrupt, struct IO_Event_Selector_EPoll *data) {
	int descriptor = IO_Event_Interrupt_descriptor(interrupt);
	
	struct epoll_event event = {
		.events = EPOLLIN|EPOLLRDHUP,
		.data = {.ptr = NULL},
	};
	
	int result = epoll_ctl(data->descriptor, EPOLL_CTL_ADD, descriptor, &event);
	
	if (result == -1) {
		rb_sys_fail("IO_Event_Interrupt_add:epoll_ctl");
	}
}

VALUE IO_Event_Selector_EPoll_initialize(VALUE self, VALUE loop) {
	struct IO_Event_Selector_EPoll *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
	
	IO_Event_Selector_initialize(&data->backend, loop);
	int result = epoll_create1(EPOLL_CLOEXEC);
	
	if (result == -1) {
		rb_sys_fail("IO_Event_Selector_EPoll_initialize:epoll_create1");
	} else {
		data->descriptor = result;
		
		rb_update_max_fd(data->descriptor);
	}
	
	IO_Event_Interrupt_open(&data->interrupt);
	IO_Event_Interrupt_add(&data->interrupt, data);
	
	return self;
}

VALUE IO_Event_Selector_EPoll_loop(VALUE self) {
	struct IO_Event_Selector_EPoll *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
	
	return data->backend.loop;
}

VALUE IO_Event_Selector_EPoll_close(VALUE self) {
	struct IO_Event_Selector_EPoll *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
	
	close_internal(data);
	
	return Qnil;
}

VALUE IO_Event_Selector_EPoll_transfer(VALUE self)
{
	struct IO_Event_Selector_EPoll *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
	
	return IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
}

VALUE IO_Event_Selector_EPoll_resume(int argc, VALUE *argv, VALUE self)
{
	struct IO_Event_Selector_EPoll *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
	
	return IO_Event_Selector_resume(&data->backend, argc, argv);
}

VALUE IO_Event_Selector_EPoll_yield(VALUE self)
{
	struct IO_Event_Selector_EPoll *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
	
	return IO_Event_Selector_yield(&data->backend);
}

VALUE IO_Event_Selector_EPoll_push(VALUE self, VALUE fiber)
{
	struct IO_Event_Selector_EPoll *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
	
	IO_Event_Selector_queue_push(&data->backend, fiber);
	
	return Qnil;
}

VALUE IO_Event_Selector_EPoll_raise(int argc, VALUE *argv, VALUE self)
{
	struct IO_Event_Selector_EPoll *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
	
	return IO_Event_Selector_raise(&data->backend, argc, argv);
}

VALUE IO_Event_Selector_EPoll_ready_p(VALUE self) {
	struct IO_Event_Selector_EPoll *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
	
	return data->backend.ready ? Qtrue : Qfalse;
}

struct process_wait_arguments {
	struct IO_Event_Selector_EPoll *data;
	pid_t pid;
	int flags;
	int descriptor;
};

static
VALUE process_wait_transfer(VALUE _arguments) {
	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
	
	IO_Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
	
	return IO_Event_Selector_process_status_wait(arguments->pid);
}

static
VALUE process_wait_ensure(VALUE _arguments) {
	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
	
	// Closing the pidfd removes it from the epoll set automatically (it is never duplicated):
	close(arguments->descriptor);
	
	return Qnil;
}

VALUE IO_Event_Selector_EPoll_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
	struct IO_Event_Selector_EPoll *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
	
	struct process_wait_arguments process_wait_arguments = {
		.data = data,
		.pid = NUM2PIDT(pid),
		.flags = NUM2INT(flags),
	};
	
	process_wait_arguments.descriptor = pidfd_open(process_wait_arguments.pid, 0);
	
	if (process_wait_arguments.descriptor == -1) {
		rb_sys_fail("IO_Event_Selector_EPoll_process_wait:pidfd_open");
	}
	
	rb_update_max_fd(process_wait_arguments.descriptor);
	
	struct epoll_event event = {
		.events = EPOLLIN|EPOLLRDHUP|EPOLLONESHOT,
		.data = {.ptr = (void*)fiber},
	};
	
	int result = epoll_ctl(data->descriptor, EPOLL_CTL_ADD, process_wait_arguments.descriptor, &event);
	
	if (result == -1) {
		rb_sys_fail("IO_Event_Selector_EPoll_process_wait:epoll_ctl");
	}
	
	return rb_ensure(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_ensure, (VALUE)&process_wait_arguments);
}
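
process_wait relies on pidfd_open(2), which returns a file descriptor that becomes readable when the target process terminates; the gem's wrapper is included from pidfd.c above. A standalone sketch of the underlying mechanism, assuming Linux 5.3+ and using the raw syscall (demo_pidfd_open is a local helper for illustration, not the gem's):

// Illustration only: wait for child exit via pidfd + epoll.
#define _GNU_SOURCE
#include <sys/epoll.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>
#include <stdio.h>

static int demo_pidfd_open(pid_t pid, unsigned int flags) {
	return (int)syscall(SYS_pidfd_open, pid, flags);
}

int main(void) {
	pid_t pid = fork();
	if (pid == 0) {
		sleep(1);
		_exit(42); // Child exits; its pidfd becomes readable in the parent.
	}
	
	int pidfd = demo_pidfd_open(pid, 0);
	int epfd = epoll_create1(EPOLL_CLOEXEC);
	
	struct epoll_event event = {.events = EPOLLIN, .data = {.fd = pidfd}};
	epoll_ctl(epfd, EPOLL_CTL_ADD, pidfd, &event);
	
	// Blocks until the child terminates:
	epoll_wait(epfd, &event, 1, -1);
	
	int status = 0;
	waitpid(pid, &status, 0);
	printf("child exited with %d\n", WEXITSTATUS(status));
	
	close(pidfd);
	close(epfd);
	return 0;
}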

static inline
uint32_t epoll_flags_from_events(int events) {
	uint32_t flags = 0;
	
	if (events & IO_EVENT_READABLE) flags |= EPOLLIN;
	if (events & IO_EVENT_PRIORITY) flags |= EPOLLPRI;
	if (events & IO_EVENT_WRITABLE) flags |= EPOLLOUT;
	
	flags |= EPOLLRDHUP;
	flags |= EPOLLONESHOT;
	
	return flags;
}

static inline
int events_from_epoll_flags(uint32_t flags) {
	int events = 0;
	
	if (flags & EPOLLIN) events |= IO_EVENT_READABLE;
	if (flags & EPOLLPRI) events |= IO_EVENT_PRIORITY;
	if (flags & EPOLLOUT) events |= IO_EVENT_WRITABLE;
	
	return events;
}
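
Every registration built through epoll_flags_from_events carries EPOLLONESHOT, so an entry delivers at most one notification and is then disarmed by the kernel. That matches the fiber model used here: each io_wait call corresponds to one fiber waiting for one readiness notification, and a disarmed entry cannot fire again while the ensure block tears the registration down. EPOLLRDHUP is likewise always requested so that a peer shutdown wakes the waiting fiber rather than leaving it parked.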

struct io_wait_arguments {
	struct IO_Event_Selector_EPoll *data;
	int descriptor;
	int duplicate;
};

static
VALUE io_wait_ensure(VALUE _arguments) {
	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
	
	if (arguments->duplicate >= 0) {
		epoll_ctl(arguments->data->descriptor, EPOLL_CTL_DEL, arguments->duplicate, NULL);
		
		close(arguments->duplicate);
	} else {
		epoll_ctl(arguments->data->descriptor, EPOLL_CTL_DEL, arguments->descriptor, NULL);
	}
	
	return Qnil;
}

static
VALUE io_wait_transfer(VALUE _arguments) {
	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
	
	VALUE result = IO_Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
	
	// If the fiber is being cancelled, it might be resumed with nil:
	if (!RTEST(result)) {
		return Qfalse;
	}
	
	return INT2NUM(events_from_epoll_flags(NUM2INT(result)));
}

VALUE IO_Event_Selector_EPoll_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
	struct IO_Event_Selector_EPoll *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
	
	struct epoll_event event = {0};
	
	int descriptor = IO_Event_Selector_io_descriptor(io);
	int duplicate = -1;
	
	event.events = epoll_flags_from_events(NUM2INT(events));
	event.data.ptr = (void*)fiber;
	
	if (DEBUG) fprintf(stderr, "<- fiber=%p descriptor=%d\n", (void*)fiber, descriptor);
	
	// A better approach would be to batch all changes:
	int result = epoll_ctl(data->descriptor, EPOLL_CTL_ADD, descriptor, &event);
	
	if (result == -1 && errno == EEXIST) {
		// The file descriptor was already inserted into epoll, so wait on a duplicate instead:
		duplicate = descriptor = dup(descriptor);
		
		if (descriptor == -1) {
			rb_sys_fail("IO_Event_Selector_EPoll_io_wait:dup");
		}
		
		rb_update_max_fd(duplicate);
		
		result = epoll_ctl(data->descriptor, EPOLL_CTL_ADD, descriptor, &event);
	}
	
	if (result == -1) {
		rb_sys_fail("IO_Event_Selector_EPoll_io_wait:epoll_ctl");
	}
	
	struct io_wait_arguments io_wait_arguments = {
		.data = data,
		.descriptor = descriptor,
		.duplicate = duplicate
	};
	
	return rb_ensure(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_ensure, (VALUE)&io_wait_arguments);
}
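
The EEXIST branch above exists because epoll refuses to register the same file descriptor twice, which happens when two fibers wait on the same IO. dup(2) produces a distinct descriptor for the same open file, and epoll keys its entries on the (descriptor, open file description) pair, so the duplicate can carry its own one-shot registration. A standalone illustration of that behaviour (not part of the gem):

// Demonstrates EEXIST on double-registration and the dup(2) workaround.
#include <sys/epoll.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>

int main(void) {
	int fds[2];
	pipe(fds);
	
	int epfd = epoll_create1(EPOLL_CLOEXEC);
	struct epoll_event event = {.events = EPOLLIN|EPOLLONESHOT};
	
	// First registration succeeds:
	event.data.fd = fds[0];
	printf("first add: %d\n", epoll_ctl(epfd, EPOLL_CTL_ADD, fds[0], &event));
	
	// Second registration of the same descriptor fails with EEXIST:
	int result = epoll_ctl(epfd, EPOLL_CTL_ADD, fds[0], &event);
	printf("second add: %d (EEXIST: %d)\n", result, errno == EEXIST);
	
	// A duplicate descriptor can be registered independently:
	int duplicate = dup(fds[0]);
	event.data.fd = duplicate;
	printf("duplicate add: %d\n", epoll_ctl(epfd, EPOLL_CTL_ADD, duplicate, &event));
	
	close(duplicate);
	close(epfd);
	close(fds[0]);
	close(fds[1]);
	return 0;
}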

#ifdef HAVE_RUBY_IO_BUFFER_H

struct io_read_arguments {
	VALUE self;
	VALUE fiber;
	VALUE io;
	
	int flags;
	
	int descriptor;
	
	VALUE buffer;
	size_t length;
};

static
VALUE io_read_loop(VALUE _arguments) {
	struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
	
	void *base;
	size_t size;
	rb_io_buffer_get_bytes_for_writing(arguments->buffer, &base, &size);
	
	size_t offset = 0;
	size_t length = arguments->length;
	
	while (true) {
		size_t maximum_size = size - offset;
		ssize_t result = read(arguments->descriptor, (char*)base+offset, maximum_size);
		
		if (result > 0) {
			offset += result;
			if ((size_t)result >= length) break;
			length -= result;
		} else if (result == 0) {
			break;
		} else if (length > 0 && IO_Event_try_again(errno)) {
			IO_Event_Selector_EPoll_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(IO_EVENT_READABLE));
		} else {
			return rb_fiber_scheduler_io_result(-1, errno);
		}
	}
	
	return rb_fiber_scheduler_io_result(offset, 0);
}

static
VALUE io_read_ensure(VALUE _arguments) {
	struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
	
	IO_Event_Selector_nonblock_restore(arguments->descriptor, arguments->flags);
	
	return Qnil;
}

VALUE IO_Event_Selector_EPoll_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
	int descriptor = IO_Event_Selector_io_descriptor(io);
	
	size_t length = NUM2SIZET(_length);
	
	struct io_read_arguments io_read_arguments = {
		.self = self,
		.fiber = fiber,
		.io = io,
		
		.flags = IO_Event_Selector_nonblock_set(descriptor),
		.descriptor = descriptor,
		.buffer = buffer,
		.length = length,
	};
	
	return rb_ensure(io_read_loop, (VALUE)&io_read_arguments, io_read_ensure, (VALUE)&io_read_arguments);
}
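
Note the bracketing in IO_Event_Selector_EPoll_io_read: IO_Event_Selector_nonblock_set puts the descriptor into non-blocking mode so that read(2) returns EAGAIN instead of stalling the whole thread, and rb_ensure guarantees io_read_ensure restores the original flags even if the fiber is cancelled mid-loop. io_write below uses the identical pattern in the other direction.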

struct io_write_arguments {
	VALUE self;
	VALUE fiber;
	VALUE io;
	
	int flags;
	
	int descriptor;
	
	VALUE buffer;
	size_t length;
};

static
VALUE io_write_loop(VALUE _arguments) {
	struct io_write_arguments *arguments = (struct io_write_arguments *)_arguments;
	
	const void *base;
	size_t size;
	rb_io_buffer_get_bytes_for_reading(arguments->buffer, &base, &size);
	
	size_t offset = 0;
	size_t length = arguments->length;
	
	if (length > size) {
		rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
	}
	
	while (true) {
		size_t maximum_size = size - offset;
		ssize_t result = write(arguments->descriptor, (char*)base+offset, maximum_size);
		
		if (result > 0) {
			offset += result;
			if ((size_t)result >= length) break;
			length -= result;
		} else if (result == 0) {
			break;
		} else if (length > 0 && IO_Event_try_again(errno)) {
			IO_Event_Selector_EPoll_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(IO_EVENT_WRITABLE));
		} else {
			return rb_fiber_scheduler_io_result(-1, errno);
		}
	}
	
	return rb_fiber_scheduler_io_result(offset, 0);
}

static
VALUE io_write_ensure(VALUE _arguments) {
	struct io_write_arguments *arguments = (struct io_write_arguments *)_arguments;
	
	IO_Event_Selector_nonblock_restore(arguments->descriptor, arguments->flags);
	
	return Qnil;
}

VALUE IO_Event_Selector_EPoll_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
	int descriptor = IO_Event_Selector_io_descriptor(io);
	
	size_t length = NUM2SIZET(_length);
	
	struct io_write_arguments io_write_arguments = {
		.self = self,
		.fiber = fiber,
		.io = io,
		
		.flags = IO_Event_Selector_nonblock_set(descriptor),
		.descriptor = descriptor,
		.buffer = buffer,
		.length = length,
	};
	
	return rb_ensure(io_write_loop, (VALUE)&io_write_arguments, io_write_ensure, (VALUE)&io_write_arguments);
}

#endif

static
int make_timeout(VALUE duration) {
	if (duration == Qnil) {
		return -1;
	}
	
	if (FIXNUM_P(duration)) {
		return NUM2LONG(duration) * 1000L;
	} else if (RB_FLOAT_TYPE_P(duration)) {
		double value = RFLOAT_VALUE(duration);
		
		return value * 1000;
	}
	
	rb_raise(rb_eRuntimeError, "unable to convert timeout");
}
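
make_timeout converts a Ruby duration into the millisecond timeout that epoll_wait(2) expects: nil maps to -1 (block indefinitely), an Integer is interpreted as seconds and scaled by 1000, and a Float is scaled and truncated toward zero. The truncation matters: a duration of 0.0005 becomes 0, and since the select function below only performs the blocking wait when the timeout is non-zero, sub-millisecond durations degrade to a non-blocking poll.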

struct select_arguments {
	struct IO_Event_Selector_EPoll *data;
	
	int count;
	struct epoll_event events[EPOLL_MAX_EVENTS];
	
	int timeout;
};

static
void * select_internal(void *_arguments) {
	struct select_arguments * arguments = (struct select_arguments *)_arguments;
	
	arguments->count = epoll_wait(arguments->data->descriptor, arguments->events, EPOLL_MAX_EVENTS, arguments->timeout);
	
	return NULL;
}

static
void select_internal_without_gvl(struct select_arguments *arguments) {
	arguments->data->blocked = 1;
	rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
	arguments->data->blocked = 0;
	
	if (arguments->count == -1) {
		if (errno != EINTR) {
			rb_sys_fail("select_internal_without_gvl:epoll_wait");
		} else {
			arguments->count = 0;
		}
	}
}

static
void select_internal_with_gvl(struct select_arguments *arguments) {
	select_internal((void *)arguments);
	
	if (arguments->count == -1) {
		if (errno != EINTR) {
			rb_sys_fail("select_internal_with_gvl:epoll_wait");
		} else {
			arguments->count = 0;
		}
	}
}

VALUE IO_Event_Selector_EPoll_select(VALUE self, VALUE duration) {
	struct IO_Event_Selector_EPoll *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
	
	int ready = IO_Event_Selector_queue_flush(&data->backend);
	
	struct select_arguments arguments = {
		.data = data,
		.timeout = 0
	};
	
	// Process any currently pending events:
	select_internal_with_gvl(&arguments);
	
	// If we:
	// 1. Didn't process any ready fibers, and
	// 2. Didn't process any events from the non-blocking select (above), and
	// 3. There are no items in the ready list,
	// then we can perform a blocking select.
	if (!ready && !arguments.count && !data->backend.ready) {
		arguments.timeout = make_timeout(duration);
		
		if (arguments.timeout != 0) {
			// Wait for events to occur:
			select_internal_without_gvl(&arguments);
		}
	}
	
	for (int i = 0; i < arguments.count; i += 1) {
		const struct epoll_event *event = &arguments.events[i];
		if (DEBUG) fprintf(stderr, "-> ptr=%p events=%u\n", event->data.ptr, event->events);
		
		if (event->data.ptr) {
			VALUE fiber = (VALUE)event->data.ptr;
			VALUE result = INT2NUM(event->events);
			
			IO_Event_Selector_fiber_transfer(fiber, 1, &result);
		} else {
			IO_Event_Interrupt_clear(&data->interrupt);
		}
	}
	
	return INT2NUM(arguments.count);
}

VALUE IO_Event_Selector_EPoll_wakeup(VALUE self) {
	struct IO_Event_Selector_EPoll *data = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
	
	// If we are blocking, we can schedule a nop event to wake up the selector:
	if (data->blocked) {
		IO_Event_Interrupt_signal(&data->interrupt);
		
		return Qtrue;
	}
	
	return Qfalse;
}
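
The interrupt itself is implemented in ../interrupt.h and is registered with data.ptr == NULL (see IO_Event_Interrupt_add above), which is how the event loop in select distinguishes a wakeup from an I/O readiness notification. On Linux this kind of cross-thread wakeup is conventionally built on eventfd(2); a minimal sketch of that pattern, assuming an eventfd-based design (the Demo_Interrupt names are hypothetical, not the gem's API):

// Hypothetical eventfd-based interrupt, mirroring the signal/clear calls above.
#include <sys/eventfd.h>
#include <unistd.h>
#include <stdint.h>

struct Demo_Interrupt {
	int descriptor;
};

void Demo_Interrupt_open(struct Demo_Interrupt *interrupt) {
	// Non-blocking, so clearing an already-empty counter doesn't block:
	interrupt->descriptor = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
}

void Demo_Interrupt_signal(struct Demo_Interrupt *interrupt) {
	// Incrementing the counter makes the descriptor readable,
	// which wakes any epoll_wait() it is registered with:
	uint64_t value = 1;
	write(interrupt->descriptor, &value, sizeof(value));
}

void Demo_Interrupt_clear(struct Demo_Interrupt *interrupt) {
	// Reading resets the counter so the descriptor stops being readable:
	uint64_t value = 0;
	read(interrupt->descriptor, &value, sizeof(value));
}

void Demo_Interrupt_close(struct Demo_Interrupt *interrupt) {
	close(interrupt->descriptor);
}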

void Init_IO_Event_Selector_EPoll(VALUE IO_Event_Selector) {
	IO_Event_Selector_EPoll = rb_define_class_under(IO_Event_Selector, "EPoll", rb_cObject);
	rb_gc_register_mark_object(IO_Event_Selector_EPoll);
	
	rb_define_alloc_func(IO_Event_Selector_EPoll, IO_Event_Selector_EPoll_allocate);
	rb_define_method(IO_Event_Selector_EPoll, "initialize", IO_Event_Selector_EPoll_initialize, 1);
	
	rb_define_method(IO_Event_Selector_EPoll, "loop", IO_Event_Selector_EPoll_loop, 0);
	
	rb_define_method(IO_Event_Selector_EPoll, "transfer", IO_Event_Selector_EPoll_transfer, 0);
	rb_define_method(IO_Event_Selector_EPoll, "resume", IO_Event_Selector_EPoll_resume, -1);
	rb_define_method(IO_Event_Selector_EPoll, "yield", IO_Event_Selector_EPoll_yield, 0);
	rb_define_method(IO_Event_Selector_EPoll, "push", IO_Event_Selector_EPoll_push, 1);
	rb_define_method(IO_Event_Selector_EPoll, "raise", IO_Event_Selector_EPoll_raise, -1);
	
	rb_define_method(IO_Event_Selector_EPoll, "ready?", IO_Event_Selector_EPoll_ready_p, 0);
	
	rb_define_method(IO_Event_Selector_EPoll, "select", IO_Event_Selector_EPoll_select, 1);
	rb_define_method(IO_Event_Selector_EPoll, "wakeup", IO_Event_Selector_EPoll_wakeup, 0);
	rb_define_method(IO_Event_Selector_EPoll, "close", IO_Event_Selector_EPoll_close, 0);
	
	rb_define_method(IO_Event_Selector_EPoll, "io_wait", IO_Event_Selector_EPoll_io_wait, 3);
	
#ifdef HAVE_RUBY_IO_BUFFER_H
	rb_define_method(IO_Event_Selector_EPoll, "io_read", IO_Event_Selector_EPoll_io_read, 4);
	rb_define_method(IO_Event_Selector_EPoll, "io_write", IO_Event_Selector_EPoll_io_write, 4);
#endif
	
	rb_define_method(IO_Event_Selector_EPoll, "process_wait", IO_Event_Selector_EPoll_process_wait, 3);
}

@@ -0,0 +1,27 @@
// Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

#pragma once

#include <ruby.h>

#define IO_EVENT_SELECTOR_EPOLL

void Init_IO_Event_Selector_EPoll(VALUE IO_Event_Selector);