io-event 0.1.0

@@ -0,0 +1,577 @@
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to deal
+ // in the Software without restriction, including without limitation the rights
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ // copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in
+ // all copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ // THE SOFTWARE.
+
+ #include "epoll.h"
+ #include "selector.h"
+
+ #include <sys/epoll.h>
+ #include <time.h>
+ #include <errno.h>
+
+ #include "pidfd.c"
+
+ static VALUE IO_Event_Selector_EPoll = Qnil;
+
+ enum {EPOLL_MAX_EVENTS = 64};
+
+ struct IO_Event_Selector_EPoll {
+ 	struct IO_Event_Selector backend;
+ 	int descriptor;
+ };
+
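+ // The selector wraps a single epoll(7) instance: `descriptor` is the epoll
+ // file descriptor, while `backend` carries the generic selector state (event
+ // loop fiber and ready queue). Each call to `select` drains at most
+ // EPOLL_MAX_EVENTS events at a time.
+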
+ void IO_Event_Selector_EPoll_Type_mark(void *_data)
+ {
+ 	struct IO_Event_Selector_EPoll *data = _data;
+ 	IO_Event_Selector_mark(&data->backend);
+ }
+
+ static
+ void close_internal(struct IO_Event_Selector_EPoll *data) {
+ 	if (data->descriptor >= 0) {
+ 		close(data->descriptor);
+ 		data->descriptor = -1;
+ 	}
+ }
+
+ void IO_Event_Selector_EPoll_Type_free(void *_data)
+ {
+ 	struct IO_Event_Selector_EPoll *data = _data;
+
+ 	close_internal(data);
+
+ 	free(data);
+ }
+
+ size_t IO_Event_Selector_EPoll_Type_size(const void *data)
+ {
+ 	return sizeof(struct IO_Event_Selector_EPoll);
+ }
+
+ static const rb_data_type_t IO_Event_Selector_EPoll_Type = {
+ 	.wrap_struct_name = "IO_Event::Backend::EPoll",
+ 	.function = {
+ 		.dmark = IO_Event_Selector_EPoll_Type_mark,
+ 		.dfree = IO_Event_Selector_EPoll_Type_free,
+ 		.dsize = IO_Event_Selector_EPoll_Type_size,
+ 	},
+ 	.data = NULL,
+ 	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
+ };
+
+ VALUE IO_Event_Selector_EPoll_allocate(VALUE self) {
+ 	struct IO_Event_Selector_EPoll *data = NULL;
+ 	VALUE instance = TypedData_Make_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
+
+ 	IO_Event_Selector_initialize(&data->backend, Qnil);
+ 	data->descriptor = -1;
+
+ 	return instance;
+ }
+
+ VALUE IO_Event_Selector_EPoll_initialize(VALUE self, VALUE loop) {
+ 	struct IO_Event_Selector_EPoll *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
+
+ 	IO_Event_Selector_initialize(&data->backend, loop);
+ 	int result = epoll_create1(EPOLL_CLOEXEC);
+
+ 	if (result == -1) {
+ 		rb_sys_fail("epoll_create");
+ 	} else {
+ 		data->descriptor = result;
+
+ 		rb_update_max_fd(data->descriptor);
+ 	}
+
+ 	return self;
+ }
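+
+ // Note: EPOLL_CLOEXEC atomically marks the epoll descriptor close-on-exec,
+ // so it is not leaked into child processes; rb_update_max_fd tells the Ruby
+ // runtime about a descriptor that was created outside its own I/O layer.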
+
+ VALUE IO_Event_Selector_EPoll_close(VALUE self) {
+ 	struct IO_Event_Selector_EPoll *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
+
+ 	close_internal(data);
+
+ 	return Qnil;
+ }
+
+ VALUE IO_Event_Selector_EPoll_transfer(VALUE self)
+ {
+ 	struct IO_Event_Selector_EPoll *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
+
+ 	return IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
+ }
+
+ VALUE IO_Event_Selector_EPoll_resume(int argc, VALUE *argv, VALUE self)
+ {
+ 	struct IO_Event_Selector_EPoll *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
+
+ 	return IO_Event_Selector_resume(&data->backend, argc, argv);
+ }
+
+ VALUE IO_Event_Selector_EPoll_yield(VALUE self)
+ {
+ 	struct IO_Event_Selector_EPoll *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
+
+ 	return IO_Event_Selector_yield(&data->backend);
+ }
+
+ VALUE IO_Event_Selector_EPoll_push(VALUE self, VALUE fiber)
+ {
+ 	struct IO_Event_Selector_EPoll *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
+
+ 	IO_Event_Selector_queue_push(&data->backend, fiber);
+
+ 	return Qnil;
+ }
+
+ VALUE IO_Event_Selector_EPoll_raise(int argc, VALUE *argv, VALUE self)
+ {
+ 	struct IO_Event_Selector_EPoll *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
+
+ 	return IO_Event_Selector_raise(&data->backend, argc, argv);
+ }
+
+ VALUE IO_Event_Selector_EPoll_ready_p(VALUE self) {
+ 	struct IO_Event_Selector_EPoll *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
+
+ 	return data->backend.ready ? Qtrue : Qfalse;
+ }
+
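+ // The wrappers above delegate fiber scheduling to the shared selector code
+ // in selector.c: `transfer` switches to the event loop, `yield` and `push`
+ // defer fibers via the ready queue, and `resume`/`raise` switch to a fiber,
+ // the latter with an exception.
+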
+ struct process_wait_arguments {
+ 	struct IO_Event_Selector_EPoll *data;
+ 	pid_t pid;
+ 	int flags;
+ 	int descriptor;
+ };
+
+ static
+ VALUE process_wait_transfer(VALUE _arguments) {
+ 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
+
+ 	IO_Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
+
+ 	return IO_Event_Selector_process_status_wait(arguments->pid);
+ }
+
+ static
+ VALUE process_wait_ensure(VALUE _arguments) {
+ 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
+
+ 	// epoll_ctl(arguments->data->descriptor, EPOLL_CTL_DEL, arguments->descriptor, NULL);
+
+ 	close(arguments->descriptor);
+
+ 	return Qnil;
+ }
+
+ VALUE IO_Event_Selector_EPoll_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
+ 	struct IO_Event_Selector_EPoll *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
+
+ 	struct process_wait_arguments process_wait_arguments = {
+ 		.data = data,
+ 		.pid = NUM2PIDT(pid),
+ 		.flags = NUM2INT(flags),
+ 	};
+
+ 	process_wait_arguments.descriptor = pidfd_open(process_wait_arguments.pid, 0);
+
+ 	// Check for pidfd_open failure (e.g. ENOSYS on kernels before 5.3):
+ 	if (process_wait_arguments.descriptor == -1) {
+ 		rb_sys_fail("pidfd_open");
+ 	}
+
+ 	rb_update_max_fd(process_wait_arguments.descriptor);
+
+ 	struct epoll_event event = {
+ 		.events = EPOLLIN|EPOLLRDHUP|EPOLLONESHOT,
+ 		.data = {.ptr = (void*)fiber},
+ 	};
+
+ 	int result = epoll_ctl(data->descriptor, EPOLL_CTL_ADD, process_wait_arguments.descriptor, &event);
+
+ 	if (result == -1) {
+ 		rb_sys_fail("epoll_ctl(process_wait)");
+ 	}
+
+ 	return rb_ensure(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_ensure, (VALUE)&process_wait_arguments);
+ }
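+
+ // A note on the mechanism above: pidfd_open(2) returns a descriptor that
+ // becomes readable (EPOLLIN) once the target process exits, so waiting for a
+ // child reduces to a one-shot epoll registration. The calling fiber is
+ // transferred away until `select` observes the event, after which
+ // IO_Event_Selector_process_status_wait collects the exit status.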
+
+ static inline
+ uint32_t epoll_flags_from_events(int events) {
+ 	uint32_t flags = 0;
+
+ 	if (events & IO_EVENT_READABLE) flags |= EPOLLIN;
+ 	if (events & IO_EVENT_PRIORITY) flags |= EPOLLPRI;
+ 	if (events & IO_EVENT_WRITABLE) flags |= EPOLLOUT;
+
+ 	flags |= EPOLLRDHUP;
+ 	flags |= EPOLLONESHOT;
+
+ 	return flags;
+ }
+
+ static inline
+ int events_from_epoll_flags(uint32_t flags) {
+ 	int events = 0;
+
+ 	if (flags & EPOLLIN) events |= IO_EVENT_READABLE;
+ 	if (flags & EPOLLPRI) events |= IO_EVENT_PRIORITY;
+ 	if (flags & EPOLLOUT) events |= IO_EVENT_WRITABLE;
+
+ 	return events;
+ }
+
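+ // Note that EPOLLONESHOT is always requested: a registration fires at most
+ // once and is then disarmed, matching the one-wait-per-registration model
+ // here. EPOLLRDHUP is also set so that a peer closing its end of a socket
+ // wakes the waiting fiber instead of leaving it stalled.
+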
+ struct io_wait_arguments {
+ 	struct IO_Event_Selector_EPoll *data;
+ 	int descriptor;
+ 	int duplicate;
+ };
+
+ static
+ VALUE io_wait_ensure(VALUE _arguments) {
+ 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
+
+ 	if (arguments->duplicate >= 0) {
+ 		epoll_ctl(arguments->data->descriptor, EPOLL_CTL_DEL, arguments->duplicate, NULL);
+
+ 		close(arguments->duplicate);
+ 	} else {
+ 		epoll_ctl(arguments->data->descriptor, EPOLL_CTL_DEL, arguments->descriptor, NULL);
+ 	}
+
+ 	return Qnil;
+ }
+
+ static
+ VALUE io_wait_transfer(VALUE _arguments) {
+ 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
+
+ 	VALUE result = IO_Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
+
+ 	return INT2NUM(events_from_epoll_flags(NUM2INT(result)));
+ }
+
+ VALUE IO_Event_Selector_EPoll_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
+ 	struct IO_Event_Selector_EPoll *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
+
+ 	struct epoll_event event = {0};
+
+ 	int descriptor = IO_Event_Selector_io_descriptor(io);
+ 	int duplicate = -1;
+
+ 	event.events = epoll_flags_from_events(NUM2INT(events));
+ 	event.data.ptr = (void*)fiber;
+
+ 	// fprintf(stderr, "<- fiber=%p descriptor=%d\n", (void*)fiber, descriptor);
+
+ 	// A better approach is to batch all changes:
+ 	int result = epoll_ctl(data->descriptor, EPOLL_CTL_ADD, descriptor, &event);
+
+ 	if (result == -1 && errno == EEXIST) {
+ 		// The file descriptor was already inserted into epoll.
+ 		duplicate = descriptor = dup(descriptor);
+
+ 		if (descriptor == -1)
+ 			rb_sys_fail("dup");
+
+ 		rb_update_max_fd(duplicate);
+
+ 		result = epoll_ctl(data->descriptor, EPOLL_CTL_ADD, descriptor, &event);
+ 	}
+
+ 	if (result == -1) {
+ 		rb_sys_fail("epoll_ctl");
+ 	}
+
+ 	struct io_wait_arguments io_wait_arguments = {
+ 		.data = data,
+ 		.descriptor = descriptor,
+ 		.duplicate = duplicate
+ 	};
+
+ 	return rb_ensure(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_ensure, (VALUE)&io_wait_arguments);
+ }
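+
+ // Why dup(2) above: a given file descriptor can be registered with an epoll
+ // instance only once, so a second concurrent wait fails with EEXIST.
+ // Duplicating the descriptor provides a distinct registration; io_wait_ensure
+ // then removes whichever descriptor was registered and closes the duplicate.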
+
+ #ifdef HAVE_RUBY_IO_BUFFER_H
+
+ struct io_read_arguments {
+ 	VALUE self;
+ 	VALUE fiber;
+ 	VALUE io;
+
+ 	int flags;
+
+ 	int descriptor;
+
+ 	VALUE buffer;
+ 	size_t length;
+ };
+
+ static
+ VALUE io_read_loop(VALUE _arguments) {
+ 	struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
+
+ 	void *base;
+ 	size_t size;
+ 	rb_io_buffer_get_mutable(arguments->buffer, &base, &size);
+
+ 	size_t offset = 0;
+ 	size_t length = arguments->length;
+
+ 	while (length > 0) {
+ 		size_t maximum_size = size - offset;
+ 		ssize_t result = read(arguments->descriptor, (char*)base+offset, maximum_size);
+
+ 		if (result == 0) {
+ 			break;
+ 		} else if (result > 0) {
+ 			offset += result;
+ 			length -= result;
+ 		} else if (errno == EAGAIN || errno == EWOULDBLOCK) {
+ 			IO_Event_Selector_EPoll_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(IO_EVENT_READABLE));
+ 		} else {
+ 			rb_sys_fail("IO_Event_Selector_EPoll_io_read");
+ 		}
+ 	}
+
+ 	return SIZET2NUM(offset);
+ }
+
+ static
+ VALUE io_read_ensure(VALUE _arguments) {
+ 	struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
+
+ 	IO_Event_Selector_nonblock_restore(arguments->descriptor, arguments->flags);
+
+ 	return Qnil;
+ }
+
+ VALUE IO_Event_Selector_EPoll_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+ 	int descriptor = IO_Event_Selector_io_descriptor(io);
+
+ 	size_t length = NUM2SIZET(_length);
+
+ 	struct io_read_arguments io_read_arguments = {
+ 		.self = self,
+ 		.fiber = fiber,
+ 		.io = io,
+
+ 		.flags = IO_Event_Selector_nonblock_set(descriptor),
+ 		.descriptor = descriptor,
+ 		.buffer = buffer,
+ 		.length = length,
+ 	};
+
+ 	return rb_ensure(io_read_loop, (VALUE)&io_read_arguments, io_read_ensure, (VALUE)&io_read_arguments);
+ }
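+
+ // The read loop above follows the standard non-blocking pattern: read(2)
+ // until EOF (result == 0), the requested length is satisfied, or EAGAIN/
+ // EWOULDBLOCK, at which point the fiber suspends in io_wait until the
+ // descriptor is readable again. io_read_ensure restores the descriptor's
+ // original O_NONBLOCK state afterwards.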
387
+
388
+ struct io_write_arguments {
389
+ VALUE self;
390
+ VALUE fiber;
391
+ VALUE io;
392
+
393
+ int flags;
394
+
395
+ int descriptor;
396
+
397
+ VALUE buffer;
398
+ size_t length;
399
+ };
400
+
401
+ static
402
+ VALUE io_write_loop(VALUE _arguments) {
403
+ struct io_write_arguments *arguments = (struct io_write_arguments *)_arguments;
404
+
405
+ const void *base;
406
+ size_t size;
407
+ rb_io_buffer_get_immutable(arguments->buffer, &base, &size);
408
+
409
+ size_t offset = 0;
410
+ size_t length = arguments->length;
411
+
412
+ if (length > size) {
413
+ rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
414
+ }
415
+
416
+ while (length > 0) {
417
+ ssize_t result = write(arguments->descriptor, (char*)base+offset, length);
418
+
419
+ if (result >= 0) {
420
+ offset += result;
421
+ length -= result;
422
+ } else if (errno == EAGAIN || errno == EWOULDBLOCK) {
423
+ IO_Event_Selector_EPoll_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(IO_EVENT_WRITABLE));
424
+ } else {
425
+ rb_sys_fail("IO_Event_Selector_EPoll_io_write");
426
+ }
427
+ }
428
+
429
+ return SIZET2NUM(offset);
430
+ };
431
+
432
+ static
433
+ VALUE io_write_ensure(VALUE _arguments) {
434
+ struct io_write_arguments *arguments = (struct io_write_arguments *)_arguments;
435
+
436
+ IO_Event_Selector_nonblock_restore(arguments->descriptor, arguments->flags);
437
+
438
+ return Qnil;
439
+ };
440
+
441
+ VALUE IO_Event_Selector_EPoll_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
442
+ int descriptor = IO_Event_Selector_io_descriptor(io);
443
+
444
+ size_t length = NUM2SIZET(_length);
445
+
446
+ struct io_write_arguments io_write_arguments = {
447
+ .self = self,
448
+ .fiber = fiber,
449
+ .io = io,
450
+
451
+ .flags = IO_Event_Selector_nonblock_set(descriptor),
452
+ .descriptor = descriptor,
453
+ .buffer = buffer,
454
+ .length = length,
455
+ };
456
+
457
+ return rb_ensure(io_write_loop, (VALUE)&io_write_arguments, io_write_ensure, (VALUE)&io_write_arguments);
458
+ }
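+
+ // The write loop mirrors the read loop: partial writes advance the offset,
+ // and EAGAIN/EWOULDBLOCK suspends the fiber in io_wait until the descriptor
+ // becomes writable; the original O_NONBLOCK state is restored on exit.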
+
+ #endif
+
+ static
+ int make_timeout(VALUE duration) {
+ 	if (duration == Qnil) {
+ 		return -1;
+ 	}
+
+ 	if (FIXNUM_P(duration)) {
+ 		return NUM2LONG(duration) * 1000L;
+ 	} else if (RB_FLOAT_TYPE_P(duration)) {
+ 		double value = RFLOAT_VALUE(duration);
+
+ 		return value * 1000;
+ 	}
+
+ 	rb_raise(rb_eRuntimeError, "unable to convert timeout");
+ }
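+
+ // For example: nil => -1 (epoll_wait blocks indefinitely), 2 => 2000 and
+ // 1.5 => 1500, since epoll_wait expects its timeout in milliseconds; a zero
+ // duration produces a non-blocking poll.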
+
+ struct select_arguments {
+ 	struct IO_Event_Selector_EPoll *data;
+
+ 	int count;
+ 	struct epoll_event events[EPOLL_MAX_EVENTS];
+
+ 	int timeout;
+ };
+
+ static
+ void * select_internal(void *_arguments) {
+ 	struct select_arguments * arguments = (struct select_arguments *)_arguments;
+
+ 	arguments->count = epoll_wait(arguments->data->descriptor, arguments->events, EPOLL_MAX_EVENTS, arguments->timeout);
+
+ 	return NULL;
+ }
+
+ static
+ void select_internal_without_gvl(struct select_arguments *arguments) {
+ 	rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
+
+ 	if (arguments->count == -1) {
+ 		rb_sys_fail("select_internal_without_gvl:epoll_wait");
+ 	}
+ }
+
+ static
+ void select_internal_with_gvl(struct select_arguments *arguments) {
+ 	select_internal((void *)arguments);
+
+ 	if (arguments->count == -1) {
+ 		rb_sys_fail("select_internal_with_gvl:epoll_wait");
+ 	}
+ }
+
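+ // epoll_wait with a non-zero timeout would stall every Ruby thread, so the
+ // blocking variant releases the GVL via rb_thread_call_without_gvl; the
+ // zero-timeout variant avoids that overhead and runs with the GVL held.
+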
+ VALUE IO_Event_Selector_EPoll_select(VALUE self, VALUE duration) {
+ 	struct IO_Event_Selector_EPoll *data = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
+
+ 	int ready = IO_Event_Selector_queue_flush(&data->backend);
+
+ 	struct select_arguments arguments = {
+ 		.data = data,
+ 		.timeout = 0
+ 	};
+
+ 	select_internal_with_gvl(&arguments);
+
+ 	// If the ready list was empty and no events were processed:
+ 	if (!ready && arguments.count == 0) {
+ 		arguments.timeout = make_timeout(duration);
+
+ 		if (arguments.timeout != 0) {
+ 			select_internal_without_gvl(&arguments);
+ 		}
+ 	}
+
+ 	for (int i = 0; i < arguments.count; i += 1) {
+ 		VALUE fiber = (VALUE)arguments.events[i].data.ptr;
+ 		VALUE result = INT2NUM(arguments.events[i].events);
+
+ 		// fprintf(stderr, "-> fiber=%p descriptor=%d\n", (void*)fiber, events[i].data.fd);
+
+ 		IO_Event_Selector_fiber_transfer(fiber, 1, &result);
+ 	}
+
+ 	return INT2NUM(arguments.count);
+ }
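+
+ // Dispatch summary: `select` first flushes the ready queue and polls with a
+ // zero timeout while holding the GVL; only if nothing was processed does it
+ // block (without the GVL) for up to the requested duration. Each ready event
+ // transfers control to the registered fiber, passing the raw epoll flags so
+ // that io_wait_transfer can translate them back into IO_EVENT_* values.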
+
+ void Init_IO_Event_Selector_EPoll(VALUE IO_Event_Selector) {
+ 	IO_Event_Selector_EPoll = rb_define_class_under(IO_Event_Selector, "EPoll", rb_cObject);
+ 	rb_gc_register_mark_object(IO_Event_Selector_EPoll);
+
+ 	rb_define_alloc_func(IO_Event_Selector_EPoll, IO_Event_Selector_EPoll_allocate);
+ 	rb_define_method(IO_Event_Selector_EPoll, "initialize", IO_Event_Selector_EPoll_initialize, 1);
+
+ 	rb_define_method(IO_Event_Selector_EPoll, "transfer", IO_Event_Selector_EPoll_transfer, 0);
+ 	rb_define_method(IO_Event_Selector_EPoll, "resume", IO_Event_Selector_EPoll_resume, -1);
+ 	rb_define_method(IO_Event_Selector_EPoll, "yield", IO_Event_Selector_EPoll_yield, 0);
+ 	rb_define_method(IO_Event_Selector_EPoll, "push", IO_Event_Selector_EPoll_push, 1);
+ 	rb_define_method(IO_Event_Selector_EPoll, "raise", IO_Event_Selector_EPoll_raise, -1);
+
+ 	rb_define_method(IO_Event_Selector_EPoll, "ready?", IO_Event_Selector_EPoll_ready_p, 0);
+
+ 	rb_define_method(IO_Event_Selector_EPoll, "select", IO_Event_Selector_EPoll_select, 1);
+ 	rb_define_method(IO_Event_Selector_EPoll, "close", IO_Event_Selector_EPoll_close, 0);
+
+ 	rb_define_method(IO_Event_Selector_EPoll, "io_wait", IO_Event_Selector_EPoll_io_wait, 3);
+
+ 	#ifdef HAVE_RUBY_IO_BUFFER_H
+ 	rb_define_method(IO_Event_Selector_EPoll, "io_read", IO_Event_Selector_EPoll_io_read, 4);
+ 	rb_define_method(IO_Event_Selector_EPoll, "io_write", IO_Event_Selector_EPoll_io_write, 4);
+ 	#endif
+
+ 	rb_define_method(IO_Event_Selector_EPoll, "process_wait", IO_Event_Selector_EPoll_process_wait, 3);
+ }
@@ -0,0 +1,25 @@
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to deal
+ // in the Software without restriction, including without limitation the rights
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ // copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in
+ // all copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ // THE SOFTWARE.
+
+ #pragma once
+
+ #include <ruby.h>
+
+ #define IO_EVENT_SELECTOR_EPOLL
+
+ void Init_IO_Event_Selector_EPoll(VALUE IO_Event_Selector);