event 0.4.4 → 0.8.0

@@ -0,0 +1,570 @@
// Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

#include "epoll.h"
#include "selector.h"

#include <sys/epoll.h>
#include <time.h>
#include <errno.h>

#include "pidfd.c"

static VALUE Event_Selector_EPoll = Qnil;

enum {EPOLL_MAX_EVENTS = 64};

struct Event_Selector_EPoll {
	struct Event_Selector backend;
	int descriptor;
};

void Event_Selector_EPoll_Type_mark(void *_data)
{
	struct Event_Selector_EPoll *data = _data;
	Event_Selector_mark(&data->backend);
}

static
void close_internal(struct Event_Selector_EPoll *data) {
	if (data->descriptor >= 0) {
		close(data->descriptor);
		data->descriptor = -1;
	}
}

void Event_Selector_EPoll_Type_free(void *_data)
{
	struct Event_Selector_EPoll *data = _data;
	
	close_internal(data);
	
	free(data);
}

size_t Event_Selector_EPoll_Type_size(const void *data)
{
	return sizeof(struct Event_Selector_EPoll);
}

static const rb_data_type_t Event_Selector_EPoll_Type = {
	.wrap_struct_name = "Event::Backend::EPoll",
	.function = {
		.dmark = Event_Selector_EPoll_Type_mark,
		.dfree = Event_Selector_EPoll_Type_free,
		.dsize = Event_Selector_EPoll_Type_size,
	},
	.data = NULL,
	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
};

VALUE Event_Selector_EPoll_allocate(VALUE self) {
	struct Event_Selector_EPoll *data = NULL;
	VALUE instance = TypedData_Make_Struct(self, struct Event_Selector_EPoll, &Event_Selector_EPoll_Type, data);
	
	Event_Selector_initialize(&data->backend, Qnil);
	data->descriptor = -1;
	
	return instance;
}

VALUE Event_Selector_EPoll_initialize(VALUE self, VALUE loop) {
	struct Event_Selector_EPoll *data = NULL;
	TypedData_Get_Struct(self, struct Event_Selector_EPoll, &Event_Selector_EPoll_Type, data);
	
	Event_Selector_initialize(&data->backend, loop);
	int result = epoll_create1(EPOLL_CLOEXEC);
	
	if (result == -1) {
		rb_sys_fail("epoll_create");
	} else {
		data->descriptor = result;
		
		rb_update_max_fd(data->descriptor);
	}
	
	return self;
}

VALUE Event_Selector_EPoll_close(VALUE self) {
	struct Event_Selector_EPoll *data = NULL;
	TypedData_Get_Struct(self, struct Event_Selector_EPoll, &Event_Selector_EPoll_Type, data);
	
	close_internal(data);
	
	return Qnil;
}

VALUE Event_Selector_EPoll_transfer(int argc, VALUE *argv, VALUE self)
{
	struct Event_Selector_EPoll *data = NULL;
	TypedData_Get_Struct(self, struct Event_Selector_EPoll, &Event_Selector_EPoll_Type, data);
	
	Event_Selector_wait_and_transfer(&data->backend, argc, argv);
	
	return Qnil;
}

VALUE Event_Selector_EPoll_yield(VALUE self)
{
	struct Event_Selector_EPoll *data = NULL;
	TypedData_Get_Struct(self, struct Event_Selector_EPoll, &Event_Selector_EPoll_Type, data);
	
	Event_Selector_yield(&data->backend);
	
	return Qnil;
}

VALUE Event_Selector_EPoll_push(VALUE self, VALUE fiber)
{
	struct Event_Selector_EPoll *data = NULL;
	TypedData_Get_Struct(self, struct Event_Selector_EPoll, &Event_Selector_EPoll_Type, data);
	
	Event_Selector_queue_push(&data->backend, fiber);
	
	return Qnil;
}

VALUE Event_Selector_EPoll_raise(int argc, VALUE *argv, VALUE self)
{
	struct Event_Selector_EPoll *data = NULL;
	TypedData_Get_Struct(self, struct Event_Selector_EPoll, &Event_Selector_EPoll_Type, data);
	
	return Event_Selector_wait_and_raise(&data->backend, argc, argv);
}

VALUE Event_Selector_EPoll_ready_p(VALUE self) {
	struct Event_Selector_EPoll *data = NULL;
	TypedData_Get_Struct(self, struct Event_Selector_EPoll, &Event_Selector_EPoll_Type, data);
	
	return data->backend.ready ? Qtrue : Qfalse;
}

struct process_wait_arguments {
	struct Event_Selector_EPoll *data;
	pid_t pid;
	int flags;
	int descriptor;
};

static
VALUE process_wait_transfer(VALUE _arguments) {
	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
	
	Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
	
	return Event_Selector_process_status_wait(arguments->pid);
}

static
VALUE process_wait_ensure(VALUE _arguments) {
	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
	
	// epoll_ctl(arguments->data->descriptor, EPOLL_CTL_DEL, arguments->descriptor, NULL);
	
	close(arguments->descriptor);
	
	return Qnil;
}

// Suspend the current fiber until the child process exits: a pidfd for the
// child is registered with epoll (one-shot), and once it becomes readable the
// fiber is resumed and the exit status is collected.
VALUE Event_Selector_EPoll_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
	struct Event_Selector_EPoll *data = NULL;
	TypedData_Get_Struct(self, struct Event_Selector_EPoll, &Event_Selector_EPoll_Type, data);
	
	struct process_wait_arguments process_wait_arguments = {
		.data = data,
		.pid = NUM2PIDT(pid),
		.flags = NUM2INT(flags),
	};
	
	process_wait_arguments.descriptor = pidfd_open(process_wait_arguments.pid, 0);
	rb_update_max_fd(process_wait_arguments.descriptor);
	
	struct epoll_event event = {
		.events = EPOLLIN|EPOLLRDHUP|EPOLLONESHOT,
		.data = {.ptr = (void*)fiber},
	};
	
	int result = epoll_ctl(data->descriptor, EPOLL_CTL_ADD, process_wait_arguments.descriptor, &event);
	
	if (result == -1) {
		rb_sys_fail("epoll_ctl(process_wait)");
	}
	
	return rb_ensure(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_ensure, (VALUE)&process_wait_arguments);
}

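Aside: process_wait relies on pidfd_open (provided by the bundled pidfd.c wrapper) so that a child process can be waited on through epoll like any other descriptor. The following is a rough, self-contained sketch of the underlying Linux mechanism (kernel 5.3+, using the raw syscall rather than the gem's wrapper); it is an illustration, not part of the gem:

// Standalone sketch: wait for a child process using a pidfd (Linux 5.3+).
#include <sys/syscall.h>
#include <sys/wait.h>
#include <poll.h>
#include <unistd.h>
#include <stdio.h>

int main(void) {
	pid_t pid = fork();
	if (pid == 0) {
		sleep(1); // Child: pretend to do some work.
		_exit(42);
	}
	
	// Use the raw syscall; older C libraries have no pidfd_open() wrapper.
	int fd = syscall(SYS_pidfd_open, pid, 0);
	if (fd == -1) { perror("pidfd_open"); return 1; }
	
	// The pidfd becomes readable once the child terminates.
	struct pollfd pfd = {.fd = fd, .events = POLLIN};
	poll(&pfd, 1, -1);
	
	int status = 0;
	waitpid(pid, &status, 0); // Reap the child and collect the exit status.
	printf("child exited with %d\n", WEXITSTATUS(status));
	
	close(fd);
	return 0;
}
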
static inline
uint32_t epoll_flags_from_events(int events) {
	uint32_t flags = 0;
	
	if (events & EVENT_READABLE) flags |= EPOLLIN;
	if (events & EVENT_PRIORITY) flags |= EPOLLPRI;
	if (events & EVENT_WRITABLE) flags |= EPOLLOUT;
	
	flags |= EPOLLRDHUP;
	flags |= EPOLLONESHOT;
	
	return flags;
}

static inline
int events_from_epoll_flags(uint32_t flags) {
	int events = 0;
	
	if (flags & EPOLLIN) events |= EVENT_READABLE;
	if (flags & EPOLLPRI) events |= EVENT_PRIORITY;
	if (flags & EPOLLOUT) events |= EVENT_WRITABLE;
	
	return events;
}

struct io_wait_arguments {
	struct Event_Selector_EPoll *data;
	int descriptor;
	int duplicate;
};

static
VALUE io_wait_ensure(VALUE _arguments) {
	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
	
	if (arguments->duplicate >= 0) {
		epoll_ctl(arguments->data->descriptor, EPOLL_CTL_DEL, arguments->duplicate, NULL);
		
		close(arguments->duplicate);
	} else {
		epoll_ctl(arguments->data->descriptor, EPOLL_CTL_DEL, arguments->descriptor, NULL);
	}
	
	return Qnil;
};

static
VALUE io_wait_transfer(VALUE _arguments) {
	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
	
	VALUE result = Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
	
	return INT2NUM(events_from_epoll_flags(NUM2INT(result)));
};

VALUE Event_Selector_EPoll_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
	struct Event_Selector_EPoll *data = NULL;
	TypedData_Get_Struct(self, struct Event_Selector_EPoll, &Event_Selector_EPoll_Type, data);
	
	struct epoll_event event = {0};
	
	int descriptor = Event_Selector_io_descriptor(io);
	int duplicate = -1;
	
	event.events = epoll_flags_from_events(NUM2INT(events));
	event.data.ptr = (void*)fiber;
	
	// fprintf(stderr, "<- fiber=%p descriptor=%d\n", (void*)fiber, descriptor);
	
	// A better approach is to batch all changes:
	int result = epoll_ctl(data->descriptor, EPOLL_CTL_ADD, descriptor, &event);
	
	if (result == -1 && errno == EEXIST) {
		// The file descriptor was already inserted into epoll, so register a duplicate instead.
		duplicate = descriptor = dup(descriptor);
		
		if (descriptor == -1)
			rb_sys_fail("dup");
		
		rb_update_max_fd(duplicate);
		
		result = epoll_ctl(data->descriptor, EPOLL_CTL_ADD, descriptor, &event);
	}
	
	if (result == -1) {
		rb_sys_fail("epoll_ctl");
	}
	
	struct io_wait_arguments io_wait_arguments = {
		.data = data,
		.descriptor = descriptor,
		.duplicate = duplicate
	};
	
	return rb_ensure(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_ensure, (VALUE)&io_wait_arguments);
}

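Aside: the EEXIST branch in io_wait works because an epoll interest list may contain a given descriptor number only once, while a dup()ed descriptor counts as a separate entry even though it refers to the same underlying open file. A minimal standalone sketch of that behaviour (not part of the gem):

// Sketch: registering the same fd twice fails with EEXIST; a dup() succeeds.
#include <sys/epoll.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>

int main(void) {
	int fds[2];
	pipe(fds); // fds[0] is the read end we will register.
	
	int ep = epoll_create1(EPOLL_CLOEXEC);
	struct epoll_event event = {.events = EPOLLIN | EPOLLONESHOT, .data = {.fd = fds[0]}};
	
	epoll_ctl(ep, EPOLL_CTL_ADD, fds[0], &event); // First registration: OK.
	
	if (epoll_ctl(ep, EPOLL_CTL_ADD, fds[0], &event) == -1 && errno == EEXIST) {
		printf("second registration of the same fd: EEXIST\n");
	}
	
	int duplicate = dup(fds[0]); // New descriptor number, same open file description.
	if (epoll_ctl(ep, EPOLL_CTL_ADD, duplicate, &event) == 0) {
		printf("registration via dup()ed fd: OK\n");
	}
	
	close(duplicate);
	close(ep);
	close(fds[0]);
	close(fds[1]);
	return 0;
}
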
#ifdef HAVE_RUBY_IO_BUFFER_H

struct io_read_arguments {
	VALUE self;
	VALUE fiber;
	VALUE io;
	
	int flags;
	
	int descriptor;
	
	VALUE buffer;
	size_t length;
};

static
VALUE io_read_loop(VALUE _arguments) {
	struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
	
	void *base;
	size_t size;
	rb_io_buffer_get_mutable(arguments->buffer, &base, &size);
	
	size_t offset = 0;
	size_t length = arguments->length;
	
	while (length > 0) {
		size_t maximum_size = size - offset;
		ssize_t result = read(arguments->descriptor, (char*)base+offset, maximum_size);
		
		if (result == 0) {
			break;
		} else if (result > 0) {
			offset += result;
			length -= result;
		} else if (errno == EAGAIN || errno == EWOULDBLOCK) {
			Event_Selector_EPoll_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(EVENT_READABLE));
		} else {
			rb_sys_fail("Event_Selector_EPoll_io_read");
		}
	}
	
	return SIZET2NUM(offset);
}

static
VALUE io_read_ensure(VALUE _arguments) {
	struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
	
	Event_Selector_nonblock_restore(arguments->descriptor, arguments->flags);
	
	return Qnil;
}

VALUE Event_Selector_EPoll_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
	int descriptor = Event_Selector_io_descriptor(io);
	
	size_t length = NUM2SIZET(_length);
	
	struct io_read_arguments io_read_arguments = {
		.self = self,
		.fiber = fiber,
		.io = io,
		
		.flags = Event_Selector_nonblock_set(descriptor),
		.descriptor = descriptor,
		.buffer = buffer,
		.length = length,
	};
	
	return rb_ensure(io_read_loop, (VALUE)&io_read_arguments, io_read_ensure, (VALUE)&io_read_arguments);
}

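Aside: io_read_loop is the familiar non-blocking read loop, except that on EAGAIN it suspends the calling fiber via io_wait instead of blocking the thread. For comparison, the same pattern in plain C, blocking the thread with poll(2) where the selector would instead yield to the event loop (a standalone sketch; read_exactly is a hypothetical helper, not part of the gem):

// Sketch: read up to `length` bytes from a non-blocking descriptor,
// waiting with poll(2) whenever the read would block.
#include <poll.h>
#include <unistd.h>
#include <errno.h>
#include <stddef.h>
#include <sys/types.h>

ssize_t read_exactly(int descriptor, char *buffer, size_t length) {
	size_t offset = 0;
	
	while (offset < length) {
		ssize_t result = read(descriptor, buffer + offset, length - offset);
		
		if (result == 0) {
			break; // End of file.
		} else if (result > 0) {
			offset += result;
		} else if (errno == EAGAIN || errno == EWOULDBLOCK) {
			// Where the selector would transfer control to the event loop,
			// this plain version simply blocks the whole thread:
			struct pollfd pfd = {.fd = descriptor, .events = POLLIN};
			poll(&pfd, 1, -1);
		} else {
			return -1; // Unexpected error; errno is set by read().
		}
	}
	
	return (ssize_t)offset;
}
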
struct io_write_arguments {
	VALUE self;
	VALUE fiber;
	VALUE io;
	
	int flags;
	
	int descriptor;
	
	VALUE buffer;
	size_t length;
};

static
VALUE io_write_loop(VALUE _arguments) {
	struct io_write_arguments *arguments = (struct io_write_arguments *)_arguments;
	
	const void *base;
	size_t size;
	rb_io_buffer_get_immutable(arguments->buffer, &base, &size);
	
	size_t offset = 0;
	size_t length = arguments->length;
	
	if (length > size) {
		rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
	}
	
	while (length > 0) {
		ssize_t result = write(arguments->descriptor, (char*)base+offset, length);
		
		if (result >= 0) {
			offset += result;
			length -= result;
		} else if (errno == EAGAIN || errno == EWOULDBLOCK) {
			Event_Selector_EPoll_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(EVENT_WRITABLE));
		} else {
			rb_sys_fail("Event_Selector_EPoll_io_write");
		}
	}
	
	return SIZET2NUM(offset);
};

static
VALUE io_write_ensure(VALUE _arguments) {
	struct io_write_arguments *arguments = (struct io_write_arguments *)_arguments;
	
	Event_Selector_nonblock_restore(arguments->descriptor, arguments->flags);
	
	return Qnil;
};

VALUE Event_Selector_EPoll_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
	int descriptor = Event_Selector_io_descriptor(io);
	
	size_t length = NUM2SIZET(_length);
	
	struct io_write_arguments io_write_arguments = {
		.self = self,
		.fiber = fiber,
		.io = io,
		
		.flags = Event_Selector_nonblock_set(descriptor),
		.descriptor = descriptor,
		.buffer = buffer,
		.length = length,
	};
	
	return rb_ensure(io_write_loop, (VALUE)&io_write_arguments, io_write_ensure, (VALUE)&io_write_arguments);
}

#endif

static
int make_timeout(VALUE duration) {
	if (duration == Qnil) {
		return -1;
	}
	
	if (FIXNUM_P(duration)) {
		return NUM2LONG(duration) * 1000L;
	}
	
	if (RB_FLOAT_TYPE_P(duration)) {
		double value = RFLOAT_VALUE(duration);
		
		return value * 1000;
	}
	
	rb_raise(rb_eRuntimeError, "unable to convert timeout");
}

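Aside: make_timeout converts the Ruby duration (seconds, or nil meaning "wait indefinitely") into the millisecond timeout expected by epoll_wait. Representative conversions, as an illustration:

// make_timeout(Qnil)              -> -1    (epoll_wait blocks indefinitely)
// make_timeout(INT2FIX(2))        -> 2000  (2 seconds, in milliseconds)
// make_timeout(rb_float_new(0.5)) -> 500   (fractional seconds; sub-millisecond parts are truncated)
// Anything else raises RuntimeError: "unable to convert timeout".
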
476
+
477
+ struct select_arguments {
478
+ struct Event_Selector_EPoll *data;
479
+
480
+ int count;
481
+ struct epoll_event events[EPOLL_MAX_EVENTS];
482
+
483
+ int timeout;
484
+ };
485
+
486
+ static
487
+ void * select_internal(void *_arguments) {
488
+ struct select_arguments * arguments = (struct select_arguments *)_arguments;
489
+
490
+ arguments->count = epoll_wait(arguments->data->descriptor, arguments->events, EPOLL_MAX_EVENTS, arguments->timeout);
491
+
492
+ return NULL;
493
+ }
494
+
495
+ static
496
+ void select_internal_without_gvl(struct select_arguments *arguments) {
497
+ rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
498
+
499
+ if (arguments->count == -1) {
500
+ rb_sys_fail("select_internal_without_gvl:epoll_wait");
501
+ }
502
+ }
503
+
504
+ static
505
+ void select_internal_with_gvl(struct select_arguments *arguments) {
506
+ select_internal((void *)arguments);
507
+
508
+ if (arguments->count == -1) {
509
+ rb_sys_fail("select_internal_with_gvl:epoll_wait");
510
+ }
511
+ }
512
+
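Aside: rb_thread_call_without_gvl is the Ruby C-API hook used above to run a blocking epoll_wait while other Ruby threads continue to run; the function handed to it must not touch Ruby objects. A minimal sketch of the same pattern around an ordinary blocking call (example_sleep and blocking_sleep are hypothetical names, not from the gem):

// Sketch: release the GVL around a blocking operation.
#include <ruby.h>
#include <ruby/thread.h>
#include <unistd.h>

struct sleep_arguments {
	unsigned int seconds;
};

// Runs without the GVL: must not call into the Ruby VM or touch VALUEs.
static void *blocking_sleep(void *_arguments) {
	struct sleep_arguments *arguments = _arguments;
	sleep(arguments->seconds);
	return NULL;
}

static VALUE example_sleep(VALUE self, VALUE seconds) {
	struct sleep_arguments arguments = {.seconds = NUM2UINT(seconds)};
	
	// RUBY_UBF_IO selects Ruby's default unblocking strategy for IO-like waits.
	rb_thread_call_without_gvl(blocking_sleep, &arguments, RUBY_UBF_IO, NULL);
	
	return Qnil;
}
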
// Flush the ready queue, poll for events, and resume the waiting fibers.
// The first epoll_wait is non-blocking (timeout 0, holding the GVL); only if
// nothing is ready do we compute the requested timeout and block without the GVL.
VALUE Event_Selector_EPoll_select(VALUE self, VALUE duration) {
	struct Event_Selector_EPoll *data = NULL;
	TypedData_Get_Struct(self, struct Event_Selector_EPoll, &Event_Selector_EPoll_Type, data);
	
	int ready = Event_Selector_queue_flush(&data->backend);
	
	struct select_arguments arguments = {
		.data = data,
		.timeout = 0
	};
	
	select_internal_with_gvl(&arguments);
	
	if (!ready && arguments.count == 0) {
		arguments.timeout = make_timeout(duration);
		
		if (arguments.timeout != 0) {
			select_internal_without_gvl(&arguments);
		}
	}
	
	for (int i = 0; i < arguments.count; i += 1) {
		VALUE fiber = (VALUE)arguments.events[i].data.ptr;
		VALUE result = INT2NUM(arguments.events[i].events);
		
		// fprintf(stderr, "-> fiber=%p descriptor=%d\n", (void*)fiber, events[i].data.fd);
		
		Event_Selector_fiber_transfer(fiber, 1, &result);
	}
	
	return INT2NUM(arguments.count);
}

void Init_Event_Selector_EPoll(VALUE Event_Selector) {
	Event_Selector_EPoll = rb_define_class_under(Event_Selector, "EPoll", rb_cObject);
	
	rb_define_alloc_func(Event_Selector_EPoll, Event_Selector_EPoll_allocate);
	rb_define_method(Event_Selector_EPoll, "initialize", Event_Selector_EPoll_initialize, 1);
	
	rb_define_method(Event_Selector_EPoll, "transfer", Event_Selector_EPoll_transfer, -1);
	rb_define_method(Event_Selector_EPoll, "yield", Event_Selector_EPoll_yield, 0);
	rb_define_method(Event_Selector_EPoll, "push", Event_Selector_EPoll_push, 1);
	rb_define_method(Event_Selector_EPoll, "raise", Event_Selector_EPoll_raise, -1);
	
	rb_define_method(Event_Selector_EPoll, "ready?", Event_Selector_EPoll_ready_p, 0);
	
	rb_define_method(Event_Selector_EPoll, "select", Event_Selector_EPoll_select, 1);
	rb_define_method(Event_Selector_EPoll, "close", Event_Selector_EPoll_close, 0);
	
	rb_define_method(Event_Selector_EPoll, "io_wait", Event_Selector_EPoll_io_wait, 3);
	
#ifdef HAVE_RUBY_IO_BUFFER_H
	rb_define_method(Event_Selector_EPoll, "io_read", Event_Selector_EPoll_io_read, 4);
	rb_define_method(Event_Selector_EPoll, "io_write", Event_Selector_EPoll_io_write, 4);
#endif
	
	rb_define_method(Event_Selector_EPoll, "process_wait", Event_Selector_EPoll_process_wait, 3);
}