event 0.4.4 → 0.8.0

@@ -20,6 +20,6 @@
 
 #pragma once
 
- #define EVENT_BACKEND_EPOLL
+ #define EVENT_SELECTOR_EPOLL
 
- void Init_Event_Backend_EPoll(VALUE Event_Backend);
+ void Init_Event_Selector_EPoll(VALUE Event_Selector);
@@ -0,0 +1,637 @@
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to deal
+ // in the Software without restriction, including without limitation the rights
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ // copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in
+ // all copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ // THE SOFTWARE.
+
+ #include "kqueue.h"
+ #include "selector.h"
+
+ #include <sys/event.h>
+ #include <sys/ioctl.h>
+ #include <time.h>
+ #include <errno.h>
+
+ static VALUE Event_Selector_KQueue = Qnil;
+
+ enum {KQUEUE_MAX_EVENTS = 64};
+
+ struct Event_Selector_KQueue {
+ 	struct Event_Selector backend;
+ 	int descriptor;
+ };
+
+ void Event_Selector_KQueue_Type_mark(void *_data)
+ {
+ 	struct Event_Selector_KQueue *data = _data;
+ 	Event_Selector_mark(&data->backend);
+ }
+
+ static
+ void close_internal(struct Event_Selector_KQueue *data) {
+ 	if (data->descriptor >= 0) {
+ 		close(data->descriptor);
+ 		data->descriptor = -1;
+ 	}
+ }
+
+ void Event_Selector_KQueue_Type_free(void *_data)
+ {
+ 	struct Event_Selector_KQueue *data = _data;
+
+ 	close_internal(data);
+
+ 	free(data);
+ }
+
+ size_t Event_Selector_KQueue_Type_size(const void *data)
+ {
+ 	return sizeof(struct Event_Selector_KQueue);
+ }
+
+ static const rb_data_type_t Event_Selector_KQueue_Type = {
+ 	.wrap_struct_name = "Event::Backend::KQueue",
+ 	.function = {
+ 		.dmark = Event_Selector_KQueue_Type_mark,
+ 		.dfree = Event_Selector_KQueue_Type_free,
+ 		.dsize = Event_Selector_KQueue_Type_size,
+ 	},
+ 	.data = NULL,
+ 	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
+ };
+
+ VALUE Event_Selector_KQueue_allocate(VALUE self) {
+ 	struct Event_Selector_KQueue *data = NULL;
+ 	VALUE instance = TypedData_Make_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
+
+ 	Event_Selector_initialize(&data->backend, Qnil);
+ 	data->descriptor = -1;
+
+ 	return instance;
+ }
+
+ VALUE Event_Selector_KQueue_initialize(VALUE self, VALUE loop) {
+ 	struct Event_Selector_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
+
+ 	Event_Selector_initialize(&data->backend, loop);
+ 	int result = kqueue();
+
+ 	if (result == -1) {
+ 		rb_sys_fail("kqueue");
+ 	} else {
+ 		ioctl(result, FIOCLEX);
+ 		data->descriptor = result;
+
+ 		rb_update_max_fd(data->descriptor);
+ 	}
+
+ 	return self;
+ }
+
+ VALUE Event_Selector_KQueue_close(VALUE self) {
+ 	struct Event_Selector_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
+
+ 	close_internal(data);
+
+ 	return Qnil;
+ }
+
+ VALUE Event_Selector_KQueue_transfer(int argc, VALUE *argv, VALUE self)
+ {
+ 	struct Event_Selector_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
+
+ 	return Event_Selector_wait_and_transfer(&data->backend, argc, argv);
+ }
+
+ VALUE Event_Selector_KQueue_yield(VALUE self)
+ {
+ 	struct Event_Selector_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
+
+ 	Event_Selector_yield(&data->backend);
+
+ 	return Qnil;
+ }
+
+ VALUE Event_Selector_KQueue_push(VALUE self, VALUE fiber)
+ {
+ 	struct Event_Selector_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
+
+ 	Event_Selector_queue_push(&data->backend, fiber);
+
+ 	return Qnil;
+ }
+
+ VALUE Event_Selector_KQueue_raise(int argc, VALUE *argv, VALUE self)
+ {
+ 	struct Event_Selector_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
+
+ 	return Event_Selector_wait_and_raise(&data->backend, argc, argv);
+ }
+
+ VALUE Event_Selector_KQueue_ready_p(VALUE self) {
+ 	struct Event_Selector_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
+
+ 	return data->backend.ready ? Qtrue : Qfalse;
+ }
+
+ struct process_wait_arguments {
+ 	struct Event_Selector_KQueue *data;
+ 	pid_t pid;
+ 	int flags;
+ };
+
+ static
+ int process_add_filters(int descriptor, int ident, VALUE fiber) {
+ 	struct kevent event = {0};
+
+ 	event.ident = ident;
+ 	event.filter = EVFILT_PROC;
+ 	event.flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
+ 	event.fflags = NOTE_EXIT;
+ 	event.udata = (void*)fiber;
+
+ 	int result = kevent(descriptor, &event, 1, NULL, 0, NULL);
+
+ 	if (result == -1) {
+ 		// No such process - the process has probably already terminated:
+ 		if (errno == ESRCH) {
+ 			return 0;
+ 		}
+
+ 		rb_sys_fail("kevent(process_add_filters)");
+ 	}
+
+ 	return 1;
+ }
+
+ static
+ void process_remove_filters(int descriptor, int ident) {
+ 	struct kevent event = {0};
+
+ 	event.ident = ident;
+ 	event.filter = EVFILT_PROC;
+ 	event.flags = EV_DELETE;
+ 	event.fflags = NOTE_EXIT;
+
+ 	// Ignore the result.
+ 	kevent(descriptor, &event, 1, NULL, 0, NULL);
+ }
+
+ static
+ VALUE process_wait_transfer(VALUE _arguments) {
+ 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
+
+ 	Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
+
+ 	return Event_Selector_process_status_wait(arguments->pid);
+ }
+
+ static
+ VALUE process_wait_rescue(VALUE _arguments, VALUE exception) {
+ 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
+
+ 	process_remove_filters(arguments->data->descriptor, arguments->pid);
+
+ 	rb_exc_raise(exception);
+ }
+
+ VALUE Event_Selector_KQueue_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
+ 	struct Event_Selector_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
+
+ 	struct process_wait_arguments process_wait_arguments = {
+ 		.data = data,
+ 		.pid = NUM2PIDT(pid),
+ 		.flags = RB_NUM2INT(flags),
+ 	};
+
+ 	int waiting = process_add_filters(data->descriptor, process_wait_arguments.pid, fiber);
+
+ 	if (waiting) {
+ 		return rb_rescue(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_rescue, (VALUE)&process_wait_arguments);
+ 	} else {
+ 		return Event_Selector_process_status_wait(process_wait_arguments.pid);
+ 	}
+ }
+
+ static
+ int io_add_filters(int descriptor, int ident, int events, VALUE fiber) {
+ 	int count = 0;
+ 	struct kevent kevents[2] = {0};
+
+ 	if (events & EVENT_READABLE) {
+ 		kevents[count].ident = ident;
+ 		kevents[count].filter = EVFILT_READ;
+ 		kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
+ 		kevents[count].udata = (void*)fiber;
+
+ 		// #ifdef EV_OOBAND
+ 		// if (events & PRIORITY) {
+ 		// 	kevents[count].flags |= EV_OOBAND;
+ 		// }
+ 		// #endif
+
+ 		count++;
+ 	}
+
+ 	if (events & EVENT_WRITABLE) {
+ 		kevents[count].ident = ident;
+ 		kevents[count].filter = EVFILT_WRITE;
+ 		kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
+ 		kevents[count].udata = (void*)fiber;
+ 		count++;
+ 	}
+
+ 	int result = kevent(descriptor, kevents, count, NULL, 0, NULL);
+
+ 	if (result == -1) {
+ 		rb_sys_fail("kevent(io_add_filters)");
+ 	}
+
+ 	return events;
+ }
+
+ static
+ void io_remove_filters(int descriptor, int ident, int events) {
+ 	int count = 0;
+ 	struct kevent kevents[2] = {0};
+
+ 	if (events & EVENT_READABLE) {
+ 		kevents[count].ident = ident;
+ 		kevents[count].filter = EVFILT_READ;
+ 		kevents[count].flags = EV_DELETE;
+
+ 		count++;
+ 	}
+
+ 	if (events & EVENT_WRITABLE) {
+ 		kevents[count].ident = ident;
+ 		kevents[count].filter = EVFILT_WRITE;
+ 		kevents[count].flags = EV_DELETE;
+ 		count++;
+ 	}
+
+ 	// Ignore the result.
+ 	kevent(descriptor, kevents, count, NULL, 0, NULL);
+ }
+
+ struct io_wait_arguments {
+ 	struct Event_Selector_KQueue *data;
+ 	int events;
+ 	int descriptor;
+ };
+
+ static
+ VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
+ 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
+
+ 	io_remove_filters(arguments->data->descriptor, arguments->descriptor, arguments->events);
+
+ 	rb_exc_raise(exception);
+ }
+
+ static inline
+ int events_from_kqueue_filter(int filter) {
+ 	if (filter == EVFILT_READ) return EVENT_READABLE;
+ 	if (filter == EVFILT_WRITE) return EVENT_WRITABLE;
+
+ 	return 0;
+ }
+
+ static
+ VALUE io_wait_transfer(VALUE _arguments) {
+ 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
+
+ 	VALUE result = Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
+
+ 	return INT2NUM(events_from_kqueue_filter(RB_NUM2INT(result)));
+ }
+
+ VALUE Event_Selector_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
+ 	struct Event_Selector_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
+
+ 	int descriptor = Event_Selector_io_descriptor(io);
+
+ 	struct io_wait_arguments io_wait_arguments = {
+ 		.events = io_add_filters(data->descriptor, descriptor, RB_NUM2INT(events), fiber),
+ 		.data = data,
+ 		.descriptor = descriptor,
+ 	};
+
+ 	return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
+ }
+
+ #ifdef HAVE_RUBY_IO_BUFFER_H
+
+ struct io_read_arguments {
+ 	VALUE self;
+ 	VALUE fiber;
+ 	VALUE io;
+
+ 	int flags;
+
+ 	int descriptor;
+
+ 	VALUE buffer;
+ 	size_t length;
+ };
+
+ static
+ VALUE io_read_loop(VALUE _arguments) {
+ 	struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
+
+ 	void *base;
+ 	size_t size;
+ 	rb_io_buffer_get_mutable(arguments->buffer, &base, &size);
+
+ 	size_t offset = 0;
+ 	size_t length = arguments->length;
+
+ 	while (length > 0) {
+ 		size_t maximum_size = size - offset;
+ 		ssize_t result = read(arguments->descriptor, (char*)base+offset, maximum_size);
+
+ 		if (result == 0) {
+ 			break;
+ 		} else if (result > 0) {
+ 			offset += result;
+ 			length -= result;
+ 		} else if (errno == EAGAIN || errno == EWOULDBLOCK) {
+ 			Event_Selector_KQueue_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(EVENT_READABLE));
+ 		} else {
+ 			rb_sys_fail("Event_Selector_KQueue_io_read");
+ 		}
+ 	}
+
+ 	return SIZET2NUM(offset);
+ }
+
+ static
+ VALUE io_read_ensure(VALUE _arguments) {
+ 	struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
+
+ 	Event_Selector_nonblock_restore(arguments->descriptor, arguments->flags);
+
+ 	return Qnil;
+ }
+
+ VALUE Event_Selector_KQueue_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+ 	struct Event_Selector_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
+
+ 	int descriptor = Event_Selector_io_descriptor(io);
+
+ 	size_t length = NUM2SIZET(_length);
+
+ 	struct io_read_arguments io_read_arguments = {
+ 		.self = self,
+ 		.fiber = fiber,
+ 		.io = io,
+
+ 		.flags = Event_Selector_nonblock_set(descriptor),
+ 		.descriptor = descriptor,
+ 		.buffer = buffer,
+ 		.length = length,
+ 	};
+
+ 	return rb_ensure(io_read_loop, (VALUE)&io_read_arguments, io_read_ensure, (VALUE)&io_read_arguments);
+ }
+
+ struct io_write_arguments {
+ 	VALUE self;
+ 	VALUE fiber;
+ 	VALUE io;
+
+ 	int flags;
+
+ 	int descriptor;
+
+ 	VALUE buffer;
+ 	size_t length;
+ };
+
+ static
+ VALUE io_write_loop(VALUE _arguments) {
+ 	struct io_write_arguments *arguments = (struct io_write_arguments *)_arguments;
+
+ 	const void *base;
+ 	size_t size;
+ 	rb_io_buffer_get_immutable(arguments->buffer, &base, &size);
+
+ 	size_t offset = 0;
+ 	size_t length = arguments->length;
+
+ 	if (length > size) {
+ 		rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
+ 	}
+
+ 	while (length > 0) {
+ 		ssize_t result = write(arguments->descriptor, (char*)base+offset, length);
+
+ 		if (result >= 0) {
+ 			offset += result;
+ 			length -= result;
+ 		} else if (errno == EAGAIN || errno == EWOULDBLOCK) {
+ 			Event_Selector_KQueue_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(EVENT_WRITABLE));
+ 		} else {
+ 			rb_sys_fail("Event_Selector_KQueue_io_write");
+ 		}
+ 	}
+
+ 	return SIZET2NUM(offset);
+ };
+
+ static
+ VALUE io_write_ensure(VALUE _arguments) {
+ 	struct io_write_arguments *arguments = (struct io_write_arguments *)_arguments;
+
+ 	Event_Selector_nonblock_restore(arguments->descriptor, arguments->flags);
+
+ 	return Qnil;
+ };
+
+ VALUE Event_Selector_KQueue_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+ 	struct Event_Selector_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
+
+ 	int descriptor = Event_Selector_io_descriptor(io);
+
+ 	size_t length = NUM2SIZET(_length);
+
+ 	struct io_write_arguments io_write_arguments = {
+ 		.self = self,
+ 		.fiber = fiber,
+ 		.io = io,
+
+ 		.flags = Event_Selector_nonblock_set(descriptor),
+ 		.descriptor = descriptor,
+ 		.buffer = buffer,
+ 		.length = length,
+ 	};
+
+ 	return rb_ensure(io_write_loop, (VALUE)&io_write_arguments, io_write_ensure, (VALUE)&io_write_arguments);
+ }
+
+ #endif
+
+ static
+ struct timespec * make_timeout(VALUE duration, struct timespec * storage) {
+ 	if (duration == Qnil) {
+ 		return NULL;
+ 	}
+
+ 	if (FIXNUM_P(duration)) {
+ 		storage->tv_sec = NUM2TIMET(duration);
+ 		storage->tv_nsec = 0;
+
+ 		return storage;
+ 	}
+
+ 	else if (RB_FLOAT_TYPE_P(duration)) {
+ 		double value = RFLOAT_VALUE(duration);
+ 		time_t seconds = value;
+
+ 		storage->tv_sec = seconds;
+ 		storage->tv_nsec = (value - seconds) * 1000000000L;
+
+ 		return storage;
+ 	}
+
+ 	rb_raise(rb_eRuntimeError, "unable to convert timeout");
+ }
+
+ static
+ int timeout_nonblocking(struct timespec * timespec) {
+ 	return timespec && timespec->tv_sec == 0 && timespec->tv_nsec == 0;
+ }
+
+ struct select_arguments {
+ 	struct Event_Selector_KQueue *data;
+
+ 	int count;
+ 	struct kevent events[KQUEUE_MAX_EVENTS];
+
+ 	struct timespec storage;
+ 	struct timespec *timeout;
+ };
+
+ static
+ void * select_internal(void *_arguments) {
+ 	struct select_arguments * arguments = (struct select_arguments *)_arguments;
+
+ 	arguments->count = kevent(arguments->data->descriptor, NULL, 0, arguments->events, arguments->count, arguments->timeout);
+
+ 	return NULL;
+ }
+
+ static
+ void select_internal_without_gvl(struct select_arguments *arguments) {
+ 	rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
+
+ 	if (arguments->count == -1) {
+ 		rb_sys_fail("select_internal_without_gvl:kevent");
+ 	}
+ }
+
+ static
+ void select_internal_with_gvl(struct select_arguments *arguments) {
+ 	select_internal((void *)arguments);
+
+ 	if (arguments->count == -1) {
+ 		rb_sys_fail("select_internal_with_gvl:kevent");
+ 	}
+ }
+
+ VALUE Event_Selector_KQueue_select(VALUE self, VALUE duration) {
+ 	struct Event_Selector_KQueue *data = NULL;
+ 	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
+
+ 	int ready = Event_Selector_queue_flush(&data->backend);
+
+ 	struct select_arguments arguments = {
+ 		.data = data,
+ 		.count = KQUEUE_MAX_EVENTS,
+ 		.storage = {
+ 			.tv_sec = 0,
+ 			.tv_nsec = 0
+ 		}
+ 	};
+
+ 	// We break this implementation into two parts.
+ 	// (1) count = kevent(..., timeout = 0)
+ 	// (2) without gvl: kevent(..., timeout = 0) if count == 0 and timeout != 0
+ 	// This allows us to avoid releasing and reacquiring the GVL.
+ 	// Non-comprehensive testing shows this gives a 1.5x speedup.
+ 	arguments.timeout = &arguments.storage;
+
+ 	// First do the syscall with no timeout to get any immediately available events:
+ 	select_internal_with_gvl(&arguments);
+
+ 	// If there were no pending events and we have a timeout, wait for more events:
+ 	if (arguments.count == 0 && !ready) {
+ 		arguments.timeout = make_timeout(duration, &arguments.storage);
+
+ 		if (!timeout_nonblocking(arguments.timeout)) {
+ 			arguments.count = KQUEUE_MAX_EVENTS;
+
+ 			select_internal_without_gvl(&arguments);
+ 		}
+ 	}
+
+ 	for (int i = 0; i < arguments.count; i += 1) {
+ 		VALUE fiber = (VALUE)arguments.events[i].udata;
+ 		VALUE result = INT2NUM(arguments.events[i].filter);
+
+ 		Event_Selector_fiber_transfer(fiber, 1, &result);
+ 	}
+
+ 	return INT2NUM(arguments.count);
+ }
+
+ void Init_Event_Selector_KQueue(VALUE Event_Selector) {
+ 	Event_Selector_KQueue = rb_define_class_under(Event_Selector, "KQueue", rb_cObject);
+
+ 	rb_define_alloc_func(Event_Selector_KQueue, Event_Selector_KQueue_allocate);
+ 	rb_define_method(Event_Selector_KQueue, "initialize", Event_Selector_KQueue_initialize, 1);
+
+ 	rb_define_method(Event_Selector_KQueue, "transfer", Event_Selector_KQueue_transfer, -1);
+ 	rb_define_method(Event_Selector_KQueue, "yield", Event_Selector_KQueue_yield, 0);
+ 	rb_define_method(Event_Selector_KQueue, "push", Event_Selector_KQueue_push, 1);
+ 	rb_define_method(Event_Selector_KQueue, "raise", Event_Selector_KQueue_raise, -1);
+
+ 	rb_define_method(Event_Selector_KQueue, "ready?", Event_Selector_KQueue_ready_p, 0);
+
+ 	rb_define_method(Event_Selector_KQueue, "select", Event_Selector_KQueue_select, 1);
+ 	rb_define_method(Event_Selector_KQueue, "close", Event_Selector_KQueue_close, 0);
+
+ 	rb_define_method(Event_Selector_KQueue, "io_wait", Event_Selector_KQueue_io_wait, 3);
+
+ #ifdef HAVE_RUBY_IO_BUFFER_H
+ 	rb_define_method(Event_Selector_KQueue, "io_read", Event_Selector_KQueue_io_read, 4);
+ 	rb_define_method(Event_Selector_KQueue, "io_write", Event_Selector_KQueue_io_write, 4);
+ #endif
+
+ 	rb_define_method(Event_Selector_KQueue, "process_wait", Event_Selector_KQueue_process_wait, 3);
+ }
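
Editor's note: the sketch below is not part of the gem. It is a minimal, standalone C illustration of the kqueue pattern the selector relies on: register a one-shot EVFILT_READ filter for a descriptor and collect ready events with kevent(). The helper name wait_readable is hypothetical, and unlike the gem (which registers filters and waits in separate kevent() calls, then transfers control to fibers), this sketch combines registration and waiting in a single blocking call for brevity.

// Minimal sketch of the one-shot kqueue pattern used by the selector above.
// Assumption: a single descriptor and a blocking wait; the gem multiplexes
// many descriptors and resumes fibers instead of blocking.
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <stdio.h>
#include <unistd.h>

static int wait_readable(int kq, int fd, struct timespec *timeout) {
	struct kevent change;
	// EV_ONESHOT mirrors io_add_filters: the filter is removed once it fires.
	EV_SET(&change, fd, EVFILT_READ, EV_ADD | EV_ENABLE | EV_ONESHOT, 0, 0, NULL);

	struct kevent event;
	// Register the change and wait for at most one event in the same call.
	int count = kevent(kq, &change, 1, &event, 1, timeout);

	if (count < 0) return -1; // error (check errno)
	if (count == 0) return 0; // timed out
	return event.filter == EVFILT_READ ? 1 : 0;
}

int main(void) {
	int kq = kqueue();
	if (kq < 0) return 1;

	struct timespec timeout = {.tv_sec = 1, .tv_nsec = 0};
	int result = wait_readable(kq, STDIN_FILENO, &timeout);
	printf("stdin readable: %d\n", result);

	close(kq);
	return 0;
}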