event 0.7.0 → 0.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/ext/event/event.c +12 -12
- data/ext/event/event.h +3 -3
- data/ext/event/extconf.rb +8 -5
- data/ext/event/{backend → selector}/epoll.c +114 -90
- data/ext/event/{backend → selector}/epoll.h +2 -2
- data/ext/event/{backend → selector}/kqueue.c +115 -94
- data/ext/event/{backend → selector}/kqueue.h +2 -2
- data/ext/event/{backend → selector}/pidfd.c +0 -0
- data/ext/event/selector/selector.c +285 -0
- data/ext/event/selector/selector.h +123 -0
- data/ext/event/{backend → selector}/uring.c +144 -133
- data/ext/event/{backend → selector}/uring.h +3 -3
- data/lib/event.rb +1 -1
- data/lib/event/debug.rb +126 -0
- data/lib/event/selector.rb +25 -2
- data/lib/event/{backend → selector}/select.rb +59 -11
- data/lib/event/version.rb +1 -1
- metadata +14 -22
- data/ext/event/Makefile +0 -267
- data/ext/event/backend.o +0 -0
- data/ext/event/backend/backend.c +0 -178
- data/ext/event/backend/backend.h +0 -112
- data/ext/event/event.bundle +0 -0
- data/ext/event/event.o +0 -0
- data/ext/event/extconf.h +0 -4
- data/ext/event/kqueue.o +0 -0
- data/ext/event/mkmf.log +0 -195
- data/lib/event/backend.rb +0 -49
- data/lib/event/debug/selector.rb +0 -108
data/ext/event/{backend → selector}/kqueue.c — the Event_Backend → Event_Selector rename, shown as a unified diff (`…` marks text elided by the diff viewer):

```diff
@@ -19,76 +19,76 @@
 // THE SOFTWARE.
 
 #include "kqueue.h"
-#include "backend.h"
+#include "selector.h"
 
 #include <sys/event.h>
 #include <sys/ioctl.h>
 #include <time.h>
 #include <errno.h>
 
-static VALUE Event_Backend_KQueue = Qnil;
+static VALUE Event_Selector_KQueue = Qnil;
 
 enum {KQUEUE_MAX_EVENTS = 64};
 
-struct Event_Backend_KQueue {
-	struct Event_Backend backend;
+struct Event_Selector_KQueue {
+	struct Event_Selector backend;
 	int descriptor;
 };
 
-void Event_Backend_KQueue_Type_mark(void *_data)
+void Event_Selector_KQueue_Type_mark(void *_data)
 {
-	struct Event_Backend_KQueue *data = _data;
-	Event_Backend_mark(&data->backend);
+	struct Event_Selector_KQueue *data = _data;
+	Event_Selector_mark(&data->backend);
 }
 
 static
-void close_internal(struct Event_Backend_KQueue *data) {
+void close_internal(struct Event_Selector_KQueue *data) {
 	if (data->descriptor >= 0) {
 		close(data->descriptor);
 		data->descriptor = -1;
 	}
 }
 
-void Event_Backend_KQueue_Type_free(void *_data)
+void Event_Selector_KQueue_Type_free(void *_data)
 {
-	struct Event_Backend_KQueue *data = _data;
+	struct Event_Selector_KQueue *data = _data;
 	
 	close_internal(data);
 	
 	free(data);
 }
 
-size_t Event_Backend_KQueue_Type_size(const void *data)
+size_t Event_Selector_KQueue_Type_size(const void *data)
 {
-	return sizeof(struct Event_Backend_KQueue);
+	return sizeof(struct Event_Selector_KQueue);
 }
 
-static const rb_data_type_t Event_Backend_KQueue_Type = {
+static const rb_data_type_t Event_Selector_KQueue_Type = {
 	.wrap_struct_name = "Event::Backend::KQueue",
 	.function = {
-		.dmark = Event_Backend_KQueue_Type_mark,
-		.dfree = Event_Backend_KQueue_Type_free,
-		.dsize = Event_Backend_KQueue_Type_size,
+		.dmark = Event_Selector_KQueue_Type_mark,
+		.dfree = Event_Selector_KQueue_Type_free,
+		.dsize = Event_Selector_KQueue_Type_size,
 	},
 	.data = NULL,
 	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
 };
 
-VALUE Event_Backend_KQueue_allocate(VALUE self) {
-	struct Event_Backend_KQueue *data = NULL;
-	VALUE instance = TypedData_Make_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+VALUE Event_Selector_KQueue_allocate(VALUE self) {
+	struct Event_Selector_KQueue *data = NULL;
+	VALUE instance = TypedData_Make_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 	
-	Event_Backend_initialize(&data->backend, Qnil);
+	Event_Selector_initialize(&data->backend, Qnil);
 	data->descriptor = -1;
 	
 	return instance;
 }
 
-VALUE Event_Backend_KQueue_initialize(VALUE self, VALUE loop) {
-	struct Event_Backend_KQueue *data = NULL;
-	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+VALUE Event_Selector_KQueue_initialize(VALUE self, VALUE loop) {
+	struct Event_Selector_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 	
-	Event_Backend_initialize(&data->backend, loop);
+	Event_Selector_initialize(&data->backend, loop);
 	int result = kqueue();
 	
 	if (result == -1) {
```
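The hunk above is the mechanical rename applied to the standard TypedData boilerplate that Ruby C extensions use to tie a native struct's lifetime to a Ruby object. For readers unfamiliar with the pattern, a minimal self-contained sketch follows; the `Thing` type and its methods are hypothetical, not part of this gem:

```c
#include <ruby.h>

struct Thing {
	int descriptor;
};

static size_t Thing_size(const void *data) {
	return sizeof(struct Thing);
}

// The type descriptor tells the GC how to free and measure the struct.
static const rb_data_type_t Thing_Type = {
	.wrap_struct_name = "Thing",
	.function = {
		// xfree the struct that TypedData_Make_Struct allocated:
		.dfree = RUBY_TYPED_DEFAULT_FREE,
		.dsize = Thing_size,
	},
	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
};

static VALUE Thing_allocate(VALUE klass) {
	struct Thing *data = NULL;
	// Allocates a zeroed struct and wraps it in a Ruby object:
	VALUE instance = TypedData_Make_Struct(klass, struct Thing, &Thing_Type, data);
	data->descriptor = -1;
	return instance;
}

static VALUE Thing_initialize(VALUE self) {
	struct Thing *data = NULL;
	// Unwraps the struct, raising TypeError on a mismatched object:
	TypedData_Get_Struct(self, struct Thing, &Thing_Type, data);
	data->descriptor = 0;
	return self;
}

void Init_thing(void) {
	VALUE cThing = rb_define_class("Thing", rb_cObject);
	rb_define_alloc_func(cThing, Thing_allocate);
	rb_define_method(cThing, "initialize", Thing_initialize, 0);
}
```

TypedData_Get_Struct type-checks the receiver on every call, which is why each method in the diff re-fetches the pointer rather than caching it.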
```diff
@@ -103,44 +103,60 @@ VALUE Event_Backend_KQueue_initialize(VALUE self, VALUE loop) {
 	return self;
 }
 
-VALUE Event_Backend_KQueue_close(VALUE self) {
-	struct Event_Backend_KQueue *data = NULL;
-	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+VALUE Event_Selector_KQueue_close(VALUE self) {
+	struct Event_Selector_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 	
 	close_internal(data);
 	
 	return Qnil;
 }
 
-VALUE Event_Backend_KQueue_transfer(VALUE self, VALUE fiber)
+VALUE Event_Selector_KQueue_resume(int argc, VALUE *argv, VALUE self)
 {
-	struct Event_Backend_KQueue *data = NULL;
-	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+	struct Event_Selector_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 	
-	…
+	return Event_Selector_resume(&data->backend, argc, argv);
+}
+
+VALUE Event_Selector_KQueue_yield(VALUE self)
+{
+	struct Event_Selector_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
+	
+	Event_Selector_yield(&data->backend);
 	
 	return Qnil;
 }
 
-VALUE Event_Backend_KQueue_defer(VALUE self)
+VALUE Event_Selector_KQueue_push(VALUE self, VALUE fiber)
 {
-	struct Event_Backend_KQueue *data = NULL;
-	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+	struct Event_Selector_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 	
-	…
+	Event_Selector_queue_push(&data->backend, fiber);
 	
 	return Qnil;
 }
 
-VALUE Event_Backend_KQueue_ready_p(VALUE self) {
-	struct Event_Backend_KQueue *data = NULL;
-	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+VALUE Event_Selector_KQueue_raise(int argc, VALUE *argv, VALUE self)
+{
+	struct Event_Selector_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
+	
+	return Event_Selector_wait_and_raise(&data->backend, argc, argv);
+}
+
+VALUE Event_Selector_KQueue_ready_p(VALUE self) {
+	struct Event_Selector_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 	
 	return data->backend.ready ? Qtrue : Qfalse;
 }
 
 struct process_wait_arguments {
-	struct Event_Backend_KQueue *data;
+	struct Event_Selector_KQueue *data;
 	pid_t pid;
 	int flags;
 };
```
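Note the move from fixed-arity methods (`transfer`, `defer`) to the argc/argv form used by `resume` and `raise`, which are registered with arity -1 in the Init function further down. A minimal sketch of that variadic-method convention in the Ruby C API (the `Example` class and method are hypothetical):

```c
#include <ruby.h>

// A method registered with arity -1 receives its arguments raw:
static VALUE example_resume(int argc, VALUE *argv, VALUE self) {
	// Require at least one argument (the fiber), allow any extras:
	rb_check_arity(argc, 1, UNLIMITED_ARGUMENTS);
	
	VALUE fiber = argv[0];
	return fiber;
}

void Init_example(void) {
	VALUE klass = rb_define_class("Example", rb_cObject);
	rb_define_method(klass, "resume", example_resume, -1);
}
```

This lets the KQueue class forward the whole argument list to the shared `Event_Selector_resume`/`Event_Selector_wait_and_raise` helpers without repacking it.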
```diff
@@ -186,9 +202,9 @@ static
 VALUE process_wait_transfer(VALUE _arguments) {
 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
 	
-	…
+	Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
 	
-	return Event_Backend_process_status_wait(arguments->pid);
+	return Event_Selector_process_status_wait(arguments->pid);
 }
 
 static
@@ -200,9 +216,9 @@ VALUE process_wait_rescue(VALUE _arguments, VALUE exception) {
 	rb_exc_raise(exception);
 }
 
-VALUE Event_Backend_KQueue_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
-	struct Event_Backend_KQueue *data = NULL;
-	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+VALUE Event_Selector_KQueue_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
+	struct Event_Selector_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 	
 	struct process_wait_arguments process_wait_arguments = {
 		.data = data,
@@ -215,7 +231,7 @@ VALUE Event_Backend_KQueue_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
 	if (waiting) {
 		return rb_rescue(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_rescue, (VALUE)&process_wait_arguments);
 	} else {
-		return Event_Backend_process_status_wait(process_wait_arguments.pid);
+		return Event_Selector_process_status_wait(process_wait_arguments.pid);
 	}
 }
 
```
```diff
@@ -224,7 +240,7 @@ int io_add_filters(int descriptor, int ident, int events, VALUE fiber) {
 	int count = 0;
 	struct kevent kevents[2] = {0};
 	
-	if (events & …) {
+	if (events & EVENT_READABLE) {
 		kevents[count].ident = ident;
 		kevents[count].filter = EVFILT_READ;
 		kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
@@ -239,7 +255,7 @@ int io_add_filters(int descriptor, int ident, int events, VALUE fiber) {
 		count++;
 	}
 	
-	if (events & …) {
+	if (events & EVENT_WRITABLE) {
 		kevents[count].ident = ident;
 		kevents[count].filter = EVFILT_WRITE;
 		kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
@@ -261,7 +277,7 @@ void io_remove_filters(int descriptor, int ident, int events) {
 	int count = 0;
 	struct kevent kevents[2] = {0};
 	
-	if (events & …) {
+	if (events & EVENT_READABLE) {
 		kevents[count].ident = ident;
 		kevents[count].filter = EVFILT_READ;
 		kevents[count].flags = EV_DELETE;
@@ -269,7 +285,7 @@ void io_remove_filters(int descriptor, int ident, int events) {
 		count++;
 	}
 	
-	if (events & …) {
+	if (events & EVENT_WRITABLE) {
 		kevents[count].ident = ident;
 		kevents[count].filter = EVFILT_WRITE;
 		kevents[count].flags = EV_DELETE;
@@ -281,7 +297,7 @@ void io_remove_filters(int descriptor, int ident, int events) {
 }
 
 struct io_wait_arguments {
-	struct Event_Backend_KQueue *data;
+	struct Event_Selector_KQueue *data;
 	int events;
 	int descriptor;
 };
```
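`io_add_filters` arms up to two one-shot filters per wait, so each registration wakes the fiber at most once and needs no explicit deregistration on the happy path. A standalone sketch of the same EV_ADD | EV_ENABLE | EV_ONESHOT idiom (illustrative, not the gem's code):

```c
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

// Register a one-shot read filter for `fd` on the kqueue `kq`,
// attaching `udata` so the waiter can be recovered when it fires.
int register_oneshot_read(int kq, int fd, void *udata) {
	struct kevent event;
	
	// EV_ONESHOT deletes the filter automatically after it fires once,
	// matching the "wait once, then re-register" selector model.
	EV_SET(&event, fd, EVFILT_READ, EV_ADD | EV_ENABLE | EV_ONESHOT, 0, 0, udata);
	
	// With nevents == 0, kevent() only applies the change list.
	return kevent(kq, &event, 1, NULL, 0, NULL);
}
```

The `udata` pointer is how the select loop later in this diff recovers the waiting fiber (`arguments.events[i].udata`).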
```diff
@@ -297,8 +313,8 @@ VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
 
 static inline
 int events_from_kqueue_filter(int filter) {
-	if (filter == EVFILT_READ) return …;
-	if (filter == EVFILT_WRITE) return …;
+	if (filter == EVFILT_READ) return EVENT_READABLE;
+	if (filter == EVFILT_WRITE) return EVENT_WRITABLE;
 	
 	return 0;
 }
@@ -307,16 +323,16 @@ static
 VALUE io_wait_transfer(VALUE _arguments) {
 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
 	
-	VALUE result = …;
+	VALUE result = Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
 	
 	return INT2NUM(events_from_kqueue_filter(RB_NUM2INT(result)));
 }
 
-VALUE Event_Backend_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
-	struct Event_Backend_KQueue *data = NULL;
-	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+VALUE Event_Selector_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
+	struct Event_Selector_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 	
-	int descriptor = Event_Backend_io_descriptor(io);
+	int descriptor = Event_Selector_io_descriptor(io);
 	
 	struct io_wait_arguments io_wait_arguments = {
 		.events = io_add_filters(data->descriptor, descriptor, RB_NUM2INT(events), fiber),
```
```diff
@@ -363,9 +379,9 @@ VALUE io_read_loop(VALUE _arguments) {
 			offset += result;
 			length -= result;
 		} else if (errno == EAGAIN || errno == EWOULDBLOCK) {
-			…
+			Event_Selector_KQueue_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(EVENT_READABLE));
 		} else {
-			rb_sys_fail("Event_Backend_KQueue_io_read");
+			rb_sys_fail("Event_Selector_KQueue_io_read");
 		}
 	}
 
@@ -376,16 +392,16 @@ static
 VALUE io_read_ensure(VALUE _arguments) {
 	struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
 	
-	Event_Backend_nonblock_restore(arguments->descriptor, arguments->flags);
+	Event_Selector_nonblock_restore(arguments->descriptor, arguments->flags);
 	
 	return Qnil;
 }
 
-VALUE Event_Backend_KQueue_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
-	struct Event_Backend_KQueue *data = NULL;
-	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+VALUE Event_Selector_KQueue_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+	struct Event_Selector_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 	
-	int descriptor = Event_Backend_io_descriptor(io);
+	int descriptor = Event_Selector_io_descriptor(io);
 	
 	size_t length = NUM2SIZET(_length);
 	
@@ -394,7 +410,7 @@ VALUE Event_Backend_KQueue_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
 		.fiber = fiber,
 		.io = io,
 		
-		.flags = Event_Backend_nonblock_set(descriptor),
+		.flags = Event_Selector_nonblock_set(descriptor),
 		.descriptor = descriptor,
 		.buffer = buffer,
 		.length = length,
```
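`Event_Selector_nonblock_set` and `Event_Selector_nonblock_restore` bracket the read/write loops, with the restore in the ensure clause so the descriptor's original flags survive non-local exits. Presumably this wraps the usual fcntl save-and-restore idiom; a sketch under that assumption (not the gem's exact implementation):

```c
#include <fcntl.h>

// Put `fd` into non-blocking mode, returning its original flags.
int nonblock_set(int fd) {
	int flags = fcntl(fd, F_GETFL, 0);
	fcntl(fd, F_SETFL, flags | O_NONBLOCK);
	return flags;
}

// Restore the flags captured by nonblock_set, e.g. from an ensure block.
void nonblock_restore(int fd, int flags) {
	fcntl(fd, F_SETFL, flags);
}
```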
```diff
@@ -438,9 +454,9 @@ VALUE io_write_loop(VALUE _arguments) {
 			offset += result;
 			length -= result;
 		} else if (errno == EAGAIN || errno == EWOULDBLOCK) {
-			…
+			Event_Selector_KQueue_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(EVENT_WRITABLE));
 		} else {
-			rb_sys_fail("Event_Backend_KQueue_io_write");
+			rb_sys_fail("Event_Selector_KQueue_io_write");
 		}
 	}
 
@@ -451,16 +467,16 @@ static
 VALUE io_write_ensure(VALUE _arguments) {
 	struct io_write_arguments *arguments = (struct io_write_arguments *)_arguments;
 	
-	Event_Backend_nonblock_restore(arguments->descriptor, arguments->flags);
+	Event_Selector_nonblock_restore(arguments->descriptor, arguments->flags);
 	
 	return Qnil;
 };
 
-VALUE Event_Backend_KQueue_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
-	struct Event_Backend_KQueue *data = NULL;
-	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+VALUE Event_Selector_KQueue_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+	struct Event_Selector_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 	
-	int descriptor = Event_Backend_io_descriptor(io);
+	int descriptor = Event_Selector_io_descriptor(io);
 	
 	size_t length = NUM2SIZET(_length);
 	
@@ -469,7 +485,7 @@ VALUE Event_Backend_KQueue_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
 		.fiber = fiber,
 		.io = io,
 		
-		.flags = Event_Backend_nonblock_set(descriptor),
+		.flags = Event_Selector_nonblock_set(descriptor),
 		.descriptor = descriptor,
 		.buffer = buffer,
 		.length = length,
```
```diff
@@ -512,7 +528,7 @@ int timeout_nonblocking(struct timespec * timespec) {
 }
 
 struct select_arguments {
-	struct Event_Backend_KQueue *data;
+	struct Event_Selector_KQueue *data;
 	
 	int count;
 	struct kevent events[KQUEUE_MAX_EVENTS];
@@ -548,11 +564,11 @@ void select_internal_with_gvl(struct select_arguments *arguments) {
 	}
 }
 
-VALUE Event_Backend_KQueue_select(VALUE self, VALUE duration) {
-	struct Event_Backend_KQueue *data = NULL;
-	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+VALUE Event_Selector_KQueue_select(VALUE self, VALUE duration) {
+	struct Event_Selector_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 	
-	…
+	int ready = Event_Selector_queue_flush(&data->backend);
 	
 	struct select_arguments arguments = {
 		.data = data,
@@ -574,10 +590,10 @@ VALUE Event_Backend_KQueue_select(VALUE self, VALUE duration) {
 	select_internal_with_gvl(&arguments);
 	
 	// If there were no pending events, if we have a timeout, wait for more events:
-	if (arguments.count == 0) {
+	if (!ready && arguments.count == 0) {
 		arguments.timeout = make_timeout(duration, &arguments.storage);
 		
-		if (!…) {
+		if (!timeout_nonblocking(arguments.timeout)) {
 			arguments.count = KQUEUE_MAX_EVENTS;
 			
 			select_internal_without_gvl(&arguments);
```
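The select path probes with the GVL held, and only when nothing is ready (and no fiber was re-queued) does it compute a timeout and block in `select_internal_without_gvl`, presumably via `rb_thread_call_without_gvl`, so other Ruby threads keep running during the wait. A sketch of the underlying `kevent()` timeout semantics that this two-phase probe relies on (illustrative only):

```c
#include <sys/event.h>
#include <stddef.h>

// Wait for up to `count` events on the kqueue `kq`.
// timeout == NULL    : block until at least one event arrives.
// timeout == {0, 0}  : poll and return immediately (the probe case).
// timeout == {s, ns} : block for at most that duration.
int wait_for_events(int kq, struct kevent *events, int count, const struct timespec *timeout) {
	// With nchanges == 0, kevent() only drains ready events.
	return kevent(kq, NULL, 0, events, count, timeout);
}
```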
```diff
@@ -588,29 +604,34 @@ VALUE Event_Backend_KQueue_select(VALUE self, VALUE duration) {
 		VALUE fiber = (VALUE)arguments.events[i].udata;
 		VALUE result = INT2NUM(arguments.events[i].filter);
 		
-		…
+		Event_Selector_fiber_transfer(fiber, 1, &result);
 	}
 	
 	return INT2NUM(arguments.count);
 }
 
-void Init_Event_Backend_KQueue(VALUE Event_Backend) {
-	Event_Backend_KQueue = rb_define_class_under(Event_Backend, "KQueue", rb_cObject);
+void Init_Event_Selector_KQueue(VALUE Event_Selector) {
+	Event_Selector_KQueue = rb_define_class_under(Event_Selector, "KQueue", rb_cObject);
+	
+	rb_define_alloc_func(Event_Selector_KQueue, Event_Selector_KQueue_allocate);
+	rb_define_method(Event_Selector_KQueue, "initialize", Event_Selector_KQueue_initialize, 1);
+	
+	rb_define_method(Event_Selector_KQueue, "resume", Event_Selector_KQueue_resume, -1);
+	rb_define_method(Event_Selector_KQueue, "yield", Event_Selector_KQueue_yield, 0);
+	rb_define_method(Event_Selector_KQueue, "push", Event_Selector_KQueue_push, 1);
+	rb_define_method(Event_Selector_KQueue, "raise", Event_Selector_KQueue_raise, -1);
+	
+	rb_define_method(Event_Selector_KQueue, "ready?", Event_Selector_KQueue_ready_p, 0);
 	
-	rb_define_alloc_func(Event_Backend_KQueue, Event_Backend_KQueue_allocate);
-	rb_define_method(Event_Backend_KQueue, "initialize", Event_Backend_KQueue_initialize, 1);
-	rb_define_method(Event_Backend_KQueue, "transfer", Event_Backend_KQueue_transfer, 1);
-	rb_define_method(Event_Backend_KQueue, "defer", Event_Backend_KQueue_defer, 0);
-	rb_define_method(Event_Backend_KQueue, "ready?", Event_Backend_KQueue_ready_p, 0);
-	rb_define_method(Event_Backend_KQueue, "select", Event_Backend_KQueue_select, 1);
-	rb_define_method(Event_Backend_KQueue, "close", Event_Backend_KQueue_close, 0);
+	rb_define_method(Event_Selector_KQueue, "select", Event_Selector_KQueue_select, 1);
+	rb_define_method(Event_Selector_KQueue, "close", Event_Selector_KQueue_close, 0);
 	
-	rb_define_method(Event_Backend_KQueue, "io_wait", Event_Backend_KQueue_io_wait, 3);
+	rb_define_method(Event_Selector_KQueue, "io_wait", Event_Selector_KQueue_io_wait, 3);
 	
 #ifdef HAVE_RUBY_IO_BUFFER_H
-	rb_define_method(Event_Backend_KQueue, "io_read", Event_Backend_KQueue_io_read, 4);
-	rb_define_method(Event_Backend_KQueue, "io_write", Event_Backend_KQueue_io_write, 4);
+	rb_define_method(Event_Selector_KQueue, "io_read", Event_Selector_KQueue_io_read, 4);
+	rb_define_method(Event_Selector_KQueue, "io_write", Event_Selector_KQueue_io_write, 4);
 #endif
 	
-	rb_define_method(Event_Backend_KQueue, "process_wait", Event_Backend_KQueue_process_wait, 3);
+	rb_define_method(Event_Selector_KQueue, "process_wait", Event_Selector_KQueue_process_wait, 3);
 }
```