event 0.7.0 → 0.9.0

@@ -20,6 +20,6 @@
 
 #pragma once
 
-#define EVENT_BACKEND_EPOLL
+#define EVENT_SELECTOR_EPOLL
 
-void Init_Event_Backend_EPoll(VALUE Event_Backend);
+void Init_Event_Selector_EPoll(VALUE Event_Selector);
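(The hunk above is from the epoll selector's header; the remaining hunks are from the kqueue selector's implementation.) The rename touches both the feature-test macro and the initializer declaration. A minimal sketch of how a consumer of this header might wire them up — the entry-point name, header path, and Event::Selector module layout are assumptions, since the diff does not show them:

#include <ruby.h>
#include "epoll.h" // hypothetical path; the kqueue file includes "kqueue.h", so this is the likely sibling

// Hypothetical extension entry point (not part of this diff): the guard
// decides at compile time whether the epoll selector gets registered.
void Init_event_ext(void) {
	VALUE Event = rb_define_module("Event");
	VALUE Event_Selector = rb_define_module_under(Event, "Selector");
	
#ifdef EVENT_SELECTOR_EPOLL
	Init_Event_Selector_EPoll(Event_Selector);
#endif
}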
@@ -19,76 +19,76 @@
 // THE SOFTWARE.
 
 #include "kqueue.h"
-#include "backend.h"
+#include "selector.h"
 
 #include <sys/event.h>
 #include <sys/ioctl.h>
 #include <time.h>
 #include <errno.h>
 
-static VALUE Event_Backend_KQueue = Qnil;
+static VALUE Event_Selector_KQueue = Qnil;
 
 enum {KQUEUE_MAX_EVENTS = 64};
 
-struct Event_Backend_KQueue {
-	struct Event_Backend backend;
+struct Event_Selector_KQueue {
+	struct Event_Selector backend;
 	int descriptor;
 };
 
-void Event_Backend_KQueue_Type_mark(void *_data)
+void Event_Selector_KQueue_Type_mark(void *_data)
 {
-	struct Event_Backend_KQueue *data = _data;
-	Event_Backend_mark(&data->backend);
+	struct Event_Selector_KQueue *data = _data;
+	Event_Selector_mark(&data->backend);
 }
 
 static
-void close_internal(struct Event_Backend_KQueue *data) {
+void close_internal(struct Event_Selector_KQueue *data) {
 	if (data->descriptor >= 0) {
 		close(data->descriptor);
 		data->descriptor = -1;
 	}
 }
 
-void Event_Backend_KQueue_Type_free(void *_data)
+void Event_Selector_KQueue_Type_free(void *_data)
 {
-	struct Event_Backend_KQueue *data = _data;
+	struct Event_Selector_KQueue *data = _data;
 
 	close_internal(data);
 
 	free(data);
 }
 
-size_t Event_Backend_KQueue_Type_size(const void *data)
+size_t Event_Selector_KQueue_Type_size(const void *data)
 {
-	return sizeof(struct Event_Backend_KQueue);
+	return sizeof(struct Event_Selector_KQueue);
 }
 
-static const rb_data_type_t Event_Backend_KQueue_Type = {
+static const rb_data_type_t Event_Selector_KQueue_Type = {
 	.wrap_struct_name = "Event::Backend::KQueue",
 	.function = {
-		.dmark = Event_Backend_KQueue_Type_mark,
-		.dfree = Event_Backend_KQueue_Type_free,
-		.dsize = Event_Backend_KQueue_Type_size,
+		.dmark = Event_Selector_KQueue_Type_mark,
+		.dfree = Event_Selector_KQueue_Type_free,
+		.dsize = Event_Selector_KQueue_Type_size,
 	},
 	.data = NULL,
 	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
 };
 
-VALUE Event_Backend_KQueue_allocate(VALUE self) {
-	struct Event_Backend_KQueue *data = NULL;
-	VALUE instance = TypedData_Make_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+VALUE Event_Selector_KQueue_allocate(VALUE self) {
+	struct Event_Selector_KQueue *data = NULL;
+	VALUE instance = TypedData_Make_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 
-	Event_Backend_initialize(&data->backend, Qnil);
+	Event_Selector_initialize(&data->backend, Qnil);
 	data->descriptor = -1;
 
 	return instance;
 }
 
-VALUE Event_Backend_KQueue_initialize(VALUE self, VALUE loop) {
-	struct Event_Backend_KQueue *data = NULL;
-	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+VALUE Event_Selector_KQueue_initialize(VALUE self, VALUE loop) {
+	struct Event_Selector_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 
-	Event_Backend_initialize(&data->backend, loop);
+	Event_Selector_initialize(&data->backend, loop);
 	int result = kqueue();
 
 	if (result == -1) {
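Everything this file knows about the shared core moves from backend.h to selector.h, neither of which appears in this diff. A plausible minimal shape for the embedded struct and its mark helper, inferred only from the two fields the kqueue code actually touches (backend.loop and backend.ready, both visible in later hunks):

// Sketch only: the real struct Event_Selector in selector.h likely carries
// more state (e.g. the fiber queue behind Event_Selector_queue_push).
struct Event_Selector {
	VALUE loop;  // event loop fiber, transferred to while a caller waits
	int ready;   // surfaced to Ruby as #ready?
};

// A dmark hook must rb_gc_mark every VALUE retained from C; otherwise the
// GC could collect the loop fiber while the selector still references it.
void Event_Selector_mark(struct Event_Selector *backend) {
	rb_gc_mark(backend->loop);
}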
@@ -103,44 +103,60 @@ VALUE Event_Backend_KQueue_initialize(VALUE self, VALUE loop) {
 	return self;
 }
 
-VALUE Event_Backend_KQueue_close(VALUE self) {
-	struct Event_Backend_KQueue *data = NULL;
-	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+VALUE Event_Selector_KQueue_close(VALUE self) {
+	struct Event_Selector_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 
 	close_internal(data);
 
 	return Qnil;
 }
 
-VALUE Event_Backend_KQueue_transfer(VALUE self, VALUE fiber)
+VALUE Event_Selector_KQueue_resume(int argc, VALUE *argv, VALUE self)
 {
-	struct Event_Backend_KQueue *data = NULL;
-	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+	struct Event_Selector_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 
-	Event_Backend_wait_and_transfer(&data->backend, fiber);
+	return Event_Selector_resume(&data->backend, argc, argv);
+}
+
+VALUE Event_Selector_KQueue_yield(VALUE self)
+{
+	struct Event_Selector_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
+
+	Event_Selector_yield(&data->backend);
 
 	return Qnil;
 }
 
-VALUE Event_Backend_KQueue_defer(VALUE self)
+VALUE Event_Selector_KQueue_push(VALUE self, VALUE fiber)
 {
-	struct Event_Backend_KQueue *data = NULL;
-	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+	struct Event_Selector_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 
-	Event_Backend_defer(&data->backend);
+	Event_Selector_queue_push(&data->backend, fiber);
 
 	return Qnil;
 }
 
-VALUE Event_Backend_KQueue_ready_p(VALUE self) {
-	struct Event_Backend_KQueue *data = NULL;
-	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+VALUE Event_Selector_KQueue_raise(int argc, VALUE *argv, VALUE self)
+{
+	struct Event_Selector_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
+
+	return Event_Selector_wait_and_raise(&data->backend, argc, argv);
+}
+
+VALUE Event_Selector_KQueue_ready_p(VALUE self) {
+	struct Event_Selector_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 
 	return data->backend.ready ? Qtrue : Qfalse;
 }
 
 struct process_wait_arguments {
-	struct Event_Backend_KQueue *data;
+	struct Event_Selector_KQueue *data;
 	pid_t pid;
 	int flags;
 };
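Note the arity change here: `transfer(fiber)` and `defer()` were fixed-arity, while the new `resume` and `raise` are registered with arity -1 (see the Init hunk at the end of the diff), so Ruby hands them the raw argument vector. An illustrative sketch of that convention; the real bodies of Event_Selector_resume and Event_Selector_wait_and_raise live in selector.c, outside this diff:

// Illustrative arity -1 method: argc/argv arrive exactly as passed from
// Ruby, so selector.resume(fiber, *values) can forward `values` onward.
static VALUE example_variadic(int argc, VALUE *argv, VALUE self) {
	rb_check_arity(argc, 1, UNLIMITED_ARGUMENTS); // require at least the fiber
	
	VALUE fiber = argv[0];
	// argv[1]..argv[argc-1] would become the fiber's resumption values.
	
	return fiber;
}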
@@ -186,9 +202,9 @@ static
 VALUE process_wait_transfer(VALUE _arguments) {
 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
 
-	Event_Backend_fiber_transfer(arguments->data->backend.loop);
+	Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
 
-	return Event_Backend_process_status_wait(arguments->pid);
+	return Event_Selector_process_status_wait(arguments->pid);
 }
 
 static
@@ -200,9 +216,9 @@ VALUE process_wait_rescue(VALUE _arguments, VALUE exception) {
 	rb_exc_raise(exception);
 }
 
-VALUE Event_Backend_KQueue_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
-	struct Event_Backend_KQueue *data = NULL;
-	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+VALUE Event_Selector_KQueue_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
+	struct Event_Selector_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 
 	struct process_wait_arguments process_wait_arguments = {
 		.data = data,
@@ -215,7 +231,7 @@ VALUE Event_Backend_KQueue_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
 	if (waiting) {
 		return rb_rescue(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_rescue, (VALUE)&process_wait_arguments);
 	} else {
-		return Event_Backend_process_status_wait(process_wait_arguments.pid);
+		return Event_Selector_process_status_wait(process_wait_arguments.pid);
 	}
 }
 
@@ -224,7 +240,7 @@ int io_add_filters(int descriptor, int ident, int events, VALUE fiber) {
 	int count = 0;
 	struct kevent kevents[2] = {0};
 
-	if (events & READABLE) {
+	if (events & EVENT_READABLE) {
 		kevents[count].ident = ident;
 		kevents[count].filter = EVFILT_READ;
 		kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
@@ -239,7 +255,7 @@ int io_add_filters(int descriptor, int ident, int events, VALUE fiber) {
 		count++;
 	}
 
-	if (events & WRITABLE) {
+	if (events & EVENT_WRITABLE) {
 		kevents[count].ident = ident;
 		kevents[count].filter = EVFILT_WRITE;
 		kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
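The READABLE/WRITABLE constants gain an EVENT_ prefix. Their values live in selector.h rather than here; an illustrative definition consistent with the bitmask tests in these hunks (any distinct powers of two would do):

// Illustrative values only; the diff shows names, not definitions.
enum Event {
	EVENT_READABLE = 1,
	EVENT_WRITABLE = 4,
};

Because the mask composes, a call like io_add_filters(kq, fd, EVENT_READABLE | EVENT_WRITABLE, fiber) registers one EVFILT_READ and one EVFILT_WRITE kevent in a single pass, each EV_ONESHOT.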
@@ -261,7 +277,7 @@ void io_remove_filters(int descriptor, int ident, int events) {
 	int count = 0;
 	struct kevent kevents[2] = {0};
 
-	if (events & READABLE) {
+	if (events & EVENT_READABLE) {
 		kevents[count].ident = ident;
 		kevents[count].filter = EVFILT_READ;
 		kevents[count].flags = EV_DELETE;
@@ -269,7 +285,7 @@ void io_remove_filters(int descriptor, int ident, int events) {
 		count++;
 	}
 
-	if (events & WRITABLE) {
+	if (events & EVENT_WRITABLE) {
 		kevents[count].ident = ident;
 		kevents[count].filter = EVFILT_WRITE;
 		kevents[count].flags = EV_DELETE;
@@ -281,7 +297,7 @@ void io_remove_filters(int descriptor, int ident, int events) {
 }
 
 struct io_wait_arguments {
-	struct Event_Backend_KQueue *data;
+	struct Event_Selector_KQueue *data;
 	int events;
 	int descriptor;
 };
@@ -297,8 +313,8 @@ VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
 
 static inline
 int events_from_kqueue_filter(int filter) {
-	if (filter == EVFILT_READ) return READABLE;
-	if (filter == EVFILT_WRITE) return WRITABLE;
+	if (filter == EVFILT_READ) return EVENT_READABLE;
+	if (filter == EVFILT_WRITE) return EVENT_WRITABLE;
 
 	return 0;
 }
@@ -307,16 +323,16 @@ static
 VALUE io_wait_transfer(VALUE _arguments) {
 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
 
-	VALUE result = Event_Backend_fiber_transfer(arguments->data->backend.loop);
+	VALUE result = Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
 
 	return INT2NUM(events_from_kqueue_filter(RB_NUM2INT(result)));
 }
 
-VALUE Event_Backend_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
-	struct Event_Backend_KQueue *data = NULL;
-	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+VALUE Event_Selector_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
+	struct Event_Selector_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 
-	int descriptor = Event_Backend_io_descriptor(io);
+	int descriptor = Event_Selector_io_descriptor(io);
 
 	struct io_wait_arguments io_wait_arguments = {
 		.events = io_add_filters(data->descriptor, descriptor, RB_NUM2INT(events), fiber),
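io_wait registers one-shot filters, transfers to the event loop fiber, and maps the delivered kqueue filter back to an event mask in io_wait_transfer. The kernel-facing half of that pattern, reduced to a self-contained sketch without the fiber machinery:

// Standalone illustration of the EV_ONESHOT pattern io_add_filters relies
// on: the filter fires at most once and is deleted by the kernel after
// delivery, so io_wait can re-register on every call without leaking.
#include <stddef.h>
#include <sys/event.h>

int wait_readable_once(int kq, int fd) {
	struct kevent change, event;
	
	EV_SET(&change, fd, EVFILT_READ, EV_ADD | EV_ENABLE | EV_ONESHOT, 0, 0, NULL);
	
	// Submit the change and block for a single event in one kevent() call.
	int count = kevent(kq, &change, 1, &event, 1, NULL);
	
	return count == 1 ? (int)event.filter : -1;
}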
@@ -363,9 +379,9 @@ VALUE io_read_loop(VALUE _arguments) {
 			offset += result;
 			length -= result;
 		} else if (errno == EAGAIN || errno == EWOULDBLOCK) {
-			Event_Backend_KQueue_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(READABLE));
+			Event_Selector_KQueue_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(EVENT_READABLE));
 		} else {
-			rb_sys_fail("Event_Backend_KQueue_io_read");
+			rb_sys_fail("Event_Selector_KQueue_io_read");
 		}
 	}
 
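io_read_loop (only partially visible in this hunk) retries short reads and suspends via io_wait on EAGAIN. The same loop shape without the fiber suspension, as a plain nonblocking-read sketch:

// Minimal sketch of the retry structure: accumulate until `length` is
// satisfied; where the selector would suspend the fiber and retry after
// io_wait, this standalone version simply stops early.
#include <errno.h>
#include <unistd.h>

ssize_t read_fully_nonblocking(int descriptor, char *base, size_t length) {
	size_t offset = 0;
	
	while (length > 0) {
		ssize_t result = read(descriptor, base + offset, length);
		
		if (result > 0) {
			offset += result;
			length -= result;
		} else if (result == 0) {
			break; // end of file
		} else if (errno == EAGAIN || errno == EWOULDBLOCK) {
			break; // io_read_loop calls io_wait(... EVENT_READABLE) here and retries
		} else {
			return -1; // the real code raises via rb_sys_fail
		}
	}
	
	return (ssize_t)offset;
}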
@@ -376,16 +392,16 @@ static
 VALUE io_read_ensure(VALUE _arguments) {
 	struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
 
-	Event_Backend_nonblock_restore(arguments->descriptor, arguments->flags);
+	Event_Selector_nonblock_restore(arguments->descriptor, arguments->flags);
 
 	return Qnil;
 }
 
-VALUE Event_Backend_KQueue_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
-	struct Event_Backend_KQueue *data = NULL;
-	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+VALUE Event_Selector_KQueue_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+	struct Event_Selector_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 
-	int descriptor = Event_Backend_io_descriptor(io);
+	int descriptor = Event_Selector_io_descriptor(io);
 
 	size_t length = NUM2SIZET(_length);
 
@@ -394,7 +410,7 @@ VALUE Event_Backend_KQueue_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
 		.fiber = fiber,
 		.io = io,
 
-		.flags = Event_Backend_nonblock_set(descriptor),
+		.flags = Event_Selector_nonblock_set(descriptor),
 		.descriptor = descriptor,
 		.buffer = buffer,
 		.length = length,
@@ -438,9 +454,9 @@ VALUE io_write_loop(VALUE _arguments) {
 			offset += result;
 			length -= result;
 		} else if (errno == EAGAIN || errno == EWOULDBLOCK) {
-			Event_Backend_KQueue_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(WRITABLE));
+			Event_Selector_KQueue_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(EVENT_WRITABLE));
 		} else {
-			rb_sys_fail("Event_Backend_KQueue_io_write");
+			rb_sys_fail("Event_Selector_KQueue_io_write");
 		}
 	}
 
@@ -451,16 +467,16 @@ static
 VALUE io_write_ensure(VALUE _arguments) {
 	struct io_write_arguments *arguments = (struct io_write_arguments *)_arguments;
 
-	Event_Backend_nonblock_restore(arguments->descriptor, arguments->flags);
+	Event_Selector_nonblock_restore(arguments->descriptor, arguments->flags);
 
 	return Qnil;
 };
 
-VALUE Event_Backend_KQueue_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
-	struct Event_Backend_KQueue *data = NULL;
-	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+VALUE Event_Selector_KQueue_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+	struct Event_Selector_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 
-	int descriptor = Event_Backend_io_descriptor(io);
+	int descriptor = Event_Selector_io_descriptor(io);
 
 	size_t length = NUM2SIZET(_length);
 
@@ -469,7 +485,7 @@ VALUE Event_Backend_KQueue_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
 		.fiber = fiber,
 		.io = io,
 
-		.flags = Event_Backend_nonblock_set(descriptor),
+		.flags = Event_Selector_nonblock_set(descriptor),
 		.descriptor = descriptor,
 		.buffer = buffer,
 		.length = length,
@@ -512,7 +528,7 @@ int timeout_nonblocking(struct timespec * timespec) {
 }
 
 struct select_arguments {
-	struct Event_Backend_KQueue *data;
+	struct Event_Selector_KQueue *data;
 
 	int count;
 	struct kevent events[KQUEUE_MAX_EVENTS];
@@ -548,11 +564,11 @@ void select_internal_with_gvl(struct select_arguments *arguments) {
 	}
 }
 
-VALUE Event_Backend_KQueue_select(VALUE self, VALUE duration) {
-	struct Event_Backend_KQueue *data = NULL;
-	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+VALUE Event_Selector_KQueue_select(VALUE self, VALUE duration) {
+	struct Event_Selector_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 
-	Event_Backend_ready_pop(&data->backend);
+	int ready = Event_Selector_queue_flush(&data->backend);
 
 	struct select_arguments arguments = {
 		.data = data,
@@ -574,10 +590,10 @@ VALUE Event_Backend_KQueue_select(VALUE self, VALUE duration) {
 	select_internal_with_gvl(&arguments);
 
 	// If there were no pending events, if we have a timeout, wait for more events:
-	if (arguments.count == 0) {
+	if (!ready && arguments.count == 0) {
 		arguments.timeout = make_timeout(duration, &arguments.storage);
 
-		if (!data->backend.ready && !timeout_nonblocking(arguments.timeout)) {
+		if (!timeout_nonblocking(arguments.timeout)) {
 			arguments.count = KQUEUE_MAX_EVENTS;
 
 			select_internal_without_gvl(&arguments);
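This hunk changes behavior, not just names: the result of Event_Selector_queue_flush (captured as `ready` earlier in the select hunk) now gates the blocking second pass, replacing the old `!data->backend.ready` test inside the timeout branch. Condensed below, assuming queue_flush reports whether any queued fibers were resumed and that timeout_nonblocking treats a zero timespec as "poll only" (its body is not shown in this diff):

#include <stdbool.h>
#include <time.h>

// Assumed mirror of this file's timeout_nonblocking helper.
static bool poll_only(struct timespec *timeout) {
	return timeout && timeout->tv_sec == 0 && timeout->tv_nsec == 0;
}

// Block in kevent() without the GVL only when the fiber run queue was empty
// (ready == 0), the first nonblocking pass returned no events, and the
// caller actually allowed a timeout.
static bool should_block(int ready, int count, struct timespec *timeout) {
	return !ready && count == 0 && !poll_only(timeout);
}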
@@ -588,29 +604,34 @@ VALUE Event_Backend_KQueue_select(VALUE self, VALUE duration) {
 		VALUE fiber = (VALUE)arguments.events[i].udata;
 		VALUE result = INT2NUM(arguments.events[i].filter);
 
-		Event_Backend_fiber_transfer_result(fiber, result);
+		Event_Selector_fiber_transfer(fiber, 1, &result);
 	}
 
 	return INT2NUM(arguments.count);
 }
 
-void Init_Event_Backend_KQueue(VALUE Event_Backend) {
-	Event_Backend_KQueue = rb_define_class_under(Event_Backend, "KQueue", rb_cObject);
+void Init_Event_Selector_KQueue(VALUE Event_Selector) {
+	Event_Selector_KQueue = rb_define_class_under(Event_Selector, "KQueue", rb_cObject);
+
+	rb_define_alloc_func(Event_Selector_KQueue, Event_Selector_KQueue_allocate);
+	rb_define_method(Event_Selector_KQueue, "initialize", Event_Selector_KQueue_initialize, 1);
+
+	rb_define_method(Event_Selector_KQueue, "resume", Event_Selector_KQueue_resume, -1);
+	rb_define_method(Event_Selector_KQueue, "yield", Event_Selector_KQueue_yield, 0);
+	rb_define_method(Event_Selector_KQueue, "push", Event_Selector_KQueue_push, 1);
+	rb_define_method(Event_Selector_KQueue, "raise", Event_Selector_KQueue_raise, -1);
+
+	rb_define_method(Event_Selector_KQueue, "ready?", Event_Selector_KQueue_ready_p, 0);
 
-	rb_define_alloc_func(Event_Backend_KQueue, Event_Backend_KQueue_allocate);
-	rb_define_method(Event_Backend_KQueue, "initialize", Event_Backend_KQueue_initialize, 1);
-	rb_define_method(Event_Backend_KQueue, "transfer", Event_Backend_KQueue_transfer, 1);
-	rb_define_method(Event_Backend_KQueue, "defer", Event_Backend_KQueue_defer, 0);
-	rb_define_method(Event_Backend_KQueue, "ready?", Event_Backend_KQueue_ready_p, 0);
-	rb_define_method(Event_Backend_KQueue, "select", Event_Backend_KQueue_select, 1);
-	rb_define_method(Event_Backend_KQueue, "close", Event_Backend_KQueue_close, 0);
+	rb_define_method(Event_Selector_KQueue, "select", Event_Selector_KQueue_select, 1);
+	rb_define_method(Event_Selector_KQueue, "close", Event_Selector_KQueue_close, 0);
 
-	rb_define_method(Event_Backend_KQueue, "io_wait", Event_Backend_KQueue_io_wait, 3);
+	rb_define_method(Event_Selector_KQueue, "io_wait", Event_Selector_KQueue_io_wait, 3);
 
 #ifdef HAVE_RUBY_IO_BUFFER_H
-	rb_define_method(Event_Backend_KQueue, "io_read", Event_Backend_KQueue_io_read, 4);
-	rb_define_method(Event_Backend_KQueue, "io_write", Event_Backend_KQueue_io_write, 4);
+	rb_define_method(Event_Selector_KQueue, "io_read", Event_Selector_KQueue_io_read, 4);
+	rb_define_method(Event_Selector_KQueue, "io_write", Event_Selector_KQueue_io_write, 4);
 #endif
 
-	rb_define_method(Event_Backend_KQueue, "process_wait", Event_Backend_KQueue_process_wait, 3);
+	rb_define_method(Event_Selector_KQueue, "process_wait", Event_Selector_KQueue_process_wait, 3);
 }
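Finally, `Event_Backend_fiber_transfer_result(fiber, result)` becomes the variadic `Event_Selector_fiber_transfer(fiber, 1, &result)`, matching the argc/argv style used throughout 0.9.0. Its body lives in selector.c, outside this diff; a plausible minimal implementation consistent with the call sites shown above:

// Assumed sketch (selector.c is not part of this diff): transfer control to
// `fiber`, forwarding argc/argv as the values its Fiber#transfer call (and
// hence io_wait_transfer's wakeup) will receive back.
#include <ruby.h>

static VALUE Event_Selector_fiber_transfer_sketch(VALUE fiber, int argc, VALUE *argv) {
	return rb_funcallv(fiber, rb_intern("transfer"), argc, argv);
}

In the select loop above, this is what delivers the triggering kqueue filter (`result`) back to the fiber parked in io_wait_transfer.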