event 0.6.0 → 0.8.2

@@ -20,6 +20,6 @@
 
 #pragma once
 
-#define EVENT_BACKEND_EPOLL
+#define EVENT_SELECTOR_EPOLL
 
-void Init_Event_Backend_EPoll(VALUE Event_Backend);
+void Init_Event_Selector_EPoll(VALUE Event_Selector);
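
The 0.8 series renames the Backend abstraction to Selector throughout the extension, starting with this header guard and initializer for the epoll implementation. A minimal sketch of how the top-level extension entry point might dispatch on these guards; the diff does not include event.c, so the module names here are assumptions based on the signatures above:

    // Sketch (assumption): per-platform selector registration, with one
    // Init_* function per backend compiled in by extconf.
    #include <ruby.h>
    
    void Init_event(void)
    {
        VALUE Event = rb_define_module("Event");
        VALUE Event_Selector = rb_define_module_under(Event, "Selector");
    
    #ifdef EVENT_SELECTOR_EPOLL
        Init_Event_Selector_EPoll(Event_Selector);
    #endif
    }
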
@@ -19,77 +19,76 @@
 // THE SOFTWARE.
 
 #include "kqueue.h"
-#include "backend.h"
+#include "selector.h"
 
 #include <sys/event.h>
 #include <sys/ioctl.h>
 #include <time.h>
 #include <errno.h>
 
-static VALUE Event_Backend_KQueue = Qnil;
-static ID id_fileno;
+static VALUE Event_Selector_KQueue = Qnil;
 
 enum {KQUEUE_MAX_EVENTS = 64};
 
-struct Event_Backend_KQueue {
-    VALUE loop;
+struct Event_Selector_KQueue {
+    struct Event_Selector backend;
     int descriptor;
 };
 
-void Event_Backend_KQueue_Type_mark(void *_data)
+void Event_Selector_KQueue_Type_mark(void *_data)
 {
-    struct Event_Backend_KQueue *data = _data;
-    rb_gc_mark(data->loop);
+    struct Event_Selector_KQueue *data = _data;
+    Event_Selector_mark(&data->backend);
 }
 
 static
-void close_internal(struct Event_Backend_KQueue *data) {
+void close_internal(struct Event_Selector_KQueue *data) {
     if (data->descriptor >= 0) {
         close(data->descriptor);
         data->descriptor = -1;
     }
 }
 
-void Event_Backend_KQueue_Type_free(void *_data)
+void Event_Selector_KQueue_Type_free(void *_data)
 {
-    struct Event_Backend_KQueue *data = _data;
+    struct Event_Selector_KQueue *data = _data;
 
     close_internal(data);
 
     free(data);
 }
 
-size_t Event_Backend_KQueue_Type_size(const void *data)
+size_t Event_Selector_KQueue_Type_size(const void *data)
 {
-    return sizeof(struct Event_Backend_KQueue);
+    return sizeof(struct Event_Selector_KQueue);
 }
 
-static const rb_data_type_t Event_Backend_KQueue_Type = {
+static const rb_data_type_t Event_Selector_KQueue_Type = {
     .wrap_struct_name = "Event::Backend::KQueue",
     .function = {
-        .dmark = Event_Backend_KQueue_Type_mark,
-        .dfree = Event_Backend_KQueue_Type_free,
-        .dsize = Event_Backend_KQueue_Type_size,
+        .dmark = Event_Selector_KQueue_Type_mark,
+        .dfree = Event_Selector_KQueue_Type_free,
+        .dsize = Event_Selector_KQueue_Type_size,
     },
     .data = NULL,
     .flags = RUBY_TYPED_FREE_IMMEDIATELY,
 };
 
-VALUE Event_Backend_KQueue_allocate(VALUE self) {
-    struct Event_Backend_KQueue *data = NULL;
-    VALUE instance = TypedData_Make_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+VALUE Event_Selector_KQueue_allocate(VALUE self) {
+    struct Event_Selector_KQueue *data = NULL;
+    VALUE instance = TypedData_Make_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 
-    data->loop = Qnil;
+    Event_Selector_initialize(&data->backend, Qnil);
     data->descriptor = -1;
 
     return instance;
 }
 
-VALUE Event_Backend_KQueue_initialize(VALUE self, VALUE loop) {
-    struct Event_Backend_KQueue *data = NULL;
-    TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+VALUE Event_Selector_KQueue_initialize(VALUE self, VALUE loop) {
+    struct Event_Selector_KQueue *data = NULL;
+    TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 
-    data->loop = loop;
+    Event_Selector_initialize(&data->backend, loop);
     int result = kqueue();
 
     if (result == -1) {
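
The KQueue selector no longer stores its own loop fiber; it embeds a shared struct Event_Selector as its first member and delegates marking and initialization to the common selector.h helpers. That header is not part of this diff; a plausible shape, inferred only from the fields and functions the diff actually uses (backend.loop, backend.ready, Event_Selector_mark, Event_Selector_initialize):

    // Sketch (assumption): the shared base embedded by every selector.
    // Field types are guesses; only the names are grounded in this diff.
    struct Event_Selector {
        VALUE loop;  // the event loop fiber, marked via Event_Selector_mark
        int ready;   // whether the fiber run-queue produced work this iteration
        // ...plus the queue manipulated by Event_Selector_queue_push/flush...
    };
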
@@ -104,17 +103,60 @@ VALUE Event_Backend_KQueue_initialize(VALUE self, VALUE loop) {
     return self;
 }
 
-VALUE Event_Backend_KQueue_close(VALUE self) {
-    struct Event_Backend_KQueue *data = NULL;
-    TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+VALUE Event_Selector_KQueue_close(VALUE self) {
+    struct Event_Selector_KQueue *data = NULL;
+    TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 
     close_internal(data);
 
     return Qnil;
 }
 
+VALUE Event_Selector_KQueue_transfer(int argc, VALUE *argv, VALUE self)
+{
+    struct Event_Selector_KQueue *data = NULL;
+    TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
+
+    return Event_Selector_wait_and_transfer(&data->backend, argc, argv);
+}
+
+VALUE Event_Selector_KQueue_yield(VALUE self)
+{
+    struct Event_Selector_KQueue *data = NULL;
+    TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
+
+    Event_Selector_yield(&data->backend);
+
+    return Qnil;
+}
+
+VALUE Event_Selector_KQueue_push(VALUE self, VALUE fiber)
+{
+    struct Event_Selector_KQueue *data = NULL;
+    TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
+
+    Event_Selector_queue_push(&data->backend, fiber);
+
+    return Qnil;
+}
+
+VALUE Event_Selector_KQueue_raise(int argc, VALUE *argv, VALUE self)
+{
+    struct Event_Selector_KQueue *data = NULL;
+    TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
+
+    return Event_Selector_wait_and_raise(&data->backend, argc, argv);
+}
+
+VALUE Event_Selector_KQueue_ready_p(VALUE self) {
+    struct Event_Selector_KQueue *data = NULL;
+    TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
+
+    return data->backend.ready ? Qtrue : Qfalse;
+}
+
 struct process_wait_arguments {
-    struct Event_Backend_KQueue *data;
+    struct Event_Selector_KQueue *data;
     pid_t pid;
     int flags;
 };
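
These five new entry points expose fiber scheduling directly on the selector: transfer and raise suspend the calling fiber and hand control (or an exception) to another, push defers a fiber onto the run-queue, yield gives up the current slice, and ready? reports whether queued work is pending. The helpers live in the shared selector code, which this diff omits; their probable declarations, inferred from the call sites above (return types are guesses):

    // Sketch (assumption): shared helpers from selector.h, matching how the
    // KQueue methods above invoke them.
    VALUE Event_Selector_wait_and_transfer(struct Event_Selector *backend, int argc, VALUE *argv);
    VALUE Event_Selector_wait_and_raise(struct Event_Selector *backend, int argc, VALUE *argv);
    void Event_Selector_queue_push(struct Event_Selector *backend, VALUE fiber);
    void Event_Selector_yield(struct Event_Selector *backend);
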
@@ -160,9 +202,9 @@ static
 VALUE process_wait_transfer(VALUE _arguments) {
     struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
 
-    Event_Backend_transfer(arguments->data->loop);
+    Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
 
-    return Event_Backend_process_status_wait(arguments->pid);
+    return Event_Selector_process_status_wait(arguments->pid);
 }
 
 static
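
Event_Backend_transfer(fiber) becomes Event_Selector_fiber_transfer(fiber, argc, argv), so a single helper now covers both plain transfers and transfers that deliver values: passing (0, NULL) reproduces the old behaviour, while the select loop near the end of this file passes (1, &result) to hand the triggered kqueue filter to the waiting fiber. The inferred declaration (selector.h is not included in this diff):

    // Sketch (assumption): the variadic fiber transfer used throughout this file.
    VALUE Event_Selector_fiber_transfer(VALUE fiber, int argc, VALUE *argv);
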
@@ -174,9 +216,9 @@ VALUE process_wait_rescue(VALUE _arguments, VALUE exception) {
     rb_exc_raise(exception);
 }
 
-VALUE Event_Backend_KQueue_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
-    struct Event_Backend_KQueue *data = NULL;
-    TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+VALUE Event_Selector_KQueue_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
+    struct Event_Selector_KQueue *data = NULL;
+    TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 
     struct process_wait_arguments process_wait_arguments = {
         .data = data,
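
process_wait changes only by the rename: if the child is still running (the `waiting` flag set by unshown code between these hunks, presumably a kqueue process-exit registration), the fiber transfers to the loop and reaps the status once woken; otherwise it reaps immediately. Event_Selector_process_status_wait lives in the shared selector code and is not shown here; a simplified stand-in illustrating the non-blocking reap it must perform (not the gem's implementation, which would return a Process::Status):

    #include <sys/wait.h>
    
    // Sketch (assumption): poll a child without blocking; Qnil while running.
    static VALUE process_status_wait_sketch(pid_t pid) {
        int status = 0;
        pid_t result = waitpid(pid, &status, WNOHANG);
    
        if (result < 0) rb_sys_fail("waitpid");
        if (result == 0) return Qnil; // still running
    
        return INT2NUM(status); // simplified: raw status word, not Process::Status
    }
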
@@ -189,7 +231,7 @@ VALUE Event_Backend_KQueue_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
     if (waiting) {
         return rb_rescue(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_rescue, (VALUE)&process_wait_arguments);
     } else {
-        return Event_Backend_process_status_wait(process_wait_arguments.pid);
+        return Event_Selector_process_status_wait(process_wait_arguments.pid);
     }
 }
 
@@ -198,7 +240,7 @@ int io_add_filters(int descriptor, int ident, int events, VALUE fiber) {
     int count = 0;
     struct kevent kevents[2] = {0};
 
-    if (events & READABLE) {
+    if (events & EVENT_READABLE) {
         kevents[count].ident = ident;
         kevents[count].filter = EVFILT_READ;
         kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
@@ -213,7 +255,7 @@ int io_add_filters(int descriptor, int ident, int events, VALUE fiber) {
         count++;
     }
 
-    if (events & WRITABLE) {
+    if (events & EVENT_WRITABLE) {
         kevents[count].ident = ident;
         kevents[count].filter = EVFILT_WRITE;
         kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
@@ -235,7 +277,7 @@ void io_remove_filters(int descriptor, int ident, int events) {
     int count = 0;
     struct kevent kevents[2] = {0};
 
-    if (events & READABLE) {
+    if (events & EVENT_READABLE) {
         kevents[count].ident = ident;
         kevents[count].filter = EVFILT_READ;
         kevents[count].flags = EV_DELETE;
@@ -243,7 +285,7 @@ void io_remove_filters(int descriptor, int ident, int events) {
         count++;
     }
 
-    if (events & WRITABLE) {
+    if (events & EVENT_WRITABLE) {
         kevents[count].ident = ident;
         kevents[count].filter = EVFILT_WRITE;
         kevents[count].flags = EV_DELETE;
@@ -255,7 +297,7 @@ void io_remove_filters(int descriptor, int ident, int events) {
 }
 
 struct io_wait_arguments {
-    struct Event_Backend_KQueue *data;
+    struct Event_Selector_KQueue *data;
     int events;
     int descriptor;
 };
@@ -271,8 +313,8 @@ VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
 
 static inline
 int events_from_kqueue_filter(int filter) {
-    if (filter == EVFILT_READ) return READABLE;
-    if (filter == EVFILT_WRITE) return WRITABLE;
+    if (filter == EVFILT_READ) return EVENT_READABLE;
+    if (filter == EVFILT_WRITE) return EVENT_WRITABLE;
 
     return 0;
 }
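
The renamed constants round-trip between the selector's event mask and kqueue's filter model: io_add_filters splits the mask into up to two one-shot kevents, and events_from_kqueue_filter maps whichever filter fired back into the mask. For example, assuming EVENT_READABLE and EVENT_WRITABLE are distinct bits as their use in io_add_filters implies:

    int mask = EVENT_READABLE | EVENT_WRITABLE;      // registers two kevents
    int fired = events_from_kqueue_filter(EVFILT_READ);
    // fired == EVENT_READABLE; any unrecognized filter maps to 0.
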
@@ -281,16 +323,16 @@ static
 VALUE io_wait_transfer(VALUE _arguments) {
     struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
 
-    VALUE result = Event_Backend_transfer(arguments->data->loop);
+    VALUE result = Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
 
     return INT2NUM(events_from_kqueue_filter(RB_NUM2INT(result)));
 }
 
-VALUE Event_Backend_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
-    struct Event_Backend_KQueue *data = NULL;
-    TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+VALUE Event_Selector_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
+    struct Event_Selector_KQueue *data = NULL;
+    TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 
-    int descriptor = RB_NUM2INT(rb_funcall(io, id_fileno, 0));
+    int descriptor = Event_Selector_io_descriptor(io);
 
     struct io_wait_arguments io_wait_arguments = {
         .events = io_add_filters(data->descriptor, descriptor, RB_NUM2INT(events), fiber),
@@ -301,6 +343,8 @@ VALUE Event_Backend_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
     return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
 }
 
+#ifdef HAVE_RUBY_IO_BUFFER_H
+
 struct io_read_arguments {
     VALUE self;
     VALUE fiber;
@@ -311,7 +355,6 @@ struct io_read_arguments {
     int descriptor;
 
     VALUE buffer;
-    size_t offset;
     size_t length;
 };
 
@@ -319,46 +362,47 @@ static
 VALUE io_read_loop(VALUE _arguments) {
     struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
 
-    size_t offset = arguments->offset;
+    void *base;
+    size_t size;
+    rb_io_buffer_get_mutable(arguments->buffer, &base, &size);
+
+    size_t offset = 0;
     size_t length = arguments->length;
-    size_t total = 0;
 
     while (length > 0) {
-        char *buffer = Event_Backend_resize_to_capacity(arguments->buffer, offset, length);
-        ssize_t result = read(arguments->descriptor, buffer+offset, length);
+        size_t maximum_size = size - offset;
+        ssize_t result = read(arguments->descriptor, (char*)base+offset, maximum_size);
 
-        if (result >= 0) {
+        if (result == 0) {
+            break;
+        } else if (result > 0) {
             offset += result;
             length -= result;
-            total += result;
         } else if (errno == EAGAIN || errno == EWOULDBLOCK) {
-            Event_Backend_KQueue_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(READABLE));
+            Event_Selector_KQueue_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(READABLE));
        } else {
-            rb_sys_fail("Event_Backend_KQueue_io_read");
+            rb_sys_fail("Event_Selector_KQueue_io_read");
         }
     }
 
-    Event_Backend_resize_to_fit(arguments->buffer, arguments->offset, arguments->length);
-
-    return SIZET2NUM(total);
+    return SIZET2NUM(offset);
 }
 
 static
 VALUE io_read_ensure(VALUE _arguments) {
     struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
 
-    Event_Backend_nonblock_restore(arguments->descriptor, arguments->flags);
+    Event_Selector_nonblock_restore(arguments->descriptor, arguments->flags);
 
     return Qnil;
 }
 
-VALUE Event_Backend_KQueue_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _offset, VALUE _length) {
-    struct Event_Backend_KQueue *data = NULL;
-    TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+VALUE Event_Selector_KQueue_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+    struct Event_Selector_KQueue *data = NULL;
+    TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 
-    int descriptor = RB_NUM2INT(rb_funcall(io, id_fileno, 0));
+    int descriptor = Event_Selector_io_descriptor(io);
 
-    size_t offset = NUM2SIZET(_offset);
     size_t length = NUM2SIZET(_length);
 
     struct io_read_arguments io_read_arguments = {
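
io_read drops both the offset parameter and the old resizable-string helpers: the destination is now a Ruby IO::Buffer (this whole section only compiles when HAVE_RUBY_IO_BUFFER_H is detected), its mutable base/size pair is borrowed once up front, result == 0 is treated as EOF, and the return value is the byte count actually read. A hedged caller-side sketch; selector, fiber and io are hypothetical VALUEs, and IO::Buffer.new is assumed from the Ruby 3.1 development API:

    // Sketch: driving the new 4-argument io_read from C.
    VALUE buffer = rb_funcall(rb_path2class("IO::Buffer"), rb_intern("new"), 1, INT2NUM(1024));
    VALUE bytes_read = rb_funcall(selector, rb_intern("io_read"), 4, fiber, io, buffer, INT2NUM(1024));
    // bytes_read is the number of bytes copied into buffer before EOF or completion.
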
@@ -366,10 +410,9 @@ VALUE Event_Backend_KQueue_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _offset, VALUE _length) {
         .fiber = fiber,
         .io = io,
 
-        .flags = Event_Backend_nonblock_set(descriptor),
+        .flags = Event_Selector_nonblock_set(descriptor),
         .descriptor = descriptor,
         .buffer = buffer,
-        .offset = offset,
         .length = length,
     };
 
@@ -386,7 +429,6 @@ struct io_write_arguments {
     int descriptor;
 
     VALUE buffer;
-    size_t offset;
     size_t length;
 };
 
@@ -394,44 +436,48 @@ static
 VALUE io_write_loop(VALUE _arguments) {
     struct io_write_arguments *arguments = (struct io_write_arguments *)_arguments;
 
-    size_t offset = arguments->offset;
+    const void *base;
+    size_t size;
+    rb_io_buffer_get_immutable(arguments->buffer, &base, &size);
+
+    size_t offset = 0;
     size_t length = arguments->length;
-    size_t total = 0;
+
+    if (length > size) {
+        rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
+    }
 
     while (length > 0) {
-        char *buffer = Event_Backend_verify_size(arguments->buffer, offset, length);
-        ssize_t result = write(arguments->descriptor, buffer+offset, length);
+        ssize_t result = write(arguments->descriptor, (char*)base+offset, length);
 
         if (result >= 0) {
-            length -= result;
             offset += result;
-            total += result;
+            length -= result;
         } else if (errno == EAGAIN || errno == EWOULDBLOCK) {
-            Event_Backend_KQueue_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(WRITABLE));
+            Event_Selector_KQueue_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(WRITABLE));
         } else {
-            rb_sys_fail("Event_Backend_KQueue_io_write");
+            rb_sys_fail("Event_Selector_KQueue_io_write");
         }
     }
 
-    return SIZET2NUM(total);
+    return SIZET2NUM(offset);
 };
 
 static
 VALUE io_write_ensure(VALUE _arguments) {
     struct io_write_arguments *arguments = (struct io_write_arguments *)_arguments;
 
-    Event_Backend_nonblock_restore(arguments->descriptor, arguments->flags);
+    Event_Selector_nonblock_restore(arguments->descriptor, arguments->flags);
 
     return Qnil;
 };
 
-VALUE Event_Backend_KQueue_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _offset, VALUE _length) {
-    struct Event_Backend_KQueue *data = NULL;
-    TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+VALUE Event_Selector_KQueue_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+    struct Event_Selector_KQueue *data = NULL;
+    TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
 
-    int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
+    int descriptor = Event_Selector_io_descriptor(io);
 
-    size_t offset = NUM2SIZET(_offset);
     size_t length = NUM2SIZET(_length);
 
     struct io_write_arguments io_write_arguments = {
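
io_write is reworked symmetrically: it borrows an immutable base/size pair from the buffer, validates the requested length once up front instead of re-verifying inside the loop, and returns the bytes written so far (offset) rather than a separate total counter. The new guard fails fast on over-long requests; a hypothetical interaction, assuming a 10-byte IO::Buffer bound to `buffer`:

    // selector.io_write(fiber, io, buffer, 20)
    //   => raises RuntimeError, "Length exceeds size of buffer!"
    // selector.io_write(fiber, io, buffer, 8)
    //   => 8, after as many write(2) calls as the descriptor required
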
@@ -439,16 +485,17 @@ VALUE Event_Backend_KQueue_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _offset, VALUE _length) {
         .fiber = fiber,
         .io = io,
 
-        .flags = Event_Backend_nonblock_set(descriptor),
+        .flags = Event_Selector_nonblock_set(descriptor),
         .descriptor = descriptor,
         .buffer = buffer,
-        .offset = offset,
         .length = length,
     };
 
     return rb_ensure(io_write_loop, (VALUE)&io_write_arguments, io_write_ensure, (VALUE)&io_write_arguments);
 }
 
+#endif
+
 static
 struct timespec * make_timeout(VALUE duration, struct timespec * storage) {
     if (duration == Qnil) {
@@ -481,7 +528,7 @@ int timeout_nonblocking(struct timespec * timespec) {
 }
 
 struct select_arguments {
-    struct Event_Backend_KQueue *data;
+    struct Event_Selector_KQueue *data;
 
     int count;
     struct kevent events[KQUEUE_MAX_EVENTS];
@@ -517,9 +564,11 @@ void select_internal_with_gvl(struct select_arguments *arguments) {
     }
 }
 
-VALUE Event_Backend_KQueue_select(VALUE self, VALUE duration) {
-    struct Event_Backend_KQueue *data = NULL;
-    TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+VALUE Event_Selector_KQueue_select(VALUE self, VALUE duration) {
+    struct Event_Selector_KQueue *data = NULL;
+    TypedData_Get_Struct(self, struct Event_Selector_KQueue, &Event_Selector_KQueue_Type, data);
+
+    int ready = Event_Selector_queue_flush(&data->backend);
 
     struct select_arguments arguments = {
         .data = data,
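
select now drains the fiber run-queue before polling: Event_Selector_queue_flush resumes everything deferred via push and reports whether any fiber ran. That report feeds the !ready check in the next hunk, which stops the selector from sleeping in kevent() for the full duration while freshly readied fibers are still waiting to run. A sketch of the plausible flush loop (the real implementation lives in the shared selector code; queue_pop_sketch is a hypothetical helper returning Qnil when the queue is empty):

    // Sketch (assumption): resume each queued fiber once, report activity.
    static int queue_flush_sketch(struct Event_Selector *backend) {
        int count = 0;
        VALUE fiber;
    
        while ((fiber = queue_pop_sketch(backend)) != Qnil) {
            Event_Selector_fiber_transfer(fiber, 0, NULL);
            count += 1;
        }
    
        return count > 0;
    }
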
@@ -541,7 +590,7 @@ VALUE Event_Backend_KQueue_select(VALUE self, VALUE duration) {
     select_internal_with_gvl(&arguments);
 
     // If there were no pending events, if we have a timeout, wait for more events:
-    if (arguments.count == 0) {
+    if (!ready && arguments.count == 0) {
         arguments.timeout = make_timeout(duration, &arguments.storage);
 
         if (!timeout_nonblocking(arguments.timeout)) {
@@ -555,25 +604,34 @@ VALUE Event_Backend_KQueue_select(VALUE self, VALUE duration) {
         VALUE fiber = (VALUE)arguments.events[i].udata;
         VALUE result = INT2NUM(arguments.events[i].filter);
 
-        Event_Backend_transfer_result(fiber, result);
+        Event_Selector_fiber_transfer(fiber, 1, &result);
     }
 
     return INT2NUM(arguments.count);
 }
 
-void Init_Event_Backend_KQueue(VALUE Event_Backend) {
-    id_fileno = rb_intern("fileno");
+void Init_Event_Selector_KQueue(VALUE Event_Selector) {
+    Event_Selector_KQueue = rb_define_class_under(Event_Selector, "KQueue", rb_cObject);
+
+    rb_define_alloc_func(Event_Selector_KQueue, Event_Selector_KQueue_allocate);
+    rb_define_method(Event_Selector_KQueue, "initialize", Event_Selector_KQueue_initialize, 1);
+
+    rb_define_method(Event_Selector_KQueue, "transfer", Event_Selector_KQueue_transfer, -1);
+    rb_define_method(Event_Selector_KQueue, "yield", Event_Selector_KQueue_yield, 0);
+    rb_define_method(Event_Selector_KQueue, "push", Event_Selector_KQueue_push, 1);
+    rb_define_method(Event_Selector_KQueue, "raise", Event_Selector_KQueue_raise, -1);
+
+    rb_define_method(Event_Selector_KQueue, "ready?", Event_Selector_KQueue_ready_p, 0);
 
-    Event_Backend_KQueue = rb_define_class_under(Event_Backend, "KQueue", rb_cObject);
+    rb_define_method(Event_Selector_KQueue, "select", Event_Selector_KQueue_select, 1);
+    rb_define_method(Event_Selector_KQueue, "close", Event_Selector_KQueue_close, 0);
 
-    rb_define_alloc_func(Event_Backend_KQueue, Event_Backend_KQueue_allocate);
-    rb_define_method(Event_Backend_KQueue, "initialize", Event_Backend_KQueue_initialize, 1);
-    rb_define_method(Event_Backend_KQueue, "select", Event_Backend_KQueue_select, 1);
-    rb_define_method(Event_Backend_KQueue, "close", Event_Backend_KQueue_close, 0);
+    rb_define_method(Event_Selector_KQueue, "io_wait", Event_Selector_KQueue_io_wait, 3);
 
-    rb_define_method(Event_Backend_KQueue, "io_wait", Event_Backend_KQueue_io_wait, 3);
-    rb_define_method(Event_Backend_KQueue, "io_read", Event_Backend_KQueue_io_read, 5);
-    rb_define_method(Event_Backend_KQueue, "io_write", Event_Backend_KQueue_io_write, 5);
+#ifdef HAVE_RUBY_IO_BUFFER_H
+    rb_define_method(Event_Selector_KQueue, "io_read", Event_Selector_KQueue_io_read, 4);
+    rb_define_method(Event_Selector_KQueue, "io_write", Event_Selector_KQueue_io_write, 4);
+#endif
 
-    rb_define_method(Event_Backend_KQueue, "process_wait", Event_Backend_KQueue_process_wait, 3);
+    rb_define_method(Event_Selector_KQueue, "process_wait", Event_Selector_KQueue_process_wait, 3);
 }
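
The initializer now registers the full scheduling surface, with io_read and io_write only defined when the IO::Buffer headers were available at build time. The -1 arities route transfer and raise through Ruby's variadic C calling convention, which is what lets them forward arbitrary arguments to Event_Selector_wait_and_transfer and Event_Selector_wait_and_raise:

    // An arity of -1 in rb_define_method selects this C signature:
    //     VALUE method(int argc, VALUE *argv, VALUE self);
    // so selector.transfer(a, b) arrives with argc == 2 and argv == {a, b}.
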