event 0.4.2 → 0.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -18,6 +18,16 @@
  // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  // THE SOFTWARE.

+ #include <ruby.h>
+ #include <ruby/thread.h>
+ #include <ruby/io.h>
+
+ #ifdef HAVE_RUBY_IO_BUFFER_H
+ #include <ruby/io/buffer.h>
+ #endif
+
+ #include <time.h>
+
  enum Event {
  READABLE = 1,
  PRIORITY = 2,
@@ -26,4 +36,77 @@ enum Event {
  HANGUP = 16
  };

- #include <ruby/thread.h>
+ void Init_Event_Backend();
+
+ #ifdef HAVE__RB_FIBER_TRANSFER
+ #define Event_Backend_fiber_transfer(fiber) rb_fiber_transfer(fiber, 0, NULL)
+ #define Event_Backend_fiber_transfer_result(fiber, argument) rb_fiber_transfer(fiber, 1, &argument)
+ #else
+ VALUE Event_Backend_fiber_transfer(VALUE fiber);
+ VALUE Event_Backend_fiber_transfer_result(VALUE fiber, VALUE argument);
+ #endif
+
+ #ifdef HAVE_RB_IO_DESCRIPTOR
+ #define Event_Backend_io_descriptor(io) rb_io_descriptor(io)
+ #else
+ int Event_Backend_io_descriptor(VALUE io);
+ #endif
+
+ #ifdef HAVE_RB_PROCESS_STATUS_WAIT
+ #define Event_Backend_process_status_wait(pid) rb_process_status_wait(pid)
+ #else
+ VALUE Event_Backend_process_status_wait(rb_pid_t pid);
+ #endif
+
+ int Event_Backend_nonblock_set(int file_descriptor);
+ void Event_Backend_nonblock_restore(int file_descriptor, int flags);
+
+ struct Event_Backend_Queue {
+ struct Event_Backend_Queue *behind;
+ struct Event_Backend_Queue *infront;
+
+ VALUE fiber;
+ };
+
+ struct Event_Backend {
+ VALUE loop;
+
+ // Append to waiting.
+ struct Event_Backend_Queue *waiting;
+ // Process from ready.
+ struct Event_Backend_Queue *ready;
+ };
+
+ inline
+ void Event_Backend_initialize(struct Event_Backend *backend, VALUE loop) {
+ backend->loop = loop;
+ backend->waiting = NULL;
+ backend->ready = NULL;
+ }
+
+ inline
+ void Event_Backend_mark(struct Event_Backend *backend) {
+ rb_gc_mark(backend->loop);
+
+ struct Event_Backend_Queue *ready = backend->ready;
+ while (ready) {
+ rb_gc_mark(ready->fiber);
+ ready = ready->behind;
+ }
+ }
+
+ void Event_Backend_wait_and_transfer(struct Event_Backend *backend, VALUE fiber);
+
+ inline
+ void Event_Backend_defer(struct Event_Backend *backend)
+ {
+ Event_Backend_wait_and_transfer(backend, backend->loop);
+ }
+
+ void Event_Backend_ready_pop(struct Event_Backend *backend);
+
+ void Event_Backend_elapsed_time(struct timespec* start, struct timespec* stop, struct timespec *duration);
+ void Event_Backend_current_time(struct timespec *time);
+
+ #define PRINTF_TIMESPEC "%lld.%.9ld"
+ #define PRINTF_TIMESPEC_ARGS(ts) (long long)((ts).tv_sec), (ts).tv_nsec
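Note: the header above only declares Event_Backend_wait_and_transfer and Event_Backend_ready_pop; their definitions live in a shared backend source file that this diff does not show. As a rough sketch only (a FIFO-style hand-off is assumed here, and the released implementation may order or batch things differently), the pair behaves roughly as follows, using the Event_Backend_Queue nodes and Event_Backend_fiber_transfer helper declared above:

// Sketch, not part of the diff: record the current fiber as waiting, then switch away.
void Event_Backend_wait_and_transfer(struct Event_Backend *backend, VALUE fiber)
{
	// The node lives on the suspended fiber's stack, so no heap allocation is needed.
	struct Event_Backend_Queue waiting = {
		.behind = backend->waiting,
		.infront = NULL,
		.fiber = rb_fiber_current(),
	};

	if (backend->waiting) backend->waiting->infront = &waiting;
	backend->waiting = &waiting;

	Event_Backend_fiber_transfer(fiber);
}

// Sketch, not part of the diff: resume every fiber that called defer/transfer
// since the last select, so deferred work runs before the backend blocks.
void Event_Backend_ready_pop(struct Event_Backend *backend)
{
	backend->ready = backend->waiting;
	backend->waiting = NULL;

	struct Event_Backend_Queue *ready = backend->ready;
	while (ready) {
		struct Event_Backend_Queue *behind = ready->behind;
		Event_Backend_fiber_transfer(ready->fiber);
		ready = behind;
	}

	backend->ready = NULL;
}

Event_Backend_mark above walks the ready list via behind, which is why the sketch traverses in that direction; both epoll and kqueue select call Event_Backend_ready_pop before deciding whether to block.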
@@ -25,20 +25,21 @@
  #include <time.h>
  #include <errno.h>

+ #include "pidfd.c"
+
  static VALUE Event_Backend_EPoll = Qnil;
- static ID id_fileno, id_transfer;

  enum {EPOLL_MAX_EVENTS = 64};

  struct Event_Backend_EPoll {
- VALUE loop;
+ struct Event_Backend backend;
  int descriptor;
  };

  void Event_Backend_EPoll_Type_mark(void *_data)
  {
  struct Event_Backend_EPoll *data = _data;
- rb_gc_mark(data->loop);
+ Event_Backend_mark(&data->backend);
  }

  static
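The included "pidfd.c" is a new file that this diff does not show. On Linux it is presumably a thin wrapper around the pidfd_open(2) system call, which glibc did not yet expose as a library function; a minimal sketch of such a wrapper (the syscall number 434 is the upstream Linux value, but the released file may differ):

#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

#ifndef __NR_pidfd_open
#define __NR_pidfd_open 434
#endif

// Returns a descriptor that becomes readable when the process exits, which is
// what Event_Backend_EPoll_process_wait below registers with epoll.
static int pidfd_open(pid_t pid, unsigned int flags)
{
	return syscall(__NR_pidfd_open, pid, flags);
}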
@@ -78,7 +79,7 @@ VALUE Event_Backend_EPoll_allocate(VALUE self) {
  struct Event_Backend_EPoll *data = NULL;
  VALUE instance = TypedData_Make_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);

- data->loop = Qnil;
+ Event_Backend_initialize(&data->backend, Qnil);
  data->descriptor = -1;

  return instance;
@@ -88,7 +89,7 @@ VALUE Event_Backend_EPoll_initialize(VALUE self, VALUE loop) {
  struct Event_Backend_EPoll *data = NULL;
  TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);

- data->loop = loop;
+ Event_Backend_initialize(&data->backend, loop);
  int result = epoll_create1(EPOLL_CLOEXEC);

  if (result == -1) {
@@ -111,6 +112,87 @@ VALUE Event_Backend_EPoll_close(VALUE self) {
  return Qnil;
  }

+ VALUE Event_Backend_EPoll_transfer(VALUE self, VALUE fiber)
+ {
+ struct Event_Backend_EPoll *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
+
+ Event_Backend_wait_and_transfer(&data->backend, fiber);
+
+ return Qnil;
+ }
+
+ VALUE Event_Backend_EPoll_defer(VALUE self)
+ {
+ struct Event_Backend_EPoll *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
+
+ Event_Backend_defer(&data->backend);
+
+ return Qnil;
+ }
+
+ VALUE Event_Backend_EPoll_ready_p(VALUE self) {
+ struct Event_Backend_EPoll *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
+
+ return data->backend.ready ? Qtrue : Qfalse;
+ }
+
+ struct process_wait_arguments {
+ struct Event_Backend_EPoll *data;
+ pid_t pid;
+ int flags;
+ int descriptor;
+ };
+
+ static
+ VALUE process_wait_transfer(VALUE _arguments) {
+ struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
+
+ Event_Backend_fiber_transfer(arguments->data->backend.loop);
+
+ return Event_Backend_process_status_wait(arguments->pid);
+ }
+
+ static
+ VALUE process_wait_ensure(VALUE _arguments) {
+ struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
+
+ // epoll_ctl(arguments->data->descriptor, EPOLL_CTL_DEL, arguments->descriptor, NULL);
+
+ close(arguments->descriptor);
+
+ return Qnil;
+ }
+
+ VALUE Event_Backend_EPoll_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
+ struct Event_Backend_EPoll *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);
+
+ struct process_wait_arguments process_wait_arguments = {
+ .data = data,
+ .pid = NUM2PIDT(pid),
+ .flags = NUM2INT(flags),
+ };
+
+ process_wait_arguments.descriptor = pidfd_open(process_wait_arguments.pid, 0);
+ rb_update_max_fd(process_wait_arguments.descriptor);
+
+ struct epoll_event event = {
+ .events = EPOLLIN|EPOLLRDHUP|EPOLLONESHOT,
+ .data = {.ptr = (void*)fiber},
+ };
+
+ int result = epoll_ctl(data->descriptor, EPOLL_CTL_ADD, process_wait_arguments.descriptor, &event);
+
+ if (result == -1) {
+ rb_sys_fail("epoll_ctl(process_wait)");
+ }
+
+ return rb_ensure(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_ensure, (VALUE)&process_wait_arguments);
+ }
+
  static inline
  uint32_t epoll_flags_from_events(int events) {
  uint32_t flags = 0;
@@ -161,7 +243,7 @@ static
  VALUE io_wait_transfer(VALUE _arguments) {
  struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;

- VALUE result = rb_funcall(arguments->data->loop, id_transfer, 0);
+ VALUE result = Event_Backend_fiber_transfer(arguments->data->backend.loop);

  return INT2NUM(events_from_epoll_flags(NUM2INT(result)));
  };
@@ -172,7 +254,7 @@ VALUE Event_Backend_EPoll_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE event

  struct epoll_event event = {0};

- int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
+ int descriptor = Event_Backend_io_descriptor(io);
  int duplicate = -1;

  event.events = epoll_flags_from_events(NUM2INT(events));
@@ -208,6 +290,153 @@ VALUE Event_Backend_EPoll_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE event
  return rb_ensure(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_ensure, (VALUE)&io_wait_arguments);
  }

+ #ifdef HAVE_RUBY_IO_BUFFER_H
+
+ struct io_read_arguments {
+ VALUE self;
+ VALUE fiber;
+ VALUE io;
+
+ int flags;
+
+ int descriptor;
+
+ VALUE buffer;
+ size_t length;
+ };
+
+ static
+ VALUE io_read_loop(VALUE _arguments) {
+ struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
+
+ void *base;
+ size_t size;
+ rb_io_buffer_get_mutable(arguments->buffer, &base, &size);
+
+ size_t offset = 0;
+ size_t length = arguments->length;
+
+ while (length > 0) {
+ size_t maximum_size = size - offset;
+ ssize_t result = read(arguments->descriptor, (char*)base+offset, maximum_size);
+
+ if (result == 0) {
+ break;
+ } else if (result > 0) {
+ offset += result;
+ length -= result;
+ } else if (errno == EAGAIN || errno == EWOULDBLOCK) {
+ Event_Backend_EPoll_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(READABLE));
+ } else {
+ rb_sys_fail("Event_Backend_EPoll_io_read");
+ }
+ }
+
+ return SIZET2NUM(offset);
+ }
+
+ static
+ VALUE io_read_ensure(VALUE _arguments) {
+ struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
+
+ Event_Backend_nonblock_restore(arguments->descriptor, arguments->flags);
+
+ return Qnil;
+ }
+
+ VALUE Event_Backend_EPoll_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+ int descriptor = Event_Backend_io_descriptor(io);
+
+ size_t length = NUM2SIZET(_length);
+
+ struct io_read_arguments io_read_arguments = {
+ .self = self,
+ .fiber = fiber,
+ .io = io,
+
+ .flags = Event_Backend_nonblock_set(descriptor),
+ .descriptor = descriptor,
+ .buffer = buffer,
+ .length = length,
+ };
+
+ return rb_ensure(io_read_loop, (VALUE)&io_read_arguments, io_read_ensure, (VALUE)&io_read_arguments);
+ }
+
+ struct io_write_arguments {
+ VALUE self;
+ VALUE fiber;
+ VALUE io;
+
+ int flags;
+
+ int descriptor;
+
+ VALUE buffer;
+ size_t length;
+ };
+
+ static
+ VALUE io_write_loop(VALUE _arguments) {
+ struct io_write_arguments *arguments = (struct io_write_arguments *)_arguments;
+
+ const void *base;
+ size_t size;
+ rb_io_buffer_get_immutable(arguments->buffer, &base, &size);
+
+ size_t offset = 0;
+ size_t length = arguments->length;
+
+ if (length > size) {
+ rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
+ }
+
+ while (length > 0) {
+ ssize_t result = write(arguments->descriptor, (char*)base+offset, length);
+
+ if (result >= 0) {
+ offset += result;
+ length -= result;
+ } else if (errno == EAGAIN || errno == EWOULDBLOCK) {
+ Event_Backend_EPoll_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(WRITABLE));
+ } else {
+ rb_sys_fail("Event_Backend_EPoll_io_write");
+ }
+ }
+
+ return SIZET2NUM(offset);
+ };
+
+ static
+ VALUE io_write_ensure(VALUE _arguments) {
+ struct io_write_arguments *arguments = (struct io_write_arguments *)_arguments;
+
+ Event_Backend_nonblock_restore(arguments->descriptor, arguments->flags);
+
+ return Qnil;
+ };
+
+ VALUE Event_Backend_EPoll_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+ int descriptor = Event_Backend_io_descriptor(io);
+
+ size_t length = NUM2SIZET(_length);
+
+ struct io_write_arguments io_write_arguments = {
+ .self = self,
+ .fiber = fiber,
+ .io = io,
+
+ .flags = Event_Backend_nonblock_set(descriptor),
+ .descriptor = descriptor,
+ .buffer = buffer,
+ .length = length,
+ };
+
+ return rb_ensure(io_write_loop, (VALUE)&io_write_arguments, io_write_ensure, (VALUE)&io_write_arguments);
+ }
+
+ #endif
+
  static
  int make_timeout(VALUE duration) {
  if (duration == Qnil) {
@@ -267,6 +496,8 @@ VALUE Event_Backend_EPoll_select(VALUE self, VALUE duration) {
  struct Event_Backend_EPoll *data = NULL;
  TypedData_Get_Struct(self, struct Event_Backend_EPoll, &Event_Backend_EPoll_Type, data);

+ Event_Backend_ready_pop(&data->backend);
+
  struct select_arguments arguments = {
  .data = data,
  .timeout = 0
@@ -277,7 +508,7 @@ VALUE Event_Backend_EPoll_select(VALUE self, VALUE duration) {
  if (arguments.count == 0) {
  arguments.timeout = make_timeout(duration);

- if (arguments.timeout != 0) {
+ if (!data->backend.ready && arguments.timeout != 0) {
  select_internal_without_gvl(&arguments);
  }
  }
@@ -288,22 +519,29 @@ VALUE Event_Backend_EPoll_select(VALUE self, VALUE duration) {

  // fprintf(stderr, "-> fiber=%p descriptor=%d\n", (void*)fiber, events[i].data.fd);

- rb_funcall(fiber, id_transfer, 1, result);
+ Event_Backend_fiber_transfer_result(fiber, result);
  }

  return INT2NUM(arguments.count);
  }

  void Init_Event_Backend_EPoll(VALUE Event_Backend) {
- id_fileno = rb_intern("fileno");
- id_transfer = rb_intern("transfer");
-
  Event_Backend_EPoll = rb_define_class_under(Event_Backend, "EPoll", rb_cObject);

  rb_define_alloc_func(Event_Backend_EPoll, Event_Backend_EPoll_allocate);
  rb_define_method(Event_Backend_EPoll, "initialize", Event_Backend_EPoll_initialize, 1);
+ rb_define_method(Event_Backend_EPoll, "transfer", Event_Backend_EPoll_transfer, 1);
+ rb_define_method(Event_Backend_EPoll, "defer", Event_Backend_EPoll_defer, 0);
+ rb_define_method(Event_Backend_EPoll, "ready?", Event_Backend_EPoll_ready_p, 0);
+ rb_define_method(Event_Backend_EPoll, "select", Event_Backend_EPoll_select, 1);
  rb_define_method(Event_Backend_EPoll, "close", Event_Backend_EPoll_close, 0);

  rb_define_method(Event_Backend_EPoll, "io_wait", Event_Backend_EPoll_io_wait, 3);
- rb_define_method(Event_Backend_EPoll, "select", Event_Backend_EPoll_select, 1);
+
+ #ifdef HAVE_RUBY_IO_BUFFER_H
+ rb_define_method(Event_Backend_EPoll, "io_read", Event_Backend_EPoll_io_read, 4);
+ rb_define_method(Event_Backend_EPoll, "io_write", Event_Backend_EPoll_io_write, 4);
+ #endif
+
+ rb_define_method(Event_Backend_EPoll, "process_wait", Event_Backend_EPoll_process_wait, 3);
  }
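Both backends now switch fibers through the Event_Backend_fiber_transfer helpers instead of rb_funcall(fiber, id_transfer, ...). On Rubies where rb_fiber_transfer is not exported (the HAVE__RB_FIBER_TRANSFER check in the shared header above), the non-macro fallback presumably still routes through the Ruby-level Fiber#transfer; a minimal sketch, not shown in this diff:

// Sketch of the fallback definitions; the released backend source may differ.
VALUE Event_Backend_fiber_transfer(VALUE fiber)
{
	return rb_funcall(fiber, rb_intern("transfer"), 0);
}

VALUE Event_Backend_fiber_transfer_result(VALUE fiber, VALUE argument)
{
	return rb_funcall(fiber, rb_intern("transfer"), 1, argument);
}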
@@ -24,21 +24,21 @@
  #include <sys/event.h>
  #include <sys/ioctl.h>
  #include <time.h>
+ #include <errno.h>

  static VALUE Event_Backend_KQueue = Qnil;
- static ID id_fileno, id_transfer;

  enum {KQUEUE_MAX_EVENTS = 64};

  struct Event_Backend_KQueue {
- VALUE loop;
+ struct Event_Backend backend;
  int descriptor;
  };

  void Event_Backend_KQueue_Type_mark(void *_data)
  {
  struct Event_Backend_KQueue *data = _data;
- rb_gc_mark(data->loop);
+ Event_Backend_mark(&data->backend);
  }

  static
@@ -78,7 +78,7 @@ VALUE Event_Backend_KQueue_allocate(VALUE self) {
  struct Event_Backend_KQueue *data = NULL;
  VALUE instance = TypedData_Make_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);

- data->loop = Qnil;
+ Event_Backend_initialize(&data->backend, Qnil);
  data->descriptor = -1;

  return instance;
@@ -88,7 +88,7 @@ VALUE Event_Backend_KQueue_initialize(VALUE self, VALUE loop) {
  struct Event_Backend_KQueue *data = NULL;
  TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);

- data->loop = loop;
+ Event_Backend_initialize(&data->backend, loop);
  int result = kqueue();

  if (result == -1) {
@@ -112,6 +112,113 @@ VALUE Event_Backend_KQueue_close(VALUE self) {
  return Qnil;
  }

+ VALUE Event_Backend_KQueue_transfer(VALUE self, VALUE fiber)
+ {
+ struct Event_Backend_KQueue *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+
+ Event_Backend_wait_and_transfer(&data->backend, fiber);
+
+ return Qnil;
+ }
+
+ VALUE Event_Backend_KQueue_defer(VALUE self)
+ {
+ struct Event_Backend_KQueue *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+
+ Event_Backend_defer(&data->backend);
+
+ return Qnil;
+ }
+
+ VALUE Event_Backend_KQueue_ready_p(VALUE self) {
+ struct Event_Backend_KQueue *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+
+ return data->backend.ready ? Qtrue : Qfalse;
+ }
+
+ struct process_wait_arguments {
+ struct Event_Backend_KQueue *data;
+ pid_t pid;
+ int flags;
+ };
+
+ static
+ int process_add_filters(int descriptor, int ident, VALUE fiber) {
+ struct kevent event = {0};
+
+ event.ident = ident;
+ event.filter = EVFILT_PROC;
+ event.flags = EV_ADD | EV_ENABLE | EV_ONESHOT;
+ event.fflags = NOTE_EXIT;
+ event.udata = (void*)fiber;
+
+ int result = kevent(descriptor, &event, 1, NULL, 0, NULL);
+
+ if (result == -1) {
+ // No such process - the process has probably already terminated:
+ if (errno == ESRCH) {
+ return 0;
+ }
+
+ rb_sys_fail("kevent(process_add_filters)");
+ }
+
+ return 1;
+ }
+
+ static
+ void process_remove_filters(int descriptor, int ident) {
+ struct kevent event = {0};
+
+ event.ident = ident;
+ event.filter = EVFILT_PROC;
+ event.flags = EV_DELETE;
+ event.fflags = NOTE_EXIT;
+
+ // Ignore the result.
+ kevent(descriptor, &event, 1, NULL, 0, NULL);
+ }
+
+ static
+ VALUE process_wait_transfer(VALUE _arguments) {
+ struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
+
+ Event_Backend_fiber_transfer(arguments->data->backend.loop);
+
+ return Event_Backend_process_status_wait(arguments->pid);
+ }
+
+ static
+ VALUE process_wait_rescue(VALUE _arguments, VALUE exception) {
+ struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
+
+ process_remove_filters(arguments->data->descriptor, arguments->pid);
+
+ rb_exc_raise(exception);
+ }
+
+ VALUE Event_Backend_KQueue_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
+ struct Event_Backend_KQueue *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+
+ struct process_wait_arguments process_wait_arguments = {
+ .data = data,
+ .pid = NUM2PIDT(pid),
+ .flags = RB_NUM2INT(flags),
+ };
+
+ int waiting = process_add_filters(data->descriptor, process_wait_arguments.pid, fiber);
+
+ if (waiting) {
+ return rb_rescue(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_rescue, (VALUE)&process_wait_arguments);
+ } else {
+ return Event_Backend_process_status_wait(process_wait_arguments.pid);
+ }
+ }
+
  static
  int io_add_filters(int descriptor, int ident, int events, VALUE fiber) {
  int count = 0;
@@ -143,7 +250,7 @@ int io_add_filters(int descriptor, int ident, int events, VALUE fiber) {
  int result = kevent(descriptor, kevents, count, NULL, 0, NULL);

  if (result == -1) {
- rb_sys_fail("kevent(register)");
+ rb_sys_fail("kevent(io_add_filters)");
  }

  return events;
@@ -186,7 +293,7 @@ VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
  io_remove_filters(arguments->data->descriptor, arguments->descriptor, arguments->events);

  rb_exc_raise(exception);
- };
+ }

  static inline
  int events_from_kqueue_filter(int filter) {
@@ -200,19 +307,19 @@ static
  VALUE io_wait_transfer(VALUE _arguments) {
  struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;

- VALUE result = rb_funcall(arguments->data->loop, id_transfer, 0);
+ VALUE result = Event_Backend_fiber_transfer(arguments->data->backend.loop);

- return INT2NUM(events_from_kqueue_filter(NUM2INT(result)));
- };
+ return INT2NUM(events_from_kqueue_filter(RB_NUM2INT(result)));
+ }

  VALUE Event_Backend_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
  struct Event_Backend_KQueue *data = NULL;
  TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);

- int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
+ int descriptor = Event_Backend_io_descriptor(io);

  struct io_wait_arguments io_wait_arguments = {
- .events = io_add_filters(data->descriptor, descriptor, NUM2INT(events), fiber),
+ .events = io_add_filters(data->descriptor, descriptor, RB_NUM2INT(events), fiber),
  .data = data,
  .descriptor = descriptor,
  };
@@ -220,6 +327,159 @@ VALUE Event_Backend_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE even
  return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
  }

+ #ifdef HAVE_RUBY_IO_BUFFER_H
+
+ struct io_read_arguments {
+ VALUE self;
+ VALUE fiber;
+ VALUE io;
+
+ int flags;
+
+ int descriptor;
+
+ VALUE buffer;
+ size_t length;
+ };
+
+ static
+ VALUE io_read_loop(VALUE _arguments) {
+ struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
+
+ void *base;
+ size_t size;
+ rb_io_buffer_get_mutable(arguments->buffer, &base, &size);
+
+ size_t offset = 0;
+ size_t length = arguments->length;
+
+ while (length > 0) {
+ size_t maximum_size = size - offset;
+ ssize_t result = read(arguments->descriptor, (char*)base+offset, maximum_size);
+
+ if (result == 0) {
+ break;
+ } else if (result > 0) {
+ offset += result;
+ length -= result;
+ } else if (errno == EAGAIN || errno == EWOULDBLOCK) {
+ Event_Backend_KQueue_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(READABLE));
+ } else {
+ rb_sys_fail("Event_Backend_KQueue_io_read");
+ }
+ }
+
+ return SIZET2NUM(offset);
+ }
+
+ static
+ VALUE io_read_ensure(VALUE _arguments) {
+ struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
+
+ Event_Backend_nonblock_restore(arguments->descriptor, arguments->flags);
+
+ return Qnil;
+ }
+
+ VALUE Event_Backend_KQueue_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+ struct Event_Backend_KQueue *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+
+ int descriptor = Event_Backend_io_descriptor(io);
+
+ size_t length = NUM2SIZET(_length);
+
+ struct io_read_arguments io_read_arguments = {
+ .self = self,
+ .fiber = fiber,
+ .io = io,
+
+ .flags = Event_Backend_nonblock_set(descriptor),
+ .descriptor = descriptor,
+ .buffer = buffer,
+ .length = length,
+ };
+
+ return rb_ensure(io_read_loop, (VALUE)&io_read_arguments, io_read_ensure, (VALUE)&io_read_arguments);
+ }
+
+ struct io_write_arguments {
+ VALUE self;
+ VALUE fiber;
+ VALUE io;
+
+ int flags;
+
+ int descriptor;
+
+ VALUE buffer;
+ size_t length;
+ };
+
+ static
+ VALUE io_write_loop(VALUE _arguments) {
+ struct io_write_arguments *arguments = (struct io_write_arguments *)_arguments;
+
+ const void *base;
+ size_t size;
+ rb_io_buffer_get_immutable(arguments->buffer, &base, &size);
+
+ size_t offset = 0;
+ size_t length = arguments->length;
+
+ if (length > size) {
+ rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
+ }
+
+ while (length > 0) {
+ ssize_t result = write(arguments->descriptor, (char*)base+offset, length);
+
+ if (result >= 0) {
+ offset += result;
+ length -= result;
+ } else if (errno == EAGAIN || errno == EWOULDBLOCK) {
+ Event_Backend_KQueue_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(WRITABLE));
+ } else {
+ rb_sys_fail("Event_Backend_KQueue_io_write");
+ }
+ }
+
+ return SIZET2NUM(offset);
+ };
+
+ static
+ VALUE io_write_ensure(VALUE _arguments) {
+ struct io_write_arguments *arguments = (struct io_write_arguments *)_arguments;
+
+ Event_Backend_nonblock_restore(arguments->descriptor, arguments->flags);
+
+ return Qnil;
+ };
+
+ VALUE Event_Backend_KQueue_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
+ struct Event_Backend_KQueue *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+
+ int descriptor = Event_Backend_io_descriptor(io);
+
+ size_t length = NUM2SIZET(_length);
+
+ struct io_write_arguments io_write_arguments = {
+ .self = self,
+ .fiber = fiber,
+ .io = io,
+
+ .flags = Event_Backend_nonblock_set(descriptor),
+ .descriptor = descriptor,
+ .buffer = buffer,
+ .length = length,
+ };
+
+ return rb_ensure(io_write_loop, (VALUE)&io_write_arguments, io_write_ensure, (VALUE)&io_write_arguments);
+ }
+
+ #endif
+
  static
  struct timespec * make_timeout(VALUE duration, struct timespec * storage) {
  if (duration == Qnil) {
@@ -292,6 +552,8 @@ VALUE Event_Backend_KQueue_select(VALUE self, VALUE duration) {
  struct Event_Backend_KQueue *data = NULL;
  TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);

+ Event_Backend_ready_pop(&data->backend);
+
  struct select_arguments arguments = {
  .data = data,
  .count = KQUEUE_MAX_EVENTS,
@@ -315,7 +577,7 @@ VALUE Event_Backend_KQueue_select(VALUE self, VALUE duration) {
  if (arguments.count == 0) {
  arguments.timeout = make_timeout(duration, &arguments.storage);

- if (!timeout_nonblocking(arguments.timeout)) {
+ if (!data->backend.ready && !timeout_nonblocking(arguments.timeout)) {
  arguments.count = KQUEUE_MAX_EVENTS;

  select_internal_without_gvl(&arguments);
@@ -325,22 +587,30 @@ VALUE Event_Backend_KQueue_select(VALUE self, VALUE duration) {
  for (int i = 0; i < arguments.count; i += 1) {
  VALUE fiber = (VALUE)arguments.events[i].udata;
  VALUE result = INT2NUM(arguments.events[i].filter);
- rb_funcall(fiber, id_transfer, 1, result);
+
+ Event_Backend_fiber_transfer_result(fiber, result);
  }

  return INT2NUM(arguments.count);
  }

  void Init_Event_Backend_KQueue(VALUE Event_Backend) {
- id_fileno = rb_intern("fileno");
- id_transfer = rb_intern("transfer");
-
  Event_Backend_KQueue = rb_define_class_under(Event_Backend, "KQueue", rb_cObject);

  rb_define_alloc_func(Event_Backend_KQueue, Event_Backend_KQueue_allocate);
  rb_define_method(Event_Backend_KQueue, "initialize", Event_Backend_KQueue_initialize, 1);
+ rb_define_method(Event_Backend_KQueue, "transfer", Event_Backend_KQueue_transfer, 1);
+ rb_define_method(Event_Backend_KQueue, "defer", Event_Backend_KQueue_defer, 0);
+ rb_define_method(Event_Backend_KQueue, "ready?", Event_Backend_KQueue_ready_p, 0);
+ rb_define_method(Event_Backend_KQueue, "select", Event_Backend_KQueue_select, 1);
  rb_define_method(Event_Backend_KQueue, "close", Event_Backend_KQueue_close, 0);

  rb_define_method(Event_Backend_KQueue, "io_wait", Event_Backend_KQueue_io_wait, 3);
- rb_define_method(Event_Backend_KQueue, "select", Event_Backend_KQueue_select, 1);
+
+ #ifdef HAVE_RUBY_IO_BUFFER_H
+ rb_define_method(Event_Backend_KQueue, "io_read", Event_Backend_KQueue_io_read, 4);
+ rb_define_method(Event_Backend_KQueue, "io_write", Event_Backend_KQueue_io_write, 4);
+ #endif
+
+ rb_define_method(Event_Backend_KQueue, "process_wait", Event_Backend_KQueue_process_wait, 3);
  }