io-event 1.2.3 → 1.3.0

@@ -20,11 +20,17 @@
 
  #include "kqueue.h"
  #include "selector.h"
+ #include "list.h"
+ #include "array.h"
 
  #include <sys/event.h>
  #include <sys/ioctl.h>
  #include <time.h>
  #include <errno.h>
+ #include <sys/wait.h>
+ #include <signal.h>
+
+ #include "../interrupt.h"
 
  enum {
  DEBUG = 0,
@@ -33,49 +39,144 @@ enum {
  DEBUG_IO_WAIT = 0
  };
 
+ #ifndef EVFILT_USER
+ #define IO_EVENT_SELECTOR_KQUEUE_USE_INTERRUPT
+ #endif
+
  static VALUE IO_Event_Selector_KQueue = Qnil;
 
  enum {KQUEUE_MAX_EVENTS = 64};
 
- struct IO_Event_Selector_KQueue {
+ // This represents an actual fiber waiting for a specific event.
+ struct IO_Event_Selector_KQueue_Waiting
+ {
+ struct IO_Event_List list;
+
+ // The events the fiber is waiting for.
+ enum IO_Event events;
+
+ // The events that are currently ready.
+ enum IO_Event ready;
+
+ // The fiber value itself.
+ VALUE fiber;
+ };
+
+ struct IO_Event_Selector_KQueue
+ {
  struct IO_Event_Selector backend;
  int descriptor;
-
  int blocked;
+
+ #ifdef IO_EVENT_SELECTOR_KQUEUE_USE_INTERRUPT
+ struct IO_Event_Interrupt interrupt;
+ #endif
+ struct IO_Event_Array descriptors;
+ };
+
+ // This represents zero or more fibers waiting for a specific descriptor.
+ struct IO_Event_Selector_KQueue_Descriptor
+ {
+ struct IO_Event_List list;
+
+ // The union of all events we are waiting for:
+ enum IO_Event waiting_events;
+
+ // The union of events we are registered for:
+ enum IO_Event registered_events;
+
+ // The events that are currently ready:
+ enum IO_Event ready_events;
  };
 
- void IO_Event_Selector_KQueue_Type_mark(void *_data)
+ static
+ void IO_Event_Selector_KQueue_Waiting_mark(struct IO_Event_List *_waiting)
+ {
+ struct IO_Event_Selector_KQueue_Waiting *waiting = (void*)_waiting;
+
+ if (waiting->fiber) {
+ rb_gc_mark_movable(waiting->fiber);
+ }
+ }
+
+ static
+ void IO_Event_Selector_KQueue_Descriptor_mark(void *_descriptor)
+ {
+ struct IO_Event_Selector_KQueue_Descriptor *descriptor = _descriptor;
+
+ IO_Event_List_immutable_each(&descriptor->list, IO_Event_Selector_KQueue_Waiting_mark);
+ }
+
+ static
+ void IO_Event_Selector_KQueue_Type_mark(void *_selector)
+ {
+ struct IO_Event_Selector_KQueue *selector = _selector;
+ IO_Event_Selector_mark(&selector->backend);
+ IO_Event_Array_each(&selector->descriptors, IO_Event_Selector_KQueue_Descriptor_mark);
+ }
+
+ static
+ void IO_Event_Selector_KQueue_Waiting_compact(struct IO_Event_List *_waiting)
+ {
+ struct IO_Event_Selector_KQueue_Waiting *waiting = (void*)_waiting;
+
+ if (waiting->fiber) {
+ waiting->fiber = rb_gc_location(waiting->fiber);
+ }
+ }
+
+ static
+ void IO_Event_Selector_KQueue_Descriptor_compact(void *_descriptor)
+ {
+ struct IO_Event_Selector_KQueue_Descriptor *descriptor = _descriptor;
+
+ IO_Event_List_immutable_each(&descriptor->list, IO_Event_Selector_KQueue_Waiting_compact);
+ }
+
+ static
+ void IO_Event_Selector_KQueue_Type_compact(void *_selector)
  {
- struct IO_Event_Selector_KQueue *data = _data;
- IO_Event_Selector_mark(&data->backend);
+ struct IO_Event_Selector_KQueue *selector = _selector;
+ IO_Event_Selector_compact(&selector->backend);
+ IO_Event_Array_each(&selector->descriptors, IO_Event_Selector_KQueue_Descriptor_compact);
  }
 
  static
- void close_internal(struct IO_Event_Selector_KQueue *data) {
- if (data->descriptor >= 0) {
- close(data->descriptor);
- data->descriptor = -1;
+ void close_internal(struct IO_Event_Selector_KQueue *selector)
+ {
+ if (selector->descriptor >= 0) {
+ close(selector->descriptor);
+ selector->descriptor = -1;
  }
  }
 
- void IO_Event_Selector_KQueue_Type_free(void *_data)
+ static
+ void IO_Event_Selector_KQueue_Type_free(void *_selector)
  {
- struct IO_Event_Selector_KQueue *data = _data;
+ struct IO_Event_Selector_KQueue *selector = _selector;
 
- close_internal(data);
+ close_internal(selector);
 
- free(data);
+ IO_Event_Array_free(&selector->descriptors);
+
+ free(selector);
  }
 
- size_t IO_Event_Selector_KQueue_Type_size(const void *data)
+ static
+ size_t IO_Event_Selector_KQueue_Type_size(const void *_selector)
  {
- return sizeof(struct IO_Event_Selector_KQueue);
+ const struct IO_Event_Selector_KQueue *selector = _selector;
+
+ return sizeof(struct IO_Event_Selector_KQueue)
+ + IO_Event_Array_memory_size(&selector->descriptors)
+ ;
  }
 
  static const rb_data_type_t IO_Event_Selector_KQueue_Type = {
  .wrap_struct_name = "IO_Event::Backend::KQueue",
  .function = {
  .dmark = IO_Event_Selector_KQueue_Type_mark,
+ .dcompact = IO_Event_Selector_KQueue_Type_compact,
  .dfree = IO_Event_Selector_KQueue_Type_free,
  .dsize = IO_Event_Selector_KQueue_Type_size,
  },
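For context, the new dcompact callback follows the standard pattern for compaction-aware TypedData objects in the Ruby C API: dmark uses rb_gc_mark_movable so referenced objects may be moved by the GC, and dcompact asks rb_gc_location for each object's new address. A minimal sketch of that pattern, separate from io-event (the Example struct and names are purely illustrative):

#include <ruby.h>

struct Example {
	VALUE object; // A Ruby object owned by this struct.
};

static void Example_mark(void *data) {
	struct Example *example = data;
	// Mark without pinning, so the GC is free to move the object:
	rb_gc_mark_movable(example->object);
}

static void Example_compact(void *data) {
	struct Example *example = data;
	// After compaction, ask the GC where the object ended up:
	example->object = rb_gc_location(example->object);
}

static const rb_data_type_t Example_type = {
	.wrap_struct_name = "Example",
	.function = {
		.dmark = Example_mark,
		.dcompact = Example_compact,
		.dfree = RUBY_DEFAULT_FREE,
	},
	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
};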
@@ -83,301 +184,387 @@ static const rb_data_type_t IO_Event_Selector_KQueue_Type = {
  .flags = RUBY_TYPED_FREE_IMMEDIATELY,
  };
 
+ inline static
+ struct IO_Event_Selector_KQueue_Descriptor * IO_Event_Selector_KQueue_Descriptor_lookup(struct IO_Event_Selector_KQueue *selector, uintptr_t descriptor)
+ {
+ struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor = IO_Event_Array_lookup(&selector->descriptors, descriptor);
+
+ if (!kqueue_descriptor) {
+ rb_sys_fail("IO_Event_Selector_KQueue_Descriptor_lookup:IO_Event_Array_lookup");
+ }
+
+ return kqueue_descriptor;
+ }
+
+ inline static
+ enum IO_Event events_from_kevent_filter(int filter)
+ {
+ switch (filter) {
+ case EVFILT_READ:
+ return IO_EVENT_READABLE;
+ case EVFILT_WRITE:
+ return IO_EVENT_WRITABLE;
+ case EVFILT_PROC:
+ return IO_EVENT_EXIT;
+ default:
+ return 0;
+ }
+ }
+
+ inline static
+ int IO_Event_Selector_KQueue_Descriptor_update(struct IO_Event_Selector_KQueue *selector, uintptr_t identifier, struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor)
+ {
+ int count = 0;
+ struct kevent kevents[3] = {0};
+
+ if (kqueue_descriptor->waiting_events & IO_EVENT_READABLE) {
+ kevents[count].ident = identifier;
+ kevents[count].filter = EVFILT_READ;
+ kevents[count].flags = EV_ADD | EV_ONESHOT;
+ kevents[count].udata = (void *)kqueue_descriptor;
+ // #ifdef EV_OOBAND
+ // if (events & IO_EVENT_PRIORITY) {
+ // kevents[count].flags |= EV_OOBAND;
+ // }
+ // #endif
+ count++;
+ }
+
+ if (kqueue_descriptor->waiting_events & IO_EVENT_WRITABLE) {
+ kevents[count].ident = identifier;
+ kevents[count].filter = EVFILT_WRITE;
+ kevents[count].flags = EV_ADD | EV_ONESHOT;
+ kevents[count].udata = (void *)kqueue_descriptor;
+ count++;
+ }
+
+ if (kqueue_descriptor->waiting_events & IO_EVENT_EXIT) {
+ kevents[count].ident = identifier;
+ kevents[count].filter = EVFILT_PROC;
+ kevents[count].flags = EV_ADD | EV_ONESHOT;
+ kevents[count].fflags = NOTE_EXIT;
+ kevents[count].udata = (void *)kqueue_descriptor;
+ count++;
+ }
+
+ if (count == 0) {
+ return 0;
+ }
+
+ int result = kevent(selector->descriptor, kevents, count, NULL, 0, NULL);
+
+ if (result == -1) {
+ return result;
+ }
+
+ kqueue_descriptor->registered_events = kqueue_descriptor->waiting_events;
+
+ return result;
+ }
+
+ inline static
+ int IO_Event_Selector_KQueue_Waiting_register(struct IO_Event_Selector_KQueue *selector, uintptr_t identifier, struct IO_Event_Selector_KQueue_Waiting *waiting)
+ {
+ struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor = IO_Event_Selector_KQueue_Descriptor_lookup(selector, identifier);
+
+ // We are waiting for these events:
+ kqueue_descriptor->waiting_events |= waiting->events;
+
+ int result = IO_Event_Selector_KQueue_Descriptor_update(selector, identifier, kqueue_descriptor);
+ if (result == -1) return -1;
+
+ IO_Event_List_prepend(&kqueue_descriptor->list, &waiting->list);
+
+ return result;
+ }
+
+ inline static
+ void IO_Event_Selector_KQueue_Waiting_cancel(struct IO_Event_Selector_KQueue_Waiting *waiting)
+ {
+ IO_Event_List_pop(&waiting->list);
+ waiting->fiber = 0;
+ }
+
+ void IO_Event_Selector_KQueue_Descriptor_initialize(void *element)
+ {
+ struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor = element;
+ IO_Event_List_initialize(&kqueue_descriptor->list);
+ kqueue_descriptor->waiting_events = 0;
+ kqueue_descriptor->registered_events = 0;
+ kqueue_descriptor->ready_events = 0;
+ }
+
+ void IO_Event_Selector_KQueue_Descriptor_free(void *element)
+ {
+ struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor = element;
+
+ IO_Event_List_free(&kqueue_descriptor->list);
+ }
+
  VALUE IO_Event_Selector_KQueue_allocate(VALUE self) {
- struct IO_Event_Selector_KQueue *data = NULL;
- VALUE instance = TypedData_Make_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+ struct IO_Event_Selector_KQueue *selector = NULL;
+ VALUE instance = TypedData_Make_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
- IO_Event_Selector_initialize(&data->backend, Qnil);
- data->descriptor = -1;
- data->blocked = 0;
+ IO_Event_Selector_initialize(&selector->backend, Qnil);
+ selector->descriptor = -1;
+ selector->blocked = 0;
+
+ selector->descriptors.element_initialize = IO_Event_Selector_KQueue_Descriptor_initialize;
+ selector->descriptors.element_free = IO_Event_Selector_KQueue_Descriptor_free;
+ IO_Event_Array_allocate(&selector->descriptors, 1024, sizeof(struct IO_Event_Selector_KQueue_Descriptor));
 
  return instance;
  }
 
+ #ifdef IO_EVENT_SELECTOR_KQUEUE_USE_INTERRUPT
+ void IO_Event_Interrupt_add(struct IO_Event_Interrupt *interrupt, struct IO_Event_Selector_KQueue *selector) {
+ int descriptor = IO_Event_Interrupt_descriptor(interrupt);
+
+ struct kevent kev = {
+ .filter = EVFILT_READ,
+ .ident = descriptor,
+ .flags = EV_ADD | EV_CLEAR,
+ };
+
+ int result = kevent(selector->descriptor, &kev, 1, NULL, 0, NULL);
+
+ if (result == -1) {
+ rb_sys_fail("IO_Event_Interrupt_add:kevent");
+ }
+ }
+ #endif
+
  VALUE IO_Event_Selector_KQueue_initialize(VALUE self, VALUE loop) {
- struct IO_Event_Selector_KQueue *data = NULL;
- TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+ struct IO_Event_Selector_KQueue *selector = NULL;
+ TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
- IO_Event_Selector_initialize(&data->backend, loop);
+ IO_Event_Selector_initialize(&selector->backend, loop);
  int result = kqueue();
 
  if (result == -1) {
  rb_sys_fail("IO_Event_Selector_KQueue_initialize:kqueue");
  } else {
+ // Make sure the descriptor is closed on exec.
  ioctl(result, FIOCLEX);
- data->descriptor = result;
 
- rb_update_max_fd(data->descriptor);
+ selector->descriptor = result;
+
+ rb_update_max_fd(selector->descriptor);
  }
 
+ #ifdef IO_EVENT_SELECTOR_KQUEUE_USE_INTERRUPT
+ IO_Event_Interrupt_open(&selector->interrupt);
+ IO_Event_Interrupt_add(&selector->interrupt, selector);
+ #endif
+
  return self;
  }
 
  VALUE IO_Event_Selector_KQueue_loop(VALUE self) {
- struct IO_Event_Selector_KQueue *data = NULL;
- TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+ struct IO_Event_Selector_KQueue *selector = NULL;
+ TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
- return data->backend.loop;
+ return selector->backend.loop;
  }
 
  VALUE IO_Event_Selector_KQueue_close(VALUE self) {
- struct IO_Event_Selector_KQueue *data = NULL;
- TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+ struct IO_Event_Selector_KQueue *selector = NULL;
+ TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
+
+ close_internal(selector);
 
- close_internal(data);
+ #ifdef IO_EVENT_SELECTOR_KQUEUE_USE_INTERRUPT
+ IO_Event_Interrupt_close(&selector->interrupt);
+ #endif
 
  return Qnil;
  }
 
  VALUE IO_Event_Selector_KQueue_transfer(VALUE self)
  {
- struct IO_Event_Selector_KQueue *data = NULL;
- TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+ struct IO_Event_Selector_KQueue *selector = NULL;
+ TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
- return IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
+ return IO_Event_Selector_fiber_transfer(selector->backend.loop, 0, NULL);
  }
 
  VALUE IO_Event_Selector_KQueue_resume(int argc, VALUE *argv, VALUE self)
  {
- struct IO_Event_Selector_KQueue *data = NULL;
- TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+ struct IO_Event_Selector_KQueue *selector = NULL;
+ TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
- return IO_Event_Selector_resume(&data->backend, argc, argv);
+ return IO_Event_Selector_resume(&selector->backend, argc, argv);
  }
 
  VALUE IO_Event_Selector_KQueue_yield(VALUE self)
  {
- struct IO_Event_Selector_KQueue *data = NULL;
- TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+ struct IO_Event_Selector_KQueue *selector = NULL;
+ TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
- return IO_Event_Selector_yield(&data->backend);
+ return IO_Event_Selector_yield(&selector->backend);
  }
 
  VALUE IO_Event_Selector_KQueue_push(VALUE self, VALUE fiber)
  {
- struct IO_Event_Selector_KQueue *data = NULL;
- TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+ struct IO_Event_Selector_KQueue *selector = NULL;
+ TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
- IO_Event_Selector_queue_push(&data->backend, fiber);
+ IO_Event_Selector_queue_push(&selector->backend, fiber);
 
  return Qnil;
  }
 
  VALUE IO_Event_Selector_KQueue_raise(int argc, VALUE *argv, VALUE self)
  {
- struct IO_Event_Selector_KQueue *data = NULL;
- TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+ struct IO_Event_Selector_KQueue *selector = NULL;
+ TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
- return IO_Event_Selector_raise(&data->backend, argc, argv);
+ return IO_Event_Selector_raise(&selector->backend, argc, argv);
  }
 
  VALUE IO_Event_Selector_KQueue_ready_p(VALUE self) {
- struct IO_Event_Selector_KQueue *data = NULL;
- TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+ struct IO_Event_Selector_KQueue *selector = NULL;
+ TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
- return data->backend.ready ? Qtrue : Qfalse;
+ return selector->backend.ready ? Qtrue : Qfalse;
  }
 
  struct process_wait_arguments {
- struct IO_Event_Selector_KQueue *data;
+ struct IO_Event_Selector_KQueue *selector;
+ struct IO_Event_Selector_KQueue_Waiting *waiting;
  pid_t pid;
- int flags;
  };
 
  static
- int process_add_filters(int descriptor, int ident, VALUE fiber) {
- struct kevent event = {0};
-
- event.ident = ident;
- event.filter = EVFILT_PROC;
- event.flags = EV_ADD | EV_ENABLE | EV_ONESHOT | EV_UDATA_SPECIFIC;
- event.fflags = NOTE_EXIT;
- event.udata = (void*)fiber;
-
- int result = kevent(descriptor, &event, 1, NULL, 0, NULL);
-
+ void process_prewait(pid_t pid) {
+ #if defined(WNOWAIT)
+ // FreeBSD seems to have an issue where kevent() can return an EVFILT_PROC/NOTE_EXIT event for a process even though a wait with WNOHANG on it immediately after will not return it (but it does after a small delay). Similarly, OpenBSD/NetBSD seem to sometimes fail the kevent() call with ESRCH (indicating the process has already terminated) even though a WNOHANG may not return it immediately after.
+ // To deal with this, do a hanging WNOWAIT wait on the process to make sure it is "terminated enough" for future WNOHANG waits to return it.
+ // Using waitid() for this because OpenBSD only supports WNOWAIT with waitid().
+ int result;
+ do {
+ siginfo_t info;
+ result = waitid(P_PID, pid, &info, WEXITED | WNOWAIT);
+ // This can sometimes get interrupted by SIGCHLD.
+ } while (result == -1 && errno == EINTR);
  if (result == -1) {
- // No such process - the process has probably already terminated:
- if (errno == ESRCH) {
- return 0;
- }
-
- rb_sys_fail("process_add_filters:kevent");
+ rb_sys_fail("process_prewait:waitid");
  }
-
- return 1;
- }
-
- static
- void process_remove_filters(int descriptor, int ident) {
- struct kevent event = {0};
-
- event.ident = ident;
- event.filter = EVFILT_PROC;
- event.flags = EV_DELETE | EV_UDATA_SPECIFIC;
- event.fflags = NOTE_EXIT;
-
- // Ignore the result.
- kevent(descriptor, &event, 1, NULL, 0, NULL);
+ #endif
  }
 
  static
  VALUE process_wait_transfer(VALUE _arguments) {
  struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
 
- IO_Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
+ IO_Event_Selector_fiber_transfer(arguments->selector->backend.loop, 0, NULL);
 
- return IO_Event_Selector_process_status_wait(arguments->pid);
+ if (arguments->waiting->ready) {
+ process_prewait(arguments->pid);
+ return IO_Event_Selector_process_status_wait(arguments->pid);
+ } else {
+ return Qfalse;
+ }
  }
 
  static
- VALUE process_wait_rescue(VALUE _arguments, VALUE exception) {
+ VALUE process_wait_ensure(VALUE _arguments) {
  struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
 
- process_remove_filters(arguments->data->descriptor, arguments->pid);
+ IO_Event_Selector_KQueue_Waiting_cancel(arguments->waiting);
 
- rb_exc_raise(exception);
+ return Qnil;
  }
 
- VALUE IO_Event_Selector_KQueue_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
- struct IO_Event_Selector_KQueue *data = NULL;
- TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
-
- struct process_wait_arguments process_wait_arguments = {
- .data = data,
- .pid = NUM2PIDT(pid),
- .flags = RB_NUM2INT(flags),
- };
-
- VALUE result = Qnil;
-
- // This loop should not be needed but I have seen a race condition between NOTE_EXIT and `waitpid`, thus the result would be (unexpectedly) nil. So we put this in a loop to retry if the race condition shows up:
- while (NIL_P(result)) {
- int waiting = process_add_filters(data->descriptor, process_wait_arguments.pid, fiber);
-
- if (waiting) {
- result = rb_rescue(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_rescue, (VALUE)&process_wait_arguments);
- } else {
- result = IO_Event_Selector_process_status_wait(process_wait_arguments.pid);
- }
- }
-
- return result;
- }
+ struct IO_Event_List_Type IO_Event_Selector_KQueue_process_wait_list_type = {};
 
- static
- int io_add_filters(int descriptor, int ident, int events, VALUE fiber) {
- int count = 0;
- struct kevent kevents[2] = {0};
+ VALUE IO_Event_Selector_KQueue_process_wait(VALUE self, VALUE fiber, VALUE _pid, VALUE _flags) {
+ struct IO_Event_Selector_KQueue *selector = NULL;
+ TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
- if (events & IO_EVENT_READABLE) {
- kevents[count].ident = ident;
- kevents[count].filter = EVFILT_READ;
- kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT | EV_UDATA_SPECIFIC;
- kevents[count].udata = (void*)fiber;
-
- // #ifdef EV_OOBAND
- // if (events & PRIORITY) {
- // kevents[count].flags |= EV_OOBAND;
- // }
- // #endif
-
- count++;
- }
+ pid_t pid = NUM2PIDT(_pid);
 
- if (events & IO_EVENT_WRITABLE) {
- kevents[count].ident = ident;
- kevents[count].filter = EVFILT_WRITE;
- kevents[count].flags = EV_ADD | EV_ENABLE | EV_ONESHOT | EV_UDATA_SPECIFIC;
- kevents[count].udata = (void*)fiber;
- count++;
- }
+ struct IO_Event_Selector_KQueue_Waiting waiting = {
+ .list = {.type = &IO_Event_Selector_KQueue_process_wait_list_type},
+ .fiber = fiber,
+ .events = IO_EVENT_EXIT,
+ };
 
- int result = kevent(descriptor, kevents, count, NULL, 0, NULL);
+ struct process_wait_arguments process_wait_arguments = {
+ .selector = selector,
+ .waiting = &waiting,
+ .pid = pid,
+ };
 
+ int result = IO_Event_Selector_KQueue_Waiting_register(selector, pid, &waiting);
  if (result == -1) {
- rb_sys_fail("io_add_filters:kevent");
- }
-
- return events;
- }
-
- static
- void io_remove_filters(int descriptor, int ident, int events) {
- int count = 0;
- struct kevent kevents[2] = {0};
-
- if (events & IO_EVENT_READABLE) {
- kevents[count].ident = ident;
- kevents[count].filter = EVFILT_READ;
- kevents[count].flags = EV_DELETE | EV_UDATA_SPECIFIC;
+ // OpenBSD/NetBSD return ESRCH when attempting to register an EVFILT_PROC event for a zombie process.
+ if (errno == ESRCH) {
+ process_prewait(pid);
+
+ return IO_Event_Selector_process_status_wait(pid);
+ }
 
- count++;
- }
-
- if (events & IO_EVENT_WRITABLE) {
- kevents[count].ident = ident;
- kevents[count].filter = EVFILT_WRITE;
- kevents[count].flags = EV_DELETE | EV_UDATA_SPECIFIC;
- count++;
+ rb_sys_fail("IO_Event_Selector_KQueue_process_wait:IO_Event_Selector_KQueue_Waiting_register");
  }
 
- // Ignore the result.
- kevent(descriptor, kevents, count, NULL, 0, NULL);
+ return rb_ensure(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_ensure, (VALUE)&process_wait_arguments);
  }
 
  struct io_wait_arguments {
- struct IO_Event_Selector_KQueue *data;
- int events;
- int descriptor;
+ struct IO_Event_Selector_KQueue *selector;
+ struct IO_Event_Selector_KQueue_Waiting *waiting;
  };
 
  static
- VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
+ VALUE io_wait_ensure(VALUE _arguments) {
  struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
 
- io_remove_filters(arguments->data->descriptor, arguments->descriptor, arguments->events);
-
- rb_exc_raise(exception);
- }
-
- static inline
- int events_from_kqueue_filter(int filter) {
- if (filter == EVFILT_READ) return IO_EVENT_READABLE;
- if (filter == EVFILT_WRITE) return IO_EVENT_WRITABLE;
+ IO_Event_Selector_KQueue_Waiting_cancel(arguments->waiting);
 
- return 0;
+ return Qnil;
  }
 
  static
  VALUE io_wait_transfer(VALUE _arguments) {
  struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
 
- VALUE result = IO_Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
+ IO_Event_Selector_fiber_transfer(arguments->selector->backend.loop, 0, NULL);
 
- // If the fiber is being cancelled, it might be resumed with nil:
- if (!RTEST(result)) {
+ if (arguments->waiting->ready) {
+ return RB_INT2NUM(arguments->waiting->ready);
+ } else {
  return Qfalse;
  }
-
- return INT2NUM(events_from_kqueue_filter(RB_NUM2INT(result)));
  }
 
+ struct IO_Event_List_Type IO_Event_Selector_KQueue_io_wait_list_type = {};
+
  VALUE IO_Event_Selector_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
- struct IO_Event_Selector_KQueue *data = NULL;
- TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+ struct IO_Event_Selector_KQueue *selector = NULL;
+ TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
  int descriptor = IO_Event_Selector_io_descriptor(io);
 
+ struct IO_Event_Selector_KQueue_Waiting waiting = {
+ .list = {.type = &IO_Event_Selector_KQueue_io_wait_list_type},
+ .fiber = fiber,
+ .events = RB_NUM2INT(events),
+ };
+
+ int result = IO_Event_Selector_KQueue_Waiting_register(selector, descriptor, &waiting);
+ if (result == -1) {
+ rb_sys_fail("IO_Event_Selector_KQueue_io_wait:IO_Event_Selector_KQueue_Waiting_register");
+ }
+
  struct io_wait_arguments io_wait_arguments = {
- .events = io_add_filters(data->descriptor, descriptor, RB_NUM2INT(events), fiber),
- .data = data,
- .descriptor = descriptor,
+ .selector = selector,
+ .waiting = &waiting,
  };
 
  if (DEBUG_IO_WAIT) fprintf(stderr, "IO_Event_Selector_KQueue_io_wait descriptor=%d\n", descriptor);
 
- return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
+ return rb_ensure(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_ensure, (VALUE)&io_wait_arguments);
  }
 
  #ifdef HAVE_RUBY_IO_BUFFER_H
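The new process_prewait relies on standard POSIX behaviour: waitid() with WNOWAIT blocks until the child is waitable but does not reap it, so a subsequent non-blocking wait is guaranteed to observe the terminated child. A standalone sketch of that sequence, under those POSIX assumptions (illustrative helper names, not the gem's code):

#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <errno.h>

// Block until `pid` has exited, but leave it reapable (WNOWAIT),
// retrying if the call is interrupted by a signal such as SIGCHLD:
static int prewait(pid_t pid) {
	int result;
	do {
		siginfo_t info;
		result = waitid(P_PID, pid, &info, WEXITED | WNOWAIT);
	} while (result == -1 && errno == EINTR);
	return result;
}

// After prewait() succeeds, a non-blocking reap will not miss the child:
static pid_t reap(pid_t pid, int *status) {
	return waitpid(pid, status, WNOHANG);
}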
@@ -406,16 +593,18 @@ VALUE io_read_loop(VALUE _arguments) {
 
  size_t length = arguments->length;
  size_t offset = arguments->offset;
+ size_t total = 0;
 
  if (DEBUG_IO_READ) fprintf(stderr, "io_read_loop(fd=%d, length=%zu)\n", arguments->descriptor, length);
 
- while (true) {
- size_t maximum_size = size - offset;
+ size_t maximum_size = size - offset;
+ while (maximum_size) {
  if (DEBUG_IO_READ) fprintf(stderr, "read(%d, +%ld, %ld)\n", arguments->descriptor, offset, maximum_size);
  ssize_t result = read(arguments->descriptor, (char*)base+offset, maximum_size);
  if (DEBUG_IO_READ) fprintf(stderr, "read(%d, +%ld, %ld) -> %zd\n", arguments->descriptor, offset, maximum_size, result);
 
  if (result > 0) {
+ total += result;
  offset += result;
  if ((size_t)result >= length) break;
  length -= result;
@@ -428,10 +617,12 @@ VALUE io_read_loop(VALUE _arguments) {
  if (DEBUG_IO_READ) fprintf(stderr, "io_read_loop(fd=%d, length=%zu) -> errno=%d\n", arguments->descriptor, length, errno);
  return rb_fiber_scheduler_io_result(-1, errno);
  }
+
+ maximum_size = size - offset;
  }
 
  if (DEBUG_IO_READ) fprintf(stderr, "io_read_loop(fd=%d, length=%zu) -> %zu\n", arguments->descriptor, length, offset);
- return rb_fiber_scheduler_io_result(offset, 0);
+ return rb_fiber_scheduler_io_result(total, 0);
  }
 
  static
@@ -444,8 +635,8 @@ VALUE io_read_ensure(VALUE _arguments) {
  }
 
  VALUE IO_Event_Selector_KQueue_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length, VALUE _offset) {
- struct IO_Event_Selector_KQueue *data = NULL;
- TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+ struct IO_Event_Selector_KQueue *selector = NULL;
+ TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
  int descriptor = IO_Event_Selector_io_descriptor(io);
 
@@ -504,6 +695,7 @@ VALUE io_write_loop(VALUE _arguments) {
 
  size_t length = arguments->length;
  size_t offset = arguments->offset;
+ size_t total = 0;
 
  if (length > size) {
  rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
@@ -511,13 +703,14 @@ VALUE io_write_loop(VALUE _arguments) {
 
  if (DEBUG_IO_WRITE) fprintf(stderr, "io_write_loop(fd=%d, length=%zu)\n", arguments->descriptor, length);
 
- while (true) {
- size_t maximum_size = size - offset;
+ size_t maximum_size = size - offset;
+ while (maximum_size) {
  if (DEBUG_IO_WRITE) fprintf(stderr, "write(%d, +%ld, %ld, length=%zu)\n", arguments->descriptor, offset, maximum_size, length);
  ssize_t result = write(arguments->descriptor, (char*)base+offset, maximum_size);
  if (DEBUG_IO_WRITE) fprintf(stderr, "write(%d, +%ld, %ld) -> %zd\n", arguments->descriptor, offset, maximum_size, result);
 
  if (result > 0) {
+ total += result;
  offset += result;
  if ((size_t)result >= length) break;
  length -= result;
@@ -530,10 +723,12 @@ VALUE io_write_loop(VALUE _arguments) {
  if (DEBUG_IO_WRITE) fprintf(stderr, "io_write_loop(fd=%d, length=%zu) -> errno=%d\n", arguments->descriptor, length, errno);
  return rb_fiber_scheduler_io_result(-1, errno);
  }
+
+ maximum_size = size - offset;
  }
 
  if (DEBUG_IO_READ) fprintf(stderr, "io_write_loop(fd=%d, length=%zu) -> %zu\n", arguments->descriptor, length, offset);
- return rb_fiber_scheduler_io_result(offset, 0);
+ return rb_fiber_scheduler_io_result(total, 0);
  };
 
  static
@@ -546,8 +741,8 @@ VALUE io_write_ensure(VALUE _arguments) {
  };
 
  VALUE IO_Event_Selector_KQueue_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length, VALUE _offset) {
- struct IO_Event_Selector_KQueue *data = NULL;
- TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+ struct IO_Event_Selector_KQueue *selector = NULL;
+ TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
  int descriptor = IO_Event_Selector_io_descriptor(io);
 
@@ -616,7 +811,7 @@ int timeout_nonblocking(struct timespec * timespec) {
  }
 
  struct select_arguments {
- struct IO_Event_Selector_KQueue *data;
+ struct IO_Event_Selector_KQueue *selector;
 
  int count;
  struct kevent events[KQUEUE_MAX_EVENTS];
@@ -629,17 +824,17 @@ static
  void * select_internal(void *_arguments) {
  struct select_arguments * arguments = (struct select_arguments *)_arguments;
 
- arguments->count = kevent(arguments->data->descriptor, NULL, 0, arguments->events, arguments->count, arguments->timeout);
+ arguments->count = kevent(arguments->selector->descriptor, NULL, 0, arguments->events, arguments->count, arguments->timeout);
 
  return NULL;
  }
 
  static
  void select_internal_without_gvl(struct select_arguments *arguments) {
- arguments->data->blocked = 1;
+ arguments->selector->blocked = 1;
 
  rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
- arguments->data->blocked = 0;
+ arguments->selector->blocked = 0;
 
  if (arguments->count == -1) {
  if (errno != EINTR) {
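For background on the hunk above: the blocking kevent() call is still issued through rb_thread_call_without_gvl, so other Ruby threads can run while this selector sleeps. A minimal sketch of that GVL-release pattern in the Ruby C API, with illustrative names that are not io-event's:

#include <ruby.h>
#include <ruby/thread.h>
#include <unistd.h>

struct read_arguments {
	int descriptor;
	char *buffer;
	size_t size;
	ssize_t result;
};

// Runs without the GVL; must not call any Ruby APIs:
static void *blocking_read(void *_arguments) {
	struct read_arguments *arguments = _arguments;
	arguments->result = read(arguments->descriptor, arguments->buffer, arguments->size);
	return NULL;
}

static ssize_t read_without_gvl(int descriptor, char *buffer, size_t size) {
	struct read_arguments arguments = {descriptor, buffer, size, -1};
	// RUBY_UBF_IO lets the VM interrupt the blocked call (e.g. on Thread#kill):
	rb_thread_call_without_gvl(blocking_read, &arguments, RUBY_UBF_IO, 0);
	return arguments.result;
}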
@@ -663,14 +858,60 @@ void select_internal_with_gvl(struct select_arguments *arguments) {
  }
  }
 
+ static
+ int IO_Event_Selector_KQueue_handle(struct IO_Event_Selector_KQueue *selector, uintptr_t identifier, struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor)
+ {
+ // This is the mask of all events that occurred for the given descriptor:
+ enum IO_Event ready_events = kqueue_descriptor->ready_events;
+
+ if (ready_events) {
+ kqueue_descriptor->ready_events = 0;
+ // Since we use one-shot semantics, we need to re-arm the events that are ready if needed:
+ kqueue_descriptor->registered_events &= ~ready_events;
+ } else {
+ return 0;
+ }
+
+ struct IO_Event_List *list = &kqueue_descriptor->list;
+ struct IO_Event_List *node = list->tail;
+ struct IO_Event_List saved = {NULL, NULL};
+
+ // Reset the events back to 0 so that we can re-arm if necessary:
+ kqueue_descriptor->waiting_events = 0;
+
+ // It's possible (but unlikely) that the address of the list will change during iteration.
+ while (node != list) {
+ struct IO_Event_Selector_KQueue_Waiting *waiting = (struct IO_Event_Selector_KQueue_Waiting *)node;
+
+ enum IO_Event matching_events = waiting->events & ready_events;
+
+ if (DEBUG) fprintf(stderr, "IO_Event_Selector_KQueue_handle: identifier=%lu, ready_events=%d, matching_events=%d\n", identifier, ready_events, matching_events);
+
+ if (matching_events) {
+ IO_Event_List_append(node, &saved);
+
+ waiting->ready = matching_events;
+ IO_Event_Selector_fiber_transfer(waiting->fiber, 0, NULL);
+
+ node = saved.tail;
+ IO_Event_List_pop(&saved);
+ } else {
+ kqueue_descriptor->waiting_events |= waiting->events;
+ node = node->tail;
+ }
+ }
+
+ return IO_Event_Selector_KQueue_Descriptor_update(selector, identifier, kqueue_descriptor);
+ }
+
  VALUE IO_Event_Selector_KQueue_select(VALUE self, VALUE duration) {
- struct IO_Event_Selector_KQueue *data = NULL;
- TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+ struct IO_Event_Selector_KQueue *selector = NULL;
+ TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
- int ready = IO_Event_Selector_queue_flush(&data->backend);
+ int ready = IO_Event_Selector_queue_flush(&selector->backend);
 
  struct select_arguments arguments = {
- .data = data,
+ .selector = selector,
  .count = KQUEUE_MAX_EVENTS,
  .storage = {
  .tv_sec = 0,
@@ -696,7 +937,7 @@ VALUE IO_Event_Selector_KQueue_select(VALUE self, VALUE duration) {
  // 2. Didn't process any events from non-blocking select (above), and
  // 3. There are no items in the ready list,
  // then we can perform a blocking select.
- if (!ready && !arguments.count && !data->backend.ready) {
+ if (!ready && !arguments.count && !selector->backend.ready) {
  arguments.timeout = make_timeout(duration, &arguments.storage);
 
  if (!timeout_nonblocking(arguments.timeout)) {
@@ -709,32 +950,54 @@ VALUE IO_Event_Selector_KQueue_select(VALUE self, VALUE duration) {
 
  for (int i = 0; i < arguments.count; i += 1) {
  if (arguments.events[i].udata) {
- VALUE fiber = (VALUE)arguments.events[i].udata;
- VALUE result = INT2NUM(arguments.events[i].filter);
-
- IO_Event_Selector_fiber_transfer(fiber, 1, &result);
+ struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor = arguments.events[i].udata;
+ kqueue_descriptor->ready_events |= events_from_kevent_filter(arguments.events[i].filter);
  }
  }
 
- return INT2NUM(arguments.count);
+ for (int i = 0; i < arguments.count; i += 1) {
+ if (arguments.events[i].udata) {
+ struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor = arguments.events[i].udata;
+ IO_Event_Selector_KQueue_handle(selector, arguments.events[i].ident, kqueue_descriptor);
+ } else {
+ #ifdef IO_EVENT_SELECTOR_KQUEUE_USE_INTERRUPT
+ IO_Event_Interrupt_clear(&selector->interrupt);
+ #endif
+ }
+ }
+
+ return RB_INT2NUM(arguments.count);
  }
 
  VALUE IO_Event_Selector_KQueue_wakeup(VALUE self) {
- struct IO_Event_Selector_KQueue *data = NULL;
- TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, data);
+ struct IO_Event_Selector_KQueue *selector = NULL;
+ TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
 
- if (data->blocked) {
+ if (selector->blocked) {
+ #ifdef IO_EVENT_SELECTOR_KQUEUE_USE_INTERRUPT
+ IO_Event_Interrupt_signal(&selector->interrupt);
+ #else
  struct kevent trigger = {0};
 
  trigger.filter = EVFILT_USER;
- trigger.flags = EV_ADD | EV_CLEAR | EV_UDATA_SPECIFIC;
+ trigger.flags = EV_ADD | EV_CLEAR;
+
+ int result = kevent(selector->descriptor, &trigger, 1, NULL, 0, NULL);
+
+ if (result == -1) {
+ rb_sys_fail("IO_Event_Selector_KQueue_wakeup:kevent");
+ }
+
+ // FreeBSD apparently only works if the NOTE_TRIGGER is done as a separate call.
+ trigger.flags = 0;
  trigger.fflags = NOTE_TRIGGER;
 
- int result = kevent(data->descriptor, &trigger, 1, NULL, 0, NULL);
+ result = kevent(selector->descriptor, &trigger, 1, NULL, 0, NULL);
 
  if (result == -1) {
  rb_sys_fail("IO_Event_Selector_KQueue_wakeup:kevent");
  }
+ #endif
 
  return Qtrue;
  }
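The rewritten wakeup path above registers the EVFILT_USER event and fires NOTE_TRIGGER in two separate kevent() calls, per the comment about FreeBSD. A self-contained sketch of that wakeup pattern against the plain kqueue API (illustrative only, not the gem's code):

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

// Wake up a kqueue that is blocked in kevent() by triggering a user event.
// Returns -1 on error, with errno set by kevent().
static int wakeup(int kq) {
	struct kevent trigger = {0};
	trigger.ident = 0;
	trigger.filter = EVFILT_USER;
	trigger.flags = EV_ADD | EV_CLEAR;

	// First, register (or re-register) the user event:
	if (kevent(kq, &trigger, 1, NULL, 0, NULL) == -1) return -1;

	// Then trigger it in a separate call, so the blocked kevent() returns:
	trigger.flags = 0;
	trigger.fflags = NOTE_TRIGGER;
	return kevent(kq, &trigger, 1, NULL, 0, NULL);
}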