io-event 1.2.3 → 1.3.0
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/ext/extconf.rb +5 -3
- data/ext/io/event/selector/array.h +135 -0
- data/ext/io/event/selector/epoll.c +435 -196
- data/ext/io/event/selector/kqueue.c +481 -218
- data/ext/io/event/selector/list.h +87 -0
- data/ext/io/event/selector/selector.c +14 -14
- data/ext/io/event/selector/selector.h +20 -6
- data/ext/io/event/selector/uring.c +399 -216
- data/lib/io/event/selector/select.rb +34 -14
- data/lib/io/event/selector.rb +1 -5
- data/lib/io/event/version.rb +1 -1
- data.tar.gz.sig +0 -0
- metadata +4 -2
- metadata.gz.sig +0 -0
@@ -20,6 +20,8 @@
 
 #include "kqueue.h"
 #include "selector.h"
+#include "list.h"
+#include "array.h"
 
 #include <sys/epoll.h>
 #include <time.h>
@@ -36,47 +38,145 @@ static VALUE IO_Event_Selector_EPoll = Qnil;
 
 enum {EPOLL_MAX_EVENTS = 64};
 
-struct IO_Event_Selector_EPoll {
+// This represents an actual fiber waiting for a specific event.
+struct IO_Event_Selector_EPoll_Waiting
+{
+	struct IO_Event_List list;
+
+	// The events the fiber is waiting for.
+	enum IO_Event events;
+
+	// The events that are currently ready.
+	enum IO_Event ready;
+
+	// The fiber value itself.
+	VALUE fiber;
+};
+
+struct IO_Event_Selector_EPoll
+{
 	struct IO_Event_Selector backend;
 	int descriptor;
 	int blocked;
+
 	struct IO_Event_Interrupt interrupt;
+	struct IO_Event_Array descriptors;
 };
 
-
+// This represents zero or more fibers waiting for a specific descriptor.
+struct IO_Event_Selector_EPoll_Descriptor
 {
-	struct
-
+	struct IO_Event_List list;
+
+	// The last IO object that was used to register events.
+	VALUE io;
+
+	// The union of all events we are waiting for:
+	enum IO_Event waiting_events;
+
+	// The union of events we are registered for:
+	enum IO_Event registered_events;
+};
+
+static
+void IO_Event_Selector_EPoll_Waiting_mark(struct IO_Event_List *_waiting)
+{
+	struct IO_Event_Selector_EPoll_Waiting *waiting = (void*)_waiting;
+
+	if (waiting->fiber) {
+		rb_gc_mark_movable(waiting->fiber);
+	}
 }
 
 static
-void
-
-
-
-
-
+void IO_Event_Selector_EPoll_Descriptor_mark(void *_descriptor)
+{
+	struct IO_Event_Selector_EPoll_Descriptor *descriptor = _descriptor;
+
+	IO_Event_List_immutable_each(&descriptor->list, IO_Event_Selector_EPoll_Waiting_mark);
+
+	if (descriptor->io) {
+		rb_gc_mark_movable(descriptor->io);
 	}
 }
 
-
+static
+void IO_Event_Selector_EPoll_Type_mark(void *_selector)
 {
-	struct IO_Event_Selector_EPoll *
+	struct IO_Event_Selector_EPoll *selector = _selector;
 
-
+	IO_Event_Selector_mark(&selector->backend);
+	IO_Event_Array_each(&selector->descriptors, IO_Event_Selector_EPoll_Descriptor_mark);
+}
+
+static
+void IO_Event_Selector_EPoll_Waiting_compact(struct IO_Event_List *_waiting)
+{
+	struct IO_Event_Selector_EPoll_Waiting *waiting = (void*)_waiting;
 
-
+	if (waiting->fiber) {
+		waiting->fiber = rb_gc_location(waiting->fiber);
+	}
 }
 
-
+static
+void IO_Event_Selector_EPoll_Descriptor_compact(void *_descriptor)
+{
+	struct IO_Event_Selector_EPoll_Descriptor *descriptor = _descriptor;
+
+	IO_Event_List_immutable_each(&descriptor->list, IO_Event_Selector_EPoll_Waiting_compact);
+
+	if (descriptor->io) {
+		descriptor->io = rb_gc_location(descriptor->io);
+	}
+}
+
+static
+void IO_Event_Selector_EPoll_Type_compact(void *_selector)
+{
+	struct IO_Event_Selector_EPoll *selector = _selector;
+
+	IO_Event_Selector_compact(&selector->backend);
+	IO_Event_Array_each(&selector->descriptors, IO_Event_Selector_EPoll_Descriptor_compact);
+}
+
+static
+void close_internal(struct IO_Event_Selector_EPoll *selector)
+{
+	if (selector->descriptor >= 0) {
+		close(selector->descriptor);
+		selector->descriptor = -1;
+
+		IO_Event_Interrupt_close(&selector->interrupt);
+	}
+}
+static
+void IO_Event_Selector_EPoll_Type_free(void *_selector)
+{
+	struct IO_Event_Selector_EPoll *selector = _selector;
+
+	close_internal(selector);
+
+	IO_Event_Array_free(&selector->descriptors);
+
+	free(selector);
+}
+
+static
+size_t IO_Event_Selector_EPoll_Type_size(const void *_selector)
 {
-
+	const struct IO_Event_Selector_EPoll *selector = _selector;
+
+	return sizeof(struct IO_Event_Selector_EPoll)
+		+ IO_Event_Array_memory_size(&selector->descriptors)
+	;
 }
 
 static const rb_data_type_t IO_Event_Selector_EPoll_Type = {
 	.wrap_struct_name = "IO_Event::Backend::EPoll",
 	.function = {
 		.dmark = IO_Event_Selector_EPoll_Type_mark,
+		.dcompact = IO_Event_Selector_EPoll_Type_compact,
 		.dfree = IO_Event_Selector_EPoll_Type_free,
 		.dsize = IO_Event_Selector_EPoll_Type_size,
 	},
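The new `.dcompact` entry in epoll.c (together with `rb_gc_mark_movable` in the mark functions and `rb_gc_location` in the compact functions) is what lets the selector keep holding fiber and IO `VALUE`s while Ruby's compacting GC moves objects. A minimal sketch of the same pattern for a hypothetical wrapped struct — illustrative only, not code from this gem — looks like this:

```c
#include <ruby.h>

// Hypothetical wrapped struct holding one Ruby object reference.
struct Example {
	VALUE object;
};

static void Example_mark(void *_example) {
	struct Example *example = _example;
	// Mark the reference as movable so the compacting GC may relocate it:
	if (example->object) rb_gc_mark_movable(example->object);
}

static void Example_compact(void *_example) {
	struct Example *example = _example;
	// After compaction, ask the GC for the object's new address:
	if (example->object) example->object = rb_gc_location(example->object);
}

static size_t Example_size(const void *_example) {
	return sizeof(struct Example);
}

static const rb_data_type_t Example_type = {
	.wrap_struct_name = "Example",
	.function = {
		.dmark = Example_mark,
		.dcompact = Example_compact,
		.dfree = RUBY_TYPED_DEFAULT_FREE,
		.dsize = Example_size,
	},
	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
};
```

Without a `dcompact` callback, such references would have to be pinned with plain `rb_gc_mark` instead.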
@@ -84,25 +184,172 @@ static const rb_data_type_t IO_Event_Selector_EPoll_Type = {
 	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
 };
 
+inline static
+struct IO_Event_Selector_EPoll_Descriptor * IO_Event_Selector_EPoll_Descriptor_lookup(struct IO_Event_Selector_EPoll *selector, int descriptor)
+{
+	struct IO_Event_Selector_EPoll_Descriptor *epoll_descriptor = IO_Event_Array_lookup(&selector->descriptors, descriptor);
+
+	if (!epoll_descriptor) {
+		rb_sys_fail("IO_Event_Selector_EPoll_Descriptor_lookup:IO_Event_Array_lookup");
+	}
+
+	return epoll_descriptor;
+}
+
+static inline
+uint32_t epoll_flags_from_events(int events)
+{
+	uint32_t flags = 0;
+
+	if (events & IO_EVENT_READABLE) flags |= EPOLLIN;
+	if (events & IO_EVENT_PRIORITY) flags |= EPOLLPRI;
+	if (events & IO_EVENT_WRITABLE) flags |= EPOLLOUT;
+
+	flags |= EPOLLHUP;
+	flags |= EPOLLERR;
+
+	if (DEBUG) fprintf(stderr, "epoll_flags_from_events events=%d flags=%d\n", events, flags);
+
+	return flags;
+}
+
+static inline
+int events_from_epoll_flags(uint32_t flags)
+{
+	int events = 0;
+
+	if (DEBUG) fprintf(stderr, "events_from_epoll_flags flags=%d\n", flags);
+
+	// Occasionally, (and noted specifically when dealing with child processes stdout), flags will only be POLLHUP. In this case, we arm the file descriptor for reading so that the HUP will be noted, rather than potentially ignored, since there is no dedicated event for it.
+	// if (flags & (EPOLLIN)) events |= IO_EVENT_READABLE;
+	if (flags & (EPOLLIN|EPOLLHUP|EPOLLERR)) events |= IO_EVENT_READABLE;
+	if (flags & EPOLLPRI) events |= IO_EVENT_PRIORITY;
+	if (flags & EPOLLOUT) events |= IO_EVENT_WRITABLE;
+
+	return events;
+}
+
+inline static
+int IO_Event_Selector_EPoll_Descriptor_update(struct IO_Event_Selector_EPoll *selector, VALUE io, int descriptor, struct IO_Event_Selector_EPoll_Descriptor *epoll_descriptor)
+{
+	if (epoll_descriptor->io == io) {
+		if (epoll_descriptor->registered_events == epoll_descriptor->waiting_events) {
+			// All the events we are interested in are already registered.
+			return 0;
+		}
+	} else {
+		// The IO has changed, we need to reset the state:
+		epoll_descriptor->registered_events = 0;
+		epoll_descriptor->io = io;
+	}
+
+	if (epoll_descriptor->waiting_events == 0) {
+		if (epoll_descriptor->registered_events) {
+			// We are no longer interested in any events.
+			epoll_ctl(selector->descriptor, EPOLL_CTL_DEL, descriptor, NULL);
+			epoll_descriptor->registered_events = 0;
+		}
+
+		epoll_descriptor->io = 0;
+
+		return 0;
+	}
+
+	// We need to register for additional events:
+	struct epoll_event event = {
+		.events = epoll_flags_from_events(epoll_descriptor->waiting_events),
+		.data = {.fd = descriptor},
+	};
+
+	int operation;
+
+	if (epoll_descriptor->registered_events) {
+		operation = EPOLL_CTL_MOD;
+	} else {
+		operation = EPOLL_CTL_ADD;
+	}
+
+	int result = epoll_ctl(selector->descriptor, operation, descriptor, &event);
+	if (result == -1) {
+		if (errno == ENOENT) {
+			result = epoll_ctl(selector->descriptor, EPOLL_CTL_ADD, descriptor, &event);
+		} else if (errno == EEXIST) {
+			result = epoll_ctl(selector->descriptor, EPOLL_CTL_MOD, descriptor, &event);
+		}
+
+		if (result == -1) {
+			return -1;
+		}
+	}
+
+	epoll_descriptor->registered_events = epoll_descriptor->waiting_events;
+
+	return 1;
+}
+
+inline static
+int IO_Event_Selector_EPoll_Waiting_register(struct IO_Event_Selector_EPoll *selector, VALUE io, int descriptor, struct IO_Event_Selector_EPoll_Waiting *waiting)
+{
+	struct IO_Event_Selector_EPoll_Descriptor *epoll_descriptor = IO_Event_Selector_EPoll_Descriptor_lookup(selector, descriptor);
+
+	// We are waiting for these events:
+	epoll_descriptor->waiting_events |= waiting->events;
+
+	int result = IO_Event_Selector_EPoll_Descriptor_update(selector, io, descriptor, epoll_descriptor);
+	if (result == -1) return -1;
+
+	IO_Event_List_prepend(&epoll_descriptor->list, &waiting->list);
+
+	return result;
+}
+
+inline static
+void IO_Event_Selector_EPoll_Waiting_cancel(struct IO_Event_Selector_EPoll_Waiting *waiting)
+{
+	IO_Event_List_pop(&waiting->list);
+	waiting->fiber = 0;
+}
+
+void IO_Event_Selector_EPoll_Descriptor_initialize(void *element)
+{
+	struct IO_Event_Selector_EPoll_Descriptor *epoll_descriptor = element;
+	IO_Event_List_initialize(&epoll_descriptor->list);
+	epoll_descriptor->io = 0;
+	epoll_descriptor->waiting_events = 0;
+	epoll_descriptor->registered_events = 0;
+}
+
+void IO_Event_Selector_EPoll_Descriptor_free(void *element)
+{
+	struct IO_Event_Selector_EPoll_Descriptor *epoll_descriptor = element;
+
+	IO_Event_List_free(&epoll_descriptor->list);
+}
+
 VALUE IO_Event_Selector_EPoll_allocate(VALUE self) {
-	struct IO_Event_Selector_EPoll *data = NULL;
-	VALUE instance = TypedData_Make_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	VALUE instance = TypedData_Make_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
+
+	IO_Event_Selector_initialize(&selector->backend, Qnil);
+	selector->descriptor = -1;
+	selector->blocked = 0;
 
-
-
+	selector->descriptors.element_initialize = IO_Event_Selector_EPoll_Descriptor_initialize;
+	selector->descriptors.element_free = IO_Event_Selector_EPoll_Descriptor_free;
+	IO_Event_Array_allocate(&selector->descriptors, 1024, sizeof(struct IO_Event_Selector_EPoll_Descriptor));
 
 	return instance;
 }
 
-void IO_Event_Interrupt_add(struct IO_Event_Interrupt *interrupt, struct IO_Event_Selector_EPoll *data) {
+void IO_Event_Interrupt_add(struct IO_Event_Interrupt *interrupt, struct IO_Event_Selector_EPoll *selector) {
 	int descriptor = IO_Event_Interrupt_descriptor(interrupt);
 
 	struct epoll_event event = {
 		.events = EPOLLIN|EPOLLRDHUP,
-		.data = {.
+		.data = {.fd = -1},
 	};
 
-	int result = epoll_ctl(data->descriptor, EPOLL_CTL_ADD, descriptor, &event);
+	int result = epoll_ctl(selector->descriptor, EPOLL_CTL_ADD, descriptor, &event);
 
 	if (result == -1) {
 		rb_sys_fail("IO_Event_Interrupt_add:epoll_ctl");
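`IO_Event_Selector_EPoll_Descriptor_update` keeps a single registration per file descriptor and repairs it when its bookkeeping disagrees with the kernel: `ENOENT` means the descriptor was not actually registered, `EEXIST` means it already was. The same retry pattern, reduced to plain epoll calls outside the gem (names here are illustrative), is roughly:

```c
#include <sys/epoll.h>
#include <stdint.h>
#include <errno.h>

// Register `fd` for `events` on `epfd`, preferring EPOLL_CTL_MOD when we
// believe it is already registered, and falling back when the kernel
// disagrees. Returns 0 on success, -1 on failure (errno set).
static int update_registration(int epfd, int fd, uint32_t events, int already_registered)
{
	struct epoll_event event = {
		.events = events,
		.data = {.fd = fd},
	};

	int operation = already_registered ? EPOLL_CTL_MOD : EPOLL_CTL_ADD;
	int result = epoll_ctl(epfd, operation, fd, &event);

	if (result == -1) {
		if (errno == ENOENT) {
			// We thought it was registered, but it is not; add it.
			result = epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &event);
		} else if (errno == EEXIST) {
			// We thought it was new, but it is already there; modify it.
			result = epoll_ctl(epfd, EPOLL_CTL_MOD, fd, &event);
		}
	}

	return result;
}
```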
@@ -110,95 +357,95 @@ void IO_Event_Interrupt_add(struct IO_Event_Interrupt *interrupt, struct IO_Even
 }
 
 VALUE IO_Event_Selector_EPoll_initialize(VALUE self, VALUE loop) {
-	struct IO_Event_Selector_EPoll *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
-	IO_Event_Selector_initialize(&data->backend, loop);
+	IO_Event_Selector_initialize(&selector->backend, loop);
 	int result = epoll_create1(EPOLL_CLOEXEC);
 
 	if (result == -1) {
 		rb_sys_fail("IO_Event_Selector_EPoll_initialize:epoll_create");
 	} else {
-
+		selector->descriptor = result;
 
-		rb_update_max_fd(data->descriptor);
+		rb_update_max_fd(selector->descriptor);
 	}
 
-	IO_Event_Interrupt_open(&data->interrupt);
-	IO_Event_Interrupt_add(&data->interrupt, data);
+	IO_Event_Interrupt_open(&selector->interrupt);
+	IO_Event_Interrupt_add(&selector->interrupt, selector);
 
 	return self;
 }
 
 VALUE IO_Event_Selector_EPoll_loop(VALUE self) {
-	struct IO_Event_Selector_EPoll *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
-	return data->backend.loop;
+	return selector->backend.loop;
 }
 
 VALUE IO_Event_Selector_EPoll_close(VALUE self) {
-	struct IO_Event_Selector_EPoll *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
-	close_internal(data);
+	close_internal(selector);
 
 	return Qnil;
 }
 
 VALUE IO_Event_Selector_EPoll_transfer(VALUE self)
 {
-	struct IO_Event_Selector_EPoll *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
-	return IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
+	return IO_Event_Selector_fiber_transfer(selector->backend.loop, 0, NULL);
 }
 
 VALUE IO_Event_Selector_EPoll_resume(int argc, VALUE *argv, VALUE self)
 {
-	struct IO_Event_Selector_EPoll *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
-	return IO_Event_Selector_resume(&data->backend, argc, argv);
+	return IO_Event_Selector_resume(&selector->backend, argc, argv);
 }
 
 VALUE IO_Event_Selector_EPoll_yield(VALUE self)
 {
-	struct IO_Event_Selector_EPoll *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
-	return IO_Event_Selector_yield(&data->backend);
+	return IO_Event_Selector_yield(&selector->backend);
 }
 
 VALUE IO_Event_Selector_EPoll_push(VALUE self, VALUE fiber)
 {
-	struct IO_Event_Selector_EPoll *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
-	IO_Event_Selector_queue_push(&data->backend, fiber);
+	IO_Event_Selector_queue_push(&selector->backend, fiber);
 
 	return Qnil;
 }
 
 VALUE IO_Event_Selector_EPoll_raise(int argc, VALUE *argv, VALUE self)
 {
-	struct IO_Event_Selector_EPoll *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
-	return IO_Event_Selector_raise(&data->backend, argc, argv);
+	return IO_Event_Selector_raise(&selector->backend, argc, argv);
 }
 
 VALUE IO_Event_Selector_EPoll_ready_p(VALUE self) {
-	struct IO_Event_Selector_EPoll *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
-	return data->backend.ready ? Qtrue : Qfalse;
+	return selector->backend.ready ? Qtrue : Qfalse;
 }
 
 struct process_wait_arguments {
-	struct IO_Event_Selector_EPoll *data;
-
-	int
+	struct IO_Event_Selector_EPoll *selector;
+	struct IO_Event_Selector_EPoll_Waiting *waiting;
+	int pid;
 	int descriptor;
 };
 
@@ -206,106 +453,76 @@ static
 VALUE process_wait_transfer(VALUE _arguments) {
 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
 
-	IO_Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
+	IO_Event_Selector_fiber_transfer(arguments->selector->backend.loop, 0, NULL);
 
-
+	if (arguments->waiting->ready) {
+		return IO_Event_Selector_process_status_wait(arguments->pid);
+	} else {
+		return Qfalse;
+	}
 }
 
 static
 VALUE process_wait_ensure(VALUE _arguments) {
 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
 
-	// epoll_ctl(arguments->data->descriptor, EPOLL_CTL_DEL, arguments->descriptor, NULL);
-
 	close(arguments->descriptor);
 
+	IO_Event_Selector_EPoll_Waiting_cancel(arguments->waiting);
+
 	return Qnil;
 }
 
-
-
-
+struct IO_Event_List_Type IO_Event_Selector_EPoll_process_wait_list_type = {};
+
+VALUE IO_Event_Selector_EPoll_process_wait(VALUE self, VALUE fiber, VALUE _pid, VALUE _flags) {
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
-
-
-		.pid = NUM2PIDT(pid),
-		.flags = NUM2INT(flags),
-	};
+	pid_t pid = NUM2PIDT(_pid);
+	// int flags = NUM2INT(_flags);
 
-
+	int descriptor = pidfd_open(pid, 0);
 
-	if (
+	if (descriptor == -1) {
 		rb_sys_fail("IO_Event_Selector_EPoll_process_wait:pidfd_open");
 	}
 
-	rb_update_max_fd(
+	rb_update_max_fd(descriptor);
 
-	struct
-		.
-		.
+	struct IO_Event_Selector_EPoll_Waiting waiting = {
+		.list = {.type = &IO_Event_Selector_EPoll_process_wait_list_type},
+		.fiber = fiber,
+		.events = IO_EVENT_READABLE,
 	};
 
-	int result =
+	int result = IO_Event_Selector_EPoll_Waiting_register(selector, 0, descriptor, &waiting);
 
 	if (result == -1) {
-		close(
-		rb_sys_fail("IO_Event_Selector_EPoll_process_wait:
+		close(descriptor);
+		rb_sys_fail("IO_Event_Selector_EPoll_process_wait:IO_Event_Selector_EPoll_Waiting_register");
 	}
 
-
-
-
-
-
-
-
-	if (events & IO_EVENT_READABLE) flags |= EPOLLIN;
-	if (events & IO_EVENT_PRIORITY) flags |= EPOLLPRI;
-	if (events & IO_EVENT_WRITABLE) flags |= EPOLLOUT;
-
-	flags |= EPOLLHUP;
-	flags |= EPOLLERR;
-
-	// Immediately remove this descriptor after reading one event:
-	flags |= EPOLLONESHOT;
-
-	if (DEBUG) fprintf(stderr, "epoll_flags_from_events events=%d flags=%d\n", events, flags);
-
-	return flags;
-}
-
-static inline
-int events_from_epoll_flags(uint32_t flags) {
-	int events = 0;
-
-	if (DEBUG) fprintf(stderr, "events_from_epoll_flags flags=%d\n", flags);
-
-	// Occasionally, (and noted specifically when dealing with child processes stdout), flags will only be POLLHUP. In this case, we arm the file descriptor for reading so that the HUP will be noted, rather than potentially ignored, since there is no dedicated event for it.
-	// if (flags & (EPOLLIN)) events |= IO_EVENT_READABLE;
-	if (flags & (EPOLLIN|EPOLLHUP|EPOLLERR)) events |= IO_EVENT_READABLE;
-	if (flags & EPOLLPRI) events |= IO_EVENT_PRIORITY;
-	if (flags & EPOLLOUT) events |= IO_EVENT_WRITABLE;
+	struct process_wait_arguments process_wait_arguments = {
+		.selector = selector,
+		.pid = pid,
+		.descriptor = descriptor,
+		.waiting = &waiting,
+	};
 
-	return
+	return rb_ensure(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_ensure, (VALUE)&process_wait_arguments);
 }
 
 struct io_wait_arguments {
-	struct IO_Event_Selector_EPoll *data;
-
-	int duplicate;
+	struct IO_Event_Selector_EPoll *selector;
+	struct IO_Event_Selector_EPoll_Waiting *waiting;
 };
 
 static
 VALUE io_wait_ensure(VALUE _arguments) {
 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
 
-
-		epoll_ctl(arguments->data->descriptor, EPOLL_CTL_DEL, arguments->duplicate, NULL);
-
-		close(arguments->duplicate);
-	} else {
-		epoll_ctl(arguments->data->descriptor, EPOLL_CTL_DEL, arguments->descriptor, NULL);
-	}
+	IO_Event_Selector_EPoll_Waiting_cancel(arguments->waiting);
 
 	return Qnil;
 };
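`process_wait` now registers the pidfd with the same waiting-list machinery as ordinary IO: the descriptor returned by `pidfd_open` becomes readable once the child exits, so `IO_EVENT_READABLE` is the event to wait for. A standalone sketch of that mechanism, independent of the gem and assuming a kernel and headers that expose `SYS_pidfd_open` (Linux 5.3+), might look like:

```c
#define _GNU_SOURCE
#include <sys/epoll.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	pid_t pid = fork();
	if (pid == 0) {
		// Child: exit after a moment.
		sleep(1);
		_exit(0);
	}

	// Open a pidfd for the child; it becomes readable once the child exits.
	int pidfd = (int)syscall(SYS_pidfd_open, pid, 0);

	int epfd = epoll_create1(EPOLL_CLOEXEC);
	struct epoll_event event = {.events = EPOLLIN, .data = {.fd = pidfd}};
	epoll_ctl(epfd, EPOLL_CTL_ADD, pidfd, &event);

	// Blocks until the child terminates:
	struct epoll_event ready;
	epoll_wait(epfd, &ready, 1, -1);

	// Reap the child and report its status.
	int status = 0;
	waitpid(pid, &status, 0);
	printf("child exited with status %d\n", WEXITSTATUS(status));

	close(pidfd);
	close(epfd);
	return 0;
}
```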
@@ -314,72 +531,44 @@ static
 VALUE io_wait_transfer(VALUE _arguments) {
 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
 
-
+	IO_Event_Selector_fiber_transfer(arguments->selector->backend.loop, 0, NULL);
 
-	if (
-
-
-	if (!RTEST(result)) {
-		if (DEBUG) fprintf(stderr, "io_wait_transfer flags=false\n");
+	if (arguments->waiting->ready) {
+		return RB_INT2NUM(arguments->waiting->ready);
+	} else {
 		return Qfalse;
 	}
-
-	if (DEBUG) fprintf(stderr, "io_wait_transfer flags=%d\n", NUM2INT(result));
-
-	return INT2NUM(events_from_epoll_flags(NUM2INT(result)));
 };
 
+struct IO_Event_List_Type IO_Event_Selector_EPoll_io_wait_list_type = {};
+
 VALUE IO_Event_Selector_EPoll_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
-	struct IO_Event_Selector_EPoll *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
-
+	int descriptor = IO_Event_Selector_io_descriptor(io);
 
-
-
-
-
-
-
-	if (DEBUG) fprintf(stderr, "<- fiber=%p descriptor=%d\n", (void*)fiber, descriptor);
-
-	// A better approach is to batch all changes:
-	int result = epoll_ctl(data->descriptor, EPOLL_CTL_ADD, descriptor, &event);
+	struct IO_Event_Selector_EPoll_Waiting waiting = {
+		.list = {.type = &IO_Event_Selector_EPoll_io_wait_list_type},
+		.fiber = fiber,
+		.events = RB_NUM2INT(events),
+	};
 
-
-		// The file descriptor was already inserted into epoll.
-		duplicate = dup(descriptor);
-
-		if (duplicate == -1) {
-			rb_sys_fail("IO_Event_Selector_EPoll_io_wait:dup");
-		}
-
-		descriptor = duplicate;
-
-		rb_update_max_fd(descriptor);
-
-		result = epoll_ctl(data->descriptor, EPOLL_CTL_ADD, descriptor, &event);
-	}
+	int result = IO_Event_Selector_EPoll_Waiting_register(selector, io, descriptor, &waiting);
 
 	if (result == -1) {
-		// If we duplicated the file descriptor, ensure it's closed:
-		if (duplicate >= 0) {
-			close(duplicate);
-		}
-
 		if (errno == EPERM) {
-			IO_Event_Selector_queue_push(&data->backend, fiber);
-			IO_Event_Selector_yield(&data->backend);
+			IO_Event_Selector_queue_push(&selector->backend, fiber);
+			IO_Event_Selector_yield(&selector->backend);
 			return events;
 		}
 
-		rb_sys_fail("IO_Event_Selector_EPoll_io_wait:
+		rb_sys_fail("IO_Event_Selector_EPoll_io_wait:IO_Event_Selector_EPoll_Waiting_register");
 	}
 
 	struct io_wait_arguments io_wait_arguments = {
-		.
-		.
-		.duplicate = duplicate
+		.selector = selector,
+		.waiting = &waiting,
 	};
 
 	return rb_ensure(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_ensure, (VALUE)&io_wait_arguments);
@@ -411,12 +600,14 @@ VALUE io_read_loop(VALUE _arguments) {
 
 	size_t length = arguments->length;
 	size_t offset = arguments->offset;
+	size_t total = 0;
 
-
-
+	size_t maximum_size = size - offset;
+	while (maximum_size) {
 		ssize_t result = read(arguments->descriptor, (char*)base+offset, maximum_size);
 
 		if (result > 0) {
+			total += result;
 			offset += result;
 			if ((size_t)result >= length) break;
 			length -= result;
@@ -427,9 +618,11 @@ VALUE io_read_loop(VALUE _arguments) {
 		} else {
 			return rb_fiber_scheduler_io_result(-1, errno);
 		}
+
+		maximum_size = size - offset;
 	}
 
-	return rb_fiber_scheduler_io_result(
+	return rb_fiber_scheduler_io_result(total, 0);
 }
 
 static
@@ -499,16 +692,18 @@ VALUE io_write_loop(VALUE _arguments) {
 
 	size_t length = arguments->length;
 	size_t offset = arguments->offset;
+	size_t total = 0;
 
 	if (length > size) {
 		rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
 	}
 
-
-
+	size_t maximum_size = size - offset;
+	while (maximum_size) {
 		ssize_t result = write(arguments->descriptor, (char*)base+offset, maximum_size);
 
 		if (result > 0) {
+			total += result;
 			offset += result;
 			if ((size_t)result >= length) break;
 			length -= result;
@@ -519,9 +714,11 @@ VALUE io_write_loop(VALUE _arguments) {
 		} else {
 			return rb_fiber_scheduler_io_result(-1, errno);
 		}
+
+		maximum_size = size - offset;
 	}
 
-	return rb_fiber_scheduler_io_result(
+	return rb_fiber_scheduler_io_result(total, 0);
 };
 
 static
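Both the read and write loops now accumulate `total`, so the value handed to `rb_fiber_scheduler_io_result` reports the bytes transferred by this call rather than the final buffer offset. The same accumulate-until-satisfied pattern, written as a plain helper outside the gem (illustrative names, not the gem's API), is:

```c
#include <unistd.h>
#include <errno.h>
#include <stddef.h>

// Write at least `length` bytes from `base`, starting at `offset`, to `fd`.
// Returns the number of bytes written by this call, or -1 on error.
static ssize_t write_at_least(int fd, const char *base, size_t size, size_t offset, size_t length)
{
	size_t total = 0;
	size_t maximum_size = size - offset;

	while (maximum_size) {
		ssize_t result = write(fd, base + offset, maximum_size);

		if (result > 0) {
			total += result;
			offset += result;
			if ((size_t)result >= length) break;
			length -= result;
		} else if (result == 0) {
			break;
		} else {
			// A real caller would wait for writability on EAGAIN/EWOULDBLOCK.
			return -1;
		}

		maximum_size = size - offset;
	}

	return (ssize_t)total;
}
```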
@@ -601,7 +798,7 @@ int timeout_nonblocking(struct timespec * timespec) {
 }
 
 struct select_arguments {
-	struct IO_Event_Selector_EPoll *data;
+	struct IO_Event_Selector_EPoll *selector;
 
 	int count;
 	struct epoll_event events[EPOLL_MAX_EVENTS];
@@ -636,7 +833,7 @@ void * select_internal(void *_arguments) {
 	struct select_arguments * arguments = (struct select_arguments *)_arguments;
 
 #if defined(HAVE_EPOLL_PWAIT2)
-	arguments->count = epoll_pwait2(arguments->data->descriptor, arguments->events, EPOLL_MAX_EVENTS, arguments->timeout, NULL);
+	arguments->count = epoll_pwait2(arguments->selector->descriptor, arguments->events, EPOLL_MAX_EVENTS, arguments->timeout, NULL);
 
 	// Comment out the above line and enable the below lines to test ENOSYS code path.
 	// arguments->count = -1;
@@ -650,16 +847,16 @@ void * select_internal(void *_arguments) {
 	}
 #endif
 
-	arguments->count = epoll_wait(arguments->data->descriptor, arguments->events, EPOLL_MAX_EVENTS, make_timeout_ms(arguments->timeout));
+	arguments->count = epoll_wait(arguments->selector->descriptor, arguments->events, EPOLL_MAX_EVENTS, make_timeout_ms(arguments->timeout));
 
 	return NULL;
 }
 
 static
 void select_internal_without_gvl(struct select_arguments *arguments) {
-	arguments->data->blocked = 1;
+	arguments->selector->blocked = 1;
 	rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
-	arguments->data->blocked = 0;
+	arguments->selector->blocked = 0;
 
 	if (arguments->count == -1) {
 		if (errno != EINTR) {
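The blocking wait is still performed with the GVL released, so other Ruby threads keep running while the selector sleeps in `epoll_pwait2`/`epoll_wait`, and `blocked` is only set while that is the case. The general shape of that pattern — a hypothetical, self-contained variant rather than this file's exact code — is:

```c
#include <ruby.h>
#include <ruby/thread.h>
#include <sys/epoll.h>

struct wait_arguments {
	int epfd;
	struct epoll_event events[64];
	int count;
	int timeout_ms;
};

// Runs without the GVL; it must not touch Ruby objects or call Ruby APIs.
static void *wait_without_gvl(void *_arguments) {
	struct wait_arguments *arguments = _arguments;
	arguments->count = epoll_wait(arguments->epfd, arguments->events, 64, arguments->timeout_ms);
	return NULL;
}

static void blocking_wait(struct wait_arguments *arguments) {
	// RUBY_UBF_IO lets the VM interrupt the wait (signals, Thread#kill, etc.):
	rb_thread_call_without_gvl(wait_without_gvl, arguments, RUBY_UBF_IO, 0);
}
```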
@@ -683,14 +880,59 @@ void select_internal_with_gvl(struct select_arguments *arguments) {
 	}
 }
 
+static
+int IO_Event_Selector_EPoll_handle(struct IO_Event_Selector_EPoll *selector, const struct epoll_event *event)
+{
+	int descriptor = event->data.fd;
+
+	// This is the mask of all events that occured for the given descriptor:
+	enum IO_Event ready_events = events_from_epoll_flags(event->events);
+
+	struct IO_Event_Selector_EPoll_Descriptor *epoll_descriptor = IO_Event_Selector_EPoll_Descriptor_lookup(selector, descriptor);
+	struct IO_Event_List *list = &epoll_descriptor->list;
+	struct IO_Event_List *node = list->tail;
+	struct IO_Event_List saved = {NULL, NULL};
+
+	// Reset the events back to 0 so that we can re-arm if necessary:
+	epoll_descriptor->waiting_events = 0;
+
+	// It's possible (but unlikely) that the address of list will changing during iteration.
+	while (node != list) {
+		struct IO_Event_Selector_EPoll_Waiting *waiting = (struct IO_Event_Selector_EPoll_Waiting *)node;
+
+		// Compute the intersection of the events we are waiting for and the events that occured:
+		enum IO_Event matching_events = waiting->events & ready_events;
+
+		if (DEBUG) fprintf(stderr, "IO_Event_Selector_EPoll_handle: descriptor=%d, ready_events=%d, matching_events=%d\n", descriptor, ready_events, matching_events);
+
+		if (matching_events) {
+			IO_Event_List_append(node, &saved);
+
+			// Resume the fiber:
+			waiting->ready = matching_events;
+			IO_Event_Selector_fiber_transfer(waiting->fiber, 0, NULL);
+
+			node = saved.tail;
+			IO_Event_List_pop(&saved);
+		} else {
+			// We are still waiting for the events:
+			epoll_descriptor->waiting_events |= waiting->events;
+			node = node->tail;
+		}
+	}
+
+	return IO_Event_Selector_EPoll_Descriptor_update(selector, epoll_descriptor->io, descriptor, epoll_descriptor);
+}
+
+// TODO This function is not re-entrant and we should document and assert as such.
 VALUE IO_Event_Selector_EPoll_select(VALUE self, VALUE duration) {
-	struct IO_Event_Selector_EPoll *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
-	int ready = IO_Event_Selector_queue_flush(&data->backend);
+	int ready = IO_Event_Selector_queue_flush(&selector->backend);
 
 	struct select_arguments arguments = {
-		.data = data,
+		.selector = selector,
 		.storage = {
 			.tv_sec = 0,
 			.tv_nsec = 0
@@ -707,7 +949,7 @@ VALUE IO_Event_Selector_EPoll_select(VALUE self, VALUE duration) {
 	// 2. Didn't process any events from non-blocking select (above), and
 	// 3. There are no items in the ready list,
 	// then we can perform a blocking select.
-	if (!ready && !arguments.count && !data->backend.ready) {
+	if (!ready && !arguments.count && !selector->backend.ready) {
 		arguments.timeout = make_timeout(duration, &arguments.storage);
 
 		if (!timeout_nonblocking(arguments.timeout)) {
@@ -720,13 +962,10 @@ VALUE IO_Event_Selector_EPoll_select(VALUE self, VALUE duration) {
 		const struct epoll_event *event = &arguments.events[i];
 		if (DEBUG) fprintf(stderr, "-> ptr=%p events=%d\n", event->data.ptr, event->events);
 
-		if (event->data.
-
-			VALUE result = INT2NUM(event->events);
-
-			IO_Event_Selector_fiber_transfer(fiber, 1, &result);
+		if (event->data.fd >= 0) {
+			IO_Event_Selector_EPoll_handle(selector, event);
 		} else {
-			IO_Event_Interrupt_clear(&data->interrupt);
+			IO_Event_Interrupt_clear(&selector->interrupt);
 		}
 	}
 
@@ -734,12 +973,12 @@ VALUE IO_Event_Selector_EPoll_select(VALUE self, VALUE duration) {
 }
 
 VALUE IO_Event_Selector_EPoll_wakeup(VALUE self) {
-	struct IO_Event_Selector_EPoll *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, data);
+	struct IO_Event_Selector_EPoll *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
 
 	// If we are blocking, we can schedule a nop event to wake up the selector:
-	if (data->blocked) {
-		IO_Event_Interrupt_signal(&data->interrupt);
+	if (selector->blocked) {
+		IO_Event_Interrupt_signal(&selector->interrupt);
 
 		return Qtrue;
 	}
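`wakeup` only signals the interrupt when `selector->blocked` indicates the selector is actually parked inside the kernel wait, and the select loop treats any event whose `data.fd` is negative as that interrupt (see the `.data = {.fd = -1}` registration above). The interrupt object itself lives in `interrupt.h`, which is not part of this diff; a minimal eventfd-based version of the same idea, written independently of the gem, could be:

```c
#include <sys/eventfd.h>
#include <sys/epoll.h>
#include <stdint.h>
#include <unistd.h>

// Create an eventfd and register it with the epoll instance. It stays
// registered for the lifetime of the selector and is identified by
// data.fd = -1, the same convention the diff uses for its interrupt.
static int interrupt_open(int epfd) {
	int event_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);

	struct epoll_event event = {
		.events = EPOLLIN,
		.data = {.fd = -1},
	};
	epoll_ctl(epfd, EPOLL_CTL_ADD, event_fd, &event);

	return event_fd;
}

// Called from another thread: makes epoll_wait return immediately.
static void interrupt_signal(int event_fd) {
	uint64_t value = 1;
	write(event_fd, &value, sizeof(value));
}

// Called by the selector after waking up: drains the counter so the
// descriptor stops reporting readable.
static void interrupt_clear(int event_fd) {
	uint64_t value = 0;
	read(event_fd, &value, sizeof(value));
}
```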