io-event 1.15.1 → 1.16.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- checksums.yaml.gz.sig +0 -0
- data/ext/io/event/array.h +17 -34
- data/ext/io/event/selector/epoll.c +3 -11
- data/ext/io/event/selector/kqueue.c +3 -11
- data/ext/io/event/selector/selector.c +3 -3
- data/ext/io/event/selector/uring.c +168 -68
- data/ext/io/event/worker_pool.c +14 -5
- data/lib/io/event/debug/selector.rb +25 -0
- data/lib/io/event/version.rb +1 -1
- data/license.md +2 -0
- data/readme.md +14 -9
- data/releases.md +14 -0
- data.tar.gz.sig +0 -0
- metadata +4 -2
- metadata.gz.sig +0 -0
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 4928b16497387fabcf4eb19d7dfefe5840674e385b086db2f0df0a15367e4d09
+  data.tar.gz: 635ceb881767fc1d6fd876f39da295b9402971f3aaf0e5778ef022108abaf890
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 943e688606c2df3398c465103e1212143d2af98cf0b36f761f54ca4f9f627fe833992106c5b8c851fe61b7dba5b73dcf81978ec6db7f126af6532b25bd19815f
+  data.tar.gz: e3870c0635b7f3ea0fd01390f5401adb2cd99fc9e7afc326b05efbf870715c609a5cde47f45b0e326223c7caad752ae51258cbe6f92eac7a7e9b799e58de3f2a
checksums.yaml.gz.sig
CHANGED
Binary file
data/ext/io/event/array.h
CHANGED
@@ -5,8 +5,6 @@
 
 #include <ruby.h>
 #include <stdlib.h>
-#include <errno.h>
-#include <assert.h>
 
 static const size_t IO_EVENT_ARRAY_MAXIMUM_COUNT = SIZE_MAX / sizeof(void*);
 static const size_t IO_EVENT_ARRAY_DEFAULT_COUNT = 128;
@@ -28,26 +26,18 @@ struct IO_Event_Array {
 	void (*element_free)(void*);
 };
 
-
+// Initialise an empty array. Raises `NoMemoryError` if Ruby's allocator cannot satisfy the request.
+inline static void IO_Event_Array_initialize(struct IO_Event_Array *array, size_t count, size_t element_size)
 {
 	array->limit = 0;
 	array->element_size = element_size;
 	
 	if (count) {
-		array->base = (void**)
-
-		if (array->base == NULL) {
-			return -1;
-		}
-
+		array->base = (void**)xcalloc(count, sizeof(void*));
 		array->count = count;
-
-		return 1;
 	} else {
 		array->base = NULL;
 		array->count = 0;
-
-		return 0;
 	}
 }
 
@@ -72,24 +62,24 @@ inline static void IO_Event_Array_free(struct IO_Event_Array *array)
 		if (element) {
 			array->element_free(element);
 			
-
+			xfree(element);
 		}
 	}
 	
-
+	xfree(base);
 }
 
-
+// Grow the array so it can hold at least `count` slots. Raises `RangeError` if `count` exceeds the per-array maximum, or `NoMemoryError` if Ruby's allocator cannot satisfy the request. On success the array's existing contents are preserved and any newly added slots are zero-initialised.
+inline static void IO_Event_Array_resize(struct IO_Event_Array *array, size_t count)
 {
 	if (count <= array->count) {
 		// Already big enough:
-		return
+		return;
 	}
 	
 	if (count > IO_EVENT_ARRAY_MAXIMUM_COUNT) {
-
-		return -1;
+		rb_raise(rb_eRangeError, "Array size exceeds maximum count!");
 	}
 	
 	size_t new_count = array->count;
@@ -107,31 +97,24 @@ inline static int IO_Event_Array_resize(struct IO_Event_Array *array, size_t cou
 		new_count *= 2;
 	}
 	
-
-
-	if (new_base == NULL) {
-		return -1;
-	}
+	// `xrealloc2` checks `new_count * sizeof(void*)` for overflow and raises `NoMemoryError` on allocation failure, so no NULL check is required.
+	void **new_base = (void**)xrealloc2(array->base, new_count, sizeof(void*));
 	
 	// Zero out the new memory:
 	memset(new_base + array->count, 0, (new_count - array->count) * sizeof(void*));
 	
 	array->base = (void**)new_base;
 	array->count = new_count;
-
-	// Resizing sucessful:
-	return 1;
 }
 
+// Look up the element at the given index, allocating it lazily on first access. Raises if the array cannot be grown or the element cannot be allocated.
 inline static void* IO_Event_Array_lookup(struct IO_Event_Array *array, size_t index)
 {
 	size_t count = index + 1;
 	
-	// Resize the array if necessary:
+	// Resize the array if necessary (may raise):
 	if (count > array->count) {
-
-		return NULL;
-	}
+		IO_Event_Array_resize(array, count);
 	}
 	
 	// Get the element:
@@ -139,8 +122,8 @@ inline static void* IO_Event_Array_lookup(struct IO_Event_Array *array, size_t i
 	
 	// Allocate the element if it doesn't exist:
 	if (*element == NULL) {
-
-
+		// Ruby's allocator triggers GC on memory pressure and raises `NoMemoryError` on failure, so no NULL check is required.
+		*element = xmalloc(array->element_size);
 		
 		if (array->element_initialize) {
 			array->element_initialize(*element);
@@ -166,7 +149,7 @@ inline static void IO_Event_Array_truncate(struct IO_Event_Array *array, size_t
 		void **element = array->base + i;
 		if (*element) {
 			array->element_free(*element);
-
+			xfree(*element);
 			*element = NULL;
 		}
 	}
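The array.h changes above swap raw `malloc`/`realloc` for Ruby's GC-aware allocators, so out-of-memory becomes an exception rather than a `NULL` return that callers must check. The sketch below illustrates that calling convention in isolation; `grow_pointer_table` is a hypothetical helper written for this review, not code from the gem, and it assumes a Ruby C extension build where `ruby.h` is available.

```c
#include <ruby.h>
#include <string.h>

// Hypothetical helper mirroring IO_Event_Array_resize: grow a pointer table to
// `new_count` slots and zero the newly added tail. There is no error return:
// xrealloc2 either succeeds or raises NoMemoryError (after giving the GC a
// chance to reclaim memory), and it also checks new_count * sizeof(void*)
// for overflow.
static void grow_pointer_table(void ***base, size_t *count, size_t new_count)
{
	void **new_base = (void**)xrealloc2(*base, new_count, sizeof(void*));
	
	// Zero-initialise the slots that did not exist before:
	memset(new_base + *count, 0, (new_count - *count) * sizeof(void*));
	
	*base = new_base;
	*count = new_count;
}
```

Because allocation failures propagate as Ruby exceptions, the error-code plumbing (`return -1`, `rb_sys_fail` in the callers) disappears, as the epoll, kqueue and uring hunks below show.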
data/ext/io/event/selector/epoll.c
CHANGED
@@ -175,13 +175,8 @@ static const rb_data_type_t IO_Event_Selector_EPoll_Type = {
 inline static
 struct IO_Event_Selector_EPoll_Descriptor * IO_Event_Selector_EPoll_Descriptor_lookup(struct IO_Event_Selector_EPoll *selector, int descriptor)
 {
-
-
-	if (!epoll_descriptor) {
-		rb_sys_fail("IO_Event_Selector_EPoll_Descriptor_lookup:IO_Event_Array_lookup");
-	}
-
-	return epoll_descriptor;
+	// `IO_Event_Array_lookup` raises on allocation failure, so the returned pointer is always non-NULL.
+	return IO_Event_Array_lookup(&selector->descriptors, descriptor);
 }
 
 static inline
@@ -324,10 +319,7 @@ VALUE IO_Event_Selector_EPoll_allocate(VALUE self) {
 	
 	selector->descriptors.element_initialize = IO_Event_Selector_EPoll_Descriptor_initialize;
 	selector->descriptors.element_free = IO_Event_Selector_EPoll_Descriptor_free;
-
-	if (result < 0) {
-		rb_sys_fail("IO_Event_Selector_EPoll_allocate:IO_Event_Array_initialize");
-	}
+	IO_Event_Array_initialize(&selector->descriptors, IO_EVENT_ARRAY_DEFAULT_COUNT, sizeof(struct IO_Event_Selector_EPoll_Descriptor));
 	
 	return instance;
 }
data/ext/io/event/selector/kqueue.c
CHANGED
@@ -174,13 +174,8 @@ static const rb_data_type_t IO_Event_Selector_KQueue_Type = {
 inline static
 struct IO_Event_Selector_KQueue_Descriptor * IO_Event_Selector_KQueue_Descriptor_lookup(struct IO_Event_Selector_KQueue *selector, uintptr_t descriptor)
 {
-
-
-	if (!kqueue_descriptor) {
-		rb_sys_fail("IO_Event_Selector_KQueue_Descriptor_lookup:IO_Event_Array_lookup");
-	}
-
-	return kqueue_descriptor;
+	// `IO_Event_Array_lookup` raises on allocation failure, so the returned pointer is always non-NULL.
+	return IO_Event_Array_lookup(&selector->descriptors, descriptor);
 }
 
 inline static
@@ -299,10 +294,7 @@ VALUE IO_Event_Selector_KQueue_allocate(VALUE self) {
 	selector->descriptors.element_initialize = IO_Event_Selector_KQueue_Descriptor_initialize;
 	selector->descriptors.element_free = IO_Event_Selector_KQueue_Descriptor_free;
 	
-
-	if (result < 0) {
-		rb_sys_fail("IO_Event_Selector_KQueue_allocate:IO_Event_Array_initialize");
-	}
+	IO_Event_Array_initialize(&selector->descriptors, IO_EVENT_ARRAY_DEFAULT_COUNT, sizeof(struct IO_Event_Selector_KQueue_Descriptor));
 	
 	return instance;
 }
data/ext/io/event/selector/selector.c
CHANGED
@@ -246,8 +246,8 @@ VALUE IO_Event_Selector_raise(struct IO_Event_Selector *backend, int argc, VALUE
 
 void IO_Event_Selector_ready_push(struct IO_Event_Selector *backend, VALUE fiber)
 {
-
-
+	// Ruby's allocator triggers GC on memory pressure and raises `NoMemoryError` on failure, so no NULL check is required.
+	struct IO_Event_Selector_Queue *waiting = xmalloc(sizeof(struct IO_Event_Selector_Queue));
 	
 	waiting->head = NULL;
 	waiting->tail = NULL;
@@ -268,7 +268,7 @@ void IO_Event_Selector_ready_pop(struct IO_Event_Selector *backend, struct IO_Ev
 	if (ready->flags & IO_EVENT_SELECTOR_QUEUE_INTERNAL) {
 		// This means that the fiber was added to the ready queue by the selector itself, and we need to transfer control to it, but before we do that, we need to remove it from the queue, as there is no expectation that returning from `transfer` will remove it.
 		queue_pop(backend, ready);
-
+		xfree(ready);
 	} else if (ready->flags & IO_EVENT_SELECTOR_QUEUE_FIBER) {
 		// This means the fiber added itself to the ready queue, and we need to transfer control back to it. Transferring control back to the fiber will call `queue_pop` and remove it from the queue.
 	} else {
data/ext/io/event/selector/uring.c
CHANGED
@@ -8,9 +8,12 @@
 
 #include <liburing.h>
 #include <poll.h>
+#include <stdbool.h>
 #include <stdint.h>
 #include <time.h>
 
+#include "../interrupt.h"
+
 #include "pidfd.c"
 
 #include <linux/version.h>
@@ -33,9 +36,21 @@ struct IO_Event_Selector_URing
 
 	// Flag indicating whether the selector is currently blocked in a system call.
 	// Set to 1 when blocked in io_uring_wait_cqe_timeout() without GVL, 0 otherwise.
-	// Used by wakeup() to determine if an interrupt signal is needed.
 	int blocked;
 
+	// Interrupt used to wake the selector from another thread without touching the ring's SQ.
+	// This allows IORING_SETUP_SINGLE_ISSUER: only the owner thread ever submits SQEs.
+	// Uses eventfd on Linux, pipe fallback elsewhere.
+	struct IO_Event_Interrupt interrupt;
+
+	// Whether an async read on interrupt is currently pending in the ring.
+	// The read is re-submitted before each blocking wait when not registered.
+	int wakeup_registered;
+
+	// Buffer for the pending async read on the interrupt descriptor.
+	// Must remain valid for the lifetime of the in-flight SQE.
+	uint64_t wakeup_value;
+
 	struct timespec idle_duration;
 
 	struct IO_Event_Array completions;
@@ -101,6 +116,12 @@ void IO_Event_Selector_URing_Type_compact(void *_selector)
 static
 void close_internal(struct IO_Event_Selector_URing *selector)
 {
+	if (selector->interrupt.descriptor >= 0) {
+		IO_Event_Interrupt_close(&selector->interrupt);
+		selector->interrupt.descriptor = -1;
+		selector->wakeup_registered = 0;
+	}
+
 	if (selector->ring.ring_fd >= 0) {
 		io_uring_queue_exit(&selector->ring);
 		selector->ring.ring_fd = -1;
@@ -220,15 +241,14 @@ VALUE IO_Event_Selector_URing_allocate(VALUE self) {
 
 	selector->pending = 0;
 	selector->blocked = 0;
+	selector->interrupt.descriptor = -1;
+	selector->wakeup_registered = 0;
 
 	IO_Event_List_initialize(&selector->free_list);
 
 	selector->completions.element_initialize = IO_Event_Selector_URing_Completion_initialize;
 	selector->completions.element_free = IO_Event_Selector_URing_Completion_free;
-
-	if (result < 0) {
-		rb_sys_fail("IO_Event_Selector_URing_allocate:IO_Event_Array_initialize");
-	}
+	IO_Event_Array_initialize(&selector->completions, IO_EVENT_ARRAY_DEFAULT_COUNT, sizeof(struct IO_Event_Selector_URing_Completion));
 
 	return instance;
 }
@@ -240,7 +260,42 @@ VALUE IO_Event_Selector_URing_initialize(VALUE self, VALUE loop) {
 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
 	IO_Event_Selector_initialize(&selector->backend, self, loop);
-
+
+	unsigned int flags = 0;
+	// IORING_SETUP_SINGLE_ISSUER (kernel 6.0+): only the owner thread submits SQEs.
+	// Safe here because wakeup() uses eventfd (no ring access from other threads).
+	#ifdef IORING_SETUP_SINGLE_ISSUER
+	flags |= IORING_SETUP_SINGLE_ISSUER;
+	#endif
+	// IORING_SETUP_DEFER_TASKRUN (kernel 6.1+, requires SINGLE_ISSUER): defer io_uring
+	// task work to the application thread rather than a kernel thread, reducing
+	// cross-CPU signaling overhead.
+	#ifdef IORING_SETUP_DEFER_TASKRUN
+	flags |= IORING_SETUP_DEFER_TASKRUN;
+	#endif
+	// IORING_SETUP_TASKRUN_FLAG (kernel 5.19+, always available alongside
+	// DEFER_TASKRUN): the kernel surfaces IORING_SQ_TASKRUN in sq.flags whenever
+	// task work is pending, so select() can skip the io_uring_get_events()
+	// syscall when there is nothing deferred to flush.
+	#ifdef IORING_SETUP_TASKRUN_FLAG
+	flags |= IORING_SETUP_TASKRUN_FLAG;
+	#endif
+	// IORING_SETUP_SUBMIT_ALL (kernel 5.18+): keep processing the rest of the SQE
+	// batch even when one fails, reducing the frequency of short submits.
+	#ifdef IORING_SETUP_SUBMIT_ALL
+	flags |= IORING_SETUP_SUBMIT_ALL;
+	#endif
+
+	int result = io_uring_queue_init(URING_ENTRIES, &selector->ring, flags);
+
+	#ifdef IORING_SETUP_SUBMIT_ALL
+	if (result == -EINVAL) {
+		// IORING_SETUP_SUBMIT_ALL was added in Linux 5.18; retry without it.
+		if (DEBUG) fprintf(stderr, "IO_Event_Selector_URing_initialize: no IORING_SETUP_SUBMIT_ALL\n");
+		flags &= ~IORING_SETUP_SUBMIT_ALL;
+		result = io_uring_queue_init(URING_ENTRIES, &selector->ring, flags);
+	}
+	#endif
 
 	if (result < 0) {
 		rb_syserr_fail(-result, "IO_Event_Selector_URing_initialize:io_uring_queue_init");
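The initializer above opportunistically enables newer io_uring setup flags and retries when the running kernel rejects them. The standalone probe below demonstrates the same pattern outside the gem; it is an illustration for this review, assumes only that liburing is installed, and builds with `cc probe.c -luring`.

```c
#include <liburing.h>
#include <errno.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	unsigned int flags = 0;
	
	// Request the optional setup flag only when the liburing headers define it:
#ifdef IORING_SETUP_SUBMIT_ALL
	flags |= IORING_SETUP_SUBMIT_ALL;
#endif
	
	int result = io_uring_queue_init(8, &ring, flags);
	
#ifdef IORING_SETUP_SUBMIT_ALL
	if (result == -EINVAL) {
		// Older kernels reject unknown setup flags with -EINVAL; retry without the optional flag:
		flags &= ~IORING_SETUP_SUBMIT_ALL;
		result = io_uring_queue_init(8, &ring, flags);
	}
#endif
	
	if (result < 0) {
		fprintf(stderr, "io_uring_queue_init failed: %d\n", result);
		return 1;
	}
	
	printf("ring initialised with flags 0x%x\n", flags);
	io_uring_queue_exit(&ring);
	return 0;
}
```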
@@ -248,6 +303,16 @@ VALUE IO_Event_Selector_URing_initialize(VALUE self, VALUE loop) {
 
 	rb_update_max_fd(selector->ring.ring_fd);
 
+	// Interrupt for cross-thread wakeup: another thread calls signal(); the owner
+	// thread submits an async read before each blocking wait so the ring wakes up
+	// without the waking thread ever touching the SQ.
+	IO_Event_Interrupt_open(&selector->interrupt);
+	if (selector->interrupt.descriptor < 0) {
+		io_uring_queue_exit(&selector->ring);
+		selector->ring.ring_fd = -1;
+		rb_sys_fail("IO_Event_Selector_URing_initialize:IO_Event_Interrupt_open");
+	}
+
 	return self;
 }
 
@@ -353,59 +418,46 @@ void IO_Event_Selector_URing_dump_completion_queue(struct IO_Event_Selector_URin
 	}
 }
 
-// Flush the submission queue
+// Flush the submission queue, optionally yielding if unsuccessful.
 static
-int
-
-	if (DEBUG) fprintf(stderr, "io_uring_submit_flush(pending=%ld)\n", selector->pending);
-
-	// Try to submit:
+int io_uring_submit_all(struct IO_Event_Selector_URing *selector, bool yield) {
+	while (selector->pending > 0) {
 		int result = io_uring_submit(&selector->ring);
 		
 		if (result >= 0) {
-			//
-			selector->pending
-		} else if (result
-
+			// io_uring_submit() returns the number of submitted SQEs
+			selector->pending -= result;
+		} else if (result == -EBUSY || result == -EAGAIN) {
+			if (yield) IO_Event_Selector_yield(&selector->backend);
+		} else {
+			rb_syserr_fail(-result, "io_uring_submit_all:io_uring_submit");
+			return result;
 		}
-
-		return result;
 	}
-
-	if (DEBUG)
-		IO_Event_Selector_URing_dump_completion_queue(selector);
-	}
-
+
+	if (DEBUG) IO_Event_Selector_URing_dump_completion_queue(selector);
 	return 0;
 }
 
+// Flush the submission queue if pending operations are present.
+static
+int io_uring_submit_flush(struct IO_Event_Selector_URing *selector) {
+	if (DEBUG) fprintf(stderr, "io_uring_submit_now(pending=%ld)\n", selector->pending);
+
+	return io_uring_submit_all(selector, false);
+}
+
 // Immediately flush the submission queue, yielding to the event loop if it was not successful.
 static
 int io_uring_submit_now(struct IO_Event_Selector_URing *selector) {
 	if (DEBUG) fprintf(stderr, "io_uring_submit_now(pending=%ld)\n", selector->pending);
 
-
-	int result = io_uring_submit(&selector->ring);
-
-	if (result >= 0) {
-		selector->pending = 0;
-		if (DEBUG) IO_Event_Selector_URing_dump_completion_queue(selector);
-		return result;
-	}
-
-	if (result == -EBUSY || result == -EAGAIN) {
-		IO_Event_Selector_yield(&selector->backend);
-	} else {
-		rb_syserr_fail(-result, "io_uring_submit_now:io_uring_submit");
-	}
-	}
+	return io_uring_submit_all(selector, true);
 }
 
 // Submit a pending operation. This does not submit the operation immediately, but instead defers it to the next call to `io_uring_submit_flush` or `io_uring_submit_now`. This is useful for operations that are not urgent, but should be used with care as it can lead to a deadlock if the submission queue is not flushed.
 static
 void io_uring_submit_pending(struct IO_Event_Selector_URing *selector) {
-	selector->pending += 1;
-
 	if (DEBUG) fprintf(stderr, "io_uring_submit_pending(ring=%p, pending=%ld)\n", &selector->ring, selector->pending);
 }
 
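The new `io_uring_submit_all` loop above decrements the pending count by whatever the kernel actually accepted instead of resetting it to zero. The self-contained snippet below (plain liburing, not gem code, built with `cc nops.c -luring`) shows that accounting on a small batch of NOP operations.

```c
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	if (io_uring_queue_init(8, &ring, 0) < 0) return 1;
	
	// Queue a small batch of NOP operations without submitting them yet:
	unsigned pending = 0;
	for (int i = 0; i < 4; i++) {
		struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
		if (!sqe) break;
		io_uring_prep_nop(sqe);
		pending += 1;
	}
	
	// Submit until everything queued has been accepted. io_uring_submit()
	// returns how many SQEs were consumed on this call, which can be fewer
	// than were queued, so the count is decremented rather than reset:
	while (pending > 0) {
		int result = io_uring_submit(&ring);
		if (result < 0) {
			fprintf(stderr, "io_uring_submit failed: %d\n", result);
			break;
		}
		pending -= result;
	}
	
	io_uring_queue_exit(&ring);
	return 0;
}
```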
@@ -418,7 +470,8 @@ struct io_uring_sqe * io_get_sqe(struct IO_Event_Selector_URing *selector) {
 
 		sqe = io_uring_get_sqe(&selector->ring);
 	}
-
+
+	selector->pending += 1;
 	return sqe;
 }
 
@@ -997,12 +1050,13 @@ VALUE IO_Event_Selector_URing_io_pwrite(VALUE self, VALUE fiber, VALUE io, VALUE
 
 static const int ASYNC_CLOSE = 1;
 
-VALUE IO_Event_Selector_URing_io_close(VALUE self, VALUE
+VALUE IO_Event_Selector_URing_io_close(VALUE self, VALUE _descriptor) {
 	struct IO_Event_Selector_URing *selector = NULL;
 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
-
-
+	// Ruby's fiber scheduler `io_close` hook is invoked with a raw integer file descriptor (Ruby 4.0+); it does not pass the `IO` object.
+	int descriptor = RB_NUM2INT(_descriptor);
+
 	if (ASYNC_CLOSE) {
 		struct io_uring_sqe *sqe = io_get_sqe(selector);
 		io_uring_prep_close(sqe, descriptor);
@@ -1015,8 +1069,8 @@ VALUE IO_Event_Selector_URing_io_close(VALUE self, VALUE io) {
 	} else {
 		close(descriptor);
 	}
-
-	// We don't wait for the result of close since it has no use in
+
+	// We don't wait for the result of close since it has no use in practice:
 	return Qtrue;
 }
 
@@ -1071,11 +1125,24 @@ void * select_internal(void *_arguments) {
 
 static
 int select_internal_without_gvl(struct select_arguments *arguments) {
-
+	struct IO_Event_Selector_URing *selector = arguments->selector;
+
+	// Submit an async read on the wakeup eventfd before releasing the GVL.
+	// When wakeup() writes to the fd the read completes, consuming the counter
+	// atomically — no separate poll + drain step required.
+	// The address of the interrupt struct serves as a unique sentinel in user_data.
+	if (!selector->wakeup_registered) {
+		struct io_uring_sqe *sqe = io_get_sqe(selector);
+		io_uring_prep_read(sqe, IO_Event_Interrupt_descriptor(&selector->interrupt), &selector->wakeup_value, sizeof(selector->wakeup_value), 0);
+		io_uring_sqe_set_data(sqe, &selector->interrupt);
+		selector->wakeup_registered = 1;
+	}
 
-
+	io_uring_submit_flush(selector);
+
+	selector->blocked = 1;
 	rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
-
+	selector->blocked = 0;
 
 	if (arguments->result == -ETIME) {
 		arguments->result = 0;
@@ -1114,6 +1181,14 @@ unsigned select_process_completions(struct IO_Event_Selector_URing *selector) {
 			continue;
 		}
 		
+		// Interrupt read completion — the read already consumed the counter.
+		// Clear the flag so the next blocking wait re-submits the read.
+		if (io_uring_cqe_get_data(cqe) == &selector->interrupt) {
+			selector->wakeup_registered = 0;
+			io_uring_cq_advance(ring, 1);
+			continue;
+		}
+
 		struct IO_Event_Selector_URing_Completion *completion = (void*)cqe->user_data;
 		struct IO_Event_Selector_URing_Waiting *waiting = completion->waiting;
 		
@@ -1156,6 +1231,25 @@ VALUE IO_Event_Selector_URing_select(VALUE self, VALUE duration) {
 	// Flush any pending events:
 	io_uring_submit_flush(selector);
 	
+	#ifdef IORING_SETUP_DEFER_TASKRUN
+	// With DEFER_TASKRUN the kernel holds completions as "deferred task work"
+	// rather than placing them directly into the CQ. We need to flush that work
+	// into the CQ so the non-blocking select_process_completions below can see
+	// it. With TASKRUN_FLAG enabled the kernel sets IORING_SQ_TASKRUN in
+	// sq.flags whenever task work is pending; a relaxed atomic load is enough
+	// to check, and we only pay for an io_uring_enter syscall (via
+	// io_uring_get_events) when there is actually deferred work to flush.
+	if (selector->ring.flags & IORING_SETUP_DEFER_TASKRUN) {
+		#ifdef IORING_SETUP_TASKRUN_FLAG
+		unsigned sq_flags = __atomic_load_n(selector->ring.sq.kflags, __ATOMIC_RELAXED);
+		if (sq_flags & IORING_SQ_TASKRUN)
+		#endif
+		{
+			io_uring_get_events(&selector->ring);
+		}
+	}
+	#endif
+
 	int ready = IO_Event_Selector_ready_flush(&selector->backend);
 	
 	int result = select_process_completions(selector);
@@ -1199,25 +1293,10 @@ VALUE IO_Event_Selector_URing_wakeup(VALUE self) {
 	struct IO_Event_Selector_URing *selector = NULL;
 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 	
-	//
+	// Wake the selector by signalling the interrupt. This is safe from any thread
+	// and never touches the ring's SQ, which is required for IORING_SETUP_SINGLE_ISSUER.
 	if (selector->blocked) {
-
-
-		while (true) {
-			sqe = io_uring_get_sqe(&selector->ring);
-			if (sqe) break;
-
-			rb_thread_schedule();
-
-			// It's possible we became unblocked already, so we can assume the selector has already cycled at least once:
-			if (!selector->blocked) return Qfalse;
-		}
-
-		io_uring_prep_nop(sqe);
-		// If you don't set this line, the SQE will eventually be recycled and have valid user selector which can cause odd behaviour:
-		io_uring_sqe_set_data(sqe, NULL);
-		io_uring_submit(&selector->ring);
-
+		IO_Event_Interrupt_signal(&selector->interrupt);
 		return Qtrue;
 	}
 	
@@ -1228,7 +1307,28 @@ VALUE IO_Event_Selector_URing_wakeup(VALUE self) {
 
 static int IO_Event_Selector_URing_supported_p(void) {
 	struct io_uring ring;
-
+
+	unsigned int flags = 0;
+	#ifdef IORING_SETUP_SINGLE_ISSUER
+	flags |= IORING_SETUP_SINGLE_ISSUER;
+	#endif
+	#ifdef IORING_SETUP_DEFER_TASKRUN
+	flags |= IORING_SETUP_DEFER_TASKRUN;
+	#endif
+	#ifdef IORING_SETUP_TASKRUN_FLAG
+	flags |= IORING_SETUP_TASKRUN_FLAG;
+	#endif
+	#ifdef IORING_SETUP_SUBMIT_ALL
+	flags |= IORING_SETUP_SUBMIT_ALL;
+	#endif
+	int result = io_uring_queue_init(32, &ring, flags);
+
+	#ifdef IORING_SETUP_SUBMIT_ALL
+	if (result == -EINVAL) {
+		flags &= ~IORING_SETUP_SUBMIT_ALL;
+		result = io_uring_queue_init(32, &ring, flags);
+	}
+	#endif
 	
 	if (result < 0) {
 		rb_warn("io_uring_queue_init() was available at compile time but failed at run time: %s\n", strerror(-result));
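The new uring.c wakeup path boils down to this: the selector owns an eventfd, registers one asynchronous read on it before blocking, and any other thread wakes it by writing to that descriptor, never touching the submission queue. The standalone program below reproduces the mechanism with plain liburing and pthreads; it is an illustration written for this review, not code extracted from the gem, and builds with `cc wakeup.c -luring -lpthread` on Linux.

```c
#include <liburing.h>
#include <sys/eventfd.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int wakeup_fd;

// Another thread wakes the selector by writing to the eventfd; it never
// touches the ring's submission queue, so the ring stays single-issuer.
static void *waker(void *arg)
{
	(void)arg;
	usleep(100000);
	uint64_t one = 1;
	if (write(wakeup_fd, &one, sizeof(one)) < 0) perror("write");
	return NULL;
}

int main(void)
{
	struct io_uring ring;
	uint64_t wakeup_value = 0;

	wakeup_fd = eventfd(0, EFD_CLOEXEC);
	if (wakeup_fd < 0 || io_uring_queue_init(8, &ring, 0) < 0) return 1;

	// The owner thread registers an async read on the eventfd before blocking.
	// The read both wakes the wait and consumes the counter in one step.
	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, wakeup_fd, &wakeup_value, sizeof(wakeup_value), 0);
	io_uring_sqe_set_data(sqe, &wakeup_fd); // sentinel identifying the wakeup read
	io_uring_submit(&ring);

	pthread_t thread;
	pthread_create(&thread, NULL, waker, NULL);

	struct io_uring_cqe *cqe;
	io_uring_wait_cqe(&ring, &cqe); // blocks until the waker writes
	if (io_uring_cqe_get_data(cqe) == &wakeup_fd) {
		printf("woken: counter=%llu\n", (unsigned long long)wakeup_value);
	}
	io_uring_cqe_seen(&ring, cqe);

	pthread_join(thread, NULL);
	io_uring_queue_exit(&ring);
	close(wakeup_fd);
	return 0;
}
```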
data/ext/io/event/worker_pool.c
CHANGED
@@ -91,11 +91,20 @@ static void worker_pool_mark(void *ptr)
 	struct IO_Event_WorkerPool *pool = (struct IO_Event_WorkerPool *)ptr;
 	struct IO_Event_WorkerPool_Worker *worker = pool->workers;
 	while (worker) {
-		struct IO_Event_WorkerPool_Worker *next = worker->next;
 		// We need to mark the thread even though its marked through the VM's ractors because we call `join`
 		// on them after their completion. They could be freed by then.
-
-		worker = next;
+		rb_gc_mark_movable(worker->thread);
+		worker = worker->next;
+	}
+}
+
+static void worker_pool_compact(void *ptr)
+{
+	struct IO_Event_WorkerPool *pool = (struct IO_Event_WorkerPool *)ptr;
+	struct IO_Event_WorkerPool_Worker *worker = pool->workers;
+	while (worker) {
+		worker->thread = rb_gc_location(worker->thread);
+		worker = worker->next;
 	}
 }
 
@@ -107,8 +116,8 @@ static size_t worker_pool_size(const void *ptr) {
 // Ruby TypedData structures
 static const rb_data_type_t IO_Event_WorkerPool_type = {
 	"IO::Event::WorkerPool",
-	{worker_pool_mark, worker_pool_free, worker_pool_size,},
-	0, 0, RUBY_TYPED_FREE_IMMEDIATELY
+	{worker_pool_mark, worker_pool_free, worker_pool_size, worker_pool_compact},
+	0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
 };
 
 // Helper function to enqueue work (must be called with mutex held)
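The worker pool change pairs a `dmark` callback that uses `rb_gc_mark_movable` with a `dcompact` callback that re-reads each reference through `rb_gc_location`, which is the standard recipe for compaction-safe TypedData objects in a Ruby C extension. Below is a minimal sketch of that pairing using a made-up `Holder` struct rather than the gem's types; it assumes only `ruby.h`.

```c
#include <ruby.h>

// A made-up TypedData object holding a single Ruby VALUE.
struct Holder {
	VALUE object;
};

static void holder_mark(void *ptr)
{
	struct Holder *holder = ptr;
	// Movable mark: the GC may relocate `object` during compaction.
	rb_gc_mark_movable(holder->object);
}

static void holder_compact(void *ptr)
{
	struct Holder *holder = ptr;
	// Called after compaction: fetch the (possibly new) location of the object.
	holder->object = rb_gc_location(holder->object);
}

static void holder_free(void *ptr)
{
	xfree(ptr);
}

// Because the type is WB-protected, real code must store new references with
// RB_OBJ_WRITE(self, &holder->object, value) so the write barrier fires.
static const rb_data_type_t Holder_type = {
	"Holder",
	{holder_mark, holder_free, 0, holder_compact},
	0, 0, RUBY_TYPED_FREE_IMMEDIATELY | RUBY_TYPED_WB_PROTECTED
};

static VALUE holder_allocate(VALUE klass)
{
	struct Holder *holder;
	VALUE instance = TypedData_Make_Struct(klass, struct Holder, &Holder_type, holder);
	holder->object = Qnil;
	return instance;
}
```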
data/lib/io/event/debug/selector.rb
CHANGED
@@ -10,6 +10,17 @@ module IO::Event
 	#
 	# You can enable this in the default selector by setting the `IO_EVENT_DEBUG_SELECTOR` environment variable. In addition, you can log all selector operations to a file by setting the `IO_EVENT_DEBUG_SELECTOR_LOG` environment variable. This is useful for debugging and understanding the behavior of the event loop.
 	class Selector
+		# Forwarders for optional selector hooks that not every backing selector implements (e.g. `io_close` is only provided by `URing`). Each method here is mixed into the wrapper's singleton class only when the wrapped selector actually defines a method of the same name, so feature detection via `respond_to?` continues to reflect the real backend.
+		module Forwarders
+			# Close a file descriptor, forwarded to the underlying selector. Ruby invokes this hook with a raw integer descriptor (Ruby 4.0+).
+			#
+			# @parameter descriptor [Integer] The raw file descriptor being closed.
+			def io_close(descriptor)
+				log("Closing file descriptor #{descriptor}")
+				@selector.io_close(descriptor)
+			end
+		end
+
 		# Wrap the given selector with debugging.
 		#
 		# @parameter selector [Selector] The selector to wrap.
@@ -40,6 +51,20 @@ module IO::Event
 			end
 			
 			@log = log
+			
+			install_optional_forwarders(selector)
+		end
+		
+		private def install_optional_forwarders(selector)
+			forwarders = nil
+			
+			Forwarders.instance_methods(false).each do |name|
+				next unless selector.class.method_defined?(name)
+				forwarders ||= Module.new
+				forwarders.define_method(name, Forwarders.instance_method(name))
+			end
+			
+			singleton_class.include(forwarders) if forwarders
 		end
 		
 		# The idle duration of the underlying selector.
data/lib/io/event/version.rb
CHANGED
data/license.md
CHANGED
@@ -17,6 +17,8 @@ Copyright, 2026, by William T. Nelson.
 Copyright, 2026, by Stan Hu.
 Copyright, 2026, by John Hawthorn.
 Copyright, 2026, by Italo Brandão.
+Copyright, 2026, by Fletcher Dares.
+Copyright, 2026, by Tavian Barnes.
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
data/readme.md
CHANGED
@@ -18,6 +18,20 @@ Please see the [project documentation](https://socketry.github.io/io-event/) for
 
 Please see the [project releases](https://socketry.github.io/io-event/releases/index) for all releases.
 
+### v1.16.0
+
+- Use `eventfd` for `URing` cross-thread wakeup, and enable `IORING_SETUP_SINGLE_ISSUER`, `IORING_SETUP_DEFER_TASKRUN`, and `IORING_SETUP_TASKRUN_FLAG`. The waking thread now signals via `eventfd` rather than submitting a `NOP` SQE, which unlocks the single-issuer optimisation, defers task work to the application thread, and lets `select()` skip the `io_uring_get_events()` syscall when no task work is pending.
+- Add support for the `io_close` fiber-scheduler hook (Ruby 4.0+). The `URing` selector performs the close asynchronously via the ring; the `Debug::Selector` and `TestScheduler` wrappers forward to the underlying selector when supported.
+- Improve `WorkerPool` GC compaction support and add proper write barriers, fixing potential use-after-free under compacting GC.
+- Keep blocked scheduler fibers alive during GC by registering them as roots in `TestScheduler#block`, preventing premature collection and the resulting use-after-free crash on resume.
+- Use Ruby's `xmalloc` / `xcalloc` / `xrealloc2` / `xfree` for all internal selector allocations (the per-fiber ready-queue entries in `IO_Event_Selector_ready_push`, and both the backing array and per-element allocations in `IO_Event_Array`). Previously a raw `malloc` paired with a debug-build-only `assert(...)` would silently dereference `NULL` and crash in release builds under memory pressure; the Ruby allocators trigger a GC sweep on pressure and raise `NoMemoryError` / `RangeError` on real failure, so the `-1` return-code paths through `IO_Event_Array_initialize` / `_resize` / `_lookup` and their callers in `epoll.c` / `kqueue.c` / `uring.c` are removed in favour of straight exception propagation.
+- Correctly handle short `io_uring_submit()` results in the `URing` selector. `io_uring_submit()` returns the number of SQEs actually accepted by the kernel and can be short (SQE prep errors, `ENOMEM`, transient `EAGAIN`); the old accounting reset `pending = 0` on any success and silently lost track of unsubmitted SQEs.
+- Enable `IORING_SETUP_SUBMIT_ALL` (kernel 5.18+) on the `URing` selector so the kernel keeps processing the rest of an SQE batch past individual errors, reducing the frequency of short submits in practice.
+
+### v1.15.1
+
+- Simplify closed-IO handling in the `Select` selector: rely on Ruby 4's `rb_thread_io_close_interrupt` to wake fibers waiting on a descriptor that's been closed, removing a custom error-recovery path that could mis-attribute `IOError` / `Errno::EBADF` to the wrong waiter.
+
 ### v1.15.0
 
 - Add bounds checks, in the unlikely event of a user providing an invalid offset that exceeds the buffer size. This prevents potential memory corruption and ensures safe operation when using buffered IO methods.
@@ -50,15 +64,6 @@ Please see the [project releases](https://socketry.github.io/io-event/releases/i
 
 - Improved consistency of handling closed IO when invoking `#select`.
 
-### v1.10.0
-
-- `IO::Event::Profiler` is moved to dedicated gem: [fiber-profiler](https://github.com/socketry/fiber-profiler).
-- Perform runtime checks for native selectors to ensure they are supported in the current environment. While compile-time checks determine availability, restrictions like seccomp and SELinux may still prevent them from working.
-
-### v1.9.0
-
-- Improved `IO::Event::Profiler` for detecting stalls.
-
 ## Contributing
 
 We welcome contributions to this project.
data/releases.md
CHANGED
@@ -1,5 +1,19 @@
 # Releases
 
+## v1.16.0
+
+- Use `eventfd` for `URing` cross-thread wakeup, and enable `IORING_SETUP_SINGLE_ISSUER`, `IORING_SETUP_DEFER_TASKRUN`, and `IORING_SETUP_TASKRUN_FLAG`. The waking thread now signals via `eventfd` rather than submitting a `NOP` SQE, which unlocks the single-issuer optimisation, defers task work to the application thread, and lets `select()` skip the `io_uring_get_events()` syscall when no task work is pending.
+- Add support for the `io_close` fiber-scheduler hook (Ruby 4.0+). The `URing` selector performs the close asynchronously via the ring; the `Debug::Selector` and `TestScheduler` wrappers forward to the underlying selector when supported.
+- Improve `WorkerPool` GC compaction support and add proper write barriers, fixing potential use-after-free under compacting GC.
+- Keep blocked scheduler fibers alive during GC by registering them as roots in `TestScheduler#block`, preventing premature collection and the resulting use-after-free crash on resume.
+- Use Ruby's `xmalloc` / `xcalloc` / `xrealloc2` / `xfree` for all internal selector allocations (the per-fiber ready-queue entries in `IO_Event_Selector_ready_push`, and both the backing array and per-element allocations in `IO_Event_Array`). Previously a raw `malloc` paired with a debug-build-only `assert(...)` would silently dereference `NULL` and crash in release builds under memory pressure; the Ruby allocators trigger a GC sweep on pressure and raise `NoMemoryError` / `RangeError` on real failure, so the `-1` return-code paths through `IO_Event_Array_initialize` / `_resize` / `_lookup` and their callers in `epoll.c` / `kqueue.c` / `uring.c` are removed in favour of straight exception propagation.
+- Correctly handle short `io_uring_submit()` results in the `URing` selector. `io_uring_submit()` returns the number of SQEs actually accepted by the kernel and can be short (SQE prep errors, `ENOMEM`, transient `EAGAIN`); the old accounting reset `pending = 0` on any success and silently lost track of unsubmitted SQEs.
+- Enable `IORING_SETUP_SUBMIT_ALL` (kernel 5.18+) on the `URing` selector so the kernel keeps processing the rest of an SQE batch past individual errors, reducing the frequency of short submits in practice.
+
+## v1.15.1
+
+- Simplify closed-IO handling in the `Select` selector: rely on Ruby 4's `rb_thread_io_close_interrupt` to wake fibers waiting on a descriptor that's been closed, removing a custom error-recovery path that could mis-attribute `IOError` / `Errno::EBADF` to the wrong waiter.
+
 ## v1.15.0
 
 - Add bounds checks, in the unlikely event of a user providing an invalid offset that exceeds the buffer size. This prevents potential memory corruption and ensures safe operation when using buffered IO methods.
data.tar.gz.sig
CHANGED
Binary file
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: io-event
 version: !ruby/object:Gem::Version
-  version: 1.
+  version: 1.16.0
 platform: ruby
 authors:
 - Samuel Williams
@@ -11,9 +11,11 @@ authors:
 - Benoit Daloze
 - Bruno Sutic
 - Shizuo Fujita
+- Tavian Barnes
 - Alex Matchneer
 - Anthony Ross
 - Delton Ding
+- Fletcher Dares
 - Italo Brandão
 - John Hawthorn
 - Luke Gruber
@@ -121,7 +123,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version:
+rubygems_version: 4.0.6
 specification_version: 4
 summary: An event loop.
 test_files: []
metadata.gz.sig
CHANGED
Binary file