io-event 1.2.2 → 1.3.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -20,6 +20,8 @@

  #include "uring.h"
  #include "selector.h"
+ #include "list.h"
+ #include "array.h"

  #include &lt;liburing.h&gt;
  #include &lt;poll.h&gt;
@@ -32,7 +34,7 @@

  enum {
  	DEBUG = 0,
- 	DEBUG_IO_READ = 0,
+ 	DEBUG_COMPLETION = 0,
  };

  static VALUE IO_Event_Selector_URing = Qnil;
@@ -41,45 +43,109 @@ enum {URING_ENTRIES = 64};

  #pragma mark - Data Type

- struct IO_Event_Selector_URing {
+ struct IO_Event_Selector_URing
+ {
  	struct IO_Event_Selector backend;
  	struct io_uring ring;
  	size_t pending;
  	int blocked;
+
+ 	struct IO_Event_Array completions;
+ 	struct IO_Event_List free_list;
  };

- void IO_Event_Selector_URing_Type_mark(void *_data)
+ struct IO_Event_Selector_URing_Completion;
+
+ struct IO_Event_Selector_URing_Waiting
  {
- 	struct IO_Event_Selector_URing *data = _data;
- 	IO_Event_Selector_mark(&data->backend);
+ 	struct IO_Event_Selector_URing_Completion *completion;
+
+ 	VALUE fiber;
+
+ 	// The result of the operation.
+ 	int32_t result;
+
+ 	// Any associated flags.
+ 	uint32_t flags;
+ };
+
+ struct IO_Event_Selector_URing_Completion
+ {
+ 	struct IO_Event_List list;
+
+ 	struct IO_Event_Selector_URing_Waiting *waiting;
+ };
+
+ static
+ void IO_Event_Selector_URing_Completion_mark(void *_completion)
+ {
+ 	struct IO_Event_Selector_URing_Completion *completion = _completion;
+
+ 	if (completion->waiting) {
+ 		rb_gc_mark_movable(completion->waiting->fiber);
+ 	}
+ }
+
+ void IO_Event_Selector_URing_Type_mark(void *_selector)
+ {
+ 	struct IO_Event_Selector_URing *selector = _selector;
+ 	IO_Event_Selector_mark(&selector->backend);
+ 	IO_Event_Array_each(&selector->completions, IO_Event_Selector_URing_Completion_mark);
  }

  static
- void close_internal(struct IO_Event_Selector_URing *data) {
- 	if (data->ring.ring_fd >= 0) {
- 		io_uring_queue_exit(&data->ring);
- 		data->ring.ring_fd = -1;
+ void IO_Event_Selector_URing_Completion_compact(void *_completion)
+ {
+ 	struct IO_Event_Selector_URing_Completion *completion = _completion;
+
+ 	if (completion->waiting) {
+ 		completion->waiting->fiber = rb_gc_location(completion->waiting->fiber);
+ 	}
+ }
+
+ void IO_Event_Selector_URing_Type_compact(void *_selector)
+ {
+ 	struct IO_Event_Selector_URing *selector = _selector;
+ 	IO_Event_Selector_compact(&selector->backend);
+ 	IO_Event_Array_each(&selector->completions, IO_Event_Selector_URing_Completion_compact);
+ }
+
+ static
+ void close_internal(struct IO_Event_Selector_URing *selector)
+ {
+ 	if (selector->ring.ring_fd >= 0) {
+ 		io_uring_queue_exit(&selector->ring);
+ 		selector->ring.ring_fd = -1;
  	}
  }

- void IO_Event_Selector_URing_Type_free(void *_data)
+ static
+ void IO_Event_Selector_URing_Type_free(void *_selector)
  {
- 	struct IO_Event_Selector_URing *data = _data;
+ 	struct IO_Event_Selector_URing *selector = _selector;

- 	close_internal(data);
+ 	close_internal(selector);

- 	free(data);
+ 	IO_Event_Array_free(&selector->completions);
+
+ 	free(selector);
  }

- size_t IO_Event_Selector_URing_Type_size(const void *data)
+ static
+ size_t IO_Event_Selector_URing_Type_size(const void *_selector)
  {
- 	return sizeof(struct IO_Event_Selector_URing);
+ 	const struct IO_Event_Selector_URing *selector = _selector;
+
+ 	return sizeof(struct IO_Event_Selector_URing)
+ 		+ IO_Event_Array_memory_size(&selector->completions)
+ 	;
  }

  static const rb_data_type_t IO_Event_Selector_URing_Type = {
  	.wrap_struct_name = "IO_Event::Backend::URing",
  	.function = {
  		.dmark = IO_Event_Selector_URing_Type_mark,
+ 		.dcompact = IO_Event_Selector_URing_Type_compact,
  		.dfree = IO_Event_Selector_URing_Type_free,
  		.dsize = IO_Event_Selector_URing_Type_size,
  	},
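
The hunk above adds GC compaction support: fibers reachable through the completion array are now marked with rb_gc_mark_movable and re-resolved with rb_gc_location after the heap is compacted. Below is a minimal standalone sketch of this TypedData pattern, assuming Ruby 2.7+ (the Holder type and its single slot are illustrative, not part of the gem):

    // Sketch of the mark/compact pattern used above (Ruby 2.7+).
    #include <ruby.h>

    struct Holder {
        VALUE object;
    };

    static void Holder_mark(void *_holder) {
        struct Holder *holder = _holder;
        // Movable mark: permits the GC to relocate `object` during compaction.
        rb_gc_mark_movable(holder->object);
    }

    static void Holder_compact(void *_holder) {
        struct Holder *holder = _holder;
        // After compaction, ask the GC where the object moved to:
        holder->object = rb_gc_location(holder->object);
    }

    static void Holder_free(void *_holder) {
        free(_holder);
    }

    static size_t Holder_size(const void *_holder) {
        return sizeof(struct Holder);
    }

    static const rb_data_type_t Holder_type = {
        .wrap_struct_name = "Holder",
        .function = {
            .dmark = Holder_mark,
            .dcompact = Holder_compact, // without this, rb_gc_mark_movable is unsafe
            .dfree = Holder_free,
            .dsize = Holder_size,
        },
        .flags = RUBY_TYPED_FREE_IMMEDIATELY,
    };
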
@@ -87,15 +153,90 @@ static const rb_data_type_t IO_Event_Selector_URing_Type = {
  	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
  };

+ inline static
+ struct IO_Event_Selector_URing_Completion * IO_Event_Selector_URing_Completion_acquire(struct IO_Event_Selector_URing *selector, struct IO_Event_Selector_URing_Waiting *waiting)
+ {
+ 	struct IO_Event_Selector_URing_Completion *completion = NULL;
+
+ 	if (!IO_Event_List_empty(&selector->free_list)) {
+ 		completion = (struct IO_Event_Selector_URing_Completion*)selector->free_list.tail;
+ 		IO_Event_List_pop(&completion->list);
+ 	} else {
+ 		completion = IO_Event_Array_push(&selector->completions);
+ 		IO_Event_List_clear(&completion->list);
+ 	}
+
+ 	if (DEBUG_COMPLETION) fprintf(stderr, "IO_Event_Selector_URing_Completion_acquire(%p, limit=%ld)\n", (void*)completion, selector->completions.limit);
+
+ 	waiting->completion = completion;
+ 	completion->waiting = waiting;
+
+ 	return completion;
+ }
+
+ inline static
+ void IO_Event_Selector_URing_Completion_cancel(struct IO_Event_Selector_URing_Completion *completion)
+ {
+ 	if (DEBUG_COMPLETION) fprintf(stderr, "IO_Event_Selector_URing_Completion_cancel(%p)\n", (void*)completion);
+
+ 	if (completion->waiting) {
+ 		completion->waiting->completion = NULL;
+ 		completion->waiting = NULL;
+ 	}
+ }
+
+ inline static
+ void IO_Event_Selector_URing_Completion_release(struct IO_Event_Selector_URing *selector, struct IO_Event_Selector_URing_Completion *completion)
+ {
+ 	if (DEBUG_COMPLETION) fprintf(stderr, "IO_Event_Selector_URing_Completion_release(%p)\n", (void*)completion);
+
+ 	IO_Event_Selector_URing_Completion_cancel(completion);
+ 	IO_Event_List_prepend(&selector->free_list, &completion->list);
+ }
+
+ inline static
+ void IO_Event_Selector_URing_Waiting_cancel(struct IO_Event_Selector_URing_Waiting *waiting)
+ {
+ 	if (DEBUG_COMPLETION) fprintf(stderr, "IO_Event_Selector_URing_Waiting_cancel(%p, %p)\n", (void*)waiting, (void*)waiting->completion);
+
+ 	if (waiting->completion) {
+ 		waiting->completion->waiting = NULL;
+ 		waiting->completion = NULL;
+ 	}
+
+ 	waiting->fiber = 0;
+ }
+
+ struct IO_Event_List_Type IO_Event_Selector_URing_Completion_Type = {};
+
+ void IO_Event_Selector_URing_Completion_initialize(void *element)
+ {
+ 	struct IO_Event_Selector_URing_Completion *completion = element;
+ 	IO_Event_List_initialize(&completion->list);
+ 	completion->list.type = &IO_Event_Selector_URing_Completion_Type;
+ }
+
+ void IO_Event_Selector_URing_Completion_free(void *element)
+ {
+ 	struct IO_Event_Selector_URing_Completion *completion = element;
+ 	IO_Event_Selector_URing_Completion_cancel(completion);
+ }
+
  VALUE IO_Event_Selector_URing_allocate(VALUE self) {
- 	struct IO_Event_Selector_URing *data = NULL;
- 	VALUE instance = TypedData_Make_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+ 	struct IO_Event_Selector_URing *selector = NULL;
+ 	VALUE instance = TypedData_Make_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
+
+ 	IO_Event_Selector_initialize(&selector->backend, Qnil);
+ 	selector->ring.ring_fd = -1;

- 	IO_Event_Selector_initialize(&data->backend, Qnil);
- 	data->ring.ring_fd = -1;
+ 	selector->pending = 0;
+ 	selector->blocked = 0;

- 	data->pending = 0;
- 	data->blocked = 0;
+ 	IO_Event_List_initialize(&selector->free_list);
+
+ 	selector->completions.element_initialize = IO_Event_Selector_URing_Completion_initialize;
+ 	selector->completions.element_free = IO_Event_Selector_URing_Completion_free;
+ 	IO_Event_Array_allocate(&selector->completions, 1024, sizeof(struct IO_Event_Selector_URing_Completion));

  	return instance;
  }
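
The completions introduced above are pooled: acquire takes a recycled entry from free_list when possible and only grows the completions array otherwise, and release returns the entry to the free list after unlinking it from its stack-allocated Waiting. The apparent motivation is that the SQE's user_data now points at a heap-stable Completion rather than at a fiber, so a late CQE for a cancelled operation never dereferences a dead stack frame. A simplified standalone sketch of the recycling idea (a singly-linked free list standing in for IO_Event_List/IO_Event_Array):

    // Sketch only: the real code uses an intrusive doubly-linked list over a
    // growable array; this version uses a plain singly-linked free list.
    #include <stdio.h>
    #include <stdlib.h>

    struct Completion {
        struct Completion *next; // free-list link
        int result;
    };

    struct Pool {
        struct Completion *free_list;
    };

    static struct Completion *pool_acquire(struct Pool *pool) {
        struct Completion *completion = pool->free_list;
        if (completion) {
            pool->free_list = completion->next; // reuse a released completion
        } else {
            completion = calloc(1, sizeof(*completion)); // grow the pool
        }
        return completion;
    }

    static void pool_release(struct Pool *pool, struct Completion *completion) {
        completion->next = pool->free_list;
        pool->free_list = completion;
    }

    int main(void) {
        struct Pool pool = {0};
        struct Completion *a = pool_acquire(&pool);
        pool_release(&pool, a);
        struct Completion *b = pool_acquire(&pool);
        printf("recycled: %s\n", a == b ? "yes" : "no"); // prints "yes"
        return 0;
    }
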
@@ -103,100 +244,126 @@ VALUE IO_Event_Selector_URing_allocate(VALUE self) {
  #pragma mark - Methods

  VALUE IO_Event_Selector_URing_initialize(VALUE self, VALUE loop) {
- 	struct IO_Event_Selector_URing *data = NULL;
- 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+ 	struct IO_Event_Selector_URing *selector = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);

- 	IO_Event_Selector_initialize(&data->backend, loop);
- 	int result = io_uring_queue_init(URING_ENTRIES, &data->ring, 0);
+ 	IO_Event_Selector_initialize(&selector->backend, loop);
+ 	int result = io_uring_queue_init(URING_ENTRIES, &selector->ring, 0);

  	if (result < 0) {
  		rb_syserr_fail(-result, "IO_Event_Selector_URing_initialize:io_uring_queue_init");
  	}

- 	rb_update_max_fd(data->ring.ring_fd);
+ 	rb_update_max_fd(selector->ring.ring_fd);

  	return self;
  }

  VALUE IO_Event_Selector_URing_loop(VALUE self) {
- 	struct IO_Event_Selector_URing *data = NULL;
- 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+ 	struct IO_Event_Selector_URing *selector = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);

- 	return data->backend.loop;
+ 	return selector->backend.loop;
  }

  VALUE IO_Event_Selector_URing_close(VALUE self) {
- 	struct IO_Event_Selector_URing *data = NULL;
- 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+ 	struct IO_Event_Selector_URing *selector = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);

- 	close_internal(data);
+ 	close_internal(selector);

  	return Qnil;
  }

  VALUE IO_Event_Selector_URing_transfer(VALUE self)
  {
- 	struct IO_Event_Selector_URing *data = NULL;
- 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+ 	struct IO_Event_Selector_URing *selector = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);

- 	return IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
+ 	return IO_Event_Selector_fiber_transfer(selector->backend.loop, 0, NULL);
  }

  VALUE IO_Event_Selector_URing_resume(int argc, VALUE *argv, VALUE self)
  {
- 	struct IO_Event_Selector_URing *data = NULL;
- 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+ 	struct IO_Event_Selector_URing *selector = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);

- 	return IO_Event_Selector_resume(&data->backend, argc, argv);
+ 	return IO_Event_Selector_resume(&selector->backend, argc, argv);
  }

  VALUE IO_Event_Selector_URing_yield(VALUE self)
  {
- 	struct IO_Event_Selector_URing *data = NULL;
- 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+ 	struct IO_Event_Selector_URing *selector = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);

- 	return IO_Event_Selector_yield(&data->backend);
+ 	return IO_Event_Selector_yield(&selector->backend);
  }

  VALUE IO_Event_Selector_URing_push(VALUE self, VALUE fiber)
  {
- 	struct IO_Event_Selector_URing *data = NULL;
- 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+ 	struct IO_Event_Selector_URing *selector = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);

- 	IO_Event_Selector_queue_push(&data->backend, fiber);
+ 	IO_Event_Selector_queue_push(&selector->backend, fiber);

  	return Qnil;
  }

  VALUE IO_Event_Selector_URing_raise(int argc, VALUE *argv, VALUE self)
  {
- 	struct IO_Event_Selector_URing *data = NULL;
- 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+ 	struct IO_Event_Selector_URing *selector = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);

- 	return IO_Event_Selector_raise(&data->backend, argc, argv);
+ 	return IO_Event_Selector_raise(&selector->backend, argc, argv);
  }

  VALUE IO_Event_Selector_URing_ready_p(VALUE self) {
- 	struct IO_Event_Selector_URing *data = NULL;
- 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+ 	struct IO_Event_Selector_URing *selector = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);

- 	return data->backend.ready ? Qtrue : Qfalse;
+ 	return selector->backend.ready ? Qtrue : Qfalse;
  }

  #pragma mark - Submission Queue

+ static
+ void IO_Event_Selector_URing_dump_completion_queue(struct IO_Event_Selector_URing *selector)
+ {
+ 	struct io_uring *ring = &selector->ring;
+ 	unsigned head;
+ 	struct io_uring_cqe *cqe;
+
+ 	if (DEBUG) {
+ 		int first = 1;
+ 		io_uring_for_each_cqe(ring, head, cqe) {
+ 			if (!first) {
+ 				fprintf(stderr, ", ");
+ 			}
+ 			else {
+ 				fprintf(stderr, "CQ: [");
+ 				first = 0;
+ 			}
+
+ 			fprintf(stderr, "%d:%p", (int)cqe->res, (void*)cqe->user_data);
+ 		}
+ 		if (!first) {
+ 			fprintf(stderr, "]\n");
+ 		}
+ 	}
+ }
+
  // Flush the submission queue if pending operations are present.
  static
- int io_uring_submit_flush(struct IO_Event_Selector_URing *data) {
- 	if (data->pending) {
- 		if (DEBUG) fprintf(stderr, "io_uring_submit_flush(pending=%ld)\n", data->pending);
+ int io_uring_submit_flush(struct IO_Event_Selector_URing *selector) {
+ 	if (selector->pending) {
+ 		if (DEBUG) fprintf(stderr, "io_uring_submit_flush(pending=%ld)\n", selector->pending);

  		// Try to submit:
- 		int result = io_uring_submit(&data->ring);
+ 		int result = io_uring_submit(&selector->ring);

  		if (result >= 0) {
  			// If it was submitted, reset pending count:
- 			data->pending = 0;
+ 			selector->pending = 0;
  		} else if (result != -EBUSY && result != -EAGAIN) {
  			rb_syserr_fail(-result, "io_uring_submit_flush:io_uring_submit");
  		}
@@ -204,24 +371,29 @@ int io_uring_submit_flush(struct IO_Event_Selector_URing *data) {
  		return result;
  	}

+ 	if (DEBUG) {
+ 		IO_Event_Selector_URing_dump_completion_queue(selector);
+ 	}
+
  	return 0;
  }

  // Immediately flush the submission queue, yielding to the event loop if it was not successful.
  static
- int io_uring_submit_now(struct IO_Event_Selector_URing *data) {
- 	if (DEBUG && data->pending) fprintf(stderr, "io_uring_submit_now(pending=%ld)\n", data->pending);
-
+ int io_uring_submit_now(struct IO_Event_Selector_URing *selector) {
+ 	if (DEBUG) fprintf(stderr, "io_uring_submit_now(pending=%ld)\n", selector->pending);
+
  	while (true) {
- 		int result = io_uring_submit(&data->ring);
+ 		int result = io_uring_submit(&selector->ring);

  		if (result >= 0) {
- 			data->pending = 0;
+ 			selector->pending = 0;
+ 			if (DEBUG) IO_Event_Selector_URing_dump_completion_queue(selector);
  			return result;
  		}

  		if (result == -EBUSY || result == -EAGAIN) {
- 			IO_Event_Selector_yield(&data->backend);
+ 			IO_Event_Selector_yield(&selector->backend);
  		} else {
  			rb_syserr_fail(-result, "io_uring_submit_now:io_uring_submit");
  		}
@@ -230,20 +402,20 @@ int io_uring_submit_now(struct IO_Event_Selector_URing *data) {

  // Submit a pending operation. This does not submit the operation immediately, but instead defers it to the next call to `io_uring_submit_flush` or `io_uring_submit_now`. This is useful for operations that are not urgent, but should be used with care as it can lead to a deadlock if the submission queue is not flushed.
  static
- void io_uring_submit_pending(struct IO_Event_Selector_URing *data) {
- 	data->pending += 1;
+ void io_uring_submit_pending(struct IO_Event_Selector_URing *selector) {
+ 	selector->pending += 1;

- 	if (DEBUG) fprintf(stderr, "io_uring_submit_pending(ring=%p, pending=%ld)\n", &data->ring, data->pending);
+ 	if (DEBUG) fprintf(stderr, "io_uring_submit_pending(ring=%p, pending=%ld)\n", &selector->ring, selector->pending);
  }

- struct io_uring_sqe * io_get_sqe(struct IO_Event_Selector_URing *data) {
- 	struct io_uring_sqe *sqe = io_uring_get_sqe(&data->ring);
+ struct io_uring_sqe * io_get_sqe(struct IO_Event_Selector_URing *selector) {
+ 	struct io_uring_sqe *sqe = io_uring_get_sqe(&selector->ring);

  	while (sqe == NULL) {
  		// The submit queue is full, we need to drain it:
- 		io_uring_submit_now(data);
+ 		io_uring_submit_now(selector);

- 		sqe = io_uring_get_sqe(&data->ring);
+ 		sqe = io_uring_get_sqe(&selector->ring);
  	}

  	return sqe;
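
As the comment above notes, io_uring_submit_pending only increments a counter; the actual io_uring_submit happens later in io_uring_submit_flush or io_uring_submit_now, so many staged SQEs reach the kernel in a single syscall. A standalone sketch of that batching with plain liburing (link with -luring; error handling abbreviated):

    #include <liburing.h>
    #include <stdio.h>

    int main(void) {
        struct io_uring ring;
        if (io_uring_queue_init(64, &ring, 0) < 0) return 1;

        // Stage three no-op operations; nothing is submitted yet ("pending"):
        for (int i = 0; i < 3; i++) {
            struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
            io_uring_prep_nop(sqe);
            io_uring_sqe_set_data(sqe, NULL);
        }

        // One syscall flushes all pending SQEs (the "flush" step):
        int submitted = io_uring_submit(&ring);
        printf("submitted %d operations in one call\n", submitted);

        // Reap the completions:
        for (int i = 0; i < submitted; i++) {
            struct io_uring_cqe *cqe;
            io_uring_wait_cqe(&ring, &cqe);
            io_uring_cqe_seen(&ring, cqe);
        }

        io_uring_queue_exit(&ring);
        return 0;
    }
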
@@ -252,7 +424,9 @@ struct io_uring_sqe * io_get_sqe(struct IO_Event_Selector_URing *data) {
  #pragma mark - Process.wait

  struct process_wait_arguments {
- 	struct IO_Event_Selector_URing *data;
+ 	struct IO_Event_Selector_URing *selector;
+ 	struct IO_Event_Selector_URing_Waiting *waiting;
+
  	pid_t pid;
  	int flags;
  	int descriptor;
@@ -262,9 +436,13 @@ static
  VALUE process_wait_transfer(VALUE _arguments) {
  	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;

- 	IO_Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
+ 	IO_Event_Selector_fiber_transfer(arguments->selector->backend.loop, 0, NULL);

- 	return IO_Event_Selector_process_status_wait(arguments->pid);
+ 	if (arguments->waiting->result) {
+ 		return IO_Event_Selector_process_status_wait(arguments->pid, arguments->flags);
+ 	} else {
+ 		return Qfalse;
+ 	}
  }

  static
@@ -273,29 +451,44 @@ VALUE process_wait_ensure(VALUE _arguments) {

  	close(arguments->descriptor);

+ 	IO_Event_Selector_URing_Waiting_cancel(arguments->waiting);
+
  	return Qnil;
  }

- VALUE IO_Event_Selector_URing_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
- 	struct IO_Event_Selector_URing *data = NULL;
- 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+ VALUE IO_Event_Selector_URing_process_wait(VALUE self, VALUE fiber, VALUE _pid, VALUE _flags) {
+ 	struct IO_Event_Selector_URing *selector = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);

- 	struct process_wait_arguments process_wait_arguments = {
- 		.data = data,
- 		.pid = NUM2PIDT(pid),
- 		.flags = NUM2INT(flags),
+ 	pid_t pid = NUM2PIDT(_pid);
+ 	int flags = NUM2INT(_flags);
+
+ 	int descriptor = pidfd_open(pid, 0);
+ 	if (descriptor < 0) {
+ 		rb_syserr_fail(errno, "IO_Event_Selector_URing_process_wait:pidfd_open");
+ 	}
+ 	rb_update_max_fd(descriptor);
+
+ 	struct IO_Event_Selector_URing_Waiting waiting = {
+ 		.fiber = fiber,
  	};

- 	process_wait_arguments.descriptor = pidfd_open(process_wait_arguments.pid, 0);
- 	rb_update_max_fd(process_wait_arguments.descriptor);
+ 	struct IO_Event_Selector_URing_Completion *completion = IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
+
+ 	struct process_wait_arguments process_wait_arguments = {
+ 		.selector = selector,
+ 		.waiting = &waiting,
+ 		.pid = pid,
+ 		.flags = flags,
+ 		.descriptor = descriptor,
+ 	};

- 	struct io_uring_sqe *sqe = io_get_sqe(data);
-
  	if (DEBUG) fprintf(stderr, "IO_Event_Selector_URing_process_wait:io_uring_prep_poll_add(%p)\n", (void*)fiber);
- 	io_uring_prep_poll_add(sqe, process_wait_arguments.descriptor, POLLIN|POLLHUP|POLLERR);
- 	io_uring_sqe_set_data(sqe, (void*)fiber);
- 	io_uring_submit_pending(data);
-
+ 	struct io_uring_sqe *sqe = io_get_sqe(selector);
+ 	io_uring_prep_poll_add(sqe, descriptor, POLLIN|POLLHUP|POLLERR);
+ 	io_uring_sqe_set_data(sqe, completion);
+ 	io_uring_submit_pending(selector);
+
  	return rb_ensure(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_ensure, (VALUE)&process_wait_arguments);
  }

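
The process_wait path above is built on pidfd_open: the descriptor becomes readable (POLLIN) once the process exits, so it can be registered with io_uring like any other fd. A standalone sketch of the underlying mechanism, using plain poll(2) and the raw syscall (requires Linux 5.3+; older glibc provides no pidfd_open wrapper, hence the syscall):

    #define _GNU_SOURCE
    #include <sys/syscall.h>
    #include <sys/wait.h>
    #include <unistd.h>
    #include <poll.h>
    #include <stdio.h>

    int main(void) {
        pid_t pid = fork();
        if (pid == 0) _exit(42); // child exits immediately

        int descriptor = (int)syscall(SYS_pidfd_open, pid, 0);
        if (descriptor < 0) return 1;

        // A pidfd signals process exit as POLLIN:
        struct pollfd pfd = {.fd = descriptor, .events = POLLIN};
        poll(&pfd, 1, -1);

        int status = 0;
        waitpid(pid, &status, 0);
        printf("child exited with %d\n", WEXITSTATUS(status));

        close(descriptor);
        return 0;
    }
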
@@ -328,70 +521,76 @@ int events_from_poll_flags(short flags) {
  }

  struct io_wait_arguments {
- 	struct IO_Event_Selector_URing *data;
- 	VALUE fiber;
+ 	struct IO_Event_Selector_URing *selector;
+ 	struct IO_Event_Selector_URing_Waiting *waiting;
  	short flags;
  };

  static
- VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
+ VALUE io_wait_ensure(VALUE _arguments) {
  	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
- 	struct IO_Event_Selector_URing *data = arguments->data;

- 	struct io_uring_sqe *sqe = io_get_sqe(data);
+ 	// If the operation is still in progress, cancel it:
+ 	if (arguments->waiting->completion) {
+ 		if (DEBUG) fprintf(stderr, "io_wait_ensure:io_uring_prep_cancel(waiting=%p, completion=%p)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion);
+ 		struct io_uring_sqe *sqe = io_get_sqe(arguments->selector);
+ 		io_uring_prep_cancel(sqe, (void*)arguments->waiting->completion, 0);
+ 		io_uring_sqe_set_data(sqe, NULL);
+ 		io_uring_submit_now(arguments->selector);
+ 	}

- 	if (DEBUG) fprintf(stderr, "io_wait_rescue:io_uring_prep_poll_remove(%p)\n", (void*)arguments->fiber);
+ 	IO_Event_Selector_URing_Waiting_cancel(arguments->waiting);

- 	io_uring_prep_poll_remove(sqe, (uintptr_t)arguments->fiber);
- 	io_uring_sqe_set_data(sqe, NULL);
- 	io_uring_submit_now(data);
-
- 	rb_exc_raise(exception);
+ 	return Qnil;
  };

  static
  VALUE io_wait_transfer(VALUE _arguments) {
  	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
- 	struct IO_Event_Selector_URing *data = arguments->data;
-
- 	VALUE result = IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
- 	if (DEBUG) fprintf(stderr, "io_wait:IO_Event_Selector_fiber_transfer -> %d\n", RB_NUM2INT(result));
-
- 	if (!RTEST(result)) {
+ 	struct IO_Event_Selector_URing *selector = arguments->selector;
+
+ 	IO_Event_Selector_fiber_transfer(selector->backend.loop, 0, NULL);
+
+ 	if (DEBUG) fprintf(stderr, "io_wait_transfer:waiting=%p, result=%d\n", (void*)arguments->waiting, arguments->waiting->result);
+
+ 	if (arguments->waiting->result) {
+ 		// We explicitly filter the resulting events based on the requested events.
+ 		// In some cases, poll will report events we didn't ask for.
+ 		return RB_INT2NUM(events_from_poll_flags(arguments->waiting->result & arguments->flags));
+ 	} else {
  		return Qfalse;
  	}
-
- 	// We explicitly filter the resulting events based on the requested events.
- 	// In some cases, poll will report events we didn't ask for.
- 	short flags = arguments->flags & NUM2INT(result);
-
- 	return INT2NUM(events_from_poll_flags(flags));
  };

  VALUE IO_Event_Selector_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
- 	struct IO_Event_Selector_URing *data = NULL;
- 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+ 	struct IO_Event_Selector_URing *selector = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);

  	int descriptor = IO_Event_Selector_io_descriptor(io);
- 	struct io_uring_sqe *sqe = io_get_sqe(data);

  	short flags = poll_flags_from_events(NUM2INT(events));

  	if (DEBUG) fprintf(stderr, "IO_Event_Selector_URing_io_wait:io_uring_prep_poll_add(descriptor=%d, flags=%d, fiber=%p)\n", descriptor, flags, (void*)fiber);

- 	io_uring_prep_poll_add(sqe, descriptor, flags);
- 	io_uring_sqe_set_data(sqe, (void*)fiber);
+ 	struct IO_Event_Selector_URing_Waiting waiting = {
+ 		.fiber = fiber,
+ 	};

+ 	struct IO_Event_Selector_URing_Completion *completion = IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
+
+ 	struct io_uring_sqe *sqe = io_get_sqe(selector);
+ 	io_uring_prep_poll_add(sqe, descriptor, flags);
+ 	io_uring_sqe_set_data(sqe, completion);
  	// If we are going to wait, we assume that we are waiting for a while:
- 	io_uring_submit_pending(data);
+ 	io_uring_submit_pending(selector);

  	struct io_wait_arguments io_wait_arguments = {
- 		.data = data,
- 		.fiber = fiber,
+ 		.selector = selector,
+ 		.waiting = &waiting,
  		.flags = flags
  	};

- 	return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
+ 	return rb_ensure(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_ensure, (VALUE)&io_wait_arguments);
  }

  #ifdef HAVE_RUBY_IO_BUFFER_H
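
The new io_wait_ensure above cancels an in-flight poll by submitting an async cancel keyed on the same user_data pointer, rather than the old poll_remove keyed on the fiber. A standalone sketch of that mechanism (link with -luring; the token variable stands in for the heap-allocated completion):

    #include <liburing.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void) {
        struct io_uring ring;
        int fds[2];
        if (io_uring_queue_init(8, &ring, 0) < 0 || pipe(fds) < 0) return 1;

        int token = 0;

        // Poll the read end of an empty pipe; this would block forever:
        struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
        io_uring_prep_poll_add(sqe, fds[0], POLLIN);
        io_uring_sqe_set_data(sqe, &token);
        io_uring_submit(&ring);

        // Cancel it by user_data; the cancel SQE itself carries NULL user
        // data, exactly as in io_wait_ensure above:
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_cancel(sqe, &token, 0);
        io_uring_sqe_set_data(sqe, NULL);
        io_uring_submit(&ring);

        // Two completions arrive: the cancel op, and the poll with -ECANCELED:
        for (int i = 0; i < 2; i++) {
            struct io_uring_cqe *cqe;
            io_uring_wait_cqe(&ring, &cqe);
            printf("user_data=%p res=%d\n", (void*)cqe->user_data, cqe->res);
            io_uring_cqe_seen(&ring, cqe);
        }

        io_uring_queue_exit(&ring);
        return 0;
    }
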
@@ -417,8 +616,8 @@ static inline off_t io_seekable(int descriptor)
  #pragma mark - IO#read

  struct io_read_arguments {
- 	struct IO_Event_Selector_URing *data;
- 	VALUE fiber;
+ 	struct IO_Event_Selector_URing *selector;
+ 	struct IO_Event_Selector_URing_Waiting *waiting;
  	int descriptor;
  	char *buffer;
  	size_t length;
@@ -428,58 +627,65 @@ static VALUE
  io_read_submit(VALUE _arguments)
  {
  	struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
- 	struct IO_Event_Selector_URing *data = arguments->data;
- 	struct io_uring_sqe *sqe = io_get_sqe(data);
+ 	struct IO_Event_Selector_URing *selector = arguments->selector;

- 	if (DEBUG) fprintf(stderr, "io_read_submit:io_uring_prep_read(fiber=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->fiber, arguments->descriptor, arguments->buffer, arguments->length);
+ 	if (DEBUG) fprintf(stderr, "io_read_submit:io_uring_prep_read(waiting=%p, completion=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion, arguments->descriptor, arguments->buffer, arguments->length);

+ 	struct io_uring_sqe *sqe = io_get_sqe(selector);
  	io_uring_prep_read(sqe, arguments->descriptor, arguments->buffer, arguments->length, io_seekable(arguments->descriptor));
- 	io_uring_sqe_set_data(sqe, (void*)arguments->fiber);
- 	io_uring_submit_now(data);
+ 	io_uring_sqe_set_data(sqe, arguments->waiting->completion);
+ 	io_uring_submit_now(selector);
+
+ 	IO_Event_Selector_fiber_transfer(selector->backend.loop, 0, NULL);

- 	return IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
+ 	return RB_INT2NUM(arguments->waiting->result);
  }

  static VALUE
- io_read_cancel(VALUE _arguments, VALUE exception)
+ io_read_ensure(VALUE _arguments)
  {
  	struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
- 	struct IO_Event_Selector_URing *data = arguments->data;
+ 	struct IO_Event_Selector_URing *selector = arguments->selector;

- 	struct io_uring_sqe *sqe = io_get_sqe(data);
-
- 	if (DEBUG) fprintf(stderr, "io_read_cancel:io_uring_prep_cancel(fiber=%p)\n", (void*)arguments->fiber);
+ 	// If the operation is still in progress, cancel it:
+ 	if (arguments->waiting->completion) {
+ 		if (DEBUG) fprintf(stderr, "io_read_ensure:io_uring_prep_cancel(waiting=%p, completion=%p)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion);
+ 		struct io_uring_sqe *sqe = io_get_sqe(selector);
+ 		io_uring_prep_cancel(sqe, (void*)arguments->waiting->completion, 0);
+ 		io_uring_sqe_set_data(sqe, NULL);
+ 		io_uring_submit_now(selector);
+ 	}

- 	io_uring_prep_cancel(sqe, (void*)arguments->fiber, 0);
- 	io_uring_sqe_set_data(sqe, NULL);
- 	io_uring_submit_now(data);
+ 	IO_Event_Selector_URing_Waiting_cancel(arguments->waiting);

- 	rb_exc_raise(exception);
+ 	return Qnil;
  }

  static int
- io_read(struct IO_Event_Selector_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length)
+ io_read(struct IO_Event_Selector_URing *selector, VALUE fiber, int descriptor, char *buffer, size_t length)
  {
- 	struct io_read_arguments io_read_arguments = {
- 		.data = data,
+ 	struct IO_Event_Selector_URing_Waiting waiting = {
  		.fiber = fiber,
+ 	};
+
+ 	IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
+
+ 	struct io_read_arguments io_read_arguments = {
+ 		.selector = selector,
+ 		.waiting = &waiting,
  		.descriptor = descriptor,
  		.buffer = buffer,
  		.length = length
  	};

- 	int result = RB_NUM2INT(
- 		rb_rescue(io_read_submit, (VALUE)&io_read_arguments, io_read_cancel, (VALUE)&io_read_arguments)
+ 	return RB_NUM2INT(
+ 		rb_ensure(io_read_submit, (VALUE)&io_read_arguments, io_read_ensure, (VALUE)&io_read_arguments)
  	);
-
- 	if (DEBUG) fprintf(stderr, "io_read:IO_Event_Selector_fiber_transfer -> %d\n", result);
-
- 	return result;
  }

  VALUE IO_Event_Selector_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length, VALUE _offset) {
- 	struct IO_Event_Selector_URing *data = NULL;
- 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+ 	struct IO_Event_Selector_URing *selector = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);

  	int descriptor = IO_Event_Selector_io_descriptor(io);

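
The switch from rb_rescue to rb_ensure in io_read (and likewise in io_wait and io_write) matters because a rescue handler only runs when an exception is raised, while an ensure handler runs on every exit path; the pooled completion must be unlinked unconditionally. A minimal sketch of the pattern (function names here are illustrative, not the gem's):

    #include <ruby.h>

    static VALUE operation_body(VALUE argument) {
        // ... submit the SQE, transfer to the event loop, read the result ...
        return Qnil;
    }

    static VALUE operation_ensure(VALUE argument) {
        // Runs on normal return, on exception, and on non-local exit (for
        // example, a fiber being raised on while suspended) — so cancel and
        // unlink the waiting/completion pair here:
        return Qnil;
    }

    static VALUE operation(VALUE argument) {
        return rb_ensure(operation_body, argument, operation_ensure, argument);
    }
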
@@ -489,14 +695,14 @@ VALUE IO_Event_Selector_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE b

  	size_t length = NUM2SIZET(_length);
  	size_t offset = NUM2SIZET(_offset);
+ 	size_t total = 0;

- 	while (true) {
- 		size_t maximum_size = size - offset;
- 		if (DEBUG_IO_READ) fprintf(stderr, "io_read(%d, +%ld, %ld)\n", descriptor, offset, maximum_size);
- 		int result = io_read(data, fiber, descriptor, (char*)base+offset, maximum_size);
- 		if (DEBUG_IO_READ) fprintf(stderr, "io_read(%d, +%ld, %ld) -> %d\n", descriptor, offset, maximum_size, result);
+ 	size_t maximum_size = size - offset;
+ 	while (maximum_size) {
+ 		int result = io_read(selector, fiber, descriptor, (char*)base+offset, maximum_size);

  		if (result > 0) {
+ 			total += result;
  			offset += result;
  			if ((size_t)result >= length) break;
  			length -= result;
@@ -507,9 +713,11 @@ VALUE IO_Event_Selector_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE b
  		} else {
  			return rb_fiber_scheduler_io_result(-1, -result);
  		}
+
+ 		maximum_size = size - offset;
  	}

- 	return rb_fiber_scheduler_io_result(offset, 0);
+ 	return rb_fiber_scheduler_io_result(total, 0);
  }

  static VALUE IO_Event_Selector_URing_io_read_compatible(int argc, VALUE *argv, VALUE self)
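
Tracking total separately from offset in the two hunks above fixes the returned byte count: offset starts at the caller-supplied value, so returning it (as the old code did) over-reports whenever the initial offset is nonzero. A small illustration of the difference:

    #include <stdio.h>

    int main(void) {
        size_t offset = 100; // caller asked us to start at buffer+100
        size_t total = 0;

        // Suppose two reads return 30 and 20 bytes:
        int results[] = {30, 20};
        for (int i = 0; i < 2; i++) {
            total += results[i];
            offset += results[i];
        }

        printf("old return value: %zu\n", offset); // 150 — inflated by the offset
        printf("new return value: %zu\n", total);  // 50  — bytes actually read
        return 0;
    }
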
@@ -528,8 +736,8 @@ static VALUE IO_Event_Selector_URing_io_read_compatible(int argc, VALUE *argv, V
  #pragma mark - IO#write

  struct io_write_arguments {
- 	struct IO_Event_Selector_URing *data;
- 	VALUE fiber;
+ 	struct IO_Event_Selector_URing *selector;
+ 	struct IO_Event_Selector_URing_Waiting *waiting;
  	int descriptor;
  	char *buffer;
  	size_t length;
@@ -539,59 +747,65 @@ static VALUE
  io_write_submit(VALUE _argument)
  {
  	struct io_write_arguments *arguments = (struct io_write_arguments*)_argument;
- 	struct IO_Event_Selector_URing *data = arguments->data;
-
- 	struct io_uring_sqe *sqe = io_get_sqe(data);
+ 	struct IO_Event_Selector_URing *selector = arguments->selector;

- 	if (DEBUG) fprintf(stderr, "io_write_submit:io_uring_prep_write(fiber=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->fiber, arguments->descriptor, arguments->buffer, arguments->length);
+ 	if (DEBUG) fprintf(stderr, "io_write_submit:io_uring_prep_write(waiting=%p, completion=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion, arguments->descriptor, arguments->buffer, arguments->length);

+ 	struct io_uring_sqe *sqe = io_get_sqe(selector);
  	io_uring_prep_write(sqe, arguments->descriptor, arguments->buffer, arguments->length, io_seekable(arguments->descriptor));
- 	io_uring_sqe_set_data(sqe, (void*)arguments->fiber);
- 	io_uring_submit_pending(data);
+ 	io_uring_sqe_set_data(sqe, arguments->waiting->completion);
+ 	io_uring_submit_pending(selector);

- 	return IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
+ 	IO_Event_Selector_fiber_transfer(selector->backend.loop, 0, NULL);
+
+ 	return RB_INT2NUM(arguments->waiting->result);
  }

  static VALUE
- io_write_cancel(VALUE _argument, VALUE exception)
+ io_write_ensure(VALUE _argument)
  {
  	struct io_write_arguments *arguments = (struct io_write_arguments*)_argument;
- 	struct IO_Event_Selector_URing *data = arguments->data;
-
- 	struct io_uring_sqe *sqe = io_get_sqe(data);
+ 	struct IO_Event_Selector_URing *selector = arguments->selector;

- 	if (DEBUG) fprintf(stderr, "io_wait_rescue:io_uring_prep_cancel(%p)\n", (void*)arguments->fiber);
+ 	// If the operation is still in progress, cancel it:
+ 	if (arguments->waiting->completion) {
+ 		if (DEBUG) fprintf(stderr, "io_write_ensure:io_uring_prep_cancel(waiting=%p, completion=%p)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion);
+ 		struct io_uring_sqe *sqe = io_get_sqe(selector);
+ 		io_uring_prep_cancel(sqe, (void*)arguments->waiting->completion, 0);
+ 		io_uring_sqe_set_data(sqe, NULL);
+ 		io_uring_submit_now(selector);
+ 	}

- 	io_uring_prep_cancel(sqe, (void*)arguments->fiber, 0);
- 	io_uring_sqe_set_data(sqe, NULL);
- 	io_uring_submit_now(data);
+ 	IO_Event_Selector_URing_Waiting_cancel(arguments->waiting);

- 	rb_exc_raise(exception);
+ 	return Qnil;
  }

  static int
- io_write(struct IO_Event_Selector_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length)
+ io_write(struct IO_Event_Selector_URing *selector, VALUE fiber, int descriptor, char *buffer, size_t length)
  {
- 	struct io_write_arguments arguments = {
- 		.data = data,
+ 	struct IO_Event_Selector_URing_Waiting waiting = {
  		.fiber = fiber,
+ 	};
+
+ 	IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
+
+ 	struct io_write_arguments arguments = {
+ 		.selector = selector,
+ 		.waiting = &waiting,
  		.descriptor = descriptor,
  		.buffer = buffer,
  		.length = length,
  	};

- 	int result = RB_NUM2INT(
- 		rb_rescue(io_write_submit, (VALUE)&arguments, io_write_cancel, (VALUE)&arguments)
+ 	return RB_NUM2INT(
+ 		rb_ensure(io_write_submit, (VALUE)&arguments, io_write_ensure, (VALUE)&arguments)
  	);
-
- 	if (DEBUG) fprintf(stderr, "io_write:IO_Event_Selector_fiber_transfer -> %d\n", result);
-
- 	return result;
  }

  VALUE IO_Event_Selector_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length, VALUE _offset) {
- 	struct IO_Event_Selector_URing *data = NULL;
- 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+ 	struct IO_Event_Selector_URing *selector = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);

  	int descriptor = IO_Event_Selector_io_descriptor(io);

@@ -601,16 +815,18 @@ VALUE IO_Event_Selector_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE

  	size_t length = NUM2SIZET(_length);
  	size_t offset = NUM2SIZET(_offset);
+ 	size_t total = 0;

  	if (length > size) {
  		rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
  	}
-
- 	while (true) {
- 		size_t maximum_size = size - offset;
- 		int result = io_write(data, fiber, descriptor, (char*)base+offset, maximum_size);
+
+ 	size_t maximum_size = size - offset;
+ 	while (maximum_size) {
+ 		int result = io_write(selector, fiber, descriptor, (char*)base+offset, maximum_size);

  		if (result > 0) {
+ 			total += result;
  			offset += result;
  			if ((size_t)result >= length) break;
  			length -= result;
@@ -621,9 +837,11 @@ VALUE IO_Event_Selector_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE
  		} else {
  			return rb_fiber_scheduler_io_result(-1, -result);
  		}
+
+ 		maximum_size = size - offset;
  	}

- 	return rb_fiber_scheduler_io_result(offset, 0);
+ 	return rb_fiber_scheduler_io_result(total, 0);
  }

  static VALUE IO_Event_Selector_URing_io_write_compatible(int argc, VALUE *argv, VALUE self)
@@ -646,17 +864,16 @@ static VALUE IO_Event_Selector_URing_io_write_compatible(int argc, VALUE *argv,
  static const int ASYNC_CLOSE = 1;

  VALUE IO_Event_Selector_URing_io_close(VALUE self, VALUE io) {
- 	struct IO_Event_Selector_URing *data = NULL;
- 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+ 	struct IO_Event_Selector_URing *selector = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);

  	int descriptor = IO_Event_Selector_io_descriptor(io);

  	if (ASYNC_CLOSE) {
- 		struct io_uring_sqe *sqe = io_get_sqe(data);
-
+ 		struct io_uring_sqe *sqe = io_get_sqe(selector);
  		io_uring_prep_close(sqe, descriptor);
  		io_uring_sqe_set_data(sqe, NULL);
- 		io_uring_submit_now(data);
+ 		io_uring_submit_now(selector);
  	} else {
  		close(descriptor);
  	}
@@ -699,7 +916,7 @@ int timeout_nonblocking(struct __kernel_timespec *timespec) {
  }

  struct select_arguments {
- 	struct IO_Event_Selector_URing *data;
+ 	struct IO_Event_Selector_URing *selector;

  	int result;

@@ -712,18 +929,18 @@ void * select_internal(void *_arguments) {
  	struct select_arguments * arguments = (struct select_arguments *)_arguments;
  	struct io_uring_cqe *cqe = NULL;

- 	arguments->result = io_uring_wait_cqe_timeout(&arguments->data->ring, &cqe, arguments->timeout);
+ 	arguments->result = io_uring_wait_cqe_timeout(&arguments->selector->ring, &cqe, arguments->timeout);

  	return NULL;
  }

  static
  int select_internal_without_gvl(struct select_arguments *arguments) {
- 	io_uring_submit_flush(arguments->data);
+ 	io_uring_submit_flush(arguments->selector);

- 	arguments->data->blocked = 1;
+ 	arguments->selector->blocked = 1;
  	rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
- 	arguments->data->blocked = 0;
+ 	arguments->selector->blocked = 0;

  	if (arguments->result == -ETIME) {
  		arguments->result = 0;
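
select_internal_without_gvl above wraps the blocking io_uring_wait_cqe_timeout in rb_thread_call_without_gvl, releasing the GVL so other Ruby threads can run while the selector sleeps; the blocked flag bracketing the call is what lets wakeup (below) know that a nop SQE is needed. A minimal sketch of the release pattern (the blocking body is a placeholder, not the gem's):

    #include <ruby.h>
    #include <ruby/thread.h>

    struct blocking_arguments {
        int result;
    };

    static void *blocking_call(void *_arguments) {
        struct blocking_arguments *arguments = _arguments;
        // ... a blocking syscall such as io_uring_wait_cqe_timeout(...);
        // this function must not touch Ruby objects or raise:
        arguments->result = 0;
        return NULL;
    }

    static int call_blocking(void) {
        struct blocking_arguments arguments = {0};
        // RUBY_UBF_IO is the standard unblocking function for I/O waits:
        rb_thread_call_without_gvl(blocking_call, &arguments, RUBY_UBF_IO, 0);
        return arguments.result;
    }
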
@@ -740,96 +957,115 @@ int select_internal_without_gvl(struct select_arguments *arguments) {
  }

  static inline
- unsigned select_process_completions(struct io_uring *ring) {
+ unsigned select_process_completions(struct IO_Event_Selector_URing *selector) {
+ 	struct io_uring *ring = &selector->ring;
  	unsigned completed = 0;
  	unsigned head;
  	struct io_uring_cqe *cqe;

+ 	if (DEBUG) {
+ 		fprintf(stderr, "select_process_completions: selector=%p\n", (void*)selector);
+ 		IO_Event_Selector_URing_dump_completion_queue(selector);
+ 	}
+
  	io_uring_for_each_cqe(ring, head, cqe) {
+ 		if (DEBUG) fprintf(stderr, "select_process_completions: cqe res=%d user_data=%p\n", cqe->res, (void*)cqe->user_data);
+
  		++completed;

- 		// If the operation was cancelled, or the operation has no user data (fiber):
- 		if (cqe->res == -ECANCELED || cqe->user_data == 0 || cqe->user_data == LIBURING_UDATA_TIMEOUT) {
+ 		// If the operation was cancelled, or the operation has no user data:
+ 		if (cqe->user_data == 0 || cqe->user_data == LIBURING_UDATA_TIMEOUT) {
  			io_uring_cq_advance(ring, 1);
  			continue;
  		}

- 		VALUE fiber = (VALUE)cqe->user_data;
- 		VALUE result = RB_INT2NUM(cqe->res);
+ 		struct IO_Event_Selector_URing_Completion *completion = (void*)cqe->user_data;
+ 		struct IO_Event_Selector_URing_Waiting *waiting = completion->waiting;

- 		if (DEBUG) fprintf(stderr, "cqe res=%d user_data=%p\n", cqe->res, (void*)cqe->user_data);
+ 		if (DEBUG) fprintf(stderr, "select_process_completions: completion=%p waiting=%p\n", (void*)completion, (void*)waiting);
+
+ 		if (waiting) {
+ 			waiting->result = cqe->res;
+ 			waiting->flags = cqe->flags;
+ 		}

  		io_uring_cq_advance(ring, 1);
+ 		// This marks the waiting operation as "complete":
+ 		IO_Event_Selector_URing_Completion_release(selector, completion);

- 		IO_Event_Selector_fiber_transfer(fiber, 1, &result);
+ 		if (waiting && waiting->fiber) {
+ 			assert(waiting->result != -ECANCELED);
+
+ 			IO_Event_Selector_fiber_transfer(waiting->fiber, 0, NULL);
+ 		}
  	}

- 	// io_uring_cq_advance(ring, completed);
-
  	if (DEBUG && completed > 0) fprintf(stderr, "select_process_completions(completed=%d)\n", completed);

  	return completed;
  }

  VALUE IO_Event_Selector_URing_select(VALUE self, VALUE duration) {
- 	struct IO_Event_Selector_URing *data = NULL;
- 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+ 	struct IO_Event_Selector_URing *selector = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);

  	// Flush any pending events:
- 	io_uring_submit_flush(data);
+ 	io_uring_submit_flush(selector);

- 	int ready = IO_Event_Selector_queue_flush(&data->backend);
+ 	int ready = IO_Event_Selector_queue_flush(&selector->backend);

- 	int result = select_process_completions(&data->ring);
+ 	int result = select_process_completions(selector);

  	// If we:
  	// 1. Didn't process any ready fibers, and
  	// 2. Didn't process any events from non-blocking select (above), and
  	// 3. There are no items in the ready list,
  	// then we can perform a blocking select.
- 	if (!ready && !result && !data->backend.ready) {
+ 	if (!ready && !result && !selector->backend.ready) {
  		// We might need to wait for events:
  		struct select_arguments arguments = {
- 			.data = data,
+ 			.selector = selector,
  			.timeout = NULL,
  		};

  		arguments.timeout = make_timeout(duration, &arguments.storage);

- 		if (!data->backend.ready && !timeout_nonblocking(arguments.timeout)) {
+ 		if (!selector->backend.ready && !timeout_nonblocking(arguments.timeout)) {
  			// This is a blocking operation, we wait for events:
  			result = select_internal_without_gvl(&arguments);
+
+ 			// After waiting/flushing the SQ, check if there are any completions:
+ 			if (result > 0) {
+ 				result = select_process_completions(selector);
+ 			}
  		}
-
- 		// After waiting/flushing the SQ, check if there are any completions:
- 		result = select_process_completions(&data->ring);
  	}

  	return RB_INT2NUM(result);
  }

  VALUE IO_Event_Selector_URing_wakeup(VALUE self) {
- 	struct IO_Event_Selector_URing *data = NULL;
- 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+ 	struct IO_Event_Selector_URing *selector = NULL;
+ 	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);

  	// If we are blocking, we can schedule a nop event to wake up the selector:
- 	if (data->blocked) {
+ 	if (selector->blocked) {
  		struct io_uring_sqe *sqe = NULL;

  		while (true) {
- 			sqe = io_uring_get_sqe(&data->ring);
+ 			sqe = io_uring_get_sqe(&selector->ring);
  			if (sqe) break;

  			rb_thread_schedule();

  			// It's possible we became unblocked already, so we can assume the selector has already cycled at least once:
- 			if (!data->blocked) return Qfalse;
+ 			if (!selector->blocked) return Qfalse;
  		}

  		io_uring_prep_nop(sqe);
- 		// If you don't set this line, the SQE will eventually be recycled and have valid user data which can cause odd behaviour:
+ 		// If you don't set this line, the SQE will eventually be recycled and have valid user selector which can cause odd behaviour:
  		io_uring_sqe_set_data(sqe, NULL);
- 		io_uring_submit(&data->ring);
+ 		io_uring_submit(&selector->ring);

  		return Qtrue;
  	}
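
The reworked select_process_completions above iterates the completion ring with io_uring_for_each_cqe and advances one entry at a time — presumably so that a fiber it transfers to, which may re-enter the selector before this loop resumes, never observes an already-handled entry. A standalone sketch of that draining pattern (link with -luring; the NOP stands in for a real operation):

    #include <liburing.h>
    #include <stdio.h>

    static unsigned process_completions(struct io_uring *ring) {
        unsigned completed = 0;
        unsigned head;
        struct io_uring_cqe *cqe;

        // Iterate the ring buffer without additional syscalls:
        io_uring_for_each_cqe(ring, head, cqe) {
            ++completed;
            if (cqe->user_data == 0) {
                // Cancel SQEs and wakeup NOPs carry no user data; skip them:
                io_uring_cq_advance(ring, 1);
                continue;
            }
            printf("res=%d user_data=%p\n", cqe->res, (void*)cqe->user_data);
            // Advance before dispatching, so re-entrant processing never
            // sees this entry again:
            io_uring_cq_advance(ring, 1);
        }
        return completed;
    }

    int main(void) {
        struct io_uring ring;
        if (io_uring_queue_init(8, &ring, 0) < 0) return 1;

        struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
        io_uring_prep_nop(sqe);
        io_uring_sqe_set_data(sqe, NULL);
        io_uring_submit_and_wait(&ring, 1);

        printf("processed %u completions\n", process_completions(&ring));
        io_uring_queue_exit(&ring);
        return 0;
    }
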