io-event 1.2.3 → 1.3.0

This diff compares publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
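Summary of the change: this release reworks how the io_uring selector associates in-flight submissions with fibers. Instead of storing the fiber `VALUE` directly in each SQE's user data, the selector now allocates a stable `IO_Event_Selector_URing_Completion` record (kept in an `IO_Event_Array` and recycled through a free list) which points at a stack-allocated `IO_Event_Selector_URing_Waiting` record. Cancellation paths move from `rb_rescue` to `rb_ensure`, the selector gains GC compaction support (`dcompact`), `pidfd_open` failures now raise instead of being ignored, and `io_read`/`io_write` report the bytes transferred by the call (`total`) rather than the final buffer offset.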
@@ -20,6 +20,8 @@
 
 #include "uring.h"
 #include "selector.h"
+#include "list.h"
+#include "array.h"
 
 #include <liburing.h>
 #include <poll.h>
@@ -32,7 +34,6 @@
 
 enum {
 	DEBUG = 0,
-	DEBUG_IO_READ = 0,
 };
 
 static VALUE IO_Event_Selector_URing = Qnil;
@@ -41,45 +42,109 @@ enum {URING_ENTRIES = 64};
 
 #pragma mark - Data Type
 
-struct IO_Event_Selector_URing {
+struct IO_Event_Selector_URing
+{
 	struct IO_Event_Selector backend;
 	struct io_uring ring;
 	size_t pending;
 	int blocked;
+
+	struct IO_Event_Array completions;
+	struct IO_Event_List free_list;
 };
 
-void IO_Event_Selector_URing_Type_mark(void *_data)
+struct IO_Event_Selector_URing_Completion;
+
+struct IO_Event_Selector_URing_Waiting
+{
+	struct IO_Event_Selector_URing_Completion *completion;
+
+	VALUE fiber;
+
+	// The result of the operation.
+	int32_t result;
+
+	// Any associated flags.
+	uint32_t flags;
+};
+
+struct IO_Event_Selector_URing_Completion
+{
+	struct IO_Event_List list;
+
+	struct IO_Event_Selector_URing_Waiting *waiting;
+};
+
+static
+void IO_Event_Selector_URing_Completion_mark(void *_completion)
+{
+	struct IO_Event_Selector_URing_Completion *completion = _completion;
+
+	if (completion->waiting) {
+		rb_gc_mark_movable(completion->waiting->fiber);
+	}
+}
+
+void IO_Event_Selector_URing_Type_mark(void *_selector)
 {
-	struct IO_Event_Selector_URing *data = _data;
-	IO_Event_Selector_mark(&data->backend);
+	struct IO_Event_Selector_URing *selector = _selector;
+	IO_Event_Selector_mark(&selector->backend);
+	IO_Event_Array_each(&selector->completions, IO_Event_Selector_URing_Completion_mark);
 }
 
 static
-void close_internal(struct IO_Event_Selector_URing *data) {
-	if (data->ring.ring_fd >= 0) {
-		io_uring_queue_exit(&data->ring);
-		data->ring.ring_fd = -1;
+void IO_Event_Selector_URing_Completion_compact(void *_completion)
+{
+	struct IO_Event_Selector_URing_Completion *completion = _completion;
+
+	if (completion->waiting) {
+		completion->waiting->fiber = rb_gc_location(completion->waiting->fiber);
 	}
 }
 
-void IO_Event_Selector_URing_Type_free(void *_data)
+void IO_Event_Selector_URing_Type_compact(void *_selector)
+{
+	struct IO_Event_Selector_URing *selector = _selector;
+	IO_Event_Selector_compact(&selector->backend);
+	IO_Event_Array_each(&selector->completions, IO_Event_Selector_URing_Completion_compact);
+}
+
+static
+void close_internal(struct IO_Event_Selector_URing *selector)
 {
-	struct IO_Event_Selector_URing *data = _data;
+	if (selector->ring.ring_fd >= 0) {
+		io_uring_queue_exit(&selector->ring);
+		selector->ring.ring_fd = -1;
+	}
+}
+
+static
+void IO_Event_Selector_URing_Type_free(void *_selector)
+{
+	struct IO_Event_Selector_URing *selector = _selector;
+
+	close_internal(selector);
 
-	close_internal(data);
+	IO_Event_Array_free(&selector->completions);
 
-	free(data);
+	free(selector);
 }
 
-size_t IO_Event_Selector_URing_Type_size(const void *data)
+static
+size_t IO_Event_Selector_URing_Type_size(const void *_selector)
 {
-	return sizeof(struct IO_Event_Selector_URing);
+	const struct IO_Event_Selector_URing *selector = _selector;
+
+	return sizeof(struct IO_Event_Selector_URing)
+		+ IO_Event_Array_memory_size(&selector->completions)
+	;
 }
 
 static const rb_data_type_t IO_Event_Selector_URing_Type = {
 	.wrap_struct_name = "IO_Event::Backend::URing",
 	.function = {
 		.dmark = IO_Event_Selector_URing_Type_mark,
+		.dcompact = IO_Event_Selector_URing_Type_compact,
 		.dfree = IO_Event_Selector_URing_Type_free,
 		.dsize = IO_Event_Selector_URing_Type_size,
 	},
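The new structures above change what an SQE's `user_data` points at. Previously it held the waiting fiber's `VALUE` directly; now it holds a heap-stable `Completion` record which in turn points at a `Waiting` record on the waiting fiber's stack, and the two sides can detach from each other in either order. A self-contained sketch of that detachable-handle pattern (generic names, not the package's API):

```c
/* Sketch: two records that reference each other and can detach in either
 * order. The "completion" survives until the kernel produces a CQE; the
 * "waiting" record lives on a stack frame that may unwind early. */
#include <stddef.h>

struct completion;

struct waiting {
	struct completion *completion;
};

struct completion {
	struct waiting *waiting;
};

/* The stack frame unwinds (e.g. the fiber is cancelled) before the CQE arrives: */
static void waiting_detach(struct waiting *waiting)
{
	if (waiting->completion) {
		waiting->completion->waiting = NULL; /* The CQE handler sees a stale entry. */
		waiting->completion = NULL;
	}
}

/* The CQE arrives; a NULL waiting pointer means nobody is listening anymore: */
static void completion_finish(struct completion *completion)
{
	if (completion->waiting) {
		completion->waiting->completion = NULL;
		completion->waiting = NULL;
	}
}
```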
@@ -87,15 +152,79 @@ static const rb_data_type_t IO_Event_Selector_URing_Type = {
 	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
 };
 
+inline static
+struct IO_Event_Selector_URing_Completion * IO_Event_Selector_URing_Completion_acquire(struct IO_Event_Selector_URing *selector, struct IO_Event_Selector_URing_Waiting *waiting)
+{
+	struct IO_Event_Selector_URing_Completion *completion = NULL;
+
+	if (!IO_Event_List_empty(&selector->free_list)) {
+		completion = (struct IO_Event_Selector_URing_Completion*)selector->free_list.tail;
+		IO_Event_List_pop(&completion->list);
+	} else {
+		completion = IO_Event_Array_push(&selector->completions);
+		IO_Event_List_clear(&completion->list);
+	}
+
+	waiting->completion = completion;
+	completion->waiting = waiting;
+
+	return completion;
+}
+
+inline static
+void IO_Event_Selector_URing_Completion_cancel(struct IO_Event_Selector_URing_Completion *completion)
+{
+	if (completion->waiting) {
+		completion->waiting->completion = NULL;
+		completion->waiting = NULL;
+	}
+}
+
+inline static
+void IO_Event_Selector_URing_Completion_release(struct IO_Event_Selector_URing *selector, struct IO_Event_Selector_URing_Completion *completion)
+{
+	IO_Event_Selector_URing_Completion_cancel(completion);
+	IO_Event_List_prepend(&selector->free_list, &completion->list);
+}
+
+inline static
+void IO_Event_Selector_URing_Waiting_cancel(struct IO_Event_Selector_URing *selector, struct IO_Event_Selector_URing_Waiting *waiting)
+{
+	if (waiting->completion) {
+		waiting->completion->waiting = NULL;
+		waiting->completion = NULL;
+	}
+
+	waiting->fiber = 0;
+}
+
+void IO_Event_Selector_URing_Completion_initialize(void *element)
+{
+	struct IO_Event_Selector_URing_Completion *completion = element;
+	IO_Event_List_initialize(&completion->list);
+}
+
+void IO_Event_Selector_URing_Completion_free(void *element)
+{
+	struct IO_Event_Selector_URing_Completion *completion = element;
+	IO_Event_Selector_URing_Completion_cancel(completion);
+}
+
 VALUE IO_Event_Selector_URing_allocate(VALUE self) {
-	struct IO_Event_Selector_URing *data = NULL;
-	VALUE instance = TypedData_Make_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	VALUE instance = TypedData_Make_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
-	IO_Event_Selector_initialize(&data->backend, Qnil);
-	data->ring.ring_fd = -1;
+	IO_Event_Selector_initialize(&selector->backend, Qnil);
+	selector->ring.ring_fd = -1;
 
-	data->pending = 0;
-	data->blocked = 0;
+	selector->pending = 0;
+	selector->blocked = 0;
+
+	IO_Event_List_initialize(&selector->free_list);
+
+	selector->completions.element_initialize = IO_Event_Selector_URing_Completion_initialize;
+	selector->completions.element_free = IO_Event_Selector_URing_Completion_free;
+	IO_Event_Array_allocate(&selector->completions, 1024, sizeof(struct IO_Event_Selector_URing_Completion));
 
 	return instance;
 }
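`Completion_acquire` above recycles entries through the intrusive `free_list` before growing the `completions` array, so completion records are reused rather than allocated per operation, and their addresses stay stable for the pointers handed to the kernel. A minimal self-contained sketch of the same acquire/release idea, assuming a plain singly-linked free list instead of the gem's `IO_Event_List`:

```c
#include <stdlib.h>

/* Intrusive free list: released entries are pushed onto the list and
 * reused before any new entry is allocated, so completed operations
 * recycle memory instead of growing the pool forever. */
struct entry {
	struct entry *next; /* Next free entry, or NULL. */
	int payload;
};

struct pool {
	struct entry *free; /* A singly-linked list is enough for LIFO reuse. */
};

static struct entry *pool_acquire(struct pool *pool)
{
	if (pool->free) {
		struct entry *entry = pool->free;
		pool->free = entry->next;
		return entry;
	}

	return calloc(1, sizeof(struct entry)); /* Grow when the free list is empty. */
}

static void pool_release(struct pool *pool, struct entry *entry)
{
	entry->next = pool->free; /* LIFO keeps recently-released entries cache-warm. */
	pool->free = entry;
}
```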
@@ -103,100 +232,100 @@ VALUE IO_Event_Selector_URing_allocate(VALUE self) {
 #pragma mark - Methods
 
 VALUE IO_Event_Selector_URing_initialize(VALUE self, VALUE loop) {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
-	IO_Event_Selector_initialize(&data->backend, loop);
-	int result = io_uring_queue_init(URING_ENTRIES, &data->ring, 0);
+	IO_Event_Selector_initialize(&selector->backend, loop);
+	int result = io_uring_queue_init(URING_ENTRIES, &selector->ring, 0);
 
 	if (result < 0) {
 		rb_syserr_fail(-result, "IO_Event_Selector_URing_initialize:io_uring_queue_init");
 	}
 
-	rb_update_max_fd(data->ring.ring_fd);
+	rb_update_max_fd(selector->ring.ring_fd);
 
 	return self;
 }
 
 VALUE IO_Event_Selector_URing_loop(VALUE self) {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
-	return data->backend.loop;
+	return selector->backend.loop;
 }
 
 VALUE IO_Event_Selector_URing_close(VALUE self) {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
-	close_internal(data);
+	close_internal(selector);
 
 	return Qnil;
 }
 
 VALUE IO_Event_Selector_URing_transfer(VALUE self)
 {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
-	return IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
+	return IO_Event_Selector_fiber_transfer(selector->backend.loop, 0, NULL);
 }
 
 VALUE IO_Event_Selector_URing_resume(int argc, VALUE *argv, VALUE self)
 {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
-	return IO_Event_Selector_resume(&data->backend, argc, argv);
+	return IO_Event_Selector_resume(&selector->backend, argc, argv);
 }
 
 VALUE IO_Event_Selector_URing_yield(VALUE self)
 {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
-	return IO_Event_Selector_yield(&data->backend);
+	return IO_Event_Selector_yield(&selector->backend);
 }
 
 VALUE IO_Event_Selector_URing_push(VALUE self, VALUE fiber)
 {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
-	IO_Event_Selector_queue_push(&data->backend, fiber);
+	IO_Event_Selector_queue_push(&selector->backend, fiber);
 
 	return Qnil;
 }
 
 VALUE IO_Event_Selector_URing_raise(int argc, VALUE *argv, VALUE self)
 {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
-	return IO_Event_Selector_raise(&data->backend, argc, argv);
+	return IO_Event_Selector_raise(&selector->backend, argc, argv);
 }
 
 VALUE IO_Event_Selector_URing_ready_p(VALUE self) {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
-	return data->backend.ready ? Qtrue : Qfalse;
+	return selector->backend.ready ? Qtrue : Qfalse;
 }
 
 #pragma mark - Submission Queue
 
 // Flush the submission queue if pending operations are present.
 static
-int io_uring_submit_flush(struct IO_Event_Selector_URing *data) {
-	if (data->pending) {
-		if (DEBUG) fprintf(stderr, "io_uring_submit_flush(pending=%ld)\n", data->pending);
+int io_uring_submit_flush(struct IO_Event_Selector_URing *selector) {
+	if (selector->pending) {
+		if (DEBUG) fprintf(stderr, "io_uring_submit_flush(pending=%ld)\n", selector->pending);
 
 		// Try to submit:
-		int result = io_uring_submit(&data->ring);
+		int result = io_uring_submit(&selector->ring);
 
 		if (result >= 0) {
 			// If it was submitted, reset pending count:
-			data->pending = 0;
+			selector->pending = 0;
 		} else if (result != -EBUSY && result != -EAGAIN) {
 			rb_syserr_fail(-result, "io_uring_submit_flush:io_uring_submit");
 		}
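The hunk above is mostly the mechanical `data` → `selector` rename, but it also preserves the selector's submission batching strategy: each operation queues an SQE and increments `selector->pending`, and a single later `io_uring_submit` flushes the whole batch in one syscall. A hedged liburing sketch of that pattern (simplified; the gem's `-EBUSY`/`-EAGAIN` handling is elided):

```c
#include <liburing.h>

static unsigned pending = 0;

/* Queue a no-op without submitting; the syscall is deferred. */
static void queue_nop(struct io_uring *ring)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	if (!sqe) return; /* SQ ring full; the gem submits and retries instead. */

	io_uring_prep_nop(sqe);
	pending += 1;
}

/* One io_uring_submit call flushes the whole batch. */
static void flush(struct io_uring *ring)
{
	if (pending) {
		if (io_uring_submit(ring) >= 0) pending = 0;
	}
}
```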
@@ -209,19 +338,19 @@ int io_uring_submit_flush(struct IO_Event_Selector_URing *data) {
 
 // Immediately flush the submission queue, yielding to the event loop if it was not successful.
 static
-int io_uring_submit_now(struct IO_Event_Selector_URing *data) {
-	if (DEBUG && data->pending) fprintf(stderr, "io_uring_submit_now(pending=%ld)\n", data->pending);
+int io_uring_submit_now(struct IO_Event_Selector_URing *selector) {
+	if (DEBUG && selector->pending) fprintf(stderr, "io_uring_submit_now(pending=%ld)\n", selector->pending);
 
 	while (true) {
-		int result = io_uring_submit(&data->ring);
+		int result = io_uring_submit(&selector->ring);
 
 		if (result >= 0) {
-			data->pending = 0;
+			selector->pending = 0;
 			return result;
 		}
 
 		if (result == -EBUSY || result == -EAGAIN) {
-			IO_Event_Selector_yield(&data->backend);
+			IO_Event_Selector_yield(&selector->backend);
 		} else {
 			rb_syserr_fail(-result, "io_uring_submit_now:io_uring_submit");
 		}
@@ -230,20 +359,20 @@ int io_uring_submit_now(struct IO_Event_Selector_URing *data) {
 
 // Submit a pending operation. This does not submit the operation immediately, but instead defers it to the next call to `io_uring_submit_flush` or `io_uring_submit_now`. This is useful for operations that are not urgent, but should be used with care as it can lead to a deadlock if the submission queue is not flushed.
 static
-void io_uring_submit_pending(struct IO_Event_Selector_URing *data) {
-	data->pending += 1;
+void io_uring_submit_pending(struct IO_Event_Selector_URing *selector) {
+	selector->pending += 1;
 
-	if (DEBUG) fprintf(stderr, "io_uring_submit_pending(ring=%p, pending=%ld)\n", &data->ring, data->pending);
+	if (DEBUG) fprintf(stderr, "io_uring_submit_pending(ring=%p, pending=%ld)\n", &selector->ring, selector->pending);
 }
 
-struct io_uring_sqe * io_get_sqe(struct IO_Event_Selector_URing *data) {
-	struct io_uring_sqe *sqe = io_uring_get_sqe(&data->ring);
+struct io_uring_sqe * io_get_sqe(struct IO_Event_Selector_URing *selector) {
+	struct io_uring_sqe *sqe = io_uring_get_sqe(&selector->ring);
 
 	while (sqe == NULL) {
 		// The submit queue is full, we need to drain it:
-		io_uring_submit_now(data);
+		io_uring_submit_now(selector);
 
-		sqe = io_uring_get_sqe(&data->ring);
+		sqe = io_uring_get_sqe(&selector->ring);
 	}
 
 	return sqe;
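`io_get_sqe` loops because `io_uring_get_sqe` returns NULL once the submission ring (sized by `URING_ENTRIES = 64`) is full; calling `io_uring_submit_now` drains the ring so a free SQE becomes available. This is untouched by the release apart from the rename, but it is the mechanism that keeps the deferred-submission scheme above from stalling when the ring fills.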
@@ -252,9 +381,10 @@ struct io_uring_sqe * io_get_sqe(struct IO_Event_Selector_URing *data) {
 #pragma mark - Process.wait
 
 struct process_wait_arguments {
-	struct IO_Event_Selector_URing *data;
+	struct IO_Event_Selector_URing *selector;
+	struct IO_Event_Selector_URing_Waiting *waiting;
+
 	pid_t pid;
-	int flags;
 	int descriptor;
 };
 
@@ -262,9 +392,13 @@ static
 VALUE process_wait_transfer(VALUE _arguments) {
 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
 
-	IO_Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
+	IO_Event_Selector_fiber_transfer(arguments->selector->backend.loop, 0, NULL);
 
-	return IO_Event_Selector_process_status_wait(arguments->pid);
+	if (arguments->waiting->result) {
+		return IO_Event_Selector_process_status_wait(arguments->pid);
+	} else {
+		return Qfalse;
+	}
 }
 
 static
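`process_wait_transfer` now inspects `waiting->result` after the fiber resumes: it stays zero unless a CQE was actually delivered, so the potentially blocking `IO_Event_Selector_process_status_wait` only runs when the pidfd poll really fired, and an early unwind returns `Qfalse` instead.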
@@ -273,29 +407,43 @@ VALUE process_wait_ensure(VALUE _arguments) {
 
 	close(arguments->descriptor);
 
+	IO_Event_Selector_URing_Waiting_cancel(arguments->selector, arguments->waiting);
+
 	return Qnil;
 }
 
-VALUE IO_Event_Selector_URing_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+VALUE IO_Event_Selector_URing_process_wait(VALUE self, VALUE fiber, VALUE _pid, VALUE _flags) {
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
+
+	pid_t pid = NUM2PIDT(_pid);
+
+	int descriptor = pidfd_open(pid, 0);
+	if (descriptor < 0) {
+		rb_syserr_fail(errno, "IO_Event_Selector_URing_process_wait:pidfd_open");
+	}
+	rb_update_max_fd(descriptor);
+
+	struct IO_Event_Selector_URing_Waiting waiting = {
+		.fiber = fiber,
+	};
+
+	struct IO_Event_Selector_URing_Completion *completion = IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
 
 	struct process_wait_arguments process_wait_arguments = {
-		.data = data,
-		.pid = NUM2PIDT(pid),
-		.flags = NUM2INT(flags),
+		.selector = selector,
+		.waiting = &waiting,
+		.pid = pid,
+		.descriptor = descriptor,
 	};
 
-	process_wait_arguments.descriptor = pidfd_open(process_wait_arguments.pid, 0);
-	rb_update_max_fd(process_wait_arguments.descriptor);
+	struct io_uring_sqe *sqe = io_get_sqe(selector);
 
-	struct io_uring_sqe *sqe = io_get_sqe(data);
-
 	if (DEBUG) fprintf(stderr, "IO_Event_Selector_URing_process_wait:io_uring_prep_poll_add(%p)\n", (void*)fiber);
-	io_uring_prep_poll_add(sqe, process_wait_arguments.descriptor, POLLIN|POLLHUP|POLLERR);
-	io_uring_sqe_set_data(sqe, (void*)fiber);
-	io_uring_submit_pending(data);
-
+	io_uring_prep_poll_add(sqe, descriptor, POLLIN|POLLHUP|POLLERR);
+	io_uring_sqe_set_data(sqe, completion);
+	io_uring_submit_pending(selector);
+
 	return rb_ensure(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_ensure, (VALUE)&process_wait_arguments);
 }
 
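The rewritten `process_wait` now checks `pidfd_open` for failure and raises, where the old code would have handed a negative descriptor straight to `io_uring_prep_poll_add`. Note that `pidfd_open` has no libc wrapper on older systems; a sketch of the kind of raw-syscall wrapper this call presumably relies on elsewhere in the file (an assumption, shown for context only):

```c
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

/* Assumed wrapper: Linux 5.3+ exposes pidfd_open, but older toolchains
 * only provide the syscall number, not a libc function. */
static int pidfd_open(pid_t pid, unsigned int flags)
{
	return syscall(SYS_pidfd_open, pid, flags);
}
```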
@@ -328,70 +476,72 @@ int events_from_poll_flags(short flags) {
 }
 
 struct io_wait_arguments {
-	struct IO_Event_Selector_URing *data;
-	VALUE fiber;
+	struct IO_Event_Selector_URing *selector;
+	struct IO_Event_Selector_URing_Waiting *waiting;
 	short flags;
 };
 
 static
-VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
+VALUE io_wait_ensure(VALUE _arguments) {
 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
-	struct IO_Event_Selector_URing *data = arguments->data;
 
-	struct io_uring_sqe *sqe = io_get_sqe(data);
+	// We may want to consider cancellation. Be aware that the order of operations is important here:
+	// io_uring_prep_cancel(sqe, (void*)arguments->waiting, 0);
+	// io_uring_sqe_set_data(sqe, NULL);
+	// io_uring_submit_now(selector);
 
-	if (DEBUG) fprintf(stderr, "io_wait_rescue:io_uring_prep_poll_remove(%p)\n", (void*)arguments->fiber);
+	IO_Event_Selector_URing_Waiting_cancel(arguments->selector, arguments->waiting);
 
-	io_uring_prep_poll_remove(sqe, (uintptr_t)arguments->fiber);
-	io_uring_sqe_set_data(sqe, NULL);
-	io_uring_submit_now(data);
-
-	rb_exc_raise(exception);
+	return Qnil;
 };
 
 static
 VALUE io_wait_transfer(VALUE _arguments) {
 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
-	struct IO_Event_Selector_URing *data = arguments->data;
-
-	VALUE result = IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
-	if (DEBUG) fprintf(stderr, "io_wait:IO_Event_Selector_fiber_transfer -> %d\n", RB_NUM2INT(result));
-
-	if (!RTEST(result)) {
+	struct IO_Event_Selector_URing *selector = arguments->selector;
+
+	IO_Event_Selector_fiber_transfer(selector->backend.loop, 0, NULL);
+
+	if (arguments->waiting->result) {
+		// We explicitly filter the resulting events based on the requested events.
+		// In some cases, poll will report events we didn't ask for.
+		return RB_INT2NUM(events_from_poll_flags(arguments->waiting->result & arguments->flags));
+	} else {
 		return Qfalse;
 	}
-
-	// We explicitly filter the resulting events based on the requested events.
-	// In some cases, poll will report events we didn't ask for.
-	short flags = arguments->flags & NUM2INT(result);
-
-	return INT2NUM(events_from_poll_flags(flags));
 };
 
 VALUE IO_Event_Selector_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
 	int descriptor = IO_Event_Selector_io_descriptor(io);
-	struct io_uring_sqe *sqe = io_get_sqe(data);
+	struct io_uring_sqe *sqe = io_get_sqe(selector);
 
 	short flags = poll_flags_from_events(NUM2INT(events));
 
 	if (DEBUG) fprintf(stderr, "IO_Event_Selector_URing_io_wait:io_uring_prep_poll_add(descriptor=%d, flags=%d, fiber=%p)\n", descriptor, flags, (void*)fiber);
 
 	io_uring_prep_poll_add(sqe, descriptor, flags);
-	io_uring_sqe_set_data(sqe, (void*)fiber);
+
+	struct IO_Event_Selector_URing_Waiting waiting = {
+		.fiber = fiber,
+	};
+
+	struct IO_Event_Selector_URing_Completion *completion = IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
+
+	io_uring_sqe_set_data(sqe, completion);
 
 	// If we are going to wait, we assume that we are waiting for a while:
-	io_uring_submit_pending(data);
+	io_uring_submit_pending(selector);
 
 	struct io_wait_arguments io_wait_arguments = {
-		.data = data,
-		.fiber = fiber,
+		.selector = selector,
+		.waiting = &waiting,
 		.flags = flags
 	};
 
-	return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
+	return rb_ensure(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_ensure, (VALUE)&io_wait_arguments);
 }
 
 #ifdef HAVE_RUBY_IO_BUFFER_H
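The switch from `rb_rescue` to `rb_ensure` in `io_wait` is the behavioural core of this hunk: cleanup now runs on every exit path (normal return, exception, or the fiber being discarded), not only when an exception is raised, and `IO_Event_Selector_URing_Waiting_cancel` detaches the stack-allocated `Waiting` record so that a late CQE cannot write into a dead stack frame. A minimal usage sketch of `rb_ensure` with hypothetical function names (assumes ruby.h):

```c
#include <ruby.h>

static VALUE operation(VALUE argument)
{
	/* May return normally, raise, or suspend and never resume. */
	return Qnil;
}

static VALUE cleanup(VALUE argument)
{
	/* Runs on every exit path, like Ruby's `ensure`. */
	return Qnil;
}

VALUE guarded(VALUE argument)
{
	return rb_ensure(operation, argument, cleanup, argument);
}
```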
@@ -417,8 +567,8 @@ static inline off_t io_seekable(int descriptor)
 #pragma mark - IO#read
 
 struct io_read_arguments {
-	struct IO_Event_Selector_URing *data;
-	VALUE fiber;
+	struct IO_Event_Selector_URing *selector;
+	struct IO_Event_Selector_URing_Waiting *waiting;
 	int descriptor;
 	char *buffer;
 	size_t length;
@@ -428,58 +578,67 @@ static VALUE
 io_read_submit(VALUE _arguments)
 {
 	struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
-	struct IO_Event_Selector_URing *data = arguments->data;
-	struct io_uring_sqe *sqe = io_get_sqe(data);
+	struct IO_Event_Selector_URing *selector = arguments->selector;
+	struct io_uring_sqe *sqe = io_get_sqe(selector);
 
-	if (DEBUG) fprintf(stderr, "io_read_submit:io_uring_prep_read(fiber=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->fiber, arguments->descriptor, arguments->buffer, arguments->length);
+	if (DEBUG) fprintf(stderr, "io_read_submit:io_uring_prep_read(fiber=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->waiting, arguments->descriptor, arguments->buffer, arguments->length);
 
 	io_uring_prep_read(sqe, arguments->descriptor, arguments->buffer, arguments->length, io_seekable(arguments->descriptor));
-	io_uring_sqe_set_data(sqe, (void*)arguments->fiber);
-	io_uring_submit_now(data);
+	io_uring_sqe_set_data(sqe, arguments->waiting->completion);
+	io_uring_submit_now(selector);
+
+	IO_Event_Selector_fiber_transfer(selector->backend.loop, 0, NULL);
 
-	return IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
+	return RB_INT2NUM(arguments->waiting->result);
 }
 
 static VALUE
-io_read_cancel(VALUE _arguments, VALUE exception)
+io_read_ensure(VALUE _arguments)
 {
 	struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
-	struct IO_Event_Selector_URing *data = arguments->data;
+	struct IO_Event_Selector_URing *selector = arguments->selector;
 
-	struct io_uring_sqe *sqe = io_get_sqe(data);
+	struct io_uring_sqe *sqe = io_get_sqe(selector);
 
-	if (DEBUG) fprintf(stderr, "io_read_cancel:io_uring_prep_cancel(fiber=%p)\n", (void*)arguments->fiber);
+	if (DEBUG) fprintf(stderr, "io_read_cancel:io_uring_prep_cancel(fiber=%p)\n", (void*)arguments->waiting);
+
+	// If the operation has already completed, we don't need to cancel it:
+	if (!arguments->waiting->result) {
+		io_uring_prep_cancel(sqe, (void*)arguments->waiting, 0);
+		io_uring_sqe_set_data(sqe, NULL);
+		io_uring_submit_now(selector);
+	}
 
-	io_uring_prep_cancel(sqe, (void*)arguments->fiber, 0);
-	io_uring_sqe_set_data(sqe, NULL);
-	io_uring_submit_now(data);
+	IO_Event_Selector_URing_Waiting_cancel(arguments->selector, arguments->waiting);
 
-	rb_exc_raise(exception);
+	return Qnil;
 }
 
 static int
-io_read(struct IO_Event_Selector_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length)
+io_read(struct IO_Event_Selector_URing *selector, VALUE fiber, int descriptor, char *buffer, size_t length)
 {
-	struct io_read_arguments io_read_arguments = {
-		.data = data,
+	struct IO_Event_Selector_URing_Waiting waiting = {
 		.fiber = fiber,
+	};
+
+	IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
+
+	struct io_read_arguments io_read_arguments = {
+		.selector = selector,
+		.waiting = &waiting,
 		.descriptor = descriptor,
 		.buffer = buffer,
 		.length = length
 	};
 
-	int result = RB_NUM2INT(
-		rb_rescue(io_read_submit, (VALUE)&io_read_arguments, io_read_cancel, (VALUE)&io_read_arguments)
+	return RB_NUM2INT(
		rb_ensure(io_read_submit, (VALUE)&io_read_arguments, io_read_ensure, (VALUE)&io_read_arguments)
 	);
-
-	if (DEBUG) fprintf(stderr, "io_read:IO_Event_Selector_fiber_transfer -> %d\n", result);
-
-	return result;
 }
 
 VALUE IO_Event_Selector_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length, VALUE _offset) {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
 	int descriptor = IO_Event_Selector_io_descriptor(io);
 
@@ -489,14 +648,14 @@ VALUE IO_Event_Selector_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE b
 
 	size_t length = NUM2SIZET(_length);
 	size_t offset = NUM2SIZET(_offset);
+	size_t total = 0;
 
-	while (true) {
-		size_t maximum_size = size - offset;
-		if (DEBUG_IO_READ) fprintf(stderr, "io_read(%d, +%ld, %ld)\n", descriptor, offset, maximum_size);
-		int result = io_read(data, fiber, descriptor, (char*)base+offset, maximum_size);
-		if (DEBUG_IO_READ) fprintf(stderr, "io_read(%d, +%ld, %ld) -> %d\n", descriptor, offset, maximum_size, result);
+	size_t maximum_size = size - offset;
+	while (maximum_size) {
+		int result = io_read(selector, fiber, descriptor, (char*)base+offset, maximum_size);
 
 		if (result > 0) {
+			total += result;
 			offset += result;
 			if ((size_t)result >= length) break;
 			length -= result;
@@ -507,9 +666,11 @@ VALUE IO_Event_Selector_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE b
 		} else {
 			return rb_fiber_scheduler_io_result(-1, -result);
 		}
+
+		maximum_size = size - offset;
 	}
 
-	return rb_fiber_scheduler_io_result(offset, 0);
+	return rb_fiber_scheduler_io_result(total, 0);
 }
 
 static VALUE IO_Event_Selector_URing_io_read_compatible(int argc, VALUE *argv, VALUE self)
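Returning `total` instead of `offset` corrects the reported byte count when the caller passes a nonzero initial offset: for example, reading 100 bytes into a buffer starting at offset 500 previously reported 600 bytes transferred, but now reports 100. Making the recomputed `maximum_size` the loop condition also ends the loop once the buffer is full, rather than issuing a final zero-length read.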
@@ -528,8 +689,8 @@ static VALUE IO_Event_Selector_URing_io_read_compatible(int argc, VALUE *argv, V
 #pragma mark - IO#write
 
 struct io_write_arguments {
-	struct IO_Event_Selector_URing *data;
-	VALUE fiber;
+	struct IO_Event_Selector_URing *selector;
+	struct IO_Event_Selector_URing_Waiting *waiting;
 	int descriptor;
 	char *buffer;
 	size_t length;
@@ -539,59 +700,67 @@ static VALUE
 io_write_submit(VALUE _argument)
 {
 	struct io_write_arguments *arguments = (struct io_write_arguments*)_argument;
-	struct IO_Event_Selector_URing *data = arguments->data;
+	struct IO_Event_Selector_URing *selector = arguments->selector;
 
-	struct io_uring_sqe *sqe = io_get_sqe(data);
+	struct io_uring_sqe *sqe = io_get_sqe(selector);
 
-	if (DEBUG) fprintf(stderr, "io_write_submit:io_uring_prep_write(fiber=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->fiber, arguments->descriptor, arguments->buffer, arguments->length);
+	if (DEBUG) fprintf(stderr, "io_write_submit:io_uring_prep_write(fiber=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->waiting, arguments->descriptor, arguments->buffer, arguments->length);
 
 	io_uring_prep_write(sqe, arguments->descriptor, arguments->buffer, arguments->length, io_seekable(arguments->descriptor));
-	io_uring_sqe_set_data(sqe, (void*)arguments->fiber);
-	io_uring_submit_pending(data);
+	io_uring_sqe_set_data(sqe, arguments->waiting->completion);
+	io_uring_submit_pending(selector);
+
+	IO_Event_Selector_fiber_transfer(selector->backend.loop, 0, NULL);
 
-	return IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
+	return RB_INT2NUM(arguments->waiting->result);
 }
 
 static VALUE
-io_write_cancel(VALUE _argument, VALUE exception)
+io_write_ensure(VALUE _argument)
 {
 	struct io_write_arguments *arguments = (struct io_write_arguments*)_argument;
-	struct IO_Event_Selector_URing *data = arguments->data;
+	struct IO_Event_Selector_URing *selector = arguments->selector;
 
-	struct io_uring_sqe *sqe = io_get_sqe(data);
+	struct io_uring_sqe *sqe = io_get_sqe(selector);
 
-	if (DEBUG) fprintf(stderr, "io_wait_rescue:io_uring_prep_cancel(%p)\n", (void*)arguments->fiber);
+	if (DEBUG) fprintf(stderr, "io_wait_rescue:io_uring_prep_cancel(%p)\n", (void*)arguments->waiting);
 
-	io_uring_prep_cancel(sqe, (void*)arguments->fiber, 0);
-	io_uring_sqe_set_data(sqe, NULL);
-	io_uring_submit_now(data);
+	if (!arguments->waiting->result) {
+		io_uring_prep_cancel(sqe, (void*)arguments->waiting, 0);
+		io_uring_sqe_set_data(sqe, NULL);
+		io_uring_submit_now(selector);
+	}
 
-	rb_exc_raise(exception);
+	IO_Event_Selector_URing_Waiting_cancel(arguments->selector, arguments->waiting);
+
+	return Qnil;
 }
 
 static int
-io_write(struct IO_Event_Selector_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length)
+io_write(struct IO_Event_Selector_URing *selector, VALUE fiber, int descriptor, char *buffer, size_t length)
 {
-	struct io_write_arguments arguments = {
-		.data = data,
+	struct IO_Event_Selector_URing_Waiting waiting = {
 		.fiber = fiber,
+	};
+
+	IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
+
+	struct io_write_arguments arguments = {
+		.selector = selector,
+		.waiting = &waiting,
 		.descriptor = descriptor,
 		.buffer = buffer,
 		.length = length,
 	};
 
-	int result = RB_NUM2INT(
-		rb_rescue(io_write_submit, (VALUE)&arguments, io_write_cancel, (VALUE)&arguments)
+	return RB_NUM2INT(
		rb_ensure(io_write_submit, (VALUE)&arguments, io_write_ensure, (VALUE)&arguments)
 	);
-
-	if (DEBUG) fprintf(stderr, "io_write:IO_Event_Selector_fiber_transfer -> %d\n", result);
-
-	return result;
 }
 
 VALUE IO_Event_Selector_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length, VALUE _offset) {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
 	int descriptor = IO_Event_Selector_io_descriptor(io);
 
@@ -601,16 +770,18 @@ VALUE IO_Event_Selector_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE
 
 	size_t length = NUM2SIZET(_length);
 	size_t offset = NUM2SIZET(_offset);
+	size_t total = 0;
 
 	if (length > size) {
 		rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
 	}
-
-	while (true) {
-		size_t maximum_size = size - offset;
-		int result = io_write(data, fiber, descriptor, (char*)base+offset, maximum_size);
+
+	size_t maximum_size = size - offset;
+	while (maximum_size) {
+		int result = io_write(selector, fiber, descriptor, (char*)base+offset, maximum_size);
 
 		if (result > 0) {
+			total += result;
 			offset += result;
 			if ((size_t)result >= length) break;
 			length -= result;
@@ -621,9 +792,11 @@ VALUE IO_Event_Selector_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE
 		} else {
 			return rb_fiber_scheduler_io_result(-1, -result);
 		}
+
+		maximum_size = size - offset;
 	}
 
-	return rb_fiber_scheduler_io_result(offset, 0);
+	return rb_fiber_scheduler_io_result(total, 0);
 }
 
 static VALUE IO_Event_Selector_URing_io_write_compatible(int argc, VALUE *argv, VALUE self)
@@ -646,17 +819,17 @@ static VALUE IO_Event_Selector_URing_io_write_compatible(int argc, VALUE *argv,
 static const int ASYNC_CLOSE = 1;
 
 VALUE IO_Event_Selector_URing_io_close(VALUE self, VALUE io) {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
 	int descriptor = IO_Event_Selector_io_descriptor(io);
 
 	if (ASYNC_CLOSE) {
-		struct io_uring_sqe *sqe = io_get_sqe(data);
+		struct io_uring_sqe *sqe = io_get_sqe(selector);
 
 		io_uring_prep_close(sqe, descriptor);
 		io_uring_sqe_set_data(sqe, NULL);
-		io_uring_submit_now(data);
+		io_uring_submit_now(selector);
 	} else {
 		close(descriptor);
 	}
@@ -699,7 +872,7 @@ int timeout_nonblocking(struct __kernel_timespec *timespec) {
 }
 
 struct select_arguments {
-	struct IO_Event_Selector_URing *data;
+	struct IO_Event_Selector_URing *selector;
 
 	int result;
 
@@ -712,18 +885,18 @@ void * select_internal(void *_arguments) {
 	struct select_arguments * arguments = (struct select_arguments *)_arguments;
 	struct io_uring_cqe *cqe = NULL;
 
-	arguments->result = io_uring_wait_cqe_timeout(&arguments->data->ring, &cqe, arguments->timeout);
+	arguments->result = io_uring_wait_cqe_timeout(&arguments->selector->ring, &cqe, arguments->timeout);
 
 	return NULL;
 }
 
 static
 int select_internal_without_gvl(struct select_arguments *arguments) {
-	io_uring_submit_flush(arguments->data);
+	io_uring_submit_flush(arguments->selector);
 
-	arguments->data->blocked = 1;
+	arguments->selector->blocked = 1;
 	rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
-	arguments->data->blocked = 0;
+	arguments->selector->blocked = 0;
 
 	if (arguments->result == -ETIME) {
 		arguments->result = 0;
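`select_internal` runs the blocking `io_uring_wait_cqe_timeout` under `rb_thread_call_without_gvl`, so other Ruby threads keep running and `RUBY_UBF_IO` allows the wait to be interrupted; the `blocked` flag set around the call is what `IO_Event_Selector_URing_wakeup` (below) consults before submitting its wake-up NOP. A timeout surfaces as `-ETIME`, which is normalized here to zero completions.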
@@ -740,7 +913,8 @@ int select_internal_without_gvl(struct select_arguments *arguments) {
 }
 
 static inline
-unsigned select_process_completions(struct io_uring *ring) {
+unsigned select_process_completions(struct IO_Event_Selector_URing *selector) {
+	struct io_uring *ring = &selector->ring;
 	unsigned completed = 0;
 	unsigned head;
 	struct io_uring_cqe *cqe;
@@ -749,87 +923,96 @@ unsigned select_process_completions(struct io_uring *ring) {
 		++completed;
 
 		// If the operation was cancelled, or the operation has no user data (fiber):
-		if (cqe->res == -ECANCELED || cqe->user_data == 0 || cqe->user_data == LIBURING_UDATA_TIMEOUT) {
+		if (cqe->user_data == 0 || cqe->user_data == LIBURING_UDATA_TIMEOUT) {
 			io_uring_cq_advance(ring, 1);
 			continue;
 		}
 
-		VALUE fiber = (VALUE)cqe->user_data;
-		VALUE result = RB_INT2NUM(cqe->res);
-
 		if (DEBUG) fprintf(stderr, "cqe res=%d user_data=%p\n", cqe->res, (void*)cqe->user_data);
 
+		struct IO_Event_Selector_URing_Completion *completion = (void*)cqe->user_data;
+		struct IO_Event_Selector_URing_Waiting *waiting = completion->waiting;
+
+		if (waiting) {
+			waiting->result = cqe->res;
+			waiting->flags = cqe->flags;
+		}
+
 		io_uring_cq_advance(ring, 1);
 
-		IO_Event_Selector_fiber_transfer(fiber, 1, &result);
+		if (waiting && waiting->fiber) {
+			IO_Event_Selector_fiber_transfer(waiting->fiber, 0, NULL);
+		}
+
+		IO_Event_Selector_URing_Completion_release(selector, completion);
 	}
 
-	// io_uring_cq_advance(ring, completed);
-
 	if (DEBUG && completed > 0) fprintf(stderr, "select_process_completions(completed=%d)\n", completed);
 
 	return completed;
 }
 
 VALUE IO_Event_Selector_URing_select(VALUE self, VALUE duration) {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
 	// Flush any pending events:
-	io_uring_submit_flush(data);
+	io_uring_submit_flush(selector);
 
-	int ready = IO_Event_Selector_queue_flush(&data->backend);
+	int ready = IO_Event_Selector_queue_flush(&selector->backend);
 
-	int result = select_process_completions(&data->ring);
+	int result = select_process_completions(selector);
 
 	// If we:
 	// 1. Didn't process any ready fibers, and
 	// 2. Didn't process any events from non-blocking select (above), and
 	// 3. There are no items in the ready list,
 	// then we can perform a blocking select.
-	if (!ready && !result && !data->backend.ready) {
+	if (!ready && !result && !selector->backend.ready) {
 		// We might need to wait for events:
 		struct select_arguments arguments = {
-			.data = data,
+			.selector = selector,
 			.timeout = NULL,
 		};
 
 		arguments.timeout = make_timeout(duration, &arguments.storage);
 
-		if (!data->backend.ready && !timeout_nonblocking(arguments.timeout)) {
+		if (!selector->backend.ready && !timeout_nonblocking(arguments.timeout)) {
 			// This is a blocking operation, we wait for events:
 			result = select_internal_without_gvl(&arguments);
+
+			// After waiting/flushing the SQ, check if there are any completions:
+			if (result > 0) {
+				result = select_process_completions(selector);
+			}
 		}
-
-		// After waiting/flushing the SQ, check if there are any completions:
-		result = select_process_completions(&data->ring);
 	}
 
 	return RB_INT2NUM(result);
 }
 
 VALUE IO_Event_Selector_URing_wakeup(VALUE self) {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
 	// If we are blocking, we can schedule a nop event to wake up the selector:
-	if (data->blocked) {
+	if (selector->blocked) {
 		struct io_uring_sqe *sqe = NULL;
 
 		while (true) {
-			sqe = io_uring_get_sqe(&data->ring);
+			sqe = io_uring_get_sqe(&selector->ring);
 			if (sqe) break;
 
 			rb_thread_schedule();
 
 			// It's possible we became unblocked already, so we can assume the selector has already cycled at least once:
-			if (!data->blocked) return Qfalse;
+			if (!selector->blocked) return Qfalse;
 		}
 
 		io_uring_prep_nop(sqe);
-		// If you don't set this line, the SQE will eventually be recycled and have valid user data which can cause odd behaviour:
+		// If you don't set this line, the SQE will eventually be recycled and have valid user selector which can cause odd behaviour:
 		io_uring_sqe_set_data(sqe, NULL);
-		io_uring_submit(&data->ring);
+		io_uring_submit(&selector->ring);
 
 		return Qtrue;
 	}
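Two details of the reworked completion loop are easy to miss. First, `-ECANCELED` results are no longer skipped: they are stored into the `Waiting` record like any other result, while completions whose waiter has already detached (`waiting == NULL`) are reaped without a fiber transfer. Second, the CQE is consumed with `io_uring_cq_advance` before control is transferred, because the resumed fiber may re-enter the selector and reap further completions. A condensed sketch of that reaping order (assumes liburing; dispatch elided):

```c
#include <liburing.h>

static unsigned reap(struct io_uring *ring)
{
	unsigned head, completed = 0;
	struct io_uring_cqe *cqe;

	io_uring_for_each_cqe(ring, head, cqe) {
		++completed;

		void *user_data = io_uring_cqe_get_data(cqe);
		int result = cqe->res;

		/* Consume the CQE first; resuming a fiber may re-enter this loop. */
		io_uring_cq_advance(ring, 1);

		if (user_data) {
			/* dispatch(user_data, result); */
			(void)result;
		}
	}

	return completed;
}
```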