io-event 1.2.3 → 1.3.1

@@ -20,6 +20,8 @@
 
 #include "uring.h"
 #include "selector.h"
+#include "list.h"
+#include "array.h"
 
 #include <liburing.h>
 #include <poll.h>
@@ -32,7 +34,7 @@
 
 enum {
 	DEBUG = 0,
-	DEBUG_IO_READ = 0,
+	DEBUG_COMPLETION = 0,
 };
 
 static VALUE IO_Event_Selector_URing = Qnil;
@@ -41,45 +43,109 @@ enum {URING_ENTRIES = 64};
 
 #pragma mark - Data Type
 
-struct IO_Event_Selector_URing {
+struct IO_Event_Selector_URing
+{
 	struct IO_Event_Selector backend;
 	struct io_uring ring;
 	size_t pending;
 	int blocked;
+
+	struct IO_Event_Array completions;
+	struct IO_Event_List free_list;
+};
+
+struct IO_Event_Selector_URing_Completion;
+
+struct IO_Event_Selector_URing_Waiting
+{
+	struct IO_Event_Selector_URing_Completion *completion;
+
+	VALUE fiber;
+
+	// The result of the operation.
+	int32_t result;
+
+	// Any associated flags.
+	uint32_t flags;
 };
 
-void IO_Event_Selector_URing_Type_mark(void *_data)
+struct IO_Event_Selector_URing_Completion
+{
+	struct IO_Event_List list;
+
+	struct IO_Event_Selector_URing_Waiting *waiting;
+};
+
+static
+void IO_Event_Selector_URing_Completion_mark(void *_completion)
+{
+	struct IO_Event_Selector_URing_Completion *completion = _completion;
+
+	if (completion->waiting) {
+		rb_gc_mark_movable(completion->waiting->fiber);
+	}
+}
+
+void IO_Event_Selector_URing_Type_mark(void *_selector)
 {
-	struct IO_Event_Selector_URing *data = _data;
-	IO_Event_Selector_mark(&data->backend);
+	struct IO_Event_Selector_URing *selector = _selector;
+	IO_Event_Selector_mark(&selector->backend);
+	IO_Event_Array_each(&selector->completions, IO_Event_Selector_URing_Completion_mark);
 }
 
 static
-void close_internal(struct IO_Event_Selector_URing *data) {
-	if (data->ring.ring_fd >= 0) {
-		io_uring_queue_exit(&data->ring);
-		data->ring.ring_fd = -1;
+void IO_Event_Selector_URing_Completion_compact(void *_completion)
+{
+	struct IO_Event_Selector_URing_Completion *completion = _completion;
+
+	if (completion->waiting) {
+		completion->waiting->fiber = rb_gc_location(completion->waiting->fiber);
 	}
 }
 
-void IO_Event_Selector_URing_Type_free(void *_data)
+void IO_Event_Selector_URing_Type_compact(void *_selector)
+{
+	struct IO_Event_Selector_URing *selector = _selector;
+	IO_Event_Selector_compact(&selector->backend);
+	IO_Event_Array_each(&selector->completions, IO_Event_Selector_URing_Completion_compact);
+}
+
+static
+void close_internal(struct IO_Event_Selector_URing *selector)
 {
-	struct IO_Event_Selector_URing *data = _data;
+	if (selector->ring.ring_fd >= 0) {
+		io_uring_queue_exit(&selector->ring);
+		selector->ring.ring_fd = -1;
+	}
+}
+
+static
+void IO_Event_Selector_URing_Type_free(void *_selector)
+{
+	struct IO_Event_Selector_URing *selector = _selector;
+
+	close_internal(selector);
 
-	close_internal(data);
+	IO_Event_Array_free(&selector->completions);
 
-	free(data);
+	free(selector);
 }
 
-size_t IO_Event_Selector_URing_Type_size(const void *data)
+static
+size_t IO_Event_Selector_URing_Type_size(const void *_selector)
 {
-	return sizeof(struct IO_Event_Selector_URing);
+	const struct IO_Event_Selector_URing *selector = _selector;
+
+	return sizeof(struct IO_Event_Selector_URing)
+		+ IO_Event_Array_memory_size(&selector->completions)
+	;
 }
 
 static const rb_data_type_t IO_Event_Selector_URing_Type = {
 	.wrap_struct_name = "IO_Event::Backend::URing",
 	.function = {
 		.dmark = IO_Event_Selector_URing_Type_mark,
+		.dcompact = IO_Event_Selector_URing_Type_compact,
 		.dfree = IO_Event_Selector_URing_Type_free,
 		.dsize = IO_Event_Selector_URing_Type_size,
 	},
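The heart of this release is the new completion handshake: an SQE's user_data no longer points at the waiting fiber directly. Instead it points at a pool-owned IO_Event_Selector_URing_Completion, which points back at a stack-allocated IO_Event_Selector_URing_Waiting, and either side can be detached when the operation is cancelled. A minimal self-contained sketch of the pattern (simplified, hypothetical type names, not the gem's actual structs):

    /* Two-way detachable handshake between a kernel-side handle (Completion)
       and per-operation state on the fiber's stack (Waiting). */
    #include <stdio.h>
    #include <stddef.h>

    struct Waiting;

    struct Completion {    /* heap-stable; stored in sqe->user_data */
        struct Waiting *waiting;
    };

    struct Waiting {       /* stack-allocated, lives for one operation */
        struct Completion *completion;
        int result;
    };

    static void waiting_cancel(struct Waiting *waiting) {
        if (waiting->completion) {
            waiting->completion->waiting = NULL; /* detach both directions */
            waiting->completion = NULL;
        }
    }

    static void on_cqe(struct Completion *completion, int res) {
        if (completion->waiting) {
            completion->waiting->result = res;
        } else {
            printf("late completion ignored (res=%d)\n", res);
        }
    }

    int main(void) {
        struct Completion completion = {0};
        struct Waiting waiting = {.completion = &completion};
        completion.waiting = &waiting;

        waiting_cancel(&waiting);  /* e.g. the fiber was interrupted */
        on_cqe(&completion, -125); /* a late CQE is now harmless */
        return 0;
    }

Because the Completion outlives the Waiting, a CQE arriving after cancellation dereferences valid pool memory and is simply ignored, rather than chasing a dangling fiber pointer.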
@@ -87,15 +153,90 @@ static const rb_data_type_t IO_Event_Selector_URing_Type = {
 	.flags = RUBY_TYPED_FREE_IMMEDIATELY,
 };
 
+inline static
+struct IO_Event_Selector_URing_Completion * IO_Event_Selector_URing_Completion_acquire(struct IO_Event_Selector_URing *selector, struct IO_Event_Selector_URing_Waiting *waiting)
+{
+	struct IO_Event_Selector_URing_Completion *completion = NULL;
+
+	if (!IO_Event_List_empty(&selector->free_list)) {
+		completion = (struct IO_Event_Selector_URing_Completion*)selector->free_list.tail;
+		IO_Event_List_pop(&completion->list);
+	} else {
+		completion = IO_Event_Array_push(&selector->completions);
+		IO_Event_List_clear(&completion->list);
+	}
+
+	if (DEBUG_COMPLETION) fprintf(stderr, "IO_Event_Selector_URing_Completion_acquire(%p, limit=%ld)\n", (void*)completion, selector->completions.limit);
+
+	waiting->completion = completion;
+	completion->waiting = waiting;
+
+	return completion;
+}
+
+inline static
+void IO_Event_Selector_URing_Completion_cancel(struct IO_Event_Selector_URing_Completion *completion)
+{
+	if (DEBUG_COMPLETION) fprintf(stderr, "IO_Event_Selector_URing_Completion_cancel(%p)\n", (void*)completion);
+
+	if (completion->waiting) {
+		completion->waiting->completion = NULL;
+		completion->waiting = NULL;
+	}
+}
+
+inline static
+void IO_Event_Selector_URing_Completion_release(struct IO_Event_Selector_URing *selector, struct IO_Event_Selector_URing_Completion *completion)
+{
+	if (DEBUG_COMPLETION) fprintf(stderr, "IO_Event_Selector_URing_Completion_release(%p)\n", (void*)completion);
+
+	IO_Event_Selector_URing_Completion_cancel(completion);
+	IO_Event_List_prepend(&selector->free_list, &completion->list);
+}
+
+inline static
+void IO_Event_Selector_URing_Waiting_cancel(struct IO_Event_Selector_URing_Waiting *waiting)
+{
+	if (DEBUG_COMPLETION) fprintf(stderr, "IO_Event_Selector_URing_Waiting_cancel(%p, %p)\n", (void*)waiting, (void*)waiting->completion);
+
+	if (waiting->completion) {
+		waiting->completion->waiting = NULL;
+		waiting->completion = NULL;
+	}
+
+	waiting->fiber = 0;
+}
+
+struct IO_Event_List_Type IO_Event_Selector_URing_Completion_Type = {};
+
+void IO_Event_Selector_URing_Completion_initialize(void *element)
+{
+	struct IO_Event_Selector_URing_Completion *completion = element;
+	IO_Event_List_initialize(&completion->list);
+	completion->list.type = &IO_Event_Selector_URing_Completion_Type;
+}
+
+void IO_Event_Selector_URing_Completion_free(void *element)
+{
+	struct IO_Event_Selector_URing_Completion *completion = element;
+	IO_Event_Selector_URing_Completion_cancel(completion);
+}
+
 VALUE IO_Event_Selector_URing_allocate(VALUE self) {
-	struct IO_Event_Selector_URing *data = NULL;
-	VALUE instance = TypedData_Make_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	VALUE instance = TypedData_Make_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
+
+	IO_Event_Selector_initialize(&selector->backend, Qnil);
+	selector->ring.ring_fd = -1;
 
-	IO_Event_Selector_initialize(&data->backend, Qnil);
-	data->ring.ring_fd = -1;
+	selector->pending = 0;
+	selector->blocked = 0;
 
-	data->pending = 0;
-	data->blocked = 0;
+	IO_Event_List_initialize(&selector->free_list);
+
+	selector->completions.element_initialize = IO_Event_Selector_URing_Completion_initialize;
+	selector->completions.element_free = IO_Event_Selector_URing_Completion_free;
+	IO_Event_Array_allocate(&selector->completions, 1024, sizeof(struct IO_Event_Selector_URing_Completion));
 
 	return instance;
 }
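Completion records are pooled rather than allocated per operation: acquire pops a recycled record off the intrusive free_list when one is available and only grows the completions array otherwise, while release prepends the record back onto the list. The same recycling pattern with a plain singly-linked list (hypothetical Pool/Node types standing in for IO_Event_Array/IO_Event_List):

    /* LIFO free-list recycling: reuse the most recently released slot first. */
    #include <stdio.h>
    #include <stdlib.h>

    struct Node { struct Node *next; };

    struct Pool {
        struct Node *free_list; /* recycled nodes */
    };

    static struct Node *pool_acquire(struct Pool *pool) {
        if (pool->free_list) {
            struct Node *node = pool->free_list; /* pop a recycled node */
            pool->free_list = node->next;
            return node;
        }
        return calloc(1, sizeof(struct Node)); /* otherwise grow the pool */
    }

    static void pool_release(struct Pool *pool, struct Node *node) {
        node->next = pool->free_list; /* prepend: hottest memory reused first */
        pool->free_list = node;
    }

    int main(void) {
        struct Pool pool = {0};
        struct Node *a = pool_acquire(&pool);
        pool_release(&pool, a);
        struct Node *b = pool_acquire(&pool);
        printf("recycled: %s\n", a == b ? "yes" : "no"); /* yes */
        free(b);
        return 0;
    }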
@@ -103,100 +244,100 @@ VALUE IO_Event_Selector_URing_allocate(VALUE self) {
 #pragma mark - Methods
 
 VALUE IO_Event_Selector_URing_initialize(VALUE self, VALUE loop) {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
-	IO_Event_Selector_initialize(&data->backend, loop);
-	int result = io_uring_queue_init(URING_ENTRIES, &data->ring, 0);
+	IO_Event_Selector_initialize(&selector->backend, loop);
+	int result = io_uring_queue_init(URING_ENTRIES, &selector->ring, 0);
 
 	if (result < 0) {
 		rb_syserr_fail(-result, "IO_Event_Selector_URing_initialize:io_uring_queue_init");
 	}
 
-	rb_update_max_fd(data->ring.ring_fd);
+	rb_update_max_fd(selector->ring.ring_fd);
 
 	return self;
 }
 
 VALUE IO_Event_Selector_URing_loop(VALUE self) {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
-	return data->backend.loop;
+	return selector->backend.loop;
 }
 
 VALUE IO_Event_Selector_URing_close(VALUE self) {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
-	close_internal(data);
+	close_internal(selector);
 
 	return Qnil;
 }
 
 VALUE IO_Event_Selector_URing_transfer(VALUE self)
 {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
-	return IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
+	return IO_Event_Selector_fiber_transfer(selector->backend.loop, 0, NULL);
 }
 
 VALUE IO_Event_Selector_URing_resume(int argc, VALUE *argv, VALUE self)
 {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
-	return IO_Event_Selector_resume(&data->backend, argc, argv);
+	return IO_Event_Selector_resume(&selector->backend, argc, argv);
 }
 
 VALUE IO_Event_Selector_URing_yield(VALUE self)
 {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
-	return IO_Event_Selector_yield(&data->backend);
+	return IO_Event_Selector_yield(&selector->backend);
 }
 
 VALUE IO_Event_Selector_URing_push(VALUE self, VALUE fiber)
 {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
-	IO_Event_Selector_queue_push(&data->backend, fiber);
+	IO_Event_Selector_queue_push(&selector->backend, fiber);
 
 	return Qnil;
 }
 
 VALUE IO_Event_Selector_URing_raise(int argc, VALUE *argv, VALUE self)
 {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
-	return IO_Event_Selector_raise(&data->backend, argc, argv);
+	return IO_Event_Selector_raise(&selector->backend, argc, argv);
 }
 
 VALUE IO_Event_Selector_URing_ready_p(VALUE self) {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
-	return data->backend.ready ? Qtrue : Qfalse;
+	return selector->backend.ready ? Qtrue : Qfalse;
 }
 
 #pragma mark - Submission Queue
 
 // Flush the submission queue if pending operations are present.
 static
-int io_uring_submit_flush(struct IO_Event_Selector_URing *data) {
-	if (data->pending) {
-		if (DEBUG) fprintf(stderr, "io_uring_submit_flush(pending=%ld)\n", data->pending);
+int io_uring_submit_flush(struct IO_Event_Selector_URing *selector) {
+	if (selector->pending) {
+		if (DEBUG) fprintf(stderr, "io_uring_submit_flush(pending=%ld)\n", selector->pending);
 
 		// Try to submit:
-		int result = io_uring_submit(&data->ring);
+		int result = io_uring_submit(&selector->ring);
 
 		if (result >= 0) {
 			// If it was submitted, reset pending count:
-			data->pending = 0;
+			selector->pending = 0;
 		} else if (result != -EBUSY && result != -EAGAIN) {
 			rb_syserr_fail(-result, "io_uring_submit_flush:io_uring_submit");
 		}
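io_uring_submit can fail transiently with -EBUSY or -EAGAIN when the kernel cannot accept more work (typically because the completion queue is saturated); the selector treats those two codes as back-pressure and retries, with io_uring_submit_now yielding to the event loop rather than spinning. A runnable sketch of that retry contract against liburing itself (a standalone program, assuming liburing is installed; link with -luring):

    #include <liburing.h>
    #include <stdio.h>
    #include <errno.h>

    int main(void) {
        struct io_uring ring;
        if (io_uring_queue_init(8, &ring, 0) < 0) return 1;

        /* A fresh 8-entry ring always has an SQE available here: */
        struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
        io_uring_prep_nop(sqe);

        int result;
        do {
            result = io_uring_submit(&ring);
            /* -EBUSY/-EAGAIN mean the CQ is saturated; a real event loop
               would reap completions (or yield) here instead of spinning. */
        } while (result == -EBUSY || result == -EAGAIN);

        printf("submitted %d entries\n", result);
        io_uring_queue_exit(&ring);
        return 0;
    }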
@@ -209,41 +350,47 @@ int io_uring_submit_flush(struct IO_Event_Selector_URing *data) {
 
 // Immediately flush the submission queue, yielding to the event loop if it was not successful.
 static
-int io_uring_submit_now(struct IO_Event_Selector_URing *data) {
-	if (DEBUG && data->pending) fprintf(stderr, "io_uring_submit_now(pending=%ld)\n", data->pending);
+int io_uring_submit_now(struct IO_Event_Selector_URing *selector) {
+	if (DEBUG && selector->pending) fprintf(stderr, "io_uring_submit_now(pending=%ld)\n", selector->pending);
 
 	while (true) {
-		int result = io_uring_submit(&data->ring);
+		int result = io_uring_submit(&selector->ring);
 
 		if (result >= 0) {
-			data->pending = 0;
+			selector->pending = 0;
 			return result;
 		}
 
 		if (result == -EBUSY || result == -EAGAIN) {
-			IO_Event_Selector_yield(&data->backend);
+			IO_Event_Selector_yield(&selector->backend);
 		} else {
 			rb_syserr_fail(-result, "io_uring_submit_now:io_uring_submit");
 		}
 	}
 }
 
+static
+void IO_Event_Selector_URing_submit_sqe(struct io_uring_sqe *sqe)
+{
+	if (DEBUG) fprintf(stderr, "IO_Event_Selector_URing_submit_sqe(%p): user_data=%p opcode=%d\n", sqe, (void*)sqe->user_data, sqe->opcode);
+}
+
 // Submit a pending operation. This does not submit the operation immediately, but instead defers it to the next call to `io_uring_submit_flush` or `io_uring_submit_now`. This is useful for operations that are not urgent, but should be used with care as it can lead to a deadlock if the submission queue is not flushed.
 static
-void io_uring_submit_pending(struct IO_Event_Selector_URing *data) {
-	data->pending += 1;
+void io_uring_submit_pending(struct IO_Event_Selector_URing *selector) {
+	selector->pending += 1;
 
-	if (DEBUG) fprintf(stderr, "io_uring_submit_pending(ring=%p, pending=%ld)\n", &data->ring, data->pending);
+	if (DEBUG) fprintf(stderr, "io_uring_submit_pending(ring=%p, pending=%ld)\n", &selector->ring, selector->pending);
 }
 
-struct io_uring_sqe * io_get_sqe(struct IO_Event_Selector_URing *data) {
-	struct io_uring_sqe *sqe = io_uring_get_sqe(&data->ring);
+struct io_uring_sqe * io_get_sqe(struct IO_Event_Selector_URing *selector) {
+	struct io_uring_sqe *sqe = io_uring_get_sqe(&selector->ring);
 
 	while (sqe == NULL) {
 		// The submit queue is full, we need to drain it:
-		io_uring_submit_now(data);
+		io_uring_submit_now(selector);
 
-		sqe = io_uring_get_sqe(&data->ring);
+		sqe = io_uring_get_sqe(&selector->ring);
 	}
 
 	return sqe;
@@ -252,9 +399,10 @@ struct io_uring_sqe * io_get_sqe(struct IO_Event_Selector_URing *data) {
 #pragma mark - Process.wait
 
 struct process_wait_arguments {
-	struct IO_Event_Selector_URing *data;
+	struct IO_Event_Selector_URing *selector;
+	struct IO_Event_Selector_URing_Waiting *waiting;
+
 	pid_t pid;
-	int flags;
 	int descriptor;
 };
 
@@ -262,9 +410,13 @@ static
 VALUE process_wait_transfer(VALUE _arguments) {
 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
 
-	IO_Event_Selector_fiber_transfer(arguments->data->backend.loop, 0, NULL);
+	IO_Event_Selector_fiber_transfer(arguments->selector->backend.loop, 0, NULL);
 
-	return IO_Event_Selector_process_status_wait(arguments->pid);
+	if (arguments->waiting->result) {
+		return IO_Event_Selector_process_status_wait(arguments->pid);
+	} else {
+		return Qfalse;
+	}
 }
 
 static
@@ -273,29 +425,44 @@ VALUE process_wait_ensure(VALUE _arguments) {
 
 	close(arguments->descriptor);
 
+	IO_Event_Selector_URing_Waiting_cancel(arguments->waiting);
+
 	return Qnil;
 }
 
-VALUE IO_Event_Selector_URing_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+VALUE IO_Event_Selector_URing_process_wait(VALUE self, VALUE fiber, VALUE _pid, VALUE _flags) {
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
+
+	pid_t pid = NUM2PIDT(_pid);
+
+	int descriptor = pidfd_open(pid, 0);
+	if (descriptor < 0) {
+		rb_syserr_fail(errno, "IO_Event_Selector_URing_process_wait:pidfd_open");
+	}
+	rb_update_max_fd(descriptor);
+
+	struct IO_Event_Selector_URing_Waiting waiting = {
+		.fiber = fiber,
+	};
+
+	struct IO_Event_Selector_URing_Completion *completion = IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
 
 	struct process_wait_arguments process_wait_arguments = {
-		.data = data,
-		.pid = NUM2PIDT(pid),
-		.flags = NUM2INT(flags),
+		.selector = selector,
+		.waiting = &waiting,
+		.pid = pid,
+		.descriptor = descriptor,
 	};
 
-	process_wait_arguments.descriptor = pidfd_open(process_wait_arguments.pid, 0);
-	rb_update_max_fd(process_wait_arguments.descriptor);
+	struct io_uring_sqe *sqe = io_get_sqe(selector);
 
-	struct io_uring_sqe *sqe = io_get_sqe(data);
-
 	if (DEBUG) fprintf(stderr, "IO_Event_Selector_URing_process_wait:io_uring_prep_poll_add(%p)\n", (void*)fiber);
-	io_uring_prep_poll_add(sqe, process_wait_arguments.descriptor, POLLIN|POLLHUP|POLLERR);
-	io_uring_sqe_set_data(sqe, (void*)fiber);
-	io_uring_submit_pending(data);
-
+	io_uring_prep_poll_add(sqe, descriptor, POLLIN|POLLHUP|POLLERR);
+	io_uring_sqe_set_data(sqe, completion);
+	IO_Event_Selector_URing_submit_sqe(sqe);
+	io_uring_submit_pending(selector);
+
 	return rb_ensure(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_ensure, (VALUE)&process_wait_arguments);
 }
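Process.wait is now built around an explicit pidfd acquired up front: pidfd_open returns a descriptor that polls readable once the child terminates, which is exactly the event io_uring_prep_poll_add registers above. A standalone sketch of the mechanism without io_uring (using the raw syscall, since older glibc versions ship no wrapper; requires Linux 5.3+):

    #define _GNU_SOURCE
    #include <sys/syscall.h>
    #include <sys/wait.h>
    #include <unistd.h>
    #include <poll.h>
    #include <stdio.h>

    int main(void) {
        pid_t pid = fork();
        if (pid == 0) _exit(42); /* child exits immediately */

        int pidfd = (int)syscall(SYS_pidfd_open, pid, 0);
        if (pidfd < 0) { perror("pidfd_open"); return 1; }

        /* The pidfd becomes readable when the child terminates; the selector
           registers this same readiness event with io_uring. */
        struct pollfd pfd = {.fd = pidfd, .events = POLLIN};
        poll(&pfd, 1, -1);

        int status;
        waitpid(pid, &status, 0); /* reap; the selector does this on the Ruby side */
        printf("child exited with %d\n", WEXITSTATUS(status));
        close(pidfd);
        return 0;
    }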
 
@@ -328,70 +495,81 @@ int events_from_poll_flags(short flags) {
 }
 
 struct io_wait_arguments {
-	struct IO_Event_Selector_URing *data;
-	VALUE fiber;
+	struct IO_Event_Selector_URing *selector;
+	struct IO_Event_Selector_URing_Waiting *waiting;
 	short flags;
 };
 
 static
-VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
+VALUE io_wait_ensure(VALUE _arguments) {
 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
-	struct IO_Event_Selector_URing *data = arguments->data;
 
-	struct io_uring_sqe *sqe = io_get_sqe(data);
+	if (DEBUG) fprintf(stderr, "io_wait_ensure:io_uring_prep_cancel(waiting=%p, completion=%p)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion);
 
-	if (DEBUG) fprintf(stderr, "io_wait_rescue:io_uring_prep_poll_remove(%p)\n", (void*)arguments->fiber);
+	// If the operation is still in progress, cancel it:
+	if (arguments->waiting->completion) {
+		struct io_uring_sqe *sqe = io_get_sqe(arguments->selector);
+		io_uring_prep_cancel(sqe, (void*)arguments->waiting->completion, 0);
+		io_uring_sqe_set_data(sqe, NULL);
+		IO_Event_Selector_URing_submit_sqe(sqe);
+		io_uring_submit_now(arguments->selector);
+	}
 
-	io_uring_prep_poll_remove(sqe, (uintptr_t)arguments->fiber);
-	io_uring_sqe_set_data(sqe, NULL);
-	io_uring_submit_now(data);
-
-	rb_exc_raise(exception);
+	IO_Event_Selector_URing_Waiting_cancel(arguments->waiting);
+
+	return Qnil;
 };
 
 static
 VALUE io_wait_transfer(VALUE _arguments) {
 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
-	struct IO_Event_Selector_URing *data = arguments->data;
-
-	VALUE result = IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
-	if (DEBUG) fprintf(stderr, "io_wait:IO_Event_Selector_fiber_transfer -> %d\n", RB_NUM2INT(result));
-
-	if (!RTEST(result)) {
+	struct IO_Event_Selector_URing *selector = arguments->selector;
+
+	IO_Event_Selector_fiber_transfer(selector->backend.loop, 0, NULL);
+
+	if (DEBUG) fprintf(stderr, "io_wait_transfer:waiting=%p, result=%d\n", (void*)arguments->waiting, arguments->waiting->result);
+
+	if (arguments->waiting->result) {
+		// We explicitly filter the resulting events based on the requested events.
+		// In some cases, poll will report events we didn't ask for.
+		return RB_INT2NUM(events_from_poll_flags(arguments->waiting->result & arguments->flags));
+	} else {
 		return Qfalse;
 	}
-
-	// We explicitly filter the resulting events based on the requested events.
-	// In some cases, poll will report events we didn't ask for.
-	short flags = arguments->flags & NUM2INT(result);
-
-	return INT2NUM(events_from_poll_flags(flags));
 };
 
 VALUE IO_Event_Selector_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
 	int descriptor = IO_Event_Selector_io_descriptor(io);
-	struct io_uring_sqe *sqe = io_get_sqe(data);
+	struct io_uring_sqe *sqe = io_get_sqe(selector);
 
 	short flags = poll_flags_from_events(NUM2INT(events));
 
 	if (DEBUG) fprintf(stderr, "IO_Event_Selector_URing_io_wait:io_uring_prep_poll_add(descriptor=%d, flags=%d, fiber=%p)\n", descriptor, flags, (void*)fiber);
 
 	io_uring_prep_poll_add(sqe, descriptor, flags);
-	io_uring_sqe_set_data(sqe, (void*)fiber);
+
+	struct IO_Event_Selector_URing_Waiting waiting = {
+		.fiber = fiber,
+	};
+
+	struct IO_Event_Selector_URing_Completion *completion = IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
+
+	io_uring_sqe_set_data(sqe, completion);
+	IO_Event_Selector_URing_submit_sqe(sqe);
 
 	// If we are going to wait, we assume that we are waiting for a while:
-	io_uring_submit_pending(data);
+	io_uring_submit_pending(selector);
 
 	struct io_wait_arguments io_wait_arguments = {
-		.data = data,
-		.fiber = fiber,
+		.selector = selector,
+		.waiting = &waiting,
 		.flags = flags
 	};
 
-	return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
+	return rb_ensure(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_ensure, (VALUE)&io_wait_arguments);
 }
 
 #ifdef HAVE_RUBY_IO_BUFFER_H
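Note the control-flow change at the end of io_wait: rb_rescue has become rb_ensure. The cleanup function now runs on every unwind — normal return, raised exception, or the fiber being terminated — instead of only when an exception is raised, so the in-flight SQE is always cancelled and the Waiting always detached. The shape of the pattern, with hypothetical operation_* helpers standing in for the real submit/cleanup pairs:

    #include <ruby.h>

    static VALUE operation_transfer(VALUE argument) {
        /* ... transfer to the event loop and wait for the completion ... */
        return Qnil;
    }

    static VALUE operation_cleanup(VALUE argument) {
        /* Runs unconditionally on unwind: cancel the SQE if it is still in
           flight and detach the waiting state. */
        return Qnil;
    }

    static VALUE operation_wait(VALUE argument) {
        /* rb_ensure(body, body_argument, ensure, ensure_argument) */
        return rb_ensure(operation_transfer, argument, operation_cleanup, argument);
    }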
@@ -417,8 +595,8 @@ static inline off_t io_seekable(int descriptor)
 #pragma mark - IO#read
 
 struct io_read_arguments {
-	struct IO_Event_Selector_URing *data;
-	VALUE fiber;
+	struct IO_Event_Selector_URing *selector;
+	struct IO_Event_Selector_URing_Waiting *waiting;
 	int descriptor;
 	char *buffer;
 	size_t length;
@@ -428,58 +606,69 @@ static VALUE
 io_read_submit(VALUE _arguments)
 {
 	struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
-	struct IO_Event_Selector_URing *data = arguments->data;
-	struct io_uring_sqe *sqe = io_get_sqe(data);
+	struct IO_Event_Selector_URing *selector = arguments->selector;
+	struct io_uring_sqe *sqe = io_get_sqe(selector);
 
-	if (DEBUG) fprintf(stderr, "io_read_submit:io_uring_prep_read(fiber=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->fiber, arguments->descriptor, arguments->buffer, arguments->length);
+	if (DEBUG) fprintf(stderr, "io_read_submit:io_uring_prep_read(waiting=%p, completion=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion, arguments->descriptor, arguments->buffer, arguments->length);
 
 	io_uring_prep_read(sqe, arguments->descriptor, arguments->buffer, arguments->length, io_seekable(arguments->descriptor));
-	io_uring_sqe_set_data(sqe, (void*)arguments->fiber);
-	io_uring_submit_now(data);
+	io_uring_sqe_set_data(sqe, arguments->waiting->completion);
+	IO_Event_Selector_URing_submit_sqe(sqe);
+	io_uring_submit_now(selector);
+
+	IO_Event_Selector_fiber_transfer(selector->backend.loop, 0, NULL);
 
-	return IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
+	return RB_INT2NUM(arguments->waiting->result);
 }
 
 static VALUE
-io_read_cancel(VALUE _arguments, VALUE exception)
+io_read_ensure(VALUE _arguments)
 {
 	struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
-	struct IO_Event_Selector_URing *data = arguments->data;
+	struct IO_Event_Selector_URing *selector = arguments->selector;
 
-	struct io_uring_sqe *sqe = io_get_sqe(data);
+	struct io_uring_sqe *sqe = io_get_sqe(selector);
 
-	if (DEBUG) fprintf(stderr, "io_read_cancel:io_uring_prep_cancel(fiber=%p)\n", (void*)arguments->fiber);
+	if (DEBUG) fprintf(stderr, "io_read_ensure:io_uring_prep_cancel(waiting=%p, completion=%p)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion);
 
-	io_uring_prep_cancel(sqe, (void*)arguments->fiber, 0);
-	io_uring_sqe_set_data(sqe, NULL);
-	io_uring_submit_now(data);
+	// If the operation is still in progress, cancel it:
+	if (arguments->waiting->completion) {
+		io_uring_prep_cancel(sqe, (void*)arguments->waiting->completion, 0);
+		io_uring_sqe_set_data(sqe, NULL);
+		IO_Event_Selector_URing_submit_sqe(sqe);
+		io_uring_submit_now(selector);
+	}
 
-	rb_exc_raise(exception);
+	IO_Event_Selector_URing_Waiting_cancel(arguments->waiting);
+
+	return Qnil;
 }
 
 static int
-io_read(struct IO_Event_Selector_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length)
+io_read(struct IO_Event_Selector_URing *selector, VALUE fiber, int descriptor, char *buffer, size_t length)
 {
-	struct io_read_arguments io_read_arguments = {
-		.data = data,
+	struct IO_Event_Selector_URing_Waiting waiting = {
 		.fiber = fiber,
+	};
+
+	IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
+
+	struct io_read_arguments io_read_arguments = {
+		.selector = selector,
+		.waiting = &waiting,
 		.descriptor = descriptor,
 		.buffer = buffer,
 		.length = length
 	};
 
-	int result = RB_NUM2INT(
-		rb_rescue(io_read_submit, (VALUE)&io_read_arguments, io_read_cancel, (VALUE)&io_read_arguments)
+	return RB_NUM2INT(
		rb_ensure(io_read_submit, (VALUE)&io_read_arguments, io_read_ensure, (VALUE)&io_read_arguments)
 	);
-
-	if (DEBUG) fprintf(stderr, "io_read:IO_Event_Selector_fiber_transfer -> %d\n", result);
-
-	return result;
 }
 
 VALUE IO_Event_Selector_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length, VALUE _offset) {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
 	int descriptor = IO_Event_Selector_io_descriptor(io);
 
@@ -489,14 +678,14 @@ VALUE IO_Event_Selector_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE b
 
 	size_t length = NUM2SIZET(_length);
 	size_t offset = NUM2SIZET(_offset);
+	size_t total = 0;
 
-	while (true) {
-		size_t maximum_size = size - offset;
-		if (DEBUG_IO_READ) fprintf(stderr, "io_read(%d, +%ld, %ld)\n", descriptor, offset, maximum_size);
-		int result = io_read(data, fiber, descriptor, (char*)base+offset, maximum_size);
-		if (DEBUG_IO_READ) fprintf(stderr, "io_read(%d, +%ld, %ld) -> %d\n", descriptor, offset, maximum_size, result);
+	size_t maximum_size = size - offset;
+	while (maximum_size) {
+		int result = io_read(selector, fiber, descriptor, (char*)base+offset, maximum_size);
 
 		if (result > 0) {
+			total += result;
 			offset += result;
 			if ((size_t)result >= length) break;
 			length -= result;
@@ -507,9 +696,11 @@ VALUE IO_Event_Selector_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE b
 		} else {
 			return rb_fiber_scheduler_io_result(-1, -result);
 		}
+
+		maximum_size = size - offset;
 	}
 
-	return rb_fiber_scheduler_io_result(offset, 0);
+	return rb_fiber_scheduler_io_result(total, 0);
 }
 
 static VALUE IO_Event_Selector_URing_io_read_compatible(int argc, VALUE *argv, VALUE self)
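The new total accumulator fixes the value reported to the fiber scheduler: the old code returned offset, which includes the caller-supplied starting offset, so a read beginning mid-buffer over-reported the bytes transferred. A tiny demonstration of the arithmetic (the same fix applies to IO_Event_Selector_URing_io_write below):

    #include <stdio.h>

    int main(void) {
        size_t offset = 100; /* caller asked to start 100 bytes into the buffer */
        size_t total = 0;

        size_t result = 50;  /* suppose a single read returns 50 bytes */
        offset += result;
        total += result;

        printf("old return value (offset): %zu\n", offset); /* 150: wrong */
        printf("new return value (total):  %zu\n", total);  /* 50: correct */
        return 0;
    }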
@@ -528,8 +719,8 @@ static VALUE IO_Event_Selector_URing_io_read_compatible(int argc, VALUE *argv, V
 #pragma mark - IO#write
 
 struct io_write_arguments {
-	struct IO_Event_Selector_URing *data;
-	VALUE fiber;
+	struct IO_Event_Selector_URing *selector;
+	struct IO_Event_Selector_URing_Waiting *waiting;
 	int descriptor;
 	char *buffer;
 	size_t length;
@@ -539,59 +730,70 @@ static VALUE
 io_write_submit(VALUE _argument)
 {
 	struct io_write_arguments *arguments = (struct io_write_arguments*)_argument;
-	struct IO_Event_Selector_URing *data = arguments->data;
+	struct IO_Event_Selector_URing *selector = arguments->selector;
 
-	struct io_uring_sqe *sqe = io_get_sqe(data);
+	struct io_uring_sqe *sqe = io_get_sqe(selector);
 
-	if (DEBUG) fprintf(stderr, "io_write_submit:io_uring_prep_write(fiber=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->fiber, arguments->descriptor, arguments->buffer, arguments->length);
+	if (DEBUG) fprintf(stderr, "io_write_submit:io_uring_prep_write(waiting=%p, completion=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion, arguments->descriptor, arguments->buffer, arguments->length);
 
 	io_uring_prep_write(sqe, arguments->descriptor, arguments->buffer, arguments->length, io_seekable(arguments->descriptor));
-	io_uring_sqe_set_data(sqe, (void*)arguments->fiber);
-	io_uring_submit_pending(data);
+	io_uring_sqe_set_data(sqe, arguments->waiting->completion);
+	IO_Event_Selector_URing_submit_sqe(sqe);
+	io_uring_submit_pending(selector);
 
-	return IO_Event_Selector_fiber_transfer(data->backend.loop, 0, NULL);
+	IO_Event_Selector_fiber_transfer(selector->backend.loop, 0, NULL);
+
+	return RB_INT2NUM(arguments->waiting->result);
 }
 
 static VALUE
-io_write_cancel(VALUE _argument, VALUE exception)
+io_write_ensure(VALUE _argument)
 {
 	struct io_write_arguments *arguments = (struct io_write_arguments*)_argument;
-	struct IO_Event_Selector_URing *data = arguments->data;
+	struct IO_Event_Selector_URing *selector = arguments->selector;
 
-	struct io_uring_sqe *sqe = io_get_sqe(data);
+	struct io_uring_sqe *sqe = io_get_sqe(selector);
 
-	if (DEBUG) fprintf(stderr, "io_wait_rescue:io_uring_prep_cancel(%p)\n", (void*)arguments->fiber);
+	if (DEBUG) fprintf(stderr, "io_write_ensure:io_uring_prep_cancel(waiting=%p, completion=%p)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion);
 
-	io_uring_prep_cancel(sqe, (void*)arguments->fiber, 0);
-	io_uring_sqe_set_data(sqe, NULL);
-	io_uring_submit_now(data);
+	// If the operation is still in progress, cancel it:
+	if (arguments->waiting->completion) {
+		io_uring_prep_cancel(sqe, (void*)arguments->waiting->completion, 0);
+		io_uring_sqe_set_data(sqe, NULL);
+		IO_Event_Selector_URing_submit_sqe(sqe);
+		io_uring_submit_now(selector);
+	}
 
-	rb_exc_raise(exception);
+	IO_Event_Selector_URing_Waiting_cancel(arguments->waiting);
+
+	return Qnil;
 }
 
 static int
-io_write(struct IO_Event_Selector_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length)
+io_write(struct IO_Event_Selector_URing *selector, VALUE fiber, int descriptor, char *buffer, size_t length)
 {
-	struct io_write_arguments arguments = {
-		.data = data,
+	struct IO_Event_Selector_URing_Waiting waiting = {
 		.fiber = fiber,
+	};
+
+	IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
+
+	struct io_write_arguments arguments = {
+		.selector = selector,
+		.waiting = &waiting,
 		.descriptor = descriptor,
 		.buffer = buffer,
 		.length = length,
 	};
 
-	int result = RB_NUM2INT(
-		rb_rescue(io_write_submit, (VALUE)&arguments, io_write_cancel, (VALUE)&arguments)
+	return RB_NUM2INT(
		rb_ensure(io_write_submit, (VALUE)&arguments, io_write_ensure, (VALUE)&arguments)
 	);
-
-	if (DEBUG) fprintf(stderr, "io_write:IO_Event_Selector_fiber_transfer -> %d\n", result);
-
-	return result;
 }
 
 VALUE IO_Event_Selector_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length, VALUE _offset) {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
 	int descriptor = IO_Event_Selector_io_descriptor(io);
 
@@ -601,16 +803,18 @@ VALUE IO_Event_Selector_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE
 
 	size_t length = NUM2SIZET(_length);
 	size_t offset = NUM2SIZET(_offset);
+	size_t total = 0;
 
 	if (length > size) {
 		rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
 	}
-
-	while (true) {
-		size_t maximum_size = size - offset;
-		int result = io_write(data, fiber, descriptor, (char*)base+offset, maximum_size);
+
+	size_t maximum_size = size - offset;
+	while (maximum_size) {
+		int result = io_write(selector, fiber, descriptor, (char*)base+offset, maximum_size);
 
 		if (result > 0) {
+			total += result;
 			offset += result;
 			if ((size_t)result >= length) break;
 			length -= result;
@@ -621,9 +825,11 @@ VALUE IO_Event_Selector_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE
 		} else {
 			return rb_fiber_scheduler_io_result(-1, -result);
 		}
+
+		maximum_size = size - offset;
 	}
 
-	return rb_fiber_scheduler_io_result(offset, 0);
+	return rb_fiber_scheduler_io_result(total, 0);
 }
 
 static VALUE IO_Event_Selector_URing_io_write_compatible(int argc, VALUE *argv, VALUE self)
@@ -646,17 +852,18 @@ static VALUE IO_Event_Selector_URing_io_write_compatible(int argc, VALUE *argv,
 static const int ASYNC_CLOSE = 1;
 
 VALUE IO_Event_Selector_URing_io_close(VALUE self, VALUE io) {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
 	int descriptor = IO_Event_Selector_io_descriptor(io);
 
 	if (ASYNC_CLOSE) {
-		struct io_uring_sqe *sqe = io_get_sqe(data);
+		struct io_uring_sqe *sqe = io_get_sqe(selector);
 
 		io_uring_prep_close(sqe, descriptor);
 		io_uring_sqe_set_data(sqe, NULL);
-		io_uring_submit_now(data);
+		IO_Event_Selector_URing_submit_sqe(sqe);
+		io_uring_submit_now(selector);
 	} else {
 		close(descriptor);
 	}
@@ -699,7 +906,7 @@ int timeout_nonblocking(struct __kernel_timespec *timespec) {
 }
 
 struct select_arguments {
-	struct IO_Event_Selector_URing *data;
+	struct IO_Event_Selector_URing *selector;
 
 	int result;
 
@@ -712,18 +919,18 @@ void * select_internal(void *_arguments) {
 	struct select_arguments * arguments = (struct select_arguments *)_arguments;
 	struct io_uring_cqe *cqe = NULL;
 
-	arguments->result = io_uring_wait_cqe_timeout(&arguments->data->ring, &cqe, arguments->timeout);
+	arguments->result = io_uring_wait_cqe_timeout(&arguments->selector->ring, &cqe, arguments->timeout);
 
 	return NULL;
 }
 
 static
 int select_internal_without_gvl(struct select_arguments *arguments) {
-	io_uring_submit_flush(arguments->data);
+	io_uring_submit_flush(arguments->selector);
 
-	arguments->data->blocked = 1;
+	arguments->selector->blocked = 1;
 	rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);
-	arguments->data->blocked = 0;
+	arguments->selector->blocked = 0;
 
 	if (arguments->result == -ETIME) {
 		arguments->result = 0;
@@ -740,96 +947,112 @@ int select_internal_without_gvl(struct select_arguments *arguments) {
 }
 
 static inline
-unsigned select_process_completions(struct io_uring *ring) {
+unsigned select_process_completions(struct IO_Event_Selector_URing *selector) {
+	struct io_uring *ring = &selector->ring;
 	unsigned completed = 0;
 	unsigned head;
 	struct io_uring_cqe *cqe;
 
+	if (DEBUG) fprintf(stderr, "select_process_completions...\n");
+
 	io_uring_for_each_cqe(ring, head, cqe) {
+		if (DEBUG) fprintf(stderr, "select_process_completions: cqe res=%d user_data=%p\n", cqe->res, (void*)cqe->user_data);
+
 		++completed;
 
-		// If the operation was cancelled, or the operation has no user data (fiber):
-		if (cqe->res == -ECANCELED || cqe->user_data == 0 || cqe->user_data == LIBURING_UDATA_TIMEOUT) {
+		// If the operation was cancelled, or the operation has no user data:
+		if (cqe->user_data == 0 || cqe->user_data == LIBURING_UDATA_TIMEOUT) {
 			io_uring_cq_advance(ring, 1);
 			continue;
 		}
 
-		VALUE fiber = (VALUE)cqe->user_data;
-		VALUE result = RB_INT2NUM(cqe->res);
+		struct IO_Event_Selector_URing_Completion *completion = (void*)cqe->user_data;
+		struct IO_Event_Selector_URing_Waiting *waiting = completion->waiting;
 
-		if (DEBUG) fprintf(stderr, "cqe res=%d user_data=%p\n", cqe->res, (void*)cqe->user_data);
+		if (DEBUG) fprintf(stderr, "select_process_completions: completion=%p waiting=%p\n", (void*)completion, (void*)waiting);
 
-		io_uring_cq_advance(ring, 1);
+		if (waiting) {
+			waiting->result = cqe->res;
+			waiting->flags = cqe->flags;
+		}
+
+		if (waiting && waiting->fiber) {
+			assert(waiting->result != -ECANCELED);
+
+			IO_Event_Selector_fiber_transfer(waiting->fiber, 0, NULL);
+		}
 
-		IO_Event_Selector_fiber_transfer(fiber, 1, &result);
+		// This marks the waiting operation as "complete":
+		IO_Event_Selector_URing_Completion_release(selector, completion);
+		io_uring_cq_advance(ring, 1);
 	}
 
-	// io_uring_cq_advance(ring, completed);
-
 	if (DEBUG && completed > 0) fprintf(stderr, "select_process_completions(completed=%d)\n", completed);
 
 	return completed;
 }
 
 VALUE IO_Event_Selector_URing_select(VALUE self, VALUE duration) {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
 	// Flush any pending events:
-	io_uring_submit_flush(data);
+	io_uring_submit_flush(selector);
 
-	int ready = IO_Event_Selector_queue_flush(&data->backend);
+	int ready = IO_Event_Selector_queue_flush(&selector->backend);
 
-	int result = select_process_completions(&data->ring);
+	int result = select_process_completions(selector);
 
 	// If we:
 	// 1. Didn't process any ready fibers, and
 	// 2. Didn't process any events from non-blocking select (above), and
 	// 3. There are no items in the ready list,
 	// then we can perform a blocking select.
-	if (!ready && !result && !data->backend.ready) {
+	if (!ready && !result && !selector->backend.ready) {
 		// We might need to wait for events:
 		struct select_arguments arguments = {
-			.data = data,
+			.selector = selector,
 			.timeout = NULL,
 		};
 
 		arguments.timeout = make_timeout(duration, &arguments.storage);
 
-		if (!data->backend.ready && !timeout_nonblocking(arguments.timeout)) {
+		if (!selector->backend.ready && !timeout_nonblocking(arguments.timeout)) {
 			// This is a blocking operation, we wait for events:
 			result = select_internal_without_gvl(&arguments);
+
+			// After waiting/flushing the SQ, check if there are any completions:
+			if (result > 0) {
+				result = select_process_completions(selector);
+			}
 		}
-
-		// After waiting/flushing the SQ, check if there are any completions:
-		result = select_process_completions(&data->ring);
 	}
 
 	return RB_INT2NUM(result);
 }
 
 VALUE IO_Event_Selector_URing_wakeup(VALUE self) {
-	struct IO_Event_Selector_URing *data = NULL;
-	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, data);
+	struct IO_Event_Selector_URing *selector = NULL;
+	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
 
 	// If we are blocking, we can schedule a nop event to wake up the selector:
-	if (data->blocked) {
+	if (selector->blocked) {
 		struct io_uring_sqe *sqe = NULL;
 
 		while (true) {
-			sqe = io_uring_get_sqe(&data->ring);
+			sqe = io_uring_get_sqe(&selector->ring);
 			if (sqe) break;
 
 			rb_thread_schedule();
 
 			// It's possible we became unblocked already, so we can assume the selector has already cycled at least once:
-			if (!data->blocked) return Qfalse;
+			if (!selector->blocked) return Qfalse;
 		}
 
 		io_uring_prep_nop(sqe);
-		// If you don't set this line, the SQE will eventually be recycled and have valid user data which can cause odd behaviour:
+		// If user data is not cleared here, the SQE will eventually be recycled with stale user data, which can cause odd behaviour:
 		io_uring_sqe_set_data(sqe, NULL);
-		io_uring_submit(&data->ring);
+		io_uring_submit(&selector->ring);
 
 		return Qtrue;
 	}
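Completion processing keeps liburing's peek/advance structure, but now advances the completion queue one entry at a time, only after the fiber has been resumed and the completion released back to the pool, so reentrant submissions from a resumed fiber observe a consistent ring. A runnable sketch of the underlying iteration pattern (standalone, using nop operations; link with -luring):

    #include <liburing.h>
    #include <stdio.h>

    int main(void) {
        struct io_uring ring;
        if (io_uring_queue_init(8, &ring, 0) < 0) return 1;

        io_uring_prep_nop(io_uring_get_sqe(&ring));
        io_uring_prep_nop(io_uring_get_sqe(&ring));
        io_uring_submit_and_wait(&ring, 2);

        unsigned head, completed = 0;
        struct io_uring_cqe *cqe;
        io_uring_for_each_cqe(&ring, head, cqe) {
            printf("cqe res=%d user_data=%p\n", cqe->res, (void*)(uintptr_t)cqe->user_data);
            /* Consume one entry at a time, as the selector does, so any
               reentrant work sees an up-to-date completion queue head: */
            io_uring_cq_advance(&ring, 1);
            completed++;
        }

        printf("processed %u completions\n", completed);
        io_uring_queue_exit(&ring);
        return 0;
    }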