event 0.4.2 → 0.7.0

@@ -0,0 +1,36 @@
+ // Copyright, 2021, by Samuel G. D. Williams. <http://www.codeotaku.com>
+ //
+ // Permission is hereby granted, free of charge, to any person obtaining a copy
+ // of this software and associated documentation files (the "Software"), to deal
+ // in the Software without restriction, including without limitation the rights
+ // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ // copies of the Software, and to permit persons to whom the Software is
+ // furnished to do so, subject to the following conditions:
+ //
+ // The above copyright notice and this permission notice shall be included in
+ // all copies or substantial portions of the Software.
+ //
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ // THE SOFTWARE.
+
+ #include <sys/types.h>
+ #include <sys/syscall.h>
+ #include <unistd.h>
+ #include <poll.h>
+ #include <stdlib.h>
+ #include <stdio.h>
+
+ #ifndef __NR_pidfd_open
+ #define __NR_pidfd_open 434 /* System call # on most architectures */
+ #endif
+
+ static int
+ pidfd_open(pid_t pid, unsigned int flags)
+ {
+ return syscall(__NR_pidfd_open, pid, flags);
+ }
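The wrapper above exposes `pidfd_open(2)`, which returns a file descriptor that becomes readable when the target process exits. A minimal standalone sketch (illustrative, not part of this diff; assumes Linux 5.3+ and the wrapper above) of how such a descriptor can be polled:

#include <sys/wait.h>

int main(void) {
	pid_t child = fork();
	if (child == 0) _exit(0); // Child exits immediately.

	int fd = pidfd_open(child, 0);
	if (fd < 0) { perror("pidfd_open"); return 1; }

	// The pidfd reports POLLIN once the process has terminated:
	struct pollfd pfd = {.fd = fd, .events = POLLIN};
	poll(&pfd, 1, -1);

	waitpid(child, NULL, 0); // Reap the child as usual.
	close(fd);
	return 0;
}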
@@ -25,21 +25,27 @@
  #include <poll.h>
  #include <time.h>

+ #include "pidfd.c"
+
+ static const int DEBUG = 0;
+
+ // This option controls whether to call `io_uring_submit()` after every operation:
+ static const int EARLY_SUBMIT = 1;
+
  static VALUE Event_Backend_URing = Qnil;
- static ID id_fileno, id_transfer;

- enum {URING_ENTRIES = 128};
- enum {URING_MAX_EVENTS = 128};
+ enum {URING_ENTRIES = 64};

  struct Event_Backend_URing {
- VALUE loop;
+ struct Event_Backend backend;
  struct io_uring ring;
+ size_t pending;
  };

  void Event_Backend_URing_Type_mark(void *_data)
  {
  struct Event_Backend_URing *data = _data;
- rb_gc_mark(data->loop);
+ Event_Backend_mark(&data->backend);
  }

  static
@@ -79,9 +85,11 @@ VALUE Event_Backend_URing_allocate(VALUE self) {
  struct Event_Backend_URing *data = NULL;
  VALUE instance = TypedData_Make_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);

- data->loop = Qnil;
+ Event_Backend_initialize(&data->backend, Qnil);
  data->ring.ring_fd = -1;

+ data->pending = 0;
+
  return instance;
  }

@@ -89,8 +97,7 @@ VALUE Event_Backend_URing_initialize(VALUE self, VALUE loop) {
  struct Event_Backend_URing *data = NULL;
  TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);

- data->loop = loop;
-
+ Event_Backend_initialize(&data->backend, loop);
  int result = io_uring_queue_init(URING_ENTRIES, &data->ring, 0);

  if (result < 0) {
@@ -111,6 +118,142 @@ VALUE Event_Backend_URing_close(VALUE self) {
  return Qnil;
  }

+ VALUE Event_Backend_URing_transfer(VALUE self, VALUE fiber)
+ {
+ struct Event_Backend_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+
+ Event_Backend_wait_and_transfer(&data->backend, fiber);
+
+ return Qnil;
+ }
+
+ VALUE Event_Backend_URing_defer(VALUE self)
+ {
+ struct Event_Backend_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+
+ Event_Backend_defer(&data->backend);
+
+ return Qnil;
+ }
+
+ VALUE Event_Backend_URing_ready_p(VALUE self) {
+ struct Event_Backend_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+
+ return data->backend.ready ? Qtrue : Qfalse;
+ }
+
+ static
+ int io_uring_submit_flush(struct Event_Backend_URing *data) {
+ if (data->pending) {
+ if (DEBUG) fprintf(stderr, "io_uring_submit_flush(pending=%zu)\n", data->pending);
+
+ // Try to submit:
+ int result = io_uring_submit(&data->ring);
+
+ if (result >= 0) {
+ // If it was submitted, reset pending count:
+ data->pending = 0;
+ } else if (result != -EBUSY && result != -EAGAIN) {
+ rb_syserr_fail(-result, "io_uring_submit_flush");
+ }
+
+ return result;
+ }
+
+ return 0;
+ }
+
+ static
+ int io_uring_submit_now(struct Event_Backend_URing *data) {
+ while (true) {
+ int result = io_uring_submit(&data->ring);
+
+ if (result >= 0) {
+ data->pending = 0;
+ return result;
+ }
+
+ if (result == -EBUSY || result == -EAGAIN) {
+ Event_Backend_defer(&data->backend);
+ } else {
+ rb_syserr_fail(-result, "io_uring_submit_now");
+ }
+ }
+ }
+
+ static
+ void io_uring_submit_pending(struct Event_Backend_URing *data) {
+ if (EARLY_SUBMIT) {
+ io_uring_submit_now(data);
+ } else {
+ data->pending += 1;
+ }
+ }
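These three helpers implement the submission policy controlled by `EARLY_SUBMIT`: when it is set, every prepared SQE is pushed to the kernel immediately; otherwise `pending` simply counts prepared SQEs and `io_uring_submit_flush` pushes them all in a single syscall from `select`. A minimal standalone sketch (illustrative, not from this diff) of the batching idea in plain liburing:

#include <liburing.h>

int main(void) {
	struct io_uring ring;
	io_uring_queue_init(64, &ring, 0);

	// Prepare several no-op SQEs without submitting them:
	for (int i = 0; i < 8; i++) {
		struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
		io_uring_prep_nop(sqe);
	}

	// A single syscall submits all eight at once:
	io_uring_submit(&ring);

	io_uring_queue_exit(&ring);
	return 0;
}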
195
+
196
+ struct io_uring_sqe * io_get_sqe(struct Event_Backend_URing *data) {
197
+ struct io_uring_sqe *sqe = io_uring_get_sqe(&data->ring);
198
+
199
+ while (sqe == NULL) {
200
+ // The submit queue is full, we need to drain it:
201
+ io_uring_submit_now(data);
202
+
203
+ sqe = io_uring_get_sqe(&data->ring);
204
+ }
205
+
206
+ return sqe;
207
+ }
+
+ struct process_wait_arguments {
+ struct Event_Backend_URing *data;
+ pid_t pid;
+ int flags;
+ int descriptor;
+ };
+
+ static
+ VALUE process_wait_transfer(VALUE _arguments) {
+ struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
+
+ Event_Backend_fiber_transfer(arguments->data->backend.loop);
+
+ return Event_Backend_process_status_wait(arguments->pid);
+ }
+
+ static
+ VALUE process_wait_ensure(VALUE _arguments) {
+ struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
+
+ close(arguments->descriptor);
+
+ return Qnil;
+ }
+
+ VALUE Event_Backend_URing_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE flags) {
+ struct Event_Backend_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+
+ struct process_wait_arguments process_wait_arguments = {
+ .data = data,
+ .pid = NUM2PIDT(pid),
+ .flags = NUM2INT(flags),
+ };
+
+ process_wait_arguments.descriptor = pidfd_open(process_wait_arguments.pid, 0);
+ rb_update_max_fd(process_wait_arguments.descriptor);
+
+ struct io_uring_sqe *sqe = io_get_sqe(data);
+
+ if (DEBUG) fprintf(stderr, "Event_Backend_URing_process_wait:io_uring_prep_poll_add(%p)\n", (void*)fiber);
+ io_uring_prep_poll_add(sqe, process_wait_arguments.descriptor, POLLIN|POLLHUP|POLLERR);
+ io_uring_sqe_set_data(sqe, (void*)fiber);
+ io_uring_submit_pending(data);
+
+ return rb_ensure(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_ensure, (VALUE)&process_wait_arguments);
+ }
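`rb_ensure` here guarantees the pidfd is closed whether the fiber resumes normally or the wait is aborted by an exception. A minimal sketch (illustrative names, not from this diff) of that C-level begin/ensure pattern:

#include <ruby.h>
#include <unistd.h>

static VALUE body(VALUE arg) {
	rb_raise(rb_eRuntimeError, "interrupted"); // Anything here may raise.
	return Qnil;
}

static VALUE cleanup(VALUE descriptor) {
	close(NUM2INT(descriptor)); // Runs even if `body` raised.
	return Qnil;
}

static VALUE guarded(int descriptor) {
	return rb_ensure(body, Qnil, cleanup, INT2NUM(descriptor));
}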
+
  static inline
  short poll_flags_from_events(int events) {
  short flags = 0;
@@ -142,16 +285,6 @@ struct io_wait_arguments {
  short flags;
  };

- struct io_uring_sqe * io_get_sqe(struct Event_Backend_URing *data) {
- struct io_uring_sqe *sqe = io_uring_get_sqe(&data->ring);
-
- while (sqe == NULL) {
- sqe = io_uring_get_sqe(&data->ring);
- }
-
- return sqe;
- }
-
  static
  VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
  struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
@@ -159,10 +292,9 @@ VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {

  struct io_uring_sqe *sqe = io_get_sqe(data);

- // fprintf(stderr, "poll_remove(%p, %p)\n", sqe, (void*)arguments->fiber);
+ if (DEBUG) fprintf(stderr, "io_wait_rescue:io_uring_prep_poll_remove(%p)\n", (void*)arguments->fiber);

  io_uring_prep_poll_remove(sqe, (void*)arguments->fiber);
- io_uring_submit(&data->ring);

  rb_exc_raise(exception);
  };
@@ -171,9 +303,10 @@ static
  VALUE io_wait_transfer(VALUE _arguments) {
  struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
  struct Event_Backend_URing *data = arguments->data;
-
- VALUE result = rb_funcall(data->loop, id_transfer, 0);
-
+
+ VALUE result = Event_Backend_fiber_transfer(data->backend.loop);
+ if (DEBUG) fprintf(stderr, "io_wait:Event_Backend_fiber_transfer -> %d\n", RB_NUM2INT(result));
+
  // We explicitly filter the resulting events based on the requested events.
  // In some cases, poll will report events we didn't ask for.
  short flags = arguments->flags & NUM2INT(result);
@@ -185,18 +318,16 @@ VALUE Event_Backend_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE event
  struct Event_Backend_URing *data = NULL;
  TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);

- int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
+ int descriptor = Event_Backend_io_descriptor(io);
  struct io_uring_sqe *sqe = io_get_sqe(data);

- if (!sqe) return INT2NUM(0);
-
  short flags = poll_flags_from_events(NUM2INT(events));

- // fprintf(stderr, "poll_add(%p, %d, %d, %p)\n", sqe, descriptor, flags, (void*)fiber);
+ if (DEBUG) fprintf(stderr, "Event_Backend_URing_io_wait:io_uring_prep_poll_add(descriptor=%d, flags=%d, fiber=%p)\n", descriptor, flags, (void*)fiber);

  io_uring_prep_poll_add(sqe, descriptor, flags);
  io_uring_sqe_set_data(sqe, (void*)fiber);
- io_uring_submit(&data->ring);
+ io_uring_submit_pending(data);

  struct io_wait_arguments io_wait_arguments = {
  .data = data,
@@ -207,87 +338,131 @@ VALUE Event_Backend_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE event
  return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
  }
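`rb_rescue` pairs the transfer with a cancellation path: if an exception (for example a timeout) interrupts the waiting fiber, `io_wait_rescue` queues a `poll_remove` for the stale SQE before re-raising. A minimal sketch (illustrative names) of the pattern:

#include <ruby.h>

static VALUE attempt(VALUE arg) {
	// Would normally block here until the poll completes.
	return Qnil;
}

static VALUE on_error(VALUE arg, VALUE exception) {
	// Cancel the in-flight operation (the diff queues a poll_remove), then re-raise:
	rb_exc_raise(exception);
}

static VALUE guarded_wait(void) {
	// on_error runs only if attempt raises a StandardError:
	return rb_rescue(attempt, Qnil, on_error, Qnil);
}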

- inline static
- void resize_to_capacity(VALUE string, size_t offset, size_t length) {
- size_t current_length = RSTRING_LEN(string);
- long difference = (long)(offset + length) - (long)current_length;
-
- difference += 1;
-
- if (difference > 0) {
- rb_str_modify_expand(string, difference);
- } else {
- rb_str_modify(string);
- }
- }
+ #ifdef HAVE_RUBY_IO_BUFFER_H
+
+ static int io_read(struct Event_Backend_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length) {
+ struct io_uring_sqe *sqe = io_get_sqe(data);
+
+ if (DEBUG) fprintf(stderr, "io_read:io_uring_prep_read(fiber=%p)\n", (void*)fiber);

- inline static
- void resize_to_fit(VALUE string, size_t offset, size_t length) {
- size_t current_length = RSTRING_LEN(string);
+ io_uring_prep_read(sqe, descriptor, buffer, length, 0);
+ io_uring_sqe_set_data(sqe, (void*)fiber);
+ io_uring_submit_pending(data);

- if (current_length < (offset + length)) {
- rb_str_set_len(string, offset + length);
- }
+ VALUE result = Event_Backend_fiber_transfer(data->backend.loop);
+ if (DEBUG) fprintf(stderr, "io_read:Event_Backend_fiber_transfer -> %d\n", RB_NUM2INT(result));
+
+ return RB_NUM2INT(result);
  }

- VALUE Event_Backend_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE offset, VALUE length) {
+ VALUE Event_Backend_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
  struct Event_Backend_URing *data = NULL;
  TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);

- resize_to_capacity(buffer, NUM2SIZET(offset), NUM2SIZET(length));
-
- int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
- struct io_uring_sqe *sqe = io_get_sqe(data);
-
- struct iovec iovecs[1];
- iovecs[0].iov_base = RSTRING_PTR(buffer) + NUM2SIZET(offset);
- iovecs[0].iov_len = NUM2SIZET(length);
-
- io_uring_prep_readv(sqe, descriptor, iovecs, 1, 0);
- io_uring_sqe_set_data(sqe, (void*)fiber);
- io_uring_submit(&data->ring);
+ int descriptor = Event_Backend_io_descriptor(io);

- // fprintf(stderr, "prep_readv(%p, %d, %ld)\n", sqe, descriptor, iovecs[0].iov_len);
+ void *base;
+ size_t size;
+ rb_io_buffer_get_mutable(buffer, &base, &size);

- int result = NUM2INT(rb_funcall(data->loop, id_transfer, 0));
+ size_t offset = 0;
+ size_t length = NUM2SIZET(_length);

- if (result < 0) {
- rb_syserr_fail(-result, strerror(-result));
+ while (length > 0) {
+ size_t maximum_size = size - offset;
+ int result = io_read(data, fiber, descriptor, (char*)base+offset, maximum_size);
+
+ if (result == 0) {
+ break;
+ } else if (result > 0) {
+ offset += result;
+ if ((size_t)result >= length) break;
+ length -= result;
+ } else if (-result == EAGAIN || -result == EWOULDBLOCK) {
+ Event_Backend_URing_io_wait(self, fiber, io, RB_INT2NUM(READABLE));
+ } else {
+ rb_syserr_fail(-result, strerror(-result));
+ }
  }

- resize_to_fit(buffer, NUM2SIZET(offset), (size_t)result);
+ return SIZET2NUM(offset);
+ }
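Note the error handling: a completion's `res` field carries `-errno` directly, unlike `read(2)` which returns -1 and sets `errno`. That is why the loop tests `-result == EAGAIN` and falls back to `io_wait` before retrying. A standalone sketch (illustrative, not from this diff) of checking a completion:

#include <liburing.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

void check_one_completion(struct io_uring *ring) {
	struct io_uring_cqe *cqe = NULL;
	io_uring_wait_cqe(ring, &cqe); // Blocks until one completion is available.

	if (cqe->res >= 0) {
		printf("transferred %d bytes\n", cqe->res);
	} else if (cqe->res == -EAGAIN) {
		// Would block: poll for readiness and retry, as the loop above does.
	} else {
		fprintf(stderr, "operation failed: %s\n", strerror(-cqe->res));
	}

	io_uring_cqe_seen(ring, cqe);
}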
+
+ static
+ int io_write(struct Event_Backend_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length) {
+ struct io_uring_sqe *sqe = io_get_sqe(data);
+
+ if (DEBUG) fprintf(stderr, "io_write:io_uring_prep_write(fiber=%p)\n", (void*)fiber);
+
+ io_uring_prep_write(sqe, descriptor, buffer, length, 0);
+ io_uring_sqe_set_data(sqe, (void*)fiber);
+ io_uring_submit_pending(data);

- return INT2NUM(result);
+ int result = RB_NUM2INT(Event_Backend_fiber_transfer(data->backend.loop));
+ if (DEBUG) fprintf(stderr, "io_write:Event_Backend_fiber_transfer -> %d\n", result);
+
+ return result;
  }

- VALUE Event_Backend_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE offset, VALUE length) {
+ VALUE Event_Backend_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
  struct Event_Backend_URing *data = NULL;
  TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);

- if ((size_t)RSTRING_LEN(buffer) < NUM2SIZET(offset) + NUM2SIZET(length)) {
- rb_raise(rb_eRuntimeError, "invalid offset/length exceeds bounds of buffer");
- }
+ int descriptor = Event_Backend_io_descriptor(io);

- int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
- struct io_uring_sqe *sqe = io_get_sqe(data);
+ const void *base;
+ size_t size;
+ rb_io_buffer_get_immutable(buffer, &base, &size);

- struct iovec iovecs[1];
- iovecs[0].iov_base = RSTRING_PTR(buffer) + NUM2SIZET(offset);
- iovecs[0].iov_len = NUM2SIZET(length);
+ size_t offset = 0;
+ size_t length = NUM2SIZET(_length);

- io_uring_prep_writev(sqe, descriptor, iovecs, 1, 0);
- io_uring_sqe_set_data(sqe, (void*)fiber);
- io_uring_submit(&data->ring);
+ if (length > size) {
+ rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
+ }

- // fprintf(stderr, "prep_writev(%p, %d, %ld)\n", sqe, descriptor, iovecs[0].iov_len);
+ while (length > 0) {
+ int result = io_write(data, fiber, descriptor, (char*)base+offset, length);
+
+ if (result >= 0) {
+ offset += result;
+ if ((size_t)result >= length) break;
+ length -= result;
+ } else if (-result == EAGAIN || -result == EWOULDBLOCK) {
+ Event_Backend_URing_io_wait(self, fiber, io, RB_INT2NUM(WRITABLE));
+ } else {
+ rb_syserr_fail(-result, strerror(-result));
+ }
+ }

- int result = NUM2INT(rb_funcall(data->loop, id_transfer, 0));
+ return SIZET2NUM(offset);
+ }
+
+ #endif
+
+ static const int ASYNC_CLOSE = 2;
+
+ VALUE Event_Backend_URing_io_close(VALUE self, VALUE io) {
+ struct Event_Backend_URing *data = NULL;
+ TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);

- if (result < 0) {
- rb_syserr_fail(-result, strerror(-result));
+ int descriptor = Event_Backend_io_descriptor(io);
+
+ if (ASYNC_CLOSE) {
+ struct io_uring_sqe *sqe = io_get_sqe(data);
+
+ io_uring_prep_close(sqe, descriptor);
+ io_uring_sqe_set_data(sqe, NULL);
+ if (ASYNC_CLOSE == 1)
+ io_uring_submit_now(data);
+ else if (ASYNC_CLOSE == 2)
+ io_uring_submit_pending(data);
+ } else {
+ close(descriptor);
  }
-
- return INT2NUM(result);
+
+ // We don't wait for the result of close since it has no use in practice:
+ return Qtrue;
  }
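`ASYNC_CLOSE` is effectively a three-way switch: 0 falls back to a synchronous `close(2)`, 1 submits the close SQE immediately, and 2 (the value used here) lets it ride along with the next batched submission. Because the SQE's user data is NULL, its completion is skipped by the completion loop rather than being transferred to a fiber.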

  static
@@ -324,8 +499,7 @@ int timeout_nonblocking(struct __kernel_timespec *timespec) {
  struct select_arguments {
  struct Event_Backend_URing *data;

- int count;
- struct io_uring_cqe **cqes;
+ int result;

  struct __kernel_timespec storage;
  struct __kernel_timespec *timeout;
@@ -334,13 +508,11 @@ struct select_arguments {
  static
  void * select_internal(void *_arguments) {
  struct select_arguments * arguments = (struct select_arguments *)_arguments;
-
- arguments->count = io_uring_wait_cqes(&arguments->data->ring, arguments->cqes, 1, arguments->timeout, NULL);
-
- // If waiting resulted in a timeout, there are 0 events.
- if (arguments->count == -ETIME) {
- arguments->count = 0;
- }
+
+ io_uring_submit_flush(arguments->data);
+
+ struct io_uring_cqe *cqe = NULL;
+ arguments->result = io_uring_wait_cqe_timeout(&arguments->data->ring, &cqe, arguments->timeout);

  return NULL;
  }
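`select_internal` is designed to run without the GVL (see `select_internal_without_gvl` below): flushing and then blocking in `io_uring_wait_cqe_timeout` would otherwise stall every Ruby thread. A minimal sketch (illustrative) of that pattern:

#include <ruby/thread.h>
#include <liburing.h>

static void *blocking_wait(void *argument) {
	struct io_uring *ring = argument;
	struct io_uring_cqe *cqe = NULL;
	io_uring_wait_cqe(ring, &cqe); // Blocks until a completion arrives.
	return NULL;
}

static void wait_without_gvl(struct io_uring *ring) {
	// Other Ruby threads continue to run while we block here:
	rb_thread_call_without_gvl(blocking_wait, ring, RUBY_UBF_IO, 0);
}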
@@ -349,73 +521,107 @@ static
  int select_internal_without_gvl(struct select_arguments *arguments) {
  rb_thread_call_without_gvl(select_internal, (void *)arguments, RUBY_UBF_IO, 0);

- if (arguments->count < 0) {
- rb_syserr_fail(-arguments->count, "select_internal_without_gvl:io_uring_wait_cqes");
+ if (arguments->result == -ETIME) {
+ arguments->result = 0;
+ } else if (arguments->result < 0) {
+ rb_syserr_fail(-arguments->result, "select_internal_without_gvl:io_uring_wait_cqe_timeout");
+ } else {
+ // At least 1 event is waiting:
+ arguments->result = 1;
  }

- return arguments->count;
+ return arguments->result;
+ }
+
+ static inline
+ unsigned select_process_completions(struct io_uring *ring) {
+ unsigned completed = 0;
+ unsigned head;
+ struct io_uring_cqe *cqe;
+
+ io_uring_for_each_cqe(ring, head, cqe) {
+ ++completed;
+
+ // If the operation was cancelled, or the operation has no user data (fiber):
+ if (cqe->res == -ECANCELED || cqe->user_data == 0 || cqe->user_data == LIBURING_UDATA_TIMEOUT) {
+ io_uring_cq_advance(ring, 1);
+ continue;
+ }
+
+ VALUE fiber = (VALUE)cqe->user_data;
+ VALUE result = RB_INT2NUM(cqe->res);
+
+ if (DEBUG) fprintf(stderr, "cqe res=%d user_data=%p\n", cqe->res, (void*)cqe->user_data);
+
+ io_uring_cq_advance(ring, 1);
+
+ Event_Backend_fiber_transfer_result(fiber, result);
+ }
+
+ // io_uring_cq_advance(ring, completed);
+
+ if (DEBUG) fprintf(stderr, "select_process_completions(completed=%d)\n", completed);
+
+ return completed;
  }
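Each CQE is consumed with `io_uring_cq_advance(ring, 1)` before its fiber is resumed, rather than advancing by `completed` once at the end (the commented-out line). This matters because `Event_Backend_fiber_transfer_result` re-enters user code, which may itself submit operations and process completions; the ring must therefore be in a consistent state before every transfer.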

  VALUE Event_Backend_URing_select(VALUE self, VALUE duration) {
  struct Event_Backend_URing *data = NULL;
  TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);

- struct io_uring_cqe *cqes[URING_MAX_EVENTS];
-
- // This is a non-blocking operation:
- int result = io_uring_peek_batch_cqe(&data->ring, cqes, URING_MAX_EVENTS);
+ Event_Backend_ready_pop(&data->backend);

- if (result < 0) {
- rb_syserr_fail(-result, strerror(-result));
- } else if (result == 0) {
+ int result = 0;
+
+ // There can only be events waiting if we have been submitting them early:
+ if (EARLY_SUBMIT) {
+ result = select_process_completions(&data->ring);
+ }
+
+ // If we aren't submitting events early, we need to submit them and/or wait for them:
+ if (result == 0) {
  // We might need to wait for events:
  struct select_arguments arguments = {
  .data = data,
- .cqes = cqes,
  .timeout = NULL,
  };

  arguments.timeout = make_timeout(duration, &arguments.storage);

- if (!timeout_nonblocking(arguments.timeout)) {
+ if (!data->backend.ready && !timeout_nonblocking(arguments.timeout)) {
+ // This is a blocking operation, we wait for events:
  result = select_internal_without_gvl(&arguments);
+ } else {
+ // The specified timeout requires non-blocking behaviour, so we just flush the SQ if required:
+ io_uring_submit_flush(data);
  }
+
+ // After waiting/flushing the SQ, check if there are any completions:
+ result = select_process_completions(&data->ring);
  }

- // fprintf(stderr, "cqes count=%d\n", result);
-
- for (int i = 0; i < result; i += 1) {
- // If the operation was cancelled, or the operation has no user data (fiber):
- if (cqes[i]->res == -ECANCELED || cqes[i]->user_data == 0) {
- continue;
- }
-
- VALUE fiber = (VALUE)io_uring_cqe_get_data(cqes[i]);
- VALUE result = INT2NUM(cqes[i]->res);
-
- // fprintf(stderr, "cqes[i] res=%d user_data=%p\n", cqes[i]->res, (void*)cqes[i]->user_data);
-
- io_uring_cqe_seen(&data->ring, cqes[i]);
-
- rb_funcall(fiber, id_transfer, 1, result);
- }
-
- return INT2NUM(result);
+ return RB_INT2NUM(result);
  }

  void Init_Event_Backend_URing(VALUE Event_Backend) {
- id_fileno = rb_intern("fileno");
- id_transfer = rb_intern("transfer");
-
  Event_Backend_URing = rb_define_class_under(Event_Backend, "URing", rb_cObject);

  rb_define_alloc_func(Event_Backend_URing, Event_Backend_URing_allocate);
  rb_define_method(Event_Backend_URing, "initialize", Event_Backend_URing_initialize, 1);
+ rb_define_method(Event_Backend_URing, "transfer", Event_Backend_URing_transfer, 1);
+ rb_define_method(Event_Backend_URing, "defer", Event_Backend_URing_defer, 0);
+ rb_define_method(Event_Backend_URing, "ready?", Event_Backend_URing_ready_p, 0);
+ rb_define_method(Event_Backend_URing, "select", Event_Backend_URing_select, 1);
  rb_define_method(Event_Backend_URing, "close", Event_Backend_URing_close, 0);

  rb_define_method(Event_Backend_URing, "io_wait", Event_Backend_URing_io_wait, 3);
- rb_define_method(Event_Backend_URing, "select", Event_Backend_URing_select, 1);

- rb_define_method(Event_Backend_URing, "io_read", Event_Backend_URing_io_read, 5);
- rb_define_method(Event_Backend_URing, "io_write", Event_Backend_URing_io_write, 5);
+ #ifdef HAVE_RUBY_IO_BUFFER_H
+ rb_define_method(Event_Backend_URing, "io_read", Event_Backend_URing_io_read, 4);
+ rb_define_method(Event_Backend_URing, "io_write", Event_Backend_URing_io_write, 4);
+ #endif
+
+ rb_define_method(Event_Backend_URing, "io_close", Event_Backend_URing_io_close, 1);
+
+ rb_define_method(Event_Backend_URing, "process_wait", Event_Backend_URing_process_wait, 3);
  }