event 0.6.0 → 0.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -27,19 +27,18 @@
 #include <errno.h>
 
 static VALUE Event_Backend_KQueue = Qnil;
-static ID id_fileno;
 
 enum {KQUEUE_MAX_EVENTS = 64};
 
 struct Event_Backend_KQueue {
-	VALUE loop;
+	struct Event_Backend backend;
 	int descriptor;
 };
 
 void Event_Backend_KQueue_Type_mark(void *_data)
 {
 	struct Event_Backend_KQueue *data = _data;
-	rb_gc_mark(data->loop);
+	Event_Backend_mark(&data->backend);
 }
 
 static
@@ -79,7 +78,7 @@ VALUE Event_Backend_KQueue_allocate(VALUE self) {
 	struct Event_Backend_KQueue *data = NULL;
 	VALUE instance = TypedData_Make_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
 
-	data->loop = Qnil;
+	Event_Backend_initialize(&data->backend, Qnil);
 	data->descriptor = -1;
 
 	return instance;
@@ -89,7 +88,7 @@ VALUE Event_Backend_KQueue_initialize(VALUE self, VALUE loop) {
 	struct Event_Backend_KQueue *data = NULL;
 	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
 
-	data->loop = loop;
+	Event_Backend_initialize(&data->backend, loop);
 	int result = kqueue();
 
 	if (result == -1) {
@@ -113,6 +112,33 @@ VALUE Event_Backend_KQueue_close(VALUE self) {
 	return Qnil;
 }
 
+VALUE Event_Backend_KQueue_transfer(VALUE self, VALUE fiber)
+{
+	struct Event_Backend_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+
+	Event_Backend_wait_and_transfer(&data->backend, fiber);
+
+	return Qnil;
+}
+
+VALUE Event_Backend_KQueue_defer(VALUE self)
+{
+	struct Event_Backend_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+
+	Event_Backend_defer(&data->backend);
+
+	return Qnil;
+}
+
+VALUE Event_Backend_KQueue_ready_p(VALUE self) {
+	struct Event_Backend_KQueue *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
+
+	return data->backend.ready ? Qtrue : Qfalse;
+}
+
 struct process_wait_arguments {
 	struct Event_Backend_KQueue *data;
 	pid_t pid;
@@ -160,7 +186,7 @@ static
 VALUE process_wait_transfer(VALUE _arguments) {
 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
 
-	Event_Backend_transfer(arguments->data->loop);
+	Event_Backend_fiber_transfer(arguments->data->backend.loop);
 
 	return Event_Backend_process_status_wait(arguments->pid);
 }
@@ -281,7 +307,7 @@ static
 VALUE io_wait_transfer(VALUE _arguments) {
 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
 
-	VALUE result = Event_Backend_transfer(arguments->data->loop);
+	VALUE result = Event_Backend_fiber_transfer(arguments->data->backend.loop);
 
 	return INT2NUM(events_from_kqueue_filter(RB_NUM2INT(result)));
 }
@@ -290,7 +316,7 @@ VALUE Event_Backend_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE even
 	struct Event_Backend_KQueue *data = NULL;
 	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
 
-	int descriptor = RB_NUM2INT(rb_funcall(io, id_fileno, 0));
+	int descriptor = Event_Backend_io_descriptor(io);
 
 	struct io_wait_arguments io_wait_arguments = {
 		.events = io_add_filters(data->descriptor, descriptor, RB_NUM2INT(events), fiber),
@@ -301,6 +327,8 @@ VALUE Event_Backend_KQueue_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE even
 	return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
 }
 
+#ifdef HAVE_RUBY_IO_BUFFER_H
+
 struct io_read_arguments {
 	VALUE self;
 	VALUE fiber;
@@ -311,7 +339,6 @@ struct io_read_arguments {
 	int descriptor;
 
 	VALUE buffer;
-	size_t offset;
 	size_t length;
 };
 
@@ -319,18 +346,22 @@ static
 VALUE io_read_loop(VALUE _arguments) {
 	struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
 
-	size_t offset = arguments->offset;
+	void *base;
+	size_t size;
+	rb_io_buffer_get_mutable(arguments->buffer, &base, &size);
+
+	size_t offset = 0;
 	size_t length = arguments->length;
-	size_t total = 0;
 
 	while (length > 0) {
-		char *buffer = Event_Backend_resize_to_capacity(arguments->buffer, offset, length);
-		ssize_t result = read(arguments->descriptor, buffer+offset, length);
+		size_t maximum_size = size - offset;
+		ssize_t result = read(arguments->descriptor, (char*)base+offset, maximum_size);
 
-		if (result >= 0) {
+		if (result == 0) {
+			break;
+		} else if (result > 0) {
 			offset += result;
 			length -= result;
-			total += result;
 		} else if (errno == EAGAIN || errno == EWOULDBLOCK) {
 			Event_Backend_KQueue_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(READABLE));
 		} else {
@@ -338,9 +369,7 @@ VALUE io_read_loop(VALUE _arguments) {
 		}
 	}
 
-	Event_Backend_resize_to_fit(arguments->buffer, arguments->offset, arguments->length);
-
-	return SIZET2NUM(total);
+	return SIZET2NUM(offset);
 }
 
 static
@@ -352,13 +381,12 @@ VALUE io_read_ensure(VALUE _arguments) {
 	return Qnil;
 }
 
-VALUE Event_Backend_KQueue_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _offset, VALUE _length) {
+VALUE Event_Backend_KQueue_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
 	struct Event_Backend_KQueue *data = NULL;
 	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
 
-	int descriptor = RB_NUM2INT(rb_funcall(io, id_fileno, 0));
+	int descriptor = Event_Backend_io_descriptor(io);
 
-	size_t offset = NUM2SIZET(_offset);
 	size_t length = NUM2SIZET(_length);
 
 	struct io_read_arguments io_read_arguments = {
@@ -369,7 +397,6 @@ VALUE Event_Backend_KQueue_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buff
 		.flags = Event_Backend_nonblock_set(descriptor),
 		.descriptor = descriptor,
 		.buffer = buffer,
-		.offset = offset,
 		.length = length,
 	};
 
@@ -386,7 +413,6 @@ struct io_write_arguments {
 	int descriptor;
 
 	VALUE buffer;
-	size_t offset;
 	size_t length;
 };
 
@@ -394,18 +420,23 @@ static
 VALUE io_write_loop(VALUE _arguments) {
 	struct io_write_arguments *arguments = (struct io_write_arguments *)_arguments;
 
-	size_t offset = arguments->offset;
+	const void *base;
+	size_t size;
+	rb_io_buffer_get_immutable(arguments->buffer, &base, &size);
+
+	size_t offset = 0;
 	size_t length = arguments->length;
-	size_t total = 0;
+
+	if (length > size) {
+		rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
+	}
 
 	while (length > 0) {
-		char *buffer = Event_Backend_verify_size(arguments->buffer, offset, length);
-		ssize_t result = write(arguments->descriptor, buffer+offset, length);
+		ssize_t result = write(arguments->descriptor, (char*)base+offset, length);
 
 		if (result >= 0) {
-			length -= result;
 			offset += result;
-			total += result;
+			length -= result;
 		} else if (errno == EAGAIN || errno == EWOULDBLOCK) {
 			Event_Backend_KQueue_io_wait(arguments->self, arguments->fiber, arguments->io, RB_INT2NUM(WRITABLE));
 		} else {
@@ -413,7 +444,7 @@ VALUE io_write_loop(VALUE _arguments) {
 		}
 	}
 
-	return SIZET2NUM(total);
+	return SIZET2NUM(offset);
 };
 
 static
@@ -425,13 +456,12 @@ VALUE io_write_ensure(VALUE _arguments) {
 	return Qnil;
 };
 
-VALUE Event_Backend_KQueue_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _offset, VALUE _length) {
+VALUE Event_Backend_KQueue_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
 	struct Event_Backend_KQueue *data = NULL;
 	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
 
-	int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
+	int descriptor = Event_Backend_io_descriptor(io);
 
-	size_t offset = NUM2SIZET(_offset);
 	size_t length = NUM2SIZET(_length);
 
 	struct io_write_arguments io_write_arguments = {
@@ -442,13 +472,14 @@ VALUE Event_Backend_KQueue_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buf
 		.flags = Event_Backend_nonblock_set(descriptor),
 		.descriptor = descriptor,
 		.buffer = buffer,
-		.offset = offset,
 		.length = length,
 	};
 
 	return rb_ensure(io_write_loop, (VALUE)&io_write_arguments, io_write_ensure, (VALUE)&io_write_arguments);
 }
 
+#endif
+
 static
 struct timespec * make_timeout(VALUE duration, struct timespec * storage) {
 	if (duration == Qnil) {
@@ -521,6 +552,8 @@ VALUE Event_Backend_KQueue_select(VALUE self, VALUE duration) {
 	struct Event_Backend_KQueue *data = NULL;
 	TypedData_Get_Struct(self, struct Event_Backend_KQueue, &Event_Backend_KQueue_Type, data);
 
+	Event_Backend_ready_pop(&data->backend);
+
 	struct select_arguments arguments = {
 		.data = data,
 		.count = KQUEUE_MAX_EVENTS,
@@ -544,7 +577,7 @@ VALUE Event_Backend_KQueue_select(VALUE self, VALUE duration) {
 	if (arguments.count == 0) {
 		arguments.timeout = make_timeout(duration, &arguments.storage);
 
-		if (!timeout_nonblocking(arguments.timeout)) {
+		if (!data->backend.ready && !timeout_nonblocking(arguments.timeout)) {
 			arguments.count = KQUEUE_MAX_EVENTS;
 
 			select_internal_without_gvl(&arguments);
@@ -555,25 +588,29 @@ VALUE Event_Backend_KQueue_select(VALUE self, VALUE duration) {
 		VALUE fiber = (VALUE)arguments.events[i].udata;
 		VALUE result = INT2NUM(arguments.events[i].filter);
 
-		Event_Backend_transfer_result(fiber, result);
+		Event_Backend_fiber_transfer_result(fiber, result);
 	}
 
 	return INT2NUM(arguments.count);
 }
 
 void Init_Event_Backend_KQueue(VALUE Event_Backend) {
-	id_fileno = rb_intern("fileno");
-
 	Event_Backend_KQueue = rb_define_class_under(Event_Backend, "KQueue", rb_cObject);
 
 	rb_define_alloc_func(Event_Backend_KQueue, Event_Backend_KQueue_allocate);
 	rb_define_method(Event_Backend_KQueue, "initialize", Event_Backend_KQueue_initialize, 1);
+	rb_define_method(Event_Backend_KQueue, "transfer", Event_Backend_KQueue_transfer, 1);
+	rb_define_method(Event_Backend_KQueue, "defer", Event_Backend_KQueue_defer, 0);
+	rb_define_method(Event_Backend_KQueue, "ready?", Event_Backend_KQueue_ready_p, 0);
	rb_define_method(Event_Backend_KQueue, "select", Event_Backend_KQueue_select, 1);
 	rb_define_method(Event_Backend_KQueue, "close", Event_Backend_KQueue_close, 0);
 
 	rb_define_method(Event_Backend_KQueue, "io_wait", Event_Backend_KQueue_io_wait, 3);
-	rb_define_method(Event_Backend_KQueue, "io_read", Event_Backend_KQueue_io_read, 5);
-	rb_define_method(Event_Backend_KQueue, "io_write", Event_Backend_KQueue_io_write, 5);
+
+	#ifdef HAVE_RUBY_IO_BUFFER_H
+	rb_define_method(Event_Backend_KQueue, "io_read", Event_Backend_KQueue_io_read, 4);
+	rb_define_method(Event_Backend_KQueue, "io_write", Event_Backend_KQueue_io_write, 4);
+	#endif
 
 	rb_define_method(Event_Backend_KQueue, "process_wait", Event_Backend_KQueue_process_wait, 3);
 }
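
Both backends in this release replace their private `VALUE loop` field and the cached `id_fileno` lookup with a shared `struct Event_Backend` embedded in each backend struct. That shared struct and its helpers (`Event_Backend_initialize`, `Event_Backend_mark`, `Event_Backend_defer`, `Event_Backend_wait_and_transfer`, `Event_Backend_ready_pop`, `Event_Backend_io_descriptor`) live in the gem's common backend header, which is not part of this diff. A minimal sketch of the state it has to carry, inferred only from how the hunks above and below use it, might look like the following; in the released code `ready` may well be a queue of deferred fibers rather than a plain flag, so this models nothing more than the truthiness that the `ready?` and `select` paths rely on. The io_uring backend below receives the same treatment, plus a batched submission queue.

// Sketch only -- inferred from usage in this diff, not the gem's actual backend header.
#include <ruby.h>

struct Event_Backend {
	// The event loop fiber that blocked operations transfer control back to:
	VALUE loop;

	// Truthy when fibers have been deferred and are waiting to be resumed,
	// which is what `ready?` returns and what lets `select` avoid blocking:
	int ready;
};

static inline void Event_Backend_initialize(struct Event_Backend *backend, VALUE loop) {
	backend->loop = loop;
	backend->ready = 0;
}

static inline void Event_Backend_mark(struct Event_Backend *backend) {
	// Replaces the per-backend rb_gc_mark(data->loop) removed above:
	rb_gc_mark(backend->loop);
}
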
@@ -27,21 +27,25 @@
 
 #include "pidfd.c"
 
+static const int DEBUG = 0;
+
+// This option controls whether to call `io_uring_submit()` after every operation:
+static const int EARLY_SUBMIT = 1;
+
 static VALUE Event_Backend_URing = Qnil;
-static ID id_fileno;
 
-enum {URING_ENTRIES = 128};
-enum {URING_MAX_EVENTS = 128};
+enum {URING_ENTRIES = 64};
 
 struct Event_Backend_URing {
-	VALUE loop;
+	struct Event_Backend backend;
 	struct io_uring ring;
+	size_t pending;
 };
 
 void Event_Backend_URing_Type_mark(void *_data)
 {
 	struct Event_Backend_URing *data = _data;
-	rb_gc_mark(data->loop);
+	Event_Backend_mark(&data->backend);
 }
 
 static
@@ -81,9 +85,11 @@ VALUE Event_Backend_URing_allocate(VALUE self) {
 	struct Event_Backend_URing *data = NULL;
 	VALUE instance = TypedData_Make_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
 
-	data->loop = Qnil;
+	Event_Backend_initialize(&data->backend, Qnil);
 	data->ring.ring_fd = -1;
 
+	data->pending = 0;
+
 	return instance;
 }
 
@@ -91,8 +97,7 @@ VALUE Event_Backend_URing_initialize(VALUE self, VALUE loop) {
 	struct Event_Backend_URing *data = NULL;
 	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
 
-	data->loop = loop;
-
+	Event_Backend_initialize(&data->backend, loop);
 	int result = io_uring_queue_init(URING_ENTRIES, &data->ring, 0);
 
 	if (result < 0) {
@@ -113,16 +118,91 @@ VALUE Event_Backend_URing_close(VALUE self) {
 	return Qnil;
 }
 
+VALUE Event_Backend_URing_transfer(VALUE self, VALUE fiber)
+{
+	struct Event_Backend_URing *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+
+	Event_Backend_wait_and_transfer(&data->backend, fiber);
+
+	return Qnil;
+}
+
+VALUE Event_Backend_URing_defer(VALUE self)
+{
+	struct Event_Backend_URing *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+
+	Event_Backend_defer(&data->backend);
+
+	return Qnil;
+}
+
+VALUE Event_Backend_URing_ready_p(VALUE self) {
+	struct Event_Backend_URing *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+
+	return data->backend.ready ? Qtrue : Qfalse;
+}
+
+static
+int io_uring_submit_flush(struct Event_Backend_URing *data) {
+	if (data->pending) {
+		if (DEBUG) fprintf(stderr, "io_uring_submit_flush(pending=%ld)\n", data->pending);
+
+		// Try to submit:
+		int result = io_uring_submit(&data->ring);
+
+		if (result >= 0) {
+			// If it was submitted, reset pending count:
+			data->pending = 0;
+		} else if (result != -EBUSY && result != -EAGAIN) {
+			rb_syserr_fail(-result, "io_uring_submit_flush");
+		}
+
+		return result;
+	}
+
+	return 0;
+}
+
+static
+int io_uring_submit_now(struct Event_Backend_URing *data) {
+	while (true) {
+		int result = io_uring_submit(&data->ring);
+
+		if (result >= 0) {
+			data->pending = 0;
+			return result;
+		}
+
+		if (result == -EBUSY || result == -EAGAIN) {
+			Event_Backend_defer(&data->backend);
+		} else {
+			rb_syserr_fail(-result, "io_uring_submit_now");
+		}
+	}
+}
+
+static
+void io_uring_submit_pending(struct Event_Backend_URing *data) {
+	if (EARLY_SUBMIT) {
+		io_uring_submit_now(data);
+	} else {
+		data->pending += 1;
+	}
+}
+
 struct io_uring_sqe * io_get_sqe(struct Event_Backend_URing *data) {
 	struct io_uring_sqe *sqe = io_uring_get_sqe(&data->ring);
 
 	while (sqe == NULL) {
-		io_uring_submit(&data->ring);
+		// The submit queue is full, we need to drain it:
+		io_uring_submit_now(data);
+
 		sqe = io_uring_get_sqe(&data->ring);
 	}
 
-	// fprintf(stderr, "io_get_sqe -> %p\n", sqe);
-
 	return sqe;
 }
 
@@ -137,7 +217,7 @@ static
 VALUE process_wait_transfer(VALUE _arguments) {
 	struct process_wait_arguments *arguments = (struct process_wait_arguments *)_arguments;
 
-	Event_Backend_transfer(arguments->data->loop);
+	Event_Backend_fiber_transfer(arguments->data->backend.loop);
 
 	return Event_Backend_process_status_wait(arguments->pid);
 }
@@ -165,11 +245,12 @@ VALUE Event_Backend_URing_process_wait(VALUE self, VALUE fiber, VALUE pid, VALUE
 	rb_update_max_fd(process_wait_arguments.descriptor);
 
 	struct io_uring_sqe *sqe = io_get_sqe(data);
-	assert(sqe);
-
+
+	if (DEBUG) fprintf(stderr, "Event_Backend_URing_process_wait:io_uring_prep_poll_add(%p)\n", (void*)fiber);
 	io_uring_prep_poll_add(sqe, process_wait_arguments.descriptor, POLLIN|POLLHUP|POLLERR);
 	io_uring_sqe_set_data(sqe, (void*)fiber);
-
+	io_uring_submit_pending(data);
+
 	return rb_ensure(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_ensure, (VALUE)&process_wait_arguments);
 }
 
@@ -210,12 +291,10 @@ VALUE io_wait_rescue(VALUE _arguments, VALUE exception) {
 	struct Event_Backend_URing *data = arguments->data;
 
 	struct io_uring_sqe *sqe = io_get_sqe(data);
-	assert(sqe);
 
-	// fprintf(stderr, "poll_remove(%p, %p)\n", sqe, (void*)arguments->fiber);
+	if (DEBUG) fprintf(stderr, "io_wait_rescue:io_uring_prep_poll_remove(%p)\n", (void*)arguments->fiber);
 
 	io_uring_prep_poll_remove(sqe, (void*)arguments->fiber);
-	io_uring_submit(&data->ring);
 
 	rb_exc_raise(exception);
 };
@@ -225,8 +304,9 @@ VALUE io_wait_transfer(VALUE _arguments) {
 	struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;
 	struct Event_Backend_URing *data = arguments->data;
 
-	VALUE result = Event_Backend_transfer(data->loop);
-
+	VALUE result = Event_Backend_fiber_transfer(data->backend.loop);
+	if (DEBUG) fprintf(stderr, "io_wait:Event_Backend_fiber_transfer -> %d\n", RB_NUM2INT(result));
+
 	// We explicitly filter the resulting events based on the requested events.
 	// In some cases, poll will report events we didn't ask for.
 	short flags = arguments->flags & NUM2INT(result);
@@ -238,18 +318,16 @@ VALUE Event_Backend_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE event
 	struct Event_Backend_URing *data = NULL;
 	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
 
-	int descriptor = NUM2INT(rb_funcall(io, id_fileno, 0));
+	int descriptor = Event_Backend_io_descriptor(io);
 	struct io_uring_sqe *sqe = io_get_sqe(data);
-	assert(sqe);
 
 	short flags = poll_flags_from_events(NUM2INT(events));
 
-	// fprintf(stderr, "poll_add(%p, %d, %d, %p)\n", sqe, descriptor, flags, (void*)fiber);
+	if (DEBUG) fprintf(stderr, "Event_Backend_URing_io_wait:io_uring_prep_poll_add(descriptor=%d, flags=%d, fiber=%p)\n", descriptor, flags, (void*)fiber);
 
 	io_uring_prep_poll_add(sqe, descriptor, flags);
 	io_uring_sqe_set_data(sqe, (void*)fiber);
-	// fprintf(stderr, "io_uring_submit\n");
-	// io_uring_submit(&data->ring);
+	io_uring_submit_pending(data);
 
 	struct io_wait_arguments io_wait_arguments = {
 		.data = data,
@@ -260,42 +338,46 @@ VALUE Event_Backend_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE event
 	return rb_rescue(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_rescue, (VALUE)&io_wait_arguments);
 }
 
-static
-int io_read(struct Event_Backend_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length) {
+#ifdef HAVE_RUBY_IO_BUFFER_H
+
+static int io_read(struct Event_Backend_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length) {
 	struct io_uring_sqe *sqe = io_get_sqe(data);
-	assert(sqe);
-
-	struct iovec iovecs[1];
-	iovecs[0].iov_base = buffer;
-	iovecs[0].iov_len = length;
-
-	io_uring_prep_readv(sqe, descriptor, iovecs, 1, 0);
+
+	if (DEBUG) fprintf(stderr, "io_read:io_uring_prep_read(fiber=%p)\n", (void*)fiber);
+
+	io_uring_prep_read(sqe, descriptor, buffer, length, 0);
 	io_uring_sqe_set_data(sqe, (void*)fiber);
-	io_uring_submit(&data->ring);
+	io_uring_submit_pending(data);
 
-	return NUM2INT(Event_Backend_transfer(data->loop));
+	VALUE result = Event_Backend_fiber_transfer(data->backend.loop);
+	if (DEBUG) fprintf(stderr, "io_read:Event_Backend_fiber_transfer -> %d\n", RB_NUM2INT(result));
+
+	return RB_NUM2INT(result);
 }
 
-VALUE Event_Backend_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE _buffer, VALUE _offset, VALUE _length) {
+VALUE Event_Backend_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
 	struct Event_Backend_URing *data = NULL;
 	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
 
-	int descriptor = RB_NUM2INT(rb_funcall(io, id_fileno, 0));
+	int descriptor = Event_Backend_io_descriptor(io);
 
-	size_t offset = NUM2SIZET(_offset);
-	size_t length = NUM2SIZET(_length);
+	void *base;
+	size_t size;
+	rb_io_buffer_get_mutable(buffer, &base, &size);
 
-	size_t start = offset;
-	size_t total = 0;
+	size_t offset = 0;
+	size_t length = NUM2SIZET(_length);
 
 	while (length > 0) {
-		char *buffer = Event_Backend_resize_to_capacity(_buffer, offset, length);
-		int result = io_read(data, fiber, descriptor, buffer+offset, length);
+		size_t maximum_size = size - offset;
+		int result = io_read(data, fiber, descriptor, (char*)base+offset, maximum_size);
 
-		if (result >= 0) {
+		if (result == 0) {
+			break;
+		} else if (result > 0) {
 			offset += result;
+			if ((size_t)result >= length) break;
 			length -= result;
-			total += result;
 		} else if (-result == EAGAIN || -result == EWOULDBLOCK) {
 			Event_Backend_URing_io_wait(self, fiber, io, RB_INT2NUM(READABLE));
 		} else {
@@ -303,47 +385,49 @@ VALUE Event_Backend_URing_io_read(VALUE self, VALUE fiber, VALUE io, VALUE _buff
 		}
 	}
 
-	Event_Backend_resize_to_fit(_buffer, start, total);
-
-	return SIZET2NUM(total);
+	return SIZET2NUM(offset);
 }
 
 static
 int io_write(struct Event_Backend_URing *data, VALUE fiber, int descriptor, char *buffer, size_t length) {
 	struct io_uring_sqe *sqe = io_get_sqe(data);
-	assert(sqe);
 
-	struct iovec iovecs[1];
-	iovecs[0].iov_base = buffer;
-	iovecs[0].iov_len = length;
-
-	io_uring_prep_writev(sqe, descriptor, iovecs, 1, 0);
+	if (DEBUG) fprintf(stderr, "io_write:io_uring_prep_write(fiber=%p)\n", (void*)fiber);
+
+	io_uring_prep_write(sqe, descriptor, buffer, length, 0);
 	io_uring_sqe_set_data(sqe, (void*)fiber);
-	io_uring_submit(&data->ring);
+	io_uring_submit_pending(data);
 
-	return NUM2INT(Event_Backend_transfer(data->loop));
+	int result = RB_NUM2INT(Event_Backend_fiber_transfer(data->backend.loop));
+	if (DEBUG) fprintf(stderr, "io_write:Event_Backend_fiber_transfer -> %d\n", result);
+
+	return result;
 }
 
-VALUE Event_Backend_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE _buffer, VALUE _offset, VALUE _length) {
+VALUE Event_Backend_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE buffer, VALUE _length) {
 	struct Event_Backend_URing *data = NULL;
 	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
 
-	int descriptor = RB_NUM2INT(rb_funcall(io, id_fileno, 0));
+	int descriptor = Event_Backend_io_descriptor(io);
 
-	size_t offset = NUM2SIZET(_offset);
-	size_t length = NUM2SIZET(_length);
+	const void *base;
+	size_t size;
+	rb_io_buffer_get_immutable(buffer, &base, &size);
 
-	char *buffer = Event_Backend_verify_size(_buffer, offset, length);
+	size_t offset = 0;
+	size_t length = NUM2SIZET(_length);
 
-	size_t total = 0;
+	if (length > size) {
+		rb_raise(rb_eRuntimeError, "Length exceeds size of buffer!");
+	}
 
 	while (length > 0) {
-		int result = io_write(data, fiber, descriptor, buffer+offset, length);
+		int result = io_write(data, fiber, descriptor, (char*)base+offset, length);
 
 		if (result >= 0) {
-			length -= result;
 			offset += result;
-			total += result;
+			if ((size_t)result >= length) break;
+			length -= result;
 		} else if (-result == EAGAIN || -result == EWOULDBLOCK) {
 			Event_Backend_URing_io_wait(self, fiber, io, RB_INT2NUM(WRITABLE));
 		} else {
@@ -351,7 +435,34 @@ VALUE Event_Backend_URing_io_write(VALUE self, VALUE fiber, VALUE io, VALUE _buf
 		}
 	}
 
-	return SIZET2NUM(total);
+	return SIZET2NUM(offset);
+}
+
+#endif
+
+static const int ASYNC_CLOSE = 2;
+
+VALUE Event_Backend_URing_io_close(VALUE self, VALUE io) {
+	struct Event_Backend_URing *data = NULL;
+	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
+
+	int descriptor = Event_Backend_io_descriptor(io);
+
+	if (ASYNC_CLOSE) {
+		struct io_uring_sqe *sqe = io_get_sqe(data);
+
+		io_uring_prep_close(sqe, descriptor);
+		io_uring_sqe_set_data(sqe, NULL);
+		if (ASYNC_CLOSE == 1)
+			io_uring_submit_now(data);
+		else if (ASYNC_CLOSE == 2)
+			io_uring_submit_pending(data);
+	} else {
+		close(descriptor);
+	}
+
+	// We don't wait for the result of close since it has no use in practice:
+	return Qtrue;
 }
 
 static
@@ -397,9 +508,9 @@ struct select_arguments {
 static
 void * select_internal(void *_arguments) {
 	struct select_arguments * arguments = (struct select_arguments *)_arguments;
-
-	io_uring_submit(&arguments->data->ring);
-
+
+	io_uring_submit_flush(arguments->data);
+
 	struct io_uring_cqe *cqe = NULL;
 	arguments->result = io_uring_wait_cqe_timeout(&arguments->data->ring, &cqe, arguments->timeout);
 
@@ -433,21 +544,24 @@ unsigned select_process_completions(struct io_uring *ring) {
 
 		// If the operation was cancelled, or the operation has no user data (fiber):
 		if (cqe->res == -ECANCELED || cqe->user_data == 0 || cqe->user_data == LIBURING_UDATA_TIMEOUT) {
+			io_uring_cq_advance(ring, 1);
 			continue;
 		}
 
 		VALUE fiber = (VALUE)cqe->user_data;
-		VALUE result = INT2NUM(cqe->res);
+		VALUE result = RB_INT2NUM(cqe->res);
 
-		// fprintf(stderr, "cqe res=%d user_data=%p\n", cqe->res, (void*)cqe->user_data);
-
-		Event_Backend_transfer_result(fiber, result);
-	}
-
-	if (completed) {
-		io_uring_cq_advance(ring, completed);
+		if (DEBUG) fprintf(stderr, "cqe res=%d user_data=%p\n", cqe->res, (void*)cqe->user_data);
+
+		io_uring_cq_advance(ring, 1);
+
+		Event_Backend_fiber_transfer_result(fiber, result);
 	}
 
+	// io_uring_cq_advance(ring, completed);
+
+	if (DEBUG) fprintf(stderr, "select_process_completions(completed=%d)\n", completed);
+
 	return completed;
 }
 
@@ -455,11 +569,17 @@ VALUE Event_Backend_URing_select(VALUE self, VALUE duration) {
 	struct Event_Backend_URing *data = NULL;
 	TypedData_Get_Struct(self, struct Event_Backend_URing, &Event_Backend_URing_Type, data);
 
-	int result = select_process_completions(&data->ring);
+	Event_Backend_ready_pop(&data->backend);
 
-	if (result < 0) {
-		rb_syserr_fail(-result, strerror(-result));
-	} else if (result == 0) {
+	int result = 0;
+
+	// There can only be events waiting if we have been submitting them early:
+	if (EARLY_SUBMIT) {
+		result = select_process_completions(&data->ring);
+	}
+
+	// If we aren't submitting events early, we need to submit them and/or wait for them:
+	if (result == 0) {
 		// We might need to wait for events:
 		struct select_arguments arguments = {
 			.data = data,
@@ -468,31 +588,40 @@ VALUE Event_Backend_URing_select(VALUE self, VALUE duration) {
 
 		arguments.timeout = make_timeout(duration, &arguments.storage);
 
-		if (!timeout_nonblocking(arguments.timeout)) {
+		if (!data->backend.ready && !timeout_nonblocking(arguments.timeout)) {
+			// This is a blocking operation, we wait for events:
 			result = select_internal_without_gvl(&arguments);
 		} else {
-			io_uring_submit(&data->ring);
+			// The timeout specified required "nonblocking" behaviour so we just flush the SQ if required:
+			io_uring_submit_flush(data);
 		}
+
+		// After waiting/flushing the SQ, check if there are any completions:
+		result = select_process_completions(&data->ring);
 	}
 
-	result = select_process_completions(&data->ring);
-
-	return INT2NUM(result);
+	return RB_INT2NUM(result);
 }
 
 void Init_Event_Backend_URing(VALUE Event_Backend) {
-	id_fileno = rb_intern("fileno");
-
 	Event_Backend_URing = rb_define_class_under(Event_Backend, "URing", rb_cObject);
 
 	rb_define_alloc_func(Event_Backend_URing, Event_Backend_URing_allocate);
 	rb_define_method(Event_Backend_URing, "initialize", Event_Backend_URing_initialize, 1);
+	rb_define_method(Event_Backend_URing, "transfer", Event_Backend_URing_transfer, 1);
+	rb_define_method(Event_Backend_URing, "defer", Event_Backend_URing_defer, 0);
+	rb_define_method(Event_Backend_URing, "ready?", Event_Backend_URing_ready_p, 0);
 	rb_define_method(Event_Backend_URing, "select", Event_Backend_URing_select, 1);
 	rb_define_method(Event_Backend_URing, "close", Event_Backend_URing_close, 0);
 
 	rb_define_method(Event_Backend_URing, "io_wait", Event_Backend_URing_io_wait, 3);
-	rb_define_method(Event_Backend_URing, "io_read", Event_Backend_URing_io_read, 5);
-	rb_define_method(Event_Backend_URing, "io_write", Event_Backend_URing_io_write, 5);
+
+	#ifdef HAVE_RUBY_IO_BUFFER_H
+	rb_define_method(Event_Backend_URing, "io_read", Event_Backend_URing_io_read, 4);
+	rb_define_method(Event_Backend_URing, "io_write", Event_Backend_URing_io_write, 4);
+	#endif
+
+	rb_define_method(Event_Backend_URing, "io_close", Event_Backend_URing_io_close, 1);
 
 	rb_define_method(Event_Backend_URing, "process_wait", Event_Backend_URing_process_wait, 3);
 }
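
Beyond the shared backend state, the main behavioural change in both backends is the buffer handling: `io_read` and `io_write` now take four arguments, operate directly on memory exposed by `rb_io_buffer_get_mutable`/`rb_io_buffer_get_immutable` (hence the `HAVE_RUBY_IO_BUFFER_H` guards), return the number of bytes transferred as `offset`, and stop early at end-of-file instead of resizing a string buffer. The standalone sketch below reproduces just that bookkeeping outside the gem and without the fiber suspension; the name `read_exactly` and the `main` driver are invented here purely for illustration.

// Illustration only: the same offset/length accounting as the new read loops,
// with the fiber wait on EAGAIN reduced to a comment.
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static ssize_t read_exactly(int descriptor, char *base, size_t size, size_t length) {
	size_t offset = 0;

	// The gem's write path raises if the requested length exceeds the buffer;
	// here we simply clamp it:
	if (length > size) length = size;

	while (length > 0) {
		size_t maximum_size = size - offset;
		ssize_t result = read(descriptor, base + offset, maximum_size);

		if (result == 0) {
			// End of file: report what we have so far.
			break;
		} else if (result > 0) {
			offset += result;
			if ((size_t)result >= length) break;
			length -= result;
		} else if (errno == EAGAIN || errno == EWOULDBLOCK) {
			// In the gem, the fiber waits for READABLE here and then retries.
			continue;
		} else {
			return -1;
		}
	}

	// Same convention as SIZET2NUM(offset) in the loops above:
	return (ssize_t)offset;
}

int main(void) {
	char buffer[32];
	ssize_t count = read_exactly(STDIN_FILENO, buffer, sizeof(buffer), 16);

	if (count >= 0) printf("read %zd bytes\n", count);

	return 0;
}
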