io-event 1.3.1 → 1.3.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: de2b826a00b27006568dad0139dd9d695ce8708882607fd1e1e4c4eda7d727cd
- data.tar.gz: 3c34946c623e2c03d31c05f7e62283b005d8e8c8d1a6dd78d80a9cf847e7231c
+ metadata.gz: 3362ef0fb1bd825601c2476c06e99d32aa911a4e66b189b1518f40b6246b1ab6
+ data.tar.gz: a41015ab836dd0e28656427001882a87ba26a424395babb81aecd0e6d07d5fdd
  SHA512:
- metadata.gz: e2f752ea2f36c3cb848b3db0a8f2db62d27cd7a97f1dc1c245a8a094e8bd44368a4bc9bfcef3091d6473d02f2027423ddbdb9adcf9073d1103404408860ae411
- data.tar.gz: bab17c94312eedabf98144e2363a37e0e9356e264f51108dcde0e0a188249eaa543ba164fe330a7fa3332b623418fe8cd5d3190774e723794b9d0fc121688f26
+ metadata.gz: e1563b616697985daed4bb585bfdeaec00f6dfbaa9f377e99b278a9070f5ee32b475136e0f8f7626e4238a0d623a0bdb745ef03a612ec0de4b7f2eb9ef2f4510
+ data.tar.gz: d061bea14bd722ae77eddc68e81d0c4138a5de1b1b12e6fce22a45bc62391c7bf3d9dd25cf0f876361309b04d9e480a06c9752128103c999a504d01e6ffb78d3
checksums.yaml.gz.sig CHANGED
Binary file
@@ -446,6 +446,7 @@ struct process_wait_arguments {
  struct IO_Event_Selector_EPoll *selector;
  struct IO_Event_Selector_EPoll_Waiting *waiting;
  int pid;
+ int flags;
  int descriptor;
  };

@@ -456,7 +457,7 @@ VALUE process_wait_transfer(VALUE _arguments) {
  IO_Event_Selector_fiber_transfer(arguments->selector->backend.loop, 0, NULL);

  if (arguments->waiting->ready) {
- return IO_Event_Selector_process_status_wait(arguments->pid);
+ return IO_Event_Selector_process_status_wait(arguments->pid, arguments->flags);
  } else {
  return Qfalse;
  }
@@ -480,7 +481,7 @@ VALUE IO_Event_Selector_EPoll_process_wait(VALUE self, VALUE fiber, VALUE _pid,
  TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);

  pid_t pid = NUM2PIDT(_pid);
- // int flags = NUM2INT(_flags);
+ int flags = NUM2INT(_flags);

  int descriptor = pidfd_open(pid, 0);

@@ -506,6 +507,7 @@ VALUE IO_Event_Selector_EPoll_process_wait(VALUE self, VALUE fiber, VALUE _pid,
  struct process_wait_arguments process_wait_arguments = {
  .selector = selector,
  .pid = pid,
+ .flags = flags,
  .descriptor = descriptor,
  .waiting = &waiting,
  };
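On Linux, the epoll selector turns a child process into a pollable file descriptor with pidfd_open(2) and then reaps it without blocking once that descriptor becomes readable; the new flags argument is threaded through to the final status query. A minimal standalone sketch of that mechanism (not the gem's code; it assumes a Linux kernel with pidfd support and uses the raw syscall number):

#define _GNU_SOURCE
#include <sys/syscall.h>
#include <sys/wait.h>
#include <poll.h>
#include <unistd.h>
#include <stdio.h>

int main(void) {
	pid_t pid = fork();
	if (pid == 0) _exit(42); // Child exits immediately.
	
	// A pidfd becomes readable when the process exits:
	int descriptor = syscall(SYS_pidfd_open, pid, 0);
	struct pollfd pfd = {.fd = descriptor, .events = POLLIN};
	poll(&pfd, 1, -1);
	
	// The child has already exited, so WNOHANG still reaps it:
	int status = 0;
	waitpid(pid, &status, WNOHANG);
	printf("exit status: %d\n", WEXITSTATUS(status));
	
	close(descriptor);
	return 0;
}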
@@ -802,9 +804,11 @@ struct select_arguments {

  int count;
  struct epoll_event events[EPOLL_MAX_EVENTS];
-
+
  struct timespec * timeout;
  struct timespec storage;
+
+ struct IO_Event_List saved;
  };

  static int make_timeout_ms(struct timespec * timeout) {
@@ -881,7 +885,7 @@ void select_internal_with_gvl(struct select_arguments *arguments) {
  }

  static
- int IO_Event_Selector_EPoll_handle(struct IO_Event_Selector_EPoll *selector, const struct epoll_event *event)
+ int IO_Event_Selector_EPoll_handle(struct IO_Event_Selector_EPoll *selector, const struct epoll_event *event, struct IO_Event_List *saved)
  {
  int descriptor = event->data.fd;

@@ -891,29 +895,32 @@ int IO_Event_Selector_EPoll_handle(struct IO_Event_Selector_EPoll *selector, con
  struct IO_Event_Selector_EPoll_Descriptor *epoll_descriptor = IO_Event_Selector_EPoll_Descriptor_lookup(selector, descriptor);
  struct IO_Event_List *list = &epoll_descriptor->list;
  struct IO_Event_List *node = list->tail;
- struct IO_Event_List saved = {NULL, NULL};

  // Reset the events back to 0 so that we can re-arm if necessary:
  epoll_descriptor->waiting_events = 0;

+ if (DEBUG) fprintf(stderr, "IO_Event_Selector_EPoll_handle: descriptor=%d, ready_events=%d epoll_descriptor=%p\n", descriptor, ready_events, epoll_descriptor);
+
  // It's possible (but unlikely) that the address of list will changing during iteration.
  while (node != list) {
+ if (DEBUG) fprintf(stderr, "IO_Event_Selector_EPoll_handle: node=%p list=%p type=%p\n", node, list, node->type);
+
  struct IO_Event_Selector_EPoll_Waiting *waiting = (struct IO_Event_Selector_EPoll_Waiting *)node;

  // Compute the intersection of the events we are waiting for and the events that occured:
  enum IO_Event matching_events = waiting->events & ready_events;

- if (DEBUG) fprintf(stderr, "IO_Event_Selector_EPoll_handle: descriptor=%d, ready_events=%d, matching_events=%d\n", descriptor, ready_events, matching_events);
+ if (DEBUG) fprintf(stderr, "IO_Event_Selector_EPoll_handle: descriptor=%d, ready_events=%d, waiting_events=%d, matching_events=%d\n", descriptor, ready_events, waiting->events, matching_events);

  if (matching_events) {
- IO_Event_List_append(node, &saved);
+ IO_Event_List_append(node, saved);

  // Resume the fiber:
  waiting->ready = matching_events;
  IO_Event_Selector_fiber_transfer(waiting->fiber, 0, NULL);

- node = saved.tail;
- IO_Event_List_pop(&saved);
+ node = saved->tail;
+ IO_Event_List_pop(saved);
  } else {
  // We are still waiting for the events:
  epoll_descriptor->waiting_events |= waiting->events;
@@ -924,6 +931,36 @@ int IO_Event_Selector_EPoll_handle(struct IO_Event_Selector_EPoll *selector, con
  return IO_Event_Selector_EPoll_Descriptor_update(selector, epoll_descriptor->io, descriptor, epoll_descriptor);
  }

+ static
+ VALUE select_handle_events(VALUE _arguments)
+ {
+ struct select_arguments *arguments = (struct select_arguments *)_arguments;
+ struct IO_Event_Selector_EPoll *selector = arguments->selector;
+
+ for (int i = 0; i < arguments->count; i += 1) {
+ const struct epoll_event *event = &arguments->events[i];
+ if (DEBUG) fprintf(stderr, "-> fd=%d events=%d\n", event->data.fd, event->events);
+
+ if (event->data.fd >= 0) {
+ IO_Event_Selector_EPoll_handle(selector, event, &arguments->saved);
+ } else {
+ IO_Event_Interrupt_clear(&selector->interrupt);
+ }
+ }
+
+ return INT2NUM(arguments->count);
+ }
+
+ static
+ VALUE select_handle_events_ensure(VALUE _arguments)
+ {
+ struct select_arguments *arguments = (struct select_arguments *)_arguments;
+
+ IO_Event_List_free(&arguments->saved);
+
+ return Qnil;
+ }
+
  // TODO This function is not re-entrant and we should document and assert as such.
  VALUE IO_Event_Selector_EPoll_select(VALUE self, VALUE duration) {
  struct IO_Event_Selector_EPoll *selector = NULL;
@@ -937,6 +974,7 @@ VALUE IO_Event_Selector_EPoll_select(VALUE self, VALUE duration) {
  .tv_sec = 0,
  .tv_nsec = 0
  },
+ .saved = {},
  };

  arguments.timeout = &arguments.storage;
@@ -958,18 +996,11 @@ VALUE IO_Event_Selector_EPoll_select(VALUE self, VALUE duration) {
  }
  }

- for (int i = 0; i < arguments.count; i += 1) {
- const struct epoll_event *event = &arguments.events[i];
- if (DEBUG) fprintf(stderr, "-> ptr=%p events=%d\n", event->data.ptr, event->events);
-
- if (event->data.fd >= 0) {
- IO_Event_Selector_EPoll_handle(selector, event);
- } else {
- IO_Event_Interrupt_clear(&selector->interrupt);
- }
+ if (arguments.count) {
+ return rb_ensure(select_handle_events, (VALUE)&arguments, select_handle_events_ensure, (VALUE)&arguments);
+ } else {
+ return RB_INT2NUM(0);
  }
-
- return INT2NUM(arguments.count);
  }

  VALUE IO_Event_Selector_EPoll_wakeup(VALUE self) {
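The core of this refactor is rb_ensure: event dispatch now runs in a body callback while the cleanup that releases the shared "saved" list runs in an ensure callback, so an exception raised while resuming a fiber can no longer skip the cleanup. A minimal sketch of the same pattern with illustrative names (not the gem's actual structs):

#include <ruby.h>

struct dispatch_arguments {
	int count;
	// ... temporary state that must always be cleaned up ...
};

static VALUE dispatch_body(VALUE _arguments) {
	struct dispatch_arguments *arguments = (struct dispatch_arguments *)_arguments;
	// Calls back into Ruby (e.g. resuming fibers) and may raise:
	return INT2NUM(arguments->count);
}

static VALUE dispatch_ensure(VALUE _arguments) {
	struct dispatch_arguments *arguments = (struct dispatch_arguments *)_arguments;
	// Always runs, even if dispatch_body raised; release temporary state here.
	(void)arguments;
	return Qnil;
}

static VALUE dispatch(int count) {
	struct dispatch_arguments arguments = {.count = count};
	// Both callbacks receive the same pointer, smuggled through a VALUE:
	return rb_ensure(dispatch_body, (VALUE)&arguments, dispatch_ensure, (VALUE)&arguments);
}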
@@ -433,6 +433,7 @@ struct process_wait_arguments {
  struct IO_Event_Selector_KQueue *selector;
  struct IO_Event_Selector_KQueue_Waiting *waiting;
  pid_t pid;
+ int flags;
  };

  static
@@ -461,7 +462,7 @@ VALUE process_wait_transfer(VALUE _arguments) {

  if (arguments->waiting->ready) {
  process_prewait(arguments->pid);
- return IO_Event_Selector_process_status_wait(arguments->pid);
+ return IO_Event_Selector_process_status_wait(arguments->pid, arguments->flags);
  } else {
  return Qfalse;
  }
@@ -483,6 +484,7 @@ VALUE IO_Event_Selector_KQueue_process_wait(VALUE self, VALUE fiber, VALUE _pid,
  TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);

  pid_t pid = NUM2PIDT(_pid);
+ int flags = NUM2INT(_flags);

  struct IO_Event_Selector_KQueue_Waiting waiting = {
  .list = {.type = &IO_Event_Selector_KQueue_process_wait_list_type},
@@ -494,6 +496,7 @@ VALUE IO_Event_Selector_KQueue_process_wait(VALUE self, VALUE fiber, VALUE _pid,
  .selector = selector,
  .waiting = &waiting,
  .pid = pid,
+ .flags = flags,
  };

  int result = IO_Event_Selector_KQueue_Waiting_register(selector, pid, &waiting);
@@ -502,7 +505,7 @@ VALUE IO_Event_Selector_KQueue_process_wait(VALUE self, VALUE fiber, VALUE _pid,
  if (errno == ESRCH) {
  process_prewait(pid);

- return IO_Event_Selector_process_status_wait(pid);
+ return IO_Event_Selector_process_status_wait(pid, flags);
  }

  rb_sys_fail("IO_Event_Selector_KQueue_process_wait:IO_Event_Selector_KQueue_Waiting_register");
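On macOS and the BSDs the selector registers the pid with kqueue rather than a pidfd; the ESRCH branch above covers the race where the child already exited before registration, in which case the status is fetched directly (still non-blocking). A standalone sketch of the underlying mechanism, EVFILT_PROC with NOTE_EXIT, with illustrative structure only (not the gem's registration code):

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <unistd.h>
#include <stdio.h>

int main(void) {
	int kq = kqueue();
	pid_t pid = fork();
	if (pid == 0) _exit(0); // Child exits immediately.
	
	// Ask for a one-shot notification when the process exits:
	struct kevent change;
	EV_SET(&change, pid, EVFILT_PROC, EV_ADD | EV_ONESHOT, NOTE_EXIT, 0, NULL);
	
	struct kevent event;
	if (kevent(kq, &change, 1, &event, 1, NULL) < 0) {
		// Registration can fail (e.g. the child is already gone); reap directly below.
		perror("kevent");
	}
	
	int status = 0;
	waitpid(pid, &status, WNOHANG);
	printf("exited: %d\n", WEXITSTATUS(status));
	return 0;
}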
@@ -818,6 +821,8 @@ struct select_arguments {

  struct timespec storage;
  struct timespec *timeout;
+
+ struct IO_Event_List saved;
  };

  static
@@ -859,7 +864,7 @@ void select_internal_with_gvl(struct select_arguments *arguments) {
  }

  static
- int IO_Event_Selector_KQueue_handle(struct IO_Event_Selector_KQueue *selector, uintptr_t identifier, struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor)
+ int IO_Event_Selector_KQueue_handle(struct IO_Event_Selector_KQueue *selector, uintptr_t identifier, struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor, struct IO_Event_List *saved)
  {
  // This is the mask of all events that occured for the given descriptor:
  enum IO_Event ready_events = kqueue_descriptor->ready_events;
@@ -874,7 +879,6 @@ int IO_Event_Selector_KQueue_handle(struct IO_Event_Selector_KQueue *selector, u

  struct IO_Event_List *list = &kqueue_descriptor->list;
  struct IO_Event_List *node = list->tail;
- struct IO_Event_List saved = {NULL, NULL};

  // Reset the events back to 0 so that we can re-arm if necessary:
  kqueue_descriptor->waiting_events = 0;
@@ -888,13 +892,13 @@ int IO_Event_Selector_KQueue_handle(struct IO_Event_Selector_KQueue *selector, u
  if (DEBUG) fprintf(stderr, "IO_Event_Selector_KQueue_handle: identifier=%lu, ready_events=%d, matching_events=%d\n", identifier, ready_events, matching_events);

  if (matching_events) {
- IO_Event_List_append(node, &saved);
+ IO_Event_List_append(node, saved);

  waiting->ready = matching_events;
  IO_Event_Selector_fiber_transfer(waiting->fiber, 0, NULL);

- node = saved.tail;
- IO_Event_List_pop(&saved);
+ node = saved->tail;
+ IO_Event_List_pop(saved);
  } else {
  kqueue_descriptor->waiting_events |= waiting->events;
  node = node->tail;
@@ -904,6 +908,43 @@ int IO_Event_Selector_KQueue_handle(struct IO_Event_Selector_KQueue *selector, u
  return IO_Event_Selector_KQueue_Descriptor_update(selector, identifier, kqueue_descriptor);
  }

+ static
+ VALUE select_handle_events(VALUE _arguments)
+ {
+ struct select_arguments *arguments = (struct select_arguments *)_arguments;
+ struct IO_Event_Selector_KQueue *selector = arguments->selector;
+
+ for (int i = 0; i < arguments->count; i += 1) {
+ if (arguments->events[i].udata) {
+ struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor = arguments->events[i].udata;
+ kqueue_descriptor->ready_events |= events_from_kevent_filter(arguments->events[i].filter);
+ }
+ }
+
+ for (int i = 0; i < arguments->count; i += 1) {
+ if (arguments->events[i].udata) {
+ struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor = arguments->events[i].udata;
+ IO_Event_Selector_KQueue_handle(selector, arguments->events[i].ident, kqueue_descriptor, &arguments->saved);
+ } else {
+ #ifdef IO_EVENT_SELECTOR_KQUEUE_USE_INTERRUPT
+ IO_Event_Interrupt_clear(&selector->interrupt);
+ #endif
+ }
+ }
+
+ return RB_INT2NUM(arguments->count);
+ }
+
+ static
+ VALUE select_handle_events_ensure(VALUE _arguments)
+ {
+ struct select_arguments *arguments = (struct select_arguments *)_arguments;
+
+ IO_Event_List_free(&arguments->saved);
+
+ return Qnil;
+ }
+
  VALUE IO_Event_Selector_KQueue_select(VALUE self, VALUE duration) {
  struct IO_Event_Selector_KQueue *selector = NULL;
  TypedData_Get_Struct(self, struct IO_Event_Selector_KQueue, &IO_Event_Selector_KQueue_Type, selector);
@@ -916,7 +957,8 @@ VALUE IO_Event_Selector_KQueue_select(VALUE self, VALUE duration) {
  .storage = {
  .tv_sec = 0,
  .tv_nsec = 0
- }
+ },
+ .saved = {},
  };

  arguments.timeout = &arguments.storage;
@@ -948,25 +990,11 @@ VALUE IO_Event_Selector_KQueue_select(VALUE self, VALUE duration) {
  }
  }

- for (int i = 0; i < arguments.count; i += 1) {
- if (arguments.events[i].udata) {
- struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor = arguments.events[i].udata;
- kqueue_descriptor->ready_events |= events_from_kevent_filter(arguments.events[i].filter);
- }
- }
-
- for (int i = 0; i < arguments.count; i += 1) {
- if (arguments.events[i].udata) {
- struct IO_Event_Selector_KQueue_Descriptor *kqueue_descriptor = arguments.events[i].udata;
- IO_Event_Selector_KQueue_handle(selector, arguments.events[i].ident, kqueue_descriptor);
- } else {
- #ifdef IO_EVENT_SELECTOR_KQUEUE_USE_INTERRUPT
- IO_Event_Interrupt_clear(&selector->interrupt);
- #endif
- }
+ if (arguments.count) {
+ return rb_ensure(select_handle_events, (VALUE)&arguments, select_handle_events_ensure, (VALUE)&arguments);
+ } else {
+ return RB_INT2NUM(0);
  }
-
- return RB_INT2NUM(arguments.count);
  }

  VALUE IO_Event_Selector_KQueue_wakeup(VALUE self) {
@@ -21,6 +21,7 @@ inline static void IO_Event_List_initialize(struct IO_Event_List *list)
  inline static void IO_Event_List_clear(struct IO_Event_List *list)
  {
  list->head = list->tail = NULL;
+ list->type = 0;
  }

  // Append an item to the end of the list.
@@ -64,7 +65,7 @@ inline static void IO_Event_List_pop(struct IO_Event_List *node)

  inline static void IO_Event_List_free(struct IO_Event_List *node)
  {
- if (node->head != node->tail) {
+ if (node->head && node->tail) {
  IO_Event_List_pop(node);
  }
  }
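These are nodes of an intrusive circular doubly-linked list: a node is "linked" exactly when both pointers are set. The change presumably matters because a node that is the only element of a list has head and tail pointing at the same sentinel, so the old inequality test misread it as unlinked and skipped the pop. A minimal self-contained sketch of that idea with simplified names (not the gem's header):

#include <stddef.h>
#include <stdio.h>

// A node is linked when both pointers are set; unlinked nodes are NULL/NULL.
struct list_node {
	struct list_node *head;
	struct list_node *tail;
};

static void list_remove(struct list_node *node) {
	// Unlink from neighbours, then mark as unlinked:
	node->head->tail = node->tail;
	node->tail->head = node->head;
	node->head = node->tail = NULL;
}

static void list_release(struct list_node *node) {
	// With a single element, node->head == node->tail (both point at the
	// sentinel), so an equality test cannot distinguish linked from unlinked;
	// checking both pointers for NULL can.
	if (node->head && node->tail) {
		list_remove(node);
	}
}

int main(void) {
	struct list_node sentinel = {&sentinel, &sentinel}; // empty circular list
	struct list_node item = {NULL, NULL};
	
	// Insert item as the only element:
	item.head = &sentinel; item.tail = sentinel.tail;
	sentinel.tail = &item; item.tail->head = &item;
	
	list_release(&item); // unlinks
	list_release(&item); // safe no-op
	printf("empty: %d\n", sentinel.head == &sentinel && sentinel.tail == &sentinel);
	return 0;
}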
@@ -25,10 +25,6 @@ static const int DEBUG = 0;

  static ID id_transfer, id_alive_p;

- #ifndef HAVE_RB_PROCESS_STATUS_WAIT
- static VALUE process_wnohang;
- #endif
-
  VALUE IO_Event_Selector_fiber_transfer(VALUE fiber, int argc, VALUE *argv) {
  // TODO Consider introducing something like `rb_fiber_scheduler_transfer(...)`.
  #ifdef HAVE__RB_FIBER_TRANSFER
@@ -76,9 +72,9 @@ int IO_Event_Selector_io_descriptor(VALUE io) {
  static ID id_wait;
  static VALUE rb_Process_Status = Qnil;

- VALUE IO_Event_Selector_process_status_wait(rb_pid_t pid)
+ VALUE IO_Event_Selector_process_status_wait(rb_pid_t pid, int flags)
  {
- return rb_funcall(rb_Process_Status, id_wait, 2, PIDT2NUM(pid), process_wnohang);
+ return rb_funcall(rb_Process_Status, id_wait, 2, PIDT2NUM(pid), INT2NUM(flags | WNOHANG));
  }
  #endif

@@ -157,7 +153,6 @@ void Init_IO_Event_Selector(VALUE IO_Event_Selector) {

  #ifndef HAVE_RB_PROCESS_STATUS_WAIT
  id_wait = rb_intern("wait");
- process_wnohang = rb_const_get(rb_mProcess, rb_intern("WNOHANG"));
  rb_Process_Status = rb_const_get_at(rb_mProcess, rb_intern("Status"));
  rb_gc_register_mark_object(rb_Process_Status);
  #endif
@@ -66,10 +66,11 @@ VALUE IO_Event_Selector_fiber_raise(VALUE fiber, int argc, VALUE *argv);
  int IO_Event_Selector_io_descriptor(VALUE io);
  #endif

+ // Reap a process without hanging.
  #ifdef HAVE_RB_PROCESS_STATUS_WAIT
- #define IO_Event_Selector_process_status_wait(pid) rb_process_status_wait(pid)
+ #define IO_Event_Selector_process_status_wait(pid, flags) rb_process_status_wait(pid, flags | WNOHANG)
  #else
- VALUE IO_Event_Selector_process_status_wait(rb_pid_t pid);
+ VALUE IO_Event_Selector_process_status_wait(rb_pid_t pid, int flags);
  #endif

  int IO_Event_Selector_nonblock_set(int file_descriptor);
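Both branches now force WNOHANG into the caller-supplied flags, so the status query can never block the event loop: the selectors only call it after they have already been told the child exited, or when registration failed because the child was already gone. A standalone sketch of those waitpid(2) semantics (illustrative, independent of the Ruby C API):

#include <sys/wait.h>
#include <unistd.h>
#include <stdio.h>

int main(void) {
	pid_t pid = fork();
	if (pid == 0) {
		sleep(1); // Child keeps running for a moment.
		_exit(7);
	}
	
	int status = 0;
	// With WNOHANG, waitpid returns 0 immediately if the child has not exited yet:
	pid_t result = waitpid(pid, &status, WNOHANG);
	printf("first attempt: %d (0 means still running)\n", (int)result);
	
	// Without WNOHANG it blocks until the child is done:
	result = waitpid(pid, &status, 0);
	printf("second attempt: %d, exit status %d\n", (int)result, WEXITSTATUS(status));
	return 0;
}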
@@ -326,6 +326,32 @@ VALUE IO_Event_Selector_URing_ready_p(VALUE self) {

  #pragma mark - Submission Queue

+ static
+ void IO_Event_Selector_URing_dump_completion_queue(struct IO_Event_Selector_URing *selector)
+ {
+ struct io_uring *ring = &selector->ring;
+ unsigned head;
+ struct io_uring_cqe *cqe;
+
+ if (DEBUG) {
+ int first = 1;
+ io_uring_for_each_cqe(ring, head, cqe) {
+ if (!first) {
+ fprintf(stderr, ", ");
+ }
+ else {
+ fprintf(stderr, "CQ: [");
+ first = 0;
+ }
+
+ fprintf(stderr, "%d:%p", (int)cqe->res, (void*)cqe->user_data);
+ }
+ if (!first) {
+ fprintf(stderr, "]\n");
+ }
+ }
+ }
+
  // Flush the submission queue if pending operations are present.
  static
  int io_uring_submit_flush(struct IO_Event_Selector_URing *selector) {
@@ -345,19 +371,24 @@ int io_uring_submit_flush(struct IO_Event_Selector_URing *selector) {
  return result;
  }

+ if (DEBUG) {
+ IO_Event_Selector_URing_dump_completion_queue(selector);
+ }
+
  return 0;
  }

  // Immediately flush the submission queue, yielding to the event loop if it was not successful.
  static
  int io_uring_submit_now(struct IO_Event_Selector_URing *selector) {
- if (DEBUG && selector->pending) fprintf(stderr, "io_uring_submit_now(pending=%ld)\n", selector->pending);
-
+ if (DEBUG) fprintf(stderr, "io_uring_submit_now(pending=%ld)\n", selector->pending);
+
  while (true) {
  int result = io_uring_submit(&selector->ring);

  if (result >= 0) {
  selector->pending = 0;
+ if (DEBUG) IO_Event_Selector_URing_dump_completion_queue(selector);
  return result;
  }

@@ -369,12 +400,6 @@ int io_uring_submit_now(struct IO_Event_Selector_URing *selector) {
  }
  }

- static
- void IO_Event_Selector_URing_submit_sqe(struct io_uring_sqe *sqe)
- {
- if (DEBUG) fprintf(stderr, "IO_Event_Selector_URing_submit_sqe(%p): user_data=%p opcode=%d\n", sqe, (void*)sqe->user_data, sqe->opcode);
- }
-
  // Submit a pending operation. This does not submit the operation immediately, but instead defers it to the next call to `io_uring_submit_flush` or `io_uring_submit_now`. This is useful for operations that are not urgent, but should be used with care as it can lead to a deadlock if the submission queue is not flushed.
  static
  void io_uring_submit_pending(struct IO_Event_Selector_URing *selector) {
@@ -403,6 +428,7 @@ struct process_wait_arguments {
  struct IO_Event_Selector_URing_Waiting *waiting;

  pid_t pid;
+ int flags;
  int descriptor;
  };

@@ -413,7 +439,7 @@ VALUE process_wait_transfer(VALUE _arguments) {
  IO_Event_Selector_fiber_transfer(arguments->selector->backend.loop, 0, NULL);

  if (arguments->waiting->result) {
- return IO_Event_Selector_process_status_wait(arguments->pid);
+ return IO_Event_Selector_process_status_wait(arguments->pid, arguments->flags);
  } else {
  return Qfalse;
  }
@@ -435,6 +461,7 @@ VALUE IO_Event_Selector_URing_process_wait(VALUE self, VALUE fiber, VALUE _pid,
  TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);

  pid_t pid = NUM2PIDT(_pid);
+ int flags = NUM2INT(_flags);

  int descriptor = pidfd_open(pid, 0);
  if (descriptor < 0) {
@@ -452,15 +479,14 @@ VALUE IO_Event_Selector_URing_process_wait(VALUE self, VALUE fiber, VALUE _pid,
  .selector = selector,
  .waiting = &waiting,
  .pid = pid,
+ .flags = flags,
  .descriptor = descriptor,
  };

- struct io_uring_sqe *sqe = io_get_sqe(selector);
-
  if (DEBUG) fprintf(stderr, "IO_Event_Selector_URing_process_wait:io_uring_prep_poll_add(%p)\n", (void*)fiber);
+ struct io_uring_sqe *sqe = io_get_sqe(selector);
  io_uring_prep_poll_add(sqe, descriptor, POLLIN|POLLHUP|POLLERR);
  io_uring_sqe_set_data(sqe, completion);
- IO_Event_Selector_URing_submit_sqe(sqe);
  io_uring_submit_pending(selector);

  return rb_ensure(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_ensure, (VALUE)&process_wait_arguments);
@@ -504,14 +530,12 @@ static
  VALUE io_wait_ensure(VALUE _arguments) {
  struct io_wait_arguments *arguments = (struct io_wait_arguments *)_arguments;

- if (DEBUG) fprintf(stderr, "io_wait_ensure:io_uring_prep_cancel(waiting=%p, completion=%p)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion);
-
  // If the operation is still in progress, cancel it:
  if (arguments->waiting->completion) {
+ if (DEBUG) fprintf(stderr, "io_wait_ensure:io_uring_prep_cancel(waiting=%p, completion=%p)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion);
  struct io_uring_sqe *sqe = io_get_sqe(arguments->selector);
  io_uring_prep_cancel(sqe, (void*)arguments->waiting->completion, 0);
  io_uring_sqe_set_data(sqe, NULL);
- IO_Event_Selector_URing_submit_sqe(sqe);
  io_uring_submit_now(arguments->selector);
  }

@@ -543,23 +567,20 @@ VALUE IO_Event_Selector_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE e
  TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);

  int descriptor = IO_Event_Selector_io_descriptor(io);
- struct io_uring_sqe *sqe = io_get_sqe(selector);

  short flags = poll_flags_from_events(NUM2INT(events));

  if (DEBUG) fprintf(stderr, "IO_Event_Selector_URing_io_wait:io_uring_prep_poll_add(descriptor=%d, flags=%d, fiber=%p)\n", descriptor, flags, (void*)fiber);

- io_uring_prep_poll_add(sqe, descriptor, flags);
-
  struct IO_Event_Selector_URing_Waiting waiting = {
  .fiber = fiber,
  };

  struct IO_Event_Selector_URing_Completion *completion = IO_Event_Selector_URing_Completion_acquire(selector, &waiting);

+ struct io_uring_sqe *sqe = io_get_sqe(selector);
+ io_uring_prep_poll_add(sqe, descriptor, flags);
  io_uring_sqe_set_data(sqe, completion);
- IO_Event_Selector_URing_submit_sqe(sqe);
-
  // If we are going to wait, we assume that we are waiting for a while:
  io_uring_submit_pending(selector);

@@ -607,13 +628,12 @@ io_read_submit(VALUE _arguments)
  {
  struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
  struct IO_Event_Selector_URing *selector = arguments->selector;
- struct io_uring_sqe *sqe = io_get_sqe(selector);

  if (DEBUG) fprintf(stderr, "io_read_submit:io_uring_prep_read(waiting=%p, completion=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion, arguments->descriptor, arguments->buffer, arguments->length);

+ struct io_uring_sqe *sqe = io_get_sqe(selector);
  io_uring_prep_read(sqe, arguments->descriptor, arguments->buffer, arguments->length, io_seekable(arguments->descriptor));
  io_uring_sqe_set_data(sqe, arguments->waiting->completion);
- IO_Event_Selector_URing_submit_sqe(sqe);
  io_uring_submit_now(selector);

  IO_Event_Selector_fiber_transfer(selector->backend.loop, 0, NULL);
@@ -627,15 +647,12 @@ io_read_ensure(VALUE _arguments)
  struct io_read_arguments *arguments = (struct io_read_arguments *)_arguments;
  struct IO_Event_Selector_URing *selector = arguments->selector;

- struct io_uring_sqe *sqe = io_get_sqe(selector);
-
- if (DEBUG) fprintf(stderr, "io_read_ensure:io_uring_prep_cancel(waiting=%p, completion=%p)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion);
-
  // If the operation is still in progress, cancel it:
  if (arguments->waiting->completion) {
+ if (DEBUG) fprintf(stderr, "io_read_ensure:io_uring_prep_cancel(waiting=%p, completion=%p)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion);
+ struct io_uring_sqe *sqe = io_get_sqe(selector);
  io_uring_prep_cancel(sqe, (void*)arguments->waiting->completion, 0);
  io_uring_sqe_set_data(sqe, NULL);
- IO_Event_Selector_URing_submit_sqe(sqe);
  io_uring_submit_now(selector);
  }

@@ -732,13 +749,11 @@ io_write_submit(VALUE _argument)
  struct io_write_arguments *arguments = (struct io_write_arguments*)_argument;
  struct IO_Event_Selector_URing *selector = arguments->selector;

- struct io_uring_sqe *sqe = io_get_sqe(selector);
-
  if (DEBUG) fprintf(stderr, "io_write_submit:io_uring_prep_write(waiting=%p, completion=%p, descriptor=%d, buffer=%p, length=%ld)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion, arguments->descriptor, arguments->buffer, arguments->length);

+ struct io_uring_sqe *sqe = io_get_sqe(selector);
  io_uring_prep_write(sqe, arguments->descriptor, arguments->buffer, arguments->length, io_seekable(arguments->descriptor));
  io_uring_sqe_set_data(sqe, arguments->waiting->completion);
- IO_Event_Selector_URing_submit_sqe(sqe);
  io_uring_submit_pending(selector);

  IO_Event_Selector_fiber_transfer(selector->backend.loop, 0, NULL);
@@ -752,15 +767,12 @@ io_write_ensure(VALUE _argument)
  struct io_write_arguments *arguments = (struct io_write_arguments*)_argument;
  struct IO_Event_Selector_URing *selector = arguments->selector;

- struct io_uring_sqe *sqe = io_get_sqe(selector);
-
- if (DEBUG) fprintf(stderr, "io_write_ensure:io_uring_prep_cancel(waiting=%p, completion=%p)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion);
-
  // If the operation is still in progress, cancel it:
  if (arguments->waiting->completion) {
+ if (DEBUG) fprintf(stderr, "io_write_ensure:io_uring_prep_cancel(waiting=%p, completion=%p)\n", (void*)arguments->waiting, (void*)arguments->waiting->completion);
+ struct io_uring_sqe *sqe = io_get_sqe(selector);
  io_uring_prep_cancel(sqe, (void*)arguments->waiting->completion, 0);
  io_uring_sqe_set_data(sqe, NULL);
- IO_Event_Selector_URing_submit_sqe(sqe);
  io_uring_submit_now(selector);
  }

@@ -859,10 +871,8 @@ VALUE IO_Event_Selector_URing_io_close(VALUE self, VALUE io) {

  if (ASYNC_CLOSE) {
  struct io_uring_sqe *sqe = io_get_sqe(selector);
-
  io_uring_prep_close(sqe, descriptor);
  io_uring_sqe_set_data(sqe, NULL);
- IO_Event_Selector_URing_submit_sqe(sqe);
  io_uring_submit_now(selector);
  } else {
  close(descriptor);
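Across these hunks the SQE is now acquired immediately before it is prepared, and the no-op IO_Event_Selector_URing_submit_sqe debug helper is removed; the essential liburing sequence remains acquire, prepare, attach user data, submit. A standalone sketch of that sequence against a plain descriptor (illustrative, using only the public liburing API rather than the gem's wrappers):

#include <liburing.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void) {
	struct io_uring ring;
	io_uring_queue_init(8, &ring, 0);
	
	// Acquire an SQE only once we are ready to fill it in:
	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_add(sqe, STDIN_FILENO, POLLIN);
	io_uring_sqe_set_data(sqe, (void*)0x1); // Tag used to match the completion.
	io_uring_submit(&ring);
	
	// Wait for the matching completion and consume it:
	struct io_uring_cqe *cqe = NULL;
	io_uring_wait_cqe(&ring, &cqe);
	printf("res=%d user_data=%p\n", cqe->res, (void*)cqe->user_data);
	io_uring_cqe_seen(&ring, cqe);
	
	io_uring_queue_exit(&ring);
	return 0;
}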
@@ -953,7 +963,10 @@ unsigned select_process_completions(struct IO_Event_Selector_URing *selector) {
  unsigned head;
  struct io_uring_cqe *cqe;

- if (DEBUG) fprintf(stderr, "select_process_completions...\n");
+ if (DEBUG) {
+ fprintf(stderr, "select_process_completions: selector=%p\n", (void*)selector);
+ IO_Event_Selector_URing_dump_completion_queue(selector);
+ }

  io_uring_for_each_cqe(ring, head, cqe) {
  if (DEBUG) fprintf(stderr, "select_process_completions: cqe res=%d user_data=%p\n", cqe->res, (void*)cqe->user_data);
@@ -976,15 +989,15 @@ unsigned select_process_completions(struct IO_Event_Selector_URing *selector) {
  waiting->flags = cqe->flags;
  }

+ io_uring_cq_advance(ring, 1);
+ // This marks the waiting operation as "complete":
+ IO_Event_Selector_URing_Completion_release(selector, completion);
+
  if (waiting && waiting->fiber) {
  assert(waiting->result != -ECANCELED);

  IO_Event_Selector_fiber_transfer(waiting->fiber, 0, NULL);
  }
-
- // This marks the waiting operation as "complete":
- IO_Event_Selector_URing_Completion_release(selector, completion);
- io_uring_cq_advance(ring, 1);
  }

  if (DEBUG && completed > 0) fprintf(stderr, "select_process_completions(completed=%d)\n", completed);
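Advancing the completion queue and releasing the completion before transferring to the fiber matters because the resumed fiber may re-enter the selector and submit or reap more operations; by then the current CQE must already be consumed. A standalone sketch of batch CQE processing in that order (illustrative, public liburing API only, with a hypothetical callback type):

#include <liburing.h>
#include <stdint.h>

// Process every available completion; consume each CQE before invoking the
// callback, so the callback may safely submit new work on the same ring.
static unsigned process_completions(struct io_uring *ring, void (*callback)(void *user_data, int result)) {
	unsigned head;
	unsigned completed = 0;
	struct io_uring_cqe *cqe;
	
	io_uring_for_each_cqe(ring, head, cqe) {
		void *user_data = (void*)(uintptr_t)cqe->user_data;
		int result = cqe->res;
		
		// Mark this CQE as seen before running user code:
		io_uring_cq_advance(ring, 1);
		completed += 1;
		
		if (user_data) callback(user_data, result);
	}
	
	return completed;
}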
@@ -160,7 +160,66 @@ module IO::Event
  errno == EAGAIN or errno == EWOULDBLOCK
  end

- if Support.fiber_scheduler_v2?
+ if Support.fiber_scheduler_v3?
+ # Ruby 3.3+, full IO::Buffer support.
+
+ # @parameter length [Integer] The minimum number of bytes to read.
+ # @parameter offset [Integer] The offset into the buffer to read to.
+ def io_read(fiber, io, buffer, length, offset = 0)
+ total = 0
+
+ Selector.nonblock(io) do
+ while true
+ result = Fiber.blocking{buffer.read(io, 0, offset)}
+
+ if result < 0
+ if again?(result)
+ self.io_wait(fiber, io, IO::READABLE)
+ else
+ return result
+ end
+ elsif result == 0
+ break
+ else
+ total += result
+ break if total >= length
+ offset += result
+ end
+ end
+ end
+
+ return total
+ end
+
+ # @parameter length [Integer] The minimum number of bytes to write.
+ # @parameter offset [Integer] The offset into the buffer to write from.
+ def io_write(fiber, io, buffer, length, offset = 0)
+ total = 0
+
+ Selector.nonblock(io) do
+ while true
+ result = Fiber.blocking{buffer.write(io, 0, offset)}
+
+ if result < 0
+ if again?(result)
+ self.io_wait(fiber, io, IO::READABLE)
+ else
+ return result
+ end
+ elsif result == 0
+ break result
+ else
+ total += result
+ break if total >= length
+ offset += result
+ end
+ end
+ end
+
+ return total
+ end
+ elsif Support.fiber_scheduler_v2?
+ # Ruby 3.2, most IO::Buffer support, but slightly clunky read/write methods.
  def io_read(fiber, io, buffer, length, offset = 0)
  total = 0

@@ -219,8 +278,11 @@ module IO::Event
  return total
  end
  elsif Support.fiber_scheduler_v1?
+ # Ruby <= 3.1, limited IO::Buffer support.
  def io_read(fiber, _io, buffer, length, offset = 0)
+ # We need to avoid any internal buffering, so we use a duplicated IO object:
  io = IO.for_fd(_io.fileno, autoclose: false)
+
  total = 0

  maximum_size = buffer.size - offset
@@ -261,7 +323,9 @@ module IO::Event
  end

  def io_write(fiber, _io, buffer, length, offset = 0)
+ # We need to avoid any internal buffering, so we use a duplicated IO object:
  io = IO.for_fd(_io.fileno, autoclose: false)
+
  total = 0

  maximum_size = buffer.size - offset
@@ -17,6 +17,17 @@ class IO
  def self.fiber_scheduler_v2?
  IO.const_defined?(:Buffer) and Fiber.respond_to?(:blocking) and IO::Buffer.instance_method(:read).arity == -1
  end
+
+ def self.fiber_scheduler_v3?
+ if fiber_scheduler_v2?
+ begin
+ IO::Buffer.new.slice(0, 0).write(STDOUT)
+ return true
+ rescue
+ return false
+ end
+ end
+ end
  end
  end
  end
@@ -5,6 +5,6 @@

  class IO
  module Event
- VERSION = "1.3.1"
+ VERSION = "1.3.3"
  end
  end
data.tar.gz.sig CHANGED
Binary file
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: io-event
  version: !ruby/object:Gem::Version
- version: 1.3.1
+ version: 1.3.3
  platform: ruby
  authors:
  - Samuel Williams
@@ -42,7 +42,7 @@ cert_chain:
  Q2K9NVun/S785AP05vKkXZEFYxqG6EW012U4oLcFl5MySFajYXRYbuUpH6AY+HP8
  voD0MPg1DssDLKwXyt1eKD/+Fq0bFWhwVM/1XiAXL7lyYUyOq24KHgQ2Csg=
  -----END CERTIFICATE-----
- date: 2023-08-23 00:00:00.000000000 Z
+ date: 2023-10-24 00:00:00.000000000 Z
  dependencies: []
  description:
  email:
metadata.gz.sig CHANGED
Binary file