uringmachine 0.19 → 0.20.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (89)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +15 -0
  3. data/TODO.md +40 -0
  4. data/examples/bm_fileno.rb +33 -0
  5. data/examples/bm_mutex.rb +85 -0
  6. data/examples/bm_mutex_single.rb +33 -0
  7. data/examples/bm_queue.rb +27 -28
  8. data/examples/bm_send.rb +2 -5
  9. data/examples/bm_snooze.rb +20 -42
  10. data/examples/fiber_scheduler_demo.rb +15 -51
  11. data/examples/fiber_scheduler_fork.rb +24 -0
  12. data/examples/nc_ssl.rb +71 -0
  13. data/ext/um/extconf.rb +5 -15
  14. data/ext/um/um.c +73 -42
  15. data/ext/um/um.h +21 -11
  16. data/ext/um/um_async_op_class.c +2 -2
  17. data/ext/um/um_buffer.c +1 -1
  18. data/ext/um/um_class.c +94 -23
  19. data/ext/um/um_const.c +51 -3
  20. data/ext/um/um_mutex_class.c +1 -1
  21. data/ext/um/um_queue_class.c +1 -1
  22. data/ext/um/um_stream.c +5 -5
  23. data/ext/um/um_stream_class.c +3 -0
  24. data/ext/um/um_sync.c +22 -27
  25. data/ext/um/um_utils.c +59 -19
  26. data/grant-2025/journal.md +229 -0
  27. data/grant-2025/tasks.md +66 -0
  28. data/lib/uringmachine/fiber_scheduler.rb +180 -48
  29. data/lib/uringmachine/version.rb +1 -1
  30. data/lib/uringmachine.rb +6 -0
  31. data/test/test_fiber_scheduler.rb +138 -0
  32. data/test/test_stream.rb +2 -2
  33. data/test/test_um.rb +451 -33
  34. data/vendor/liburing/.github/workflows/ci.yml +94 -1
  35. data/vendor/liburing/.github/workflows/test_build.c +9 -0
  36. data/vendor/liburing/configure +27 -0
  37. data/vendor/liburing/examples/Makefile +6 -0
  38. data/vendor/liburing/examples/helpers.c +8 -0
  39. data/vendor/liburing/examples/helpers.h +5 -0
  40. data/vendor/liburing/liburing.spec +1 -1
  41. data/vendor/liburing/src/Makefile +9 -3
  42. data/vendor/liburing/src/include/liburing/barrier.h +11 -5
  43. data/vendor/liburing/src/include/liburing/io_uring/query.h +41 -0
  44. data/vendor/liburing/src/include/liburing/io_uring.h +50 -0
  45. data/vendor/liburing/src/include/liburing/sanitize.h +16 -4
  46. data/vendor/liburing/src/include/liburing.h +445 -121
  47. data/vendor/liburing/src/liburing-ffi.map +15 -0
  48. data/vendor/liburing/src/liburing.map +8 -0
  49. data/vendor/liburing/src/sanitize.c +4 -1
  50. data/vendor/liburing/src/setup.c +7 -4
  51. data/vendor/liburing/test/232c93d07b74.c +4 -16
  52. data/vendor/liburing/test/Makefile +15 -1
  53. data/vendor/liburing/test/accept.c +2 -13
  54. data/vendor/liburing/test/conn-unreach.c +132 -0
  55. data/vendor/liburing/test/fd-pass.c +32 -7
  56. data/vendor/liburing/test/fdinfo.c +39 -12
  57. data/vendor/liburing/test/fifo-futex-poll.c +114 -0
  58. data/vendor/liburing/test/fifo-nonblock-read.c +1 -12
  59. data/vendor/liburing/test/futex.c +1 -1
  60. data/vendor/liburing/test/helpers.c +99 -2
  61. data/vendor/liburing/test/helpers.h +9 -0
  62. data/vendor/liburing/test/io_uring_passthrough.c +6 -12
  63. data/vendor/liburing/test/mock_file.c +379 -0
  64. data/vendor/liburing/test/mock_file.h +47 -0
  65. data/vendor/liburing/test/nop.c +2 -2
  66. data/vendor/liburing/test/nop32-overflow.c +150 -0
  67. data/vendor/liburing/test/nop32.c +126 -0
  68. data/vendor/liburing/test/pipe.c +166 -0
  69. data/vendor/liburing/test/poll-race-mshot.c +13 -1
  70. data/vendor/liburing/test/recv-mshot-fair.c +81 -34
  71. data/vendor/liburing/test/recvsend_bundle.c +1 -1
  72. data/vendor/liburing/test/resize-rings.c +2 -0
  73. data/vendor/liburing/test/ring-query.c +322 -0
  74. data/vendor/liburing/test/ringbuf-loop.c +87 -0
  75. data/vendor/liburing/test/runtests.sh +2 -2
  76. data/vendor/liburing/test/send-zerocopy.c +43 -5
  77. data/vendor/liburing/test/send_recv.c +102 -32
  78. data/vendor/liburing/test/shutdown.c +2 -12
  79. data/vendor/liburing/test/socket-nb.c +3 -14
  80. data/vendor/liburing/test/socket-rw-eagain.c +2 -12
  81. data/vendor/liburing/test/socket-rw-offset.c +2 -12
  82. data/vendor/liburing/test/socket-rw.c +2 -12
  83. data/vendor/liburing/test/sqe-mixed-bad-wrap.c +87 -0
  84. data/vendor/liburing/test/sqe-mixed-nop.c +82 -0
  85. data/vendor/liburing/test/sqe-mixed-uring_cmd.c +153 -0
  86. data/vendor/liburing/test/timestamp.c +56 -19
  87. data/vendor/liburing/test/vec-regbuf.c +2 -4
  88. data/vendor/liburing/test/wq-aff.c +7 -0
  89. metadata +24 -2
data/ext/um/um.c CHANGED
@@ -1,6 +1,6 @@
1
1
  #include <float.h>
2
2
  #include "um.h"
3
- #include "ruby/thread.h"
3
+ #include <ruby/thread.h>
4
4
 
5
5
  void um_setup(VALUE self, struct um *machine) {
6
6
  memset(machine, 0, sizeof(struct um));
@@ -43,7 +43,7 @@ inline struct io_uring_sqe *um_get_sqe(struct um *machine, struct um_op *op) {
43
43
  sqe = io_uring_get_sqe(&machine->ring);
44
44
  if (likely(sqe)) goto done;
45
45
 
46
- rb_raise(rb_eRuntimeError, "Failed to get SQE");
46
+ um_raise_internal_error("Failed to get SQE");
47
47
 
48
48
  // TODO: retry getting SQE?
49
49
 
@@ -194,7 +194,10 @@ inline VALUE process_runqueue_op(struct um *machine, struct um_op *op) {
194
194
  if (unlikely(op->flags & OP_F_TRANSIENT))
195
195
  um_op_free(machine, op);
196
196
 
197
- return rb_fiber_transfer(fiber, 1, &value);
197
+ VALUE ret = rb_fiber_transfer(fiber, 1, &value);
198
+ RB_GC_GUARD(value);
199
+ RB_GC_GUARD(ret);
200
+ return ret;
198
201
  }
199
202
 
200
203
  inline VALUE um_fiber_switch(struct um *machine) {
@@ -266,6 +269,7 @@ inline void um_prep_op(struct um *machine, struct um_op *op, enum op_kind kind,
266
269
  VALUE fiber = (flags & OP_F_FREE_ON_COMPLETE) ? Qnil : rb_fiber_current();
267
270
  RB_OBJ_WRITE(machine->self, &op->fiber, fiber);
268
271
  RB_OBJ_WRITE(machine->self, &op->value, Qnil);
272
+ RB_OBJ_WRITE(machine->self, &op->async_op, Qnil);
269
273
  }
270
274
 
271
275
  inline void um_schedule(struct um *machine, VALUE fiber, VALUE value) {
@@ -275,6 +279,7 @@ inline void um_schedule(struct um *machine, VALUE fiber, VALUE value) {
275
279
  op->flags = OP_F_TRANSIENT;
276
280
  RB_OBJ_WRITE(machine->self, &op->fiber, fiber);
277
281
  RB_OBJ_WRITE(machine->self, &op->value, value);
282
+ RB_OBJ_WRITE(machine->self, &op->async_op, Qnil);
278
283
  um_runqueue_push(machine, op);
279
284
  }
280
285
 
@@ -311,6 +316,7 @@ VALUE um_timeout(struct um *machine, VALUE interval, VALUE class) {
311
316
  op->ts = um_double_to_timespec(NUM2DBL(interval));
312
317
  RB_OBJ_WRITE(machine->self, &op->fiber, rb_fiber_current());
313
318
  RB_OBJ_WRITE(machine->self, &op->value, rb_funcall(class, ID_new, 0));
319
+ RB_OBJ_WRITE(machine->self, &op->async_op, Qnil);
314
320
 
315
321
  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
316
322
  io_uring_prep_timeout(sqe, &op->ts, 0, 0);
@@ -347,7 +353,7 @@ VALUE um_sleep(struct um *machine, double duration) {
347
353
  return ret;
348
354
  }
349
355
 
350
- inline VALUE um_read(struct um *machine, int fd, VALUE buffer, int maxlen, int buffer_offset) {
356
+ VALUE um_read(struct um *machine, int fd, VALUE buffer, size_t maxlen, ssize_t buffer_offset) {
351
357
  struct um_op op;
352
358
  um_prep_op(machine, &op, OP_READ, 0);
353
359
  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
@@ -360,14 +366,13 @@ inline VALUE um_read(struct um *machine, int fd, VALUE buffer, int maxlen, int b
360
366
  ret = INT2NUM(op.result.res);
361
367
 
362
368
  }
363
- RB_GC_GUARD(buffer);
364
369
 
365
370
  RAISE_IF_EXCEPTION(ret);
366
371
  RB_GC_GUARD(ret);
367
372
  return ret;
368
373
  }
369
374
 
370
- inline size_t um_read_raw(struct um *machine, int fd, char *buffer, int maxlen) {
375
+ size_t um_read_raw(struct um *machine, int fd, char *buffer, size_t maxlen) {
371
376
  struct um_op op;
372
377
  um_prep_op(machine, &op, OP_READ, 0);
373
378
  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
@@ -384,36 +389,43 @@ inline size_t um_read_raw(struct um *machine, int fd, char *buffer, int maxlen)
384
389
  return 0;
385
390
  }
386
391
 
387
- VALUE um_write(struct um *machine, int fd, VALUE str, int len) {
392
+ VALUE um_write(struct um *machine, int fd, VALUE buffer, size_t len) {
388
393
  struct um_op op;
389
394
  um_prep_op(machine, &op, OP_WRITE, 0);
390
395
  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
391
- const int str_len = RSTRING_LEN(str);
392
- if (len > str_len) len = str_len;
393
396
 
394
- io_uring_prep_write(sqe, fd, RSTRING_PTR(str), len, -1);
397
+ const void *base;
398
+ size_t size;
399
+ um_get_buffer_bytes_for_writing(buffer, &base, &size);
400
+ if ((len == (size_t)-1) || (len > size)) len = size;
401
+
402
+ io_uring_prep_write(sqe, fd, base, len, -1);
395
403
 
396
404
  VALUE ret = um_fiber_switch(machine);
397
405
  if (um_check_completion(machine, &op))
398
406
  ret = INT2NUM(op.result.res);
399
407
 
400
- RB_GC_GUARD(str);
401
-
402
408
  RAISE_IF_EXCEPTION(ret);
403
409
  RB_GC_GUARD(ret);
404
410
  return ret;
405
411
  }
406
412
 
407
- VALUE um_write_async(struct um *machine, int fd, VALUE str) {
413
+ VALUE um_write_async(struct um *machine, int fd, VALUE buffer) {
408
414
  struct um_op *op = um_op_alloc(machine);
409
415
  um_prep_op(machine, op, OP_WRITE_ASYNC, OP_F_TRANSIENT | OP_F_FREE_ON_COMPLETE);
410
- RB_OBJ_WRITE(machine->self, &op->value, str);
416
+ RB_OBJ_WRITE(machine->self, &op->fiber, Qnil);
417
+ RB_OBJ_WRITE(machine->self, &op->value, buffer);
418
+ RB_OBJ_WRITE(machine->self, &op->async_op, Qnil);
419
+
420
+ const void *base;
421
+ size_t size;
422
+ um_get_buffer_bytes_for_writing(buffer, &base, &size);
411
423
 
412
424
  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
413
- io_uring_prep_write(sqe, fd, RSTRING_PTR(str), RSTRING_LEN(str), -1);
425
+ io_uring_prep_write(sqe, fd, base, size, -1);
414
426
  um_op_transient_add(machine, op);
415
427
 
416
- return str;
428
+ return buffer;
417
429
  }
418
430
 
419
431
  VALUE um_close(struct um *machine, int fd) {
@@ -434,6 +446,9 @@ VALUE um_close(struct um *machine, int fd) {
434
446
  VALUE um_close_async(struct um *machine, int fd) {
435
447
  struct um_op *op = um_op_alloc(machine);
436
448
  um_prep_op(machine, op, OP_CLOSE_ASYNC, OP_F_FREE_ON_COMPLETE);
449
+ RB_OBJ_WRITE(machine->self, &op->fiber, Qnil);
450
+ RB_OBJ_WRITE(machine->self, &op->value, Qnil);
451
+ RB_OBJ_WRITE(machine->self, &op->async_op, Qnil);
437
452
 
438
453
  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
439
454
  io_uring_prep_close(sqe, fd);
@@ -486,18 +501,22 @@ VALUE um_connect(struct um *machine, int fd, const struct sockaddr *addr, sockle
486
501
  return ret;
487
502
  }
488
503
 
489
- VALUE um_send(struct um *machine, int fd, VALUE buffer, int len, int flags) {
504
+ VALUE um_send(struct um *machine, int fd, VALUE buffer, size_t len, int flags) {
490
505
  struct um_op op;
491
506
  um_prep_op(machine, &op, OP_SEND, 0);
492
507
  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
493
- io_uring_prep_send(sqe, fd, RSTRING_PTR(buffer), len, flags);
508
+
509
+ const void *base;
510
+ size_t size;
511
+ um_get_buffer_bytes_for_writing(buffer, &base, &size);
512
+ if ((len == (size_t)-1) || (len > size)) len = size;
513
+
514
+ io_uring_prep_send(sqe, fd, base, len, flags);
494
515
 
495
516
  VALUE ret = um_fiber_switch(machine);
496
517
  if (um_check_completion(machine, &op))
497
518
  ret = INT2NUM(op.result.res);
498
519
 
499
- RB_GC_GUARD(buffer);
500
-
501
520
  RAISE_IF_EXCEPTION(ret);
502
521
  RB_GC_GUARD(ret);
503
522
  return ret;
@@ -523,11 +542,12 @@ VALUE um_send_bundle(struct um *machine, int fd, int bgid, VALUE strings) {
523
542
  return ret;
524
543
  }
525
544
 
526
- VALUE um_recv(struct um *machine, int fd, VALUE buffer, int maxlen, int flags) {
545
+ VALUE um_recv(struct um *machine, int fd, VALUE buffer, size_t maxlen, int flags) {
527
546
  struct um_op op;
528
547
  um_prep_op(machine, &op, OP_RECV, 0);
529
548
  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
530
549
  void *ptr = um_prepare_read_buffer(buffer, maxlen, 0);
550
+
531
551
  io_uring_prep_recv(sqe, fd, ptr, maxlen, flags);
532
552
 
533
553
  VALUE ret = um_fiber_switch(machine);
@@ -536,8 +556,6 @@ VALUE um_recv(struct um *machine, int fd, VALUE buffer, int maxlen, int flags) {
536
556
  ret = INT2NUM(op.result.res);
537
557
  }
538
558
 
539
- RB_GC_GUARD(buffer);
540
-
541
559
  RAISE_IF_EXCEPTION(ret);
542
560
  RB_GC_GUARD(ret);
543
561
  return ret;
@@ -577,7 +595,6 @@ VALUE um_getsockopt(struct um *machine, int fd, int level, int opt) {
577
595
  VALUE ret = Qnil;
578
596
  int value;
579
597
 
580
- #ifdef HAVE_IO_URING_PREP_CMD_SOCK
581
598
  struct um_op op;
582
599
  um_prep_op(machine, &op, OP_GETSOCKOPT, 0);
583
600
  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
@@ -586,13 +603,6 @@ VALUE um_getsockopt(struct um *machine, int fd, int level, int opt) {
586
603
  ret = um_fiber_switch(machine);
587
604
  if (um_check_completion(machine, &op))
588
605
  ret = INT2NUM(value);
589
- #else
590
- socklen_t nvalue = sizeof(value);
591
- int res = getsockopt(fd, level, opt, &value, &nvalue);
592
- if (res)
593
- rb_syserr_fail(errno, strerror(errno));
594
- ret = INT2NUM(value);
595
- #endif
596
606
 
597
607
  RAISE_IF_EXCEPTION(ret);
598
608
  RB_GC_GUARD(ret);
@@ -602,7 +612,6 @@ VALUE um_getsockopt(struct um *machine, int fd, int level, int opt) {
602
612
  VALUE um_setsockopt(struct um *machine, int fd, int level, int opt, int value) {
603
613
  VALUE ret = Qnil;
604
614
 
605
- #ifdef HAVE_IO_URING_PREP_CMD_SOCK
606
615
  struct um_op op;
607
616
  um_prep_op(machine, &op, OP_SETSOCKOPT, 0);
608
617
  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
@@ -611,12 +620,6 @@ VALUE um_setsockopt(struct um *machine, int fd, int level, int opt, int value) {
611
620
  ret = um_fiber_switch(machine);
612
621
  if (um_check_completion(machine, &op))
613
622
  ret = INT2NUM(op.result.res);
614
- #else
615
- int res = setsockopt(fd, level, opt, &value, sizeof(value));
616
- if (res)
617
- rb_syserr_fail(errno, strerror(errno));
618
- ret = INT2NUM(0);
619
- #endif
620
623
 
621
624
  RAISE_IF_EXCEPTION(ret);
622
625
  RB_GC_GUARD(ret);
@@ -643,6 +646,10 @@ VALUE um_shutdown(struct um *machine, int fd, int how) {
643
646
  VALUE um_shutdown_async(struct um *machine, int fd, int how) {
644
647
  struct um_op *op = um_op_alloc(machine);
645
648
  um_prep_op(machine, op, OP_SHUTDOWN_ASYNC, OP_F_FREE_ON_COMPLETE);
649
+ RB_OBJ_WRITE(machine->self, &op->fiber, Qnil);
650
+ RB_OBJ_WRITE(machine->self, &op->value, Qnil);
651
+ RB_OBJ_WRITE(machine->self, &op->async_op, Qnil);
652
+
646
653
  struct io_uring_sqe *sqe = um_get_sqe(machine, op);
647
654
  io_uring_prep_shutdown(sqe, fd, how);
648
655
 
@@ -679,13 +686,13 @@ VALUE um_poll(struct um *machine, int fd, unsigned mask) {
679
686
  return ret;
680
687
  }
681
688
 
682
- VALUE um_waitpid(struct um *machine, int pid, int options) {
689
+ VALUE um_waitid(struct um *machine, int idtype, int id, int options) {
683
690
  struct um_op op;
684
- um_prep_op(machine, &op, OP_WAITPID, 0);
691
+ um_prep_op(machine, &op, OP_WAITID, 0);
685
692
  struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
686
693
 
687
694
  siginfo_t infop;
688
- io_uring_prep_waitid(sqe, pid == 0 ? P_ALL : P_PID, pid, &infop, options, 0);
695
+ io_uring_prep_waitid(sqe, idtype, id, &infop, options, 0);
689
696
 
690
697
  VALUE ret = um_fiber_switch(machine);
691
698
  if (um_check_completion(machine, &op))
@@ -694,7 +701,31 @@ VALUE um_waitpid(struct um *machine, int pid, int options) {
694
701
  RAISE_IF_EXCEPTION(ret);
695
702
  RB_GC_GUARD(ret);
696
703
 
697
- return rb_ary_new_from_args(2, INT2NUM(infop.si_pid), INT2NUM(infop.si_status));
704
+ return rb_ary_new_from_args(
705
+ 3, INT2NUM(infop.si_pid), INT2NUM(infop.si_status), INT2NUM(infop.si_code)
706
+ );
707
+ }
708
+
709
+ VALUE um_waitid_status(struct um *machine, int idtype, int id, int options) {
710
+ #ifdef HAVE_RB_PROCESS_STATUS_NEW
711
+ struct um_op op;
712
+ um_prep_op(machine, &op, OP_WAITID, 0);
713
+ struct io_uring_sqe *sqe = um_get_sqe(machine, &op);
714
+
715
+ siginfo_t infop;
716
+ io_uring_prep_waitid(sqe, idtype, id, &infop, options | WNOWAIT, 0);
717
+
718
+ VALUE ret = um_fiber_switch(machine);
719
+ if (um_check_completion(machine, &op))
720
+ ret = INT2NUM(op.result.res);
721
+
722
+ RAISE_IF_EXCEPTION(ret);
723
+ RB_GC_GUARD(ret);
724
+
725
+ return rb_process_status_new(infop.si_pid, (infop.si_status & 0xff) << 8, 0);
726
+ #else
727
+ rb_raise(rb_eNotImpError, "Missing rb_process_status_new");
728
+ #endif
698
729
  }
699
730
 
700
731
  #define hash_set(h, sym, v) rb_hash_aset(h, ID2SYM(rb_intern(sym)), v)
data/ext/um/um.h CHANGED
@@ -20,6 +20,9 @@
20
20
  #define likely(cond) __builtin_expect(!!(cond), 1)
21
21
  #endif
22
22
 
23
+ #define IO_BUFFER_P(buffer) \
24
+ (TYPE(buffer) == RUBY_T_DATA) && rb_obj_is_instance_of(buffer, rb_cIOBuffer)
25
+
23
26
  enum op_kind {
24
27
  OP_TIMEOUT,
25
28
  OP_SCHEDULE,
@@ -47,7 +50,7 @@ enum op_kind {
47
50
  OP_SHUTDOWN_ASYNC,
48
51
 
49
52
  OP_POLL,
50
- OP_WAITPID,
53
+ OP_WAITID,
51
54
 
52
55
  OP_FUTEX_WAIT,
53
56
  OP_FUTEX_WAKE,
@@ -131,6 +134,7 @@ struct um {
131
134
 
132
135
  struct um_mutex {
133
136
  uint32_t state;
137
+ uint32_t num_waiters;
134
138
  };
135
139
 
136
140
  struct um_queue_entry {
@@ -173,9 +177,11 @@ struct um_write_buffer {
173
177
  };
174
178
 
175
179
  extern VALUE cUM;
180
+ extern VALUE eUMError;
176
181
  extern VALUE cMutex;
177
182
  extern VALUE cQueue;
178
183
  extern VALUE cAsyncOp;
184
+ extern VALUE eStreamRESPError;
179
185
 
180
186
  struct um *um_get_machine(VALUE self);
181
187
  void um_setup(VALUE self, struct um *machine);
@@ -208,8 +214,9 @@ VALUE um_raise_exception(VALUE v);
208
214
 
209
215
  void um_prep_op(struct um *machine, struct um_op *op, enum op_kind kind, unsigned flags);
210
216
  void um_raise_on_error_result(int result);
211
- void * um_prepare_read_buffer(VALUE buffer, unsigned len, int ofs);
212
- void um_update_read_buffer(struct um *machine, VALUE buffer, int buffer_offset, __s32 result, __u32 flags);
217
+ void um_get_buffer_bytes_for_writing(VALUE buffer, const void **base, size_t *size);
218
+ void * um_prepare_read_buffer(VALUE buffer, ssize_t len, ssize_t ofs);
219
+ void um_update_read_buffer(struct um *machine, VALUE buffer, ssize_t buffer_offset, __s32 result, __u32 flags);
213
220
  int um_setup_buffer_ring(struct um *machine, unsigned size, unsigned count);
214
221
  VALUE um_get_string_from_buffer_ring(struct um *machine, int bgid, __s32 result, __u32 flags);
215
222
  void um_add_strings_to_buffer_ring(struct um *machine, int bgid, VALUE strings);
@@ -229,25 +236,26 @@ VALUE um_timeout(struct um *machine, VALUE interval, VALUE class);
229
236
 
230
237
  VALUE um_sleep(struct um *machine, double duration);
231
238
  VALUE um_periodically(struct um *machine, double interval);
232
- VALUE um_read(struct um *machine, int fd, VALUE buffer, int maxlen, int buffer_offset);
233
- size_t um_read_raw(struct um *machine, int fd, char *buffer, int maxlen);
239
+ VALUE um_read(struct um *machine, int fd, VALUE buffer, size_t maxlen, ssize_t buffer_offset);
240
+ size_t um_read_raw(struct um *machine, int fd, char *buffer, size_t maxlen);
234
241
  VALUE um_read_each(struct um *machine, int fd, int bgid);
235
- VALUE um_write(struct um *machine, int fd, VALUE str, int len);
242
+ VALUE um_write(struct um *machine, int fd, VALUE buffer, size_t len);
243
+ VALUE um_write_async(struct um *machine, int fd, VALUE buffer);
236
244
  VALUE um_close(struct um *machine, int fd);
237
245
  VALUE um_close_async(struct um *machine, int fd);
238
246
  VALUE um_open(struct um *machine, VALUE pathname, int flags, int mode);
239
247
  VALUE um_poll(struct um *machine, int fd, unsigned mask);
240
- VALUE um_waitpid(struct um *machine, int pid, int options);
248
+ VALUE um_waitid(struct um *machine, int idtype, int id, int options);
249
+ VALUE um_waitid_status(struct um *machine, int idtype, int id, int options);
241
250
  VALUE um_statx(struct um *machine, int dirfd, VALUE path, int flags, unsigned int mask);
242
- VALUE um_write_async(struct um *machine, int fd, VALUE str);
243
251
 
244
252
  VALUE um_accept(struct um *machine, int fd);
245
253
  VALUE um_accept_each(struct um *machine, int fd);
246
254
  VALUE um_socket(struct um *machine, int domain, int type, int protocol, uint flags);
247
255
  VALUE um_connect(struct um *machine, int fd, const struct sockaddr *addr, socklen_t addrlen);
248
- VALUE um_send(struct um *machine, int fd, VALUE buffer, int len, int flags);
256
+ VALUE um_send(struct um *machine, int fd, VALUE buffer, size_t len, int flags);
249
257
  VALUE um_send_bundle(struct um *machine, int fd, int bgid, VALUE strings);
250
- VALUE um_recv(struct um *machine, int fd, VALUE buffer, int maxlen, int flags);
258
+ VALUE um_recv(struct um *machine, int fd, VALUE buffer, size_t maxlen, int flags);
251
259
  VALUE um_recv_each(struct um *machine, int fd, int bgid, int flags);
252
260
  VALUE um_bind(struct um *machine, int fd, struct sockaddr *addr, socklen_t addrlen);
253
261
  VALUE um_listen(struct um *machine, int fd, int backlog);
@@ -266,7 +274,7 @@ struct um_mutex *Mutex_data(VALUE self);
266
274
  struct um_queue *Queue_data(VALUE self);
267
275
 
268
276
  void um_mutex_init(struct um_mutex *mutex);
269
- VALUE um_mutex_synchronize(struct um *machine, VALUE mutex, uint32_t *state);
277
+ VALUE um_mutex_synchronize(struct um *machine, struct um_mutex *mutex);
270
278
 
271
279
  void um_queue_init(struct um_queue *queue);
272
280
  void um_queue_free(struct um_queue *queue);
@@ -282,6 +290,8 @@ VALUE stream_get_string(struct um_stream *stream, VALUE buf, ssize_t len);
282
290
  VALUE resp_decode(struct um_stream *stream, VALUE out_buffer);
283
291
  void resp_encode(struct um_write_buffer *buf, VALUE obj);
284
292
 
293
+ __attribute__((noreturn)) void um_raise_internal_error(const char *msg);
294
+
285
295
  void write_buffer_init(struct um_write_buffer *buf, VALUE str);
286
296
  void write_buffer_update_len(struct um_write_buffer *buf);
287
297
 
@@ -52,7 +52,7 @@ void um_async_op_set(VALUE self, struct um *machine, struct um_op *op) {
52
52
 
53
53
  inline void raise_on_missing_op(struct um_async_op *async_op) {
54
54
  if (!async_op->op)
55
- rb_raise(rb_eRuntimeError, "Missing op");
55
+ um_raise_internal_error("Missing op");
56
56
  }
57
57
 
58
58
  inline int async_op_is_done(struct um_async_op *async_op) {
@@ -67,7 +67,7 @@ VALUE AsyncOp_kind(VALUE self) {
67
67
  case OP_TIMEOUT:
68
68
  return SYM_timeout;
69
69
  default:
70
- rb_raise(rb_eRuntimeError, "Invalid op kind");
70
+ um_raise_internal_error("Invalid op kind");
71
71
  }
72
72
  }
73
73
 
data/ext/um/um_buffer.c CHANGED
@@ -28,7 +28,7 @@ inline struct um_buffer *um_buffer_checkout(struct um *machine, int len) {
28
28
 
29
29
  buffer->len = buffer_size(len);
30
30
  if (posix_memalign(&buffer->ptr, 4096, buffer->len))
31
- rb_raise(rb_eRuntimeError, "Failed to allocate buffer");
31
+ um_raise_internal_error("Failed to allocate buffer");
32
32
  }
33
33
  return buffer;
34
34
  }
data/ext/um/um_class.c CHANGED
@@ -1,7 +1,13 @@
1
1
  #include "um.h"
2
2
  #include <arpa/inet.h>
3
+ #include <ruby/io.h>
4
+ #include <sys/syscall.h>
5
+ #include <unistd.h>
3
6
 
4
7
  VALUE cUM;
8
+ VALUE eUMError;
9
+
10
+ static ID id_fileno;
5
11
 
6
12
  static void UM_mark(void *ptr) {
7
13
  struct um *machine = ptr;
@@ -43,7 +49,7 @@ static VALUE UM_allocate(VALUE klass) {
43
49
  inline struct um *um_get_machine(VALUE self) {
44
50
  struct um *um;
45
51
  TypedData_Get_Struct(self, struct um, &UringMachine_type, um);
46
- if (!um->ring_initialized) rb_raise(rb_eRuntimeError, "Machine not initialized");
52
+ if (!um->ring_initialized) um_raise_internal_error("Machine not initialized");
47
53
 
48
54
  return um;
49
55
  }
@@ -103,21 +109,17 @@ VALUE UM_read(int argc, VALUE *argv, VALUE self) {
103
109
  VALUE buffer;
104
110
  VALUE maxlen;
105
111
  VALUE buffer_offset;
106
- rb_scan_args(argc, argv, "31", &fd, &buffer, &maxlen, &buffer_offset);
112
+ rb_scan_args(argc, argv, "22", &fd, &buffer, &maxlen, &buffer_offset);
107
113
 
108
- return um_read(
109
- machine, NUM2INT(fd), buffer, NUM2INT(maxlen),
110
- NIL_P(buffer_offset) ? 0 : NUM2INT(buffer_offset)
111
- );
114
+ ssize_t maxlen_i = NIL_P(maxlen) ? -1 : NUM2INT(maxlen);
115
+ ssize_t buffer_offset_i = NIL_P(buffer_offset) ? 0 : NUM2INT(buffer_offset);
116
+
117
+ return um_read(machine, NUM2INT(fd), buffer, maxlen_i, buffer_offset_i);
112
118
  }
113
119
 
114
120
  VALUE UM_read_each(VALUE self, VALUE fd, VALUE bgid) {
115
- #ifdef HAVE_IO_URING_PREP_READ_MULTISHOT
116
121
  struct um *machine = um_get_machine(self);
117
122
  return um_read_each(machine, NUM2INT(fd), NUM2INT(bgid));
118
- #else
119
- rb_raise(rb_eRuntimeError, "Not supported by kernel");
120
- #endif
121
123
  }
122
124
 
123
125
  VALUE UM_write(int argc, VALUE *argv, VALUE self) {
@@ -127,13 +129,13 @@ VALUE UM_write(int argc, VALUE *argv, VALUE self) {
127
129
  VALUE len;
128
130
  rb_scan_args(argc, argv, "21", &fd, &buffer, &len);
129
131
 
130
- int bytes = NIL_P(len) ? RSTRING_LEN(buffer) : NUM2INT(len);
132
+ size_t bytes = NIL_P(len) ? (size_t)-1 : NUM2UINT(len);
131
133
  return um_write(machine, NUM2INT(fd), buffer, bytes);
132
134
  }
133
135
 
134
- VALUE UM_write_async(VALUE self, VALUE fd, VALUE str) {
136
+ VALUE UM_write_async(VALUE self, VALUE fd, VALUE buffer) {
135
137
  struct um *machine = um_get_machine(self);
136
- return um_write_async(machine, NUM2INT(fd), str);
138
+ return um_write_async(machine, NUM2INT(fd), buffer);
137
139
  }
138
140
 
139
141
  VALUE UM_statx(VALUE self, VALUE dirfd, VALUE path, VALUE flags, VALUE mask) {
@@ -270,12 +272,10 @@ VALUE UM_setsockopt(VALUE self, VALUE fd, VALUE level, VALUE opt, VALUE value) {
270
272
  return um_setsockopt(machine, NUM2INT(fd), NUM2INT(level), NUM2INT(opt), numeric_value(value));
271
273
  }
272
274
 
273
- #ifdef HAVE_IO_URING_PREP_FUTEX
274
-
275
275
  VALUE UM_mutex_synchronize(VALUE self, VALUE mutex) {
276
276
  struct um *machine = um_get_machine(self);
277
277
  struct um_mutex *mutex_data = Mutex_data(mutex);
278
- return um_mutex_synchronize(machine, mutex, &mutex_data->state);
278
+ return um_mutex_synchronize(machine, mutex_data);
279
279
  }
280
280
 
281
281
  VALUE UM_queue_push(VALUE self, VALUE queue, VALUE value) {
@@ -302,8 +302,6 @@ VALUE UM_queue_shift(VALUE self, VALUE queue) {
302
302
  return um_queue_shift(machine, que);
303
303
  }
304
304
 
305
- #endif
306
-
307
305
  struct um_open_ctx {
308
306
  VALUE self;
309
307
  VALUE fd;
@@ -332,11 +330,18 @@ VALUE UM_poll(VALUE self, VALUE fd, VALUE mask) {
332
330
  return um_poll(machine, NUM2INT(fd), NUM2UINT(mask));
333
331
  }
334
332
 
335
- VALUE UM_waitpid(VALUE self, VALUE pid, VALUE options) {
333
+ VALUE UM_waitid(VALUE self, VALUE idtype, VALUE id, VALUE options) {
336
334
  struct um *machine = um_get_machine(self);
337
- return um_waitpid(machine, NUM2INT(pid), NUM2INT(options));
335
+ return um_waitid(machine, NUM2INT(idtype), NUM2INT(id), NUM2INT(options));
338
336
  }
339
337
 
338
+ #ifdef HAVE_RB_PROCESS_STATUS_NEW
339
+ VALUE UM_waitid_status(VALUE self, VALUE idtype, VALUE id, VALUE options) {
340
+ struct um *machine = um_get_machine(self);
341
+ return um_waitid_status(machine, NUM2INT(idtype), NUM2INT(id), NUM2INT(options));
342
+ }
343
+ #endif
344
+
340
345
  VALUE UM_prep_timeout(VALUE self, VALUE interval) {
341
346
  struct um *machine = um_get_machine(self);
342
347
  return um_prep_timeout(machine, NUM2DBL(interval));
@@ -353,10 +358,65 @@ VALUE UM_pipe(VALUE self) {
353
358
  return rb_ary_new_from_args(2, INT2NUM(fds[0]), INT2NUM(fds[1]));
354
359
  }
355
360
 
361
+ VALUE UM_pidfd_open(VALUE self, VALUE pid) {
362
+ int fd = syscall(SYS_pidfd_open, NUM2INT(pid), 0);
363
+ if (fd == -1) {
364
+ int e = errno;
365
+ rb_syserr_fail(e, strerror(e));
366
+ }
367
+
368
+ return INT2NUM(fd);
369
+ }
370
+
371
+ VALUE UM_pidfd_send_signal(VALUE self, VALUE fd, VALUE sig) {
372
+ int ret = syscall(
373
+ SYS_pidfd_send_signal, NUM2INT(fd), NUM2INT(sig), NULL, 0
374
+ );
375
+ if (ret) {
376
+ int e = errno;
377
+ rb_syserr_fail(e, strerror(e));
378
+ }
379
+
380
+ return fd;
381
+ }
382
+
383
+ VALUE UM_io_nonblock_p(VALUE self, VALUE io) {
384
+ int fd = rb_io_descriptor(io);
385
+ int oflags = fcntl(fd, F_GETFL);
386
+ if (oflags == -1) return Qnil;
387
+
388
+ return (oflags & O_NONBLOCK) ? Qtrue : Qfalse;
389
+ }
390
+
391
+ VALUE UM_io_set_nonblock(VALUE self, VALUE io, VALUE nonblock) {
392
+ int fd = rb_io_descriptor(io);
393
+ int oflags = fcntl(fd, F_GETFL);
394
+ if (oflags == -1) return Qnil;
395
+
396
+ if (RTEST(nonblock)) {
397
+ if (!(oflags & O_NONBLOCK)) {
398
+ oflags |= O_NONBLOCK;
399
+ fcntl(fd, F_SETFL, oflags);
400
+ }
401
+ }
402
+ else {
403
+ if (oflags & O_NONBLOCK) {
404
+ oflags &= ~O_NONBLOCK;
405
+ fcntl(fd, F_SETFL, oflags);
406
+ }
407
+ }
408
+ return nonblock;
409
+ }
410
+
356
411
  VALUE UM_kernel_version(VALUE self) {
357
412
  return INT2NUM(UM_KERNEL_VERSION);
358
413
  }
359
414
 
415
+ VALUE UM_debug(VALUE self, VALUE str) {
416
+ printf("%s\n", StringValueCStr(str));
417
+ return Qnil;
418
+ }
419
+
360
420
  void Init_UM(void) {
361
421
  rb_ext_ractor_safe(true);
362
422
 
@@ -368,7 +428,13 @@ void Init_UM(void) {
368
428
  rb_define_method(cUM, "setup_buffer_ring", UM_setup_buffer_ring, 2);
369
429
 
370
430
  rb_define_singleton_method(cUM, "pipe", UM_pipe, 0);
431
+ rb_define_singleton_method(cUM, "pidfd_open", UM_pidfd_open, 1);
432
+ rb_define_singleton_method(cUM, "pidfd_send_signal", UM_pidfd_send_signal, 2);
433
+
434
+ rb_define_singleton_method(cUM, "io_nonblock?", UM_io_nonblock_p, 1);
435
+ rb_define_singleton_method(cUM, "io_set_nonblock", UM_io_set_nonblock, 2);
371
436
  rb_define_singleton_method(cUM, "kernel_version", UM_kernel_version, 0);
437
+ rb_define_singleton_method(cUM, "debug", UM_debug, 1);
372
438
 
373
439
 
374
440
  rb_define_method(cUM, "schedule", UM_schedule, 2);
@@ -388,7 +454,10 @@ void Init_UM(void) {
388
454
  rb_define_method(cUM, "statx", UM_statx, 4);
389
455
 
390
456
  rb_define_method(cUM, "poll", UM_poll, 2);
391
- rb_define_method(cUM, "waitpid", UM_waitpid, 2);
457
+ rb_define_method(cUM, "waitid", UM_waitid, 3);
458
+ #ifdef HAVE_RB_PROCESS_STATUS_NEW
459
+ rb_define_method(cUM, "waitid_status", UM_waitid_status, 3);
460
+ #endif
392
461
 
393
462
  rb_define_method(cUM, "accept", UM_accept, 1);
394
463
  rb_define_method(cUM, "accept_each", UM_accept_each, 1);
@@ -407,13 +476,15 @@ void Init_UM(void) {
407
476
 
408
477
  rb_define_method(cUM, "prep_timeout", UM_prep_timeout, 1);
409
478
 
410
- #ifdef HAVE_IO_URING_PREP_FUTEX
411
479
  rb_define_method(cUM, "pop", UM_queue_pop, 1);
412
480
  rb_define_method(cUM, "push", UM_queue_push, 2);
413
481
  rb_define_method(cUM, "shift", UM_queue_shift, 1);
414
482
  rb_define_method(cUM, "synchronize", UM_mutex_synchronize, 1);
415
483
  rb_define_method(cUM, "unshift", UM_queue_unshift, 2);
416
- #endif
484
+
485
+ eUMError = rb_define_class_under(cUM, "Error", rb_eStandardError);
417
486
 
418
487
  um_define_net_constants(cUM);
488
+
489
+ id_fileno = rb_intern_const("fileno");
419
490
  }