polyphony 0.70 → 0.73.1

Files changed (78)
  1. checksums.yaml +4 -4
  2. data/.github/FUNDING.yml +1 -0
  3. data/.github/workflows/test.yml +3 -2
  4. data/.gitignore +3 -1
  5. data/CHANGELOG.md +32 -4
  6. data/Gemfile.lock +11 -11
  7. data/TODO.md +1 -1
  8. data/bin/pdbg +1 -1
  9. data/bin/polyphony-debug +0 -0
  10. data/bin/stress.rb +0 -0
  11. data/bin/test +0 -0
  12. data/docs/_user-guide/all-about-timers.md +1 -1
  13. data/docs/api-reference/exception.md +5 -1
  14. data/docs/api-reference/fiber.md +2 -2
  15. data/docs/faq.md +1 -1
  16. data/docs/getting-started/overview.md +8 -8
  17. data/docs/getting-started/tutorial.md +3 -3
  18. data/docs/main-concepts/concurrency.md +1 -1
  19. data/docs/main-concepts/extending.md +3 -3
  20. data/docs/main-concepts/fiber-scheduling.md +1 -1
  21. data/examples/core/calc.rb +37 -0
  22. data/examples/core/calc_with_restart.rb +40 -0
  23. data/examples/core/calc_with_supervise.rb +37 -0
  24. data/examples/core/message_based_supervision.rb +1 -1
  25. data/examples/core/ring.rb +29 -0
  26. data/examples/io/rack_server.rb +1 -1
  27. data/examples/io/tunnel.rb +1 -1
  28. data/examples/performance/fiber_transfer.rb +1 -1
  29. data/examples/performance/line_splitting.rb +1 -1
  30. data/examples/performance/thread-vs-fiber/compare.rb +1 -1
  31. data/ext/polyphony/backend_common.c +15 -9
  32. data/ext/polyphony/backend_common.h +1 -1
  33. data/ext/polyphony/backend_io_uring.c +56 -64
  34. data/ext/polyphony/backend_io_uring_context.c +1 -1
  35. data/ext/polyphony/backend_io_uring_context.h +1 -1
  36. data/ext/polyphony/backend_libev.c +36 -30
  37. data/ext/polyphony/extconf.rb +25 -13
  38. data/ext/polyphony/polyphony.h +5 -1
  39. data/ext/polyphony/queue.c +2 -2
  40. data/ext/polyphony/runqueue_ring_buffer.c +3 -2
  41. data/ext/polyphony/thread.c +1 -1
  42. data/lib/polyphony/adapters/irb.rb +11 -1
  43. data/lib/polyphony/{extensions → core}/debug.rb +0 -0
  44. data/lib/polyphony/core/global_api.rb +3 -6
  45. data/lib/polyphony/core/timer.rb +2 -2
  46. data/lib/polyphony/debugger.rb +3 -3
  47. data/lib/polyphony/extensions/exception.rb +45 -0
  48. data/lib/polyphony/extensions/fiber.rb +27 -9
  49. data/lib/polyphony/extensions/io.rb +2 -2
  50. data/lib/polyphony/extensions/{core.rb → kernel.rb} +0 -73
  51. data/lib/polyphony/extensions/openssl.rb +20 -5
  52. data/lib/polyphony/extensions/process.rb +19 -0
  53. data/lib/polyphony/extensions/socket.rb +3 -4
  54. data/lib/polyphony/extensions/timeout.rb +10 -0
  55. data/lib/polyphony/extensions.rb +9 -0
  56. data/lib/polyphony/version.rb +1 -1
  57. data/lib/polyphony.rb +2 -5
  58. data/polyphony.gemspec +1 -1
  59. data/test/coverage.rb +2 -2
  60. data/test/stress.rb +1 -1
  61. data/test/test_backend.rb +12 -12
  62. data/test/test_event.rb +1 -1
  63. data/test/test_ext.rb +1 -1
  64. data/test/test_fiber.rb +52 -7
  65. data/test/test_global_api.rb +16 -3
  66. data/test/test_io.rb +3 -3
  67. data/test/test_process_supervision.rb +1 -1
  68. data/test/test_queue.rb +6 -6
  69. data/test/test_signal.rb +20 -1
  70. data/test/test_socket.rb +12 -10
  71. data/test/test_supervise.rb +85 -0
  72. data/test/test_sync.rb +2 -2
  73. data/test/test_thread.rb +22 -2
  74. data/test/test_thread_pool.rb +1 -1
  75. data/test/test_throttler.rb +1 -1
  76. data/test/test_timer.rb +2 -2
  77. data/test/test_trace.rb +1 -1
  78. metadata +13 -4
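
Note: the hunks shown below are from the C extension sources (files 31-36 in the list above). Many of the paired - / + lines differ only in trailing whitespace; the substantive changes are the removal of obsolete OBJ_TAINT calls, a new current_time_ns() helper, and a rewrite of Backend_timer_loop in both the io_uring and libev backends to use integer nanosecond deadlines.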
data/ext/polyphony/backend_common.c

@@ -34,7 +34,7 @@ inline void backend_base_mark(struct Backend_base *base) {
 void backend_base_reset(struct Backend_base *base) {
   runqueue_finalize(&base->runqueue);
   runqueue_finalize(&base->parked_runqueue);
-
+
   runqueue_initialize(&base->runqueue);
   runqueue_initialize(&base->parked_runqueue);

@@ -46,13 +46,13 @@ void backend_base_reset(struct Backend_base *base) {
   base->idle_gc_period = 0;
   base->idle_gc_last_time = 0;
   base->idle_proc = Qnil;
-  base->trace_proc = Qnil;
+  base->trace_proc = Qnil;
 }

 const unsigned int ANTI_STARVE_SWITCH_COUNT_THRESHOLD = 64;

 inline void conditional_nonblocking_poll(VALUE backend, struct Backend_base *base, VALUE current, VALUE next) {
-  if ((base->switch_count % ANTI_STARVE_SWITCH_COUNT_THRESHOLD) == 0 || next == current)
+  if ((base->switch_count % ANTI_STARVE_SWITCH_COUNT_THRESHOLD) == 0 || next == current)
     Backend_poll(backend, Qnil);
 }

@@ -62,7 +62,7 @@ VALUE backend_base_switch_fiber(VALUE backend, struct Backend_base *base) {
   unsigned int pending_ops_count = base->pending_count;
   unsigned int backend_was_polled = 0;
   unsigned int idle_tasks_run_count = 0;
-
+
   base->switch_count++;
   COND_TRACE(base, 2, SYM_fiber_switchpoint, current_fiber);

@@ -82,7 +82,7 @@ VALUE backend_base_switch_fiber(VALUE backend, struct Backend_base *base) {

       break;
     }
-
+
     if (!idle_tasks_run_count) {
       idle_tasks_run_count++;
       backend_run_idle_tasks(base);

@@ -112,7 +112,7 @@ void backend_base_schedule_fiber(VALUE thread, VALUE backend, struct Backend_bas

   COND_TRACE(base, 4, SYM_fiber_schedule, fiber, value, prioritize ? Qtrue : Qfalse);

-  runqueue_t *runqueue = rb_ivar_get(fiber, ID_ivar_parked) == Qtrue ?
+  runqueue_t *runqueue = rb_ivar_get(fiber, ID_ivar_parked) == Qtrue ?
     &base->parked_runqueue : &base->runqueue;

   (prioritize ? runqueue_unshift : runqueue_push)(runqueue, fiber, value, already_runnable);

@@ -202,7 +202,6 @@ inline rb_encoding* io_read_encoding(rb_io_t *fptr) {
 }

 inline VALUE io_enc_str(VALUE str, rb_io_t *fptr) {
-  OBJ_TAINT(str);
   rb_enc_associate(str, io_read_encoding(fptr));
   return str;
 }
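
Note: the OBJ_TAINT deletions here and in the hunks below track Ruby's phased removal of the taint mechanism (deprecated in Ruby 2.7, a no-op since Ruby 3.0), so strings filled by the read/recv paths no longer need tainting.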
@@ -246,6 +245,13 @@ inline double current_time() {
   return t / 1e9;
 }

+inline uint64_t current_time_ns() {
+  struct timespec ts;
+  clock_gettime(CLOCK_MONOTONIC, &ts);
+  uint64_t ns = ts.tv_sec;
+  return ns * 1e9 + ts.tv_nsec;
+}
+
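
Note: the new current_time_ns() helper reads CLOCK_MONOTONIC and returns whole nanoseconds; the reworked timer loops further down use it to keep deadlines on an integer grid instead of accumulating floating-point error. A standalone sketch of the same idea (demo_time_ns is a hypothetical name; it uses a pure integer multiply where the helper above goes through a 1e9 double):

    /* demo.c - standalone sketch, not part of the gem */
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static uint64_t demo_time_ns(void) {
      struct timespec ts;
      clock_gettime(CLOCK_MONOTONIC, &ts);
      /* all-integer arithmetic: no double round-trip */
      return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
    }

    int main(void) {
      uint64_t t0 = demo_time_ns();
      /* ... work being timed ... */
      uint64_t t1 = demo_time_ns();
      printf("elapsed: %llu ns\n", (unsigned long long)(t1 - t0));
      return 0;
    }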
 inline VALUE backend_timeout_exception(VALUE exception) {
   if (rb_obj_is_kind_of(exception, rb_cArray) == Qtrue)
     return rb_funcall(rb_ary_entry(exception, 0), ID_new, 1, rb_ary_entry(exception, 1));

@@ -300,7 +306,7 @@ inline void io_verify_blocking_mode(rb_io_t *fptr, VALUE io, VALUE blocking) {
   int flags = fcntl(fptr->fd, F_GETFL);
   if (flags == -1) return;
   int is_nonblocking = flags & O_NONBLOCK;
-
+
   if (blocking == Qtrue) {
     if (!is_nonblocking) return;
     flags &= ~O_NONBLOCK;

@@ -375,7 +381,7 @@ void backend_setup_stats_symbols() {
   SYM_switch_count = ID2SYM(rb_intern("switch_count"));
   SYM_poll_count = ID2SYM(rb_intern("poll_count"));
   SYM_pending_ops = ID2SYM(rb_intern("pending_ops"));
-
+
   rb_global_variable(&SYM_runqueue_size);
   rb_global_variable(&SYM_runqueue_length);
   rb_global_variable(&SYM_runqueue_max_length);
data/ext/polyphony/backend_common.h

@@ -82,7 +82,6 @@ VALUE backend_snooze();
   shrinkable = io_setstrbuf(&str, len); \
   buf = RSTRING_PTR(str); \
   total = 0; \
-  OBJ_TAINT(str); \
 }

 #define READ_LOOP_YIELD_STR() { \

@@ -101,6 +100,7 @@ VALUE backend_snooze();

 void rectify_io_file_pos(rb_io_t *fptr);
 double current_time();
+uint64_t current_time_ns();
 VALUE backend_timeout_exception(VALUE exception);
 VALUE Backend_timeout_ensure_safe(VALUE arg);
 VALUE Backend_timeout_ensure_safe(VALUE arg);
data/ext/polyphony/backend_io_uring.c

@@ -127,7 +127,7 @@ void *io_uring_backend_poll_without_gvl(void *ptr) {

 // copied from queue.c
 static inline bool cq_ring_needs_flush(struct io_uring *ring) {
-  return IO_URING_READ_ONCE(*ring->sq.kflags) & IORING_SQ_CQ_OVERFLOW;
+  return IO_URING_READ_ONCE(*ring->sq.kflags) & IORING_SQ_CQ_OVERFLOW;
 }

 static inline void io_uring_backend_handle_completion(struct io_uring_cqe *cqe, Backend_t *backend) {

@@ -141,13 +141,13 @@ static inline void io_uring_backend_handle_completion(struct io_uring_cqe *cqe,
   context_store_release(&backend->store, ctx);
 }

-// adapted from io_uring_peek_batch_cqe in queue.c
+// adapted from io_uring_peek_batch_cqe in queue.c
 // this peeks at cqes and handles each available cqe
 void io_uring_backend_handle_ready_cqes(Backend_t *backend) {
   struct io_uring *ring = &backend->ring;
-  bool overflow_checked = false;
+  bool overflow_checked = false;
   struct io_uring_cqe *cqe;
-  unsigned head;
+  unsigned head;
   unsigned cqe_count;

 again:

@@ -158,16 +158,16 @@ again:
   }
   io_uring_cq_advance(ring, cqe_count);

-  if (overflow_checked) goto done;
+  if (overflow_checked) goto done;

-  if (cq_ring_needs_flush(ring)) {
-    __sys_io_uring_enter(ring->ring_fd, 0, 0, IORING_ENTER_GETEVENTS, NULL);
-    overflow_checked = true;
-    goto again;
-  }
+  if (cq_ring_needs_flush(ring)) {
+    __sys_io_uring_enter(ring->ring_fd, 0, 0, IORING_ENTER_GETEVENTS, NULL);
+    overflow_checked = true;
+    goto again;
+  }

 done:
-  return;
+  return;
 }
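
Note: the flush logic above mirrors liburing's io_uring_peek_batch_cqe, from which the gem's comment says it was adapted: drain the visible CQEs, and if the kernel has raised the CQ-overflow flag, enter the kernel once with IORING_ENTER_GETEVENTS to flush overflowed completions into the ring, then drain again. A sketch of the same pattern against liburing's public API (handle_cqe is a hypothetical handler):

    #include <liburing.h>

    /* hypothetical per-completion handler */
    static void handle_cqe(struct io_uring_cqe *cqe) {
      /* dispatch on io_uring_cqe_get_data(cqe) ... */
      (void)cqe;
    }

    static void drain_completions(struct io_uring *ring) {
      struct io_uring_cqe *cqes[64];
      unsigned n;
      /* io_uring_peek_batch_cqe performs the overflow flush internally */
      while ((n = io_uring_peek_batch_cqe(ring, cqes, 64)) > 0) {
        for (unsigned i = 0; i < n; i++) handle_cqe(cqes[i]);
        io_uring_cq_advance(ring, n);
      }
    }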

 void io_uring_backend_poll(Backend_t *backend) {

@@ -200,19 +200,12 @@ inline VALUE Backend_poll(VALUE self, VALUE blocking) {
   }

   COND_TRACE(&backend->base, 2, SYM_fiber_event_poll_enter, rb_fiber_current());
-  // if (SHOULD_TRACE(&backend->base))
-  //   printf(
-  //     "io_uring_poll(blocking_mode: %d, pending: %d, taken: %d, available: %d, runqueue: %d\n",
-  //     is_blocking,
-  //     backend->base.pending_count,
-  //     backend->store.taken_count,
-  //     backend->store.available_count,
-  //     backend->base.runqueue.entries.count
-  //   );
+
   if (is_blocking) io_uring_backend_poll(backend);
   io_uring_backend_handle_ready_cqes(backend);
-  COND_TRACE(&backend->base, 2, SYM_fiber_event_poll_leave, rb_fiber_current());

+  COND_TRACE(&backend->base, 2, SYM_fiber_event_poll_leave, rb_fiber_current());
+
   return self;
 }

@@ -255,7 +248,7 @@ VALUE Backend_wakeup(VALUE self) {
   io_uring_prep_nop(sqe);
   backend->pending_sqes = 0;
   io_uring_submit(&backend->ring);
-
+
   return Qtrue;
 }

@@ -340,7 +333,6 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof,
   rb_io_check_byte_readable(fptr);
   io_unset_nonblock(fptr, io);
   rectify_io_file_pos(fptr);
-  OBJ_TAINT(str);

   while (1) {
     VALUE resume_value = Qnil;

@@ -411,7 +403,7 @@ VALUE Backend_read_loop(VALUE self, VALUE io, VALUE maxlen) {
     op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_read(sqe, fptr->fd, buf, len, -1);
-
+
     ssize_t result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
     int completed = context_store_release(&backend->store, ctx);
     if (!completed) {

@@ -461,7 +453,7 @@ VALUE Backend_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method) {
     op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_read(sqe, fptr->fd, buf, len, -1);
-
+
     ssize_t result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
     int completed = context_store_release(&backend->store, ctx);
     if (!completed) {

@@ -507,7 +499,7 @@ VALUE Backend_write(VALUE self, VALUE io, VALUE str) {
     op_context_t *ctx = context_store_acquire(&backend->store, OP_WRITE);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_write(sqe, fptr->fd, buf, left, 0);
-
+
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
     int completed = context_store_release(&backend->store, ctx);
     if (!completed) {

@@ -516,7 +508,7 @@ VALUE Backend_write(VALUE self, VALUE io, VALUE str) {
       return resume_value;
     }
     RB_GC_GUARD(resume_value);
-
+
     if (result < 0)
       rb_syserr_fail(-result, strerror(-result));
     else {

@@ -559,7 +551,7 @@ VALUE Backend_writev(VALUE self, VALUE io, int argc, VALUE *argv) {
     op_context_t *ctx = context_store_acquire(&backend->store, OP_WRITEV);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_writev(sqe, fptr->fd, iov_ptr, iov_count, -1);
-
+
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
     int completed = context_store_release(&backend->store, ctx);
     if (!completed) {

@@ -628,14 +620,13 @@ VALUE Backend_recv(VALUE self, VALUE io, VALUE str, VALUE length, VALUE pos) {
   rb_io_check_byte_readable(fptr);
   io_unset_nonblock(fptr, io);
   rectify_io_file_pos(fptr);
-  OBJ_TAINT(str);

   while (1) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_recv(sqe, fptr->fd, buf, len - total, 0);
-
+
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
     int completed = context_store_release(&backend->store, ctx);
     if (!completed) {

@@ -685,7 +676,7 @@ VALUE Backend_recv_loop(VALUE self, VALUE io, VALUE maxlen) {
     op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_recv(sqe, fptr->fd, buf, len, 0);
-
+
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
     int completed = context_store_release(&backend->store, ctx);
     if (!completed) {

@@ -734,7 +725,7 @@ VALUE Backend_recv_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method)
     op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_recv(sqe, fptr->fd, buf, len, 0);
-
+
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
     int completed = context_store_release(&backend->store, ctx);
     if (!completed) {

@@ -780,7 +771,7 @@ VALUE Backend_send(VALUE self, VALUE io, VALUE str, VALUE flags) {
     op_context_t *ctx = context_store_acquire(&backend->store, OP_SEND);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_send(sqe, fptr->fd, buf, left, flags_int);
-
+
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
     int completed = context_store_release(&backend->store, ctx);
     if (!completed) {

@@ -817,7 +808,7 @@ VALUE io_uring_backend_accept(Backend_t *backend, VALUE server_socket, VALUE soc
     op_context_t *ctx = context_store_acquire(&backend->store, OP_ACCEPT);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_accept(sqe, fptr->fd, &addr, &len, 0);
-
+
     int fd = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
     int completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);

@@ -838,7 +829,7 @@ VALUE io_uring_backend_accept(Backend_t *backend, VALUE server_socket, VALUE soc
     rb_io_synchronized(fp);

     // if (rsock_do_not_reverse_lookup) {
-    //   fp->mode |= FMODE_NOREVLOOKUP;
+    //   fp->mode |= FMODE_NOREVLOOKUP;
     // }
     if (loop) {
       rb_yield(socket);

@@ -888,7 +879,7 @@ VALUE io_uring_backend_splice(Backend_t *backend, VALUE src, VALUE dest, VALUE m
     op_context_t *ctx = context_store_acquire(&backend->store, OP_SPLICE);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_splice(sqe, src_fptr->fd, -1, dest_fptr->fd, -1, NUM2INT(maxlen), 0);
-
+
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
     int completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);

@@ -944,7 +935,7 @@ VALUE Backend_connect(VALUE self, VALUE sock, VALUE host, VALUE port) {
   RAISE_IF_EXCEPTION(resume_value);
   if (!completed) return resume_value;
   RB_GC_GUARD(resume_value);
-
+
   if (result < 0) rb_syserr_fail(-result, strerror(-result));
   return sock;
 }
@@ -969,7 +960,7 @@ inline struct __kernel_timespec double_to_timespec(double duration) {
   double duration_fraction = modf(duration, &duration_integral);
   struct __kernel_timespec ts;
   ts.tv_sec = duration_integral;
-  ts.tv_nsec = floor(duration_fraction * 1000000000);
+  ts.tv_nsec = floor(duration_fraction * 1000000000);
   return ts;
 }
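
Note: double_to_timespec splits a floating-point duration into whole seconds and nanoseconds via modf. A worked example using the portable struct timespec (the __kernel_timespec layout is equivalent for this purpose): 1.25 seconds becomes tv_sec = 1, tv_nsec = 250000000.

    #include <math.h>
    #include <stdio.h>
    #include <time.h>

    int main(void) {
      double duration = 1.25;
      double integral;
      double fraction = modf(duration, &integral); /* 1.0 and 0.25 */
      struct timespec ts;
      ts.tv_sec = (time_t)integral;
      ts.tv_nsec = (long)floor(fraction * 1000000000); /* 250000000 */
      printf("tv_sec=%lld tv_nsec=%ld\n", (long long)ts.tv_sec, ts.tv_nsec);
      return 0;
    }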

@@ -981,7 +972,7 @@ inline struct __kernel_timespec duration_to_timespec(VALUE duration) {
 int io_uring_backend_submit_timeout_and_await(Backend_t *backend, double duration, VALUE *resume_value) {
   struct __kernel_timespec ts = double_to_timespec(duration);
   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
-
+
   op_context_t *ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
   io_uring_prep_timeout(sqe, &ts, 0, 0);
   io_uring_backend_defer_submit_and_await(backend, sqe, ctx, resume_value);

@@ -1001,29 +992,34 @@ VALUE Backend_sleep(VALUE self, VALUE duration) {

 VALUE Backend_timer_loop(VALUE self, VALUE interval) {
   Backend_t *backend;
-  double interval_d = NUM2DBL(interval);
+  uint64_t interval_ns = NUM2DBL(interval) * 1e9;
+  uint64_t next_time_ns = 0;
+  VALUE resume_value = Qnil;
+
   GetBackend(self, backend);
-  double next_time = 0.;

   while (1) {
-    double now = current_time();
-    if (next_time == 0.) next_time = current_time() + interval_d;
-    double sleep_duration = next_time - now;
-    if (sleep_duration < 0) sleep_duration = 0;
-
-    VALUE resume_value = Qnil;
-    int completed = io_uring_backend_submit_timeout_and_await(backend, sleep_duration, &resume_value);
-    RAISE_IF_EXCEPTION(resume_value);
-    if (!completed) return resume_value;
-    RB_GC_GUARD(resume_value);
+    double now_ns = current_time_ns();
+    if (next_time_ns == 0) next_time_ns = now_ns + interval_ns;
+    if (next_time_ns > now_ns) {
+      double sleep_duration = ((double)(next_time_ns - now_ns))/1e9;
+      int completed = io_uring_backend_submit_timeout_and_await(backend, sleep_duration, &resume_value);
+      RAISE_IF_EXCEPTION(resume_value);
+      if (!completed) return resume_value;
+    }
+    else {
+      resume_value = backend_snooze();
+      RAISE_IF_EXCEPTION(resume_value);
+    }

     rb_yield(Qnil);

     while (1) {
-      next_time += interval_d;
-      if (next_time > now) break;
+      next_time_ns += interval_ns;
+      if (next_time_ns > now_ns) break;
     }
   }
+  RB_GC_GUARD(resume_value);
 }
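
Note: the rewritten loop keeps ticks on a fixed nanosecond grid. It sleeps only for the time remaining until next_time_ns, snoozes (yields to other fibers) if the block has already overrun the deadline, and then advances the deadline in whole intervals so missed ticks are skipped rather than fired in a burst. The libev backend receives the same rewrite further down. A worked example of the catch-up step, with hypothetical values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      uint64_t interval_ns  = 100000000ULL;  /* 100 ms timer */
      uint64_t next_time_ns = 1000000000ULL; /* deadline was t = 1.0 s */
      uint64_t now_ns       = 1350000000ULL; /* block overran to t = 1.35 s */

      /* advance the deadline in whole intervals until it is in the future */
      while (1) {
        next_time_ns += interval_ns;
        if (next_time_ns > now_ns) break;
      }

      /* prints 1.4: the 1.1, 1.2 and 1.3 s ticks are dropped, not bursted */
      printf("next tick at %.1f s\n", next_time_ns / 1e9);
      return 0;
    }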

 struct Backend_timeout_ctx {

@@ -1051,7 +1047,7 @@ VALUE Backend_timeout(int argc, VALUE *argv, VALUE self) {
   VALUE exception;
   VALUE move_on_value = Qnil;
   rb_scan_args(argc, argv, "21", &duration, &exception, &move_on_value);
-
+
   struct __kernel_timespec ts = duration_to_timespec(duration);
   Backend_t *backend;
   GetBackend(self, backend);

@@ -1059,7 +1055,7 @@ VALUE Backend_timeout(int argc, VALUE *argv, VALUE self) {
   VALUE timeout = rb_funcall(cTimeoutException, ID_new, 0);

   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
-
+
   op_context_t *ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
   ctx->resume_value = timeout;
   io_uring_prep_timeout(sqe, &ts, 0, 0);

@@ -1094,7 +1090,7 @@ VALUE Backend_waitpid(VALUE self, VALUE pid) {
     RAISE_IF_EXCEPTION(resume_value);
     RB_GC_GUARD(resume_value);
   }
-
+
   int status;
   pid_t ret = waitpid(pid_int, &status, WNOHANG);
   if (ret < 0) {

@@ -1244,7 +1240,7 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
     }
     rb_raise(rb_eRuntimeError, "Invalid op specified or bad op arity");
   }
-
+
   io_uring_sqe_set_data(last_sqe, ctx);
   unsigned int flags = (i == (argc - 1)) ? IOSQE_ASYNC : IOSQE_ASYNC | IOSQE_IO_LINK;
   io_uring_sqe_set_flags(last_sqe, flags);

@@ -1259,7 +1255,7 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
   int completed = context_store_release(&backend->store, ctx);
   if (!completed) {
     Backend_chain_ctx_attach_buffers(ctx, argc, argv);
-
+
     // op was not completed (an exception was raised), so we need to cancel it
     ctx->result = -ECANCELED;
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);

@@ -1269,7 +1265,7 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
     RAISE_IF_EXCEPTION(resume_value);
     return resume_value;
   }
-
+
   RB_GC_GUARD(resume_value);
   return INT2NUM(result);
 }

@@ -1404,7 +1400,7 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VAL

     SPLICE_CHUNKS_AWAIT_OPS(backend, &ctx, &chunk_len, &switchpoint_result);
     if (chunk_len == 0) break;
-
+
     total += chunk_len;
     chunk_len_value = INT2NUM(chunk_len);

@@ -1526,10 +1522,6 @@ void Init_Backend() {
   rb_define_method(cBackend, "waitpid", Backend_waitpid, 1);
   rb_define_method(cBackend, "write", Backend_write_m, -1);

-  #ifdef POLYPHONY_UNSET_NONBLOCK
-  ID_ivar_is_nonblocking = rb_intern("@is_nonblocking");
-  #endif
-
   SYM_io_uring = ID2SYM(rb_intern("io_uring"));
   SYM_send = ID2SYM(rb_intern("send"));
   SYM_splice = ID2SYM(rb_intern("splice"));
data/ext/polyphony/backend_io_uring_context.c

@@ -64,7 +64,7 @@ inline int context_store_release(op_context_store_t *store, op_context_t *ctx) {
   // printf("release %p %d (%s, ref_count: %d)\n", ctx, ctx->id, op_type_to_str(ctx->type), ctx->ref_count);

   assert(ctx->ref_count);
-
+
   ctx->ref_count--;
   if (ctx->ref_count) return 0;

data/ext/polyphony/backend_io_uring_context.h

@@ -22,7 +22,7 @@ typedef struct op_context {
   struct op_context *prev;
   struct op_context *next;
   enum op_type type: 16;
-  unsigned int ref_count : 16;
+  unsigned int ref_count : 16;
   int id;
   int result;
   VALUE fiber;
data/ext/polyphony/backend_libev.c

@@ -118,7 +118,7 @@ inline struct ev_loop *libev_new_loop() {

 static VALUE Backend_initialize(VALUE self) {
   Backend_t *backend;
-
+
   GetBackend(self, backend);

   backend_base_initialize(&backend->base);

@@ -188,7 +188,7 @@ inline void Backend_unschedule_fiber(VALUE self, VALUE fiber) {
   Backend_t *backend;
   GetBackend(self, backend);

-  runqueue_delete(&backend->base.runqueue, fiber);
+  runqueue_delete(&backend->base.runqueue, fiber);
 }

 inline VALUE Backend_switch_fiber(VALUE self) {

@@ -287,7 +287,6 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof,
   io_verify_blocking_mode(fptr, io, Qfalse);
   rectify_io_file_pos(fptr);
   watcher.fiber = Qnil;
-  OBJ_TAINT(str);

   while (1) {
     backend->base.op_count++;

@@ -971,7 +970,7 @@ VALUE Backend_splice(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
   io_verify_blocking_mode(dest_fptr, dest, Qfalse);

   watcher.fiber = Qnil;
-
+
   while (1) {
     backend->base.op_count++;
     ssize_t n = read(src_fptr->fd, buf, len);

@@ -1047,7 +1046,7 @@ VALUE Backend_splice_to_eof(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {

   watcher.fiber = Qnil;

-  while (1) {
+  while (1) {
     char *ptr = buf;
     while (1) {
       backend->base.op_count++;
@@ -1147,33 +1146,40 @@ VALUE Backend_sleep(VALUE self, VALUE duration) {
 noreturn VALUE Backend_timer_loop(VALUE self, VALUE interval) {
   Backend_t *backend;
   struct libev_timer watcher;
-  double interval_d = NUM2DBL(interval);
-
-  GetBackend(self, backend);
   watcher.fiber = rb_fiber_current();
+  uint64_t interval_ns = NUM2DBL(interval) * 1e9;
+  uint64_t next_time_ns = 0;
+  VALUE resume_value = Qnil;

-  double next_time = 0.;
+  GetBackend(self, backend);

   while (1) {
-    double now = current_time();
-    if (next_time == 0.) next_time = current_time() + interval_d;
-    double sleep_duration = next_time - now;
-    if (sleep_duration < 0) sleep_duration = 0;
-
-    VALUE switchpoint_result = Qnil;
-    ev_timer_init(&watcher.timer, Backend_timer_callback, sleep_duration, 0.);
-    ev_timer_start(backend->ev_loop, &watcher.timer);
-    backend->base.op_count++;
-    switchpoint_result = backend_await((struct Backend_base *)backend);
-    ev_timer_stop(backend->ev_loop, &watcher.timer);
-    RAISE_IF_EXCEPTION(switchpoint_result);
-    RB_GC_GUARD(switchpoint_result);
+    uint64_t now_ns = current_time_ns();
+    if (next_time_ns == 0) next_time_ns = now_ns + interval_ns;
+    double sleep_duration = ((double)(next_time_ns - now_ns))/1e9;
+
+    if (next_time_ns > now_ns) {
+      double sleep_duration = ((double)(next_time_ns - now_ns))/1e9;
+      ev_timer_init(&watcher.timer, Backend_timer_callback, sleep_duration, 0.);
+      ev_timer_start(backend->ev_loop, &watcher.timer);
+      backend->base.op_count++;
+      resume_value = backend_await((struct Backend_base *)backend);
+      ev_timer_stop(backend->ev_loop, &watcher.timer);
+      RAISE_IF_EXCEPTION(resume_value);
+    }
+    else {
+      resume_value = backend_snooze();
+      RAISE_IF_EXCEPTION(resume_value);
+    }

     rb_yield(Qnil);
-    do {
-      next_time += interval_d;
-    } while (next_time <= now);
+
+    while (1) {
+      next_time_ns += interval_ns;
+      if (next_time_ns > now_ns) break;
+    }
   }
+  RB_GC_GUARD(resume_value);
 }

 struct libev_timeout {
@@ -1219,7 +1225,7 @@ VALUE Backend_timeout(int argc,VALUE *argv, VALUE self) {

   struct Backend_timeout_ctx timeout_ctx = {backend, &watcher};
   result = rb_ensure(Backend_timeout_ensure_safe, Qnil, Backend_timeout_ensure, (VALUE)&timeout_ctx);
-
+
   if (result == timeout) {
     if (exception == Qnil) return move_on_value;
     RAISE_EXCEPTION(backend_timeout_exception(exception));

@@ -1337,7 +1343,7 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
     else
       rb_raise(rb_eRuntimeError, "Invalid op specified or bad op arity");
   }
-
+
   RB_GC_GUARD(result);
   return result;
 }

@@ -1386,14 +1392,14 @@ inline int splice_chunks_write(Backend_t *backend, int fd, VALUE str, struct lib
   return 0;
 }

-static inline int splice_chunks_splice(Backend_t *backend, int src_fd, int dest_fd, int maxlen,
+static inline int splice_chunks_splice(Backend_t *backend, int src_fd, int dest_fd, int maxlen,
   struct libev_rw_io *watcher, VALUE *result, int *chunk_len) {
 #ifdef POLYPHONY_LINUX
   backend->base.op_count++;
   while (1) {
     *chunk_len = splice(src_fd, 0, dest_fd, 0, maxlen, 0);
     if (*chunk_len >= 0) return 0;
-
+
     int err = errno;
     if (err != EWOULDBLOCK && err != EAGAIN) return err;

@@ -1489,7 +1495,7 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VAL
     err = splice_chunks_splice(backend, src_fptr->fd, pipefd[1], maxlen, &watcher, &result, &chunk_len);
     if (err == -1) goto error; else if (err) goto syscallerror;
     if (chunk_len == 0) break;
-
+
     total += chunk_len;
     chunk_len_value = INT2NUM(chunk_len);