polyphony 0.69 → 0.73

Files changed (79)
  1. checksums.yaml +4 -4
  2. data/.github/FUNDING.yml +1 -0
  3. data/.github/workflows/test.yml +2 -2
  4. data/.gitignore +3 -1
  5. data/CHANGELOG.md +33 -4
  6. data/Gemfile.lock +2 -2
  7. data/TODO.md +1 -24
  8. data/bin/pdbg +1 -1
  9. data/bin/polyphony-debug +0 -0
  10. data/bin/stress.rb +0 -0
  11. data/bin/test +0 -0
  12. data/docs/_user-guide/all-about-timers.md +1 -1
  13. data/docs/api-reference/exception.md +5 -1
  14. data/docs/api-reference/fiber.md +2 -2
  15. data/docs/faq.md +1 -1
  16. data/docs/getting-started/overview.md +8 -8
  17. data/docs/getting-started/tutorial.md +3 -3
  18. data/docs/main-concepts/concurrency.md +1 -1
  19. data/docs/main-concepts/extending.md +3 -3
  20. data/docs/main-concepts/fiber-scheduling.md +1 -1
  21. data/examples/core/calc.rb +37 -0
  22. data/examples/core/calc_with_restart.rb +40 -0
  23. data/examples/core/calc_with_supervise.rb +37 -0
  24. data/examples/core/message_based_supervision.rb +1 -1
  25. data/examples/core/ring.rb +29 -0
  26. data/examples/io/rack_server.rb +1 -1
  27. data/examples/io/tunnel.rb +1 -1
  28. data/examples/performance/fiber_transfer.rb +1 -1
  29. data/examples/performance/line_splitting.rb +1 -1
  30. data/examples/performance/thread-vs-fiber/compare.rb +1 -1
  31. data/ext/polyphony/backend_common.c +31 -7
  32. data/ext/polyphony/backend_common.h +2 -1
  33. data/ext/polyphony/backend_io_uring.c +57 -67
  34. data/ext/polyphony/backend_io_uring_context.c +1 -1
  35. data/ext/polyphony/backend_io_uring_context.h +1 -1
  36. data/ext/polyphony/backend_libev.c +38 -30
  37. data/ext/polyphony/extconf.rb +25 -13
  38. data/ext/polyphony/polyphony.h +5 -1
  39. data/ext/polyphony/queue.c +2 -2
  40. data/ext/polyphony/runqueue_ring_buffer.c +3 -2
  41. data/ext/polyphony/thread.c +1 -1
  42. data/lib/polyphony/adapters/irb.rb +11 -1
  43. data/lib/polyphony/{extensions → core}/debug.rb +0 -0
  44. data/lib/polyphony/core/global_api.rb +3 -6
  45. data/lib/polyphony/core/timer.rb +2 -2
  46. data/lib/polyphony/debugger.rb +3 -3
  47. data/lib/polyphony/extensions/exception.rb +45 -0
  48. data/lib/polyphony/extensions/fiber.rb +30 -16
  49. data/lib/polyphony/extensions/io.rb +2 -2
  50. data/lib/polyphony/extensions/{core.rb → kernel.rb} +0 -73
  51. data/lib/polyphony/extensions/openssl.rb +20 -5
  52. data/lib/polyphony/extensions/process.rb +19 -0
  53. data/lib/polyphony/extensions/socket.rb +3 -4
  54. data/lib/polyphony/extensions/timeout.rb +10 -0
  55. data/lib/polyphony/extensions.rb +9 -0
  56. data/lib/polyphony/net.rb +0 -1
  57. data/lib/polyphony/version.rb +1 -1
  58. data/lib/polyphony.rb +2 -5
  59. data/polyphony.gemspec +1 -1
  60. data/test/coverage.rb +2 -2
  61. data/test/stress.rb +1 -1
  62. data/test/test_backend.rb +12 -12
  63. data/test/test_event.rb +1 -1
  64. data/test/test_ext.rb +1 -1
  65. data/test/test_fiber.rb +52 -12
  66. data/test/test_global_api.rb +16 -4
  67. data/test/test_io.rb +3 -3
  68. data/test/test_process_supervision.rb +39 -10
  69. data/test/test_queue.rb +6 -6
  70. data/test/test_signal.rb +20 -1
  71. data/test/test_socket.rb +12 -10
  72. data/test/test_supervise.rb +249 -81
  73. data/test/test_sync.rb +2 -2
  74. data/test/test_thread.rb +22 -2
  75. data/test/test_thread_pool.rb +1 -1
  76. data/test/test_throttler.rb +1 -1
  77. data/test/test_timer.rb +2 -2
  78. data/test/test_trace.rb +1 -1
  79. metadata +18 -9

data/ext/polyphony/backend_common.c +31 -7

@@ -31,10 +31,28 @@ inline void backend_base_mark(struct Backend_base *base) {
   runqueue_mark(&base->parked_runqueue);
 }
 
+void backend_base_reset(struct Backend_base *base) {
+  runqueue_finalize(&base->runqueue);
+  runqueue_finalize(&base->parked_runqueue);
+
+  runqueue_initialize(&base->runqueue);
+  runqueue_initialize(&base->parked_runqueue);
+
+  base->currently_polling = 0;
+  base->op_count = 0;
+  base->switch_count = 0;
+  base->poll_count = 0;
+  base->pending_count = 0;
+  base->idle_gc_period = 0;
+  base->idle_gc_last_time = 0;
+  base->idle_proc = Qnil;
+  base->trace_proc = Qnil;
+}
+
 const unsigned int ANTI_STARVE_SWITCH_COUNT_THRESHOLD = 64;
 
 inline void conditional_nonblocking_poll(VALUE backend, struct Backend_base *base, VALUE current, VALUE next) {
-  if ((base->switch_count % ANTI_STARVE_SWITCH_COUNT_THRESHOLD) == 0 || next == current)
+  if ((base->switch_count % ANTI_STARVE_SWITCH_COUNT_THRESHOLD) == 0 || next == current)
     Backend_poll(backend, Qnil);
 }
 
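The new backend_base_reset consolidates post-fork cleanup that each backend previously did piecemeal: it finalizes and reinitializes both runqueues and zeroes every scheduler counter, so a forked child starts with no stale fibers or statistics inherited from the parent. Both backends now call it from their Backend_post_fork hooks, as the backend_io_uring.c and backend_libev.c hunks further down show. A minimal sketch of the call pattern (names as in the hunks below; the backend-specific ring/loop re-creation is elided):

    /* Sketch only: the actual hooks appear in the hunks below. */
    VALUE backend_post_fork_sketch(VALUE self) {
      Backend_t *backend;
      GetBackend(self, backend);
      /* ...re-create the io_uring ring or libev loop... */
      backend_base_reset(&backend->base); /* child starts with clean runqueues */
      return self;
    }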
@@ -44,7 +62,7 @@ VALUE backend_base_switch_fiber(VALUE backend, struct Backend_base *base) {
   unsigned int pending_ops_count = base->pending_count;
   unsigned int backend_was_polled = 0;
   unsigned int idle_tasks_run_count = 0;
-
+
   base->switch_count++;
   COND_TRACE(base, 2, SYM_fiber_switchpoint, current_fiber);
 
@@ -64,7 +82,7 @@ VALUE backend_base_switch_fiber(VALUE backend, struct Backend_base *base) {
 
       break;
     }
-
+
     if (!idle_tasks_run_count) {
       idle_tasks_run_count++;
       backend_run_idle_tasks(base);
@@ -94,7 +112,7 @@ void backend_base_schedule_fiber(VALUE thread, VALUE backend, struct Backend_bas
 
   COND_TRACE(base, 4, SYM_fiber_schedule, fiber, value, prioritize ? Qtrue : Qfalse);
 
-  runqueue_t *runqueue = rb_ivar_get(fiber, ID_ivar_parked) == Qtrue ?
+  runqueue_t *runqueue = rb_ivar_get(fiber, ID_ivar_parked) == Qtrue ?
     &base->parked_runqueue : &base->runqueue;
 
   (prioritize ? runqueue_unshift : runqueue_push)(runqueue, fiber, value, already_runnable);
@@ -184,7 +202,6 @@ inline rb_encoding* io_read_encoding(rb_io_t *fptr) {
 }
 
 inline VALUE io_enc_str(VALUE str, rb_io_t *fptr) {
-  OBJ_TAINT(str);
   rb_enc_associate(str, io_read_encoding(fptr));
   return str;
 }
@@ -228,6 +245,13 @@ inline double current_time() {
   return t / 1e9;
 }
 
+inline uint64_t current_time_ns() {
+  struct timespec ts;
+  clock_gettime(CLOCK_MONOTONIC, &ts);
+  uint64_t ns = ts.tv_sec;
+  return ns * 1e9 + ts.tv_nsec;
+}
+
 inline VALUE backend_timeout_exception(VALUE exception) {
   if (rb_obj_is_kind_of(exception, rb_cArray) == Qtrue)
     return rb_funcall(rb_ary_entry(exception, 0), ID_new, 1, rb_ary_entry(exception, 1));
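current_time_ns gives the backends a monotonic clock in integer nanoseconds; the reworked timer loops below use it instead of floating-point seconds so that repeated deadline arithmetic cannot drift through accumulated rounding error. Note that in the version above, ns * 1e9 promotes the computation to double before converting back to uint64_t, which loses nanosecond precision once the uptime exceeds roughly 2^53 nanoseconds (about 104 days). An all-integer variant (a sketch, not what the gem ships) avoids that:

    #include <stdint.h>
    #include <time.h>

    /* Sketch: the ULL constant keeps the arithmetic in uint64_t throughout,
       so no precision is lost to an intermediate double. */
    static inline uint64_t current_time_ns_integer(void) {
      struct timespec ts;
      clock_gettime(CLOCK_MONOTONIC, &ts);
      return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
    }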
@@ -282,7 +306,7 @@ inline void io_verify_blocking_mode(rb_io_t *fptr, VALUE io, VALUE blocking) {
   int flags = fcntl(fptr->fd, F_GETFL);
   if (flags == -1) return;
   int is_nonblocking = flags & O_NONBLOCK;
-
+
   if (blocking == Qtrue) {
     if (!is_nonblocking) return;
     flags &= ~O_NONBLOCK;
@@ -357,7 +381,7 @@ void backend_setup_stats_symbols() {
   SYM_switch_count = ID2SYM(rb_intern("switch_count"));
   SYM_poll_count = ID2SYM(rb_intern("poll_count"));
   SYM_pending_ops = ID2SYM(rb_intern("pending_ops"));
-
+
   rb_global_variable(&SYM_runqueue_size);
   rb_global_variable(&SYM_runqueue_length);
   rb_global_variable(&SYM_runqueue_max_length);
data/ext/polyphony/backend_common.h +2 -1

@@ -32,6 +32,7 @@ struct Backend_base {
 void backend_base_initialize(struct Backend_base *base);
 void backend_base_finalize(struct Backend_base *base);
 void backend_base_mark(struct Backend_base *base);
+void backend_base_reset(struct Backend_base *base);
 VALUE backend_base_switch_fiber(VALUE backend, struct Backend_base *base);
 void backend_base_schedule_fiber(VALUE thread, VALUE backend, struct Backend_base *base, VALUE fiber, VALUE value, int prioritize);
 void backend_base_park_fiber(struct Backend_base *base, VALUE fiber);
@@ -81,7 +82,6 @@ VALUE backend_snooze();
   shrinkable = io_setstrbuf(&str, len); \
   buf = RSTRING_PTR(str); \
   total = 0; \
-  OBJ_TAINT(str); \
 }
 
 #define READ_LOOP_YIELD_STR() { \
@@ -100,6 +100,7 @@ VALUE backend_snooze();
 
 void rectify_io_file_pos(rb_io_t *fptr);
 double current_time();
+uint64_t current_time_ns();
 VALUE backend_timeout_exception(VALUE exception);
 VALUE Backend_timeout_ensure_safe(VALUE arg);
 VALUE Backend_timeout_ensure_safe(VALUE arg);
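Note: the OBJ_TAINT deletions in this header, in backend_common.c above, and in the read/recv paths of both backends below track Ruby's removal of the taint mechanism: taint checking was deprecated in Ruby 2.7 and became a no-op in 3.0, so these calls no longer did anything.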
data/ext/polyphony/backend_io_uring.c +57 -67

@@ -106,9 +106,7 @@ VALUE Backend_post_fork(VALUE self) {
   io_uring_queue_exit(&backend->ring);
   io_uring_queue_init(backend->prepared_limit, &backend->ring, 0);
   context_store_free(&backend->store);
-  backend->base.currently_polling = 0;
-  backend->base.pending_count = 0;
-  backend->pending_sqes = 0;
+  backend_base_reset(&backend->base);
 
   return self;
 }
@@ -129,7 +127,7 @@ void *io_uring_backend_poll_without_gvl(void *ptr) {
 
 // copied from queue.c
 static inline bool cq_ring_needs_flush(struct io_uring *ring) {
-  return IO_URING_READ_ONCE(*ring->sq.kflags) & IORING_SQ_CQ_OVERFLOW;
+  return IO_URING_READ_ONCE(*ring->sq.kflags) & IORING_SQ_CQ_OVERFLOW;
 }
 
 static inline void io_uring_backend_handle_completion(struct io_uring_cqe *cqe, Backend_t *backend) {
@@ -143,13 +141,13 @@ static inline void io_uring_backend_handle_completion(struct io_uring_cqe *cqe,
   context_store_release(&backend->store, ctx);
 }
 
-// adapted from io_uring_peek_batch_cqe in queue.c
+// adapted from io_uring_peek_batch_cqe in queue.c
 // this peeks at cqes and handles each available cqe
 void io_uring_backend_handle_ready_cqes(Backend_t *backend) {
   struct io_uring *ring = &backend->ring;
-  bool overflow_checked = false;
+  bool overflow_checked = false;
   struct io_uring_cqe *cqe;
-  unsigned head;
+  unsigned head;
   unsigned cqe_count;
 
 again:
@@ -160,16 +158,16 @@ again:
   }
   io_uring_cq_advance(ring, cqe_count);
 
-  if (overflow_checked) goto done;
+  if (overflow_checked) goto done;
 
-  if (cq_ring_needs_flush(ring)) {
-    __sys_io_uring_enter(ring->ring_fd, 0, 0, IORING_ENTER_GETEVENTS, NULL);
-    overflow_checked = true;
-    goto again;
-  }
+  if (cq_ring_needs_flush(ring)) {
+    __sys_io_uring_enter(ring->ring_fd, 0, 0, IORING_ENTER_GETEVENTS, NULL);
+    overflow_checked = true;
+    goto again;
+  }
 
 done:
-  return;
+  return;
 }
 
 void io_uring_backend_poll(Backend_t *backend) {
@@ -202,19 +200,12 @@ inline VALUE Backend_poll(VALUE self, VALUE blocking) {
   }
 
   COND_TRACE(&backend->base, 2, SYM_fiber_event_poll_enter, rb_fiber_current());
-  // if (SHOULD_TRACE(&backend->base))
-  //   printf(
-  //     "io_uring_poll(blocking_mode: %d, pending: %d, taken: %d, available: %d, runqueue: %d\n",
-  //     is_blocking,
-  //     backend->base.pending_count,
-  //     backend->store.taken_count,
-  //     backend->store.available_count,
-  //     backend->base.runqueue.entries.count
-  //   );
+
   if (is_blocking) io_uring_backend_poll(backend);
   io_uring_backend_handle_ready_cqes(backend);
-  COND_TRACE(&backend->base, 2, SYM_fiber_event_poll_leave, rb_fiber_current());
 
+  COND_TRACE(&backend->base, 2, SYM_fiber_event_poll_leave, rb_fiber_current());
+
   return self;
 }
 
@@ -257,7 +248,7 @@ VALUE Backend_wakeup(VALUE self) {
     io_uring_prep_nop(sqe);
     backend->pending_sqes = 0;
     io_uring_submit(&backend->ring);
-
+
     return Qtrue;
   }
 
@@ -342,7 +333,6 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof,
   rb_io_check_byte_readable(fptr);
   io_unset_nonblock(fptr, io);
   rectify_io_file_pos(fptr);
-  OBJ_TAINT(str);
 
   while (1) {
     VALUE resume_value = Qnil;
@@ -413,7 +403,7 @@ VALUE Backend_read_loop(VALUE self, VALUE io, VALUE maxlen) {
     op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_read(sqe, fptr->fd, buf, len, -1);
-
+
     ssize_t result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
     int completed = context_store_release(&backend->store, ctx);
     if (!completed) {
@@ -463,7 +453,7 @@ VALUE Backend_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method) {
     op_context_t *ctx = context_store_acquire(&backend->store, OP_READ);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_read(sqe, fptr->fd, buf, len, -1);
-
+
     ssize_t result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
     int completed = context_store_release(&backend->store, ctx);
     if (!completed) {
@@ -509,7 +499,7 @@ VALUE Backend_write(VALUE self, VALUE io, VALUE str) {
     op_context_t *ctx = context_store_acquire(&backend->store, OP_WRITE);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_write(sqe, fptr->fd, buf, left, 0);
-
+
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
     int completed = context_store_release(&backend->store, ctx);
     if (!completed) {
@@ -518,7 +508,7 @@ VALUE Backend_write(VALUE self, VALUE io, VALUE str) {
       return resume_value;
     }
     RB_GC_GUARD(resume_value);
-
+
     if (result < 0)
       rb_syserr_fail(-result, strerror(-result));
     else {
@@ -561,7 +551,7 @@ VALUE Backend_writev(VALUE self, VALUE io, int argc, VALUE *argv) {
     op_context_t *ctx = context_store_acquire(&backend->store, OP_WRITEV);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_writev(sqe, fptr->fd, iov_ptr, iov_count, -1);
-
+
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
     int completed = context_store_release(&backend->store, ctx);
     if (!completed) {
@@ -630,14 +620,13 @@ VALUE Backend_recv(VALUE self, VALUE io, VALUE str, VALUE length, VALUE pos) {
   rb_io_check_byte_readable(fptr);
   io_unset_nonblock(fptr, io);
   rectify_io_file_pos(fptr);
-  OBJ_TAINT(str);
 
   while (1) {
     VALUE resume_value = Qnil;
     op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_recv(sqe, fptr->fd, buf, len - total, 0);
-
+
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
     int completed = context_store_release(&backend->store, ctx);
     if (!completed) {
@@ -687,7 +676,7 @@ VALUE Backend_recv_loop(VALUE self, VALUE io, VALUE maxlen) {
     op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_recv(sqe, fptr->fd, buf, len, 0);
-
+
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
     int completed = context_store_release(&backend->store, ctx);
     if (!completed) {
@@ -736,7 +725,7 @@ VALUE Backend_recv_feed_loop(VALUE self, VALUE io, VALUE receiver, VALUE method)
     op_context_t *ctx = context_store_acquire(&backend->store, OP_RECV);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_recv(sqe, fptr->fd, buf, len, 0);
-
+
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
     int completed = context_store_release(&backend->store, ctx);
     if (!completed) {
@@ -782,7 +771,7 @@ VALUE Backend_send(VALUE self, VALUE io, VALUE str, VALUE flags) {
     op_context_t *ctx = context_store_acquire(&backend->store, OP_SEND);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_send(sqe, fptr->fd, buf, left, flags_int);
-
+
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
     int completed = context_store_release(&backend->store, ctx);
     if (!completed) {
@@ -819,7 +808,7 @@ VALUE io_uring_backend_accept(Backend_t *backend, VALUE server_socket, VALUE soc
     op_context_t *ctx = context_store_acquire(&backend->store, OP_ACCEPT);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_accept(sqe, fptr->fd, &addr, &len, 0);
-
+
     int fd = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
     int completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);
@@ -840,7 +829,7 @@ VALUE io_uring_backend_accept(Backend_t *backend, VALUE server_socket, VALUE soc
       rb_io_synchronized(fp);
 
       // if (rsock_do_not_reverse_lookup) {
-      //   fp->mode |= FMODE_NOREVLOOKUP;
+      //   fp->mode |= FMODE_NOREVLOOKUP;
       // }
       if (loop) {
         rb_yield(socket);
@@ -890,7 +879,7 @@ VALUE io_uring_backend_splice(Backend_t *backend, VALUE src, VALUE dest, VALUE m
     op_context_t *ctx = context_store_acquire(&backend->store, OP_SPLICE);
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
     io_uring_prep_splice(sqe, src_fptr->fd, -1, dest_fptr->fd, -1, NUM2INT(maxlen), 0);
-
+
     int result = io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
     int completed = context_store_release(&backend->store, ctx);
     RAISE_IF_EXCEPTION(resume_value);
@@ -946,7 +935,7 @@ VALUE Backend_connect(VALUE self, VALUE sock, VALUE host, VALUE port) {
   RAISE_IF_EXCEPTION(resume_value);
   if (!completed) return resume_value;
   RB_GC_GUARD(resume_value);
-
+
   if (result < 0) rb_syserr_fail(-result, strerror(-result));
   return sock;
 }
@@ -971,7 +960,7 @@ inline struct __kernel_timespec double_to_timespec(double duration) {
   double duration_fraction = modf(duration, &duration_integral);
   struct __kernel_timespec ts;
   ts.tv_sec = duration_integral;
-  ts.tv_nsec = floor(duration_fraction * 1000000000);
+  ts.tv_nsec = floor(duration_fraction * 1000000000);
   return ts;
 }
 
@@ -983,7 +972,7 @@ inline struct __kernel_timespec duration_to_timespec(VALUE duration) {
 int io_uring_backend_submit_timeout_and_await(Backend_t *backend, double duration, VALUE *resume_value) {
   struct __kernel_timespec ts = double_to_timespec(duration);
   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
-
+
   op_context_t *ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
   io_uring_prep_timeout(sqe, &ts, 0, 0);
   io_uring_backend_defer_submit_and_await(backend, sqe, ctx, resume_value);
@@ -1003,29 +992,34 @@ VALUE Backend_sleep(VALUE self, VALUE duration) {
 
 VALUE Backend_timer_loop(VALUE self, VALUE interval) {
   Backend_t *backend;
-  double interval_d = NUM2DBL(interval);
+  uint64_t interval_ns = NUM2DBL(interval) * 1e9;
+  uint64_t next_time_ns = 0;
+  VALUE resume_value = Qnil;
+
   GetBackend(self, backend);
-  double next_time = 0.;
 
   while (1) {
-    double now = current_time();
-    if (next_time == 0.) next_time = current_time() + interval_d;
-    double sleep_duration = next_time - now;
-    if (sleep_duration < 0) sleep_duration = 0;
-
-    VALUE resume_value = Qnil;
-    int completed = io_uring_backend_submit_timeout_and_await(backend, sleep_duration, &resume_value);
-    RAISE_IF_EXCEPTION(resume_value);
-    if (!completed) return resume_value;
-    RB_GC_GUARD(resume_value);
+    double now_ns = current_time_ns();
+    if (next_time_ns == 0) next_time_ns = now_ns + interval_ns;
+    if (next_time_ns > now_ns) {
+      double sleep_duration = ((double)(next_time_ns - now_ns))/1e9;
+      int completed = io_uring_backend_submit_timeout_and_await(backend, sleep_duration, &resume_value);
+      RAISE_IF_EXCEPTION(resume_value);
+      if (!completed) return resume_value;
+    }
+    else {
+      resume_value = backend_snooze();
+      RAISE_IF_EXCEPTION(resume_value);
+    }
 
     rb_yield(Qnil);
 
     while (1) {
-      next_time += interval_d;
-      if (next_time > now) break;
+      next_time_ns += interval_ns;
+      if (next_time_ns > now_ns) break;
    }
  }
+  RB_GC_GUARD(resume_value);
 }
 
 struct Backend_timeout_ctx {
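Both timer-loop rewrites (this one and the libev one below) implement the same fixed-rate scheduling technique: keep the next deadline as an absolute monotonic timestamp, sleep only until that deadline, and after each tick advance the deadline by whole intervals until it lies in the future again. Sleep inaccuracy and the block's own run time therefore never accumulate into drift, and when the loop falls behind it skips the missed ticks and merely snoozes (yielding to other runnable fibers) instead of sleeping. A standalone sketch of the technique, using plain nanosleep where Polyphony would suspend the fiber:

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    /* Monotonic clock in integer nanoseconds (same idea as current_time_ns). */
    static uint64_t now_ns(void) {
      struct timespec ts;
      clock_gettime(CLOCK_MONOTONIC, &ts);
      return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
    }

    /* Fixed-rate loop over absolute deadlines: jitter and the time spent in
       tick() do not accumulate, and missed deadlines are skipped. */
    static void timer_loop(uint64_t interval_ns, int ticks, void (*tick)(int)) {
      uint64_t next_time = now_ns() + interval_ns;
      for (int i = 0; i < ticks; i++) {
        uint64_t now = now_ns();
        if (next_time > now) {
          uint64_t delta = next_time - now;
          struct timespec ts = {
            (time_t)(delta / 1000000000ULL), (long)(delta % 1000000000ULL)
          };
          nanosleep(&ts, NULL); /* Polyphony suspends only the current fiber */
        }
        tick(i);
        /* advance by whole intervals until the deadline is in the future */
        do next_time += interval_ns; while (next_time <= now_ns());
      }
    }

    static void print_tick(int i) { printf("tick %d\n", i); }

    int main(void) {
      timer_loop(100000000ULL, 10, print_tick); /* 10 ticks at 100ms */
      return 0;
    }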
@@ -1053,7 +1047,7 @@ VALUE Backend_timeout(int argc, VALUE *argv, VALUE self) {
   VALUE exception;
   VALUE move_on_value = Qnil;
   rb_scan_args(argc, argv, "21", &duration, &exception, &move_on_value);
-
+
   struct __kernel_timespec ts = duration_to_timespec(duration);
   Backend_t *backend;
   GetBackend(self, backend);
@@ -1061,7 +1055,7 @@ VALUE Backend_timeout(int argc, VALUE *argv, VALUE self) {
   VALUE timeout = rb_funcall(cTimeoutException, ID_new, 0);
 
   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
-
+
   op_context_t *ctx = context_store_acquire(&backend->store, OP_TIMEOUT);
   ctx->resume_value = timeout;
   io_uring_prep_timeout(sqe, &ts, 0, 0);
@@ -1096,7 +1090,7 @@ VALUE Backend_waitpid(VALUE self, VALUE pid) {
     RAISE_IF_EXCEPTION(resume_value);
     RB_GC_GUARD(resume_value);
   }
-
+
   int status;
   pid_t ret = waitpid(pid_int, &status, WNOHANG);
   if (ret < 0) {
@@ -1246,7 +1240,7 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
     }
     rb_raise(rb_eRuntimeError, "Invalid op specified or bad op arity");
   }
-
+
   io_uring_sqe_set_data(last_sqe, ctx);
   unsigned int flags = (i == (argc - 1)) ? IOSQE_ASYNC : IOSQE_ASYNC | IOSQE_IO_LINK;
   io_uring_sqe_set_flags(last_sqe, flags);
@@ -1261,7 +1255,7 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
   int completed = context_store_release(&backend->store, ctx);
   if (!completed) {
     Backend_chain_ctx_attach_buffers(ctx, argc, argv);
-
+
     // op was not completed (an exception was raised), so we need to cancel it
     ctx->result = -ECANCELED;
     struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
@@ -1271,7 +1265,7 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
     RAISE_IF_EXCEPTION(resume_value);
    return resume_value;
   }
-
+
   RB_GC_GUARD(resume_value);
   return INT2NUM(result);
 }
@@ -1406,7 +1400,7 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VAL
 
     SPLICE_CHUNKS_AWAIT_OPS(backend, &ctx, &chunk_len, &switchpoint_result);
     if (chunk_len == 0) break;
-
+
     total += chunk_len;
     chunk_len_value = INT2NUM(chunk_len);
 
@@ -1528,10 +1522,6 @@ void Init_Backend() {
   rb_define_method(cBackend, "waitpid", Backend_waitpid, 1);
   rb_define_method(cBackend, "write", Backend_write_m, -1);
 
-  #ifdef POLYPHONY_UNSET_NONBLOCK
-  ID_ivar_is_nonblocking = rb_intern("@is_nonblocking");
-  #endif
-
   SYM_io_uring = ID2SYM(rb_intern("io_uring"));
   SYM_send = ID2SYM(rb_intern("send"));
   SYM_splice = ID2SYM(rb_intern("splice"));
data/ext/polyphony/backend_io_uring_context.c +1 -1

@@ -64,7 +64,7 @@ inline int context_store_release(op_context_store_t *store, op_context_t *ctx) {
   // printf("release %p %d (%s, ref_count: %d)\n", ctx, ctx->id, op_type_to_str(ctx->type), ctx->ref_count);
 
   assert(ctx->ref_count);
-
+
   ctx->ref_count--;
   if (ctx->ref_count) return 0;
 
data/ext/polyphony/backend_io_uring_context.h +1 -1

@@ -22,7 +22,7 @@ typedef struct op_context {
   struct op_context *prev;
   struct op_context *next;
   enum op_type type: 16;
-  unsigned int ref_count : 16;
+  unsigned int ref_count : 16;
   int id;
   int result;
   VALUE fiber;
data/ext/polyphony/backend_libev.c +38 -30

@@ -118,7 +118,7 @@ inline struct ev_loop *libev_new_loop() {
 
 static VALUE Backend_initialize(VALUE self) {
   Backend_t *backend;
-
+
   GetBackend(self, backend);
 
   backend_base_initialize(&backend->base);
@@ -157,6 +157,8 @@ VALUE Backend_post_fork(VALUE self) {
   ev_loop_destroy(backend->ev_loop);
   backend->ev_loop = EV_DEFAULT;
 
+  backend_base_reset(&backend->base);
+
   return self;
 }
 
@@ -186,7 +188,7 @@ inline void Backend_unschedule_fiber(VALUE self, VALUE fiber) {
   Backend_t *backend;
   GetBackend(self, backend);
 
-  runqueue_delete(&backend->base.runqueue, fiber);
+  runqueue_delete(&backend->base.runqueue, fiber);
 }
 
 inline VALUE Backend_switch_fiber(VALUE self) {
@@ -285,7 +287,6 @@ VALUE Backend_read(VALUE self, VALUE io, VALUE str, VALUE length, VALUE to_eof,
   io_verify_blocking_mode(fptr, io, Qfalse);
   rectify_io_file_pos(fptr);
   watcher.fiber = Qnil;
-  OBJ_TAINT(str);
 
   while (1) {
     backend->base.op_count++;
@@ -969,7 +970,7 @@ VALUE Backend_splice(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
   io_verify_blocking_mode(dest_fptr, dest, Qfalse);
 
   watcher.fiber = Qnil;
-
+
   while (1) {
     backend->base.op_count++;
     ssize_t n = read(src_fptr->fd, buf, len);
@@ -1045,7 +1046,7 @@ VALUE Backend_splice_to_eof(VALUE self, VALUE src, VALUE dest, VALUE maxlen) {
 
   watcher.fiber = Qnil;
 
-  while (1) {
+  while (1) {
     char *ptr = buf;
     while (1) {
       backend->base.op_count++;
@@ -1145,33 +1146,40 @@ VALUE Backend_sleep(VALUE self, VALUE duration) {
 noreturn VALUE Backend_timer_loop(VALUE self, VALUE interval) {
   Backend_t *backend;
   struct libev_timer watcher;
-  double interval_d = NUM2DBL(interval);
-
-  GetBackend(self, backend);
   watcher.fiber = rb_fiber_current();
+  uint64_t interval_ns = NUM2DBL(interval) * 1e9;
+  uint64_t next_time_ns = 0;
+  VALUE resume_value = Qnil;
 
-  double next_time = 0.;
+  GetBackend(self, backend);
 
   while (1) {
-    double now = current_time();
-    if (next_time == 0.) next_time = current_time() + interval_d;
-    double sleep_duration = next_time - now;
-    if (sleep_duration < 0) sleep_duration = 0;
-
-    VALUE switchpoint_result = Qnil;
-    ev_timer_init(&watcher.timer, Backend_timer_callback, sleep_duration, 0.);
-    ev_timer_start(backend->ev_loop, &watcher.timer);
-    backend->base.op_count++;
-    switchpoint_result = backend_await((struct Backend_base *)backend);
-    ev_timer_stop(backend->ev_loop, &watcher.timer);
-    RAISE_IF_EXCEPTION(switchpoint_result);
-    RB_GC_GUARD(switchpoint_result);
+    uint64_t now_ns = current_time_ns();
+    if (next_time_ns == 0) next_time_ns = now_ns + interval_ns;
+    double sleep_duration = ((double)(next_time_ns - now_ns))/1e9;
+
+    if (next_time_ns > now_ns) {
+      double sleep_duration = ((double)(next_time_ns - now_ns))/1e9;
+      ev_timer_init(&watcher.timer, Backend_timer_callback, sleep_duration, 0.);
+      ev_timer_start(backend->ev_loop, &watcher.timer);
+      backend->base.op_count++;
+      resume_value = backend_await((struct Backend_base *)backend);
+      ev_timer_stop(backend->ev_loop, &watcher.timer);
+      RAISE_IF_EXCEPTION(resume_value);
+    }
+    else {
+      resume_value = backend_snooze();
+      RAISE_IF_EXCEPTION(resume_value);
+    }
 
     rb_yield(Qnil);
-    do {
-      next_time += interval_d;
-    } while (next_time <= now);
+
+    while (1) {
+      next_time_ns += interval_ns;
+      if (next_time_ns > now_ns) break;
+    }
   }
+  RB_GC_GUARD(resume_value);
 }
 
 struct libev_timeout {
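Note: this is the same deadline arithmetic as the io_uring version above, with an ev_timer plus backend_await standing in for the timeout SQE and the same backend_snooze fallback when the loop is running behind. Two quirks are visible in the diff as shipped: the io_uring variant declares now_ns as double rather than uint64_t, and the libev variant computes sleep_duration twice; the outer declaration before the if is immediately shadowed by the identical one inside it and is dead code. The guard also matters for correctness, since next_time_ns - now_ns is unsigned and would wrap around whenever the deadline is already past.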
@@ -1217,7 +1225,7 @@ VALUE Backend_timeout(int argc,VALUE *argv, VALUE self) {
 
   struct Backend_timeout_ctx timeout_ctx = {backend, &watcher};
   result = rb_ensure(Backend_timeout_ensure_safe, Qnil, Backend_timeout_ensure, (VALUE)&timeout_ctx);
-
+
   if (result == timeout) {
     if (exception == Qnil) return move_on_value;
     RAISE_EXCEPTION(backend_timeout_exception(exception));
@@ -1335,7 +1343,7 @@ VALUE Backend_chain(int argc,VALUE *argv, VALUE self) {
     else
       rb_raise(rb_eRuntimeError, "Invalid op specified or bad op arity");
   }
-
+
   RB_GC_GUARD(result);
   return result;
 }
@@ -1384,14 +1392,14 @@ inline int splice_chunks_write(Backend_t *backend, int fd, VALUE str, struct lib
   return 0;
 }
 
-static inline int splice_chunks_splice(Backend_t *backend, int src_fd, int dest_fd, int maxlen,
+static inline int splice_chunks_splice(Backend_t *backend, int src_fd, int dest_fd, int maxlen,
   struct libev_rw_io *watcher, VALUE *result, int *chunk_len) {
 #ifdef POLYPHONY_LINUX
   backend->base.op_count++;
   while (1) {
     *chunk_len = splice(src_fd, 0, dest_fd, 0, maxlen, 0);
     if (*chunk_len >= 0) return 0;
-
+
     int err = errno;
     if (err != EWOULDBLOCK && err != EAGAIN) return err;
 
@@ -1487,7 +1495,7 @@ VALUE Backend_splice_chunks(VALUE self, VALUE src, VALUE dest, VALUE prefix, VAL
     err = splice_chunks_splice(backend, src_fptr->fd, pipefd[1], maxlen, &watcher, &result, &chunk_len);
     if (err == -1) goto error; else if (err) goto syscallerror;
     if (chunk_len == 0) break;
-
+
     total += chunk_len;
     chunk_len_value = INT2NUM(chunk_len);
 