polyphony 0.46.0 → 0.47.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +24 -0
  3. data/Gemfile.lock +1 -1
  4. data/TODO.md +54 -23
  5. data/bin/test +4 -0
  6. data/examples/core/enumerable.rb +64 -0
  7. data/examples/performance/fiber_resume.rb +43 -0
  8. data/examples/performance/fiber_transfer.rb +13 -4
  9. data/examples/performance/thread-vs-fiber/compare.rb +59 -0
  10. data/examples/performance/thread-vs-fiber/em_server.rb +33 -0
  11. data/examples/performance/thread-vs-fiber/polyphony_server.rb +9 -19
  12. data/examples/performance/thread-vs-fiber/threaded_server.rb +22 -15
  13. data/examples/performance/thread_switch.rb +44 -0
  14. data/ext/polyphony/backend_common.h +20 -0
  15. data/ext/polyphony/backend_io_uring.c +127 -16
  16. data/ext/polyphony/backend_io_uring_context.c +1 -0
  17. data/ext/polyphony/backend_io_uring_context.h +1 -0
  18. data/ext/polyphony/backend_libev.c +102 -0
  19. data/ext/polyphony/fiber.c +11 -7
  20. data/ext/polyphony/polyphony.c +3 -0
  21. data/ext/polyphony/polyphony.h +7 -7
  22. data/ext/polyphony/queue.c +99 -34
  23. data/ext/polyphony/thread.c +1 -3
  24. data/lib/polyphony/core/exceptions.rb +0 -4
  25. data/lib/polyphony/core/global_api.rb +49 -31
  26. data/lib/polyphony/extensions/core.rb +9 -15
  27. data/lib/polyphony/extensions/fiber.rb +8 -2
  28. data/lib/polyphony/extensions/openssl.rb +6 -0
  29. data/lib/polyphony/extensions/socket.rb +18 -4
  30. data/lib/polyphony/version.rb +1 -1
  31. data/test/helper.rb +1 -1
  32. data/test/stress.rb +1 -1
  33. data/test/test_backend.rb +59 -0
  34. data/test/test_fiber.rb +33 -4
  35. data/test/test_global_api.rb +85 -1
  36. data/test/test_queue.rb +117 -0
  37. data/test/test_signal.rb +18 -0
  38. data/test/test_socket.rb +2 -2
  39. metadata +8 -2

data/examples/performance/thread_switch.rb
@@ -0,0 +1,44 @@
+ # frozen_string_literal: true
+
+ require 'fiber'
+
+ class Fiber
+   attr_accessor :next
+ end
+
+ # This program shows how the performance of thread context switching degrades
+ # as the thread count increases
+
+ def run(num_threads)
+   count = 0
+
+   GC.start
+   GC.disable
+
+   threads = []
+   t0 = Time.now
+   limit = 10_000_000 / num_threads
+   num_threads.times do
+     threads << Thread.new do
+       individual_count = 0
+       loop do
+         individual_count += 1
+         count += 1
+         break if individual_count == limit
+       end
+     end
+   end
+
+   threads.each(&:join)
+   elapsed = Time.now - t0
+
+   puts "threads: #{num_threads} count: #{count} rate: #{count / elapsed}"
+ rescue Exception => e
+   puts "Stopped at #{count} threads"
+   p e
+ end
+
+ run(100)
+ run(1000)
+ run(10000)
+ run(100000)
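
For comparison, the fiber-oriented benchmarks in the same directory (fiber_transfer.rb and fiber_resume.rb, also touched in this release) measure switching between fibers instead of threads. As an illustration only, and not the gem's own code, a minimal fiber-switching counterpart using Fiber.transfer might look like this:

    # Hypothetical sketch: ping-pong between two fibers with Fiber.transfer,
    # producing a switch rate comparable to the thread numbers above.
    require 'fiber'

    def run_fibers(iterations)
      main  = Fiber.current
      other = Fiber.new { loop { main.transfer } }

      t0 = Time.now
      iterations.times { other.transfer }
      elapsed = Time.now - t0
      puts "fiber transfers: #{iterations} rate: #{iterations / elapsed}"
    end

    run_fibers(1_000_000)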

data/ext/polyphony/backend_common.h
@@ -1,3 +1,5 @@
+ #include <time.h>
+
  #include "ruby.h"
  #include "ruby/io.h"
 
@@ -107,3 +109,21 @@ inline void rectify_io_file_pos(rb_io_t *fptr) {
      fptr->rbuf.len = 0;
    }
  }
+
+ inline double current_time() {
+   struct timespec ts;
+   clock_gettime(CLOCK_MONOTONIC, &ts);
+   long long ns = ts.tv_sec;
+   ns = ns * 1000000000 + ts.tv_nsec;
+   double t = ns;
+   return t / 1e9;
+ }
+
+ inline VALUE backend_timeout_exception(VALUE exception) {
+   if (RTEST(rb_obj_is_kind_of(exception, rb_cArray)))
+     return rb_funcall(rb_ary_entry(exception, 0), ID_new, 1, rb_ary_entry(exception, 1));
+   else if (RTEST(rb_obj_is_kind_of(exception, rb_cClass)))
+     return rb_funcall(exception, ID_new, 0);
+   else
+     return rb_funcall(rb_eRuntimeError, ID_new, 1, exception);
+ }
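
The new backend_timeout_exception helper normalizes the exception spec passed to the timeout API: a two-element array is treated as [exception class, message], a bare class is instantiated with no arguments, and anything else is wrapped in a RuntimeError. A rough Ruby rendering of that dispatch, for illustration only (not code from the gem):

    # Hypothetical Ruby equivalent of the C helper's behaviour.
    def backend_timeout_exception(exception)
      case exception
      when Array then exception[0].new(exception[1])  # [ExceptionClass, message]
      when Class then exception.new                   # bare exception class
      else RuntimeError.new(exception)                # e.g. a message string
      end
    end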

data/ext/polyphony/backend_io_uring.c
@@ -7,18 +7,18 @@
  #include <fcntl.h>
  #include <netinet/in.h>
  #include <arpa/inet.h>
-
- #include "polyphony.h"
- #include "../liburing/liburing.h"
- #include "ruby/thread.h"
- #include "backend_io_uring_context.h"
-
+ #include <stdnoreturn.h>
  #include <poll.h>
  #include <sys/types.h>
  #include <sys/eventfd.h>
  #include <sys/wait.h>
  #include <errno.h>
 
+ #include "polyphony.h"
+ #include "../liburing/liburing.h"
+ #include "ruby/thread.h"
+ #include "backend_io_uring_context.h"
+
  #ifndef __NR_pidfd_open
  #define __NR_pidfd_open 434 /* System call # on most architectures */
  #endif
@@ -171,7 +171,7 @@ void io_uring_backend_handle_completion(struct io_uring_cqe *cqe, Backend_t *bac
      // otherwise, we mark it as completed, schedule the fiber and let it deal
      // with releasing the context
      ctx->completed = 1;
-     if (ctx->result != -ECANCELED) Fiber_make_runnable(ctx->fiber, Qnil);
+     if (ctx->result != -ECANCELED) Fiber_make_runnable(ctx->fiber, ctx->resume_value);
    }
  }
 
@@ -304,6 +304,7 @@ int io_uring_backend_defer_submit_and_await(
 
    if (value_ptr) (*value_ptr) = switchpoint_result;
    RB_GC_GUARD(switchpoint_result);
+   RB_GC_GUARD(ctx->fiber);
    return ctx->result;
  }
 
@@ -773,29 +774,137 @@ VALUE Backend_wait_io(VALUE self, VALUE io, VALUE write) {
    return self;
  }
 
- VALUE Backend_sleep(VALUE self, VALUE duration) {
-   Backend_t *backend;
-   struct io_uring_sqe *sqe;
+ inline struct __kernel_timespec double_to_timespec(double duration) {
    double duration_integral;
-   double duration_fraction = modf(NUM2DBL(duration), &duration_integral);
+   double duration_fraction = modf(duration, &duration_integral);
    struct __kernel_timespec ts;
-
-   GetBackend(self, backend);
-   sqe = io_uring_get_sqe(&backend->ring);
    ts.tv_sec = duration_integral;
    ts.tv_nsec = floor(duration_fraction * 1000000000);
+   return ts;
+ }
+
+ inline struct __kernel_timespec duration_to_timespec(VALUE duration) {
+   return double_to_timespec(NUM2DBL(duration));
+ }
+
+ // returns true if completed, 0 otherwise
+ int io_uring_backend_submit_timeout_and_await(Backend_t *backend, double duration, VALUE *resume_value) {
+   struct __kernel_timespec ts = double_to_timespec(duration);
+   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
 
-   VALUE resume_value = Qnil;
    op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_TIMEOUT);
    io_uring_prep_timeout(sqe, &ts, 0, 0);
 
-   io_uring_backend_defer_submit_and_await(backend, sqe, ctx, &resume_value);
+   io_uring_backend_defer_submit_and_await(backend, sqe, ctx, resume_value);
    OP_CONTEXT_RELEASE(&backend->store, ctx);
+   return ctx->completed;
+ }
+
+ VALUE Backend_sleep(VALUE self, VALUE duration) {
+   Backend_t *backend;
+   GetBackend(self, backend);
+
+   VALUE resume_value = Qnil;
+   io_uring_backend_submit_timeout_and_await(backend, NUM2DBL(duration), &resume_value);
    RAISE_IF_EXCEPTION(resume_value);
    RB_GC_GUARD(resume_value);
    return resume_value;
  }
 
+ VALUE Backend_timer_loop(VALUE self, VALUE interval) {
+   Backend_t *backend;
+   double interval_d = NUM2DBL(interval);
+   GetBackend(self, backend);
+   double next_time = 0.;
+
+   while (1) {
+     double now = current_time();
+     if (next_time == 0.) next_time = current_time() + interval_d;
+     double sleep_duration = next_time - now;
+     if (sleep_duration < 0) sleep_duration = 0;
+
+     VALUE resume_value = Qnil;
+     int completed = io_uring_backend_submit_timeout_and_await(backend, sleep_duration, &resume_value);
+     RAISE_IF_EXCEPTION(resume_value);
+     if (!completed) return resume_value;
+     RB_GC_GUARD(resume_value);
+
+     rb_yield(Qnil);
+
+     while (1) {
+       next_time += interval_d;
+       if (next_time > now) break;
+     }
+   }
+ }
+
+ VALUE Backend_timeout_safe(VALUE arg) {
+   return rb_yield(arg);
+ }
+
+ VALUE Backend_timeout_rescue(VALUE arg, VALUE exception) {
+   return exception;
+ }
+
+ VALUE Backend_timeout_ensure_safe(VALUE arg) {
+   return rb_rescue2(Backend_timeout_safe, Qnil, Backend_timeout_rescue, Qnil, rb_eException, (VALUE)0);
+ }
+
+ struct Backend_timeout_ctx {
+   Backend_t *backend;
+   op_context_t *ctx;
+ };
+
+ VALUE Backend_timeout_ensure(VALUE arg) {
+   struct Backend_timeout_ctx *timeout_ctx = (struct Backend_timeout_ctx *)arg;
+   if (!timeout_ctx->ctx->completed) {
+     timeout_ctx->ctx->result = -ECANCELED;
+
+     // op was not completed, so we need to cancel it
+     struct io_uring_sqe *sqe = io_uring_get_sqe(&timeout_ctx->backend->ring);
+     io_uring_prep_cancel(sqe, timeout_ctx->ctx, 0);
+     timeout_ctx->backend->pending_sqes = 0;
+     io_uring_submit(&timeout_ctx->backend->ring);
+   }
+   OP_CONTEXT_RELEASE(&timeout_ctx->backend->store, timeout_ctx->ctx);
+   return Qnil;
+ }
+
+ VALUE Backend_timeout(int argc, VALUE *argv, VALUE self) {
+   VALUE duration;
+   VALUE exception;
+   VALUE move_on_value = Qnil;
+   rb_scan_args(argc, argv, "21", &duration, &exception, &move_on_value);
+
+   struct __kernel_timespec ts = duration_to_timespec(duration);
+   Backend_t *backend;
+   GetBackend(self, backend);
+   VALUE result = Qnil;
+   VALUE timeout = rb_funcall(cTimeoutException, ID_new, 0);
+
+   struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+
+   op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_TIMEOUT);
+   ctx->resume_value = timeout;
+   io_uring_prep_timeout(sqe, &ts, 0, 0);
+   io_uring_sqe_set_data(sqe, ctx);
+   io_uring_sqe_set_flags(sqe, IOSQE_ASYNC);
+   io_uring_backend_defer_submit(backend);
+
+   struct Backend_timeout_ctx timeout_ctx = {backend, ctx};
+   result = rb_ensure(Backend_timeout_ensure_safe, Qnil, Backend_timeout_ensure, (VALUE)&timeout_ctx);
+
+   if (result == timeout) {
+     if (exception == Qnil) return move_on_value;
+     RAISE_EXCEPTION(backend_timeout_exception(exception));
+   }
+
+   RAISE_IF_EXCEPTION(result);
+   RB_GC_GUARD(result);
+   RB_GC_GUARD(timeout);
+   return result;
+ }
+
  VALUE Backend_waitpid(VALUE self, VALUE pid) {
    Backend_t *backend;
    int pid_int = NUM2INT(pid);
@@ -864,6 +973,8 @@ void Init_Backend() {
    rb_define_method(cBackend, "connect", Backend_connect, 3);
    rb_define_method(cBackend, "wait_io", Backend_wait_io, 2);
    rb_define_method(cBackend, "sleep", Backend_sleep, 1);
+   rb_define_method(cBackend, "timer_loop", Backend_timer_loop, 1);
+   rb_define_method(cBackend, "timeout", Backend_timeout, -1);
    rb_define_method(cBackend, "waitpid", Backend_waitpid, 1);
    rb_define_method(cBackend, "wait_event", Backend_wait_event, 1);
 
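These registrations expose two new backend operations: #timer_loop, which yields its block roughly every interval seconds while compensating for drift, and #timeout, which runs its block and, if the deadline passes first, either raises the exception built by backend_timeout_exception or returns move_on_value when the exception argument is nil. A hedged usage sketch, assuming the backend is reachable via Thread.current.backend and using hypothetical do_slow_work / collect_sample helpers:

    require 'polyphony'

    backend = Thread.current.backend

    # Raise if the block takes longer than 1 second.
    backend.timeout(1, RuntimeError) { do_slow_work }

    # Same, but with a custom message: [exception class, message].
    backend.timeout(1, [RuntimeError, 'took too long']) { do_slow_work }

    # Return :too_slow instead of raising when the deadline passes.
    result = backend.timeout(1, nil, :too_slow) { do_slow_work }

    # Run a block roughly every 10 ms, compensating for drift.
    backend.timer_loop(0.01) { collect_sample }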

data/ext/polyphony/backend_io_uring_context.c
@@ -42,6 +42,7 @@ inline op_context_t *context_store_acquire(op_context_store_t *store, enum op_ty
 
    ctx->type = type;
    ctx->fiber = rb_fiber_current();
+   ctx->resume_value = Qnil;
    ctx->completed = 0;
    ctx->result = 0;
 

data/ext/polyphony/backend_io_uring_context.h
@@ -24,6 +24,7 @@ typedef struct op_context {
    int id;
    int result;
    VALUE fiber;
+   VALUE resume_value;
  } op_context_t;
 
  typedef struct op_context_store {

data/ext/polyphony/backend_libev.c
@@ -7,6 +7,7 @@
  #include <fcntl.h>
  #include <netinet/in.h>
  #include <arpa/inet.h>
+ #include <stdnoreturn.h>
 
  #include "polyphony.h"
  #include "../libev/ev.h"
@@ -688,6 +689,105 @@ VALUE Backend_sleep(VALUE self, VALUE duration) {
    return switchpoint_result;
  }
 
+ noreturn VALUE Backend_timer_loop(VALUE self, VALUE interval) {
+   Backend_t *backend;
+   struct libev_timer watcher;
+   double interval_d = NUM2DBL(interval);
+
+   GetBackend(self, backend);
+   watcher.fiber = rb_fiber_current();
+
+   double next_time = 0.;
+
+   while (1) {
+     double now = current_time();
+     if (next_time == 0.) next_time = current_time() + interval_d;
+     double sleep_duration = next_time - now;
+     if (sleep_duration < 0) sleep_duration = 0;
+
+     VALUE switchpoint_result = Qnil;
+     ev_timer_init(&watcher.timer, Backend_timer_callback, sleep_duration, 0.);
+     ev_timer_start(backend->ev_loop, &watcher.timer);
+     switchpoint_result = backend_await(backend);
+     ev_timer_stop(backend->ev_loop, &watcher.timer);
+     RAISE_IF_EXCEPTION(switchpoint_result);
+     RB_GC_GUARD(switchpoint_result);
+
+     rb_yield(Qnil);
+
+     while (1) {
+       next_time += interval_d;
+       if (next_time > now) break;
+     }
+   }
+ }
+
+ VALUE Backend_timeout_safe(VALUE arg) {
+   return rb_yield(arg);
+ }
+
+ VALUE Backend_timeout_rescue(VALUE arg, VALUE exception) {
+   return exception;
+ }
+
+ VALUE Backend_timeout_ensure_safe(VALUE arg) {
+   return rb_rescue2(Backend_timeout_safe, Qnil, Backend_timeout_rescue, Qnil, rb_eException, (VALUE)0);
+ }
+
+ struct libev_timeout {
+   struct ev_timer timer;
+   VALUE fiber;
+   VALUE resume_value;
+ };
+
+ struct Backend_timeout_ctx {
+   Backend_t *backend;
+   struct libev_timeout *watcher;
+ };
+
+ VALUE Backend_timeout_ensure(VALUE arg) {
+   struct Backend_timeout_ctx *timeout_ctx = (struct Backend_timeout_ctx *)arg;
+   ev_timer_stop(timeout_ctx->backend->ev_loop, &(timeout_ctx->watcher->timer));
+   return Qnil;
+ }
+
+ void Backend_timeout_callback(EV_P_ ev_timer *w, int revents)
+ {
+   struct libev_timeout *watcher = (struct libev_timeout *)w;
+   Fiber_make_runnable(watcher->fiber, watcher->resume_value);
+ }
+
+ VALUE Backend_timeout(int argc, VALUE *argv, VALUE self) {
+   VALUE duration;
+   VALUE exception;
+   VALUE move_on_value = Qnil;
+   rb_scan_args(argc, argv, "21", &duration, &exception, &move_on_value);
+
+   Backend_t *backend;
+   struct libev_timeout watcher;
+   VALUE result = Qnil;
+   VALUE timeout = rb_funcall(cTimeoutException, ID_new, 0);
+
+   GetBackend(self, backend);
+   watcher.fiber = rb_fiber_current();
+   watcher.resume_value = timeout;
+   ev_timer_init(&watcher.timer, Backend_timeout_callback, NUM2DBL(duration), 0.);
+   ev_timer_start(backend->ev_loop, &watcher.timer);
+
+   struct Backend_timeout_ctx timeout_ctx = {backend, &watcher};
+   result = rb_ensure(Backend_timeout_ensure_safe, Qnil, Backend_timeout_ensure, (VALUE)&timeout_ctx);
+
+   if (result == timeout) {
+     if (exception == Qnil) return move_on_value;
+     RAISE_EXCEPTION(backend_timeout_exception(exception));
+   }
+
+   RAISE_IF_EXCEPTION(result);
+   RB_GC_GUARD(result);
+   RB_GC_GUARD(timeout);
+   return result;
+ }
+
  struct libev_child {
    struct ev_child child;
    VALUE fiber;
@@ -777,6 +877,8 @@ void Init_Backend() {
    rb_define_method(cBackend, "send", Backend_write, 2);
    rb_define_method(cBackend, "wait_io", Backend_wait_io, 2);
    rb_define_method(cBackend, "sleep", Backend_sleep, 1);
+   rb_define_method(cBackend, "timer_loop", Backend_timer_loop, 1);
+   rb_define_method(cBackend, "timeout", Backend_timeout, -1);
    rb_define_method(cBackend, "waitpid", Backend_waitpid, 1);
    rb_define_method(cBackend, "wait_event", Backend_wait_event, 1);
 

data/ext/polyphony/fiber.c
@@ -21,7 +21,7 @@ VALUE SYM_fiber_terminate;
 
  static VALUE Fiber_safe_transfer(int argc, VALUE *argv, VALUE self) {
    VALUE arg = (argc == 0) ? Qnil : argv[0];
-   VALUE ret = rb_funcall(self, ID_transfer, 1, arg);
+   VALUE ret = FIBER_TRANSFER(self, arg);
 
    RAISE_IF_EXCEPTION(ret);
    RB_GC_GUARD(ret);
@@ -42,10 +42,6 @@ inline VALUE Fiber_auto_watcher(VALUE self) {
  void Fiber_make_runnable(VALUE fiber, VALUE value) {
    VALUE thread = rb_ivar_get(fiber, ID_ivar_thread);
    if (thread == Qnil) {
-     INSPECT("Fiber with no thread", fiber);
-     TRACE_CALLER();
-     TRACE_C_STACK();
-     exit(-1);
      rb_raise(rb_eRuntimeError, "No thread set for fiber");
      // rb_warn("No thread set for fiber");
      return;
@@ -57,7 +53,6 @@ void Fiber_make_runnable(VALUE fiber, VALUE value) {
  void Fiber_make_runnable_with_priority(VALUE fiber, VALUE value) {
    VALUE thread = rb_ivar_get(fiber, ID_ivar_thread);
    if (thread == Qnil) {
-     INSPECT("Fiber with no thread", fiber);
      rb_raise(rb_eRuntimeError, "No thread set for fiber");
      // rb_warn("No thread set for fiber");
      return;
@@ -133,6 +128,15 @@ VALUE Fiber_receive(VALUE self) {
    return Queue_shift(mailbox);
  }
 
+ VALUE Fiber_mailbox(VALUE self) {
+   VALUE mailbox = rb_ivar_get(self, ID_ivar_mailbox);
+   if (mailbox == Qnil) {
+     mailbox = rb_funcall(cQueue, ID_new, 0);
+     rb_ivar_set(self, ID_ivar_mailbox, mailbox);
+   }
+   return mailbox;
+ }
+
  VALUE Fiber_receive_all_pending(VALUE self) {
    VALUE mailbox = rb_ivar_get(self, ID_ivar_mailbox);
    return (mailbox == Qnil) ? rb_ary_new() : Queue_shift_all(mailbox);
@@ -151,9 +155,9 @@ void Init_Fiber() {
 
    rb_define_method(cFiber, "<<", Fiber_send, 1);
    rb_define_method(cFiber, "send", Fiber_send, 1);
-
    rb_define_method(cFiber, "receive", Fiber_receive, 0);
    rb_define_method(cFiber, "receive_all_pending", Fiber_receive_all_pending, 0);
+   rb_define_method(cFiber, "mailbox", Fiber_mailbox, 0);
 
    SYM_dead = ID2SYM(rb_intern("dead"));
    SYM_running = ID2SYM(rb_intern("running"));
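
The newly registered Fiber#mailbox complements the existing message-passing methods: it returns the fiber's underlying queue, creating it lazily on first access, while << / send enqueue messages and receive blocks until one arrives. A hedged usage sketch, assuming Polyphony's spin and Fiber#await from the Ruby-level API:

    require 'polyphony'

    worker = spin do
      loop do
        msg = Fiber.current.receive   # blocks until a message is available
        break if msg == :done
        puts "got #{msg}"
      end
    end

    worker << :hello                  # same as worker.send(:hello)
    worker << :done
    worker.mailbox                    # the underlying queue, created lazily
    worker.await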