polyphony 0.46.0 → 0.47.3

Files changed (39)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +24 -0
  3. data/Gemfile.lock +1 -1
  4. data/TODO.md +54 -23
  5. data/bin/test +4 -0
  6. data/examples/core/enumerable.rb +64 -0
  7. data/examples/performance/fiber_resume.rb +43 -0
  8. data/examples/performance/fiber_transfer.rb +13 -4
  9. data/examples/performance/thread-vs-fiber/compare.rb +59 -0
  10. data/examples/performance/thread-vs-fiber/em_server.rb +33 -0
  11. data/examples/performance/thread-vs-fiber/polyphony_server.rb +9 -19
  12. data/examples/performance/thread-vs-fiber/threaded_server.rb +22 -15
  13. data/examples/performance/thread_switch.rb +44 -0
  14. data/ext/polyphony/backend_common.h +20 -0
  15. data/ext/polyphony/backend_io_uring.c +127 -16
  16. data/ext/polyphony/backend_io_uring_context.c +1 -0
  17. data/ext/polyphony/backend_io_uring_context.h +1 -0
  18. data/ext/polyphony/backend_libev.c +102 -0
  19. data/ext/polyphony/fiber.c +11 -7
  20. data/ext/polyphony/polyphony.c +3 -0
  21. data/ext/polyphony/polyphony.h +7 -7
  22. data/ext/polyphony/queue.c +99 -34
  23. data/ext/polyphony/thread.c +1 -3
  24. data/lib/polyphony/core/exceptions.rb +0 -4
  25. data/lib/polyphony/core/global_api.rb +49 -31
  26. data/lib/polyphony/extensions/core.rb +9 -15
  27. data/lib/polyphony/extensions/fiber.rb +8 -2
  28. data/lib/polyphony/extensions/openssl.rb +6 -0
  29. data/lib/polyphony/extensions/socket.rb +18 -4
  30. data/lib/polyphony/version.rb +1 -1
  31. data/test/helper.rb +1 -1
  32. data/test/stress.rb +1 -1
  33. data/test/test_backend.rb +59 -0
  34. data/test/test_fiber.rb +33 -4
  35. data/test/test_global_api.rb +85 -1
  36. data/test/test_queue.rb +117 -0
  37. data/test/test_signal.rb +18 -0
  38. data/test/test_socket.rb +2 -2
  39. metadata +8 -2

data/ext/polyphony/polyphony.c

@@ -1,6 +1,7 @@
 #include "polyphony.h"
 
 VALUE mPolyphony;
+VALUE cTimeoutException;
 
 ID ID_call;
 ID ID_caller;
@@ -55,6 +56,8 @@ void Init_Polyphony() {
   rb_define_global_function("snooze", Polyphony_snooze, 0);
   rb_define_global_function("suspend", Polyphony_suspend, 0);
 
+  cTimeoutException = rb_define_class_under(mPolyphony, "TimeoutException", rb_eException);
+
   ID_call = rb_intern("call");
   ID_caller = rb_intern("caller");
   ID_clear = rb_intern("clear");

data/ext/polyphony/polyphony.h

@@ -23,12 +23,15 @@
 #define TRACE(...) rb_funcall(rb_cObject, ID_fiber_trace, __VA_ARGS__)
 #define COND_TRACE(...) if (__tracing_enabled__) { TRACE(__VA_ARGS__); }
 
+// exceptions
 #define TEST_EXCEPTION(ret) (RTEST(rb_obj_is_kind_of(ret, rb_eException)))
-
 #define RAISE_EXCEPTION(e) rb_funcall(e, ID_invoke, 0);
 #define RAISE_IF_EXCEPTION(ret) if (RTEST(rb_obj_is_kind_of(ret, rb_eException))) { RAISE_EXCEPTION(ret); }
 #define RAISE_IF_NOT_NIL(ret) if (ret != Qnil) { RAISE_EXCEPTION(ret); }
 
+// Fiber#transfer
+#define FIBER_TRANSFER(fiber, value) rb_funcall(fiber, ID_transfer, 1, value)
+
 extern backend_interface_t backend_interface;
 #define __BACKEND__ (backend_interface)
 
@@ -36,6 +39,7 @@ extern VALUE mPolyphony;
 extern VALUE cQueue;
 extern VALUE cEvent;
 extern VALUE cRunqueue;
+extern VALUE cTimeoutException;
 
 extern ID ID_call;
 extern ID ID_caller;
@@ -79,12 +83,6 @@ VALUE Queue_push(VALUE self, VALUE value);
 VALUE Queue_unshift(VALUE self, VALUE value);
 VALUE Queue_shift(VALUE self);
 VALUE Queue_shift_all(VALUE self);
-VALUE Queue_shift_no_wait(VALUE self);
-VALUE Queue_clear(VALUE self);
-VALUE Queue_delete(VALUE self, VALUE value);
-long Queue_len(VALUE self);
-void Queue_trace(VALUE self);
-
 
 void Runqueue_push(VALUE self, VALUE fiber, VALUE value, int reschedule);
 void Runqueue_unshift(VALUE self, VALUE fiber, VALUE value, int reschedule);
@@ -98,4 +96,6 @@ VALUE Thread_schedule_fiber(VALUE thread, VALUE fiber, VALUE value);
 VALUE Thread_schedule_fiber_with_priority(VALUE thread, VALUE fiber, VALUE value);
 VALUE Thread_switch_fiber(VALUE thread);
 
+VALUE Polyphony_snooze(VALUE self);
+
 #endif /* POLYPHONY_H */
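
The cTimeoutException declared above is registered in polyphony.c under the Polyphony module, so from Ruby it is reachable as Polyphony::TimeoutException, a direct subclass of Exception. A minimal sketch of how it might be combined with the reworked cancel_after further down; passing with_exception: is confirmed by the global_api.rb diff below, while choosing TimeoutException for it here is only an illustration:

require 'polyphony'

# Confirmed by this diff: the class exists and derives from Exception.
Polyphony::TimeoutException < Exception #=> true

# Illustration only: pass it as the exception raised when the timeout fires.
begin
  cancel_after(0.1, with_exception: Polyphony::TimeoutException) { sleep 1 }
rescue Polyphony::TimeoutException
  puts 'operation timed out'
end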

data/ext/polyphony/queue.c

@@ -4,6 +4,8 @@
 typedef struct queue {
   ring_buffer values;
   ring_buffer shift_queue;
+  ring_buffer push_queue;
+  unsigned int capacity;
 } Queue_t;
 
 VALUE cQueue = Qnil;
@@ -12,12 +14,14 @@ static void Queue_mark(void *ptr) {
   Queue_t *queue = ptr;
   ring_buffer_mark(&queue->values);
   ring_buffer_mark(&queue->shift_queue);
+  ring_buffer_mark(&queue->push_queue);
 }
 
 static void Queue_free(void *ptr) {
   Queue_t *queue = ptr;
   ring_buffer_free(&queue->values);
   ring_buffer_free(&queue->shift_queue);
+  ring_buffer_free(&queue->push_queue);
   xfree(ptr);
 }
 
@@ -41,36 +45,77 @@ static VALUE Queue_allocate(VALUE klass) {
 #define GetQueue(obj, queue) \
   TypedData_Get_Struct((obj), Queue_t, &Queue_type, (queue))
 
-static VALUE Queue_initialize(VALUE self) {
+static VALUE Queue_initialize(int argc, VALUE *argv, VALUE self) {
   Queue_t *queue;
   GetQueue(self, queue);
 
   ring_buffer_init(&queue->values);
   ring_buffer_init(&queue->shift_queue);
+  ring_buffer_init(&queue->push_queue);
+  queue->capacity = (argc == 1) ? NUM2UINT(argv[0]) : 0;
 
   return self;
 }
 
+inline void queue_resume_first_blocked_fiber(ring_buffer *queue) {
+  if (queue->count) {
+    VALUE fiber = ring_buffer_shift(queue);
+    if (fiber != Qnil) Fiber_make_runnable(fiber, Qnil);
+  }
+}
+
+inline void queue_resume_all_blocked_fibers(ring_buffer *queue) {
+  while (queue->count) {
+    VALUE fiber = ring_buffer_shift(queue);
+    if (fiber != Qnil) Fiber_make_runnable(fiber, Qnil);
+  }
+}
+
+inline void queue_resume_blocked_fibers_to_capacity(Queue_t *queue) {
+  for (unsigned int i = queue->values.count; (i < queue->capacity) && queue->push_queue.count; i++) {
+    VALUE fiber = ring_buffer_shift(&queue->push_queue);
+    if (fiber != Qnil) Fiber_make_runnable(fiber, Qnil);
+  }
+}
+
+inline void capped_queue_block_push(Queue_t *queue) {
+  VALUE fiber = rb_fiber_current();
+  VALUE backend = rb_ivar_get(rb_thread_current(), ID_ivar_backend);
+  VALUE switchpoint_result;
+  while (1) {
+    if (queue->capacity > queue->values.count) Fiber_make_runnable(fiber, Qnil);
+
+    ring_buffer_push(&queue->push_queue, fiber);
+    switchpoint_result = __BACKEND__.wait_event(backend, Qnil);
+    ring_buffer_delete(&queue->push_queue, fiber);
+
+    RAISE_IF_EXCEPTION(switchpoint_result);
+    RB_GC_GUARD(switchpoint_result);
+    if (queue->capacity > queue->values.count) break;
+  }
+}
+
 VALUE Queue_push(VALUE self, VALUE value) {
   Queue_t *queue;
   GetQueue(self, queue);
 
-  if (queue->shift_queue.count > 0) {
-    VALUE fiber = ring_buffer_shift(&queue->shift_queue);
-    if (fiber != Qnil) Fiber_make_runnable(fiber, Qnil);
-  }
+  if (queue->capacity) capped_queue_block_push(queue);
+
+  queue_resume_first_blocked_fiber(&queue->shift_queue);
   ring_buffer_push(&queue->values, value);
+
   return self;
 }
 
 VALUE Queue_unshift(VALUE self, VALUE value) {
   Queue_t *queue;
   GetQueue(self, queue);
-  if (queue->shift_queue.count > 0) {
-    VALUE fiber = ring_buffer_shift(&queue->shift_queue);
-    if (fiber != Qnil) Fiber_make_runnable(fiber, Qnil);
-  }
+
+  if (queue->capacity) capped_queue_block_push(queue);
+
+  queue_resume_first_blocked_fiber(&queue->shift_queue);
   ring_buffer_unshift(&queue->values, value);
+
   return self;
 }
 
@@ -83,42 +128,63 @@ VALUE Queue_shift(VALUE self) {
   VALUE backend = rb_ivar_get(thread, ID_ivar_backend);
 
   while (1) {
-    ring_buffer_push(&queue->shift_queue, fiber);
-    if (queue->values.count > 0) Fiber_make_runnable(fiber, Qnil);
+    if (queue->values.count) Fiber_make_runnable(fiber, Qnil);
 
+    ring_buffer_push(&queue->shift_queue, fiber);
     VALUE switchpoint_result = __BACKEND__.wait_event(backend, Qnil);
     ring_buffer_delete(&queue->shift_queue, fiber);
 
     RAISE_IF_EXCEPTION(switchpoint_result);
     RB_GC_GUARD(switchpoint_result);
-
-    if (queue->values.count > 0)
-      return ring_buffer_shift(&queue->values);
+    if (queue->values.count) break;
   }
-
-  return Qnil;
+  VALUE value = ring_buffer_shift(&queue->values);
+  if ((queue->capacity) && (queue->capacity > queue->values.count))
+    queue_resume_first_blocked_fiber(&queue->push_queue);
+  RB_GC_GUARD(value);
+  return value;
 }
 
-VALUE Queue_shift_no_wait(VALUE self) {
-  Queue_t *queue;
+VALUE Queue_delete(VALUE self, VALUE value) {
+  Queue_t *queue;
   GetQueue(self, queue);
 
-  return ring_buffer_shift(&queue->values);
+  ring_buffer_delete(&queue->values, value);
+
+  if (queue->capacity && (queue->capacity > queue->values.count))
+    queue_resume_first_blocked_fiber(&queue->push_queue);
+
+  return self;
 }
 
-VALUE Queue_delete(VALUE self, VALUE value) {
+VALUE Queue_cap(VALUE self, VALUE cap) {
+  unsigned int new_capacity = NUM2UINT(cap);
   Queue_t *queue;
   GetQueue(self, queue);
-
-  ring_buffer_delete(&queue->values, value);
+  queue->capacity = new_capacity;
+
+  if (queue->capacity)
+    queue_resume_blocked_fibers_to_capacity(queue);
+  else
+    queue_resume_all_blocked_fibers(&queue->push_queue);
+
   return self;
 }
 
+VALUE Queue_capped_p(VALUE self) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+
+  return queue->capacity ? UINT2NUM(queue->capacity) : Qnil;
+}
+
 VALUE Queue_clear(VALUE self) {
   Queue_t *queue;
   GetQueue(self, queue);
 
   ring_buffer_clear(&queue->values);
+  if (queue->capacity) queue_resume_blocked_fibers_to_capacity(queue);
+
   return self;
 }
 
@@ -134,6 +200,7 @@ VALUE Queue_shift_each(VALUE self) {
   GetQueue(self, queue);
 
   ring_buffer_shift_each(&queue->values);
+  if (queue->capacity) queue_resume_blocked_fibers_to_capacity(queue);
   return self;
 }
 
@@ -141,7 +208,9 @@ VALUE Queue_shift_all(VALUE self) {
   Queue_t *queue;
   GetQueue(self, queue);
 
-  return ring_buffer_shift_all(&queue->values);
+  VALUE result = ring_buffer_shift_all(&queue->values);
+  if (queue->capacity) queue_resume_blocked_fibers_to_capacity(queue);
+  return result;
 }
 
 VALUE Queue_flush_waiters(VALUE self, VALUE value) {
@@ -160,14 +229,14 @@ VALUE Queue_empty_p(VALUE self) {
   Queue_t *queue;
   GetQueue(self, queue);
 
-  return (queue->values.count == 0) ? Qtrue : Qfalse;
+  return (!queue->values.count) ? Qtrue : Qfalse;
 }
 
 VALUE Queue_pending_p(VALUE self) {
   Queue_t *queue;
   GetQueue(self, queue);
 
-  return (queue->shift_queue.count > 0) ? Qtrue : Qfalse;
+  return (queue->shift_queue.count) ? Qtrue : Qfalse;
 }
 
 VALUE Queue_size_m(VALUE self) {
@@ -177,26 +246,22 @@ VALUE Queue_size_m(VALUE self) {
   return INT2NUM(queue->values.count);
 }
 
-void Queue_trace(VALUE self) {
-  Queue_t *queue;
-  GetQueue(self, queue);
-
-  printf("run queue size: %d count: %d\n", queue->values.size, queue->values.count);
-}
-
 void Init_Queue() {
   cQueue = rb_define_class_under(mPolyphony, "Queue", rb_cData);
   rb_define_alloc_func(cQueue, Queue_allocate);
 
-  rb_define_method(cQueue, "initialize", Queue_initialize, 0);
+  rb_define_method(cQueue, "initialize", Queue_initialize, -1);
   rb_define_method(cQueue, "push", Queue_push, 1);
   rb_define_method(cQueue, "<<", Queue_push, 1);
   rb_define_method(cQueue, "unshift", Queue_unshift, 1);
 
   rb_define_method(cQueue, "shift", Queue_shift, 0);
   rb_define_method(cQueue, "pop", Queue_shift, 0);
-  rb_define_method(cQueue, "shift_no_wait", Queue_shift_no_wait, 0);
   rb_define_method(cQueue, "delete", Queue_delete, 1);
+  rb_define_method(cQueue, "clear", Queue_clear, 0);
+
+  rb_define_method(cQueue, "cap", Queue_cap, 1);
+  rb_define_method(cQueue, "capped?", Queue_capped_p, 0);
 
   rb_define_method(cQueue, "shift_each", Queue_shift_each, 0);
   rb_define_method(cQueue, "shift_all", Queue_shift_all, 0);

data/ext/polyphony/thread.c

@@ -4,7 +4,6 @@ ID ID_deactivate_all_watchers_post_fork;
 ID ID_ivar_backend;
 ID ID_ivar_join_wait_queue;
 ID ID_ivar_main_fiber;
-ID ID_ivar_result;
 ID ID_ivar_terminated;
 ID ID_ivar_runqueue;
 ID ID_stop;
@@ -116,7 +115,7 @@ VALUE Thread_switch_fiber(VALUE self) {
   RB_GC_GUARD(next.fiber);
   RB_GC_GUARD(next.value);
   return (next.fiber == current_fiber) ?
-    next.value : rb_funcall(next.fiber, ID_transfer, 1, next.value);
+    next.value : FIBER_TRANSFER(next.fiber, next.value);
 }
 
 VALUE Thread_reset_fiber_scheduling(VALUE self) {
@@ -162,7 +161,6 @@ void Init_Thread() {
   ID_ivar_backend = rb_intern("@backend");
   ID_ivar_join_wait_queue = rb_intern("@join_wait_queue");
   ID_ivar_main_fiber = rb_intern("@main_fiber");
-  ID_ivar_result = rb_intern("@result");
   ID_ivar_terminated = rb_intern("@terminated");
   ID_ivar_runqueue = rb_intern("@runqueue");
   ID_stop = rb_intern("stop");

data/lib/polyphony/core/exceptions.rb

@@ -14,10 +14,6 @@ module Polyphony
       @caller_backtrace = caller
       @value = value
     end
-
-    def backtrace
-      sanitize(@caller_backtrace)
-    end
   end
 
   # MoveOn is used to interrupt a long-running blocking operation, while

data/lib/polyphony/core/global_api.rb

@@ -16,64 +16,82 @@ module Polyphony
   end
 
   def cancel_after(interval, with_exception: Polyphony::Cancel, &block)
-    fiber = ::Fiber.current
-    canceller = spin do
+    if !block
+      cancel_after_blockless_canceller(Fiber.current, interval, with_exception)
+    elsif block.arity > 0
+      cancel_after_with_block(Fiber.current, interval, with_exception, &block)
+    else
+      Thread.current.backend.timeout(interval, with_exception, &block)
+    end
+  end
+
+  def cancel_after_blockless_canceller(fiber, interval, with_exception)
+    spin do
       sleep interval
       exception = cancel_exception(with_exception)
+      exception.__raising_fiber__ = nil
       fiber.schedule exception
     end
-    block ? cancel_after_wrap_block(canceller, &block) : canceller
   end
 
-  def cancel_exception(exception)
-    return exception.new if exception.is_a?(Class)
-
-    RuntimeError.new(exception)
-  end
-
-  def cancel_after_wrap_block(canceller, &block)
+  def cancel_after_with_block(fiber, interval, with_exception, &block)
+    canceller = cancel_after_blockless_canceller(fiber, interval, with_exception)
     block.call(canceller)
   ensure
     canceller.stop
   end
 
+  def cancel_exception(exception)
+    case exception
+    when Class then exception.new
+    when Array then exception[0].new(exception[1])
+    else RuntimeError.new(exception)
+    end
+  end
+
   def spin(tag = nil, &block)
     Fiber.current.spin(tag, caller, &block)
   end
 
-  def spin_loop(tag = nil, rate: nil, &block)
-    if rate
+  def spin_loop(tag = nil, rate: nil, interval: nil, &block)
+    if rate || interval
       Fiber.current.spin(tag, caller) do
-        throttled_loop(rate, &block)
+        throttled_loop(rate: rate, interval: interval, &block)
      end
     else
       Fiber.current.spin(tag, caller) { loop(&block) }
     end
   end
 
-  def every(interval)
-    next_time = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + interval
-    loop do
-      now = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
-      Thread.current.backend.sleep(next_time - now)
-      yield
-      loop do
-        next_time += interval
-        break if next_time > now
-      end
-    end
+  def spin_scope
+    raise unless block_given?
+
+    spin do
+      result = yield
+      Fiber.current.await_all_children
+      result
+    end.await
+  end
+
+  def every(interval, &block)
+    Thread.current.backend.timer_loop(interval, &block)
   end
 
   def move_on_after(interval, with_value: nil, &block)
-    fiber = ::Fiber.current
-    unless block
-      return spin do
-        sleep interval
-        fiber.schedule with_value
-      end
+    if !block
+      move_on_blockless_canceller(Fiber.current, interval, with_value)
+    elsif block.arity > 0
+      move_on_after_with_block(Fiber.current, interval, with_value, &block)
+    else
+      Thread.current.backend.timeout(interval, nil, with_value, &block)
     end
+  end
 
-    move_on_after_with_block(fiber, interval, with_value, &block)
+  def move_on_blockless_canceller(fiber, interval, with_value)
+    spin do
+      sleep interval
+      fiber.schedule with_value
+    end
   end
 
   def move_on_after_with_block(fiber, interval, with_value, &block)
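
The global_api.rb rework above gives cancel_after and move_on_after three code paths: no block (a canceller fiber is returned), a block with arity > 0 (the canceller is passed to the block), and a zero-arity block (delegated to Backend#timeout). A brief sketch of the resulting call patterns, with illustrative intervals; the exact return value of Backend#timeout is an assumption based on its arguments:

require 'polyphony'

# Zero-arity block: handled by Thread.current.backend.timeout, presumably
# returning with_value when the timeout fires first.
move_on_after(0.5, with_value: :timed_out) { sleep 10 } #=> :timed_out

# Array form of with_exception, per the new cancel_exception:
# exception[0].new(exception[1]).
cancel_after(1, with_exception: [ArgumentError, 'took too long']) do |canceller|
  # The canceller fiber is yielded to a block of arity > 0 and is stopped
  # automatically when the block returns (see cancel_after_with_block).
  sleep 0.5
end

# spin_scope awaits all fibers spun inside its block before returning.
spin_scope do
  spin { sleep 1 }
  spin { sleep 2 }
end

# every now delegates to Backend#timer_loop; spin_loop accepts interval:.
spin_loop(interval: 10) { puts Time.now }
every(1) { puts 'tick' }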