polyphony 0.46.1 → 0.47.0

@@ -42,6 +42,7 @@ inline op_context_t *context_store_acquire(op_context_store_t *store, enum op_ty
 
   ctx->type = type;
   ctx->fiber = rb_fiber_current();
+  ctx->resume_value = Qnil;
   ctx->completed = 0;
   ctx->result = 0;
 
@@ -24,6 +24,7 @@ typedef struct op_context {
   int id;
   int result;
   VALUE fiber;
+  VALUE resume_value;
 } op_context_t;
 
 typedef struct op_context_store {
@@ -722,6 +722,72 @@ noreturn VALUE Backend_timer_loop(VALUE self, VALUE interval) {
   }
 }
 
+VALUE Backend_timeout_safe(VALUE arg) {
+  return rb_yield(arg);
+}
+
+VALUE Backend_timeout_rescue(VALUE arg, VALUE exception) {
+  return exception;
+}
+
+VALUE Backend_timeout_ensure_safe(VALUE arg) {
+  return rb_rescue2(Backend_timeout_safe, Qnil, Backend_timeout_rescue, Qnil, rb_eException, (VALUE)0);
+}
+
+struct libev_timeout {
+  struct ev_timer timer;
+  VALUE fiber;
+  VALUE resume_value;
+};
+
+struct Backend_timeout_ctx {
+  Backend_t *backend;
+  struct libev_timeout *watcher;
+};
+
+VALUE Backend_timeout_ensure(VALUE arg) {
+  struct Backend_timeout_ctx *timeout_ctx = (struct Backend_timeout_ctx *)arg;
+  ev_timer_stop(timeout_ctx->backend->ev_loop, &(timeout_ctx->watcher->timer));
+  return Qnil;
+}
+
+void Backend_timeout_callback(EV_P_ ev_timer *w, int revents)
+{
+  struct libev_timeout *watcher = (struct libev_timeout *)w;
+  Fiber_make_runnable(watcher->fiber, watcher->resume_value);
+}
+
+VALUE Backend_timeout(int argc, VALUE *argv, VALUE self) {
+  VALUE duration;
+  VALUE exception;
+  VALUE move_on_value = Qnil;
+  rb_scan_args(argc, argv, "21", &duration, &exception, &move_on_value);
+
+  Backend_t *backend;
+  struct libev_timeout watcher;
+  VALUE result = Qnil;
+  VALUE timeout = rb_funcall(cTimeoutException, ID_new, 0);
+
+  GetBackend(self, backend);
+  watcher.fiber = rb_fiber_current();
+  watcher.resume_value = timeout;
+  ev_timer_init(&watcher.timer, Backend_timeout_callback, NUM2DBL(duration), 0.);
+  ev_timer_start(backend->ev_loop, &watcher.timer);
+
+  struct Backend_timeout_ctx timeout_ctx = {backend, &watcher};
+  result = rb_ensure(Backend_timeout_ensure_safe, Qnil, Backend_timeout_ensure, (VALUE)&timeout_ctx);
+
+  if (result == timeout) {
+    if (exception == Qnil) return move_on_value;
+    RAISE_EXCEPTION(backend_timeout_exception(exception));
+  }
+
+  RAISE_IF_EXCEPTION(result);
+  RB_GC_GUARD(result);
+  RB_GC_GUARD(timeout);
+  return result;
+}
+
 struct libev_child {
   struct ev_child child;
   VALUE fiber;
@@ -812,6 +878,7 @@ void Init_Backend() {
   rb_define_method(cBackend, "wait_io", Backend_wait_io, 2);
   rb_define_method(cBackend, "sleep", Backend_sleep, 1);
   rb_define_method(cBackend, "timer_loop", Backend_timer_loop, 1);
+  rb_define_method(cBackend, "timeout", Backend_timeout, -1);
   rb_define_method(cBackend, "waitpid", Backend_waitpid, 1);
   rb_define_method(cBackend, "wait_event", Backend_wait_event, 1);
 
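The new `Backend#timeout` arms an ev_timer and runs its block under `rb_ensure`, so the timer is always stopped on the way out. If the timer fires first, the fiber is resumed with a fresh `Polyphony::TimeoutException` sentinel, which either raises the given exception or yields the move-on value. A minimal usage sketch, inferred from the C code above (not part of the diff):

```ruby
require 'polyphony'

backend = Thread.current.backend

# Block completes in time: the timer is stopped, the block's value is returned.
backend.timeout(1, RuntimeError) { :ok }            #=> :ok

# Timer fires first with an exception argument: it is raised in this fiber.
backend.timeout(0.01, RuntimeError) { sleep 1 }     # raises RuntimeError

# Timer fires first with a nil exception: the move-on value is returned instead.
backend.timeout(0.01, nil, :timed_out) { sleep 1 }  #=> :timed_out
```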
@@ -128,6 +128,15 @@ VALUE Fiber_receive(VALUE self) {
   return Queue_shift(mailbox);
 }
 
+VALUE Fiber_mailbox(VALUE self) {
+  VALUE mailbox = rb_ivar_get(self, ID_ivar_mailbox);
+  if (mailbox == Qnil) {
+    mailbox = rb_funcall(cQueue, ID_new, 0);
+    rb_ivar_set(self, ID_ivar_mailbox, mailbox);
+  }
+  return mailbox;
+}
+
 VALUE Fiber_receive_all_pending(VALUE self) {
   VALUE mailbox = rb_ivar_get(self, ID_ivar_mailbox);
   return (mailbox == Qnil) ? rb_ary_new() : Queue_shift_all(mailbox);
@@ -146,9 +155,9 @@ void Init_Fiber() {
 
   rb_define_method(cFiber, "<<", Fiber_send, 1);
   rb_define_method(cFiber, "send", Fiber_send, 1);
-
   rb_define_method(cFiber, "receive", Fiber_receive, 0);
   rb_define_method(cFiber, "receive_all_pending", Fiber_receive_all_pending, 0);
+  rb_define_method(cFiber, "mailbox", Fiber_mailbox, 0);
 
   SYM_dead = ID2SYM(rb_intern("dead"));
   SYM_running = ID2SYM(rb_intern("running"));
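`Fiber#mailbox` exposes (and lazily creates) the queue that already backs `Fiber#send` and `Fiber#receive`. A short sketch of the message-passing API as registered above:

```ruby
require 'polyphony'

worker = spin do
  loop do
    msg = Fiber.current.receive   # shifts from the fiber's mailbox queue
    break if msg == :stop
    puts "got: #{msg}"
  end
end

worker << 'hello'                 # Fiber#<< / #send push onto the mailbox
worker.send :stop
worker.await

worker.mailbox                    # the underlying Polyphony::Queue, now public
```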
@@ -1,6 +1,7 @@
 #include "polyphony.h"
 
 VALUE mPolyphony;
+VALUE cTimeoutException;
 
 ID ID_call;
 ID ID_caller;
@@ -55,6 +56,8 @@ void Init_Polyphony() {
   rb_define_global_function("snooze", Polyphony_snooze, 0);
   rb_define_global_function("suspend", Polyphony_suspend, 0);
 
+  cTimeoutException = rb_define_class_under(mPolyphony, "TimeoutException", rb_eException);
+
   ID_call = rb_intern("call");
   ID_caller = rb_intern("caller");
   ID_clear = rb_intern("clear");
@@ -39,6 +39,7 @@ extern VALUE mPolyphony;
 extern VALUE cQueue;
 extern VALUE cEvent;
 extern VALUE cRunqueue;
+extern VALUE cTimeoutException;
 
 extern ID ID_call;
 extern ID ID_caller;
@@ -82,12 +83,6 @@ VALUE Queue_push(VALUE self, VALUE value);
 VALUE Queue_unshift(VALUE self, VALUE value);
 VALUE Queue_shift(VALUE self);
 VALUE Queue_shift_all(VALUE self);
-VALUE Queue_shift_no_wait(VALUE self);
-VALUE Queue_clear(VALUE self);
-VALUE Queue_delete(VALUE self, VALUE value);
-long Queue_len(VALUE self);
-void Queue_trace(VALUE self);
-
 
 void Runqueue_push(VALUE self, VALUE fiber, VALUE value, int reschedule);
 void Runqueue_unshift(VALUE self, VALUE fiber, VALUE value, int reschedule);
@@ -101,4 +96,6 @@ VALUE Thread_schedule_fiber(VALUE thread, VALUE fiber, VALUE value);
 VALUE Thread_schedule_fiber_with_priority(VALUE thread, VALUE fiber, VALUE value);
 VALUE Thread_switch_fiber(VALUE thread);
 
+VALUE Polyphony_snooze(VALUE self);
+
 #endif /* POLYPHONY_H */
@@ -4,6 +4,8 @@
 typedef struct queue {
   ring_buffer values;
   ring_buffer shift_queue;
+  ring_buffer push_queue;
+  unsigned int capacity;
 } Queue_t;
 
 VALUE cQueue = Qnil;
@@ -12,12 +14,14 @@ static void Queue_mark(void *ptr) {
   Queue_t *queue = ptr;
   ring_buffer_mark(&queue->values);
   ring_buffer_mark(&queue->shift_queue);
+  ring_buffer_mark(&queue->push_queue);
 }
 
 static void Queue_free(void *ptr) {
   Queue_t *queue = ptr;
   ring_buffer_free(&queue->values);
   ring_buffer_free(&queue->shift_queue);
+  ring_buffer_free(&queue->push_queue);
   xfree(ptr);
 }
 
@@ -41,36 +45,77 @@ static VALUE Queue_allocate(VALUE klass) {
 #define GetQueue(obj, queue) \
   TypedData_Get_Struct((obj), Queue_t, &Queue_type, (queue))
 
-static VALUE Queue_initialize(VALUE self) {
+static VALUE Queue_initialize(int argc, VALUE *argv, VALUE self) {
   Queue_t *queue;
   GetQueue(self, queue);
 
   ring_buffer_init(&queue->values);
   ring_buffer_init(&queue->shift_queue);
+  ring_buffer_init(&queue->push_queue);
+  queue->capacity = (argc == 1) ? NUM2UINT(argv[0]) : 0;
 
   return self;
 }
 
+inline void queue_resume_first_blocked_fiber(ring_buffer *queue) {
+  if (queue->count) {
+    VALUE fiber = ring_buffer_shift(queue);
+    if (fiber != Qnil) Fiber_make_runnable(fiber, Qnil);
+  }
+}
+
+inline void queue_resume_all_blocked_fibers(ring_buffer *queue) {
+  while (queue->count) {
+    VALUE fiber = ring_buffer_shift(queue);
+    if (fiber != Qnil) Fiber_make_runnable(fiber, Qnil);
+  }
+}
+
+inline void queue_resume_blocked_fibers_to_capacity(Queue_t *queue) {
+  for (unsigned int i = queue->values.count; (i < queue->capacity) && queue->push_queue.count; i++) {
+    VALUE fiber = ring_buffer_shift(&queue->push_queue);
+    if (fiber != Qnil) Fiber_make_runnable(fiber, Qnil);
+  }
+}
+
+inline void capped_queue_block_push(Queue_t *queue) {
+  VALUE fiber = rb_fiber_current();
+  VALUE backend = rb_ivar_get(rb_thread_current(), ID_ivar_backend);
+  VALUE switchpoint_result;
+  while (1) {
+    if (queue->capacity > queue->values.count) Fiber_make_runnable(fiber, Qnil);
+
+    ring_buffer_push(&queue->push_queue, fiber);
+    switchpoint_result = __BACKEND__.wait_event(backend, Qnil);
+    ring_buffer_delete(&queue->push_queue, fiber);
+
+    RAISE_IF_EXCEPTION(switchpoint_result);
+    RB_GC_GUARD(switchpoint_result);
+    if (queue->capacity > queue->values.count) break;
+  }
+}
+
 VALUE Queue_push(VALUE self, VALUE value) {
   Queue_t *queue;
   GetQueue(self, queue);
 
-  if (queue->shift_queue.count > 0) {
-    VALUE fiber = ring_buffer_shift(&queue->shift_queue);
-    if (fiber != Qnil) Fiber_make_runnable(fiber, Qnil);
-  }
+  if (queue->capacity) capped_queue_block_push(queue);
+
+  queue_resume_first_blocked_fiber(&queue->shift_queue);
   ring_buffer_push(&queue->values, value);
+
   return self;
 }
 
 VALUE Queue_unshift(VALUE self, VALUE value) {
   Queue_t *queue;
   GetQueue(self, queue);
-  if (queue->shift_queue.count > 0) {
-    VALUE fiber = ring_buffer_shift(&queue->shift_queue);
-    if (fiber != Qnil) Fiber_make_runnable(fiber, Qnil);
-  }
+
+  if (queue->capacity) capped_queue_block_push(queue);
+
+  queue_resume_first_blocked_fiber(&queue->shift_queue);
   ring_buffer_unshift(&queue->values, value);
+
   return self;
 }
 
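With a nonzero capacity, `Queue_push` and `Queue_unshift` first call `capped_queue_block_push`, which parks the pushing fiber on `push_queue` until a shift makes room. In Ruby terms, a producer/consumer sketch (not part of the diff):

```ruby
require 'polyphony'

q = Polyphony::Queue.new(2)   # the new optional initialize argument caps the queue

producer = spin do
  5.times do |i|
    q << i                    # blocks whenever the queue already holds 2 items
    puts "pushed #{i}"
  end
end

consumer = spin do
  5.times { puts "shifted #{q.shift}" }  # each shift wakes one blocked pusher
end

producer.await
consumer.await
```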
@@ -83,42 +128,63 @@ VALUE Queue_shift(VALUE self) {
   VALUE backend = rb_ivar_get(thread, ID_ivar_backend);
 
   while (1) {
-    ring_buffer_push(&queue->shift_queue, fiber);
-    if (queue->values.count > 0) Fiber_make_runnable(fiber, Qnil);
+    if (queue->values.count) Fiber_make_runnable(fiber, Qnil);
 
+    ring_buffer_push(&queue->shift_queue, fiber);
     VALUE switchpoint_result = __BACKEND__.wait_event(backend, Qnil);
     ring_buffer_delete(&queue->shift_queue, fiber);
 
     RAISE_IF_EXCEPTION(switchpoint_result);
     RB_GC_GUARD(switchpoint_result);
-
-    if (queue->values.count > 0)
-      return ring_buffer_shift(&queue->values);
+    if (queue->values.count) break;
   }
-
-  return Qnil;
+  VALUE value = ring_buffer_shift(&queue->values);
+  if ((queue->capacity) && (queue->capacity > queue->values.count))
+    queue_resume_first_blocked_fiber(&queue->push_queue);
+  RB_GC_GUARD(value);
+  return value;
 }
 
-VALUE Queue_shift_no_wait(VALUE self) {
-  Queue_t *queue;
+VALUE Queue_delete(VALUE self, VALUE value) {
+  Queue_t *queue;
   GetQueue(self, queue);
 
-  return ring_buffer_shift(&queue->values);
+  ring_buffer_delete(&queue->values, value);
+
+  if (queue->capacity && (queue->capacity > queue->values.count))
+    queue_resume_first_blocked_fiber(&queue->push_queue);
+
+  return self;
 }
 
-VALUE Queue_delete(VALUE self, VALUE value) {
+VALUE Queue_cap(VALUE self, VALUE cap) {
+  unsigned int new_capacity = NUM2UINT(cap);
   Queue_t *queue;
   GetQueue(self, queue);
-
-  ring_buffer_delete(&queue->values, value);
+  queue->capacity = new_capacity;
+
+  if (queue->capacity)
+    queue_resume_blocked_fibers_to_capacity(queue);
+  else
+    queue_resume_all_blocked_fibers(&queue->push_queue);
+
   return self;
 }
 
+VALUE Queue_capped_p(VALUE self) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+
+  return queue->capacity ? UINT2NUM(queue->capacity) : Qnil;
+}
+
 VALUE Queue_clear(VALUE self) {
   Queue_t *queue;
   GetQueue(self, queue);
 
   ring_buffer_clear(&queue->values);
+  if (queue->capacity) queue_resume_blocked_fibers_to_capacity(queue);
+
   return self;
 }
 
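`Queue_cap` changes the capacity at runtime: raising it wakes blocked pushers up to the new capacity, while a zero cap uncaps the queue and wakes them all. `Queue_capped_p` returns the capacity, or nil for an uncapped queue. For example:

```ruby
q = Polyphony::Queue.new
q.capped?        #=> nil (queues are uncapped by default)

q.cap(3)         # cap the queue at 3 items
q.capped?        #=> 3

q.cap(0)         # a zero cap removes the limit and resumes all blocked pushers
q.capped?        #=> nil
```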
@@ -134,6 +200,7 @@ VALUE Queue_shift_each(VALUE self) {
   GetQueue(self, queue);
 
   ring_buffer_shift_each(&queue->values);
+  if (queue->capacity) queue_resume_blocked_fibers_to_capacity(queue);
   return self;
 }
 
@@ -141,7 +208,9 @@ VALUE Queue_shift_all(VALUE self) {
   Queue_t *queue;
   GetQueue(self, queue);
 
-  return ring_buffer_shift_all(&queue->values);
+  VALUE result = ring_buffer_shift_all(&queue->values);
+  if (queue->capacity) queue_resume_blocked_fibers_to_capacity(queue);
+  return result;
 }
 
 VALUE Queue_flush_waiters(VALUE self, VALUE value) {
@@ -160,14 +229,14 @@ VALUE Queue_empty_p(VALUE self) {
   Queue_t *queue;
   GetQueue(self, queue);
 
-  return (queue->values.count == 0) ? Qtrue : Qfalse;
+  return (!queue->values.count) ? Qtrue : Qfalse;
 }
 
 VALUE Queue_pending_p(VALUE self) {
   Queue_t *queue;
   GetQueue(self, queue);
 
-  return (queue->shift_queue.count > 0) ? Qtrue : Qfalse;
+  return (queue->shift_queue.count) ? Qtrue : Qfalse;
 }
 
 VALUE Queue_size_m(VALUE self) {
@@ -177,26 +246,22 @@ VALUE Queue_size_m(VALUE self) {
   return INT2NUM(queue->values.count);
 }
 
-void Queue_trace(VALUE self) {
-  Queue_t *queue;
-  GetQueue(self, queue);
-
-  printf("run queue size: %d count: %d\n", queue->values.size, queue->values.count);
-}
-
 void Init_Queue() {
   cQueue = rb_define_class_under(mPolyphony, "Queue", rb_cData);
   rb_define_alloc_func(cQueue, Queue_allocate);
 
-  rb_define_method(cQueue, "initialize", Queue_initialize, 0);
+  rb_define_method(cQueue, "initialize", Queue_initialize, -1);
   rb_define_method(cQueue, "push", Queue_push, 1);
   rb_define_method(cQueue, "<<", Queue_push, 1);
   rb_define_method(cQueue, "unshift", Queue_unshift, 1);
 
   rb_define_method(cQueue, "shift", Queue_shift, 0);
   rb_define_method(cQueue, "pop", Queue_shift, 0);
-  rb_define_method(cQueue, "shift_no_wait", Queue_shift_no_wait, 0);
   rb_define_method(cQueue, "delete", Queue_delete, 1);
+  rb_define_method(cQueue, "clear", Queue_clear, 0);
+
+  rb_define_method(cQueue, "cap", Queue_cap, 1);
+  rb_define_method(cQueue, "capped?", Queue_capped_p, 0);
 
   rb_define_method(cQueue, "shift_each", Queue_shift_each, 0);
   rb_define_method(cQueue, "shift_all", Queue_shift_all, 0);
@@ -16,16 +16,29 @@ module Polyphony
   end
 
   def cancel_after(interval, with_exception: Polyphony::Cancel, &block)
-    fiber = ::Fiber.current
-    canceller = spin do
+    if !block
+      cancel_after_blockless_canceller(Fiber.current, interval, with_exception)
+    elsif block.arity > 0
+      cancel_after_with_block(Fiber.current, interval, with_exception, &block)
+    else
+      Thread.current.backend.timeout(interval, with_exception, &block)
+    end
+  end
+
+  def cancel_after_blockless_canceller(fiber, interval, with_exception)
+    spin do
       sleep interval
       exception = cancel_exception(with_exception)
-      # we don't want the cancelling fiber caller location as part of the
-      # exception backtrace
      exception.__raising_fiber__ = nil
       fiber.schedule exception
     end
-    block ? cancel_after_wrap_block(canceller, &block) : canceller
+  end
+
+  def cancel_after_with_block(fiber, interval, with_exception, &block)
+    canceller = cancel_after_blockless_canceller(fiber, interval, with_exception)
+    block.call(canceller)
+  ensure
+    canceller.stop
   end
 
   def cancel_exception(exception)
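`cancel_after` now dispatches on the block: without a block it returns a canceller fiber; with a block that takes an argument it keeps the fiber-based canceller (passed to the block, stopped in the ensure); and with a zero-arity block it delegates to the new C-level `Backend#timeout`. A sketch of the three forms (`quick_operation` is hypothetical work that finishes in time):

```ruby
# 1. Zero-arity block: served by Backend#timeout, raising Polyphony::Cancel.
cancel_after(1) { sleep 10 }

# 2. Block with an argument: receives the canceller fiber
#    (raises Timeout::Error after 1 second).
cancel_after(1, with_exception: Timeout::Error) do |canceller|
  sleep 10
end

# 3. No block: returns the canceller fiber targeting the current fiber.
canceller = cancel_after(1)
quick_operation
canceller.stop       # finished in time; disarm the timeout
```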
@@ -36,12 +49,6 @@ module Polyphony
     end
   end
 
-  def cancel_after_wrap_block(canceller, &block)
-    block.call(canceller)
-  ensure
-    canceller.stop
-  end
-
   def spin(tag = nil, &block)
     Fiber.current.spin(tag, caller, &block)
   end
@@ -56,6 +63,16 @@ module Polyphony
     end
   end
 
+  def spin_scope
+    raise unless block_given?
+
+    spin do
+      result = yield
+      Fiber.current.await_all_children
+      result
+    end.await
+  end
+
   def every(interval)
     next_time = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + interval
     loop do
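`spin_scope` turns a block into a synchronous scope: fibers spun inside the block become children of the scope fiber, and `await_all_children` plus the trailing `.await` mean the call returns only once every child has terminated. For example:

```ruby
result = spin_scope do
  spin { sleep 0.1; puts 'child 1 done' }
  spin { sleep 0.2; puts 'child 2 done' }
  :done
end
result #=> :done, returned only after both children finish
```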
@@ -70,15 +87,20 @@ module Polyphony
   end
 
   def move_on_after(interval, with_value: nil, &block)
-    fiber = ::Fiber.current
-    unless block
-      return spin do
-        sleep interval
-        fiber.schedule with_value
-      end
+    if !block
+      move_on_blockless_canceller(Fiber.current, interval, with_value)
+    elsif block.arity > 0
+      move_on_after_with_block(Fiber.current, interval, with_value, &block)
+    else
+      Thread.current.backend.timeout(interval, nil, with_value, &block)
     end
+  end
 
-    move_on_after_with_block(fiber, interval, with_value, &block)
+  def move_on_blockless_canceller(fiber, interval, with_value)
+    spin do
+      sleep interval
+      fiber.schedule with_value
+    end
   end
 
   def move_on_after_with_block(fiber, interval, with_value, &block)
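`move_on_after` mirrors the `cancel_after` dispatch, but on expiry it resumes the fiber with `with_value` instead of raising. For instance:

```ruby
# Zero-arity block: the C-level timeout returns with_value on expiry.
move_on_after(0.1, with_value: :timeout) { sleep 10 }  #=> :timeout

# Block finishes first: its value is returned and the timer is stopped.
move_on_after(1, with_value: :timeout) { :finished }   #=> :finished
```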