polyphony 0.45.5 → 0.47.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/test.yml +2 -0
- data/.gitmodules +0 -0
- data/CHANGELOG.md +23 -0
- data/Gemfile.lock +1 -1
- data/README.md +3 -3
- data/Rakefile +1 -1
- data/TODO.md +21 -22
- data/bin/test +4 -0
- data/examples/core/enumerable.rb +64 -0
- data/examples/performance/fiber_resume.rb +43 -0
- data/examples/performance/fiber_transfer.rb +13 -4
- data/examples/performance/thread-vs-fiber/compare.rb +59 -0
- data/examples/performance/thread-vs-fiber/em_server.rb +33 -0
- data/examples/performance/thread-vs-fiber/polyphony_server.rb +10 -21
- data/examples/performance/thread-vs-fiber/threaded_server.rb +22 -15
- data/examples/performance/thread_switch.rb +44 -0
- data/ext/liburing/liburing.h +585 -0
- data/ext/liburing/liburing/README.md +4 -0
- data/ext/liburing/liburing/barrier.h +73 -0
- data/ext/liburing/liburing/compat.h +15 -0
- data/ext/liburing/liburing/io_uring.h +343 -0
- data/ext/liburing/queue.c +333 -0
- data/ext/liburing/register.c +187 -0
- data/ext/liburing/setup.c +210 -0
- data/ext/liburing/syscall.c +54 -0
- data/ext/liburing/syscall.h +18 -0
- data/ext/polyphony/backend.h +0 -14
- data/ext/polyphony/backend_common.h +129 -0
- data/ext/polyphony/backend_io_uring.c +995 -0
- data/ext/polyphony/backend_io_uring_context.c +74 -0
- data/ext/polyphony/backend_io_uring_context.h +53 -0
- data/ext/polyphony/{libev_backend.c → backend_libev.c} +304 -294
- data/ext/polyphony/event.c +1 -1
- data/ext/polyphony/extconf.rb +31 -13
- data/ext/polyphony/fiber.c +35 -24
- data/ext/polyphony/libev.c +4 -0
- data/ext/polyphony/libev.h +8 -2
- data/ext/polyphony/liburing.c +8 -0
- data/ext/polyphony/playground.c +51 -0
- data/ext/polyphony/polyphony.c +8 -5
- data/ext/polyphony/polyphony.h +23 -19
- data/ext/polyphony/polyphony_ext.c +10 -4
- data/ext/polyphony/queue.c +100 -35
- data/ext/polyphony/thread.c +10 -10
- data/lib/polyphony/adapters/trace.rb +2 -2
- data/lib/polyphony/core/exceptions.rb +0 -4
- data/lib/polyphony/core/global_api.rb +45 -21
- data/lib/polyphony/core/resource_pool.rb +12 -1
- data/lib/polyphony/extensions/core.rb +9 -15
- data/lib/polyphony/extensions/debug.rb +13 -0
- data/lib/polyphony/extensions/fiber.rb +8 -4
- data/lib/polyphony/extensions/openssl.rb +6 -0
- data/lib/polyphony/extensions/socket.rb +73 -10
- data/lib/polyphony/version.rb +1 -1
- data/test/helper.rb +36 -4
- data/test/io_uring_test.rb +55 -0
- data/test/stress.rb +4 -1
- data/test/test_backend.rb +63 -6
- data/test/test_ext.rb +1 -2
- data/test/test_fiber.rb +55 -20
- data/test/test_global_api.rb +107 -35
- data/test/test_queue.rb +117 -0
- data/test/test_resource_pool.rb +21 -0
- data/test/test_socket.rb +2 -2
- data/test/test_throttler.rb +3 -6
- data/test/test_trace.rb +7 -5
- metadata +28 -3
data/ext/polyphony/queue.c
CHANGED
```diff
@@ -4,6 +4,8 @@
 typedef struct queue {
   ring_buffer values;
   ring_buffer shift_queue;
+  ring_buffer push_queue;
+  unsigned int capacity;
 } Queue_t;
 
 VALUE cQueue = Qnil;
@@ -12,12 +14,14 @@ static void Queue_mark(void *ptr) {
   Queue_t *queue = ptr;
   ring_buffer_mark(&queue->values);
   ring_buffer_mark(&queue->shift_queue);
+  ring_buffer_mark(&queue->push_queue);
 }
 
 static void Queue_free(void *ptr) {
   Queue_t *queue = ptr;
   ring_buffer_free(&queue->values);
   ring_buffer_free(&queue->shift_queue);
+  ring_buffer_free(&queue->push_queue);
   xfree(ptr);
 }
 
@@ -41,36 +45,77 @@ static VALUE Queue_allocate(VALUE klass) {
 #define GetQueue(obj, queue) \
   TypedData_Get_Struct((obj), Queue_t, &Queue_type, (queue))
 
-static VALUE Queue_initialize(VALUE self) {
+static VALUE Queue_initialize(int argc, VALUE *argv, VALUE self) {
   Queue_t *queue;
   GetQueue(self, queue);
 
   ring_buffer_init(&queue->values);
   ring_buffer_init(&queue->shift_queue);
+  ring_buffer_init(&queue->push_queue);
+  queue->capacity = (argc == 1) ? NUM2UINT(argv[0]) : 0;
 
   return self;
 }
 
+inline void queue_resume_first_blocked_fiber(ring_buffer *queue) {
+  if (queue->count) {
+    VALUE fiber = ring_buffer_shift(queue);
+    if (fiber != Qnil) Fiber_make_runnable(fiber, Qnil);
+  }
+}
+
+inline void queue_resume_all_blocked_fibers(ring_buffer *queue) {
+  while (queue->count) {
+    VALUE fiber = ring_buffer_shift(queue);
+    if (fiber != Qnil) Fiber_make_runnable(fiber, Qnil);
+  }
+}
+
+inline void queue_resume_blocked_fibers_to_capacity(Queue_t *queue) {
+  for (unsigned int i = queue->values.count; (i < queue->capacity) && queue->push_queue.count; i++) {
+    VALUE fiber = ring_buffer_shift(&queue->push_queue);
+    if (fiber != Qnil) Fiber_make_runnable(fiber, Qnil);
+  }
+}
+
+inline void capped_queue_block_push(Queue_t *queue) {
+  VALUE fiber = rb_fiber_current();
+  VALUE backend = rb_ivar_get(rb_thread_current(), ID_ivar_backend);
+  VALUE switchpoint_result;
+  while (1) {
+    if (queue->capacity > queue->values.count) Fiber_make_runnable(fiber, Qnil);
+
+    ring_buffer_push(&queue->push_queue, fiber);
+    switchpoint_result = __BACKEND__.wait_event(backend, Qnil);
+    ring_buffer_delete(&queue->push_queue, fiber);
+
+    RAISE_IF_EXCEPTION(switchpoint_result);
+    RB_GC_GUARD(switchpoint_result);
+    if (queue->capacity > queue->values.count) break;
+  }
+}
+
 VALUE Queue_push(VALUE self, VALUE value) {
   Queue_t *queue;
   GetQueue(self, queue);
 
-  if (queue->
-
-
-  }
+  if (queue->capacity) capped_queue_block_push(queue);
+
+  queue_resume_first_blocked_fiber(&queue->shift_queue);
   ring_buffer_push(&queue->values, value);
+
   return self;
 }
 
 VALUE Queue_unshift(VALUE self, VALUE value) {
   Queue_t *queue;
   GetQueue(self, queue);
-
-
-
-
+
+  if (queue->capacity) capped_queue_block_push(queue);
+
+  queue_resume_first_blocked_fiber(&queue->shift_queue);
   ring_buffer_unshift(&queue->values, value);
+
   return self;
 }
 
@@ -83,42 +128,63 @@ VALUE Queue_shift(VALUE self) {
   VALUE backend = rb_ivar_get(thread, ID_ivar_backend);
 
   while (1) {
-
-    if (queue->values.count > 0) Fiber_make_runnable(fiber, Qnil);
+    if (queue->values.count) Fiber_make_runnable(fiber, Qnil);
 
+    ring_buffer_push(&queue->shift_queue, fiber);
     VALUE switchpoint_result = __BACKEND__.wait_event(backend, Qnil);
     ring_buffer_delete(&queue->shift_queue, fiber);
 
-
+    RAISE_IF_EXCEPTION(switchpoint_result);
     RB_GC_GUARD(switchpoint_result);
-
-    if (queue->values.count > 0)
-      return ring_buffer_shift(&queue->values);
+    if (queue->values.count) break;
   }
-
-
+  VALUE value = ring_buffer_shift(&queue->values);
+  if ((queue->capacity) && (queue->capacity > queue->values.count))
+    queue_resume_first_blocked_fiber(&queue->push_queue);
+  RB_GC_GUARD(value);
+  return value;
 }
 
-VALUE
-
+VALUE Queue_delete(VALUE self, VALUE value) {
+  Queue_t *queue;
   GetQueue(self, queue);
 
-
+  ring_buffer_delete(&queue->values, value);
+
+  if (queue->capacity && (queue->capacity > queue->values.count))
+    queue_resume_first_blocked_fiber(&queue->push_queue);
+
+  return self;
 }
 
-VALUE
+VALUE Queue_cap(VALUE self, VALUE cap) {
+  unsigned int new_capacity = NUM2UINT(cap);
   Queue_t *queue;
   GetQueue(self, queue);
-
-
+  queue->capacity = new_capacity;
+
+  if (queue->capacity)
+    queue_resume_blocked_fibers_to_capacity(queue);
+  else
+    queue_resume_all_blocked_fibers(&queue->push_queue);
+
   return self;
 }
 
+VALUE Queue_capped_p(VALUE self) {
+  Queue_t *queue;
+  GetQueue(self, queue);
+
+  return queue->capacity ? UINT2NUM(queue->capacity) : Qnil;
+}
+
 VALUE Queue_clear(VALUE self) {
   Queue_t *queue;
   GetQueue(self, queue);
 
   ring_buffer_clear(&queue->values);
+  if (queue->capacity) queue_resume_blocked_fibers_to_capacity(queue);
+
   return self;
 }
 
@@ -134,6 +200,7 @@ VALUE Queue_shift_each(VALUE self) {
   GetQueue(self, queue);
 
   ring_buffer_shift_each(&queue->values);
+  if (queue->capacity) queue_resume_blocked_fibers_to_capacity(queue);
   return self;
 }
 
@@ -141,7 +208,9 @@ VALUE Queue_shift_all(VALUE self) {
   Queue_t *queue;
   GetQueue(self, queue);
 
-
+  VALUE result = ring_buffer_shift_all(&queue->values);
+  if (queue->capacity) queue_resume_blocked_fibers_to_capacity(queue);
+  return result;
 }
 
 VALUE Queue_flush_waiters(VALUE self, VALUE value) {
@@ -160,14 +229,14 @@ VALUE Queue_empty_p(VALUE self) {
   Queue_t *queue;
   GetQueue(self, queue);
 
-  return (queue->values.count
+  return (!queue->values.count) ? Qtrue : Qfalse;
 }
 
 VALUE Queue_pending_p(VALUE self) {
   Queue_t *queue;
   GetQueue(self, queue);
 
-  return (queue->shift_queue.count
+  return (queue->shift_queue.count) ? Qtrue : Qfalse;
 }
 
 VALUE Queue_size_m(VALUE self) {
@@ -177,26 +246,22 @@ VALUE Queue_size_m(VALUE self) {
   return INT2NUM(queue->values.count);
 }
 
-void Queue_trace(VALUE self) {
-  Queue_t *queue;
-  GetQueue(self, queue);
-
-  printf("run queue size: %d count: %d\n", queue->values.size, queue->values.count);
-}
-
 void Init_Queue() {
   cQueue = rb_define_class_under(mPolyphony, "Queue", rb_cData);
   rb_define_alloc_func(cQueue, Queue_allocate);
 
-  rb_define_method(cQueue, "initialize", Queue_initialize,
+  rb_define_method(cQueue, "initialize", Queue_initialize, -1);
   rb_define_method(cQueue, "push", Queue_push, 1);
   rb_define_method(cQueue, "<<", Queue_push, 1);
   rb_define_method(cQueue, "unshift", Queue_unshift, 1);
 
   rb_define_method(cQueue, "shift", Queue_shift, 0);
   rb_define_method(cQueue, "pop", Queue_shift, 0);
-  rb_define_method(cQueue, "shift_no_wait", Queue_shift_no_wait, 0);
   rb_define_method(cQueue, "delete", Queue_delete, 1);
+  rb_define_method(cQueue, "clear", Queue_clear, 0);
+
+  rb_define_method(cQueue, "cap", Queue_cap, 1);
+  rb_define_method(cQueue, "capped?", Queue_capped_p, 0);
 
   rb_define_method(cQueue, "shift_each", Queue_shift_each, 0);
   rb_define_method(cQueue, "shift_all", Queue_shift_all, 0);
```
data/ext/polyphony/thread.c
CHANGED
```diff
@@ -115,7 +115,7 @@ VALUE Thread_switch_fiber(VALUE self) {
   RB_GC_GUARD(next.fiber);
   RB_GC_GUARD(next.value);
   return (next.fiber == current_fiber) ?
-    next.value :
+    next.value : FIBER_TRANSFER(next.fiber, next.value);
 }
 
 VALUE Thread_reset_fiber_scheduling(VALUE self) {
@@ -125,7 +125,7 @@ VALUE Thread_reset_fiber_scheduling(VALUE self) {
   return self;
 }
 
-VALUE
+VALUE Thread_fiber_schedule_and_wakeup(VALUE self, VALUE fiber, VALUE resume_obj) {
   VALUE backend = rb_ivar_get(self, ID_ivar_backend);
   if (fiber != Qnil) {
     Thread_schedule_fiber_with_priority(self, fiber, resume_obj);
@@ -148,7 +148,7 @@ void Init_Thread() {
   rb_define_method(rb_cThread, "setup_fiber_scheduling", Thread_setup_fiber_scheduling, 0);
   rb_define_method(rb_cThread, "reset_fiber_scheduling", Thread_reset_fiber_scheduling, 0);
   rb_define_method(rb_cThread, "fiber_scheduling_stats", Thread_fiber_scheduling_stats, 0);
-  rb_define_method(rb_cThread, "
+  rb_define_method(rb_cThread, "schedule_and_wakeup", Thread_fiber_schedule_and_wakeup, 2);
 
   rb_define_method(rb_cThread, "schedule_fiber", Thread_schedule_fiber, 2);
   rb_define_method(rb_cThread, "schedule_fiber_with_priority",
@@ -157,13 +157,13 @@ void Init_Thread() {
 
   rb_define_method(rb_cThread, "debug!", Thread_debug, 0);
 
-  ID_deactivate_all_watchers_post_fork
-  ID_ivar_backend
-  ID_ivar_join_wait_queue
-  ID_ivar_main_fiber
-
-
-  ID_stop
+  ID_deactivate_all_watchers_post_fork = rb_intern("deactivate_all_watchers_post_fork");
+  ID_ivar_backend = rb_intern("@backend");
+  ID_ivar_join_wait_queue = rb_intern("@join_wait_queue");
+  ID_ivar_main_fiber = rb_intern("@main_fiber");
+  ID_ivar_terminated = rb_intern("@terminated");
+  ID_ivar_runqueue = rb_intern("@runqueue");
+  ID_stop = rb_intern("stop");
 
   SYM_scheduled_fibers = ID2SYM(rb_intern("scheduled_fibers"));
   SYM_pending_watchers = ID2SYM(rb_intern("pending_watchers"));
```
data/lib/polyphony/adapters/trace.rb
CHANGED
```diff
@@ -118,13 +118,13 @@ module Polyphony
 
   ALL_FIBER_EVENTS = %i[
     fiber_create fiber_terminate fiber_schedule fiber_switchpoint fiber_run
-
+    fiber_event_poll_enter fiber_event_poll_leave
   ].freeze
 
   def event_masks(events)
     events.each_with_object([[], []]) do |e, masks|
       case e
-      when
+      when /^fiber_/
         masks[1] += e == :fiber_all ? ALL_FIBER_EVENTS : [e]
         masks[0] << :c_return unless masks[0].include?(:c_return)
       else
```
data/lib/polyphony/core/global_api.rb
CHANGED
```diff
@@ -16,27 +16,39 @@ module Polyphony
   end
 
   def cancel_after(interval, with_exception: Polyphony::Cancel, &block)
-
-
+    if !block
+      cancel_after_blockless_canceller(Fiber.current, interval, with_exception)
+    elsif block.arity > 0
+      cancel_after_with_block(Fiber.current, interval, with_exception, &block)
+    else
+      Thread.current.backend.timeout(interval, with_exception, &block)
+    end
+  end
+
+  def cancel_after_blockless_canceller(fiber, interval, with_exception)
+    spin do
       sleep interval
       exception = cancel_exception(with_exception)
+      exception.__raising_fiber__ = nil
       fiber.schedule exception
     end
-    block ? cancel_after_wrap_block(canceller, &block) : canceller
   end
 
-  def
-
-
-    RuntimeError.new(exception)
-  end
-
-  def cancel_after_wrap_block(canceller, &block)
+  def cancel_after_with_block(fiber, interval, with_exception, &block)
+    canceller = cancel_after_blockless_canceller(fiber, interval, with_exception)
     block.call(canceller)
   ensure
     canceller.stop
   end
 
+  def cancel_exception(exception)
+    case exception
+    when Class then exception.new
+    when Array then exception[0].new(exception[1])
+    else RuntimeError.new(exception)
+    end
+  end
+
   def spin(tag = nil, &block)
     Fiber.current.spin(tag, caller, &block)
   end
@@ -51,6 +63,16 @@ module Polyphony
     end
   end
 
+  def spin_scope
+    raise unless block_given?
+
+    spin do
+      result = yield
+      Fiber.current.await_all_children
+      result
+    end.await
+  end
+
   def every(interval)
     next_time = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + interval
     loop do
@@ -65,15 +87,20 @@ module Polyphony
   end
 
   def move_on_after(interval, with_value: nil, &block)
-
-
-
-
-
-
+    if !block
+      move_on_blockless_canceller(Fiber.current, interval, with_value)
+    elsif block.arity > 0
+      move_on_after_with_block(Fiber.current, interval, with_value, &block)
+    else
+      Thread.current.backend.timeout(interval, nil, with_value, &block)
     end
+  end
 
-
+  def move_on_blockless_canceller(fiber, interval, with_value)
+    spin do
+      sleep interval
+      fiber.schedule with_value
+    end
   end
 
   def move_on_after_with_block(fiber, interval, with_value, &block)
@@ -107,10 +134,7 @@ module Polyphony
   end
 
   def sleep_forever
-    Thread.current.backend.
-    loop { sleep 60 }
-  ensure
-    Thread.current.backend.unref
+    Thread.current.backend.wait_event(true)
   end
 
   def throttled_loop(rate = nil, **opts, &block)
```
data/lib/polyphony/core/resource_pool.rb
CHANGED
```diff
@@ -58,7 +58,18 @@ module Polyphony
     # Discards the currently-acquired resource
     # instead of returning it to the pool when done.
     def discard!
-
+      if block_given?
+        @size.times do
+          acquire do |r|
+            next if yield(r)
+
+            @size -= 1
+            @acquired_resources.delete(Fiber.current)
+          end
+        end
+      else
+        @size -= 1 if @acquired_resources.delete(Fiber.current)
+      end
     end
 
     def preheat!
```