polyphony 0.46.1 → 0.47.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +8 -0
- data/Gemfile.lock +1 -1
- data/TODO.md +0 -14
- data/examples/core/enumerable.rb +64 -0
- data/examples/performance/fiber_resume.rb +43 -0
- data/examples/performance/thread-vs-fiber/compare.rb +59 -0
- data/examples/performance/thread-vs-fiber/em_server.rb +33 -0
- data/examples/performance/thread-vs-fiber/polyphony_server.rb +4 -3
- data/examples/performance/thread-vs-fiber/threaded_server.rb +22 -15
- data/examples/performance/thread_switch.rb +44 -0
- data/ext/polyphony/backend_common.h +9 -0
- data/ext/polyphony/backend_io_uring.c +81 -5
- data/ext/polyphony/backend_io_uring_context.c +1 -0
- data/ext/polyphony/backend_io_uring_context.h +1 -0
- data/ext/polyphony/backend_libev.c +67 -0
- data/ext/polyphony/fiber.c +10 -1
- data/ext/polyphony/polyphony.c +3 -0
- data/ext/polyphony/polyphony.h +3 -6
- data/ext/polyphony/queue.c +99 -34
- data/lib/polyphony/core/global_api.rb +40 -18
- data/lib/polyphony/extensions/fiber.rb +6 -2
- data/lib/polyphony/version.rb +1 -1
- data/test/test_backend.rb +48 -0
- data/test/test_fiber.rb +33 -4
- data/test/test_global_api.rb +61 -0
- data/test/test_queue.rb +117 -0
- metadata +7 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 606d74c424178b9e8bb17cd7b2848e0e1c1b092e77eeb98ce1026fd7d1f38c61
+  data.tar.gz: 9e003833af5f2c8505ea41d2ebe728fbf776f428f47d925b0185fd592d102737
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 1cfb1c81d3b38a545ceb856d4619c11386407081ecd43a6e6209d2ed4efb1c436242aa016657d379f0a0331ca24944f5e51c5359ad44afc3a88d53af1de7976b
+  data.tar.gz: 9b47ba16c7de942bea952f9f12d225aed86a505c2fd6cf7740a88d5d398af0e91e37a16e3ee9af2129ad88c8ff17c2b5aba7dcf6630811af62202e4b2585d6dc
data/CHANGELOG.md
CHANGED
@@ -1,3 +1,11 @@
+## 0.47.0
+
+* Implement `#spin_scope` used for creating blocking fiber scopes
+* Reimplement `move_on_after`, `cancel_after`, `Timeout.timeout` using
+  `Backend#timeout` (avoids creating canceller fiber for most common use case)
+* Implement `Backend#timeout` API
+* Implemented capped queues
+
 ## 0.46.1
 
 * Add `TCPServer#accept_loop`, `OpenSSL::SSL::SSLSocket#accept_loop` method
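Note: a minimal usage sketch of the reimplemented helpers (the behavior shown is assumed from Polyphony's documented global API; only the changelog entries above come from this diff):

```ruby
require 'polyphony'

# move_on_after silently interrupts the block when the timeout fires,
# so `result` is assumed to be nil here rather than :done.
result = move_on_after(1) { sleep 60; :done }

# cancel_after raises into the block instead; Polyphony::Cancel is the
# cancellation exception used by Polyphony.
begin
  cancel_after(1) { sleep 60 }
rescue Polyphony::Cancel
  puts 'cancelled after 1 second'
end
```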
data/Gemfile.lock
CHANGED
data/TODO.md
CHANGED
@@ -192,17 +192,3 @@ Prior art:
 
 - https://github.com/socketry/async-dns
 
-## Work on API
-
-- Add option for setting the exception raised on cancelling using `#cancel_after`:
-
-```ruby
-cancel_after(3, with_error: MyErrorClass) do
-  do_my_thing
-end
-# or a RuntimeError with message
-cancel_after(3, with_error: 'Cancelled due to timeout') do
-  do_my_thing
-end
-```
-
data/examples/core/enumerable.rb
ADDED
@@ -0,0 +1,64 @@
+# frozen_string_literal: true
+
+require 'bundler/setup'
+require 'polyphony'
+
+Exception.__disable_sanitized_backtrace__ = true
+
+module Enumerable
+  def map_concurrently(&block)
+    spin do
+      results = []
+      each_with_index do |i, idx|
+        spin { results[idx] = block.(i) }
+      end
+      Fiber.current.await_all_children
+      results
+    end.await
+  end
+
+  def each_concurrently(max_fibers: nil, &block)
+    return each_concurrently_with_fiber_pool(max_fibers, &block) if max_fibers
+
+    spin do
+      results = []
+      each do |i|
+        spin(&block).schedule(i)
+      end
+      Fiber.current.await_all_children
+    end.await
+    self
+  end
+
+  def each_concurrently_with_fiber_pool(max_fibers, &block)
+    spin do
+      fiber_count = 0
+      workers = []
+      each do |i|
+        if fiber_count < max_fibers
+          workers << spin do
+            loop do
+              item = receive
+              break if item == :__stop__
+              block.(item)
+            end
+          end
+        end
+
+        fiber = workers.shift
+        fiber << i
+        workers << fiber
+      end
+      workers.each { |f| f << :__stop__ }
+      Fiber.current.await_all_children
+    end.await
+    self
+  end
+end
+
+o = 1..3
+o.each_concurrently(max_fibers: 2) do |i|
+  puts "#{Fiber.current} sleep #{i}"
+  sleep(i)
+  puts "wakeup #{i}"
+end
data/examples/performance/fiber_resume.rb
ADDED
@@ -0,0 +1,43 @@
+# frozen_string_literal: true
+
+require 'fiber'
+
+class Fiber
+  attr_accessor :next
+end
+
+# This program shows how the performance of Fiber.transfer degrades as the fiber
+# count increases
+
+def run(num_fibers)
+  count = 0
+
+  GC.start
+  GC.disable
+
+  fibers = []
+  num_fibers.times do
+    fibers << Fiber.new { loop { Fiber.yield } }
+  end
+
+  t0 = Time.now
+
+  while count < 1000000
+    fibers.each do |f|
+      count += 1
+      f.resume
+    end
+  end
+
+  elapsed = Time.now - t0
+
+  puts "fibers: #{num_fibers} count: #{count} rate: #{count / elapsed}"
+rescue Exception => e
+  puts "Stopped at #{count} fibers"
+  p e
+end
+
+run(100)
+run(1000)
+run(10000)
+run(100000)
data/examples/performance/thread-vs-fiber/compare.rb
ADDED
@@ -0,0 +1,59 @@
+SERVERS = {
+  polyphony: {
+    port: 1234,
+    cmd: 'ruby examples/performance/thread-vs-fiber/polyphony_server.rb'
+  },
+  threaded: {
+    port: 1235,
+    cmd: 'ruby examples/performance/thread-vs-fiber/threaded_server.rb'
+  },
+  em: {
+    port: 1236,
+    cmd: 'ruby examples/performance/thread-vs-fiber/em_server.rb'
+  }
+}
+SETTINGS = [
+  '-t1 -c1',
+  '-t4 -c8',
+  '-t8 -c64',
+  '-t16 -c512',
+  '-t32 -c4096',
+  '-t64 -c8192',
+  '-t128 -c16384',
+  '-t256 -c32768'
+]
+
+def run_test(name, port, cmd, setting)
+  puts "*" * 80
+  puts "Run #{name} (#{port}): #{setting}"
+  puts "*" * 80
+
+  pid = spawn("#{cmd} > /dev/null 2>&1")
+  sleep 1
+
+  output = `wrk -d60 #{setting} \"http://127.0.0.1:#{port}/\"`
+  puts output
+  (output =~ /Requests\/sec:\s+(\d+)/) && $1.to_i
+ensure
+  Process.kill('KILL', pid)
+  Process.wait(pid)
+  3.times { puts }
+end
+
+def perform_benchmark
+  results = []
+  SETTINGS.each do |s|
+    results << SERVERS.inject({}) do |h, (n, o)|
+      h[n] = run_test(n, o[:port], o[:cmd], s)
+      h
+    end
+  end
+  results
+end
+
+results = []
+3.times { results << perform_benchmark }
+
+require 'pp'
+puts "results:"
+pp results
data/examples/performance/thread-vs-fiber/em_server.rb
ADDED
@@ -0,0 +1,33 @@
+require 'eventmachine'
+require 'http/parser'
+require 'socket'
+
+module HTTPServer
+  def post_init
+    @parser = Http::Parser.new
+    @pending_requests = []
+    @parser.on_message_complete = proc { @pending_requests << @parser }
+  end
+
+  def receive_data(data)
+    @parser << data
+    write_response while @pending_requests.shift
+  end
+
+  def write_response
+    status_code = "200 OK"
+    data = "Hello world!\n"
+    headers = "Content-Type: text/plain\r\nContent-Length: #{data.bytesize}\r\n"
+    send_data "HTTP/1.1 #{status_code}\r\n#{headers}\r\n#{data}"
+  end
+end
+
+EventMachine::run do
+  EventMachine::start_server(
+    '0.0.0.0',
+    1236,
+    HTTPServer
+  )
+  puts "pid #{Process.pid} EventMachine listening on port 1236"
+
+end
data/examples/performance/thread-vs-fiber/polyphony_server.rb
CHANGED
@@ -27,7 +27,8 @@ def write_response(socket)
 end
 
 server = TCPServer.open('0.0.0.0', 1234)
-puts "pid #{Process.pid}"
-puts "listening on port 1234"
+puts "pid #{Process.pid} Polyphony (#{Thread.current.backend.kind}) listening on port 1234"
 
-server.accept_loop
+server.accept_loop do |c|
+  spin { handle_client(c) }
+end
data/examples/performance/thread-vs-fiber/threaded_server.rb
CHANGED
@@ -1,23 +1,30 @@
-require 'thread'
 require 'http/parser'
 require 'socket'
 
-def handle_client(
-
-
-
-
-
-
-
-  end
-  client.read_loop { |data| parser << data }
-  client.close
+def handle_client(socket)
+  pending_requests = []
+  parser = Http::Parser.new
+  parser.on_message_complete = proc { pending_requests << parser }
+
+  while (data = socket.recv(8192))
+    parser << data
+    write_response(socket) while pending_requests.shift
   end
+rescue IOError, SystemCallError => e
+  # ignore
+ensure
+  socket.close
+end
+
+def write_response(socket)
+  status_code = "200 OK"
+  data = "Hello world!\n"
+  headers = "Content-Type: text/plain\r\nContent-Length: #{data.bytesize}\r\n"
+  socket.write "HTTP/1.1 #{status_code}\r\n#{headers}\r\n#{data}"
 end
 
-server = TCPServer.open(
-puts "
+server = TCPServer.open(1235)
+puts "pid #{Process.pid} threaded listening on port 1235"
 while socket = server.accept
-  handle_client(socket)
+  Thread.new { handle_client(socket) }
 end
data/examples/performance/thread_switch.rb
ADDED
@@ -0,0 +1,44 @@
+# frozen_string_literal: true
+
+require 'fiber'
+
+class Fiber
+  attr_accessor :next
+end
+
+# This program shows how the performance of Fiber.transfer degrades as the fiber
+# count increases
+
+def run(num_threads)
+  count = 0
+
+  GC.start
+  GC.disable
+
+  threads = []
+  t0 = Time.now
+  limit = 10_000_000 / num_threads
+  num_threads.times do
+    threads << Thread.new do
+      individual_count = 0
+      loop do
+        individual_count += 1
+        count += 1
+        break if individual_count == limit
+      end
+    end
+  end
+
+  threads.each(&:join)
+  elapsed = Time.now - t0
+
+  puts "threads: #{num_threads} count: #{count} rate: #{count / elapsed}"
rescue Exception => e
+  puts "Stopped at #{count} threads"
+  p e
+end
+
+run(100)
+run(1000)
+run(10000)
+run(100000)
data/ext/polyphony/backend_common.h
CHANGED
@@ -118,3 +118,12 @@ inline double current_time() {
   double t = ns;
   return t / 1e9;
 }
+
+inline VALUE backend_timeout_exception(VALUE exception) {
+  if (RTEST(rb_obj_is_kind_of(exception, rb_cArray)))
+    return rb_funcall(rb_ary_entry(exception, 0), ID_new, 1, rb_ary_entry(exception, 1));
+  else if (RTEST(rb_obj_is_kind_of(exception, rb_cClass)))
+    return rb_funcall(exception, ID_new, 0);
+  else
+    return rb_funcall(rb_eRuntimeError, ID_new, 1, exception);
+}
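Note: `backend_timeout_exception` decides what is raised when a timeout fires: an Array is treated as `[exception_class, message]`, a Class is instantiated with no arguments, and any other value becomes the message of a `RuntimeError`. A Ruby-level sketch of that mapping via the new `Backend#timeout` method (`MyTimeout` and the messages are illustrative, not part of this diff):

```ruby
class MyTimeout < RuntimeError; end

backend = Thread.current.backend

backend.timeout(1, MyTimeout) { sleep 10 }
# timeout fires => raises MyTimeout

backend.timeout(1, [MyTimeout, 'took too long']) { sleep 10 }
# timeout fires => raises MyTimeout with message 'took too long'

backend.timeout(1, 'took too long') { sleep 10 }
# timeout fires => raises RuntimeError with message 'took too long'
```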
data/ext/polyphony/backend_io_uring.c
CHANGED
@@ -171,7 +171,7 @@ void io_uring_backend_handle_completion(struct io_uring_cqe *cqe, Backend_t *bac
     // otherwise, we mark it as completed, schedule the fiber and let it deal
     // with releasing the context
     ctx->completed = 1;
-    if (ctx->result != -ECANCELED) Fiber_make_runnable(ctx->fiber,
+    if (ctx->result != -ECANCELED) Fiber_make_runnable(ctx->fiber, ctx->resume_value);
   }
 }
 
@@ -774,15 +774,23 @@ VALUE Backend_wait_io(VALUE self, VALUE io, VALUE write) {
   return self;
 }
 
-
-int io_uring_backend_submit_timeout_and_await(Backend_t *backend, double duration, VALUE *resume_value) {
+inline struct __kernel_timespec double_to_timespec(double duration) {
   double duration_integral;
   double duration_fraction = modf(duration, &duration_integral);
   struct __kernel_timespec ts;
-
-  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
   ts.tv_sec = duration_integral;
   ts.tv_nsec = floor(duration_fraction * 1000000000);
+  return ts;
+}
+
+inline struct __kernel_timespec duration_to_timespec(VALUE duration) {
+  return double_to_timespec(NUM2DBL(duration));
+}
+
+// returns true if completed, 0 otherwise
+int io_uring_backend_submit_timeout_and_await(Backend_t *backend, double duration, VALUE *resume_value) {
+  struct __kernel_timespec ts = double_to_timespec(duration);
+  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
 
   op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_TIMEOUT);
   io_uring_prep_timeout(sqe, &ts, 0, 0);
@@ -830,6 +838,73 @@ VALUE Backend_timer_loop(VALUE self, VALUE interval) {
   }
 }
 
+VALUE Backend_timeout_safe(VALUE arg) {
+  return rb_yield(arg);
+}
+
+VALUE Backend_timeout_rescue(VALUE arg, VALUE exception) {
+  return exception;
+}
+
+VALUE Backend_timeout_ensure_safe(VALUE arg) {
+  return rb_rescue2(Backend_timeout_safe, Qnil, Backend_timeout_rescue, Qnil, rb_eException, (VALUE)0);
+}
+
+struct Backend_timeout_ctx {
+  Backend_t *backend;
+  op_context_t *ctx;
+};
+
+VALUE Backend_timeout_ensure(VALUE arg) {
+  struct Backend_timeout_ctx *timeout_ctx = (struct Backend_timeout_ctx *)arg;
+  if (!timeout_ctx->ctx->completed) {
+    timeout_ctx->ctx->result = -ECANCELED;
+
+    // op was not completed, so we need to cancel it
+    struct io_uring_sqe *sqe = io_uring_get_sqe(&timeout_ctx->backend->ring);
+    io_uring_prep_cancel(sqe, timeout_ctx->ctx, 0);
+    timeout_ctx->backend->pending_sqes = 0;
+    io_uring_submit(&timeout_ctx->backend->ring);
+  }
+  OP_CONTEXT_RELEASE(&timeout_ctx->backend->store, timeout_ctx->ctx);
+  return Qnil;
+}
+
+VALUE Backend_timeout(int argc, VALUE *argv, VALUE self) {
+  VALUE duration;
+  VALUE exception;
+  VALUE move_on_value = Qnil;
+  rb_scan_args(argc, argv, "21", &duration, &exception, &move_on_value);
+
+  struct __kernel_timespec ts = duration_to_timespec(duration);
+  Backend_t *backend;
+  GetBackend(self, backend);
+  VALUE result = Qnil;
+  VALUE timeout = rb_funcall(cTimeoutException, ID_new, 0);
+
+  struct io_uring_sqe *sqe = io_uring_get_sqe(&backend->ring);
+
+  op_context_t *ctx = OP_CONTEXT_ACQUIRE(&backend->store, OP_TIMEOUT);
+  ctx->resume_value = timeout;
+  io_uring_prep_timeout(sqe, &ts, 0, 0);
+  io_uring_sqe_set_data(sqe, ctx);
+  io_uring_sqe_set_flags(sqe, IOSQE_ASYNC);
+  io_uring_backend_defer_submit(backend);
+
+  struct Backend_timeout_ctx timeout_ctx = {backend, ctx};
+  result = rb_ensure(Backend_timeout_ensure_safe, Qnil, Backend_timeout_ensure, (VALUE)&timeout_ctx);
+
+  if (result == timeout) {
+    if (exception == Qnil) return move_on_value;
+    RAISE_EXCEPTION(backend_timeout_exception(exception));
+  }
+
+  RAISE_IF_EXCEPTION(result);
+  RB_GC_GUARD(result);
+  RB_GC_GUARD(timeout);
+  return result;
+}
+
 VALUE Backend_waitpid(VALUE self, VALUE pid) {
   Backend_t *backend;
   int pid_int = NUM2INT(pid);
@@ -899,6 +974,7 @@ void Init_Backend() {
   rb_define_method(cBackend, "wait_io", Backend_wait_io, 2);
   rb_define_method(cBackend, "sleep", Backend_sleep, 1);
   rb_define_method(cBackend, "timer_loop", Backend_timer_loop, 1);
+  rb_define_method(cBackend, "timeout", Backend_timeout, -1);
  rb_define_method(cBackend, "waitpid", Backend_waitpid, 1);
   rb_define_method(cBackend, "wait_event", Backend_wait_event, 1);
 
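Note: given the `rb_scan_args(argc, argv, "21", ...)` signature and the `if (exception == Qnil) return move_on_value;` branch above, `Backend#timeout` takes a duration, an exception spec, and an optional move-on value, and yields the block. A sketch of how a `move_on_after`-style helper could wrap it; `my_move_on_after` is a hypothetical wrapper, and the actual reimplementation lives in `lib/polyphony/core/global_api.rb`, which is not shown in this diff:

```ruby
# Hypothetical wrapper: passing nil as the exception spec makes Backend#timeout
# return `value` instead of raising when the timeout fires.
def my_move_on_after(duration, value = nil, &block)
  Thread.current.backend.timeout(duration, nil, value, &block)
end

result = my_move_on_after(0.5, :timed_out) { sleep 5; :done }
# => :done if the block finishes within 0.5s, :timed_out otherwise
```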