concurrently 1.0.1 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.gitignore +1 -1
- data/.travis.yml +8 -3
- data/README.md +70 -60
- data/RELEASE_NOTES.md +16 -1
- data/Rakefile +98 -14
- data/concurrently.gemspec +16 -12
- data/ext/mruby/io.rb +1 -1
- data/guides/Overview.md +191 -66
- data/guides/Performance.md +300 -102
- data/guides/Troubleshooting.md +28 -28
- data/lib/Ruby/concurrently/proc/evaluation/error.rb +10 -0
- data/lib/all/concurrently/error.rb +0 -3
- data/lib/all/concurrently/evaluation.rb +8 -12
- data/lib/all/concurrently/event_loop.rb +1 -1
- data/lib/all/concurrently/event_loop/fiber.rb +3 -3
- data/lib/all/concurrently/event_loop/io_selector.rb +1 -1
- data/lib/all/concurrently/event_loop/run_queue.rb +29 -17
- data/lib/all/concurrently/proc.rb +13 -13
- data/lib/all/concurrently/proc/evaluation.rb +29 -29
- data/lib/all/concurrently/proc/evaluation/error.rb +13 -0
- data/lib/all/concurrently/proc/fiber.rb +3 -6
- data/lib/all/concurrently/version.rb +1 -1
- data/lib/all/io.rb +118 -41
- data/lib/all/kernel.rb +82 -29
- data/lib/mruby/concurrently/event_loop/io_selector.rb +46 -0
- data/lib/mruby/kernel.rb +1 -1
- data/mrbgem.rake +28 -17
- data/mruby_builds/build_config.rb +67 -0
- data/perf/Ruby/stage.rb +23 -0
- data/perf/benchmark_call_methods.rb +32 -0
- data/perf/benchmark_call_methods_waiting.rb +52 -0
- data/perf/benchmark_wait_methods.rb +38 -0
- data/perf/mruby/stage.rb +8 -0
- data/perf/profile_await_readable.rb +10 -0
- data/perf/{concurrent_proc_call.rb → profile_call.rb} +1 -5
- data/perf/{concurrent_proc_call_and_forget.rb → profile_call_and_forget.rb} +1 -5
- data/perf/{concurrent_proc_call_detached.rb → profile_call_detached.rb} +1 -5
- data/perf/{concurrent_proc_call_nonblock.rb → profile_call_nonblock.rb} +1 -5
- data/perf/profile_wait.rb +7 -0
- data/perf/stage.rb +47 -0
- data/perf/stage/benchmark.rb +47 -0
- data/perf/stage/benchmark/code_gen.rb +29 -0
- data/perf/stage/benchmark/code_gen/batch.rb +41 -0
- data/perf/stage/benchmark/code_gen/single.rb +38 -0
- metadata +27 -23
- data/ext/mruby/array.rb +0 -19
- data/lib/Ruby/concurrently/error.rb +0 -4
- data/perf/_shared/stage.rb +0 -33
- data/perf/concurrent_proc_calls.rb +0 -49
- data/perf/concurrent_proc_calls_awaiting.rb +0 -48
data/perf/stage/benchmark.rb
ADDED
@@ -0,0 +1,47 @@
+class Stage
+  class Benchmark
+    SECONDS = 1
+    RESULT_HEADER = "Results for #{RUBY_ENGINE} #{RUBY_ENGINE_VERSION}"
+    RESULT_FORMAT = " %-25s %8d executions in %2.4f seconds"
+
+    def self.header
+      <<DOC
+Benchmarks
+----------
+DOC
+    end
+
+    def self.result_header
+      "#{RESULT_HEADER}\n#{'-'*RESULT_HEADER.length}"
+    end
+
+    def initialize(stage, name, opts = {})
+      @stage = stage
+      @name = name
+      @opts = opts
+
+      opts[:call] ||= :call_nonblock
+      opts[:batch_size] ||= 1
+
+      code_gen = CodeGen.const_get(opts[:batch_size] > 1 ? :Batch : :Single).new(opts)
+      proc_lines = code_gen.proc_lines
+      args_lines = code_gen.args_lines
+      call_lines = code_gen.call_lines
+
+      @code = eval [*proc_lines, *args_lines, *call_lines].join "\n"
+
+      proc_lines << "" if proc_lines.size > 1
+      call_lines[0] = "while elapsed_seconds < #{SECONDS}"
+      @desc = [" #{@name}:", *proc_lines, *args_lines, "", *call_lines, ""].join "\n "
+    end
+
+    attr_reader :desc
+
+    def run
+      result = @stage.gc_disabled do
+        @stage.execute(seconds: SECONDS, &@code)
+      end
+      puts sprintf(RESULT_FORMAT, "#{@name}:", @opts[:batch_size]*result[:iterations], result[:time])
+    end
+  end
+end
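For orientation, here is a minimal usage sketch of this class. It assumes the Stage#execute and Stage#gc_disabled helpers come from the new perf/Ruby/stage.rb and perf/mruby/stage.rb files (their contents are not shown in this section), and the option values are illustrative, not taken from the gem:

    # Hypothetical driver script; option values are assumptions for illustration.
    stage = Stage.new
    benchmark = Stage::Benchmark.new stage, 'conproc.call_nonblock',
      proc: "concurrent_proc{}",  # source string of the proc under test
      call: :call_nonblock,       # method to benchmark on it
      batch_size: 1               # 1 selects CodeGen::Single, >1 selects CodeGen::Batch

    puts Stage::Benchmark.header
    puts benchmark.desc           # prints the generated benchmark code
    puts Stage::Benchmark.result_header
    benchmark.run                 # executes the generated proc for SECONDS and prints a result line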
data/perf/stage/benchmark/code_gen.rb
ADDED
@@ -0,0 +1,29 @@
+class Stage
+  class Benchmark
+    class CodeGen
+      def initialize(opts)
+        opts.each do |key, value|
+          instance_variable_set "@#{key}", value
+        end
+      end
+
+      def proc_lines
+        @proc.chomp.split("\n").tap do |lines|
+          lines[0] = "test_proc = #{lines[0]}"
+        end
+      end
+
+      def args_lines
+        if @args
+          @args.chomp.split("\n")
+        else
+          []
+        end
+      end
+
+      def call_lines
+        ["proc do", "test_proc.#{@call}#{(@args ? "(*args)" : "")}", "end"]
+      end
+    end
+  end
+end
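To make the generated pieces concrete, with illustrative options (proc: "concurrent_proc{ wait 0 }", call: :call, no :args) the base generator yields:

    gen = Stage::Benchmark::CodeGen.new(proc: "concurrent_proc{ wait 0 }", call: :call)
    gen.proc_lines  #=> ["test_proc = concurrent_proc{ wait 0 }"]
    gen.args_lines  #=> []
    gen.call_lines  #=> ["proc do", "test_proc.call", "end"]

Benchmark#initialize joins these line arrays and evals the result into the proc that gets executed.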
data/perf/stage/benchmark/code_gen/batch.rb
ADDED
@@ -0,0 +1,41 @@
+class Stage
+  class Benchmark
+    class CodeGen
+      class Batch < CodeGen
+        def args_lines
+          case (lines = super).size
+          when 0
+            ["batch = Array.new(#{@batch_size})"]
+          else
+            lines.each{ |l| l.replace " #{l}" }
+            lines.unshift "batch = Array.new(#{@batch_size}) do |idx|"
+            lines.push "end"
+          end
+        end
+
+        def call_lines
+          lines = super
+          blk = "{#{@args ? " |*args|" : nil} #{lines[1]} }"
+          if @sync
+            @sync = @call if @sync == true
+            case @sync
+            when :call_nonblock, :call_detached, :await_result
+              lines[1] = "evaluations = batch.map#{blk}"
+              lines.insert 2, "evaluations.each{ |evaluation| evaluation.await_result }"
+            when :call
+              lines[1] = "batch.each#{blk}"
+              lines.insert 2, "# Concurrently::Proc#call already synchronizes the results of evaluations"
+            when :call_and_forget, :wait
+              lines[1] = "batch.each#{blk}"
+              lines.insert 2, "wait 0"
+            end
+          else
+            lines[1] = "batch.each#{blk}"
+          end
+          lines[1..-2].each{ |l| l.replace " #{l}" }
+          lines
+        end
+      end
+    end
+  end
+end
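As an illustration with assumed options (batch_size: 2, call: :call_nonblock, sync: true, no :args), Batch expands the base generator's output into roughly:

    batch = Array.new(2)

    proc do
      evaluations = batch.map{ test_proc.call_nonblock }
      evaluations.each{ |evaluation| evaluation.await_result }
    end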
data/perf/stage/benchmark/code_gen/single.rb
ADDED
@@ -0,0 +1,38 @@
+class Stage
+  class Benchmark
+    class CodeGen
+      class Single < CodeGen
+        def args_lines
+          case (lines = super).size
+          when 0
+            lines
+          when 1
+            lines[0] = "args = #{lines[0]}"
+          else
+            lines.each{ |l| l.replace " #{l}" }
+            lines.unshift "args = begin"
+            lines.push "end"
+          end
+        end
+
+        def call_lines
+          lines = super
+          if @sync
+            @sync = @call if @sync == true
+            case @sync
+            when :call_nonblock, :call_detached, :await_result
+              lines[1] = "evaluation = #{lines[1]}"
+              lines.insert 2, "evaluation.await_result"
+            when :call
+              lines.insert 2, "# Concurrently::Proc#call already synchronizes the results of evaluations"
+            when :call_and_forget, :wait
+              lines.insert 2, "wait 0"
+            end
+          end
+          lines[1..-2].each{ |l| l.replace " #{l}" }
+          lines
+        end
+      end
+    end
+  end
+end
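With comparable assumed options (call: :call_detached, sync: true, no :args), Single instead awaits a single evaluation per iteration:

    proc do
      evaluation = test_proc.call_detached
      evaluation.await_result
    end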
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: concurrently
 version: !ruby/object:Gem::Version
-  version: 1.0.1
+  version: 1.1.0
 platform: ruby
 authors:
 - Christopher Aue
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2017-
+date: 2017-07-10 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: nio4r
@@ -52,18 +52,11 @@ dependencies:
   - - "~>"
     - !ruby/object:Gem::Version
       version: '2.2'
-description:
-
-
-
-
-  similar to a regular proc. Calling a concurrent proc creates a concurrent
-  evaluation which is kind of a lightweight thread: It can wait for stuff without
-  blocking other concurrent evaluations.
-
-  Under the hood, concurrent procs are evaluated inside fibers. They can wait for
-  readiness of I/O or a period of time (or the result of other concurrent
-  evaluations).
+description: "Concurrently is a concurrency framework for Ruby and mruby based on\nfibers.
+  With it code can be evaluated independently in its own execution\ncontext similar
+  to a thread:\n\n hello = concurrently do\n wait 0.2 # seconds\n \"hello\"\n
+  \ end\n \n world = concurrently do\n wait 0.1 # seconds\n \"world\"\n
+  \ end\n \n puts \"#{hello.await_result} #{world.await_result}\"\n"
 email:
 - rubygems@christopheraue.net
 executables: []
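Decoded from the YAML escaping above, the example embedded in the new description reads:

    hello = concurrently do
      wait 0.2 # seconds
      "hello"
    end

    world = concurrently do
      wait 0.1 # seconds
      "world"
    end

    puts "#{hello.await_result} #{world.await_result}"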
@@ -82,7 +75,6 @@ files:
 - concurrently.gemspec
 - ext/Ruby/thread.rb
 - ext/all/array.rb
-- ext/mruby/array.rb
 - ext/mruby/fiber.rb
 - ext/mruby/io.rb
 - guides/Installation.md
@@ -90,9 +82,9 @@ files:
 - guides/Performance.md
 - guides/Troubleshooting.md
 - lib/Ruby/concurrently.rb
-- lib/Ruby/concurrently/error.rb
 - lib/Ruby/concurrently/event_loop.rb
 - lib/Ruby/concurrently/event_loop/io_selector.rb
+- lib/Ruby/concurrently/proc/evaluation/error.rb
 - lib/all/concurrently/error.rb
 - lib/all/concurrently/evaluation.rb
 - lib/all/concurrently/evaluation/error.rb
@@ -103,20 +95,32 @@ files:
 - lib/all/concurrently/event_loop/run_queue.rb
 - lib/all/concurrently/proc.rb
 - lib/all/concurrently/proc/evaluation.rb
+- lib/all/concurrently/proc/evaluation/error.rb
 - lib/all/concurrently/proc/fiber.rb
 - lib/all/concurrently/version.rb
 - lib/all/io.rb
 - lib/all/kernel.rb
+- lib/mruby/concurrently/event_loop/io_selector.rb
 - lib/mruby/concurrently/proc.rb
 - lib/mruby/kernel.rb
 - mrbgem.rake
--
-- perf/
-- perf/
-- perf/
-- perf/
-- perf/
-- perf/
+- mruby_builds/build_config.rb
+- perf/Ruby/stage.rb
+- perf/benchmark_call_methods.rb
+- perf/benchmark_call_methods_waiting.rb
+- perf/benchmark_wait_methods.rb
+- perf/mruby/stage.rb
+- perf/profile_await_readable.rb
+- perf/profile_call.rb
+- perf/profile_call_and_forget.rb
+- perf/profile_call_detached.rb
+- perf/profile_call_nonblock.rb
+- perf/profile_wait.rb
+- perf/stage.rb
+- perf/stage/benchmark.rb
+- perf/stage/benchmark/code_gen.rb
+- perf/stage/benchmark/code_gen/batch.rb
+- perf/stage/benchmark/code_gen/single.rb
 homepage: https://github.com/christopheraue/m-ruby-concurrently
 licenses:
 - Apache-2.0
data/ext/mruby/array.rb
DELETED
@@ -1,19 +0,0 @@
-# @api mruby_patches
-# @since 1.0.0
-class Array
-  # Alias for original Array#pop
-  alias_method :pop_single, :pop
-
-  # Reimplements Array#pop to add support for popping multiple items at once.
-  #
-  # By default, Array#pop can only pop a single item in mruby
-  def pop(n = nil)
-    if n
-      res = []
-      n.times{ res << pop_single }
-      res.reverse!
-    else
-      pop_single
-    end
-  end
-end
data/perf/_shared/stage.rb
DELETED
@@ -1,33 +0,0 @@
-require 'bundler'
-
-Bundler.require :default
-Bundler.require :perf
-
-class Stage
-  def measure(seconds: 1) # &test
-    GC.start
-    GC.disable
-    profile = RubyProf::Profile.new(merge_fibers: true).tap(&:start) if ARGV[0] == 'profile'
-
-    event_loop = Concurrently::EventLoop.current
-    event_loop.reinitialize!
-    iterations = 0
-    start_time = event_loop.lifetime
-    end_time = start_time + seconds
-    while event_loop.lifetime < end_time
-      yield
-      iterations += 1
-    end
-    stop_time = event_loop.lifetime
-
-    if ARGV[0] == 'profile'
-      printer = ARGV[1].dup || 'flat'
-      printer[0] = printer[0].capitalize
-      RubyProf.const_get("#{printer}Printer").new(profile.stop).print(STDOUT, sort_method: :self_time)
-    end
-    GC.enable
-
-    { iterations: iterations, time: (stop_time-start_time) }
-  end
-end
-
data/perf/concurrent_proc_calls.rb
DELETED
@@ -1,49 +0,0 @@
-#!/bin/env ruby
-
-require_relative "_shared/stage"
-
-stage = Stage.new
-format = " %-25s %7d executions in %2.4f seconds"
-
-puts <<-DOC
-Benchmarked Code
-----------------
-proc = proc{}
-conproc = concurrent_proc{}
-
-while elapsed_seconds < 1
-# CODE #
-end
-
-Results
--------
-# CODE #
-DOC
-
-proc = proc{}
-conproc = concurrent_proc{}
-
-result = stage.measure(seconds: 1) do
-  proc.call
-end
-puts sprintf(format, "proc.call:", result[:iterations], result[:time])
-
-result = stage.measure(seconds: 1) do
-  conproc.call
-end
-puts sprintf(format, "conproc.call:", result[:iterations], result[:time])
-
-result = stage.measure(seconds: 1) do
-  conproc.call_nonblock
-end
-puts sprintf(format, "conproc.call_nonblock:", result[:iterations], result[:time])
-
-result = stage.measure(seconds: 1) do
-  conproc.call_detached
-end
-puts sprintf(format, "conproc.call_detached:", result[:iterations], result[:time])
-
-result = stage.measure(seconds: 1) do
-  conproc.call_and_forget
-end
-puts sprintf(format, "conproc.call_and_forget:", result[:iterations], result[:time])
data/perf/concurrent_proc_calls_awaiting.rb
DELETED
@@ -1,48 +0,0 @@
-#!/bin/env ruby
-
-require_relative "_shared/stage"
-
-stage = Stage.new
-format = " %-25s %7d executions in %2.4f seconds"
-factor = ARGV.fetch(0, 1).to_i
-
-puts <<-DOC
-Benchmarked Code
-----------------
-conproc = concurrent_proc{ wait 0 }
-
-while elapsed_seconds < 1
-#{factor}.times{ # CODE # }
-wait 0 # to enter the event loop
-end
-
-Results
--------
-# CODE #
-DOC
-
-conproc = concurrent_proc{ wait 0 }
-
-result = stage.measure(seconds: 1) do
-  factor.times{ conproc.call }
-  # no need to enter the event loop manually. It already happens in #call
-end
-puts sprintf(format, "conproc.call:", factor*result[:iterations], result[:time])
-
-result = stage.measure(seconds: 1) do
-  factor.times{ conproc.call_nonblock }
-  wait 0
-end
-puts sprintf(format, "conproc.call_nonblock:", factor*result[:iterations], result[:time])
-
-result = stage.measure(seconds: 1) do
-  factor.times{ conproc.call_detached }
-  wait 0
-end
-puts sprintf(format, "conproc.call_detached:", factor*result[:iterations], result[:time])
-
-result = stage.measure(seconds: 1) do
-  factor.times{ conproc.call_and_forget }
-  wait 0
-end
-puts sprintf(format, "conproc.call_and_forget:", factor*result[:iterations], result[:time])