benchmark_driver 0.2.4 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
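At a glance, 0.3.0 renames the command-line executable from `exe/benchmark_driver` to `exe/benchmark-driver` and moves the library namespace from `BenchmarkDriver` to `Benchmark::Driver`. The snippet below is a minimal illustrative sketch (not part of the released files) of driving the new API directly, based on `lib/benchmark/driver.rb` in this diff; the benchmark definition itself is a hypothetical example.

```
require 'benchmark/driver'

# Same options the renamed benchmark-driver executable builds from its flags:
# measure_type 'ips' (-i) or 'loop_count' (-l), execs as "name::path" pairs (-e).
driver = Benchmark::Driver.new(measure_type: 'ips', execs: ['ruby1::ruby', 'ruby2::ruby'])

# Equivalent to a single-benchmark YAML with name/prelude/benchmark keys.
driver.run(
  name: 'join',
  prelude: "a = 'a' * 100\nb = 'b' * 100",
  benchmark: '[a, b].join'
)
```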
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
- SHA1:
- metadata.gz: ac69f7f7cf96d697a609937bc4181d1fe4e97f40
- data.tar.gz: 7dc64c8395cc258e4ed3e818999182b47f005a31
+ SHA256:
+ metadata.gz: 95e89d2f7ab5cee3192bf505ab51a848615bee2c2b2f021cd9c41e62d23c641a
+ data.tar.gz: 3973aae7876002ae741bb6443a5054f3e7a8be4069e4989d61ef2a0b02615766
  SHA512:
- metadata.gz: 15346cbb2e16e720d0eb21f42afd5d804953f19e1511d4ee4f66d4050843ed5eb6ea692a9cc1d45deb4ab71ed9a506a751bf77c559f7daebcc27bcba19771ddb
- data.tar.gz: 2108238e1807ef87140241bbb2bd4988ce88c482d19da5204869e38b2ce0e0d99de711aefd929b50b6c33b48456040309c6cab31aedbe6273801865999ed48be
+ metadata.gz: 546445be415fd833e7f180d853c5b44440f4ca76cf2a77cd5dfbef0d38fba7441aeefa71a99dc3ab97f6d2f7050d293670c70fc520d446228da680041a1d9b75
+ data.tar.gz: b309b8421f76a5039788556786678d863ed11d85bd891a6ab1bb1ffdf203a96db8d536cb3cc4ff1973b78bdaf296e87464ae00a8dbff6e3537045ffc5e844991
data/.travis.yml CHANGED
@@ -1,11 +1,11 @@
  sudo: false
  language: ruby
  rvm:
- - 2.4.1
+ - 2.4.2
  before_install: gem install bundler -v 1.15.4
  script:
  - bundle exec rake

  # Test some options
- - bundle exec exe/benchmark_driver benchmarks/example_single.yml -e ruby1::ruby -e ruby2::ruby -i
- - bundle exec exe/benchmark_driver benchmarks/example_single.yml -e ruby1::ruby -e ruby2::ruby -i 2
+ - bundle exec exe/benchmark-driver benchmarks/example_single.yml -e ruby1::ruby -e ruby2::ruby -i
+ - bundle exec exe/benchmark-driver benchmarks/example_single.yml -e ruby1::ruby -e ruby2::ruby -i 2
data/README.md CHANGED
@@ -1,4 +1,4 @@
- # BenchmarkDriver [![Build Status](https://travis-ci.org/k0kubun/benchmark_driver.svg?branch=master)](https://travis-ci.org/k0kubun/benchmark_driver)
+ # Benchmark::Driver [![Build Status](https://travis-ci.org/k0kubun/benchmark_driver.svg?branch=master)](https://travis-ci.org/k0kubun/benchmark_driver)

  Benchmark driver for different Ruby executables

@@ -9,8 +9,8 @@ Benchmark driver for different Ruby executables
  ## Usage

  ```
- $ exe/benchmark_driver -h
- Usage: benchmark_driver [options] [YAML]
+ $ benchmark-driver -h
+ Usage: benchmark-driver [options] [YAML]
  -e, --executables [EXECS] Ruby executables (e1::path1; e2::path2; e3::path3;...)
  -i, --ips [SECONDS] Measure IPS in duration seconds (default: 1)
  -l, --loop-count [COUNT] Measure execution time with loop count (default: 100000)
@@ -31,7 +31,7 @@ benchmark: erb.result
  you can benchmark the script with multiple ruby executables.

  ```
- $ exe/benchmark_driver benchmarks/example_single.yml -e ruby1::ruby -e ruby2::ruby
+ $ benchmark-driver benchmarks/example_single.yml -e ruby1::ruby -e ruby2::ruby
  benchmark results:
  Execution time (sec)
  name ruby1 ruby2
@@ -45,7 +45,7 @@ example_single 0.986
  And you can change benchmark output to IPS (iteration per second) by `-i` option.

  ```
- $ exe/benchmark_driver benchmarks/example_single.yml -e ruby1::ruby -e ruby2::ruby -i
+ $ benchmark-driver benchmarks/example_single.yml -e ruby1::ruby -e ruby2::ruby -i
  Result -------------------------------------------
  ruby1 ruby2
  example_single 99414.1 i/s 99723.3 i/s
@@ -76,7 +76,7 @@ benchmarks:
  you can benchmark the scripts with multiple ruby executables.

  ```
- $ exe/benchmark_driver benchmarks/example_multi.yml -e ruby1::ruby -e ruby2::ruby
+ $ benchmark-driver benchmarks/example_multi.yml -e ruby1::ruby -e ruby2::ruby
  benchmark results:
  Execution time (sec)
  name ruby1 ruby2
@@ -90,7 +90,7 @@ interpolation 1.002
  ```

  ```
- $ exe/benchmark_driver benchmarks/example_multi.yml -e ruby1::ruby -e ruby2::ruby -i
+ $ benchmark-driver benchmarks/example_multi.yml -e ruby1::ruby -e ruby2::ruby -i
  Result -------------------------------------------
  ruby1 ruby2
  join 4701954.3 i/s 4639520.3 i/s
@@ -142,7 +142,7 @@ benchmarks:
  If you have a trouble like an unexpectedly fast result, you should check benchmark script by `-v`.

  ```
- $ exe/benchmark_driver benchmarks/example_multi.yml -v
+ $ benchmark-driver benchmarks/example_multi.yml -v
  --- Running "join" with "ruby" 957780 times ---
  a = 'a' * 100
  b = 'b' * 100
data/Rakefile CHANGED
@@ -7,7 +7,7 @@ task :benchmarks do

  Dir.glob(File.expand_path('./benchmarks/**/*.yml', __dir__)).sort.each do |path|
  Bundler.with_clean_env do
- sh [File.expand_path('./exe/benchmark_driver', __dir__), path].shelljoin
+ sh [File.expand_path('./exe/benchmark-driver', __dir__), path].shelljoin
  end
  end
  end
data/benchmark_driver.gemspec CHANGED
@@ -1,11 +1,11 @@
  # coding: utf-8
  lib = File.expand_path('../lib', __FILE__)
  $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
- require 'benchmark_driver/version'
+ require 'benchmark/driver/version'

  Gem::Specification.new do |spec|
  spec.name = 'benchmark_driver'
- spec.version = BenchmarkDriver::VERSION
+ spec.version = Benchmark::Driver::VERSION
  spec.authors = ['Takashi Kokubun']
  spec.email = ['takashikkbn@gmail.com']

@@ -20,11 +20,11 @@ args = OptionParser.new do |o|
  options[:execs] << "#{version}::#{`RBENV_VERSION='#{version}' rbenv which ruby`.rstrip}"
  end
  end
- o.on('-i [DURATION]', '--ips [SECONDS]', "Measure IPS in duration seconds (default: #{BenchmarkDriver::DEFAULT_IPS_DURATION})") do |i|
+ o.on('-i [DURATION]', '--ips [SECONDS]', "Measure IPS in duration seconds (default: #{Benchmark::Driver::DEFAULT_IPS_DURATION})") do |i|
  options[:measure_type] = 'ips'
  options[:measure_num] = Integer(i) if i
  end
- o.on('-l [COUNT]', '--loop-count [COUNT]', "Measure execution time with loop count (default: #{BenchmarkDriver::DEFAULT_LOOP_COUNT})") do |l|
+ o.on('-l [COUNT]', '--loop-count [COUNT]', "Measure execution time with loop count (default: #{Benchmark::Driver::DEFAULT_LOOP_COUNT})") do |l|
  options[:measure_type] = 'loop_count'
  options[:measure_num] = Integer(l) if l
  end
@@ -34,7 +34,7 @@ args = OptionParser.new do |o|
  end.parse!(ARGV)
  abort "No YAML file is specified" if args.empty?

- driver = BenchmarkDriver.new(options)
+ driver = Benchmark::Driver.new(options)
  args.each do |yaml|
  default = { name: File.basename(yaml, '.*') }
  driver.run(default.merge(YAML.load(File.read(yaml))))
@@ -0,0 +1,273 @@
+ require 'benchmark'
+ require 'benchmark/driver/version'
+ require 'tempfile'
+
+ class Benchmark::Driver
+ MEASURE_TYPES = %w[loop_count ips]
+ DEFAULT_LOOP_COUNT = 100_000
+ DEFAULT_IPS_DURATION = 1
+
+ # @param [String] measure_type - "loop_count"|"ips"
+ # @param [Integer,nil] measure_num - Loop count for "loop_type", duration seconds for "ips"
+ # @param [Array<String>] execs - ["path1", "path2"] or `["ruby1::path1", "ruby2::path2"]`
+ # @param [Boolean] verbose
+ def initialize(measure_type: 'loop_count', measure_num: nil, execs: ['ruby'], verbose: false)
+ unless MEASURE_TYPES.include?(measure_type)
+ abort "unsupported measure type: #{measure_type.dump}"
+ end
+ @measure_type = measure_type
+ @measure_num = measure_num
+ @execs = execs.map do |exec|
+ name, path = exec.split('::', 2)
+ Executable.new(name, path || name)
+ end
+ @verbose = verbose
+ end
+
+ # @param [Hash] root_hash
+ def run(root_hash)
+ root = BenchmarkRoot.new(Hash[root_hash.map { |k, v| [k.to_sym, v] }])
+
+ results = root.benchmarks.map do |benchmark|
+ metrics_by_exec = {}
+ iterations = calc_iterations(@execs.first, benchmark)
+ @execs.each do |exec|
+ if @verbose
+ puts "--- Running #{benchmark.name.dump} with #{exec.name.dump} #{iterations} times ---"
+ puts "#{benchmark.benchmark_script(iterations)}\n"
+ end
+ elapsed_time = run_benchmark(exec, benchmark, iterations)
+ metrics_by_exec[exec] = BenchmarkMetrics.new(iterations, elapsed_time)
+ end
+ BenchmarkResult.new(benchmark.name, metrics_by_exec)
+ end
+ puts if @verbose
+
+ case @measure_type
+ when 'loop_count'
+ LoopCountReporter.report(@execs, results)
+ when 'ips'
+ IpsReporter.report(@execs, results)
+ else
+ raise "unexpected measure type: #{@measure_type.dump}"
+ end
+ end
+
+ private
+
+ # Estimate iterations to finish benchmark within `@duration`.
+ def calc_iterations(exec, benchmark)
+ case @measure_type
+ when 'loop_count'
+ @measure_num || benchmark.loop_count || DEFAULT_LOOP_COUNT
+ when 'ips'
+ # TODO: Change to try from 1, 10, 100 ...
+ base = 1000
+ time = run_benchmark(exec, benchmark, base)
+ duration = @measure_num || DEFAULT_IPS_DURATION
+ (duration / time * base).to_i
+ else
+ raise "unexpected measure type: #{@measure_type.dump}"
+ end
+ end
+
+ def run_benchmark(exec, benchmark, iterations)
+ # TODO: raise error if negative
+ measure_script(exec.path, benchmark.benchmark_script(iterations)) -
+ measure_script(exec.path, benchmark.overhead_script(iterations))
+ end
+
+ def measure_script(ruby, script)
+ Tempfile.create(File.basename(__FILE__)) do |f|
+ f.write(script)
+ f.close
+
+ cmd = "#{ruby} #{f.path}"
+ Benchmark.measure { system(cmd, out: File::NULL) }.real
+ end
+ end
+
+ class BenchmarkRoot
+ # @param [String] name
+ # @param [String] prelude
+ # @param [Integer,nil] loop_count
+ # @param [String,nil] benchmark - For running single instant benchmark
+ # @param [Array<Hash>] benchmarks - For running multiple benchmarks
+ def initialize(name:, prelude: '', loop_count: nil, benchmark: nil, benchmarks: [])
+ if benchmark
+ unless benchmarks.empty?
+ raise ArgumentError.new("Only either :benchmark or :benchmarks can be specified")
+ end
+ @benchmarks = [BenchmarkScript.new(name: name, prelude: prelude, benchmark: benchmark)]
+ else
+ @benchmarks = benchmarks.map do |hash|
+ BenchmarkScript.new(Hash[hash.map { |k, v| [k.to_sym, v] }]).tap do |b|
+ b.inherit_root(prelude: prelude, loop_count: loop_count)
+ end
+ end
+ end
+ end
+
+ # @return [Array<BenchmarkScript>]
+ attr_reader :benchmarks
+ end
+
+ class BenchmarkScript
+ # @param [String] name
+ # @param [String] prelude
+ # @param [String] benchmark
+ def initialize(name:, prelude: '', loop_count: nil, benchmark:)
+ @name = name
+ @prelude = prelude
+ @loop_count = loop_count
+ @benchmark = benchmark
+ end
+
+ # @return [String]
+ attr_reader :name
+
+ # @return [Integer]
+ attr_reader :loop_count
+
+ def inherit_root(prelude:, loop_count:)
+ @prelude = "#{prelude}\n#{@prelude}"
+ if @loop_count.nil? && loop_count
+ @loop_count = loop_count
+ end
+ end
+
+ def overhead_script(iterations)
+ <<-RUBY
+ #{@prelude}
+ __benchmark_driver_i = 0
+ while __benchmark_driver_i < #{iterations}
+ __benchmark_driver_i += 1
+ end
+ RUBY
+ end
+
+ def benchmark_script(iterations)
+ <<-RUBY
+ #{@prelude}
+ __benchmark_driver_i = 0
+ while __benchmark_driver_i < #{iterations}
+ __benchmark_driver_i += 1
+ #{@benchmark}
+ end
+ RUBY
+ end
+ end
+
+ class BenchmarkResult < Struct.new(
+ :name, # @param [String]
+ :metrics_by_exec, # @param [Hash{ Executable => BenchmarkMetrics }]
+ )
+ def iterations_of(exec)
+ metrics_by_exec.fetch(exec).iterations
+ end
+
+ def elapsed_time_of(exec)
+ metrics_by_exec.fetch(exec).elapsed_time
+ end
+
+ def ips_of(exec)
+ iterations_of(exec) / elapsed_time_of(exec)
+ end
+ end
+
+ class BenchmarkMetrics < Struct.new(
+ :iterations, # @param [Integer]
+ :elapsed_time, # @param [Float] - Elapsed time in seconds
+ )
+ end
+
+ class Executable < Struct.new(
+ :name, # @param [String]
+ :path, # @param [String]
+ )
+ end
+
+ module LoopCountReporter
+ class << self
+ # @param [Array<Executable>] execs
+ # @param [Array<BenchmarkResult>] results
+ def report(execs, results)
+ puts "benchmark results:"
+ puts "Execution time (sec)"
+ puts "#{'%-16s' % 'name'} #{execs.map { |e| "%-8s" % e.name }.join(' ')}"
+
+ results.each do |result|
+ print '%-16s ' % result.name
+ puts execs.map { |exec|
+ "%-8s" % ("%.3f" % result.elapsed_time_of(exec))
+ }.join(' ')
+ end
+ puts
+
+ if execs.size > 1
+ report_speedup(execs, results)
+ end
+ end
+
+ private
+
+ def report_speedup(execs, results)
+ compared = execs.first
+ rest = execs - [compared]
+
+ puts "Speedup ratio: compare with the result of `#{compared.name}' (greater is better)"
+ puts "#{'%-16s' % 'name'} #{rest.map { |e| "%-8s" % e.name }.join(' ')}"
+ results.each do |result|
+ print '%-16s ' % result.name
+ puts rest.map { |exec|
+ "%-8s" % ("%.3f" % (result.ips_of(exec) / result.ips_of(compared)))
+ }.join(' ')
+ end
+ puts
+ end
+ end
+ end
+
+ module IpsReporter
+ class << self
+ # @param [Array<Executable>] execs
+ # @param [Array<BenchmarkResult>] results
+ def report(execs, results)
+ puts "Result -------------------------------------------"
+ puts "#{' ' * 16} #{execs.map { |e| "%13s" % e.name }.join(' ')}"
+
+ results.each do |result|
+ print '%16s ' % result.name
+ puts execs.map { |exec|
+ "%13s" % ("%.1f i/s" % result.ips_of(exec))
+ }.join(' ')
+ end
+ puts
+
+ if execs.size > 1
+ compare(execs, results)
+ end
+ end
+
+ private
+
+ def compare(execs, results)
+ results.each do |result|
+ puts "Comparison: #{result.name}"
+
+ sorted = execs.sort_by { |e| -result.ips_of(e) }
+ first = sorted.first
+
+ sorted.each do |exec|
+ if exec == first
+ puts "%16s: %12s i/s" % [first.name, "%.1f" % result.ips_of(first)]
+ else
+ puts "%16s: %12s i/s - %.2fx slower" % [exec.name, "%.1f" % result.ips_of(exec), result.ips_of(first) / result.ips_of(exec)]
+ end
+ end
+ puts
+ end
+ end
+ end
+ end
+ end
@@ -0,0 +1,5 @@
+ module Benchmark
+ class Driver
+ VERSION = '0.3.0'
+ end
+ end
@@ -1,273 +1 @@
- require 'benchmark_driver/version'
- require 'benchmark'
- require 'tempfile'
-
- class BenchmarkDriver
- MEASURE_TYPES = %w[loop_count ips]
- DEFAULT_LOOP_COUNT = 100_000
- DEFAULT_IPS_DURATION = 1
-
- # @param [String] measure_type - "loop_count"|"ips"
- # @param [Integer,nil] measure_num - Loop count for "loop_type", duration seconds for "ips"
- # @param [Array<String>] execs - ["path1", "path2"] or `["ruby1::path1", "ruby2::path2"]`
- # @param [Boolean] verbose
- def initialize(measure_type: 'loop_count', measure_num: nil, execs: ['ruby'], verbose: false)
- unless MEASURE_TYPES.include?(measure_type)
- abort "unsupported measure type: #{measure_type.dump}"
- end
- @measure_type = measure_type
- @measure_num = measure_num
- @execs = execs.map do |exec|
- name, path = exec.split('::', 2)
- Executable.new(name, path || name)
- end
- @verbose = verbose
- end
-
- # @param [Hash] root_hash
- def run(root_hash)
- root = BenchmarkRoot.new(Hash[root_hash.map { |k, v| [k.to_sym, v] }])
-
- results = root.benchmarks.map do |benchmark|
- metrics_by_exec = {}
- iterations = calc_iterations(@execs.first, benchmark)
- @execs.each do |exec|
- if @verbose
- puts "--- Running #{benchmark.name.dump} with #{exec.name.dump} #{iterations} times ---"
- puts "#{benchmark.benchmark_script(iterations)}\n"
- end
- elapsed_time = run_benchmark(exec, benchmark, iterations)
- metrics_by_exec[exec] = BenchmarkMetrics.new(iterations, elapsed_time)
- end
- BenchmarkResult.new(benchmark.name, metrics_by_exec)
- end
- puts if @verbose
-
- case @measure_type
- when 'loop_count'
- LoopCountReporter.report(@execs, results)
- when 'ips'
- IpsReporter.report(@execs, results)
- else
- raise "unexpected measure type: #{@measure_type.dump}"
- end
- end
-
- private
-
- # Estimate iterations to finish benchmark within `@duration`.
- def calc_iterations(exec, benchmark)
- case @measure_type
- when 'loop_count'
- @measure_num || benchmark.loop_count || DEFAULT_LOOP_COUNT
- when 'ips'
- # TODO: Change to try from 1, 10, 100 ...
- base = 1000
- time = run_benchmark(exec, benchmark, base)
- duration = @measure_num || DEFAULT_IPS_DURATION
- (duration / time * base).to_i
- else
- raise "unexpected measure type: #{@measure_type.dump}"
- end
- end
-
- def run_benchmark(exec, benchmark, iterations)
- # TODO: raise error if negative
- measure_script(exec.path, benchmark.benchmark_script(iterations)) -
- measure_script(exec.path, benchmark.overhead_script(iterations))
- end
-
- def measure_script(ruby, script)
- Tempfile.create(File.basename(__FILE__)) do |f|
- f.write(script)
- f.close
-
- cmd = "#{ruby} #{f.path}"
- Benchmark.measure { system(cmd, out: File::NULL) }.real
- end
- end
-
- class BenchmarkRoot
- # @param [String] name
- # @param [String] prelude
- # @param [Integer,nil] loop_count
- # @param [String,nil] benchmark - For running single instant benchmark
- # @param [Array<Hash>] benchmarks - For running multiple benchmarks
- def initialize(name:, prelude: '', loop_count: nil, benchmark: nil, benchmarks: [])
- if benchmark
- unless benchmarks.empty?
- raise ArgumentError.new("Only either :benchmark or :benchmarks can be specified")
- end
- @benchmarks = [BenchmarkScript.new(name: name, prelude: prelude, benchmark: benchmark)]
- else
- @benchmarks = benchmarks.map do |hash|
- BenchmarkScript.new(Hash[hash.map { |k, v| [k.to_sym, v] }]).tap do |b|
- b.inherit_root(prelude: prelude, loop_count: loop_count)
- end
- end
- end
- end
-
- # @return [Array<BenchmarkScript>]
- attr_reader :benchmarks
- end
-
- class BenchmarkScript
- # @param [String] name
- # @param [String] prelude
- # @param [String] benchmark
- def initialize(name:, prelude: '', loop_count: nil, benchmark:)
- @name = name
- @prelude = prelude
- @loop_count = loop_count
- @benchmark = benchmark
- end
-
- # @return [String]
- attr_reader :name
-
- # @return [Integer]
- attr_reader :loop_count
-
- def inherit_root(prelude:, loop_count:)
- @prelude = "#{prelude}\n#{@prelude}"
- if @loop_count.nil? && loop_count
- @loop_count = loop_count
- end
- end
-
- def overhead_script(iterations)
- <<-RUBY
- #{@prelude}
- __benchmark_driver_i = 0
- while __benchmark_driver_i < #{iterations}
- __benchmark_driver_i += 1
- end
- RUBY
- end
-
- def benchmark_script(iterations)
- <<-RUBY
- #{@prelude}
- __benchmark_driver_i = 0
- while __benchmark_driver_i < #{iterations}
- __benchmark_driver_i += 1
- #{@benchmark}
- end
- RUBY
- end
- end
-
- class BenchmarkResult < Struct.new(
- :name, # @param [String]
- :metrics_by_exec, # @param [Hash{ Executable => BenchmarkMetrics }]
- )
- def iterations_of(exec)
- metrics_by_exec.fetch(exec).iterations
- end
-
- def elapsed_time_of(exec)
- metrics_by_exec.fetch(exec).elapsed_time
- end
-
- def ips_of(exec)
- iterations_of(exec) / elapsed_time_of(exec)
- end
- end
-
- class BenchmarkMetrics < Struct.new(
- :iterations, # @param [Integer]
- :elapsed_time, # @param [Float] - Elapsed time in seconds
- )
- end
-
- class Executable < Struct.new(
- :name, # @param [String]
- :path, # @param [String]
- )
- end
-
- module LoopCountReporter
- class << self
- # @param [Array<Executable>] execs
- # @param [Array<BenchmarkResult>] results
- def report(execs, results)
- puts "benchmark results:"
- puts "Execution time (sec)"
- puts "#{'%-16s' % 'name'} #{execs.map { |e| "%-8s" % e.name }.join(' ')}"
-
- results.each do |result|
- print '%-16s ' % result.name
- puts execs.map { |exec|
- "%-8s" % ("%.3f" % result.elapsed_time_of(exec))
- }.join(' ')
- end
- puts
-
- if execs.size > 1
- report_speedup(execs, results)
- end
- end
-
- private
-
- def report_speedup(execs, results)
- compared = execs.first
- rest = execs - [compared]
-
- puts "Speedup ratio: compare with the result of `#{compared.name}' (greater is better)"
- puts "#{'%-16s' % 'name'} #{rest.map { |e| "%-8s" % e.name }.join(' ')}"
- results.each do |result|
- print '%-16s ' % result.name
- puts rest.map { |exec|
- "%-8s" % ("%.3f" % (result.ips_of(exec) / result.ips_of(compared)))
- }.join(' ')
- end
- puts
- end
- end
- end
-
- module IpsReporter
- class << self
- # @param [Array<Executable>] execs
- # @param [Array<BenchmarkResult>] results
- def report(execs, results)
- puts "Result -------------------------------------------"
- puts "#{' ' * 16} #{execs.map { |e| "%13s" % e.name }.join(' ')}"
-
- results.each do |result|
- print '%16s ' % result.name
- puts execs.map { |exec|
- "%13s" % ("%.1f i/s" % result.ips_of(exec))
- }.join(' ')
- end
- puts
-
- if execs.size > 1
- compare(execs, results)
- end
- end
-
- private
-
- def compare(execs, results)
- results.each do |result|
- puts "Comparison: #{result.name}"
-
- sorted = execs.sort_by { |e| -result.ips_of(e) }
- first = sorted.first
-
- sorted.each do |exec|
- if exec == first
- puts "%16s: %12s i/s" % [first.name, "%.1f" % result.ips_of(first)]
- else
- puts "%16s: %12s i/s - %.2fx slower" % [exec.name, "%.1f" % result.ips_of(exec), result.ips_of(first) / result.ips_of(exec)]
- end
- end
- puts
- end
- end
- end
- end
- end
+ require 'benchmark/driver'
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: benchmark_driver
  version: !ruby/object:Gem::Version
- version: 0.2.4
+ version: 0.3.0
  platform: ruby
  authors:
  - Takashi Kokubun
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2017-10-25 00:00:00.000000000 Z
+ date: 2017-11-22 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: bundler
@@ -42,7 +42,7 @@ description: Benchmark driver for different Ruby executables
  email:
  - takashikkbn@gmail.com
  executables:
- - benchmark_driver
+ - benchmark-driver
  extensions: []
  extra_rdoc_files: []
  files:
@@ -59,9 +59,10 @@ files:
  - benchmarks/lib/erb.yml
  - bin/console
  - bin/setup
- - exe/benchmark_driver
+ - exe/benchmark-driver
+ - lib/benchmark/driver.rb
+ - lib/benchmark/driver/version.rb
  - lib/benchmark_driver.rb
- - lib/benchmark_driver/version.rb
  homepage: https://github.com/k0kubun/benchmark_driver
  licenses:
  - MIT
@@ -82,7 +83,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  version: '0'
  requirements: []
  rubyforge_project:
- rubygems_version: 2.6.11
+ rubygems_version: 2.7.2
  signing_key:
  specification_version: 4
  summary: Benchmark driver for different Ruby executables
@@ -1,3 +0,0 @@
- class BenchmarkDriver
- VERSION = '0.2.4'
- end