benchmark_driver 0.3.0 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. checksums.yaml +5 -5
  2. data/.gitignore +0 -4
  3. data/.travis.yml +10 -6
  4. data/Gemfile +7 -2
  5. data/Gemfile.lock +30 -0
  6. data/README.md +125 -117
  7. data/Rakefile +14 -7
  8. data/benchmark_driver.gemspec +2 -4
  9. data/bin/console +1 -1
  10. data/examples/call.rb +12 -0
  11. data/examples/call_blank.rb +13 -0
  12. data/examples/call_erb.rb +33 -0
  13. data/examples/call_interpolation.rb +13 -0
  14. data/examples/exec_blank.rb +14 -0
  15. data/examples/exec_interpolation.rb +15 -0
  16. data/examples/yaml/array_duration_time.yml +3 -0
  17. data/examples/yaml/array_loop.yml +3 -0
  18. data/examples/yaml/array_loop_memory.yml +6 -0
  19. data/examples/yaml/array_loop_time.yml +4 -0
  20. data/examples/yaml/blank_hash.yml +8 -0
  21. data/examples/yaml/blank_hash_array.yml +10 -0
  22. data/examples/yaml/blank_loop.yml +9 -0
  23. data/examples/yaml/blank_loop_time.yml +10 -0
  24. data/examples/yaml/blank_string.yml +6 -0
  25. data/examples/yaml/blank_string_array.yml +8 -0
  26. data/examples/yaml/example_multi.yml +6 -0
  27. data/{benchmarks → examples/yaml}/example_single.yml +0 -0
  28. data/exe/benchmark-driver +44 -18
  29. data/lib/benchmark/driver.rb +52 -257
  30. data/lib/benchmark/driver/benchmark_result.rb +21 -0
  31. data/lib/benchmark/driver/configuration.rb +65 -0
  32. data/lib/benchmark/driver/duration_runner.rb +24 -0
  33. data/lib/benchmark/driver/error.rb +16 -0
  34. data/lib/benchmark/driver/repeatable_runner.rb +18 -0
  35. data/lib/benchmark/driver/ruby_dsl_parser.rb +57 -0
  36. data/lib/benchmark/driver/time.rb +12 -0
  37. data/lib/benchmark/driver/version.rb +2 -2
  38. data/lib/benchmark/driver/yaml_parser.rb +103 -0
  39. data/lib/benchmark/output.rb +16 -0
  40. data/lib/benchmark/output/ips.rb +114 -0
  41. data/lib/benchmark/output/memory.rb +57 -0
  42. data/lib/benchmark/output/time.rb +57 -0
  43. data/lib/benchmark/runner.rb +13 -0
  44. data/lib/benchmark/runner/call.rb +97 -0
  45. data/lib/benchmark/runner/exec.rb +190 -0
  46. metadata +40 -10
  47. data/benchmarks/core/array.yml +0 -4
  48. data/benchmarks/example_multi.yml +0 -10
  49. data/benchmarks/lib/erb.yml +0 -30
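The headline change in 0.4.0 is a Ruby DSL entrypoint, Benchmark.driver, next to the existing YAML input, with pluggable runners (lib/benchmark/runner: call, exec) and outputs (lib/benchmark/output: ips, time, memory). A minimal sketch of the DSL, condensed from the data/examples files added below (the job names and blocks here are illustrative, not from the release):

  require 'benchmark/driver'

  # Each x.report defines a job; x.compare! appends a comparison
  # section to the output, as in data/examples/call_blank.rb below.
  Benchmark.driver(runner: :call) do |x|
    small = 'Hello'

    x.report('upcase')   { small.upcase }
    x.report('downcase') { small.downcase }
    x.compare!
  end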
data/examples/call.rb ADDED
@@ -0,0 +1,12 @@
+ require 'benchmark/driver'
+
+ Benchmark.driver do |x|
+   large_a = "Hellooooooooooooooooooooooooooooooooooooooooooooooooooo"
+   large_b = "Wooooooooooooooooooooooooooooooooooooooooooooooooooorld"
+
+   small_a = "Hello"
+   small_b = "World"
+
+   x.report('large') { "#{large_a}, #{large_b}!" }
+   x.report('small') { "#{small_a}, #{small_b}!" }
+ end
data/examples/call_blank.rb ADDED
@@ -0,0 +1,13 @@
+ require 'benchmark/driver'
+
+ class Array
+   alias_method :blank?, :empty?
+ end
+
+ Benchmark.driver(runner: :call) do |x|
+   array = []
+
+   x.report('array.empty?') { array.empty? }
+   x.report('array.blank?') { array.blank? }
+   x.compare!
+ end
data/examples/call_erb.rb ADDED
@@ -0,0 +1,33 @@
+ require 'benchmark/driver'
+ require 'erb'
+ require 'erubi'
+ require 'erubis'
+
+ data = DATA.read
+
+ mod = Module.new
+ mod.instance_eval("def self.erb(title, content); #{ERB.new(data).src}; end", "(ERB)")
+ mod.instance_eval("def self.erubi(title, content); #{Erubi::Engine.new(data).src}; end", "(Erubi)")
+ mod.instance_eval("def self.erubis(title, content); #{Erubis::Eruby.new(data).src}; end", "(Erubis)")
+
+ title = "hello world!"
+ content = "hello world!\n" * 10
+
+ Benchmark.driver do |x|
+   x.report("ERB #{RUBY_VERSION}") { mod.erb(title, content) }
+   x.report("Erubis #{Erubis::VERSION}") { mod.erubis(title, content) }
+   x.report("Erubi #{Erubi::VERSION}") { mod.erubi(title, content) }
+   x.compare!
+ end
+
+ __END__
+
+ <html>
+ <head> <%= title %> </head>
+ <body>
+   <h1> <%= title %> </h1>
+   <p>
+     <%= content %>
+   </p>
+ </body>
+ </html>
data/examples/call_interpolation.rb ADDED
@@ -0,0 +1,13 @@
+ require 'benchmark/driver'
+
+ Benchmark.driver do |x|
+   large_a = "Hellooooooooooooooooooooooooooooooooooooooooooooooooooo"
+   large_b = "Wooooooooooooooooooooooooooooooooooooooooooooooooooorld"
+
+   small_a = "Hello"
+   small_b = "World"
+
+   x.report('large') { "#{large_a}, #{large_b}!" }
+   x.report('small') { "#{small_a}, #{small_b}!" }
+   x.compare!
+ end
data/examples/exec_blank.rb ADDED
@@ -0,0 +1,14 @@
+ require 'benchmark/driver'
+
+ Benchmark.driver(runner: :exec) do |x|
+   x.prelude = <<-EOS
+     class Array
+       alias_method :blank?, :empty?
+     end
+     array = []
+   EOS
+
+   x.report(script: 'array.empty?')
+   x.report(script: 'array.blank?')
+   x.compare!
+ end
data/examples/exec_interpolation.rb ADDED
@@ -0,0 +1,15 @@
+ require 'benchmark/driver'
+
+ Benchmark.driver(runner: :exec) do |x|
+   x.prelude = <<-EOS
+     large_a = "Hellooooooooooooooooooooooooooooooooooooooooooooooooooo"
+     large_b = "Wooooooooooooooooooooooooooooooooooooooooooooooooooorld"
+
+     small_a = "Hello"
+     small_b = "World"
+   EOS
+
+   x.report('large', script: '"#{large_a}, #{large_b}!"')
+   x.report('small', script: '"#{small_a}, #{small_b}!"')
+   x.compare!
+ end
data/examples/yaml/array_duration_time.yml ADDED
@@ -0,0 +1,3 @@
+ output: time
+ benchmark:
+   bm_vm2_array: a = [1,2,3,4,5,6,7,8,9,10]
data/examples/yaml/array_loop.yml ADDED
@@ -0,0 +1,3 @@
+ loop_count: 6000000
+ benchmark:
+   bm_vm2_array: a = [1,2,3,4,5,6,7,8,9,10]
data/examples/yaml/array_loop_memory.yml ADDED
@@ -0,0 +1,6 @@
+ output: memory
+ loop_count: 6000000
+ benchmark:
+   array10: a = [1,2,3,4,5,6,7,8,9,10]
+   array100: a = [1,2,3,4,5,6,7,8,9,10] * 10
+   array1000: a = [1,2,3,4,5,6,7,8,9,10] * 100
data/examples/yaml/array_loop_time.yml ADDED
@@ -0,0 +1,4 @@
+ output: time
+ loop_count: 6000000
+ benchmark:
+   bm_vm2_array: a = [1,2,3,4,5,6,7,8,9,10]
data/examples/yaml/blank_hash.yml ADDED
@@ -0,0 +1,8 @@
+ prelude: |
+   class Array
+     alias_method :blank?, :empty?
+   end
+   array = []
+ benchmark:
+   empty: array.empty?
+   blank: array.blank?
data/examples/yaml/blank_hash_array.yml ADDED
@@ -0,0 +1,10 @@
+ prelude: |
+   class Array
+     alias_method :blank?, :empty?
+   end
+   array = []
+ benchmark:
+   - name: Array#empty?
+     script: array.empty?
+   - name: Array#blank?
+     script: array.blank?
data/examples/yaml/blank_loop.yml ADDED
@@ -0,0 +1,9 @@
+ loop_count: 20000000
+ prelude: |
+   class Array
+     alias_method :blank?, :empty?
+   end
+   array = []
+ benchmark:
+   empty: array.empty?
+   blank: array.blank?
data/examples/yaml/blank_loop_time.yml ADDED
@@ -0,0 +1,10 @@
+ output: time
+ loop_count: 20000000
+ prelude: |
+   class Array
+     alias_method :blank?, :empty?
+   end
+   array = []
+ benchmark:
+   empty: array.empty?
+   blank: array.blank?
data/examples/yaml/blank_string.yml ADDED
@@ -0,0 +1,6 @@
+ prelude: |
+   class Array
+     alias_method :blank?, :empty?
+   end
+   array = []
+ benchmark: array.blank?
data/examples/yaml/blank_string_array.yml ADDED
@@ -0,0 +1,8 @@
+ prelude: |
+   class Array
+     alias_method :blank?, :empty?
+   end
+   array = []
+ benchmark:
+   - array.empty?
+   - array.blank?
data/examples/yaml/example_multi.yml ADDED
@@ -0,0 +1,6 @@
+ prelude: |
+   a = 'a' * 100
+   b = 'b' * 100
+ benchmark:
+   join: '[a, b].join'
+   str-interp: '"#{a}#{b}"'
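Taken together, the YAML examples above exercise the whole input schema: optional top-level output and loop_count keys (time and memory appear above; files without an output key fall back to the default output), an optional prelude evaluated before the measured scripts, and a benchmark entry that may be a single script string, a name-to-script hash, or an array of strings or name/script pairs. A consolidated sketch with illustrative values:

  output: time
  loop_count: 1000000
  prelude: |
    array = []
  benchmark:
    - name: Array#empty?
      script: array.empty?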
data/{benchmarks → examples/yaml}/example_single.yml RENAMED
File without changes
data/exe/benchmark-driver CHANGED
@@ -1,7 +1,8 @@
  #!/usr/bin/env ruby
  $:.unshift File.expand_path('../lib', __dir__)

- require 'benchmark_driver'
+ require 'benchmark/driver'
+ require 'benchmark/driver/yaml_parser'
  require 'optparse'
  require 'yaml'

@@ -10,32 +11,57 @@ args = OptionParser.new do |o|
    o.banner = "Usage: #{File.basename($0, '.*')} [options] [YAML]"
    o.on('-e', '--executables [EXECS]', 'Ruby executables (e1::path1; e2::path2; e3::path3;...)') do |e|
      options[:execs] ||= []
-     e.split(/;/).each do |path|
-       options[:execs] << path
+     e.split(';').each do |name_path|
+       name, path = name_path.split('::', 2)
+       options[:execs] << Benchmark::Driver::Configuration::Executable.new(name, path || name)
      end
    end
-   o.on('-r', '--rbenv [VERSION]', 'Ruby executable in rbenv') do |r|
+   o.on('--rbenv [VERSIONS]', 'Ruby executables in rbenv (2.3.5;2.4.2;...)') do |r|
      options[:execs] ||= []
-     r.split(/;/).each do |version|
-       options[:execs] << "#{version}::#{`RBENV_VERSION='#{version}' rbenv which ruby`.rstrip}"
+     r.split(';').each do |version|
+       path = `RBENV_VERSION='#{version}' rbenv which ruby`.rstrip
+       abort "Failed to execute 'rbenv which ruby'" unless $?.success?
+       options[:execs] << Benchmark::Driver::Configuration::Executable.new(version, path)
      end
    end
-   o.on('-i [DURATION]', '--ips [SECONDS]', "Measure IPS in duration seconds (default: #{Benchmark::Driver::DEFAULT_IPS_DURATION})") do |i|
-     options[:measure_type] = 'ips'
-     options[:measure_num] = Integer(i) if i
+   o.on('-c', '--compare', 'Compare results (currently only supported in ips output)') do |v|
+     options[:compare] = v
    end
-   o.on('-l [COUNT]', '--loop-count [COUNT]', "Measure execution time with loop count (default: #{Benchmark::Driver::DEFAULT_LOOP_COUNT})") do |l|
-     options[:measure_type] = 'loop_count'
-     options[:measure_num] = Integer(l) if l
-   end
-   o.on('-v', '--verbose') do |v|
-     options[:verbose] = v
+   o.on('-r', '--repeat-count [NUM]', 'Try benchmark NUM times and use the fastest result') do |v|
+     begin
+       options[:repeat_count] = Integer(v)
+     rescue ArgumentError
+       abort "-r, --repeat-count must take Integer, but got #{v.inspect}"
+     end
    end
  end.parse!(ARGV)
  abort "No YAML file is specified" if args.empty?

- driver = Benchmark::Driver.new(options)
  args.each do |yaml|
-   default = { name: File.basename(yaml, '.*') }
-   driver.run(default.merge(YAML.load(File.read(yaml))))
+   yaml = YAML.load(File.read(yaml))
+   Benchmark::Driver::Configuration.symbolize_keys!(yaml)
+
+   begin
+     config = Benchmark::Driver::YamlParser.parse(yaml)
+   rescue ArgumentError
+     $stderr.puts "benchmark-driver: Failed to parse #{yaml.dump}."
+     $stderr.puts ' YAML format may be wrong. See error below:'
+     $stderr.puts
+     raise
+   end
+
+   options.each do |key, value|
+     case key
+     when :compare
+       config.output_options.compare = value
+     when :execs
+       config.runner_options.executables = value
+     when :repeat_count
+       config.runner_options.repeat_count = value
+     else
+       raise "Unhandled option: #{key.inspect}"
+     end
+   end
+
+   Benchmark::Driver.run(config)
  end
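Putting the rewritten option parser to work, a typical invocation of the 0.4.0 CLI might look like the following (the YAML path and Ruby versions are illustrative). Note that -e/--executables now takes name::path pairs separated by ';', falling back to the name as the command when the path part is omitted:

  # Best of 3 runs on two rbenv-installed Rubies, with a comparison at the end.
  benchmark-driver examples/yaml/blank_loop.yml --rbenv '2.3.5;2.4.2' --compare --repeat-count 3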
data/lib/benchmark/driver.rb CHANGED
@@ -1,273 +1,68 @@
- require 'benchmark'
- require 'benchmark/driver/version'
- require 'tempfile'
-
- class Benchmark::Driver
-   MEASURE_TYPES = %w[loop_count ips]
-   DEFAULT_LOOP_COUNT = 100_000
-   DEFAULT_IPS_DURATION = 1
-
-   # @param [String] measure_type - "loop_count"|"ips"
-   # @param [Integer,nil] measure_num - Loop count for "loop_type", duration seconds for "ips"
-   # @param [Array<String>] execs - ["path1", "path2"] or `["ruby1::path1", "ruby2::path2"]`
-   # @param [Boolean] verbose
-   def initialize(measure_type: 'loop_count', measure_num: nil, execs: ['ruby'], verbose: false)
-     unless MEASURE_TYPES.include?(measure_type)
-       abort "unsupported measure type: #{measure_type.dump}"
-     end
-     @measure_type = measure_type
-     @measure_num = measure_num
-     @execs = execs.map do |exec|
-       name, path = exec.split('::', 2)
-       Executable.new(name, path || name)
-     end
-     @verbose = verbose
-   end
-
-   # @param [Hash] root_hash
-   def run(root_hash)
-     root = BenchmarkRoot.new(Hash[root_hash.map { |k, v| [k.to_sym, v] }])
-
-     results = root.benchmarks.map do |benchmark|
-       metrics_by_exec = {}
-       iterations = calc_iterations(@execs.first, benchmark)
-       @execs.each do |exec|
-         if @verbose
-           puts "--- Running #{benchmark.name.dump} with #{exec.name.dump} #{iterations} times ---"
-           puts "#{benchmark.benchmark_script(iterations)}\n"
-         end
-         elapsed_time = run_benchmark(exec, benchmark, iterations)
-         metrics_by_exec[exec] = BenchmarkMetrics.new(iterations, elapsed_time)
-       end
-       BenchmarkResult.new(benchmark.name, metrics_by_exec)
-     end
-     puts if @verbose
-
-     case @measure_type
-     when 'loop_count'
-       LoopCountReporter.report(@execs, results)
-     when 'ips'
-       IpsReporter.report(@execs, results)
-     else
-       raise "unexpected measure type: #{@measure_type.dump}"
-     end
-   end
-
-   private
-
-   # Estimate iterations to finish benchmark within `@duration`.
-   def calc_iterations(exec, benchmark)
-     case @measure_type
-     when 'loop_count'
-       @measure_num || benchmark.loop_count || DEFAULT_LOOP_COUNT
-     when 'ips'
-       # TODO: Change to try from 1, 10, 100 ...
-       base = 1000
-       time = run_benchmark(exec, benchmark, base)
-       duration = @measure_num || DEFAULT_IPS_DURATION
-       (duration / time * base).to_i
-     else
-       raise "unexpected measure type: #{@measure_type.dump}"
-     end
-   end
-
-   def run_benchmark(exec, benchmark, iterations)
-     # TODO: raise error if negative
-     measure_script(exec.path, benchmark.benchmark_script(iterations)) -
-       measure_script(exec.path, benchmark.overhead_script(iterations))
-   end
-
-   def measure_script(ruby, script)
-     Tempfile.create(File.basename(__FILE__)) do |f|
-       f.write(script)
-       f.close
-
-       cmd = "#{ruby} #{f.path}"
-       Benchmark.measure { system(cmd, out: File::NULL) }.real
-     end
-   end
-
-   class BenchmarkRoot
-     # @param [String] name
-     # @param [String] prelude
-     # @param [Integer,nil] loop_count
-     # @param [String,nil] benchmark - For running single instant benchmark
-     # @param [Array<Hash>] benchmarks - For running multiple benchmarks
-     def initialize(name:, prelude: '', loop_count: nil, benchmark: nil, benchmarks: [])
-       if benchmark
-         unless benchmarks.empty?
-           raise ArgumentError.new("Only either :benchmark or :benchmarks can be specified")
-         end
-         @benchmarks = [BenchmarkScript.new(name: name, prelude: prelude, benchmark: benchmark)]
-       else
-         @benchmarks = benchmarks.map do |hash|
-           BenchmarkScript.new(Hash[hash.map { |k, v| [k.to_sym, v] }]).tap do |b|
-             b.inherit_root(prelude: prelude, loop_count: loop_count)
-           end
-         end
-       end
-     end
-
-     # @return [Array<BenchmarkScript>]
-     attr_reader :benchmarks
-   end
-
-   class BenchmarkScript
-     # @param [String] name
-     # @param [String] prelude
-     # @param [String] benchmark
-     def initialize(name:, prelude: '', loop_count: nil, benchmark:)
-       @name = name
-       @prelude = prelude
-       @loop_count = loop_count
-       @benchmark = benchmark
-     end
-
-     # @return [String]
-     attr_reader :name
-
-     # @return [Integer]
-     attr_reader :loop_count
-
-     def inherit_root(prelude:, loop_count:)
-       @prelude = "#{prelude}\n#{@prelude}"
-       if @loop_count.nil? && loop_count
-         @loop_count = loop_count
-       end
-     end
-
-     def overhead_script(iterations)
-       <<-RUBY
-         #{@prelude}
-         __benchmark_driver_i = 0
-         while __benchmark_driver_i < #{iterations}
-           __benchmark_driver_i += 1
-         end
-       RUBY
-     end
-
-     def benchmark_script(iterations)
-       <<-RUBY
-         #{@prelude}
-         __benchmark_driver_i = 0
-         while __benchmark_driver_i < #{iterations}
-           __benchmark_driver_i += 1
-           #{@benchmark}
-         end
-       RUBY
-     end
-   end
-
-   class BenchmarkResult < Struct.new(
-     :name,            # @param [String]
-     :metrics_by_exec, # @param [Hash{ Executable => BenchmarkMetrics }]
-   )
-     def iterations_of(exec)
-       metrics_by_exec.fetch(exec).iterations
-     end
-
-     def elapsed_time_of(exec)
-       metrics_by_exec.fetch(exec).elapsed_time
-     end
-
-     def ips_of(exec)
-       iterations_of(exec) / elapsed_time_of(exec)
-     end
-   end
-
-   class BenchmarkMetrics < Struct.new(
-     :iterations,   # @param [Integer]
-     :elapsed_time, # @param [Float] - Elapsed time in seconds
-   )
-   end
-
-   class Executable < Struct.new(
-     :name, # @param [String]
-     :path, # @param [String]
-   )
-   end
-
-   module LoopCountReporter
+ module Benchmark
+   module Driver
      class << self
-       # @param [Array<Executable>] execs
-       # @param [Array<BenchmarkResult>] results
-       def report(execs, results)
-         puts "benchmark results:"
-         puts "Execution time (sec)"
-         puts "#{'%-16s' % 'name'} #{execs.map { |e| "%-8s" % e.name }.join(' ')}"
-
-         results.each do |result|
-           print '%-16s ' % result.name
-           puts execs.map { |exec|
-             "%-8s" % ("%.3f" % result.elapsed_time_of(exec))
-           }.join(' ')
+       # Main function which is used by both RubyDriver and YamlDriver.
+       # @param [Benchmark::Driver::Configuration] config
+       def run(config)
+         validate_config(config)
+
+         runner_class = Runner.find(config.runner_options.type)
+         output_class = Output.find(config.output_options.type)
+
+         missing_fields = output_class::REQUIRED_FIELDS - runner_class::SUPPORTED_FIELDS
+         unless missing_fields.empty?
+           raise ArgumentError.new(
+             "#{output_class.name} requires #{missing_fields.inspect} fields "\
+             "which are not supported by #{runner_class.name}. Try using another runner."
+           )
          end
-         puts

-         if execs.size > 1
-           report_speedup(execs, results)
+         without_stdout_buffering do
+           runner = runner_class.new(
+             config.runner_options,
+             output: output_class.new(
+               jobs: config.jobs,
+               executables: config.runner_options.executables,
+               options: config.output_options,
+             ),
+           )
+           runner.run(config)
         end
+       rescue Benchmark::Driver::Error => e
+         $stderr.puts "\n\nFailed to execute benchmark!\n\n#{e.class.name}:\n #{e.message}"
+         exit 1
       end

       private

-       def report_speedup(execs, results)
-         compared = execs.first
-         rest = execs - [compared]
+       def validate_config(config)
+         # TODO: make sure all scripts are the same class
+       end

-         puts "Speedup ratio: compare with the result of `#{compared.name}' (greater is better)"
-         puts "#{'%-16s' % 'name'} #{rest.map { |e| "%-8s" % e.name }.join(' ')}"
-         results.each do |result|
-           print '%-16s ' % result.name
-           puts rest.map { |exec|
-             "%-8s" % ("%.3f" % (result.ips_of(exec) / result.ips_of(compared)))
-           }.join(' ')
-         end
-         puts
+       # benchmark_driver ouputs logs ASAP. This enables sync flag for it.
+       #
+       # Currently benchmark_driver supports only output to stdout.
+       # In future exetension, this may be included in Output plugins.
+       def without_stdout_buffering
+         sync, $stdout.sync = $stdout.sync, true
+         yield
+       ensure
+         $stdout.sync = sync
       end
     end
   end

-   module IpsReporter
-     class << self
-       # @param [Array<Executable>] execs
-       # @param [Array<BenchmarkResult>] results
-       def report(execs, results)
-         puts "Result -------------------------------------------"
-         puts "#{' ' * 16} #{execs.map { |e| "%13s" % e.name }.join(' ')}"
-
-         results.each do |result|
-           print '%16s ' % result.name
-           puts execs.map { |exec|
-             "%13s" % ("%.1f i/s" % result.ips_of(exec))
-           }.join(' ')
-         end
-         puts
-
-         if execs.size > 1
-           compare(execs, results)
-         end
-       end
-
-       private
-
-       def compare(execs, results)
-         results.each do |result|
-           puts "Comparison: #{result.name}"
+   # RubyDriver entrypoint.
+   def self.driver(*args, &block)
+     dsl = Driver::RubyDslParser.new(*args)
+     block.call(dsl)

-           sorted = execs.sort_by { |e| -result.ips_of(e) }
-           first = sorted.first
-
-           sorted.each do |exec|
-             if exec == first
-               puts "%16s: %12s i/s" % [first.name, "%.1f" % result.ips_of(first)]
-             else
-               puts "%16s: %12s i/s - %.2fx slower" % [exec.name, "%.1f" % result.ips_of(exec), result.ips_of(first) / result.ips_of(exec)]
-             end
-           end
-           puts
-         end
-       end
-     end
+     Driver.run(dsl.configuration)
   end
 end
+
+ require 'benchmark/output'
+ require 'benchmark/runner'
+ require 'benchmark/driver/error'
+ require 'benchmark/driver/ruby_dsl_parser'
+ require 'benchmark/driver/version'
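As the rewritten entrypoint shows, Benchmark.driver is now a thin wrapper: it instantiates a RubyDslParser, yields it to the block, and hands the resulting configuration to Benchmark::Driver.run, which resolves the runner and output classes. The same flow written out explicitly, as a sketch that uses only calls visible in this diff (the job definitions are illustrative):

  require 'benchmark/driver'

  # Equivalent to: Benchmark.driver(runner: :call) { |x| ... }
  dsl = Benchmark::Driver::RubyDslParser.new(runner: :call)
  dsl.report('join')   { %w[a b].join }
  dsl.report('concat') { 'a' + 'b' }
  dsl.compare!
  Benchmark::Driver.run(dsl.configuration)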