benchmark_driver 0.15.12 → 0.15.17

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 4c069979523cfd025fba37653a117db89a52062bfedd7fe3d2c8b8ef5eaab329
4
- data.tar.gz: 23f32d6a140120ccbddd2ce704590cde72496fc0e7834674fecc865d04e8c943
3
+ metadata.gz: cc6888fbbe3ec73fcb84874bbc31535a08ae07859cbc8b86c0dd53d2e33ed2b7
4
+ data.tar.gz: cc052011eb026f8dd4d250a191fcbe30bdb3024b2157659c80696649d0c58840
5
5
  SHA512:
6
- metadata.gz: d9d9a2cde658660d21dcf36a2ff872ac42c7a518aaf2a917744eacbcf8e2e95814ded6edb140a3b9d23273291850a2948cc47c09297d5b973b531da3a2582fd5
7
- data.tar.gz: 48a7c466c98a4304c3cc44bcfc7adf2d277c546e226cb3082999a1a384f038f969c7782b338b851bafd9c319d2d1aad4adca243b1c358346a2b79c1ca0ac0c78
6
+ metadata.gz: 2516d64721d7b53f1466468263464024ebcdcc9906787ff29b38a494e2f8ecab275ccad7fcdc4928c1482c43b4ec0195d2733b38476ca919ff95b3b304cc4174
7
+ data.tar.gz: e6e62c506e94fe8e2add9af0ea3bd6abd531299117d28e7a2b1cbfee1d56db309907d1787b6cb090bdd64cefe0206641ec753b15ceb0625ed118406e61af1d33
data/CHANGELOG.md CHANGED
@@ -1,3 +1,25 @@
1
+ # v0.15.17
2
+
3
+ - Jobs returned from job parsers are made mutable for plugins
4
+
5
+ # v0.15.16
6
+
7
+ - Add `--alternate` runner option to run executables alternately.
8
+ This is supported only for `ruby_stdout` runner for now.
9
+
10
+ # v0.15.15
11
+
12
+ - An absolute path is passed to `command_stdout`'s `working_directory`
13
+ and to v0.15.14's relative path resolution of the job `type`
14
+
15
+ # v0.15.14
16
+
17
+ - YAML's `type` key allows a value with `/` to specify a relative path of a runner plugin
18
+
19
+ # v0.15.13
20
+
21
+ - Show a command and stdout on `-vv` for `ips`, `time`, and `block` runner
22
+
1
23
  # v0.15.12
2
24
 
3
25
  - Show comparison on `--output=markdown` when `--output-compare` is also specified
data/exe/benchmark-driver CHANGED
@@ -67,6 +67,9 @@ config = BenchmarkDriver::Config.new.tap do |c|
67
67
  end
68
68
  c.repeat_result = v
69
69
  end
70
+ o.on('--alternate', 'Alternate executables instead of running the same executable in a row with --repeat-count') do |v|
71
+ c.alternate = v
72
+ end
70
73
  o.on('--bundler', 'Install and use gems specified in Gemfile') do |v|
71
74
  bundler = v
72
75
  end
@@ -124,7 +127,7 @@ jobs = config.paths.flat_map do |path|
124
127
 
125
128
  begin
126
129
  # `working_directory` is YAML-specific special parameter, mainly for "command_stdout"
127
- BenchmarkDriver::JobParser.parse(job, default_params: { working_directory: File.dirname(path) })
130
+ BenchmarkDriver::JobParser.parse(job, working_directory: File.expand_path(File.dirname(path)))
128
131
  rescue ArgumentError
129
132
  $stderr.puts "benchmark-driver: Failed to parse #{path.dump}."
130
133
  $stderr.puts ' YAML format may be wrong. See error below:'
@@ -13,6 +13,7 @@ module BenchmarkDriver
13
13
  :filters, # @param [Array<Regexp>]
14
14
  :repeat_count, # @param [Integer]
15
15
  :repeat_result, # @param [String]
16
+ :alternate, # @param [TrueClass,FalseClass]
16
17
  :run_duration, # @param [Float]
17
18
  :timeout, # @param [Float,nil]
18
19
  :verbose, # @param [Integer]
@@ -23,15 +24,17 @@ module BenchmarkDriver
23
24
  filters: [],
24
25
  repeat_count: 1,
25
26
  repeat_result: 'best',
27
+ alternate: false,
26
28
  run_duration: 3.0,
27
29
  verbose: 0,
28
30
  },
29
31
  )
30
32
 
31
- # Subset of FullConfig passed to JobRunner
33
+ # Subset of Config passed to JobRunner
32
34
  Config::RunnerConfig = ::BenchmarkDriver::Struct.new(
33
35
  :repeat_count, # @param [Integer]
34
36
  :repeat_result, # @param [String]
37
+ :alternate, # @param [TrueClass,FalseClass]
35
38
  :run_duration, # @param [Float]
36
39
  :timeout, # @param [Float,nil]
37
40
  :verbose, # @param [Integer]
@@ -26,7 +26,7 @@ module BenchmarkDriver
26
26
  job.teardown.prepend("#{teardown}\n") if teardown
27
27
  job.loop_count ||= loop_count
28
28
  job.required_ruby_version ||= required_ruby_version
29
- end.each(&:freeze)
29
+ end
30
30
  end
31
31
 
32
32
  private
@@ -3,24 +3,27 @@ require 'benchmark_driver/runner'
3
3
  module BenchmarkDriver
4
4
  class << JobParser = Module.new
5
5
  # @param [Hash] config
6
- # @param [Hash] default_params - Special default values not written in job definition
7
- def parse(config, default_params: {})
6
+ # @param [String,nil] working_directory - YAML-specific special parameter for "command_stdout" and a relative path in type
7
+ def parse(config, working_directory: nil)
8
8
  config = symbolize_keys(config)
9
9
  type = config.fetch(:type)
10
10
  if !type.is_a?(String)
11
11
  raise ArgumentError.new("Invalid type: #{config[:type].inspect} (expected String)")
12
- elsif !type.match(/\A[A-Za-z0-9_]+\z/)
13
- raise ArgumentError.new("Invalid type: #{config[:type].inspect} (expected to include only [A-Za-z0-9_])")
12
+ elsif !type.match(/\A[A-Za-z0-9_\/]+\z/)
13
+ raise ArgumentError.new("Invalid type: #{config[:type].inspect} (expected to include only [A-Za-z0-9_\/])")
14
14
  end
15
15
  config.delete(:type)
16
16
 
17
17
  # Dynamic dispatch for plugin support
18
- require "benchmark_driver/runner/#{type}"
18
+ if type.include?('/')
19
+ require File.join(working_directory || '.', type)
20
+ type = File.basename(type)
21
+ else
22
+ require "benchmark_driver/runner/#{type}"
23
+ end
19
24
  job = ::BenchmarkDriver.const_get("Runner::#{camelize(type)}::JobParser", false).parse(**config)
20
- default_params.each do |key, value|
21
- if job.respond_to?(key) && job.respond_to?("#{key}=") && job.public_send(key).nil?
22
- job.public_send("#{key}=", value)
23
- end
25
+ if job.respond_to?(:working_directory) && job.respond_to?(:working_directory=) && job.working_directory.nil?
26
+ job.working_directory = working_directory
24
27
  end
25
28
  job
26
29
  end
@@ -8,6 +8,7 @@ class BenchmarkDriver::Output::Markdown
8
8
  # @param [Array<BenchmarkDriver::Metric>] metrics
9
9
  # @param [Array<BenchmarkDriver::Job>] jobs
10
10
  # @param [Array<BenchmarkDriver::Context>] contexts
11
+ # @param [Hash{ Symbol => Object }] options
11
12
  def initialize(metrics:, jobs:, contexts:, options:)
12
13
  @metrics = metrics
13
14
  @contexts = contexts
@@ -30,6 +30,10 @@ module BenchmarkDriver
30
30
  contexts_jobs.group_by(&:metrics).each do |metrics, metrics_jobs|
31
31
  metrics_jobs.group_by(&:class).each do |klass, klass_jobs|
32
32
  runner = runner_for(klass)
33
+ if runner_config.alternate && runner != BenchmarkDriver::Runner::RubyStdout
34
+ abort "--alternate is supported only for ruby_stdout runner for now"
35
+ end
36
+
33
37
  contexts = build_contexts(contexts, executables: config.executables)
34
38
  output = Output.new(
35
39
  type: config.output_type,
@@ -40,8 +40,8 @@ class BenchmarkDriver::Runner::Ips
40
40
  @output.report(values: { metric => value }, duration: duration, loop_count: loop_count)
41
41
  end
42
42
 
43
- loop_count = (loop_count.to_f * @config.run_duration / duration).floor
44
- Job.new(**job.to_h.merge(loop_count: loop_count))
43
+ job.loop_count = (loop_count.to_f * @config.run_duration / duration).floor
44
+ job
45
45
  end
46
46
  end
47
47
  end
@@ -107,8 +107,8 @@ class BenchmarkDriver::Runner::Ips
107
107
 
108
108
  duration = Tempfile.open(['benchmark_driver-', '.rb']) do |f|
109
109
  with_script(benchmark.render(result: f.path)) do |path|
110
- IO.popen([*context.executable.command, path], &:read) # TODO: print stdout if verbose=2
111
- if $?.success? && ((value = Float(f.read)) > 0)
110
+ success = execute(*context.executable.command, path, exception: false)
111
+ if success && ((value = Float(f.read)) > 0)
112
112
  value
113
113
  else
114
114
  BenchmarkDriver::Result::ERROR
@@ -137,10 +137,7 @@ class BenchmarkDriver::Runner::Ips
137
137
  end
138
138
 
139
139
  def with_script(script)
140
- if @config.verbose >= 2
141
- sep = '-' * 30
142
- $stdout.puts "\n\n#{sep}[Script begin]#{sep}\n#{script}#{sep}[Script end]#{sep}\n\n"
143
- end
140
+ debug_output('Script', script) if @config.verbose >= 2
144
141
 
145
142
  Tempfile.open(['benchmark_driver-', '.rb']) do |f|
146
143
  f.puts script
@@ -149,11 +146,20 @@ class BenchmarkDriver::Runner::Ips
149
146
  end
150
147
  end
151
148
 
152
- def execute(*args)
153
- IO.popen(args, &:read) # TODO: print stdout if verbose=2
154
- unless $?.success?
155
- raise "Failed to execute: #{args.shelljoin} (status: #{$?.exitstatus})"
149
+ def execute(*args, exception: true)
150
+ $stderr.puts "$ #{args.shelljoin}" if @config.verbose >= 2
151
+
152
+ stdout = IO.popen(args, &:read)
153
+ debug_output('Command output', stdout) if @config.verbose >= 2
154
+ if exception && !$?.success?
155
+ raise "Failed to execute: #{args.shelljoin} (status: #{$?})"
156
156
  end
157
+ $?.success?
158
+ end
159
+
160
+ def debug_output(name, text)
161
+ sep = '-' * 30
162
+ $stdout.puts "\n\n#{sep}[#{name} begin]#{sep}\n#{text}#{sep}[#{name} end]#{sep}\n\n"
157
163
  end
158
164
 
159
165
  WarmupScript = ::BenchmarkDriver::Struct.new(:preludes, :script, :teardown, :loop_count, :first_warmup_duration, :second_warmup_duration) do
@@ -77,6 +77,63 @@ class BenchmarkDriver::Runner::RubyStdout
77
77
  # This method is dynamically called by `BenchmarkDriver::JobRunner.run`
78
78
  # @param [Array<BenchmarkDriver::Default::Job>] jobs
79
79
  def run(jobs)
80
+ if @config.alternate
81
+ alternated_run(jobs)
82
+ else
83
+ incremental_run(jobs)
84
+ end
85
+ end
86
+
87
+ private
88
+
89
+ # Special mode. Execution order: RubyA, RubyB, ..., RubyA, RubyB, ...
90
+ def alternated_run(jobs)
91
+ metric = jobs.first.metrics.first
92
+
93
+ @output.with_benchmark do
94
+ jobs.each do |job|
95
+ @output.with_job(name: job.name) do
96
+ # Running benchmarks in an alternated manner is NOT compatible with two things:
97
+ # * Output plugins. They expect RubyA, RubyA, RubyB, RubyB, ...
98
+ # * BenchmarkDriver::Repeater. It should be used for results of the same condition.
99
+ #
100
+ # Therefore, we run all benchmarks with executables alternated first here, and then
101
+ # aggregate the results as if the same executable were repeated in a row.
102
+ context_results = Hash.new do |hash, context|
103
+ hash[context] = []
104
+ end
105
+ jobs.each do |job|
106
+ @config.repeat_count.times do
107
+ @contexts.each do |context|
108
+ context_results[context] << run_job(job, exec: context.executable)
109
+ end
110
+ end
111
+ end
112
+
113
+ # Aggregate results by BenchmarkDriver::Repeater and pass them to output.
114
+ @contexts.each do |context|
115
+ repeat_params = { config: @config, larger_better: metric.larger_better }
116
+ result = BenchmarkDriver::Repeater.with_repeat(**repeat_params) do
117
+ context_results[context].shift
118
+ end
119
+ value, environment = result.value
120
+
121
+ exec = context.executable
122
+ @output.with_context(name: exec.name, executable: exec) do
123
+ @output.report(
124
+ values: { metric => value },
125
+ all_values: { metric => result.all_values },
126
+ environment: environment,
127
+ )
128
+ end
129
+ end
130
+ end
131
+ end
132
+ end
133
+ end
134
+
135
+ # Default mode. Execution order: RubyA, RubyA, RubyB, RubyB, ...
136
+ def incremental_run(jobs)
80
137
  metric = jobs.first.metrics.first
81
138
 
82
139
  @output.with_benchmark do
@@ -86,20 +143,7 @@ class BenchmarkDriver::Runner::RubyStdout
86
143
  exec = context.executable
87
144
  repeat_params = { config: @config, larger_better: metric.larger_better }
88
145
  result = BenchmarkDriver::Repeater.with_repeat(**repeat_params) do
89
- begin
90
- stdout = with_chdir(job.working_directory) do
91
- with_ruby_prefix(exec) { execute(*exec.command, *job.command) }
92
- end
93
- script = StdoutToMetrics.new(
94
- stdout: stdout,
95
- value_from_stdout: job.value_from_stdout,
96
- environment_from_stdout: job.environment_from_stdout,
97
- )
98
- [script.value, script.environment]
99
- rescue CommandFailure => e
100
- $stderr.puts("\n```\n#{e.message}```\n")
101
- [BenchmarkDriver::Result::ERROR, {}]
102
- end
146
+ run_job(job, exec: exec)
103
147
  end
104
148
  value, environment = result.value
105
149
 
@@ -116,7 +160,21 @@ class BenchmarkDriver::Runner::RubyStdout
116
160
  end
117
161
  end
118
162
 
119
- private
163
+ # Run a job and return what BenchmarkDriver::Repeater.with_repeat takes.
164
+ def run_job(job, exec:)
165
+ stdout = with_chdir(job.working_directory) do
166
+ with_ruby_prefix(exec) { execute(*exec.command, *job.command) }
167
+ end
168
+ script = StdoutToMetrics.new(
169
+ stdout: stdout,
170
+ value_from_stdout: job.value_from_stdout,
171
+ environment_from_stdout: job.environment_from_stdout,
172
+ )
173
+ [script.value, script.environment]
174
+ rescue CommandFailure => e
175
+ $stderr.puts("\n```\n#{e.message}```\n")
176
+ [BenchmarkDriver::Result::ERROR, {}]
177
+ end
120
178
 
121
179
  def with_ruby_prefix(executable, &block)
122
180
  env = ENV.to_h.dup
@@ -1,3 +1,3 @@
1
1
  module BenchmarkDriver
2
- VERSION = '0.15.12'
2
+ VERSION = '0.15.17'
3
3
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: benchmark_driver
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.15.12
4
+ version: 0.15.17
5
5
  platform: ruby
6
6
  authors:
7
7
  - Takashi Kokubun
8
- autorequire:
8
+ autorequire:
9
9
  bindir: exe
10
10
  cert_chain: []
11
- date: 2020-04-22 00:00:00.000000000 Z
11
+ date: 2021-02-11 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: bundler
@@ -131,7 +131,7 @@ homepage: https://github.com/benchmark-driver/benchmark-driver
131
131
  licenses:
132
132
  - MIT
133
133
  metadata: {}
134
- post_install_message:
134
+ post_install_message:
135
135
  rdoc_options: []
136
136
  require_paths:
137
137
  - lib
@@ -146,8 +146,8 @@ required_rubygems_version: !ruby/object:Gem::Requirement
146
146
  - !ruby/object:Gem::Version
147
147
  version: '0'
148
148
  requirements: []
149
- rubygems_version: 3.2.0.pre1
150
- signing_key:
149
+ rubygems_version: 3.2.6
150
+ signing_key:
151
151
  specification_version: 4
152
152
  summary: Fully-featured accurate benchmark driver for Ruby
153
153
  test_files: []