benchmark_driver 0.15.15 → 0.15.18

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: d6a5ecae78649aa29ad4d54635e170cb6ab998cc1f4e67102a821dd39a5646ac
-  data.tar.gz: 0c131020c654dc8b4fb6cf40dd134fc959267602722d8eaa538f39c2e99768cd
+  metadata.gz: 056db37cfc1c2453c9ab994d4b1241bb450ad7e138e69e2ae4a5995825976753
+  data.tar.gz: f827c814001e175ebcb878181ff0b6b411466bb34bf4ff4a34e02b0f1fdafdda
 SHA512:
-  metadata.gz: 1e471938da9128c589182326c6dcbc6a0eb62249150165452b96635f598c2ad8b3e322107ee41b51f440d6ddaaffd5735d5b8046a660abdfaea9cdf64322d433
-  data.tar.gz: 3f4ce47df9e8d5b62c3bd73aef5ebe89976b44886ccf94bd3d78b9f58a62538744f62b6dd709eb70216257183efa888d0e1be1ed5cac29e4a77c2a8498002dde
+  metadata.gz: f34f11c0d5a30be2e232af08d00e2e1b2420a161aa18c1f020a0f342f53804b82da754c61632a008ec7822f710edab2af7a1dde18cf1a67a4e51ab14852b3b3a
+  data.tar.gz: e0601d752e1f55d95ae2c1b83f6dfaa432290b28856ceca553500597f8b06b508c34909df98cd96f3aaf0989df9d2ef76fa0c1e31099f80486b37fa641ce15dd
data/CHANGELOG.md CHANGED
@@ -1,3 +1,16 @@
+# v0.15.18
+
+- Mention `--output=all` in help
+
+# v0.15.17
+
+- Jobs returned from job parsers are made mutable for plugins
+
+# v0.15.16
+
+- Add `--alternate` runner option to run executables alternately.
+  This is supported only for `ruby_stdout` runner for now.
+
 # v0.15.15
 
 - An absolute path is passed to `command_stdout`'s `working_directory`
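
Taken together, the new entries change the command line: `--output`/`-o` now accepts `all`, and `--alternate` interleaves executables across repetitions. A sketch of an invocation exercising both (the YAML file name is a placeholder, and as the changelog notes `--alternate` only works with `ruby_stdout`-style jobs):

```
# Hypothetical example: bench.yml stands in for a ruby_stdout benchmark definition.
benchmark-driver bench.yml --repeat-count 3 --alternate -o all
```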
data/exe/benchmark-driver CHANGED
@@ -18,7 +18,7 @@ config = BenchmarkDriver::Config.new.tap do |c|
     o.on('-r', '--runner TYPE', String, 'Specify runner type: ips, time, memory, once, block (default: ips)') do |d|
       c.runner_type = d
     end
-    o.on('-o', '--output TYPE', String, 'Specify output type: compare, simple, markdown, record (default: compare)') do |out|
+    o.on('-o', '--output TYPE', String, 'Specify output type: compare, simple, markdown, record, all (default: compare)') do |out|
       c.output_type = out
       begin
         plugin_options = BenchmarkDriver::Output.get(out).const_get('OPTIONS', false)
@@ -67,6 +67,9 @@ config = BenchmarkDriver::Config.new.tap do |c|
       end
       c.repeat_result = v
     end
+    o.on('--alternate', 'Alternate executables instead of running the same executable in a row with --repeat-count') do |v|
+      c.alternate = v
+    end
     o.on('--bundler', 'Install and use gems specified in Gemfile') do |v|
       bundler = v
     end
@@ -13,6 +13,7 @@ module BenchmarkDriver
     :filters, # @param [Array<Regexp>]
     :repeat_count, # @param [Integer]
     :repeat_result, # @param [String]
+    :alternate, # @param [TrueClass,FalseClass]
     :run_duration, # @param [Float]
     :timeout, # @param [Float,nil]
     :verbose, # @param [Integer]
@@ -23,15 +24,17 @@ module BenchmarkDriver
       filters: [],
       repeat_count: 1,
       repeat_result: 'best',
+      alternate: false,
       run_duration: 3.0,
       verbose: 0,
     },
   )
 
-  # Subset of FullConfig passed to JobRunner
+  # Subset of Config passed to JobRunner
   Config::RunnerConfig = ::BenchmarkDriver::Struct.new(
     :repeat_count, # @param [Integer]
     :repeat_result, # @param [String]
+    :alternate, # @param [TrueClass,FalseClass]
     :run_duration, # @param [Float]
     :timeout, # @param [Float,nil]
     :verbose, # @param [Integer]
@@ -26,7 +26,7 @@ module BenchmarkDriver
       job.teardown.prepend("#{teardown}\n") if teardown
       job.loop_count ||= loop_count
       job.required_ruby_version ||= required_ruby_version
-    end.each(&:freeze)
+    end
   end
 
   private
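
This hunk is the change behind the v0.15.17 entry: parsed jobs are no longer frozen, so runner and output plugins can adjust them in place (the Ips runner hunk further down now assigns `job.loop_count` directly for the same reason). A minimal illustration of what removing `freeze` permits, using a plain `Struct` as a stand-in for a parsed job object:

```ruby
# Stand-in for a job object returned by a job parser; the real class lives in the gem.
Job = Struct.new(:name, :loop_count, keyword_init: true)

frozen_jobs = [Job.new(name: 'example', loop_count: nil)].each(&:freeze)
# frozen_jobs.each { |job| job.loop_count ||= 1_000 }  # => FrozenError before v0.15.17

jobs = [Job.new(name: 'example', loop_count: nil)]  # no freeze, as of v0.15.17
jobs.each { |job| job.loop_count ||= 1_000 }         # plugins may now mutate jobs
```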
@@ -8,6 +8,7 @@ class BenchmarkDriver::Output::Markdown
   # @param [Array<BenchmarkDriver::Metric>] metrics
   # @param [Array<BenchmarkDriver::Job>] jobs
   # @param [Array<BenchmarkDriver::Context>] contexts
+  # @param [Hash{ Symbol => Object }] options
   def initialize(metrics:, jobs:, contexts:, options:)
     @metrics = metrics
     @contexts = contexts
@@ -40,8 +40,8 @@ class BenchmarkDriver::Runner::Ips
         @output.report(values: { metric => value }, duration: duration, loop_count: loop_count)
       end
 
-      loop_count = (loop_count.to_f * @config.run_duration / duration).floor
-      Job.new(**job.to_h.merge(loop_count: loop_count))
+      job.loop_count = (loop_count.to_f * @config.run_duration / duration).floor
+      job
     end
   end
 end
@@ -77,6 +77,63 @@ class BenchmarkDriver::Runner::RubyStdout
   # This method is dynamically called by `BenchmarkDriver::JobRunner.run`
   # @param [Array<BenchmarkDriver::Default::Job>] jobs
   def run(jobs)
+    if @config.alternate
+      alternated_run(jobs)
+    else
+      incremental_run(jobs)
+    end
+  end
+
+  private
+
+  # Special mode. Execution order: RubyA, RubyB, ..., RubyA, RubyB, ...
+  def alternated_run(jobs)
+    metric = jobs.first.metrics.first
+
+    @output.with_benchmark do
+      jobs.each do |job|
+        @output.with_job(name: job.name) do
+          # Running benchmarks in an alternated manner is NOT compatible with two things:
+          # * Output plugins. They expect RubyA, RubyA, RubyB, RubyB, ...
+          # * BenchmarkDriver::Repeater. It should be used for results of the same condition.
+          #
+          # Therefore, we run all benchmarks with executables alternated first here, and then
+          # aggregate the results as if the same executable were repeated in a row.
+          context_results = Hash.new do |hash, context|
+            hash[context] = []
+          end
+          jobs.each do |job|
+            @config.repeat_count.times do
+              @contexts.each do |context|
+                context_results[context] << run_job(job, exec: context.executable)
+              end
+            end
+          end
+
+          # Aggregate results by BenchmarkDriver::Repeater and pass them to output.
+          @contexts.each do |context|
+            repeat_params = { config: @config, larger_better: metric.larger_better }
+            result = BenchmarkDriver::Repeater.with_repeat(**repeat_params) do
+              context_results[context].shift
+            end
+            value, environment = result.value
+
+            exec = context.executable
+            @output.with_context(name: exec.name, executable: exec) do
+              @output.report(
+                values: { metric => value },
+                all_values: { metric => result.all_values },
+                environment: environment,
+              )
+            end
+          end
+        end
+      end
+    end
+  end
+
+  # Default mode. Execution order: RubyA, RubyA, RubyB, RubyB, ...
+  def incremental_run(jobs)
     metric = jobs.first.metrics.first
 
     @output.with_benchmark do
@@ -86,20 +143,7 @@ class BenchmarkDriver::Runner::RubyStdout
           exec = context.executable
           repeat_params = { config: @config, larger_better: metric.larger_better }
           result = BenchmarkDriver::Repeater.with_repeat(**repeat_params) do
-            begin
-              stdout = with_chdir(job.working_directory) do
-                with_ruby_prefix(exec) { execute(*exec.command, *job.command) }
-              end
-              script = StdoutToMetrics.new(
-                stdout: stdout,
-                value_from_stdout: job.value_from_stdout,
-                environment_from_stdout: job.environment_from_stdout,
-              )
-              [script.value, script.environment]
-            rescue CommandFailure => e
-              $stderr.puts("\n```\n#{e.message}```\n")
-              [BenchmarkDriver::Result::ERROR, {}]
-            end
+            run_job(job, exec: exec)
           end
           value, environment = result.value
 
@@ -116,7 +160,21 @@ class BenchmarkDriver::Runner::RubyStdout
     end
   end
 
-  private
+  # Run a job and return what BenchmarkDriver::Repeater.with_repeat takes.
+  def run_job(job, exec:)
+    stdout = with_chdir(job.working_directory) do
+      with_ruby_prefix(exec) { execute(*exec.command, *job.command) }
+    end
+    script = StdoutToMetrics.new(
+      stdout: stdout,
+      value_from_stdout: job.value_from_stdout,
+      environment_from_stdout: job.environment_from_stdout,
+    )
+    [script.value, script.environment]
+  rescue CommandFailure => e
+    $stderr.puts("\n```\n#{e.message}```\n")
+    [BenchmarkDriver::Result::ERROR, {}]
+  end
 
   def with_ruby_prefix(executable, &block)
     env = ENV.to_h.dup
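
The comment in `alternated_run` above is the core of the feature: the execution order changes, but results are regrouped afterwards so `BenchmarkDriver::Repeater` and the output layer still see one per-executable series at a time. A minimal sketch of the two orderings, with placeholder executable names:

```ruby
# Illustrative only: compare the execution order of the two modes for two
# executables and --repeat-count 2. Names are placeholders.
executables  = %w[ruby_a ruby_b]
repeat_count = 2

# incremental_run (default): the same executable is repeated back to back.
incremental = executables.flat_map { |exec| [exec] * repeat_count }
# => ["ruby_a", "ruby_a", "ruby_b", "ruby_b"]

# alternated_run (--alternate): executables alternate on every repetition;
# the collected results are then regrouped per executable before being
# handed to BenchmarkDriver::Repeater and the output.
alternated = repeat_count.times.flat_map { executables }
# => ["ruby_a", "ruby_b", "ruby_a", "ruby_b"]
```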
@@ -30,6 +30,10 @@ module BenchmarkDriver
     contexts_jobs.group_by(&:metrics).each do |metrics, metrics_jobs|
       metrics_jobs.group_by(&:class).each do |klass, klass_jobs|
         runner = runner_for(klass)
+        if runner_config.alternate && runner != BenchmarkDriver::Runner::RubyStdout
+          abort "--alternate is supported only for ruby_stdout runner for now"
+        end
+
         contexts = build_contexts(contexts, executables: config.executables)
         output = Output.new(
           type: config.output_type,
@@ -1,3 +1,3 @@
 module BenchmarkDriver
-  VERSION = '0.15.15'
+  VERSION = '0.15.18'
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: benchmark_driver
 version: !ruby/object:Gem::Version
-  version: 0.15.15
+  version: 0.15.18
 platform: ruby
 authors:
 - Takashi Kokubun
-autorequire:
+autorequire:
 bindir: exe
 cert_chain: []
-date: 2020-06-22 00:00:00.000000000 Z
+date: 2022-06-08 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: bundler
@@ -131,7 +131,7 @@ homepage: https://github.com/benchmark-driver/benchmark-driver
 licenses:
 - MIT
 metadata: {}
-post_install_message:
+post_install_message:
 rdoc_options: []
 require_paths:
 - lib
@@ -146,8 +146,8 @@ required_rubygems_version: !ruby/object:Gem::Requirement
 - !ruby/object:Gem::Version
   version: '0'
 requirements: []
-rubygems_version: 3.2.0.pre1
-signing_key:
+rubygems_version: 3.1.2
+signing_key:
 specification_version: 4
 summary: Fully-featured accurate benchmark driver for Ruby
 test_files: []