benchmark_driver 0.12.0 → 0.13.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: cc2f6826606613671a2e6b8d0384421924bd9487
- data.tar.gz: b310edeb8fa144e6eb6569b670ae7291e5a77a70
+ metadata.gz: 8d80a15877a535548463502efb2a1a39b708c3e7
+ data.tar.gz: 8a569023b7cde0b811f4bfc96c13587e863a7e1d
  SHA512:
- metadata.gz: a121e71123b95d5dba0f130f2efa65aaeb9adb42c4e9046703c3afcbd3726a698b7f33e870d3c873a5eda7bbaea20bce2061f87a085118a9d8a6b36b183c6e1a
- data.tar.gz: 942d159581bf537e62c15b46dba3769c56eb981d98d7d7d1055b872b004cdbc9b85713d2447e009b9b61f53357c45d8a116c19aa730af3541e94af1fe4336303
+ metadata.gz: 2d44bf5752e35b615c219916d45828e0e535729b2290ca590d6c3d16b4ed8928afb2c66cdac65609af78f85792374a1715089efad2b6921f1616bcc43c74b824
+ data.tar.gz: a8744d5d19ac34aaf2951a6f7704a2fddc6f4eeeb7a9e079808a59469db16723b03aeff09a9d79b6563f8bda92795e8f99f1539225ffe28bc1b342e554443143
data/.travis.yml CHANGED
@@ -8,6 +8,9 @@ cache: bundler
  branches:
  only:
  - master
- before_install: gem install bundler -v 1.15.4
+ before_install:
+ - gem install bundler -v 1.15.4
+ - gem install haml -v 4.0.7
+ - gem install haml -v 5.0.4
  script:
  - RSPEC_RETRIES=3 VERBOSE=1 bundle exec rake
data/CHANGELOG.md CHANGED
@@ -1,3 +1,8 @@
+ # v0.13.0
+
+ - Add support for benchmark with different versions of gems and preludes
+ - [breaking change] Runner plugin interface is slightly changed
+
  # v0.12.0

  - [breaking change] Plugin interface is changed again
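The headline change above is support for running the same benchmark under multiple gem versions. A minimal job-file sketch of the new feature follows; the YAML keys are inferred from the `parse(contexts: ...)` and `parse_context(name:, prelude:, gems:)` additions later in this diff, not from official documentation, so treat the exact layout as an assumption:

```yaml
# benchmark.yml (hypothetical example): compare one script across two haml versions
contexts:
  - gems:
      haml: 4.0.7
  - gems:
      haml: 5.0.4
prelude: |
  template = "%p hello"
benchmark:
  render: Haml::Engine.new(template).render
```

Each `gems:` entry is expanded into a `gem '<name>', '<version>'` plus `require '<name>'` prelude, so the script is measured once per gem version, and once per Ruby executable when several are configured.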
@@ -18,7 +18,7 @@ module BenchmarkDriver
  end

  # The main API you need to override if you make a class inherit `BenchmarkDriver::BulkOutput`.
- # @param [Hash{ BenchmarkDriver::Job => Hash{ BenchmarkDriver::Context => { BenchmarkDriver::Metric => Float } } }] result
+ # @param [Hash{ BenchmarkDriver::Job => Hash{ BenchmarkDriver::Context => BenchmarkDriver::Result } }] job_context_result
  # @param [Array<BenchmarkDriver::Metric>] metrics
  def bulk_output(job_context_result:, metrics:)
  raise NotImplementedError.new("#{self.class} must override #bulk_output")
@@ -25,7 +25,6 @@ module BenchmarkDriver

  # Subset of FullConfig passed to JobRunner
  Config::RunnerConfig = ::BenchmarkDriver::Struct.new(
- :executables, # @param [Array<BenchmarkDriver::Config::Executable>]
  :repeat_count, # @param [Integer]
  :repeat_result, # @param [String]
  :run_duration, # @param [Float]
@@ -4,6 +4,7 @@ module BenchmarkDriver
  DefaultJob = ::BenchmarkDriver::Struct.new(
  :name, # @param [String] name - This is mandatory for all runner
  :metrics, # @param [Array<BenchmarkDriver::Metric>] - This is mandatory for all runner too, set by job parser.
+ :contexts, # @param [Array<BenchmarkDriver::Context>] - This is optional parameter for runners.
  :script, # @param [String] benchmark
  :prelude, # @param [String,nil] prelude (optional)
  :teardown, # @param [String,nil] after (optional)
@@ -11,17 +12,17 @@ module BenchmarkDriver
  :required_ruby_version, # @param [String,nil] required_ruby_version (optional)
  defaults: { prelude: '', teardown: '' },
  ) do
- def runnable_execs(executables)
+ def runnable_contexts(contexts)
  if required_ruby_version
- executables.select do |executable|
- Gem::Version.new(executable.version) >= Gem::Version.new(required_ruby_version)
+ contexts.select do |context|
+ Gem::Version.new(context.executable.version) >= Gem::Version.new(required_ruby_version)
  end.tap do |result|
  if result.empty?
  raise "No Ruby executables conforming required_ruby_version (#{required_ruby_version}) are specified"
  end
  end
  else
- executables
+ contexts
  end
  end
  end
@@ -18,8 +18,9 @@ module BenchmarkDriver
  # @param [Integer] loop_count
  # @param [String] required_ruby_version
  # @return [Array<BenchmarkDriver::Default::Job>]
- def parse(prelude: nil, benchmark:, teardown: nil, loop_count: nil, required_ruby_version: nil)
+ def parse(contexts: [], prelude: nil, benchmark:, teardown: nil, loop_count: nil, required_ruby_version: nil)
  parse_benchmark(benchmark).each do |job|
+ job.contexts = parse_contexts(contexts)
  job.metrics = job_metrics
  job.prelude.prepend("#{prelude}\n") if prelude
  job.teardown.prepend("#{teardown}\n") if teardown
@@ -66,5 +67,26 @@ module BenchmarkDriver
  def job_class
  raise NotImplementedError # override this
  end
+
+ def parse_contexts(contexts)
+ if contexts.is_a?(Array)
+ contexts.map { |context| parse_context(context) }
+ else
+ raise ArgumentError.new("contexts must be Array, but got: #{contexts.inspect}")
+ end
+ end
+
+ def parse_context(name: nil, prelude: '', gems: {})
+ # Version lock with `Bundler.require`-like behavior (no `require: xxx` support yet)
+ gems.each do |gem, version|
+ prelude = "gem '#{gem}', '#{version}'\nrequire '#{gem}'\n#{prelude}"
+ end
+
+ BenchmarkDriver::Context.new(
+ name: name,
+ gems: gems,
+ prelude: prelude,
+ )
+ end
  end
  end
@@ -24,7 +24,7 @@ module BenchmarkDriver
  :executable, # @param [BenchmarkDriver::Config::Executable] - Measured Ruby executable
  :gems, # @param [Hash{ String => String,nil }] - Gem -> version pairs used for the benchmark
  :prelude, # @param [String,nil] - Context specific setup script (optional)
- defaults: { gems: {} },
+ defaults: { prelude: '', gems: {} },
  )

  # Everything that can be known after running benchmark
@@ -8,11 +8,11 @@ module BenchmarkDriver
  # metrics=
  # with_warmup
  # with_job(name:)
- # with_context(name:, executable:)
+ # with_context(name:, executable:, gems:)
  # report(values:, duration: nil, loop_count: nil, environment: {})
  # with_benchmark
  # with_job(name:)
- # with_context(name:, executable:)
+ # with_context(name:, executable:, gems:)
  # report(values:, duration: nil, loop_count: nil, environment: {})
  class Output
  require 'benchmark_driver/output/compare'
@@ -65,10 +65,9 @@ module BenchmarkDriver

  # @param [String] name
  # @param [BenchmarkDriver::Config::Executable] executable
- # @param [Float] duration
- # @param [Integer] loop_count
- def with_context(name:, executable:, &block)
- context = BenchmarkDriver::Context.new(name: name, executable: executable)
+ # @param [Hash{ String => String}] gems
+ def with_context(name:, executable:, gems: {}, &block)
+ context = BenchmarkDriver::Context.new(name: name, executable: executable, gems: gems)
  @output.with_context(context) do
  block.call
  end
@@ -25,7 +25,7 @@ class BenchmarkDriver::Output::Compare
  hash[job] = {}
  end

- without_stdout_buffering do
+ result = without_stdout_buffering do
  $stdout.puts 'Calculating -------------------------------------'
  if @context_names.size > 1
  $stdout.print(' ' * @name_length)
@@ -37,12 +37,12 @@ class BenchmarkDriver::Output::Compare

  block.call
  end
- ensure
  if @context_names.size > 1
  compare_executables
  elsif @job_names.size > 1
  compare_jobs
  end
+ result
  end

  # @param [BenchmarkDriver::Job] job
@@ -56,8 +56,7 @@ class BenchmarkDriver::Output::Compare
  @job = name
  @job_results = []
  @job_contexts = []
- block.call
- ensure
+ result = block.call
  $stdout.print(@metrics.first.unit)
  loop_count = @job_results.first.loop_count
  if loop_count && @job_results.all? { |r| r.loop_count == loop_count }
@@ -68,6 +67,7 @@ class BenchmarkDriver::Output::Compare
  end
  end
  $stdout.puts
+ result
  end

  # @param [BenchmarkDriver::Context] context
@@ -161,9 +161,9 @@ class BenchmarkDriver::Output::Compare
  def compare_jobs
  $stdout.puts "\nComparison:"
  results = @job_context_result.flat_map do |job, context_result|
- context_result.map { |context, result| Result.new(job: job, value: result.values.values.first, executable: context.executable) }
+ context_result.map { |context, result| Result.new(job: job, value: result.values.values.first, context: context) }
  end
- show_results(results, show_executable: false)
+ show_results(results, show_context: false)
  end

  def compare_executables
@@ -172,15 +172,15 @@ class BenchmarkDriver::Output::Compare
  @job_context_result.each do |job, context_result|
  $stdout.puts("%#{@name_length + 2 + 11}s" % job)
  results = context_result.flat_map do |context, result|
- result.values.values.map { |value| Result.new(job: job, value: value, executable: context.executable) }
+ result.values.values.map { |value| Result.new(job: job, value: value, context: context) }
  end
- show_results(results, show_executable: true)
+ show_results(results, show_context: true)
  end
  end

  # @param [Array<BenchmarkDriver::Output::Compare::Result>] results
- # @param [TrueClass,FalseClass] show_executable
- def show_results(results, show_executable:)
+ # @param [TrueClass,FalseClass] show_context
+ def show_results(results, show_context:)
  results = results.sort_by do |result|
  if @metrics.first.larger_better
  -result.value
@@ -199,8 +199,8 @@ class BenchmarkDriver::Output::Compare
  end
  slower = "- %.2fx #{@metrics.first.worse_word}" % ratio
  end
- if show_executable
- name = result.executable.name
+ if show_context
+ name = result.context.name
  else
  name = result.job
  end
@@ -209,5 +209,5 @@ class BenchmarkDriver::Output::Compare
  $stdout.puts
  end

- Result = ::BenchmarkDriver::Struct.new(:job, :value, :executable)
+ Result = ::BenchmarkDriver::Struct.new(:job, :value, :context)
  end
@@ -1,5 +1,5 @@
  class BenchmarkDriver::Output::Simple
- NAME_LENGTH = 8
+ NAME_LENGTH = 10

  # @param [Array<BenchmarkDriver::Metric>] metrics
  # @param [Array<BenchmarkDriver::Job>] jobs
@@ -28,7 +28,7 @@ class BenchmarkDriver::Output::Simple
  # Show executable names
  if @context_names.size > 1
  $stdout.print("#{' ' * @name_length} ")
- @context_name.each do |context_name|
+ @context_names.each do |context_name|
  $stdout.print("%#{NAME_LENGTH}s " % context_name)
  end
  $stdout.puts
@@ -36,7 +36,7 @@ class BenchmarkDriver::Output::Simple

  block.call
  end
- rescue
+ ensure
  @with_benchmark = false
  end

@@ -21,26 +21,26 @@ module BenchmarkDriver
  end

  runner_config = Config::RunnerConfig.new(
- executables: config.executables,
  repeat_count: config.repeat_count,
  repeat_result: config.repeat_result,
  run_duration: config.run_duration,
  verbose: config.verbose,
  )

- jobs.group_by(&:class).each do |klass, klass_jobs|
- klass_jobs.group_by(&:metrics).each do |metrics, metrics_jobs|
- runner = runner_for(klass)
- output = Output.new(
- type: config.output_type,
- metrics: metrics,
- jobs: jobs.map { |job| BenchmarkDriver::Job.new(name: job.name) },
- contexts: config.executables.map { |exec|
- BenchmarkDriver::Context.new(name: exec.name, executable: exec)
- },
- )
- with_clean_env do
- runner.new(config: runner_config, output: output).run(metrics_jobs)
+ jobs.group_by{ |j| j.respond_to?(:contexts) && j.contexts }.each do |contexts, contexts_jobs|
+ contexts_jobs.group_by(&:metrics).each do |metrics, metrics_jobs|
+ metrics_jobs.group_by(&:class).each do |klass, klass_jobs|
+ runner = runner_for(klass)
+ contexts = build_contexts(contexts, executables: config.executables)
+ output = Output.new(
+ type: config.output_type,
+ metrics: metrics,
+ jobs: klass_jobs.map { |job| BenchmarkDriver::Job.new(name: job.name) },
+ contexts: contexts,
+ )
+ with_clean_env do
+ runner.new(config: runner_config, output: output, contexts: contexts).run(klass_jobs)
+ end
  end
  end
  end
@@ -48,6 +48,36 @@ module BenchmarkDriver

  private

+ def build_contexts(contexts, executables:)
+ # If contexts are not specified, just use executables as contexts.
+ if !contexts.is_a?(Array) || contexts.empty?
+ return executables.map { |exec|
+ BenchmarkDriver::Context.new(name: exec.name, executable: exec)
+ }
+ end
+
+ # Create direct product of contexts
+ contexts.product(executables).map do |context, executable|
+ name = context.name
+ if name.nil?
+ # Use the first gem name and version by default
+ name = context.gems.first.join(' ')
+
+ # Append Ruby executable name if it's matrix
+ if executables.size > 1
+ name = "#{name} (#{executable.name})"
+ end
+ end
+
+ BenchmarkDriver::Context.new(
+ name: name,
+ executable: executable,
+ gems: context.gems,
+ prelude: context.prelude,
+ )
+ end
+ end
+
  # Dynamically find class (BenchmarkDriver::*::JobRunner) for plugin support
  # @param [Class] klass - BenchmarkDriver::*::Job
  # @return [Class]
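For reference, the default naming of generated contexts can be illustrated with a small standalone sketch (illustration only; the Struct definitions and the `default_name` helper are assumptions, not part of the gem):

```ruby
# Simplified reconstruction of the naming rule in build_contexts above:
# an unnamed context is labeled "<gem> <version>", and the executable name
# is appended only when more than one Ruby is being compared.
Context    = Struct.new(:name, :gems, keyword_init: true)
Executable = Struct.new(:name, keyword_init: true)

def default_name(context, executable, executables)
  name = context.name || context.gems.first.join(' ') # e.g. "haml 4.0.7"
  name = "#{name} (#{executable.name})" if executables.size > 1
  name
end

executables = [Executable.new(name: 'ruby-2.5'), Executable.new(name: 'ruby-head')]
context = Context.new(gems: { 'haml' => '4.0.7' })
executables.each { |e| puts default_name(context, e, executables) }
# => haml 4.0.7 (ruby-2.5)
#    haml 4.0.7 (ruby-head)
```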
@@ -47,9 +47,11 @@ class BenchmarkDriver::Runner::CommandStdout

  # @param [BenchmarkDriver::Config::RunnerConfig] config
  # @param [BenchmarkDriver::Output] output
- def initialize(config:, output:)
+ # @param [BenchmarkDriver::Context] contexts
+ def initialize(config:, output:, contexts:)
  @config = config
  @output = output
+ @contexts = contexts
  end

  # This method is dynamically called by `BenchmarkDriver::JobRunner.run`
@@ -60,7 +62,8 @@ class BenchmarkDriver::Runner::CommandStdout
  @output.with_benchmark do
  jobs.each do |job|
  @output.with_job(name: job.name) do
- @config.executables.each do |exec|
+ @contexts.each do |context|
+ exec = context.executable
  value = BenchmarkDriver::Repeater.with_repeat(config: @config, larger_better: metric.larger_better) do
  stdout = with_chdir(job.working_directory) do
  with_ruby_prefix(exec) { execute(*exec.command, *job.command) }
@@ -16,9 +16,11 @@ class BenchmarkDriver::Runner::Ips

  # @param [BenchmarkDriver::Config::RunnerConfig] config
  # @param [BenchmarkDriver::Output] output
- def initialize(config:, output:)
+ # @param [BenchmarkDriver::Context] contexts
+ def initialize(config:, output:, contexts:)
  @config = config
  @output = output
+ @contexts = contexts
  end

  # This method is dynamically called by `BenchmarkDriver::JobRunner.run`
@@ -30,11 +32,11 @@ class BenchmarkDriver::Runner::Ips
  next job if job.loop_count # skip warmup if loop_count is set

  @output.with_job(name: job.name) do
- executable = job.runnable_execs(@config.executables).first
- duration, loop_count = run_warmup(job, exec: executable)
+ context = job.runnable_contexts(@contexts).first
+ duration, loop_count = run_warmup(job, context: context)
  value, duration = value_duration(duration: duration, loop_count: loop_count)

- @output.with_context(name: executable.name, executable: executable) do
+ @output.with_context(name: context.name, executable: context.executable, gems: context.gems) do
  @output.report(values: { metric => value }, duration: duration, loop_count: loop_count)
  end

@@ -48,12 +50,12 @@ class BenchmarkDriver::Runner::Ips
  @output.with_benchmark do
  jobs.each do |job|
  @output.with_job(name: job.name) do
- job.runnable_execs(@config.executables).each do |exec|
+ job.runnable_contexts(@contexts).each do |context|
  repeat_params = { config: @config, larger_better: true, rest_on_average: :average }
  value, duration = BenchmarkDriver::Repeater.with_repeat(repeat_params) do
- run_benchmark(job, exec: exec)
+ run_benchmark(job, context: context)
  end
- @output.with_context(name: exec.name, executable: exec) do
+ @output.with_context(name: context.name, executable: context.executable, gems: context.gems) do
  @output.report(values: { metric => value }, duration: duration, loop_count: job.loop_count)
  end
  end
@@ -65,10 +67,10 @@ class BenchmarkDriver::Runner::Ips
  private

  # @param [BenchmarkDriver::Runner::Ips::Job] job - loop_count is nil
- # @param [BenchmarkDriver::Config::Executable] exec
- def run_warmup(job, exec:)
+ # @param [BenchmarkDriver::Context] context
+ def run_warmup(job, context:)
  warmup = WarmupScript.new(
- prelude: job.prelude,
+ prelude: "#{context.prelude}\n#{job.prelude}",
  script: job.script,
  teardown: job.teardown,
  loop_count: job.loop_count,
@@ -78,7 +80,7 @@ class BenchmarkDriver::Runner::Ips

  duration, loop_count = Tempfile.open(['benchmark_driver-', '.rb']) do |f|
  with_script(warmup.render(result: f.path)) do |path|
- execute(*exec.command, path)
+ execute(*context.executable.command, path)
  end
  eval(f.read)
  end
@@ -87,11 +89,11 @@ class BenchmarkDriver::Runner::Ips
  end

  # @param [BenchmarkDriver::Runner::Ips::Job] job - loop_count is not nil
- # @param [BenchmarkDriver::Config::Executable] exec
+ # @param [BenchmarkDriver::Context] context
  # @return [BenchmarkDriver::Metrics]
- def run_benchmark(job, exec:)
+ def run_benchmark(job, context:)
  benchmark = BenchmarkScript.new(
- prelude: job.prelude,
+ prelude: "#{context.prelude}\n#{job.prelude}",
  script: job.script,
  teardown: job.teardown,
  loop_count: job.loop_count,
@@ -99,7 +101,7 @@ class BenchmarkDriver::Runner::Ips

  duration = Tempfile.open(['benchmark_driver-', '.rb']) do |f|
  with_script(benchmark.render(result: f.path)) do |path|
- execute(*exec.command, path)
+ execute(*context.executable.command, path)
  end
  Float(f.read)
  end
@@ -18,9 +18,11 @@ class BenchmarkDriver::Runner::Memory

  # @param [BenchmarkDriver::Config::RunnerConfig] config
  # @param [BenchmarkDriver::Output] output
- def initialize(config:, output:)
+ # @param [BenchmarkDriver::Context] contexts
+ def initialize(config:, output:, contexts:)
  @config = config
  @output = output
+ @contexts = contexts
  end

  # This method is dynamically called by `BenchmarkDriver::JobRunner.run`
@@ -40,11 +42,11 @@ class BenchmarkDriver::Runner::Memory
  @output.with_benchmark do
  jobs.each do |job|
  @output.with_job(name: job.name) do
- job.runnable_execs(@config.executables).each do |exec|
+ job.runnable_contexts(@contexts).each do |context|
  value = BenchmarkDriver::Repeater.with_repeat(config: @config, larger_better: false) do
- run_benchmark(job, exec: exec)
+ run_benchmark(job, context: context)
  end
- @output.with_context(name: exec.name, executable: exec) do
+ @output.with_context(name: context.name, executable: context.executable, gems: context.gems) do
  @output.report(values: { METRIC => value }, loop_count: job.loop_count)
  end
  end
@@ -56,18 +58,18 @@ class BenchmarkDriver::Runner::Memory
  private

  # @param [BenchmarkDriver::Runner::Ips::Job] job - loop_count is not nil
- # @param [BenchmarkDriver::Config::Executable] exec
+ # @param [BenchmarkDriver::Context] context
  # @return [BenchmarkDriver::Metrics]
- def run_benchmark(job, exec:)
+ def run_benchmark(job, context:)
  benchmark = BenchmarkScript.new(
- prelude: job.prelude,
+ prelude: "#{context.prelude}\n#{job.prelude}",
  script: job.script,
  teardown: job.teardown,
  loop_count: job.loop_count,
  )

  output = with_script(benchmark.render) do |path|
- execute('/usr/bin/time', *exec.command, path)
+ execute('/usr/bin/time', *context.executable.command, path)
  end

  match_data = /^(?<user>\d+.\d+)user\s+(?<system>\d+.\d+)system\s+(?<elapsed1>\d+):(?<elapsed2>\d+.\d+)elapsed.+\([^\s]+\s+(?<maxresident>\d+)maxresident\)k$/.match(output)
@@ -16,9 +16,11 @@ class BenchmarkDriver::Runner::Once

  # @param [BenchmarkDriver::Config::RunnerConfig] config
  # @param [BenchmarkDriver::Output] output
- def initialize(config:, output:)
+ # @param [BenchmarkDriver::Context] contexts
+ def initialize(config:, output:, contexts:)
  @config = config
  @output = output
+ @contexts = contexts
  end

  # This method is dynamically called by `BenchmarkDriver::JobRunner.run`
@@ -31,9 +33,9 @@ class BenchmarkDriver::Runner::Once
  @output.with_benchmark do
  jobs.each do |job|
  @output.with_job(name: job.name) do
- job.runnable_execs(@config.executables).each do |exec|
- duration = run_benchmark(job, exec: exec) # no repeat support
- @output.with_context(name: exec.name, executable: exec) do
+ job.runnable_contexts(@contexts).each do |context|
+ duration = run_benchmark(job, context: context) # no repeat support
+ @output.with_context(name: context.name, executable: context.executable, gems: context.gems) do
  @output.report(values: { METRIC => 1.0 / duration }, duration: duration, loop_count: 1)
  end
  end
@@ -45,11 +47,11 @@ class BenchmarkDriver::Runner::Once
  private

  # @param [BenchmarkDriver::Runner::Ips::Job] job - loop_count is not nil
- # @param [BenchmarkDriver::Config::Executable] exec
+ # @param [BenchmarkDriver::Context] context
  # @return [Float] duration
- def run_benchmark(job, exec:)
+ def run_benchmark(job, context:)
  benchmark = BenchmarkScript.new(
- prelude: job.prelude,
+ prelude: "#{context.prelude}\n#{job.prelude}",
  script: job.script,
  teardown: job.teardown,
  loop_count: job.loop_count,
@@ -57,7 +59,7 @@ class BenchmarkDriver::Runner::Once

  Tempfile.open(['benchmark_driver-', '.rb']) do |f|
  with_script(benchmark.render(result: f.path)) do |path|
- execute(*exec.command, path)
+ execute(*context.executable.command, path)
  end
  Float(f.read)
  end
@@ -30,9 +30,11 @@ class BenchmarkDriver::Runner::Recorded

  # @param [BenchmarkDriver::Config::RunnerConfig] config
  # @param [BenchmarkDriver::Output] output
- def initialize(config:, output:)
+ # @param [BenchmarkDriver::Context] contexts
+ def initialize(config:, output:, contexts:)
  @config = config
  @output = output
+ @contexts = contexts
  end

  # This method is dynamically called by `BenchmarkDriver::JobRunner.run`
@@ -48,7 +50,7 @@ class BenchmarkDriver::Runner::Recorded
  records.each do |record|
  @output.with_job(name: record.name) do
  record.benchmark_results.each do |context, result|
- @output.with_context(name: context.name, executable: context.executable) do
+ @output.with_context(name: context.name, executable: context.executable, gems: context.gems) do
  @output.report(
  values: result.values,
  duration: result.duration,
@@ -64,9 +64,11 @@ class BenchmarkDriver::Runner::RubyStdout

  # @param [BenchmarkDriver::Config::RunnerConfig] config
  # @param [BenchmarkDriver::Output] output
- def initialize(config:, output:)
+ # @param [BenchmarkDriver::Context] contexts
+ def initialize(config:, output:, contexts:)
  @config = config
  @output = output
+ @contexts = contexts
  end

  # This method is dynamically called by `BenchmarkDriver::JobRunner.run`
@@ -77,7 +79,8 @@ class BenchmarkDriver::Runner::RubyStdout
  @output.with_benchmark do
  jobs.each do |job|
  @output.with_job(name: job.name) do
- @config.executables.each do |exec|
+ @contexts.each do |context|
+ exec = context.executable
  repeat_params = { config: @config, larger_better: metric.larger_better }
  value, environment = BenchmarkDriver::Repeater.with_repeat(repeat_params) do
  stdout = with_chdir(job.working_directory) do
@@ -1,3 +1,3 @@
  module BenchmarkDriver
- VERSION = '0.12.0'
+ VERSION = '0.13.0'
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: benchmark_driver
  version: !ruby/object:Gem::Version
- version: 0.12.0
+ version: 0.13.0
  platform: ruby
  authors:
  - Takashi Kokubun
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2018-07-01 00:00:00.000000000 Z
+ date: 2018-07-02 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: bundler