benchmark_driver 0.10.16 → 0.11.0

Files changed (48)
  1. checksums.yaml +5 -5
  2. data/.rspec +1 -0
  3. data/.travis.yml +1 -1
  4. data/CHANGELOG.md +16 -0
  5. data/README.md +25 -9
  6. data/Rakefile +5 -48
  7. data/benchmark-driver/.gitignore +12 -0
  8. data/benchmark-driver/CODE_OF_CONDUCT.md +74 -0
  9. data/benchmark-driver/Gemfile +6 -0
  10. data/benchmark-driver/LICENSE.txt +21 -0
  11. data/benchmark-driver/README.md +8 -0
  12. data/benchmark-driver/Rakefile +1 -0
  13. data/benchmark-driver/benchmark-driver.gemspec +21 -0
  14. data/benchmark-driver/bin/console +14 -0
  15. data/benchmark-driver/bin/setup +8 -0
  16. data/benchmark-driver/lib/benchmark-driver.rb +1 -0
  17. data/benchmark-driver/lib/benchmark/driver.rb +1 -0
  18. data/benchmark_driver.gemspec +3 -1
  19. data/exe/benchmark-driver +3 -3
  20. data/lib/benchmark_driver/config.rb +3 -3
  21. data/lib/benchmark_driver/metric.rb +70 -0
  22. data/lib/benchmark_driver/output.rb +62 -8
  23. data/lib/benchmark_driver/output/compare.rb +68 -52
  24. data/lib/benchmark_driver/output/markdown.rb +21 -16
  25. data/lib/benchmark_driver/output/record.rb +26 -21
  26. data/lib/benchmark_driver/output/simple.rb +21 -16
  27. data/lib/benchmark_driver/runner.rb +5 -3
  28. data/lib/benchmark_driver/runner/command_stdout.rb +19 -19
  29. data/lib/benchmark_driver/runner/ips.rb +30 -29
  30. data/lib/benchmark_driver/runner/memory.rb +15 -16
  31. data/lib/benchmark_driver/runner/once.rb +11 -15
  32. data/lib/benchmark_driver/runner/recorded.rb +28 -21
  33. data/lib/benchmark_driver/runner/ruby_stdout.rb +157 -0
  34. data/lib/benchmark_driver/runner/time.rb +7 -10
  35. data/lib/benchmark_driver/version.rb +1 -1
  36. metadata +46 -16
  37. data/examples/exec_blank.rb +0 -13
  38. data/examples/exec_blank_simple.rb +0 -13
  39. data/examples/yaml/array_duration_time.yml +0 -2
  40. data/examples/yaml/array_loop.yml +0 -3
  41. data/examples/yaml/blank_hash.yml +0 -8
  42. data/examples/yaml/blank_hash_array.yml +0 -10
  43. data/examples/yaml/blank_loop.yml +0 -9
  44. data/examples/yaml/blank_string.yml +0 -6
  45. data/examples/yaml/blank_string_array.yml +0 -8
  46. data/examples/yaml/example_multi.yml +0 -6
  47. data/examples/yaml/example_single.yml +0 -4
  48. data/lib/benchmark_driver/metrics.rb +0 -17
data/lib/benchmark_driver/metric.rb
@@ -0,0 +1,70 @@
+ require 'benchmark_driver/struct'
+
+ # All benchmark results should be expressed by this model.
+ module BenchmarkDriver
+   # BenchmarkDriver returns benchmark results with the following nested Hash structure:
+   #   {
+   #     BenchmarkDriver::Job => {
+   #       BenchmarkDriver::Context => {
+   #         BenchmarkDriver::Metric => Float
+   #       }
+   #     }
+   #   }
+
+   # A kind of thing to be measured
+   Metric = ::BenchmarkDriver::Struct.new(
+     :name,          # @param [String] - Metric name or description like "Max Resident Set Size"
+     :unit,          # @param [String] - A unit like "MiB"
+     :larger_better, # @param [TrueClass,FalseClass] - If true, larger value is preferred when measured multiple times.
+     :worse_word,    # @param [String] - A label shown when the value is worse.
+     defaults: { larger_better: true, worse_word: 'slower' },
+   )
+
+   # Benchmark conditions used to measure a metric
+   Context = ::BenchmarkDriver::Struct.new(
+     :name,        # @param [String] - Name of the context
+     :executable,  # @param [BenchmarkDriver::Config::Executable] - Measured Ruby executable
+     :gems,        # @param [Hash{ String => String,nil }] - Gem -> version pairs used for the benchmark
+     :prelude,     # @param [String,nil] - Context specific setup script (optional)
+     :duration,    # @param [Float,nil] - Time taken to run the benchmark job (optional)
+     :loop_count,  # @param [Integer,nil] - Times to run the benchmark job (optional)
+     :environment, # @param [Hash] - Any other key -> value pairs to express the benchmark context
+     defaults: { gems: {}, environment: {} },
+   )
+
+   # Holding identifier of measured workload
+   Job = ::BenchmarkDriver::Struct.new(
+     :name, # @param [String] - Name of the benchmark task
+   )
+
+   #=[RubyBench mapping]=================================|
+   #
+   # BenchmarkRun:
+   #   result      -> { context.name => value } | { "default"=>"44.666666666666664", "default_jit"=>"59.333333333333336" }
+   #   environment -> context                   | "---\nRuby version: 'ruby 2.6.0dev (2018-05-14 trunk 63417) [x86_64-linux]\n\n'\nChecksum: '59662'\n"
+   #   initiator   -> (not supported)           | #<Commit sha1: "6f0de6ed9...", message: "error.c: check redefined ...", url: "https://github.com/tgxworld/ruby/commit/6f0de6ed98...", repo_id: 6>
+   #
+   # BenchmarkType:
+   #   category   -> job.name        | "app_erb", "Optcarrot Lan_Master.nes"
+   #   script_url -> (not supported) | "https://raw.githubusercontent.com/mame/optcarrot/master/lib/optcarrot/nes.rb"
+   #   repo       -> (not supported) | #<Repo name: "ruby", url: "https://github.com/tgxworld/ruby">
+   #   repo.organization -> (not supported) | #<Organization name: "ruby", url: "https://github.com/tgxworld/">
+   #
+   # BenchmarkResultType:
+   #   name -> metric.name | "Number of frames"
+   #   unit -> metric.unit | "fps"
+   #
+   #=====================================================|
+
+   #----
+   # legacy
+
+   module Metrics
+     Type = ::BenchmarkDriver::Struct.new(
+       :unit,          # @param [String] - A label of unit for the value.
+       :larger_better, # @param [TrueClass,FalseClass] - If true, larger value is preferred when measured multiple times.
+       :worse_word,    # @param [String] - A label shown when the value is worse.
+       defaults: { larger_better: true, worse_word: 'slower' },
+     )
+   end
+ end
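To make the new result model concrete, here is a small hypothetical sketch (not part of the diff) showing how the structs compose into the nested Job => Context => Metric => Float Hash documented above. The metric name, job name, context name, and value are invented; keyword construction mirrors how output.rb builds Job and Context below, and it assumes the installed gem is on the load path.

    require 'benchmark_driver/metric'

    # All names and numbers here are made up for illustration.
    metric  = BenchmarkDriver::Metric.new(name: 'Iterations per second', unit: 'i/s')
    job     = BenchmarkDriver::Job.new(name: 'app_erb')
    context = BenchmarkDriver::Context.new(
      name: 'ruby-trunk',
      executable: nil, # normally a BenchmarkDriver::Config::Executable
    )

    # The nested result Hash described in the comment above:
    result = { job => { context => { metric => 105_912.3 } } }
    result[job][context][metric] # => 105912.3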
data/lib/benchmark_driver/output.rb
@@ -1,29 +1,83 @@
  module BenchmarkDriver
-   module Output
+   # BenchmarkDriver::Runner::* --> BenchmarkDriver::Output --> BenchmarkDriver::Output::*
+   #
+   # This is interface between runner plugin and output plugin, so that they can be loosely
+   # coupled and to simplify implementation of both runner and output.
+   #
+   # Runner should call its interface in the following manner:
+   #   metrics=
+   #   with_warmup
+   #     with_job(name:)
+   #       with_context(name:, executable:, duration: nil, loop_count: nil)
+   #         report(value:, metric:)
+   #   with_benchmark
+   #     with_job(name:)
+   #       with_context(name:, executable:, duration: nil, loop_count: nil)
+   #         report(value:, metric:)
+   class Output
      require 'benchmark_driver/output/compare'
      require 'benchmark_driver/output/markdown'
      require 'benchmark_driver/output/record'
      require 'benchmark_driver/output/simple'
-   end

-   class << Output
      # BenchmarkDriver::Output is pluggable.
      # Create `BenchmarkDriver::Output::Foo` as benchmark_dirver-output-foo.gem and specify `-o foo`.
      #
      # @param [String] type
-     def find(type)
+     # @param [Array<String>] job_names
+     # @param [Array<String>] context_names
+     def initialize(type:, job_names:, context_names:)
        if type.include?(':')
          raise ArgumentError.new("Output type '#{type}' cannot contain ':'")
        end

        require "benchmark_driver/output/#{type}" # for plugin
-       ::BenchmarkDriver::Output.const_get(camelize(type), false)
+       camelized = type.split('_').map(&:capitalize).join
+
+       @output = ::BenchmarkDriver::Output.const_get(camelized, false).new(
+         job_names: job_names,
+         context_names: context_names,
+       )
+     end
+
+     # @param [Array<BenchmarkDriver::Metric>] metrics
+     def metrics=(metrics)
+       @output.metrics = metrics
      end

-     private
+     def with_warmup(&block)
+       @output.with_warmup(&block)
+     end
+
+     def with_benchmark(&block)
+       @output.with_benchmark(&block)
+     end
+
+     # @param [String] name
+     def with_job(name:, &block)
+       job = BenchmarkDriver::Job.new(name: name)
+       @output.with_job(job) do
+         block.call
+       end
+     end
+
+     # @param [String] name
+     # @param [BenchmarkDriver::Config::Executable] executable
+     # @param [Float] duration
+     # @param [Integer] loop_count
+     def with_context(name:, executable:, duration: nil, loop_count: nil, environment: {}, &block)
+       context = BenchmarkDriver::Context.new(
+         name: name, executable: executable, duration: duration, loop_count: loop_count, environment: environment,
+       )
+       @output.with_context(context) do
+         block.call
+       end
+     end

-     def camelize(str)
-       str.split('_').map(&:capitalize).join
+     # @param [Float] value
+     # @param [BenchmarkDriver::Metric] metic
+     def report(value:, metric:)
+       @output.report(value: value, metric: metric)
      end
    end
  end
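Given the delegation above, an output plugin only needs to respond to metrics=, with_warmup, with_benchmark, with_job, with_context, and report(value:, metric:). The sketch below is a hypothetical minimal plugin: the class name Foo and the printed format are invented, and only the method names mirror what BenchmarkDriver::Output delegates to.

    # Minimal sketch of an output plugin under the new interface (assumed name "foo").
    class BenchmarkDriver::Output::Foo
      # Receives Array<BenchmarkDriver::Metric> before the run starts
      attr_writer :metrics

      def initialize(job_names:, context_names:)
        @job_names = job_names
        @context_names = context_names
      end

      def with_warmup(&block)
        @benchmark = false
        block.call
      end

      def with_benchmark(&block)
        @benchmark = true
        block.call
      end

      def with_job(job, &block)
        @job = job # BenchmarkDriver::Job
        block.call
      end

      def with_context(context, &block)
        @context = context # BenchmarkDriver::Context
        block.call
      end

      def report(value:, metric:)
        return unless @benchmark # skip warmup values in this sketch
        puts "#{@job.name} (#{@context.name}): #{value} #{metric.unit}"
      end
    end

Shipped in a gem that makes it requireable as benchmark_driver/output/foo, it would be loaded by the require "benchmark_driver/output/#{type}" call above and selected with -o foo.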
data/lib/benchmark_driver/output/compare.rb
@@ -2,14 +2,15 @@
  class BenchmarkDriver::Output::Compare
    NAME_LENGTH = 20

-   # @param [BenchmarkDriver::Metrics::Type] metrics_type
-   attr_writer :metrics_type
-
-   # @param [Array<BenchmarkDriver::*::Job>] jobs
-   # @param [Array<BenchmarkDriver::Config::Executable>] executables
-   def initialize(jobs:, executables:)
-     @jobs = jobs
-     @executables = executables
+   # @param [Array<BenchmarkDriver::Metric>] metrics
+   attr_writer :metrics
+
+   # @param [Array<String>] job_names
+   # @param [Array<String>] context_names
+   def initialize(job_names:, context_names:)
+     @job_names = job_names
+     @context_names = context_names
+     @name_length = [job_names.map(&:length).max, NAME_LENGTH].max
    end

    def with_warmup(&block)
@@ -21,14 +22,16 @@ class BenchmarkDriver::Output::Compare
    end

    def with_benchmark(&block)
-     @metrics_by_job = Hash.new { |h, k| h[k] = [] }
+     @job_context_values = Hash.new do |h1, k1|
+       h1[k1] = Hash.new { |h2, k2| h2[k2] = [] }
+     end

      without_stdout_buffering do
        $stdout.puts 'Calculating -------------------------------------'
-       if @executables.size > 1
-         $stdout.print(' ' * NAME_LENGTH)
-         @executables.each do |executable|
-           $stdout.print(' %10s ' % executable.name)
+       if @context_names.size > 1
+         $stdout.print(' ' * @name_length)
+         @context_names.each do |context_name|
+           $stdout.print(' %10s ' % context_name)
          end
          $stdout.puts
        end
@@ -36,28 +39,30 @@ class BenchmarkDriver::Output::Compare
        block.call
      end
    ensure
-     if @executables.size > 1
+     if @context_names.size > 1
        compare_executables
-     elsif @jobs.size > 1
+     elsif @job_names.size > 1
        compare_jobs
      end
    end

-   # @param [BenchmarkDriver::*::Job] job
+   # @param [BenchmarkDriver::Job] job
    def with_job(job, &block)
-     if job.name.length > NAME_LENGTH
-       $stdout.puts(job.name)
+     name = job.name
+     if name.length > @name_length
+       $stdout.puts(name)
      else
-       $stdout.print("%#{NAME_LENGTH}s" % job.name)
+       $stdout.print("%#{@name_length}s" % name)
      end
-     @current_job = job
-     @job_metrics = []
+     @job = name
+     @job_contexts = []
      block.call
    ensure
-     $stdout.print(@metrics_type.unit)
-     if job.respond_to?(:loop_count) && job.loop_count
-       $stdout.print(" - #{humanize(job.loop_count)} times")
-       if @job_metrics.all? { |metrics| metrics.duration }
+     $stdout.print(@metrics.first.unit)
+     loop_count = @job_contexts.first.loop_count
+     if loop_count && @job_contexts.all? { |c| c.loop_count == loop_count }
+       $stdout.print(" - #{humanize(loop_count)} times")
+       if @job_contexts.all? { |context| !context.duration.nil? }
          $stdout.print(" in")
          show_durations
        end
@@ -65,28 +70,35 @@ class BenchmarkDriver::Output::Compare
      $stdout.puts
    end

-   # @param [BenchmarkDriver::Metrics] metrics
-   def report(metrics)
-     if defined?(@metrics_by_job)
-       @metrics_by_job[@current_job] << metrics
+   # @param [BenchmarkDriver::Context] context
+   def with_context(context, &block)
+     @context = context
+     @job_contexts << context
+     block.call
+   end
+
+   # @param [Float] value
+   # @param [BenchmarkDriver::Metric] metic
+   def report(value:, metric:)
+     if defined?(@job_context_values)
+       @job_context_values[@job][@context] << value
      end

-     @job_metrics << metrics
-     $stdout.print("#{humanize(metrics.value, [10, metrics.executable.name.length].max)} ")
+     $stdout.print("#{humanize(value, [10, @context.name.length].max)} ")
    end

    private

    def show_durations
-     @job_metrics.each do |metrics|
-       $stdout.print(' %3.6fs' % metrics.duration)
+     @job_contexts.each do |context|
+       $stdout.print(' %3.6fs' % context.duration)
      end

      # Show pretty seconds / clocks too. As it takes long width, it's shown only with a single executable.
-     if @job_metrics.size == 1
-       metrics = @job_metrics.first
-       sec = metrics.duration
-       iter = @current_job.loop_count
+     if @job_contexts.size == 1
+       context = @job_contexts.first
+       sec = context.duration
+       iter = context.loop_count
        if File.exist?('/proc/cpuinfo') && (clks = estimate_clock(sec, iter)) < 1_000
          $stdout.print(" (#{pretty_sec(sec, iter)}/i, #{clks}clocks/i)")
        else
@@ -148,16 +160,20 @@

    def compare_jobs
      $stdout.puts "\nComparison:"
-     results = @metrics_by_job.map { |job, metrics| Result.new(job: job, metrics: metrics.first) }
+     results = @job_context_values.flat_map do |job, context_values|
+       context_values.map { |context, values| Result.new(job: job, value: values.first, executable: context.executable) }
+     end
      show_results(results, show_executable: false)
    end

    def compare_executables
      $stdout.puts "\nComparison:"

-     @metrics_by_job.each do |job, metrics|
-       $stdout.puts("%#{NAME_LENGTH + 2 + 11}s" % job.name)
-       results = metrics.map { |metrics| Result.new(job: job, metrics: metrics) }
+     @job_context_values.each do |job, context_values|
+       $stdout.puts("%#{@name_length + 2 + 11}s" % job)
+       results = context_values.flat_map do |context, values|
+         values.map { |value| Result.new(job: job, value: value, executable: context.executable) }
+       end
        show_results(results, show_executable: true)
      end
    end
@@ -166,32 +182,32 @@ class BenchmarkDriver::Output::Compare
    # @param [TrueClass,FalseClass] show_executable
    def show_results(results, show_executable:)
      results = results.sort_by do |result|
-       if @metrics_type.larger_better
-         -result.metrics.value
+       if @metrics.first.larger_better
+         -result.value
        else
-         result.metrics.value
+         result.value
        end
      end

      first = results.first
      results.each do |result|
        if result != first
-         if @metrics_type.larger_better
-           ratio = (first.metrics.value / result.metrics.value)
+         if @metrics.first.larger_better
+           ratio = (first.value / result.value)
          else
-           ratio = (result.metrics.value / first.metrics.value)
+           ratio = (result.value / first.value)
          end
-         slower = "- %.2fx #{@metrics_type.worse_word}" % ratio
+         slower = "- %.2fx #{@metrics.first.worse_word}" % ratio
        end
        if show_executable
-         name = result.metrics.executable.name
+         name = result.executable.name
        else
-         name = result.job.name
+         name = result.job
        end
-       $stdout.puts("%#{NAME_LENGTH}s: %11.1f %s #{slower}" % [name, result.metrics.value, @metrics_type.unit])
+       $stdout.puts("%#{@name_length}s: %11.1f %s #{slower}" % [name, result.value, @metrics.first.unit])
      end
      $stdout.puts
    end

-   Result = ::BenchmarkDriver::Struct.new(:job, :metrics)
+   Result = ::BenchmarkDriver::Struct.new(:job, :value, :executable)
  end
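The comparison lines are derived the same way as before, only now from plain Float values instead of Metrics objects. A quick restatement of the math in show_results, with invented numbers:

    # For a larger-is-better metric (e.g. i/s), results are sorted best-first and
    # each slower result is reported relative to the best one.
    best  = 120.0
    other =  80.0
    ratio = best / other                  # => 1.5
    format('- %.2fx %s', ratio, 'slower') # => "- 1.50x slower"
    # For a smaller-is-better metric (larger_better: false), the division flips:
    # ratio = other / best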
data/lib/benchmark_driver/output/markdown.rb
@@ -1,15 +1,14 @@
  class BenchmarkDriver::Output::Markdown
    NAME_LENGTH = 8

-   # @param [BenchmarkDriver::Metrics::Type] metrics_type
-   attr_writer :metrics_type
+   # @param [Array<BenchmarkDriver::Metric>] metrics
+   attr_writer :metrics

-   # @param [Array<BenchmarkDriver::*::Job>] jobs
-   # @param [Array<BenchmarkDriver::Config::Executable>] executables
-   def initialize(jobs:, executables:)
-     @jobs = jobs
-     @executables = executables
-     @name_length = jobs.map { |j| j.name.size }.max
+   # @param [Array<String>] job_names
+   # @param [Array<String>] context_names
+   def initialize(job_names:, context_names:)
+     @context_names = context_names
+     @name_length = job_names.map(&:size).max
    end

    def with_warmup(&block)
@@ -25,18 +24,18 @@ class BenchmarkDriver::Output::Markdown
      @with_benchmark = true
      without_stdout_buffering do
        # Show header
-       $stdout.puts "# benchmark results (#{@metrics_type.unit})\n\n"
+       $stdout.puts "# #{@metrics.first.name} (#{@metrics.first.unit})\n\n"

        # Show executable names
        $stdout.print("|#{' ' * @name_length} ")
-       @executables.each do |executable|
-         $stdout.print("|%#{NAME_LENGTH}s" % executable.name) # same size as humanize
+       @context_names.each do |context_name|
+         $stdout.print("|%#{NAME_LENGTH}s" % context_name) # same size as humanize
        end
        $stdout.puts('|')

        # Show header separator
        $stdout.print("|:#{'-' * (@name_length - 1)}--")
-       @executables.each do |executable|
+       @context_names.each do |context_name|
          $stdout.print("|:#{'-' * (NAME_LENGTH - 1)}") # same size as humanize
        end
        $stdout.puts('|')
@@ -47,7 +46,7 @@ class BenchmarkDriver::Output::Markdown
      @with_benchmark = false
    end

-   # @param [BenchmarkDriver::*::Job] job
+   # @param [BenchmarkDriver::Job] job
    def with_job(job, &block)
      if @with_benchmark
        $stdout.print("|%-#{@name_length}s " % job.name)
@@ -59,10 +58,16 @@ class BenchmarkDriver::Output::Markdown
      end
    end

-   # @param [BenchmarkDriver::Metrics] metrics
-   def report(metrics)
+   # @param [BenchmarkDriver::Context] context
+   def with_context(context, &block)
+     block.call
+   end
+
+   # @param [Floa] value
+   # @param [BenchmarkDriver::Metric] metic
+   def report(value:, metric:)
      if @with_benchmark
-       $stdout.print("|%#{NAME_LENGTH}s" % humanize(metrics.value))
+       $stdout.print("|%#{NAME_LENGTH}s" % humanize(value))
      else
        $stdout.print '.'
      end
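With this change the Markdown header switches from the fixed "benchmark results (unit)" title to the first metric's name and unit, and the columns are labeled by context names rather than executable names. Roughly, the emitted table looks like the mock below; the metric name, context names, and numbers are invented and the spacing is approximate:

    # Iterations per second (i/s)

    |        |  before|   after|
    |:-------|:-------|:-------|
    |app_erb |  105.9k|  125.3k|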
data/lib/benchmark_driver/output/record.rb
@@ -1,14 +1,16 @@
  class BenchmarkDriver::Output::Record
-   # @param [BenchmarkDriver::Metrics::Type] metrics_type
-   attr_writer :metrics_type
-
-   # @param [Array<BenchmarkDriver::*::Job>] jobs
-   # @param [Array<BenchmarkDriver::Config::Executable>] executables
-   def initialize(jobs:, executables:)
-     @jobs = jobs
-     @executables = executables
-     @metrics_by_job = Hash.new do |h1, k1|
-       h1[k1] = Hash.new { |h2, k2| h2[k2] = [] }
+   # @param [Array<BenchmarkDriver::Metric>] metrics
+   attr_writer :metrics
+
+   # @param [Array<String>] job_names
+   # @param [Array<String>] context_names
+   def initialize(job_names:, context_names:)
+     @job_warmup_context_metric_value = Hash.new do |h1, k1|
+       h1[k1] = Hash.new do |h2, k2|
+         h2[k2] = Hash.new do |h3, k3|
+           h3[k3] = {}
+         end
+       end
      end
    end

@@ -29,20 +31,23 @@ class BenchmarkDriver::Output::Record
      save_record
    end

-   # @param [BenchmarkDriver::*::Job] job
+   # @param [BenchmarkDriver::Job] job
    def with_job(job, &block)
-     @current_job = job
+     @job = job.name
      block.call
    end

-   # @param [BenchmarkDriver::Metrics] metrics
-   def report(metrics)
+   # @param [BenchmarkDriver::Context] context
+   def with_context(context, &block)
+     @context = context
+     block.call
+   end
+
+   # @param [Float] value
+   # @param [BenchmarkDriver::Metric] metic
+   def report(value:, metric:)
      $stdout.print '.'
-     if @with_benchmark
-       @metrics_by_job[@current_job][:benchmark] << metrics
-     else
-       @metrics_by_job[@current_job][:warmup] << metrics
-     end
+     @job_warmup_context_metric_value[@job][!@with_benchmark][@context][metric] = value
    end

    private
@@ -51,8 +56,8 @@ class BenchmarkDriver::Output::Record
      jobs = @benchmark_metrics
      yaml = {
        'type' => 'recorded',
-       'metrics_by_job' => @metrics_by_job,
-       'metrics_type' => @metrics_type,
+       'job_warmup_context_metric_value' => @job_warmup_context_metric_value,
+       'metrics' => @metrics,
      }.to_yaml
      File.write('benchmark_driver.record.yml', yaml)
    end
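For reference, here is a hypothetical sketch of the structure Record now accumulates and serializes to benchmark_driver.record.yml; the job name, context, metric, and value are invented. The second-level key is !@with_benchmark, so false holds benchmark values and true holds warmup values:

    require 'benchmark_driver/metric'

    # Invented placeholders standing in for real run data.
    metric  = BenchmarkDriver::Metric.new(name: 'Iterations per second', unit: 'i/s')
    context = BenchmarkDriver::Context.new(name: 'default', executable: nil)

    record = {
      'type' => 'recorded',
      'job_warmup_context_metric_value' => {
        'app_erb' => {                        # job name
          false => {                          # false = benchmark values, true = warmup values
            context => { metric => 105_912.3 },
          },
        },
      },
      'metrics' => [metric],
    }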