benchmark_driver 0.10.16 → 0.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +5 -5
- data/.rspec +1 -0
- data/.travis.yml +1 -1
- data/CHANGELOG.md +16 -0
- data/README.md +25 -9
- data/Rakefile +5 -48
- data/benchmark-driver/.gitignore +12 -0
- data/benchmark-driver/CODE_OF_CONDUCT.md +74 -0
- data/benchmark-driver/Gemfile +6 -0
- data/benchmark-driver/LICENSE.txt +21 -0
- data/benchmark-driver/README.md +8 -0
- data/benchmark-driver/Rakefile +1 -0
- data/benchmark-driver/benchmark-driver.gemspec +21 -0
- data/benchmark-driver/bin/console +14 -0
- data/benchmark-driver/bin/setup +8 -0
- data/benchmark-driver/lib/benchmark-driver.rb +1 -0
- data/benchmark-driver/lib/benchmark/driver.rb +1 -0
- data/benchmark_driver.gemspec +3 -1
- data/exe/benchmark-driver +3 -3
- data/lib/benchmark_driver/config.rb +3 -3
- data/lib/benchmark_driver/metric.rb +70 -0
- data/lib/benchmark_driver/output.rb +62 -8
- data/lib/benchmark_driver/output/compare.rb +68 -52
- data/lib/benchmark_driver/output/markdown.rb +21 -16
- data/lib/benchmark_driver/output/record.rb +26 -21
- data/lib/benchmark_driver/output/simple.rb +21 -16
- data/lib/benchmark_driver/runner.rb +5 -3
- data/lib/benchmark_driver/runner/command_stdout.rb +19 -19
- data/lib/benchmark_driver/runner/ips.rb +30 -29
- data/lib/benchmark_driver/runner/memory.rb +15 -16
- data/lib/benchmark_driver/runner/once.rb +11 -15
- data/lib/benchmark_driver/runner/recorded.rb +28 -21
- data/lib/benchmark_driver/runner/ruby_stdout.rb +157 -0
- data/lib/benchmark_driver/runner/time.rb +7 -10
- data/lib/benchmark_driver/version.rb +1 -1
- metadata +46 -16
- data/examples/exec_blank.rb +0 -13
- data/examples/exec_blank_simple.rb +0 -13
- data/examples/yaml/array_duration_time.yml +0 -2
- data/examples/yaml/array_loop.yml +0 -3
- data/examples/yaml/blank_hash.yml +0 -8
- data/examples/yaml/blank_hash_array.yml +0 -10
- data/examples/yaml/blank_loop.yml +0 -9
- data/examples/yaml/blank_string.yml +0 -6
- data/examples/yaml/blank_string_array.yml +0 -8
- data/examples/yaml/example_multi.yml +0 -6
- data/examples/yaml/example_single.yml +0 -4
- data/lib/benchmark_driver/metrics.rb +0 -17

data/lib/benchmark_driver/output/simple.rb

```diff
@@ -1,15 +1,14 @@
 class BenchmarkDriver::Output::Simple
   NAME_LENGTH = 8
 
-  # @param [BenchmarkDriver::
-  attr_writer :
+  # @param [Array<BenchmarkDriver::Metric>] metrics
+  attr_writer :metrics
 
-  # @param [Array<
-  # @param [Array<
-  def initialize(
-    @
-    @
-    @name_length = jobs.map { |j| j.name.size }.max
+  # @param [Array<String>] job_names
+  # @param [Array<String>] context_names
+  def initialize(job_names:, context_names:)
+    @context_names = context_names
+    @name_length = job_names.map(&:size).max
   end
 
   def with_warmup(&block)
@@ -25,13 +24,13 @@ class BenchmarkDriver::Output::Simple
     @with_benchmark = true
     without_stdout_buffering do
       # Show header
-      $stdout.puts "
+      $stdout.puts "#{@metrics.first.name} (#{@metrics.first.unit}):"
 
       # Show executable names
-      if @
+      if @context_names.size > 1
         $stdout.print("#{' ' * @name_length} ")
-        @
-        $stdout.print("%#{NAME_LENGTH}s " %
+        @context_name.each do |context_name|
+          $stdout.print("%#{NAME_LENGTH}s " % context_name)
         end
         $stdout.puts
       end
@@ -42,7 +41,7 @@ class BenchmarkDriver::Output::Simple
     @with_benchmark = false
   end
 
-  # @param [BenchmarkDriver
+  # @param [BenchmarkDriver::Job] job
   def with_job(job, &block)
     if @with_benchmark
       $stdout.print("%-#{@name_length}s " % job.name)
@@ -54,10 +53,16 @@ class BenchmarkDriver::Output::Simple
     end
   end
 
-  # @param [BenchmarkDriver::
-  def
+  # @param [BenchmarkDriver::Context] context
+  def with_context(context, &block)
+    block.call
+  end
+
+  # @param [Float] value
+  # @param [BenchmarkDriver::Metric] metic
+  def report(value:, metric:)
     if @with_benchmark
-      $stdout.print("%#{NAME_LENGTH}s " % humanize(
+      $stdout.print("%#{NAME_LENGTH}s " % humanize(value))
     else
       $stdout.print '.'
     end
```
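
The hunks above define the plugin-facing half of the new output API: the driving runner assigns `metrics=` once, then nests `with_warmup`/`with_benchmark`, `with_job`, `with_context`, and finally `report`. The hand-driven sketch below is not part of the gem; it assumes benchmark_driver 0.11.0 is installed, and the `OpenStruct` stand-ins for the Metric/Job/Context objects and the sample numbers are made up for illustration.

```ruby
require 'benchmark_driver'                # assumes the 0.11.0 gem is installed
require 'benchmark_driver/output/simple'
require 'ostruct'

# Stand-ins for BenchmarkDriver::Metric / Job / Context (illustrative only).
metric  = OpenStruct.new(name: 'Iteration per second', unit: 'i/s')
job     = OpenStruct.new(name: 'app_answer')
context = OpenStruct.new(name: 'ruby-2.5.0')

output = BenchmarkDriver::Output::Simple.new(
  job_names: [job.name],          # only used to size the left-hand column
  context_names: [context.name],  # a header row is printed when there are 2+
)
output.metrics = [metric]

output.with_warmup do             # prints a '.' per report while warming up
  output.with_job(job) do
    output.with_context(context) { output.report(value: 11_234.5, metric: metric) }
  end
end

output.with_benchmark do          # prints "Iteration per second (i/s):" and result rows
  output.with_job(job) do
    output.with_context(context) { output.report(value: 12_345.6, metric: metric) }
  end
end
```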

data/lib/benchmark_driver/runner.rb

```diff
@@ -5,6 +5,7 @@ module BenchmarkDriver
     require 'benchmark_driver/runner/memory'
     require 'benchmark_driver/runner/once'
     require 'benchmark_driver/runner/recorded'
+    require 'benchmark_driver/runner/ruby_stdout'
     require 'benchmark_driver/runner/time'
   end
 
@@ -28,9 +29,10 @@ module BenchmarkDriver
 
       jobs.group_by(&:class).each do |klass, jobs_group|
         runner = runner_for(klass)
-        output = Output.
-
-
+        output = Output.new(
+          type: config.output_type,
+          job_names: jobs.map(&:name),
+          context_names: config.executables.map(&:name),
         )
         with_clean_env do
           runner.new(config: runner_config, output: output).run(jobs)
```
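
The runner now builds a single `Output` object from the output type, the job names, and the executable names. `lib/benchmark_driver/output.rb` itself (+62 −8) is not included in this diff, but the runner hunks that follow call `with_job(name:)` and `with_context(name:, executable:, ...)` while the Simple plugin above receives job and context *objects*, so the wrapper evidently builds those objects and delegates. The following is a rough, hypothetical sketch of that bridge; `OutputBridgeSketch` and its `Job`/`Context` structs are made-up names, and the real class surely differs in detail.

```ruby
module BenchmarkDriver
  # Hypothetical bridge, illustrating only the runner-facing -> plugin-facing
  # translation; not the gem's actual Output class.
  class OutputBridgeSketch
    Job     = Struct.new(:name, keyword_init: true)
    Context = Struct.new(:name, :executable, :duration, :loop_count, keyword_init: true)

    def initialize(plugin)            # e.g. an Output::Simple instance
      @plugin = plugin
    end

    def metrics=(metrics)
      @plugin.metrics = metrics
    end

    def with_warmup(&block);    @plugin.with_warmup(&block);    end
    def with_benchmark(&block); @plugin.with_benchmark(&block); end

    # Runners pass plain names; the plugin expects an object responding to #name.
    def with_job(name:, &block)
      @plugin.with_job(Job.new(name: name), &block)
    end

    def with_context(name:, executable:, duration: nil, loop_count: nil, &block)
      context = Context.new(name: name, executable: executable,
                            duration: duration, loop_count: loop_count)
      @plugin.with_context(context, &block)
    end

    def report(value:, metric:)
      @plugin.report(value: value, metric: metric)
    end
  end
end
```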

data/lib/benchmark_driver/runner/command_stdout.rb

```diff
@@ -1,17 +1,17 @@
 require 'benchmark_driver/struct'
-require 'benchmark_driver/
+require 'benchmark_driver/metric'
 require 'tempfile'
 require 'shellwords'
 require 'open3'
 
-#
+# Use stdout of ruby command
 class BenchmarkDriver::Runner::CommandStdout
   # JobParser returns this, `BenchmarkDriver::Runner.runner_for` searches "*::Job"
   Job = ::BenchmarkDriver::Struct.new(
     :name, # @param [String] name - This is mandatory for all runner
     :command, # @param [Array<String>]
     :working_directory, # @param [String,NilClass]
-    :
+    :metrics, # @param [Array<BenchmarkDriver::Metric>]
     :stdout_to_metrics, # @param [String]
   )
   # Dynamically fetched and used by `BenchmarkDriver::JobParser.parse`
@@ -26,24 +26,27 @@ class BenchmarkDriver::Runner::CommandStdout
         name: name,
         command: command.shellsplit,
         working_directory: working_directory,
-
+        metrics: parse_metrics(metrics_type),
         stdout_to_metrics: stdout_to_metrics,
       )
     end
 
     private
 
-    def
-
+    def parse_metrics(unit:, name: nil, larger_better: nil, worse_word: nil)
+      name ||= unit
+      metric = BenchmarkDriver::Metric.new(
+        name: name,
         unit: unit,
         larger_better: larger_better,
        worse_word: worse_word,
       )
+      [metric]
     end
   end
 
   # @param [BenchmarkDriver::Config::RunnerConfig] config
-  # @param [BenchmarkDriver::Output
+  # @param [BenchmarkDriver::Output] output
   def initialize(config:, output:)
     @config = config
     @output = output
@@ -52,14 +55,14 @@ class BenchmarkDriver::Runner::CommandStdout
   # This method is dynamically called by `BenchmarkDriver::JobRunner.run`
   # @param [Array<BenchmarkDriver::Default::Job>] jobs
   def run(jobs)
-
-    @output.
+    metric = jobs.first.metrics.first
+    @output.metrics = [metric]
 
     @output.with_benchmark do
       jobs.each do |job|
-        @output.with_job(job) do
+        @output.with_job(name: job.name) do
           @config.executables.each do |exec|
-            best_value = with_repeat(
+            best_value = with_repeat(metric) do
              stdout = with_chdir(job.working_directory) do
                with_ruby_prefix(exec) { execute(*exec.command, *job.command) }
              end
@@ -69,12 +72,9 @@ class BenchmarkDriver::Runner::CommandStdout
              ).metrics_value
            end
 
-            @output.
-
-
-              executable: exec,
-            )
-          )
+            @output.with_context(name: exec.name, executable: exec) do
+              @output.report(value: best_value, metric: metric)
+            end
          end
        end
      end
@@ -108,12 +108,12 @@ class BenchmarkDriver::Runner::CommandStdout
   end
 
   # Return multiple times and return the best metrics
-  def with_repeat(
+  def with_repeat(metric, &block)
     values = @config.repeat_count.times.map do
       block.call
     end
     values.sort_by do |value|
-      if
+      if metric.larger_better
        value
      else
        -value
```
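
`lib/benchmark_driver/metric.rb` (+70 lines) replaces the old `metrics.rb` and is also not shown in this diff, but the call sites above and in the runner hunks below pin down its four user-facing attributes. A minimal stand-in that those call sites would accept (not the gem's real class):

```ruby
# Minimal stand-in for BenchmarkDriver::Metric: just the four attributes the
# runners construct it with in this release.
Metric = Struct.new(:name, :unit, :larger_better, :worse_word, keyword_init: true)

Metric.new(name: 'Iteration per second', unit: 'i/s')        # ips/once runners
Metric.new(name: 'Max resident set size', unit: 'bytes',
           larger_better: false, worse_word: 'larger')        # memory runner
```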

data/lib/benchmark_driver/runner/ips.rb

```diff
@@ -1,5 +1,5 @@
 require 'benchmark_driver/struct'
-require 'benchmark_driver/
+require 'benchmark_driver/metric'
 require 'benchmark_driver/default_job'
 require 'benchmark_driver/default_job_parser'
 require 'tempfile'
@@ -12,10 +12,10 @@ class BenchmarkDriver::Runner::Ips
   # Dynamically fetched and used by `BenchmarkDriver::JobParser.parse`
   JobParser = BenchmarkDriver::DefaultJobParser.for(Job)
 
-
+  METRIC = BenchmarkDriver::Metric.new(name: 'Iteration per second', unit: 'i/s')
 
   # @param [BenchmarkDriver::Config::RunnerConfig] config
-  # @param [BenchmarkDriver::Output
+  # @param [BenchmarkDriver::Output] output
   def initialize(config:, output:)
     @config = config
     @output = output
@@ -24,19 +24,23 @@ class BenchmarkDriver::Runner::Ips
   # This method is dynamically called by `BenchmarkDriver::JobRunner.run`
   # @param [Array<BenchmarkDriver::Default::Job>] jobs
   def run(jobs)
-
+    @output.metrics = [metric]
 
     if jobs.any? { |job| job.loop_count.nil? }
       @output.with_warmup do
         jobs = jobs.map do |job|
           next job if job.loop_count # skip warmup if loop_count is set
 
-          @output.with_job(job) do
-
-
-
+          @output.with_job(name: job.name) do
+            executable = job.runnable_execs(@config.executables).first
+            duration, loop_count = run_warmup(job, exec: executable)
+            value, duration = value_duration(duration: duration, loop_count: loop_count)
 
-
+            @output.with_context(name: executable.name, executable: executable, duration: duration, loop_count: loop_count) do
+              @output.report(value: value, metric: metric)
+            end
+
+            loop_count = (loop_count.to_f * @config.run_duration / duration).floor
             Job.new(job.to_h.merge(loop_count: loop_count))
           end
         end
@@ -45,12 +49,14 @@ class BenchmarkDriver::Runner::Ips
 
     @output.with_benchmark do
       jobs.each do |job|
-        @output.with_job(job) do
+        @output.with_job(name: job.name) do
           job.runnable_execs(@config.executables).each do |exec|
-
+            value, duration = with_repeat(@config.repeat_count) do
              run_benchmark(job, exec: exec)
            end
-            @output.
+            @output.with_context(name: exec.name, executable: exec, duration: duration) do
+              @output.report(value: value, metric: metric)
+            end
          end
        end
      end
@@ -71,23 +77,23 @@ class BenchmarkDriver::Runner::Ips
       second_warmup_duration: @config.run_duration / 3.0, # default: 1.0
     )
 
-
+    duration, loop_count = Tempfile.open(['benchmark_driver-', '.rb']) do |f|
      with_script(warmup.render(result: f.path)) do |path|
        execute(*exec.command, path)
      end
      eval(f.read)
    end
 
-
+    [duration, loop_count]
   end
 
   # Return multiple times and return the best metrics
   def with_repeat(repeat_times, &block)
-
+    value_durations = repeat_times.times.map do
      block.call
    end
-
-
+    value_durations.sort_by do |value, _|
+      metric.larger_better ? value : -value
    end.last
  end
 
@@ -109,25 +115,20 @@ class BenchmarkDriver::Runner::Ips
       Float(f.read)
     end
 
-
+    value_duration(
      loop_count: job.loop_count,
      duration: duration,
-      executable: exec,
    )
  end
 
   # This method is overridden by BenchmarkDriver::Runner::Time
-  def
-
-      value: loop_count.to_f / duration,
-      duration: duration,
-      executable: executable,
-    )
+  def metric
+    METRIC
   end
 
-  #
-  def
-
+  # Overridden by BenchmarkDriver::Runner::Time
+  def value_duration(duration:, loop_count:)
+    [loop_count.to_f / duration, duration]
   end
 
   def with_script(script)
@@ -186,7 +187,7 @@ end
 
         #{teardown}
 
-        File.write(#{result.dump},
+        File.write(#{result.dump}, [__bmdv_duration, __bmdv_loops].inspect)
       RUBY
     end
   end
```
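
Two hooks in the Ips runner, `metric` and `value_duration`, exist only so that `BenchmarkDriver::Runner::Time` (changed +7 −10 in this release but not shown here) can override them. A plausible sketch of such an override follows; the subclassing, constant name, and metric wording are assumptions not confirmed by this diff.

```ruby
# Hypothetical sketch: a Time runner built on top of the Ips runner above.
class BenchmarkDriver::Runner::Time < BenchmarkDriver::Runner::Ips
  METRIC = BenchmarkDriver::Metric.new(name: 'Execution time', unit: 's', larger_better: false)

  private

  def metric
    METRIC
  end

  # For a time report, the measured duration itself is the reported value.
  def value_duration(duration:, loop_count:)
    [duration, duration]
  end
end
```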

data/lib/benchmark_driver/runner/memory.rb

```diff
@@ -1,5 +1,5 @@
 require 'benchmark_driver/struct'
-require 'benchmark_driver/
+require 'benchmark_driver/metric'
 require 'benchmark_driver/default_job'
 require 'benchmark_driver/default_job_parser'
 require 'tempfile'
@@ -12,10 +12,12 @@ class BenchmarkDriver::Runner::Memory
   # Dynamically fetched and used by `BenchmarkDriver::JobParser.parse`
   JobParser = BenchmarkDriver::DefaultJobParser.for(Job)
 
-
+  METRIC = BenchmarkDriver::Metric.new(
+    name: 'Max resident set size', unit: 'bytes', larger_better: false, worse_word: 'larger',
+  )
 
   # @param [BenchmarkDriver::Config::RunnerConfig] config
-  # @param [BenchmarkDriver::Output
+  # @param [BenchmarkDriver::Output] output
   def initialize(config:, output:)
     @config = config
     @output = output
@@ -29,7 +31,7 @@ class BenchmarkDriver::Runner::Memory
       raise "memory output is not supported for '#{Etc.uname[:sysname]}' for now"
     end
 
-    @output.
+    @output.metrics = [METRIC]
 
     if jobs.any? { |job| job.loop_count.nil? }
       jobs = jobs.map do |job|
@@ -39,12 +41,14 @@ class BenchmarkDriver::Runner::Memory
 
     @output.with_benchmark do
       jobs.each do |job|
-        @output.with_job(job) do
+        @output.with_job(name: job.name) do
           job.runnable_execs(@config.executables).each do |exec|
-
+            best_value = with_repeat(@config.repeat_count) do
              run_benchmark(job, exec: exec)
            end
-            @output.
+            @output.with_context(name: exec.name, executable: exec, loop_count: job.loop_count) do
+              @output.report(value: best_value, metric: METRIC)
+            end
          end
        end
      end
@@ -53,14 +57,12 @@ class BenchmarkDriver::Runner::Memory
 
   private
 
-  # Return multiple times and return the best
+  # Return multiple times and return the best value (smallest usage)
   def with_repeat(repeat_times, &block)
-
+    values = repeat_times.times.map do
      block.call
    end
-
-      metrics.value
-    end.first
+    values.sort.first
   end
 
   # @param [BenchmarkDriver::Runner::Ips::Job] job - loop_count is not nil
@@ -81,10 +83,7 @@ class BenchmarkDriver::Runner::Memory
     match_data = /^(?<user>\d+.\d+)user\s+(?<system>\d+.\d+)system\s+(?<elapsed1>\d+):(?<elapsed2>\d+.\d+)elapsed.+\([^\s]+\s+(?<maxresident>\d+)maxresident\)k$/.match(output)
     raise "Unexpected format given from /usr/bin/time:\n#{out}" unless match_data[:maxresident]
 
-
-      value: Integer(match_data[:maxresident]) * 1000.0, # kilobytes -> bytes
-      executable: exec,
-    )
+    Integer(match_data[:maxresident]) * 1000.0 # kilobytes -> bytes
   end
 
   def with_script(script)
```
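
The memory runner shells out to `/usr/bin/time` and, per the last hunk above, now returns the parsed max-resident figure directly (converted from kilobytes to bytes) instead of wrapping it in the old Metrics struct. A standalone check of that parsing, using a made-up line in GNU time's default output format:

```ruby
# Sample /usr/bin/time output line (values are made up).
output = "2.34user 0.56system 0:02.91elapsed 99%CPU (0avgtext+0avgdata 123456maxresident)k"

match_data = /^(?<user>\d+.\d+)user\s+(?<system>\d+.\d+)system\s+(?<elapsed1>\d+):(?<elapsed2>\d+.\d+)elapsed.+\([^\s]+\s+(?<maxresident>\d+)maxresident\)k$/.match(output)

Integer(match_data[:maxresident]) * 1000.0 # => 123456000.0 (kilobytes -> bytes)
```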

data/lib/benchmark_driver/runner/once.rb

```diff
@@ -1,5 +1,5 @@
 require 'benchmark_driver/struct'
-require 'benchmark_driver/
+require 'benchmark_driver/metric'
 require 'benchmark_driver/default_job'
 require 'benchmark_driver/default_job_parser'
 require 'tempfile'
@@ -12,10 +12,10 @@ class BenchmarkDriver::Runner::Once
   # Dynamically fetched and used by `BenchmarkDriver::JobParser.parse`
   JobParser = BenchmarkDriver::DefaultJobParser.for(Job)
 
-
+  METRIC = BenchmarkDriver::Metric.new(name: 'Iteration per second', unit: 'i/s')
 
   # @param [BenchmarkDriver::Config::RunnerConfig] config
-  # @param [BenchmarkDriver::Output
+  # @param [BenchmarkDriver::Output] output
   def initialize(config:, output:)
     @config = config
     @output = output
@@ -24,7 +24,7 @@ class BenchmarkDriver::Runner::Once
   # This method is dynamically called by `BenchmarkDriver::JobRunner.run`
   # @param [Array<BenchmarkDriver::Default::Job>] jobs
   def run(jobs)
-    @output.
+    @output.metrics = [METRIC]
 
     jobs = jobs.map do |job|
       Job.new(job.to_h.merge(loop_count: 1)) # to show this on output
@@ -32,10 +32,12 @@ class BenchmarkDriver::Runner::Once
 
     @output.with_benchmark do
       jobs.each do |job|
-        @output.with_job(job) do
+        @output.with_job(name: job.name) do
           job.runnable_execs(@config.executables).each do |exec|
-
-            @output.
+            duration = run_benchmark(job, exec: exec) # no repeat support
+            @output.with_context(name: exec.name, executable: exec, duration: duration, loop_count: 1) do
+              @output.report(value: 1.0 / duration, metric: METRIC)
+            end
          end
        end
      end
@@ -46,7 +48,7 @@ class BenchmarkDriver::Runner::Once
 
   # @param [BenchmarkDriver::Runner::Ips::Job] job - loop_count is not nil
   # @param [BenchmarkDriver::Config::Executable] exec
-  # @return [
+  # @return [Float] duration
   def run_benchmark(job, exec:)
     benchmark = BenchmarkScript.new(
       prelude: job.prelude,
@@ -55,18 +57,12 @@ class BenchmarkDriver::Runner::Once
       loop_count: job.loop_count,
     )
 
-
+    Tempfile.open(['benchmark_driver-', '.rb']) do |f|
      with_script(benchmark.render(result: f.path)) do |path|
        execute(*exec.command, path)
      end
      Float(f.read)
    end
-
-    BenchmarkDriver::Metrics.new(
-      value: 1.0 / duration,
-      duration: duration,
-      executable: exec,
-    )
   end
 
   def with_script(script)
```