benchmark_driver 0.8.6 → 0.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +5 -5
- data/.travis.yml +1 -3
- data/CHANGELOG.md +9 -0
- data/Gemfile +1 -6
- data/README.md +51 -52
- data/benchmark_driver.gemspec +3 -2
- data/bin/console +4 -11
- data/examples/exec_blank.rb +2 -2
- data/examples/exec_blank_simple.rb +2 -3
- data/exe/benchmark-driver +74 -83
- data/lib/benchmark_driver.rb +12 -1
- data/lib/benchmark_driver/config.rb +36 -0
- data/lib/benchmark_driver/default_job.rb +12 -0
- data/lib/benchmark_driver/default_job_parser.rb +68 -0
- data/lib/benchmark_driver/job_parser.rb +42 -0
- data/lib/benchmark_driver/metrics.rb +17 -0
- data/lib/benchmark_driver/output.rb +27 -0
- data/lib/benchmark_driver/output/compare.rb +196 -0
- data/lib/benchmark_driver/output/markdown.rb +102 -0
- data/lib/benchmark_driver/output/simple.rb +97 -0
- data/lib/benchmark_driver/rbenv.rb +11 -0
- data/lib/benchmark_driver/ruby_interface.rb +51 -0
- data/lib/benchmark_driver/runner.rb +42 -0
- data/lib/benchmark_driver/runner/ips.rb +239 -0
- data/lib/benchmark_driver/runner/memory.rb +142 -0
- data/lib/benchmark_driver/runner/time.rb +18 -0
- data/lib/benchmark_driver/struct.rb +85 -0
- data/lib/benchmark_driver/version.rb +3 -0
- metadata +21 -33
- data/bin/bench +0 -4
- data/examples/call.rb +0 -12
- data/examples/call_blank.rb +0 -13
- data/examples/call_erb.rb +0 -33
- data/examples/call_interpolation.rb +0 -13
- data/examples/eval_blank.rb +0 -12
- data/examples/eval_blank_loop.rb +0 -13
- data/examples/eval_interpolation.rb +0 -15
- data/lib/benchmark/driver.rb +0 -101
- data/lib/benchmark/driver/benchmark_result.rb +0 -21
- data/lib/benchmark/driver/bundle_installer.rb +0 -45
- data/lib/benchmark/driver/bundler.rb +0 -12
- data/lib/benchmark/driver/configuration.rb +0 -77
- data/lib/benchmark/driver/duration_runner.rb +0 -24
- data/lib/benchmark/driver/error.rb +0 -16
- data/lib/benchmark/driver/repeatable_runner.rb +0 -18
- data/lib/benchmark/driver/ruby_dsl_parser.rb +0 -78
- data/lib/benchmark/driver/time.rb +0 -12
- data/lib/benchmark/driver/version.rb +0 -5
- data/lib/benchmark/driver/yaml_parser.rb +0 -55
- data/lib/benchmark/output.rb +0 -20
- data/lib/benchmark/output/ips.rb +0 -143
- data/lib/benchmark/output/markdown.rb +0 -73
- data/lib/benchmark/output/memory.rb +0 -57
- data/lib/benchmark/output/time.rb +0 -57
- data/lib/benchmark/runner.rb +0 -14
- data/lib/benchmark/runner/call.rb +0 -97
- data/lib/benchmark/runner/eval.rb +0 -147
- data/lib/benchmark/runner/exec.rb +0 -193
# Minimal output plugin: prints one aligned line per job with humanized
# metric values, and dots as warmup progress.
class BenchmarkDriver::Output::Simple
  # Column width used for executable names and metric values.
  NAME_LENGTH = 8

  # @param [Array<BenchmarkDriver::*::Job>] jobs
  # @param [Array<BenchmarkDriver::Config::Executable>] executables
  # @param [BenchmarkDriver::Metrics::Type] metrics_type
  def initialize(jobs:, executables:, metrics_type:)
    @jobs = jobs
    @executables = executables
    @metrics_type = metrics_type
    # Width of the widest job name, used to align result columns.
    @name_length = jobs.map { |j| j.name.size }.max
  end

  # Wraps the warmup phase: prints "warming up" (followed by one dot per
  # `#report` call), then a trailing newline even if the block raises.
  def with_warmup(&block)
    without_stdout_buffering do
      $stdout.print 'warming up'
      block.call
    end
  ensure
    $stdout.puts
  end

  # Wraps the benchmark phase: prints the header (and the executable-name
  # row when comparing more than one executable) before yielding.
  #
  # NOTE(review): the bare `rescue` swallows any StandardError raised while
  # benchmarking and only resets the flag, so callers never see the error.
  # Preserved as-is — confirm whether `ensure` was intended here.
  def with_benchmark(&block)
    @with_benchmark = true
    without_stdout_buffering do
      # Show header
      $stdout.puts "benchmark results (#{@metrics_type.unit}):"

      # Show executable names
      if @executables.size > 1
        $stdout.print("#{' ' * @name_length}  ")
        @executables.each do |executable|
          $stdout.print("%#{NAME_LENGTH}s  " % executable.name)
        end
        $stdout.puts
      end

      block.call
    end
  rescue
    @with_benchmark = false
  end

  # Wraps one job: prints the job name before yielding and a newline after,
  # but only during the benchmark phase (not warmup).
  # @param [BenchmarkDriver::*::Job] job
  def with_job(job, &block)
    if @with_benchmark
      $stdout.print("%-#{@name_length}s  " % job.name)
    end
    block.call
  ensure
    if @with_benchmark
      $stdout.puts
    end
  end

  # Prints one measured value (benchmark phase) or a progress dot (warmup).
  # @param [BenchmarkDriver::Metrics] metrics
  def report(metrics)
    if @with_benchmark
      $stdout.print("%#{NAME_LENGTH}s  " % humanize(metrics.value))
    else
      $stdout.print '.'
    end
  end

  private

  # benchmark_driver outputs logs ASAP. This enables sync flag for it.
  def without_stdout_buffering
    sync, $stdout.sync = $stdout.sync, true
    yield
  ensure
    $stdout.sync = sync
  end

  # Formats a non-negative number with an SI-style suffix, e.g.
  # 1234.0 -> "1.234k". Values below 1000 (or at/above 10**18) get no suffix.
  # @param [Numeric] value
  # @return [String]
  # @raise [ArgumentError] when value is negative
  def humanize(value)
    if value < 0
      raise ArgumentError.new("Negative value: #{value.inspect}")
    end
    # bug fix: Math.log10(0) is -Infinity and `.to_i` on it raises
    # FloatDomainError, so zero needs an early return.
    return " #{'%6.3f' % 0.0}" if value == 0

    scale = (Math.log10(value) / 3).to_i
    prefix = "%6.3f" % (value.to_f / (1000 ** scale))
    suffix =
      case scale
      when 1; 'k'
      when 2; 'M'
      when 3; 'G'
      when 4; 'T'
      when 5; 'Q'
      else # < 1000 or >= 10**18: no scale or suffix
        # bug fix: the old code assigned `scale = 0` AFTER `prefix` was
        # already computed, so values >= 10**18 were printed scaled down
        # but without any suffix. Recompute unscaled instead (identical
        # output for the < 1000 case, where scale was already 0).
        return " #{'%6.3f' % value.to_f}"
      end
    "#{prefix}#{suffix}"
  end
end
module BenchmarkDriver
  # Ruby-level DSL entry point (used via `Benchmark.driver { |x| ... }`).
  # Collects a prelude and reported scripts, then hands them to the job
  # parser and runner.
  class RubyInterface
    # Builds an instance, yields it to the configuration block, then runs
    # the collected benchmarks immediately.
    def self.run(**args, &block)
      interface = new(**args)
      block.call(interface)
      interface.run
    end

    # Build jobs and run. This is NOT interface for users.
    def run
      parsed_jobs = @jobs.flat_map do |definition|
        defaults = { type: @config.runner_type, prelude: @prelude }
        BenchmarkDriver::JobParser.parse(defaults.merge!(definition))
      end
      BenchmarkDriver::Runner.run(parsed_jobs, config: @config)
    end

    #
    # Config APIs from here
    #

    # @param [String,NilClass] output
    # @param [String,NilClass] runner
    def initialize(output: nil, runner: nil)
      @prelude = ''
      @jobs = []
      @config = BenchmarkDriver::Config.new
      @config.output_type = output.to_s if output
      @config.runner_type = runner.to_s if runner
    end

    # Appends one line of setup code shared by every reported script.
    # @param [String] script
    def prelude(script)
      @prelude << "#{script}\n"
    end

    # @param [String] name - Name shown on result output.
    # @param [String,nil] script - Benchmarked script in String. If nil, name is considered as script too.
    def report(name, script = nil)
      script ||= name
      @jobs << { benchmark: [{ name: name, script: script }] }
    end

    # Backward compatibility. This is actually default now.
    def compare!
      @config.output_type = 'compare'
    end
  end
end
module BenchmarkDriver
  module Runner
    # Load built-in runners so `runner_for` can resolve them by name.
    require 'benchmark_driver/runner/ips'
    require 'benchmark_driver/runner/memory'
    require 'benchmark_driver/runner/time'
  end

  class << Runner
    # Main function which is used by both CLI and `Benchmark.driver`.
    # @param [Array<BenchmarkDriver::*::Job>] jobs
    # @param [BenchmarkDriver::Config] config
    def run(jobs, config:)
      runner_config = Config::RunnerConfig.new(
        executables: config.executables,
        repeat_count: config.repeat_count,
        run_duration: config.run_duration,
      )

      # Jobs of different types (ips, memory, ...) get their own runner.
      jobs.group_by(&:class).each do |klass, jobs_group|
        runner = runner_for(klass)
        output = Output.find(config.output_type).new(
          # bug fix: was `jobs` — every job was passed to every group's
          # output (and `jobs_group` was unused).
          jobs: jobs_group,
          executables: config.executables,
          metrics_type: runner::MetricsType,
        )
        # bug fix: run only this group's jobs (was `jobs`); with mixed job
        # types the old code ran all jobs under each runner.
        runner.new(config: runner_config, output: output).run(jobs_group)
      end
    end

    private

    # Dynamically find class (BenchmarkDriver::*::JobRunner) for plugin support
    # @param [Class] klass - BenchmarkDriver::*::Job
    # @return [Class]
    # @raise [RuntimeError] when klass does not follow the naming convention
    def runner_for(klass)
      unless match = klass.name.match(/\ABenchmarkDriver::Runner::(?<namespace>[^:]+)::Job\z/)
        raise "Unexpected job class: #{klass}"
      end
      BenchmarkDriver.const_get("Runner::#{match[:namespace]}", false)
    end
  end
end
require 'benchmark_driver/struct'
require 'benchmark_driver/metrics'
require 'benchmark_driver/default_job'
require 'benchmark_driver/default_job_parser'
require 'tempfile'
require 'shellwords'

# Runner that measures iterations per second (i/s). Each job is rendered
# into a standalone Ruby script, executed in a subprocess per target
# executable, and timed around a `while` loop of `loop_count` iterations.
class BenchmarkDriver::Runner::Ips
  # JobParser returns this, `BenchmarkDriver::Runner.runner_for` searches "*::Job"
  Job = Class.new(BenchmarkDriver::DefaultJob)
  # Dynamically fetched and used by `BenchmarkDriver::JobParser.parse`
  JobParser = BenchmarkDriver::DefaultJobParser.for(Job)
  # Passed to `output` by `BenchmarkDriver::Runner.run`
  MetricsType = BenchmarkDriver::Metrics::Type.new(unit: 'i/s')

  # @param [BenchmarkDriver::Config::RunnerConfig] config
  # @param [BenchmarkDriver::Output::*] output
  def initialize(config:, output:)
    @config = config
    @output = output
  end

  # This method is dynamically called by `BenchmarkDriver::JobRunner.run`
  # @param [Array<BenchmarkDriver::Default::Job>] jobs
  def run(jobs)
    # Jobs without an explicit loop_count are warmed up first to estimate a
    # loop count that makes the real run last roughly `run_duration` seconds.
    if jobs.any? { |job| job.loop_count.nil? }
      @output.with_warmup do
        jobs = jobs.map do |job|
          next job if job.loop_count # skip warmup if loop_count is set

          @output.with_job(job) do
            # NOTE(review): warmup only measures the first executable; the
            # estimated loop_count is then reused for all executables.
            result = run_warmup(job, exec: @config.executables.first)
            metrics = build_metrics(result)
            @output.report(metrics)

            loop_count = (result.fetch(:loop_count).to_f * @config.run_duration / result.fetch(:duration)).floor
            Job.new(job.to_h.merge(loop_count: loop_count))
          end
        end
      end
    end

    @output.with_benchmark do
      jobs.each do |job|
        @output.with_job(job) do
          @config.executables.each do |exec|
            best_metrics = with_repeat(@config.repeat_count) do
              run_benchmark(job, exec: exec)
            end
            @output.report(best_metrics)
          end
        end
      end
    end
  end

  private

  # Runs the two-phase warmup script in a subprocess and returns a Hash with
  # :duration, :loop_count and :executable, used to size the real benchmark.
  # @param [BenchmarkDriver::Runner::Ips::Job] job - loop_count is nil
  # @param [BenchmarkDriver::Config::Executable] exec
  def run_warmup(job, exec:)
    warmup = WarmupScript.new(
      prelude: job.prelude,
      script: job.script,
      teardown: job.teardown,
      loop_count: job.loop_count,
      first_warmup_duration: @config.run_duration / 6.0, # default: 0.5
      second_warmup_duration: @config.run_duration / 3.0, # default: 1.0
    )

    hash = Tempfile.open(['benchmark_driver-', '.rb']) do |f|
      with_script(warmup.render(result: f.path)) do |path|
        execute(*exec.command, path)
      end
      # The warmup subprocess wrote `{ duration: ..., loop_count: ... }.inspect`
      # to f.path; eval turns it back into a Hash. NOTE(review): eval of a file
      # we generated ourselves, but still worth keeping an eye on.
      eval(f.read)
    end

    hash.merge(executable: exec)
  end

  # Runs the block `repeat_times` times and returns the best metrics
  # (largest value — more i/s is better).
  def with_repeat(repeat_times, &block)
    all_metrics = repeat_times.times.map do
      block.call
    end
    all_metrics.sort_by do |metrics|
      metrics.value
    end.last
  end

  # Renders and executes the benchmark script, reading the measured duration
  # back from a tempfile.
  # @param [BenchmarkDriver::Runner::Ips::Job] job - loop_count is not nil
  # @param [BenchmarkDriver::Config::Executable] exec
  # @return [BenchmarkDriver::Metrics]
  def run_benchmark(job, exec:)
    benchmark = BenchmarkScript.new(
      prelude: job.prelude,
      script: job.script,
      teardown: job.teardown,
      loop_count: job.loop_count,
    )

    duration = Tempfile.open(['benchmark_driver-', '.rb']) do |f|
      with_script(benchmark.render(result: f.path)) do |path|
        execute(*exec.command, path)
      end
      # The subprocess wrote the measured duration (a Float's #inspect).
      Float(f.read)
    end

    build_metrics(
      loop_count: job.loop_count,
      duration: duration,
      executable: exec,
    )
  end

  # This method is overridden by BenchmarkDriver::Runner::Time
  def build_metrics(duration:, executable:, loop_count:)
    BenchmarkDriver::Metrics.new(
      value: loop_count.to_f / duration,
      duration: duration,
      executable: executable,
    )
  end

  # Writes `script` to a tempfile and yields its path; the tempfile is
  # removed when the block returns.
  def with_script(script)
    Tempfile.open(['benchmark_driver-', '.rb']) do |f|
      f.puts script
      f.close
      return yield(f.path)
    end
  end

  # Runs a command in a subprocess, raising if it exits non-zero.
  def execute(*args)
    IO.popen(args, &:read) # handle stdout?
    unless $?.success?
      raise "Failed to execute: #{args.shelljoin} (status: #{$?.exitstatus})"
    end
  end

  # Generates the warmup script: a first timed loop to estimate speed, then
  # a second phase counting iterations in ~100ms batches, finally dumping
  # `{ duration:, loop_count: }` to the `result` file.
  WarmupScript = ::BenchmarkDriver::Struct.new(:prelude, :script, :teardown, :loop_count, :first_warmup_duration, :second_warmup_duration) do
    # @param [String] result - A file to write result
    def render(result:)
      <<-RUBY
#{prelude}

# first warmup
__bmdv_i = 0
__bmdv_before = Time.now
__bmdv_target = __bmdv_before + #{first_warmup_duration}
while Time.now < __bmdv_target
  #{script}
  __bmdv_i += 1
end
__bmdv_after = Time.now

# second warmup
__bmdv_ip100ms = (__bmdv_i.to_f / (__bmdv_after - __bmdv_before) / 10.0).floor
__bmdv_loops = 0
__bmdv_duration = 0.0
__bmdv_target = Time.now + #{second_warmup_duration}
while Time.now < __bmdv_target
  __bmdv_i = 0
  __bmdv_before = Time.now
  while __bmdv_i < __bmdv_ip100ms
    #{script}
    __bmdv_i += 1
  end
  __bmdv_after = Time.now

  __bmdv_loops += __bmdv_i
  __bmdv_duration += (__bmdv_after - __bmdv_before)
end

#{teardown}

File.write(#{result.dump}, { duration: __bmdv_duration, loop_count: __bmdv_loops }.inspect)
      RUBY
    end
  end
  private_constant :WarmupScript

  # Generates the benchmark script. It times an empty loop and the scripted
  # loop with the same loop_count and writes their difference to `result`,
  # subtracting loop overhead from the measurement.
  # @param [String] prelude
  # @param [String] script
  # @param [String] teardown
  # @param [Integer] loop_count
  BenchmarkScript = ::BenchmarkDriver::Struct.new(:prelude, :script, :teardown, :loop_count) do
    # @param [String] result - A file to write result
    def render(result:)
      <<-RUBY
#{prelude}

if Process.respond_to?(:clock_gettime) # Ruby 2.1+
  __bmdv_empty_before = Process.clock_gettime(Process::CLOCK_MONOTONIC)
  #{while_loop('', loop_count)}
  __bmdv_empty_after = Process.clock_gettime(Process::CLOCK_MONOTONIC)
else
  __bmdv_empty_before = Time.now
  #{while_loop('', loop_count)}
  __bmdv_empty_after = Time.now
end

if Process.respond_to?(:clock_gettime) # Ruby 2.1+
  __bmdv_script_before = Process.clock_gettime(Process::CLOCK_MONOTONIC)
  #{while_loop(script, loop_count)}
  __bmdv_script_after = Process.clock_gettime(Process::CLOCK_MONOTONIC)
else
  __bmdv_script_before = Time.now
  #{while_loop(script, loop_count)}
  __bmdv_script_after = Time.now
end

#{teardown}

File.write(
  #{result.dump},
  ((__bmdv_script_after - __bmdv_script_before) - (__bmdv_empty_after - __bmdv_empty_before)).inspect,
)
      RUBY
    end

    private

    # Emits a `while` loop that runs `content` exactly `times` times.
    # @raise [ArgumentError] unless times is a positive Integer
    def while_loop(content, times)
      if !times.is_a?(Integer) || times <= 0
        raise ArgumentError.new("Unexpected times: #{times.inspect}")
      end

      # TODO: execute in batch
      <<-RUBY
__bmdv_i = 0
while __bmdv_i < #{times}
  #{content}
  __bmdv_i += 1
end
      RUBY
    end
  end
  private_constant :BenchmarkScript
end
require 'benchmark_driver/struct'
require 'benchmark_driver/metrics'
require 'benchmark_driver/default_job'
require 'benchmark_driver/default_job_parser'
require 'tempfile'
require 'shellwords'
require 'etc' # bug fix: Etc.uname is used below but 'etc' was never required

# Max resident set size: runner that reports the benchmark process's peak
# memory usage, measured externally with /usr/bin/time. Linux only.
class BenchmarkDriver::Runner::Memory
  # JobParser returns this, `BenchmarkDriver::Runner.runner_for` searches "*::Job"
  Job = Class.new(BenchmarkDriver::DefaultJob)
  # Dynamically fetched and used by `BenchmarkDriver::JobParser.parse`
  JobParser = BenchmarkDriver::DefaultJobParser.for(Job)
  # Passed to `output` by `BenchmarkDriver::Runner.run`.
  # Less memory is better, hence larger_better: false.
  MetricsType = BenchmarkDriver::Metrics::Type.new(unit: 'bytes', larger_better: false, worse_word: 'larger')

  # @param [BenchmarkDriver::Config::RunnerConfig] config
  # @param [BenchmarkDriver::Output::*] output
  def initialize(config:, output:)
    @config = config
    @output = output
  end

  # This method is dynamically called by `BenchmarkDriver::JobRunner.run`
  # @param [Array<BenchmarkDriver::Default::Job>] jobs
  # @raise [RuntimeError] on non-Linux platforms
  def run(jobs)
    # Currently Linux's time(1) support only...
    if Etc.uname.fetch(:sysname) != 'Linux'
      raise "memory output is not supported for '#{Etc.uname[:sysname]}' for now"
    end

    # Peak RSS is not very sensitive to loop count, so default to one loop.
    if jobs.any? { |job| job.loop_count.nil? }
      jobs = jobs.map do |job|
        job.loop_count ? job : Job.new(job.to_h.merge(loop_count: 1))
      end
    end

    @output.with_benchmark do
      jobs.each do |job|
        @output.with_job(job) do
          @config.executables.each do |exec|
            best_metrics = with_repeat(@config.repeat_count) do
              run_benchmark(job, exec: exec)
            end
            @output.report(best_metrics)
          end
        end
      end
    end
  end

  private

  # Runs the block `repeat_times` times and returns the best metrics
  # (smallest value — less memory is better).
  def with_repeat(repeat_times, &block)
    all_metrics = repeat_times.times.map do
      block.call
    end
    all_metrics.sort_by do |metrics|
      metrics.value
    end.first
  end

  # Renders the benchmark script and runs it under /usr/bin/time, parsing
  # maxresident from time(1)'s report on stderr.
  # @param [BenchmarkDriver::Runner::Ips::Job] job - loop_count is not nil
  # @param [BenchmarkDriver::Config::Executable] exec
  # @return [BenchmarkDriver::Metrics]
  def run_benchmark(job, exec:)
    benchmark = BenchmarkScript.new(
      prelude: job.prelude,
      script: job.script,
      teardown: job.teardown,
      loop_count: job.loop_count,
    )

    output = Tempfile.open(['benchmark_driver-', '.rb']) do |f|
      with_script(benchmark.render(result: f.path)) do |path|
        execute('/usr/bin/time', *exec.command, path)
      end
    end

    match_data = /^(?<user>\d+.\d+)user\s+(?<system>\d+.\d+)system\s+(?<elapsed1>\d+):(?<elapsed2>\d+.\d+)elapsed.+\([^\s]+\s+(?<maxresident>\d+)maxresident\)k$/.match(output)
    # bug fixes: guard against a nil MatchData (`match_data[:maxresident]`
    # raised NoMethodError on a parse failure) and interpolate `output`
    # (the old message referenced an undefined variable `out`).
    raise "Unexpected format given from /usr/bin/time:\n#{output}" if match_data.nil?

    BenchmarkDriver::Metrics.new(
      # kilobytes -> bytes. NOTE(review): time(1) reports KiB; confirm
      # whether 1024 was intended instead of 1000.
      value: Integer(match_data[:maxresident]) * 1000.0,
      executable: exec,
    )
  end

  # Writes `script` to a tempfile and yields its path; the tempfile is
  # removed when the block returns.
  def with_script(script)
    Tempfile.open(['benchmark_driver-', '.rb']) do |f|
      f.puts script
      f.close
      return yield(f.path)
    end
  end

  # Runs a command, raising if it exits non-zero.
  # /usr/bin/time writes its report to stderr, so stderr is merged into the
  # captured output.
  def execute(*args)
    output = IO.popen(args, err: [:child, :out], &:read) # handle stdout?
    unless $?.success?
      raise "Failed to execute: #{args.shelljoin} (status: #{$?.exitstatus})"
    end
    output
  end

  # Generates the benchmark script. The `result` path is accepted for parity
  # with other runners but unused: memory is measured externally by time(1).
  # @param [String] prelude
  # @param [String] script
  # @param [String] teardown
  # @param [Integer] loop_count
  BenchmarkScript = ::BenchmarkDriver::Struct.new(:prelude, :script, :teardown, :loop_count) do
    # @param [String] result - A file to write result
    def render(result:)
      # bug fix: this previously emitted `while_loop('', loop_count)` — an
      # empty loop body — so the benchmarked script was never executed.
      <<-RUBY
#{prelude}
#{while_loop(script, loop_count)}
#{teardown}
      RUBY
    end

    private

    # Emits a `while` loop running `content` `times` times; returns the bare
    # content when times == 1.
    # @raise [ArgumentError] unless times is a positive Integer
    def while_loop(content, times)
      if !times.is_a?(Integer) || times <= 0
        raise ArgumentError.new("Unexpected times: #{times.inspect}")
      end

      # TODO: execute in batch
      if times > 1
        <<-RUBY
__bmdv_i = 0
while __bmdv_i < #{times}
  #{content}
  __bmdv_i += 1
end
        RUBY
      else
        content
      end
    end
  end
  private_constant :BenchmarkScript
end