benchmark_driver_monotonic_raw 0.14.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +10 -0
- data/.rspec +1 -0
- data/.travis.yml +16 -0
- data/CHANGELOG.md +357 -0
- data/Gemfile +8 -0
- data/LICENSE.txt +21 -0
- data/README.md +386 -0
- data/Rakefile +9 -0
- data/benchmark-driver/.gitignore +12 -0
- data/benchmark-driver/CODE_OF_CONDUCT.md +74 -0
- data/benchmark-driver/Gemfile +6 -0
- data/benchmark-driver/LICENSE.txt +21 -0
- data/benchmark-driver/README.md +8 -0
- data/benchmark-driver/Rakefile +1 -0
- data/benchmark-driver/benchmark-driver.gemspec +21 -0
- data/benchmark-driver/bin/console +14 -0
- data/benchmark-driver/bin/setup +8 -0
- data/benchmark-driver/lib/benchmark-driver.rb +1 -0
- data/benchmark-driver/lib/benchmark/driver.rb +1 -0
- data/benchmark_driver.gemspec +28 -0
- data/bin/console +7 -0
- data/bin/setup +8 -0
- data/exe/benchmark-driver +118 -0
- data/images/optcarrot.png +0 -0
- data/lib/benchmark_driver.rb +14 -0
- data/lib/benchmark_driver/bulk_output.rb +59 -0
- data/lib/benchmark_driver/config.rb +59 -0
- data/lib/benchmark_driver/default_job.rb +29 -0
- data/lib/benchmark_driver/default_job_parser.rb +91 -0
- data/lib/benchmark_driver/job_parser.rb +55 -0
- data/lib/benchmark_driver/metric.rb +79 -0
- data/lib/benchmark_driver/output.rb +88 -0
- data/lib/benchmark_driver/output/compare.rb +216 -0
- data/lib/benchmark_driver/output/markdown.rb +107 -0
- data/lib/benchmark_driver/output/record.rb +61 -0
- data/lib/benchmark_driver/output/simple.rb +103 -0
- data/lib/benchmark_driver/rbenv.rb +25 -0
- data/lib/benchmark_driver/repeater.rb +52 -0
- data/lib/benchmark_driver/ruby_interface.rb +83 -0
- data/lib/benchmark_driver/runner.rb +103 -0
- data/lib/benchmark_driver/runner/command_stdout.rb +118 -0
- data/lib/benchmark_driver/runner/ips.rb +259 -0
- data/lib/benchmark_driver/runner/memory.rb +150 -0
- data/lib/benchmark_driver/runner/once.rb +118 -0
- data/lib/benchmark_driver/runner/recorded.rb +73 -0
- data/lib/benchmark_driver/runner/ruby_stdout.rb +146 -0
- data/lib/benchmark_driver/runner/time.rb +20 -0
- data/lib/benchmark_driver/struct.rb +98 -0
- data/lib/benchmark_driver/version.rb +3 -0
- metadata +150 -0
data/lib/benchmark_driver/runner/command_stdout.rb (new file, +118 lines)
@@ -0,0 +1,118 @@
require 'benchmark_driver/struct'
require 'benchmark_driver/metric'
require 'tempfile'
require 'shellwords'
require 'open3'

# Use stdout of ruby command
class BenchmarkDriver::Runner::CommandStdout
  # JobParser returns this, `BenchmarkDriver::Runner.runner_for` searches "*::Job"
  Job = ::BenchmarkDriver::Struct.new(
    :name,              # @param [String] name - This is mandatory for all runner
    :metrics,           # @param [Array<BenchmarkDriver::Metric>]
    :command,           # @param [Array<String>]
    :working_directory, # @param [String,NilClass]
    :stdout_to_metrics, # @param [String]
  )
  # Dynamically fetched and used by `BenchmarkDriver::JobParser.parse`
  class << JobParser = Module.new
    # @param [String] name
    # @param [String] command
    # @param [String,NilClass] working_directory
    # @param [Hash] metrics_type
    # @param [String] stdout_to_metrics
    def parse(name:, command:, working_directory: nil, metrics_type:, stdout_to_metrics:)
      Job.new(
        name: name,
        command: command.shellsplit,
        working_directory: working_directory,
        metrics: parse_metrics(metrics_type),
        stdout_to_metrics: stdout_to_metrics,
      )
    end

    private

    def parse_metrics(unit:, name: nil, larger_better: nil, worse_word: nil)
      name ||= unit
      metric = BenchmarkDriver::Metric.new(
        name: name,
        unit: unit,
        larger_better: larger_better,
        worse_word: worse_word,
      )
      [metric]
    end
  end

  # @param [BenchmarkDriver::Config::RunnerConfig] config
  # @param [BenchmarkDriver::Output] output
  # @param [BenchmarkDriver::Context] contexts
  def initialize(config:, output:, contexts:)
    @config = config
    @output = output
    @contexts = contexts
  end

  # This method is dynamically called by `BenchmarkDriver::JobRunner.run`
  # @param [Array<BenchmarkDriver::Default::Job>] jobs
  def run(jobs)
    metric = jobs.first.metrics.first

    @output.with_benchmark do
      jobs.each do |job|
        @output.with_job(name: job.name) do
          @contexts.each do |context|
            exec = context.executable
            value = BenchmarkDriver::Repeater.with_repeat(config: @config, larger_better: metric.larger_better) do
              stdout = with_chdir(job.working_directory) do
                with_ruby_prefix(exec) { execute(*exec.command, *job.command) }
              end
              StdoutToMetrics.new(
                stdout: stdout,
                stdout_to_metrics: job.stdout_to_metrics,
              ).metrics_value
            end

            @output.with_context(name: exec.name, executable: exec) do
              @output.report(values: { metric => value })
            end
          end
        end
      end
    end
  end

  private

  def with_ruby_prefix(executable, &block)
    env = ENV.to_h.dup
    ENV['PATH'] = "#{File.dirname(executable.command.first)}:#{ENV['PATH']}"
    block.call
  ensure
    ENV.replace(env)
  end

  def with_chdir(working_directory, &block)
    if working_directory
      Dir.chdir(working_directory) { block.call }
    else
      block.call
    end
  end

  def execute(*args)
    stdout, stderr, status = Open3.capture3(*args)
    unless status.success?
      raise "Failed to execute: #{args.shelljoin} (status: #{$?.exitstatus}):\n[stdout]:\n#{stdout}\n[stderr]:\n#{stderr}"
    end
    stdout
  end

  StdoutToMetrics = ::BenchmarkDriver::Struct.new(:stdout, :stdout_to_metrics) do
    def metrics_value
      eval(stdout_to_metrics, binding)
    end
  end
  private_constant :StdoutToMetrics
end
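
To make the `stdout_to_metrics` mechanism above concrete, here is a minimal standalone sketch (illustration only; it uses Ruby's built-in Struct instead of the gem's BenchmarkDriver::Struct, and the `fps:` output format plus the extraction expression are invented for the example):

# Mimics StdoutToMetrics: the user-supplied expression is evaluated against the
# struct's binding, so `stdout` inside the expression refers to the captured output.
Extractor = Struct.new(:stdout, :stdout_to_metrics) do
  def metrics_value
    eval(stdout_to_metrics, binding)
  end
end

captured = "frames: 600\nfps: 42.5\n"           # pretend output of the benchmarked command
expr     = 'Float(stdout[/fps: ([\d.]+)/, 1])'  # pretend stdout_to_metrics from a job definition
Extractor.new(captured, expr).metrics_value     # => 42.5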
data/lib/benchmark_driver/runner/ips.rb (new file, +259 lines)
@@ -0,0 +1,259 @@
require 'benchmark_driver/struct'
require 'benchmark_driver/metric'
require 'benchmark_driver/default_job'
require 'benchmark_driver/default_job_parser'
require 'tempfile'
require 'shellwords'

# Show iteration per second.
class BenchmarkDriver::Runner::Ips
  METRIC = BenchmarkDriver::Metric.new(name: 'Iteration per second', unit: 'i/s')

  # JobParser returns this, `BenchmarkDriver::Runner.runner_for` searches "*::Job"
  Job = Class.new(BenchmarkDriver::DefaultJob)
  # Dynamically fetched and used by `BenchmarkDriver::JobParser.parse`
  JobParser = BenchmarkDriver::DefaultJobParser.for(klass: Job, metrics: [METRIC])

  # @param [BenchmarkDriver::Config::RunnerConfig] config
  # @param [BenchmarkDriver::Output] output
  # @param [BenchmarkDriver::Context] contexts
  def initialize(config:, output:, contexts:)
    @config = config
    @output = output
    @contexts = contexts
  end

  # This method is dynamically called by `BenchmarkDriver::JobRunner.run`
  # @param [Array<BenchmarkDriver::Default::Job>] jobs
  def run(jobs)
    if jobs.any? { |job| job.loop_count.nil? }
      @output.with_warmup do
        jobs = jobs.map do |job|
          next job if job.loop_count # skip warmup if loop_count is set

          @output.with_job(name: job.name) do
            context = job.runnable_contexts(@contexts).first
            duration, loop_count = run_warmup(job, context: context)
            value, duration = value_duration(duration: duration, loop_count: loop_count)

            @output.with_context(name: context.name, executable: context.executable, gems: context.gems, prelude: context.prelude) do
              @output.report(values: { metric => value }, duration: duration, loop_count: loop_count)
            end

            loop_count = (loop_count.to_f * @config.run_duration / duration).floor
            Job.new(job.to_h.merge(loop_count: loop_count))
          end
        end
      end
    end

    @output.with_benchmark do
      jobs.each do |job|
        @output.with_job(name: job.name) do
          job.runnable_contexts(@contexts).each do |context|
            repeat_params = { config: @config, larger_better: true, rest_on_average: :average }
            value, duration = BenchmarkDriver::Repeater.with_repeat(repeat_params) do
              run_benchmark(job, context: context)
            end
            @output.with_context(name: context.name, executable: context.executable, gems: context.gems, prelude: context.prelude) do
              @output.report(values: { metric => value }, duration: duration, loop_count: job.loop_count)
            end
          end
        end
      end
    end
  end

  private

  # @param [BenchmarkDriver::Runner::Ips::Job] job - loop_count is nil
  # @param [BenchmarkDriver::Context] context
  def run_warmup(job, context:)
    warmup = WarmupScript.new(
      preludes: [context.prelude, job.prelude],
      script: job.script,
      teardown: job.teardown,
      loop_count: job.loop_count,
      first_warmup_duration: @config.run_duration / 6.0,  # default: 0.5
      second_warmup_duration: @config.run_duration / 3.0, # default: 1.0
    )

    duration, loop_count = Tempfile.open(['benchmark_driver-', '.rb']) do |f|
      with_script(warmup.render(result: f.path)) do |path|
        execute(*context.executable.command, path)
      end
      eval(f.read)
    end

    [duration, loop_count]
  end

  # @param [BenchmarkDriver::Runner::Ips::Job] job - loop_count is not nil
  # @param [BenchmarkDriver::Context] context
  # @return [BenchmarkDriver::Metrics]
  def run_benchmark(job, context:)
    benchmark = BenchmarkScript.new(
      preludes: [context.prelude, job.prelude],
      script: job.script,
      teardown: job.teardown,
      loop_count: job.loop_count,
    )

    duration = Tempfile.open(['benchmark_driver-', '.rb']) do |f|
      with_script(benchmark.render(result: f.path)) do |path|
        IO.popen([*context.executable.command, path], &:read) # TODO: print stdout if verbose=2
        if $?.success?
          Float(f.read)
        else
          BenchmarkDriver::Result::ERROR
        end
      end
    end

    value_duration(
      loop_count: job.loop_count,
      duration: duration,
    )
  end

  # This method is overridden by BenchmarkDriver::Runner::Time
  def metric
    METRIC
  end

  # Overridden by BenchmarkDriver::Runner::Time
  def value_duration(duration:, loop_count:)
    if BenchmarkDriver::Result::ERROR.equal?(duration)
      [BenchmarkDriver::Result::ERROR, BenchmarkDriver::Result::ERROR]
    else
      [loop_count.to_f / duration, duration]
    end
  end

  def with_script(script)
    if @config.verbose >= 2
      sep = '-' * 30
      $stdout.puts "\n\n#{sep}[Script begin]#{sep}\n#{script}#{sep}[Script end]#{sep}\n\n"
    end

    Tempfile.open(['benchmark_driver-', '.rb']) do |f|
      f.puts script
      f.close
      return yield(f.path)
    end
  end

  def execute(*args)
    IO.popen(args, &:read) # TODO: print stdout if verbose=2
    unless $?.success?
      raise "Failed to execute: #{args.shelljoin} (status: #{$?.exitstatus})"
    end
  end

  WarmupScript = ::BenchmarkDriver::Struct.new(:preludes, :script, :teardown, :loop_count, :first_warmup_duration, :second_warmup_duration) do
    # @param [String] result - A file to write result
    def render(result:)
      prelude = preludes.reject(&:nil?).reject(&:empty?).join("\n")
      <<-RUBY
#{prelude}

# first warmup
__bmdv_i = 0
__bmdv_before = Time.now
__bmdv_target = __bmdv_before + #{first_warmup_duration}
while Time.now < __bmdv_target
  #{script}
  __bmdv_i += 1
end
__bmdv_after = Time.now

# second warmup
__bmdv_ip100ms = (__bmdv_i.to_f / (__bmdv_after - __bmdv_before) / 10.0).ceil
__bmdv_loops = 0
__bmdv_duration = 0.0
__bmdv_target = Time.now + #{second_warmup_duration}
while Time.now < __bmdv_target
  __bmdv_i = 0
  __bmdv_before = Time.now
  while __bmdv_i < __bmdv_ip100ms
    #{script}
    __bmdv_i += 1
  end
  __bmdv_after = Time.now

  __bmdv_loops += __bmdv_i
  __bmdv_duration += (__bmdv_after - __bmdv_before)
end

#{teardown}

File.write(#{result.dump}, [__bmdv_duration, __bmdv_loops].inspect)
      RUBY
    end
  end
  private_constant :WarmupScript

  # @param [String] prelude
  # @param [String] script
  # @param [String] teardown
  # @param [Integer] loop_count
  BenchmarkScript = ::BenchmarkDriver::Struct.new(:preludes, :script, :teardown, :loop_count) do
    # @param [String] result - A file to write result
    def render(result:)
      prelude = preludes.reject(&:nil?).reject(&:empty?).join("\n")
      <<-RUBY
#{prelude}

if #{loop_count} == 1
  __bmdv_empty_before = 0
  __bmdv_empty_after = 0
elsif Process.respond_to?(:clock_gettime) # Ruby 2.1+
  __bmdv_empty_before = Process.clock_gettime(Process::CLOCK_MONOTONIC_RAW)
  #{while_loop('', loop_count)}
  __bmdv_empty_after = Process.clock_gettime(Process::CLOCK_MONOTONIC_RAW)
else
  __bmdv_empty_before = Time.now
  #{while_loop('', loop_count)}
  __bmdv_empty_after = Time.now
end

if Process.respond_to?(:clock_gettime) # Ruby 2.1+
  __bmdv_script_before = Process.clock_gettime(Process::CLOCK_MONOTONIC_RAW)
  #{while_loop(script, loop_count)}
  __bmdv_script_after = Process.clock_gettime(Process::CLOCK_MONOTONIC_RAW)
else
  __bmdv_script_before = Time.now
  #{while_loop(script, loop_count)}
  __bmdv_script_after = Time.now
end

#{teardown}

File.write(
  #{result.dump},
  ((__bmdv_script_after - __bmdv_script_before) - (__bmdv_empty_after - __bmdv_empty_before)).inspect,
)
      RUBY
    end

    private

    def while_loop(content, times)
      if !times.is_a?(Integer) || times <= 0
        raise ArgumentError.new("Unexpected times: #{times.inspect}")
      elsif times == 1
        return content
      end

      # TODO: execute in batch
      <<-RUBY
__bmdv_i = 0
while __bmdv_i < #{times}
  #{content}
  __bmdv_i += 1
end
      RUBY
    end
  end
  private_constant :BenchmarkScript
end
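
In plain numbers, the warmup and reporting arithmetic of the Ips runner above works like this (a sketch with made-up figures; `run_duration` stands for `@config.run_duration`):

# Warmup: extrapolate how many iterations should fill the configured benchmark duration.
warmup_loops    = 1_500   # loops counted during the second warmup
warmup_duration = 0.3     # seconds those loops took
run_duration    = 3.0     # @config.run_duration
loop_count      = (warmup_loops.to_f * run_duration / warmup_duration).floor  # => 15000

# Benchmark: value_duration converts the measured duration into iterations per second.
measured_duration = 2.8   # seconds written by BenchmarkScript (empty-loop time already subtracted)
ips = loop_count.to_f / measured_duration  # => ~5357.14, reported under METRIC ('i/s')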
data/lib/benchmark_driver/runner/memory.rb (new file, +150 lines)
@@ -0,0 +1,150 @@
require 'benchmark_driver/struct'
require 'benchmark_driver/metric'
require 'benchmark_driver/default_job'
require 'benchmark_driver/default_job_parser'
require 'tempfile'
require 'shellwords'

# Max resident set size
class BenchmarkDriver::Runner::Memory
  METRIC = BenchmarkDriver::Metric.new(
    name: 'Max resident set size', unit: 'bytes', larger_better: false, worse_word: 'larger',
  )

  # JobParser returns this, `BenchmarkDriver::Runner.runner_for` searches "*::Job"
  Job = Class.new(BenchmarkDriver::DefaultJob)
  # Dynamically fetched and used by `BenchmarkDriver::JobParser.parse`
  JobParser = BenchmarkDriver::DefaultJobParser.for(klass: Job, metrics: [METRIC])

  # @param [BenchmarkDriver::Config::RunnerConfig] config
  # @param [BenchmarkDriver::Output] output
  # @param [BenchmarkDriver::Context] contexts
  def initialize(config:, output:, contexts:)
    @config = config
    @output = output
    @contexts = contexts
  end

  # This method is dynamically called by `BenchmarkDriver::JobRunner.run`
  # @param [Array<BenchmarkDriver::Default::Job>] jobs
  def run(jobs)
    # Currently Linux's time(1) support only...
    case Etc.uname.fetch(:sysname)
    when 'Linux'
      @time_command = ['/usr/bin/time']
    when 'Darwin'
      @time_command = ['/usr/bin/time', '-l']
    else
      raise "memory output is not supported for '#{Etc.uname[:sysname]}' for now"
    end

    if jobs.any? { |job| job.loop_count.nil? }
      jobs = jobs.map do |job|
        job.loop_count ? job : Job.new(job.to_h.merge(loop_count: 1))
      end
    end

    @output.with_benchmark do
      jobs.each do |job|
        @output.with_job(name: job.name) do
          job.runnable_contexts(@contexts).each do |context|
            value = BenchmarkDriver::Repeater.with_repeat(config: @config, larger_better: false) do
              run_benchmark(job, context: context)
            end
            @output.with_context(name: context.name, executable: context.executable, gems: context.gems, prelude: context.prelude) do
              @output.report(values: { METRIC => value }, loop_count: job.loop_count)
            end
          end
        end
      end
    end
  end

  private

  # @param [BenchmarkDriver::Runner::Ips::Job] job - loop_count is not nil
  # @param [BenchmarkDriver::Context] context
  # @return [BenchmarkDriver::Metrics]
  def run_benchmark(job, context:)
    benchmark = BenchmarkScript.new(
      preludes: [context.prelude, job.prelude],
      script: job.script,
      teardown: job.teardown,
      loop_count: job.loop_count,
    )

    with_script(benchmark.render) do |path|
      output = IO.popen([*@time_command, *context.executable.command, path], err: [:child, :out], &:read)
      if $?.success?
        extract_maxresident_from_time_output(output)
      else
        $stdout.print(output)
        BenchmarkDriver::Result::ERROR
      end
    end
  end

  def extract_maxresident_from_time_output(output)
    case Etc.uname.fetch(:sysname)
    when 'Linux'
      pattern = /^(?<user>\d+.\d+)user\s+(?<system>\d+.\d+)system\s+(?<elapsed1>\d+):(?<elapsed2>\d+.\d+)elapsed.+\([^\s]+\s+(?<maxresident>\d+)maxresident\)k$/
      scale = 1000.0 # kilobytes -> bytes
    when 'Darwin'
      pattern = /^\s+(?<real>\d+\.\d+)\s+real\s+(?<user>\d+\.\d+)\s+user\s+(?<system>\d+\.\d+)\s+sys$\s+(?<maxresident>\d+)\s+maximum resident set size$/
      scale = 1.0
    end
    match_data = pattern.match(output)
    raise "Unexpected format given from /usr/bin/time:\n#{out}" unless match_data[:maxresident]
    Integer(match_data[:maxresident]) * scale
  end

  def with_script(script)
    if @config.verbose >= 2
      sep = '-' * 30
      $stdout.puts "\n\n#{sep}[Script begin]#{sep}\n#{script}#{sep}[Script end]#{sep}\n\n"
    end

    Tempfile.open(['benchmark_driver-', '.rb']) do |f|
      f.puts script
      f.close
      return yield(f.path)
    end
  end

  # @param [String] prelude
  # @param [String] script
  # @param [String] teardown
  # @param [Integer] loop_count
  BenchmarkScript = ::BenchmarkDriver::Struct.new(:preludes, :script, :teardown, :loop_count) do
    def render
      prelude = preludes.reject(&:nil?).reject(&:empty?).join("\n")
      <<-RUBY
#{prelude}
#{while_loop(script, loop_count)}
#{teardown}
      RUBY
    end

    private

    def while_loop(content, times)
      if !times.is_a?(Integer) || times <= 0
        raise ArgumentError.new("Unexpected times: #{times.inspect}")
      end

      # TODO: execute in batch
      if times > 1
        <<-RUBY
__bmdv_i = 0
while __bmdv_i < #{times}
  #{content}
  __bmdv_i += 1
end
        RUBY
      else
        content
      end
    end
  end
  private_constant :BenchmarkScript
end
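
For reference, a sketch of how the Linux branch of `extract_maxresident_from_time_output` above behaves on a representative /usr/bin/time line (the sample line and the 9840 figure are invented for illustration; GNU time reports maxresident in kilobytes, hence the 1000.0 scale to bytes):

# Apply the runner's Linux pattern to a made-up line of /usr/bin/time output.
pattern = /^(?<user>\d+.\d+)user\s+(?<system>\d+.\d+)system\s+(?<elapsed1>\d+):(?<elapsed2>\d+.\d+)elapsed.+\([^\s]+\s+(?<maxresident>\d+)maxresident\)k$/

sample = '0.01user 0.00system 0:00.02elapsed 95%CPU (0avgtext+0avgdata 9840maxresident)k'
match  = pattern.match(sample)
bytes  = Integer(match[:maxresident]) * 1000.0  # => 9840000.0, reported under METRIC ('bytes')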