benchmark_driver 0.11.1 → 0.12.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/CHANGELOG.md +6 -0
- data/lib/benchmark_driver/bulk_output.rb +11 -16
- data/lib/benchmark_driver/default_job.rb +1 -0
- data/lib/benchmark_driver/default_job_parser.rb +4 -4
- data/lib/benchmark_driver/metric.rb +35 -27
- data/lib/benchmark_driver/output.rb +21 -15
- data/lib/benchmark_driver/output/compare.rb +30 -30
- data/lib/benchmark_driver/output/markdown.rb +9 -11
- data/lib/benchmark_driver/output/record.rb +13 -15
- data/lib/benchmark_driver/output/simple.rb +9 -11
- data/lib/benchmark_driver/runner.rb +14 -9
- data/lib/benchmark_driver/runner/command_stdout.rb +2 -3
- data/lib/benchmark_driver/runner/ips.rb +7 -9
- data/lib/benchmark_driver/runner/memory.rb +7 -9
- data/lib/benchmark_driver/runner/once.rb +5 -7
- data/lib/benchmark_driver/runner/recorded.rb +15 -19
- data/lib/benchmark_driver/runner/ruby_stdout.rb +3 -18
- data/lib/benchmark_driver/runner/time.rb +3 -3
- data/lib/benchmark_driver/version.rb +1 -1
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA1:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: cc2f6826606613671a2e6b8d0384421924bd9487
|
4
|
+
data.tar.gz: b310edeb8fa144e6eb6569b670ae7291e5a77a70
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: a121e71123b95d5dba0f130f2efa65aaeb9adb42c4e9046703c3afcbd3726a698b7f33e870d3c873a5eda7bbaea20bce2061f87a085118a9d8a6b36b183c6e1a
|
7
|
+
data.tar.gz: 942d159581bf537e62c15b46dba3769c56eb981d98d7d7d1055b872b004cdbc9b85713d2447e009b9b61f53357c45d8a116c19aa730af3541e94af1fe4336303
|
data/CHANGELOG.md
CHANGED
@@ -1,3 +1,9 @@
|
|
1
|
+
# v0.12.0
|
2
|
+
|
3
|
+
- [breaking change] Plugin interface is changed again
|
4
|
+
- Fix bugs in a case that multiple YAMLs with different types are specified
|
5
|
+
- Output plugin is now ensured to yield the same metrics
|
6
|
+
|
1
7
|
# v0.11.1
|
2
8
|
|
3
9
|
- Add `--repeat-result` option to return the best, the worst or an average result with `--repeat-count`
|
@@ -11,18 +11,16 @@ module BenchmarkDriver
|
|
11
11
|
# only `#bulk_output` that takes all inputs at once.
|
12
12
|
class BulkOutput
|
13
13
|
# @param [Array<BenchmarkDriver::Metric>] metrics
|
14
|
-
|
15
|
-
|
16
|
-
|
17
|
-
|
18
|
-
def initialize(job_names:, context_names:)
|
19
|
-
# noop
|
14
|
+
# @param [Array<BenchmarkDriver::Job>] jobs
|
15
|
+
# @param [Array<BenchmarkDriver::Context>] contexts
|
16
|
+
def initialize(metrics:, jobs:, contexts:)
|
17
|
+
@metrics = metrics
|
20
18
|
end
|
21
19
|
|
22
20
|
# The main API you need to override if you make a class inherit `BenchmarkDriver::BulkOutput`.
|
23
21
|
# @param [Hash{ BenchmarkDriver::Job => Hash{ BenchmarkDriver::Context => { BenchmarkDriver::Metric => Float } } }] result
|
24
22
|
# @param [Array<BenchmarkDriver::Metric>] metrics
|
25
|
-
def bulk_output(
|
23
|
+
def bulk_output(job_context_result:, metrics:)
|
26
24
|
raise NotImplementedError.new("#{self.class} must override #bulk_output")
|
27
25
|
end
|
28
26
|
|
@@ -31,13 +29,11 @@ module BenchmarkDriver
|
|
31
29
|
end
|
32
30
|
|
33
31
|
def with_benchmark(&block)
|
34
|
-
@
|
35
|
-
|
36
|
-
h2[context] = {}
|
37
|
-
end
|
32
|
+
@job_context_result = Hash.new do |hash, job|
|
33
|
+
hash[job] = {}
|
38
34
|
end
|
39
35
|
result = block.call
|
40
|
-
bulk_output(
|
36
|
+
bulk_output(job_context_result: @job_context_result, metrics: @metrics)
|
41
37
|
result
|
42
38
|
end
|
43
39
|
|
@@ -53,10 +49,9 @@ module BenchmarkDriver
|
|
53
49
|
block.call
|
54
50
|
end
|
55
51
|
|
56
|
-
# @param [
|
57
|
-
|
58
|
-
|
59
|
-
@result[@job][@context][metric] = value
|
52
|
+
# @param [BenchmarkDriver::Result] result
|
53
|
+
def report(result)
|
54
|
+
@job_context_result[@job][@context] = result
|
60
55
|
end
|
61
56
|
end
|
62
57
|
end
|
@@ -3,6 +3,7 @@ require 'benchmark_driver/struct'
|
|
3
3
|
module BenchmarkDriver
|
4
4
|
DefaultJob = ::BenchmarkDriver::Struct.new(
|
5
5
|
:name, # @param [String] name - This is mandatory for all runner
|
6
|
+
:metrics, # @param [Array<BenchmarkDriver::Metric>] - This is mandatory for all runner too, set by job parser.
|
6
7
|
:script, # @param [String] benchmark
|
7
8
|
:prelude, # @param [String,nil] prelude (optional)
|
8
9
|
:teardown, # @param [String,nil] after (optional)
|
@@ -1,14 +1,13 @@
|
|
1
1
|
module BenchmarkDriver
|
2
2
|
module DefaultJobParser
|
3
3
|
# Build default JobParser for given job klass
|
4
|
-
def self.for(klass)
|
4
|
+
def self.for(klass:, metrics:)
|
5
5
|
Module.new.tap do |parser|
|
6
6
|
class << parser
|
7
7
|
include DefaultJobParser
|
8
8
|
end
|
9
|
-
parser.define_singleton_method(:job_class)
|
10
|
-
|
11
|
-
end
|
9
|
+
parser.define_singleton_method(:job_class) { klass }
|
10
|
+
parser.define_singleton_method(:job_metrics) { metrics }
|
12
11
|
end
|
13
12
|
end
|
14
13
|
|
@@ -21,6 +20,7 @@ module BenchmarkDriver
|
|
21
20
|
# @return [Array<BenchmarkDriver::Default::Job>]
|
22
21
|
def parse(prelude: nil, benchmark:, teardown: nil, loop_count: nil, required_ruby_version: nil)
|
23
22
|
parse_benchmark(benchmark).each do |job|
|
23
|
+
job.metrics = job_metrics
|
24
24
|
job.prelude.prepend("#{prelude}\n") if prelude
|
25
25
|
job.teardown.prepend("#{teardown}\n") if teardown
|
26
26
|
job.loop_count ||= loop_count
|
@@ -4,57 +4,65 @@ require 'benchmark_driver/struct'
|
|
4
4
|
module BenchmarkDriver
|
5
5
|
# BenchmarkDriver returns benchmark results with the following nested Hash structure:
|
6
6
|
# {
|
7
|
-
# BenchmarkDriver::Job => {
|
8
|
-
# BenchmarkDriver::Context =>
|
9
|
-
#
|
10
|
-
#
|
7
|
+
# #<BenchmarkDriver::Job> => {
|
8
|
+
# #<BenchmarkDriver::Context> => #<BenchmarkDriver::Result
|
9
|
+
# metrics: {
|
10
|
+
# #<BenchmarkDriver::Metric> => Float
|
11
|
+
# }
|
12
|
+
# >
|
11
13
|
# }
|
12
14
|
# }
|
13
15
|
|
14
|
-
#
|
15
|
-
|
16
|
-
:name,
|
17
|
-
:unit, # @param [String] - A unit like "MiB"
|
18
|
-
:larger_better, # @param [TrueClass,FalseClass] - If true, larger value is preferred when measured multiple times.
|
19
|
-
:worse_word, # @param [String] - A label shown when the value is worse.
|
20
|
-
defaults: { larger_better: true, worse_word: 'slower' },
|
16
|
+
# Holding identifier of measured workload
|
17
|
+
Job = ::BenchmarkDriver::Struct.new(
|
18
|
+
:name, # @param [String] - Name of the benchmark task
|
21
19
|
)
|
22
20
|
|
23
|
-
# Benchmark conditions
|
21
|
+
# Benchmark conditions that can be known before running benchmark
|
24
22
|
Context = ::BenchmarkDriver::Struct.new(
|
25
23
|
:name, # @param [String] - Name of the context
|
26
24
|
:executable, # @param [BenchmarkDriver::Config::Executable] - Measured Ruby executable
|
27
25
|
:gems, # @param [Hash{ String => String,nil }] - Gem -> version pairs used for the benchmark
|
28
26
|
:prelude, # @param [String,nil] - Context specific setup script (optional)
|
27
|
+
defaults: { gems: {} },
|
28
|
+
)
|
29
|
+
|
30
|
+
# Everything that can be known after running benchmark
|
31
|
+
Result = ::BenchmarkDriver::Struct.new(
|
32
|
+
:values, # @param [Hash{ BenchmarkDriver::Metric => Float }] - Main benchmark results
|
29
33
|
:duration, # @param [Float,nil] - Time taken to run the benchmark job (optional)
|
30
34
|
:loop_count, # @param [Integer,nil] - Times to run the benchmark job (optional)
|
31
35
|
:environment, # @param [Hash] - Any other key -> value pairs to express the benchmark context
|
32
|
-
defaults: {
|
36
|
+
defaults: { environment: {} },
|
33
37
|
)
|
34
38
|
|
35
|
-
#
|
36
|
-
|
37
|
-
:name,
|
39
|
+
# A kind of thing to be measured
|
40
|
+
Metric = ::BenchmarkDriver::Struct.new(
|
41
|
+
:name, # @param [String] - Metric name or description like "Max Resident Set Size"
|
42
|
+
:unit, # @param [String] - A unit like "MiB"
|
43
|
+
:larger_better, # @param [TrueClass,FalseClass] - If true, larger value is preferred when measured multiple times.
|
44
|
+
:worse_word, # @param [String] - A label shown when the value is worse.
|
45
|
+
defaults: { larger_better: true, worse_word: 'slower' },
|
38
46
|
)
|
39
47
|
|
40
|
-
#=[RubyBench mapping]
|
48
|
+
#=[RubyBench mapping]=======================================|
|
41
49
|
#
|
42
50
|
# BenchmarkRun:
|
43
|
-
# result -> { context.name => value }
|
44
|
-
# environment ->
|
45
|
-
# initiator -> (not supported)
|
51
|
+
# result -> { context.name => result.value } | { "default"=>"44.666666666666664", "default_jit"=>"59.333333333333336" }
|
52
|
+
# environment -> result.environment | "---\nRuby version: 'ruby 2.6.0dev (2018-05-14 trunk 63417) [x86_64-linux]\n\n'\nChecksum: '59662'\n"
|
53
|
+
# initiator -> (not supported) | #<Commit sha1: "6f0de6ed9...", message: "error.c: check redefined ...", url: "https://github.com/tgxworld/ruby/commit/6f0de6ed98...", repo_id: 6>
|
46
54
|
#
|
47
55
|
# BenchmarkType:
|
48
|
-
# category -> job.name
|
49
|
-
# script_url -> (not supported)
|
50
|
-
# repo -> (not supported)
|
51
|
-
# repo.organization -> (not supported)
|
56
|
+
# category -> job.name | "app_erb", "Optcarrot Lan_Master.nes"
|
57
|
+
# script_url -> (not supported) | "https://raw.githubusercontent.com/mame/optcarrot/master/lib/optcarrot/nes.rb"
|
58
|
+
# repo -> (not supported) | #<Repo name: "ruby", url: "https://github.com/tgxworld/ruby">
|
59
|
+
# repo.organization -> (not supported) | #<Organization name: "ruby", url: "https://github.com/tgxworld/">
|
52
60
|
#
|
53
61
|
# BenchmarkResultType:
|
54
|
-
# name -> metric.name
|
55
|
-
# unit -> metric.unit
|
62
|
+
# name -> metric.name | "Number of frames"
|
63
|
+
# unit -> metric.unit | "fps"
|
56
64
|
#
|
57
|
-
|
65
|
+
#===========================================================|
|
58
66
|
|
59
67
|
#----
|
60
68
|
# legacy
|
@@ -8,12 +8,12 @@ module BenchmarkDriver
|
|
8
8
|
# metrics=
|
9
9
|
# with_warmup
|
10
10
|
# with_job(name:)
|
11
|
-
# with_context(name:, executable
|
12
|
-
# report(
|
11
|
+
# with_context(name:, executable:)
|
12
|
+
# report(values:, duration: nil, loop_count: nil, environment: {})
|
13
13
|
# with_benchmark
|
14
14
|
# with_job(name:)
|
15
|
-
# with_context(name:, executable
|
16
|
-
# report(
|
15
|
+
# with_context(name:, executable:)
|
16
|
+
# report(values:, duration: nil, loop_count: nil, environment: {})
|
17
17
|
class Output
|
18
18
|
require 'benchmark_driver/output/compare'
|
19
19
|
require 'benchmark_driver/output/markdown'
|
@@ -24,9 +24,10 @@ module BenchmarkDriver
|
|
24
24
|
# Create `BenchmarkDriver::Output::Foo` as benchmark_driver-output-foo.gem and specify `-o foo`.
|
25
25
|
#
|
26
26
|
# @param [String] type
|
27
|
-
# @param [Array<
|
28
|
-
# @param [Array<
|
29
|
-
|
27
|
+
# @param [Array<BenchmarkDriver::Metric>] metrics
|
28
|
+
# @param [Array<BenchmarkDriver::Job>] jobs
|
29
|
+
# @param [Array<BenchmarkDriver::Context>] contexts
|
30
|
+
def initialize(type:, metrics:, jobs:, contexts:)
|
30
31
|
if type.include?(':')
|
31
32
|
raise ArgumentError.new("Output type '#{type}' cannot contain ':'")
|
32
33
|
end
|
@@ -35,8 +36,9 @@ module BenchmarkDriver
|
|
35
36
|
camelized = type.split('_').map(&:capitalize).join
|
36
37
|
|
37
38
|
@output = ::BenchmarkDriver::Output.const_get(camelized, false).new(
|
38
|
-
|
39
|
-
|
39
|
+
metrics: metrics,
|
40
|
+
jobs: jobs,
|
41
|
+
contexts: contexts,
|
40
42
|
)
|
41
43
|
end
|
42
44
|
|
@@ -65,10 +67,8 @@ module BenchmarkDriver
|
|
65
67
|
# @param [BenchmarkDriver::Config::Executable] executable
|
66
68
|
# @param [Float] duration
|
67
69
|
# @param [Integer] loop_count
|
68
|
-
def with_context(name:, executable:,
|
69
|
-
context = BenchmarkDriver::Context.new(
|
70
|
-
name: name, executable: executable, duration: duration, loop_count: loop_count, environment: environment,
|
71
|
-
)
|
70
|
+
def with_context(name:, executable:, &block)
|
71
|
+
context = BenchmarkDriver::Context.new(name: name, executable: executable)
|
72
72
|
@output.with_context(context) do
|
73
73
|
block.call
|
74
74
|
end
|
@@ -76,8 +76,14 @@ module BenchmarkDriver
|
|
76
76
|
|
77
77
|
# @param [Float] value
|
78
78
|
# @param [BenchmarkDriver::Metric] metric
|
79
|
-
def report(
|
80
|
-
|
79
|
+
def report(values:, duration: nil, loop_count: nil, environment: {})
|
80
|
+
result = BenchmarkDriver::Result.new(
|
81
|
+
values: values,
|
82
|
+
duration: duration,
|
83
|
+
loop_count: loop_count,
|
84
|
+
environment: environment,
|
85
|
+
)
|
86
|
+
@output.report(result)
|
81
87
|
end
|
82
88
|
end
|
83
89
|
end
|
@@ -3,14 +3,13 @@ class BenchmarkDriver::Output::Compare
|
|
3
3
|
NAME_LENGTH = 20
|
4
4
|
|
5
5
|
# @param [Array<BenchmarkDriver::Metric>] metrics
|
6
|
-
|
7
|
-
|
8
|
-
|
9
|
-
|
10
|
-
|
11
|
-
@
|
12
|
-
@
|
13
|
-
@name_length = [job_names.map(&:length).max, NAME_LENGTH].max
|
6
|
+
# @param [Array<BenchmarkDriver::Job>] jobs
|
7
|
+
# @param [Array<BenchmarkDriver::Context>] contexts
|
8
|
+
def initialize(metrics:, jobs:, contexts:)
|
9
|
+
@metrics = metrics
|
10
|
+
@job_names = jobs.map(&:name)
|
11
|
+
@context_names = contexts.map(&:name)
|
12
|
+
@name_length = [@job_names.map(&:length).max, NAME_LENGTH].max
|
14
13
|
end
|
15
14
|
|
16
15
|
def with_warmup(&block)
|
@@ -22,8 +21,8 @@ class BenchmarkDriver::Output::Compare
|
|
22
21
|
end
|
23
22
|
|
24
23
|
def with_benchmark(&block)
|
25
|
-
@
|
26
|
-
|
24
|
+
@job_context_result = Hash.new do |hash, job|
|
25
|
+
hash[job] = {}
|
27
26
|
end
|
28
27
|
|
29
28
|
without_stdout_buffering do
|
@@ -55,14 +54,15 @@ class BenchmarkDriver::Output::Compare
|
|
55
54
|
$stdout.print("%#{@name_length}s" % name)
|
56
55
|
end
|
57
56
|
@job = name
|
57
|
+
@job_results = []
|
58
58
|
@job_contexts = []
|
59
59
|
block.call
|
60
60
|
ensure
|
61
61
|
$stdout.print(@metrics.first.unit)
|
62
|
-
loop_count = @
|
63
|
-
if loop_count && @
|
62
|
+
loop_count = @job_results.first.loop_count
|
63
|
+
if loop_count && @job_results.all? { |r| r.loop_count == loop_count }
|
64
64
|
$stdout.print(" - #{humanize(loop_count)} times")
|
65
|
-
if @
|
65
|
+
if @job_results.all? { |result| !result.duration.nil? }
|
66
66
|
$stdout.print(" in")
|
67
67
|
show_durations
|
68
68
|
end
|
@@ -77,28 +77,28 @@ class BenchmarkDriver::Output::Compare
|
|
77
77
|
block.call
|
78
78
|
end
|
79
79
|
|
80
|
-
# @param [
|
81
|
-
|
82
|
-
|
83
|
-
if defined?(@
|
84
|
-
@
|
80
|
+
# @param [BenchmarkDriver::Result] result
|
81
|
+
def report(result)
|
82
|
+
@job_results << result
|
83
|
+
if defined?(@job_context_result)
|
84
|
+
@job_context_result[@job][@context] = result
|
85
85
|
end
|
86
86
|
|
87
|
-
$stdout.print("#{humanize(
|
87
|
+
$stdout.print("#{humanize(result.values.values.first, [10, @context.name.length].max)} ")
|
88
88
|
end
|
89
89
|
|
90
90
|
private
|
91
91
|
|
92
92
|
def show_durations
|
93
|
-
@
|
94
|
-
$stdout.print(' %3.6fs' %
|
93
|
+
@job_results.each do |result|
|
94
|
+
$stdout.print(' %3.6fs' % result.duration)
|
95
95
|
end
|
96
96
|
|
97
97
|
# Show pretty seconds / clocks too. As it takes long width, it's shown only with a single executable.
|
98
|
-
if @
|
99
|
-
|
100
|
-
sec =
|
101
|
-
iter =
|
98
|
+
if @job_results.size == 1
|
99
|
+
result = @job_results.first
|
100
|
+
sec = result.duration
|
101
|
+
iter = result.loop_count
|
102
102
|
if File.exist?('/proc/cpuinfo') && (clks = estimate_clock(sec, iter)) < 1_000
|
103
103
|
$stdout.print(" (#{pretty_sec(sec, iter)}/i, #{clks}clocks/i)")
|
104
104
|
else
|
@@ -160,8 +160,8 @@ class BenchmarkDriver::Output::Compare
|
|
160
160
|
|
161
161
|
def compare_jobs
|
162
162
|
$stdout.puts "\nComparison:"
|
163
|
-
results = @
|
164
|
-
|
163
|
+
results = @job_context_result.flat_map do |job, context_result|
|
164
|
+
context_result.map { |context, result| Result.new(job: job, value: result.values.values.first, executable: context.executable) }
|
165
165
|
end
|
166
166
|
show_results(results, show_executable: false)
|
167
167
|
end
|
@@ -169,10 +169,10 @@ class BenchmarkDriver::Output::Compare
|
|
169
169
|
def compare_executables
|
170
170
|
$stdout.puts "\nComparison:"
|
171
171
|
|
172
|
-
@
|
172
|
+
@job_context_result.each do |job, context_result|
|
173
173
|
$stdout.puts("%#{@name_length + 2 + 11}s" % job)
|
174
|
-
results =
|
175
|
-
values.map { |value| Result.new(job: job, value: value, executable: context.executable) }
|
174
|
+
results = context_result.flat_map do |context, result|
|
175
|
+
result.values.values.map { |value| Result.new(job: job, value: value, executable: context.executable) }
|
176
176
|
end
|
177
177
|
show_results(results, show_executable: true)
|
178
178
|
end
|
@@ -2,13 +2,12 @@ class BenchmarkDriver::Output::Markdown
|
|
2
2
|
NAME_LENGTH = 8
|
3
3
|
|
4
4
|
# @param [Array<BenchmarkDriver::Metric>] metrics
|
5
|
-
|
6
|
-
|
7
|
-
|
8
|
-
|
9
|
-
|
10
|
-
@
|
11
|
-
@name_length = job_names.map(&:size).max
|
5
|
+
# @param [Array<BenchmarkDriver::Job>] jobs
|
6
|
+
# @param [Array<BenchmarkDriver::Context>] contexts
|
7
|
+
def initialize(metrics:, jobs:, contexts:)
|
8
|
+
@metrics = metrics
|
9
|
+
@context_names = contexts.map(&:name)
|
10
|
+
@name_length = jobs.map(&:name).map(&:size).max
|
12
11
|
end
|
13
12
|
|
14
13
|
def with_warmup(&block)
|
@@ -63,11 +62,10 @@ class BenchmarkDriver::Output::Markdown
|
|
63
62
|
block.call
|
64
63
|
end
|
65
64
|
|
66
|
-
# @param [
|
67
|
-
|
68
|
-
def report(value:, metric:)
|
65
|
+
# @param [BenchmarkDriver::Result] result
|
66
|
+
def report(result)
|
69
67
|
if @with_benchmark
|
70
|
-
$stdout.print("|%#{NAME_LENGTH}s" % humanize(
|
68
|
+
$stdout.print("|%#{NAME_LENGTH}s" % humanize(result.values.fetch(@metrics.first)))
|
71
69
|
else
|
72
70
|
$stdout.print '.'
|
73
71
|
end
|
@@ -1,14 +1,13 @@
|
|
1
1
|
class BenchmarkDriver::Output::Record
|
2
2
|
# @param [Array<BenchmarkDriver::Metric>] metrics
|
3
|
-
|
4
|
-
|
5
|
-
|
6
|
-
|
7
|
-
|
8
|
-
|
9
|
-
|
10
|
-
|
11
|
-
h3[k3] = {}
|
3
|
+
# @param [Array<BenchmarkDriver::Job>] jobs
|
4
|
+
# @param [Array<BenchmarkDriver::Context>] contexts
|
5
|
+
def initialize(metrics:, jobs:, contexts:)
|
6
|
+
@metrics = metrics
|
7
|
+
@job_warmup_context_result = Hash.new do |h1, job|
|
8
|
+
h1[job] = Hash.new do |h2, warmup|
|
9
|
+
h2[warmup] = Hash.new do |h3, context|
|
10
|
+
h3[context] = {}
|
12
11
|
end
|
13
12
|
end
|
14
13
|
end
|
@@ -33,7 +32,7 @@ class BenchmarkDriver::Output::Record
|
|
33
32
|
|
34
33
|
# @param [BenchmarkDriver::Job] job
|
35
34
|
def with_job(job, &block)
|
36
|
-
@job = job
|
35
|
+
@job = job
|
37
36
|
block.call
|
38
37
|
end
|
39
38
|
|
@@ -43,11 +42,10 @@ class BenchmarkDriver::Output::Record
|
|
43
42
|
block.call
|
44
43
|
end
|
45
44
|
|
46
|
-
# @param [
|
47
|
-
|
48
|
-
def report(value:, metric:)
|
45
|
+
# @param [BenchmarkDriver::Result] result
|
46
|
+
def report(result)
|
49
47
|
$stdout.print '.'
|
50
|
-
@
|
48
|
+
@job_warmup_context_result[@job][!@with_benchmark][@context] = result
|
51
49
|
end
|
52
50
|
|
53
51
|
private
|
@@ -56,7 +54,7 @@ class BenchmarkDriver::Output::Record
|
|
56
54
|
jobs = @benchmark_metrics
|
57
55
|
yaml = {
|
58
56
|
'type' => 'recorded',
|
59
|
-
'
|
57
|
+
'job_warmup_context_result' => @job_warmup_context_result,
|
60
58
|
'metrics' => @metrics,
|
61
59
|
}.to_yaml
|
62
60
|
File.write('benchmark_driver.record.yml', yaml)
|
@@ -2,13 +2,12 @@ class BenchmarkDriver::Output::Simple
|
|
2
2
|
NAME_LENGTH = 8
|
3
3
|
|
4
4
|
# @param [Array<BenchmarkDriver::Metric>] metrics
|
5
|
-
|
6
|
-
|
7
|
-
|
8
|
-
|
9
|
-
|
10
|
-
@
|
11
|
-
@name_length = job_names.map(&:size).max
|
5
|
+
# @param [Array<BenchmarkDriver::Job>] jobs
|
6
|
+
# @param [Array<BenchmarkDriver::Context>] contexts
|
7
|
+
def initialize(metrics:, jobs:, contexts:)
|
8
|
+
@metrics = metrics
|
9
|
+
@context_names = contexts.map(&:name)
|
10
|
+
@name_length = jobs.map(&:name).map(&:size).max
|
12
11
|
end
|
13
12
|
|
14
13
|
def with_warmup(&block)
|
@@ -58,11 +57,10 @@ class BenchmarkDriver::Output::Simple
|
|
58
57
|
block.call
|
59
58
|
end
|
60
59
|
|
61
|
-
# @param [
|
62
|
-
|
63
|
-
def report(value:, metric:)
|
60
|
+
# @param [BenchmarkDriver::Result] result
|
61
|
+
def report(result)
|
64
62
|
if @with_benchmark
|
65
|
-
$stdout.print("%#{NAME_LENGTH}s " % humanize(
|
63
|
+
$stdout.print("%#{NAME_LENGTH}s " % humanize(result.values.fetch(@metrics.first)))
|
66
64
|
else
|
67
65
|
$stdout.print '.'
|
68
66
|
end
|
@@ -28,15 +28,20 @@ module BenchmarkDriver
|
|
28
28
|
verbose: config.verbose,
|
29
29
|
)
|
30
30
|
|
31
|
-
jobs.group_by(&:class).each do |klass,
|
32
|
-
|
33
|
-
|
34
|
-
|
35
|
-
|
36
|
-
|
37
|
-
|
38
|
-
|
39
|
-
|
31
|
+
jobs.group_by(&:class).each do |klass, klass_jobs|
|
32
|
+
klass_jobs.group_by(&:metrics).each do |metrics, metrics_jobs|
|
33
|
+
runner = runner_for(klass)
|
34
|
+
output = Output.new(
|
35
|
+
type: config.output_type,
|
36
|
+
metrics: metrics,
|
37
|
+
jobs: jobs.map { |job| BenchmarkDriver::Job.new(name: job.name) },
|
38
|
+
contexts: config.executables.map { |exec|
|
39
|
+
BenchmarkDriver::Context.new(name: exec.name, executable: exec)
|
40
|
+
},
|
41
|
+
)
|
42
|
+
with_clean_env do
|
43
|
+
runner.new(config: runner_config, output: output).run(metrics_jobs)
|
44
|
+
end
|
40
45
|
end
|
41
46
|
end
|
42
47
|
end
|
@@ -9,9 +9,9 @@ class BenchmarkDriver::Runner::CommandStdout
|
|
9
9
|
# JobParser returns this, `BenchmarkDriver::Runner.runner_for` searches "*::Job"
|
10
10
|
Job = ::BenchmarkDriver::Struct.new(
|
11
11
|
:name, # @param [String] name - This is mandatory for all runner
|
12
|
+
:metrics, # @param [Array<BenchmarkDriver::Metric>]
|
12
13
|
:command, # @param [Array<String>]
|
13
14
|
:working_directory, # @param [String,NilClass]
|
14
|
-
:metrics, # @param [Array<BenchmarkDriver::Metric>]
|
15
15
|
:stdout_to_metrics, # @param [String]
|
16
16
|
)
|
17
17
|
# Dynamically fetched and used by `BenchmarkDriver::JobParser.parse`
|
@@ -56,7 +56,6 @@ class BenchmarkDriver::Runner::CommandStdout
|
|
56
56
|
# @param [Array<BenchmarkDriver::Default::Job>] jobs
|
57
57
|
def run(jobs)
|
58
58
|
metric = jobs.first.metrics.first
|
59
|
-
@output.metrics = [metric]
|
60
59
|
|
61
60
|
@output.with_benchmark do
|
62
61
|
jobs.each do |job|
|
@@ -73,7 +72,7 @@ class BenchmarkDriver::Runner::CommandStdout
|
|
73
72
|
end
|
74
73
|
|
75
74
|
@output.with_context(name: exec.name, executable: exec) do
|
76
|
-
@output.report(
|
75
|
+
@output.report(values: { metric => value })
|
77
76
|
end
|
78
77
|
end
|
79
78
|
end
|
@@ -7,12 +7,12 @@ require 'shellwords'
|
|
7
7
|
|
8
8
|
# Show iteration per second.
|
9
9
|
class BenchmarkDriver::Runner::Ips
|
10
|
+
METRIC = BenchmarkDriver::Metric.new(name: 'Iteration per second', unit: 'i/s')
|
11
|
+
|
10
12
|
# JobParser returns this, `BenchmarkDriver::Runner.runner_for` searches "*::Job"
|
11
13
|
Job = Class.new(BenchmarkDriver::DefaultJob)
|
12
14
|
# Dynamically fetched and used by `BenchmarkDriver::JobParser.parse`
|
13
|
-
JobParser = BenchmarkDriver::DefaultJobParser.for(Job)
|
14
|
-
|
15
|
-
METRIC = BenchmarkDriver::Metric.new(name: 'Iteration per second', unit: 'i/s')
|
15
|
+
JobParser = BenchmarkDriver::DefaultJobParser.for(klass: Job, metrics: [METRIC])
|
16
16
|
|
17
17
|
# @param [BenchmarkDriver::Config::RunnerConfig] config
|
18
18
|
# @param [BenchmarkDriver::Output] output
|
@@ -24,8 +24,6 @@ class BenchmarkDriver::Runner::Ips
|
|
24
24
|
# This method is dynamically called by `BenchmarkDriver::JobRunner.run`
|
25
25
|
# @param [Array<BenchmarkDriver::Default::Job>] jobs
|
26
26
|
def run(jobs)
|
27
|
-
@output.metrics = [metric]
|
28
|
-
|
29
27
|
if jobs.any? { |job| job.loop_count.nil? }
|
30
28
|
@output.with_warmup do
|
31
29
|
jobs = jobs.map do |job|
|
@@ -36,8 +34,8 @@ class BenchmarkDriver::Runner::Ips
|
|
36
34
|
duration, loop_count = run_warmup(job, exec: executable)
|
37
35
|
value, duration = value_duration(duration: duration, loop_count: loop_count)
|
38
36
|
|
39
|
-
@output.with_context(name: executable.name, executable: executable
|
40
|
-
@output.report(
|
37
|
+
@output.with_context(name: executable.name, executable: executable) do
|
38
|
+
@output.report(values: { metric => value }, duration: duration, loop_count: loop_count)
|
41
39
|
end
|
42
40
|
|
43
41
|
loop_count = (loop_count.to_f * @config.run_duration / duration).floor
|
@@ -55,8 +53,8 @@ class BenchmarkDriver::Runner::Ips
|
|
55
53
|
value, duration = BenchmarkDriver::Repeater.with_repeat(repeat_params) do
|
56
54
|
run_benchmark(job, exec: exec)
|
57
55
|
end
|
58
|
-
@output.with_context(name: exec.name, executable: exec
|
59
|
-
@output.report(
|
56
|
+
@output.with_context(name: exec.name, executable: exec) do
|
57
|
+
@output.report(values: { metric => value }, duration: duration, loop_count: job.loop_count)
|
60
58
|
end
|
61
59
|
end
|
62
60
|
end
|
@@ -7,15 +7,15 @@ require 'shellwords'
|
|
7
7
|
|
8
8
|
# Max resident set size
|
9
9
|
class BenchmarkDriver::Runner::Memory
|
10
|
-
# JobParser returns this, `BenchmarkDriver::Runner.runner_for` searches "*::Job"
|
11
|
-
Job = Class.new(BenchmarkDriver::DefaultJob)
|
12
|
-
# Dynamically fetched and used by `BenchmarkDriver::JobParser.parse`
|
13
|
-
JobParser = BenchmarkDriver::DefaultJobParser.for(Job)
|
14
|
-
|
15
10
|
METRIC = BenchmarkDriver::Metric.new(
|
16
11
|
name: 'Max resident set size', unit: 'bytes', larger_better: false, worse_word: 'larger',
|
17
12
|
)
|
18
13
|
|
14
|
+
# JobParser returns this, `BenchmarkDriver::Runner.runner_for` searches "*::Job"
|
15
|
+
Job = Class.new(BenchmarkDriver::DefaultJob)
|
16
|
+
# Dynamically fetched and used by `BenchmarkDriver::JobParser.parse`
|
17
|
+
JobParser = BenchmarkDriver::DefaultJobParser.for(klass: Job, metrics: [METRIC])
|
18
|
+
|
19
19
|
# @param [BenchmarkDriver::Config::RunnerConfig] config
|
20
20
|
# @param [BenchmarkDriver::Output] output
|
21
21
|
def initialize(config:, output:)
|
@@ -31,8 +31,6 @@ class BenchmarkDriver::Runner::Memory
|
|
31
31
|
raise "memory output is not supported for '#{Etc.uname[:sysname]}' for now"
|
32
32
|
end
|
33
33
|
|
34
|
-
@output.metrics = [METRIC]
|
35
|
-
|
36
34
|
if jobs.any? { |job| job.loop_count.nil? }
|
37
35
|
jobs = jobs.map do |job|
|
38
36
|
job.loop_count ? job : Job.new(job.to_h.merge(loop_count: 1))
|
@@ -46,8 +44,8 @@ class BenchmarkDriver::Runner::Memory
|
|
46
44
|
value = BenchmarkDriver::Repeater.with_repeat(config: @config, larger_better: false) do
|
47
45
|
run_benchmark(job, exec: exec)
|
48
46
|
end
|
49
|
-
@output.with_context(name: exec.name, executable: exec
|
50
|
-
@output.report(
|
47
|
+
@output.with_context(name: exec.name, executable: exec) do
|
48
|
+
@output.report(values: { METRIC => value }, loop_count: job.loop_count)
|
51
49
|
end
|
52
50
|
end
|
53
51
|
end
|
@@ -7,12 +7,12 @@ require 'shellwords'
|
|
7
7
|
|
8
8
|
# Run only once, for testing
|
9
9
|
class BenchmarkDriver::Runner::Once
|
10
|
+
METRIC = BenchmarkDriver::Metric.new(name: 'Iteration per second', unit: 'i/s')
|
11
|
+
|
10
12
|
# JobParser returns this, `BenchmarkDriver::Runner.runner_for` searches "*::Job"
|
11
13
|
Job = Class.new(BenchmarkDriver::DefaultJob)
|
12
14
|
# Dynamically fetched and used by `BenchmarkDriver::JobParser.parse`
|
13
|
-
JobParser = BenchmarkDriver::DefaultJobParser.for(Job)
|
14
|
-
|
15
|
-
METRIC = BenchmarkDriver::Metric.new(name: 'Iteration per second', unit: 'i/s')
|
15
|
+
JobParser = BenchmarkDriver::DefaultJobParser.for(klass: Job, metrics: [METRIC])
|
16
16
|
|
17
17
|
# @param [BenchmarkDriver::Config::RunnerConfig] config
|
18
18
|
# @param [BenchmarkDriver::Output] output
|
@@ -24,8 +24,6 @@ class BenchmarkDriver::Runner::Once
|
|
24
24
|
# This method is dynamically called by `BenchmarkDriver::JobRunner.run`
|
25
25
|
# @param [Array<BenchmarkDriver::Default::Job>] jobs
|
26
26
|
def run(jobs)
|
27
|
-
@output.metrics = [METRIC]
|
28
|
-
|
29
27
|
jobs = jobs.map do |job|
|
30
28
|
Job.new(job.to_h.merge(loop_count: 1)) # to show this on output
|
31
29
|
end
|
@@ -35,8 +33,8 @@ class BenchmarkDriver::Runner::Once
|
|
35
33
|
@output.with_job(name: job.name) do
|
36
34
|
job.runnable_execs(@config.executables).each do |exec|
|
37
35
|
duration = run_benchmark(job, exec: exec) # no repeat support
|
38
|
-
@output.with_context(name: exec.name, executable: exec
|
39
|
-
@output.report(
|
36
|
+
@output.with_context(name: exec.name, executable: exec) do
|
37
|
+
@output.report(values: { METRIC => 1.0 / duration }, duration: duration, loop_count: 1)
|
40
38
|
end
|
41
39
|
end
|
42
40
|
end
|
@@ -8,20 +8,20 @@ class BenchmarkDriver::Runner::Recorded
|
|
8
8
|
# JobParser returns this, `BenchmarkDriver::Runner.runner_for` searches "*::Job"
|
9
9
|
Job = ::BenchmarkDriver::Struct.new(
|
10
10
|
:name, # @param [String] name - This is mandatory for all runner
|
11
|
+
:metrics, # @param [Array<BenchmarkDriver::Metric>]
|
11
12
|
:warmup_results, # @param [Hash{ BenchmarkDriver::Context => Array<BenchmarkDriver::Metric> } }]
|
12
13
|
:benchmark_results, # @param [Hash{ BenchmarkDriver::Context => Array<BenchmarkDriver::Metric> } }]
|
13
|
-
:metrics, # @param [Array<BenchmarkDriver::Metric>]
|
14
14
|
)
|
15
15
|
# Dynamically fetched and used by `BenchmarkDriver::JobParser.parse`
|
16
16
|
class << JobParser = Module.new
|
17
|
-
# @param [Hash{
|
17
|
+
# @param [Hash{ BenchmarkDriver::Job => Hash{ TrueClass,FalseClass => Hash{ BenchmarkDriver::Context => BenchmarkDriver::Result } } }] job_warmup_context_result
|
18
18
|
# @param [BenchmarkDriver::Metrics::Type] metrics
|
19
|
-
def parse(
|
20
|
-
|
19
|
+
def parse(job_warmup_context_result:, metrics:)
|
20
|
+
job_warmup_context_result.map do |job, warmup_context_result|
|
21
21
|
Job.new(
|
22
|
-
name:
|
23
|
-
warmup_results:
|
24
|
-
benchmark_results:
|
22
|
+
name: job.name,
|
23
|
+
warmup_results: warmup_context_result.fetch(true, {}),
|
24
|
+
benchmark_results: warmup_context_result.fetch(false, {}),
|
25
25
|
metrics: metrics,
|
26
26
|
)
|
27
27
|
end
|
@@ -38,8 +38,6 @@ class BenchmarkDriver::Runner::Recorded
|
|
38
38
|
# This method is dynamically called by `BenchmarkDriver::JobRunner.run`
|
39
39
|
# @param [Array<BenchmarkDriver::Runner::Recorded::Job>] records
|
40
40
|
def run(records)
|
41
|
-
@output.metrics = records.first.metrics
|
42
|
-
|
43
41
|
records.each do |record|
|
44
42
|
unless record.warmup_results.empty?
|
45
43
|
# TODO:
|
@@ -49,16 +47,14 @@ class BenchmarkDriver::Runner::Recorded
|
|
49
47
|
@output.with_benchmark do
|
50
48
|
records.each do |record|
|
51
49
|
@output.with_job(name: record.name) do
|
52
|
-
record.benchmark_results.each do |context,
|
53
|
-
@output.with_context(
|
54
|
-
|
55
|
-
|
56
|
-
|
57
|
-
|
58
|
-
|
59
|
-
|
60
|
-
@output.report(value: value, metric: metric)
|
61
|
-
end
|
50
|
+
record.benchmark_results.each do |context, result|
|
51
|
+
@output.with_context(name: context.name, executable: context.executable) do
|
52
|
+
@output.report(
|
53
|
+
values: result.values,
|
54
|
+
duration: result.duration,
|
55
|
+
loop_count: result.loop_count,
|
56
|
+
environment: result.environment,
|
57
|
+
)
|
62
58
|
end
|
63
59
|
end
|
64
60
|
end
|
@@ -9,9 +9,9 @@ class BenchmarkDriver::Runner::RubyStdout
|
|
9
9
|
# JobParser returns this, `BenchmarkDriver::Runner.runner_for` searches "*::Job"
|
10
10
|
Job = ::BenchmarkDriver::Struct.new(
|
11
11
|
:name, # @param [String] name - This is mandatory for all runner
|
12
|
+
:metrics, # @param [Array<BenchmarkDriver::Metric>]
|
12
13
|
:command, # @param [Array<String>]
|
13
14
|
:working_directory, # @param [String,NilClass]
|
14
|
-
:metrics, # @param [Array<BenchmarkDriver::Metric>]
|
15
15
|
:value_from_stdout, # @param [String]
|
16
16
|
:environment_from_stdout # @param [Hash{ String => String }]
|
17
17
|
)
|
@@ -73,7 +73,6 @@ class BenchmarkDriver::Runner::RubyStdout
|
|
73
73
|
# @param [Array<BenchmarkDriver::Default::Job>] jobs
|
74
74
|
def run(jobs)
|
75
75
|
metric = jobs.first.metrics.first
|
76
|
-
@output.metrics = [metric]
|
77
76
|
|
78
77
|
@output.with_benchmark do
|
79
78
|
jobs.each do |job|
|
@@ -92,8 +91,8 @@ class BenchmarkDriver::Runner::RubyStdout
|
|
92
91
|
[script.value, script.environment]
|
93
92
|
end
|
94
93
|
|
95
|
-
@output.with_context(name: exec.name, executable: exec
|
96
|
-
@output.report(
|
94
|
+
@output.with_context(name: exec.name, executable: exec) do
|
95
|
+
@output.report(values: { metric => value }, environment: environment)
|
97
96
|
end
|
98
97
|
end
|
99
98
|
end
|
@@ -127,20 +126,6 @@ class BenchmarkDriver::Runner::RubyStdout
|
|
127
126
|
stdout
|
128
127
|
end
|
129
128
|
|
130
|
-
# Run multiple times and return the best metrics
|
131
|
-
def with_repeat(metric, &block)
|
132
|
-
value_environments = @config.repeat_count.times.map do
|
133
|
-
block.call
|
134
|
-
end
|
135
|
-
value_environments.sort_by do |value, _|
|
136
|
-
if metric.larger_better
|
137
|
-
value
|
138
|
-
else
|
139
|
-
-value
|
140
|
-
end
|
141
|
-
end.last
|
142
|
-
end
|
143
|
-
|
144
129
|
StdoutToMetrics = ::BenchmarkDriver::Struct.new(:stdout, :value_from_stdout, :environment_from_stdout) do
|
145
130
|
def value
|
146
131
|
value = eval(value_from_stdout, binding)
|
@@ -1,12 +1,12 @@
|
|
1
1
|
require 'benchmark_driver/runner/ips'
|
2
2
|
|
3
3
|
class BenchmarkDriver::Runner::Time < BenchmarkDriver::Runner::Ips
|
4
|
+
METRIC = BenchmarkDriver::Metric.new(name: 'Execution time', unit: 's', larger_better: false)
|
5
|
+
|
4
6
|
# JobParser returns this, `BenchmarkDriver::Runner.runner_for` searches "*::Job"
|
5
7
|
Job = Class.new(BenchmarkDriver::DefaultJob)
|
6
8
|
# Dynamically fetched and used by `BenchmarkDriver::JobParser.parse`
|
7
|
-
JobParser = BenchmarkDriver::DefaultJobParser.for(Job)
|
8
|
-
|
9
|
-
METRIC = BenchmarkDriver::Metric.new(name: 'Execution time', unit: 's', larger_better: false)
|
9
|
+
JobParser = BenchmarkDriver::DefaultJobParser.for(klass: Job, metrics: [METRIC])
|
10
10
|
|
11
11
|
# Overriding BenchmarkDriver::Runner::Ips#metric
|
12
12
|
def metric
|
metadata
CHANGED
@@ -1,14 +1,14 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: benchmark_driver
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.
|
4
|
+
version: 0.12.0
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Takashi Kokubun
|
8
8
|
autorequire:
|
9
9
|
bindir: exe
|
10
10
|
cert_chain: []
|
11
|
-
date: 2018-
|
11
|
+
date: 2018-07-01 00:00:00.000000000 Z
|
12
12
|
dependencies:
|
13
13
|
- !ruby/object:Gem::Dependency
|
14
14
|
name: bundler
|