benchmark_driver_monotonic_raw 0.14.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.gitignore +10 -0
- data/.rspec +1 -0
- data/.travis.yml +16 -0
- data/CHANGELOG.md +357 -0
- data/Gemfile +8 -0
- data/LICENSE.txt +21 -0
- data/README.md +386 -0
- data/Rakefile +9 -0
- data/benchmark-driver/.gitignore +12 -0
- data/benchmark-driver/CODE_OF_CONDUCT.md +74 -0
- data/benchmark-driver/Gemfile +6 -0
- data/benchmark-driver/LICENSE.txt +21 -0
- data/benchmark-driver/README.md +8 -0
- data/benchmark-driver/Rakefile +1 -0
- data/benchmark-driver/benchmark-driver.gemspec +21 -0
- data/benchmark-driver/bin/console +14 -0
- data/benchmark-driver/bin/setup +8 -0
- data/benchmark-driver/lib/benchmark-driver.rb +1 -0
- data/benchmark-driver/lib/benchmark/driver.rb +1 -0
- data/benchmark_driver.gemspec +28 -0
- data/bin/console +7 -0
- data/bin/setup +8 -0
- data/exe/benchmark-driver +118 -0
- data/images/optcarrot.png +0 -0
- data/lib/benchmark_driver.rb +14 -0
- data/lib/benchmark_driver/bulk_output.rb +59 -0
- data/lib/benchmark_driver/config.rb +59 -0
- data/lib/benchmark_driver/default_job.rb +29 -0
- data/lib/benchmark_driver/default_job_parser.rb +91 -0
- data/lib/benchmark_driver/job_parser.rb +55 -0
- data/lib/benchmark_driver/metric.rb +79 -0
- data/lib/benchmark_driver/output.rb +88 -0
- data/lib/benchmark_driver/output/compare.rb +216 -0
- data/lib/benchmark_driver/output/markdown.rb +107 -0
- data/lib/benchmark_driver/output/record.rb +61 -0
- data/lib/benchmark_driver/output/simple.rb +103 -0
- data/lib/benchmark_driver/rbenv.rb +25 -0
- data/lib/benchmark_driver/repeater.rb +52 -0
- data/lib/benchmark_driver/ruby_interface.rb +83 -0
- data/lib/benchmark_driver/runner.rb +103 -0
- data/lib/benchmark_driver/runner/command_stdout.rb +118 -0
- data/lib/benchmark_driver/runner/ips.rb +259 -0
- data/lib/benchmark_driver/runner/memory.rb +150 -0
- data/lib/benchmark_driver/runner/once.rb +118 -0
- data/lib/benchmark_driver/runner/recorded.rb +73 -0
- data/lib/benchmark_driver/runner/ruby_stdout.rb +146 -0
- data/lib/benchmark_driver/runner/time.rb +20 -0
- data/lib/benchmark_driver/struct.rb +98 -0
- data/lib/benchmark_driver/version.rb +3 -0
- metadata +150 -0
data/lib/benchmark_driver/default_job.rb (new file; @@ -0,0 +1,29 @@)

```ruby
require 'benchmark_driver/struct'

module BenchmarkDriver
  DefaultJob = ::BenchmarkDriver::Struct.new(
    :name,       # @param [String] name - This is mandatory for all runners
    :metrics,    # @param [Array<BenchmarkDriver::Metric>] - This is mandatory for all runners too, set by the job parser.
    :contexts,   # @param [Array<BenchmarkDriver::Context>] - This is an optional parameter for runners.
    :script,     # @param [String] benchmark
    :prelude,    # @param [String,nil] prelude (optional)
    :teardown,   # @param [String,nil] after (optional)
    :loop_count, # @param [Integer,nil] loop_count (optional)
    :required_ruby_version, # @param [String,nil] required_ruby_version (optional)
    defaults: { prelude: '', teardown: '' },
  ) do
    def runnable_contexts(contexts)
      if required_ruby_version
        contexts.select do |context|
          Gem::Version.new(context.executable.version) >= Gem::Version.new(required_ruby_version)
        end.tap do |result|
          if result.empty?
            raise "No Ruby executables conforming to required_ruby_version (#{required_ruby_version}) are specified"
          end
        end
      else
        contexts
      end
    end
  end
end
```
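`runnable_contexts` is how a job's `required_ruby_version` is enforced against the configured Ruby executables. A minimal sketch of that filtering, assuming a stand-in executable object with a `version` (the real one is `BenchmarkDriver::Config::Executable` from `config.rb`):

```ruby
require 'benchmark_driver/default_job'
require 'benchmark_driver/metric' # defines BenchmarkDriver::Context

# Stand-in for BenchmarkDriver::Config::Executable, used only for illustration.
FakeExecutable = Struct.new(:name, :version)

job = BenchmarkDriver::DefaultJob.new(
  name: 'Array#sum',
  script: '(1..1000).to_a.sum',
  required_ruby_version: '2.4.0',
)

contexts = [
  BenchmarkDriver::Context.new(name: 'ruby-2.3', executable: FakeExecutable.new('ruby-2.3', '2.3.8')),
  BenchmarkDriver::Context.new(name: 'ruby-2.6', executable: FakeExecutable.new('ruby-2.6', '2.6.0')),
]

# Only the 2.6 context satisfies required_ruby_version (2.4.0), so only it remains.
job.runnable_contexts(contexts).map(&:name) # => ["ruby-2.6"]
```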
data/lib/benchmark_driver/default_job_parser.rb (new file; @@ -0,0 +1,91 @@)

```ruby
module BenchmarkDriver
  module DefaultJobParser
    # Build default JobParser for given job klass
    def self.for(klass:, metrics:)
      Module.new.tap do |parser|
        class << parser
          include DefaultJobParser
        end
        parser.define_singleton_method(:job_class) { klass }
        parser.define_singleton_method(:job_metrics) { metrics }
      end
    end

    # This method is dynamically called by `BenchmarkDriver::JobParser.parse`
    # @param [String] prelude
    # @param [String,Array<String,Hash{ Symbol => String }>,Hash{ Symbol => String }] benchmark
    # @param [String] teardown
    # @param [Integer] loop_count
    # @param [String] required_ruby_version
    # @return [Array<BenchmarkDriver::Default::Job>]
    def parse(contexts: [], prelude: nil, benchmark:, teardown: nil, loop_count: nil, required_ruby_version: nil)
      parse_benchmark(benchmark).each do |job|
        job.contexts = parse_contexts(contexts)
        job.metrics = job_metrics
        job.prelude.prepend("#{prelude}\n") if prelude
        job.teardown.prepend("#{teardown}\n") if teardown
        job.loop_count ||= loop_count
        job.required_ruby_version ||= required_ruby_version
      end.each(&:freeze)
    end

    private

    # @param [String,Array<String,Hash{ Symbol => String }>,Hash{ Symbol => String }] benchmark
    def parse_benchmark(benchmark)
      case benchmark
      when String
        [parse_job(benchmark)]
      when Array
        benchmark.map { |b| parse_job(b) }
      when Hash
        benchmark.map do |key, value|
          job_class.new(name: key.to_s, script: value)
        end
      else
        raise ArgumentError.new("benchmark must be String, Array or Hash, but got: #{benchmark.inspect}")
      end
    end

    # @param [String,Hash{ Symbol => String }] benchmark
    def parse_job(benchmark)
      case benchmark
      when String
        job_class.new(name: benchmark, script: benchmark)
      when Hash
        parse_job_hash(benchmark)
      else
        raise ArgumentError.new("Expected String or Hash in element of benchmark, but got: #{benchmark.inspect}")
      end
    end

    def parse_job_hash(name: nil, prelude: '', script:, teardown: '', loop_count: nil, required_ruby_version: nil)
      name ||= script
      job_class.new(name: name, prelude: prelude, script: script, teardown: teardown, loop_count: loop_count, required_ruby_version: required_ruby_version)
    end

    def job_class
      raise NotImplementedError # override this
    end

    def parse_contexts(contexts)
      if contexts.is_a?(Array)
        contexts.map { |context| parse_context(context) }
      else
        raise ArgumentError.new("contexts must be Array, but got: #{contexts.inspect}")
      end
    end

    def parse_context(name: nil, prelude: '', gems: {}, require: true)
      gems.each do |gem, version|
        prelude = "gem '#{gem}', '#{version}'\n#{("require '#{gem}'\n" if require)}#{prelude}"
      end

      BenchmarkDriver::Context.new(
        name: name,
        gems: gems,
        prelude: prelude,
      )
    end
  end
end
```
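Runner-specific parsers in this gem are built from this module via `DefaultJobParser.for`. A small sketch of building such a parser by hand and feeding it a Hash-style benchmark definition (the metric below is made up for the example; real runners define their own):

```ruby
require 'benchmark_driver/default_job'
require 'benchmark_driver/default_job_parser'
require 'benchmark_driver/metric'

metric = BenchmarkDriver::Metric.new(name: 'Iterations per second', unit: 'i/s')
parser = BenchmarkDriver::DefaultJobParser.for(klass: BenchmarkDriver::DefaultJob, metrics: [metric])

# A Hash benchmark maps job name => script; a String would be used as both.
jobs = parser.parse(
  prelude: 'a = [1, 2, 3]',
  benchmark: { 'array_sum' => 'a.sum', 'array_max' => 'a.max' },
  loop_count: 1_000,
)

jobs.map(&:name)      # => ["array_sum", "array_max"]
jobs.first.loop_count # => 1000
jobs.first.prelude    # => "a = [1, 2, 3]\n"
```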
data/lib/benchmark_driver/job_parser.rb (new file; @@ -0,0 +1,55 @@)

```ruby
require 'benchmark_driver/runner'

module BenchmarkDriver
  class << JobParser = Module.new
    # @param [Hash] config
    # @param [Hash] default_params - Special default values not written in job definition
    def parse(config, default_params: {})
      config = symbolize_keys(config)
      type = config.fetch(:type)
      if !type.is_a?(String)
        raise ArgumentError.new("Invalid type: #{config[:type].inspect} (expected String)")
      elsif !type.match(/\A[A-Za-z0-9_]+\z/)
        raise ArgumentError.new("Invalid type: #{config[:type].inspect} (expected to include only [A-Za-z0-9_])")
      end
      config.delete(:type)

      # Dynamic dispatch for plugin support
      require "benchmark_driver/runner/#{type}"
      job = ::BenchmarkDriver.const_get("Runner::#{camelize(type)}::JobParser", false).parse(config)
      default_params.each do |key, value|
        if job.respond_to?(key) && job.respond_to?("#{key}=") && job.public_send(key).nil?
          job.public_send("#{key}=", value)
        end
      end
      job
    end

    private

    def camelize(str)
      str.split('_').map(&:capitalize).join
    end

    # @param [Object] config
    def symbolize_keys(config)
      case config
      when Hash
        config.dup.tap do |hash|
          hash.keys.each do |key|
            case key
            when String, Symbol
              hash[key.to_sym] = symbolize_keys(hash.delete(key))
            else # Struct
              hash[key] = symbolize_keys(hash.delete(key))
            end
          end
        end
      when Array
        config.map { |c| symbolize_keys(c) }
      else
        config
      end
    end
  end
end
```
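The `type` key in a job definition is what drives the dynamic `require`/`const_get` dispatch above. A sketch of what that looks like for a YAML definition, assuming the bundled `ips` runner (whose JobParser is built with the DefaultJobParser shown earlier):

```ruby
require 'yaml'
require 'benchmark_driver/job_parser'

yaml = <<~YAML
  type: ips
  prelude: |
    a = [1, 2, 3]
  benchmark:
    array_sum: a.sum
    array_max: a.max
YAML

# `type: ips` makes JobParser require benchmark_driver/runner/ips and delegate the
# remaining config to BenchmarkDriver::Runner::Ips::JobParser.parse.
jobs = BenchmarkDriver::JobParser.parse(YAML.load(yaml))
```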
data/lib/benchmark_driver/metric.rb (new file; @@ -0,0 +1,79 @@)

```ruby
require 'benchmark_driver/struct'

# All benchmark results should be expressed by this model.
module BenchmarkDriver
  # BenchmarkDriver returns benchmark results with the following nested Hash structure:
  # {
  #   #<BenchmarkDriver::Job> => {
  #     #<BenchmarkDriver::Context> => #<BenchmarkDriver::Result
  #       metrics: {
  #         #<BenchmarkDriver::Metric> => Float
  #       }
  #     >
  #   }
  # }

  # Holding identifier of measured workload
  Job = ::BenchmarkDriver::Struct.new(
    :name, # @param [String] - Name of the benchmark task
  )

  # Benchmark conditions that can be known before running benchmark
  Context = ::BenchmarkDriver::Struct.new(
    :name,       # @param [String] - Name of the context
    :executable, # @param [BenchmarkDriver::Config::Executable] - Measured Ruby executable
    :gems,       # @param [Hash{ String => String,nil }] - Gem -> version pairs used for the benchmark
    :prelude,    # @param [String,nil] - Context specific setup script (optional)
    defaults: { prelude: '', gems: {} },
  )

  # Everything that can be known after running benchmark
  Result = ::BenchmarkDriver::Struct.new(
    :values,      # @param [Hash{ BenchmarkDriver::Metric => Float }] - Main benchmark results
    :duration,    # @param [Float,nil] - Time taken to run the benchmark job (optional)
    :loop_count,  # @param [Integer,nil] - Times to run the benchmark job (optional)
    :environment, # @param [Hash] - Any other key -> value pairs to express the benchmark context
    defaults: { environment: {} },
  )
  Result::ERROR = 0

  # A kind of thing to be measured
  Metric = ::BenchmarkDriver::Struct.new(
    :name,          # @param [String] - Metric name or description like "Max Resident Set Size"
    :unit,          # @param [String] - A unit like "MiB"
    :larger_better, # @param [TrueClass,FalseClass] - If true, larger value is preferred when measured multiple times.
    :worse_word,    # @param [String] - A label shown when the value is worse.
    defaults: { larger_better: true, worse_word: 'slower' },
  )

  #=[RubyBench mapping]=======================================|
  #
  # BenchmarkRun:
  #   result      -> { context.name => result.value } | { "default"=>"44.666666666666664", "default_jit"=>"59.333333333333336" }
  #   environment -> result.environment                | "---\nRuby version: 'ruby 2.6.0dev (2018-05-14 trunk 63417) [x86_64-linux]\n\n'\nChecksum: '59662'\n"
  #   initiator   -> (not supported)                   | #<Commit sha1: "6f0de6ed9...", message: "error.c: check redefined ...", url: "https://github.com/tgxworld/ruby/commit/6f0de6ed98...", repo_id: 6>
  #
  # BenchmarkType:
  #   category          -> job.name        | "app_erb", "Optcarrot Lan_Master.nes"
  #   script_url        -> (not supported) | "https://raw.githubusercontent.com/mame/optcarrot/master/lib/optcarrot/nes.rb"
  #   repo              -> (not supported) | #<Repo name: "ruby", url: "https://github.com/tgxworld/ruby">
  #   repo.organization -> (not supported) | #<Organization name: "ruby", url: "https://github.com/tgxworld/">
  #
  # BenchmarkResultType:
  #   name -> metric.name | "Number of frames"
  #   unit -> metric.unit | "fps"
  #
  #===========================================================|

  #----
  # legacy

  module Metrics
    Type = ::BenchmarkDriver::Struct.new(
      :unit,          # @param [String] - A label of unit for the value.
      :larger_better, # @param [TrueClass,FalseClass] - If true, larger value is preferred when measured multiple times.
      :worse_word,    # @param [String] - A label shown when the value is worse.
      defaults: { larger_better: true, worse_word: 'slower' },
    )
  end
end
```
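A short sketch of the nested result structure documented at the top of this file, built by hand from the structs above (real values are produced by the runners):

```ruby
require 'benchmark_driver/metric'

metric  = BenchmarkDriver::Metric.new(name: 'Iterations per second', unit: 'i/s')
job     = BenchmarkDriver::Job.new(name: 'app_erb')
context = BenchmarkDriver::Context.new(name: 'default')

# Job => { Context => Result }, with Result#values keyed by Metric.
results = {
  job => {
    context => BenchmarkDriver::Result.new(values: { metric => 44.6 }, loop_count: 3_000),
  },
}

results[job][context].values[metric] # => 44.6
```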
data/lib/benchmark_driver/output.rb (new file; @@ -0,0 +1,88 @@)

```ruby
module BenchmarkDriver
  # BenchmarkDriver::Runner::* --> BenchmarkDriver::Output --> BenchmarkDriver::Output::*
  #
  # This is the interface between runner plugins and output plugins, so that they can be loosely
  # coupled and both runners and outputs stay simple to implement.
  #
  # Runner should call its interface in the following manner:
  #   metrics=
  #   with_warmup
  #     with_job(name:)
  #       with_context(name:, executable:, gems:)
  #         report(values:, duration: nil, loop_count: nil, environment: {})
  #   with_benchmark
  #     with_job(name:)
  #       with_context(name:, executable:, gems:)
  #         report(values:, duration: nil, loop_count: nil, environment: {})
  class Output
    require 'benchmark_driver/output/compare'
    require 'benchmark_driver/output/markdown'
    require 'benchmark_driver/output/record'
    require 'benchmark_driver/output/simple'

    # BenchmarkDriver::Output is pluggable.
    # Create `BenchmarkDriver::Output::Foo` as a benchmark_driver-output-foo gem and specify `-o foo`.
    #
    # @param [String] type
    # @param [Array<BenchmarkDriver::Metric>] metrics
    # @param [Array<BenchmarkDriver::Job>] jobs
    # @param [Array<BenchmarkDriver::Context>] contexts
    def initialize(type:, metrics:, jobs:, contexts:)
      if type.include?(':')
        raise ArgumentError.new("Output type '#{type}' cannot contain ':'")
      end

      require "benchmark_driver/output/#{type}" # for plugin
      camelized = type.split('_').map(&:capitalize).join

      @output = ::BenchmarkDriver::Output.const_get(camelized, false).new(
        metrics: metrics,
        jobs: jobs,
        contexts: contexts,
      )
    end

    # @param [Array<BenchmarkDriver::Metric>] metrics
    def metrics=(metrics)
      @output.metrics = metrics
    end

    def with_warmup(&block)
      @output.with_warmup(&block)
    end

    def with_benchmark(&block)
      @output.with_benchmark(&block)
    end

    # @param [String] name
    def with_job(name:, &block)
      job = BenchmarkDriver::Job.new(name: name)
      @output.with_job(job) do
        block.call
      end
    end

    # @param [String] name
    # @param [BenchmarkDriver::Config::Executable] executable
    # @param [Hash{ String => String }] gems
    def with_context(name:, executable:, gems: {}, prelude: '', &block)
      context = BenchmarkDriver::Context.new(name: name, executable: executable, gems: gems, prelude: prelude)
      @output.with_context(context) do
        block.call
      end
    end

    # @param [Hash{ BenchmarkDriver::Metric => Float }] values
    def report(values:, duration: nil, loop_count: nil, environment: {})
      result = BenchmarkDriver::Result.new(
        values: values,
        duration: duration,
        loop_count: loop_count,
        environment: environment,
      )
      @output.report(result)
    end
  end
end
```
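Because the wrapper only calls the methods listed in the protocol comment above, an output plugin can stay very small. A minimal sketch of one (the class name, the hypothetical benchmark_driver-output-plain gem, and its formatting are for illustration only, showing the interface the wrapper expects):

```ruby
require 'benchmark_driver/output'

# Hypothetical plugin: shipping this as benchmark_driver-output-plain and running
# benchmark-driver with `-o plain` would make Output require and instantiate it.
class BenchmarkDriver::Output::Plain
  def initialize(metrics:, jobs:, contexts:)
    @metrics = metrics
  end

  # Called via `Output#metrics=` when a runner refines the metric list.
  attr_writer :metrics

  def with_warmup(&block)
    block.call # stay silent during warmup
  end

  def with_benchmark(&block)
    block.call
  end

  def with_job(job, &block)
    @job = job
    block.call
  end

  def with_context(context, &block)
    @context = context
    block.call
  end

  def report(result)
    value = result.values.fetch(@metrics.first)
    puts "#{@job.name} (#{@context.name}): #{value} #{@metrics.first.unit}"
  end
end
```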
data/lib/benchmark_driver/output/compare.rb (new file; @@ -0,0 +1,216 @@)

```ruby
# Compare output like benchmark-ips
class BenchmarkDriver::Output::Compare
  NAME_LENGTH = 20

  # @param [Array<BenchmarkDriver::Metric>] metrics
  # @param [Array<BenchmarkDriver::Job>] jobs
  # @param [Array<BenchmarkDriver::Context>] contexts
  def initialize(metrics:, jobs:, contexts:)
    @metrics = metrics
    @job_names = jobs.map(&:name)
    @context_names = contexts.map(&:name)
    @name_length = [@job_names.map(&:length).max, NAME_LENGTH].max
  end

  def with_warmup(&block)
    without_stdout_buffering do
      $stdout.puts 'Warming up --------------------------------------'
      # TODO: show exec name if it has multiple ones
      block.call
    end
  end

  def with_benchmark(&block)
    @job_context_result = Hash.new do |hash, job|
      hash[job] = {}
    end

    result = without_stdout_buffering do
      $stdout.puts 'Calculating -------------------------------------'
      if @context_names.size > 1
        $stdout.print(' ' * @name_length)
        @context_names.each do |context_name|
          $stdout.print(' %10s ' % context_name)
        end
        $stdout.puts
      end

      block.call
    end
    if @context_names.size > 1
      compare_executables
    elsif @job_names.size > 1
      compare_jobs
    end
    result
  end

  # @param [BenchmarkDriver::Job] job
  def with_job(job, &block)
    name = job.name
    if name.length > @name_length
      $stdout.puts(name)
    else
      $stdout.print("%#{@name_length}s" % name)
    end
    @job = name
    @job_results = []
    @job_contexts = []
    result = block.call
    $stdout.print(@metrics.first.unit)
    loop_count = @job_results.first.loop_count
    if loop_count && @job_results.all? { |r| r.loop_count == loop_count }
      $stdout.print(" - #{humanize(loop_count)} times")
      if @job_results.all? { |job_result| !job_result.duration.nil? }
        $stdout.print(" in")
        show_durations
      end
    end
    $stdout.puts
    result
  end

  # @param [BenchmarkDriver::Context] context
  def with_context(context, &block)
    @context = context
    @job_contexts << context
    block.call
  end

  # @param [BenchmarkDriver::Result] result
  def report(result)
    @job_results << result
    if defined?(@job_context_result)
      @job_context_result[@job][@context] = result
    end

    $stdout.print("#{humanize(result.values.values.first, [10, @context.name.length].max)} ")
  end

  private

  def show_durations
    @job_results.each do |result|
      $stdout.print(' %3.6fs' % result.duration)
    end

    # Show pretty seconds / clocks too. As it takes a lot of width, it's shown only with a single executable.
    if @job_results.size == 1
      result = @job_results.first
      sec = result.duration
      iter = result.loop_count
      if File.exist?('/proc/cpuinfo') && (clks = estimate_clock(sec, iter)) < 1_000
        $stdout.print(" (#{pretty_sec(sec, iter)}/i, #{clks}clocks/i)")
      else
        $stdout.print(" (#{pretty_sec(sec, iter)}/i)")
      end
    end
  end

  # benchmark_driver outputs logs ASAP. This enables the sync flag for it.
  def without_stdout_buffering
    sync, $stdout.sync = $stdout.sync, true
    yield
  ensure
    $stdout.sync = sync
  end

  def humanize(value, width = 10)
    if BenchmarkDriver::Result::ERROR.equal?(value)
      return " %#{width}s" % 'ERROR'
    elsif value == 0.0
      return " %#{width}.3f" % 0.0
    elsif value < 0
      raise ArgumentError.new("Negative value: #{value.inspect}")
    end

    scale = (Math.log10(value) / 3).to_i
    return "%#{width}s" % value.to_s if scale < 0 # like 1.23e-04

    prefix = "%#{width}.3f" % (value.to_f / (1000 ** scale))
    suffix =
      case scale
      when 1; 'k'
      when 2; 'M'
      when 3; 'G'
      when 4; 'T'
      when 5; 'Q'
      else # < 1000 or > 10^15, no scale or suffix
        return " #{prefix}"
      end
    "#{prefix}#{suffix}"
  end

  def pretty_sec(sec, iter)
    r = Rational(sec, iter)
    case
    when r >= 1
      "#{'%3.2f' % r.to_f}s"
    when r >= 1/1000r
      "#{'%3.2f' % (r * 1_000).to_f}ms"
    when r >= 1/1000_000r
      "#{'%3.2f' % (r * 1_000_000).to_f}μs"
    else
      "#{'%3.2f' % (r * 1_000_000_000).to_f}ns"
    end
  end

  def estimate_clock(sec, iter)
    hz = File.read('/proc/cpuinfo').scan(/cpu MHz\s+:\s+([\d\.]+)/) { |(f)| break hz = Rational(f.to_f) * 1_000_000 }
    r = Rational(sec, iter)
    Integer(r / (1 / hz))
  end

  def compare_jobs
    $stdout.puts "\nComparison:"
    results = @job_context_result.flat_map do |job, context_result|
      context_result.map { |context, result| Result.new(job: job, value: result.values.values.first, context: context) }
    end
    show_results(results, show_context: false)
  end

  def compare_executables
    $stdout.puts "\nComparison:"

    @job_context_result.each do |job, context_result|
      $stdout.puts("%#{@name_length + 2 + 11}s" % job)
      results = context_result.flat_map do |context, result|
        result.values.values.map { |value| Result.new(job: job, value: value, context: context) }
      end
      show_results(results, show_context: true)
    end
  end

  # @param [Array<BenchmarkDriver::Output::Compare::Result>] results
  # @param [TrueClass,FalseClass] show_context
  def show_results(results, show_context:)
    results = results.sort_by do |result|
      if @metrics.first.larger_better
        -result.value
      else
        result.value
      end
    end

    first = results.first
    results.each do |result|
      if result != first
        if @metrics.first.larger_better
          ratio = (first.value / result.value)
        else
          ratio = (result.value / first.value)
        end
        slower = "- %.2fx #{@metrics.first.worse_word}" % ratio
      end
      if show_context
        name = result.context.name
      else
        name = result.job
      end
      $stdout.puts("%#{@name_length}s: %11.1f %s #{slower}" % [name, result.value, @metrics.first.unit])
    end
    $stdout.puts
  end

  Result = ::BenchmarkDriver::Struct.new(:job, :value, :context)
end
```
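The private `humanize` helper is what produces the `1.234k` / `12.346M` style numbers in the Calculating section. A quick sketch of its scaling behavior, calling the private method directly via `send` purely for illustration (the expected return strings are worked out from the format strings above):

```ruby
require 'benchmark_driver/output'
require 'benchmark_driver/metric'

compare = BenchmarkDriver::Output::Compare.new(
  metrics:  [BenchmarkDriver::Metric.new(name: 'Iterations per second', unit: 'i/s')],
  jobs:     [BenchmarkDriver::Job.new(name: 'example')],
  contexts: [],
)

compare.send(:humanize, 1_234.0)      # => "     1.234k" (scale 1 -> 'k')
compare.send(:humanize, 12_345_678.0) # => "    12.346M" (scale 2 -> 'M')
compare.send(:humanize, 0.000123)     # => "  0.000123"  (scale < 0, value printed as-is)
```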