benchmark_driver 0.14.13 → 0.14.14

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: 22df737d5e378a5ea29b9667ffc96fc767427f27b0bb9e21b548b4fb5caf4463
-   data.tar.gz: fe45db762725218c739bb412a06189221fb6f5cb0e89a27071a1d302ce90c5bd
+   metadata.gz: 576179b82e3d7f201f8a58373e279219b2a20ffc7a74bb6133b09655db54ec45
+   data.tar.gz: c57f3b432fd5f7309f6bba0e318b75e425bb8e1e17e70916eed60b8e7a3b7785
  SHA512:
-   metadata.gz: 5d903d9b8d9b821f6eefbd21defc201714f6ec2e5436ca9374228756873a7693f37c4974cff38bfb2da01711bf394b8c516c8f6ce974ad19574a484ed780d4a7
-   data.tar.gz: 9b7e96da26336be348c4660328330c467f4f92ded7ad60538c23a6bfeb72fb58eddc8e7b7f1cedcccd30d340f34869191b71466e093c2ff93e2b6517ec658c87
+   metadata.gz: df832e8687b266851d5339039fa2e3a2b53d49ce8a25e5f2d7cf2c7691bd7f8c8394148c1d3de4c8dd0c7b5ab6d97f7a8e0ea923914478b7afc358c3d731178a
+   data.tar.gz: 569980b49fbcaf4515698f0519d7aa025bfe9d4acf7281d7d34cd85324e5afd0622b194417d744da1526b6ab2d7b5a15cc89c69d4cef9154650822231ab1df48
data/CHANGELOG.md CHANGED
@@ -1,3 +1,7 @@
+ # v0.14.14
+
+ - Add `all` output plugin to compare all sorted results
+
  # v0.14.13
 
  - Add macOS support to memory runner
@@ -87,7 +91,7 @@
 
  - Add support for benchmark with different versions of gems and preludes
    - Only YAML is supported for now, Ruby interface will come later
- - [breaking change] Runner plugin interface is sightly changed
+ - [breaking change] Runner plugin interface is slightly changed
 
  # v0.12.0
 
data/README.md CHANGED
@@ -272,7 +272,7 @@ ips, time, memory, once
  |:-------|:------------|
  | ips | Iteration per second (default) |
  | time | Elapsed seconds |
- | memory | Max resident set. This is supported only on Linux for now. |
+ | memory | Max resident set. This is supported only on Linux and macOS for now. |
  | once | Forces `loop_count` to 1 for testing |
  | ruby\_stdout | Special runner to integrate existing benchmarks |
 
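This README row tracks the CHANGELOG entry above: the memory runner's caveat shrinks from "Linux only" to "Linux and macOS". It is selected like any other runner; reusing the hypothetical `example.yml` sketched earlier:

```
$ benchmark-driver example.yml --runner memory
```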
@@ -360,7 +360,7 @@ See following example:
 
  * https://github.com/benchmark-driver/optcarrot
 
- If you benchmark can run with `ruby foo bar`, specify `foo bar` to `command:`.
+ If your benchmark can run with `ruby foo bar`, specify `foo bar` to `command:`.
  Then write `stdout_to_metrics:` to convert stdout to metrics. This runner can be used only with YAML interface for now.
 
  ```
data/lib/benchmark_driver/metric.rb CHANGED
@@ -30,6 +30,7 @@ module BenchmarkDriver
    # Everything that can be known after running benchmark
    Result = ::BenchmarkDriver::Struct.new(
      :values, # @param [Hash{ BenchmarkDriver::Metric => Float }] - Main benchmark results
+     :all_values, # @param [Hash{ BenchmarkDriver::Metric => [Float] },nil] - All benchmark results. Used by --output=all (optional)
      :duration, # @param [Float,nil] - Time taken to run the benchmark job (optional)
      :loop_count, # @param [Integer,nil] - Times to run the benchmark job (optional)
      :environment, # @param [Hash] - Any other key -> value pairs to express the benchmark context
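For a single metric and `--repeat-count 3`, the two fields differ only in cardinality. A sketch of their shapes (the metric construction and the numbers are illustrative, not taken from the gem):

```
metric = BenchmarkDriver::Metric.new(name: 'Iteration per second', unit: 'i/s')

values     = { metric => 101.5 }                # one value, chosen by --repeat-result
all_values = { metric => [99.2, 101.5, 103.1] } # every repeat, consumed by --output=all
```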
data/lib/benchmark_driver/output.rb CHANGED
@@ -73,11 +73,13 @@ module BenchmarkDriver
        end
      end
 
-     # @param [Float] value
+     # @param [Hash{ BenchmarkDriver::Metric => Float }] values
+     # @param [Hash{ BenchmarkDriver::Metric => [Float] },nil] all_values
      # @param [BenchmarkDriver::Metric] metric
-     def report(values:, duration: nil, loop_count: nil, environment: {})
+     def report(values:, all_values: nil, duration: nil, loop_count: nil, environment: {})
        result = BenchmarkDriver::Result.new(
          values: values,
+         all_values: all_values,
          duration: duration,
          loop_count: loop_count,
          environment: environment,
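All of the runner hunks below migrate to the same calling shape: report the single chosen value as before, and pass the full list alongside it (here `result` is the `RepeatResult` struct introduced in repeater.rb further down):

```
@output.report(
  values: { metric => result.value },           # unchanged behaviour for existing outputs
  all_values: { metric => result.all_values },  # extra detail for --output=all
)
```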
data/lib/benchmark_driver/output/all.rb ADDED
@@ -0,0 +1,104 @@
+ class BenchmarkDriver::Output::All
+   NAME_LENGTH = 20
+   CONTEXT_LENGTH = 20
+
+   # @param [Array<BenchmarkDriver::Metric>] metrics
+   # @param [Array<BenchmarkDriver::Job>] jobs
+   # @param [Array<BenchmarkDriver::Context>] contexts
+   def initialize(metrics:, jobs:, contexts:)
+     @metrics = metrics
+     @job_names = jobs.map(&:name)
+     @context_names = contexts.map(&:name)
+     @name_length = [@job_names.map(&:length).max, NAME_LENGTH].max
+   end
+
+   def with_warmup(&block)
+     without_stdout_buffering do
+       $stdout.puts 'Warming up --------------------------------------'
+       # TODO: show exec name if it has multiple ones
+       block.call
+     end
+   end
+
+   def with_benchmark(&block)
+     @job_context_result = Hash.new do |hash, job|
+       hash[job] = {}
+     end
+
+     result = without_stdout_buffering do
+       $stdout.puts 'Calculating -------------------------------------'
+       if @context_names.size > 1
+         $stdout.print(' ' * @name_length)
+         @context_names.each do |context_name|
+           $stdout.print(" %#{CONTEXT_LENGTH}s " % context_name)
+         end
+         $stdout.puts
+       end
+
+       block.call
+     end
+     result
+   end
+
+   # @param [BenchmarkDriver::Job] job
+   def with_job(job, &block)
+     @job_name = "%#{@name_length}s" % job.name
+     $stdout.print(@job_name)
+     @context_values = {}
+     block.call
+   end
+
+   # @param [BenchmarkDriver::Context] context
+   def with_context(context, &block)
+     @context = context
+     block.call
+   end
+
+   # @param [BenchmarkDriver::Result] result
+   def report(result)
+     if result.all_values.nil? || !defined?(@context_values)
+       $stdout.puts(" %#{[CONTEXT_LENGTH, @context.name.length].max}s " % result.values.values.first.to_s)
+       return
+     end
+
+     num_values = result.all_values.values.first.size
+     if @context_values.empty?
+       print("\r")
+     else
+       print("\e[#{num_values}F")
+     end
+     @context_values[@context] = result.all_values.values.first.sort
+
+     precision = result.values.values.first.to_s.sub(/\A\d+\./, '').length
+     num_values.times do |i|
+       if i == 0
+         $stdout.print(@job_name)
+       else
+         print(" " * [@job_name.length, NAME_LENGTH].max)
+       end
+
+       @context_values.each do |context, values|
+         $stdout.print(" %#{[CONTEXT_LENGTH, context.name.length].max}.#{precision}f " % values[i])
+       end
+       (@context_names - @context_values.keys.map(&:name)).each do |context_name|
+         print(" " * ([CONTEXT_LENGTH, context_name.length].max + 2))
+       end
+
+       if i == 0
+         $stdout.puts(@metrics.first.unit)
+       else
+         $stdout.puts
+       end
+     end
+   end
+
+   private
+
+   # benchmark_driver outputs logs ASAP. This enables the sync flag for it.
+   def without_stdout_buffering
+     sync, $stdout.sync = $stdout.sync, true
+     yield
+   ensure
+     $stdout.sync = sync
+   end
+ end
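The in-place table drawing above leans on two terminal escapes: `"\r"` returns the cursor to the start of the current line, and `"\e[#{n}F"` moves it to the beginning of the line `n` rows up, so each `report` call rewinds over the rows printed for the previous context and reprints them with one more column filled in. A standalone sketch of the same trick (not from the gem):

```
$stdout.sync = true
3.times do |pass|
  print("\e[3F") unless pass.zero? # rewind over the 3 lines printed last pass
  3.times { |i| puts "row #{i}, pass #{pass}" }
  sleep 0.5
end
```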
data/lib/benchmark_driver/repeater.rb CHANGED
@@ -1,25 +1,36 @@
+ require 'benchmark_driver/struct'
+
  module BenchmarkDriver
    # Repeat calling block and return desired result: "best", "worst" or "average".
    module Repeater
      VALID_TYPES = %w[best worst average]
 
+     RepeatResult = ::BenchmarkDriver::Struct.new(
+       :value, # the value desired by --repeat-result
+       :all_values, # all benchmark results. Used by --output=all.
+     )
+
      class << self
        # `block.call` can return multiple objects, but the first one is used for sort.
        # When `config.repeat_result == 'average'`, how to deal with rest objects is decided
        # by `:rest_on_average` option.
+       #
+       # @param [Proc] block - returns Float or [Float, ...]
        def with_repeat(config:, larger_better:, rest_on_average: :first, &block)
          values = config.repeat_count.times.map { block.call }
 
-         case config.repeat_result
-         when 'best'
-           best_result(values, larger_better)
-         when 'worst'
-           best_result(values, !larger_better)
-         when 'average'
-           average_result(values, rest_on_average)
-         else
-           raise "unexpected repeat_result #{config.repeat_result.inspect}"
-         end
+         desired_value =
+           case config.repeat_result
+           when 'best'
+             best_result(values, larger_better)
+           when 'worst'
+             best_result(values, !larger_better)
+           when 'average'
+             average_result(values, rest_on_average)
+           else
+             raise "unexpected repeat_result #{config.repeat_result.inspect}"
+           end
+         RepeatResult.new(value: desired_value, all_values: extract_values(values))
        end
 
        private
@@ -47,6 +58,12 @@ module BenchmarkDriver
            raise "unexpected rest_on_average #{rest_on_average.inspect}"
          end
        end
+
+       def extract_values(values)
+         values.map do |value|
+           value.is_a?(Array) ? value.first : value
+         end
+       end
      end
    end
  end
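Every runner hunk that follows adapts to the same change: `with_repeat` now returns a `RepeatResult` struct rather than the bare value (or `[value, extra]` pair) produced by the block. A sketch of the new calling pattern, with a hypothetical `run_once` standing in for a real measurement:

```
result = BenchmarkDriver::Repeater.with_repeat(config: @config, larger_better: true) do
  run_once # hypothetical; returns a Float
end
result.value      # the best/worst/average measurement, as before
result.all_values # every measurement, newly exposed for --output=all
```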
data/lib/benchmark_driver/runner/command_stdout.rb CHANGED
@@ -64,7 +64,7 @@ class BenchmarkDriver::Runner::CommandStdout
        @output.with_job(name: job.name) do
          @contexts.each do |context|
            exec = context.executable
-           value = BenchmarkDriver::Repeater.with_repeat(config: @config, larger_better: metric.larger_better) do
+           result = BenchmarkDriver::Repeater.with_repeat(config: @config, larger_better: metric.larger_better) do
              stdout = with_chdir(job.working_directory) do
                with_ruby_prefix(exec) { execute(*exec.command, *job.command) }
              end
@@ -75,7 +75,7 @@ class BenchmarkDriver::Runner::CommandStdout
            end
 
            @output.with_context(name: exec.name, executable: exec) do
-             @output.report(values: { metric => value })
+             @output.report(values: { metric => result.value }, all_values: { metric => result.all_values })
            end
          end
        end
data/lib/benchmark_driver/runner/ips.rb CHANGED
@@ -52,11 +52,17 @@ class BenchmarkDriver::Runner::Ips
      @output.with_job(name: job.name) do
        job.runnable_contexts(@contexts).each do |context|
          repeat_params = { config: @config, larger_better: true, rest_on_average: :average }
-         value, duration = BenchmarkDriver::Repeater.with_repeat(repeat_params) do
+         result = BenchmarkDriver::Repeater.with_repeat(repeat_params) do
            run_benchmark(job, context: context)
          end
+         value, duration = result.value
          @output.with_context(name: context.name, executable: context.executable, gems: context.gems, prelude: context.prelude) do
-           @output.report(values: { metric => value }, duration: duration, loop_count: job.loop_count)
+           @output.report(
+             values: { metric => value },
+             all_values: { metric => result.all_values },
+             duration: duration,
+             loop_count: job.loop_count,
+           )
          end
        end
      end
data/lib/benchmark_driver/runner/memory.rb CHANGED
@@ -48,11 +48,11 @@ class BenchmarkDriver::Runner::Memory
      jobs.each do |job|
        @output.with_job(name: job.name) do
          job.runnable_contexts(@contexts).each do |context|
-           value = BenchmarkDriver::Repeater.with_repeat(config: @config, larger_better: false) do
+           result = BenchmarkDriver::Repeater.with_repeat(config: @config, larger_better: false) do
              run_benchmark(job, context: context)
            end
            @output.with_context(name: context.name, executable: context.executable, gems: context.gems, prelude: context.prelude) do
-             @output.report(values: { METRIC => value }, loop_count: job.loop_count)
+             @output.report(values: { METRIC => result.value }, all_values: { METRIC => result.all_values }, loop_count: job.loop_count)
            end
          end
        end
data/lib/benchmark_driver/runner/ruby_stdout.rb CHANGED
@@ -82,7 +82,7 @@ class BenchmarkDriver::Runner::RubyStdout
        @contexts.each do |context|
          exec = context.executable
          repeat_params = { config: @config, larger_better: metric.larger_better }
-         value, environment = BenchmarkDriver::Repeater.with_repeat(repeat_params) do
+         result = BenchmarkDriver::Repeater.with_repeat(repeat_params) do
            stdout = with_chdir(job.working_directory) do
              with_ruby_prefix(exec) { execute(*exec.command, *job.command) }
            end
@@ -93,9 +93,14 @@ class BenchmarkDriver::Runner::RubyStdout
            )
            [script.value, script.environment]
          end
+         value, environment = result.value
 
          @output.with_context(name: exec.name, executable: exec) do
-           @output.report(values: { metric => value }, environment: environment)
+           @output.report(
+             values: { metric => value },
+             all_values: { metric => result.all_values },
+             environment: environment,
+           )
          end
        end
      end
data/lib/benchmark_driver/version.rb CHANGED
@@ -1,3 +1,3 @@
  module BenchmarkDriver
-   VERSION = '0.14.13'
+   VERSION = '0.14.14'
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: benchmark_driver
  version: !ruby/object:Gem::Version
-   version: 0.14.13
+   version: 0.14.14
  platform: ruby
  authors:
  - Takashi Kokubun
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2018-12-24 00:00:00.000000000 Z
+ date: 2019-03-25 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: bundler
@@ -106,6 +106,7 @@ files:
  - lib/benchmark_driver/job_parser.rb
  - lib/benchmark_driver/metric.rb
  - lib/benchmark_driver/output.rb
+ - lib/benchmark_driver/output/all.rb
  - lib/benchmark_driver/output/compare.rb
  - lib/benchmark_driver/output/markdown.rb
  - lib/benchmark_driver/output/record.rb
@@ -142,8 +143,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
    version: '0'
  requirements: []
- rubyforge_project:
- rubygems_version: 2.7.6
+ rubygems_version: 3.0.3
  signing_key:
  specification_version: 4
  summary: Fully-featured accurate benchmark driver for Ruby