benchmark_driver 0.11.0 → 0.11.1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: 16b3e45ce8476d4acf4e3528d04079f1346b6436
- data.tar.gz: 0e4a7a94f47ba9bb596ba96904712db4c6935032
+ metadata.gz: 606b300f9ffeb798a6ac330be1e97795898afa7c
+ data.tar.gz: fb3cfa3a44eff01b189635c8ebaffcb73737567d
  SHA512:
- metadata.gz: 75b834584e0fc996dda837d879af768c6bab9acba07fb53f284103ec73aafd53b8b5cc82001b9067a7d0403420c97fa30f59146bcff9c3341c89cf4c003e915e
- data.tar.gz: cafd73b0bdaeb3b8635a0cbc847eaf9d3b6969d80369093004db615618cab41b6e004640f81a58bd8d38389243fc4061c230473ed696b7524bc15d23bbe6d87d
+ metadata.gz: 8401a061574125ed5af0be54293fb2936bf8f856abb23c242232dff213770a008726839742353194d790ee6e883a00cfc1c47d42e820c7e7936f39e9d3c1d317
+ data.tar.gz: 7f742808cd90e3cdb2b983bced6a8a224a9be18c3a5121e679316edaba9648bff90711888d20fe06274c16eefe5340d6c0b52ef5c417fe1ebe0495167be23cae
data/CHANGELOG.md CHANGED
@@ -1,3 +1,8 @@
+ # v0.11.1
+
+ - Add `--repeat-result` option to return the best, the worst or an average result with `--repeat-count`
+ - Add `BenchmarkDriver::BulkOutput` to make it easy to write an output plugin
+
  # v0.11.0

  - [breaking change] Plugin interface is completely changed, so all plugins need migration
data/README.md CHANGED
@@ -273,7 +273,7 @@ ips, time, memory, once
  | time | Elapsed seconds |
  | memory | Max resident set. This is supported only on Linux for now. |
  | once | Forces `loop_count` to 1 for testing |
- | command\_stdout | Special runner to integrate existing benchmarks |
+ | ruby\_stdout | Special runner to integrate existing benchmarks |

  ### ips

@@ -353,12 +353,11 @@ Comparison:
  2.4.3: 1531393.6 i/s - 1.58x slower
  ```

- ### command\_stdout
+ ### ruby\_stdout

- See following examples:
+ See the following example:

  * https://github.com/benchmark-driver/optcarrot
- * https://github.com/benchmark-driver/fluentd-benchmark

  If your benchmark can run with `ruby foo bar`, specify `foo bar` to `command:`.
  Then write `stdout_to_metrics:` to convert stdout to metrics. This runner can be used only with the YAML interface for now.
data/exe/benchmark-driver CHANGED
@@ -42,6 +42,12 @@ config = BenchmarkDriver::Config.new.tap do |c|
  abort "-r, --repeat-count must take Integer, but got #{v.inspect}"
  end
  end
+ o.on('--repeat-result [TYPE]', 'Yield "best", "average" or "worst" result with --repeat-count (default: best)') do |v|
+   unless BenchmarkDriver::Repeater::VALID_TYPES.include?(v)
+     raise ArgumentError.new("--repeat-result must be #{BenchmarkDriver::Repeater::VALID_TYPES.join(', ')} but got #{v.inspect}")
+   end
+   c.repeat_result = v
+ end
  o.on('--bundler', 'Install and use gems specified in Gemfile') do |v|
  bundler = v
  end
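For reference, the new option pairs with the existing `-r, --repeat-count` flag. A usage sketch (the benchmark definition file name below is only a placeholder):

```
$ benchmark-driver my_benchmark.yml --repeat-count 3 --repeat-result average
```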
data/lib/benchmark_driver.rb CHANGED
@@ -1,7 +1,9 @@
+ require 'benchmark_driver/bulk_output'
  require 'benchmark_driver/config'
  require 'benchmark_driver/job_parser'
  require 'benchmark_driver/output'
  require 'benchmark_driver/rbenv'
+ require 'benchmark_driver/repeater'
  require 'benchmark_driver/ruby_interface'
  require 'benchmark_driver/runner'
  require 'benchmark_driver/version'
data/lib/benchmark_driver/bulk_output.rb ADDED
@@ -0,0 +1,62 @@
+ module BenchmarkDriver
+   # This is an API for your casual output plugin and is NOT used internally by BenchmarkDriver.
+   #
+   # By fully utilizing the with_*/report APIs, you can implement streaming-output plugins.
+   # See also: lib/benchmark_driver/output.rb (this class's instance will be `@output`)
+   # But using these APIs can be difficult because the API is not stable yet and it's hard
+   # to deal with the complex state machine.
+   #
+   # If you don't need to output results in a streaming manner, you can create an output
+   # plugin class that inherits `BenchmarkDriver::BulkOutput`, which only requires you to
+   # override `#bulk_output`, taking all inputs at once.
+   class BulkOutput
+     # @param [Array<BenchmarkDriver::Metric>] metrics
+     attr_writer :metrics
+
+     # @param [Array<String>] job_names
+     # @param [Array<String>] context_names
+     def initialize(job_names:, context_names:)
+       # noop
+     end
+
+     # The main API you need to override if you make a class inherit `BenchmarkDriver::BulkOutput`.
+     # @param [Hash{ BenchmarkDriver::Job => Hash{ BenchmarkDriver::Context => { BenchmarkDriver::Metric => Float } } }] result
+     # @param [Array<BenchmarkDriver::Metric>] metrics
+     def bulk_output(result:, metrics:)
+       raise NotImplementedError.new("#{self.class} must override #bulk_output")
+     end
+
+     def with_warmup(&block)
+       block.call # noop
+     end
+
+     def with_benchmark(&block)
+       @result = Hash.new do |h1, job|
+         h1[job] = Hash.new do |h2, context|
+           h2[context] = {}
+         end
+       end
+       result = block.call
+       bulk_output(result: @result, metrics: @metrics)
+       result
+     end
+
+     # @param [BenchmarkDriver::Job] job
+     def with_job(job, &block)
+       @job = job
+       block.call
+     end
+
+     # @param [BenchmarkDriver::Context] context
+     def with_context(context, &block)
+       @context = context
+       block.call
+     end
+
+     # @param [Float] value
+     # @param [BenchmarkDriver::Metric] metric
+     def report(value:, metric:)
+       @result[@job][@context][metric] = value
+     end
+   end
+ end
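For context, here is a minimal sketch of an output plugin built on the new class. Only the `#bulk_output(result:, metrics:)` override and the shape of `result` come from the code above; the plugin class name, how it gets wired up as an output, and the `#name` readers used on the job/context keys are assumptions.

```ruby
require 'benchmark_driver/bulk_output'

# Hypothetical plugin that prints one line per job/context/metric once all
# benchmarks have finished (no streaming output needed).
class MyTotalsOutput < BenchmarkDriver::BulkOutput
  # result: Hash{ Job => Hash{ Context => { Metric => Float } } }
  def bulk_output(result:, metrics:)
    result.each do |job, contexts|
      contexts.each do |context, values|
        metrics.each do |metric|
          # job.name / context.name are assumed readers for this sketch.
          puts "#{job.name}\t#{context.name}\t#{values[metric]}"
        end
      end
    end
  end
end
```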
data/lib/benchmark_driver/config.rb CHANGED
@@ -3,19 +3,21 @@ require 'benchmark_driver/struct'
  module BenchmarkDriver
  # All CLI options
  Config = ::BenchmarkDriver::Struct.new(
-   :runner_type, # @param [String]
-   :output_type, # @param [String]
-   :paths, # @param [Array<String>]
-   :executables, # @param [Array<BenchmarkDriver::Config::Executable>]
-   :filters, # @param [Array<Regexp>]
-   :repeat_count, # @param [Integer]
-   :run_duration, # @param [Float]
-   :verbose, # @param [Integer]
+   :runner_type, # @param [String]
+   :output_type, # @param [String]
+   :paths, # @param [Array<String>]
+   :executables, # @param [Array<BenchmarkDriver::Config::Executable>]
+   :filters, # @param [Array<Regexp>]
+   :repeat_count, # @param [Integer]
+   :repeat_result, # @param [String]
+   :run_duration, # @param [Float]
+   :verbose, # @param [Integer]
  defaults: {
    runner_type: 'ips',
    output_type: 'compare',
    filters: [],
    repeat_count: 1,
+   repeat_result: 'best',
    run_duration: 3.0,
    verbose: 0,
  },
@@ -23,10 +25,11 @@ module BenchmarkDriver

  # Subset of FullConfig passed to JobRunner
  Config::RunnerConfig = ::BenchmarkDriver::Struct.new(
-   :executables, # @param [Array<BenchmarkDriver::Config::Executable>]
-   :repeat_count, # @param [Integer]
-   :run_duration, # @param [Float]
-   :verbose, # @param [Integer]
+   :executables, # @param [Array<BenchmarkDriver::Config::Executable>]
+   :repeat_count, # @param [Integer]
+   :repeat_result, # @param [String]
+   :run_duration, # @param [Float]
+   :verbose, # @param [Integer]
  )

  Config::Executable = ::BenchmarkDriver::Struct.new(
data/lib/benchmark_driver/repeater.rb ADDED
@@ -0,0 +1,52 @@
+ module BenchmarkDriver
+   # Repeat calling the block and return the desired result: "best", "worst" or "average".
+   module Repeater
+     VALID_TYPES = %w[best worst average]
+
+     class << self
+       # `block.call` can return multiple objects, but the first one is used for sorting.
+       # When `config.repeat_result == 'average'`, how to deal with the rest of the objects
+       # is decided by the `:rest_on_average` option.
+       def with_repeat(config:, larger_better:, rest_on_average: :first, &block)
+         values = config.repeat_count.times.map { block.call }
+
+         case config.repeat_result
+         when 'best'
+           best_result(values, larger_better)
+         when 'worst'
+           best_result(values, !larger_better)
+         when 'average'
+           average_result(values, rest_on_average)
+         else
+           raise "unexpected repeat_result #{config.repeat_result.inspect}"
+         end
+       end
+
+       private
+
+       def best_result(values, larger_better)
+         values.sort_by do |value, *|
+           larger_better ? value : -value
+         end.last
+       end
+
+       def average_result(values, rest_on_average)
+         unless values.first.is_a?(Array)
+           return values.inject(&:+) / values.size.to_f
+         end
+
+         case rest_on_average
+         when :first
+           rest = values.first[1..-1]
+           [values.map { |v| v[0] }.inject(&:+) / values.size.to_f, *rest]
+         when :average
+           values.first.size.times.map do |index|
+             values.map { |v| v[index] }.inject(&:+) / values.size.to_f
+           end
+         else
+           raise "unexpected rest_on_average #{rest_on_average.inspect}"
+         end
+       end
+     end
+   end
+ end
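As a rough usage sketch of the repeater on its own (inside benchmark_driver the runners pass their `Config::RunnerConfig`): any object responding to `repeat_count` and `repeat_result` works as `config:`, and `measure_once` below is a hypothetical measurement.

```ruby
require 'benchmark_driver/repeater'

# Stand-in for the runner config; with_repeat only reads these two attributes.
RepeatConfig = Struct.new(:repeat_count, :repeat_result, keyword_init: true)
config = RepeatConfig.new(repeat_count: 3, repeat_result: 'average')

value = BenchmarkDriver::Repeater.with_repeat(config: config, larger_better: true) do
  measure_once # hypothetical: returns a Float such as iterations per second
end
```

When the block returns an array like `[value, duration]`, only the first element drives the best/worst choice, and `rest_on_average:` decides how the remaining elements are combined in the average case.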
data/lib/benchmark_driver/runner.rb CHANGED
@@ -23,6 +23,7 @@ module BenchmarkDriver
  runner_config = Config::RunnerConfig.new(
    executables: config.executables,
    repeat_count: config.repeat_count,
+   repeat_result: config.repeat_result,
    run_duration: config.run_duration,
    verbose: config.verbose,
  )
data/lib/benchmark_driver/runner/command_stdout.rb CHANGED
@@ -62,7 +62,7 @@ class BenchmarkDriver::Runner::CommandStdout
  jobs.each do |job|
  @output.with_job(name: job.name) do
  @config.executables.each do |exec|
- best_value = with_repeat(metric) do
+ value = BenchmarkDriver::Repeater.with_repeat(config: @config, larger_better: metric.larger_better) do
  stdout = with_chdir(job.working_directory) do
  with_ruby_prefix(exec) { execute(*exec.command, *job.command) }
  end
@@ -73,7 +73,7 @@ class BenchmarkDriver::Runner::CommandStdout
  end

  @output.with_context(name: exec.name, executable: exec) do
- @output.report(value: best_value, metric: metric)
+ @output.report(value: value, metric: metric)
  end
  end
  end
@@ -107,20 +107,6 @@ class BenchmarkDriver::Runner::CommandStdout
  stdout
  end

- # Return multiple times and return the best metrics
- def with_repeat(metric, &block)
-   values = @config.repeat_count.times.map do
-     block.call
-   end
-   values.sort_by do |value|
-     if metric.larger_better
-       value
-     else
-       -value
-     end
-   end.last
- end
-
  StdoutToMetrics = ::BenchmarkDriver::Struct.new(:stdout, :stdout_to_metrics) do
  def metrics_value
  eval(stdout_to_metrics, binding)
data/lib/benchmark_driver/runner/ips.rb CHANGED
@@ -51,7 +51,8 @@ class BenchmarkDriver::Runner::Ips
  jobs.each do |job|
  @output.with_job(name: job.name) do
  job.runnable_execs(@config.executables).each do |exec|
- value, duration = with_repeat(@config.repeat_count) do
+ repeat_params = { config: @config, larger_better: true, rest_on_average: :average }
+ value, duration = BenchmarkDriver::Repeater.with_repeat(repeat_params) do
  run_benchmark(job, exec: exec)
  end
  @output.with_context(name: exec.name, executable: exec, duration: duration) do
@@ -87,16 +88,6 @@ class BenchmarkDriver::Runner::Ips
  [duration, loop_count]
  end

- # Return multiple times and return the best metrics
- def with_repeat(repeat_times, &block)
-   value_durations = repeat_times.times.map do
-     block.call
-   end
-   value_durations.sort_by do |value, _|
-     metric.larger_better ? value : -value
-   end.last
- end
-
  # @param [BenchmarkDriver::Runner::Ips::Job] job - loop_count is not nil
  # @param [BenchmarkDriver::Config::Executable] exec
  # @return [BenchmarkDriver::Metrics]
data/lib/benchmark_driver/runner/memory.rb CHANGED
@@ -43,11 +43,11 @@ class BenchmarkDriver::Runner::Memory
  jobs.each do |job|
  @output.with_job(name: job.name) do
  job.runnable_execs(@config.executables).each do |exec|
- best_value = with_repeat(@config.repeat_count) do
+ value = BenchmarkDriver::Repeater.with_repeat(config: @config, larger_better: false) do
  run_benchmark(job, exec: exec)
  end
  @output.with_context(name: exec.name, executable: exec, loop_count: job.loop_count) do
- @output.report(value: best_value, metric: METRIC)
+ @output.report(value: value, metric: METRIC)
  end
  end
  end
@@ -57,14 +57,6 @@ class BenchmarkDriver::Runner::Memory

  private

- # Return multiple times and return the best value (smallest usage)
- def with_repeat(repeat_times, &block)
-   values = repeat_times.times.map do
-     block.call
-   end
-   values.sort.first
- end
-
  # @param [BenchmarkDriver::Runner::Ips::Job] job - loop_count is not nil
  # @param [BenchmarkDriver::Config::Executable] exec
  # @return [BenchmarkDriver::Metrics]
data/lib/benchmark_driver/runner/ruby_stdout.rb CHANGED
@@ -79,7 +79,8 @@ class BenchmarkDriver::Runner::RubyStdout
  jobs.each do |job|
  @output.with_job(name: job.name) do
  @config.executables.each do |exec|
- best_value, environment = with_repeat(metric) do
+ repeat_params = { config: @config, larger_better: metric.larger_better }
+ value, environment = BenchmarkDriver::Repeater.with_repeat(repeat_params) do
  stdout = with_chdir(job.working_directory) do
  with_ruby_prefix(exec) { execute(*exec.command, *job.command) }
  end
@@ -92,7 +93,7 @@ class BenchmarkDriver::Runner::RubyStdout
  end

  @output.with_context(name: exec.name, executable: exec, environment: environment) do
- @output.report(value: best_value, metric: metric)
+ @output.report(value: value, metric: metric)
  end
  end
  end
data/lib/benchmark_driver/version.rb CHANGED
@@ -1,3 +1,3 @@
  module BenchmarkDriver
-   VERSION = '0.11.0'
+   VERSION = '0.11.1'
  end
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: benchmark_driver
  version: !ruby/object:Gem::Version
- version: 0.11.0
+ version: 0.11.1
  platform: ruby
  authors:
  - Takashi Kokubun
@@ -99,6 +99,7 @@ files:
  - exe/benchmark-driver
  - images/optcarrot.png
  - lib/benchmark_driver.rb
+ - lib/benchmark_driver/bulk_output.rb
  - lib/benchmark_driver/config.rb
  - lib/benchmark_driver/default_job.rb
  - lib/benchmark_driver/default_job_parser.rb
@@ -110,6 +111,7 @@ files:
  - lib/benchmark_driver/output/record.rb
  - lib/benchmark_driver/output/simple.rb
  - lib/benchmark_driver/rbenv.rb
+ - lib/benchmark_driver/repeater.rb
  - lib/benchmark_driver/ruby_interface.rb
  - lib/benchmark_driver/runner.rb
  - lib/benchmark_driver/runner/command_stdout.rb