benchmark_driver 0.13.3 → 0.14.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 7a40d650e58bfb47dae94c1dfe4383dd0682a6749ae3c463cf10663ef8a69035
- data.tar.gz: 57cfa42e50210ea8384b2ba2831553f6aaf3cdfe3b87d1466d8497344343bf6c
+ metadata.gz: 7493fee045d80ab0918a271649807b7a984e30a6c2e90aa39d4a35d8c19e4198
+ data.tar.gz: 04d607c363d64d0f2c2c3e2fd6c6f107d1ad924eb757e048f112131249660a6b
  SHA512:
- metadata.gz: 4e4c9dcd507974ee703252c8049b7e538846ebf2ae6523303a20ed5e42d644a990de6890a6acb50dc9257e0292e95912ee895337697e28b1ba8cc72eff38f875
- data.tar.gz: bd12fb18af42c59187ba1b8e4b0568f2ece0b46ec8857f1ca003907007756e4817effc55a7104b217bd3b8492db1a4ee97972e11fddb6472ed4d9d6ddb8c1756
+ metadata.gz: 496a23c97b27783b2fef8f1da1a31a10c4e1efa4b2c487acd400ee2687f2c58e5d458fa800355a6121cc4728a0af6077c87a4067b524f9e71506ef21cde74ebd
+ data.tar.gz: c65d611995c53f273a803234e9bd9c9d31cd6edbfc1a79a92066a5f6a438583cf601eb54319d3e81b18ab4c9f06a2e13b3e642155307e8555a1b2fd9f465ffca
@@ -1,3 +1,15 @@
+ # v0.14.0
+
+ - `benchmark-driver` command also takes `*.rb` file to run single-execution benchmark
+ - Non-`*.rb` filename is considered as YAML file as before
+ - [breaking change] `--verbose` no longer takes LEVEL argument
+ - Added `-v` for `-v`/`-vv` instead of `--verbose 1` and `--verbose 2`.
+ - `--verbose --verbose` can also be used instead of `--verbose 2` if you want.
+ - [breaking change] Use ` `(space) to split Ruby arguments in `-e`/`--executables`/`--rbenv`
+ - ex) `-e "/path/to/ruby --jit"` should be used instead of `-e "/path/to/ruby,--jit"`
+ - [experimental] Return 0.0 as value when benchmark script fails on ips/time/memory/once runners
+ - Exit status is 0 for that case. `benchmark-driver` exits abnormaly only on its internal bug.
+
  # v0.13.3
 
  - Support `require: false` in contexts to skip automatic requirement with a gem name
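For illustration, here is what a minimal single-execution benchmark file for the new `*.rb` mode might look like. The file name `fib.rb` and its contents are hypothetical, not part of this release; per the changelog entries above, the whole script is run once per executable rather than in a measured loop, and leading comment lines (such as magic comments) are kept as the prelude.

```ruby
# fib.rb -- hypothetical example of a single-execution benchmark script.
def fib(n)
  n < 2 ? n : fib(n - 1) + fib(n - 2)
end

fib(30)
```

An invocation like `benchmark-driver fib.rb -v --rbenv '2.6.0-dev;2.6.0-dev --jit'` would then run that one script on each executable, with Ruby arguments separated by spaces rather than commas.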
data/README.md CHANGED
@@ -70,16 +70,17 @@ With `benchmark-driver` command, you can describe benchmark with YAML input.
 
  ```
  $ benchmark-driver -h
- Usage: benchmark-driver [options] [YAML]
+ Usage: benchmark-driver [options] [YAML|RUBY]
  -r, --runner [TYPE] Specify runner type: ips, time, memory, once (default: ips)
  -o, --output [TYPE] Specify output type: compare, simple, markdown, record (default: compare)
  -e, --executables [EXECS] Ruby executables (e1::path1,arg1,...; e2::path2,arg2;...)
  --rbenv [VERSIONS] Ruby executables in rbenv (x.x.x,arg1,...;y.y.y,arg2,...;...)
- --repeat-count [NUM] Try benchmark NUM times and use the fastest result (TODO)
+ --repeat-count [NUM] Try benchmark NUM times and use the fastest result or the worst memory usage
+ --repeat-result [TYPE] Yield "best", "average" or "worst" result with --repeat-count (default: best)
  --bundler Install and use gems specified in Gemfile
  --filter [REGEXP] Filter out benchmarks with given regexp
- --verbose [LEVEL] Show some verbose outputs: 0, 1, 2 (default: 0)
  --run-duration [SECONDS] Warmup estimates loop_count to run for this duration (default: 3)
+ -v, --verbose Verbose mode. Multiple -v options increase visilibity (max: 2)
  ```
 
  #### Running single script
@@ -363,16 +364,16 @@ If you benchmark can run with `ruby foo bar`, specify `foo bar` to `command:`.
  Then write `stdout_to_metrics:` to convert stdout to metrics. This runner can be used only with YAML interface for now.
 
  ```
- $ benchmark-driver benchmark.yml --verbose 1 --rbenv '2.6.0-dev;2.6.0-dev,--jit'
+ $ benchmark-driver benchmark.yml --verbose 1 --rbenv '2.6.0-dev;2.6.0-dev --jit'
  2.6.0-dev: ruby 2.6.0dev (2018-03-21 trunk 62870) [x86_64-linux]
- 2.6.0-dev,--jit: ruby 2.6.0dev (2018-03-21 trunk 62870) +JIT [x86_64-linux]
+ 2.6.0-dev --jit: ruby 2.6.0dev (2018-03-21 trunk 62870) +JIT [x86_64-linux]
  Calculating -------------------------------------
- 2.6.0-dev 2.6.0-dev,--jit
+ 2.6.0-dev 2.6.0-dev --jit
  optcarrot 51.866 67.445 fps
 
  Comparison:
  optcarrot
- 2.6.0-dev,--jit: 67.4 fps
+ 2.6.0-dev --jit: 67.4 fps
  2.6.0-dev: 51.9 fps - 1.30x slower
  ```
 
@@ -3,6 +3,7 @@ $:.unshift File.expand_path('../lib', __dir__)
 
  require 'benchmark_driver'
  require 'optparse'
+ require 'shellwords'
  require 'yaml'
 
  # Parse command line options
@@ -10,7 +11,7 @@ config = BenchmarkDriver::Config.new.tap do |c|
  executables = []
  bundler = false
  parser = OptionParser.new do |o|
- o.banner = "Usage: #{File.basename($0, '.*')} [options] [YAML]"
+ o.banner = "Usage: #{File.basename($0, '.*')} [options] [YAML|RUBY]"
  o.on('-r', '--runner [TYPE]', 'Specify runner type: ips, time, memory, once (default: ips)') do |d|
  abort '-r, --runner must take argument but not given' if d.nil?
  c.runner_type = d
@@ -24,7 +25,7 @@ config = BenchmarkDriver::Config.new.tap do |c|
  e.split(';').each do |name_path|
  name, path = name_path.split('::', 2)
  path ||= name # if `::` is not given, regard whole string as path
- command = path.split(',')
+ command = path.shellsplit
  command[0] = File.expand_path(command[0])
  executables << BenchmarkDriver::Config::Executable.new(name: name, command: command)
  end
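The change from `String#split(',')` to `String#shellsplit` (from the stdlib `shellwords` required above) is what implements the space-separated executable specs announced in the changelog. A minimal sketch of the difference, with an illustrative path:

```ruby
require 'shellwords'

spec = '/path/to/ruby --jit'
spec.split(',')  # => ["/path/to/ruby --jit"]    (old behavior expected "/path/to/ruby,--jit")
spec.shellsplit  # => ["/path/to/ruby", "--jit"] (shell-style word splitting; quoting is honored)
```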
@@ -54,13 +55,6 @@ config = BenchmarkDriver::Config.new.tap do |c|
  o.on('--filter [REGEXP]', 'Filter out benchmarks with given regexp') do |v|
  c.filters << Regexp.compile(v)
  end
- o.on('--verbose [LEVEL]', 'Show some verbose outputs: 0, 1, 2 (default: 0)') do |v|
- begin
- c.verbose = Integer(v)
- rescue ArgumentError
- abort "--verbose must take Integer, but got #{v.inspect}"
- end
- end
  o.on('--run-duration [SECONDS]', 'Warmup estimates loop_count to run for this duration (default: 3)') do |v|
  begin
  c.run_duration = Float(v)
@@ -68,6 +62,9 @@ config = BenchmarkDriver::Config.new.tap do |c|
  abort "--run-duration must take Float, but got #{v.inspect}"
  end
  end
+ o.on('-v', '--verbose', 'Verbose mode. Multiple -v options increase visilibity (max: 2)') do |v|
+ c.verbose += 1
+ end
  end
  c.paths = parser.parse!(ARGV)
  if c.paths.empty?
@@ -89,8 +86,17 @@ end
 
  # Parse benchmark job definitions
  jobs = config.paths.flat_map do |path|
- job = YAML.load(File.read(path))
- job = { 'type' => config.runner_type }.merge!(job)
+ job = { 'type' => config.runner_type }
+
+ # Treat *.rb as a single-execution benchmark, others are considered as YAML definition
+ if path.end_with?('.rb')
+ name = File.basename(path).sub(/\.rb\z/, '')
+ script = File.read(path)
+ prelude = script.slice!(/\A(^#[^\n]+\n)+/m) || '' # preserve magic comment
+ job.merge!('prelude' => prelude, 'benchmark' => { name => script }, 'loop_count' => 1)
+ else
+ job.merge!(YAML.load_file(path))
+ end
 
  begin
  # `working_directory` is YAML-specific special parameter, mainly for "command_stdout"
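As a rough sketch of what the new `*.rb` branch above builds (the file name and contents are hypothetical), a script whose leading comment lines are sliced off as the prelude becomes a one-iteration job definition roughly equivalent to:

```ruby
# Hypothetical job hash for a file bench/fib.rb that starts with a magic comment:
{
  'type'       => 'ips',                             # config.runner_type (ips by default)
  'prelude'    => "# frozen_string_literal: true\n", # leading comment lines, so magic comments still apply
  'benchmark'  => { 'fib' => "def fib(n)\n  ...\nend\n\nfib(30)\n" }, # basename without .rb => remaining script
  'loop_count' => 1                                  # the script is executed exactly once
}
```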
@@ -35,6 +35,7 @@ module BenchmarkDriver
  :environment, # @param [Hash] - Any other key -> value pairs to express the benchmark context
  defaults: { environment: {} },
  )
+ Result::ERROR = 0.0
 
  # A kind of thing to be measured
  Metric = ::BenchmarkDriver::Struct.new(
@@ -116,7 +116,9 @@ class BenchmarkDriver::Output::Compare
  end
 
  def humanize(value, width = 10)
- if value <= 0
+ if value == BenchmarkDriver::Result::ERROR
+ return " %#{width}s" % 'ERROR'
+ elsif value < 0
  raise ArgumentError.new("Non positive value: #{value.inspect}")
  end
 
@@ -77,7 +77,9 @@ class BenchmarkDriver::Output::Simple
  end
 
  def humanize(value)
- if value < 0
+ if value == BenchmarkDriver::Result::ERROR
+ return 'ERROR'
+ elsif value < 0
  raise ArgumentError.new("Negative value: #{value.inspect}")
  end
 
@@ -1,3 +1,5 @@
+ require 'shellwords'
+
  module BenchmarkDriver
  module Rbenv
  # @param [String] version
@@ -13,7 +15,7 @@ module BenchmarkDriver
  def self.parse_spec(full_spec)
  name, spec = full_spec.split('::', 2)
  spec ||= name # if `::` is not given, regard whole string as spec
- version, *args = spec.split(',')
+ version, *args = spec.shellsplit
  BenchmarkDriver::Config::Executable.new(
  name: name,
  command: [BenchmarkDriver::Rbenv.ruby_path(version), *args],
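With the same shell-style splitting applied to `--rbenv` specs, a spec such as `2.6.0-dev --jit` (as in the README example) would be parsed into something like the following; the rbenv installation path is illustrative, not taken from this release:

```ruby
# Hypothetical result of parse_spec('2.6.0-dev --jit'):
BenchmarkDriver::Config::Executable.new(
  name:    '2.6.0-dev --jit',  # whole spec, since no `name::spec` form was given
  command: ['/home/user/.rbenv/versions/2.6.0-dev/bin/ruby', '--jit']  # ruby_path(version) plus shellsplit args
)
```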
@@ -101,9 +101,13 @@ class BenchmarkDriver::Runner::Ips
 
  duration = Tempfile.open(['benchmark_driver-', '.rb']) do |f|
  with_script(benchmark.render(result: f.path)) do |path|
- execute(*context.executable.command, path)
+ IO.popen([*context.executable.command, path], &:read) # TODO: print stdout if verbose=2
+ if $?.success?
+ Float(f.read)
+ else
+ BenchmarkDriver::Result::ERROR
+ end
  end
- Float(f.read)
  end
 
  value_duration(
@@ -119,7 +123,11 @@ class BenchmarkDriver::Runner::Ips
 
  # Overridden by BenchmarkDriver::Runner::Time
  def value_duration(duration:, loop_count:)
- if duration == BenchmarkDriver::Result::ERROR
+ if duration == BenchmarkDriver::Result::ERROR
+ [BenchmarkDriver::Result::ERROR, BenchmarkDriver::Result::ERROR]
+ else
+ [loop_count.to_f / duration, duration]
+ end
  end
 
  def with_script(script)
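Given the definition above, illustrative calls would behave as follows: the normal path divides the loop count by the measured duration, while a failed run propagates the 0.0 error sentinel instead of raising.

```ruby
value_duration(duration: 2.0, loop_count: 1000)
# => [500.0, 2.0]   (500 iterations per second over a 2-second run)
value_duration(duration: BenchmarkDriver::Result::ERROR, loop_count: 1000)
# => [0.0, 0.0]     (error sentinel for both the value and the duration)
```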
@@ -136,7 +144,7 @@ class BenchmarkDriver::Runner::Ips
  end
 
  def execute(*args)
- stdout = IO.popen(args, &:read) # handle stdout?
+ stdout = IO.popen(args, &:read) # TODO: print stdout if verbose=2
  unless $?.success?
  raise "Failed to execute: #{args.shelljoin} (status: #{$?.exitstatus})"
  end
@@ -68,14 +68,18 @@ class BenchmarkDriver::Runner::Memory
  loop_count: job.loop_count,
  )
 
- output = with_script(benchmark.render) do |path|
- execute('/usr/bin/time', *context.executable.command, path)
- end
-
- match_data = /^(?<user>\d+.\d+)user\s+(?<system>\d+.\d+)system\s+(?<elapsed1>\d+):(?<elapsed2>\d+.\d+)elapsed.+\([^\s]+\s+(?<maxresident>\d+)maxresident\)k$/.match(output)
- raise "Unexpected format given from /usr/bin/time:\n#{out}" unless match_data[:maxresident]
+ with_script(benchmark.render) do |path|
+ output = IO.popen(['/usr/bin/time', *context.executable.command, path], err: [:child, :out], &:read)
+ if $?.success?
+ match_data = /^(?<user>\d+.\d+)user\s+(?<system>\d+.\d+)system\s+(?<elapsed1>\d+):(?<elapsed2>\d+.\d+)elapsed.+\([^\s]+\s+(?<maxresident>\d+)maxresident\)k$/.match(output)
+ raise "Unexpected format given from /usr/bin/time:\n#{out}" unless match_data[:maxresident]
 
- Integer(match_data[:maxresident]) * 1000.0 # kilobytes -> bytes
+ Integer(match_data[:maxresident]) * 1000.0 # kilobytes -> bytes
+ else
+ $stdout.print(output)
+ BenchmarkDriver::Result::ERROR
+ end
+ end
  end
 
  def with_script(script)
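For reference, the regexp above is matched against the summary line printed by `/usr/bin/time`, and only the `maxresident` capture is used. A hypothetical run might produce a line like the one below (sample values invented):

```ruby
# Illustrative only: a typical GNU time summary line and what the runner extracts from it.
output = "0.25user 0.03system 0:00.29elapsed 99%CPU (0avgtext+0avgdata 14056maxresident)k\n"
m = /^(?<user>\d+.\d+)user\s+(?<system>\d+.\d+)system\s+(?<elapsed1>\d+):(?<elapsed2>\d+.\d+)elapsed.+\([^\s]+\s+(?<maxresident>\d+)maxresident\)k$/.match(output)
m[:maxresident]                    # => "14056" (kilobytes)
Integer(m[:maxresident]) * 1000.0  # => 14056000.0 (bytes, as reported by the memory runner)
```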
@@ -91,14 +95,6 @@ class BenchmarkDriver::Runner::Memory
  end
  end
 
- def execute(*args)
- output = IO.popen(args, err: [:child, :out], &:read) # handle stdout?
- unless $?.success?
- raise "Failed to execute: #{args.shelljoin} (status: #{$?.exitstatus})"
- end
- output
- end
-
  # @param [String] prelude
  # @param [String] script
  # @param [String] teardown
@@ -35,8 +35,14 @@ class BenchmarkDriver::Runner::Once
  @output.with_job(name: job.name) do
  job.runnable_contexts(@contexts).each do |context|
  duration = run_benchmark(job, context: context) # no repeat support
+ if duration == BenchmarkDriver::Result::ERROR
+ value = BenchmarkDriver::Result::ERROR
+ else
+ value = 1.0 / duration
+ end
+
  @output.with_context(name: context.name, executable: context.executable, gems: context.gems) do
- @output.report(values: { METRIC => 1.0 / duration }, duration: duration, loop_count: 1)
+ @output.report(values: { METRIC => value }, duration: duration, loop_count: 1)
  end
  end
  end
@@ -59,9 +65,13 @@ class BenchmarkDriver::Runner::Once
 
  Tempfile.open(['benchmark_driver-', '.rb']) do |f|
  with_script(benchmark.render(result: f.path)) do |path|
- execute(*context.executable.command, path)
+ IO.popen([*context.executable.command, path], &:read) # TODO: print stdout if verbose=2
+ if $?.success?
+ Float(f.read)
+ else
+ BenchmarkDriver::Result::ERROR
+ end
  end
- Float(f.read)
  end
  end
 
@@ -1,3 +1,3 @@
  module BenchmarkDriver
- VERSION = '0.13.3'
+ VERSION = '0.14.0'
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: benchmark_driver
  version: !ruby/object:Gem::Version
- version: 0.13.3
+ version: 0.14.0
  platform: ruby
  authors:
  - Takashi Kokubun
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2018-07-08 00:00:00.000000000 Z
+ date: 2018-07-09 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: bundler