benchmark 0.1.0
- checksums.yaml +7 -0
- data/.gitignore +9 -0
- data/.travis.yml +6 -0
- data/Gemfile +9 -0
- data/LICENSE.txt +22 -0
- data/README.md +138 -0
- data/Rakefile +8 -0
- data/benchmark.gemspec +27 -0
- data/bin/console +14 -0
- data/bin/setup +8 -0
- data/lib/benchmark.rb +565 -0
- data/lib/benchmark/version.rb +3 -0
- metadata +56 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
---
SHA256:
  metadata.gz: 3c8c0b0e48cfa346f2e333800e2ae464167e6b84d140c4aacec32b3757f323e6
  data.tar.gz: cb2477677def26e350a7413871050415fd1b65e6928982953d6ee7e851e1e0a1
SHA512:
  metadata.gz: f55ceb4de6f64ed14fdd4d3a8d5c267883a095d96f40b198a8cc4a8d7873e64d8997447a7a5e6056cbfedb8b253cbd1b38e5216d464307026c759199017e5c51
  data.tar.gz: 35e6aa851d15e26c3c3f6188902bacf74c50d67991a22a649ea4269d61edb17a92713af082b748a1241e6af2026d404b796d0d678abf6b14cdb08e149baa014c
data/.gitignore
ADDED
data/.travis.yml
ADDED
data/Gemfile
ADDED
data/LICENSE.txt
ADDED
@@ -0,0 +1,22 @@
Copyright (C) 1993-2013 Yukihiro Matsumoto. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
data/README.md
ADDED
@@ -0,0 +1,138 @@
# Benchmark

The Benchmark module provides methods for benchmarking Ruby code, giving detailed reports on the time taken for each task.

## Installation

Add this line to your application's Gemfile:

```ruby
gem 'benchmark'
```

And then execute:

    $ bundle

Or install it yourself as:

    $ gem install benchmark

## Usage

The Benchmark module provides methods to measure and report the time used to execute Ruby code.

Measure the time to construct the string given by the expression <code>"a"*1_000_000_000</code>:

```ruby
require 'benchmark'
puts Benchmark.measure { "a"*1_000_000_000 }
```

On my machine (OSX 10.8.3 on i5 1.7 GHz) this generates:

```
  0.350000   0.400000   0.750000 (  0.835234)
```

This report shows the user CPU time, system CPU time, the sum of the user and system CPU times, and the elapsed real time. The unit of time is seconds.

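The value returned by `Benchmark.measure` is a `Benchmark::Tms` object, so the same figures can also be read programmatically instead of printed; a minimal sketch (the block being timed here is only illustrative):

```ruby
require 'benchmark'

# Sketch: keep the Benchmark::Tms result instead of printing it.
t = Benchmark.measure { "a" * 1_000_000 }

t.utime   # user CPU time, in seconds
t.stime   # system CPU time, in seconds
t.total   # total CPU time (user + system, including any child-process times)
t.real    # elapsed real time
```
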
Do some experiments sequentially using the #bm method:

```ruby
require 'benchmark'
n = 5000000
Benchmark.bm do |x|
  x.report { for i in 1..n; a = "1"; end }
  x.report { n.times do   ; a = "1"; end }
  x.report { 1.upto(n) do ; a = "1"; end }
end
```

The result:

```
       user     system      total        real
   1.010000   0.000000   1.010000 (  1.014479)
   1.000000   0.000000   1.000000 (  0.998261)
   0.980000   0.000000   0.980000 (  0.981335)
```

Continuing the previous example, put a label in each report:

```ruby
require 'benchmark'
n = 5000000
Benchmark.bm(7) do |x|
  x.report("for:")   { for i in 1..n; a = "1"; end }
  x.report("times:") { n.times do   ; a = "1"; end }
  x.report("upto:")  { 1.upto(n) do ; a = "1"; end }
end
```

The result:

```
              user     system      total        real
for:      1.010000   0.000000   1.010000 (  1.015688)
times:    1.000000   0.000000   1.000000 (  1.003611)
upto:     1.030000   0.000000   1.030000 (  1.028098)
```

The times for some benchmarks depend on the order in which items are run. These differences are due to the cost of memory allocation and garbage collection. To avoid these discrepancies, the #bmbm method is provided. For example, to compare ways to sort an array of floats:

```ruby
require 'benchmark'
array = (1..1000000).map { rand }
Benchmark.bmbm do |x|
  x.report("sort!") { array.dup.sort! }
  x.report("sort")  { array.dup.sort }
end
```

The result:

```
Rehearsal -----------------------------------------
sort!   1.490000   0.010000   1.500000 (  1.490520)
sort    1.460000   0.000000   1.460000 (  1.463025)
-------------------------------- total: 2.960000sec
            user     system      total        real
sort!   1.460000   0.000000   1.460000 (  1.460465)
sort    1.450000   0.010000   1.460000 (  1.448327)
```

Report statistics of sequential experiments with unique labels, using the #benchmark method:

```ruby
require 'benchmark'
include Benchmark         # we need the CAPTION and FORMAT constants
n = 5000000
Benchmark.benchmark(CAPTION, 7, FORMAT, ">total:", ">avg:") do |x|
  tf = x.report("for:")   { for i in 1..n; a = "1"; end }
  tt = x.report("times:") { n.times do   ; a = "1"; end }
  tu = x.report("upto:")  { 1.upto(n) do ; a = "1"; end }
  [tf+tt+tu, (tf+tt+tu)/3]
end
```

The result:

```
              user     system      total        real
for:      0.950000   0.000000   0.950000 (  0.952039)
times:    0.980000   0.000000   0.980000 (  0.984938)
upto:     0.950000   0.000000   0.950000 (  0.946787)
>total:   2.880000   0.000000   2.880000 (  2.883764)
>avg:     0.960000   0.000000   0.960000 (  0.961255)
```

## Development

After checking out the repo, run `bin/setup` to install dependencies. Then, run `rake test` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment.

To install this gem onto your local machine, run `bundle exec rake install`. To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and tags, and push the `.gem` file to [rubygems.org](https://rubygems.org).

## Contributing

Bug reports and pull requests are welcome on GitHub at https://github.com/ruby/benchmark.
data/Rakefile
ADDED
data/benchmark.gemspec
ADDED
@@ -0,0 +1,27 @@
lib = File.expand_path("lib", __dir__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
require "benchmark/version"

Gem::Specification.new do |spec|
  spec.name          = "benchmark"
  spec.version       = Benchmark::VERSION
  spec.authors       = ["Hiroshi SHIBATA"]
  spec.email         = ["hsbt@ruby-lang.org"]

  spec.summary       = %q{a performance benchmarking library}
  spec.description   = spec.summary
  spec.homepage      = "https://github.com/ruby/benchmark"
  spec.license       = "BSD-2-Clause"

  spec.metadata["homepage_uri"] = spec.homepage
  spec.metadata["source_code_uri"] = spec.homepage

  # Specify which files should be added to the gem when it is released.
  # The `git ls-files -z` loads the files in the RubyGem that have been added into git.
  spec.files         = Dir.chdir(File.expand_path('..', __FILE__)) do
    `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(test|spec|features)/}) }
  end
  spec.bindir        = "exe"
  spec.executables   = spec.files.grep(%r{^exe/}) { |f| File.basename(f) }
  spec.require_paths = ["lib"]
end
data/bin/console
ADDED
@@ -0,0 +1,14 @@
#!/usr/bin/env ruby

require "bundler/setup"
require "benchmark"

# You can add fixtures and/or initialization code here to make experimenting
# with your gem easier. You can also use a different console, if you like.

# (If you use this, don't forget to add pry to your Gemfile!)
# require "pry"
# Pry.start

require "irb"
IRB.start(__FILE__)
data/bin/setup
ADDED
data/lib/benchmark.rb
ADDED
@@ -0,0 +1,565 @@
# frozen_string_literal: true
#--
# benchmark.rb - a performance benchmarking library
#
# $Id$
#
# Created by Gotoken (gotoken@notwork.org).
#
# Documentation by Gotoken (original RD), Lyle Johnson (RDoc conversion), and
# Gavin Sinclair (editing).
#++
#
# == Overview
#
# The Benchmark module provides methods for benchmarking Ruby code, giving
# detailed reports on the time taken for each task.
#

# The Benchmark module provides methods to measure and report the time
# used to execute Ruby code.
#
# * Measure the time to construct the string given by the expression
#   <code>"a"*1_000_000_000</code>:
#
#       require 'benchmark'
#
#       puts Benchmark.measure { "a"*1_000_000_000 }
#
#   On my machine (OSX 10.8.3 on i5 1.7 GHz) this generates:
#
#       0.350000   0.400000   0.750000 (  0.835234)
#
#   This report shows the user CPU time, system CPU time, the sum of
#   the user and system CPU times, and the elapsed real time. The unit
#   of time is seconds.
#
# * Do some experiments sequentially using the #bm method:
#
#       require 'benchmark'
#
#       n = 5000000
#       Benchmark.bm do |x|
#         x.report { for i in 1..n; a = "1"; end }
#         x.report { n.times do   ; a = "1"; end }
#         x.report { 1.upto(n) do ; a = "1"; end }
#       end
#
#   The result:
#
#              user     system      total        real
#          1.010000   0.000000   1.010000 (  1.014479)
#          1.000000   0.000000   1.000000 (  0.998261)
#          0.980000   0.000000   0.980000 (  0.981335)
#
# * Continuing the previous example, put a label in each report:
#
#       require 'benchmark'
#
#       n = 5000000
#       Benchmark.bm(7) do |x|
#         x.report("for:")   { for i in 1..n; a = "1"; end }
#         x.report("times:") { n.times do   ; a = "1"; end }
#         x.report("upto:")  { 1.upto(n) do ; a = "1"; end }
#       end
#
#   The result:
#
#                     user     system      total        real
#       for:      1.010000   0.000000   1.010000 (  1.015688)
#       times:    1.000000   0.000000   1.000000 (  1.003611)
#       upto:     1.030000   0.000000   1.030000 (  1.028098)
#
# * The times for some benchmarks depend on the order in which items
#   are run. These differences are due to the cost of memory
#   allocation and garbage collection. To avoid these discrepancies,
#   the #bmbm method is provided. For example, to compare ways to
#   sort an array of floats:
#
#       require 'benchmark'
#
#       array = (1..1000000).map { rand }
#
#       Benchmark.bmbm do |x|
#         x.report("sort!") { array.dup.sort! }
#         x.report("sort")  { array.dup.sort }
#       end
#
#   The result:
#
#       Rehearsal -----------------------------------------
#       sort!   1.490000   0.010000   1.500000 (  1.490520)
#       sort    1.460000   0.000000   1.460000 (  1.463025)
#       -------------------------------- total: 2.960000sec
#
#                   user     system      total        real
#       sort!   1.460000   0.000000   1.460000 (  1.460465)
#       sort    1.450000   0.010000   1.460000 (  1.448327)
#
# * Report statistics of sequential experiments with unique labels,
#   using the #benchmark method:
#
#       require 'benchmark'
#       include Benchmark         # we need the CAPTION and FORMAT constants
#
#       n = 5000000
#       Benchmark.benchmark(CAPTION, 7, FORMAT, ">total:", ">avg:") do |x|
#         tf = x.report("for:")   { for i in 1..n; a = "1"; end }
#         tt = x.report("times:") { n.times do   ; a = "1"; end }
#         tu = x.report("upto:")  { 1.upto(n) do ; a = "1"; end }
#         [tf+tt+tu, (tf+tt+tu)/3]
#       end
#
#   The result:
#
#                     user     system      total        real
#       for:      0.950000   0.000000   0.950000 (  0.952039)
#       times:    0.980000   0.000000   0.980000 (  0.984938)
#       upto:     0.950000   0.000000   0.950000 (  0.946787)
#       >total:   2.880000   0.000000   2.880000 (  2.883764)
#       >avg:     0.960000   0.000000   0.960000 (  0.961255)

module Benchmark

  BENCHMARK_VERSION = "2002-04-25" # :nodoc:

  # Invokes the block with a Benchmark::Report object, which
  # may be used to collect and report on the results of individual
  # benchmark tests. Reserves +label_width+ leading spaces for
  # labels on each line. Prints +caption+ at the top of the
  # report, and uses +format+ to format each line.
  # Returns an array of Benchmark::Tms objects.
  #
  # If the block returns an array of
  # Benchmark::Tms objects, these will be used to format
  # additional lines of output. If +labels+ parameter are
  # given, these are used to label these extra lines.
  #
  # _Note_: Other methods provide a simpler interface to this one, and are
  # suitable for nearly all benchmarking requirements. See the examples in
  # Benchmark, and the #bm and #bmbm methods.
  #
  # Example:
  #
  #     require 'benchmark'
  #     include Benchmark         # we need the CAPTION and FORMAT constants
  #
  #     n = 5000000
  #     Benchmark.benchmark(CAPTION, 7, FORMAT, ">total:", ">avg:") do |x|
  #       tf = x.report("for:")   { for i in 1..n; a = "1"; end }
  #       tt = x.report("times:") { n.times do   ; a = "1"; end }
  #       tu = x.report("upto:")  { 1.upto(n) do ; a = "1"; end }
  #       [tf+tt+tu, (tf+tt+tu)/3]
  #     end
  #
  # Generates:
  #
  #                   user     system      total        real
  #     for:      0.970000   0.000000   0.970000 (  0.970493)
  #     times:    0.990000   0.000000   0.990000 (  0.989542)
  #     upto:     0.970000   0.000000   0.970000 (  0.972854)
  #     >total:   2.930000   0.000000   2.930000 (  2.932889)
  #     >avg:     0.976667   0.000000   0.976667 (  0.977630)
  #

  def benchmark(caption = "", label_width = nil, format = nil, *labels) # :yield: report
    sync = STDOUT.sync
    STDOUT.sync = true
    label_width ||= 0
    label_width += 1
    format ||= FORMAT
    print ' '*label_width + caption unless caption.empty?
    report = Report.new(label_width, format)
    results = yield(report)
    Array === results and results.grep(Tms).each {|t|
      print((labels.shift || t.label || "").ljust(label_width), t.format(format))
    }
    report.list
  ensure
    STDOUT.sync = sync unless sync.nil?
  end


  # A simple interface to the #benchmark method, #bm generates sequential
  # reports with labels. +label_width+ and +labels+ parameters have the same
  # meaning as for #benchmark.
  #
  #     require 'benchmark'
  #
  #     n = 5000000
  #     Benchmark.bm(7) do |x|
  #       x.report("for:")   { for i in 1..n; a = "1"; end }
  #       x.report("times:") { n.times do   ; a = "1"; end }
  #       x.report("upto:")  { 1.upto(n) do ; a = "1"; end }
  #     end
  #
  # Generates:
  #
  #                   user     system      total        real
  #     for:      0.960000   0.000000   0.960000 (  0.957966)
  #     times:    0.960000   0.000000   0.960000 (  0.960423)
  #     upto:     0.950000   0.000000   0.950000 (  0.954864)
  #

  def bm(label_width = 0, *labels, &blk) # :yield: report
    benchmark(CAPTION, label_width, FORMAT, *labels, &blk)
  end


  # Sometimes benchmark results are skewed because code executed
  # earlier encounters different garbage collection overheads than
  # that run later. #bmbm attempts to minimize this effect by running
  # the tests twice, the first time as a rehearsal in order to get the
  # runtime environment stable, the second time for
  # real. GC.start is executed before the start of each of
  # the real timings; the cost of this is not included in the
  # timings. In reality, though, there's only so much that #bmbm can
  # do, and the results are not guaranteed to be isolated from garbage
  # collection and other effects.
  #
  # Because #bmbm takes two passes through the tests, it can
  # calculate the required label width.
  #
  #     require 'benchmark'
  #
  #     array = (1..1000000).map { rand }
  #
  #     Benchmark.bmbm do |x|
  #       x.report("sort!") { array.dup.sort! }
  #       x.report("sort")  { array.dup.sort }
  #     end
  #
  # Generates:
  #
  #     Rehearsal -----------------------------------------
  #     sort!   1.440000   0.010000   1.450000 (  1.446833)
  #     sort    1.440000   0.000000   1.440000 (  1.448257)
  #     -------------------------------- total: 2.890000sec
  #
  #                 user     system      total        real
  #     sort!   1.460000   0.000000   1.460000 (  1.458065)
  #     sort    1.450000   0.000000   1.450000 (  1.455963)
  #
  # #bmbm yields a Benchmark::Job object and returns an array of
  # Benchmark::Tms objects.
  #
  def bmbm(width = 0) # :yield: job
    job = Job.new(width)
    yield(job)
    width = job.width + 1
    sync = STDOUT.sync
    STDOUT.sync = true

    # rehearsal
    puts 'Rehearsal '.ljust(width+CAPTION.length,'-')
    ets = job.list.inject(Tms.new) { |sum,(label,item)|
      print label.ljust(width)
      res = Benchmark.measure(&item)
      print res.format
      sum + res
    }.format("total: %tsec")
    print " #{ets}\n\n".rjust(width+CAPTION.length+2,'-')

    # take
    print ' '*width + CAPTION
    job.list.map { |label,item|
      GC.start
      print label.ljust(width)
      Benchmark.measure(label, &item).tap { |res| print res }
    }
  ensure
    STDOUT.sync = sync unless sync.nil?
  end

  #
  # Returns the time used to execute the given block as a
  # Benchmark::Tms object. Takes +label+ option.
  #
  #     require 'benchmark'
  #
  #     n = 1000000
  #
  #     time = Benchmark.measure do
  #       n.times { a = "1" }
  #     end
  #     puts time
  #
  # Generates:
  #
  #     0.220000   0.000000   0.220000 (  0.227313)
  #
  def measure(label = "") # :yield:
    t0, r0 = Process.times, Process.clock_gettime(Process::CLOCK_MONOTONIC)
    yield
    t1, r1 = Process.times, Process.clock_gettime(Process::CLOCK_MONOTONIC)
    Benchmark::Tms.new(t1.utime  - t0.utime,
                       t1.stime  - t0.stime,
                       t1.cutime - t0.cutime,
                       t1.cstime - t0.cstime,
                       r1 - r0,
                       label)
  end

  #
  # Returns the elapsed real time used to execute the given block.
  #
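  # A small usage sketch (the timed block and the value shown are
  # illustrative only):
  #
  #     require 'benchmark'
  #
  #     Benchmark.realtime { sleep 0.5 }   #=> roughly 0.5 (seconds, as a Float)
  #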
  def realtime # :yield:
    r0 = Process.clock_gettime(Process::CLOCK_MONOTONIC)
    yield
    Process.clock_gettime(Process::CLOCK_MONOTONIC) - r0
  end

  module_function :benchmark, :measure, :realtime, :bm, :bmbm

  #
  # A Job is a sequence of labelled blocks to be processed by the
  # Benchmark.bmbm method. It is of little direct interest to the user.
  #
  class Job # :nodoc:
    #
    # Returns an initialized Job instance.
    # Usually, one doesn't call this method directly, as new
    # Job objects are created by the #bmbm method.
    # +width+ is a initial value for the label offset used in formatting;
    # the #bmbm method passes its +width+ argument to this constructor.
    #
    def initialize(width)
      @width = width
      @list = []
    end

    #
    # Registers the given label and block pair in the job list.
    #
    def item(label = "", &blk) # :yield:
      raise ArgumentError, "no block" unless block_given?
      label = label.to_s
      w = label.length
      @width = w if @width < w
      @list << [label, blk]
      self
    end

    alias report item

    # An array of 2-element arrays, consisting of label and block pairs.
    attr_reader :list

    # Length of the widest label in the #list.
    attr_reader :width
  end

  #
  # This class is used by the Benchmark.benchmark and Benchmark.bm methods.
  # It is of little direct interest to the user.
  #
  class Report # :nodoc:
    #
    # Returns an initialized Report instance.
    # Usually, one doesn't call this method directly, as new
    # Report objects are created by the #benchmark and #bm methods.
    # +width+ and +format+ are the label offset and
    # format string used by Tms#format.
    #
    def initialize(width = 0, format = nil)
      @width, @format, @list = width, format, []
    end

    #
    # Prints the +label+ and measured time for the block,
    # formatted by +format+. See Tms#format for the
    # formatting rules.
    #
    def item(label = "", *format, &blk) # :yield:
      print label.to_s.ljust(@width)
      @list << res = Benchmark.measure(label, &blk)
      print res.format(@format, *format)
      res
    end

    alias report item

    # An array of Benchmark::Tms objects representing each item.
    attr_reader :list
  end



  #
  # A data object, representing the times associated with a benchmark
  # measurement.
  #
  class Tms

    # Default caption, see also Benchmark::CAPTION
    CAPTION = "      user     system      total        real\n"

    # Default format string, see also Benchmark::FORMAT
    FORMAT = "%10.6u %10.6y %10.6t %10.6r\n"

    # User CPU time
    attr_reader :utime

    # System CPU time
    attr_reader :stime

    # User CPU time of children
    attr_reader :cutime

    # System CPU time of children
    attr_reader :cstime

    # Elapsed real time
    attr_reader :real

    # Total time, that is +utime+ + +stime+ + +cutime+ + +cstime+
    attr_reader :total

    # Label
    attr_reader :label

    #
    # Returns an initialized Tms object which has
    # +utime+ as the user CPU time, +stime+ as the system CPU time,
    # +cutime+ as the children's user CPU time, +cstime+ as the children's
    # system CPU time, +real+ as the elapsed real time and +label+ as the label.
    #
    def initialize(utime = 0.0, stime = 0.0, cutime = 0.0, cstime = 0.0, real = 0.0, label = nil)
      @utime, @stime, @cutime, @cstime, @real, @label = utime, stime, cutime, cstime, real, label.to_s
      @total = @utime + @stime + @cutime + @cstime
    end

    #
    # Returns a new Tms object whose times are the sum of the times for this
    # Tms object, plus the time required to execute the code block (+blk+).
    #
    def add(&blk) # :yield:
      self + Benchmark.measure(&blk)
    end

    #
    # An in-place version of #add.
    # Changes the times of this Tms object by making it the sum of the times
    # for this Tms object, plus the time required to execute
    # the code block (+blk+).
    #
    def add!(&blk)
      t = Benchmark.measure(&blk)
      @utime  = utime + t.utime
      @stime  = stime + t.stime
      @cutime = cutime + t.cutime
      @cstime = cstime + t.cstime
      @real   = real + t.real
      self
    end

    #
    # Returns a new Tms object obtained by memberwise summation
    # of the individual times for this Tms object with those of the +other+
    # Tms object.
    # This method and #/() are useful for taking statistics.
    #
    def +(other); memberwise(:+, other) end

    #
    # Returns a new Tms object obtained by memberwise subtraction
    # of the individual times for the +other+ Tms object from those of this
    # Tms object.
    #
    def -(other); memberwise(:-, other) end

    #
    # Returns a new Tms object obtained by memberwise multiplication
    # of the individual times for this Tms object by +x+.
    #
    def *(x); memberwise(:*, x) end

    #
    # Returns a new Tms object obtained by memberwise division
    # of the individual times for this Tms object by +x+.
    # This method and #+() are useful for taking statistics.
    #
    def /(x); memberwise(:/, x) end

    #
    # Returns the contents of this Tms object as
    # a formatted string, according to a +format+ string
    # like that passed to Kernel.format. In addition, #format
    # accepts the following extensions:
    #
    # <tt>%u</tt>::     Replaced by the user CPU time, as reported by Tms#utime.
    # <tt>%y</tt>::     Replaced by the system CPU time, as reported by #stime (Mnemonic: y of "s*y*stem")
    # <tt>%U</tt>::     Replaced by the children's user CPU time, as reported by Tms#cutime
    # <tt>%Y</tt>::     Replaced by the children's system CPU time, as reported by Tms#cstime
    # <tt>%t</tt>::     Replaced by the total CPU time, as reported by Tms#total
    # <tt>%r</tt>::     Replaced by the elapsed real time, as reported by Tms#real
    # <tt>%n</tt>::     Replaced by the label string, as reported by Tms#label (Mnemonic: n of "*n*ame")
    #
    # If +format+ is not given, FORMAT is used as default value, detailing the
    # user, system and real elapsed time.
    #
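    # A usage sketch (the format string and the figures are illustrative only):
    #
    #     t = Benchmark.measure { "a" * 1_000_000 }
    #     t.format("user %u, real %r\n")
    #     #=> e.g. "user 0.010000, real (0.012345)\n"
    #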
    def format(format = nil, *args)
      str = (format || FORMAT).dup
      str.gsub!(/(%[-+.\d]*)n/) { "#{$1}s" % label }
      str.gsub!(/(%[-+.\d]*)u/) { "#{$1}f" % utime }
      str.gsub!(/(%[-+.\d]*)y/) { "#{$1}f" % stime }
      str.gsub!(/(%[-+.\d]*)U/) { "#{$1}f" % cutime }
      str.gsub!(/(%[-+.\d]*)Y/) { "#{$1}f" % cstime }
      str.gsub!(/(%[-+.\d]*)t/) { "#{$1}f" % total }
      str.gsub!(/(%[-+.\d]*)r/) { "(#{$1}f)" % real }
      format ? str % args : str
    end

    #
    # Same as #format.
    #
    def to_s
      format
    end

    #
    # Returns a new 6-element array, consisting of the
    # label, user CPU time, system CPU time, children's
    # user CPU time, children's system CPU time and elapsed
    # real time.
    #
    def to_a
      [@label, @utime, @stime, @cutime, @cstime, @real]
    end

    protected

    #
    # Returns a new Tms object obtained by memberwise operation +op+
    # of the individual times for this Tms object with those of the other
    # Tms object (+x+).
    #
    # +op+ can be a mathematical operation such as <tt>+</tt>, <tt>-</tt>,
    # <tt>*</tt>, <tt>/</tt>
    #
    def memberwise(op, x)
      case x
      when Benchmark::Tms
        Benchmark::Tms.new(utime.__send__(op, x.utime),
                           stime.__send__(op, x.stime),
                           cutime.__send__(op, x.cutime),
                           cstime.__send__(op, x.cstime),
                           real.__send__(op, x.real)
                           )
      else
        Benchmark::Tms.new(utime.__send__(op, x),
                           stime.__send__(op, x),
                           cutime.__send__(op, x),
                           cstime.__send__(op, x),
                           real.__send__(op, x)
                           )
      end
    end
  end

  # The default caption string (heading above the output times).
  CAPTION = Benchmark::Tms::CAPTION

  # The default format string used to display times. See also Benchmark::Tms#format.
  FORMAT = Benchmark::Tms::FORMAT
end
metadata
ADDED
@@ -0,0 +1,56 @@
--- !ruby/object:Gem::Specification
name: benchmark
version: !ruby/object:Gem::Version
  version: 0.1.0
platform: ruby
authors:
- Hiroshi SHIBATA
autorequire:
bindir: exe
cert_chain: []
date: 2019-11-06 00:00:00.000000000 Z
dependencies: []
description: a performance benchmarking library
email:
- hsbt@ruby-lang.org
executables: []
extensions: []
extra_rdoc_files: []
files:
- ".gitignore"
- ".travis.yml"
- Gemfile
- LICENSE.txt
- README.md
- Rakefile
- benchmark.gemspec
- bin/console
- bin/setup
- lib/benchmark.rb
- lib/benchmark/version.rb
homepage: https://github.com/ruby/benchmark
licenses:
- BSD-2-Clause
metadata:
  homepage_uri: https://github.com/ruby/benchmark
  source_code_uri: https://github.com/ruby/benchmark
post_install_message:
rdoc_options: []
require_paths:
- lib
required_ruby_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: '0'
requirements: []
rubygems_version: 3.0.3
signing_key:
specification_version: 4
summary: a performance benchmarking library
test_files: []