railsbench 0.8.4
- data/BUGS +3 -0
- data/CHANGELOG +0 -0
- data/INSTALL +55 -0
- data/LICENSE +222 -0
- data/Manifest.txt +38 -0
- data/README +254 -0
- data/Rakefile +51 -0
- data/bin/railsbench +50 -0
- data/config/benchmarking.rb +8 -0
- data/config/benchmarks.rb +20 -0
- data/config/benchmarks.yml +49 -0
- data/install.rb +60 -0
- data/lib/benchmark.rb +576 -0
- data/lib/railsbench/gc_info.rb +123 -0
- data/lib/railsbench/perf_info.rb +145 -0
- data/lib/railsbench/perf_utils.rb +65 -0
- data/lib/railsbench/railsbenchmark.rb +397 -0
- data/lib/railsbench/version.rb +9 -0
- data/lib/railsbench/write_headers_only.rb +15 -0
- data/ruby184gc.patch +516 -0
- data/ruby185gc.patch +535 -0
- data/script/perf_bench +76 -0
- data/script/perf_comp +155 -0
- data/script/perf_comp_gc +109 -0
- data/script/perf_diff +48 -0
- data/script/perf_diff_gc +89 -0
- data/script/perf_html +82 -0
- data/script/perf_loop +38 -0
- data/script/perf_plot +94 -0
- data/script/perf_plot_gc +111 -0
- data/script/perf_prof +51 -0
- data/script/perf_run +34 -0
- data/script/perf_run_gc +46 -0
- data/script/perf_tex +62 -0
- data/script/perf_times +70 -0
- data/script/perf_times_gc +86 -0
- data/script/run_urls +46 -0
- data/setup.rb +1585 -0
- data/test/railsbench_test.rb +11 -0
- metadata +91 -0
data/Rakefile
ADDED
@@ -0,0 +1,51 @@
require 'rubygems'
require 'rake'
require 'rake/clean'
require 'rake/testtask'
require 'rake/packagetask'
require 'rake/gempackagetask'
require 'rake/rdoctask'
require 'rake/contrib/rubyforgepublisher'
require 'fileutils'
require 'hoe'
include FileUtils
require File.join(File.dirname(__FILE__), 'lib', 'railsbench', 'version')

AUTHOR = "Stefan Kaes" # can also be an array of Authors
EMAIL = "skaes@gmx.net"
DESCRIPTION = "rails benchmarking tools"
GEM_NAME = "railsbench" # what ppl will type to install your gem
RUBYFORGE_PROJECT = "railsbench" # The unix name for your project
HOMEPATH = "http://#{RUBYFORGE_PROJECT}.rubyforge.org"
RELEASE_TYPES = %w(gem tar zip) # can use: gem, tar, zip


NAME = "railsbench"
REV = nil # UNCOMMENT IF REQUIRED: File.read(".svn/entries")[/committed-rev="(\d+)"/, 1] rescue nil
VERS = (ENV['VERSION'] ||= (Railsbench::VERSION::STRING + (REV ? ".#{REV}" : "")))
CLEAN.include ['**/.*.sw?', '*.gem', '.config']
RDOC_OPTS = ['--quiet', '--title', "railsbench documentation",
             "--opname", "index.html",
             "--line-numbers",
             "--main", "README",
             "--inline-source"]

# Generate all the Rake tasks
# Run 'rake -T' to see list of generated tasks (from gem root directory)
hoe = Hoe.new(GEM_NAME, VERS) do |p|
  p.author = AUTHOR
  p.description = DESCRIPTION
  p.email = EMAIL
  p.summary = DESCRIPTION
  p.url = HOMEPATH
  p.rubyforge_name = RUBYFORGE_PROJECT if RUBYFORGE_PROJECT
  p.test_globs = ["test/**/*_test.rb"]
  p.clean_globs = CLEAN # An array of file patterns to delete on clean.
  p.need_zip = true
  p.spec_extras = {:has_rdoc => false}

  # == Optional
  # p.changes - A description of the release's latest changes.
  # p.extra_deps - An array of rubygem dependencies.
  # p.spec_extras - A hash of extra values to set in the gemspec.
end
data/bin/railsbench
ADDED
@@ -0,0 +1,50 @@
#!/usr/bin/env ruby

CMDS = %w(
  base
  perf_comp
  perf_comp_gc
  perf_diff
  perf_diff_gc
  perf_html
  perf_plot
  perf_plot_gc
  perf_prof
  perf_run
  perf_run_gc
  perf_tex
  perf_times
  perf_times_gc
  install
  run_urls
)

cmd = ARGV.shift
unless CMDS.include? cmd
  $stderr.puts "railsbench: unknown command: #{cmd}"
  $stderr.puts "use one of: #{CMDS.sort.join(', ')}"
  exit 1
end

RAILSBENCH_BASE = File.expand_path(File.dirname(__FILE__) + '/..')

def quoted_args
  ARGV.map{|s| "'#{s}'" }.join(' ')
end

case cmd
when 'base'
  puts "railsbench is installed in: #{RAILSBENCH_BASE}"
  puts "load path: #{$:.join("\n")}"
  exit
else
  if cmd == 'install'
    script = RAILSBENCH_BASE + '/install.rb'
  else
    script = RAILSBENCH_BASE + '/script/' + cmd
  end
  command = "sh -c \"#{script} #{quoted_args}\""
  # puts command
  exec(command)
end
data/config/benchmarks.rb
ADDED
@@ -0,0 +1,20 @@
# create benchmarker instance
RAILS_BENCHMARKER = RailsBenchmark.new

# If your session storage is ActiveRecordStore, and if you want
# sessions to be automatically deleted after benchmarking, use
# RAILS_BENCHMARKER = RailsBenchmarkWithActiveRecordStore.new

# WARNING: don't use RailsBenchmarkWithActiveRecordStore when running
# against your production database!

# If your application runs from a url which is not your server's root,
# you should set relative_url_root on the benchmarker instance,
# especially if you use page caching.
# RAILS_BENCHMARKER.relative_url_root = '/blog'

# Create session data required to run the benchmark.
# Customize the code below if your benchmark needs session data.

# require 'user'
# RAILS_BENCHMARKER.session_data = {'account' => User.find_first("name='stefan'")}
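Taken together, the commented hints above amount to a customized config/benchmarks.rb along the following lines. This is only an illustrative sketch: the User model and the "stefan" login come from the file's own commented example and stand in for whatever session data your application actually needs.

RAILS_BENCHMARKER = RailsBenchmarkWithActiveRecordStore.new
RAILS_BENCHMARKER.relative_url_root = '/blog'

require 'user'
RAILS_BENCHMARKER.session_data = {'account' => User.find_first("name='stefan'")}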
data/config/benchmarks.yml
ADDED
@@ -0,0 +1,49 @@
default:
  uri: /

all:
  new_sessions, same_session

new_sessions:
  empty, welcome

same_session:
  rezept, myknzlpzl, show, cat, cat5, letter

empty:
  uri: /empty/index
  new_session: true

welcome:
  uri: /welcome/index
  new_session: true

rezept:
  uri: /rezept/index

myknzlpzl:
  uri: /rezept/myknzlpzl

show:
  uri: /rezept/show/713

cat:
  uri: /rezept/cat/Hauptspeise

cat5:
  uri: /rezept/cat/Hauptspeise?page=5

letter:
  uri: /rezept/letter/G

links:
  -
    uri: /test/url_test
  -
    uri: /test/link_test

alpha:
  uri: /rezept/alphabetic

test_index:
  uri: /test/index
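Entries with a uri key (and optionally new_session) define single benchmarks, while entries such as all, new_sessions and same_session simply list other entry names and act as composite benchmarks. The following Ruby sketch is not part of railsbench; it only illustrates, under that reading of the file, how a composite name expands into its leaf benchmarks:

require 'yaml'

config = YAML.load_file('config/benchmarks.yml')

# Recursively expand a benchmark name: a Hash entry is a leaf benchmark,
# a String entry is a comma-separated list of other entry names.
def expand(name, config)
  entry = config[name]
  case entry
  when Hash   then [name]
  when String then entry.split(/\s*,\s*/).map { |n| expand(n, config) }.flatten
  else []
  end
end

p expand('all', config)
# => ["empty", "welcome", "rezept", "myknzlpzl", "show", "cat", "cat5", "letter"]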
data/install.rb
ADDED
@@ -0,0 +1,60 @@
#!/usr/bin/env ruby

require 'fileutils'
if ARGV.include?('--dry-run')
  include FileUtils::DryRun
else
  include FileUtils::Verbose
end

require 'yaml'

unless RAILS_ROOT = ENV['RAILS_ROOT']
  STDERR.puts "RAILS_ROOT must be defined in your environment"
  exit 1
end

RAILS_CONFIG = RAILS_ROOT + "/config/"
RAILS_ENVS = RAILS_ROOT + "/config/environments/"

install("config/benchmarks.rb", RAILS_CONFIG, :mode => 0644) unless
  File.exists?(RAILS_CONFIG + "benchmarks.rb")

install("config/benchmarks.yml", RAILS_CONFIG, :mode => 0644) unless
  File.exists?(RAILS_CONFIG + "benchmarks.yml")

install("config/benchmarking.rb", RAILS_ENVS, :mode => 0644) unless
  File.exists?(RAILS_ENVS + "benchmarking.rb")

database = YAML::load(File.open(RAILS_CONFIG + "database.yml"))
unless database["benchmarking"]
  puts "creating database configuration: benchmarking"
  File.open(RAILS_CONFIG + "database.yml", "ab") do |file|
    file.puts "\nbenchmarking:\n"
    %w(adapter database host username password).each do |k|
      file.puts "  #{k}: #{database['development'][k]}"
    end
  end
end

__END__

### Local Variables: ***
### mode:ruby ***
### End: ***

# Copyright (C) 2005, 2006 Stefan Kaes
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
data/lib/benchmark.rb
ADDED
@@ -0,0 +1,576 @@
=begin
#
# benchmark.rb - a performance benchmarking library
#
# $Id: benchmark.rb 35 2005-08-25 06:40:42Z stkaes $
#
# Created by Gotoken (gotoken@notwork.org).
#
# Documentation by Gotoken (original RD), Lyle Johnson (RDoc conversion), and
# Gavin Sinclair (editing).
#
=end

# == Overview
#
# The Benchmark module provides methods for benchmarking Ruby code, giving
# detailed reports on the time taken for each task.
#

# The Benchmark module provides methods to measure and report the time
# used to execute Ruby code.
#
# * Measure the time to construct the string given by the expression
#   <tt>"a"*1_000_000</tt>:
#
#       require 'benchmark'
#
#       puts Benchmark.measure { "a"*1_000_000 }
#
#   On my machine (FreeBSD 3.2 on P5, 100MHz) this generates:
#
#       1.166667   0.050000   1.216667 (  0.571355)
#
#   This report shows the user CPU time, system CPU time, the sum of
#   the user and system CPU times, and the elapsed real time. The unit
#   of time is seconds.
#
# * Do some experiments sequentially using the #bm method:
#
#       require 'benchmark'
#
#       n = 50000
#       Benchmark.bm do |x|
#         x.report { for i in 1..n; a = "1"; end }
#         x.report { n.times do   ; a = "1"; end }
#         x.report { 1.upto(n) do ; a = "1"; end }
#       end
#
#   The result:
#
#           user     system      total        real
#       1.033333   0.016667   1.016667 (  0.492106)
#       1.483333   0.000000   1.483333 (  0.694605)
#       1.516667   0.000000   1.516667 (  0.711077)
#
# * Continuing the previous example, put a label in each report:
#
#       require 'benchmark'
#
#       n = 50000
#       Benchmark.bm(7) do |x|
#         x.report("for:")   { for i in 1..n; a = "1"; end }
#         x.report("times:") { n.times do   ; a = "1"; end }
#         x.report("upto:")  { 1.upto(n) do ; a = "1"; end }
#       end
#
#   The result:
#
#                    user     system      total        real
#       for:     1.050000   0.000000   1.050000 (  0.503462)
#       times:   1.533333   0.016667   1.550000 (  0.735473)
#       upto:    1.500000   0.016667   1.516667 (  0.711239)
#
#
# * The times for some benchmarks depend on the order in which items
#   are run.  These differences are due to the cost of memory
#   allocation and garbage collection.  To avoid these discrepancies,
#   the #bmbm method is provided.  For example, to compare ways to
#   sort an array of floats:
#
#       require 'benchmark'
#
#       array = (1..1000000).map { rand }
#
#       Benchmark.bmbm do |x|
#         x.report("sort!") { array.dup.sort! }
#         x.report("sort")  { array.dup.sort }
#       end
#
#   The result:
#
#       Rehearsal -----------------------------------------
#       sort!  11.928000   0.010000  11.938000 ( 12.756000)
#       sort   13.048000   0.020000  13.068000 ( 13.857000)
#       ------------------------------- total: 25.006000sec
#
#                  user     system      total        real
#       sort!  12.959000   0.010000  12.969000 ( 13.793000)
#       sort   12.007000   0.000000  12.007000 ( 12.791000)
#
#
# * Report statistics of sequential experiments with unique labels,
#   using the #benchmark method:
#
#       require 'benchmark'
#
#       n = 50000
#       Benchmark.benchmark(" "*7 + CAPTION, 7, FMTSTR, ">total:", ">avg:") do |x|
#         tf = x.report("for:")   { for i in 1..n; a = "1"; end }
#         tt = x.report("times:") { n.times do   ; a = "1"; end }
#         tu = x.report("upto:")  { 1.upto(n) do ; a = "1"; end }
#         [tf+tt+tu, (tf+tt+tu)/3]
#       end
#
#   The result:
#
#                    user     system      total        real
#       for:     1.016667   0.016667   1.033333 (  0.485749)
#       times:   1.450000   0.016667   1.466667 (  0.681367)
#       upto:    1.533333   0.000000   1.533333 (  0.722166)
#       >total:  4.000000   0.033333   4.033333 (  1.889282)
#       >avg:    1.333333   0.011111   1.344444 (  0.629761)

module Benchmark

  BENCHMARK_VERSION = "2002-04-25" #:nodoc:

  OUTPUT = STDOUT unless defined?(OUTPUT)
  SYNC = true unless defined?(SYNC)

  def Benchmark::times() # :nodoc:
    Process::times()
  end


  # Invokes the block with a <tt>Benchmark::Report</tt> object, which
  # may be used to collect and report on the results of individual
  # benchmark tests. Reserves <i>label_width</i> leading spaces for
  # labels on each line. Prints _caption_ at the top of the
  # report, and uses _fmt_ to format each line.
  # If the block returns an array of
  # <tt>Benchmark::Tms</tt> objects, these will be used to format
  # additional lines of output. If _label_ parameters are
  # given, these are used to label these extra lines.
  #
  # _Note_: Other methods provide a simpler interface to this one, and are
  # suitable for nearly all benchmarking requirements.  See the examples in
  # Benchmark, and the #bm and #bmbm methods.
  #
  # Example:
  #
  #     require 'benchmark'
  #     include Benchmark          # we need the CAPTION and FMTSTR constants
  #
  #     n = 50000
  #     Benchmark.benchmark(" "*7 + CAPTION, 7, FMTSTR, ">total:", ">avg:") do |x|
  #       tf = x.report("for:")   { for i in 1..n; a = "1"; end }
  #       tt = x.report("times:") { n.times do   ; a = "1"; end }
  #       tu = x.report("upto:")  { 1.upto(n) do ; a = "1"; end }
  #       [tf+tt+tu, (tf+tt+tu)/3]
  #     end
  #
  # <i>Generates:</i>
  #
  #                  user     system      total        real
  #     for:     1.016667   0.016667   1.033333 (  0.485749)
  #     times:   1.450000   0.016667   1.466667 (  0.681367)
  #     upto:    1.533333   0.000000   1.533333 (  0.722166)
  #     >total:  4.000000   0.033333   4.033333 (  1.889282)
  #     >avg:    1.333333   0.011111   1.344444 (  0.629761)
  #

  def benchmark(caption = "", label_width = nil, fmtstr = nil, *labels) # :yield: report
    if SYNC
      sync = OUTPUT.sync
      OUTPUT.sync = true
    end
    label_width ||= 0
    fmtstr ||= FMTSTR
    raise ArgumentError, "no block" unless iterator?
    OUTPUT.print caption
    results = yield(Report.new(label_width, fmtstr))
    Array === results and results.grep(Tms).each {|t|
      OUTPUT.print((labels.shift || t.label || "").ljust(label_width),
                   t.format(fmtstr))
    }
    OUTPUT.sync = sync if SYNC
  end


  # A simple interface to the #benchmark method, #bm generates sequential reports
  # with labels.  The parameters have the same meaning as for #benchmark.
  #
  #     require 'benchmark'
  #
  #     n = 50000
  #     Benchmark.bm(7) do |x|
  #       x.report("for:")   { for i in 1..n; a = "1"; end }
  #       x.report("times:") { n.times do   ; a = "1"; end }
  #       x.report("upto:")  { 1.upto(n) do ; a = "1"; end }
  #     end
  #
  # <i>Generates:</i>
  #
  #                  user     system      total        real
  #     for:     1.050000   0.000000   1.050000 (  0.503462)
  #     times:   1.533333   0.016667   1.550000 (  0.735473)
  #     upto:    1.500000   0.016667   1.516667 (  0.711239)
  #

  def bm(label_width = 0, *labels, &blk) # :yield: report
    benchmark(" "*label_width + CAPTION, label_width, FMTSTR, *labels, &blk)
  end


  # Sometimes benchmark results are skewed because code executed
  # earlier encounters different garbage collection overheads than
  # that run later.  #bmbm attempts to minimize this effect by running
  # the tests twice, the first time as a rehearsal in order to get the
  # runtime environment stable, the second time for
  # real.  <tt>GC.start</tt> is executed before the start of each of
  # the real timings; the cost of this is not included in the
  # timings.  In reality, though, there's only so much that #bmbm can
  # do, and the results are not guaranteed to be isolated from garbage
  # collection and other effects.
  #
  # Because #bmbm takes two passes through the tests, it can
  # calculate the required label width.
  #
  #     require 'benchmark'
  #
  #     array = (1..1000000).map { rand }
  #
  #     Benchmark.bmbm do |x|
  #       x.report("sort!") { array.dup.sort! }
  #       x.report("sort")  { array.dup.sort }
  #     end
  #
  # <i>Generates:</i>
  #
  #     Rehearsal -----------------------------------------
  #     sort!  11.928000   0.010000  11.938000 ( 12.756000)
  #     sort   13.048000   0.020000  13.068000 ( 13.857000)
  #     ------------------------------- total: 25.006000sec
  #
  #                user     system      total        real
  #     sort!  12.959000   0.010000  12.969000 ( 13.793000)
  #     sort   12.007000   0.000000  12.007000 ( 12.791000)
  #
  # #bmbm yields a Benchmark::Job object and returns an array of
  # Benchmark::Tms objects.
  #
  def bmbm(width = 0, &blk) # :yield: job
    job = Job.new(width)
    yield(job)
    width = job.width
    if SYNC
      sync = OUTPUT.sync
      OUTPUT.sync = true
    end

    # rehearsal
    OUTPUT.print "Rehearsal "
    puts '-'*(width+CAPTION.length - "Rehearsal ".length)
    list = []
    job.list.each{|label,item|
      OUTPUT.print(label.ljust(width))
      res = Benchmark::measure(&item)
      OUTPUT.print res.format()
      list.push res
    }
    sum = Tms.new; list.each{|i| sum += i}
    ets = sum.format("total: %tsec")
    OUTPUT.printf("%s %s\n\n",
                  "-"*(width+CAPTION.length-ets.length-1), ets)

    # take
    OUTPUT.print ' '*width, CAPTION
    list = []
    ary = []
    job.list.each{|label,item|
      GC::start
      OUTPUT.print label.ljust(width)
      res = Benchmark::measure(&item)
      OUTPUT.print res.format()
      ary.push res
      list.push [label, res]
    }

    OUTPUT.sync = sync if SYNC
    ary
  end

  #
  # Returns the time used to execute the given block as a
  # Benchmark::Tms object.
  #
  def measure(label = "") # :yield:
    t0, r0 = Benchmark.times, Time.now
    yield
    t1, r1 = Benchmark.times, Time.now
    Benchmark::Tms.new(t1.utime  - t0.utime,
                       t1.stime  - t0.stime,
                       t1.cutime - t0.cutime,
                       t1.cstime - t0.cstime,
                       r1.to_f - r0.to_f,
                       label)
  end

  #
  # Returns the elapsed real time used to execute the given block.
  #
  def realtime(&blk) # :yield:
    Benchmark::measure(&blk).real
  end



  #
  # A Job is a sequence of labelled blocks to be processed by the
  # Benchmark.bmbm method.  It is of little direct interest to the user.
  #
  class Job # :nodoc:
    #
    # Returns an initialized Job instance.
    # Usually, one doesn't call this method directly, as new
    # Job objects are created by the #bmbm method.
    # _width_ is an initial value for the label offset used in formatting;
    # the #bmbm method passes its _width_ argument to this constructor.
    #
    def initialize(width)
      @width = width
      @list = []
    end

    #
    # Registers the given label and block pair in the job list.
    #
    def item(label = "", &blk) # :yield:
      raise ArgumentError, "no block" unless block_given?
      label.concat ' '
      w = label.length
      @width = w if @width < w
      @list.push [label, blk]
      self
    end

    alias report item

    # An array of 2-element arrays, consisting of label and block pairs.
    attr_reader :list

    # Length of the widest label in the #list, plus one.
    attr_reader :width
  end

  module_function :benchmark, :measure, :realtime, :bm, :bmbm



  #
  # This class is used by the Benchmark.benchmark and Benchmark.bm methods.
  # It is of little direct interest to the user.
  #
  class Report # :nodoc:
    #
    # Returns an initialized Report instance.
    # Usually, one doesn't call this method directly, as new
    # Report objects are created by the #benchmark and #bm methods.
    # _width_ and _fmtstr_ are the label offset and
    # format string used by Tms#format.
    #
    def initialize(width = 0, fmtstr = nil)
      @width, @fmtstr = width, fmtstr
    end

    #
    # Prints the _label_ and measured time for the block,
    # formatted by _fmt_.  See Tms#format for the
    # formatting rules.
    #
    def item(label = "", *fmt, &blk) # :yield:
      OUTPUT.print label.ljust(@width)
      res = Benchmark::measure(&blk)
      OUTPUT.print res.format(@fmtstr, *fmt)
      res
    end

    alias report item
  end



  #
  # A data object, representing the times associated with a benchmark
  # measurement.
  #
  class Tms
    CAPTION = "      user     system      total        real\n"
    FMTSTR = "%10.6u %10.6y %10.6t %10.6r\n"

    # User CPU time
    attr_reader :utime

    # System CPU time
    attr_reader :stime

    # User CPU time of children
    attr_reader :cutime

    # System CPU time of children
    attr_reader :cstime

    # Elapsed real time
    attr_reader :real

    # Total time, that is _utime_ + _stime_ + _cutime_ + _cstime_
    attr_reader :total

    # Label
    attr_reader :label

    #
    # Returns an initialized Tms object which has
    # _u_ as the user CPU time, _s_ as the system CPU time,
    # _cu_ as the children's user CPU time, _cs_ as the children's
    # system CPU time, _real_ as the elapsed real time and _l_
    # as the label.
    #
    def initialize(u = 0.0, s = 0.0, cu = 0.0, cs = 0.0, real = 0.0, l = nil)
      @utime, @stime, @cutime, @cstime, @real, @label = u, s, cu, cs, real, l
      @total = @utime + @stime + @cutime + @cstime
    end

    #
    # Returns a new Tms object whose times are the sum of the times for this
    # Tms object, plus the time required to execute the code block (_blk_).
    #
    def add(&blk) # :yield:
      self + Benchmark::measure(&blk)
    end

    #
    # An in-place version of #add.
    #
    def add!(&blk)
      t = Benchmark::measure(&blk)
      @utime  = utime + t.utime
      @stime  = stime + t.stime
      @cutime = cutime + t.cutime
      @cstime = cstime + t.cstime
      @real   = real + t.real
      self
    end

    #
    # Returns a new Tms object obtained by memberwise summation
    # of the individual times for this Tms object with those of the other
    # Tms object.
    # This method and #/() are useful for taking statistics.
    #
    def +(other); memberwise(:+, other) end

    #
    # Returns a new Tms object obtained by memberwise subtraction
    # of the individual times for the other Tms object from those of this
    # Tms object.
    #
    def -(other); memberwise(:-, other) end

    #
    # Returns a new Tms object obtained by memberwise multiplication
    # of the individual times for this Tms object by _x_.
    #
    def *(x); memberwise(:*, x) end

    #
    # Returns a new Tms object obtained by memberwise division
    # of the individual times for this Tms object by _x_.
    # This method and #+() are useful for taking statistics.
    #
    def /(x); memberwise(:/, x) end

    #
    # Returns the contents of this Tms object as
    # a formatted string, according to a format string
    # like that passed to Kernel.format.  In addition, #format
    # accepts the following extensions:
    #
    # <tt>%u</tt>::  Replaced by the user CPU time, as reported by Tms#utime.
    # <tt>%y</tt>::  Replaced by the system CPU time, as reported by #stime (Mnemonic: y of "s*y*stem")
    # <tt>%U</tt>::  Replaced by the children's user CPU time, as reported by Tms#cutime
    # <tt>%Y</tt>::  Replaced by the children's system CPU time, as reported by Tms#cstime
    # <tt>%t</tt>::  Replaced by the total CPU time, as reported by Tms#total
    # <tt>%r</tt>::  Replaced by the elapsed real time, as reported by Tms#real
    # <tt>%n</tt>::  Replaced by the label string, as reported by Tms#label (Mnemonic: n of "*n*ame")
    #
    # If _fmtstr_ is not given, FMTSTR is used as default value, detailing the
    # user, system and real elapsed time.
    #
    def format(arg0 = nil, *args)
      fmtstr = (arg0 || FMTSTR).dup
      fmtstr.gsub!(/(%[-+\.\d]*)n/){"#{$1}s" % label}
      fmtstr.gsub!(/(%[-+\.\d]*)u/){"#{$1}f" % utime}
      fmtstr.gsub!(/(%[-+\.\d]*)y/){"#{$1}f" % stime}
      fmtstr.gsub!(/(%[-+\.\d]*)U/){"#{$1}f" % cutime}
      fmtstr.gsub!(/(%[-+\.\d]*)Y/){"#{$1}f" % cstime}
      fmtstr.gsub!(/(%[-+\.\d]*)t/){"#{$1}f" % total}
      fmtstr.gsub!(/(%[-+\.\d]*)r/){"(#{$1}f)" % real}
      arg0 ? Kernel::format(fmtstr, *args) : fmtstr
    end

    #
    # Same as #format.
    #
    def to_s
      format
    end

    #
    # Returns a new 6-element array, consisting of the
    # label, user CPU time, system CPU time, children's
    # user CPU time, children's system CPU time and elapsed
    # real time.
    #
    def to_a
      [@label, @utime, @stime, @cutime, @cstime, @real]
    end

    protected
    def memberwise(op, x)
      case x
      when Benchmark::Tms
        Benchmark::Tms.new(utime.__send__(op, x.utime),
                           stime.__send__(op, x.stime),
                           cutime.__send__(op, x.cutime),
                           cstime.__send__(op, x.cstime),
                           real.__send__(op, x.real)
                           )
      else
        Benchmark::Tms.new(utime.__send__(op, x),
                           stime.__send__(op, x),
                           cutime.__send__(op, x),
                           cstime.__send__(op, x),
                           real.__send__(op, x)
                           )
      end
    end
  end

  # The default caption string (heading above the output times).
  CAPTION = Benchmark::Tms::CAPTION

  # The default format string used to display times.  See also Benchmark::Tms#format.
  FMTSTR = Benchmark::Tms::FMTSTR
end

if __FILE__ == $0
  include Benchmark

  n = ARGV[0].to_i.nonzero? || 50000
  puts %Q([#{n} times iterations of `a = "1"'])
  benchmark("       " + CAPTION, 7, FMTSTR) do |x|
    x.report("for:")   {for i in 1..n; a = "1"; end} # Benchmark::measure
    x.report("times:") {n.times do   ; a = "1"; end}
    x.report("upto:")  {1.upto(n) do ; a = "1"; end}
  end

  benchmark do
    [
      measure{for i in 1..n; a = "1"; end},  # Benchmark::measure
      measure{n.times do   ; a = "1"; end},
      measure{1.upto(n) do ; a = "1"; end}
    ]
  end
end
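For reference, a minimal usage sketch of the Tms#format directives documented above (%u, %y, %t, %r); note that #format renders the real time in parentheses, as its implementation shows:

require 'benchmark'

tms = Benchmark.measure { 100_000.times { "a" * 100 } }
puts tms.format("user %u  system %y  total %t  real %r\n")
puts tms   # Tms#to_s, i.e. the default FMTSTR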