kamal-railsbench 0.9.9.pre
Sign up to get free protection for your applications and to get access to all the features.
- data/BUGS +2 -0
- data/CHANGELOG +2124 -0
- data/GCPATCH +73 -0
- data/INSTALL +75 -0
- data/LICENSE +222 -0
- data/Manifest.txt +53 -0
- data/PROBLEMS +56 -0
- data/README +337 -0
- data/Rakefile +51 -0
- data/bin/railsbench +80 -0
- data/config/benchmarking.rb +21 -0
- data/config/benchmarks.rb +21 -0
- data/config/benchmarks.yml +2 -0
- data/images/empty.png +0 -0
- data/images/minus.png +0 -0
- data/images/plus.png +0 -0
- data/install.rb +70 -0
- data/latest_changes.txt +18 -0
- data/lib/benchmark.rb +576 -0
- data/lib/railsbench/benchmark.rb +576 -0
- data/lib/railsbench/benchmark_specs.rb +63 -0
- data/lib/railsbench/gc_info.rb +158 -0
- data/lib/railsbench/perf_info.rb +146 -0
- data/lib/railsbench/perf_utils.rb +202 -0
- data/lib/railsbench/railsbenchmark.rb +640 -0
- data/lib/railsbench/version.rb +9 -0
- data/lib/railsbench/write_headers_only.rb +15 -0
- data/postinstall.rb +12 -0
- data/ruby184gc.patch +516 -0
- data/ruby185gc.patch +562 -0
- data/ruby186gc.patch +564 -0
- data/ruby19gc.patch +2425 -0
- data/script/convert_raw_data_files +49 -0
- data/script/generate_benchmarks +171 -0
- data/script/perf_bench +74 -0
- data/script/perf_comp +151 -0
- data/script/perf_comp_gc +113 -0
- data/script/perf_diff +48 -0
- data/script/perf_diff_gc +53 -0
- data/script/perf_html +103 -0
- data/script/perf_plot +225 -0
- data/script/perf_plot_gc +254 -0
- data/script/perf_prof +87 -0
- data/script/perf_run +39 -0
- data/script/perf_run_gc +40 -0
- data/script/perf_table +104 -0
- data/script/perf_tex +58 -0
- data/script/perf_times +66 -0
- data/script/perf_times_gc +94 -0
- data/script/run_urls +57 -0
- data/setup.rb +1585 -0
- data/test/railsbench_test.rb +11 -0
- data/test/test_helper.rb +2 -0
- metadata +133 -0
@@ -0,0 +1,576 @@
|
|
1
|
+
=begin
|
2
|
+
#
|
3
|
+
# benchmark.rb - a performance benchmarking library
|
4
|
+
#
|
5
|
+
# $Id$
|
6
|
+
#
|
7
|
+
# Created by Gotoken (gotoken@notwork.org).
|
8
|
+
#
|
9
|
+
# Documentation by Gotoken (original RD), Lyle Johnson (RDoc conversion), and
|
10
|
+
# Gavin Sinclair (editing).
|
11
|
+
#
|
12
|
+
=end
|
13
|
+
|
14
|
+
# == Overview
|
15
|
+
#
|
16
|
+
# The Benchmark module provides methods for benchmarking Ruby code, giving
|
17
|
+
# detailed reports on the time taken for each task.
|
18
|
+
#
|
19
|
+
|
20
|
+
# The Benchmark module provides methods to measure and report the time
|
21
|
+
# used to execute Ruby code.
|
22
|
+
#
|
23
|
+
# * Measure the time to construct the string given by the expression
|
24
|
+
# <tt>"a"*1_000_000</tt>:
|
25
|
+
#
|
26
|
+
# require 'benchmark'
|
27
|
+
#
|
28
|
+
# puts Benchmark.measure { "a"*1_000_000 }
|
29
|
+
#
|
30
|
+
# On my machine (FreeBSD 3.2 on P5, 100MHz) this generates:
|
31
|
+
#
|
32
|
+
# 1.166667 0.050000 1.216667 ( 0.571355)
|
33
|
+
#
|
34
|
+
# This report shows the user CPU time, system CPU time, the sum of
|
35
|
+
# the user and system CPU times, and the elapsed real time. The unit
|
36
|
+
# of time is seconds.
|
37
|
+
#
|
38
|
+
# * Do some experiments sequentially using the #bm method:
|
39
|
+
#
|
40
|
+
# require 'benchmark'
|
41
|
+
#
|
42
|
+
# n = 50000
|
43
|
+
# Benchmark.bm do |x|
|
44
|
+
# x.report { for i in 1..n; a = "1"; end }
|
45
|
+
# x.report { n.times do ; a = "1"; end }
|
46
|
+
# x.report { 1.upto(n) do ; a = "1"; end }
|
47
|
+
# end
|
48
|
+
#
|
49
|
+
# The result:
|
50
|
+
#
|
51
|
+
# user system total real
|
52
|
+
# 1.033333 0.016667 1.016667 ( 0.492106)
|
53
|
+
# 1.483333 0.000000 1.483333 ( 0.694605)
|
54
|
+
# 1.516667 0.000000 1.516667 ( 0.711077)
|
55
|
+
#
|
56
|
+
# * Continuing the previous example, put a label in each report:
|
57
|
+
#
|
58
|
+
# require 'benchmark'
|
59
|
+
#
|
60
|
+
# n = 50000
|
61
|
+
# Benchmark.bm(7) do |x|
|
62
|
+
# x.report("for:") { for i in 1..n; a = "1"; end }
|
63
|
+
# x.report("times:") { n.times do ; a = "1"; end }
|
64
|
+
# x.report("upto:") { 1.upto(n) do ; a = "1"; end }
|
65
|
+
# end
|
66
|
+
#
|
67
|
+
# The result:
|
68
|
+
#
|
69
|
+
# user system total real
|
70
|
+
# for: 1.050000 0.000000 1.050000 ( 0.503462)
|
71
|
+
# times: 1.533333 0.016667 1.550000 ( 0.735473)
|
72
|
+
# upto: 1.500000 0.016667 1.516667 ( 0.711239)
|
73
|
+
#
|
74
|
+
#
|
75
|
+
# * The times for some benchmarks depend on the order in which items
|
76
|
+
# are run. These differences are due to the cost of memory
|
77
|
+
# allocation and garbage collection. To avoid these discrepancies,
|
78
|
+
# the #bmbm method is provided. For example, to compare ways to
|
79
|
+
# sort an array of floats:
|
80
|
+
#
|
81
|
+
# require 'benchmark'
|
82
|
+
#
|
83
|
+
# array = (1..1000000).map { rand }
|
84
|
+
#
|
85
|
+
# Benchmark.bmbm do |x|
|
86
|
+
# x.report("sort!") { array.dup.sort! }
|
87
|
+
# x.report("sort") { array.dup.sort }
|
88
|
+
# end
|
89
|
+
#
|
90
|
+
# The result:
|
91
|
+
#
|
92
|
+
# Rehearsal -----------------------------------------
|
93
|
+
# sort! 11.928000 0.010000 11.938000 ( 12.756000)
|
94
|
+
# sort 13.048000 0.020000 13.068000 ( 13.857000)
|
95
|
+
# ------------------------------- total: 25.006000sec
|
96
|
+
#
|
97
|
+
# user system total real
|
98
|
+
# sort! 12.959000 0.010000 12.969000 ( 13.793000)
|
99
|
+
# sort 12.007000 0.000000 12.007000 ( 12.791000)
|
100
|
+
#
|
101
|
+
#
|
102
|
+
# * Report statistics of sequential experiments with unique labels,
|
103
|
+
# using the #benchmark method:
|
104
|
+
#
|
105
|
+
# require 'benchmark'
|
106
|
+
#
|
107
|
+
# n = 50000
|
108
|
+
# Benchmark.benchmark(" "*7 + CAPTION, 7, FMTSTR, ">total:", ">avg:") do |x|
|
109
|
+
# tf = x.report("for:") { for i in 1..n; a = "1"; end }
|
110
|
+
# tt = x.report("times:") { n.times do ; a = "1"; end }
|
111
|
+
# tu = x.report("upto:") { 1.upto(n) do ; a = "1"; end }
|
112
|
+
# [tf+tt+tu, (tf+tt+tu)/3]
|
113
|
+
# end
|
114
|
+
#
|
115
|
+
# The result:
|
116
|
+
#
|
117
|
+
# user system total real
|
118
|
+
# for: 1.016667 0.016667 1.033333 ( 0.485749)
|
119
|
+
# times: 1.450000 0.016667 1.466667 ( 0.681367)
|
120
|
+
# upto: 1.533333 0.000000 1.533333 ( 0.722166)
|
121
|
+
# >total: 4.000000 0.033333 4.033333 ( 1.889282)
|
122
|
+
# >avg: 1.333333 0.011111 1.344444 ( 0.629761)
|
123
|
+
|
124
|
+
module Benchmark
|
125
|
+
|
126
|
+
# Library version identifier (date of the last upstream change).
BENCHMARK_VERSION = "2002-04-25" #:nodoc:

# Stream all reports are written to; define OUTPUT before loading this
# file to redirect reports elsewhere (defaults to STDOUT).
OUTPUT = STDOUT unless defined?(OUTPUT)
# When true, OUTPUT is forced into sync mode while a report is printed.
SYNC = true unless defined?(SYNC)
|
130
|
+
|
131
|
+
# Samples the CPU times of the current process (and its children) via the
# Process module; #measure diffs two of these samples.
def Benchmark.times # :nodoc:
  Process.times
end
|
134
|
+
|
135
|
+
|
136
|
+
# Invokes the block with a <tt>Benchmark::Report</tt> object, which
|
137
|
+
# may be used to collect and report on the results of individual
|
138
|
+
# benchmark tests. Reserves <i>label_width</i> leading spaces for
|
139
|
+
# labels on each line. Prints _caption_ at the top of the
|
140
|
+
# report, and uses _fmt_ to format each line.
|
141
|
+
# If the block returns an array of
|
142
|
+
# <tt>Benchmark::Tms</tt> objects, these will be used to format
|
143
|
+
# additional lines of output. If _label_ parameters are
|
144
|
+
# given, these are used to label these extra lines.
|
145
|
+
#
|
146
|
+
# _Note_: Other methods provide a simpler interface to this one, and are
|
147
|
+
# suitable for nearly all benchmarking requirements. See the examples in
|
148
|
+
# Benchmark, and the #bm and #bmbm methods.
|
149
|
+
#
|
150
|
+
# Example:
|
151
|
+
#
|
152
|
+
# require 'benchmark'
|
153
|
+
# include Benchmark # we need the CAPTION and FMTSTR constants
|
154
|
+
#
|
155
|
+
# n = 50000
|
156
|
+
# Benchmark.benchmark(" "*7 + CAPTION, 7, FMTSTR, ">total:", ">avg:") do |x|
|
157
|
+
# tf = x.report("for:") { for i in 1..n; a = "1"; end }
|
158
|
+
# tt = x.report("times:") { n.times do ; a = "1"; end }
|
159
|
+
# tu = x.report("upto:") { 1.upto(n) do ; a = "1"; end }
|
160
|
+
# [tf+tt+tu, (tf+tt+tu)/3]
|
161
|
+
# end
|
162
|
+
#
|
163
|
+
# <i>Generates:</i>
|
164
|
+
#
|
165
|
+
# user system total real
|
166
|
+
# for: 1.016667 0.016667 1.033333 ( 0.485749)
|
167
|
+
# times: 1.450000 0.016667 1.466667 ( 0.681367)
|
168
|
+
# upto: 1.533333 0.000000 1.533333 ( 0.722166)
|
169
|
+
# >total: 4.000000 0.033333 4.033333 ( 1.889282)
|
170
|
+
# >avg: 1.333333 0.011111 1.344444 ( 0.629761)
|
171
|
+
#
|
172
|
+
|
173
|
+
#
# Invokes the block with a Benchmark::Report object, which may be used to
# collect and report on the results of individual benchmark tests.
# Reserves _label_width_ leading spaces for labels on each line, prints
# _caption_ at the top of the report, and uses _fmtstr_ to format each
# line.  If the block returns an array of Benchmark::Tms objects, these
# are used to format additional lines of output, labelled with the
# trailing _labels_ arguments.
#
# Raises ArgumentError when no block is given.
#
# _Note_: Other methods provide a simpler interface to this one, and are
# suitable for nearly all benchmarking requirements.  See the examples in
# Benchmark, and the #bm and #bmbm methods.
#
def benchmark(caption = "", label_width = nil, fmtstr = nil, *labels) # :yield: report
  # Force synchronous output while the report is generated, restoring the
  # stream's original sync mode afterwards.
  if SYNC
    sync = OUTPUT.sync
    OUTPUT.sync = true
  end
  label_width ||= 0
  fmtstr ||= FMTSTR
  # block_given? replaces the long-deprecated Kernel#iterator? alias and
  # matches the check already used by Job#item.
  raise ArgumentError, "no block" unless block_given?
  OUTPUT.print caption
  results = yield(Report.new(label_width, fmtstr))
  # If the block returned an array of Tms objects, print one extra line
  # per result, consuming *labels (falling back to each result's label).
  Array === results and results.grep(Tms).each {|t|
    OUTPUT.print((labels.shift || t.label || "").ljust(label_width),
                 t.format(fmtstr))
  }
  OUTPUT.sync = sync if SYNC
end
|
189
|
+
|
190
|
+
|
191
|
+
# A simple interface to the #benchmark method, #bm generates sequential reports
|
192
|
+
# with labels. The parameters have the same meaning as for #benchmark.
|
193
|
+
#
|
194
|
+
# require 'benchmark'
|
195
|
+
#
|
196
|
+
# n = 50000
|
197
|
+
# Benchmark.bm(7) do |x|
|
198
|
+
# x.report("for:") { for i in 1..n; a = "1"; end }
|
199
|
+
# x.report("times:") { n.times do ; a = "1"; end }
|
200
|
+
# x.report("upto:") { 1.upto(n) do ; a = "1"; end }
|
201
|
+
# end
|
202
|
+
#
|
203
|
+
# <i>Generates:</i>
|
204
|
+
#
|
205
|
+
# user system total real
|
206
|
+
# for: 1.050000 0.000000 1.050000 ( 0.503462)
|
207
|
+
# times: 1.533333 0.016667 1.550000 ( 0.735473)
|
208
|
+
# upto: 1.500000 0.016667 1.516667 ( 0.711239)
|
209
|
+
#
|
210
|
+
|
211
|
+
# A simple interface to #benchmark: generates sequential reports with
# labels.  _label_width_ and _labels_ have the same meaning as the
# corresponding #benchmark parameters; the caption and format string are
# the module defaults.
def bm(label_width = 0, *labels, &blk) # :yield: report
  header = " " * label_width + CAPTION
  benchmark(header, label_width, FMTSTR, *labels, &blk)
end
|
214
|
+
|
215
|
+
|
216
|
+
# Sometimes benchmark results are skewed because code executed
|
217
|
+
# earlier encounters different garbage collection overheads than
|
218
|
+
# that run later. #bmbm attempts to minimize this effect by running
|
219
|
+
# the tests twice, the first time as a rehearsal in order to get the
|
220
|
+
# runtime environment stable, the second time for
|
221
|
+
# real. <tt>GC.start</tt> is executed before the start of each of
|
222
|
+
# the real timings; the cost of this is not included in the
|
223
|
+
# timings. In reality, though, there's only so much that #bmbm can
|
224
|
+
# do, and the results are not guaranteed to be isolated from garbage
|
225
|
+
# collection and other effects.
|
226
|
+
#
|
227
|
+
# Because #bmbm takes two passes through the tests, it can
|
228
|
+
# calculate the required label width.
|
229
|
+
#
|
230
|
+
# require 'benchmark'
|
231
|
+
#
|
232
|
+
# array = (1..1000000).map { rand }
|
233
|
+
#
|
234
|
+
# Benchmark.bmbm do |x|
|
235
|
+
# x.report("sort!") { array.dup.sort! }
|
236
|
+
# x.report("sort") { array.dup.sort }
|
237
|
+
# end
|
238
|
+
#
|
239
|
+
# <i>Generates:</i>
|
240
|
+
#
|
241
|
+
# Rehearsal -----------------------------------------
|
242
|
+
# sort! 11.928000 0.010000 11.938000 ( 12.756000)
|
243
|
+
# sort 13.048000 0.020000 13.068000 ( 13.857000)
|
244
|
+
# ------------------------------- total: 25.006000sec
|
245
|
+
#
|
246
|
+
# user system total real
|
247
|
+
# sort! 12.959000 0.010000 12.969000 ( 13.793000)
|
248
|
+
# sort 12.007000 0.000000 12.007000 ( 12.791000)
|
249
|
+
#
|
250
|
+
# #bmbm yields a Benchmark::Job object and returns an array of
|
251
|
+
# Benchmark::Tms objects.
|
252
|
+
#
|
253
|
+
#
# Runs every job registered on the yielded Benchmark::Job object twice:
# first as a rehearsal (to stabilise the runtime environment), then for
# real.  <tt>GC.start</tt> runs before each real timing and its cost is
# not included.  Returns an array of Benchmark::Tms objects holding the
# real-pass measurements.
#
# Because #bmbm takes two passes through the tests, it can calculate the
# required label width itself; _width_ is only an initial minimum.
#
def bmbm(width = 0, &blk) # :yield: job
  job = Job.new(width)
  yield(job)
  width = job.width
  if SYNC
    sync = OUTPUT.sync
    OUTPUT.sync = true
  end

  # Rehearsal pass: results are summed for the footer but discarded.
  OUTPUT.print "Rehearsal "
  # BUG FIX: the original wrote this separator with bare Kernel#puts,
  # sending it to STDOUT even when OUTPUT was redirected.
  OUTPUT.puts '-'*(width+CAPTION.length - "Rehearsal ".length)
  list = []
  job.list.each{|label,item|
    OUTPUT.print(label.ljust(width))
    res = Benchmark::measure(&item)
    OUTPUT.print res.format()
    list.push res
  }
  sum = Tms.new; list.each{|i| sum += i}
  ets = sum.format("total: %tsec")
  OUTPUT.printf("%s %s\n\n",
                "-"*(width+CAPTION.length-ets.length-1), ets)

  # Real pass: GC before each measurement so collection triggered by one
  # job is not billed to the next.  (A dead second `list` accumulator
  # from the original has been removed; it was never read.)
  OUTPUT.print ' '*width, CAPTION
  ary = []
  job.list.each{|label,item|
    GC::start
    OUTPUT.print label.ljust(width)
    res = Benchmark::measure(&item)
    OUTPUT.print res.format()
    ary.push res
  }

  OUTPUT.sync = sync if SYNC
  ary
end
|
293
|
+
|
294
|
+
#
|
295
|
+
# Returns the time used to execute the given block as a
|
296
|
+
# Benchmark::Tms object.
|
297
|
+
#
|
298
|
+
#
# Yields to the given block and returns the time it consumed as a
# Benchmark::Tms object tagged with _label_.  CPU times come from
# Benchmark.times (Process times, including children); real time is the
# wall-clock difference.
#
def measure(label = "") # :yield:
  before_cpu = Benchmark.times
  before_wall = Time.now
  yield
  after_cpu = Benchmark.times
  after_wall = Time.now
  Benchmark::Tms.new(after_cpu.utime  - before_cpu.utime,
                     after_cpu.stime  - before_cpu.stime,
                     after_cpu.cutime - before_cpu.cutime,
                     after_cpu.cstime - before_cpu.cstime,
                     after_wall.to_f  - before_wall.to_f,
                     label)
end
|
309
|
+
|
310
|
+
#
|
311
|
+
# Returns the elapsed real time used to execute the given block.
|
312
|
+
#
|
313
|
+
#
# Returns only the elapsed real (wall-clock) time used to execute the
# given block, discarding the CPU-time components of the measurement.
#
def realtime(&blk) # :yield:
  timing = Benchmark::measure(&blk)
  timing.real
end
|
316
|
+
|
317
|
+
|
318
|
+
|
319
|
+
#
|
320
|
+
# A Job is a sequence of labelled blocks to be processed by the
|
321
|
+
# Benchmark.bmbm method. It is of little direct interest to the user.
|
322
|
+
#
|
323
|
+
#
# A Job is a sequence of labelled blocks to be processed by the
# Benchmark.bmbm method.  It is of little direct interest to the user.
#
class Job # :nodoc:
  #
  # Returns an initialized Job instance.  Usually one doesn't call this
  # method directly; new Job objects are created by #bmbm, which passes
  # its _width_ argument here as the initial label offset.
  #
  def initialize(width)
    @width = width
    @list = []
  end

  #
  # Registers the given label and block pair in the job list, widening
  # #width if needed, and returns self.  Raises ArgumentError when no
  # block is supplied.
  #
  def item(label = "", &blk) # :yield:
    # BUG FIX: the original raised the misspelled constant ArgmentError,
    # producing a NameError instead of the intended ArgumentError.
    raise ArgumentError, "no block" unless block_given?
    # Append the padding space to a new string instead of mutating the
    # caller's label in place (the original used destructive #concat).
    label = label + ' '
    w = label.length
    @width = w if @width < w
    @list.push [label, blk]
    self
  end

  alias report item

  # An array of 2-element arrays, consisting of label and block pairs.
  attr_reader :list

  # Length of the widest label in the #list, plus one.
  attr_reader :width
end
|
356
|
+
|
357
|
+
# Expose the reporting methods both as module functions
# (e.g. Benchmark.measure) and as private instance methods for include.
module_function :benchmark, :measure, :realtime, :bm, :bmbm
|
358
|
+
|
359
|
+
|
360
|
+
|
361
|
+
#
|
362
|
+
# This class is used by the Benchmark.benchmark and Benchmark.bm methods.
|
363
|
+
# It is of little direct interest to the user.
|
364
|
+
#
|
365
|
+
#
# This class is used by the Benchmark.benchmark and Benchmark.bm methods.
# It is of little direct interest to the user.
#
class Report # :nodoc:
  #
  # Returns an initialized Report instance.  Usually one doesn't call
  # this directly; Report objects are created by #benchmark and #bm.
  # _width_ is the label offset and _fmtstr_ the format string handed
  # to Tms#format.
  #
  def initialize(width = 0, fmtstr = nil)
    @width = width
    @fmtstr = fmtstr
  end

  #
  # Prints the _label_ and the measured time for the block, formatted by
  # _fmt_ (see Tms#format for the rules), and returns the measurement.
  #
  def item(label = "", *fmt, &blk) # :yield:
    OUTPUT.print(label.ljust(@width))
    timing = Benchmark.measure(&blk)
    OUTPUT.print(timing.format(@fmtstr, *fmt))
    timing
  end

  alias report item
end
|
391
|
+
|
392
|
+
|
393
|
+
|
394
|
+
#
|
395
|
+
# A data object, representing the times associated with a benchmark
|
396
|
+
# measurement.
|
397
|
+
#
|
398
|
+
#
# A data object, representing the times associated with a benchmark
# measurement.
#
class Tms
  # Heading printed above the default-format columns.
  CAPTION = "      user     system      total        real\n"
  # Default format: user, system, total and (real) times, see #format.
  FMTSTR = "%10.6u %10.6y %10.6t %10.6r\n"

  # User CPU time
  attr_reader :utime

  # System CPU time
  attr_reader :stime

  # User CPU time of children
  attr_reader :cutime

  # System CPU time of children
  attr_reader :cstime

  # Elapsed real time
  attr_reader :real

  # Total time, that is _utime_ + _stime_ + _cutime_ + _cstime_
  attr_reader :total

  # Label
  attr_reader :label

  #
  # Returns an initialized Tms object which has _u_ as the user CPU time,
  # _s_ as the system CPU time, _cu_ as the children's user CPU time,
  # _cs_ as the children's system CPU time, _real_ as the elapsed real
  # time and _l_ as the label.
  #
  def initialize(u = 0.0, s = 0.0, cu = 0.0, cs = 0.0, real = 0.0, l = nil)
    @utime, @stime, @cutime, @cstime, @real, @label = u, s, cu, cs, real, l
    @total = @utime + @stime + @cutime + @cstime
  end

  #
  # Returns a new Tms object whose times are the sum of the times for this
  # Tms object, plus the time required to execute the code block (_blk_).
  #
  def add(&blk) # :yield:
    self + Benchmark::measure(&blk)
  end

  #
  # An in-place version of #add.
  #
  def add!(&blk) # :yield:
    # BUG FIX: the original signature omitted &blk even though the body
    # used it, so every call raised NameError.
    t = Benchmark::measure(&blk)
    @utime = utime + t.utime
    @stime = stime + t.stime
    @cutime = cutime + t.cutime
    @cstime = cstime + t.cstime
    @real = real + t.real
    # Keep the cached #total consistent with the updated components
    # (the original left it stale after an in-place add).
    @total = @utime + @stime + @cutime + @cstime
    self
  end

  #
  # Returns a new Tms object obtained by memberwise summation of the
  # individual times for this Tms object with those of the other Tms
  # object.  This method and #/() are useful for taking statistics.
  #
  def +(other); memberwise(:+, other) end

  #
  # Returns a new Tms object obtained by memberwise subtraction of the
  # individual times for the other Tms object from those of this one.
  #
  def -(other); memberwise(:-, other) end

  #
  # Returns a new Tms object obtained by memberwise multiplication of the
  # individual times for this Tms object by _x_.
  #
  def *(x); memberwise(:*, x) end

  #
  # Returns a new Tms object obtained by memberwise division of the
  # individual times for this Tms object by _x_.
  # This method and #+() are useful for taking statistics.
  #
  def /(x); memberwise(:/, x) end

  #
  # Returns the contents of this Tms object as a formatted string,
  # according to a format string like that passed to Kernel.format.
  # In addition, #format accepts the following extensions:
  #
  # <tt>%u</tt>:: Replaced by the user CPU time, as reported by Tms#utime.
  # <tt>%y</tt>:: Replaced by the system CPU time, as reported by #stime (Mnemonic: y of "s*y*stem")
  # <tt>%U</tt>:: Replaced by the children's user CPU time, as reported by Tms#cutime
  # <tt>%Y</tt>:: Replaced by the children's system CPU time, as reported by Tms#cstime
  # <tt>%t</tt>:: Replaced by the total CPU time, as reported by Tms#total
  # <tt>%r</tt>:: Replaced by the elapsed real time, as reported by Tms#real
  # <tt>%n</tt>:: Replaced by the label string, as reported by Tms#label (Mnemonic: n of "*n*ame")
  #
  # If _fmtstr_ is not given, FMTSTR is used as default value, detailing
  # the user, system and real elapsed time.
  #
  def format(arg0 = nil, *args)
    fmtstr = (arg0 || FMTSTR).dup
    fmtstr.gsub!(/(%[-+\.\d]*)n/){"#{$1}s" % label}
    fmtstr.gsub!(/(%[-+\.\d]*)u/){"#{$1}f" % utime}
    fmtstr.gsub!(/(%[-+\.\d]*)y/){"#{$1}f" % stime}
    fmtstr.gsub!(/(%[-+\.\d]*)U/){"#{$1}f" % cutime}
    fmtstr.gsub!(/(%[-+\.\d]*)Y/){"#{$1}f" % cstime}
    fmtstr.gsub!(/(%[-+\.\d]*)t/){"#{$1}f" % total}
    fmtstr.gsub!(/(%[-+\.\d]*)r/){"(#{$1}f)" % real}
    arg0 ? Kernel::format(fmtstr, *args) : fmtstr
  end

  #
  # Same as #format.
  #
  def to_s
    format
  end

  #
  # Returns a new 6-element array, consisting of the label, user CPU
  # time, system CPU time, children's user CPU time, children's system
  # CPU time and elapsed real time.
  #
  def to_a
    [@label, @utime, @stime, @cutime, @cstime, @real]
  end

  protected

  # Builds a new Tms by applying _op_ member-by-member: against the
  # corresponding members when _x_ is another Tms, or against the scalar
  # _x_ otherwise.  The label is not propagated.
  def memberwise(op, x)
    case x
    when Benchmark::Tms
      Benchmark::Tms.new(utime.__send__(op, x.utime),
                         stime.__send__(op, x.stime),
                         cutime.__send__(op, x.cutime),
                         cstime.__send__(op, x.cstime),
                         real.__send__(op, x.real)
                         )
    else
      Benchmark::Tms.new(utime.__send__(op, x),
                         stime.__send__(op, x),
                         cutime.__send__(op, x),
                         cstime.__send__(op, x),
                         real.__send__(op, x)
                         )
    end
  end
end
|
550
|
+
|
551
|
+
# The default caption string (heading above the output times),
# re-exported from Benchmark::Tms for convenience.
CAPTION = Benchmark::Tms::CAPTION

# The default format string used to display times.  See also Benchmark::Tms#format.
FMTSTR = Benchmark::Tms::FMTSTR
|
556
|
+
end
|
557
|
+
|
558
|
+
# Self-test / demo: runs only when this file is executed directly.
# Benchmarks three ways of repeating `a = "1"' n times, where n is the
# first command-line argument (defaulting to 50000 when absent or zero).
if __FILE__ == $0
  include Benchmark

  n = ARGV[0].to_i.nonzero? || 50000
  puts %Q([#{n} times iterations of `a = "1"'])
  # Labelled sequential report via Benchmark#benchmark.
  benchmark(" " + CAPTION, 7, FMTSTR) do |x|
    x.report("for:") {for i in 1..n; a = "1"; end} # Benchmark::measure
    x.report("times:") {n.times do ; a = "1"; end}
    x.report("upto:") {1.upto(n) do ; a = "1"; end}
  end

  # Unlabelled report: the returned array of Tms measurements is printed
  # by #benchmark as extra lines.
  benchmark do
    [
      measure{for i in 1..n; a = "1"; end}, # Benchmark::measure
      measure{n.times do ; a = "1"; end},
      measure{1.upto(n) do ; a = "1"; end}
    ]
  end
end
|