benchmarker 0.1.0 → 1.0.0
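
For orientation, the 1.0.0 tests below exercise the new scope/task API. A minimal usage sketch, assuming the benchmarker gem (1.0.0) is installed; the option names mirror those asserted in the tests:

    require 'benchmarker'

    # Defines two tasks and runs them, printing the ranking and matrix report.
    # `loop:` and `iter:` are the same options the tests below assert on.
    Benchmarker.scope(loop: 1000, iter: 5) do
      task "String#+" do
        "foo" + "bar"
      end
      task "Array#join" do
        ["foo", "bar"].join()
      end
    end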

@@ -1,694 +1,1666 @@
1
+ # -*- coding: utf-8 -*-
2
+ # frozen_string_literal: true
3
+
1
4
  ###
2
- ### $Release: 0.1.0 $
3
- ### $Copyright: copyright(c) 2010-2011 kuwata-lab.com all rights reserved $
4
- ### $License: Public Domain $
5
+ ### $Release: 1.0.0 $
6
+ ### $Copyright: copyright(c) 2010-2021 kuwata-lab.com all rights reserved $
7
+ ### $License: MIT License $
5
8
  ###
6
9
 
7
- $:.unshift File.class_eval { expand_path(dirname(__FILE__)) }
8
- $:.unshift File.class_eval { expand_path(join(dirname(__FILE__), '../lib')) }
10
+ $LOAD_PATH.unshift File.class_eval { join(dirname(dirname(__FILE__)), 'lib') }
9
11
 
10
12
  require 'oktest'
13
+
14
+ BENCHMARKER_IGNORE_CMDOPTS = true
11
15
  require 'benchmarker'
12
16
 
13
17
 
14
- class Benchmarker_TC
15
- include Oktest::TestCase
18
+ Oktest.scope do
19
+
16
20
 
17
- def test_SELF_new
18
- spec "creates runner object and returns it." do
19
- ret = Benchmarker.new
20
- ok {ret}.is_a?(Benchmarker::RUNNER)
21
+ + topic(Benchmarker) do
22
+
23
+ + topic('.new()') do
24
+ - spec("[!2zh7w] creates new Benchmark object wit options..") do
25
+ bm = Benchmarker.new()
26
+ ok {bm}.is_a?(Benchmarker::Benchmark)
27
+ ok {bm.width} == 30
28
+ ok {bm.loop} == 1
29
+ ok {bm.iter} == 1
30
+ ok {bm.extra} == 0
31
+ ok {bm.inverse} == false
32
+ ok {bm.outfile} == nil
33
+ #
34
+ bm = Benchmarker.new(width: 25, loop: 100, iter: 3, extra: 2, inverse: true, outfile: "tmp.js")
35
+ ok {bm}.is_a?(Benchmarker::Benchmark)
36
+ ok {bm.width} == 25
37
+ ok {bm.loop} == 100
38
+ ok {bm.iter} == 3
39
+ ok {bm.extra} == 2
40
+ ok {bm.inverse} == true
41
+ ok {bm.outfile} == "tmp.js"
42
+ end
43
+ - spec("[!s7y6x] overwrites existing options by command-line options.") do
44
+ kws = {width: 15, loop: 1000, iter: 20, extra: 3, inverse: true, outfile: "tmp2.js"}
45
+ bm = dummy_values(Benchmarker::OPTIONS, **kws) {
46
+ Benchmarker.new()
47
+ }
48
+ ok {bm}.is_a?(Benchmarker::Benchmark)
49
+ ok {bm.width} == 15
50
+ ok {bm.loop} == 1000
51
+ ok {bm.iter} == 20
52
+ ok {bm.extra} == 3
53
+ ok {bm.inverse} == true
54
+ ok {bm.outfile} == "tmp2.js"
55
+ end
21
56
  end
22
- end
23
57
 
24
- def test_SELF_platform
25
- spec "returns platform information." do
26
- s = Benchmarker.platform()
27
- ok {s} =~ /^benchmarker\.rb:\s+release \d+\.\d+\.\d+/
28
- rexp = /^RUBY_VERSION:\s+(.*)/
29
- ok {s} =~ rexp
30
- ok {s =~ rexp and $1} == RUBY_VERSION
31
- rexp = /^RUBY_PATCHLEVEL:\s+(.*)/
32
- ok {s} =~ rexp
33
- ok {s =~ rexp and $1} == RUBY_PATCHLEVEL.to_s
34
- rexp = /^RUBY_PLATFORM:\s+(.*)/
35
- ok {s} =~ rexp
36
- ok {s =~ rexp and $1} == RUBY_PLATFORM
37
- i = 0
38
- s.each_line {|line| i += 1 }
39
- ok {i} == 4
58
+ + topic('.scope()') do
59
+ - spec("[!4f695] creates Benchmark object, define tasks, and run them.") do
60
+ this = self
61
+ sout, serr = capture_sio do
62
+ Benchmarker.scope() do
63
+ this.ok {self}.is_a?(Benchmarker::Scope)
64
+ task "1+1" do
65
+ 1+1
66
+ end
67
+ task "1-1" do
68
+ 1-1
69
+ end
70
+ end
71
+ end
72
+ ok {serr} == ""
73
+ ok {sout} =~ /^## benchmarker: *release \d+.\d+.\d+$/
74
+ ok {sout} =~ /^## Ranking/
75
+ ok {sout} =~ /^1\+1 +0\./
76
+ ok {sout} =~ /^1\-1 +0\./
77
+ ok {sout} =~ /^## Matrix/
78
+ end
40
79
  end
41
- end
42
80
 
43
- end
81
+ end
44
82
 
45
83
 
46
- class Benchmarker::Runner_TC
47
- include Oktest::TestCase
84
+ + topic(Benchmarker::Benchmark) do
48
85
 
49
- def test_initialize
50
- spec "takes :loop, :cycle, and :extra options." do
51
- runner = Benchmarker::RUNNER.new(:loop=>10, :cycle=>20, :extra=>30)
52
- ok {runner.instance_variable_get('@loop')} == 10
53
- ok {runner.instance_variable_get('@cycle')} == 20
54
- ok {runner.instance_variable_get('@extra')} == 30
86
+ fixture :bm do
87
+ Benchmarker::Benchmark.new
55
88
  end
56
- end
57
89
 
58
- def test_task
59
- runner = nil
60
- ret = nil
61
- called = false
62
- sout, serr = dummy_io() do
63
- runner = Benchmarker::RUNNER.new # should be inside dummy_io() block!
64
- ret = runner.task("label1") { called = true }
65
- runner.task("label2", :skip=>"# not installed.") { nil }
66
- end
67
- spec "prints section title if not printed yet." do
68
- ok {sout} =~ /\A\n## {28} user sys total real\n.*\n/
69
- ok {serr} == ""
70
- end
71
- spec "creates task objet and returns it." do
72
- ok {ret}.is_a?(Benchmarker::TASK)
73
- end
74
- spec "saves created task object unless :skip optin is not specified." do
75
- task = ret
76
- ok {runner.tasks} == [task]
77
- end
78
- spec "runs task when :skip option is not specified." do
79
- ok {called} == true
80
- ok {sout} =~ /\A\n.*\nlabel1 0\.\d+ 0\.\d+ 0\.\d+ 0\.\d+\n/
81
- end
82
- spec "skip block and prints message when :skip option is specified." do
83
- ok {sout} =~ /^label2 *\# not installed\.\n/
84
- end
85
- spec "subtracts times of empty task if exists." do
86
- empty_task = runner.empty_task { nil }
87
- empty_task.user = 10.0
88
- empty_task.sys = 5.0
89
- empty_task.total = 15.0
90
- empty_task.real = 20.0
91
- t = runner.task("label2") { x = 1+1 }
92
- ok {t.user }.in_delta?(-10.0, 0.1)
93
- ok {t.sys }.in_delta?(- 5.0, 0.1)
94
- ok {t.total}.in_delta?(-15.0, 0.1)
95
- ok {t.real }.in_delta?(-20.0, 0.1)
90
+ + topic('#initialize()') do
91
+ - spec("[!0mz0f] error when filter string is invalid format.") do
92
+ pr = proc { Benchmarker::Benchmark.new(filter: 'foobar') }
93
+ ok {pr}.raise?(ArgumentError, "foobar: invalid filter.")
94
+ end
95
+ - spec("[!xo7bq] error when filter operator is invalid.") do
96
+ pr = proc { Benchmarker::Benchmark.new(filter: 'task==foobar') }
97
+ ok {pr}.raise?(ArgumentError, "task==foobar: expected operator is '=' or '!='.")
98
+ end
96
99
  end
97
- end
98
100
 
99
- def test_empty_task
100
- runner = task = sout = nil
101
- spec "creates empty task object and returns it." do
102
- sout, serr = dummy_io() do
103
- runner = Benchmarker::RUNNER.new # should be inside dummy_io() block!
104
- task = runner.empty_task { nil }
101
+ + topic('#clear()') do
102
+ - spec("[!phqdn] clears benchmark result and JSON data.") do |bm|
103
+ bm.scope do
104
+ task "foo" do nil end
105
+ task "bar" do nil end
106
+ end
107
+ capture_sio { bm.run() }
108
+ result_foo = bm.instance_eval{@entries[0][1]}
109
+ result_bar = bm.instance_eval{@entries[1][1]}
110
+ ok {result_foo.length} == 1
111
+ ok {result_bar.length} == 1
112
+ #
113
+ bm.clear()
114
+ ok {result_foo.length} == 0
115
+ ok {result_bar.length} == 0
105
116
  end
106
- ok {task}.is_a?(Benchmarker::TASK)
107
- ok {task.label} == "(Empty)"
108
- end
109
- spec "prints section title if not printed yet." do
110
- ok {sout} =~ /^## +user +sys +total +real\n/
111
- end
112
- spec "saves empty task object." do
113
- ok {runner.instance_variable_get('@_empty_task')} == task
114
117
  end
115
- spec "don't add empty task to @tasks." do
116
- ok {runner.tasks} == []
118
+
119
+ + topic('#scope()') do
120
+ - spec("[!wrjy0] creates wrapper object and yields block with it as self.") do |bm|
121
+ this = self
122
+ ret = bm.scope() do |*args|
123
+ this.ok {self} != this
124
+ this.ok {self}.is_a?(Benchmarker::Scope)
125
+ this.ok {self}.respond_to?(:task)
126
+ this.ok {self}.respond_to?(:empty_task)
127
+ end
128
+ end
129
+ - spec("[!6h24d] passes benchmark object as argument of block.") do |bm|
130
+ this = self
131
+ bm.scope() do |*args|
132
+ this.ok {args} == [bm]
133
+ end
134
+ end
135
+ - spec("[!y0uwr] returns self.") do |bm|
136
+ ret = bm.scope() do nil end
137
+ ok {ret}.same?(bm)
138
+ end
117
139
  end
118
- spec "clear @_empty_task." do
119
- # pass
140
+
141
+ + topic('#define_empty_task()') do
142
+ - spec("[!w66xp] creates empty task.") do |bm|
143
+ ok {bm.instance_eval{@empty_task}} == nil
144
+ ret = bm.define_empty_task() do nil end
145
+ ok {bm.instance_eval{@empty_task}} != nil
146
+ ok {bm.instance_eval{@empty_task}}.is_a?(Benchmarker::Task)
147
+ ok {bm.instance_eval{@empty_task}.name} == nil
148
+ ok {ret}.is_a?(Benchmarker::Task)
149
+ ok {ret.name} == nil
150
+ end
151
+ - spec("[!qzr1s] error when called more than once.") do |bm|
152
+ pr = proc { bm.define_empty_task() do nil end }
153
+ ok {pr}.NOT.raise?()
154
+ ok {pr}.raise?(RuntimeError, "cannot define empty task more than once.")
155
+ end
120
156
  end
121
- end
122
157
 
123
- def test_skip_task
124
- runner = nil
125
- sout, serr = dummy_io() do
126
- runner = Benchmarker::RUNNER.new
127
- runner.skip_task("bench1", "-- not installed --")
128
- runner.skip_task("bench2", "** not supported **")
158
+ + topic('#define_task()') do
159
+ - spec("[!re6b8] creates new task.") do |bm|
160
+ ret = bm.define_task("foobar") do nil end
161
+ ok {ret}.is_a?(Benchmarker::Task)
162
+ ok {ret.name} == "foobar"
163
+ end
164
+ - spec("[!r8o0p] can take a tag.") do |bm|
165
+ ret = bm.define_task("balbla", tag: 'curr') do nil end
166
+ ok {ret.tag} == "curr"
167
+ end
129
168
  end
130
- spec "prints headers if they are not printed." do
131
- ok {sout} =~ /^## +user +sys +total +real\n/
169
+
170
+ + topic('#define_hook()') do |bm|
171
+ - spec("[!2u53t] register proc object with symbol key.") do |bm|
172
+ called = false
173
+ bm.define_hook(:hook1) do called = true end
174
+ ok {called} == false
175
+ bm.__send__(:call_hook, :hook1)
176
+ ok {called} == true
177
+ end
132
178
  end
133
- spec "prints task label and message instead of times." do
134
- ok {sout} =~ /^bench1 +\-\- not installed \-\-\n/
135
- ok {sout} =~ /^bench2 +\*\* not supported \*\*\n/
179
+
180
+ + topic('#call_hook()') do |bm|
181
+ - spec("[!0to2s] calls hook with arguments.") do |bm|
182
+ args = nil
183
+ bm.define_hook(:hook2) do |*a| args = a end
184
+ ok {args} == nil
185
+ bm.__send__(:call_hook, :hook2, "abc", tag: "xyz")
186
+ ok {args} == ["abc", {tag: "xyz"}]
187
+ end
136
188
  end
137
- spec "don't change @tasks." do
138
- ok {runner.instance_variable_get('@tasks')} == []
189
+
190
+ + topic('#run()') do
191
+ - spec("[!0fo0l] runs benchmark tasks and reports result.") do |bm|
192
+ foo_called = false; bar_called = false
193
+ bm.define_task("foo") do foo_called = true end
194
+ bm.define_task("bar") do bar_called = true end
195
+ ok {foo_called} == false
196
+ ok {bar_called} == false
197
+ sout, serr = capture_sio do
198
+ bm.run()
199
+ end
200
+ ok {foo_called} == true
201
+ ok {bar_called} == true
202
+ ok {serr} == ""
203
+ ok {sout} =~ /^\#\# benchmarker:/
204
+ ok {sout} =~ /^foo +/
205
+ ok {sout} =~ /^bar +/
206
+ end
207
+ - spec("[!6h26u] runs preriminary round when `warmup: true` provided.") do |bm|
208
+ called = 0
209
+ bm.define_task("foo") do called += 1 end
210
+ sout, serr = capture_sio do
211
+ bm.run(warmup: true)
212
+ end
213
+ ok {called} == 2
214
+ n = 0
215
+ sout.scan(/^## +.*\nfoo +/) { n += 1 }
216
+ ok {n} == 1
217
+ end
218
+ - spec("[!2j4ks] calls 'before_all' hook.") do |bm|
219
+ called = 0
220
+ bm.define_hook(:before_all) do called += 1 end
221
+ ok {called} == 0
222
+ capture_sio { bm.run() }
223
+ ok {called} == 1
224
+ end
225
+ - spec("[!w1rq7] calls 'after_all' hook even if error raised.") do |bm|
226
+ called = 0
227
+ bm.define_hook(:after_all) do called += 1 end
228
+ bm.define_task("foo") do 1/0 end # raises ZeroDivisionError
229
+ ok {called} == 0
230
+ capture_sio do
231
+ pr = proc { bm.run() }
232
+ ok {pr}.raise?(ZeroDivisionError)
233
+ end
234
+ ok {called} == 1
235
+ end
139
236
  end
140
- end
141
237
 
142
- def test__before_all
143
- spec "prints platform information." do
144
- sout, serr = dummy_io() do
145
- runner = Benchmarker::RUNNER.new
146
- runner._before_all()
238
+ + topic('#_ignore_output()') do
239
+ - spec("[!wazs7] ignores output in block argument.") do |bm|
240
+ called = false
241
+ sout, serr = capture_sio do
242
+ puts "aaa"
243
+ bm.__send__(:_ignore_output) do
244
+ puts "bbb"
245
+ called = true
246
+ end
247
+ puts "ccc"
248
+ end
249
+ ok {called} == true
250
+ ok {sout} == "aaa\nccc\n"
251
+ ok {serr} == ""
147
252
  end
148
- ok {sout} == Benchmarker.platform()
149
253
  end
150
- end
151
254
 
152
- def test__after_all
153
- spec "prints statistics of benchmarks." do
154
- tr = tracer()
155
- sout, serr = dummy_io() do
156
- runner = Benchmarker::RUNNER.new
157
- tr.trace_method(runner.stats, :all)
158
- runner.task("label1") { nil }
159
- runner.task("label2") { nil }
160
- runner._after_all()
255
+ + topic('#filter_tasks()') do
256
+ def new_bm(filter)
257
+ bm = Benchmarker::Benchmark.new(filter: filter).scope do
258
+ task "foo" do nil end
259
+ task "bar", tag: 'xx' do nil end
260
+ task "baz", tag: ['xx', 'yy'] do nil end
261
+ end
262
+ bm
263
+ end
264
+ def task_names(bm)
265
+ bm.instance_eval {@entries}.collect {|t,_| t.name}
266
+ end
267
+ - spec("[!f1n1v] filters tasks by task name when filer string is 'task=...'.") do
268
+ bm = new_bm('task=bar')
269
+ ok {task_names(bm)} == ["foo", "bar", "baz"]
270
+ bm.__send__(:filter_tasks)
271
+ ok {task_names(bm)} == ["bar"]
272
+ #
273
+ bm = new_bm('task=ba*')
274
+ ok {task_names(bm)} == ["foo", "bar", "baz"]
275
+ bm.__send__(:filter_tasks)
276
+ ok {task_names(bm)} == ["bar", "baz"]
277
+ #
278
+ bm = new_bm('task=*z')
279
+ ok {task_names(bm)} == ["foo", "bar", "baz"]
280
+ bm.__send__(:filter_tasks)
281
+ ok {task_names(bm)} == ["baz"]
282
+ #
283
+ bm = new_bm('task=*xx*')
284
+ ok {task_names(bm)} == ["foo", "bar", "baz"]
285
+ bm.__send__(:filter_tasks)
286
+ ok {task_names(bm)} == []
287
+ end
288
+ - spec("[!m79cf] filters tasks by tag value when filer string is 'tag=...'.") do
289
+ bm = new_bm('tag=xx')
290
+ bm.__send__(:filter_tasks)
291
+ ok {task_names(bm)} == ["bar", "baz"]
292
+ #
293
+ bm = new_bm('tag=yy')
294
+ bm.__send__(:filter_tasks)
295
+ ok {task_names(bm)} == ["baz"]
296
+ #
297
+ bm = new_bm('tag=*x')
298
+ bm.__send__(:filter_tasks)
299
+ ok {task_names(bm)} == ["bar", "baz"]
300
+ #
301
+ bm = new_bm('tag=zz')
302
+ bm.__send__(:filter_tasks)
303
+ ok {task_names(bm)} == []
304
+ end
305
+ - spec("[!0in0q] supports negative filter by '!=' operator.") do
306
+ bm = new_bm('task!=bar')
307
+ bm.__send__(:filter_tasks)
308
+ ok {task_names(bm)} == ["foo", "baz"]
309
+ #
310
+ bm = new_bm('task!=ba*')
311
+ bm.__send__(:filter_tasks)
312
+ ok {task_names(bm)} == ["foo"]
313
+ #
314
+ bm = new_bm('tag!=xx')
315
+ bm.__send__(:filter_tasks)
316
+ ok {task_names(bm)} == ["foo"]
317
+ #
318
+ bm = new_bm('tag!=yy')
319
+ bm.__send__(:filter_tasks)
320
+ ok {task_names(bm)} == ["foo", "bar"]
321
+ end
322
+ - spec("[!g207d] do nothing when filter string is not provided.") do
323
+ bm = new_bm(nil)
324
+ bm.__send__(:filter_tasks)
325
+ ok {task_names(bm)} == ["foo", "bar", "baz"]
161
326
  end
162
- ok {tr[0].name} == :all
163
- ok {sout} =~ /^## Ranking/
164
- ok {sout} =~ /^## Matrix/
165
327
  end
166
- end
167
328
 
168
- def test__run
169
- spec "when @cycle > 1..." do
170
- runner = sout = serr = block_param = nil
171
- spec "yields block @cycle times when @extra is not specified." do
172
- i = 0
173
- sout, serr = dummy_io() do
174
- runner = Benchmarker::RUNNER.new(:cycle=>2)
175
- runner._run do |r|
176
- i +=1
177
- block_param = r
178
- r.task('taskA') { nil }
179
- r.task('taskB') { nil }
329
+ + topic('#invoke_tasks()') do
330
+ def new_bm(**kwargs)
331
+ called = []
332
+ bm = Benchmarker::Benchmark.new(**kwargs).scope do
333
+ empty_task do called << :empty end
334
+ task "foo" do called << :foo end
335
+ task "bar" do called << :bar end
336
+ task "baz" do called << :baz end
337
+ end
338
+ return bm, called
339
+ end
340
+ class Task2 < Benchmarker::Task
341
+ def invoke(loop=1)
342
+ super
343
+ case @name
344
+ when nil ; a = [0.002, 0.001, 0.003, 0.0031]
345
+ when "foo" ; a = [0.005, 0.003, 0.008, 0.0085]
346
+ when "bar" ; a = [0.007, 0.004, 0.011, 0.0115]
347
+ when "baz" ; a = [0.009, 0.005, 0.014, 0.0145]
348
+ else ; raise "** internal error"
180
349
  end
350
+ Benchmarker::TimeSet.new(*a)
181
351
  end
182
- ok {i} == 2
183
- end
184
- runner2 = sout2 = serr2 = block_param2 = nil
185
- spec "yields block @cycle + 2*@extra times when @extra is specified." do
186
- i = 0
187
- sout2, serr2 = dummy_io() do
188
- runner2 = Benchmarker::RUNNER.new(:cycle=>5, :extra=>1)
189
- runner2._run do |r|
190
- i +=1
191
- block_param2 = r
192
- r.task('taskA') { nil }
193
- r.task('taskB') { nil }
352
+ end
353
+ def with_dummy_task_class()
354
+ Benchmarker.module_eval { remove_const :TASK; const_set :TASK, Task2 }
355
+ yield
356
+ ensure
357
+ Benchmarker.module_eval { remove_const :TASK; const_set :TASK, Benchmarker::Task }
358
+ end
359
+ - spec("[!3hgos] invokes empty task at first if defined.") do
360
+ bm, called = new_bm()
361
+ sout, serr = capture_sio { bm.__send__(:invoke_tasks) }
362
+ ok {called.first} == :empty
363
+ ok {sout} =~ /^## +.*\n\(Empty\) +/
364
+ end
365
+ - spec("[!xf84h] invokes all tasks.") do
366
+ bm, called = new_bm()
367
+ sout, serr = capture_sio { bm.__send__(:invoke_tasks) }
368
+ ok {called} == [:empty, :foo, :bar, :baz]
369
+ ok {sout} =~ /^\(Empty\) +.*\nfoo +.*\nbar +.*\nbaz +.*\n/
370
+ end
371
+ - spec("[!6g36c] invokes task with validator if validator defined.") do
372
+ bm = Benchmarker::Benchmark.new().scope do
373
+ task "foo" do 100 end
374
+ task "bar" do 123 end
375
+ validate do |actual, name|
376
+ actual == 100 or
377
+ raise "task(#{name.inspect}): #{actual.inspect} == 100: failed."
194
378
  end
195
379
  end
196
- ok {i} == 7
197
- end
198
- spec "prints output of cycle into stderr." do
199
- not_ok {sout} =~ /^## \(#1\)/
200
- not_ok {sout} =~ /^## \(#2\)/
201
- ok {serr} =~ /^## \(#1\)/
202
- ok {serr} =~ /^## \(#2\)/
203
- not_ok {sout2} =~ /^## \(#1\)/
204
- not_ok {sout2} =~ /^## \(#2\)/
205
- ok {serr2} =~ /^## \(#1\)/
206
- ok {serr2} =~ /^## \(#2\)/
207
- end
208
- spec "yields block with self as block paramter." do
209
- ok {block_param}.same?(runner)
210
- ok {block_param2}.same?(runner2)
211
- end
212
- spec "reports average of results." do
213
- ok {sout} =~ /^## Average of 2/
214
- ok {sout2} =~ /^## Average of 5 \(=7-2\*1\)/
215
- end
216
- end
217
- spec "when @cycle == 0 or not specified..." do
218
- runner = sout = block_param = nil
219
- spec "yields block only once." do
220
- i = 0
221
- sout, serr = dummy_io() do
222
- runner = Benchmarker::RUNNER.new()
223
- runner._run do |r|
224
- i +=1
225
- block_param = r
226
- r.task('taskA') { nil }
227
- r.task('taskB') { nil }
380
+ pr = proc do
381
+ capture_sio { bm.__send__(:invoke_tasks) }
382
+ end
383
+ ok {pr}.raise?(RuntimeError, "task(\"bar\"): 123 == 100: failed.")
384
+ end
385
+ - spec("[!c8yak] invokes tasks once if 'iter' option not specified.") do
386
+ bm, called = new_bm(iter: nil)
387
+ sout, serr = capture_sio { bm.__send__(:invoke_tasks) }
388
+ ok {called} == [:empty, :foo, :bar, :baz] * 1
389
+ ok {sout} !~ /^## \(#\d\)/
390
+ end
391
+ - spec("[!unond] invokes tasks multiple times if 'iter' option specified.") do
392
+ bm, called = new_bm(iter: 5)
393
+ sout, serr = capture_sio { bm.__send__(:invoke_tasks) }
394
+ ok {called} == [:empty, :foo, :bar, :baz] * 5
395
+ ok {sout} =~ /^## \(#1\)/
396
+ ok {sout} =~ /^## \(#5\)/
397
+ end
398
+ - spec("[!wzvdb] invokes tasks 16 times if 'iter' is 10 and 'extra' is 3.") do
399
+ bm, called = new_bm(iter: 10, extra: 3)
400
+ sout, serr = capture_sio { bm.__send__(:invoke_tasks) }
401
+ ok {called} == [:empty, :foo, :bar, :baz] * 16
402
+ ok {sout} =~ /^## \(#1\)/
403
+ ok {sout} =~ /^## \(#16\)/
404
+ end
405
+ - spec("[!hbass] calls 'before' hook with task name and tag.") do
406
+ called = 0
407
+ argslist = []
408
+ bm = Benchmarker::Benchmark.new().scope do
409
+ before do |*a| called += 1; argslist << a end
410
+ task "foo" do nil end
411
+ task "bar", tag: 'yy' do nil end
412
+ end
413
+ ok {called} == 0
414
+ ok {argslist} == []
415
+ capture_sio { bm.__send__(:invoke_tasks) }
416
+ ok {called} == 2
417
+ ok {argslist} == [["foo", nil], ["bar", 'yy']]
418
+ end
419
+ - spec("[!7960c] calls 'after' hook with task name and tag even if error raised.") do
420
+ called = 0
421
+ argslist = []
422
+ bm = Benchmarker::Benchmark.new().scope do
423
+ after do |*a| called += 1; argslist << a end
424
+ task "foo" do nil end
425
+ task "bar", tag: 'yy' do 1/0 end # raises ZeroDivisionError
426
+ task "baz", tag: 'zz' do nil end
427
+ end
428
+ ok {called} == 0
429
+ ok {argslist} == []
430
+ capture_sio do
431
+ pr = proc { bm.__send__(:invoke_tasks) }
432
+ ok {pr}.raise?(ZeroDivisionError)
433
+ end
434
+ ok {called} == 2
435
+ ok {argslist} == [["foo", nil], ["bar", 'yy']]
436
+ end
437
+ - spec("[!fv4cv] skips task invocation if skip reason is specified.") do
438
+ called = []
439
+ bm = Benchmarker::Benchmark.new().scope do
440
+ empty_task do called << :empty end
441
+ task "foo" do called << :foo end
442
+ task "bar", skip: "not installed" do called << :bar end
443
+ task "baz" do called << :baz end
444
+ end
445
+ sout, serr = capture_sio { bm.__send__(:invoke_tasks) }
446
+ ok {called} == [:empty, :foo, :baz] # :bar is not included
447
+ end
448
+ - spec("[!dyofw] prints reason if 'skip:' option specified.") do
449
+ bm = Benchmarker::Benchmark.new().scope do
450
+ task "foo" do nil end
451
+ task "bar", skip: "not installed" do nil end
452
+ task "baz" do nil end
453
+ end
454
+ sout, serr = capture_sio { bm.__send__(:invoke_tasks) }
455
+ ok {sout} =~ /^bar +\# Skipped \(reason: not installed\)$/
456
+ end
457
+ - spec("[!ygpx0] records reason of skip into JSON data.") do
458
+ with_dummy_task_class do
459
+ bm = Benchmarker::Benchmark.new().scope do
460
+ task "foo" do nil end
461
+ task "bar", skip: "not installed" do nil end
462
+ task "baz" do nil end
228
463
  end
464
+ sout, serr = capture_sio { bm.__send__(:invoke_tasks) }
465
+ ok {bm.instance_eval{@jdata}} == {
466
+ :Results => [
467
+ [
468
+ ["foo", 0.005, 0.003, 0.008, 0.0085],
469
+ ["bar", nil, nil, nil, nil, "not installed"],
470
+ ["baz", 0.009, 0.005, 0.014, 0.0145],
471
+ ]
472
+ ]
473
+ }
229
474
  end
230
- ok {i} == 1
231
- ok {sout} =~ /^## *user/
232
475
  end
233
- spec "yields block with self as block paramter." do
234
- ok {block_param}.same?(runner)
476
+ - spec("[!513ok] subtract timeset of empty loop from timeset of each task.") do
477
+ with_dummy_task_class do
478
+ bm, called = new_bm()
479
+ sout, serr = capture_sio { bm.__send__(:invoke_tasks) }
480
+ ok {sout} =~ /^foo +0\.0030 0\.0020 0\.0050 0\.0054$/
481
+ ok {sout} =~ /^bar +0\.0050 0\.0030 0\.0080 0\.0084$/
482
+ ok {sout} =~ /^baz +0\.0070 0\.0040 0\.0110 0\.0114$/
483
+ end
484
+ end
485
+ - spec("[!yg9i7] prints result unless quiet mode.") do
486
+ with_dummy_task_class do
487
+ bm, _ = new_bm()
488
+ sout, serr = capture_sio { bm.__send__(:invoke_tasks) }
489
+ ok {sout} =~ /^## .* +user sys total real$/
490
+ ok {sout} =~ /^\(Empty\) +\d+\.\d+ +\d+\.\d+ +\d+\.\d+ +\d+\.\d+$/
491
+ ok {sout} =~ /^foo +\d+\.\d+ +\d+\.\d+ +\d+\.\d+ +\d+\.\d+$/
492
+ ok {sout} =~ /^bar +\d+\.\d+ +\d+\.\d+ +\d+\.\d+ +\d+\.\d+$/
493
+ ok {sout} =~ /^baz +\d+\.\d+ +\d+\.\d+ +\d+\.\d+ +\d+\.\d+$/
494
+ end
495
+ end
496
+ - spec("[!94916] suppresses result if quiet mode.") do
497
+ with_dummy_task_class do
498
+ bm, _ = new_bm(quiet: true, iter: 2)
499
+ sout, serr = capture_sio { bm.__send__(:invoke_tasks) }
500
+ ok {sout} !~ /^## .* +user sys total real$/
501
+ ok {sout} !~ /^\(Empty\) +\d+\.\d+ +\d+\.\d+ +\d+\.\d+ +\d+\.\d+$/
502
+ ok {sout} !~ /^foo +\d+\.\d+ +\d+\.\d+ +\d+\.\d+ +\d+\.\d+$/
503
+ ok {sout} !~ /^bar +\d+\.\d+ +\d+\.\d+ +\d+\.\d+ +\d+\.\d+$/
504
+ ok {sout} !~ /^baz +\d+\.\d+ +\d+\.\d+ +\d+\.\d+ +\d+\.\d+$/
505
+ end
506
+ end
507
+ - spec("[!5axhl] prints result even on quiet mode if no 'iter' nor 'extra'.") do
508
+ with_dummy_task_class do
509
+ bm, _ = new_bm(quiet: true)
510
+ sout, serr = capture_sio { bm.__send__(:invoke_tasks) }
511
+ ok {sout} =~ /^## .* +user sys total real$/
512
+ ok {sout} =~ /^\(Empty\) +\d+\.\d+ +\d+\.\d+ +\d+\.\d+ +\d+\.\d+$/
513
+ ok {sout} =~ /^foo +\d+\.\d+ +\d+\.\d+ +\d+\.\d+ +\d+\.\d+$/
514
+ ok {sout} =~ /^bar +\d+\.\d+ +\d+\.\d+ +\d+\.\d+ +\d+\.\d+$/
515
+ ok {sout} =~ /^baz +\d+\.\d+ +\d+\.\d+ +\d+\.\d+ +\d+\.\d+$/
516
+ end
517
+ end
518
+ - spec("[!knjls] records result of empty loop into JSON data.") do
519
+ with_dummy_task_class do
520
+ bm, _ = new_bm()
521
+ capture_sio { bm.__send__(:invoke_tasks) }
522
+ jdata = bm.instance_variable_get(:@jdata)
523
+ ok {jdata}.key?(:Results)
524
+ ok {jdata[:Results][0][0]} == ["(Empty)", 0.002, 0.001, 0.003, 0.0031]
525
+ end
526
+ end
527
+ - spec("[!ejxif] records result of each task into JSON data.") do
528
+ with_dummy_task_class do
529
+ bm, _ = new_bm()
530
+ capture_sio { bm.__send__(:invoke_tasks) }
531
+ jdata = bm.instance_variable_get(:@jdata)
532
+ ok {jdata}.key?(:Results)
533
+ ok {jdata[:Results]} == [
534
+ [
535
+ ["(Empty)", 0.002, 0.001, 0.003, 0.0031],
536
+ ["foo" , 0.003, 0.002, 0.005, 0.0054],
537
+ ["bar" , 0.005, 0.003, 0.008, 0.0084],
538
+ ["baz" , 0.007, 0.004, 0.011, 0.0114],
539
+ ],
540
+ ]
541
+ #
542
+ bm, _ = new_bm(iter: 3)
543
+ capture_sio { bm.__send__(:invoke_tasks) }
544
+ jdata = bm.instance_variable_get(:@jdata)
545
+ ok {jdata}.key?(:Results)
546
+ result = [
547
+ ["(Empty)", 0.002, 0.001, 0.003, 0.0031],
548
+ ["foo" , 0.003, 0.002, 0.005, 0.0054],
549
+ ["bar" , 0.005, 0.003, 0.008, 0.0084],
550
+ ["baz" , 0.007, 0.004, 0.011, 0.0114],
551
+ ]
552
+ ok {jdata[:Results]} == [result, result, result]
553
+ end
554
+ end
555
+ - spec("[!vbhvz] sleeps N seconds after each task if `sleep` option specified.") do
556
+ new_bm = proc {|kwargs|
557
+ Benchmarker::Benchmark.new(**kwargs).scope do
558
+ empty_task do nil end
559
+ task "foo" do nil end
560
+ task "bar" do nil end
561
+ end
562
+ }
563
+ #
564
+ bm = new_bm.call({})
565
+ start = Time.now
566
+ capture_sio { bm.__send__(:invoke_tasks) }
567
+ ok {Time.now - start} < 0.1
568
+ #
569
+ bm = new_bm.call({sleep: 1})
570
+ start = Time.now
571
+ capture_sio { bm.__send__(:invoke_tasks) }
572
+ ok {Time.now - start} > 3.0
235
573
  end
236
574
  end
237
- end
238
575
 
239
- def test__calc_average
240
- sos = proc do |label, user, sys, total, real|
241
- t = Benchmarker::TASK.new(label)
242
- t.user, t.sys, t.total, t.real = user, sys, total, real
243
- t
244
- end
245
- all_tasks = []
246
- all_tasks << [
247
- sos.call("Haruhi", 11.1, 0.2, 11.3, 11.3),
248
- sos.call("Mikuru", 14.1, 0.2, 14.3, 14.1),
249
- sos.call("Yuki", 10.1, 0.2, 10.3, 10.4),
250
- sos.call("Itsuki", 12.1, 0.2, 12.3, 12.1),
251
- sos.call("Kyon", 13.1, 0.2, 13.3, 13.5),
252
- ]
253
- all_tasks << [
254
- sos.call("Haruhi", 11.1, 0.2, 11.3, 11.9),
255
- sos.call("Mikuru", 14.1, 0.2, 14.3, 14.2),
256
- sos.call("Yuki", 10.1, 0.2, 10.3, 10.6),
257
- sos.call("Itsuki", 12.1, 0.2, 12.3, 12.5),
258
- sos.call("Kyon", 13.1, 0.2, 13.3, 13.3),
259
- ]
260
- all_tasks << [
261
- sos.call("Haruhi", 11.1, 0.2, 11.3, 11.5),
262
- sos.call("Mikuru", 14.1, 0.2, 14.3, 14.8),
263
- sos.call("Yuki", 10.1, 0.2, 10.3, 10.9),
264
- sos.call("Itsuki", 12.1, 0.2, 12.3, 12.7),
265
- sos.call("Kyon", 13.1, 0.2, 13.3, 13.9),
266
- ]
267
- all_tasks << [
268
- sos.call("Haruhi", 11.1, 0.2, 11.3, 11.3),
269
- sos.call("Mikuru", 14.1, 0.2, 14.3, 14.2),
270
- sos.call("Yuki", 10.1, 0.2, 10.3, 10.3),
271
- sos.call("Itsuki", 12.1, 0.2, 12.3, 12.8),
272
- sos.call("Kyon", 13.1, 0.2, 13.3, 13.4),
273
- ]
274
- all_tasks << [
275
- sos.call("Haruhi", 11.1, 0.2, 11.3, 11.6),
276
- sos.call("Mikuru", 14.1, 0.2, 14.3, 14.2),
277
- sos.call("Yuki", 10.1, 0.2, 10.3, 10.6),
278
- sos.call("Itsuki", 12.1, 0.2, 12.3, 12.4),
279
- sos.call("Kyon", 13.1, 0.2, 13.3, 13.3),
280
- ]
281
- all_tasks << [
282
- sos.call("Haruhi", 11.1, 0.2, 11.3, 11.3),
283
- sos.call("Mikuru", 14.1, 0.2, 14.3, 14.8),
284
- sos.call("Yuki", 10.1, 0.2, 10.3, 10.3),
285
- sos.call("Itsuki", 12.1, 0.2, 12.3, 12.2),
286
- sos.call("Kyon", 13.1, 0.2, 13.3, 13.7),
287
- ]
288
- #
289
- expected = <<'END'
290
-
291
- ## Remove Min & Max min cycle max cycle
292
- Haruhi 11.3000 (#1) 11.9000 (#2)
293
- 11.3000 (#6) 11.6000 (#5)
294
- Mikuru 14.1000 (#1) 14.8000 (#6)
295
- 14.2000 (#2) 14.8000 (#3)
296
- Yuki 10.3000 (#6) 10.9000 (#3)
297
- 10.3000 (#4) 10.6000 (#5)
298
- Itsuki 12.1000 (#1) 12.8000 (#4)
299
- 12.2000 (#6) 12.7000 (#3)
300
- Kyon 13.3000 (#5) 13.9000 (#3)
301
- 13.3000 (#2) 13.7000 (#6)
302
-
303
- ## Average of 2 user sys total real
304
- Haruhi 11.1000 0.2000 11.3000 11.4000
305
- Mikuru 14.1000 0.2000 14.3000 14.2000
306
- Yuki 10.1000 0.2000 10.3000 10.5000
307
- Itsuki 12.1000 0.2000 12.3000 12.4500
308
- Kyon 13.1000 0.2000 13.3000 13.4500
309
- END
310
- #
311
- spec "calculates average times of tasks." do
312
- avg_tasks = nil
313
- sout, serr = dummy_io() do
314
- runner = Benchmarker::RUNNER.new(:cycle=>2)
315
- avg_tasks = runner.__send__(:_calc_averages, all_tasks, 2)
316
- runner.__send__(:_report_average_section, avg_tasks)
576
+ + topic('#ignore_skipped_tasks()') do
577
+ def task_names(bm)
578
+ bm.instance_eval {@entries}.collect {|t,_| t.name}
579
+ end
580
+ - spec("[!5gpo7] removes skipped tasks and leaves other tasks.") do
581
+ bm = Benchmarker::Benchmark.new().scope do
582
+ empty_task do nil end
583
+ task "foo", skip: "not installed" do nil end
584
+ task "bar", skip: "not installed" do nil end
585
+ task "baz" do nil end
586
+ end
587
+ capture_sio { bm.__send__(:invoke_tasks) }
588
+ ok {task_names(bm)} == ["foo", "bar", "baz"]
589
+ bm.__send__(:ignore_skipped_tasks)
590
+ ok {task_names(bm)} == ["baz"]
317
591
  end
318
- ok {sout} == expected
319
592
  end
320
- end
321
593
 
322
- def test__get_average_section_title
323
- spec "returns 'Average of N (=x-2*y)' string if label width is enough wide." do
324
- runner = Benchmarker::RUNNER.new(:width=>24, :cycle=>5, :extra=>1)
325
- title = runner.__send__(:_get_average_section_title)
326
- ok {title} == "Average of 5 (=7-2*1)"
594
+ + topic('#report_environment()') do
595
+ - spec("[!rx7nn] prints ruby version, platform, several options, and so on.") do
596
+ bm = Benchmarker::Benchmark.new(title: "string concat", loop: 1000, inverse: true)
597
+ sout, serr = capture_sio { bm.__send__(:report_environment) }
598
+ ok {serr} == ""
599
+ ok {sout} =~ /^## title: +string concat$/
600
+ ok {sout} =~ /^## options: +loop=1000, iter=1, extra=0, inverse=true$/
601
+ ok {sout} =~ /^## benchmarker: +release \d+\.\d+\.\d+$/
602
+ ok {sout} =~ /^## ruby engine: +\w+ \(engine version .*\)$/
603
+ ok {sout} =~ /^## ruby platform: +.+$/
604
+ ok {sout} =~ /^## ruby path: +.+$/
605
+ ok {sout} =~ /^## compiler: +.*$/
606
+ ok {sout} =~ /^## cpu model: +.+$/
607
+ end
327
608
  end
328
- spec "returns 'Average of N' string if label width is not enough wide." do
329
- runner = Benchmarker::RUNNER.new(:width=>23, :cycle=>5, :extra=>1)
330
- title = runner.__send__(:_get_average_section_title)
331
- ok {title} == "Average of 5"
609
+
610
+ fixture :bm5 do
611
+ bm = Benchmarker::Benchmark.new(iter: 5, extra: 2).scope do
612
+ task "foo" do nil end
613
+ task "bar" do nil end
614
+ end
615
+ entries = bm.instance_eval{@entries}
616
+ #ok {entries[0][1]}.is_a?(Benchmarker::Result)
617
+ #ok {entries[1][1]}.is_a?(Benchmarker::Result)
618
+ #
619
+ entries[0][1].add(Benchmarker::TimeSet.new(1.1, 2.1, 3.1, 4.3))
620
+ entries[0][1].add(Benchmarker::TimeSet.new(1.2, 2.2, 3.2, 4.1))
621
+ entries[0][1].add(Benchmarker::TimeSet.new(1.3, 2.3, 3.3, 4.4))
622
+ entries[0][1].add(Benchmarker::TimeSet.new(1.4, 2.4, 3.4, 4.5))
623
+ entries[0][1].add(Benchmarker::TimeSet.new(1.5, 2.5, 3.5, 4.9))
624
+ entries[0][1].add(Benchmarker::TimeSet.new(1.6, 2.6, 3.6, 4.2))
625
+ entries[0][1].add(Benchmarker::TimeSet.new(1.7, 2.7, 3.7, 4.6))
626
+ entries[0][1].add(Benchmarker::TimeSet.new(1.8, 2.8, 3.8, 4.8))
627
+ entries[0][1].add(Benchmarker::TimeSet.new(1.9, 2.9, 3.9, 4.7))
628
+ #
629
+ entries[1][1].add(Benchmarker::TimeSet.new(1.1, 2.1, 3.1, 4.3))
630
+ entries[1][1].add(Benchmarker::TimeSet.new(1.2, 2.2, 3.2, 4.1))
631
+ entries[1][1].add(Benchmarker::TimeSet.new(1.3, 2.3, 3.3, 4.4))
632
+ entries[1][1].add(Benchmarker::TimeSet.new(1.4, 2.4, 3.4, 4.5))
633
+ entries[1][1].add(Benchmarker::TimeSet.new(1.5, 2.5, 3.5, 4.9))
634
+ entries[1][1].add(Benchmarker::TimeSet.new(1.6, 2.6, 3.6, 4.2))
635
+ entries[1][1].add(Benchmarker::TimeSet.new(1.7, 2.7, 3.7, 4.6))
636
+ entries[1][1].add(Benchmarker::TimeSet.new(1.8, 2.8, 3.8, 4.8))
637
+ entries[1][1].add(Benchmarker::TimeSet.new(1.9, 2.9, 3.9, 4.7))
638
+ #
639
+ bm
332
640
  end
333
- end
334
641
 
335
- end
642
+ + topic('#_removed_minmax()') do
643
+ - spec("[!uxe7e] removes best and worst results if 'extra' option specified.") do |bm5|
644
+ bm5.__send__(:_remove_minmax)
645
+ arr = bm5.instance_eval{@entries}.collect {|task, r|
646
+ real_list = []
647
+ r.each {|t| real_list << t.real }
648
+ [task.name, real_list]
649
+ }
650
+ ok {arr} == [
651
+ ["foo", [4.30, 4.40, 4.50, 4.60, 4.70]],
652
+ ["bar", [4.30, 4.40, 4.50, 4.60, 4.70]],
653
+ ]
654
+ end
655
+ - spec("[!is6ll] returns removed min and max data.") do |bm5|
656
+ rows = bm5.__send__(:_remove_minmax)
657
+ ok {rows} == [
658
+ ["foo", 4.10, "(#2)", 4.90, "(#5)"],
659
+ [nil , 4.20, "(#6)", 4.80, "(#8)"],
660
+ ["bar", 4.10, "(#2)", 4.90, "(#5)"],
661
+ [nil , 4.20, "(#6)", 4.80, "(#8)"],
662
+ ]
663
+ end
664
+ - spec("[!xwddz] sets removed best and worst results into JSON data.") do |bm5|
665
+ bm5.__send__(:_remove_minmax)
666
+ ok {bm5.instance_eval{@jdata}} == {
667
+ :RemovedMinMax => [
668
+ ["foo", 4.10, "(#2)", 4.90, "(#5)"],
669
+ [nil , 4.20, "(#6)", 4.80, "(#8)"],
670
+ ["bar", 4.10, "(#2)", 4.90, "(#5)"],
671
+ [nil , 4.20, "(#6)", 4.80, "(#8)"],
672
+ ]
673
+ }
674
+ end
675
+ end
336
676
 
677
+ + topic('#_render_minmax()') do
678
+ - spec("[!p71ax] returns rendered string.") do |bm5|
679
+ rows = bm5.__send__(:_remove_minmax)
680
+ str = bm5.__send__(:_render_minmax, rows)
681
+ ok {str} == <<"END"
337
682
 
338
- class Benchmarker::Task_TC
339
- include Oktest::TestCase
340
-
341
- def before
342
- @task1 = Benchmarker::TASK.new("label1")
343
- @task1.user = 1.5
344
- @task1.sys = 0.5
345
- @task1.total = 2.0
346
- @task1.real = 2.25
347
- @task2 = Benchmarker::TASK.new("label1")
348
- @task2.user = 1.125
349
- @task2.sys = 0.25
350
- @task2.total = 1.375
351
- @task2.real = 1.5
352
- end
683
+ ## Removed Min & Max min iter max iter
684
+ foo \e[0;36m 4.1000\e[0m \e[0;35m (#2)\e[0m \e[0;36m 4.9000\e[0m \e[0;35m (#5)\e[0m
685
+ \e[0;36m 4.2000\e[0m \e[0;35m (#6)\e[0m \e[0;36m 4.8000\e[0m \e[0;35m (#8)\e[0m
686
+ bar \e[0;36m 4.1000\e[0m \e[0;35m (#2)\e[0m \e[0;36m 4.9000\e[0m \e[0;35m (#5)\e[0m
687
+ \e[0;36m 4.2000\e[0m \e[0;35m (#6)\e[0m \e[0;36m 4.8000\e[0m \e[0;35m (#8)\e[0m
688
+ END
689
+ end
690
+ end
353
691
 
354
- def test_initialize
355
- t = nil
356
- spec "takes label and loop." do
357
- t = Benchmarker::TASK.new("label1", 123)
358
- ok {t.label} == "label1"
359
- ok {t.loop} == 123
692
+ + topic('#_calc_average()') do
693
+ - spec("[!qu29s] calculates average of real times for each task.") do |bm5|
694
+ rows = bm5.__send__(:_calc_average)
695
+ ok {rows} == [
696
+ ["foo", 1.50, 2.50, 3.50, 4.50],
697
+ ["bar", 1.50, 2.50, 3.50, 4.50],
698
+ ]
699
+ end
700
+ - spec("[!jxf28] sets average results into JSON data.") do |bm5|
701
+ bm5.__send__(:_calc_average)
702
+ ok {bm5.instance_eval{@jdata}} == {
703
+ :Average => [
704
+ ["foo", 1.50, 2.50, 3.50, 4.50],
705
+ ["bar", 1.50, 2.50, 3.50, 4.50],
706
+ ]
707
+ }
708
+ end
360
709
  end
361
- spec "sets all times to zero." do
362
- ok {t.user} == 0.0
363
- ok {t.sys} == 0.0
364
- ok {t.total} == 0.0
365
- ok {t.real} == 0.0
710
+
711
+ + topic('#_render_average()') do
712
+ - spec("[!j9wlv] returns rendered string.") do |bm5|
713
+ rows = bm5.__send__(:_calc_average)
714
+ str = bm5.__send__(:_render_average, rows)
715
+ ok {str} == <<"END"
716
+
717
+ ## Average of 5 (=9-2*2) user sys total real
718
+ foo 1.5000 2.5000 3.5000 \e[0;36m 4.5000\e[0m
719
+ bar 1.5000 2.5000 3.5000 \e[0;36m 4.5000\e[0m
720
+ END
721
+ end
366
722
  end
367
- end
368
723
 
369
- def test_run
370
- spec "yields block for @loop times." do
371
- task = Benchmarker::TASK.new("label2")
372
- i = 0
373
- task.run { i += 1 }
374
- ok {i} == i
375
- task.loop = 3
376
- i = 0
377
- task.run { i += 1 }
378
- ok {i} == 3
379
- end
380
- spec "measures times." do
381
- task = Benchmarker::TASK.new("label2")
382
- task.user = task.sys = task.total = task.real = -1.0
383
- task.run { nil }
384
- delta = 0.001
385
- ok {task.user }.in_delta?(0.0, delta)
386
- ok {task.sys }.in_delta?(0.0, delta)
387
- ok {task.total}.in_delta?(0.0, delta)
388
- ok {task.real }.in_delta?(0.0, delta)
724
+ + topic('#report_stats()') do
725
+ - spec("[!0jn7d] sorts results by real sec.") do
726
+ bm = Benchmarker::Benchmark.new().scope do
727
+ task "foo" do nil end
728
+ task "bar" do nil end
729
+ task "baz" do nil end
730
+ end
731
+ entries = bm.instance_eval{@entries}
732
+ entries[0][1].add(Benchmarker::TimeSet.new(1.1, 2.1, 3.2, 4.3))
733
+ entries[1][1].add(Benchmarker::TimeSet.new(1.1, 2.1, 3.2, 3.3))
734
+ entries[2][1].add(Benchmarker::TimeSet.new(1.1, 2.1, 3.2, 5.3))
735
+ #
736
+ sout, serr = capture_sio { bm.__send__(:report_stats) }
737
+ ok {sout} == <<'END'
738
+
739
+ ## Ranking real
740
+ bar 3.3000 (100.0%) ********************
741
+ foo 4.3000 ( 76.7%) ***************
742
+ baz 5.3000 ( 62.3%) ************
743
+
744
+ ## Matrix real [1] [2] [3]
745
+ [1] bar 3.3000 100.0% 130.3% 160.6%
746
+ [2] foo 4.3000 76.7% 100.0% 123.3%
747
+ [3] baz 5.3000 62.3% 81.1% 100.0%
748
+ END
749
+ end
389
750
  end
390
- end
391
751
 
392
- def test_add
393
- spec "returns self." do
394
- ok {@task1.add(@task2)}.same?(@task1)
752
+ fixture :pairs do
753
+ [
754
+ ["foo", 1.11],
755
+ ["bar", 2.22],
756
+ ["baz", 3.33],
757
+ ]
395
758
  end
396
- spec "adds other's times into self." do
397
- ok {@task1.user } == 2.625
398
- ok {@task1.sys } == 0.75
399
- ok {@task1.total} == 3.375
400
- ok {@task1.real } == 3.75
759
+
760
+ + topic('#_render_ranking()') do
761
+ - spec("[!2lu55] calculates ranking data and sets it into JSON data.") do |bm, pairs|
762
+ bm.__send__(:_render_ranking, pairs)
763
+ ok {bm.instance_eval{@jdata}} == {
764
+ :Ranking => [
765
+ ["foo", 1.11, "100.0%", "0.90 times/sec", "********************"],
766
+ ["bar", 2.22, "50.0%", "0.45 times/sec", "**********" ],
767
+ ["baz", 3.33, "33.3%", "0.30 times/sec", "*******" ],
768
+ ]
769
+ }
770
+ end
771
+ - spec("[!55x8r] returns rendered string of ranking.") do |bm, pairs|
772
+ str = bm.__send__(:_render_ranking, pairs)
773
+ ok {str} == <<"END"
774
+
775
+ ## Ranking real
776
+ foo \e[0;36m 1.1100\e[0m (100.0%) ********************
777
+ bar \e[0;36m 2.2200\e[0m ( 50.0%) **********
778
+ baz \e[0;36m 3.3300\e[0m ( 33.3%) *******
779
+ END
780
+ end
401
781
  end
402
- end
403
782
 
404
- def test_sub
405
- spec "returns self." do
406
- ok {@task1.sub(@task2)}.same?(@task1)
783
+ + topic('#_render_matrix()') do
784
+ - spec("[!2lu55] calculates ranking data and sets it into JSON data.") do |bm, pairs|
785
+ bm.__send__(:_render_matrix, pairs)
786
+ ok {bm.instance_eval{@jdata}} == {
787
+ :Matrix => [
788
+ ["[1] foo", 1.11, "100.0%", "200.0%", "300.0%"],
789
+ ["[2] bar", 2.22, "50.0%", "100.0%", "150.0%"],
790
+ ["[3] baz", 3.33, "33.3%", "66.7%", "100.0%"],
791
+ ]
792
+ }
793
+ end
794
+ - spec("[!rwfxu] returns rendered string of matrix.") do |bm, pairs|
795
+ str = bm.__send__(:_render_matrix, pairs)
796
+ ok {str} == <<"END"
797
+
798
+ ## Matrix real [1] [2] [3]
799
+ [1] foo \e[0;36m 1.1100\e[0m 100.0% 200.0% 300.0%
800
+ [2] bar \e[0;36m 2.2200\e[0m 50.0% 100.0% 150.0%
801
+ [3] baz \e[0;36m 3.3300\e[0m 33.3% 66.7% 100.0%
802
+ END
803
+ end
407
804
  end
408
- spec "substracts other's times from self." do
409
- ok {@task1.user } == 0.375
410
- ok {@task1.sys } == 0.25
411
- ok {@task1.total} == 0.625
412
- ok {@task1.real } == 0.75
805
+
806
+ + topic('#write_outfile()') do
807
+ - spec("[!o8ah6] writes result data into JSON file if 'outfile' option specified.") do
808
+ tmpfile = "tmp#{rand().to_s[2..6]}.json"
809
+ at_end { File.unlink tmpfile if File.exist?(tmpfile) }
810
+ jdata = {
811
+ :Ranking => [
812
+ ["foo", 1.11, "100.0%", "0.90 times/sec", "********************"],
813
+ ["bar", 2.22, "50.0%", "0.45 times/sec", "**********" ],
814
+ ["baz", 3.33, "33.3%", "0.30 times/sec", "*******" ],
815
+ ],
816
+ :Matrix => [
817
+ ["[1] foo", 1.11, "100.0%", "200.0%", "300.0%"],
818
+ ["[2] bar", 2.22, "50.0%", "100.0%", "150.0%"],
819
+ ["[3] baz", 3.33, "33.3%", "66.7%", "100.0%"],
820
+ ],
821
+ }
822
+ #
823
+ bm1 = Benchmarker::Benchmark.new()
824
+ bm1.instance_eval { @jdata = jdata }
825
+ bm1.__send__(:write_outfile)
826
+ ok {tmpfile}.NOT.file_exist?
827
+ #
828
+ bm2 = Benchmarker::Benchmark.new(outfile: tmpfile)
829
+ bm2.instance_eval { @jdata = jdata }
830
+ bm2.__send__(:write_outfile)
831
+ ok {tmpfile}.file_exist?
832
+ actual = JSON.load(File.read(tmpfile))
833
+ ok {actual} == {"Ranking"=>jdata[:Ranking], "Matrix"=>jdata[:Matrix]}
834
+ end
413
835
  end
836
+
837
+ + topic('#colorize?') do
838
+ - spec("[!cy10n] returns true if '-c' option specified.") do
839
+ bm = Benchmarker.new(colorize: true)
840
+ ok {bm.__send__(:colorize?)} == true
841
+ capture_sio do
842
+ ok {bm.__send__(:colorize?)} == true
843
+ end
844
+ end
845
+ - spec("[!e0gcz] returns false if '-C' option specified.") do
846
+ bm = Benchmarker.new(colorize: false)
847
+ ok {bm.__send__(:colorize?)} == false
848
+ capture_sio do
849
+ ok {bm.__send__(:colorize?)} == false
850
+ end
851
+ end
852
+ - spec("[!6v90d] returns result of `Color.colorize?` if neither '-c' nor '-C' specified.") do
853
+ bm = Benchmarker.new()
854
+ ok {bm.__send__(:colorize?)} == true
855
+ capture_sio do
856
+ ok {bm.__send__(:colorize?)} == false
857
+ end
858
+ end
859
+ end
860
+
414
861
  end
415
862
 
416
- def test_mul
417
- spec "returns self." do
418
- ok {@task1.mul(2)}.same?(@task1)
863
+
864
+ + topic(Benchmarker::Scope) do
865
+
866
+ fixture :bm do
867
+ Benchmarker::Benchmark.new()
419
868
  end
420
- spec "multiplies times with n." do
421
- ok {@task1.user } == 3.0
422
- ok {@task1.sys } == 1.0
423
- ok {@task1.total} == 4.0
424
- ok {@task1.real } == 4.5
869
+
870
+ fixture :scope do |bm|
871
+ Benchmarker::Scope.new(bm)
425
872
  end
426
- end
427
873
 
428
- def test_div
429
- spec "returns self." do
430
- ok {@task1.div(2)}.same?(@task1)
874
+ + topic('#task()') do
875
+ - spec("[!j6pmr] creates new task object.") do |scope|
876
+ task = scope.task "name1", tag: "abc" do end
877
+ ok {task}.is_a?(Benchmarker::Task)
878
+ ok {task.name} == "name1"
879
+ ok {task.tag} == "abc"
880
+ end
881
+ - spec("[!kh7r9] define empty-loop task if name is nil.") do |scope|
882
+ task = scope.task nil do end
883
+ ok {task}.is_a?(Benchmarker::Task)
884
+ ok {task.name} == nil
885
+ end
886
+ + case_when("[!843ju] when code argument provided...") do
887
+ - spec("[!bwfak] code argument and block argument are exclusive.") do |scope|
888
+ pr = proc { scope.task "foo", "x = 1+1" do nil end }
889
+ ok {pr}.raise?(Benchmarker::TaskError, "task(\"foo\"): cannot accept String argument when block argument given.")
890
+ end
891
+ - spec("[!4dm9q] generates block argument if code argument passed.") do |scope|
892
+ x = 0
893
+ task = scope.task "foo", "x += 1", binding()
894
+ ok {task.instance_eval{@block}}.is_a?(Proc)
895
+ task.instance_eval{@block}.call()
896
+ ok {x} == 100
897
+ end
898
+ end
431
899
  end
432
- spec "divides times by n." do
433
- ok {@task1.user } == 0.75
434
- ok {@task1.sys } == 0.25
435
- ok {@task1.total} == 1.0
436
- ok {@task1.real } == 1.125
900
+
901
+ + topic('#empty_task()') do
902
+ - spec("[!ycoch] creates new empty-loop task object.") do |scope|
903
+ task = scope.empty_task do end
904
+ ok {task}.is_a?(Benchmarker::Task)
905
+ ok {task.name} == nil
906
+ end
437
907
  end
438
- end
439
908
 
440
- def test_SELF_average
441
- klass = Benchmarker::TASK
442
- spec "returns empty task when argument is empty." do
443
- t = klass.average([])
444
- ok {t.label} == nil
445
- ok {t.user} == 0.0
446
- end
447
- spec "create new task with label." do
448
- t = klass.average([@task1, @task2])
449
- ok {t.label} == @task1.label
450
- not_ok {t.label}.same?(@task1)
451
- end
452
- spec "returns averaged task." do
453
- t = klass.average([@task1, @task2, @task1, @task2])
454
- ok {t.user } == (@task1.user + @task2.user ) / 2
455
- ok {t.sys } == (@task1.sys + @task2.sys ) / 2
456
- ok {t.total} == (@task1.total + @task2.total) / 2
457
- ok {t.real } == (@task1.real + @task2.real ) / 2
909
+ + topic('#before()') do
910
+ - spec("[!2ir4q] defines 'before' hook.") do |scope, bm|
911
+ called = false
912
+ scope.before do called = true end
913
+ ok {called} == false
914
+ bm.__send__(:call_hook, :before)
915
+ ok {called} == true
916
+ end
458
917
  end
459
- end
460
918
 
461
- end
919
+ + topic('#after()') do
920
+ - spec("[!05up6] defines 'after' hook.") do |scope, bm|
921
+ called = false
922
+ scope.after do called = true end
923
+ ok {called} == false
924
+ bm.__send__(:call_hook, :after)
925
+ ok {called} == true
926
+ end
927
+ end
462
928
 
929
+ + topic('#before_all()') do
930
+ - spec("[!1oier] defines 'before_all' hook.") do |scope, bm|
931
+ called = false
932
+ scope.before_all do called = true end
933
+ ok {called} == false
934
+ bm.__send__(:call_hook, :before_all)
935
+ ok {called} == true
936
+ end
937
+ end
463
938
 
464
- class Benchmarker::Reporter_TC
465
- include Oktest::TestCase
939
+ + topic('#after_all()') do
940
+ - spec("[!z7xop] defines 'after_all' hook.") do |scope, bm|
941
+ called = false
942
+ scope.after_all do called = true end
943
+ ok {called} == false
944
+ bm.__send__(:call_hook, :after_all)
945
+ ok {called} == true
946
+ end
947
+ end
466
948
 
467
- def before
468
- @buf = ""
469
- @r = Benchmarker::Reporter.new(:out=>@buf)
470
- end
949
+ + topic('#validate()') do
950
+ - spec("[!q2aev] defines validator.") do
951
+ bm = Benchmarker::Benchmark.new()
952
+ scope = Benchmarker::Scope.new(bm)
953
+ ok {bm.instance_eval{@hooks[:validate]}} == nil
954
+ scope.validate do |ret| end
955
+ ok {bm.instance_eval{@hooks[:validate]}} != nil
956
+ ok {bm.instance_eval{@hooks[:validate]}}.is_a?(Proc)
957
+ end
958
+ end
471
959
 
472
- def test_initialize
473
- spec "takes :out, :err, :width, and :format options." do
474
- r = Benchmarker::Reporter.new(:out=>$stderr, :err=>$stdout, :width=>123, :format=>"%10.1f")
475
- ok {r.out}.same?($stderr)
476
- ok {r.err}.same?($stdout)
477
- ok {r.label_width} == 123
478
- ok {r.format_time} == "%10.1f"
960
+ + topic('#assert()') do
961
+ - spec("[!a0c7e] do nothing if assertion succeeded.") do |scope|
962
+ capture_sio do
963
+ pr = proc { scope.assert 1+1 == 2, "1+1 is 2" }
964
+ ok {pr}.NOT.raise?
965
+ end
966
+ end
967
+ - spec("[!5vmbc] raises error if assertion failed.") do |scope|
968
+ capture_sio do
969
+ pr = proc { scope.assert 1+1 == 1, "1+1 is not 1" }
970
+ ok {pr}.raise?(Benchmarker::ValidationFailed, "1+1 is not 1")
971
+ end
972
+ end
973
+ - spec("[!7vt5l] puts newline if assertion failed.") do |scope|
974
+ sout, serr = capture_sio do
975
+ pr = proc { scope.assert true, "" }
976
+ ok {pr}.NOT.raise?(Benchmarker::ValidationFailed)
977
+ end
978
+ ok {sout} == ""
979
+ #
980
+ sout, serr = capture_sio do
981
+ pr = proc { scope.assert false, "" }
982
+ ok {pr}.raise?(Benchmarker::ValidationFailed)
983
+ end
984
+ ok {sout} == "\n"
985
+ end
986
+ - spec("[!mhw59] makes error backtrace compact.") do |scope|
987
+ capture_sio do
988
+ pr = proc { scope.assert false, "" }
989
+ ok {pr}.raise?(Benchmarker::ValidationFailed) do |exc|
990
+ ok {exc.backtrace}.all? {|x| x !~ /benchmarker\.rb/ }
991
+ end
992
+ end
993
+ end
479
994
  end
480
- end
481
995
 
482
- def test__switch_out_to_err
483
- spec "switches @out to @err temporarily." do
484
- sout, serr = dummy_io() do
485
- r = Benchmarker::Reporter.new()
486
- r.write("Haruhi\n")
487
- r._switch_out_to_err() do
488
- r.write("Sasaki\n")
996
+ + topic('#assert_eq()') do
997
+ - spec("[!8m6bh] do nothing if ectual == expected.") do |scope|
998
+ capture_sio do
999
+ pr = proc { scope.assert_eq 1+1, 2 }
1000
+ ok {pr}.NOT.raise?
1001
+ end
1002
+ end
1003
+ - spec("[!f9ey6] raises error unless actual == expected.") do |scope|
1004
+ capture_sio do
1005
+ pr = proc { scope.assert_eq 'a'*3, 'aa' }
1006
+ ok {pr}.raise?(Benchmarker::ValidationFailed, '"aaa" == "aa": failed.')
489
1007
  end
490
- r.write("Kyon\n")
491
1008
  end
492
- ok {sout} == "Haruhi\nKyon\n"
493
- ok {serr} == "Sasaki\n"
494
1009
  end
1010
+
495
1011
  end
496
1012
 
497
- def test_label_width=()
498
- spec "sets @label_width." do
499
- @r.label_width = 123
500
- ok {@r.label_width} == 123
501
- end
502
- spec "sets @format_label, too." do
503
- ok {@r.instance_variable_get('@format_label')} == "%-123s"
1013
+
1014
+ + topic(Benchmarker::Task) do
1015
+
1016
+ + topic('#invoke()') do
1017
+ + case_when("[!s2f6v] when task block is build from repeated code...") do
1018
+ - spec("[!i2r8o] error when number of loop is less than 100.") do
1019
+ capture_sio do
1020
+ pr = proc do
1021
+ Benchmarker.scope(loop: 100) do
1022
+ task "foo", "x = 1+1"
1023
+ end
1024
+ end
1025
+ ok {pr}.NOT.raise?
1026
+ #
1027
+ pr = proc do
1028
+ Benchmarker.scope(loop: 99) do
1029
+ task "foo", "x = 1+1"
1030
+ end
1031
+ end
1032
+ ok {pr}.raise?(Benchmarker::TaskError, 'task("foo"): number of loop (=99) should be >= 100, but not.')
1033
+ end
1034
+ end
1035
+ - spec("[!kzno6] error when number of loop is not a multiple of 100.") do
1036
+ capture_sio do
1037
+ pr = proc do
1038
+ Benchmarker.scope(loop: 200) do
1039
+ task "foo", "x = 1+1"
1040
+ end
1041
+ end
1042
+ ok {pr}.NOT.raise?
1043
+ #
1044
+ pr = proc do
1045
+ Benchmarker.scope(loop: 250) do
1046
+ task "foo", "x = 1+1"
1047
+ end
1048
+ end
1049
+ ok {pr}.raise?(Benchmarker::TaskError, 'task("foo"): number of loop (=250) should be a multiple of 100, but not.')
1050
+ end
1051
+ end
1052
+ - spec("[!gbukv] changes number of loop to 1/100.") do
1053
+ capture_sio do
1054
+ called = 0
1055
+ Benchmarker.scope(loop: 200) do
1056
+ task "foo", "called +=1", binding()
1057
+ end
1058
+ ok {called} == 200
1059
+ end
1060
+ end
1061
+ end
1062
+ - spec("[!frq25] kicks GC before calling task block.") do
1063
+ capture_sio do
1064
+ rec = recorder()
1065
+ rec.record_method(GC, :start)
1066
+ called = false
1067
+ Benchmarker.scope() do
1068
+ task "foo" do called = true end
1069
+ end
1070
+ ok {called} == true
1071
+ ok {rec[0].obj} == GC
1072
+ ok {rec[0].name} == :start
1073
+ end
1074
+ end
1075
+ - spec("[!tgql6] invokes block N times.") do
1076
+ cnt = 0
1077
+ task = Benchmarker::Task.new("name1") do cnt += 1 end
1078
+ task.invoke(3)
1079
+ ok {cnt} == 3
1080
+ end
1081
+ - spec("[!9e5pr] returns TimeSet object.") do
1082
+ task = Benchmarker::Task.new("name1") do nil end
1083
+ ret = task.invoke()
1084
+ ok {ret}.is_a?(Benchmarker::TimeSet)
1085
+ end
1086
+ - spec("[!zw4kt] yields validator with result value of block.") do
1087
+ task = Benchmarker::Task.new("name1", tag: "curr") do 234 end
1088
+ args = nil
1089
+ task.invoke() do |*a| args = a end
1090
+ ok {args} == [234, "name1", "curr"]
1091
+ end
504
1092
  end
1093
+
505
1094
  end
506
1095
 
507
- def test_format_time=()
508
- spec "sets @format_time." do
509
- @r.format_time = "%10.2f"
510
- ok {@r.format_time} == "%10.2f"
1096
+
1097
+ + topic(Benchmarker::TimeSet) do
1098
+
1099
+ + topic('#-()') do
1100
+ - spec("[!cpwgf] returns new TimeSet object.") do
1101
+ t1 = Benchmarker::TimeSet.new(2.0, 3.0, 4.0, 5.0)
1102
+ t2 = Benchmarker::TimeSet.new(2.5, 3.5, 5.0, 5.25)
1103
+ t3 = t2 - t1
1104
+ ok {t3} != t1
1105
+ ok {t3} != t2
1106
+ ok {t3.user} == 0.5
1107
+ ok {t3.sys} == 0.5
1108
+ ok {t3.total} == 1.0
1109
+ ok {t3.real} == 0.25
1110
+ end
511
1111
  end
512
- spec "sets @format_header, too." do
513
- ok {@r.instance_variable_get('@format_header')} == "%10s"
1112
+
1113
+ + topic('#div()') do
1114
+ - spec("[!4o9ns] returns new TimeSet object which values are divided by n.") do
1115
+ t1 = Benchmarker::TimeSet.new(2.5, 3.5, 5.0, 5.25)
1116
+ t2 = t1.div(100)
1117
+ ok {t2.user } == 0.025
1118
+ ok {t2.sys } == 0.035
1119
+ ok {t2.total} == 0.050
1120
+ ok {t2.real } == 0.0525
1121
+ end
514
1122
  end
1123
+
515
1124
  end
516
1125
 
517
- def test_write
518
- spec "writes arguments to @out with '<<' operator." do
519
- @r.write("Haruhi", nil, 32)
520
- ok {@buf} == "Haruhi32"
1126
+
1127
+ + topic(Benchmarker::Result) do
1128
+
1129
+ fixture :r do
1130
+ Benchmarker::Result.new
521
1131
  end
522
- spec "saves the last argument." do
523
- ok {@r.instance_variable_get('@_prev')} == 32
1132
+
1133
+ + topic('#add()') do
1134
+ - spec("[!thyms] adds timeset and returns self.") do |r|
1135
+ t = Benchmarker::TimeSet.new(1.0, 2.0, 3.0, 4.0)
1136
+ r.add(t)
1137
+ ok {r[0]} == t
1138
+ end
524
1139
  end
525
- spec "returns self." do
526
- ok {@r.write()}.same?(@r)
1140
+
1141
+ + topic('#clear()') do
1142
+ - spec("[!fxrn6] clears timeset array.") do |r|
1143
+ ok {r.length} == 0
1144
+ r.add(Benchmarker::TimeSet.new(1.0, 2.0, 3.0, 4.0))
1145
+ r.add(Benchmarker::TimeSet.new(0.0, 0.0, 0.0, 0.0))
1146
+ ok {r.length} == 2
1147
+ r.clear()
1148
+ ok {r.length} == 0
1149
+ end
527
1150
  end
528
- end
529
1151
 
530
- def test_report_section_title
531
- ret = @r.report_section_title("SOS")
532
- spec "prints newline at first." do
533
- ok {@buf} =~ /\A\n/
1152
+ + topic('#skipped?') do
1153
+ - spec("[!bvzk9] returns true if reason has set, or returns false.") do |r|
1154
+ ok {r.skipped?} == false
1155
+ r.skipped = "why skipped"
1156
+ ok {r.skipped?} == true
1157
+ end
534
1158
  end
535
- spec "prints section title with @format_label." do
536
- ok {@buf} =~ /\A\n## SOS {24}/
1159
+
1160
+ + topic('#remove_minmax()') do
1161
+ - spec("[!b55zh] removes best and worst timeset and returns them.") do |r|
1162
+ klass = Benchmarker::TimeSet
1163
+ arr = [
1164
+ klass.new(0.1, 0.1, 0.1, 0.3),
1165
+ klass.new(0.1, 0.1, 0.1, 0.1),
1166
+ klass.new(0.1, 0.1, 0.1, 0.4),
1167
+ klass.new(0.1, 0.1, 0.1, 0.5),
1168
+ klass.new(0.1, 0.1, 0.1, 0.9),
1169
+ klass.new(0.1, 0.1, 0.1, 0.2),
1170
+ klass.new(0.1, 0.1, 0.1, 0.6),
1171
+ klass.new(0.1, 0.1, 0.1, 0.8),
1172
+ klass.new(0.1, 0.1, 0.1, 0.7),
1173
+ ]
1174
+ #
1175
+ r1 = Benchmarker::Result.new
1176
+ arr.each {|t| r1.add(t) }
1177
+ removed = r1.remove_minmax(1)
1178
+ ok {removed} == [
1179
+ [0.1, 2, 0.9, 5],
1180
+ ]
1181
+ vals = []; r1.each {|t| vals << t.real }
1182
+ ok {vals} == [0.3, 0.4, 0.5, 0.2, 0.6, 0.8, 0.7]
1183
+ #
1184
+ r2 = Benchmarker::Result.new
1185
+ arr.each {|t| r2.add(t) }
1186
+ removed = r2.remove_minmax(2)
1187
+ ok {removed} == [
1188
+ [0.1, 2, 0.9, 5],
1189
+ [0.2, 6, 0.8, 8],
1190
+ ]
1191
+ vals = []; r2.each {|t| vals << t.real }
1192
+ ok {vals} == [0.3, 0.4, 0.5, 0.6, 0.7]
1193
+ end
537
1194
  end
538
- spec "returns self." do
539
- ok {ret}.same?(@r)
1195
+
1196
+ + topic('#calc_average()') do
1197
+ - spec("[!b91w3] returns average of timeddata.") do |r|
1198
+ klass = Benchmarker::TimeSet
1199
+ arr = [
1200
+ klass.new(0.1, 0.1, 0.3, 0.3),
1201
+ klass.new(0.2, 0.1, 0.3, 0.1),
1202
+ klass.new(0.3, 0.1, 0.3, 0.4),
1203
+ klass.new(0.4, 0.1, 0.3, 0.5),
1204
+ klass.new(0.5, 0.1, 0.3, 0.9),
1205
+ klass.new(0.6, 0.1, 0.3, 0.2),
1206
+ klass.new(0.7, 0.1, 0.3, 0.6),
1207
+ klass.new(0.8, 0.1, 0.3, 0.8),
1208
+ klass.new(0.9, 0.1, 0.3, 0.7),
1209
+ ]
1210
+ arr.each {|t| r.add(t) }
1211
+ t = r.calc_average()
1212
+ ok {t}.is_a?(klass)
1213
+ ok {t.user }.in_delta?(0.5, 0.000000001)
1214
+ ok {t.sys }.in_delta?(0.1, 0.000000001)
1215
+ ok {t.total}.in_delta?(0.3, 0.000000001)
1216
+ ok {t.real }.in_delta?(0.5, 0.000000001)
1217
+ end
1218
+
540
1219
  end
1220
+
541
1221
  end
542
1222
 
543
- def test_report_section_headers
544
- args = ["user", "sys", "total", "real"]
545
- ret = @r.report_section_headers(*args)
546
- spec "prints headers." do
547
- ok {@buf} == " user sys total real\n"
548
- end
549
- spec "prints newline at end." do
550
- ok {@buf} =~ /\n\z/
551
- end
552
- spec "returns self." do
553
- ok {ret}.same?(@r)
1223
+
1224
+ + topic(Benchmarker::Color) do
1225
+
1226
+ + topic('.colorize?()') do
1227
+ - spec("[!fc741] returns true if stdout is a tty, else returns false.") do
1228
+ ok {Benchmarker::Color.colorize?} == true
1229
+ capture_sio do
1230
+ ok {Benchmarker::Color.colorize?} == false
1231
+ end
1232
+ end
554
1233
  end
1234
+
555
1235
  end
556
1236
 
557
- def test_report_section_header
558
- ret = @r.report_section_header("Haruhi")
559
- spec "prints header with @format_header." do
560
- ok {@buf} == " Haruhi"
561
- @buf[0..-1] = ""
562
- @r.format_time = "%5.2f"
563
- @r.report_section_header("SOS")
564
- ok {@buf} == " SOS"
565
- end
566
- spec "returns self." do
567
- ok {ret}.same?(@r)
1237
+
1238
+ + topic(Benchmarker::Misc) do
1239
+
1240
+ + topic('.environment_info()') do
1241
+ - spec("[!w1xfa] returns environment info in key-value list.") do
1242
+ arr = Benchmarker::Misc.environment_info()
1243
+ ok {arr}.is_a?(Array)
1244
+ ok {arr[0][0]} == "benchmarker"
1245
+ ok {arr[1][0]} == "ruby engine"
1246
+ ok {arr[2][0]} == "ruby version"
1247
+ ok {arr[3][0]} == "ruby platform"
1248
+ ok {arr[4][0]} == "ruby path"
1249
+ ok {arr[5][0]} == "compiler"
1250
+ ok {arr[6][0]} == "os name"
1251
+ ok {arr[7][0]} == "cpu model"
1252
+ ok {arr[8]} == nil
1253
+ end
568
1254
  end
569
- end
570
1255
 
571
- def test_report_task_label
572
- ret = @r.report_task_label("Sasaki")
573
- spec "prints task label with @format_label." do
574
- ok {@buf} == "Sasaki "
575
- @buf[0..-1] = ""
576
- @r.instance_variable_set('@format_label', "%-12s")
577
- @r.report_task_label("Sasakisan")
578
- ok {@buf} == "Sasakisan "
1256
+ + topic('.os_name()') do
1257
+ - spec("[!83vww] returns string representing os name.") do
1258
+ str = Benchmarker::Misc.os_name()
1259
+ ok {str}.is_a?(String)
1260
+ end
579
1261
  end
580
- spec "returns self." do
581
- ok {ret}.same?(@r)
1262
+
1263
+ + topic('.cpu_model()') do
1264
+ - spec("[!6ncgq] returns string representing cpu model.") do
1265
+ str = Benchmarker::Misc.cpu_model()
1266
+ ok {str}.is_a?(String)
1267
+ end
582
1268
  end
1269
+
583
1270
  end
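For reference, a usage sketch of the helper exercised above; environment_info() returns an array of [key, value] pairs, so printing it is a short loop:

require 'benchmarker'

Benchmarker::Misc.environment_info().each do |key, val|
  puts "#{key}: #{val}"   # one line per entry, e.g. the ruby version and cpu model
end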
584
1271
 
585
- def test_report_task_times
586
- ret = @r.report_task_times(1.1, 1.2, 1.3, 1.4)
587
- spec "prints task times with @format_time." do
588
- ok {@buf} == " 1.1000 1.2000 1.3000 1.4000\n"
1272
+
1273
+ + topic(Benchmarker::OptionParser) do
1274
+
1275
+ fixture :p do
1276
+ Benchmarker::OptionParser.new("hvq", "wnixoF", "I")
589
1277
  end
590
- spec "returns self." do
591
- ok {ret}.same?(@r)
1278
+
1279
+ + topic('#parse()') do
1280
+ - spec("[!2gq7g] returns options and keyvals.")do |p|
1281
+ argv = ['-hqn100', '-i', '5', '-I', '--help', '--foo=bar']
1282
+ options, keyvals = p.parse(argv)
1283
+ ok {options} == {'h'=>true, 'q'=>true, 'n'=>'100', 'i'=>'5', 'I'=>true}
1284
+ ok {keyvals} == {'help'=>true, 'foo'=>'bar'}
1285
+ ok {argv} == []
1286
+ end
1287
+ - spec("[!ulfpu] stops parsing when '--' found.") do |p|
1288
+ argv = ['-h', '--', '-i', '5']
1289
+ options, keyvals = p.parse(argv)
1290
+ ok {options} == {'h'=>true}
1291
+ ok {keyvals} == {}
1292
+ ok {argv} == ['-i', '5']
1293
+ end
1294
+ - spec("[!8f085] regards '--long=option' as key-value.") do |p|
1295
+ argv = ['--foo=bar', '--baz']
1296
+ options, keyvals = p.parse(argv)
1297
+ ok {options} == {}
1298
+ ok {keyvals} == {'foo'=>'bar', 'baz'=>true}
1299
+ ok {argv} == []
1300
+ end
1301
+ - spec("[!dkq1u] parses short options.") do |p|
1302
+ argv = ['-h', '-qn100', '-vi', '10', '-x', '2']
1303
+ options, keyvals = p.parse(argv)
1304
+ ok {options} == {'h'=>true, 'q'=>true, 'n'=>'100', 'v'=>true, 'i'=>'10', 'x'=>'2'}
1305
+ ok {keyvals} == {}
1306
+ ok {argv} == []
1307
+ end
1308
+ - spec("[!8xqla] error when required argument is not provided.") do |p|
1309
+ argv = ['-qn']
1310
+ a_ = nil
1311
+ p.parse(argv) do |*a| a_ = a end
1312
+ ok {a_} == ["-n: argument required."]
1313
+ end
1314
+ - spec("[!tmx6o] error when option is unknown.") do |p|
1315
+ argv = ['-hz']
1316
+ a_ = nil
1317
+ p.parse(argv) do |*a| a_ = a end
1318
+ ok {a_} == ["-z: unknown option."]
1319
+ end
592
1320
  end
593
- end
594
1321
 
595
- def test_report_task_time
596
- ret = @r.report_task_time(12.3)
597
- spec "prints task time with @format_time." do
598
- ok {@buf} == " 12.3000"
1322
+ + topic('.parse_options()') do
1323
+ - spec("[!v19y5] converts option argument into integer if necessary.") do
1324
+ argv = ['-h', '-n100', '-vi', '10', '-x', '2', '-I5000']
1325
+ options, keyvals = Benchmarker::OptionParser.parse_options(argv)
1326
+ ok {options} == {"h"=>true, "n"=>100, "v"=>true, "i"=>10, "x"=>2, "I"=>5000}
1327
+ ok {keyvals} == {}
1328
+ end
1329
+ - spec("[!frfz2] yields error message when argument of '-n/i/x/I' is not an integer.") do
1330
+ err = nil
1331
+ Benchmarker::OptionParser.parse_options(['-nxx']) do |s| err = s end
1332
+ ok {err} == "-n xx: integer expected."
1333
+ #
1334
+ err = nil
1335
+ Benchmarker::OptionParser.parse_options(['-iyy']) do |s| err = s end
1336
+ ok {err} == "-i yy: integer expected."
1337
+ #
1338
+ err = nil
1339
+ Benchmarker::OptionParser.parse_options(['-xzz']) do |s| err = s end
1340
+ ok {err} == "-x zz: integer expected."
1341
+ #
1342
+ err = nil
1343
+ Benchmarker::OptionParser.parse_options(['-Izz']) do |s| err = s end
1344
+ ok {err} == "-Izz: integer expected."
1345
+ end
1346
+ - spec("[!nz15w] convers '-s' option value into number (integer or float).") do
1347
+ options, _ = Benchmarker::OptionParser.parse_options(['-s', '123'])
1348
+ ok {options} == {"s"=>123}
1349
+ options, _ = Benchmarker::OptionParser.parse_options(['-s', '0.5'])
1350
+ ok {options} == {"s"=>0.5}
1351
+ end
1352
+ - spec("[!3x1m7] yields error message when argument of '-s' is not a number.") do
1353
+ err = nil
1354
+ Benchmarker::OptionParser.parse_options(['-s', 'aa']) do |s| err = s end
1355
+ ok {err} == "-s aa: number expected."
1356
+ end
1357
+ - spec("[!emavm] yields error message when argumetn of '-F' option is invalid.") do
1358
+ err = nil
1359
+ Benchmarker::OptionParser.parse_options(['-F', 'xyz']) do |s| err = s end
1360
+ ok {err} == "-F xyz: invalid filter (expected operator is '=' or '!=')."
1361
+ #
1362
+ err = nil
1363
+ Benchmarker::OptionParser.parse_options(['-F', 'name=xyz']) do |s| err = s end
1364
+ ok {err} == "-F name=xyz: expected 'task=...' or 'tag=...'."
1365
+ end
599
1366
  end
600
- spec "returns self." do
601
- ok {ret}.same?(@r)
1367
+
1368
+ + topic('.help_message()') do
1369
+ - spec("[!jnm2w] returns help message.") do
1370
+ msg = Benchmarker::OptionParser.help_message("bench.rb")
1371
+ ok {msg} == <<'END'
1372
+ Usage: bench.rb [<options>]
1373
+ -h, --help : help message
1374
+ -v : print Benchmarker version
1375
+ -w <N> : width of task name (default: 30)
1376
+ -n <N> : loop N times in each benchmark (default: 1)
1377
+ -i <N> : iterates all benchmark tasks N times (default: 1)
1378
+ -x <N> : ignore worst N results and best N results (default: 0)
1379
+ -I[<N>] : print inverse number (= N/sec) (default: same as '-n')
1380
+ -o <file> : output file in JSON format
1381
+ -q : quiet a little (suppress output of each iteration)
1382
+ -c : enable colorized output
1383
+ -C : disable colorized output
1384
+ -s <N> : sleep N seconds after each benchmark task
1385
+ -S : print sample code
1386
+ -F task=<...> : filter benchmark task by name (operator: '=' or '!=')
1387
+ -F tag=<...> : filter benchmark task by tag (operator: '=' or '!=')
1388
+ --<key>[=<val>]: define global variable `$opt_<key> = "<val>"`
1389
+ END
1390
+ end
602
1391
  end
1392
+
603
1393
  end
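For context, a sketch of driving the parser exercised above directly. The three constructor strings mirror the fixture; reading them as no-argument flags, flags that require an argument, and flags that take an optional argument is an inference from these specs, not documented API:

require 'benchmarker'

parser = Benchmarker::OptionParser.new("hvq", "wnixoF", "I")
options, keyvals = parser.parse(['-qn100', '--foo=bar'])
p options   #=> {"q"=>true, "n"=>"100"}   (values stay strings; parse_options() converts them)
p keyvals   #=> {"foo"=>"bar"}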
604
1394
 
605
- end
606
1395
 
1396
+ + topic(Benchmarker) do
607
1397
 
608
- class Benchmarker::Stats_TC
609
- include Oktest::TestCase
610
-
611
- def before
612
- @out = ""
613
- @r = Benchmarker::Reporter.new(:out=>@out)
614
- @stats = Benchmarker::Stats.new(@r)
615
- #
616
- @tasks = []
617
- sos = proc do |label, user, sys, total, real|
618
- t = Benchmarker::TASK.new(label)
619
- t.user, t.sys, t.total, t.real = user, sys, total, real
620
- @tasks << t
621
- end
622
- sos.call("Haruhi", 11.1, 0.2, 11.3, 11.5)
623
- sos.call("Mikuru", 14.1, 0.2, 14.3, 14.5)
624
- sos.call("Yuki", 10.1, 0.2, 10.3, 10.5)
625
- sos.call("Itsuki", 12.1, 0.2, 12.3, 12.5)
626
- sos.call("Kyon", 13.1, 0.2, 13.3, 13.5)
627
- end
1398
+ after do
1399
+ Benchmarker::OPTIONS.clear()
1400
+ end
628
1401
 
629
- def test_initialize
630
- r = Benchmarker::Reporter.new
631
- stats = Benchmarker::Stats.new(r)
632
- spec "takes reporter object." do
633
- ok {stats.instance_variable_get('@report')} == r
1402
+ + topic('.parse_cmdopts()') do
1403
+ - spec("[!348ip] parses command-line options.") do
1404
+ ok {Benchmarker::OPTIONS} == {}
1405
+ argv = "-q -n 1000 -i10 -x2 -I -o tmp.json".split()
1406
+ options, keyvals = Benchmarker.parse_cmdopts(argv)
1407
+ ok {options} == {'q'=>true, 'n'=>1000, 'i'=>10, 'x'=>2, 'I'=>true, 'o'=>"tmp.json"}
1408
+ ok {keyvals} == {}
1409
+ end
1410
+ - spec("[!snqxo] exits with status code 1 if error in command option.") do
1411
+ argv = ["-n abc"]
1412
+ sout, serr = capture_sio do
1413
+ pr = proc { Benchmarker.parse_cmdopts(argv) }
1414
+ ok {pr}.raise?(SystemExit) do |exc|
1415
+ ok {exc.status} == 1
1416
+ end
1417
+ end
1418
+ ok {serr} == "-n abc: integer expected.\n"
1419
+ ok {sout} == ""
1420
+ end
1421
+ - spec("[!p3b93] prints help message if '-h' or '--help' option specified.") do
1422
+ ["-h", "--help"].each do |arg|
1423
+ sout, serr = capture_sio do
1424
+ pr = proc { Benchmarker.parse_cmdopts([arg]) }
1425
+ ok {pr}.raise?(SystemExit) do |exc|
1426
+ ok {exc.status} == 0
1427
+ end
1428
+ end
1429
+ ok {serr} == ""
1430
+ ok {sout} =~ /^Usage: \S+ \[<options>\]$/
1431
+ ok {sout} =~ /^ -h, --help : help message$/
1432
+ end
1433
+ end
1434
+ - spec("[!iaryj] prints version number if '-v' option specified.") do
1435
+ argv = ["-v"]
1436
+ sout, serr = capture_sio do
1437
+ pr = proc { Benchmarker.parse_cmdopts(argv) }
1438
+ ok {pr}.raise?(SystemExit) do |exc|
1439
+ ok {exc.status} == 0
1440
+ end
1441
+ end
1442
+ ok {serr} == ""
1443
+ ok {sout} == Benchmarker::VERSION + "\n"
1444
+ end
1445
+ - spec("[!nrxsb] prints sample code if '-S' option specified.") do
1446
+ argv = ["-S"]
1447
+ sout, serr = capture_sio do
1448
+ pr = proc { Benchmarker.parse_cmdopts(argv) }
1449
+ ok {pr}.raise?(SystemExit) do |exc|
1450
+ ok {exc.status} == 0
1451
+ end
1452
+ end
1453
+ ok {serr} == ""
1454
+ ok {sout} == Benchmarker::Misc.sample_code()
1455
+ end
1456
+ - spec("[!s7y6x] keeps command-line options in order to overwirte existing options.") do
1457
+ ok {Benchmarker::OPTIONS} == {}
1458
+ argv = "-q -n 1000 -i10 -x2 -I -o tmp.json".split()
1459
+ Benchmarker.parse_cmdopts(argv)
1460
+ ok {Benchmarker::OPTIONS} == {
1461
+ :quiet=>true, :loop=>1000, :iter=>10, :extra=>2,
1462
+ :inverse=>true, :outfile=>"tmp.json",
1463
+ }
1464
+ end
1465
+ - spec("[!nexi8] option '-w' specifies task name width.") do
1466
+ ok {Benchmarker::OPTIONS} == {}
1467
+ Benchmarker.parse_cmdopts(['-w', '10'])
1468
+ ok {Benchmarker::OPTIONS} == {width: 10}
1469
+ sout, serr = capture_sio do
1470
+ Benchmarker.scope(width: 20) do
1471
+ task "foo" do nil end
1472
+ end
1473
+ end
1474
+ ok {sout} =~ /^foo 0.0000/
1475
+ end
1476
+ - spec("[!raki9] option '-n' specifies count of loop.") do
1477
+ ok {Benchmarker::OPTIONS} == {}
1478
+ Benchmarker.parse_cmdopts(['-n', '17'])
1479
+ ok {Benchmarker::OPTIONS} == {loop: 17}
1480
+ count = 0
1481
+ sout, serr = capture_sio do
1482
+ Benchmarker.scope(width: 20) do
1483
+ task "foo" do count += 1 end
1484
+ end
1485
+ end
1486
+ ok {count} == 17
1487
+ end
1488
+ - spec("[!mt7lw] option '-i' specifies number of iteration.") do
1489
+ ok {Benchmarker::OPTIONS} == {}
1490
+ Benchmarker.parse_cmdopts(['-i', '5'])
1491
+ ok {Benchmarker::OPTIONS} == {iter: 5}
1492
+ count = 0
1493
+ sout, serr = capture_sio do
1494
+ Benchmarker.scope(width: 20) do
1495
+ task "foo" do count += 1 end
1496
+ end
1497
+ end
1498
+ ok {sout} =~ /^## \(#5\)/
1499
+ ok {sout} !~ /^## \(#6\)/
1500
+ n = 0
1501
+ sout.scan(/^## \(#\d+\).*\nfoo +/) do n += 1 end
1502
+ ok {n} == 5
1503
+ ok {sout} !~ /^## Removed Min & Max/
1504
+ ok {sout} =~ /^## Average of 5 +user/
1505
+ end
1506
+ - spec("[!7f2k3] option '-x' specifies number of best/worst tasks removed.") do
1507
+ ok {Benchmarker::OPTIONS} == {}
1508
+ Benchmarker.parse_cmdopts(['-i', '5', '-x', '1'])
1509
+ ok {Benchmarker::OPTIONS} == {iter: 5, extra: 1}
1510
+ count = 0
1511
+ sout, serr = capture_sio do
1512
+ Benchmarker.scope(width: 20) do
1513
+ task "foo" do count += 1 end
1514
+ end
1515
+ end
1516
+ ok {sout} =~ /^## \(#7\)/
1517
+ ok {sout} !~ /^## \(#8\)/
1518
+ n = 0
1519
+ sout.scan(/^## \(#\d+\).*\nfoo +/) do n += 1 end
1520
+ ok {n} == 7
1521
+ ok {sout} =~ /^## Removed Min & Max/
1522
+ ok {sout} =~ /^## Average of 5 \(=7-2\*1\)/
1523
+ end
1524
+ - spec("[!r0439] option '-I' specifies inverse switch.") do
1525
+ ok {Benchmarker::OPTIONS} == {}
1526
+ Benchmarker.parse_cmdopts(['-I'])
1527
+ ok {Benchmarker::OPTIONS} == {inverse: true}
1528
+ sout, serr = capture_sio do
1529
+ fib = proc {|n| n <= 1 ? n : fib.call(n-1) + fib.call(n-2) }
1530
+ Benchmarker.scope(width: 20) do
1531
+ task "foo" do fib.call(15) end
1532
+ end
1533
+ end
1534
+ ok {sout} =~ /^## Ranking real times\/sec$/
1535
+ ok {sout} =~ /^foo +\d+\.\d+ \(100\.0%\) +\d+\.\d+$/
1536
+ end
1537
+ - spec("[!4c73x] option '-o' specifies outout JSON file.") do
1538
+ ok {Benchmarker::OPTIONS} == {}
1539
+ outfile = "tmp99.json"
1540
+ Benchmarker.parse_cmdopts(['-o', outfile])
1541
+ ok {Benchmarker::OPTIONS} == {outfile: outfile}
1542
+ at_exit { File.unlink outfile if File.exist?(outfile) }
1543
+ ok {outfile}.not_exist?
1544
+ sout, serr = capture_sio do
1545
+ Benchmarker.scope(width: 20) do
1546
+ task "foo" do nil end
1547
+ end
1548
+ end
1549
+ ok {outfile}.file_exist?
1550
+ end
1551
+ - spec("[!02ml5] option '-q' specifies quiet mode.") do
1552
+ ok {Benchmarker::OPTIONS} == {}
1553
+ Benchmarker.parse_cmdopts(['-q', '-i10', '-x1'])
1554
+ ok {Benchmarker::OPTIONS} == {quiet: true, iter: 10, extra: 1}
1555
+ count = 0
1556
+ sout, serr = capture_sio do
1557
+ Benchmarker.scope(width: 20) do
1558
+ task "foo" do count += 1 end
1559
+ end
1560
+ end
1561
+ ok {count} == 12
1562
+ ok {sout} !~ /^## \(#\d\)/
1563
+ ok {sout} =~ /^## Removed Min & Max/
1564
+ ok {sout} =~ /^## Average of 10 \(=12-2\*1\)/
1565
+ end
1566
+ - spec("[!e5hv0] option '-c' specifies colorize enabled.") do
1567
+ ok {Benchmarker::OPTIONS} == {}
1568
+ Benchmarker.parse_cmdopts(['-c'])
1569
+ ok {Benchmarker::OPTIONS} == {colorize: true}
1570
+ sout, serr = capture_sio(tty: false) do
1571
+ Benchmarker.scope() do
1572
+ task "foo" do nil end
1573
+ end
1574
+ end
1575
+ ok {sout} =~ /\e\[0;36m.*?\e\[0m/
1576
+ end
1577
+ - spec("[!e5hv0] option '-c' specifies colorize enabled.") do
1578
+ ok {Benchmarker::OPTIONS} == {}
1579
+ Benchmarker.parse_cmdopts(['-C'])
1580
+ ok {Benchmarker::OPTIONS} == {colorize: false}
1581
+ sout, serr = capture_sio(tty: true) do
1582
+ Benchmarker.scope() do
1583
+ task "foo" do nil end
1584
+ end
1585
+ end
1586
+ ok {sout} !~ /\e\[0;36m.*?\e\[0m/
1587
+ end
1588
+ - spec("[!muica] option '-F' specifies filter.") do
1589
+ ok {Benchmarker::OPTIONS} == {}
1590
+ Benchmarker.parse_cmdopts(['-F', 'task=ba*'])
1591
+ ok {Benchmarker::OPTIONS} == {filter: 'task=ba*'}
1592
+ called = []
1593
+ sout, serr = capture_sio do
1594
+ Benchmarker.scope(width: 20) do
1595
+ task "foo" do called << "foo" end
1596
+ task "bar", tag: 'curr' do called << "bar" end
1597
+ task "baz" do called << "baz" end
1598
+ end
1599
+ end
1600
+ ok {called} == ["bar", "baz"]
1601
+ #
1602
+ Benchmarker.parse_cmdopts(['-F', 'tag!=curr'])
1603
+ called = []
1604
+ sout, serr = capture_sio do
1605
+ Benchmarker.scope(width: 20) do
1606
+ task "foo" do called << "foo" end
1607
+ task "bar", tag: 'curr' do called << "bar" end
1608
+ task "baz" do called << "baz" end
1609
+ end
1610
+ end
1611
+ ok {called} == ["foo", "baz"]
1612
+ end
1613
+ - spec("[!3khc4] sets global variables if long option specified.") do
1614
+ ok {Benchmarker::OPTIONS} == {}
1615
+ ok {$opt_blabla} == nil
1616
+ Benchmarker.parse_cmdopts(['--blabla=123'])
1617
+ ok {Benchmarker::OPTIONS} == {}
1618
+ ok {$opt_blabla} == "123"
1619
+ Benchmarker.parse_cmdopts(['--blabla'])
1620
+ ok {Benchmarker::OPTIONS} == {}
1621
+ ok {$opt_blabla} == true
1622
+ end
634
1623
  end
635
- #spec "takes :real, :barchar, and :loop options." do
636
- #end
637
1624
  end
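The specs above drive these command-line options through Benchmarker.scope(). For context, a minimal benchmark script they would apply to (task names and bodies are illustrative only):

require 'benchmarker'

## run e.g. as:  ruby bench.rb -n 1000 -i 5 -x 1 -I -o result.json
Benchmarker.scope(width: 20) do
  task "String#+" do
    "foo" + "bar"
  end
  task "format()", tag: 'curr' do   # '-F tag=curr' would run only this task
    format("%s%s", "foo", "bar")
  end
end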
638
1625
 
639
- def test_ranking
640
- expected1 = <<'END'
641
1626
 
642
- ## Ranking real
643
- Yuki 10.5000 (100.0%) ********************
644
- Haruhi 11.5000 ( 91.3%) ******************
645
- Itsuki 12.5000 ( 84.0%) *****************
646
- Kyon 13.5000 ( 77.8%) ****************
647
- Mikuru 14.5000 ( 72.4%) **************
648
- END
649
- expected2 = <<'END'
1627
+ + topic(Benchmark) do
650
1628
 
651
- ## Ranking real
652
- Yuki 10.5000 (100.0%) 95238.10 per sec
653
- Haruhi 11.5000 ( 91.3%) 86956.52 per sec
654
- Itsuki 12.5000 ( 84.0%) 80000.00 per sec
655
- Kyon 13.5000 ( 77.8%) 74074.07 per sec
656
- Mikuru 14.5000 ( 72.4%) 68965.52 per sec
657
- END
658
- spec "prints ranking." do
659
- spec "prints barchart if @numerator is not specified." do
660
- @stats.ranking(@tasks)
661
- ok {@out} == expected1
662
- end
663
- spec "prints inverse number if @numerator specified." do
664
- @out = ""
665
- @r = Benchmarker::Reporter.new(:out=>@out)
666
- @stats = Benchmarker::Stats.new(@r, :numerator=>1000*1000)
667
- @stats.ranking(@tasks)
668
- ok {@out} == expected2
1629
+ + topic('.bm()') do
1630
+ - spec("[!2nf07] defines and runs benchmark.") do
1631
+ called = {foo: 0, bar: 0}
1632
+ sout, serr = capture_sio do
1633
+ Benchmark.bm do |x|
1634
+ x.report("foo") do called[:foo] += 1 end
1635
+ x.report("bar") do called[:bar] += 1 end
1636
+ end
1637
+ end
1638
+ ok {called} == {foo: 1, bar: 1}
1639
+ n = 0
1640
+ sout.scan(/^## +.*\nfoo +.*\nbar +.*/) { n += 1 }
1641
+ ok {n} == 1
1642
+ ok {serr} == ""
669
1643
  end
670
1644
  end
671
- end
672
-
673
- def test_ratio_matrix
674
- expected = <<'END'
675
1645
 
676
- ## Matrix real [01] [02] [03] [04] [05]
677
- [01] Yuki 10.5000 100.0% 109.5% 119.0% 128.6% 138.1%
678
- [02] Haruhi 11.5000 91.3% 100.0% 108.7% 117.4% 126.1%
679
- [03] Itsuki 12.5000 84.0% 92.0% 100.0% 108.0% 116.0%
680
- [04] Kyon 13.5000 77.8% 85.2% 92.6% 100.0% 107.4%
681
- [05] Mikuru 14.5000 72.4% 79.3% 86.2% 93.1% 100.0%
682
- END
683
- spec "prints matrix." do
684
- @stats.ratio_matrix(@tasks)
685
- ok {@out} == expected
1646
+ + topic('.bmbm()') do
1647
+ - spec("[!ezbb8] defines and runs benchmark twice, reports only 2nd result.") do
1648
+ called = {foo: 0, bar: 0}
1649
+ sout, serr = capture_sio do
1650
+ Benchmark.bmbm do |x|
1651
+ x.report("foo") do called[:foo] += 1 end
1652
+ x.report("bar") do called[:bar] += 1 end
1653
+ end
1654
+ end
1655
+ ok {called} == {foo: 2, bar: 2}
1656
+ n = 0
1657
+ sout.scan(/^## +.*\nfoo +.*\nbar +.*/) { n += 1 }
1658
+ ok {n} == 1
1659
+ ok {serr} == ""
1660
+ end
686
1661
  end
687
- end
688
1662
 
689
- end
1663
+ end
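Finally, a usage sketch of the Benchmark.bm()/bmbm() compatibility layer covered above, for scripts originally written against Ruby's standard benchmark library (the report blocks are illustrative):

require 'benchmarker'   # provides Benchmark.bm()/bmbm() entry points

Benchmark.bmbm do |x|
  x.report("Array#join") { (1..1000).to_a.join(",") }
  x.report("inject <<")  { (1..1000).inject("") {|s, n| s << n.to_s << "," } }
end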
690
1664
 
691
1665
 
692
- if __FILE__ == $0
693
- Oktest::run_all()
694
1666
  end