minitest 5.12.2
- checksums.yaml +7 -0
- checksums.yaml.gz.sig +1 -0
- data.tar.gz.sig +0 -0
- data/.autotest +34 -0
- data/History.rdoc +1347 -0
- data/Manifest.txt +27 -0
- data/README.rdoc +763 -0
- data/Rakefile +72 -0
- data/design_rationale.rb +52 -0
- data/lib/hoe/minitest.rb +32 -0
- data/lib/minitest.rb +1041 -0
- data/lib/minitest/assertions.rb +726 -0
- data/lib/minitest/autorun.rb +13 -0
- data/lib/minitest/benchmark.rb +455 -0
- data/lib/minitest/expectations.rb +284 -0
- data/lib/minitest/hell.rb +11 -0
- data/lib/minitest/mock.rb +240 -0
- data/lib/minitest/parallel.rb +70 -0
- data/lib/minitest/pride.rb +4 -0
- data/lib/minitest/pride_plugin.rb +142 -0
- data/lib/minitest/spec.rb +335 -0
- data/lib/minitest/test.rb +220 -0
- data/lib/minitest/unit.rb +45 -0
- data/test/minitest/metametameta.rb +105 -0
- data/test/minitest/test_minitest_assertions.rb +1389 -0
- data/test/minitest/test_minitest_benchmark.rb +137 -0
- data/test/minitest/test_minitest_mock.rb +874 -0
- data/test/minitest/test_minitest_reporter.rb +299 -0
- data/test/minitest/test_minitest_spec.rb +1021 -0
- data/test/minitest/test_minitest_test.rb +1055 -0
- metadata +184 -0
- metadata.gz.sig +1 -0
data/lib/minitest/benchmark.rb
@@ -0,0 +1,455 @@
require "minitest/test"
require "minitest/spec"

module Minitest
  ##
  # Subclass Benchmark to create your own benchmark runs. Methods
  # starting with "bench_" get executed on a per-class basis.
  #
  # See Minitest::Assertions

  class Benchmark < Test
    def self.io # :nodoc:
      @io
    end

    def io # :nodoc:
      self.class.io
    end

    def self.run reporter, options = {} # :nodoc:
      @io = reporter.io
      super
    end

    def self.runnable_methods # :nodoc:
      methods_matching(/^bench_/)
    end

    ##
    # Returns a set of ranges stepped exponentially from +min+ to
    # +max+ by powers of +base+. Eg:
    #
    #   bench_exp(2, 16, 2) # => [2, 4, 8, 16]

    def self.bench_exp min, max, base = 10
      min = (Math.log10(min) / Math.log10(base)).to_i
      max = (Math.log10(max) / Math.log10(base)).to_i

      (min..max).map { |m| base ** m }.to_a
    end

    ##
    # Returns a set of ranges stepped linearly from +min+ to +max+ by
    # +step+. Eg:
    #
    #   bench_linear(20, 40, 10) # => [20, 30, 40]

    def self.bench_linear min, max, step = 10
      (min..max).step(step).to_a
    rescue LocalJumpError # 1.8.6
      r = []; (min..max).step(step) { |n| r << n }; r
    end

    ##
    # Specifies the ranges used for benchmarking for that class.
    # Defaults to exponential growth from 1 to 10k by powers of 10.
    # Override if you need different ranges for your benchmarks.
    #
    # See also: ::bench_exp and ::bench_linear.

    def self.bench_range
      bench_exp 1, 10_000
    end

    ##
    # Runs the given +work+, gathering the times of each run. Range
    # and times are then passed to a given +validation+ proc. Outputs
    # the benchmark name and times in tab-separated format, making it
    # easy to paste into a spreadsheet for graphing or further
    # analysis.
    #
    # Ranges are specified by ::bench_range.
    #
    # Eg:
    #
    #   def bench_algorithm
    #     validation = proc { |x, y| ... }
    #     assert_performance validation do |n|
    #       @obj.algorithm(n)
    #     end
    #   end

    def assert_performance validation, &work
      range = self.class.bench_range

      io.print "#{self.name}"

      times = []

      range.each do |x|
        GC.start
        t0 = Minitest.clock_time
        instance_exec(x, &work)
        t = Minitest.clock_time - t0

        io.print "\t%9.6f" % t
        times << t
      end
      io.puts

      validation[range, times]
    end

    ##
    # Runs the given +work+ and asserts that the times gathered fit to
    # match a constant rate (eg, linear slope == 0) within a given
    # +threshold+. Note: because we're testing for a slope of 0, R^2
    # is not a good determining factor for the fit, so the threshold
    # is applied against the slope itself. As such, you probably want
    # to tighten it from the default.
    #
    # See https://www.graphpad.com/guides/prism/8/curve-fitting/reg_intepretingnonlinr2.htm
    # for more details.
    #
    # Fit is calculated by #fit_linear.
    #
    # Ranges are specified by ::bench_range.
    #
    # Eg:
    #
    #   def bench_algorithm
    #     assert_performance_constant 0.9999 do |n|
    #       @obj.algorithm(n)
    #     end
    #   end

    def assert_performance_constant threshold = 0.99, &work
      validation = proc do |range, times|
        a, b, rr = fit_linear range, times
        assert_in_delta 0, b, 1 - threshold
        [a, b, rr]
      end

      assert_performance validation, &work
    end

    ##
    # Runs the given +work+ and asserts that the times gathered fit to
    # match an exponential curve within a given error +threshold+.
    #
    # Fit is calculated by #fit_exponential.
    #
    # Ranges are specified by ::bench_range.
    #
    # Eg:
    #
    #   def bench_algorithm
    #     assert_performance_exponential 0.9999 do |n|
    #       @obj.algorithm(n)
    #     end
    #   end

    def assert_performance_exponential threshold = 0.99, &work
      assert_performance validation_for_fit(:exponential, threshold), &work
    end

    ##
    # Runs the given +work+ and asserts that the times gathered fit to
    # match a logarithmic curve within a given error +threshold+.
    #
    # Fit is calculated by #fit_logarithmic.
    #
    # Ranges are specified by ::bench_range.
    #
    # Eg:
    #
    #   def bench_algorithm
    #     assert_performance_logarithmic 0.9999 do |n|
    #       @obj.algorithm(n)
    #     end
    #   end

    def assert_performance_logarithmic threshold = 0.99, &work
      assert_performance validation_for_fit(:logarithmic, threshold), &work
    end

    ##
    # Runs the given +work+ and asserts that the times gathered fit to
    # match a straight line within a given error +threshold+.
    #
    # Fit is calculated by #fit_linear.
    #
    # Ranges are specified by ::bench_range.
    #
    # Eg:
    #
    #   def bench_algorithm
    #     assert_performance_linear 0.9999 do |n|
    #       @obj.algorithm(n)
    #     end
    #   end

    def assert_performance_linear threshold = 0.99, &work
      assert_performance validation_for_fit(:linear, threshold), &work
    end

    ##
    # Runs the given +work+ and asserts that the times gathered curve
    # fit to match a power curve within a given error +threshold+.
    #
    # Fit is calculated by #fit_power.
    #
    # Ranges are specified by ::bench_range.
    #
    # Eg:
    #
    #   def bench_algorithm
    #     assert_performance_power 0.9999 do |x|
    #       @obj.algorithm
    #     end
    #   end

    def assert_performance_power threshold = 0.99, &work
      assert_performance validation_for_fit(:power, threshold), &work
    end

    ##
    # Takes an array of x/y pairs and calculates the general R^2 value.
    #
    # See: http://en.wikipedia.org/wiki/Coefficient_of_determination

    def fit_error xys
      y_bar  = sigma(xys) { |_, y| y } / xys.size.to_f
      ss_tot = sigma(xys) { |_, y| (y - y_bar) ** 2 }
      ss_err = sigma(xys) { |x, y| (yield(x) - y) ** 2 }

      1 - (ss_err / ss_tot)
    end

    ##
    # To fit a functional form: y = ae^(bx).
    #
    # Takes x and y values and returns [a, b, r^2].
    #
    # See: http://mathworld.wolfram.com/LeastSquaresFittingExponential.html

    def fit_exponential xs, ys
      n     = xs.size
      xys   = xs.zip(ys)
      sxlny = sigma(xys) { |x, y| x * Math.log(y) }
      slny  = sigma(xys) { |_, y| Math.log(y) }
      sx2   = sigma(xys) { |x, _| x * x }
      sx    = sigma xs

      c = n * sx2 - sx ** 2
      a = (slny * sx2 - sx * sxlny) / c
      b = (n * sxlny - sx * slny) / c

      return Math.exp(a), b, fit_error(xys) { |x| Math.exp(a + b * x) }
    end

    ##
    # To fit a functional form: y = a + b*ln(x).
    #
    # Takes x and y values and returns [a, b, r^2].
    #
    # See: http://mathworld.wolfram.com/LeastSquaresFittingLogarithmic.html

    def fit_logarithmic xs, ys
      n     = xs.size
      xys   = xs.zip(ys)
      slnx2 = sigma(xys) { |x, _| Math.log(x) ** 2 }
      slnx  = sigma(xys) { |x, _| Math.log(x) }
      sylnx = sigma(xys) { |x, y| y * Math.log(x) }
      sy    = sigma(xys) { |_, y| y }

      c = n * slnx2 - slnx ** 2
      b = (n * sylnx - sy * slnx) / c
      a = (sy - b * slnx) / n

      return a, b, fit_error(xys) { |x| a + b * Math.log(x) }
    end

    ##
    # Fits the functional form: a + bx.
    #
    # Takes x and y values and returns [a, b, r^2].
    #
    # See: http://mathworld.wolfram.com/LeastSquaresFitting.html

    def fit_linear xs, ys
      n   = xs.size
      xys = xs.zip(ys)
      sx  = sigma xs
      sy  = sigma ys
      sx2 = sigma(xs)  { |x| x ** 2 }
      sxy = sigma(xys) { |x, y| x * y }

      c = n * sx2 - sx ** 2
      a = (sy * sx2 - sx * sxy) / c
      b = (n * sxy - sx * sy) / c

      return a, b, fit_error(xys) { |x| a + b * x }
    end

    ##
    # To fit a functional form: y = ax^b.
    #
    # Takes x and y values and returns [a, b, r^2].
    #
    # See: http://mathworld.wolfram.com/LeastSquaresFittingPowerLaw.html

    def fit_power xs, ys
      n       = xs.size
      xys     = xs.zip(ys)
      slnxlny = sigma(xys) { |x, y| Math.log(x) * Math.log(y) }
      slnx    = sigma(xs)  { |x| Math.log(x) }
      slny    = sigma(ys)  { |y| Math.log(y) }
      slnx2   = sigma(xs)  { |x| Math.log(x) ** 2 }

      b = (n * slnxlny - slnx * slny) / (n * slnx2 - slnx ** 2)
      a = (slny - b * slnx) / n

      return Math.exp(a), b, fit_error(xys) { |x| (Math.exp(a) * (x ** b)) }
    end

    ##
    # Enumerates over +enum+ mapping +block+ if given, returning the
    # sum of the result. Eg:
    #
    #   sigma([1, 2, 3])                # => 1 + 2 + 3 => 6
    #   sigma([1, 2, 3]) { |n| n ** 2 } # => 1 + 4 + 9 => 14

    def sigma enum, &block
      enum = enum.map(&block) if block
      enum.inject { |sum, n| sum + n }
    end

    ##
    # Returns a proc that calls the specified fit method and asserts
    # that the error is within a tolerable threshold.

    def validation_for_fit msg, threshold
      proc do |range, times|
        a, b, rr = send "fit_#{msg}", range, times
        assert_operator rr, :>=, threshold
        [a, b, rr]
      end
    end
  end
end

module Minitest
  ##
  # The spec version of Minitest::Benchmark.

  class BenchSpec < Benchmark
    extend Minitest::Spec::DSL

    ##
    # This is used to define a new benchmark method. You usually don't
    # use this directly; it is intended for those needing to write new
    # performance curve fits (eg: you need a specific polynomial fit).
    #
    # See ::bench_performance_linear for an example of how to use this.

    def self.bench name, &block
      define_method "bench_#{name.gsub(/\W+/, "_")}", &block
    end

    ##
    # Specifies the ranges used for benchmarking for that class.
    #
    #   bench_range do
    #     bench_exp(2, 16, 2)
    #   end
    #
    # See Minitest::Benchmark#bench_range for more details.

    def self.bench_range &block
      return super unless block

      meta = (class << self; self; end)
      meta.send :define_method, "bench_range", &block
    end

    ##
    # Create a benchmark that verifies that the performance is linear.
    #
    #   describe "my class Bench" do
    #     bench_performance_linear "fast_algorithm", 0.9999 do |n|
    #       @obj.fast_algorithm(n)
    #     end
    #   end

    def self.bench_performance_linear name, threshold = 0.99, &work
      bench name do
        assert_performance_linear threshold, &work
      end
    end

    ##
    # Create a benchmark that verifies that the performance is constant.
    #
    #   describe "my class Bench" do
    #     bench_performance_constant "zoom_algorithm!" do |n|
    #       @obj.zoom_algorithm!(n)
    #     end
    #   end

    def self.bench_performance_constant name, threshold = 0.99, &work
      bench name do
        assert_performance_constant threshold, &work
      end
    end

    ##
    # Create a benchmark that verifies that the performance is exponential.
    #
    #   describe "my class Bench" do
    #     bench_performance_exponential "algorithm" do |n|
    #       @obj.algorithm(n)
    #     end
    #   end

    def self.bench_performance_exponential name, threshold = 0.99, &work
      bench name do
        assert_performance_exponential threshold, &work
      end
    end

    ##
    # Create a benchmark that verifies that the performance is logarithmic.
    #
    #   describe "my class Bench" do
    #     bench_performance_logarithmic "algorithm" do |n|
    #       @obj.algorithm(n)
    #     end
    #   end

    def self.bench_performance_logarithmic name, threshold = 0.99, &work
      bench name do
        assert_performance_logarithmic threshold, &work
      end
    end

    ##
    # Create a benchmark that verifies that the performance fits a power curve.
    #
    #   describe "my class Bench" do
    #     bench_performance_power "algorithm" do |n|
    #       @obj.algorithm(n)
    #     end
    #   end

    def self.bench_performance_power name, threshold = 0.99, &work
      bench name do
        assert_performance_power threshold, &work
      end
    end
  end

  Minitest::Spec.register_spec_type(/Bench(mark)?$/, Minitest::BenchSpec)
end
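
Usage notes (editorial sketches, not part of the gem). The assertions above are normally driven from a Minitest::Benchmark subclass whose method names start with "bench_". A minimal sketch follows, assuming a made-up MyList class; the class, its methods, and the chosen ranges and thresholds are invented for illustration, but the calls (bench_exp, bench_range, setup, assert_performance_constant, assert_performance_linear) follow the API defined in this file.

  require "minitest/autorun"
  require "minitest/benchmark"

  # Hypothetical data structure, used only for this sketch.
  class MyList
    def initialize n
      @items = Array.new(n) { |i| i }
    end

    def lookup i
      @items[i]        # expected O(1)
    end

    def scan x
      @items.index(x)  # expected O(n)
    end
  end

  class TestMyListBench < Minitest::Benchmark
    def self.bench_range
      bench_exp 1_000, 1_000_000   # => [1_000, 10_000, 100_000, 1_000_000]
    end

    def setup
      # Build the inputs up front so only the operation under test is timed.
      @lists = {}
      self.class.bench_range.each { |n| @lists[n] = MyList.new n }
    end

    def bench_lookup
      # The slope of time vs. n should stay near 0 for constant-time access.
      assert_performance_constant 0.9999 do |n|
        10_000.times { @lists[n].lookup n / 2 }
      end
    end

    def bench_scan
      # The gathered times should fit a straight line for the linear scan.
      assert_performance_linear 0.99 do |n|
        100.times { @lists[n].scan n - 1 }
      end
    end
  end

Each bench_ method prints its name followed by one tab-separated "%9.6f" time per bench_range entry, so a run yields one spreadsheet-ready row per benchmark, as described in the assert_performance comment.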
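
The fit_* helpers can also be exercised on their own, which helps when writing a custom validation proc for assert_performance. A small sketch on invented data points; instantiating the test class by hand here is purely a way to reach the helper methods and is not how the gem uses them.

  require "minitest/benchmark"

  # In a real run these helpers are called for you by the
  # assert_performance_* assertions via validation_for_fit.
  probe = Minitest::Benchmark.new "probe"

  xs = [1, 2, 3, 4]
  ys = [3.0, 5.0, 7.0, 9.0]   # exactly y = 1 + 2x

  a, b, rr = probe.fit_linear xs, ys
  p [a, b, rr]                # => [1.0, 2.0, 1.0]  (intercept, slope, R^2)

  # validation_for_fit(:linear, 0.99) builds a proc that does essentially:
  #   a, b, rr = fit_linear range, times
  #   assert_operator rr, :>=, 0.99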
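
For spec-style suites, the register_spec_type call at the end of the file means any describe block whose description matches /Bench(mark)?$/ is run with Minitest::BenchSpec, which makes the bench_performance_* class methods available. Another invented sketch; the description string and the summing workload are illustrative only.

  require "minitest/autorun"
  require "minitest/benchmark"

  # The trailing "Bench" makes register_spec_type pick Minitest::BenchSpec
  # for this block instead of the plain Minitest::Spec.
  describe "Array#sum Bench" do
    bench_range { bench_exp 1_000, 100_000 }

    # Building and summing n floats is O(n), so a linear fit should hold.
    bench_performance_linear "sum of n floats", 0.99 do |n|
      Array.new(n) { rand }.sum
    end
  end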