benchmark-perf 0.3.0 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: d7fe9d604c0acf5ef3342b1bb0f7dc4e25c4842be28f2dcfccfa294afc1c7699
4
- data.tar.gz: 9c99fac42b55347306729883c255e7976d7185e94ccf00fdf8985bbcdc40271c
3
+ metadata.gz: b9e96517aac61f030785b74eebd97e06e49e74eda0bb7ce37854ed454817a5f9
4
+ data.tar.gz: 82a06c6df41203a33abbe439fe30fc0df64a2a22c26e5ef730fcb065733e0dbd
5
5
  SHA512:
6
- metadata.gz: cc1775bb46d926fd6c23df11c4c257ead8c5818b05814b34760f1500aa8226ede1e5f46d2be974d2303ba9b2749b58ab75714662ba13485230eaa334ab3d6693
7
- data.tar.gz: 81a6b09450fbcd6bb61ac110cef837e163e8635734b59328c6c1ff06d251804e349de88efe03446ea43ceb8ee5960a19bb63d1f16c0a36e494cdedb819c16006
6
+ metadata.gz: b09c694ef8fe8d30f21407a0a16d3ce8a86ac139257a08876ad1cf2cce298d2dfccee5f2078e75ec2420985bcd8588383a442a9f1e1d85f52a17ea4889b50bb2
7
+ data.tar.gz: 4c4e308e46b4874bf656f15716500149a1d3daa869f330714c478bac1d50cb3b224cb6f2587a385d1adcc1545b45dd003020696a8468dec645d68e6531175c99
@@ -1,5 +1,15 @@
1
1
  # Change log
2
2
 
3
+ ## [v0.4.0] - 2018-09-30
4
+
5
+ ### Changed
6
+ * Change ExecutionTime#run :times argument to :repeat
7
+ * Change ExecutionTime#run to specify accepted values for :repeat argument
8
+ * Change default measurements repeat time to be once
9
+
10
+ ### Fixed
11
+ * Change ExecutionTime#run to correctly generate repeats range
12
+
3
13
  ## [v0.3.0] - 2018-09-16
4
14
 
5
15
  ### Added
@@ -40,6 +50,7 @@
40
50
 
41
51
  Initial release
42
52
 
53
+ [v0.4.0]: https://github.com/piotrmurach/benchmark-perf/compare/v0.3.0...v0.4.0
43
54
  [v0.3.0]: https://github.com/piotrmurach/benchmark-perf/compare/v0.2.1...v0.3.0
44
55
  [v0.2.1]: https://github.com/piotrmurach/benchmark-perf/compare/v0.2.0...v0.2.1
45
56
  [v0.2.0]: https://github.com/piotrmurach/benchmark-perf/compare/v0.1.1...v0.2.0
data/README.md CHANGED
@@ -59,10 +59,10 @@ mean, stddev, iter, elapsed_time = Benchmark::Perf::Iteration.run { ... }
59
59
 
60
60
  ### 2.1 Execution time
61
61
 
62
- By default `30` samples are taken, and `1` sample for the warmup phase. If you need to change number of measurement samples use `:times`:
62
+ By default `1` measurement is taken, and `1` warmup cycle is run. If you need to change number of measurements taken use `:repeat`:
63
63
 
64
64
  ```ruby
65
- mean, std_dev = Benchmark::Perf::ExecutionTime.run(times: 10) { ... }
65
+ mean, std_dev = Benchmark::Perf::ExecutionTime.run(repeat: 10) { ... }
66
66
  ```
67
67
 
68
68
  And to change number of warmup cycles use `:warmup` keyword like so:
@@ -71,8 +71,8 @@ module Benchmark
71
71
 
72
72
  # Perform work x times
73
73
  #
74
- # @param [Integer] times
75
- # how many times sample the code measuremenets
74
+ # @param [Integer] repeat
75
+ #   how many times to repeat the code measurements
76
76
  #
77
77
  # @example
78
78
  # ExecutionTime.run(times: 10) { ... }
@@ -81,12 +81,12 @@ module Benchmark
81
81
  # average and standard deviation
82
82
  #
83
83
  # @api public
84
- def run(times: 30, io: nil, warmup: 1, &work)
85
- range = linear_range(1, times - 1)
84
+ def run(repeat: 1, io: nil, warmup: 1, &work)
85
+ check_greater(repeat, 0)
86
86
  measurements = []
87
87
  run_warmup(warmup: warmup, &work)
88
88
 
89
- range.each do
89
+ repeat.times do
90
90
  GC.start
91
91
  measurements << run_in_subprocess(io: io) do
92
92
  Perf.clock_time(&work)
@@ -97,6 +97,22 @@ module Benchmark
97
97
  [Perf.average(measurements), Perf.std_dev(measurements)]
98
98
  end
99
99
  module_function :run
100
+
101
+ # Check if expected value is greater than minimum
102
+ #
103
+ # @param [Numeric] expected
104
+ # @param [Numeric] min
105
+ #
106
+ # @raise [ArgumentError]
107
+ #
108
+ # @api private
109
+ def check_greater(expected, min)
110
+ unless expected > min
111
+ raise ArgumentError,
112
+ "Repeat value: #{expected} needs to be greater than #{min}"
113
+ end
114
+ end
115
+ module_function :check_greater
100
116
  end # ExecutionTime
101
117
  end # Perf
102
118
  end # Benchmark
@@ -2,6 +2,6 @@
2
2
 
3
3
  module Benchmark
4
4
  module Perf
5
- VERSION = "0.3.0"
5
+ VERSION = "0.4.0"
6
6
  end # Perf
7
7
  end # Benchmark
@@ -3,7 +3,7 @@
3
3
  RSpec.describe Benchmark::Perf, 'assertions' do
4
4
  it "passes assertion by performing under threshold" do
5
5
  bench = Benchmark::Perf
6
- assertion = bench.assert_perform_under(0.01, times: 2) { 'x' * 1_024 }
6
+ assertion = bench.assert_perform_under(0.01, repeat: 2) { 'x' * 1_024 }
7
7
  expect(assertion).to eq(true)
8
8
  end
9
9
 
@@ -7,16 +7,29 @@ RSpec.describe Benchmark::Perf::ExecutionTime do
7
7
 
8
8
  it "provides default benchmark range" do
9
9
  allow(described_class).to receive(:run_in_subprocess).and_return(0.1)
10
- described_class.run { 'x' * 1024 }
11
- expect(described_class).to have_received(:run_in_subprocess).exactly(30).times
10
+ described_class.run(warmup: 0) { 'x' * 1024 }
11
+ expect(described_class).to have_received(:run_in_subprocess).once
12
12
  end
13
13
 
14
14
  it "accepts custom number of samples" do
15
15
  allow(described_class).to receive(:run_in_subprocess).and_return(0.1)
16
- described_class.run(times: 12) { 'x' * 1024 }
16
+ described_class.run(repeat: 12, warmup: 0) { 'x' * 1024 }
17
17
  expect(described_class).to have_received(:run_in_subprocess).exactly(12).times
18
18
  end
19
19
 
20
+ it "doesn't accept range smaller than 1" do
21
+ allow(described_class).to receive(:run_in_subprocess).and_return(0.1)
22
+ described_class.run(repeat: 1, warmup: 1) { 'x' }
23
+ expect(described_class).to have_received(:run_in_subprocess).twice
24
+ end
25
+
26
+ it "doesn't accept range smaller than 1" do
27
+ expect {
28
+ described_class.run(repeat: 0) { 'x' }
29
+ }.to raise_error(ArgumentError,
30
+ 'Repeat value: 0 needs to be greater than 0')
31
+ end
32
+
20
33
  it "provides measurements for 30 samples by default" do
21
34
  sample = described_class.run { 'x' * 1024 }
22
35
  expect(sample).to all(be < 0.01)
@@ -39,7 +52,7 @@ RSpec.describe Benchmark::Perf::ExecutionTime do
39
52
  end
40
53
 
41
54
  it "measures work performance for 10 samples" do
42
- sample = described_class.run(times: 10) { 'x' * 1_000_000 }
55
+ sample = described_class.run(repeat: 10) { 'x' * 1_000_000 }
43
56
  expect(sample.size).to eq(2)
44
57
  expect(sample).to all(be < 0.01)
45
58
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: benchmark-perf
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.3.0
4
+ version: 0.4.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Piotr Murach
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2018-09-16 00:00:00.000000000 Z
11
+ date: 2018-09-30 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: bundler