benchmark-perf 0.1.1 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: 701d132071e0459dc7526e5bcb7f0fce1c505b70
- data.tar.gz: f765203b7e2b4f132782c44c027a2853aa7c3162
+ metadata.gz: 0f7284dedb5487f685a88a4de95d40e9ee55dffa
+ data.tar.gz: 3d10614990b23154c9c9f010def6b0fd88ab4d50
  SHA512:
- metadata.gz: 37a9d93cfad38d010814a44c77483bce25c7237c4dee9547d980dec8be0ded2ba5c180ddc22457edd9944e1d99e7472d3b669d217bd42c93efaf6c51f465d11e
- data.tar.gz: b9554efaa85360ae97e4d9ab7f74bbb1950c869e712cfc5cfda3562f753e4c18a34d715a4aa9efb0a4a07efa9852b7e811d39c1ccf37930500c89271aec19a91
+ metadata.gz: 9b73856c9343f95e520e514aaa49bf706cfe83e674191eefb09c7b50fc264d4cadf340ae04ea9d9b38c990ed4b9bcf9030ea5856653efadca6a78ee60c07db99
+ data.tar.gz: 950f7c866e8696dbd79022da75252350e38693922387768221de0d35b23cfc6a221d0485b153973076de39d5cdee3616a1440e80d443471b24726faeb4539fe0
@@ -1,5 +1,16 @@
  # Change log
 
+ ## [v0.2.0] - 2016-11-01
+
+ ### Added
+ * Add variance calculation
+
+ ### Changed
+ * Change to propagate errors from child process when measuring execution time
+
+ ### Fixed
+ * Fix std_dev calculation
+
  ## [v0.1.1] - 2016-10-29
 
  ### Fixed
@@ -9,5 +20,6 @@
 
  Initial release
 
+ [v0.2.0]: https://github.com/peter-murach/benchmark-perf/compare/v0.1.1...v0.2.0
  [v0.1.1]: https://github.com/peter-murach/benchmark-perf/compare/v0.1.0...v0.1.1
  [v0.1.0]: https://github.com/peter-murach/benchmark-perf/compare/v0.1.0
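For context on the "Add variance calculation" and "Fix std_dev calculation" entries: the new `variance` divides the summed squared deviations by the number of measurements (population variance), and `std_dev` is now its square root, where the old implementation divided by `measurements.size - 1`. A quick numeric sketch matching the expectations in the new spec/unit/arithmetic_spec.rb shown later in this diff (the `require` path is assumed from the gem name):

```ruby
require 'benchmark-perf'

m = [1, 2, 3]                 # mean = 2.0, squared deviations: 1, 0, 1
Benchmark::Perf.average(m)    # => 2.0
Benchmark::Perf.variance(m)   # => 0.666... ((1 + 0 + 1) / 3.0)
Benchmark::Perf.std_dev(m)    # => 0.816... (Math.sqrt(2.0 / 3))
```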
@@ -16,24 +16,35 @@ module Benchmark
  # the average of given measurements
  #
  # @api public
- def average(measurements)
+ def self.average(measurements)
  return 0 if measurements.empty?
  measurements.reduce(&:+).to_f / measurements.size
  end
 
+ # Calculate variance of measurements
+ #
+ # @param [Array[Float]] measurements
+ #
+ # @return [Float]
+ #
+ # @api public
+ def self.variance(measurements)
+ return 0 if measurements.empty?
+ avg = average(measurements)
+ total = measurements.reduce(0) do |sum, x|
+ sum + (x - avg)**2
+ end
+ total.to_f / measurements.size
+ end
+
  # Calculate standard deviation
  #
  # @param [Array[Float]] measurements
  #
  # @api public
- def std_dev(measurements)
+ def self.std_dev(measurements)
  return 0 if measurements.empty?
- average = average(measurements)
- Math.sqrt(
- measurements.reduce(0) do |sum, x|
- sum + (x - average)**2
- end.to_f / (measurements.size - 1)
- )
+ Math.sqrt(variance(measurements))
  end
 
  # Run given work and gather time statistics
@@ -43,7 +54,7 @@ module Benchmark
  # @return [Boolean]
  #
  # @api public
- def assert_perform_under(threshold, options = {}, &work)
+ def self.assert_perform_under(threshold, options = {}, &work)
  bench = ExecutionTime.new(options)
  actual, _ = bench.run(&work)
  actual <= threshold
@@ -56,12 +67,10 @@ module Benchmark
  # @return [Boolean]
  #
  # @api public
- def assert_perform_ips(iterations, options = {}, &work)
+ def self.assert_perform_ips(iterations, options = {}, &work)
  bench = Iteration.new(options)
  mean, stddev, _ = bench.run(&work)
  iterations <= (mean + 3 * stddev)
  end
-
- extend Benchmark::Perf
  end # Perf
  end # Benchmark
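With `extend Benchmark::Perf` dropped and the helpers redefined as `self.` methods, callers invoke them directly on the module, as the updated specs later in this diff do. A minimal usage sketch (the benchmarked blocks and thresholds are illustrative only; `samples: 2` mirrors the assertions spec):

```ruby
require 'benchmark-perf' # require path assumed from the gem name

# true if the block's measured execution time stays under 0.01s
Benchmark::Perf.assert_perform_under(0.01, samples: 2) { 'x' * 1_024 }

# true if the measured iterations per second (within three standard
# deviations of the mean) reach 10_000
Benchmark::Perf.assert_perform_ips(10_000) { 'x' * 1_024 }
```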
@@ -52,22 +52,28 @@ module Benchmark
  GC.start
  GC.disable if ENV['BENCH_DISABLE_GC']
 
- reader.close
- time = yield
-
- io.print "%9.6f" % time if io
- Marshal.dump(time, writer)
-
- GC.enable if ENV['BENCH_DISABLE_GC']
- exit!(0) # run without hooks
+ begin
+ reader.close
+ time = yield
+
+ io.print "%9.6f" % time if io
+ Marshal.dump(time, writer)
+ rescue => error
+ Marshal.dump(error, writer)
+ ensure
+ GC.enable if ENV['BENCH_DISABLE_GC']
+ exit!(0) # run without hooks
+ end
  end
 
  writer.close unless writer.closed?
  Process.waitpid(pid)
  begin
- Marshal.load(reader)
- rescue => e
- raise MarshalError, "#{e.class}: #{e.message}"
+ data = Marshal.load(reader)
+ raise data if data.is_a?(Exception)
+ data
+ rescue => error
+ raise MarshalError, "#{error.class}: #{error.message}"
  end
  end
 
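The change above makes the child process marshal any raised exception through the pipe instead of a timing, and the parent re-raises whatever it unmarshals if it is an Exception. A standalone sketch of that pattern (names and the failing work are illustrative, not the gem's API):

```ruby
# Illustrative sketch of propagating a child-process error to the parent
# via Marshal over an IO.pipe, mirroring the technique in the diff above.
reader, writer = IO.pipe

pid = fork do
  reader.close
  begin
    result = Integer('not a number')  # any work that may raise
    Marshal.dump(result, writer)
  rescue => error
    Marshal.dump(error, writer)       # ship the exception instead of a result
  ensure
    exit!(0)                          # skip exit hooks, as in the diff
  end
end

writer.close
Process.waitpid(pid)
data = Marshal.load(reader)
reader.close
raise data if data.is_a?(Exception)   # surfaces ArgumentError in the parent
```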
@@ -77,7 +83,9 @@ module Benchmark
  def run_warmup(&work)
  GC.start
  @warmup.times do
- run_in_subprocess { ::Benchmark.realtime(&work) }
+ run_in_subprocess do
+ ::Benchmark.realtime(&work)
+ end
  end
  end
 
@@ -101,7 +109,6 @@ module Benchmark
 
  range.each do
  GC.start
-
  measurements << run_in_subprocess do
  ::Benchmark.realtime(&work)
  end
@@ -2,6 +2,6 @@
 
  module Benchmark
  module Perf
- VERSION = "0.1.1"
+ VERSION = "0.2.0"
  end # Perf
  end # Benchmark
@@ -0,0 +1,33 @@
+ # encoding: utf-8
+
+ RSpec.describe Benchmark::Perf, 'arithmetic' do
+ context '#average' do
+ it "calculates average without measurements" do
+ expect(Benchmark::Perf.average([])).to eq(0)
+ end
+
+ it "calculates average with measurements" do
+ expect(Benchmark::Perf.average([1,2,3])).to eq(2.0)
+ end
+ end
+
+ context '#variance' do
+ it "calculates variance of no measurements" do
+ expect(Benchmark::Perf.variance([])).to eq(0)
+ end
+
+ it "calculates variance of measurements" do
+ expect(Benchmark::Perf.variance([1,2,3])).to eq(2.to_f/3)
+ end
+ end
+
+ context '#std_dev' do
+ it "calculates standard deviation of no measurements" do
+ expect(Benchmark::Perf.std_dev([])).to eq(0)
+ end
+
+ it "calculates standard deviation of measurements" do
+ expect(Benchmark::Perf.std_dev([1,2,3])).to eq(Math.sqrt(2.to_f/3))
+ end
+ end
+ end
@@ -1,6 +1,6 @@
  # encoding: utf-8
 
- RSpec.describe Benchmark::Perf, '#assert_perform' do
+ RSpec.describe Benchmark::Perf, 'assertions' do
  it "passes asertion by performing under threshold" do
  bench = Benchmark::Perf
  assertion = bench.assert_perform_under(0.01, samples: 2) { 'x' * 1_024 }
@@ -17,6 +17,21 @@ RSpec.describe Benchmark::Perf::ExecutionTime do
  expect(sample).to all(be < 0.01)
  end
 
+ it "doesn't benchmark raised exception" do
+ bench = described_class.new
+ expect {
+ bench.run { raise 'boo' }
+ }.to raise_error(StandardError)
+ end
+
+ it "fails to load marshalled data" do
+ bench = described_class.new
+ allow(Marshal).to receive(:load).and_raise('boo')
+ expect {
+ bench.run { 'x' * 1024 }
+ }.to raise_error(Benchmark::Perf::MarshalError)
+ end
+
  it "measures complex object" do
  bench = described_class.new
  sample = bench.run { {foo: Object.new, bar: :piotr} }
@@ -15,4 +15,11 @@ RSpec.describe Benchmark::Perf::Iteration do
  expect(sample[1]).to be > 5
  expect(sample[2]).to be > 250
  end
+
+ it "does't measure broken code" do
+ bench = described_class.new
+ expect {
+ bench.run { raise 'boo' }
+ }.to raise_error(StandardError, /boo/)
+ end
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: benchmark-perf
  version: !ruby/object:Gem::Version
- version: 0.1.1
+ version: 0.2.0
  platform: ruby
  authors:
  - Piotr Murach
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2016-10-29 00:00:00.000000000 Z
+ date: 2016-11-01 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: bundler
@@ -66,8 +66,8 @@ files:
  - lib/benchmark/perf/iteration.rb
  - lib/benchmark/perf/version.rb
  - spec/spec_helper.rb
+ - spec/unit/arithmetic_spec.rb
  - spec/unit/assertions_spec.rb
- - spec/unit/average_spec.rb
  - spec/unit/execution_time_spec.rb
  - spec/unit/iteration_spec.rb
  - tasks/coverage.rake
@@ -98,8 +98,8 @@ specification_version: 4
  summary: Execution time and iteration performance benchmarking
  test_files:
  - spec/spec_helper.rb
+ - spec/unit/arithmetic_spec.rb
  - spec/unit/assertions_spec.rb
- - spec/unit/average_spec.rb
  - spec/unit/execution_time_spec.rb
  - spec/unit/iteration_spec.rb
  has_rdoc:
@@ -1,11 +0,0 @@
- # encoding: utf-8
-
- RSpec.describe Benchmark::Perf do
- it "calculates average without measurements" do
- expect(Benchmark::Perf.average([])).to eq(0)
- end
-
- it "calculates average with measurements" do
- expect(described_class.average([1,2,3])).to eq(2.0)
- end
- end