rubyperf 1.3.2 → 1.3.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/VERSION CHANGED
@@ -1 +1 @@
- 1.3.2
+ 1.3.4
data/lib/perf/meter.rb CHANGED
@@ -43,7 +43,7 @@ module Perf
  @instrumented_methods = {METHOD_TYPE_INSTANCE=>[],METHOD_TYPE_CLASS=>[]}
  @class_methods = []
  @subtract_overhead = @options[:subtract_overhead]
- if @subtract_overhead
+ if @@overhead.nil?
  @@overhead ||= measure_overhead
  @measurements = {} # A hash of Measure; must repeat here to cleanup what measure_overhead did
  end
@@ -130,9 +130,10 @@ module Perf
  else
  t = Benchmark.measure { res=code.call }
  t -= @@overhead if @subtract_overhead && @@overhead # Factor out the overhead of measure, if we are asked to do so
- if t.total>=0 && t.real>=0
- m.time += t
- root.time += t if root
+ root.count += m.count if root
+ if t.total>=0 || t.real>=0
+ m.time += t
+ root.time += t if root
  end
  end
  ensure
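
This hunk changes Perf::Meter#measure in two ways: the root measurement now accumulates the counts of its children (root.count += m.count), and the guard on the measured time now accepts it when either the total or the real time is non-negative. A minimal sketch of the visible effect, using only calls already exercised in the tests further down (block measures and report_list_of_measures):

  m = Perf::Meter.new
  m.measure(:a) { }
  m.measure(:b) { }
  m.measure(:c) { }
  m.report_list_of_measures
  # 1.3.2 reported '\blocks,0'; 1.3.4 reports '\blocks,3', because the implicit
  # root path now sums the counts of the blocks measured under it.
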
@@ -307,6 +308,26 @@ module Perf
  restore_all_class_methods(klass)
  end

+ # Returns an index of accuracy of the measure calculated in relation to the overhead.
+ # The larger the accuracy, the more accurate the measure.
+ # accuracy < 0 means that it is not possible to calculate;
+ # accuracy <= 1 means that the measure is equal or smaller than the overhead.
+ # This makes the measure very inaccurate.
+ # accuracy = X means that the measure is X times the overhead.
+
+ def accuracy(path)
+ if @@overhead
+ over=@@overhead.total+@@overhead.real
+ if over>0 && @@overhead.total>=0 && @@overhead.real>=0
+ m=get_measurement(path)
+ if m.count>0 && m.time.total>0 && m.time.real>0
+ return (m.time.total+m.time.real) / (over*m.count)
+ end
+ end
+ end
+ -1
+ end
+
  protected

  # This method measures the overhead of calling "measure" on an instace of Perf::Meter.
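
The new Perf::Meter#accuracy(path) returns how many times larger a measurement is than the measuring overhead, or -1 when the ratio cannot be computed. A hedged usage sketch modeled on the test_accurancy test added later in this diff; the '\blocks\...' paths are the ones Perf::Meter builds for block measures:

  m = Perf::Meter.new
  m.measure(:slow) { ("123" * 1_000_000).reverse }  # much larger than the overhead
  m.measure(:fast) { }                              # empty block, comparable to the overhead

  m.accuracy(m.measurements['\blocks\slow'].path)   # >= 0: the measure is that many times the overhead
  m.accuracy(m.measurements['\blocks\fast'].path)   # negative (-1): not computable for an empty measure
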
@@ -13,7 +13,8 @@ module Perf
  #
  class ReportFormat

- MIN_TOTAL_TIME = 1.0e-10
+ MIN_TOTAL_TIME = 1.0e-10
+ MAX_ACCURACY_SIZE = 10

  # Format takes a Perf::Meter plus a hash of options and converts it into a header, followed by a series
  # of entries in a hash format that can be easily converted in any other format such as Text, HTML, XML, etc.
@@ -69,6 +70,7 @@ module Perf
  :percent => "percent",
  :count => "count", :max_count => max_count,
  :time => Benchmark::Tms::CAPTION,
+ :accuracy => "accuracy", :max_accuracy => MAX_ACCURACY_SIZE,
  :options => options)

  # Root
@@ -81,6 +83,7 @@ module Perf
  :percent => percents[what]||0.0,
  :count => m.count, :max_count => max_count,
  :time => m.time,
+ :accuracy => format_accuracy(perf.accuracy(m.path)), :max_accuracy => MAX_ACCURACY_SIZE,
  :options => options)
  end

@@ -129,5 +132,24 @@ module Perf
  ""
  end

+ # Format the accuracy
+ # See Perf::Meter#accuracy for more information
+
+ def format_accuracy(accuracy)
+ if accuracy<0
+ "unknown"
+ elsif accuracy<=1
+ "very poor"
+ elsif accuracy<=50
+ "poor"
+ elsif accuracy<=100
+ "fair"
+ elsif accuracy<=1000
+ "good"
+ else
+ "excellent"
+ end
+ end
+
  end
  end
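
ReportFormat#format_accuracy turns the raw ratio returned by Perf::Meter#accuracy into a label. A restatement of the branches above as illustrative input/output pairs (the input values are arbitrary examples, not taken from the gem):

  format_accuracy(-1)    # => "unknown"     accuracy could not be computed
  format_accuracy(0.8)   # => "very poor"   measure at or below the overhead
  format_accuracy(25)    # => "poor"        up to 50x the overhead
  format_accuracy(80)    # => "fair"        up to 100x
  format_accuracy(500)   # => "good"        up to 1000x
  format_accuracy(5000)  # => "excellent"   more than 1000x
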
@@ -30,6 +30,55 @@ module Perf
  # :count_format => sprintf format of the count (see COUNT_FORMAT for default)
  # :indent_string => what string to use to indent the path (see INDENT for default)
  #
+ # ==== Stylesheet Example
+ #
+ # table.rubyperf_report
+ # {
+ # border: solid 2px #555555;
+ # padding: 0;
+ # }
+ #
+ # table.rubyperf_report tr.odd_row
+ # {
+ # background-color: #CCCCCC;
+ # }
+ #
+ # table.rubyperf_report tr.even_row
+ # {
+ # background-color: #DDDDDD;
+ # padding: 0;
+ # }
+ #
+ # table.rubyperf_report td.title
+ # {
+ # text-align: left;
+ # padding: 0;
+ # font-family: "Courier New";
+ # font-weight: bold;
+ # }
+ #
+ # table.rubyperf_report td.accuracy
+ # {
+ # text-align: left;
+ # padding: 0;
+ # font-family: "Courier New";
+ # }
+ #
+ # table.rubyperf_report td.percent, td.count, td.system_time, td.user_time, td.real_time, td.total_time
+ # {
+ # text-align: right;
+ # padding: 0;
+ # font-family: "Courier New";
+ # }
+ #
+ # table.rubyperf_report th
+ # {
+ # background-color: #888888;
+ # border: 2px solid black;
+ # text-align: left;
+ # padding: 2px;
+ # }
+
  # ==== Example
  #
  # m=Perf::Meter.new
@@ -50,6 +99,7 @@ module Perf
  "<table class='rubyperf_report'><tr>" \
  "<th class='title'>#{v[:title]}</th>" \
  "<th class='percent'>%</th>" \
+ "<th class='accuracy'>accuracy</th>" \
  "<th class='count'>count</th>" \
  "<th class='user_time'>user</th>" \
  "<th class='system_time'>system</th>" \
@@ -65,6 +115,7 @@ module Perf
  "<tr class='#{@line % 2==0 ? "even_row" : "odd_row"}'>" \
  "<td class='title'>#{v[:title]}</td>" \
  "<td class='percent'>#{percent}</td>" \
+ "<td class='accuracy'>#{v[:accuracy]}</td>" \
  "<td class='count'>#{@count_format % v[:count]}</td>" \
  "<td class='user_time'>#{@time_format % v[:time].utime}</td>" \
  "<td class='system_time'>#{@time_format % v[:time].stime}</td>" \
@@ -56,7 +56,7 @@ module Perf

  def format_measure(v)
  percent= v[:percent].is_a?(String) ? v[:percent] : (PERCENT_FORMAT%v[:percent])
- "#{v[:title].ljust(v[:max_title]+EXTRA_SPACES_AFTER_TITLE," ")}: #{percent.rjust(7," ")}% #{v[:count].to_s.rjust(v[:max_count]," ")} #{v[:time].to_s.gsub(/\n/,'')}\n"
+ "#{v[:title].ljust(v[:max_title]+EXTRA_SPACES_AFTER_TITLE," ")}: #{percent.rjust(7," ")}% #{v[:accuracy].rjust(v[:max_accuracy]," ")} #{v[:count].to_s.rjust(v[:max_count]," ")} #{v[:time].to_s.gsub(/\n/,'')}\n"
  end

  def format_title(what,options)
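
The plain-text formatter gets the same column: format_measure now right-justifies the accuracy label to MAX_ACCURACY_SIZE characters between the percent and the count. A short sketch, again limited to calls shown in the tests:

  m = Perf::Meter.new
  m.measure(:work) { ("abc" * 100_000).reverse }
  puts m.report_simple   # each measure line now reads: title, percent, accuracy, count, times
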
data/rubyperf.gemspec CHANGED
@@ -5,7 +5,7 @@

  Gem::Specification.new do |s|
  s.name = %q{rubyperf}
- s.version = "1.3.2"
+ s.version = "1.3.4"

  s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version=
  s.authors = ["lpasqualis"]
@@ -66,7 +66,7 @@ class TestPerfMeter < Test::Unit::TestCase
  Array.new(1000000,"abc").reverse.sort
  end

- assert_equal ['\blocks,0',
+ assert_equal ['\blocks,10',
  '\blocks\empty,1',
  '\blocks\emtpy_loop,1',
  '\blocks\rough_overhead_x10000,1',
@@ -87,7 +87,7 @@ class TestPerfMeter < Test::Unit::TestCase
  '\blocks\string_operations\help,1',
  '\blocks\test = "1",1',
  '\blocks\test = "false",2',
- '\methods,0',
+ '\methods,3',
  '\methods\#<Class:Array>.new,1',
  '\methods\Array.reverse,1',
  '\methods\Array.sort,1'],
@@ -110,7 +110,7 @@ class TestPerfMeter < Test::Unit::TestCase
  assert_equal 6,m.report_simple.length
  assert_equal 6,m.report_html(:percent_format=>"%.8f").length
  assert_equal 6,m.report_html.length
- assert_equal ['\methods,0',
+ assert_equal ['\methods,3',
  '\methods\#<Class:PerfTestExample>.static_method,1',
  '\methods\PerfTestExample.test,1',
  '\methods\PerfTestExample.test_np,1'],
@@ -124,6 +124,18 @@ class TestPerfMeter < Test::Unit::TestCase
  assert m.has_measures?
  end

+ def test_accurancy
+ m=Perf::Meter.new
+ m.measure(:b) do
+ end
+ m.measure(:a) do
+ ("123"*1_000_000).reverse
+ end
+ assert m.accuracy(m.measurements['\blocks'].path) >= 0
+ assert m.accuracy(m.measurements['\blocks\a'].path) >= 0
+ assert m.accuracy(m.measurements['\blocks\b'].path) < 0
+ end
+
  def test_methods_with_measure
  Perf::MeterFactory.clear_all!
  m=Perf::MeterFactory.get
@@ -209,7 +221,7 @@ class TestPerfMeter < Test::Unit::TestCase
  m.measure(:b) { }
  m.measure(:d) { m.measure(:c) { m.measure(:d) {} }}

- assert_equal ['\blocks,0',
+ assert_equal ['\blocks,3',
  '\blocks\a,1',
  '\blocks\b,1',
  '\blocks\d,1',
@@ -258,14 +270,14 @@ class TestPerfMeter < Test::Unit::TestCase
  a.test_np
  #puts puts m.report_simple
  m.restore_all_methods(PerfTestExample)
- assert_equal ['\blocks,0',
+ assert_equal ['\blocks,3',
  '\blocks\measure_test,1',
  '\blocks\measure_test_np,1',
  '\blocks\some_expressions,1',
  '\blocks\some_expressions\expression1 = "1111",1',
  '\blocks\some_expressions\expression1 = "13579",1',
  '\blocks\some_expressions\expression2 = "string",1',
- '\methods,0',
+ '\methods,5',
  '\methods\#<Class:PerfTestExample>.static_method,1',
  '\methods\PerfTestExample.test,1',
  '\methods\PerfTestExample.test_np,2'],
@@ -285,7 +297,7 @@ class TestPerfMeter < Test::Unit::TestCase
  b2_yes_overhead=Benchmark.measure { runs.times { a.reverse! } }


- assert_equal ['\blocks,0',
+ assert_equal ['\blocks,500500',
  '\blocks\a,1000'],
  m_no_overhead.report_list_of_measures

metadata CHANGED
@@ -5,9 +5,9 @@ version: !ruby/object:Gem::Version
  segments:
  - 1
  - 3
- - 2
+ - 4
  segments_generated: true
- version: 1.3.2
+ version: 1.3.4
  platform: ruby
  authors:
  - lpasqualis