benchmark-sweet 0.2.0

Sign up to get free protection for your applications and to get access to all the features.
@@ -0,0 +1,16 @@
1
module Benchmark
  module Sweet
    # Mixin running the iterations-per-second suite via the benchmark-ips gem.
    # The including class must provide #options, #items, and #add_entry.
    module IPS
      # Run every item through Benchmark.ips and record an "ips" stat per label.
      # Reads :warmup, :time, and :quiet from options.
      def run_ips
        require "benchmark/ips"
        report = Benchmark.ips(warmup: options[:warmup], time: options[:time], quiet: options[:quiet]) do |job|
          items.each { |item| job.item(item.label, item.action || item.block) }
          #x.compare! if compare
        end
        report.entries.each { |entry| add_entry(entry.label, "ips", entry.stats) }
      end
    end
  end
end
@@ -0,0 +1,30 @@
1
module Benchmark
  module Sweet
    # borrowed heavily from Benchmark::IPS::Job::Entry
    # may be able to fallback on that - will need to generate a &block friendly proc for that structure
    class Item
      attr_reader :label, :action

      # @param label [Hash] metadata describing the item; label[:method] doubles
      #   as the default action when no explicit action is given
      # @param action [String,Proc,nil] code string or callable to benchmark
      def initialize(label, action = nil)
        @label = label
        @action = action || @label[:method] #raise("Item needs an action")
      end

      # Memoized callable for this item.
      # A String action is compiled into a Proc; anything else is used as-is.
      def block
        return @block if @block

        @block = if action.kind_of?(String)
                   compile(action)
                 else
                   action
                 end
      end

      # to use with Job::Entry...
      # def call_once ; call_times(1) ; end
      # def callback_proc
      #   lambda(&method(:call_once))
      # end

      # Wrap a code string in a Proc so it can be invoked repeatedly.
      # NOTE(review): uses eval — the action string must come from trusted callers.
      def compile(str)
        eval <<-CODE
          Proc.new do
            #{str}
          end
        CODE
      end
    end
  end
end
@@ -0,0 +1,244 @@
1
module Benchmark
  module Sweet
    # Abstract notion of a job: collects items, runs the requested metric
    # suites (ips / memory / database), stores stats, and reports comparisons.
    class Job
      include Benchmark::Sweet::IPS
      include Benchmark::Sweet::Memory
      include Benchmark::Sweet::Queries

      # metrics calculated by the ips test suite
      IPS_METRICS = %w(ips).freeze
      # metrics calculated by the memory test suite
      MEMORY_METRICS = %w(memsize memsize_retained objects objects_retained strings strings_retained).freeze
      # metrics calculated by the database test suite
      # FIX: "ignored" was listed twice
      DATABASE_METRICS = %w(rows queries ignored cached).freeze
      ALL_METRICS = (IPS_METRICS + MEMORY_METRICS + DATABASE_METRICS).freeze
      # metrics where larger values are better; all others sort ascending
      HIGHER_BETTER = %w(ips).freeze

      # @return [Array<Job::Item>] list of report items to run
      attr_reader :items
      # @return [Hash<String,Hash<Object,Stat>>] entries[metric][label] = stats
      #   (keyed metric first, then label — see #add_entry)
      attr_reader :entries

      # @option options :quiet [Boolean] true to suppress the display of interim test calculations
      # @option options :warmup [Number] For ips tests, the amount of time to warmup
      # @option options :time [Number] For ips tests, the amount of time to run the calculations
      # @option options :metrics [Array<String|Symbol>] list of metrics to run
      # TODO: :confidence
      attr_reader :options

      # lambda used to group metrics that should be compared
      # The lambda takes the label as an argument and returns a unique object per comparison group
      # NOTE: This lambda takes a label hash as an argument
      #       While other lambdas in this api take a comparison object
      #       a symbol is assumed to refer to the label
      # @return [Nil|Lambda] lambda for grouping
      attr_reader :grouping

      def initialize(options = {})
        @options = options
        @options[:metrics] ||= IPS_METRICS.dup
        validate_metrics(@options[:metrics])
        @items = []
        @entries = {}
        @symbolize_keys = false
        # load / save
        @filename = nil
        # display
        @grouping = nil
        @report_options = {}
        @report_block = nil
        # current item metadata
        @meta = {}
      end

      # Merge additional options into the job configuration.
      def configure(options)
        @options.merge!(options)
      end

      # @return [Boolean] true to run iterations per second tests
      def ips? ; !(relevant_metric_names & IPS_METRICS).empty? ; end
      # @return [Boolean] true to run memory tests
      def memory? ; !(relevant_metric_names & MEMORY_METRICS).empty? ; end
      # @return [Boolean] true to run database queries tests
      def database? ; !(relevant_metric_names & DATABASE_METRICS).empty? ; end

      # @return [Boolean] true to suppress the display of interim test calculations
      def quiet? ; options[:quiet] ; end

      # @return [Boolean] true to run tests for data that has already been processed
      def force? ; options[:force] ; end

      # @return [Array<String>] List of metrics to compare
      def relevant_metric_names ; options[:metrics] ; end

      # items to run (typical benchmark/benchmark-ips use case)
      # A plain (non-Hash) label is normalized into {method: label} and merged
      # with the current #metadata context.
      def item(label, action = nil, &block)
        # could use Benchmark::IPS::Job::Entry
        current_meta = label.kind_of?(Hash) ? @meta.merge(label) : @meta.merge(method: label)
        @items << Item.new(current_meta, action || block)
      end
      alias report item

      # Merge metadata into the labels of subsequently defined items.
      # When given a block, the metadata only applies to items defined inside it.
      def metadata(options)
        @old_meta = @meta
        @meta = @meta.merge(options)
        return unless block_given?
        yield
        @meta = @old_meta
      end

      # Remember the file used by #load_entries / #save_entries.
      def save_file(filename)
        @filename = filename
      end

      # &block - a lambda that accepts a label and a stats object
      #          returns a unique object for each set of metrics that should be compared with each other
      #
      # unfortunatly, this currently has a different signature than all other lambdas
      # at this time, there are no comparisons created yet. so it is hard to pass one in
      # example:
      #   x.compare_by { |label, value| label[:data] }
      #   x.compare_by :data
      def compare_by(*symbol, &block)
        @grouping = symbol.empty? ? block : Proc.new { |label, value| symbol.map { |s| label[s] } }
      end

      # Setup the reporting framework
      # @keyword :grouping [Symbol|lambda|nil] proc with parameters label, stat that generates grouping names
      #           defaults to the compare_by value
      # @keyword :sort [Boolean] true to sort the rows (default false). NOTE: grouping names ARE sorted
      # @keyword :row [Symbol|lambda] a lambda (default - display the full label)
      # @keyword :column [Symbol|lambda] (default :metric)
      # @keyword :value (default :comp_short - the value and delta)
      def report_with(args = {}, &block)
        @report_options = args
        @report_block = block
        # Assume the display grouping is the same as comparison grouping unless an explicit value was provided
        if !args.key?(:grouping) && @grouping
          args[:grouping] = @grouping.respond_to?(:call) ? -> v { @grouping.call(v.label, v.stats) } : @grouping
        end
      end

      # if we are using symbols as keys for our labels
      # NOTE(review): currently a no-op — @symbolize_keys is set in initialize but
      # never read; confirm intended behavior before relying on this.
      def labels_have_symbols!
      end

      # record a result: stores a Stats object under entries[metric][label].
      # Raw sample arrays are converted via #create_stats.
      def add_entry(label, metric, stat)
        (@entries[metric] ||= {})[label] = stat.respond_to?(:central_tendency) ? stat : create_stats(stat)
      end

      def entry_stat(label, metric)
        @entries.dig(metric, label)
      end

      def relevant_entries
        relevant_metric_names.map { |n| [n, @entries[n]] }
      end

      # serialization

      def load_entries(filename = @filename)
        # ? have ips save / load their own data?
        return unless filename && File.exist?(filename)
        require "json"

        JSON.load(IO.read(filename)).each do |v|
          n = v["name"]
          n.symbolize_keys! # ActiveSupport Hash extension (gem runtime dependency)
          add_entry n, v["metric"], v["samples"]
        end
      end

      def save_entries(filename = @filename)
        return unless filename
        require "json"

        # sanity checking: warn when label hash values won't survive a JSON round trip
        # FIX: flag was previously declared as `symbol_value` but set as `symbol_values`,
        # so the warning below could never fire
        symbol_values = false

        data = @entries.flat_map do |metric_name, metric_values|
          metric_values.map do |label, stat|
            symbol_values ||= label.kind_of?(Hash) && label.values.detect { |v| v.nil? || v.kind_of?(Symbol) }
            {
              'name'    => label,
              'metric'  => metric_name,
              'samples' => stat.samples,
              # extra data like measured_us, iter, and others?
            }
          end
        end

        puts "", "Warning: Please use strings or numbers for label hash values (not nils or symbols). Symbols are not JSON friendly." if symbol_values
        IO.write(filename, JSON.pretty_generate(data) << "\n")
      end

      def run
        # run metrics if they are requested and haven't run yet
        # only run the suites that provide the data the user needs.
        # if the first node has the data, assumes all do
        #
        # TODO: may want to override these values
        run_ips if ips? && (force? || !@entries.dig(IPS_METRICS.first, items.first.label))
        run_memory if memory? && (force? || !@entries.dig(MEMORY_METRICS.first, items.first.label))
        run_queries if database? && (force? || !@entries.dig(DATABASE_METRICS.first, items.first.label))
      end

      # @return [Array<Comparison>] comparisons built from entries; also displayed
      def run_report
        comparison_values.tap do |results|
          display_report(results)
        end
      end

      def display_report(comparisons)
        if !@report_block || @report_block.arity == 2
          Benchmark::Sweet.table(comparisons, **@report_options, &@report_block)
        else
          @report_block.call(comparisons)
        end
      end

      # of note, this groups with @grouping (defined by compare_by)
      # but then all data continues to the next step
      # this allows you to make comparisons across rows / columns / grouping
      def comparison_values
        relevant_entries.flat_map do |metric_name, metric_entries|
          # TODO: map these to Comparison(metric_name, label, stats) So we only have 1 type of lambda
          partitioned_metrics = grouping ? metric_entries.group_by(&grouping) : {nil => metric_entries}
          partitioned_metrics.flat_map do |_grouping_name, grouped_metrics|
            sorted = grouped_metrics.sort_by { |_label, stats| stats.central_tendency }
            sorted.reverse! if HIGHER_BETTER.include?(metric_name)

            _best_label, best_stats = sorted.first
            total = sorted.count

            # TODO: fix ranking. i / total doesn't work as well when there is only 1 entry or some entries are the same
            sorted.each_with_index.map { |(label, stats), i| Comparison.new(metric_name, label, stats, i, total, best_stats) }
          end
        end
      end

      private

      # @raise [ArgumentError] when a requested metric is not in ALL_METRICS
      def validate_metrics(metric_options)
        if !(invalid = metric_options - ALL_METRICS).empty?
          $stderr.puts "unknown metrics: #{invalid.join(", ")}"
          $stderr.puts "choose: #{ALL_METRICS.join(", ")}"
          # FIX: was `raise IllegalArgument` — an undefined constant that would
          # surface as NameError instead of the intended argument error
          raise ArgumentError, "unknown metric: #{invalid.join(", ")}"
        end
      end

      # Wrap raw sample arrays in a benchmark-ips stats object.
      def create_stats(samples)
        Benchmark::IPS::Stats::SD.new(Array(samples))
      end
    end
  end
end
@@ -0,0 +1,41 @@
1
module Benchmark
  module Sweet
    # Mixin running the memory-allocation suite via the memory_profiler gem.
    # The including class must provide #options, #items, #add_entry, and #quiet?.
    module Memory
      # Profile every item and record the six memory metrics
      # (memsize / objects / strings, allocated and retained).
      # options[:memory] controls how many profiling runs are sampled (default 1).
      #
      # I'd prefer to use benchmark/memory - but not sure if it bought us enough
      def run_memory
        require 'memory_profiler'
        puts "Memory Profiling----------" unless quiet?

        items.each do |entry|
          name = entry.label

          $stdout.printf("%20s ", name.to_s) unless quiet?
          rpts = (options[:memory] || 1).times.map { MemoryProfiler.report(&entry.block) }
          # add_entry returns the stored Stats object; only the totals and string
          # counts are echoed to the console below
          tot_stat  = add_entry(name, "memsize", rpts.map(&:total_allocated_memsize))
          totr_stat = add_entry name, "memsize_retained", rpts.map(&:total_retained_memsize)
          # FIX: these two were assigned to unused locals (obj_stat / objr_stat)
          add_entry name, "objects", rpts.map(&:total_allocated) ## ? size
          add_entry name, "objects_retained", rpts.map(&:total_retained)
          str_stat  = add_entry(name, "strings", rpts.map { |rpt| rpt.strings_allocated.size })
          strr_stat = add_entry(name, "strings_retained", rpts.map { |rpt| rpt.strings_retained.size })

          $stdout.printf("%10s alloc/ret %10s strings/ret\n",
                         "#{tot_stat.central_tendency}/#{totr_stat.central_tendency}",
                         "#{str_stat.central_tendency}/#{strr_stat.central_tendency}") unless quiet?
        end
      end
    end
  end
end
@@ -0,0 +1,60 @@
1
module Benchmark
  module Sweet
    # Mixin running the database suite: counts queries and ActiveRecord
    # objects produced by each item's block.
    # The including class must provide #options, #items, and #add_entry.
    module Queries
      # Run every item under QueryCounter and record the four database metrics.
      def run_queries
        items.each do |entry|
          values = ::Benchmark::Sweet::Queries::QueryCounter.count(&entry.block) # { entry.call_times(1) }
          add_entry entry.label, "rows", values[:instance_count]
          add_entry entry.label, "queries", values[:sql_count]
          add_entry entry.label, "ignored", values[:ignored_count]
          add_entry entry.label, "cached", values[:cache_count]
          unless options[:quiet]
            printf "%20s: %3d queries %5d ar_objects", entry.label, values[:sql_count], values[:instance_count]
            printf " (%d ignored)", values[:ignored_count] if values[:ignored_count] > 0
            puts
          end
        end
      end

      # Derived from code found in http://stackoverflow.com/questions/5490411/counting-the-number-of-queries-performed
      #
      # This could get much more elaborate
      # results could be separated by payload[:statement_name] (sometimes nil) or payload[:class_name]
      # Could add explains for all queries (and determine index usage)
      class QueryCounter
        def self.count(&block)
          new.count(&block)
        end

        CACHE_STATEMENT = "CACHE".freeze
        IGNORED_STATEMENTS = %w(CACHE SCHEMA).freeze
        IGNORED_QUERIES = /^(?:ROLLBACK|BEGIN|COMMIT|SAVEPOINT|RELEASE)/.freeze

        # ActiveSupport::Notifications handler: classifies each active_record
        # event into cache / ignored / sql counts, or accumulates record counts.
        def callback(_name, _start, _finish, _id, payload)
          if payload[:sql]
            if payload[:name] == CACHE_STATEMENT
              # FIX: was `@instance[...]` (undefined ivar) — every cached query
              # raised NoMethodError on nil instead of being counted
              @instances[:cache_count] += 1
            elsif IGNORED_STATEMENTS.include?(payload[:name]) || IGNORED_QUERIES.match(payload[:sql])
              @instances[:ignored_count] += 1
            else
              @instances[:sql_count] += 1
            end
          else
            @instances[:instance_count] += payload[:record_count]
          end
        end

        def callback_proc
          lambda(&method(:callback))
        end

        # TODO: possibly setup a single subscribe and use a context/thread local to properly count metrics
        # @return [Hash] counts keyed by :cache_count, :ignored_count, :sql_count, :instance_count
        def count(&block)
          @instances = {cache_count: 0, ignored_count: 0, sql_count: 0, instance_count: 0}
          ActiveSupport::Notifications.subscribed(callback_proc, /active_record/, &block)
          @instances
        end
      end
    end
  end
end
@@ -0,0 +1,5 @@
1
module Benchmark
  module Sweet
    # Gem version (kept in sync with the gemspec).
    # Frozen so the shared constant cannot be mutated in place.
    VERSION = "0.2.0".freeze
  end
end
metadata ADDED
@@ -0,0 +1,189 @@
1
+ --- !ruby/object:Gem::Specification
2
+ name: benchmark-sweet
3
+ version: !ruby/object:Gem::Version
4
+ version: 0.2.0
5
+ platform: ruby
6
+ authors:
7
+ - Keenan Brock
8
+ autorequire:
9
+ bindir: bin
10
+ cert_chain: []
11
+ date: 2020-06-22 00:00:00.000000000 Z
12
+ dependencies:
13
+ - !ruby/object:Gem::Dependency
14
+ name: bundler
15
+ requirement: !ruby/object:Gem::Requirement
16
+ requirements:
17
+ - - "~>"
18
+ - !ruby/object:Gem::Version
19
+ version: 2.1.4
20
+ type: :development
21
+ prerelease: false
22
+ version_requirements: !ruby/object:Gem::Requirement
23
+ requirements:
24
+ - - "~>"
25
+ - !ruby/object:Gem::Version
26
+ version: 2.1.4
27
+ - !ruby/object:Gem::Dependency
28
+ name: rake
29
+ requirement: !ruby/object:Gem::Requirement
30
+ requirements:
31
+ - - ">="
32
+ - !ruby/object:Gem::Version
33
+ version: 12.3.3
34
+ type: :development
35
+ prerelease: false
36
+ version_requirements: !ruby/object:Gem::Requirement
37
+ requirements:
38
+ - - ">="
39
+ - !ruby/object:Gem::Version
40
+ version: 12.3.3
41
+ - !ruby/object:Gem::Dependency
42
+ name: rspec
43
+ requirement: !ruby/object:Gem::Requirement
44
+ requirements:
45
+ - - "~>"
46
+ - !ruby/object:Gem::Version
47
+ version: '3.0'
48
+ type: :development
49
+ prerelease: false
50
+ version_requirements: !ruby/object:Gem::Requirement
51
+ requirements:
52
+ - - "~>"
53
+ - !ruby/object:Gem::Version
54
+ version: '3.0'
55
+ - !ruby/object:Gem::Dependency
56
+ name: activerecord
57
+ requirement: !ruby/object:Gem::Requirement
58
+ requirements:
59
+ - - ">="
60
+ - !ruby/object:Gem::Version
61
+ version: '0'
62
+ type: :development
63
+ prerelease: false
64
+ version_requirements: !ruby/object:Gem::Requirement
65
+ requirements:
66
+ - - ">="
67
+ - !ruby/object:Gem::Version
68
+ version: '0'
69
+ - !ruby/object:Gem::Dependency
70
+ name: benchmark-ips
71
+ requirement: !ruby/object:Gem::Requirement
72
+ requirements:
73
+ - - "~>"
74
+ - !ruby/object:Gem::Version
75
+ version: 2.8.2
76
+ type: :runtime
77
+ prerelease: false
78
+ version_requirements: !ruby/object:Gem::Requirement
79
+ requirements:
80
+ - - "~>"
81
+ - !ruby/object:Gem::Version
82
+ version: 2.8.2
83
+ - !ruby/object:Gem::Dependency
84
+ name: memory_profiler
85
+ requirement: !ruby/object:Gem::Requirement
86
+ requirements:
87
+ - - "~>"
88
+ - !ruby/object:Gem::Version
89
+ version: 0.9.0
90
+ type: :runtime
91
+ prerelease: false
92
+ version_requirements: !ruby/object:Gem::Requirement
93
+ requirements:
94
+ - - "~>"
95
+ - !ruby/object:Gem::Version
96
+ version: 0.9.0
97
+ - !ruby/object:Gem::Dependency
98
+ name: more_core_extensions
99
+ requirement: !ruby/object:Gem::Requirement
100
+ requirements:
101
+ - - ">="
102
+ - !ruby/object:Gem::Version
103
+ version: '0'
104
+ type: :runtime
105
+ prerelease: false
106
+ version_requirements: !ruby/object:Gem::Requirement
107
+ requirements:
108
+ - - ">="
109
+ - !ruby/object:Gem::Version
110
+ version: '0'
111
+ - !ruby/object:Gem::Dependency
112
+ name: activesupport
113
+ requirement: !ruby/object:Gem::Requirement
114
+ requirements:
115
+ - - ">="
116
+ - !ruby/object:Gem::Version
117
+ version: '0'
118
+ type: :runtime
119
+ prerelease: false
120
+ version_requirements: !ruby/object:Gem::Requirement
121
+ requirements:
122
+ - - ">="
123
+ - !ruby/object:Gem::Version
124
+ version: '0'
125
+ description: |2
126
+ Benchmark Sweet is a suite to run multiple kinds of metrics.
127
+ It can be configured to run memory, sql query, and ips benchmarks on a common set of code.
128
+ This data can be collected across multiple runs of the code, to support multiple ruby or
129
+ gem versions.
130
+ This also generates more complex comparisons
131
+ email:
132
+ - keenan@thebrocks.net
133
+ executables: []
134
+ extensions: []
135
+ extra_rdoc_files: []
136
+ files:
137
+ - ".gitignore"
138
+ - ".rspec"
139
+ - ".travis.yml"
140
+ - CHANGELOG.md
141
+ - Gemfile
142
+ - LICENSE.txt
143
+ - README.md
144
+ - Rakefile
145
+ - benchmark-sweet.gemspec
146
+ - bin/console
147
+ - bin/setup
148
+ - examples/benchmark_big_small.rb
149
+ - examples/benchmark_big_split.rb
150
+ - examples/benchmark_blank.rb
151
+ - examples/benchmark_rpt_database.rb
152
+ - examples/benchmark_simple_database.rb
153
+ - lib/benchmark-sweet.rb
154
+ - lib/benchmark/sweet.rb
155
+ - lib/benchmark/sweet/comparison.rb
156
+ - lib/benchmark/sweet/ips.rb
157
+ - lib/benchmark/sweet/item.rb
158
+ - lib/benchmark/sweet/job.rb
159
+ - lib/benchmark/sweet/memory.rb
160
+ - lib/benchmark/sweet/queries.rb
161
+ - lib/benchmark/sweet/version.rb
162
+ homepage: https://github.com/kbrock/benchmark-sweet
163
+ licenses:
164
+ - MIT
165
+ metadata:
166
+ homepage_uri: https://github.com/kbrock/benchmark-sweet
167
+ source_code_uri: http://github.com/kbrock/benchmark-sweet
168
+ changelog_uri: http://github.com/kbrock/benchmark-sweet/CHANGELOG.md
169
+ post_install_message:
170
+ rdoc_options: []
171
+ require_paths:
172
+ - lib
173
+ required_ruby_version: !ruby/object:Gem::Requirement
174
+ requirements:
175
+ - - ">="
176
+ - !ruby/object:Gem::Version
177
+ version: '0'
178
+ required_rubygems_version: !ruby/object:Gem::Requirement
179
+ requirements:
180
+ - - ">="
181
+ - !ruby/object:Gem::Version
182
+ version: '0'
183
+ requirements: []
184
+ rubyforge_project:
185
+ rubygems_version: 2.7.6.2
186
+ signing_key:
187
+ specification_version: 4
188
+ summary: Suite to run multiple benchmarks
189
+ test_files: []