benchmark-sweet 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,49 @@
+ require 'benchmark/sweet'
+ require 'active_support/all'
+ require "more_core_extensions/all" # [].tableize
+
+ # version 2.3.7
+ #
+ # method       | nil_ips                | str_ips       | nil_memsize        | str_memsize
+ # -------------+------------------------+---------------+--------------------+-------------
+ # ?split:[]    | 51825798.8 i/s         | 1407946.4 i/s | 40.0 bytes         | 360.0 bytes
+ # &&split||[]  | 46730725.8 i/s - 1.11x | 1413355.3 i/s | 40.0 bytes         | 360.0 bytes
+ # to_s.split   | 5685237.1 i/s - 9.12x  | 1396494.3 i/s | 80.0 bytes - 2.00x | 360.0 bytes
+ #
+ # version 2.4.4
+ #
+ # method       | nil_ips                | str_ips       | nil_memsize        | str_memsize
+ # -------------+------------------------+---------------+--------------------+-------------
+ # ?split:[]    | 51559454.4 i/s         | 1438780.8 i/s | 40.0 bytes         | 360.0 bytes
+ # &.split||[]  | 46446196.0 i/s - 1.11x | 1437665.3 i/s | 40.0 bytes         | 360.0 bytes
+ # &&split||[]  | 43356335.6 i/s - 1.19x | 1434466.6 i/s | 40.0 bytes         | 360.0 bytes
+ # to_s.split   | 5835694.7 i/s - 8.84x  | 1427819.0 i/s | 80.0 bytes - 2.00x | 360.0 bytes
+
+ NSTRING = nil
+ DELIMITER = '/'.freeze
+ STRING = "ab/cd/ef/gh".freeze
+
+ Benchmark.items(metrics: %w(ips memsize), memory: 3, warmup: 1, time: 3, quiet: false, force: ENV["FORCE"] == "true") do |x|
+   x.metadata version: RUBY_VERSION
+   x.metadata data: "nil" do
+     x.report("to_s.split", "NSTRING.to_s.split(DELIMITER)")
+     x.report("?split:[]", "NSTRING ? NSTRING.split(DELIMITER) : []")
+     x.report("&&split||[]", "NSTRING && NSTRING.split(DELIMITER) || []")
+     x.report("&.split||[]", "NSTRING&.split(DELIMITER) || []") if RUBY_VERSION >= "2.4"
+   end
+
+   x.metadata data: "str" do
+     x.report("to_s.split", "STRING.to_s.split(DELIMITER)")
+     x.report("?split:[]", "STRING ? STRING.split(DELIMITER) : []")
+     x.report("&&split||[]", "STRING && STRING.split(DELIMITER) || []")
+     x.report("&.split||[]", "STRING&.split(DELIMITER) || []") if RUBY_VERSION >= "2.4"
+   end
+
+   # partition the data by ruby version and whether data is present
+   # that way we're only comparing similar values
+   # note: this does not necessarily correlate to how the data is displayed
+   x.compare_by :version, :data
+   x.report_with grouping: [:metric, :version], row: :method, column: [:data], value: :comp_short
+
+   x.save_file (ENV["SAVE_FILE"] == "true") ? $0.sub(/\.rb$/, '.json') : ENV["SAVE_FILE"] if ENV["SAVE_FILE"]
+ end
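
The reports above pass the benchmarked code as strings rather than blocks; the strings are Ruby expressions that are evaluated as the measured code, the usual benchmark/ips string-job style that avoids block-call overhead. As far as this example goes, the block form used in the next file would work just as well; a minimal sketch of one report rewritten that way (not part of the package source):

    x.report("?split:[]") { NSTRING ? NSTRING.split(DELIMITER) : [] }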
@@ -0,0 +1,56 @@
+ require "benchmark/sweet"
+ require "active_support/all"
+ require "more_core_extensions/all" # [].tableize
+
+ # method              | NIL                    | EMPTY                  | FULL
+ # --------------------+------------------------+------------------------+------------------------
+ # x&.empty?           | 13752904.8 i/s         | 13089322.9 i/s         | 13352488.3 i/s
+ # !x || x.empty?      | 13334422.1 i/s         | 12019275.5 i/s - 1.14x | 11882260.1 i/s - 1.16x
+ # x.blank?            | 11889050.9 i/s - 1.16x | 11673162.3 i/s - 1.18x | 12039255.2 i/s - 1.14x
+ # x.nil? || x.empty?  | 11620573.1 i/s - 1.18x | 10676420.2 i/s - 1.29x | 10048254.7 i/s - 1.37x
+ # x.try!(:empty?)     | 6240643.7 i/s - 2.20x  | 3962583.3 i/s - 3.47x  | 4071200.6 i/s - 3.38x
+ # x.try(:empty?)      | 6044172.3 i/s - 2.28x  | 2385145.0 i/s - 5.76x  | 2454406.3 i/s - 5.60x
+ # x.empty?            |                        | 13743969.3 i/s         | 13754672.4 i/s
+
+ ANIL = nil
+ EMPTY = [].freeze
+ FULL = ["a"].freeze
+
+ Benchmark.items(metrics: %w(ips)) do |x|
+   x.metadata version: RUBY_VERSION
+   x.metadata data: 'NIL' do
+     x.report("x.nil? || x.empty?") { ANIL.nil? || ANIL.empty? }
+     x.report("!x || x.empty?") { !ANIL || ANIL.empty? }
+     x.report("x&.empty?") { ANIL&.empty? }
+     x.report("x.try!(:empty?)") { ANIL.try!(:empty?) }
+     x.report("x.try(:empty?)") { ANIL.try(:empty?) }
+     x.report("x.blank?") { ANIL.blank? }
+   end
+
+   x.metadata data: 'EMPTY' do
+     x.report("x.nil? || x.empty?") { EMPTY.nil? || EMPTY.empty? }
+     x.report("!x || x.empty?") { !EMPTY || EMPTY.empty? }
+     x.report("x&.empty?") { EMPTY&.empty? }
+     x.report("x.try!(:empty?)") { EMPTY.try!(:empty?) }
+     x.report("x.try(:empty?)") { EMPTY.try(:empty?) }
+     x.report("x.blank?") { EMPTY.blank? }
+     # base case
+     x.report("x.empty?") { EMPTY.empty? }
+   end
+
+   x.metadata data: 'FULL' do
+     x.report("x.nil? || x.empty?") { FULL.nil? || FULL.empty? }
+     x.report("!x || x.empty?") { !FULL || FULL.empty? }
+     x.report("x&.empty?") { FULL&.empty? }
+     x.report("x.try!(:empty?)") { FULL.try!(:empty?) }
+     x.report("x.try(:empty?)") { FULL.try(:empty?) }
+     x.report("x.blank?") { FULL.blank? }
+     # base case
+     x.report("x.empty?") { FULL.empty? }
+   end
+
+   x.compare_by :data
+   x.report_with grouping: nil, row: :method, column: :data
+
+   x.save_file (ENV["SAVE_FILE"] == "true") ? $0.sub(/\.rb$/, '.json') : ENV["SAVE_FILE"] if ENV["SAVE_FILE"]
+ end
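
The gap between try and try! in the table above is consistent with ActiveSupport's try doing an extra respond_to? check before dispatching, while try! sends the method directly (both simply return nil when the receiver is nil). A rough sketch of the distinction, simplified rather than ActiveSupport's actual implementation:

    # simplified; ActiveSupport's real code differs in detail
    def try_like(obj, meth, *args)       # roughly x.try(:empty?)
      obj.public_send(meth, *args) if !obj.nil? && obj.respond_to?(meth)
    end

    def try_bang_like(obj, meth, *args)  # roughly x.try!(:empty?)
      obj.public_send(meth, *args) unless obj.nil?
    end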
@@ -0,0 +1,127 @@
+ require "benchmark/sweet"
+ require "more_core_extensions/all"
+ require "active_record"
+
+ # For various versions of rails, compare `Model.all.first` vs `Model.all.to_a.first`
+ #
+ # To work across versions, we need to run this multiple times (with a different Gemfile each run)
+ #
+ # metadata of :version is stored to be able to distinguish the version across each of the runs
+ #
+ # options:
+ #   DATABASE_URL
+ #     link to the database
+ #
+ #     DATABASE_URL=postgres://user:password@localhost/user_benchmark
+ #
+ #   SAVE_FILE
+ #     a save file contains the values from the various invocations.
+ #     This uses the `save_file` method, which is optional, but without it results won't be compared across versions.
+ #
+ #     running this script multiple times will only use the first value obtained,
+ #     but running it with different metadata (i.e. AR version) will run again and compare across the versions
+ #
+ #     default             : write to a save file named "{this script's name}.json"
+ #     SAVE_FILE=file.json : write to a save file named "file.json"
+ #
+ #   FORCE
+ #     This tells the script to overwrite previously run identical metadata
+ #     It uses the `force: true` option to tell the benchmark that this behavior is desired
+ #
+ #     FORCE=true  : overwrite the previous values for this script
+ #     FORCE=false : default behavior. don't run multiple times for the same metadata
+ #
+
+ # version 5.2.1
+ #
+ # grouping 6.0.2.2 (100 records)
+ #
+ # method      | ips                | memsize
+ # ------------+--------------------+---------------------
+ # first       | 2945.9 i/s         | 9808 bytes
+ # to_a.first  | 1204.7 i/s - 2.45x | 68200 bytes - 6.95x
+ #
+ # NOTE: the results are in color
+
+ ActiveRecord::Base.establish_connection(ENV.fetch('DATABASE_URL') { "postgres://localhost/user_benchmark" })
+ ActiveRecord::Migration.verbose = false
+
+ ActiveRecord::Schema.define do
+   create_table :users, force: true do |t|
+     t.string :name
+   end
+   #add_index :users, :name
+
+   create_table :accounts, force: true do |t|
+     t.string :name
+   end
+ end
+
+ class User < ActiveRecord::Base; end
+ class Account < ActiveRecord::Base; end
+
+ if User.count == 0
+   puts "Creating 100 users"
+   100.times { |i| User.create name: "user #{i}" }
+ end
+
+ # In the table cells, the report typically displays the value and units. This lambda adds colors (based upon best/worst).
+ VALUE_TO_S = ->(m) { m.comp_short("\e[#{m.color}m#{m.central_tendency.round(1)} #{m.units}\e[0m") }
+
+ # These are the various items that will be compared
+ #
+ # metrics - the metrics that are actually run
+ # memory
+ # warmup  - run the tests for this many seconds before running the actual benchmark (for ips)
+ # time    - run the tests for this many seconds (for ips)
+ # quiet   - when false, display the ips run information
+ # force   - defined above. this allows the command line to change this value (default: false)
+ Benchmark.items(metrics: %w(ips memsize), memory: 3, warmup: 1, time: 3, quiet: false, force: ENV["FORCE"] == "true") do |x|
+   # for all examples, store this metadata with the row
+   # metadata is stored in the savefile along with the method name and benchmarks.
+   # future runs of the script that have a different version of AR will be stored as separate benchmarks
+   # this allows comparison across multiple versions
+   #
+   # If you are applying a patch for different behavior, or you're running against head, consider using something more complex:
+   #
+   # this can be an array, or something like version: [ActiveRecord.version.to_s, ENV["PATCH"], ENV["SHA"]].compact.join(".")
+   x.metadata version: ActiveRecord.version.to_s
+
+   # compare_by is a display parameter
+   #
+   # this is used to know which metadata is different and which are the same
+   # the best and worst values are determined across these criteria
+   #
+   # so if you want to show a different list for each version, add it to the list
+   # This typically will include the group and either the column or the row header
+   #
+   # defaults to unique by method (and always unique by metric)
+   x.compare_by { |label, _| [label[:count], label[:version]] }
+
+   # these next two cases are marked with count=100. Use row, column or grouping to group these
+   # These values tend to be in the compare_by list, because running on 100 records is different than running on 0 (but that is not always the case)
+   x.metadata count: "100" do
+     x.report("first") { User.all.first }
+     x.report("to_a.first") { User.all.to_a.first }
+   end
+
+   x.metadata count: "0" do
+     x.report("first") { Account.all.first }
+     x.report("to_a.first") { Account.all.to_a.first }
+   end
+
+   # Note, often the report title is the same as the code; in those cases just pass the name
+   # x.report("Account.all.first")
+   # x.report("Account.all.to_a.first")
+
+   # display-only parameters
+   # these can be symbols or lambdas
+   # grouping is the value for determining what data goes into each table (default - only 1 table)
+   # row is the title on the left hand side (default: all metadata)
+   # column header is the metric that was captured (e.g. ips or memsize) (default: metric name)
+   # value is the text that is displayed in the cell of the table (default: "central_tendency units")
+   x.report_with grouping: ->(l) { "#{l[:version]} (#{l[:count]} records)" }, row: :method, column: :metric, value: VALUE_TO_S
+
+   # defined above. benchmark-sweet pretty much depends upon this json file
+   x.save_file ENV["SAVE_FILE"] ? ENV["SAVE_FILE"] : $0.sub(/\.rb$/, '.json')
+ end
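
To populate the shared save file across ActiveRecord versions, the script is run once per Gemfile, as the option comments above describe. A hypothetical driver (the Gemfile and script names here are illustrative, not part of the package):

    # run the same benchmark under each ActiveRecord version; each run adds its
    # metadata-tagged results to the shared save file for later comparison
    %w[Gemfile.ar52 Gemfile.ar61].each do |gemfile|
      env = { "BUNDLE_GEMFILE" => gemfile, "SAVE_FILE" => "query_users.json" }
      system(env, "bundle", "exec", "ruby", "query_users_benchmark.rb") || abort("run failed for #{gemfile}")
    end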
@@ -0,0 +1,34 @@
+ require "benchmark/sweet"
+ require "more_core_extensions/all"
+ require "active_record"
+ #
+ # label               | ips               | queries  | rows
+ #---------------------+-------------------+----------+----------------------
+ # User.all.first      | 3185.3 i/s        | 1.0 objs | 1.0 objs
+ # User.all.to_a.first | 977.1 i/s - 3.26x | 1.0 objs | 100.0 objs - 100.00x
+
+
+ ActiveRecord::Base.establish_connection(ENV.fetch('DATABASE_URL') { "postgres://localhost/user_benchmark" })
+ ActiveRecord::Migration.verbose = false
+
+ ActiveRecord::Schema.define do
+   create_table :users, force: true do |t|
+     t.string :name
+   end
+   #add_index :users, :name
+ end
+
+ class User < ActiveRecord::Base; end
+
+ if User.count == 0
+   puts "Creating 100 users"
+   100.times { |i| User.create name: "user #{i}" }
+ end
+
+ Benchmark.items(metrics: %w(ips queries rows)) do |x|
+   x.report("User.all.first")
+   x.report("User.all.to_a.first")
+
+   x.report_with row: :method, column: :metric
+   x.save_file (ENV["SAVE_FILE"] == "true") ? $0.sub(/\.rb$/, '.json') : ENV["SAVE_FILE"] if ENV["SAVE_FILE"]
+ end
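
The queries and rows metrics in this example count database work per invocation rather than time; they are implemented in benchmark/sweet/queries (required by the library file later in this diff), which is not included in this excerpt. Conceptually such counts can be gathered from ActiveRecord's instrumentation; a stand-alone sketch, not the gem's implementation:

    # counts the sql.active_record notifications fired while the block runs
    def count_queries
      queries = 0
      subscriber = ActiveSupport::Notifications.subscribe("sql.active_record") { queries += 1 }
      yield
      queries
    ensure
      ActiveSupport::Notifications.unsubscribe(subscriber)
    end

    count_queries { User.all.to_a.first }  # => 1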
@@ -0,0 +1 @@
+ require "benchmark/sweet"
@@ -0,0 +1,94 @@
+ require "benchmark/sweet/version"
+ require "benchmark/sweet/ips"
+ require "benchmark/sweet/memory"
+ require "benchmark/sweet/queries"
+ require "benchmark/sweet/job"
+ require "benchmark/sweet/comparison"
+ require "benchmark/sweet/item"
+ require "benchmark/ips"
+
+ module Benchmark
+   module Sweet
+     def items(options = {memory: true, ips: true})
+       job = ::Benchmark::Sweet::Job.new(options)
+
+       yield job
+
+       job.load_entries
+       job.run
+       job.save_entries
+
+       job.run_report
+
+       job # both items and entries are useful
+     end
+
+     # report helper methods
+     # these are the building blocks for the printed reports.
+     # These can be used to create different tables
+
+     # @param base [Array<Comparison>] array of comparisons
+     # @param grouping [Symbol|Array<Symbol>|Proc] Proc passed to group_by to partition records.
+     #   Accepts a Comparison and returns an object to partition by. Returning nil filters the record from the list
+     # @keyword sort [Boolean] true to sort by the grouping value (default false)
+     # The block receives each group's label and its comparisons
+     def self.group(base, grouping, sort: false, &block)
+       if grouping.nil?
+         yield nil, base
+         return
+       end
+
+       grouping = symbol_to_proc(grouping)
+
+       label_records = base.group_by(&grouping).select { |value, comparisons| !value.nil? }
+       label_records = label_records.sort_by(&:first) if sort
+
+       label_records.each(&block)
+     end
+
+     def self.table(base, grouping: nil, sort: false, row: :label, column: :metric, value: :comp_short)
+       header_name = grouping.respond_to?(:call) ? "grouping" : grouping
+       column = symbol_to_proc(column)
+       value = symbol_to_proc(value)
+
+       group(base, grouping, sort: true) do |header_value, table_comparisons|
+         row_key = row.kind_of?(Symbol) || row.kind_of?(String) ? row : "label"
+         table_rows = group(table_comparisons, row, sort: sort).map do |row_header, row_comparisons|
+           row_comparisons.each_with_object({row_key => row_header}) do |comparison, row_data|
+             row_data[column.call(comparison)] = value.call(comparison)
+           end
+         end
+         if block_given?
+           yield header_value, table_rows
+         else
+           print_table(header_name, header_value, table_rows)
+         end
+       end
+     end
+
+     # proc produced: -> (comparison) { comparison[field] }
+     def self.symbol_to_proc(field, join: "_")
+       if field.kind_of?(Symbol) || field.kind_of?(String)
+         field_name = field
+         -> v { v[field_name] }
+       elsif field.kind_of?(Array)
+         field_names = field
+         if join
+           -> v { field_names.map { |gn| v[gn].to_s }.join(join) }
+         else
+           -> v { field_names.map { |gn| v[gn] } }
+         end
+       else
+         field
+       end
+     end
+
+     def self.print_table(header_name, header_value, table_rows)
+       require "more_core_extensions" # defines tableize
+       puts "", "#{header_name} #{header_value}", "" if header_value
+       # passing columns to make sure the table keeps the same column order
+       puts table_rows.tableize(:columns => table_rows.first.keys)
+     end
+   end
+   extend Benchmark::Sweet
+ end
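
Based on the helpers above, table can also be consumed programmatically instead of printed: group yields each grouping value with its comparisons, and table yields the group header plus an array of row hashes when a block is given. A minimal sketch (comparisons stands for the array of Comparison objects produced by a run; obtaining it from the Job is outside this excerpt):

    Benchmark::Sweet.table(comparisons, grouping: :metric, row: :method, column: :data) do |metric, rows|
      puts "== #{metric} =="
      rows.each { |row| p row }  # each row is a hash, e.g. {:method => "to_s.split", "nil" => "...", "str" => "..."}
    end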
@@ -0,0 +1,101 @@
+ module Benchmark
+   module Sweet
+     class Comparison
+       UNITS = {"ips" => "i/s", "memsize" => "bytes", "memsize_retained" => "bytes"}.freeze
+       attr_reader :label, :metric, :stats, :baseline, :worst
+       attr_reader :offset, :total
+       def initialize(metric, label, stats, offset, total, baseline, worst = nil)
+         @metric = metric
+         @label = label
+         @stats = stats
+         @offset = offset
+         @total = total
+         @baseline = baseline
+         @worst = worst
+       end
+
+       def [](field)
+         case field
+         when :metric then metric
+         when :comp_short then comp_short
+         when :comp_string then comp_string
+         when :label then label # not sure if this one makes sense
+         else label[field]
+         end
+       end
+
+       def central_tendency ; stats.central_tendency ; end
+       def error ; stats.error ; end
+       def units ; UNITS[metric] || "objs" ; end
+
+       def mode
+         @mode ||= best? ? :best : overlaps? ? :same : diff_error ? :slowerish : :slower
+       end
+
+       def best? ; !baseline || (baseline == stats) ; end
+
+       # @return true if it is basically the same as the best
+       def overlaps?
+         return @overlaps if defined?(@overlaps)
+         @overlaps = slowdown == 1 ||
+                     stats && baseline && (stats.central_tendency == baseline.central_tendency || stats.overlaps?(baseline))
+       end
+
+       def worst?
+         @worst ? stats.overlaps?(@worst) : (total.to_i - 1 == offset.to_i) && slowdown.to_i > 1
+       end
+
+       def slowdown
+         return @slowdown if @slowdown
+         @slowdown, @diff_error = stats.slowdown(baseline)
+         @slowdown
+       end
+
+       def diff_error
+         @diff_error ||= (slowdown ; @diff_error)
+       end
+
+       # quick display
+
+       def comp_string(l_to_s = nil)
+         l_to_s ||= -> l { l.to_s }
+         case mode
+         when :best
+           "%20s: %10.1f %s" % [l_to_s.call(label), central_tendency, units]
+         when :same
+           "%20s: %10.1f %s - same-ish: difference falls within error" % [l_to_s.call(label), central_tendency, units]
+         when :slower
+           "%20s: %10.1f %s - %.2fx (± %.2f) slower" % [l_to_s.call(label), central_tendency, units, slowdown, error]
+         when :slowerish
+           "%20s: %10.1f %s - %.2fx slower" % [l_to_s.call(label), central_tendency, units, slowdown]
+         end
+       end
+
+       # I tend to call with:
+       # c.comp_short("\033[#{c.color}m#{c.central_tendency.round(1)} #{c.units}\e[0m") # "\033[31m#{value}\e[0m"
+       def comp_short(value = nil)
+         value ||= "#{central_tendency.round(1)} #{units}"
+         case mode
+         when :best, :same
+           value
+         when :slower
+           "%s - %.2fx (± %.2f)" % [value, slowdown, error]
+         when :slowerish
+           "%s - %.2fx" % [value, slowdown]
+         end
+       end
+
+       def color
+         if !baseline
+           ";0"
+         elsif best? || overlaps?
+           "32"
+         elsif worst?
+           "31"
+         else
+           ";0"
+         end
+       end
+     end
+   end
+ end
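
Given the comp_short formats above, a cell that is measurably slower than its baseline renders as its value followed by the slowdown factor (with a ± error term when the mode is :slower). Illustrative output only; the value is taken from the earlier query example and is not a fresh measurement:

    comparison.comp_short                 # => "977.1 i/s - 3.26x"
    comparison.comp_short("custom text")  # => "custom text - 3.26x"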