abprof 0.2.0
- checksums.yaml +7 -0
- data/.gitignore +10 -0
- data/.travis.yml +5 -0
- data/CODE_OF_CONDUCT.md +49 -0
- data/Gemfile +4 -0
- data/LICENSE.txt +21 -0
- data/README.md +291 -0
- data/Rakefile +10 -0
- data/abprof.gemspec +36 -0
- data/bin/console +14 -0
- data/bin/setup +8 -0
- data/examples/alt_ruby.rb +11 -0
- data/examples/command_dsl.rb +13 -0
- data/examples/for_loop_10k.rb +11 -0
- data/examples/inline_ruby_1800.rb +11 -0
- data/examples/inline_ruby_2500.rb +11 -0
- data/examples/inlined_ruby.rb +11 -0
- data/examples/profiling_ruby.rb +11 -0
- data/examples/simple_dsl.rb +20 -0
- data/examples/sleep.rb +11 -0
- data/examples/sleep_longer.rb +9 -0
- data/examples/vanilla_ruby.rb +11 -0
- data/exe/abcompare +1 -0
- data/exe/abprof +120 -0
- data/lib/abprof.rb +280 -0
- data/lib/abprof/benchmark_dsl.rb +162 -0
- data/lib/abprof/version.rb +3 -0
- metadata +175 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
---
SHA1:
  metadata.gz: d03d3d085332cd91d2e398789ec570640e2e3d3e
  data.tar.gz: 9326d7c1967628febb8d45838227665d2be11d3f
SHA512:
  metadata.gz: 9839e0901b40964238330cf89021ebc9e84305672705c0c66472a4f0adb26aed709031e34b3b9e14880a966f2818443605aa6bd989e5be78dbd201a4c7611807
  data.tar.gz: 69ad1b67b2786b7d24359c9219760bd322d9b148c073d4b3673620c268168b6259fffd10b4c320afc83183c71e6200ef9de236b2b984417debed4299637782e1

data/.gitignore
ADDED
data/.travis.yml
ADDED
data/CODE_OF_CONDUCT.md
ADDED
@@ -0,0 +1,49 @@
# Contributor Code of Conduct

As contributors and maintainers of this project, and in the interest of
fostering an open and welcoming community, we pledge to respect all people who
contribute through reporting issues, posting feature requests, updating
documentation, submitting pull requests or patches, and other activities.

We are committed to making participation in this project a harassment-free
experience for everyone, regardless of level of experience, gender, gender
identity and expression, sexual orientation, disability, personal appearance,
body size, race, ethnicity, age, religion, or nationality.

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery
* Personal attacks
* Trolling or insulting/derogatory comments
* Public or private harassment
* Publishing other's private information, such as physical or electronic
  addresses, without explicit permission
* Other unethical or unprofessional conduct

Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.

By adopting this Code of Conduct, project maintainers commit themselves to
fairly and consistently applying these principles to every aspect of managing
this project. Project maintainers who do not follow or enforce the Code of
Conduct may be permanently removed from the project team.

This code of conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting a project maintainer at the.codefolio.guy@gmail.com. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. Maintainers are
obligated to maintain confidentiality with regard to the reporter of an
incident.

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 1.3.0, available at
[http://contributor-covenant.org/version/1/3/0/][version]

[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/3/0/

data/Gemfile
ADDED
data/LICENSE.txt
ADDED
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2016 AppFolio, Inc.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

data/README.md
ADDED
@@ -0,0 +1,291 @@
# ABProf

ABProf attempts to use simple A/B test statistical logic and apply it
to the question, "which of these two programs is faster?"

Most commonly, you profile by running a program a certain number of
times ("okay, burn it into cache for 100 iterations, then run it 5000
times and divide the total time by 5000"). Then, you make changes to
your program and do the same thing again to compare.

Real statisticians inform us that there are a few problems with that
approach :-)

We use a [Welch's T Test](https://en.wikipedia.org/wiki/Welch%27s_t-test) on a
set of measured runtimes to determine how likely the two programs are
to be different from each other, and after the P value is low enough,
we give our current estimate of which is faster and by how much.
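
For reference, the statistic is just the difference of the two sample
means divided by the combined standard error of that difference. A
quick Ruby sketch, for illustration only -- ABProf itself leans on the
statsample gem, which it declares as a runtime dependency, rather than
on anything like this:

```ruby
# Welch's t statistic for two arrays of measured runtimes.
# Assumes each array holds at least two samples.
def welch_t(xs, ys)
  mean = lambda { |a| a.inject(0.0) { |s, v| s + v } / a.size }
  var  = lambda { |a| m = mean.call(a); a.inject(0.0) { |s, v| s + (v - m)**2 } / (a.size - 1) }
  (mean.call(xs) - mean.call(ys)) /
    Math.sqrt(var.call(xs) / xs.size + var.call(ys) / ys.size)
end
```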

## Installation

Add this line to your application's Gemfile:

```ruby
gem 'abprof'
```

And then execute:

    $ bundle

Or install it yourself as:

    $ gem install abprof

## Usage

### Quick Start - Run Two Programs

The simplest way to use ABProf is the "abcompare" command. Give it two
commands and let it run them for you and measure the results. If your
command contains spaces, put it in quotes -- standard shell
programming.

    $ abcompare "cd ../vanilla_ruby && ./tool/runruby.rb ../optcarrot/bin/optcarrot --benchmark ../optcarrot/examples/Lan_Master.nes >> /dev/null" \
                "cd ../alt_ruby && ./tool/runruby.rb ../optcarrot/bin/optcarrot --benchmark ../optcarrot/examples/Lan_Master.nes >> /dev/null"

This defaults to basic settings (10 iterations of burn-in before
measuring, a P value of 0.05, and so on). You can change them on the
command line. Running this way is simple and straightforward, but it
will take a little longer to converge, since it pays the
start-a-process tax every time it takes a measurement.
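
For example, to demand more certainty and a longer burn-in, you might
run something like the following -- treat the exact flag names as an
assumption, borrowed from the abprof examples further down, and the
two quoted commands as placeholders for whatever you want to compare:

    $ abcompare --burnin=20 --pvalue 0.01 "ruby ./first_version.rb" "ruby ./second_version.rb"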

Run "abcompare --help" if you want to see what command-line options
you can supply. For more control over the results, see below.

The abcompare command is identical to abprof except that it uses a raw
command, not harness code. See below for details.

### Quick Start - Test Harness

Loading and running a program is slow, and it adds a lot of variable
overhead. That can make it hard to sample the specific operations that
you want to measure. ABProf prefers to just do the operations you want
without restarting the worker processes constantly. That takes a bit
of harness code to do well.

In Ruby, there's an ABProf library you can use which will take care of
that interface. That's the easiest way to use it, especially since
you're running a benchmark anyway and would need some structure around
your code.

For a Ruby snippet to be profiled very simply, do this:

    require "abprof"

    ABProf::ABWorker.iteration do
      # Code to measure goes here
      sleep 0.1
    end

    ABProf::ABWorker.start

With two such files, you can compare their speed.
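
For example, a second file of the same shape might busy-loop instead
of sleeping -- a sketch in the spirit of `examples/for_loop_10k.rb`
(the shipped example may differ in its details):

```ruby
# my_loop.rb -- hypothetical second worker file
require "abprof"

ABProf::ABWorker.iteration do
  10_000.times {}   # the code under test
end

ABProf::ABWorker.start
```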

Under the hood, ABProf's harness uses a simple communication protocol
over STDIN and STDOUT to allow the controlling process to tell the
workers to run iterations. Mostly that's great, but it means you'll
need to make sure your worker processes aren't using STDIN for
anything else.

See the examples directory for more. For instance:

    abprof examples/sleep.rb examples/sleep.rb

If abprof is just in the source directory and not installed as a gem,
you should add RUBYLIB="lib" before "abprof" above to get it to run.
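
In other words, from a source checkout you would run something like:

    RUBYLIB="lib" abprof examples/sleep.rb examples/sleep.rb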

### Quick Start - Benchmark DSL

Want to make a benchmark reproducible? Want better accuracy? ABProf
has a DSL (Domain-Specific Language) that can help here.

Here's a simple example:

    require "abprof/benchmark_dsl"

    ABProf.compare do
      warmup 10
      max_trials 5
      min_trials 3
      p_value 0.01
      iters_per_trial 2
      bare true

      report do
        10_000.times {}
      end

      report do
        sleep 0.1
      end
    end

Note that "warmup" is a synonym for "burnin" here -- iterations done
before ABProf starts measuring and comparing. The "report" blocks are
what get run and measured to produce the samples. You can also have a
"report_command", which takes a string as an argument and uses that to
take a measurement.
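
A command-based comparison might look something like this -- a sketch
based on the description above (see `examples/command_dsl.rb` in this
gem and `lib/abprof/benchmark_dsl.rb` for the real spelling of the
options):

```ruby
require "abprof/benchmark_dsl"

ABProf.compare do
  warmup 5
  max_trials 20
  p_value 0.05

  # Each measurement shells out, runs the whole command, and times it.
  report_command "ruby -e '10_000.times {}'"
  report_command "ruby -e 'sleep 0.01'"
end
```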

### A Digression - Bare and Harness

"Harness" refers to ABProf's internal testing protocol, used to allow
multiple processes to communicate. A "harness process" or "harness
worker" means a second process that is used to take measurements, and
can do so repeatedly without having to restart the process.

A "bare process" means one where the work is run directly. Either a
new process is spawned for each measurement (slow, inaccurate) or a
block is run in the same Ruby process (potential for inadvertent
cross-talk.)

In general, for a "harness" process you'll need to put together a .rb
file similar to examples/sleep.rb or examples/for\_loop_10k.rb.

You can use the DSL above for either bare or harness processes ("bare
true" or "bare false") without a problem. But if you tell it to use a
harness, the process in question should be reading ABProf commands
from STDIN and writing responses to STDOUT in ABProf protocol,
normally by using the Ruby Test Harness library.

### Don't Cross the Streams

Harness-enabled tests expect to run forever, fielding requests for
work.

Non-harness-enabled tests don't know how to do harness stuff.

If you run things the wrong way around (abcompare with a harness, or
abprof without one), you'll get either an immediate crash or a run
that never finishes burn-in, depending on which way you did it.

Normally you'll handle this by just passing your command line directly
to abcompare rather than packaging it up into a separate Ruby script.

### Comparing Rubies

I'm AppFolio's Ruby fellow, so I'm writing this to compare two
different locally-built Ruby implementations for speed. The easiest
way to do that is to build them in multiple directories, then write a
wrapper for each directory that uses it to run the program in
question.

You can see examples such as examples/alt\_ruby.rb and
examples/vanilla\_ruby.rb and so on in the examples directory of this
gem.
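
Such a wrapper might look roughly like this -- a hypothetical sketch
built from the abcompare command shown earlier, not a copy of the real
`examples/alt_ruby.rb`:

```ruby
# Hypothetical wrapper: measure optcarrot under the Ruby built in ../alt_ruby.
require "abprof"

ABProf::ABWorker.iteration do
  system("cd ../alt_ruby && ./tool/runruby.rb ../optcarrot/bin/optcarrot " \
         "--benchmark ../optcarrot/examples/Lan_Master.nes >> /dev/null")
end

ABProf::ABWorker.start
```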

Those examples use a benchmark called "optcarrot" which can be quite
slow, so you'll need to decide whether to do a quick, rough check with
a few iterations or a more in-depth check which runs many times for
high certainty.

Here's a slow, very conservative check:

    abprof --burnin=10 --max-trials=50 --min-trials=50 --iters-per-trial=5 examples/vanilla_ruby.rb examples/inline_ruby_1800.rb

Note that since the minimum and maximum trials are both 50, it won't
stop at a particular certainty (P value.) It will just run for 50
trials of 5 iterations each. It takes a while, but gives a pretty good
estimate of how fast one is compared to the other.

Here's a quicker, rougher check:

    abprof --burnin=5 --max-trials=10 --iters-per-trial=1 examples/vanilla_ruby.rb examples/inline_ruby_1800.rb

It may stop after only a few trials if the difference in speed is big
enough. By default, it uses a P value of 0.05, which is (very roughly)
a one in twenty chance of a false result.

If you want a very low chance of a false positive, consider adjusting
the P value downward, to more like 0.001 (a 0.1% chance) or 0.00001 (a
0.001% chance). This may require a lot of time to run, especially if
the two programs are of very similar speed, or have a lot of
variability in the test results.

    abprof --burnin=5 --max-trials=50 --pvalue 0.001 --iters-per-trial=1 examples/sleep.rb examples/for_loop_10k.rb

### How Many Times Faster?

ABProf will try to give you an estimate of how much faster one option
is than the other. Be careful taking it at face value -- if you do a
series of trials and coincidentally get a really different-looking
run, that may give you an unexpected P value *and* an unexpected
number of times faster.

In other words, those false positives will tend to happen *together*,
not independently. If you want to check how much faster one program
really is in a less-biased way, set the number of trials and/or
iterations very high, or manually run both yourself some large number
of times, rather than letting it converge to a P value and then taking
the result from the output.

See the first example under "Comparing Rubies" for one way to do
this. Setting the min and max trials equal is good practice here,
since it reduces bias.

### Does This Just Take Forever?

It's easy to accidentally specify a very large number of iterations
per trial, or total trials, or otherwise make testing a slow program
take *forever*. Right now, you'll pretty much just need to notice that
it's happening and reduce the iters-per-trial or the min-trials, or
relax (raise) the P value threshold. When in doubt, start with a very
quick, rough test.

Of course, if your test is *really* slow, or you're trying to detect a
very small difference, it can just take a really long time. Like A/B
testing, this method has its pitfalls.

### More Control

Would you like to explicitly return the value(s) to compare? You can
replace the "iteration" block above with "iteration\_with\_return\_value"
or "n\_iterations\_with\_return\_value". In the former case, return a
single number at the end of the block, which is the measured value
specifically for that time through the loop. In the latter case, your
block will take a single parameter N for the number of iterations --
run the code that many times and return either a single measured speed
or time, or an array of speeds or times, which will be your samples.
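
A minimal sketch of the first form, assuming you time the block
yourself:

```ruby
require "abprof"

ABProf::ABWorker.iteration_with_return_value do
  start = Time.now
  10_000.times {}      # code under test
  Time.now - start     # the measured value for this iteration
end

ABProf::ABWorker.start
```

The `n_iterations_with_return_value` form is the same idea, except
that the block receives N, runs the code N times, and returns one
number or an array of numbers.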

This can be useful when running N iterations doesn't necessarily
generate exactly N results, or when the time the whole chunk of code
takes to run isn't the most representative number for performance. The
statistical test will help filter out random test-setup noise
somewhat, but sometimes it's best not to count the noise in your
measurement at all, for many good reasons.

## Development

After checking out the repo, run `bin/setup` to install
dependencies. Then, run `rake test` to run the tests. You can also run
`bin/console` for an interactive prompt that will allow you to
experiment.

To install this gem onto your local machine, run `bundle exec rake
install`. To release a new version, update the version number in
`version.rb`, and then run `bundle exec rake release`, which will
create a git tag for the version, push git commits and tags, and push
the `.gem` file to [rubygems.org](https://rubygems.org).

## Credit Where Credit Is Due

I feel like I maybe saw this idea (use A/B test math for a profiler)
somewhere else before, but I can't tell if I really did or if I
misunderstood or hallucinated it. Either way, why isn't this a
standard approach that's built into most profiling tools?

After I started implementation, I found out that optcarrot, used by
the Ruby core team for profiling, is already using this technique (!)
-- I am using it slightly differently, but I'm clearly not the first
to think of using a statistical test to verify which of two programs
is faster.

## Contributing

Bug reports and pull requests are welcome on GitHub at
https://github.com/appfolio/abprof. This project is intended to be a
safe, welcoming space for collaboration, and contributors are expected
to adhere to the
[Contributor Covenant](http://contributor-covenant.org) code of
conduct.

## License

The gem is available as open source under the terms of the [MIT License](http://opensource.org/licenses/MIT).

data/Rakefile
ADDED
data/abprof.gemspec
ADDED
@@ -0,0 +1,36 @@
# coding: utf-8
lib = File.expand_path('../lib', __FILE__)
$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
require 'abprof/version'

Gem::Specification.new do |spec|
  spec.name          = "abprof"
  spec.version       = Abprof::VERSION
  spec.authors       = ["Noah Gibbs"]
  spec.email         = ["noah.gibbs@appfolio.com"]

  spec.summary       = %q{Determine which of two programs is faster, statistically.}
  spec.description   = %q{Determine which of two program variants is faster, using A/B-Testing-style statistical techniques.}
  spec.homepage      = "https://github.com/appfolio/abprof"
  spec.license       = "MIT"

  # Prevent pushing this gem to RubyGems.org. To allow pushes either set the 'allowed_push_host'
  # to allow pushing to a single host or delete this section to allow pushing to any host.
  #if spec.respond_to?(:metadata)
  #  spec.metadata['allowed_push_host'] = "TODO: Set to 'http://mygemserver.com'"
  #else
  #  raise "RubyGems 2.0 or newer is required to protect against public gem pushes."
  #end

  spec.files         = `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(test|spec|features)/}) }
  spec.bindir        = "exe"
  spec.executables   = spec.files.grep(%r{^exe/}) { |f| File.basename(f) }
  spec.require_paths = ["lib"]

  spec.add_development_dependency "bundler", "~> 1.12"
  spec.add_development_dependency "rake", "~> 10.0"
  spec.add_development_dependency "minitest", "~> 5.0"
  spec.add_runtime_dependency "trollop", "~>2.1", ">=2.1.0"
  spec.add_runtime_dependency "statsample", "~>2.0", ">=2.0.0"
  spec.add_runtime_dependency "multi_json", "~>1.12", ">=1.12.0"
end