rate_throttle_client 0.1.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/.circleci/config.yml +81 -0
- data/.github/workflows/check_changelog.yml +10 -0
- data/.gitignore +12 -0
- data/.travis.yml +6 -0
- data/CHANGELOG.md +5 -0
- data/CODE_OF_CONDUCT.md +74 -0
- data/Gemfile +7 -0
- data/LICENSE.txt +21 -0
- data/README.md +156 -0
- data/Rakefile +58 -0
- data/bin/console +14 -0
- data/bin/setup +8 -0
- data/lib/rate_throttle_client.rb +22 -0
- data/lib/rate_throttle_client/.DS_Store +0 -0
- data/lib/rate_throttle_client/clients/base.rb +18 -0
- data/lib/rate_throttle_client/clients/exponential_backoff.rb +26 -0
- data/lib/rate_throttle_client/clients/exponential_increase_gradual_decrease.rb +34 -0
- data/lib/rate_throttle_client/clients/exponential_increase_proportional_decrease.rb +36 -0
- data/lib/rate_throttle_client/clients/exponential_increase_proportional_remaining_decrease.rb +43 -0
- data/lib/rate_throttle_client/clients/null.rb +11 -0
- data/lib/rate_throttle_client/demo.rb +292 -0
- data/lib/rate_throttle_client/info.rb +12 -0
- data/lib/rate_throttle_client/servers/.DS_Store +0 -0
- data/lib/rate_throttle_client/servers/decrease_only/config.ru +49 -0
- data/lib/rate_throttle_client/servers/gcra/config.ru +11 -0
- data/lib/rate_throttle_client/servers/gcra/gcra_fake_server.rb +52 -0
- data/lib/rate_throttle_client/version.rb +3 -0
- data/rate_throttle_client.gemspec +34 -0
- metadata +158 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
|
|
1
|
+
---
|
2
|
+
SHA256:
|
3
|
+
metadata.gz: a98e4aae0d6770a20d784d63888ac487ad537bc27c2cb00127498a4afd41fb20
|
4
|
+
data.tar.gz: 9c6b04db20202345cbe2c9995c09fbd3b1f79521e5c33d5337851f0c93455d5e
|
5
|
+
SHA512:
|
6
|
+
metadata.gz: 5a32f254cb2c857c4e50cf83fdbece7e9c60c8031f9ffa1d5957e7778899e68e2ccc4565cf6a6cc96ef685a8af05681527de61ea7b0c80ac59077e904850b88a
|
7
|
+
data.tar.gz: 8a8e0cd1b0809ff00c51c2bd986e4f270a054c6ae1f99ef67ef2634b0e2cb8c659bb02727f9179c9767ad4bc22d3bd28f84333f559d140404b44ab93d0db3012
|
@@ -0,0 +1,81 @@
|
|
1
|
+
version: 2
|
2
|
+
references:
|
3
|
+
unit: &unit
|
4
|
+
run:
|
5
|
+
name: Run test suite
|
6
|
+
command: bundle exec rake
|
7
|
+
restore: &restore
|
8
|
+
restore_cache:
|
9
|
+
keys:
|
10
|
+
- v1-dependencies-{{ checksum "Gemfile.lock" }}
|
11
|
+
# fallback to using the latest cache if no exact match is found
|
12
|
+
- v1-dependencies-
|
13
|
+
bundle: &bundle
|
14
|
+
run:
|
15
|
+
name: install dependencies
|
16
|
+
command: |
|
17
|
+
bundle install --jobs=4 --retry=3 --path vendor/bundle
|
18
|
+
save: &save
|
19
|
+
save_cache:
|
20
|
+
paths:
|
21
|
+
- ./vendor/bundle
|
22
|
+
key: v1-dependencies-{{ checksum "Gemfile.lock" }}
|
23
|
+
jobs:
|
24
|
+
"ruby-2.3":
|
25
|
+
docker:
|
26
|
+
- image: circleci/ruby:2.5
|
27
|
+
steps:
|
28
|
+
- checkout
|
29
|
+
- <<: *bundle
|
30
|
+
- <<: *save
|
31
|
+
- <<: *unit
|
32
|
+
"ruby-2.4":
|
33
|
+
docker:
|
34
|
+
- image: circleci/ruby:2.5
|
35
|
+
steps:
|
36
|
+
- checkout
|
37
|
+
- <<: *bundle
|
38
|
+
- <<: *save
|
39
|
+
- <<: *unit
|
40
|
+
"ruby-2.5":
|
41
|
+
docker:
|
42
|
+
- image: circleci/ruby:2.5
|
43
|
+
steps:
|
44
|
+
- checkout
|
45
|
+
- <<: *bundle
|
46
|
+
- <<: *save
|
47
|
+
- <<: *unit
|
48
|
+
"ruby-2.6":
|
49
|
+
docker:
|
50
|
+
- image: circleci/ruby:2.6
|
51
|
+
steps:
|
52
|
+
- checkout
|
53
|
+
- <<: *bundle
|
54
|
+
- <<: *save
|
55
|
+
- <<: *unit
|
56
|
+
"ruby-2.7":
|
57
|
+
docker:
|
58
|
+
- image: circleci/ruby:2.7
|
59
|
+
steps:
|
60
|
+
- checkout
|
61
|
+
- <<: *bundle
|
62
|
+
- <<: *save
|
63
|
+
- <<: *unit
|
64
|
+
"jruby":
|
65
|
+
docker:
|
66
|
+
- image: circleci/jruby:latest
|
67
|
+
steps:
|
68
|
+
- checkout
|
69
|
+
- <<: *bundle
|
70
|
+
- <<: *save
|
71
|
+
- <<: *unit
|
72
|
+
|
73
|
+
workflows:
|
74
|
+
version: 2
|
75
|
+
build:
|
76
|
+
jobs:
|
77
|
+
- "ruby-2.3"
|
78
|
+
- "ruby-2.4"
|
79
|
+
- "ruby-2.5"
|
80
|
+
- "ruby-2.6"
|
81
|
+
- "ruby-2.7"
|
@@ -0,0 +1,10 @@
|
|
1
|
+
name: Check Changelog
|
2
|
+
on: [pull_request]
|
3
|
+
jobs:
|
4
|
+
build:
|
5
|
+
runs-on: ubuntu-latest
|
6
|
+
steps:
|
7
|
+
- uses: actions/checkout@v1
|
8
|
+
- name: Check that CHANGELOG is touched
|
9
|
+
run: |
|
10
|
+
cat $GITHUB_EVENT_PATH | jq .pull_request.title | grep -i '\[\(\(changelog skip\)\|\(ci skip\)\)\]' || git diff remotes/origin/${{ github.base_ref }} --name-only | grep CHANGELOG.md
|
data/.gitignore
ADDED
data/.travis.yml
ADDED
data/CODE_OF_CONDUCT.md
ADDED
@@ -0,0 +1,74 @@
|
|
1
|
+
# Contributor Covenant Code of Conduct
|
2
|
+
|
3
|
+
## Our Pledge
|
4
|
+
|
5
|
+
In the interest of fostering an open and welcoming environment, we as
|
6
|
+
contributors and maintainers pledge to making participation in our project and
|
7
|
+
our community a harassment-free experience for everyone, regardless of age, body
|
8
|
+
size, disability, ethnicity, gender identity and expression, level of experience,
|
9
|
+
nationality, personal appearance, race, religion, or sexual identity and
|
10
|
+
orientation.
|
11
|
+
|
12
|
+
## Our Standards
|
13
|
+
|
14
|
+
Examples of behavior that contributes to creating a positive environment
|
15
|
+
include:
|
16
|
+
|
17
|
+
* Using welcoming and inclusive language
|
18
|
+
* Being respectful of differing viewpoints and experiences
|
19
|
+
* Gracefully accepting constructive criticism
|
20
|
+
* Focusing on what is best for the community
|
21
|
+
* Showing empathy towards other community members
|
22
|
+
|
23
|
+
Examples of unacceptable behavior by participants include:
|
24
|
+
|
25
|
+
* The use of sexualized language or imagery and unwelcome sexual attention or
|
26
|
+
advances
|
27
|
+
* Trolling, insulting/derogatory comments, and personal or political attacks
|
28
|
+
* Public or private harassment
|
29
|
+
* Publishing others' private information, such as a physical or electronic
|
30
|
+
address, without explicit permission
|
31
|
+
* Other conduct which could reasonably be considered inappropriate in a
|
32
|
+
professional setting
|
33
|
+
|
34
|
+
## Our Responsibilities
|
35
|
+
|
36
|
+
Project maintainers are responsible for clarifying the standards of acceptable
|
37
|
+
behavior and are expected to take appropriate and fair corrective action in
|
38
|
+
response to any instances of unacceptable behavior.
|
39
|
+
|
40
|
+
Project maintainers have the right and responsibility to remove, edit, or
|
41
|
+
reject comments, commits, code, wiki edits, issues, and other contributions
|
42
|
+
that are not aligned to this Code of Conduct, or to ban temporarily or
|
43
|
+
permanently any contributor for other behaviors that they deem inappropriate,
|
44
|
+
threatening, offensive, or harmful.
|
45
|
+
|
46
|
+
## Scope
|
47
|
+
|
48
|
+
This Code of Conduct applies both within project spaces and in public spaces
|
49
|
+
when an individual is representing the project or its community. Examples of
|
50
|
+
representing a project or community include using an official project e-mail
|
51
|
+
address, posting via an official social media account, or acting as an appointed
|
52
|
+
representative at an online or offline event. Representation of a project may be
|
53
|
+
further defined and clarified by project maintainers.
|
54
|
+
|
55
|
+
## Enforcement
|
56
|
+
|
57
|
+
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
58
|
+
reported by contacting the project team at richard.schneeman+foo@gmail.com. All
|
59
|
+
complaints will be reviewed and investigated and will result in a response that
|
60
|
+
is deemed necessary and appropriate to the circumstances. The project team is
|
61
|
+
obligated to maintain confidentiality with regard to the reporter of an incident.
|
62
|
+
Further details of specific enforcement policies may be posted separately.
|
63
|
+
|
64
|
+
Project maintainers who do not follow or enforce the Code of Conduct in good
|
65
|
+
faith may face temporary or permanent repercussions as determined by other
|
66
|
+
members of the project's leadership.
|
67
|
+
|
68
|
+
## Attribution
|
69
|
+
|
70
|
+
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
71
|
+
available at [https://contributor-covenant.org/version/1/4][version]
|
72
|
+
|
73
|
+
[homepage]: https://contributor-covenant.org
|
74
|
+
[version]: https://contributor-covenant.org/version/1/4/
|
data/Gemfile
ADDED
data/LICENSE.txt
ADDED
@@ -0,0 +1,21 @@
|
|
1
|
+
The MIT License (MIT)
|
2
|
+
|
3
|
+
Copyright (c) 2020 schneems
|
4
|
+
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
7
|
+
in the Software without restriction, including without limitation the rights
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
10
|
+
furnished to do so, subject to the following conditions:
|
11
|
+
|
12
|
+
The above copyright notice and this permission notice shall be included in
|
13
|
+
all copies or substantial portions of the Software.
|
14
|
+
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
21
|
+
THE SOFTWARE.
|
data/README.md
ADDED
@@ -0,0 +1,156 @@
|
|
1
|
+
# RateThrottleClient
|
2
|
+
|
3
|
+
Rate limiting is for servers, rate throttling is for clients. This library implements a number of strategies for handling rate throttling on the client and a methodology for comparing performance of those clients in simulated environments. Essentially, we don't just give you the code to rate throttle, we also give you the information to help you figure out the best strategy to rate throttle as well.
|
4
|
+
|
5
|
+
## Installation
|
6
|
+
|
7
|
+
Add this line to your application's Gemfile:
|
8
|
+
|
9
|
+
```ruby
|
10
|
+
gem 'rate_throttle_client'
|
11
|
+
```
|
12
|
+
|
13
|
+
And then execute:
|
14
|
+
|
15
|
+
$ bundle install
|
16
|
+
|
17
|
+
Or install it yourself as:
|
18
|
+
|
19
|
+
$ gem install rate_throttle_client
|
20
|
+
|
21
|
+
## Usage
|
22
|
+
|
23
|
+
Wrap requests to an API endpoint using one of the provided rate throttling classes:
|
24
|
+
|
25
|
+
```ruby
|
26
|
+
throttle = RateThrottleClient::ExponentialIncreaseProportionalRemainingDecrease.new
|
27
|
+
|
28
|
+
response = throttle.call do
|
29
|
+
Excon.get("https://api.example.com")
|
30
|
+
end
|
31
|
+
```
|
32
|
+
|
33
|
+
If the server returns a `429` status (the HTTP code indicating that a server side rate limit has been reached) then the request will be retried according to the classes' strategy.
|
34
|
+
|
35
|
+
If you're not using Excon to build your API client, then you'll need to make sure the object returned to the block responds to `status` (returning the status code) and a `headers` method.
|
36
|
+
|
37
|
+
### Config
|
38
|
+
|
39
|
+
```ruby
|
40
|
+
RateThrottleClient.config do |config|
|
41
|
+
config.log_block = ->(info){ puts "I get called when rate limiting is triggered #{info.sleep_for} #{info.request}" }
|
42
|
+
config.max_limit = 4500.to_f # Maximum number of requests per hour
|
43
|
+
config.multiplier = 1.2 # When rate limiting happens, this is amount to the sleep value is increased by
|
44
|
+
end
|
45
|
+
```
|
46
|
+
|
47
|
+
## Strategies
|
48
|
+
|
49
|
+
This library has a few strategies you can choose between:
|
50
|
+
|
51
|
+
- RateThrottleClient::ExponentialBackoff
|
52
|
+
- RateThrottleClient::ExponentialIncreaseGradualDecrease
|
53
|
+
- RateThrottleClient::ExponentialIncreaseProportionalDecrease
|
54
|
+
- RateThrottleClient::ExponentialIncreaseProportionalRemainingDecrease
|
55
|
+
|
56
|
+
To choose, you need to understand what makes a "good" throttling algorithm, and then you need some benchmarks.
|
57
|
+
|
58
|
+
## What Makes a Good Rate Throttle strategy?
|
59
|
+
|
60
|
+
- Minimize retry ratio: For example if every 50 successful requests, the client hits a rate limited request the ratio of retries is 1/50 or 2%. Why minimize this value? It takes CPU and Network resources to make requests that fail, if the client is making requests that are being limited, it's using resources that could be better spent somewhere else. The server also benefits as it spends less time dealing with rate limiting.
|
61
|
+
- Minimize standard deviation of request count across the system: If there are two clients and one client is throttling by sleeping for 100 seconds and the other is throttling for 1 second, the distribution of requests are not equitable. Ideally over time each client might go up or down, but both would see a median of 50 seconds of sleep time. Why? If processes in a system have a high variance, one process is starved for API resources. It then becomes difficult to balance or optimize otherworkloads. When a client is stuck waiting on the API, ideally it can perform other operations (for example in other threads). If one process is using 100% of CPU and slamming the API and other is using 1% of CPU and barely touching the API, it is difficult to balance the workloads.
|
62
|
+
- Minimize sleep/wait time: Retry ratio can be improved artificially by choosing high sleep times. In the real world consumers don't want to wait longer than absolutely necessarry. While a client might be able to "work steal" while it is sleeping/waiting, there's not guarantee that's the case. Essentially assume that any amount of time spent sleeping over the minimum amount of time required is wasted. This value is calculateable, but that calculation requires complete information of the distributed system.
|
63
|
+
- At high workload it should be able to consume all available requests: If a server allows 100,000 requests in a day then a client should be capable of making 100,000 requests. If the rate limiting algorithm only allows it to make 100 requests it would have low retry ratio but high wait time.
|
64
|
+
- Handle a change in work load to either slow down or speed up rate throttling: If the workload is light, then clients should not wait/sleep much. If workload is heavy, then clients should sleep/wait enough. The algorithm should adjust to a changing workload as quickly as possible.
|
65
|
+
|
66
|
+
## Benchmarks
|
67
|
+
|
68
|
+
These benchmarks are generated by running `rake bench` against the simulated "GCRA" rate limiting server. Which throttle strategy you use depends on your needs.
|
69
|
+
|
70
|
+
**Lower values are better**
|
71
|
+
|
72
|
+
### RateThrottleClient::ExponentialBackoff results (duration: 30.0 minutes, multiplier: 1.2)
|
73
|
+
|
74
|
+
```
|
75
|
+
Avg retry rate: 80.41 %
|
76
|
+
Max sleep time: 46.72 seconds
|
77
|
+
Stdev Request Count: 147.84
|
78
|
+
|
79
|
+
Raw max_sleep_vals: [46.72, 46.72, 46.72, 46.72, 46.72, 46.50, 46.50, 46.50, 46.50, 46.50]
|
80
|
+
Raw retry_ratios: [0.79, 0.79, 0.81, 0.82, 0.80, 0.80, 0.79, 0.81, 0.82, 0.81]
|
81
|
+
Raw request_counts: [1317.00, 1314.00, 1015.00, 963.00, 1254.00, 1133.00, 1334.00, 1025.00, 1024.00, 1036.00]
|
82
|
+
```
|
83
|
+
|
84
|
+
```
|
85
|
+
Time to clear workload (4500 requests, starting_sleep: 1s):
|
86
|
+
76.18 seconds
|
87
|
+
```
|
88
|
+
|
89
|
+
### RateThrottleClient::ExponentialIncreaseGradualDecrease results (duration: 30.0 minutes, multiplier: 1.2)
|
90
|
+
|
91
|
+
```
|
92
|
+
Avg retry rate: 40.56 %
|
93
|
+
Max sleep time: 139.91 seconds
|
94
|
+
Stdev Request Count: 867.73
|
95
|
+
|
96
|
+
Raw max_sleep_vals: [110.25, 110.25, 110.25, 110.25, 110.25, 139.91, 139.91, 139.91, 139.91, 139.91]
|
97
|
+
Raw retry_ratios: [0.46, 0.37, 0.38, 0.37, 0.39, 0.40, 0.41, 0.35, 0.37, 0.57]
|
98
|
+
Raw request_counts: [48.00, 57.00, 56.00, 49.00, 282.00, 85.00, 83.00, 79.00, 2821.00, 37.00]
|
99
|
+
```
|
100
|
+
|
101
|
+
```
|
102
|
+
Time to clear workload (4500 requests, starting_sleep: 1s):
|
103
|
+
65.50 seconds
|
104
|
+
```
|
105
|
+
|
106
|
+
### RateThrottleClient::ExponentialIncreaseProportionalDecrease results (duration: 30.0 minutes, multiplier: 1.2)
|
107
|
+
|
108
|
+
```
|
109
|
+
Avg retry rate: 3.66 %
|
110
|
+
Max sleep time: 17.31 seconds
|
111
|
+
Stdev Request Count: 101.94
|
112
|
+
|
113
|
+
Raw max_sleep_vals: [17.31, 17.31, 17.31, 17.31, 17.31, 17.21, 17.21, 17.21, 17.21, 17.21]
|
114
|
+
Raw retry_ratios: [0.01, 0.07, 0.03, 0.05, 0.06, 0.01, 0.07, 0.01, 0.03, 0.03]
|
115
|
+
Raw request_counts: [343.00, 123.00, 223.00, 144.00, 128.00, 348.00, 116.00, 383.00, 194.00, 203.00]
|
116
|
+
```
|
117
|
+
|
118
|
+
```
|
119
|
+
Time to clear workload (4500 requests, starting_sleep: 1s):
|
120
|
+
489.24 seconds
|
121
|
+
```
|
122
|
+
|
123
|
+
### RateThrottleClient::ExponentialIncreaseProportionalRemainingDecrease results (duration: 30.0 minutes, multiplier: 1.2)
|
124
|
+
|
125
|
+
```
|
126
|
+
Avg retry rate: 3.07 %
|
127
|
+
Max sleep time: 17.32 seconds
|
128
|
+
Stdev Request Count: 78.44
|
129
|
+
|
130
|
+
Raw max_sleep_vals: [12.14, 12.14, 12.14, 12.14, 12.14, 17.32, 17.32, 17.32, 17.32, 17.32]
|
131
|
+
Raw retry_ratios: [0.03, 0.02, 0.01, 0.02, 0.03, 0.03, 0.02, 0.05, 0.04, 0.07]
|
132
|
+
Raw request_counts: [196.00, 269.00, 386.00, 302.00, 239.00, 197.00, 265.00, 150.00, 187.00, 118.00]
|
133
|
+
```
|
134
|
+
|
135
|
+
```
|
136
|
+
Time to clear workload (4500 requests, starting_sleep: 1s):
|
137
|
+
66.92 seconds
|
138
|
+
```
|
139
|
+
|
140
|
+
## Development
|
141
|
+
|
142
|
+
After checking out the repo, run `bin/setup` to install dependencies. Then, run `rake test` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment.
|
143
|
+
|
144
|
+
To install this gem onto your local machine, run `bundle exec rake install`. To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and tags, and push the `.gem` file to [rubygems.org](https://rubygems.org).
|
145
|
+
|
146
|
+
## Contributing
|
147
|
+
|
148
|
+
Bug reports and pull requests are welcome on GitHub at https://github.com/[USERNAME]/rate_throttle_client. This project is intended to be a safe, welcoming space for collaboration, and contributors are expected to adhere to the [code of conduct](https://github.com/[USERNAME]/rate_throttle_client/blob/master/CODE_OF_CONDUCT.md).
|
149
|
+
|
150
|
+
## License
|
151
|
+
|
152
|
+
The gem is available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT).
|
153
|
+
|
154
|
+
## Code of Conduct
|
155
|
+
|
156
|
+
Everyone interacting in the RateThrottleClient project's codebases, issue trackers, chat rooms and mailing lists is expected to follow the [code of conduct](https://github.com/[USERNAME]/rate_throttle_client/blob/master/CODE_OF_CONDUCT.md).
|
data/Rakefile
ADDED
@@ -0,0 +1,58 @@
|
|
1
|
+
require "bundler/gem_tasks"
|
2
|
+
require "rake/testtask"
|
3
|
+
|
4
|
+
Rake::TestTask.new(:test) do |t|
|
5
|
+
t.libs << "test"
|
6
|
+
t.libs << "lib"
|
7
|
+
t.test_files = FileList["test/**/*_test.rb"]
|
8
|
+
end
|
9
|
+
|
10
|
+
task :default => :test
|
11
|
+
|
12
|
+
|
13
|
+
$LOAD_PATH.unshift File.expand_path("../lib", __dir__)
|
14
|
+
require "rate_throttle_client"
|
15
|
+
require 'rate_throttle_client/demo'
|
16
|
+
|
17
|
+
RateThrottleClient.config do |config|
|
18
|
+
config.log_block = ->(info){ }
|
19
|
+
end
|
20
|
+
|
21
|
+
MINUTE = 60
|
22
|
+
task :bench do
|
23
|
+
duration = 30 * MINUTE
|
24
|
+
clients = [
|
25
|
+
RateThrottleClient::ExponentialBackoff,
|
26
|
+
RateThrottleClient::ExponentialIncreaseGradualDecrease,
|
27
|
+
RateThrottleClient::ExponentialIncreaseProportionalDecrease,
|
28
|
+
RateThrottleClient::ExponentialIncreaseProportionalRemainingDecrease
|
29
|
+
]
|
30
|
+
clients.each do |klass|
|
31
|
+
begin
|
32
|
+
client = klass.new
|
33
|
+
demo = RateThrottleClient::Demo.new(client: client, duration: duration, time_scale: 10)
|
34
|
+
demo.call
|
35
|
+
ensure
|
36
|
+
demo.print_results
|
37
|
+
end
|
38
|
+
|
39
|
+
begin
|
40
|
+
workload = 4500
|
41
|
+
starting_sleep = 1
|
42
|
+
before_time = Time.now
|
43
|
+
rackup_file = Pathname.new(__dir__).join("lib/rate_throttle_client/servers/decrease_only/config.ru")
|
44
|
+
|
45
|
+
client = klass.new(starting_sleep_for: starting_sleep)
|
46
|
+
demo = RateThrottleClient::Demo.new(client: client, time_scale: 10, starting_limit: 4500, duration: duration, remaining_stop_under: 10, rackup_file: rackup_file)
|
47
|
+
demo.call
|
48
|
+
diff = Time.now - before_time
|
49
|
+
ensure
|
50
|
+
puts
|
51
|
+
puts "```"
|
52
|
+
puts "Time to clear workload (#{workload} requests, starting_sleep: #{starting_sleep}s):"
|
53
|
+
puts "#{"%.2f" % diff} seconds"
|
54
|
+
puts "```"
|
55
|
+
puts
|
56
|
+
end
|
57
|
+
end
|
58
|
+
end
|
data/bin/console
ADDED
@@ -0,0 +1,14 @@
|
|
1
|
+
#!/usr/bin/env ruby
|
2
|
+
|
3
|
+
require "bundler/setup"
|
4
|
+
require "rate_throttle_client"
|
5
|
+
|
6
|
+
# You can add fixtures and/or initialization code here to make experimenting
|
7
|
+
# with your gem easier. You can also use a different console, if you like.
|
8
|
+
|
9
|
+
# (If you use this, don't forget to add pry to your Gemfile!)
|
10
|
+
# require "pry"
|
11
|
+
# Pry.start
|
12
|
+
|
13
|
+
require "irb"
|
14
|
+
IRB.start(__FILE__)
|
data/bin/setup
ADDED
@@ -0,0 +1,22 @@
|
|
1
|
+
require "rate_throttle_client/version"
|
2
|
+
require "rate_throttle_client/info"
|
3
|
+
|
4
|
+
require 'thread'
|
5
|
+
|
6
|
+
module RateThrottleClient
|
7
|
+
class Error < StandardError; end
|
8
|
+
class << self
|
9
|
+
attr_accessor :multiplier, :min_sleep, :max_limit, :log_block
|
10
|
+
end
|
11
|
+
self.log_block = ->(info) { puts "RateThrottleClient: sleep_for=#{info.sleep_for}" }
|
12
|
+
self.max_limit = 4500.to_f
|
13
|
+
self.min_sleep = 3600/max_limit
|
14
|
+
self.multiplier = 1.2
|
15
|
+
|
16
|
+
def self.config
|
17
|
+
yield self
|
18
|
+
end
|
19
|
+
end
|
20
|
+
|
21
|
+
require_relative 'rate_throttle_client/clients/base.rb'
|
22
|
+
Dir[File.dirname(__FILE__) + '/rate_throttle_client/clients/*.rb'].each { |file| require file }
|
Binary file
|
@@ -0,0 +1,18 @@
|
|
1
|
+
module RateThrottleClient
|
2
|
+
# Standard interface for Client classes
|
3
|
+
# Don't abuse this power
|
4
|
+
class Base
|
5
|
+
attr_accessor :log, :min_sleep, :multiplier, :sleep_for
|
6
|
+
|
7
|
+
def initialize(log: nil, min_sleep: nil, starting_sleep_for: 0, multiplier: nil)
|
8
|
+
@log = log || RateThrottleClient.log_block
|
9
|
+
@min_sleep = min_sleep || RateThrottleClient.min_sleep
|
10
|
+
@multiplier = multiplier || RateThrottleClient.multiplier
|
11
|
+
@sleep_for = starting_sleep_for
|
12
|
+
end
|
13
|
+
|
14
|
+
def jitter(val)
|
15
|
+
val * rand(0.0..0.1)
|
16
|
+
end
|
17
|
+
end
|
18
|
+
end
|
@@ -0,0 +1,26 @@
|
|
1
|
+
module RateThrottleClient
|
2
|
+
# Actual exponential backoff class with some extra jazz so it reports
|
3
|
+
# when sleep goes back to zero
|
4
|
+
#
|
5
|
+
# Essentially it doesn't throttle at all until it hits a 429 then it exponentially
|
6
|
+
# throttles every repeatedly limited request. When it hits a successful request it stops
|
7
|
+
# rate throttling again.
|
8
|
+
class ExponentialBackoff < Base
|
9
|
+
def call(&block)
|
10
|
+
sleep_for = @min_sleep
|
11
|
+
|
12
|
+
while (req = yield) && req.status == 429
|
13
|
+
@log.call(Info.new(sleep_for: sleep_for, request: req))
|
14
|
+
sleep(sleep_for + jitter(sleep_for))
|
15
|
+
|
16
|
+
sleep_for *= @multiplier
|
17
|
+
end
|
18
|
+
|
19
|
+
# This no-op is needed to record that we've come out of a
|
20
|
+
# retry state for the Demo class.
|
21
|
+
sleep(0)
|
22
|
+
|
23
|
+
req
|
24
|
+
end
|
25
|
+
end
|
26
|
+
end
|
@@ -0,0 +1,34 @@
|
|
1
|
+
module RateThrottleClient
|
2
|
+
class ExponentialIncreaseGradualDecrease < Base
|
3
|
+
attr_accessor :decrease
|
4
|
+
|
5
|
+
def initialize(*args, decrease: nil, **kargs)
|
6
|
+
super(*args, **kargs)
|
7
|
+
@decrease = decrease || @min_sleep
|
8
|
+
end
|
9
|
+
|
10
|
+
def call(&block)
|
11
|
+
sleep_for = @sleep_for
|
12
|
+
sleep(sleep_for + jitter(sleep_for))
|
13
|
+
|
14
|
+
while (req = yield) && req.status == 429
|
15
|
+
sleep_for += @min_sleep
|
16
|
+
|
17
|
+
@log.call(Info.new(sleep_for: sleep_for, request: req))
|
18
|
+
sleep(sleep_for + jitter(sleep_for))
|
19
|
+
|
20
|
+
sleep_for *= @multiplier
|
21
|
+
end
|
22
|
+
|
23
|
+
if sleep_for >= @decrease
|
24
|
+
sleep_for -= @decrease
|
25
|
+
else
|
26
|
+
sleep_for = 0
|
27
|
+
end
|
28
|
+
|
29
|
+
@sleep_for = sleep_for
|
30
|
+
|
31
|
+
req
|
32
|
+
end
|
33
|
+
end
|
34
|
+
end
|
@@ -0,0 +1,36 @@
|
|
1
|
+
module RateThrottleClient
|
2
|
+
class ExponentialIncreaseProportionalDecrease < Base
|
3
|
+
attr_accessor :decrease_divisor
|
4
|
+
|
5
|
+
def initialize(*args, decrease_divisor: nil, **kargs)
|
6
|
+
super(*args, **kargs)
|
7
|
+
@decrease_divisor = (decrease_divisor || RateThrottleClient.max_limit).to_f
|
8
|
+
end
|
9
|
+
|
10
|
+
def call(&block)
|
11
|
+
sleep_for = @sleep_for
|
12
|
+
sleep(sleep_for + jitter(sleep_for))
|
13
|
+
|
14
|
+
while (req = yield) && req.status == 429
|
15
|
+
sleep_for += @min_sleep
|
16
|
+
|
17
|
+
@log.call(Info.new(sleep_for: sleep_for, request: req))
|
18
|
+
sleep(sleep_for + jitter(sleep_for))
|
19
|
+
|
20
|
+
sleep_for *= @multiplier
|
21
|
+
end
|
22
|
+
|
23
|
+
decrease_value = sleep_for / @decrease_divisor
|
24
|
+
|
25
|
+
if sleep_for >= decrease_value
|
26
|
+
sleep_for -= decrease_value
|
27
|
+
else
|
28
|
+
sleep_for = 0
|
29
|
+
end
|
30
|
+
|
31
|
+
@sleep_for = sleep_for
|
32
|
+
|
33
|
+
req
|
34
|
+
end
|
35
|
+
end
|
36
|
+
end
|
@@ -0,0 +1,43 @@
|
|
1
|
+
module RateThrottleClient
|
2
|
+
class ExponentialIncreaseProportionalRemainingDecrease < Base
|
3
|
+
attr_accessor :decrease_divisor, :remaining_block
|
4
|
+
|
5
|
+
def initialize(*args, decrease_divisor: nil, remaining_block: nil, **kargs)
|
6
|
+
super(*args, **kargs)
|
7
|
+
@decrease_divisor = (decrease_divisor || RateThrottleClient.max_limit).to_f
|
8
|
+
@remaining_block = remaining_block || ->(req) {
|
9
|
+
req.headers["RateLimit-Remaining"].to_i
|
10
|
+
}
|
11
|
+
end
|
12
|
+
|
13
|
+
def call(&block)
|
14
|
+
sleep_for = @sleep_for
|
15
|
+
sleep(sleep_for + jitter(sleep_for))
|
16
|
+
|
17
|
+
while (req = yield) && req.status == 429
|
18
|
+
sleep_for += @min_sleep
|
19
|
+
|
20
|
+
@log.call(Info.new(sleep_for: sleep_for, request: req))
|
21
|
+
sleep(sleep_for + jitter(sleep_for))
|
22
|
+
|
23
|
+
sleep_for *= @multiplier
|
24
|
+
end
|
25
|
+
|
26
|
+
decrease_value = sleep_for * @remaining_block.call(req)
|
27
|
+
decrease_value /= @decrease_divisor
|
28
|
+
|
29
|
+
if sleep_for >= decrease_value
|
30
|
+
sleep_for -= decrease_value
|
31
|
+
else
|
32
|
+
sleep_for = 0
|
33
|
+
end
|
34
|
+
|
35
|
+
@sleep_for = sleep_for
|
36
|
+
|
37
|
+
req
|
38
|
+
end
|
39
|
+
|
40
|
+
def sleep_and_log(sleep_for: , request: )
|
41
|
+
end
|
42
|
+
end
|
43
|
+
end
|
@@ -0,0 +1,292 @@
|
|
1
|
+
require 'excon'
|
2
|
+
require 'pathname'
|
3
|
+
require 'fileutils'
|
4
|
+
require 'date'
|
5
|
+
require 'json'
|
6
|
+
require 'timecop'
|
7
|
+
require 'wait_for_it'
|
8
|
+
require 'enumerable/statistics'
|
9
|
+
|
10
|
+
Thread.abort_on_exception = true
|
11
|
+
|
12
|
+
# A class for simulating or "demoing" a rate throttle client
|
13
|
+
#
|
14
|
+
# Example:
|
15
|
+
#
|
16
|
+
# duration = 3600 # seconds in one hour
|
17
|
+
# client = ExponentialIncreaseSleepAndRemainingDecrease.new
|
18
|
+
# demo = RateThrottleDemo.new(client: client, stream_requests: true, duration: duration)
|
19
|
+
# demo.call
|
20
|
+
# demo.print_results
|
21
|
+
# # => max_sleep_val: [59.05, 80.58, 80.58, 80.58, 56.18, 56.18, 56.18, 59.05, 70.05, 70.05, 70.05, 59.15, 59.15, 59.15, 59.15, 70.05, 70.05, 59.15, 56.18, 80.58, 80.58, 59.05, 59.05, 59.05, 56.18]
|
22
|
+
# # => retry_ratio: [0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.02, 0.01, 0.01, 0.01, 0.00, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01]
|
23
|
+
# # => request_count: [3321.00, 1551.00, 2167.00, 2197.00, 1628.00, 1709.00, 1484.00, 3512.00, 1722.00, 2816.00, 3182.00, 2137.00, 4398.00, 2154.00, 2418.00, 2868.00, 2492.00, 2982.00, 1731.00, 2278.00, 1988.00, 4221.00, 3160.00, 2927.00, 2635.00]
|
24
|
+
#
|
25
|
+
# Arguments:
|
26
|
+
#
|
27
|
+
# Thread count can be controlled via the `thread_count` arguement, or the THREAD_COUNT env var (default is below).
|
28
|
+
# Process count can be controlled via the `process_count` arguement, or the PROCESS_COUNT env var (default is below).
|
29
|
+
# total number of clients is thread_count * process_count.
|
30
|
+
#
|
31
|
+
# Time scale can be controlled via the `time_scale` arguement, or the TIME_SCALE env var (default is below).
|
32
|
+
# The time scale value will speed up the simulation, for example `TIME_SCALE=10` means a 60 second simulation
|
33
|
+
# will complete in 6 minutes.
|
34
|
+
#
|
35
|
+
# The simulation will stop after `duration:` seconds.
|
36
|
+
# Outputting request logs to stdout can be enabled/disabled by setting `stream_requests`.
|
37
|
+
# The other way to stop a simulation is to specify `remaining_stop_under` when this value is set, the simulation
|
38
|
+
# will stop when the "remaining" limit count from the server is under this value.
|
39
|
+
#
|
40
|
+
# The starting "remaining" limit count in the server can be set via passing in the `starting_limit`, default is 0
|
41
|
+
# requests.
|
42
|
+
#
|
43
|
+
# Outputs:
|
44
|
+
#
|
45
|
+
# - Writes log outputs to stdout if `stream_requests` is true
|
46
|
+
# - Writes aggregate metrics of each client to an intermediate json file every @json_duration seconds. This is then later used to
|
47
|
+
# produce the data for `print_results`
|
48
|
+
# - Writes the last value the client slept for to a newline separated file every 1 (real time, not simulated) second. This is used to
|
49
|
+
# generate the charts using the `chart.rb` script.
|
50
|
+
module RateThrottleClient
  # Demo harness for benchmarking a rate-throttle client.
  #
  # Boots a fake rate-limited server (puma + a config.ru from ./servers),
  # forks `process_count` worker processes that each run `thread_count`
  # threads hammering the server through the client under test. While
  # running it:
  # - writes aggregate metrics per client thread to a JSON file every
  #   @json_duration seconds (consumed later by `print_results`)
  # - writes each thread's last sleep value to a chart-data file once per
  #   real (unscaled) second, for the chart.rb script
  class Demo
    MINUTE = 60
    THREAD_COUNT = ENV.fetch("THREAD_COUNT") { 5 }.to_i
    PROCESS_COUNT = ENV.fetch("PROCESS_COUNT") { 2 }.to_i
    DURATION = ENV.fetch("DURATION") { 30 }.to_i * MINUTE
    TIME_SCALE = ENV.fetch("TIME_SCALE", 1).to_f
    RACKUP_FILE = Pathname.new(__dir__).join("servers/gcra/config.ru")

    attr_reader :log_dir, :rackup_file

    # client:               the rate-throttle client instance to exercise
    # duration:             simulated seconds each thread keeps requesting
    # time_scale:           Timecop scale factor (sleeps are divided by it)
    # stream_requests:      when true, log each request's status to stdout
    # json_duration:        seconds between aggregate-metric JSON dumps
    # starting_limit:       initial RateLimit-Remaining the server reports
    # remaining_stop_under: when set, threads stop once the server reports
    #                       RateLimit-Remaining at or below this value
    def initialize(client:, thread_count: THREAD_COUNT, process_count: PROCESS_COUNT, duration: DURATION, log_dir: nil, time_scale: TIME_SCALE, stream_requests: false, json_duration: 30, rackup_file: RACKUP_FILE, starting_limit: 0, remaining_stop_under: nil)
      @client = client
      @thread_count = thread_count
      @process_count = process_count
      @duration = duration
      @time_scale = time_scale.to_f
      @stream_requests = stream_requests
      @rackup_file = rackup_file
      @starting_limit = starting_limit
      @remaining_stop_under = remaining_stop_under

      if log_dir
        @log_dir = Pathname.new(log_dir)
      else
        @log_dir = Pathname.new(__dir__).join("../../logs/clients/#{Time.now.strftime('%Y-%m-%d-%H-%M-%s-%N')}-#{client.class}")
      end

      @mutex = Mutex.new
      # BUG FIX: was hardcoded to `30`, silently ignoring the
      # `json_duration:` keyword argument.
      @json_duration = json_duration # seconds
      @port = UniquePort.call
      @threads = []
      @pids = []
      Timecop.scale(@time_scale)

      FileUtils.mkdir_p(@log_dir)
    end

    # Pretty-prints the aggregate metrics from `results` to `io` as a
    # fenced-code markdown report.
    def print_results(io = STDOUT)
      result_hash = self.results
      io.puts
      io.puts "### #{@client.class} results (duration: #{@duration/60.0} minutes, multiplier: #{@client.multiplier})"
      io.puts
      io.puts "```"
      io.puts "Avg retry rate: #{"%.2f" % (result_hash["retry_ratio"].mean * 100)} %"
      io.puts "Max sleep time: #{"%.2f" % result_hash["max_sleep_val"].max} seconds"
      io.puts "Stdev Request Count: #{"%.2f" % result_hash["request_count"].stdev}"
      io.puts
      result_hash.each do |key, value|
        io.puts "Raw #{key}s: [#{ value.map {|x| "%.2f" % x}.join(", ")}]"
      end
      io.puts "```"
    end

    # Reads every per-thread "*.json" metrics file in the log dir and
    # collects the values under each key into an array, e.g.
    # {"retry_ratio" => [...], "request_count" => [...], ...}
    def results
      result_hash = {}

      @log_dir.entries.map do |entry|
        @log_dir.join(entry)
      end.select do |file|
        file.file? && file.extname == ".json"
      end.sort.map do |file|
        JSON.parse(file.read)
      end.each do |json|
        json.each_key do |key|
          result_hash[key] ||= []
          result_hash[key] << json[key]
        end
      end

      result_hash
    end

    # Entry point: boots the fake server via puma (WaitForIt blocks until
    # puma logs its ready message), forks the worker processes, then waits
    # for them all to exit.
    def call
      WaitForIt.new("bundle exec puma #{@rackup_file.to_s} -p #{@port}", env: {"TIME_SCALE" => @time_scale.to_i.to_s, "STARTING_LIMIT" => @starting_limit.to_s}, wait_for: "Use Ctrl-C to stop") do |_spawn|
        @process_count.times.each do
          boot_process
        end

        @pids.map { |pid| Process.wait(pid) }
      end
    end

    # Forks one worker process that runs the client threads.
    private def boot_process
      @pids << fork do
        run_threads
      end
    end

    # Starts @thread_count client threads plus one chart-data thread that
    # samples each client thread's last sleep value once per real second.
    private def run_threads
      @thread_count.times.each do
        @threads << Thread.new do
          run_client_single
        end
      end

      # Chart support, print out the sleep value in 1 second increments to a file
      Thread.new do
        loop do
          @threads.each do |thread|
            sleep_for = thread.thread_variable_get("last_sleep_value") || 0

            File.open(@log_dir.join("#{Process.pid}:#{thread.object_id}-chart-data.txt"), 'a') do |f|
              f.puts(sleep_for)
            end
          end
          sleep 1 # time gets adjusted via TIME_SCALE later in time.rb
        end
      end

      @threads.map(&:join)
    end

    class TimeIsUpError < StandardError; end

    # One client-thread worker loop: repeatedly issues requests through the
    # throttled client until @duration elapses (or the remaining-limit stop
    # condition hits), dumping metrics every @json_duration seconds and once
    # more on the way out (ensure).
    private def run_client_single
      end_at_time = Time.now + @duration
      json_at_time = Time.now + @json_duration
      request_count = 0
      retry_count = 0

      monkey_patch_client_sleep
      loop do
        begin_time = Time.now
        break if begin_time > end_at_time

        if begin_time > json_at_time
          write_json_value(retry_count: retry_count, request_count: request_count, max_sleep_val: @client.max_sleep_val)
          json_at_time = begin_time + @json_duration
        end

        req = nil
        @client.call do
          request_count += 1

          req = make_request

          retry_count += 1 if req.status == 429

          stream_requests(req, retry_count: retry_count, request_count: request_count) if @stream_requests
          req
        end

        if @remaining_stop_under
          break if (req.headers["RateLimit-Remaining"].to_i <= @remaining_stop_under)
        end
      end
      stop_all_threads!

    rescue Excon::Error::Socket => e
      # Re-raised unchanged; kept explicit so socket failures are visibly
      # fatal rather than swallowed by a broader rescue added later.
      raise e
    rescue TimeIsUpError
      # Since the sleep time can be very high, we need a way to notify sleeping threads they can stop
      # When this exception is raised, do nothing and exit
    ensure
      write_json_value(retry_count: retry_count, request_count: request_count, max_sleep_val: @client.max_sleep_val)
    end

    # Patches the shared client instance (once, guarded by @mutex) so that
    # `sleep` records the requested value and is divided by the time scale,
    # and exposes `max_sleep_val` / `last_sleep` readers.
    private def monkey_patch_client_sleep
      @mutex.synchronize do
        if !@client.instance_variables.include?(:"@time_scale")
          def @client.sleep(val)
            @max_sleep_val = val if val > @max_sleep_val
            Thread.current.thread_variable_set("last_sleep_value", val)

            super val/@time_scale
          end

          def @client.max_sleep_val
            @max_sleep_val
          end

          def @client.last_sleep
            @last_sleep || 0
          end

          # BUG FIX: these two writes used to run on EVERY call, so each
          # thread that entered after the first reset @max_sleep_val to 0,
          # discarding the maximum recorded so far. They now run only on the
          # first (patching) call.
          @client.instance_variable_set(:"@time_scale", @time_scale)
          @client.instance_variable_set(:"@max_sleep_val", 0)
        end
      end
    end

    # Issues one GET against the local fake server; only 200 and 429 are
    # expected statuses. Retries forever on Excon timeouts.
    private def make_request
      req = Excon.get("http://localhost:#{@port}")

      raise "Got unexpected response #{req.status}. #{req.inspect}" if req.status != 200 && req.status != 429
      req
    rescue Excon::Error::Timeout => e
      puts e.inspect
      puts "retrying"
      retry
    end

    # Prints a one-line status summary of `request` to stdout (used when
    # @stream_requests is enabled).
    private def stream_requests(request, retry_count:, request_count:)
      status_string = String.new
      status_string << "#{Process.pid}##{Thread.current.object_id}: "
      status_string << "status=#{request.status} "
      status_string << "remaining=#{request.headers["RateLimit-Remaining"]} "
      status_string << "retry_count=#{retry_count} "
      status_string << "request_count=#{request_count} "
      status_string << "max_sleep_val=#{ sprintf("%.2f", @client.max_sleep_val) } "

      puts status_string
    end

    # Even though all clients might have reached their `end_time` they might be stuck in a long `sleep`.
    # This method signals to any threads that might be stuck in a `sleep` to stop via an exception that we raise
    # and catch.
    # (Renamed from the misspelled `stop_all_theads!`; the method is private
    # and only called from run_client_single above.)
    private def stop_all_threads!
      # In remaining_stop_under mode every thread stops itself, so nothing
      # to signal (hoisted out of the loop; was a per-iteration `next`).
      return if @remaining_stop_under

      @threads.each do |t|
        if t != Thread.current && t.backtrace_locations && t.backtrace_locations.first.label == "sleep"
          t.raise(TimeIsUpError)
        end
      end
    end

    # Dumps this thread's aggregate metrics to a JSON file keyed by pid and
    # thread id. Retries when interrupted by TimeIsUpError so the final
    # `ensure` write always lands.
    private def write_json_value(retry_count:, request_count:, max_sleep_val:)
      # BUG FIX: with request_count == 0 (possible on the ensure path before
      # any request completed) the ratio was 0/0.0 = NaN, and Hash#to_json
      # raises JSON::GeneratorError on NaN. Report 0.0 instead.
      retry_ratio = request_count.zero? ? 0.0 : retry_count / request_count.to_f

      results = {
        max_sleep_val: max_sleep_val,
        retry_ratio: retry_ratio,
        request_count: request_count
      }

      File.open(@log_dir.join("#{Process.pid}:#{Thread.current.object_id}.json"), 'w+') do |f|
        f.puts(results.to_json)
      end
    rescue TimeIsUpError
      retry
    end
  end

  require 'socket'

  # Finds a free TCP port by binding to port 0 and asking the OS which
  # ephemeral port it assigned.
  module UniquePort
    def self.call
      TCPServer.open('127.0.0.1', 0) do |server|
        server.connect_address.ip_port
      end
    end
  end
end
|
Binary file
|
@@ -0,0 +1,49 @@
|
|
1
|
+
require 'timecop'
require 'rate_throttle_client'

# Scale simulated time when the demo harness requests it.
# FIX: removed a redundant second `require 'timecop'` that lived inside
# this conditional — timecop is already required unconditionally above.
if ENV["TIME_SCALE"]
  Timecop.scale(ENV["TIME_SCALE"].to_f)
end
|
8
|
+
|
9
|
+
module RateThrottleClient
  # This server does not gain new requests over time
  # it's main purpose is to benchmark how long it takes to
  # clear a fixed sized workload
  class NullFakeServer

    # starting_limit: total number of requests the server will ever accept.
    def initialize(starting_limit: 0)
      @limit_left = starting_limit.to_f
      @mutex = Mutex.new
    end

    # Rack entry point (env is unused). Returns 200 while capacity remains,
    # 429 once the fixed budget is exhausted. The counter is guarded by a
    # mutex because puma serves requests from multiple threads.
    def call(_)
      headers = nil
      successful_request = false

      @mutex.synchronize do
        if @limit_left >= 1
          @limit_left -= 1
          successful_request = true
        end

        # FIX: Rack requires response header values to be Strings; these
        # were previously emitted as Integers.
        headers = { "RateLimit-Remaining" => [@limit_left.floor, 0].max.to_s, "RateLimit-Multiplier" => "1", "Content-Type" => "text/plain".freeze }
      end

      if !successful_request
        status = 429
        body = "!!!!! Nope !!!!!".freeze
      else
        status = 200
        body = "<3<3<3 Hello world <3<3<3".freeze
      end

      return [status, headers, [body]]
    end
  end
end
|
46
|
+
|
47
|
+
# Boot the fixed-budget fake server; the demo harness passes the budget
# through the STARTING_LIMIT environment variable.
run RateThrottleClient::NullFakeServer.new(starting_limit: ENV.fetch("STARTING_LIMIT", 0).to_i)
@@ -0,0 +1,11 @@
|
|
1
|
+
require_relative "gcra_fake_server.rb"

require 'timecop'

# Scale simulated time when the demo harness requests it.
# FIX: removed a redundant second `require 'timecop'` inside this
# conditional — timecop is already required unconditionally above.
if ENV["TIME_SCALE"]
  Timecop.scale(ENV["TIME_SCALE"].to_f)
end

starting_limit = ENV.fetch("STARTING_LIMIT", 0).to_i

run RateThrottleClient::GcraFakeServer.new(starting_limit: starting_limit)
|
@@ -0,0 +1,52 @@
|
|
1
|
+
require 'thread'
require 'rate_throttle_client'

if ENV["TIME_SCALE"]
  require 'timecop'
  Timecop.scale(ENV["TIME_SCALE"].to_f)
end

module RateThrottleClient
  # Rack app simulating a GCRA-style rate-limited API: the request budget
  # refills continuously at a rate that reaches RateThrottleClient.max_limit
  # over one hour, capped at max_limit.
  class GcraFakeServer
    # starting_limit: initial request budget.
    def initialize(starting_limit: 0)
      @limit_left = starting_limit.to_f
      @mutex = Mutex.new
      # Budget gained per second; the full limit refills in one hour.
      @rate_of_gain = RateThrottleClient.max_limit / 3600.to_f
      @max_requests = RateThrottleClient.max_limit
    end

    # Rack entry point (env is unused). Returns 200 and decrements the
    # budget when capacity remains, otherwise 429. All shared state is
    # guarded by a mutex because puma serves requests from multiple threads.
    def call(_)
      headers = nil
      successful_request = false

      @mutex.synchronize do
        # FIX(race): this lazy initialization previously ran outside the
        # mutex, so two threads could interleave the first-request setup.
        @last_request ||= Time.now

        # Refill the budget in proportion to time elapsed since the last
        # request, never exceeding the maximum.
        if @limit_left < @max_requests
          current_request = Time.now
          time_diff = current_request - @last_request
          @last_request = current_request

          @limit_left = [@limit_left + time_diff * @rate_of_gain, @max_requests].min
        end

        if @limit_left >= 1
          @limit_left -= 1
          successful_request = true
        end

        # FIX: Rack requires response header values to be Strings; these
        # were previously emitted as Integers.
        headers = { "RateLimit-Remaining" => [@limit_left.floor, 0].max.to_s, "RateLimit-Multiplier" => "1", "Content-Type" => "text/plain".freeze }
      end

      if !successful_request
        status = 429
        body = "!!!!! Nope !!!!!".freeze
      else
        status = 200
        body = "<3<3<3 Hello world <3<3<3".freeze
      end

      return [status, headers, [body]]
    end
  end
end
|
@@ -0,0 +1,34 @@
|
|
1
|
+
# Gem packaging manifest for rate_throttle_client.
require_relative 'lib/rate_throttle_client/version'

Gem::Specification.new do |spec|
  spec.name = "rate_throttle_client"
  spec.version = RateThrottleClient::VERSION
  spec.authors = ["schneems"]
  spec.email = ["richard.schneeman+foo@gmail.com"]

  spec.summary = %q{Don't error, instead, sleep, and retry}
  spec.description = %q{https://twitter.com/schneems/status/1138899094137651200}
  spec.homepage = "https://github.com/zombocom/rate_throttle_client"
  spec.license = "MIT"
  spec.required_ruby_version = Gem::Requirement.new(">= 2.3.0")

  spec.metadata["homepage_uri"] = spec.homepage
  spec.metadata["source_code_uri"] = "https://github.com/zombocom/rate_throttle_client"
  spec.metadata["changelog_uri"] = "https://github.com/zombocom/rate_throttle_client/blob/master/CHANGELOG.md"

  # Specify which files should be added to the gem when it is released.
  # The `git ls-files -z` loads the files in the RubyGem that have been added into git.
  # NOTE(review): this requires building the gem from a git checkout; it
  # yields an empty file list outside one.
  spec.files = Dir.chdir(File.expand_path('..', __FILE__)) do
    `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(test|spec|features)/}) }
  end
  spec.bindir = "exe"
  spec.executables = spec.files.grep(%r{^exe/}) { |f| File.basename(f) }
  spec.require_paths = ["lib"]

  # Development-only dependencies: used by the demo/benchmark tooling
  # (puma, excon, wait_for_it, timecop, enumerable-statistics) and tests.
  spec.add_development_dependency "wait_for_it"
  spec.add_development_dependency "m"
  spec.add_development_dependency "puma"
  spec.add_development_dependency "timecop"
  spec.add_development_dependency "excon"
  spec.add_development_dependency "enumerable-statistics"
end
|
metadata
ADDED
@@ -0,0 +1,158 @@
|
|
1
|
+
--- !ruby/object:Gem::Specification
|
2
|
+
name: rate_throttle_client
|
3
|
+
version: !ruby/object:Gem::Version
|
4
|
+
version: 0.1.0
|
5
|
+
platform: ruby
|
6
|
+
authors:
|
7
|
+
- schneems
|
8
|
+
autorequire:
|
9
|
+
bindir: exe
|
10
|
+
cert_chain: []
|
11
|
+
date: 2020-04-11 00:00:00.000000000 Z
|
12
|
+
dependencies:
|
13
|
+
- !ruby/object:Gem::Dependency
|
14
|
+
name: wait_for_it
|
15
|
+
requirement: !ruby/object:Gem::Requirement
|
16
|
+
requirements:
|
17
|
+
- - ">="
|
18
|
+
- !ruby/object:Gem::Version
|
19
|
+
version: '0'
|
20
|
+
type: :development
|
21
|
+
prerelease: false
|
22
|
+
version_requirements: !ruby/object:Gem::Requirement
|
23
|
+
requirements:
|
24
|
+
- - ">="
|
25
|
+
- !ruby/object:Gem::Version
|
26
|
+
version: '0'
|
27
|
+
- !ruby/object:Gem::Dependency
|
28
|
+
name: m
|
29
|
+
requirement: !ruby/object:Gem::Requirement
|
30
|
+
requirements:
|
31
|
+
- - ">="
|
32
|
+
- !ruby/object:Gem::Version
|
33
|
+
version: '0'
|
34
|
+
type: :development
|
35
|
+
prerelease: false
|
36
|
+
version_requirements: !ruby/object:Gem::Requirement
|
37
|
+
requirements:
|
38
|
+
- - ">="
|
39
|
+
- !ruby/object:Gem::Version
|
40
|
+
version: '0'
|
41
|
+
- !ruby/object:Gem::Dependency
|
42
|
+
name: puma
|
43
|
+
requirement: !ruby/object:Gem::Requirement
|
44
|
+
requirements:
|
45
|
+
- - ">="
|
46
|
+
- !ruby/object:Gem::Version
|
47
|
+
version: '0'
|
48
|
+
type: :development
|
49
|
+
prerelease: false
|
50
|
+
version_requirements: !ruby/object:Gem::Requirement
|
51
|
+
requirements:
|
52
|
+
- - ">="
|
53
|
+
- !ruby/object:Gem::Version
|
54
|
+
version: '0'
|
55
|
+
- !ruby/object:Gem::Dependency
|
56
|
+
name: timecop
|
57
|
+
requirement: !ruby/object:Gem::Requirement
|
58
|
+
requirements:
|
59
|
+
- - ">="
|
60
|
+
- !ruby/object:Gem::Version
|
61
|
+
version: '0'
|
62
|
+
type: :development
|
63
|
+
prerelease: false
|
64
|
+
version_requirements: !ruby/object:Gem::Requirement
|
65
|
+
requirements:
|
66
|
+
- - ">="
|
67
|
+
- !ruby/object:Gem::Version
|
68
|
+
version: '0'
|
69
|
+
- !ruby/object:Gem::Dependency
|
70
|
+
name: excon
|
71
|
+
requirement: !ruby/object:Gem::Requirement
|
72
|
+
requirements:
|
73
|
+
- - ">="
|
74
|
+
- !ruby/object:Gem::Version
|
75
|
+
version: '0'
|
76
|
+
type: :development
|
77
|
+
prerelease: false
|
78
|
+
version_requirements: !ruby/object:Gem::Requirement
|
79
|
+
requirements:
|
80
|
+
- - ">="
|
81
|
+
- !ruby/object:Gem::Version
|
82
|
+
version: '0'
|
83
|
+
- !ruby/object:Gem::Dependency
|
84
|
+
name: enumerable-statistics
|
85
|
+
requirement: !ruby/object:Gem::Requirement
|
86
|
+
requirements:
|
87
|
+
- - ">="
|
88
|
+
- !ruby/object:Gem::Version
|
89
|
+
version: '0'
|
90
|
+
type: :development
|
91
|
+
prerelease: false
|
92
|
+
version_requirements: !ruby/object:Gem::Requirement
|
93
|
+
requirements:
|
94
|
+
- - ">="
|
95
|
+
- !ruby/object:Gem::Version
|
96
|
+
version: '0'
|
97
|
+
description: https://twitter.com/schneems/status/1138899094137651200
|
98
|
+
email:
|
99
|
+
- richard.schneeman+foo@gmail.com
|
100
|
+
executables: []
|
101
|
+
extensions: []
|
102
|
+
extra_rdoc_files: []
|
103
|
+
files:
|
104
|
+
- ".circleci/config.yml"
|
105
|
+
- ".github/workflows/check_changelog.yml"
|
106
|
+
- ".gitignore"
|
107
|
+
- ".travis.yml"
|
108
|
+
- CHANGELOG.md
|
109
|
+
- CODE_OF_CONDUCT.md
|
110
|
+
- Gemfile
|
111
|
+
- LICENSE.txt
|
112
|
+
- README.md
|
113
|
+
- Rakefile
|
114
|
+
- bin/console
|
115
|
+
- bin/setup
|
116
|
+
- lib/rate_throttle_client.rb
|
117
|
+
- lib/rate_throttle_client/.DS_Store
|
118
|
+
- lib/rate_throttle_client/clients/base.rb
|
119
|
+
- lib/rate_throttle_client/clients/exponential_backoff.rb
|
120
|
+
- lib/rate_throttle_client/clients/exponential_increase_gradual_decrease.rb
|
121
|
+
- lib/rate_throttle_client/clients/exponential_increase_proportional_decrease.rb
|
122
|
+
- lib/rate_throttle_client/clients/exponential_increase_proportional_remaining_decrease.rb
|
123
|
+
- lib/rate_throttle_client/clients/null.rb
|
124
|
+
- lib/rate_throttle_client/demo.rb
|
125
|
+
- lib/rate_throttle_client/info.rb
|
126
|
+
- lib/rate_throttle_client/servers/.DS_Store
|
127
|
+
- lib/rate_throttle_client/servers/decrease_only/config.ru
|
128
|
+
- lib/rate_throttle_client/servers/gcra/config.ru
|
129
|
+
- lib/rate_throttle_client/servers/gcra/gcra_fake_server.rb
|
130
|
+
- lib/rate_throttle_client/version.rb
|
131
|
+
- rate_throttle_client.gemspec
|
132
|
+
homepage: https://github.com/zombocom/rate_throttle_client
|
133
|
+
licenses:
|
134
|
+
- MIT
|
135
|
+
metadata:
|
136
|
+
homepage_uri: https://github.com/zombocom/rate_throttle_client
|
137
|
+
source_code_uri: https://github.com/zombocom/rate_throttle_client
|
138
|
+
changelog_uri: https://github.com/zombocom/rate_throttle_client/blob/master/CHANGELOG.md
|
139
|
+
post_install_message:
|
140
|
+
rdoc_options: []
|
141
|
+
require_paths:
|
142
|
+
- lib
|
143
|
+
required_ruby_version: !ruby/object:Gem::Requirement
|
144
|
+
requirements:
|
145
|
+
- - ">="
|
146
|
+
- !ruby/object:Gem::Version
|
147
|
+
version: 2.3.0
|
148
|
+
required_rubygems_version: !ruby/object:Gem::Requirement
|
149
|
+
requirements:
|
150
|
+
- - ">="
|
151
|
+
- !ruby/object:Gem::Version
|
152
|
+
version: '0'
|
153
|
+
requirements: []
|
154
|
+
rubygems_version: 3.1.2
|
155
|
+
signing_key:
|
156
|
+
specification_version: 4
|
157
|
+
summary: Don't error, instead, sleep, and retry
|
158
|
+
test_files: []
|