heroku_hatchet 6.0.0 → 7.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.circleci/config.yml +2 -0
- data/CHANGELOG.md +16 -1
- data/README.md +772 -174
- data/bin/hatchet +4 -2
- data/hatchet.gemspec +1 -2
- data/hatchet.json +2 -1
- data/hatchet.lock +2 -0
- data/lib/hatchet.rb +1 -2
- data/lib/hatchet/api_rate_limit.rb +6 -17
- data/lib/hatchet/app.rb +137 -30
- data/lib/hatchet/config.rb +1 -1
- data/lib/hatchet/git_app.rb +27 -1
- data/lib/hatchet/reaper.rb +159 -56
- data/lib/hatchet/reaper/app_age.rb +49 -0
- data/lib/hatchet/reaper/reaper_throttle.rb +55 -0
- data/lib/hatchet/shell_throttle.rb +71 -0
- data/lib/hatchet/test_run.rb +2 -1
- data/lib/hatchet/version.rb +1 -1
- data/spec/hatchet/allow_failure_git_spec.rb +27 -2
- data/spec/hatchet/app_spec.rb +145 -6
- data/spec/hatchet/ci_spec.rb +10 -1
- data/spec/hatchet/lock_spec.rb +12 -1
- data/spec/unit/reaper_spec.rb +153 -0
- data/spec/unit/shell_throttle.rb +28 -0
- metadata +16 -23
data/lib/hatchet/reaper/app_age.rb
ADDED
@@ -0,0 +1,49 @@
+module Hatchet
+  class Reaper
+    # Class for figuring out how old a given time is relative to another time
+    #
+    # Expects inputs as a DateTime instance
+    #
+    # Example:
+    #
+    #   time_now = DateTime.parse("2020-07-28T14:40:00Z")
+    #   age = AppAge.new(created_at: DateTime.parse("2020-07-28T14:40:00Z"), time_now: time_now, ttl_minutes: 1)
+    #   age.in_minutes => 0.0
+    #   age.too_young_to_die? # => true
+    #   age.can_delete? # => false
+    #   age.sleep_for_ttl #=> 60
+    class AppAge
+      SECONDS_IN_A_DAY = 24 * 60 * 60
+
+      attr_reader :ttl_minutes
+
+      def initialize(created_at:, ttl_minutes:, time_now: DateTime.now.new_offset(0))
+        @seconds_ago = date_time_diff_in_seconds(time_now, created_at)
+        @ttl_minutes = ttl_minutes
+        @ttl_seconds = ttl_minutes * 60
+      end
+
+      def date_time_diff_in_seconds(now, whence)
+        (now - whence) * SECONDS_IN_A_DAY
+      end
+
+      def too_young_to_die?
+        !can_delete?
+      end
+
+      def can_delete?
+        @seconds_ago > @ttl_seconds
+      end
+
+      def sleep_for_ttl
+        return 0 if can_delete?
+
+        @ttl_seconds - @seconds_ago
+      end
+
+      def in_minutes
+        (@seconds_ago / 60.0).round(2)
+      end
+    end
+  end
+end
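For orientation, here is a small usage sketch of the new AppAge class. The timestamps and TTL below are illustrative values chosen to exercise the methods shown above; they are not taken from the reaper itself.

require "date"
require "hatchet"

# Hypothetical scenario: an app created 90 minutes ago with a 60 minute TTL
time_now   = DateTime.parse("2020-07-28T16:10:00Z")
created_at = DateTime.parse("2020-07-28T14:40:00Z")

age = Hatchet::Reaper::AppAge.new(created_at: created_at, time_now: time_now, ttl_minutes: 60)

age.in_minutes        # => 90.0
age.can_delete?       # => true, 90 minutes is older than the 60 minute TTL
age.too_young_to_die? # => false
age.sleep_for_ttl     # => 0, no need to wait before reaping this app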
data/lib/hatchet/reaper/reaper_throttle.rb
ADDED
@@ -0,0 +1,55 @@
+module Hatchet
+  class Reaper
+    # This class retains and increments a sleep value between executions
+    #
+    # Every time we pause, we increase the duration of the pause 2x. If we
+    # do not sleep for long enough then we will burn API requests that we don't need to make.
+    #
+    # To help prevent sleeping for too long, the reaper will sleep for a maximum amount of time
+    # equal to the age_sleep_for_ttl. If that happens, it's likely a fairly large value and the
+    # internal incremental value can be reset
+    #
+    # Example:
+    #
+    #   reaper_throttle = ReaperThrottle.new(initial_sleep: 2)
+    #   reaper_throttle.call(max_sleep: 5) do |sleep_for|
+    #     puts sleep_for # => 2
+    #   end
+    #   reaper_throttle.call(max_sleep: 5) do |sleep_for|
+    #     puts sleep_for # => 4
+    #   end
+    #   reaper_throttle.call(max_sleep: 5) do |sleep_for|
+    #     puts sleep_for # => 5
+    #   end
+    #
+    #   # The throttle is now reset since it hit the max_sleep value
+    #
+    #   reaper_throttle.call(max_sleep: 5) do |sleep_for|
+    #     puts sleep_for # => 2
+    #   end
+    class ReaperThrottle
+      def initialize(initial_sleep: )
+        @initial_sleep = initial_sleep
+        @sleep_for = @initial_sleep
+      end
+
+      def call(max_sleep: )
+        raise "Must call with a block" unless block_given?
+
+        sleep_for = [@sleep_for, max_sleep].min
+
+        yield sleep_for
+
+        if sleep_for < @sleep_for
+          reset!
+        else
+          @sleep_for *= 2
+        end
+      end
+
+      def reset!
+        @sleep_for = @initial_sleep
+      end
+    end
+  end
+end
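The class comment above mentions capping the backoff at the app's sleep_for_ttl. A minimal sketch of how the two new classes could be combined follows; it assumes `app_created_at` is a DateTime you already have, and it is illustrative only, not the reaper's actual implementation.

throttle = Hatchet::Reaper::ReaperThrottle.new(initial_sleep: 2)
age      = Hatchet::Reaper::AppAge.new(created_at: app_created_at, ttl_minutes: 7 * 60)

# Back off 2, 4, 8, ... seconds between attempts, but never sleep past the
# point where the app's TTL expires (sleep_for_ttl is 0 once it is reapable).
throttle.call(max_sleep: age.sleep_for_ttl) do |sleep_for|
  sleep(sleep_for)
end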
data/lib/hatchet/shell_throttle.rb
ADDED
@@ -0,0 +1,71 @@
+module Hatchet
+  # A class for throttling non-http resources
+  #
+  # Non-http calls can be rate-limited, for example shell calls to `heroku run` and `git push heroku`;
+  # this class provides an easy interface to leverage the rate throttling behavior baked into `PlatformAPI`
+  # for calls that do not have a real associated web request
+  #
+  # Example:
+  #
+  #   output = ""
+  #   ShellThrottle.new(platform_api: @platform_api).call do
+  #     output = `git push heroku main`
+  #     throw(:throttle) if output.match?(/reached the API rate limit/)
+  #   end
+  #   puts output
+  #
+  # In this example `git push heroku main` will retry and back off until the output no longer matches `reached the API rate limit`.
+  #
+  class ShellThrottle
+    def initialize(platform_api: )
+      @platform_api = platform_api
+    end
+
+    def call
+      out = nil
+      PlatformAPI.rate_throttle.call do
+        catch(:throttle) do
+          out = yield
+          return
+        end
+
+        try_again
+      end
+      return out
+    end
+
+    private def success
+      FakeResponse.new(status: 200, remaining: remaining)
+    end
+
+    private def try_again
+      FakeResponse.new(status: 429, remaining: remaining)
+    end
+
+    private def remaining
+      @platform_api.rate_limit.info["remaining"]
+    end
+
+
+    # Helper class to be used along with the PlatformAPI.rate_throttle interface
+    # that expects a response object
+    #
+    # Example:
+    #
+    #   FakeResponse.new(status: 200, remaining: 2).status #=> 200
+    #   FakeResponse.new(status: 200, remaining: 2).headers["RateLimit-Remaining"] #=> 2
+    class FakeResponse
+      attr_reader :status, :headers
+
+      def initialize(status:, remaining: )
+        @status = status
+
+        @headers = {
+          "RateLimit-Remaining" => remaining,
+          "RateLimit-Multiplier" => 1,
+          "Content-Type" => "text/plain".freeze
+        }
+      end
+    end
+  end
+end
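Putting the class comment above into a self-contained sketch: the client lookup via PlatformAPI.connect_oauth and the HEROKU_API_KEY variable are assumptions for illustration only; inside hatchet the configured `platform_api` client is normally supplied for you.

require "platform-api"

api = PlatformAPI.connect_oauth(ENV["HEROKU_API_KEY"])

output = ""
Hatchet::ShellThrottle.new(platform_api: api).call do
  output = `git push heroku main 2>&1`
  # Throwing :throttle makes ShellThrottle hand a 429-style FakeResponse back
  # to PlatformAPI.rate_throttle, which sleeps and then re-runs this block.
  throw(:throttle) if output.match?(/reached the API rate limit/)
end
puts output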
data/lib/hatchet/test_run.rb
CHANGED
@@ -218,12 +218,13 @@ module Hatchet
         "Content-Type" => "application/json"
       }.merge(options[:headers] || {})
       options[:body] = JSON.generate(options[:body]) if options[:body]
+      options[:expects] << 429 if options[:expects]

       Hatchet::RETRIES.times.retry do
         PlatformAPI.rate_throttle.call do
           connection = Excon.new("https://api.heroku.com")

-
+          connection.request(options)
         end
       end
     end
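The added `options[:expects] << 429` line matters because of how Excon's `:expects` option behaves: any response status not in the list is raised as an error rather than returned. A rough illustration follows; the httpbin endpoint is only an example and not part of hatchet.

require "excon"

connection = Excon.new("https://httpbin.org")

# With 429 in :expects the rate-limited response comes back as a normal
# response object, so PlatformAPI.rate_throttle can see it and retry;
# without it, Excon raises before the throttle ever gets a look.
response = connection.request(method: :get, path: "/status/429", expects: [200, 429])
puts response.status # => 429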
data/lib/hatchet/version.rb
CHANGED
data/spec/hatchet/allow_failure_git_spec.rb
CHANGED
@@ -1,15 +1,40 @@
 require("spec_helper")

 describe "AllowFailureGitTest" do
+  describe "release failures" do
+    let(:release_fail_proc) {
+      Proc.new do
+        File.open("Procfile", "w+") do |f|
+          f.write <<~EOM
+            release: echo "failing on release" && exit 1
+          EOM
+        end
+      end
+    }
+
+    it "is marked as a failure if the release fails" do
+      expect {
+        Hatchet::GitApp.new("default_ruby", before_deploy: release_fail_proc).deploy {}
+      }.to(raise_error(Hatchet::App::FailedReleaseError))
+    end
+
+    it "works when failure is allowed" do
+      Hatchet::GitApp.new("default_ruby", before_deploy: release_fail_proc, allow_failure: true).deploy do |app|
+        expect(app.output).to match("failing on release")
+      end
+    end
+  end
+
   it "allowed failure" do
     Hatchet::GitApp.new("no_lockfile", allow_failure: true).deploy do |app|
-      puts app.output
       expect(app.deployed?).to be_falsey
       expect(app.output).to match("Gemfile.lock required")
     end
   end

   it "failure with no flag" do
-    expect {
+    expect {
+      Hatchet::GitApp.new("no_lockfile").deploy {}
+    }.to(raise_error(Hatchet::App::FailedDeploy))
   end
 end
data/spec/hatchet/app_spec.rb
CHANGED
@@ -1,6 +1,42 @@
 require("spec_helper")

 describe "AppTest" do
+  it "rate throttles `git push` " do
+    app = Hatchet::GitApp.new("default_ruby")
+    def app.git_push_heroku_yall
+      @_git_push_heroku_yall_call_count ||= 0
+      @_git_push_heroku_yall_call_count += 1
+      if @_git_push_heroku_yall_call_count >= 2
+        "Success"
+      else
+        raise Hatchet::App::FailedDeployError.new(self, "message", output: "Your account reached the API rate limit Please wait a few minutes before making new requests")
+      end
+    end
+
+    def app.sleep_called?; @sleep_called; end
+
+    def app.what_is_git_push_heroku_yall_call_count; @_git_push_heroku_yall_call_count; end
+    app.push_without_retry!
+
+    expect(app.what_is_git_push_heroku_yall_call_count).to be(2)
+  end
+
+  it "calls reaper if cannot create an app" do
+    app = Hatchet::App.new("default_ruby", buildpacks: [:default])
+    def app.heroku_api_create_app(*args); raise StandardError.new("made you look"); end
+
+    reaper = app.reaper
+
+    def reaper.cycle(app_exception_message: ); @app_exception_message = app_exception_message; end
+    def reaper.recorded_app_exception_message; @app_exception_message; end
+
+    expect {
+      app.create_app
+    }.to raise_error("made you look")
+
+    expect(reaper.recorded_app_exception_message).to match("made you look")
+  end
+
   it "app with default" do
     app = Hatchet::App.new("default_ruby", buildpacks: [:default])
     expect(app.buildpacks.first).to match("https://github.com/heroku/heroku-buildpack-ruby")
@@ -13,6 +49,37 @@ describe "AppTest" do
     expect(app.platform_api.app.info(app.name)["build_stack"]["name"]).to eq(stack)
   end

+  it "marks itself 'finished' when done in block mode" do
+    app = Hatchet::Runner.new("default_ruby")
+
+    def app.push_with_retry!; nil; end
+    app.deploy do |app|
+      expect(app.platform_api.app.info(app.name)["maintenance"]).to be_falsey
+    end
+
+    # After the app is updated, there's no guarantee it will still exist
+    # so we cannot rely on an api call to determine maintenance mode
+    app_update_info = app.instance_variable_get(:"@app_update_info")
+    expect(app_update_info["name"]).to eq(app.name)
+    expect(app_update_info["maintenance"]).to be_truthy
+  end
+
+  it "marks itself 'finished' when done in non-block mode" do
+    app = Hatchet::Runner.new("default_ruby")
+
+    def app.push_with_retry!; nil; end
+    app.deploy
+    expect(app.platform_api.app.info(app.name)["maintenance"]).to be_falsey
+
+    app.teardown!
+
+    # After the app is updated, there's no guarantee it will still exist
+    # so we cannot rely on an api call to determine maintenance mode
+    app_update_info = app.instance_variable_get(:"@app_update_info")
+    expect(app_update_info["name"]).to eq(app.name)
+    expect(app_update_info["maintenance"]).to be_truthy
+  end
+
   it "before deploy" do
     @called = false
     @dir = false
@@ -65,23 +132,95 @@ describe "AppTest" do
   end

   it "run" do
-
+    skip("Must set HATCHET_EXPENSIVE_MODE") unless ENV["HATCHET_EXPENSIVE_MODE"]
+
+    app = Hatchet::GitApp.new("default_ruby", run_multi: true)
     app.deploy do
       expect(app.run("ls -a Gemfile 'foo bar #baz'")).to match(/ls: cannot access 'foo bar #baz': No such file or directory\s+Gemfile/)
       expect((0 != $?.exitstatus)).to be_truthy
-
+
       app.run("ls erpderp", heroku: ({ "exit-code" => (Hatchet::App::SkipDefaultOption) }))
       expect((0 == $?.exitstatus)).to be_truthy
-
+
       app.run("ls erpderp", heroku: ({ "no-tty" => nil }))
       expect((0 != $?.exitstatus)).to be_truthy
-
+
       expect(app.run("echo \\$HELLO \\$NAME", raw: true, heroku: ({ "env" => "HELLO=ohai;NAME=world" }))).to match(/ohai world/)
-
+
       expect(app.run("echo \\$HELLO \\$NAME", raw: true, heroku: ({ "env" => "" }))).to_not match(/ohai world/)
-
+
       random_name = SecureRandom.hex
       expect(app.run("mkdir foo; touch foo/#{random_name}; ls foo/")).to match(/#{random_name}/)
     end
   end
+
+  class AtomicCount
+    attr_reader :value
+
+    def initialize(value)
+      @value = value
+      @mutex = Mutex.new
+    end
+
+    # In MRI the `+=` is not atomic, it is two separate virtual machine
+    # instructions. To protect against race conditions, we can lock with a mutex
+    def add(val)
+      @mutex.synchronize do
+        @value += val
+      end
+    end
+  end
+
+  it "run multi" do
+    skip("Must set HATCHET_EXPENSIVE_MODE") unless ENV["HATCHET_EXPENSIVE_MODE"]
+
+    @run_count = AtomicCount.new(0)
+    app = Hatchet::GitApp.new("default_ruby", run_multi: true)
+    app.deploy do
+      app.run_multi("ls") { |out| expect(out).to include("Gemfile"); @run_count.add(1) }
+      app.run_multi("blerg -v") { |_, status| expect(status.success?).to be_falsey; @run_count.add(1) }
+      app.run_multi("ruby -v") do |out, status|
+        expect(out).to include("ruby")
+        expect(status.success?).to be_truthy
+
+        @run_count.add(1)
+      end
+
+      expect(app.platform_api.formation.list(app.name).detect {|ps| ps["type"] == "web"}["size"].downcase).to_not eq("free")
+    end
+
+    # After the deploy block exits `teardown!` is called
+    # this ensures all `run_multi` commands have exited and the dyno should be scaled down
+    expect(@run_count.value).to eq(3)
+  end
+
+  describe "running concurrent tests in different examples works" do
+    # This is not a great pattern if we're running tests via a parallel runner
+    #
+    # For example this will be guaranteed to be called, not just once, but at least once for every process
+    # that needs to run a test. In the best case it will only fire once, in the worst case it will fire N times
+    # if there are N tests. It is effectively the same as a `before(:each)`
+    #
+    # Documented here: https://github.com/grosser/parallel_split_test/pull/22/files
+    before(:all) do
+      skip("Must set HATCHET_EXPENSIVE_MODE") unless ENV["HATCHET_EXPENSIVE_MODE"]
+
+      @app = Hatchet::GitApp.new("default_ruby", run_multi: true)
+      @app.deploy
+    end
+
+    after(:all) do
+      @app.teardown! if @app
+    end
+
+    it "test one" do
+      expect(@app.run("ls")).to include("Gemfile")
+      expect(@app.platform_api.formation.list(@app.name).detect {|ps| ps["type"] == "web"}["size"].downcase).to_not eq("free")
+    end
+
+    it "test two" do
+      expect(@app.run("ruby -v")).to include("ruby")
+      expect(@app.platform_api.formation.list(@app.name).detect {|ps| ps["type"] == "web"}["size"].downcase).to_not eq("free")
+    end
+  end
 end
data/spec/hatchet/ci_spec.rb
CHANGED
@@ -4,7 +4,8 @@ describe "CIFourTest" do
   it "error with bad app" do
     string = SecureRandom.hex

-    Hatchet::GitApp.new("default_ruby")
+    app = Hatchet::GitApp.new("default_ruby")
+    app.run_ci do |test_run|
       expect(test_run.output).to_not match(string)
       expect(test_run.output).to match("Installing rake")

@@ -14,7 +15,15 @@ describe "CIFourTest" do
       expect(test_run.output).to match(string)
       expect(test_run.output).to match("Using rake")
       expect(test_run.output).to_not match("Installing rake")
+
+      expect(app.platform_api.app.info(app.name)["maintenance"]).to be_falsey
     end
+
+    # After the app is updated, there's no guarantee it will still exist
+    # so we cannot rely on an api call to determine maintenance mode
+    app_update_info = app.instance_variable_get(:"@app_update_info")
+    expect(app_update_info["name"]).to eq(app.name)
+    expect(app_update_info["maintenance"]).to be_truthy
   end

   it "error with bad app" do