autoscaler 0.9.3 → 0.11.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +11 -2
- data/README.md +19 -13
- data/examples/complex.rb +3 -3
- data/examples/simple.rb +2 -2
- data/lib/autoscaler/binary_scaling_strategy.rb +1 -1
- data/lib/autoscaler/heroku_platform_scaler.rb +84 -0
- data/lib/autoscaler/ignore_scheduled_and_retrying.rb +5 -0
- data/lib/autoscaler/linear_scaling_strategy.rb +1 -1
- data/lib/autoscaler/sidekiq.rb +2 -2
- data/lib/autoscaler/sidekiq/client.rb +1 -1
- data/lib/autoscaler/sidekiq/entire_queue_system.rb +10 -0
- data/lib/autoscaler/sidekiq/sleep_wait_server.rb +2 -2
- data/lib/autoscaler/sidekiq/specified_queue_system.rb +10 -0
- data/lib/autoscaler/sidekiq/thread_server.rb +90 -0
- data/lib/autoscaler/version.rb +1 -1
- data/spec/autoscaler/binary_scaling_strategy_spec.rb +2 -2
- data/spec/autoscaler/counter_cache_memory_spec.rb +3 -3
- data/spec/autoscaler/counter_cache_redis_spec.rb +6 -6
- data/spec/autoscaler/delayed_shutdown_spec.rb +4 -4
- data/spec/autoscaler/heroku_platform_scaler_spec.rb +47 -0
- data/spec/autoscaler/heroku_scaler_spec.rb +8 -8
- data/spec/autoscaler/ignore_scheduled_and_retrying_spec.rb +4 -4
- data/spec/autoscaler/linear_scaling_strategy_spec.rb +13 -13
- data/spec/autoscaler/sidekiq/activity_spec.rb +4 -4
- data/spec/autoscaler/sidekiq/client_spec.rb +5 -5
- data/spec/autoscaler/sidekiq/entire_queue_system_spec.rb +11 -11
- data/spec/autoscaler/sidekiq/sleep_wait_server_spec.rb +21 -21
- data/spec/autoscaler/sidekiq/specified_queue_system_spec.rb +10 -10
- data/spec/autoscaler/sidekiq/thread_server_spec.rb +44 -0
- data/spec/spec_helper.rb +4 -2
- data/spec/test_system.rb +6 -0
- metadata +71 -15
- data/lib/autoscaler/sidekiq/celluloid_monitor.rb +0 -68
- data/lib/autoscaler/sidekiq/monitor_middleware_adapter.rb +0 -46
- data/spec/autoscaler/sidekiq/celluloid_monitor_spec.rb +0 -39
- data/spec/autoscaler/sidekiq/monitor_middleware_adapter_spec.rb +0 -16
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: d220489df83d3b499d5ba3e90f02359f106ed92e
+  data.tar.gz: f8c2289b5dab2e7b3f29c23b1d8f89657d372ad6
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 56359da2856356597efaeaf4be2b4c4efa222132f1d4b78a5a8ef0eec1cb3185afee195f419305c5a6d0549ba3f96018ef106cdb8a27c0843ef31c8f55ed1e67
+  data.tar.gz: 8d69c218da237c24f6ec31643bc557212725262afcb991eb8df353eec6717e02b68584a08b2a82b085511e9080494f5d99fc80413645775cc43ced40632dd129
data/CHANGELOG.md
CHANGED
@@ -1,9 +1,18 @@
 # Changelog
 
+## 0.11.0
+
+- Replace celluloid monitor with thread based middleware for Sidekiq 4.0
+
 ## 0.10.0
-
--
+
+- Require Sidekiq 3.5
+- You may use `HerokuPlatformScaler` and `HEROKU_ACCESS_TOKEN` in place of `HerokuScaler` and `HEROKU_API_KEY`
 - QueueSystem#workers returns the number of engaged SK processes.
+- Linear Scaling Strategy will not scale down past the number of active workers. Assumes a 1-1 SK process/dyno mapping.
+- Calls the Sidekiq quiet API when shutting down
+- Count workers currently running (Joel Van Horn)
+- Update gems and use RSpec expect syntax (giviger)
 
 ## 0.9.0
 
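A minimal sketch of the `HerokuScaler` → `HerokuPlatformScaler` migration noted in the changelog above. The `'worker'` process type and the explicit ENV lookups are illustrative, and the commented-out "before" lines describe a hypothetical prior setup rather than content of this diff.

    # Before (heroku-api gem, HEROKU_API_KEY) -- hypothetical prior setup
    # require 'autoscaler/heroku_scaler'
    # scaler = Autoscaler::HerokuScaler.new('worker', ENV['HEROKU_API_KEY'], ENV['HEROKU_APP'])

    # After (platform-api gem, HEROKU_ACCESS_TOKEN)
    require 'autoscaler/heroku_platform_scaler'
    scaler = Autoscaler::HerokuPlatformScaler.new('worker', ENV['HEROKU_ACCESS_TOKEN'], ENV['HEROKU_APP'])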
data/README.md
CHANGED
@@ -4,7 +4,7 @@
 
 ## Requirements
 
-Tested on Ruby 1.9.2 and Heroku Cedar stack.
+Tested on Ruby 2.1.7 and Heroku Cedar stack.
 
 ## Installation
 
@@ -12,36 +12,38 @@ Tested on Ruby 1.9.2 and Heroku Cedar stack.
 
 ## Getting Started
 
-This gem uses the [Heroku-Api](https://github.com/heroku/
+This gem uses the [Heroku Platform-Api](https://github.com/heroku/platform-api.rb) gem, which requires an OAuth token from Heroku. It will also need the Heroku app name. By default, these are specified through environment variables. You can also pass them to `HerokuPlatformScaler` explicitly.
 
-
+    HEROKU_ACCESS_TOKEN=.....
     HEROKU_APP=....
 
+Support is still present for [Heroku-Api](https://github.com/heroku/heroku.rb) via `HerokuScaler` and `HEROKU_API_KEY`, but may be removed in a future major version.
+
 Install the middleware in your `Sidekiq.configure_` blocks
 
     require 'autoscaler/sidekiq'
-    require 'autoscaler/
+    require 'autoscaler/heroku_platform_scaler'
 
     Sidekiq.configure_client do |config|
       config.client_middleware do |chain|
-        chain.add Autoscaler::Sidekiq::Client, 'default' => Autoscaler::
+        chain.add Autoscaler::Sidekiq::Client, 'default' => Autoscaler::HerokuPlatformScaler.new
       end
     end
 
     Sidekiq.configure_server do |config|
       config.server_middleware do |chain|
-        chain.add(Autoscaler::Sidekiq::Server, Autoscaler::
+        chain.add(Autoscaler::Sidekiq::Server, Autoscaler::HerokuPlatformScaler.new, 60) # 60 second timeout
       end
     end
 
 ## Limits and Challenges
 
--
+- HerokuPlatformScaler includes an attempt at a current-worker cache that may be an overcomplication, and doesn't work very well on the server
 - Multiple scale-down loops may be started, particularly if there are multiple jobs queued when the server comes up. Heroku seems to handle multiple scale-down commands well.
 - The scale-down monitor is triggered on job completion (and server middleware is only run around jobs), so if the server never processes any jobs, it won't turn off.
 - The retry and schedule lists are considered - if you schedule a long-running task, the process will not scale-down.
 - If background jobs trigger jobs in other scaled processes, please note you'll need `config.client_middleware` in your `Sidekiq.configure_server` block in order to scale-up.
-- Exceptions while calling the Heroku API are caught and printed by default. See `
+- Exceptions while calling the Heroku API are caught and printed by default. See `HerokuPlatformScaler#exception_handler` to override
 
 ## Experimental
 
@@ -57,24 +59,28 @@ You can pass a scaling strategy object instead of the timeout to the server middleware
 
 ### Working caching
 
-    scaler.counter_cache = Autoscaler::CounterCacheRedis(Sidekiq.method(:redis))
+    scaler.counter_cache = Autoscaler::CounterCacheRedis.new(Sidekiq.method(:redis))
 
 ## Tests
 
 The project is set up to run RSpec with Guard. It expects a redis instance on a custom port, which is started by the Guardfile.
 
-The
+The HerokuPlatformScaler is not tested by default because it makes live API requests. Specify `HEROKU_APP` and `HEROKU_ACCESS_TOKEN` on the command line, and then watch your app's logs.
 
-    HEROKU_APP=...
+    HEROKU_APP=... HEROKU_ACCESS_TOKEN=... guard
     heroku logs --app ...
 
 ## Authors
 
 Justin Love, [@wondible](http://twitter.com/wondible), [https://github.com/JustinLove](https://github.com/JustinLove)
 
-
+### Contributors
 
-
+- Benjamin Kudria [https://github.com/bkudria](https://github.com/bkudria)
+- Fix Peña [https://github.com/fixr](https://github.com/fixr)
+- Gabriel Givigier Guimarães [https://github.com/givigier](https://github.com/givigier)
+- Matt Anderson [https://github.com/tonkapark](https://github.com/tonkapark)
+- Thibaud Guillaume-Gentil [https://github.com/jilion](https://github.com/jilion)
 
 ## Licence
 
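The hunk header above mentions passing a scaling strategy object instead of the timeout to the server middleware; a minimal sketch under that assumption, using the `any_work?` query added in this release. The lambda and its keep-one-worker policy are illustrative, not part of the package.

    require 'sidekiq'
    require 'autoscaler/sidekiq'
    require 'autoscaler/heroku_platform_scaler'

    # Any object responding to call(system, event_idle_time) can replace the
    # numeric timeout; this one keeps a single worker while anything is
    # queued, scheduled, retrying, or running.
    keep_one_while_busy = lambda do |system, event_idle_time|
      system.any_work? ? 1 : 0
    end

    Sidekiq.configure_server do |config|
      config.server_middleware do |chain|
        chain.add(Autoscaler::Sidekiq::Server,
                  Autoscaler::HerokuPlatformScaler.new,
                  keep_one_while_busy)
      end
    end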
data/examples/complex.rb
CHANGED
@@ -1,15 +1,15 @@
 require 'sidekiq'
 require 'autoscaler/sidekiq'
-require 'autoscaler/
+require 'autoscaler/heroku_platform_scaler'
 
 heroku = nil
 if ENV['HEROKU_APP']
   heroku = {}
   scaleable = %w[default import] - (ENV['ALWAYS'] || '').split(' ')
   scaleable.each do |queue|
-    heroku[queue] = Autoscaler::
+    heroku[queue] = Autoscaler::HerokuPlatformScaler.new(
       queue,
-      ENV['
+      ENV['HEROKU_ACCESS_TOKEN'],
       ENV['HEROKU_APP'])
   end
 end
data/examples/simple.rb
CHANGED
@@ -1,10 +1,10 @@
 require 'sidekiq'
 require 'autoscaler/sidekiq'
-require 'autoscaler/
+require 'autoscaler/heroku_platform_scaler'
 
 heroku = nil
 if ENV['HEROKU_APP']
-  heroku = Autoscaler::
+  heroku = Autoscaler::HerokuPlatformScaler.new
   #heroku.exception_handler = lambda {|exception| MyApp.logger.error(exception)}
 end
 
data/lib/autoscaler/heroku_platform_scaler.rb
ADDED
@@ -0,0 +1,84 @@
+require 'platform-api'
+require 'autoscaler/counter_cache_memory'
+
+module Autoscaler
+  # Wraps the Heroku Platform API to provide just the interface that we need for scaling.
+  class HerokuPlatformScaler
+    # @param [String] type process type this scaler controls
+    # @param [String] token Heroku OAuth access token
+    # @param [String] app Heroku app name
+    def initialize(
+        type = 'worker',
+        token = ENV['HEROKU_ACCESS_TOKEN'],
+        app = ENV['HEROKU_APP'])
+      @client = PlatformAPI.connect_oauth(token)
+      @type = type
+      @app = app
+      @workers = CounterCacheMemory.new
+    end
+
+    attr_reader :app
+    attr_reader :type
+
+    # Read the current worker count (value may be cached)
+    # @return [Numeric] number of workers
+    def workers
+      @workers.counter {@workers.counter = heroku_get_workers}
+    end
+
+    # Set the number of workers (noop if workers the same)
+    # @param [Numeric] n number of workers
+    def workers=(n)
+      unknown = false
+      current = @workers.counter{unknown = true; 1}
+      if n != current || unknown
+        p "Scaling #{type} to #{n}"
+        heroku_set_workers(n)
+        @workers.counter = n
+      end
+    end
+
+    # Callable object which responds to exceptions during api calls
+    # @example
+    #   heroku.exception_handler = lambda {|exception| MyApp.logger.error(exception)}
+    #   heroku.exception_handler = lambda {|exception| raise}
+    #   # default
+    #   lambda {|exception|
+    #     p exception
+    #     puts exception.backtrace
+    #   }
+    attr_writer :exception_handler
+
+    # Object which supports #counter and #counter=
+    # Defaults to CounterCacheMemory
+    def counter_cache=(cache)
+      @workers = cache
+    end
+
+    private
+    attr_reader :client
+
+    def heroku_get_workers
+      client.formation.list(app)
+        .select {|item| item['type'] == type}
+        .map {|item| item['quantity']}
+        .reduce(0, &:+)
+    rescue Excon::Errors::Error => e
+      exception_handler.call(e)
+      0
+    end
+
+    def heroku_set_workers(n)
+      client.formation.update(app, type, {:quantity => n})
+    rescue Excon::Errors::Error, Heroku::API::Errors::Error => e
+      exception_handler.call(e)
+    end
+
+    def exception_handler
+      @exception_handler ||= lambda {|exception|
+        p exception
+        puts exception.backtrace
+      }
+    end
+  end
+end
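A usage sketch of the class added above, outside of any middleware. It assumes `HEROKU_ACCESS_TOKEN` and `HEROKU_APP` are set in the environment, and the `autoscaler/counter_cache_redis` require path is an assumption based on the `CounterCacheRedis` class named in the README.

    require 'sidekiq'
    require 'autoscaler/heroku_platform_scaler'
    require 'autoscaler/counter_cache_redis'

    # Reads HEROKU_ACCESS_TOKEN and HEROKU_APP from the environment by default.
    scaler = Autoscaler::HerokuPlatformScaler.new('worker')

    # Optional: share the cached worker count across processes (see the README's
    # "Working caching" section) instead of the default in-memory cache.
    scaler.counter_cache = Autoscaler::CounterCacheRedis.new(Sidekiq.method(:redis))

    scaler.workers      # current quantity of the 'worker' formation (may be cached)
    scaler.workers = 0  # scale the process type down; a no-op when the cached count already matches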
data/lib/autoscaler/ignore_scheduled_and_retrying.rb
CHANGED
@@ -1,9 +1,14 @@
 module Autoscaler
+  # - Strategy wrapper to ignore scheduled and retrying queues. Usage:
+  #   ``new_strategy = IgnoreScheduledAndRetrying.new(my_old_strategy)``
   class IgnoreScheduledAndRetrying
     def initialize(strategy)
       @strategy = strategy
     end
 
+    # @param [QueueSystem] system interface to the queuing system
+    # @param [Numeric] event_idle_time number of seconds since a job related event
+    # @return [Integer] target number of workers
     def call(system, event_idle_time)
       system.define_singleton_method(:scheduled) { 0 }
       system.define_singleton_method(:retrying) { 0 }
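A sketch of the wrapper described in the new comment above, composed with the gem's existing `DelayedShutdown` and `BinaryScalingStrategy`; the 60-second timeout is an assumed value.

    require 'autoscaler/binary_scaling_strategy'
    require 'autoscaler/delayed_shutdown'
    require 'autoscaler/ignore_scheduled_and_retrying'

    # Base behavior: keep a worker while there is work, then allow 60 idle seconds.
    base_strategy = Autoscaler::DelayedShutdown.new(Autoscaler::BinaryScalingStrategy.new, 60)

    # Wrapper: report the scheduled and retry sets as empty so a far-future job
    # does not keep the dyno alive; pass `strategy` to the server middleware in
    # place of the numeric timeout.
    strategy = Autoscaler::IgnoreScheduledAndRetrying.new(base_strategy)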
data/lib/autoscaler/sidekiq.rb
CHANGED
@@ -1,11 +1,11 @@
 require 'autoscaler/sidekiq/client'
-require 'autoscaler/sidekiq/
+require 'autoscaler/sidekiq/thread_server'
 
 module Autoscaler
   # namespace module for Sidekiq middlewares
   module Sidekiq
     # Sidekiq server middleware
     # Performs scale-down when the queue is empty
-    Server =
+    Server = ThreadServer
   end
 end
data/lib/autoscaler/sidekiq/client.rb
CHANGED
@@ -6,7 +6,7 @@ module Autoscaler
     # Sidekiq client middleware
     # Performs scale-up when items are queued and there are no workers running
     class Client
-      # @param [Hash] scalers map of queue(String) => scaler (e.g. {
+      # @param [Hash] scalers map of queue(String) => scaler (e.g. {HerokuPlatformScaler}).
       #   Which scaler to use for each sidekiq queue
       def initialize(scalers)
         @scalers = scalers
data/lib/autoscaler/sidekiq/entire_queue_system.rb
CHANGED
@@ -26,6 +26,16 @@ module Autoscaler
        ::Sidekiq::RetrySet.new.size
      end
 
+      # @return [Boolean] if any kind of work still needs to be done
+      def any_work?
+        queued > 0 || scheduled > 0 || retrying > 0 || workers > 0
+      end
+
+      # @return [Integer] total amount of work
+      def total_work
+        queued + scheduled + retrying + workers
+      end
+
       # @return [Array[String]]
       def queue_names
         sidekiq_queues.keys
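A hypothetical strategy sketch using the `total_work` query added above; the five-jobs-per-dyno ratio is an assumption for illustration only.

    # Target one dyno per five outstanding items, counting queued, scheduled,
    # retrying, and busy work as reported by the queue system.
    jobs_per_dyno = 5
    backlog_strategy = lambda do |system, event_idle_time|
      (system.total_work.to_f / jobs_per_dyno).ceil
    end

The boolean form, `any_work?`, is what the sleep/wait server's `pending_work?` check uses in the following hunk.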
data/lib/autoscaler/sidekiq/sleep_wait_server.rb
CHANGED
@@ -6,7 +6,7 @@ module Autoscaler
     # Sidekiq server middleware
     # Performs scale-down when the queue is empty
     class SleepWaitServer
-      # @param [scaler] scaler object that actually performs scaling operations (e.g. {
+      # @param [scaler] scaler object that actually performs scaling operations (e.g. {HerokuPlatformScaler})
       # @param [Numeric] timeout number of seconds to wait before shutdown
       # @param [Array[String]] specified_queues list of queues to monitor to determine if there is work left. Defaults to all sidekiq queues.
       def initialize(scaler, timeout, specified_queues = nil)
@@ -36,7 +36,7 @@ module Autoscaler
       attr_reader :system
 
       def pending_work?
-        system.
+        system.any_work?
       end
 
       def working!(queue, redis)
data/lib/autoscaler/sidekiq/specified_queue_system.rb
CHANGED
@@ -32,6 +32,16 @@ module Autoscaler
        count_set(::Sidekiq::RetrySet.new)
      end
 
+      # @return [Boolean] if any kind of work still needs to be done
+      def any_work?
+        queued > 0 || scheduled > 0 || retrying > 0 || workers > 0
+      end
+
+      # @return [Integer] total amount of work
+      def total_work
+        queued + scheduled + retrying + workers
+      end
+
       # @return [Array[String]]
       attr_reader :queue_names
 
data/lib/autoscaler/sidekiq/thread_server.rb
ADDED
@@ -0,0 +1,90 @@
+require 'autoscaler/sidekiq/queue_system'
+require 'autoscaler/binary_scaling_strategy'
+require 'autoscaler/delayed_shutdown'
+require 'thread'
+
+module Autoscaler
+  module Sidekiq
+    # Sidekiq server middleware
+    # spawns a thread to monitor the sidekiq server for scale-down
+    class ThreadServer
+      # @param [scaler] scaler object that actually performs scaling operations (e.g. {HerokuPlatformScaler})
+      # @param [Strategy,Numeric] timeout strategy object that determines target workers, or a timeout in seconds to be passed to {DelayedShutdown}+{BinaryScalingStrategy}
+      # @param [Array[String]] specified_queues list of queues to monitor to determine if there is work left. Defaults to all sidekiq queues.
+      def initialize(scaler, timeout, specified_queues = nil)
+        @scaler = scaler
+        @strategy = strategy(timeout)
+        @system = QueueSystem.new(specified_queues)
+        @mutex = Mutex.new
+        @done = false
+      end
+
+      # Sidekiq middleware api entry point
+      def call(worker, msg, queue, _ = nil)
+        yield
+      ensure
+        active_now!
+        wait_for_downscale
+      end
+
+      # Start the monitoring thread if it's not running
+      def wait_for_downscale
+        @thread ||= Thread.new do
+          begin
+            run
+          rescue
+            @thread = nil
+          end
+        end
+      end
+
+      # Thread core loop
+      # Periodically update the desired number of workers
+      # @param [Numeric] interval polling interval, mostly for testing
+      def run(interval = 15)
+        active_now!
+
+        workers = :unknown
+
+        begin
+          sleep(interval)
+          target_workers = @strategy.call(@system, idle_time)
+          workers = @scaler.workers = target_workers unless workers == target_workers
+        end while !@done && workers > 0
+        ::Sidekiq::ProcessSet.new.each(&:quiet!)
+      end
+
+      # Shut down the thread, pause until complete
+      def terminate
+        @done = true
+        if @thread
+          t = @thread
+          @thread = nil
+          t.value
+        end
+      end
+
+      private
+
+      def active_now!
+        @mutex.synchronize do
+          @activity = Time.now
+        end
+      end
+
+      def idle_time
+        @mutex.synchronize do
+          Time.now - @activity
+        end
+      end
+
+      def strategy(timeout)
+        if timeout.respond_to?(:call)
+          timeout
+        else
+          DelayedShutdown.new(BinaryScalingStrategy.new, timeout)
+        end
+      end
+    end
+  end
+end
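A sketch of wiring the new thread-based server middleware (the `Server` alias set in `lib/autoscaler/sidekiq.rb` above) into a Sidekiq 4 configure block; the 60-second timeout and the `['default']` queue list are assumed values.

    require 'sidekiq'
    require 'autoscaler/sidekiq'
    require 'autoscaler/heroku_platform_scaler'

    Sidekiq.configure_server do |config|
      config.server_middleware do |chain|
        # Autoscaler::Sidekiq::Server is now ThreadServer: after each job it
        # records activity and (re)starts the monitor thread, which polls the
        # strategy every 15 seconds, updates the Heroku formation, and quiets
        # the Sidekiq processes once the target worker count reaches zero.
        chain.add(Autoscaler::Sidekiq::Server, Autoscaler::HerokuPlatformScaler.new, 60, ['default'])
      end
    end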
data/lib/autoscaler/version.rb
CHANGED