autoscaler 0.9.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +81 -0
- data/Guardfile +12 -0
- data/README.md +81 -0
- data/examples/complex.rb +39 -0
- data/examples/simple.rb +28 -0
- data/lib/autoscaler.rb +5 -0
- data/lib/autoscaler/binary_scaling_strategy.rb +26 -0
- data/lib/autoscaler/counter_cache_memory.rb +35 -0
- data/lib/autoscaler/counter_cache_redis.rb +50 -0
- data/lib/autoscaler/delayed_shutdown.rb +44 -0
- data/lib/autoscaler/heroku_scaler.rb +81 -0
- data/lib/autoscaler/ignore_scheduled_and_retrying.rb +13 -0
- data/lib/autoscaler/linear_scaling_strategy.rb +39 -0
- data/lib/autoscaler/sidekiq.rb +11 -0
- data/lib/autoscaler/sidekiq/activity.rb +62 -0
- data/lib/autoscaler/sidekiq/celluloid_monitor.rb +67 -0
- data/lib/autoscaler/sidekiq/client.rb +50 -0
- data/lib/autoscaler/sidekiq/entire_queue_system.rb +41 -0
- data/lib/autoscaler/sidekiq/monitor_middleware_adapter.rb +46 -0
- data/lib/autoscaler/sidekiq/queue_system.rb +20 -0
- data/lib/autoscaler/sidekiq/sleep_wait_server.rb +51 -0
- data/lib/autoscaler/sidekiq/specified_queue_system.rb +48 -0
- data/lib/autoscaler/stub_scaler.rb +25 -0
- data/lib/autoscaler/version.rb +4 -0
- data/spec/autoscaler/binary_scaling_strategy_spec.rb +19 -0
- data/spec/autoscaler/counter_cache_memory_spec.rb +21 -0
- data/spec/autoscaler/counter_cache_redis_spec.rb +49 -0
- data/spec/autoscaler/delayed_shutdown_spec.rb +23 -0
- data/spec/autoscaler/heroku_scaler_spec.rb +49 -0
- data/spec/autoscaler/ignore_scheduled_and_retrying_spec.rb +33 -0
- data/spec/autoscaler/linear_scaling_strategy_spec.rb +85 -0
- data/spec/autoscaler/sidekiq/activity_spec.rb +34 -0
- data/spec/autoscaler/sidekiq/celluloid_monitor_spec.rb +39 -0
- data/spec/autoscaler/sidekiq/client_spec.rb +35 -0
- data/spec/autoscaler/sidekiq/entire_queue_system_spec.rb +65 -0
- data/spec/autoscaler/sidekiq/monitor_middleware_adapter_spec.rb +16 -0
- data/spec/autoscaler/sidekiq/sleep_wait_server_spec.rb +45 -0
- data/spec/autoscaler/sidekiq/specified_queue_system_spec.rb +63 -0
- data/spec/spec_helper.rb +16 -0
- data/spec/test_system.rb +11 -0
- metadata +187 -0
data/lib/autoscaler/ignore_scheduled_and_retrying.rb
@@ -0,0 +1,13 @@
+module Autoscaler
+  class IgnoreScheduledAndRetrying
+    def initialize(strategy)
+      @strategy = strategy
+    end
+
+    def call(system, event_idle_time)
+      system.define_singleton_method(:scheduled) { 0 }
+      system.define_singleton_method(:retrying) { 0 }
+      @strategy.call(system, event_idle_time)
+    end
+  end
+end
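IgnoreScheduledAndRetrying only stubs out `scheduled` and `retrying` on the system handed to it before delegating. A minimal sketch of how it might wrap another strategy from this same release (the wiring below is illustrative and not part of the diff):

```ruby
require 'autoscaler/ignore_scheduled_and_retrying'
require 'autoscaler/linear_scaling_strategy'

# Wrap a strategy so scheduled and retrying jobs are not counted as pending work.
strategy = Autoscaler::IgnoreScheduledAndRetrying.new(
  Autoscaler::LinearScalingStrategy.new(5, 25)
)

# `system` would be one of the QueueSystem objects defined later in this diff;
# the wrapper forces system.scheduled and system.retrying to return 0, then
# delegates to the inner strategy.
# target_workers = strategy.call(system, idle_seconds)
```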
data/lib/autoscaler/linear_scaling_strategy.rb
@@ -0,0 +1,39 @@
+module Autoscaler
+  # Strategies determine the target number of workers
+  # This strategy sets the number of workers to be proportional to the number of enqueued jobs.
+  class LinearScalingStrategy
+    #@param [integer] max_workers maximum number of workers to spin up.
+    #@param [integer] worker_capacity the amount of jobs one worker can handle
+    #@param [float] min_factor minimum work required to scale, as percentage of worker_capacity
+    def initialize(max_workers = 1, worker_capacity = 25, min_factor = 0)
+      @max_workers = max_workers # max # of workers we can scale to
+      @total_capacity = (@max_workers * worker_capacity).to_f # total capacity of max workers
+      min_capacity = [0, min_factor].max.to_f * worker_capacity # min capacity required to scale first worker
+      @min_capacity_percentage = min_capacity / @total_capacity # min percentage of total capacity
+    end
+
+    # @param [QueueSystem] system interface to the queuing system
+    # @param [Numeric] event_idle_time number of seconds since a job related event
+    # @return [Integer] target number of workers
+    def call(system, event_idle_time)
+      requested_capacity_percentage = total_work(system) / @total_capacity
+
+      # Scale requested capacity taking into account the minimum required
+      scale_factor = (requested_capacity_percentage - @min_capacity_percentage) / (@total_capacity - @min_capacity_percentage)
+      scale_factor = 0 if scale_factor.nan? # Handle DIVZERO
+
+      scaled_capacity_percentage = scale_factor * @total_capacity
+
+      ideal_workers = ([0, scaled_capacity_percentage].max * @max_workers).ceil
+      min_workers = [system.workers, ideal_workers].max # Don't scale down past number of currently engaged workers
+      max_workers = [min_workers, @max_workers].min # Don't scale up past number of max workers
+
+      return [min_workers, max_workers].min
+    end
+
+    private
+    def total_work(system)
+      system.queued + system.scheduled + system.retrying
+    end
+  end
+end
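A quick way to sanity-check the arithmetic in `call` is to feed the strategy a stubbed queue system; the stub below is illustrative and not part of the gem:

```ruby
require 'autoscaler/linear_scaling_strategy'

# Stand-in for a QueueSystem: responds to queued/scheduled/retrying/workers.
StubSystem = Struct.new(:queued, :scheduled, :retrying, :workers)

strategy = Autoscaler::LinearScalingStrategy.new(4, 25) # up to 4 workers, 25 jobs each
system   = StubSystem.new(60, 0, 0, 1)                  # 60 queued jobs, 1 worker busy

strategy.call(system, 0)
# => 3 with the arithmetic above: 60 of 100.0 total capacity, ceil(0.6 * 4) = 3,
#    clamped to at least the 1 engaged worker and at most the 4-worker maximum
```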
data/lib/autoscaler/sidekiq.rb
@@ -0,0 +1,11 @@
+require 'autoscaler/sidekiq/client'
+require 'autoscaler/sidekiq/monitor_middleware_adapter'
+
+module Autoscaler
+  # namespace module for Sidekiq middlewares
+  module Sidekiq
+    # Sidekiq server middleware
+    # Performs scale-down when the queue is empty
+    Server = MonitorMiddlewareAdapter
+  end
+end
data/lib/autoscaler/sidekiq/activity.rb
@@ -0,0 +1,62 @@
+require 'sidekiq'
+
+module Autoscaler
+  module Sidekiq
+    # Tracks activity timeouts using Sidekiq's redis connection
+    class Activity
+      # @param [Numeric] timeout number of seconds to wait before shutdown
+      def initialize(timeout, redis = ::Sidekiq.method(:redis))
+        @timeout = timeout
+        @redis = redis
+      end
+
+      # Record that a queue has activity
+      # @param [String] queue
+      def working!(queue)
+        active_at queue, Time.now
+      end
+
+      # Record that a queue is idle and timed out - mostly for test support
+      # @param [String] queue
+      def idle!(queue)
+        active_at queue, Time.now - timeout*2
+      end
+
+      # Have the watched queues timed out?
+      # @param [Array[String]] queues list of queues to monitor to determine if there is work left
+      # @return [boolean]
+      def idle?(queues)
+        idle_time(queues) > timeout
+      end
+
+      private
+      attr_reader :timeout
+
+      def idle_time(queues)
+        t = last_activity(queues)
+        return 0 unless t
+        Time.now - Time.parse(t)
+      end
+
+      def last_activity(queues)
+        redis {|c|
+          queues.map {|q| c.get('background_activity:'+q)}.compact.max
+        }
+      end
+
+      def active_at(queue, time)
+        redis {|c| c.set('background_activity:'+queue, time)}
+      end
+
+      def redis(&block)
+        if @redis.respond_to?(:call)
+          @redis.call(&block)
+        elsif @redis.respond_to?(:with)
+          @redis.with(&block)
+        else
+          block.call(@redis)
+        end
+      end
+    end
+  end
+end
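The private `redis` helper accepts three shapes of connection handle, which is what lets the default `::Sidekiq.method(:redis)` argument work alongside pools and bare connections. A sketch of each shape (the `connection_pool` and `redis` objects in the commented lines are assumptions, not dependencies declared in this diff):

```ruby
require 'autoscaler/sidekiq/activity'

# 1. A callable, e.g. Sidekiq's own connection method (the default argument).
activity = Autoscaler::Sidekiq::Activity.new(60)

# 2. Anything responding to #with, such as a ConnectionPool:
#    pool = ConnectionPool.new(size: 5) { Redis.new }
#    activity = Autoscaler::Sidekiq::Activity.new(60, pool)

# 3. A bare connection object, passed straight to the block:
#    activity = Autoscaler::Sidekiq::Activity.new(60, Redis.new)

activity.working!('default')  # stamps background_activity:default with Time.now
activity.idle?(['default'])   # => true once the newest stamp is older than 60 seconds
```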
data/lib/autoscaler/sidekiq/celluloid_monitor.rb
@@ -0,0 +1,67 @@
+require 'celluloid'
+
+module Autoscaler
+  module Sidekiq
+    # Actor to monitor the sidekiq server for scale-down
+    class CelluloidMonitor
+      include Celluloid
+
+      # @param [scaler] scaler object that actually performs scaling operations (e.g. {HerokuScaler})
+      # @param [Strategy] strategy object that decides the target number of workers (e.g. {BinaryScalingStrategy})
+      # @param [System] system interface to the queuing system for use by the strategy
+      def initialize(scaler, strategy, system)
+        @scaler = scaler
+        @strategy = strategy
+        @system = system
+        @running = false
+      end
+
+      # Periodically update the desired number of workers
+      # @param [Numeric] interval polling interval, mostly for testing
+      def wait_for_downscale(interval = 15)
+        once do
+          active_now!
+
+          workers = :unknown
+
+          begin
+            sleep(interval)
+            target_workers = @strategy.call(@system, idle_time)
+            workers = @scaler.workers = target_workers unless workers == target_workers
+          end while workers > 0
+        end
+      end
+
+      # Notify the monitor that a job is starting
+      def starting_job
+      end
+
+      # Notify the monitor that a job has finished
+      def finished_job
+        active_now!
+        async.wait_for_downscale
+      end
+
+      private
+
+      def active_now!
+        @activity = Time.now
+      end
+
+      def idle_time
+        Time.now - @activity
+      end
+
+      def once
+        return if @running
+
+        begin
+          @running = true
+          yield
+        ensure
+          @running = false
+        end
+      end
+    end
+  end
+end
data/lib/autoscaler/sidekiq/client.rb
@@ -0,0 +1,50 @@
+require 'autoscaler/binary_scaling_strategy'
+require 'autoscaler/sidekiq/specified_queue_system'
+
+module Autoscaler
+  module Sidekiq
+    # Sidekiq client middleware
+    # Performs scale-up when items are queued and there are no workers running
+    class Client
+      # @param [Hash] scalers map of queue(String) => scaler (e.g. {HerokuScaler}).
+      # Which scaler to use for each sidekiq queue
+      def initialize(scalers)
+        @scalers = scalers
+      end
+
+      # Sidekiq middleware api method
+      def call(worker_class, item, queue, _ = nil)
+        result = yield
+
+        scaler = @scalers[queue]
+        if scaler && scaler.workers < 1
+          scaler.workers = 1
+        end
+
+        result
+      end
+
+      # Check for interrupted or scheduled work on startup.
+      # Typically you need to construct your own instance just
+      # to call this method, but see add_to_chain.
+      # @param [Strategy] strategy object that determines target workers
+      # @yieldparam [String] queue mostly for testing
+      # @yieldreturn [QueueSystem] mostly for testing
+      def set_initial_workers(strategy = nil, &system_factory)
+        strategy ||= BinaryScalingStrategy.new
+        system_factory ||= lambda {|queue| SpecifiedQueueSystem.new([queue])}
+        @scalers.each do |queue, scaler|
+          scaler.workers = strategy.call(system_factory.call(queue), 0)
+        end
+      end
+
+      # Convenience method to avoid having to name the class and parameter
+      # twice when calling set_initial_workers
+      # @return [Client] an instance of Client for set_initial_workers
+      def self.add_to_chain(chain, scalers)
+        chain.add self, scalers
+        new(scalers)
+      end
+    end
+  end
+end
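One plausible client-side wiring uses `add_to_chain` so the same instance can seed workers at boot via `set_initial_workers`. The `HerokuScaler` construction below is an assumption; `heroku_scaler.rb` is listed in this release but its diff is not shown above.

```ruby
require 'sidekiq'
require 'autoscaler/sidekiq'
require 'autoscaler/heroku_scaler'

heroku = Autoscaler::HerokuScaler.new # constructor arguments not shown in this diff

Sidekiq.configure_client do |config|
  config.client_middleware do |chain|
    # add_to_chain registers the middleware on the chain and returns an
    # instance, which can then scale up for any work already waiting.
    Autoscaler::Sidekiq::Client.add_to_chain(chain, 'default' => heroku).set_initial_workers
  end
end
```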
data/lib/autoscaler/sidekiq/entire_queue_system.rb
@@ -0,0 +1,41 @@
+require 'sidekiq/api'
+
+module Autoscaler
+  module Sidekiq
+    # Interface to to interrogate the queuing system
+    # Includes every queue
+    class EntireQueueSystem
+      # @return [Integer] number of workers actively engaged
+      def workers
+        ::Sidekiq::Workers.new.map {|pid, _, _| pid}.uniq.size
+        # #size may be out-of-date.
+      end
+
+      # @return [Integer] amount work ready to go
+      def queued
+        sidekiq_queues.values.map(&:to_i).reduce(&:+) || 0
+      end
+
+      # @return [Integer] amount of work scheduled for some time in the future
+      def scheduled
+        ::Sidekiq::ScheduledSet.new.size
+      end
+
+      # @return [Integer] amount of work still being retried
+      def retrying
+        ::Sidekiq::RetrySet.new.size
+      end
+
+      # @return [Array[String]]
+      def queue_names
+        sidekiq_queues.keys
+      end
+
+      private
+
+      def sidekiq_queues
+        ::Sidekiq::Stats.new.queues
+      end
+    end
+  end
+end
data/lib/autoscaler/sidekiq/monitor_middleware_adapter.rb
@@ -0,0 +1,46 @@
+require 'autoscaler/sidekiq/queue_system'
+require 'autoscaler/sidekiq/celluloid_monitor'
+require 'autoscaler/binary_scaling_strategy'
+require 'autoscaler/delayed_shutdown'
+
+module Autoscaler
+  module Sidekiq
+    # Shim to the existing autoscaler interface
+    # Starts the monitor and notifies it of job events that may occur while it's sleeping
+    class MonitorMiddlewareAdapter
+      # @param [scaler] scaler object that actually performs scaling operations (e.g. {HerokuScaler})
+      # @param [Strategy,Numeric] timeout strategy object that determines target workers, or a timeout in seconds to be passed to {DelayedShutdown}+{BinaryScalingStrategy}
+      # @param [Array[String]] specified_queues list of queues to monitor to determine if there is work left. Defaults to all sidekiq queues.
+      def initialize(scaler, timeout, specified_queues = nil)
+        unless monitor
+          CelluloidMonitor.supervise_as(:autoscaler_monitor,
+            scaler,
+            strategy(timeout),
+            QueueSystem.new(specified_queues))
+        end
+      end
+
+      # Sidekiq middleware api entry point
+      def call(worker, msg, queue, _ = nil)
+        monitor.async.starting_job
+        yield
+      ensure
+        # monitor might have gone, e.g. if Sidekiq has received SIGTERM
+        monitor.async.finished_job if monitor
+      end
+
+      private
+      def monitor
+        Celluloid::Actor[:autoscaler_monitor]
+      end
+
+      def strategy(timeout)
+        if timeout.respond_to?(:call)
+          timeout
+        else
+          DelayedShutdown.new(BinaryScalingStrategy.new, timeout)
+        end
+      end
+    end
+  end
+end
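Server-side, the `Autoscaler::Sidekiq::Server` constant defined earlier in this diff is just this adapter, so a typical registration might look like the sketch below (again assuming a `HerokuScaler` whose construction is not shown here):

```ruby
require 'sidekiq'
require 'autoscaler/sidekiq'
require 'autoscaler/heroku_scaler'

heroku = Autoscaler::HerokuScaler.new # constructor arguments not shown in this diff

Sidekiq.configure_server do |config|
  config.server_middleware do |chain|
    # scaler, timeout in seconds (or a strategy responding to #call), optional queue list
    chain.add(Autoscaler::Sidekiq::Server, heroku, 60, ['default'])
  end
end
```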
data/lib/autoscaler/sidekiq/queue_system.rb
@@ -0,0 +1,20 @@
+require 'sidekiq/api'
+require 'autoscaler/sidekiq/specified_queue_system'
+require 'autoscaler/sidekiq/entire_queue_system'
+
+module Autoscaler
+  module Sidekiq
+    # Interface to to interrogate the queuing system
+    # convenience constructor for SpecifiedQueueSystem and EntireQueueSystem
+    module QueueSystem
+      # @param [Array[String]] specified_queues list of queues to monitor to determine if there is work left. Defaults to all sidekiq queues.
+      def self.new(specified_queues = nil)
+        if specified_queues
+          SpecifiedQueueSystem.new(specified_queues)
+        else
+          EntireQueueSystem.new
+        end
+      end
+    end
+  end
+end
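The module overrides `new` purely as a factory, so callers never choose between the two concrete systems themselves:

```ruby
require 'autoscaler/sidekiq/queue_system'

Autoscaler::Sidekiq::QueueSystem.new               # => an EntireQueueSystem (all queues)
Autoscaler::Sidekiq::QueueSystem.new(['mailers'])  # => a SpecifiedQueueSystem for 'mailers' only
```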
data/lib/autoscaler/sidekiq/sleep_wait_server.rb
@@ -0,0 +1,51 @@
+require 'autoscaler/sidekiq/queue_system'
+require 'autoscaler/sidekiq/activity'
+
+module Autoscaler
+  module Sidekiq
+    # Sidekiq server middleware
+    # Performs scale-down when the queue is empty
+    class SleepWaitServer
+      # @param [scaler] scaler object that actually performs scaling operations (e.g. {HerokuScaler})
+      # @param [Numeric] timeout number of seconds to wait before shutdown
+      # @param [Array[String]] specified_queues list of queues to monitor to determine if there is work left. Defaults to all sidekiq queues.
+      def initialize(scaler, timeout, specified_queues = nil)
+        @scaler = scaler
+        @timeout = timeout
+        @system = QueueSystem.new(specified_queues)
+      end
+
+      # Sidekiq middleware api entry point
+      def call(worker, msg, queue, redis = ::Sidekiq.method(:redis))
+        working!(queue, redis)
+        yield
+      ensure
+        working!(queue, redis)
+        wait_for_task_or_scale(redis)
+      end
+
+      private
+      def wait_for_task_or_scale(redis)
+        loop do
+          return if pending_work?
+          return @scaler.workers = 0 if idle?(redis)
+          sleep(0.5)
+        end
+      end
+
+      attr_reader :system
+
+      def pending_work?
+        system.queued > 0 || system.scheduled > 0 || system.retrying > 0
+      end
+
+      def working!(queue, redis)
+        Activity.new(@timeout, redis).working!(queue)
+      end
+
+      def idle?(redis)
+        Activity.new(@timeout, redis).idle?(system.queue_names)
+      end
+    end
+  end
+end
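SleepWaitServer is the blocking alternative to the Celluloid-based monitor above: after each job it polls in `wait_for_task_or_scale` until there is pending work or the activity timeout passes. Registering it instead of `Autoscaler::Sidekiq::Server` might look like this sketch (the `HerokuScaler` construction is again an assumption):

```ruby
require 'sidekiq'
require 'autoscaler/sidekiq/sleep_wait_server'
require 'autoscaler/heroku_scaler'

heroku = Autoscaler::HerokuScaler.new # constructor arguments not shown in this diff

Sidekiq.configure_server do |config|
  config.server_middleware do |chain|
    # Each worker sleeps in 0.5s steps after finishing a job until new work
    # appears or the 60 second activity timeout scales workers to 0.
    chain.add(Autoscaler::Sidekiq::SleepWaitServer, heroku, 60, ['default'])
  end
end
```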
data/lib/autoscaler/sidekiq/specified_queue_system.rb
@@ -0,0 +1,48 @@
+require 'sidekiq/api'
+
+module Autoscaler
+  module Sidekiq
+    # Interface to to interrogate the queuing system
+    # Includes only the queues provided to the constructor
+    class SpecifiedQueueSystem
+      # @param [Array[String]] specified_queues list of queues to monitor to determine if there is work left. Defaults to all sidekiq queues.
+      def initialize(specified_queues)
+        @queue_names = specified_queues
+      end
+
+      # @return [Integer] number of workers actively engaged
+      def workers
+        ::Sidekiq::Workers.new.select {|_, _, work|
+          queue_names.include?(work['queue'])
+        }.map {|pid, _, _| pid}.uniq.size
+      end
+
+      # @return [Integer] amount work ready to go
+      def queued
+        queue_names.map {|name| sidekiq_queues[name].to_i}.reduce(&:+)
+      end
+
+      # @return [Integer] amount of work scheduled for some time in the future
+      def scheduled
+        count_set(::Sidekiq::ScheduledSet.new)
+      end
+
+      # @return [Integer] amount of work still being retried
+      def retrying
+        count_set(::Sidekiq::RetrySet.new)
+      end
+
+      # @return [Array[String]]
+      attr_reader :queue_names
+
+      private
+      def sidekiq_queues
+        ::Sidekiq::Stats.new.queues
+      end
+
+      def count_set(set)
+        set.count { |job| queue_names.include?(job.queue) }
+      end
+    end
+  end
+end