autoscaler 0.8.0 → 0.9.0

checksums.yaml ADDED
@@ -0,0 +1,7 @@
+ ---
+ SHA1:
+   metadata.gz: c1870d13f3e44b1d9b2267f41fb8f5d94c94c6ec
+   data.tar.gz: 0a7662b031e66c6d9967567cb7612791fe6914ff
+ SHA512:
+   metadata.gz: 10566a1b1762379530e22dbfe1174212c7c23dc4078787fffdb9956d80d3dc33a4d1aba2c9298b1408579fbda1e336e05a9b1eb63124d6bfb48fd8b90de0cc94
+   data.tar.gz: 37fd250217015d0aabf1e99cc7773aacdbf51e6ed57bfbda0a18fe998369459b69fcbcf1126fa467ba1906539e4d20b12134ec944c17c2071849e61c5243f0ff
data/CHANGELOG.md CHANGED
@@ -1,9 +1,21 @@
  # Changelog

+ ## 0.9.0
+
+ - CounterCacheRedis.new now takes a third parameter `worker_type`, a string used in the
+   Redis cache key. Allows for caching counts for various types of workers, not just `worker`
+ - Support for Sidekiq 3.0
+ - Strategy wrapper to ignore scheduled and retrying queues. Usage:
+   ``new_strategy = IgnoreScheduledAndRetrying.new(my_old_strategy)``
+ - LinearScalingStrategy now accepts a minimum amount of work (as a percentage of worker capacity)
+   required to begin scaling up. E.g. LinearScalingStrategy.new(10, 4, 0.5) will scale to one worker
+   after 4*0.5 = 2 jobs are enqueued, and a maximum of 10 workers at 10*4 jobs. Old behavior is preserved
+   with a default value of 0.
+
  ## 0.8.0

- - Extracted caching of Heroku worker counts and added experimental Redis cache
-   scaler.counter_cache = Autoscaler::CounterCacheRedis(Sidekiq.method(:redis))
+ - Extracted caching of Heroku worker counts and added experimental Redis cache:
+   ``scaler.counter_cache = Autoscaler::CounterCacheRedis.new(Sidekiq.method(:redis))``
  - Now rescues Heroku::Api::Errors in addition to Excon::Errors

  ## 0.7.0
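Taken together, the 0.9.0 entries above can be wired up roughly like this. This is a minimal sketch, not code from this release: the Heroku scaler construction and the five-minute timeout are assumptions based on the 0.8.0 note above and the CounterCacheRedis defaults shown later in this diff.

```ruby
require 'sidekiq'
require 'autoscaler/heroku_scaler'
require 'autoscaler/counter_cache_redis'
require 'autoscaler/linear_scaling_strategy'
require 'autoscaler/ignore_scheduled_and_retrying'

scaler = Autoscaler::HerokuScaler.new

# New third argument: the worker type becomes part of the Redis key
# (autoscaler:workers:<worker_type>), so counts for different worker types don't collide.
scaler.counter_cache = Autoscaler::CounterCacheRedis.new(Sidekiq.method(:redis), 5 * 60, 'worker')

# 10 workers max, 4 jobs per worker, and at least 0.5 * 4 = 2 jobs enqueued
# before the first worker is started.
strategy = Autoscaler::LinearScalingStrategy.new(10, 4, 0.5)

# Wrap any strategy so scheduled and retrying jobs don't keep workers alive.
strategy = Autoscaler::IgnoreScheduledAndRetrying.new(strategy)
```

Handing the strategy to the server-side middleware is unchanged from 0.8.0 and is not shown here; see examples/complex.rb in the gem.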
data/Guardfile CHANGED
@@ -2,14 +2,13 @@ guard 'process', :name => 'redis', :command => 'redis-server spec/redis_test.con
  watch('spec/redis_test.conf')
  end

- tag = "--tag #{ENV['TAG']}" if ENV['TAG']
- example = "-e '#{ENV['EXAMPLE']}'" if ENV['EXAMPLE']
- guard 'rspec',
-   :version => 2,
-   :cli => "--color --format d #{tag} #{example}",
-   :bundler => false,
-   :spec_paths => ['spec'] do
-   watch(%r{^spec/.+_spec\.rb$})
-   watch(%r{^lib/(.+).rb$}) { |m| "spec/#{m[1]}_spec.rb" }
-   watch('spec/spec_helper.rb') { "spec" }
+ tag = "--tag #{ENV['TAG']}" if ENV['TAG']
+ example = "--example '#{ENV['EXAMPLE']}'" if ENV['EXAMPLE']
+ %w(sidekiq-2 sidekiq-3).each do |appraisal|
+   guard :rspec, :cmd => "appraisal #{appraisal} rspec --color --format d #{tag} #{example}" do
+     watch(%r{^spec/.+_spec\.rb$})
+     watch(%r{^lib/(.+).rb$}) { |m| "spec/#{m[1]}_spec.rb" }
+     watch('spec/spec_helper.rb') { "spec" }
+   end
  end
+
@@ -7,14 +7,16 @@ module Autoscaler
  # ConnectionPool: e.g. what you pass to Sidekiq.redis=
  # Redis client: e.g. Redis.connect
  # @param [Numeric] timeout number of seconds to allow before expiration
- def initialize(redis, timeout = 5 * 60)
+ # @param [String] worker_type the name of the worker type, for cache keys
+ def initialize(redis, timeout = 5 * 60, worker_type = 'worker')
    @redis = redis
    @timeout = timeout
+   @worker_type = worker_type
  end

  # @param [Numeric] value new counter value
  def counter=(value)
-   redis {|c| c.setex('autoscaler:workers', @timeout, value)}
+   redis {|c| c.setex(key, @timeout, value)}
  end

  # Raised when no block is provided to #counter
@@ -22,7 +24,7 @@ module Autoscaler

  # Current value. Uses the Hash#fetch api - pass a block to use in place of expired values or it will raise an exception.
  def counter
-   value = redis {|c| c.get('autoscaler:workers')}
+   value = redis {|c| c.get(key)}
    return value.to_i if value
    return yield if block_given?
    raise Expired
@@ -31,6 +33,10 @@ module Autoscaler
  private
  attr_reader :timeout

+ def key
+   ['autoscaler', 'workers', @worker_type] * ':'
+ end
+
  def redis(&block)
    if @redis.respond_to?(:call)
      @redis.call(&block)
@@ -0,0 +1,13 @@
+ module Autoscaler
+   class IgnoreScheduledAndRetrying
+     def initialize(strategy)
+       @strategy = strategy
+     end
+
+     def call(system, event_idle_time)
+       system.define_singleton_method(:scheduled) { 0 }
+       system.define_singleton_method(:retrying) { 0 }
+       @strategy.call(system, event_idle_time)
+     end
+   end
+ end
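A small behavioral sketch of the wrapper added above (the struct and the counting proc are hypothetical stand-ins, not part of the gem): the wrapped queue system reports zero scheduled and retrying work, so only the other readers the underlying strategy consults still matter.

```ruby
require 'autoscaler/ignore_scheduled_and_retrying'

FakeSystem = Struct.new(:queued, :scheduled, :retrying) # hypothetical stand-in
count_everything = proc { |system, _idle_time| system.queued + system.scheduled + system.retrying }

strategy = Autoscaler::IgnoreScheduledAndRetrying.new(count_everything)
strategy.call(FakeSystem.new(3, 5, 2), 0) # => 3; scheduled/retrying are masked to 0
```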
@@ -2,24 +2,29 @@ module Autoscaler
  # Strategies determine the target number of workers
  # This strategy sets the number of workers to be proportional to the number of enqueued jobs.
  class LinearScalingStrategy
-   #@param [integer] workers maximum number of workers to spin up.
+   #@param [integer] max_workers maximum number of workers to spin up.
    #@param [integer] worker_capacity the amount of jobs one worker can handle
-   def initialize(workers = 1, worker_capacity = 25)
-     @workers = workers
-     @worker_capacity = worker_capacity
+   #@param [float] min_factor minimum work required to scale, as percentage of worker_capacity
+   def initialize(max_workers = 1, worker_capacity = 25, min_factor = 0)
+     @max_workers = max_workers # max # of workers we can scale to
+     @total_capacity = (@max_workers * worker_capacity).to_f # total capacity of max workers
+     min_capacity = [0, min_factor].max.to_f * worker_capacity # min capacity required to scale first worker
+     @min_capacity_percentage = min_capacity / @total_capacity # min percentage of total capacity
    end

    # @param [QueueSystem] system interface to the queuing system
    # @param [Numeric] event_idle_time number of seconds since a job related event
    # @return [Integer] target number of workers
    def call(system, event_idle_time)
-     total_capacity = (@workers * @worker_capacity).to_f
-     percent_capacity = total_work(system) / total_capacity
+     requested_capacity_percentage = total_work(system) / @total_capacity

-     ideal_scale = (percent_capacity * @workers).ceil
-     max_scale = @workers
+     # Scale requested capacity taking into account the minimum required
+     scale_factor = (requested_capacity_percentage - @min_capacity_percentage) / (@total_capacity - @min_capacity_percentage)
+     scaled_capacity_percentage = scale_factor * @total_capacity

-     return [ideal_scale, max_scale].min
+     ideal_workers = ([0, scaled_capacity_percentage].max * @max_workers).ceil
+
+     return [ideal_workers, @max_workers].min
    end

    private
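To make the new minimum concrete, here is a worked sketch using the changelog's 10-worker, 4-jobs-per-worker, 0.5-minimum example. The `system_with` helper is a hypothetical stand-in for the TestSystem used in the specs further down; it only needs the queued/scheduled/retrying readers that the strategy's private `total_work` sums.

```ruby
require 'autoscaler/linear_scaling_strategy'

# Hypothetical helper: behaves like spec/test_system.rb with no scheduled or retrying work.
def system_with(enqueued)
  Struct.new(:queued, :scheduled, :retrying).new(enqueued, 0, 0)
end

strategy = Autoscaler::LinearScalingStrategy.new(10, 4, 0.5)
# Total capacity is 10 * 4 = 40 jobs; the minimum is 0.5 * 4 = 2 jobs.
strategy.call(system_with(2), 0)   # => 0   exactly at the minimum, still no worker
strategy.call(system_with(3), 0)   # => 1   first worker once the minimum is exceeded
strategy.call(system_with(400), 0) # => 10  capped at max_workers
```

The first two results match the new specs later in this diff; the third just shows the `[ideal_workers, @max_workers].min` cap.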
@@ -5,8 +5,9 @@ module Autoscaler
  # Tracks activity timeouts using Sidekiq's redis connection
  class Activity
    # @param [Numeric] timeout number of seconds to wait before shutdown
-   def initialize(timeout)
+   def initialize(timeout, redis = ::Sidekiq.method(:redis))
      @timeout = timeout
+     @redis = redis
    end

    # Record that a queue has activity
@@ -38,13 +39,23 @@ module Autoscaler
    end

    def last_activity(queues)
-     ::Sidekiq.redis {|c|
+     redis {|c|
        queues.map {|q| c.get('background_activity:'+q)}.compact.max
      }
    end

    def active_at(queue, time)
-     ::Sidekiq.redis {|c| c.set('background_activity:'+queue, time)}
+     redis {|c| c.set('background_activity:'+queue, time)}
+   end
+
+   def redis(&block)
+     if @redis.respond_to?(:call)
+       @redis.call(&block)
+     elsif @redis.respond_to?(:with)
+       @redis.with(&block)
+     else
+       block.call(@redis)
+     end
    end
  end
  end
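The new `redis` helper above means Activity accepts the same three connection shapes as CounterCacheRedis. A quick sketch (the pool size and URL are placeholders):

```ruby
require 'sidekiq'
require 'redis'
require 'connection_pool'
require 'autoscaler/sidekiq/activity'

# 1. A callable, e.g. Sidekiq's own connection method (the default)
Autoscaler::Sidekiq::Activity.new(60, ::Sidekiq.method(:redis))

# 2. Anything responding to #with, e.g. a ConnectionPool
pool = ConnectionPool.new(:size => 5) { Redis.new(:url => 'redis://localhost:6379') }
Autoscaler::Sidekiq::Activity.new(60, pool)

# 3. A plain client, passed straight to the block
Autoscaler::Sidekiq::Activity.new(60, Redis.new(:url => 'redis://localhost:6379'))
```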
@@ -13,7 +13,7 @@ module Autoscaler
  end

  # Sidekiq middleware api method
- def call(worker_class, item, queue)
+ def call(worker_class, item, queue, _ = nil)
    result = yield

    scaler = @scalers[queue]
@@ -17,12 +17,12 @@ module Autoscaler
 
  # @return [Integer] amount of work scheduled for some time in the future
  def scheduled
-   count_sorted_set("schedule")
+   ::Sidekiq::ScheduledSet.new.size
  end

  # @return [Integer] amount of work still being retried
  def retrying
-   count_sorted_set("retry")
+   ::Sidekiq::RetrySet.new.size
  end

  # @return [Array[String]]
@@ -35,10 +35,6 @@ module Autoscaler
  def sidekiq_queues
    ::Sidekiq::Stats.new.queues
  end
-
- def count_sorted_set(sorted_set)
-   ::Sidekiq::SortedSet.new(sorted_set).count
- end
  end
  end
  end
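The replacement calls above come from Sidekiq's public API (sidekiq/api), which the gem now leans on across the Sidekiq versions it supports. A short sketch of the two counting styles used by QueueSystem here and by SpecifiedQueueSystem in a later hunk (the 'mail' queue name is a placeholder):

```ruby
require 'sidekiq/api'

# QueueSystem: count everything scheduled for later or awaiting retry.
scheduled = ::Sidekiq::ScheduledSet.new.size
retrying  = ::Sidekiq::RetrySet.new.size

# SpecifiedQueueSystem: same sets, but only jobs bound for the watched queues.
watched = %w(mail)
retrying_for_watched = ::Sidekiq::RetrySet.new.count { |job| watched.include?(job.queue) }
```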
@@ -21,7 +21,7 @@ module Autoscaler
  end

  # Sidekiq middleware api entry point
- def call(worker, msg, queue)
+ def call(worker, msg, queue, _ = nil)
    monitor.async.starting_job
    yield
  ensure
@@ -10,42 +10,41 @@ module Autoscaler
  # @param [Numeric] timeout number of seconds to wait before shutdown
  # @param [Array[String]] specified_queues list of queues to monitor to determine if there is work left. Defaults to all sidekiq queues.
  def initialize(scaler, timeout, specified_queues = nil)
-   @scaler = scaler
-   @activity = Activity.new(timeout)
-   @system = QueueSystem.new(specified_queues)
+   @scaler = scaler
+   @timeout = timeout
+   @system = QueueSystem.new(specified_queues)
  end

  # Sidekiq middleware api entry point
- def call(worker, msg, queue)
-   working!(queue)
+ def call(worker, msg, queue, redis = ::Sidekiq.method(:redis))
+   working!(queue, redis)
    yield
  ensure
-   working!(queue)
-   wait_for_task_or_scale
+   working!(queue, redis)
+   wait_for_task_or_scale(redis)
  end

  private
- def wait_for_task_or_scale
+ def wait_for_task_or_scale(redis)
    loop do
      return if pending_work?
-     return @scaler.workers = 0 if idle?
+     return @scaler.workers = 0 if idle?(redis)
      sleep(0.5)
    end
  end

  attr_reader :system
- attr_reader :activity

  def pending_work?
    system.queued > 0 || system.scheduled > 0 || system.retrying > 0
  end

- def working!(queue)
-   activity.working!(queue)
+ def working!(queue, redis)
+   Activity.new(@timeout, redis).working!(queue)
  end

- def idle?
-   activity.idle?(system.queue_names)
+ def idle?(redis)
+   Activity.new(@timeout, redis).idle?(system.queue_names)
  end
  end
  end
@@ -24,12 +24,12 @@ module Autoscaler
 
  # @return [Integer] amount of work scheduled for some time in the future
  def scheduled
-   count_sorted_set("schedule")
+   count_set(::Sidekiq::ScheduledSet.new)
  end

  # @return [Integer] amount of work still being retried
  def retrying
-   count_sorted_set("retry")
+   count_set(::Sidekiq::RetrySet.new)
  end

  # @return [Array[String]]
@@ -40,9 +40,8 @@ module Autoscaler
    ::Sidekiq::Stats.new.queues
  end

- def count_sorted_set(sorted_set)
-   ss = ::Sidekiq::SortedSet.new(sorted_set)
-   ss.count { |job| queue_names.include?(job.queue) }
+ def count_set(set)
+   set.count { |job| queue_names.include?(job.queue) }
  end
  end
  end
@@ -1,4 +1,4 @@
  module Autoscaler
    # version number
-   VERSION = "0.8.0"
+   VERSION = "0.9.0"
  end
@@ -18,6 +18,15 @@ describe Autoscaler::CounterCacheRedis do
    subject.counter.should == 2
  end

+ it 'does not conflict with multiple worker types' do
+   other_worker_cache = cut.new(@redis, 300, 'other_worker')
+   subject.counter = 1
+   other_worker_cache.counter = 2
+
+   subject.counter.should == 1
+   other_worker_cache.counter.should == 2
+ end
+
  it 'times out' do
    cache = cut.new(Sidekiq.method(:redis), 1) # timeout 0 invalid
    cache.counter = 3
@@ -0,0 +1,33 @@
+ require 'spec_helper'
+ require 'test_system'
+ require 'autoscaler/ignore_scheduled_and_retrying'
+
+ describe Autoscaler::IgnoreScheduledAndRetrying do
+   let(:cut) {Autoscaler::IgnoreScheduledAndRetrying}
+
+   it "passes through enqueued" do
+     system = Struct.new(:enqueued).new(3)
+     strategy = proc {|system, time| system.enqueued}
+     cut.new(strategy).call(system, 0).should == 3
+   end
+
+   it "passes through workers" do
+     system = Struct.new(:workers).new(3)
+     strategy = proc {|system, time| system.workers}
+     cut.new(strategy).call(system, 0).should == 3
+   end
+
+   it "ignores scheduled" do
+     system = Struct.new(:scheduled).new(3)
+     strategy = proc {|system, time| system.scheduled}
+     cut.new(strategy).call(system, 0).should == 0
+   end
+
+   it "ignores retrying" do
+     system = Struct.new(:retrying).new(3)
+     strategy = proc {|system, time| system.retrying}
+     cut.new(strategy).call(system, 0).should == 0
+   end
+ end
+
+
@@ -34,4 +34,46 @@ describe Autoscaler::LinearScalingStrategy do
    strategy = cut.new(5, 2)
    strategy.call(system, 1).should == 3
  end
+
+ it "doesn't scale unless minimum is met" do
+   system = TestSystem.new(2)
+   strategy = cut.new(10, 4, 0.5)
+   strategy.call(system, 1).should == 0
+ end
+
+ it "scales proportionally with a minimum" do
+   system = TestSystem.new(3)
+   strategy = cut.new(10, 4, 0.5)
+   strategy.call(system, 1).should == 1
+ end
+
+ it "scales maximally with a minimum" do
+   system = TestSystem.new(25)
+   strategy = cut.new(5, 4, 0.5)
+   strategy.call(system, 1).should == 5
+ end
+
+ it "scales proportionally with a minimum > 1" do
+   system = TestSystem.new(12)
+   strategy = cut.new(5, 4, 2)
+   strategy.call(system, 1).should == 2
+ end
+
+ it "scales maximally with a minimum factor > 1" do
+   system = TestSystem.new(30)
+   strategy = cut.new(5, 4, 2)
+   strategy.call(system, 1).should == 5
+ end
+
+ xit "doesn't scale down engaged workers" do
+   system = TestSystem.new(0, 2)
+   strategy = cut.new(5, 4)
+   strategy.call(system, 1).should == 2
+ end
+
+ xit "doesn't scale above max workers even if engaged workers is greater" do
+   system = TestSystem.new(40, 6)
+   strategy = cut.new(5, 4)
+   strategy.call(system, 1).should == 5
+ end
  end
@@ -18,4 +18,17 @@ describe Autoscaler::Sidekiq::Activity do
  end
  it {activity.should be_idle(['queue'])}
  end
+
+ it 'passed a connection pool' do
+   activity = cut.new(5, @redis)
+   activity.working!('queue')
+   activity.should_not be_idle(['queue'])
+ end
+
+ it 'passed a plain connection' do
+   connection = Redis.connect(:url => 'http://localhost:9736', :namespace => 'autoscaler')
+   activity = cut.new(5, connection)
+   activity.working!('queue')
+   activity.should_not be_idle(['queue'])
+ end
  end
@@ -13,6 +13,11 @@ describe Autoscaler::Sidekiq::Client do
    scaler.workers.should == 1
  end

+ it 'scales with a redis pool' do
+   client.call(Class, {}, 'queue', ::Sidekiq.method(:redis)) {}
+   scaler.workers.should == 1
+ end
+
  it('yields') {client.call(Class, {}, 'queue') {:foo}.should == :foo}
  end

@@ -12,4 +12,5 @@ describe Autoscaler::Sidekiq::MonitorMiddlewareAdapter do
  let(:server) {cut.new(scaler, 0, ['queue'])}

  it('yields') {server.call(Object.new, {}, 'queue') {:foo}.should == :foo}
+ it('yields with a redis pool') {server.call(Object.new, {}, 'queue', Sidekiq.method(:redis)) {:foo}.should == :foo}
  end
@@ -11,10 +11,7 @@ describe Autoscaler::Sidekiq::SleepWaitServer do
  let(:scaler) {TestScaler.new(1)}
  let(:server) {cut.new(scaler, 0, ['queue'])}

- def when_run
-   server.call(Object.new, {}, 'queue') {}
- end
-
+ shared_examples "a sleepwait server" do
  it "scales with no work" do
    server.stub(:pending_work?).and_return(false)
    when_run
@@ -26,6 +23,23 @@ describe Autoscaler::Sidekiq::SleepWaitServer do
    when_run
    scaler.workers.should == 1
  end
+ end
+
+ describe "a middleware with no redis specified" do
+   it_behaves_like "a sleepwait server" do
+     def when_run
+       server.call(Object.new, {}, 'queue') {}
+     end
+   end
+ end
+
+ describe "a middleware with redis specified" do
+   it_behaves_like "a sleepwait server" do
+     def when_run
+       server.call(Object.new, {}, 'queue', Sidekiq.method(:redis)) {}
+     end
+   end
+ end

  it('yields') {server.call(Object.new, {}, 'queue') {:foo}.should == :foo}
  end
data/spec/test_system.rb CHANGED
@@ -1,9 +1,10 @@
  class TestSystem
-   def initialize(pending)
+   def initialize(pending, current = 0)
      @pending = pending
+     @current = current
    end

-   def workers; 0; end
+   def workers; @current; end
    def queued; @pending; end
    def scheduled; 0; end
    def retrying; 0; end
metadata CHANGED
@@ -1,8 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: autoscaler
  version: !ruby/object:Gem::Version
- version: 0.8.0
- prerelease:
+ version: 0.9.0
  platform: ruby
  authors:
  - Justin Love
@@ -10,102 +9,96 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2014-03-07 00:00:00.000000000 Z
+ date: 2014-05-19 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: sidekiq
  requirement: !ruby/object:Gem::Requirement
- none: false
  requirements:
- - - ~>
+ - - ">="
  - !ruby/object:Gem::Version
  version: '2.7'
+ - - "<"
+ - !ruby/object:Gem::Version
+ version: '3.1'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
- none: false
  requirements:
- - - ~>
+ - - ">="
  - !ruby/object:Gem::Version
  version: '2.7'
+ - - "<"
+ - !ruby/object:Gem::Version
+ version: '3.1'
  - !ruby/object:Gem::Dependency
  name: heroku-api
  requirement: !ruby/object:Gem::Requirement
- none: false
  requirements:
- - - ! '>='
+ - - ">="
  - !ruby/object:Gem::Version
  version: '0'
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
- none: false
  requirements:
- - - ! '>='
+ - - ">="
  - !ruby/object:Gem::Version
  version: '0'
  - !ruby/object:Gem::Dependency
  name: bundler
  requirement: !ruby/object:Gem::Requirement
- none: false
  requirements:
- - - ! '>='
+ - - ">="
  - !ruby/object:Gem::Version
  version: '0'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
- none: false
  requirements:
- - - ! '>='
+ - - ">="
  - !ruby/object:Gem::Version
  version: '0'
  - !ruby/object:Gem::Dependency
  name: rspec
  requirement: !ruby/object:Gem::Requirement
- none: false
  requirements:
- - - ! '>='
+ - - ">="
  - !ruby/object:Gem::Version
  version: '0'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
- none: false
  requirements:
- - - ! '>='
+ - - ">="
  - !ruby/object:Gem::Version
  version: '0'
  - !ruby/object:Gem::Dependency
  name: guard-rspec
  requirement: !ruby/object:Gem::Requirement
- none: false
  requirements:
- - - ! '>='
+ - - ">="
  - !ruby/object:Gem::Version
  version: '0'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
- none: false
  requirements:
- - - ! '>='
+ - - ">="
  - !ruby/object:Gem::Version
  version: '0'
  - !ruby/object:Gem::Dependency
  name: guard-process
  requirement: !ruby/object:Gem::Requirement
- none: false
  requirements:
- - - ! '>='
+ - - ">="
  - !ruby/object:Gem::Version
  version: '0'
  type: :development
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
- none: false
  requirements:
- - - ! '>='
+ - - ">="
  - !ruby/object:Gem::Version
  version: '0'
  description: Currently provides a Sidekiq middleware that does 0/1 scaling of Heroku
@@ -117,13 +110,19 @@ extensions: []
  extra_rdoc_files: []
  files:
  - CHANGELOG.md
+ - Guardfile
  - README.md
+ - examples/complex.rb
+ - examples/simple.rb
+ - lib/autoscaler.rb
  - lib/autoscaler/binary_scaling_strategy.rb
  - lib/autoscaler/counter_cache_memory.rb
  - lib/autoscaler/counter_cache_redis.rb
  - lib/autoscaler/delayed_shutdown.rb
  - lib/autoscaler/heroku_scaler.rb
+ - lib/autoscaler/ignore_scheduled_and_retrying.rb
  - lib/autoscaler/linear_scaling_strategy.rb
+ - lib/autoscaler/sidekiq.rb
  - lib/autoscaler/sidekiq/activity.rb
  - lib/autoscaler/sidekiq/celluloid_monitor.rb
  - lib/autoscaler/sidekiq/client.rb
@@ -132,18 +131,14 @@ files:
  - lib/autoscaler/sidekiq/queue_system.rb
  - lib/autoscaler/sidekiq/sleep_wait_server.rb
  - lib/autoscaler/sidekiq/specified_queue_system.rb
- - lib/autoscaler/sidekiq.rb
  - lib/autoscaler/stub_scaler.rb
  - lib/autoscaler/version.rb
- - lib/autoscaler.rb
- - examples/complex.rb
- - examples/simple.rb
- - Guardfile
  - spec/autoscaler/binary_scaling_strategy_spec.rb
  - spec/autoscaler/counter_cache_memory_spec.rb
  - spec/autoscaler/counter_cache_redis_spec.rb
  - spec/autoscaler/delayed_shutdown_spec.rb
  - spec/autoscaler/heroku_scaler_spec.rb
+ - spec/autoscaler/ignore_scheduled_and_retrying_spec.rb
  - spec/autoscaler/linear_scaling_strategy_spec.rb
  - spec/autoscaler/sidekiq/activity_spec.rb
  - spec/autoscaler/sidekiq/celluloid_monitor_spec.rb
@@ -156,27 +151,26 @@ files:
  - spec/test_system.rb
  homepage: ''
  licenses: []
+ metadata: {}
  post_install_message:
  rdoc_options: []
  require_paths:
  - lib
  required_ruby_version: !ruby/object:Gem::Requirement
- none: false
  requirements:
- - - ! '>='
+ - - ">="
  - !ruby/object:Gem::Version
  version: '0'
  required_rubygems_version: !ruby/object:Gem::Requirement
- none: false
  requirements:
- - - ! '>='
+ - - ">="
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
  rubyforge_project: autoscaler
- rubygems_version: 1.8.25
+ rubygems_version: 2.2.0
  signing_key:
- specification_version: 3
+ specification_version: 4
  summary: Start/stop Sidekiq workers on Heroku
  test_files:
  - Guardfile
@@ -185,6 +179,7 @@ test_files:
  - spec/autoscaler/counter_cache_redis_spec.rb
  - spec/autoscaler/delayed_shutdown_spec.rb
  - spec/autoscaler/heroku_scaler_spec.rb
+ - spec/autoscaler/ignore_scheduled_and_retrying_spec.rb
  - spec/autoscaler/linear_scaling_strategy_spec.rb
  - spec/autoscaler/sidekiq/activity_spec.rb
  - spec/autoscaler/sidekiq/celluloid_monitor_spec.rb