autoscaler 0.5.0 → 0.6.0

data/CHANGELOG.md CHANGED
@@ -1,5 +1,10 @@
  # Changelog
 
+ ## 0.6.0
+
+ - Excon errors from the Heroku API are caught by default. See `HerokuScaler#exception_handler` to override the behavior.
+ - Client-side scaling occurs after enqueuing the job; previously it happened before.
+
  ## 0.5.0
 
  - Experimental: `Client#set_initial_workers` to start workers on main process startup; typically:
data/README.md CHANGED
@@ -41,11 +41,16 @@ Install the middleware in your `Sidekiq.configure_` blocks
  - The scale-down monitor is triggered on job completion (and server middleware is only run around jobs), so if the server never processes any jobs, it won't turn off.
  - The retry and schedule lists are considered - if you schedule a long-running task, the process will not scale down.
  - If background jobs trigger jobs in other scaled processes, please note you'll need `config.client_middleware` in your `Sidekiq.configure_server` block in order to scale up.
+ - Exceptions while calling the Heroku API are caught and printed by default. See `HerokuScaler#exception_handler` to override this.
 
  ## Experimental
 
  You can pass a scaling strategy object instead of the timeout to the server middleware. The object (or lambda) should respond to `#call(system, idle_time)` and return the desired number of workers. See `lib/autoscaler/binary_scaling_strategy.rb` for an example.
 
+ `Client#set_initial_workers` starts workers on main process startup; typically:
+
+     Autoscaler::Sidekiq::Client.add_to_chain(chain, 'default' => heroku).set_initial_workers
+
  ## Tests
 
  The project is set up to run RSpec with Guard. It expects a redis instance on a custom port, which is started by the Guardfile.
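A note on the Experimental strategy objects described in the README hunk above: the contract is just `#call(system, idle_time)` returning the desired number of workers, so a minimal custom strategy can be sketched as below. This is only an illustration of the interface; unlike the bundled `BinaryScalingStrategy`, it ignores the `system` argument (queue contents, retries, etc.) and looks only at idle time, and the five-minute threshold is an arbitrary example value.

    # Hold one worker until the queue has been idle for five minutes, then scale to zero.
    # Passed to the server middleware in place of the timeout.
    strategy = lambda do |system, idle_time|
      idle_time < 5 * 60 ? 1 : 0
    end

See `lib/autoscaler/binary_scaling_strategy.rb` for a strategy that also consults the queue system before scaling down.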
data/examples/simple.rb CHANGED
@@ -5,6 +5,7 @@ require 'autoscaler/heroku_scaler'
  heroku = nil
  if ENV['HEROKU_APP']
    heroku = Autoscaler::HerokuScaler.new
+   #heroku.exception_handler = lambda {|exception| MyApp.logger.error(exception)}
  end
 
  Sidekiq.configure_client do |config|
@@ -2,7 +2,7 @@ module Autoscaler
  # Strategies determine the target number of workers
  # The default strategy has a single worker when there is anything, or shuts it down.
  class BinaryScalingStrategy
-   #@params [integer] active_workers number of workers when in the active state.
+   #@param [integer] active_workers number of workers when in the active state.
    def initialize(active_workers = 1)
      @active_workers = active_workers
    end
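The hunk above is from the default `BinaryScalingStrategy`; the `active_workers` argument documented there is the worker count used while there is work to do. As an illustrative sketch only, a variant of the default behavior that keeps two workers while busy could be built like this:

    require 'autoscaler/binary_scaling_strategy'

    # Two workers while anything is queued, scheduled, retrying or running; zero otherwise.
    strategy = Autoscaler::BinaryScalingStrategy.new(2)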
@@ -26,7 +26,7 @@ module Autoscaler
    if known?
      @workers
    else
-     know client.get_ps(app).body.count {|ps| ps['process'].match /#{type}\.\d?/ }
+     know heroku_get_workers
    end
  end
 
@@ -35,11 +35,23 @@ module Autoscaler
  def workers=(n)
    if n != @workers || !known?
      p "Scaling #{type} to #{n}"
-     client.post_ps_scale(app, type, n)
+     heroku_set_workers(n)
      know n
    end
  end
 
+ # Callable object which responds to exceptions during api calls
+ #
+ # @example
+ #   heroku.exception_handler = lambda {|exception| MyApp.logger.error(exception)}
+ #   heroku.exception_handler = lambda {|exception| raise}
+ #   # default
+ #   lambda {|exception|
+ #     p exception
+ #     puts exception.backtrace
+ #   }
+ attr_writer :exception_handler
+
  private
  attr_reader :client
 
@@ -51,5 +63,25 @@ module Autoscaler
  def known?
    Time.now < @known
  end
+
+ def heroku_get_workers
+   client.get_ps(app).body.count {|ps| ps['process'].match /#{type}\.\d?/ }
+ rescue Excon::Errors::Error => e
+   exception_handler.call(e)
+   @workers
+ end
+
+ def heroku_set_workers(n)
+   client.post_ps_scale(app, type, n)
+ rescue Excon::Errors::Error => e
+   exception_handler.call(e)
+ end
+
+ def exception_handler
+   @exception_handler ||= lambda {|exception|
+     p exception
+     puts exception.backtrace
+   }
+ end
  end
  end
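The `exception_handler` introduced above receives the raw `Excon::Errors::Error`, and the rescue in `heroku_get_workers` then falls back to the last cached worker count. A handler that logs transient network failures but re-raises everything else might look like the sketch below; `MyApp.logger` is the placeholder logger from the gem's own example, and treating only `Excon::Errors::SocketError` as transient is an assumption made for illustration.

    heroku.exception_handler = lambda do |exception|
      if exception.is_a?(Excon::Errors::SocketError)
        MyApp.logger.warn(exception)  # transient network problem: log and carry on
      else
        raise exception               # anything else should still surface
      end
    end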
@@ -14,12 +14,14 @@ module Autoscaler
 
  # Sidekiq middleware api method
  def call(worker_class, item, queue)
+   result = yield
+
    scaler = @scalers[queue]
    if scaler && scaler.workers < 1
      scaler.workers = 1
    end
 
-   yield
+   result
  end
 
  # Check for interrupted or scheduled work on startup.
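The reordering above enqueues the job (the `yield`) before the scale-up call, so the job is already pushed by the time any Heroku API request is made. For context, a typical client-side wiring is sketched below; it reuses the `add_to_chain` call from the README hunk earlier and the `heroku`/`ENV['HEROKU_APP']` guard from `examples/simple.rb`, though the exact block in an application may differ.

    Sidekiq.configure_client do |config|
      config.client_middleware do |chain|
        # Scale up the worker process for the 'default' queue when jobs are pushed.
        Autoscaler::Sidekiq::Client.add_to_chain(chain, 'default' => heroku) if heroku
      end
    end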
@@ -1,4 +1,4 @@
  module Autoscaler
    # version number
-   VERSION = "0.5.0"
+   VERSION = "0.6.0"
  end
@@ -17,4 +17,27 @@ describe Autoscaler::HerokuScaler, :online => true do
 
  its(:workers) {should == 1}
  end
+
+ describe 'exception handling', :focus => true do
+   before do
+     def client.client
+       raise Excon::Errors::SocketError.new(Exception.new('oops'))
+     end
+   end
+
+   describe "default handler" do
+     it {expect{client.workers}.to_not raise_error}
+     it {client.workers.should == 0}
+     it {expect{client.workers = 1}.to_not raise_error}
+   end
+
+   describe "custom handler" do
+     before do
+       @caught = false
+       client.exception_handler = lambda {|exception| @caught = true}
+     end
+
+     it {client.workers; @caught.should be_true}
+   end
+ end
  end
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: autoscaler
  version: !ruby/object:Gem::Version
-   version: 0.5.0
+   version: 0.6.0
  prerelease:
  platform: ruby
  authors:
@@ -10,7 +10,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2013-08-21 00:00:00.000000000 Z
+ date: 2013-09-05 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: sidekiq