dynamo-autoscale 0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.gitignore +4 -0
- data/Gemfile +13 -0
- data/Gemfile.lock +58 -0
- data/LICENSE +21 -0
- data/README.md +400 -0
- data/Rakefile +9 -0
- data/aws.sample.yml +16 -0
- data/bin/dynamo-autoscale +131 -0
- data/config/environment/common.rb +114 -0
- data/config/environment/console.rb +2 -0
- data/config/environment/test.rb +3 -0
- data/config/logger.yml +11 -0
- data/config/services/aws.rb +20 -0
- data/config/services/logger.rb +35 -0
- data/data/.gitkeep +0 -0
- data/dynamo-autoscale.gemspec +29 -0
- data/lib/dynamo-autoscale/actioner.rb +265 -0
- data/lib/dynamo-autoscale/cw_poller.rb +49 -0
- data/lib/dynamo-autoscale/dispatcher.rb +39 -0
- data/lib/dynamo-autoscale/dynamo_actioner.rb +59 -0
- data/lib/dynamo-autoscale/ext/active_support/duration.rb +7 -0
- data/lib/dynamo-autoscale/local_actioner.rb +39 -0
- data/lib/dynamo-autoscale/local_data_poll.rb +51 -0
- data/lib/dynamo-autoscale/logger.rb +15 -0
- data/lib/dynamo-autoscale/metrics.rb +192 -0
- data/lib/dynamo-autoscale/poller.rb +41 -0
- data/lib/dynamo-autoscale/pretty_formatter.rb +27 -0
- data/lib/dynamo-autoscale/rule.rb +180 -0
- data/lib/dynamo-autoscale/rule_set.rb +69 -0
- data/lib/dynamo-autoscale/table_tracker.rb +329 -0
- data/lib/dynamo-autoscale/unit_cost.rb +41 -0
- data/lib/dynamo-autoscale/version.rb +3 -0
- data/lib/dynamo-autoscale.rb +1 -0
- data/rlib/dynamodb_graph.r +15 -0
- data/rlib/dynamodb_scatterplot.r +13 -0
- data/rulesets/default.rb +5 -0
- data/rulesets/erroneous.rb +1 -0
- data/rulesets/gradual_tail.rb +11 -0
- data/rulesets/none.rb +0 -0
- data/script/console +3 -0
- data/script/historic_data +46 -0
- data/script/hourly_wastage +40 -0
- data/script/monitor +55 -0
- data/script/simulator +40 -0
- data/script/test +52 -0
- data/script/validate_ruleset +20 -0
- data/spec/actioner_spec.rb +244 -0
- data/spec/rule_set_spec.rb +89 -0
- data/spec/rule_spec.rb +491 -0
- data/spec/spec_helper.rb +4 -0
- data/spec/table_tracker_spec.rb +256 -0
- metadata +178 -0
data/config/environment/common.rb
ADDED
@@ -0,0 +1,114 @@
require 'logger'
require 'time'
require 'csv'
require 'tempfile'
require 'aws-sdk'
require 'active_support/all'
require 'rbtree'
require 'colored'

require_relative '../../lib/dynamo-autoscale/logger'
require_relative '../../lib/dynamo-autoscale/poller'

module DynamoAutoscale
  include DynamoAutoscale::Logger

  DEFAULT_AWS_REGION = 'us-east-1'

  def self.root
    File.expand_path(File.join(File.dirname(__FILE__), '..', '..'))
  end

  def self.data_dir
    File.join(self.root, 'data')
  end

  def self.env
    ENV['RACK_ENV'] || 'development'
  end

  def self.with_config name, opts = {}
    path = nil

    if opts[:absolute]
      path = name
    else
      path = File.join(root, 'config', "#{name}.yml")
    end

    conf = YAML.load_file(path)[env]
    yield conf if block_given?
    conf
  end

  def self.require_all path
    Dir[File.join(root, path, '*.rb')].each { |file| require file }
  end

  def self.dispatcher= new_dispatcher
    @@dispatcher = new_dispatcher
  end

  def self.dispatcher
    @@dispatcher ||= Dispatcher.new
  end

  def self.poller= new_poller
    @@poller = new_poller
  end

  def self.poller
    @@poller ||= LocalDataPoll.new
  end

  def self.actioner_class= klass
    @@actioner_class = klass
  end

  def self.actioner_class
    @@actioner_class ||= LocalActioner
  end

  def self.actioner_opts= new_opts
    @@actioner_opts = new_opts
  end

  def self.actioner_opts
    @@actioner_opts ||= {}
  end

  def self.actioners
    @@actioners ||= Hash.new { |h, k| h[k] = actioner_class.new(k, actioner_opts) }
  end

  def self.tables= new_tables
    @@tables = new_tables
  end

  def self.tables
    @@tables ||= Hash.new { |h, k| h[k] = TableTracker.new(k) }
  end

  def self.current_table= new_current_table
    @@current_table = new_current_table
  end

  def self.current_table
    @@current_table
  end

  def self.rules= new_rules
    @@rules = new_rules
  end

  def self.rules
    @@rules ||= RuleSet.new
  end
end

DynamoAutoscale.require_all 'lib/dynamo-autoscale'
DynamoAutoscale.require_all 'lib/dynamo-autoscale/ext/**'

Dir[File.join(DynamoAutoscale.root, 'config', 'services', '*.rb')].each do |path|
  load path
end
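
This module is essentially a bag of lazily built singletons that the rest of the gem reaches for. A minimal sketch of how the helpers compose is below; the table name "my-table" is illustrative and not part of the gem.

conf = DynamoAutoscale.with_config('logger')   # the RACK_ENV section of config/logger.yml

tracker  = DynamoAutoscale.tables['my-table']  # lazily builds a TableTracker for that name
actioner = DynamoAutoscale.actioners[tracker]  # lazily builds the actioner (LocalActioner by default)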
data/config/services/aws.rb
ADDED
@@ -0,0 +1,20 @@
config_location = nil

if File.exists? './aws.yml'
  config_location = './aws.yml'
elsif ENV['AWS_CONFIG'] and File.exists? ENV['AWS_CONFIG']
  config_location = ENV['AWS_CONFIG']
elsif File.exists?(File.join(DynamoAutoscale.root, 'config', 'aws.yml'))
  config_location = File.join(DynamoAutoscale.root, 'config', 'aws.yml')
end

if config_location.nil?
  STDERR.puts "Could not load AWS configuration. Searched in: ./aws.yml and " +
    "ENV['AWS_CONFIG']"

  exit 1
end

DynamoAutoscale.with_config(config_location, absolute: true) do |config|
  AWS.config(config)
end
data/config/services/logger.rb
ADDED
@@ -0,0 +1,35 @@
DynamoAutoscale.with_config 'logger' do |config|
  if config[:sync]
    STDOUT.sync = true
    STDERR.sync = true
  end

  if config[:log_to]
    STDOUT.reopen(config[:log_to])
    STDERR.reopen(config[:log_to])
  end

  DynamoAutoscale::Logger.logger = ::Logger.new(STDOUT)

  if ENV['PRETTY_LOG']
    DynamoAutoscale::Logger.logger.formatter = DynamoAutoscale::PrettyFormatter.new
  else
    DynamoAutoscale::Logger.logger.formatter = Logger::Formatter.new
  end

  if ENV['DEBUG']
    DynamoAutoscale::Logger.logger.level = ::Logger::DEBUG
  elsif config[:level]
    DynamoAutoscale::Logger.logger.level = ::Logger.const_get(config[:level])
  end

  if ENV['SILENT'] # or DynamoAutoscale.env == "test"
    DynamoAutoscale::Logger.logger.level = ::Logger::FATAL
  end
end

if ENV['DEBUG']
  AWS.config({
    logger: DynamoAutoscale::Logger.logger,
  })
end
data/data/.gitkeep
ADDED
File without changes
data/dynamo-autoscale.gemspec
ADDED
@@ -0,0 +1,29 @@
require 'date'
require './lib/dynamo-autoscale/version'

Gem::Specification.new do |gem|
  gem.name    = 'dynamo-autoscale'
  gem.version = DynamoAutoscale::VERSION
  gem.date    = Date.today.to_s

  gem.summary     = "Autoscaling for DynamoDB provisioned throughputs."
  gem.description = "Will automatically monitor DynamoDB tables and scale them based on rules."

  gem.authors  = ['InvisibleHand']
  gem.email    = 'developers@getinvisiblehand.com'
  gem.homepage = 'http://github.com/invisiblehand/dynamo-autoscale'

  gem.bindir      = ['bin']
  gem.executables = ['dynamo-autoscale']

  gem.license = 'MIT'

  gem.add_dependency 'aws-sdk'
  gem.add_dependency 'rbtree'
  gem.add_dependency 'ruby-prof'
  gem.add_dependency 'colored'
  gem.add_dependency 'activesupport'

  # ensure the gem is built out of versioned files
  gem.files = `git ls-files -z`.split("\0")
end
data/lib/dynamo-autoscale/actioner.rb
ADDED
@@ -0,0 +1,265 @@
module DynamoAutoscale
  class Actioner
    include DynamoAutoscale::Logger

    def self.minimum_throughput
      @minimum_throughput ||= 10
    end

    def self.minimum_throughput= new_minimum_throughput
      @minimum_throughput = new_minimum_throughput
    end

    def self.maximum_throughput
      @maximum_throughput ||= 20000
    end

    def self.maximum_throughput= new_maximum_throughput
      @maximum_throughput = new_maximum_throughput
    end

    attr_accessor :table, :upscales, :downscales

    def initialize table, opts = {}
      @table            = table
      @downscales       = 0
      @upscales         = 0
      @provisioned      = { reads: RBTree.new, writes: RBTree.new }
      @pending          = { reads: nil, writes: nil }
      @last_action      = Time.now.utc
      @last_scale_check = Time.now.utc
      @downscale_warn   = false
      @opts             = opts
    end

    def provisioned_for metric
      @provisioned[normalize_metric(metric)]
    end

    def provisioned_writes
      @provisioned[:writes]
    end

    def provisioned_reads
      @provisioned[:reads]
    end

    def check_day_reset!
      now = Time.now.utc

      if now >= (check = (@last_scale_check + 1.day).midnight)
        logger.info "[scales] New day! Reset scaling counts back to 0."
        logger.debug "[scales] now: #{now}, comp: #{check}"

        if @downscales < 4
          logger.warn "[scales] Unused downscales. Used: #{@downscales}"
        end

        @upscales       = 0
        @downscales     = 0
        @downscale_warn = false
      end

      @last_scale_check = now
    end

    def upscales
      check_day_reset!
      @upscales
    end

    def downscales new_val = nil
      check_day_reset!
      @downscales
    end

    def set metric, to
      check_day_reset!

      metric   = normalize_metric(metric)
      ptime, _ = provisioned_for(metric).last

      if ptime and ptime > 2.minutes.ago
        logger.warn "[actioner] Attempted to scale the same metric more than " +
          "once in a 2 minute window. Disallowing."
        return false
      end

      from = table.last_provisioned_for(metric)

      if from and to > (from * 2)
        to = from * 2

        logger.warn "[#{metric}] Attempted to scale up " +
          "more than allowed. Capped scale to #{to}."
      end

      if to < Actioner.minimum_throughput
        to = Actioner.minimum_throughput

        logger.warn "[#{metric}] Attempted to scale down to " +
          "less than minimum throughput. Capped scale to #{to}."
      end

      if to > Actioner.maximum_throughput
        to = Actioner.maximum_throughput

        logger.warn "[#{metric}] Attempted to scale up to " +
          "greater than maximum throughput. Capped scale to #{to}."
      end

      if from and from == to
        logger.info "[#{metric}] Attempted to scale to same value. Ignoring..."
        return false
      end

      if from and from > to
        downscale metric, from, to
      else
        upscale metric, from, to
      end
    end

    def upscale metric, from, to
      logger.info "[#{metric}][scaling up] " +
        "#{from ? from.round(2) : "Unknown"} -> #{to.round(2)}"

      # Because upscales are not limited, we don't need to queue this operation.
      if result = scale(metric, to)
        @provisioned[metric][Time.now.utc] = to
        @upscales += 1
      end

      return result
    end

    def downscale metric, from, to
      if @downscales >= 4
        unless @downscale_warn
          @downscale_warn = true
          logger.warn "[#{metric.to_s.ljust(6)}][scaling failed]" +
            " Hit upper limit of downward scales per day."
        end

        return false
      end

      if @pending[metric]
        previous_pending = @pending[metric].last
        logger.info "[#{metric}][scaling down] " +
          "#{previous_pending} -> #{to.round(2)} (overwritten pending)"
      else
        logger.info "[#{metric}][scaling down] " +
          "#{from ? from.round(2) : "Unknown"} -> #{to.round(2)}"
      end

      queue_operation! metric, to
    end

    def queue_operation! metric, value
      if @pending[metric]
        logger.debug "[#{metric}] Overwriting pending op with #{value.round(2)}"
      end

      @pending[metric] = [Time.now.utc, value]

      try_flush!
    end

    def try_flush!
      if should_flush?
        if flush_operations!
          @downscales += 1
          @last_action = Time.now.utc
          return true
        else
          return false
        end
      else
        return false
      end
    end

    def flush_operations!
      result = nil

      if @pending[:writes] and @pending[:reads]
        _, wvalue = @pending[:writes]
        _, rvalue = @pending[:reads]

        if result = scale_both(rvalue, wvalue)
          @provisioned[:writes][Time.now.utc] = wvalue
          @provisioned[:reads][Time.now.utc]  = rvalue

          @pending[:writes] = nil
          @pending[:reads]  = nil
        end
      elsif @pending[:writes]
        time, value = @pending[:writes]

        if result = scale(:writes, value)
          @provisioned[:writes][Time.now.utc] = value

          @pending[:writes] = nil
        end
      elsif @pending[:reads]
        time, value = @pending[:reads]

        if result = scale(:reads, value)
          @provisioned[:reads][Time.now.utc] = value
          @pending[:reads] = nil
        end
      end

      logger.info "[flush] All pending downscales have been flushed."
      return result
    end

    def should_flush?
      if @opts[:group_downscales].nil?
        logger.info "[flush] Downscales are not being grouped. Should flush."
        return true
      end

      if @pending[:reads] and @pending[:writes]
        logger.info "[flush] Both a read and a write are pending. Should flush."
        return true
      end

      now = Time.now.utc

      # I know what you're thinking. How would the last action ever be in the
      # future? Locally, we use Timecop to fake out the time. Unfortunately it
      # doesn't kick in until after the first data point, so when this object is
      # created the @last_action is set to Time.now.utc, then the time gets
      # rolled back, causing the last action to be in the future. This hack
      # fixes that.
      @last_action = now if @last_action > now

      if (@opts[:flush_after] and @last_action and
          (now > @last_action + @opts[:flush_after]))

        logger.info "[flush] Flush timeout of #{@opts[:flush_after]} reached."
        return true
      end

      logger.info "[flush] Flushing conditions not met. Pending operations: " +
        "#{@pending[:reads] ? "1 read" : "no reads"}, " +
        "#{@pending[:writes] ? "1 write" : "no writes"}"

      return false
    end

    private

    def normalize_metric metric
      case metric
      when :reads, :provisioned_reads, :consumed_reads
        :reads
      when :writes, :provisioned_writes, :consumed_writes
        :writes
      end
    end
  end
end
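
A rough usage sketch of the guard rails in Actioner#set, exercised through the default LocalActioner. The table name and values are made up for illustration, and it assumes the tracker has no provisioning history yet (so no doubling cap applies).

table    = DynamoAutoscale.tables['my-table']
actioner = DynamoAutoscale.actioners[table]

actioner.set(:reads, 5)       # below Actioner.minimum_throughput, so capped up to 10
actioner.set(:writes, 50_000) # above Actioner.maximum_throughput, so capped down to 20000
actioner.set(:writes, 25_000) # rejected: same metric scaled again inside the 2 minute window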
data/lib/dynamo-autoscale/cw_poller.rb
ADDED
@@ -0,0 +1,49 @@
module DynamoAutoscale
  class CWPoller < Poller
    INTERVAL = 1.minute

    def poll tables, &block
      if tables.nil?
        tables = AWS::DynamoDB.new.tables.to_a.map(&:name)
      end

      loop do
        # Sleep until the next interval occurs. This calculation ensures that
        # polling always happens on interval boundaries regardless of how long
        # polling takes.
        sleep_duration = INTERVAL - ((Time.now.to_i + INTERVAL) % INTERVAL)
        logger.debug "Sleeping for #{sleep_duration} seconds..."
        sleep(sleep_duration)

        do_poll(tables, &block)
      end
    end

    def do_poll tables, &block
      logger.debug "Beginning CloudWatch poll..."
      now = Time.now

      tables.each do |table|
        # This code will dispatch a message to the listening table that looks
        # like this:
        #
        #   {
        #     :consumed_reads=>{
        #       2013-06-19 12:22:00 UTC=>2.343117697349672
        #     },
        #     :consumed_writes=>{
        #       2013-06-19 12:22:00 UTC=>3.0288461538461537
        #     }
        #   }
        #
        # There may also be :provisioned_reads and :provisioned_writes
        # depending on how the CloudWatch API feels.
        block.call(table, Metrics.all_metrics(table, {
          period:     5.minutes,
          start_time: now - 20.minutes,
          end_time:   now,
        }))
      end
    end
  end
end
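
For illustration, the block contract described in the comment above looks roughly like this when driven by hand. In the gem this wiring lives in the executable, and it assumes the Poller base class can be constructed without arguments (as the lazy LocalDataPoll.new default in common.rb suggests); treat it as a sketch.

# Sketch only: "my-table" is illustrative. poll loops forever, waking on
# minute boundaries and yielding each table's CloudWatch metrics hash.
DynamoAutoscale::CWPoller.new.poll(['my-table']) do |table_name, metrics|
  (metrics[:consumed_reads] || {}).each do |time, value|
    puts "#{table_name}: #{value} consumed read units at #{time}"
  end
end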
data/lib/dynamo-autoscale/dispatcher.rb
ADDED
@@ -0,0 +1,39 @@
module DynamoAutoscale
  class Dispatcher
    def initialize
      @last_check = {}
    end

    def dispatch table, time, datum, &block
      DynamoAutoscale.current_table = table
      logger.debug "#{time}: Dispatching to #{table.name} with data: #{datum}"

      if datum[:provisioned_reads] and (datum[:consumed_reads] > datum[:provisioned_reads])
        lost_reads = datum[:consumed_reads] - datum[:provisioned_reads]

        logger.warn "[reads] Lost units: #{lost_reads} " +
          "(#{datum[:consumed_reads]} - #{datum[:provisioned_reads]})"
      end

      if datum[:provisioned_writes] and (datum[:consumed_writes] > datum[:provisioned_writes])
        lost_writes = datum[:consumed_writes] - datum[:provisioned_writes]

        logger.warn "[writes] Lost units: #{lost_writes} " +
          "(#{datum[:consumed_writes]} - #{datum[:provisioned_writes]})"
      end

      table.tick(time, datum)
      block.call(table, time, datum) if block

      if @last_check[table.name].nil? or @last_check[table.name] < time
        DynamoAutoscale.rules.test(table)
        @last_check[table.name] = time
      else
        logger.debug "#{table.name}: Skipped rule check, already checked for " +
          "a later data point."
      end

      DynamoAutoscale.current_table = nil
    end
  end
end
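
As a sketch of the dispatch contract, here is a single hand-built data point being pushed through; in practice the pollers feed these in, and the table name and numbers below are illustrative.

table = DynamoAutoscale.tables['my-table']

DynamoAutoscale.dispatcher.dispatch(table, Time.now.utc, {
  provisioned_reads:  10.0, consumed_reads:  14.5,
  provisioned_writes: 10.0, consumed_writes: 3.2,
})
# Logs a "[reads] Lost units: 4.5 ..." warning, records the point on the
# TableTracker via #tick, and runs the rule set against the table.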
data/lib/dynamo-autoscale/dynamo_actioner.rb
ADDED
@@ -0,0 +1,59 @@
module DynamoAutoscale
  class DynamoActioner < Actioner
    def scale metric, value
      aws_throughput_key = case metric
                           when :reads
                             :read_capacity_units
                           when :writes
                             :write_capacity_units
                           end

      dynamo_scale(aws_throughput_key => value)
    end

    def scale_both reads, writes
      dynamo_scale(read_capacity_units: reads, write_capacity_units: writes)
    end

    private

    def dynamo_scale opts
      aws_table = AWS::DynamoDB.new.tables[table.name]

      if aws_table.status == :updating
        logger.warn "Cannot scale throughputs. Table is updating."
        return false
      end

      aws_table.provision_throughput(opts)
      return true
    rescue AWS::DynamoDB::Errors::ValidationException => e
      # Raised when you try to set throughput to a negative value, or to the
      # same value it had previously.
      logger.warn "[#{e.class}] #{e.message}"
      return false
    rescue AWS::DynamoDB::Errors::ResourceInUseException => e
      # Raised when you try to update a table that is already being updated.
      logger.warn "[#{e.class}] #{e.message}"
      return false
    rescue AWS::DynamoDB::Errors::LimitExceededException => e
      # Raised when you try to increase throughput by more than 2x, or try to
      # decrease it more than 4 times per day.
      aws_description = describe_table
      decreases_today = aws_description[:provisioned_throughput][:number_of_decreases_today]

      downscales(decreases_today)
      logger.warn "[#{e.class}] #{e.message}"
      return false
    end

    def describe_table
      data = AWS::DynamoDB::ClientV2.new.describe_table(table_name: table.name)

      data[:table]
    end
  end
end
data/lib/dynamo-autoscale/local_actioner.rb
ADDED
@@ -0,0 +1,39 @@
module DynamoAutoscale
  class LocalActioner < Actioner
    # Dummy scaling method.
    def scale metric, value
      return true
    end

    def scale_both reads, writes
      return true
    end

    # These filters use the arrays inside the local actioner to fake the
    # provisioned reads and writes when the local data enters the system. It
    # makes it look like we're actually modifying the provisioned numbers.
    def self.faux_provisioning_filters
      [Proc.new do |table, time, datum|
        actioner = DynamoAutoscale.actioners[table]

        actioner.provisioned_reads.reverse_each do |rtime, reads|
          logger.debug "Checking if #{time} > #{rtime}"
          if time > rtime
            logger.debug "[filter] Faked provisioned_reads to be #{reads} at #{time}"
            datum[:provisioned_reads] = reads
            break
          end
        end

        actioner.provisioned_writes.reverse_each do |wtime, writes|
          logger.debug "Checking if #{time} > #{wtime}"
          if time > wtime
            logger.debug "[filter] Faked provisioned_writes to be #{writes} at #{time}"
            datum[:provisioned_writes] = writes
            break
          end
        end
      end]
    end
  end
end
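
To make the faking concrete, here is a hand-applied version of those filters. In the gem they are attached to the local polling path rather than called directly, and the table name and datum below are fabricated for illustration.

table = DynamoAutoscale.tables['my-table']
datum = { consumed_reads: 80.0, consumed_writes: 12.0 }

DynamoAutoscale::LocalActioner.faux_provisioning_filters.each do |filter|
  filter.call(table, Time.now.utc, datum)
end
# If the table's actioner has "scaled" before, datum now carries
# :provisioned_reads / :provisioned_writes matching those faked values
# instead of whatever the historic data recorded.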