dynamo-autoscale 0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. data/.gitignore +4 -0
  2. data/Gemfile +13 -0
  3. data/Gemfile.lock +58 -0
  4. data/LICENSE +21 -0
  5. data/README.md +400 -0
  6. data/Rakefile +9 -0
  7. data/aws.sample.yml +16 -0
  8. data/bin/dynamo-autoscale +131 -0
  9. data/config/environment/common.rb +114 -0
  10. data/config/environment/console.rb +2 -0
  11. data/config/environment/test.rb +3 -0
  12. data/config/logger.yml +11 -0
  13. data/config/services/aws.rb +20 -0
  14. data/config/services/logger.rb +35 -0
  15. data/data/.gitkeep +0 -0
  16. data/dynamo-autoscale.gemspec +29 -0
  17. data/lib/dynamo-autoscale/actioner.rb +265 -0
  18. data/lib/dynamo-autoscale/cw_poller.rb +49 -0
  19. data/lib/dynamo-autoscale/dispatcher.rb +39 -0
  20. data/lib/dynamo-autoscale/dynamo_actioner.rb +59 -0
  21. data/lib/dynamo-autoscale/ext/active_support/duration.rb +7 -0
  22. data/lib/dynamo-autoscale/local_actioner.rb +39 -0
  23. data/lib/dynamo-autoscale/local_data_poll.rb +51 -0
  24. data/lib/dynamo-autoscale/logger.rb +15 -0
  25. data/lib/dynamo-autoscale/metrics.rb +192 -0
  26. data/lib/dynamo-autoscale/poller.rb +41 -0
  27. data/lib/dynamo-autoscale/pretty_formatter.rb +27 -0
  28. data/lib/dynamo-autoscale/rule.rb +180 -0
  29. data/lib/dynamo-autoscale/rule_set.rb +69 -0
  30. data/lib/dynamo-autoscale/table_tracker.rb +329 -0
  31. data/lib/dynamo-autoscale/unit_cost.rb +41 -0
  32. data/lib/dynamo-autoscale/version.rb +3 -0
  33. data/lib/dynamo-autoscale.rb +1 -0
  34. data/rlib/dynamodb_graph.r +15 -0
  35. data/rlib/dynamodb_scatterplot.r +13 -0
  36. data/rulesets/default.rb +5 -0
  37. data/rulesets/erroneous.rb +1 -0
  38. data/rulesets/gradual_tail.rb +11 -0
  39. data/rulesets/none.rb +0 -0
  40. data/script/console +3 -0
  41. data/script/historic_data +46 -0
  42. data/script/hourly_wastage +40 -0
  43. data/script/monitor +55 -0
  44. data/script/simulator +40 -0
  45. data/script/test +52 -0
  46. data/script/validate_ruleset +20 -0
  47. data/spec/actioner_spec.rb +244 -0
  48. data/spec/rule_set_spec.rb +89 -0
  49. data/spec/rule_spec.rb +491 -0
  50. data/spec/spec_helper.rb +4 -0
  51. data/spec/table_tracker_spec.rb +256 -0
  52. metadata +178 -0
@@ -0,0 +1,329 @@
1
module DynamoAutoscale
  # Tracks a rolling time window of provisioned and consumed read/write
  # throughput for a single DynamoDB table, and derives aggregate statistics
  # (wasted, lost and total units, plus a fitness score) over that window.
  class TableTracker
    include DynamoAutoscale::Logger

    # TODO: This time window may need changing.
    TIME_WINDOW = 7.days

    attr_reader :name, :data

    def initialize name
      @name = name
      clear_data
    end

    # Resets all tracked data. RBTree keeps entries sorted by time, which the
    # pruning in #tick and the reverse scans below rely on.
    def clear_data
      @data = RBTree.new
    end

    # `tick` takes two arguments. The first is a Time object, the second is
    # a hash. The tick method expects data in the following format for the
    # second argument:
    #
    #   {:provisioned_writes=>600.0,
    #    :provisioned_reads=>800.0,
    #    :consumed_writes=>52.693333333333335,
    #    :consumed_reads=>342.4033333333333}
    def tick time, datum
      if time < (Time.now.utc - TIME_WINDOW)
        logger.warn "Attempted to insert data outside of the time window."
        return
      end

      # Sometimes there are gaps in the data pertaining to provisioned
      # amounts. These two conditional blocks fill in those gaps.
      if datum[:provisioned_writes].nil?
        datum[:provisioned_writes] = last_provisioned_for :writes, at: time

        if datum[:provisioned_writes]
          logger.debug "Filled in gap in provisioned writes."
        end
      end

      if datum[:provisioned_reads].nil?
        datum[:provisioned_reads] = last_provisioned_for :reads, at: time

        if datum[:provisioned_reads]
          logger.debug "Filled in gap in provisioned reads."
        end
      end

      @data[time] = datum

      # The code below here just makes sure that we're trimming data points
      # that are outside of the time window.
      logger.debug "Pruning data that may be outside of time window..."
      now = Time.now.utc
      to_delete = @data.each.take_while { |key, _| key < (now - TIME_WINDOW) }
      to_delete.each { |key, _| @data.delete(key) }
    end

    # Gets the last amount of provisioned throughput for whatever metric you
    # pass in. Example:
    #
    #   table.last_provisioned_for :writes
    #   #=> 600.0
    #
    # Accepts an `at:` option to only consider data points at or before that
    # time. Returns nil when no matching data point exists.
    def last_provisioned_for metric, opts = {}
      key = case metric
            when :reads, :provisioned_reads, :consumed_reads
              :provisioned_reads
            when :writes, :provisioned_writes, :consumed_writes
              :provisioned_writes
            end

      @data.reverse_each do |time, datum|
        if opts[:at].nil? or time <= opts[:at]
          return datum[key] if datum[key]
        end
      end

      return nil
    end

    # Gets the last amount of consumed throughput for whatever metric you
    # pass in. Example:
    #
    #   table.last_consumed_for :writes
    #   #=> 54.3456
    #
    # Accepts an `at:` option to only consider data points at or before that
    # time. Returns nil when no matching data point exists.
    def last_consumed_for metric, opts = {}
      key = case metric
            when :reads, :provisioned_reads, :consumed_reads
              :consumed_reads
            when :writes, :provisioned_writes, :consumed_writes
              :consumed_writes
            end

      @data.reverse_each do |time, datum|
        if opts[:at].nil? or time <= opts[:at]
          return datum[key] if datum[key]
        end
      end

      return nil
    end

    # Useful method for querying the last N points, or the last points in a
    # time range. For example:
    #
    #   table.last 5, :consumed_writes
    #   #=> [ array of last 5 data points ]
    #
    #   table.last 5.minutes, :provisioned_reads
    #   #=> [ array containing the last 5 minutes of provisioned read data ]
    #
    # If there are no points present, or no points in your time range, the
    # return value will be an empty array.
    def last value, metric
      if value.is_a? ActiveSupport::Duration
        # Walk backwards, subtracting the gap between consecutive points from
        # the requested duration until it is exhausted.
        value = value.to_i
        to_return = []
        now = Time.now.to_i

        @data.reverse_each do |time, datum|
          value -= now - time.to_i
          now = time.to_i
          break if value < 0

          to_return << datum[metric]
        end

        to_return
      else
        @data.reverse_each.take(value).map { |time, datum| datum[metric] }
      end
    end

    # Calculate how many read units have been wasted in the current set of
    # tracked data.
    #
    #   table.wasted_read_units
    #   #=> 244.4
    def wasted_read_units
      @data.inject(0) do |memo, (_, datum)|
        # Skip points where either metric has a gap; subtracting through nil
        # would raise NoMethodError.
        if datum[:provisioned_reads] and datum[:consumed_reads]
          memo += datum[:provisioned_reads] - datum[:consumed_reads]
        end

        memo
      end
    end

    # Calculate how many write units have been wasted in the current set of
    # tracked data.
    #
    #   table.wasted_write_units
    #   #=> 566.3
    def wasted_write_units
      @data.inject(0) do |memo, (_, datum)|
        # Skip points where either metric has a gap; subtracting through nil
        # would raise NoMethodError.
        if datum[:provisioned_writes] and datum[:consumed_writes]
          memo += datum[:provisioned_writes] - datum[:consumed_writes]
        end

        memo
      end
    end

    # Whenever the consumed units goes above the provisioned, we refer to the
    # overflow as "lost" units.
    def lost_read_units
      @data.inject(0) do |memo, (_, datum)|
        # Guard against gaps in either metric before comparing.
        if datum[:consumed_reads] and datum[:provisioned_reads] and
           datum[:consumed_reads] > datum[:provisioned_reads]
          memo += datum[:consumed_reads] - datum[:provisioned_reads]
        end

        memo
      end
    end

    # Whenever the consumed units goes above the provisioned, we refer to the
    # overflow as "lost" units.
    def lost_write_units
      @data.inject(0) do |memo, (_, datum)|
        # Guard against gaps in either metric before comparing.
        if datum[:consumed_writes] and datum[:provisioned_writes] and
           datum[:consumed_writes] > datum[:provisioned_writes]
          memo += datum[:consumed_writes] - datum[:provisioned_writes]
        end

        memo
      end
    end

    def total_read_units
      @data.inject(0) do |memo, (_, datum)|
        memo += datum[:provisioned_reads] if datum[:provisioned_reads]
        memo
      end
    end

    def total_write_units
      @data.inject(0) do |memo, (_, datum)|
        memo += datum[:provisioned_writes] if datum[:provisioned_writes]
        memo
      end
    end

    def wasted_read_percent
      # With no data both operands are integer 0, so the bare division would
      # raise ZeroDivisionError.
      total = total_read_units
      return 0.0 if total.zero?

      (wasted_read_units / total) * 100.0
    end

    def wasted_write_percent
      total = total_write_units
      return 0.0 if total.zero?

      (wasted_write_units / total) * 100.0
    end

    def lost_write_percent
      total = total_write_units
      return 0.0 if total.zero?

      (lost_write_units / total) * 100.0
    end

    def lost_read_percent
      total = total_read_units
      return 0.0 if total.zero?

      (lost_read_units / total) * 100.0
    end

    # Returns an array of all of the time points that have data present in
    # them. Example:
    #
    #   table.tick(Time.now, { ... })
    #   table.tick(Time.now, { ... })
    #
    #   table.all_times
    #   #=> Array with the 2 time values above in it
    def all_times
      @data.keys
    end

    # Returns the earliest point in time that we have tracked data for.
    def earliest_data_time
      all_times.first
    end

    # Returns the latest point in time that we have tracked data for.
    def latest_data_time
      all_times.last
    end

    # Pricing is pretty difficult. This isn't a good measure of success. Base
    # calculations on how many units are wasted.
    # def wasted_money
    #   UnitCost.read(wasted_read_units) + UnitCost.write(wasted_write_units)
    # end

    # Dumps the tracked data to a CSV file and returns its path. Accepts a
    # `:path` option; defaults to <project root>/<table name>.csv.
    def to_csv! opts = {}
      # `||`, not `or`: `path = opts[:path] or default` parses as
      # `(path = opts[:path]) or default`, leaving path nil when no :path
      # option was given.
      path = opts[:path] || File.join(DynamoAutoscale.root, "#{self.name}.csv")

      CSV.open(path, 'w') do |csv|
        csv << [
          "time",
          "provisioned_reads",
          "provisioned_writes",
          "consumed_reads",
          "consumed_writes",
        ]

        @data.each do |time, datum|
          csv << [
            time.iso8601,
            datum[:provisioned_reads],
            datum[:provisioned_writes],
            datum[:consumed_reads],
            datum[:consumed_writes],
          ]
        end
      end

      path
    end

    # Renders a line graph of the tracked data via rlib/dynamodb_graph.r and
    # returns the png path. Options: :path (output png), :open (open the
    # image after rendering).
    def graph! opts = {}
      data_tmp = File.join(Dir.tmpdir, 'data.csv')
      png_tmp  = opts[:path] || File.join(Dir.tmpdir, 'graph.png')
      r_script = File.join(DynamoAutoscale.root, 'rlib', 'dynamodb_graph.r')

      to_csv!(path: data_tmp)

      `r --no-save --args #{data_tmp} #{png_tmp} < #{r_script}`

      # $? is a Process::Status; comparing it to an Integer is deprecated.
      if $?.success?
        `open #{png_tmp}` if opts[:open]
      else
        logger.error "Failed to create graph."
      end

      png_tmp
    end

    # Renders a scatterplot of a single metric bucketed by hour of day via
    # rlib/dynamodb_scatterplot.r and returns the png path.
    def scatterplot_for! metric
      data_tmp = File.join(Dir.tmpdir, 'data.csv')
      png_tmp  = File.join(Dir.tmpdir, 'boxplot.png')
      # The script shipped with the gem is dynamodb_scatterplot.r; there is
      # no dynamodb_boxplot.r.
      r_script = File.join(DynamoAutoscale.root, 'rlib', 'dynamodb_scatterplot.r')

      # to_csv! takes an options hash, not a bare path. The R script also
      # reads the metric name from its third argument.
      to_csv!(path: data_tmp)

      `r --no-save --args #{data_tmp} #{png_tmp} #{metric} < #{r_script}`

      if $?.success?
        `open #{png_tmp}`
      else
        logger.error "Failed to create graph."
      end

      png_tmp
    end

    # Prints a human-readable summary of the tracked window to stdout.
    def report!
      puts "         Table: #{name}"
      puts "Wasted r/units: #{wasted_read_units.round(2)} (#{wasted_read_percent.round(2)}%)"
      puts " Total r/units: #{total_read_units.round(2)}"
      puts "  Lost r/units: #{lost_read_units.round(2)} (#{lost_read_percent.round(2)}%)"
      puts "Wasted w/units: #{wasted_write_units.round(2)} (#{wasted_write_percent.round(2)}%)"
      puts " Total w/units: #{total_write_units.round(2)}"
      puts "  Lost w/units: #{lost_write_units.round(2)} (#{lost_write_percent.round(2)}%)"
      puts "      Upscales: #{DynamoAutoscale.actioners[self].upscales}"
      puts "    Downscales: #{DynamoAutoscale.actioners[self].downscales}"
      puts "       Fitness: #{fitness}"
    end

    # Weighted average of lost and wasted percentages; lost units are
    # penalised far more heavily than wasted units. Lower is better.
    def fitness
      lost_weight = 100
      wasted_weight = 1
      lost = ((lost_read_percent + lost_write_percent) / 2)
      wasted = ((wasted_read_percent + wasted_write_percent) / 2)

      ((lost * lost_weight) + (wasted * wasted_weight)) / (lost_weight + wasted_weight)
    end
  end
end
@@ -0,0 +1,41 @@
1
module DynamoAutoscale
  # Translates read/write throughput units into an approximate hourly dollar
  # cost for a given AWS region.
  class UnitCost
    # Pricing information obtained from: http://aws.amazon.com/dynamodb/pricing/
    HOURLY_PRICING = {
      'us-east-1' => {
        read: { dollars: 0.0065, per: 50 },
        write: { dollars: 0.0065, per: 10 },
      },
      'us-west-1' => {
        read: { dollars: 0.0065, per: 50 },
        write: { dollars: 0.0065, per: 10 },
      },
    }

    class << self
      # Returns the cost of N read units for an hour in a given region, which
      # defaults to whatever is in DynamoAutoscale::DEFAULT_AWS_REGION.
      #
      # Example:
      #
      #   DynamoAutoscale::UnitCost.read(500, region: 'us-west-1')
      #   #=> 0.065
      def read units, opts = {}
        hourly_cost(:read, units, opts)
      end

      # Returns the cost of N write units for an hour in a given region, which
      # defaults to whatever is in DynamoAutoscale::DEFAULT_AWS_REGION.
      #
      # Example:
      #
      #   DynamoAutoscale::UnitCost.write(500, region: 'us-west-1')
      #   #=> 0.325
      def write units, opts = {}
        hourly_cost(:write, units, opts)
      end

      private

      # Shared lookup: price the given unit count using the region's rate
      # card for the given operation type (:read or :write).
      def hourly_cost type, units, opts
        region = opts[:region] || DEFAULT_AWS_REGION
        rate = HOURLY_PRICING[region][type]
        (units / rate[:per].to_f) * rate[:dollars]
      end
    end
  end
end
@@ -0,0 +1,3 @@
1
module DynamoAutoscale
  # Gem version string. Frozen so the constant cannot be mutated in place.
  VERSION = '0.1'.freeze
end
@@ -0,0 +1 @@
1
# Gem entry point: requiring 'dynamo-autoscale' delegates all setup to the
# shared environment bootstrap in config/environment/common.rb.
require_relative '../config/environment/common'
@@ -0,0 +1,15 @@
1
# Renders a line graph of provisioned/consumed reads and writes over time.
# Invoked by TableTracker#graph! as:
#   r --no-save --args <data.csv> <out.png> < dynamodb_graph.r
require(ggplot2)
require(reshape)
args <- commandArgs(trailingOnly = TRUE)
data = read.csv(args[1], header=T, sep=",")

# Timestamps are ISO 8601 UTC strings, as written by TableTracker#to_csv!.
data$time = strptime(data$time, "%Y-%m-%dT%H:%M:%SZ")

measure.vars = c('provisioned_reads','provisioned_writes',
  'consumed_reads','consumed_writes')

# Melt to long format: one (time, variable, value) row per metric sample,
# so each metric becomes its own coloured line.
ive.melted = melt(data, id.vars='time', measure.vars = measure.vars)

g = ggplot(ive.melted, aes(x=time, y=value, color=variable)) + geom_line()

ggsave(file=args[2], plot=g, width=20, height=8)
@@ -0,0 +1,13 @@
1
# Scatterplot of a single metric (named by the third CLI argument) bucketed
# by hour of day.
# Usage: r --no-save --args <data.csv> <out.png> <metric> < dynamodb_scatterplot.r
require(ggplot2)
require(reshape)
args <- commandArgs(trailingOnly = TRUE)
data = read.csv(args[1], header=T, sep=",")

# Timestamps are ISO 8601 UTC strings, as written by TableTracker#to_csv!.
data$time = strptime(data$time, "%Y-%m-%dT%H:%M:%SZ")
# Hour-of-day as a factor gives one column of points per hour on the x axis.
data$hour = as.factor(strftime(data$time, "%H"))
measure.vars = c(args[3])

ive.melted = melt(data, id.vars='hour', measure.vars = measure.vars)
g = ggplot(ive.melted, aes(x=hour, y=value, color=variable)) + geom_point()

ggsave(file=args[2], plot=g, width=10, height=8)
@@ -0,0 +1,5 @@
1
# Default ruleset: scale up aggressively, scale down cautiously.

# Upscale: if the latest reading is above 90% utilisation, set provisioning
# to 2x the consumed amount.
reads last: 1, greater_than: "90%", scale: { on: :consumed, by: 2 }
writes last: 1, greater_than: "90%", scale: { on: :consumed, by: 2 }

# Downscale: after 2 hours below 50% utilisation, set provisioning to 2x
# consumed. (min: 2 — presumably a floor value; confirm against Rule docs.)
reads for: 2.hours, less_than: "50%", min: 2, scale: { on: :consumed, by: 2 }
writes for: 2.hours, less_than: "50%", min: 2, scale: { on: :consumed, by: 2 }
@@ -0,0 +1 @@
1
# Intentionally invalid rule: greater_than: 10 and less_than: 5 can never
# both hold. Presumably used to exercise ruleset validation (see
# script/validate_ruleset) — confirm against the specs.
reads last: 1, greater_than: 10, less_than: 5
@@ -0,0 +1,11 @@
1
# Gradual-tail ruleset: larger upscales for sharper spikes, slow downscales.

# Upscale: above 90% over the last 2 readings scales harder than above 80%.
reads last: 2, greater_than: "90%", scale: { on: :consumed, by: 1.7 }
reads last: 2, greater_than: "80%", scale: { on: :consumed, by: 1.5 }

writes last: 2, greater_than: "90%", scale: { on: :consumed, by: 1.7 }
writes last: 2, greater_than: "80%", scale: { on: :consumed, by: 1.5 }

# Downscale after 2 quiet hours.
# NOTE(review): each "30%" rule follows a "20%" rule with identical scaling;
# if rules are matched in order, the broader 30% threshold may make the 20%
# rule redundant — confirm intended precedence against RuleSet semantics.
reads for: 2.hours, less_than: "20%", min: 10, scale: { on: :consumed, by: 1.8 }
reads for: 2.hours, less_than: "30%", min: 10, scale: { on: :consumed, by: 1.8 }

writes for: 2.hours, less_than: "20%", min: 10, scale: { on: :consumed, by: 1.8 }
writes for: 2.hours, less_than: "30%", min: 10, scale: { on: :consumed, by: 1.8 }
data/rulesets/none.rb ADDED
File without changes
data/script/console ADDED
@@ -0,0 +1,3 @@
1
+ #!/usr/bin/env bash
2
+
3
+ irb -r ./config/environment/console.rb
@@ -0,0 +1,46 @@
1
#!/usr/bin/env ruby

# Fetches the last 5 days of CloudWatch metric data for the given DynamoDB
# tables and caches it on disk as data/<date>/<table>.json, one file per
# table per day, for later use by the local polling scripts.

require_relative '../config/environment/common'
require 'pp'
require 'fileutils'

include DynamoAutoscale

tables = ARGV

if tables.empty?
  STDERR.puts "Usage: script/historic_data table_name [another_table_name ...]"
  exit 1
end

dynamo = AWS::DynamoDB.new
range = (Date.today - 5.days).upto(Date.today)
logger.info "Date range: #{range.to_a}"

# Filter out tables that do not exist in Dynamo.
tables.select! do |table|
  if dynamo.tables[table].exists?
    true
  else
    logger.error "Table #{table} does not exist. Skipping."
    false
  end
end

range.each do |start_day|
  dir = File.join(DynamoAutoscale.data_dir, start_day.to_s)
  end_day = start_day + 1.day

  # Dir.exists? is deprecated; Dir.exist? is the supported spelling.
  FileUtils.mkdir(dir) unless Dir.exist?(dir)

  tables.each do |table|
    logger.info "Collecting data for #{table} on #{start_day}..."
    # One JSON file per table per day, covering [start_day, end_day).
    File.open(File.join(dir, "#{table}.json"), 'w') do |file|
      file.write(JSON.pretty_generate(Metrics.all_metrics(table, {
        period: 5.minutes,
        start_time: start_day,
        end_time: end_day,
      })))
    end
  end
end
@@ -0,0 +1,40 @@
1
#!/usr/bin/env ruby

# This script calculates an approximate "wastage cost" for every table (wastage
# cost is defined as provisioned throughput - consumed throughput, so throughput
# that was paid for but not used).

require_relative '../config/environment/common'
include DynamoAutoscale

# All table names in the account; pad is the longest name, used to right-align
# the report output.
tables = AWS::DynamoDB.new.tables.to_a.map(&:name)
pad = tables.map(&:length).max
total_waste = 0
opts = { period: 1.hour, start_time: 1.hour.ago, end_time: Time.now }

tables.each do |table|
  # Sum the hourly average datapoints for each metric over the last hour.
  # inject(:+) returns nil for an empty series, hence the `|| 0.0` fallback.
  pr = Metrics.provisioned_reads(table, opts).map do |datum|
    datum[:average]
  end.inject(:+) || 0.0

  pw = Metrics.provisioned_writes(table, opts).map do |datum|
    datum[:average]
  end.inject(:+) || 0.0

  cr = Metrics.consumed_reads(table, opts).map do |datum|
    datum[:average]
  end.inject(:+) || 0.0

  cw = Metrics.consumed_writes(table, opts).map do |datum|
    datum[:average]
  end.inject(:+) || 0.0

  # Dollar value of the provisioned-but-unconsumed units.
  waste_cost = UnitCost.read(pr - cr) + UnitCost.write(pw - cw)
  total_waste += waste_cost

  puts "#{table.rjust(pad)}: reads(#{cr.round(4)} / #{pr.round(4)}) " +
    "writes(#{cw.round(4)} / #{pw.round(4)}), ~$#{waste_cost.round(4)} " +
    "wasted per hour"
end

puts "Total waste cost: ~$#{total_waste.round(4)} per hour"
data/script/monitor ADDED
@@ -0,0 +1,55 @@
1
#!/usr/bin/env ruby

# This script is a sort-of REPL for monitoring production data. It will
# periodically poll for production data and set off any alarms you define below.
# Useful for testing autoscaling policies.
#
# At any point of the running process, pressing ctrl + c will pause execution
# and drop you into a REPL where you can inspect the state of the program.
# Exiting the REPL will resume execution.
#
# To exit the program entirely, drop into the REPL and run "exit!".

require_relative '../config/environment/common'
require 'pp'
require 'timecop'
include DynamoAutoscale

ruleset = ARGV.shift
tables = ARGV

if tables.empty? or ruleset.nil?
  STDERR.puts "Usage: script/monitor ruleset table_name [another_table_name ... ]"
  exit 1
end

# These filters use the arrays inside the local actioner to fake the provisioned
# reads and writes when the local data enters the system. It makes it look like
# we're actually modifying the provisioned numbers.
#
# NOTE(review): `table` is not defined anywhere in this script, so these procs
# would raise NameError if ever invoked. Also, `filters` is never passed to
# CWPoller below, so as written this is dead code. Compare
# LocalActioner.faux_provisioning_filters as used by script/simulator.
filters = [
  Proc.new do |time, datum|
    if writes = DynamoAutoscale.actioner.provisioned_writes(table).last
      datum[:provisioned_writes] = writes.last
    end

    if reads = DynamoAutoscale.actioner.provisioned_reads(table).last
      datum[:provisioned_reads] = reads.last
    end
  end,
]

DynamoAutoscale.rules = RuleSet.new(ruleset)
DynamoAutoscale.dispatcher = Dispatcher.new
DynamoAutoscale.poller = CWPoller.new(tables: tables)
DynamoAutoscale.actioner_class = LocalActioner

begin
  DynamoAutoscale.poller.run
rescue SignalException, Interrupt
  # Ctrl+C pauses into a REPL; leaving the REPL resumes polling via retry.
  Ripl.start :binding => binding
  retry
rescue => e
  # If we error out, print the error and drop into a repl.
  logger.error "Exception occurred: #{e.class}:#{e.message}"
  Ripl.start :binding => binding
end
data/script/simulator ADDED
@@ -0,0 +1,40 @@
1
#!/usr/bin/env ruby

# The simulator script reads from the data/ directory for historic data gathered
# from CloudWatch with the script/historic_data script. Then it will step
# through the data chronologically, dropping you into a REPL with each new
# timeslice.
#
# Useful for checking that code works without having to make calls to the
# CloudWatch API.

require_relative '../config/environment/common'
require 'timecop'
require 'pp'
include DynamoAutoscale

ruleset = ARGV.shift
tables = ARGV

if ruleset.nil? or tables.empty?
  STDERR.puts "Usage: script/simulator ruleset table_name [another_table_name ... ]"
  exit 1
end

# Fake provisioned-throughput filters so the local data looks as though our
# scale operations actually took effect.
filters = LocalActioner.faux_provisioning_filters
DynamoAutoscale.rules = RuleSet.new(ruleset)
DynamoAutoscale.dispatcher = Dispatcher.new
DynamoAutoscale.poller = LocalDataPoll.new(tables: tables, filters: filters)
DynamoAutoscale.actioner_class = LocalActioner

DynamoAutoscale.poller.run do |table, time, datum|
  # Freeze "now" at the timeslice so time-window logic behaves as it would
  # have done live.
  Timecop.travel(time)

  puts "Event at #{time}"
  puts "#{datum.pretty_inspect}"
  puts
  puts "Press ctrl + d or type 'exit' to step forward in time."
  puts "Type 'exit!' to exit entirely."

  Ripl.start :binding => binding
end
data/script/test ADDED
@@ -0,0 +1,52 @@
1
#!/usr/bin/env ruby

# Replays locally cached historic data (see script/historic_data) through a
# ruleset and prints a per-table report at the end. Pass -g/--graph to also
# render and open a graph of the run.

require_relative '../config/environment/common'
require 'pp'
require 'timecop'
include DynamoAutoscale
extend DynamoAutoscale

# Pull the graph flag out of ARGV before positional parsing; !! coerces the
# deleted value (string or nil) to a boolean.
graph = !!(ARGV.delete("--graph") or ARGV.delete("-g"))
ruleset = ARGV.shift
tables = ARGV

if tables.empty? or ruleset.nil?
  STDERR.puts "Usage: script/test ruleset table_name [another_table_name ... ] [-g|--graph]"
  exit 1
end

filters = LocalActioner.faux_provisioning_filters
DynamoAutoscale.rules = RuleSet.new(ruleset)
DynamoAutoscale.dispatcher = Dispatcher.new
DynamoAutoscale.poller = LocalDataPoll.new(tables: tables, filters: filters)
DynamoAutoscale.actioner_class = LocalActioner
DynamoAutoscale.actioner_opts = { group_downscales: true, flush_after: 2.hours }

# Uncomment this and the below RubyProf lines if you want profiling information.
# RubyProf.start

begin
  DynamoAutoscale.poller.run do |table_name, time|
    # Freeze "now" at each timeslice so time-window logic behaves as live.
    Timecop.travel(time)
  end
rescue Interrupt
  Ripl.start binding: binding
end

# Uncomment these and the above RubyProf line if you want profiling information.
# printer = RubyProf::FlatPrinter.new(RubyProf.stop)
# printer.print(STDOUT, min_percent: 2)

# Uncomment this if you want to drop into a REPL at the end of the test.
# Ripl.start binding: binding

tables.each do |table_name|
  table = DynamoAutoscale.tables[table_name]

  table.report!

  if graph
    path = table.graph! open: true
    puts "Graph saved to #{path}"
  end
end
@@ -0,0 +1,20 @@
1
#!/usr/bin/env ruby

# Sanity-checks a ruleset file: loads it through RuleSet and reports whether
# its rules parse. Exits 1 on failure.

require_relative '../config/environment/common'
include DynamoAutoscale
extend DynamoAutoscale

ruleset = ARGV.shift

if ruleset.nil?
  STDERR.puts "Usage: script/validate_ruleset ruleset"
  exit 1
end

begin
  RuleSet.new(ruleset)
  puts "Rules seem legit."
rescue ArgumentError => e
  # NOTE(review): only ArgumentError is rescued here; any other error raised
  # while evaluating the ruleset (e.g. a NameError from a typo) will crash
  # with a raw backtrace instead of the friendly message — confirm whether
  # RuleSet wraps all validation failures in ArgumentError.
  puts "Ruleset has problem: #{e.message}"
  exit 1
end