lambda_wrap 0.27.0 → 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/lambda_wrap/api_gateway_manager.rb +123 -173
- data/lib/lambda_wrap/api_manager.rb +270 -0
- data/lib/lambda_wrap/aws_service.rb +40 -0
- data/lib/lambda_wrap/dynamo_db_manager.rb +425 -146
- data/lib/lambda_wrap/environment.rb +53 -0
- data/lib/lambda_wrap/lambda_manager.rb +266 -171
- data/lib/lambda_wrap/version.rb +2 -1
- data/lib/lambda_wrap.rb +12 -2
- metadata +9 -8
- data/lib/lambda_wrap/s3_bucket_manager.rb +0 -32
- data/lib/lambda_wrap/zip_file_generator.rb +0 -67
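The headline change in 1.0.0 is the move from function-style managers to class-based services (AwsService, ApiManager, Environment, DynamoTable). As a quick orientation before the diff of dynamo_db_manager.rb below, here is a minimal, hypothetical sketch of constructing the new DynamoTable. The option keys follow the constructor documentation added in this release; the table name and capacity values are illustrative only:

    require 'lambda_wrap'

    # Hypothetical table definition. Unspecified options fall back to the
    # documented defaults (1 read/write capacity unit, an 'Id' HASH key).
    users_table = LambdaWrap::DynamoTable.new(
      table_name: 'users',                    # required
      attribute_definitions: [{ attribute_name: 'Id', attribute_type: 'S' }],
      key_schema: [{ attribute_name: 'Id', key_type: 'HASH' }],
      read_capacity_units: 1,
      write_capacity_units: 1,
      append_environment_on_deploy: true      # deploys as 'users-<environment>'
    )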
--- data/lib/lambda_wrap/dynamo_db_manager.rb
+++ data/lib/lambda_wrap/dynamo_db_manager.rb
@@ -1,182 +1,461 @@
-require 'aws-sdk'
-
 module LambdaWrap
-  # The
-  #
-
-
-
-
-  # The
-  #
-  #
-
-
-
-
-
-
-
-
-
-
-
-
-
+  # The DynamoTable class simplifies Creation, Updating, and Destroying Dynamo DB Tables.
+  # @since 1.0
+  class DynamoTable < AwsService
+    # Sets up the DynamoTable for the Dynamo DB Manager. Preloading the configuration in the constructor.
+    #
+    # @param [Hash] options The configuration for the DynamoDB Table.
+    # @option options [String] :table_name The name of the DynamoDB Table. A "Base Name" can be used here where the
+    #  environment name can be appended upon deployment.
+    #
+    # @option options [Array<Hash>] :attribute_definitions ([{ attribute_name: 'Id', attribute_type: 'S' }]) An array of
+    #  attributes that describe the key schema for the table and indexes. The Hash must have symbols: :attribute_name &
+    #  :attribute_type. Please see AWS Documentation for the {http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html
+    #  Data Model}.
+    #
+    # @option options [Array<Hash>] :key_schema ([{ attribute_name: 'Id', key_type: 'HASH' }]) Specifies the attributes
+    #  that make up the primary key for a table or an index. The attributes in key_schema must also be defined in the
+    #  AttributeDefinitions array. Please see AWS Documentation for the {http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html
+    #  Data Model}.
+    #
+    #  Each element in the array must be composed of:
+    #  * <tt>:attribute_name</tt> - The name of this key attribute.
+    #  * <tt>:key_type</tt> - The role that the key attribute will assume:
+    #    * <tt>HASH</tt> - partition key
+    #    * <tt>RANGE</tt> - sort key
+    #
+    #  The partition key of an item is also known as its hash attribute. The term "hash attribute" derives from
+    #  DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their
+    #  partition key values.
+    #
+    #  The sort key of an item is also known as its range attribute. The term "range attribute" derives from the way
+    #  DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key
+    #  value.
+    #
+    #  For a simple primary key (partition key), you must provide exactly one element with a <tt>KeyType</tt> of
+    #  <tt>HASH</tt>.
+    #
+    #  For a composite primary key (partition key and sort key), you must provide exactly two elements, in this order:
+    #  The first element must have a <tt>KeyType</tt> of <tt>HASH</tt>, and the second element must have a
+    #  <tt>KeyType</tt> of <tt>RANGE</tt>.
+    #
+    #  For more information, see {http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key
+    #  Specifying the Primary Key} in the <em>Amazon DynamoDB Developer Guide</em>.
+    #
+    # @option options [Integer] :read_capacity_units (1) The maximum number of strongly consistent reads consumed per
+    #  second before DynamoDB returns a <tt>ThrottlingException</tt>. Must be at least 1. For current minimum and
+    #  maximum provisioned throughput values, see {http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html
+    #  Limits} in the <em>Amazon DynamoDB Developer Guide</em>.
+    #
+    # @option options [Integer] :write_capacity_units (1) The maximum number of writes consumed per second before
+    #  DynamoDB returns a <tt>ThrottlingException</tt>. Must be at least 1. For current minimum and maximum
+    #  provisioned throughput values, see {http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html
+    #  Limits} in the <em>Amazon DynamoDB Developer Guide</em>.
+    #
+    # @option options [Array<Hash>] :local_secondary_indexes ([]) One or more local secondary indexes (the maximum is
+    #  five) to be created on the table. Each index is scoped to a given partition key value. There is a 10 GB size
+    #  limit per partition key value; otherwise, the size of a local secondary index is unconstrained.
+    #
+    #  Each element in the array must be a Hash with these symbols:
+    #  * <tt>:index_name</tt> - The name of the local secondary index. Must be unique only for this table.
+    #  * <tt>:key_schema</tt> - Specifies the key schema for the local secondary index. The key schema must begin with
+    #    the same partition key as the table.
+    #  * <tt>:projection</tt> - Specifies attributes that are copied (projected) from the table into the index. These
+    #    are in addition to the primary key attributes and index key attributes, which are automatically projected. Each
+    #    attribute specification is composed of:
+    #    * <tt>:projection_type</tt> - One of the following:
+    #      * <tt>KEYS_ONLY</tt> - Only the index and primary keys are projected into the index.
+    #      * <tt>INCLUDE</tt> - Only the specified table attributes are projected into the index. The list of projected
+    #        attributes are in <tt>non_key_attributes</tt>.
+    #      * <tt>ALL</tt> - All of the table attributes are projected into the index.
+    #    * <tt>:non_key_attributes</tt> - A list of one or more non-key attribute names that are projected into the
+    #      secondary index. The total count of attributes provided in NonKeyAttributes, summed across all of the
+    #      secondary indexes, must not exceed 20. If you project the same attribute into two different indexes, this
+    #      counts as two distinct attributes when determining the total.
+    #
+    # @option options [Array<Hash>] :global_secondary_indexes ([]) One or more global secondary indexes (the maximum is
+    #  five) to be created on the table. Each global secondary index (Hash) in the array includes the following:
+    #  * <tt>:index_name</tt> - The name of the global secondary index. Must be unique only for this table.
+    #  * <tt>:key_schema</tt> - Specifies the key schema for the global secondary index.
+    #  * <tt>:projection</tt> - Specifies attributes that are copied (projected) from the table into the index. These
+    #    are in addition to the primary key attributes and index key attributes, which are automatically projected. Each
+    #    attribute specification is composed of:
+    #    * <tt>:projection_type</tt> - One of the following:
+    #      * <tt>KEYS_ONLY</tt> - Only the index and primary keys are projected into the index.
+    #      * <tt>INCLUDE</tt> - Only the specified table attributes are projected into the index. The list of projected
+    #        attributes are in <tt>NonKeyAttributes</tt>.
+    #      * <tt>ALL</tt> - All of the table attributes are projected into the index.
+    #  * <tt>non_key_attributes</tt> - A list of one or more non-key attribute names that are projected into the
+    #    secondary index. The total count of attributes provided in NonKeyAttributes, summed across all of the
+    #    secondary indexes, must not exceed 20. If you project the same attribute into two different indexes, this
+    #    counts as two distinct attributes when determining the total.
+    #  * <tt>:provisioned_throughput</tt> - The provisioned throughput settings for the global secondary index,
+    #    consisting of read and write capacity units.
+    #
+    # @option options [Boolean] :append_environment_on_deploy (false) Option to append the name of the environment to
+    #  the table name upon deployment and teardown. DynamoDB Tables cannot shard data in a similar manner as how Lambda
+    #  aliases and API Gateway Environments work. This option is supposed to help the user with naming tables instead
+    #  of managing the environment names on their own.
+    def initialize(options)
+      default_options = { append_environment_on_deploy: false, read_capacity_units: 1, write_capacity_units: 1,
+                          local_secondary_indexes: nil, global_secondary_indexes: nil,
+                          attribute_definitions: [{ attribute_name: 'Id', attribute_type: 'S' }],
+                          key_schema: [{ attribute_name: 'Id', key_type: 'HASH' }] }
+
+      options_with_defaults = options.reverse_merge(default_options)
+
+      @table_name = options_with_defaults[:table_name]
+      raise ArgumentError, ':table_name is required.' unless @table_name
+
+      @attribute_definitions = options_with_defaults[:attribute_definitions]
+      @key_schema = options_with_defaults[:key_schema]
+
+      # Verify that all of key_schema is defined in attribute_definitions
+      defined_in_attribute_definitions_guard(@key_schema)
+
+      @read_capacity_units = options_with_defaults[:read_capacity_units]
+      @write_capacity_units = options_with_defaults[:write_capacity_units]
+      provisioned_throughput_guard(read_capacity_units: @read_capacity_units,
+                                   write_capacity_units: @write_capacity_units)
+
+      unless @read_capacity_units >= 1 && @write_capacity_units >= 1 && (@read_capacity_units.is_a? Integer) &&
+             (@write_capacity_units.is_a? Integer)
+        raise ArgumentError, 'Read and Write Capacity must be positive integers.'
+      end
+
+      @local_secondary_indexes = options_with_defaults[:local_secondary_indexes]
+
+      if @local_secondary_indexes && @local_secondary_indexes.length > 5
+        raise ArgumentError, 'Can only have 5 LocalSecondaryIndexes per table!'
+      end
+      if @local_secondary_indexes && !@local_secondary_indexes.empty?
+        @local_secondary_indexes.each { |lsindex| defined_in_attribute_definitions_guard(lsindex[:key_schema]) }
+      end
+
+      @global_secondary_indexes = options_with_defaults[:global_secondary_indexes]
+
+      if @global_secondary_indexes && @global_secondary_indexes.length > 5
+        raise ArgumentError, 'Can only have 5 GlobalSecondaryIndexes per table!'
+      end
+      if @global_secondary_indexes && !@global_secondary_indexes.empty?
+        @global_secondary_indexes.each do |gsindex|
+          defined_in_attribute_definitions_guard(gsindex[:key_schema])
+          provisioned_throughput_guard(gsindex[:provisioned_throughput])
+        end
+      end
+
+      @append_environment_on_deploy = options_with_defaults[:append_environment_on_deploy]
     end
 
-
-    #
-    #
-    #
-    #
-    #
-    #
-    # [
-
-
-
-
-
-
-
-
-
-
-
-         write_capacity == table_details.provisioned_throughput.write_capacity_units
-        puts "Table: #{table_name} not updated. Current and requested reads/writes are same."
-        puts 'Current ReadCapacityUnits provisioned for the table: ' \
-             "#{table_details.provisioned_throughput.read_capacity_units}."
-        puts "Requested ReadCapacityUnits: #{read_capacity}."
-        puts 'Current WriteCapacityUnits provisioned for the table: ' \
-             "#{table_details.provisioned_throughput.write_capacity_units}."
-        puts "Requested WriteCapacityUnits: #{write_capacity}."
+    # Deploys the DynamoDB Table to the target environment. If the @append_environment_on_deploy option is set, the
+    # table_name will be appended with a hyphen and the environment name. This will attempt to Create or Update with
+    # the parameters specified from the constructor. This may take a LONG time, for it will wait for any new indexes to
+    # be available.
+    #
+    # @param environment_options [LambdaWrap::Environment] Target environment to deploy.
+    # @param client [Aws::DynamoDB::Client] Client to use with SDK. Should be passed in by the API class.
+    # @param region [String] AWS Region string. Should be passed in by the API class.
+    def deploy(environment_options, client, region = 'AWS_REGION')
+      super
+
+      puts "Deploying Table: #{@table_name} to Environment: #{environment_options.name}"
+
+      full_table_name = @table_name + (@append_environment_on_deploy ? "-#{environment_options.name}" : '')
+
+      table_details = retrieve_table_details(full_table_name)
+
+      if table_details.nil?
+        create_table(full_table_name)
       else
-
-
-
-        )
+        wait_until_table_is_available(full_table_name) if table_details[:table_status] != 'ACTIVE'
+        update_table(full_table_name, table_details)
+      end
 
-
-
-
-
-
-
-
+      puts "Dynamo Table #{full_table_name} is now available."
+      full_table_name
+    end
+
+    # Deletes the DynamoDB table specified by the table_name and the Environment name (if append_environment_on_deploy)
+    # was specified. Otherwise just deletes the table.
+    #
+    # @param environment_options [LambdaWrap::Environment] Target environment to teardown
+    # @param client [Aws::DynamoDB::Client] Client to use with SDK. Should be passed in by the API class.
+    # @param region [String] AWS Region string. Should be passed in by the API class.
+    def teardown(environment_options, client, region = 'AWS_REGION')
+      super
+      puts "Tearing down Table: #{@table_name} from Environment: #{environment_options.name}"
+      full_table_name = @table_name + (@append_environment_on_deploy ? "-#{environment_options.name}" : '')
+      delete_table(full_table_name)
+      full_table_name
+    end
+
+    # Deletes all DynamoDB tables that are prefixed with the @table_name specified in the constructor.
+    # This is an attempt to tear down all DynamoTables that were deployed with the environment name appended.
+    #
+    # @param client [Aws::DynamoDB::Client] Client to use with SDK. Should be passed in by the API class.
+    # @param region [String] AWS Region string. Should be passed in by the API class.
+    def delete(client, region = 'AWS_REGION')
+      super
+      puts "Deleting all tables with prefix: #{@table_name}."
+      table_names = retrieve_prefixed_tables(@table_name)
+      table_names.each { |table_name| delete_table(table_name) }
+      puts "Deleted #{table_names.length} tables."
+      table_names.length
+    end
+
+    def to_s
+      return @table_name if @table_name && @table_name.is_a?(String)
+      super
+    end
+
+    private
+
+    def retrieve_table_details(full_table_name)
+      table_details = nil
+      begin
+        table_details = @client.describe_table(table_name: full_table_name).table
+      rescue Aws::DynamoDB::Errors::ResourceNotFoundException
+        puts "Table #{full_table_name} does not exist."
       end
+      table_details
     end
 
     ##
-    #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-      table_details = get_table_details(table_name)
-
-      if !table_details.nil?
-        wait_until_table_available(table_name) if table_details.table_status != 'ACTIVE'
-
-        if read_capacity > table_details.provisioned_throughput.read_capacity_units ||
-           write_capacity > table_details.provisioned_throughput.write_capacity_units
-          set_table_capacity(table_name, read_capacity, write_capacity)
-          has_updates = true
+    # Waits for the table to be available
+    def wait_until_table_is_available(full_table_name, delay = 5, max_attempts = 5)
+      puts "Waiting for Table #{full_table_name} to be available."
+      puts "Waiting with a #{delay} second delay between attempts, for a maximum of #{max_attempts} attempts."
+      max_time = Time.at(delay * max_attempts).utc.strftime('%H:%M:%S')
+      puts "Max waiting time will be: #{max_time} (approximate)."
+      # wait until the table has updated to being fully available
+      # waiting for ~2min at most; an error will be thrown afterwards
+
+      started_waiting_at = Time.now
+      max_attempts.times do |attempt|
+        puts "Attempt #{attempt + 1}/#{max_attempts}, \
+#{Time.at(Time.now - started_waiting_at).utc.strftime('%H:%M:%S')}/#{max_time}"
+
+        details = retrieve_table_details(full_table_name)
+
+        if details.table_status != 'ACTIVE'
+          puts "Table: #{full_table_name} is not yet available. Status: #{details.table_status}. Retrying..."
         else
-
-
-
+          updating_indexes = details.global_secondary_indexes.reject do |global_index|
+            global_index.index_status == 'ACTIVE'
+          end
+          return true if updating_indexes.empty?
+          puts 'Table is available, but the global indexes are not:'
+          puts(updating_indexes.map { |global_index| "#{global_index.index_name}, #{global_index.index_status}" })
         end
-
-
-      ad = attribute_definitions || [{ attribute_name: 'Id', attribute_type: 'S' }]
-      ks = key_schema || [{ attribute_name: 'Id', key_type: 'HASH' }]
+        Kernel.sleep(delay.seconds)
+      end
 
-
-
-
-
-
-
+      raise Exception, "Table #{full_table_name} did not become available after #{max_attempts} attempts. " \
+                       'Try again later or inspect the AWS console.'
+    end
+
+    # Updates the Dynamo Table. You can only perform one of the following update operations at once:
+    # * Modify the provisioned throughput settings of the table.
+    # * Enable or disable Streams on the table.
+    # * Remove a global secondary index from the table.
+    # * Create a new global secondary index on the table. Once the index begins backfilling,
+    #   you can use UpdateTable to perform other operations.
+    def update_table(full_table_name, table_details)
+      # Determine if Provisioned Throughput needs to be updated.
+      if @read_capacity_units != table_details.provisioned_throughput.read_capacity_units ||
+         @write_capacity_units != table_details.provisioned_throughput.write_capacity_units
 
-
-
+        update_provisioned_throughput(
+          full_table_name, table_details.provisioned_throughput.read_capacity_units,
+          table_details.provisioned_throughput.write_capacity_units
+        )
 
-
-
+        # Wait up to 30 minutes.
+        wait_until_table_is_available(full_table_name, 5, 360)
       end
 
-      if
-
-
+      # Determine if there are any Global Secondary Indexes to be deleted.
+      global_secondary_indexes_to_delete = build_global_index_deletes_array(table_details.global_secondary_indexes)
+      unless global_secondary_indexes_to_delete.empty?
+        # Loop through each index to delete, and send the update one at a time (restriction on the API).
+        until global_secondary_indexes_to_delete.empty?
+          delete_global_index(full_table_name, global_secondary_indexes_to_delete.pop)
+
+          # Wait up to 2 hours.
+          wait_until_table_is_available(full_table_name, 10, 720)
+        end
+      end
+
+      # Determine if there are updates to the Provisioned Throughput of the Global Secondary Indexes
+      global_secondary_index_updates = build_global_index_updates_array(table_details.global_secondary_indexes)
+      unless global_secondary_index_updates.empty?
+        update_global_indexes(full_table_name, global_secondary_index_updates)
+
+        # Wait up to 4 hours.
+        wait_until_table_is_available(full_table_name, 10, 1_440)
       end
+
+      # Determine if there are new Global Secondary Indexes to be created.
+      new_global_secondary_indexes = build_new_global_indexes_array(table_details.global_secondary_indexes)
+      return if new_global_secondary_indexes.empty?
+
+      create_global_indexes(full_table_name, new_global_secondary_indexes)
+
+      # Wait up to 4 hours.
+      wait_until_table_is_available(full_table_name, 10, 1_440)
     end
 
-
-
-
-
-
-
-
+    def update_provisioned_throughput(full_table_name, old_read, old_write)
+      puts "Updating Provisioned Throughput for #{full_table_name}"
+      puts "Setting Read Capacity Units From: #{old_read} To: #{@read_capacity_units}"
+      puts "Setting Write Capacity Units From: #{old_write} To: #{@write_capacity_units}"
+      @client.update_table(
+        table_name: full_table_name,
+        provisioned_throughput: { read_capacity_units: @read_capacity_units,
+                                  write_capacity_units: @write_capacity_units }
+      )
+    end
+
+    def build_global_index_deletes_array(current_global_indexes)
+      return [] if current_global_indexes.empty?
+      current_index_names = current_global_indexes.map(&:index_name)
+      target_index_names = @global_secondary_indexes.map { |gsindex| gsindex[:index_name] }
+      current_index_names - target_index_names
+    end
+
+    def delete_global_index(full_table_name, index_to_delete)
+      puts "Deleting Global Secondary Index: #{index_to_delete} from Table: #{full_table_name}"
+      @client.update_table(
+        table_name: full_table_name,
+        global_secondary_index_updates: [{ delete: { index_name: index_to_delete } }]
+      )
+    end
+
+    # Looks through the current list of Global Secondary Indexes and builds an array of updates if the Provisioned
+    # Throughput of the intended Indexes differs from the current Indexes.
+    def build_global_index_updates_array(current_global_indexes)
+      indexes_to_update = []
+      return indexes_to_update if current_global_indexes.empty?
+      current_global_indexes.each do |current_index|
+        @global_secondary_indexes.each do |target_index|
+          # Find the same named index
+          next unless target_index[:index_name] == current_index[:index_name]
+          # Skip unless a different ProvisionedThroughput is specified
+          break unless (target_index[:provisioned_throughput][:read_capacity_units] !=
+                        current_index.provisioned_throughput.read_capacity_units) ||
+                       (target_index[:provisioned_throughput][:write_capacity_units] !=
+                        current_index.provisioned_throughput.write_capacity_units)
+          indexes_to_update << { index_name: target_index[:index_name],
+                                 provisioned_throughput: target_index[:provisioned_throughput] }
+        end
+      end
+      puts indexes_to_update
+      indexes_to_update
+    end
+
+    def update_global_indexes(full_table_name, global_secondary_index_updates)
+      puts "Updating Global Indexes for Table: #{full_table_name}"
+      puts(
+        global_secondary_index_updates.map do |index|
+          "#{index[:index_name]} -\
+\tRead: #{index[:provisioned_throughput][:read_capacity_units]},\
+\tWrite: #{index[:provisioned_throughput][:write_capacity_units]}"
+        end
+      )
+
+      @client.update_table(
+        table_name: full_table_name,
+        global_secondary_index_updates: global_secondary_index_updates.map { |index| { update: index } }
+      )
+    end
+
+    def build_new_global_indexes_array(current_global_indexes)
+      return [] if !@global_secondary_indexes || @global_secondary_indexes.empty?
+
+      index_names_to_create = @global_secondary_indexes.map { |gsindex| gsindex[:index_name] } -
+                              current_global_indexes.map(&:index_name)
+
+      @global_secondary_indexes.select do |gsindex|
+        index_names_to_create.include?(gsindex[:index_name])
+      end
+    end
+
+    def create_global_indexes(full_table_name, new_global_secondary_indexes)
+      puts "Creating new Global Indexes for Table: #{full_table_name}"
+      puts(new_global_secondary_indexes.map { |index| index[:index_name].to_s })
+      @client.update_table(
+        table_name: full_table_name,
+        global_secondary_index_updates: new_global_secondary_indexes.map { |index| { create: index } }
+      )
+    end
+
+    def create_table(full_table_name)
+      puts "Creating table #{full_table_name}..."
+      @client.create_table(
+        table_name: full_table_name, attribute_definitions: @attribute_definitions,
+        key_schema: @key_schema,
+        provisioned_throughput: { read_capacity_units: @read_capacity_units,
+                                  write_capacity_units: @write_capacity_units },
+        local_secondary_indexes: @local_secondary_indexes,
+        global_secondary_indexes: @global_secondary_indexes
+      )
+      # Wait 60 seconds because "DescribeTable uses an eventually consistent query"
+      puts 'Sleeping for 60 seconds...'
+      Kernel.sleep(60)
+
+      # Wait for up to 2m.
+      wait_until_table_is_available(full_table_name, 5, 24)
+    end
+
+    def delete_table(full_table_name)
+      puts "Trying to delete Table: #{full_table_name}"
+      table_details = retrieve_table_details(full_table_name)
       if table_details.nil?
         puts 'Table did not exist. Nothing to delete.'
       else
-
-
+        # Wait up to 30m
+        wait_until_table_is_available(full_table_name, 5, 360) if table_details.table_status != 'ACTIVE'
+        @client.delete_table(table_name: full_table_name)
       end
     end
 
-
-
-
-    # *Arguments*
-    # [table_name] The dynamoDB table name to watch until it reaches an active status.
-    def wait_until_table_available(table_name)
-      max_attempts = 24
-      delay_between_attempts = 5
+    def retrieve_prefixed_tables(prefix)
+      retrieve_all_table_names.select { |name| name =~ /#{Regexp.quote(prefix)}[a-zA-Z0-9_\-.]*/ }
+    end
 
-
-
-
-
-
-
-
-
-
+    def retrieve_all_table_names
+      tables = []
+      response = nil
+      loop do
+        response =
+          if !response || response.last_evaluated_table_name.nil? || response.last_evaluated_table_name.empty?
+            @client.list_tables(limit: 100)
+          else
+            @client.list_tables(limit: 100, exclusive_start_table_name: response.last_evaluated_table_name)
           end
+        tables.concat(response.table_names)
+        if response.table_names.empty? || response.last_evaluated_table_name.nil? ||
+           response.last_evaluated_table_name.empty?
+          return tables
         end
-    rescue Aws::Waiters::Errors::TooManyAttemptsError => e
-      puts "Table #{table_name} did not become available after #{e.attempts} attempts. " \
-           'Try again later or inspect the AWS console.'
       end
     end
 
-    def
-
-
-
-    rescue Aws::DynamoDB::Errors::ResourceNotFoundException
-      puts "Table #{table_name} does not exist."
+    def defined_in_attribute_definitions_guard(key_schema)
+      if Set.new(key_schema.map { |item| item[:attribute_name] })
+         .subset?(Set.new(@attribute_definitions.map { |item| item[:attribute_name] }))
+        return true
       end
-
+      raise ArgumentError, 'Not all keys in the key_schema are defined in the attribute_definitions!'
     end
 
-
+    def provisioned_throughput_guard(provisioned_throughput)
+      if provisioned_throughput[:read_capacity_units] >= 1 && provisioned_throughput[:write_capacity_units] >= 1 &&
+         provisioned_throughput[:read_capacity_units].is_a?(Integer) &&
+         provisioned_throughput[:write_capacity_units].is_a?(Integer)
+        return true
+      end
+      raise ArgumentError, 'Read and Write Capacity for all ProvisionedThroughput must be positive integers.'
+    end
   end
 end
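For context on how the deploy/teardown/delete signatures above are meant to be driven, here is a hedged sketch. It assumes LambdaWrap::Environment (added in environment.rb in this release) can be constructed from a name, and that the caller supplies the Aws::DynamoDB::Client and region that the API class would normally pass in:

    require 'aws-sdk'
    require 'lambda_wrap'

    client = Aws::DynamoDB::Client.new(region: 'us-east-1')
    production = LambdaWrap::Environment.new('production') # assumed constructor

    table = LambdaWrap::DynamoTable.new(table_name: 'users',
                                        append_environment_on_deploy: true)

    # Creates 'users-production', or updates it in place if it already exists.
    table.deploy(production, client, 'us-east-1')

    # Later: tears down only this environment's table ('users-production')...
    table.teardown(production, client, 'us-east-1')

    # ...or deletes every table prefixed with 'users' across environments.
    table.delete(client, 'us-east-1')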