terrafying 0.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/bin/terrafying +6 -0
- data/lib/hash/deep_merge.rb +6 -0
- data/lib/terrafying/aws.rb +542 -0
- data/lib/terrafying/cli.rb +73 -0
- data/lib/terrafying/dynamodb/config.rb +17 -0
- data/lib/terrafying/dynamodb/named_lock.rb +126 -0
- data/lib/terrafying/dynamodb/state.rb +92 -0
- data/lib/terrafying/dynamodb.rb +31 -0
- data/lib/terrafying/generator.rb +166 -0
- data/lib/terrafying/lock.rb +25 -0
- data/lib/terrafying/state.rb +51 -0
- data/lib/terrafying/util.rb +32 -0
- data/lib/terrafying/version.rb +4 -0
- data/lib/terrafying.rb +270 -0
- metadata +185 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
|
|
1
|
+
---
|
2
|
+
SHA256:
|
3
|
+
metadata.gz: 677e99cbba54dcc89984681f4419031a618e7d8e569c7e3a0286ce46a18aa914
|
4
|
+
data.tar.gz: c1ac49346d7e00dbae316c945c0fe91116fb6598f58caf8a018927c54402843c
|
5
|
+
SHA512:
|
6
|
+
metadata.gz: 6137818b31a5b6ef8b753d3b91cd663c3068f27a142214c60ac7e66da4a61e0eed446ac2815cd6b3023b88cf59c402760ba0cd002a1a9d7136c1424ac4935d00
|
7
|
+
data.tar.gz: 01b7d784d0fdcf6aef53dea73634d44213e8115e30402492434f36ca8b360cf0ca5f2fefc5fbc2e2d0081672dce0c8356cfb5212a75632d4027457de86d32387
|
data/lib/terrafying/aws.rb
ADDED
@@ -0,0 +1,542 @@
|
|
1
|
+
require 'aws-sdk'
|
2
|
+
|
3
|
+
Aws.use_bundled_cert!
|
4
|
+
|
5
|
+
module Terrafying
  module Aws
    # Caching lookup helpers over the AWS SDK, used while generating
    # Terraform. Each lookup memoises its result per argument for the
    # lifetime of the Ops instance and raises a RuntimeError when the query
    # does not resolve to exactly one resource (unless noted otherwise).
    class Ops

      attr_reader :region

      # Configures the global AWS SDK for +region+ with retries using
      # "full jitter" backoff (random sleep up to the exponential delay,
      # capped at 2s per attempt) and constructs the service clients.
      def initialize(region)
        full_jitter = lambda { |c| Kernel.sleep(Kernel.rand(0..[2, (0.3 * 2**c.retries)].min)) }

        ::Aws.config.update({
          region: region,
          retry_limit: 5,
          retry_backoff: full_jitter
        })
        @autoscaling_client = ::Aws::AutoScaling::Client.new
        @ec2_resource = ::Aws::EC2::Resource.new
        @ec2_client = ::Aws::EC2::Client.new
        @elb_client = ::Aws::ElasticLoadBalancingV2::Client.new
        @route53_client = ::Aws::Route53::Client.new
        @s3_client = ::Aws::S3::Client.new
        @sts_client = ::Aws::STS::Client.new

        @region = region
      end

      # Account id of the current credentials (cached after the first STS call).
      def account_id
        @account_id_cache ||= @sts_client.get_caller_identity.account
      end

      # Returns the id of the security group whose group name is +name+.
      def security_group(name)
        @security_groups ||= {}
        @security_groups[name] ||=
          begin
            STDERR.puts "Looking up id of security group '#{name}'"
            # limit(2) is enough to distinguish "one" from "more than one"
            # without enumerating every match.
            groups = @ec2_resource.security_groups(
              {
                filters: [
                  {
                    name: "group-name",
                    values: [name],
                  },
                ],
              }).limit(2)
            case
            when groups.count == 1
              groups.first.id
            when groups.count < 1
              raise "No security group with name '#{name}' was found."
            when groups.count > 1
              raise "More than one security group with name '#{name}' found: " + groups.join(', ')
            end
          end
      end

      # Returns the id of the security group named +name+ inside +vpc_id+.
      def security_group_in_vpc(vpc_id, name)
        @security_groups_in_vpc ||= {}
        @security_groups_in_vpc[vpc_id + name] ||=
          begin
            STDERR.puts "Looking up id of security group '#{name}'"
            groups = @ec2_resource.security_groups(
              {
                filters: [
                  {
                    name: "group-name",
                    values: [name],
                  },
                  {
                    name: "vpc-id",
                    values: [vpc_id],
                  }
                ],
              }).limit(2)
            case
            when groups.count == 1
              groups.first.id
            when groups.count < 1
              raise "No security group with name '#{name}' was found."
            when groups.count > 1
              raise "More than one security group with name '#{name}' found: " + groups.join(', ')
            end
          end
      end

      # Returns the id of the single security group carrying all of +tags+
      # (a hash of tag key => value).
      def security_group_by_tags(tags)
        @security_groups_by_tags ||= {}
        @security_groups_by_tags[tags] ||=
          begin
            groups = @ec2_client.describe_security_groups(
              {
                filters: [
                  {
                    name: "tag-key",
                    values: tags.keys,
                  },
                  {
                    name: "tag-value",
                    values: tags.values
                  }
                ]
              },
            ).security_groups
            case
            when groups.count == 1
              groups.first.id
            when groups.count < 1
              raise "No security group with tags '#{tags}' was found."
            when groups.count > 1
              raise "More than one security group with tags '#{tags}' found: " + groups.join(', ')
            end
          end
      end

      # Returns the id of the instance profile whose name matches +name+
      # (regexp match, since IAM offers no server-side name filter).
      def instance_profile(name)
        @instance_profiles ||= {}
        @instance_profiles[name] ||=
          begin
            resource = ::Aws::IAM::Resource.new
            STDERR.puts "Looking up id of instance profile '#{name}'"
            # unfortunately amazon don't let us filter for profiles using
            # a name filter, for now we have enumerate and filter manually
            coll = resource.instance_profiles
            profiles = coll.select { |p| p.instance_profile_name =~ /#{name}/ }

            case
            when profiles.count == 1
              profiles.first.instance_profile_id
            when profiles.count < 1
              raise "No instance profile with name '#{name}' was found."
            when profiles.count > 1
              raise "More than one instance profile with name '#{name}' found: " + profiles.join(', ')
            end
          end
      end

      # Returns the route table associated with +subnet_id+.
      def route_table_for_subnet(subnet_id)
        @route_table_for_subnet ||= {}
        @route_table_for_subnet[subnet_id] ||=
          begin
            resp = @ec2_client.describe_route_tables(
              {
                filters: [
                  { name: "association.subnet-id", values: [ subnet_id ] },
                ],
              })

            route_tables = resp.route_tables

            case
            when route_tables.count == 1
              route_tables.first
            when route_tables.count < 1
              raise "No route table for subnet '#{subnet_id}' was found."
            when route_tables.count > 1
              # was `profiles.count` (undefined here) — copy-paste bug
              raise "More than one route table for subnet '#{subnet_id}' found: " + route_tables.join(', ')
            end
          end
      end

      # Returns the main route table of +vpc_id+.
      def route_table_for_vpc(vpc_id)
        @route_table_for_vpc ||= {}
        @route_table_for_vpc[vpc_id] ||=
          begin
            resp = @ec2_client.describe_route_tables(
              {
                filters: [
                  { name: "association.main", values: [ "true" ] },
                  { name: "vpc-id", values: [ vpc_id ] },
                ],
              })

            route_tables = resp.route_tables

            case
            when route_tables.count == 1
              route_tables.first
            when route_tables.count < 1
              raise "No route table for vpc '#{vpc_id}' was found."
            when route_tables.count > 1
              # was `profiles.count` (undefined here) — copy-paste bug
              raise "More than one route table for vpc '#{vpc_id}' found: " + route_tables.join(', ')
            end
          end
      end

      # Maps each name to its security group id.
      def security_groups(*names)
        names.map { |n| security_group(n) }
      end

      # Maps each name to its security group id within +vpc_id+.
      def security_groups_in_vpc(vpc_id, *names)
        names.map { |n| security_group_in_vpc(vpc_id, n) }
      end

      # Returns the id of the subnet whose Name tag is +name+.
      def subnet(name)
        @subnets ||= {}
        @subnets[name] ||=
          begin
            STDERR.puts "Looking up id of subnet '#{name}'"
            subnets = @ec2_resource.subnets(
              {
                filters: [
                  {
                    name: "tag:Name",
                    values: [name],
                  },
                ],
              }).limit(2)
            case
            when subnets.count == 1
              subnets.first.id
            when subnets.count < 1
              raise "No subnet with name '#{name}' was found."
            when subnets.count > 1
              raise "More than one subnet with this name '#{name}' found : " + subnets.join(', ')
            end
          end
      end

      # Returns the subnet object with the given subnet id.
      def subnet_by_id(id)
        @subnets_by_id ||= {}
        @subnets_by_id[id] ||=
          begin
            resp = @ec2_client.describe_subnets(
              {
                subnet_ids: [id],
              })
            subnets = resp.subnets
            case
            when subnets.count == 1
              subnets.first
            when subnets.count < 1
              raise "No subnet with id '#{id}' was found."
            when subnets.count > 1
              raise "More than one subnet with this id '#{id}' found : " + subnets.join(', ')
            end
          end
      end

      # Maps each Name tag to its subnet id.
      def subnets(*names)
        names.map { |n| subnet(n) }
      end

      # Returns all subnet objects belonging to +vpc_id+ (raises if none).
      def subnets_for_vpc(vpc_id)
        @subnets_for_vpc ||= {}
        @subnets_for_vpc[vpc_id] ||=
          begin
            resp = @ec2_client.describe_subnets(
              {
                filters: [
                  { name: "vpc-id", values: [ vpc_id ] },
                ],
              })

            subnets = resp.subnets

            raise "No subnets found for '#{vpc_id}'." if subnets.empty?

            subnets
          end
      end

      # Returns the image id of the most recently created AMI whose name
      # starts with +name+, searching images owned by +owners+.
      # NOTE(review): caching ignores +owners+ — the first owners list used
      # for a given name wins; confirm callers never vary owners per name.
      def ami(name, owners=["self"])
        @ami ||= {}
        @ami[name] ||=
          begin
            STDERR.puts "looking for an image with prefix '#{name}'"
            # NOTE(review): assumes describe_images returns all images in one
            # response — confirm for accounts with very many AMIs.
            resp = @ec2_client.describe_images({owners: owners})
            if resp.images.count < 1
              raise "no images were found"
            end
            m = resp.images.select { |a| /^#{name}/.match(a.name) }
            if m.count == 0
              raise "no image with name '#{name}' was found"
            end
            # Latest image wins; creation_date strings sort chronologically.
            m.max_by(&:creation_date).image_id
          end
      end

      # Names of all availability zones in the configured region.
      def availability_zones
        @availability_zones ||=
          begin
            STDERR.puts "looking for AZs in the current region"
            resp = @ec2_client.describe_availability_zones({})
            resp.availability_zones.map { |zone|
              zone.zone_name
            }
          end
      end

      # Returns the VPC object whose Name tag is +name+.
      def vpc(name)
        @vpcs ||= {}
        @vpcs[name] ||=
          begin
            STDERR.puts "looking for a VPC with name '#{name}'"
            resp = @ec2_client.describe_vpcs({})
            matching_vpcs = resp.vpcs.select { |vpc|
              name_tag = vpc.tags.select { |tag| tag.key == "Name" }.first
              name_tag && name_tag.value == name
            }
            case
            when matching_vpcs.count == 1
              matching_vpcs.first
            when matching_vpcs.count < 1
              raise "No VPC with name '#{name}' was found."
            when matching_vpcs.count > 1
              raise "More than one VPC with name '#{name}' was found: " + matching_vpcs.join(', ')
            end
          end
      end

      # Returns the id of the route table whose Name tag is +name+.
      def route_table(name)
        @route_tables ||= {}
        @route_tables[name] ||=
          begin
            STDERR.puts "looking for a route table with name '#{name}'"
            route_tables = @ec2_client.describe_route_tables(
              {
                filters: [
                  {
                    name: "tag:Name",
                    values: [name],
                  },
                ],
              }
            ).route_tables
            case
            when route_tables.count == 1
              route_tables.first.route_table_id
            when route_tables.count < 1
              raise "No route table with name '#{name}' was found."
            when route_tables.count > 1
              raise "More than one route table with name '#{name}' was found: " + route_tables.join(', ')
            end
          end
      end

      # Returns the elastic ip address record for +alloc_id+.
      def elastic_ip(alloc_id)
        @ips ||= {}
        @ips[alloc_id] ||=
          begin
            STDERR.puts "looking for an elastic ip with allocation_id '#{alloc_id}'"
            ips = @ec2_client.describe_addresses(
              {
                filters: [
                  {
                    name: "allocation-id",
                    values: [alloc_id],
                  },
                ],
              }
            ).addresses
            case
            when ips.count == 1
              ips.first
            when ips.count < 1
              raise "No elastic ip with allocation_id '#{alloc_id}' was found."
            when ips.count > 1
              raise "More than one elastic ip with allocation_id '#{alloc_id}' was found: " + ips.join(', ')
            end
          end
      end

      # Returns the Route53 hosted zone whose name is exactly "+fqdn+.".
      def hosted_zone(fqdn)
        @hosted_zones ||= {}
        @hosted_zones[fqdn] ||=
          begin
            STDERR.puts "looking for a hosted zone with fqdn '#{fqdn}'"
            hosted_zones = @route53_client.list_hosted_zones_by_name({ dns_name: fqdn }).hosted_zones.select { |zone|
              # Route53 returns zone names with a trailing dot.
              zone.name == "#{fqdn}."
            }
            case
            when hosted_zones.count == 1
              hosted_zones.first
            when hosted_zones.count < 1
              raise "No hosted zone with fqdn '#{fqdn}' was found."
            when hosted_zones.count > 1
              raise "More than one hosted zone with name '#{fqdn}' was found: " + hosted_zones.join(', ')
            end
          end
      end

      # Returns the hosted zone carrying a tag that matches any key/value
      # pair in +tag+ (a hash; keys may be symbols or strings).
      def hosted_zone_by_tag(tag)
        @hosted_zones ||= {}
        @hosted_zones[tag] ||=
          begin
            STDERR.puts "looking for a hosted zone with tag '#{tag}'"
            @aws_hosted_zones ||= @route53_client.list_hosted_zones.hosted_zones.map do |zone|
              {
                zone: zone,
                tags: @route53_client.list_tags_for_resource({resource_type: "hostedzone", resource_id: zone.id.split('/')[2]}).resource_tag_set.tags
              }
            end

            hosted_zones = @aws_hosted_zones.select do |z|
              z[:tags].any? do |aws_tag|
                # `==`, not `=`: the original assigned to aws_tag.key, so the
                # key was never actually compared.
                tag.any? { |k, v| aws_tag.key == String(k) && aws_tag.value == v }
              end
            end

            case
            when hosted_zones.count == 1
              hosted_zones.first[:zone]
            when hosted_zones.count < 1
              raise "No hosted zone with tag '#{tag}' was found."
            when hosted_zones.count > 1
              raise "More than one hosted zone with tag '#{tag}' was found: " + hosted_zones.join(', ')
            end
          end
      end

      # Returns the body of s3://+bucket+/+key+ as a string.
      def s3_object(bucket, key)
        @s3_objects ||= {}
        @s3_objects["#{bucket}-#{key}"] ||=
          begin
            resp = @s3_client.get_object({ bucket: bucket, key: key })
            resp.body.read
          end
      end

      # Lists the objects in +bucket+.
      # NOTE(review): returns only the first page (up to 1000 keys) — confirm
      # whether pagination is needed for large buckets.
      def list_objects(bucket)
        @list_objects ||= {}
        @list_objects[bucket] ||=
          begin
            resp = @s3_client.list_objects_v2({ bucket: bucket })
            resp.contents
          end
      end

      # Returns the VPC endpoint service configuration named +service_name+.
      def endpoint_service_by_name(service_name)
        @endpoint_service ||= {}
        @endpoint_service[service_name] ||=
          begin
            resp = @ec2_client.describe_vpc_endpoint_service_configurations(
              {
                filters: [
                  {
                    name: "service-name",
                    values: [service_name],
                  },
                ],
              }
            )

            endpoint_services = resp.service_configurations
            case
            when endpoint_services.count == 1
              endpoint_services.first
            when endpoint_services.count < 1
              raise "No endpoint service with name '#{service_name}' was found."
            when endpoint_services.count > 1
              raise "More than one endpoint service with name '#{service_name}' was found: " + endpoint_services.join(', ')
            end
          end
      end

      # Returns the endpoint service fronted by the NLB with the given arn.
      def endpoint_service_by_lb_arn(arn)
        @endpoint_services_by_lb_arn ||= {}
        @endpoint_services_by_lb_arn[arn] ||=
          begin
            resp = @ec2_client.describe_vpc_endpoint_service_configurations

            services = resp.service_configurations.select { |service|
              service.network_load_balancer_arns.include?(arn)
            }

            case
            when services.count == 1
              services.first
            when services.count < 1
              raise "No endpoint service with lb arn '#{arn}' was found."
            when services.count > 1
              raise "More than one endpoint service with lb arn '#{arn}' was found: " + services.join(', ')
            end
          end
      end

      # Returns the ELBv2 load balancer named +name+.
      def lb_by_name(name)
        @lbs ||= {}
        @lbs[name] ||=
          begin
            load_balancers = @elb_client.describe_load_balancers({ names: [name] }).load_balancers

            case
            when load_balancers.count == 1
              load_balancers.first
            when load_balancers.count < 1
              raise "No load balancer with name '#{name}' was found."
            when load_balancers.count > 1
              raise "More than one load balancer with name '#{name}' was found: " + load_balancers.join(', ')
            end
          end
      end

      # Returns the target groups attached to the load balancer +arn+.
      def target_groups_by_lb(arn)
        @target_groups ||= {}
        @target_groups[arn] ||=
          begin
            resp = @elb_client.describe_target_groups(
              {
                load_balancer_arn: arn,
              }
            )

            resp.target_groups
          end
      end

      # Returns every auto scaling group whose tags contain all of
      # +expected_tags+ (keys may be symbols or strings). Pages through the
      # full ASG listing; not memoised.
      def asgs_by_tags(expected_tags = {})
        asgs = []
        next_token = nil

        loop do
          resp = @autoscaling_client.describe_auto_scaling_groups({ next_token: next_token })

          asgs = asgs + resp.auto_scaling_groups.select { |asg|
            matches = asg.tags.select { |tag|
              expected_tags[tag.key.to_sym] == tag.value ||
                expected_tags[tag.key] == tag.value
            }

            matches.count == expected_tags.count
          }

          if resp.next_token
            next_token = resp.next_token
          else
            break
          end
        end

        asgs
      end
    end

  end
end
|
data/lib/terrafying/cli.rb
ADDED
@@ -0,0 +1,73 @@
|
|
1
|
+
require 'thor'
|
2
|
+
|
3
|
+
module Terrafying
  # Thor-based command line interface for terrafying. Each command builds a
  # Config for the given PATH and delegates to the matching Config method,
  # exiting with that method's return code where one is produced.
  class Cli < Thor
    class_option :lock_timeout, type: :string, default: nil
    class_option :no_lock, type: :boolean, default: false
    class_option :keep, type: :boolean, default: false
    class_option :target, type: :string, default: nil
    class_option :scope, type: :string, default: nil
    class_option :dynamodb, type: :boolean, default: true

    desc "list PATH", "List resources defined"
    def list(path)
      puts "Defined resources:\n\n"
      Config.new(path, options).list.each do |resource_name|
        puts "#{resource_name}"
      end
    end

    desc "plan PATH", "Show execution plan"
    def plan(path)
      exit Config.new(path, options).plan
    end

    desc "graph PATH", "Show execution graph"
    def graph(path)
      exit Config.new(path, options).graph
    end

    desc "validate PATH", "Validate the generated Terraform"
    def validate(path)
      exit Config.new(path, options).validate
    end

    desc "apply PATH", "Apply changes to resources"
    option :force, aliases: ['f'], type: :boolean, desc: "Forcefully remove any pending locks"
    def apply(path)
      exit Config.new(path, options).apply
    end

    desc "destroy PATH", "Destroy resources"
    option :force, aliases: ['f'], type: :boolean, desc: "Forcefully remove any pending locks"
    def destroy(path)
      exit Config.new(path, options).destroy
    end

    desc "json PATH", "Show terraform JSON"
    def json(path)
      puts(Config.new(path, options).json)
    end

    desc "show-state PATH", "Show state"
    def show_state(path)
      puts(Config.new(path, options).show_state)
    end

    desc "use-remote-state PATH", "Migrate to using remote state storage"
    def use_remote_state(path)
      puts(Config.new(path, options).use_remote_state)
    end

    desc "use-local-state PATH", "Migrate to using local state storage"
    def use_local_state(path)
      puts(Config.new(path, options).use_local_state)
    end

    desc "import PATH ADDR ID", "Import existing infrastructure into your Terraform state"
    def import(path, addr, id)
      exit Config.new(path, options).import(addr, id)
    end
  end
end
|
data/lib/terrafying/dynamodb/config.rb
ADDED
@@ -0,0 +1,17 @@
|
|
1
|
+
module Terrafying
  module DynamoDb
    # Holds the names of the DynamoDB tables used for remote state and
    # locking. Both are mutable so callers can point at their own tables.
    class Config
      attr_accessor :state_table, :lock_table

      def initialize
        @state_table = "terrafying-state"
        @lock_table = "terrafying-state-lock"
      end
    end

    module_function

    # Lazily-built, shared Config instance for the module.
    def config
      @config ||= Config.new
    end
  end
end
|