terrafying 1.2.0 → 1.2.1

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: 733dfeec775f0dce838a28ec489e7f13e755b6c5
4
- data.tar.gz: 680c60f25757d7e23006664d441bd51526c56f1d
3
+ metadata.gz: 66fd929754eb383fcdae77c9a87e6ab11d2acfc5
4
+ data.tar.gz: 1f41bf191b22a90e98a5992f944174dee4407741
5
5
  SHA512:
6
- metadata.gz: a7a7ee78be1097f55a6982ccc48d466d742ac9aceccaf6f178b06c323737976452a6cbecef645af6cebddc30dd3d26e4344e5ddf3d19ee1a3372631c2ab4ece3
7
- data.tar.gz: cae1bb034cc772f1d338c34a0cff92663ee5f51abe6b83dbec0190b7e9c9d80d167ca0b79d4f58742a1ddf826655c5e115e0e75983afa7525f225d3a62691a9d
6
+ metadata.gz: f754d59bbd7f2eeb210b3737346ddf939ad36d3e3b543a2682f49604bcc73907b744489a69645d092435a6bfbba4ee3af8afa9c33a935212fbad4a8f9eed17ec
7
+ data.tar.gz: 4bb3d2a1458f81d5eedf02bec8927261ba47e8d1404c0c56f9704c84f71c460f083dc5737374e2a4ff3f9b55c139a44da706145ed87120ebce957cf44b31cc31
lib/hash/deep_merge.rb ADDED
@@ -0,0 +1,6 @@
1
+ class ::Hash
2
+ def deep_merge(second)
3
+ merger = proc { |key, v1, v2| Hash === v1 && Hash === v2 ? v1.merge(v2, &merger) : v2 }
4
+ self.merge(second, &merger)
5
+ end
6
+ end
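
For illustration, a minimal sketch of how the Hash#deep_merge patch above behaves (example values are made up): nested hashes are merged recursively, while conflicting non-hash values take the right-hand side.

    a = { tags: { Name: "web" }, count: 1 }
    b = { tags: { Env: "prod" }, count: 2 }
    a.deep_merge(b)  # => { tags: { Name: "web", Env: "prod" }, count: 2 }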
lib/terrafying.rb ADDED
@@ -0,0 +1,257 @@
1
+ require 'fileutils'
2
+ require 'logger'
3
+ require 'pathname'
4
+ require 'securerandom'
5
+ require 'tempfile'
6
+
7
+ require 'hash/deep_merge'
8
+
9
+ require 'terrafying/aws'
10
+ require 'terrafying/cli'
11
+ require 'terrafying/generator'
12
+ require 'terrafying/lock'
13
+ require 'terrafying/version'
14
+ require 'terrafying/state'
15
+
16
+ module Terrafying
17
+
18
+ class Config
19
+
20
+ attr_reader :path, :scope
21
+
22
+ def initialize(path, options)
23
+ @path = File.expand_path(path)
24
+ @options = options
25
+ @scope = options[:scope] || scope_for_path(@path)
26
+
27
+ $stderr.puts "Scope: #{@scope}"
28
+
29
+ load(path)
30
+ end
31
+
32
+ def list
33
+ Terrafying::Generator.resource_names
34
+ end
35
+
36
+ def json
37
+ Terrafying::Generator.pretty_generate
38
+ end
39
+
40
+ def plan
41
+ exit_code = 1
42
+ with_config do
43
+ with_state(mode: :read) do
44
+ exit_code = exec_with_optional_target 'plan'
45
+ end
46
+ end
47
+ exit_code
48
+ end
49
+
50
+ def graph
51
+ exit_code = 1
52
+ with_config do
53
+ with_state(mode: :read) do
54
+ exit_code = exec_with_optional_target 'graph'
55
+ end
56
+ end
57
+ exit_code
58
+ end
59
+
60
+ def apply
61
+ exit_code = 1
62
+ with_config do
63
+ with_lock do
64
+ with_state(mode: :update) do
65
+ exit_code = exec_with_optional_target "apply -auto-approve -backup=- #{@dir}"
66
+ end
67
+ end
68
+ end
69
+ exit_code
70
+ end
71
+
72
+ def destroy
73
+ exit_code = 1
74
+ with_config do
75
+ with_lock do
76
+ with_state(mode: :update) do
77
+ exit_code = stream_command("terraform destroy -backup=- #{@dir}")
78
+ end
79
+ end
80
+ end
81
+ exit_code
82
+ end
83
+
84
+ def show_state
85
+ puts(State.store(self).get)
86
+ end
87
+
88
+ def use_remote_state
89
+ with_lock do
90
+ local = State.local(self)
91
+ state = local.get
92
+ if state
93
+ State.remote(self).put(state)
94
+ end
95
+ local.delete
96
+ end
97
+ end
98
+
99
+ def use_local_state
100
+ with_lock do
101
+ remote = State.remote(self)
102
+ state = remote.get
103
+ if state
104
+ State.local(self).put(state)
105
+ end
106
+ end
107
+ end
108
+
109
+ def import(addr, id)
110
+ exit_code = 1
111
+ with_config do
112
+ with_lock do
113
+ with_state(mode: :update) do
114
+ exit_code = exec_with_optional_target "import -backup=- #{@dir} #{addr} #{id}"
115
+ end
116
+ end
117
+ end
118
+ exit_code
119
+ end
120
+
121
+ private
122
+ def targets(options)
123
+ options[:target].split(",").map {|target| "-target=#{target}"}.join(" ")
124
+ end
125
+
126
+ def exec_with_optional_target(command)
127
+ cmd = if @options[:target]
128
+ "terraform #{command} #{targets(@options)}"
129
+ else
130
+ "terraform #{command}"
131
+ end
132
+ stream_command(cmd)
133
+ end
134
+
135
+ def with_config(&block)
136
+ abort("***** ERROR: You must have terraform installed to run this gem *****") unless terraform_installed?
137
+ check_version
138
+ name = File.basename(@path, ".*")
139
+ dir = File.join(git_toplevel, 'tmp', SecureRandom.uuid)
140
+ terraform_files = File.join(git_toplevel, ".terraform/")
141
+ unless Dir.exist?(terraform_files)
142
+ abort("***** ERROR: No .terraform directory found. Please run 'terraform init' to install plugins *****")
143
+ end
144
+ FileUtils.mkdir_p(dir)
145
+ output_path = File.join(dir, name + ".tf.json")
146
+ FileUtils.cp_r(terraform_files, dir)
147
+ Dir.chdir(dir) do
148
+ begin
149
+ File.write(output_path, Terrafying::Generator.pretty_generate)
150
+ yield block
151
+ ensure
152
+ FileUtils.rm_rf(dir) unless @options[:keep]
153
+ end
154
+ end
155
+ end
156
+
157
+ def with_lock(&block)
158
+ lock_id = nil
159
+ begin
160
+ lock = if @options[:no_lock]
161
+ Locks.noop
162
+ else
163
+ Locks.dynamodb(scope)
164
+ end
165
+
166
+ lock_id = if @options[:force]
167
+ lock.steal
168
+ else
169
+ lock.acquire
170
+ end
171
+ yield block
172
+
173
+ # If block raises any exception we will still hold on to lock
174
+ # after process exits. This is actually what we want as
175
+ # terraform may have succeeded in updating some resources, but
176
+ # not others so we need to manually get into a consistent
177
+ # state and then re-run.
178
+ lock.release(lock_id)
179
+ end
180
+ end
181
+
182
+ def with_state(opts, &block)
183
+ if !@options[:dynamodb]
184
+ return yield(block)
185
+ end
186
+
187
+ store = State.store(self)
188
+
189
+ begin
190
+ state = store.get
191
+ File.write(State::STATE_FILENAME, state) if state
192
+ rescue => e
193
+ raise "Error retrieving state for config #{self}: #{e}"
194
+ end
195
+
196
+ yield block
197
+
198
+ begin
199
+ if opts[:mode] == :update
200
+ store.put(IO.read(State::STATE_FILENAME))
201
+ end
202
+ rescue => e
203
+ raise "Error updating state for config #{self}: #{e}"
204
+ end
205
+ end
206
+
207
+ def scope_for_path(path)
208
+ top_level_path = Pathname.new(git_toplevel)
209
+ Pathname.new(path).relative_path_from(top_level_path).to_s
210
+ end
211
+
212
+ def git_toplevel
213
+ @top_level ||= begin
214
+ top_level = `git rev-parse --show-toplevel`
215
+ raise "Unable to find .git directory top level for '#{@path}'" if top_level.empty?
216
+ File.expand_path(top_level.chomp)
217
+ end
218
+ end
219
+
220
+ def check_version
221
+ if terraform_version != Terrafying::CLI_VERSION
222
+ abort("***** ERROR: You must have v#{Terrafying::CLI_VERSION} of terraform installed to run any command (you are running v#{terraform_version}) *****")
223
+ end
224
+ end
225
+
226
+ def terraform_installed?
227
+ which('terraform')
228
+ end
229
+
230
+ def terraform_version
231
+ `terraform -v`.split("\n").first.split("v").last
232
+ end
233
+
234
+ def stream_command(cmd)
235
+ IO.popen(cmd) do |io|
236
+ while (line = io.gets) do
237
+ puts line.gsub('\n', "\n").gsub('\\"', "\"")
238
+ end
239
+ end
240
+ return $?.exitstatus
241
+ end
242
+
243
+ # Cross-platform way of finding an executable in the $PATH.
244
+ #
245
+ # which('ruby') #=> /usr/bin/ruby
246
+ def which(cmd)
247
+ exts = ENV['PATHEXT'] ? ENV['PATHEXT'].split(';') : ['']
248
+ ENV['PATH'].split(File::PATH_SEPARATOR).each do |path|
249
+ exts.each { |ext|
250
+ exe = File.join(path, "#{cmd}#{ext}")
251
+ return exe if File.executable?(exe) && !File.directory?(exe)
252
+ }
253
+ end
254
+ return nil
255
+ end
256
+ end
257
+ end
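
For illustration, the Config class above can also be driven from Ruby rather than through the CLI; a minimal sketch, assuming a hypothetical DSL file at stacks/network.rb:

    require 'terrafying'

    config = Terrafying::Config.new("stacks/network.rb", no_lock: true)
    puts config.list    # resource names from the generated Terraform JSON
    puts config.json    # the JSON document written into the temporary working dir
    exit config.plan    # shells out to `terraform plan` and returns its exit status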
lib/terrafying/aws.rb ADDED
@@ -0,0 +1,471 @@
1
+ require 'aws-sdk'
2
+
3
+ Aws.use_bundled_cert!
4
+
5
+ module Terrafying
6
+ module Aws
7
+ class Ops
8
+
9
+ attr_reader :region
10
+
11
+ def initialize(region)
12
+ ::Aws.config.update({
13
+ region: region
14
+ })
15
+ @ec2_resource = ::Aws::EC2::Resource.new
16
+ @ec2_client = ::Aws::EC2::Client.new
17
+ @elb_client = ::Aws::ElasticLoadBalancingV2::Client.new
18
+ @route53_client = ::Aws::Route53::Client.new
19
+ @s3_client = ::Aws::S3::Client.new
20
+ @sts_client = ::Aws::STS::Client.new
21
+
22
+ @region = region
23
+ end
24
+
25
+ def account_id
26
+ @account_id_cache ||= @sts_client.get_caller_identity.account
27
+ end
28
+
29
+ def security_group(name)
30
+ @security_groups ||= {}
31
+ @security_groups[name] ||=
32
+ begin
33
+ STDERR.puts "Looking up id of security group '#{name}'"
34
+ groups = @ec2_resource.security_groups(
35
+ {
36
+ filters: [
37
+ {
38
+ name: "group-name",
39
+ values: [name],
40
+ },
41
+ ],
42
+ }).limit(2)
43
+ case
44
+ when groups.count == 1
45
+ groups.first.id
46
+ when groups.count < 1
47
+ raise "No security group with name '#{name}' was found."
48
+ when groups.count > 1
49
+ raise "More than one security group with name '#{name}' found: " + groups.join(', ')
50
+ end
51
+ end
52
+ end
53
+
54
+ def security_group_by_tags(tags)
55
+ @security_groups_by_tags ||= {}
56
+ @security_groups_by_tags[tags] ||=
57
+ begin
58
+ groups = @ec2_client.describe_security_groups(
59
+ {
60
+ filters: [
61
+ {
62
+ name: "tag-key",
63
+ values: tags.keys,
64
+ },
65
+ {
66
+ name: "tag-value",
67
+ values: tags.values
68
+ }
69
+ ]
70
+ },
71
+ ).security_groups
72
+ case
73
+ when groups.count == 1
74
+ groups.first.id
75
+ when groups.count < 1
76
+ raise "No security group with tags '#{tags}' was found."
77
+ when groups.count > 1
78
+ raise "More than one security group with tags '#{tags}' found: " + groups.join(', ')
79
+ end
80
+ end
81
+ end
82
+
83
+ def instance_profile(name)
84
+ @instance_profiles ||= {}
85
+ @instance_profiles[name] ||=
86
+ begin
87
+ resource = ::Aws::IAM::Resource.new
88
+ STDERR.puts "Looking up id of instance profile '#{name}'"
89
+ # unfortunately Amazon doesn't let us filter for profiles using
90
+ # a name filter, so for now we have to enumerate and filter manually
91
+ coll = resource.instance_profiles
92
+ profiles = []
93
+ profiles = coll.select {|p| p.instance_profile_name =~ /#{name}/}
94
+
95
+ case
96
+ when profiles.count == 1
97
+ profiles.first.instance_profile_id
98
+ when profiles.count < 1
99
+ raise "No instance profile with name '#{name}' was found."
100
+ when profiles.count > 1
101
+ raise "More than one instance profile with name '#{name}' found: " + profiles.join(', ')
102
+ end
103
+ end
104
+ end
105
+
106
+ def route_table_for_subnet(subnet_id)
107
+ @route_table_for_subnet ||= {}
108
+ @route_table_for_subnet[subnet_id] ||=
109
+ begin
110
+ resp = @ec2_client.describe_route_tables(
111
+ {
112
+ filters: [
113
+ { name: "association.subnet-id", values: [ subnet_id ] },
114
+ ],
115
+ })
116
+
117
+ route_tables = resp.route_tables
118
+
119
+ case
120
+ when route_tables.count == 1
121
+ route_tables.first
122
+ when route_tables.count < 1
123
+ raise "No route table for subnet '#{subnet_id}' was found."
124
+ when route_tables.count > 1
125
+ raise "More than one route table for subnet '#{subnet_id}' was found: " + route_tables.join(', ')
126
+ end
127
+ end
128
+ end
129
+
130
+ def route_table_for_vpc(vpc_id)
131
+ @route_table_for_vpc ||= {}
132
+ @route_table_for_vpc[vpc_id] ||=
133
+ begin
134
+ resp = @ec2_client.describe_route_tables(
135
+ {
136
+ filters: [
137
+ { name: "association.main", values: [ "true" ] },
138
+ { name: "vpc-id", values: [ vpc_id ] },
139
+ ],
140
+ })
141
+
142
+ route_tables = resp.route_tables
143
+
144
+ case
145
+ when route_tables.count == 1
146
+ route_tables.first
147
+ when route_tables.count < 1
148
+ raise "No route table for vpc '#{vpc_id}' was found."
149
+ when route_tables.count > 1
150
+ raise "More than one route table for vpc '#{vpc_id}' was found: " + route_tables.join(', ')
151
+ end
152
+ end
153
+ end
154
+
155
+ def security_groups(*names)
156
+ names.map{|n| security_group(n)}
157
+ end
158
+
159
+ def subnet(name)
160
+ @subnets ||= {}
161
+ @subnets[name] ||=
162
+ begin
163
+ STDERR.puts "Looking up id of subnet '#{name}'"
164
+ subnets = @ec2_resource.subnets(
165
+ {
166
+ filters: [
167
+ {
168
+ name: "tag:Name",
169
+ values: [name],
170
+ },
171
+ ],
172
+ }).limit(2)
173
+ case
174
+ when subnets.count == 1
175
+ subnets.first.id
176
+ when subnets.count < 1
177
+ raise "No subnet with name '#{name}' was found."
178
+ when subnets.count > 1
179
+ raise "More than one subnet with name '#{name}' was found: " + subnets.join(', ')
180
+ end
181
+ end
182
+ end
183
+
184
+ def subnet_by_id(id)
185
+ @subnets_by_id ||= {}
186
+ @subnets_by_id[id] ||=
187
+ begin
188
+ resp = @ec2_client.describe_subnets(
189
+ {
190
+ subnet_ids: [id],
191
+ })
192
+ subnets = resp.subnets
193
+ case
194
+ when subnets.count == 1
195
+ subnets.first
196
+ when subnets.count < 1
197
+ raise "No subnet with id '#{id}' was found."
198
+ when subnets.count > 1
199
+ raise "More than one subnet with id '#{id}' was found: " + subnets.join(', ')
200
+ end
201
+ end
202
+ end
203
+
204
+ def subnets(*names)
205
+ names.map{|n| subnet(n)}
206
+ end
207
+
208
+ def subnets_for_vpc(vpc_id)
209
+ @subnets_for_vpc ||= {}
210
+ @subnets_for_vpc[vpc_id] ||=
211
+ begin
212
+ resp = @ec2_client.describe_subnets(
213
+ {
214
+ filters: [
215
+ { name: "vpc-id", values: [ vpc_id ] },
216
+ ],
217
+ })
218
+
219
+ subnets = resp.subnets
220
+
221
+ case
222
+ when subnets.count >= 1
223
+ subnets
224
+ when subnets.count < 1
225
+ raise "No subnets found for '#{vpc_id}'."
226
+ end
227
+ end
228
+ end
229
+
230
+ def ami(name, owners=["self"])
231
+ @ami ||= {}
232
+ @ami[name] ||=
233
+ begin
234
+ STDERR.puts "looking for an image with prefix '#{name}'"
235
+ resp = @ec2_client.describe_images({owners: owners})
236
+ if resp.images.count < 1
237
+ raise "no images were found"
238
+ end
239
+ m = resp.images.select { |a| /^#{name}/.match(a.name) }
240
+ if m.count == 0
241
+ raise "no image with name '#{name}' was found"
242
+ end
243
+ m.sort { |x,y| y.creation_date <=> x.creation_date }.shift.image_id
244
+ end
245
+ end
246
+
247
+ def availability_zones
248
+ @availability_zones ||=
249
+ begin
250
+ STDERR.puts "looking for AZs in the current region"
251
+ resp = @ec2_client.describe_availability_zones({})
252
+ resp.availability_zones.map { |zone|
253
+ zone.zone_name
254
+ }
255
+ end
256
+ end
257
+
258
+ def vpc(name)
259
+ @vpcs ||= {}
260
+ @vpcs[name] ||=
261
+ begin
262
+ STDERR.puts "looking for a VPC with name '#{name}'"
263
+ resp = @ec2_client.describe_vpcs({})
264
+ matching_vpcs = resp.vpcs.select { |vpc|
265
+ name_tag = vpc.tags.select { |tag| tag.key == "Name" }.first
266
+ name_tag && name_tag.value == name
267
+ }
268
+ case
269
+ when matching_vpcs.count == 1
270
+ matching_vpcs.first
271
+ when matching_vpcs.count < 1
272
+ raise "No VPC with name '#{name}' was found."
273
+ when matching_vpcs.count > 1
274
+ raise "More than one VPC with name '#{name}' was found: " + matching_vpcs.join(', ')
275
+ end
276
+ end
277
+ end
278
+
279
+ def route_table(name)
280
+ @route_tables ||= {}
281
+ @route_tables[name] ||=
282
+ begin
283
+ STDERR.puts "looking for a route table with name '#{name}'"
284
+ route_tables = @ec2_client.describe_route_tables(
285
+ {
286
+ filters: [
287
+ {
288
+ name: "tag:Name",
289
+ values: [name],
290
+ },
291
+ ],
292
+ }
293
+ ).route_tables
294
+ case
295
+ when route_tables.count == 1
296
+ route_tables.first.route_table_id
297
+ when route_tables.count < 1
298
+ raise "No route table with name '#{name}' was found."
299
+ when route_tables.count > 1
300
+ raise "More than one route table with name '#{name}' was found: " + route_tables.join(', ')
301
+ end
302
+ end
303
+ end
304
+
305
+ def elastic_ip(alloc_id)
306
+ @ips ||= {}
307
+ @ips[alloc_id] ||=
308
+ begin
309
+ STDERR.puts "looking for an elastic ip with allocation_id '#{alloc_id}'"
310
+ ips = @ec2_client.describe_addresses(
311
+ {
312
+ filters: [
313
+ {
314
+ name: "allocation-id",
315
+ values: [alloc_id],
316
+ },
317
+ ],
318
+ }
319
+ ).addresses
320
+ case
321
+ when ips.count == 1
322
+ ips.first
323
+ when ips.count < 1
324
+ raise "No elastic ip with allocation_id '#{alloc_id}' was found."
325
+ when ips.count > 1
326
+ raise "More than one elastic ip with allocation_id '#{alloc_id}' was found: " + ips.join(', ')
327
+ end
328
+ end
329
+ end
330
+
331
+ def hosted_zone(fqdn)
332
+ @hosted_zones ||= {}
333
+ @hosted_zones[fqdn] ||=
334
+ begin
335
+ STDERR.puts "looking for a hosted zone with fqdn '#{fqdn}'"
336
+ hosted_zones = @route53_client.list_hosted_zones_by_name({ dns_name: fqdn }).hosted_zones.select { |zone|
337
+ zone.name == "#{fqdn}."
338
+ }
339
+ case
340
+ when hosted_zones.count == 1
341
+ hosted_zones.first
342
+ when hosted_zones.count < 1
343
+ raise "No hosted zone with fqdn '#{fqdn}' was found."
344
+ when hosted_zones.count > 1
345
+ raise "More than one hosted zone with name '#{fqdn}' was found: " + hosted_zones.join(', ')
346
+ end
347
+ end
348
+ end
349
+
350
+ def hosted_zone_by_tag(tag)
351
+ @hosted_zones ||= {}
352
+ @hosted_zones[tag] ||=
353
+ begin
354
+ STDERR.puts "looking for a hosted zone with tag '#{tag}'"
355
+ hosted_zones = @route53_client.list_hosted_zones().hosted_zones.select { |zone|
356
+ tags = @route53_client.list_tags_for_resource({resource_type: "hostedzone", resource_id: zone.id.split('/')[2]}).resource_tag_set.tags.select { |aws_tag|
357
+ tag.keys.any? { |key| String(key) == aws_tag.key && tag[key] == aws_tag.value }
358
+ }
359
+ tags.any?
360
+ }
361
+ case
362
+ when hosted_zones.count == 1
363
+ hosted_zones.first
364
+ when hosted_zones.count < 1
365
+ raise "No hosted zone with tag '#{tag}' was found."
366
+ when hosted_zones.count > 1
367
+ raise "More than one hosted zone with tag '#{tag}' was found: " + hosted_zones.join(', ')
368
+ end
369
+ end
370
+ end
371
+
372
+ def s3_object(bucket, key)
373
+ @s3_objects ||= {}
374
+ @s3_objects["#{bucket}-#{key}"] ||=
375
+ begin
376
+ resp = @s3_client.get_object({ bucket: bucket, key: key })
377
+ resp.body.read
378
+ end
379
+ end
380
+
381
+ def list_objects(bucket)
382
+ @list_objects ||= {}
383
+ @list_objects[bucket] ||=
384
+ begin
385
+ resp = @s3_client.list_objects_v2({ bucket: bucket })
386
+ resp.contents
387
+ end
388
+ end
389
+
390
+ def endpoint_service_by_name(service_name)
391
+ @endpoint_service ||= {}
392
+ @endpoint_service[service_name] ||=
393
+ begin
394
+ resp = @ec2_client.describe_vpc_endpoint_service_configurations(
395
+ {
396
+ filters: [
397
+ {
398
+ name: "service-name",
399
+ values: [service_name],
400
+ },
401
+ ],
402
+ }
403
+ )
404
+
405
+ endpoint_services = resp.service_configurations
406
+ case
407
+ when endpoint_services.count == 1
408
+ endpoint_services.first
409
+ when endpoint_services.count < 1
410
+ raise "No endpoint service with name '#{service_name}' was found."
411
+ when endpoint_services.count > 1
412
+ raise "More than one endpoint service with name '#{service_name}' was found: " + endpoint_services.join(', ')
413
+ end
414
+ end
415
+ end
416
+
417
+ def endpoint_service_by_lb_arn(arn)
418
+ @endpoint_services_by_lb_arn ||= {}
419
+ @endpoint_services_by_lb_arn[arn] ||=
420
+ begin
421
+ resp = @ec2_client.describe_vpc_endpoint_service_configurations
422
+
423
+ services = resp.service_configurations.select { |service|
424
+ service.network_load_balancer_arns.include?(arn)
425
+ }
426
+
427
+ case
428
+ when services.count == 1
429
+ services.first
430
+ when services.count < 1
431
+ raise "No endpoint service with lb arn '#{arn}' was found."
432
+ when services.count > 1
433
+ raise "More than one endpoint service with lb arn '#{arn}' was found: " + services.join(', ')
434
+ end
435
+ end
436
+ end
437
+
438
+ def lb_by_name(name)
439
+ @lbs ||= {}
440
+ @lbs[name] ||=
441
+ begin
442
+ load_balancers = @elb_client.describe_load_balancers({ names: [name] }).load_balancers
443
+
444
+ case
445
+ when load_balancers.count == 1
446
+ load_balancers.first
447
+ when load_balancers.count < 1
448
+ raise "No load balancer with name '#{name}' was found."
449
+ when load_balancers.count > 1
450
+ raise "More than one load balancer with name '#{name}' was found: " + load_balancers.join(', ')
451
+ end
452
+ end
453
+ end
454
+
455
+ def target_groups_by_lb(arn)
456
+ @target_groups ||= {}
457
+ @target_groups[arn] ||=
458
+ begin
459
+ resp = @elb_client.describe_target_groups(
460
+ {
461
+ load_balancer_arn: arn,
462
+ }
463
+ )
464
+
465
+ resp.target_groups
466
+ end
467
+ end
468
+ end
469
+
470
+ end
471
+ end
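
As a rough usage sketch of the Terrafying::Aws::Ops helpers above (the region, tag values and image prefix are hypothetical), each lookup calls the AWS API once and memoises the result for the lifetime of the object:

    ops = Terrafying::Aws::Ops.new("eu-west-1")

    ops.availability_zones                                  # => ["eu-west-1a", "eu-west-1b", ...]
    ops.vpc("staging").vpc_id                               # VPC found via its Name tag
    ops.subnets("staging-private-a", "staging-private-b")   # subnet ids found via Name tags
    ops.ami("base-image")                                   # newest image id matching the prefix, owned by "self"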
lib/terrafying/cli.rb ADDED
@@ -0,0 +1,67 @@
1
+ require 'thor'
2
+
3
+ module Terrafying
4
+ class Cli < Thor
5
+ class_option :no_lock, :type => :boolean, :default => false
6
+ class_option :keep, :type => :boolean, :default => false
7
+ class_option :target, :type => :string, :default => nil
8
+ class_option :scope, :type => :string, :default => nil
9
+ class_option :dynamodb, :type => :boolean, :default => true
10
+
11
+ desc "list PATH", "List resources defined"
12
+ def list(path)
13
+ puts "Defined resources:\n\n"
14
+ Config.new(path, options).list.each do |name|
15
+ puts name
16
+ end
17
+ end
18
+
19
+ desc "plan PATH", "Show execution plan"
20
+ def plan(path)
21
+ exit Config.new(path, options).plan
22
+ end
23
+
24
+ desc "graph PATH", "Show execution graph"
25
+ def graph(path)
26
+ exit Config.new(path, options).graph
27
+ end
28
+
29
+ desc "apply PATH", "Apply changes to resources"
30
+ option :force, :aliases => ['f'], :type => :boolean, :desc => "Forcefully remove any pending locks"
31
+ def apply(path)
32
+ exit Config.new(path, options).apply
33
+ end
34
+
35
+ desc "destroy PATH", "Destroy resources"
36
+ option :force, :aliases => ['f'], :type => :boolean, :desc => "Forcefully remove any pending locks"
37
+ def destroy(path)
38
+ exit Config.new(path, options).destroy
39
+ end
40
+
41
+ desc "json PATH", "Show terraform JSON"
42
+ def json(path)
43
+ puts(Config.new(path, options).json)
44
+ end
45
+
46
+ desc "show-state PATH", "Show state"
47
+ def show_state(path)
48
+ puts(Config.new(path, options).show_state)
49
+ end
50
+
51
+ desc "use-remote-state PATH", "Migrate to using remote state storage"
52
+ def use_remote_state(path)
53
+ puts(Config.new(path, options).use_remote_state)
54
+ end
55
+
56
+ desc "use-local-state PATH", "Migrate to using local state storage"
57
+ def use_local_state(path)
58
+ puts(Config.new(path, options).use_local_state)
59
+ end
60
+
61
+ desc "import PATH ADDR ID", "Import existing infrastructure into your Terraform state"
62
+ def import(path, addr, id)
63
+ exit Config.new(path, options).import(addr, id)
64
+ end
65
+
66
+ end
67
+ end
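
The Thor class above is what the gem's bin/terrafying executable is assumed to dispatch to; a minimal sketch of such an entry point (not part of this diff):

    #!/usr/bin/env ruby
    require 'terrafying'

    # e.g. `terrafying plan stacks/network.rb --target=aws_instance.web`
    Terrafying::Cli.start(ARGV)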
lib/terrafying/dynamodb.rb ADDED
@@ -0,0 +1,31 @@
1
+ require 'aws-sdk'
2
+ require 'json'
3
+ require 'securerandom'
4
+
5
+ # oh rubby
6
+ class ::Aws::DynamoDB::Client
7
+ def ensure_table(table_spec, &block)
8
+ retried = false
9
+ begin
10
+ yield block
11
+ rescue ::Aws::DynamoDB::Errors::ResourceNotFoundException => e
12
+ if not retried
13
+ create_table(table_spec)
+ retried = true
14
+ retry
15
+ else
16
+ raise e
17
+ end
18
+ end
19
+ end
20
+ end
21
+
22
+ module Terrafying
23
+ module DynamoDb
24
+ def self.client
25
+ @@client ||= ::Aws::DynamoDB::Client.new({
26
+ region: Terrafying::Context::REGION,
27
+ #endpoint: 'http://localhost:8000',
28
+ })
29
+ end
30
+ end
31
+ end
lib/terrafying/dynamodb/config.rb ADDED
@@ -0,0 +1,17 @@
1
+ module Terrafying
2
+ module DynamoDb
3
+ class Config
4
+ attr_accessor :state_table, :lock_table
5
+
6
+ def initialize
7
+ @state_table = "terrafying-state"
8
+ @lock_table = "terrafying-state-lock"
9
+ end
10
+ end
11
+
12
+ def config
13
+ @config ||= Config.new
14
+ end
15
+ module_function :config
16
+ end
17
+ end
lib/terrafying/dynamodb/named_lock.rb ADDED
@@ -0,0 +1,126 @@
1
+ require 'terrafying/dynamodb'
2
+ require 'terrafying/dynamodb/config'
3
+
4
+ module Terrafying
5
+ module DynamoDb
6
+ class NamedLock
7
+ def initialize(table_name, name)
8
+ @table_name = table_name
9
+ @name = name
10
+ @client = Terrafying::DynamoDb.client
11
+ end
12
+
13
+ def status
14
+ @client.ensure_table(table) do
15
+ resp = @client.get_item({
16
+ table_name: @table_name,
17
+ key: {
18
+ "name" => @name,
19
+ },
20
+ consistent_read: true,
21
+ })
22
+ if resp.item
23
+ return {
24
+ status: :locked,
25
+ locked_at: resp.item["locked_at"],
26
+ metadata: resp.item["metadata"]
27
+ }
28
+ else
29
+ return {
30
+ status: :unlocked
31
+ }
32
+ end
33
+ end
34
+ end
35
+
36
+ def acquire
37
+ @client.ensure_table(table) do
38
+ begin
39
+ lock_id = SecureRandom.uuid
40
+ @client.update_item(acquire_request(lock_id))
41
+ return lock_id
42
+ rescue ::Aws::DynamoDB::Errors::ConditionalCheckFailedException
43
+ raise "Unable to acquire lock: #{status.inspect}" # TODO
44
+ end
45
+ end
46
+ end
47
+
48
+ def steal
49
+ @client.ensure_table(table) do
50
+ begin
51
+ lock_id = SecureRandom.uuid
52
+ req = acquire_request(lock_id)
53
+ req.delete(:condition_expression)
54
+ @client.update_item(req)
55
+ return lock_id
56
+ rescue ::Aws::DynamoDB::Errors::ConditionalCheckFailedException
57
+ raise "Unable to steal lock: #{status.inspect}" # TODO
58
+ end
59
+ end
60
+ end
61
+
62
+ def release(lock_id)
63
+ @client.ensure_table(table) do
64
+ begin
65
+ @client.delete_item({
66
+ table_name: @table_name,
67
+ key: {
68
+ "name" => @name,
69
+ },
70
+ return_values: "NONE",
71
+ condition_expression: "lock_id = :lock_id",
72
+ expression_attribute_values: {
73
+ ":lock_id" => lock_id,
74
+ },
75
+ })
76
+ nil
77
+ rescue ::Aws::DynamoDB::Errors::ConditionalCheckFailedException
78
+ raise "Unable to release lock: #{status.inspect}" # TODO
79
+ end
80
+ end
81
+ end
82
+
83
+ private
84
+ def acquire_request(lock_id)
85
+ {
86
+ table_name: @table_name,
87
+ key: {
88
+ "name" => @name,
89
+ },
90
+ return_values: "NONE",
91
+ update_expression: "SET lock_id = :lock_id, locked_at = :locked_at, metadata = :metadata",
92
+ condition_expression: "attribute_not_exists(lock_id)",
93
+ expression_attribute_values: {
94
+ ":lock_id" => lock_id,
95
+ ":locked_at" => Time.now.to_s,
96
+ ":metadata" => {
97
+ "owner" => "#{`git config user.name`.chomp} (#{`git config user.email`.chomp})",
98
+ },
99
+ },
100
+ }
101
+ end
102
+
103
+ def table
104
+ {
105
+ table_name: @table_name,
106
+ attribute_definitions: [
107
+ {
108
+ attribute_name: "name",
109
+ attribute_type: "S",
110
+ },
111
+ ],
112
+ key_schema: [
113
+ {
114
+ attribute_name: "name",
115
+ key_type: "HASH",
116
+ },
117
+ ],
118
+ provisioned_throughput: {
119
+ read_capacity_units: 1,
120
+ write_capacity_units: 1,
121
+ }
122
+ }
123
+ end
124
+ end
125
+ end
126
+ end
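
A rough sketch of the DynamoDB-backed lock above used in isolation (the table and scope names are hypothetical). Note that Config#with_lock deliberately releases the lock only on success, so a failed apply keeps it held until it is stolen with --force:

    lock = Terrafying::DynamoDb::NamedLock.new("terrafying-state-lock", "stacks/network.rb")

    lock_id = lock.acquire     # raises if another owner already holds the lock
    puts lock.status.inspect   # => { status: :locked, locked_at: ..., metadata: ... }
    lock.release(lock_id)      # only succeeds with the id returned by acquire/steal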
lib/terrafying/dynamodb/state.rb ADDED
@@ -0,0 +1,92 @@
1
+ require 'digest'
+ require 'json'
2
+ require 'terrafying/dynamodb/config'
3
+
4
+ module Terrafying
5
+ module DynamoDb
6
+ class StateStore
7
+ def initialize(scope, opts = {})
8
+ @scope = scope
9
+ @client = Terrafying::DynamoDb.client
10
+ @table_name = Terrafying::DynamoDb.config.state_table
11
+ end
12
+
13
+ def get
14
+ @client.ensure_table(table) do
15
+ resp = @client.query({
16
+ table_name: @table_name,
17
+ limit: 1,
18
+ key_conditions: {
19
+ "scope" => {
20
+ attribute_value_list: [@scope],
21
+ comparison_operator: "EQ",
22
+ }
23
+ },
24
+ scan_index_forward: false,
25
+ })
26
+ case resp.items.count
27
+ when 0 then return nil
28
+ when 1 then return resp.items.first["state"]
29
+ else raise 'More than one item found when retrieving state. This is a bug and should never happen.'
30
+ end
31
+ end
32
+ end
33
+
34
+ def put(state)
35
+ @client.ensure_table(table) do
36
+ sha256 = Digest::SHA256.hexdigest(state)
37
+ json = JSON.parse(state)
38
+ @client.update_item({
39
+ table_name: @table_name,
40
+ key: {
41
+ "scope" => @scope,
42
+ "serial" => json["serial"].to_i,
43
+ },
44
+ return_values: "NONE",
45
+ update_expression: "SET sha256 = :sha256, #state = :state",
46
+ condition_expression: "attribute_not_exists(serial) OR sha256 = :sha256",
47
+ expression_attribute_names: {
48
+ "#state" => "state",
49
+ },
50
+ expression_attribute_values: {
51
+ ":sha256" => sha256,
52
+ ":state" => state,
53
+ }
54
+ })
55
+ end
56
+ end
57
+
58
+ def table
59
+ {
60
+ table_name: @table_name,
61
+ attribute_definitions: [
62
+ {
63
+ attribute_name: "scope",
64
+ attribute_type: "S",
65
+ },
66
+ {
67
+ attribute_name: "serial",
68
+ attribute_type: "N",
69
+ }
70
+ ],
71
+ key_schema: [
72
+ {
73
+ attribute_name: "scope",
74
+ key_type: "HASH",
75
+ },
76
+ {
77
+ attribute_name: "serial",
78
+ key_type: "RANGE",
79
+ },
80
+
81
+ ],
82
+ provisioned_throughput: {
83
+ read_capacity_units: 1,
84
+ write_capacity_units: 1,
85
+ }
86
+ }
87
+ end
88
+ end
89
+ end
90
+ end
91
+
92
+
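
For illustration, a hypothetical round trip through the StateStore above: put keys each item on the state file's serial number, so rewriting an existing serial is only accepted when its SHA-256 matches, and get returns the item with the highest serial for the scope:

    store = Terrafying::DynamoDb::StateStore.new("stacks/network.rb")
    store.put('{"version": 3, "serial": 1, "modules": []}')
    puts store.get   # the most recently stored state document for that scope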
lib/terrafying/generator.rb ADDED
@@ -0,0 +1,134 @@
1
+ require 'json'
2
+ require 'base64'
3
+ require 'erb'
4
+ require 'ostruct'
5
+ require 'deep_merge'
6
+ require 'terrafying/aws'
7
+
8
+ module Terrafying
9
+
10
+ class Context
11
+
12
+ REGION = ENV.fetch("AWS_REGION", "eu-west-1")
13
+
14
+ PROVIDER_DEFAULTS = {
15
+ aws: { region: REGION }
16
+ }
17
+
18
+ attr_reader :output
19
+
20
+ def initialize
21
+ @output = {
22
+ "resource" => {}
23
+ }
24
+ @children = []
25
+ end
26
+
27
+ def aws
28
+ @@aws ||= Terrafying::Aws::Ops.new REGION
29
+ end
30
+
31
+ def provider(name, spec)
32
+ @output["provider"] ||= {}
33
+ @output["provider"][name] = spec
34
+ end
35
+
36
+ def data(type, name, spec)
37
+ @output["data"] ||= {}
38
+ @output["data"][type.to_s] ||= {}
39
+ @output["data"][type.to_s][name.to_s] = spec
40
+ id_of(type, name)
41
+ end
42
+
43
+ def resource(type, name, attributes)
44
+ @output["resource"][type.to_s] ||= {}
45
+ @output["resource"][type.to_s][name.to_s] = attributes
46
+ id_of(type, name)
47
+ end
48
+
49
+ def template(relative_path, params = {})
50
+ dir = caller_locations[0].path
51
+ filename = File.join(File.dirname(dir), relative_path)
52
+ erb = ERB.new(IO.read(filename))
53
+ erb.filename = filename
54
+ erb.result(OpenStruct.new(params).instance_eval { binding })
55
+ end
56
+
57
+ def output_with_children
58
+ @children.inject(@output) { |out, c| out.deep_merge(c.output_with_children) }
59
+ end
60
+
61
+ def id_of(type,name)
62
+ "${#{type}.#{name}.id}"
63
+ end
64
+
65
+ def output_of(type, name, value)
66
+ "${#{type}.#{name}.#{value}}"
67
+ end
68
+
69
+ def pretty_generate
70
+ JSON.pretty_generate(output_with_children)
71
+ end
72
+
73
+ def resource_names
74
+ out = output_with_children
75
+ ret = []
76
+ for type in out["resource"].keys
77
+ for id in out["resource"][type].keys
78
+ ret << "#{type}.#{id}"
79
+ end
80
+ end
81
+ ret
82
+ end
83
+
84
+ def resources
85
+ out = output_with_children
86
+ ret = []
87
+ for type in out["resource"].keys
88
+ for id in out["resource"][type].keys
89
+ ret << "${#{type}.#{id}.id}"
90
+ end
91
+ end
92
+ ret
93
+ end
94
+
95
+ def add!(*c)
96
+ @children.push(*c)
97
+ c[0]
98
+ end
99
+
100
+ def tf_safe(str)
101
+ str.gsub(/[\.\s\/\?]/, "-")
102
+ end
103
+
104
+ end
105
+
106
+ class RootContext < Context
107
+
108
+ def initialize
109
+ super
110
+
111
+ output["provider"] = PROVIDER_DEFAULTS
112
+ end
113
+
114
+ def backend(name, spec)
115
+ @output["terraform"] = {
116
+ backend: {
117
+ name => spec,
118
+ },
119
+ }
120
+ end
121
+
122
+ def generate(&block)
123
+ instance_eval(&block)
124
+ end
125
+
126
+ def method_missing(fn, *args)
127
+ resource(fn, args.shift.to_s, args.first)
128
+ end
129
+
130
+ end
131
+
132
+ Generator = RootContext.new
133
+
134
+ end
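
As a sketch of the generator DSL above (resource types and attributes are hypothetical): RootContext#method_missing turns unknown method calls inside a generate block into resource definitions, and pretty_generate emits the merged Terraform JSON:

    Terrafying::Generator.generate do
      aws_security_group "web", name: "web", vpc_id: "vpc-0123456789abcdef0"
      aws_instance "web", ami: "ami-0123456789abcdef0", instance_type: "t2.micro",
                   vpc_security_group_ids: [output_of(:aws_security_group, "web", "id")]
    end

    puts Terrafying::Generator.pretty_generate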
lib/terrafying/lock.rb ADDED
@@ -0,0 +1,25 @@
1
+ require 'terrafying/dynamodb/named_lock'
2
+
3
+ module Terrafying
4
+ module Locks
5
+ class NoOpLock
6
+ def acquire
7
+ ""
8
+ end
9
+ def steal
10
+ ""
11
+ end
12
+ def release(lock_id)
13
+ end
14
+ end
15
+
16
+ def self.noop
17
+ NoOpLock.new
18
+ end
19
+
20
+ def self.dynamodb(scope)
21
+ Terrafying::DynamoDb::NamedLock.new(Terrafying::DynamoDb.config.lock_table, scope)
22
+ end
23
+
24
+ end
25
+ end
lib/terrafying/state.rb ADDED
@@ -0,0 +1,51 @@
1
+ require 'terrafying/dynamodb/state'
2
+
3
+ module Terrafying
4
+ module State
5
+
6
+ STATE_FILENAME = "terraform.tfstate"
7
+
8
+ def self.store(config)
9
+ if LocalStateStore.has_local_state?(config)
10
+ local(config)
11
+ else
12
+ remote(config)
13
+ end
14
+ end
15
+
16
+ def self.local(config)
17
+ LocalStateStore.new(config.path)
18
+ end
19
+
20
+ def self.remote(config)
21
+ Terrafying::DynamoDb::StateStore.new(config.scope)
22
+ end
23
+
24
+ class LocalStateStore
25
+ def initialize(path)
26
+ @path = LocalStateStore.state_path(path)
27
+ end
28
+
29
+ def get
30
+ IO.read(@path)
31
+ end
32
+
33
+ def put(state)
34
+ IO.write(@path, state)
35
+ end
36
+
37
+ def delete
38
+ File.delete(@path)
39
+ end
40
+
41
+ def self.has_local_state?(config)
42
+ File.exist?(state_path(config.path))
43
+ end
44
+
45
+ private
46
+ def self.state_path(path)
47
+ File.join(File.dirname(path), STATE_FILENAME)
48
+ end
49
+ end
50
+ end
51
+ end
lib/terrafying/util.rb ADDED
@@ -0,0 +1,32 @@
1
+
2
+ require 'base64'
+ require 'json'
+ require 'yaml'
3
+
4
+ def data_url_from_string(str)
5
+ b64_contents = Base64.strict_encode64(str)
6
+ return "data:;base64,#{b64_contents}"
7
+ end
8
+
9
+ module Terrafying
10
+
11
+ module Util
12
+
13
+ def self.to_ignition(yaml)
14
+ config = YAML.load(yaml)
15
+
16
+ if config.has_key? "storage" and config["storage"].has_key? "files"
17
+ files = config["storage"]["files"]
18
+ config["storage"]["files"] = files.each { |file|
19
+ if file["contents"].is_a? String
20
+ file["contents"] = {
21
+ source: data_url_from_string(file["contents"]),
22
+ }
23
+ end
24
+ }
25
+ end
26
+
27
+ JSON.pretty_generate(config)
28
+ end
29
+
30
+ end
31
+
32
+ end
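
A hypothetical input for Util.to_ignition above: inline string file contents are rewritten into base64 data URLs before the config is re-emitted as pretty-printed Ignition JSON:

    yaml = <<~YAML
      storage:
        files:
          - path: /etc/motd
            contents: "hello from terrafying"
    YAML

    puts Terrafying::Util.to_ignition(yaml)   # "contents" becomes { "source": "data:;base64,..." }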
lib/terrafying/version.rb ADDED
@@ -0,0 +1,4 @@
1
+ module Terrafying
2
+ VERSION = "1.2.1"
3
+ CLI_VERSION = "0.11.2"
4
+ end
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: terrafying
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.2.0
4
+ version: 1.2.1
5
5
  platform: ruby
6
6
  authors:
7
7
  - uSwitch Limited
@@ -145,6 +145,19 @@ extensions: []
145
145
  extra_rdoc_files: []
146
146
  files:
147
147
  - bin/terrafying
148
+ - lib/hash/deep_merge.rb
149
+ - lib/terrafying.rb
150
+ - lib/terrafying/aws.rb
151
+ - lib/terrafying/cli.rb
152
+ - lib/terrafying/dynamodb.rb
153
+ - lib/terrafying/dynamodb/config.rb
154
+ - lib/terrafying/dynamodb/named_lock.rb
155
+ - lib/terrafying/dynamodb/state.rb
156
+ - lib/terrafying/generator.rb
157
+ - lib/terrafying/lock.rb
158
+ - lib/terrafying/state.rb
159
+ - lib/terrafying/util.rb
160
+ - lib/terrafying/version.rb
148
161
  homepage: https://github.com/uswitch/terrafying
149
162
  licenses:
150
163
  - Apache-2.0