terrafying-components 1.4.3 → 1.4.4

This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
@@ -0,0 +1,48 @@
+ #!/bin/bash
+
+ set -euo pipefail
+
+ app_id="${1}"
+ tenant_id="${2}"
+ new_fqdn="${3}"
+
+ set +u
+ if [[ -n "${AZURE_USER}" ]] && [[ -n "${AZURE_PASSWORD}" ]]
+ then
+   set -u
+   az login --service-principal \
+     --allow-no-subscriptions \
+     -t "${tenant_id}" \
+     -u "${AZURE_USER}" \
+     -p "${AZURE_PASSWORD}" >/dev/null
+
+   function finish {
+     exit_code=$?
+     az logout
+     exit $exit_code
+   }
+   trap finish EXIT
+ else
+   set -u
+ fi
+
+ # under `set -e` a failed command aborts the script before a separate
+ # `$?` check could run, so test the exit status directly in the `if`
+ if app="$(az ad app show --id "${app_id}")"
+ then
+   reply_urls="$(echo "${app}" | jq -r '.replyUrls | join(" ")')"
+
+   if echo "${reply_urls}" | grep "${new_fqdn}" &>/dev/null
+   then
+     echo "Already contains a reply url for '${new_fqdn}'"
+   else
+     new_reply_urls="${reply_urls} https://${new_fqdn}/oauth2/callback"
+
+     # new_reply_urls is deliberately left unquoted so it splits into
+     # one argument per URL
+     az ad app update --id "${app_id}" --reply-urls ${new_reply_urls}
+   fi
+ else
+   echo "App wasn't found"
+   exit 1
+ fi
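
The script above adds an OAuth2 reply URL to an Azure AD application: given an app id, a tenant id and an FQDN, it appends https://<fqdn>/oauth2/callback to the app's reply URLs unless one matching the FQDN is already present. A plausible invocation (the script filename is illustrative, not fixed by the package) would be: AZURE_USER=<sp-user> AZURE_PASSWORD=<sp-secret> ./update-reply-urls.sh <app-id> <tenant-id> my-service.example.com. When the two credential variables are unset, the script skips az login/az logout and assumes an already-authenticated az session.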
@@ -0,0 +1,115 @@
+ ---
+ ignition:
+   version: "2.1.0"
+
+
+ passwd:
+   users:
+     - name: "admin"
+       passwordHash: "x"
+       sshAuthorizedKeys:
+         - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDlIRM5H4lz/t8PfiGptR8cWqVrD8NCwROZly1XuYooWiIdTinJAvkATdR54ic6YuNenoKOeqtTj0dkytbzi5xItBti6cqzHvFAOXhKzVCsR5n/Mdt2KPbp215pFS96yfsx/24Z4cGygXe24SEXv28KdWZ1nWQyUrlne9jMaB7n3cGzNaXOPy42l03/bAaflWoyD7gyyS9XHDuZkLxVrhtlO43UtIXL4IZzKTCy1cZdaabZxRsrSzHUp+/5p3fHYgcYmwyO0Y+W3TP6LRC2ebeQe0r4Rh1o83sRvcbmuG14Wk7wDVFtu0vjtwOflCsRHGRibep/rXvtqf1/bG3brQ9tAV3oIeZ4r4yPUZenwIfM2pQH3ElD3kqw+Pdh1la1QXb4FDxeEw29nLU1eQF6ggnp3gzJJDRQWRB/I2RjKje5M1NcKjn+8ZBG1PHBikaOkjMRkXzOb8JQxKcWmeuRHo/ZTh7oUDDMObAVej9K91eCP3Dtz5QqmL1+J4+7YCjoBOBPpX3J+mrkxHRHcJYEIyJF6/mNDtov09Vji7Rdd0j1A5CJ2fvcuuUGK0LUHMSEyzLKg830E8b/kIZrufWEsljqlcHrYCJAO/LYl77PFe6B/vNfZtyw4fI6juGDlZNYWYXLJ2c+poCS8+2o1KjCli1lV2BOAjDUKEJxQ9NAw6MxPQ== tom.booth@uswitch.com"
+       groups: ["sudo", "docker"]
+
+ systemd:
+   units:
+     <% if disable_update_engine %>
+     - name: update-engine.service
+       mask: true
+     - name: locksmithd.service
+       mask: true
+     <% end %>
+     <% volumes.each { |volume| %>
+     - name: "<%= volume[:mount].tr('/','-')[1..-1] %>.mount"
+       enabled: true
+       contents: |
+         [Install]
+         WantedBy=local-fs.target
+
+         [Unit]
+         Before=docker.service
+
+         [Mount]
+         What=<%= volume[:device] %>
+         Where=<%= volume[:mount] %>
+         Type=ext4
+     <% } %>
+
+     <% units.each { |unit| %>
+     - name: "<%= unit[:name] %>"
+       enabled: true<% if unit.has_key?(:contents) %>
+       contents: "<%= unit[:contents].dump[1..-2] %>"<% end %><% if unit.has_key?(:dropins) %>
+       dropins:
+       <% unit[:dropins].each { |dropin| %>
+         - contents: "<%= dropin[:contents].dump[1..-2] %>"
+           name: "<%= dropin[:name] %>"
+       <% } %><% end %>
+     <% } %>
+
+
+ storage:
+   <% if volumes.count > 0 %>
+   filesystems:
+     <% volumes.each { |volume| %>
+     - name: <%= volume[:mount].tr('/','-')[1..-1] %>
+       mount:
+         device: <%= volume[:device] %>
+         format: ext4
+     <% } %>
+   <% end %>
+   files:
+     <% files.each { |file| %>
+     - filesystem: "root"
+       path: <%= file[:path] %>
+       mode: <%= file[:mode] %>
+       user: { id: 0 }
+       group: { id: 0 }
+       <% if file[:contents].is_a?(Hash) %>
+       contents:
+         source: "<%= file[:contents][:source] %>"
+       <% else %>
+       contents: "<%= file[:contents].gsub(/\n/, '\\n').gsub(/\"/, '\\"') %>"
+       <% end %>
+     <% } %>
+     <% cas.each { |ca| %>
+     - filesystem: "root"
+       path: "/etc/ssl/<%= ca.name %>/ca.cert"
+       mode: 0444
+       user: { id: 0 }
+       group: { id: 0 }
+       contents:
+         source: "<%= ca.source %>"
+     <% } %>
+     <% keypairs.each { |keypair| %>
+     <% if keypair.has_key?(:name) %>
+     - filesystem: "root"
+       path: "/etc/ssl/<%= keypair[:ca].name %>/<%= keypair[:name] %>/cert"
+       mode: 0444
+       user: { id: 0 }
+       group: { id: 0 }
+       contents:
+         source: "<%= keypair[:source][:cert] %>"
+     - filesystem: "root"
+       path: "/etc/ssl/<%= keypair[:ca].name %>/<%= keypair[:name] %>/key"
+       mode: 0444
+       user: { id: 0 }
+       group: { id: 0 }
+       contents:
+         source: "<%= keypair[:source][:key] %>"
+     <% else %>
+     - filesystem: "root"
+       path: "/etc/ssl/<%= keypair[:ca].name %>/ca.key"
+       mode: 0444
+       user: { id: 0 }
+       group: { id: 0 }
+       contents:
+         source: "<%= keypair[:source][:key] %>"
+     <% end %>
+     <% } %>
+     - filesystem: "root"
+       path: '/etc/usersync.env'
+       mode: 0644
+       user: { id: 0 }
+       group: { id: 0 }
+       contents: |
+         USERSYNC_SSH_GROUP="<%= ssh_group %>"
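
The file above is plain ERB over an Ignition 2.1 YAML config, so it can be rendered standalone. A minimal rendering sketch, assuming the template is saved as ignition.yaml.erb (a hypothetical filename) and that the bindings have the shapes the template dereferences; the gem's own rendering helper may well differ:

    require 'erb'

    # Illustrative bindings only; the names mirror what the template uses.
    disable_update_engine = true
    volumes  = [{ device: '/dev/xvdb', mount: '/var/lib/docker' }]
    units    = [{ name: 'app.service', contents: "[Service]\nExecStart=/usr/bin/app\n" }]
    files    = []   # each entry: { path:, mode:, contents: String or { source: } }
    cas      = []   # objects responding to #name and #source
    keypairs = []   # hashes with :ca, :source and optionally :name
    ssh_group = 'cloud-team'

    # Render the template with the local variables above in scope.
    template = File.read('ignition.yaml.erb')
    puts ERB.new(template).result(binding)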
@@ -0,0 +1,131 @@
+
+ require 'digest' # Digest::SHA2 is used to build stable rule identifiers
+
+ module Terrafying
+
+   module Components
+
+     module Usable
+
+       def security_group
+         @security_group
+       end
+
+       def ingress_security_group
+         @ingress_security_group || @security_group
+       end
+
+       def egress_security_group
+         @egress_security_group || @security_group
+       end
+
+       def path_mtu_setup!
+         resource :aws_security_group_rule, "#{@name}-path-mtu", {
+           security_group_id: self.egress_security_group,
+           type: "ingress",
+           protocol: 1, # icmp
+           from_port: 3, # icmp type
+           to_port: 4, # icmp code
+           cidr_blocks: ["0.0.0.0/0"],
+         }
+       end
+
+       def pingable_by_cidr(*cidrs)
+         ident = Digest::SHA2.hexdigest cidrs.to_s
+
+         resource :aws_security_group_rule, "#{@name}-to-#{ident}-ping", {
+           security_group_id: self.ingress_security_group,
+           type: "ingress",
+           protocol: 1, # icmp
+           from_port: 8, # icmp type
+           to_port: 0, # icmp code
+           cidr_blocks: cidrs,
+         }
+       end
+
+       def used_by_cidr(*cidrs)
+         cidrs.map { |cidr|
+           cidr_ident = cidr.gsub(/[\.\/]/, "-")
+
+           @ports.map { |port|
+             resource :aws_security_group_rule, "#{@name}-to-#{cidr_ident}-#{port[:name]}", {
+               security_group_id: self.ingress_security_group,
+               type: "ingress",
+               from_port: port[:upstream_port],
+               to_port: port[:upstream_port],
+               protocol: port[:type] == "udp" ? "udp" : "tcp",
+               cidr_blocks: [cidr],
+             }
+           }
+         }
+       end
+
+       def pingable_by(*other_resources)
+         other_resources.map { |other_resource|
+           resource :aws_security_group_rule, "#{@name}-to-#{other_resource.name}-ping", {
+             security_group_id: self.ingress_security_group,
+             type: "ingress",
+             protocol: 1, # icmp
+             from_port: 8, # icmp type
+             to_port: 0, # icmp code
+             source_security_group_id: other_resource.egress_security_group,
+           }
+
+           resource :aws_security_group_rule, "#{@name}-to-#{other_resource.name}-pingv6", {
+             security_group_id: self.ingress_security_group,
+             type: "ingress",
+             protocol: 58, # icmpv6
+             from_port: 128, # icmp type
+             to_port: 0, # icmp code
+             source_security_group_id: other_resource.egress_security_group,
+           }
+
+           resource :aws_security_group_rule, "#{other_resource.name}-to-#{@name}-ping", {
+             security_group_id: other_resource.egress_security_group,
+             type: "egress",
+             protocol: 1, # icmp
+             from_port: 8, # icmp type
+             to_port: 0, # icmp code
+             source_security_group_id: self.ingress_security_group,
+           }
+
+           resource :aws_security_group_rule, "#{other_resource.name}-to-#{@name}-pingv6", {
+             security_group_id: other_resource.egress_security_group,
+             type: "egress",
+             protocol: 58, # icmpv6
+             from_port: 128, # icmp type
+             to_port: 0, # icmp code
+             source_security_group_id: self.ingress_security_group,
+           }
+         }
+       end
+
+       def used_by(*other_resources)
+         other_resources.map { |other_resource|
+           @ports.map { |port|
+             resource :aws_security_group_rule, "#{@name}-to-#{other_resource.name}-#{port[:name]}", {
+               security_group_id: self.ingress_security_group,
+               type: "ingress",
+               from_port: port[:upstream_port],
+               to_port: port[:upstream_port],
+               protocol: port[:type] == "udp" ? "udp" : "tcp",
+               source_security_group_id: other_resource.egress_security_group,
+             }
+
+             resource :aws_security_group_rule, "#{other_resource.name}-to-#{@name}-#{port[:name]}", {
+               security_group_id: other_resource.egress_security_group,
+               type: "egress",
+               from_port: port[:downstream_port],
+               to_port: port[:downstream_port],
+               protocol: port[:type] == "udp" ? "udp" : "tcp",
+               source_security_group_id: self.ingress_security_group,
+             }
+           }
+         }
+       end
+
+     end
+
+   end
+
+ end
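
A minimal sketch of how these helpers compose, assuming the gem is on the load path. FakeService and its resource stub are illustrative stand-ins for a real component (in the gem, resource comes from Terrafying::Context), not part of the package:

    require 'terrafying/components/usable'

    # Hypothetical stand-in: supplies the @name/@ports/@security_group state
    # that Usable expects, and stubs `resource` to print the rule it would
    # declare instead of registering it with Terraform.
    class FakeService
      include Terrafying::Components::Usable

      attr_reader :name

      def initialize(name, ports)
        @name = name
        @ports = ports
        @security_group = "sg-#{name}"
      end

      def resource(type, ident, attrs)
        puts "#{type} #{ident}: #{attrs}"
      end
    end

    db = FakeService.new("db", [{ name: "postgres", type: "tcp",
                                  upstream_port: 5432, downstream_port: 5432 }])
    app = FakeService.new("app", [])

    db.used_by(app)                    # ingress on db plus matching egress on app
    db.pingable_by_cidr("10.0.0.0/16") # ICMP echo from a CIDR range
    db.path_mtu_setup!                 # accept ICMP fragmentation-needed replies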
@@ -0,0 +1,5 @@
+ module Terrafying
+   module Components
+     VERSION = "1.4.4"
+   end
+ end
@@ -0,0 +1,418 @@
+
+ require 'netaddr'
+ require 'digest' # Digest::SHA2 is used to derive route identifiers
+
+ require 'terrafying/components/subnet'
+ require 'terrafying/components/zone'
+ require 'terrafying/generator'
+
+ module Terrafying
+
+   module Components
+     DEFAULT_SSH_GROUP = 'cloud-team'
+     DEFAULT_ZONE = "vpc.usw.co"
+
+     class VPC < Terrafying::Context
+
+       attr_reader :id, :name, :cidr, :zone, :azs, :subnets, :internal_ssh_security_group, :ssh_group
+
+       def self.find(name)
+         VPC.new.find name
+       end
+
+       def self.create(name, cidr, options={})
+         VPC.new.create name, cidr, options
+       end
+
+       def initialize()
+         super
+       end
+
+
+       def find(name)
+         vpc = aws.vpc(name)
+
+         @name = name
+         @id = vpc.vpc_id
+         @cidr = vpc.cidr_block
+         @zone = Terrafying::Components::Zone.find_by_tag({vpc: @id})
+         if @zone.nil?
+           raise "Failed to find zone"
+         end
+
+         @subnets = aws.subnets_for_vpc(vpc.vpc_id).each_with_object({}) { |subnet, subnets|
+           subnet_inst = Subnet.find(subnet.subnet_id)
+
+           subnet_name_tag = subnet.tags.detect { |tag| tag.key == "subnet_name" }
+
+           if subnet_name_tag
+             key = subnet_name_tag.value.to_sym
+           else
+             key = subnet_inst.public ? :public : :private
+           end
+
+           if subnets.has_key?(key)
+             subnets[key] << subnet_inst
+           else
+             subnets[key] = [ subnet_inst ]
+           end
+         }
+
+         # need to sort subnets so they are in az order
+         @subnets.each { |_, s| s.sort! { |a, b| a.az <=> b.az } }
+
+         tags = vpc.tags.select { |tag| tag.key == "ssh_group" }
+         if tags.count > 0
+           @ssh_group = tags[0].value
+         else
+           @ssh_group = DEFAULT_SSH_GROUP
+         end
+
+         @internal_ssh_security_group = aws.security_group("#{tf_safe(name)}-internal-ssh")
+         self
+       end
+
+       def create(name, raw_cidr, options={})
+         options = {
+           subnet_size: 24,
+           internet_access: true,
+           nat_eips: [],
+           azs: aws.availability_zones,
+           tags: {},
+           ssh_group: DEFAULT_SSH_GROUP,
+         }.merge(options)
+
+         if options[:parent_zone].nil?
+           options[:parent_zone] = Zone.find(DEFAULT_ZONE)
+         end
+
+         if options[:subnets].nil?
+           if options[:internet_access]
+             options[:subnets] = {
+               public: { public: true },
+               private: { internet: true },
+             }
+           else
+             options[:subnets] = {
+               private: { },
+             }
+           end
+         end
+
+         @name = name
+         @cidr = raw_cidr
+         @azs = options[:azs]
+         @tags = options[:tags]
+         @ssh_group = options[:ssh_group]
+
+         cidr = NetAddr::CIDR.create(raw_cidr)
+
+         @remaining_ip_space = NetAddr::Tree.new
+         @remaining_ip_space.add! cidr
+         @subnet_size = options[:subnet_size]
+         @subnets = {}
+
+         per_az_subnet_size = options[:subnets].values.reduce(0) { |memo, s|
+           memo + (1 << (32 - s.fetch(:bit_size, @subnet_size)))
+         }
+         total_subnet_size = per_az_subnet_size * @azs.count
+
+         if total_subnet_size > cidr.size
+           raise "Not enough space for subnets in CIDR"
+         end
+
+         @id = resource :aws_vpc, name, {
+           cidr_block: cidr.to_s,
+           enable_dns_hostnames: true,
+           tags: { Name: name, ssh_group: @ssh_group }.merge(@tags),
+         }
+
+         @zone = add! Terrafying::Components::Zone.create("#{name}.#{options[:parent_zone].fqdn}", {
+           parent_zone: options[:parent_zone],
+           tags: { vpc: @id }.merge(@tags),
+         })
+
+         dhcp = resource :aws_vpc_dhcp_options, name, {
+           domain_name: @zone.fqdn,
+           domain_name_servers: ["AmazonProvidedDNS"],
+           tags: { Name: name }.merge(@tags),
+         }
+
+         resource :aws_vpc_dhcp_options_association, name, {
+           vpc_id: @id,
+           dhcp_options_id: dhcp,
+         }
+
+
+         if options[:internet_access]
+
+           if options[:nat_eips].size == 0
+             options[:nat_eips] = @azs.map { |az| resource :aws_eip, "#{name}-nat-gateway-#{az}", { vpc: true } }
+           elsif options[:nat_eips].size != @azs.count
+             raise "The number of NAT EIPs has to match the number of AZs"
+           end
+
+           @internet_gateway = resource :aws_internet_gateway, name, {
+             vpc_id: @id,
+             tags: {
+               Name: name,
+             }.merge(@tags)
+           }
+           allocate_subnets!(:nat_gateway, { bit_size: 28, public: true })
+
+           @nat_gateways = @azs.zip(@subnets[:nat_gateway], options[:nat_eips]).map { |az, subnet, eip|
+             resource :aws_nat_gateway, "#{name}-#{az}", {
+               allocation_id: eip,
+               subnet_id: subnet.id,
+             }
+           }
+
+         end
+
+         options[:subnets].each { |key, config| allocate_subnets! key, config }
+
+         @internal_ssh_security_group = resource :aws_security_group, "#{name}-internal-ssh", {
+           name: "#{name}-internal-ssh",
+           description: "Allows SSH between machines inside the VPC CIDR",
+           tags: @tags,
+           vpc_id: @id,
+           ingress: [
+             {
+               from_port: 22,
+               to_port: 22,
+               protocol: "tcp",
+               cidr_blocks: [@cidr],
+             },
+           ],
+           egress: [
+             {
+               from_port: 22,
+               to_port: 22,
+               protocol: "tcp",
+               cidr_blocks: [@cidr],
+             },
+           ],
+         }
+         self
+       end
+
+       def peer_with_external(account_id, vpc_id, cidrs, options={})
+         options = {
+           region: "eu-west-1",
+           subnets: @subnets.values.flatten,
+         }.merge(options)
+
+         peering_connection = resource :aws_vpc_peering_connection, "#{@name}-to-#{account_id}-#{vpc_id}", {
+           peer_owner_id: account_id,
+           peer_vpc_id: vpc_id,
+           peer_region: options[:region],
+           vpc_id: @id,
+           auto_accept: false,
+           tags: { Name: "#{@name} to #{account_id}.#{vpc_id}" }.merge(@tags),
+         }
+
+         our_route_tables = options[:subnets].map(&:route_table).sort.uniq
+
+         our_route_tables.product(cidrs).each { |route_table, cidr|
+           hash = Digest::SHA2.hexdigest "#{route_table}-#{tf_safe(cidr)}"
+
+           resource :aws_route, "#{@name}-to-#{account_id}-#{vpc_id}-peer-#{hash}", {
+             route_table_id: route_table,
+             destination_cidr_block: cidr,
+             vpc_peering_connection_id: peering_connection,
+           }
+         }
+       end
+
+       def peer_with_vpn(ip_address, cidrs, options={})
+         options = {
+           bgp_asn: 65000,
+           type: "ipsec.1",
+           tunnels: [],
+           static_routes_only: true,
+           subnets: @subnets.values.flatten,
+         }.merge(options)
+
+         ident = tf_safe(ip_address)
+
+         if options[:tunnels].count > 2
+           raise "You can only define a max of two tunnels"
+         end
+
+         customer_gateway = resource :aws_customer_gateway, ident, {
+           bgp_asn: options[:bgp_asn],
+           ip_address: ip_address,
+           type: options[:type],
+           tags: {
+             Name: "Connection to #{ip_address}"
+           }.merge(@tags),
+         }
+
+         vpn_gateway = resource :aws_vpn_gateway, ident, {
+           vpc_id: @id,
+           tags: {
+             Name: "Connection to #{ip_address}"
+           }.merge(@tags),
+         }
+
+         connection_config = {
+           vpn_gateway_id: vpn_gateway,
+           customer_gateway_id: customer_gateway,
+           type: options[:type],
+           static_routes_only: options[:static_routes_only],
+           tags: {
+             Name: "Connection to #{ip_address}"
+           }.merge(@tags),
+         }
+
+         options[:tunnels].each.with_index { |tunnel, i|
+           connection_config["tunnel#{i+1}_inside_cidr"] = tunnel[:cidr]
+
+           if tunnel.has_key?(:key)
+             connection_config["tunnel#{i+1}_preshared_key"] = tunnel[:key]
+           end
+         }
+
+         connection = resource :aws_vpn_connection, ident, connection_config
+
+         cidrs.each { |cidr|
+           resource :aws_vpn_connection_route, "#{ident}-#{tf_safe(cidr)}", {
+             destination_cidr_block: cidr,
+             vpn_connection_id: connection,
+           }
+         }
+
+         route_tables = options[:subnets].map(&:route_table).sort.uniq
+         route_tables.product(cidrs).each { |route_table, cidr|
+           hash = Digest::SHA2.hexdigest "#{route_table}-#{tf_safe(cidr)}"
+
+           resource :aws_route, "#{@name}-to-#{ident}-peer-#{hash}", {
+             route_table_id: route_table,
+             destination_cidr_block: cidr,
+             gateway_id: vpn_gateway,
+           }
+         }
+
+       end
+
+       def peer_with(other_vpc, options={})
+         options = {
+           complete: false,
+           peerings: [
+             { from: @subnets.values.flatten, to: other_vpc.subnets.values.flatten },
+             { from: other_vpc.subnets.values.flatten, to: @subnets.values.flatten },
+           ],
+         }.merge(options)
+
+         other_vpc_ident = tf_safe(other_vpc.name)
+
+         our_cidr = NetAddr::CIDR.create(@cidr)
+         other_cidr = NetAddr::CIDR.create(other_vpc.cidr)
+         if our_cidr.contains? other_cidr[0] or our_cidr.contains? other_cidr.last
+           raise "VPCs to be peered have overlapping CIDRs"
+         end
+
+         peering_connection = resource :aws_vpc_peering_connection, "#{@name}-to-#{other_vpc_ident}", {
+           peer_vpc_id: other_vpc.id,
+           vpc_id: @id,
+           auto_accept: true,
+           tags: { Name: "#{@name} to #{other_vpc.name}" }.merge(@tags),
+         }
+
+         if options[:complete]
+           our_route_tables = @subnets.values.flatten.map(&:route_table).sort.uniq
+           their_route_tables = other_vpc.subnets.values.flatten.map(&:route_table).sort.uniq
+
+           (our_route_tables.product([other_vpc.cidr]) + their_route_tables.product([@cidr])).each { |route_table, cidr|
+             hash = Digest::SHA2.hexdigest "#{route_table}-#{tf_safe(cidr)}"
+
+             resource :aws_route, "#{@name}-#{other_vpc_ident}-peer-#{hash}", {
+               route_table_id: route_table,
+               destination_cidr_block: cidr,
+               vpc_peering_connection_id: peering_connection,
+             }
+           }
+         else
+           options[:peerings].each.with_index { |peering, i|
+             route_tables = peering[:from].map(&:route_table).sort.uniq
+             cidrs = peering[:to].map(&:cidr).sort.uniq
+
+             route_tables.product(cidrs).each { |route_table, cidr|
+
+               hash = Digest::SHA2.hexdigest "#{route_table}-#{tf_safe(cidr)}"
+
+               resource :aws_route, "#{@name}-#{other_vpc_ident}-peer-#{hash}", {
+                 route_table_id: route_table,
+                 destination_cidr_block: cidr,
+                 vpc_peering_connection_id: peering_connection,
+               }
+             }
+           }
+         end
+       end
+
+       def extract_subnet!(bit_size)
+         if bit_size > 28 # AWS does not allow subnets smaller than /28
+           bit_size = 28
+         end
+
+         targets = @remaining_ip_space.find_space({ Subnet: bit_size })
+
+         if targets.count == 0
+           raise "Ran out of IP space to allocate a /#{bit_size}"
+         end
+
+         target = targets[0]
+
+         @remaining_ip_space.delete!(target)
+
+         if target.bits == bit_size
+           new_subnet = target
+         else
+           new_subnet = target.subnet({ Bits: bit_size, Objectify: true })[0]
+
+           target.remainder(new_subnet).each { |rem|
+             @remaining_ip_space.add!(rem)
+           }
+         end
+
+         return new_subnet.to_s
+       end
+
+       def allocate_subnets!(name, options = {})
+         options = {
+           public: false,
+           bit_size: @subnet_size,
+           internet: true,
+           tags: {},
+         }.merge(options)
+
+         if options[:public]
+           gateways = [@internet_gateway] * @azs.count
+         elsif options[:internet] && @nat_gateways != nil
+           gateways = @nat_gateways
+         else
+           gateways = [nil] * @azs.count
+         end
+
+         @subnets[name] = @azs.zip(gateways).map { |az, gateway|
+           subnet_options = { tags: { subnet_name: name }.merge(options[:tags]).merge(@tags) }
+           if gateway != nil
+             if options[:public]
+               subnet_options[:gateway] = gateway
+             elsif options[:internet]
+               subnet_options[:nat_gateway] = gateway
+             end
+           end
+
+           add! Terrafying::Components::Subnet.create_in(
+             self, name, az, extract_subnet!(options[:bit_size]), subnet_options
+           )
+         }
+       end
+
+     end
+
+   end
+
+ end
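
A minimal usage sketch for the class above; the names, CIDR and tags are illustrative, and running it for real needs AWS credentials plus a resolvable parent zone (VPC.create falls back to Zone.find(DEFAULT_ZONE) when parent_zone is not given):

    require 'terrafying/components/vpc'

    # Create a VPC with the default public/private subnet pair and /24
    # subnets, then peer it with an existing VPC, routing all subnets.
    vpc = Terrafying::Components::VPC.create("staging", "10.1.0.0/16",
      subnet_size: 24,
      tags: { team: "infra" },  # illustrative tag
    )

    existing = Terrafying::Components::VPC.find("production")
    vpc.peer_with(existing, complete: true)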