cloud-mu 3.1.4 → 3.1.5
This diff shows the changes between two publicly released versions of this package, as they appear in the supported public registries, and is provided for informational purposes only.
- checksums.yaml +4 -4
- data/ansible/roles/mu-windows/README.md +33 -0
- data/ansible/roles/mu-windows/defaults/main.yml +2 -0
- data/ansible/roles/mu-windows/handlers/main.yml +2 -0
- data/ansible/roles/mu-windows/meta/main.yml +53 -0
- data/ansible/roles/mu-windows/tasks/main.yml +20 -0
- data/ansible/roles/mu-windows/tests/inventory +2 -0
- data/ansible/roles/mu-windows/tests/test.yml +5 -0
- data/ansible/roles/mu-windows/vars/main.yml +2 -0
- data/cloud-mu.gemspec +4 -2
- data/cookbooks/mu-tools/recipes/selinux.rb +2 -1
- data/cookbooks/mu-tools/recipes/windows-client.rb +140 -144
- data/cookbooks/mu-tools/resources/windows_users.rb +44 -43
- data/extras/image-generators/AWS/win2k12.yaml +16 -13
- data/extras/image-generators/AWS/win2k16.yaml +16 -13
- data/extras/image-generators/AWS/win2k19.yaml +19 -0
- data/modules/mu.rb +72 -9
- data/modules/mu/adoption.rb +14 -2
- data/modules/mu/cloud.rb +111 -10
- data/modules/mu/clouds/aws.rb +23 -7
- data/modules/mu/clouds/aws/container_cluster.rb +640 -692
- data/modules/mu/clouds/aws/dnszone.rb +49 -45
- data/modules/mu/clouds/aws/firewall_rule.rb +177 -214
- data/modules/mu/clouds/aws/role.rb +17 -8
- data/modules/mu/clouds/aws/search_domain.rb +1 -1
- data/modules/mu/clouds/aws/server.rb +734 -1027
- data/modules/mu/clouds/aws/userdata/windows.erb +2 -1
- data/modules/mu/clouds/aws/vpc.rb +297 -786
- data/modules/mu/clouds/aws/vpc_subnet.rb +286 -0
- data/modules/mu/clouds/google/bucket.rb +1 -1
- data/modules/mu/clouds/google/container_cluster.rb +21 -17
- data/modules/mu/clouds/google/function.rb +8 -2
- data/modules/mu/clouds/google/server.rb +102 -32
- data/modules/mu/clouds/google/vpc.rb +1 -1
- data/modules/mu/config.rb +12 -1
- data/modules/mu/config/server.yml +1 -0
- data/modules/mu/defaults/AWS.yaml +51 -28
- data/modules/mu/groomers/ansible.rb +54 -17
- data/modules/mu/groomers/chef.rb +13 -7
- data/modules/mu/master/ssl.rb +0 -1
- data/modules/mu/mommacat.rb +8 -0
- data/modules/tests/ecs.yaml +23 -0
- data/modules/tests/includes-and-params.yaml +2 -1
- data/modules/tests/server-with-scrub-muisms.yaml +1 -0
- data/modules/tests/win2k12.yaml +25 -0
- data/modules/tests/win2k16.yaml +25 -0
- data/modules/tests/win2k19.yaml +25 -0
- data/requirements.txt +1 -0
- metadata +50 -4
- data/extras/image-generators/AWS/windows.yaml +0 -18
- data/modules/tests/needwork/win2k12.yaml +0 -13
data/modules/mu/clouds/aws.rb
CHANGED
@@ -182,18 +182,34 @@ end
 end
 end

-# Tag
+# Tag an EC2 resource
 #
 # @param resource [String]: The cloud provider identifier of the resource to tag
 # @param region [String]: The cloud provider region
+# @param credentials [String]: Credentials to authorize API requests
+# @param optional [Boolean]: Whether to apply our optional generic tags
+# @param nametag [String]: A +Name+ tag to apply
+# @param othertags [Array<Hash>]: Miscellaneous custom tags, in Basket of Kittens style
 # @return [void]
-def self.createStandardTags(resource = nil, region: MU.curRegion, credentials: nil)
+def self.createStandardTags(resource = nil, region: MU.curRegion, credentials: nil, optional: true, nametag: nil, othertags: nil)
 tags = []
 MU::MommaCat.listStandardTags.each_pair { |name, value|
-if !value.nil?
-tags << {key: name, value: value}
-end
+tags << {key: name, value: value} if !value.nil?
 }
+if optional
+MU::MommaCat.listOptionalTags.each { |key, value|
+tags << {key: name, value: value} if !value.nil?
+}
+end
+if nametag
+tags << { key: "Name", value: nametag }
+end
+if othertags
+othertags.each { |tag|
+tags << { key: tag['key'], value: tag['value'] }
+}
+end
+
 if MU::Cloud::CloudFormation.emitCloudFormation
 return tags
 end
@@ -215,6 +231,7 @@ end
 end
 end
 MU.log "Created standard tags for resource #{resource}", MU::DEBUG, details: caller
+
 end

 @@myVPCObj = nil
@@ -406,7 +423,7 @@ end
 end

 begin
-Timeout.timeout(
+Timeout.timeout(4) do
 instance_id = open("http://169.254.169.254/latest/meta-data/instance-id").read
 if !instance_id.nil? and instance_id.size > 0
 @@is_in_aws = true
@@ -1150,7 +1167,6 @@ end

 # Tag a resource. Defaults to applying our MU deployment identifier, if no
 # arguments other than the resource identifier are given.
-# XXX this belongs in the cloud layer(s)
 #
 # @param resource [String]: The cloud provider identifier of the resource to tag
 # @param tag_name [String]: The name of the tag to create
data/modules/mu/clouds/aws/container_cluster.rb
CHANGED
@@ -39,123 +39,82 @@ module MU
 def create
 if @config['flavor'] == "EKS" or
 (@config['flavor'] == "Fargate" and !@config['containers'])
-subnet_ids = []
-@config["vpc"]["subnets"].each { |subnet|
-subnet_obj = @vpc.getSubnet(cloud_id: subnet["subnet_id"].to_s, name: subnet["subnet_name"].to_s)
-raise MuError, "Couldn't find a live subnet matching #{subnet} in #{@vpc} (#{@vpc.subnets})" if subnet_obj.nil?
-subnet_ids << subnet_obj.cloud_id
-}

-
+subnet_ids = mySubnets.map { |s| s.cloud_id }

-
-
-@
-
+params = {
+:name => @mu_name,
+:version => @config['kubernetes']['version'],
+:role_arn => @deploy.findLitterMate(name: @config['name']+"controlplane", type: "roles").arn,
+:resources_vpc_config => {
+:security_group_ids => myFirewallRules.map { |fw| fw.cloud_id },
+:subnet_ids => subnet_ids
 }
-
-
-
-
-
-
-
-
-
-:security_group_ids => security_groups,
-:subnet_ids => subnet_ids
-}
+}
+if @config['logging'] and @config['logging'].size > 0
+params[:logging] = {
+:cluster_logging => [
+{
+:types => @config['logging'],
+:enabled => true
+}
+]
 }
-
-
-:cluster_logging => [
-{
-:types => @config['logging'],
-:enabled => true
-}
-]
-}
-end
-params.delete(:version) if params[:version] == "latest"
+end
+params.delete(:version) if params[:version] == "latest"

-
-
-rescue Aws::EKS::Errors::UnsupportedAvailabilityZoneException => e
-# this isn't the dumbest thing we've ever done, but it's up there
+on_retry = Proc.new { |e|
+# soul-crushing, yet effective
 if e.message.match(/because (#{Regexp.quote(@config['region'])}[a-z]), the targeted availability zone, does not currently have sufficient capacity/)
 bad_az = Regexp.last_match(1)
 deletia = nil
-
-
-
-deletia = subnet
+mySubnets.each { |subnet|
+if subnet.az == bad_az
+deletia = subnet.cloud_id
 break
 end
 }
 raise e if deletia.nil?
 MU.log "#{bad_az} does not have EKS capacity. Dropping #{deletia} from ContainerCluster '#{@config['name']}' and retrying.", MU::NOTICE
-subnet_ids.delete(deletia)
-retry
+params[:resources_vpc_config][:subnet_ids].delete(deletia)
 end
-
-if e.message.match(/role with arn: #{Regexp.quote(role_arn)}.*?(could not be assumed|does not exist)/i)
-sleep 5
-retry
-else
-MU.log e.message, MU::WARN, details: params
-sleep 5
-retry
-end
-end
+}

-
-
-
-
-
-
-
-
+MU.retrier([Aws::EKS::Errors::UnsupportedAvailabilityZoneException, Aws::EKS::Errors::InvalidParameterException], on_retry: on_retry, max: subnet_ids.size) {
+MU.log "Creating EKS cluster #{@mu_name}", details: params
+MU::Cloud::AWS.eks(region: @config['region'], credentials: @config['credentials']).create_cluster(params)
+}
+@cloud_id = @mu_name
+
+loop_if = Proc.new {
+cloud_desc(use_cache: false).status != "ACTIVE"
+}
+
+MU.retrier(ignoreme: [Aws::EKS::Errors::ResourceNotFoundException], wait: 30, max: 60, loop_if: loop_if) { |retries, _wait|
+if cloud_desc.status == "FAILED"
 raise MuError, "EKS cluster #{@mu_name} had FAILED status"
 end
-if retries > 0 and (retries % 3) == 0 and status != "ACTIVE"
-MU.log "Waiting for EKS cluster #{@mu_name} to become active (currently #{status})", MU::NOTICE
+if retries > 0 and (retries % 3) == 0 and cloud_desc.status != "ACTIVE"
+MU.log "Waiting for EKS cluster #{@mu_name} to become active (currently #{cloud_desc.status})", MU::NOTICE
 end
-
-retries += 1
-rescue Aws::EKS::Errors::ResourceNotFoundException => e
-if retries < 30
-if retries > 0 and (retries % 3) == 0
-MU.log "Got #{e.message} trying to describe EKS cluster #{@mu_name}, waiting and retrying", MU::WARN, details: resp
-end
-sleep 30
-retries += 1
-retry
-else
-raise e
-end
-end while status != "ACTIVE"
+}

 MU.log "Creation of EKS cluster #{@mu_name} complete"
 else
 MU::Cloud::AWS.ecs(region: @config['region'], credentials: @config['credentials']).create_cluster(
 cluster_name: @mu_name
 )
-
+@cloud_id = @mu_name
 end
-@cloud_id = @mu_name
 end

 # Called automatically by {MU::Deploy#createResources}
 def groom

-
-@deploy.findLitterMate(type: "server_pools", name: @config["name"]+"workers")
-end
-resource_lookup = MU::Cloud::AWS.listInstanceTypes(@config['region'])[@config['region']]
-
+# EKS or Fargate-EKS: do Kubernetes things
 if @config['flavor'] == "EKS" or
 (@config['flavor'] == "Fargate" and !@config['containers'])
+
 # This will be needed if a loadbalancer has never been created in
 # this account; EKS applications might want one, but will fail in
 # confusing ways if this hasn't been done.
@@ -166,239 +125,17 @@ module MU
 rescue ::Aws::IAM::Errors::InvalidInput
 end

-
-
-
-tagme_elb = []
-@vpc.subnets.each { |s|
-tagme << s.cloud_id
-tagme_elb << s.cloud_id if !s.private?
-}
-rtbs = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_route_tables(
-filters: [ { name: "vpc-id", values: [@vpc.cloud_id] } ]
-).route_tables
-tagme.concat(rtbs.map { |r| r.route_table_id } )
-main_sg = @deploy.findLitterMate(type: "firewall_rules", name: "server_pool#{@config['name']}workers")
-tagme << main_sg.cloud_id if main_sg
-MU.log "Applying kubernetes.io tags to VPC resources", details: tagme
-MU::Cloud::AWS.createTag(tagme, "kubernetes.io/cluster/#{@mu_name}", "shared", credentials: @config['credentials'])
-MU::Cloud::AWS.createTag(tagme_elb, "kubernetes.io/cluster/elb", @mu_name, credentials: @config['credentials'])
+apply_kubernetes_tags
+create_fargate_kubernetes_profile if @config['flavor'] == "Fargate"
+apply_kubernetes_resources

-if @config['flavor'] == "Fargate"
-fargate_subnets = []
-@config["vpc"]["subnets"].each { |subnet|
-subnet_obj = @vpc.getSubnet(cloud_id: subnet["subnet_id"].to_s, name: subnet["subnet_name"].to_s)
-raise MuError, "Couldn't find a live subnet matching #{subnet} in #{@vpc} (#{@vpc.subnets})" if subnet_obj.nil?
-next if !subnet_obj.private?
-fargate_subnets << subnet_obj.cloud_id
-}
-podrole_arn = @deploy.findLitterMate(name: @config['name']+"pods", type: "roles").arn
-poolnum = 0
-poolthreads =[]
-@config['kubernetes_pools'].each { |selectors|
-profname = @mu_name+"-"+poolnum.to_s
-poolnum += 1
-desc = {
-:fargate_profile_name => profname,
-:cluster_name => @mu_name,
-:pod_execution_role_arn => podrole_arn,
-:selectors => selectors,
-:subnets => fargate_subnets.sort,
-:tags => @tags
-}
-begin
-resp = MU::Cloud::AWS.eks(region: @config['region'], credentials: @config['credentials']).describe_fargate_profile(
-cluster_name: @mu_name,
-fargate_profile_name: profname
-)
-if resp and resp.fargate_profile
-old_desc = MU.structToHash(resp.fargate_profile, stringify_keys: true)
-new_desc = MU.structToHash(desc, stringify_keys: true)
-["created_at", "status", "fargate_profile_arn"].each { |k|
-old_desc.delete(k)
-}
-old_desc["subnets"].sort!
-if !old_desc.eql?(new_desc)
-MU.log "Deleting Fargate profile #{profname} in order to apply changes", MU::WARN, details: desc
-MU::Cloud::AWS::ContainerCluster.purge_fargate_profile(profname, @mu_name, @config['region'], @credentials)
-else
-next
-end
-end
-rescue Aws::EKS::Errors::ResourceNotFoundException
-# This is just fine!
-end
-MU.log "Creating EKS Fargate profile #{profname}", details: desc
-resp = MU::Cloud::AWS.eks(region: @config['region'], credentials: @config['credentials']).create_fargate_profile(desc)
-begin
-resp = MU::Cloud::AWS.eks(region: @config['region'], credentials: @config['credentials']).describe_fargate_profile(
-cluster_name: @mu_name,
-fargate_profile_name: profname
-)
-sleep 1 if resp.fargate_profile.status == "CREATING"
-end while resp.fargate_profile.status == "CREATING"
-MU.log "Creation of EKS Fargate profile #{profname} complete"
-}
-end
-
-me = cloud_desc
-@endpoint = me.endpoint
-@cacert = me.certificate_authority.data
-@cluster = @mu_name
-if @config['flavor'] != "Fargate"
-resp = MU::Cloud::AWS.iam(credentials: @config['credentials']).get_role(role_name: @mu_name+"WORKERS")
-@worker_role_arn = resp.role.arn
-end
-kube_conf = @deploy.deploy_dir+"/kubeconfig-#{@config['name']}"
-gitlab_helper = @deploy.deploy_dir+"/gitlab-eks-helper-#{@config['name']}.sh"
-
-File.open(kube_conf, "w"){ |k|
-k.puts kube.result(binding)
-}
-gitlab = ERB.new(File.read(MU.myRoot+"/extras/gitlab-eks-helper.sh.erb"))
-File.open(gitlab_helper, "w"){ |k|
-k.puts gitlab.result(binding)
-}
-
-if @config['flavor'] != "Fargate"
-eks_auth = @deploy.deploy_dir+"/eks-auth-cm-#{@config['name']}.yaml"
-File.open(eks_auth, "w"){ |k|
-k.puts configmap.result(binding)
-}
-authmap_cmd = %Q{#{MU::Master.kubectl} --kubeconfig "#{kube_conf}" apply -f "#{eks_auth}"}
-MU.log "Configuring Kubernetes <=> IAM mapping for worker nodes", MU::NOTICE, details: authmap_cmd
-# maybe guard this mess
-retries = 0
-begin
-puts %x{#{authmap_cmd}}
-if $?.exitstatus != 0
-if retries >= 10
-raise MuError, "Failed to apply #{authmap_cmd}"
-end
-sleep 10
-retries += 1
-end
-end while $?.exitstatus != 0
-
-end
-
-# and this one
-admin_user_cmd = %Q{#{MU::Master.kubectl} --kubeconfig "#{kube_conf}" apply -f "#{MU.myRoot}/extras/admin-user.yaml"}
-admin_role_cmd = %Q{#{MU::Master.kubectl} --kubeconfig "#{kube_conf}" apply -f "#{MU.myRoot}/extras/admin-role-binding.yaml"}
-MU.log "Configuring Kubernetes admin-user and role", MU::NOTICE, details: admin_user_cmd+"\n"+admin_role_cmd
-%x{#{admin_user_cmd}}
-%x{#{admin_role_cmd}}
-
-if @config['kubernetes_resources']
-MU::Master.applyKubernetesResources(
-@config['name'],
-@config['kubernetes_resources'],
-kubeconfig: kube_conf,
-outputdir: @deploy.deploy_dir
-)
-end
-
-MU.log %Q{How to interact with your EKS cluster\nkubectl --kubeconfig "#{kube_conf}" get all\nkubectl --kubeconfig "#{kube_conf}" create -f some_k8s_deploy.yml\nkubectl --kubeconfig "#{kube_conf}" get nodes}, MU::SUMMARY
 elsif @config['flavor'] != "Fargate"
-
-cluster: @mu_name
-})
-existing = {}
-if resp
-uuids = []
-resp.container_instance_arns.each { |arn|
-uuids << arn.sub(/^.*?:container-instance\//, "")
-}
-if uuids.size > 0
-resp = MU::Cloud::AWS.ecs(region: @config['region'], credentials: @config['credentials']).describe_container_instances({
-cluster: @mu_name,
-container_instances: uuids
-})
-resp.container_instances.each { |i|
-existing[i.ec2_instance_id] = i
-}
-end
-end
-
-threads = []
-serverpool.listNodes.each { |mynode|
-resources = resource_lookup[node.cloud_desc.instance_type]
-threads << Thread.new(mynode) { |node|
-ident_doc = nil
-ident_doc_sig = nil
-if !node.windows?
-session = node.getSSHSession(10, 30)
-ident_doc = session.exec!("curl -s http://169.254.169.254/latest/dynamic/instance-identity/document/")
-ident_doc_sig = session.exec!("curl -s http://169.254.169.254/latest/dynamic/instance-identity/signature/")
-# else
-# begin
-# session = node.getWinRMSession(1, 60)
-# rescue StandardError # XXX
-# session = node.getSSHSession(1, 60)
-# end
-end
-MU.log "Identity document for #{node}", MU::DEBUG, details: ident_doc
-MU.log "Identity document signature for #{node}", MU::DEBUG, details: ident_doc_sig
-params = {
-:cluster => @mu_name,
-:instance_identity_document => ident_doc,
-:instance_identity_document_signature => ident_doc_sig,
-:total_resources => [
-{
-:name => "CPU",
-:type => "INTEGER",
-:integer_value => resources["vcpu"].to_i
-},
-{
-:name => "MEMORY",
-:type => "INTEGER",
-:integer_value => (resources["memory"]*1024*1024).to_i
-}
-]
-}
-if !existing.has_key?(node.cloud_id)
-MU.log "Registering ECS instance #{node} in cluster #{@mu_name}", details: params
-else
-params[:container_instance_arn] = existing[node.cloud_id].container_instance_arn
-MU.log "Updating ECS instance #{node} in cluster #{@mu_name}", MU::NOTICE, details: params
-end
-MU::Cloud::AWS.ecs(region: @config['region'], credentials: @config['credentials']).register_container_instance(params)
-
-}
-}
-threads.each { |t|
-t.join
-}
+manage_ecs_workers
 end

+# ECS: manage containers/services/tasks
 if @config['flavor'] != "EKS" and @config['containers']

-security_groups = []
-if @dependencies.has_key?("firewall_rule")
-@dependencies['firewall_rule'].values.each { |sg|
-security_groups << sg.cloud_id
-}
-end
-
-tasks_registered = 0
-retries = 0
-svc_resp = begin
-MU::Cloud::AWS.ecs(region: @config['region'], credentials: @config['credentials']).list_services(
-cluster: arn
-)
-rescue Aws::ECS::Errors::ClusterNotFoundException => e
-if retries < 10
-sleep 5
-retries += 1
-retry
-else
-raise e
-end
-end
-existing_svcs = svc_resp.service_arns.map { |s|
-s.gsub(/.*?:service\/(.*)/, '\1')
-}
-
 # Reorganize things so that we have services and task definitions
 # mapped to the set of containers they must contain
 tasks = {}
@@ -409,238 +146,35 @@ module MU
 tasks[service_name] << c
 }

+existing_svcs = list_ecs_services
+
 tasks.each_pair { |service_name, containers|
-launch_type = @config['flavor'] == "ECS" ? "EC2" : "FARGATE"
-cpu_total = 0
-mem_total = 0
 role_arn = nil
-lbs = []

-container_definitions = containers
-
+container_definitions, role, lbs = get_ecs_container_definitions(containers)
+role_arn ||= role
+
+cpu_total = mem_total = 0
+containers.each { |c|
 cpu_total += c['cpu']
 mem_total += c['memory']
-
-if c["role"] and !role_arn
-found = MU::MommaCat.findStray(
-@config['cloud'],
-"role",
-cloud_id: c["role"]["id"],
-name: c["role"]["name"],
-deploy_id: c["role"]["deploy_id"] || @deploy.deploy_id,
-dummy_ok: false
-)
-if found
-found = found.first
-if found and found.cloudobj
-role_arn = found.cloudobj.arn
-end
-else
-raise MuError, "Unable to find execution role from #{c["role"]}"
-end
-end
-
-if c['loadbalancers'] != []
-c['loadbalancers'].each {|lb|
-found = @deploy.findLitterMate(name: lb['name'], type: "loadbalancer")
-if found
-MU.log "Mapping LB #{found.mu_name} to service #{c['name']}", MU::INFO
-if found.cloud_desc.type != "classic"
-elb_groups = MU::Cloud::AWS.elb2(region: @config['region'], credentials: @config['credentials']).describe_target_groups({
-load_balancer_arn: found.cloud_desc.load_balancer_arn
-})
-matching_target_groups = []
-elb_groups.target_groups.each { |tg|
-if tg.port.to_i == lb['container_port'].to_i
-matching_target_groups << {
-arn: tg['target_group_arn'],
-name: tg['target_group_name']
-}
-end
-}
-if matching_target_groups.length >= 1
-MU.log "#{matching_target_groups.length} matching target groups found. Mapping #{container_name} to target group #{matching_target_groups.first['name']}", MU::INFO
-lbs << {
-container_name: container_name,
-container_port: lb['container_port'],
-target_group_arn: matching_target_groups.first[:arn]
-}
-else
-raise MuError, "No matching target groups found"
-end
-elsif @config['flavor'] == "Fargate" && found.cloud_desc.type == "classic"
-raise MuError, "Classic Load Balancers are not supported with Fargate."
-else
-MU.log "Mapping Classic LB #{found.mu_name} to service #{container_name}", MU::INFO
-lbs << {
-container_name: container_name,
-container_port: lb['container_port'],
-load_balancer_name: found.mu_name
-}
-end
-else
-raise MuError, "Unable to find loadbalancers from #{c["loadbalancers"].first['name']}"
-end
-}
-end
-
-params = {
-name: @mu_name+"-"+c['name'].upcase,
-image: c['image'],
-memory: c['memory'],
-cpu: c['cpu']
-}
-if !@config['vpc']
-c['hostname'] ||= @mu_name+"-"+c['name'].upcase
-end
-[:essential, :hostname, :start_timeout, :stop_timeout, :user, :working_directory, :disable_networking, :privileged, :readonly_root_filesystem, :interactive, :pseudo_terminal, :links, :entry_point, :command, :dns_servers, :dns_search_domains, :docker_security_options, :port_mappings, :repository_credentials, :mount_points, :environment, :volumes_from, :secrets, :depends_on, :extra_hosts, :docker_labels, :ulimits, :system_controls, :health_check, :resource_requirements].each { |param|
-if c.has_key?(param.to_s)
-params[param] = if !c[param.to_s].nil? and (c[param.to_s].is_a?(Hash) or c[param.to_s].is_a?(Array))
-MU.strToSym(c[param.to_s])
-else
-c[param.to_s]
-end
-end
-}
-if @config['vpc']
-[:hostname, :dns_servers, :dns_search_domains, :links].each { |param|
-if params[param]
-MU.log "Container parameter #{param.to_s} not supported in VPC clusters, ignoring", MU::WARN
-params.delete(param)
-end
-}
-end
-if @config['flavor'] == "Fargate"
-[:privileged, :docker_security_options].each { |param|
-if params[param]
-MU.log "Container parameter #{param.to_s} not supported in Fargate clusters, ignoring", MU::WARN
-params.delete(param)
-end
-}
-end
-if c['log_configuration']
-log_obj = @deploy.findLitterMate(name: c['log_configuration']['options']['awslogs-group'], type: "logs")
-if log_obj
-c['log_configuration']['options']['awslogs-group'] = log_obj.mu_name
-end
-params[:log_configuration] = MU.strToSym(c['log_configuration'])
-end
-params
 }
-
 cpu_total = 2 if cpu_total == 0
 mem_total = 2 if mem_total == 0

-
-family: @deploy.deploy_id,
-container_definitions: container_definitions,
-requires_compatibilities: [launch_type]
-}
-
-if @config['volumes']
-task_params[:volumes] = []
-@config['volumes'].each { |v|
-vol = { :name => v['name'] }
-if v['type'] == "host"
-vol[:host] = {}
-if v['host_volume_source_path']
-vol[:host][:source_path] = v['host_volume_source_path']
-end
-elsif v['type'] == "docker"
-vol[:docker_volume_configuration] = MU.strToSym(v['docker_volume_configuration'])
-else
-raise MuError, "Invalid volume type '#{v['type']}' specified in ContainerCluster '#{@mu_name}'"
-end
-task_params[:volumes] << vol
-}
-end
+task_def = register_ecs_task(container_definitions, service_name, cpu_total, mem_total, role_arn: role_arn)

-
-
-
-end
-if @config['flavor'] == "Fargate"
-task_params[:network_mode] = "awsvpc"
-task_params[:cpu] = cpu_total.to_i.to_s
-task_params[:memory] = mem_total.to_i.to_s
-end
-
-tasks_registered += 1
-MU.log "Registering task definition #{service_name} with #{container_definitions.size.to_s} containers"
-
-# XXX this helpfully keeps revisions, but let's compare anyway and avoid cluttering with identical ones
-resp = MU::Cloud::AWS.ecs(region: @config['region'], credentials: @config['credentials']).register_task_definition(task_params)
+create_update_ecs_service(task_def, service_name, lbs, existing_svcs)
+existing_svcs << service_name
+}

-
-
-
-:
-:service_name => service_name,
-:launch_type => launch_type,
-:task_definition => task_def,
-:load_balancers => lbs
+if tasks.size > 0
+tasks_failing = false
+MU.retrier(wait: 15, max: 10, loop_if: Proc.new { tasks_failing }){ |retries, _wait|
+tasks_failing = !MU::Cloud::AWS::ContainerCluster.tasksRunning?(@mu_name, log: (retries > 0), region: @config['region'], credentials: @config['credentials'])
 }
-if @config['vpc']
-subnet_ids = []
-all_public = true
-
-subnets =
-if @config["vpc"]["subnets"].empty?
-@vpc.subnets
-else
-subnet_objects= []
-@config["vpc"]["subnets"].each { |subnet|
-sobj = @vpc.getSubnet(cloud_id: subnet["subnet_id"], name: subnet["subnet_name"])
-if sobj.nil?
-MU.log "Got nil result from @vpc.getSubnet(cloud_id: #{subnet["subnet_id"]}, name: #{subnet["subnet_name"]})", MU::WARN
-else
-subnet_objects << sobj
-end
-}
-subnet_objects
-end
-
-subnets.each { |subnet_obj|
-subnet_ids << subnet_obj.cloud_id
-all_public = false if subnet_obj.private?
-}
-
-service_params[:network_configuration] = {
-:awsvpc_configuration => {
-:subnets => subnet_ids,
-:security_groups => security_groups,
-:assign_public_ip => all_public ? "ENABLED" : "DISABLED"
-}
-}
-end
-
-if !existing_svcs.include?(service_name)
-MU.log "Creating Service #{service_name}"

-
-else
-service_params[:service] = service_params[:service_name].dup
-service_params.delete(:service_name)
-service_params.delete(:launch_type)
-MU.log "Updating Service #{service_name}", MU::NOTICE, details: service_params
-
-resp = MU::Cloud::AWS.ecs(region: @config['region'], credentials: @config['credentials']).update_service(service_params)
-end
-existing_svcs << service_name
-}
-
-max_retries = 10
-retries = 0
-if tasks_registered > 0
-retry_me = false
-begin
-retry_me = !MU::Cloud::AWS::ContainerCluster.tasksRunning?(@mu_name, log: (retries > 0), region: @config['region'], credentials: @config['credentials'])
-retries += 1
-sleep 15 if retry_me
-end while retry_me and retries < max_retries
-tasks = nil
-
-if retry_me
+if tasks_failing
 MU.log "Not all tasks successfully launched in cluster #{@mu_name}", MU::WARN
 end
 end
@@ -876,79 +410,14 @@ MU.log c.name, MU::NOTICE, details: t
 MU.log "AWS::ContainerCluster.cleanup: need to support flags['known']", MU::DEBUG, details: flags
 MU.log "Placeholder: AWS ContainerCluster artifacts do not support tags, so ignoremaster cleanup flag has no effect", MU::DEBUG, details: ignoremaster

-
-
-if resp and resp.cluster_arns and resp.cluster_arns.size > 0
-resp.cluster_arns.each { |arn|
-if arn.match(/:cluster\/(#{MU.deploy_id}[^:]+)$/)
-cluster = Regexp.last_match[1]
+purge_ecs_clusters(noop: noop, region: region, credentials: credentials)

-
-cluster: arn
-)
-if svc_resp and svc_resp.service_arns
-svc_resp.service_arns.each { |svc_arn|
-svc_name = svc_arn.gsub(/.*?:service\/(.*)/, '\1')
-MU.log "Deleting Service #{svc_name} from ECS Cluster #{cluster}"
-if !noop
-MU::Cloud::AWS.ecs(region: region, credentials: credentials).delete_service(
-cluster: arn,
-service: svc_name,
-force: true # man forget scaling up and down if we're just deleting the cluster
-)
-end
-}
-end
-
-instances = MU::Cloud::AWS.ecs(credentials: credentials, region: region).list_container_instances({
-cluster: cluster
-})
-if instances
-instances.container_instance_arns.each { |instance_arn|
-uuid = instance_arn.sub(/^.*?:container-instance\//, "")
-MU.log "Deregistering instance #{uuid} from ECS Cluster #{cluster}"
-if !noop
-resp = MU::Cloud::AWS.ecs(credentials: credentials, region: region).deregister_container_instance({
-cluster: cluster,
-container_instance: uuid,
-force: true,
-})
-end
-}
-end
-MU.log "Deleting ECS Cluster #{cluster}"
-if !noop
-# TODO de-register container instances
-begin
-MU::Cloud::AWS.ecs(credentials: credentials, region: region).delete_cluster(
-cluster: cluster
-)
-rescue Aws::ECS::Errors::ClusterContainsTasksException
-sleep 5
-retry
-end
-end
-end
-}
-end
-
-tasks = MU::Cloud::AWS.ecs(region: region, credentials: credentials).list_task_definitions(
-family_prefix: MU.deploy_id
-)
+purge_eks_clusters(noop: noop, region: region, credentials: credentials)

-
-tasks.task_definition_arns.each { |arn|
-MU.log "Deregistering Fargate task definition #{arn}"
-if !noop
-MU::Cloud::AWS.ecs(region: region, credentials: credentials).deregister_task_definition(
-task_definition: arn
-)
-end
-}
-end
+end
 end

+def self.purge_eks_clusters(noop: false, region: MU.curRegion, credentials: nil)
 return if !MU::Cloud::AWS::ContainerCluster.EKSRegions.include?(region)
-
 resp = begin
 MU::Cloud::AWS.eks(credentials: credentials, region: region).list_clusters
 rescue Aws::EKS::Errors::AccessDeniedException
@@ -957,73 +426,120 @@ MU.log c.name, MU::NOTICE, details: t
 return
 end

+return if !resp or !resp.clusters

-
-
-if cluster.match(/^#{MU.deploy_id}-/)
+resp.clusters.each { |cluster|
+if cluster.match(/^#{MU.deploy_id}-/)

-
-
-
+desc = MU::Cloud::AWS.eks(credentials: credentials, region: region).describe_cluster(
+name: cluster
+).cluster

-
-
-
-
-
-
-
-
-
-
+profiles = MU::Cloud::AWS.eks(region: region, credentials: credentials).list_fargate_profiles(
+cluster_name: cluster
+)
+if profiles and profiles.fargate_profile_names
+profiles.fargate_profile_names.each { |profile|
+MU.log "Deleting Fargate EKS profile #{profile}"
+next if noop
+MU::Cloud::AWS::ContainerCluster.purge_fargate_profile(profile, cluster, region, credentials)
+}
+end
+
+remove_kubernetes_tags(cluster, desc, region: region, credentials: credentials, noop: noop)
+
+MU.log "Deleting EKS Cluster #{cluster}"
+next if noop
+MU::Cloud::AWS.eks(credentials: credentials, region: region).delete_cluster(
+name: cluster
+)
+
+status = nil
+loop_if = Proc.new {
+status != "FAILED"
+}

-
-
-
-
-)
-
-# subnets
-untag.concat(subnets.map { |s| s.subnet_id } )
-rtbs = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_route_tables(
-filters: [ { name: "vpc-id", values: [desc.resources_vpc_config.vpc_id] } ]
-).route_tables
-untag.concat(rtbs.map { |r| r.route_table_id } )
-untag.concat(desc.resources_vpc_config.subnet_ids)
-untag.concat(desc.resources_vpc_config.security_group_ids)
-MU.log "Removing Kubernetes tags from VPC resources for #{cluster}", details: untag
-if !noop
-MU::Cloud::AWS.removeTag("kubernetes.io/cluster/#{cluster}", "shared", untag)
-MU::Cloud::AWS.removeTag("kubernetes.io/cluster/elb", cluster, untag)
+MU.retrier(ignoreme: [Aws::EKS::Errors::ResourceNotFoundException], wait: 60){ |retries, _wait|
+status = MU::Cloud::AWS.eks(credentials: credentials, region: region).describe_cluster(
+name: cluster
+).cluster.status
+if retries > 0 and (retries % 3) == 0
+MU.log "Waiting for EKS cluster #{cluster} to finish deleting (status #{status})", MU::NOTICE
 end
-
-if !noop
-MU::Cloud::AWS.eks(credentials: credentials, region: region).delete_cluster(
-name: cluster
-)
-begin
-status = nil
-retries = 0
-begin
-deletion = MU::Cloud::AWS.eks(credentials: credentials, region: region).describe_cluster(
-name: cluster
-)
-status = deletion.cluster.status
-if retries > 0 and (retries % 3) == 0
-MU.log "Waiting for EKS cluster #{cluster} to finish deleting (status #{status})", MU::NOTICE
-end
-retries += 1
-sleep 30
-end while status
-rescue Aws::EKS::Errors::ResourceNotFoundException
-# this is what we want
-end
+}
 # MU::Cloud::AWS::Server.removeIAMProfile(cluster)
-
+end
+}
+end
+private_class_method :purge_eks_clusters
+
+def self.purge_ecs_clusters(noop: false, region: MU.curRegion, credentials: nil)
+resp = MU::Cloud::AWS.ecs(credentials: credentials, region: region).list_clusters
+
+return if !resp or !resp.cluster_arns or resp.cluster_arns.empty?
+
+resp.cluster_arns.each { |arn|
+if arn.match(/:cluster\/(#{MU.deploy_id}[^:]+)$/)
+cluster = Regexp.last_match[1]
+
+svc_resp = MU::Cloud::AWS.ecs(region: region, credentials: credentials).list_services(
+cluster: arn
+)
+if svc_resp and svc_resp.service_arns
+svc_resp.service_arns.each { |svc_arn|
+svc_name = svc_arn.gsub(/.*?:service\/(.*)/, '\1')
+MU.log "Deleting Service #{svc_name} from ECS Cluster #{cluster}"
+next if noop
+MU::Cloud::AWS.ecs(region: region, credentials: credentials).delete_service(
+cluster: arn,
+service: svc_name,
+force: true # man forget scaling up and down if we're just deleting the cluster
+)
+}
+end
+
+instances = MU::Cloud::AWS.ecs(credentials: credentials, region: region).list_container_instances({
+cluster: cluster
+})
+if instances
+instances.container_instance_arns.each { |instance_arn|
+uuid = instance_arn.sub(/^.*?:container-instance\//, "")
+MU.log "Deregistering instance #{uuid} from ECS Cluster #{cluster}"
+next if noop
+resp = MU::Cloud::AWS.ecs(credentials: credentials, region: region).deregister_container_instance({
+cluster: cluster,
+container_instance: uuid,
+force: true,
+})
+}
+end
+MU.log "Deleting ECS Cluster #{cluster}"
+next if noop
+MU.retrier([Aws::ECS::Errors::ClusterContainsTasksException], wait: 5){
+# TODO de-register container instances
+MU::Cloud::AWS.ecs(credentials: credentials, region: region).delete_cluster(
+cluster: cluster
+)
+}
+end
+}
+
+tasks = MU::Cloud::AWS.ecs(region: region, credentials: credentials).list_task_definitions(
+family_prefix: MU.deploy_id
+)
+
+if tasks and tasks.task_definition_arns
+tasks.task_definition_arns.each { |arn|
+MU.log "Deregistering Fargate task definition #{arn}"
+if !noop
+MU::Cloud::AWS.ecs(region: region, credentials: credentials).deregister_task_definition(
+task_definition: arn
+)
 end
 }
 end
 end
+private_class_method :purge_ecs_clusters

 # Locate an existing container_cluster.
 # @return [Hash<String,OpenStruct>]: The cloud provider's complete descriptions of matching container_clusters.
@@ -1034,10 +550,8 @@ MU.log c.name, MU::NOTICE, details: t
 resp = MU::Cloud::AWS.ecs(region: args[:region], credentials: args[:credentials]).describe_clusters(clusters: [args[:cloud_id]])
 if resp.clusters and resp.clusters.size > 0
 found[args[:cloud_id]] = resp.clusters.first
-
-
-# XXX name collision is possible here
-if found.size == 0
+else
+# XXX misses due to name collision are possible here
 desc = MU::Cloud::AWS.eks(region: args[:region], credentials: args[:credentials]).describe_cluster(name: args[:cloud_id])
 found[args[:cloud_id]] = desc.cluster if desc and desc.cluster
 end
@@ -1045,14 +559,14 @@ MU.log c.name, MU::NOTICE, details: t
 next_token = nil
 begin
 resp = MU::Cloud::AWS.ecs(region: args[:region], credentials: args[:credentials]).list_clusters(next_token: next_token)
-if resp
-
-
-
-
-
-
-
+break if !resp or !resp.cluster_arns
+next_token = resp.next_token
+names = resp.cluster_arns.map { |a| a.sub(/.*?:cluster\//, '') }
+descs = MU::Cloud::AWS.ecs(region: args[:region], credentials: args[:credentials]).describe_clusters(clusters: names)
+if descs and descs.clusters
+descs.clusters.each { |c|
+found[c.cluster_name] = c
+}
 end
 end while next_token

@@ -1060,14 +574,12 @@ MU.log c.name, MU::NOTICE, details: t
 next_token = nil
 begin
 resp = MU::Cloud::AWS.eks(region: args[:region], credentials: args[:credentials]).list_clusters(next_token: next_token)
-if resp
-
-
-
-
-
-next_token = resp.next_token
-end
+break if !resp or !resp.clusters
+resp.clusters.each { |c|
+desc = MU::Cloud::AWS.eks(region: args[:region], credentials: args[:credentials]).describe_cluster(name: c)
+found[c] = desc.cluster if desc and desc.cluster
+}
+next_token = resp.next_token
 rescue Aws::EKS::Errors::AccessDeniedException
 # not all regions support EKS
 end while next_token
@@ -1709,7 +1221,7 @@ MU.log c.name, MU::NOTICE, details: t

 cluster["flavor"] = "EKS" if cluster["flavor"].match(/^Kubernetes$/i)

-if cluster["flavor"] == "ECS" and cluster["kubernetes"] and !MU::Cloud::AWS.isGovCloud?(cluster["region"])
+if cluster["flavor"] == "ECS" and cluster["kubernetes"] and !MU::Cloud::AWS.isGovCloud?(cluster["region"]) and !cluster["containers"] and MU::Cloud::AWS::ContainerCluster.EKSRegions.include?(cluster['region'])
 cluster["flavor"] = "EKS"
 MU.log "Setting flavor of ContainerCluster '#{cluster['name']}' to EKS ('kubernetes' stanza was specified)", MU::NOTICE
 end
@@ -1869,7 +1381,8 @@ MU.log c.name, MU::NOTICE, details: t


 if ["ECS", "EKS"].include?(cluster["flavor"])
-
+version = cluster["kubernetes"] ? cluster['kubernetes']['version'] : nil
+std_ami = getStandardImage(cluster["flavor"], cluster['region'], version: version, gpu: cluster['gpu'])
 cluster["host_image"] ||= std_ami
 if cluster["host_image"] != std_ami
 if cluster["flavor"] == "ECS"
@@ -2033,28 +1546,463 @@ MU.log c.name, MU::NOTICE, details: t
|
|
2033
1546
|
sleep 10
|
2034
1547
|
retry
|
2035
1548
|
end
|
2036
|
-
|
2037
|
-
|
2038
|
-
begin
|
2039
|
-
begin
|
1549
|
+
|
1550
|
+
loop_if = Proc.new {
|
2040
1551
|
check = MU::Cloud::AWS.eks(region: region, credentials: credentials).describe_fargate_profile(
|
2041
1552
|
cluster_name: cluster,
|
2042
1553
|
fargate_profile_name: profile
|
2043
1554
|
)
|
2044
|
-
|
2045
|
-
|
2046
|
-
end
|
1555
|
+
check.fargate_profile.status == "DELETING"
|
1556
|
+
}
|
2047
1557
|
|
1558
|
+
MU.retrier(ignoreme: [Aws::EKS::Errors::ResourceNotFoundException], wait: 30, max: 40, loop_if: loop_if) {
|
2048
1559
|
if check.fargate_profile.status != "DELETING"
|
2049
|
-
MU.log "Failed to delete Fargate EKS profile #{profile}", MU::ERR, details: check
|
2050
1560
|
break
|
2051
|
-
|
2052
|
-
if retries > 0 and (retries % 3) == 0
|
1561
|
+
elsif retries > 0 and (retries % 3) == 0
|
2053
1562
|
MU.log "Waiting for Fargate EKS profile #{profile} to delete (status #{check.fargate_profile.status})", MU::NOTICE
|
2054
1563
|
end
|
2055
|
-
|
2056
|
-
|
2057
|
-
|
1564
|
+
}
|
1565
|
+
end
|
1566
|
+
|
1567
|
+
private
|
1568
|
+
|
1569
|
+
def apply_kubernetes_resources
|
1570
|
+
kube = ERB.new(File.read(MU.myRoot+"/cookbooks/mu-tools/templates/default/kubeconfig-eks.erb"))
|
1571
|
+
configmap = ERB.new(File.read(MU.myRoot+"/extras/aws-auth-cm.yaml.erb"))
|
1572
|
+
@endpoint = cloud_desc.endpoint
|
1573
|
+
@cacert = cloud_desc.certificate_authority.data
|
1574
|
+
@cluster = @mu_name
|
1575
|
+
if @config['flavor'] != "Fargate"
|
1576
|
+
resp = MU::Cloud::AWS.iam(credentials: @config['credentials']).get_role(role_name: @mu_name+"WORKERS")
|
1577
|
+
@worker_role_arn = resp.role.arn
|
1578
|
+
end
|
1579
|
+
kube_conf = @deploy.deploy_dir+"/kubeconfig-#{@config['name']}"
|
1580
|
+
gitlab_helper = @deploy.deploy_dir+"/gitlab-eks-helper-#{@config['name']}.sh"
|
1581
|
+
|
1582
|
+
File.open(kube_conf, "w"){ |k|
|
1583
|
+
k.puts kube.result(binding)
|
1584
|
+
}
|
1585
|
+
gitlab = ERB.new(File.read(MU.myRoot+"/extras/gitlab-eks-helper.sh.erb"))
|
1586
|
+
File.open(gitlab_helper, "w"){ |k|
|
1587
|
+
k.puts gitlab.result(binding)
|
1588
|
+
}
|
1589
|
+
|
1590
|
+
if @config['flavor'] != "Fargate"
|
1591
|
+
eks_auth = @deploy.deploy_dir+"/eks-auth-cm-#{@config['name']}.yaml"
|
1592
|
+
File.open(eks_auth, "w"){ |k|
|
1593
|
+
k.puts configmap.result(binding)
|
1594
|
+
}
|
1595
|
+
authmap_cmd = %Q{#{MU::Master.kubectl} --kubeconfig "#{kube_conf}" apply -f "#{eks_auth}"}
|
1596
|
+
|
1597
|
+
MU.log "Configuring Kubernetes <=> IAM mapping for worker nodes", MU::NOTICE, details: authmap_cmd
|
1598
|
+
|
1599
|
+
MU.retrier(max: 10, wait: 10, loop_if: Proc.new {$?.exitstatus != 0}){
|
1600
|
+
puts %x{#{authmap_cmd}}
|
1601
|
+
}
|
1602
|
+
raise MuError, "Failed to apply #{authmap_cmd}" if $?.exitstatus != 0
|
1603
|
+
end
|
1604
|
+
|
1605
|
+
admin_user_cmd = %Q{#{MU::Master.kubectl} --kubeconfig "#{kube_conf}" apply -f "#{MU.myRoot}/extras/admin-user.yaml"}
|
1606
|
+
admin_role_cmd = %Q{#{MU::Master.kubectl} --kubeconfig "#{kube_conf}" apply -f "#{MU.myRoot}/extras/admin-role-binding.yaml"}
|
1607
|
+
MU.log "Configuring Kubernetes admin-user and role", MU::NOTICE, details: admin_user_cmd+"\n"+admin_role_cmd
|
1608
|
+
%x{#{admin_user_cmd}}
|
1609
|
+
%x{#{admin_role_cmd}}
|
1610
|
+
|
1611
|
+
if @config['kubernetes_resources']
|
1612
|
+
MU::Master.applyKubernetesResources(
|
1613
|
+
@config['name'],
|
1614
|
+
@config['kubernetes_resources'],
|
1615
|
+
kubeconfig: kube_conf,
|
1616
|
+
outputdir: @deploy.deploy_dir
|
1617
|
+
)
|
1618
|
+
end
|
1619
|
+
|
1620
|
+
MU.log %Q{How to interact with your EKS cluster\nkubectl --kubeconfig "#{kube_conf}" get all\nkubectl --kubeconfig "#{kube_conf}" create -f some_k8s_deploy.yml\nkubectl --kubeconfig "#{kube_conf}" get nodes}, MU::SUMMARY
|
1621
|
+
end
|
1622
|
+
|
1623
|
+
def create_fargate_kubernetes_profile
|
1624
|
+
fargate_subnets = mySubnets.map { |s| s.cloud_id }
|
1625
|
+
|
1626
|
+
podrole_arn = @deploy.findLitterMate(name: @config['name']+"pods", type: "roles").arn
|
1627
|
+
poolnum = 0
|
1628
|
+
|
1629
|
+
@config['kubernetes_pools'].each { |selectors|
|
1630
|
+
profname = @mu_name+"-"+poolnum.to_s
|
1631
|
+
poolnum += 1
|
1632
|
+
desc = {
|
1633
|
+
:fargate_profile_name => profname,
|
1634
|
+
:cluster_name => @mu_name,
|
1635
|
+
:pod_execution_role_arn => podrole_arn,
|
1636
|
+
:selectors => selectors,
|
1637
|
+
:subnets => fargate_subnets.sort,
|
1638
|
+
:tags => @tags
|
1639
|
+
}
|
1640
|
+
begin
|
1641
|
+
resp = MU::Cloud::AWS.eks(region: @config['region'], credentials: @config['credentials']).describe_fargate_profile(
|
1642
|
+
cluster_name: @mu_name,
|
1643
|
+
fargate_profile_name: profname
|
1644
|
+
)
|
1645
|
+
if resp and resp.fargate_profile
|
1646
|
+
old_desc = MU.structToHash(resp.fargate_profile, stringify_keys: true)
|
1647
|
+
new_desc = MU.structToHash(desc, stringify_keys: true)
|
1648
|
+
["created_at", "status", "fargate_profile_arn"].each { |k|
|
1649
|
+
old_desc.delete(k)
|
1650
|
+
}
|
1651
|
+
old_desc["subnets"].sort!
|
1652
|
+
if !old_desc.eql?(new_desc)
|
1653
|
+
MU.log "Deleting Fargate profile #{profname} in order to apply changes", MU::WARN, details: desc
|
1654
|
+
MU::Cloud::AWS::ContainerCluster.purge_fargate_profile(profname, @mu_name, @config['region'], @credentials)
|
1655
|
+
else
|
1656
|
+
next
|
1657
|
+
end
|
1658
|
+
end
|
1659
|
+
rescue Aws::EKS::Errors::ResourceNotFoundException
|
1660
|
+
# This is just fine!
|
1661
|
+
end
|
1662
|
+
MU.log "Creating EKS Fargate profile #{profname}", details: desc
|
1663
|
+
resp = MU::Cloud::AWS.eks(region: @config['region'], credentials: @config['credentials']).create_fargate_profile(desc)
|
1664
|
+
begin
|
1665
|
+
resp = MU::Cloud::AWS.eks(region: @config['region'], credentials: @config['credentials']).describe_fargate_profile(
|
1666
|
+
cluster_name: @mu_name,
|
1667
|
+
fargate_profile_name: profname
|
1668
|
+
)
|
1669
|
+
sleep 1 if resp.fargate_profile.status == "CREATING"
|
1670
|
+
end while resp.fargate_profile.status == "CREATING"
|
1671
|
+
MU.log "Creation of EKS Fargate profile #{profname} complete"
|
1672
|
+
}
|
1673
|
+
end
|
1674
|
+
|
1675
|
+
def self.remove_kubernetes_tags(cluster, desc, region: MU.myRegion, credentials: nil, noop: false)
|
1676
|
+
untag = []
|
1677
|
+
untag << desc.resources_vpc_config.vpc_id
|
1678
|
+
subnets = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_subnets(
|
1679
|
+
filters: [ { name: "vpc-id", values: [desc.resources_vpc_config.vpc_id] } ]
|
1680
|
+
).subnets
|
1681
|
+
|
1682
|
+
# subnets
|
1683
|
+
untag.concat(subnets.map { |s| s.subnet_id } )
|
1684
|
+
rtbs = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_route_tables(
|
1685
|
+
filters: [ { name: "vpc-id", values: [desc.resources_vpc_config.vpc_id] } ]
|
1686
|
+
).route_tables
|
1687
|
+
untag.concat(rtbs.map { |r| r.route_table_id } )
|
1688
|
+
untag.concat(desc.resources_vpc_config.subnet_ids)
|
1689
|
+
untag.concat(desc.resources_vpc_config.security_group_ids)
|
1690
|
+
MU.log "Removing Kubernetes tags from VPC resources for #{cluster}", details: untag
|
1691
|
+
if !noop
|
1692
|
+
MU::Cloud::AWS.removeTag("kubernetes.io/cluster/#{cluster}", "shared", untag)
|
1693
|
+
MU::Cloud::AWS.removeTag("kubernetes.io/cluster/elb", cluster, untag)
|
1694
|
+
end
|
1695
|
+
end
|
1696
|
+
private_class_method :remove_kubernetes_tags
|
1697
|
+
|
1698
|
+
def apply_kubernetes_tags
|
1699
|
+
tagme = [@vpc.cloud_id]
|
1700
|
+
tagme_elb = []
|
1701
|
+
@vpc.subnets.each { |s|
|
1702
|
+
tagme << s.cloud_id
|
1703
|
+
tagme_elb << s.cloud_id if !s.private?
|
1704
|
+
}
|
1705
|
+
rtbs = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_route_tables(
|
1706
|
+
filters: [ { name: "vpc-id", values: [@vpc.cloud_id] } ]
|
1707
|
+
).route_tables
|
1708
|
+
tagme.concat(rtbs.map { |r| r.route_table_id } )
|
1709
|
+
main_sg = @deploy.findLitterMate(type: "firewall_rules", name: "server_pool#{@config['name']}workers")
|
1710
|
+
tagme << main_sg.cloud_id if main_sg
|
1711
|
+
MU.log "Applying kubernetes.io tags to VPC resources", details: tagme
|
1712
|
+
MU::Cloud::AWS.createTag(tagme, "kubernetes.io/cluster/#{@mu_name}", "shared", credentials: @config['credentials'])
|
1713
|
+
MU::Cloud::AWS.createTag(tagme_elb, "kubernetes.io/cluster/elb", @mu_name, credentials: @config['credentials'])
|
1714
|
+
end
|
1715
|
+
|
1716
|
+
def manage_ecs_workers
|
1717
|
+
resp = MU::Cloud::AWS.ecs(region: @config['region'], credentials: @config['credentials']).list_container_instances({
|
1718
|
+
cluster: @mu_name
|
1719
|
+
})
|
1720
|
+
existing = {}
|
1721
|
+
if resp
|
1722
|
+
uuids = []
|
1723
|
+
resp.container_instance_arns.each { |arn|
|
1724
|
+
uuids << arn.sub(/^.*?:container-instance\//, "")
|
1725
|
+
}
|
1726
|
+
if uuids.size > 0
|
1727
|
+
resp = MU::Cloud::AWS.ecs(region: @config['region'], credentials: @config['credentials']).describe_container_instances({
|
1728
|
+
cluster: @mu_name,
|
1729
|
+
container_instances: uuids
|
1730
|
+
})
|
1731
|
+
resp.container_instances.each { |i|
|
1732
|
+
existing[i.ec2_instance_id] = i
|
1733
|
+
}
|
1734
|
+
end
|
1735
|
+
end
|
1736
|
+
|
1737
|
+
threads = []
|
1738
|
+
resource_lookup = MU::Cloud::AWS.listInstanceTypes(@config['region'])[@config['region']]
|
1739
|
+
serverpool = if ['EKS', 'ECS'].include?(@config['flavor'])
|
1740
|
+
@deploy.findLitterMate(type: "server_pools", name: @config["name"]+"workers")
|
1741
|
+
end
|
1742
|
+
serverpool.listNodes.each { |mynode|
|
1743
|
+
resources = resource_lookup[node.cloud_desc.instance_type]
|
1744
|
+
threads << Thread.new(mynode) { |node|
|
1745
|
+
ident_doc = nil
|
1746
|
+
ident_doc_sig = nil
|
1747
|
+
if !node.windows?
|
1748
|
+
session = node.getSSHSession(10, 30)
|
1749
|
+
ident_doc = session.exec!("curl -s http://169.254.169.254/latest/dynamic/instance-identity/document/")
|
1750
|
+
ident_doc_sig = session.exec!("curl -s http://169.254.169.254/latest/dynamic/instance-identity/signature/")
|
1751
|
+
# else
|
1752
|
+
# begin
|
1753
|
+
# session = node.getWinRMSession(1, 60)
|
1754
|
+
# rescue StandardError # XXX
|
1755
|
+
# session = node.getSSHSession(1, 60)
|
1756
|
+
# end
|
1757
|
+
end
|
1758
|
+
MU.log "Identity document for #{node}", MU::DEBUG, details: ident_doc
|
1759
|
+
MU.log "Identity document signature for #{node}", MU::DEBUG, details: ident_doc_sig
|
1760
|
+
params = {
|
1761
|
+
:cluster => @mu_name,
|
1762
|
+
:instance_identity_document => ident_doc,
|
1763
|
+
:instance_identity_document_signature => ident_doc_sig,
|
1764
|
+
:total_resources => [
|
1765
|
+
{
|
1766
|
+
:name => "CPU",
|
1767
|
+
:type => "INTEGER",
|
1768
|
+
:integer_value => resources["vcpu"].to_i
|
1769
|
+
},
|
1770
|
+
{
|
1771
|
+
:name => "MEMORY",
|
1772
|
+
:type => "INTEGER",
|
1773
|
+
:integer_value => (resources["memory"]*1024*1024).to_i
|
1774
|
+
}
|
1775
|
+
]
|
1776
|
+
}
|
1777
|
+
if !existing.has_key?(node.cloud_id)
|
1778
|
+
MU.log "Registering ECS instance #{node} in cluster #{@mu_name}", details: params
|
1779
|
+
else
|
1780
|
+
params[:container_instance_arn] = existing[node.cloud_id].container_instance_arn
|
1781
|
+
MU.log "Updating ECS instance #{node} in cluster #{@mu_name}", MU::NOTICE, details: params
|
1782
|
+
end
|
1783
|
+
MU::Cloud::AWS.ecs(region: @config['region'], credentials: @config['credentials']).register_container_instance(params)
|
1784
|
+
|
1785
|
+
}
|
1786
|
+
}
|
1787
|
+
threads.each { |t|
|
1788
|
+
t.join
|
1789
|
+
}
|
1790
|
+
end
|
1791
|
+
|
1792
|
+
        def get_ecs_loadbalancers(container_name)
          lbs = []

          if @loadbalancers and !@loadbalancers.empty?
            @loadbalancers.each { |lb|
              MU.log "Mapping LB #{lb.mu_name} to service #{container_name}", MU::INFO
              if lb.cloud_desc.type != "classic"
                elb_groups = MU::Cloud::AWS.elb2(region: @config['region'], credentials: @config['credentials']).describe_target_groups({
                  load_balancer_arn: lb.cloud_desc.load_balancer_arn
                })
                matching_target_groups = []
                elb_groups.target_groups.each { |tg|
                  if tg.port.to_i == lb['container_port'].to_i
                    matching_target_groups << {
                      arn: tg['target_group_arn'],
                      name: tg['target_group_name']
                    }
                  end
                }
                if matching_target_groups.length >= 1
                  MU.log "#{matching_target_groups.length} matching target groups found for this LB. Mapping #{container_name} to target group #{matching_target_groups.first[:name]}", MU::INFO
                  lbs << {
                    container_name: container_name,
                    container_port: lb['container_port'],
                    target_group_arn: matching_target_groups.first[:arn]
                  }
                else
                  raise MuError, "No matching target groups found for this LB"
                end
              elsif @config['flavor'] == "Fargate" && lb.cloud_desc.type == "classic"
                raise MuError, "Classic Load Balancers are not supported with Fargate."
              else
                MU.log "Mapping Classic LB #{lb.mu_name} to service #{container_name}", MU::INFO
                lbs << {
                  container_name: container_name,
                  container_port: lb['container_port'],
                  load_balancer_name: lb.mu_name
                }
              end
            }
          end

          lbs
        end

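The target-group lookup above amounts to listing an ALB's or NLB's target groups and picking one by port. A standalone sketch with aws-sdk-elasticloadbalancingv2 (the load balancer ARN and port are placeholders):

require 'aws-sdk-elasticloadbalancingv2'

elb2 = Aws::ElasticLoadBalancingV2::Client.new(region: "us-east-1")

# Hypothetical values
lb_arn         = "arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/app/example/0123456789abcdef"
container_port = 8080

resp = elb2.describe_target_groups(load_balancer_arn: lb_arn)

# Pick the first target group whose port matches the container port.
tg = resp.target_groups.find { |g| g.port == container_port }
puts tg ? tg.target_group_arn : "no matching target group"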
        def get_ecs_container_definitions(containers)
          role_arn = nil
          lbs = []

          defs = containers.map { |c|
            container_name = @mu_name+"-"+c['name'].upcase
            lbs.concat(get_ecs_loadbalancers(container_name))

            if c["role"] and !role_arn
              found = MU::MommaCat.findStray(
                @config['cloud'],
                "role",
                cloud_id: c["role"]["id"],
                name: c["role"]["name"],
                deploy_id: c["role"]["deploy_id"] || @deploy.deploy_id,
                dummy_ok: false
              )
              if found
                found = found.first
                if found and found.cloudobj
                  role_arn = found.cloudobj.arn
                end
              else
                raise MuError, "Unable to find execution role from #{c["role"]}"
              end
            end

            params = {
              name: @mu_name+"-"+c['name'].upcase,
              image: c['image'],
              memory: c['memory'],
              cpu: c['cpu']
            }
            if !@config['vpc']
              c['hostname'] ||= @mu_name+"-"+c['name'].upcase
            end
            [:essential, :hostname, :start_timeout, :stop_timeout, :user, :working_directory, :disable_networking, :privileged, :readonly_root_filesystem, :interactive, :pseudo_terminal, :links, :entry_point, :command, :dns_servers, :dns_search_domains, :docker_security_options, :port_mappings, :repository_credentials, :mount_points, :environment, :volumes_from, :secrets, :depends_on, :extra_hosts, :docker_labels, :ulimits, :system_controls, :health_check, :resource_requirements].each { |param|
              if c.has_key?(param.to_s)
                params[param] = if !c[param.to_s].nil? and (c[param.to_s].is_a?(Hash) or c[param.to_s].is_a?(Array))
                  MU.strToSym(c[param.to_s])
                else
                  c[param.to_s]
                end
              end
            }
            if @config['vpc']
              [:hostname, :dns_servers, :dns_search_domains, :links].each { |param|
                if params[param]
                  MU.log "Container parameter #{param.to_s} not supported in VPC clusters, ignoring", MU::WARN
                  params.delete(param)
                end
              }
            end
            if @config['flavor'] == "Fargate"
              [:privileged, :docker_security_options].each { |param|
                if params[param]
                  MU.log "Container parameter #{param.to_s} not supported in Fargate clusters, ignoring", MU::WARN
                  params.delete(param)
                end
              }
            end
            if c['log_configuration']
              log_obj = @deploy.findLitterMate(name: c['log_configuration']['options']['awslogs-group'], type: "logs")
              if log_obj
                c['log_configuration']['options']['awslogs-group'] = log_obj.mu_name
              end
              params[:log_configuration] = MU.strToSym(c['log_configuration'])
            end
            params
          }

          [defs, role_arn, lbs]
        end

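The container-definition builder above leans on a helper (MU.strToSym) to turn the string-keyed configuration hashes into the symbol-keyed structures the AWS SDK expects. A rough standalone equivalent of that conversion, not the actual implementation, could look like:

# A minimal recursive string-key -> symbol-key converter, similar in spirit
# to the MU.strToSym helper used above (illustrative only).
def deep_symbolize(obj)
  case obj
  when Hash
    obj.each_with_object({}) { |(k, v), h| h[k.to_sym] = deep_symbolize(v) }
  when Array
    obj.map { |v| deep_symbolize(v) }
  else
    obj
  end
end

deep_symbolize("port_mappings" => [{ "container_port" => 8080, "protocol" => "tcp" }])
# => {:port_mappings=>[{:container_port=>8080, :protocol=>"tcp"}]}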
        def register_ecs_task(container_definitions, service_name, cpu_total = 2, mem_total = 2, role_arn: nil)
          task_params = {
            family: @deploy.deploy_id,
            container_definitions: container_definitions,
            requires_compatibilities: [@config['flavor'] == "ECS" ? "EC2" : "FARGATE"]
          }

          if @config['volumes']
            task_params[:volumes] = []
            @config['volumes'].each { |v|
              vol = { :name => v['name'] }
              if v['type'] == "host"
                vol[:host] = {}
                if v['host_volume_source_path']
                  vol[:host][:source_path] = v['host_volume_source_path']
                end
              elsif v['type'] == "docker"
                vol[:docker_volume_configuration] = MU.strToSym(v['docker_volume_configuration'])
              else
                raise MuError, "Invalid volume type '#{v['type']}' specified in ContainerCluster '#{@mu_name}'"
              end
              task_params[:volumes] << vol
            }
          end

          if role_arn
            task_params[:execution_role_arn] = role_arn
            task_params[:task_role_arn] = role_arn
          end
          if @config['flavor'] == "Fargate"
            task_params[:network_mode] = "awsvpc"
            task_params[:cpu] = cpu_total.to_i.to_s
            task_params[:memory] = mem_total.to_i.to_s
          end

          MU.log "Registering task definition #{service_name} with #{container_definitions.size.to_s} containers"

          # XXX this helpfully keeps revisions, but let's compare anyway and avoid cluttering with identical ones
          resp = MU::Cloud::AWS.ecs(region: @config['region'], credentials: @config['credentials']).register_task_definition(task_params)

          resp.task_definition.task_definition_arn
        end

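For comparison, a bare aws-sdk-ecs call that registers a small Fargate task definition; all names and sizes below are placeholders. Note that Fargate wants task-level cpu and memory passed as strings and the awsvpc network mode, which is what the method above arranges:

require 'aws-sdk-ecs'

ecs = Aws::ECS::Client.new(region: "us-east-1")

resp = ecs.register_task_definition(
  family: "example-task",                  # hypothetical family name
  requires_compatibilities: ["FARGATE"],
  network_mode: "awsvpc",
  cpu: "256",                              # task-level CPU units, as a string
  memory: "512",                           # task-level memory in MiB, as a string
  container_definitions: [
    {
      name: "web",
      image: "nginx:stable",
      essential: true,
      port_mappings: [{ container_port: 80, protocol: "tcp" }]
    }
  ]
)
puts resp.task_definition.task_definition_arn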
        def list_ecs_services
          svc_resp = nil
          MU.retrier([Aws::ECS::Errors::ClusterNotFoundException], wait: 5, max: 10) {
            svc_resp = MU::Cloud::AWS.ecs(region: @config['region'], credentials: @config['credentials']).list_services(
              cluster: arn
            )
          }

          svc_resp.service_arns.map { |s|
            s.gsub(/.*?:service\/(.*)/, '\1')
          }
        end

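MU.retrier above is cloud-mu's own retry helper; the underlying pattern is simply to retry list_services while a freshly-created cluster is still propagating. A rough plain-Ruby sketch of the same idea (cluster name is a placeholder):

require 'aws-sdk-ecs'

ecs = Aws::ECS::Client.new(region: "us-east-1")

attempts = 0
begin
  resp = ecs.list_services(cluster: "my-ecs-cluster")   # hypothetical cluster name
rescue Aws::ECS::Errors::ClusterNotFoundException
  attempts += 1
  raise if attempts >= 10
  sleep 5
  retry
end

# Reduce the ARNs to bare service names, as the method above does.
puts resp.service_arns.map { |arn| arn.sub(/.*?:service\//, "") }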
        def create_update_ecs_service(task_def, service_name, lbs, existing_svcs)
          service_params = {
            :cluster => @mu_name,
            :desired_count => @config['instance_count'], # XXX this makes no sense
            :service_name => service_name,
            :launch_type => @config['flavor'] == "ECS" ? "EC2" : "FARGATE",
            :task_definition => task_def,
            :load_balancers => lbs
          }
          if @config['vpc']
            subnet_ids = []
            all_public = true

            mySubnets.each { |subnet|
              subnet_ids << subnet.cloud_id
              all_public = false if subnet.private?
            }

            service_params[:network_configuration] = {
              :awsvpc_configuration => {
                :subnets => subnet_ids,
                :security_groups => myFirewallRules.map { |fw| fw.cloud_id },
                :assign_public_ip => all_public ? "ENABLED" : "DISABLED"
              }
            }
          end

          if !existing_svcs.include?(service_name)
            MU.log "Creating Service #{service_name}"

            MU::Cloud::AWS.ecs(region: @config['region'], credentials: @config['credentials']).create_service(service_params)
          else
            service_params[:service] = service_params[:service_name].dup
            service_params.delete(:service_name)
            service_params.delete(:launch_type)
            MU.log "Updating Service #{service_name}", MU::NOTICE, details: service_params

            MU::Cloud::AWS.ecs(region: @config['region'], credentials: @config['credentials']).update_service(service_params)
          end
        end
      end

    end
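The create-or-update branch above maps onto two SDK calls with slightly different parameter sets (update_service takes service rather than service_name and no launch_type). A minimal sketch of each, with placeholder names and a pre-registered task definition:

require 'aws-sdk-ecs'

ecs      = Aws::ECS::Client.new(region: "us-east-1")
cluster  = "my-ecs-cluster"        # hypothetical
service  = "example-service"       # hypothetical
task_def = "example-task:1"        # family:revision or full ARN

existing = ecs.list_services(cluster: cluster).service_arns.any? { |arn| arn.end_with?("/#{service}") }

if existing
  # Updates may change the task definition and desired count, but not the launch type.
  ecs.update_service(cluster: cluster, service: service, task_definition: task_def, desired_count: 2)
else
  ecs.create_service(cluster: cluster, service_name: service, task_definition: task_def,
                     desired_count: 2, launch_type: "FARGATE",
                     network_configuration: {
                       awsvpc_configuration: {
                         subnets: ["subnet-0123456789abcdef0"],   # hypothetical subnet
                         assign_public_ip: "DISABLED"
                       }
                     })
end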