cloud-mu 3.2.0 → 3.5.0
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
- checksums.yaml +4 -4
- data/Dockerfile +1 -1
- data/ansible/roles/mu-nat/tasks/main.yml +3 -0
- data/bin/mu-adopt +12 -1
- data/bin/mu-aws-setup +41 -7
- data/bin/mu-azure-setup +34 -0
- data/bin/mu-configure +214 -119
- data/bin/mu-gcp-setup +37 -2
- data/bin/mu-load-config.rb +2 -1
- data/bin/mu-node-manage +3 -0
- data/bin/mu-refresh-ssl +67 -0
- data/bin/mu-run-tests +28 -6
- data/bin/mu-self-update +30 -10
- data/bin/mu-upload-chef-artifacts +30 -26
- data/cloud-mu.gemspec +10 -8
- data/cookbooks/mu-master/attributes/default.rb +5 -1
- data/cookbooks/mu-master/metadata.rb +2 -2
- data/cookbooks/mu-master/recipes/default.rb +81 -26
- data/cookbooks/mu-master/recipes/init.rb +197 -62
- data/cookbooks/mu-master/recipes/update_nagios_only.rb +1 -1
- data/cookbooks/mu-master/recipes/vault.rb +78 -77
- data/cookbooks/mu-master/templates/default/mods/rewrite.conf.erb +1 -0
- data/cookbooks/mu-master/templates/default/nagios.conf.erb +103 -0
- data/cookbooks/mu-master/templates/default/web_app.conf.erb +14 -30
- data/cookbooks/mu-tools/attributes/default.rb +12 -0
- data/cookbooks/mu-tools/files/centos-6/CentOS-Base.repo +47 -0
- data/cookbooks/mu-tools/libraries/helper.rb +98 -4
- data/cookbooks/mu-tools/libraries/monkey.rb +1 -1
- data/cookbooks/mu-tools/recipes/apply_security.rb +31 -9
- data/cookbooks/mu-tools/recipes/aws_api.rb +8 -2
- data/cookbooks/mu-tools/recipes/base_repositories.rb +1 -1
- data/cookbooks/mu-tools/recipes/gcloud.rb +2 -9
- data/cookbooks/mu-tools/recipes/google_api.rb +7 -0
- data/cookbooks/mu-tools/recipes/rsyslog.rb +8 -1
- data/cookbooks/mu-tools/resources/disk.rb +113 -42
- data/cookbooks/mu-tools/resources/mommacat_request.rb +1 -2
- data/cookbooks/mu-tools/templates/centos-8/sshd_config.erb +215 -0
- data/extras/Gemfile.lock.bootstrap +394 -0
- data/extras/bucketstubs/error.html +0 -0
- data/extras/bucketstubs/index.html +0 -0
- data/extras/clean-stock-amis +11 -3
- data/extras/generate-stock-images +6 -3
- data/extras/git_rpm/build.sh +20 -0
- data/extras/git_rpm/mugit.spec +53 -0
- data/extras/image-generators/AWS/centos7.yaml +19 -16
- data/extras/image-generators/AWS/{rhel7.yaml → rhel71.yaml} +0 -0
- data/extras/image-generators/AWS/{win2k12.yaml → win2k12r2.yaml} +0 -0
- data/extras/image-generators/VMWare/centos8.yaml +15 -0
- data/extras/openssl_rpm/build.sh +19 -0
- data/extras/openssl_rpm/mussl.spec +46 -0
- data/extras/python_rpm/muthon.spec +14 -4
- data/extras/ruby_rpm/muby.spec +9 -5
- data/extras/sqlite_rpm/build.sh +19 -0
- data/extras/sqlite_rpm/muqlite.spec +47 -0
- data/install/installer +7 -5
- data/modules/mommacat.ru +2 -2
- data/modules/mu.rb +14 -7
- data/modules/mu/adoption.rb +5 -5
- data/modules/mu/cleanup.rb +47 -25
- data/modules/mu/cloud.rb +29 -1
- data/modules/mu/cloud/dnszone.rb +0 -2
- data/modules/mu/cloud/machine_images.rb +1 -1
- data/modules/mu/cloud/providers.rb +6 -1
- data/modules/mu/cloud/resource_base.rb +16 -7
- data/modules/mu/cloud/ssh_sessions.rb +5 -1
- data/modules/mu/cloud/wrappers.rb +20 -7
- data/modules/mu/config.rb +28 -12
- data/modules/mu/config/bucket.rb +31 -2
- data/modules/mu/config/cache_cluster.rb +1 -1
- data/modules/mu/config/cdn.rb +100 -0
- data/modules/mu/config/container_cluster.rb +1 -1
- data/modules/mu/config/database.rb +3 -3
- data/modules/mu/config/dnszone.rb +4 -3
- data/modules/mu/config/endpoint.rb +1 -0
- data/modules/mu/config/firewall_rule.rb +1 -1
- data/modules/mu/config/function.rb +16 -7
- data/modules/mu/config/job.rb +89 -0
- data/modules/mu/config/notifier.rb +7 -18
- data/modules/mu/config/ref.rb +55 -9
- data/modules/mu/config/schema_helpers.rb +12 -3
- data/modules/mu/config/server.rb +11 -5
- data/modules/mu/config/server_pool.rb +2 -2
- data/modules/mu/config/vpc.rb +11 -10
- data/modules/mu/defaults/AWS.yaml +106 -106
- data/modules/mu/deploy.rb +40 -14
- data/modules/mu/groomers/chef.rb +2 -2
- data/modules/mu/master.rb +70 -3
- data/modules/mu/mommacat.rb +28 -9
- data/modules/mu/mommacat/daemon.rb +13 -7
- data/modules/mu/mommacat/naming.rb +2 -2
- data/modules/mu/mommacat/search.rb +16 -5
- data/modules/mu/mommacat/storage.rb +67 -32
- data/modules/mu/providers/aws.rb +298 -85
- data/modules/mu/providers/aws/alarm.rb +5 -5
- data/modules/mu/providers/aws/bucket.rb +284 -50
- data/modules/mu/providers/aws/cache_cluster.rb +26 -26
- data/modules/mu/providers/aws/cdn.rb +782 -0
- data/modules/mu/providers/aws/collection.rb +16 -16
- data/modules/mu/providers/aws/container_cluster.rb +84 -64
- data/modules/mu/providers/aws/database.rb +59 -55
- data/modules/mu/providers/aws/dnszone.rb +29 -12
- data/modules/mu/providers/aws/endpoint.rb +535 -50
- data/modules/mu/providers/aws/firewall_rule.rb +32 -26
- data/modules/mu/providers/aws/folder.rb +1 -1
- data/modules/mu/providers/aws/function.rb +300 -134
- data/modules/mu/providers/aws/group.rb +16 -14
- data/modules/mu/providers/aws/habitat.rb +4 -4
- data/modules/mu/providers/aws/job.rb +469 -0
- data/modules/mu/providers/aws/loadbalancer.rb +67 -45
- data/modules/mu/providers/aws/log.rb +17 -17
- data/modules/mu/providers/aws/msg_queue.rb +22 -13
- data/modules/mu/providers/aws/nosqldb.rb +99 -8
- data/modules/mu/providers/aws/notifier.rb +137 -65
- data/modules/mu/providers/aws/role.rb +119 -83
- data/modules/mu/providers/aws/search_domain.rb +166 -30
- data/modules/mu/providers/aws/server.rb +209 -118
- data/modules/mu/providers/aws/server_pool.rb +95 -130
- data/modules/mu/providers/aws/storage_pool.rb +19 -11
- data/modules/mu/providers/aws/user.rb +5 -5
- data/modules/mu/providers/aws/userdata/linux.erb +5 -4
- data/modules/mu/providers/aws/vpc.rb +109 -54
- data/modules/mu/providers/aws/vpc_subnet.rb +43 -39
- data/modules/mu/providers/azure.rb +78 -12
- data/modules/mu/providers/azure/server.rb +20 -4
- data/modules/mu/providers/cloudformation/server.rb +1 -1
- data/modules/mu/providers/google.rb +21 -5
- data/modules/mu/providers/google/bucket.rb +1 -1
- data/modules/mu/providers/google/container_cluster.rb +1 -1
- data/modules/mu/providers/google/database.rb +1 -1
- data/modules/mu/providers/google/firewall_rule.rb +1 -1
- data/modules/mu/providers/google/folder.rb +7 -3
- data/modules/mu/providers/google/function.rb +66 -31
- data/modules/mu/providers/google/group.rb +1 -1
- data/modules/mu/providers/google/habitat.rb +1 -1
- data/modules/mu/providers/google/loadbalancer.rb +1 -1
- data/modules/mu/providers/google/role.rb +6 -3
- data/modules/mu/providers/google/server.rb +1 -1
- data/modules/mu/providers/google/server_pool.rb +1 -1
- data/modules/mu/providers/google/user.rb +1 -1
- data/modules/mu/providers/google/vpc.rb +28 -3
- data/modules/tests/aws-jobs-functions.yaml +46 -0
- data/modules/tests/aws-servers-with-handrolled-iam.yaml +37 -0
- data/modules/tests/centos6.yaml +4 -0
- data/modules/tests/centos7.yaml +4 -0
- data/modules/tests/ecs.yaml +2 -2
- data/modules/tests/eks.yaml +1 -1
- data/modules/tests/functions/node-function/lambda_function.js +10 -0
- data/modules/tests/functions/python-function/lambda_function.py +12 -0
- data/modules/tests/k8s.yaml +1 -1
- data/modules/tests/microservice_app.yaml +288 -0
- data/modules/tests/rds.yaml +5 -5
- data/modules/tests/regrooms/rds.yaml +5 -5
- data/modules/tests/server-with-scrub-muisms.yaml +1 -1
- data/modules/tests/super_complex_bok.yml +2 -2
- data/modules/tests/super_simple_bok.yml +2 -2
- metadata +42 -17
data/modules/mu/providers/aws/collection.rb

@@ -25,15 +25,15 @@ module MU
 def initialize(**args)
 super
 @mu_name ||= @deploy.getResourceName(@config['name'], need_unique_string: true)
-MU.setVar("curRegion", @
+MU.setVar("curRegion", @region) if !@region.nil?
 end


 # Called automatically by {MU::Deploy#createResources}
 def create
 flag="SUCCESS"
-MU.setVar("curRegion", @
-region = @
+MU.setVar("curRegion", @region) if !@region.nil?
+region = @region
 server=@config["name"]
 stack_name = getStackName(@config["name"])

@@ -108,10 +108,10 @@ module MU
 end

 MU.log "Creating CloudFormation stack '#{@config['name']}'", details: stack_descriptor
-MU::Cloud::AWS.cloudformation(region: region, credentials: @
+MU::Cloud::AWS.cloudformation(region: region, credentials: @credentials).create_stack(stack_descriptor);

 sleep(10);
-stack_response = MU::Cloud::AWS.cloudformation(region: region, credentials: @
+stack_response = MU::Cloud::AWS.cloudformation(region: region, credentials: @credentials).describe_stacks({:stack_name => stack_name}).stacks.first
 attempts = 0
 begin
 if attempts % 5 == 0
@@ -119,7 +119,7 @@ module MU
 else
 MU.log "Waiting for CloudFormation stack '#{@config['name']}' to be ready...", MU::DEBUG
 end
-stack_response =MU::Cloud::AWS.cloudformation(region: region, credentials: @
+stack_response =MU::Cloud::AWS.cloudformation(region: region, credentials: @credentials).describe_stacks({:stack_name => stack_name}).stacks.first
 sleep 60
 end while stack_response.stack_status == "CREATE_IN_PROGRESS"

@@ -135,14 +135,14 @@ module MU
 end

 if flag == "FAIL" then
-MU::Cloud::AWS.cloudformation(region: region, credentials: @
+MU::Cloud::AWS.cloudformation(region: region, credentials: @credentials).delete_stack({:stack_name => stack_name})
 exit 1
 end

 MU.log "CloudFormation stack '#{@config['name']}' complete"

 begin
-resources = MU::Cloud::AWS.cloudformation(region: region, credentials: @
+resources = MU::Cloud::AWS.cloudformation(region: region, credentials: @credentials).describe_stack_resources(:stack_name => stack_name)

 resources[:stack_resources].each { |resource|

@@ -150,7 +150,7 @@ module MU
 when "AWS::EC2::Instance"
 MU::Cloud::AWS.createStandardTags(resource.physical_resource_id)
 instance_name = MU.deploy_id+"-"+@config['name']+"-"+resource.logical_resource_id
-MU::Cloud::AWS.createTag(resource.physical_resource_id, "Name", instance_name, credentials: @
+MU::Cloud::AWS.createTag(resource.physical_resource_id, "Name", instance_name, credentials: @credentials)

 instance = MU::Cloud.resourceClass("AWS", "Server").notifyDeploy(
 @config['name']+"-"+resource.logical_resource_id,
@@ -177,14 +177,14 @@ module MU

 when "AWS::EC2::SecurityGroup"
 MU::Cloud::AWS.createStandardTags(resource.physical_resource_id)
-MU::Cloud::AWS.createTag(resource.physical_resource_id, "Name", MU.deploy_id+"-"+@config['name']+'-'+resource.logical_resource_id, credentials: @
+MU::Cloud::AWS.createTag(resource.physical_resource_id, "Name", MU.deploy_id+"-"+@config['name']+'-'+resource.logical_resource_id, credentials: @credentials)
 MU::Cloud.resourceClass("AWS", "FirewallRule").notifyDeploy(
 @config['name']+"-"+resource.logical_resource_id,
 resource.physical_resource_id
 )
 when "AWS::EC2::Subnet"
 MU::Cloud::AWS.createStandardTags(resource.physical_resource_id)
-MU::Cloud::AWS.createTag(resource.physical_resource_id, "Name", MU.deploy_id+"-"+@config['name']+'-'+resource.logical_resource_id, credentials: @
+MU::Cloud::AWS.createTag(resource.physical_resource_id, "Name", MU.deploy_id+"-"+@config['name']+'-'+resource.logical_resource_id, credentials: @credentials)
 data = {
 "collection" => @config["name"],
 "subnet_id" => resource.physical_resource_id,
@@ -192,7 +192,7 @@ module MU
 @deploy.notify("subnets", @config['name']+"-"+resource.logical_resource_id, data)
 when "AWS::EC2::VPC"
 MU::Cloud::AWS.createStandardTags(resource.physical_resource_id)
-MU::Cloud::AWS.createTag(resource.physical_resource_id, "Name", MU.deploy_id+"-"+@config['name']+'-'+resource.logical_resource_id, credentials: @
+MU::Cloud::AWS.createTag(resource.physical_resource_id, "Name", MU.deploy_id+"-"+@config['name']+'-'+resource.logical_resource_id, credentials: @credentials)
 data = {
 "collection" => @config["name"],
 "vpc_id" => resource.physical_resource_id,
@@ -200,10 +200,10 @@ module MU
 @deploy.notify("vpcs", @config['name']+"-"+resource.logical_resource_id, data)
 when "AWS::EC2::InternetGateway"
 MU::Cloud::AWS.createStandardTags(resource.physical_resource_id)
-MU::Cloud::AWS.createTag(resource.physical_resource_id, "Name", MU.deploy_id+"-"+@config['name']+'-'+resource.logical_resource_id, credentials: @
+MU::Cloud::AWS.createTag(resource.physical_resource_id, "Name", MU.deploy_id+"-"+@config['name']+'-'+resource.logical_resource_id, credentials: @credentials)
 when "AWS::EC2::RouteTable"
 MU::Cloud::AWS.createStandardTags(resource.physical_resource_id)
-MU::Cloud::AWS.createTag(resource.physical_resource_id, "Name", MU.deploy_id+"-"+@config['name']+'-'+resource.logical_resource_id, credentials: @
+MU::Cloud::AWS.createTag(resource.physical_resource_id, "Name", MU.deploy_id+"-"+@config['name']+'-'+resource.logical_resource_id, credentials: @credentials)

 # The rest of these aren't anything we act on
 when "AWS::EC2::Route"
@@ -242,7 +242,7 @@ module MU
 # @param region [String]: The cloud provider region
 # @param wait [Boolean]: Block on the removal of this stack; AWS deletion will continue in the background otherwise if false.
 # @return [void]
-def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, wait: false, credentials: nil, flags: {})
+def self.cleanup(noop: false, deploy_id: MU.deploy_id, ignoremaster: false, region: MU.curRegion, wait: false, credentials: nil, flags: {})
 MU.log "AWS::Collection.cleanup: need to support flags['known']", MU::DEBUG, details: flags
 MU.log "Placeholder: AWS Collection artifacts do not support tags, so ignoremaster cleanup flag has no effect", MU::DEBUG, details: ignoremaster

@@ -251,7 +251,7 @@ module MU
 resp.stacks.each { |stack|
 ok = false
 stack.tags.each { |tag|
-ok = true if (tag.key == "MU-ID") and tag.value ==
+ok = true if (tag.key == "MU-ID") and tag.value == deploy_id
 }
 if ok
 MU.log "Deleting CloudFormation stack #{stack.stack_name})"
data/modules/mu/providers/aws/container_cluster.rb

@@ -65,7 +65,7 @@ module MU

 on_retry = Proc.new { |e|
 # soul-crushing, yet effective
-if e.message.match(/because (#{Regexp.quote(@
+if e.message.match(/because (#{Regexp.quote(@region)}[a-z]), the targeted availability zone, does not currently have sufficient capacity/)
 bad_az = Regexp.last_match(1)
 deletia = []
 mySubnets.each { |subnet|
@@ -81,7 +81,7 @@ module MU

 MU.retrier([Aws::EKS::Errors::UnsupportedAvailabilityZoneException, Aws::EKS::Errors::InvalidParameterException], on_retry: on_retry, max: subnet_ids.size) {
 MU.log "Creating EKS cluster #{@mu_name}", details: params
-MU::Cloud::AWS.eks(region: @
+MU::Cloud::AWS.eks(region: @region, credentials: @credentials).create_cluster(params)
 }
 @cloud_id = @mu_name

@@ -100,7 +100,7 @@ module MU

 MU.log "Creation of EKS cluster #{@mu_name} complete"
 else
-MU::Cloud::AWS.ecs(region: @
+MU::Cloud::AWS.ecs(region: @region, credentials: @credentials).create_cluster(
 cluster_name: @mu_name
 )
 @cloud_id = @mu_name
@@ -118,7 +118,7 @@ module MU
 # this account; EKS applications might want one, but will fail in
 # confusing ways if this hasn't been done.
 begin
-MU::Cloud::AWS.iam(credentials: @
+MU::Cloud::AWS.iam(credentials: @credentials).create_service_linked_role(
 aws_service_name: "elasticloadbalancing.amazonaws.com"
 )
 rescue ::Aws::IAM::Errors::InvalidInput
@@ -170,7 +170,7 @@ module MU
 if tasks.size > 0
 tasks_failing = false
 MU.retrier(wait: 15, max: 10, loop_if: Proc.new { tasks_failing }){ |retries, _wait|
-tasks_failing = !MU::Cloud::AWS::ContainerCluster.tasksRunning?(@mu_name, log: (retries > 0), region: @
+tasks_failing = !MU::Cloud::AWS::ContainerCluster.tasksRunning?(@mu_name, log: (retries > 0), region: @region, credentials: @credentials)
 }

 if tasks_failing
@@ -287,14 +287,15 @@ MU.log c.name, MU::NOTICE, details: t
 # @return [OpenStruct]
 def cloud_desc(use_cache: true)
 return @cloud_desc_cache if @cloud_desc_cache and use_cache
+return nil if !@cloud_id
 @cloud_desc_cache = if @config['flavor'] == "EKS" or
 (@config['flavor'] == "Fargate" and !@config['containers'])
-resp = MU::Cloud::AWS.eks(region: @
+resp = MU::Cloud::AWS.eks(region: @region, credentials: @credentials).describe_cluster(
 name: @cloud_id
 )
 resp.cluster
 else
-resp = MU::Cloud::AWS.ecs(region: @
+resp = MU::Cloud::AWS.ecs(region: @region, credentials: @credentials).describe_clusters(
 clusters: [@cloud_id]
 )
 resp.clusters.first
@@ -317,7 +318,7 @@ MU.log c.name, MU::NOTICE, details: t
 def notify
 deploy_struct = MU.structToHash(cloud_desc)
 deploy_struct['cloud_id'] = @mu_name
-deploy_struct["region"] = @
+deploy_struct["region"] = @region
 if @config['flavor'] == "EKS"
 deploy_struct["max_pods"] = @config['kubernetes']['max_pods'].to_s
 # XXX if FargateKS, get the Fargate Profile artifact
@@ -326,7 +327,7 @@ MU.log c.name, MU::NOTICE, details: t
 end

 @@eks_versions = {}
-@@
+@@eks_version_semaphores = {}
 # Use the AWS SSM API to fetch the current version of the Amazon Linux
 # ECS-optimized AMI, so we can use it as a default AMI for ECS deploys.
 # @param flavor [String]: ECS or EKS
@@ -339,24 +340,22 @@ MU.log c.name, MU::NOTICE, details: t
 names: ["/aws/service/#{flavor.downcase}/optimized-ami/amazon-linux/recommended"]
 )
 else
-@@
+@@eks_version_semaphores[region] ||= Mutex.new
+
+@@eks_version_semaphores[region].synchronize {
 if !@@eks_versions[region]
 @@eks_versions[region] ||= []
 versions = {}
-resp =
-
-
-
-
-
-
-)
-
-
-versions[Regexp.last_match[1]] = true
-}
-next_token = resp.next_token
-end while !next_token.nil?
+resp = MU::Cloud::AWS.ssm(region: region).get_parameters_by_path(
+path: "/aws/service/#{flavor.downcase}/optimized-ami",
+recursive: true,
+max_results: 10 # as high as it goes, ugh
+)
+
+resp.parameters.each { |p|
+p.name.match(/\/aws\/service\/eks\/optimized-ami\/([^\/]+?)\//)
+versions[Regexp.last_match[1]] = true
+}
 @@eks_versions[region] = versions.keys.sort { |a, b| MU.version_sort(a, b) }
 end
 }
@@ -376,16 +375,31 @@ MU.log c.name, MU::NOTICE, details: t
 nil
 end

+@@supported_eks_region_cache = []
+@@eks_region_semaphore = Mutex.new
+
 # Return the list of regions where we know EKS is supported.
-def self.EKSRegions(credentials = nil
-
-
-
-
-
-
+def self.EKSRegions(credentials = nil)
+@@eks_region_semaphore.synchronize {
+if @@supported_eks_region_cache and !@@supported_eks_region_cache.empty?
+return @@supported_eks_region_cache
+end
+start = Time.now
+# the SSM API is painfully slow for large result sets, so thread
+# these and do them in parallel
+@@supported_eks_region_cache = []
+region_threads = []
+MU::Cloud::AWS.listRegions(credentials: credentials).each { |region|
+region_threads << Thread.new(region) { |r|
+r_start = Time.now
+ami = getStandardImage("EKS", r)
+@@supported_eks_region_cache << r if ami
+}
+}
+region_threads.each { |t| t.join }

-
+@@supported_eks_region_cache
+}
 end

 # Does this resource type exist as a global (cloud-wide) artifact, or
@@ -406,30 +420,32 @@ MU.log c.name, MU::NOTICE, details: t
 # @param ignoremaster [Boolean]: If true, will remove resources not flagged as originating from this Mu server
 # @param region [String]: The cloud provider region
 # @return [void]
-def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {})
+def self.cleanup(noop: false, deploy_id: MU.deploy_id, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {})
 MU.log "AWS::ContainerCluster.cleanup: need to support flags['known']", MU::DEBUG, details: flags
 MU.log "Placeholder: AWS ContainerCluster artifacts do not support tags, so ignoremaster cleanup flag has no effect", MU::DEBUG, details: ignoremaster

-purge_ecs_clusters(noop: noop, region: region, credentials: credentials)
+purge_ecs_clusters(noop: noop, region: region, credentials: credentials, deploy_id: deploy_id)

-purge_eks_clusters(noop: noop, region: region, credentials: credentials)
+purge_eks_clusters(noop: noop, region: region, credentials: credentials, deploy_id: deploy_id)

 end

-def self.purge_eks_clusters(noop: false, region: MU.curRegion, credentials: nil)
-return if !MU::Cloud::AWS::ContainerCluster.EKSRegions(credentials, region: region).include?(region)
+def self.purge_eks_clusters(noop: false, region: MU.curRegion, credentials: nil, deploy_id: MU.deploy_id)
 resp = begin
 MU::Cloud::AWS.eks(credentials: credentials, region: region).list_clusters
 rescue Aws::EKS::Errors::AccessDeniedException
 # EKS isn't actually live in this region, even though SSM lists
 # base images for it
+if @@supported_eks_region_cache
+@@supported_eks_region_cache.delete(region)
+end
 return
 end

 return if !resp or !resp.clusters

 resp.clusters.each { |cluster|
-if cluster.match(/^#{
+if cluster.match(/^#{deploy_id}-/)

 desc = MU::Cloud::AWS.eks(credentials: credentials, region: region).describe_cluster(
 name: cluster
@@ -473,13 +489,14 @@ MU.log c.name, MU::NOTICE, details: t
 end
 private_class_method :purge_eks_clusters

-def self.purge_ecs_clusters(noop: false, region: MU.curRegion, credentials: nil)
+def self.purge_ecs_clusters(noop: false, region: MU.curRegion, credentials: nil, deploy_id: MU.deploy_id)
+start = Time.now
 resp = MU::Cloud::AWS.ecs(credentials: credentials, region: region).list_clusters

 return if !resp or !resp.cluster_arns or resp.cluster_arns.empty?

 resp.cluster_arns.each { |arn|
-if arn.match(/:cluster\/(#{
+if arn.match(/:cluster\/(#{deploy_id}[^:]+)$/)
 cluster = Regexp.last_match[1]

 svc_resp = MU::Cloud::AWS.ecs(region: region, credentials: credentials).list_services(
@@ -525,7 +542,7 @@ MU.log c.name, MU::NOTICE, details: t
 }

 tasks = MU::Cloud::AWS.ecs(region: region, credentials: credentials).list_task_definitions(
-family_prefix:
+family_prefix: deploy_id
 )

 if tasks and tasks.task_definition_arns
@@ -1221,12 +1238,12 @@ start = Time.now

 cluster["flavor"] = "EKS" if cluster["flavor"].match(/^Kubernetes$/i)

-if cluster["flavor"] == "ECS" and cluster["kubernetes"] and !MU::Cloud::AWS.isGovCloud?(cluster["region"]) and !cluster["containers"] and MU::Cloud::AWS::ContainerCluster.EKSRegions(cluster['credentials']
+if cluster["flavor"] == "ECS" and cluster["kubernetes"] and !MU::Cloud::AWS.isGovCloud?(cluster["region"]) and !cluster["containers"] and MU::Cloud::AWS::ContainerCluster.EKSRegions(cluster['credentials']).include?(cluster['region'])
 cluster["flavor"] = "EKS"
 MU.log "Setting flavor of ContainerCluster '#{cluster['name']}' to EKS ('kubernetes' stanza was specified)", MU::NOTICE
 end

-if cluster["flavor"] == "EKS" and !MU::Cloud::AWS::ContainerCluster.EKSRegions(cluster['credentials']
+if cluster["flavor"] == "EKS" and !MU::Cloud::AWS::ContainerCluster.EKSRegions(cluster['credentials']).include?(cluster['region'])
 MU.log "EKS is only available in some regions", MU::ERR, details: MU::Cloud::AWS::ContainerCluster.EKSRegions
 ok = false
 end
@@ -1364,7 +1381,7 @@ start = Time.now
 role["tags"] = cluster["tags"] if !cluster["tags"].nil?
 role["optional_tags"] = cluster["optional_tags"] if !cluster["optional_tags"].nil?
 configurator.insertKitten(role, "roles")
-MU::Config.addDependency(cluster, cluster["name"]+"pods", "role",
+MU::Config.addDependency(cluster, cluster["name"]+"pods", "role", their_phase: "groom")
 if !MU::Master.kubectl
 MU.log "Since I can't find a kubectl executable, you will have to handle all service account, user, and role bindings manually!", MU::WARN
 end
@@ -1484,7 +1501,8 @@ start = Time.now
 worker_pool[k] = cluster[k]
 end
 }
-
+else
+worker_pool["groom"] = false # don't meddle with ECS workers unnecessarily
 end

 configurator.insertKitten(worker_pool, "server_pools")
@@ -1512,7 +1530,7 @@ start = Time.now
 role["tags"] = cluster["tags"] if !cluster["tags"].nil?
 role["optional_tags"] = cluster["optional_tags"] if !cluster["optional_tags"].nil?
 configurator.insertKitten(role, "roles")
-MU::Config.addDependency(cluster, cluster["name"]+"controlplane", "role",
+MU::Config.addDependency(cluster, cluster["name"]+"controlplane", "role", their_phase: "groom")
 end

 ok
@@ -1562,7 +1580,7 @@ start = Time.now
 @cacert = cloud_desc.certificate_authority.data
 @cluster = @mu_name
 if @config['flavor'] != "Fargate"
-resp = MU::Cloud::AWS.iam(credentials: @
+resp = MU::Cloud::AWS.iam(credentials: @credentials).get_role(role_name: @mu_name+"WORKERS")
 @worker_role_arn = resp.role.arn
 end
 kube_conf = @deploy.deploy_dir+"/kubeconfig-#{@config['name']}"
@@ -1629,7 +1647,7 @@ start = Time.now
 :tags => @tags
 }
 begin
-resp = MU::Cloud::AWS.eks(region: @
+resp = MU::Cloud::AWS.eks(region: @region, credentials: @credentials).describe_fargate_profile(
 cluster_name: @mu_name,
 fargate_profile_name: profname
 )
@@ -1642,7 +1660,7 @@ start = Time.now
 old_desc["subnets"].sort!
 if !old_desc.eql?(new_desc)
 MU.log "Deleting Fargate profile #{profname} in order to apply changes", MU::WARN, details: desc
-MU::Cloud::AWS::ContainerCluster.purge_fargate_profile(profname, @mu_name, @
+MU::Cloud::AWS::ContainerCluster.purge_fargate_profile(profname, @mu_name, @region, @credentials)
 else
 next
 end
@@ -1651,9 +1669,9 @@ start = Time.now
 # This is just fine!
 end
 MU.log "Creating EKS Fargate profile #{profname}", details: desc
-resp = MU::Cloud::AWS.eks(region: @
+resp = MU::Cloud::AWS.eks(region: @region, credentials: @credentials).create_fargate_profile(desc)
 begin
-resp = MU::Cloud::AWS.eks(region: @
+resp = MU::Cloud::AWS.eks(region: @region, credentials: @credentials).describe_fargate_profile(
 cluster_name: @mu_name,
 fargate_profile_name: profname
 )
@@ -1693,19 +1711,19 @@ start = Time.now
 tagme << s.cloud_id
 tagme_elb << s.cloud_id if !s.private?
 }
-rtbs = MU::Cloud::AWS.ec2(region: @
+rtbs = MU::Cloud::AWS.ec2(region: @region, credentials: @credentials).describe_route_tables(
 filters: [ { name: "vpc-id", values: [@vpc.cloud_id] } ]
 ).route_tables
 tagme.concat(rtbs.map { |r| r.route_table_id } )
 main_sg = @deploy.findLitterMate(type: "firewall_rules", name: "server_pool#{@config['name']}workers")
 tagme << main_sg.cloud_id if main_sg
 MU.log "Applying kubernetes.io tags to VPC resources", details: tagme
-MU::Cloud::AWS.createTag(tagme, "kubernetes.io/cluster/#{@mu_name}", "shared", credentials: @
-MU::Cloud::AWS.createTag(tagme_elb, "kubernetes.io/cluster/elb", @mu_name, credentials: @
+MU::Cloud::AWS.createTag(tagme, "kubernetes.io/cluster/#{@mu_name}", "shared", credentials: @credentials)
+MU::Cloud::AWS.createTag(tagme_elb, "kubernetes.io/cluster/elb", @mu_name, credentials: @credentials)
 end

 def manage_ecs_workers
-resp = MU::Cloud::AWS.ecs(region: @
+resp = MU::Cloud::AWS.ecs(region: @region, credentials: @credentials).list_container_instances({
 cluster: @mu_name
 })
 existing = {}
@@ -1715,7 +1733,7 @@ start = Time.now
 uuids << arn.sub(/^.*?:container-instance\//, "")
 }
 if uuids.size > 0
-resp = MU::Cloud::AWS.ecs(region: @
+resp = MU::Cloud::AWS.ecs(region: @region, credentials: @credentials).describe_container_instances({
 cluster: @mu_name,
 container_instances: uuids
 })
@@ -1726,12 +1744,12 @@ start = Time.now
 end

 threads = []
-resource_lookup = MU::Cloud::AWS.listInstanceTypes(@
+resource_lookup = MU::Cloud::AWS.listInstanceTypes(@region)[@region]
 serverpool = if ['EKS', 'ECS'].include?(@config['flavor'])
 @deploy.findLitterMate(type: "server_pools", name: @config["name"]+"workers")
 end
 serverpool.listNodes.each { |mynode|
-resources = resource_lookup[
+resources = resource_lookup[mynode.cloud_desc.instance_type]
 threads << Thread.new(mynode) { |node|
 ident_doc = nil
 ident_doc_sig = nil
@@ -1771,7 +1789,7 @@ start = Time.now
 params[:container_instance_arn] = existing[node.cloud_id].container_instance_arn
 MU.log "Updating ECS instance #{node} in cluster #{@mu_name}", MU::NOTICE, details: params
 end
-MU::Cloud::AWS.ecs(region: @
+MU::Cloud::AWS.ecs(region: @region, credentials: @credentials).register_container_instance(params)

 }
 }
@@ -1787,7 +1805,7 @@ start = Time.now
 @loadbalancers.each {|lb|
 MU.log "Mapping LB #{lb.mu_name} to service #{c['name']}", MU::INFO
 if lb.cloud_desc.type != "classic"
-elb_groups = MU::Cloud::AWS.elb2(region: @
+elb_groups = MU::Cloud::AWS.elb2(region: @region, credentials: @credentials).describe_target_groups({
 load_balancer_arn: lb.cloud_desc.load_balancer_arn
 })
 matching_target_groups = []
@@ -1932,12 +1950,14 @@ start = Time.now
 task_params[:network_mode] = "awsvpc"
 task_params[:cpu] = cpu_total.to_i.to_s
 task_params[:memory] = mem_total.to_i.to_s
+elsif @config['vpc']
+task_params[:network_mode] = "awsvpc"
 end

 MU.log "Registering task definition #{service_name} with #{container_definitions.size.to_s} containers"

 # XXX this helpfully keeps revisions, but let's compare anyway and avoid cluttering with identical ones
-resp = MU::Cloud::AWS.ecs(region: @
+resp = MU::Cloud::AWS.ecs(region: @region, credentials: @credentials).register_task_definition(task_params)

 resp.task_definition.task_definition_arn
 end
@@ -1945,7 +1965,7 @@ start = Time.now
 def list_ecs_services
 svc_resp = nil
 MU.retrier([Aws::ECS::Errors::ClusterNotFoundException], wait: 5, max: 10){
-svc_resp = MU::Cloud::AWS.ecs(region: @
+svc_resp = MU::Cloud::AWS.ecs(region: @region, credentials: @credentials).list_services(
 cluster: arn
 )
 }
@@ -1985,14 +2005,14 @@ start = Time.now
 if !existing_svcs.include?(service_name)
 MU.log "Creating Service #{service_name}"

-MU::Cloud::AWS.ecs(region: @
+MU::Cloud::AWS.ecs(region: @region, credentials: @credentials).create_service(service_params)
 else
 service_params[:service] = service_params[:service_name].dup
 service_params.delete(:service_name)
 service_params.delete(:launch_type)
 MU.log "Updating Service #{service_name}", MU::NOTICE, details: service_params

-MU::Cloud::AWS.ecs(region: @
+MU::Cloud::AWS.ecs(region: @region, credentials: @credentials).update_service(service_params)
 end
 end
