cloud-mu 3.1.5 → 3.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Dockerfile +5 -1
- data/ansible/roles/mu-windows/files/LaunchConfig.json +9 -0
- data/ansible/roles/mu-windows/files/config.xml +76 -0
- data/ansible/roles/mu-windows/tasks/main.yml +16 -0
- data/bin/mu-adopt +16 -12
- data/bin/mu-azure-tests +57 -0
- data/bin/mu-cleanup +2 -4
- data/bin/mu-configure +52 -0
- data/bin/mu-deploy +3 -3
- data/bin/mu-findstray-tests +25 -0
- data/bin/mu-gen-docs +2 -4
- data/bin/mu-load-config.rb +2 -1
- data/bin/mu-node-manage +15 -16
- data/bin/mu-run-tests +37 -12
- data/cloud-mu.gemspec +3 -3
- data/cookbooks/mu-activedirectory/resources/domain.rb +4 -4
- data/cookbooks/mu-activedirectory/resources/domain_controller.rb +4 -4
- data/cookbooks/mu-tools/libraries/helper.rb +1 -1
- data/cookbooks/mu-tools/recipes/apply_security.rb +14 -14
- data/cookbooks/mu-tools/recipes/aws_api.rb +9 -0
- data/cookbooks/mu-tools/recipes/eks.rb +2 -2
- data/cookbooks/mu-tools/recipes/windows-client.rb +25 -22
- data/extras/clean-stock-amis +25 -19
- data/extras/generate-stock-images +1 -0
- data/extras/image-generators/AWS/win2k12.yaml +2 -0
- data/extras/image-generators/AWS/win2k16.yaml +2 -0
- data/extras/image-generators/AWS/win2k19.yaml +2 -0
- data/modules/mommacat.ru +1 -1
- data/modules/mu.rb +86 -98
- data/modules/mu/adoption.rb +373 -58
- data/modules/mu/cleanup.rb +214 -303
- data/modules/mu/cloud.rb +128 -1733
- data/modules/mu/cloud/database.rb +49 -0
- data/modules/mu/cloud/dnszone.rb +44 -0
- data/modules/mu/cloud/machine_images.rb +212 -0
- data/modules/mu/cloud/providers.rb +81 -0
- data/modules/mu/cloud/resource_base.rb +929 -0
- data/modules/mu/cloud/server.rb +40 -0
- data/modules/mu/cloud/server_pool.rb +1 -0
- data/modules/mu/cloud/ssh_sessions.rb +228 -0
- data/modules/mu/cloud/winrm_sessions.rb +237 -0
- data/modules/mu/cloud/wrappers.rb +169 -0
- data/modules/mu/config.rb +123 -81
- data/modules/mu/config/alarm.rb +2 -6
- data/modules/mu/config/bucket.rb +32 -3
- data/modules/mu/config/cache_cluster.rb +2 -2
- data/modules/mu/config/cdn.rb +100 -0
- data/modules/mu/config/collection.rb +1 -1
- data/modules/mu/config/container_cluster.rb +7 -2
- data/modules/mu/config/database.rb +84 -105
- data/modules/mu/config/database.yml +1 -2
- data/modules/mu/config/dnszone.rb +5 -4
- data/modules/mu/config/doc_helpers.rb +5 -6
- data/modules/mu/config/endpoint.rb +2 -1
- data/modules/mu/config/firewall_rule.rb +3 -19
- data/modules/mu/config/folder.rb +1 -1
- data/modules/mu/config/function.rb +17 -8
- data/modules/mu/config/group.rb +1 -1
- data/modules/mu/config/habitat.rb +1 -1
- data/modules/mu/config/job.rb +89 -0
- data/modules/mu/config/loadbalancer.rb +57 -11
- data/modules/mu/config/log.rb +1 -1
- data/modules/mu/config/msg_queue.rb +1 -1
- data/modules/mu/config/nosqldb.rb +1 -1
- data/modules/mu/config/notifier.rb +8 -19
- data/modules/mu/config/ref.rb +92 -14
- data/modules/mu/config/role.rb +1 -1
- data/modules/mu/config/schema_helpers.rb +38 -37
- data/modules/mu/config/search_domain.rb +1 -1
- data/modules/mu/config/server.rb +12 -13
- data/modules/mu/config/server_pool.rb +3 -7
- data/modules/mu/config/storage_pool.rb +1 -1
- data/modules/mu/config/tail.rb +11 -0
- data/modules/mu/config/user.rb +1 -1
- data/modules/mu/config/vpc.rb +27 -23
- data/modules/mu/config/vpc.yml +0 -1
- data/modules/mu/defaults/AWS.yaml +90 -90
- data/modules/mu/defaults/Azure.yaml +1 -0
- data/modules/mu/defaults/Google.yaml +1 -0
- data/modules/mu/deploy.rb +34 -20
- data/modules/mu/groomer.rb +16 -1
- data/modules/mu/groomers/ansible.rb +69 -4
- data/modules/mu/groomers/chef.rb +51 -4
- data/modules/mu/logger.rb +120 -144
- data/modules/mu/master.rb +97 -4
- data/modules/mu/mommacat.rb +160 -874
- data/modules/mu/mommacat/daemon.rb +23 -14
- data/modules/mu/mommacat/naming.rb +110 -3
- data/modules/mu/mommacat/search.rb +497 -0
- data/modules/mu/mommacat/storage.rb +252 -194
- data/modules/mu/{clouds → providers}/README.md +1 -1
- data/modules/mu/{clouds → providers}/aws.rb +258 -57
- data/modules/mu/{clouds → providers}/aws/alarm.rb +3 -3
- data/modules/mu/{clouds → providers}/aws/bucket.rb +275 -41
- data/modules/mu/{clouds → providers}/aws/cache_cluster.rb +14 -50
- data/modules/mu/providers/aws/cdn.rb +782 -0
- data/modules/mu/{clouds → providers}/aws/collection.rb +5 -5
- data/modules/mu/{clouds → providers}/aws/container_cluster.rb +95 -84
- data/modules/mu/providers/aws/database.rb +1744 -0
- data/modules/mu/{clouds → providers}/aws/dnszone.rb +26 -12
- data/modules/mu/providers/aws/endpoint.rb +1072 -0
- data/modules/mu/{clouds → providers}/aws/firewall_rule.rb +39 -32
- data/modules/mu/{clouds → providers}/aws/folder.rb +1 -1
- data/modules/mu/{clouds → providers}/aws/function.rb +289 -134
- data/modules/mu/{clouds → providers}/aws/group.rb +18 -20
- data/modules/mu/{clouds → providers}/aws/habitat.rb +3 -3
- data/modules/mu/providers/aws/job.rb +466 -0
- data/modules/mu/{clouds → providers}/aws/loadbalancer.rb +77 -47
- data/modules/mu/{clouds → providers}/aws/log.rb +5 -5
- data/modules/mu/{clouds → providers}/aws/msg_queue.rb +14 -11
- data/modules/mu/{clouds → providers}/aws/nosqldb.rb +96 -5
- data/modules/mu/{clouds → providers}/aws/notifier.rb +135 -63
- data/modules/mu/{clouds → providers}/aws/role.rb +76 -48
- data/modules/mu/{clouds → providers}/aws/search_domain.rb +172 -41
- data/modules/mu/{clouds → providers}/aws/server.rb +66 -98
- data/modules/mu/{clouds → providers}/aws/server_pool.rb +42 -60
- data/modules/mu/{clouds → providers}/aws/storage_pool.rb +21 -38
- data/modules/mu/{clouds → providers}/aws/user.rb +12 -16
- data/modules/mu/{clouds → providers}/aws/userdata/README.md +0 -0
- data/modules/mu/{clouds → providers}/aws/userdata/linux.erb +5 -4
- data/modules/mu/{clouds → providers}/aws/userdata/windows.erb +0 -0
- data/modules/mu/{clouds → providers}/aws/vpc.rb +143 -74
- data/modules/mu/{clouds → providers}/aws/vpc_subnet.rb +0 -0
- data/modules/mu/{clouds → providers}/azure.rb +13 -0
- data/modules/mu/{clouds → providers}/azure/container_cluster.rb +1 -5
- data/modules/mu/{clouds → providers}/azure/firewall_rule.rb +8 -1
- data/modules/mu/{clouds → providers}/azure/habitat.rb +0 -0
- data/modules/mu/{clouds → providers}/azure/loadbalancer.rb +0 -0
- data/modules/mu/{clouds → providers}/azure/role.rb +0 -0
- data/modules/mu/{clouds → providers}/azure/server.rb +32 -24
- data/modules/mu/{clouds → providers}/azure/user.rb +1 -1
- data/modules/mu/{clouds → providers}/azure/userdata/README.md +0 -0
- data/modules/mu/{clouds → providers}/azure/userdata/linux.erb +0 -0
- data/modules/mu/{clouds → providers}/azure/userdata/windows.erb +0 -0
- data/modules/mu/{clouds → providers}/azure/vpc.rb +4 -6
- data/modules/mu/{clouds → providers}/cloudformation.rb +10 -0
- data/modules/mu/{clouds → providers}/cloudformation/alarm.rb +3 -3
- data/modules/mu/{clouds → providers}/cloudformation/cache_cluster.rb +3 -3
- data/modules/mu/{clouds → providers}/cloudformation/collection.rb +3 -3
- data/modules/mu/{clouds → providers}/cloudformation/database.rb +6 -17
- data/modules/mu/{clouds → providers}/cloudformation/dnszone.rb +3 -3
- data/modules/mu/{clouds → providers}/cloudformation/firewall_rule.rb +3 -3
- data/modules/mu/{clouds → providers}/cloudformation/loadbalancer.rb +3 -3
- data/modules/mu/{clouds → providers}/cloudformation/log.rb +3 -3
- data/modules/mu/{clouds → providers}/cloudformation/server.rb +7 -7
- data/modules/mu/{clouds → providers}/cloudformation/server_pool.rb +5 -5
- data/modules/mu/{clouds → providers}/cloudformation/vpc.rb +3 -3
- data/modules/mu/{clouds → providers}/docker.rb +0 -0
- data/modules/mu/{clouds → providers}/google.rb +29 -6
- data/modules/mu/{clouds → providers}/google/bucket.rb +4 -4
- data/modules/mu/{clouds → providers}/google/container_cluster.rb +38 -20
- data/modules/mu/{clouds → providers}/google/database.rb +5 -12
- data/modules/mu/{clouds → providers}/google/firewall_rule.rb +5 -5
- data/modules/mu/{clouds → providers}/google/folder.rb +5 -9
- data/modules/mu/{clouds → providers}/google/function.rb +6 -6
- data/modules/mu/{clouds → providers}/google/group.rb +9 -17
- data/modules/mu/{clouds → providers}/google/habitat.rb +4 -8
- data/modules/mu/{clouds → providers}/google/loadbalancer.rb +5 -5
- data/modules/mu/{clouds → providers}/google/role.rb +50 -31
- data/modules/mu/{clouds → providers}/google/server.rb +41 -24
- data/modules/mu/{clouds → providers}/google/server_pool.rb +14 -14
- data/modules/mu/{clouds → providers}/google/user.rb +34 -24
- data/modules/mu/{clouds → providers}/google/userdata/README.md +0 -0
- data/modules/mu/{clouds → providers}/google/userdata/linux.erb +0 -0
- data/modules/mu/{clouds → providers}/google/userdata/windows.erb +0 -0
- data/modules/mu/{clouds → providers}/google/vpc.rb +45 -14
- data/modules/tests/aws-jobs-functions.yaml +46 -0
- data/modules/tests/centos6.yaml +15 -0
- data/modules/tests/centos7.yaml +15 -0
- data/modules/tests/centos8.yaml +12 -0
- data/modules/tests/ecs.yaml +2 -2
- data/modules/tests/eks.yaml +1 -1
- data/modules/tests/functions/node-function/lambda_function.js +10 -0
- data/modules/tests/functions/python-function/lambda_function.py +12 -0
- data/modules/tests/microservice_app.yaml +288 -0
- data/modules/tests/rds.yaml +108 -0
- data/modules/tests/regrooms/rds.yaml +123 -0
- data/modules/tests/server-with-scrub-muisms.yaml +1 -1
- data/modules/tests/super_complex_bok.yml +2 -2
- data/modules/tests/super_simple_bok.yml +3 -5
- data/spec/mu/clouds/azure_spec.rb +2 -2
- metadata +122 -92
- data/modules/mu/clouds/aws/database.rb +0 -1974
- data/modules/mu/clouds/aws/endpoint.rb +0 -596
|
@@ -152,7 +152,7 @@ module MU
|
|
|
152
152
|
instance_name = MU.deploy_id+"-"+@config['name']+"-"+resource.logical_resource_id
|
|
153
153
|
MU::Cloud::AWS.createTag(resource.physical_resource_id, "Name", instance_name, credentials: @config['credentials'])
|
|
154
154
|
|
|
155
|
-
instance = MU::Cloud
|
|
155
|
+
instance = MU::Cloud.resourceClass("AWS", "Server").notifyDeploy(
|
|
156
156
|
@config['name']+"-"+resource.logical_resource_id,
|
|
157
157
|
resource.physical_resource_id
|
|
158
158
|
)
|
|
@@ -170,7 +170,7 @@ module MU
|
|
|
170
170
|
|
|
171
171
|
mu_zone, _junk = MU::Cloud::DNSZone.find(name: "mu")
|
|
172
172
|
if !mu_zone.nil?
|
|
173
|
-
MU::Cloud
|
|
173
|
+
MU::Cloud.resourceClass("AWS", "DNSZone").genericMuDNSEntry(instance_name, instance["private_ip_address"], MU::Cloud::Server)
|
|
174
174
|
else
|
|
175
175
|
MU::Master.addInstanceToEtcHosts(instance["public_ip_address"], instance_name)
|
|
176
176
|
end
|
|
@@ -178,7 +178,7 @@ module MU
|
|
|
178
178
|
when "AWS::EC2::SecurityGroup"
|
|
179
179
|
MU::Cloud::AWS.createStandardTags(resource.physical_resource_id)
|
|
180
180
|
MU::Cloud::AWS.createTag(resource.physical_resource_id, "Name", MU.deploy_id+"-"+@config['name']+'-'+resource.logical_resource_id, credentials: @config['credentials'])
|
|
181
|
-
MU::Cloud
|
|
181
|
+
MU::Cloud.resourceClass("AWS", "FirewallRule").notifyDeploy(
|
|
182
182
|
@config['name']+"-"+resource.logical_resource_id,
|
|
183
183
|
resource.physical_resource_id
|
|
184
184
|
)
|
|
@@ -242,7 +242,7 @@ module MU
|
|
|
242
242
|
# @param region [String]: The cloud provider region
|
|
243
243
|
# @param wait [Boolean]: Block on the removal of this stack; AWS deletion will continue in the background otherwise if false.
|
|
244
244
|
# @return [void]
|
|
245
|
-
def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, wait: false, credentials: nil, flags: {})
|
|
245
|
+
def self.cleanup(noop: false, deploy_id: MU.deploy_id, ignoremaster: false, region: MU.curRegion, wait: false, credentials: nil, flags: {})
|
|
246
246
|
MU.log "AWS::Collection.cleanup: need to support flags['known']", MU::DEBUG, details: flags
|
|
247
247
|
MU.log "Placeholder: AWS Collection artifacts do not support tags, so ignoremaster cleanup flag has no effect", MU::DEBUG, details: ignoremaster
|
|
248
248
|
|
|
@@ -251,7 +251,7 @@ module MU
|
|
|
251
251
|
resp.stacks.each { |stack|
|
|
252
252
|
ok = false
|
|
253
253
|
stack.tags.each { |tag|
|
|
254
|
-
ok = true if (tag.key == "MU-ID") and tag.value ==
|
|
254
|
+
ok = true if (tag.key == "MU-ID") and tag.value == deploy_id
|
|
255
255
|
}
|
|
256
256
|
if ok
|
|
257
257
|
MU.log "Deleting CloudFormation stack #{stack.stack_name})"
|
|
@@ -67,16 +67,15 @@ module MU
|
|
|
67
67
|
# soul-crushing, yet effective
|
|
68
68
|
if e.message.match(/because (#{Regexp.quote(@config['region'])}[a-z]), the targeted availability zone, does not currently have sufficient capacity/)
|
|
69
69
|
bad_az = Regexp.last_match(1)
|
|
70
|
-
deletia =
|
|
70
|
+
deletia = []
|
|
71
71
|
mySubnets.each { |subnet|
|
|
72
|
-
if subnet.az == bad_az
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
72
|
+
deletia << subnet.cloud_id if subnet.az == bad_az
|
|
73
|
+
}
|
|
74
|
+
raise e if deletia.empty?
|
|
75
|
+
MU.log "#{bad_az} does not have EKS capacity. Dropping unsupported subnets from ContainerCluster '#{@config['name']}' and retrying.", MU::NOTICE, details: deletia
|
|
76
|
+
deletia.each { |subnet|
|
|
77
|
+
params[:resources_vpc_config][:subnet_ids].delete(subnet)
|
|
76
78
|
}
|
|
77
|
-
raise e if deletia.nil?
|
|
78
|
-
MU.log "#{bad_az} does not have EKS capacity. Dropping #{deletia} from ContainerCluster '#{@config['name']}' and retrying.", MU::NOTICE
|
|
79
|
-
params[:resources_vpc_config][:subnet_ids].delete(deletia)
|
|
80
79
|
end
|
|
81
80
|
}
|
|
82
81
|
|
|
@@ -288,6 +287,7 @@ MU.log c.name, MU::NOTICE, details: t
|
|
|
288
287
|
# @return [OpenStruct]
|
|
289
288
|
def cloud_desc(use_cache: true)
|
|
290
289
|
return @cloud_desc_cache if @cloud_desc_cache and use_cache
|
|
290
|
+
return nil if !@cloud_id
|
|
291
291
|
@cloud_desc_cache = if @config['flavor'] == "EKS" or
|
|
292
292
|
(@config['flavor'] == "Fargate" and !@config['containers'])
|
|
293
293
|
resp = MU::Cloud::AWS.eks(region: @config['region'], credentials: @config['credentials']).describe_cluster(
|
|
@@ -327,7 +327,7 @@ MU.log c.name, MU::NOTICE, details: t
|
|
|
327
327
|
end
|
|
328
328
|
|
|
329
329
|
@@eks_versions = {}
|
|
330
|
-
@@
|
|
330
|
+
@@eks_version_semaphores = {}
|
|
331
331
|
# Use the AWS SSM API to fetch the current version of the Amazon Linux
|
|
332
332
|
# ECS-optimized AMI, so we can use it as a default AMI for ECS deploys.
|
|
333
333
|
# @param flavor [String]: ECS or EKS
|
|
@@ -340,24 +340,22 @@ MU.log c.name, MU::NOTICE, details: t
|
|
|
340
340
|
names: ["/aws/service/#{flavor.downcase}/optimized-ami/amazon-linux/recommended"]
|
|
341
341
|
)
|
|
342
342
|
else
|
|
343
|
-
@@
|
|
343
|
+
@@eks_version_semaphores[region] ||= Mutex.new
|
|
344
|
+
|
|
345
|
+
@@eks_version_semaphores[region].synchronize {
|
|
344
346
|
if !@@eks_versions[region]
|
|
345
347
|
@@eks_versions[region] ||= []
|
|
346
348
|
versions = {}
|
|
347
|
-
resp =
|
|
348
|
-
|
|
349
|
-
|
|
350
|
-
|
|
351
|
-
|
|
352
|
-
|
|
353
|
-
|
|
354
|
-
)
|
|
355
|
-
|
|
356
|
-
|
|
357
|
-
versions[Regexp.last_match[1]] = true
|
|
358
|
-
}
|
|
359
|
-
next_token = resp.next_token
|
|
360
|
-
end while !next_token.nil?
|
|
349
|
+
resp = MU::Cloud::AWS.ssm(region: region).get_parameters_by_path(
|
|
350
|
+
path: "/aws/service/#{flavor.downcase}/optimized-ami",
|
|
351
|
+
recursive: true,
|
|
352
|
+
max_results: 10 # as high as it goes, ugh
|
|
353
|
+
)
|
|
354
|
+
|
|
355
|
+
resp.parameters.each { |p|
|
|
356
|
+
p.name.match(/\/aws\/service\/eks\/optimized-ami\/([^\/]+?)\//)
|
|
357
|
+
versions[Regexp.last_match[1]] = true
|
|
358
|
+
}
|
|
361
359
|
@@eks_versions[region] = versions.keys.sort { |a, b| MU.version_sort(a, b) }
|
|
362
360
|
end
|
|
363
361
|
}
|
|
@@ -377,15 +375,31 @@ MU.log c.name, MU::NOTICE, details: t
|
|
|
377
375
|
nil
|
|
378
376
|
end
|
|
379
377
|
|
|
378
|
+
@@supported_eks_region_cache = []
|
|
379
|
+
@@eks_region_semaphore = Mutex.new
|
|
380
|
+
|
|
380
381
|
# Return the list of regions where we know EKS is supported.
|
|
381
382
|
def self.EKSRegions(credentials = nil)
|
|
382
|
-
|
|
383
|
-
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
|
|
383
|
+
@@eks_region_semaphore.synchronize {
|
|
384
|
+
if @@supported_eks_region_cache and !@@supported_eks_region_cache.empty?
|
|
385
|
+
return @@supported_eks_region_cache
|
|
386
|
+
end
|
|
387
|
+
start = Time.now
|
|
388
|
+
# the SSM API is painfully slow for large result sets, so thread
|
|
389
|
+
# these and do them in parallel
|
|
390
|
+
@@supported_eks_region_cache = []
|
|
391
|
+
region_threads = []
|
|
392
|
+
MU::Cloud::AWS.listRegions(credentials: credentials).each { |region|
|
|
393
|
+
region_threads << Thread.new(region) { |r|
|
|
394
|
+
r_start = Time.now
|
|
395
|
+
ami = getStandardImage("EKS", r)
|
|
396
|
+
@@supported_eks_region_cache << r if ami
|
|
397
|
+
}
|
|
398
|
+
}
|
|
399
|
+
region_threads.each { |t| t.join }
|
|
387
400
|
|
|
388
|
-
|
|
401
|
+
@@supported_eks_region_cache
|
|
402
|
+
}
|
|
389
403
|
end
|
|
390
404
|
|
|
391
405
|
# Does this resource type exist as a global (cloud-wide) artifact, or
|
|
@@ -406,30 +420,32 @@ MU.log c.name, MU::NOTICE, details: t
|
|
|
406
420
|
# @param ignoremaster [Boolean]: If true, will remove resources not flagged as originating from this Mu server
|
|
407
421
|
# @param region [String]: The cloud provider region
|
|
408
422
|
# @return [void]
|
|
409
|
-
def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {})
|
|
423
|
+
def self.cleanup(noop: false, deploy_id: MU.deploy_id, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {})
|
|
410
424
|
MU.log "AWS::ContainerCluster.cleanup: need to support flags['known']", MU::DEBUG, details: flags
|
|
411
425
|
MU.log "Placeholder: AWS ContainerCluster artifacts do not support tags, so ignoremaster cleanup flag has no effect", MU::DEBUG, details: ignoremaster
|
|
412
426
|
|
|
413
|
-
purge_ecs_clusters(noop: noop, region: region, credentials: credentials)
|
|
427
|
+
purge_ecs_clusters(noop: noop, region: region, credentials: credentials, deploy_id: deploy_id)
|
|
414
428
|
|
|
415
|
-
purge_eks_clusters(noop: noop, region: region, credentials: credentials)
|
|
429
|
+
purge_eks_clusters(noop: noop, region: region, credentials: credentials, deploy_id: deploy_id)
|
|
416
430
|
|
|
417
431
|
end
|
|
418
432
|
|
|
419
|
-
def self.purge_eks_clusters(noop: false, region: MU.curRegion, credentials: nil)
|
|
420
|
-
return if !MU::Cloud::AWS::ContainerCluster.EKSRegions.include?(region)
|
|
433
|
+
def self.purge_eks_clusters(noop: false, region: MU.curRegion, credentials: nil, deploy_id: MU.deploy_id)
|
|
421
434
|
resp = begin
|
|
422
435
|
MU::Cloud::AWS.eks(credentials: credentials, region: region).list_clusters
|
|
423
436
|
rescue Aws::EKS::Errors::AccessDeniedException
|
|
424
437
|
# EKS isn't actually live in this region, even though SSM lists
|
|
425
438
|
# base images for it
|
|
439
|
+
if @@supported_eks_region_cache
|
|
440
|
+
@@supported_eks_region_cache.delete(region)
|
|
441
|
+
end
|
|
426
442
|
return
|
|
427
443
|
end
|
|
428
444
|
|
|
429
445
|
return if !resp or !resp.clusters
|
|
430
446
|
|
|
431
447
|
resp.clusters.each { |cluster|
|
|
432
|
-
if cluster.match(/^#{
|
|
448
|
+
if cluster.match(/^#{deploy_id}-/)
|
|
433
449
|
|
|
434
450
|
desc = MU::Cloud::AWS.eks(credentials: credentials, region: region).describe_cluster(
|
|
435
451
|
name: cluster
|
|
@@ -467,19 +483,20 @@ MU.log c.name, MU::NOTICE, details: t
|
|
|
467
483
|
MU.log "Waiting for EKS cluster #{cluster} to finish deleting (status #{status})", MU::NOTICE
|
|
468
484
|
end
|
|
469
485
|
}
|
|
470
|
-
# MU::Cloud
|
|
486
|
+
# MU::Cloud.resourceClass("AWS", "Server").removeIAMProfile(cluster)
|
|
471
487
|
end
|
|
472
488
|
}
|
|
473
489
|
end
|
|
474
490
|
private_class_method :purge_eks_clusters
|
|
475
491
|
|
|
476
|
-
def self.purge_ecs_clusters(noop: false, region: MU.curRegion, credentials: nil)
|
|
492
|
+
def self.purge_ecs_clusters(noop: false, region: MU.curRegion, credentials: nil, deploy_id: MU.deploy_id)
|
|
493
|
+
start = Time.now
|
|
477
494
|
resp = MU::Cloud::AWS.ecs(credentials: credentials, region: region).list_clusters
|
|
478
495
|
|
|
479
496
|
return if !resp or !resp.cluster_arns or resp.cluster_arns.empty?
|
|
480
497
|
|
|
481
498
|
resp.cluster_arns.each { |arn|
|
|
482
|
-
if arn.match(/:cluster\/(#{
|
|
499
|
+
if arn.match(/:cluster\/(#{deploy_id}[^:]+)$/)
|
|
483
500
|
cluster = Regexp.last_match[1]
|
|
484
501
|
|
|
485
502
|
svc_resp = MU::Cloud::AWS.ecs(region: region, credentials: credentials).list_services(
|
|
@@ -525,7 +542,7 @@ MU.log c.name, MU::NOTICE, details: t
|
|
|
525
542
|
}
|
|
526
543
|
|
|
527
544
|
tasks = MU::Cloud::AWS.ecs(region: region, credentials: credentials).list_task_definitions(
|
|
528
|
-
family_prefix:
|
|
545
|
+
family_prefix: deploy_id
|
|
529
546
|
)
|
|
530
547
|
|
|
531
548
|
if tasks and tasks.task_definition_arns
|
|
@@ -1215,18 +1232,18 @@ MU.log c.name, MU::NOTICE, details: t
|
|
|
1215
1232
|
# @return [Boolean]: True if validation succeeded, False otherwise
|
|
1216
1233
|
def self.validateConfig(cluster, configurator)
|
|
1217
1234
|
ok = true
|
|
1218
|
-
|
|
1219
|
-
cluster['size'] = MU::Cloud
|
|
1235
|
+
start = Time.now
|
|
1236
|
+
cluster['size'] = MU::Cloud.resourceClass("AWS", "Server").validateInstanceType(cluster["instance_type"], cluster["region"])
|
|
1220
1237
|
ok = false if cluster['size'].nil?
|
|
1221
1238
|
|
|
1222
1239
|
cluster["flavor"] = "EKS" if cluster["flavor"].match(/^Kubernetes$/i)
|
|
1223
1240
|
|
|
1224
|
-
if cluster["flavor"] == "ECS" and cluster["kubernetes"] and !MU::Cloud::AWS.isGovCloud?(cluster["region"]) and !cluster["containers"] and MU::Cloud::AWS::ContainerCluster.EKSRegions.include?(cluster['region'])
|
|
1241
|
+
if cluster["flavor"] == "ECS" and cluster["kubernetes"] and !MU::Cloud::AWS.isGovCloud?(cluster["region"]) and !cluster["containers"] and MU::Cloud::AWS::ContainerCluster.EKSRegions(cluster['credentials']).include?(cluster['region'])
|
|
1225
1242
|
cluster["flavor"] = "EKS"
|
|
1226
1243
|
MU.log "Setting flavor of ContainerCluster '#{cluster['name']}' to EKS ('kubernetes' stanza was specified)", MU::NOTICE
|
|
1227
1244
|
end
|
|
1228
1245
|
|
|
1229
|
-
if cluster["flavor"] == "EKS" and !MU::Cloud::AWS::ContainerCluster.EKSRegions.include?(cluster['region'])
|
|
1246
|
+
if cluster["flavor"] == "EKS" and !MU::Cloud::AWS::ContainerCluster.EKSRegions(cluster['credentials']).include?(cluster['region'])
|
|
1230
1247
|
MU.log "EKS is only available in some regions", MU::ERR, details: MU::Cloud::AWS::ContainerCluster.EKSRegions
|
|
1231
1248
|
ok = false
|
|
1232
1249
|
end
|
|
@@ -1296,7 +1313,7 @@ MU.log c.name, MU::NOTICE, details: t
|
|
|
1296
1313
|
end
|
|
1297
1314
|
|
|
1298
1315
|
if !created_generic_loggroup
|
|
1299
|
-
cluster
|
|
1316
|
+
MU::Config.addDependency(cluster, logname, "log")
|
|
1300
1317
|
logdesc = {
|
|
1301
1318
|
"name" => logname,
|
|
1302
1319
|
"region" => cluster["region"],
|
|
@@ -1335,10 +1352,7 @@ MU.log c.name, MU::NOTICE, details: t
|
|
|
1335
1352
|
}
|
|
1336
1353
|
configurator.insertKitten(roledesc, "roles")
|
|
1337
1354
|
|
|
1338
|
-
cluster
|
|
1339
|
-
"type" => "role",
|
|
1340
|
-
"name" => rolename
|
|
1341
|
-
}
|
|
1355
|
+
MU::Config.addDependency(cluster, rolename, "role")
|
|
1342
1356
|
end
|
|
1343
1357
|
|
|
1344
1358
|
created_generic_loggroup = true
|
|
@@ -1367,11 +1381,10 @@ MU.log c.name, MU::NOTICE, details: t
|
|
|
1367
1381
|
role["tags"] = cluster["tags"] if !cluster["tags"].nil?
|
|
1368
1382
|
role["optional_tags"] = cluster["optional_tags"] if !cluster["optional_tags"].nil?
|
|
1369
1383
|
configurator.insertKitten(role, "roles")
|
|
1370
|
-
cluster[
|
|
1371
|
-
|
|
1372
|
-
"
|
|
1373
|
-
|
|
1374
|
-
}
|
|
1384
|
+
MU::Config.addDependency(cluster, cluster["name"]+"pods", "role", phase: "groom")
|
|
1385
|
+
if !MU::Master.kubectl
|
|
1386
|
+
MU.log "Since I can't find a kubectl executable, you will have to handle all service account, user, and role bindings manually!", MU::WARN
|
|
1387
|
+
end
|
|
1375
1388
|
end
|
|
1376
1389
|
|
|
1377
1390
|
if MU::Cloud::AWS.isGovCloud?(cluster["region"]) and cluster["flavor"] == "EKS"
|
|
@@ -1470,17 +1483,17 @@ MU.log c.name, MU::NOTICE, details: t
|
|
|
1470
1483
|
end
|
|
1471
1484
|
|
|
1472
1485
|
if cluster["flavor"] == "EKS"
|
|
1486
|
+
|
|
1487
|
+
if !MU::Master.kubectl
|
|
1488
|
+
MU.log "Without a kubectl executable, I cannot bind IAM roles to EKS worker nodes", MU::ERR
|
|
1489
|
+
ok = false
|
|
1490
|
+
end
|
|
1473
1491
|
worker_pool["canned_iam_policies"] = [
|
|
1474
1492
|
"AmazonEKSWorkerNodePolicy",
|
|
1475
1493
|
"AmazonEKS_CNI_Policy",
|
|
1476
1494
|
"AmazonEC2ContainerRegistryReadOnly"
|
|
1477
1495
|
]
|
|
1478
|
-
worker_pool["
|
|
1479
|
-
{
|
|
1480
|
-
"type" => "container_cluster",
|
|
1481
|
-
"name" => cluster['name']
|
|
1482
|
-
}
|
|
1483
|
-
]
|
|
1496
|
+
MU::Config.addDependency(worker_pool, cluster["name"], "container_cluster")
|
|
1484
1497
|
worker_pool["run_list"] = ["recipe[mu-tools::eks]"]
|
|
1485
1498
|
worker_pool["run_list"].concat(cluster["run_list"]) if cluster["run_list"]
|
|
1486
1499
|
MU::Config::Server.common_properties.keys.each { |k|
|
|
@@ -1488,16 +1501,14 @@ MU.log c.name, MU::NOTICE, details: t
|
|
|
1488
1501
|
worker_pool[k] = cluster[k]
|
|
1489
1502
|
end
|
|
1490
1503
|
}
|
|
1491
|
-
|
|
1504
|
+
else
|
|
1505
|
+
worker_pool["groom"] = false # don't meddle with ECS workers unnecessarily
|
|
1492
1506
|
end
|
|
1493
1507
|
|
|
1494
1508
|
configurator.insertKitten(worker_pool, "server_pools")
|
|
1495
1509
|
|
|
1496
1510
|
if cluster["flavor"] == "ECS"
|
|
1497
|
-
cluster["
|
|
1498
|
-
"name" => cluster["name"]+"workers",
|
|
1499
|
-
"type" => "server_pool",
|
|
1500
|
-
}
|
|
1511
|
+
MU::Config.addDependency(cluster, cluster["name"]+"workers", "server_pool")
|
|
1501
1512
|
end
|
|
1502
1513
|
|
|
1503
1514
|
end
|
|
@@ -1519,11 +1530,7 @@ MU.log c.name, MU::NOTICE, details: t
|
|
|
1519
1530
|
role["tags"] = cluster["tags"] if !cluster["tags"].nil?
|
|
1520
1531
|
role["optional_tags"] = cluster["optional_tags"] if !cluster["optional_tags"].nil?
|
|
1521
1532
|
configurator.insertKitten(role, "roles")
|
|
1522
|
-
cluster[
|
|
1523
|
-
"type" => "role",
|
|
1524
|
-
"name" => cluster["name"]+"controlplane",
|
|
1525
|
-
"phase" => "groom"
|
|
1526
|
-
}
|
|
1533
|
+
MU::Config.addDependency(cluster, cluster["name"]+"controlplane", "role", phase: "groom")
|
|
1527
1534
|
end
|
|
1528
1535
|
|
|
1529
1536
|
ok
|
|
@@ -1602,19 +1609,21 @@ MU.log c.name, MU::NOTICE, details: t
|
|
|
1602
1609
|
raise MuError, "Failed to apply #{authmap_cmd}" if $?.exitstatus != 0
|
|
1603
1610
|
end
|
|
1604
1611
|
|
|
1605
|
-
|
|
1606
|
-
|
|
1607
|
-
|
|
1608
|
-
|
|
1609
|
-
|
|
1610
|
-
|
|
1611
|
-
|
|
1612
|
-
|
|
1613
|
-
|
|
1614
|
-
|
|
1615
|
-
|
|
1616
|
-
|
|
1617
|
-
|
|
1612
|
+
if MU::Master.kubectl
|
|
1613
|
+
admin_user_cmd = %Q{#{MU::Master.kubectl} --kubeconfig "#{kube_conf}" apply -f "#{MU.myRoot}/extras/admin-user.yaml"}
|
|
1614
|
+
admin_role_cmd = %Q{#{MU::Master.kubectl} --kubeconfig "#{kube_conf}" apply -f "#{MU.myRoot}/extras/admin-role-binding.yaml"}
|
|
1615
|
+
MU.log "Configuring Kubernetes admin-user and role", MU::NOTICE, details: admin_user_cmd+"\n"+admin_role_cmd
|
|
1616
|
+
%x{#{admin_user_cmd}}
|
|
1617
|
+
%x{#{admin_role_cmd}}
|
|
1618
|
+
|
|
1619
|
+
if @config['kubernetes_resources']
|
|
1620
|
+
MU::Master.applyKubernetesResources(
|
|
1621
|
+
@config['name'],
|
|
1622
|
+
@config['kubernetes_resources'],
|
|
1623
|
+
kubeconfig: kube_conf,
|
|
1624
|
+
outputdir: @deploy.deploy_dir
|
|
1625
|
+
)
|
|
1626
|
+
end
|
|
1618
1627
|
end
|
|
1619
1628
|
|
|
1620
1629
|
MU.log %Q{How to interact with your EKS cluster\nkubectl --kubeconfig "#{kube_conf}" get all\nkubectl --kubeconfig "#{kube_conf}" create -f some_k8s_deploy.yml\nkubectl --kubeconfig "#{kube_conf}" get nodes}, MU::SUMMARY
|
|
@@ -1740,7 +1749,7 @@ MU.log c.name, MU::NOTICE, details: t
|
|
|
1740
1749
|
@deploy.findLitterMate(type: "server_pools", name: @config["name"]+"workers")
|
|
1741
1750
|
end
|
|
1742
1751
|
serverpool.listNodes.each { |mynode|
|
|
1743
|
-
resources = resource_lookup[
|
|
1752
|
+
resources = resource_lookup[mynode.cloud_desc.instance_type]
|
|
1744
1753
|
threads << Thread.new(mynode) { |node|
|
|
1745
1754
|
ident_doc = nil
|
|
1746
1755
|
ident_doc_sig = nil
|
|
@@ -1941,6 +1950,8 @@ MU.log c.name, MU::NOTICE, details: t
|
|
|
1941
1950
|
task_params[:network_mode] = "awsvpc"
|
|
1942
1951
|
task_params[:cpu] = cpu_total.to_i.to_s
|
|
1943
1952
|
task_params[:memory] = mem_total.to_i.to_s
|
|
1953
|
+
elsif @config['vpc']
|
|
1954
|
+
task_params[:network_mode] = "awsvpc"
|
|
1944
1955
|
end
|
|
1945
1956
|
|
|
1946
1957
|
MU.log "Registering task definition #{service_name} with #{container_definitions.size.to_s} containers"
|
|
@@ -0,0 +1,1744 @@
|
|
|
1
|
+
## Copyright:: Copyright (c) 2014 eGlobalTech, Inc., all rights reserved
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the BSD-3 license (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License in the root of the project or at
|
|
6
|
+
#
|
|
7
|
+
# http://egt-labs.com/mu/LICENSE.html
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
autoload :Net, 'net/ssh/gateway'
|
|
16
|
+
|
|
17
|
+
module MU
|
|
18
|
+
class Cloud
|
|
19
|
+
class AWS
|
|
20
|
+
# A database as configured in {MU::Config::BasketofKittens::databases}
|
|
21
|
+
class Database < MU::Cloud::Database
|
|
22
|
+
|
|
23
|
+
# Map legal storage values for each disk type and database engine so
|
|
24
|
+
# our validator can check them for us.
|
|
25
|
+
STORAGE_RANGES = {
|
|
26
|
+
"io1" => {
|
|
27
|
+
"postgres" => 100..65536,
|
|
28
|
+
"mysql" => 100..65536,
|
|
29
|
+
"mariadb" => 100..65536,
|
|
30
|
+
"oracle-se1" => 100..65536,
|
|
31
|
+
"oracle-se2" => 100..65536,
|
|
32
|
+
"oracle-se" => 100..65536,
|
|
33
|
+
"oracle-ee" => 100..65536,
|
|
34
|
+
"sqlserver-ex" => 100..16384,
|
|
35
|
+
"sqlserver-web" => 100..16384,
|
|
36
|
+
"sqlserver-ee" => 200..16384,
|
|
37
|
+
"sqlserver-se" => 200..16384
|
|
38
|
+
},
|
|
39
|
+
"gp2" => {
|
|
40
|
+
"postgres" => 20..65536,
|
|
41
|
+
"mysql" => 20..65536,
|
|
42
|
+
"mariadb" => 20..65536,
|
|
43
|
+
"oracle-se1" => 20..65536,
|
|
44
|
+
"oracle-se2" => 20..65536,
|
|
45
|
+
"oracle-se" => 20..65536,
|
|
46
|
+
"oracle-ee" => 20..65536,
|
|
47
|
+
"sqlserver-ex" => 20..16384,
|
|
48
|
+
"sqlserver-web" => 20..16384,
|
|
49
|
+
"sqlserver-ee" => 200..16384,
|
|
50
|
+
"sqlserver-se" => 200..16384
|
|
51
|
+
},
|
|
52
|
+
"standard" => {
|
|
53
|
+
"postgres" => 5..3072,
|
|
54
|
+
"mysql" => 5..3072,
|
|
55
|
+
"mariadb" => 5..3072,
|
|
56
|
+
"oracle-se1" => 10..3072,
|
|
57
|
+
"oracle-se2" => 10..3072,
|
|
58
|
+
"oracle-se" => 10..3072,
|
|
59
|
+
"oracle-ee" => 10..3072,
|
|
60
|
+
"sqlserver-ex" => 20..1024, # ???
|
|
61
|
+
"sqlserver-web" => 20..1024, # ???
|
|
62
|
+
"sqlserver-ee" => 200..4096, # ???
|
|
63
|
+
"sqlserver-se" => 200..4096 # ???
|
|
64
|
+
}
|
|
65
|
+
}.freeze
|
|
66
|
+
|
|
67
|
+
# List of parameters that are legal to set in +modify_db_instance+ and +modify_db_cluster+
|
|
68
|
+
MODIFIABLE = {
|
|
69
|
+
"instance" => [
|
|
70
|
+
:allocated_storage,
|
|
71
|
+
:db_instance_class,
|
|
72
|
+
:db_subnet_group_name,
|
|
73
|
+
:db_security_groups,
|
|
74
|
+
:vpc_security_group_ids,
|
|
75
|
+
:master_user_password,
|
|
76
|
+
:db_parameter_group_name,
|
|
77
|
+
:backup_retention_period,
|
|
78
|
+
:preferred_backup_window,
|
|
79
|
+
:preferred_maintenance_window,
|
|
80
|
+
:multi_az,
|
|
81
|
+
:engine_version,
|
|
82
|
+
:allow_major_version_upgrade,
|
|
83
|
+
:auto_minor_version_upgrade,
|
|
84
|
+
:license_model,
|
|
85
|
+
:iops,
|
|
86
|
+
:option_group_name,
|
|
87
|
+
:new_db_instance_identifier,
|
|
88
|
+
:storage_type,
|
|
89
|
+
:tde_credential_arn,
|
|
90
|
+
:tde_credential_password,
|
|
91
|
+
:ca_certificate_identifier,
|
|
92
|
+
:domain,
|
|
93
|
+
:copy_tags_to_snapshot,
|
|
94
|
+
:monitoring_interval,
|
|
95
|
+
:db_port_number,
|
|
96
|
+
:publicly_accessible,
|
|
97
|
+
:monitoring_role_arn,
|
|
98
|
+
:domain_iam_role_name,
|
|
99
|
+
:promotion_tier,
|
|
100
|
+
:enable_iam_database_authentication,
|
|
101
|
+
:enable_performance_insights,
|
|
102
|
+
:performance_insights_kms_key_id,
|
|
103
|
+
:performance_insights_retention_period,
|
|
104
|
+
:cloudwatch_logs_export_configuration,
|
|
105
|
+
:processor_features,
|
|
106
|
+
:use_default_processor_features,
|
|
107
|
+
:deletion_protection,
|
|
108
|
+
:max_allocated_storage,
|
|
109
|
+
:certificate_rotation_restart
|
|
110
|
+
],
|
|
111
|
+
"cluster" => [
|
|
112
|
+
:new_db_cluster_identifier,
|
|
113
|
+
:backup_retention_period,
|
|
114
|
+
:db_cluster_parameter_group_name,
|
|
115
|
+
:vpc_security_group_ids,
|
|
116
|
+
:port,
|
|
117
|
+
:master_user_password,
|
|
118
|
+
:option_group_name,
|
|
119
|
+
:preferred_backup_window,
|
|
120
|
+
:preferred_maintenance_window,
|
|
121
|
+
:enable_iam_database_authentication,
|
|
122
|
+
:backtrack_window,
|
|
123
|
+
:cloudwatch_logs_export_configuration,
|
|
124
|
+
:engine_version,
|
|
125
|
+
:allow_major_version_upgrade,
|
|
126
|
+
:db_instance_parameter_group_name,
|
|
127
|
+
:domain,
|
|
128
|
+
:domain_iam_role_name,
|
|
129
|
+
:scaling_configuration,
|
|
130
|
+
:deletion_protection,
|
|
131
|
+
:enable_http_endpoint,
|
|
132
|
+
:copy_tags_to_snapshot,
|
|
133
|
+
]
|
|
134
|
+
}
|
|
135
|
+
|
|
136
|
+
# Initialize this cloud resource object. Calling +super+ will invoke the initializer defined under {MU::Cloud}, which should set the attribtues listed in {MU::Cloud::PUBLIC_ATTRS} as well as applicable dependency shortcuts, like +@vpc+, for us.
|
|
137
|
+
# @param args [Hash]: Hash of named arguments passed via Ruby's double-splat
|
|
138
|
+
def initialize(**args)
|
|
139
|
+
super
|
|
140
|
+
@config["groomer"] = MU::Config.defaultGroomer unless @config["groomer"]
|
|
141
|
+
@groomclass = MU::Groomer.loadGroomer(@config["groomer"])
|
|
142
|
+
|
|
143
|
+
@mu_name ||=
|
|
144
|
+
if @config and @config['engine'] and @config["engine"].match(/^sqlserver/)
|
|
145
|
+
@deploy.getResourceName(@config["name"], max_length: 15)
|
|
146
|
+
else
|
|
147
|
+
@deploy.getResourceName(@config["name"], max_length: 63)
|
|
148
|
+
end
|
|
149
|
+
|
|
150
|
+
@mu_name.gsub(/(--|-$)/i, "").gsub(/(_)/, "-").gsub!(/^[^a-z]/i, "")
|
|
151
|
+
if @config.has_key?("parameter_group_family")
|
|
152
|
+
@config["parameter_group_name"] ||= @mu_name
|
|
153
|
+
end
|
|
154
|
+
|
|
155
|
+
if args[:from_cloud_desc] and args[:from_cloud_desc].is_a?(Aws::RDS::Types::DBCluster)
|
|
156
|
+
@config['create_cluster'] = true
|
|
157
|
+
end
|
|
158
|
+
if @config['source']
|
|
159
|
+
@config["source"] = MU::Config::Ref.get(@config["source"])
|
|
160
|
+
elsif @config["read_replica_of"]
|
|
161
|
+
@config["source"] = MU::Config::Ref.get(@config["read_replica_of"])
|
|
162
|
+
end
|
|
163
|
+
end
|
|
164
|
+
|
|
165
|
+
# Called automatically by {MU::Deploy#createResources}
|
|
166
|
+
# @return [String]: The cloud provider's identifier for this database instance.
|
|
167
|
+
def create
|
|
168
|
+
# RDS is picky, we can't just use our regular node names for things like
|
|
169
|
+
# the default schema or username. And it varies from engine to engine.
|
|
170
|
+
basename = @config["name"]+@deploy.timestamp+MU.seed.downcase
|
|
171
|
+
basename.gsub!(/[^a-z0-9]/i, "")
|
|
172
|
+
@config["db_name"] = MU::Cloud::AWS::Database.getName(basename, type: "dbname", config: @config)
|
|
173
|
+
@config['master_user'] = MU::Cloud::AWS::Database.getName(basename, type: "dbuser", config: @config) unless @config['master_user']
|
|
174
|
+
@cloud_id = @mu_name
|
|
175
|
+
|
|
176
|
+
# Lets make sure automatic backups are enabled when DB instance is deployed in Multi-AZ so failover actually works. Maybe default to 1 instead?
|
|
177
|
+
if @config['multi_az_on_create'] or @config['multi_az_on_deploy'] or @config["create_cluster"]
|
|
178
|
+
if @config["backup_retention_period"].nil? or @config["backup_retention_period"] == 0
|
|
179
|
+
@config["backup_retention_period"] = 35
|
|
180
|
+
MU.log "Multi-AZ deployment specified but backup retention period disabled or set to 0. Changing to #{@config["backup_retention_period"]} ", MU::WARN
|
|
181
|
+
end
|
|
182
|
+
|
|
183
|
+
if @config["preferred_backup_window"].nil?
|
|
184
|
+
@config["preferred_backup_window"] = "05:00-05:30"
|
|
185
|
+
MU.log "Multi-AZ deployment specified but no backup window specified. Changing to #{@config["preferred_backup_window"]} ", MU::WARN
|
|
186
|
+
end
|
|
187
|
+
end
|
|
188
|
+
|
|
189
|
+
@config["snapshot_id"] =
|
|
190
|
+
if @config["creation_style"] == "existing_snapshot"
|
|
191
|
+
getExistingSnapshot ? getExistingSnapshot : createNewSnapshot
|
|
192
|
+
elsif @config["creation_style"] == "new_snapshot"
|
|
193
|
+
createNewSnapshot
|
|
194
|
+
end
|
|
195
|
+
|
|
196
|
+
@config["subnet_group_name"] = @mu_name if @vpc
|
|
197
|
+
|
|
198
|
+
if @config["create_cluster"]
|
|
199
|
+
getPassword
|
|
200
|
+
manageSubnetGroup
|
|
201
|
+
|
|
202
|
+
if @config.has_key?("parameter_group_family")
|
|
203
|
+
manageDbParameterGroup(true)
|
|
204
|
+
end
|
|
205
|
+
|
|
206
|
+
@config["cluster_identifier"] ||= @cloud_id
|
|
207
|
+
|
|
208
|
+
if @config['creation_style'] == "point_in_time"
|
|
209
|
+
create_point_in_time
|
|
210
|
+
else
|
|
211
|
+
create_basic
|
|
212
|
+
end
|
|
213
|
+
|
|
214
|
+
wait_until_available
|
|
215
|
+
|
|
216
|
+
if %w{existing_snapshot new_snapshot point_in_time}.include?(@config["creation_style"])
|
|
217
|
+
modify_db_cluster_struct = {
|
|
218
|
+
db_cluster_identifier: @cloud_id,
|
|
219
|
+
apply_immediately: true,
|
|
220
|
+
backup_retention_period: @config["backup_retention_period"],
|
|
221
|
+
db_cluster_parameter_group_name: @config["parameter_group_name"],
|
|
222
|
+
master_user_password: @config["password"],
|
|
223
|
+
preferred_backup_window: @config["preferred_backup_window"]
|
|
224
|
+
}
|
|
225
|
+
|
|
226
|
+
modify_db_cluster_struct[:preferred_maintenance_window] = @config["preferred_maintenance_window"] if @config["preferred_maintenance_window"]
|
|
227
|
+
MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_cluster(modify_db_cluster_struct)
|
|
228
|
+
wait_until_available
|
|
229
|
+
end
|
|
230
|
+
|
|
231
|
+
do_naming
|
|
232
|
+
elsif @config["add_cluster_node"]
|
|
233
|
+
add_cluster_node
|
|
234
|
+
else
|
|
235
|
+
add_basic
|
|
236
|
+
end
|
|
237
|
+
end
|
|
238
|
+
|
|
239
|
+
# Canonical Amazon Resource Number for this resource
|
|
240
|
+
# @return [String]
|
|
241
|
+
def arn
|
|
242
|
+
cloud_desc.db_instance_arn
|
|
243
|
+
end
|
|
244
|
+
|
|
245
|
+
# Locate an existing Database or Databases and return an array containing matching AWS resource descriptors for those that match.
|
|
246
|
+
# @return [Hash<String,OpenStruct>]: The cloud provider's complete descriptions of matching Databases
|
|
247
|
+
def self.find(**args)
|
|
248
|
+
found = {}
|
|
249
|
+
|
|
250
|
+
if args[:cloud_id]
|
|
251
|
+
if !args[:cluster]
|
|
252
|
+
begin
|
|
253
|
+
resp = MU::Cloud::AWS.rds(region: args[:region], credentials: args[:credentials]).describe_db_instances(db_instance_identifier: args[:cloud_id]).db_instances.first
|
|
254
|
+
return { args[:cloud_id] => resp } if resp
|
|
255
|
+
rescue Aws::RDS::Errors::DBInstanceNotFound
|
|
256
|
+
MU.log "No results found looking for RDS instance #{args[:cloud_id]}", MU::DEBUG
|
|
257
|
+
end
|
|
258
|
+
end
|
|
259
|
+
begin
|
|
260
|
+
resp = MU::Cloud::AWS.rds(region: args[:region], credentials: args[:credentials]).describe_db_clusters(db_cluster_identifier: args[:cloud_id]).db_clusters.first
|
|
261
|
+
rescue Aws::RDS::Errors::DBClusterNotFoundFault
|
|
262
|
+
MU.log "No results found looking for RDS cluster #{args[:cloud_id]}", MU::DEBUG
|
|
263
|
+
end
|
|
264
|
+
return { args[:cloud_id] => resp } if resp
|
|
265
|
+
|
|
266
|
+
else
|
|
267
|
+
fetch = Proc.new { |noun|
|
|
268
|
+
resp = MU::Cloud::AWS.rds(credentials: args[:credentials], region: args[:region]).send("describe_db_#{noun}s".to_sym)
|
|
269
|
+
resp.send("db_#{noun}s").each { |db|
|
|
270
|
+
found[db.send("db_#{noun}_identifier".to_sym)] = db
|
|
271
|
+
}
|
|
272
|
+
}
|
|
273
|
+
if args[:cluster] or !args.has_key?(:cluster)
|
|
274
|
+
fetch.call("cluster")
|
|
275
|
+
end
|
|
276
|
+
if !args[:cluster]
|
|
277
|
+
fetch.call("instance")
|
|
278
|
+
end
|
|
279
|
+
if args[:tag_key] and args[:tag_value]
|
|
280
|
+
keep = []
|
|
281
|
+
found.each_pair { |id, desc|
|
|
282
|
+
noun = desc.is_a?(Aws::RDS::Types::DBCluster) ? "cluster" : "db"
|
|
283
|
+
resp = MU::Cloud::AWS.rds(credentials: args[:credentials], region: args[:region]).list_tags_for_resource(
|
|
284
|
+
resource_name: MU::Cloud::AWS::Database.getARN(id, noun, "rds", region: args[:region], credentials: args[:credentials])
|
|
285
|
+
)
|
|
286
|
+
if resp and resp.tag_list
|
|
287
|
+
resp.tag_list.each { |tag|
|
|
288
|
+
if tag.key == args[:tag_key] and tag.value == args[:tag_value]
|
|
289
|
+
keep << id
|
|
290
|
+
break
|
|
291
|
+
end
|
|
292
|
+
}
|
|
293
|
+
end
|
|
294
|
+
}
|
|
295
|
+
found.reject! { |k, _v| !keep.include?(k) }
|
|
296
|
+
end
|
|
297
|
+
end
|
|
298
|
+
|
|
299
|
+
return found
|
|
300
|
+
end
|
|
301
|
+
|
|
302
|
+
# Reverse-map our cloud description into a runnable config hash.
|
|
303
|
+
# We assume that any values we have in +@config+ are placeholders, and
|
|
304
|
+
# calculate our own accordingly based on what's live in the cloud.
|
|
305
|
+
def toKitten(**_args)
|
|
306
|
+
bok = {
|
|
307
|
+
"cloud" => "AWS",
|
|
308
|
+
"region" => @config['region'],
|
|
309
|
+
"credentials" => @credentials,
|
|
310
|
+
"cloud_id" => @cloud_id,
|
|
311
|
+
}
|
|
312
|
+
|
|
313
|
+
# Don't adopt cluster members, they'll be picked up by the parent
|
|
314
|
+
# cluster
|
|
315
|
+
if !@config["create_cluster"] and cloud_desc.db_cluster_identifier and !cloud_desc.db_cluster_identifier.empty?
|
|
316
|
+
return nil
|
|
317
|
+
end
|
|
318
|
+
|
|
319
|
+
noun = @config["create_cluster"] ? "cluster" : "db"
|
|
320
|
+
tags = MU::Cloud::AWS.rds(credentials: @credentials, region: @config['region']).list_tags_for_resource(
|
|
321
|
+
resource_name: MU::Cloud::AWS::Database.getARN(@cloud_id, noun, "rds", region: @config['region'], credentials: @credentials)
|
|
322
|
+
).tag_list
|
|
323
|
+
if tags and !tags.empty?
|
|
324
|
+
bok['tags'] = MU.structToHash(tags, stringify_keys: true)
|
|
325
|
+
bok['name'] = MU::Adoption.tagsToName(bok['tags'])
|
|
326
|
+
end
|
|
327
|
+
bok["name"] ||= @cloud_id
|
|
328
|
+
bok['engine'] = cloud_desc.engine
|
|
329
|
+
bok['engine_version'] = cloud_desc.engine_version
|
|
330
|
+
bok['master_user'] = cloud_desc.master_username
|
|
331
|
+
bok['backup_retention_period'] = cloud_desc.backup_retention_period
|
|
332
|
+
bok["create_cluster"] = true if @config['create_cluster']
|
|
333
|
+
|
|
334
|
+
params = if bok['create_cluster']
|
|
335
|
+
MU::Cloud::AWS.rds(credentials: @credentials, region: @config['region']).describe_db_cluster_parameters(
|
|
336
|
+
db_cluster_parameter_group_name: cloud_desc.db_cluster_parameter_group
|
|
337
|
+
).parameters
|
|
338
|
+
else
|
|
339
|
+
MU::Cloud::AWS.rds(credentials: @credentials, region: @config['region']).describe_db_parameters(
|
|
340
|
+
db_parameter_group_name: cloud_desc.db_parameter_groups.first.db_parameter_group_name
|
|
341
|
+
).parameters
|
|
342
|
+
end
|
|
343
|
+
|
|
344
|
+
params.reject! { |p| ["engine-default", "system"].include?(p.source) }
|
|
345
|
+
if params and params.size > 0
|
|
346
|
+
bok[(bok['create_cluster'] ? "cluster_" : "")+'parameter_group_parameters'] = params.map { |p|
|
|
347
|
+
{ "key" => p.parameter_name, "value" => p.parameter_value }
|
|
348
|
+
}
|
|
349
|
+
end
|
|
350
|
+
|
|
351
|
+
bok['add_firewall_rules'] = cloud_desc.vpc_security_groups.map { |sg|
|
|
352
|
+
MU::Config::Ref.get(
|
|
353
|
+
id: sg.vpc_security_group_id,
|
|
354
|
+
cloud: "AWS",
|
|
355
|
+
credentials: @credentials,
|
|
356
|
+
region: @config['region'],
|
|
357
|
+
type: "firewall_rules",
|
|
358
|
+
)
|
|
359
|
+
}
|
|
360
|
+
bok['preferred_backup_window'] = cloud_desc.preferred_backup_window
|
|
361
|
+
bok['preferred_maintenance_window'] = cloud_desc.preferred_maintenance_window
|
|
362
|
+
bok['backup_retention_period'] = cloud_desc.backup_retention_period if cloud_desc.backup_retention_period > 1
|
|
363
|
+
bok['multi_az_on_groom'] = true if cloud_desc.multi_az
|
|
364
|
+
bok['storage_encrypted'] = true if cloud_desc.storage_encrypted
|
|
365
|
+
|
|
366
|
+
if bok['create_cluster']
|
|
367
|
+
bok['cluster_node_count'] = cloud_desc.db_cluster_members.size
|
|
368
|
+
bok['cluster_mode'] = cloud_desc.engine_mode
|
|
369
|
+
bok['port'] = cloud_desc.port
|
|
370
|
+
|
|
371
|
+
sizes = []
|
|
372
|
+
vpcs = []
|
|
373
|
+
# we have no sensible way to handle heterogenous cluster members, so
|
|
374
|
+
# for now just assume they're all the same
|
|
375
|
+
cloud_desc.db_cluster_members.each { |db|
|
|
376
|
+
member = MU::Cloud::AWS::Database.find(cloud_id: db.db_instance_identifier, region: @config['region'], credentials: @credentials).values.first
|
|
377
|
+
|
|
378
|
+
sizes << member.db_instance_class
|
|
379
|
+
if member.db_subnet_group and member.db_subnet_group.vpc_id
|
|
380
|
+
vpcs << member.db_subnet_group
|
|
381
|
+
end
|
|
382
|
+
bok
|
|
383
|
+
}
|
|
384
|
+
sizes.uniq!
|
|
385
|
+
vpcs.uniq!
|
|
386
|
+
bok['size'] = sizes.sort.first if !sizes.empty?
|
|
387
|
+
if !vpcs.empty?
|
|
388
|
+
myvpc = MU::MommaCat.findStray("AWS", "vpc", cloud_id: vpcs.sort.first.vpc_id, credentials: @credentials, region: @config['region'], dummy_ok: true, no_deploy_search: true).first
|
|
389
|
+
bok['vpc'] = myvpc.getReference(vpcs.sort.first.subnets.map { |s| s.subnet_identifier })
|
|
390
|
+
end
|
|
391
|
+
else
|
|
392
|
+
bok['size'] = cloud_desc.db_instance_class
|
|
393
|
+
bok['auto_minor_version_upgrade'] = true if cloud_desc.auto_minor_version_upgrade
|
|
394
|
+
if cloud_desc.db_subnet_group
|
|
395
|
+
myvpc = MU::MommaCat.findStray("AWS", "vpc", cloud_id: cloud_desc.db_subnet_group.vpc_id, credentials: @credentials, region: @config['region'], dummy_ok: true, no_deploy_search: true).first
|
|
396
|
+
bok['vpc'] = myvpc.getReference(cloud_desc.db_subnet_group.subnets.map { |s| s.subnet_identifier })
|
|
397
|
+
end
|
|
398
|
+
bok['storage_type'] = cloud_desc.storage_type
|
|
399
|
+
bok['storage'] = cloud_desc.allocated_storage
|
|
400
|
+
bok['license_model'] = cloud_desc.license_model
|
|
401
|
+
bok['publicly_accessible'] = true if cloud_desc.publicly_accessible
|
|
402
|
+
bok['port'] = cloud_desc.endpoint.port
|
|
403
|
+
|
|
404
|
+
if cloud_desc.read_replica_source_db_instance_identifier
|
|
405
|
+
bok['read_replica_of'] = MU::Config::Ref.get(
|
|
406
|
+
id: cloud_desc.read_replica_source_db_instance_identifier.split(/:/).last,
|
|
407
|
+
name: cloud_desc.read_replica_source_db_instance_identifier.split(/:/).last,
|
|
408
|
+
cloud: "AWS",
|
|
409
|
+
region: cloud_desc.read_replica_source_db_instance_identifier.split(/:/)[3],
|
|
410
|
+
credentials: @credentials,
|
|
411
|
+
type: "databases",
|
|
412
|
+
)
|
|
413
|
+
end
|
|
414
|
+
end
|
|
415
|
+
|
|
416
|
+
if cloud_desc.enabled_cloudwatch_logs_exports and
|
|
417
|
+
cloud_desc.enabled_cloudwatch_logs_exports.size > 0
|
|
418
|
+
bok['cloudwatch_logs'] = cloud_desc.enabled_cloudwatch_logs_exports
|
|
419
|
+
end
|
|
420
|
+
|
|
421
|
+
bok
|
|
422
|
+
end
|
|
423
|
+
|
|
424
|
+
# Construct an Amazon Resource Name for an RDS resource. The RDS API is
|
|
425
|
+
# peculiar, and we often need this identifier in order to do things that
|
|
426
|
+
# the other APIs can do with shorthand.
|
|
427
|
+
# @param resource [String]: The name of the resource
|
|
428
|
+
# @param resource_type [String]: The type of the resource (one of `db, es, og, pg, ri, secgrp, snapshot, subgrp`)
|
|
429
|
+
# @param client_type [String]: The name of the client (eg. elasticache, rds, ec2, s3)
|
|
430
|
+
# @param region [String]: The region in which the resource resides.
|
|
431
|
+
# @param account_number [String]: The account in which the resource resides.
|
|
432
|
+
# @return [String]
|
|
433
|
+
def self.getARN(resource, resource_type, client_type, region: MU.curRegion, account_number: nil, credentials: nil)
|
|
434
|
+
account_number ||= MU::Cloud::AWS.credToAcct(credentials)
|
|
435
|
+
aws_str = MU::Cloud::AWS.isGovCloud?(region) ? "aws-us-gov" : "aws"
|
|
436
|
+
"arn:#{aws_str}:#{client_type}:#{region}:#{account_number}:#{resource_type}:#{resource}"
|
|
437
|
+
end
|
|
438
|
+
|
|
439
|
+
# Construct all our tags.
|
|
440
|
+
# @return [Array]: All our standard tags and any custom tags.
|
|
441
|
+
def allTags
|
|
442
|
+
@tags.each_key.map { |k| { :key => k, :value => @tags[k] } }
|
|
443
|
+
end
|
|
444
|
+
|
|
445
|
+
# Create a subnet group for a database.
|
|
446
|
+
def manageSubnetGroup
|
|
447
|
+
# Finding subnets, creating security groups/adding holes, create subnet group
|
|
448
|
+
subnet_ids = []
|
|
449
|
+
|
|
450
|
+
dependencies
|
|
451
|
+
raise MuError.new "Didn't find the VPC specified for #{@mu_name}", details: @config["vpc"].to_h unless @vpc
|
|
452
|
+
|
|
453
|
+
mySubnets.each { |subnet|
|
|
454
|
+
next if @config["publicly_accessible"] and subnet.private?
|
|
455
|
+
subnet_ids << subnet.cloud_id
|
|
456
|
+
}
|
|
457
|
+
|
|
458
|
+
if @config['creation_style'] == "existing"
|
|
459
|
+
srcdb_vpc = @config['source'].kitten.cloud_desc.db_subnet_group.vpc_id
|
|
460
|
+
if srcdb_vpc != @vpc.cloud_id
|
|
461
|
+
MU.log "#{self} is deploying into #{@vpc.cloud_id}, but our source database, #{@config['identifier']}, is in #{srcdb_vpc}", MU::ERR
|
|
462
|
+
raise MuError, "Can't use 'existing' to deploy into a different VPC from the source database; try 'new_snapshot' instead"
|
|
463
|
+
end
|
|
464
|
+
end
|
|
465
|
+
|
|
466
|
+
if subnet_ids.empty?
|
|
467
|
+
raise MuError, "Couldn't find subnets in #{@vpc} to add to #{@config["subnet_group_name"]}. Make sure the subnets are valid and publicly_accessible is set correctly"
|
|
468
|
+
else
|
|
469
|
+
resp = begin
|
|
470
|
+
MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).describe_db_subnet_groups(
|
|
471
|
+
db_subnet_group_name: @config["subnet_group_name"]
|
|
472
|
+
)
|
|
473
|
+
# XXX ensure subnet group matches our config?
|
|
474
|
+
rescue ::Aws::RDS::Errors::DBSubnetGroupNotFoundFault
|
|
475
|
+
# Create subnet group
|
|
476
|
+
resp = MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_subnet_group(
|
|
477
|
+
db_subnet_group_name: @config["subnet_group_name"],
|
|
478
|
+
db_subnet_group_description: @config["subnet_group_name"],
|
|
479
|
+
subnet_ids: subnet_ids,
|
|
480
|
+
tags: @tags.each_key.map { |k| { :key => k, :value => @tags[k] } }
|
|
481
|
+
)
|
|
482
|
+
# The API forces it to lowercase, for some reason? Maybe not
|
|
483
|
+
# always? Just rely on what it says.
|
|
484
|
+
@config["subnet_group_name"] = resp.db_subnet_group.db_subnet_group_name
|
|
485
|
+
resp
|
|
486
|
+
end
|
|
487
|
+
|
|
488
|
+
myFirewallRules.each { |sg|
|
|
489
|
+
next if sg.cloud_desc.vpc_id != @vpc.cloud_id
|
|
490
|
+
@config["vpc_security_group_ids"] ||= []
|
|
491
|
+
@config["vpc_security_group_ids"] << sg.cloud_id
|
|
492
|
+
}
|
|
493
|
+
end
|
|
494
|
+
|
|
495
|
+
allowBastionAccess
|
|
496
|
+
end
|
|
497
|
+
|
|
498
|
+
# Create a database parameter group.
|
|
499
|
+
def manageDbParameterGroup(cluster = false, create: true)
|
|
500
|
+
return if !@config["parameter_group_name"]
|
|
501
|
+
name_param = cluster ? :db_cluster_parameter_group_name : :db_parameter_group_name
|
|
502
|
+
fieldname = cluster ? "cluster_parameter_group_parameters" : "db_parameter_group_parameters"
|
|
503
|
+
|
|
504
|
+
params = {
|
|
505
|
+
db_parameter_group_family: @config["parameter_group_family"],
|
|
506
|
+
description: "Parameter group for #{@mu_name}",
|
|
507
|
+
tags: @tags.each_key.map { |k| { :key => k, :value => @tags[k] } }
|
|
508
|
+
}
|
|
509
|
+
params[name_param] = @config["parameter_group_name"]
|
|
510
|
+
|
|
511
|
+
if create
|
|
512
|
+
MU.log "Creating a #{cluster ? "cluster" : "database" } parameter group #{@config["parameter_group_name"]}"
|
|
513
|
+
|
|
514
|
+
MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).send(cluster ? :create_db_cluster_parameter_group : :create_db_parameter_group, params)
|
|
515
|
+
end
|
|
516
|
+
|
|
517
|
+
|
|
518
|
+
if @config[fieldname] and !@config[fieldname].empty?
|
|
519
|
+
|
|
520
|
+
old_values = MU::Cloud::AWS.rds(credentials: @credentials, region: @config['region']).send(cluster ? :describe_db_cluster_parameters : :describe_db_parameters, { name_param => @config["parameter_group_name"] } ).parameters
|
|
521
|
+
old_values.map! { |p| [p.parameter_name, p.parameter_value] }.flatten
|
|
522
|
+
old_values = old_values.to_h
|
|
523
|
+
|
|
524
|
+
params = []
|
|
525
|
+
@config[fieldname].each { |item|
|
|
526
|
+
next if old_values[item["name"]] == item['value']
|
|
527
|
+
params << {parameter_name: item['name'], parameter_value: item['value'], apply_method: item['apply_method']}
|
|
528
|
+
}
|
|
529
|
+
return if params.empty?
|
|
530
|
+
|
|
531
|
+
MU.log "Modifying parameter group #{@config["parameter_group_name"]}", MU::NOTICE, details: params.map { |p| { p[:parameter_name] => p[:parameter_value] } }
|
|
532
|
+
|
|
533
|
+
MU.retrier([Aws::RDS::Errors::InvalidDBParameterGroupState], wait: 30, max: 10) {
|
|
534
|
+
if cluster
|
|
535
|
+
MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_cluster_parameter_group(
|
|
536
|
+
db_cluster_parameter_group_name: @config["parameter_group_name"],
|
|
537
|
+
parameters: params
|
|
538
|
+
)
|
|
539
|
+
else
|
|
540
|
+
MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_parameter_group(
|
|
541
|
+
db_parameter_group_name: @config["parameter_group_name"],
|
|
542
|
+
parameters: params
|
|
543
|
+
)
|
|
544
|
+
end
|
|
545
|
+
}
|
|
546
|
+
end
|
|
547
|
+
end
|
|
548
|
+
|
|
549
|
+
# Called automatically by {MU::Deploy#createResources}
|
|
550
|
+
def groom
|
|
551
|
+
cloud_desc(use_cache: false)
|
|
552
|
+
manageSubnetGroup if @vpc
|
|
553
|
+
manageDbParameterGroup(@config["create_cluster"], create: false)
|
|
554
|
+
|
|
555
|
+
noun = @config['create_cluster'] ? "cluster" : "instance"
|
|
556
|
+
|
|
557
|
+
mods = {
|
|
558
|
+
"db_#{noun}_identifier".to_sym => @cloud_id
|
|
559
|
+
}
|
|
560
|
+
|
|
561
|
+
basicParams.each_pair { |k, v|
|
|
562
|
+
next if v.nil? or !MODIFIABLE[noun].include?(k)
|
|
563
|
+
if cloud_desc.respond_to?(k) and cloud_desc.send(k) != v
|
|
564
|
+
mods[k] = v
|
|
565
|
+
end
|
|
566
|
+
}
|
|
567
|
+
|
|
568
|
+
existing_sgs = cloud_desc.vpc_security_groups.map { |sg|
|
|
569
|
+
sg.vpc_security_group_id
|
|
570
|
+
}.sort
|
|
571
|
+
|
|
572
|
+
if !@config["add_cluster_node"] and !@config["member_of_cluster"] and
|
|
573
|
+
@config["vpc_security_group_ids"] and
|
|
574
|
+
existing_sgs != @config["vpc_security_group_ids"].sort
|
|
575
|
+
mods[:vpc_security_group_ids] = @config["vpc_security_group_ids"]
|
|
576
|
+
end
|
|
577
|
+
|
|
578
|
+
|
|
579
|
+
if @config['cloudwatch_logs'] and cloud_desc.enabled_cloudwatch_logs_exports.sort != @config['cloudwatch_logs'].sort
|
|
580
|
+
mods[:cloudwatch_logs_export_configuration] = {
|
|
581
|
+
enable_log_types: @config['cloudwatch_logs'],
|
|
582
|
+
disable_log_types: cloud_desc.enabled_cloudwatch_logs_exports - @config['cloudwatch_logs']
|
|
583
|
+
}
|
|
584
|
+
end
|
|
585
|
+
|
|
586
|
+
if @config["create_cluster"]
|
|
587
|
+
@config['cluster_node_count'] ||= 1
|
|
588
|
+
if @config['cluster_mode'] == "serverless"
|
|
589
|
+
MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_current_db_cluster_capacity(
|
|
590
|
+
db_cluster_identifier: @cloud_id,
|
|
591
|
+
capacity: @config['cluster_node_count']
|
|
592
|
+
)
|
|
593
|
+
end
|
|
594
|
+
else
|
|
595
|
+
# Run SQL on deploy
|
|
596
|
+
if @config['run_sql_on_deploy']
|
|
597
|
+
run_sql_commands
|
|
598
|
+
end
|
|
599
|
+
|
|
600
|
+
if !cloud_desc.multi_az and (@config['multi_az_on_deploy'] or @config['multi_az_on_create'])
|
|
601
|
+
mods[:multi_az] = true
|
|
602
|
+
end
|
|
603
|
+
|
|
604
|
+
# XXX how do we guard this? do we?
|
|
605
|
+
# master_user_password: @config["password"],
|
|
606
|
+
# end
|
|
607
|
+
|
|
608
|
+
# XXX it's a stupid array
|
|
609
|
+
# db_parameter_group_name: @config["parameter_group_name"],
|
|
610
|
+
end
|
|
611
|
+
|
|
612
|
+
if mods.size > 1
|
|
613
|
+
MU.log "Modifying RDS instance #{@cloud_id}", MU::NOTICE, details: mods
|
|
614
|
+
mods[:apply_immediately] = true
|
|
615
|
+
wait_until_available
|
|
616
|
+
MU::Cloud::AWS.rds(region: @config['region'], credentials: @credentials).send("modify_db_#{noun}".to_sym, mods)
|
|
617
|
+
wait_until_available
|
|
618
|
+
end
|
|
619
|
+
|
|
620
|
+
end
|
|
621
|
+
|
|
622
|
+
# Generate database user, database identifier, database name based on engine-specific constraints
|
|
623
|
+
# @return [String]: Name
|
|
624
|
+
def self.getName(basename, type: 'dbname', config: nil)
|
|
625
|
+
if type == 'dbname'
|
|
626
|
+
# Apply engine-specific db name constraints
|
|
627
|
+
if config["engine"] =~ /^oracle/
|
|
628
|
+
(MU.seed.downcase+config["name"])[0..7]
|
|
629
|
+
elsif config["engine"] =~ /^sqlserver/
|
|
630
|
+
nil
|
|
631
|
+
elsif config["engine"] =~ /^mysql/
|
|
632
|
+
basename[0..63]
|
|
633
|
+
elsif config["engine"] =~ /^aurora/
|
|
634
|
+
(MU.seed.downcase+config["name"])[0..7]
|
|
635
|
+
else
|
|
636
|
+
basename
|
|
637
|
+
end
|
|
638
|
+
elsif type == 'dbuser'
|
|
639
|
+
# Apply engine-specific master username constraints
|
|
640
|
+
if config["engine"] =~ /^oracle/
|
|
641
|
+
basename[0..29].gsub(/[^a-z0-9]/i, "")
|
|
642
|
+
elsif config["engine"] =~ /^sqlserver/
|
|
643
|
+
basename[0..127].gsub(/[^a-z0-9]/i, "")
|
|
644
|
+
elsif config["engine"] =~ /^(mysql|maria)/
|
|
645
|
+
basename[0..15].gsub(/[^a-z0-9]/i, "")
|
|
646
|
+
elsif config["engine"] =~ /^aurora/
|
|
647
|
+
basename[0..15].gsub(/[^a-z0-9]/i, "")
|
|
648
|
+
else
|
|
649
|
+
basename.gsub(/[^a-z0-9]/i, "")
|
|
650
|
+
end
|
|
651
|
+
end
|
|
652
|
+
end
|
|
653
|
+
|
|
654
|
+
# Permit a host to connect to the given database instance.
|
|
655
|
+
# @param cidr [String]: The CIDR-formatted IP address or block to allow access.
|
|
656
|
+
# @return [void]
|
|
657
|
+
def allowHost(cidr)
|
|
658
|
+
# If we're an old, Classic-style database with RDS-specific
|
|
659
|
+
# authorization, punch holes in that.
|
|
660
|
+
if !cloud_desc.db_security_groups.empty?
|
|
661
|
+
cloud_desc.db_security_groups.each { |rds_sg|
|
|
662
|
+
begin
|
|
663
|
+
MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).authorize_db_security_group_ingress(
|
|
664
|
+
db_security_group_name: rds_sg.db_security_group_name,
|
|
665
|
+
cidrip: cidr
|
|
666
|
+
)
|
|
667
|
+
rescue Aws::RDS::Errors::AuthorizationAlreadyExists
|
|
668
|
+
MU.log "CIDR #{cidr} already in database instance #{@cloud_id} security group", MU::WARN
|
|
669
|
+
end
|
|
670
|
+
}
|
|
671
|
+
end
|
|
672
|
+
|
|
673
|
+
# Otherwise go get our generic EC2 ruleset and punch a hole in it
|
|
674
|
+
myFirewallRules.each { |sg|
|
|
675
|
+
sg.addRule([cidr], proto: "tcp", port: cloud_desc.endpoint.port)
|
|
676
|
+
break
|
|
677
|
+
}
|
|
678
|
+
end
|
|
679
|
+
|
|
680
|
+
# Return the metadata for this ContainerCluster
|
|
681
|
+
# @return [Hash]
|
|
682
|
+
def notify
|
|
683
|
+
deploy_struct = MU.structToHash(cloud_desc, stringify_keys: true)
|
|
684
|
+
deploy_struct['cloud_id'] = @cloud_id
|
|
685
|
+
deploy_struct["region"] ||= @config['region']
|
|
686
|
+
deploy_struct["db_name"] ||= @config['db_name']
|
|
687
|
+
deploy_struct
|
|
688
|
+
end
|
|
689
|
+
|
|
690
|
+
# Generate a snapshot from the database described in this instance.
# Resolves the +source+ reference to a live cloud id, issues the
# appropriate create-snapshot call (cluster vs. instance), then blocks
# until the snapshot reports +available+.
# @return [String]: The cloud provider's identifier for the snapshot.
def createNewSnapshot
  # Append minutes+seconds so repeated snapshots of the same resource get
  # distinct identifiers.
  snap_id = @deploy.getResourceName(@config["name"]) + Time.new.strftime("%M%S").to_s
  src_ref = MU::Config::Ref.get(@config["source"])
  src_ref.kitten(@deploy)
  if !src_ref.id
    raise MuError.new "#{@mu_name} failed to get an id from reference for creating a snapshot", details: @config['source']
  end
  params = {
    :tags => @tags.each_key.map { |k| { :key => k, :value => @tags[k] } }
  }
  # Cluster and instance snapshots use differently-named identifier params
  if @config["create_cluster"]
    params[:db_cluster_snapshot_identifier] = snap_id
    params[:db_cluster_identifier] = src_ref.id
  else
    params[:db_snapshot_identifier] = snap_id
    params[:db_instance_identifier] = src_ref.id
  end

  # The source may still be mid-modification; retry until RDS lets us in
  MU.retrier([Aws::RDS::Errors::InvalidDBInstanceState, Aws::RDS::Errors::InvalidDBClusterStateFault], wait: 60, max: 10) {
    MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).send("create_db_#{@config['create_cluster'] ? "cluster_" : ""}snapshot".to_sym, params)
  }

  # Loop condition: keep waiting while the snapshot is not yet "available"
  loop_if = Proc.new {
    if @config["create_cluster"]
      MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).describe_db_cluster_snapshots(db_cluster_snapshot_identifier: snap_id).db_cluster_snapshots.first.status != "available"
    else
      MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).describe_db_snapshots(db_snapshot_identifier: snap_id).db_snapshots.first.status != "available"
    end
  }

  # Poll every 15s; only log a progress message every 20th retry (~5 min)
  MU.retrier(wait: 15, loop_if: loop_if) { |retries, _wait|
    MU.log "Waiting for RDS snapshot of #{src_ref.id} to be ready...", MU::NOTICE if retries % 20 == 0
  }

  return snap_id
end
|
|
728
|
+
|
|
729
|
+
# Fetch the latest snapshot of the database described in this instance.
# @return [String]: The cloud provider's identifier for the snapshot, or
#   nil when no snapshots exist for the source.
def getExistingSnapshot
  source = MU::Config::Ref.get(@config["source"])
  clustered = @config["create_cluster"]
  api = MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials'])

  snapshots = if clustered
    api.describe_db_cluster_snapshots(db_cluster_snapshot_identifier: source.id).db_cluster_snapshots
  else
    api.describe_db_snapshots(db_snapshot_identifier: source.id).db_snapshots
  end

  return nil if snapshots.empty?

  # Most recent snapshot wins
  newest = snapshots.sort_by { |snap| snap.snapshot_create_time }.last
  clustered ? newest.db_cluster_snapshot_identifier : newest.db_snapshot_identifier
end
|
|
749
|
+
|
|
750
|
+
# Does this resource type exist as a global (cloud-wide) artifact, or
# is it localized to a region/zone? RDS databases are regional.
# @return [Boolean]
def self.isGlobal?
  false
end
|
|
756
|
+
|
|
757
|
+
# Denote whether this resource implementation is experiment, ready for
# testing, or ready for production use.
# @return [String]
def self.quality
  MU::Cloud::RELEASE
end
|
|
762
|
+
|
|
763
|
+
# List RDS resources of one flavor via +describe_method+, select the ones
# whose tags mark them as belonging to this deploy, and spawn one thread
# per doomed resource which yields its id to the caller-supplied block
# (the block does the actual deletion).
# @param describe_method [Symbol]: RDS client method that lists the resources
# @param list_method [Symbol]: accessor on the describe response for the resource array
# @param id_method [Symbol]: accessor on each resource for its identifier
# @param arn_type [String]: the ARN resource-type token (e.g. "db", "cluster", "subgrp")
# @param region [String]: region to operate in
# @param credentials [String]: credential set to use
# @param ignoremaster [Boolean]: also purge resources created by other Mu masters
# @param known [Array<String>]: ids to preserve
# @param deploy_id [String]: deploy whose resources we're purging
# @return [Array<Thread>]
def self.threaded_resource_purge(describe_method, list_method, id_method, arn_type, region, credentials, ignoremaster, known: [], deploy_id: MU.deploy_id)
  deletia = []

  resp = MU::Cloud::AWS.rds(credentials: credentials, region: region).send(describe_method)
  resp.send(list_method).each { |resource|
    begin
      # Tags are only reachable via the resource's ARN, so build that first
      arn = MU::Cloud::AWS::Database.getARN(resource.send(id_method), arn_type, "rds", region: region, credentials: credentials)
      tags = MU::Cloud::AWS.rds(credentials: credentials, region: region).list_tags_for_resource(resource_name: arn).tag_list
    rescue Aws::RDS::Errors::InvalidParameterValue
      MU.log "Failed to fetch ARN of type #{arn_type} or tags of resource via #{id_method}", MU::WARN, details: [resource, arn]
      next
    end

    # Tag-based ownership check; skips anything in the +known+ allowlist
    if should_delete?(tags, resource.send(id_method), ignoremaster, deploy_id, MU.mu_public_ip, known)
      deletia << resource.send(id_method)
    end
  }

  # Deletion happens concurrently; caller is responsible for joining the
  # returned threads.
  threads = []
  deletia.each { |id|
    threads << Thread.new(id) { |resource_id|
      yield(resource_id)
    }
  }

  threads
end
|
|
791
|
+
|
|
792
|
+
# Called by {MU::Cleanup}. Locates resources that were created by the
# currently-loaded deployment, and purges them. Instances/clusters are
# removed first (and waited on), then subnet groups and parameter groups,
# since the latter cannot be deleted while still in use.
# @param noop [Boolean]: If true, will only print what would be done
# @param ignoremaster [Boolean]: If true, will remove resources not flagged as originating from this Mu server
# @param region [String]: The cloud provider region in which to operate
# @return [void]
def self.cleanup(noop: false, deploy_id: MU.deploy_id, ignoremaster: false, credentials: nil, region: MU.curRegion, flags: {})

  # Terminate database instances and clusters, joining each type's threads
  # before moving on so dependent groups become deletable.
  ["instance", "cluster"].each { |type|
    threaded_resource_purge("describe_db_#{type}s".to_sym, "db_#{type}s".to_sym, "db_#{type}_identifier".to_sym, (type == "instance" ? "db" : "cluster"), region, credentials, ignoremaster, known: flags['known'], deploy_id: deploy_id) { |id|
      terminate_rds_instance(nil, noop: noop, skipsnapshots: flags["skipsnapshots"], region: region, deploy_id: deploy_id, cloud_id: id, mu_name: id.upcase, credentials: credentials, cluster: (type == "cluster"), known: flags['known'])

    }.each { |t|
      t.join
    }
  }

  # Subnet groups can only go once their databases are gone
  threads = threaded_resource_purge(:describe_db_subnet_groups, :db_subnet_groups, :db_subnet_group_name, "subgrp", region, credentials, ignoremaster, known: flags['known'], deploy_id: deploy_id) { |id|
    MU.log "Deleting RDS subnet group #{id}"
    MU.retrier([Aws::RDS::Errors::InvalidDBSubnetGroupStateFault], wait: 30, max: 5, ignoreme: [Aws::RDS::Errors::DBSubnetGroupNotFoundFault]) {
      MU::Cloud::AWS.rds(region: region).delete_db_subnet_group(db_subnet_group_name: id) if !noop
    }
  }

  # Instance-level and cluster-level parameter groups
  ["db", "db_cluster"].each { |type|
    threads.concat threaded_resource_purge("describe_#{type}_parameter_groups".to_sym, "#{type}_parameter_groups".to_sym, "#{type}_parameter_group_name".to_sym, (type == "db" ? "pg" : "cluster-pg"), region, credentials, ignoremaster, known: flags['known'], deploy_id: deploy_id) { |id|
      MU.log "Deleting RDS #{type} parameter group #{id}"
      MU.retrier([Aws::RDS::Errors::InvalidDBParameterGroupState], wait: 30, max: 5, ignoreme: [Aws::RDS::Errors::DBParameterGroupNotFound]) {
        MU::Cloud::AWS.rds(region: region).send("delete_#{type}_parameter_group", { "#{type}_parameter_group_name".to_sym => id }) if !noop
      }
    }
  }

  # Wait for all of the databases subnet/parameter groups to finish cleanup before proceeding
  threads.each { |t|
    t.join
  }
end
|
|
830
|
+
|
|
831
|
+
# Cloud-specific configuration properties.
# Fix: "parameter_group_family" previously declared its JSON-schema type
# as "String" (capitalized); JSON Schema type names are lowercase, and
# every other entry here uses "string".
# @param _config [MU::Config]: The calling MU::Config object
# @return [Array<Array,Hash>]: List of required fields, and json-schema Hash of cloud-specific configuration parameters for this resource
def self.schema(_config)
  toplevel_required = []

  # Shared schema fragment for db/cluster parameter group entries
  rds_parameters_primitive = {
    "type" => "array",
    "minItems" => 1,
    "items" => {
      "description" => "The database parameter group parameter to change and when to apply the change.",
      "type" => "object",
      "title" => "Database Parameter",
      "required" => ["name", "value"],
      "additionalProperties" => false,
      "properties" => {
        "name" => {
          "type" => "string"
        },
        "value" => {
          "type" => "string"
        },
        "apply_method" => {
          "enum" => ["pending-reboot", "immediate"],
          "default" => "immediate",
          "type" => "string"
        }
      }
    }
  }

  schema = {
    "db_parameter_group_parameters" => rds_parameters_primitive,
    "cluster_parameter_group_parameters" => rds_parameters_primitive,
    "parameter_group_family" => {
      "type" => "string", # was "String", an invalid JSON-schema type
      "description" => "An RDS parameter group family. See also https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_WorkingWithParamGroups.html"
    },
    "cluster_mode" => {
      "type" => "string",
      "description" => "The DB engine mode of the DB cluster",
      "enum" => ["provisioned", "serverless", "parallelquery", "global", "multimaster"],
      "default" => "provisioned"
    },
    "storage_type" => {
      "enum" => ["standard", "gp2", "io1"],
      "type" => "string",
      "default" => "gp2"
    },
    "cloudwatch_logs" => {
      "type" => "array",
      "items" => {
        "type" => "string",
        "enum" => ["audit", "error", "general", "slowquery", "profiler", "postgresql", "alert", "listener", "trace", "upgrade", "agent"]
      }
    },
    "serverless_scaling" => {
      "type" => "object",
      "description" => "Scaling configuration for a +serverless+ Aurora cluster",
      "default" => {
        "auto_pause" => false,
        "min_capacity" => 2,
        "max_capacity" => 2
      },
      "properties" => {
        "auto_pause" => {
          "type" => "boolean",
          "description" => "A value that specifies whether to allow or disallow automatic pause for an Aurora DB cluster in serverless DB engine mode",
          "default" => false
        },
        "min_capacity" => {
          "type" => "integer",
          "description" => "The minimum capacity for an Aurora DB cluster in serverless DB engine mode.",
          "default" => 2,
          "enum" => [2, 4, 8, 16, 32, 64, 128, 256]
        },
        "max_capacity" => {
          "type" => "integer",
          "description" => "The maximum capacity for an Aurora DB cluster in serverless DB engine mode.",
          "default" => 2,
          "enum" => [2, 4, 8, 16, 32, 64, 128, 256]
        },
        "seconds_until_auto_pause" => {
          "type" => "integer",
          "description" => "A DB cluster can be paused only when it's idle (it has no connections). If a DB cluster is paused for more than seven days, the DB cluster might be backed up with a snapshot. In this case, the DB cluster is restored when there is a request to connect to it.",
          "default" => 86400
        }
      }
    },
    "license_model" => {
      "type" => "string",
      "enum" => ["license-included", "bring-your-own-license", "general-public-license", "postgresql-license"]
    },
    "ingress_rules" => MU::Cloud.resourceClass("AWS", "FirewallRule").ingressRuleAddtlSchema
  }
  [toplevel_required, schema]
end
|
|
928
|
+
|
|
929
|
+
# Per-credential, per-region cache of RDS engine metadata so we only hit
# the (slow, paginated) DescribeDBEngineVersions API once per region.
@@engine_cache= {}
# Enumerate the database engines and versions RDS supports in a region.
# Fix: the failure-branch log message interpolated +db['region']+, but no
# +db+ variable exists in this method's scope (it would raise NameError);
# it now interpolates the +region+ parameter.
# @param region [String]: region to query
# @param credentials [String]: credential set to use
# @param engine [String]: if set, return only this engine's entry
# @return [Hash]: either the full engine map, or one engine's entry
def self.get_supported_engines(region = MU.myRegion, credentials = nil, engine: nil)
  @@engine_cache ||= {}
  @@engine_cache[credentials] ||= {}
  @@engine_cache[credentials][region] ||= {}

  # Serve from cache when we've already fetched this region
  if !@@engine_cache[credentials][region].empty?
    return engine ? @@engine_cache[credentials][region][engine] : @@engine_cache[credentials][region]
  end

  engines = {}

  resp = MU::Cloud::AWS.rds(credentials: credentials, region: region).describe_db_engine_versions

  if resp and resp.db_engine_versions
    resp.db_engine_versions.each { |version|
      engines[version.engine] ||= {
        "versions" => [],
        "families" => [],
        "features" => {},
        "raw" => {}
      }
      engines[version.engine]['versions'] << version.engine_version
      engines[version.engine]['families'] << version.db_parameter_group_family
      engines[version.engine]['raw'][version.engine_version] = version
      # Record per-version feature flags we care about elsewhere
      [:supports_read_replica, :supports_log_exports_to_cloudwatch_logs].each { |feature|
        if version.respond_to?(feature) and version.send(feature) == true
          engines[version.engine]['features'][version.engine_version] ||= []
          engines[version.engine]['features'][version.engine_version] << feature
        end
      }

    }
    engines.each_key { |e|
      engines[e]["versions"].uniq!
      engines[e]["versions"].sort! { |a, b| MU.version_sort(a, b) }
      engines[e]["families"].uniq!
    }

  else
    # Was: "...in #{db['region']}..." — +db+ is not defined in this scope
    MU.log "Failed to get list of valid RDS engine versions in #{region}, proceeding without proper validation", MU::WARN
  end

  @@engine_cache[credentials][region] = engines
  return engine ? @@engine_cache[credentials][region][engine] : @@engine_cache[credentials][region]
end
private_class_method :get_supported_engines
|
|
976
|
+
|
|
977
|
+
# Make sure any source database/cluster/snapshot we've asked for exists
# and is valid.
# @param db [Hash]: the database resource's configuration
# @return [Boolean]: true when the source checks out
def self.validate_source_data(db)
  src = db['source']

  if db['creation_style'] == "existing_snapshot" and
     !db['create_cluster'] and
     src and src["id"] and src["id"].match(/:cluster-snapshot:/)
    # A cluster snapshot can't seed a standalone instance
    MU.log "Database #{db['name']}: Existing snapshot #{db["source"]["id"]} looks like a cluster snapshot, but create_cluster is not set. Add 'create_cluster: true' if you're building an RDS cluster.", MU::ERR
    return false
  end

  if ["existing", "new_snapshot"].include?(db["creation_style"])
    begin
      # Probe the source instance; RDS raises if it doesn't exist
      MU::Cloud::AWS.rds(region: db['region']).describe_db_instances(
        db_instance_identifier: src['id']
      )
    rescue Aws::RDS::Errors::DBInstanceNotFound
      MU.log "Source database was specified for #{db['name']}, but no such database exists in #{db['region']}", MU::ERR, db['source']
      return false
    end
  end

  true
end
|
|
1001
|
+
|
|
1002
|
+
# Check that the configured master password (given directly or fetched
# from a groomer vault) satisfies RDS's per-engine length and character
# restrictions.
# @param db [Hash]: the database resource's configuration
# @return [Boolean]: true when the password is acceptable or absent
def self.validate_master_password(db)
  # Engine-specific maximum password lengths; engines we don't recognize
  # are waved through.
  maxlen = case db['engine']
  when "mariadb", "mysql"
    41
  when "postgresql"
    41
  when /oracle/
    30
  when /sqlserver/
    128
  else
    return true
  end

  # Prefer an explicit password; otherwise try to pull one from the
  # groomer's vault. If neither branch applies, pw stays nil and the
  # length check below is skipped.
  pw = if !db['password'].nil?
    db['password']
  elsif db['auth_vault'] and !db['auth_vault'].empty?
    groomclass = MU::Groomer.loadGroomer(db['groomer'])
    pw = groomclass.getSecret(
      vault: db['auth_vault']['vault'],
      item: db['auth_vault']['item'],
      field: db['auth_vault']['password_field']
    )
    # Vault had no password stored; nothing to validate yet
    return true if pw.nil?
    pw
  end

  # RDS forbids /, \, @ and whitespace, and enforces 8..maxlen length
  if pw and (pw.length < 8 or pw.match(/[\/\\@\s]/) or pw.length > maxlen)
    MU.log "Database password specified in 'password' or 'auth_vault' doesn't meet RDS requirements. Must be between 8 and #{maxlen} chars and have only ASCII characters other than /, @, \", or [space].", MU::ERR
    return false
  end

  true
end
private_class_method :validate_master_password
|
|
1037
|
+
|
|
1038
|
+
# Cloud-specific pre-processing of {MU::Config::BasketofKittens::databases}, bare and unvalidated.
# Runs every private validator, fills in engine-dependent defaults
# (license model, parameter group family), and sanity-checks storage and
# multi-AZ settings.
# @param db [Hash]: The resource to process and validate
# @param _configurator [MU::Config]: The overall deployment configurator of which this resource is a ember
# @return [Boolean]: True if validation succeeded, False otherwise
def self.validateConfig(db, _configurator)
  ok = true

  ok = false if !validate_source_data(db)

  # NB: validate_engine may rewrite db['engine']/db['engine_version'], so
  # it must run before the engine-dependent defaults below.
  ok = false if !validate_engine(db)

  ok = false if !valid_read_replica?(db)

  ok = false if !valid_cloudwatch_logs?(db)

  # Default license model per engine family
  db["license_model"] ||=
    if ["postgres", "postgresql", "aurora-postgresql"].include?(db["engine"])
      "postgresql-license"
    elsif ["mysql", "mariadb"].include?(db["engine"])
      "general-public-license"
    else
      "license-included"
    end

  ok = false if !validate_master_password(db)

  if db["multi_az_on_create"] and db["multi_az_on_deploy"]
    MU.log "Both of multi_az_on_create and multi_az_on_deploy cannot be true", MU::ERR
    ok = false
  end

  # If parameter group parameters were given without a family, derive the
  # family from the resolved engine version's metadata.
  if (db["db_parameter_group_parameters"] or db["cluster_parameter_group_parameters"]) and db["parameter_group_family"].nil?
    engine = get_supported_engines(db['region'], db['credentials'], engine: db['engine'])
    db["parameter_group_family"] = engine['raw'][db['engine_version']].db_parameter_group_family
  end

  # Adding rules for Database instance storage. This varies depending on storage type and database type.
  if !db["storage"].nil? and !db["create_cluster"] and !db["add_cluster_node"] and !STORAGE_RANGES[db["storage_type"]][db['engine']].include?(db["storage"])
    MU.log "Database storage size is set to #{db["storage"]}. #{db["engine"]} only supports storage sizes from #{STORAGE_RANGES[db["storage_type"]][db['engine']]} GB for #{db["storage_type"]} volumes.", MU::ERR
    ok = false
  end

  ok = false if !validate_network_cfg(db)

  ok
end
|
|
1084
|
+
|
|
1085
|
+
private
|
|
1086
|
+
|
|
1087
|
+
# Assemble the baseline parameter hash shared by our RDS create/restore
# calls, shaped differently for clusters vs. standalone instances, and
# append the snapshot identifier when restoring from one.
# @return [Hash]
def genericParams
  params = if @config['create_cluster']
    # Cluster-flavored parameters
    paramhash = {
      db_cluster_identifier: @cloud_id,
      engine: @config["engine"],
      vpc_security_group_ids: @config["vpc_security_group_ids"],
      tags: @tags.each_key.map { |k| { :key => k, :value => @tags[k] } }
    }

    if @vpc and @config["subnet_group_name"]
      paramhash[:db_subnet_group_name] = @config["subnet_group_name"]
    end

    if @config['cloudwatch_logs']
      paramhash[:enable_cloudwatch_logs_exports ] = @config['cloudwatch_logs']
    end
    if @config['cluster_mode']
      paramhash[:engine_mode] = @config['cluster_mode']
      # Serverless clusters additionally carry a scaling configuration
      if @config['cluster_mode'] == "serverless"
        paramhash[:scaling_configuration] = {
          :auto_pause => @config['serverless_scaling']['auto_pause'],
          :min_capacity => @config['serverless_scaling']['min_capacity'],
          :max_capacity => @config['serverless_scaling']['max_capacity'],
          :seconds_until_auto_pause => @config['serverless_scaling']['seconds_until_auto_pause']
        }
      end
    end
    paramhash
  else
    # Standalone-instance parameters
    {
      db_instance_identifier: @cloud_id,
      db_instance_class: @config["size"],
      engine: @config["engine"],
      auto_minor_version_upgrade: @config["auto_minor_version_upgrade"],
      license_model: @config["license_model"],
      db_subnet_group_name: @config["subnet_group_name"],
      vpc_security_group_ids: @config["vpc_security_group_ids"],
      publicly_accessible: @config["publicly_accessible"],
      copy_tags_to_snapshot: true,
      tags: @tags.each_key.map { |k| { :key => k, :value => @tags[k] } }
    }
  end

  # Restores reference the snapshot under differently-named keys for
  # clusters vs. instances
  if %w{existing_snapshot new_snapshot}.include?(@config["creation_style"])
    if @config['create_cluster']
      params[:snapshot_identifier] = @config["snapshot_id"]
    else
      params[:db_snapshot_identifier] = @config["snapshot_id"]
    end
  end

  params
end
|
|
1140
|
+
|
|
1141
|
+
|
|
1142
|
+
# Resolve the database's VPC configuration, defaulting to the account's
# default VPC when none was declared, and reconcile the
# +publicly_accessible+ flag with the subnets we'll land in.
# Mutates +db+ in place (vpc, publicly_accessible).
# @param db [Hash]: the database resource's configuration
# @return [Boolean]: false when the combination is unsupported
def self.validate_network_cfg(db)
  ok = true

  if !db['vpc']
    # No VPC declared: fall back on the region's default VPC, which has
    # public subnets, hence publicly_accessible gets flipped on.
    db["vpc"] = MU::Cloud.resourceClass("AWS", "VPC").defaultVpc(db['region'], db['credentials'])
    if db['vpc'] and !(db['engine'].match(/sqlserver/) and db['create_read_replica'])
      MU.log "Using default VPC for database '#{db['name']}; this sets 'publicly_accessible' to true.", MU::WARN
      db['publicly_accessible'] = true
    end
  else
    # Keep publicly_accessible consistent with the declared subnet_pref
    if db["vpc"]["subnet_pref"] == "all_public" and !db['publicly_accessible'] and (db["vpc"]['subnets'].nil? or db["vpc"]['subnets'].empty?)
      MU.log "Setting publicly_accessible to true on database '#{db['name']}', since deploying into public subnets.", MU::WARN
      db['publicly_accessible'] = true
    elsif db["vpc"]["subnet_pref"] == "all_private" and db['publicly_accessible']
      MU.log "Setting publicly_accessible to false on database '#{db['name']}', since deploying into private subnets.", MU::NOTICE
      db['publicly_accessible'] = false
    end
    if db['engine'].match(/sqlserver/) and db['create_read_replica']
      MU.log "SQL Server does not support read replicas in VPC deployments", MU::ERR
      ok = false
    end
  end

  ok
end
private_class_method :validate_network_cfg
|
|
1168
|
+
|
|
1169
|
+
# Verify that the chosen engine/version supports read replicas, when the
# configuration asks for one. When feature metadata is unavailable we
# optimistically pass and let the API reject it later.
# @param db [Hash]: the database resource's configuration
# @return [Boolean]
def self.valid_read_replica?(db)
  wants_replica = db['create_read_replica'] || db['read_replica_of']
  return true unless wants_replica

  engine = get_supported_engines(db['region'], db['credentials'], engine: db['engine'])
  features = (engine && engine['features']) ? engine['features'][db['engine_version']] : nil
  # We can't be sure, so let the API sort it out later
  return true if features.nil?

  return true if features.include?(:supports_read_replica)

  MU.log "Engine #{db['engine']} #{db['engine_version']} does not appear to support read replicas", MU::ERR
  false
end
private_class_method :valid_read_replica?
|
|
1186
|
+
|
|
1187
|
+
# Verify that the chosen engine/version can export the requested log
# types to CloudWatch Logs.
# Fix: the support gate previously tested the :supports_read_replica
# feature flag (a copy-paste from valid_read_replica?); it now tests
# :supports_log_exports_to_cloudwatch_logs, which is the flag
# get_supported_engines records for this capability.
# @param db [Hash]: the database resource's configuration
# @return [Boolean]
def self.valid_cloudwatch_logs?(db)
  return true if !db['cloudwatch_logs']
  engine = get_supported_engines(db['region'], db['credentials'], engine: db['engine'])
  if engine.nil? or !engine['features'] or !engine['features'][db['engine_version']] or !engine['features'][db['engine_version']].include?(:supports_log_exports_to_cloudwatch_logs)
    MU.log "CloudWatch Logs not supported for #{db['engine']} #{db['engine_version']}", MU::ERR
    return false
  end

  # Engine supports log export in general; now validate each log type
  ok = true
  db['cloudwatch_logs'].each { |logtype|
    if !engine['raw'][db['engine_version']].exportable_log_types.include?(logtype)
      ok = false
      MU.log "CloudWatch Log type #{logtype} is not valid for #{db['engine']} #{db['engine_version']}. List of valid types:", MU::ERR, details: engine['raw'][db['engine_version']].exportable_log_types
    end
  }

  ok
end
private_class_method :valid_cloudwatch_logs?
|
|
1206
|
+
|
|
1207
|
+
# Normalize and validate the configured database engine: rewrite generic
# engine names to their Aurora/RDS equivalents for clusters, resolve the
# engine version against what RDS actually offers in-region, and check
# the parameter group family. Mutates +db+ in place.
# @param db [Hash]: the database resource's configuration
# @return [Boolean]
def self.validate_engine(db)
  ok = true

  # Cluster configurations get their engine rewritten to an Aurora flavor
  if db['create_cluster'] or db["member_of_cluster"] or db["add_cluster_node"] or (db['engine'] and db['engine'].match(/aurora/))
    case db['engine']
    when "mysql", "aurora", "aurora-mysql"
      # MySQL 5.6-compatible (and serverless) clusters use the legacy
      # "aurora" engine; newer versions use "aurora-mysql"
      if (db['engine_version'] and db["engine_version"].match(/^5\.6/)) or db["cluster_mode"] == "serverless"
        db["engine"] = "aurora"
        db["engine_version"] = "5.6"
        db['publicly_accessible'] = false
      else
        db["engine"] = "aurora-mysql"
      end
    when /postgres/
      db["engine"] = "aurora-postgresql"
    else
      ok = false
      MU.log "#{db['engine']} is not supported for clustering", MU::ERR
    end
    db["create_cluster"] = true if !(db["member_of_cluster"] or db["add_cluster_node"])
  end

  # Map bare engine names to concrete RDS editions
  db["engine"] = "oracle-se2" if db["engine"] == "oracle"
  db["engine"] = "sqlserver-ex" if db["engine"] == "sqlserver"

  engine_cfg = get_supported_engines(db['region'], db['credentials'], engine: db['engine'])

  if !engine_cfg or engine_cfg['versions'].empty? or engine_cfg['families'].empty?
    MU.log "RDS engine #{db['engine']} reports no supported versions in #{db['region']}", MU::ERR, details: engine_cfg
    return false
  end

  # Resolve or default our engine version to something reasonable
  db['engine_version'] ||= engine_cfg['versions'].last
  if !engine_cfg['versions'].include?(db["engine_version"])
    # Treat a partial version (e.g. "5.7") as a prefix and take the
    # newest matching concrete version
    db['engine_version'] = engine_cfg['versions'].grep(/^#{Regexp.quote(db["engine_version"])}/).last
  end
  if !engine_cfg['versions'].include?(db["engine_version"])
    MU.log "RDS engine '#{db['engine']}' version '#{db['engine_version']}' is not supported in #{db['region']}", MU::ERR, details: { "Known-good versions:" => engine_cfg['versions'].uniq.sort }
    ok = false
  end

  if db["parameter_group_family"] and
     !engine_cfg['families'].include?(db['parameter_group_family'])
    MU.log "RDS engine '#{db['engine']}' parameter group family '#{db['parameter_group_family']}' is not supported.", MU::ERR, details: engine_cfg['families'].uniq.sort
    ok = false
  end

  ok
end
private_class_method :validate_engine
|
|
1258
|
+
|
|
1259
|
+
# Provision a plain (non-cluster-member) database: fetch/derive the
# master password, set up subnet and parameter groups as needed, then
# create the instance.
def add_basic

  getPassword

  same_region_source = !@config['source'].nil? && @config['region'] == @config['source'].region
  if same_region_source
    # RDS forces same-region replicas into the source's subnet group
    MU.log "Note: Read Replicas automatically reside in the same subnet group as the source database, if they're both in the same region. This replica may not land in the VPC you intended.", MU::WARN
  else
    manageSubnetGroup if @vpc
  end

  manageDbParameterGroup if @config.has_key?("parameter_group_family")

  createDb
end
|
|
1274
|
+
|
|
1275
|
+
|
|
1276
|
+
# Provision a database instance as a member of an existing cluster,
# inheriting the cluster's identifier and subnet group.
def add_cluster_node
  cluster = MU::Config::Ref.get(@config["member_of_cluster"]).kitten(@deploy)
  if cluster.nil? || cluster.cloud_id.nil?
    raise MuError.new "Failed to resolve parent cluster of #{@mu_name}", details: @config["member_of_cluster"].to_h
  end

  @config['cluster_identifier'] = cluster.cloud_id.downcase

  # We're overriding @config["subnet_group_name"] because we need each
  # cluster member to use the cluster's subnet group instead of a unique
  # subnet group
  @config["subnet_group_name"] = cluster.cloud_desc.db_subnet_group if @vpc

  # Cluster members are always created fresh
  @config["creation_style"] = "new" if @config["creation_style"] != "new"

  manageDbParameterGroup if @config.has_key?("parameter_group_family")

  createDb
end
|
|
1293
|
+
|
|
1294
|
+
# Extend {#genericParams} with the options used when creating a brand-new
# database or cluster (password, version, storage, backup windows, etc.),
# keyed off whether we're building a cluster, a cluster member, or a
# standalone instance.
# @return [Hash]
def basicParams
  params = genericParams
  params[:storage_encrypted] = @config["storage_encrypted"]
  params[:master_user_password] = @config['password']
  params[:engine_version] = @config["engine_version"]
  params[:vpc_security_group_ids] = @config["vpc_security_group_ids"]
  params[:preferred_maintenance_window] = @config["preferred_maintenance_window"] if @config["preferred_maintenance_window"]
  params[:backup_retention_period] = @config["backup_retention_period"] if @config["backup_retention_period"]

  # Clusters and instances name their DB and parameter group differently
  if @config['create_cluster']
    params[:database_name] = @config["db_name"]
    params[:db_cluster_parameter_group_name] = @config["parameter_group_name"] if @config["parameter_group_name"]
  else
    params[:enable_cloudwatch_logs_exports] = @config['cloudwatch_logs'] if @config['cloudwatch_logs'] and !@config['cloudwatch_logs'].empty?
    params[:db_name] = @config["db_name"] if !@config['add_cluster_node']
    params[:db_parameter_group_name] = @config["parameter_group_name"] if @config["parameter_group_name"]
  end

  if @config['create_cluster'] or @config['add_cluster_node']
    params[:db_cluster_identifier] = @config["cluster_identifier"]
  else
    # Storage settings only apply to standalone instances; cluster storage
    # is managed by Aurora
    params[:storage_type] = @config["storage_type"]
    params[:allocated_storage] = @config["storage"]
    params[:multi_az] = @config['multi_az_on_create']
  end

  noun = @config['create_cluster'] ? "cluster" : "instance"

  # These belong on the cluster itself, or on instances that aren't
  # joining one (cluster members inherit them from the cluster)
  if noun == "cluster" or !params[:db_cluster_identifier]
    params[:backup_retention_period] = @config["backup_retention_period"]
    params[:preferred_backup_window] = @config["preferred_backup_window"]
    params[:master_username] = @config['master_user']
    params[:port] = @config["port"] if @config["port"]
    params[:iops] = @config["iops"] if @config['storage_type'] == "io1"
  end

  params
end
|
|
1332
|
+
|
|
1333
|
+
# creation_style = new, existing, new_snapshot, existing_snapshot
|
|
1334
|
+
# Issue the initial RDS create (or snapshot-restore) call for a plain
# database instance or cluster, per @config.
# creation_style = new, existing, new_snapshot, existing_snapshot
def create_basic
  params = basicParams

  # Arguments that snapshot-restores and cluster member instances must not
  # carry; they're inherited from the snapshot or the parent cluster.
  scrub_inherited_fields = lambda do
    %i{storage_encrypted master_user_password engine_version allocated_storage backup_retention_period preferred_backup_window master_username db_name database_name}.each do |field|
      params.delete(field)
    end
  end

  noun = @config["create_cluster"] ? "cluster" : "instance"

  MU.retrier([Aws::RDS::Errors::InvalidParameterValue, Aws::RDS::Errors::DBSubnetGroupNotFoundFault], max: 10, wait: 15) do
    rds = MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials'])
    if ["existing_snapshot", "new_snapshot"].include?(@config["creation_style"])
      scrub_inherited_fields.call
      MU.log "Creating database #{noun} #{@cloud_id} from snapshot #{@config["snapshot_id"]}"
      # instance restores use restore_db_instance_from_db_snapshot; cluster
      # restores use restore_db_cluster_from_snapshot
      rds.send("restore_db_#{noun}_from_#{noun == "instance" ? "db_" : ""}snapshot".to_sym, params)
    else
      scrub_inherited_fields.call if noun == "instance" && params[:db_cluster_identifier]
      MU.log "Creating pristine database #{noun} #{@cloud_id} (#{@config['name']}) in #{@config['region']}", MU::NOTICE, details: params
      rds.send("create_db_#{noun}".to_sym, params)
    end
  end
end
|
|
1355
|
+
|
|
1356
|
+
# creation_style = point_in_time
|
|
1357
|
+
# Restore a database instance or cluster from a point-in-time backup of
# an existing database, per @config.
# creation_style = point_in_time
def create_point_in_time
  @config["source"].kitten(@deploy)
  unless @config["source"].id
    raise MuError.new "Database '#{@config['name']}' couldn't resolve cloud id for source database", details: @config["source"].to_h
  end

  want_latest = (@config["restore_time"] == "latest")
  noun = @config['create_cluster'] ? "cluster" : "instance"

  params = genericParams
  params.delete(:db_instance_identifier)
  if @config['create_cluster']
    params[:source_db_cluster_identifier] = @config["source"].id
    params[:restore_to_time] = @config["restore_time"] unless want_latest
  else
    params[:source_db_instance_identifier] = @config["source"].id
    params[:target_db_instance_identifier] = @cloud_id
  end
  # NOTE(review): :restore_time is set on the cluster path too, alongside
  # :restore_to_time above — confirm the cluster restore API tolerates both.
  params[:restore_time] = @config['restore_time'] unless want_latest
  params[:use_latest_restorable_time] = true if want_latest

  MU.retrier([Aws::RDS::Errors::InvalidParameterValue], max: 15, wait: 20) do
    MU.log "Creating database #{noun} #{@cloud_id} based on point in time backup '#{@config['restore_time']}' of #{@config['source'].id}"
    MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).send("restore_db_#{noun}_to_point_in_time".to_sym, params)
  end
end
|
|
1381
|
+
|
|
1382
|
+
# creation_style = new, existing and read_replica_of is not nil
|
|
1383
|
+
# Build a read replica of an existing RDS instance, per @config.
# creation_style = new or existing, with read_replica_of set
def create_read_replica
  src = @config["source"]
  src.kitten(@deploy)
  unless src.id
    raise MuError.new "Database '#{@config['name']}' couldn't resolve cloud id for source database", details: src.to_h
  end

  params = {
    db_instance_identifier: @cloud_id,
    source_db_instance_identifier: src.id,
    db_instance_class: @config["size"],
    auto_minor_version_upgrade: @config["auto_minor_version_upgrade"],
    publicly_accessible: @config["publicly_accessible"],
    tags: @tags.map { |key, value| { :key => key, :value => value } },
    db_subnet_group_name: @config["subnet_group_name"],
    storage_type: @config["storage_type"]
  }

  # Cross-region replicas must reference the source by full ARN instead of
  # its bare identifier.
  if src.region && @config['region'] != src.region
    params[:source_db_instance_identifier] = MU::Cloud::AWS::Database.getARN(src.id, "db", "rds", region: src.region, credentials: @config['credentials'])
  end

  params[:port] = @config["port"] if @config["port"]
  params[:iops] = @config["iops"] if @config['storage_type'] == "io1"

  # If RDS refuses our subnet group outright, drop it and inherit the
  # source database's subnet group on the retry.
  on_retry = Proc.new do |e|
    if e.instance_of?(Aws::RDS::Errors::DBSubnetGroupNotAllowedFault)
      MU.log "Being forced to use source database's subnet group: #{e.message}", MU::WARN
      params.delete(:db_subnet_group_name)
    end
  end

  MU.retrier([Aws::RDS::Errors::InvalidDBInstanceState, Aws::RDS::Errors::InvalidParameterValue, Aws::RDS::Errors::DBSubnetGroupNotAllowedFault], max: 10, wait: 30, on_retry: on_retry) do
    MU.log "Creating read replica database instance #{@cloud_id} for #{@config['source'].id}"
    MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).create_db_instance_read_replica(params)
  end
end
|
|
1418
|
+
|
|
1419
|
+
# Sit on our hands until we show as available
|
|
1420
|
+
# Block until the instance (or cluster) reports the "available" state,
# logging a periodic heads-up while we wait.
def wait_until_available
  # Clusters report state via +status+, instances via +db_instance_status+.
  status_field = @config["create_cluster"] ? :status : :db_instance_status
  still_pending = Proc.new { cloud_desc(use_cache: false).send(status_field) != "available" }

  MU.retrier(wait: 10, max: 360, loop_if: still_pending) do |retries, _wait|
    if retries > 0 && (retries % 20).zero?
      MU.log "Waiting for RDS #{@config['create_cluster'] ? "cluster" : "database" } #{@cloud_id} to be ready...", MU::NOTICE
    end
  end
end
|
|
1432
|
+
|
|
1433
|
+
# Register this database's endpoint in Mu's DNS and emit summary log
# lines (endpoint address, and the vault holding credentials if any).
def do_naming
  dns_class = MU::Cloud.resourceClass("AWS", "DNSZone")

  if @config["create_cluster"]
    # Cluster endpoints are plain strings on the describe struct.
    dns_class.genericMuDNSEntry(name: cloud_desc.db_cluster_identifier, target: "#{cloud_desc.endpoint}.", cloudclass: MU::Cloud::Database, sync_wait: @config['dns_sync_wait'])
    MU.log "Database cluster #{@config['name']} is at #{cloud_desc.endpoint}", MU::SUMMARY
  else
    # Instance endpoints are structs with an +address+ field.
    dns_class.genericMuDNSEntry(name: cloud_desc.db_instance_identifier, target: "#{cloud_desc.endpoint.address}.", cloudclass: MU::Cloud::Database, sync_wait: @config['dns_sync_wait'])
    MU.log "Database #{@config['name']} is at #{cloud_desc.endpoint.address}", MU::SUMMARY
  end

  return unless @config['auth_vault']
  MU.log "knife vault show #{@config['auth_vault']['vault']} #{@config['auth_vault']['item']} for Database #{@config['name']} credentials", MU::SUMMARY
end
|
|
1445
|
+
|
|
1446
|
+
# Create a plain database instance or read replica, as described in our
|
|
1447
|
+
# +@config+.
|
|
1448
|
+
# @return [String]: The cloud provider's identifier for this database instance.
|
|
1449
|
+
def createDb

  # Dispatch on creation style: point-in-time restore, read replica, or
  # plain create / snapshot-restore (create_basic covers all of those).
  if @config['creation_style'] == "point_in_time"
    create_point_in_time
  elsif @config['read_replica_of']
    create_read_replica
  else
    create_basic
  end

  # The follow-up modify calls below require the DB to be fully up first.
  wait_until_available
  do_naming

  # If referencing an existing DB, insert this deploy's DB security group so it can access the thing
  if @config["creation_style"] == 'existing'
    mod_config = {}
    mod_config[:db_instance_identifier] = @cloud_id
    # Keep whatever security groups are already attached...
    mod_config[:vpc_security_group_ids] = cloud_desc.vpc_security_groups.map { |sg| sg.vpc_security_group_id }

    localdeploy_rule = @deploy.findLitterMate(type: "firewall_rule", name: "database"+@config['name'])
    if localdeploy_rule.nil?
      raise MU::MuError, "Database #{@config['name']} failed to find its generic security group 'database#{@config['name']}'"
    end
    # ...and append the group generated for this deploy.
    mod_config[:vpc_security_group_ids] << localdeploy_rule.cloud_id

    MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_instance(mod_config)
    MU.log "Modified database #{@cloud_id} with new security groups: #{mod_config}", MU::NOTICE
  end

  # When creating from a snapshot or replicating an existing database,
  # some of the create arguments that we'd want to carry over aren't
  # applicable- but we can apply them after the fact with a modify.
  if %w{existing_snapshot new_snapshot point_in_time}.include?(@config["creation_style"]) or @config["read_replica_of"]
    mod_config = {
      db_instance_identifier: @cloud_id,
      apply_immediately: true
    }
    # Security groups only transfer cleanly when we're not a cross-region
    # replica (same-region replicas and all snapshot restores are fine).
    if !@config["read_replica_of"] or @config['region'] == @config['source'].region
      mod_config[:vpc_security_group_ids] = @config["vpc_security_group_ids"]
    end

    # Replicas inherit these from their source, so only set them otherwise.
    if !@config["read_replica_of"]
      mod_config[:preferred_backup_window] = @config["preferred_backup_window"]
      mod_config[:backup_retention_period] = @config["backup_retention_period"]
      mod_config[:engine_version] = @config["engine_version"]
      mod_config[:allow_major_version_upgrade] = @config["allow_major_version_upgrade"] if @config['allow_major_version_upgrade']
      mod_config[:db_parameter_group_name] = @config["parameter_group_name"] if @config["parameter_group_name"]
      mod_config[:master_user_password] = @config['password']
      mod_config[:allocated_storage] = @config["storage"] if @config["storage"]
    end
    if @config["preferred_maintenance_window"]
      mod_config[:preferred_maintenance_window] = @config["preferred_maintenance_window"]
    end

    MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_instance(mod_config)
    # The modify can bounce the instance out of "available"; wait again.
    wait_until_available
  end

  # Maybe wait for DB instance to be in available state. DB should still be writeable at this state
  if @config['allow_major_version_upgrade'] && @config["creation_style"] == "new"
    # NOTE(review): trailing apostrophe in this log message looks like a typo.
    MU.log "Setting major database version upgrade on #{@cloud_id}'"

    MU::Cloud::AWS.rds(region: @config['region'], credentials: @config['credentials']).modify_db_instance(
      db_instance_identifier: @cloud_id,
      apply_immediately: true,
      allow_major_version_upgrade: true
    )
  end

  MU.log "Database #{@config['name']} (#{@mu_name}) is ready to use"
  # Return the cloud provider's identifier for the new database.
  @cloud_id
end
|
|
1521
|
+
|
|
1522
|
+
# Run the statements from +run_sql_on_deploy+ against the new database,
# tunneling through the configured NAT/bastion host when the instance is
# not publicly accessible. Logs and returns early (no raise) when no
# usable connection path exists or the SSH tunnel cannot be opened.
# @return [void]
def run_sql_commands
  MU.log "Running initial SQL commands on #{@config['name']}", details: @config['run_sql_on_deploy']

  port = address = nil
  gateway = nil

  if !cloud_desc.publicly_accessible and @vpc
    if @config['vpc']['nat_host_name']
      keypairname, _ssh_private_key, _ssh_public_key = @deploy.SSHKey
      begin
        gateway = Net::SSH::Gateway.new(
          @config['vpc']['nat_host_name'],
          @config['vpc']['nat_ssh_user'],
          :keys => [Etc.getpwuid(Process.uid).dir+"/.ssh"+"/"+keypairname],
          :keys_only => true,
          :auth_methods => ['publickey']
        )
        port = gateway.open(cloud_desc.endpoint.address, cloud_desc.endpoint.port)
        address = "127.0.0.1"
        MU.log "Tunneling #{@config['engine']} connection through #{@config['vpc']['nat_host_name']} via local port #{port}", MU::DEBUG
      rescue IOError => e
        MU.log "Got #{e.inspect} while connecting to #{@mu_name} through NAT #{@config['vpc']['nat_host_name']}", MU::ERR
        return
      end
    else
      MU.log "Can't run initial SQL commands! Database #{@mu_name} is not publicly accessible, but we have no NAT host for connecting to it", MU::WARN, details: @config['run_sql_on_deploy']
      return
    end
  else
    # BUGFIX: this branch previously read +database.endpoint+, but no local
    # named +database+ exists in this method; the RDS description is
    # +cloud_desc+ (as used in the tunnel branch above).
    port = cloud_desc.endpoint.port
    address = cloud_desc.endpoint.address
  end

  # Running SQL on deploy
  # NOTE(review): these helpers are declared private_class_method below;
  # calling them with an explicit receiver may need a send() — verify.
  if @config['engine'] =~ /postgres/
    MU::Cloud::AWS::Database.run_sql_postgres(address, port, @config['master_user'], @config['password'], cloud_desc.db_name, @config['run_sql_on_deploy'], @config['name'])
  elsif @config['engine'] =~ /mysql|maria/
    MU::Cloud::AWS::Database.run_sql_mysql(address, port, @config['master_user'], @config['password'], cloud_desc.db_name, @config['run_sql_on_deploy'], @config['name'])
  end

  # Close the SSH tunnel if we opened one (guarding on +gateway+ avoids a
  # NoMethodError on nil when no tunnel was ever established).
  if gateway
    begin
      gateway.close(port)
    rescue IOError => e
      MU.log "Failed to close ssh session to NAT after running sql_on_deploy", MU::ERR, details: e.inspect
    end
  end
end
|
|
1570
|
+
|
|
1571
|
+
# Execute a list of SQL statements against a PostgreSQL endpoint.
# @param address [String]: Host to connect to
# @param port [Integer]: TCP port
# @param user [String]: Login user
# @param password [String]: Login password
# @param db [String]: Database name
# @param cmds [Array<String>]: SQL statements, run in order
# @param identifier [String]: Label for log messages (defaults to address)
def self.run_sql_postgres(address, port, user, password, db, cmds = [], identifier = nil)
  identifier ||= address
  MU.log "Initiating postgres connection to #{address}:#{port} as #{user}"
  # Defer loading the pg gem until PG is first referenced.
  autoload :PG, 'pg'
  conn = nil
  begin
    conn = PG::Connection.new(
      :host => address,
      :port => port,
      :user => user,
      :password => password,
      :dbname => db
    )
    cmds.each do |sql|
      MU.log "Running #{sql} on database #{identifier}"
      conn.exec(sql)
    end
    conn.finish
  rescue PG::Error => e
    # Best-effort: log and carry on rather than failing the deploy.
    MU.log "Failed to run initial SQL commands on #{identifier} via #{address}:#{port}: #{e.inspect}", MU::WARN, details: conn
  end
end
|
|
1592
|
+
private_class_method :run_sql_postgres
|
|
1593
|
+
|
|
1594
|
+
# Execute a list of SQL statements against a MySQL/MariaDB endpoint.
# @param address [String]: Host to connect to
# @param port [Integer]: TCP port
# @param user [String]: Login user
# @param password [String]: Login password
# @param db [String]: Database name
# @param cmds [Array<String>]: SQL statements, run in order
# @param identifier [String]: Label for log messages (defaults to address)
def self.run_sql_mysql(address, port, user, password, db, cmds = [], identifier = nil)
  identifier ||= address
  # Defer loading the mysql gem until Mysql is first referenced.
  autoload :Mysql, 'mysql'
  MU.log "Initiating mysql connection to #{address}:#{port} as #{user}"
  conn = Mysql.new(address, user, password, db, port)
  cmds.each do |sql|
    MU.log "Running #{sql} on database #{identifier}"
    conn.query(sql)
  end
  conn.close
end
|
|
1605
|
+
private_class_method :run_sql_mysql
|
|
1606
|
+
|
|
1607
|
+
# Decide whether a resource, judged by its tags, belongs to the deploy
# being cleaned up and should therefore be deleted.
# @param tags [Array]: Tag objects responding to +key+ and +value+
# @param cloud_id [String]: The resource's cloud identifier
# @param ignoremaster [Boolean]: Match on deploy id alone, ignoring MU-MASTER-IP
# @param deploy_id [String]: The deploy id to match against
# @param master_ip [String]: This master's public IP
# @param known [Array<String>]: Cloud ids known to belong to the deploy
# @return [Boolean]
def self.should_delete?(tags, cloud_id, ignoremaster = false, deploy_id = MU.deploy_id, master_ip = MU.mu_public_ip, known = [])
  matched_deploy = tags.any? { |t| t.key == "MU-ID" && t.value == deploy_id }
  matched_master = tags.any? { |t| t.key == "MU-MASTER-IP" && t.value == master_ip }

  if matched_deploy && (ignoremaster || matched_master)
    true
  elsif known && cloud_id && known.include?(cloud_id)
    true
  else
    false
  end
end
|
|
1627
|
+
private_class_method :should_delete?
|
|
1628
|
+
|
|
1629
|
+
# Remove an RDS database and associated artifacts
|
|
1630
|
+
# @param db [OpenStruct]: The cloud provider's description of the database artifact
|
|
1631
|
+
# @return [void]
|
|
1632
|
+
def self.terminate_rds_instance(db, noop: false, skipsnapshots: false, region: MU.curRegion, deploy_id: MU.deploy_id, mu_name: nil, cloud_id: nil, credentials: nil, cluster: false, known: [])
  # Fill in the live API description if the caller only gave us an id.
  db ||= MU::Cloud::AWS::Database.find(cloud_id: cloud_id, region: region, credentials: credentials, cluster: cluster).values.first if cloud_id
  # db_obj is a fresh local here, so ||= is effectively plain assignment.
  db_obj ||= MU::MommaCat.findStray(
    "AWS",
    "database",
    region: region,
    deploy_id: deploy_id,
    cloud_id: cloud_id,
    mu_name: mu_name,
    dummy_ok: true
  ).first
  if db_obj
    cloud_id ||= db_obj.cloud_id
    db ||= db_obj.cloud_desc
    # Mark this deploy's parameter/subnet groups as known artifacts so
    # related cleanup can find them (known ||= [] guards an explicit nil).
    ["parameter_group_name", "subnet_group_name"].each { |attr|
      if db_obj.config[attr]
        known ||= []
        known << db_obj.config[attr]
      end
    }
  end

  raise MuError, "terminate_rds_instance requires a non-nil database descriptor (#{cloud_id})" if db.nil? or cloud_id.nil?

  # Wait out transitional states; the +return+ inside the block exits this
  # whole method if the database vanishes while we wait.
  MU.retrier([], wait: 60, loop_if: Proc.new { %w{creating modifying backing-up}.include?(cluster ? db.status : db.db_instance_status) }) {
    db = MU::Cloud::AWS::Database.find(cloud_id: cloud_id, region: region, credentials: credentials, cluster: cluster).values.first
    return if db.nil?
  }

  # Remove the Mu DNS entry pointing at this endpoint.
  MU::Cloud.resourceClass("AWS", "DNSZone").genericMuDNSEntry(name: cloud_id, target: (cluster ? db.endpoint : db.endpoint.address), cloudclass: MU::Cloud::Database, delete: true) if !noop

  if %w{deleting deleted}.include?(cluster ? db.status : db.db_instance_status)
    MU.log "#{cloud_id} has already been terminated", MU::WARN
  else
    params = cluster ? { :db_cluster_identifier => cloud_id } : { :db_instance_identifier => cloud_id }

    # Skip the final snapshot when asked to, or when a final snapshot isn't
    # applicable (cluster member instances and read replicas).
    if skipsnapshots or (!cluster and (db.db_cluster_identifier or db.read_replica_source_db_instance_identifier))
      MU.log "Terminating #{cluster ? "cluster" : "database" } #{cloud_id} (not saving final snapshot)"
      params[:skip_final_snapshot] = true
    else
      MU.log "Terminating #{cluster ? "cluster" : "database" } #{cloud_id} (final snapshot: #{cloud_id}-mufinal)"
      params[:skip_final_snapshot] = false
      params[:final_db_snapshot_identifier] = "#{cloud_id}-mufinal"
    end

    if !noop
      # If the snapshot already exists (or quota is hit), retry the delete
      # without asking for one.
      on_retry = Proc.new { |e|
        if [Aws::RDS::Errors::DBSnapshotAlreadyExists, Aws::RDS::Errors::DBClusterSnapshotAlreadyExistsFault, Aws::RDS::Errors::DBClusterQuotaExceeded].include?(e.class)
          MU.log e.message, MU::WARN
          params[:skip_final_snapshot] = true
          params.delete(:final_db_snapshot_identifier)
        end
      }
      MU.retrier([Aws::RDS::Errors::InvalidDBInstanceState, Aws::RDS::Errors::DBSnapshotAlreadyExists, Aws::RDS::Errors::InvalidDBClusterStateFault], wait: 60, max: 20, on_retry: on_retry) {
        # NOTE(review): this inner !noop check is redundant — we're already
        # inside an if !noop branch.
        if !noop
          cluster ? MU::Cloud::AWS.rds(region: region, credentials: credentials).delete_db_cluster(params) : MU::Cloud::AWS.rds(region: region, credentials: credentials).delete_db_instance(params)
        end
      }
      # Poll until the API reports "deleted" (or the describe call starts
      # raising DBInstanceNotFound, which is ignored and ends the loop).
      del_db = nil
      MU.retrier([], wait: 10, ignoreme: [Aws::RDS::Errors::DBInstanceNotFound], loop_if: Proc.new { del_db and ((!cluster and del_db.db_instance_status != "deleted") or (cluster and del_db.status != "deleted")) }) {
        del_db = MU::Cloud::AWS::Database.find(cloud_id: cloud_id, region: region, cluster: cluster).values.first
      }
    end
  end

  # Remove ancillary artifacts: old-style RDS security groups, then any
  # groomer-side secrets (e.g. Chef vaults).
  purge_rds_sgs(cloud_id, region, credentials, noop)

  purge_groomer_artifacts(db_obj, cloud_id, noop)

  MU.log "#{cloud_id} has been terminated" if !noop
end
|
|
1703
|
+
private_class_method :terminate_rds_instance
|
|
1704
|
+
|
|
1705
|
+
# Remove groomer-side secrets (e.g. the Chef vault of credentials) left
# behind by a database that's being terminated.
# @param db_obj [MU::Cloud::Database,nil]: The Mu resource object, if found
# @param cloud_id [String]: The database's cloud identifier
# @param noop [Boolean]: Log/plan only; delete nothing
def self.purge_groomer_artifacts(db_obj, cloud_id, noop)
  return unless db_obj

  # Cleanup the database vault, using the groomer named in the resource's
  # config when available, otherwise the site default.
  groomer_name = MU::Config.defaultGroomer
  if db_obj.respond_to?(:config) && db_obj.config && db_obj.config.has_key?("groomer")
    groomer_name = db_obj.config["groomer"]
  end

  MU::Groomer.loadGroomer(groomer_name).deleteSecret(vault: cloud_id.upcase) unless noop
end
|
|
1718
|
+
private_class_method :purge_groomer_artifacts
|
|
1719
|
+
|
|
1720
|
+
# Remove legacy (EC2-Classic era) RDS security groups named after this
# database, if any exist.
# @param cloud_id [String]: The database's cloud identifier
# @param region [String]: AWS region to operate in
# @param credentials [String,nil]: Mu credential set to use
# @param noop [Boolean]: Log/plan only; delete nothing
def self.purge_rds_sgs(cloud_id, region, credentials, noop)
  doomed = []
  begin
    desc = MU::Cloud::AWS.rds(region: region, credentials: credentials).describe_db_security_groups(db_security_group_name: cloud_id)
    doomed << cloud_id unless desc.nil?
  rescue Aws::RDS::Errors::DBSecurityGroupNotFound
    MU.log "No such RDS security group #{cloud_id} to purge", MU::DEBUG
  end

  # RDS security groups can depend on EC2 security groups, do these last
  doomed.each do |sg|
    MU.log "Removing RDS Security Group #{sg}"
    begin
      MU::Cloud::AWS.rds(region: region, credentials: credentials).delete_db_security_group(db_security_group_name: sg) unless noop
    rescue Aws::RDS::Errors::DBSecurityGroupNotFound
      MU.log "RDS Security Group #{sg} disappeared before I could remove it", MU::NOTICE
    end
  end
end
|
|
1739
|
+
private_class_method :purge_rds_sgs
|
|
1740
|
+
|
|
1741
|
+
end #class
|
|
1742
|
+
end #class
|
|
1743
|
+
end
|
|
1744
|
+
end #module
|