cloud-mu 3.4.0 → 3.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/ansible/roles/mu-nat/tasks/main.yml +3 -0
- data/bin/mu-aws-setup +41 -7
- data/bin/mu-azure-setup +34 -0
- data/bin/mu-configure +214 -119
- data/bin/mu-gcp-setup +37 -2
- data/bin/mu-node-manage +3 -0
- data/bin/mu-refresh-ssl +67 -0
- data/bin/mu-run-tests +14 -4
- data/bin/mu-self-update +30 -10
- data/bin/mu-upload-chef-artifacts +30 -26
- data/cloud-mu.gemspec +8 -6
- data/cookbooks/mu-master/attributes/default.rb +5 -1
- data/cookbooks/mu-master/metadata.rb +2 -2
- data/cookbooks/mu-master/recipes/default.rb +81 -26
- data/cookbooks/mu-master/recipes/init.rb +197 -62
- data/cookbooks/mu-master/recipes/update_nagios_only.rb +1 -1
- data/cookbooks/mu-master/recipes/vault.rb +78 -77
- data/cookbooks/mu-master/templates/default/mods/rewrite.conf.erb +1 -0
- data/cookbooks/mu-master/templates/default/nagios.conf.erb +103 -0
- data/cookbooks/mu-master/templates/default/web_app.conf.erb +14 -30
- data/cookbooks/mu-tools/attributes/default.rb +5 -0
- data/cookbooks/mu-tools/files/centos-6/CentOS-Base.repo +47 -0
- data/cookbooks/mu-tools/libraries/helper.rb +12 -2
- data/cookbooks/mu-tools/libraries/monkey.rb +1 -1
- data/cookbooks/mu-tools/recipes/apply_security.rb +6 -0
- data/cookbooks/mu-tools/recipes/aws_api.rb +6 -4
- data/cookbooks/mu-tools/recipes/base_repositories.rb +1 -1
- data/cookbooks/mu-tools/recipes/gcloud.rb +2 -9
- data/cookbooks/mu-tools/recipes/google_api.rb +5 -2
- data/cookbooks/mu-tools/resources/disk.rb +108 -58
- data/extras/Gemfile.lock.bootstrap +394 -0
- data/extras/bucketstubs/error.html +0 -0
- data/extras/bucketstubs/index.html +0 -0
- data/extras/clean-stock-amis +9 -9
- data/extras/git_rpm/build.sh +20 -0
- data/extras/git_rpm/mugit.spec +53 -0
- data/extras/image-generators/VMWare/centos8.yaml +15 -0
- data/extras/openssl_rpm/build.sh +19 -0
- data/extras/openssl_rpm/mussl.spec +46 -0
- data/extras/python_rpm/muthon.spec +14 -4
- data/extras/ruby_rpm/muby.spec +9 -5
- data/extras/sqlite_rpm/build.sh +19 -0
- data/extras/sqlite_rpm/muqlite.spec +47 -0
- data/install/installer +7 -5
- data/modules/mu.rb +12 -5
- data/modules/mu/cloud/machine_images.rb +1 -1
- data/modules/mu/cloud/providers.rb +6 -1
- data/modules/mu/cloud/resource_base.rb +1 -1
- data/modules/mu/cloud/ssh_sessions.rb +4 -0
- data/modules/mu/config.rb +28 -12
- data/modules/mu/config/database.rb +2 -2
- data/modules/mu/config/firewall_rule.rb +1 -1
- data/modules/mu/config/ref.rb +2 -2
- data/modules/mu/config/schema_helpers.rb +12 -3
- data/modules/mu/config/server.rb +10 -4
- data/modules/mu/config/server_pool.rb +2 -2
- data/modules/mu/config/vpc.rb +10 -10
- data/modules/mu/defaults/AWS.yaml +32 -32
- data/modules/mu/deploy.rb +23 -10
- data/modules/mu/groomers/chef.rb +2 -2
- data/modules/mu/master.rb +49 -3
- data/modules/mu/mommacat.rb +8 -5
- data/modules/mu/mommacat/naming.rb +2 -2
- data/modules/mu/mommacat/storage.rb +22 -27
- data/modules/mu/providers/aws.rb +142 -48
- data/modules/mu/providers/aws/alarm.rb +3 -3
- data/modules/mu/providers/aws/bucket.rb +19 -19
- data/modules/mu/providers/aws/cache_cluster.rb +22 -22
- data/modules/mu/providers/aws/cdn.rb +2 -2
- data/modules/mu/providers/aws/collection.rb +14 -14
- data/modules/mu/providers/aws/container_cluster.rb +27 -27
- data/modules/mu/providers/aws/database.rb +40 -39
- data/modules/mu/providers/aws/dnszone.rb +5 -5
- data/modules/mu/providers/aws/endpoint.rb +35 -35
- data/modules/mu/providers/aws/firewall_rule.rb +26 -23
- data/modules/mu/providers/aws/function.rb +28 -28
- data/modules/mu/providers/aws/group.rb +7 -7
- data/modules/mu/providers/aws/habitat.rb +2 -2
- data/modules/mu/providers/aws/job.rb +6 -6
- data/modules/mu/providers/aws/loadbalancer.rb +34 -34
- data/modules/mu/providers/aws/log.rb +14 -14
- data/modules/mu/providers/aws/msg_queue.rb +10 -10
- data/modules/mu/providers/aws/nosqldb.rb +8 -8
- data/modules/mu/providers/aws/notifier.rb +7 -7
- data/modules/mu/providers/aws/role.rb +17 -15
- data/modules/mu/providers/aws/search_domain.rb +10 -10
- data/modules/mu/providers/aws/server.rb +176 -95
- data/modules/mu/providers/aws/server_pool.rb +65 -105
- data/modules/mu/providers/aws/storage_pool.rb +17 -9
- data/modules/mu/providers/aws/user.rb +1 -1
- data/modules/mu/providers/aws/vpc.rb +103 -51
- data/modules/mu/providers/aws/vpc_subnet.rb +43 -39
- data/modules/mu/providers/azure.rb +78 -12
- data/modules/mu/providers/azure/server.rb +18 -3
- data/modules/mu/providers/cloudformation/server.rb +1 -1
- data/modules/mu/providers/google.rb +19 -4
- data/modules/mu/providers/google/folder.rb +6 -2
- data/modules/mu/providers/google/function.rb +65 -30
- data/modules/mu/providers/google/role.rb +1 -1
- data/modules/mu/providers/google/vpc.rb +27 -2
- data/modules/tests/aws-servers-with-handrolled-iam.yaml +37 -0
- data/modules/tests/k8s.yaml +1 -1
- metadata +24 -8
data/modules/mu/providers/aws/container_cluster.rb:

```diff
@@ -65,7 +65,7 @@ module MU

       on_retry = Proc.new { |e|
         # soul-crushing, yet effective
-        if e.message.match(/because (#{Regexp.quote(@…
+        if e.message.match(/because (#{Regexp.quote(@region)}[a-z]), the targeted availability zone, does not currently have sufficient capacity/)
           bad_az = Regexp.last_match(1)
           deletia = []
           mySubnets.each { |subnet|
@@ -81,7 +81,7 @@ module MU

       MU.retrier([Aws::EKS::Errors::UnsupportedAvailabilityZoneException, Aws::EKS::Errors::InvalidParameterException], on_retry: on_retry, max: subnet_ids.size) {
         MU.log "Creating EKS cluster #{@mu_name}", details: params
-        MU::Cloud::AWS.eks(region: @…
+        MU::Cloud::AWS.eks(region: @region, credentials: @credentials).create_cluster(params)
       }
       @cloud_id = @mu_name

```
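The two hunks above work together: `on_retry` strips out whichever subnet sits in a capacity-starved availability zone, and `MU.retrier` re-attempts `create_cluster` up to once per candidate subnet. A rough, self-contained sketch of that retry shape, assuming the mu gem is loaded for `MU.retrier` (the options shown are the ones visible at these call sites; the AZ-to-subnet bookkeeping and the error regex are simplified stand-ins):

```ruby
require "aws-sdk-eks"

eks        = Aws::EKS::Client.new(region: "us-east-1")
subnet_azs = { "subnet-aaa" => "us-east-1a", "subnet-bbb" => "us-east-1b" } # assumed mapping
params     = {
  name: "example",
  role_arn: "arn:aws:iam::123456789012:role/eks-service-role",              # illustrative
  resources_vpc_config: { subnet_ids: subnet_azs.keys }
}

# Before the next attempt, drop the subnet whose AZ the error complains about.
on_retry = Proc.new { |e|
  if e.message =~ /(\S+), the targeted availability zone, does not currently have sufficient capacity/
    bad_az = Regexp.last_match(1)
    params[:resources_vpc_config][:subnet_ids].reject! { |s| subnet_azs[s] == bad_az }
  end
}

MU.retrier([Aws::EKS::Errors::UnsupportedAvailabilityZoneException,
            Aws::EKS::Errors::InvalidParameterException],
           on_retry: on_retry, max: subnet_azs.size) {
  eks.create_cluster(params)
}
```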
```diff
@@ -100,7 +100,7 @@ module MU

         MU.log "Creation of EKS cluster #{@mu_name} complete"
       else
-        MU::Cloud::AWS.ecs(region: @…
+        MU::Cloud::AWS.ecs(region: @region, credentials: @credentials).create_cluster(
           cluster_name: @mu_name
         )
         @cloud_id = @mu_name
@@ -118,7 +118,7 @@ module MU
       # this account; EKS applications might want one, but will fail in
       # confusing ways if this hasn't been done.
       begin
-        MU::Cloud::AWS.iam(credentials: @…
+        MU::Cloud::AWS.iam(credentials: @credentials).create_service_linked_role(
           aws_service_name: "elasticloadbalancing.amazonaws.com"
         )
       rescue ::Aws::IAM::Errors::InvalidInput
```
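The `begin`/`rescue ::Aws::IAM::Errors::InvalidInput` above makes the service-linked-role call idempotent: IAM raises `InvalidInput` when the role already exists, which is the expected outcome on every run after the first. A minimal sketch of the same pattern against the raw SDK:

```ruby
require "aws-sdk-iam"

iam = Aws::IAM::Client.new

begin
  # One-time, account-wide prerequisite: ELB needs its service-linked role
  # before EKS workloads can provision load balancers.
  iam.create_service_linked_role(aws_service_name: "elasticloadbalancing.amazonaws.com")
rescue Aws::IAM::Errors::InvalidInput
  # The role already exists; that's fine.
end
```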
```diff
@@ -170,7 +170,7 @@ module MU
       if tasks.size > 0
         tasks_failing = false
         MU.retrier(wait: 15, max: 10, loop_if: Proc.new { tasks_failing }){ |retries, _wait|
-          tasks_failing = !MU::Cloud::AWS::ContainerCluster.tasksRunning?(@mu_name, log: (retries > 0), region: @…
+          tasks_failing = !MU::Cloud::AWS::ContainerCluster.tasksRunning?(@mu_name, log: (retries > 0), region: @region, credentials: @credentials)
         }

         if tasks_failing
@@ -290,12 +290,12 @@ MU.log c.name, MU::NOTICE, details: t
       return nil if !@cloud_id
       @cloud_desc_cache = if @config['flavor'] == "EKS" or
           (@config['flavor'] == "Fargate" and !@config['containers'])
-        resp = MU::Cloud::AWS.eks(region: @…
+        resp = MU::Cloud::AWS.eks(region: @region, credentials: @credentials).describe_cluster(
           name: @cloud_id
         )
         resp.cluster
       else
-        resp = MU::Cloud::AWS.ecs(region: @…
+        resp = MU::Cloud::AWS.ecs(region: @region, credentials: @credentials).describe_clusters(
           clusters: [@cloud_id]
         )
         resp.clusters.first
```
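`cloud_desc` memoizes its API lookup in `@cloud_desc_cache` and chooses the EKS or ECS control plane based on the cluster's flavor. Reduced to the bare pattern, with plain SDK clients standing in for mu's wrappers:

```ruby
require "aws-sdk-eks"
require "aws-sdk-ecs"

# Memoized describe: repeated reads cost one API call total.
def cloud_desc(cloud_id, flavor, region)
  @cloud_desc_cache ||= if flavor == "EKS"
    Aws::EKS::Client.new(region: region).describe_cluster(name: cloud_id).cluster
  else
    Aws::ECS::Client.new(region: region).describe_clusters(clusters: [cloud_id]).clusters.first
  end
end
```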
```diff
@@ -318,7 +318,7 @@ MU.log c.name, MU::NOTICE, details: t
     def notify
       deploy_struct = MU.structToHash(cloud_desc)
       deploy_struct['cloud_id'] = @mu_name
-      deploy_struct["region"] = @…
+      deploy_struct["region"] = @region
       if @config['flavor'] == "EKS"
         deploy_struct["max_pods"] = @config['kubernetes']['max_pods'].to_s
         # XXX if FargateKS, get the Fargate Profile artifact
@@ -1381,7 +1381,7 @@ start = Time.now
           role["tags"] = cluster["tags"] if !cluster["tags"].nil?
           role["optional_tags"] = cluster["optional_tags"] if !cluster["optional_tags"].nil?
           configurator.insertKitten(role, "roles")
-          MU::Config.addDependency(cluster, cluster["name"]+"pods", "role",…
+          MU::Config.addDependency(cluster, cluster["name"]+"pods", "role", their_phase: "groom")
           if !MU::Master.kubectl
             MU.log "Since I can't find a kubectl executable, you will have to handle all service account, user, and role bindings manually!", MU::WARN
           end
@@ -1530,7 +1530,7 @@ start = Time.now
           role["tags"] = cluster["tags"] if !cluster["tags"].nil?
           role["optional_tags"] = cluster["optional_tags"] if !cluster["optional_tags"].nil?
           configurator.insertKitten(role, "roles")
-          MU::Config.addDependency(cluster, cluster["name"]+"controlplane", "role",…
+          MU::Config.addDependency(cluster, cluster["name"]+"controlplane", "role", their_phase: "groom")
         end

         ok
@@ -1580,7 +1580,7 @@ start = Time.now
       @cacert = cloud_desc.certificate_authority.data
       @cluster = @mu_name
       if @config['flavor'] != "Fargate"
-        resp = MU::Cloud::AWS.iam(credentials: @…
+        resp = MU::Cloud::AWS.iam(credentials: @credentials).get_role(role_name: @mu_name+"WORKERS")
         @worker_role_arn = resp.role.arn
       end
       kube_conf = @deploy.deploy_dir+"/kubeconfig-#{@config['name']}"
@@ -1647,7 +1647,7 @@ start = Time.now
         :tags => @tags
       }
       begin
-        resp = MU::Cloud::AWS.eks(region: @…
+        resp = MU::Cloud::AWS.eks(region: @region, credentials: @credentials).describe_fargate_profile(
           cluster_name: @mu_name,
           fargate_profile_name: profname
         )
@@ -1660,7 +1660,7 @@ start = Time.now
           old_desc["subnets"].sort!
           if !old_desc.eql?(new_desc)
             MU.log "Deleting Fargate profile #{profname} in order to apply changes", MU::WARN, details: desc
-            MU::Cloud::AWS::ContainerCluster.purge_fargate_profile(profname, @mu_name, @…
+            MU::Cloud::AWS::ContainerCluster.purge_fargate_profile(profname, @mu_name, @region, @credentials)
           else
             next
           end
@@ -1669,9 +1669,9 @@ start = Time.now
         # This is just fine!
       end
       MU.log "Creating EKS Fargate profile #{profname}", details: desc
-      resp = MU::Cloud::AWS.eks(region: @…
+      resp = MU::Cloud::AWS.eks(region: @region, credentials: @credentials).create_fargate_profile(desc)
       begin
-        resp = MU::Cloud::AWS.eks(region: @…
+        resp = MU::Cloud::AWS.eks(region: @region, credentials: @credentials).describe_fargate_profile(
           cluster_name: @mu_name,
           fargate_profile_name: profname
         )
```
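`create_fargate_profile` returns before the profile is usable, which is why the code immediately re-describes it. A standalone sketch of the create-then-poll shape (names, role ARN, and the 30-second interval are illustrative):

```ruby
require "aws-sdk-eks"

eks = Aws::EKS::Client.new(region: "us-east-1")

eks.create_fargate_profile(
  cluster_name: "example",
  fargate_profile_name: "example-profile",
  pod_execution_role_arn: "arn:aws:iam::123456789012:role/pod-exec", # illustrative
  subnets: ["subnet-aaa"],
  selectors: [{ namespace: "default" }]
)

# Profiles are created asynchronously; wait for ACTIVE before scheduling pods.
loop do
  status = eks.describe_fargate_profile(
    cluster_name: "example", fargate_profile_name: "example-profile"
  ).fargate_profile.status
  break if status == "ACTIVE"
  sleep 30
end
```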
```diff
@@ -1711,19 +1711,19 @@ start = Time.now
         tagme << s.cloud_id
         tagme_elb << s.cloud_id if !s.private?
       }
-      rtbs = MU::Cloud::AWS.ec2(region: @…
+      rtbs = MU::Cloud::AWS.ec2(region: @region, credentials: @credentials).describe_route_tables(
         filters: [ { name: "vpc-id", values: [@vpc.cloud_id] } ]
       ).route_tables
       tagme.concat(rtbs.map { |r| r.route_table_id } )
       main_sg = @deploy.findLitterMate(type: "firewall_rules", name: "server_pool#{@config['name']}workers")
       tagme << main_sg.cloud_id if main_sg
       MU.log "Applying kubernetes.io tags to VPC resources", details: tagme
-      MU::Cloud::AWS.createTag(tagme, "kubernetes.io/cluster/#{@mu_name}", "shared", credentials: @…
-      MU::Cloud::AWS.createTag(tagme_elb, "kubernetes.io/cluster/elb", @mu_name, credentials: @…
+      MU::Cloud::AWS.createTag(tagme, "kubernetes.io/cluster/#{@mu_name}", "shared", credentials: @credentials)
+      MU::Cloud::AWS.createTag(tagme_elb, "kubernetes.io/cluster/elb", @mu_name, credentials: @credentials)
     end

     def manage_ecs_workers
-      resp = MU::Cloud::AWS.ecs(region: @…
+      resp = MU::Cloud::AWS.ecs(region: @region, credentials: @credentials).list_container_instances({
        cluster: @mu_name
      })
      existing = {}
```
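`MU::Cloud::AWS.createTag` wraps EC2's tagging call; the `kubernetes.io/cluster/<name>` key is what Kubernetes' AWS cloud provider uses to discover which subnets, route tables, and security groups it may touch. A raw-SDK equivalent (resource IDs are placeholders):

```ruby
require "aws-sdk-ec2"

ec2 = Aws::EC2::Client.new(region: "us-east-1")

# "shared" tells the in-cluster AWS cloud provider these resources may be
# used by this cluster without being owned by it.
ec2.create_tags(
  resources: ["subnet-aaa", "rtb-bbb", "sg-ccc"],
  tags: [{ key: "kubernetes.io/cluster/example-cluster", value: "shared" }]
)
```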
```diff
@@ -1733,7 +1733,7 @@ start = Time.now
         uuids << arn.sub(/^.*?:container-instance\//, "")
       }
       if uuids.size > 0
-        resp = MU::Cloud::AWS.ecs(region: @…
+        resp = MU::Cloud::AWS.ecs(region: @region, credentials: @credentials).describe_container_instances({
           cluster: @mu_name,
           container_instances: uuids
         })
@@ -1744,7 +1744,7 @@ start = Time.now
       end

       threads = []
-      resource_lookup = MU::Cloud::AWS.listInstanceTypes(@…
+      resource_lookup = MU::Cloud::AWS.listInstanceTypes(@region)[@region]
       serverpool = if ['EKS', 'ECS'].include?(@config['flavor'])
         @deploy.findLitterMate(type: "server_pools", name: @config["name"]+"workers")
       end
@@ -1789,7 +1789,7 @@ start = Time.now
           params[:container_instance_arn] = existing[node.cloud_id].container_instance_arn
           MU.log "Updating ECS instance #{node} in cluster #{@mu_name}", MU::NOTICE, details: params
         end
-        MU::Cloud::AWS.ecs(region: @…
+        MU::Cloud::AWS.ecs(region: @region, credentials: @credentials).register_container_instance(params)

       }
     }
@@ -1805,7 +1805,7 @@ start = Time.now
       @loadbalancers.each {|lb|
         MU.log "Mapping LB #{lb.mu_name} to service #{c['name']}", MU::INFO
         if lb.cloud_desc.type != "classic"
-          elb_groups = MU::Cloud::AWS.elb2(region: @…
+          elb_groups = MU::Cloud::AWS.elb2(region: @region, credentials: @credentials).describe_target_groups({
            load_balancer_arn: lb.cloud_desc.load_balancer_arn
          })
          matching_target_groups = []
@@ -1957,7 +1957,7 @@ start = Time.now
       MU.log "Registering task definition #{service_name} with #{container_definitions.size.to_s} containers"

       # XXX this helpfully keeps revisions, but let's compare anyway and avoid cluttering with identical ones
-      resp = MU::Cloud::AWS.ecs(region: @…
+      resp = MU::Cloud::AWS.ecs(region: @region, credentials: @credentials).register_task_definition(task_params)

       resp.task_definition.task_definition_arn
     end
@@ -1965,7 +1965,7 @@ start = Time.now
     def list_ecs_services
       svc_resp = nil
       MU.retrier([Aws::ECS::Errors::ClusterNotFoundException], wait: 5, max: 10){
-        svc_resp = MU::Cloud::AWS.ecs(region: @…
+        svc_resp = MU::Cloud::AWS.ecs(region: @region, credentials: @credentials).list_services(
           cluster: arn
         )
       }
@@ -2005,14 +2005,14 @@ start = Time.now
       if !existing_svcs.include?(service_name)
         MU.log "Creating Service #{service_name}"

-        MU::Cloud::AWS.ecs(region: @…
+        MU::Cloud::AWS.ecs(region: @region, credentials: @credentials).create_service(service_params)
       else
         service_params[:service] = service_params[:service_name].dup
         service_params.delete(:service_name)
         service_params.delete(:launch_type)
         MU.log "Updating Service #{service_name}", MU::NOTICE, details: service_params

-        MU::Cloud::AWS.ecs(region: @…
+        MU::Cloud::AWS.ecs(region: @region, credentials: @credentials).update_service(service_params)
       end
     end

```
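The create/update split at the bottom of that hunk exists because the two ECS calls take slightly different parameters: `update_service` names the service with `:service` rather than `:service_name`, and `launch_type` can only be set at creation. A sketch of the same munging against the raw SDK (cluster and service names are placeholders):

```ruby
require "aws-sdk-ecs"

ecs = Aws::ECS::Client.new(region: "us-east-1")
service_params = {
  cluster: "example",
  service_name: "web",
  task_definition: "web:3",
  desired_count: 2,
  launch_type: "EC2"
}

existing = ecs.list_services(cluster: "example").service_arns

if existing.none? { |arn| arn.end_with?("/web") }
  ecs.create_service(service_params)
else
  # update_service uses :service and rejects :launch_type.
  service_params[:service] = service_params.delete(:service_name)
  service_params.delete(:launch_type)
  ecs.update_service(service_params)
end
```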
data/modules/mu/providers/aws/database.rb:

```diff
@@ -224,7 +224,7 @@ module MU
       }

       modify_db_cluster_struct[:preferred_maintenance_window] = @config["preferred_maintenance_window"] if @config["preferred_maintenance_window"]
-      MU::Cloud::AWS.rds(region: @…
+      MU::Cloud::AWS.rds(region: @region, credentials: @credentials).modify_db_cluster(modify_db_cluster_struct)
       wait_until_available
     end

@@ -305,7 +305,7 @@ module MU
     def toKitten(**_args)
       bok = {
         "cloud" => "AWS",
-        "region" => @…
+        "region" => @region,
         "credentials" => @credentials,
         "cloud_id" => @cloud_id,
       }
@@ -317,8 +317,8 @@ module MU
       end

       noun = @config["create_cluster"] ? "cluster" : "db"
-      tags = MU::Cloud::AWS.rds(credentials: @credentials, region: @…
-        resource_name: MU::Cloud::AWS::Database.getARN(@cloud_id, noun, "rds", region: @…
+      tags = MU::Cloud::AWS.rds(credentials: @credentials, region: @region).list_tags_for_resource(
+        resource_name: MU::Cloud::AWS::Database.getARN(@cloud_id, noun, "rds", region: @region, credentials: @credentials)
       ).tag_list
       if tags and !tags.empty?
         bok['tags'] = MU.structToHash(tags, stringify_keys: true)
```
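`toKitten` adopts an existing database into a Mu description, and RDS only exposes tags by ARN, hence the `getARN` helper. The underlying call looks like this (the ARN is illustrative):

```ruby
require "aws-sdk-rds"

rds = Aws::RDS::Client.new(region: "us-east-1")

# RDS has no "list tags by identifier"; you must hand it the full ARN.
arn  = "arn:aws:rds:us-east-1:123456789012:db:mydb"
tags = rds.list_tags_for_resource(resource_name: arn).tag_list
tags.each { |t| puts "#{t.key}=#{t.value}" }
```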
```diff
@@ -332,11 +332,11 @@ module MU
       bok["create_cluster"] = true if @config['create_cluster']

       params = if bok['create_cluster']
-        MU::Cloud::AWS.rds(credentials: @credentials, region: @…
+        MU::Cloud::AWS.rds(credentials: @credentials, region: @region).describe_db_cluster_parameters(
           db_cluster_parameter_group_name: cloud_desc.db_cluster_parameter_group
         ).parameters
       else
-        MU::Cloud::AWS.rds(credentials: @credentials, region: @…
+        MU::Cloud::AWS.rds(credentials: @credentials, region: @region).describe_db_parameters(
           db_parameter_group_name: cloud_desc.db_parameter_groups.first.db_parameter_group_name
         ).parameters
       end
@@ -353,7 +353,7 @@ module MU
           id: sg.vpc_security_group_id,
           cloud: "AWS",
           credentials: @credentials,
-          region: @…
+          region: @region,
           type: "firewall_rules",
         )
       }
@@ -373,7 +373,7 @@ module MU
         # we have no sensible way to handle heterogenous cluster members, so
         # for now just assume they're all the same
         cloud_desc.db_cluster_members.each { |db|
-          member = MU::Cloud::AWS::Database.find(cloud_id: db.db_instance_identifier, region: @…
+          member = MU::Cloud::AWS::Database.find(cloud_id: db.db_instance_identifier, region: @region, credentials: @credentials).values.first

           sizes << member.db_instance_class
           if member.db_subnet_group and member.db_subnet_group.vpc_id
@@ -385,14 +385,14 @@ module MU
         vpcs.uniq!
         bok['size'] = sizes.sort.first if !sizes.empty?
         if !vpcs.empty?
-          myvpc = MU::MommaCat.findStray("AWS", "vpc", cloud_id: vpcs.sort.first.vpc_id, credentials: @credentials, region: @…
+          myvpc = MU::MommaCat.findStray("AWS", "vpc", cloud_id: vpcs.sort.first.vpc_id, credentials: @credentials, region: @region, dummy_ok: true, no_deploy_search: true).first
           bok['vpc'] = myvpc.getReference(vpcs.sort.first.subnets.map { |s| s.subnet_identifier })
         end
       else
         bok['size'] = cloud_desc.db_instance_class
         bok['auto_minor_version_upgrade'] = true if cloud_desc.auto_minor_version_upgrade
         if cloud_desc.db_subnet_group
-          myvpc = MU::MommaCat.findStray("AWS", "vpc", cloud_id: cloud_desc.db_subnet_group.vpc_id, credentials: @credentials, region: @…
+          myvpc = MU::MommaCat.findStray("AWS", "vpc", cloud_id: cloud_desc.db_subnet_group.vpc_id, credentials: @credentials, region: @region, dummy_ok: true, no_deploy_search: true).first
           bok['vpc'] = myvpc.getReference(cloud_desc.db_subnet_group.subnets.map { |s| s.subnet_identifier })
         end
         bok['storage_type'] = cloud_desc.storage_type
@@ -467,13 +467,13 @@ dependencies
         raise MuError, "Couldn't find subnets in #{@vpc} to add to #{@config["subnet_group_name"]}. Make sure the subnets are valid and publicly_accessible is set correctly"
       else
         resp = begin
-          MU::Cloud::AWS.rds(region: @…
+          MU::Cloud::AWS.rds(region: @region, credentials: @credentials).describe_db_subnet_groups(
             db_subnet_group_name: @config["subnet_group_name"]
           )
           # XXX ensure subnet group matches our config?
         rescue ::Aws::RDS::Errors::DBSubnetGroupNotFoundFault
           # Create subnet group
-          resp = MU::Cloud::AWS.rds(region: @…
+          resp = MU::Cloud::AWS.rds(region: @region, credentials: @credentials).create_db_subnet_group(
             db_subnet_group_name: @config["subnet_group_name"],
             db_subnet_group_description: @config["subnet_group_name"],
             subnet_ids: subnet_ids,
@@ -511,13 +511,13 @@ dependencies
       if create
         MU.log "Creating a #{cluster ? "cluster" : "database" } parameter group #{@config["parameter_group_name"]}"

-        MU::Cloud::AWS.rds(region: @…
+        MU::Cloud::AWS.rds(region: @region, credentials: @credentials).send(cluster ? :create_db_cluster_parameter_group : :create_db_parameter_group, params)
       end


       if @config[fieldname] and !@config[fieldname].empty?

-        old_values = MU::Cloud::AWS.rds(credentials: @credentials, region: @…
+        old_values = MU::Cloud::AWS.rds(credentials: @credentials, region: @region).send(cluster ? :describe_db_cluster_parameters : :describe_db_parameters, { name_param => @config["parameter_group_name"] } ).parameters
         old_values.map! { |p| [p.parameter_name, p.parameter_value] }.flatten
         old_values = old_values.to_h

@@ -532,12 +532,12 @@ dependencies

       MU.retrier([Aws::RDS::Errors::InvalidDBParameterGroupState], wait: 30, max: 10) {
         if cluster
-          MU::Cloud::AWS.rds(region: @…
+          MU::Cloud::AWS.rds(region: @region, credentials: @credentials).modify_db_cluster_parameter_group(
             db_cluster_parameter_group_name: @config["parameter_group_name"],
             parameters: params
           )
         else
-          MU::Cloud::AWS.rds(region: @…
+          MU::Cloud::AWS.rds(region: @region, credentials: @credentials).modify_db_parameter_group(
             db_parameter_group_name: @config["parameter_group_name"],
             parameters: params
           )
```
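Parameter groups refuse modification while a previous change is still settling, so the modify calls sit inside `MU.retrier` keyed on `InvalidDBParameterGroupState`. The same backoff written against the raw SDK (group name, parameter, and delays are placeholders):

```ruby
require "aws-sdk-rds"

rds = Aws::RDS::Client.new(region: "us-east-1")
params = [{ parameter_name: "max_connections",
            parameter_value: "500",
            apply_method: "pending-reboot" }]

attempts = 0
begin
  rds.modify_db_parameter_group(
    db_parameter_group_name: "example-pg",
    parameters: params
  )
rescue Aws::RDS::Errors::InvalidDBParameterGroupState
  # Group is busy (e.g. a prior change is still applying); back off and retry.
  raise if (attempts += 1) > 10
  sleep 30
  retry
end
```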
```diff
@@ -586,7 +586,7 @@ dependencies
       if @config["create_cluster"]
         @config['cluster_node_count'] ||= 1
         if @config['cluster_mode'] == "serverless"
-          MU::Cloud::AWS.rds(region: @…
+          MU::Cloud::AWS.rds(region: @region, credentials: @credentials).modify_current_db_cluster_capacity(
             db_cluster_identifier: @cloud_id,
             capacity: @config['cluster_node_count']
           )
@@ -612,8 +612,9 @@ dependencies
       if mods.size > 1
         MU.log "Modifying RDS instance #{@cloud_id}", MU::NOTICE, details: mods
         mods[:apply_immediately] = true
+        mods[:allow_major_version_upgrade] = true
         wait_until_available
-        MU::Cloud::AWS.rds(region: @…
+        MU::Cloud::AWS.rds(region: @region, credentials: @credentials).send("modify_db_#{noun}".to_sym, mods)
         wait_until_available
       end

```
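The added `mods[:allow_major_version_upgrade] = true` is what lets this `modify_db_*` call cross a major engine version; without the flag, RDS rejects such a version change. In isolation (identifier and version are illustrative):

```ruby
require "aws-sdk-rds"

rds = Aws::RDS::Client.new(region: "us-east-1")

rds.modify_db_instance(
  db_instance_identifier: "mydb",
  engine_version: "14.7",            # illustrative target version
  allow_major_version_upgrade: true, # required for cross-major upgrades
  apply_immediately: true
)
```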
```diff
@@ -660,7 +661,7 @@ dependencies
       if !cloud_desc.db_security_groups.empty?
         cloud_desc.db_security_groups.each { |rds_sg|
           begin
-            MU::Cloud::AWS.rds(region: @…
+            MU::Cloud::AWS.rds(region: @region, credentials: @credentials).authorize_db_security_group_ingress(
               db_security_group_name: rds_sg.db_security_group_name,
               cidrip: cidr
             )
@@ -682,7 +683,7 @@ dependencies
     def notify
       deploy_struct = MU.structToHash(cloud_desc, stringify_keys: true)
       deploy_struct['cloud_id'] = @cloud_id
-      deploy_struct["region"] ||= @…
+      deploy_struct["region"] ||= @region
       deploy_struct["db_name"] ||= @config['db_name']
       deploy_struct
     end
@@ -708,14 +709,14 @@ dependencies
       end

       MU.retrier([Aws::RDS::Errors::InvalidDBInstanceState, Aws::RDS::Errors::InvalidDBClusterStateFault], wait: 60, max: 10) {
-        MU::Cloud::AWS.rds(region: @…
+        MU::Cloud::AWS.rds(region: @region, credentials: @credentials).send("create_db_#{@config['create_cluster'] ? "cluster_" : ""}snapshot".to_sym, params)
       }

       loop_if = Proc.new {
         if @config["create_cluster"]
-          MU::Cloud::AWS.rds(region: @…
+          MU::Cloud::AWS.rds(region: @region, credentials: @credentials).describe_db_cluster_snapshots(db_cluster_snapshot_identifier: snap_id).db_cluster_snapshots.first.status != "available"
         else
-          MU::Cloud::AWS.rds(region: @…
+          MU::Cloud::AWS.rds(region: @region, credentials: @credentials).describe_db_snapshots(db_snapshot_identifier: snap_id).db_snapshots.first.status != "available"
         end
       }

```
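`create_db_snapshot` returns while the snapshot is still `creating`; the `loop_if` proc keeps the retrier polling until the status reads `available`. The same wait written directly (identifiers and interval are illustrative):

```ruby
require "aws-sdk-rds"

rds     = Aws::RDS::Client.new(region: "us-east-1")
snap_id = "mydb-manual-1"

rds.create_db_snapshot(
  db_snapshot_identifier: snap_id,
  db_instance_identifier: "mydb"
)

# Snapshots are asynchronous; block until usable.
loop do
  status = rds.describe_db_snapshots(db_snapshot_identifier: snap_id)
              .db_snapshots.first.status
  break if status == "available"
  sleep 60
end
```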
```diff
@@ -732,9 +733,9 @@ dependencies
       src_ref = MU::Config::Ref.get(@config["source"])
       resp =
         if @config["create_cluster"]
-          MU::Cloud::AWS.rds(region: @…
+          MU::Cloud::AWS.rds(region: @region, credentials: @credentials).describe_db_cluster_snapshots(db_cluster_snapshot_identifier: src_ref.id)
         else
-          MU::Cloud::AWS.rds(region: @…
+          MU::Cloud::AWS.rds(region: @region, credentials: @credentials).describe_db_snapshots(db_snapshot_identifier: src_ref.id)
         end

       snapshots = @config["create_cluster"] ? resp.db_cluster_snapshots : resp.db_snapshots
@@ -812,7 +813,7 @@ dependencies
       threads = threaded_resource_purge(:describe_db_subnet_groups, :db_subnet_groups, :db_subnet_group_name, "subgrp", region, credentials, ignoremaster, known: flags['known'], deploy_id: deploy_id) { |id|
         MU.log "Deleting RDS subnet group #{id}"
         MU.retrier([Aws::RDS::Errors::InvalidDBSubnetGroupStateFault], wait: 30, max: 5, ignoreme: [Aws::RDS::Errors::DBSubnetGroupNotFoundFault]) {
-          MU::Cloud::AWS.rds(region: region).delete_db_subnet_group(db_subnet_group_name: id) if !noop
+          MU::Cloud::AWS.rds(region: region, credentials: credentials).delete_db_subnet_group(db_subnet_group_name: id) if !noop
         }
       }

@@ -820,7 +821,7 @@ dependencies
       threads.concat threaded_resource_purge("describe_#{type}_parameter_groups".to_sym, "#{type}_parameter_groups".to_sym, "#{type}_parameter_group_name".to_sym, (type == "db" ? "pg" : "cluster-pg"), region, credentials, ignoremaster, known: flags['known'], deploy_id: deploy_id) { |id|
         MU.log "Deleting RDS #{type} parameter group #{id}"
         MU.retrier([Aws::RDS::Errors::InvalidDBParameterGroupState], wait: 30, max: 5, ignoreme: [Aws::RDS::Errors::DBParameterGroupNotFound]) {
-          MU::Cloud::AWS.rds(region: region).send("delete_#{type}_parameter_group", { "#{type}_parameter_group_name".to_sym => id }) if !noop
+          MU::Cloud::AWS.rds(region: region, credentials: credentials).send("delete_#{type}_parameter_group", { "#{type}_parameter_group_name".to_sym => id }) if !noop
         }
       }
     }
@@ -1262,7 +1263,7 @@ dependencies
     def add_basic

       getPassword
-      if @config['source'].nil? or @…
+      if @config['source'].nil? or @region != @config['source'].region
        manageSubnetGroup if @vpc
      else
        MU.log "Note: Read Replicas automatically reside in the same subnet group as the source database, if they're both in the same region. This replica may not land in the VPC you intended.", MU::WARN
@@ -1347,11 +1348,11 @@ dependencies
        if %w{existing_snapshot new_snapshot}.include?(@config["creation_style"])
          clean_parent_opts.call
          MU.log "Creating database #{noun} #{@cloud_id} from snapshot #{@config["snapshot_id"]}"
-          MU::Cloud::AWS.rds(region: @…
+          MU::Cloud::AWS.rds(region: @region, credentials: @credentials).send("restore_db_#{noun}_from_#{noun == "instance" ? "db_" : ""}snapshot".to_sym, params)
        else
          clean_parent_opts.call if noun == "instance" and params[:db_cluster_identifier]
-          MU.log "Creating pristine database #{noun} #{@cloud_id} (#{@config['name']}) in #{@…
-          MU::Cloud::AWS.rds(region: @…
+          MU.log "Creating pristine database #{noun} #{@cloud_id} (#{@config['name']}) in #{@region}", MU::NOTICE, details: params
+          MU::Cloud::AWS.rds(region: @region, credentials: @credentials).send("create_db_#{noun}".to_sym, params)
        end
      }
    end
@@ -1378,7 +1379,7 @@ dependencies

      MU.retrier([Aws::RDS::Errors::InvalidParameterValue], max: 15, wait: 20) {
        MU.log "Creating database #{@config['create_cluster'] ? "cluster" : "instance" } #{@cloud_id} based on point in time backup '#{@config['restore_time']}' of #{@config['source'].id}"
-        MU::Cloud::AWS.rds(region: @…
+        MU::Cloud::AWS.rds(region: @region, credentials: @credentials).send("restore_db_#{@config['create_cluster'] ? "cluster" : "instance"}_to_point_in_time".to_sym, params)
      }
    end

@@ -1399,8 +1400,8 @@ dependencies
        db_subnet_group_name: @config["subnet_group_name"],
        storage_type: @config["storage_type"]
      }
-      if @config["source"].region and @…
-        params[:source_db_instance_identifier] = MU::Cloud::AWS::Database.getARN(@config["source"].id, "db", "rds", region: @config["source"].region, credentials: @…
+      if @config["source"].region and @region != @config["source"].region
+        params[:source_db_instance_identifier] = MU::Cloud::AWS::Database.getARN(@config["source"].id, "db", "rds", region: @config["source"].region, credentials: @credentials)
      end

      params[:port] = @config["port"] if @config["port"]
@@ -1415,7 +1416,7 @@ dependencies

      MU.retrier([Aws::RDS::Errors::InvalidDBInstanceState, Aws::RDS::Errors::InvalidParameterValue, Aws::RDS::Errors::DBSubnetGroupNotAllowedFault], max: 10, wait: 30, on_retry: on_retry) {
        MU.log "Creating read replica database instance #{@cloud_id} for #{@config['source'].id}"
-        MU::Cloud::AWS.rds(region: @…
+        MU::Cloud::AWS.rds(region: @region, credentials: @credentials).create_db_instance_read_replica(params)
      }
    end

```
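For a replica in a different region than its source, RDS requires the source to be named by its full ARN and the call to be issued from the destination region, which is what the `getARN` branch above arranges. A minimal raw-SDK version (identifiers are placeholders):

```ruby
require "aws-sdk-rds"

# Client lives in the *destination* region; the source is referenced by ARN.
rds = Aws::RDS::Client.new(region: "us-west-2")

rds.create_db_instance_read_replica(
  db_instance_identifier: "mydb-replica",
  source_db_instance_identifier: "arn:aws:rds:us-east-1:123456789012:db:mydb"
)
```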
```diff
@@ -1474,7 +1475,7 @@ dependencies
         end
         mod_config[:vpc_security_group_ids] << localdeploy_rule.cloud_id

-        MU::Cloud::AWS.rds(region: @…
+        MU::Cloud::AWS.rds(region: @region, credentials: @credentials).modify_db_instance(mod_config)
         MU.log "Modified database #{@cloud_id} with new security groups: #{mod_config}", MU::NOTICE
       end

@@ -1486,7 +1487,7 @@ dependencies
         db_instance_identifier: @cloud_id,
         apply_immediately: true
       }
-      if !@config["read_replica_of"] or @…
+      if !@config["read_replica_of"] or @region == @config['source'].region
         mod_config[:vpc_security_group_ids] = @config["vpc_security_group_ids"]
       end

@@ -1503,7 +1504,7 @@ dependencies
         mod_config[:preferred_maintenance_window] = @config["preferred_maintenance_window"]
       end

-      MU::Cloud::AWS.rds(region: @…
+      MU::Cloud::AWS.rds(region: @region, credentials: @credentials).modify_db_instance(mod_config)
       wait_until_available
     end

@@ -1511,7 +1512,7 @@ dependencies
       if @config['allow_major_version_upgrade'] && @config["creation_style"] == "new"
         MU.log "Setting major database version upgrade on #{@cloud_id}'"

-        MU::Cloud::AWS.rds(region: @…
+        MU::Cloud::AWS.rds(region: @region, credentials: @credentials).modify_db_instance(
           db_instance_identifier: @cloud_id,
           apply_immediately: true,
           allow_major_version_upgrade: true
```