cloud-mu 1.9.0.pre.beta → 2.0.0.pre.alpha

This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (114)
  1. checksums.yaml +4 -4
  2. data/Berksfile +16 -54
  3. data/Berksfile.lock +14 -62
  4. data/bin/mu-aws-setup +131 -108
  5. data/bin/mu-configure +311 -74
  6. data/bin/mu-gcp-setup +84 -62
  7. data/bin/mu-load-config.rb +46 -2
  8. data/bin/mu-self-update +11 -9
  9. data/bin/mu-upload-chef-artifacts +4 -4
  10. data/{mu.gemspec → cloud-mu.gemspec} +2 -2
  11. data/cookbooks/awscli/Berksfile +8 -0
  12. data/cookbooks/mu-activedirectory/Berksfile +11 -0
  13. data/cookbooks/mu-firewall/Berksfile +9 -0
  14. data/cookbooks/mu-firewall/metadata.rb +1 -1
  15. data/cookbooks/mu-glusterfs/Berksfile +10 -0
  16. data/cookbooks/mu-jenkins/Berksfile +14 -0
  17. data/cookbooks/mu-master/Berksfile +23 -0
  18. data/cookbooks/mu-master/attributes/default.rb +1 -1
  19. data/cookbooks/mu-master/metadata.rb +2 -2
  20. data/cookbooks/mu-master/recipes/default.rb +1 -1
  21. data/cookbooks/mu-master/recipes/init.rb +7 -3
  22. data/cookbooks/mu-master/recipes/ssl-certs.rb +1 -0
  23. data/cookbooks/mu-mongo/Berksfile +10 -0
  24. data/cookbooks/mu-openvpn/Berksfile +11 -0
  25. data/cookbooks/mu-php54/Berksfile +13 -0
  26. data/cookbooks/mu-splunk/Berksfile +10 -0
  27. data/cookbooks/mu-tools/Berksfile +21 -0
  28. data/cookbooks/mu-tools/files/default/Mu_CA.pem +15 -15
  29. data/cookbooks/mu-utility/Berksfile +9 -0
  30. data/cookbooks/mu-utility/metadata.rb +2 -1
  31. data/cookbooks/nagios/Berksfile +7 -4
  32. data/cookbooks/s3fs/Berksfile +9 -0
  33. data/environments/dev.json +6 -6
  34. data/environments/prod.json +6 -6
  35. data/modules/mu.rb +20 -42
  36. data/modules/mu/cleanup.rb +102 -100
  37. data/modules/mu/cloud.rb +90 -28
  38. data/modules/mu/clouds/aws.rb +449 -218
  39. data/modules/mu/clouds/aws/alarm.rb +29 -17
  40. data/modules/mu/clouds/aws/cache_cluster.rb +78 -64
  41. data/modules/mu/clouds/aws/collection.rb +25 -18
  42. data/modules/mu/clouds/aws/container_cluster.rb +73 -66
  43. data/modules/mu/clouds/aws/database.rb +124 -116
  44. data/modules/mu/clouds/aws/dnszone.rb +27 -20
  45. data/modules/mu/clouds/aws/firewall_rule.rb +30 -22
  46. data/modules/mu/clouds/aws/folder.rb +18 -3
  47. data/modules/mu/clouds/aws/function.rb +77 -23
  48. data/modules/mu/clouds/aws/group.rb +19 -12
  49. data/modules/mu/clouds/aws/habitat.rb +153 -0
  50. data/modules/mu/clouds/aws/loadbalancer.rb +59 -52
  51. data/modules/mu/clouds/aws/log.rb +30 -23
  52. data/modules/mu/clouds/aws/msg_queue.rb +29 -20
  53. data/modules/mu/clouds/aws/notifier.rb +222 -0
  54. data/modules/mu/clouds/aws/role.rb +178 -90
  55. data/modules/mu/clouds/aws/search_domain.rb +40 -24
  56. data/modules/mu/clouds/aws/server.rb +169 -137
  57. data/modules/mu/clouds/aws/server_pool.rb +60 -83
  58. data/modules/mu/clouds/aws/storage_pool.rb +59 -31
  59. data/modules/mu/clouds/aws/user.rb +36 -27
  60. data/modules/mu/clouds/aws/userdata/linux.erb +101 -93
  61. data/modules/mu/clouds/aws/vpc.rb +250 -189
  62. data/modules/mu/clouds/azure.rb +132 -0
  63. data/modules/mu/clouds/cloudformation.rb +65 -1
  64. data/modules/mu/clouds/cloudformation/alarm.rb +8 -0
  65. data/modules/mu/clouds/cloudformation/cache_cluster.rb +7 -0
  66. data/modules/mu/clouds/cloudformation/collection.rb +7 -0
  67. data/modules/mu/clouds/cloudformation/database.rb +7 -0
  68. data/modules/mu/clouds/cloudformation/dnszone.rb +7 -0
  69. data/modules/mu/clouds/cloudformation/firewall_rule.rb +9 -2
  70. data/modules/mu/clouds/cloudformation/loadbalancer.rb +7 -0
  71. data/modules/mu/clouds/cloudformation/log.rb +7 -0
  72. data/modules/mu/clouds/cloudformation/server.rb +7 -0
  73. data/modules/mu/clouds/cloudformation/server_pool.rb +7 -0
  74. data/modules/mu/clouds/cloudformation/vpc.rb +7 -0
  75. data/modules/mu/clouds/google.rb +214 -110
  76. data/modules/mu/clouds/google/container_cluster.rb +42 -24
  77. data/modules/mu/clouds/google/database.rb +15 -6
  78. data/modules/mu/clouds/google/firewall_rule.rb +17 -25
  79. data/modules/mu/clouds/google/group.rb +13 -5
  80. data/modules/mu/clouds/google/habitat.rb +105 -0
  81. data/modules/mu/clouds/google/loadbalancer.rb +28 -20
  82. data/modules/mu/clouds/google/server.rb +93 -354
  83. data/modules/mu/clouds/google/server_pool.rb +18 -10
  84. data/modules/mu/clouds/google/user.rb +22 -14
  85. data/modules/mu/clouds/google/vpc.rb +97 -69
  86. data/modules/mu/config.rb +133 -38
  87. data/modules/mu/config/alarm.rb +25 -0
  88. data/modules/mu/config/cache_cluster.rb +5 -3
  89. data/modules/mu/config/cache_cluster.yml +23 -0
  90. data/modules/mu/config/database.rb +25 -16
  91. data/modules/mu/config/database.yml +3 -3
  92. data/modules/mu/config/function.rb +1 -2
  93. data/modules/mu/config/{project.rb → habitat.rb} +10 -10
  94. data/modules/mu/config/notifier.rb +85 -0
  95. data/modules/mu/config/notifier.yml +9 -0
  96. data/modules/mu/config/role.rb +1 -1
  97. data/modules/mu/config/search_domain.yml +2 -2
  98. data/modules/mu/config/server.rb +13 -1
  99. data/modules/mu/config/server.yml +3 -3
  100. data/modules/mu/config/server_pool.rb +3 -1
  101. data/modules/mu/config/storage_pool.rb +3 -1
  102. data/modules/mu/config/storage_pool.yml +19 -0
  103. data/modules/mu/config/vpc.rb +70 -8
  104. data/modules/mu/groomers/chef.rb +2 -3
  105. data/modules/mu/kittens.rb +500 -122
  106. data/modules/mu/master.rb +5 -5
  107. data/modules/mu/mommacat.rb +151 -91
  108. data/modules/tests/super_complex_bok.yml +12 -0
  109. data/modules/tests/super_simple_bok.yml +12 -0
  110. data/spec/mu/clouds/azure_spec.rb +82 -0
  111. data/spec/spec_helper.rb +105 -0
  112. metadata +26 -5
  113. data/modules/mu/clouds/aws/notification.rb +0 -139
  114. data/modules/mu/config/notification.rb +0 -44
@@ -118,10 +118,10 @@ module MU
  end

  MU.log "Creating CloudFormation stack '#{@config['name']}'", details: stack_descriptor
- res = MU::Cloud::AWS.cloudformation(region).create_stack(stack_descriptor);
+ res = MU::Cloud::AWS.cloudformation(region: region, credentials: @config['credentials']).create_stack(stack_descriptor);

  sleep(10);
- stack_response = MU::Cloud::AWS.cloudformation(region).describe_stacks({:stack_name => stack_name}).stacks.first
+ stack_response = MU::Cloud::AWS.cloudformation(region: region, credentials: @config['credentials']).describe_stacks({:stack_name => stack_name}).stacks.first
  attempts = 0
  begin
  if attempts % 5 == 0
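The pattern repeated throughout these hunks is that the per-service client factories on MU::Cloud::AWS (cloudformation, eks, ecs, ec2, ssm, iam) switch from a positional region argument to keyword arguments, picking up an optional credentials: selector taken from the resource's config. A minimal sketch of the before and after calling convention, assuming a @config hash and stack_descriptor like the ones used above; the wrapper internals are outside this diff:

    # Sketch only; @config and stack_descriptor are assumed to be in scope.
    region = @config['region']

    # 1.9.x convention: positional region, implicit default credentials
    res = MU::Cloud::AWS.cloudformation(region).create_stack(stack_descriptor)

    # 2.0.x convention: keyword arguments, credential set named in the config
    res = MU::Cloud::AWS.cloudformation(
      region: region,
      credentials: @config['credentials'] # presumably falls back to the default set when nil
    ).create_stack(stack_descriptor)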
@@ -129,7 +129,7 @@ module MU
  else
  MU.log "Waiting for CloudFormation stack '#{@config['name']}' to be ready...", MU::DEBUG
  end
- stack_response =MU::Cloud::AWS.cloudformation(region).describe_stacks({:stack_name => stack_name}).stacks.first
+ stack_response =MU::Cloud::AWS.cloudformation(region: region, credentials: @config['credentials']).describe_stacks({:stack_name => stack_name}).stacks.first
  sleep 60
  end while stack_response.stack_status == "CREATE_IN_PROGRESS"

@@ -145,14 +145,14 @@ module MU
  end

  if flag == "FAIL" then
- stack_response = MU::Cloud::AWS.cloudformation(region).delete_stack({:stack_name => stack_name})
+ stack_response = MU::Cloud::AWS.cloudformation(region: region, credentials: @config['credentials']).delete_stack({:stack_name => stack_name})
  exit 1
  end

  MU.log "CloudFormation stack '#{@config['name']}' complete"

  begin
- resources = MU::Cloud::AWS.cloudformation(region).describe_stack_resources(:stack_name => stack_name)
+ resources = MU::Cloud::AWS.cloudformation(region: region, credentials: @config['credentials']).describe_stack_resources(:stack_name => stack_name)

  resources[:stack_resources].each { |resource|

@@ -160,7 +160,7 @@ module MU
  when "AWS::EC2::Instance"
  MU::MommaCat.createStandardTags(resource.physical_resource_id)
  instance_name = MU.deploy_id+"-"+@config['name']+"-"+resource.logical_resource_id
- MU::MommaCat.createTag(resource.physical_resource_id, "Name", instance_name)
+ MU::MommaCat.createTag(resource.physical_resource_id, "Name", instance_name, credentials: @config['credentials'])

  instance = MU::Cloud::AWS::Server.notifyDeploy(
  @config['name']+"-"+resource.logical_resource_id,
@@ -187,14 +187,14 @@ module MU

  when "AWS::EC2::SecurityGroup"
  MU::MommaCat.createStandardTags(resource.physical_resource_id)
- MU::MommaCat.createTag(resource.physical_resource_id, "Name", MU.deploy_id+"-"+@config['name']+'-'+resource.logical_resource_id)
+ MU::MommaCat.createTag(resource.physical_resource_id, "Name", MU.deploy_id+"-"+@config['name']+'-'+resource.logical_resource_id, credentials: @config['credentials'])
  MU::Cloud::AWS::FirewallRule.notifyDeploy(
  @config['name']+"-"+resource.logical_resource_id,
  resource.physical_resource_id
  )
  when "AWS::EC2::Subnet"
  MU::MommaCat.createStandardTags(resource.physical_resource_id)
- MU::MommaCat.createTag(resource.physical_resource_id, "Name", MU.deploy_id+"-"+@config['name']+'-'+resource.logical_resource_id)
+ MU::MommaCat.createTag(resource.physical_resource_id, "Name", MU.deploy_id+"-"+@config['name']+'-'+resource.logical_resource_id, credentials: @config['credentials'])
  data = {
  "collection" => @config["name"],
  "subnet_id" => resource.physical_resource_id,
@@ -202,7 +202,7 @@ module MU
  @deploy.notify("subnets", @config['name']+"-"+resource.logical_resource_id, data)
  when "AWS::EC2::VPC"
  MU::MommaCat.createStandardTags(resource.physical_resource_id)
- MU::MommaCat.createTag(resource.physical_resource_id, "Name", MU.deploy_id+"-"+@config['name']+'-'+resource.logical_resource_id)
+ MU::MommaCat.createTag(resource.physical_resource_id, "Name", MU.deploy_id+"-"+@config['name']+'-'+resource.logical_resource_id, credentials: @config['credentials'])
  data = {
  "collection" => @config["name"],
  "vpc_id" => resource.physical_resource_id,
@@ -210,10 +210,10 @@ module MU
  @deploy.notify("vpcs", @config['name']+"-"+resource.logical_resource_id, data)
  when "AWS::EC2::InternetGateway"
  MU::MommaCat.createStandardTags(resource.physical_resource_id)
- MU::MommaCat.createTag(resource.physical_resource_id, "Name", MU.deploy_id+"-"+@config['name']+'-'+resource.logical_resource_id)
+ MU::MommaCat.createTag(resource.physical_resource_id, "Name", MU.deploy_id+"-"+@config['name']+'-'+resource.logical_resource_id, credentials: @config['credentials'])
  when "AWS::EC2::RouteTable"
  MU::MommaCat.createStandardTags(resource.physical_resource_id)
- MU::MommaCat.createTag(resource.physical_resource_id, "Name", MU.deploy_id+"-"+@config['name']+'-'+resource.logical_resource_id)
+ MU::MommaCat.createTag(resource.physical_resource_id, "Name", MU.deploy_id+"-"+@config['name']+'-'+resource.logical_resource_id, credentials: @config['credentials'])

  # The rest of these aren't anything we act on
  when "AWS::EC2::Route"
@@ -239,15 +239,22 @@ module MU
  end
  end

+ # Does this resource type exist as a global (cloud-wide) artifact, or
+ # is it localized to a region/zone?
+ # @return [Boolean]
+ def self.isGlobal?
+ false
+ end
+
  # Remove all CloudFormation stacks associated with the currently loaded deployment.
  # @param noop [Boolean]: If true, will only print what would be done
  # @param ignoremaster [Boolean]: If true, will remove resources not flagged as originating from this Mu server
  # @param region [String]: The cloud provider region
  # @param wait [Boolean]: Block on the removal of this stack; AWS deletion will continue in the background otherwise if false.
  # @return [void]
- def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, wait: false, flags: {})
+ def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, wait: false, credentials: nil, flags: {})
  # XXX needs to check tags instead of name- possible?
- resp = MU::Cloud::AWS.cloudformation(region).describe_stacks
+ resp = MU::Cloud::AWS.cloudformation(credentials: credentials, region: region).describe_stacks
  resp.stacks.each { |stack|
  ok = false
  stack.tags.each { |tag|
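Besides the credential plumbing, this hunk adds a class-level isGlobal? hint and threads a credentials: keyword through cleanup. A hedged sketch of how a cleanup driver might use both; the loop below is illustrative and is not taken from the actual MU::Cleanup code, and the region-listing helper and "default" credential-set name are assumptions:

    # Illustrative only; assumes MU::Cloud::AWS.listRegions for region enumeration.
    credset = "default"
    MU::Cloud::AWS.listRegions.each { |r|
      MU::Cloud::AWS::Collection.cleanup(
        noop: true,            # only report what would be removed
        region: r,
        credentials: credset
      )
      break if MU::Cloud::AWS::Collection.isGlobal?  # one pass suffices for global resources
    }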
@@ -257,7 +264,7 @@ module MU
  MU.log "Deleting CloudFormation stack #{stack.stack_name})"
  next if noop
  if stack.stack_status != "DELETE_IN_PROGRESS"
- MU::Cloud::AWS.cloudformation(region).delete_stack(stack_name: stack.stack_name)
+ MU::Cloud::AWS.cloudformation(credentials: credentials, region: region).delete_stack(stack_name: stack.stack_name)
  end
  if wait
  last_status = ""
@@ -268,7 +275,7 @@ module MU
  mystack = nil
  sleep 30
  retries = retries + 1
- desc = MU::Cloud::AWS.cloudformation(region).describe_stacks(stack_name: stack.stack_name)
+ desc = MU::Cloud::AWS.cloudformation(credentials: credentials, region: region).describe_stacks(stack_name: stack.stack_name)
  if desc.size > 0
  mystack = desc.first.stacks.first
  if mystack.size > 0 and mystack.stack_status == "DELETE_FAILED"
@@ -300,9 +307,9 @@ module MU
  end

  # placeholder
- def self.find(cloud_id: nil)
+ def self.find(cloud_id: nil, region: MU.myRegion, credentials: nil)
  found = nil
- resp = MU::Cloud::AWS::Collection.describe_stacks(
+ resp = MU::Cloud::AWS.cloudformation(region: region, credentials: credentials).describe_stacks(
  stack_name: cloud_id
  )
  if resp and resp.stacks
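find also gains region: and credentials: keywords and now routes through the cloudformation client wrapper instead of the nonexistent MU::Cloud::AWS::Collection.describe_stacks. A hedged example of calling it under the new signature; the stack name and region are invented:

    # Hypothetical lookup; "MYDEPLOY-TEST-MYSTACK" is an invented stack name.
    MU::Cloud::AWS::Collection.find(
      cloud_id: "MYDEPLOY-TEST-MYSTACK",
      region: "us-east-1",
      credentials: "default"
    )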
@@ -355,7 +362,7 @@ module MU
  region = stack['region']
  stack_name = getStackName(stack)
  begin
- resources = MU::Cloud::AWS.cloudformation(region).describe_stack_resources(:stack_name => stack_name)
+ resources = MU::Cloud::AWS.cloudformation(region: region).describe_stack_resources(:stack_name => stack_name)

  MU.log "CloudFormation stack #{stack_name} failed", MU::ERR

@@ -40,29 +40,6 @@ module MU
  @mu_name ||= @deploy.getResourceName(@config["name"])
  end

- # Generate the generic EKS Kubernetes admin role for use with
- # aws-iam-authenticator. Management nodes need this. We do it to
- # our Mu Master.
- # TODO Maybe we can convert this to BoK-speak and get this out of
- # here?
- def self.createK8SAdminRole(rolename)
- resp = MU::Cloud::AWS.iam.create_role(
- role_name: rolename,
- assume_role_policy_document: '{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":"arn:aws:iam::'+MU.account_number+':root"},"Action":"sts:AssumeRole","Condition":{}}]}'
- )
- arn = resp.role.arn
- MU.log "Created EKS Kubernetes admin role #{rolename}"
- begin
- MU::Cloud::AWS.iam.get_role(role_name: rolename)
- rescue Aws::IAM::Errors::NoSuchEntity => e
- MU.log e.inspect, MU::WARN
- sleep 10
- retry
- end
- arn
- end
-
-
  # Called automatically by {MU::Deploy#createResources}
  def create
  if @config['flavor'] == "EKS"
@@ -85,7 +62,7 @@ module MU
  resp = nil
  begin
  MU.log "Creating EKS cluster #{@mu_name}"
- resp = MU::Cloud::AWS.eks(@config['region']).create_cluster(
+ resp = MU::Cloud::AWS.eks(region: @config['region'], credentials: @config['credentials']).create_cluster(
  name: @mu_name,
  version: @config['kubernetes']['version'],
  role_arn: role_arn,
@@ -126,7 +103,7 @@ module MU
  status = nil
  retries = 0
  begin
- resp = MU::Cloud::AWS.eks(@config['region']).describe_cluster(
+ resp = MU::Cloud::AWS.eks(region: @config['region'], credentials: @config['credentials']).describe_cluster(
  name: @mu_name
  )
  status = resp.cluster.status
@@ -150,7 +127,7 @@ module MU

  MU.log "Creation of EKS cluster #{@mu_name} complete"
  else
- MU::Cloud::AWS.ecs(@config['region']).create_cluster(
+ MU::Cloud::AWS.ecs(region: @config['region'], credentials: @config['credentials']).create_cluster(
  cluster_name: @mu_name
  )
  end
@@ -159,7 +136,8 @@ module MU

  # Called automatically by {MU::Deploy#createResources}
  def groom
- serverpool = @deploy.findLitterMate(type: "server_pools", name: @config["name"]+"-"+@config["flavor"].downcase)
+
+ serverpool = @deploy.findLitterMate(type: "server_pools", name: @config["name"]+"workers")
  resource_lookup = MU::Cloud::AWS.listInstanceTypes(@config['region'])[@config['region']]

  if @config['kubernetes']
@@ -171,21 +149,21 @@ module MU
  tagme << s.cloud_id
  tagme_elb << s.cloud_id if !s.private?
  }
- rtbs = MU::Cloud::AWS.ec2(@config['region']).describe_route_tables(
+ rtbs = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_route_tables(
  filters: [ { name: "vpc-id", values: [@vpc.cloud_id] } ]
  ).route_tables
  tagme.concat(rtbs.map { |r| r.route_table_id } )
- main_sg = @deploy.findLitterMate(type: "firewall_rules", name: "server_pool#{@config['name']}-workers")
+ main_sg = @deploy.findLitterMate(type: "firewall_rules", name: "server_pool#{@config['name']}workers")
  tagme << main_sg.cloud_id
  MU.log "Applying kubernetes.io tags to VPC resources", details: tagme
- MU::Cloud::AWS.createTag("kubernetes.io/cluster/#{@mu_name}", "shared", tagme)
- MU::Cloud::AWS.createTag("kubernetes.io/cluster/elb", @mu_name, tagme_elb)
+ MU::Cloud::AWS.createTag("kubernetes.io/cluster/#{@mu_name}", "shared", tagme, credentials: @config['credentials'])
+ MU::Cloud::AWS.createTag("kubernetes.io/cluster/elb", @mu_name, tagme_elb, credentials: @config['credentials'])

  me = cloud_desc
  @endpoint = me.endpoint
  @cacert = me.certificate_authority.data
  @cluster = @mu_name
- resp = MU::Cloud::AWS.iam.get_role(role_name: @mu_name+"-WORKERS")
+ resp = MU::Cloud::AWS.iam(credentials: @config['credentials']).get_role(role_name: @mu_name+"WORKERS")
  @worker_role_arn = resp.role.arn
  kube_conf = @deploy.deploy_dir+"/kubeconfig-#{@config['name']}"
  eks_auth = @deploy.deploy_dir+"/eks-auth-cm-#{@config['name']}.yaml"
@@ -237,7 +215,7 @@ module MU

  MU.log %Q{How to interact with your Kubernetes cluster\nkubectl --kubeconfig "#{kube_conf}" get all\nkubectl --kubeconfig "#{kube_conf}" create -f some_k8s_deploy.yml}, MU::SUMMARY
  else
- resp = MU::Cloud::AWS.ecs(@config['region']).list_container_instances({
+ resp = MU::Cloud::AWS.ecs(region: @config['region'], credentials: @config['credentials']).list_container_instances({
  cluster: @mu_name
  })
  existing = {}
@@ -247,7 +225,7 @@ module MU
  uuids << arn.sub(/^.*?:container-instance\//, "")
  }
  if uuids.size > 0
- resp = MU::Cloud::AWS.ecs(@config['region']).describe_container_instances({
+ resp = MU::Cloud::AWS.ecs(region: @config['region'], credentials: @config['credentials']).describe_container_instances({
  cluster: @mu_name,
  container_instances: uuids
  })
@@ -298,7 +276,7 @@ module MU
  params[:container_instance_arn] = existing[node.cloud_id].container_instance_arn
  MU.log "Updating ECS instance #{node} in cluster #{@mu_name}", MU::NOTICE, details: params
  end
- MU::Cloud::AWS.ecs(@config['region']).register_container_instance(params)
+ MU::Cloud::AWS.ecs(region: @config['region'], credentials: @config['credentials']).register_container_instance(params)

  }
  }
@@ -310,12 +288,12 @@ module MU
  # @return [OpenStruct]
  def cloud_desc
  if @config['flavor'] == "EKS"
- resp = MU::Cloud::AWS.eks(@config['region']).describe_cluster(
+ resp = MU::Cloud::AWS.eks(region: @config['region'], credentials: @config['credentials']).describe_cluster(
  name: @mu_name
  )
  resp.cluster
  else
- resp = MU::Cloud::AWS.ecs(@config['region']).describe_clusters(
+ resp = MU::Cloud::AWS.ecs(region: @config['region'], credentials: @config['credentials']).describe_clusters(
  clusters: [@mu_name]
  )
  resp.clusters.first
@@ -349,7 +327,7 @@ module MU
  # @param flavor [String]: ECS or EKS
  def self.getECSImageId(flavor = "ECS", region = MU.myRegion)
  if flavor == "ECS"
- resp = MU::Cloud::AWS.ssm(region).get_parameters(
+ resp = MU::Cloud::AWS.ssm(region: region).get_parameters(
  names: ["/aws/service/#{flavor.downcase}/optimized-ami/amazon-linux/recommended"]
  )
  if resp and resp.parameters and resp.parameters.size > 0
@@ -368,7 +346,7 @@ module MU
  # Use the AWS SSM API to fetch the current version of the Amazon Linux
  # EKS-optimized AMI, so we can use it as a default AMI for EKS deploys.
  def self.getEKSImageId(region = MU.myRegion)
- resp = MU::Cloud::AWS.ssm(region).get_parameters(
+ resp = MU::Cloud::AWS.ssm(region: region).get_parameters(
  names: ["/aws/service/ekss/optimized-ami/amazon-linux/recommended"]
  )
  if resp and resp.parameters and resp.parameters.size > 0
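getECSImageId and getEKSImageId are thin wrappers around an SSM public-parameter lookup. Stripped of the Mu client factory, the ECS case amounts to roughly the sketch below, written against the plain aws-sdk-ssm gem; the JSON parsing reflects how AWS publishes this parameter and is an assumption not shown in the hunk itself:

    require 'aws-sdk-ssm'
    require 'json'

    # Sketch: fetch the recommended ECS-optimized Amazon Linux AMI for a region.
    ssm = Aws::SSM::Client.new(region: "us-east-1")
    resp = ssm.get_parameters(
      names: ["/aws/service/ecs/optimized-ami/amazon-linux/recommended"]
    )
    if resp.parameters and resp.parameters.size > 0
      # The parameter value is a JSON document; image_id is the field Mu uses.
      puts JSON.parse(resp.parameters.first.value)["image_id"]
    end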
@@ -378,19 +356,26 @@ module MU
  nil
  end

+ # Does this resource type exist as a global (cloud-wide) artifact, or
+ # is it localized to a region/zone?
+ # @return [Boolean]
+ def self.isGlobal?
+ false
+ end
+
  # Remove all container_clusters associated with the currently loaded deployment.
  # @param noop [Boolean]: If true, will only print what would be done
  # @param ignoremaster [Boolean]: If true, will remove resources not flagged as originating from this Mu server
  # @param region [String]: The cloud provider region
  # @return [void]
- def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, flags: {})
- resp = MU::Cloud::AWS.ecs(region).list_clusters
+ def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {})
+ resp = MU::Cloud::AWS.ecs(credentials: credentials, region: region).list_clusters

  if resp and resp.cluster_arns and resp.cluster_arns.size > 0
  resp.cluster_arns.each { |arn|
  if arn.match(/:cluster\/(#{MU.deploy_id}[^:]+)$/)
  cluster = Regexp.last_match[1]
- instances = MU::Cloud::AWS.ecs(region).list_container_instances({
+ instances = MU::Cloud::AWS.ecs(credentials: credentials, region: region).list_container_instances({
  cluster: cluster
  })
  if instances
@@ -398,7 +383,7 @@ module MU
  uuid = arn.sub(/^.*?:container-instance\//, "")
  MU.log "Deregistering instance #{uuid} from ECS Cluster #{cluster}"
  if !noop
- resp = MU::Cloud::AWS.ecs(region).deregister_container_instance({
+ resp = MU::Cloud::AWS.ecs(credentials: credentials, region: region).deregister_container_instance({
  cluster: cluster,
  container_instance: uuid,
  force: true,
@@ -409,7 +394,7 @@ module MU
  MU.log "Deleting ECS Cluster #{cluster}"
  if !noop
  # TODO de-register container instances
- deletion = MU::Cloud::AWS.ecs(region).delete_cluster(
+ deletion = MU::Cloud::AWS.ecs(credentials: credentials, region: region).delete_cluster(
  cluster: cluster
  )
  end
@@ -419,25 +404,25 @@ module MU
  return if !MU::Cloud::AWS::ContainerCluster.EKSRegions.include?(region)


- resp = MU::Cloud::AWS.eks(region).list_clusters
+ resp = MU::Cloud::AWS.eks(credentials: credentials, region: region).list_clusters

  if resp and resp.clusters
  resp.clusters.each { |cluster|
  if cluster.match(/^#{MU.deploy_id}-/)

- desc = MU::Cloud::AWS.eks(region).describe_cluster(
+ desc = MU::Cloud::AWS.eks(credentials: credentials, region: region).describe_cluster(
  name: cluster
  ).cluster

  untag = []
  untag << desc.resources_vpc_config.vpc_id
- subnets = MU::Cloud::AWS.ec2(region).describe_subnets(
+ subnets = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_subnets(
  filters: [ { name: "vpc-id", values: [desc.resources_vpc_config.vpc_id] } ]
  ).subnets

  # subnets
  untag.concat(subnets.map { |s| s.subnet_id } )
- rtbs = MU::Cloud::AWS.ec2(region).describe_route_tables(
+ rtbs = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_route_tables(
  filters: [ { name: "vpc-id", values: [desc.resources_vpc_config.vpc_id] } ]
  ).route_tables
  untag.concat(rtbs.map { |r| r.route_table_id } )
@@ -450,14 +435,14 @@ module MU
  end
  MU.log "Deleting EKS Cluster #{cluster}"
  if !noop
- MU::Cloud::AWS.eks(region).delete_cluster(
+ MU::Cloud::AWS.eks(credentials: credentials, region: region).delete_cluster(
  name: cluster
  )
  begin
  status = nil
  retries = 0
  begin
- deletion = MU::Cloud::AWS.eks(region).describe_cluster(
+ deletion = MU::Cloud::AWS.eks(credentials: credentials, region: region).describe_cluster(
  name: cluster
  )
  status = deletion.cluster.status
@@ -482,12 +467,12 @@ module MU
  # @param region [String]: The cloud provider region.
  # @param flags [Hash]: Optional flags
  # @return [OpenStruct]: The cloud provider's complete descriptions of matching container_clusters.
- def self.find(cloud_id: nil, region: MU.curRegion, flags: {})
+ def self.find(cloud_id: nil, region: MU.curRegion, credentials: nil, flags: {})
  MU.log cloud_id, MU::WARN, details: flags
  MU.log region, MU::WARN
- resp = MU::Cloud::AWS.ecs(region).list_clusters
- resp = MU::Cloud::AWS.eks(region).list_clusters
- exit
+ resp = MU::Cloud::AWS.ecs(region: region, credentials: credentials).list_clusters
+ resp = MU::Cloud::AWS.eks(region: region, credentials: credentials).list_clusters
+ # XXX uh, this ain't complete
  end

  # Cloud-specific configuration properties.
@@ -545,6 +530,18 @@ module MU
  ok = false
  end

+ if cluster["flavor"] == "EKS" and !cluster["vpc"]
+ if !MU::Cloud::AWS.hosted?
+ MU.log "EKS cluster #{cluster['name']} must declare a VPC", MU::ERR
+ ok = false
+ else
+ cluster["vpc"] = {
+ "vpc_id" => MU.myVPC,
+ "subnet_pref" => "all_private"
+ }
+ end
+ end
+
  if ["ECS", "EKS"].include?(cluster["flavor"])
  std_ami = getECSImageId(cluster["flavor"], cluster['region'])
  cluster["host_image"] ||= std_ami
@@ -564,30 +561,31 @@ module MU
  if ["ECS", "EKS"].include?(cluster["flavor"])

  worker_pool = {
- "name" => cluster["name"]+"-workers",
+ "name" => cluster["name"]+"workers",
+ "credentials" => cluster["credentials"],
  "region" => cluster['region'],
  "min_size" => cluster["instance_count"],
  "max_size" => cluster["instance_count"],
  "wait_for_nodes" => cluster["instance_count"],
  "ssh_user" => cluster["host_ssh_user"],
- "ingress_rules" => [
- "sgs" => ["container_cluster#{cluster['name']}"],
- "port_range" => "1-65535"
- ],
  "basis" => {
  "launch_config" => {
- "name" => cluster["name"]+"-workers",
+ "name" => cluster["name"]+"workers",
  "size" => cluster["instance_type"]
  }
  }
  }
+ if cluster["flavor"] == "EKS"
+ worker_pool["ingress_rules"] = [
+ "sgs" => ["container_cluster#{cluster['name']}"],
+ "port_range" => "1-65535"
+ ]
+ end
  if cluster["vpc"]
  worker_pool["vpc"] = cluster["vpc"].dup
  worker_pool["vpc"]["subnet_pref"] = cluster["instance_subnet_pref"]
  worker_pool["vpc"].delete("subnets")
  end
- if cluster["flavor"] == "EKS"
- end
  if cluster["host_image"]
  worker_pool["basis"]["launch_config"]["image_id"] = cluster["host_image"]
  end
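Three things change in the generated worker pool: the implicit sibling resources drop the hyphen from their names ("myclusterworkers" rather than "mycluster-workers"), the pool inherits the cluster's credentials, and the wide-open worker ingress rules are now attached only for EKS. A hedged sketch of the resulting hash for a hypothetical ECS cluster; the values are invented, the shape follows the hunk above:

    worker_pool = {
      "name" => "myclusterworkers",          # no hyphen between name and suffix
      "credentials" => "default",            # inherited from the cluster entry
      "region" => "us-east-1",
      "min_size" => 2,
      "max_size" => 2,
      "wait_for_nodes" => 2,
      "ssh_user" => "ec2-user",
      "basis" => {
        "launch_config" => { "name" => "myclusterworkers", "size" => "m5.large" }
      }
    }
    # ingress_rules would only be merged in when the flavor is EKS.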
@@ -618,17 +616,24 @@ module MU

  if cluster["flavor"] == "ECS"
  cluster["dependencies"] << {
- "name" => cluster["name"]+"-workers",
+ "name" => cluster["name"]+"workers",
  "type" => "server_pool",
  }
  elsif cluster["flavor"] == "EKS"
  cluster['ingress_rules'] ||= []
  cluster['ingress_rules'] << {
- "sgs" => ["server_pool#{cluster['name']}-workers"],
+ "sgs" => ["server_pool#{cluster['name']}workers"],
  "port" => 443
  }
  fwname = "container_cluster#{cluster['name']}"
- acl = {"name" => fwname, "rules" => cluster['ingress_rules'], "region" => cluster['region'], "optional_tags" => cluster['optional_tags'] }
+
+ acl = {
+ "name" => fwname,
+ "credentials" => cluster["credentials"],
+ "rules" => cluster['ingress_rules'],
+ "region" => cluster['region'],
+ "optional_tags" => cluster['optional_tags']
+ }
  acl["tags"] = cluster['tags'] if cluster['tags'] && !cluster['tags'].empty?
  acl["vpc"] = cluster['vpc'].dup if cluster['vpc']

@@ -642,6 +647,7 @@ module MU

  role = {
  "name" => cluster["name"]+"controlplane",
+ "credentials" => cluster["credentials"],
  "can_assume" => [
  { "entity_id" => "eks.amazonaws.com", "entity_type" => "service" }
  ],
@@ -653,7 +659,8 @@ module MU
  configurator.insertKitten(role, "roles")
  cluster['dependencies'] << {
  "type" => "role",
- "name" => cluster["name"]+"controlplane"
+ "name" => cluster["name"]+"controlplane",
+ "phase" => "groom"
  }
  end
  end