cloud-mu 1.9.0.pre.beta → 2.0.0.pre.alpha

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (114)
  1. checksums.yaml +4 -4
  2. data/Berksfile +16 -54
  3. data/Berksfile.lock +14 -62
  4. data/bin/mu-aws-setup +131 -108
  5. data/bin/mu-configure +311 -74
  6. data/bin/mu-gcp-setup +84 -62
  7. data/bin/mu-load-config.rb +46 -2
  8. data/bin/mu-self-update +11 -9
  9. data/bin/mu-upload-chef-artifacts +4 -4
  10. data/{mu.gemspec → cloud-mu.gemspec} +2 -2
  11. data/cookbooks/awscli/Berksfile +8 -0
  12. data/cookbooks/mu-activedirectory/Berksfile +11 -0
  13. data/cookbooks/mu-firewall/Berksfile +9 -0
  14. data/cookbooks/mu-firewall/metadata.rb +1 -1
  15. data/cookbooks/mu-glusterfs/Berksfile +10 -0
  16. data/cookbooks/mu-jenkins/Berksfile +14 -0
  17. data/cookbooks/mu-master/Berksfile +23 -0
  18. data/cookbooks/mu-master/attributes/default.rb +1 -1
  19. data/cookbooks/mu-master/metadata.rb +2 -2
  20. data/cookbooks/mu-master/recipes/default.rb +1 -1
  21. data/cookbooks/mu-master/recipes/init.rb +7 -3
  22. data/cookbooks/mu-master/recipes/ssl-certs.rb +1 -0
  23. data/cookbooks/mu-mongo/Berksfile +10 -0
  24. data/cookbooks/mu-openvpn/Berksfile +11 -0
  25. data/cookbooks/mu-php54/Berksfile +13 -0
  26. data/cookbooks/mu-splunk/Berksfile +10 -0
  27. data/cookbooks/mu-tools/Berksfile +21 -0
  28. data/cookbooks/mu-tools/files/default/Mu_CA.pem +15 -15
  29. data/cookbooks/mu-utility/Berksfile +9 -0
  30. data/cookbooks/mu-utility/metadata.rb +2 -1
  31. data/cookbooks/nagios/Berksfile +7 -4
  32. data/cookbooks/s3fs/Berksfile +9 -0
  33. data/environments/dev.json +6 -6
  34. data/environments/prod.json +6 -6
  35. data/modules/mu.rb +20 -42
  36. data/modules/mu/cleanup.rb +102 -100
  37. data/modules/mu/cloud.rb +90 -28
  38. data/modules/mu/clouds/aws.rb +449 -218
  39. data/modules/mu/clouds/aws/alarm.rb +29 -17
  40. data/modules/mu/clouds/aws/cache_cluster.rb +78 -64
  41. data/modules/mu/clouds/aws/collection.rb +25 -18
  42. data/modules/mu/clouds/aws/container_cluster.rb +73 -66
  43. data/modules/mu/clouds/aws/database.rb +124 -116
  44. data/modules/mu/clouds/aws/dnszone.rb +27 -20
  45. data/modules/mu/clouds/aws/firewall_rule.rb +30 -22
  46. data/modules/mu/clouds/aws/folder.rb +18 -3
  47. data/modules/mu/clouds/aws/function.rb +77 -23
  48. data/modules/mu/clouds/aws/group.rb +19 -12
  49. data/modules/mu/clouds/aws/habitat.rb +153 -0
  50. data/modules/mu/clouds/aws/loadbalancer.rb +59 -52
  51. data/modules/mu/clouds/aws/log.rb +30 -23
  52. data/modules/mu/clouds/aws/msg_queue.rb +29 -20
  53. data/modules/mu/clouds/aws/notifier.rb +222 -0
  54. data/modules/mu/clouds/aws/role.rb +178 -90
  55. data/modules/mu/clouds/aws/search_domain.rb +40 -24
  56. data/modules/mu/clouds/aws/server.rb +169 -137
  57. data/modules/mu/clouds/aws/server_pool.rb +60 -83
  58. data/modules/mu/clouds/aws/storage_pool.rb +59 -31
  59. data/modules/mu/clouds/aws/user.rb +36 -27
  60. data/modules/mu/clouds/aws/userdata/linux.erb +101 -93
  61. data/modules/mu/clouds/aws/vpc.rb +250 -189
  62. data/modules/mu/clouds/azure.rb +132 -0
  63. data/modules/mu/clouds/cloudformation.rb +65 -1
  64. data/modules/mu/clouds/cloudformation/alarm.rb +8 -0
  65. data/modules/mu/clouds/cloudformation/cache_cluster.rb +7 -0
  66. data/modules/mu/clouds/cloudformation/collection.rb +7 -0
  67. data/modules/mu/clouds/cloudformation/database.rb +7 -0
  68. data/modules/mu/clouds/cloudformation/dnszone.rb +7 -0
  69. data/modules/mu/clouds/cloudformation/firewall_rule.rb +9 -2
  70. data/modules/mu/clouds/cloudformation/loadbalancer.rb +7 -0
  71. data/modules/mu/clouds/cloudformation/log.rb +7 -0
  72. data/modules/mu/clouds/cloudformation/server.rb +7 -0
  73. data/modules/mu/clouds/cloudformation/server_pool.rb +7 -0
  74. data/modules/mu/clouds/cloudformation/vpc.rb +7 -0
  75. data/modules/mu/clouds/google.rb +214 -110
  76. data/modules/mu/clouds/google/container_cluster.rb +42 -24
  77. data/modules/mu/clouds/google/database.rb +15 -6
  78. data/modules/mu/clouds/google/firewall_rule.rb +17 -25
  79. data/modules/mu/clouds/google/group.rb +13 -5
  80. data/modules/mu/clouds/google/habitat.rb +105 -0
  81. data/modules/mu/clouds/google/loadbalancer.rb +28 -20
  82. data/modules/mu/clouds/google/server.rb +93 -354
  83. data/modules/mu/clouds/google/server_pool.rb +18 -10
  84. data/modules/mu/clouds/google/user.rb +22 -14
  85. data/modules/mu/clouds/google/vpc.rb +97 -69
  86. data/modules/mu/config.rb +133 -38
  87. data/modules/mu/config/alarm.rb +25 -0
  88. data/modules/mu/config/cache_cluster.rb +5 -3
  89. data/modules/mu/config/cache_cluster.yml +23 -0
  90. data/modules/mu/config/database.rb +25 -16
  91. data/modules/mu/config/database.yml +3 -3
  92. data/modules/mu/config/function.rb +1 -2
  93. data/modules/mu/config/{project.rb → habitat.rb} +10 -10
  94. data/modules/mu/config/notifier.rb +85 -0
  95. data/modules/mu/config/notifier.yml +9 -0
  96. data/modules/mu/config/role.rb +1 -1
  97. data/modules/mu/config/search_domain.yml +2 -2
  98. data/modules/mu/config/server.rb +13 -1
  99. data/modules/mu/config/server.yml +3 -3
  100. data/modules/mu/config/server_pool.rb +3 -1
  101. data/modules/mu/config/storage_pool.rb +3 -1
  102. data/modules/mu/config/storage_pool.yml +19 -0
  103. data/modules/mu/config/vpc.rb +70 -8
  104. data/modules/mu/groomers/chef.rb +2 -3
  105. data/modules/mu/kittens.rb +500 -122
  106. data/modules/mu/master.rb +5 -5
  107. data/modules/mu/mommacat.rb +151 -91
  108. data/modules/tests/super_complex_bok.yml +12 -0
  109. data/modules/tests/super_simple_bok.yml +12 -0
  110. data/spec/mu/clouds/azure_spec.rb +82 -0
  111. data/spec/spec_helper.rb +105 -0
  112. metadata +26 -5
  113. data/modules/mu/clouds/aws/notification.rb +0 -139
  114. data/modules/mu/config/notification.rb +0 -44
@@ -42,7 +42,8 @@ module MU
42
42
  params = genParams
43
43
 
44
44
  MU.log "Creating ElasticSearch domain #{@config['domain_name']}", details: params
45
- resp = MU::Cloud::AWS.elasticsearch(@config['region']).create_elasticsearch_domain(params).domain_status
45
+ pp params
46
+ resp = MU::Cloud::AWS.elasticsearch(region: @config['region'], credentials: @config['credentials']).create_elasticsearch_domain(params).domain_status
46
47
 
47
48
  tagDomain
48
49
 
@@ -58,7 +59,7 @@ module MU
58
59
  waitWhileProcessing # wait until the create finishes, if still going
59
60
 
60
61
  MU.log "Updating ElasticSearch domain #{@config['domain_name']}", MU::NOTICE, details: params
61
- MU::Cloud::AWS.elasticsearch(@config['region']).update_elasticsearch_domain_config(params)
62
+ MU::Cloud::AWS.elasticsearch(region: @config['region'], credentials: @config['credentials']).update_elasticsearch_domain_config(params)
62
63
  end
63
64
 
64
65
  waitWhileProcessing # don't return until creation/updating is complete
@@ -69,11 +70,11 @@ module MU
69
70
  # our druthers.
70
71
  def cloud_desc
71
72
  if @config['domain_name']
72
- MU::Cloud::AWS.elasticsearch(@config['region']).describe_elasticsearch_domain(
73
+ MU::Cloud::AWS.elasticsearch(region: @config['region'], credentials: @config['credentials']).describe_elasticsearch_domain(
73
74
  domain_name: @config['domain_name']
74
75
  ).domain_status
75
- elsif @deploydata['domain_name']
76
- MU::Cloud::AWS.elasticsearch(@config['region']).describe_elasticsearch_domain(
76
+ elsif @deploydata and @deploydata['domain_name']
77
+ MU::Cloud::AWS.elasticsearch(region: @config['region'], credentials: @config['credentials']).describe_elasticsearch_domain(
77
78
  domain_name: @deploydata['domain_name']
78
79
  ).domain_status
79
80
  else
@@ -91,7 +92,7 @@ module MU
91
92
  # @return [Hash]
92
93
  def notify
93
94
  deploy_struct = MU.structToHash(cloud_desc)
94
- tags = MU::Cloud::AWS.elasticsearch(@config['region']).list_tags(arn: deploy_struct[:arn]).tag_list
95
+ tags = MU::Cloud::AWS.elasticsearch(region: @config['region'], credentials: @config['credentials']).list_tags(arn: deploy_struct[:arn]).tag_list
95
96
  deploy_struct['tags'] = tags.map { |t| { t.key => t.value } }
96
97
  if deploy_struct['endpoint']
97
98
  deploy_struct['kibana'] = deploy_struct['endpoint']+"/_plugin/kibana/"
@@ -100,23 +101,30 @@ module MU
100
101
  deploy_struct
101
102
  end
102
103
 
104
+ # Does this resource type exist as a global (cloud-wide) artifact, or
105
+ # is it localized to a region/zone?
106
+ # @return [Boolean]
107
+ def self.isGlobal?
108
+ false
109
+ end
110
+
103
111
  # Remove all search_domains associated with the currently loaded deployment.
104
112
  # @param noop [Boolean]: If true, will only print what would be done
105
113
  # @param ignoremaster [Boolean]: If true, will remove resources not flagged as originating from this Mu server
106
114
  # @param region [String]: The cloud provider region
107
115
  # @return [void]
108
- def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, flags: {})
109
- list = MU::Cloud::AWS.elasticsearch(region).list_domain_names
116
+ def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {})
117
+ list = MU::Cloud::AWS.elasticsearch(region: region).list_domain_names
110
118
  if list and list.domain_names and list.domain_names.size > 0
111
- descs = MU::Cloud::AWS.elasticsearch(region).describe_elasticsearch_domains(domain_names: list.domain_names.map { |d| d.domain_name } )
119
+ descs = MU::Cloud::AWS.elasticsearch(region: region).describe_elasticsearch_domains(domain_names: list.domain_names.map { |d| d.domain_name } )
112
120
 
113
121
  descs.domain_status_list.each { |domain|
114
- tags = MU::Cloud::AWS.elasticsearch(region).list_tags(arn: domain.arn)
122
+ tags = MU::Cloud::AWS.elasticsearch(region: region).list_tags(arn: domain.arn)
115
123
  tags.tag_list.each { |tag|
116
124
  if tag.key == "MU-ID" and tag.value == MU.deploy_id
117
125
  MU.log "Deleting ElasticSearch Domain #{domain.domain_name}"
118
126
  if !noop
119
- MU::Cloud::AWS.elasticsearch(region).delete_elasticsearch_domain(domain_name: domain.domain_name)
127
+ MU::Cloud::AWS.elasticsearch(region: region).delete_elasticsearch_domain(domain_name: domain.domain_name)
120
128
  end
121
129
  break
122
130
  end
@@ -142,14 +150,14 @@ module MU
142
150
  # @param region [String]: The cloud provider region.
143
151
  # @param flags [Hash]: Optional flags
144
152
  # @return [OpenStruct]: The cloud provider's complete descriptions of matching search_domain.
145
- def self.find(cloud_id: nil, region: MU.curRegion, flags: {})
153
+ def self.find(cloud_id: nil, region: MU.curRegion, credentials: nil, flags: {})
146
154
  if cloud_id
147
155
  # Annoyingly, we might expect one of several possible artifacts,
148
156
  # since AWS couldn't decide what the real identifier of these
149
157
  # things should be
150
- list = MU::Cloud::AWS.elasticsearch(region).list_domain_names
158
+ list = MU::Cloud::AWS.elasticsearch(region: region, credentials: credentials).list_domain_names
151
159
  if list and list.domain_names and list.domain_names.size > 0
152
- descs = MU::Cloud::AWS.elasticsearch(region).describe_elasticsearch_domains(domain_names: list.domain_names.map { |d| d.domain_name } )
160
+ descs = MU::Cloud::AWS.elasticsearch(region: region, credentials: credentials).describe_elasticsearch_domains(domain_names: list.domain_names.map { |d| d.domain_name } )
153
161
  descs.domain_status_list.each { |domain|
154
162
  return domain if domain.arn == cloud_id
155
163
  return domain if domain.domain_name == cloud_id
@@ -180,6 +188,10 @@ module MU
180
188
  end
181
189
 
182
190
  schema = {
191
+ "name" => {
192
+ "type" => "string",
193
+ "pattern" => '^[a-z][a-z0-9\-]+$'
194
+ },
183
195
  "elasticsearch_version" => {
184
196
  "type" => "string",
185
197
  "default" => versions.first,
@@ -269,12 +281,12 @@ module MU
269
281
  # @return [Boolean]: True if validation succeeded, False otherwise
270
282
  def self.validateConfig(dom, configurator)
271
283
  ok = true
272
- versions = MU::Cloud::AWS.elasticsearch(dom['region']).list_elasticsearch_versions.elasticsearch_versions
284
+ versions = MU::Cloud::AWS.elasticsearch(region: dom['region']).list_elasticsearch_versions.elasticsearch_versions
273
285
  if !versions.include?(dom["elasticsearch_version"])
274
286
  MU.log "Invalid ElasticSearch version '#{dom["elasticsearch_version"]}' in SearchDomain '#{dom['name']}'", MU::ERR, details: versions
275
287
  ok = false
276
288
  else
277
- resp = MU::Cloud::AWS.elasticsearch(dom['region']).list_elasticsearch_instance_types(
289
+ resp = MU::Cloud::AWS.elasticsearch(region: dom['region']).list_elasticsearch_instance_types(
278
290
  elasticsearch_version: dom["elasticsearch_version"]
279
291
  )
280
292
 
@@ -330,7 +342,7 @@ module MU
330
342
  if configurator.haveLitterMate?(dom['slow_logs'], "log")
331
343
  dom['dependencies'] << { "name" => dom['slow_logs'], "type" => "log" }
332
344
  else
333
- log_group = MU::Cloud::AWS::Log.find(cloud_id: dom['slow_logs'], region: dom['region'])
345
+ log_group = MU::Cloud::AWS::Log.find(cloud_id: dom['slow_logs'], region: dom['region']).values.first
334
346
  if !log_group
335
347
  MU.log "Specified slow_logs CloudWatch log group '#{dom['slow_logs']}' in SearchDomain '#{dom['name']}' doesn't appear to exist", MU::ERR
336
348
  ok = false
@@ -340,7 +352,10 @@ module MU
340
352
  end
341
353
  else
342
354
  dom['slow_logs'] = dom['name']+"-slowlog"
343
- log_group = { "name" => dom['slow_logs'] }
355
+ log_group = {
356
+ "name" => dom['slow_logs'],
357
+ "credentials" => dom['credentials']
358
+ }
344
359
  ok = false if !configurator.insertKitten(log_group, "logs")
345
360
  dom['dependencies'] << { "name" => dom['slow_logs'], "type" => "log" }
346
361
  end
@@ -353,7 +368,7 @@ module MU
353
368
 
354
369
  if dom['cognito']
355
370
  begin
356
- MU::Cloud::AWS.cognito_ident(dom['region']).describe_identity_pool(
371
+ MU::Cloud::AWS.cognito_ident(region: dom['region']).describe_identity_pool(
357
372
  identity_pool_id: dom['cognito']['identity_pool_id']
358
373
  )
359
374
  rescue ::Aws::CognitoIdentity::Errors::ValidationException, Aws::CognitoIdentity::Errors::ResourceNotFoundException => e
@@ -361,7 +376,7 @@ module MU
361
376
  ok = false
362
377
  end
363
378
  begin
364
- MU::Cloud::AWS.cognito_user(dom['region']).describe_user_pool(
379
+ MU::Cloud::AWS.cognito_user(region: dom['region']).describe_user_pool(
365
380
  user_pool_id: dom['cognito']['user_pool_id']
366
381
  )
367
382
  rescue ::Aws::CognitoIdentityProvider::Errors::InvalidParameterException, Aws::CognitoIdentityProvider::Errors::ResourceNotFoundException => e
@@ -373,10 +388,10 @@ module MU
373
388
  rolename = dom['cognito']['role_arn'].sub(/.*?:role\/([a-z0-9-]+)$/, '\1')
374
389
  begin
375
390
  if !dom['cognito']['role_arn'].match(/^arn:/)
376
- role = MU::Cloud::AWS.iam(dom['region']).get_role(role_name: rolename)
391
+ role = MU::Cloud::AWS.iam.get_role(role_name: rolename)
377
392
  dom['cognito']['role_arn'] = role.role.arn
378
393
  end
379
- pols = MU::Cloud::AWS.iam(dom['region']).list_attached_role_policies(role_name: rolename).attached_policies
394
+ pols = MU::Cloud::AWS.iam.list_attached_role_policies(role_name: rolename).attached_policies
380
395
  found = false
381
396
  pols.each { |policy|
382
397
  found = true if policy.policy_name == "AmazonESCognitoAccess"
@@ -391,6 +406,7 @@ module MU
391
406
  else
392
407
  roledesc = {
393
408
  "name" => dom['name']+"cognitorole",
409
+ "credentials" => dom['credentials'],
394
410
  "can_assume" => [
395
411
  {
396
412
  "entity_id" => "es.amazonaws.com",
@@ -471,7 +487,7 @@ module MU
471
487
  arn = @config['slow_logs']
472
488
  else
473
489
  log_group = @deploy.findLitterMate(type: "log", name: @config['slow_logs'])
474
- log_group = MU::Cloud::AWS::Log.find(cloud_id: log_group.mu_name, region: log_group.cloudobj.config['region'])
490
+ log_group = MU::Cloud::AWS::Log.find(cloud_id: log_group.mu_name, region: log_group.cloudobj.config['region']).values.first
475
491
  if log_group.nil? or log_group.arn.nil?
476
492
  raise MuError, "Failed to retrieve ARN of sibling LogGroup '#{@config['slow_logs']}'"
477
493
  end
@@ -619,7 +635,7 @@ module MU
619
635
  raise MU::MuError, "Can't tag ElasticSearch domain, cloud descriptor came back without an ARN"
620
636
  end
621
637
 
622
- MU::Cloud::AWS.elasticsearch(@config['region']).add_tags(
638
+ MU::Cloud::AWS.elasticsearch(region: @config['region'], credentials: @config['credentials']).add_tags(
623
639
  arn: domain.arn,
624
640
  tag_list: tags
625
641
  )
@@ -210,15 +210,15 @@ module MU
210
210
  # @param tag_value [String]: The value of the tag to attach.
211
211
  # @param region [String]: The cloud provider region
212
212
  # @return [void]
213
- def self.tagVolumes(instance_id, device: nil, tag_name: "MU-ID", tag_value: MU.deploy_id, region: MU.curRegion)
214
- MU::Cloud::AWS.ec2(region).describe_volumes(filters: [name: "attachment.instance-id", values: [instance_id]]).each { |vol|
213
+ def self.tagVolumes(instance_id, device: nil, tag_name: "MU-ID", tag_value: MU.deploy_id, region: MU.curRegion, credentials: nil)
214
+ MU::Cloud::AWS.ec2(region: region, credentials: credentials).describe_volumes(filters: [name: "attachment.instance-id", values: [instance_id]]).each { |vol|
215
215
  vol.volumes.each { |volume|
216
216
  volume.attachments.each { |attachment|
217
217
  vol_parent = attachment.instance_id
218
218
  vol_id = attachment.volume_id
219
219
  vol_dev = attachment.device
220
220
  if vol_parent == instance_id and (vol_dev == device or device.nil?)
221
- MU::MommaCat.createTag(vol_id, tag_name, tag_value, region: region)
221
+ MU::MommaCat.createTag(vol_id, tag_name, tag_value, region: region, credentials: credentials)
222
222
  break
223
223
  end
224
224
  }
@@ -246,8 +246,8 @@ module MU
246
246
  end
247
247
  MU::MommaCat.unlock(instance.instance_id+"-create")
248
248
  else
249
- MU::MommaCat.createStandardTags(instance.instance_id, region: @config['region'])
250
- MU::MommaCat.createTag(instance.instance_id, "Name", @mu_name, region: @config['region'])
249
+ MU::MommaCat.createStandardTags(instance.instance_id, region: @config['region'], credentials: @config['credentials'])
250
+ MU::MommaCat.createTag(instance.instance_id, "Name", @mu_name, region: @config['region'], credentials: @config['credentials'])
251
251
  end
252
252
  done = true
253
253
  rescue Exception => e
@@ -258,7 +258,7 @@ module MU
258
258
  parent_thread_id = Thread.current.object_id
259
259
  Thread.new {
260
260
  MU.dupGlobals(parent_thread_id)
261
- MU::Cloud::AWS::Server.cleanup(noop: false, ignoremaster: false, skipsnapshots: true)
261
+ MU::Cloud::AWS::Server.cleanup(noop: false, ignoremaster: false, region: @config['region'], credentials: @config['credentials'], flags: { "skipsnapshots" => true } )
262
262
  }
263
263
  end
264
264
  end
@@ -286,7 +286,6 @@ module MU
286
286
 
287
287
  arn = nil
288
288
  if @config['generate_iam_role']
289
- # @config['iam_role'], @cfm_role_name, @cfm_prof_name, arn = MU::Cloud::AWS::Server.createIAMProfile(@mu_name, base_profile: @config['iam_role'], extra_policies: @config['iam_policies'])
290
289
  role = @deploy.findLitterMate(name: @config['name'], type: "roles")
291
290
  s3_objs = ["#{@deploy.deploy_id}-secret", "#{role.mu_name}.pfx", "#{role.mu_name}.crt", "#{role.mu_name}.key", "#{role.mu_name}-winrm.crt", "#{role.mu_name}-winrm.key"].map { |file|
292
291
  'arn:'+(MU::Cloud::AWS.isGovCloud?(@config['region']) ? "aws-us-gov" : "aws")+':s3:::'+MU.adminBucketName+'/'+file
@@ -349,10 +348,10 @@ module MU
349
348
  instance_descriptor[:user_data] = Base64.encode64(@userdata)
350
349
  end
351
350
 
352
- MU::Cloud::AWS::Server.waitForAMI(@config["ami_id"], region: @config['region'])
351
+ MU::Cloud::AWS::Server.waitForAMI(@config["ami_id"], region: @config['region'], credentials: @config['credentials'])
353
352
 
354
353
  # Figure out which devices are embedded in the AMI already.
355
- image = MU::Cloud::AWS.ec2(@config['region']).describe_images(image_ids: [@config["ami_id"]]).images.first
354
+ image = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_images(image_ids: [@config["ami_id"]]).images.first
356
355
  ext_disks = {}
357
356
  if !image.block_device_mappings.nil?
358
357
  image.block_device_mappings.each { |disk|
@@ -391,7 +390,7 @@ module MU
391
390
 
392
391
  retries = 0
393
392
  begin
394
- response = MU::Cloud::AWS.ec2(@config['region']).run_instances(instance_descriptor)
393
+ response = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).run_instances(instance_descriptor)
395
394
  rescue Aws::EC2::Errors::InvalidGroupNotFound, Aws::EC2::Errors::InvalidSubnetIDNotFound, Aws::EC2::Errors::InvalidParameterValue => e
396
395
  if retries < 10
397
396
  if retries > 7
@@ -419,40 +418,40 @@ module MU
419
418
  if hard
420
419
  groupname = nil
421
420
  if !@config['basis'].nil?
422
- resp = MU::Cloud::AWS.autoscale(@config['region']).describe_auto_scaling_instances(
421
+ resp = MU::Cloud::AWS.autoscale(region: @config['region'], credentials: @config['credentials']).describe_auto_scaling_instances(
423
422
  instance_ids: [@cloud_id]
424
423
  )
425
424
  groupname = resp.auto_scaling_instances.first.auto_scaling_group_name
426
425
  MU.log "Pausing Autoscale processes in #{groupname}", MU::NOTICE
427
- MU::Cloud::AWS.autoscale(@config['region']).suspend_processes(
426
+ MU::Cloud::AWS.autoscale(region: @config['region'], credentials: @config['credentials']).suspend_processes(
428
427
  auto_scaling_group_name: groupname
429
428
  )
430
429
  end
431
430
  begin
432
431
  MU.log "Stopping #{@mu_name} (#{@cloud_id})", MU::NOTICE
433
- MU::Cloud::AWS.ec2(@config['region']).stop_instances(
432
+ MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).stop_instances(
434
433
  instance_ids: [@cloud_id]
435
434
  )
436
- MU::Cloud::AWS.ec2(@config['region']).wait_until(:instance_stopped, instance_ids: [@cloud_id]) do |waiter|
435
+ MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).wait_until(:instance_stopped, instance_ids: [@cloud_id]) do |waiter|
437
436
  waiter.before_attempt do |attempts|
438
437
  MU.log "Waiting for #{@mu_name} to stop for hard reboot"
439
438
  end
440
439
  end
441
440
  MU.log "Starting #{@mu_name} (#{@cloud_id})"
442
- MU::Cloud::AWS.ec2(@config['region']).start_instances(
441
+ MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).start_instances(
443
442
  instance_ids: [@cloud_id]
444
443
  )
445
444
  ensure
446
445
  if !groupname.nil?
447
446
  MU.log "Resuming Autoscale processes in #{groupname}", MU::NOTICE
448
- MU::Cloud::AWS.autoscale(@config['region']).resume_processes(
447
+ MU::Cloud::AWS.autoscale(region: @config['region'], credentials: @config['credentials']).resume_processes(
449
448
  auto_scaling_group_name: groupname
450
449
  )
451
450
  end
452
451
  end
453
452
  else
454
453
  MU.log "Rebooting #{@mu_name} (#{@cloud_id})"
455
- MU::Cloud::AWS.ec2(@config['region']).reboot_instances(
454
+ MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).reboot_instances(
456
455
  instance_ids: [@cloud_id]
457
456
  )
458
457
  end
@@ -468,7 +467,7 @@ module MU
468
467
  return nil if @config.nil? or @deploy.nil?
469
468
 
470
469
  nat_ssh_key = nat_ssh_user = nat_ssh_host = nil
471
- if !@config["vpc"].nil? and !MU::Cloud::AWS::VPC.haveRouteToInstance?(cloud_desc, region: @config['region'])
470
+ if !@config["vpc"].nil? and !MU::Cloud::AWS::VPC.haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials'])
472
471
  if !@nat.nil?
473
472
  if @nat.is_a?(Struct) && @nat.nat_gateway_id && @nat.nat_gateway_id.start_with?("nat-")
474
473
  raise MuError, "Configured to use NAT Gateway, but I have no route to instance. Either use Bastion, or configure VPC peering"
@@ -516,18 +515,18 @@ module MU
516
515
  return false if !MU::MommaCat.lock(instance.instance_id+"-orchestrate", true)
517
516
  return false if !MU::MommaCat.lock(instance.instance_id+"-groom", true)
518
517
 
519
- MU::MommaCat.createStandardTags(instance.instance_id, region: @config['region'])
520
- MU::MommaCat.createTag(instance.instance_id, "Name", node, region: @config['region'])
518
+ MU::MommaCat.createStandardTags(instance.instance_id, region: @config['region'], credentials: @config['credentials'])
519
+ MU::MommaCat.createTag(instance.instance_id, "Name", node, region: @config['region'], credentials: @config['credentials'])
521
520
 
522
521
  if @config['optional_tags']
523
522
  MU::MommaCat.listOptionalTags.each { |key, value|
524
- MU::MommaCat.createTag(instance.instance_id, key, value, region: @config['region'])
523
+ MU::MommaCat.createTag(instance.instance_id, key, value, region: @config['region'], credentials: @config['credentials'])
525
524
  }
526
525
  end
527
526
 
528
527
  if !@config['tags'].nil?
529
528
  @config['tags'].each { |tag|
530
- MU::MommaCat.createTag(instance.instance_id, tag['key'], tag['value'], region: @config['region'])
529
+ MU::MommaCat.createTag(instance.instance_id, tag['key'], tag['value'], region: @config['region'], credentials: @config['credentials'])
531
530
  }
532
531
  end
533
532
  MU.log "Tagged #{node} (#{instance.instance_id}) with MU-ID=#{MU.deploy_id}", MU::DEBUG
@@ -552,7 +551,7 @@ module MU
552
551
  end
553
552
  sleep 40
554
553
  # Get a fresh AWS descriptor
555
- instance = MU::Cloud::Server.find(cloud_id: @cloud_id, region: @config['region']).values.first
554
+ instance = MU::Cloud::Server.find(cloud_id: @cloud_id, region: @config['region'], credentials: @config['credentials']).values.first
556
555
  if instance and instance.state.name == "terminated"
557
556
  raise MuError, "EC2 instance #{node} (#{@cloud_id}) terminating during bootstrap!"
558
557
  end
@@ -572,6 +571,8 @@ module MU
572
571
 
573
572
  # If we came up via AutoScale, the Alarm module won't have had our
574
573
  # instance ID to associate us with itself. So invoke that here.
574
+ # XXX might be possible to do this with regular alarm resources and
575
+ # dependencies now
575
576
  if !@config['basis'].nil? and @config["alarms"] and !@config["alarms"].empty?
576
577
  @config["alarms"].each { |alarm|
577
578
  alarm_obj = MU::MommaCat.findStray(
@@ -584,8 +585,8 @@ module MU
584
585
  alarm["dimensions"] = [{:name => "InstanceId", :value => @cloud_id}]
585
586
 
586
587
  if alarm["enable_notifications"]
587
- topic_arn = MU::Cloud::AWS::Notification.createTopic(alarm["notification_group"], region: @config["region"])
588
- MU::Cloud::AWS::Notification.subscribe(arn: topic_arn, protocol: alarm["notification_type"], endpoint: alarm["notification_endpoint"], region: @config["region"])
588
+ topic_arn = MU::Cloud::AWS::Notification.createTopic(alarm["notification_group"], region: @config["region"], credentials: @config['credentials'])
589
+ MU::Cloud::AWS::Notification.subscribe(arn: topic_arn, protocol: alarm["notification_type"], endpoint: alarm["notification_endpoint"], region: @config["region"], credentials: @config["credentials"])
589
590
  alarm["alarm_actions"] = [topic_arn]
590
591
  alarm["ok_actions"] = [topic_arn]
591
592
  end
@@ -606,7 +607,8 @@ module MU
606
607
  evaluation_periods: alarm["evaluation_periods"],
607
608
  threshold: alarm["threshold"],
608
609
  comparison_operator: alarm["comparison_operator"],
609
- region: @config["region"]
610
+ region: @config["region"],
611
+ credentials: @config['credentials']
610
612
  )
611
613
  }
612
614
  end
@@ -635,7 +637,7 @@ module MU
635
637
 
636
638
  if !@config['src_dst_check'] and !@config["vpc"].nil?
637
639
  MU.log "Disabling source_dest_check #{node} (making it NAT-worthy)"
638
- MU::Cloud::AWS.ec2(@config['region']).modify_instance_attribute(
640
+ MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).modify_instance_attribute(
639
641
  instance_id: @cloud_id,
640
642
  source_dest_check: {:value => false}
641
643
  )
@@ -643,7 +645,7 @@ module MU
643
645
 
644
646
  # Set console termination protection. Autoscale nodes won't set this
645
647
  # by default.
646
- MU::Cloud::AWS.ec2(@config['region']).modify_instance_attribute(
648
+ MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).modify_instance_attribute(
647
649
  instance_id: @cloud_id,
648
650
  disable_api_termination: {:value => true}
649
651
  )
@@ -651,7 +653,7 @@ module MU
651
653
  has_elastic_ip = false
652
654
  if !instance.public_ip_address.nil?
653
655
  begin
654
- resp = MU::Cloud::AWS.ec2((@config['region'])).describe_addresses(public_ips: [instance.public_ip_address])
656
+ resp = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_addresses(public_ips: [instance.public_ip_address])
655
657
  if resp.addresses.size > 0 and resp.addresses.first.instance_id == @cloud_id
656
658
  has_elastic_ip = true
657
659
  end
@@ -737,7 +739,7 @@ module MU
737
739
  end
738
740
 
739
741
  nat_ssh_key, nat_ssh_user, nat_ssh_host, canonical_ip, ssh_user, ssh_key_name = getSSHConfig
740
- if subnet.private? and !nat_ssh_host and !MU::Cloud::AWS::VPC.haveRouteToInstance?(cloud_desc, region: @config['region'])
742
+ if subnet.private? and !nat_ssh_host and !MU::Cloud::AWS::VPC.haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials'])
741
743
  raise MuError, "#{node} is in a private subnet (#{subnet}), but has no NAT host configured, and I have no other route to it"
742
744
  end
743
745
 
@@ -749,23 +751,23 @@ module MU
749
751
  @vpc.subnets { |subnet|
750
752
  subnet_id = subnet.cloud_id
751
753
  MU.log "Adding network interface on subnet #{subnet_id} for #{node}"
752
- iface = MU::Cloud::AWS.ec2(@config['region']).create_network_interface(subnet_id: subnet_id).network_interface
753
- MU::MommaCat.createStandardTags(iface.network_interface_id, region: @config['region'])
754
- MU::MommaCat.createTag(iface.network_interface_id, "Name", node+"-ETH"+device_index.to_s, region: @config['region'])
754
+ iface = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).create_network_interface(subnet_id: subnet_id).network_interface
755
+ MU::MommaCat.createStandardTags(iface.network_interface_id, region: @config['region'], credentials: @config['credentials'])
756
+ MU::MommaCat.createTag(iface.network_interface_id, "Name", node+"-ETH"+device_index.to_s, region: @config['region'], credentials: @config['credentials'])
755
757
 
756
758
  if @config['optional_tags']
757
759
  MU::MommaCat.listOptionalTags.each { |key, value|
758
- MU::MommaCat.createTag(iface.network_interface_id, key, value, region: @config['region'])
760
+ MU::MommaCat.createTag(iface.network_interface_id, key, value, region: @config['region'], credentials: @config['credentials'])
759
761
  }
760
762
  end
761
763
 
762
764
  if !@config['tags'].nil?
763
765
  @config['tags'].each { |tag|
764
- MU::MommaCat.createTag(iface.network_interface_id, tag['key'], tag['value'], region: @config['region'])
766
+ MU::MommaCat.createTag(iface.network_interface_id, tag['key'], tag['value'], region: @config['region'], credentials: @config['credentials'])
765
767
  }
766
768
  end
767
769
 
768
- MU::Cloud::AWS.ec2(@config['region']).attach_network_interface(
770
+ MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).attach_network_interface(
769
771
  network_interface_id: iface.network_interface_id,
770
772
  instance_id: instance.instance_id,
771
773
  device_index: device_index
@@ -805,29 +807,29 @@ module MU
805
807
 
806
808
  # Tag volumes with all our standard tags.
807
809
  # Maybe replace tagVolumes with this? There is one more place tagVolumes is called from
808
- volumes = MU::Cloud::AWS.ec2(@config['region']).describe_volumes(filters: [name: "attachment.instance-id", values: [instance.instance_id]])
810
+ volumes = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_volumes(filters: [name: "attachment.instance-id", values: [instance.instance_id]])
809
811
  volumes.each { |vol|
810
812
  vol.volumes.each { |volume|
811
813
  volume.attachments.each { |attachment|
812
814
  MU::MommaCat.listStandardTags.each_pair { |key, value|
813
- MU::MommaCat.createTag(attachment.volume_id, key, value, region: @config['region'])
815
+ MU::MommaCat.createTag(attachment.volume_id, key, value, region: @config['region'], credentials: @config['credentials'])
814
816
 
815
817
  if attachment.device == "/dev/sda" or attachment.device == "/dev/sda1"
816
- MU::MommaCat.createTag(attachment.volume_id, "Name", "ROOT-#{MU.deploy_id}-#{@config["name"].upcase}", region: @config['region'])
818
+ MU::MommaCat.createTag(attachment.volume_id, "Name", "ROOT-#{MU.deploy_id}-#{@config["name"].upcase}", region: @config['region'], credentials: @config['credentials'])
817
819
  else
818
- MU::MommaCat.createTag(attachment.volume_id, "Name", "#{MU.deploy_id}-#{@config["name"].upcase}-#{attachment.device.upcase}", region: @config['region'])
820
+ MU::MommaCat.createTag(attachment.volume_id, "Name", "#{MU.deploy_id}-#{@config["name"].upcase}-#{attachment.device.upcase}", region: @config['region'], credentials: @config['credentials'])
819
821
  end
820
822
  }
821
823
 
822
824
  if @config['optional_tags']
823
825
  MU::MommaCat.listOptionalTags.each { |key, value|
824
- MU::MommaCat.createTag(attachment.volume_id, key, value, region: @config['region'])
826
+ MU::MommaCat.createTag(attachment.volume_id, key, value, region: @config['region'], credentials: @config['credentials'])
825
827
  }
826
828
  end
827
829
 
828
830
  if @config['tags']
829
831
  @config['tags'].each { |tag|
830
- MU::MommaCat.createTag(attachment.volume_id, tag['key'], tag['value'], region: @config['region'])
832
+ MU::MommaCat.createTag(attachment.volume_id, tag['key'], tag['value'], region: @config['region'], credentials: @config['credentials'])
831
833
  }
832
834
  end
833
835
  }
@@ -842,7 +844,7 @@ module MU
842
844
  instance.network_interfaces.each { |int|
843
845
  if int.private_ip_address == instance.private_ip_address and int.private_ip_addresses.size < (@config['add_private_ips'] + 1)
844
846
  MU.log "Adding #{@config['add_private_ips']} extra private IP addresses to #{instance.instance_id}"
845
- MU::Cloud::AWS.ec2(@config['region']).assign_private_ip_addresses(
847
+ MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).assign_private_ip_addresses(
846
848
  network_interface_id: int.network_interface_id,
847
849
  secondary_private_ip_address_count: @config['add_private_ips'],
848
850
  allow_reassignment: false
@@ -853,31 +855,33 @@ module MU
853
855
  end
854
856
 
855
857
  begin
856
- if windows?
857
- # kick off certificate generation early; WinRM will need it
858
- cert, key = @deploy.nodeSSLCerts(self)
859
- if @config.has_key?("basis")
860
- @deploy.nodeSSLCerts(self, true)
861
- end
862
- if !@groomer.haveBootstrapped?
863
- session = getWinRMSession(50, 60, reboot_on_problems: true)
864
- initialWinRMTasks(session)
865
- begin
866
- session.close
867
- rescue Exception
868
- # this is allowed to fail- we're probably rebooting anyway
858
+ if @config['groom'].nil? or @config['groom']
859
+ if windows?
860
+ # kick off certificate generation early; WinRM will need it
861
+ cert, key = @deploy.nodeSSLCerts(self)
862
+ if @config.has_key?("basis")
863
+ @deploy.nodeSSLCerts(self, true)
869
864
  end
870
- else # for an existing Windows node: WinRM, then SSH if it fails
871
- begin
872
- session = getWinRMSession(1, 60)
873
- rescue Exception # yeah, yeah
874
- session = getSSHSession(1, 60)
875
- # XXX maybe loop at least once if this also fails?
865
+ if !@groomer.haveBootstrapped?
866
+ session = getWinRMSession(50, 60, reboot_on_problems: true)
867
+ initialWinRMTasks(session)
868
+ begin
869
+ session.close
870
+ rescue Exception
871
+ # this is allowed to fail- we're probably rebooting anyway
872
+ end
873
+ else # for an existing Windows node: WinRM, then SSH if it fails
874
+ begin
875
+ session = getWinRMSession(1, 60)
876
+ rescue Exception # yeah, yeah
877
+ session = getSSHSession(1, 60)
878
+ # XXX maybe loop at least once if this also fails?
879
+ end
876
880
  end
881
+ else
882
+ session = getSSHSession(40, 30)
883
+ initialSSHTasks(session)
877
884
  end
878
- else
879
- session = getSSHSession(40, 30)
880
- initialSSHTasks(session)
881
885
  end
882
886
  rescue BootstrapTempFail
883
887
  sleep 45
@@ -922,14 +926,16 @@ module MU
922
926
  # we're done.
923
927
  if @groomer.haveBootstrapped?
924
928
  MU.log "Node #{node} has already been bootstrapped, skipping groomer setup.", MU::NOTICE
925
- @groomer.saveDeployData
929
+ if @config['groom'].nil? or @config['groom']
930
+ @groomer.saveDeployData
931
+ end
926
932
  MU::MommaCat.unlock(instance.instance_id+"-orchestrate")
927
933
  MU::MommaCat.unlock(instance.instance_id+"-groom")
928
934
  return true
929
935
  end
930
936
 
931
937
  begin
932
- @groomer.bootstrap
938
+ @groomer.bootstrap if @config['groom'].nil? or @config['groom']
933
939
  rescue MU::Groomer::RunError
934
940
  MU::MommaCat.unlock(instance.instance_id+"-groom")
935
941
  MU::MommaCat.unlock(instance.instance_id+"-orchestrate")
@@ -954,11 +960,11 @@ module MU
954
960
  # @param region [String]: The cloud provider region
955
961
  # @param tag_key [String]: A tag key to search.
956
962
  # @param tag_value [String]: The value of the tag specified by tag_key to match when searching by tag.
957
- # @param ip [String]: An IP address associated with the instance
958
963
  # @param flags [Hash]: Optional flags
959
964
  # @return [Array<Hash<String,OpenStruct>>]: The cloud provider's complete descriptions of matching instances
960
- def self.find(cloud_id: nil, region: MU.curRegion, tag_key: "Name", tag_value: nil, ip: nil, flags: {})
965
+ def self.find(cloud_id: nil, region: MU.curRegion, tag_key: "Name", tag_value: nil, credentials: nil, flags: {})
961
966
  # XXX put that 'ip' value into opts
967
+ ip ||= flags['ip']
962
968
  instance = nil
963
969
  if !region.nil?
964
970
  regions = [region]
@@ -977,7 +983,7 @@ module MU
977
983
  MU.log "Hunting for instance with cloud id '#{cloud_id}' in #{region}", MU::DEBUG
978
984
  retries = 0
979
985
  begin
980
- MU::Cloud::AWS.ec2(region).describe_instances(
986
+ MU::Cloud::AWS.ec2(region: region, credentials: credentials).describe_instances(
981
987
  instance_ids: [cloud_id],
982
988
  filters: [
983
989
  {name: "instance-state-name", values: ["running", "pending"]}
@@ -1018,7 +1024,7 @@ module MU
1018
1024
  if instance.nil? and !ip.nil?
1019
1025
  MU.log "Hunting for instance by IP '#{ip}'", MU::DEBUG
1020
1026
  ["ip-address", "private-ip-address"].each { |filter|
1021
- response = MU::Cloud::AWS.ec2(region).describe_instances(
1027
+ response = MU::Cloud::AWS.ec2(region: region, credentials: credentials).describe_instances(
1022
1028
  filters: [
1023
1029
  {name: filter, values: [ip]},
1024
1030
  {name: "instance-state-name", values: ["running", "pending"]}
@@ -1035,7 +1041,7 @@ module MU
1035
1041
  # Fine, let's try it by tag.
1036
1042
  if !tag_value.nil?
1037
1043
  MU.log "Searching for instance by tag '#{tag_key}=#{tag_value}'", MU::DEBUG
1038
- MU::Cloud::AWS.ec2(region).describe_instances(
1044
+ MU::Cloud::AWS.ec2(region: region, credentials: credentials).describe_instances(
1039
1045
  filters: [
1040
1046
  {name: "tag:#{tag_key}", values: [tag_value]},
1041
1047
  {name: "instance-state-name", values: ["running", "pending"]}
@@ -1158,7 +1164,7 @@ module MU
1158
1164
 
1159
1165
  punchAdminNAT
1160
1166
 
1161
- MU::Cloud::AWS::Server.tagVolumes(@cloud_id)
1167
+ MU::Cloud::AWS::Server.tagVolumes(@cloud_id, credentials: @config['credentials'])
1162
1168
 
1163
1169
  # If we have a loadbalancer configured, attach us to it
1164
1170
  if !@config['loadbalancers'].nil?
@@ -1182,10 +1188,14 @@ module MU
1182
1188
  # }
1183
1189
  # end
1184
1190
 
1185
- @groomer.saveDeployData
1191
+ if @config['groom'].nil? or @config['groom']
1192
+ @groomer.saveDeployData
1193
+ end
1186
1194
 
1187
1195
  begin
1188
- @groomer.run(purpose: "Full Initial Run", max_retries: 15, reboot_first_fail: windows?)
1196
+ if @config['groom'].nil? or @config['groom']
1197
+ @groomer.run(purpose: "Full Initial Run", max_retries: 15, reboot_first_fail: windows?, timeout: @config['groomer_timeout'])
1198
+ end
1189
1199
  rescue MU::Groomer::RunError => e
1190
1200
  MU.log "Proceeding after failed initial Groomer run, but #{node} may not behave as expected!", MU::WARN, details: e.message
1191
1201
  rescue Exception => e
@@ -1221,13 +1231,15 @@ module MU
1221
1231
  copy_to_regions: img_cfg['copy_to_regions'],
1222
1232
  make_public: img_cfg['public'],
1223
1233
  region: @config['region'],
1224
- tags: @config['tags'])
1234
+ tags: @config['tags'],
1235
+ credentials: @config['credentials']
1236
+ )
1225
1237
  @deploy.notify("images", @config['name'], {"image_id" => ami_id})
1226
1238
  @config['image_created'] = true
1227
1239
  if img_cfg['image_then_destroy']
1228
- MU::Cloud::AWS::Server.waitForAMI(ami_id, region: @config['region'])
1240
+ MU::Cloud::AWS::Server.waitForAMI(ami_id, region: @config['region'], credentials: @config['credentials'])
1229
1241
  MU.log "AMI #{ami_id} ready, removing source node #{node}"
1230
- MU::Cloud::AWS::Server.terminateInstance(id: @cloud_id, region: @config['region'], deploy_id: @deploy.deploy_id, mu_name: @mu_name)
1242
+ MU::Cloud::AWS::Server.terminateInstance(id: @cloud_id, region: @config['region'], deploy_id: @deploy.deploy_id, mu_name: @mu_name, credentials: @config['credentials'])
1231
1243
  destroy
1232
1244
  end
1233
1245
  end
@@ -1238,7 +1250,7 @@ module MU
1238
1250
  # Canonical Amazon Resource Number for this resource
1239
1251
  # @return [String]
1240
1252
  def arn
1241
- "arn:"+(MU::Cloud::AWS.isGovCloud?(@config["region"]) ? "aws-us-gov" : "aws")+":ec2:"+@config['region']+":"+MU.account_number+":instance/"+@cloud_id
1253
+ "arn:"+(MU::Cloud::AWS.isGovCloud?(@config["region"]) ? "aws-us-gov" : "aws")+":ec2:"+@config['region']+":"+MU::Cloud::AWS.credToAcct(@config['credentials'])+":instance/"+@cloud_id
1242
1254
  end
1243
1255
 
1244
1256
  def cloud_desc
@@ -1246,7 +1258,7 @@ module MU
1246
1258
  retries = 0
1247
1259
  if !@cloud_id.nil?
1248
1260
  begin
1249
- return MU::Cloud::AWS.ec2(@config['region']).describe_instances(instance_ids: [@cloud_id]).reservations.first.instances.first
1261
+ return MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_instances(instance_ids: [@cloud_id]).reservations.first.instances.first
1250
1262
  rescue Aws::EC2::Errors::InvalidInstanceIDNotFound
1251
1263
  return nil
1252
1264
  rescue NoMethodError => e
@@ -1291,7 +1303,7 @@ module MU
1291
1303
  # Our deploydata gets corrupted often with server pools, this will cause us to use the wrong IP to identify a node
1292
1304
  # which will cause us to create certificates, DNS records and other artifacts with incorrect information which will cause our deploy to fail.
1293
1305
  # The cloud_id is always correct so lets use 'cloud_desc' to get the correct IPs
1294
- if MU::Cloud::AWS::VPC.haveRouteToInstance?(cloud_desc, region: @config['region']) or @deploydata["public_ip_address"].nil?
1306
+ if MU::Cloud::AWS::VPC.haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials']) or @deploydata["public_ip_address"].nil?
1295
1307
  @config['canonical_ip'] = instance.private_ip_address
1296
1308
  @deploydata["private_ip_address"] = instance.private_ip_address
1297
1309
  return instance.private_ip_address
@@ -1311,7 +1323,7 @@ module MU
1311
1323
  # @param copy_to_regions [Array<String>]: Copy the resulting AMI into the listed regions.
1312
1324
  # @param tags [Array<String>]: Extra/override tags to apply to the image.
1313
1325
  # @return [String]: The cloud provider identifier of the new machine image.
1314
- def self.createImage(name: nil, instance_id: nil, storage: {}, exclude_storage: false, make_public: false, region: MU.curRegion, copy_to_regions: [], tags: [])
1326
+ def self.createImage(name: nil, instance_id: nil, storage: {}, exclude_storage: false, make_public: false, region: MU.curRegion, copy_to_regions: [], tags: [], credentials: nil)
1315
1327
  ami_descriptor = {
1316
1328
  :instance_id => instance_id,
1317
1329
  :name => name,
@@ -1344,18 +1356,18 @@ module MU
1344
1356
  MU.log "Creating AMI from #{name}", details: ami_descriptor
1345
1357
  resp = nil
1346
1358
  begin
1347
- resp = MU::Cloud::AWS.ec2(region).create_image(ami_descriptor)
1359
+ resp = MU::Cloud::AWS.ec2(region: region).create_image(ami_descriptor)
1348
1360
  rescue Aws::EC2::Errors::InvalidAMINameDuplicate => e
1349
1361
  MU.log "AMI #{name} already exists, skipping", MU::WARN
1350
1362
  return nil
1351
1363
  end
1352
1364
  ami = resp.image_id
1353
- MU::MommaCat.createStandardTags(ami, region: region)
1354
- MU::MommaCat.createTag(ami, "Name", name, region: region)
1365
+ MU::MommaCat.createStandardTags(ami, region: region, credentials: credentials)
1366
+ MU::MommaCat.createTag(ami, "Name", name, region: region, credentials: credentials)
1355
1367
  MU.log "AMI of #{name} in region #{region}: #{ami}"
1356
1368
  if make_public
1357
- MU::Cloud::AWS::Server.waitForAMI(ami, region: region)
1358
- MU::Cloud::AWS.ec2(region).modify_image_attribute(
1369
+ MU::Cloud::AWS::Server.waitForAMI(ami, region: region, credentials: credentials)
1370
+ MU::Cloud::AWS.ec2(region: region).modify_image_attribute(
1359
1371
  image_id: ami,
1360
1372
  launch_permission: {add: [{group: "all"}]},
1361
1373
  attribute: "launchPermission"
@@ -1364,12 +1376,12 @@ module MU
1364
1376
  copythreads = []
1365
1377
  if !copy_to_regions.nil? and copy_to_regions.size > 0
1366
1378
  parent_thread_id = Thread.current.object_id
1367
- MU::Cloud::AWS::Server.waitForAMI(ami, region: region) if !make_public
1379
+ MU::Cloud::AWS::Server.waitForAMI(ami, region: region, credentials: credentials) if !make_public
1368
1380
  copy_to_regions.each { |r|
1369
1381
  next if r == region
1370
1382
  copythreads << Thread.new {
1371
1383
  MU.dupGlobals(parent_thread_id)
1372
- copy = MU::Cloud::AWS.ec2(r).copy_image(
1384
+ copy = MU::Cloud::AWS.ec2(region: r).copy_image(
1373
1385
  source_region: region,
1374
1386
  source_image_id: ami,
1375
1387
  name: name,
@@ -1378,15 +1390,15 @@ module MU
1378
1390
  MU.log "Initiated copy of #{ami} from #{region} to #{r}: #{copy.image_id}"
1379
1391
 
1380
1392
  MU::MommaCat.createStandardTags(copy.image_id, region: r)
1381
- MU::MommaCat.createTag(copy.image_id, "Name", name, region: r)
1393
+ MU::MommaCat.createTag(copy.image_id, "Name", name, region: r, credentials: credentials)
1382
1394
  if !tags.nil?
1383
1395
  tags.each { |tag|
1384
- MU::MommaCat.createTag(instance.instance_id, tag['key'], tag['value'], region: r)
1396
+ MU::MommaCat.createTag(instance.instance_id, tag['key'], tag['value'], region: r, credentials: credentials)
1385
1397
  }
1386
1398
  end
1387
- MU::Cloud::AWS::Server.waitForAMI(copy.image_id, region: r)
1399
+ MU::Cloud::AWS::Server.waitForAMI(copy.image_id, region: r, credentials: credentials)
1388
1400
  if make_public
1389
- MU::Cloud::AWS.ec2(r).modify_image_attribute(
1401
+ MU::Cloud::AWS.ec2(region: r).modify_image_attribute(
1390
1402
  image_id: copy.image_id,
1391
1403
  launch_permission: {add: [{group: "all"}]},
1392
1404
  attribute: "launchPermission"
@@ -1408,12 +1420,12 @@ module MU
1408
1420
  # flagged as ready.
1409
1421
  # @param image_id [String]: The machine image to wait for.
1410
1422
  # @param region [String]: The cloud provider region
1411
- def self.waitForAMI(image_id, region: MU.curRegion)
1423
+ def self.waitForAMI(image_id, region: MU.curRegion, credentials: nil)
1412
1424
  MU.log "Checking to see if AMI #{image_id} is available", MU::DEBUG
1413
1425
 
1414
1426
  retries = 0
1415
1427
  begin
1416
- images = MU::Cloud::AWS.ec2(region).describe_images(image_ids: [image_id]).images
1428
+ images = MU::Cloud::AWS.ec2(region: region, credentials: credentials).describe_images(image_ids: [image_id]).images
1417
1429
  if images.nil? or images.size == 0
1418
1430
  raise MuError, "No such AMI #{image_id} found"
1419
1431
  end
@@ -1497,7 +1509,7 @@ module MU
1497
1509
  retries = 0
1498
1510
  MU.log "Waiting for Windows instance password to be set by Amazon and flagged as available from the API. Note- if you're using a source AMI that already has its password set, this may fail. You'll want to set use_cloud_provider_windows_password to false if this is the case.", MU::NOTICE
1499
1511
  begin
1500
- MU::Cloud::AWS.ec2(@config['region']).wait_until(:password_data_available, instance_id: @cloud_id) do |waiter|
1512
+ MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).wait_until(:password_data_available, instance_id: @cloud_id) do |waiter|
1501
1513
  waiter.max_attempts = 60
1502
1514
  waiter.before_attempt do |attempts|
1503
1515
  MU.log "Waiting for Windows password data to be available for node #{@mu_name}", MU::NOTICE if attempts % 5 == 0
@@ -1517,7 +1529,7 @@ module MU
1517
1529
  end
1518
1530
  end
1519
1531
 
1520
- resp = MU::Cloud::AWS.ec2(@config['region']).get_password_data(instance_id: @cloud_id)
1532
+ resp = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).get_password_data(instance_id: @cloud_id)
1521
1533
  encrypted_password = resp.password_data
1522
1534
 
1523
1535
  # Note: This is already implemented in the decrypt_windows_password API call
@@ -1544,9 +1556,9 @@ module MU
1544
1556
  filters << {name: "public-ip", values: [ip]} if ip != nil
1545
1557
 
1546
1558
  if filters.size > 0
1547
- resp = MU::Cloud::AWS.ec2(region).describe_addresses(filters: filters)
1559
+ resp = MU::Cloud::AWS.ec2(region: region).describe_addresses(filters: filters)
1548
1560
  else
1549
- resp = MU::Cloud::AWS.ec2(region).describe_addresses()
1561
+ resp = MU::Cloud::AWS.ec2(region: region).describe_addresses()
1550
1562
  end
1551
1563
  resp.addresses.each { |address|
1552
1564
  return address if (address.network_interface_id.nil? || address.network_interface_id.empty?) && !@eips_used.include?(address.public_ip)
@@ -1559,10 +1571,10 @@ module MU
1559
1571
  end
1560
1572
  end
1561
1573
  if !classic
1562
- resp = MU::Cloud::AWS.ec2(region).allocate_address(domain: "vpc")
1574
+ resp = MU::Cloud::AWS.ec2(region: region).allocate_address(domain: "vpc")
1563
1575
  new_ip = resp.public_ip
1564
1576
  else
1565
- new_ip = MU::Cloud::AWS.ec2(region).allocate_address().public_ip
1577
+ new_ip = MU::Cloud::AWS.ec2(region: region).allocate_address().public_ip
1566
1578
  end
1567
1579
  filters = [{name: "public-ip", values: [new_ip]}]
1568
1580
  if resp.domain
@@ -1578,7 +1590,7 @@ module MU
1578
1590
  begin
1579
1591
  begin
1580
1592
  sleep 5
1581
- resp = MU::Cloud::AWS.ec2(region).describe_addresses(
1593
+ resp = MU::Cloud::AWS.ec2(region: region).describe_addresses(
1582
1594
  filters: filters
1583
1595
  )
1584
1596
  addr = resp.addresses.first
@@ -1602,7 +1614,7 @@ module MU
1602
1614
  return true
1603
1615
  end
1604
1616
  az = nil
1605
- MU::Cloud::AWS.ec2(@config['region']).describe_instances(
1617
+ MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_instances(
1606
1618
  instance_ids: [@cloud_id]
1607
1619
  ).reservations.each { |resp|
1608
1620
  if !resp.nil? and !resp.instances.nil?
@@ -1618,14 +1630,14 @@ module MU
1618
1630
  end
1619
1631
  }
1620
1632
  MU.log "Creating #{size}GB #{type} volume on #{dev} for #{@cloud_id}"
1621
- creation = MU::Cloud::AWS.ec2(@config['region']).create_volume(
1633
+ creation = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).create_volume(
1622
1634
  availability_zone: az,
1623
1635
  size: size,
1624
1636
  volume_type: type
1625
1637
  )
1626
1638
  begin
1627
1639
  sleep 3
1628
- creation = MU::Cloud::AWS.ec2(@config['region']).describe_volumes(volume_ids: [creation.volume_id]).volumes.first
1640
+ creation = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_volumes(volume_ids: [creation.volume_id]).volumes.first
1629
1641
  if !["creating", "available"].include?(creation.state)
1630
1642
  raise MuError, "Saw state '#{creation.state}' while creating #{size}GB #{type} volume on #{dev} for #{@cloud_id}"
1631
1643
  end
@@ -1633,12 +1645,12 @@ module MU
1633
1645
 
1634
1646
  if @deploy
1635
1647
  MU::MommaCat.listStandardTags.each_pair { |key, value|
1636
- MU::MommaCat.createTag(creation.volume_id, key, value, region: @config['region'])
1648
+ MU::MommaCat.createTag(creation.volume_id, key, value, region: @config['region'], credentials: @config['credentials'])
1637
1649
  }
1638
- MU::MommaCat.createTag(creation.volume_id, "Name", "#{MU.deploy_id}-#{@config["name"].upcase}-#{dev.upcase}", region: @config['region'])
1650
+ MU::MommaCat.createTag(creation.volume_id, "Name", "#{MU.deploy_id}-#{@config["name"].upcase}-#{dev.upcase}", region: @config['region'], credentials: @config['credentials'])
1639
1651
  end
1640
1652
 
1641
- attachment = MU::Cloud::AWS.ec2(@config['region']).attach_volume(
1653
+ attachment = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).attach_volume(
1642
1654
  device: dev,
1643
1655
  instance_id: @cloud_id,
1644
1656
  volume_id: creation.volume_id
@@ -1646,7 +1658,7 @@ module MU
1646
1658
 
1647
1659
  begin
1648
1660
  sleep 3
1649
- attachment = MU::Cloud::AWS.ec2(@config['region']).describe_volumes(volume_ids: [attachment.volume_id]).volumes.first.attachments.first
1661
+ attachment = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_volumes(volume_ids: [attachment.volume_id]).volumes.first.attachments.first
1650
1662
  if !["attaching", "attached"].include?(attachment.state)
1651
1663
  raise MuError, "Saw state '#{creation.state}' while creating #{size}GB #{type} volume on #{dev} for #{@cloud_id}"
1652
1664
  end
@@ -1662,7 +1674,7 @@ module MU
1662
1674
  return true
1663
1675
  end
1664
1676
  begin
1665
- MU::Cloud::AWS.ec2(@config['region']).describe_instances(
1677
+ MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_instances(
1666
1678
  instance_ids: [@cloud_id]
1667
1679
  ).reservations.each { |resp|
1668
1680
  if !resp.nil? and !resp.instances.nil?
@@ -1694,7 +1706,7 @@ module MU
1694
1706
  @eip_semaphore.synchronize {
1695
1707
  if !ip.nil?
1696
1708
  filters = [{name: "public-ip", values: [ip]}]
1697
- resp = MU::Cloud::AWS.ec2(region).describe_addresses(filters: filters)
1709
+ resp = MU::Cloud::AWS.ec2(region: region).describe_addresses(filters: filters)
1698
1710
  if @eips_used.include?(ip)
1699
1711
  is_free = false
1700
1712
  resp.addresses.each { |address|
@@ -1726,12 +1738,12 @@ module MU
1726
1738
  attempts = 0
1727
1739
  begin
1728
1740
  if classic
1729
- resp = MU::Cloud::AWS.ec2(region).associate_address(
1741
+ resp = MU::Cloud::AWS.ec2(region: region).associate_address(
1730
1742
  instance_id: instance_id,
1731
1743
  public_ip: elastic_ip.public_ip
1732
1744
  )
1733
1745
  else
1734
- resp = MU::Cloud::AWS.ec2(region).associate_address(
1746
+ resp = MU::Cloud::AWS.ec2(region: region).associate_address(
1735
1747
  instance_id: instance_id,
1736
1748
  allocation_id: elastic_ip.allocation_id,
1737
1749
  allow_reassociation: false
@@ -1747,7 +1759,7 @@ module MU
1747
1759
  raise MuError "#{e.message} associating #{elastic_ip.allocation_id} with #{instance_id}"
1748
1760
  rescue Aws::EC2::Errors::ResourceAlreadyAssociated => e
1749
1761
  # A previous association attempt may have succeeded, albeit slowly.
1750
- resp = MU::Cloud::AWS.ec2(region).describe_addresses(
1762
+ resp = MU::Cloud::AWS.ec2(region: region).describe_addresses(
1751
1763
  allocation_ids: [elastic_ip.allocation_id]
1752
1764
  )
1753
1765
  first_addr = resp.addresses.first
@@ -1759,14 +1771,14 @@ module MU
1759
1771
  end
1760
1772
  end
1761
1773
 
1762
- instance = MU::Cloud::AWS.ec2(region).describe_instances(instance_ids: [instance_id]).reservations.first.instances.first
1774
+ instance = MU::Cloud::AWS.ec2(region: region).describe_instances(instance_ids: [instance_id]).reservations.first.instances.first
1763
1775
  waited = false
1764
1776
  if instance.public_ip_address != elastic_ip.public_ip
1765
1777
  waited = true
1766
1778
  begin
1767
1779
  sleep 10
1768
1780
  MU.log "Waiting for Elastic IP association of #{elastic_ip.public_ip} to #{instance_id} to take effect", MU::NOTICE
1769
- instance = MU::Cloud::AWS.ec2(region).describe_instances(instance_ids: [instance_id]).reservations.first.instances.first
1781
+ instance = MU::Cloud::AWS.ec2(region: region).describe_instances(instance_ids: [instance_id]).reservations.first.instances.first
1770
1782
  end while instance.public_ip_address != elastic_ip.public_ip
1771
1783
  end
1772
1784
 
@@ -1775,12 +1787,21 @@ module MU
1775
1787
  return elastic_ip.public_ip
1776
1788
  end
1777
1789
 
1790
+ # Does this resource type exist as a global (cloud-wide) artifact, or
1791
+ # is it localized to a region/zone?
1792
+ # @return [Boolean]
1793
+ def self.isGlobal?
1794
+ false
1795
+ end
1796
+
1778
1797
  # Remove all instances associated with the currently loaded deployment. Also cleans up associated volumes, droppings in the MU master's /etc/hosts and ~/.ssh, and in whatever Groomer was used.
1779
1798
  # @param noop [Boolean]: If true, will only print what would be done
1780
1799
  # @param ignoremaster [Boolean]: If true, will remove resources not flagged as originating from this Mu server
1781
1800
  # @param region [String]: The cloud provider region
1782
1801
  # @return [void]
1783
- def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, skipsnapshots: false, onlycloud: false, flags: {})
1802
+ def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {})
1803
+ onlycloud = flags["onlycloud"]
1804
+ skipsnapshots = flags["skipsnapshots"]
1784
1805
  tagfilters = [
1785
1806
  {name: "tag:MU-ID", values: [MU.deploy_id]}
1786
1807
  ]
@@ -1794,7 +1815,7 @@ module MU
1794
1815
  # Build a list of instances we need to clean up. We guard against
1795
1816
  # accidental deletion here by requiring someone to have hand-terminated
1796
1817
  # these, by default.
1797
- resp = MU::Cloud::AWS.ec2(region).describe_instances(
1818
+ resp = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_instances(
1798
1819
  filters: tagfilters
1799
1820
  )
1800
1821
 
@@ -1817,18 +1838,18 @@ module MU
1817
1838
  threads << Thread.new(instance) { |myinstance|
1818
1839
  MU.dupGlobals(parent_thread_id)
1819
1840
  Thread.abort_on_exception = true
1820
- MU::Cloud::AWS::Server.terminateInstance(id: myinstance.instance_id, noop: noop, onlycloud: onlycloud, region: region, deploy_id: MU.deploy_id)
1841
+ MU::Cloud::AWS::Server.terminateInstance(id: myinstance.instance_id, noop: noop, onlycloud: onlycloud, region: region, deploy_id: MU.deploy_id, credentials: credentials)
1821
1842
  }
1822
1843
  }
1823
1844
 
1824
- resp = MU::Cloud::AWS.ec2(region).describe_volumes(
1845
+ resp = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_volumes(
1825
1846
  filters: tagfilters
1826
1847
  )
1827
1848
  resp.data.volumes.each { |volume|
1828
1849
  threads << Thread.new(volume) { |myvolume|
1829
1850
  MU.dupGlobals(parent_thread_id)
1830
1851
  Thread.abort_on_exception = true
1831
- MU::Cloud::AWS::Server.delete_volume(myvolume, noop, skipsnapshots)
1852
+ MU::Cloud::AWS::Server.delete_volume(myvolume, noop, skipsnapshots, credentials: credentials)
1832
1853
  }
1833
1854
  }
1834
1855
 
@@ -1843,12 +1864,12 @@ module MU
1843
1864
  # @param id [String]: The cloud provider's identifier for the instance, to use if the full description is not available.
1844
1865
  # @param region [String]: The cloud provider region
1845
1866
  # @return [void]
1846
- def self.terminateInstance(instance: nil, noop: false, id: nil, onlycloud: false, region: MU.curRegion, deploy_id: MU.deploy_id, mu_name: nil)
1867
+ def self.terminateInstance(instance: nil, noop: false, id: nil, onlycloud: false, region: MU.curRegion, deploy_id: MU.deploy_id, mu_name: nil, credentials: nil)
1847
1868
  ips = Array.new
1848
1869
  if !instance
1849
1870
  if id
1850
1871
  begin
1851
- resp = MU::Cloud::AWS.ec2(region).describe_instances(instance_ids: [id])
1872
+ resp = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_instances(instance_ids: [id])
1852
1873
  rescue Aws::EC2::Errors::InvalidInstanceIDNotFound => e
1853
1874
  MU.log "Instance #{id} no longer exists", MU::WARN
1854
1875
  end
@@ -1880,26 +1901,26 @@ module MU
1880
1901
  ).first
1881
1902
 
1882
1903
  begin
1883
- MU::Cloud::AWS.ec2(region).describe_instances(instance_ids: [id])
1904
+ MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_instances(instance_ids: [id])
1884
1905
  rescue Aws::EC2::Errors::InvalidInstanceIDNotFound => e
1885
1906
  MU.log "Instance #{id} no longer exists", MU::DEBUG
1886
1907
  end
1887
1908
 
1888
- if !server_obj.nil? and MU::Cloud::AWS.hosted and !MU::Cloud::AWS.isGovCloud?
1909
+ if !server_obj.nil? and MU::Cloud::AWS.hosted? and !MU::Cloud::AWS.isGovCloud?
1889
1910
  # DNS cleanup is now done in MU::Cloud::DNSZone. Keeping this for now
1890
1911
  cleaned_dns = false
1891
1912
  mu_name = server_obj.mu_name
1892
- mu_zone = MU::Cloud::DNSZone.find(cloud_id: "platform-mu").values.first
1913
+ mu_zone = MU::Cloud::DNSZone.find(cloud_id: "platform-mu", credentials: credentials).values.first
1893
1914
  if !mu_zone.nil?
1894
1915
  zone_rrsets = []
1895
- rrsets = MU::Cloud::AWS.route53(region).list_resource_record_sets(hosted_zone_id: mu_zone.id)
1916
+ rrsets = MU::Cloud::AWS.route53(credentials: credentials).list_resource_record_sets(hosted_zone_id: mu_zone.id)
1896
1917
  rrsets.resource_record_sets.each{ |record|
1897
1918
  zone_rrsets << record
1898
1919
  }
1899
1920
 
1900
1921
  # AWS API returns a maximum of 100 results. DNS zones are likely to have more than 100 records, lets page and make sure we grab all records in a given zone
1901
1922
  while rrsets.next_record_name && rrsets.next_record_type
1902
- rrsets = MU::Cloud::AWS.route53(region).list_resource_record_sets(hosted_zone_id: mu_zone.id, start_record_name: rrsets.next_record_name, start_record_type: rrsets.next_record_type)
1923
+ rrsets = MU::Cloud::AWS.route53(credentials: credentials).list_resource_record_sets(hosted_zone_id: mu_zone.id, start_record_name: rrsets.next_record_name, start_record_type: rrsets.next_record_type)
1903
1924
  rrsets.resource_record_sets.each{ |record|
1904
1925
  zone_rrsets << record
1905
1926
  }
@@ -2002,14 +2023,14 @@ module MU
2002
2023
  MU.log "Terminating #{instance.instance_id} (#{name}) #{noop}"
2003
2024
  if !noop
2004
2025
  begin
2005
- MU::Cloud::AWS.ec2(region).modify_instance_attribute(
2026
+ MU::Cloud::AWS.ec2(credentials: credentials, region: region).modify_instance_attribute(
2006
2027
  instance_id: instance.instance_id,
2007
2028
  disable_api_termination: {value: false}
2008
2029
  )
2009
- MU::Cloud::AWS.ec2(region).terminate_instances(instance_ids: [instance.instance_id])
2030
+ MU::Cloud::AWS.ec2(credentials: credentials, region: region).terminate_instances(instance_ids: [instance.instance_id])
2010
2031
  # Small race window here with the state changing from under us
2011
2032
  rescue Aws::EC2::Errors::IncorrectInstanceState => e
2012
- resp = MU::Cloud::AWS.ec2(region).describe_instances(instance_ids: [id])
2033
+ resp = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_instances(instance_ids: [id])
2013
2034
  if !resp.nil? and !resp.reservations.nil? and !resp.reservations.first.nil?
2014
2035
  instance = resp.reservations.first.instances.first
2015
2036
  if !instance.nil? and instance.state.name != "terminated" and instance.state.name != "terminating"
@@ -2026,7 +2047,7 @@ module MU
2026
2047
  end
2027
2048
  while instance.state.name != "terminated" and !noop
2028
2049
  sleep 30
2029
- instance_response = MU::Cloud::AWS.ec2(region).describe_instances(instance_ids: [instance.instance_id])
2050
+ instance_response = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_instances(instance_ids: [instance.instance_id])
2030
2051
  instance = instance_response.reservations.first.instances.first
2031
2052
  end
2032
2053
  MU.log "#{instance.instance_id} (#{name}) terminated" if !noop
@@ -2156,6 +2177,7 @@ module MU
2156
2177
  else
2157
2178
  role = {
2158
2179
  "name" => server["name"],
2180
+ "credentials" => server["credentials"],
2159
2181
  "can_assume" => [
2160
2182
  {
2161
2183
  "entity_id" => "ec2.amazonaws.com",
@@ -2241,9 +2263,9 @@ module MU
2241
2263
  # @param id [String]: The cloud provider's identifier for the volume, to use if the full description is not available.
2242
2264
  # @param region [String]: The cloud provider region
2243
2265
  # @return [void]
2244
- def self.delete_volume(volume, noop, skipsnapshots, id: nil, region: MU.curRegion)
2266
+ def self.delete_volume(volume, noop, skipsnapshots, id: nil, region: MU.curRegion, credentials: nil)
2245
2267
  if !volume.nil?
2246
- resp = MU::Cloud::AWS.ec2(region).describe_volumes(volume_ids: [volume.volume_id])
2268
+ resp = MU::Cloud::AWS.ec2(region: region, credentials: credentials).describe_volumes(volume_ids: [volume.volume_id])
2247
2269
  volume = resp.data.volumes.first
2248
2270
  end
2249
2271
  name = ""
@@ -2260,15 +2282,25 @@ module MU
2260
2282
  desc = "#{MU.deploy_id}-MUfinal"
2261
2283
  end
2262
2284
 
2263
- MU::Cloud::AWS.ec2(region).create_snapshot(
2285
+ begin
2286
+ MU::Cloud::AWS.ec2(region: region, credentials: credentials).create_snapshot(
2264
2287
  volume_id: volume.volume_id,
2265
2288
  description: desc
2266
- )
2289
+ )
2290
+ rescue Aws::EC2::Errors::IncorrectState => e
2291
+ if e.message.match(/'deleting'/)
2292
+ MU.log "Cannot snapshot volume '#{name}', is already being deleted", MU::WARN
2293
+ end
2294
+ end
2267
2295
  end
2268
2296
 
2269
2297
  retries = 0
2270
2298
  begin
2271
- MU::Cloud::AWS.ec2(region).delete_volume(volume_id: volume.volume_id)
2299
+ MU::Cloud::AWS.ec2(region: region, credentials: credentials).delete_volume(volume_id: volume.volume_id)
2300
+ rescue Aws::EC2::Errors::IncorrectState => e
2301
+ MU.log "Volume #{volume.volume_id} (#{name}) in incorrect state (#{e.message}), will retry", MU::WARN
2302
+ sleep 30
2303
+ retry
2272
2304
  rescue Aws::EC2::Errors::InvalidVolumeNotFound
2273
2305
  MU.log "Volume #{volume.volume_id} (#{name}) disappeared before I could remove it!", MU::WARN
2274
2306
  rescue Aws::EC2::Errors::VolumeInUse