cloud-mu 3.1.2 → 3.2.0

Files changed (201)
  1. checksums.yaml +4 -4
  2. data/Dockerfile +15 -3
  3. data/ansible/roles/mu-windows/README.md +33 -0
  4. data/ansible/roles/mu-windows/defaults/main.yml +2 -0
  5. data/ansible/roles/mu-windows/files/LaunchConfig.json +9 -0
  6. data/ansible/roles/mu-windows/files/config.xml +76 -0
  7. data/ansible/roles/mu-windows/handlers/main.yml +2 -0
  8. data/ansible/roles/mu-windows/meta/main.yml +53 -0
  9. data/ansible/roles/mu-windows/tasks/main.yml +36 -0
  10. data/ansible/roles/mu-windows/tests/inventory +2 -0
  11. data/ansible/roles/mu-windows/tests/test.yml +5 -0
  12. data/ansible/roles/mu-windows/vars/main.yml +2 -0
  13. data/bin/mu-adopt +10 -13
  14. data/bin/mu-azure-tests +57 -0
  15. data/bin/mu-cleanup +2 -4
  16. data/bin/mu-configure +52 -0
  17. data/bin/mu-deploy +3 -3
  18. data/bin/mu-findstray-tests +25 -0
  19. data/bin/mu-gen-docs +2 -4
  20. data/bin/mu-load-config.rb +2 -3
  21. data/bin/mu-node-manage +15 -16
  22. data/bin/mu-run-tests +135 -37
  23. data/cloud-mu.gemspec +22 -20
  24. data/cookbooks/mu-activedirectory/resources/domain.rb +4 -4
  25. data/cookbooks/mu-activedirectory/resources/domain_controller.rb +4 -4
  26. data/cookbooks/mu-tools/libraries/helper.rb +3 -2
  27. data/cookbooks/mu-tools/libraries/monkey.rb +35 -0
  28. data/cookbooks/mu-tools/recipes/apply_security.rb +14 -14
  29. data/cookbooks/mu-tools/recipes/aws_api.rb +9 -0
  30. data/cookbooks/mu-tools/recipes/eks.rb +2 -2
  31. data/cookbooks/mu-tools/recipes/google_api.rb +2 -2
  32. data/cookbooks/mu-tools/recipes/selinux.rb +2 -1
  33. data/cookbooks/mu-tools/recipes/windows-client.rb +163 -164
  34. data/cookbooks/mu-tools/resources/disk.rb +1 -1
  35. data/cookbooks/mu-tools/resources/windows_users.rb +44 -43
  36. data/extras/clean-stock-amis +25 -19
  37. data/extras/generate-stock-images +1 -0
  38. data/extras/image-generators/AWS/win2k12.yaml +18 -13
  39. data/extras/image-generators/AWS/win2k16.yaml +18 -13
  40. data/extras/image-generators/AWS/win2k19.yaml +21 -0
  41. data/extras/image-generators/Google/centos6.yaml +1 -0
  42. data/extras/image-generators/Google/centos7.yaml +1 -1
  43. data/modules/mommacat.ru +6 -16
  44. data/modules/mu.rb +165 -111
  45. data/modules/mu/adoption.rb +401 -68
  46. data/modules/mu/cleanup.rb +199 -306
  47. data/modules/mu/cloud.rb +100 -1632
  48. data/modules/mu/cloud/database.rb +49 -0
  49. data/modules/mu/cloud/dnszone.rb +46 -0
  50. data/modules/mu/cloud/machine_images.rb +212 -0
  51. data/modules/mu/cloud/providers.rb +81 -0
  52. data/modules/mu/cloud/resource_base.rb +920 -0
  53. data/modules/mu/cloud/server.rb +40 -0
  54. data/modules/mu/cloud/server_pool.rb +1 -0
  55. data/modules/mu/cloud/ssh_sessions.rb +228 -0
  56. data/modules/mu/cloud/winrm_sessions.rb +237 -0
  57. data/modules/mu/cloud/wrappers.rb +165 -0
  58. data/modules/mu/config.rb +171 -1767
  59. data/modules/mu/config/alarm.rb +2 -6
  60. data/modules/mu/config/bucket.rb +4 -4
  61. data/modules/mu/config/cache_cluster.rb +1 -1
  62. data/modules/mu/config/collection.rb +4 -4
  63. data/modules/mu/config/container_cluster.rb +9 -4
  64. data/modules/mu/config/database.rb +83 -104
  65. data/modules/mu/config/database.yml +1 -2
  66. data/modules/mu/config/dnszone.rb +6 -6
  67. data/modules/mu/config/doc_helpers.rb +516 -0
  68. data/modules/mu/config/endpoint.rb +4 -4
  69. data/modules/mu/config/firewall_rule.rb +103 -4
  70. data/modules/mu/config/folder.rb +4 -4
  71. data/modules/mu/config/function.rb +3 -3
  72. data/modules/mu/config/group.rb +4 -4
  73. data/modules/mu/config/habitat.rb +4 -4
  74. data/modules/mu/config/loadbalancer.rb +60 -14
  75. data/modules/mu/config/log.rb +4 -4
  76. data/modules/mu/config/msg_queue.rb +4 -4
  77. data/modules/mu/config/nosqldb.rb +4 -4
  78. data/modules/mu/config/notifier.rb +3 -3
  79. data/modules/mu/config/ref.rb +365 -0
  80. data/modules/mu/config/role.rb +4 -4
  81. data/modules/mu/config/schema_helpers.rb +509 -0
  82. data/modules/mu/config/search_domain.rb +4 -4
  83. data/modules/mu/config/server.rb +97 -70
  84. data/modules/mu/config/server.yml +1 -0
  85. data/modules/mu/config/server_pool.rb +5 -9
  86. data/modules/mu/config/storage_pool.rb +1 -1
  87. data/modules/mu/config/tail.rb +200 -0
  88. data/modules/mu/config/user.rb +4 -4
  89. data/modules/mu/config/vpc.rb +70 -27
  90. data/modules/mu/config/vpc.yml +0 -1
  91. data/modules/mu/defaults/AWS.yaml +83 -60
  92. data/modules/mu/defaults/Azure.yaml +1 -0
  93. data/modules/mu/defaults/Google.yaml +3 -2
  94. data/modules/mu/deploy.rb +30 -26
  95. data/modules/mu/groomer.rb +17 -2
  96. data/modules/mu/groomers/ansible.rb +188 -41
  97. data/modules/mu/groomers/chef.rb +116 -55
  98. data/modules/mu/logger.rb +127 -148
  99. data/modules/mu/master.rb +389 -2
  100. data/modules/mu/master/chef.rb +3 -4
  101. data/modules/mu/master/ldap.rb +3 -3
  102. data/modules/mu/master/ssl.rb +12 -3
  103. data/modules/mu/mommacat.rb +217 -2612
  104. data/modules/mu/mommacat/daemon.rb +397 -0
  105. data/modules/mu/mommacat/naming.rb +473 -0
  106. data/modules/mu/mommacat/search.rb +495 -0
  107. data/modules/mu/mommacat/storage.rb +722 -0
  108. data/modules/mu/{clouds → providers}/README.md +1 -1
  109. data/modules/mu/{clouds → providers}/aws.rb +271 -112
  110. data/modules/mu/{clouds → providers}/aws/alarm.rb +5 -3
  111. data/modules/mu/{clouds → providers}/aws/bucket.rb +26 -22
  112. data/modules/mu/{clouds → providers}/aws/cache_cluster.rb +33 -67
  113. data/modules/mu/{clouds → providers}/aws/collection.rb +24 -23
  114. data/modules/mu/{clouds → providers}/aws/container_cluster.rb +681 -721
  115. data/modules/mu/providers/aws/database.rb +1744 -0
  116. data/modules/mu/{clouds → providers}/aws/dnszone.rb +64 -63
  117. data/modules/mu/{clouds → providers}/aws/endpoint.rb +22 -27
  118. data/modules/mu/{clouds → providers}/aws/firewall_rule.rb +214 -244
  119. data/modules/mu/{clouds → providers}/aws/folder.rb +7 -7
  120. data/modules/mu/{clouds → providers}/aws/function.rb +17 -22
  121. data/modules/mu/{clouds → providers}/aws/group.rb +23 -23
  122. data/modules/mu/{clouds → providers}/aws/habitat.rb +17 -14
  123. data/modules/mu/{clouds → providers}/aws/loadbalancer.rb +57 -48
  124. data/modules/mu/{clouds → providers}/aws/log.rb +15 -12
  125. data/modules/mu/{clouds → providers}/aws/msg_queue.rb +17 -16
  126. data/modules/mu/{clouds → providers}/aws/nosqldb.rb +18 -11
  127. data/modules/mu/{clouds → providers}/aws/notifier.rb +11 -6
  128. data/modules/mu/{clouds → providers}/aws/role.rb +112 -86
  129. data/modules/mu/{clouds → providers}/aws/search_domain.rb +39 -33
  130. data/modules/mu/{clouds → providers}/aws/server.rb +835 -1133
  131. data/modules/mu/{clouds → providers}/aws/server_pool.rb +56 -60
  132. data/modules/mu/{clouds → providers}/aws/storage_pool.rb +24 -42
  133. data/modules/mu/{clouds → providers}/aws/user.rb +21 -22
  134. data/modules/mu/{clouds → providers}/aws/userdata/README.md +0 -0
  135. data/modules/mu/{clouds → providers}/aws/userdata/linux.erb +0 -0
  136. data/modules/mu/{clouds → providers}/aws/userdata/windows.erb +2 -1
  137. data/modules/mu/{clouds → providers}/aws/vpc.rb +523 -929
  138. data/modules/mu/providers/aws/vpc_subnet.rb +286 -0
  139. data/modules/mu/{clouds → providers}/azure.rb +29 -9
  140. data/modules/mu/{clouds → providers}/azure/container_cluster.rb +3 -8
  141. data/modules/mu/{clouds → providers}/azure/firewall_rule.rb +18 -11
  142. data/modules/mu/{clouds → providers}/azure/habitat.rb +8 -6
  143. data/modules/mu/{clouds → providers}/azure/loadbalancer.rb +5 -5
  144. data/modules/mu/{clouds → providers}/azure/role.rb +8 -10
  145. data/modules/mu/{clouds → providers}/azure/server.rb +95 -48
  146. data/modules/mu/{clouds → providers}/azure/user.rb +6 -8
  147. data/modules/mu/{clouds → providers}/azure/userdata/README.md +0 -0
  148. data/modules/mu/{clouds → providers}/azure/userdata/linux.erb +0 -0
  149. data/modules/mu/{clouds → providers}/azure/userdata/windows.erb +0 -0
  150. data/modules/mu/{clouds → providers}/azure/vpc.rb +16 -21
  151. data/modules/mu/{clouds → providers}/cloudformation.rb +18 -7
  152. data/modules/mu/{clouds → providers}/cloudformation/alarm.rb +3 -3
  153. data/modules/mu/{clouds → providers}/cloudformation/cache_cluster.rb +3 -3
  154. data/modules/mu/{clouds → providers}/cloudformation/collection.rb +3 -3
  155. data/modules/mu/{clouds → providers}/cloudformation/database.rb +6 -17
  156. data/modules/mu/{clouds → providers}/cloudformation/dnszone.rb +3 -3
  157. data/modules/mu/{clouds → providers}/cloudformation/firewall_rule.rb +3 -3
  158. data/modules/mu/{clouds → providers}/cloudformation/loadbalancer.rb +3 -3
  159. data/modules/mu/{clouds → providers}/cloudformation/log.rb +3 -3
  160. data/modules/mu/{clouds → providers}/cloudformation/server.rb +7 -7
  161. data/modules/mu/{clouds → providers}/cloudformation/server_pool.rb +5 -5
  162. data/modules/mu/{clouds → providers}/cloudformation/vpc.rb +5 -7
  163. data/modules/mu/{clouds → providers}/docker.rb +0 -0
  164. data/modules/mu/{clouds → providers}/google.rb +67 -30
  165. data/modules/mu/{clouds → providers}/google/bucket.rb +13 -15
  166. data/modules/mu/{clouds → providers}/google/container_cluster.rb +84 -77
  167. data/modules/mu/{clouds → providers}/google/database.rb +10 -20
  168. data/modules/mu/{clouds → providers}/google/firewall_rule.rb +15 -14
  169. data/modules/mu/{clouds → providers}/google/folder.rb +20 -17
  170. data/modules/mu/{clouds → providers}/google/function.rb +139 -167
  171. data/modules/mu/{clouds → providers}/google/group.rb +29 -34
  172. data/modules/mu/{clouds → providers}/google/habitat.rb +21 -22
  173. data/modules/mu/{clouds → providers}/google/loadbalancer.rb +18 -20
  174. data/modules/mu/{clouds → providers}/google/role.rb +92 -58
  175. data/modules/mu/{clouds → providers}/google/server.rb +242 -155
  176. data/modules/mu/{clouds → providers}/google/server_pool.rb +25 -44
  177. data/modules/mu/{clouds → providers}/google/user.rb +95 -31
  178. data/modules/mu/{clouds → providers}/google/userdata/README.md +0 -0
  179. data/modules/mu/{clouds → providers}/google/userdata/linux.erb +0 -0
  180. data/modules/mu/{clouds → providers}/google/userdata/windows.erb +0 -0
  181. data/modules/mu/{clouds → providers}/google/vpc.rb +103 -79
  182. data/modules/tests/bucket.yml +4 -0
  183. data/modules/tests/centos6.yaml +11 -0
  184. data/modules/tests/centos7.yaml +11 -0
  185. data/modules/tests/centos8.yaml +12 -0
  186. data/modules/tests/ecs.yaml +23 -0
  187. data/modules/tests/includes-and-params.yaml +2 -1
  188. data/modules/tests/rds.yaml +108 -0
  189. data/modules/tests/regrooms/aws-iam.yaml +201 -0
  190. data/modules/tests/regrooms/bucket.yml +19 -0
  191. data/modules/tests/regrooms/rds.yaml +123 -0
  192. data/modules/tests/server-with-scrub-muisms.yaml +1 -0
  193. data/modules/tests/super_simple_bok.yml +1 -3
  194. data/modules/tests/win2k12.yaml +17 -5
  195. data/modules/tests/win2k16.yaml +25 -0
  196. data/modules/tests/win2k19.yaml +25 -0
  197. data/requirements.txt +1 -0
  198. data/spec/mu/clouds/azure_spec.rb +2 -2
  199. metadata +232 -154
  200. data/extras/image-generators/AWS/windows.yaml +0 -18
  201. data/modules/mu/clouds/aws/database.rb +0 -1985
@@ -35,7 +35,7 @@ module MU
  params = genParams

  MU.log "Creating ElasticSearch domain #{@config['domain_name']}", details: params
- resp = MU::Cloud::AWS.elasticsearch(region: @config['region'], credentials: @config['credentials']).create_elasticsearch_domain(params).domain_status
+ MU::Cloud::AWS.elasticsearch(region: @config['region'], credentials: @config['credentials']).create_elasticsearch_domain(params).domain_status

  tagDomain

@@ -57,11 +57,13 @@ module MU
  waitWhileProcessing # don't return until creation/updating is complete
  end

+ @cloud_desc_cache = nil
  # Wrapper for cloud_desc method that deals with finding the AWS
  # domain_name parameter, which isn't what we'd call ourselves if we had
  # our druthers.
- def cloud_desc
- if @config['domain_name']
+ def cloud_desc(use_cache: true)
+ return @cloud_desc_cache if @cloud_desc_cache and use_cache
+ @cloud_desc_cache = if @config['domain_name']
  MU::Cloud::AWS.elasticsearch(region: @config['region'], credentials: @config['credentials']).describe_elasticsearch_domain(
  domain_name: @config['domain_name']
  ).domain_status
@@ -72,6 +74,7 @@ module MU
  else
  raise MuError, "#{@mu_name} can't find its official Elasticsearch domain name!"
  end
+ @cloud_desc_cache
  end

  # Canonical Amazon Resource Number for this resource
@@ -117,25 +120,33 @@ module MU
  # @param region [String]: The cloud provider region
  # @return [void]
  def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {})
- list = MU::Cloud::AWS.elasticsearch(region: region).list_domain_names
+ MU.log "AWS::SearchDomain.cleanup: need to support flags['known']", MU::DEBUG, details: flags
+
+ list = MU::Cloud::AWS.elasticsearch(region: region, credentials: credentials).list_domain_names
  if list and list.domain_names and list.domain_names.size > 0
  names = list.domain_names.map { |d| d.domain_name }
  begin
  # why is this API so obnoxious?
  sample = names.slice!(0, (names.length >= 5 ? 5 : names.length))
- descs = MU::Cloud::AWS.elasticsearch(region: region).describe_elasticsearch_domains(domain_names: sample)
+ descs = MU::Cloud::AWS.elasticsearch(region: region, credentials: credentials).describe_elasticsearch_domains(domain_names: sample)

  descs.domain_status_list.each { |domain|
- tags = MU::Cloud::AWS.elasticsearch(region: region).list_tags(arn: domain.arn)
+ tags = MU::Cloud::AWS.elasticsearch(region: region, credentials: credentials).list_tags(arn: domain.arn)
+ deploy_match = false
+ master_match = false
  tags.tag_list.each { |tag|
  if tag.key == "MU-ID" and tag.value == MU.deploy_id
- MU.log "Deleting ElasticSearch Domain #{domain.domain_name}"
- if !noop
- MU::Cloud::AWS.elasticsearch(region: region).delete_elasticsearch_domain(domain_name: domain.domain_name)
- end
- break
+ deploy_match = true
+ elsif tag.key == "MU-MASTER-IP" and tag.value == MU.mu_public_ip
+ master_match = true
  end
  }
+ if deploy_match and (master_match or ignoremaster)
+ MU.log "Deleting ElasticSearch Domain #{domain.domain_name}"
+ if !noop
+ MU::Cloud::AWS.elasticsearch(region: region, credentials: credentials).delete_elasticsearch_domain(domain_name: domain.domain_name)
+ end
+ end
  }
  end while names.size > 0
  end
@@ -143,10 +154,10 @@ module MU
  unless noop
  marker = nil
  begin
- resp = MU::Cloud::AWS.iam.list_roles(marker: marker)
+ resp = MU::Cloud::AWS.iam(credentials: credentials).list_roles(marker: marker)
  resp.roles.each{ |role|
- # XXX Maybe we should have a more generic way to delete IAM profiles and policies. The call itself should be moved from MU::Cloud::AWS::Server.
- # MU::Cloud::AWS::Server.removeIAMProfile(role.role_name) if role.role_name.match(/^#{Regexp.quote(MU.deploy_id)}/)
+ # XXX Maybe we should have a more generic way to delete IAM profiles and policies. The call itself should be moved from MU::Cloud.resourceClass("AWS", "Server").
+ # MU::Cloud.resourceClass("AWS", "Server").removeIAMProfile(role.role_name) if role.role_name.match(/^#{Regexp.quote(MU.deploy_id)}/)
  }
  marker = resp.marker
  end while resp.is_truncated
@@ -181,14 +192,14 @@ module MU
  end

  # Cloud-specific configuration properties.
- # @param config [MU::Config]: The calling MU::Config object
+ # @param _config [MU::Config]: The calling MU::Config object
  # @return [Array<Array,Hash>]: List of required fields, and json-schema Hash of cloud-specific configuration parameters for this resource
- def self.schema(config)
+ def self.schema(_config)
  toplevel_required = ["elasticsearch_version", "instance_type"]

  versions = begin
  MU::Cloud::AWS.elasticsearch.list_elasticsearch_versions.elasticsearch_versions
- rescue MuError => e
+ rescue MuError
  ["7.1", "6.8", "6.7", "6.5", "6.4", "6.3", "6.2", "6.0", "5.6"]
  end
  instance_types = begin
@@ -367,9 +378,9 @@ module MU

  if dom['slow_logs']
  if configurator.haveLitterMate?(dom['slow_logs'], "log")
- dom['dependencies'] << { "name" => dom['slow_logs'], "type" => "log" }
+ MU::Config.addDependency(dom, dom['slow_logs'], "log")
  else
- log_group = MU::Cloud::AWS::Log.find(cloud_id: dom['slow_logs'], region: dom['region']).values.first
+ log_group = MU::Cloud.resourceClass("AWS", "Log").find(cloud_id: dom['slow_logs'], region: dom['region']).values.first
  if !log_group
  MU.log "Specified slow_logs CloudWatch log group '#{dom['slow_logs']}' in SearchDomain '#{dom['name']}' doesn't appear to exist", MU::ERR
  ok = false
@@ -384,7 +395,7 @@ module MU
  "credentials" => dom['credentials']
  }
  ok = false if !configurator.insertKitten(log_group, "logs")
- dom['dependencies'] << { "name" => dom['slow_logs'], "type" => "log" }
+ MU::Config.addDependency(dom, dom['slow_logs'], "log")
  end

  if dom['advanced_options']
@@ -398,7 +409,7 @@ module MU
  MU::Cloud::AWS.cognito_ident(region: dom['region']).describe_identity_pool(
  identity_pool_id: dom['cognito']['identity_pool_id']
  )
- rescue ::Aws::CognitoIdentity::Errors::ValidationException, Aws::CognitoIdentity::Errors::ResourceNotFoundException => e
+ rescue ::Aws::CognitoIdentity::Errors::ValidationException, Aws::CognitoIdentity::Errors::ResourceNotFoundException
  MU.log "Cognito identity pool #{dom['cognito']['identity_pool_id']} malformed or does not exist in SearchDomain '#{dom['name']}'", MU::ERR
  ok = false
  end
@@ -406,7 +417,7 @@ module MU
  MU::Cloud::AWS.cognito_user(region: dom['region']).describe_user_pool(
  user_pool_id: dom['cognito']['user_pool_id']
  )
- rescue ::Aws::CognitoIdentityProvider::Errors::InvalidParameterException, Aws::CognitoIdentityProvider::Errors::ResourceNotFoundException => e
+ rescue ::Aws::CognitoIdentityProvider::Errors::InvalidParameterException, Aws::CognitoIdentityProvider::Errors::ResourceNotFoundException
  MU.log "Cognito identity pool #{dom['cognito']['user_pool_id']} malformed or does not exist in SearchDomain '#{dom['name']}'", MU::ERR
  ok = false
  end
@@ -426,7 +437,7 @@ module MU
  if !found
  MU.log "IAM role #{dom['cognito']['role_arn']} exists, but not does have the AmazonESCognitoAccess policy attached. SearchDomain '#{dom['name']}' may not have necessary Cognito permissions.", MU::WARN
  end
- rescue Aws::IAM::Errors::NoSuchEntity => e
+ rescue Aws::IAM::Errors::NoSuchEntity
  MU.log "IAM role #{dom['cognito']['role_arn']} malformed or does not exist in SearchDomain '#{dom['name']}'", MU::ERR
  ok = false
  end
@@ -445,12 +456,7 @@ module MU
  ]
  }
  configurator.insertKitten(roledesc, "roles")
-
- dom['dependencies'] ||= []
- dom['dependencies'] << {
- "type" => "role",
- "name" => dom['name']+"cognitorole"
- }
+ MU::Config.addDependency(dom, dom['name']+"cognitorole", "role")
  end

  end
@@ -514,7 +520,7 @@ module MU
  arn = @config['slow_logs']
  else
  log_group = @deploy.findLitterMate(type: "log", name: @config['slow_logs'])
- log_group = MU::Cloud::AWS::Log.find(cloud_id: log_group.mu_name, region: log_group.cloudobj.config['region']).values.first
+ log_group = MU::Cloud.resourceClass("AWS", "Log").find(cloud_id: log_group.mu_name, region: log_group.cloudobj.config['region']).values.first
  if log_group.nil? or log_group.arn.nil?
  raise MuError, "Failed to retrieve ARN of sibling LogGroup '#{@config['slow_logs']}'"
  end
@@ -541,7 +547,7 @@ module MU
  params[:log_publishing_options]["SEARCH_SLOW_LOGS"] = {}
  params[:log_publishing_options]["SEARCH_SLOW_LOGS"][:enabled] = true
  params[:log_publishing_options]["SEARCH_SLOW_LOGS"][:cloud_watch_logs_log_group_arn] = arn
- MU::Cloud::AWS::Log.allowService("es.amazonaws.com", arn, @config['region'])
+ MU::Cloud.resourceClass("AWS", "Log").allowService("es.amazonaws.com", arn, @config['region'])
  end
  end

@@ -626,7 +632,7 @@ module MU
  # modify an existing group. AWS bug, workaround is to just apply
  # this in groom phase exclusively.
  if @config['cognito'] and !ext.nil?
- myrole = setIAMPolicies
+ setIAMPolicies

  if ext.nil? or !ext.cognito_options.enabled or
  ext.cognito_options.user_pool_id != @config['cognito']['user_pool_id'] or
@@ -682,7 +688,7 @@ module MU
  interval = 60

  begin
- resp = cloud_desc
+ resp = cloud_desc(use_cache: false)

  if (resp.endpoint.nil? or resp.endpoint.empty?) and
  (resp.endpoints.nil? or resp.endpoints.empty?) and
@@ -145,7 +145,7 @@ module MU
  raise MuError, "My second argument should be a hash of variables to pass into ERB templates"
  end
  $mu = OpenStruct.new(template_variables)
- userdata_dir = File.expand_path(MU.myRoot+"/modules/mu/clouds/aws/userdata")
+ userdata_dir = File.expand_path(MU.myRoot+"/modules/mu/providers/aws/userdata")
  platform = "linux" if %w{centos centos6 centos7 ubuntu ubuntu14 rhel rhel7 rhel71 amazon}.include? platform
  platform = "windows" if %w{win2k12r2 win2k12 win2k8 win2k8r2 win2k16}.include? platform
  erbfile = "#{userdata_dir}/#{platform}.erb"
@@ -212,7 +212,7 @@ module MU
  vol_id = attachment.volume_id
  vol_dev = attachment.device
  if vol_parent == instance_id and (vol_dev == device or device.nil?)
- MU::MommaCat.createTag(vol_id, tag_name, tag_value, region: region, credentials: credentials)
+ MU::Cloud::AWS.createTag(vol_id, tag_name, tag_value, region: region, credentials: credentials)
  break
  end
  }
@@ -240,11 +240,17 @@ module MU
  end
  MU::MommaCat.unlock(instance.instance_id+"-create")
  else
- MU::Cloud::AWS.createStandardTags(instance.instance_id, region: @config['region'], credentials: @config['credentials'])
- MU::MommaCat.createTag(instance.instance_id, "Name", @mu_name, region: @config['region'], credentials: @config['credentials'])
+ MU::Cloud::AWS.createStandardTags(
+ instance.instance_id,
+ region: @config['region'],
+ credentials: @config['credentials'],
+ optional: @config['optional_tags'],
+ nametag: @mu_name,
+ othertags: @config['tags']
+ )
  end
  done = true
- rescue Exception => e
+ rescue StandardError => e
  if !instance.nil? and !done
  MU.log "Aborted before I could finish setting up #{@config['name']}, cleaning it up. Stack trace will print once cleanup is complete.", MU::WARN if !@deploy.nocleanup
  MU::MommaCat.unlockAll
@@ -262,15 +268,11 @@ module MU
  return @config
  end

-
-
  # Create an Amazon EC2 instance.
  def createEc2Instance
- name = @config["name"]
- node = @config['mu_name']

  instance_descriptor = {
- :image_id => @config["ami_id"],
+ :image_id => @config["image_id"],
  :key_name => @deploy.ssh_key_name,
  :instance_type => @config["size"],
  :disable_api_termination => true,
@@ -278,64 +280,26 @@ module MU
  :max_count => 1
  }

- arn = nil
- if @config['generate_iam_role']
- role = @deploy.findLitterMate(name: @config['name'], type: "roles")
- s3_objs = ["#{@deploy.deploy_id}-secret", "#{role.mu_name}.pfx", "#{role.mu_name}.crt", "#{role.mu_name}.key", "#{role.mu_name}-winrm.crt", "#{role.mu_name}-winrm.key"].map { |file|
- 'arn:'+(MU::Cloud::AWS.isGovCloud?(@config['region']) ? "aws-us-gov" : "aws")+':s3:::'+MU::Cloud::AWS.adminBucketName(@credentials)+'/'+file
- }
- MU.log "Adding S3 read permissions to #{@mu_name}'s IAM profile", MU::NOTICE, details: s3_objs
- role.cloudobj.injectPolicyTargets("MuSecrets", s3_objs)
-
- @config['iam_role'] = role.mu_name
- arn = role.cloudobj.createInstanceProfile
- # @cfm_role_name, @cfm_prof_name
-
- elsif @config['iam_role'].nil?
- raise MuError, "#{@mu_name} has generate_iam_role set to false, but no iam_role assigned."
- end
- if !@config["iam_role"].nil?
- if arn
- instance_descriptor[:iam_instance_profile] = {arn: arn}
- else
- instance_descriptor[:iam_instance_profile] = {name: @config["iam_role"]}
- end
- end
-
- security_groups = []
- if @dependencies.has_key?("firewall_rule")
- @dependencies['firewall_rule'].values.each { |sg|
- security_groups << sg.cloud_id
- }
- end
+ instance_descriptor[:iam_instance_profile] = getIAMProfile

+ security_groups = myFirewallRules.map { |fw| fw.cloud_id }
  if security_groups.size > 0
  instance_descriptor[:security_group_ids] = security_groups
  else
  raise MuError, "Didn't get any security groups assigned to be in #{@mu_name}, that shouldn't happen"
  end

- if !@config['private_ip'].nil?
+ if @config['private_ip']
  instance_descriptor[:private_ip_address] = @config['private_ip']
  end

- vpc_id = subnet = nil
  if !@vpc.nil? and @config.has_key?("vpc")
- subnet_conf = @config['vpc']
- subnet_conf = @config['vpc']['subnets'].first if @config['vpc'].has_key?("subnets") and !@config['vpc']['subnets'].empty?
- tag_key, tag_value = subnet_conf['tag'].split(/=/, 2) if !subnet_conf['tag'].nil?
-
- subnet = @vpc.getSubnet(
- cloud_id: subnet_conf['subnet_id'],
- name: subnet_conf['subnet_name'],
- tag_key: tag_key,
- tag_value: tag_value
- )
+ subnet = mySubnets.sample
  if subnet.nil?
- raise MuError, "Got null subnet id out of #{subnet_conf['vpc']}"
+ raise MuError, "Got null subnet id out of #{@config['vpc']}"
  end
- MU.log "Deploying #{node} into VPC #{@vpc.cloud_id} Subnet #{subnet.cloud_id}"
- punchAdminNAT
+ MU.log "Deploying #{@mu_name} into VPC #{@vpc.cloud_id} Subnet #{subnet.cloud_id}"
+ allowBastionAccess
  instance_descriptor[:subnet_id] = subnet.cloud_id
  end

@@ -343,38 +307,10 @@ module MU
  instance_descriptor[:user_data] = Base64.encode64(@userdata)
  end

- MU::Cloud::AWS::Server.waitForAMI(@config["ami_id"], region: @config['region'], credentials: @config['credentials'])
+ MU::Cloud::AWS::Server.waitForAMI(@config["image_id"], region: @config['region'], credentials: @config['credentials'])

- # Figure out which devices are embedded in the AMI already.
- image = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_images(image_ids: [@config["ami_id"]]).images.first
- ext_disks = {}
- if !image.block_device_mappings.nil?
- image.block_device_mappings.each { |disk|
- if !disk.device_name.nil? and !disk.device_name.empty? and !disk.ebs.nil? and !disk.ebs.empty?
- ext_disks[disk.device_name] = MU.structToHash(disk.ebs)
- end
- }
- end
-
- configured_storage = Array.new
- cfm_volume_map = {}
- if @config["storage"]
- @config["storage"].each { |vol|
- # Drop the "encrypted" flag if a snapshot for this device exists
- # in the AMI, even if they both agree about the value of said
- # flag. Apparently that's a thing now.
- if ext_disks.has_key?(vol["device"])
- if ext_disks[vol["device"]].has_key?(:snapshot_id)
- vol.delete("encrypted")
- end
- end
- mapping, cfm_mapping = MU::Cloud::AWS::Server.convertBlockDeviceMapping(vol)
- configured_storage << mapping
- }
- end
+ instance_descriptor[:block_device_mappings] = MU::Cloud::AWS::Server.configureBlockDevices(image_id: @config["image_id"], storage: @config['storage'], region: @config['region'], credentials: @credentials)

- instance_descriptor[:block_device_mappings] = configured_storage
- instance_descriptor[:block_device_mappings].concat(@ephemeral_mappings)
  instance_descriptor[:monitoring] = {enabled: @config['monitoring']}

  if @tags and @tags.size > 0
@@ -386,37 +322,24 @@ module MU
  }]
  end

- MU.log "Creating EC2 instance #{node}"
- MU.log "Instance details for #{node}: #{instance_descriptor}", MU::DEBUG
- # if instance_descriptor[:block_device_mappings].empty?
- # instance_descriptor.delete(:block_device_mappings)
- # end
+ MU.log "Creating EC2 instance #{@mu_name}", details: instance_descriptor

- retries = 0
- instance = begin
- response = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).run_instances(instance_descriptor)
- if response and response.instances and response.instances.size > 0
- instance = response.instances.first
- else
- MU.log "halp", MU::ERR, details: response
- end
+ instance = resp = nil
+ loop_if = Proc.new {
+ instance = resp.instances.first if resp and resp.instances
+ resp.nil? or resp.instances.nil? or instance.nil?
+ }
+
+ begin
+ MU.retrier([Aws::EC2::Errors::InvalidGroupNotFound, Aws::EC2::Errors::InvalidSubnetIDNotFound, Aws::EC2::Errors::InvalidParameterValue], loop_if: loop_if, loop_msg: "Waiting for run_instances to return #{@mu_name}") {
+ resp = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).run_instances(instance_descriptor)
+ }
  rescue Aws::EC2::Errors::InvalidRequest => e
  MU.log e.message, MU::ERR, details: instance_descriptor
  raise e
- rescue Aws::EC2::Errors::InvalidGroupNotFound, Aws::EC2::Errors::InvalidSubnetIDNotFound, Aws::EC2::Errors::InvalidParameterValue => e
- if retries < 10
- if retries > 7
- MU.log "Seeing #{e.inspect} while trying to launch #{node}, retrying a few more times...", MU::WARN, details: instance_descriptor
- end
- sleep 10
- retries = retries + 1
- retry
- else
- raise MuError, e.inspect
- end
  end

- MU.log "#{node} (#{instance.instance_id}) coming online"
+ MU.log "#{@mu_name} (#{instance.instance_id}) coming online"
  instance

  end
@@ -446,7 +369,7 @@ module MU
  instance_ids: [@cloud_id]
  )
  MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).wait_until(:instance_stopped, instance_ids: [@cloud_id]) do |waiter|
- waiter.before_attempt do |attempts|
+ waiter.before_attempt do
  MU.log "Waiting for #{@mu_name} to stop for hard reboot"
  end
  end
@@ -476,14 +399,13 @@ module MU
  # Figure out what's needed to SSH into this server.
  # @return [Array<String>]: nat_ssh_key, nat_ssh_user, nat_ssh_host, canonical_ip, ssh_user, ssh_key_name, alternate_names
  def getSSHConfig
- node, config, deploydata = describe(cloud_id: @cloud_id)
+ cloud_desc(use_cache: false) # make sure we're current
  # XXX add some awesome alternate names from metadata and make sure they end
  # up in MU::MommaCat's ssh config wangling
- ssh_keydir = Etc.getpwuid(Process.uid).dir+"/.ssh"
  return nil if @config.nil? or @deploy.nil?

  nat_ssh_key = nat_ssh_user = nat_ssh_host = nil
- if !@config["vpc"].nil? and !MU::Cloud::AWS::VPC.haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials'])
+ if !@config["vpc"].nil? and !MU::Cloud.resourceClass("AWS", "VPC").haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials'])
  if !@nat.nil?
  if @nat.is_a?(Struct) && @nat.nat_gateway_id && @nat.nat_gateway_id.start_with?("nat-")
  raise MuError, "Configured to use NAT Gateway, but I have no route to instance. Either use Bastion, or configure VPC peering"
@@ -521,450 +443,81 @@ module MU
521
443
  # Apply tags, bootstrap our configuration management, and other
522
444
  # administravia for a new instance.
523
445
  def postBoot(instance_id = nil)
524
- if !instance_id.nil?
525
- @cloud_id = instance_id
526
- end
527
- node, config, deploydata = describe(cloud_id: @cloud_id)
528
- instance = cloud_desc
529
- raise MuError, "Couldn't find instance #{@mu_name} (#{@cloud_id})" if !instance
530
- @cloud_id = instance.instance_id
531
- return false if !MU::MommaCat.lock(instance.instance_id+"-orchestrate", true)
532
- return false if !MU::MommaCat.lock(instance.instance_id+"-groom", true)
533
-
534
- MU::Cloud::AWS.createStandardTags(instance.instance_id, region: @config['region'], credentials: @config['credentials'])
535
- MU::MommaCat.createTag(instance.instance_id, "Name", node, region: @config['region'], credentials: @config['credentials'])
536
-
537
- if @config['optional_tags']
538
- MU::MommaCat.listOptionalTags.each { |key, value|
539
- MU::MommaCat.createTag(instance.instance_id, key, value, region: @config['region'], credentials: @config['credentials'])
540
- }
541
- end
446
+ @cloud_id ||= instance_id
447
+ _node, _config, deploydata = describe(cloud_id: @cloud_id)
448
+
449
+ raise MuError, "Couldn't find instance #{@mu_name} (#{@cloud_id})" if !cloud_desc
450
+ return false if !MU::MommaCat.lock(@cloud_id+"-orchestrate", true)
451
+ return false if !MU::MommaCat.lock(@cloud_id+"-groom", true)
452
+ finish = Proc.new { |status|
453
+ MU::MommaCat.unlock(@cloud_id+"-orchestrate")
454
+ MU::MommaCat.unlock(@cloud_id+"-groom")
455
+ return status
456
+ }
542
457
 
543
- if !@config['tags'].nil?
544
- @config['tags'].each { |tag|
545
- MU::MommaCat.createTag(instance.instance_id, tag['key'], tag['value'], region: @config['region'], credentials: @config['credentials'])
546
- }
547
- end
548
- MU.log "Tagged #{node} (#{instance.instance_id}) with MU-ID=#{MU.deploy_id}", MU::DEBUG
458
+ MU::Cloud::AWS.createStandardTags(
459
+ @cloud_id,
460
+ region: @config['region'],
461
+ credentials: @config['credentials'],
462
+ optional: @config['optional_tags'],
463
+ nametag: @mu_name,
464
+ othertags: @config['tags']
465
+ )
549
466
 
550
467
  # Make double sure we don't lose a cached mu_windows_name value.
551
- if windows? or !@config['active_directory'].nil?
552
- if @mu_windows_name.nil?
553
- @mu_windows_name = deploydata['mu_windows_name']
554
- end
468
+ if (windows? or !@config['active_directory'].nil?)
469
+ @mu_windows_name ||= deploydata['mu_windows_name']
555
470
  end
556
471
 
557
- retries = -1
558
- max_retries = 30
559
- begin
560
- if instance.nil? or instance.state.name != "running"
561
- retries = retries + 1
562
- if !instance.nil? and instance.state.name == "terminated"
563
- raise MuError, "#{@cloud_id} appears to have been terminated mid-bootstrap!"
564
- end
565
- if retries % 3 == 0
566
- MU.log "Waiting for EC2 instance #{node} (#{@cloud_id}) to be ready...", MU::NOTICE
567
- end
568
- sleep 40
569
- # Get a fresh AWS descriptor
570
- instance = MU::Cloud::Server.find(cloud_id: @cloud_id, region: @config['region'], credentials: @config['credentials']).values.first
571
- if instance and instance.state.name == "terminated"
572
- raise MuError, "EC2 instance #{node} (#{@cloud_id}) terminating during bootstrap!"
573
- end
472
+ loop_if = Proc.new {
473
+ !cloud_desc(use_cache: false) or cloud_desc.state.name != "running"
474
+ }
475
+ MU.retrier([Aws::EC2::Errors::ServiceError], max: 30, wait: 40, loop_if: loop_if) { |retries, _wait|
476
+ if cloud_desc and cloud_desc.state.name == "terminated"
477
+ raise MuError, "#{@cloud_id} appears to have been terminated mid-bootstrap!"
574
478
  end
575
- rescue Aws::EC2::Errors::ServiceError => e
576
- if retries < max_retries
577
- MU.log "Got #{e.inspect} during initial instance creation of #{@cloud_id}, retrying...", MU::NOTICE, details: instance
578
- retries = retries + 1
579
- retry
580
- else
581
- raise MuError, "Too many retries creating #{node} (#{e.inspect})"
479
+ if retries % 3 == 0
480
+ MU.log "Waiting for EC2 instance #{@mu_name} (#{@cloud_id}) to be ready...", MU::NOTICE
582
481
  end
583
- end while instance.nil? or (instance.state.name != "running" and retries < max_retries)
584
-
585
- punchAdminNAT
586
-
587
-
588
- # If we came up via AutoScale, the Alarm module won't have had our
589
- # instance ID to associate us with itself. So invoke that here.
590
- # XXX might be possible to do this with regular alarm resources and
591
- # dependencies now
592
- if !@config['basis'].nil? and @config["alarms"] and !@config["alarms"].empty?
593
- @config["alarms"].each { |alarm|
594
- alarm_obj = MU::MommaCat.findStray(
595
- "AWS",
596
- "alarms",
597
- region: @config["region"],
598
- deploy_id: @deploy.deploy_id,
599
- name: alarm['name']
600
- ).first
601
- alarm["dimensions"] = [{:name => "InstanceId", :value => @cloud_id}]
602
-
603
- if alarm["enable_notifications"]
604
- topic_arn = MU::Cloud::AWS::Notification.createTopic(alarm["notification_group"], region: @config["region"], credentials: @config['credentials'])
605
- MU::Cloud::AWS::Notification.subscribe(arn: topic_arn, protocol: alarm["notification_type"], endpoint: alarm["notification_endpoint"], region: @config["region"], credentials: @config["credentials"])
606
- alarm["alarm_actions"] = [topic_arn]
607
- alarm["ok_actions"] = [topic_arn]
608
- end
482
+ }
609
483
 
610
- alarm_name = alarm_obj ? alarm_obj.cloud_id : "#{node}-#{alarm['name']}".upcase
611
-
612
- MU::Cloud::AWS::Alarm.setAlarm(
613
- name: alarm_name,
614
- ok_actions: alarm["ok_actions"],
615
- alarm_actions: alarm["alarm_actions"],
616
- insufficient_data_actions: alarm["no_data_actions"],
617
- metric_name: alarm["metric_name"],
618
- namespace: alarm["namespace"],
619
- statistic: alarm["statistic"],
620
- dimensions: alarm["dimensions"],
621
- period: alarm["period"],
622
- unit: alarm["unit"],
623
- evaluation_periods: alarm["evaluation_periods"],
624
- threshold: alarm["threshold"],
625
- comparison_operator: alarm["comparison_operator"],
626
- region: @config["region"],
627
- credentials: @config['credentials']
628
- )
629
- }
630
- end
484
+ allowBastionAccess
631
485
 
632
- # We have issues sometimes where our dns_records are pointing at the wrong node name and IP address.
633
- # Make sure that doesn't happen. Happens with server pools only
634
- if @config['dns_records'] && !@config['dns_records'].empty?
635
- @config['dns_records'].each { |dnsrec|
636
- if dnsrec.has_key?("name")
637
- if dnsrec['name'].start_with?(MU.deploy_id.downcase) && !dnsrec['name'].start_with?(node.downcase)
638
- MU.log "DNS records for #{node} seem to be wrong, deleting from current config", MU::WARN, details: dnsrec
639
- dnsrec.delete('name')
640
- dnsrec.delete('target')
641
- end
642
- end
643
- }
644
- end
486
+ setAlarms
645
487
 
646
488
  # Unless we're planning on associating a different IP later, set up a
647
489
  # DNS entry for this thing and let it sync in the background. We'll come
648
490
  # back to it later.
649
- if @config['static_ip'].nil? && !@named
491
+ if @config['static_ip'].nil? and !@named
650
492
  MU::MommaCat.nameKitten(self)
651
493
  @named = true
652
494
  end
653
495
 
654
496
  if !@config['src_dst_check'] and !@config["vpc"].nil?
655
- MU.log "Disabling source_dest_check #{node} (making it NAT-worthy)"
497
+ MU.log "Disabling source_dest_check #{@mu_name} (making it NAT-worthy)"
656
498
  MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).modify_instance_attribute(
657
- instance_id: @cloud_id,
658
- source_dest_check: {:value => false}
499
+ instance_id: @cloud_id,
500
+ source_dest_check: { value: false }
659
501
  )
660
502
  end
661
503
 
662
504
  # Set console termination protection. Autoscale nodes won't set this
663
505
  # by default.
664
506
  MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).modify_instance_attribute(
665
- instance_id: @cloud_id,
666
- disable_api_termination: {:value => true}
507
+ instance_id: @cloud_id,
508
+ disable_api_termination: { value: true}
667
509
  )
668
510
 
669
- has_elastic_ip = false
670
- if !instance.public_ip_address.nil?
671
- begin
672
- resp = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_addresses(public_ips: [instance.public_ip_address])
673
- if resp.addresses.size > 0 and resp.addresses.first.instance_id == @cloud_id
674
- has_elastic_ip = true
675
- end
676
- rescue Aws::EC2::Errors::InvalidAddressNotFound => e
677
- # XXX this is ok to ignore, it means the public IP isn't Elastic
678
- end
679
- end
680
-
681
- win_admin_password = nil
682
- ec2config_password = nil
683
- sshd_password = nil
684
- if windows?
685
- ssh_keydir = "#{Etc.getpwuid(Process.uid).dir}/.ssh"
686
- ssh_key_name = @deploy.ssh_key_name
687
-
688
- if @config['use_cloud_provider_windows_password']
689
- win_admin_password = getWindowsAdminPassword
690
- elsif @config['windows_auth_vault'] && !@config['windows_auth_vault'].empty?
691
- if @config["windows_auth_vault"].has_key?("password_field")
692
- win_admin_password = @groomer.getSecret(
693
- vault: @config['windows_auth_vault']['vault'],
694
- item: @config['windows_auth_vault']['item'],
695
- field: @config["windows_auth_vault"]["password_field"]
696
- )
697
- else
698
- win_admin_password = getWindowsAdminPassword
699
- end
700
-
701
- if @config["windows_auth_vault"].has_key?("ec2config_password_field")
702
- ec2config_password = @groomer.getSecret(
703
- vault: @config['windows_auth_vault']['vault'],
704
- item: @config['windows_auth_vault']['item'],
705
- field: @config["windows_auth_vault"]["ec2config_password_field"]
706
- )
707
- end
708
-
709
- if @config["windows_auth_vault"].has_key?("sshd_password_field")
710
- sshd_password = @groomer.getSecret(
711
- vault: @config['windows_auth_vault']['vault'],
712
- item: @config['windows_auth_vault']['item'],
713
- field: @config["windows_auth_vault"]["sshd_password_field"]
714
- )
715
- end
716
- end
717
-
718
- win_admin_password = MU.generateWindowsPassword if win_admin_password.nil?
719
- ec2config_password = MU.generateWindowsPassword if ec2config_password.nil?
720
- sshd_password = MU.generateWindowsPassword if sshd_password.nil?
721
-
722
- # We're creating the vault here so when we run
723
- # MU::Cloud::Server.initialSSHTasks and we need to set the Windows
724
- # Admin password we can grab it from said vault.
725
- creds = {
726
- "username" => @config['windows_admin_username'],
727
- "password" => win_admin_password,
728
- "ec2config_username" => "ec2config",
729
- "ec2config_password" => ec2config_password,
730
- "sshd_username" => "sshd_service",
731
- "sshd_password" => sshd_password
732
- }
733
- @groomer.saveSecret(vault: @mu_name, item: "windows_credentials", data: creds, permissions: "name:#{@mu_name}")
734
- end
735
-
736
- subnet = nil
737
- if !@vpc.nil? and @config.has_key?("vpc") and !instance.subnet_id.nil?
738
- subnet = @vpc.getSubnet(
739
- cloud_id: instance.subnet_id
740
- )
741
- if subnet.nil?
742
- raise MuError, "Got null subnet id out of #{@config['vpc']} when asking for #{instance.subnet_id}"
743
- end
744
- end
745
-
746
- if !subnet.nil?
747
- if !subnet.private? or (!@config['static_ip'].nil? and !@config['static_ip']['assign_ip'].nil?)
748
- if !@config['static_ip'].nil?
749
- if !@config['static_ip']['ip'].nil?
750
- public_ip = MU::Cloud::AWS::Server.associateElasticIp(instance.instance_id, classic: false, ip: @config['static_ip']['ip'])
751
- elsif !has_elastic_ip
752
- public_ip = MU::Cloud::AWS::Server.associateElasticIp(instance.instance_id)
753
- end
754
- end
755
- end
756
-
757
- nat_ssh_key, nat_ssh_user, nat_ssh_host, canonical_ip, ssh_user, ssh_key_name = getSSHConfig
758
- if subnet.private? and !nat_ssh_host and !MU::Cloud::AWS::VPC.haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials'])
759
- raise MuError, "#{node} is in a private subnet (#{subnet}), but has no bastion host configured, and I have no other route to it"
760
- end
761
-
762
- # If we've asked for additional subnets (and this @config is not a
763
- # member of a Server Pool, which has different semantics), create
764
- # extra interfaces to accomodate.
765
- if !@config['vpc']['subnets'].nil? and @config['basis'].nil?
766
- device_index = 1
767
- @vpc.subnets.each { |s|
768
- subnet_id = s.cloud_id
769
- MU.log "Adding network interface on subnet #{subnet_id} for #{node}"
770
- iface = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).create_network_interface(subnet_id: subnet_id).network_interface
771
- MU::Cloud::AWS.createStandardTags(iface.network_interface_id, region: @config['region'], credentials: @config['credentials'])
772
- MU::MommaCat.createTag(iface.network_interface_id, "Name", node+"-ETH"+device_index.to_s, region: @config['region'], credentials: @config['credentials'])
773
-
774
- if @config['optional_tags']
775
- MU::MommaCat.listOptionalTags.each { |key, value|
776
- MU::MommaCat.createTag(iface.network_interface_id, key, value, region: @config['region'], credentials: @config['credentials'])
777
- }
778
- end
779
-
780
- if !@config['tags'].nil?
781
- @config['tags'].each { |tag|
782
- MU::MommaCat.createTag(iface.network_interface_id, tag['key'], tag['value'], region: @config['region'], credentials: @config['credentials'])
783
- }
784
- end
785
-
786
- MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).attach_network_interface(
787
- network_interface_id: iface.network_interface_id,
788
- instance_id: instance.instance_id,
789
- device_index: device_index
790
- )
791
- device_index = device_index + 1
792
- }
793
- end
794
- elsif !@config['static_ip'].nil?
795
- if !@config['static_ip']['ip'].nil?
796
- public_ip = MU::Cloud::AWS::Server.associateElasticIp(instance.instance_id, classic: true, ip: @config['static_ip']['ip'])
797
- elsif !has_elastic_ip
798
- public_ip = MU::Cloud::AWS::Server.associateElasticIp(instance.instance_id, classic: true)
799
- end
800
- end
801
-
511
+ tagVolumes
512
+ configureNetworking
513
+ saveCredentials
802
514
 
803
515
  if !@config['image_then_destroy']
804
516
  notify
805
517
  end
806
518
 
807
- MU.log "EC2 instance #{node} has id #{instance.instance_id}", MU::DEBUG
808
-
809
- @config["private_dns_name"] = instance.private_dns_name
810
- @config["public_dns_name"] = instance.public_dns_name
811
- @config["private_ip_address"] = instance.private_ip_address
812
- @config["public_ip_address"] = instance.public_ip_address
813
-
814
- ext_mappings = MU.structToHash(instance.block_device_mappings)
815
-
816
- # Root disk on standard CentOS AMI
817
- # tagVolumes(instance.instance_id, "/dev/sda", "Name", "ROOT-"+MU.deploy_id+"-"+@config["name"].upcase)
818
- # Root disk on standard Ubuntu AMI
819
- # tagVolumes(instance.instance_id, "/dev/sda1", "Name", "ROOT-"+MU.deploy_id+"-"+@config["name"].upcase)
820
-
821
- # Generic deploy ID tag
822
- # tagVolumes(instance.instance_id)
823
-
824
- # Tag volumes with all our standard tags.
825
- # Maybe replace tagVolumes with this? There is one more place tagVolumes is called from
826
- volumes = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_volumes(filters: [name: "attachment.instance-id", values: [instance.instance_id]])
827
- volumes.each { |vol|
828
- vol.volumes.each { |volume|
829
- volume.attachments.each { |attachment|
830
- MU::MommaCat.listStandardTags.each_pair { |key, value|
831
- MU::MommaCat.createTag(attachment.volume_id, key, value, region: @config['region'], credentials: @config['credentials'])
832
-
833
- if attachment.device == "/dev/sda" or attachment.device == "/dev/sda1"
834
- MU::MommaCat.createTag(attachment.volume_id, "Name", "ROOT-#{MU.deploy_id}-#{@config["name"].upcase}", region: @config['region'], credentials: @config['credentials'])
835
- else
836
- MU::MommaCat.createTag(attachment.volume_id, "Name", "#{MU.deploy_id}-#{@config["name"].upcase}-#{attachment.device.upcase}", region: @config['region'], credentials: @config['credentials'])
837
- end
838
- }
839
-
840
- if @config['optional_tags']
841
- MU::MommaCat.listOptionalTags.each { |key, value|
842
- MU::MommaCat.createTag(attachment.volume_id, key, value, region: @config['region'], credentials: @config['credentials'])
843
- }
844
- end
845
-
846
- if @config['tags']
847
- @config['tags'].each { |tag|
848
- MU::MommaCat.createTag(attachment.volume_id, tag['key'], tag['value'], region: @config['region'], credentials: @config['credentials'])
849
- }
850
- end
851
- }
852
- }
853
- }
854
-
855
- canonical_name = instance.public_dns_name
856
- canonical_name = instance.private_dns_name if !canonical_name or nat_ssh_host != nil
857
- @config['canonical_name'] = canonical_name
858
-
859
- if !@config['add_private_ips'].nil?
860
- instance.network_interfaces.each { |int|
861
- if int.private_ip_address == instance.private_ip_address and int.private_ip_addresses.size < (@config['add_private_ips'] + 1)
862
- MU.log "Adding #{@config['add_private_ips']} extra private IP addresses to #{instance.instance_id}"
863
- MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).assign_private_ip_addresses(
864
- network_interface_id: int.network_interface_id,
865
- secondary_private_ip_address_count: @config['add_private_ips'],
866
- allow_reassignment: false
867
- )
868
- end
869
- }
870
- notify
871
- end
872
-
873
- begin
874
- if @config['groom'].nil? or @config['groom']
875
- if windows?
876
- # kick off certificate generation early; WinRM will need it
877
- cert, key = @deploy.nodeSSLCerts(self)
878
- if @config.has_key?("basis")
879
- @deploy.nodeSSLCerts(self, true)
880
- end
881
- if !@groomer.haveBootstrapped?
882
- session = getWinRMSession(50, 60, reboot_on_problems: true)
883
- initialWinRMTasks(session)
884
- begin
885
- session.close
886
- rescue Exception
887
- # this is allowed to fail- we're probably rebooting anyway
888
- end
889
- else # for an existing Windows node: WinRM, then SSH if it fails
890
- begin
891
- session = getWinRMSession(1, 60)
892
- rescue Exception # yeah, yeah
893
- session = getSSHSession(1, 60)
894
- # XXX maybe loop at least once if this also fails?
895
- end
896
- end
897
- else
898
- session = getSSHSession(40, 30)
899
- initialSSHTasks(session)
900
- end
901
- end
902
- rescue BootstrapTempFail
903
- sleep 45
904
- retry
905
- ensure
906
- session.close if !session.nil? and !windows?
907
- end
908
-
909
- if @config["existing_deploys"] && !@config["existing_deploys"].empty?
910
- @config["existing_deploys"].each { |ext_deploy|
911
- if ext_deploy["cloud_id"]
912
- found = MU::MommaCat.findStray(
913
- @config['cloud'],
914
- ext_deploy["cloud_type"],
915
- cloud_id: ext_deploy["cloud_id"],
916
- region: @config['region'],
917
- dummy_ok: false
918
- ).first
919
-
920
- MU.log "Couldn't find existing resource #{ext_deploy["cloud_id"]}, #{ext_deploy["cloud_type"]}", MU::ERR if found.nil?
921
- @deploy.notify(ext_deploy["cloud_type"], found.config["name"], found.deploydata, mu_name: found.mu_name, triggering_node: @mu_name)
922
- elsif ext_deploy["mu_name"] && ext_deploy["deploy_id"]
923
- MU.log "#{ext_deploy["mu_name"]} / #{ext_deploy["deploy_id"]}"
924
- found = MU::MommaCat.findStray(
925
- @config['cloud'],
926
- ext_deploy["cloud_type"],
927
- deploy_id: ext_deploy["deploy_id"],
928
- mu_name: ext_deploy["mu_name"],
929
- region: @config['region'],
930
- dummy_ok: false
931
- ).first
932
-
933
- MU.log "Couldn't find existing resource #{ext_deploy["mu_name"]}/#{ext_deploy["deploy_id"]}, #{ext_deploy["cloud_type"]}", MU::ERR if found.nil?
934
- @deploy.notify(ext_deploy["cloud_type"], found.config["name"], found.deploydata, mu_name: ext_deploy["mu_name"], triggering_node: @mu_name)
935
- else
936
- MU.log "Trying to find existing deploy, but either the cloud_id is not valid or no mu_name and deploy_id where provided", MU::ERR
937
- end
938
- }
939
- end
940
-
941
- # See if this node already exists in our config management. If it does,
942
- # we're done.
943
- if MU.inGem?
944
- MU.log "Deploying from a gem, not grooming"
945
- MU::MommaCat.unlock(instance.instance_id+"-orchestrate")
946
- MU::MommaCat.unlock(instance.instance_id+"-groom")
947
-
948
- return true
949
- elsif @groomer.haveBootstrapped?
950
- MU.log "Node #{node} has already been bootstrapped, skipping groomer setup.", MU::NOTICE
951
-
952
- if @config['groom'].nil? or @config['groom']
953
- @groomer.saveDeployData
954
- end
955
-
956
- MU::MommaCat.unlock(instance.instance_id+"-orchestrate")
957
- MU::MommaCat.unlock(instance.instance_id+"-groom")
958
- return true
959
- end
960
-
961
- begin
962
- @groomer.bootstrap if @config['groom'].nil? or @config['groom']
963
- rescue MU::Groomer::RunError
964
- MU::MommaCat.unlock(instance.instance_id+"-groom")
965
- MU::MommaCat.unlock(instance.instance_id+"-orchestrate")
966
- return false
967
- end
519
+ getIAMProfile
520
+ finish.call(false) if !bootstrapGroomer
968
521
 
969
522
  # Make sure we got our name written everywhere applicable
970
523
  if !@named
@@ -972,149 +525,83 @@ module MU
972
525
  @named = true
973
526
  end
974
527
 
975
- MU::MommaCat.unlock(instance.instance_id+"-groom")
976
- MU::MommaCat.unlock(instance.instance_id+"-orchestrate")
977
- return true
978
- end
979
-
980
- # postBoot
528
+ finish.call(true)
529
+ end #postboot
981
530
 
982
531
  # Locate an existing instance or instances and return an array containing matching AWS resource descriptors for those that match.
983
532
  # @return [Hash<String,OpenStruct>]: The cloud provider's complete descriptions of matching instances
984
533
  def self.find(**args)
985
534
  ip ||= args[:flags]['ip'] if args[:flags] and args[:flags]['ip']
986
535
 
987
- instance = nil
988
- if !args[:region].nil?
989
- regions = [args[:region]]
990
- else
991
- regions = MU::Cloud::AWS.listRegions
992
- end
536
+ regions = args[:region].nil? ? MU::Cloud::AWS.listRegions : [args[:region]]
993
537
 
994
538
  found = {}
995
539
  search_semaphore = Mutex.new
996
540
  search_threads = []
997
541
 
998
- if !ip and !args[:cloud_id] and !args[:tag_value]
999
- regions.each { |r|
1000
- search_threads << Thread.new {
1001
- MU::Cloud::AWS.ec2(region: r, credentials: args[:credentials]).describe_instances(
1002
- filters: [
1003
- {
1004
- name: "instance-state-name",
1005
- values: ["running", "pending", "stopped"]
1006
- }
1007
- ]
1008
- ).reservations.each { |resp|
1009
- if !resp.nil? and !resp.instances.nil?
1010
- resp.instances.each { |i|
1011
- search_semaphore.synchronize {
1012
- found[i.instance_id] = i
1013
- }
1014
- }
1015
- end
1016
- }
1017
- }
1018
- }
542
+ base_filter = { name: "instance-state-name", values: ["running", "pending", "stopped"] }
543
+ searches = []
1019
544
 
1020
- search_threads.each { |t|
1021
- t.join
545
+ if args[:cloud_id]
546
+ searches << {
547
+ :instance_ids => [args[:cloud_id]],
548
+ :filters => [base_filter]
1022
549
  }
1023
-
1024
- return found
1025
550
  end
1026
551
 
1027
- # If we got an instance id, go get it
1028
- if args[:cloud_id]
1029
- regions.each { |r|
1030
- search_threads << Thread.new {
1031
- MU.log "Hunting for instance with cloud id '#{args[:cloud_id]}' in #{r}", MU::DEBUG
1032
- retries = 0
1033
- begin
1034
- MU::Cloud::AWS.ec2(region: r, credentials: args[:credentials]).describe_instances(
1035
- instance_ids: [args[:cloud_id]],
1036
- filters: [
1037
- {
1038
- name: "instance-state-name",
1039
- values: ["running", "pending", "stopped"]
1040
- }
1041
- ]
1042
- ).reservations.each { |resp|
1043
- if !resp.nil? and !resp.instances.nil?
1044
- resp.instances.each { |i|
1045
- search_semaphore.synchronize {
1046
- found[i.instance_id] = i
1047
- }
1048
- }
1049
- end
1050
- }
1051
- rescue Aws::EC2::Errors::InvalidInstanceIDNotFound => e
1052
- if retries < 5
1053
- retries = retries + 1
1054
- sleep 5
1055
- else
1056
- raise MuError, "#{e.inspect} in region #{r}"
1057
- end
1058
- end
552
+ if ip
553
+ ["ip-address", "private-ip-address"].each { |ip_type|
554
+ searches << {
555
+ filters: [base_filter, {name: ip_type, values: [ip]} ],
1059
556
  }
1060
557
  }
1061
- done_threads = []
1062
- begin
1063
- search_threads.each { |t|
1064
- joined = t.join(2)
1065
- done_threads << joined if !joined.nil?
1066
- }
1067
- end while found.size < 1 and done_threads.size != search_threads.size
1068
558
  end
1069
559
 
1070
- return found if found.size > 0
1071
-
1072
- # Ok, well, let's try looking it up by IP then
1073
- if !ip.nil?
1074
- MU.log "Hunting for instance by IP '#{ip}'", MU::DEBUG
1075
- ["ip-address", "private-ip-address"].each { |filter|
1076
- regions.each { |r|
1077
- response = MU::Cloud::AWS.ec2(region: r, credentials: args[:credentials]).describe_instances(
1078
- filters: [
1079
- {name: filter, values: [ip]},
1080
- {name: "instance-state-name", values: ["running", "pending", "stopped"]}
1081
- ]
1082
- ).reservations.first
1083
- response.instances.each { |i|
1084
- found[i.instance_id] = i
1085
- }
1086
- }
560
+ if args[:tag_value] and args[:tag_key]
561
+ searches << {
562
+ filters: [
563
+ base_filter,
565
+ {name: "tag:#{args[:tag_key]}", values: [args[:tag_value]]},
566
+ ]
1087
567
  }
1088
568
  end
1089
569
 
1090
- return found if found.size > 0
570
+ if searches.empty?
571
+ searches << { filters: [base_filter] }
572
+ end
1091
573
 
1092
- # Fine, let's try it by tag.
1093
- if args[:tag_value]
1094
- MU.log "Searching for instance by tag '#{args[:tag_key]}=#{args[:tag_value]}'", MU::DEBUG
1095
- regions.each { |r|
1096
- MU::Cloud::AWS.ec2(region: r, credentials: args[:credentials]).describe_instances(
1097
- filters: [
1098
- {name: "tag:#{args[:tag_key]}", values: [args[:tag_value]]},
1099
- {name: "instance-state-name", values: ["running", "pending", "stopped"]}
1100
- ]
1101
- ).reservations.each { |resp|
1102
- if !resp.nil? and resp.instances.size > 0
1103
- resp.instances.each { |i|
1104
- found[i.instance_id] = i
574
+ regions.each { |r|
575
+ searches.each { |search|
576
+ search_threads << Thread.new(search) { |params|
577
+ MU.retrier([Aws::EC2::Errors::InvalidInstanceIDNotFound], wait: 5, max: 5, ignoreme: [Aws::EC2::Errors::InvalidInstanceIDNotFound]) {
578
+ MU::Cloud::AWS.ec2(region: r, credentials: args[:credentials]).describe_instances(params).reservations.each { |resp|
579
+ next if resp.nil? or resp.instances.nil?
580
+ resp.instances.each { |i|
581
+ search_semaphore.synchronize {
582
+ found[i.instance_id] = i
583
+ }
584
+ }
1105
585
  }
1106
- end
586
+ }
1107
587
  }
1108
588
  }
1109
- end
1110
-
1111
- return found
589
+ }
590
+ done_threads = []
591
+ begin
592
+ search_threads.each { |t|
593
+ joined = t.join(2)
594
+ done_threads << joined if !joined.nil?
595
+ }
596
+ end while found.size < 1 and done_threads.size != search_threads.size
597
+
598
+ return found
1112
599
  end
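For reference, a minimal sketch of how the consolidated find logic above can be exercised once the cloud-mu libraries are loaded and AWS credentials are configured; the instance id and region are placeholders, not values from this changeset.

require 'mu'   # assumes the cloud-mu gem's modules are on the load path and AWS credentials are set up

matches = MU::Cloud::AWS::Server.find(
  cloud_id: "i-0123456789abcdef0",   # hypothetical instance id
  region: "us-east-1"                # hypothetical region; omit to sweep every listed region
)
matches.each_pair { |instance_id, desc|
  puts "#{instance_id}: #{desc.state.name}"   # desc is the raw EC2 instance description
}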
1113
600
 
1114
601
  # Reverse-map our cloud description into a runnable config hash.
1115
602
  # We assume that any values we have in +@config+ are placeholders, and
1116
603
  # calculate our own accordingly based on what's live in the cloud.
1117
- def toKitten(rootparent: nil, billing: nil, habitats: nil)
604
+ def toKitten(**_args)
1118
605
  bok = {
1119
606
  "cloud" => "AWS",
1120
607
  "credentials" => @config['credentials'],
@@ -1127,7 +614,7 @@ module MU
1127
614
  return nil
1128
615
  end
1129
616
 
1130
- asgs = MU::Cloud::AWS::ServerPool.find(
617
+ asgs = MU::Cloud.resourceClass("AWS", "ServerPool").find(
1131
618
  instance_id: @cloud_id,
1132
619
  region: @config['region'],
1133
620
  credentials: @credentials
@@ -1221,8 +708,8 @@ module MU
1221
708
 
1222
709
  int.private_ip_addresses.each { |priv_ip|
1223
710
  if !priv_ip.primary
1224
- bok['add_private_ips'] ||= []
1225
- bok['add_private_ips'] << priv_ip.private_ip_address
711
+ bok['add_private_ips'] ||= 0
712
+ bok['add_private_ips'] += 1
1226
713
  end
1227
714
  if priv_ip.association and priv_ip.association.public_ip
1228
715
  bok['associate_public_ip'] = true
@@ -1237,15 +724,15 @@ module MU
1237
724
 
1238
725
  if int.groups.size > 0
1239
726
 
1240
- require 'mu/clouds/aws/firewall_rule'
1241
- ifaces = MU::Cloud::AWS::FirewallRule.getAssociatedInterfaces(int.groups.map { |sg| sg.group_id }, credentials: @credentials, region: @config['region'])
727
+ require 'mu/providers/aws/firewall_rule'
728
+ ifaces = MU::Cloud.resourceClass("AWS", "FirewallRule").getAssociatedInterfaces(int.groups.map { |sg| sg.group_id }, credentials: @credentials, region: @config['region'])
1242
729
  done_local_rules = false
1243
730
  int.groups.each { |sg|
1244
731
  if !done_local_rules and ifaces[sg.group_id].size == 1
1245
- sg_desc = MU::Cloud::AWS::FirewallRule.find(cloud_id: sg.group_id, credentials: @credentials, region: @config['region']).values.first
732
+ sg_desc = MU::Cloud.resourceClass("AWS", "FirewallRule").find(cloud_id: sg.group_id, credentials: @credentials, region: @config['region']).values.first
1246
733
  if sg_desc
1247
- bok["ingress_rules"] = MU::Cloud::AWS::FirewallRule.rulesToBoK(sg_desc.ip_permissions)
1248
- bok["ingress_rules"].concat(MU::Cloud::AWS::FirewallRule.rulesToBoK(sg_desc.ip_permissions_egress, egress: true))
734
+ bok["ingress_rules"] = MU::Cloud.resourceClass("AWS", "FirewallRule").rulesToBoK(sg_desc.ip_permissions)
735
+ bok["ingress_rules"].concat(MU::Cloud.resourceClass("AWS", "FirewallRule").rulesToBoK(sg_desc.ip_permissions_egress, egress: true))
1249
736
  done_local_rules = true
1250
737
  next
1251
738
  end
@@ -1270,9 +757,6 @@ module MU
1270
757
  # Return a description of this resource appropriate for deployment
1271
758
  # metadata. Arguments reflect the return values of the MU::Cloud::[Resource].describe method
1272
759
  def notify
1273
- node, config, deploydata = describe(cloud_id: @cloud_id, update_cache: true)
1274
- deploydata = {} if deploydata.nil?
1275
-
1276
760
  if cloud_desc.nil?
1277
761
  raise MuError, "Failed to load instance metadata for #{@mu_name}/#{@cloud_id}"
1278
762
  end
@@ -1317,52 +801,16 @@ module MU
1317
801
  end
1318
802
  deploydata["region"] = @config['region'] if !@config['region'].nil?
1319
803
  if !@named
1320
- MU::MommaCat.nameKitten(self)
804
+ MU::MommaCat.nameKitten(self, no_dns: true)
1321
805
  @named = true
1322
806
  end
1323
807
 
1324
808
  return deploydata
1325
809
  end
1326
810
 
1327
- # If the specified server is in a VPC, and has a NAT, make sure we'll
1328
- # be letting ssh traffic in from said NAT.
1329
- def punchAdminNAT
1330
- if @config['vpc'].nil? or
1331
- (
1332
- !@config['vpc'].has_key?("nat_host_id") and
1333
- !@config['vpc'].has_key?("nat_host_tag") and
1334
- !@config['vpc'].has_key?("nat_host_ip") and
1335
- !@config['vpc'].has_key?("nat_host_name")
1336
- )
1337
- return nil
1338
- end
1339
-
1340
- return nil if @nat.is_a?(Struct) && @nat.nat_gateway_id && @nat.nat_gateway_id.start_with?("nat-")
1341
-
1342
- dependencies if @nat.nil?
1343
- if @nat.nil? or @nat.cloud_desc.nil?
1344
- raise MuError, "#{@mu_name} (#{MU.deploy_id}) is configured to use #{@config['vpc']} but I can't find the cloud descriptor for a matching NAT instance"
1345
- end
1346
- MU.log "Adding administrative holes for NAT host #{@nat.cloud_desc.private_ip_address} to #{@mu_name}"
1347
- if !@deploy.kittens['firewall_rules'].nil?
1348
- @deploy.kittens['firewall_rules'].each_pair { |name, acl|
1349
- if acl.config["admin"]
1350
- acl.addRule([@nat.cloud_desc.private_ip_address], proto: "tcp")
1351
- acl.addRule([@nat.cloud_desc.private_ip_address], proto: "udp")
1352
- acl.addRule([@nat.cloud_desc.private_ip_address], proto: "icmp")
1353
- end
1354
- }
1355
- end
1356
- end
1357
-
1358
811
  # Called automatically by {MU::Deploy#createResources}
1359
812
  def groom
1360
813
  MU::MommaCat.lock(@cloud_id+"-groom")
1361
- node, config, deploydata = describe(cloud_id: @cloud_id)
1362
-
1363
- if node.nil? or node.empty?
1364
- raise MuError, "MU::Cloud::AWS::Server.groom was called without a mu_name"
1365
- end
1366
814
 
1367
815
  # Make double sure we don't lose a cached mu_windows_name value.
1368
816
  if windows? or !@config['active_directory'].nil?
@@ -1371,9 +819,9 @@ module MU
1371
819
  end
1372
820
  end
1373
821
 
1374
- punchAdminNAT
822
+ allowBastionAccess
1375
823
 
1376
- MU::Cloud::AWS::Server.tagVolumes(@cloud_id, credentials: @config['credentials'])
824
+ tagVolumes
1377
825
 
1378
826
  # If we have a loadbalancer configured, attach us to it
1379
827
  if !@config['loadbalancers'].nil?
@@ -1402,55 +850,31 @@ module MU
1402
850
  end
1403
851
 
1404
852
  begin
853
+ getIAMProfile
854
+
855
+ dbs = @deploy.findLitterMate(type: "database", return_all: true)
856
+ if dbs
857
+ dbs.each_pair { |sib_name, sib|
858
+ @groomer.groomer_class.grantSecretAccess(@mu_name, sib_name, "database_credentials")
859
+ if sib.config and sib.config['auth_vault']
860
+ @groomer.groomer_class.grantSecretAccess(@mu_name, sib.config['auth_vault']['vault'], sib.config['auth_vault']['item'])
861
+ end
862
+ }
863
+ end
864
+
1405
865
  if @config['groom'].nil? or @config['groom']
1406
- @groomer.run(purpose: "Full Initial Run", max_retries: 15, reboot_first_fail: windows?, timeout: @config['groomer_timeout'])
866
+ @groomer.run(purpose: "Full Initial Run", max_retries: 15, reboot_first_fail: (windows? and @config['groomer'] != "Ansible"), timeout: @config['groomer_timeout'])
1407
867
  end
1408
868
  rescue MU::Groomer::RunError => e
1409
- MU.log "Proceeding after failed initial Groomer run, but #{node} may not behave as expected!", MU::WARN, details: e.message
1410
- rescue Exception => e
1411
- MU.log "Caught #{e.inspect} on #{node} in an unexpected place (after @groomer.run on Full Initial Run)", MU::ERR
869
+ raise e if !@config['create_image'].nil? and !@config['image_created']
870
+ MU.log "Proceeding after failed initial Groomer run, but #{@mu_name} may not behave as expected!", MU::WARN, details: e.message
871
+ rescue StandardError => e
872
+ raise e if !@config['create_image'].nil? and !@config['image_created']
873
+ MU.log "Caught #{e.inspect} on #{@mu_name} in an unexpected place (after @groomer.run on Full Initial Run)", MU::ERR
1412
874
  end
1413
875
 
1414
876
  if !@config['create_image'].nil? and !@config['image_created']
1415
- img_cfg = @config['create_image']
1416
- # Scrub things that don't belong on an AMI
1417
- session = getSSHSession
1418
- sudo = purgecmd = ""
1419
- sudo = "sudo" if @config['ssh_user'] != "root"
1420
- if windows?
1421
- purgecmd = "rm -rf /cygdrive/c/mu_installed_chef"
1422
- else
1423
- purgecmd = "rm -rf /opt/mu_installed_chef"
1424
- end
1425
- if img_cfg['image_then_destroy']
1426
- if windows?
1427
- purgecmd = "rm -rf /cygdrive/c/chef/ /home/#{@config['windows_admin_username']}/.ssh/authorized_keys /home/Administrator/.ssh/authorized_keys /cygdrive/c/mu-installer-ran-updates /cygdrive/c/mu_installed_chef"
1428
- # session.exec!("powershell -Command \"& {(Get-WmiObject -Class Win32_Product -Filter \"Name='UniversalForwarder'\").Uninstall()}\"")
1429
- else
1430
- purgecmd = "#{sudo} rm -rf /var/lib/cloud/instances/i-* /root/.ssh/authorized_keys /etc/ssh/ssh_host_*key* /etc/chef /etc/opscode/* /.mu-installer-ran-updates /var/chef /opt/mu_installed_chef /opt/chef ; #{sudo} sed -i 's/^HOSTNAME=.*//' /etc/sysconfig/network"
1431
- end
1432
- end
1433
- session.exec!(purgecmd)
1434
- session.close
1435
- ami_ids = MU::Cloud::AWS::Server.createImage(
1436
- name: @mu_name,
1437
- instance_id: @cloud_id,
1438
- storage: @config['storage'],
1439
- exclude_storage: img_cfg['image_exclude_storage'],
1440
- copy_to_regions: img_cfg['copy_to_regions'],
1441
- make_public: img_cfg['public'],
1442
- region: @config['region'],
1443
- tags: @config['tags'],
1444
- credentials: @config['credentials']
1445
- )
1446
- @deploy.notify("images", @config['name'], ami_ids)
1447
- @config['image_created'] = true
1448
- if img_cfg['image_then_destroy']
1449
- MU::Cloud::AWS::Server.waitForAMI(ami_ids[@config['region']], region: @config['region'], credentials: @config['credentials'])
1450
- MU.log "AMI #{ami_ids[@config['region']]} ready, removing source node #{node}"
1451
- MU::Cloud::AWS::Server.terminateInstance(id: @cloud_id, region: @config['region'], deploy_id: @deploy.deploy_id, mu_name: @mu_name, credentials: @config['credentials'])
1452
- destroy
1453
- end
877
+ createImage
1454
878
  end
1455
879
 
1456
880
  MU::MommaCat.unlock(@cloud_id+"-groom")
@@ -1462,9 +886,11 @@ module MU
1462
886
  "arn:"+(MU::Cloud::AWS.isGovCloud?(@config["region"]) ? "aws-us-gov" : "aws")+":ec2:"+@config['region']+":"+MU::Cloud::AWS.credToAcct(@config['credentials'])+":instance/"+@cloud_id
1463
887
  end
1464
888
 
889
+ @cloud_desc_cache = nil
1465
890
  # Return the cloud provider's description for this instance
1466
891
  # @return [Openstruct]
1467
- def cloud_desc
892
+ def cloud_desc(use_cache: true)
893
+ return @cloud_desc_cache if @cloud_desc_cache and use_cache
1468
894
  max_retries = 5
1469
895
  retries = 0
1470
896
  if !@cloud_id.nil?
@@ -1473,11 +899,12 @@ module MU
1473
899
  if resp and resp.reservations and resp.reservations.first and
1474
900
  resp.reservations.first.instances and
1475
901
  resp.reservations.first.instances.first
1476
- return resp.reservations.first.instances.first
902
+ @cloud_desc_cache = resp.reservations.first.instances.first
903
+ return @cloud_desc_cache
1477
904
  end
1478
905
  rescue Aws::EC2::Errors::InvalidInstanceIDNotFound
1479
906
  return nil
1480
- rescue NoMethodError => e
907
+ rescue NoMethodError
1481
908
  if retries >= max_retries
1482
909
  raise MuError, "Couldn't get a cloud descriptor for #{@mu_name} (#{@cloud_id})"
1483
910
  else
@@ -1495,23 +922,19 @@ module MU
1495
922
  # bastion hosts that may be in the path, see getSSHConfig if that's what
1496
923
  # you need.
1497
924
  def canonicalIP
1498
- mu_name, config, deploydata = describe(cloud_id: @cloud_id)
1499
-
1500
- instance = cloud_desc
1501
-
1502
- if !instance
925
+ if !cloud_desc
1503
926
  raise MuError, "Couldn't retrieve cloud descriptor for server #{self}"
1504
927
  end
1505
928
 
1506
929
  if deploydata.nil? or
1507
930
  (!deploydata.has_key?("private_ip_address") and
1508
931
  !deploydata.has_key?("public_ip_address"))
1509
- return nil if instance.nil?
932
+ return nil if cloud_desc.nil?
1510
933
  @deploydata = {} if @deploydata.nil?
1511
- @deploydata["public_ip_address"] = instance.public_ip_address
1512
- @deploydata["public_dns_name"] = instance.public_dns_name
1513
- @deploydata["private_ip_address"] = instance.private_ip_address
1514
- @deploydata["private_dns_name"] = instance.private_dns_name
934
+ @deploydata["public_ip_address"] = cloud_desc.public_ip_address
935
+ @deploydata["public_dns_name"] = cloud_desc.public_dns_name
936
+ @deploydata["private_ip_address"] = cloud_desc.private_ip_address
937
+ @deploydata["private_dns_name"] = cloud_desc.private_dns_name
1515
938
 
1516
939
  notify
1517
940
  end
@@ -1519,14 +942,14 @@ module MU
1519
942
  # Our deploydata gets corrupted often with server pools, this will cause us to use the wrong IP to identify a node
1520
943
  # which will cause us to create certificates, DNS records and other artifacts with incorrect information which will cause our deploy to fail.
1521
944
  # The cloud_id is always correct so lets use 'cloud_desc' to get the correct IPs
1522
- if MU::Cloud::AWS::VPC.haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials']) or @deploydata["public_ip_address"].nil?
1523
- @config['canonical_ip'] = instance.private_ip_address
1524
- @deploydata["private_ip_address"] = instance.private_ip_address
1525
- return instance.private_ip_address
945
+ if MU::Cloud.resourceClass("AWS", "VPC").haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials']) or @deploydata["public_ip_address"].nil?
946
+ @config['canonical_ip'] = cloud_desc.private_ip_address
947
+ @deploydata["private_ip_address"] = cloud_desc.private_ip_address
948
+ return cloud_desc.private_ip_address
1526
949
  else
1527
- @config['canonical_ip'] = instance.public_ip_address
1528
- @deploydata["public_ip_address"] = instance.public_ip_address
1529
- return instance.public_ip_address
950
+ @config['canonical_ip'] = cloud_desc.public_ip_address
951
+ @deploydata["public_ip_address"] = cloud_desc.public_ip_address
952
+ return cloud_desc.public_ip_address
1530
953
  end
1531
954
  end
1532
955
 
@@ -1574,7 +997,7 @@ module MU
1574
997
  resp = nil
1575
998
  begin
1576
999
  resp = MU::Cloud::AWS.ec2(region: region, credentials: credentials).create_image(ami_descriptor)
1577
- rescue Aws::EC2::Errors::InvalidAMINameDuplicate => e
1000
+ rescue Aws::EC2::Errors::InvalidAMINameDuplicate
1578
1001
  MU.log "AMI #{name} already exists, skipping", MU::WARN
1579
1002
  return nil
1580
1003
  end
@@ -1583,7 +1006,7 @@ module MU
1583
1006
 
1584
1007
  ami_ids[region] = ami
1585
1008
  MU::Cloud::AWS.createStandardTags(ami, region: region, credentials: credentials)
1586
- MU::MommaCat.createTag(ami, "Name", name, region: region, credentials: credentials)
1009
+ MU::Cloud::AWS.createTag(ami, "Name", name, region: region, credentials: credentials)
1587
1010
  MU.log "AMI of #{name} in region #{region}: #{ami}"
1588
1011
  if make_public
1589
1012
  MU::Cloud::AWS::Server.waitForAMI(ami, region: region, credentials: credentials)
@@ -1611,10 +1034,10 @@ module MU
1611
1034
  ami_ids[r] = copy.image_id
1612
1035
 
1613
1036
  MU::Cloud::AWS.createStandardTags(copy.image_id, region: r, credentials: credentials)
1614
- MU::MommaCat.createTag(copy.image_id, "Name", name, region: r, credentials: credentials)
1037
+ MU::Cloud::AWS.createTag(copy.image_id, "Name", name, region: r, credentials: credentials)
1615
1038
  if !tags.nil?
1616
1039
  tags.each { |tag|
1617
- MU::MommaCat.createTag(instance.instance_id, tag['key'], tag['value'], region: r, credentials: credentials)
1040
+ MU::Cloud::AWS.createTag(instance.instance_id, tag['key'], tag['value'], region: r, credentials: credentials)
1618
1041
  }
1619
1042
  end
1620
1043
  MU::Cloud::AWS::Server.waitForAMI(copy.image_id, region: r, credentials: credentials)
@@ -1719,11 +1142,27 @@ module MU
1719
1142
  # Retrieves the Cloud provider's randomly generated Windows password
1720
1143
  # Will only work on stock Amazon Windows AMIs or custom AMIs that were created with Administrator Password set to random in EC2Config
1721
1144
  # @return [String]: A password string.
1722
- def getWindowsAdminPassword
1723
- if @cloud_id.nil?
1724
- node, config, deploydata = describe
1725
- @cloud_id = cloud_desc.instance_id
1145
+ def getWindowsAdminPassword(use_cache: true)
1146
+ @config['windows_auth_vault'] ||= {
1147
+ "vault" => @mu_name,
1148
+ "item" => "windows_credentials",
1149
+ "password_field" => "password"
1150
+ }
1151
+
1152
+ if use_cache
1153
+ begin
1154
+ win_admin_password = @groomer.getSecret(
1155
+ vault: @config['windows_auth_vault']['vault'],
1156
+ item: @config['windows_auth_vault']['item'],
1157
+ field: @config["windows_auth_vault"]["password_field"]
1158
+ )
1159
+
1160
+ return win_admin_password if win_admin_password
1161
+ rescue MU::Groomer::MuNoSuchSecret, MU::Groomer::RunError
1162
+ end
1726
1163
  end
1164
+
1165
+ @cloud_id ||= cloud_desc(use_cache: false).instance_id
1727
1166
  ssh_keydir = "#{Etc.getpwuid(Process.uid).dir}/.ssh"
1728
1167
  ssh_key_name = @deploy.ssh_key_name
1729
1168
 
@@ -1758,6 +1197,8 @@ module MU
1758
1197
  pem_bytes = File.open("#{ssh_keydir}/#{ssh_key_name}", 'rb') { |f| f.read }
1759
1198
  private_key = OpenSSL::PKey::RSA.new(pem_bytes)
1760
1199
  decrypted_password = private_key.private_decrypt(decoded)
1200
+ saveCredentials(decrypted_password)
1201
+
1761
1202
  return decrypted_password
1762
1203
  end
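A hedged sketch of the decryption step this method falls back to when the vault lookup misses: EC2's get_password_data returns the Administrator password encrypted with the keypair's public key, so the deploy's private SSH key can decrypt it. The region, instance id, and key path below are placeholders.

require 'aws-sdk-ec2'
require 'openssl'
require 'base64'

resp = Aws::EC2::Client.new(region: "us-east-1").get_password_data(instance_id: "i-0123456789abcdef0")
encrypted = Base64.decode64(resp.password_data)                      # empty until the instance finishes provisioning
key = OpenSSL::PKey::RSA.new(File.read("/path/to/deploy_ssh_key"))   # hypothetical path to the deploy's private key
puts key.private_decrypt(encrypted)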
1763
1204
 
@@ -1831,61 +1272,37 @@ module MU
1831
1272
  # @param type [String]: Cloud storage type of the volume, if applicable
1832
1273
  # @param delete_on_termination [Boolean]: Value of delete_on_termination flag to set
1833
1274
  def addVolume(dev, size, type: "gp2", delete_on_termination: false)
1834
- if @cloud_id.nil? or @cloud_id.empty?
1835
- MU.log "#{self} didn't have a cloud id, couldn't determine 'active?' status", MU::ERR
1836
- return true
1275
+
1276
+ if setDeleteOntermination(dev, delete_on_termination)
1277
+ MU.log "A volume #{device} already attached to #{self}, skipping", MU::NOTICE
1278
+ return
1837
1279
  end
1838
- az = nil
1839
- MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_instances(
1840
- instance_ids: [@cloud_id]
1841
- ).reservations.each { |resp|
1842
- if !resp.nil? and !resp.instances.nil?
1843
- resp.instances.each { |instance|
1844
- az = instance.placement.availability_zone
1845
- d_o_t_changed = true
1846
- mappings = MU.structToHash(instance.block_device_mappings)
1847
- mappings.each { |vol|
1848
- if vol[:ebs]
1849
- vol[:ebs].delete(:attach_time)
1850
- vol[:ebs].delete(:status)
1851
- end
1852
- }
1853
- mappings.each { |vol|
1854
- if vol[:device_name] == dev
1855
- MU.log "A volume #{dev} already attached to #{self}, skipping", MU::NOTICE
1856
- if vol[:ebs][:delete_on_termination] != delete_on_termination
1857
- vol[:ebs][:delete_on_termination] = delete_on_termination
1858
- MU.log "Setting delete_on_termination flag to #{delete_on_termination.to_s} on #{@mu_name}'s #{dev}"
1859
- MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).modify_instance_attribute(
1860
- instance_id: @cloud_id,
1861
- block_device_mappings: mappings
1862
- )
1863
- end
1864
- return
1865
- end
1866
- }
1867
- }
1868
- end
1869
- }
1280
+
1870
1281
  MU.log "Creating #{size}GB #{type} volume on #{dev} for #{@cloud_id}"
1871
1282
  creation = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).create_volume(
1872
- availability_zone: az,
1283
+ availability_zone: cloud_desc.placement.availability_zone,
1873
1284
  size: size,
1874
1285
  volume_type: type
1875
1286
  )
1876
- begin
1877
- sleep 3
1287
+
1288
+ MU.retrier(wait: 3, loop_if: Proc.new {
1878
1289
  creation = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_volumes(volume_ids: [creation.volume_id]).volumes.first
1879
1290
  if !["creating", "available"].include?(creation.state)
1880
1291
  raise MuError, "Saw state '#{creation.state}' while creating #{size}GB #{type} volume on #{dev} for #{@cloud_id}"
1881
1292
  end
1882
- end while creation.state != "available"
1293
+ creation.state != "available"
1294
+ })
1295
+
1883
1296
 
1884
1297
  if @deploy
1885
- MU::MommaCat.listStandardTags.each_pair { |key, value|
1886
- MU::MommaCat.createTag(creation.volume_id, key, value, region: @config['region'], credentials: @config['credentials'])
1887
- }
1888
- MU::MommaCat.createTag(creation.volume_id, "Name", "#{MU.deploy_id}-#{@config["name"].upcase}-#{dev.upcase}", region: @config['region'], credentials: @config['credentials'])
1298
+ MU::Cloud::AWS.createStandardTags(
1299
+ creation.volume_id,
1300
+ region: @config['region'],
1301
+ credentials: @config['credentials'],
1302
+ optional: @config['optional_tags'],
1303
+ nametag: @mu_name+"-"+dev.upcase,
1304
+ othertags: @config['tags']
1305
+ )
1889
1306
  end
1890
1307
 
1891
1308
  attachment = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).attach_volume(
@@ -1904,29 +1321,7 @@ module MU
1904
1321
 
1905
1322
  # Set delete_on_termination, which for some reason is an instance
1906
1323
  # attribute and not on the attachment
1907
- mappings = MU.structToHash(cloud_desc.block_device_mappings)
1908
- changed = false
1909
-
1910
- mappings.each { |mapping|
1911
- if mapping[:ebs]
1912
- mapping[:ebs].delete(:attach_time)
1913
- mapping[:ebs].delete(:status)
1914
- end
1915
- if mapping[:device_name] == dev and
1916
- mapping[:ebs][:delete_on_termination] != delete_on_termination
1917
- changed = true
1918
- mapping[:ebs][:delete_on_termination] = delete_on_termination
1919
- end
1920
- }
1921
-
1922
- if changed
1923
- MU.log "Setting delete_on_termination flag to #{delete_on_termination.to_s} on #{@mu_name}'s #{dev}"
1924
- MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).modify_instance_attribute(
1925
- instance_id: @cloud_id,
1926
- block_device_mappings: mappings
1927
- )
1928
- end
1929
-
1324
+ setDeleteOntermination(dev, delete_on_termination)
1930
1325
  end
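An illustrative call against an instantiated server object; the device name and size are arbitrary examples, not values from this changeset.

# 'server' stands for any live MU::Cloud::AWS::Server object in a deploy
server.addVolume("/dev/xvdf", 100, type: "gp2", delete_on_termination: true)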
1931
1326
 
1932
1327
  # Determine whether the node in question exists at the Cloud provider
@@ -1964,13 +1359,13 @@ module MU
1964
1359
  # @param ip [String]: Request a specific IP address.
1965
1360
  # @param region [String]: The cloud provider region
1966
1361
  # @return [void]
1967
- def self.associateElasticIp(instance_id, classic: false, ip: nil, region: MU.curRegion)
1362
+ def self.associateElasticIp(instance_id, classic: false, ip: nil, region: MU.curRegion, credentials: nil)
1968
1363
  MU.log "associateElasticIp called: #{instance_id}, classic: #{classic}, ip: #{ip}, region: #{region}", MU::DEBUG
1969
1364
  elastic_ip = nil
1970
1365
  @eip_semaphore.synchronize {
1971
1366
  if !ip.nil?
1972
1367
  filters = [{name: "public-ip", values: [ip]}]
1973
- resp = MU::Cloud::AWS.ec2(region: region).describe_addresses(filters: filters)
1368
+ resp = MU::Cloud::AWS.ec2(region: region, credentials: credentials).describe_addresses(filters: filters)
1974
1369
  if @eips_used.include?(ip)
1975
1370
  is_free = false
1976
1371
  resp.addresses.each { |address|
@@ -1999,54 +1394,44 @@ module MU
1999
1394
  @eips_used << elastic_ip.public_ip
2000
1395
  MU.log "Associating Elastic IP #{elastic_ip.public_ip} with #{instance_id}", details: elastic_ip
2001
1396
  }
2002
- attempts = 0
2003
- begin
1397
+
1398
+ on_retry = Proc.new { |e|
1399
+ if e.class == Aws::EC2::Errors::ResourceAlreadyAssociated
1400
+ # A previous association attempt may have succeeded, albeit slowly.
1401
+ resp = MU::Cloud::AWS.ec2(region: region, credentials: credentials).describe_addresses(
1402
+ allocation_ids: [elastic_ip.allocation_id]
1403
+ )
1404
+ first_addr = resp.addresses.first
1405
+ if first_addr and first_addr.instance_id != instance_id
1406
+ raise MuError, "Tried to associate #{elastic_ip.public_ip} with #{instance_id}, but it's already associated with #{first_addr.instance_id}!"
1407
+ end
1408
+ end
1409
+ }
1410
+
1411
+ MU.retrier([Aws::EC2::Errors::IncorrectInstanceState, Aws::EC2::Errors::ResourceAlreadyAssociated], wait: 5, max: 6, on_retry: on_retry) {
2004
1412
  if classic
2005
- resp = MU::Cloud::AWS.ec2(region: region).associate_address(
2006
- instance_id: instance_id,
2007
- public_ip: elastic_ip.public_ip
1413
+ MU::Cloud::AWS.ec2(region: region, credentials: credentials).associate_address(
1414
+ instance_id: instance_id,
1415
+ public_ip: elastic_ip.public_ip
2008
1416
  )
2009
1417
  else
2010
- resp = MU::Cloud::AWS.ec2(region: region).associate_address(
2011
- instance_id: instance_id,
2012
- allocation_id: elastic_ip.allocation_id,
2013
- allow_reassociation: false
1418
+ MU::Cloud::AWS.ec2(region: region, credentials: credentials).associate_address(
1419
+ instance_id: instance_id,
1420
+ allocation_id: elastic_ip.allocation_id,
1421
+ allow_reassociation: false
2014
1422
  )
2015
1423
  end
2016
- rescue Aws::EC2::Errors::IncorrectInstanceState => e
2017
- attempts = attempts + 1
2018
- if attempts < 6
2019
- MU.log "Got #{e.message} associating #{elastic_ip.allocation_id} with #{instance_id}, retrying", MU::WARN
2020
- sleep 5
2021
- retry
2022
- end
2023
- raise MuError "#{e.message} associating #{elastic_ip.allocation_id} with #{instance_id}"
2024
- rescue Aws::EC2::Errors::ResourceAlreadyAssociated => e
2025
- # A previous association attempt may have succeeded, albeit slowly.
2026
- resp = MU::Cloud::AWS.ec2(region: region).describe_addresses(
2027
- allocation_ids: [elastic_ip.allocation_id]
2028
- )
2029
- first_addr = resp.addresses.first
2030
- if !first_addr.nil? and first_addr.instance_id == instance_id
2031
- MU.log "#{elastic_ip.public_ip} already associated with #{instance_id}", MU::WARN
2032
- else
2033
- MU.log "#{elastic_ip.public_ip} shows as already associated!", MU::ERR, details: resp
2034
- raise MuError, "#{elastic_ip.public_ip} shows as already associated with #{first_addr.instance_id}!"
2035
- end
2036
- end
1424
+ }
2037
1425
 
2038
- instance = MU::Cloud::AWS.ec2(region: region).describe_instances(instance_ids: [instance_id]).reservations.first.instances.first
2039
- waited = false
2040
- if instance.public_ip_address != elastic_ip.public_ip
2041
- waited = true
2042
- begin
2043
- sleep 10
2044
- MU.log "Waiting for Elastic IP association of #{elastic_ip.public_ip} to #{instance_id} to take effect", MU::NOTICE
2045
- instance = MU::Cloud::AWS.ec2(region: region).describe_instances(instance_ids: [instance_id]).reservations.first.instances.first
2046
- end while instance.public_ip_address != elastic_ip.public_ip
2047
- end
1426
+ loop_if = Proc.new {
1427
+ instance = find(cloud_id: instance_id, region: region, credentials: credentials).values.first
1428
+ instance.public_ip_address != elastic_ip.public_ip
1429
+ }
1430
+ MU.retrier(loop_if: loop_if, wait: 10, max: 3) {
1431
+ MU.log "Waiting for Elastic IP association of #{elastic_ip.public_ip} to #{instance_id} to take effect", MU::NOTICE
1432
+ }
2048
1433
 
2049
- MU.log "Elastic IP #{elastic_ip.public_ip} now associated with #{instance_id}" if waited
1434
+ MU.log "Elastic IP #{elastic_ip.public_ip} now associated with #{instance_id}"
2050
1435
 
2051
1436
  return elastic_ip.public_ip
2052
1437
  end
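A short usage sketch for the reworked class method, with a hypothetical instance id and pre-allocated Elastic IP.

MU::Cloud::AWS::Server.associateElasticIp(
  "i-0123456789abcdef0",   # hypothetical instance
  ip: "203.0.113.10",      # optional: request a specific already-allocated address
  region: "us-east-1",
  credentials: nil         # default credential set
)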
@@ -2078,7 +1463,6 @@ module MU
2078
1463
  if !ignoremaster
2079
1464
  tagfilters << {name: "tag:MU-MASTER-IP", values: [MU.mu_public_ip]}
2080
1465
  end
2081
- instances = Array.new
2082
1466
  unterminated = Array.new
2083
1467
  name_tags = Array.new
2084
1468
 
@@ -2119,7 +1503,7 @@ module MU
2119
1503
  threads << Thread.new(volume) { |myvolume|
2120
1504
  MU.dupGlobals(parent_thread_id)
2121
1505
  Thread.abort_on_exception = true
2122
- MU::Cloud::AWS::Server.delete_volume(myvolume, noop, skipsnapshots, credentials: credentials)
1506
+ delete_volume(myvolume, noop, skipsnapshots, credentials: credentials)
2123
1507
  }
2124
1508
  }
2125
1509
 
@@ -2129,193 +1513,113 @@ module MU
2129
1513
  }
2130
1514
  end
2131
1515
 
1516
+ # Return an instance's AWS-assigned IP addresses and hostnames.
1517
+ # @param instance [OpenStruct]
1518
+ # @param id [String]
1519
+ # @param region [String]
1520
+ # @param credentials [String]
1521
+ # @return [Array<Array>]
1522
+ def self.getAddresses(instance = nil, id: nil, region: MU.curRegion, credentials: nil)
1523
+ return nil if !instance and !id
1524
+
1525
+ instance ||= find(cloud_id: id, region: region, credentials: credentials).values.first
1526
+ return if !instance
1527
+
1528
+ ips = []
1529
+ names = []
1530
+ instance.network_interfaces.each { |iface|
1531
+ iface.private_ip_addresses.each { |ip|
1532
+ ips << ip.private_ip_address
1533
+ names << ip.private_dns_name
1534
+ if ip.association
1535
+ ips << ip.association.public_ip
1536
+ names << ip.association.public_dns_name
1537
+ end
1538
+ }
1539
+ }
1540
+
1541
+ [ips, names]
1542
+ end
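Example of the new helper in isolation (the id and region are placeholders); it returns parallel arrays of addresses and DNS names gathered from every network interface.

ips, names = MU::Cloud::AWS::Server.getAddresses(id: "i-0123456789abcdef0", region: "us-east-1")
ips.each_with_index { |ip, n| puts "#{ip} => #{names[n]}" }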
1543
+
2132
1544
  # Terminate an instance.
2133
1545
  # @param instance [OpenStruct]: The cloud provider's description of the instance.
2134
1546
  # @param id [String]: The cloud provider's identifier for the instance, to use if the full description is not available.
2135
1547
  # @param region [String]: The cloud provider region
2136
1548
  # @return [void]
2137
1549
  def self.terminateInstance(instance: nil, noop: false, id: nil, onlycloud: false, region: MU.curRegion, deploy_id: MU.deploy_id, mu_name: nil, credentials: nil)
2138
- ips = Array.new
2139
- if !instance
2140
- if id
2141
- begin
2142
- resp = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_instances(instance_ids: [id])
2143
- rescue Aws::EC2::Errors::InvalidInstanceIDNotFound => e
2144
- MU.log "Instance #{id} no longer exists", MU::WARN
2145
- end
2146
- if !resp.nil? and !resp.reservations.nil? and !resp.reservations.first.nil?
2147
- instance = resp.reservations.first.instances.first
2148
- ips << instance.public_ip_address if !instance.public_ip_address.nil?
2149
- ips << instance.private_ip_address if !instance.private_ip_address.nil?
2150
- end
2151
- else
2152
- MU.log "You must supply an instance handle or id to terminateInstance", MU::ERR
2153
- end
2154
- else
2155
- id = instance.instance_id
2156
- end
2157
- if !MU.deploy_id.empty?
2158
- deploy_dir = File.expand_path("#{MU.dataDir}/deployments/"+MU.deploy_id)
2159
- if Dir.exist?(deploy_dir) and !noop
2160
- FileUtils.touch("#{deploy_dir}/.cleanup-"+id)
2161
- end
1550
+ if !id and !instance
1551
+ MU.log "You must supply an instance handle or id to terminateInstance", MU::ERR
1552
+ return
2162
1553
  end
1554
+ instance ||= find(cloud_id: id, region: region, credentials: credentials).values.first
1555
+ return if !instance
2163
1556
 
2164
- server_obj = MU::MommaCat.findStray(
2165
- "AWS",
2166
- "servers",
2167
- region: region,
2168
- deploy_id: deploy_id,
2169
- cloud_id: id,
2170
- mu_name: mu_name
2171
- ).first
2172
-
1557
+ id ||= instance.instance_id
2173
1558
  begin
2174
- MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_instances(instance_ids: [id])
2175
- rescue Aws::EC2::Errors::InvalidInstanceIDNotFound => e
2176
- MU.log "Instance #{id} no longer exists", MU::DEBUG
2177
- end
2178
-
2179
- if !server_obj.nil? and MU::Cloud::AWS.hosted? and !MU::Cloud::AWS.isGovCloud?
2180
- # DNS cleanup is now done in MU::Cloud::DNSZone. Keeping this for now
2181
- cleaned_dns = false
2182
- mu_name = server_obj.mu_name
2183
- mu_zone = MU::Cloud::DNSZone.find(cloud_id: "platform-mu", credentials: credentials).values.first
2184
- if !mu_zone.nil?
2185
- zone_rrsets = []
2186
- rrsets = MU::Cloud::AWS.route53(credentials: credentials).list_resource_record_sets(hosted_zone_id: mu_zone.id)
2187
- rrsets.resource_record_sets.each{ |record|
2188
- zone_rrsets << record
2189
- }
1559
+ MU::MommaCat.lock(".cleanup-"+id)
1560
+ rescue Errno::ENOENT => e
1561
+ MU.log "No lock for terminating instance #{id} due to missing metadata", MU::DEBUG
1562
+ end
2190
1563
 
2191
- # AWS API returns a maximum of 100 results. DNS zones are likely to have more than 100 records, lets page and make sure we grab all records in a given zone
2192
- while rrsets.next_record_name && rrsets.next_record_type
2193
- rrsets = MU::Cloud::AWS.route53(credentials: credentials).list_resource_record_sets(hosted_zone_id: mu_zone.id, start_record_name: rrsets.next_record_name, start_record_type: rrsets.next_record_type)
2194
- rrsets.resource_record_sets.each{ |record|
2195
- zone_rrsets << record
2196
- }
2197
- end
2198
- end
2199
- if !onlycloud and !mu_name.nil?
2200
- # DNS cleanup is now done in MU::Cloud::DNSZone. Keeping this for now
2201
- if !zone_rrsets.nil? and !zone_rrsets.empty?
2202
- zone_rrsets.each { |rrset|
2203
- if rrset.name.match(/^#{mu_name.downcase}\.server\.#{MU.myInstanceId}\.platform-mu/i)
2204
- rrset.resource_records.each { |record|
2205
- MU::Cloud::DNSZone.genericMuDNSEntry(name: mu_name, target: record.value, cloudclass: MU::Cloud::Server, delete: true)
2206
- cleaned_dns = true
2207
- }
2208
- end
2209
- }
2210
- end
1564
+ ips, names = getAddresses(instance, region: region, credentials: credentials)
1565
+ targets = ips + names
2211
1566
 
2212
- if !noop
2213
- if !server_obj.nil? and !server_obj.config.nil?
2214
- MU.mommacat.notify(MU::Cloud::Server.cfg_plural, server_obj.config['name'], {}, mu_name: server_obj.mu_name, remove: true) if MU.mommacat
2215
- end
2216
- end
1567
+ server_obj = MU::MommaCat.findStray(
1568
+ "AWS",
1569
+ "servers",
1570
+ region: region,
1571
+ deploy_id: deploy_id,
1572
+ cloud_id: id,
1573
+ mu_name: mu_name,
1574
+ dummy_ok: true
1575
+ ).first
2217
1576
 
2218
- # If we didn't manage to find this instance's Route53 entry by sifting
2219
- # deployment metadata, see if we can get it with the Name tag.
2220
- if !mu_zone.nil? and !cleaned_dns and !instance.nil?
2221
- instance.tags.each { |tag|
2222
- if tag.key == "Name"
2223
- zone_rrsets.each { |rrset|
2224
- if rrset.name.match(/^#{tag.value.downcase}\.server\.#{MU.myInstanceId}\.platform-mu/i)
2225
- rrset.resource_records.each { |record|
2226
- MU::Cloud::DNSZone.genericMuDNSEntry(name: tag.value, target: record.value, cloudclass: MU::Cloud::Server, delete: true) if !noop
2227
- }
2228
- end
2229
- }
2230
- end
2231
- }
2232
- end
2233
- end
1577
+ if MU::Cloud::AWS.hosted? and !MU::Cloud::AWS.isGovCloud? and server_obj
1578
+ targets.each { |target|
1579
+ MU::Cloud::DNSZone.genericMuDNSEntry(name: server_obj.mu_name, target: target, cloudclass: MU::Cloud::Server, delete: true, noop: noop)
1580
+ }
2234
1581
  end
2235
1582
 
2236
- if ips.size > 0 and !onlycloud
2237
- known_hosts_files = [Etc.getpwuid(Process.uid).dir+"/.ssh/known_hosts"]
2238
- if Etc.getpwuid(Process.uid).name == "root" and !MU.inGem?
2239
- begin
2240
- known_hosts_files << Etc.getpwnam("nagios").dir+"/.ssh/known_hosts"
2241
- rescue ArgumentError
2242
- # we're in a non-nagios environment and that's ok
2243
- end
2244
- end
2245
- known_hosts_files.each { |known_hosts|
2246
- next if !File.exist?(known_hosts)
2247
- MU.log "Cleaning up #{ips} from #{known_hosts}"
2248
- if !noop
2249
- File.open(known_hosts, File::CREAT|File::RDWR, 0644) { |f|
2250
- f.flock(File::LOCK_EX)
2251
- newlines = Array.new
2252
- f.readlines.each { |line|
2253
- ip_match = false
2254
- ips.each { |ip|
2255
- if line.match(/(^|,| )#{ip}( |,)/)
2256
- MU.log "Expunging #{ip} from #{known_hosts}"
2257
- ip_match = true
2258
- end
2259
- }
2260
- newlines << line if !ip_match
2261
- }
2262
- f.rewind
2263
- f.truncate(0)
2264
- f.puts(newlines)
2265
- f.flush
2266
- f.flock(File::LOCK_UN)
2267
- }
2268
- end
1583
+ if targets.size > 0 and !onlycloud
1584
+ MU::Master.removeInstanceFromEtcHosts(server_obj.mu_name) if !noop and server_obj
1585
+ targets.each { |target|
1586
+ next if !target.match(/^\d+\.\d+\.\d+\.\d+$/)
1587
+ MU::Master.removeIPFromSSHKnownHosts(target, noop: noop)
2269
1588
  }
2270
1589
  end
2271
1590
 
2272
- return if instance.nil?
1591
+ on_retry = Proc.new {
1592
+ instance = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_instances(instance_ids: [instance.instance_id]).reservations.first.instances.first
1593
+ if instance.state.name == "terminated"
1594
+ MU.log "#{instance.instance_id}#{server_obj ? " ("+server_obj.mu_name+")" : ""} has already been terminated, skipping"
1595
+ MU::MommaCat.unlock(".cleanup-"+id)
1596
+ return
1597
+ end
1598
+ }
2273
1599
 
2274
- name = ""
2275
- instance.tags.each { |tag|
2276
- name = tag.value if tag.key == "Name"
1600
+ loop_if = Proc.new {
1601
+ instance = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_instances(instance_ids: [instance.instance_id]).reservations.first.instances.first
1602
+ instance.state.name != "terminated"
2277
1603
  }
2278
1604
 
2279
- if instance.state.name == "terminated"
2280
- MU.log "#{instance.instance_id} (#{name}) has already been terminated, skipping"
2281
- else
2282
- if instance.state.name == "terminating"
2283
- MU.log "#{instance.instance_id} (#{name}) already terminating, waiting"
2284
- elsif instance.state.name != "running" and instance.state.name != "pending" and instance.state.name != "stopping" and instance.state.name != "stopped"
2285
- MU.log "#{instance.instance_id} (#{name}) is in state #{instance.state.name}, waiting"
2286
- else
2287
- MU.log "Terminating #{instance.instance_id} (#{name}) #{noop}"
2288
- if !noop
2289
- begin
2290
- MU::Cloud::AWS.ec2(credentials: credentials, region: region).modify_instance_attribute(
2291
- instance_id: instance.instance_id,
2292
- disable_api_termination: {value: false}
2293
- )
2294
- MU::Cloud::AWS.ec2(credentials: credentials, region: region).terminate_instances(instance_ids: [instance.instance_id])
2295
- # Small race window here with the state changing from under us
2296
- rescue Aws::EC2::Errors::IncorrectInstanceState => e
2297
- resp = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_instances(instance_ids: [id])
2298
- if !resp.nil? and !resp.reservations.nil? and !resp.reservations.first.nil?
2299
- instance = resp.reservations.first.instances.first
2300
- if !instance.nil? and instance.state.name != "terminated" and instance.state.name != "terminating"
2301
- sleep 5
2302
- retry
2303
- end
2304
- end
2305
- rescue Aws::EC2::Errors::InternalError => e
2306
- MU.log "Error #{e.inspect} while Terminating instance #{instance.instance_id} (#{name}), retrying", MU::WARN, details: e.inspect
2307
- sleep 5
2308
- retry
2309
- end
2310
- end
2311
- end
2312
- while instance.state.name != "terminated" and !noop
2313
- sleep 30
2314
- instance_response = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_instances(instance_ids: [instance.instance_id])
2315
- instance = instance_response.reservations.first.instances.first
2316
- end
2317
- MU.log "#{instance.instance_id} (#{name}) terminated" if !noop
1605
+ MU.log "Terminating #{instance.instance_id}#{server_obj ? " ("+server_obj.mu_name+")" : ""}"
1606
+ if !noop
1607
+ MU.retrier([Aws::EC2::Errors::IncorrectInstanceState, Aws::EC2::Errors::InternalError], wait: 30, max: 60, loop_if: loop_if, on_retry: on_retry) {
1608
+ MU::Cloud::AWS.ec2(credentials: credentials, region: region).modify_instance_attribute(
1609
+ instance_id: instance.instance_id,
1610
+ disable_api_termination: {value: false}
1611
+ )
1612
+ MU::Cloud::AWS.ec2(credentials: credentials, region: region).terminate_instances(instance_ids: [instance.instance_id])
1613
+ }
2318
1614
  end
1615
+
1616
+ MU.log "#{instance.instance_id}#{server_obj ? " ("+server_obj.mu_name+")" : ""} terminated" if !noop
1617
+ begin
1618
+ MU::MommaCat.unlock(".cleanup-"+id)
1619
+ rescue Errno::ENOENT => e
1620
+ MU.log "No lock for terminating instance #{id} due to missing metadata", MU::DEBUG
1621
+ end
1622
+
2319
1623
  end
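The termination loop above is one of several call sites converted to the MU.retrier helper in this release. A hedged sketch of the calling pattern as it appears in these diffs; the predicate Proc is a stand-in, not part of the changeset.

keep_looping = Proc.new { false }   # stand-in predicate; real call sites poll EC2 state here
MU.retrier([Aws::EC2::Errors::InternalError], wait: 30, max: 60, loop_if: keep_looping) {
  # work to attempt; the block is re-run while loop_if returns true and retried on the listed errors
}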
2320
1624
 
2321
1625
  # Return a BoK-style config hash describing a NAT instance. We use this
@@ -2336,15 +1640,19 @@ module MU
2336
1640
  end
2337
1641
 
2338
1642
  # Cloud-specific configuration properties.
2339
- # @param config [MU::Config]: The calling MU::Config object
1643
+ # @param _config [MU::Config]: The calling MU::Config object
2340
1644
  # @return [Array<Array,Hash>]: List of required fields, and json-schema Hash of cloud-specific configuration parameters for this resource
2341
- def self.schema(config)
1645
+ def self.schema(_config)
2342
1646
  toplevel_required = []
2343
1647
  schema = {
2344
1648
  "ami_id" => {
2345
1649
  "type" => "string",
2346
1650
  "description" => "Alias for +image_id+"
2347
1651
  },
1652
+ "windows_admin_username" => {
1653
+ "type" => "string",
1654
+ "default" => "Administrator"
1655
+ },
2348
1656
  "generate_iam_role" => {
2349
1657
  "type" => "boolean",
2350
1658
  "default" => true,
@@ -2368,25 +1676,47 @@ module MU
2368
1676
  "type" => "object"
2369
1677
  }
2370
1678
  },
2371
- "ingress_rules" => {
2372
- "items" => {
2373
- "properties" => {
2374
- "sgs" => {
2375
- "type" => "array",
2376
- "items" => {
2377
- "description" => "Other AWS Security Groups; resources that are associated with this group will have this rule applied to their traffic",
2378
- "type" => "string"
2379
- }
2380
- },
2381
- "lbs" => {
2382
- "type" => "array",
2383
- "items" => {
2384
- "description" => "AWS Load Balancers which will have this rule applied to their traffic",
2385
- "type" => "string"
2386
- }
2387
- }
1679
+ "ingress_rules" => MU::Cloud.resourceClass("AWS", "FirewallRule").ingressRuleAddtlSchema,
1680
+ "ssh_user" => {
1681
+ "type" => "string",
1682
+ "default" => "root",
1683
+ "default_if" => [
1684
+ {
1685
+ "key_is" => "platform",
1686
+ "value_is" => "windows",
1687
+ "set" => "Administrator"
1688
+ },
1689
+ {
1690
+ "key_is" => "platform",
1691
+ "value_is" => "win2k12",
1692
+ "set" => "Administrator"
1693
+ },
1694
+ {
1695
+ "key_is" => "platform",
1696
+ "value_is" => "win2k12r2",
1697
+ "set" => "Administrator"
1698
+ },
1699
+ {
1700
+ "key_is" => "platform",
1701
+ "value_is" => "win2k16",
1702
+ "set" => "Administrator"
1703
+ },
1704
+ {
1705
+ "key_is" => "platform",
1706
+ "value_is" => "rhel7",
1707
+ "set" => "ec2-user"
1708
+ },
1709
+ {
1710
+ "key_is" => "platform",
1711
+ "value_is" => "rhel71",
1712
+ "set" => "ec2-user"
1713
+ },
1714
+ {
1715
+ "key_is" => "platform",
1716
+ "value_is" => "amazon",
1717
+ "set" => "ec2-user"
2388
1718
  }
2389
- }
1719
+ ]
2390
1720
  }
2391
1721
  }
2392
1722
  [toplevel_required, schema]
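The default_if entries added to ssh_user above mean a Basket of Kittens only needs to declare platform; for example, this hypothetical server stanza (shown here as the equivalent Ruby hash) would pick up ssh_user "ec2-user" without setting it explicitly.

server = {
  "name"     => "example-node",   # hypothetical resource name
  "platform" => "amazon",         # matches a default_if rule above, so ssh_user defaults to "ec2-user"
  "size"     => "t3.small"        # hypothetical instance size
}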
@@ -2414,8 +1744,7 @@ module MU
2414
1744
 
2415
1745
  MU::Cloud.availableClouds.each { |cloud|
2416
1746
  next if cloud == "AWS"
2417
- cloudbase = Object.const_get("MU").const_get("Cloud").const_get(cloud)
2418
- foreign_types = (cloudbase.listInstanceTypes).values.first
1747
+ foreign_types = (MU::Cloud.cloudClass(cloud).listInstanceTypes).values.first
2419
1748
  if foreign_types.size == 1
2420
1749
  foreign_types = foreign_types.values.first
2421
1750
  end
@@ -2446,6 +1775,45 @@ module MU
2446
1775
  size
2447
1776
  end
2448
1777
 
1778
+ # Boilerplate generation of an instance role
1779
+ # @param server [Hash]: The BoK-style config hash for a +Server+ or +ServerPool+
1780
+ # @param configurator [MU::Config]
1781
+ def self.generateStandardRole(server, configurator)
1782
+ role = {
1783
+ "name" => server["name"],
1784
+ "credentials" => server["credentials"],
1785
+ "can_assume" => [
1786
+ {
1787
+ "entity_id" => "ec2.amazonaws.com",
1788
+ "entity_type" => "service"
1789
+ }
1790
+ ],
1791
+ "policies" => [
1792
+ {
1793
+ "name" => "MuSecrets",
1794
+ "permissions" => ["s3:GetObject"],
1795
+ "targets" => [
1796
+ {
1797
+ "identifier" => 'arn:'+(MU::Cloud::AWS.isGovCloud?(server['region']) ? "aws-us-gov" : "aws")+':s3:::'+MU::Cloud::AWS.adminBucketName(server['credentials'])+'/Mu_CA.pem'
1798
+ }
1799
+ ]
1800
+ }
1801
+ ]
1802
+ }
1803
+ if server['iam_policies']
1804
+ role['iam_policies'] = server['iam_policies'].dup
1805
+ end
1806
+ if server['canned_iam_policies']
1807
+ role['import'] = server['canned_iam_policies'].dup
1808
+ end
1809
+ if server['iam_role']
1810
+ # XXX maybe break this down into policies and add those?
1811
+ end
1812
+
1813
+ configurator.insertKitten(role, "roles")
1814
+ MU::Config.addDependency(server, server["name"], "role")
1815
+ end
1816
+
2449
1817
  # Cloud-specific pre-processing of {MU::Config::BasketofKittens::servers}, bare and unvalidated.
2450
1818
  # @param server [Hash]: The resource to process and validate
2451
1819
  # @param configurator [MU::Config]: The overall deployment configurator of which this resource is a member
@@ -2466,43 +1834,7 @@ module MU
2466
1834
  ok = false
2467
1835
  end
2468
1836
  else
2469
- role = {
2470
- "name" => server["name"],
2471
- "credentials" => server["credentials"],
2472
- "can_assume" => [
2473
- {
2474
- "entity_id" => "ec2.amazonaws.com",
2475
- "entity_type" => "service"
2476
- }
2477
- ],
2478
- "policies" => [
2479
- {
2480
- "name" => "MuSecrets",
2481
- "permissions" => ["s3:GetObject"],
2482
- "targets" => [
2483
- {
2484
- "identifier" => 'arn:'+(MU::Cloud::AWS.isGovCloud?(server['region']) ? "aws-us-gov" : "aws")+':s3:::'+MU::Cloud::AWS.adminBucketName(server['credentials'])+'/Mu_CA.pem'
2485
- }
2486
- ]
2487
- }
2488
- ]
2489
- }
2490
- if server['iam_policies']
2491
- role['iam_policies'] = server['iam_policies'].dup
2492
- end
2493
- if server['canned_iam_policies']
2494
- role['import'] = server['canned_iam_policies'].dup
2495
- end
2496
- if server['iam_role']
2497
- # XXX maybe break this down into policies and add those?
2498
- end
2499
-
2500
- configurator.insertKitten(role, "roles")
2501
- server["dependencies"] ||= []
2502
- server["dependencies"] << {
2503
- "type" => "role",
2504
- "name" => server["name"]
2505
- }
1837
+ generateStandardRole(server, configurator)
2506
1838
  end
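As the removed lines show, MU::Config.addDependency is what this release swaps in for appending the dependency hash by hand; the two forms express the same relationship.

# Old style, removed above:
server["dependencies"] ||= []
server["dependencies"] << { "type" => "role", "name" => server["name"] }
# New style used throughout this release:
MU::Config.addDependency(server, server["name"], "role")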
2507
1839
  if !server['create_image'].nil?
2508
1840
  if server['create_image'].has_key?('copy_to_regions') and
@@ -2514,12 +1846,12 @@ module MU
2514
1846
  end
2515
1847
  end
2516
1848
 
2517
- server['ami_id'] ||= server['image_id']
1849
+ server['image_id'] ||= server['ami_id']
2518
1850
 
2519
- if server['ami_id'].nil?
1851
+ if server['image_id'].nil?
2520
1852
  img_id = MU::Cloud.getStockImage("AWS", platform: server['platform'], region: server['region'])
2521
1853
  if img_id
2522
- server['ami_id'] = configurator.getTail("server"+server['name']+"AMI", value: img_id, prettyname: "server"+server['name']+"AMI", cloudtype: "AWS::EC2::Image::Id")
1854
+ server['image_id'] = configurator.getTail("server"+server['name']+"AMI", value: img_id, prettyname: "server"+server['name']+"AMI", cloudtype: "AWS::EC2::Image::Id")
2523
1855
  else
2524
1856
  MU.log "No AMI specified for #{server['name']} and no default available for platform #{server['platform']} in region #{server['region']}", MU::ERR, details: server
2525
1857
  ok = false
@@ -2528,22 +1860,13 @@ module MU
2528
1860
 
2529
1861
  if !server["loadbalancers"].nil?
2530
1862
  server["loadbalancers"].each { |lb|
2531
- if lb["concurrent_load_balancer"] != nil
2532
- server["dependencies"] << {
2533
- "type" => "loadbalancer",
2534
- "name" => lb["concurrent_load_balancer"]
2535
- }
1863
+ lb["name"] ||= lb["concurrent_load_balancer"]
1864
+ if lb["name"]
1865
+ MU::Config.addDependency(server, lb["name"], "loadbalancer")
2536
1866
  end
2537
1867
  }
2538
1868
  end
2539
1869
 
2540
- if !server["vpc"].nil?
2541
- if server["vpc"]["subnet_name"].nil? and server["vpc"]["subnet_id"].nil? and server["vpc"]["subnet_pref"].nil?
2542
- MU.log "A server VPC block must specify a target subnet", MU::ERR
2543
- ok = false
2544
- end
2545
- end
2546
-
2547
1870
  ok
2548
1871
  end
2549
1872
 
@@ -2556,28 +1879,26 @@ module MU
2556
1879
  img = MU::Cloud::AWS.ec2(region: region, credentials: credentials).describe_images(image_ids: [ami_id]).images.first
2557
1880
  return DateTime.new if img.nil?
2558
1881
  return DateTime.parse(img.creation_date)
2559
- rescue Aws::EC2::Errors::InvalidAMIIDNotFound => e
1882
+ rescue Aws::EC2::Errors::InvalidAMIIDNotFound
2560
1883
  end
2561
1884
 
2562
1885
  return DateTime.new
2563
1886
  end
2564
1887
 
2565
- private
2566
-
2567
1888
  # Destroy a volume.
2568
1889
  # @param volume [OpenStruct]: The cloud provider's description of the volume.
2569
- # @param id [String]: The cloud provider's identifier for the volume, to use if the full description is not available.
2570
1890
  # @param region [String]: The cloud provider region
2571
1891
  # @return [void]
2572
- def self.delete_volume(volume, noop, skipsnapshots, id: nil, region: MU.curRegion, credentials: nil)
1892
+ def self.delete_volume(volume, noop, skipsnapshots, region: MU.curRegion, credentials: nil)
2573
1893
  if !volume.nil?
2574
1894
  resp = MU::Cloud::AWS.ec2(region: region, credentials: credentials).describe_volumes(volume_ids: [volume.volume_id])
2575
1895
  volume = resp.data.volumes.first
2576
1896
  end
2577
- name = ""
1897
+ name = nil
2578
1898
  volume.tags.each { |tag|
2579
1899
  name = tag.value if tag.key == "Name"
2580
1900
  }
1901
+ name ||= volume.volume_id
2581
1902
 
2582
1903
  MU.log("Deleting volume #{volume.volume_id} (#{name})")
2583
1904
  if !noop
@@ -2600,31 +1921,412 @@ module MU
2600
1921
  end
2601
1922
  end
2602
1923
 
2603
- retries = 0
2604
1924
  begin
2605
- MU::Cloud::AWS.ec2(region: region, credentials: credentials).delete_volume(volume_id: volume.volume_id)
2606
- rescue Aws::EC2::Errors::IncorrectState => e
2607
- MU.log "Volume #{volume.volume_id} (#{name}) in incorrect state (#{e.message}), will retry", MU::WARN
2608
- sleep 30
2609
- retry
2610
- rescue Aws::EC2::Errors::InvalidVolumeNotFound
2611
- MU.log "Volume #{volume.volume_id} (#{name}) disappeared before I could remove it!", MU::WARN
1925
+ MU.retrier([Aws::EC2::Errors::IncorrectState, Aws::EC2::Errors::VolumeInUse], ignoreme: [Aws::EC2::Errors::InvalidVolumeNotFound], wait: 30, max: 10){
1926
+ MU::Cloud::AWS.ec2(region: region, credentials: credentials).delete_volume(volume_id: volume.volume_id)
1927
+ }
2612
1928
  rescue Aws::EC2::Errors::VolumeInUse
2613
- if retries < 10
2614
- volume.attachments.each { |attachment|
2615
- MU.log "#{volume.volume_id} is attached to #{attachment.instance_id} as #{attachment.device}", MU::NOTICE
2616
- }
2617
- MU.log "Volume '#{name}' is still attached, waiting...", MU::NOTICE
2618
- sleep 30
2619
- retries = retries + 1
2620
- retry
1929
+ MU.log "Failed to delete #{name}", MU::ERR
1930
+ end
1931
+
1932
+ end
1933
+ end
1934
+ private_class_method :delete_volume
1935
+
1936
+ # Given some combination of a base image, BoK-configured storage, and
1937
+ # ephemeral devices, return the structure passed to EC2 to declare
1938
+ # block device mappings.
1939
+ # @param image_id [String]
1940
+ # @param storage [Array]
1941
+ # @param add_ephemeral [Boolean]
1942
+ # @param region [String]
1943
+ # @param credentials [String]
1944
+ def self.configureBlockDevices(image_id: nil, storage: nil, add_ephemeral: true, region: MU.myRegion, credentials: nil)
1945
+ ext_disks = {}
1946
+
1947
+ # Figure out which devices are embedded in the AMI already.
1948
+ if image_id
1949
+ image = MU::Cloud::AWS.ec2(region: region, credentials: credentials).describe_images(image_ids: [image_id]).images.first
1950
+ if !image.block_device_mappings.nil?
1951
+ image.block_device_mappings.each { |disk|
1952
+ if !disk.device_name.nil? and !disk.device_name.empty? and !disk.ebs.nil? and !disk.ebs.empty?
1953
+ ext_disks[disk.device_name] = MU.structToHash(disk.ebs)
1954
+ end
1955
+ }
1956
+ end
1957
+ end
1958
+
1959
+ configured_storage = []
1960
+ if storage
1961
+ storage.each { |vol|
1962
+ # Drop the "encrypted" flag if a snapshot for this device exists
1963
+ # in the AMI, even if they both agree about the value of said
1964
+ # flag. Apparently that's a thing now.
1965
+ if ext_disks.has_key?(vol["device"])
1966
+ if ext_disks[vol["device"]].has_key?(:snapshot_id)
1967
+ vol.delete("encrypted")
1968
+ end
1969
+ end
1970
+ mapping, _cfm_mapping = MU::Cloud::AWS::Server.convertBlockDeviceMapping(vol)
1971
+ configured_storage << mapping
1972
+ }
1973
+ end
1974
+
1975
+ configured_storage.concat(@ephemeral_mappings) if add_ephemeral
1976
+
1977
+ configured_storage
1978
+ end
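A hedged usage sketch; the AMI id is a placeholder and the storage hash fields are assumptions about the BoK storage schema that convertBlockDeviceMapping consumes.

mappings = MU::Cloud::AWS::Server.configureBlockDevices(
  image_id: "ami-0123456789abcdef0",                      # hypothetical AMI
  storage: [ { "device" => "/dev/xvdg", "size" => 50 } ], # assumed BoK-style volume entry
  region: "us-east-1"
)
# 'mappings' is the structure handed to EC2 as block device mappings when launching the instance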
1979
+
+     # Return all of the IP addresses, public and private, from all of our
+     # network interfaces.
+     # @return [Array<String>]
+     def listIPs
+       MU::Cloud::AWS::Server.getAddresses(cloud_desc).first
+     end
+
+     private
+
+     def bootstrapGroomer
+       if (@config['groom'].nil? or @config['groom']) and !@groomer.haveBootstrapped?
+         MU.retrier([BootstrapTempFail], wait: 45) {
+           if windows?
+             # kick off certificate generation early; WinRM will need it
+             @deploy.nodeSSLCerts(self)
+             @deploy.nodeSSLCerts(self, true) if @config.has_key?("basis")
+             session = getWinRMSession(50, 60, reboot_on_problems: true)
+             initialWinRMTasks(session)
+             begin
+               session.close
+             rescue StandardError
+               # session.close is allowed to fail; we're probably rebooting
+             end
            else
-             MU.log "Failed to delete #{name}", MU::ERR
+             session = getSSHSession(40, 30)
+             initialSSHTasks(session)
+           end
+         }
+       end
+
+       # See if this node already exists in our config management. If it
+       # does, we're done.
+
+       if MU.inGem?
+         MU.log "Deploying from a gem, not grooming"
+       elsif @config['groom'].nil? or @config['groom']
+         if @groomer.haveBootstrapped?
+           MU.log "Node #{@mu_name} has already been bootstrapped, skipping groomer setup.", MU::NOTICE
+         else
+           begin
+             @groomer.bootstrap
+           rescue MU::Groomer::RunError
+             return false
            end
          end
+         @groomer.saveDeployData
+       end
+
+       true
+     end
+
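The MU.retrier call above re-runs the whole initial-connection block whenever one of the listed exception classes is raised, pausing between attempts. A rough sketch of that pattern in plain Ruby (the retry_on helper below is illustrative, not Mu's actual implementation; the session helpers and BootstrapTempFail are the ones referenced in the code above):

    # Illustrative retry-on-exception loop approximating what
    # MU.retrier([BootstrapTempFail], wait: 45) { ... } is used for.
    def retry_on(exceptions, wait: 45, max: 10)
      attempts = 0
      begin
        yield
      rescue *exceptions
        attempts += 1
        raise if attempts >= max   # give up after max attempts
        sleep wait
        retry
      end
    end

    retry_on([BootstrapTempFail], wait: 45) do
      session = windows? ? getWinRMSession(50, 60) : getSSHSession(40, 30)
      # ... initial host setup against the session ...
    end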
+     def saveCredentials(win_admin_password = nil)
+       ec2config_password = nil
+       sshd_password = nil
+       if windows?
+         if @config['use_cloud_provider_windows_password']
+           win_admin_password ||= getWindowsAdminPassword
+         elsif @config['windows_auth_vault'] and !@config['windows_auth_vault'].empty?
+           if @config["windows_auth_vault"].has_key?("password_field")
+             win_admin_password ||= @groomer.getSecret(
+               vault: @config['windows_auth_vault']['vault'],
+               item: @config['windows_auth_vault']['item'],
+               field: @config["windows_auth_vault"]["password_field"]
+             )
+           else
+             win_admin_password ||= getWindowsAdminPassword
+           end
+
+           if @config["windows_auth_vault"].has_key?("ec2config_password_field")
+             ec2config_password = @groomer.getSecret(
+               vault: @config['windows_auth_vault']['vault'],
+               item: @config['windows_auth_vault']['item'],
+               field: @config["windows_auth_vault"]["ec2config_password_field"]
+             )
+           end
+
+           if @config["windows_auth_vault"].has_key?("sshd_password_field")
+             sshd_password = @groomer.getSecret(
+               vault: @config['windows_auth_vault']['vault'],
+               item: @config['windows_auth_vault']['item'],
+               field: @config["windows_auth_vault"]["sshd_password_field"]
+             )
+           end
+         end
+
+         win_admin_password ||= MU.generateWindowsPassword
+         ec2config_password ||= MU.generateWindowsPassword
+         sshd_password ||= MU.generateWindowsPassword
+
+         # We're creating the vault here so when we run
+         # MU::Cloud::Server.initialSSHTasks and we need to set the Windows
+         # Admin password we can grab it from said vault.
+         creds = {
+           "username" => @config['windows_admin_username'],
+           "password" => win_admin_password,
+           "ec2config_username" => "ec2config",
+           "ec2config_password" => ec2config_password,
+           "sshd_username" => "sshd_service",
+           "sshd_password" => sshd_password
+         }
+         @groomer.saveSecret(vault: @mu_name, item: "windows_credentials", data: creds, permissions: "name:#{@mu_name}")
        end
      end
 
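Later steps read that vault back rather than regenerating passwords. A sketch of how a caller might retrieve the Windows credentials written above (the vault, item, and field names come from the creds hash and saveSecret call; error handling is omitted):

    # Sketch only: reading back the vault written by saveCredentials.
    admin_user = @groomer.getSecret(vault: @mu_name, item: "windows_credentials", field: "username")
    admin_pass = @groomer.getSecret(vault: @mu_name, item: "windows_credentials", field: "password")
    # e.g. feed these to WinRM session setup, or to initialSSHTasks when it
    # needs to (re)set the local Administrator password.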
+     def haveElasticIP?
+       if !cloud_desc.public_ip_address.nil?
+         begin
+           resp = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_addresses(public_ips: [cloud_desc.public_ip_address])
+           if resp.addresses.size > 0 and resp.addresses.first.instance_id == @cloud_id
+             return true
+           end
+         rescue Aws::EC2::Errors::InvalidAddressNotFound
+           # XXX this is ok to ignore, it means the public IP isn't Elastic
+         end
+       end
+
+       false
+     end
+
+     def configureNetworking
+       if !@config['static_ip'].nil?
+         if !@config['static_ip']['ip'].nil?
+           MU::Cloud::AWS::Server.associateElasticIp(@cloud_id, classic: @vpc.nil?, ip: @config['static_ip']['ip'])
+         elsif !haveElasticIP?
+           MU::Cloud::AWS::Server.associateElasticIp(@cloud_id, classic: @vpc.nil?)
+         end
+       end
+
+       if !@vpc.nil? and @config.has_key?("vpc")
+         subnet = @vpc.getSubnet(cloud_id: cloud_desc.subnet_id)
+
+         _nat_ssh_key, _nat_ssh_user, nat_ssh_host, _canonical_ip, _ssh_user, _ssh_key_name = getSSHConfig
+         if subnet.private? and !nat_ssh_host and !MU::Cloud.resourceClass("AWS", "VPC").haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials'])
+           raise MuError, "#{@mu_name} is in a private subnet (#{subnet}), but has no bastion host configured, and I have no other route to it"
+         end
+
+         # If we've asked for additional subnets (and this @config is not a
+         # member of a Server Pool, which has different semantics), create
+         # extra interfaces to accommodate.
+         if !@config['vpc']['subnets'].nil? and @config['basis'].nil?
+           device_index = 1
+           mySubnets.each { |s|
+             next if s.cloud_id == cloud_desc.subnet_id
+
+             if cloud_desc.placement.availability_zone != s.az
+               MU.log "Cannot create interface in subnet #{s.to_s} for #{@mu_name} due to AZ mismatch", MU::WARN
+               next
+             end
+             MU.log "Adding network interface on subnet #{s.cloud_id} for #{@mu_name}"
+             iface = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).create_network_interface(subnet_id: s.cloud_id).network_interface
+             MU::Cloud::AWS.createStandardTags(
+               iface.network_interface_id,
+               region: @config['region'],
+               credentials: @config['credentials'],
+               optional: @config['optional_tags'],
+               nametag: @mu_name+"-ETH"+device_index.to_s,
+               othertags: @config['tags']
+             )
+
+             MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).attach_network_interface(
+               network_interface_id: iface.network_interface_id,
+               instance_id: cloud_desc.instance_id,
+               device_index: device_index
+             )
+             device_index = device_index + 1
+           }
+           cloud_desc(use_cache: false)
+         end
+       end
+
+       [:private_dns_name, :public_dns_name, :private_ip_address, :public_ip_address].each { |field|
+         @config[field.to_s] = cloud_desc.send(field)
+       }
+
+       if !@config['add_private_ips'].nil?
+         cloud_desc.network_interfaces.each { |int|
+           if int.private_ip_address == cloud_desc.private_ip_address and int.private_ip_addresses.size < (@config['add_private_ips'] + 1)
+             MU.log "Adding #{@config['add_private_ips']} extra private IP addresses to #{cloud_desc.instance_id}"
+             MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).assign_private_ip_addresses(
+               network_interface_id: int.network_interface_id,
+               secondary_private_ip_address_count: @config['add_private_ips'],
+               allow_reassignment: false
+             )
+           end
+         }
+       end
+     end
+
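The keys configureNetworking consults (static_ip, vpc/subnets, add_private_ips) come from the server's parsed deploy config. A hypothetical @config fragment, as a Ruby hash, showing inputs that would exercise each branch (only the key names are taken from the method above; the values and the shape of the subnet entries are invented):

    # Hypothetical @config fragment; values are illustrative.
    @config = {
      "static_ip"       => { "ip" => "203.0.113.10" },  # omit "ip" to request any free EIP
      "vpc"             => { "subnets" => [{ "subnet_name" => "private-a" }, { "subnet_name" => "private-b" }] },
      "add_private_ips" => 2                            # extra secondary private IPs per interface
    }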
+     def tagVolumes
+       volumes = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_volumes(filters: [name: "attachment.instance-id", values: [@cloud_id]])
+       volumes.each { |vol|
+         vol.volumes.each { |volume|
+           volume.attachments.each { |attachment|
+             MU::Cloud::AWS.createStandardTags(
+               attachment.volume_id,
+               region: @config['region'],
+               credentials: @config['credentials'],
+               optional: @config['optional_tags'],
+               nametag: ["/dev/sda", "/dev/sda1"].include?(attachment.device) ? "ROOT-"+@mu_name : @mu_name+"-"+attachment.device.upcase,
+               othertags: @config['tags']
+             )
+
+           }
+         }
+       }
+     end
+
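The nametag ternary above yields a predictable Name tag for each attached volume; for example (the node name is made up, the expression is the one from the code):

    # Illustration of the nametag rule, with a hypothetical node name.
    mu_name = "MYAPP-DEV-2021031200-XY-DB"
    ["/dev/sda1", "/dev/xvdf"].map { |dev|
      ["/dev/sda", "/dev/sda1"].include?(dev) ? "ROOT-"+mu_name : mu_name+"-"+dev.upcase
    }
    # => ["ROOT-MYAPP-DEV-2021031200-XY-DB", "MYAPP-DEV-2021031200-XY-DB-/DEV/XVDF"]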
+     # If we came up via AutoScale, the Alarm module won't have had our
+     # instance ID to associate us with itself. So invoke that here.
+     # XXX might be possible to do this with regular alarm resources and
+     # dependencies now
+     def setAlarms
+       if !@config['basis'].nil? and @config["alarms"] and !@config["alarms"].empty?
+         @config["alarms"].each { |alarm|
+           alarm_obj = MU::MommaCat.findStray(
+             "AWS",
+             "alarms",
+             region: @config["region"],
+             deploy_id: @deploy.deploy_id,
+             name: alarm['name']
+           ).first
+           alarm["dimensions"] = [{:name => "InstanceId", :value => @cloud_id}]
+
+           if alarm["enable_notifications"]
+             topic_arn = MU::Cloud.resourceClass("AWS", "Notification").createTopic(alarm["notification_group"], region: @config["region"], credentials: @config['credentials'])
+             MU::Cloud.resourceClass("AWS", "Notification").subscribe(arn: topic_arn, protocol: alarm["notification_type"], endpoint: alarm["notification_endpoint"], region: @config["region"], credentials: @config["credentials"])
+             alarm["alarm_actions"] = [topic_arn]
+             alarm["ok_actions"] = [topic_arn]
+           end
+
+           alarm_name = alarm_obj ? alarm_obj.cloud_id : "#{@mu_name}-#{alarm['name']}".upcase
+
+           MU::Cloud.resourceClass("AWS", "Alarm").setAlarm(
+             name: alarm_name,
+             ok_actions: alarm["ok_actions"],
+             alarm_actions: alarm["alarm_actions"],
+             insufficient_data_actions: alarm["no_data_actions"],
+             metric_name: alarm["metric_name"],
+             namespace: alarm["namespace"],
+             statistic: alarm["statistic"],
+             dimensions: alarm["dimensions"],
+             period: alarm["period"],
+             unit: alarm["unit"],
+             evaluation_periods: alarm["evaluation_periods"],
+             threshold: alarm["threshold"],
+             comparison_operator: alarm["comparison_operator"],
+             region: @config["region"],
+             credentials: @config['credentials']
+           )
+         }
+       end
+     end
+
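Those keyword arguments map one-to-one onto a CloudWatch metric alarm scoped to this instance. A hypothetical alarm entry the loop above could consume (key names are the ones read in setAlarms; values are invented but use standard CloudWatch metric/operator names):

    # Hypothetical alarm config entry; values are illustrative.
    alarm = {
      "name"                  => "high-cpu",
      "metric_name"           => "CPUUtilization",
      "namespace"             => "AWS/EC2",
      "statistic"             => "Average",
      "period"                => 300,
      "evaluation_periods"    => 2,
      "threshold"             => 90,
      "comparison_operator"   => "GreaterThanOrEqualToThreshold",
      "enable_notifications"  => true,
      "notification_group"    => "ops-alerts",
      "notification_type"     => "email",
      "notification_endpoint" => "ops@example.com"
    }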
+     # We have issues sometimes where our dns_records are pointing at the wrong node name and IP address.
+
+     def getIAMProfile
+       arn = if @config['generate_iam_role']
+         role = @deploy.findLitterMate(name: @config['name'], type: "roles")
+         s3_objs = ["#{@deploy.deploy_id}-secret", "#{role.mu_name}.pfx", "#{role.mu_name}.crt", "#{role.mu_name}.key", "#{role.mu_name}-winrm.crt", "#{role.mu_name}-winrm.key"].map { |file|
+           'arn:'+(MU::Cloud::AWS.isGovCloud?(@config['region']) ? "aws-us-gov" : "aws")+':s3:::'+MU::Cloud::AWS.adminBucketName(@credentials)+'/'+file
+         }
+         MU.log "Adding S3 read permissions to #{@mu_name}'s IAM profile", MU::NOTICE, details: s3_objs
+         role.cloudobj.injectPolicyTargets("MuSecrets", s3_objs)
+
+         @config['iam_role'] = role.mu_name
+         role.cloudobj.createInstanceProfile
+
+       elsif @config['iam_role'].nil?
+         raise MuError, "#{@mu_name} has generate_iam_role set to false, but no iam_role assigned."
+       end
+
+       if !@config["iam_role"].nil?
+         if arn
+           return {arn: arn}
+         else
+           return {name: @config["iam_role"]}
+         end
+       end
+
+       nil
+     end
+
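The hash getIAMProfile returns ({arn: ...} or {name: ...}) matches the shape of EC2's iam_instance_profile parameter, so the caller can splice it directly into the instance request. A sketch (the surrounding instance_descriptor and its other fields are assumed, not from the code above):

    # Sketch: wiring the return value into an EC2 instance request.
    instance_descriptor = {
      # ... image_id, min_count, max_count, and other run parameters ...
    }
    profile = getIAMProfile
    instance_descriptor[:iam_instance_profile] = profile if profile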
+     def setDeleteOntermination(device, delete_on_termination = false)
+       mappings = MU.structToHash(cloud_desc.block_device_mappings)
+       mappings.each { |vol|
+         if vol[:ebs]
+           vol[:ebs].delete(:attach_time)
+           vol[:ebs].delete(:status)
+         end
+         if vol[:device_name] == device
+           if vol[:ebs][:delete_on_termination] != delete_on_termination
+             vol[:ebs][:delete_on_termination] = delete_on_termination
+             MU.log "Setting delete_on_termination flag to #{delete_on_termination.to_s} on #{@mu_name}'s #{device}"
+             MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).modify_instance_attribute(
+               instance_id: @cloud_id,
+               block_device_mappings: mappings
+             )
+           end
+           return true
+         end
+       }
+
+       false
+     end
+
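For context, modify_instance_attribute accepts the same block-device-mapping shape, carrying only the fields being changed. A minimal example of the payload (the instance ID and device name are invented; in the method above the full, scrubbed mapping list is sent instead):

    # Illustrative modify_instance_attribute payload.
    MU::Cloud::AWS.ec2(region: "us-east-1").modify_instance_attribute(
      instance_id: "i-0123456789abcdef0",
      block_device_mappings: [
        { device_name: "/dev/sdf", ebs: { delete_on_termination: false } }
      ]
    )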
+     def createImage
+       img_cfg = @config['create_image']
+       # Scrub things that don't belong on an AMI
+       session = windows? ? getWinRMSession : getSSHSession
+       sudo = purgecmd = ""
+       sudo = "sudo" if @config['ssh_user'] != "root"
+       if windows?
+         purgecmd = "rm -rf /cygdrive/c/mu_installed_chef"
+       else
+         purgecmd = "rm -rf /opt/mu_installed_chef"
+       end
+       if img_cfg['image_then_destroy']
+         if windows?
+           purgecmd = "rm -rf /cygdrive/c/chef/ /home/#{@config['windows_admin_username']}/.ssh/authorized_keys /home/Administrator/.ssh/authorized_keys /cygdrive/c/mu-installer-ran-updates /cygdrive/c/mu_installed_chef"
+           # session.exec!("powershell -Command \"& {(Get-WmiObject -Class Win32_Product -Filter \"Name='UniversalForwarder'\").Uninstall()}\"")
+         else
+           purgecmd = "#{sudo} rm -rf /var/lib/cloud/instances/i-* /root/.ssh/authorized_keys /etc/ssh/ssh_host_*key* /etc/chef /etc/opscode/* /.mu-installer-ran-updates /var/chef /opt/mu_installed_chef /opt/chef ; #{sudo} sed -i 's/^HOSTNAME=.*//' /etc/sysconfig/network"
+         end
+       end
+       if windows?
+         session.run(purgecmd)
+       else
+         session.exec!(purgecmd)
+       end
+       session.close
+       ami_ids = MU::Cloud::AWS::Server.createImage(
+         name: @mu_name,
+         instance_id: @cloud_id,
+         storage: @config['storage'],
+         exclude_storage: img_cfg['image_exclude_storage'],
+         copy_to_regions: img_cfg['copy_to_regions'],
+         make_public: img_cfg['public'],
+         region: @config['region'],
+         tags: @config['tags'],
+         credentials: @config['credentials']
+       )
+       @deploy.notify("images", @config['name'], ami_ids)
+       @config['image_created'] = true
+       if img_cfg['image_then_destroy']
+         MU::Cloud::AWS::Server.waitForAMI(ami_ids[@config['region']], region: @config['region'], credentials: @config['credentials'])
+         MU.log "AMI #{ami_ids[@config['region']]} ready, removing source node #{@mu_name}"
+         MU::Cloud::AWS::Server.terminateInstance(id: @cloud_id, region: @config['region'], deploy_id: @deploy.deploy_id, mu_name: @mu_name, credentials: @config['credentials'])
+         destroy
+       end
+     end
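The img_cfg keys read above (image_then_destroy, public, copy_to_regions, image_exclude_storage) come from the server's create_image config block. A hypothetical fragment exercising the destroy-after-imaging path (key names are the ones the method reads; values are invented):

    # Hypothetical create_image block; values are illustrative.
    @config['create_image'] = {
      "image_then_destroy"    => true,
      "public"                => false,
      "copy_to_regions"       => ["us-west-2"],
      "image_exclude_storage" => false
    }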
 
    end #class
  end #class