cloud-mu 3.1.3 → 3.3.0

Files changed (212)
  1. checksums.yaml +4 -4
  2. data/Dockerfile +15 -3
  3. data/ansible/roles/mu-windows/README.md +33 -0
  4. data/ansible/roles/mu-windows/defaults/main.yml +2 -0
  5. data/ansible/roles/mu-windows/files/LaunchConfig.json +9 -0
  6. data/ansible/roles/mu-windows/files/config.xml +76 -0
  7. data/ansible/roles/mu-windows/handlers/main.yml +2 -0
  8. data/ansible/roles/mu-windows/meta/main.yml +53 -0
  9. data/ansible/roles/mu-windows/tasks/main.yml +36 -0
  10. data/ansible/roles/mu-windows/tests/inventory +2 -0
  11. data/ansible/roles/mu-windows/tests/test.yml +5 -0
  12. data/ansible/roles/mu-windows/vars/main.yml +2 -0
  13. data/bin/mu-adopt +21 -13
  14. data/bin/mu-azure-tests +57 -0
  15. data/bin/mu-cleanup +2 -4
  16. data/bin/mu-configure +52 -0
  17. data/bin/mu-deploy +3 -3
  18. data/bin/mu-findstray-tests +25 -0
  19. data/bin/mu-gen-docs +2 -4
  20. data/bin/mu-load-config.rb +4 -4
  21. data/bin/mu-node-manage +15 -16
  22. data/bin/mu-run-tests +147 -37
  23. data/cloud-mu.gemspec +22 -20
  24. data/cookbooks/mu-activedirectory/resources/domain.rb +4 -4
  25. data/cookbooks/mu-activedirectory/resources/domain_controller.rb +4 -4
  26. data/cookbooks/mu-tools/libraries/helper.rb +3 -2
  27. data/cookbooks/mu-tools/libraries/monkey.rb +35 -0
  28. data/cookbooks/mu-tools/recipes/apply_security.rb +14 -14
  29. data/cookbooks/mu-tools/recipes/aws_api.rb +9 -0
  30. data/cookbooks/mu-tools/recipes/eks.rb +2 -2
  31. data/cookbooks/mu-tools/recipes/google_api.rb +2 -2
  32. data/cookbooks/mu-tools/recipes/selinux.rb +2 -1
  33. data/cookbooks/mu-tools/recipes/windows-client.rb +163 -164
  34. data/cookbooks/mu-tools/resources/disk.rb +1 -1
  35. data/cookbooks/mu-tools/resources/windows_users.rb +44 -43
  36. data/extras/clean-stock-amis +25 -19
  37. data/extras/generate-stock-images +1 -0
  38. data/extras/image-generators/AWS/win2k12.yaml +18 -13
  39. data/extras/image-generators/AWS/win2k16.yaml +18 -13
  40. data/extras/image-generators/AWS/win2k19.yaml +21 -0
  41. data/extras/image-generators/Google/centos6.yaml +1 -0
  42. data/extras/image-generators/Google/centos7.yaml +1 -1
  43. data/modules/mommacat.ru +6 -16
  44. data/modules/mu.rb +158 -111
  45. data/modules/mu/adoption.rb +404 -71
  46. data/modules/mu/cleanup.rb +221 -306
  47. data/modules/mu/cloud.rb +129 -1633
  48. data/modules/mu/cloud/database.rb +49 -0
  49. data/modules/mu/cloud/dnszone.rb +44 -0
  50. data/modules/mu/cloud/machine_images.rb +212 -0
  51. data/modules/mu/cloud/providers.rb +81 -0
  52. data/modules/mu/cloud/resource_base.rb +926 -0
  53. data/modules/mu/cloud/server.rb +40 -0
  54. data/modules/mu/cloud/server_pool.rb +1 -0
  55. data/modules/mu/cloud/ssh_sessions.rb +228 -0
  56. data/modules/mu/cloud/winrm_sessions.rb +237 -0
  57. data/modules/mu/cloud/wrappers.rb +169 -0
  58. data/modules/mu/config.rb +171 -1767
  59. data/modules/mu/config/alarm.rb +2 -6
  60. data/modules/mu/config/bucket.rb +32 -3
  61. data/modules/mu/config/cache_cluster.rb +2 -2
  62. data/modules/mu/config/cdn.rb +100 -0
  63. data/modules/mu/config/collection.rb +4 -4
  64. data/modules/mu/config/container_cluster.rb +9 -4
  65. data/modules/mu/config/database.rb +84 -105
  66. data/modules/mu/config/database.yml +1 -2
  67. data/modules/mu/config/dnszone.rb +10 -9
  68. data/modules/mu/config/doc_helpers.rb +516 -0
  69. data/modules/mu/config/endpoint.rb +5 -4
  70. data/modules/mu/config/firewall_rule.rb +103 -4
  71. data/modules/mu/config/folder.rb +4 -4
  72. data/modules/mu/config/function.rb +19 -10
  73. data/modules/mu/config/group.rb +4 -4
  74. data/modules/mu/config/habitat.rb +4 -4
  75. data/modules/mu/config/job.rb +89 -0
  76. data/modules/mu/config/loadbalancer.rb +60 -14
  77. data/modules/mu/config/log.rb +4 -4
  78. data/modules/mu/config/msg_queue.rb +4 -4
  79. data/modules/mu/config/nosqldb.rb +4 -4
  80. data/modules/mu/config/notifier.rb +10 -21
  81. data/modules/mu/config/ref.rb +411 -0
  82. data/modules/mu/config/role.rb +4 -4
  83. data/modules/mu/config/schema_helpers.rb +509 -0
  84. data/modules/mu/config/search_domain.rb +4 -4
  85. data/modules/mu/config/server.rb +98 -71
  86. data/modules/mu/config/server.yml +1 -0
  87. data/modules/mu/config/server_pool.rb +5 -9
  88. data/modules/mu/config/storage_pool.rb +1 -1
  89. data/modules/mu/config/tail.rb +200 -0
  90. data/modules/mu/config/user.rb +4 -4
  91. data/modules/mu/config/vpc.rb +71 -27
  92. data/modules/mu/config/vpc.yml +0 -1
  93. data/modules/mu/defaults/AWS.yaml +91 -68
  94. data/modules/mu/defaults/Azure.yaml +1 -0
  95. data/modules/mu/defaults/Google.yaml +3 -2
  96. data/modules/mu/deploy.rb +43 -26
  97. data/modules/mu/groomer.rb +17 -2
  98. data/modules/mu/groomers/ansible.rb +188 -41
  99. data/modules/mu/groomers/chef.rb +116 -55
  100. data/modules/mu/logger.rb +127 -148
  101. data/modules/mu/master.rb +410 -2
  102. data/modules/mu/master/chef.rb +3 -4
  103. data/modules/mu/master/ldap.rb +3 -3
  104. data/modules/mu/master/ssl.rb +12 -3
  105. data/modules/mu/mommacat.rb +218 -2612
  106. data/modules/mu/mommacat/daemon.rb +403 -0
  107. data/modules/mu/mommacat/naming.rb +473 -0
  108. data/modules/mu/mommacat/search.rb +495 -0
  109. data/modules/mu/mommacat/storage.rb +722 -0
  110. data/modules/mu/{clouds → providers}/README.md +1 -1
  111. data/modules/mu/{clouds → providers}/aws.rb +380 -122
  112. data/modules/mu/{clouds → providers}/aws/alarm.rb +7 -5
  113. data/modules/mu/{clouds → providers}/aws/bucket.rb +297 -59
  114. data/modules/mu/{clouds → providers}/aws/cache_cluster.rb +37 -71
  115. data/modules/mu/providers/aws/cdn.rb +782 -0
  116. data/modules/mu/{clouds → providers}/aws/collection.rb +26 -25
  117. data/modules/mu/{clouds → providers}/aws/container_cluster.rb +724 -744
  118. data/modules/mu/providers/aws/database.rb +1744 -0
  119. data/modules/mu/{clouds → providers}/aws/dnszone.rb +88 -70
  120. data/modules/mu/providers/aws/endpoint.rb +1072 -0
  121. data/modules/mu/{clouds → providers}/aws/firewall_rule.rb +220 -247
  122. data/modules/mu/{clouds → providers}/aws/folder.rb +8 -8
  123. data/modules/mu/{clouds → providers}/aws/function.rb +300 -142
  124. data/modules/mu/{clouds → providers}/aws/group.rb +31 -29
  125. data/modules/mu/{clouds → providers}/aws/habitat.rb +18 -15
  126. data/modules/mu/providers/aws/job.rb +466 -0
  127. data/modules/mu/{clouds → providers}/aws/loadbalancer.rb +66 -56
  128. data/modules/mu/{clouds → providers}/aws/log.rb +17 -14
  129. data/modules/mu/{clouds → providers}/aws/msg_queue.rb +29 -19
  130. data/modules/mu/{clouds → providers}/aws/nosqldb.rb +114 -16
  131. data/modules/mu/{clouds → providers}/aws/notifier.rb +142 -65
  132. data/modules/mu/{clouds → providers}/aws/role.rb +158 -118
  133. data/modules/mu/{clouds → providers}/aws/search_domain.rb +201 -59
  134. data/modules/mu/{clouds → providers}/aws/server.rb +844 -1139
  135. data/modules/mu/{clouds → providers}/aws/server_pool.rb +74 -65
  136. data/modules/mu/{clouds → providers}/aws/storage_pool.rb +26 -44
  137. data/modules/mu/{clouds → providers}/aws/user.rb +24 -25
  138. data/modules/mu/{clouds → providers}/aws/userdata/README.md +0 -0
  139. data/modules/mu/{clouds → providers}/aws/userdata/linux.erb +5 -4
  140. data/modules/mu/{clouds → providers}/aws/userdata/windows.erb +2 -1
  141. data/modules/mu/{clouds → providers}/aws/vpc.rb +525 -931
  142. data/modules/mu/providers/aws/vpc_subnet.rb +286 -0
  143. data/modules/mu/{clouds → providers}/azure.rb +29 -9
  144. data/modules/mu/{clouds → providers}/azure/container_cluster.rb +3 -8
  145. data/modules/mu/{clouds → providers}/azure/firewall_rule.rb +18 -11
  146. data/modules/mu/{clouds → providers}/azure/habitat.rb +8 -6
  147. data/modules/mu/{clouds → providers}/azure/loadbalancer.rb +5 -5
  148. data/modules/mu/{clouds → providers}/azure/role.rb +8 -10
  149. data/modules/mu/{clouds → providers}/azure/server.rb +97 -49
  150. data/modules/mu/{clouds → providers}/azure/user.rb +6 -8
  151. data/modules/mu/{clouds → providers}/azure/userdata/README.md +0 -0
  152. data/modules/mu/{clouds → providers}/azure/userdata/linux.erb +0 -0
  153. data/modules/mu/{clouds → providers}/azure/userdata/windows.erb +0 -0
  154. data/modules/mu/{clouds → providers}/azure/vpc.rb +16 -21
  155. data/modules/mu/{clouds → providers}/cloudformation.rb +18 -7
  156. data/modules/mu/{clouds → providers}/cloudformation/alarm.rb +3 -3
  157. data/modules/mu/{clouds → providers}/cloudformation/cache_cluster.rb +3 -3
  158. data/modules/mu/{clouds → providers}/cloudformation/collection.rb +3 -3
  159. data/modules/mu/{clouds → providers}/cloudformation/database.rb +6 -17
  160. data/modules/mu/{clouds → providers}/cloudformation/dnszone.rb +3 -3
  161. data/modules/mu/{clouds → providers}/cloudformation/firewall_rule.rb +3 -3
  162. data/modules/mu/{clouds → providers}/cloudformation/loadbalancer.rb +3 -3
  163. data/modules/mu/{clouds → providers}/cloudformation/log.rb +3 -3
  164. data/modules/mu/{clouds → providers}/cloudformation/server.rb +7 -7
  165. data/modules/mu/{clouds → providers}/cloudformation/server_pool.rb +5 -5
  166. data/modules/mu/{clouds → providers}/cloudformation/vpc.rb +5 -7
  167. data/modules/mu/{clouds → providers}/docker.rb +0 -0
  168. data/modules/mu/{clouds → providers}/google.rb +68 -30
  169. data/modules/mu/{clouds → providers}/google/bucket.rb +13 -15
  170. data/modules/mu/{clouds → providers}/google/container_cluster.rb +85 -78
  171. data/modules/mu/{clouds → providers}/google/database.rb +11 -21
  172. data/modules/mu/{clouds → providers}/google/firewall_rule.rb +15 -14
  173. data/modules/mu/{clouds → providers}/google/folder.rb +20 -17
  174. data/modules/mu/{clouds → providers}/google/function.rb +140 -168
  175. data/modules/mu/{clouds → providers}/google/group.rb +29 -34
  176. data/modules/mu/{clouds → providers}/google/habitat.rb +21 -22
  177. data/modules/mu/{clouds → providers}/google/loadbalancer.rb +19 -21
  178. data/modules/mu/{clouds → providers}/google/role.rb +94 -58
  179. data/modules/mu/{clouds → providers}/google/server.rb +243 -156
  180. data/modules/mu/{clouds → providers}/google/server_pool.rb +26 -45
  181. data/modules/mu/{clouds → providers}/google/user.rb +95 -31
  182. data/modules/mu/{clouds → providers}/google/userdata/README.md +0 -0
  183. data/modules/mu/{clouds → providers}/google/userdata/linux.erb +0 -0
  184. data/modules/mu/{clouds → providers}/google/userdata/windows.erb +0 -0
  185. data/modules/mu/{clouds → providers}/google/vpc.rb +103 -79
  186. data/modules/tests/aws-jobs-functions.yaml +46 -0
  187. data/modules/tests/bucket.yml +4 -0
  188. data/modules/tests/centos6.yaml +15 -0
  189. data/modules/tests/centos7.yaml +15 -0
  190. data/modules/tests/centos8.yaml +12 -0
  191. data/modules/tests/ecs.yaml +23 -0
  192. data/modules/tests/eks.yaml +1 -1
  193. data/modules/tests/functions/node-function/lambda_function.js +10 -0
  194. data/modules/tests/functions/python-function/lambda_function.py +12 -0
  195. data/modules/tests/includes-and-params.yaml +2 -1
  196. data/modules/tests/microservice_app.yaml +288 -0
  197. data/modules/tests/rds.yaml +108 -0
  198. data/modules/tests/regrooms/aws-iam.yaml +201 -0
  199. data/modules/tests/regrooms/bucket.yml +19 -0
  200. data/modules/tests/regrooms/rds.yaml +123 -0
  201. data/modules/tests/server-with-scrub-muisms.yaml +2 -1
  202. data/modules/tests/super_complex_bok.yml +2 -2
  203. data/modules/tests/super_simple_bok.yml +3 -5
  204. data/modules/tests/win2k12.yaml +17 -5
  205. data/modules/tests/win2k16.yaml +25 -0
  206. data/modules/tests/win2k19.yaml +25 -0
  207. data/requirements.txt +1 -0
  208. data/spec/mu/clouds/azure_spec.rb +2 -2
  209. metadata +240 -154
  210. data/extras/image-generators/AWS/windows.yaml +0 -18
  211. data/modules/mu/clouds/aws/database.rb +0 -1985
  212. data/modules/mu/clouds/aws/endpoint.rb +0 -592
@@ -22,9 +22,9 @@ module MU
22
22
  # @param args [Hash]: Hash of named arguments passed via Ruby's double-splat
23
23
  def initialize(**args)
24
24
  super
25
- if @cloud_id and !@config['domain_name']
26
- @config['domain_name'] = @cloud_id
27
- end
25
+ describe if @mu_name and !@deploydata
26
+ @cloud_id ||= @deploydata['domain_name'] if @deploydata
27
+
28
28
  @mu_name ||= @deploy.getResourceName(@config["name"])
29
29
  end
30
30
 
@@ -35,7 +35,8 @@ module MU
35
35
  params = genParams
36
36
 
37
37
  MU.log "Creating ElasticSearch domain #{@config['domain_name']}", details: params
38
- resp = MU::Cloud::AWS.elasticsearch(region: @config['region'], credentials: @config['credentials']).create_elasticsearch_domain(params).domain_status
38
+ @cloud_id = @config['domain_name']
39
+ MU::Cloud::AWS.elasticsearch(region: @config['region'], credentials: @credentials).create_elasticsearch_domain(params).domain_status
39
40
 
40
41
  tagDomain
41
42
 
@@ -44,47 +45,50 @@ module MU
44
45
  # Called automatically by {MU::Deploy#createResources}
45
46
  def groom
46
47
  tagDomain
47
- @config['domain_name'] ||= @deploydata['domain_name']
48
+ @config['domain_name'] ||= @cloud_id
48
49
  params = genParams(cloud_desc) # get parameters that would change only
49
50
 
50
51
  if params.size > 1
51
52
  waitWhileProcessing # wait until the create finishes, if still going
52
53
 
53
54
  MU.log "Updating ElasticSearch domain #{@config['domain_name']}", MU::NOTICE, details: params
54
- MU::Cloud::AWS.elasticsearch(region: @config['region'], credentials: @config['credentials']).update_elasticsearch_domain_config(params)
55
+ MU::Cloud::AWS.elasticsearch(region: @config['region'], credentials: @credentials).update_elasticsearch_domain_config(params)
55
56
  end
56
57
 
57
58
  waitWhileProcessing # don't return until creation/updating is complete
59
+ MU.log "Search Domain #{@config['name']}: #{cloud_desc.endpoint}", MU::SUMMARY
58
60
  end
59
61
 
62
+ @cloud_desc_cache = nil
60
63
  # Wrapper for cloud_desc method that deals with finding the AWS
61
64
  # domain_name parameter, which isn't what we'd call ourselves if we had
62
65
  # our druthers.
63
- def cloud_desc
64
- if @config['domain_name']
65
- MU::Cloud::AWS.elasticsearch(region: @config['region'], credentials: @config['credentials']).describe_elasticsearch_domain(
66
- domain_name: @config['domain_name']
66
+ def cloud_desc(use_cache: true)
67
+ return @cloud_desc_cache if @cloud_desc_cache and use_cache
68
+ @cloud_id ||= @config['domain_name']
69
+ return nil if !@cloud_id
70
+ MU.retrier([::Aws::ElasticsearchService::Errors::ResourceNotFoundException], wait: 10, max: 12) {
71
+ @cloud_desc_cache = MU::Cloud::AWS.elasticsearch(region: @config['region'], credentials: @credentials).describe_elasticsearch_domain(
72
+ domain_name: @cloud_id
67
73
  ).domain_status
68
- elsif @deploydata and @deploydata['domain_name']
69
- MU::Cloud::AWS.elasticsearch(region: @config['region'], credentials: @config['credentials']).describe_elasticsearch_domain(
70
- domain_name: @deploydata['domain_name']
71
- ).domain_status
72
- else
73
- raise MuError, "#{@mu_name} can't find its official Elasticsearch domain name!"
74
- end
74
+ }
75
+
76
+ @cloud_desc_cache
75
77
  end
76
78
 
77
79
  # Canonical Amazon Resource Number for this resource
78
80
  # @return [String]
79
81
  def arn
80
- cloud_desc.arn
82
+ return nil if !cloud_desc
83
+ cloud_desc.arn.dup
81
84
  end
82
85
 
83
86
  # Return the metadata for this SearchDomain rule
84
87
  # @return [Hash]
85
88
  def notify
86
- deploy_struct = MU.structToHash(cloud_desc)
87
- tags = MU::Cloud::AWS.elasticsearch(region: @config['region'], credentials: @config['credentials']).list_tags(arn: deploy_struct[:arn]).tag_list
89
+ return nil if !cloud_desc(use_cache: false)
90
+ deploy_struct = MU.structToHash(cloud_desc, stringify_keys: true)
91
+ tags = MU::Cloud::AWS.elasticsearch(region: @config['region'], credentials: @credentials).list_tags(arn: arn).tag_list
88
92
  deploy_struct['tags'] = tags.map { |t| { t.key => t.value } }
89
93
  if deploy_struct['endpoint']
90
94
  deploy_struct['kibana'] = deploy_struct['endpoint']+"/_plugin/kibana/"
@@ -116,26 +120,34 @@ module MU
116
120
  # @param ignoremaster [Boolean]: If true, will remove resources not flagged as originating from this Mu server
117
121
  # @param region [String]: The cloud provider region
118
122
  # @return [void]
119
- def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {})
120
- list = MU::Cloud::AWS.elasticsearch(region: region).list_domain_names
123
+ def self.cleanup(noop: false, deploy_id: MU.deploy_id, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {})
124
+ MU.log "AWS::SearchDomain.cleanup: need to support flags['known']", MU::DEBUG, details: flags
125
+
126
+ list = MU::Cloud::AWS.elasticsearch(region: region, credentials: credentials).list_domain_names
121
127
  if list and list.domain_names and list.domain_names.size > 0
122
128
  names = list.domain_names.map { |d| d.domain_name }
123
129
  begin
124
130
  # why is this API so obnoxious?
125
131
  sample = names.slice!(0, (names.length >= 5 ? 5 : names.length))
126
- descs = MU::Cloud::AWS.elasticsearch(region: region).describe_elasticsearch_domains(domain_names: sample)
132
+ descs = MU::Cloud::AWS.elasticsearch(region: region, credentials: credentials).describe_elasticsearch_domains(domain_names: sample)
127
133
 
128
134
  descs.domain_status_list.each { |domain|
129
- tags = MU::Cloud::AWS.elasticsearch(region: region).list_tags(arn: domain.arn)
135
+ tags = MU::Cloud::AWS.elasticsearch(region: region, credentials: credentials).list_tags(arn: domain.arn)
136
+ deploy_match = false
137
+ master_match = false
130
138
  tags.tag_list.each { |tag|
131
- if tag.key == "MU-ID" and tag.value == MU.deploy_id
132
- MU.log "Deleting ElasticSearch Domain #{domain.domain_name}"
133
- if !noop
134
- MU::Cloud::AWS.elasticsearch(region: region).delete_elasticsearch_domain(domain_name: domain.domain_name)
135
- end
136
- break
139
+ if tag.key == "MU-ID" and tag.value == deploy_id
140
+ deploy_match = true
141
+ elsif tag.key == "MU-MASTER-IP" and tag.value == MU.mu_public_ip
142
+ master_match = true
137
143
  end
138
144
  }
145
+ if deploy_match and (master_match or ignoremaster)
146
+ MU.log "Deleting ElasticSearch Domain #{domain.domain_name}"
147
+ if !noop
148
+ MU::Cloud::AWS.elasticsearch(region: region, credentials: credentials).delete_elasticsearch_domain(domain_name: domain.domain_name)
149
+ end
150
+ end
139
151
  }
140
152
  end while names.size > 0
141
153
  end
@@ -143,10 +155,10 @@ module MU
143
155
  unless noop
144
156
  marker = nil
145
157
  begin
146
- resp = MU::Cloud::AWS.iam.list_roles(marker: marker)
158
+ resp = MU::Cloud::AWS.iam(credentials: credentials).list_roles(marker: marker)
147
159
  resp.roles.each{ |role|
148
- # XXX Maybe we should have a more generic way to delete IAM profiles and policies. The call itself should be moved from MU::Cloud::AWS::Server.
149
- # MU::Cloud::AWS::Server.removeIAMProfile(role.role_name) if role.role_name.match(/^#{Regexp.quote(MU.deploy_id)}/)
160
+ # XXX Maybe we should have a more generic way to delete IAM profiles and policies. The call itself should be moved from MU::Cloud.resourceClass("AWS", "Server").
161
+ # MU::Cloud.resourceClass("AWS", "Server").removeIAMProfile(role.role_name) if role.role_name.match(/^#{Regexp.quote(deploy_id)}/)
150
162
  }
151
163
  marker = resp.marker
152
164
  end while resp.is_truncated
@@ -180,16 +192,106 @@ module MU
180
192
  found
181
193
  end
182
194
 
195
+ # Reverse-map our cloud description into a runnable config hash.
196
+ # We assume that any values we have in +@config+ are placeholders, and
197
+ # calculate our own accordingly based on what's live in the cloud.
198
+ def toKitten(**_args)
199
+ bok = {
200
+ "cloud" => "AWS",
201
+ "credentials" => @credentials,
202
+ "cloud_id" => @cloud_id,
203
+ "region" => @config['region']
204
+ }
205
+
206
+ if !cloud_desc
207
+ MU.log "toKitten failed to load a cloud_desc from #{@cloud_id}", MU::ERR, details: @config
208
+ return nil
209
+ end
210
+
211
+ bok['name'] = cloud_desc.domain_name
212
+ bok['elasticsearch_version'] = cloud_desc.elasticsearch_version
213
+ bok['instance_count'] = cloud_desc.elasticsearch_cluster_config.instance_count
214
+ bok['instance_type'] = cloud_desc.elasticsearch_cluster_config.instance_type
215
+ bok['zone_aware'] = cloud_desc.elasticsearch_cluster_config.zone_awareness_enabled
216
+
217
+ if cloud_desc.elasticsearch_cluster_config.dedicated_master_enabled
218
+ bok['dedicated_masters'] = cloud_desc.elasticsearch_cluster_config.dedicated_master_count
219
+ bok['master_instance_type'] = cloud_desc.elasticsearch_cluster_config.dedicated_master_type
220
+ end
221
+
222
+ if cloud_desc.access_policies and !cloud_desc.access_policies.empty?
223
+ bok['access_policies'] = JSON.parse(cloud_desc.access_policies)
224
+ end
225
+
226
+ if cloud_desc.advanced_options and !cloud_desc.advanced_options.empty?
227
+ bok['advanced_options'] = cloud_desc.advanced_options
228
+ end
229
+
230
+ bok['ebs_size'] = cloud_desc.ebs_options.volume_size
231
+ bok['ebs_type'] = cloud_desc.ebs_options.volume_type
232
+ bok['ebs_iops'] = cloud_desc.ebs_options.iops if cloud_desc.ebs_options.iops
233
+
234
+ if cloud_desc.snapshot_options and cloud_desc.snapshot_options.automated_snapshot_start_hour
235
+ bok['snapshot_hour'] = cloud_desc.snapshot_options.automated_snapshot_start_hour
236
+ end
237
+
238
+ if cloud_desc.cognito_options.user_pool_id and
239
+ cloud_desc.cognito_options.identity_pool_id
240
+ bok['user_pool_id'] = cloud_desc.cognito_options.user_pool_id
241
+ bok['identity_pool_id'] = cloud_desc.cognito_options.identity_pool_id
242
+ end
243
+
244
+ tags = MU::Cloud::AWS.elasticsearch(region: @config['region'], credentials: @credentials).list_tags(arn: cloud_desc.arn).tag_list
245
+ if tags and !tags.empty?
246
+ bok['tags'] = MU.structToHash(tags)
247
+ end
248
+
249
+ if cloud_desc.vpc_options
250
+ bok['vpc'] = MU::Config::Ref.get(
251
+ id: cloud_desc.vpc_options.vpc_id,
252
+ cloud: "AWS",
253
+ credentials: @credentials,
254
+ type: "vpcs",
255
+ region: @config['region'],
256
+ subnets: cloud_desc.vpc_options.subnet_ids.map { |s| { "subnet_id" => s } }
257
+ )
258
+ if cloud_desc.vpc_options.security_group_ids and
259
+ !cloud_desc.vpc_options.security_group_ids.empty?
260
+ bok['add_firewall_rules'] = cloud_desc.vpc_options.security_group_ids.map { |sg|
261
+ MU::Config::Ref.get(
262
+ id: sg,
263
+ cloud: "AWS",
264
+ credentials: @credentials,
265
+ region: @config['region'],
266
+ type: "firewall_rules",
267
+ )
268
+ }
269
+ end
270
+ end
271
+
272
+ if cloud_desc.log_publishing_options
273
+ # XXX this is primitive... there are multiple other log types now,
274
+ # and this should be a Ref blob, not a flat string
275
+ cloud_desc.log_publishing_options.each_pair { |type, whither|
276
+ if type == "SEARCH_SLOW_LOGS"
277
+ bok['slow_logs'] = whither.cloud_watch_logs_log_group_arn
278
+ end
279
+ }
280
+ end
281
+
282
+ bok
283
+ end
284
+
183
285
  # Cloud-specific configuration properties.
184
- # @param config [MU::Config]: The calling MU::Config object
286
+ # @param _config [MU::Config]: The calling MU::Config object
185
287
  # @return [Array<Array,Hash>]: List of required fields, and json-schema Hash of cloud-specific configuration parameters for this resource
186
- def self.schema(config)
288
+ def self.schema(_config)
187
289
  toplevel_required = ["elasticsearch_version", "instance_type"]
188
290
 
189
291
  versions = begin
190
292
  MU::Cloud::AWS.elasticsearch.list_elasticsearch_versions.elasticsearch_versions
191
- rescue MuError => e
192
- ["7.1", "6.8", "6.7", "6.5", "6.4", "6.3", "6.2", "6.0", "5.6"]
293
+ rescue MuError
294
+ ["7.4", "7.1", "6.8", "6.7", "6.5", "6.4", "6.3", "6.2", "6.0", "5.6"]
193
295
  end
194
296
  instance_types = begin
195
297
  MU::Cloud::AWS.elasticsearch.list_elasticsearch_instance_types(
@@ -204,6 +306,8 @@ module MU
204
306
  ).elasticsearch_instance_types
205
307
  end
206
308
 
309
+ polschema = MU::Config::Role.schema["properties"]["policies"]
310
+ polschema.deep_merge!(MU::Cloud.resourceClass("AWS", "Role").condition_schema)
207
311
 
208
312
  schema = {
209
313
  "name" => {
@@ -225,9 +329,10 @@ module MU
225
329
  "default" => 0,
226
330
  "description" => "Separate, dedicated master node(s), over and above the search instances specified in instance_count."
227
331
  },
332
+ "policies" => polschema,
228
333
  "access_policies" => {
229
334
  "type" => "object",
230
- "description" => "An IAM policy document for access to ElasticSearch. Our parser expects this to be defined inline like the rest of your YAML/JSON Basket of Kittens, not as raw JSON. For guidance on ElasticSearch IAM capabilities, see: https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-ac.html"
335
+ "description" => "An IAM policy document for access to ElasticSearch (see {policies} for setting complex access policies with runtime dependencies). Our parser expects this to be defined inline like the rest of your YAML/JSON Basket of Kittens, not as raw JSON. For guidance on ElasticSearch IAM capabilities, see: https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-ac.html"
231
336
  },
232
337
  "master_instance_type" => {
233
338
  "type" => "string",
@@ -235,7 +340,7 @@ module MU
235
340
  },
236
341
  "ebs_type" => {
237
342
  "type" => "string",
238
- "default" => "standard",
343
+ "default" => "gp2",
239
344
  "description" => "Type of EBS storage to use for cluster nodes. If 'none' is specified, EBS storage will not be used, but this is only valid for certain instance types.",
240
345
  "enum" => ["standard", "gp2", "io1", "none"]
241
346
  },
@@ -367,9 +472,9 @@ module MU
367
472
 
368
473
  if dom['slow_logs']
369
474
  if configurator.haveLitterMate?(dom['slow_logs'], "log")
370
- dom['dependencies'] << { "name" => dom['slow_logs'], "type" => "log" }
475
+ MU::Config.addDependency(dom, dom['slow_logs'], "log")
371
476
  else
372
- log_group = MU::Cloud::AWS::Log.find(cloud_id: dom['slow_logs'], region: dom['region']).values.first
477
+ log_group = MU::Cloud.resourceClass("AWS", "Log").find(cloud_id: dom['slow_logs'], region: dom['region']).values.first
373
478
  if !log_group
374
479
  MU.log "Specified slow_logs CloudWatch log group '#{dom['slow_logs']}' in SearchDomain '#{dom['name']}' doesn't appear to exist", MU::ERR
375
480
  ok = false
@@ -384,7 +489,7 @@ module MU
384
489
  "credentials" => dom['credentials']
385
490
  }
386
491
  ok = false if !configurator.insertKitten(log_group, "logs")
387
- dom['dependencies'] << { "name" => dom['slow_logs'], "type" => "log" }
492
+ MU::Config.addDependency(dom, dom['slow_logs'], "log")
388
493
  end
389
494
 
390
495
  if dom['advanced_options']
@@ -398,7 +503,7 @@ module MU
398
503
  MU::Cloud::AWS.cognito_ident(region: dom['region']).describe_identity_pool(
399
504
  identity_pool_id: dom['cognito']['identity_pool_id']
400
505
  )
401
- rescue ::Aws::CognitoIdentity::Errors::ValidationException, Aws::CognitoIdentity::Errors::ResourceNotFoundException => e
506
+ rescue ::Aws::CognitoIdentity::Errors::ValidationException, Aws::CognitoIdentity::Errors::ResourceNotFoundException
402
507
  MU.log "Cognito identity pool #{dom['cognito']['identity_pool_id']} malformed or does not exist in SearchDomain '#{dom['name']}'", MU::ERR
403
508
  ok = false
404
509
  end
@@ -406,7 +511,7 @@ module MU
406
511
  MU::Cloud::AWS.cognito_user(region: dom['region']).describe_user_pool(
407
512
  user_pool_id: dom['cognito']['user_pool_id']
408
513
  )
409
- rescue ::Aws::CognitoIdentityProvider::Errors::InvalidParameterException, Aws::CognitoIdentityProvider::Errors::ResourceNotFoundException => e
514
+ rescue ::Aws::CognitoIdentityProvider::Errors::InvalidParameterException, Aws::CognitoIdentityProvider::Errors::ResourceNotFoundException
410
515
  MU.log "Cognito identity pool #{dom['cognito']['user_pool_id']} malformed or does not exist in SearchDomain '#{dom['name']}'", MU::ERR
411
516
  ok = false
412
517
  end
@@ -426,7 +531,7 @@ module MU
426
531
  if !found
427
532
  MU.log "IAM role #{dom['cognito']['role_arn']} exists, but not does have the AmazonESCognitoAccess policy attached. SearchDomain '#{dom['name']}' may not have necessary Cognito permissions.", MU::WARN
428
533
  end
429
- rescue Aws::IAM::Errors::NoSuchEntity => e
534
+ rescue Aws::IAM::Errors::NoSuchEntity
430
535
  MU.log "IAM role #{dom['cognito']['role_arn']} malformed or does not exist in SearchDomain '#{dom['name']}'", MU::ERR
431
536
  ok = false
432
537
  end
@@ -445,12 +550,7 @@ module MU
445
550
  ]
446
551
  }
447
552
  configurator.insertKitten(roledesc, "roles")
448
-
449
- dom['dependencies'] ||= []
450
- dom['dependencies'] << {
451
- "type" => "role",
452
- "name" => dom['name']+"cognitorole"
453
- }
553
+ MU::Config.addDependency(dom, dom['name']+"cognitorole", "role")
454
554
  end
455
555
 
456
556
  end
@@ -503,9 +603,51 @@ module MU
503
603
  params[:snapshot_options][:automated_snapshot_start_hour] = @config['snapshot_hour']
504
604
  end
505
605
 
506
- if @config['access_policies']
507
- # TODO check against ext.access_policies.options
508
- params[:access_policies] = JSON.generate(@config['access_policies'])
606
+ if ext
607
+ # Despite being called access_policies, this parameter actually
608
+ # only accepts one policy. So, we'll munge everything we have
609
+ # together into one policy with multiple Statements.
610
+ policy = nil
611
+ # TODO check against ext.access_policy.options
612
+
613
+ if @config['access_policies']
614
+ policy = @config['access_policies']
615
+ # ensure the "Statement" key is cased in a predictable way
616
+ statement_key = nil
617
+ policy.each_pair { |k, v|
618
+ if k.downcase == "statement" and k != "Statement"
619
+ statement_key = k
620
+ break
621
+ end
622
+ }
623
+ if statement_key
624
+ policy["Statement"] = policy.delete(statement_key)
625
+ end
626
+ if !policy["Statement"].is_a?(Array)
627
+ policy["Statement"] = [policy["Statement"]]
628
+ end
629
+ end
630
+
631
+ if @config['policies']
632
+ @config['policies'].each { |p|
633
+ p['targets'].each { |t|
634
+ if t['path']
635
+ t['path'].gsub!(/#SELF/, @mu_name.downcase)
636
+ end
637
+ }
638
+ parsed = MU::Cloud.resourceClass("AWS", "Role").genPolicyDocument([p], deploy_obj: @deploy, bucket_style: true).first.values.first
639
+
640
+ if policy and policy["Statement"]
641
+ policy["Statement"].concat(parsed["Statement"])
642
+ else
643
+ policy = parsed
644
+ end
645
+ }
646
+ end
647
+
648
+ if policy
649
+ params[:access_policies] = JSON.generate(policy)
650
+ end
509
651
  end
510
652
 
511
653
  if @config['slow_logs']
@@ -514,7 +656,7 @@ module MU
514
656
  arn = @config['slow_logs']
515
657
  else
516
658
  log_group = @deploy.findLitterMate(type: "log", name: @config['slow_logs'])
517
- log_group = MU::Cloud::AWS::Log.find(cloud_id: log_group.mu_name, region: log_group.cloudobj.config['region']).values.first
659
+ log_group = MU::Cloud.resourceClass("AWS", "Log").find(cloud_id: log_group.mu_name, region: log_group.cloudobj.config['region']).values.first
518
660
  if log_group.nil? or log_group.arn.nil?
519
661
  raise MuError, "Failed to retrieve ARN of sibling LogGroup '#{@config['slow_logs']}'"
520
662
  end
@@ -541,7 +683,7 @@ module MU
541
683
  params[:log_publishing_options]["SEARCH_SLOW_LOGS"] = {}
542
684
  params[:log_publishing_options]["SEARCH_SLOW_LOGS"][:enabled] = true
543
685
  params[:log_publishing_options]["SEARCH_SLOW_LOGS"][:cloud_watch_logs_log_group_arn] = arn
544
- MU::Cloud::AWS::Log.allowService("es.amazonaws.com", arn, @config['region'])
686
+ MU::Cloud.resourceClass("AWS", "Log").allowService("es.amazonaws.com", arn, @config['region'])
545
687
  end
546
688
  end
547
689
 
@@ -626,7 +768,7 @@ module MU
626
768
  # modify an existing group. AWS bug, workaround is to just apply
627
769
  # this in groom phase exclusively.
628
770
  if @config['cognito'] and !ext.nil?
629
- myrole = setIAMPolicies
771
+ setIAMPolicies
630
772
 
631
773
  if ext.nil? or !ext.cognito_options.enabled or
632
774
  ext.cognito_options.user_pool_id != @config['cognito']['user_pool_id'] or
@@ -671,7 +813,7 @@ module MU
671
813
  raise MU::MuError, "Can't tag ElasticSearch domain, cloud descriptor came back without an ARN"
672
814
  end
673
815
 
674
- MU::Cloud::AWS.elasticsearch(region: @config['region'], credentials: @config['credentials']).add_tags(
816
+ MU::Cloud::AWS.elasticsearch(region: @config['region'], credentials: @credentials).add_tags(
675
817
  arn: domain.arn,
676
818
  tag_list: tags
677
819
  )
@@ -682,7 +824,7 @@ module MU
682
824
  interval = 60
683
825
 
684
826
  begin
685
- resp = cloud_desc
827
+ resp = cloud_desc(use_cache: false)
686
828
 
687
829
  if (resp.endpoint.nil? or resp.endpoint.empty?) and
688
830
  (resp.endpoints.nil? or resp.endpoints.empty?) and
@@ -89,7 +89,7 @@ module MU
89
89
  template_variables: {
90
90
  "deployKey" => Base64.urlsafe_encode64(@deploy.public_key),
91
91
  "deploySSHKey" => @deploy.ssh_public_key,
92
- "muID" => MU.deploy_id,
92
+ "muID" => @deploy.deploy_id,
93
93
  "muUser" => MU.mu_user,
94
94
  "publicIP" => MU.mu_public_ip,
95
95
  "mommaCatPort" => MU.mommaCatPort,
@@ -145,7 +145,7 @@ module MU
145
145
  raise MuError, "My second argument should be a hash of variables to pass into ERB templates"
146
146
  end
147
147
  $mu = OpenStruct.new(template_variables)
148
- userdata_dir = File.expand_path(MU.myRoot+"/modules/mu/clouds/aws/userdata")
148
+ userdata_dir = File.expand_path(MU.myRoot+"/modules/mu/providers/aws/userdata")
149
149
  platform = "linux" if %w{centos centos6 centos7 ubuntu ubuntu14 rhel rhel7 rhel71 amazon}.include? platform
150
150
  platform = "windows" if %w{win2k12r2 win2k12 win2k8 win2k8r2 win2k16}.include? platform
151
151
  erbfile = "#{userdata_dir}/#{platform}.erb"
@@ -212,7 +212,7 @@ module MU
212
212
  vol_id = attachment.volume_id
213
213
  vol_dev = attachment.device
214
214
  if vol_parent == instance_id and (vol_dev == device or device.nil?)
215
- MU::MommaCat.createTag(vol_id, tag_name, tag_value, region: region, credentials: credentials)
215
+ MU::Cloud::AWS.createTag(vol_id, tag_name, tag_value, region: region, credentials: credentials)
216
216
  break
217
217
  end
218
218
  }
@@ -240,11 +240,17 @@ module MU
240
240
  end
241
241
  MU::MommaCat.unlock(instance.instance_id+"-create")
242
242
  else
243
- MU::Cloud::AWS.createStandardTags(instance.instance_id, region: @config['region'], credentials: @config['credentials'])
244
- MU::MommaCat.createTag(instance.instance_id, "Name", @mu_name, region: @config['region'], credentials: @config['credentials'])
243
+ MU::Cloud::AWS.createStandardTags(
244
+ instance.instance_id,
245
+ region: @config['region'],
246
+ credentials: @config['credentials'],
247
+ optional: @config['optional_tags'],
248
+ nametag: @mu_name,
249
+ othertags: @config['tags']
250
+ )
245
251
  end
246
252
  done = true
247
- rescue Exception => e
253
+ rescue StandardError => e
248
254
  if !instance.nil? and !done
249
255
  MU.log "Aborted before I could finish setting up #{@config['name']}, cleaning it up. Stack trace will print once cleanup is complete.", MU::WARN if !@deploy.nocleanup
250
256
  MU::MommaCat.unlockAll
@@ -262,15 +268,11 @@ module MU
262
268
  return @config
263
269
  end
264
270
 
265
-
266
-
267
271
  # Create an Amazon EC2 instance.
268
272
  def createEc2Instance
269
- name = @config["name"]
270
- node = @config['mu_name']
271
273
 
272
274
  instance_descriptor = {
273
- :image_id => @config["ami_id"],
275
+ :image_id => @config["image_id"],
274
276
  :key_name => @deploy.ssh_key_name,
275
277
  :instance_type => @config["size"],
276
278
  :disable_api_termination => true,
@@ -278,64 +280,26 @@ module MU
278
280
  :max_count => 1
279
281
  }
280
282
 
281
- arn = nil
282
- if @config['generate_iam_role']
283
- role = @deploy.findLitterMate(name: @config['name'], type: "roles")
284
- s3_objs = ["#{@deploy.deploy_id}-secret", "#{role.mu_name}.pfx", "#{role.mu_name}.crt", "#{role.mu_name}.key", "#{role.mu_name}-winrm.crt", "#{role.mu_name}-winrm.key"].map { |file|
285
- 'arn:'+(MU::Cloud::AWS.isGovCloud?(@config['region']) ? "aws-us-gov" : "aws")+':s3:::'+MU::Cloud::AWS.adminBucketName(@credentials)+'/'+file
286
- }
287
- MU.log "Adding S3 read permissions to #{@mu_name}'s IAM profile", MU::NOTICE, details: s3_objs
288
- role.cloudobj.injectPolicyTargets("MuSecrets", s3_objs)
289
-
290
- @config['iam_role'] = role.mu_name
291
- arn = role.cloudobj.createInstanceProfile
292
- # @cfm_role_name, @cfm_prof_name
293
-
294
- elsif @config['iam_role'].nil?
295
- raise MuError, "#{@mu_name} has generate_iam_role set to false, but no iam_role assigned."
296
- end
297
- if !@config["iam_role"].nil?
298
- if arn
299
- instance_descriptor[:iam_instance_profile] = {arn: arn}
300
- else
301
- instance_descriptor[:iam_instance_profile] = {name: @config["iam_role"]}
302
- end
303
- end
304
-
305
- security_groups = []
306
- if @dependencies.has_key?("firewall_rule")
307
- @dependencies['firewall_rule'].values.each { |sg|
308
- security_groups << sg.cloud_id
309
- }
310
- end
283
+ instance_descriptor[:iam_instance_profile] = getIAMProfile
311
284
 
285
+ security_groups = myFirewallRules.map { |fw| fw.cloud_id }
312
286
  if security_groups.size > 0
313
287
  instance_descriptor[:security_group_ids] = security_groups
314
288
  else
315
289
  raise MuError, "Didn't get any security groups assigned to be in #{@mu_name}, that shouldn't happen"
316
290
  end
317
291
 
318
- if !@config['private_ip'].nil?
292
+ if @config['private_ip']
319
293
  instance_descriptor[:private_ip_address] = @config['private_ip']
320
294
  end
321
295
 
322
- vpc_id = subnet = nil
323
296
  if !@vpc.nil? and @config.has_key?("vpc")
324
- subnet_conf = @config['vpc']
325
- subnet_conf = @config['vpc']['subnets'].first if @config['vpc'].has_key?("subnets") and !@config['vpc']['subnets'].empty?
326
- tag_key, tag_value = subnet_conf['tag'].split(/=/, 2) if !subnet_conf['tag'].nil?
327
-
328
- subnet = @vpc.getSubnet(
329
- cloud_id: subnet_conf['subnet_id'],
330
- name: subnet_conf['subnet_name'],
331
- tag_key: tag_key,
332
- tag_value: tag_value
333
- )
297
+ subnet = mySubnets.sample
334
298
  if subnet.nil?
335
- raise MuError, "Got null subnet id out of #{subnet_conf['vpc']}"
299
+ raise MuError, "Got null subnet id out of #{@config['vpc']}"
336
300
  end
337
- MU.log "Deploying #{node} into VPC #{@vpc.cloud_id} Subnet #{subnet.cloud_id}"
338
- punchAdminNAT
301
+ MU.log "Deploying #{@mu_name} into VPC #{@vpc.cloud_id} Subnet #{subnet.cloud_id}"
302
+ allowBastionAccess
339
303
  instance_descriptor[:subnet_id] = subnet.cloud_id
340
304
  end
341
305
 
@@ -343,38 +307,10 @@ module MU
343
307
  instance_descriptor[:user_data] = Base64.encode64(@userdata)
344
308
  end
345
309
 
346
- MU::Cloud::AWS::Server.waitForAMI(@config["ami_id"], region: @config['region'], credentials: @config['credentials'])
310
+ MU::Cloud::AWS::Server.waitForAMI(@config["image_id"], region: @config['region'], credentials: @config['credentials'])
347
311
 
348
- # Figure out which devices are embedded in the AMI already.
349
- image = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_images(image_ids: [@config["ami_id"]]).images.first
350
- ext_disks = {}
351
- if !image.block_device_mappings.nil?
352
- image.block_device_mappings.each { |disk|
353
- if !disk.device_name.nil? and !disk.device_name.empty? and !disk.ebs.nil? and !disk.ebs.empty?
354
- ext_disks[disk.device_name] = MU.structToHash(disk.ebs)
355
- end
356
- }
357
- end
358
-
359
- configured_storage = Array.new
360
- cfm_volume_map = {}
361
- if @config["storage"]
362
- @config["storage"].each { |vol|
363
- # Drop the "encrypted" flag if a snapshot for this device exists
364
- # in the AMI, even if they both agree about the value of said
365
- # flag. Apparently that's a thing now.
366
- if ext_disks.has_key?(vol["device"])
367
- if ext_disks[vol["device"]].has_key?(:snapshot_id)
368
- vol.delete("encrypted")
369
- end
370
- end
371
- mapping, cfm_mapping = MU::Cloud::AWS::Server.convertBlockDeviceMapping(vol)
372
- configured_storage << mapping
373
- }
374
- end
312
+ instance_descriptor[:block_device_mappings] = MU::Cloud::AWS::Server.configureBlockDevices(image_id: @config["image_id"], storage: @config['storage'], region: @config['region'], credentials: @credentials)
375
313
 
376
- instance_descriptor[:block_device_mappings] = configured_storage
377
- instance_descriptor[:block_device_mappings].concat(@ephemeral_mappings)
378
314
  instance_descriptor[:monitoring] = {enabled: @config['monitoring']}
379
315
 
380
316
  if @tags and @tags.size > 0
@@ -386,37 +322,24 @@ module MU
386
322
  }]
387
323
  end
388
324
 
389
- MU.log "Creating EC2 instance #{node}"
390
- MU.log "Instance details for #{node}: #{instance_descriptor}", MU::DEBUG
391
- # if instance_descriptor[:block_device_mappings].empty?
392
- # instance_descriptor.delete(:block_device_mappings)
393
- # end
325
+ MU.log "Creating EC2 instance #{@mu_name}", details: instance_descriptor
394
326
 
395
- retries = 0
396
- instance = begin
397
- response = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).run_instances(instance_descriptor)
398
- if response and response.instances and response.instances.size > 0
399
- instance = response.instances.first
400
- else
401
- MU.log "halp", MU::ERR, details: response
402
- end
327
+ instance = resp = nil
328
+ loop_if = Proc.new {
329
+ instance = resp.instances.first if resp and resp.instances
330
+ resp.nil? or resp.instances.nil? or instance.nil?
331
+ }
332
+
333
+ begin
334
+ MU.retrier([Aws::EC2::Errors::InvalidGroupNotFound, Aws::EC2::Errors::InvalidSubnetIDNotFound, Aws::EC2::Errors::InvalidParameterValue], loop_if: loop_if, loop_msg: "Waiting for run_instances to return #{@mu_name}") {
335
+ resp = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).run_instances(instance_descriptor)
336
+ }
403
337
  rescue Aws::EC2::Errors::InvalidRequest => e
404
338
  MU.log e.message, MU::ERR, details: instance_descriptor
405
339
  raise e
406
- rescue Aws::EC2::Errors::InvalidGroupNotFound, Aws::EC2::Errors::InvalidSubnetIDNotFound, Aws::EC2::Errors::InvalidParameterValue => e
407
- if retries < 10
408
- if retries > 7
409
- MU.log "Seeing #{e.inspect} while trying to launch #{node}, retrying a few more times...", MU::WARN, details: instance_descriptor
410
- end
411
- sleep 10
412
- retries = retries + 1
413
- retry
414
- else
415
- raise MuError, e.inspect
416
- end
417
340
  end
418
341
 
419
- MU.log "#{node} (#{instance.instance_id}) coming online"
342
+ MU.log "#{@mu_name} (#{instance.instance_id}) coming online"
420
343
 
421
344
  instance
422
345
  end
@@ -446,7 +369,7 @@ module MU
446
369
  instance_ids: [@cloud_id]
447
370
  )
448
371
  MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).wait_until(:instance_stopped, instance_ids: [@cloud_id]) do |waiter|
449
- waiter.before_attempt do |attempts|
372
+ waiter.before_attempt do
450
373
  MU.log "Waiting for #{@mu_name} to stop for hard reboot"
451
374
  end
452
375
  end
@@ -476,14 +399,13 @@ module MU
476
399
  # Figure out what's needed to SSH into this server.
477
400
  # @return [Array<String>]: nat_ssh_key, nat_ssh_user, nat_ssh_host, canonical_ip, ssh_user, ssh_key_name, alternate_names
478
401
  def getSSHConfig
479
- node, config, deploydata = describe(cloud_id: @cloud_id)
402
+ cloud_desc(use_cache: false) # make sure we're current
480
403
  # XXX add some awesome alternate names from metadata and make sure they end
481
404
  # up in MU::MommaCat's ssh config wangling
482
- ssh_keydir = Etc.getpwuid(Process.uid).dir+"/.ssh"
483
405
  return nil if @config.nil? or @deploy.nil?
484
406
 
485
407
  nat_ssh_key = nat_ssh_user = nat_ssh_host = nil
486
- if !@config["vpc"].nil? and !MU::Cloud::AWS::VPC.haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials'])
408
+ if !@config["vpc"].nil? and !MU::Cloud.resourceClass("AWS", "VPC").haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials'])
487
409
  if !@nat.nil?
488
410
  if @nat.is_a?(Struct) && @nat.nat_gateway_id && @nat.nat_gateway_id.start_with?("nat-")
489
411
  raise MuError, "Configured to use NAT Gateway, but I have no route to instance. Either use Bastion, or configure VPC peering"
@@ -521,450 +443,81 @@ module MU
521
443
  # Apply tags, bootstrap our configuration management, and other
522
444
  # administravia for a new instance.
523
445
  def postBoot(instance_id = nil)
524
- if !instance_id.nil?
525
- @cloud_id = instance_id
526
- end
527
- node, config, deploydata = describe(cloud_id: @cloud_id)
528
- instance = cloud_desc
529
- raise MuError, "Couldn't find instance #{@mu_name} (#{@cloud_id})" if !instance
530
- @cloud_id = instance.instance_id
531
- return false if !MU::MommaCat.lock(instance.instance_id+"-orchestrate", true)
532
- return false if !MU::MommaCat.lock(instance.instance_id+"-groom", true)
533
-
534
- MU::Cloud::AWS.createStandardTags(instance.instance_id, region: @config['region'], credentials: @config['credentials'])
535
- MU::MommaCat.createTag(instance.instance_id, "Name", node, region: @config['region'], credentials: @config['credentials'])
536
-
537
- if @config['optional_tags']
538
- MU::MommaCat.listOptionalTags.each { |key, value|
539
- MU::MommaCat.createTag(instance.instance_id, key, value, region: @config['region'], credentials: @config['credentials'])
540
- }
541
- end
446
+ @cloud_id ||= instance_id
447
+ _node, _config, deploydata = describe(cloud_id: @cloud_id)
448
+
449
+ raise MuError, "Couldn't find instance #{@mu_name} (#{@cloud_id})" if !cloud_desc
450
+ return false if !MU::MommaCat.lock(@cloud_id+"-orchestrate", true)
451
+ return false if !MU::MommaCat.lock(@cloud_id+"-groom", true)
452
+ finish = Proc.new { |status|
453
+ MU::MommaCat.unlock(@cloud_id+"-orchestrate")
454
+ MU::MommaCat.unlock(@cloud_id+"-groom")
455
+ return status
456
+ }
542
457
 
543
- if !@config['tags'].nil?
544
- @config['tags'].each { |tag|
545
- MU::MommaCat.createTag(instance.instance_id, tag['key'], tag['value'], region: @config['region'], credentials: @config['credentials'])
546
- }
547
- end
548
- MU.log "Tagged #{node} (#{instance.instance_id}) with MU-ID=#{MU.deploy_id}", MU::DEBUG
458
+ MU::Cloud::AWS.createStandardTags(
459
+ @cloud_id,
460
+ region: @config['region'],
461
+ credentials: @config['credentials'],
462
+ optional: @config['optional_tags'],
463
+ nametag: @mu_name,
464
+ othertags: @config['tags']
465
+ )
549
466
 
550
467
  # Make double sure we don't lose a cached mu_windows_name value.
551
- if windows? or !@config['active_directory'].nil?
552
- if @mu_windows_name.nil?
553
- @mu_windows_name = deploydata['mu_windows_name']
554
- end
468
+ if (windows? or !@config['active_directory'].nil?)
469
+ @mu_windows_name ||= deploydata['mu_windows_name']
555
470
  end
556
471
 
557
- retries = -1
558
- max_retries = 30
559
- begin
560
- if instance.nil? or instance.state.name != "running"
561
- retries = retries + 1
562
- if !instance.nil? and instance.state.name == "terminated"
563
- raise MuError, "#{@cloud_id} appears to have been terminated mid-bootstrap!"
564
- end
565
- if retries % 3 == 0
566
- MU.log "Waiting for EC2 instance #{node} (#{@cloud_id}) to be ready...", MU::NOTICE
567
- end
568
- sleep 40
569
- # Get a fresh AWS descriptor
570
- instance = MU::Cloud::Server.find(cloud_id: @cloud_id, region: @config['region'], credentials: @config['credentials']).values.first
571
- if instance and instance.state.name == "terminated"
572
- raise MuError, "EC2 instance #{node} (#{@cloud_id}) terminating during bootstrap!"
573
- end
472
+ loop_if = Proc.new {
473
+ !cloud_desc(use_cache: false) or cloud_desc.state.name != "running"
474
+ }
475
+ MU.retrier([Aws::EC2::Errors::ServiceError], max: 30, wait: 40, loop_if: loop_if) { |retries, _wait|
476
+ if cloud_desc and cloud_desc.state.name == "terminated"
477
+ raise MuError, "#{@cloud_id} appears to have been terminated mid-bootstrap!"
574
478
  end
575
- rescue Aws::EC2::Errors::ServiceError => e
576
- if retries < max_retries
577
- MU.log "Got #{e.inspect} during initial instance creation of #{@cloud_id}, retrying...", MU::NOTICE, details: instance
578
- retries = retries + 1
579
- retry
580
- else
581
- raise MuError, "Too many retries creating #{node} (#{e.inspect})"
479
+ if retries % 3 == 0
480
+ MU.log "Waiting for EC2 instance #{@mu_name} (#{@cloud_id}) to be ready...", MU::NOTICE
582
481
  end
583
- end while instance.nil? or (instance.state.name != "running" and retries < max_retries)
584
-
585
- punchAdminNAT
586
-
587
-
588
- # If we came up via AutoScale, the Alarm module won't have had our
589
- # instance ID to associate us with itself. So invoke that here.
590
- # XXX might be possible to do this with regular alarm resources and
591
- # dependencies now
592
- if !@config['basis'].nil? and @config["alarms"] and !@config["alarms"].empty?
593
- @config["alarms"].each { |alarm|
594
- alarm_obj = MU::MommaCat.findStray(
595
- "AWS",
596
- "alarms",
597
- region: @config["region"],
598
- deploy_id: @deploy.deploy_id,
599
- name: alarm['name']
600
- ).first
601
- alarm["dimensions"] = [{:name => "InstanceId", :value => @cloud_id}]
602
-
603
- if alarm["enable_notifications"]
604
- topic_arn = MU::Cloud::AWS::Notification.createTopic(alarm["notification_group"], region: @config["region"], credentials: @config['credentials'])
605
- MU::Cloud::AWS::Notification.subscribe(arn: topic_arn, protocol: alarm["notification_type"], endpoint: alarm["notification_endpoint"], region: @config["region"], credentials: @config["credentials"])
606
- alarm["alarm_actions"] = [topic_arn]
607
- alarm["ok_actions"] = [topic_arn]
608
- end
482
+ }
609
483
 
610
- alarm_name = alarm_obj ? alarm_obj.cloud_id : "#{node}-#{alarm['name']}".upcase
611
-
612
- MU::Cloud::AWS::Alarm.setAlarm(
613
- name: alarm_name,
614
- ok_actions: alarm["ok_actions"],
615
- alarm_actions: alarm["alarm_actions"],
616
- insufficient_data_actions: alarm["no_data_actions"],
617
- metric_name: alarm["metric_name"],
618
- namespace: alarm["namespace"],
619
- statistic: alarm["statistic"],
620
- dimensions: alarm["dimensions"],
621
- period: alarm["period"],
622
- unit: alarm["unit"],
623
- evaluation_periods: alarm["evaluation_periods"],
624
- threshold: alarm["threshold"],
625
- comparison_operator: alarm["comparison_operator"],
626
- region: @config["region"],
627
- credentials: @config['credentials']
628
- )
629
- }
630
- end
484
+ allowBastionAccess
631
485
 
632
- # We have issues sometimes where our dns_records are pointing at the wrong node name and IP address.
633
- # Make sure that doesn't happen. Happens with server pools only
634
- if @config['dns_records'] && !@config['dns_records'].empty?
635
- @config['dns_records'].each { |dnsrec|
636
- if dnsrec.has_key?("name")
637
- if dnsrec['name'].start_with?(MU.deploy_id.downcase) && !dnsrec['name'].start_with?(node.downcase)
638
- MU.log "DNS records for #{node} seem to be wrong, deleting from current config", MU::WARN, details: dnsrec
639
- dnsrec.delete('name')
640
- dnsrec.delete('target')
641
- end
642
- end
643
- }
644
- end
486
+ setAlarms
645
487
 
646
488
  # Unless we're planning on associating a different IP later, set up a
647
489
  # DNS entry for this thing and let it sync in the background. We'll come
648
490
  # back to it later.
649
- if @config['static_ip'].nil? && !@named
491
+ if @config['static_ip'].nil? and !@named
650
492
  MU::MommaCat.nameKitten(self)
651
493
  @named = true
652
494
  end
653
495
 
654
496
  if !@config['src_dst_check'] and !@config["vpc"].nil?
655
- MU.log "Disabling source_dest_check #{node} (making it NAT-worthy)"
497
+ MU.log "Disabling source_dest_check #{@mu_name} (making it NAT-worthy)"
656
498
  MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).modify_instance_attribute(
657
- instance_id: @cloud_id,
658
- source_dest_check: {:value => false}
499
+ instance_id: @cloud_id,
500
+ source_dest_check: { value: false }
659
501
  )
660
502
  end
661
503
 
662
504
  # Set console termination protection. Autoscale nodes won't set this
663
505
  # by default.
664
506
  MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).modify_instance_attribute(
665
- instance_id: @cloud_id,
666
- disable_api_termination: {:value => true}
507
+ instance_id: @cloud_id,
508
+ disable_api_termination: { value: true}
667
509
  )
668
510
 
669
- has_elastic_ip = false
670
- if !instance.public_ip_address.nil?
671
- begin
672
- resp = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_addresses(public_ips: [instance.public_ip_address])
673
- if resp.addresses.size > 0 and resp.addresses.first.instance_id == @cloud_id
674
- has_elastic_ip = true
675
- end
676
- rescue Aws::EC2::Errors::InvalidAddressNotFound => e
677
- # XXX this is ok to ignore, it means the public IP isn't Elastic
678
- end
679
- end
680
-
681
- win_admin_password = nil
682
- ec2config_password = nil
683
- sshd_password = nil
684
- if windows?
685
- ssh_keydir = "#{Etc.getpwuid(Process.uid).dir}/.ssh"
686
- ssh_key_name = @deploy.ssh_key_name
687
-
688
- if @config['use_cloud_provider_windows_password']
689
- win_admin_password = getWindowsAdminPassword
690
- elsif @config['windows_auth_vault'] && !@config['windows_auth_vault'].empty?
691
- if @config["windows_auth_vault"].has_key?("password_field")
692
- win_admin_password = @groomer.getSecret(
693
- vault: @config['windows_auth_vault']['vault'],
694
- item: @config['windows_auth_vault']['item'],
695
- field: @config["windows_auth_vault"]["password_field"]
696
- )
697
- else
698
- win_admin_password = getWindowsAdminPassword
699
- end
700
-
701
- if @config["windows_auth_vault"].has_key?("ec2config_password_field")
702
- ec2config_password = @groomer.getSecret(
703
- vault: @config['windows_auth_vault']['vault'],
704
- item: @config['windows_auth_vault']['item'],
705
- field: @config["windows_auth_vault"]["ec2config_password_field"]
706
- )
707
- end
708
-
709
- if @config["windows_auth_vault"].has_key?("sshd_password_field")
710
- sshd_password = @groomer.getSecret(
711
- vault: @config['windows_auth_vault']['vault'],
712
- item: @config['windows_auth_vault']['item'],
713
- field: @config["windows_auth_vault"]["sshd_password_field"]
714
- )
715
- end
716
- end
717
-
718
- win_admin_password = MU.generateWindowsPassword if win_admin_password.nil?
719
- ec2config_password = MU.generateWindowsPassword if ec2config_password.nil?
720
- sshd_password = MU.generateWindowsPassword if sshd_password.nil?
721
-
722
- # We're creating the vault here so when we run
723
- # MU::Cloud::Server.initialSSHTasks and we need to set the Windows
724
- # Admin password we can grab it from said vault.
725
- creds = {
726
- "username" => @config['windows_admin_username'],
727
- "password" => win_admin_password,
728
- "ec2config_username" => "ec2config",
729
- "ec2config_password" => ec2config_password,
730
- "sshd_username" => "sshd_service",
731
- "sshd_password" => sshd_password
732
- }
733
- @groomer.saveSecret(vault: @mu_name, item: "windows_credentials", data: creds, permissions: "name:#{@mu_name}")
734
- end
735
-
736
- subnet = nil
737
- if !@vpc.nil? and @config.has_key?("vpc") and !instance.subnet_id.nil?
738
- subnet = @vpc.getSubnet(
739
- cloud_id: instance.subnet_id
740
- )
741
- if subnet.nil?
742
- raise MuError, "Got null subnet id out of #{@config['vpc']} when asking for #{instance.subnet_id}"
743
- end
744
- end
745
-
746
- if !subnet.nil?
747
- if !subnet.private? or (!@config['static_ip'].nil? and !@config['static_ip']['assign_ip'].nil?)
748
- if !@config['static_ip'].nil?
749
- if !@config['static_ip']['ip'].nil?
750
- public_ip = MU::Cloud::AWS::Server.associateElasticIp(instance.instance_id, classic: false, ip: @config['static_ip']['ip'])
751
- elsif !has_elastic_ip
752
- public_ip = MU::Cloud::AWS::Server.associateElasticIp(instance.instance_id)
753
- end
754
- end
755
- end
756
-
757
- nat_ssh_key, nat_ssh_user, nat_ssh_host, canonical_ip, ssh_user, ssh_key_name = getSSHConfig
758
- if subnet.private? and !nat_ssh_host and !MU::Cloud::AWS::VPC.haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials'])
759
- raise MuError, "#{node} is in a private subnet (#{subnet}), but has no bastion host configured, and I have no other route to it"
760
- end
761
-
762
- # If we've asked for additional subnets (and this @config is not a
763
- # member of a Server Pool, which has different semantics), create
764
- # extra interfaces to accommodate.
765
- if !@config['vpc']['subnets'].nil? and @config['basis'].nil?
766
- device_index = 1
767
- @vpc.subnets.each { |s|
768
- subnet_id = s.cloud_id
769
- MU.log "Adding network interface on subnet #{subnet_id} for #{node}"
770
- iface = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).create_network_interface(subnet_id: subnet_id).network_interface
771
- MU::Cloud::AWS.createStandardTags(iface.network_interface_id, region: @config['region'], credentials: @config['credentials'])
772
- MU::MommaCat.createTag(iface.network_interface_id, "Name", node+"-ETH"+device_index.to_s, region: @config['region'], credentials: @config['credentials'])
773
-
774
- if @config['optional_tags']
775
- MU::MommaCat.listOptionalTags.each { |key, value|
776
- MU::MommaCat.createTag(iface.network_interface_id, key, value, region: @config['region'], credentials: @config['credentials'])
777
- }
778
- end
779
-
780
- if !@config['tags'].nil?
781
- @config['tags'].each { |tag|
782
- MU::MommaCat.createTag(iface.network_interface_id, tag['key'], tag['value'], region: @config['region'], credentials: @config['credentials'])
783
- }
784
- end
785
-
786
- MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).attach_network_interface(
787
- network_interface_id: iface.network_interface_id,
788
- instance_id: instance.instance_id,
789
- device_index: device_index
790
- )
791
- device_index = device_index + 1
792
- }
793
- end
794
- elsif !@config['static_ip'].nil?
795
- if !@config['static_ip']['ip'].nil?
796
- public_ip = MU::Cloud::AWS::Server.associateElasticIp(instance.instance_id, classic: true, ip: @config['static_ip']['ip'])
797
- elsif !has_elastic_ip
798
- public_ip = MU::Cloud::AWS::Server.associateElasticIp(instance.instance_id, classic: true)
799
- end
800
- end
801
-
511
+ tagVolumes
512
+ configureNetworking
513
+ saveCredentials
802
514
 
803
515
  if !@config['image_then_destroy']
804
516
  notify
805
517
  end
806
518
 
807
- MU.log "EC2 instance #{node} has id #{instance.instance_id}", MU::DEBUG
808
-
809
- @config["private_dns_name"] = instance.private_dns_name
810
- @config["public_dns_name"] = instance.public_dns_name
811
- @config["private_ip_address"] = instance.private_ip_address
812
- @config["public_ip_address"] = instance.public_ip_address
813
-
814
- ext_mappings = MU.structToHash(instance.block_device_mappings)
815
-
816
- # Root disk on standard CentOS AMI
817
- # tagVolumes(instance.instance_id, "/dev/sda", "Name", "ROOT-"+MU.deploy_id+"-"+@config["name"].upcase)
818
- # Root disk on standard Ubuntu AMI
819
- # tagVolumes(instance.instance_id, "/dev/sda1", "Name", "ROOT-"+MU.deploy_id+"-"+@config["name"].upcase)
820
-
821
- # Generic deploy ID tag
822
- # tagVolumes(instance.instance_id)
823
-
824
- # Tag volumes with all our standard tags.
825
- # Maybe replace tagVolumes with this? There is one more place tagVolumes is called from
826
- volumes = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_volumes(filters: [name: "attachment.instance-id", values: [instance.instance_id]])
827
- volumes.each { |vol|
828
- vol.volumes.each { |volume|
829
- volume.attachments.each { |attachment|
830
- MU::MommaCat.listStandardTags.each_pair { |key, value|
831
- MU::MommaCat.createTag(attachment.volume_id, key, value, region: @config['region'], credentials: @config['credentials'])
832
-
833
- if attachment.device == "/dev/sda" or attachment.device == "/dev/sda1"
834
- MU::MommaCat.createTag(attachment.volume_id, "Name", "ROOT-#{MU.deploy_id}-#{@config["name"].upcase}", region: @config['region'], credentials: @config['credentials'])
835
- else
836
- MU::MommaCat.createTag(attachment.volume_id, "Name", "#{MU.deploy_id}-#{@config["name"].upcase}-#{attachment.device.upcase}", region: @config['region'], credentials: @config['credentials'])
837
- end
838
- }
839
-
840
- if @config['optional_tags']
841
- MU::MommaCat.listOptionalTags.each { |key, value|
842
- MU::MommaCat.createTag(attachment.volume_id, key, value, region: @config['region'], credentials: @config['credentials'])
843
- }
844
- end
845
-
846
- if @config['tags']
847
- @config['tags'].each { |tag|
848
- MU::MommaCat.createTag(attachment.volume_id, tag['key'], tag['value'], region: @config['region'], credentials: @config['credentials'])
849
- }
850
- end
851
- }
852
- }
853
- }
854
-
855
- canonical_name = instance.public_dns_name
856
- canonical_name = instance.private_dns_name if !canonical_name or nat_ssh_host != nil
857
- @config['canonical_name'] = canonical_name
858
-
859
- if !@config['add_private_ips'].nil?
860
- instance.network_interfaces.each { |int|
861
- if int.private_ip_address == instance.private_ip_address and int.private_ip_addresses.size < (@config['add_private_ips'] + 1)
862
- MU.log "Adding #{@config['add_private_ips']} extra private IP addresses to #{instance.instance_id}"
863
- MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).assign_private_ip_addresses(
864
- network_interface_id: int.network_interface_id,
865
- secondary_private_ip_address_count: @config['add_private_ips'],
866
- allow_reassignment: false
867
- )
868
- end
869
- }
870
- notify
871
- end
872
-
873
- begin
874
- if @config['groom'].nil? or @config['groom']
875
- if windows?
876
- # kick off certificate generation early; WinRM will need it
877
- cert, key = @deploy.nodeSSLCerts(self)
878
- if @config.has_key?("basis")
879
- @deploy.nodeSSLCerts(self, true)
880
- end
881
- if !@groomer.haveBootstrapped?
882
- session = getWinRMSession(50, 60, reboot_on_problems: true)
883
- initialWinRMTasks(session)
884
- begin
885
- session.close
886
- rescue Exception
887
- # this is allowed to fail- we're probably rebooting anyway
888
- end
889
- else # for an existing Windows node: WinRM, then SSH if it fails
890
- begin
891
- session = getWinRMSession(1, 60)
892
- rescue Exception # yeah, yeah
893
- session = getSSHSession(1, 60)
894
- # XXX maybe loop at least once if this also fails?
895
- end
896
- end
897
- else
898
- session = getSSHSession(40, 30)
899
- initialSSHTasks(session)
900
- end
901
- end
902
- rescue BootstrapTempFail
903
- sleep 45
904
- retry
905
- ensure
906
- session.close if !session.nil? and !windows?
907
- end
908
-
909
- if @config["existing_deploys"] && !@config["existing_deploys"].empty?
910
- @config["existing_deploys"].each { |ext_deploy|
911
- if ext_deploy["cloud_id"]
912
- found = MU::MommaCat.findStray(
913
- @config['cloud'],
914
- ext_deploy["cloud_type"],
915
- cloud_id: ext_deploy["cloud_id"],
916
- region: @config['region'],
917
- dummy_ok: false
918
- ).first
919
-
920
- MU.log "Couldn't find existing resource #{ext_deploy["cloud_id"]}, #{ext_deploy["cloud_type"]}", MU::ERR if found.nil?
921
- @deploy.notify(ext_deploy["cloud_type"], found.config["name"], found.deploydata, mu_name: found.mu_name, triggering_node: @mu_name)
922
- elsif ext_deploy["mu_name"] && ext_deploy["deploy_id"]
923
- MU.log "#{ext_deploy["mu_name"]} / #{ext_deploy["deploy_id"]}"
924
- found = MU::MommaCat.findStray(
925
- @config['cloud'],
926
- ext_deploy["cloud_type"],
927
- deploy_id: ext_deploy["deploy_id"],
928
- mu_name: ext_deploy["mu_name"],
929
- region: @config['region'],
930
- dummy_ok: false
931
- ).first
932
-
933
- MU.log "Couldn't find existing resource #{ext_deploy["mu_name"]}/#{ext_deploy["deploy_id"]}, #{ext_deploy["cloud_type"]}", MU::ERR if found.nil?
934
- @deploy.notify(ext_deploy["cloud_type"], found.config["name"], found.deploydata, mu_name: ext_deploy["mu_name"], triggering_node: @mu_name)
935
- else
936
- MU.log "Trying to find existing deploy, but either the cloud_id is not valid or no mu_name and deploy_id where provided", MU::ERR
937
- end
938
- }
939
- end
940
-
941
- # See if this node already exists in our config management. If it does,
942
- # we're done.
943
- if MU.inGem?
944
- MU.log "Deploying from a gem, not grooming"
945
- MU::MommaCat.unlock(instance.instance_id+"-orchestrate")
946
- MU::MommaCat.unlock(instance.instance_id+"-groom")
947
-
948
- return true
949
- elsif @groomer.haveBootstrapped?
950
- MU.log "Node #{node} has already been bootstrapped, skipping groomer setup.", MU::NOTICE
951
-
952
- if @config['groom'].nil? or @config['groom']
953
- @groomer.saveDeployData
954
- end
955
-
956
- MU::MommaCat.unlock(instance.instance_id+"-orchestrate")
957
- MU::MommaCat.unlock(instance.instance_id+"-groom")
958
- return true
959
- end
960
-
961
- begin
962
- @groomer.bootstrap if @config['groom'].nil? or @config['groom']
963
- rescue MU::Groomer::RunError
964
- MU::MommaCat.unlock(instance.instance_id+"-groom")
965
- MU::MommaCat.unlock(instance.instance_id+"-orchestrate")
966
- return false
967
- end
519
+ getIAMProfile
520
+ finish.call(false) if !bootstrapGroomer
968
521
 
969
522
  # Make sure we got our name written everywhere applicable
970
523
  if !@named
@@ -972,149 +525,83 @@ module MU
972
525
  @named = true
973
526
  end
974
527
 
975
- MU::MommaCat.unlock(instance.instance_id+"-groom")
976
- MU::MommaCat.unlock(instance.instance_id+"-orchestrate")
977
- return true
978
- end
979
-
980
- # postBoot
528
+ finish.call(true)
529
+ end #postboot
981
530
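Note: the replacement lines above collapse the old inline logic into named helpers. A rough sketch (not the literal method body) of how those helpers now compose, assuming only the names shown in this hunk:

    # Simplified sketch of the refactored postBoot flow; helper names come from the hunk above,
    # everything else (signature, the finish callback, error handling) is condensed for illustration.
    def postBoot_sketch
      tagVolumes               # tag attached EBS volumes with deploy metadata
      configureNetworking      # ENIs, Elastic IPs, extra private IPs
      saveCredentials          # stash Windows admin/ec2config/sshd secrets in the node vault
      notify if !@config['image_then_destroy']
      getIAMProfile
      return false if !bootstrapGroomer   # first SSH/WinRM contact plus groomer bootstrap
      true
    end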
 
982
531
  # Locate an existing instance or instances and return an array containing matching AWS resource descriptors for those that match.
983
532
  # @return [Hash<String,OpenStruct>]: The cloud provider's complete descriptions of matching instances
984
533
  def self.find(**args)
985
534
  ip ||= args[:flags]['ip'] if args[:flags] and args[:flags]['ip']
986
535
 
987
- instance = nil
988
- if !args[:region].nil?
989
- regions = [args[:region]]
990
- else
991
- regions = MU::Cloud::AWS.listRegions
992
- end
536
+ regions = args[:region].nil? ? MU::Cloud::AWS.listRegions : [args[:region]]
993
537
 
994
538
  found = {}
995
539
  search_semaphore = Mutex.new
996
540
  search_threads = []
997
541
 
998
- if !ip and !args[:cloud_id] and !args[:tag_value]
999
- regions.each { |r|
1000
- search_threads << Thread.new {
1001
- MU::Cloud::AWS.ec2(region: r, credentials: args[:credentials]).describe_instances(
1002
- filters: [
1003
- {
1004
- name: "instance-state-name",
1005
- values: ["running", "pending", "stopped"]
1006
- }
1007
- ]
1008
- ).reservations.each { |resp|
1009
- if !resp.nil? and !resp.instances.nil?
1010
- resp.instances.each { |i|
1011
- search_semaphore.synchronize {
1012
- found[i.instance_id] = i
1013
- }
1014
- }
1015
- end
1016
- }
1017
- }
1018
- }
542
+ base_filter = { name: "instance-state-name", values: ["running", "pending", "stopped"] }
543
+ searches = []
1019
544
 
1020
- search_threads.each { |t|
1021
- t.join
545
+ if args[:cloud_id]
546
+ searches << {
547
+ :instance_ids => [args[:cloud_id]],
548
+ :filters => [base_filter]
1022
549
  }
1023
-
1024
- return found
1025
550
  end
1026
551
 
1027
- # If we got an instance id, go get it
1028
- if args[:cloud_id]
1029
- regions.each { |r|
1030
- search_threads << Thread.new {
1031
- MU.log "Hunting for instance with cloud id '#{args[:cloud_id]}' in #{r}", MU::DEBUG
1032
- retries = 0
1033
- begin
1034
- MU::Cloud::AWS.ec2(region: r, credentials: args[:credentials]).describe_instances(
1035
- instance_ids: [args[:cloud_id]],
1036
- filters: [
1037
- {
1038
- name: "instance-state-name",
1039
- values: ["running", "pending", "stopped"]
1040
- }
1041
- ]
1042
- ).reservations.each { |resp|
1043
- if !resp.nil? and !resp.instances.nil?
1044
- resp.instances.each { |i|
1045
- search_semaphore.synchronize {
1046
- found[i.instance_id] = i
1047
- }
1048
- }
1049
- end
1050
- }
1051
- rescue Aws::EC2::Errors::InvalidInstanceIDNotFound => e
1052
- if retries < 5
1053
- retries = retries + 1
1054
- sleep 5
1055
- else
1056
- raise MuError, "#{e.inspect} in region #{r}"
1057
- end
1058
- end
552
+ if ip
553
+ ["ip-address", "private-ip-address"].each { |ip_type|
554
+ searches << {
555
+ filters: [base_filter, {name: ip_type, values: [ip]} ],
1059
556
  }
1060
557
  }
1061
- done_threads = []
1062
- begin
1063
- search_threads.each { |t|
1064
- joined = t.join(2)
1065
- done_threads << joined if !joined.nil?
1066
- }
1067
- end while found.size < 1 and done_threads.size != search_threads.size
1068
558
  end
1069
559
 
1070
- return found if found.size > 0
1071
-
1072
- # Ok, well, let's try looking it up by IP then
1073
- if !ip.nil?
1074
- MU.log "Hunting for instance by IP '#{ip}'", MU::DEBUG
1075
- ["ip-address", "private-ip-address"].each { |filter|
1076
- regions.each { |r|
1077
- response = MU::Cloud::AWS.ec2(region: r, credentials: args[:credentials]).describe_instances(
1078
- filters: [
1079
- {name: filter, values: [ip]},
1080
- {name: "instance-state-name", values: ["running", "pending", "stopped"]}
1081
- ]
1082
- ).reservations.first
1083
- response.instances.each { |i|
1084
- found[i.instance_id] = i
1085
- }
1086
- }
560
+ if args[:tag_value] and args[:tag_key]
561
+ searches << {
562
+ filters: [
563
+ base_filter,
565
+ {name: "tag:#{args[:tag_key]}", values: [args[:tag_value]]},
566
+ ]
1087
567
  }
1088
568
  end
1089
569
 
1090
- return found if found.size > 0
570
+ if searches.empty?
571
+ searches << { filters: [base_filter] }
572
+ end
1091
573
 
1092
- # Fine, let's try it by tag.
1093
- if args[:tag_value]
1094
- MU.log "Searching for instance by tag '#{args[:tag_key]}=#{args[:tag_value]}'", MU::DEBUG
1095
- regions.each { |r|
1096
- MU::Cloud::AWS.ec2(region: r, credentials: args[:credentials]).describe_instances(
1097
- filters: [
1098
- {name: "tag:#{args[:tag_key]}", values: [args[:tag_value]]},
1099
- {name: "instance-state-name", values: ["running", "pending", "stopped"]}
1100
- ]
1101
- ).reservations.each { |resp|
1102
- if !resp.nil? and resp.instances.size > 0
1103
- resp.instances.each { |i|
1104
- found[i.instance_id] = i
574
+ regions.each { |r|
575
+ searches.each { |search|
576
+ search_threads << Thread.new(search) { |params|
577
+ MU.retrier([Aws::EC2::Errors::InvalidInstanceIDNotFound], wait: 5, max: 5, ignoreme: [Aws::EC2::Errors::InvalidInstanceIDNotFound]) {
578
+ MU::Cloud::AWS.ec2(region: r, credentials: args[:credentials]).describe_instances(params).reservations.each { |resp|
579
+ next if resp.nil? or resp.instances.nil?
580
+ resp.instances.each { |i|
581
+ search_semaphore.synchronize {
582
+ found[i.instance_id] = i
583
+ }
584
+ }
1105
585
  }
1106
- end
586
+ }
1107
587
  }
1108
588
  }
1109
- end
1110
-
1111
- return found
589
+ }
590
+ done_threads = []
591
+ begin
592
+ search_threads.each { |t|
593
+ joined = t.join(2)
594
+ done_threads << joined if !joined.nil?
595
+ }
596
+ end while found.size < 1 and done_threads.size != search_threads.size
597
+
598
+ return found
1112
599
  end
1113
600
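Note: an illustrative walk-through of how the consolidated searches list above is assembled; the instance id, IP, and tag values are placeholders, not output from this code.

    base_filter = { name: "instance-state-name", values: ["running", "pending", "stopped"] }
    searches = []
    searches << { instance_ids: ["i-0123456789abcdef0"], filters: [base_filter] }            # by cloud_id
    searches << { filters: [base_filter, { name: "ip-address", values: ["203.0.113.10"] }] }  # by public IP
    searches << { filters: [base_filter, { name: "tag:Name", values: ["mynode"] }] }          # by tag
    searches << { filters: [base_filter] } if searches.empty?                                 # fallback: list everything
    # Each entry becomes one describe_instances(params) call per region, run in its own thread.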
 
1114
601
  # Reverse-map our cloud description into a runnable config hash.
1115
602
  # We assume that any values we have in +@config+ are placeholders, and
1116
603
  # calculate our own accordingly based on what's live in the cloud.
1117
- def toKitten(rootparent: nil, billing: nil, habitats: nil)
604
+ def toKitten(**_args)
1118
605
  bok = {
1119
606
  "cloud" => "AWS",
1120
607
  "credentials" => @config['credentials'],
@@ -1127,7 +614,7 @@ module MU
1127
614
  return nil
1128
615
  end
1129
616
 
1130
- asgs = MU::Cloud::AWS::ServerPool.find(
617
+ asgs = MU::Cloud.resourceClass("AWS", "ServerPool").find(
1131
618
  instance_id: @cloud_id,
1132
619
  region: @config['region'],
1133
620
  credentials: @credentials
@@ -1221,8 +708,8 @@ module MU
1221
708
 
1222
709
  int.private_ip_addresses.each { |priv_ip|
1223
710
  if !priv_ip.primary
1224
- bok['add_private_ips'] ||= []
1225
- bok['add_private_ips'] << priv_ip.private_ip_address
711
+ bok['add_private_ips'] ||= 0
712
+ bok['add_private_ips'] += 1
1226
713
  end
1227
714
  if priv_ip.association and priv_ip.association.public_ip
1228
715
  bok['associate_public_ip'] = true
@@ -1237,15 +724,15 @@ module MU
1237
724
 
1238
725
  if int.groups.size > 0
1239
726
 
1240
- require 'mu/clouds/aws/firewall_rule'
1241
- ifaces = MU::Cloud::AWS::FirewallRule.getAssociatedInterfaces(int.groups.map { |sg| sg.group_id }, credentials: @credentials, region: @config['region'])
727
+ require 'mu/providers/aws/firewall_rule'
728
+ ifaces = MU::Cloud.resourceClass("AWS", "FirewallRule").getAssociatedInterfaces(int.groups.map { |sg| sg.group_id }, credentials: @credentials, region: @config['region'])
1242
729
  done_local_rules = false
1243
730
  int.groups.each { |sg|
1244
731
  if !done_local_rules and ifaces[sg.group_id].size == 1
1245
- sg_desc = MU::Cloud::AWS::FirewallRule.find(cloud_id: sg.group_id, credentials: @credentials, region: @config['region']).values.first
732
+ sg_desc = MU::Cloud.resourceClass("AWS", "FirewallRule").find(cloud_id: sg.group_id, credentials: @credentials, region: @config['region']).values.first
1246
733
  if sg_desc
1247
- bok["ingress_rules"] = MU::Cloud::AWS::FirewallRule.rulesToBoK(sg_desc.ip_permissions)
1248
- bok["ingress_rules"].concat(MU::Cloud::AWS::FirewallRule.rulesToBoK(sg_desc.ip_permissions_egress, egress: true))
734
+ bok["ingress_rules"] = MU::Cloud.resourceClass("AWS", "FirewallRule").rulesToBoK(sg_desc.ip_permissions)
735
+ bok["ingress_rules"].concat(MU::Cloud.resourceClass("AWS", "FirewallRule").rulesToBoK(sg_desc.ip_permissions_egress, egress: true))
1249
736
  done_local_rules = true
1250
737
  next
1251
738
  end
@@ -1270,9 +757,6 @@ module MU
1270
757
  # Return a description of this resource appropriate for deployment
1271
758
  # metadata. Arguments reflect the return values of the MU::Cloud::[Resource].describe method
1272
759
  def notify
1273
- node, config, deploydata = describe(cloud_id: @cloud_id, update_cache: true)
1274
- deploydata = {} if deploydata.nil?
1275
-
1276
760
  if cloud_desc.nil?
1277
761
  raise MuError, "Failed to load instance metadata for #{@mu_name}/#{@cloud_id}"
1278
762
  end
@@ -1317,52 +801,16 @@ module MU
1317
801
  end
1318
802
  deploydata["region"] = @config['region'] if !@config['region'].nil?
1319
803
  if !@named
1320
- MU::MommaCat.nameKitten(self)
804
+ MU::MommaCat.nameKitten(self, no_dns: true)
1321
805
  @named = true
1322
806
  end
1323
807
 
1324
808
  return deploydata
1325
809
  end
1326
810
 
1327
- # If the specified server is in a VPC, and has a NAT, make sure we'll
1328
- # be letting ssh traffic in from said NAT.
1329
- def punchAdminNAT
1330
- if @config['vpc'].nil? or
1331
- (
1332
- !@config['vpc'].has_key?("nat_host_id") and
1333
- !@config['vpc'].has_key?("nat_host_tag") and
1334
- !@config['vpc'].has_key?("nat_host_ip") and
1335
- !@config['vpc'].has_key?("nat_host_name")
1336
- )
1337
- return nil
1338
- end
1339
-
1340
- return nil if @nat.is_a?(Struct) && @nat.nat_gateway_id && @nat.nat_gateway_id.start_with?("nat-")
1341
-
1342
- dependencies if @nat.nil?
1343
- if @nat.nil? or @nat.cloud_desc.nil?
1344
- raise MuError, "#{@mu_name} (#{MU.deploy_id}) is configured to use #{@config['vpc']} but I can't find the cloud descriptor for a matching NAT instance"
1345
- end
1346
- MU.log "Adding administrative holes for NAT host #{@nat.cloud_desc.private_ip_address} to #{@mu_name}"
1347
- if !@deploy.kittens['firewall_rules'].nil?
1348
- @deploy.kittens['firewall_rules'].each_pair { |name, acl|
1349
- if acl.config["admin"]
1350
- acl.addRule([@nat.cloud_desc.private_ip_address], proto: "tcp")
1351
- acl.addRule([@nat.cloud_desc.private_ip_address], proto: "udp")
1352
- acl.addRule([@nat.cloud_desc.private_ip_address], proto: "icmp")
1353
- end
1354
- }
1355
- end
1356
- end
1357
-
1358
811
  # Called automatically by {MU::Deploy#createResources}
1359
812
  def groom
1360
813
  MU::MommaCat.lock(@cloud_id+"-groom")
1361
- node, config, deploydata = describe(cloud_id: @cloud_id)
1362
-
1363
- if node.nil? or node.empty?
1364
- raise MuError, "MU::Cloud::AWS::Server.groom was called without a mu_name"
1365
- end
1366
814
 
1367
815
  # Make double sure we don't lose a cached mu_windows_name value.
1368
816
  if windows? or !@config['active_directory'].nil?
@@ -1371,9 +819,9 @@ module MU
1371
819
  end
1372
820
  end
1373
821
 
1374
- punchAdminNAT
822
+ allowBastionAccess
1375
823
 
1376
- MU::Cloud::AWS::Server.tagVolumes(@cloud_id, credentials: @config['credentials'])
824
+ tagVolumes
1377
825
 
1378
826
  # If we have a loadbalancer configured, attach us to it
1379
827
  if !@config['loadbalancers'].nil?
@@ -1402,55 +850,31 @@ module MU
1402
850
  end
1403
851
 
1404
852
  begin
853
+ getIAMProfile
854
+
855
+ dbs = @deploy.findLitterMate(type: "database", return_all: true)
856
+ if dbs
857
+ dbs.each_pair { |sib_name, sib|
858
+ @groomer.groomer_class.grantSecretAccess(@mu_name, sib_name, "database_credentials")
859
+ if sib.config and sib.config['auth_vault']
860
+ @groomer.groomer_class.grantSecretAccess(@mu_name, sib.config['auth_vault']['vault'], sib.config['auth_vault']['item'])
861
+ end
862
+ }
863
+ end
864
+
1405
865
  if @config['groom'].nil? or @config['groom']
1406
- @groomer.run(purpose: "Full Initial Run", max_retries: 15, reboot_first_fail: windows?, timeout: @config['groomer_timeout'])
866
+ @groomer.run(purpose: "Full Initial Run", max_retries: 15, reboot_first_fail: (windows? and @config['groomer'] != "Ansible"), timeout: @config['groomer_timeout'])
1407
867
  end
1408
868
  rescue MU::Groomer::RunError => e
1409
- MU.log "Proceeding after failed initial Groomer run, but #{node} may not behave as expected!", MU::WARN, details: e.message
1410
- rescue Exception => e
1411
- MU.log "Caught #{e.inspect} on #{node} in an unexpected place (after @groomer.run on Full Initial Run)", MU::ERR
869
+ raise e if !@config['create_image'].nil? and !@config['image_created']
870
+ MU.log "Proceeding after failed initial Groomer run, but #{@mu_name} may not behave as expected!", MU::WARN, details: e.message
871
+ rescue StandardError => e
872
+ raise e if !@config['create_image'].nil? and !@config['image_created']
873
+ MU.log "Caught #{e.inspect} on #{@mu_name} in an unexpected place (after @groomer.run on Full Initial Run)", MU::ERR
1412
874
  end
1413
875
 
1414
876
  if !@config['create_image'].nil? and !@config['image_created']
1415
- img_cfg = @config['create_image']
1416
- # Scrub things that don't belong on an AMI
1417
- session = getSSHSession
1418
- sudo = purgecmd = ""
1419
- sudo = "sudo" if @config['ssh_user'] != "root"
1420
- if windows?
1421
- purgecmd = "rm -rf /cygdrive/c/mu_installed_chef"
1422
- else
1423
- purgecmd = "rm -rf /opt/mu_installed_chef"
1424
- end
1425
- if img_cfg['image_then_destroy']
1426
- if windows?
1427
- purgecmd = "rm -rf /cygdrive/c/chef/ /home/#{@config['windows_admin_username']}/.ssh/authorized_keys /home/Administrator/.ssh/authorized_keys /cygdrive/c/mu-installer-ran-updates /cygdrive/c/mu_installed_chef"
1428
- # session.exec!("powershell -Command \"& {(Get-WmiObject -Class Win32_Product -Filter \"Name='UniversalForwarder'\").Uninstall()}\"")
1429
- else
1430
- purgecmd = "#{sudo} rm -rf /var/lib/cloud/instances/i-* /root/.ssh/authorized_keys /etc/ssh/ssh_host_*key* /etc/chef /etc/opscode/* /.mu-installer-ran-updates /var/chef /opt/mu_installed_chef /opt/chef ; #{sudo} sed -i 's/^HOSTNAME=.*//' /etc/sysconfig/network"
1431
- end
1432
- end
1433
- session.exec!(purgecmd)
1434
- session.close
1435
- ami_ids = MU::Cloud::AWS::Server.createImage(
1436
- name: @mu_name,
1437
- instance_id: @cloud_id,
1438
- storage: @config['storage'],
1439
- exclude_storage: img_cfg['image_exclude_storage'],
1440
- copy_to_regions: img_cfg['copy_to_regions'],
1441
- make_public: img_cfg['public'],
1442
- region: @config['region'],
1443
- tags: @config['tags'],
1444
- credentials: @config['credentials']
1445
- )
1446
- @deploy.notify("images", @config['name'], ami_ids)
1447
- @config['image_created'] = true
1448
- if img_cfg['image_then_destroy']
1449
- MU::Cloud::AWS::Server.waitForAMI(ami_ids[@config['region']], region: @config['region'], credentials: @config['credentials'])
1450
- MU.log "AMI #{ami_ids[@config['region']]} ready, removing source node #{node}"
1451
- MU::Cloud::AWS::Server.terminateInstance(id: @cloud_id, region: @config['region'], deploy_id: @deploy.deploy_id, mu_name: @mu_name, credentials: @config['credentials'])
1452
- destroy
1453
- end
877
+ createImage
1454
878
  end
1455
879
 
1456
880
  MU::MommaCat.unlock(@cloud_id+"-groom")
@@ -1462,9 +886,12 @@ module MU
1462
886
  "arn:"+(MU::Cloud::AWS.isGovCloud?(@config["region"]) ? "aws-us-gov" : "aws")+":ec2:"+@config['region']+":"+MU::Cloud::AWS.credToAcct(@config['credentials'])+":instance/"+@cloud_id
1463
887
  end
1464
888
 
889
+ @cloud_desc_cache = nil
1465
890
  # Return the cloud provider's description for this instance
1466
891
  # @return [Openstruct]
1467
- def cloud_desc
892
+ def cloud_desc(use_cache: true)
893
+ return @cloud_desc_cache if @cloud_desc_cache and use_cache
894
+ return nil if !@cloud_id
1468
895
  max_retries = 5
1469
896
  retries = 0
1470
897
  if !@cloud_id.nil?
@@ -1473,11 +900,12 @@ module MU
1473
900
  if resp and resp.reservations and resp.reservations.first and
1474
901
  resp.reservations.first.instances and
1475
902
  resp.reservations.first.instances.first
1476
- return resp.reservations.first.instances.first
903
+ @cloud_desc_cache = resp.reservations.first.instances.first
904
+ return @cloud_desc_cache
1477
905
  end
1478
906
  rescue Aws::EC2::Errors::InvalidInstanceIDNotFound
1479
907
  return nil
1480
- rescue NoMethodError => e
908
+ rescue NoMethodError
1481
909
  if retries >= max_retries
1482
910
  raise MuError, "Couldn't get a cloud descriptor for #{@mu_name} (#{@cloud_id})"
1483
911
  else
@@ -1495,23 +923,19 @@ module MU
1495
923
  # bastion hosts that may be in the path, see getSSHConfig if that's what
1496
924
  # you need.
1497
925
  def canonicalIP
1498
- mu_name, config, deploydata = describe(cloud_id: @cloud_id)
1499
-
1500
- instance = cloud_desc
1501
-
1502
- if !instance
926
+ if !cloud_desc
1503
927
  raise MuError, "Couldn't retrieve cloud descriptor for server #{self}"
1504
928
  end
1505
929
 
1506
930
  if deploydata.nil? or
1507
931
  (!deploydata.has_key?("private_ip_address") and
1508
932
  !deploydata.has_key?("public_ip_address"))
1509
- return nil if instance.nil?
933
+ return nil if cloud_desc.nil?
1510
934
  @deploydata = {} if @deploydata.nil?
1511
- @deploydata["public_ip_address"] = instance.public_ip_address
1512
- @deploydata["public_dns_name"] = instance.public_dns_name
1513
- @deploydata["private_ip_address"] = instance.private_ip_address
1514
- @deploydata["private_dns_name"] = instance.private_dns_name
935
+ @deploydata["public_ip_address"] = cloud_desc.public_ip_address
936
+ @deploydata["public_dns_name"] = cloud_desc.public_dns_name
937
+ @deploydata["private_ip_address"] = cloud_desc.private_ip_address
938
+ @deploydata["private_dns_name"] = cloud_desc.private_dns_name
1515
939
 
1516
940
  notify
1517
941
  end
@@ -1519,14 +943,14 @@ module MU
1519
943
  # Our deploydata gets corrupted often with server pools, this will cause us to use the wrong IP to identify a node
1520
944
  # which will cause us to create certificates, DNS records and other artifacts with incorrect information which will cause our deploy to fail.
1521
945
  # The cloud_id is always correct so lets use 'cloud_desc' to get the correct IPs
1522
- if MU::Cloud::AWS::VPC.haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials']) or @deploydata["public_ip_address"].nil?
1523
- @config['canonical_ip'] = instance.private_ip_address
1524
- @deploydata["private_ip_address"] = instance.private_ip_address
1525
- return instance.private_ip_address
946
+ if MU::Cloud.resourceClass("AWS", "VPC").haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials']) or @deploydata["public_ip_address"].nil?
947
+ @config['canonical_ip'] = cloud_desc.private_ip_address
948
+ @deploydata["private_ip_address"] = cloud_desc.private_ip_address
949
+ return cloud_desc.private_ip_address
1526
950
  else
1527
- @config['canonical_ip'] = instance.public_ip_address
1528
- @deploydata["public_ip_address"] = instance.public_ip_address
1529
- return instance.public_ip_address
951
+ @config['canonical_ip'] = cloud_desc.public_ip_address
952
+ @deploydata["public_ip_address"] = cloud_desc.public_ip_address
953
+ return cloud_desc.public_ip_address
1530
954
  end
1531
955
  end
1532
956
 
@@ -1574,7 +998,7 @@ module MU
1574
998
  resp = nil
1575
999
  begin
1576
1000
  resp = MU::Cloud::AWS.ec2(region: region, credentials: credentials).create_image(ami_descriptor)
1577
- rescue Aws::EC2::Errors::InvalidAMINameDuplicate => e
1001
+ rescue Aws::EC2::Errors::InvalidAMINameDuplicate
1578
1002
  MU.log "AMI #{name} already exists, skipping", MU::WARN
1579
1003
  return nil
1580
1004
  end
@@ -1583,7 +1007,7 @@ module MU
1583
1007
 
1584
1008
  ami_ids[region] = ami
1585
1009
  MU::Cloud::AWS.createStandardTags(ami, region: region, credentials: credentials)
1586
- MU::MommaCat.createTag(ami, "Name", name, region: region, credentials: credentials)
1010
+ MU::Cloud::AWS.createTag(ami, "Name", name, region: region, credentials: credentials)
1587
1011
  MU.log "AMI of #{name} in region #{region}: #{ami}"
1588
1012
  if make_public
1589
1013
  MU::Cloud::AWS::Server.waitForAMI(ami, region: region, credentials: credentials)
@@ -1611,10 +1035,10 @@ module MU
1611
1035
  ami_ids[r] = copy.image_id
1612
1036
 
1613
1037
  MU::Cloud::AWS.createStandardTags(copy.image_id, region: r, credentials: credentials)
1614
- MU::MommaCat.createTag(copy.image_id, "Name", name, region: r, credentials: credentials)
1038
+ MU::Cloud::AWS.createTag(copy.image_id, "Name", name, region: r, credentials: credentials)
1615
1039
  if !tags.nil?
1616
1040
  tags.each { |tag|
1617
- MU::MommaCat.createTag(instance.instance_id, tag['key'], tag['value'], region: r, credentials: credentials)
1041
+ MU::Cloud::AWS.createTag(instance.instance_id, tag['key'], tag['value'], region: r, credentials: credentials)
1618
1042
  }
1619
1043
  end
1620
1044
  MU::Cloud::AWS::Server.waitForAMI(copy.image_id, region: r, credentials: credentials)
@@ -1719,11 +1143,27 @@ module MU
1719
1143
  # Retrieves the Cloud provider's randomly generated Windows password
1720
1144
  # Will only work on stock Amazon Windows AMIs or custom AMIs that were created with Administrator Password set to random in EC2Config
1721
1145
  # @return [String]: A password string.
1722
- def getWindowsAdminPassword
1723
- if @cloud_id.nil?
1724
- node, config, deploydata = describe
1725
- @cloud_id = cloud_desc.instance_id
1146
+ def getWindowsAdminPassword(use_cache: true)
1147
+ @config['windows_auth_vault'] ||= {
1148
+ "vault" => @mu_name,
1149
+ "item" => "windows_credentials",
1150
+ "password_field" => "password"
1151
+ }
1152
+
1153
+ if use_cache
1154
+ begin
1155
+ win_admin_password = @groomer.getSecret(
1156
+ vault: @config['windows_auth_vault']['vault'],
1157
+ item: @config['windows_auth_vault']['item'],
1158
+ field: @config["windows_auth_vault"]["password_field"]
1159
+ )
1160
+
1161
+ return win_admin_password if win_admin_password
1162
+ rescue MU::Groomer::MuNoSuchSecret, MU::Groomer::RunError
1163
+ end
1726
1164
  end
1165
+
1166
+ @cloud_id ||= cloud_desc(use_cache: false).instance_id
1727
1167
  ssh_keydir = "#{Etc.getpwuid(Process.uid).dir}/.ssh"
1728
1168
  ssh_key_name = @deploy.ssh_key_name
1729
1169
 
@@ -1758,6 +1198,8 @@ module MU
1758
1198
  pem_bytes = File.open("#{ssh_keydir}/#{ssh_key_name}", 'rb') { |f| f.read }
1759
1199
  private_key = OpenSSL::PKey::RSA.new(pem_bytes)
1760
1200
  decrypted_password = private_key.private_decrypt(decoded)
1201
+ saveCredentials(decrypted_password)
1202
+
1761
1203
  return decrypted_password
1762
1204
  end
1763
1205
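Note: a minimal sketch of the lookup order the reworked method now follows; the vault, item, and field names mirror the defaults set above, and the fallback is the EC2 password-data decryption shown in this hunk.

    begin
      password = @groomer.getSecret(vault: @mu_name, item: "windows_credentials", field: "password")
    rescue MU::Groomer::MuNoSuchSecret, MU::Groomer::RunError
      password = nil   # no cached secret yet
    end
    # Cache miss: fall back to decrypting the provider-generated password, which also saves it.
    password ||= getWindowsAdminPassword(use_cache: false)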
 
@@ -1831,61 +1273,37 @@ module MU
1831
1273
  # @param type [String]: Cloud storage type of the volume, if applicable
1832
1274
  # @param delete_on_termination [Boolean]: Value of delete_on_termination flag to set
1833
1275
  def addVolume(dev, size, type: "gp2", delete_on_termination: false)
1834
- if @cloud_id.nil? or @cloud_id.empty?
1835
- MU.log "#{self} didn't have a cloud id, couldn't determine 'active?' status", MU::ERR
1836
- return true
1276
+
1277
+ if setDeleteOntermination(dev, delete_on_termination)
1278
+ MU.log "A volume #{device} already attached to #{self}, skipping", MU::NOTICE
1279
+ return
1837
1280
  end
1838
- az = nil
1839
- MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_instances(
1840
- instance_ids: [@cloud_id]
1841
- ).reservations.each { |resp|
1842
- if !resp.nil? and !resp.instances.nil?
1843
- resp.instances.each { |instance|
1844
- az = instance.placement.availability_zone
1845
- d_o_t_changed = true
1846
- mappings = MU.structToHash(instance.block_device_mappings)
1847
- mappings.each { |vol|
1848
- if vol[:ebs]
1849
- vol[:ebs].delete(:attach_time)
1850
- vol[:ebs].delete(:status)
1851
- end
1852
- }
1853
- mappings.each { |vol|
1854
- if vol[:device_name] == dev
1855
- MU.log "A volume #{dev} already attached to #{self}, skipping", MU::NOTICE
1856
- if vol[:ebs][:delete_on_termination] != delete_on_termination
1857
- vol[:ebs][:delete_on_termination] = delete_on_termination
1858
- MU.log "Setting delete_on_termination flag to #{delete_on_termination.to_s} on #{@mu_name}'s #{dev}"
1859
- MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).modify_instance_attribute(
1860
- instance_id: @cloud_id,
1861
- block_device_mappings: mappings
1862
- )
1863
- end
1864
- return
1865
- end
1866
- }
1867
- }
1868
- end
1869
- }
1281
+
1870
1282
  MU.log "Creating #{size}GB #{type} volume on #{dev} for #{@cloud_id}"
1871
1283
  creation = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).create_volume(
1872
- availability_zone: az,
1284
+ availability_zone: cloud_desc.placement.availability_zone,
1873
1285
  size: size,
1874
1286
  volume_type: type
1875
1287
  )
1876
- begin
1877
- sleep 3
1288
+
1289
+ MU.retrier(wait: 3, loop_if: Proc.new {
1878
1290
  creation = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_volumes(volume_ids: [creation.volume_id]).volumes.first
1879
1291
  if !["creating", "available"].include?(creation.state)
1880
1292
  raise MuError, "Saw state '#{creation.state}' while creating #{size}GB #{type} volume on #{dev} for #{@cloud_id}"
1881
1293
  end
1882
- end while creation.state != "available"
1294
+ creation.state != "available"
1295
+ })
1296
+
1883
1297
 
1884
1298
  if @deploy
1885
- MU::MommaCat.listStandardTags.each_pair { |key, value|
1886
- MU::MommaCat.createTag(creation.volume_id, key, value, region: @config['region'], credentials: @config['credentials'])
1887
- }
1888
- MU::MommaCat.createTag(creation.volume_id, "Name", "#{MU.deploy_id}-#{@config["name"].upcase}-#{dev.upcase}", region: @config['region'], credentials: @config['credentials'])
1299
+ MU::Cloud::AWS.createStandardTags(
1300
+ creation.volume_id,
1301
+ region: @config['region'],
1302
+ credentials: @config['credentials'],
1303
+ optional: @config['optional_tags'],
1304
+ nametag: @mu_name+"-"+dev.upcase,
1305
+ othertags: @config['tags']
1306
+ )
1889
1307
  end
1890
1308
 
1891
1309
  attachment = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).attach_volume(
@@ -1904,29 +1322,7 @@ module MU
1904
1322
 
1905
1323
  # Set delete_on_termination, which for some reason is an instance
1906
1324
  # attribute and not on the attachment
1907
- mappings = MU.structToHash(cloud_desc.block_device_mappings)
1908
- changed = false
1909
-
1910
- mappings.each { |mapping|
1911
- if mapping[:ebs]
1912
- mapping[:ebs].delete(:attach_time)
1913
- mapping[:ebs].delete(:status)
1914
- end
1915
- if mapping[:device_name] == dev and
1916
- mapping[:ebs][:delete_on_termination] != delete_on_termination
1917
- changed = true
1918
- mapping[:ebs][:delete_on_termination] = delete_on_termination
1919
- end
1920
- }
1921
-
1922
- if changed
1923
- MU.log "Setting delete_on_termination flag to #{delete_on_termination.to_s} on #{@mu_name}'s #{dev}"
1924
- MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).modify_instance_attribute(
1925
- instance_id: @cloud_id,
1926
- block_device_mappings: mappings
1927
- )
1928
- end
1929
-
1325
+ setDeleteOntermination(dev, delete_on_termination)
1930
1326
  end
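Note: a hypothetical call against the simplified path above; the device name and size are placeholders. setDeleteOntermination short-circuits when the device is already attached, otherwise the volume is created, tagged, and attached.

    # Attach a 100 GB gp2 volume on /dev/xvdf and have it removed with the instance.
    server.addVolume("/dev/xvdf", 100, type: "gp2", delete_on_termination: true)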
1931
1327
 
1932
1328
  # Determine whether the node in question exists at the Cloud provider
@@ -1964,13 +1360,13 @@ module MU
1964
1360
  # @param ip [String]: Request a specific IP address.
1965
1361
  # @param region [String]: The cloud provider region
1966
1362
  # @return [void]
1967
- def self.associateElasticIp(instance_id, classic: false, ip: nil, region: MU.curRegion)
1363
+ def self.associateElasticIp(instance_id, classic: false, ip: nil, region: MU.curRegion, credentials: nil)
1968
1364
  MU.log "associateElasticIp called: #{instance_id}, classic: #{classic}, ip: #{ip}, region: #{region}", MU::DEBUG
1969
1365
  elastic_ip = nil
1970
1366
  @eip_semaphore.synchronize {
1971
1367
  if !ip.nil?
1972
1368
  filters = [{name: "public-ip", values: [ip]}]
1973
- resp = MU::Cloud::AWS.ec2(region: region).describe_addresses(filters: filters)
1369
+ resp = MU::Cloud::AWS.ec2(region: region, credentials: credentials).describe_addresses(filters: filters)
1974
1370
  if @eips_used.include?(ip)
1975
1371
  is_free = false
1976
1372
  resp.addresses.each { |address|
@@ -1999,54 +1395,44 @@ module MU
1999
1395
  @eips_used << elastic_ip.public_ip
2000
1396
  MU.log "Associating Elastic IP #{elastic_ip.public_ip} with #{instance_id}", details: elastic_ip
2001
1397
  }
2002
- attempts = 0
2003
- begin
1398
+
1399
+ on_retry = Proc.new { |e|
1400
+ if e.class == Aws::EC2::Errors::ResourceAlreadyAssociated
1401
+ # A previous association attempt may have succeeded, albeit slowly.
1402
+ resp = MU::Cloud::AWS.ec2(region: region, credentials: credentials).describe_addresses(
1403
+ allocation_ids: [elastic_ip.allocation_id]
1404
+ )
1405
+ first_addr = resp.addresses.first
1406
+ if first_addr and first_addr.instance_id != instance_id
1407
+ raise MuError, "Tried to associate #{elastic_ip.public_ip} with #{instance_id}, but it's already associated with #{first_addr.instance_id}!"
1408
+ end
1409
+ end
1410
+ }
1411
+
1412
+ MU.retrier([Aws::EC2::Errors::IncorrectInstanceState, Aws::EC2::Errors::ResourceAlreadyAssociated], wait: 5, max: 6, on_retry: on_retry) {
2004
1413
  if classic
2005
- resp = MU::Cloud::AWS.ec2(region: region).associate_address(
2006
- instance_id: instance_id,
2007
- public_ip: elastic_ip.public_ip
1414
+ MU::Cloud::AWS.ec2(region: region, credentials: credentials).associate_address(
1415
+ instance_id: instance_id,
1416
+ public_ip: elastic_ip.public_ip
2008
1417
  )
2009
1418
  else
2010
- resp = MU::Cloud::AWS.ec2(region: region).associate_address(
2011
- instance_id: instance_id,
2012
- allocation_id: elastic_ip.allocation_id,
2013
- allow_reassociation: false
1419
+ MU::Cloud::AWS.ec2(region: region, credentials: credentials).associate_address(
1420
+ instance_id: instance_id,
1421
+ allocation_id: elastic_ip.allocation_id,
1422
+ allow_reassociation: false
2014
1423
  )
2015
1424
  end
2016
- rescue Aws::EC2::Errors::IncorrectInstanceState => e
2017
- attempts = attempts + 1
2018
- if attempts < 6
2019
- MU.log "Got #{e.message} associating #{elastic_ip.allocation_id} with #{instance_id}, retrying", MU::WARN
2020
- sleep 5
2021
- retry
2022
- end
2023
- raise MuError "#{e.message} associating #{elastic_ip.allocation_id} with #{instance_id}"
2024
- rescue Aws::EC2::Errors::ResourceAlreadyAssociated => e
2025
- # A previous association attempt may have succeeded, albeit slowly.
2026
- resp = MU::Cloud::AWS.ec2(region: region).describe_addresses(
2027
- allocation_ids: [elastic_ip.allocation_id]
2028
- )
2029
- first_addr = resp.addresses.first
2030
- if !first_addr.nil? and first_addr.instance_id == instance_id
2031
- MU.log "#{elastic_ip.public_ip} already associated with #{instance_id}", MU::WARN
2032
- else
2033
- MU.log "#{elastic_ip.public_ip} shows as already associated!", MU::ERR, details: resp
2034
- raise MuError, "#{elastic_ip.public_ip} shows as already associated with #{first_addr.instance_id}!"
2035
- end
2036
- end
1425
+ }
2037
1426
 
2038
- instance = MU::Cloud::AWS.ec2(region: region).describe_instances(instance_ids: [instance_id]).reservations.first.instances.first
2039
- waited = false
2040
- if instance.public_ip_address != elastic_ip.public_ip
2041
- waited = true
2042
- begin
2043
- sleep 10
2044
- MU.log "Waiting for Elastic IP association of #{elastic_ip.public_ip} to #{instance_id} to take effect", MU::NOTICE
2045
- instance = MU::Cloud::AWS.ec2(region: region).describe_instances(instance_ids: [instance_id]).reservations.first.instances.first
2046
- end while instance.public_ip_address != elastic_ip.public_ip
2047
- end
1427
+ loop_if = Proc.new {
1428
+ instance = find(cloud_id: instance_id, region: region, credentials: credentials).values.first
1429
+ instance.public_ip_address != elastic_ip.public_ip
1430
+ }
1431
+ MU.retrier(loop_if: loop_if, wait: 10, max: 3) {
1432
+ MU.log "Waiting for Elastic IP association of #{elastic_ip.public_ip} to #{instance_id} to take effect", MU::NOTICE
1433
+ }
2048
1434
 
2049
- MU.log "Elastic IP #{elastic_ip.public_ip} now associated with #{instance_id}" if waited
1435
+ MU.log "Elastic IP #{elastic_ip.public_ip} now associated with #{instance_id}"
2050
1436
 
2051
1437
  return elastic_ip.public_ip
2052
1438
  end
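Note: the error handling above now leans on the MU.retrier idiom used throughout this release. A minimal sketch of that shape, with the error classes and waits taken from the hunk and the block body elided:

    on_retry = Proc.new { |e| MU.log "Retrying Elastic IP association after #{e.class.name}", MU::NOTICE }
    MU.retrier([Aws::EC2::Errors::IncorrectInstanceState, Aws::EC2::Errors::ResourceAlreadyAssociated],
               wait: 5, max: 6, on_retry: on_retry) {
      # one associate_address attempt per pass; the listed errors trigger another pass
    }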
@@ -2069,16 +1455,15 @@ module MU
2069
1455
  # @param ignoremaster [Boolean]: If true, will remove resources not flagged as originating from this Mu server
2070
1456
  # @param region [String]: The cloud provider region
2071
1457
  # @return [void]
2072
- def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {})
1458
+ def self.cleanup(noop: false, deploy_id: MU.deploy_id, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {})
2073
1459
  onlycloud = flags["onlycloud"]
2074
1460
  skipsnapshots = flags["skipsnapshots"]
2075
1461
  tagfilters = [
2076
- {name: "tag:MU-ID", values: [MU.deploy_id]}
1462
+ {name: "tag:MU-ID", values: [deploy_id]}
2077
1463
  ]
2078
1464
  if !ignoremaster
2079
1465
  tagfilters << {name: "tag:MU-MASTER-IP", values: [MU.mu_public_ip]}
2080
1466
  end
2081
- instances = Array.new
2082
1467
  unterminated = Array.new
2083
1468
  name_tags = Array.new
2084
1469
 
@@ -2108,7 +1493,7 @@ module MU
2108
1493
  threads << Thread.new(instance) { |myinstance|
2109
1494
  MU.dupGlobals(parent_thread_id)
2110
1495
  Thread.abort_on_exception = true
2111
- MU::Cloud::AWS::Server.terminateInstance(id: myinstance.instance_id, noop: noop, onlycloud: onlycloud, region: region, deploy_id: MU.deploy_id, credentials: credentials)
1496
+ MU::Cloud::AWS::Server.terminateInstance(id: myinstance.instance_id, noop: noop, onlycloud: onlycloud, region: region, deploy_id: deploy_id, credentials: credentials)
2112
1497
  }
2113
1498
  }
2114
1499
 
@@ -2119,7 +1504,7 @@ module MU
2119
1504
  threads << Thread.new(volume) { |myvolume|
2120
1505
  MU.dupGlobals(parent_thread_id)
2121
1506
  Thread.abort_on_exception = true
2122
- MU::Cloud::AWS::Server.delete_volume(myvolume, noop, skipsnapshots, credentials: credentials)
1507
+ delete_volume(myvolume, noop, skipsnapshots, credentials: credentials, deploy_id: deploy_id)
2123
1508
  }
2124
1509
  }
2125
1510
 
@@ -2129,193 +1514,113 @@ module MU
2129
1514
  }
2130
1515
  end
2131
1516
 
1517
+ # Return an instance's AWS-assigned IP addresses and hostnames.
1518
+ # @param instance [OpenStruct]
1519
+ # @param id [String]
1520
+ # @param region [String]
1521
+ # @param credentials [String]
1522
+ # @return [Array<Array>]
1523
+ def self.getAddresses(instance = nil, id: nil, region: MU.curRegion, credentials: nil)
1524
+ return nil if !instance and !id
1525
+
1526
+ instance ||= find(cloud_id: id, region: region, credentials: credentials).values.first
1527
+ return if !instance
1528
+
1529
+ ips = []
1530
+ names = []
1531
+ instance.network_interfaces.each { |iface|
1532
+ iface.private_ip_addresses.each { |ip|
1533
+ ips << ip.private_ip_address
1534
+ names << ip.private_dns_name
1535
+ if ip.association
1536
+ ips << ip.association.public_ip
1537
+ names << ip.association.public_dns_name
1538
+ end
1539
+ }
1540
+ }
1541
+
1542
+ [ips, names]
1543
+ end
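Note: hypothetical usage of the new getAddresses helper; the instance id and region are placeholders.

    ips, names = MU::Cloud::AWS::Server.getAddresses(id: "i-0123456789abcdef0", region: "us-east-1")
    MU.log "Instance addresses", details: { "ips" => ips, "names" => names } if ips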
1544
+
2132
1545
  # Terminate an instance.
2133
1546
  # @param instance [OpenStruct]: The cloud provider's description of the instance.
2134
1547
  # @param id [String]: The cloud provider's identifier for the instance, to use if the full description is not available.
2135
1548
  # @param region [String]: The cloud provider region
2136
1549
  # @return [void]
2137
1550
  def self.terminateInstance(instance: nil, noop: false, id: nil, onlycloud: false, region: MU.curRegion, deploy_id: MU.deploy_id, mu_name: nil, credentials: nil)
2138
- ips = Array.new
2139
- if !instance
2140
- if id
2141
- begin
2142
- resp = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_instances(instance_ids: [id])
2143
- rescue Aws::EC2::Errors::InvalidInstanceIDNotFound => e
2144
- MU.log "Instance #{id} no longer exists", MU::WARN
2145
- end
2146
- if !resp.nil? and !resp.reservations.nil? and !resp.reservations.first.nil?
2147
- instance = resp.reservations.first.instances.first
2148
- ips << instance.public_ip_address if !instance.public_ip_address.nil?
2149
- ips << instance.private_ip_address if !instance.private_ip_address.nil?
2150
- end
2151
- else
2152
- MU.log "You must supply an instance handle or id to terminateInstance", MU::ERR
2153
- end
2154
- else
2155
- id = instance.instance_id
2156
- end
2157
- if !MU.deploy_id.empty?
2158
- deploy_dir = File.expand_path("#{MU.dataDir}/deployments/"+MU.deploy_id)
2159
- if Dir.exist?(deploy_dir) and !noop
2160
- FileUtils.touch("#{deploy_dir}/.cleanup-"+id)
2161
- end
1551
+ if !id and !instance
1552
+ MU.log "You must supply an instance handle or id to terminateInstance", MU::ERR
1553
+ return
2162
1554
  end
1555
+ instance ||= find(cloud_id: id, region: region, credentials: credentials).values.first
1556
+ return if !instance
2163
1557
 
2164
- server_obj = MU::MommaCat.findStray(
2165
- "AWS",
2166
- "servers",
2167
- region: region,
2168
- deploy_id: deploy_id,
2169
- cloud_id: id,
2170
- mu_name: mu_name
2171
- ).first
2172
-
1558
+ id ||= instance.instance_id
2173
1559
  begin
2174
- MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_instances(instance_ids: [id])
2175
- rescue Aws::EC2::Errors::InvalidInstanceIDNotFound => e
2176
- MU.log "Instance #{id} no longer exists", MU::DEBUG
2177
- end
2178
-
2179
- if !server_obj.nil? and MU::Cloud::AWS.hosted? and !MU::Cloud::AWS.isGovCloud?
2180
- # DNS cleanup is now done in MU::Cloud::DNSZone. Keeping this for now
2181
- cleaned_dns = false
2182
- mu_name = server_obj.mu_name
2183
- mu_zone = MU::Cloud::DNSZone.find(cloud_id: "platform-mu", credentials: credentials).values.first
2184
- if !mu_zone.nil?
2185
- zone_rrsets = []
2186
- rrsets = MU::Cloud::AWS.route53(credentials: credentials).list_resource_record_sets(hosted_zone_id: mu_zone.id)
2187
- rrsets.resource_record_sets.each{ |record|
2188
- zone_rrsets << record
2189
- }
1560
+ MU::MommaCat.lock(".cleanup-"+id)
1561
+ rescue Errno::ENOENT => e
1562
+ MU.log "No lock for terminating instance #{id} due to missing metadata", MU::DEBUG
1563
+ end
2190
1564
 
2191
- # AWS API returns a maximum of 100 results. DNS zones are likely to have more than 100 records, lets page and make sure we grab all records in a given zone
2192
- while rrsets.next_record_name && rrsets.next_record_type
2193
- rrsets = MU::Cloud::AWS.route53(credentials: credentials).list_resource_record_sets(hosted_zone_id: mu_zone.id, start_record_name: rrsets.next_record_name, start_record_type: rrsets.next_record_type)
2194
- rrsets.resource_record_sets.each{ |record|
2195
- zone_rrsets << record
2196
- }
2197
- end
2198
- end
2199
- if !onlycloud and !mu_name.nil?
2200
- # DNS cleanup is now done in MU::Cloud::DNSZone. Keeping this for now
2201
- if !zone_rrsets.nil? and !zone_rrsets.empty?
2202
- zone_rrsets.each { |rrset|
2203
- if rrset.name.match(/^#{mu_name.downcase}\.server\.#{MU.myInstanceId}\.platform-mu/i)
2204
- rrset.resource_records.each { |record|
2205
- MU::Cloud::DNSZone.genericMuDNSEntry(name: mu_name, target: record.value, cloudclass: MU::Cloud::Server, delete: true)
2206
- cleaned_dns = true
2207
- }
2208
- end
2209
- }
2210
- end
1565
+ ips, names = getAddresses(instance, region: region, credentials: credentials)
1566
+ targets = ips + names
2211
1567
 
2212
- if !noop
2213
- if !server_obj.nil? and !server_obj.config.nil?
2214
- MU.mommacat.notify(MU::Cloud::Server.cfg_plural, server_obj.config['name'], {}, mu_name: server_obj.mu_name, remove: true) if MU.mommacat
2215
- end
2216
- end
1568
+ server_obj = MU::MommaCat.findStray(
1569
+ "AWS",
1570
+ "servers",
1571
+ region: region,
1572
+ deploy_id: deploy_id,
1573
+ cloud_id: id,
1574
+ mu_name: mu_name,
1575
+ dummy_ok: true
1576
+ ).first
2217
1577
 
2218
- # If we didn't manage to find this instance's Route53 entry by sifting
2219
- # deployment metadata, see if we can get it with the Name tag.
2220
- if !mu_zone.nil? and !cleaned_dns and !instance.nil?
2221
- instance.tags.each { |tag|
2222
- if tag.key == "Name"
2223
- zone_rrsets.each { |rrset|
2224
- if rrset.name.match(/^#{tag.value.downcase}\.server\.#{MU.myInstanceId}\.platform-mu/i)
2225
- rrset.resource_records.each { |record|
2226
- MU::Cloud::DNSZone.genericMuDNSEntry(name: tag.value, target: record.value, cloudclass: MU::Cloud::Server, delete: true) if !noop
2227
- }
2228
- end
2229
- }
2230
- end
2231
- }
2232
- end
2233
- end
1578
+ if MU::Cloud::AWS.hosted? and !MU::Cloud::AWS.isGovCloud? and server_obj
1579
+ targets.each { |target|
1580
+ MU::Cloud::DNSZone.genericMuDNSEntry(name: server_obj.mu_name, target: target, cloudclass: MU::Cloud::Server, delete: true, noop: noop)
1581
+ }
2234
1582
  end
2235
1583
 
2236
- if ips.size > 0 and !onlycloud
2237
- known_hosts_files = [Etc.getpwuid(Process.uid).dir+"/.ssh/known_hosts"]
2238
- if Etc.getpwuid(Process.uid).name == "root" and !MU.inGem?
2239
- begin
2240
- known_hosts_files << Etc.getpwnam("nagios").dir+"/.ssh/known_hosts"
2241
- rescue ArgumentError
2242
- # we're in a non-nagios environment and that's ok
2243
- end
2244
- end
2245
- known_hosts_files.each { |known_hosts|
2246
- next if !File.exist?(known_hosts)
2247
- MU.log "Cleaning up #{ips} from #{known_hosts}"
2248
- if !noop
2249
- File.open(known_hosts, File::CREAT|File::RDWR, 0644) { |f|
2250
- f.flock(File::LOCK_EX)
2251
- newlines = Array.new
2252
- f.readlines.each { |line|
2253
- ip_match = false
2254
- ips.each { |ip|
2255
- if line.match(/(^|,| )#{ip}( |,)/)
2256
- MU.log "Expunging #{ip} from #{known_hosts}"
2257
- ip_match = true
2258
- end
2259
- }
2260
- newlines << line if !ip_match
2261
- }
2262
- f.rewind
2263
- f.truncate(0)
2264
- f.puts(newlines)
2265
- f.flush
2266
- f.flock(File::LOCK_UN)
2267
- }
2268
- end
1584
+ if targets.size > 0 and !onlycloud
1585
+ MU::Master.removeInstanceFromEtcHosts(server_obj.mu_name) if !noop and server_obj
1586
+ targets.each { |target|
1587
+ next if !target.match(/^\d+\.\d+\.\d+\.\d+$/)
1588
+ MU::Master.removeIPFromSSHKnownHosts(target, noop: noop)
2269
1589
  }
2270
1590
  end
2271
1591
 
2272
- return if instance.nil?
1592
+ on_retry = Proc.new {
1593
+ instance = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_instances(instance_ids: [instance.instance_id]).reservations.first.instances.first
1594
+ if instance.state.name == "terminated"
1595
+ MU.log "#{instance.instance_id}#{server_obj ? " ("+server_obj.mu_name+")" : ""} has already been terminated, skipping"
1596
+ MU::MommaCat.unlock(".cleanup-"+id)
1597
+ return
1598
+ end
1599
+ }
2273
1600
 
2274
- name = ""
2275
- instance.tags.each { |tag|
2276
- name = tag.value if tag.key == "Name"
1601
+ loop_if = Proc.new {
1602
+ instance = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_instances(instance_ids: [instance.instance_id]).reservations.first.instances.first
1603
+ instance.state.name != "terminated"
2277
1604
  }
2278
1605
 
2279
- if instance.state.name == "terminated"
2280
- MU.log "#{instance.instance_id} (#{name}) has already been terminated, skipping"
2281
- else
2282
- if instance.state.name == "terminating"
2283
- MU.log "#{instance.instance_id} (#{name}) already terminating, waiting"
2284
- elsif instance.state.name != "running" and instance.state.name != "pending" and instance.state.name != "stopping" and instance.state.name != "stopped"
2285
- MU.log "#{instance.instance_id} (#{name}) is in state #{instance.state.name}, waiting"
2286
- else
2287
- MU.log "Terminating #{instance.instance_id} (#{name}) #{noop}"
2288
- if !noop
2289
- begin
2290
- MU::Cloud::AWS.ec2(credentials: credentials, region: region).modify_instance_attribute(
2291
- instance_id: instance.instance_id,
2292
- disable_api_termination: {value: false}
2293
- )
2294
- MU::Cloud::AWS.ec2(credentials: credentials, region: region).terminate_instances(instance_ids: [instance.instance_id])
2295
- # Small race window here with the state changing from under us
2296
- rescue Aws::EC2::Errors::IncorrectInstanceState => e
2297
- resp = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_instances(instance_ids: [id])
2298
- if !resp.nil? and !resp.reservations.nil? and !resp.reservations.first.nil?
2299
- instance = resp.reservations.first.instances.first
2300
- if !instance.nil? and instance.state.name != "terminated" and instance.state.name != "terminating"
2301
- sleep 5
2302
- retry
2303
- end
2304
- end
2305
- rescue Aws::EC2::Errors::InternalError => e
2306
- MU.log "Error #{e.inspect} while Terminating instance #{instance.instance_id} (#{name}), retrying", MU::WARN, details: e.inspect
2307
- sleep 5
2308
- retry
2309
- end
2310
- end
2311
- end
2312
- while instance.state.name != "terminated" and !noop
2313
- sleep 30
2314
- instance_response = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_instances(instance_ids: [instance.instance_id])
2315
- instance = instance_response.reservations.first.instances.first
2316
- end
2317
- MU.log "#{instance.instance_id} (#{name}) terminated" if !noop
1606
+ MU.log "Terminating #{instance.instance_id}#{server_obj ? " ("+server_obj.mu_name+")" : ""}"
1607
+ if !noop
1608
+ MU.retrier([Aws::EC2::Errors::IncorrectInstanceState, Aws::EC2::Errors::InternalError], wait: 30, max: 60, loop_if: loop_if, on_retry: on_retry) {
1609
+ MU::Cloud::AWS.ec2(credentials: credentials, region: region).modify_instance_attribute(
1610
+ instance_id: instance.instance_id,
1611
+ disable_api_termination: {value: false}
1612
+ )
1613
+ MU::Cloud::AWS.ec2(credentials: credentials, region: region).terminate_instances(instance_ids: [instance.instance_id])
1614
+ }
2318
1615
  end
1616
+
1617
+ MU.log "#{instance.instance_id}#{server_obj ? " ("+server_obj.mu_name+")" : ""} terminated" if !noop
1618
+ begin
1619
+ MU::MommaCat.unlock(".cleanup-"+id)
1620
+ rescue Errno::ENOENT => e
1621
+ MU.log "No lock for terminating instance #{id} due to missing metadata", MU::DEBUG
1622
+ end
1623
+
2319
1624
  end
2320
1625
 
2321
1626
  # Return a BoK-style config hash describing a NAT instance. We use this
@@ -2336,15 +1641,19 @@ module MU
2336
1641
  end
2337
1642
 
2338
1643
  # Cloud-specific configuration properties.
2339
- # @param config [MU::Config]: The calling MU::Config object
1644
+ # @param _config [MU::Config]: The calling MU::Config object
2340
1645
  # @return [Array<Array,Hash>]: List of required fields, and json-schema Hash of cloud-specific configuration parameters for this resource
2341
- def self.schema(config)
1646
+ def self.schema(_config)
2342
1647
  toplevel_required = []
2343
1648
  schema = {
2344
1649
  "ami_id" => {
2345
1650
  "type" => "string",
2346
1651
  "description" => "Alias for +image_id+"
2347
1652
  },
1653
+ "windows_admin_username" => {
1654
+ "type" => "string",
1655
+ "default" => "Administrator"
1656
+ },
2348
1657
  "generate_iam_role" => {
2349
1658
  "type" => "boolean",
2350
1659
  "default" => true,
@@ -2368,25 +1677,47 @@ module MU
2368
1677
  "type" => "object"
2369
1678
  }
2370
1679
  },
2371
- "ingress_rules" => {
2372
- "items" => {
2373
- "properties" => {
2374
- "sgs" => {
2375
- "type" => "array",
2376
- "items" => {
2377
- "description" => "Other AWS Security Groups; resources that are associated with this group will have this rule applied to their traffic",
2378
- "type" => "string"
2379
- }
2380
- },
2381
- "lbs" => {
2382
- "type" => "array",
2383
- "items" => {
2384
- "description" => "AWS Load Balancers which will have this rule applied to their traffic",
2385
- "type" => "string"
2386
- }
2387
- }
1680
+ "ingress_rules" => MU::Cloud.resourceClass("AWS", "FirewallRule").ingressRuleAddtlSchema,
1681
+ "ssh_user" => {
1682
+ "type" => "string",
1683
+ "default" => "root",
1684
+ "default_if" => [
1685
+ {
1686
+ "key_is" => "platform",
1687
+ "value_is" => "windows",
1688
+ "set" => "Administrator"
1689
+ },
1690
+ {
1691
+ "key_is" => "platform",
1692
+ "value_is" => "win2k12",
1693
+ "set" => "Administrator"
1694
+ },
1695
+ {
1696
+ "key_is" => "platform",
1697
+ "value_is" => "win2k12r2",
1698
+ "set" => "Administrator"
1699
+ },
1700
+ {
1701
+ "key_is" => "platform",
1702
+ "value_is" => "win2k16",
1703
+ "set" => "Administrator"
1704
+ },
1705
+ {
1706
+ "key_is" => "platform",
1707
+ "value_is" => "rhel7",
1708
+ "set" => "ec2-user"
1709
+ },
1710
+ {
1711
+ "key_is" => "platform",
1712
+ "value_is" => "rhel71",
1713
+ "set" => "ec2-user"
1714
+ },
1715
+ {
1716
+ "key_is" => "platform",
1717
+ "value_is" => "amazon",
1718
+ "set" => "ec2-user"
2388
1719
  }
2389
- }
1720
+ ]
2390
1721
  }
2391
1722
  }
2392
1723
  [toplevel_required, schema]
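Editor's note: the new ssh_user entry moves per-platform defaults into default_if rules instead of branching in code. Assuming the configurator applies the first rule whose key_is/value_is pair matches (an assumption; the resolution logic is outside this diff), the effect can be sketched as:

    server = { "platform" => "win2k16" }
    match = schema["ssh_user"]["default_if"].find { |r| server[r["key_is"]] == r["value_is"] }
    ssh_user = match ? match["set"] : schema["ssh_user"]["default"]  # "Administrator" here, "root" otherwise
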
@@ -2414,8 +1745,7 @@ module MU
2414
1745
 
2415
1746
  MU::Cloud.availableClouds.each { |cloud|
2416
1747
  next if cloud == "AWS"
2417
- cloudbase = Object.const_get("MU").const_get("Cloud").const_get(cloud)
2418
- foreign_types = (cloudbase.listInstanceTypes).values.first
1748
+ foreign_types = (MU::Cloud.cloudClass(cloud).listInstanceTypes).values.first
2419
1749
  if foreign_types.size == 1
2420
1750
  foreign_types = foreign_types.values.first
2421
1751
  end
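Editor's note: here, as elsewhere in 3.3.0, a const_get chain gives way to the MU::Cloud.cloudClass lookup. A sketch of the equivalence this change assumes (using "Google" purely as an example cloud name):

    # the old chain resolved the provider module by name...
    cloudbase = Object.const_get("MU").const_get("Cloud").const_get("Google")
    # ...which MU::Cloud.cloudClass("Google") is presumed to return directly
    MU::Cloud.cloudClass("Google").listInstanceTypes
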
@@ -2446,6 +1776,45 @@ module MU
2446
1776
  size
2447
1777
  end
2448
1778
 
1779
+ # Boilerplate generation of an instance role
1780
+ # @param server [Hash]: The BoK-style config hash for a +Server+ or +ServerPool+
1781
+ # @param configurator [MU::Config]
1782
+ def self.generateStandardRole(server, configurator)
1783
+ role = {
1784
+ "name" => server["name"],
1785
+ "credentials" => server["credentials"],
1786
+ "can_assume" => [
1787
+ {
1788
+ "entity_id" => "ec2.amazonaws.com",
1789
+ "entity_type" => "service"
1790
+ }
1791
+ ],
1792
+ "policies" => [
1793
+ {
1794
+ "name" => "MuSecrets",
1795
+ "permissions" => ["s3:GetObject"],
1796
+ "targets" => [
1797
+ {
1798
+ "identifier" => 'arn:'+(MU::Cloud::AWS.isGovCloud?(server['region']) ? "aws-us-gov" : "aws")+':s3:::'+MU::Cloud::AWS.adminBucketName(server['credentials'])+'/Mu_CA.pem'
1799
+ }
1800
+ ]
1801
+ }
1802
+ ]
1803
+ }
1804
+ if server['iam_policies']
1805
+ role['iam_policies'] = server['iam_policies'].dup
1806
+ end
1807
+ if server['canned_iam_policies']
1808
+ role['import'] = server['canned_iam_policies'].dup
1809
+ end
1810
+ if server['iam_role']
1811
+ # XXX maybe break this down into policies and add those?
1812
+ end
1813
+
1814
+ configurator.insertKitten(role, "roles")
1815
+ MU::Config.addDependency(server, server["name"], "role")
1816
+ end
1817
+
2449
1818
  # Cloud-specific pre-processing of {MU::Config::BasketofKittens::servers}, bare and unvalidated.
2450
1819
  # @param server [Hash]: The resource to process and validate
2451
1820
  # @param configurator [MU::Config]: The overall deployment configurator of which this resource is a member
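Editor's note: generateStandardRole factors out the instance-role boilerplate that the next hunk deletes from the config pre-processing method, and MU::Config.addDependency replaces the manual dependency push. Judging from the removed code below, the helper call is shorthand for roughly this (a sketch, not a test):

    server["dependencies"] ||= []
    server["dependencies"] << { "type" => "role", "name" => server["name"] }
    # ...which MU::Config.addDependency(server, server["name"], "role") now encapsulates
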
@@ -2466,43 +1835,7 @@ module MU
2466
1835
  ok = false
2467
1836
  end
2468
1837
  else
2469
- role = {
2470
- "name" => server["name"],
2471
- "credentials" => server["credentials"],
2472
- "can_assume" => [
2473
- {
2474
- "entity_id" => "ec2.amazonaws.com",
2475
- "entity_type" => "service"
2476
- }
2477
- ],
2478
- "policies" => [
2479
- {
2480
- "name" => "MuSecrets",
2481
- "permissions" => ["s3:GetObject"],
2482
- "targets" => [
2483
- {
2484
- "identifier" => 'arn:'+(MU::Cloud::AWS.isGovCloud?(server['region']) ? "aws-us-gov" : "aws")+':s3:::'+MU::Cloud::AWS.adminBucketName(server['credentials'])+'/Mu_CA.pem'
2485
- }
2486
- ]
2487
- }
2488
- ]
2489
- }
2490
- if server['iam_policies']
2491
- role['iam_policies'] = server['iam_policies'].dup
2492
- end
2493
- if server['canned_iam_policies']
2494
- role['import'] = server['canned_iam_policies'].dup
2495
- end
2496
- if server['iam_role']
2497
- # XXX maybe break this down into policies and add those?
2498
- end
2499
-
2500
- configurator.insertKitten(role, "roles")
2501
- server["dependencies"] ||= []
2502
- server["dependencies"] << {
2503
- "type" => "role",
2504
- "name" => server["name"]
2505
- }
1838
+ generateStandardRole(server, configurator)
2506
1839
  end
2507
1840
  if !server['create_image'].nil?
2508
1841
  if server['create_image'].has_key?('copy_to_regions') and
@@ -2514,12 +1847,12 @@ module MU
2514
1847
  end
2515
1848
  end
2516
1849
 
2517
- server['ami_id'] ||= server['image_id']
1850
+ server['image_id'] ||= server['ami_id']
2518
1851
 
2519
- if server['ami_id'].nil?
1852
+ if server['image_id'].nil?
2520
1853
  img_id = MU::Cloud.getStockImage("AWS", platform: server['platform'], region: server['region'])
2521
1854
  if img_id
2522
- server['ami_id'] = configurator.getTail("server"+server['name']+"AMI", value: img_id, prettyname: "server"+server['name']+"AMI", cloudtype: "AWS::EC2::Image::Id")
1855
+ server['image_id'] = configurator.getTail("server"+server['name']+"AMI", value: img_id, prettyname: "server"+server['name']+"AMI", cloudtype: "AWS::EC2::Image::Id")
2523
1856
  else
2524
1857
  MU.log "No AMI specified for #{server['name']} and no default available for platform #{server['platform']} in region #{server['region']}", MU::ERR, details: server
2525
1858
  ok = false
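Editor's note: this hunk flips the aliasing so image_id is the canonical key and ami_id the alias. A config that sets only ami_id should still resolve, per the first changed line above (sketch with a made-up AMI ID):

    server = { "ami_id" => "ami-0123456789abcdef0" }   # hypothetical value
    server['image_id'] ||= server['ami_id']
    server['image_id']  # => "ami-0123456789abcdef0"
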
@@ -2528,22 +1861,13 @@ module MU
2528
1861
 
2529
1862
  if !server["loadbalancers"].nil?
2530
1863
  server["loadbalancers"].each { |lb|
2531
- if lb["concurrent_load_balancer"] != nil
2532
- server["dependencies"] << {
2533
- "type" => "loadbalancer",
2534
- "name" => lb["concurrent_load_balancer"]
2535
- }
1864
+ lb["name"] ||= lb["concurrent_load_balancer"]
1865
+ if lb["name"]
1866
+ MU::Config.addDependency(server, lb["name"], "loadbalancer")
2536
1867
  end
2537
1868
  }
2538
1869
  end
2539
1870
 
2540
- if !server["vpc"].nil?
2541
- if server["vpc"]["subnet_name"].nil? and server["vpc"]["subnet_id"].nil? and server["vpc"]["subnet_pref"].nil?
2542
- MU.log "A server VPC block must specify a target subnet", MU::ERR
2543
- ok = false
2544
- end
2545
- end
2546
-
2547
1871
  ok
2548
1872
  end
2549
1873
 
@@ -2556,36 +1880,34 @@ module MU
2556
1880
  img = MU::Cloud::AWS.ec2(region: region, credentials: credentials).describe_images(image_ids: [ami_id]).images.first
2557
1881
  return DateTime.new if img.nil?
2558
1882
  return DateTime.parse(img.creation_date)
2559
- rescue Aws::EC2::Errors::InvalidAMIIDNotFound => e
1883
+ rescue Aws::EC2::Errors::InvalidAMIIDNotFound
2560
1884
  end
2561
1885
 
2562
1886
  return DateTime.new
2563
1887
  end
2564
1888
 
2565
- private
2566
-
2567
1889
  # Destroy a volume.
2568
1890
  # @param volume [OpenStruct]: The cloud provider's description of the volume.
2569
- # @param id [String]: The cloud provider's identifier for the volume, to use if the full description is not available.
2570
1891
  # @param region [String]: The cloud provider region
2571
1892
  # @return [void]
2572
- def self.delete_volume(volume, noop, skipsnapshots, id: nil, region: MU.curRegion, credentials: nil)
1893
+ def self.delete_volume(volume, noop, skipsnapshots, region: MU.curRegion, credentials: nil, deploy_id: MU.deploy_id)
2573
1894
  if !volume.nil?
2574
1895
  resp = MU::Cloud::AWS.ec2(region: region, credentials: credentials).describe_volumes(volume_ids: [volume.volume_id])
2575
1896
  volume = resp.data.volumes.first
2576
1897
  end
2577
- name = ""
1898
+ name = nil
2578
1899
  volume.tags.each { |tag|
2579
1900
  name = tag.value if tag.key == "Name"
2580
1901
  }
1902
+ name ||= volume.volume_id
2581
1903
 
2582
1904
  MU.log("Deleting volume #{volume.volume_id} (#{name})")
2583
1905
  if !noop
2584
1906
  if !skipsnapshots
2585
1907
  if !name.nil? and !name.empty?
2586
- desc = "#{MU.deploy_id}-MUfinal (#{name})"
1908
+ desc = "#{deploy_id}-MUfinal (#{name})"
2587
1909
  else
2588
- desc = "#{MU.deploy_id}-MUfinal"
1910
+ desc = "#{deploy_id}-MUfinal"
2589
1911
  end
2590
1912
 
2591
1913
  begin
@@ -2600,31 +1922,414 @@ module MU
2600
1922
  end
2601
1923
  end
2602
1924
 
2603
- retries = 0
2604
1925
  begin
2605
- MU::Cloud::AWS.ec2(region: region, credentials: credentials).delete_volume(volume_id: volume.volume_id)
2606
- rescue Aws::EC2::Errors::IncorrectState => e
2607
- MU.log "Volume #{volume.volume_id} (#{name}) in incorrect state (#{e.message}), will retry", MU::WARN
2608
- sleep 30
2609
- retry
2610
- rescue Aws::EC2::Errors::InvalidVolumeNotFound
2611
- MU.log "Volume #{volume.volume_id} (#{name}) disappeared before I could remove it!", MU::WARN
1926
+ MU.retrier([Aws::EC2::Errors::IncorrectState, Aws::EC2::Errors::VolumeInUse], ignoreme: [Aws::EC2::Errors::InvalidVolumeNotFound], wait: 30, max: 10){
1927
+ MU::Cloud::AWS.ec2(region: region, credentials: credentials).delete_volume(volume_id: volume.volume_id)
1928
+ }
2612
1929
  rescue Aws::EC2::Errors::VolumeInUse
2613
- if retries < 10
2614
- volume.attachments.each { |attachment|
2615
- MU.log "#{volume.volume_id} is attached to #{attachment.instance_id} as #{attachment.device}", MU::NOTICE
2616
- }
2617
- MU.log "Volume '#{name}' is still attached, waiting...", MU::NOTICE
2618
- sleep 30
2619
- retries = retries + 1
2620
- retry
1930
+ MU.log "Failed to delete #{name}", MU::ERR
1931
+ end
1932
+
1933
+ end
1934
+ end
1935
+ private_class_method :delete_volume
1936
+
1937
+ # Given some combination of a base image, BoK-configured storage, and
1938
+ # ephemeral devices, return the structure passed to EC2 to declare
1939
+ # block device mappings.
1940
+ # @param image_id [String]
1941
+ # @param storage [Array]
1942
+ # @param add_ephemeral [Boolean]
1943
+ # @param region [String]
1944
+ # @param credentials [String]
1945
+ def self.configureBlockDevices(image_id: nil, storage: nil, add_ephemeral: true, region: MU.myRegion, credentials: nil)
1946
+ ext_disks = {}
1947
+
1948
+ # Figure out which devices are embedded in the AMI already.
1949
+ if image_id
1950
+ image = MU::Cloud::AWS.ec2(region: region, credentials: credentials).describe_images(image_ids: [image_id]).images.first
1951
+ if !image.block_device_mappings.nil?
1952
+ image.block_device_mappings.each { |disk|
1953
+ if !disk.device_name.nil? and !disk.device_name.empty? and !disk.ebs.nil? and !disk.ebs.empty?
1954
+ ext_disks[disk.device_name] = MU.structToHash(disk.ebs)
1955
+ end
1956
+ }
1957
+ end
1958
+ end
1959
+
1960
+ configured_storage = []
1961
+ if storage
1962
+ storage.each { |vol|
1963
+ # Drop the "encrypted" flag if a snapshot for this device exists
1964
+ # in the AMI, even if they both agree about the value of said
1965
+ # flag. Apparently that's a thing now.
1966
+ if ext_disks.has_key?(vol["device"])
1967
+ if ext_disks[vol["device"]].has_key?(:snapshot_id)
1968
+ vol.delete("encrypted")
1969
+ end
1970
+ end
1971
+ mapping, _cfm_mapping = MU::Cloud::AWS::Server.convertBlockDeviceMapping(vol)
1972
+ configured_storage << mapping
1973
+ }
1974
+ end
1975
+
1976
+ configured_storage.concat(@ephemeral_mappings) if add_ephemeral
1977
+
1978
+ configured_storage
1979
+ end
1980
+
1981
+ # Return all of the IP addresses, public and private, from all of our
1982
+ # network interfaces.
1983
+ # @return [Array<String>]
1984
+ def listIPs
1985
+ MU::Cloud::AWS::Server.getAddresses(cloud_desc).first
1986
+ end
1987
+
1988
+ private
1989
+
1990
+ def bootstrapGroomer
1991
+ if (@config['groom'].nil? or @config['groom']) and !@groomer.haveBootstrapped?
1992
+ MU.retrier([BootstrapTempFail], wait: 45) {
1993
+ if windows?
1994
+ # kick off certificate generation early; WinRM will need it
1995
+ @deploy.nodeSSLCerts(self)
1996
+ @deploy.nodeSSLCerts(self, true) if @config.has_key?("basis")
1997
+ session = getWinRMSession(50, 60, reboot_on_problems: true)
1998
+ initialWinRMTasks(session)
1999
+ begin
2000
+ session.close
2001
+ rescue StandardError
2002
+ # session.close is allowed to fail; we're probably rebooting
2003
+ end
2004
+ else
2005
+ session = getSSHSession(40, 30)
2006
+ initialSSHTasks(session)
2007
+ end
2008
+ }
2009
+ end
2010
+
2011
+ # See if this node already exists in our config management. If it
2012
+ # does, we're done.
2013
+
2014
+ if MU.inGem?
2015
+ MU.log "Deploying from a gem, not grooming"
2016
+ elsif @config['groom'].nil? or @config['groom']
2017
+ if @groomer.haveBootstrapped?
2018
+ MU.log "Node #{@mu_name} has already been bootstrapped, skipping groomer setup.", MU::NOTICE
2019
+ else
2020
+ begin
2021
+ @groomer.bootstrap
2022
+ rescue MU::Groomer::RunError
2023
+ return false
2024
+ end
2025
+ end
2026
+ @groomer.saveDeployData
2027
+ end
2028
+
2029
+ true
2030
+ end
2031
+
2032
+ def saveCredentials(win_admin_password = nil)
2033
+ ec2config_password = nil
2034
+ sshd_password = nil
2035
+ if windows?
2036
+ if @config['use_cloud_provider_windows_password']
2037
+ win_admin_password ||= getWindowsAdminPassword
2038
+ elsif @config['windows_auth_vault'] and !@config['windows_auth_vault'].empty?
2039
+ if @config["windows_auth_vault"].has_key?("password_field")
2040
+ win_admin_password ||= @groomer.getSecret(
2041
+ vault: @config['windows_auth_vault']['vault'],
2042
+ item: @config['windows_auth_vault']['item'],
2043
+ field: @config["windows_auth_vault"]["password_field"]
2044
+ )
2621
2045
  else
2622
- MU.log "Failed to delete #{name}", MU::ERR
2046
+ win_admin_password ||= getWindowsAdminPassword
2047
+ end
2048
+
2049
+ if @config["windows_auth_vault"].has_key?("ec2config_password_field")
2050
+ ec2config_password = @groomer.getSecret(
2051
+ vault: @config['windows_auth_vault']['vault'],
2052
+ item: @config['windows_auth_vault']['item'],
2053
+ field: @config["windows_auth_vault"]["ec2config_password_field"]
2054
+ )
2055
+ end
2056
+
2057
+ if @config["windows_auth_vault"].has_key?("sshd_password_field")
2058
+ sshd_password = @groomer.getSecret(
2059
+ vault: @config['windows_auth_vault']['vault'],
2060
+ item: @config['windows_auth_vault']['item'],
2061
+ field: @config["windows_auth_vault"]["sshd_password_field"]
2062
+ )
2063
+ end
2064
+ end
2065
+
2066
+ win_admin_password ||= MU.generateWindowsPassword
2067
+ ec2config_password ||= MU.generateWindowsPassword
2068
+ sshd_password ||= MU.generateWindowsPassword
2069
+
2070
+ # We're creating the vault here so when we run
2071
+ # MU::Cloud::Server.initialSSHTasks and we need to set the Windows
2072
+ # Admin password we can grab it from said vault.
2073
+ creds = {
2074
+ "username" => @config['windows_admin_username'],
2075
+ "password" => win_admin_password,
2076
+ "ec2config_username" => "ec2config",
2077
+ "ec2config_password" => ec2config_password,
2078
+ "sshd_username" => "sshd_service",
2079
+ "sshd_password" => sshd_password
2080
+ }
2081
+ @groomer.saveSecret(vault: @mu_name, item: "windows_credentials", data: creds, permissions: "name:#{@mu_name}")
2082
+ end
2083
+ end
2084
+
2085
+ def haveElasticIP?
2086
+ if !cloud_desc.public_ip_address.nil?
2087
+ begin
2088
+ resp = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_addresses(public_ips: [cloud_desc.public_ip_address])
2089
+ if resp.addresses.size > 0 and resp.addresses.first.instance_id == @cloud_id
2090
+ return true
2623
2091
  end
2092
+ rescue Aws::EC2::Errors::InvalidAddressNotFound
2093
+ # XXX this is ok to ignore, it means the public IP isn't Elastic
2624
2094
  end
2625
2095
  end
2096
+
2097
+ false
2626
2098
  end
2627
2099
 
2100
+ def configureNetworking
2101
+ if !@config['static_ip'].nil?
2102
+ if !@config['static_ip']['ip'].nil?
2103
+ MU::Cloud::AWS::Server.associateElasticIp(@cloud_id, classic: @vpc.nil?, ip: @config['static_ip']['ip'])
2104
+ elsif !haveElasticIP?
2105
+ MU::Cloud::AWS::Server.associateElasticIp(@cloud_id, classic: @vpc.nil?)
2106
+ end
2107
+ end
2108
+
2109
+ if !@vpc.nil? and @config.has_key?("vpc")
2110
+ subnet = @vpc.getSubnet(cloud_id: cloud_desc.subnet_id)
2111
+
2112
+ _nat_ssh_key, _nat_ssh_user, nat_ssh_host, _canonical_ip, _ssh_user, _ssh_key_name = getSSHConfig
2113
+ if subnet.private? and !nat_ssh_host and !MU::Cloud.resourceClass("AWS", "VPC").haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials'])
2114
+ raise MuError, "#{@mu_name} is in a private subnet (#{subnet}), but has no bastion host configured, and I have no other route to it"
2115
+ end
2116
+
2117
+ # If we've asked for additional subnets (and this @config is not a
2118
+ # member of a Server Pool, which has different semantics), create
2119
+ # extra interfaces to accommodate.
2120
+ if !@config['vpc']['subnets'].nil? and @config['basis'].nil?
2121
+ device_index = 1
2122
+ mySubnets.each { |s|
2123
+ next if s.cloud_id == cloud_desc.subnet_id
2124
+
2125
+ if cloud_desc.placement.availability_zone != s.az
2126
+ MU.log "Cannot create interface in subnet #{s.to_s} for #{@mu_name} due to AZ mismatch", MU::WARN
2127
+ next
2128
+ end
2129
+ MU.log "Adding network interface on subnet #{s.cloud_id} for #{@mu_name}"
2130
+ iface = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).create_network_interface(subnet_id: s.cloud_id).network_interface
2131
+ MU::Cloud::AWS.createStandardTags(
2132
+ iface.network_interface_id,
2133
+ region: @config['region'],
2134
+ credentials: @config['credentials'],
2135
+ optional: @config['optional_tags'],
2136
+ nametag: @mu_name+"-ETH"+device_index.to_s,
2137
+ othertags: @config['tags']
2138
+ )
2139
+
2140
+ MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).attach_network_interface(
2141
+ network_interface_id: iface.network_interface_id,
2142
+ instance_id: cloud_desc.instance_id,
2143
+ device_index: device_index
2144
+ )
2145
+ device_index = device_index + 1
2146
+ }
2147
+ cloud_desc(use_cache: false)
2148
+ end
2149
+ end
2150
+
2151
+ [:private_dns_name, :public_dns_name, :private_ip_address, :public_ip_address].each { |field|
2152
+ @config[field.to_s] = cloud_desc.send(field)
2153
+ }
2154
+
2155
+ if !@config['add_private_ips'].nil?
2156
+ cloud_desc.network_interfaces.each { |int|
2157
+ if int.private_ip_address == cloud_desc.private_ip_address and int.private_ip_addresses.size < (@config['add_private_ips'] + 1)
2158
+ MU.log "Adding #{@config['add_private_ips']} extra private IP addresses to #{cloud_desc.instance_id}"
2159
+ MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).assign_private_ip_addresses(
2160
+ network_interface_id: int.network_interface_id,
2161
+ secondary_private_ip_address_count: @config['add_private_ips'],
2162
+ allow_reassignment: false
2163
+ )
2164
+ end
2165
+ }
2166
+ end
2167
+ end
2168
+
2169
+ def tagVolumes
2170
+ volumes = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_volumes(filters: [name: "attachment.instance-id", values: [@cloud_id]])
2171
+ volumes.each { |vol|
2172
+ vol.volumes.each { |volume|
2173
+ volume.attachments.each { |attachment|
2174
+ MU::Cloud::AWS.createStandardTags(
2175
+ attachment.volume_id,
2176
+ region: @config['region'],
2177
+ credentials: @config['credentials'],
2178
+ optional: @config['optional_tags'],
2179
+ nametag: ["/dev/sda", "/dev/sda1"].include?(attachment.device) ? "ROOT-"+@mu_name : @mu_name+"-"+attachment.device.upcase,
2180
+ othertags: @config['tags']
2181
+ )
2182
+
2183
+ }
2184
+ }
2185
+ }
2186
+ end
2187
+
2188
+ # If we came up via AutoScale, the Alarm module won't have had our
2189
+ # instance ID to associate us with itself. So invoke that here.
2190
+ # XXX might be possible to do this with regular alarm resources and
2191
+ # dependencies now
2192
+ def setAlarms
2193
+ if !@config['basis'].nil? and @config["alarms"] and !@config["alarms"].empty?
2194
+ @config["alarms"].each { |alarm|
2195
+ alarm_obj = MU::MommaCat.findStray(
2196
+ "AWS",
2197
+ "alarms",
2198
+ region: @config["region"],
2199
+ deploy_id: @deploy.deploy_id,
2200
+ name: alarm['name']
2201
+ ).first
2202
+ alarm["dimensions"] = [{:name => "InstanceId", :value => @cloud_id}]
2203
+
2204
+ if alarm["enable_notifications"]
2205
+ # XXX vile, this should be a sibling resource generated by the
2206
+ # parser
2207
+ topic_arn = MU::Cloud.resourceClass("AWS", "Notification").createTopic(alarm["notification_group"], region: @config["region"], credentials: @config['credentials'])
2208
+ MU::Cloud.resourceClass("AWS", "Notification").subscribe(topic_arn, alarm["notification_endpoint"], alarm["notification_type"], region: @config["region"], credentials: @config["credentials"])
2209
+ alarm["alarm_actions"] = [topic_arn]
2210
+ alarm["ok_actions"] = [topic_arn]
2211
+ end
2212
+
2213
+ alarm_name = alarm_obj ? alarm_obj.cloud_id : "#{@mu_name}-#{alarm['name']}".upcase
2214
+
2215
+ MU::Cloud.resourceClass("AWS", "Alarm").setAlarm(
2216
+ name: alarm_name,
2217
+ ok_actions: alarm["ok_actions"],
2218
+ alarm_actions: alarm["alarm_actions"],
2219
+ insufficient_data_actions: alarm["no_data_actions"],
2220
+ metric_name: alarm["metric_name"],
2221
+ namespace: alarm["namespace"],
2222
+ statistic: alarm["statistic"],
2223
+ dimensions: alarm["dimensions"],
2224
+ period: alarm["period"],
2225
+ unit: alarm["unit"],
2226
+ evaluation_periods: alarm["evaluation_periods"],
2227
+ threshold: alarm["threshold"],
2228
+ comparison_operator: alarm["comparison_operator"],
2229
+ region: @config["region"],
2230
+ credentials: @config['credentials']
2231
+ )
2232
+ }
2233
+ end
2234
+ end
2235
+
2236
+ # We have issues sometimes where our dns_records are pointing at the wrong node name and IP address.
2237
+
2238
+ def getIAMProfile
2239
+ arn = if @config['generate_iam_role']
2240
+ role = @deploy.findLitterMate(name: @config['name'], type: "roles")
2241
+ s3_objs = ["#{@deploy.deploy_id}-secret", "#{role.mu_name}.pfx", "#{role.mu_name}.crt", "#{role.mu_name}.key", "#{role.mu_name}-winrm.crt", "#{role.mu_name}-winrm.key"].map { |file|
2242
+ 'arn:'+(MU::Cloud::AWS.isGovCloud?(@config['region']) ? "aws-us-gov" : "aws")+':s3:::'+MU::Cloud::AWS.adminBucketName(@credentials)+'/'+file
2243
+ }
2244
+ MU.log "Adding S3 read permissions to #{@mu_name}'s IAM profile", MU::NOTICE, details: s3_objs
2245
+ role.cloudobj.injectPolicyTargets("MuSecrets", s3_objs)
2246
+
2247
+ @config['iam_role'] = role.mu_name
2248
+ role.cloudobj.createInstanceProfile
2249
+
2250
+ elsif @config['iam_role'].nil?
2251
+ raise MuError, "#{@mu_name} has generate_iam_role set to false, but no iam_role assigned."
2252
+ end
2253
+
2254
+ if !@config["iam_role"].nil?
2255
+ if arn
2256
+ return {arn: arn}
2257
+ else
2258
+ return {name: @config["iam_role"]}
2259
+ end
2260
+ end
2261
+
2262
+ nil
2263
+ end
2264
+
2265
+ def setDeleteOntermination(device, delete_on_termination = false)
2266
+ mappings = MU.structToHash(cloud_desc.block_device_mappings)
2267
+ mappings.each { |vol|
2268
+ if vol[:ebs]
2269
+ vol[:ebs].delete(:attach_time)
2270
+ vol[:ebs].delete(:status)
2271
+ end
2272
+ if vol[:device_name] == device
2273
+ if vol[:ebs][:delete_on_termination] != delete_on_termination
2274
+ vol[:ebs][:delete_on_termination] = delete_on_termination
2275
+ MU.log "Setting delete_on_termination flag to #{delete_on_termination.to_s} on #{@mu_name}'s #{dev}"
2276
+ MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).modify_instance_attribute(
2277
+ instance_id: @cloud_id,
2278
+ block_device_mappings: mappings
2279
+ )
2280
+ end
2281
+ return true
2282
+ end
2283
+ }
2284
+
2285
+ false
2286
+ end
2287
+
2288
+ def createImage
2289
+ img_cfg = @config['create_image']
2290
+ # Scrub things that don't belong on an AMI
2291
+ session = windows? ? getWinRMSession : getSSHSession
2292
+ sudo = purgecmd = ""
2293
+ sudo = "sudo" if @config['ssh_user'] != "root"
2294
+ if windows?
2295
+ purgecmd = "rm -rf /cygdrive/c/mu_installed_chef"
2296
+ else
2297
+ purgecmd = "rm -rf /opt/mu_installed_chef"
2298
+ end
2299
+ if img_cfg['image_then_destroy']
2300
+ if windows?
2301
+ purgecmd = "rm -rf /cygdrive/c/chef/ /home/#{@config['windows_admin_username']}/.ssh/authorized_keys /home/Administrator/.ssh/authorized_keys /cygdrive/c/mu-installer-ran-updates /cygdrive/c/mu_installed_chef"
2302
+ # session.exec!("powershell -Command \"& {(Get-WmiObject -Class Win32_Product -Filter \"Name='UniversalForwarder'\").Uninstall()}\"")
2303
+ else
2304
+ purgecmd = "#{sudo} rm -rf /var/lib/cloud/instances/i-* /root/.ssh/authorized_keys /etc/ssh/ssh_host_*key* /etc/chef /etc/opscode/* /.mu-installer-ran-updates /var/chef /opt/mu_installed_chef /opt/chef ; #{sudo} sed -i 's/^HOSTNAME=.*//' /etc/sysconfig/network"
2305
+ end
2306
+ end
2307
+ if windows?
2308
+ session.run(purgecmd)
2309
+ else
2310
+ session.exec!(purgecmd)
2311
+ end
2312
+ session.close
2313
+ ami_ids = MU::Cloud::AWS::Server.createImage(
2314
+ name: @mu_name,
2315
+ instance_id: @cloud_id,
2316
+ storage: @config['storage'],
2317
+ exclude_storage: img_cfg['image_exclude_storage'],
2318
+ copy_to_regions: img_cfg['copy_to_regions'],
2319
+ make_public: img_cfg['public'],
2320
+ region: @config['region'],
2321
+ tags: @config['tags'],
2322
+ credentials: @config['credentials']
2323
+ )
2324
+ @deploy.notify("images", @config['name'], ami_ids)
2325
+ @config['image_created'] = true
2326
+ if img_cfg['image_then_destroy']
2327
+ MU::Cloud::AWS::Server.waitForAMI(ami_ids[@config['region']], region: @config['region'], credentials: @config['credentials'])
2328
+ MU.log "AMI #{ami_ids[@config['region']]} ready, removing source node #{@mu_name}"
2329
+ MU::Cloud::AWS::Server.terminateInstance(id: @cloud_id, region: @config['region'], deploy_id: @deploy.deploy_id, mu_name: @mu_name, credentials: @config['credentials'])
2330
+ destroy
2331
+ end
2332
+ end
2628
2333
 
2629
2334
  end #class
2630
2335
  end #class