cloud-mu 3.1.4 → 3.1.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51) hide show
  1. checksums.yaml +4 -4
  2. data/ansible/roles/mu-windows/README.md +33 -0
  3. data/ansible/roles/mu-windows/defaults/main.yml +2 -0
  4. data/ansible/roles/mu-windows/handlers/main.yml +2 -0
  5. data/ansible/roles/mu-windows/meta/main.yml +53 -0
  6. data/ansible/roles/mu-windows/tasks/main.yml +20 -0
  7. data/ansible/roles/mu-windows/tests/inventory +2 -0
  8. data/ansible/roles/mu-windows/tests/test.yml +5 -0
  9. data/ansible/roles/mu-windows/vars/main.yml +2 -0
  10. data/cloud-mu.gemspec +4 -2
  11. data/cookbooks/mu-tools/recipes/selinux.rb +2 -1
  12. data/cookbooks/mu-tools/recipes/windows-client.rb +140 -144
  13. data/cookbooks/mu-tools/resources/windows_users.rb +44 -43
  14. data/extras/image-generators/AWS/win2k12.yaml +16 -13
  15. data/extras/image-generators/AWS/win2k16.yaml +16 -13
  16. data/extras/image-generators/AWS/win2k19.yaml +19 -0
  17. data/modules/mu.rb +72 -9
  18. data/modules/mu/adoption.rb +14 -2
  19. data/modules/mu/cloud.rb +111 -10
  20. data/modules/mu/clouds/aws.rb +23 -7
  21. data/modules/mu/clouds/aws/container_cluster.rb +640 -692
  22. data/modules/mu/clouds/aws/dnszone.rb +49 -45
  23. data/modules/mu/clouds/aws/firewall_rule.rb +177 -214
  24. data/modules/mu/clouds/aws/role.rb +17 -8
  25. data/modules/mu/clouds/aws/search_domain.rb +1 -1
  26. data/modules/mu/clouds/aws/server.rb +734 -1027
  27. data/modules/mu/clouds/aws/userdata/windows.erb +2 -1
  28. data/modules/mu/clouds/aws/vpc.rb +297 -786
  29. data/modules/mu/clouds/aws/vpc_subnet.rb +286 -0
  30. data/modules/mu/clouds/google/bucket.rb +1 -1
  31. data/modules/mu/clouds/google/container_cluster.rb +21 -17
  32. data/modules/mu/clouds/google/function.rb +8 -2
  33. data/modules/mu/clouds/google/server.rb +102 -32
  34. data/modules/mu/clouds/google/vpc.rb +1 -1
  35. data/modules/mu/config.rb +12 -1
  36. data/modules/mu/config/server.yml +1 -0
  37. data/modules/mu/defaults/AWS.yaml +51 -28
  38. data/modules/mu/groomers/ansible.rb +54 -17
  39. data/modules/mu/groomers/chef.rb +13 -7
  40. data/modules/mu/master/ssl.rb +0 -1
  41. data/modules/mu/mommacat.rb +8 -0
  42. data/modules/tests/ecs.yaml +23 -0
  43. data/modules/tests/includes-and-params.yaml +2 -1
  44. data/modules/tests/server-with-scrub-muisms.yaml +1 -0
  45. data/modules/tests/win2k12.yaml +25 -0
  46. data/modules/tests/win2k16.yaml +25 -0
  47. data/modules/tests/win2k19.yaml +25 -0
  48. data/requirements.txt +1 -0
  49. metadata +50 -4
  50. data/extras/image-generators/AWS/windows.yaml +0 -18
  51. data/modules/tests/needwork/win2k12.yaml +0 -13
@@ -201,7 +201,11 @@ module MU
201
201
  def arn
202
202
  desc = cloud_desc
203
203
  if desc["role"]
204
- desc["role"].arn
204
+ if desc['role'].is_a?(Hash)
205
+ desc["role"][:arn] # why though
206
+ else
207
+ desc["role"].arn
208
+ end
205
209
  else
206
210
  nil
207
211
  end
@@ -290,21 +294,21 @@ end
290
294
  if !policy.match(/^#{@deploy.deploy_id}/)
291
295
  policy = @mu_name+"-"+policy.upcase
292
296
  end
293
-
294
- my_policies = cloud_desc["policies"]
297
+ my_policies = cloud_desc(use_cache: false)["policies"]
295
298
  my_policies ||= []
296
-
299
+
300
+ seen_policy = false
297
301
  my_policies.each { |p|
298
302
  if p.policy_name == policy
303
+ seen_policy = true
299
304
  old = MU::Cloud::AWS.iam(credentials: @config['credentials']).get_policy_version(
300
305
  policy_arn: p.arn,
301
306
  version_id: p.default_version_id
302
307
  ).policy_version
303
308
 
304
309
  doc = JSON.parse URI.decode_www_form_component old.document
305
-
306
310
  need_update = false
307
-
311
+
308
312
  doc["Statement"].each { |s|
309
313
  targets.each { |target|
310
314
  target_string = target
@@ -333,6 +337,10 @@ end
333
337
  end
334
338
  end
335
339
  }
340
+
341
+ if !seen_policy
342
+ MU.log "Was given new targets for policy #{policy}, but I don't see any such policy attached to role #{@cloud_id}", MU::WARN, details: targets
343
+ end
336
344
  end
337
345
 
338
346
  # Delete an IAM policy, along with attendant versions and attachments.
@@ -525,7 +533,7 @@ end
525
533
  end
526
534
  rescue ::Aws::IAM::Errors::NoSuchEntity
527
535
  end
528
-
536
+
529
537
  else
530
538
  marker = nil
531
539
  begin
@@ -614,7 +622,7 @@ end
614
622
 
615
623
  return bok if @config['bare_policies']
616
624
  end
617
-
625
+
618
626
  if desc.tags and desc.tags.size > 0
619
627
  bok["tags"] = MU.structToHash(desc.tags, stringify_keys: true)
620
628
  end
@@ -838,6 +846,7 @@ end
838
846
  else
839
847
  raise MuError, "Invalid entitytype '#{entitytype}' passed to MU::Cloud::AWS::Role.bindTo. Must be be one of: user, group, role, instance_profile"
840
848
  end
849
+ cloud_desc(use_cache: false)
841
850
  end
842
851
 
843
852
  # Create an instance profile for EC2 instances, named identically and
@@ -693,7 +693,7 @@ module MU
693
693
  interval = 60
694
694
 
695
695
  begin
696
- resp = cloud_desc
696
+ resp = cloud_desc(use_cache: false)
697
697
 
698
698
  if (resp.endpoint.nil? or resp.endpoint.empty?) and
699
699
  (resp.endpoints.nil? or resp.endpoints.empty?) and
@@ -240,8 +240,14 @@ module MU
240
240
  end
241
241
  MU::MommaCat.unlock(instance.instance_id+"-create")
242
242
  else
243
- MU::Cloud::AWS.createStandardTags(instance.instance_id, region: @config['region'], credentials: @config['credentials'])
244
- MU::Cloud::AWS.createTag(instance.instance_id, "Name", @mu_name, region: @config['region'], credentials: @config['credentials'])
243
+ MU::Cloud::AWS.createStandardTags(
244
+ instance.instance_id,
245
+ region: @config['region'],
246
+ credentials: @config['credentials'],
247
+ optional: @config['optional_tags'],
248
+ nametag: @mu_name,
249
+ othertags: @config['tags']
250
+ )
245
251
  end
246
252
  done = true
247
253
  rescue StandardError => e
@@ -262,14 +268,11 @@ module MU
262
268
  return @config
263
269
  end
264
270
 
265
-
266
-
267
271
  # Create an Amazon EC2 instance.
268
272
  def createEc2Instance
269
- node = @config['mu_name']
270
273
 
271
274
  instance_descriptor = {
272
- :image_id => @config["ami_id"],
275
+ :image_id => @config["image_id"],
273
276
  :key_name => @deploy.ssh_key_name,
274
277
  :instance_type => @config["size"],
275
278
  :disable_api_termination => true,
@@ -277,62 +280,25 @@ module MU
277
280
  :max_count => 1
278
281
  }
279
282
 
280
- arn = nil
281
- if @config['generate_iam_role']
282
- role = @deploy.findLitterMate(name: @config['name'], type: "roles")
283
- s3_objs = ["#{@deploy.deploy_id}-secret", "#{role.mu_name}.pfx", "#{role.mu_name}.crt", "#{role.mu_name}.key", "#{role.mu_name}-winrm.crt", "#{role.mu_name}-winrm.key"].map { |file|
284
- 'arn:'+(MU::Cloud::AWS.isGovCloud?(@config['region']) ? "aws-us-gov" : "aws")+':s3:::'+MU::Cloud::AWS.adminBucketName(@credentials)+'/'+file
285
- }
286
- MU.log "Adding S3 read permissions to #{@mu_name}'s IAM profile", MU::NOTICE, details: s3_objs
287
- role.cloudobj.injectPolicyTargets("MuSecrets", s3_objs)
288
-
289
- @config['iam_role'] = role.mu_name
290
- arn = role.cloudobj.createInstanceProfile
291
- # @cfm_role_name, @cfm_prof_name
292
-
293
- elsif @config['iam_role'].nil?
294
- raise MuError, "#{@mu_name} has generate_iam_role set to false, but no iam_role assigned."
295
- end
296
- if !@config["iam_role"].nil?
297
- if arn
298
- instance_descriptor[:iam_instance_profile] = {arn: arn}
299
- else
300
- instance_descriptor[:iam_instance_profile] = {name: @config["iam_role"]}
301
- end
302
- end
303
-
304
- security_groups = []
305
- if @dependencies.has_key?("firewall_rule")
306
- @dependencies['firewall_rule'].values.each { |sg|
307
- security_groups << sg.cloud_id
308
- }
309
- end
283
+ instance_descriptor[:iam_instance_profile] = getIAMProfile
310
284
 
285
+ security_groups = myFirewallRules.map { |fw| fw.cloud_id }
311
286
  if security_groups.size > 0
312
287
  instance_descriptor[:security_group_ids] = security_groups
313
288
  else
314
289
  raise MuError, "Didn't get any security groups assigned to be in #{@mu_name}, that shouldn't happen"
315
290
  end
316
291
 
317
- if !@config['private_ip'].nil?
292
+ if @config['private_ip']
318
293
  instance_descriptor[:private_ip_address] = @config['private_ip']
319
294
  end
320
295
 
321
296
  if !@vpc.nil? and @config.has_key?("vpc")
322
- subnet_conf = @config['vpc']
323
- subnet_conf = @config['vpc']['subnets'].first if @config['vpc'].has_key?("subnets") and !@config['vpc']['subnets'].empty?
324
- tag_key, tag_value = subnet_conf['tag'].split(/=/, 2) if !subnet_conf['tag'].nil?
325
-
326
- subnet = @vpc.getSubnet(
327
- cloud_id: subnet_conf['subnet_id'],
328
- name: subnet_conf['subnet_name'],
329
- tag_key: tag_key,
330
- tag_value: tag_value
331
- )
297
+ subnet = mySubnets.sample
332
298
  if subnet.nil?
333
- raise MuError, "Got null subnet id out of #{subnet_conf['vpc']}"
299
+ raise MuError, "Got null subnet id out of #{@config['vpc']}"
334
300
  end
335
- MU.log "Deploying #{node} into VPC #{@vpc.cloud_id} Subnet #{subnet.cloud_id}"
301
+ MU.log "Deploying #{@mu_name} into VPC #{@vpc.cloud_id} Subnet #{subnet.cloud_id}"
336
302
  punchAdminNAT
337
303
  instance_descriptor[:subnet_id] = subnet.cloud_id
338
304
  end
@@ -341,37 +307,10 @@ module MU
341
307
  instance_descriptor[:user_data] = Base64.encode64(@userdata)
342
308
  end
343
309
 
344
- MU::Cloud::AWS::Server.waitForAMI(@config["ami_id"], region: @config['region'], credentials: @config['credentials'])
310
+ MU::Cloud::AWS::Server.waitForAMI(@config["image_id"], region: @config['region'], credentials: @config['credentials'])
345
311
 
346
- # Figure out which devices are embedded in the AMI already.
347
- image = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_images(image_ids: [@config["ami_id"]]).images.first
348
- ext_disks = {}
349
- if !image.block_device_mappings.nil?
350
- image.block_device_mappings.each { |disk|
351
- if !disk.device_name.nil? and !disk.device_name.empty? and !disk.ebs.nil? and !disk.ebs.empty?
352
- ext_disks[disk.device_name] = MU.structToHash(disk.ebs)
353
- end
354
- }
355
- end
356
-
357
- configured_storage = Array.new
358
- if @config["storage"]
359
- @config["storage"].each { |vol|
360
- # Drop the "encrypted" flag if a snapshot for this device exists
361
- # in the AMI, even if they both agree about the value of said
362
- # flag. Apparently that's a thing now.
363
- if ext_disks.has_key?(vol["device"])
364
- if ext_disks[vol["device"]].has_key?(:snapshot_id)
365
- vol.delete("encrypted")
366
- end
367
- end
368
- mapping, _cfm_mapping = MU::Cloud::AWS::Server.convertBlockDeviceMapping(vol)
369
- configured_storage << mapping
370
- }
371
- end
312
+ instance_descriptor[:block_device_mappings] = MU::Cloud::AWS::Server.configureBlockDevices(image_id: @config["image_id"], storage: @config['storage'], region: @config['region'], credentials: @credentials)
372
313
 
373
- instance_descriptor[:block_device_mappings] = configured_storage
374
- instance_descriptor[:block_device_mappings].concat(@ephemeral_mappings)
375
314
  instance_descriptor[:monitoring] = {enabled: @config['monitoring']}
376
315
 
377
316
  if @tags and @tags.size > 0
@@ -383,37 +322,24 @@ module MU
383
322
  }]
384
323
  end
385
324
 
386
- MU.log "Creating EC2 instance #{node}"
387
- MU.log "Instance details for #{node}: #{instance_descriptor}", MU::DEBUG
388
- # if instance_descriptor[:block_device_mappings].empty?
389
- # instance_descriptor.delete(:block_device_mappings)
390
- # end
325
+ MU.log "Creating EC2 instance #{@mu_name}", details: instance_descriptor
391
326
 
392
- retries = 0
393
- instance = begin
394
- response = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).run_instances(instance_descriptor)
395
- if response and response.instances and response.instances.size > 0
396
- response.instances.first
397
- else
398
- MU.log "Got a confusing response from run_instances", MU::ERR, details: response
399
- end
327
+ instance = resp = nil
328
+ loop_if = Proc.new {
329
+ instance = resp.instances.first if resp and resp.instances
330
+ resp.nil? or resp.instances.nil? or instance.nil?
331
+ }
332
+
333
+ begin
334
+ MU.retrier([Aws::EC2::Errors::InvalidGroupNotFound, Aws::EC2::Errors::InvalidSubnetIDNotFound, Aws::EC2::Errors::InvalidParameterValue], loop_if: loop_if, loop_msg: "Waiting for run_instances to return #{@mu_name}") {
335
+ resp = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).run_instances(instance_descriptor)
336
+ }
400
337
  rescue Aws::EC2::Errors::InvalidRequest => e
401
338
  MU.log e.message, MU::ERR, details: instance_descriptor
402
339
  raise e
403
- rescue Aws::EC2::Errors::InvalidGroupNotFound, Aws::EC2::Errors::InvalidSubnetIDNotFound, Aws::EC2::Errors::InvalidParameterValue => e
404
- if retries < 10
405
- if retries > 7
406
- MU.log "Seeing #{e.inspect} while trying to launch #{node}, retrying a few more times...", MU::WARN, details: instance_descriptor
407
- end
408
- sleep 10
409
- retries = retries + 1
410
- retry
411
- else
412
- raise MuError, e.inspect
413
- end
414
340
  end
415
341
 
416
- MU.log "#{node} (#{instance.instance_id}) coming online"
342
+ MU.log "#{@mu_name} (#{instance.instance_id}) coming online"
417
343
 
418
344
  instance
419
345
  end
@@ -517,445 +443,82 @@ module MU
517
443
  # Apply tags, bootstrap our configuration management, and other
518
444
  # administravia for a new instance.
519
445
  def postBoot(instance_id = nil)
520
- if !instance_id.nil?
521
- @cloud_id = instance_id
522
- end
446
+ @cloud_id ||= instance_id
523
447
  node, _config, deploydata = describe(cloud_id: @cloud_id)
524
- instance = cloud_desc
525
- raise MuError, "Couldn't find instance #{@mu_name} (#{@cloud_id})" if !instance
526
- @cloud_id = instance.instance_id
527
- return false if !MU::MommaCat.lock(instance.instance_id+"-orchestrate", true)
528
- return false if !MU::MommaCat.lock(instance.instance_id+"-groom", true)
529
-
530
- MU::Cloud::AWS.createStandardTags(instance.instance_id, region: @config['region'], credentials: @config['credentials'])
531
- MU::Cloud::AWS.createTag(instance.instance_id, "Name", node, region: @config['region'], credentials: @config['credentials'])
532
-
533
- if @config['optional_tags']
534
- MU::MommaCat.listOptionalTags.each { |key, value|
535
- MU::Cloud::AWS.createTag(instance.instance_id, key, value, region: @config['region'], credentials: @config['credentials'])
536
- }
537
- end
448
+ @mu_name ||= node
538
449
 
539
- if !@config['tags'].nil?
540
- @config['tags'].each { |tag|
541
- MU::Cloud::AWS.createTag(instance.instance_id, tag['key'], tag['value'], region: @config['region'], credentials: @config['credentials'])
542
- }
543
- end
544
- MU.log "Tagged #{node} (#{instance.instance_id}) with MU-ID=#{MU.deploy_id}", MU::DEBUG
450
+ raise MuError, "Couldn't find instance #{@mu_name} (#{@cloud_id})" if !cloud_desc
451
+ return false if !MU::MommaCat.lock(@cloud_id+"-orchestrate", true)
452
+ return false if !MU::MommaCat.lock(@cloud_id+"-groom", true)
453
+ finish = Proc.new { |status|
454
+ MU::MommaCat.unlock(@cloud_id+"-orchestrate")
455
+ MU::MommaCat.unlock(@cloud_id+"-groom")
456
+ return status
457
+ }
458
+
459
+ MU::Cloud::AWS.createStandardTags(
460
+ @cloud_id,
461
+ region: @config['region'],
462
+ credentials: @config['credentials'],
463
+ optional: @config['optional_tags'],
464
+ nametag: @mu_name,
465
+ othertags: @config['tags']
466
+ )
545
467
 
546
468
  # Make double sure we don't lose a cached mu_windows_name value.
547
- if windows? or !@config['active_directory'].nil?
548
- if @mu_windows_name.nil?
549
- @mu_windows_name = deploydata['mu_windows_name']
550
- end
469
+ if (windows? or !@config['active_directory'].nil?)
470
+ @mu_windows_name ||= deploydata['mu_windows_name']
551
471
  end
552
472
 
553
- retries = -1
554
- max_retries = 30
555
- begin
556
- if instance.nil? or instance.state.name != "running"
557
- retries = retries + 1
558
- if !instance.nil? and instance.state.name == "terminated"
559
- raise MuError, "#{@cloud_id} appears to have been terminated mid-bootstrap!"
560
- end
561
- if retries % 3 == 0
562
- MU.log "Waiting for EC2 instance #{node} (#{@cloud_id}) to be ready...", MU::NOTICE
563
- end
564
- sleep 40
565
- # Get a fresh AWS descriptor
566
- instance = MU::Cloud::Server.find(cloud_id: @cloud_id, region: @config['region'], credentials: @config['credentials']).values.first
567
- if instance and instance.state.name == "terminated"
568
- raise MuError, "EC2 instance #{node} (#{@cloud_id}) terminating during bootstrap!"
569
- end
473
+ loop_if = Proc.new {
474
+ !cloud_desc(use_cache: false) or cloud_desc.state.name != "running"
475
+ }
476
+ MU.retrier([Aws::EC2::Errors::ServiceError], max: 30, wait: 40, loop_if: loop_if) { |retries, _wait|
477
+ if cloud_desc and cloud_desc.state.name == "terminated"
478
+ raise MuError, "#{@cloud_id} appears to have been terminated mid-bootstrap!"
570
479
  end
571
- rescue Aws::EC2::Errors::ServiceError => e
572
- if retries < max_retries
573
- MU.log "Got #{e.inspect} during initial instance creation of #{@cloud_id}, retrying...", MU::NOTICE, details: instance
574
- retries = retries + 1
575
- retry
576
- else
577
- raise MuError, "Too many retries creating #{node} (#{e.inspect})"
480
+ if retries % 3 == 0
481
+ MU.log "Waiting for EC2 instance #{@mu_name} (#{@cloud_id}) to be ready...", MU::NOTICE
578
482
  end
579
- end while instance.nil? or (instance.state.name != "running" and retries < max_retries)
483
+ }
580
484
 
581
485
  punchAdminNAT
582
486
 
583
-
584
- # If we came up via AutoScale, the Alarm module won't have had our
585
- # instance ID to associate us with itself. So invoke that here.
586
- # XXX might be possible to do this with regular alarm resources and
587
- # dependencies now
588
- if !@config['basis'].nil? and @config["alarms"] and !@config["alarms"].empty?
589
- @config["alarms"].each { |alarm|
590
- alarm_obj = MU::MommaCat.findStray(
591
- "AWS",
592
- "alarms",
593
- region: @config["region"],
594
- deploy_id: @deploy.deploy_id,
595
- name: alarm['name']
596
- ).first
597
- alarm["dimensions"] = [{:name => "InstanceId", :value => @cloud_id}]
598
-
599
- if alarm["enable_notifications"]
600
- topic_arn = MU::Cloud::AWS::Notification.createTopic(alarm["notification_group"], region: @config["region"], credentials: @config['credentials'])
601
- MU::Cloud::AWS::Notification.subscribe(arn: topic_arn, protocol: alarm["notification_type"], endpoint: alarm["notification_endpoint"], region: @config["region"], credentials: @config["credentials"])
602
- alarm["alarm_actions"] = [topic_arn]
603
- alarm["ok_actions"] = [topic_arn]
604
- end
605
-
606
- alarm_name = alarm_obj ? alarm_obj.cloud_id : "#{node}-#{alarm['name']}".upcase
607
-
608
- MU::Cloud::AWS::Alarm.setAlarm(
609
- name: alarm_name,
610
- ok_actions: alarm["ok_actions"],
611
- alarm_actions: alarm["alarm_actions"],
612
- insufficient_data_actions: alarm["no_data_actions"],
613
- metric_name: alarm["metric_name"],
614
- namespace: alarm["namespace"],
615
- statistic: alarm["statistic"],
616
- dimensions: alarm["dimensions"],
617
- period: alarm["period"],
618
- unit: alarm["unit"],
619
- evaluation_periods: alarm["evaluation_periods"],
620
- threshold: alarm["threshold"],
621
- comparison_operator: alarm["comparison_operator"],
622
- region: @config["region"],
623
- credentials: @config['credentials']
624
- )
625
- }
626
- end
627
-
628
- # We have issues sometimes where our dns_records are pointing at the wrong node name and IP address.
629
- # Make sure that doesn't happen. Happens with server pools only
630
- if @config['dns_records'] && !@config['dns_records'].empty?
631
- @config['dns_records'].each { |dnsrec|
632
- if dnsrec.has_key?("name")
633
- if dnsrec['name'].start_with?(MU.deploy_id.downcase) && !dnsrec['name'].start_with?(node.downcase)
634
- MU.log "DNS records for #{node} seem to be wrong, deleting from current config", MU::WARN, details: dnsrec
635
- dnsrec.delete('name')
636
- dnsrec.delete('target')
637
- end
638
- end
639
- }
640
- end
487
+ setAlarms
641
488
 
642
489
  # Unless we're planning on associating a different IP later, set up a
643
490
  # DNS entry for this thing and let it sync in the background. We'll come
644
491
  # back to it later.
645
- if @config['static_ip'].nil? && !@named
492
+ if @config['static_ip'].nil? and !@named
646
493
  MU::MommaCat.nameKitten(self)
647
494
  @named = true
648
495
  end
649
496
 
650
497
  if !@config['src_dst_check'] and !@config["vpc"].nil?
651
- MU.log "Disabling source_dest_check #{node} (making it NAT-worthy)"
498
+ MU.log "Disabling source_dest_check #{@mu_name} (making it NAT-worthy)"
652
499
  MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).modify_instance_attribute(
653
- instance_id: @cloud_id,
654
- source_dest_check: {:value => false}
500
+ instance_id: @cloud_id,
501
+ source_dest_check: { value: false }
655
502
  )
656
503
  end
657
504
 
658
505
  # Set console termination protection. Autoscale nodes won't set this
659
506
  # by default.
660
507
  MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).modify_instance_attribute(
661
- instance_id: @cloud_id,
662
- disable_api_termination: {:value => true}
508
+ instance_id: @cloud_id,
509
+ disable_api_termination: { value: true}
663
510
  )
664
511
 
665
- has_elastic_ip = false
666
- if !instance.public_ip_address.nil?
667
- begin
668
- resp = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_addresses(public_ips: [instance.public_ip_address])
669
- if resp.addresses.size > 0 and resp.addresses.first.instance_id == @cloud_id
670
- has_elastic_ip = true
671
- end
672
- rescue Aws::EC2::Errors::InvalidAddressNotFound
673
- # XXX this is ok to ignore, it means the public IP isn't Elastic
674
- end
675
- end
676
-
677
- win_admin_password = nil
678
- ec2config_password = nil
679
- sshd_password = nil
680
- if windows?
681
- if @config['use_cloud_provider_windows_password']
682
- win_admin_password = getWindowsAdminPassword
683
- elsif @config['windows_auth_vault'] && !@config['windows_auth_vault'].empty?
684
- if @config["windows_auth_vault"].has_key?("password_field")
685
- win_admin_password = @groomer.getSecret(
686
- vault: @config['windows_auth_vault']['vault'],
687
- item: @config['windows_auth_vault']['item'],
688
- field: @config["windows_auth_vault"]["password_field"]
689
- )
690
- else
691
- win_admin_password = getWindowsAdminPassword
692
- end
693
-
694
- if @config["windows_auth_vault"].has_key?("ec2config_password_field")
695
- ec2config_password = @groomer.getSecret(
696
- vault: @config['windows_auth_vault']['vault'],
697
- item: @config['windows_auth_vault']['item'],
698
- field: @config["windows_auth_vault"]["ec2config_password_field"]
699
- )
700
- end
701
-
702
- if @config["windows_auth_vault"].has_key?("sshd_password_field")
703
- sshd_password = @groomer.getSecret(
704
- vault: @config['windows_auth_vault']['vault'],
705
- item: @config['windows_auth_vault']['item'],
706
- field: @config["windows_auth_vault"]["sshd_password_field"]
707
- )
708
- end
709
- end
710
-
711
- win_admin_password = MU.generateWindowsPassword if win_admin_password.nil?
712
- ec2config_password = MU.generateWindowsPassword if ec2config_password.nil?
713
- sshd_password = MU.generateWindowsPassword if sshd_password.nil?
714
-
715
- # We're creating the vault here so when we run
716
- # MU::Cloud::Server.initialSSHTasks and we need to set the Windows
717
- # Admin password we can grab it from said vault.
718
- creds = {
719
- "username" => @config['windows_admin_username'],
720
- "password" => win_admin_password,
721
- "ec2config_username" => "ec2config",
722
- "ec2config_password" => ec2config_password,
723
- "sshd_username" => "sshd_service",
724
- "sshd_password" => sshd_password
725
- }
726
- @groomer.saveSecret(vault: @mu_name, item: "windows_credentials", data: creds, permissions: "name:#{@mu_name}")
727
- end
728
-
729
- subnet = nil
730
- if !@vpc.nil? and @config.has_key?("vpc") and !instance.subnet_id.nil?
731
- subnet = @vpc.getSubnet(
732
- cloud_id: instance.subnet_id
733
- )
734
- if subnet.nil?
735
- raise MuError, "Got null subnet id out of #{@config['vpc']} when asking for #{instance.subnet_id}"
736
- end
737
- end
738
-
739
- if !subnet.nil?
740
- if !subnet.private? or (!@config['static_ip'].nil? and !@config['static_ip']['assign_ip'].nil?)
741
- if !@config['static_ip'].nil?
742
- if !@config['static_ip']['ip'].nil?
743
- MU::Cloud::AWS::Server.associateElasticIp(instance.instance_id, classic: false, ip: @config['static_ip']['ip'])
744
- elsif !has_elastic_ip
745
- MU::Cloud::AWS::Server.associateElasticIp(instance.instance_id)
746
- end
747
- end
748
- end
749
-
750
- _nat_ssh_key, _nat_ssh_user, nat_ssh_host, _canonical_ip, _ssh_user, _ssh_key_name = getSSHConfig
751
- if subnet.private? and !nat_ssh_host and !MU::Cloud::AWS::VPC.haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials'])
752
- raise MuError, "#{node} is in a private subnet (#{subnet}), but has no bastion host configured, and I have no other route to it"
753
- end
754
-
755
- # If we've asked for additional subnets (and this @config is not a
756
- # member of a Server Pool, which has different semantics), create
757
- # extra interfaces to accomodate.
758
- if !@config['vpc']['subnets'].nil? and @config['basis'].nil?
759
- device_index = 1
760
- @vpc.subnets.each { |s|
761
- subnet_id = s.cloud_id
762
- MU.log "Adding network interface on subnet #{subnet_id} for #{node}"
763
- iface = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).create_network_interface(subnet_id: subnet_id).network_interface
764
- MU::Cloud::AWS.createStandardTags(iface.network_interface_id, region: @config['region'], credentials: @config['credentials'])
765
- MU::Cloud::AWS.createTag(iface.network_interface_id, "Name", node+"-ETH"+device_index.to_s, region: @config['region'], credentials: @config['credentials'])
766
-
767
- if @config['optional_tags']
768
- MU::MommaCat.listOptionalTags.each { |key, value|
769
- MU::Cloud::AWS.createTag(iface.network_interface_id, key, value, region: @config['region'], credentials: @config['credentials'])
770
- }
771
- end
772
-
773
- if !@config['tags'].nil?
774
- @config['tags'].each { |tag|
775
- MU::Cloud::AWS.createTag(iface.network_interface_id, tag['key'], tag['value'], region: @config['region'], credentials: @config['credentials'])
776
- }
777
- end
778
-
779
- MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).attach_network_interface(
780
- network_interface_id: iface.network_interface_id,
781
- instance_id: instance.instance_id,
782
- device_index: device_index
783
- )
784
- device_index = device_index + 1
785
- }
786
- end
787
- elsif !@config['static_ip'].nil?
788
- if !@config['static_ip']['ip'].nil?
789
- MU::Cloud::AWS::Server.associateElasticIp(instance.instance_id, classic: true, ip: @config['static_ip']['ip'])
790
- elsif !has_elastic_ip
791
- MU::Cloud::AWS::Server.associateElasticIp(instance.instance_id, classic: true)
792
- end
793
- end
794
-
512
+ tagVolumes
513
+ configureNetworking
514
+ saveCredentials
795
515
 
796
516
  if !@config['image_then_destroy']
797
517
  notify
798
518
  end
799
519
 
800
- MU.log "EC2 instance #{node} has id #{instance.instance_id}", MU::DEBUG
801
-
802
- @config["private_dns_name"] = instance.private_dns_name
803
- @config["public_dns_name"] = instance.public_dns_name
804
- @config["private_ip_address"] = instance.private_ip_address
805
- @config["public_ip_address"] = instance.public_ip_address
806
-
807
- # Root disk on standard CentOS AMI
808
- # tagVolumes(instance.instance_id, "/dev/sda", "Name", "ROOT-"+MU.deploy_id+"-"+@config["name"].upcase)
809
- # Root disk on standard Ubuntu AMI
810
- # tagVolumes(instance.instance_id, "/dev/sda1", "Name", "ROOT-"+MU.deploy_id+"-"+@config["name"].upcase)
811
-
812
- # Generic deploy ID tag
813
- # tagVolumes(instance.instance_id)
814
-
815
- # Tag volumes with all our standard tags.
816
- # Maybe replace tagVolumes with this? There is one more place tagVolumes is called from
817
- volumes = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_volumes(filters: [name: "attachment.instance-id", values: [instance.instance_id]])
818
- volumes.each { |vol|
819
- vol.volumes.each { |volume|
820
- volume.attachments.each { |attachment|
821
- MU::MommaCat.listStandardTags.each_pair { |key, value|
822
- MU::Cloud::AWS.createTag(attachment.volume_id, key, value, region: @config['region'], credentials: @config['credentials'])
823
-
824
- if attachment.device == "/dev/sda" or attachment.device == "/dev/sda1"
825
- MU::Cloud::AWS.createTag(attachment.volume_id, "Name", "ROOT-#{MU.deploy_id}-#{@config["name"].upcase}", region: @config['region'], credentials: @config['credentials'])
826
- else
827
- MU::Cloud::AWS.createTag(attachment.volume_id, "Name", "#{MU.deploy_id}-#{@config["name"].upcase}-#{attachment.device.upcase}", region: @config['region'], credentials: @config['credentials'])
828
- end
829
- }
830
-
831
- if @config['optional_tags']
832
- MU::MommaCat.listOptionalTags.each { |key, value|
833
- MU::Cloud::AWS.createTag(attachment.volume_id, key, value, region: @config['region'], credentials: @config['credentials'])
834
- }
835
- end
836
-
837
- if @config['tags']
838
- @config['tags'].each { |tag|
839
- MU::Cloud::AWS.createTag(attachment.volume_id, tag['key'], tag['value'], region: @config['region'], credentials: @config['credentials'])
840
- }
841
- end
842
- }
843
- }
844
- }
845
-
846
- canonical_name = instance.public_dns_name
847
- canonical_name = instance.private_dns_name if !canonical_name or nat_ssh_host != nil
848
- @config['canonical_name'] = canonical_name
849
-
850
- if !@config['add_private_ips'].nil?
851
- instance.network_interfaces.each { |int|
852
- if int.private_ip_address == instance.private_ip_address and int.private_ip_addresses.size < (@config['add_private_ips'] + 1)
853
- MU.log "Adding #{@config['add_private_ips']} extra private IP addresses to #{instance.instance_id}"
854
- MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).assign_private_ip_addresses(
855
- network_interface_id: int.network_interface_id,
856
- secondary_private_ip_address_count: @config['add_private_ips'],
857
- allow_reassignment: false
858
- )
859
- end
860
- }
861
- notify
862
- end
863
-
864
- begin
865
- if @config['groom'].nil? or @config['groom']
866
- if windows?
867
- # kick off certificate generation early; WinRM will need it
868
- @deploy.nodeSSLCerts(self)
869
- if @config.has_key?("basis")
870
- @deploy.nodeSSLCerts(self, true)
871
- end
872
- if !@groomer.haveBootstrapped?
873
- session = getWinRMSession(50, 60, reboot_on_problems: true)
874
- initialWinRMTasks(session)
875
- begin
876
- session.close
877
- rescue StandardError
878
- # this is allowed to fail- we're probably rebooting anyway
879
- end
880
- else # for an existing Windows node: WinRM, then SSH if it fails
881
- begin
882
- session = getWinRMSession(1, 60)
883
- rescue StandardError # yeah, yeah
884
- session = getSSHSession(1, 60)
885
- # XXX maybe loop at least once if this also fails?
886
- end
887
- end
888
- else
889
- session = getSSHSession(40, 30)
890
- initialSSHTasks(session)
891
- end
892
- end
893
- rescue BootstrapTempFail
894
- sleep 45
895
- retry
896
- ensure
897
- session.close if !session.nil? and !windows?
898
- end
899
-
900
- if @config["existing_deploys"] && !@config["existing_deploys"].empty?
901
- @config["existing_deploys"].each { |ext_deploy|
902
- if ext_deploy["cloud_id"]
903
- found = MU::MommaCat.findStray(
904
- @config['cloud'],
905
- ext_deploy["cloud_type"],
906
- cloud_id: ext_deploy["cloud_id"],
907
- region: @config['region'],
908
- dummy_ok: false
909
- ).first
910
-
911
- MU.log "Couldn't find existing resource #{ext_deploy["cloud_id"]}, #{ext_deploy["cloud_type"]}", MU::ERR if found.nil?
912
- @deploy.notify(ext_deploy["cloud_type"], found.config["name"], found.deploydata, mu_name: found.mu_name, triggering_node: @mu_name)
913
- elsif ext_deploy["mu_name"] && ext_deploy["deploy_id"]
914
- MU.log "#{ext_deploy["mu_name"]} / #{ext_deploy["deploy_id"]}"
915
- found = MU::MommaCat.findStray(
916
- @config['cloud'],
917
- ext_deploy["cloud_type"],
918
- deploy_id: ext_deploy["deploy_id"],
919
- mu_name: ext_deploy["mu_name"],
920
- region: @config['region'],
921
- dummy_ok: false
922
- ).first
923
-
924
- MU.log "Couldn't find existing resource #{ext_deploy["mu_name"]}/#{ext_deploy["deploy_id"]}, #{ext_deploy["cloud_type"]}", MU::ERR if found.nil?
925
- @deploy.notify(ext_deploy["cloud_type"], found.config["name"], found.deploydata, mu_name: ext_deploy["mu_name"], triggering_node: @mu_name)
926
- else
927
- MU.log "Trying to find existing deploy, but either the cloud_id is not valid or no mu_name and deploy_id where provided", MU::ERR
928
- end
929
- }
930
- end
931
-
932
- # See if this node already exists in our config management. If it does,
933
- # we're done.
934
- if MU.inGem?
935
- MU.log "Deploying from a gem, not grooming"
936
- MU::MommaCat.unlock(instance.instance_id+"-orchestrate")
937
- MU::MommaCat.unlock(instance.instance_id+"-groom")
938
-
939
- return true
940
- elsif @groomer.haveBootstrapped?
941
- MU.log "Node #{node} has already been bootstrapped, skipping groomer setup.", MU::NOTICE
942
-
943
- if @config['groom'].nil? or @config['groom']
944
- @groomer.saveDeployData
945
- end
946
-
947
- MU::MommaCat.unlock(instance.instance_id+"-orchestrate")
948
- MU::MommaCat.unlock(instance.instance_id+"-groom")
949
- return true
950
- end
951
-
952
- begin
953
- @groomer.bootstrap if @config['groom'].nil? or @config['groom']
954
- rescue MU::Groomer::RunError
955
- MU::MommaCat.unlock(instance.instance_id+"-groom")
956
- MU::MommaCat.unlock(instance.instance_id+"-orchestrate")
957
- return false
958
- end
520
+ getIAMProfile
521
+ finish.call(false) if !bootstrapGroomer
959
522
 
960
523
  # Make sure we got our name written everywhere applicable
961
524
  if !@named
@@ -963,140 +526,75 @@ module MU
963
526
  @named = true
964
527
  end
965
528
 
966
- MU::MommaCat.unlock(instance.instance_id+"-groom")
967
- MU::MommaCat.unlock(instance.instance_id+"-orchestrate")
968
- return true
969
- end
970
-
971
- # postBoot
529
+ finish.call(true)
530
+ end #postboot
972
531
 
973
532
  # Locate an existing instance or instances and return an array containing matching AWS resource descriptors for those that match.
974
533
  # @return [Hash<String,OpenStruct>]: The cloud provider's complete descriptions of matching instances
975
534
  def self.find(**args)
976
535
  ip ||= args[:flags]['ip'] if args[:flags] and args[:flags]['ip']
977
536
 
978
- if !args[:region].nil?
979
- regions = [args[:region]]
980
- else
981
- regions = MU::Cloud::AWS.listRegions
982
- end
537
+ regions = args[:region].nil? ? MU::Cloud::AWS.listRegions : [args[:region]]
983
538
 
984
539
  found = {}
985
540
  search_semaphore = Mutex.new
986
541
  search_threads = []
987
542
 
988
- if !ip and !args[:cloud_id] and !args[:tag_value]
989
- regions.each { |r|
990
- search_threads << Thread.new {
991
- MU::Cloud::AWS.ec2(region: r, credentials: args[:credentials]).describe_instances(
992
- filters: [
993
- {
994
- name: "instance-state-name",
995
- values: ["running", "pending", "stopped"]
996
- }
997
- ]
998
- ).reservations.each { |resp|
999
- if !resp.nil? and !resp.instances.nil?
1000
- resp.instances.each { |i|
1001
- search_semaphore.synchronize {
1002
- found[i.instance_id] = i
1003
- }
1004
- }
1005
- end
1006
- }
1007
- }
1008
- }
543
+ base_filter = { name: "instance-state-name", values: ["running", "pending", "stopped"] }
544
+ searches = []
1009
545
 
1010
- search_threads.each { |t|
1011
- t.join
546
+ if args[:cloud_id]
547
+ searches << {
548
+ :instance_ids => [args[:cloud_id]],
549
+ :filters => [base_filter]
1012
550
  }
1013
-
1014
- return found
1015
551
  end
1016
552
 
1017
- # If we got an instance id, go get it
1018
- if args[:cloud_id]
1019
- regions.each { |r|
1020
- search_threads << Thread.new {
1021
- MU.log "Hunting for instance with cloud id '#{args[:cloud_id]}' in #{r}", MU::DEBUG
1022
- retries = 0
1023
- begin
1024
- MU::Cloud::AWS.ec2(region: r, credentials: args[:credentials]).describe_instances(
1025
- instance_ids: [args[:cloud_id]],
1026
- filters: [
1027
- {
1028
- name: "instance-state-name",
1029
- values: ["running", "pending", "stopped"]
1030
- }
1031
- ]
1032
- ).reservations.each { |resp|
1033
- if !resp.nil? and !resp.instances.nil?
1034
- resp.instances.each { |i|
1035
- search_semaphore.synchronize {
1036
- found[i.instance_id] = i
1037
- }
1038
- }
1039
- end
1040
- }
1041
- rescue Aws::EC2::Errors::InvalidInstanceIDNotFound => e
1042
- retries += 1
1043
- if retries <= 5
1044
- sleep 5
1045
- else
1046
- raise MuError, "#{e.inspect} in region #{r}"
1047
- end
1048
- end
553
+ if ip
554
+ ["ip-address", "private-ip-address"].each { |ip_type|
555
+ searches << {
556
+ filters: [base_filter, {name: ip_type, values: [ip]} ],
1049
557
  }
1050
558
  }
1051
- done_threads = []
1052
- begin
1053
- search_threads.each { |t|
1054
- joined = t.join(2)
1055
- done_threads << joined if !joined.nil?
1056
- }
1057
- end while found.size < 1 and done_threads.size != search_threads.size
1058
559
  end
1059
560
 
1060
- return found if found.size > 0
1061
-
1062
- # Ok, well, let's try looking it up by IP then
1063
- if !ip.nil?
1064
- MU.log "Hunting for instance by IP '#{ip}'", MU::DEBUG
1065
- ["ip-address", "private-ip-address"].each { |filter|
1066
- regions.each { |r|
1067
- response = MU::Cloud::AWS.ec2(region: r, credentials: args[:credentials]).describe_instances(
1068
- filters: [
1069
- {name: filter, values: [ip]},
1070
- {name: "instance-state-name", values: ["running", "pending", "stopped"]}
1071
- ]
1072
- ).reservations.first
1073
- response.instances.each { |i|
1074
- found[i.instance_id] = i
1075
- }
1076
- }
561
+ if args[:tag_value] and args[:tag_key]
562
+ searches << {
563
+ filters: [
564
+ base_filter,
565
+ {name: ip_type, values: [ip]},
566
+ {name: "tag:#{args[:tag_key]}", values: [args[:tag_value]]},
567
+ ]
1077
568
  }
1078
569
  end
1079
570
 
1080
- return found if found.size > 0
571
+ if searches.empty?
572
+ searches << { filters: [base_filter] }
573
+ end
1081
574
 
1082
- # Fine, let's try it by tag.
1083
- if args[:tag_value]
1084
- MU.log "Searching for instance by tag '#{args[:tag_key]}=#{args[:tag_value]}'", MU::DEBUG
1085
- regions.each { |r|
1086
- MU::Cloud::AWS.ec2(region: r, credentials: args[:credentials]).describe_instances(
1087
- filters: [
1088
- {name: "tag:#{args[:tag_key]}", values: [args[:tag_value]]},
1089
- {name: "instance-state-name", values: ["running", "pending", "stopped"]}
1090
- ]
1091
- ).reservations.each { |resp|
1092
- if !resp.nil? and resp.instances.size > 0
1093
- resp.instances.each { |i|
1094
- found[i.instance_id] = i
575
+ regions.each { |r|
576
+ searches.each { |search|
577
+ search_threads << Thread.new(search) { |params|
578
+ MU.retrier([Aws::EC2::Errors::InvalidInstanceIDNotFound], wait: 5, max: 5, ignoreme: [Aws::EC2::Errors::InvalidInstanceIDNotFound]) {
579
+ MU::Cloud::AWS.ec2(region: r, credentials: args[:credentials]).describe_instances(params).reservations.each { |resp|
580
+ next if resp.nil? or resp.instances.nil?
581
+ resp.instances.each { |i|
582
+ search_semaphore.synchronize {
583
+ found[i.instance_id] = i
584
+ }
585
+ }
1095
586
  }
1096
- end
587
+ }
1097
588
  }
1098
589
  }
1099
- end
590
+ }
591
+ done_threads = []
592
+ begin
593
+ search_threads.each { |t|
594
+ joined = t.join(2)
595
+ done_threads << joined if !joined.nil?
596
+ }
597
+ end while found.size < 1 and done_threads.size != search_threads.size
1100
598
 
1101
599
  return found
1102
600
  end
@@ -1211,8 +709,8 @@ module MU
1211
709
 
1212
710
  int.private_ip_addresses.each { |priv_ip|
1213
711
  if !priv_ip.primary
1214
- bok['add_private_ips'] ||= []
1215
- bok['add_private_ips'] << priv_ip.private_ip_address
712
+ bok['add_private_ips'] ||= 0
713
+ bok['add_private_ips'] += 1
1216
714
  end
1217
715
  if priv_ip.association and priv_ip.association.public_ip
1218
716
  bok['associate_public_ip'] = true
@@ -1345,11 +843,6 @@ module MU
1345
843
  # Called automatically by {MU::Deploy#createResources}
1346
844
  def groom
1347
845
  MU::MommaCat.lock(@cloud_id+"-groom")
1348
- node, _config, deploydata = describe(cloud_id: @cloud_id)
1349
-
1350
- if node.nil? or node.empty?
1351
- raise MuError, "MU::Cloud::AWS::Server.groom was called without a mu_name"
1352
- end
1353
846
 
1354
847
  # Make double sure we don't lose a cached mu_windows_name value.
1355
848
  if windows? or !@config['active_directory'].nil?
@@ -1360,7 +853,7 @@ module MU
1360
853
 
1361
854
  punchAdminNAT
1362
855
 
1363
- MU::Cloud::AWS::Server.tagVolumes(@cloud_id, credentials: @config['credentials'])
856
+ tagVolumes
1364
857
 
1365
858
  # If we have a loadbalancer configured, attach us to it
1366
859
  if !@config['loadbalancers'].nil?
@@ -1389,55 +882,18 @@ module MU
1389
882
  end
1390
883
 
1391
884
  begin
885
+ getIAMProfile
1392
886
  if @config['groom'].nil? or @config['groom']
1393
- @groomer.run(purpose: "Full Initial Run", max_retries: 15, reboot_first_fail: windows?, timeout: @config['groomer_timeout'])
887
+ @groomer.run(purpose: "Full Initial Run", max_retries: 15, reboot_first_fail: (windows? and @config['groomer'] != "Ansible"), timeout: @config['groomer_timeout'])
1394
888
  end
1395
889
  rescue MU::Groomer::RunError => e
1396
- MU.log "Proceeding after failed initial Groomer run, but #{node} may not behave as expected!", MU::WARN, details: e.message
890
+ MU.log "Proceeding after failed initial Groomer run, but #{@mu_name} may not behave as expected!", MU::WARN, details: e.message
1397
891
  rescue StandardError => e
1398
- MU.log "Caught #{e.inspect} on #{node} in an unexpected place (after @groomer.run on Full Initial Run)", MU::ERR
892
+ MU.log "Caught #{e.inspect} on #{@mu_name} in an unexpected place (after @groomer.run on Full Initial Run)", MU::ERR
1399
893
  end
1400
894
 
1401
895
  if !@config['create_image'].nil? and !@config['image_created']
1402
- img_cfg = @config['create_image']
1403
- # Scrub things that don't belong on an AMI
1404
- session = getSSHSession
1405
- sudo = purgecmd = ""
1406
- sudo = "sudo" if @config['ssh_user'] != "root"
1407
- if windows?
1408
- purgecmd = "rm -rf /cygdrive/c/mu_installed_chef"
1409
- else
1410
- purgecmd = "rm -rf /opt/mu_installed_chef"
1411
- end
1412
- if img_cfg['image_then_destroy']
1413
- if windows?
1414
- purgecmd = "rm -rf /cygdrive/c/chef/ /home/#{@config['windows_admin_username']}/.ssh/authorized_keys /home/Administrator/.ssh/authorized_keys /cygdrive/c/mu-installer-ran-updates /cygdrive/c/mu_installed_chef"
1415
- # session.exec!("powershell -Command \"& {(Get-WmiObject -Class Win32_Product -Filter \"Name='UniversalForwarder'\").Uninstall()}\"")
1416
- else
1417
- purgecmd = "#{sudo} rm -rf /var/lib/cloud/instances/i-* /root/.ssh/authorized_keys /etc/ssh/ssh_host_*key* /etc/chef /etc/opscode/* /.mu-installer-ran-updates /var/chef /opt/mu_installed_chef /opt/chef ; #{sudo} sed -i 's/^HOSTNAME=.*//' /etc/sysconfig/network"
1418
- end
1419
- end
1420
- session.exec!(purgecmd)
1421
- session.close
1422
- ami_ids = MU::Cloud::AWS::Server.createImage(
1423
- name: @mu_name,
1424
- instance_id: @cloud_id,
1425
- storage: @config['storage'],
1426
- exclude_storage: img_cfg['image_exclude_storage'],
1427
- copy_to_regions: img_cfg['copy_to_regions'],
1428
- make_public: img_cfg['public'],
1429
- region: @config['region'],
1430
- tags: @config['tags'],
1431
- credentials: @config['credentials']
1432
- )
1433
- @deploy.notify("images", @config['name'], ami_ids)
1434
- @config['image_created'] = true
1435
- if img_cfg['image_then_destroy']
1436
- MU::Cloud::AWS::Server.waitForAMI(ami_ids[@config['region']], region: @config['region'], credentials: @config['credentials'])
1437
- MU.log "AMI #{ami_ids[@config['region']]} ready, removing source node #{node}"
1438
- MU::Cloud::AWS::Server.terminateInstance(id: @cloud_id, region: @config['region'], deploy_id: @deploy.deploy_id, mu_name: @mu_name, credentials: @config['credentials'])
1439
- destroy
1440
- end
896
+ createImage
1441
897
  end
1442
898
 
1443
899
  MU::MommaCat.unlock(@cloud_id+"-groom")
@@ -1485,23 +941,19 @@ module MU
1485
941
  # bastion hosts that may be in the path, see getSSHConfig if that's what
1486
942
  # you need.
1487
943
  def canonicalIP
1488
- _mu_name, _config, deploydata = describe(cloud_id: @cloud_id)
1489
-
1490
- instance = cloud_desc
1491
-
1492
- if !instance
944
+ if !cloud_desc
1493
945
  raise MuError, "Couldn't retrieve cloud descriptor for server #{self}"
1494
946
  end
1495
947
 
1496
948
  if deploydata.nil? or
1497
949
  (!deploydata.has_key?("private_ip_address") and
1498
950
  !deploydata.has_key?("public_ip_address"))
1499
- return nil if instance.nil?
951
+ return nil if cloud_desc.nil?
1500
952
  @deploydata = {} if @deploydata.nil?
1501
- @deploydata["public_ip_address"] = instance.public_ip_address
1502
- @deploydata["public_dns_name"] = instance.public_dns_name
1503
- @deploydata["private_ip_address"] = instance.private_ip_address
1504
- @deploydata["private_dns_name"] = instance.private_dns_name
953
+ @deploydata["public_ip_address"] = cloud_desc.public_ip_address
954
+ @deploydata["public_dns_name"] = cloud_desc.public_dns_name
955
+ @deploydata["private_ip_address"] = cloud_desc.private_ip_address
956
+ @deploydata["private_dns_name"] = cloud_desc.private_dns_name
1505
957
 
1506
958
  notify
1507
959
  end
@@ -1510,13 +962,13 @@ module MU
1510
962
  # which will cause us to create certificates, DNS records and other artifacts with incorrect information which will cause our deploy to fail.
1511
963
  # The cloud_id is always correct so lets use 'cloud_desc' to get the correct IPs
1512
964
  if MU::Cloud::AWS::VPC.haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials']) or @deploydata["public_ip_address"].nil?
1513
- @config['canonical_ip'] = instance.private_ip_address
1514
- @deploydata["private_ip_address"] = instance.private_ip_address
1515
- return instance.private_ip_address
965
+ @config['canonical_ip'] = cloud_desc.private_ip_address
966
+ @deploydata["private_ip_address"] = cloud_desc.private_ip_address
967
+ return cloud_desc.private_ip_address
1516
968
  else
1517
- @config['canonical_ip'] = instance.public_ip_address
1518
- @deploydata["public_ip_address"] = instance.public_ip_address
1519
- return instance.public_ip_address
969
+ @config['canonical_ip'] = cloud_desc.public_ip_address
970
+ @deploydata["public_ip_address"] = cloud_desc.public_ip_address
971
+ return cloud_desc.public_ip_address
1520
972
  end
1521
973
  end
1522
974
 
@@ -1709,7 +1161,26 @@ module MU
1709
1161
  # Retrieves the Cloud provider's randomly generated Windows password
1710
1162
  # Will only work on stock Amazon Windows AMIs or custom AMIs that where created with Administrator Password set to random in EC2Config
1711
1163
  # return [String]: A password string.
1712
- def getWindowsAdminPassword
1164
+ def getWindowsAdminPassword(use_cache: true)
1165
+ @config['windows_auth_vault'] ||= {
1166
+ "vault" => @mu_name,
1167
+ "item" => "windows_credentials",
1168
+ "password_field" => "password"
1169
+ }
1170
+
1171
+ if use_cache
1172
+ begin
1173
+ win_admin_password = @groomer.getSecret(
1174
+ vault: @config['windows_auth_vault']['vault'],
1175
+ item: @config['windows_auth_vault']['item'],
1176
+ field: @config["windows_auth_vault"]["password_field"]
1177
+ )
1178
+
1179
+ return win_admin_password if win_admin_password
1180
+ rescue MU::Groomer::MuNoSuchSecret, MU::Groomer::RunError
1181
+ end
1182
+ end
1183
+
1713
1184
  if @cloud_id.nil?
1714
1185
  describe
1715
1186
  @cloud_id = cloud_desc.instance_id
@@ -1748,6 +1219,8 @@ module MU
1748
1219
  pem_bytes = File.open("#{ssh_keydir}/#{ssh_key_name}", 'rb') { |f| f.read }
1749
1220
  private_key = OpenSSL::PKey::RSA.new(pem_bytes)
1750
1221
  decrypted_password = private_key.private_decrypt(decoded)
1222
+ saveCredentials(decrypted_password)
1223
+
1751
1224
  return decrypted_password
1752
1225
  end
1753
1226
 
@@ -1821,60 +1294,37 @@ module MU
1821
1294
  # @param type [String]: Cloud storage type of the volume, if applicable
1822
1295
  # @param delete_on_termination [Boolean]: Value of delete_on_termination flag to set
1823
1296
  def addVolume(dev, size, type: "gp2", delete_on_termination: false)
1824
- if @cloud_id.nil? or @cloud_id.empty?
1825
- MU.log "#{self} didn't have a cloud id, couldn't determine 'active?' status", MU::ERR
1826
- return true
1297
+
1298
+ if setDeleteOntermination(dev, delete_on_termination)
1299
+ MU.log "A volume #{device} already attached to #{self}, skipping", MU::NOTICE
1300
+ return
1827
1301
  end
1828
- az = nil
1829
- MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_instances(
1830
- instance_ids: [@cloud_id]
1831
- ).reservations.each { |resp|
1832
- if !resp.nil? and !resp.instances.nil?
1833
- resp.instances.each { |instance|
1834
- az = instance.placement.availability_zone
1835
- mappings = MU.structToHash(instance.block_device_mappings)
1836
- mappings.each { |vol|
1837
- if vol[:ebs]
1838
- vol[:ebs].delete(:attach_time)
1839
- vol[:ebs].delete(:status)
1840
- end
1841
- }
1842
- mappings.each { |vol|
1843
- if vol[:device_name] == dev
1844
- MU.log "A volume #{dev} already attached to #{self}, skipping", MU::NOTICE
1845
- if vol[:ebs][:delete_on_termination] != delete_on_termination
1846
- vol[:ebs][:delete_on_termination] = delete_on_termination
1847
- MU.log "Setting delete_on_termination flag to #{delete_on_termination.to_s} on #{@mu_name}'s #{dev}"
1848
- MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).modify_instance_attribute(
1849
- instance_id: @cloud_id,
1850
- block_device_mappings: mappings
1851
- )
1852
- end
1853
- return
1854
- end
1855
- }
1856
- }
1857
- end
1858
- }
1302
+
1859
1303
  MU.log "Creating #{size}GB #{type} volume on #{dev} for #{@cloud_id}"
1860
1304
  creation = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).create_volume(
1861
- availability_zone: az,
1305
+ availability_zone: cloud_desc.placement.availability_zone,
1862
1306
  size: size,
1863
1307
  volume_type: type
1864
1308
  )
1865
- begin
1866
- sleep 3
1309
+
1310
+ MU.retrier(wait: 3, loop_if: Proc.new {
1867
1311
  creation = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_volumes(volume_ids: [creation.volume_id]).volumes.first
1868
1312
  if !["creating", "available"].include?(creation.state)
1869
1313
  raise MuError, "Saw state '#{creation.state}' while creating #{size}GB #{type} volume on #{dev} for #{@cloud_id}"
1870
1314
  end
1871
- end while creation.state != "available"
1315
+ creation.state != "available"
1316
+ })
1317
+
1872
1318
 
1873
1319
  if @deploy
1874
- MU::MommaCat.listStandardTags.each_pair { |key, value|
1875
- MU::Cloud::AWS.createTag(creation.volume_id, key, value, region: @config['region'], credentials: @config['credentials'])
1876
- }
1877
- MU::Cloud::AWS.createTag(creation.volume_id, "Name", "#{MU.deploy_id}-#{@config["name"].upcase}-#{dev.upcase}", region: @config['region'], credentials: @config['credentials'])
1320
+ MU::Cloud::AWS.createStandardTags(
1321
+ resource_id,
1322
+ region: @config['region'],
1323
+ credentials: @config['credentials'],
1324
+ optional: @config['optional_tags'],
1325
+ nametag: @mu_name+"-"+dev.upcase,
1326
+ othertags: @config['tags']
1327
+ )
1878
1328
  end
1879
1329
 
1880
1330
  attachment = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).attach_volume(
@@ -1893,29 +1343,7 @@ module MU
1893
1343
 
1894
1344
  # Set delete_on_termination, which for some reason is an instance
1895
1345
  # attribute and not on the attachment
1896
- mappings = MU.structToHash(cloud_desc.block_device_mappings)
1897
- changed = false
1898
-
1899
- mappings.each { |mapping|
1900
- if mapping[:ebs]
1901
- mapping[:ebs].delete(:attach_time)
1902
- mapping[:ebs].delete(:status)
1903
- end
1904
- if mapping[:device_name] == dev and
1905
- mapping[:ebs][:delete_on_termination] != delete_on_termination
1906
- changed = true
1907
- mapping[:ebs][:delete_on_termination] = delete_on_termination
1908
- end
1909
- }
1910
-
1911
- if changed
1912
- MU.log "Setting delete_on_termination flag to #{delete_on_termination.to_s} on #{@mu_name}'s #{dev}"
1913
- MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).modify_instance_attribute(
1914
- instance_id: @cloud_id,
1915
- block_device_mappings: mappings
1916
- )
1917
- end
1918
-
1346
+ setDeleteOntermination(dev, delete_on_termination)
1919
1347
  end
1920
1348
 
1921
1349
  # Determine whether the node in question exists at the Cloud provider
@@ -1953,13 +1381,13 @@ module MU
1953
1381
  # @param ip [String]: Request a specific IP address.
1954
1382
  # @param region [String]: The cloud provider region
1955
1383
  # @return [void]
1956
- def self.associateElasticIp(instance_id, classic: false, ip: nil, region: MU.curRegion)
1384
+ def self.associateElasticIp(instance_id, classic: false, ip: nil, region: MU.curRegion, credentials: nil)
1957
1385
  MU.log "associateElasticIp called: #{instance_id}, classic: #{classic}, ip: #{ip}, region: #{region}", MU::DEBUG
1958
1386
  elastic_ip = nil
1959
1387
  @eip_semaphore.synchronize {
1960
1388
  if !ip.nil?
1961
1389
  filters = [{name: "public-ip", values: [ip]}]
1962
- resp = MU::Cloud::AWS.ec2(region: region).describe_addresses(filters: filters)
1390
+ resp = MU::Cloud::AWS.ec2(region: region, credentials: credentials).describe_addresses(filters: filters)
1963
1391
  if @eips_used.include?(ip)
1964
1392
  is_free = false
1965
1393
  resp.addresses.each { |address|
@@ -1988,54 +1416,44 @@ module MU
1988
1416
  @eips_used << elastic_ip.public_ip
1989
1417
  MU.log "Associating Elastic IP #{elastic_ip.public_ip} with #{instance_id}", details: elastic_ip
1990
1418
  }
1991
- attempts = 0
1992
- begin
1419
+
1420
+ on_retry = Proc.new { |e|
1421
+ if e.class == Aws::EC2::Errors::ResourceAlreadyAssociated
1422
+ # A previous association attempt may have succeeded, albeit slowly.
1423
+ resp = MU::Cloud::AWS.ec2(region: region, credentials: credentials).describe_addresses(
1424
+ allocation_ids: [elastic_ip.allocation_id]
1425
+ )
1426
+ first_addr = resp.addresses.first
1427
+ if first_addr and first_addr.instance_id != instance_id
1428
+ raise MuError, "Tried to associate #{elastic_ip.public_ip} with #{instance_id}, but it's already associated with #{first_addr.instance_id}!"
1429
+ end
1430
+ end
1431
+ }
1432
+
1433
+ MU.retrier([Aws::EC2::Errors::IncorrectInstanceState, Aws::EC2::Errors::ResourceAlreadyAssociated], wait: 5, max: 6, on_retry: on_retry) {
1993
1434
  if classic
1994
- resp = MU::Cloud::AWS.ec2(region: region).associate_address(
1995
- instance_id: instance_id,
1996
- public_ip: elastic_ip.public_ip
1435
+ MU::Cloud::AWS.ec2(region: region, credentials: credentials).associate_address(
1436
+ instance_id: instance_id,
1437
+ public_ip: elastic_ip.public_ip
1997
1438
  )
1998
1439
  else
1999
- resp = MU::Cloud::AWS.ec2(region: region).associate_address(
2000
- instance_id: instance_id,
2001
- allocation_id: elastic_ip.allocation_id,
2002
- allow_reassociation: false
1440
+ MU::Cloud::AWS.ec2(region: region, credentials: credentials).associate_address(
1441
+ instance_id: instance_id,
1442
+ allocation_id: elastic_ip.allocation_id,
1443
+ allow_reassociation: false
2003
1444
  )
2004
1445
  end
2005
- rescue Aws::EC2::Errors::IncorrectInstanceState => e
2006
- attempts = attempts + 1
2007
- if attempts < 6
2008
- MU.log "Got #{e.message} associating #{elastic_ip.allocation_id} with #{instance_id}, retrying", MU::WARN
2009
- sleep 5
2010
- retry
2011
- end
2012
- raise MuError "#{e.message} associating #{elastic_ip.allocation_id} with #{instance_id}"
2013
- rescue Aws::EC2::Errors::ResourceAlreadyAssociated => e
2014
- # A previous association attempt may have succeeded, albeit slowly.
2015
- resp = MU::Cloud::AWS.ec2(region: region).describe_addresses(
2016
- allocation_ids: [elastic_ip.allocation_id]
2017
- )
2018
- first_addr = resp.addresses.first
2019
- if !first_addr.nil? and first_addr.instance_id == instance_id
2020
- MU.log "#{elastic_ip.public_ip} already associated with #{instance_id}", MU::WARN
2021
- else
2022
- MU.log "#{elastic_ip.public_ip} shows as already associated!", MU::ERR, details: resp
2023
- raise MuError, "#{elastic_ip.public_ip} shows as already associated with #{first_addr.instance_id}!"
2024
- end
2025
- end
1446
+ }
2026
1447
 
2027
- instance = MU::Cloud::AWS.ec2(region: region).describe_instances(instance_ids: [instance_id]).reservations.first.instances.first
2028
- waited = false
2029
- if instance.public_ip_address != elastic_ip.public_ip
2030
- waited = true
2031
- begin
2032
- sleep 10
2033
- MU.log "Waiting for Elastic IP association of #{elastic_ip.public_ip} to #{instance_id} to take effect", MU::NOTICE
2034
- instance = MU::Cloud::AWS.ec2(region: region).describe_instances(instance_ids: [instance_id]).reservations.first.instances.first
2035
- end while instance.public_ip_address != elastic_ip.public_ip
2036
- end
1448
+ loop_if = Proc.new {
1449
+ instance = find(cloud_id: instance_id, region: region, credentials: credentials).values.first
1450
+ instance.public_ip_address != elastic_ip.public_ip
1451
+ }
1452
+ MU.retrier(loop_if: loop_if, wait: 10, max: 3) {
1453
+ MU.log "Waiting for Elastic IP association of #{elastic_ip.public_ip} to #{instance_id} to take effect", MU::NOTICE
1454
+ }
2037
1455
 
2038
- MU.log "Elastic IP #{elastic_ip.public_ip} now associated with #{instance_id}" if waited
1456
+ MU.log "Elastic IP #{elastic_ip.public_ip} now associated with #{instance_id}"
2039
1457
 
2040
1458
  return elastic_ip.public_ip
2041
1459
  end
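For reference, the pattern adopted above (and repeated throughout this file) replaces hand-rolled begin/rescue/sleep/retry loops with MU.retrier. Below is a minimal sketch of the call shape as it appears in this diff; the handler and predicate names are illustrative, and the option semantics (including the ignoreme: list used later for delete_volume) live in MU.retrier itself:

    attempts = 0
    on_retry = Proc.new { |e|
      MU.log "Retrying after #{e.class.name}", MU::WARN    # illustrative handler
    }
    loop_if = Proc.new { attempts < 3 }                     # illustrative predicate

    MU.retrier([Aws::EC2::Errors::IncorrectInstanceState], wait: 5, max: 6,
               on_retry: on_retry, loop_if: loop_if) {
      attempts += 1
      # work to attempt goes here; as used in this file, the block is retried on
      # the listed exception classes and repeated while loop_if evaluates true
    }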
@@ -2117,193 +1535,105 @@ module MU
2117
1535
  }
2118
1536
  end
2119
1537
 
1538
+ # Return an instance's AWS-assigned IP addresses and hostnames.
1539
+ # @param instance [OpenStruct]
1540
+ # @param id [String]
1541
+ # @param region [String]
1542
+        # @param credentials [String]
1543
+ # @return [Array<Array>]
1544
+ def self.getAddresses(instance = nil, id: nil, region: MU.curRegion, credentials: nil)
1545
+ return nil if !instance and !id
1546
+
1547
+ instance ||= find(cloud_id: id, region: region, credentials: credentials).values.first
1548
+ return if !instance
1549
+
1550
+ ips = []
1551
+ names = []
1552
+ instance.network_interfaces.each { |iface|
1553
+ iface.private_ip_addresses.each { |ip|
1554
+ ips << ip.private_ip_address
1555
+ names << ip.private_dns_name
1556
+ if ip.association
1557
+ ips << ip.association.public_ip
1558
+ names << ip.association.public_dns_name
1559
+ end
1560
+ }
1561
+ }
1562
+
1563
+ [ips, names]
1564
+ end
1565
+
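A short usage sketch of the new helper, mirroring how terminateInstance below consumes it (the instance id is illustrative):

    ips, names = MU::Cloud::AWS::Server.getAddresses(id: "i-0123456789abcdef0",
                                                     region: "us-east-1")
    targets = ips + names   # combined list, used below for DNS and known_hosts cleanup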
2120
1566
  # Terminate an instance.
2121
1567
  # @param instance [OpenStruct]: The cloud provider's description of the instance.
2122
1568
  # @param id [String]: The cloud provider's identifier for the instance, to use if the full description is not available.
2123
1569
  # @param region [String]: The cloud provider region
2124
1570
  # @return [void]
2125
1571
  def self.terminateInstance(instance: nil, noop: false, id: nil, onlycloud: false, region: MU.curRegion, deploy_id: MU.deploy_id, mu_name: nil, credentials: nil)
2126
- ips = Array.new
2127
- if !instance
2128
- if id
2129
- begin
2130
- resp = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_instances(instance_ids: [id])
2131
- rescue Aws::EC2::Errors::InvalidInstanceIDNotFound
2132
- MU.log "Instance #{id} no longer exists", MU::WARN
2133
- end
2134
- if !resp.nil? and !resp.reservations.nil? and !resp.reservations.first.nil?
2135
- instance = resp.reservations.first.instances.first
2136
- ips << instance.public_ip_address if !instance.public_ip_address.nil?
2137
- ips << instance.private_ip_address if !instance.private_ip_address.nil?
2138
- end
2139
- else
2140
- MU.log "You must supply an instance handle or id to terminateInstance", MU::ERR
2141
- end
2142
- else
2143
- id = instance.instance_id
2144
- end
2145
- if !MU.deploy_id.empty?
2146
- deploy_dir = File.expand_path("#{MU.dataDir}/deployments/"+MU.deploy_id)
2147
- if Dir.exist?(deploy_dir) and !noop
2148
- FileUtils.touch("#{deploy_dir}/.cleanup-"+id)
2149
- end
1572
+ if !id and !instance
1573
+ MU.log "You must supply an instance handle or id to terminateInstance", MU::ERR
1574
+ return
2150
1575
  end
1576
+ instance ||= find(cloud_id: id, region: region, credentials: credentials).values.first
1577
+ return if !instance
2151
1578
 
2152
- server_obj = MU::MommaCat.findStray(
2153
- "AWS",
2154
- "servers",
2155
- region: region,
2156
- deploy_id: deploy_id,
2157
- cloud_id: id,
2158
- mu_name: mu_name
2159
- ).first
1579
+ id ||= instance.instance_id
1580
+ MU::MommaCat.lock(".cleanup-"+id)
2160
1581
 
2161
- begin
2162
- MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_instances(instance_ids: [id])
2163
- rescue Aws::EC2::Errors::InvalidInstanceIDNotFound
2164
- MU.log "Instance #{id} no longer exists", MU::DEBUG
2165
- end
2166
-
2167
- if !server_obj.nil? and MU::Cloud::AWS.hosted? and !MU::Cloud::AWS.isGovCloud?
2168
- # DNS cleanup is now done in MU::Cloud::DNSZone. Keeping this for now
2169
- cleaned_dns = false
2170
- mu_name = server_obj.mu_name
2171
- mu_zone = MU::Cloud::DNSZone.find(cloud_id: "platform-mu", credentials: credentials).values.first
2172
- if !mu_zone.nil?
2173
- zone_rrsets = []
2174
- rrsets = MU::Cloud::AWS.route53(credentials: credentials).list_resource_record_sets(hosted_zone_id: mu_zone.id)
2175
- rrsets.resource_record_sets.each{ |record|
2176
- zone_rrsets << record
2177
- }
1582
+ ips, names = getAddresses(instance, region: region, credentials: credentials)
1583
+        targets = ips + names
2178
1584
 
2179
- # AWS API returns a maximum of 100 results. DNS zones are likely to have more than 100 records, lets page and make sure we grab all records in a given zone
2180
- while rrsets.next_record_name && rrsets.next_record_type
2181
- rrsets = MU::Cloud::AWS.route53(credentials: credentials).list_resource_record_sets(hosted_zone_id: mu_zone.id, start_record_name: rrsets.next_record_name, start_record_type: rrsets.next_record_type)
2182
- rrsets.resource_record_sets.each{ |record|
2183
- zone_rrsets << record
2184
- }
2185
- end
2186
- end
2187
- if !onlycloud and !mu_name.nil?
2188
- # DNS cleanup is now done in MU::Cloud::DNSZone. Keeping this for now
2189
- if !zone_rrsets.nil? and !zone_rrsets.empty?
2190
- zone_rrsets.each { |rrset|
2191
- if rrset.name.match(/^#{mu_name.downcase}\.server\.#{MU.myInstanceId}\.platform-mu/i)
2192
- rrset.resource_records.each { |record|
2193
- MU::Cloud::DNSZone.genericMuDNSEntry(name: mu_name, target: record.value, cloudclass: MU::Cloud::Server, delete: true)
2194
- cleaned_dns = true
2195
- }
2196
- end
2197
- }
2198
- end
2199
-
2200
- if !noop
2201
- if !server_obj.nil? and !server_obj.config.nil?
2202
- MU.mommacat.notify(MU::Cloud::Server.cfg_plural, server_obj.config['name'], {}, mu_name: server_obj.mu_name, remove: true) if MU.mommacat
2203
- end
2204
- end
1585
+ server_obj = MU::MommaCat.findStray(
1586
+ "AWS",
1587
+ "servers",
1588
+ region: region,
1589
+ deploy_id: deploy_id,
1590
+ cloud_id: id,
1591
+ mu_name: mu_name,
1592
+ dummy_ok: true
1593
+ ).first
2205
1594
 
2206
- # If we didn't manage to find this instance's Route53 entry by sifting
2207
- # deployment metadata, see if we can get it with the Name tag.
2208
- if !mu_zone.nil? and !cleaned_dns and !instance.nil?
2209
- instance.tags.each { |tag|
2210
- if tag.key == "Name"
2211
- zone_rrsets.each { |rrset|
2212
- if rrset.name.match(/^#{tag.value.downcase}\.server\.#{MU.myInstanceId}\.platform-mu/i)
2213
- rrset.resource_records.each { |record|
2214
- MU::Cloud::DNSZone.genericMuDNSEntry(name: tag.value, target: record.value, cloudclass: MU::Cloud::Server, delete: true) if !noop
2215
- }
2216
- end
2217
- }
2218
- end
2219
- }
2220
- end
2221
- end
1595
+ if MU::Cloud::AWS.hosted? and !MU::Cloud::AWS.isGovCloud? and server_obj
1596
+ targets.each { |target|
1597
+ MU::Cloud::DNSZone.genericMuDNSEntry(name: server_obj.mu_name, target: target, cloudclass: MU::Cloud::Server, delete: true, noop: noop)
1598
+ }
2222
1599
  end
2223
1600
 
2224
- if ips.size > 0 and !onlycloud
2225
- known_hosts_files = [Etc.getpwuid(Process.uid).dir+"/.ssh/known_hosts"]
2226
- if Etc.getpwuid(Process.uid).name == "root" and !MU.inGem?
2227
- begin
2228
- known_hosts_files << Etc.getpwnam("nagios").dir+"/.ssh/known_hosts"
2229
- rescue ArgumentError
2230
- # we're in a non-nagios environment and that's ok
2231
- end
2232
- end
2233
- known_hosts_files.each { |known_hosts|
2234
- next if !File.exist?(known_hosts)
2235
- MU.log "Cleaning up #{ips} from #{known_hosts}"
2236
- if !noop
2237
- File.open(known_hosts, File::CREAT|File::RDWR, 0644) { |f|
2238
- f.flock(File::LOCK_EX)
2239
- newlines = Array.new
2240
- f.readlines.each { |line|
2241
- ip_match = false
2242
- ips.each { |ip|
2243
- if line.match(/(^|,| )#{ip}( |,)/)
2244
- MU.log "Expunging #{ip} from #{known_hosts}"
2245
- ip_match = true
2246
- end
2247
- }
2248
- newlines << line if !ip_match
2249
- }
2250
- f.rewind
2251
- f.truncate(0)
2252
- f.puts(newlines)
2253
- f.flush
2254
- f.flock(File::LOCK_UN)
2255
- }
2256
- end
1601
+ if targets.size > 0 and !onlycloud
1602
+ MU::Master.removeInstanceFromEtcHosts(server_obj.mu_name) if !noop and server_obj
1603
+ targets.each { |target|
1604
+ next if !target.match(/^\d+\.\d+\.\d+\.\d+$/)
1605
+ MU::Master.removeIPFromSSHKnownHosts(target, noop: noop)
2257
1606
  }
2258
1607
  end
2259
1608
 
2260
- return if instance.nil?
1609
+ on_retry = Proc.new {
1610
+ instance = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_instances(instance_ids: [instance.instance_id]).reservations.first.instances.first
1611
+ if instance.state.name == "terminated"
1612
+ MU.log "#{instance.instance_id}#{server_obj ? " ("+server_obj.mu_name+")" : ""} has already been terminated, skipping"
1613
+ MU::MommaCat.unlock(".cleanup-"+id)
1614
+ return
1615
+ end
1616
+ }
2261
1617
 
2262
- name = ""
2263
- instance.tags.each { |tag|
2264
- name = tag.value if tag.key == "Name"
1618
+ loop_if = Proc.new {
1619
+ instance = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_instances(instance_ids: [instance.instance_id]).reservations.first.instances.first
1620
+ instance.state.name != "terminated"
2265
1621
  }
2266
1622
 
2267
- if instance.state.name == "terminated"
2268
- MU.log "#{instance.instance_id} (#{name}) has already been terminated, skipping"
2269
- else
2270
- if instance.state.name == "terminating"
2271
- MU.log "#{instance.instance_id} (#{name}) already terminating, waiting"
2272
- elsif instance.state.name != "running" and instance.state.name != "pending" and instance.state.name != "stopping" and instance.state.name != "stopped"
2273
- MU.log "#{instance.instance_id} (#{name}) is in state #{instance.state.name}, waiting"
2274
- else
2275
- MU.log "Terminating #{instance.instance_id} (#{name}) #{noop}"
2276
- if !noop
2277
- begin
2278
- MU::Cloud::AWS.ec2(credentials: credentials, region: region).modify_instance_attribute(
2279
- instance_id: instance.instance_id,
2280
- disable_api_termination: {value: false}
2281
- )
2282
- MU::Cloud::AWS.ec2(credentials: credentials, region: region).terminate_instances(instance_ids: [instance.instance_id])
2283
- # Small race window here with the state changing from under us
2284
- rescue Aws::EC2::Errors::IncorrectInstanceState => e
2285
- resp = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_instances(instance_ids: [id])
2286
- if !resp.nil? and !resp.reservations.nil? and !resp.reservations.first.nil?
2287
- instance = resp.reservations.first.instances.first
2288
- if !instance.nil? and instance.state.name != "terminated" and instance.state.name != "terminating"
2289
- sleep 5
2290
- retry
2291
- end
2292
- end
2293
- rescue Aws::EC2::Errors::InternalError => e
2294
- MU.log "Error #{e.inspect} while Terminating instance #{instance.instance_id} (#{name}), retrying", MU::WARN, details: e.inspect
2295
- sleep 5
2296
- retry
2297
- end
2298
- end
2299
- end
2300
- while instance.state.name != "terminated" and !noop
2301
- sleep 30
2302
- instance_response = MU::Cloud::AWS.ec2(credentials: credentials, region: region).describe_instances(instance_ids: [instance.instance_id])
2303
- instance = instance_response.reservations.first.instances.first
2304
- end
2305
- MU.log "#{instance.instance_id} (#{name}) terminated" if !noop
1623
+ MU.log "Terminating #{instance.instance_id}#{server_obj ? " ("+server_obj.mu_name+")" : ""}"
1624
+ if !noop
1625
+ MU.retrier([Aws::EC2::Errors::IncorrectInstanceState, Aws::EC2::Errors::InternalError], wait: 30, max: 60, loop_if: loop_if, on_retry: on_retry) {
1626
+ MU::Cloud::AWS.ec2(credentials: credentials, region: region).modify_instance_attribute(
1627
+ instance_id: instance.instance_id,
1628
+ disable_api_termination: {value: false}
1629
+ )
1630
+ MU::Cloud::AWS.ec2(credentials: credentials, region: region).terminate_instances(instance_ids: [instance.instance_id])
1631
+ }
2306
1632
  end
1633
+
1634
+ MU.log "#{instance.instance_id}#{server_obj ? " ("+server_obj.mu_name+")" : ""} terminated" if !noop
1635
+ MU::MommaCat.unlock(".cleanup-"+id)
1636
+
2307
1637
  end
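The reworked terminateInstance is keyword-driven; createImage further down in this file invokes it in essentially this form (identifiers here are illustrative):

    MU::Cloud::AWS::Server.terminateInstance(
      id: "i-0123456789abcdef0",   # cloud id of the node to remove
      region: "us-east-1",
      deploy_id: MU.deploy_id,
      credentials: nil             # use the default credential set
    )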
2308
1638
 
2309
1639
  # Return a BoK-style config hash describing a NAT instance. We use this
@@ -2479,6 +1809,50 @@ module MU
2479
1809
  size
2480
1810
  end
2481
1811
 
1812
+ # Boilerplate generation of an instance role
1813
+ # @param server [Hash]: The BoK-style config hash for a +Server+ or +ServerPool+
1814
+ # @param configurator [MU::Config]
1815
+ def self.generateStandardRole(server, configurator)
1816
+ role = {
1817
+ "name" => server["name"],
1818
+ "credentials" => server["credentials"],
1819
+ "can_assume" => [
1820
+ {
1821
+ "entity_id" => "ec2.amazonaws.com",
1822
+ "entity_type" => "service"
1823
+ }
1824
+ ],
1825
+ "policies" => [
1826
+ {
1827
+ "name" => "MuSecrets",
1828
+ "permissions" => ["s3:GetObject"],
1829
+ "targets" => [
1830
+ {
1831
+ "identifier" => 'arn:'+(MU::Cloud::AWS.isGovCloud?(server['region']) ? "aws-us-gov" : "aws")+':s3:::'+MU::Cloud::AWS.adminBucketName(server['credentials'])+'/Mu_CA.pem'
1832
+ }
1833
+ ]
1834
+ }
1835
+ ]
1836
+ }
1837
+ if server['iam_policies']
1838
+ role['iam_policies'] = server['iam_policies'].dup
1839
+ end
1840
+ if server['canned_iam_policies']
1841
+ role['import'] = server['canned_iam_policies'].dup
1842
+ end
1843
+ if server['iam_role']
1844
+ # XXX maybe break this down into policies and add those?
1845
+ end
1846
+
1847
+ configurator.insertKitten(role, "roles")
1848
+
1849
+ server["dependencies"] ||= []
1850
+ server["dependencies"] << {
1851
+ "type" => "role",
1852
+ "name" => server["name"]
1853
+ }
1854
+ end
1855
+
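A sketch of what the extracted helper does to a server's config hash during validation; the server values are illustrative, and configurator stands in for the active MU::Config instance:

    server = { "name" => "web", "credentials" => nil, "region" => "us-east-1" }
    MU::Cloud::AWS::Server.generateStandardRole(server, configurator)
    # inserts a companion "roles" resource granting s3:GetObject on Mu_CA.pem, and
    # leaves server["dependencies"] containing { "type" => "role", "name" => "web" }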
2482
1856
  # Cloud-specific pre-processing of {MU::Config::BasketofKittens::servers}, bare and unvalidated.
2483
1857
  # @param server [Hash]: The resource to process and validate
2484
1858
  # @param configurator [MU::Config]: The overall deployment configurator of which this resource is a member
@@ -2499,43 +1873,7 @@ module MU
2499
1873
  ok = false
2500
1874
  end
2501
1875
  else
2502
- role = {
2503
- "name" => server["name"],
2504
- "credentials" => server["credentials"],
2505
- "can_assume" => [
2506
- {
2507
- "entity_id" => "ec2.amazonaws.com",
2508
- "entity_type" => "service"
2509
- }
2510
- ],
2511
- "policies" => [
2512
- {
2513
- "name" => "MuSecrets",
2514
- "permissions" => ["s3:GetObject"],
2515
- "targets" => [
2516
- {
2517
- "identifier" => 'arn:'+(MU::Cloud::AWS.isGovCloud?(server['region']) ? "aws-us-gov" : "aws")+':s3:::'+MU::Cloud::AWS.adminBucketName(server['credentials'])+'/Mu_CA.pem'
2518
- }
2519
- ]
2520
- }
2521
- ]
2522
- }
2523
- if server['iam_policies']
2524
- role['iam_policies'] = server['iam_policies'].dup
2525
- end
2526
- if server['canned_iam_policies']
2527
- role['import'] = server['canned_iam_policies'].dup
2528
- end
2529
- if server['iam_role']
2530
- # XXX maybe break this down into policies and add those?
2531
- end
2532
-
2533
- configurator.insertKitten(role, "roles")
2534
- server["dependencies"] ||= []
2535
- server["dependencies"] << {
2536
- "type" => "role",
2537
- "name" => server["name"]
2538
- }
1876
+ generateStandardRole(server, configurator)
2539
1877
  end
2540
1878
  if !server['create_image'].nil?
2541
1879
  if server['create_image'].has_key?('copy_to_regions') and
@@ -2547,12 +1885,12 @@ module MU
2547
1885
  end
2548
1886
  end
2549
1887
 
2550
- server['ami_id'] ||= server['image_id']
1888
+ server['image_id'] ||= server['ami_id']
2551
1889
 
2552
- if server['ami_id'].nil?
1890
+ if server['image_id'].nil?
2553
1891
  img_id = MU::Cloud.getStockImage("AWS", platform: server['platform'], region: server['region'])
2554
1892
  if img_id
2555
- server['ami_id'] = configurator.getTail("server"+server['name']+"AMI", value: img_id, prettyname: "server"+server['name']+"AMI", cloudtype: "AWS::EC2::Image::Id")
1893
+ server['image_id'] = configurator.getTail("server"+server['name']+"AMI", value: img_id, prettyname: "server"+server['name']+"AMI", cloudtype: "AWS::EC2::Image::Id")
2556
1894
  else
2557
1895
  MU.log "No AMI specified for #{server['name']} and no default available for platform #{server['platform']} in region #{server['region']}", MU::ERR, details: server
2558
1896
  ok = false
@@ -2561,22 +1899,16 @@ module MU
2561
1899
 
2562
1900
  if !server["loadbalancers"].nil?
2563
1901
  server["loadbalancers"].each { |lb|
2564
- if lb["concurrent_load_balancer"] != nil
1902
+ lb["name"] ||= lb["concurrent_load_balancer"]
1903
+ if lb["name"]
2565
1904
  server["dependencies"] << {
2566
- "type" => "loadbalancer",
2567
- "name" => lb["concurrent_load_balancer"]
1905
+ "type" => "loadbalancer",
1906
+ "name" => lb["name"]
2568
1907
  }
2569
1908
  end
2570
1909
  }
2571
1910
  end
2572
1911
 
2573
- if !server["vpc"].nil?
2574
- if server["vpc"]["subnet_name"].nil? and server["vpc"]["subnet_id"].nil? and server["vpc"]["subnet_pref"].nil?
2575
- MU.log "A server VPC block must specify a target subnet", MU::ERR
2576
- ok = false
2577
- end
2578
- end
2579
-
2580
1912
  ok
2581
1913
  end
2582
1914
 
@@ -2604,10 +1936,11 @@ module MU
2604
1936
  resp = MU::Cloud::AWS.ec2(region: region, credentials: credentials).describe_volumes(volume_ids: [volume.volume_id])
2605
1937
  volume = resp.data.volumes.first
2606
1938
  end
2607
- name = ""
1939
+ name = nil
2608
1940
  volume.tags.each { |tag|
2609
1941
  name = tag.value if tag.key == "Name"
2610
1942
  }
1943
+ name ||= volume.volume_id
2611
1944
 
2612
1945
  MU.log("Deleting volume #{volume.volume_id} (#{name})")
2613
1946
  if !noop
@@ -2630,31 +1963,405 @@ module MU
2630
1963
  end
2631
1964
  end
2632
1965
 
2633
- retries = 0
2634
1966
  begin
2635
- MU::Cloud::AWS.ec2(region: region, credentials: credentials).delete_volume(volume_id: volume.volume_id)
2636
- rescue Aws::EC2::Errors::IncorrectState => e
2637
- MU.log "Volume #{volume.volume_id} (#{name}) in incorrect state (#{e.message}), will retry", MU::WARN
2638
- sleep 30
2639
- retry
2640
- rescue Aws::EC2::Errors::InvalidVolumeNotFound
2641
- MU.log "Volume #{volume.volume_id} (#{name}) disappeared before I could remove it!", MU::WARN
1967
+ MU.retrier([Aws::EC2::Errors::IncorrectState, Aws::EC2::Errors::VolumeInUse], ignoreme: [Aws::EC2::Errors::InvalidVolumeNotFound], wait: 30, max: 10){
1968
+ MU::Cloud::AWS.ec2(region: region, credentials: credentials).delete_volume(volume_id: volume.volume_id)
1969
+ }
2642
1970
  rescue Aws::EC2::Errors::VolumeInUse
2643
- if retries < 10
2644
- volume.attachments.each { |attachment|
2645
- MU.log "#{volume.volume_id} is attached to #{attachment.instance_id} as #{attachment.device}", MU::NOTICE
2646
- }
2647
- MU.log "Volume '#{name}' is still attached, waiting...", MU::NOTICE
2648
- sleep 30
2649
- retries = retries + 1
2650
- retry
1971
+ MU.log "Failed to delete #{name}", MU::ERR
1972
+ end
1973
+
1974
+ end
1975
+ end
1976
+ private_class_method :delete_volume
1977
+
1978
+ # Given some combination of a base image, BoK-configured storage, and
1979
+ # ephemeral devices, return the structure passed to EC2 to declare
1980
+      # block device mappings.
1981
+ # @param image_id [String]
1982
+ # @param storage [Array]
1983
+ # @param add_ephemeral [Boolean]
1984
+ # @param region [String]
1985
+ # @param credentials [String]
1986
+ def self.configureBlockDevices(image_id: nil, storage: nil, add_ephemeral: true, region: MU.myRegion, credentials: nil)
1987
+ ext_disks = {}
1988
+
1989
+ # Figure out which devices are embedded in the AMI already.
1990
+ if image_id
1991
+ image = MU::Cloud::AWS.ec2(region: region, credentials: credentials).describe_images(image_ids: [image_id]).images.first
1992
+ if !image.block_device_mappings.nil?
1993
+ image.block_device_mappings.each { |disk|
1994
+ if !disk.device_name.nil? and !disk.device_name.empty? and !disk.ebs.nil? and !disk.ebs.empty?
1995
+ ext_disks[disk.device_name] = MU.structToHash(disk.ebs)
1996
+ end
1997
+ }
1998
+ end
1999
+ end
2000
+
2001
+ configured_storage = []
2002
+ if storage
2003
+ storage.each { |vol|
2004
+ # Drop the "encrypted" flag if a snapshot for this device exists
2005
+ # in the AMI, even if they both agree about the value of said
2006
+ # flag. Apparently that's a thing now.
2007
+ if ext_disks.has_key?(vol["device"])
2008
+ if ext_disks[vol["device"]].has_key?(:snapshot_id)
2009
+ vol.delete("encrypted")
2010
+ end
2011
+ end
2012
+ mapping, _cfm_mapping = MU::Cloud::AWS::Server.convertBlockDeviceMapping(vol)
2013
+ configured_storage << mapping
2014
+ }
2015
+ end
2016
+
2017
+ configured_storage.concat(@ephemeral_mappings) if add_ephemeral
2018
+
2019
+ configured_storage
2020
+ end
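A usage sketch for the new configureBlockDevices helper; the AMI id and the "size" key are illustrative, while "device" matches the BoK storage key referenced above:

    mappings = MU::Cloud::AWS::Server.configureBlockDevices(
      image_id: "ami-0abc1234567890def",
      storage: [ { "device" => "/dev/xvdf", "size" => 100 } ],
      add_ephemeral: false,   # true additionally folds in the class's stock ephemeral mappings
      region: "us-east-1"
    )
    # the result is shaped for EC2's block_device_mappings parameter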
2021
+
2022
+ private
2023
+
2024
+ def bootstrapGroomer
2025
+ if (@config['groom'].nil? or @config['groom']) and !@groomer.haveBootstrapped?
2026
+ MU.retrier([BootstrapTempFail], wait: 45) {
2027
+ if windows?
2028
+ # kick off certificate generation early; WinRM will need it
2029
+ @deploy.nodeSSLCerts(self)
2030
+ @deploy.nodeSSLCerts(self, true) if @config.has_key?("basis")
2031
+ session = getWinRMSession(50, 60, reboot_on_problems: true)
2032
+ initialWinRMTasks(session)
2033
+ begin
2034
+ session.close
2035
+ rescue StandardError
2036
+ # session.close is allowed to fail- we're probably rebooting
2037
+ end
2651
2038
  else
2652
- MU.log "Failed to delete #{name}", MU::ERR
2039
+ session = getSSHSession(40, 30)
2040
+ initialSSHTasks(session)
2041
+ end
2042
+ }
2043
+ end
2044
+
2045
+ # See if this node already exists in our config management. If it
2046
+ # does, we're done.
2047
+
2048
+ if MU.inGem?
2049
+ MU.log "Deploying from a gem, not grooming"
2050
+ elsif @config['groom'].nil? or @config['groom']
2051
+ if @groomer.haveBootstrapped?
2052
+ MU.log "Node #{@mu_name} has already been bootstrapped, skipping groomer setup.", MU::NOTICE
2053
+ else
2054
+ begin
2055
+ @groomer.bootstrap
2056
+ rescue MU::Groomer::RunError
2057
+ return false
2653
2058
  end
2654
2059
  end
2060
+ @groomer.saveDeployData
2061
+ end
2062
+
2063
+ true
2064
+ end
2065
+
2066
+ def saveCredentials(win_admin_password = nil)
2067
+ ec2config_password = nil
2068
+ sshd_password = nil
2069
+ if windows?
2070
+ if @config['use_cloud_provider_windows_password']
2071
+ win_admin_password ||= getWindowsAdminPassword
2072
+ elsif @config['windows_auth_vault'] and !@config['windows_auth_vault'].empty?
2073
+ if @config["windows_auth_vault"].has_key?("password_field")
2074
+ win_admin_password ||= @groomer.getSecret(
2075
+ vault: @config['windows_auth_vault']['vault'],
2076
+ item: @config['windows_auth_vault']['item'],
2077
+ field: @config["windows_auth_vault"]["password_field"]
2078
+ )
2079
+ else
2080
+ win_admin_password ||= getWindowsAdminPassword
2081
+ end
2082
+
2083
+ if @config["windows_auth_vault"].has_key?("ec2config_password_field")
2084
+ ec2config_password = @groomer.getSecret(
2085
+ vault: @config['windows_auth_vault']['vault'],
2086
+ item: @config['windows_auth_vault']['item'],
2087
+ field: @config["windows_auth_vault"]["ec2config_password_field"]
2088
+ )
2089
+ end
2090
+
2091
+ if @config["windows_auth_vault"].has_key?("sshd_password_field")
2092
+ sshd_password = @groomer.getSecret(
2093
+ vault: @config['windows_auth_vault']['vault'],
2094
+ item: @config['windows_auth_vault']['item'],
2095
+ field: @config["windows_auth_vault"]["sshd_password_field"]
2096
+ )
2097
+ end
2098
+ end
2099
+
2100
+ win_admin_password ||= MU.generateWindowsPassword
2101
+ ec2config_password ||= MU.generateWindowsPassword
2102
+ sshd_password ||= MU.generateWindowsPassword
2103
+
2104
+ # We're creating the vault here so when we run
2105
+ # MU::Cloud::Server.initialSSHTasks and we need to set the Windows
2106
+ # Admin password we can grab it from said vault.
2107
+ creds = {
2108
+ "username" => @config['windows_admin_username'],
2109
+ "password" => win_admin_password,
2110
+ "ec2config_username" => "ec2config",
2111
+ "ec2config_password" => ec2config_password,
2112
+ "sshd_username" => "sshd_service",
2113
+ "sshd_password" => sshd_password
2114
+ }
2115
+ @groomer.saveSecret(vault: @mu_name, item: "windows_credentials", data: creds, permissions: "name:#{@mu_name}")
2116
+ end
2117
+ end
2118
+
2119
+ def haveElasticIP?
2120
+ if !cloud_desc.public_ip_address.nil?
2121
+ begin
2122
+ resp = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_addresses(public_ips: [cloud_desc.public_ip_address])
2123
+ if resp.addresses.size > 0 and resp.addresses.first.instance_id == @cloud_id
2124
+ return true
2125
+ end
2126
+ rescue Aws::EC2::Errors::InvalidAddressNotFound
2127
+ # XXX this is ok to ignore, it means the public IP isn't Elastic
2128
+ end
2129
+ end
2130
+
2131
+ false
2132
+ end
2133
+
2134
+ def configureNetworking
2135
+ if !@config['static_ip'].nil?
2136
+ if !@config['static_ip']['ip'].nil?
2137
+ MU::Cloud::AWS::Server.associateElasticIp(@cloud_id, classic: @vpc.nil?, ip: @config['static_ip']['ip'])
2138
+ elsif !haveElasticIP?
2139
+ MU::Cloud::AWS::Server.associateElasticIp(@cloud_id, classic: @vpc.nil?)
2140
+ end
2141
+ end
2142
+
2143
+ if !@vpc.nil? and @config.has_key?("vpc")
2144
+ subnet = @vpc.getSubnet(cloud_id: cloud_desc.subnet_id)
2145
+
2146
+ _nat_ssh_key, _nat_ssh_user, nat_ssh_host, _canonical_ip, _ssh_user, _ssh_key_name = getSSHConfig
2147
+ if subnet.private? and !nat_ssh_host and !MU::Cloud::AWS::VPC.haveRouteToInstance?(cloud_desc, region: @config['region'], credentials: @config['credentials'])
2148
+ raise MuError, "#{@mu_name} is in a private subnet (#{subnet}), but has no bastion host configured, and I have no other route to it"
2149
+ end
2150
+
2151
+ # If we've asked for additional subnets (and this @config is not a
2152
+ # member of a Server Pool, which has different semantics), create
2153
+ # extra interfaces to accomodate.
2154
+ if !@config['vpc']['subnets'].nil? and @config['basis'].nil?
2155
+ device_index = 1
2156
+ mySubnets.each { |s|
2157
+ next if s.cloud_id == cloud_desc.subnet_id
2158
+
2159
+ if cloud_desc.placement.availability_zone != s.az
2160
+ MU.log "Cannot create interface in subnet #{s.to_s} for #{@mu_name} due to AZ mismatch", MU::WARN
2161
+ next
2162
+ end
2163
+ MU.log "Adding network interface on subnet #{s.cloud_id} for #{@mu_name}"
2164
+ iface = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).create_network_interface(subnet_id: s.cloud_id).network_interface
2165
+ MU::Cloud::AWS.createStandardTags(
2166
+ iface.network_interface_id,
2167
+ region: @config['region'],
2168
+ credentials: @config['credentials'],
2169
+ optional: @config['optional_tags'],
2170
+ nametag: @mu_name+"-ETH"+device_index.to_s,
2171
+ othertags: @config['tags']
2172
+ )
2173
+
2174
+ MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).attach_network_interface(
2175
+ network_interface_id: iface.network_interface_id,
2176
+ instance_id: cloud_desc.instance_id,
2177
+ device_index: device_index
2178
+ )
2179
+ device_index = device_index + 1
2180
+ }
2181
+ cloud_desc(use_cache: false)
2182
+ end
2183
+ end
2184
+
2185
+ [:private_dns_name, :public_dns_name, :private_ip_address, :public_ip_address].each { |field|
2186
+ @config[field.to_s] = cloud_desc.send(field)
2187
+ }
2188
+
2189
+ if !@config['add_private_ips'].nil?
2190
+ cloud_desc.network_interfaces.each { |int|
2191
+ if int.private_ip_address == cloud_desc.private_ip_address and int.private_ip_addresses.size < (@config['add_private_ips'] + 1)
2192
+ MU.log "Adding #{@config['add_private_ips']} extra private IP addresses to #{cloud_desc.instance_id}"
2193
+ MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).assign_private_ip_addresses(
2194
+ network_interface_id: int.network_interface_id,
2195
+ secondary_private_ip_address_count: @config['add_private_ips'],
2196
+ allow_reassignment: false
2197
+ )
2198
+ end
2199
+ }
2200
+ end
2201
+ end
2202
+
2203
+ def tagVolumes
2204
+ volumes = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_volumes(filters: [name: "attachment.instance-id", values: [@cloud_id]])
2205
+ volumes.each { |vol|
2206
+ vol.volumes.each { |volume|
2207
+ volume.attachments.each { |attachment|
2208
+ MU::Cloud::AWS.createStandardTags(
2209
+ attachment.volume_id,
2210
+ region: @config['region'],
2211
+ credentials: @config['credentials'],
2212
+ optional: @config['optional_tags'],
2213
+ nametag: ["/dev/sda", "/dev/sda1"].include?(attachment.device) ? "ROOT-"+@mu_name : @mu_name+"-"+attachment.device.upcase,
2214
+ othertags: @config['tags']
2215
+ )
2216
+
2217
+ }
2218
+ }
2219
+ }
2220
+ end
2221
+
2222
+ # If we came up via AutoScale, the Alarm module won't have had our
2223
+ # instance ID to associate us with itself. So invoke that here.
2224
+ # XXX might be possible to do this with regular alarm resources and
2225
+ # dependencies now
2226
+ def setAlarms
2227
+ if !@config['basis'].nil? and @config["alarms"] and !@config["alarms"].empty?
2228
+ @config["alarms"].each { |alarm|
2229
+ alarm_obj = MU::MommaCat.findStray(
2230
+ "AWS",
2231
+ "alarms",
2232
+ region: @config["region"],
2233
+ deploy_id: @deploy.deploy_id,
2234
+ name: alarm['name']
2235
+ ).first
2236
+ alarm["dimensions"] = [{:name => "InstanceId", :value => @cloud_id}]
2237
+
2238
+ if alarm["enable_notifications"]
2239
+ topic_arn = MU::Cloud::AWS::Notification.createTopic(alarm["notification_group"], region: @config["region"], credentials: @config['credentials'])
2240
+ MU::Cloud::AWS::Notification.subscribe(arn: topic_arn, protocol: alarm["notification_type"], endpoint: alarm["notification_endpoint"], region: @config["region"], credentials: @config["credentials"])
2241
+ alarm["alarm_actions"] = [topic_arn]
2242
+ alarm["ok_actions"] = [topic_arn]
2243
+ end
2244
+
2245
+ alarm_name = alarm_obj ? alarm_obj.cloud_id : "#{@mu_name}-#{alarm['name']}".upcase
2246
+
2247
+ MU::Cloud::AWS::Alarm.setAlarm(
2248
+ name: alarm_name,
2249
+ ok_actions: alarm["ok_actions"],
2250
+ alarm_actions: alarm["alarm_actions"],
2251
+ insufficient_data_actions: alarm["no_data_actions"],
2252
+ metric_name: alarm["metric_name"],
2253
+ namespace: alarm["namespace"],
2254
+ statistic: alarm["statistic"],
2255
+ dimensions: alarm["dimensions"],
2256
+ period: alarm["period"],
2257
+ unit: alarm["unit"],
2258
+ evaluation_periods: alarm["evaluation_periods"],
2259
+ threshold: alarm["threshold"],
2260
+ comparison_operator: alarm["comparison_operator"],
2261
+ region: @config["region"],
2262
+ credentials: @config['credentials']
2263
+ )
2264
+ }
2265
+ end
2266
+ end
2267
+
2268
+ # We have issues sometimes where our dns_records are pointing at the wrong node name and IP address.
2269
+
2270
+ def getIAMProfile
2271
+ arn = if @config['generate_iam_role']
2272
+ role = @deploy.findLitterMate(name: @config['name'], type: "roles")
2273
+ s3_objs = ["#{@deploy.deploy_id}-secret", "#{role.mu_name}.pfx", "#{role.mu_name}.crt", "#{role.mu_name}.key", "#{role.mu_name}-winrm.crt", "#{role.mu_name}-winrm.key"].map { |file|
2274
+ 'arn:'+(MU::Cloud::AWS.isGovCloud?(@config['region']) ? "aws-us-gov" : "aws")+':s3:::'+MU::Cloud::AWS.adminBucketName(@credentials)+'/'+file
2275
+ }
2276
+ MU.log "Adding S3 read permissions to #{@mu_name}'s IAM profile", MU::NOTICE, details: s3_objs
2277
+ role.cloudobj.injectPolicyTargets("MuSecrets", s3_objs)
2278
+
2279
+ @config['iam_role'] = role.mu_name
2280
+ role.cloudobj.createInstanceProfile
2281
+
2282
+ elsif @config['iam_role'].nil?
2283
+ raise MuError, "#{@mu_name} has generate_iam_role set to false, but no iam_role assigned."
2284
+ end
2285
+
2286
+ if !@config["iam_role"].nil?
2287
+ if arn
2288
+ return {arn: arn}
2289
+ else
2290
+ return {name: @config["iam_role"]}
2291
+ end
2292
+ end
2293
+
2294
+ nil
2295
+ end
2296
+
2297
+ def setDeleteOntermination(device, delete_on_termination = false)
2298
+ mappings = MU.structToHash(cloud_desc.block_device_mappings)
2299
+ mappings.each { |vol|
2300
+ if vol[:ebs]
2301
+ vol[:ebs].delete(:attach_time)
2302
+ vol[:ebs].delete(:status)
2303
+ end
2304
+ if vol[:device_name] == device
2305
+ if vol[:ebs][:delete_on_termination] != delete_on_termination
2306
+ vol[:ebs][:delete_on_termination] = delete_on_termination
2307
+              MU.log "Setting delete_on_termination flag to #{delete_on_termination.to_s} on #{@mu_name}'s #{device}"
2308
+ MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).modify_instance_attribute(
2309
+ instance_id: @cloud_id,
2310
+ block_device_mappings: mappings
2311
+ )
2312
+ end
2313
+ return true
2314
+ end
2315
+ }
2316
+
2317
+ false
2318
+ end
2319
+
2320
+ def createImage
2321
+ img_cfg = @config['create_image']
2322
+ # Scrub things that don't belong on an AMI
2323
+ session = windows? ? getWinRMSession : getSSHSession
2324
+ sudo = purgecmd = ""
2325
+ sudo = "sudo" if @config['ssh_user'] != "root"
2326
+ if windows?
2327
+ purgecmd = "rm -rf /cygdrive/c/mu_installed_chef"
2328
+ else
2329
+ purgecmd = "rm -rf /opt/mu_installed_chef"
2330
+ end
2331
+ if img_cfg['image_then_destroy']
2332
+ if windows?
2333
+ purgecmd = "rm -rf /cygdrive/c/chef/ /home/#{@config['windows_admin_username']}/.ssh/authorized_keys /home/Administrator/.ssh/authorized_keys /cygdrive/c/mu-installer-ran-updates /cygdrive/c/mu_installed_chef"
2334
+ # session.exec!("powershell -Command \"& {(Get-WmiObject -Class Win32_Product -Filter \"Name='UniversalForwarder'\").Uninstall()}\"")
2335
+ else
2336
+ purgecmd = "#{sudo} rm -rf /var/lib/cloud/instances/i-* /root/.ssh/authorized_keys /etc/ssh/ssh_host_*key* /etc/chef /etc/opscode/* /.mu-installer-ran-updates /var/chef /opt/mu_installed_chef /opt/chef ; #{sudo} sed -i 's/^HOSTNAME=.*//' /etc/sysconfig/network"
2337
+ end
2338
+ end
2339
+ if windows?
2340
+ session.run(purgecmd)
2341
+ else
2342
+ session.exec!(purgecmd)
2343
+ end
2344
+ session.close
2345
+ ami_ids = MU::Cloud::AWS::Server.createImage(
2346
+ name: @mu_name,
2347
+ instance_id: @cloud_id,
2348
+ storage: @config['storage'],
2349
+ exclude_storage: img_cfg['image_exclude_storage'],
2350
+ copy_to_regions: img_cfg['copy_to_regions'],
2351
+ make_public: img_cfg['public'],
2352
+ region: @config['region'],
2353
+ tags: @config['tags'],
2354
+ credentials: @config['credentials']
2355
+ )
2356
+ @deploy.notify("images", @config['name'], ami_ids)
2357
+ @config['image_created'] = true
2358
+ if img_cfg['image_then_destroy']
2359
+ MU::Cloud::AWS::Server.waitForAMI(ami_ids[@config['region']], region: @config['region'], credentials: @config['credentials'])
2360
+ MU.log "AMI #{ami_ids[@config['region']]} ready, removing source node #{@mu_name}"
2361
+ MU::Cloud::AWS::Server.terminateInstance(id: @cloud_id, region: @config['region'], deploy_id: @deploy.deploy_id, mu_name: @mu_name, credentials: @config['credentials'])
2362
+ destroy
2655
2363
  end
2656
2364
  end
2657
- private_class_method :delete_volume
2658
2365
 
2659
2366
  end #class
2660
2367
  end #class