cloud-mu 3.1.5 → 3.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (185)
  1. checksums.yaml +4 -4
  2. data/Dockerfile +5 -1
  3. data/ansible/roles/mu-windows/files/LaunchConfig.json +9 -0
  4. data/ansible/roles/mu-windows/files/config.xml +76 -0
  5. data/ansible/roles/mu-windows/tasks/main.yml +16 -0
  6. data/bin/mu-adopt +16 -12
  7. data/bin/mu-azure-tests +57 -0
  8. data/bin/mu-cleanup +2 -4
  9. data/bin/mu-configure +52 -0
  10. data/bin/mu-deploy +3 -3
  11. data/bin/mu-findstray-tests +25 -0
  12. data/bin/mu-gen-docs +2 -4
  13. data/bin/mu-load-config.rb +2 -1
  14. data/bin/mu-node-manage +15 -16
  15. data/bin/mu-run-tests +37 -12
  16. data/cloud-mu.gemspec +3 -3
  17. data/cookbooks/mu-activedirectory/resources/domain.rb +4 -4
  18. data/cookbooks/mu-activedirectory/resources/domain_controller.rb +4 -4
  19. data/cookbooks/mu-tools/libraries/helper.rb +1 -1
  20. data/cookbooks/mu-tools/recipes/apply_security.rb +14 -14
  21. data/cookbooks/mu-tools/recipes/aws_api.rb +9 -0
  22. data/cookbooks/mu-tools/recipes/eks.rb +2 -2
  23. data/cookbooks/mu-tools/recipes/windows-client.rb +25 -22
  24. data/extras/clean-stock-amis +25 -19
  25. data/extras/generate-stock-images +1 -0
  26. data/extras/image-generators/AWS/win2k12.yaml +2 -0
  27. data/extras/image-generators/AWS/win2k16.yaml +2 -0
  28. data/extras/image-generators/AWS/win2k19.yaml +2 -0
  29. data/modules/mommacat.ru +1 -1
  30. data/modules/mu.rb +86 -98
  31. data/modules/mu/adoption.rb +373 -58
  32. data/modules/mu/cleanup.rb +214 -303
  33. data/modules/mu/cloud.rb +128 -1733
  34. data/modules/mu/cloud/database.rb +49 -0
  35. data/modules/mu/cloud/dnszone.rb +44 -0
  36. data/modules/mu/cloud/machine_images.rb +212 -0
  37. data/modules/mu/cloud/providers.rb +81 -0
  38. data/modules/mu/cloud/resource_base.rb +929 -0
  39. data/modules/mu/cloud/server.rb +40 -0
  40. data/modules/mu/cloud/server_pool.rb +1 -0
  41. data/modules/mu/cloud/ssh_sessions.rb +228 -0
  42. data/modules/mu/cloud/winrm_sessions.rb +237 -0
  43. data/modules/mu/cloud/wrappers.rb +169 -0
  44. data/modules/mu/config.rb +123 -81
  45. data/modules/mu/config/alarm.rb +2 -6
  46. data/modules/mu/config/bucket.rb +32 -3
  47. data/modules/mu/config/cache_cluster.rb +2 -2
  48. data/modules/mu/config/cdn.rb +100 -0
  49. data/modules/mu/config/collection.rb +1 -1
  50. data/modules/mu/config/container_cluster.rb +7 -2
  51. data/modules/mu/config/database.rb +84 -105
  52. data/modules/mu/config/database.yml +1 -2
  53. data/modules/mu/config/dnszone.rb +5 -4
  54. data/modules/mu/config/doc_helpers.rb +5 -6
  55. data/modules/mu/config/endpoint.rb +2 -1
  56. data/modules/mu/config/firewall_rule.rb +3 -19
  57. data/modules/mu/config/folder.rb +1 -1
  58. data/modules/mu/config/function.rb +17 -8
  59. data/modules/mu/config/group.rb +1 -1
  60. data/modules/mu/config/habitat.rb +1 -1
  61. data/modules/mu/config/job.rb +89 -0
  62. data/modules/mu/config/loadbalancer.rb +57 -11
  63. data/modules/mu/config/log.rb +1 -1
  64. data/modules/mu/config/msg_queue.rb +1 -1
  65. data/modules/mu/config/nosqldb.rb +1 -1
  66. data/modules/mu/config/notifier.rb +8 -19
  67. data/modules/mu/config/ref.rb +92 -14
  68. data/modules/mu/config/role.rb +1 -1
  69. data/modules/mu/config/schema_helpers.rb +38 -37
  70. data/modules/mu/config/search_domain.rb +1 -1
  71. data/modules/mu/config/server.rb +12 -13
  72. data/modules/mu/config/server_pool.rb +3 -7
  73. data/modules/mu/config/storage_pool.rb +1 -1
  74. data/modules/mu/config/tail.rb +11 -0
  75. data/modules/mu/config/user.rb +1 -1
  76. data/modules/mu/config/vpc.rb +27 -23
  77. data/modules/mu/config/vpc.yml +0 -1
  78. data/modules/mu/defaults/AWS.yaml +90 -90
  79. data/modules/mu/defaults/Azure.yaml +1 -0
  80. data/modules/mu/defaults/Google.yaml +1 -0
  81. data/modules/mu/deploy.rb +34 -20
  82. data/modules/mu/groomer.rb +16 -1
  83. data/modules/mu/groomers/ansible.rb +69 -4
  84. data/modules/mu/groomers/chef.rb +51 -4
  85. data/modules/mu/logger.rb +120 -144
  86. data/modules/mu/master.rb +97 -4
  87. data/modules/mu/mommacat.rb +160 -874
  88. data/modules/mu/mommacat/daemon.rb +23 -14
  89. data/modules/mu/mommacat/naming.rb +110 -3
  90. data/modules/mu/mommacat/search.rb +497 -0
  91. data/modules/mu/mommacat/storage.rb +252 -194
  92. data/modules/mu/{clouds → providers}/README.md +1 -1
  93. data/modules/mu/{clouds → providers}/aws.rb +258 -57
  94. data/modules/mu/{clouds → providers}/aws/alarm.rb +3 -3
  95. data/modules/mu/{clouds → providers}/aws/bucket.rb +275 -41
  96. data/modules/mu/{clouds → providers}/aws/cache_cluster.rb +14 -50
  97. data/modules/mu/providers/aws/cdn.rb +782 -0
  98. data/modules/mu/{clouds → providers}/aws/collection.rb +5 -5
  99. data/modules/mu/{clouds → providers}/aws/container_cluster.rb +95 -84
  100. data/modules/mu/providers/aws/database.rb +1744 -0
  101. data/modules/mu/{clouds → providers}/aws/dnszone.rb +26 -12
  102. data/modules/mu/providers/aws/endpoint.rb +1072 -0
  103. data/modules/mu/{clouds → providers}/aws/firewall_rule.rb +39 -32
  104. data/modules/mu/{clouds → providers}/aws/folder.rb +1 -1
  105. data/modules/mu/{clouds → providers}/aws/function.rb +289 -134
  106. data/modules/mu/{clouds → providers}/aws/group.rb +18 -20
  107. data/modules/mu/{clouds → providers}/aws/habitat.rb +3 -3
  108. data/modules/mu/providers/aws/job.rb +466 -0
  109. data/modules/mu/{clouds → providers}/aws/loadbalancer.rb +77 -47
  110. data/modules/mu/{clouds → providers}/aws/log.rb +5 -5
  111. data/modules/mu/{clouds → providers}/aws/msg_queue.rb +14 -11
  112. data/modules/mu/{clouds → providers}/aws/nosqldb.rb +96 -5
  113. data/modules/mu/{clouds → providers}/aws/notifier.rb +135 -63
  114. data/modules/mu/{clouds → providers}/aws/role.rb +76 -48
  115. data/modules/mu/{clouds → providers}/aws/search_domain.rb +172 -41
  116. data/modules/mu/{clouds → providers}/aws/server.rb +66 -98
  117. data/modules/mu/{clouds → providers}/aws/server_pool.rb +42 -60
  118. data/modules/mu/{clouds → providers}/aws/storage_pool.rb +21 -38
  119. data/modules/mu/{clouds → providers}/aws/user.rb +12 -16
  120. data/modules/mu/{clouds → providers}/aws/userdata/README.md +0 -0
  121. data/modules/mu/{clouds → providers}/aws/userdata/linux.erb +5 -4
  122. data/modules/mu/{clouds → providers}/aws/userdata/windows.erb +0 -0
  123. data/modules/mu/{clouds → providers}/aws/vpc.rb +143 -74
  124. data/modules/mu/{clouds → providers}/aws/vpc_subnet.rb +0 -0
  125. data/modules/mu/{clouds → providers}/azure.rb +13 -0
  126. data/modules/mu/{clouds → providers}/azure/container_cluster.rb +1 -5
  127. data/modules/mu/{clouds → providers}/azure/firewall_rule.rb +8 -1
  128. data/modules/mu/{clouds → providers}/azure/habitat.rb +0 -0
  129. data/modules/mu/{clouds → providers}/azure/loadbalancer.rb +0 -0
  130. data/modules/mu/{clouds → providers}/azure/role.rb +0 -0
  131. data/modules/mu/{clouds → providers}/azure/server.rb +32 -24
  132. data/modules/mu/{clouds → providers}/azure/user.rb +1 -1
  133. data/modules/mu/{clouds → providers}/azure/userdata/README.md +0 -0
  134. data/modules/mu/{clouds → providers}/azure/userdata/linux.erb +0 -0
  135. data/modules/mu/{clouds → providers}/azure/userdata/windows.erb +0 -0
  136. data/modules/mu/{clouds → providers}/azure/vpc.rb +4 -6
  137. data/modules/mu/{clouds → providers}/cloudformation.rb +10 -0
  138. data/modules/mu/{clouds → providers}/cloudformation/alarm.rb +3 -3
  139. data/modules/mu/{clouds → providers}/cloudformation/cache_cluster.rb +3 -3
  140. data/modules/mu/{clouds → providers}/cloudformation/collection.rb +3 -3
  141. data/modules/mu/{clouds → providers}/cloudformation/database.rb +6 -17
  142. data/modules/mu/{clouds → providers}/cloudformation/dnszone.rb +3 -3
  143. data/modules/mu/{clouds → providers}/cloudformation/firewall_rule.rb +3 -3
  144. data/modules/mu/{clouds → providers}/cloudformation/loadbalancer.rb +3 -3
  145. data/modules/mu/{clouds → providers}/cloudformation/log.rb +3 -3
  146. data/modules/mu/{clouds → providers}/cloudformation/server.rb +7 -7
  147. data/modules/mu/{clouds → providers}/cloudformation/server_pool.rb +5 -5
  148. data/modules/mu/{clouds → providers}/cloudformation/vpc.rb +3 -3
  149. data/modules/mu/{clouds → providers}/docker.rb +0 -0
  150. data/modules/mu/{clouds → providers}/google.rb +29 -6
  151. data/modules/mu/{clouds → providers}/google/bucket.rb +4 -4
  152. data/modules/mu/{clouds → providers}/google/container_cluster.rb +38 -20
  153. data/modules/mu/{clouds → providers}/google/database.rb +5 -12
  154. data/modules/mu/{clouds → providers}/google/firewall_rule.rb +5 -5
  155. data/modules/mu/{clouds → providers}/google/folder.rb +5 -9
  156. data/modules/mu/{clouds → providers}/google/function.rb +6 -6
  157. data/modules/mu/{clouds → providers}/google/group.rb +9 -17
  158. data/modules/mu/{clouds → providers}/google/habitat.rb +4 -8
  159. data/modules/mu/{clouds → providers}/google/loadbalancer.rb +5 -5
  160. data/modules/mu/{clouds → providers}/google/role.rb +50 -31
  161. data/modules/mu/{clouds → providers}/google/server.rb +41 -24
  162. data/modules/mu/{clouds → providers}/google/server_pool.rb +14 -14
  163. data/modules/mu/{clouds → providers}/google/user.rb +34 -24
  164. data/modules/mu/{clouds → providers}/google/userdata/README.md +0 -0
  165. data/modules/mu/{clouds → providers}/google/userdata/linux.erb +0 -0
  166. data/modules/mu/{clouds → providers}/google/userdata/windows.erb +0 -0
  167. data/modules/mu/{clouds → providers}/google/vpc.rb +45 -14
  168. data/modules/tests/aws-jobs-functions.yaml +46 -0
  169. data/modules/tests/centos6.yaml +15 -0
  170. data/modules/tests/centos7.yaml +15 -0
  171. data/modules/tests/centos8.yaml +12 -0
  172. data/modules/tests/ecs.yaml +2 -2
  173. data/modules/tests/eks.yaml +1 -1
  174. data/modules/tests/functions/node-function/lambda_function.js +10 -0
  175. data/modules/tests/functions/python-function/lambda_function.py +12 -0
  176. data/modules/tests/microservice_app.yaml +288 -0
  177. data/modules/tests/rds.yaml +108 -0
  178. data/modules/tests/regrooms/rds.yaml +123 -0
  179. data/modules/tests/server-with-scrub-muisms.yaml +1 -1
  180. data/modules/tests/super_complex_bok.yml +2 -2
  181. data/modules/tests/super_simple_bok.yml +3 -5
  182. data/spec/mu/clouds/azure_spec.rb +2 -2
  183. metadata +122 -92
  184. data/modules/mu/clouds/aws/database.rb +0 -1974
  185. data/modules/mu/clouds/aws/endpoint.rb +0 -596
@@ -120,7 +120,7 @@ module MU
120
120
  if !@deploy.nocleanup
121
121
  Thread.new {
122
122
  MU.dupGlobals(parent_thread_id)
123
- MU::Cloud::AWS::Server.terminateInstance(id: member.instance_id)
123
+ MU::Cloud.resourceClass("AWS", "Server").terminateInstance(id: member.instance_id)
124
124
  }
125
125
  end
126
126
  end
@@ -193,9 +193,10 @@ module MU
193
193
  # @return [Array<MU::Cloud::Server>]
194
194
  def listNodes
195
195
  nodes = []
196
- me = MU::Cloud::AWS::ServerPool.find(cloud_id: cloud_id)
197
- if me and me.first and me.first.instances
198
- me.first.instances.each { |instance|
196
+ me = MU::Cloud::AWS::ServerPool.find(cloud_id: cloud_id).values.first
197
+ pp me
198
+ if me and me.instances
199
+ me.instances.each { |instance|
199
200
  found = MU::MommaCat.findStray("AWS", "server", cloud_id: instance.instance_id, region: @config["region"], dummy_ok: true)
200
201
  nodes.concat(found)
201
202
  }
@@ -425,6 +426,7 @@ module MU
425
426
  # @return [OpenStruct]
426
427
  def cloud_desc(use_cache: true)
427
428
  return @cloud_desc_cache if @cloud_desc_cache and use_cache
429
+ return nil if !@cloud_id
428
430
  @cloud_desc_cache = MU::Cloud::AWS.autoscale(region: @config['region'], credentials: @config['credentials']).describe_auto_scaling_groups(
429
431
  auto_scaling_group_names: [@mu_name]
430
432
  ).auto_scaling_groups.first
@@ -531,14 +533,25 @@ module MU
531
533
  if cloud_desc.vpc_zone_identifier and
532
534
  !cloud_desc.vpc_zone_identifier.empty?
533
535
  nets = cloud_desc.vpc_zone_identifier.split(/,/)
534
- resp = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @credentials).describe_subnets(subnet_ids: nets).subnets.first
535
- bok['vpc'] = MU::Config::Ref.get(
536
- id: resp.vpc_id,
537
- cloud: "AWS",
538
- credentials: @credentials,
539
- type: "vpcs",
540
- subnets: nets.map { |s| { "subnet_id" => s } }
541
- )
536
+ begin
537
+ resp = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @credentials).describe_subnets(subnet_ids: nets).subnets.first
538
+ bok['vpc'] = MU::Config::Ref.get(
539
+ id: resp.vpc_id,
540
+ cloud: "AWS",
541
+ credentials: @credentials,
542
+ type: "vpcs",
543
+ subnets: nets.map { |s| { "subnet_id" => s } }
544
+ )
545
+ rescue Aws::EC2::Errors::InvalidSubnetIDNotFound => e
546
+ if e.message.match(/The subnet ID '(subnet-[a-f0-9]+)' does not exist/)
547
+ nets.delete(Regexp.last_match[1])
548
+ if nets.empty?
549
+ MU.log "Autoscale Group #{@cloud_id} was configured for a VPC, but the configuration held no valid subnets", MU::WARN, details: cloud_desc.vpc_zone_identifier.split(/,/)
550
+ end
551
+ else
552
+ raise e
553
+ end
554
+ end
542
555
  end
543
556
 
544
557
  # MU.log @cloud_id, MU::NOTICE, details: cloud_desc
@@ -813,26 +826,7 @@ module MU
813
826
  }
814
827
  }
815
828
  },
816
- "ingress_rules" => {
817
- "items" => {
818
- "properties" => {
819
- "sgs" => {
820
- "type" => "array",
821
- "items" => {
822
- "description" => "Other AWS Security Groups; resources that are associated with this group will have this rule applied to their traffic",
823
- "type" => "string"
824
- }
825
- },
826
- "lbs" => {
827
- "type" => "array",
828
- "items" => {
829
- "description" => "AWS Load Balancers which will have this rule applied to their traffic",
830
- "type" => "string"
831
- }
832
- }
833
- }
834
- }
835
- }
829
+ "ingress_rules" => MU::Cloud.resourceClass("AWS", "FirewallRule").ingressRuleAddtlSchema
836
830
  }
837
831
  [toplevel_required, schema]
838
832
  end
@@ -905,7 +899,7 @@ module MU
905
899
  launch = pool["basis"]["launch_config"]
906
900
  launch['iam_policies'] ||= pool['iam_policies']
907
901
 
908
- launch['size'] = MU::Cloud::AWS::Server.validateInstanceType(launch["size"], pool["region"])
902
+ launch['size'] = MU::Cloud.resourceClass("AWS", "Server").validateInstanceType(launch["size"], pool["region"])
909
903
  ok = false if launch['size'].nil?
910
904
  if !launch['generate_iam_role']
911
905
  if !launch['iam_role'] and pool['cloud'] != "CloudFormation"
@@ -949,11 +943,7 @@ module MU
949
943
 
950
944
  role['credentials'] = pool['credentials'] if pool['credentials']
951
945
  configurator.insertKitten(role, "roles")
952
- pool["dependencies"] ||= []
953
- pool["dependencies"] << {
954
- "type" => "role",
955
- "name" => pool["name"]
956
- }
946
+ MU::Config.addDependency(pool, pool['name'], "role")
957
947
  end
958
948
  launch["ami_id"] ||= launch["image_id"]
959
949
  if launch["server"].nil? and launch["instance_id"].nil? and launch["ami_id"].nil?
@@ -967,7 +957,7 @@ module MU
967
957
  end
968
958
  end
969
959
  if launch["server"] != nil
970
- pool["dependencies"] << {"type" => "server", "name" => launch["server"]}
960
+ MU::Config.addDependency(pool, launch["server"], "server", phase: "groom")
971
961
  # XXX I dunno, maybe toss an error if this isn't done already
972
962
  # servers.each { |server|
973
963
  # if server["name"] == launch["server"]
@@ -1073,7 +1063,7 @@ module MU
1073
1063
  # @param ignoremaster [Boolean]: If true, will remove resources not flagged as originating from this Mu server
1074
1064
  # @param region [String]: The cloud provider region
1075
1065
  # @return [void]
1076
- def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {})
1066
+ def self.cleanup(noop: false, deploy_id: MU.deploy_id, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {})
1077
1067
  MU.log "AWS::ServerPool.cleanup: need to support flags['known']", MU::DEBUG, details: flags
1078
1068
 
1079
1069
  filters = [{name: "key", values: ["MU-ID"]}]
@@ -1096,7 +1086,7 @@ module MU
1096
1086
  if asg.key == "MU-MASTER-IP" and asg.value != MU.mu_public_ip and !ignoremaster
1097
1087
  no_purge << asg.resource_id
1098
1088
  end
1099
- if asg.key == "MU-ID" and asg.value == MU.deploy_id
1089
+ if asg.key == "MU-ID" and asg.value == deploy_id
1100
1090
  maybe_purge << asg.resource_id
1101
1091
  end
1102
1092
  }
@@ -1123,7 +1113,7 @@ module MU
1123
1113
  end
1124
1114
  end
1125
1115
 
1126
- # MU::Cloud::AWS::Server.removeIAMProfile(resource_id)
1116
+ # MU::Cloud.resourceClass("AWS", "Server").removeIAMProfile(resource_id)
1127
1117
 
1128
1118
  # Generally there should be a launch_configuration of the same name
1129
1119
  # XXX search for these independently, too?
@@ -1164,14 +1154,14 @@ module MU
1164
1154
  @config['basis']['launch_config']["ami_id"] = @deploy.deployment["images"][@config['basis']['launch_config']["server"]]["image_id"]
1165
1155
  MU.log "Using AMI '#{@config['basis']['launch_config']["ami_id"]}' from sibling server #{@config['basis']['launch_config']["server"]} in ServerPool #{@mu_name}"
1166
1156
  elsif !@config['basis']['launch_config']["instance_id"].nil?
1167
- @config['basis']['launch_config']["ami_id"] = MU::Cloud::AWS::Server.createImage(
1157
+ @config['basis']['launch_config']["ami_id"] = MU::Cloud.resourceClass("AWS", "Server").createImage(
1168
1158
  name: @mu_name,
1169
1159
  instance_id: @config['basis']['launch_config']["instance_id"],
1170
1160
  credentials: @config['credentials'],
1171
1161
  region: @config['region']
1172
1162
  )[@config['region']]
1173
1163
  end
1174
- MU::Cloud::AWS::Server.waitForAMI(@config['basis']['launch_config']["ami_id"], credentials: @config['credentials'])
1164
+ MU::Cloud.resourceClass("AWS", "Server").waitForAMI(@config['basis']['launch_config']["ami_id"], credentials: @config['credentials'])
1175
1165
 
1176
1166
  oldlaunch = MU::Cloud::AWS.autoscale(region: @config['region'], credentials: @config['credentials']).describe_launch_configurations(
1177
1167
  launch_configuration_names: [@mu_name]
@@ -1226,12 +1216,12 @@ module MU
1226
1216
  vol.delete("encrypted")
1227
1217
  end
1228
1218
  end
1229
- mapping, _cfm_mapping = MU::Cloud::AWS::Server.convertBlockDeviceMapping(vol)
1219
+ mapping, _cfm_mapping = MU::Cloud.resourceClass("AWS", "Server").convertBlockDeviceMapping(vol)
1230
1220
  storage << mapping
1231
1221
  }
1232
1222
  end
1233
1223
 
1234
- storage.concat(MU::Cloud::AWS::Server.ephemeral_mappings)
1224
+ storage.concat(MU::Cloud.resourceClass("AWS", "Server").ephemeral_mappings)
1235
1225
 
1236
1226
  if @config['basis']['launch_config']['generate_iam_role']
1237
1227
  role = @deploy.findLitterMate(name: @config['name'], type: "roles")
@@ -1426,20 +1416,12 @@ module MU
1426
1416
  # XXX probably have to query API to get the DNS name of this one
1427
1417
  }
1428
1418
  elsif lb["concurrent_load_balancer"]
1429
- raise MuError, "No loadbalancers exist! I need one named #{lb['concurrent_load_balancer']}" if !@deploy.deployment["loadbalancers"]
1430
- found = false
1431
- @deploy.deployment["loadbalancers"].each_pair { |lb_name, deployed_lb|
1432
- if lb_name == lb['concurrent_load_balancer']
1433
- lbs << deployed_lb["awsname"] # XXX check for classic
1434
- if deployed_lb.has_key?("targetgroups")
1435
- deployed_lb["targetgroups"].values.each { |tg_arn|
1436
- tg_arns << tg_arn
1437
- }
1438
- end
1439
- found = true
1440
- end
1441
- }
1442
- raise MuError, "I need a loadbalancer named #{lb['concurrent_load_balancer']}, but none seems to have been created!" if !found
1419
+ lb = @deploy.findLitterMate(name: lb['concurrent_load_balancer'], type: "loadbalancers")
1420
+ raise MuError, "No loadbalancers exist! I need one named #{lb['concurrent_load_balancer']}" if !lb
1421
+ lbs << lb.mu_name
1422
+ if lb.targetgroups
1423
+ tg_arns = lb.targetgroups.values.map { |tg| tg.target_group_arn }
1424
+ end
1443
1425
  end
1444
1426
  }
1445
1427
  if tg_arns.size > 0
@@ -67,7 +67,7 @@ module MU
67
67
  if target['vpc']["subnet_name"]
68
68
  subnet_obj = vpc.getSubnet(name: target['vpc']["subnet_name"])
69
69
  if subnet_obj.nil?
70
- raise MuError, "Failed to locate subnet from #{subnet} in StoragePool #{@config['name']}:#{target['name']}"
70
+ raise MuError, "Failed to locate subnet from #{target['vpc']["subnet_name"]} in StoragePool #{@config['name']}:#{target['name']}"
71
71
  end
72
72
  target['vpc']['subnet_id'] = subnet_obj.cloud_id
73
73
  end
@@ -261,49 +261,29 @@ module MU
261
261
  targets = {}
262
262
 
263
263
  if @config['mount_points'] && !@config['mount_points'].empty?
264
+ mount_targets = MU::Cloud::AWS.efs(region: @config['region'], credentials: @config['credentials']).describe_mount_targets(
265
+ file_system_id: storage_pool.file_system_id
266
+ ).mount_targets
267
+
264
268
  @config['mount_points'].each { |mp|
265
269
  subnet = nil
266
270
  dependencies
267
- mp_vpc = if mp['vpc'] and mp['vpc']['vpc_name']
268
- @deploy.findLitterMate(type: "vpc", name: mp['vpc']['vpc_name'], credentials: @config['credentials'])
269
- elsif mp['vpc']
270
- MU::MommaCat.findStray(
271
- @config['cloud'],
272
- "vpcs",
273
- deploy_id: mp['vpc']["deploy_id"],
274
- credentials: @config['credentials'],
275
- mu_name: mp['vpc']["mu_name"],
276
- cloud_id: mp['vpc']['vpc_id'],
277
- region: @config['region'],
278
- dummy_ok: false
279
- ).first
280
- # XXX non-sibling, findStray version
281
- end
271
+ mp_vpc = MU::Config::Ref.get(mp['vpc']).kitten
282
272
 
283
- mount_targets = MU::Cloud::AWS.efs(region: @config['region'], credentials: @config['credentials']).describe_mount_targets(
284
- file_system_id: storage_pool.file_system_id
285
- ).mount_targets
286
273
 
287
- # subnet_obj = mp_vpc.subnets.select { |s|
288
- # s.name == mp["vpc"]["subnet_name"] or s.cloud_id == mp["vpc"]["subnet_id"]
289
- # }.first
274
+ subnet_obj = mp_vpc.subnets.select { |s|
275
+ s.name == mp["vpc"]["subnet_name"] or s.cloud_id == mp["vpc"]["subnet_id"]
276
+ }.first
290
277
  mount_target = nil
291
- mp_vpc.subnets.each { |subnet_obj|
292
- mount_targets.map { |t|
293
- subnet_cidr_obj = NetAddr::IPv4Net.parse(subnet_obj.ip_block)
294
- if subnet_cidr_obj.contains(NetAddr::IPv4.parse(t.ip_address))
295
- mount_target = t
296
- subnet = subnet_obj.cloud_desc
297
- end
298
- }
299
- break if mount_target
278
+ mount_targets.each { |t|
279
+ subnet_cidr_obj = NetAddr::IPv4Net.parse(subnet_obj.ip_block)
280
+ if subnet_cidr_obj.contains(NetAddr::IPv4.parse(t.ip_address))
281
+ mount_target = t
282
+ subnet = subnet_obj.cloud_desc
283
+ break
284
+ end
300
285
  }
301
286
 
302
- # mount_target = MU::Cloud::AWS.efs(region: @config['region'], credentials: @config['credentials']).describe_mount_targets(
303
- # mount_target_id: mp["cloud_id"]
304
- # ).mount_targets.first
305
-
306
-
307
287
  targets[mp["name"]] = {
308
288
  "owner_id" => mount_target.owner_id,
309
289
  "cloud_id" => mount_target.mount_target_id,
@@ -353,7 +333,7 @@ module MU
353
333
  # @param ignoremaster [Boolean]: If true, will remove resources not flagged as originating from this Mu server
354
334
  # @param region [String]: The cloud provider region in which to operate
355
335
  # @return [void]
356
- def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {})
336
+ def self.cleanup(noop: false, deploy_id: MU.deploy_id, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {})
357
337
  MU.log "AWS::StoragePool.cleanup: need to support flags['known']", MU::DEBUG, details: flags
358
338
 
359
339
  supported_regions = %w{us-west-2 us-east-1 eu-west-1}
@@ -378,7 +358,7 @@ module MU
378
358
  found_muid = false
379
359
  found_master = false
380
360
  tags.each { |tag|
381
- found_muid = true if tag.key == "MU-ID" && tag.value == MU.deploy_id
361
+ found_muid = true if tag.key == "MU-ID" && tag.value == deploy_id
382
362
  found_master = true if tag.key == "MU-MASTER-IP" && tag.value == MU.mu_public_ip
383
363
  }
384
364
  next if !found_muid
@@ -493,6 +473,9 @@ module MU
493
473
 
494
474
  if pool['mount_points'] && !pool['mount_points'].empty?
495
475
  pool['mount_points'].each{ |mp|
476
+ if mp['vpc'] and mp['vpc']['name']
477
+ MU::Config.addDependency(pool, mp['vpc']['name'], "vpc")
478
+ end
496
479
  if mp['ingress_rules']
497
480
  fwname = "storage-#{mp['name']}"
498
481
  acl = {
@@ -109,7 +109,7 @@ module MU
109
109
  # Create these if necessary, then append them to the list of
110
110
  # attachable_policies
111
111
  if @config['raw_policies']
112
- pol_arns = MU::Cloud::AWS::Role.manageRawPolicies(
112
+ pol_arns = MU::Cloud.resourceClass("AWS", "Role").manageRawPolicies(
113
113
  @config['raw_policies'],
114
114
  basename: @deploy.getResourceName(@config['name']),
115
115
  credentials: @credentials
@@ -135,7 +135,7 @@ module MU
135
135
  attached_policies.each { |a|
136
136
  if !configured_policies.include?(a.policy_arn)
137
137
  MU.log "Removing IAM policy #{a.policy_arn} from user #{@mu_name}", MU::NOTICE
138
- MU::Cloud::AWS::Role.purgePolicy(a.policy_arn, @credentials)
138
+ MU::Cloud.resourceClass("AWS", "Role").purgePolicy(a.policy_arn, @credentials)
139
139
  else
140
140
  configured_policies.delete(a.policy_arn)
141
141
  end
@@ -151,7 +151,7 @@ module MU
151
151
  end
152
152
 
153
153
  if @config['inline_policies']
154
- docs = MU::Cloud::AWS::Role.genPolicyDocument(@config['inline_policies'], deploy_obj: @deploy)
154
+ docs = MU::Cloud.resourceClass("AWS", "Role").genPolicyDocument(@config['inline_policies'], deploy_obj: @deploy)
155
155
  docs.each { |doc|
156
156
  MU.log "Putting user policy #{doc.keys.first} to user #{@cloud_id} "
157
157
  MU::Cloud::AWS.iam(credentials: @credentials).put_user_policy(
@@ -190,16 +190,16 @@ module MU
190
190
  # @param noop [Boolean]: If true, will only print what would be done
191
191
  # @param ignoremaster [Boolean]: If true, will remove resources not flagged as originating from this Mu server
192
192
  # @return [void]
193
- def self.cleanup(noop: false, ignoremaster: false, credentials: nil, flags: {})
193
+ def self.cleanup(noop: false, deploy_id: MU.deploy_id, ignoremaster: false, credentials: nil, flags: {})
194
194
  MU.log "AWS::User.cleanup: need to support flags['known']", MU::DEBUG, details: flags
195
195
 
196
196
  # XXX this doesn't belong here; maybe under roles, maybe as its own stupid first-class resource
197
197
  resp = MU::Cloud::AWS.iam(credentials: credentials).list_policies(
198
- path_prefix: "/"+MU.deploy_id+"/"
198
+ path_prefix: "/"+deploy_id+"/"
199
199
  )
200
200
  if resp and resp.policies
201
201
  resp.policies.each { |policy|
202
- MU.log "Deleting policy /#{MU.deploy_id}/#{policy.policy_name}"
202
+ MU.log "Deleting policy /#{deploy_id}/#{policy.policy_name}"
203
203
  if !noop
204
204
  attachments = begin
205
205
  MU::Cloud::AWS.iam(credentials: credentials).list_entities_for_policy(
@@ -277,7 +277,7 @@ MU.log e.inspect, MU::ERR, details: policy
277
277
  has_ourdeploy = false
278
278
  has_ourmaster = false
279
279
  tags.each { |tag|
280
- if tag.key == "MU-ID" and tag.value == MU.deploy_id
280
+ if tag.key == "MU-ID" and tag.value == deploy_id
281
281
  has_ourdeploy = true
282
282
  elsif tag.key == "MU-MASTER-IP" and tag.value == MU.mu_public_ip
283
283
  has_ourmaster = true
@@ -431,7 +431,7 @@ MU.log e.inspect, MU::ERR, details: policy
431
431
  resp.policy_names.each { |pol_name|
432
432
  pol = MU::Cloud::AWS.iam(credentials: @credentials).get_user_policy(user_name: @cloud_id, policy_name: pol_name)
433
433
  doc = JSON.parse(URI.decode(pol.policy_document))
434
- bok["inline_policies"] = MU::Cloud::AWS::Role.doc2MuPolicies(pol.policy_name, doc, bok["inline_policies"])
434
+ bok["inline_policies"] = MU::Cloud.resourceClass("AWS", "Role").doc2MuPolicies(pol.policy_name, doc, bok["inline_policies"])
435
435
  }
436
436
  end
437
437
 
@@ -465,7 +465,7 @@ MU.log e.inspect, MU::ERR, details: policy
465
465
  def self.schema(_config)
466
466
  toplevel_required = []
467
467
  polschema = MU::Config::Role.schema["properties"]["policies"]
468
- polschema.deep_merge!(MU::Cloud::AWS::Role.condition_schema)
468
+ polschema.deep_merge!(MU::Cloud.resourceClass("AWS", "Role").condition_schema)
469
469
 
470
470
  schema = {
471
471
  "inline_policies" => polschema,
@@ -517,7 +517,7 @@ style long name, like +IAMTESTS-DEV-2018112815-IS-USER-FOO+"
517
517
  # If we're attaching some managed policies, make sure all of the ones
518
518
  # that should already exist do indeed exist
519
519
  if user['attachable_policies']
520
- ok = false if !MU::Cloud::AWS::Role.validateAttachablePolicies(
520
+ ok = false if !MU::Cloud.resourceClass("AWS", "Role").validateAttachablePolicies(
521
521
  user['attachable_policies'],
522
522
  credentials: user['credentials'],
523
523
  region: user['region']
@@ -530,7 +530,7 @@ style long name, like +IAMTESTS-DEV-2018112815-IS-USER-FOO+"
530
530
  if configurator.haveLitterMate?(group, "groups")
531
531
  need_dependency = true
532
532
  else
533
- found = MU::Cloud::AWS::Group.find(cloud_id: group)
533
+ found = MU::Cloud.resourceClass("AWS", "Group").find(cloud_id: group)
534
534
  if found.nil? or found.empty? or (configurator.updating and
535
535
  found.values.first.group.path == "/"+configurator.updating+"/")
536
536
  groupdesc = {
@@ -542,11 +542,7 @@ style long name, like +IAMTESTS-DEV-2018112815-IS-USER-FOO+"
542
542
  end
543
543
 
544
544
  if need_dependency
545
- user["dependencies"] ||= []
546
- user["dependencies"] << {
547
- "type" => "group",
548
- "name" => group
549
- }
545
+ MU::Config.addDependency(user, group, "group")
550
546
  end
551
547
  }
552
548
  end
@@ -42,7 +42,7 @@ if ping -c 5 8.8.8.8 > /dev/null; then
42
42
  <% if !$mu.skipApplyUpdates %>
43
43
  set +e
44
44
  if [ ! -f /.mu-installer-ran-updates ];then
45
- service ssh stop
45
+ echo "Applying package updates" > /etc/nologin
46
46
  apt-get --fix-missing -y upgrade
47
47
  touch /.mu-installer-ran-updates
48
48
  if [ $? -eq 0 ]
@@ -58,7 +58,7 @@ if ping -c 5 8.8.8.8 > /dev/null; then
58
58
  else
59
59
  echo "FAILED PACKAGE UPDATE" >&2
60
60
  fi
61
- service ssh start
61
+ rm -f /etc/nologin
62
62
  fi
63
63
  <% end %>
64
64
  elif [ -x /usr/bin/yum ];then
@@ -94,7 +94,7 @@ if ping -c 5 8.8.8.8 > /dev/null; then
94
94
  <% if !$mu.skipApplyUpdates %>
95
95
  set +e
96
96
  if [ ! -f /.mu-installer-ran-updates ];then
97
- service sshd stop
97
+ echo "Applying package updates" > /etc/nologin
98
98
  kernel_update=`yum list updates | grep kernel`
99
99
  yum -y update
100
100
  touch /.mu-installer-ran-updates
@@ -108,7 +108,7 @@ if ping -c 5 8.8.8.8 > /dev/null; then
108
108
  else
109
109
  echo "FAILED PACKAGE UPDATE" >&2
110
110
  fi
111
- service sshd start
111
+ rm -f /etc/nologin
112
112
  fi
113
113
  <% end %>
114
114
  fi
@@ -116,6 +116,7 @@ else
116
116
  /bin/logger "***** Unable to verify internet connectivity, skipping package updates from userdata"
117
117
  touch /.mu-installer-ran-updates
118
118
  fi
119
+ rm -f /etc/nologin
119
120
 
120
121
  AWSCLI='command -v aws'
121
122
  PIP='command -v pip'