cloud-mu 3.1.6 → 3.4.0

Files changed (181)
  1. checksums.yaml +4 -4
  2. data/Dockerfile +1 -1
  3. data/bin/mu-adopt +15 -12
  4. data/bin/mu-azure-tests +57 -0
  5. data/bin/mu-cleanup +2 -4
  6. data/bin/mu-configure +37 -1
  7. data/bin/mu-deploy +3 -3
  8. data/bin/mu-findstray-tests +25 -0
  9. data/bin/mu-gen-docs +2 -4
  10. data/bin/mu-load-config.rb +2 -1
  11. data/bin/mu-run-tests +37 -12
  12. data/cloud-mu.gemspec +4 -4
  13. data/cookbooks/mu-tools/attributes/default.rb +7 -0
  14. data/cookbooks/mu-tools/libraries/helper.rb +87 -3
  15. data/cookbooks/mu-tools/recipes/apply_security.rb +39 -23
  16. data/cookbooks/mu-tools/recipes/aws_api.rb +13 -0
  17. data/cookbooks/mu-tools/recipes/google_api.rb +4 -0
  18. data/cookbooks/mu-tools/recipes/rsyslog.rb +8 -1
  19. data/cookbooks/mu-tools/resources/disk.rb +33 -12
  20. data/cookbooks/mu-tools/resources/mommacat_request.rb +1 -2
  21. data/cookbooks/mu-tools/templates/centos-8/sshd_config.erb +215 -0
  22. data/extras/clean-stock-amis +10 -2
  23. data/extras/generate-stock-images +7 -3
  24. data/extras/image-generators/AWS/centos7.yaml +19 -16
  25. data/extras/image-generators/AWS/{rhel7.yaml → rhel71.yaml} +0 -0
  26. data/extras/image-generators/AWS/{win2k12.yaml → win2k12r2.yaml} +0 -0
  27. data/modules/mommacat.ru +2 -2
  28. data/modules/mu.rb +84 -97
  29. data/modules/mu/adoption.rb +359 -59
  30. data/modules/mu/cleanup.rb +67 -44
  31. data/modules/mu/cloud.rb +108 -1754
  32. data/modules/mu/cloud/database.rb +49 -0
  33. data/modules/mu/cloud/dnszone.rb +44 -0
  34. data/modules/mu/cloud/machine_images.rb +212 -0
  35. data/modules/mu/cloud/providers.rb +81 -0
  36. data/modules/mu/cloud/resource_base.rb +929 -0
  37. data/modules/mu/cloud/server.rb +40 -0
  38. data/modules/mu/cloud/server_pool.rb +1 -0
  39. data/modules/mu/cloud/ssh_sessions.rb +228 -0
  40. data/modules/mu/cloud/winrm_sessions.rb +237 -0
  41. data/modules/mu/cloud/wrappers.rb +178 -0
  42. data/modules/mu/config.rb +122 -80
  43. data/modules/mu/config/alarm.rb +2 -6
  44. data/modules/mu/config/bucket.rb +32 -3
  45. data/modules/mu/config/cache_cluster.rb +2 -2
  46. data/modules/mu/config/cdn.rb +100 -0
  47. data/modules/mu/config/collection.rb +1 -1
  48. data/modules/mu/config/container_cluster.rb +2 -2
  49. data/modules/mu/config/database.rb +84 -105
  50. data/modules/mu/config/database.yml +1 -2
  51. data/modules/mu/config/dnszone.rb +5 -4
  52. data/modules/mu/config/doc_helpers.rb +4 -5
  53. data/modules/mu/config/endpoint.rb +2 -1
  54. data/modules/mu/config/firewall_rule.rb +3 -19
  55. data/modules/mu/config/folder.rb +1 -1
  56. data/modules/mu/config/function.rb +17 -8
  57. data/modules/mu/config/group.rb +1 -1
  58. data/modules/mu/config/habitat.rb +1 -1
  59. data/modules/mu/config/job.rb +89 -0
  60. data/modules/mu/config/loadbalancer.rb +57 -11
  61. data/modules/mu/config/log.rb +1 -1
  62. data/modules/mu/config/msg_queue.rb +1 -1
  63. data/modules/mu/config/nosqldb.rb +1 -1
  64. data/modules/mu/config/notifier.rb +8 -19
  65. data/modules/mu/config/ref.rb +81 -9
  66. data/modules/mu/config/role.rb +1 -1
  67. data/modules/mu/config/schema_helpers.rb +30 -34
  68. data/modules/mu/config/search_domain.rb +1 -1
  69. data/modules/mu/config/server.rb +5 -13
  70. data/modules/mu/config/server_pool.rb +3 -7
  71. data/modules/mu/config/storage_pool.rb +1 -1
  72. data/modules/mu/config/tail.rb +10 -0
  73. data/modules/mu/config/user.rb +1 -1
  74. data/modules/mu/config/vpc.rb +13 -17
  75. data/modules/mu/defaults/AWS.yaml +106 -106
  76. data/modules/mu/defaults/Azure.yaml +1 -0
  77. data/modules/mu/defaults/Google.yaml +1 -0
  78. data/modules/mu/deploy.rb +33 -19
  79. data/modules/mu/groomer.rb +15 -0
  80. data/modules/mu/groomers/chef.rb +3 -0
  81. data/modules/mu/logger.rb +120 -144
  82. data/modules/mu/master.rb +22 -1
  83. data/modules/mu/mommacat.rb +71 -26
  84. data/modules/mu/mommacat/daemon.rb +23 -14
  85. data/modules/mu/mommacat/naming.rb +82 -3
  86. data/modules/mu/mommacat/search.rb +59 -16
  87. data/modules/mu/mommacat/storage.rb +119 -48
  88. data/modules/mu/{clouds → providers}/README.md +1 -1
  89. data/modules/mu/{clouds → providers}/aws.rb +248 -62
  90. data/modules/mu/{clouds → providers}/aws/alarm.rb +3 -3
  91. data/modules/mu/{clouds → providers}/aws/bucket.rb +275 -41
  92. data/modules/mu/{clouds → providers}/aws/cache_cluster.rb +14 -50
  93. data/modules/mu/providers/aws/cdn.rb +782 -0
  94. data/modules/mu/{clouds → providers}/aws/collection.rb +5 -5
  95. data/modules/mu/{clouds → providers}/aws/container_cluster.rb +65 -63
  96. data/modules/mu/providers/aws/database.rb +1747 -0
  97. data/modules/mu/{clouds → providers}/aws/dnszone.rb +26 -12
  98. data/modules/mu/providers/aws/endpoint.rb +1072 -0
  99. data/modules/mu/{clouds → providers}/aws/firewall_rule.rb +39 -32
  100. data/modules/mu/{clouds → providers}/aws/folder.rb +1 -1
  101. data/modules/mu/{clouds → providers}/aws/function.rb +291 -133
  102. data/modules/mu/{clouds → providers}/aws/group.rb +18 -20
  103. data/modules/mu/{clouds → providers}/aws/habitat.rb +3 -3
  104. data/modules/mu/providers/aws/job.rb +469 -0
  105. data/modules/mu/{clouds → providers}/aws/loadbalancer.rb +77 -47
  106. data/modules/mu/{clouds → providers}/aws/log.rb +5 -5
  107. data/modules/mu/{clouds → providers}/aws/msg_queue.rb +14 -11
  108. data/modules/mu/{clouds → providers}/aws/nosqldb.rb +96 -5
  109. data/modules/mu/{clouds → providers}/aws/notifier.rb +135 -63
  110. data/modules/mu/{clouds → providers}/aws/role.rb +112 -78
  111. data/modules/mu/{clouds → providers}/aws/search_domain.rb +172 -41
  112. data/modules/mu/{clouds → providers}/aws/server.rb +120 -145
  113. data/modules/mu/{clouds → providers}/aws/server_pool.rb +42 -60
  114. data/modules/mu/{clouds → providers}/aws/storage_pool.rb +21 -38
  115. data/modules/mu/{clouds → providers}/aws/user.rb +12 -16
  116. data/modules/mu/{clouds → providers}/aws/userdata/README.md +0 -0
  117. data/modules/mu/{clouds → providers}/aws/userdata/linux.erb +5 -4
  118. data/modules/mu/{clouds → providers}/aws/userdata/windows.erb +0 -0
  119. data/modules/mu/{clouds → providers}/aws/vpc.rb +141 -73
  120. data/modules/mu/{clouds → providers}/aws/vpc_subnet.rb +0 -0
  121. data/modules/mu/{clouds → providers}/azure.rb +4 -1
  122. data/modules/mu/{clouds → providers}/azure/container_cluster.rb +1 -5
  123. data/modules/mu/{clouds → providers}/azure/firewall_rule.rb +8 -1
  124. data/modules/mu/{clouds → providers}/azure/habitat.rb +0 -0
  125. data/modules/mu/{clouds → providers}/azure/loadbalancer.rb +0 -0
  126. data/modules/mu/{clouds → providers}/azure/role.rb +0 -0
  127. data/modules/mu/{clouds → providers}/azure/server.rb +32 -24
  128. data/modules/mu/{clouds → providers}/azure/user.rb +1 -1
  129. data/modules/mu/{clouds → providers}/azure/userdata/README.md +0 -0
  130. data/modules/mu/{clouds → providers}/azure/userdata/linux.erb +0 -0
  131. data/modules/mu/{clouds → providers}/azure/userdata/windows.erb +0 -0
  132. data/modules/mu/{clouds → providers}/azure/vpc.rb +4 -6
  133. data/modules/mu/{clouds → providers}/cloudformation.rb +1 -1
  134. data/modules/mu/{clouds → providers}/cloudformation/alarm.rb +3 -3
  135. data/modules/mu/{clouds → providers}/cloudformation/cache_cluster.rb +3 -3
  136. data/modules/mu/{clouds → providers}/cloudformation/collection.rb +3 -3
  137. data/modules/mu/{clouds → providers}/cloudformation/database.rb +6 -17
  138. data/modules/mu/{clouds → providers}/cloudformation/dnszone.rb +3 -3
  139. data/modules/mu/{clouds → providers}/cloudformation/firewall_rule.rb +3 -3
  140. data/modules/mu/{clouds → providers}/cloudformation/loadbalancer.rb +3 -3
  141. data/modules/mu/{clouds → providers}/cloudformation/log.rb +3 -3
  142. data/modules/mu/{clouds → providers}/cloudformation/server.rb +7 -7
  143. data/modules/mu/{clouds → providers}/cloudformation/server_pool.rb +5 -5
  144. data/modules/mu/{clouds → providers}/cloudformation/vpc.rb +3 -3
  145. data/modules/mu/{clouds → providers}/docker.rb +0 -0
  146. data/modules/mu/{clouds → providers}/google.rb +15 -6
  147. data/modules/mu/{clouds → providers}/google/bucket.rb +2 -2
  148. data/modules/mu/{clouds → providers}/google/container_cluster.rb +29 -14
  149. data/modules/mu/{clouds → providers}/google/database.rb +2 -9
  150. data/modules/mu/{clouds → providers}/google/firewall_rule.rb +3 -3
  151. data/modules/mu/{clouds → providers}/google/folder.rb +5 -9
  152. data/modules/mu/{clouds → providers}/google/function.rb +4 -4
  153. data/modules/mu/{clouds → providers}/google/group.rb +9 -17
  154. data/modules/mu/{clouds → providers}/google/habitat.rb +4 -8
  155. data/modules/mu/{clouds → providers}/google/loadbalancer.rb +2 -2
  156. data/modules/mu/{clouds → providers}/google/role.rb +46 -35
  157. data/modules/mu/{clouds → providers}/google/server.rb +26 -11
  158. data/modules/mu/{clouds → providers}/google/server_pool.rb +11 -11
  159. data/modules/mu/{clouds → providers}/google/user.rb +32 -22
  160. data/modules/mu/{clouds → providers}/google/userdata/README.md +0 -0
  161. data/modules/mu/{clouds → providers}/google/userdata/linux.erb +0 -0
  162. data/modules/mu/{clouds → providers}/google/userdata/windows.erb +0 -0
  163. data/modules/mu/{clouds → providers}/google/vpc.rb +38 -3
  164. data/modules/tests/aws-jobs-functions.yaml +46 -0
  165. data/modules/tests/centos6.yaml +15 -0
  166. data/modules/tests/centos7.yaml +15 -0
  167. data/modules/tests/centos8.yaml +12 -0
  168. data/modules/tests/ecs.yaml +2 -2
  169. data/modules/tests/eks.yaml +1 -1
  170. data/modules/tests/functions/node-function/lambda_function.js +10 -0
  171. data/modules/tests/functions/python-function/lambda_function.py +12 -0
  172. data/modules/tests/microservice_app.yaml +288 -0
  173. data/modules/tests/rds.yaml +108 -0
  174. data/modules/tests/regrooms/rds.yaml +123 -0
  175. data/modules/tests/server-with-scrub-muisms.yaml +1 -1
  176. data/modules/tests/super_complex_bok.yml +2 -2
  177. data/modules/tests/super_simple_bok.yml +2 -2
  178. data/spec/mu/clouds/azure_spec.rb +2 -2
  179. metadata +126 -98
  180. data/modules/mu/clouds/aws/database.rb +0 -1974
  181. data/modules/mu/clouds/aws/endpoint.rb +0 -596

data/modules/mu/{clouds → providers}/aws/server_pool.rb

@@ -120,7 +120,7 @@ module MU
  if !@deploy.nocleanup
  Thread.new {
  MU.dupGlobals(parent_thread_id)
- MU::Cloud::AWS::Server.terminateInstance(id: member.instance_id)
+ MU::Cloud.resourceClass("AWS", "Server").terminateInstance(id: member.instance_id)
  }
  end
  end
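
The hunk above shows the release's most common refactor: hard-coded provider classes such as MU::Cloud::AWS::Server are now resolved through MU::Cloud.resourceClass. A minimal sketch of what such a lookup can amount to, assuming it simply resolves nested constants (the real helper shipped in the new data/modules/mu/cloud/*.rb files may validate, autoload, or cache):

    # Illustrative only: roughly what a resourceClass-style lookup does.
    def resource_class(cloud, type)
      MU::Cloud.const_get(cloud).const_get(type)
    end

    resource_class("AWS", "Server")   # => MU::Cloud::AWS::Server
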
@@ -193,9 +193,10 @@ module MU
  # @return [Array<MU::Cloud::Server>]
  def listNodes
  nodes = []
- me = MU::Cloud::AWS::ServerPool.find(cloud_id: cloud_id)
- if me and me.first and me.first.instances
- me.first.instances.each { |instance|
+ me = MU::Cloud::AWS::ServerPool.find(cloud_id: cloud_id).values.first
+ pp me
+ if me and me.instances
+ me.instances.each { |instance|
  found = MU::MommaCat.findStray("AWS", "server", cloud_id: instance.instance_id, region: @config["region"], dummy_ok: true)
  nodes.concat(found)
  }
@@ -425,6 +426,7 @@ module MU
  # @return [OpenStruct]
  def cloud_desc(use_cache: true)
  return @cloud_desc_cache if @cloud_desc_cache and use_cache
+ return nil if !@cloud_id
  @cloud_desc_cache = MU::Cloud::AWS.autoscale(region: @config['region'], credentials: @config['credentials']).describe_auto_scaling_groups(
  auto_scaling_group_names: [@mu_name]
  ).auto_scaling_groups.first
@@ -531,14 +533,25 @@ module MU
  if cloud_desc.vpc_zone_identifier and
  !cloud_desc.vpc_zone_identifier.empty?
  nets = cloud_desc.vpc_zone_identifier.split(/,/)
- resp = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @credentials).describe_subnets(subnet_ids: nets).subnets.first
- bok['vpc'] = MU::Config::Ref.get(
- id: resp.vpc_id,
- cloud: "AWS",
- credentials: @credentials,
- type: "vpcs",
- subnets: nets.map { |s| { "subnet_id" => s } }
- )
+ begin
+ resp = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @credentials).describe_subnets(subnet_ids: nets).subnets.first
+ bok['vpc'] = MU::Config::Ref.get(
+ id: resp.vpc_id,
+ cloud: "AWS",
+ credentials: @credentials,
+ type: "vpcs",
+ subnets: nets.map { |s| { "subnet_id" => s } }
+ )
+ rescue Aws::EC2::Errors::InvalidSubnetIDNotFound => e
+ if e.message.match(/The subnet ID '(subnet-[a-f0-9]+)' does not exist/)
+ nets.delete(Regexp.last_match[1])
+ if nets.empty?
+ MU.log "Autoscale Group #{@cloud_id} was configured for a VPC, but the configuration held no valid subnets", MU::WARN, details: cloud_desc.vpc_zone_identifier.split(/,/)
+ end
+ else
+ raise e
+ end
+ end
  end

  # MU.log @cloud_id, MU::NOTICE, details: cloud_desc
@@ -813,26 +826,7 @@ module MU
  }
  }
  },
- "ingress_rules" => {
- "items" => {
- "properties" => {
- "sgs" => {
- "type" => "array",
- "items" => {
- "description" => "Other AWS Security Groups; resources that are associated with this group will have this rule applied to their traffic",
- "type" => "string"
- }
- },
- "lbs" => {
- "type" => "array",
- "items" => {
- "description" => "AWS Load Balancers which will have this rule applied to their traffic",
- "type" => "string"
- }
- }
- }
- }
- }
+ "ingress_rules" => MU::Cloud.resourceClass("AWS", "FirewallRule").ingressRuleAddtlSchema
  }
  [toplevel_required, schema]
  end
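
In the schema hunk above, the inline ingress_rules sub-schema moves to a shared helper on the AWS FirewallRule class. Judging from the removed lines, a method of that name plausibly returns roughly the following hash; this is a hypothetical reconstruction, not the canonical definition (which presumably lives in data/modules/mu/providers/aws/firewall_rule.rb):

    # Hypothetical reconstruction of ingressRuleAddtlSchema, based on the
    # schema fragment removed above; the real method may differ.
    def ingressRuleAddtlSchema
      {
        "items" => {
          "properties" => {
            "sgs" => {
              "type" => "array",
              "items" => {
                "description" => "Other AWS Security Groups; resources that are associated with this group will have this rule applied to their traffic",
                "type" => "string"
              }
            },
            "lbs" => {
              "type" => "array",
              "items" => {
                "description" => "AWS Load Balancers which will have this rule applied to their traffic",
                "type" => "string"
              }
            }
          }
        }
      }
    end
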
@@ -905,7 +899,7 @@ module MU
  launch = pool["basis"]["launch_config"]
  launch['iam_policies'] ||= pool['iam_policies']

- launch['size'] = MU::Cloud::AWS::Server.validateInstanceType(launch["size"], pool["region"])
+ launch['size'] = MU::Cloud.resourceClass("AWS", "Server").validateInstanceType(launch["size"], pool["region"])
  ok = false if launch['size'].nil?
  if !launch['generate_iam_role']
  if !launch['iam_role'] and pool['cloud'] != "CloudFormation"
@@ -949,11 +943,7 @@ module MU

  role['credentials'] = pool['credentials'] if pool['credentials']
  configurator.insertKitten(role, "roles")
- pool["dependencies"] ||= []
- pool["dependencies"] << {
- "type" => "role",
- "name" => pool["name"]
- }
+ MU::Config.addDependency(pool, pool['name'], "role")
  end
  launch["ami_id"] ||= launch["image_id"]
  if launch["server"].nil? and launch["instance_id"].nil? and launch["ami_id"].nil?
@@ -967,7 +957,7 @@ module MU
  end
  end
  if launch["server"] != nil
- pool["dependencies"] << {"type" => "server", "name" => launch["server"]}
+ MU::Config.addDependency(pool, launch["server"], "server", phase: "groom")
  # XXX I dunno, maybe toss an error if this isn't done already
  # servers.each { |server|
  # if server["name"] == launch["server"]
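
Hand-rolled pushes onto a resource's "dependencies" array are consolidated into MU::Config.addDependency, which in the second hunk also records a phase: "groom" option. A hypothetical stand-in that mirrors the inline hashes it replaces (the real helper in data/modules/mu/config.rb may deduplicate or validate differently):

    # Hypothetical stand-in for MU::Config.addDependency; the real method's
    # behavior may differ.
    def add_dependency(resource, name, type, phase: nil)
      resource["dependencies"] ||= []
      dep = { "type" => type, "name" => name }
      dep["phase"] = phase if phase
      resource["dependencies"] << dep unless resource["dependencies"].include?(dep)
    end

    # e.g. MU::Config.addDependency(pool, launch["server"], "server", phase: "groom")
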
@@ -1073,7 +1063,7 @@ module MU
  # @param ignoremaster [Boolean]: If true, will remove resources not flagged as originating from this Mu server
  # @param region [String]: The cloud provider region
  # @return [void]
- def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {})
+ def self.cleanup(noop: false, deploy_id: MU.deploy_id, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {})
  MU.log "AWS::ServerPool.cleanup: need to support flags['known']", MU::DEBUG, details: flags

  filters = [{name: "key", values: ["MU-ID"]}]
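
The cleanup signature change above recurs across providers in this release: self.cleanup now takes an explicit deploy_id: keyword, defaulting to the thread-local MU.deploy_id, so a caller can target a specific deploy instead of relying on ambient state. A hedged usage sketch; the deploy identifier and region are placeholders:

    # Dry-run cleanup of one deploy's Autoscale resources; noop: true keeps
    # this from deleting anything, and the MU-ID value is illustrative.
    MU::Cloud.resourceClass("AWS", "ServerPool").cleanup(
      noop: true,
      deploy_id: "DEMO-DEV-2021012100-AB",
      region: "us-east-1",
      credentials: nil
    )
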
@@ -1096,7 +1086,7 @@ module MU
  if asg.key == "MU-MASTER-IP" and asg.value != MU.mu_public_ip and !ignoremaster
  no_purge << asg.resource_id
  end
- if asg.key == "MU-ID" and asg.value == MU.deploy_id
+ if asg.key == "MU-ID" and asg.value == deploy_id
  maybe_purge << asg.resource_id
  end
  }
@@ -1123,7 +1113,7 @@ module MU
  end
  end

- # MU::Cloud::AWS::Server.removeIAMProfile(resource_id)
+ # MU::Cloud.resourceClass("AWS", "Server").removeIAMProfile(resource_id)

  # Generally there should be a launch_configuration of the same name
  # XXX search for these independently, too?
@@ -1164,14 +1154,14 @@ module MU
  @config['basis']['launch_config']["ami_id"] = @deploy.deployment["images"][@config['basis']['launch_config']["server"]]["image_id"]
  MU.log "Using AMI '#{@config['basis']['launch_config']["ami_id"]}' from sibling server #{@config['basis']['launch_config']["server"]} in ServerPool #{@mu_name}"
  elsif !@config['basis']['launch_config']["instance_id"].nil?
- @config['basis']['launch_config']["ami_id"] = MU::Cloud::AWS::Server.createImage(
+ @config['basis']['launch_config']["ami_id"] = MU::Cloud.resourceClass("AWS", "Server").createImage(
  name: @mu_name,
  instance_id: @config['basis']['launch_config']["instance_id"],
  credentials: @config['credentials'],
  region: @config['region']
  )[@config['region']]
  end
- MU::Cloud::AWS::Server.waitForAMI(@config['basis']['launch_config']["ami_id"], credentials: @config['credentials'])
+ MU::Cloud.resourceClass("AWS", "Server").waitForAMI(@config['basis']['launch_config']["ami_id"], credentials: @config['credentials'])

  oldlaunch = MU::Cloud::AWS.autoscale(region: @config['region'], credentials: @config['credentials']).describe_launch_configurations(
  launch_configuration_names: [@mu_name]
@@ -1226,12 +1216,12 @@ module MU
  vol.delete("encrypted")
  end
  end
- mapping, _cfm_mapping = MU::Cloud::AWS::Server.convertBlockDeviceMapping(vol)
+ mapping, _cfm_mapping = MU::Cloud.resourceClass("AWS", "Server").convertBlockDeviceMapping(vol)
  storage << mapping
  }
  end

- storage.concat(MU::Cloud::AWS::Server.ephemeral_mappings)
+ storage.concat(MU::Cloud.resourceClass("AWS", "Server").ephemeral_mappings)

  if @config['basis']['launch_config']['generate_iam_role']
  role = @deploy.findLitterMate(name: @config['name'], type: "roles")
@@ -1426,20 +1416,12 @@ module MU
  # XXX probably have to query API to get the DNS name of this one
  }
  elsif lb["concurrent_load_balancer"]
- raise MuError, "No loadbalancers exist! I need one named #{lb['concurrent_load_balancer']}" if !@deploy.deployment["loadbalancers"]
- found = false
- @deploy.deployment["loadbalancers"].each_pair { |lb_name, deployed_lb|
- if lb_name == lb['concurrent_load_balancer']
- lbs << deployed_lb["awsname"] # XXX check for classic
- if deployed_lb.has_key?("targetgroups")
- deployed_lb["targetgroups"].values.each { |tg_arn|
- tg_arns << tg_arn
- }
- end
- found = true
- end
- }
- raise MuError, "I need a loadbalancer named #{lb['concurrent_load_balancer']}, but none seems to have been created!" if !found
+ lb = @deploy.findLitterMate(name: lb['concurrent_load_balancer'], type: "loadbalancers")
+ raise MuError, "No loadbalancers exist! I need one named #{lb['concurrent_load_balancer']}" if !lb
+ lbs << lb.mu_name
+ if lb.targetgroups
+ tg_arns = lb.targetgroups.values.map { |tg| tg.target_group_arn }
+ end
  end
  }
  if tg_arns.size > 0
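
The last hunk above swaps a manual walk over @deploy.deployment["loadbalancers"] metadata for a findLitterMate sibling lookup that returns the live load balancer object. A usage sketch mirroring the added lines; the sibling name "www" is a placeholder:

    # Inside a ServerPool instance method, where @deploy is the current deploy.
    lbs, tg_arns = [], []
    sibling = @deploy.findLitterMate(name: "www", type: "loadbalancers")
    raise MuError, "No loadbalancer named www in this deploy" if !sibling
    lbs << sibling.mu_name
    if sibling.targetgroups
      tg_arns = sibling.targetgroups.values.map { |tg| tg.target_group_arn }
    end
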

data/modules/mu/{clouds → providers}/aws/storage_pool.rb

@@ -67,7 +67,7 @@ module MU
  if target['vpc']["subnet_name"]
  subnet_obj = vpc.getSubnet(name: target['vpc']["subnet_name"])
  if subnet_obj.nil?
- raise MuError, "Failed to locate subnet from #{subnet} in StoragePool #{@config['name']}:#{target['name']}"
+ raise MuError, "Failed to locate subnet from #{target['vpc']["subnet_name"]} in StoragePool #{@config['name']}:#{target['name']}"
  end
  target['vpc']['subnet_id'] = subnet_obj.cloud_id
  end
@@ -261,49 +261,29 @@ module MU
  targets = {}

  if @config['mount_points'] && !@config['mount_points'].empty?
+ mount_targets = MU::Cloud::AWS.efs(region: @config['region'], credentials: @config['credentials']).describe_mount_targets(
+ file_system_id: storage_pool.file_system_id
+ ).mount_targets
+
  @config['mount_points'].each { |mp|
  subnet = nil
  dependencies
- mp_vpc = if mp['vpc'] and mp['vpc']['vpc_name']
- @deploy.findLitterMate(type: "vpc", name: mp['vpc']['vpc_name'], credentials: @config['credentials'])
- elsif mp['vpc']
- MU::MommaCat.findStray(
- @config['cloud'],
- "vpcs",
- deploy_id: mp['vpc']["deploy_id"],
- credentials: @config['credentials'],
- mu_name: mp['vpc']["mu_name"],
- cloud_id: mp['vpc']['vpc_id'],
- region: @config['region'],
- dummy_ok: false
- ).first
- # XXX non-sibling, findStray version
- end
+ mp_vpc = MU::Config::Ref.get(mp['vpc']).kitten

- mount_targets = MU::Cloud::AWS.efs(region: @config['region'], credentials: @config['credentials']).describe_mount_targets(
- file_system_id: storage_pool.file_system_id
- ).mount_targets

- # subnet_obj = mp_vpc.subnets.select { |s|
- # s.name == mp["vpc"]["subnet_name"] or s.cloud_id == mp["vpc"]["subnet_id"]
- # }.first
+ subnet_obj = mp_vpc.subnets.select { |s|
+ s.name == mp["vpc"]["subnet_name"] or s.cloud_id == mp["vpc"]["subnet_id"]
+ }.first
  mount_target = nil
- mp_vpc.subnets.each { |subnet_obj|
- mount_targets.map { |t|
- subnet_cidr_obj = NetAddr::IPv4Net.parse(subnet_obj.ip_block)
- if subnet_cidr_obj.contains(NetAddr::IPv4.parse(t.ip_address))
- mount_target = t
- subnet = subnet_obj.cloud_desc
- end
- }
- break if mount_target
+ mount_targets.each { |t|
+ subnet_cidr_obj = NetAddr::IPv4Net.parse(subnet_obj.ip_block)
+ if subnet_cidr_obj.contains(NetAddr::IPv4.parse(t.ip_address))
+ mount_target = t
+ subnet = subnet_obj.cloud_desc
+ break
+ end
  }

- # mount_target = MU::Cloud::AWS.efs(region: @config['region'], credentials: @config['credentials']).describe_mount_targets(
- # mount_target_id: mp["cloud_id"]
- # ).mount_targets.first
-
-
  targets[mp["name"]] = {
  "owner_id" => mount_target.owner_id,
  "cloud_id" => mount_target.mount_target_id,
@@ -353,7 +333,7 @@ module MU
  # @param ignoremaster [Boolean]: If true, will remove resources not flagged as originating from this Mu server
  # @param region [String]: The cloud provider region in which to operate
  # @return [void]
- def self.cleanup(noop: false, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {})
+ def self.cleanup(noop: false, deploy_id: MU.deploy_id, ignoremaster: false, region: MU.curRegion, credentials: nil, flags: {})
  MU.log "AWS::StoragePool.cleanup: need to support flags['known']", MU::DEBUG, details: flags

  supported_regions = %w{us-west-2 us-east-1 eu-west-1}
@@ -378,7 +358,7 @@ module MU
  found_muid = false
  found_master = false
  tags.each { |tag|
- found_muid = true if tag.key == "MU-ID" && tag.value == MU.deploy_id
+ found_muid = true if tag.key == "MU-ID" && tag.value == deploy_id
  found_master = true if tag.key == "MU-MASTER-IP" && tag.value == MU.mu_public_ip
  }
  next if !found_muid
@@ -493,6 +473,9 @@ module MU

  if pool['mount_points'] && !pool['mount_points'].empty?
  pool['mount_points'].each{ |mp|
+ if mp['vpc'] and mp['vpc']['name']
+ MU::Config.addDependency(pool, mp['vpc']['name'], "vpc")
+ end
  if mp['ingress_rules']
  fwname = "storage-#{mp['name']}"
  acl = {

data/modules/mu/{clouds → providers}/aws/user.rb

@@ -109,7 +109,7 @@ module MU
  # Create these if necessary, then append them to the list of
  # attachable_policies
  if @config['raw_policies']
- pol_arns = MU::Cloud::AWS::Role.manageRawPolicies(
+ pol_arns = MU::Cloud.resourceClass("AWS", "Role").manageRawPolicies(
  @config['raw_policies'],
  basename: @deploy.getResourceName(@config['name']),
  credentials: @credentials
@@ -135,7 +135,7 @@ module MU
  attached_policies.each { |a|
  if !configured_policies.include?(a.policy_arn)
  MU.log "Removing IAM policy #{a.policy_arn} from user #{@mu_name}", MU::NOTICE
- MU::Cloud::AWS::Role.purgePolicy(a.policy_arn, @credentials)
+ MU::Cloud.resourceClass("AWS", "Role").purgePolicy(a.policy_arn, @credentials)
  else
  configured_policies.delete(a.policy_arn)
  end
@@ -151,7 +151,7 @@ module MU
  end

  if @config['inline_policies']
- docs = MU::Cloud::AWS::Role.genPolicyDocument(@config['inline_policies'], deploy_obj: @deploy)
+ docs = MU::Cloud.resourceClass("AWS", "Role").genPolicyDocument(@config['inline_policies'], deploy_obj: @deploy)
  docs.each { |doc|
  MU.log "Putting user policy #{doc.keys.first} to user #{@cloud_id} "
  MU::Cloud::AWS.iam(credentials: @credentials).put_user_policy(
@@ -190,16 +190,16 @@ module MU
  # @param noop [Boolean]: If true, will only print what would be done
  # @param ignoremaster [Boolean]: If true, will remove resources not flagged as originating from this Mu server
  # @return [void]
- def self.cleanup(noop: false, ignoremaster: false, credentials: nil, flags: {})
+ def self.cleanup(noop: false, deploy_id: MU.deploy_id, ignoremaster: false, credentials: nil, flags: {})
  MU.log "AWS::User.cleanup: need to support flags['known']", MU::DEBUG, details: flags

  # XXX this doesn't belong here; maybe under roles, maybe as its own stupid first-class resource
  resp = MU::Cloud::AWS.iam(credentials: credentials).list_policies(
- path_prefix: "/"+MU.deploy_id+"/"
+ path_prefix: "/"+deploy_id+"/"
  )
  if resp and resp.policies
  resp.policies.each { |policy|
- MU.log "Deleting policy /#{MU.deploy_id}/#{policy.policy_name}"
+ MU.log "Deleting policy /#{deploy_id}/#{policy.policy_name}"
  if !noop
  attachments = begin
  MU::Cloud::AWS.iam(credentials: credentials).list_entities_for_policy(
@@ -277,7 +277,7 @@ MU.log e.inspect, MU::ERR, details: policy
  has_ourdeploy = false
  has_ourmaster = false
  tags.each { |tag|
- if tag.key == "MU-ID" and tag.value == MU.deploy_id
+ if tag.key == "MU-ID" and tag.value == deploy_id
  has_ourdeploy = true
  elsif tag.key == "MU-MASTER-IP" and tag.value == MU.mu_public_ip
  has_ourmaster = true
@@ -431,7 +431,7 @@ MU.log e.inspect, MU::ERR, details: policy
  resp.policy_names.each { |pol_name|
  pol = MU::Cloud::AWS.iam(credentials: @credentials).get_user_policy(user_name: @cloud_id, policy_name: pol_name)
  doc = JSON.parse(URI.decode(pol.policy_document))
- bok["inline_policies"] = MU::Cloud::AWS::Role.doc2MuPolicies(pol.policy_name, doc, bok["inline_policies"])
+ bok["inline_policies"] = MU::Cloud.resourceClass("AWS", "Role").doc2MuPolicies(pol.policy_name, doc, bok["inline_policies"])
  }
  end

@@ -465,7 +465,7 @@ MU.log e.inspect, MU::ERR, details: policy
  def self.schema(_config)
  toplevel_required = []
  polschema = MU::Config::Role.schema["properties"]["policies"]
- polschema.deep_merge!(MU::Cloud::AWS::Role.condition_schema)
+ polschema.deep_merge!(MU::Cloud.resourceClass("AWS", "Role").condition_schema)

  schema = {
  "inline_policies" => polschema,
@@ -517,7 +517,7 @@ style long name, like +IAMTESTS-DEV-2018112815-IS-USER-FOO+"
  # If we're attaching some managed policies, make sure all of the ones
  # that should already exist do indeed exist
  if user['attachable_policies']
- ok = false if !MU::Cloud::AWS::Role.validateAttachablePolicies(
+ ok = false if !MU::Cloud.resourceClass("AWS", "Role").validateAttachablePolicies(
  user['attachable_policies'],
  credentials: user['credentials'],
  region: user['region']
@@ -530,7 +530,7 @@ style long name, like +IAMTESTS-DEV-2018112815-IS-USER-FOO+"
  if configurator.haveLitterMate?(group, "groups")
  need_dependency = true
  else
- found = MU::Cloud::AWS::Group.find(cloud_id: group)
+ found = MU::Cloud.resourceClass("AWS", "Group").find(cloud_id: group)
  if found.nil? or found.empty? or (configurator.updating and
  found.values.first.group.path == "/"+configurator.updating+"/")
  groupdesc = {
@@ -542,11 +542,7 @@ style long name, like +IAMTESTS-DEV-2018112815-IS-USER-FOO+"
  end

  if need_dependency
- user["dependencies"] ||= []
- user["dependencies"] << {
- "type" => "group",
- "name" => group
- }
+ MU::Config.addDependency(user, group, "group")
  end
  }
  end

data/modules/mu/{clouds → providers}/aws/userdata/linux.erb

@@ -42,7 +42,7 @@ if ping -c 5 8.8.8.8 > /dev/null; then
  <% if !$mu.skipApplyUpdates %>
  set +e
  if [ ! -f /.mu-installer-ran-updates ];then
- service ssh stop
+ echo "Applying package updates" > /etc/nologin
  apt-get --fix-missing -y upgrade
  touch /.mu-installer-ran-updates
  if [ $? -eq 0 ]
@@ -58,7 +58,7 @@ if ping -c 5 8.8.8.8 > /dev/null; then
  else
  echo "FAILED PACKAGE UPDATE" >&2
  fi
- service ssh start
+ rm -f /etc/nologin
  fi
  <% end %>
  elif [ -x /usr/bin/yum ];then
@@ -94,7 +94,7 @@ if ping -c 5 8.8.8.8 > /dev/null; then
  <% if !$mu.skipApplyUpdates %>
  set +e
  if [ ! -f /.mu-installer-ran-updates ];then
- service sshd stop
+ echo "Applying package updates" > /etc/nologin
  kernel_update=`yum list updates | grep kernel`
  yum -y update
  touch /.mu-installer-ran-updates
@@ -108,7 +108,7 @@ if ping -c 5 8.8.8.8 > /dev/null; then
  else
  echo "FAILED PACKAGE UPDATE" >&2
  fi
- service sshd start
+ rm -f /etc/nologin
  fi
  <% end %>
  fi
@@ -116,6 +116,7 @@ else
  /bin/logger "***** Unable to verify internet connectivity, skipping package updates from userdata"
  touch /.mu-installer-ran-updates
  fi
+ rm -f /etc/nologin

  AWSCLI='command -v aws'
  PIP='command -v pip'