cloud-mu 3.0.0beta → 3.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. checksums.yaml +4 -4
  2. data/README.md +17 -8
  3. data/ansible/roles/mu-nat/README.md +33 -0
  4. data/ansible/roles/mu-nat/defaults/main.yml +3 -0
  5. data/ansible/roles/mu-nat/handlers/main.yml +2 -0
  6. data/ansible/roles/mu-nat/meta/main.yml +60 -0
  7. data/ansible/roles/mu-nat/tasks/main.yml +65 -0
  8. data/ansible/roles/mu-nat/tests/inventory +2 -0
  9. data/ansible/roles/mu-nat/tests/test.yml +5 -0
  10. data/ansible/roles/mu-nat/vars/main.yml +2 -0
  11. data/bin/mu-cleanup +2 -1
  12. data/bin/mu-configure +950 -948
  13. data/bin/mu-gen-docs +6 -0
  14. data/cloud-mu.gemspec +2 -2
  15. data/cookbooks/mu-tools/recipes/gcloud.rb +8 -1
  16. data/modules/mommacat.ru +1 -1
  17. data/modules/mu.rb +31 -39
  18. data/modules/mu/cloud.rb +11 -1
  19. data/modules/mu/clouds/aws.rb +8 -3
  20. data/modules/mu/clouds/aws/alarm.rb +5 -8
  21. data/modules/mu/clouds/aws/bucket.rb +15 -9
  22. data/modules/mu/clouds/aws/cache_cluster.rb +60 -26
  23. data/modules/mu/clouds/aws/collection.rb +4 -4
  24. data/modules/mu/clouds/aws/container_cluster.rb +50 -33
  25. data/modules/mu/clouds/aws/database.rb +25 -21
  26. data/modules/mu/clouds/aws/dnszone.rb +12 -14
  27. data/modules/mu/clouds/aws/endpoint.rb +5 -8
  28. data/modules/mu/clouds/aws/firewall_rule.rb +9 -4
  29. data/modules/mu/clouds/aws/folder.rb +4 -7
  30. data/modules/mu/clouds/aws/function.rb +5 -8
  31. data/modules/mu/clouds/aws/group.rb +5 -8
  32. data/modules/mu/clouds/aws/habitat.rb +2 -5
  33. data/modules/mu/clouds/aws/loadbalancer.rb +12 -16
  34. data/modules/mu/clouds/aws/log.rb +6 -9
  35. data/modules/mu/clouds/aws/msg_queue.rb +16 -19
  36. data/modules/mu/clouds/aws/nosqldb.rb +27 -18
  37. data/modules/mu/clouds/aws/notifier.rb +6 -9
  38. data/modules/mu/clouds/aws/role.rb +4 -7
  39. data/modules/mu/clouds/aws/search_domain.rb +50 -23
  40. data/modules/mu/clouds/aws/server.rb +20 -14
  41. data/modules/mu/clouds/aws/server_pool.rb +22 -12
  42. data/modules/mu/clouds/aws/storage_pool.rb +9 -14
  43. data/modules/mu/clouds/aws/user.rb +5 -8
  44. data/modules/mu/clouds/aws/userdata/linux.erb +7 -1
  45. data/modules/mu/clouds/aws/vpc.rb +16 -14
  46. data/modules/mu/clouds/azure.rb +1 -1
  47. data/modules/mu/clouds/azure/container_cluster.rb +1 -1
  48. data/modules/mu/clouds/azure/server.rb +16 -2
  49. data/modules/mu/clouds/azure/user.rb +1 -1
  50. data/modules/mu/clouds/azure/userdata/linux.erb +84 -80
  51. data/modules/mu/clouds/azure/vpc.rb +32 -13
  52. data/modules/mu/clouds/cloudformation/server.rb +1 -1
  53. data/modules/mu/clouds/google.rb +2 -3
  54. data/modules/mu/clouds/google/container_cluster.rb +9 -1
  55. data/modules/mu/clouds/google/firewall_rule.rb +6 -0
  56. data/modules/mu/clouds/google/role.rb +1 -3
  57. data/modules/mu/clouds/google/server.rb +25 -4
  58. data/modules/mu/clouds/google/user.rb +1 -1
  59. data/modules/mu/clouds/google/userdata/linux.erb +9 -5
  60. data/modules/mu/clouds/google/vpc.rb +102 -21
  61. data/modules/mu/config.rb +250 -49
  62. data/modules/mu/config/alarm.rb +1 -0
  63. data/modules/mu/config/container_cluster.yml +0 -1
  64. data/modules/mu/config/database.yml +4 -1
  65. data/modules/mu/config/search_domain.yml +4 -3
  66. data/modules/mu/config/server.rb +7 -3
  67. data/modules/mu/config/server.yml +4 -1
  68. data/modules/mu/config/server_pool.yml +2 -0
  69. data/modules/mu/config/vpc.rb +42 -29
  70. data/modules/mu/deploy.rb +12 -5
  71. data/modules/mu/groomers/ansible.rb +4 -1
  72. data/modules/mu/groomers/chef.rb +5 -1
  73. data/modules/mu/kittens.rb +60 -11
  74. data/modules/mu/logger.rb +6 -4
  75. data/modules/mu/mommacat.rb +39 -19
  76. data/modules/mu/mu.yaml.rb +276 -0
  77. metadata +13 -4
@@ -104,40 +104,35 @@ module MU
  end

  # Locate an existing storage pool and return an array containing matching AWS resource descriptors for those that match.
- # @param cloud_id [String]: The cloud provider's identifier for this resource.
- # @param region [String]: The cloud provider region
- # @param tag_key [String]: A tag key to search.
- # @param tag_value [String]: The value of the tag specified by tag_key to match when searching by tag.
- # @param flags [Hash]: Optional flags
- # @return [Array<Hash<String,OpenStruct>>]: The cloud provider's complete descriptions of matching storage pool
- def self.find(cloud_id: nil, region: MU.curRegion, tag_key: "Name", tag_value: nil, credentials: nil, flags: {})
+ # @return [Hash<String,OpenStruct>]: The cloud provider's complete descriptions of matching storage pool
+ def self.find(**args)
  map = {}
- if cloud_id
- storge_pool = MU::Cloud::AWS.efs(region: region, credentials: credentials).describe_file_systems(
- file_system_id: cloud_id
+ if args[:cloud_id]
+ storge_pool = MU::Cloud::AWS.efs(region: args[:region], credentials: args[:credentials]).describe_file_systems(
+ file_system_id: args[:cloud_id]
  ).file_systems.first

  map[cloud_id] = storge_pool if storge_pool
  end

  if tag_value
- storage_pools = MU::Cloud::AWS.efs(region: region, credentials: credentials).describe_file_systems.file_systems
+ storage_pools = MU::Cloud::AWS.efs(region: args[:region], credentials: args[:credentials]).describe_file_systems.file_systems

  if !storage_pools.empty?
  storage_pools.each{ |pool|
- tags = MU::Cloud::AWS.efs(region: region, credentials: credentials).describe_tags(
+ tags = MU::Cloud::AWS.efs(region: args[:region], credentials: args[:credentials]).describe_tags(
  file_system_id: pool.file_system_id
  ).tags

  value = nil
  tags.each{ |tag|
- if tag.key == tag_key
+ if tag.key == args[:tag_key]
  value = tag.value
  break
  end
  }

- if value == tag_value
+ if value == args[:tag_value]
  map[pool.file_system_id] = pool
  break
  end
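Note: across this release, per-resource find methods move from explicit keyword signatures to a single **args splat, with options read out of the hash. A minimal illustrative sketch of that call pattern (the FindPattern module and its defaults are invented for this example, not part of the gem):

    # Hypothetical stand-in showing the keyword-splat find style: callers pass
    # any subset of options, and the method pulls them from the args hash.
    module FindPattern
      def self.find(**args)
        region  = args[:region]  || "us-east-1"   # stand-in for MU.curRegion
        tag_key = args[:tag_key] || "Name"
        matches = {}
        # ...cloud lookups keyed by args[:cloud_id] or args[:tag_value] would go here...
        matches
      end
    end

    FindPattern.find(cloud_id: "fs-12345678", region: "us-east-1")
    FindPattern.find(tag_key: "Name", tag_value: "my-efs")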
@@ -280,19 +280,16 @@ module MU
  cloud_desc.arn
  end

- # Locate an existing user group.
- # @param cloud_id [String]: The cloud provider's identifier for this resource.
- # @param region [String]: The cloud provider region.
- # @param flags [Hash]: Optional flags
- # @return [OpenStruct]: The cloud provider's complete descriptions of matching user group.
- def self.find(cloud_id: nil, region: MU.curRegion, credentials: nil, flags: {})
+ # Locate an existing IAM user
+ # @return [Hash<String,OpenStruct>]: The cloud provider's complete descriptions of matching user group.
+ def self.find(**args)
  found = nil

  begin
- resp = MU::Cloud::AWS.iam.get_user(user_name: cloud_id)
+ resp = MU::Cloud::AWS.iam.get_user(user_name: args[:cloud_id])
  if resp and resp.user
  found ||= {}
- found[cloud_id] = resp.user
+ found[args[:cloud_id]] = resp.user
  end
  rescue ::Aws::IAM::Errors::NoSuchEntity
  end
@@ -24,6 +24,12 @@ for d in r s t u ;do
  fi
  done

+ for f in /etc/rc.local /etc/rc.d/rc.local;do
+ if [ -f $f ];then
+ chmod 755 $f
+ fi
+ done
+
  if ping -c 5 8.8.8.8 > /dev/null; then
  if [ -f /etc/debian_version ];then
  if ! grep '^/bin/sh /var/lib/cloud/instance/user-data.txt$' /etc/rc.local > /dev/null;then
@@ -72,12 +78,12 @@ if ping -c 5 8.8.8.8 > /dev/null; then
  cat /etc/rc.d/rc.local | grep -v '^/bin/sh /var/lib/cloud/instances/' >> /tmp/rc.local.$$
  echo "/bin/sh $userdata_dir/user-data.txt" >> /tmp/rc.local.$$
  mv /tmp/rc.local.$$ /etc/rc.d/rc.local
+ chmod 755 /etc/rc.d/rc.local
  fi

  sed -i 's/^Defaults.*requiretty$/Defaults !requiretty/' /etc/sudoers

  if [ "$version" == "7" ];then
- chmod 755 /etc/rc.d/rc.local
  systemctl reset-failed sshd.service
  fi
  if [ ! -f /usr/bin/curl ] ;then /usr/bin/yum -y install curl;fi
@@ -208,7 +208,7 @@ module MU
  begin
  if resp.state != "available"
  begin
- MU.log "Waiting for Subnet #{subnet_name} (#{subnet_id}) to be available", MU::NOTICE
+ MU.log "Waiting for Subnet #{subnet_name} (#{subnet_id}) to be available", MU::NOTICE if retries > 0 and (retries % 3) == 0
  sleep 5
  resp = MU::Cloud::AWS.ec2(region: @config['region'], credentials: @config['credentials']).describe_subnets(subnet_ids: [subnet_id]).subnets.first
  rescue Aws::EC2::Errors::InvalidSubnetIDNotFound => e
@@ -296,7 +296,7 @@ module MU
  nat_gateway_id = resp.nat_gateway_id
  attempts = 0
  MU::MommaCat.unlock("nat-gateway-eipalloc")
- while resp.state == "pending"
+ while resp.class.name != "Aws::EC2::Types::NatGateway" or resp.state == "pending"
  MU.log "Waiting for nat gateway #{nat_gateway_id} () to become available (EIP allocation: #{allocation_id})" if attempts % 5 == 0
  sleep 30
  begin
@@ -554,7 +554,7 @@ MU.log "wtf", MU::ERR, details: peer if peer_obj.nil? or peer_obj.first.nil?
  },
  {
  name: "accepter-vpc-info.vpc-id",
- values: [peer_id]
+ values: [peer_id.to_s]
  }
  ]
  )
@@ -717,12 +717,7 @@ MU.log "wtf", MU::ERR, details: peer if peer_obj.nil? or peer_obj.first.nil?
  end

  # Locate an existing VPC or VPCs and return an array containing matching AWS resource descriptors for those that match.
- # @param cloud_id [String]: The cloud provider's identifier for this resource.
- # @param region [String]: The cloud provider region
- # @param tag_key [String]: A tag key to search.
- # @param tag_value [String]: The value of the tag specified by tag_key to match when searching by tag.
- # @return [Array<Hash<String,OpenStruct>>]: The cloud provider's complete descriptions of matching VPCs
- # def self.find(cloud_id: nil, region: MU.curRegion, tag_key: "Name", tag_value: nil, credentials: nil, flags: {})
+ # @return [Hash<String,OpenStruct>]: The cloud provider's complete descriptions of matching VPCs
  def self.find(**args)
  cloud_id = args[:cloud_id]
  region = args[:region] || MU.curRegion
@@ -1870,25 +1865,32 @@ MU.log "wtf", MU::ERR, details: peer if peer_obj.nil? or peer_obj.first.nil?

  retries = 0
  subnets.each { |subnet|
+ MU.log "Deleting Subnet #{subnet.subnet_id}"
  begin
  if subnet.state != "available"
  MU.log "Waiting for #{subnet.subnet_id} to be in a removable state...", MU::NOTICE
  sleep 30
  else
- MU.log "Deleting Subnet #{subnet.subnet_id}"
  MU::Cloud::AWS.ec2(credentials: credentials, region: region).delete_subnet(subnet_id: subnet.subnet_id) if !noop
  end
  rescue Aws::EC2::Errors::DependencyViolation => e
- if retries < 7
- MU.log "#{e.inspect}, retrying in 10s", MU::WARN
- sleep 10
+ # We're often stuck waiting for an RDS database or something else
+ # that takes 5-ever to delete.
+ if retries < 19
+ loglevel = (retries > 0 and (retries % 3) == 0) ? MU::NOTICE : MU::DEBUG
+ MU.log "#{e.message} (retry #{retries.to_s}/20)", loglevel
+ sleep 30
+ retries = retries + 1
+ retry
+ elsif retries < 20
+ MU.log "#{e.message} (final attempt)", MU::WARN
+ sleep 60
  retries = retries + 1
  retry
  else
  raise e
  end
  rescue Aws::EC2::Errors::InvalidSubnetIDNotFound
- MU.log "Subnet #{subnet.subnet_id} disappeared before I could remove it", MU::WARN
  next
  end while subnet.state != "available"
  }
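Note: the subnet-deletion retry above now waits longer between attempts, logs quietly on most retries, and only warns on the final try before giving up. A rough, self-contained sketch of that pattern (the simulated error and the short sleeps are illustrative only, not the gem's code):

    # Hypothetical retry-with-backoff sketch mirroring the shape of the change:
    # stay quiet for most retries, escalate on the last one, then give up.
    MAX_RETRIES = 20
    retries = 0
    begin
      raise "DependencyViolation" if retries < 4   # simulate a dependency that clears late
      puts "deleted"
    rescue RuntimeError => e
      if retries < MAX_RETRIES - 1
        puts "#{e.message} (retry #{retries}/#{MAX_RETRIES})" if retries > 0 and (retries % 3) == 0
        sleep 1            # the real code sleeps 30s
        retries += 1
        retry
      elsif retries < MAX_RETRIES
        puts "#{e.message} (final attempt)"
        sleep 2            # the real code sleeps 60s
        retries += 1
        retry
      else
        raise
      end
    end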
@@ -376,6 +376,7 @@ module MU
  rg_obj = MU::Cloud::Azure.resources(:ResourceGroup).new
  rg_obj.location = region
  rg_obj.tags = MU::MommaCat.listStandardTags
+ rg_obj.tags.reject! { |k, v| v.nil? }

  MU::Cloud::Azure.resources(credentials: credentials).resource_groups.list.each { |rg|
  if rg.name == name and rg.location == region and rg.tags == rg_obj.tags
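Note: the added reject! call strips nil-valued standard tags before the resource-group comparison and the subsequent create_or_update. A tiny illustrative example of the same Hash#reject! idiom (the tag names and values below are invented):

    # Hypothetical tag hash; reject! removes nil-valued pairs in place.
    tags = { "MU-ID" => "DEMO-2019", "MU-OWNER" => nil, "MU-MASTER-IP" => "10.0.0.1" }
    tags.reject! { |_key, value| value.nil? }
    p tags   # => {"MU-ID"=>"DEMO-2019", "MU-MASTER-IP"=>"10.0.0.1"}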
@@ -384,7 +385,6 @@ module MU
  end
  }
  MU.log "Configuring resource group #{name} in #{region}", details: rg_obj
-
  MU::Cloud::Azure.resources(credentials: credentials).resource_groups.create_or_update(
  name,
  rg_obj
@@ -71,7 +71,7 @@ module MU
  )
  end

- MU.log %Q{How to interact with your Kubernetes cluster\nkubectl --kubeconfig "#{kube_conf}" get events --all-namespaces\nkubectl --kubeconfig "#{kube_conf}" get all\nkubectl --kubeconfig "#{kube_conf}" create -f some_k8s_deploy.yml\nkubectl --kubeconfig "#{kube_conf}" get nodes}, MU::SUMMARY
+ MU.log %Q{How to interact with your AKS cluster\nkubectl --kubeconfig "#{kube_conf}" get events --all-namespaces\nkubectl --kubeconfig "#{kube_conf}" get all\nkubectl --kubeconfig "#{kube_conf}" create -f some_k8s_deploy.yml\nkubectl --kubeconfig "#{kube_conf}" get nodes}, MU::SUMMARY

  end

@@ -111,7 +111,8 @@ module MU
  "src_dst_check" => false,
  "bastion" => true,
  "size" => "Standard_B2s",
- "run_list" => [ "mu-utility::nat" ],
+ "run_list" => [ "mu-nat" ],
+ "groomer" => "Ansible",
  "platform" => "centos7",
  "associate_public_ip" => true,
  "static_ip" => { "assign_ip" => true },
@@ -469,7 +470,10 @@ module MU
  MU::Cloud.availableClouds.each { |cloud|
  next if cloud == "Azure"
  cloudbase = Object.const_get("MU").const_get("Cloud").const_get(cloud)
- foreign_types = (cloudbase.listInstanceTypes)[cloudbase.myRegion]
+ foreign_types = (cloudbase.listInstanceTypes).values.first
+ if foreign_types.size == 1
+ foreign_types = foreign_types.values.first
+ end
  if foreign_types and foreign_types.size > 0 and foreign_types.has_key?(size)
  vcpu = foreign_types[size]["vcpu"]
  mem = foreign_types[size]["memory"]
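Note: the size cross-check now takes the first entry of whatever listInstanceTypes returns instead of indexing by myRegion, with an extra unwrap when the listing nests one level deeper. A small illustrative example of the data shape being assumed here (the region and size names are invented, not taken from the gem):

    # Hypothetical listInstanceTypes output: region => { size => attributes }.
    listing = {
      "us-east-1" => {
        "m5.large"  => { "vcpu" => 2, "memory" => 8 },
        "m5.xlarge" => { "vcpu" => 4, "memory" => 16 }
      }
    }
    foreign_types = listing.values.first
    # If the table is wrapped one level deeper (a single-key hash), unwrap it.
    foreign_types = foreign_types.values.first if foreign_types.size == 1
    p foreign_types["m5.large"]   # => {"vcpu"=>2, "memory"=>8}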
@@ -491,6 +495,7 @@ module MU

  if !foundmatch
  MU.log "Invalid size '#{size}' for Azure Compute instance in #{region}. Supported types:", MU::ERR, details: types.keys.sort.join(", ")
+ exit
  return nil
  end
  end
@@ -509,6 +514,8 @@ module MU
  server['ssh_user'] ||= "muadmin"

  server['size'] = validateInstanceType(server["size"], server["region"])
+ ok = false if server['size'].nil?
+
  if server['image_id'].nil?
  img_id = MU::Cloud.getStockImage("Azure", platform: server['platform'])
  if img_id
@@ -797,8 +804,15 @@ module MU
  if !@cloud_id
  # XXX actually guard this correctly
  MU.log "Creating VM #{@mu_name}", details: vm_obj
+ begin
  vm = MU::Cloud::Azure.compute(credentials: @credentials).virtual_machines.create_or_update(@resource_group, @mu_name, vm_obj)
  @cloud_id = Id.new(vm.id)
+ rescue ::MU::Cloud::Azure::APIError => e
+ if e.message.match(/InvalidParameter: /)
+ MU.log e.message, MU::ERR, details: vm_obj
+ end
+ raise e
+ end
  end

  end
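Note: the new begin/rescue around create_or_update logs the offending request object when the API reports an invalid parameter, then re-raises so the failure still surfaces. A self-contained sketch of that log-then-reraise pattern (FakeAPIError and create_vm are stand-ins, not cloud-mu code):

    # Hypothetical API error class and call, to show the idiom.
    class FakeAPIError < StandardError; end

    def create_vm(vm_obj)
      raise FakeAPIError, "InvalidParameter: adminPassword is required" if vm_obj[:admin_password].nil?
      "vm-id-123"
    end

    vm_obj = { name: "demo-vm", admin_password: nil }
    begin
      begin
        create_vm(vm_obj)
      rescue FakeAPIError => e
        # Surface the request that triggered the complaint, then let it propagate.
        warn "#{e.message} -- request: #{vm_obj.inspect}" if e.message.match(/InvalidParameter: /)
        raise e
      end
    rescue FakeAPIError
      puts "error still propagated to the caller"
    end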
@@ -25,7 +25,7 @@ module MU

  if !mu_name.nil?
  @mu_name = mu_name
- @cloud_id = Id.new(cloud_desc.id) if @cloud_id
+ @cloud_id = Id.new(cloud_desc.id) if @cloud_id and cloud_desc
  else
  @mu_name ||= @deploy.getResourceName(@config["name"], max_length: 31)
  end
@@ -15,84 +15,89 @@

  updates_run=0
  need_reboot=0
- instance_id="`curl http://metadata.google.internal/computeMetadata/v1/instance/name`"
+ instance_id="`curl -H Metadata:true 'http://169.254.169.254/metadata/instance/compute/name?api-version=2017-08-01&format=text'`"
+
+ for f in /etc/rc.local /etc/rc.d/rc.local;do
+ if [ -f $f ];then
+ chmod 755 $f
+ fi
+ done
+
  if [ -f /etc/debian_version ];then
- if ! grep '^/bin/sh /var/lib/cloud/instance/user-data.txt$' /etc/rc.local > /dev/null;then
- echo "/bin/sh /var/lib/cloud/instance/user-data.txt" >> /etc/rc.local
- fi
- apt-get update -y
- if [ ! -f /usr/bin/curl ] ;then /usr/bin/apt-get --fix-missing -y install curl;fi
+ if ! grep '^/bin/sh /var/lib/cloud/instance/user-data.txt$' /etc/rc.local > /dev/null;then
+ echo "/bin/sh /var/lib/cloud/instance/user-data.txt" >> /etc/rc.local
+ fi
+ apt-get update -y
+ if [ ! -f /usr/bin/curl ] ;then /usr/bin/apt-get --fix-missing -y install curl;fi
  <% if !$mu.skipApplyUpdates %>
- if [ ! -f /.mu-installer-ran-updates ];then
- service ssh stop
- apt-get --fix-missing -y upgrade
- if [ $? -eq 0 ]
- then
- echo "Successfully updated packages"
- updates_run=1
- else
- echo "FAILED PACKAGE UPDATE" >&2
- fi
- # Proceed regardless
- touch /.mu-installer-ran-updates
+ if [ ! -f /.mu-installer-ran-updates ];then
+ service ssh stop
+ apt-get --fix-missing -y upgrade
+ if [ $? -eq 0 ]
+ then
+ echo "Successfully updated packages"
+ updates_run=1
+ else
+ echo "FAILED PACKAGE UPDATE" >&2
+ fi
+ # Proceed regardless
+ touch /.mu-installer-ran-updates

- # XXX this logic works on Ubuntu, is it Debian-friendly?
- latest_kernel="`ls -1 /boot/vmlinuz-* | sed -r 's/^\/boot\/vmlinuz-//' | tail -1`"
- running_kernel="`uname -r`"
- if [ "$running_kernel" != "$latest_kernel" -a "$latest_kernel" != "" ];then
- need_reboot=1
- else
- service ssh start
- fi
- fi
+ # XXX this logic works on Ubuntu, is it Debian-friendly?
+ latest_kernel="`ls -1 /boot/vmlinuz-* | sed -r 's/^\/boot\/vmlinuz-//' | tail -1`"
+ running_kernel="`uname -r`"
+ if [ "$running_kernel" != "$latest_kernel" -a "$latest_kernel" != "" ];then
+ need_reboot=1
+ else
+ service ssh start
+ fi
+ fi
  <% end %>
  elif [ -x /usr/bin/yum ];then
- version=`/bin/rpm -qa \*-release | grep -Ei "redhat|centos" | cut -d"-" -f3`
- if [ -z "$version" ];then
- amazon_version=`/bin/rpm -qa \*-release | grep -Ei "system-release"| cut -d"-" -f3 | cut -d"." -f1`
- if [ "$amazon_version" == "2014" ] || [ "$amazon_version" == "2015" ] || [ "$amazon_version" == "2016" ];then
- version=6
- fi
- fi
- if [ $version -eq 7 ];then
- userdata_dir="/var/lib/cloud/instances/$instance_id"
- else
- userdata_dir="/var/lib/cloud/instance"
- fi
- if ! grep "^/bin/sh $userdata_dir/user-data.txt$" /etc/rc.d/rc.local > /dev/null;then
- echo "/bin/sh $userdata_dir/user-data.txt" >> /etc/rc.d/rc.local
- fi
+ version=`/bin/rpm -qa \*-release | grep -Ei "redhat|centos" | cut -d"-" -f3`
+ if [ -z "$version" ];then
+ amazon_version=`/bin/rpm -qa \*-release | grep -Ei "system-release"| cut -d"-" -f3 | cut -d"." -f1`
+ if [ "$amazon_version" == "2014" ] || [ "$amazon_version" == "2015" ] || [ "$amazon_version" == "2016" ];then
+ version=6
+ fi
+ fi
+ if [ $version -eq 7 ];then
+ userdata_dir="/var/lib/cloud/instances/$instance_id"
+ else
+ userdata_dir="/var/lib/cloud/instance"
+ fi
+ if ! grep "^/bin/sh $userdata_dir/user-data.txt$" /etc/rc.d/rc.local > /dev/null;then
+ echo "/bin/sh $userdata_dir/user-data.txt" >> /etc/rc.d/rc.local
+ fi

  sed -i 's/^Defaults.*requiretty$/Defaults !requiretty/' /etc/sudoers

- if [ $version == 7 ];then
- chmod 755 /etc/rc.d/rc.local
- fi
- if [ ! -f /usr/bin/curl ] ;then /usr/bin/yum -y install curl;fi
- # Ugh, rando EPEL mirror
- if [ ! -f /etc/yum.repos.d/epel.repo ];then
- /bin/rpm -ivh http://mirror.metrocast.net/fedora/epel/epel-release-latest-$version.noarch.rpm
- fi
+ chmod 755 /etc/rc.d/rc.local
+ if [ ! -f /usr/bin/curl ] ;then /usr/bin/yum -y install curl;fi
+ # Ugh, rando EPEL mirror
+ if [ ! -f /etc/yum.repos.d/epel.repo ];then
+ /bin/rpm -ivh http://mirror.metrocast.net/fedora/epel/epel-release-latest-$version.noarch.rpm
+ fi
  <% if !$mu.skipApplyUpdates %>
- if [ ! -f /.mu-installer-ran-updates ];then
- service sshd stop
- kernel_update=`yum list updates | grep kernel`
- yum -y update
- if [ $? -eq 0 ]
- then
- echo "Successfully updated packages"
- updates_run=1
- else
- echo "FAILED PACKAGE UPDATE" >&2
- fi
- # Proceed regardless
- touch /.mu-installer-ran-updates
- if [ -n "$kernel_update" ]; then
- need_reboot=1
- else
- service sshd start
- fi
- fi
+ if [ ! -f /.mu-installer-ran-updates ];then
+ service sshd stop
+ kernel_update=`yum list updates | grep kernel`
+ yum -y update
+ if [ $? -eq 0 ]
+ then
+ echo "Successfully updated packages"
+ updates_run=1
+ else
+ echo "FAILED PACKAGE UPDATE" >&2
+ fi
+ # Proceed regardless
+ touch /.mu-installer-ran-updates
+ if [ -n "$kernel_update" ]; then
+ need_reboot=1
+ else
+ service sshd start
+ fi
+ fi
  <% end %>
  fi

@@ -100,20 +105,20 @@ umask 0077

  # Install Chef now, because why not?
  if [ ! -f /opt/chef/embedded/bin/ruby ];then
- curl https://www.chef.io/chef/install.sh > chef-install.sh
- set +e
- # We may run afoul of a synchronous bootstrap process doing the same thing. So
- # wait until we've managed to run successfully.
- while ! sh chef-install.sh -v <%= $mu.chefVersion %>;do
- sleep 10
- done
- touch /opt/mu_installed_chef
- set -e
+ curl https://www.chef.io/chef/install.sh > chef-install.sh
+ set +e
+ # We may run afoul of a synchronous bootstrap process doing the same thing. So
+ # wait until we've managed to run successfully.
+ while ! sh chef-install.sh -v <%= $mu.chefVersion %>;do
+ sleep 10
+ done
+ touch /opt/mu_installed_chef
+ set -e
  fi

  <% if !$mu.skipApplyUpdates %>
  if [ "$need_reboot" == "1" ];then
- shutdown -r now "Applying new kernel"
+ shutdown -r now "Applying new kernel"
  fi
  <% end %>

@@ -127,7 +132,6 @@ print Base64.urlsafe_encode64(key.public_encrypt(File.read("<%= $mu.muID %>-secr
  ' > encrypt_deploy_secret.rb

  deploykey="<%= $mu.deployKey %>"
- instance_id="`curl http://metadata.google.internal/computeMetadata/v1/instance/name`"

  # Make double-sure sshd is actually up
  service sshd restart