chef-provisioning-aws 3.0.4 → 3.0.6
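To move a project onto this release, the Gemfile pin changes accordingly (the gem name and version numbers come from the header above; the Gemfile line itself is an illustrative sketch):

    # Gemfile sketch (illustrative)
    gem "chef-provisioning-aws", "3.0.6"  # previously pinned to "3.0.4"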

Files changed (144)
  1. checksums.yaml +4 -4
  2. data/Gemfile +16 -5
  3. data/Rakefile +15 -6
  4. data/chef-provisioning-aws.gemspec +17 -17
  5. data/lib/chef/provider/aws_auto_scaling_group.rb +5 -6
  6. data/lib/chef/provider/aws_cache_cluster.rb +21 -15
  7. data/lib/chef/provider/aws_cache_replication_group.rb +12 -8
  8. data/lib/chef/provider/aws_cache_subnet_group.rb +3 -3
  9. data/lib/chef/provider/aws_cloudsearch_domain.rb +9 -13
  10. data/lib/chef/provider/aws_cloudwatch_alarm.rb +10 -12
  11. data/lib/chef/provider/aws_dhcp_options.rb +18 -21
  12. data/lib/chef/provider/aws_ebs_volume.rb +24 -26
  13. data/lib/chef/provider/aws_eip_address.rb +10 -13
  14. data/lib/chef/provider/aws_elasticsearch_domain.rb +19 -18
  15. data/lib/chef/provider/aws_iam_instance_profile.rb +5 -7
  16. data/lib/chef/provider/aws_iam_role.rb +14 -17
  17. data/lib/chef/provider/aws_image.rb +6 -6
  18. data/lib/chef/provider/aws_instance.rb +5 -5
  19. data/lib/chef/provider/aws_internet_gateway.rb +8 -11
  20. data/lib/chef/provider/aws_key_pair.rb +15 -17
  21. data/lib/chef/provider/aws_launch_configuration.rb +11 -14
  22. data/lib/chef/provider/aws_load_balancer.rb +1 -2
  23. data/lib/chef/provider/aws_nat_gateway.rb +6 -7
  24. data/lib/chef/provider/aws_network_acl.rb +28 -29
  25. data/lib/chef/provider/aws_network_interface.rb +25 -27
  26. data/lib/chef/provider/aws_rds_instance.rb +12 -13
  27. data/lib/chef/provider/aws_rds_parameter_group.rb +8 -8
  28. data/lib/chef/provider/aws_rds_subnet_group.rb +8 -9
  29. data/lib/chef/provider/aws_route_table.rb +19 -20
  30. data/lib/chef/provider/aws_s3_bucket.rb +22 -25
  31. data/lib/chef/provider/aws_security_group.rb +268 -285
  32. data/lib/chef/provider/aws_server_certificate.rb +6 -5
  33. data/lib/chef/provider/aws_sns_topic.rb +4 -6
  34. data/lib/chef/provider/aws_sqs_queue.rb +3 -4
  35. data/lib/chef/provider/aws_subnet.rb +29 -34
  36. data/lib/chef/provider/aws_vpc.rb +108 -116
  37. data/lib/chef/provider/aws_vpc_peering_connection.rb +11 -11
  38. data/lib/chef/provisioning/aws_driver.rb +4 -2
  39. data/lib/chef/provisioning/aws_driver/aws_provider.rb +234 -241
  40. data/lib/chef/provisioning/aws_driver/aws_rds_resource.rb +5 -7
  41. data/lib/chef/provisioning/aws_driver/aws_resource.rb +182 -185
  42. data/lib/chef/provisioning/aws_driver/aws_resource_with_entry.rb +17 -17
  43. data/lib/chef/provisioning/aws_driver/aws_taggable.rb +13 -15
  44. data/lib/chef/provisioning/aws_driver/aws_tagger.rb +47 -48
  45. data/lib/chef/provisioning/aws_driver/credentials.rb +96 -100
  46. data/lib/chef/provisioning/aws_driver/credentials2.rb +42 -45
  47. data/lib/chef/provisioning/aws_driver/driver.rb +1349 -1362
  48. data/lib/chef/provisioning/aws_driver/exceptions.rb +10 -12
  49. data/lib/chef/provisioning/aws_driver/super_lwrp.rb +60 -60
  50. data/lib/chef/provisioning/aws_driver/tagging_strategy/auto_scaling.rb +49 -50
  51. data/lib/chef/provisioning/aws_driver/tagging_strategy/ec2.rb +37 -38
  52. data/lib/chef/provisioning/aws_driver/tagging_strategy/elasticsearch.rb +14 -15
  53. data/lib/chef/provisioning/aws_driver/tagging_strategy/elb.rb +29 -31
  54. data/lib/chef/provisioning/aws_driver/tagging_strategy/rds.rb +39 -40
  55. data/lib/chef/provisioning/aws_driver/tagging_strategy/s3.rb +41 -43
  56. data/lib/chef/provisioning/aws_driver/version.rb +5 -5
  57. data/lib/chef/provisioning/driver_init/aws.rb +2 -2
  58. data/lib/chef/resource/aws_auto_scaling_group.rb +1 -1
  59. data/lib/chef/resource/aws_cache_cluster.rb +9 -12
  60. data/lib/chef/resource/aws_cache_replication_group.rb +9 -11
  61. data/lib/chef/resource/aws_cache_subnet_group.rb +8 -10
  62. data/lib/chef/resource/aws_cloudsearch_domain.rb +4 -5
  63. data/lib/chef/resource/aws_cloudwatch_alarm.rb +17 -18
  64. data/lib/chef/resource/aws_dhcp_options.rb +2 -2
  65. data/lib/chef/resource/aws_ebs_volume.rb +10 -10
  66. data/lib/chef/resource/aws_eip_address.rb +5 -5
  67. data/lib/chef/resource/aws_elasticsearch_domain.rb +4 -4
  68. data/lib/chef/resource/aws_iam_instance_profile.rb +4 -5
  69. data/lib/chef/resource/aws_iam_role.rb +2 -3
  70. data/lib/chef/resource/aws_image.rb +3 -3
  71. data/lib/chef/resource/aws_instance.rb +4 -4
  72. data/lib/chef/resource/aws_internet_gateway.rb +3 -3
  73. data/lib/chef/resource/aws_key_pair.rb +7 -7
  74. data/lib/chef/resource/aws_launch_configuration.rb +4 -4
  75. data/lib/chef/resource/aws_load_balancer.rb +7 -7
  76. data/lib/chef/resource/aws_nat_gateway.rb +11 -11
  77. data/lib/chef/resource/aws_network_acl.rb +7 -8
  78. data/lib/chef/resource/aws_network_interface.rb +9 -9
  79. data/lib/chef/resource/aws_rds_instance.rb +4 -4
  80. data/lib/chef/resource/aws_rds_parameter_group.rb +3 -3
  81. data/lib/chef/resource/aws_rds_subnet_group.rb +4 -4
  82. data/lib/chef/resource/aws_route53_hosted_zone.rb +37 -40
  83. data/lib/chef/resource/aws_route53_record_set.rb +22 -24
  84. data/lib/chef/resource/aws_route_table.rb +7 -7
  85. data/lib/chef/resource/aws_s3_bucket.rb +7 -7
  86. data/lib/chef/resource/aws_security_group.rb +10 -10
  87. data/lib/chef/resource/aws_server_certificate.rb +6 -8
  88. data/lib/chef/resource/aws_sns_topic.rb +2 -2
  89. data/lib/chef/resource/aws_sqs_queue.rb +5 -7
  90. data/lib/chef/resource/aws_subnet.rb +9 -9
  91. data/lib/chef/resource/aws_vpc.rb +11 -11
  92. data/lib/chef/resource/aws_vpc_peering_connection.rb +4 -4
  93. data/spec/aws_support.rb +44 -45
  94. data/spec/aws_support/aws_resource_run_wrapper.rb +2 -2
  95. data/spec/aws_support/deep_matcher.rb +2 -3
  96. data/spec/aws_support/deep_matcher/fuzzy_match_objects.rb +6 -9
  97. data/spec/aws_support/deep_matcher/match_values_failure_messages.rb +30 -37
  98. data/spec/aws_support/deep_matcher/matchable_array.rb +0 -1
  99. data/spec/aws_support/deep_matcher/matchable_object.rb +1 -2
  100. data/spec/aws_support/deep_matcher/rspec_monkeypatches.rb +4 -4
  101. data/spec/aws_support/delayed_stream.rb +2 -2
  102. data/spec/aws_support/matchers/create_an_aws_object.rb +6 -6
  103. data/spec/aws_support/matchers/destroy_an_aws_object.rb +6 -6
  104. data/spec/aws_support/matchers/have_aws_object_tags.rb +4 -5
  105. data/spec/aws_support/matchers/match_an_aws_object.rb +5 -6
  106. data/spec/aws_support/matchers/update_an_aws_object.rb +6 -7
  107. data/spec/integration/aws_auto_scaling_group_spec.rb +56 -64
  108. data/spec/integration/aws_cache_cluster_spec.rb +70 -71
  109. data/spec/integration/aws_cache_subnet_group_spec.rb +13 -14
  110. data/spec/integration/aws_cloudsearch_domain_spec.rb +6 -8
  111. data/spec/integration/aws_cloudwatch_alarm_spec.rb +200 -208
  112. data/spec/integration/aws_dhcp_options_spec.rb +32 -43
  113. data/spec/integration/aws_ebs_volume_spec.rb +52 -73
  114. data/spec/integration/aws_eip_address_spec.rb +24 -31
  115. data/spec/integration/aws_elasticsearch_domain_spec.rb +31 -33
  116. data/spec/integration/aws_iam_instance_profile_spec.rb +36 -45
  117. data/spec/integration/aws_iam_role_spec.rb +39 -46
  118. data/spec/integration/aws_internet_gateway_spec.rb +64 -75
  119. data/spec/integration/aws_key_pair_spec.rb +6 -6
  120. data/spec/integration/aws_launch_configuration_spec.rb +17 -18
  121. data/spec/integration/aws_nat_gateway_spec.rb +21 -24
  122. data/spec/integration/aws_network_acl_spec.rb +81 -95
  123. data/spec/integration/aws_network_interface_spec.rb +28 -43
  124. data/spec/integration/aws_rds_instance_spec.rb +29 -40
  125. data/spec/integration/aws_rds_parameter_group_spec.rb +32 -35
  126. data/spec/integration/aws_rds_subnet_group_spec.rb +30 -40
  127. data/spec/integration/aws_route53_hosted_zone_spec.rb +205 -205
  128. data/spec/integration/aws_route_table_spec.rb +118 -136
  129. data/spec/integration/aws_s3_bucket_spec.rb +19 -27
  130. data/spec/integration/aws_security_group_spec.rb +369 -388
  131. data/spec/integration/aws_server_certificate_spec.rb +16 -18
  132. data/spec/integration/aws_subnet_spec.rb +44 -58
  133. data/spec/integration/aws_vpc_peering_connection_spec.rb +43 -50
  134. data/spec/integration/aws_vpc_spec.rb +99 -115
  135. data/spec/integration/load_balancer_spec.rb +169 -183
  136. data/spec/integration/machine_batch_spec.rb +24 -31
  137. data/spec/integration/machine_image_spec.rb +54 -66
  138. data/spec/integration/machine_spec.rb +216 -237
  139. data/spec/persistence_file.txt +219 -0
  140. data/spec/spec_helper.rb +16 -17
  141. data/spec/unit/chef/provisioning/aws_driver/credentials_spec.rb +67 -74
  142. data/spec/unit/chef/provisioning/aws_driver/driver_spec.rb +29 -29
  143. data/spec/unit/chef/provisioning/aws_driver/route53_spec.rb +13 -15
  144. metadata +4 -3
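The remainder of the page is the diff for data/lib/chef/provisioning/aws_driver/driver.rb (entry 47 above), the largest change in the release: a largely mechanical style cleanup (double-quoted strings, `key: value` hash syntax, frozen constants, `unless`/`reject` in place of negated conditions, re-indentation). One tunable visible in the code below: the driver builds its SDK config with retry_limit read from Chef::Config.chef_provisioning[:aws_retry_limit], defaulting to 5. A minimal sketch of raising it, assuming a client.rb-style Chef config file (the config key comes from the diff; everything else is illustrative):

    # client.rb sketch (illustrative): raise the AWS SDK retry limit used by driver.rb
    Chef::Config.chef_provisioning = { aws_retry_limit: 10 }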
@@ -1,29 +1,29 @@
- require 'chef/mixin/shell_out'
- require 'chef/mixin/deep_merge'
- require 'chef/provisioning/driver'
- require 'chef/provisioning/convergence_strategy/install_cached'
- require 'chef/provisioning/convergence_strategy/install_sh'
- require 'chef/provisioning/convergence_strategy/install_msi'
- require 'chef/provisioning/convergence_strategy/no_converge'
- require 'chef/provisioning/transport/ssh'
- require 'chef/provisioning/transport/winrm'
- require 'chef/provisioning/machine/windows_machine'
- require 'chef/provisioning/machine/unix_machine'
- require 'chef/provisioning/machine_spec'
-
- require 'chef/provisioning/aws_driver/aws_resource'
- require 'chef/provisioning/aws_driver/tagging_strategy/ec2'
- require 'chef/provisioning/aws_driver/tagging_strategy/elb'
- require 'chef/provisioning/aws_driver/version'
- require 'chef/provisioning/aws_driver/credentials'
- require 'chef/provisioning/aws_driver/credentials2'
- require 'chef/provisioning/aws_driver/aws_tagger'
-
- require 'yaml'
- require 'aws-sdk'
- require 'retryable'
- require 'ubuntu_ami'
- require 'base64'
+ require "chef/mixin/shell_out"
+ require "chef/mixin/deep_merge"
+ require "chef/provisioning/driver"
+ require "chef/provisioning/convergence_strategy/install_cached"
+ require "chef/provisioning/convergence_strategy/install_sh"
+ require "chef/provisioning/convergence_strategy/install_msi"
+ require "chef/provisioning/convergence_strategy/no_converge"
+ require "chef/provisioning/transport/ssh"
+ require "chef/provisioning/transport/winrm"
+ require "chef/provisioning/machine/windows_machine"
+ require "chef/provisioning/machine/unix_machine"
+ require "chef/provisioning/machine_spec"
+
+ require "chef/provisioning/aws_driver/aws_resource"
+ require "chef/provisioning/aws_driver/tagging_strategy/ec2"
+ require "chef/provisioning/aws_driver/tagging_strategy/elb"
+ require "chef/provisioning/aws_driver/version"
+ require "chef/provisioning/aws_driver/credentials"
+ require "chef/provisioning/aws_driver/credentials2"
+ require "chef/provisioning/aws_driver/aws_tagger"
+
+ require "yaml"
+ require "aws-sdk"
+ require "retryable"
+ require "ubuntu_ami"
+ require "base64"

  # loads the entire aws-sdk
  Aws.eager_autoload!
@@ -37,466 +37,463 @@ AWS_V2_SERVICES = {
  "RDS" => "rds",
  "CloudWatch" => "cloudwatch",
  "AutoScaling" => "auto_scaling"
- }
- Aws.eager_autoload!(:services => AWS_V2_SERVICES.keys)
+ }.freeze
+ Aws.eager_autoload!(services: AWS_V2_SERVICES.keys)

  # Need to load the resources after the SDK because `aws_sdk_types` can mess
  # up AWS loading if they are loaded too early
- require 'chef/resource/aws_key_pair'
- require 'chef/resource/aws_instance'
- require 'chef/resource/aws_image'
- require 'chef/resource/aws_load_balancer'
+ require "chef/resource/aws_key_pair"
+ require "chef/resource/aws_instance"
+ require "chef/resource/aws_image"
+ require "chef/resource/aws_load_balancer"

  # We add the appropriate attributes to the base resources for tagging support
  class Chef
- class Resource
- class Machine
- include Chef::Provisioning::AWSDriver::AWSTaggable
- end
- class MachineImage
- include Chef::Provisioning::AWSDriver::AWSTaggable
- end
- class LoadBalancer
- include Chef::Provisioning::AWSDriver::AWSTaggable
+ class Resource
+ class Machine
+ include Chef::Provisioning::AWSDriver::AWSTaggable
+ end
+ class MachineImage
+ include Chef::Provisioning::AWSDriver::AWSTaggable
+ end
+ class LoadBalancer
+ include Chef::Provisioning::AWSDriver::AWSTaggable
+ end
  end
  end
- end

- require 'chef/provider/load_balancer'
+ require "chef/provider/load_balancer"
  class Chef
- class Provider
- class LoadBalancer
- # We override this so we can specify a machine name as `i-123456`
- # This is totally a hack until we move away from base resources
- def get_machine_spec!(machine_name)
- if machine_name =~ /^i-[0-9a-f]+/
- Struct.new(:name, :reference).new(machine_name, {'instance_id' => machine_name})
- else
- Chef::Log.debug "Getting machine spec for #{machine_name}"
- Provisioning.chef_managed_entry_store(new_resource.chef_server).get!(:machine, machine_name)
+ class Provider
+ class LoadBalancer
+ # We override this so we can specify a machine name as `i-123456`
+ # This is totally a hack until we move away from base resources
+ def get_machine_spec!(machine_name)
+ if machine_name =~ /^i-[0-9a-f]+/
+ Struct.new(:name, :reference).new(machine_name, "instance_id" => machine_name)
+ else
+ Chef::Log.debug "Getting machine spec for #{machine_name}"
+ Provisioning.chef_managed_entry_store(new_resource.chef_server).get!(:machine, machine_name)
+ end
  end
  end
  end
  end
- end

  Chef::Provider::Machine.additional_machine_option_keys << :aws_tags
  Chef::Provider::MachineImage.additional_image_option_keys << :aws_tags
  Chef::Provider::LoadBalancer.additional_lb_option_keys << :aws_tags

  class Chef
- module Provisioning
- module AWSDriver
- # Provisions machines using the AWS SDK
- class Driver < Chef::Provisioning::Driver
-
- include Chef::Mixin::ShellOut
- include Chef::Mixin::DeepMerge
-
- attr_reader :aws_config, :aws_config_2
-
- # URL scheme:
- # aws:profilename:region
- # TODO: migration path from fog:AWS - parse that URL
- # canonical URL calls realpath on <path>
- def self.from_url(driver_url, config)
- Driver.new(driver_url, config)
- end
-
- def initialize(driver_url, config)
- super
-
- _, profile_name, region = driver_url.split(':')
- profile_name = nil if profile_name && profile_name.empty?
- region = nil if region && region.empty?
-
- credentials = profile_name ? aws_credentials[profile_name] : aws_credentials.default
- @aws_config = Aws.config.update(
- access_key_id: credentials[:aws_access_key_id],
- secret_access_key: credentials[:aws_secret_access_key],
- region: region || credentials[:region],
- http_proxy: credentials[:proxy_uri] || nil,
- session_token: credentials[:aws_session_token] || nil,
- logger: Chef::Log.logger
- )
-
- # TODO document how users could add something to the Aws.config themselves if they want to
- # Right now we are supporting both V1 and V2, so we create 2 config sets
- credentials2 = Credentials2.new(:profile_name => profile_name)
- Chef::Config.chef_provisioning ||= {}
- @aws_config_2 = {
- credentials: credentials2.get_credentials,
- region: region || ENV["AWS_DEFAULT_REGION"] || credentials[:region],
- # TODO when we get rid of V1 replace the credentials class with something that knows how
- # to read ~/.aws/config
- :http_proxy => credentials[:proxy_uri] || nil,
- logger: Chef::Log.logger,
- retry_limit: Chef::Config.chef_provisioning[:aws_retry_limit] || 5
- }
-
- driver = self
- Chef::Resource::Machine.send(:define_method, :aws_object) do
- resource = Chef::Resource::AwsInstance.new(name, nil)
- resource.driver driver
- resource.managed_entry_store Chef::Provisioning.chef_managed_entry_store
- resource.aws_object
- end
- Chef::Resource::MachineImage.send(:define_method, :aws_object) do
- resource = Chef::Resource::AwsImage.new(name, nil)
- resource.driver driver
- resource.managed_entry_store Chef::Provisioning.chef_managed_entry_store
- resource.aws_object
- end
- Chef::Resource::LoadBalancer.send(:define_method, :aws_object) do
- resource = Chef::Resource::AwsLoadBalancer.new(name, nil)
- resource.driver driver
- resource.managed_entry_store Chef::Provisioning.chef_managed_entry_store
- resource.aws_object
- end
- end
-
- def region
- aws_config_2[:region]
- end
-
- def cloudsearch
- @cloudsearch ||= Aws::CloudSearch::Client.new(aws_config)
- end
-
- def self.canonicalize_url(driver_url, config)
- [ driver_url, config ]
- end
-
- def deep_symbolize_keys(hash_like)
- # Process arrays first...
- if hash_like.is_a?(Array)
- # Node attributes are an ImmutableArray so lets convert them to an array first
- hash_like = hash_like.to_a
- hash_like.length.times do |e|
- hash_like[e]=deep_symbolize_keys(hash_like[e]) if hash_like[e].respond_to?(:values) or hash_like[e].is_a?(Array)
+ module Provisioning
+ module AWSDriver
+ # Provisions machines using the AWS SDK
+ class Driver < Chef::Provisioning::Driver
+ include Chef::Mixin::ShellOut
+ include Chef::Mixin::DeepMerge
+
+ attr_reader :aws_config, :aws_config_2
+
+ # URL scheme:
+ # aws:profilename:region
+ # TODO: migration path from fog:AWS - parse that URL
+ # canonical URL calls realpath on <path>
+ def self.from_url(driver_url, config)
+ Driver.new(driver_url, config)
  end
- return hash_like
- end
- # Otherwise return ourselves if not a hash
- return hash_like if not hash_like.respond_to?(:values)
- # Otherwise we are hash like, push on through...
- if hash_like.nil? || hash_like.empty?
- return {}
- end
- r = {}
- hash_like.each do |key, value|
- value = deep_symbolize_keys(value) if value.respond_to?(:values) or value.is_a?(Array)
- r[key.to_sym] = value
- end
- r
- end
-
- # Load balancer methods
- def allocate_load_balancer(action_handler, lb_spec, lb_options, machine_specs)
- lb_options = deep_symbolize_keys(lb_options)
- lb_options = AWSResource.lookup_options(lb_options, managed_entry_store: lb_spec.managed_entry_store, driver: self)

- # renaming lb_options[:port] to lb_options[:load_balancer_port]
- if lb_options[:listeners]
- lb_options[:listeners].each do |listener|
- listener[:load_balancer_port] = listener.delete(:port) if listener[:port]
- end
- end
- # We delete the attributes, tags, health check, and sticky sessions here because they are not valid in the create call
- # and must be applied afterward
- lb_attributes = lb_options.delete(:attributes)
- lb_aws_tags = lb_options.delete(:aws_tags)
- health_check = lb_options.delete(:health_check)
- sticky_sessions = lb_options.delete(:sticky_sessions)
-
- old_elb = nil
- actual_elb = load_balancer_for(lb_spec)
- if actual_elb.nil?
- lb_options[:listeners] ||= get_listeners(:http)
-
- if !lb_options[:subnets] && !lb_options[:availability_zones] && machine_specs
- lb_options[:subnets] = machine_specs.map { |s| ec2_resource.instance(s.reference['instance_id']).subnet.id }.uniq
- end
-
- perform_action = proc { |desc, &block| action_handler.perform_action(desc, &block) }
- Chef::Log.debug "AWS Load Balancer options: #{lb_options.inspect}"
-
- updates = [ "create load balancer #{lb_spec.name} in #{region}" ]
- updates << " enable availability zones #{lb_options[:availability_zones]}" if lb_options[:availability_zones]
- updates << " attach subnets #{lb_options[:subnets].join(', ')}" if lb_options[:subnets]
- updates << " with listeners #{lb_options[:listeners]}" if lb_options[:listeners]
- updates << " with security groups #{lb_options[:security_groups]}" if lb_options[:security_groups]
- updates << " with tags #{lb_options[:aws_tags]}" if lb_options[:aws_tags]
-
- action_handler.perform_action updates do
- # IAM says the server certificate exists, but ELB throws this error
- Chef::Provisioning::AWSDriver::AWSProvider.retry_with_backoff(::Aws::ElasticLoadBalancing::Errors::CertificateNotFound) do
- lb_options[:listeners].each do |listener|
- if listener.has_key?(:server_certificate)
- listener[:ssl_certificate_id] = listener.delete(:server_certificate)
- listener[:ssl_certificate_id] = listener[:ssl_certificate_id][:arn]
- end
- end
+ def initialize(driver_url, config)
+ super
+
+ _, profile_name, region = driver_url.split(":")
+ profile_name = nil if profile_name && profile_name.empty?
+ region = nil if region && region.empty?
+
+ credentials = profile_name ? aws_credentials[profile_name] : aws_credentials.default
+ @aws_config = Aws.config.update(
+ access_key_id: credentials[:aws_access_key_id],
+ secret_access_key: credentials[:aws_secret_access_key],
+ region: region || credentials[:region],
+ http_proxy: credentials[:proxy_uri] || nil,
+ session_token: credentials[:aws_session_token] || nil,
+ logger: Chef::Log.logger
+ )
+
+ # TODO: document how users could add something to the Aws.config themselves if they want to
+ # Right now we are supporting both V1 and V2, so we create 2 config sets
+ credentials2 = Credentials2.new(profile_name: profile_name)
+ Chef::Config.chef_provisioning ||= {}
+ @aws_config_2 = {
+ credentials: credentials2.get_credentials,
+ region: region || ENV["AWS_DEFAULT_REGION"] || credentials[:region],
+ # TODO: when we get rid of V1 replace the credentials class with something that knows how
+ # to read ~/.aws/config
+ http_proxy: credentials[:proxy_uri] || nil,
+ logger: Chef::Log.logger,
+ retry_limit: Chef::Config.chef_provisioning[:aws_retry_limit] || 5
+ }

- lb_options[:load_balancer_name]=lb_spec.name
- actual_elb = elb.create_load_balancer(lb_options)
+ driver = self
+ Chef::Resource::Machine.send(:define_method, :aws_object) do
+ resource = Chef::Resource::AwsInstance.new(name, nil)
+ resource.driver driver
+ resource.managed_entry_store Chef::Provisioning.chef_managed_entry_store
+ resource.aws_object
  end
+ Chef::Resource::MachineImage.send(:define_method, :aws_object) do
+ resource = Chef::Resource::AwsImage.new(name, nil)
+ resource.driver driver
+ resource.managed_entry_store Chef::Provisioning.chef_managed_entry_store
+ resource.aws_object
+ end
+ Chef::Resource::LoadBalancer.send(:define_method, :aws_object) do
+ resource = Chef::Resource::AwsLoadBalancer.new(name, nil)
+ resource.driver driver
+ resource.managed_entry_store Chef::Provisioning.chef_managed_entry_store
+ resource.aws_object
+ end
+ end

- # load aws object for load balancer after create
- actual_elb =load_balancer_for(lb_spec)
-
- lb_spec.reference = {
- 'driver_version' => Chef::Provisioning::AWSDriver::VERSION,
- 'allocated_at' => Time.now.utc.to_s,
- }
- lb_spec.driver_url = driver_url
+ def region
+ aws_config_2[:region]
  end
- else
- # Header gets printed the first time we make an update
- perform_action = proc do |desc, &block|
- perform_action = proc { |desc, &block| action_handler.perform_action(desc, &block) }
- action_handler.perform_action [ "Update load balancer #{lb_spec.name} in #{region}", desc ].flatten, &block
+
+ def cloudsearch
+ @cloudsearch ||= Aws::CloudSearch::Client.new(aws_config)
  end

- # TODO: refactor this whole giant method into many smaller method calls
- if lb_options[:scheme] && lb_options[:scheme].downcase != actual_elb.scheme
- # TODO CloudFormation automatically recreates the load_balancer, we should too
- raise "Scheme is immutable - you need to :destroy and :create the load_balancer to recreated it with the new scheme"
+ def self.canonicalize_url(driver_url, config)
+ [driver_url, config]
  end

- # Update security groups
- if lb_options[:security_groups]
- current = actual_elb.security_groups
- desired = lb_options[:security_groups]
- if current != desired
- perform_action.call(" updating security groups to #{desired.to_a}") do
- elb_client.apply_security_groups_to_load_balancer(
- load_balancer_name: actual_elb.load_balancer_name,
- security_groups: desired.to_a
- )
+ def deep_symbolize_keys(hash_like)
+ # Process arrays first...
+ if hash_like.is_a?(Array)
+ # Node attributes are an ImmutableArray so lets convert them to an array first
+ hash_like = hash_like.to_a
+ hash_like.length.times do |e|
+ hash_like[e] = deep_symbolize_keys(hash_like[e]) if hash_like[e].respond_to?(:values) || hash_like[e].is_a?(Array)
  end
+ return hash_like
  end
+ # Otherwise return ourselves if not a hash
+ return hash_like unless hash_like.respond_to?(:values)
+ # Otherwise we are hash like, push on through...
+ return {} if hash_like.nil? || hash_like.empty?
+ r = {}
+ hash_like.each do |key, value|
+ value = deep_symbolize_keys(value) if value.respond_to?(:values) || value.is_a?(Array)
+ r[key.to_sym] = value
+ end
+ r
  end

- if lb_options[:availability_zones] || lb_options[:subnets]
- # A subnet always belongs to an availability zone. When specifying a ELB spec, you can either
- # specify subnets OR AZs but not both. You cannot specify multiple subnets in the same AZ.
- # You must specify at least 1 subnet or AZ. On an update you cannot remove all subnets
- # or AZs - it must belong to one.
- if lb_options[:availability_zones] && lb_options[:subnets]
- # We do this check here because there is no atomic call we can make to specify both
- # subnets and AZs at the same time
- raise "You cannot specify both `availability_zones` and `subnets`"
- end
-
- # Users can switch from availability zones to subnets or vice versa. To ensure we do not
- # unassign all (which causes an AWS error) we first add all available ones, then remove
- # an unecessary ones
- actual_zones_subnets = {}
- actual_elb.subnets.each do |subnet|
- actual_zones_subnets[subnet] = Chef::Resource::AwsSubnet.get_aws_object(subnet, driver: self).availability_zone
- end
-
- # Only 1 of subnet or AZ will be populated b/c of our check earlier
- desired_subnets_zones = {}
- if lb_options[:availability_zones]
- lb_options[:availability_zones].each do |zone|
- # If the user specifies availability zone, we find the default subnet for that
- # AZ because this duplicates the create logic
- zone = zone.downcase
- filters = [
- {:name => 'availabilityZone', :values => [zone]},
- {:name => 'defaultForAz', :values => ['true']}
- ]
- default_subnet = ec2_client.describe_subnets(:filters => filters)[:subnets]
- if default_subnet.size != 1
- raise "Could not find default subnet in availability zone #{zone}"
- end
- default_subnet = default_subnet[0]
- desired_subnets_zones[default_subnet[:subnet_id]] = zone
+ # Load balancer methods
+ def allocate_load_balancer(action_handler, lb_spec, lb_options, machine_specs)
+ lb_options = deep_symbolize_keys(lb_options)
+ lb_options = AWSResource.lookup_options(lb_options, managed_entry_store: lb_spec.managed_entry_store, driver: self)
+
+ # renaming lb_options[:port] to lb_options[:load_balancer_port]
+ if lb_options[:listeners]
+ lb_options[:listeners].each do |listener|
+ listener[:load_balancer_port] = listener.delete(:port) if listener[:port]
  end
  end
- unless lb_options[:subnets].nil? || lb_options[:subnets].empty?
- subnet_query = ec2_client.describe_subnets(:subnet_ids => lb_options[:subnets])[:subnets]
- # AWS raises an error on an unknown subnet, but not an unknown AZ
- subnet_query.each do |subnet|
- zone = subnet[:availability_zone].downcase
- desired_subnets_zones[subnet[:subnet_id]] = zone
+ # We delete the attributes, tags, health check, and sticky sessions here because they are not valid in the create call
+ # and must be applied afterward
+ lb_attributes = lb_options.delete(:attributes)
+ lb_aws_tags = lb_options.delete(:aws_tags)
+ health_check = lb_options.delete(:health_check)
+ sticky_sessions = lb_options.delete(:sticky_sessions)
+
+ old_elb = nil
+ actual_elb = load_balancer_for(lb_spec)
+ if actual_elb.nil?
+ lb_options[:listeners] ||= get_listeners(:http)
+
+ if !lb_options[:subnets] && !lb_options[:availability_zones] && machine_specs
+ lb_options[:subnets] = machine_specs.map { |s| ec2_resource.instance(s.reference["instance_id"]).subnet.id }.uniq
  end
- end

- # We only bother attaching subnets, because doing this automatically attaches the AZ
- attach_subnets = desired_subnets_zones.keys - actual_zones_subnets.keys
- unless attach_subnets.empty?
- action = " attach subnets #{attach_subnets.join(', ')}"
- enable_zones = (desired_subnets_zones.map {|s,z| z if attach_subnets.include?(s)}).compact
- action += " (availability zones #{enable_zones.join(', ')})"
- perform_action.call(action) do
- begin
- elb.attach_load_balancer_to_subnets(
- load_balancer_name: actual_elb.load_balancer_name,
- subnets: attach_subnets
- )
- rescue ::Aws::ElasticLoadBalancing::Errors::InvalidConfigurationRequest => e
- Chef::Log.error "You cannot currently move from 1 subnet to another in the same availability zone. " +
- "Amazon does not have an atomic operation which allows this. You must create a new " +
- "ELB with the correct subnets and move instances into it. Tried to attach subets " +
- "#{attach_subnets.join(', ')} (availability zones #{enable_zones.join(', ')}) to " +
- "existing ELB named #{actual_elb.load_balancer_name}"
- raise e
+ perform_action = proc { |desc, &block| action_handler.perform_action(desc, &block) }
+ Chef::Log.debug "AWS Load Balancer options: #{lb_options.inspect}"
+
+ updates = ["create load balancer #{lb_spec.name} in #{region}"]
+ updates << " enable availability zones #{lb_options[:availability_zones]}" if lb_options[:availability_zones]
+ updates << " attach subnets #{lb_options[:subnets].join(', ')}" if lb_options[:subnets]
+ updates << " with listeners #{lb_options[:listeners]}" if lb_options[:listeners]
+ updates << " with security groups #{lb_options[:security_groups]}" if lb_options[:security_groups]
+ updates << " with tags #{lb_options[:aws_tags]}" if lb_options[:aws_tags]
+
+ action_handler.perform_action updates do
+ # IAM says the server certificate exists, but ELB throws this error
+ Chef::Provisioning::AWSDriver::AWSProvider.retry_with_backoff(::Aws::ElasticLoadBalancing::Errors::CertificateNotFound) do
+ lb_options[:listeners].each do |listener|
+ if listener.key?(:server_certificate)
+ listener[:ssl_certificate_id] = listener.delete(:server_certificate)
+ listener[:ssl_certificate_id] = listener[:ssl_certificate_id][:arn]
+ end
+ end
+
+ lb_options[:load_balancer_name] = lb_spec.name
+ actual_elb = elb.create_load_balancer(lb_options)
  end
+
+ # load aws object for load balancer after create
+ actual_elb = load_balancer_for(lb_spec)
+
+ lb_spec.reference = {
+ "driver_version" => Chef::Provisioning::AWSDriver::VERSION,
+ "allocated_at" => Time.now.utc.to_s
+ }
+ lb_spec.driver_url = driver_url
+ end
+ else
+ # Header gets printed the first time we make an update
+ perform_action = proc do |desc, &block|
+ perform_action = proc { |desc, &block| action_handler.perform_action(desc, &block) }
+ action_handler.perform_action ["Update load balancer #{lb_spec.name} in #{region}", desc].flatten, &block
  end
- end

- detach_subnets = actual_zones_subnets.keys - desired_subnets_zones.keys
- unless detach_subnets.empty?
- action = " detach subnets #{detach_subnets.join(', ')}"
- disable_zones = (actual_zones_subnets.map {|s,z| z if detach_subnets.include?(s)}).compact
- action += " (availability zones #{disable_zones.join(', ')})"
- perform_action.call(action) do
- elb.detach_load_balancer_from_subnets(
- load_balancer_name: actual_elb.load_balancer_name,
- subnets: detach_subnets
- )
+ # TODO: refactor this whole giant method into many smaller method calls
+ if lb_options[:scheme] && lb_options[:scheme].downcase != actual_elb.scheme
+ # TODO: CloudFormation automatically recreates the load_balancer, we should too
+ raise "Scheme is immutable - you need to :destroy and :create the load_balancer to recreated it with the new scheme"
  end
- end
- end

- # Update listeners - THIS IS NOT ATOMIC
- if lb_options[:listeners]
- add_listeners = {}
- lb_options[:listeners].each { |l| add_listeners[l[:load_balancer_port]] = l }
- actual_elb.listener_descriptions.each do |listener_description|
- listener = listener_description.listener
- desired_listener = add_listeners.delete(listener.load_balancer_port)
+ # Update security groups
+ if lb_options[:security_groups]
+ current = actual_elb.security_groups
+ desired = lb_options[:security_groups]
+ if current != desired
+ perform_action.call(" updating security groups to #{desired.to_a}") do
+ elb_client.apply_security_groups_to_load_balancer(
+ load_balancer_name: actual_elb.load_balancer_name,
+ security_groups: desired.to_a
+ )
+ end
+ end
+ end

- if desired_listener
- # listener.(port|protocol|instance_port|instance_protocol) are immutable for the life
- # of the listener - must create a new one and delete old one
- immutable_updates = []
- if listener.protocol != desired_listener[:protocol].to_s.upcase
- immutable_updates << " update protocol from #{listener.protocol.inspect} to #{desired_listener[:protocol].inspect}"
+ if lb_options[:availability_zones] || lb_options[:subnets]
+ # A subnet always belongs to an availability zone. When specifying a ELB spec, you can either
+ # specify subnets OR AZs but not both. You cannot specify multiple subnets in the same AZ.
+ # You must specify at least 1 subnet or AZ. On an update you cannot remove all subnets
+ # or AZs - it must belong to one.
+ if lb_options[:availability_zones] && lb_options[:subnets]
+ # We do this check here because there is no atomic call we can make to specify both
+ # subnets and AZs at the same time
+ raise "You cannot specify both `availability_zones` and `subnets`"
  end

- if listener.instance_port != desired_listener[:instance_port]
- immutable_updates << " update instance port from #{listener.instance_port.inspect} to #{desired_listener[:instance_port].inspect}"
+ # Users can switch from availability zones to subnets or vice versa. To ensure we do not
+ # unassign all (which causes an AWS error) we first add all available ones, then remove
+ # an unecessary ones
+ actual_zones_subnets = {}
+ actual_elb.subnets.each do |subnet|
+ actual_zones_subnets[subnet] = Chef::Resource::AwsSubnet.get_aws_object(subnet, driver: self).availability_zone
  end

- if listener.instance_protocol != desired_listener[:instance_protocol].to_s.upcase
- immutable_updates << " update instance protocol from #{listener.instance_protocol.inspect} to #{desired_listener[:instance_protocol].inspect}"
+ # Only 1 of subnet or AZ will be populated b/c of our check earlier
+ desired_subnets_zones = {}
+ if lb_options[:availability_zones]
+ lb_options[:availability_zones].each do |zone|
+ # If the user specifies availability zone, we find the default subnet for that
+ # AZ because this duplicates the create logic
+ zone = zone.downcase
+ filters = [
+ { name: "availabilityZone", values: [zone] },
+ { name: "defaultForAz", values: ["true"] }
+ ]
+ default_subnet = ec2_client.describe_subnets(filters: filters)[:subnets]
+ if default_subnet.size != 1
+ raise "Could not find default subnet in availability zone #{zone}"
+ end
+ default_subnet = default_subnet[0]
+ desired_subnets_zones[default_subnet[:subnet_id]] = zone
+ end
+ end
+ unless lb_options[:subnets].nil? || lb_options[:subnets].empty?
+ subnet_query = ec2_client.describe_subnets(subnet_ids: lb_options[:subnets])[:subnets]
+ # AWS raises an error on an unknown subnet, but not an unknown AZ
+ subnet_query.each do |subnet|
+ zone = subnet[:availability_zone].downcase
+ desired_subnets_zones[subnet[:subnet_id]] = zone
+ end
  end

- if !immutable_updates.empty?
- perform_action.call(immutable_updates) do
- elb.delete_load_balancer_listeners({load_balancer_name: actual_elb.load_balancer_name, load_balancer_ports: [listener.load_balancer_port]})
- elb.create_load_balancer_listeners({ listeners: [desired_listener], load_balancer_name: actual_elb.load_balancer_name })
- # actual_elb.listeners.create(desired_listener)
+ # We only bother attaching subnets, because doing this automatically attaches the AZ
+ attach_subnets = desired_subnets_zones.keys - actual_zones_subnets.keys
+ unless attach_subnets.empty?
+ action = " attach subnets #{attach_subnets.join(', ')}"
+ enable_zones = (desired_subnets_zones.map { |s, z| z if attach_subnets.include?(s) }).compact
+ action += " (availability zones #{enable_zones.join(', ')})"
+ perform_action.call(action) do
+ begin
+ elb.attach_load_balancer_to_subnets(
+ load_balancer_name: actual_elb.load_balancer_name,
+ subnets: attach_subnets
+ )
+ rescue ::Aws::ElasticLoadBalancing::Errors::InvalidConfigurationRequest => e
+ Chef::Log.error "You cannot currently move from 1 subnet to another in the same availability zone. " \
+ "Amazon does not have an atomic operation which allows this. You must create a new " \
+ "ELB with the correct subnets and move instances into it. Tried to attach subets " \
+ "#{attach_subnets.join(', ')} (availability zones #{enable_zones.join(', ')}) to " \
+ "existing ELB named #{actual_elb.load_balancer_name}"
+ raise e
+ end
  end
- elsif listener.ssl_certificate_id && ! server_certificate_eql?(listener.ssl_certificate_id,
- server_cert_from_spec(desired_listener))
- # Server certificate is mutable - if no immutable changes required a full recreate, update cert
- perform_action.call(" update server certificate from #{listener.ssl_certificate_id} to #{server_cert_from_spec(desired_listener)}") do
- elb.set_load_balancer_listener_ssl_certificate({
+ end
+
+ detach_subnets = actual_zones_subnets.keys - desired_subnets_zones.keys
+ unless detach_subnets.empty?
+ action = " detach subnets #{detach_subnets.join(', ')}"
+ disable_zones = (actual_zones_subnets.map { |s, z| z if detach_subnets.include?(s) }).compact
+ action += " (availability zones #{disable_zones.join(', ')})"
+ perform_action.call(action) do
+ elb.detach_load_balancer_from_subnets(
  load_balancer_name: actual_elb.load_balancer_name,
- load_balancer_port: listener.load_balancer_port,
- ssl_certificate_id: server_cert_from_spec(desired_listener)
- })
+ subnets: detach_subnets
+ )
+ end
+ end
+ end
+
+ # Update listeners - THIS IS NOT ATOMIC
+ if lb_options[:listeners]
+ add_listeners = {}
+ lb_options[:listeners].each { |l| add_listeners[l[:load_balancer_port]] = l }
+ actual_elb.listener_descriptions.each do |listener_description|
+ listener = listener_description.listener
+ desired_listener = add_listeners.delete(listener.load_balancer_port)
+
+ if desired_listener
+ # listener.(port|protocol|instance_port|instance_protocol) are immutable for the life
+ # of the listener - must create a new one and delete old one
+ immutable_updates = []
+ if listener.protocol != desired_listener[:protocol].to_s.upcase
+ immutable_updates << " update protocol from #{listener.protocol.inspect} to #{desired_listener[:protocol].inspect}"
+ end
+
+ if listener.instance_port != desired_listener[:instance_port]
+ immutable_updates << " update instance port from #{listener.instance_port.inspect} to #{desired_listener[:instance_port].inspect}"
+ end
+
+ if listener.instance_protocol != desired_listener[:instance_protocol].to_s.upcase
+ immutable_updates << " update instance protocol from #{listener.instance_protocol.inspect} to #{desired_listener[:instance_protocol].inspect}"
+ end
+
+ if !immutable_updates.empty?
+ perform_action.call(immutable_updates) do
+ elb.delete_load_balancer_listeners(load_balancer_name: actual_elb.load_balancer_name, load_balancer_ports: [listener.load_balancer_port])
+ elb.create_load_balancer_listeners(listeners: [desired_listener], load_balancer_name: actual_elb.load_balancer_name)
+ # actual_elb.listeners.create(desired_listener)
+ end
+ elsif listener.ssl_certificate_id && !server_certificate_eql?(listener.ssl_certificate_id,
+ server_cert_from_spec(desired_listener))
+ # Server certificate is mutable - if no immutable changes required a full recreate, update cert
+ perform_action.call(" update server certificate from #{listener.ssl_certificate_id} to #{server_cert_from_spec(desired_listener)}") do
+ elb.set_load_balancer_listener_ssl_certificate(
+ load_balancer_name: actual_elb.load_balancer_name,
+ load_balancer_port: listener.load_balancer_port,
+ ssl_certificate_id: server_cert_from_spec(desired_listener)
+ )
+ end
+ end
+ else
+ perform_action.call(" remove listener #{listener.load_balancer_port}") do
+ elb.delete_load_balancer_listeners(load_balancer_name: actual_elb.load_balancer_name, load_balancer_ports: [listener.load_balancer_port])
+ end
  end
  end
- else
- perform_action.call(" remove listener #{listener.load_balancer_port}") do
- elb.delete_load_balancer_listeners({load_balancer_name: actual_elb.load_balancer_name, load_balancer_ports: [listener.load_balancer_port]})
+
+ add_listeners.values.each do |listener|
+ updates = [" add listener #{listener[:load_balancer_port]}"]
+ updates << " set protocol to #{listener[:protocol].inspect}"
+ updates << " set instance port to #{listener[:instance_port].inspect}"
+ updates << " set instance protocol to #{listener[:instance_protocol].inspect}"
+ updates << " set server certificate to #{server_cert_from_spec(listener)}" if server_cert_from_spec(listener)
+ perform_action.call(updates) do
+ elb.create_load_balancer_listeners(listeners: [listener], load_balancer_name: actual_elb.load_balancer_name)
+ end
  end
  end
  end

- add_listeners.values.each do |listener|
- updates = [ " add listener #{listener[:load_balancer_port]}" ]
- updates << " set protocol to #{listener[:protocol].inspect}"
- updates << " set instance port to #{listener[:instance_port].inspect}"
- updates << " set instance protocol to #{listener[:instance_protocol].inspect}"
- updates << " set server certificate to #{server_cert_from_spec(listener)}" if server_cert_from_spec(listener)
- perform_action.call(updates) do
- elb.create_load_balancer_listeners({ listeners: [listener], load_balancer_name: actual_elb.load_balancer_name })
+ converge_elb_tags(actual_elb, lb_aws_tags, action_handler)
+
+ # Update load balancer attributes
+ if lb_attributes
+ current = elb.describe_load_balancer_attributes(load_balancer_name: actual_elb.load_balancer_name)[:load_balancer_attributes].to_hash
+ # Need to do a deep copy w/ Marshal load/dump to avoid overwriting current
+ desired = deep_merge!(lb_attributes, Marshal.load(Marshal.dump(current)))
+ if current != desired
+ perform_action.call(" updating attributes to #{desired.inspect}") do
+ elb.modify_load_balancer_attributes(
+ load_balancer_name: actual_elb.load_balancer_name,
+ load_balancer_attributes: desired.to_hash
+ )
+ end
  end
  end
- end
- end

- converge_elb_tags(actual_elb, lb_aws_tags, action_handler)
-
- # Update load balancer attributes
- if lb_attributes
- current = elb.describe_load_balancer_attributes(load_balancer_name: actual_elb.load_balancer_name)[:load_balancer_attributes].to_hash
- # Need to do a deep copy w/ Marshal load/dump to avoid overwriting current
- desired = deep_merge!(lb_attributes, Marshal.load(Marshal.dump(current)))
- if current != desired
- perform_action.call(" updating attributes to #{desired.inspect}") do
- elb.modify_load_balancer_attributes(
- load_balancer_name: actual_elb.load_balancer_name,
- load_balancer_attributes: desired.to_hash
- )
+ # Update the load balancer health check, as above
+ if health_check
+ current = elb.describe_load_balancers(load_balancer_names: [actual_elb.load_balancer_name])[:load_balancer_descriptions][0][:health_check].to_hash
+ desired = deep_merge!(health_check, Marshal.load(Marshal.dump(current)))
+ if current != desired
+ perform_action.call(" updating health check to #{desired.inspect}") do
+ elb.configure_health_check(
+ load_balancer_name: actual_elb.load_balancer_name,
+ health_check: desired.to_hash
+ )
+ end
+ end
  end
- end
- end

- # Update the load balancer health check, as above
- if health_check
- current = elb.describe_load_balancers(load_balancer_names: [actual_elb.load_balancer_name])[:load_balancer_descriptions][0][:health_check].to_hash
- desired = deep_merge!(health_check, Marshal.load(Marshal.dump(current)))
- if current != desired
- perform_action.call(" updating health check to #{desired.inspect}") do
- elb.configure_health_check(
- load_balancer_name: actual_elb.load_balancer_name,
- health_check: desired.to_hash
- )
- end
- end
- end
+ # Update the load balancer sticky sessions
+ if sticky_sessions
+ policy_name = "#{actual_elb.load_balancer_name}-sticky-session-policy"
+ policies = elb.describe_load_balancer_policies(load_balancer_name: actual_elb.load_balancer_name)

- # Update the load balancer sticky sessions
- if sticky_sessions
- policy_name = "#{actual_elb.load_balancer_name}-sticky-session-policy"
- policies = elb.describe_load_balancer_policies(load_balancer_name: actual_elb.load_balancer_name)
-
- existing_cookie_policy = policies[:policy_descriptions].detect { |pd| pd[:policy_type_name] == 'AppCookieStickinessPolicyType' && pd[:policy_name] == policy_name}
- existing_cookie_name = existing_cookie_policy ? (existing_cookie_policy[:policy_attribute_descriptions].detect { |pad| pad[:attribute_name] == 'CookieName' })[:attribute_value] : nil
- desired_cookie_name = sticky_sessions[:cookie_name]
-
- # Create or update the policy to have the desired cookie_name
- if existing_cookie_policy.nil?
- perform_action.call(" creating sticky sessions with cookie_name #{desired_cookie_name}") do
- elb.create_app_cookie_stickiness_policy(
- load_balancer_name: actual_elb.load_balancer_name,
- policy_name: policy_name,
- cookie_name: desired_cookie_name
- )
- end
- elsif existing_cookie_name && existing_cookie_name != desired_cookie_name
- perform_action.call(" updating sticky sessions from cookie_name #{existing_cookie_name} to cookie_name #{desired_cookie_name}") do
- elb.delete_load_balancer_policy(
- load_balancer_name: actual_elb.load_balancer_name,
- policy_name: policy_name
- )
- elb.create_app_cookie_stickiness_policy(
- load_balancer_name: actual_elb.load_balancer_name,
- policy_name: policy_name,
- cookie_name: desired_cookie_name
- )
- end
- end
+ existing_cookie_policy = policies[:policy_descriptions].detect { |pd| pd[:policy_type_name] == "AppCookieStickinessPolicyType" && pd[:policy_name] == policy_name }
+ existing_cookie_name = existing_cookie_policy ? (existing_cookie_policy[:policy_attribute_descriptions].detect { |pad| pad[:attribute_name] == "CookieName" })[:attribute_value] : nil
+ desired_cookie_name = sticky_sessions[:cookie_name]

- # Ensure the policy is attached to the appropriate listener
- elb_description = elb.describe_load_balancers(load_balancer_names: [actual_elb.load_balancer_name])[:load_balancer_descriptions].first
- listeners = elb_description[:listener_descriptions]
+ # Create or update the policy to have the desired cookie_name
+ if existing_cookie_policy.nil?
+ perform_action.call(" creating sticky sessions with cookie_name #{desired_cookie_name}") do
+ elb.create_app_cookie_stickiness_policy(
+ load_balancer_name: actual_elb.load_balancer_name,
+ policy_name: policy_name,
+ cookie_name: desired_cookie_name
+ )
+ end
+ elsif existing_cookie_name && existing_cookie_name != desired_cookie_name
+ perform_action.call(" updating sticky sessions from cookie_name #{existing_cookie_name} to cookie_name #{desired_cookie_name}") do
+ elb.delete_load_balancer_policy(
+ load_balancer_name: actual_elb.load_balancer_name,
+ policy_name: policy_name
+ )
+ elb.create_app_cookie_stickiness_policy(
+ load_balancer_name: actual_elb.load_balancer_name,
+ policy_name: policy_name,
+ cookie_name: desired_cookie_name
+ )
+ end
+ end

- sticky_sessions[:ports].each do |ss_port|
- listener = listeners.detect { |ld| ld[:listener][:load_balancer_port] == ss_port }
+ # Ensure the policy is attached to the appropriate listener
+ elb_description = elb.describe_load_balancers(load_balancer_names: [actual_elb.load_balancer_name])[:load_balancer_descriptions].first
+ listeners = elb_description[:listener_descriptions]

- unless listener.nil?
- policy_names = listener[:policy_names]
+ sticky_sessions[:ports].each do |ss_port|
+ listener = listeners.detect { |ld| ld[:listener][:load_balancer_port] == ss_port }

- unless policy_names.include?(policy_name)
+ next if listener.nil?
+ policy_names = listener[:policy_names]
+
+ next if policy_names.include?(policy_name)
  policy_names << policy_name

  elb.set_load_balancer_policies_of_listener(
@@ -506,157 +503,150 @@ module AWSDriver
506
503
  )
507
504
  end
508
505
  end
509
- end
510
- end
511
506
 
512
- # Update instance list, but only if there are machines specified
513
- if machine_specs
514
- instances_to_add = []
515
- if actual_elb.instances
516
- assigned_instance_ids = actual_elb.instances.map { |i| i.instance_id }
517
- instances_to_add = machine_specs.select { |s| !assigned_instance_ids.include?(s.reference['instance_id']) }
518
- instance_ids_to_remove = assigned_instance_ids - machine_specs.map { |s| s.reference['instance_id'] }
519
- end
520
-
521
- if instances_to_add.size > 0
522
- perform_action.call(" add machines #{instances_to_add.map { |s| s.name }.join(', ')}") do
523
- instance_ids_to_add = instances_to_add.map { |s| s.reference['instance_id'] }
524
- Chef::Log.debug("Adding instances #{instance_ids_to_add.join(', ')} to load balancer #{actual_elb.load_balancer_name} in region #{region}")
525
- instances_to_add.each do |instance|
526
- elb.register_instances_with_load_balancer({ instances: [ { instance_id: instance.reference['instance_id'] }], load_balancer_name: actual_elb.load_balancer_name})
507
+ # Update instance list, but only if there are machines specified
508
+ if machine_specs
509
+ instances_to_add = []
510
+ if actual_elb.instances
511
+ assigned_instance_ids = actual_elb.instances.map(&:instance_id)
512
+ instances_to_add = machine_specs.reject { |s| assigned_instance_ids.include?(s.reference["instance_id"]) }
513
+ instance_ids_to_remove = assigned_instance_ids - machine_specs.map { |s| s.reference["instance_id"] }
514
+ end
515
+
516
+ unless instances_to_add.empty?
517
+ perform_action.call(" add machines #{instances_to_add.map(&:name).join(', ')}") do
518
+ instance_ids_to_add = instances_to_add.map { |s| s.reference["instance_id"] }
519
+ Chef::Log.debug("Adding instances #{instance_ids_to_add.join(', ')} to load balancer #{actual_elb.load_balancer_name} in region #{region}")
520
+ instances_to_add.each do |instance|
521
+ elb.register_instances_with_load_balancer(instances: [{ instance_id: instance.reference["instance_id"] }], load_balancer_name: actual_elb.load_balancer_name)
522
+ end
523
+ end
524
+ end
525
+
526
+ unless instance_ids_to_remove.empty?
527
+ perform_action.call(" remove instances #{instance_ids_to_remove}") do
528
+ instances_to_remove = Hash[instance_ids_to_remove.map { |id| [:instance_id, id] }]
529
+ elb.deregister_instances_from_load_balancer(instances: [instances_to_remove], load_balancer_name: actual_elb.load_balancer_name)
530
+ end
527
531
  end
528
532
  end
529
- end
530
533
 
531
- if instance_ids_to_remove.size > 0
532
- perform_action.call(" remove instances #{instance_ids_to_remove}") do
533
- instances_to_remove = Hash[instance_ids_to_remove.map {|id| [:instance_id, id]}]
534
- elb.deregister_instances_from_load_balancer({ instances: [instances_to_remove], load_balancer_name: actual_elb.load_balancer_name})
534
+ # We have successfully switched all our instances to the (possibly) new LB
535
+ # so it is safe to delete the old one.
536
+ old_elb.delete unless old_elb.nil?
537
+ ensure
538
+ # Something went wrong before we could moved instances from the old ELB to the new one
539
+ # Don't delete the old ELB, but warn users there could now be 2 ELBs with the same name
540
+ unless old_elb.nil?
541
+ Chef::Log.warn("It is possible there are now 2 ELB instances - #{old_elb.load_balancer_name} and #{actual_elb.load_balancer_name}. " \
542
+ "Determine which is correct and manually clean up the other.")
535
543
  end
536
544
  end
537
- end
538
545
 
539
- # We have successfully switched all our instances to the (possibly) new LB
540
- # so it is safe to delete the old one.
541
- unless old_elb.nil?
542
- old_elb.delete
543
- end
544
- ensure
545
- # Something went wrong before we could moved instances from the old ELB to the new one
546
- # Don't delete the old ELB, but warn users there could now be 2 ELBs with the same name
547
- unless old_elb.nil?
548
- Chef::Log.warn("It is possible there are now 2 ELB instances - #{old_elb.load_balancer_name} and #{actual_elb.load_balancer_name}. " +
549
- "Determine which is correct and manually clean up the other.")
550
- end
551
- end
546
+ # Compare two server certificates by casting them both to strings.
547
+ #
548
+ # The parameters should either be a String containing the
549
+ # certificate ARN, or a IAM::ServerCertificate object.
550
+ def server_certificate_eql?(cert1, cert2)
551
+ server_cert_to_string(cert1) == server_cert_to_string(cert2)
552
+ end
552
553
 
553
- # Compare two server certificates by casting them both to strings.
554
- #
555
- # The parameters should either be a String containing the
556
- # certificate ARN, or a IAM::ServerCertificate object.
557
- def server_certificate_eql?(cert1, cert2)
558
- server_cert_to_string(cert1) == server_cert_to_string(cert2)
559
- end
554
+ def server_cert_to_string(cert)
555
+ if cert.is_a?(Hash) && cert.key?(:arn)
556
+ cert[:arn]
557
+ else
558
+ cert
559
+ end
560
+ end
560
561
 
561
- def server_cert_to_string(cert)
562
- if cert.is_a?(Hash) && cert.has_key?(:arn)
563
- cert[:arn]
564
- else
565
- cert
566
- end
567
- end
562
+ # Retrieve the server certificate from a listener spec, preferring
563
+ # the server_certificate key.
564
+ def server_cert_from_spec(spec)
565
+ if spec[:server_certificate]
566
+ spec[:server_certificate]
567
+ elsif spec[:ssl_certificate_id]
568
+ spec[:ssl_certificate_id]
569
+ end
570
+ end
568
571
 
569
- # Retreive the server certificate from a listener spec, prefering
570
- # the server_certificate key.
571
- def server_cert_from_spec(spec)
572
- if spec[:server_certificate]
573
- spec[:server_certificate]
574
- elsif spec[:ssl_certificate_id]
575
- spec[:ssl_certificate_id]
576
- else
577
- nil
578
- end
579
- end
572
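A short usage sketch of these certificate helpers (server_certificate_eql? above, server_cert_to_string and server_cert_from_spec below), with a hypothetical listener spec; a plain ARN string and a Hash carrying :arn reduce to the same value:

    spec = { ssl_certificate_id: "arn:aws:iam::123456789012:server-certificate/my-cert" }

    desired = server_cert_from_spec(spec)   # falls back to :ssl_certificate_id
    actual  = { arn: "arn:aws:iam::123456789012:server-certificate/my-cert" }

    server_certificate_eql?(desired, actual)   # => true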
+ def ready_load_balancer(action_handler, lb_spec, lb_options, machine_spec); end
580
573
 
581
- def ready_load_balancer(action_handler, lb_spec, lb_options, machine_spec)
582
- end
574
+ def destroy_load_balancer(action_handler, lb_spec, lb_options)
575
+ lb_options = deep_symbolize_keys(lb_options)
576
+ return if lb_spec.nil?
583
577
 
584
- def destroy_load_balancer(action_handler, lb_spec, lb_options)
585
- lb_options = deep_symbolize_keys(lb_options)
586
- return if lb_spec == nil
578
+ actual_elb = load_balancer_for(lb_spec)
579
+ if actual_elb
580
+ # Remove ELB from AWS
581
+ action_handler.perform_action "Deleting EC2 ELB #{lb_spec.id}" do
582
+ elb.delete_load_balancer(load_balancer_name: actual_elb.load_balancer_name)
583
+ end
584
+ end
587
585
 
588
- actual_elb = load_balancer_for(lb_spec)
589
- if actual_elb
590
- # Remove ELB from AWS
591
- action_handler.perform_action "Deleting EC2 ELB #{lb_spec.id}" do
592
- elb.delete_load_balancer({load_balancer_name: actual_elb.load_balancer_name })
586
+ # Remove LB spec from databag
587
+ lb_spec.delete(action_handler)
593
588
  end
594
- end
595
-
596
- # Remove LB spec from databag
597
- lb_spec.delete(action_handler)
598
- end
599
589
 
600
- # Image methods
601
- def allocate_image(action_handler, image_spec, image_options, machine_spec, machine_options)
602
- actual_image = image_for(image_spec)
603
- image_options = deep_symbolize_keys(image_options)
604
- machine_options = deep_symbolize_keys(machine_options)
605
- aws_tags = image_options.delete(:aws_tags) || {}
606
- if actual_image.nil? || !actual_image.exists? || actual_image.state.to_sym == :failed
607
- action_handler.perform_action "Create image #{image_spec.name} from machine #{machine_spec.name} with options #{image_options.inspect}" do
608
- image_options[:name] ||= image_spec.name
609
- image_options[:instance_id] ||= machine_spec.reference['instance_id']
610
- image_options[:description] ||= "Image #{image_spec.name} created from machine #{machine_spec.name}"
611
- Chef::Log.debug "AWS Image options: #{image_options.inspect}"
612
- image_type = ec2_client.create_image(image_options.to_hash)
613
- actual_image = ec2_resource.image(image_type.image_id)
614
- image_spec.reference = {
615
- 'driver_version' => Chef::Provisioning::AWSDriver::VERSION,
616
- 'image_id' => actual_image.image_id,
617
- 'allocated_at' => Time.now.to_i,
618
- 'from-instance' => image_options[:instance_id]
619
- }
620
- image_spec.driver_url = driver_url
590
+ # Image methods
591
+ def allocate_image(action_handler, image_spec, image_options, machine_spec, machine_options)
592
+ actual_image = image_for(image_spec)
593
+ image_options = deep_symbolize_keys(image_options)
594
+ machine_options = deep_symbolize_keys(machine_options)
595
+ aws_tags = image_options.delete(:aws_tags) || {}
596
+ if actual_image.nil? || !actual_image.exists? || actual_image.state.to_sym == :failed
597
+ action_handler.perform_action "Create image #{image_spec.name} from machine #{machine_spec.name} with options #{image_options.inspect}" do
598
+ image_options[:name] ||= image_spec.name
599
+ image_options[:instance_id] ||= machine_spec.reference["instance_id"]
600
+ image_options[:description] ||= "Image #{image_spec.name} created from machine #{machine_spec.name}"
601
+ Chef::Log.debug "AWS Image options: #{image_options.inspect}"
602
+ image_type = ec2_client.create_image(image_options.to_hash)
603
+ actual_image = ec2_resource.image(image_type.image_id)
604
+ image_spec.reference = {
605
+ "driver_version" => Chef::Provisioning::AWSDriver::VERSION,
606
+ "image_id" => actual_image.image_id,
607
+ "allocated_at" => Time.now.to_i,
608
+ "from-instance" => image_options[:instance_id]
609
+ }
610
+ image_spec.driver_url = driver_url
611
+ end
612
+ end
613
+ aws_tags["from-instance"] = image_options[:instance_id] if image_options[:instance_id]
614
+ converge_ec2_tags(actual_image, aws_tags, action_handler)
621
615
  end
622
- end
623
- aws_tags['from-instance'] = image_options[:instance_id] if image_options[:instance_id]
624
- converge_ec2_tags(actual_image, aws_tags, action_handler)
625
- end
626
616
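For reference, a sketch (names hypothetical) of the options hash allocate_image assembles before calling ec2_client.create_image:

    image_options = {
      name: "web-base",            # defaults to image_spec.name
      instance_id: "i-0abc1234",   # defaults to machine_spec.reference["instance_id"]
      description: "Image web-base created from machine web-1"
    }
    image_type   = ec2_client.create_image(image_options)
    actual_image = ec2_resource.image(image_type.image_id)   # tagged and waited on afterwards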
 
627
- def ready_image(action_handler, image_spec, image_options)
628
- actual_image = image_for(image_spec)
629
- if actual_image.nil? || !actual_image.exists?
630
- raise 'Cannot ready an image that does not exist'
631
- else
632
- image_options = deep_symbolize_keys(image_options)
633
- aws_tags = image_options.delete(:aws_tags) || {}
634
- aws_tags['from-instance'] = image_spec.reference['from-instance'] if image_spec.reference['from-instance']
635
- converge_ec2_tags(actual_image, aws_tags, action_handler)
636
- if actual_image.state.to_sym != :available
637
- action_handler.report_progress 'Waiting for image to be ready ...'
638
- wait_until_ready_image(action_handler, image_spec, actual_image)
617
+ def ready_image(action_handler, image_spec, image_options)
618
+ actual_image = image_for(image_spec)
619
+ if actual_image.nil? || !actual_image.exists?
620
+ raise "Cannot ready an image that does not exist"
621
+ else
622
+ image_options = deep_symbolize_keys(image_options)
623
+ aws_tags = image_options.delete(:aws_tags) || {}
624
+ aws_tags["from-instance"] = image_spec.reference["from-instance"] if image_spec.reference["from-instance"]
625
+ converge_ec2_tags(actual_image, aws_tags, action_handler)
626
+ if actual_image.state.to_sym != :available
627
+ action_handler.report_progress "Waiting for image to be ready ..."
628
+ wait_until_ready_image(action_handler, image_spec, actual_image)
629
+ end
630
+ end
639
631
  end
640
- end
641
- end
642
632
 
643
- def destroy_image(action_handler, image_spec, image_options)
644
- image_options = deep_symbolize_keys(image_options)
645
- # TODO the driver should automatically be set by `inline_resource`
646
- d = self
647
- Provisioning.inline_resource(action_handler) do
648
- aws_image image_spec.name do
649
- action :destroy
650
- driver d
651
- chef_server image_spec.managed_entry_store.chef_server
652
- managed_entry_store image_spec.managed_entry_store
633
+ def destroy_image(action_handler, image_spec, image_options)
634
+ image_options = deep_symbolize_keys(image_options)
635
+ # TODO: the driver should automatically be set by `inline_resource`
636
+ d = self
637
+ Provisioning.inline_resource(action_handler) do
638
+ aws_image image_spec.name do
639
+ action :destroy
640
+ driver d
641
+ chef_server image_spec.managed_entry_store.chef_server
642
+ managed_entry_store image_spec.managed_entry_store
643
+ end
644
+ end
653
645
  end
654
- end
655
- end
656
646
 
657
- def user_data
658
- # TODO: Make this use HTTPS at some point.
659
- <<EOD
647
+ def user_data
648
+ # TODO: Make this use HTTPS at some point.
649
+ <<EOD
660
650
  <powershell>
661
651
  winrm quickconfig -q
662
652
  winrm set winrm/config/winrs '@{MaxMemoryPerShellMB="300"}'
@@ -672,9 +662,9 @@ sc config winrm start=auto
672
662
  net start winrm
673
663
  </powershell>
674
664
  EOD
675
- end
665
+ end
676
666
 
677
- def https_user_data
667
+ def https_user_data
678
668
  <<EOD
679
669
  <powershell>
680
670
  winrm quickconfig -q
@@ -710,116 +700,116 @@ sc config winrm start=auto
710
700
  net start winrm
711
701
  </powershell>
712
702
  EOD
713
- end
703
+ end
714
704
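These heredocs are raw PowerShell; bootstrap_options_for (further down) Base64-encodes whichever one is selected before passing it to EC2, which runs the <powershell> block at first boot. A minimal sketch of that round trip:

    require "base64"

    data = machine_options[:winrm_transport] == "https" ? https_user_data : user_data
    bootstrap_options[:user_data] = Base64.encode64(data)

    Base64.decode64(bootstrap_options[:user_data]).include?("<powershell>")   # => true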
 
715
- # Machine methods
716
- def allocate_machine(action_handler, machine_spec, machine_options)
717
- machine_options = deep_symbolize_keys(machine_options)
718
- instance = instance_for(machine_spec)
719
- bootstrap_options = bootstrap_options_for(action_handler, machine_spec, machine_options)
705
+ # Machine methods
706
+ def allocate_machine(action_handler, machine_spec, machine_options)
707
+ machine_options = deep_symbolize_keys(machine_options)
708
+ instance = instance_for(machine_spec)
709
+ bootstrap_options = bootstrap_options_for(action_handler, machine_spec, machine_options)
720
710
 
721
- if instance == nil || !instance.exists? || instance.state.name == "terminated"
722
- action_handler.perform_action "Create #{machine_spec.name} with AMI #{bootstrap_options[:image_id]} in #{region}" do
723
- Chef::Log.debug "Creating instance with bootstrap options #{bootstrap_options}"
724
- instance = create_instance_and_reference(bootstrap_options, action_handler, machine_spec, machine_options)
711
+ if instance.nil? || !instance.exists? || instance.state.name == "terminated"
712
+ action_handler.perform_action "Create #{machine_spec.name} with AMI #{bootstrap_options[:image_id]} in #{region}" do
713
+ Chef::Log.debug "Creating instance with bootstrap options #{bootstrap_options}"
714
+ instance = create_instance_and_reference(bootstrap_options, action_handler, machine_spec, machine_options)
715
+ end
716
+ end
717
+ converge_ec2_tags(instance, machine_options[:aws_tags], action_handler)
725
718
  end
726
- end
727
- converge_ec2_tags(instance, machine_options[:aws_tags], action_handler)
728
- end
729
719
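A hedged, recipe-level sketch of what drives allocate_machine: a machine resource whose bootstrap_options feed bootstrap_options_for below (all names illustrative):

    require "chef/provisioning/aws_driver"

    with_driver "aws::us-east-1"

    machine "web-1" do
      machine_options(
        bootstrap_options: {
          image_id: "ami-0abcdef1234567890",  # else default_ami_for_region is used
          instance_type: "t2.micro",          # else default_instance_type
          key_name: "my-keypair"              # else a default keypair is generated
        },
        aws_tags: { "role" => "web" }         # applied by converge_ec2_tags after create
      )
    end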
 
730
- def allocate_machines(action_handler, specs_and_options, parallelizer)
731
- create_servers(action_handler, specs_and_options, parallelizer) do |machine_spec, server|
732
- yield machine_spec
733
- end
734
- specs_and_options.keys
735
- end
720
+ def allocate_machines(action_handler, specs_and_options, parallelizer)
721
+ create_servers(action_handler, specs_and_options, parallelizer) do |machine_spec, _server|
722
+ yield machine_spec
723
+ end
724
+ specs_and_options.keys
725
+ end
736
726
 
737
- def ready_machine(action_handler, machine_spec, machine_options)
738
- machine_options = deep_symbolize_keys(machine_options)
739
- instance = instance_for(machine_spec)
740
- converge_ec2_tags(instance, machine_options[:aws_tags], action_handler)
727
+ def ready_machine(action_handler, machine_spec, machine_options)
728
+ machine_options = deep_symbolize_keys(machine_options)
729
+ instance = instance_for(machine_spec)
730
+ converge_ec2_tags(instance, machine_options[:aws_tags], action_handler)
741
731
 
742
- if instance.nil?
743
- raise "Machine #{machine_spec.name} does not have an instance associated with it, or instance does not exist."
744
- end
732
+ if instance.nil?
733
+ raise "Machine #{machine_spec.name} does not have an instance associated with it, or instance does not exist."
734
+ end
745
735
 
746
- if instance.state.name != "running"
747
- wait_until_machine(action_handler, machine_spec, "finish stopping", instance) { |instance| instance.state.name != "stopping" }
748
- if instance.state.name == "stopped"
749
- action_handler.perform_action "Start #{machine_spec.name} (#{machine_spec.reference['instance_id']}) in #{region} ..." do
750
- instance.start
736
+ if instance.state.name != "running"
737
+ wait_until_machine(action_handler, machine_spec, "finish stopping", instance) { |instance| instance.state.name != "stopping" }
738
+ if instance.state.name == "stopped"
739
+ action_handler.perform_action "Start #{machine_spec.name} (#{machine_spec.reference['instance_id']}) in #{region} ..." do
740
+ instance.start
741
+ end
742
+ end
743
+ wait_until_instance_running(action_handler, machine_spec, instance)
751
744
  end
752
- end
753
- wait_until_instance_running(action_handler, machine_spec, instance)
754
- end
755
745
 
756
- # Windows machines potentially do a bunch of extra stuff - setting hostname,
757
- # sending out encrypted password, restarting instance, etc.
758
- if machine_spec.reference['is_windows']
759
- wait_until_machine(action_handler, machine_spec, "receive 'Windows is ready' message from the AWS console", instance) { |instance|
760
- instance.console_output.output
761
- # seems to be a bug as we need to run this twice
762
- # to consistently ensure the output is fully pulled
763
- encoded_output = instance.console_output.output
764
- if encoded_output.nil? || encoded_output.empty?
765
- false
766
- else
767
- output = Base64.decode64(encoded_output)
768
- output =~ /Message: Windows is Ready to use/
746
+ # Windows machines potentially do a bunch of extra stuff - setting hostname,
747
+ # sending out encrypted password, restarting instance, etc.
748
+ if machine_spec.reference["is_windows"]
749
+ wait_until_machine(action_handler, machine_spec, "receive 'Windows is ready' message from the AWS console", instance) do |instance|
750
+ instance.console_output.output
751
+ # seems to be a bug as we need to run this twice
752
+ # to consistently ensure the output is fully pulled
753
+ encoded_output = instance.console_output.output
754
+ if encoded_output.nil? || encoded_output.empty?
755
+ false
756
+ else
757
+ output = Base64.decode64(encoded_output)
758
+ output =~ /Message: Windows is Ready to use/
759
+ end
760
+ end
769
761
  end
770
- }
771
- end
772
- wait_for_transport(action_handler, machine_spec, machine_options, instance)
773
- machine_for(machine_spec, machine_options, instance)
774
- end
762
+ wait_for_transport(action_handler, machine_spec, machine_options, instance)
763
+ machine_for(machine_spec, machine_options, instance)
764
+ end
775
765
 
776
- def connect_to_machine(name, chef_server = nil)
777
- if name.is_a?(MachineSpec)
778
- machine_spec = name
779
- else
780
- machine_spec = Chef::Provisioning::ChefMachineSpec.get(name, chef_server)
781
- end
766
+ def connect_to_machine(name, chef_server = nil)
767
+ machine_spec = if name.is_a?(MachineSpec)
768
+ name
769
+ else
770
+ Chef::Provisioning::ChefMachineSpec.get(name, chef_server)
771
+ end
782
772
 
783
- machine_for(machine_spec, machine_spec.reference)
784
- end
773
+ machine_for(machine_spec, machine_spec.reference)
774
+ end
785
775
 
786
- def stop_machine(action_handler, machine_spec, machine_options)
787
- machine_options = deep_symbolize_keys(machine_options)
788
- instance = instance_for(machine_spec)
789
- if instance && instance.exists?
790
- wait_until_machine(action_handler, machine_spec, "finish coming up so we can stop it", instance) { |instance| instance.state.name != "pending" }
791
- if instance.state.name == "running"
792
- action_handler.perform_action "Stop #{machine_spec.name} (#{instance.id}) in #{region} ..." do
793
- instance.stop
776
+ def stop_machine(action_handler, machine_spec, machine_options)
777
+ machine_options = deep_symbolize_keys(machine_options)
778
+ instance = instance_for(machine_spec)
779
+ if instance && instance.exists?
780
+ wait_until_machine(action_handler, machine_spec, "finish coming up so we can stop it", instance) { |instance| instance.state.name != "pending" }
781
+ if instance.state.name == "running"
782
+ action_handler.perform_action "Stop #{machine_spec.name} (#{instance.id}) in #{region} ..." do
783
+ instance.stop
784
+ end
785
+ end
786
+ wait_until_machine(action_handler, machine_spec, "stop", instance) { |instance| %w{stopped terminated}.include?(instance.state.name) }
794
787
  end
795
788
  end
796
- wait_until_machine(action_handler, machine_spec, "stop", instance) { |instance| %w[stopped terminated].include?(instance.state.name) }
797
- end
798
- end
799
789
 
800
- def destroy_machine(action_handler, machine_spec, machine_options)
801
- machine_options = deep_symbolize_keys(machine_options)
802
- d = self
803
- Provisioning.inline_resource(action_handler) do
804
- aws_instance machine_spec.name do
805
- action :destroy
806
- driver d
807
- chef_server machine_spec.managed_entry_store.chef_server
808
- managed_entry_store machine_spec.managed_entry_store
809
- end
810
- end
790
+ def destroy_machine(action_handler, machine_spec, machine_options)
791
+ machine_options = deep_symbolize_keys(machine_options)
792
+ d = self
793
+ Provisioning.inline_resource(action_handler) do
794
+ aws_instance machine_spec.name do
795
+ action :destroy
796
+ driver d
797
+ chef_server machine_spec.managed_entry_store.chef_server
798
+ managed_entry_store machine_spec.managed_entry_store
799
+ end
800
+ end
811
801
 
812
- # TODO move this into the aws_instance provider somehow
813
- strategy = convergence_strategy_for(machine_spec, machine_options)
814
- strategy.cleanup_convergence(action_handler, machine_spec)
815
- end
802
+ # TODO: move this into the aws_instance provider somehow
803
+ strategy = convergence_strategy_for(machine_spec, machine_options)
804
+ strategy.cleanup_convergence(action_handler, machine_spec)
805
+ end
816
806
 
817
- def ec2
818
- @ec2 ||= ::Aws::EC2::Client.new(aws_config)
819
- end
807
+ def ec2
808
+ @ec2 ||= ::Aws::EC2::Client.new(aws_config)
809
+ end
820
810
 
821
- AWS_V2_SERVICES.each do |load_name, short_name|
822
- class_eval <<-META
811
+ AWS_V2_SERVICES.each do |load_name, short_name|
812
+ class_eval <<-META
823
813
 
824
814
  def #{short_name}_client
825
815
  @#{short_name}_client ||= ::Aws::#{load_name}::Client.new(**aws_config_2)
@@ -829,829 +819,826 @@ EOD
829
819
  @#{short_name}_resource ||= ::Aws::#{load_name}::Resource.new(**(aws_config_2.merge({client: #{short_name}_client})))
830
820
  end
831
821
 
832
- META
833
- end
834
-
835
- def elb
836
- @elb ||= ::Aws::ElasticLoadBalancing::Client.new(aws_config)
837
- end
838
-
839
- def elasticache
840
- @elasticache ||= ::Aws::ElastiCache::Client.new(aws_config)
841
- end
842
-
843
- def iam
844
- @iam ||= ::Aws::IAM::Client.new(aws_config)
845
- end
822
+ META
823
+ end
846
824
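Expanded by hand for one entry, the class_eval template above defines a memoized client/resource pair per service. Assuming AWS_V2_SERVICES maps "EC2" => "ec2", the generated methods are equivalent to:

    def ec2_client
      @ec2_client ||= ::Aws::EC2::Client.new(**aws_config_2)
    end

    def ec2_resource
      @ec2_resource ||= ::Aws::EC2::Resource.new(**aws_config_2.merge(client: ec2_client))
    end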
 
847
- def rds
848
- @rds ||= ::Aws::RDS::Client.new(aws_config)
849
- end
825
+ def elb
826
+ @elb ||= ::Aws::ElasticLoadBalancing::Client.new(aws_config)
827
+ end
850
828
 
851
- def s3_client
852
- @s3 ||= ::Aws::S3::Client.new( aws_config)
853
- end
829
+ def elasticache
830
+ @elasticache ||= ::Aws::ElastiCache::Client.new(aws_config)
831
+ end
854
832
 
855
- def sns
856
- @sns ||= ::Aws::SNS::Client.new(aws_config)
857
- end
833
+ def iam
834
+ @iam ||= ::Aws::IAM::Client.new(aws_config)
835
+ end
858
836
 
859
- def sqs
860
- @sqs ||= ::Aws::SQS::Client.new(aws_config)
861
- end
837
+ def rds
838
+ @rds ||= ::Aws::RDS::Client.new(aws_config)
839
+ end
862
840
 
863
- def auto_scaling
864
- @auto_scaling ||= ::Aws::AutoScaling.new(config: aws_config)
865
- end
841
+ def s3_client
842
+ @s3 ||= ::Aws::S3::Client.new(aws_config)
843
+ end
866
844
 
867
- def build_arn(partition: 'aws', service: nil, region: aws_config[:region], account_id: self.account_id, resource: nil)
868
- "arn:#{partition}:#{service}:#{region}:#{account_id}:#{resource}"
869
- end
845
+ def sns
846
+ @sns ||= ::Aws::SNS::Client.new(aws_config)
847
+ end
870
848
 
871
- def parse_arn(arn)
872
- parts = arn.split(':', 6)
873
- {
874
- partition: parts[1],
875
- service: parts[2],
876
- region: parts[3],
877
- account_id: parts[4],
878
- resource: parts[5]
879
- }
880
- end
849
+ def sqs
850
+ @sqs ||= ::Aws::SQS::Client.new(aws_config)
851
+ end
881
852
 
882
- def account_id
883
- begin
884
- # We've got an AWS account root credential or an IAM admin with access rights
885
- current_user = iam.get_user
886
- arn = current_user[:user][:arn]
887
- rescue ::Aws::IAM::Errors::AccessDenied => e
888
- # If we don't have access, the error message still tells us our account ID and user ...
889
- # https://forums.aws.amazon.com/thread.jspa?messageID=394344
890
- if e.to_s !~ /\b(arn:aws:iam::[0-9]{12}:\S*)/
891
- raise "IAM error response for GetUser did not include user ARN. Can't retrieve account ID."
892
- end
893
- arn = $1
894
- end
895
- parse_arn(arn)[:account_id]
896
- end
853
+ def auto_scaling
854
+ @auto_scaling ||= ::Aws::AutoScaling.new(config: aws_config)
855
+ end
897
856
 
898
- # For creating things like AWS keypairs exclusively
899
- @@chef_default_lock = Mutex.new
857
+ def build_arn(partition: "aws", service: nil, region: aws_config[:region], account_id: self.account_id, resource: nil)
858
+ "arn:#{partition}:#{service}:#{region}:#{account_id}:#{resource}"
859
+ end
900
860
 
901
- def machine_for(machine_spec, machine_options, instance = nil)
902
- instance ||= instance_for(machine_spec)
861
+ def parse_arn(arn)
862
+ parts = arn.split(":", 6)
863
+ {
864
+ partition: parts[1],
865
+ service: parts[2],
866
+ region: parts[3],
867
+ account_id: parts[4],
868
+ resource: parts[5]
869
+ }
870
+ end
903
871
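A worked round trip through the two ARN helpers (account id made up):

    arn = build_arn(service: "iam", region: "", account_id: "123456789012",
                    resource: "user/alice")
    # => "arn:aws:iam::123456789012:user/alice"

    parse_arn(arn)
    # => { partition: "aws", service: "iam", region: "",
    #      account_id: "123456789012", resource: "user/alice" }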
 
904
- if !instance
905
- raise "Instance for node #{machine_spec.name} has not been created!"
906
- end
872
+ def account_id
873
+ begin
874
+ # We've got an AWS account root credential or an IAM admin with access rights
875
+ current_user = iam.get_user
876
+ arn = current_user[:user][:arn]
877
+ rescue ::Aws::IAM::Errors::AccessDenied => e
878
+ # If we don't have access, the error message still tells us our account ID and user ...
879
+ # https://forums.aws.amazon.com/thread.jspa?messageID=394344
880
+ if e.to_s !~ /\b(arn:aws:iam::[0-9]{12}:\S*)/
881
+ raise "IAM error response for GetUser did not include user ARN. Can't retrieve account ID."
882
+ end
883
+ arn = Regexp.last_match(1)
884
+ end
885
+ parse_arn(arn)[:account_id]
886
+ end
907
887
 
908
- if machine_spec.reference['is_windows']
909
- Chef::Provisioning::Machine::WindowsMachine.new(machine_spec, transport_for(machine_spec, machine_options, instance), convergence_strategy_for(machine_spec, machine_options))
910
- else
911
- Chef::Provisioning::Machine::UnixMachine.new(machine_spec, transport_for(machine_spec, machine_options, instance), convergence_strategy_for(machine_spec, machine_options))
912
- end
913
- end
888
+ # For creating things like AWS keypairs exclusively
889
+ @@chef_default_lock = Mutex.new
914
890
 
915
- def bootstrap_options_for(action_handler, machine_spec, machine_options)
916
- bootstrap_options = deep_symbolize_keys(machine_options[:bootstrap_options])
917
- if bootstrap_options==nil
918
- bootstrap_options=Hash({})
919
- end
920
- # These are hardcoded for now - only 1 machine at a time
921
- bootstrap_options[:min_count] = bootstrap_options[:max_count] = 1
922
- bootstrap_options[:instance_type] ||= default_instance_type
923
- image_id = machine_options[:from_image] || bootstrap_options[:image_id] || machine_options[:image_id] || default_ami_for_region(region)
924
- bootstrap_options[:image_id] = image_id
925
- bootstrap_options.delete(:key_path)
926
- if !bootstrap_options[:key_name]
927
- Chef::Log.debug('No key specified, generating a default one...')
928
- bootstrap_options[:key_name] = default_aws_keypair(action_handler, machine_spec)
929
- end
930
- if bootstrap_options[:user_data]
931
- bootstrap_options[:user_data] = Base64.encode64(bootstrap_options[:user_data])
932
- end
891
+ def machine_for(machine_spec, machine_options, instance = nil)
892
+ instance ||= instance_for(machine_spec)
933
893
 
934
- # V1 -> V2 backwards compatability support
935
- unless bootstrap_options.fetch(:monitoring_enabled, nil).nil?
936
- bootstrap_options[:monitoring] = {enabled: bootstrap_options.delete(:monitoring_enabled)}
937
- end
938
- placement = {}
939
- if bootstrap_options[:availability_zone]
940
- placement[:availability_zone] = bootstrap_options.delete(:availability_zone)
941
- end
942
- if bootstrap_options[:placement_group]
943
- placement[:group_name] = bootstrap_options.delete(:placement_group)
944
- end
945
- unless bootstrap_options.fetch(:dedicated_tenancy, nil).nil?
946
- placement[:tenancy] = bootstrap_options.delete(:dedicated_tenancy) ? "dedicated" : "default"
947
- end
948
- unless placement.empty?
949
- bootstrap_options[:placement] = placement
950
- end
951
- if bootstrap_options[:subnet]
952
- bootstrap_options[:subnet_id] = bootstrap_options.delete(:subnet)
953
- end
954
- if bootstrap_options[:iam_instance_profile] && bootstrap_options[:iam_instance_profile].is_a?(String)
955
- bootstrap_options[:iam_instance_profile] = {name: bootstrap_options[:iam_instance_profile]}
956
- end
894
+ unless instance
895
+ raise "Instance for node #{machine_spec.name} has not been created!"
896
+ end
957
897
 
958
- if machine_options[:is_windows]
959
- Chef::Log.debug "Setting Default windows userdata based on WinRM transport"
960
- if bootstrap_options[:user_data].nil?
961
- case machine_options[:winrm_transport]
962
- when 'https'
963
- data = https_user_data
898
+ if machine_spec.reference["is_windows"]
899
+ Chef::Provisioning::Machine::WindowsMachine.new(machine_spec, transport_for(machine_spec, machine_options, instance), convergence_strategy_for(machine_spec, machine_options))
964
900
  else
965
- data = user_data
901
+ Chef::Provisioning::Machine::UnixMachine.new(machine_spec, transport_for(machine_spec, machine_options, instance), convergence_strategy_for(machine_spec, machine_options))
966
902
  end
967
- bootstrap_options[:user_data] = Base64.encode64(data)
968
903
  end
969
- else
970
- Chef::Log.debug "Non-windows, not setting Default userdata"
971
- end
972
-
973
- bootstrap_options = AWSResource.lookup_options(bootstrap_options, managed_entry_store: machine_spec.managed_entry_store, driver: self)
974
904
 
975
- # In the migration from V1 to V2 we still support associate_public_ip_address at the top level
976
- # we do this after the lookup because we have to copy any present subnets, etc. into the
977
- # network interfaces block
978
- unless bootstrap_options.fetch(:associate_public_ip_address, nil).nil?
979
- if bootstrap_options[:network_interfaces]
980
- raise "If you specify network_interfaces you must specify associate_public_ip_address in that list"
981
- end
982
- network_interface = {
983
- :device_index => 0,
984
- :associate_public_ip_address => bootstrap_options.delete(:associate_public_ip_address),
985
- :delete_on_termination => true
986
- }
987
- if bootstrap_options[:subnet_id]
988
- network_interface[:subnet_id] = bootstrap_options.delete(:subnet_id)
989
- end
990
- if bootstrap_options[:private_ip_address]
991
- network_interface[:private_ip_address] = bootstrap_options.delete(:private_ip_address)
992
- end
993
- if bootstrap_options[:security_group_ids]
994
- network_interface[:groups] = bootstrap_options.delete(:security_group_ids)
995
- end
996
- bootstrap_options[:network_interfaces] = [network_interface]
997
- end
905
+ def bootstrap_options_for(action_handler, machine_spec, machine_options)
906
+ bootstrap_options = deep_symbolize_keys(machine_options[:bootstrap_options])
907
+ bootstrap_options = Hash({}) if bootstrap_options.nil?
908
+ # These are hardcoded for now - only 1 machine at a time
909
+ bootstrap_options[:min_count] = bootstrap_options[:max_count] = 1
910
+ bootstrap_options[:instance_type] ||= default_instance_type
911
+ image_id = machine_options[:from_image] || bootstrap_options[:image_id] || machine_options[:image_id] || default_ami_for_region(region)
912
+ bootstrap_options[:image_id] = image_id
913
+ bootstrap_options.delete(:key_path)
914
+ unless bootstrap_options[:key_name]
915
+ Chef::Log.debug("No key specified, generating a default one...")
916
+ bootstrap_options[:key_name] = default_aws_keypair(action_handler, machine_spec)
917
+ end
918
+ if bootstrap_options[:user_data]
919
+ bootstrap_options[:user_data] = Base64.encode64(bootstrap_options[:user_data])
920
+ end
998
921
 
999
- Chef::Log.debug "AWS Bootstrap options: #{bootstrap_options.inspect}"
1000
- deep_symbolize_keys(bootstrap_options)
1001
- end
922
+ # V1 -> V2 backwards compatibility support
923
+ unless bootstrap_options.fetch(:monitoring_enabled, nil).nil?
924
+ bootstrap_options[:monitoring] = { enabled: bootstrap_options.delete(:monitoring_enabled) }
925
+ end
926
+ placement = {}
927
+ if bootstrap_options[:availability_zone]
928
+ placement[:availability_zone] = bootstrap_options.delete(:availability_zone)
929
+ end
930
+ if bootstrap_options[:placement_group]
931
+ placement[:group_name] = bootstrap_options.delete(:placement_group)
932
+ end
933
+ unless bootstrap_options.fetch(:dedicated_tenancy, nil).nil?
934
+ placement[:tenancy] = bootstrap_options.delete(:dedicated_tenancy) ? "dedicated" : "default"
935
+ end
936
+ bootstrap_options[:placement] = placement unless placement.empty?
937
+ if bootstrap_options[:subnet]
938
+ bootstrap_options[:subnet_id] = bootstrap_options.delete(:subnet)
939
+ end
940
+ if bootstrap_options[:iam_instance_profile] && bootstrap_options[:iam_instance_profile].is_a?(String)
941
+ bootstrap_options[:iam_instance_profile] = { name: bootstrap_options[:iam_instance_profile] }
942
+ end
1002
943
 
1003
- def default_ssh_username
1004
- 'ubuntu'
1005
- end
944
+ if machine_options[:is_windows]
945
+ Chef::Log.debug "Setting Default windows userdata based on WinRM transport"
946
+ if bootstrap_options[:user_data].nil?
947
+ data = case machine_options[:winrm_transport]
948
+ when "https"
949
+ https_user_data
950
+ else
951
+ user_data
952
+ end
953
+ bootstrap_options[:user_data] = Base64.encode64(data)
954
+ end
955
+ else
956
+ Chef::Log.debug "Non-windows, not setting Default userdata"
957
+ end
1006
958
 
1007
- def default_winrm_username
1008
- 'Administrator'
1009
- end
959
+ bootstrap_options = AWSResource.lookup_options(bootstrap_options, managed_entry_store: machine_spec.managed_entry_store, driver: self)
1010
960
 
1011
- def default_winrm_transport
1012
- 'http'
1013
- end
961
+ # In the migration from V1 to V2 we still support associate_public_ip_address at the top level
962
+ # we do this after the lookup because we have to copy any present subnets, etc. into the
963
+ # network interfaces block
964
+ unless bootstrap_options.fetch(:associate_public_ip_address, nil).nil?
965
+ if bootstrap_options[:network_interfaces]
966
+ raise "If you specify network_interfaces you must specify associate_public_ip_address in that list"
967
+ end
968
+ network_interface = {
969
+ device_index: 0,
970
+ associate_public_ip_address: bootstrap_options.delete(:associate_public_ip_address),
971
+ delete_on_termination: true
972
+ }
973
+ if bootstrap_options[:subnet_id]
974
+ network_interface[:subnet_id] = bootstrap_options.delete(:subnet_id)
975
+ end
976
+ if bootstrap_options[:private_ip_address]
977
+ network_interface[:private_ip_address] = bootstrap_options.delete(:private_ip_address)
978
+ end
979
+ if bootstrap_options[:security_group_ids]
980
+ network_interface[:groups] = bootstrap_options.delete(:security_group_ids)
981
+ end
982
+ bootstrap_options[:network_interfaces] = [network_interface]
983
+ end
1014
984
 
1015
- def keypair_for(bootstrap_options)
1016
- if bootstrap_options[:key_name]
1017
- keypair_name = bootstrap_options[:key_name]
1018
- actual_key_pair = ec2_resource.key_pair(keypair_name)
1019
- if !actual_key_pair.exists?
1020
- ec2_resource.key_pairs.create(keypair_name)
985
+ Chef::Log.debug "AWS Bootstrap options: #{bootstrap_options.inspect}"
986
+ deep_symbolize_keys(bootstrap_options)
1021
987
  end
1022
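A before/after sketch of the V1-to-V2 normalization bootstrap_options_for performs (input values hypothetical, defaults assumed):

    # Given machine_options[:bootstrap_options] of:
    {
      monitoring_enabled: true,
      availability_zone: "us-east-1a",
      subnet: "subnet-0abc1234",
      iam_instance_profile: "my-profile"
    }

    # the method returns roughly:
    {
      min_count: 1, max_count: 1,
      instance_type: "t2.micro",                    # default_instance_type (assumed)
      image_id: "ami-...",                          # default_ami_for_region (assumed)
      key_name: "chef_default",                     # generated when none is given (name assumed)
      monitoring: { enabled: true },
      placement: { availability_zone: "us-east-1a" },
      subnet_id: "subnet-0abc1234",
      iam_instance_profile: { name: "my-profile" }
    }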
- actual_key_pair
1023
- end
1024
- end
1025
988
 
1026
- def load_balancer_for(lb_spec)
1027
- Chef::Resource::AwsLoadBalancer.get_aws_object(lb_spec.name, driver: self, managed_entry_store: lb_spec.managed_entry_store, required: false)
1028
- end
1029
-
1030
- def instance_for(machine_spec)
1031
- if machine_spec.reference
1032
- if machine_spec.driver_url != driver_url
1033
- raise "Switching a machine's driver from #{machine_spec.driver_url} to #{driver_url} is not currently supported! Use machine :destroy and then re-create the machine on the new driver."
989
+ def default_ssh_username
990
+ "ubuntu"
1034
991
  end
1035
- Chef::Resource::AwsInstance.get_aws_object(machine_spec.reference['instance_id'], driver: self, managed_entry_store: machine_spec.managed_entry_store, required: false)
1036
- end
1037
- end
1038
992
 
1039
- def instances_for(machine_specs)
1040
- result = {}
1041
- machine_specs.each { |machine_spec| result[machine_spec] = instance_for(machine_spec) }
1042
- result
1043
- end
993
+ def default_winrm_username
994
+ "Administrator"
995
+ end
1044
996
 
1045
- def image_for(image_spec)
1046
- Chef::Resource::AwsImage.get_aws_object(image_spec.name, driver: self, managed_entry_store: image_spec.managed_entry_store, required: false)
1047
- end
997
+ def default_winrm_transport
998
+ "http"
999
+ end
1048
1000
 
1049
- def transport_for(machine_spec, machine_options, instance)
1050
- if machine_spec.reference['is_windows']
1051
- create_winrm_transport(machine_spec, machine_options, instance)
1052
- else
1053
- create_ssh_transport(machine_spec, machine_options, instance)
1054
- end
1055
- end
1001
+ def keypair_for(bootstrap_options)
1002
+ if bootstrap_options[:key_name]
1003
+ keypair_name = bootstrap_options[:key_name]
1004
+ actual_key_pair = ec2_resource.key_pair(keypair_name)
1005
+ unless actual_key_pair.exists?
1006
+ ec2_resource.key_pairs.create(keypair_name)
1007
+ end
1008
+ actual_key_pair
1009
+ end
1010
+ end
1056
1011
 
1057
- def aws_credentials
1058
- # Grab the list of possible credentials
1059
- @aws_credentials ||= if driver_options[:aws_credentials]
1060
- driver_options[:aws_credentials]
1061
- else
1062
- credentials = Credentials.new
1063
- if driver_options[:aws_config_file]
1064
- credentials.load_ini(driver_options[:aws_config_file])
1065
- elsif driver_options[:aws_csv_file]
1066
- credentials.load_csv(driver_options[:aws_csv_file])
1067
- else
1068
- credentials.load_default
1069
- end
1070
- credentials
1071
- end
1072
- end
1012
+ def load_balancer_for(lb_spec)
1013
+ Chef::Resource::AwsLoadBalancer.get_aws_object(lb_spec.name, driver: self, managed_entry_store: lb_spec.managed_entry_store, required: false)
1014
+ end
1073
1015
 
1074
- def default_ami_arch
1075
- 'amd64'
1076
- end
1016
+ def instance_for(machine_spec)
1017
+ if machine_spec.reference
1018
+ if machine_spec.driver_url != driver_url
1019
+ raise "Switching a machine's driver from #{machine_spec.driver_url} to #{driver_url} is not currently supported! Use machine :destroy and then re-create the machine on the new driver."
1020
+ end
1021
+ Chef::Resource::AwsInstance.get_aws_object(machine_spec.reference["instance_id"], driver: self, managed_entry_store: machine_spec.managed_entry_store, required: false)
1022
+ end
1023
+ end
1077
1024
 
1078
- def default_ami_release
1079
- 'vivid'
1080
- end
1025
+ def instances_for(machine_specs)
1026
+ result = {}
1027
+ machine_specs.each { |machine_spec| result[machine_spec] = instance_for(machine_spec) }
1028
+ result
1029
+ end
1081
1030
 
1082
- def default_ami_root_store
1083
- 'ebs'
1084
- end
1031
+ def image_for(image_spec)
1032
+ Chef::Resource::AwsImage.get_aws_object(image_spec.name, driver: self, managed_entry_store: image_spec.managed_entry_store, required: false)
1033
+ end
1085
1034
 
1086
- def default_ami_virtualization_type
1087
- 'hvm'
1088
- end
1035
+ def transport_for(machine_spec, machine_options, instance)
1036
+ if machine_spec.reference["is_windows"]
1037
+ create_winrm_transport(machine_spec, machine_options, instance)
1038
+ else
1039
+ create_ssh_transport(machine_spec, machine_options, instance)
1040
+ end
1041
+ end
1089
1042
 
1090
- def default_ami_for_criteria(region, arch, release, root_store, virtualization_type)
1091
- ami = Ubuntu.release(release).amis.find do |ami|
1092
- ami.arch == arch &&
1093
- ami.root_store == root_store &&
1094
- ami.region == region &&
1095
- ami.virtualization_type == virtualization_type
1096
- end
1043
+ def aws_credentials
1044
+ # Grab the list of possible credentials
1045
+ @aws_credentials ||= if driver_options[:aws_credentials]
1046
+ driver_options[:aws_credentials]
1047
+ else
1048
+ credentials = Credentials.new
1049
+ if driver_options[:aws_config_file]
1050
+ credentials.load_ini(driver_options[:aws_config_file])
1051
+ elsif driver_options[:aws_csv_file]
1052
+ credentials.load_csv(driver_options[:aws_csv_file])
1053
+ else
1054
+ credentials.load_default
1055
+ end
1056
+ credentials
1057
+ end
1058
+ end
1097
1059
 
1098
- ami.name || fail("Default AMI not found")
1099
- end
1060
+ def default_ami_arch
1061
+ "amd64"
1062
+ end
1100
1063
 
1101
- def default_ami_for_region(region, criteria = {})
1102
- Chef::Log.debug("Choosing default AMI for region '#{region}'")
1064
+ def default_ami_release
1065
+ "vivid"
1066
+ end
1103
1067
 
1104
- arch = criteria['arch'] || default_ami_arch
1105
- release = criteria['release'] || default_ami_release
1106
- root_store = criteria['root_store'] || default_ami_root_store
1107
- virtualization_type = criteria['virtualization_type'] || default_ami_virtualization_type
1068
+ def default_ami_root_store
1069
+ "ebs"
1070
+ end
1108
1071
 
1109
- default_ami_for_criteria(region, arch, release, root_store, virtualization_type)
1110
- end
1072
+ def default_ami_virtualization_type
1073
+ "hvm"
1074
+ end
1111
1075
 
1112
- def create_winrm_transport(machine_spec, machine_options, instance)
1113
- remote_host = determine_remote_host(machine_spec, instance)
1114
- username = machine_spec.reference['winrm_username'] ||
1115
- machine_options[:winrm_username] ||
1116
- default_winrm_username
1117
- # default to http for now, should upgrade to https when knife support self-signed
1118
- transport_type = machine_spec.reference['winrm_transport'] ||
1119
- machine_options[:winrm_transport] ||
1120
- default_winrm_transport
1121
- type = case transport_type
1122
- when 'http'
1123
- :plaintext
1124
- when 'https'
1125
- :ssl
1126
- end
1127
- port = machine_spec.reference['winrm_port'] ||
1128
- machine_options[:winrm_port] ||
1129
- case transport_type
1130
- when 'http'
1131
- '5985'
1132
- when 'https'
1133
- '5986'
1134
- end
1135
- endpoint = "#{transport_type}://#{remote_host}:#{port}/wsman"
1136
-
1137
- pem_bytes = get_private_key(instance.key_name)
1138
-
1139
- password = machine_spec.reference['winrm_password'] ||
1140
- machine_options[:winrm_password] ||
1141
- begin
1142
- if machine_spec.reference['winrm_encrypted_password']
1143
- decoded = Base64.decode64(machine_spec.reference['winrm_encrypted_password'])
1144
- else
1145
- encrypted_admin_password = instance.password_data.password_data
1146
- if encrypted_admin_password.nil? || encrypted_admin_password.empty?
1147
- raise "You did not specify winrm_password in the machine options and no encrytpted password could be fetched from the instance"
1148
- end
1149
- machine_spec.reference['winrm_encrypted_password']||=encrypted_admin_password
1150
- # ^^ saves encrypted password to the machine_spec
1151
- decoded = Base64.decode64(encrypted_admin_password)
1152
- end
1153
- # decrypt so we can utilize
1154
- private_key = OpenSSL::PKey::RSA.new(get_private_key(instance.key_name))
1155
- private_key.private_decrypt decoded
1156
- end
1076
+ def default_ami_for_criteria(region, arch, release, root_store, virtualization_type)
1077
+ ami = Ubuntu.release(release).amis.find do |ami|
1078
+ ami.arch == arch &&
1079
+ ami.root_store == root_store &&
1080
+ ami.region == region &&
1081
+ ami.virtualization_type == virtualization_type
1082
+ end
1157
1083
 
1158
- disable_sspi = machine_spec.reference['winrm_disable_sspi'] ||
1159
- machine_options[:winrm_disable_sspi] ||
1160
- false # default to Negotiate
1161
- basic_auth_only = machine_spec.reference['winrm_basic_auth_only'] ||
1162
- machine_options[:winrm_basic_auth_only] ||
1163
- false # disallow Basic auth by default
1164
- no_ssl_peer_verification = machine_spec.reference['winrm_no_ssl_peer_verification'] ||
1165
- machine_options[:winrm_no_ssl_peer_verification] ||
1166
- false #disallow MITM potential by default
1167
-
1168
- winrm_options = {
1169
- user: username,
1170
- pass: password,
1171
- disable_sspi: disable_sspi,
1172
- basic_auth_only: basic_auth_only,
1173
- no_ssl_peer_verification: no_ssl_peer_verification,
1174
- }
1175
-
1176
- if no_ssl_peer_verification or type != :ssl
1177
- # => we won't verify certs at all
1178
- Chef::Log.info "No SSL or no peer verification"
1179
- elsif machine_spec.reference['winrm_ssl_thumbprint']
1180
- # we have stored the cert
1181
- Chef::Log.info "Using stored fingerprint"
1182
- else
1183
- # we need to retrieve the cert and verify it by connecting just to
1184
- # retrieve the ssl certificate and compare it to what we see in the
1185
- # console logs
1186
- instance.console_output.data.output
1187
- # again this seem to need to be run twice, to ensure
1188
- encoded_output = instance.console_output.data.output
1189
- console_lines = Base64.decode64(encoded_output).lines
1190
- fp_context = OpenSSL::SSL::SSLContext.new
1191
- tcp_connection = TCPSocket.new(instance.private_ip_address, port)
1192
- ssl_connection = OpenSSL::SSL::SSLSocket.new(tcp_connection, fp_context)
1193
-
1194
- begin
1195
- ssl_connection.connect
1196
- rescue OpenSSL::SSL::SSLError => e
1197
- raise e unless e.message =~ /bad signature/
1198
- ensure
1199
- tcp_connection.close
1084
+ ami.name || raise("Default AMI not found")
1200
1085
  end
1201
1086
 
1202
- winrm_cert = ssl_connection.peer_cert_chain.first
1087
+ def default_ami_for_region(region, criteria = {})
1088
+ Chef::Log.debug("Choosing default AMI for region '#{region}'")
1203
1089
 
1204
- rdp_thumbprint = console_lines.grep(
1205
- /RDPCERTIFICATE-THUMBPRINT/)[-1].split(': ').last.chomp
1206
- rdp_subject = console_lines.grep(
1207
- /RDPCERTIFICATE-SUBJECTNAME/)[-1].split(': ').last.chomp
1208
- winrm_subject = winrm_cert.subject.to_s.split('=').last.upcase
1209
- winrm_thumbprint=OpenSSL::Digest::SHA1.new(winrm_cert.to_der).to_s.upcase
1090
+ arch = criteria["arch"] || default_ami_arch
1091
+ release = criteria["release"] || default_ami_release
1092
+ root_store = criteria["root_store"] || default_ami_root_store
1093
+ virtualization_type = criteria["virtualization_type"] || default_ami_virtualization_type
1210
1094
 
1211
- if rdp_subject != winrm_subject or rdp_thumbprint != winrm_thumbprint
1212
- Chef::Log.fatal "Winrm ssl port certificate differs from rdp console logs"
1213
- end
1214
- # now cache these for later use in the reference
1215
- if machine_spec.reference['winrm_ssl_subject'] != winrm_subject
1216
- machine_spec.reference['winrm_ssl_subject'] = winrm_subject
1217
- end
1218
- if machine_spec.reference['winrm_ssl_thumbprint'] != winrm_thumbprint
1219
- machine_spec.reference['winrm_ssl_thumbprint'] = winrm_thumbprint
1220
- end
1221
- if machine_spec.reference['winrm_ssl_cert'] != winrm_cert.to_pem
1222
- machine_spec.reference['winrm_ssl_cert'] = winrm_cert.to_pem
1095
+ default_ami_for_criteria(region, arch, release, root_store, virtualization_type)
1223
1096
  end
1224
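An illustrative call showing how the criteria hash overrides the defaults; note the keys are strings, not symbols:

    default_ami_for_region("us-east-1")
    # arch "amd64", release "vivid", root store "ebs", virtualization "hvm"

    default_ami_for_region("us-west-2", "release" => "trusty", "arch" => "i386")
    # searches Ubuntu.release("trusty").amis via default_ami_for_criteria,
    # raising "Default AMI not found" if nothing matches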
- end
1225
-
1226
- if machine_spec.reference['winrm_ssl_thumbprint']
1227
- winrm_options[:ssl_peer_fingerprint] = machine_spec.reference['winrm_ssl_thumbprint']
1228
- end
1229
1097
 
1230
- Chef::Provisioning::Transport::WinRM.new("#{endpoint}", type, winrm_options, {})
1231
- end
1098
+ def create_winrm_transport(machine_spec, machine_options, instance)
1099
+ remote_host = determine_remote_host(machine_spec, instance)
1100
+ username = machine_spec.reference["winrm_username"] ||
1101
+ machine_options[:winrm_username] ||
1102
+ default_winrm_username
1103
+ # default to http for now; should upgrade to https once knife supports self-signed certificates
1104
+ transport_type = machine_spec.reference["winrm_transport"] ||
1105
+ machine_options[:winrm_transport] ||
1106
+ default_winrm_transport
1107
+ type = case transport_type
1108
+ when "http"
1109
+ :plaintext
1110
+ when "https"
1111
+ :ssl
1112
+ end
1113
+ port = machine_spec.reference["winrm_port"] ||
1114
+ machine_options[:winrm_port] ||
1115
+ case transport_type
1116
+ when "http"
1117
+ "5985"
1118
+ when "https"
1119
+ "5986"
1120
+ end
1121
+ endpoint = "#{transport_type}://#{remote_host}:#{port}/wsman"
1122
+
1123
+ pem_bytes = get_private_key(instance.key_name)
1124
+
1125
+ password = machine_spec.reference["winrm_password"] ||
1126
+ machine_options[:winrm_password] ||
1127
+ begin
1128
+ if machine_spec.reference["winrm_encrypted_password"]
1129
+ decoded = Base64.decode64(machine_spec.reference["winrm_encrypted_password"])
1130
+ else
1131
+ encrypted_admin_password = instance.password_data.password_data
1132
+ if encrypted_admin_password.nil? || encrypted_admin_password.empty?
1133
+ raise "You did not specify winrm_password in the machine options and no encrytpted password could be fetched from the instance"
1134
+ end
1135
+ machine_spec.reference["winrm_encrypted_password"] ||= encrypted_admin_password
1136
+ # ^^ saves encrypted password to the machine_spec
1137
+ decoded = Base64.decode64(encrypted_admin_password)
1138
+ end
1139
+ # decrypt the password so we can use it
1140
+ private_key = OpenSSL::PKey::RSA.new(get_private_key(instance.key_name))
1141
+ private_key.private_decrypt decoded
1142
+ end
1232
1143
 
1233
- def create_ssh_transport(machine_spec, machine_options, instance)
1234
- ssh_options = ssh_options_for(machine_spec, machine_options, instance)
1235
- username = machine_spec.reference['ssh_username'] || machine_options[:ssh_username] || default_ssh_username
1236
- if machine_options.has_key?(:ssh_username) && machine_options[:ssh_username] != machine_spec.reference['ssh_username']
1237
- Chef::Log.warn("Server #{machine_spec.name} was created with SSH username #{machine_spec.reference['ssh_username']} and machine_options specifies username #{machine_options[:ssh_username]}. Using #{machine_spec.reference['ssh_username']}. Please edit the node and change the chef_provisioning.reference.ssh_username attribute if you want to change it.")
1238
- end
1239
- options = {}
1240
- if machine_spec.reference[:sudo] || (!machine_spec.reference.has_key?(:sudo) && username != 'root')
1241
- options[:prefix] = 'sudo '
1242
- end
1144
+ disable_sspi = machine_spec.reference["winrm_disable_sspi"] ||
1145
+ machine_options[:winrm_disable_sspi] ||
1146
+ false # default to Negotiate
1147
+ basic_auth_only = machine_spec.reference["winrm_basic_auth_only"] ||
1148
+ machine_options[:winrm_basic_auth_only] ||
1149
+ false # disallow Basic auth by default
1150
+ no_ssl_peer_verification = machine_spec.reference["winrm_no_ssl_peer_verification"] ||
1151
+ machine_options[:winrm_no_ssl_peer_verification] ||
1152
+ false # disallow MITM potential by default
1153
+
1154
+ winrm_options = {
1155
+ user: username,
1156
+ pass: password,
1157
+ disable_sspi: disable_sspi,
1158
+ basic_auth_only: basic_auth_only,
1159
+ no_ssl_peer_verification: no_ssl_peer_verification
1160
+ }
1243
1161
 
1244
- remote_host = determine_remote_host(machine_spec, instance)
1162
+ if no_ssl_peer_verification || (type != :ssl)
1163
+ # => we won't verify certs at all
1164
+ Chef::Log.info "No SSL or no peer verification"
1165
+ elsif machine_spec.reference["winrm_ssl_thumbprint"]
1166
+ # we have stored the cert
1167
+ Chef::Log.info "Using stored fingerprint"
1168
+ else
1169
+ # we need to retrieve the cert and verify it by connecting just to
1170
+ # retrieve the ssl certificate and compare it to what we see in the
1171
+ # console logs
1172
+ instance.console_output.data.output
1173
+ # again, this seems to need to be run twice to ensure the output is fully pulled
1174
+ encoded_output = instance.console_output.data.output
1175
+ console_lines = Base64.decode64(encoded_output).lines
1176
+ fp_context = OpenSSL::SSL::SSLContext.new
1177
+ tcp_connection = TCPSocket.new(instance.private_ip_address, port)
1178
+ ssl_connection = OpenSSL::SSL::SSLSocket.new(tcp_connection, fp_context)
1179
+
1180
+ begin
1181
+ ssl_connection.connect
1182
+ rescue OpenSSL::SSL::SSLError => e
1183
+ raise e unless e.message =~ /bad signature/
1184
+ ensure
1185
+ tcp_connection.close
1186
+ end
1245
1187
 
1246
- #Enable pty by default
1247
- options[:ssh_pty_enable] = true
1188
+ winrm_cert = ssl_connection.peer_cert_chain.first
1248
1189
 
1249
- if machine_spec.reference.has_key?('ssh_gateway')
1250
- options[:ssh_gateway] = machine_spec.reference['ssh_gateway']
1251
- elsif machine_options[:ssh_gateway]
1252
- options[:ssh_gateway] = machine_options[:ssh_gateway]
1253
- end
1190
+ rdp_thumbprint = console_lines.grep(
1191
+ /RDPCERTIFICATE-THUMBPRINT/
1192
+ )[-1].split(": ").last.chomp
1193
+ rdp_subject = console_lines.grep(
1194
+ /RDPCERTIFICATE-SUBJECTNAME/
1195
+ )[-1].split(": ").last.chomp
1196
+ winrm_subject = winrm_cert.subject.to_s.split("=").last.upcase
1197
+ winrm_thumbprint = OpenSSL::Digest::SHA1.new(winrm_cert.to_der).to_s.upcase
1254
1198
 
1255
- Chef::Provisioning::Transport::SSH.new(remote_host, username, ssh_options, options, config)
1256
- end
1199
+ if (rdp_subject != winrm_subject) || (rdp_thumbprint != winrm_thumbprint)
1200
+ Chef::Log.fatal "Winrm ssl port certificate differs from rdp console logs"
1201
+ end
1202
+ # now cache these for later use in the reference
1203
+ if machine_spec.reference["winrm_ssl_subject"] != winrm_subject
1204
+ machine_spec.reference["winrm_ssl_subject"] = winrm_subject
1205
+ end
1206
+ if machine_spec.reference["winrm_ssl_thumbprint"] != winrm_thumbprint
1207
+ machine_spec.reference["winrm_ssl_thumbprint"] = winrm_thumbprint
1208
+ end
1209
+ if machine_spec.reference["winrm_ssl_cert"] != winrm_cert.to_pem
1210
+ machine_spec.reference["winrm_ssl_cert"] = winrm_cert.to_pem
1211
+ end
1212
+ end
1257
1213
 
1258
- def determine_remote_host(machine_spec, instance)
1259
- transport_address_location = (machine_spec.reference['transport_address_location'] || :none).to_sym
1260
- if machine_spec.reference['use_private_ip_for_ssh']
1261
- # The machine_spec has the old config key, lets update it - a successful chef converge will save the machine_spec
1262
- # TODO in 2.0 get rid of this update
1263
- machine_spec.reference.delete('use_private_ip_for_ssh')
1264
- machine_spec.reference['transport_address_location'] = :private_ip
1265
- instance.private_ip_address
1266
- elsif transport_address_location == :private_ip
1267
- instance.private_ip_address
1268
- elsif transport_address_location == :dns
1269
- instance.dns_name
1270
- elsif !instance.public_ip_address && instance.private_ip_address
1271
- Chef::Log.warn("Server #{machine_spec.name} has no public ip address. Using private ip '#{instance.private_ip_address}'. Set machine_options ':transport_address_location => :private_ip' if this will always be the case ...")
1272
- instance.private_ip_address
1273
- elsif instance.public_ip_address
1274
- instance.public_ip_address
1275
- else
1276
- raise "Server #{instance.id} has no private or public IP address!"
1277
- end
1278
- end
1214
+ if machine_spec.reference["winrm_ssl_thumbprint"]
1215
+ winrm_options[:ssl_peer_fingerprint] = machine_spec.reference["winrm_ssl_thumbprint"]
1216
+ end
1279
1217
 
1280
- def private_key_for(machine_spec, machine_options, instance)
1281
- if instance.respond_to?(:private_key) && instance.private_key
1282
- instance.private_key
1283
- elsif instance.respond_to?(:key_name) && instance.key_name
1284
- key = get_private_key(instance.key_name)
1285
- unless key
1286
- raise "Server has key name '#{instance.key_name}', but the corresponding private key was not found locally. Check if the key is in Chef::Config.private_key_paths: #{Chef::Config.private_key_paths.join(', ')}"
1287
- end
1288
- key
1289
- elsif machine_spec.reference['key_name']
1290
- key = get_private_key(machine_spec.reference['key_name'])
1291
- unless key
1292
- raise "Server was created with key name '#{machine_spec.reference['key_name']}', but the corresponding private key was not found locally. Check if the key is in Chef::Config.private_key_paths: #{Chef::Config.private_key_paths.join(', ')}"
1293
- end
1294
- key
1295
- elsif machine_options[:bootstrap_options] && machine_options[:bootstrap_options][:key_path]
1296
- IO.read(machine_options[:bootstrap_options][:key_path])
1297
- elsif machine_options[:bootstrap_options] && machine_options[:bootstrap_options][:key_name]
1298
- get_private_key(machine_options[:bootstrap_options][:key_name])
1299
- else
1300
- # TODO make a way to suggest other keys to try ...
1301
- raise "No key found to connect to #{machine_spec.name} (#{machine_spec.reference.inspect})!"
1302
- end
1303
- end
1218
+ Chef::Provisioning::Transport::WinRM.new(endpoint.to_s, type, winrm_options, {})
1219
+ end
1304
1220
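The endpoint derivation above reduces to two cases; a sketch with a hypothetical host:

    # transport_type   type         port   endpoint
    # "http"           :plaintext   5985   http://10.0.0.5:5985/wsman
    # "https"          :ssl         5986   https://10.0.0.5:5986/wsman

    endpoint = "https://10.0.0.5:5986/wsman"   # what WinRM.new receives for SSL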
 
1305
- def ssh_options_for(machine_spec, machine_options, instance)
1306
- result = {
1307
- # TODO create a user known hosts file
1308
- # :user_known_hosts_file => vagrant_ssh_config['UserKnownHostsFile'],
1309
- # :paranoid => true,
1310
- :auth_methods => [ 'publickey' ],
1311
- :keys_only => true,
1312
- :host_key_alias => "#{instance.id}.AWS"
1313
- }.merge(machine_options[:ssh_options] || {})
1314
- unless result.has_key?(:key_data)
1315
- result[:keys_only] = true
1316
- result[:key_data] = [ private_key_for(machine_spec, machine_options, instance) ]
1317
- end
1318
- result
1319
- end
1221
+ def create_ssh_transport(machine_spec, machine_options, instance)
1222
+ ssh_options = ssh_options_for(machine_spec, machine_options, instance)
1223
+ username = machine_spec.reference["ssh_username"] || machine_options[:ssh_username] || default_ssh_username
1224
+ if machine_options.key?(:ssh_username) && machine_options[:ssh_username] != machine_spec.reference["ssh_username"]
1225
+ Chef::Log.warn("Server #{machine_spec.name} was created with SSH username #{machine_spec.reference['ssh_username']} and machine_options specifies username #{machine_options[:ssh_username]}. Using #{machine_spec.reference['ssh_username']}. Please edit the node and change the chef_provisioning.reference.ssh_username attribute if you want to change it.")
1226
+ end
1227
+ options = {}
1228
+ if machine_spec.reference[:sudo] || (!machine_spec.reference.key?(:sudo) && username != "root")
1229
+ options[:prefix] = "sudo "
1230
+ end
1320
1231
 
1321
- def convergence_strategy_for(machine_spec, machine_options)
1322
- # Tell Ohai that this is an EC2 instance so that it runs the EC2 plugin
1323
- convergence_options = Cheffish::MergedConfig.new(
1324
- machine_options[:convergence_options] || {},
1325
- ohai_hints: { 'ec2' => '' })
1326
- convergence_options=deep_symbolize_keys(convergence_options)
1232
+ remote_host = determine_remote_host(machine_spec, instance)
1327
1233
 
1328
- # Defaults
1329
- if !machine_spec.reference
1330
- return Chef::Provisioning::ConvergenceStrategy::NoConverge.new(convergence_options, config)
1331
- end
1234
+ # Enable pty by default
1235
+ options[:ssh_pty_enable] = true
1332
1236
 
1333
- if machine_spec.reference['is_windows']
1334
- Chef::Provisioning::ConvergenceStrategy::InstallMsi.new(convergence_options, config)
1335
- elsif machine_options[:cached_installer] == true
1336
- Chef::Provisioning::ConvergenceStrategy::InstallCached.new(convergence_options, config)
1337
- else
1338
- Chef::Provisioning::ConvergenceStrategy::InstallSh.new(convergence_options, config)
1339
- end
1340
- end
1237
+ if machine_spec.reference.key?("ssh_gateway")
1238
+ options[:ssh_gateway] = machine_spec.reference["ssh_gateway"]
1239
+ elsif machine_options[:ssh_gateway]
1240
+ options[:ssh_gateway] = machine_options[:ssh_gateway]
1241
+ end
1341
1242
 
1342
- def wait_until_ready_image(action_handler, image_spec, image=nil)
1343
- wait_until_image(action_handler, image_spec, image) { |image| image.state.to_sym == :available }
1344
- action_handler.report_progress "Image #{image_spec.name} is now ready"
1345
- end
1243
+ Chef::Provisioning::Transport::SSH.new(remote_host, username, ssh_options, options, config)
1244
+ end
1346
1245
 
- def wait_until_image(action_handler, image_spec, image=nil, &block)
- image ||= image_for(image_spec)
- sleep_time = 10
- unless yield(image)
- if action_handler.should_perform_actions
- action_handler.report_progress "waiting for #{image_spec.name} (#{image.id} on #{driver_url}) to be ready ..."
- max_wait_time = Chef::Config.chef_provisioning[:image_max_wait_time] || 300
- Retryable.retryable(
- :tries => (max_wait_time/sleep_time).to_i,
- :sleep => sleep_time,
- :matching => /did not become ready within/
- ) do |retries, exception|
- action_handler.report_progress "been waiting #{retries*sleep_time}/#{max_wait_time} -- sleeping #{sleep_time} seconds for #{image_spec.name} (#{image.id} on #{driver_url}) to become ready ..."
- # We have to manually reload the instance each loop, otherwise data is stale
- image.reload
- unless yield(image)
- raise "Image #{image.id} did not become ready within #{max_wait_time} seconds"
- end
+ def determine_remote_host(machine_spec, instance)
+ transport_address_location = (machine_spec.reference["transport_address_location"] || :none).to_sym
+ if machine_spec.reference["use_private_ip_for_ssh"]
+ # The machine_spec has the old config key, lets update it - a successful chef converge will save the machine_spec
+ # TODO in 2.0 get rid of this update
+ machine_spec.reference.delete("use_private_ip_for_ssh")
+ machine_spec.reference["transport_address_location"] = :private_ip
+ instance.private_ip_address
+ elsif transport_address_location == :private_ip
+ instance.private_ip_address
+ elsif transport_address_location == :dns
+ instance.dns_name
+ elsif !instance.public_ip_address && instance.private_ip_address
+ Chef::Log.warn("Server #{machine_spec.name} has no public ip address. Using private ip '#{instance.private_ip_address}'. Set machine_options ':transport_address_location => :private_ip' if this will always be the case ...")
+ instance.private_ip_address
+ elsif instance.public_ip_address
+ instance.public_ip_address
+ else
+ raise "Server #{instance.id} has no private or public IP address!"
  end
  end
- end
- end
-
- def wait_until_instance_running(action_handler, machine_spec, instance=nil)
- wait_until_machine(action_handler, machine_spec, "become ready", instance) { |instance|
- instance.state.name == "running"
- }
- end
 
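The branch order in determine_remote_host is worth internalizing: the legacy use_private_ip_for_ssh key wins (and is rewritten to the new key), then an explicit :private_ip or :dns setting, then a private-only fallback, and finally the public address. A standalone sketch of that precedence (FakeInstance is a hypothetical stand-in for the Aws::EC2::Instance; the warning is elided):

    # Illustrative precedence of determine_remote_host above.
    FakeInstance = Struct.new(:id, :public_ip_address, :private_ip_address, :dns_name)

    def pick_remote_host(reference, instance)
      location = (reference["transport_address_location"] || :none).to_sym
      if reference.delete("use_private_ip_for_ssh")   # legacy key: honor it and upgrade it
        reference["transport_address_location"] = :private_ip
        instance.private_ip_address
      elsif location == :private_ip
        instance.private_ip_address
      elsif location == :dns
        instance.dns_name
      elsif !instance.public_ip_address && instance.private_ip_address
        instance.private_ip_address                   # the real driver logs a warning here
      elsif instance.public_ip_address
        instance.public_ip_address
      else
        raise "Server #{instance.id} has no private or public IP address!"
      end
    end

    i = FakeInstance.new("i-123", nil, "10.0.0.5", "ip-10-0-0-5.ec2.internal")
    puts pick_remote_host({}, i)                                        # => 10.0.0.5
    puts pick_remote_host({ "transport_address_location" => "dns" }, i) # => ip-10-0-0-5.ec2.internal
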
- def wait_until_machine(action_handler, machine_spec, output_msg, instance=nil, &block)
- instance ||= instance_for(machine_spec)
- sleep_time = 10
- unless yield(instance)
- if action_handler.should_perform_actions
- action_handler.report_progress "waiting for #{machine_spec.name} (#{instance.id} on #{driver_url}) to #{output_msg} ..."
- max_wait_time = Chef::Config.chef_provisioning[:machine_max_wait_time] || 120
- Retryable.retryable(
- :tries => (max_wait_time/sleep_time).to_i,
- :sleep => sleep_time,
- :matching => /did not #{output_msg} within/
- ) do |retries, exception|
- action_handler.report_progress "been waiting #{sleep_time*retries}/#{max_wait_time} -- sleeping #{sleep_time} seconds for #{machine_spec.name} (#{instance.id} on #{driver_url}) to #{output_msg} ..."
- # We have to manually reload the instance each loop, otherwise data is stale
- instance.reload
- unless yield(instance)
- raise "Instance #{machine_spec.name} (#{instance.id} on #{driver_url}) did not #{output_msg} within #{max_wait_time} seconds"
+ def private_key_for(machine_spec, machine_options, instance)
+ if instance.respond_to?(:private_key) && instance.private_key
+ instance.private_key
+ elsif instance.respond_to?(:key_name) && instance.key_name
+ key = get_private_key(instance.key_name)
+ unless key
+ raise "Server has key name '#{instance.key_name}', but the corresponding private key was not found locally. Check if the key is in Chef::Config.private_key_paths: #{Chef::Config.private_key_paths.join(', ')}"
+ end
+ key
+ elsif machine_spec.reference["key_name"]
+ key = get_private_key(machine_spec.reference["key_name"])
+ unless key
+ raise "Server was created with key name '#{machine_spec.reference['key_name']}', but the corresponding private key was not found locally. Check if the key is in Chef::Config.private_key_paths: #{Chef::Config.private_key_paths.join(', ')}"
  end
+ key
+ elsif machine_options[:bootstrap_options] && machine_options[:bootstrap_options][:key_path]
+ IO.read(machine_options[:bootstrap_options][:key_path])
+ elsif machine_options[:bootstrap_options] && machine_options[:bootstrap_options][:key_name]
+ get_private_key(machine_options[:bootstrap_options][:key_name])
+ else
+ # TODO: make a way to suggest other keys to try ...
+ raise "No key found to connect to #{machine_spec.name} (#{machine_spec.reference.inspect})!"
  end
  end
- end
- end
 
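private_key_for is a lookup chain: a key embedded on the object, then the instance's EC2 key_name, then the key_name saved in the reference, then bootstrap_options (:key_path, then :key_name). A condensed sketch of that order (the names and the lookup block are illustrative; the real method raises a descriptive error whenever a named key cannot be found locally):

    # Condensed lookup order of private_key_for above. `lookup` stands in for
    # get_private_key / Chef::Config.private_key_paths; error handling elided.
    def find_private_key(instance_key_name, reference, bootstrap_options, &lookup)
      return lookup.call(instance_key_name) if instance_key_name
      return lookup.call(reference["key_name"]) if reference["key_name"]
      return File.read(bootstrap_options[:key_path]) if bootstrap_options[:key_path]
      return lookup.call(bootstrap_options[:key_name]) if bootstrap_options[:key_name]
      raise "No key found"
    end

    key = find_private_key(nil, { "key_name" => "my_key" }, {}) { |name| "<contents of #{name}>" }
    puts key # => <contents of my_key>
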
- def wait_for_transport(action_handler, machine_spec, machine_options, instance=nil)
- instance ||= instance_for(machine_spec)
- sleep_time = 10
- transport = transport_for(machine_spec, machine_options, instance)
- unless instance.state.name.eql?("running") && transport.available?
- if action_handler.should_perform_actions
- action_handler.report_progress "waiting for #{machine_spec.name} (#{instance.id} on #{driver_url}) to be connectable (transport up and running) ..."
- max_wait_time = Chef::Config.chef_provisioning[:machine_max_wait_time] || 120
- Retryable.retryable(
- :tries => (max_wait_time/sleep_time).to_i,
- :sleep => sleep_time,
- :matching => /did not become connectable within/
- ) do |retries, exception|
- action_handler.report_progress "been waiting #{sleep_time*retries}/#{max_wait_time} -- sleeping #{sleep_time} seconds for #{machine_spec.name} (#{instance.id} on #{driver_url}) to become connectable ..."
- unless transport.available?
- raise "Instance #{machine_spec.name} (#{instance.id} on #{driver_url}) did not become connectable within #{max_wait_time} seconds"
- end
+ def ssh_options_for(machine_spec, machine_options, instance)
+ result = {
+ # TODO: create a user known hosts file
+ # :user_known_hosts_file => vagrant_ssh_config['UserKnownHostsFile'],
+ # :paranoid => true,
+ auth_methods: ["publickey"],
+ keys_only: true,
+ host_key_alias: "#{instance.id}.AWS"
+ }.merge(machine_options[:ssh_options] || {})
+ unless result.key?(:key_data)
+ result[:keys_only] = true
+ result[:key_data] = [private_key_for(machine_spec, machine_options, instance)]
  end
+ result
  end
- end
- end
 
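Note that machine_options[:ssh_options] is merged over the driver defaults in ssh_options_for, so a user-supplied :key_data suppresses the private_key_for lookup entirely. A quick illustration of that merge with plain hashes (the key material is obviously fake):

    # How user ssh_options override the defaults in ssh_options_for above.
    defaults = {
      auth_methods: ["publickey"],
      keys_only: true,
      host_key_alias: "i-0abc.AWS"
    }
    user_options = { key_data: ["-----BEGIN RSA PRIVATE KEY-----\n..."] }

    result = defaults.merge(user_options)
    puts result.key?(:key_data) # => true, so the key lookup branch is skipped
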
- def default_aws_keypair_name(machine_spec)
- if machine_spec.reference &&
- Gem::Version.new(machine_spec.reference['driver_version']) < Gem::Version.new('0.10')
- 'metal_default'
- else
- 'chef_default'
- end
- end
+ def convergence_strategy_for(machine_spec, machine_options)
+ # Tell Ohai that this is an EC2 instance so that it runs the EC2 plugin
+ convergence_options = Cheffish::MergedConfig.new(
+ machine_options[:convergence_options] || {},
+ ohai_hints: { "ec2" => "" }
+ )
+ convergence_options = deep_symbolize_keys(convergence_options)
+
+ # Defaults
+ unless machine_spec.reference
+ return Chef::Provisioning::ConvergenceStrategy::NoConverge.new(convergence_options, config)
+ end

- def default_aws_keypair(action_handler, machine_spec)
- driver = self
- default_key_name = default_aws_keypair_name(machine_spec)
- updated = @@chef_default_lock.synchronize do
- Provisioning.inline_resource(action_handler) do
- aws_key_pair default_key_name do
- driver driver
- chef_server machine_spec.managed_entry_store.chef_server
- managed_entry_store machine_spec.managed_entry_store
- allow_overwrite true
+ if machine_spec.reference["is_windows"]
+ Chef::Provisioning::ConvergenceStrategy::InstallMsi.new(convergence_options, config)
+ elsif machine_options[:cached_installer] == true
+ Chef::Provisioning::ConvergenceStrategy::InstallCached.new(convergence_options, config)
+ else
+ Chef::Provisioning::ConvergenceStrategy::InstallSh.new(convergence_options, config)
  end
  end
- end
 
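The rewritten convergence_strategy_for is a three-way dispatch on the saved reference and options. Roughly (a sketch with symbols standing in for the ConvergenceStrategy classes):

    # Dispatch logic of convergence_strategy_for above.
    def strategy_for(reference, options)
      return :no_converge unless reference        # machine never allocated
      return :install_msi if reference["is_windows"]
      return :install_cached if options[:cached_installer] == true
      :install_sh
    end

    puts strategy_for(nil, {})                      # => no_converge
    puts strategy_for({ "is_windows" => true }, {}) # => install_msi
    puts strategy_for({}, cached_installer: true)   # => install_cached
    puts strategy_for({}, {})                       # => install_sh
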
- # Only warn the first time
- default_warning = 'Using default key, which is not shared between machines! It is recommended to create an AWS key pair with the aws_key_pair resource, and set :bootstrap_options => { :key_name => <key name> }'
- Chef::Log.warn(default_warning) if updated
+ def wait_until_ready_image(action_handler, image_spec, image = nil)
+ wait_until_image(action_handler, image_spec, image) { |image| image.state.to_sym == :available }
+ action_handler.report_progress "Image #{image_spec.name} is now ready"
+ end

- default_key_name
- end
+ def wait_until_image(action_handler, image_spec, image = nil)
+ image ||= image_for(image_spec)
+ sleep_time = 10
+ unless yield(image)
+ if action_handler.should_perform_actions
+ action_handler.report_progress "waiting for #{image_spec.name} (#{image.id} on #{driver_url}) to be ready ..."
+ max_wait_time = Chef::Config.chef_provisioning[:image_max_wait_time] || 300
+ Retryable.retryable(
+ tries: (max_wait_time / sleep_time).to_i,
+ sleep: sleep_time,
+ matching: /did not become ready within/
+ ) do |retries, _exception|
+ action_handler.report_progress "been waiting #{retries * sleep_time}/#{max_wait_time} -- sleeping #{sleep_time} seconds for #{image_spec.name} (#{image.id} on #{driver_url}) to become ready ..."
+ # We have to manually reload the instance each loop, otherwise data is stale
+ image.reload
+ unless yield(image)
+ raise "Image #{image.id} did not become ready within #{max_wait_time} seconds"
+ end
+ end
+ end
+ end
+ end
 
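All of the wait_until_* helpers share the same shape: check once, and if not ready, poll up to max_wait_time / sleep_time times, reloading the AWS object each iteration so the data is not stale, and raise if time runs out (Retryable turns each raise into a sleep and retry until the attempts are exhausted). The same control flow as a plain loop, without the Retryable gem (names are illustrative):

    # Generic poll/reload/check loop behind the wait_until_* helpers above.
    def wait_until(max_wait_time: 300, sleep_time: 10)
      tries = (max_wait_time / sleep_time).to_i
      tries.times do |attempt|
        return true if yield
        puts "been waiting #{attempt * sleep_time}/#{max_wait_time} ..."
        sleep sleep_time
      end
      raise "did not become ready within #{max_wait_time} seconds"
    end

    # The block is where image.reload / instance.reload would happen.
    checks = 0
    wait_until(max_wait_time: 30, sleep_time: 1) do
      checks += 1
      checks > 2
    end
    puts "ready after #{checks} checks"
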
- def create_servers(action_handler, specs_and_options, parallelizer)
- specs_and_servers = instances_for(specs_and_options.keys)
+ def wait_until_instance_running(action_handler, machine_spec, instance = nil)
+ wait_until_machine(action_handler, machine_spec, "become ready", instance) do |instance|
+ instance.state.name == "running"
+ end
+ end

- by_bootstrap_options = {}
- specs_and_options.each do |machine_spec, machine_options|
- instance = specs_and_servers[machine_spec]
- if instance
- if instance.state.name == "terminated"
- Chef::Log.warn "Machine #{machine_spec.name} (#{instance.id}) is terminated. Recreating ..."
- else
- # Even though the instance has been created the tags could be incorrect if it
- # was created before tags were introduced
- converge_ec2_tags(instance, machine_options[:aws_tags], action_handler)
- yield machine_spec, instance if block_given?
- next
+ def wait_until_machine(action_handler, machine_spec, output_msg, instance = nil)
+ instance ||= instance_for(machine_spec)
+ sleep_time = 10
+ unless yield(instance)
+ if action_handler.should_perform_actions
+ action_handler.report_progress "waiting for #{machine_spec.name} (#{instance.id} on #{driver_url}) to #{output_msg} ..."
+ max_wait_time = Chef::Config.chef_provisioning[:machine_max_wait_time] || 120
+ Retryable.retryable(
+ tries: (max_wait_time / sleep_time).to_i,
+ sleep: sleep_time,
+ matching: /did not #{output_msg} within/
+ ) do |retries, _exception|
+ action_handler.report_progress "been waiting #{sleep_time * retries}/#{max_wait_time} -- sleeping #{sleep_time} seconds for #{machine_spec.name} (#{instance.id} on #{driver_url}) to #{output_msg} ..."
+ # We have to manually reload the instance each loop, otherwise data is stale
+ instance.reload
+ unless yield(instance)
+ raise "Instance #{machine_spec.name} (#{instance.id} on #{driver_url}) did not #{output_msg} within #{max_wait_time} seconds"
+ end
+ end
+ end
  end
- elsif machine_spec.reference
- Chef::Log.warn "Machine #{machine_spec.name} (#{machine_spec.reference['instance_id']} on #{driver_url}) no longer exists. Recreating ..."
  end

- bootstrap_options = bootstrap_options_for(action_handler, machine_spec, machine_options)
- by_bootstrap_options[bootstrap_options] ||= []
- by_bootstrap_options[bootstrap_options] << machine_spec
- end
+ def wait_for_transport(action_handler, machine_spec, machine_options, instance = nil)
+ instance ||= instance_for(machine_spec)
+ sleep_time = 10
+ transport = transport_for(machine_spec, machine_options, instance)
+ unless instance.state.name.eql?("running") && transport.available?
+ if action_handler.should_perform_actions
+ action_handler.report_progress "waiting for #{machine_spec.name} (#{instance.id} on #{driver_url}) to be connectable (transport up and running) ..."
+ max_wait_time = Chef::Config.chef_provisioning[:machine_max_wait_time] || 120
+ Retryable.retryable(
+ tries: (max_wait_time / sleep_time).to_i,
+ sleep: sleep_time,
+ matching: /did not become connectable within/
+ ) do |retries, _exception|
+ action_handler.report_progress "been waiting #{sleep_time * retries}/#{max_wait_time} -- sleeping #{sleep_time} seconds for #{machine_spec.name} (#{instance.id} on #{driver_url}) to become connectable ..."
+ unless transport.available?
+ raise "Instance #{machine_spec.name} (#{instance.id} on #{driver_url}) did not become connectable within #{max_wait_time} seconds"
+ end
+ end
+ end
+ end
+ end
 
- # Create the servers in parallel
- parallelizer.parallelize(by_bootstrap_options) do |bootstrap_options, machine_specs|
- machine_description = if machine_specs.size == 1
- "machine #{machine_specs.first.name}"
- else
- "machines #{machine_specs.map { |s| s.name }.join(", ")}"
+ def default_aws_keypair_name(machine_spec)
+ if machine_spec.reference &&
+ Gem::Version.new(machine_spec.reference["driver_version"]) < Gem::Version.new("0.10")
+ "metal_default"
+ else
+ "chef_default"
+ end
  end
- description = [ "creating #{machine_description} on #{driver_url}" ]
- bootstrap_options.each_pair { |key,value| description << " #{key}: #{value.inspect}" }
- action_handler.report_progress description
- if action_handler.should_perform_actions
- # Actually create the servers
- parallelizer.parallelize(1.upto(machine_specs.size)) do |i|

- # Assign each one to a machine spec
- machine_spec = machine_specs.pop
- machine_options = specs_and_options[machine_spec]
+ def default_aws_keypair(action_handler, machine_spec)
+ driver = self
+ default_key_name = default_aws_keypair_name(machine_spec)
+ updated = @@chef_default_lock.synchronize do
+ Provisioning.inline_resource(action_handler) do
+ aws_key_pair default_key_name do
+ driver driver
+ chef_server machine_spec.managed_entry_store.chef_server
+ managed_entry_store machine_spec.managed_entry_store
+ allow_overwrite true
+ end
+ end
+ end

- clean_bootstrap_options = Marshal.load(Marshal.dump(bootstrap_options))
- instance = create_instance_and_reference(clean_bootstrap_options, action_handler, machine_spec, machine_options)
- converge_ec2_tags(instance, machine_options[:aws_tags], action_handler)
+ # Only warn the first time
+ default_warning = "Using default key, which is not shared between machines! It is recommended to create an AWS key pair with the aws_key_pair resource, and set :bootstrap_options => { :key_name => <key name> }"
+ Chef::Log.warn(default_warning) if updated

- action_handler.performed_action "machine #{machine_spec.name} created as #{instance.id} on #{driver_url}"
+ default_key_name
+ end

- yield machine_spec, instance if block_given?
- end.to_a
+ def create_servers(action_handler, specs_and_options, parallelizer)
+ specs_and_servers = instances_for(specs_and_options.keys)
+
+ by_bootstrap_options = {}
+ specs_and_options.each do |machine_spec, machine_options|
+ instance = specs_and_servers[machine_spec]
+ if instance
+ if instance.state.name == "terminated"
+ Chef::Log.warn "Machine #{machine_spec.name} (#{instance.id}) is terminated. Recreating ..."
+ else
+ # Even though the instance has been created the tags could be incorrect if it
+ # was created before tags were introduced
+ converge_ec2_tags(instance, machine_options[:aws_tags], action_handler)
+ yield machine_spec, instance if block_given?
+ next
+ end
+ elsif machine_spec.reference
+ Chef::Log.warn "Machine #{machine_spec.name} (#{machine_spec.reference['instance_id']} on #{driver_url}) no longer exists. Recreating ..."
+ end

- if machine_specs.size > 0
- raise "Not all machines were created by create_servers"
+ bootstrap_options = bootstrap_options_for(action_handler, machine_spec, machine_options)
+ by_bootstrap_options[bootstrap_options] ||= []
+ by_bootstrap_options[bootstrap_options] << machine_spec
  end
+
+ # Create the servers in parallel
+ parallelizer.parallelize(by_bootstrap_options) do |bootstrap_options, machine_specs|
+ machine_description = if machine_specs.size == 1
+ "machine #{machine_specs.first.name}"
+ else
+ "machines #{machine_specs.map(&:name).join(', ')}"
+ end
+ description = ["creating #{machine_description} on #{driver_url}"]
+ bootstrap_options.each_pair { |key, value| description << " #{key}: #{value.inspect}" }
+ action_handler.report_progress description
+ if action_handler.should_perform_actions
+ # Actually create the servers
+ parallelizer.parallelize(1.upto(machine_specs.size)) do |_i|
+ # Assign each one to a machine spec
+ machine_spec = machine_specs.pop
+ machine_options = specs_and_options[machine_spec]
+
+ clean_bootstrap_options = Marshal.load(Marshal.dump(bootstrap_options))
+ instance = create_instance_and_reference(clean_bootstrap_options, action_handler, machine_spec, machine_options)
+ converge_ec2_tags(instance, machine_options[:aws_tags], action_handler)
+
+ action_handler.performed_action "machine #{machine_spec.name} created as #{instance.id} on #{driver_url}"
+
+ yield machine_spec, instance if block_given?
+ end.to_a
+
+ unless machine_specs.empty?
+ raise "Not all machines were created by create_servers"
+ end
+ end
+ end.to_a
  end
- end.to_a
- end
 
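create_servers first groups machines whose bootstrap options are identical, then fans each group out through the parallelizer. The grouping step in isolation (plain data; the parallelizer and the AWS calls are elided):

    # The batching step of create_servers above; names are illustrative.
    specs_and_options = {
      "web1" => { instance_type: "t2.micro" },
      "web2" => { instance_type: "t2.micro" },
      "db1"  => { instance_type: "m5.large" }
    }

    by_bootstrap_options = {}
    specs_and_options.each do |machine_name, bootstrap_options|
      by_bootstrap_options[bootstrap_options] ||= []
      by_bootstrap_options[bootstrap_options] << machine_name
    end

    by_bootstrap_options.each do |opts, machines|
      puts "creating machines #{machines.join(', ')} with #{opts.inspect}"
    end
    # => creating machines web1, web2 with {:instance_type=>"t2.micro"}
    # => creating machines db1 with {:instance_type=>"m5.large"}
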
- def converge_ec2_tags(aws_object, tags, action_handler)
- ec2_strategy = Chef::Provisioning::AWSDriver::TaggingStrategy::EC2.new(
- ec2_client,
- aws_object.id,
- tags
- )
- aws_tagger = Chef::Provisioning::AWSDriver::AWSTagger.new(ec2_strategy, action_handler)
- aws_tagger.converge_tags
- end
+ def converge_ec2_tags(aws_object, tags, action_handler)
+ ec2_strategy = Chef::Provisioning::AWSDriver::TaggingStrategy::EC2.new(
+ ec2_client,
+ aws_object.id,
+ tags
+ )
+ aws_tagger = Chef::Provisioning::AWSDriver::AWSTagger.new(ec2_strategy, action_handler)
+ aws_tagger.converge_tags
+ end

- def converge_elb_tags(aws_object, tags, action_handler)
- elb_strategy = Chef::Provisioning::AWSDriver::TaggingStrategy::ELB.new(
- elb_client,
- aws_object.load_balancer_name,
- tags
- )
- aws_tagger = Chef::Provisioning::AWSDriver::AWSTagger.new(elb_strategy, action_handler)
- aws_tagger.converge_tags
- end
+ def converge_elb_tags(aws_object, tags, action_handler)
+ elb_strategy = Chef::Provisioning::AWSDriver::TaggingStrategy::ELB.new(
+ elb_client,
+ aws_object.load_balancer_name,
+ tags
+ )
+ aws_tagger = Chef::Provisioning::AWSDriver::AWSTagger.new(elb_strategy, action_handler)
+ aws_tagger.converge_tags
+ end

- def create_instance_and_reference(bootstrap_options, action_handler, machine_spec, machine_options)
- instance = nil
- # IAM says the instance profile is ready, but EC2 doesn't think it is
- # Not using retry_with_backoff here because we need to match on a string
- Retryable.retryable(
- :tries => 10,
- :sleep => lambda { |n| [2**n, 16].min },
- :on => ::Aws::EC2::Errors::InvalidParameterValue,
- :matching => /Invalid IAM Instance Profile name/
- ) do |retries, exception|
- Chef::Log.debug("Instance creation InvalidParameterValue exception is #{exception.inspect}")
- instance = ec2_resource.create_instances(bootstrap_options.to_hash)[0]
- end
+ def create_instance_and_reference(bootstrap_options, action_handler, machine_spec, machine_options)
+ instance = nil
+ # IAM says the instance profile is ready, but EC2 doesn't think it is
+ # Not using retry_with_backoff here because we need to match on a string
+ Retryable.retryable(
+ tries: 10,
+ sleep: ->(n) { [2**n, 16].min },
+ on: ::Aws::EC2::Errors::InvalidParameterValue,
+ matching: /Invalid IAM Instance Profile name/
+ ) do |_retries, exception|
+ Chef::Log.debug("Instance creation InvalidParameterValue exception is #{exception.inspect}")
+ instance = ec2_resource.create_instances(bootstrap_options.to_hash)[0]
+ end

- # Make sure the instance is ready to be tagged
- instance.wait_until_exists
+ # Make sure the instance is ready to be tagged
+ instance.wait_until_exists

- # Sometimes tagging fails even though the instance 'exists'
- Chef::Provisioning::AWSDriver::AWSProvider.retry_with_backoff(::Aws::EC2::Errors::InvalidInstanceIDNotFound) do
- instance.create_tags({tags: [{key: "Name", value: machine_spec.name}]})
- end
- if machine_options.has_key?(:source_dest_check)
- instance.modify_attribute({
- source_dest_check: {
- value: machine_options[:source_dest_check]
+ # Sometimes tagging fails even though the instance 'exists'
+ Chef::Provisioning::AWSDriver::AWSProvider.retry_with_backoff(::Aws::EC2::Errors::InvalidInstanceIDNotFound) do
+ instance.create_tags(tags: [{ key: "Name", value: machine_spec.name }])
+ end
+ if machine_options.key?(:source_dest_check)
+ instance.modify_attribute(
+ source_dest_check: {
+ value: machine_options[:source_dest_check]
+ }
+ )
+ end
+ machine_spec.reference = {
+ "driver_version" => Chef::Provisioning::AWSDriver::VERSION,
+ "allocated_at" => Time.now.utc.to_s,
+ "host_node" => action_handler.host_node,
+ "image_id" => bootstrap_options[:image_id],
+ "instance_id" => instance.id
  }
- })
- end
- machine_spec.reference = {
- 'driver_version' => Chef::Provisioning::AWSDriver::VERSION,
- 'allocated_at' => Time.now.utc.to_s,
- 'host_node' => action_handler.host_node,
- 'image_id' => bootstrap_options[:image_id],
- 'instance_id' => instance.id
- }
- machine_spec.driver_url = driver_url
- machine_spec.reference['key_name'] = bootstrap_options[:key_name] if bootstrap_options[:key_name]
- # TODO 2.0 We no longer support `use_private_ip_for_ssh`, only `transport_address_location`
- if machine_options[:use_private_ip_for_ssh]
- unless @transport_address_location_warned
- Chef::Log.warn("The machine_option ':use_private_ip_for_ssh' has been deprecated, use ':transport_address_location'")
- @transport_address_location_warned = true
- end
- machine_options[:transport_address_location] ||= :private_ip
- end
- %w(is_windows winrm_username winrm_port winrm_password ssh_username sudo transport_address_location ssh_gateway).each do |key|
- machine_spec.reference[key] = machine_options[key.to_sym] if machine_options[key.to_sym]
- end
- instance
- end
-
- def get_listeners(listeners)
- case listeners
- when Hash
- listeners.map do |from, to|
- from = get_listener(from)
- from.delete(:instance_port)
- from.delete(:instance_protocol)
- to = get_listener(to)
- to.delete(:load_balancer_port)
- to.delete(:protocol)
- to.merge(from)
- end
- when Array
- listeners.map { |listener| get_listener(listener) }
- when nil
- nil
- else
- [ get_listener(listeners) ]
- end
- end
+ machine_spec.driver_url = driver_url
+ machine_spec.reference["key_name"] = bootstrap_options[:key_name] if bootstrap_options[:key_name]
+ # TODO: 2.0 We no longer support `use_private_ip_for_ssh`, only `transport_address_location`
+ if machine_options[:use_private_ip_for_ssh]
+ unless @transport_address_location_warned
+ Chef::Log.warn("The machine_option ':use_private_ip_for_ssh' has been deprecated, use ':transport_address_location'")
+ @transport_address_location_warned = true
+ end
+ machine_options[:transport_address_location] ||= :private_ip
+ end
+ %w{is_windows winrm_username winrm_port winrm_password ssh_username sudo transport_address_location ssh_gateway}.each do |key|
+ machine_spec.reference[key] = machine_options[key.to_sym] if machine_options[key.to_sym]
+ end
+ instance
+ end
 
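Everything the driver later needs to find and reach the instance is persisted in machine_spec.reference by create_instance_and_reference. For a typical Linux machine the saved hash ends up shaped roughly like this (all values below are invented for illustration):

    # Illustrative shape of machine_spec.reference after creation.
    reference = {
      "driver_version" => "3.0.6",
      "allocated_at"   => "2018-01-01 00:00:00 UTC",
      "host_node"      => "chef-server-url/nodes/provisioner",
      "image_id"       => "ami-0example",
      "instance_id"    => "i-0123456789abcdef0",
      "key_name"       => "my_key",   # only if bootstrap_options named one
      "ssh_username"   => "ubuntu"    # copied over from machine_options
    }
    puts reference["instance_id"]
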
- def get_listener(listener)
- result = {}
-
- case listener
- when Hash
- result.merge!(listener)
- when Array
- result[:load_balancer_port] = listener[0] if listener.size >= 1
- result[:protocol] = listener[1] if listener.size >= 2
- when Symbol,String
- result[:protocol] = listener
- when Integer
- result[:load_balancer_port] = listener
- else
- raise "Invalid listener #{listener}"
- end
+ def get_listeners(listeners)
+ case listeners
+ when Hash
+ listeners.map do |from, to|
+ from = get_listener(from)
+ from.delete(:instance_port)
+ from.delete(:instance_protocol)
+ to = get_listener(to)
+ to.delete(:load_balancer_port)
+ to.delete(:protocol)
+ to.merge(from)
+ end
+ when Array
+ listeners.map { |listener| get_listener(listener) }
+ when nil
+ nil
+ else
+ [get_listener(listeners)]
+ end
+ end

- # If either port or protocol are set, set the other
- if result[:load_balancer_port] && !result[:protocol]
- result[:protocol] = PROTOCOL_DEFAULTS[result[:load_balancer_port]]
- elsif result[:protocol] && !result[:load_balancer_port]
- result[:load_balancer_port] = PORT_DEFAULTS[result[:protocol]]
- end
- if result[:instance_port] && !result[:instance_protocol]
- result[:instance_protocol] = PROTOCOL_DEFAULTS[result[:instance_port]]
- elsif result[:instance_protocol] && !result[:instance_port]
- result[:instance_port] = PORT_DEFAULTS[result[:instance_protocol]]
- end
+ def get_listener(listener)
+ result = {}
+
+ case listener
+ when Hash
+ result.merge!(listener)
+ when Array
+ result[:load_balancer_port] = listener[0] if listener.size >= 1
+ result[:protocol] = listener[1] if listener.size >= 2
+ when Symbol, String
+ result[:protocol] = listener
+ when Integer
+ result[:load_balancer_port] = listener
+ else
+ raise "Invalid listener #{listener}"
+ end

- # If instance_port is still unset, copy port/protocol over
- result[:instance_port] ||= result[:load_balancer_port]
- result[:instance_protocol] ||= result[:protocol]
+ # If either port or protocol are set, set the other
+ if result[:load_balancer_port] && !result[:protocol]
+ result[:protocol] = PROTOCOL_DEFAULTS[result[:load_balancer_port]]
+ elsif result[:protocol] && !result[:load_balancer_port]
+ result[:load_balancer_port] = PORT_DEFAULTS[result[:protocol]]
+ end
+ if result[:instance_port] && !result[:instance_protocol]
+ result[:instance_protocol] = PROTOCOL_DEFAULTS[result[:instance_port]]
+ elsif result[:instance_protocol] && !result[:instance_port]
+ result[:instance_port] = PORT_DEFAULTS[result[:instance_protocol]]
+ end

- result
- end
+ # If instance_port is still unset, copy port/protocol over
+ result[:instance_port] ||= result[:load_balancer_port]
+ result[:instance_protocol] ||= result[:protocol]

- def default_instance_type
- 't2.micro'
- end
+ result
+ end
 
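get_listener accepts an ELB listener given as an integer port, a protocol symbol or string, a [port, protocol] array, or a full hash, and fills in whichever half is missing from PORT_DEFAULTS / PROTOCOL_DEFAULTS before mirroring the values onto the instance side. A self-contained version of that normalization (same default tables as at the end of this diff; the driver plumbing is omitted):

    # Standalone sketch of the listener normalization above.
    PORT_DEFAULTS = { http: 80, https: 443 }.freeze
    PROTOCOL_DEFAULTS = { 25 => :tcp, 80 => :http, 443 => :https, 465 => :ssl, 587 => :tcp }.freeze

    def normalize_listener(listener)
      result =
        case listener
        when Hash    then listener.dup
        when Array   then { load_balancer_port: listener[0], protocol: listener[1] }.compact
        when Integer then { load_balancer_port: listener }
        else              { protocol: listener.to_sym }
        end
      result[:protocol] ||= PROTOCOL_DEFAULTS[result[:load_balancer_port]]
      result[:load_balancer_port] ||= PORT_DEFAULTS[result[:protocol]]
      result[:instance_port] ||= result[:load_balancer_port]
      result[:instance_protocol] ||= result[:protocol]
      result
    end

    p normalize_listener(443)   # => {:load_balancer_port=>443, :protocol=>:https, :instance_port=>443, :instance_protocol=>:https}
    p normalize_listener(:http) # => {:protocol=>:http, :load_balancer_port=>80, :instance_port=>80, :instance_protocol=>:http}
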
- PORT_DEFAULTS = {
- :http => 80,
- :https => 443,
- }
- PROTOCOL_DEFAULTS = {
- 25 => :tcp,
- 80 => :http,
- 443 => :https,
- 465 => :ssl,
- 587 => :tcp,
- }
+ def default_instance_type
+ "t2.micro"
+ end

+ PORT_DEFAULTS = {
+ http: 80,
+ https: 443
+ }.freeze
+ PROTOCOL_DEFAULTS = {
+ 25 => :tcp,
+ 80 => :http,
+ 443 => :https,
+ 465 => :ssl,
+ 587 => :tcp
+ }.freeze
+ end
+ end
  end
  end
- end
- end