chef-provisioning-aws 0.4.0 → 0.5.0

Files changed (53)
  1. checksums.yaml +4 -4
  2. data/Rakefile +2 -0
  3. data/lib/chef/provider/aws_auto_scaling_group.rb +30 -41
  4. data/lib/chef/provider/aws_dhcp_options.rb +70 -0
  5. data/lib/chef/provider/aws_ebs_volume.rb +182 -34
  6. data/lib/chef/provider/aws_eip_address.rb +63 -60
  7. data/lib/chef/provider/aws_key_pair.rb +18 -27
  8. data/lib/chef/provider/aws_launch_configuration.rb +50 -0
  9. data/lib/chef/provider/aws_route_table.rb +122 -0
  10. data/lib/chef/provider/aws_s3_bucket.rb +42 -49
  11. data/lib/chef/provider/aws_security_group.rb +252 -59
  12. data/lib/chef/provider/aws_sns_topic.rb +10 -26
  13. data/lib/chef/provider/aws_sqs_queue.rb +16 -38
  14. data/lib/chef/provider/aws_subnet.rb +85 -32
  15. data/lib/chef/provider/aws_vpc.rb +163 -23
  16. data/lib/chef/provisioning/aws_driver.rb +18 -1
  17. data/lib/chef/provisioning/aws_driver/aws_provider.rb +206 -0
  18. data/lib/chef/provisioning/aws_driver/aws_resource.rb +186 -0
  19. data/lib/chef/provisioning/aws_driver/aws_resource_with_entry.rb +114 -0
  20. data/lib/chef/provisioning/aws_driver/driver.rb +317 -255
  21. data/lib/chef/provisioning/aws_driver/resources.rb +8 -5
  22. data/lib/chef/provisioning/aws_driver/super_lwrp.rb +45 -0
  23. data/lib/chef/provisioning/aws_driver/version.rb +1 -1
  24. data/lib/chef/resource/aws_auto_scaling_group.rb +15 -13
  25. data/lib/chef/resource/aws_dhcp_options.rb +57 -0
  26. data/lib/chef/resource/aws_ebs_volume.rb +20 -22
  27. data/lib/chef/resource/aws_eip_address.rb +50 -25
  28. data/lib/chef/resource/aws_image.rb +20 -0
  29. data/lib/chef/resource/aws_instance.rb +20 -0
  30. data/lib/chef/resource/aws_internet_gateway.rb +16 -0
  31. data/lib/chef/resource/aws_key_pair.rb +6 -10
  32. data/lib/chef/resource/aws_launch_configuration.rb +15 -0
  33. data/lib/chef/resource/aws_load_balancer.rb +16 -0
  34. data/lib/chef/resource/aws_network_interface.rb +16 -0
  35. data/lib/chef/resource/aws_route_table.rb +76 -0
  36. data/lib/chef/resource/aws_s3_bucket.rb +8 -18
  37. data/lib/chef/resource/aws_security_group.rb +49 -19
  38. data/lib/chef/resource/aws_sns_topic.rb +14 -15
  39. data/lib/chef/resource/aws_sqs_queue.rb +16 -14
  40. data/lib/chef/resource/aws_subnet.rb +87 -17
  41. data/lib/chef/resource/aws_vpc.rb +137 -15
  42. data/spec/integration/aws_security_group_spec.rb +55 -0
  43. data/spec/spec_helper.rb +8 -2
  44. data/spec/support/aws_support.rb +211 -0
  45. metadata +33 -10
  46. data/lib/chef/provider/aws_launch_config.rb +0 -43
  47. data/lib/chef/provider/aws_provider.rb +0 -22
  48. data/lib/chef/provisioning/aws_driver/aws_profile.rb +0 -73
  49. data/lib/chef/resource/aws_launch_config.rb +0 -14
  50. data/lib/chef/resource/aws_resource.rb +0 -10
  51. data/spec/chef_zero_rspec_helper.rb +0 -8
  52. data/spec/unit/provider/aws_subnet_spec.rb +0 -67
  53. data/spec/unit/resource/aws_subnet_spec.rb +0 -23
@@ -47,10 +47,11 @@ module AWSDriver
  region = nil if region && region.empty?

  credentials = profile_name ? aws_credentials[profile_name] : aws_credentials.default
- @aws_config = AWS::Core::Configuration.new(
+ @aws_config = AWS.config(
  access_key_id: credentials[:aws_access_key_id],
  secret_access_key: credentials[:aws_secret_access_key],
- region: region || credentials[:region]
+ region: region || credentials[:region],
+ logger: Chef::Log.logger
  )
  end

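The switch from AWS::Core::Configuration.new to AWS.config also routes the SDK's request logging through Chef's logger. As a rough sketch of what the driver now does with the aws-sdk v1 gem (the require name and credential values below are illustrative placeholders):

    require 'aws-sdk'   # aws-sdk v1
    require 'chef/log'

    AWS.config(
      access_key_id:     'AKIA...',         # placeholder credential
      secret_access_key: '...',             # placeholder credential
      region:            'us-east-1',
      logger:            Chef::Log.logger   # SDK calls now show up in Chef's debug log
    )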
@@ -60,50 +61,30 @@ module AWSDriver
 
  # Load balancer methods
  def allocate_load_balancer(action_handler, lb_spec, lb_options, machine_specs)
- lb_options ||= {}
- if lb_options[:security_group_ids]
- security_groups = ec2.security_groups.filter('group-id', lb_options[:security_group_ids]).to_a
- elsif lb_options[:security_group_names]
- security_groups = ec2.security_groups.filter('group-name', lb_options[:security_group_names]).to_a
- else
- security_groups = []
- end
- security_group_ids = security_groups.map { |sg| sg.id }
-
- availability_zones = lb_options[:availability_zones] || []
- subnets = lb_options[:subnets] || []
- listeners = lb_options[:listeners]
- scheme = lb_options[:scheme]
-
- validate_listeners(listeners)
- if !availability_zones.empty? && !subnets.empty?
- raise "You cannot specify both `availability_zones` and `subnets`"
- end
-
- lb_optionals = {}
- lb_optionals[:security_groups] = security_group_ids unless security_group_ids.empty?
- lb_optionals[:availability_zones] = availability_zones unless availability_zones.empty?
- lb_optionals[:subnets] = subnets unless subnets.empty?
- lb_optionals[:listeners] = listeners if listeners
- lb_optionals[:scheme] = scheme if scheme
+ lb_options = AWSResource.lookup_options(lb_options || {}, managed_entry_store: lb_spec.managed_entry_store, driver: self)

  old_elb = nil
  actual_elb = load_balancer_for(lb_spec)
- if !actual_elb.exists?
+ if !actual_elb || !actual_elb.exists?
+ lb_options[:listeners] ||= get_listeners(:http)
+ if !lb_options[:subnets] && !lb_options[:availability_zones] && machine_specs
+ lb_options[:subnets] = machine_specs.map { |s| ec2.instances[s.reference['instance_id']].subnet }.uniq
+ end
+
  perform_action = proc { |desc, &block| action_handler.perform_action(desc, &block) }
+ Chef::Log.debug "AWS Load Balancer options: #{lb_options.inspect}"

- security_group_names = security_groups.map { |sg| sg.name }.join(",")
+ updates = [ "create load balancer #{lb_spec.name} in #{aws_config.region}" ]
+ updates << " enable availability zones #{lb_options[:availability_zones]}" if lb_options[:availability_zones]
+ updates << " attach subnets #{lb_options[:subnets].join(', ')}" if lb_options[:subnets]
+ updates << " with listeners #{lb_options[:listeners]}" if lb_options[:listeners]
+ updates << " with security groups #{lb_options[:security_groups]}" if lb_options[:security_groups]

- updates = [ "Create load balancer #{lb_spec.name} in #{aws_config.region}" ]
- updates << " enable availability zones #{availability_zones.join(', ')}" if availability_zones.size > 0
- updates << " attach subnets #{subnets.join(', ')}" if subnets.size > 0
- updates << " with listeners #{listeners.join(', ')}" if listeners && listeners.size > 0
- updates << " with security groups #{security_group_names}" if security_group_names

  action_handler.perform_action updates do
- actual_elb = elb.load_balancers.create(lb_spec.name, lb_optionals)
+ actual_elb = elb.load_balancers.create(lb_spec.name, lb_options)

- lb_spec.location = {
+ lb_spec.reference = {
  'driver_url' => driver_url,
  'driver_version' => Chef::Provisioning::AWSDriver::VERSION,
  'allocated_at' => Time.now.utc.to_s,
@@ -117,24 +98,28 @@ module AWSDriver
  end

  # TODO: refactor this whole giant method into many smaller method calls
+ # TODO if we update scheme, we don't need to run any of the other updates.
+ # Also, if things aren't specified (such as machines / listeners), we
+ # need to grab them from the actual load balancer so we don't lose them.
+ # i.e. load_balancer 'blah' do
+ # lb_options: { scheme: 'other_scheme' }
+ # end
+ # TODO we will leak the actual_elb if we fail to finish creating it
  # Update scheme - scheme is immutable once set, so if it is changing we need to delete the old
  # ELB and create a new one
- if scheme && scheme.downcase != actual_elb.scheme
- desc = [" updating scheme to #{scheme}"]
+ if lb_options[:scheme] && lb_options[:scheme].downcase != actual_elb.scheme
+ desc = [" updating scheme to #{lb_options[:scheme]}"]
  desc << " WARN: scheme is immutable, so deleting and re-creating the ELB"
  perform_action.call(desc) do
  old_elb = actual_elb
- actual_elb = elb.load_balancers.create(lb_spec.name, lb_optionals)
+ actual_elb = elb.load_balancers.create(lb_spec.name, lb_options)
  end
  end

  # Update security groups
- if security_group_ids.empty?
- Chef::Log.debug("No Security Groups specified. Load_balancer[#{actual_elb.name}] cannot have " +
- "empty Security Groups, so assuming it only currently has the default Security Group. No action taken.")
- else
+ if lb_options[:security_groups]
  current = actual_elb.security_group_ids
- desired = security_group_ids
+ desired = lb_options[:security_groups]
  if current != desired
  perform_action.call(" updating security groups to #{desired.to_a}") do
  elb.client.apply_security_groups_to_load_balancer(
@@ -145,148 +130,155 @@ module AWSDriver
  end
  end

- # A subnet always belongs to an availability zone. When specifying a ELB spec, you can either
- # specify subnets OR AZs but not both. You cannot specify multiple subnets in the same AZ.
- # You must specify at least 1 subnet or AZ. On an update you cannot remove all subnets
- # or AZs - it must belong to one.
- if !availability_zones.empty? && !subnets.empty?
- # We do this check here because there is no atomic call we can make to specify both
- # subnets and AZs at the same time
- raise "You cannot specify both `availability_zones` and `subnets`"
- end
- # Users can switch from availability zones to subnets or vice versa. To ensure we do not
- # unassign all (which causes an AWS error) we first add all available ones, then remove
- # an unecessary ones
- actual_zones_subnets = {}
- actual_elb.subnets.each do |subnet|
- actual_zones_subnets[subnet.id] = subnet.availability_zone.name
- end
-
- # Only 1 of subnet or AZ will be populated b/c of our check earlier
- desired_subnets_zones = {}
- availability_zones.each do |zone|
- # If the user specifies availability zone, we find the default subnet for that
- # AZ because this duplicates the create logic
- zone = zone.downcase
- filters = [
- {:name => 'availabilityZone', :values => [zone]},
- {:name => 'defaultForAz', :values => ['true']}
- ]
- default_subnet = ec2.client.describe_subnets(:filters => filters)[:subnet_set]
- if default_subnet.size != 1
- raise "Could not find default subnet in availability zone #{zone}"
+ if lb_options[:availability_zones] || lb_options[:subnets]
+ # A subnet always belongs to an availability zone. When specifying a ELB spec, you can either
+ # specify subnets OR AZs but not both. You cannot specify multiple subnets in the same AZ.
+ # You must specify at least 1 subnet or AZ. On an update you cannot remove all subnets
+ # or AZs - it must belong to one.
+ if lb_options[:availability_zones] && lb_options[:subnets]
+ # We do this check here because there is no atomic call we can make to specify both
+ # subnets and AZs at the same time
+ raise "You cannot specify both `availability_zones` and `subnets`"
  end
- default_subnet = default_subnet[0]
- desired_subnets_zones[default_subnet[:subnet_id]] = zone
- end
- unless subnets.empty?
- subnet_query = ec2.client.describe_subnets(:subnet_ids => subnets)[:subnet_set]
- # AWS raises an error on an unknown subnet, but not an unknown AZ
- subnet_query.each do |subnet|
- zone = subnet[:availability_zone].downcase
- desired_subnets_zones[subnet[:subnet_id]] = zone
+
+ # Users can switch from availability zones to subnets or vice versa. To ensure we do not
+ # unassign all (which causes an AWS error) we first add all available ones, then remove
+ # an unecessary ones
+ actual_zones_subnets = {}
+ actual_elb.subnets.each do |subnet|
+ actual_zones_subnets[subnet.id] = subnet.availability_zone.name
  end
- end

- # We only bother attaching subnets, because doing this automatically attaches the AZ
- attach_subnets = desired_subnets_zones.keys - actual_zones_subnets.keys
- unless attach_subnets.empty?
- action = " attach subnets #{attach_subnets.join(', ')}"
- enable_zones = (desired_subnets_zones.map {|s,z| z if attach_subnets.include?(s)}).compact
- action += " (availability zones #{enable_zones.join(', ')})"
- perform_action.call(action) do
- begin
- elb.client.attach_load_balancer_to_subnets(
- load_balancer_name: actual_elb.name,
- subnets: attach_subnets
- )
- rescue AWS::ELB::Errors::InvalidConfigurationRequest
- raise "You cannot currently move from 1 subnet to another in the same availability zone. " +
- "Amazon does not have an atomic operation which allows this. You must create a new " +
- "ELB with the correct subnets and move instances into it. Tried to attach subets " +
- "#{attach_subnets.join(', ')} (availability zones #{enable_zones.join(', ')}) to " +
- "existing ELB named #{actual_elb.name}"
+ # Only 1 of subnet or AZ will be populated b/c of our check earlier
+ desired_subnets_zones = {}
+ if lb_options[:availability_zones]
+ lb_options[:availability_zones].each do |zone|
+ # If the user specifies availability zone, we find the default subnet for that
+ # AZ because this duplicates the create logic
+ zone = zone.downcase
+ filters = [
+ {:name => 'availabilityZone', :values => [zone]},
+ {:name => 'defaultForAz', :values => ['true']}
+ ]
+ default_subnet = ec2.client.describe_subnets(:filters => filters)[:subnet_set]
+ if default_subnet.size != 1
+ raise "Could not find default subnet in availability zone #{zone}"
+ end
+ default_subnet = default_subnet[0]
+ desired_subnets_zones[default_subnet[:subnet_id]] = zone
+ end
+ end
+ unless lb_options[:subnets] && lb_options[:subnets.empty?]
+ subnet_query = ec2.client.describe_subnets(:subnet_ids => lb_options[:subnets])[:subnet_set]
+ # AWS raises an error on an unknown subnet, but not an unknown AZ
+ subnet_query.each do |subnet|
+ zone = subnet[:availability_zone].downcase
+ desired_subnets_zones[subnet[:subnet_id]] = zone
+ end
+ end
+
+ # We only bother attaching subnets, because doing this automatically attaches the AZ
+ attach_subnets = desired_subnets_zones.keys - actual_zones_subnets.keys
+ unless attach_subnets.empty?
+ action = " attach subnets #{attach_subnets.join(', ')}"
+ enable_zones = (desired_subnets_zones.map {|s,z| z if attach_subnets.include?(s)}).compact
+ action += " (availability zones #{enable_zones.join(', ')})"
+ perform_action.call(action) do
+ begin
+ elb.client.attach_load_balancer_to_subnets(
+ load_balancer_name: actual_elb.name,
+ subnets: attach_subnets
+ )
+ rescue AWS::ELB::Errors::InvalidConfigurationRequest
+ raise "You cannot currently move from 1 subnet to another in the same availability zone. " +
+ "Amazon does not have an atomic operation which allows this. You must create a new " +
+ "ELB with the correct subnets and move instances into it. Tried to attach subets " +
+ "#{attach_subnets.join(', ')} (availability zones #{enable_zones.join(', ')}) to " +
+ "existing ELB named #{actual_elb.name}"
+ end
  end
  end
- end

- detach_subnets = actual_zones_subnets.keys - desired_subnets_zones.keys
- unless detach_subnets.empty?
- action = " detach subnets #{detach_subnets.join(', ')}"
- disable_zones = (actual_zones_subnets.map {|s,z| z if detach_subnets.include?(s)}).compact
- action += " (availability zones #{disable_zones.join(', ')})"
- perform_action.call(action) do
- elb.client.detach_load_balancer_from_subnets(
- load_balancer_name: actual_elb.name,
- subnets: detach_subnets
- )
+ detach_subnets = actual_zones_subnets.keys - desired_subnets_zones.keys
+ unless detach_subnets.empty?
+ action = " detach subnets #{detach_subnets.join(', ')}"
+ disable_zones = (actual_zones_subnets.map {|s,z| z if detach_subnets.include?(s)}).compact
+ action += " (availability zones #{disable_zones.join(', ')})"
+ perform_action.call(action) do
+ elb.client.detach_load_balancer_from_subnets(
+ load_balancer_name: actual_elb.name,
+ subnets: detach_subnets
+ )
+ end
  end
  end

  # Update listeners - THIS IS NOT ATOMIC
- add_listeners = {}
- listeners.each { |l| add_listeners[l[:port]] = l } if listeners
- actual_elb.listeners.each do |listener|
- desired_listener = add_listeners.delete(listener.port)
- if desired_listener
-
- # listener.(port|protocol|instance_port|instance_protocol) are immutable for the life
- # of the listener - must create a new one and delete old one
- immutable_updates = []
- if listener.protocol != desired_listener[:protocol].to_sym.downcase
- immutable_updates << " update protocol from #{listener.protocol.inspect} to #{desired_listener[:protocol].inspect}"
- end
- if listener.instance_port != desired_listener[:instance_port]
- immutable_updates << " update instance port from #{listener.instance_port.inspect} to #{desired_listener[:instance_port].inspect}"
- end
- if listener.instance_protocol != desired_listener[:instance_protocol].to_sym.downcase
- immutable_updates << " update instance protocol from #{listener.instance_protocol.inspect} to #{desired_listener[:instance_protocol].inspect}"
- end
- if !immutable_updates.empty?
- perform_action.call(immutable_updates) do
- listener.delete
- actual_elb.listeners.create(desired_listener)
+ if lb_options[:listeners]
+ add_listeners = {}
+ lb_options[:listeners].each { |l| add_listeners[l[:port]] = l }
+ actual_elb.listeners.each do |listener|
+ desired_listener = add_listeners.delete(listener.port)
+ if desired_listener
+
+ # listener.(port|protocol|instance_port|instance_protocol) are immutable for the life
+ # of the listener - must create a new one and delete old one
+ immutable_updates = []
+ if listener.protocol != desired_listener[:protocol].to_sym.downcase
+ immutable_updates << " update protocol from #{listener.protocol.inspect} to #{desired_listener[:protocol].inspect}"
  end
- elsif listener.server_certificate != desired_listener[:server_certificate]
- # Server certificate is mutable - if no immutable changes required a full recreate, update cert
- perform_action.call(" update server certificate from #{listener.server_certificate} to #{desired_listener[:server_certificate]}") do
- listener.server_certificate = desired_listener[:server_certificate]
+ if listener.instance_port != desired_listener[:instance_port]
+ immutable_updates << " update instance port from #{listener.instance_port.inspect} to #{desired_listener[:instance_port].inspect}"
+ end
+ if listener.instance_protocol != desired_listener[:instance_protocol].to_sym.downcase
+ immutable_updates << " update instance protocol from #{listener.instance_protocol.inspect} to #{desired_listener[:instance_protocol].inspect}"
+ end
+ if !immutable_updates.empty?
+ perform_action.call(immutable_updates) do
+ listener.delete
+ actual_elb.listeners.create(desired_listener)
+ end
+ elsif listener.server_certificate != desired_listener[:server_certificate]
+ # Server certificate is mutable - if no immutable changes required a full recreate, update cert
+ perform_action.call(" update server certificate from #{listener.server_certificate} to #{desired_listener[:server_certificate]}") do
+ listener.server_certificate = desired_listener[:server_certificate]
+ end
  end
- end

- else
- perform_action.call(" remove listener #{listener.port}") do
- listener.delete
+ else
+ perform_action.call(" remove listener #{listener.port}") do
+ listener.delete
+ end
  end
  end
- end
- add_listeners.values.each do |listener|
- updates = [ " add listener #{listener[:port]}" ]
- updates << " set protocol to #{listener[:protocol].inspect}"
- updates << " set instance port to #{listener[:instance_port].inspect}"
- updates << " set instance protocol to #{listener[:instance_protocol].inspect}"
- updates << " set server certificate to #{listener[:server_certificate]}" if listener[:server_certificate]
- perform_action.call(updates) do
- actual_elb.listeners.create(listener)
+ add_listeners.values.each do |listener|
+ updates = [ " add listener #{listener[:port]}" ]
+ updates << " set protocol to #{listener[:protocol].inspect}"
+ updates << " set instance port to #{listener[:instance_port].inspect}"
+ updates << " set instance protocol to #{listener[:instance_protocol].inspect}"
+ updates << " set server certificate to #{listener[:server_certificate]}" if listener[:server_certificate]
+ perform_action.call(updates) do
+ actual_elb.listeners.create(listener)
+ end
  end
  end
  end

  # Update instance list, but only if there are machines specified
- actual_instance_ids = actual_elb.instances.map { |i| i.instance_id }
-
  if machine_specs
- instances_to_add = machine_specs.select { |s| !actual_instance_ids.include?(s.location['instance_id']) }
- instance_ids_to_remove = actual_instance_ids - machine_specs.map { |s| s.location['instance_id'] }
-
+ actual_instance_ids = actual_elb.instances.map { |i| i.instance_id }
+
+ instances_to_add = machine_specs.select { |s| !actual_instance_ids.include?(s.reference['instance_id']) }
+ instance_ids_to_remove = actual_instance_ids - machine_specs.map { |s| s.reference['instance_id'] }
+
  if instances_to_add.size > 0
  perform_action.call(" add machines #{instances_to_add.map { |s| s.name }.join(', ')}") do
- instance_ids_to_add = instances_to_add.map { |s| s.location['instance_id'] }
+ instance_ids_to_add = instances_to_add.map { |s| s.reference['instance_id'] }
  Chef::Log.debug("Adding instances #{instance_ids_to_add.join(', ')} to load balancer #{actual_elb.name} in region #{aws_config.region}")
  actual_elb.instances.add(instance_ids_to_add)
  end
  end
-
+
  if instance_ids_to_remove.size > 0
  perform_action.call(" remove instances #{instance_ids_to_remove}") do
  actual_elb.instances.remove(instance_ids_to_remove)
@@ -303,12 +295,12 @@ module AWSDriver
  # Something went wrong before we could moved instances from the old ELB to the new one
  # Don't delete the old ELB, but warn users there could now be 2 ELBs with the same name
  unless old_elb.nil?
- Chef::Log.warn("It is possible there are now 2 ELB instances - #{old_elb.id} and #{actual_elb.id}. " +
+ Chef::Log.warn("It is possible there are now 2 ELB instances - #{old_elb.name} and #{actual_elb.name}. " +
  "Determine which is correct and manually clean up the other.")
  end
  end

- def ready_load_balancer(action_handler, lb_spec, lb_options, machine_specs)
+ def ready_load_balancer(action_handler, lb_spec, lb_options, machine_spec)
  end

  def destroy_load_balancer(action_handler, lb_spec, lb_options)
@@ -327,27 +319,21 @@ module AWSDriver
  end

  # Image methods
- def allocate_image(action_handler, image_spec, image_options, machine_spec)
+ def allocate_image(action_handler, image_spec, image_options, machine_spec, machine_options)
  actual_image = image_for(image_spec)
  if actual_image.nil? || !actual_image.exists? || actual_image.state == :failed
  action_handler.perform_action "Create image #{image_spec.name} from machine #{machine_spec.name} with options #{image_options.inspect}" do
  image_options[:name] ||= image_spec.name
- image_options[:instance_id] ||= machine_spec.location['instance_id']
+ image_options[:instance_id] ||= machine_spec.reference['instance_id']
  image_options[:description] ||= "Image #{image_spec.name} created from machine #{machine_spec.name}"
  Chef::Log.debug "AWS Image options: #{image_options.inspect}"
  image = ec2.images.create(image_options.to_hash)
- image_spec.location = {
+ image_spec.reference = {
  'driver_url' => driver_url,
  'driver_version' => Chef::Provisioning::AWSDriver::VERSION,
  'image_id' => image.id,
  'allocated_at' => Time.now.to_i
  }
- image_spec.machine_options ||= {}
- image_spec.machine_options.merge!({
- :bootstrap_options => {
- :image_id => image.id
- }
- })
  end
  end
  end
@@ -368,10 +354,12 @@ module AWSDriver
 
  def destroy_image(action_handler, image_spec, image_options)
  actual_image = image_for(image_spec)
- snapshots = snapshots_for(image_spec)
  if actual_image.nil? || !actual_image.exists?
  Chef::Log.warn "Image #{image_spec.name} doesn't exist"
  else
+ snapshots = actual_image.block_device_mappings.map do |dev, opts|
+ ec2.snapshots[opts[:snapshot_id]]
+ end
  action_handler.perform_action "De-registering image #{image_spec.name}" do
  actual_image.deregister
  end
@@ -420,7 +408,7 @@ EOD
  sleep 5 while instance.status == :pending
  # TODO add other tags identifying user / node url (same as fog)
  instance.tags['Name'] = machine_spec.name
- machine_spec.location = {
+ machine_spec.reference = {
  'driver_url' => driver_url,
  'driver_version' => Chef::Provisioning::AWSDriver::VERSION,
  'allocated_at' => Time.now.utc.to_s,
@@ -428,9 +416,9 @@ EOD
  'image_id' => bootstrap_options[:image_id],
  'instance_id' => instance.id
  }
- machine_spec.location['key_name'] = bootstrap_options[:key_name] if bootstrap_options[:key_name]
+ machine_spec.reference['key_name'] = bootstrap_options[:key_name] if bootstrap_options[:key_name]
  %w(is_windows ssh_username sudo use_private_ip_for_ssh ssh_gateway).each do |key|
- machine_spec.location[key] = machine_options[key.to_sym] if machine_options[key.to_sym]
+ machine_spec.reference[key] = machine_options[key.to_sym] if machine_options[key.to_sym]
  end
  end
  end
@@ -455,7 +443,7 @@ EOD
  if instance.status != :running
  wait_until_machine(action_handler, machine_spec, instance) { instance.status != :stopping }
  if instance.status == :stopped
- action_handler.perform_action "Start #{machine_spec.name} (#{machine_spec.location['instance_id']}) in #{aws_config.region} ..." do
+ action_handler.perform_action "Start #{machine_spec.name} (#{machine_spec.reference['instance_id']}) in #{aws_config.region} ..." do
  instance.start
  end
  end
@@ -473,19 +461,19 @@ EOD
  machine_spec = Chef::Provisioning::ChefMachineSpec.get(name, chef_server)
  end

- machine_for(machine_spec, machine_spec.location)
+ machine_for(machine_spec, machine_spec.reference)
  end

  def destroy_machine(action_handler, machine_spec, machine_options)
  instance = instance_for(machine_spec)
  if instance && instance.exists?
  # TODO do we need to wait_until(action_handler, machine_spec, instance) { instance.status != :shutting_down } ?
- action_handler.perform_action "Terminate #{machine_spec.name} (#{machine_spec.location['instance_id']}) in #{aws_config.region} ..." do
+ action_handler.perform_action "Terminate #{machine_spec.name} (#{machine_spec.reference['instance_id']}) in #{aws_config.region} ..." do
  instance.terminate
- machine_spec.location = nil
+ machine_spec.reference = nil
  end
  else
- Chef::Log.warn "Instance #{machine_spec.location['instance_id']} doesn't exist for #{machine_spec.name}"
+ Chef::Log.warn "Instance #{machine_spec.reference['instance_id']} doesn't exist for #{machine_spec.name}"
  end

  strategy = convergence_strategy_for(machine_spec, machine_options)
@@ -500,6 +488,14 @@ EOD
  @elb ||= AWS::ELB.new(config: aws_config)
  end

+ def iam
+ @iam ||= AWS::IAM.new(config: aws_config)
+ end
+
+ def s3
+ @s3 ||= AWS::S3.new(config: aws_config)
+ end
+
  def sns
  @sns ||= AWS::SNS.new(config: aws_config)
  end
@@ -508,15 +504,40 @@ EOD
  @sqs ||= AWS::SQS.new(config: aws_config)
  end

- def s3
- @s3 ||= AWS::S3.new(config: aws_config)
- end
-
  def auto_scaling
  @auto_scaling ||= AWS::AutoScaling.new(config: aws_config)
  end

- private
+ def build_arn(partition: 'aws', service: nil, region: aws_config.region, account_id: self.account_id, resource: nil)
+ "arn:#{partition}:#{service}:#{region}:#{account_id}:#{resource}"
+ end
+
+ def parse_arn(arn)
+ parts = arn.split(':', 6)
+ {
+ partition: parts[1],
+ service: parts[2],
+ region: parts[3],
+ account_id: parts[4],
+ resource: parts[5]
+ }
+ end
+
+ def account_id
+ begin
+ # We've got an AWS account root credential or an IAM admin with access rights
+ current_user = iam.client.get_user
+ arn = current_user[:user][:arn]
+ rescue AWS::IAM::Errors::AccessDenied => e
+ # If we don't have access, the error message still tells us our account ID and user ...
+ # https://forums.aws.amazon.com/thread.jspa?messageID=394344
+ if e.to_s !~ /\b(arn:aws:iam::[0-9]{12}:\S*)/
+ raise "IAM error response for GetUser did not include user ARN. Can't retrieve account ID."
+ end
+ arn = $1
+ end
+ parse_arn(arn)[:account_id]
+ end

  # For creating things like AWS keypairs exclusively
  @@chef_default_lock = Mutex.new
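The new build_arn, parse_arn, and account_id helpers added above are plain string handling around a single IAM GetUser call (falling back to the ARN embedded in the AccessDenied message). For illustration, using a made-up account ID, they behave roughly like this:

    parse_arn('arn:aws:iam::123456789012:user/alice')
    # => { partition: 'aws', service: 'iam', region: '',
    #      account_id: '123456789012', resource: 'user/alice' }

    build_arn(service: 'sns', resource: 'my-topic')
    # => "arn:aws:sns:us-east-1:123456789012:my-topic"
    #    (region comes from aws_config.region, account_id from the IAM lookup)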
@@ -528,7 +549,7 @@ EOD
  raise "Instance for node #{machine_spec.name} has not been created!"
  end

- if machine_spec.location['is_windows']
+ if machine_spec.reference['is_windows']
  Chef::Provisioning::Machine::WindowsMachine.new(machine_spec, transport_for(machine_spec, machine_options, instance), convergence_strategy_for(machine_spec, machine_options))
  else
  Chef::Provisioning::Machine::UnixMachine.new(machine_spec, transport_for(machine_spec, machine_options, instance), convergence_strategy_for(machine_spec, machine_options))
@@ -551,6 +572,7 @@ EOD
  Chef::Log.debug "Non-windows, not setting userdata"
  end

+ bootstrap_options = AWSResource.lookup_options(bootstrap_options, managed_entry_store: machine_spec.managed_entry_store, driver: self)
  Chef::Log.debug "AWS Bootstrap options: #{bootstrap_options.inspect}"
  bootstrap_options
  end
@@ -571,62 +593,36 @@ EOD
  end

  def load_balancer_for(lb_spec)
- if lb_spec.name
- elb.load_balancers[lb_spec.name]
- else
- nil
- end
+ Chef::Resource::AwsLoadBalancer.get_aws_object(lb_spec.name, driver: self, managed_entry_store: lb_spec.managed_entry_store, required: false)
  end

  def instance_for(machine_spec)
- if machine_spec.location && machine_spec.location['instance_id']
- ec2.instances[machine_spec.location['instance_id']]
+ if machine_spec.reference
+ if machine_spec.reference['driver_url'] != driver_url
+ raise "Switching a machine's driver from #{machine_spec.reference['driver_url']} to #{driver_url} is not currently supported! Use machine :destroy and then re-create the machine on the new driver."
+ end
+ Chef::Resource::AwsInstance.get_aws_object(machine_spec.reference['instance_id'], driver: self, managed_entry_store: machine_spec.managed_entry_store, required: false)
  end
  end

  def instances_for(machine_specs)
  result = {}
- machine_specs.each do |machine_spec|
- if machine_spec.location && machine_spec.location['instance_id']
- if machine_spec.location['driver_url'] != driver_url
- raise "Switching a machine's driver from #{machine_spec.location['driver_url']} to #{driver_url} is not currently supported! Use machine :destroy and then re-create the machine on the new driver."
- end
- #returns nil if not found
- result[machine_spec] = ec2.instances[machine_spec.location['instance_id']]
- end
- end
+ machine_specs.each { |machine_spec| result[machine_spec] = instance_for(machine_spec) }
  result
  end

  def image_for(image_spec)
- if image_spec.location && image_spec.location['image_id']
- ec2.images[image_spec.location['image_id']]
- end
- end
-
- def snapshots_for(image_spec)
- if image_spec.location && image_spec.location['image_id']
- actual_image = image_for(image_spec)
- snapshots = []
- actual_image.block_device_mappings.each do |dev, opts|
- snapshots << ec2.snapshots[opts[:snapshot_id]]
- end
- snapshots
- end
+ Chef::Resource::AwsImage.get_aws_object(image_spec.name, driver: self, managed_entry_store: image_spec.managed_entry_store, required: false)
  end

  def transport_for(machine_spec, machine_options, instance)
- if machine_spec.location['is_windows']
+ if machine_spec.reference['is_windows']
  create_winrm_transport(machine_spec, machine_options, instance)
  else
  create_ssh_transport(machine_spec, machine_options, instance)
  end
  end

- def compute_options
-
- end
-
  def aws_credentials
  # Grab the list of possible credentials
  @aws_credentials ||= if driver_options[:aws_credentials]
@@ -674,7 +670,7 @@ EOD
  def create_winrm_transport(machine_spec, machine_options, instance)
  remote_host = determine_remote_host(machine_spec, instance)

- port = machine_spec.location['winrm_port'] || 5985
+ port = machine_spec.reference['winrm_port'] || 5985
  endpoint = "http://#{remote_host}:#{port}/wsman"
  type = :plaintext
  pem_bytes = get_private_key(instance.key_name)
@@ -685,7 +681,7 @@ EOD
  decrypted_password = private_key.private_decrypt decoded

  winrm_options = {
- :user => machine_spec.location['winrm_username'] || 'Administrator',
+ :user => machine_spec.reference['winrm_username'] || 'Administrator',
  :pass => decrypted_password,
  :disable_sspi => true,
  :basic_auth_only => true
@@ -699,7 +695,7 @@ EOD
  sleep_time = 10
  max_wait_time = 900 # 15 minutes
  encrypted_admin_password = nil
- instance_id = machine_spec.location['instance_id']
+ instance_id = machine_spec.reference['instance_id']

  Chef::Log.info "waiting for #{machine_spec.name}'s admin password to be available..."
  while time_elapsed < max_wait_time && encrypted_admin_password.nil?
@@ -720,12 +716,12 @@ EOD
 
  def create_ssh_transport(machine_spec, machine_options, instance)
  ssh_options = ssh_options_for(machine_spec, machine_options, instance)
- username = machine_spec.location['ssh_username'] || machine_options[:ssh_username] || default_ssh_username
- if machine_options.has_key?(:ssh_username) && machine_options[:ssh_username] != machine_spec.location['ssh_username']
- Chef::Log.warn("Server #{machine_spec.name} was created with SSH username #{machine_spec.location['ssh_username']} and machine_options specifies username #{machine_options[:ssh_username]}. Using #{machine_spec.location['ssh_username']}. Please edit the node and change the chef_provisioning.location.ssh_username attribute if you want to change it.")
+ username = machine_spec.reference['ssh_username'] || machine_options[:ssh_username] || default_ssh_username
+ if machine_options.has_key?(:ssh_username) && machine_options[:ssh_username] != machine_spec.reference['ssh_username']
+ Chef::Log.warn("Server #{machine_spec.name} was created with SSH username #{machine_spec.reference['ssh_username']} and machine_options specifies username #{machine_options[:ssh_username]}. Using #{machine_spec.reference['ssh_username']}. Please edit the node and change the chef_provisioning.reference.ssh_username attribute if you want to change it.")
  end
  options = {}
- if machine_spec.location[:sudo] || (!machine_spec.location.has_key?(:sudo) && username != 'root')
+ if machine_spec.reference[:sudo] || (!machine_spec.reference.has_key?(:sudo) && username != 'root')
  options[:prefix] = 'sudo '
  end

@@ -733,13 +729,13 @@ EOD
 
  #Enable pty by default
  options[:ssh_pty_enable] = true
- options[:ssh_gateway] = machine_spec.location['ssh_gateway'] if machine_spec.location.has_key?('ssh_gateway')
+ options[:ssh_gateway] = machine_spec.reference['ssh_gateway'] if machine_spec.reference.has_key?('ssh_gateway')

  Chef::Provisioning::Transport::SSH.new(remote_host, username, ssh_options, options, config)
  end

  def determine_remote_host(machine_spec, instance)
- if machine_spec.location['use_private_ip_for_ssh']
+ if machine_spec.reference['use_private_ip_for_ssh']
  instance.private_ip_address
  elsif !instance.public_ip_address
  Chef::Log.warn("Server #{machine_spec.name} has no public ip address. Using private ip '#{instance.private_ip_address}'. Set driver option 'use_private_ip_for_ssh' => true if this will always be the case ...")
@@ -768,10 +764,10 @@ EOD
  raise "Server has key name '#{instance.key_name}', but the corresponding private key was not found locally. Check if the key is in Chef::Config.private_key_paths: #{Chef::Config.private_key_paths.join(', ')}"
  end
  result[:key_data] = [ key ]
- elsif machine_spec.location['key_name']
- key = get_private_key(machine_spec.location['key_name'])
+ elsif machine_spec.reference['key_name']
+ key = get_private_key(machine_spec.reference['key_name'])
  unless key
- raise "Server was created with key name '#{machine_spec.location['key_name']}', but the corresponding private key was not found locally. Check if the key is in Chef::Config.private_key_paths: #{Chef::Config.private_key_paths.join(', ')}"
+ raise "Server was created with key name '#{machine_spec.reference['key_name']}', but the corresponding private key was not found locally. Check if the key is in Chef::Config.private_key_paths: #{Chef::Config.private_key_paths.join(', ')}"
  end
  result[:key_data] = [ key ]
  elsif machine_options[:bootstrap_options] && machine_options[:bootstrap_options][:key_path]
@@ -780,7 +776,7 @@ EOD
  result[:key_data] = [ get_private_key(machine_options[:bootstrap_options][:key_name]) ]
  else
  # TODO make a way to suggest other keys to try ...
- raise "No key found to connect to #{machine_spec.name} (#{machine_spec.location.inspect})!"
+ raise "No key found to connect to #{machine_spec.name} (#{machine_spec.reference.inspect})!"
  end
  result
  end
@@ -792,11 +788,11 @@ EOD
  ohai_hints: { 'ec2' => '' })

  # Defaults
- if !machine_spec.location
+ if !machine_spec.reference
  return Chef::Provisioning::ConvergenceStrategy::NoConverge.new(convergence_options, config)
  end

- if machine_spec.location['is_windows']
+ if machine_spec.reference['is_windows']
  Chef::Provisioning::ConvergenceStrategy::InstallMsi.new(convergence_options, config)
  elsif machine_options[:cached_installer] == true
  Chef::Provisioning::ConvergenceStrategy::InstallCached.new(convergence_options, config)
@@ -816,11 +812,14 @@ EOD
  max_wait_time = 120
  if !yield(image)
  action_handler.report_progress "waiting for #{image_spec.name} (#{image.id} on #{driver_url}) to be ready ..."
- while time_elapsed < 120 && !yield(image)
+ while time_elapsed < max_wait_time && !yield(image)
  action_handler.report_progress "been waiting #{time_elapsed}/#{max_wait_time} -- sleeping #{sleep_time} seconds for #{image_spec.name} (#{image.id} on #{driver_url}) to be ready ..."
  sleep(sleep_time)
  time_elapsed += sleep_time
  end
+ unless yield(image)
+ raise "Image #{image.id} did not become ready within 120 seconds"
+ end
  action_handler.report_progress "Image #{image_spec.name} is now ready"
  end
  end
@@ -837,11 +836,14 @@ EOD
  if !yield(instance)
  if action_handler.should_perform_actions
  action_handler.report_progress "waiting for #{machine_spec.name} (#{instance.id} on #{driver_url}) to be ready ..."
- while time_elapsed < 120 && !yield(instance)
+ while time_elapsed < max_wait_time && !yield(instance)
  action_handler.report_progress "been waiting #{time_elapsed}/#{max_wait_time} -- sleeping #{sleep_time} seconds for #{machine_spec.name} (#{instance.id} on #{driver_url}) to be ready ..."
  sleep(sleep_time)
  time_elapsed += sleep_time
  end
+ unless yield(instance)
+ raise "Image #{instance.id} did not become ready within 120 seconds"
+ end
  action_handler.report_progress "#{machine_spec.name} is now ready"
  end
  end
@@ -856,7 +858,7 @@ EOD
  unless transport.available?
  if action_handler.should_perform_actions
  action_handler.report_progress "waiting for #{machine_spec.name} (#{instance.id} on #{driver_url}) to be connectable (transport up and running) ..."
- while time_elapsed < 120 && !transport.available?
+ while time_elapsed < max_wait_time && !transport.available?
  action_handler.report_progress "been waiting #{time_elapsed}/#{max_wait_time} -- sleeping #{sleep_time} seconds for #{machine_spec.name} (#{instance.id} on #{driver_url}) to be connectable ..."
  sleep(sleep_time)
  time_elapsed += sleep_time
@@ -868,8 +870,8 @@ EOD
  end

  def default_aws_keypair_name(machine_spec)
- if machine_spec.location &&
- Gem::Version.new(machine_spec.location['driver_version']) < Gem::Version.new('0.10')
+ if machine_spec.reference &&
+ Gem::Version.new(machine_spec.reference['driver_version']) < Gem::Version.new('0.10')
  'metal_default'
  else
  'chef_default'
@@ -908,8 +910,8 @@ EOD
  yield machine_spec, actual_instance if block_given?
  next
  end
- elsif machine_spec.location
- Chef::Log.warn "Machine #{machine_spec.name} (#{machine_spec.location['instance_id']} on #{driver_url}) no longer exists. Recreating ..."
+ elsif machine_spec.reference
+ Chef::Log.warn "Machine #{machine_spec.name} (#{machine_spec.reference['instance_id']} on #{driver_url}) no longer exists. Recreating ..."
  end

  bootstrap_options = bootstrap_options_for(action_handler, machine_spec, machine_options)
@@ -934,7 +936,7 @@ EOD
  # Assign each one to a machine spec
  machine_spec = machine_specs.pop
  machine_options = specs_and_options[machine_spec]
- machine_spec.location = {
+ machine_spec.reference = {
  'driver_url' => driver_url,
  'driver_version' => Chef::Provisioning::AWSDriver::VERSION,
  'allocated_at' => Time.now.utc.to_s,
@@ -943,9 +945,9 @@ EOD
  'instance_id' => instance.id
  }
  instance.tags['Name'] = machine_spec.name
- machine_spec.location['key_name'] = bootstrap_options[:key_name] if bootstrap_options[:key_name]
+ machine_spec.reference['key_name'] = bootstrap_options[:key_name] if bootstrap_options[:key_name]
  %w(is_windows ssh_username sudo use_private_ip_for_ssh ssh_gateway).each do |key|
- machine_spec.location[key] = machine_options[key.to_sym] if machine_options[key.to_sym]
+ machine_spec.reference[key] = machine_options[key.to_sym] if machine_options[key.to_sym]
  end
  action_handler.performed_action "machine #{machine_spec.name} created as #{instance.id} on #{driver_url}"

@@ -969,19 +971,79 @@ EOD
  end.to_a
  end

- # The listeners API is different between the SDK v1 and v2
- # http://docs.aws.amazon.com/AWSRubySDK/latest/AWS/ELB/Listener.html
- VALID_LISTENER_KEYS = [:port, :protocol, :instance_port, :instance_protocol]
- def validate_listeners(listeners)
- listeners.each do |listener|
- listener.keys.each do |k|
- unless VALID_LISTENER_KEYS.include?(k)
- raise "#{k} is an invalid listener key, can be one of #{VALID_LISTENER_KEYS.inspect}"
- end
+ def get_listeners(listeners)
+ case listeners
+ when Hash
+ listeners.map do |from, to|
+ from = get_listener(from)
+ from.delete(:instance_port)
+ from.delete(:instance_protocol)
+ to = get_listener(to)
+ to.delete(:port)
+ to.delete(:protocol)
+ to.merge(from)
  end
+ when Array
+ listeners.map { |listener| get_listener(listener) }
+ when nil
+ nil
+ else
+ [ get_listener(listeners) ]
  end
  end

+ def get_listener(listener)
+ result = {}
+
+ case listener
+ when Hash
+ result.merge!(listener)
+ when Array
+ result[:port] = listener[0] if listener.size >= 1
+ result[:protocol] = listener[1] if listener.size >= 2
+ when Symbol,String
+ result[:protocol] = listener
+ when Integer
+ result[:port] = listener
+ else
+ raise "Invalid listener #{listener}"
+ end
+
+ # If either port or protocol are set, set the other
+ if result[:port] && !result[:protocol]
+ result[:protocol] = PROTOCOL_DEFAULTS[result[:port]]
+ elsif result[:protocol] && !result[:port]
+ result[:port] = PORT_DEFAULTS[result[:protocol]]
+ end
+ if result[:instance_port] && !result[:instance_protocol]
+ result[:instance_protocol] = PROTOCOL_DEFAULTS[result[:instance_port]]
+ elsif result[:instance_protocol] && !result[:instance_port]
+ result[:instance_port] = PORT_DEFAULTS[result[:instance_protocol]]
+ end
+
+ # If instance_port is still unset, copy port/protocol over
+ result[:instance_port] ||= result[:port]
+ result[:instance_protocol] ||= result[:protocol]
+
+ result
+ end
+
+ def default_instance_type
+ 't1.micro'
+ end
+
+ PORT_DEFAULTS = {
+ :http => 80,
+ :https => 443,
+ }
+ PROTOCOL_DEFAULTS = {
+ 25 => :tcp,
+ 80 => :http,
+ 443 => :https,
+ 465 => :ssl,
+ 587 => :tcp,
+ }
+
  end
  end
  end
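The get_listeners / get_listener helpers replace the old validate_listeners check: rather than rejecting unknown keys, they accept several shorthand forms and fill in the missing half of each pair from PORT_DEFAULTS and PROTOCOL_DEFAULTS. A few expansions implied by the code above, for illustration (the first form is also what the lb_options[:listeners] ||= get_listeners(:http) default during creation expands to):

    get_listeners(:http)
    # => [{ protocol: :http, port: 80, instance_port: 80, instance_protocol: :http }]

    get_listeners(443)
    # => [{ port: 443, protocol: :https, instance_port: 443, instance_protocol: :https }]

    get_listeners(443 => :http)   # Hash form: external listener => backend
    # => [{ port: 443, protocol: :https, instance_port: 80, instance_protocol: :http }]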