chef-provisioning-aws 3.0.4 → 3.0.6
- checksums.yaml +4 -4
- data/Gemfile +16 -5
- data/Rakefile +15 -6
- data/chef-provisioning-aws.gemspec +17 -17
- data/lib/chef/provider/aws_auto_scaling_group.rb +5 -6
- data/lib/chef/provider/aws_cache_cluster.rb +21 -15
- data/lib/chef/provider/aws_cache_replication_group.rb +12 -8
- data/lib/chef/provider/aws_cache_subnet_group.rb +3 -3
- data/lib/chef/provider/aws_cloudsearch_domain.rb +9 -13
- data/lib/chef/provider/aws_cloudwatch_alarm.rb +10 -12
- data/lib/chef/provider/aws_dhcp_options.rb +18 -21
- data/lib/chef/provider/aws_ebs_volume.rb +24 -26
- data/lib/chef/provider/aws_eip_address.rb +10 -13
- data/lib/chef/provider/aws_elasticsearch_domain.rb +19 -18
- data/lib/chef/provider/aws_iam_instance_profile.rb +5 -7
- data/lib/chef/provider/aws_iam_role.rb +14 -17
- data/lib/chef/provider/aws_image.rb +6 -6
- data/lib/chef/provider/aws_instance.rb +5 -5
- data/lib/chef/provider/aws_internet_gateway.rb +8 -11
- data/lib/chef/provider/aws_key_pair.rb +15 -17
- data/lib/chef/provider/aws_launch_configuration.rb +11 -14
- data/lib/chef/provider/aws_load_balancer.rb +1 -2
- data/lib/chef/provider/aws_nat_gateway.rb +6 -7
- data/lib/chef/provider/aws_network_acl.rb +28 -29
- data/lib/chef/provider/aws_network_interface.rb +25 -27
- data/lib/chef/provider/aws_rds_instance.rb +12 -13
- data/lib/chef/provider/aws_rds_parameter_group.rb +8 -8
- data/lib/chef/provider/aws_rds_subnet_group.rb +8 -9
- data/lib/chef/provider/aws_route_table.rb +19 -20
- data/lib/chef/provider/aws_s3_bucket.rb +22 -25
- data/lib/chef/provider/aws_security_group.rb +268 -285
- data/lib/chef/provider/aws_server_certificate.rb +6 -5
- data/lib/chef/provider/aws_sns_topic.rb +4 -6
- data/lib/chef/provider/aws_sqs_queue.rb +3 -4
- data/lib/chef/provider/aws_subnet.rb +29 -34
- data/lib/chef/provider/aws_vpc.rb +108 -116
- data/lib/chef/provider/aws_vpc_peering_connection.rb +11 -11
- data/lib/chef/provisioning/aws_driver.rb +4 -2
- data/lib/chef/provisioning/aws_driver/aws_provider.rb +234 -241
- data/lib/chef/provisioning/aws_driver/aws_rds_resource.rb +5 -7
- data/lib/chef/provisioning/aws_driver/aws_resource.rb +182 -185
- data/lib/chef/provisioning/aws_driver/aws_resource_with_entry.rb +17 -17
- data/lib/chef/provisioning/aws_driver/aws_taggable.rb +13 -15
- data/lib/chef/provisioning/aws_driver/aws_tagger.rb +47 -48
- data/lib/chef/provisioning/aws_driver/credentials.rb +96 -100
- data/lib/chef/provisioning/aws_driver/credentials2.rb +42 -45
- data/lib/chef/provisioning/aws_driver/driver.rb +1349 -1362
- data/lib/chef/provisioning/aws_driver/exceptions.rb +10 -12
- data/lib/chef/provisioning/aws_driver/super_lwrp.rb +60 -60
- data/lib/chef/provisioning/aws_driver/tagging_strategy/auto_scaling.rb +49 -50
- data/lib/chef/provisioning/aws_driver/tagging_strategy/ec2.rb +37 -38
- data/lib/chef/provisioning/aws_driver/tagging_strategy/elasticsearch.rb +14 -15
- data/lib/chef/provisioning/aws_driver/tagging_strategy/elb.rb +29 -31
- data/lib/chef/provisioning/aws_driver/tagging_strategy/rds.rb +39 -40
- data/lib/chef/provisioning/aws_driver/tagging_strategy/s3.rb +41 -43
- data/lib/chef/provisioning/aws_driver/version.rb +5 -5
- data/lib/chef/provisioning/driver_init/aws.rb +2 -2
- data/lib/chef/resource/aws_auto_scaling_group.rb +1 -1
- data/lib/chef/resource/aws_cache_cluster.rb +9 -12
- data/lib/chef/resource/aws_cache_replication_group.rb +9 -11
- data/lib/chef/resource/aws_cache_subnet_group.rb +8 -10
- data/lib/chef/resource/aws_cloudsearch_domain.rb +4 -5
- data/lib/chef/resource/aws_cloudwatch_alarm.rb +17 -18
- data/lib/chef/resource/aws_dhcp_options.rb +2 -2
- data/lib/chef/resource/aws_ebs_volume.rb +10 -10
- data/lib/chef/resource/aws_eip_address.rb +5 -5
- data/lib/chef/resource/aws_elasticsearch_domain.rb +4 -4
- data/lib/chef/resource/aws_iam_instance_profile.rb +4 -5
- data/lib/chef/resource/aws_iam_role.rb +2 -3
- data/lib/chef/resource/aws_image.rb +3 -3
- data/lib/chef/resource/aws_instance.rb +4 -4
- data/lib/chef/resource/aws_internet_gateway.rb +3 -3
- data/lib/chef/resource/aws_key_pair.rb +7 -7
- data/lib/chef/resource/aws_launch_configuration.rb +4 -4
- data/lib/chef/resource/aws_load_balancer.rb +7 -7
- data/lib/chef/resource/aws_nat_gateway.rb +11 -11
- data/lib/chef/resource/aws_network_acl.rb +7 -8
- data/lib/chef/resource/aws_network_interface.rb +9 -9
- data/lib/chef/resource/aws_rds_instance.rb +4 -4
- data/lib/chef/resource/aws_rds_parameter_group.rb +3 -3
- data/lib/chef/resource/aws_rds_subnet_group.rb +4 -4
- data/lib/chef/resource/aws_route53_hosted_zone.rb +37 -40
- data/lib/chef/resource/aws_route53_record_set.rb +22 -24
- data/lib/chef/resource/aws_route_table.rb +7 -7
- data/lib/chef/resource/aws_s3_bucket.rb +7 -7
- data/lib/chef/resource/aws_security_group.rb +10 -10
- data/lib/chef/resource/aws_server_certificate.rb +6 -8
- data/lib/chef/resource/aws_sns_topic.rb +2 -2
- data/lib/chef/resource/aws_sqs_queue.rb +5 -7
- data/lib/chef/resource/aws_subnet.rb +9 -9
- data/lib/chef/resource/aws_vpc.rb +11 -11
- data/lib/chef/resource/aws_vpc_peering_connection.rb +4 -4
- data/spec/aws_support.rb +44 -45
- data/spec/aws_support/aws_resource_run_wrapper.rb +2 -2
- data/spec/aws_support/deep_matcher.rb +2 -3
- data/spec/aws_support/deep_matcher/fuzzy_match_objects.rb +6 -9
- data/spec/aws_support/deep_matcher/match_values_failure_messages.rb +30 -37
- data/spec/aws_support/deep_matcher/matchable_array.rb +0 -1
- data/spec/aws_support/deep_matcher/matchable_object.rb +1 -2
- data/spec/aws_support/deep_matcher/rspec_monkeypatches.rb +4 -4
- data/spec/aws_support/delayed_stream.rb +2 -2
- data/spec/aws_support/matchers/create_an_aws_object.rb +6 -6
- data/spec/aws_support/matchers/destroy_an_aws_object.rb +6 -6
- data/spec/aws_support/matchers/have_aws_object_tags.rb +4 -5
- data/spec/aws_support/matchers/match_an_aws_object.rb +5 -6
- data/spec/aws_support/matchers/update_an_aws_object.rb +6 -7
- data/spec/integration/aws_auto_scaling_group_spec.rb +56 -64
- data/spec/integration/aws_cache_cluster_spec.rb +70 -71
- data/spec/integration/aws_cache_subnet_group_spec.rb +13 -14
- data/spec/integration/aws_cloudsearch_domain_spec.rb +6 -8
- data/spec/integration/aws_cloudwatch_alarm_spec.rb +200 -208
- data/spec/integration/aws_dhcp_options_spec.rb +32 -43
- data/spec/integration/aws_ebs_volume_spec.rb +52 -73
- data/spec/integration/aws_eip_address_spec.rb +24 -31
- data/spec/integration/aws_elasticsearch_domain_spec.rb +31 -33
- data/spec/integration/aws_iam_instance_profile_spec.rb +36 -45
- data/spec/integration/aws_iam_role_spec.rb +39 -46
- data/spec/integration/aws_internet_gateway_spec.rb +64 -75
- data/spec/integration/aws_key_pair_spec.rb +6 -6
- data/spec/integration/aws_launch_configuration_spec.rb +17 -18
- data/spec/integration/aws_nat_gateway_spec.rb +21 -24
- data/spec/integration/aws_network_acl_spec.rb +81 -95
- data/spec/integration/aws_network_interface_spec.rb +28 -43
- data/spec/integration/aws_rds_instance_spec.rb +29 -40
- data/spec/integration/aws_rds_parameter_group_spec.rb +32 -35
- data/spec/integration/aws_rds_subnet_group_spec.rb +30 -40
- data/spec/integration/aws_route53_hosted_zone_spec.rb +205 -205
- data/spec/integration/aws_route_table_spec.rb +118 -136
- data/spec/integration/aws_s3_bucket_spec.rb +19 -27
- data/spec/integration/aws_security_group_spec.rb +369 -388
- data/spec/integration/aws_server_certificate_spec.rb +16 -18
- data/spec/integration/aws_subnet_spec.rb +44 -58
- data/spec/integration/aws_vpc_peering_connection_spec.rb +43 -50
- data/spec/integration/aws_vpc_spec.rb +99 -115
- data/spec/integration/load_balancer_spec.rb +169 -183
- data/spec/integration/machine_batch_spec.rb +24 -31
- data/spec/integration/machine_image_spec.rb +54 -66
- data/spec/integration/machine_spec.rb +216 -237
- data/spec/persistence_file.txt +219 -0
- data/spec/spec_helper.rb +16 -17
- data/spec/unit/chef/provisioning/aws_driver/credentials_spec.rb +67 -74
- data/spec/unit/chef/provisioning/aws_driver/driver_spec.rb +29 -29
- data/spec/unit/chef/provisioning/aws_driver/route53_spec.rb +13 -15
- metadata +4 -3
data/lib/chef/provisioning/aws_driver/driver.rb

@@ -1,29 +1,29 @@
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-require
-
-require
-require
-require
-require
-require
-require
-require
-
-require
-require
-require
-require
-require
+require "chef/mixin/shell_out"
+require "chef/mixin/deep_merge"
+require "chef/provisioning/driver"
+require "chef/provisioning/convergence_strategy/install_cached"
+require "chef/provisioning/convergence_strategy/install_sh"
+require "chef/provisioning/convergence_strategy/install_msi"
+require "chef/provisioning/convergence_strategy/no_converge"
+require "chef/provisioning/transport/ssh"
+require "chef/provisioning/transport/winrm"
+require "chef/provisioning/machine/windows_machine"
+require "chef/provisioning/machine/unix_machine"
+require "chef/provisioning/machine_spec"
+
+require "chef/provisioning/aws_driver/aws_resource"
+require "chef/provisioning/aws_driver/tagging_strategy/ec2"
+require "chef/provisioning/aws_driver/tagging_strategy/elb"
+require "chef/provisioning/aws_driver/version"
+require "chef/provisioning/aws_driver/credentials"
+require "chef/provisioning/aws_driver/credentials2"
+require "chef/provisioning/aws_driver/aws_tagger"
+
+require "yaml"
+require "aws-sdk"
+require "retryable"
+require "ubuntu_ami"
+require "base64"
 
 # loads the entire aws-sdk
 Aws.eager_autoload!
@@ -37,466 +37,463 @@ AWS_V2_SERVICES = {
   "RDS" => "rds",
   "CloudWatch" => "cloudwatch",
   "AutoScaling" => "auto_scaling"
-}
-Aws.eager_autoload!(:
+}.freeze
+Aws.eager_autoload!(services: AWS_V2_SERVICES.keys)
 
 # Need to load the resources after the SDK because `aws_sdk_types` can mess
 # up AWS loading if they are loaded too early
-require
-require
-require
-require
+require "chef/resource/aws_key_pair"
+require "chef/resource/aws_instance"
+require "chef/resource/aws_image"
+require "chef/resource/aws_load_balancer"
 
 # We add the appropriate attributes to the base resources for tagging support
 class Chef
-  class Resource
-
-
-
-
-
-
-
-
+  class Resource
+    class Machine
+      include Chef::Provisioning::AWSDriver::AWSTaggable
+    end
+    class MachineImage
+      include Chef::Provisioning::AWSDriver::AWSTaggable
+    end
+    class LoadBalancer
+      include Chef::Provisioning::AWSDriver::AWSTaggable
+    end
   end
 end
-end
 
-require
+require "chef/provider/load_balancer"
 class Chef
-  class Provider
-
-
-
-
-
-
-
-
-
+  class Provider
+    class LoadBalancer
+      # We override this so we can specify a machine name as `i-123456`
+      # This is totally a hack until we move away from base resources
+      def get_machine_spec!(machine_name)
+        if machine_name =~ /^i-[0-9a-f]+/
+          Struct.new(:name, :reference).new(machine_name, "instance_id" => machine_name)
+        else
+          Chef::Log.debug "Getting machine spec for #{machine_name}"
+          Provisioning.chef_managed_entry_store(new_resource.chef_server).get!(:machine, machine_name)
+        end
       end
     end
   end
 end
-end
 
 Chef::Provider::Machine.additional_machine_option_keys << :aws_tags
 Chef::Provider::MachineImage.additional_image_option_keys << :aws_tags
 Chef::Provider::LoadBalancer.additional_lb_option_keys << :aws_tags
 
 class Chef
-  module Provisioning
-    module AWSDriver
-
-
-
-
-
-
-
-
-
-
-
-
-
-        Driver.new(driver_url, config)
-      end
-
-      def initialize(driver_url, config)
-        super
-
-        _, profile_name, region = driver_url.split(':')
-        profile_name = nil if profile_name && profile_name.empty?
-        region = nil if region && region.empty?
-
-        credentials = profile_name ? aws_credentials[profile_name] : aws_credentials.default
-        @aws_config = Aws.config.update(
-          access_key_id: credentials[:aws_access_key_id],
-          secret_access_key: credentials[:aws_secret_access_key],
-          region: region || credentials[:region],
-          http_proxy: credentials[:proxy_uri] || nil,
-          session_token: credentials[:aws_session_token] || nil,
-          logger: Chef::Log.logger
-        )
-
-        # TODO document how users could add something to the Aws.config themselves if they want to
-        # Right now we are supporting both V1 and V2, so we create 2 config sets
-        credentials2 = Credentials2.new(:profile_name => profile_name)
-        Chef::Config.chef_provisioning ||= {}
-        @aws_config_2 = {
-          credentials: credentials2.get_credentials,
-          region: region || ENV["AWS_DEFAULT_REGION"] || credentials[:region],
-          # TODO when we get rid of V1 replace the credentials class with something that knows how
-          # to read ~/.aws/config
-          :http_proxy => credentials[:proxy_uri] || nil,
-          logger: Chef::Log.logger,
-          retry_limit: Chef::Config.chef_provisioning[:aws_retry_limit] || 5
-        }
-
-        driver = self
-        Chef::Resource::Machine.send(:define_method, :aws_object) do
-          resource = Chef::Resource::AwsInstance.new(name, nil)
-          resource.driver driver
-          resource.managed_entry_store Chef::Provisioning.chef_managed_entry_store
-          resource.aws_object
-        end
-        Chef::Resource::MachineImage.send(:define_method, :aws_object) do
-          resource = Chef::Resource::AwsImage.new(name, nil)
-          resource.driver driver
-          resource.managed_entry_store Chef::Provisioning.chef_managed_entry_store
-          resource.aws_object
-        end
-        Chef::Resource::LoadBalancer.send(:define_method, :aws_object) do
-          resource = Chef::Resource::AwsLoadBalancer.new(name, nil)
-          resource.driver driver
-          resource.managed_entry_store Chef::Provisioning.chef_managed_entry_store
-          resource.aws_object
-        end
-      end
-
-      def region
-        aws_config_2[:region]
-      end
-
-      def cloudsearch
-        @cloudsearch ||= Aws::CloudSearch::Client.new(aws_config)
-      end
-
-      def self.canonicalize_url(driver_url, config)
-        [ driver_url, config ]
-      end
-
-      def deep_symbolize_keys(hash_like)
-        # Process arrays first...
-        if hash_like.is_a?(Array)
-          # Node attributes are an ImmutableArray so lets convert them to an array first
-          hash_like = hash_like.to_a
-          hash_like.length.times do |e|
-            hash_like[e]=deep_symbolize_keys(hash_like[e]) if hash_like[e].respond_to?(:values) or hash_like[e].is_a?(Array)
+  module Provisioning
+    module AWSDriver
+      # Provisions machines using the AWS SDK
+      class Driver < Chef::Provisioning::Driver
+        include Chef::Mixin::ShellOut
+        include Chef::Mixin::DeepMerge
+
+        attr_reader :aws_config, :aws_config_2
+
+        # URL scheme:
+        # aws:profilename:region
+        # TODO: migration path from fog:AWS - parse that URL
+        # canonical URL calls realpath on <path>
+        def self.from_url(driver_url, config)
+          Driver.new(driver_url, config)
         end
-        return hash_like
-      end
-        # Otherwise return ourselves if not a hash
-        return hash_like if not hash_like.respond_to?(:values)
-        # Otherwise we are hash like, push on through...
-        if hash_like.nil? || hash_like.empty?
-          return {}
-        end
-        r = {}
-        hash_like.each do |key, value|
-          value = deep_symbolize_keys(value) if value.respond_to?(:values) or value.is_a?(Array)
-          r[key.to_sym] = value
-        end
-        r
-      end
-
-      # Load balancer methods
-      def allocate_load_balancer(action_handler, lb_spec, lb_options, machine_specs)
-        lb_options = deep_symbolize_keys(lb_options)
-        lb_options = AWSResource.lookup_options(lb_options, managed_entry_store: lb_spec.managed_entry_store, driver: self)
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        updates << " with tags #{lb_options[:aws_tags]}" if lb_options[:aws_tags]
-
-        action_handler.perform_action updates do
-          # IAM says the server certificate exists, but ELB throws this error
-          Chef::Provisioning::AWSDriver::AWSProvider.retry_with_backoff(::Aws::ElasticLoadBalancing::Errors::CertificateNotFound) do
-            lb_options[:listeners].each do |listener|
-              if listener.has_key?(:server_certificate)
-                listener[:ssl_certificate_id] = listener.delete(:server_certificate)
-                listener[:ssl_certificate_id] = listener[:ssl_certificate_id][:arn]
-              end
-            end
+        def initialize(driver_url, config)
+          super
+
+          _, profile_name, region = driver_url.split(":")
+          profile_name = nil if profile_name && profile_name.empty?
+          region = nil if region && region.empty?
+
+          credentials = profile_name ? aws_credentials[profile_name] : aws_credentials.default
+          @aws_config = Aws.config.update(
+            access_key_id: credentials[:aws_access_key_id],
+            secret_access_key: credentials[:aws_secret_access_key],
+            region: region || credentials[:region],
+            http_proxy: credentials[:proxy_uri] || nil,
+            session_token: credentials[:aws_session_token] || nil,
+            logger: Chef::Log.logger
+          )
+
+          # TODO: document how users could add something to the Aws.config themselves if they want to
+          # Right now we are supporting both V1 and V2, so we create 2 config sets
+          credentials2 = Credentials2.new(profile_name: profile_name)
+          Chef::Config.chef_provisioning ||= {}
+          @aws_config_2 = {
+            credentials: credentials2.get_credentials,
+            region: region || ENV["AWS_DEFAULT_REGION"] || credentials[:region],
+            # TODO: when we get rid of V1 replace the credentials class with something that knows how
+            # to read ~/.aws/config
+            http_proxy: credentials[:proxy_uri] || nil,
+            logger: Chef::Log.logger,
+            retry_limit: Chef::Config.chef_provisioning[:aws_retry_limit] || 5
+          }
 
-
-
+          driver = self
+          Chef::Resource::Machine.send(:define_method, :aws_object) do
+            resource = Chef::Resource::AwsInstance.new(name, nil)
+            resource.driver driver
+            resource.managed_entry_store Chef::Provisioning.chef_managed_entry_store
+            resource.aws_object
           end
+          Chef::Resource::MachineImage.send(:define_method, :aws_object) do
+            resource = Chef::Resource::AwsImage.new(name, nil)
+            resource.driver driver
+            resource.managed_entry_store Chef::Provisioning.chef_managed_entry_store
+            resource.aws_object
+          end
+          Chef::Resource::LoadBalancer.send(:define_method, :aws_object) do
+            resource = Chef::Resource::AwsLoadBalancer.new(name, nil)
+            resource.driver driver
+            resource.managed_entry_store Chef::Provisioning.chef_managed_entry_store
+            resource.aws_object
+          end
+        end
 
-
-
-
-          lb_spec.reference = {
-            'driver_version' => Chef::Provisioning::AWSDriver::VERSION,
-            'allocated_at' => Time.now.utc.to_s,
-          }
-          lb_spec.driver_url = driver_url
+        def region
+          aws_config_2[:region]
         end
-
-
-
-          perform_action = proc { |desc, &block| action_handler.perform_action(desc, &block) }
-          action_handler.perform_action [ "Update load balancer #{lb_spec.name} in #{region}", desc ].flatten, &block
+
+        def cloudsearch
+          @cloudsearch ||= Aws::CloudSearch::Client.new(aws_config)
         end
 
-
-
-          # TODO CloudFormation automatically recreates the load_balancer, we should too
-          raise "Scheme is immutable - you need to :destroy and :create the load_balancer to recreated it with the new scheme"
+        def self.canonicalize_url(driver_url, config)
+          [driver_url, config]
         end
 
-
-
-
-
-
-
-
-              load_balancer_name: actual_elb.load_balancer_name,
-              security_groups: desired.to_a
-            )
+        def deep_symbolize_keys(hash_like)
+          # Process arrays first...
+          if hash_like.is_a?(Array)
+            # Node attributes are an ImmutableArray so lets convert them to an array first
+            hash_like = hash_like.to_a
+            hash_like.length.times do |e|
+              hash_like[e] = deep_symbolize_keys(hash_like[e]) if hash_like[e].respond_to?(:values) || hash_like[e].is_a?(Array)
             end
+            return hash_like
          end
+          # Otherwise return ourselves if not a hash
+          return hash_like unless hash_like.respond_to?(:values)
+          # Otherwise we are hash like, push on through...
+          return {} if hash_like.nil? || hash_like.empty?
+          r = {}
+          hash_like.each do |key, value|
+            value = deep_symbolize_keys(value) if value.respond_to?(:values) || value.is_a?(Array)
+            r[key.to_sym] = value
+          end
+          r
        end
 
-
-
-
-
-
-
-
-
-
-        end
-
-        # Users can switch from availability zones to subnets or vice versa. To ensure we do not
-        # unassign all (which causes an AWS error) we first add all available ones, then remove
-        # an unecessary ones
-        actual_zones_subnets = {}
-        actual_elb.subnets.each do |subnet|
-          actual_zones_subnets[subnet] = Chef::Resource::AwsSubnet.get_aws_object(subnet, driver: self).availability_zone
-        end
-
-        # Only 1 of subnet or AZ will be populated b/c of our check earlier
-        desired_subnets_zones = {}
-        if lb_options[:availability_zones]
-          lb_options[:availability_zones].each do |zone|
-            # If the user specifies availability zone, we find the default subnet for that
-            # AZ because this duplicates the create logic
-            zone = zone.downcase
-            filters = [
-              {:name => 'availabilityZone', :values => [zone]},
-              {:name => 'defaultForAz', :values => ['true']}
-            ]
-            default_subnet = ec2_client.describe_subnets(:filters => filters)[:subnets]
-            if default_subnet.size != 1
-              raise "Could not find default subnet in availability zone #{zone}"
-            end
-            default_subnet = default_subnet[0]
-            desired_subnets_zones[default_subnet[:subnet_id]] = zone
+        # Load balancer methods
+        def allocate_load_balancer(action_handler, lb_spec, lb_options, machine_specs)
+          lb_options = deep_symbolize_keys(lb_options)
+          lb_options = AWSResource.lookup_options(lb_options, managed_entry_store: lb_spec.managed_entry_store, driver: self)
+
+          # renaming lb_options[:port] to lb_options[:load_balancer_port]
+          if lb_options[:listeners]
+            lb_options[:listeners].each do |listener|
+              listener[:load_balancer_port] = listener.delete(:port) if listener[:port]
            end
          end
-
-
-
-
-
-
+          # We delete the attributes, tags, health check, and sticky sessions here because they are not valid in the create call
+          # and must be applied afterward
+          lb_attributes = lb_options.delete(:attributes)
+          lb_aws_tags = lb_options.delete(:aws_tags)
+          health_check = lb_options.delete(:health_check)
+          sticky_sessions = lb_options.delete(:sticky_sessions)
+
+          old_elb = nil
+          actual_elb = load_balancer_for(lb_spec)
+          if actual_elb.nil?
+            lb_options[:listeners] ||= get_listeners(:http)
+
+            if !lb_options[:subnets] && !lb_options[:availability_zones] && machine_specs
+              lb_options[:subnets] = machine_specs.map { |s| ec2_resource.instance(s.reference["instance_id"]).subnet.id }.uniq
            end
-        end
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            perform_action = proc { |desc, &block| action_handler.perform_action(desc, &block) }
+            Chef::Log.debug "AWS Load Balancer options: #{lb_options.inspect}"
+
+            updates = ["create load balancer #{lb_spec.name} in #{region}"]
+            updates << " enable availability zones #{lb_options[:availability_zones]}" if lb_options[:availability_zones]
+            updates << " attach subnets #{lb_options[:subnets].join(', ')}" if lb_options[:subnets]
+            updates << " with listeners #{lb_options[:listeners]}" if lb_options[:listeners]
+            updates << " with security groups #{lb_options[:security_groups]}" if lb_options[:security_groups]
+            updates << " with tags #{lb_options[:aws_tags]}" if lb_options[:aws_tags]
+
+            action_handler.perform_action updates do
+              # IAM says the server certificate exists, but ELB throws this error
+              Chef::Provisioning::AWSDriver::AWSProvider.retry_with_backoff(::Aws::ElasticLoadBalancing::Errors::CertificateNotFound) do
+                lb_options[:listeners].each do |listener|
+                  if listener.key?(:server_certificate)
+                    listener[:ssl_certificate_id] = listener.delete(:server_certificate)
+                    listener[:ssl_certificate_id] = listener[:ssl_certificate_id][:arn]
+                  end
+                end
+
+                lb_options[:load_balancer_name] = lb_spec.name
+                actual_elb = elb.create_load_balancer(lb_options)
              end
+
+              # load aws object for load balancer after create
+              actual_elb = load_balancer_for(lb_spec)
+
+              lb_spec.reference = {
+                "driver_version" => Chef::Provisioning::AWSDriver::VERSION,
+                "allocated_at" => Time.now.utc.to_s
+              }
+              lb_spec.driver_url = driver_url
+            end
+          else
+            # Header gets printed the first time we make an update
+            perform_action = proc do |desc, &block|
+              perform_action = proc { |desc, &block| action_handler.perform_action(desc, &block) }
+              action_handler.perform_action ["Update load balancer #{lb_spec.name} in #{region}", desc].flatten, &block
            end
-        end
 
-
-
-
-            action += " (availability zones #{disable_zones.join(', ')})"
-            perform_action.call(action) do
-              elb.detach_load_balancer_from_subnets(
-                load_balancer_name: actual_elb.load_balancer_name,
-                subnets: detach_subnets
-              )
+          # TODO: refactor this whole giant method into many smaller method calls
+          if lb_options[:scheme] && lb_options[:scheme].downcase != actual_elb.scheme
+            # TODO: CloudFormation automatically recreates the load_balancer, we should too
+            raise "Scheme is immutable - you need to :destroy and :create the load_balancer to recreated it with the new scheme"
          end
-          end
-        end
 
-
-
-
-
-
-
-
+          # Update security groups
+          if lb_options[:security_groups]
+            current = actual_elb.security_groups
+            desired = lb_options[:security_groups]
+            if current != desired
+              perform_action.call(" updating security groups to #{desired.to_a}") do
+                elb_client.apply_security_groups_to_load_balancer(
+                  load_balancer_name: actual_elb.load_balancer_name,
+                  security_groups: desired.to_a
+                )
+              end
+            end
+          end
 
-        if
-          #
-          #
-
-
-
+          if lb_options[:availability_zones] || lb_options[:subnets]
+            # A subnet always belongs to an availability zone. When specifying a ELB spec, you can either
+            # specify subnets OR AZs but not both. You cannot specify multiple subnets in the same AZ.
+            # You must specify at least 1 subnet or AZ. On an update you cannot remove all subnets
+            # or AZs - it must belong to one.
+            if lb_options[:availability_zones] && lb_options[:subnets]
+              # We do this check here because there is no atomic call we can make to specify both
+              # subnets and AZs at the same time
+              raise "You cannot specify both `availability_zones` and `subnets`"
            end
 
-
-
+            # Users can switch from availability zones to subnets or vice versa. To ensure we do not
+            # unassign all (which causes an AWS error) we first add all available ones, then remove
+            # an unecessary ones
+            actual_zones_subnets = {}
+            actual_elb.subnets.each do |subnet|
+              actual_zones_subnets[subnet] = Chef::Resource::AwsSubnet.get_aws_object(subnet, driver: self).availability_zone
            end
 
-
-
+            # Only 1 of subnet or AZ will be populated b/c of our check earlier
+            desired_subnets_zones = {}
+            if lb_options[:availability_zones]
+              lb_options[:availability_zones].each do |zone|
+                # If the user specifies availability zone, we find the default subnet for that
+                # AZ because this duplicates the create logic
+                zone = zone.downcase
+                filters = [
+                  { name: "availabilityZone", values: [zone] },
+                  { name: "defaultForAz", values: ["true"] }
+                ]
+                default_subnet = ec2_client.describe_subnets(filters: filters)[:subnets]
+                if default_subnet.size != 1
+                  raise "Could not find default subnet in availability zone #{zone}"
+                end
+                default_subnet = default_subnet[0]
+                desired_subnets_zones[default_subnet[:subnet_id]] = zone
+              end
+            end
+            unless lb_options[:subnets].nil? || lb_options[:subnets].empty?
+              subnet_query = ec2_client.describe_subnets(subnet_ids: lb_options[:subnets])[:subnets]
+              # AWS raises an error on an unknown subnet, but not an unknown AZ
+              subnet_query.each do |subnet|
+                zone = subnet[:availability_zone].downcase
+                desired_subnets_zones[subnet[:subnet_id]] = zone
+              end
            end
 
-
-
-
-
-
+            # We only bother attaching subnets, because doing this automatically attaches the AZ
+            attach_subnets = desired_subnets_zones.keys - actual_zones_subnets.keys
+            unless attach_subnets.empty?
+              action = " attach subnets #{attach_subnets.join(', ')}"
+              enable_zones = (desired_subnets_zones.map { |s, z| z if attach_subnets.include?(s) }).compact
+              action += " (availability zones #{enable_zones.join(', ')})"
+              perform_action.call(action) do
+                begin
+                  elb.attach_load_balancer_to_subnets(
+                    load_balancer_name: actual_elb.load_balancer_name,
+                    subnets: attach_subnets
+                  )
+                rescue ::Aws::ElasticLoadBalancing::Errors::InvalidConfigurationRequest => e
+                  Chef::Log.error "You cannot currently move from 1 subnet to another in the same availability zone. " \
+                                  "Amazon does not have an atomic operation which allows this. You must create a new " \
+                                  "ELB with the correct subnets and move instances into it. Tried to attach subets " \
+                                  "#{attach_subnets.join(', ')} (availability zones #{enable_zones.join(', ')}) to " \
+                                  "existing ELB named #{actual_elb.load_balancer_name}"
+                  raise e
+                end
              end
-
-
-
-
-
+            end
+
+            detach_subnets = actual_zones_subnets.keys - desired_subnets_zones.keys
+            unless detach_subnets.empty?
+              action = " detach subnets #{detach_subnets.join(', ')}"
+              disable_zones = (actual_zones_subnets.map { |s, z| z if detach_subnets.include?(s) }).compact
+              action += " (availability zones #{disable_zones.join(', ')})"
+              perform_action.call(action) do
+                elb.detach_load_balancer_from_subnets(
                  load_balancer_name: actual_elb.load_balancer_name,
-
-
-
+                  subnets: detach_subnets
+                )
+              end
+            end
+          end
+
+          # Update listeners - THIS IS NOT ATOMIC
+          if lb_options[:listeners]
+            add_listeners = {}
+            lb_options[:listeners].each { |l| add_listeners[l[:load_balancer_port]] = l }
+            actual_elb.listener_descriptions.each do |listener_description|
+              listener = listener_description.listener
+              desired_listener = add_listeners.delete(listener.load_balancer_port)
+
+              if desired_listener
+                # listener.(port|protocol|instance_port|instance_protocol) are immutable for the life
+                # of the listener - must create a new one and delete old one
+                immutable_updates = []
+                if listener.protocol != desired_listener[:protocol].to_s.upcase
+                  immutable_updates << " update protocol from #{listener.protocol.inspect} to #{desired_listener[:protocol].inspect}"
+                end
+
+                if listener.instance_port != desired_listener[:instance_port]
+                  immutable_updates << " update instance port from #{listener.instance_port.inspect} to #{desired_listener[:instance_port].inspect}"
+                end
+
+                if listener.instance_protocol != desired_listener[:instance_protocol].to_s.upcase
+                  immutable_updates << " update instance protocol from #{listener.instance_protocol.inspect} to #{desired_listener[:instance_protocol].inspect}"
+                end
+
+                if !immutable_updates.empty?
+                  perform_action.call(immutable_updates) do
+                    elb.delete_load_balancer_listeners(load_balancer_name: actual_elb.load_balancer_name, load_balancer_ports: [listener.load_balancer_port])
+                    elb.create_load_balancer_listeners(listeners: [desired_listener], load_balancer_name: actual_elb.load_balancer_name)
+                    # actual_elb.listeners.create(desired_listener)
+                  end
+                elsif listener.ssl_certificate_id && !server_certificate_eql?(listener.ssl_certificate_id,
+                                                                              server_cert_from_spec(desired_listener))
+                  # Server certificate is mutable - if no immutable changes required a full recreate, update cert
+                  perform_action.call(" update server certificate from #{listener.ssl_certificate_id} to #{server_cert_from_spec(desired_listener)}") do
+                    elb.set_load_balancer_listener_ssl_certificate(
+                      load_balancer_name: actual_elb.load_balancer_name,
+                      load_balancer_port: listener.load_balancer_port,
+                      ssl_certificate_id: server_cert_from_spec(desired_listener)
+                    )
+                  end
+                end
+              else
+                perform_action.call(" remove listener #{listener.load_balancer_port}") do
+                  elb.delete_load_balancer_listeners(load_balancer_name: actual_elb.load_balancer_name, load_balancer_ports: [listener.load_balancer_port])
+                end
+              end
            end
-
-
-
+
+            add_listeners.values.each do |listener|
+              updates = [" add listener #{listener[:load_balancer_port]}"]
+              updates << " set protocol to #{listener[:protocol].inspect}"
+              updates << " set instance port to #{listener[:instance_port].inspect}"
+              updates << " set instance protocol to #{listener[:instance_protocol].inspect}"
+              updates << " set server certificate to #{server_cert_from_spec(listener)}" if server_cert_from_spec(listener)
+              perform_action.call(updates) do
+                elb.create_load_balancer_listeners(listeners: [listener], load_balancer_name: actual_elb.load_balancer_name)
+              end
            end
          end
 
-
-
-
-
-
-
-
-
+          converge_elb_tags(actual_elb, lb_aws_tags, action_handler)
+
+          # Update load balancer attributes
+          if lb_attributes
+            current = elb.describe_load_balancer_attributes(load_balancer_name: actual_elb.load_balancer_name)[:load_balancer_attributes].to_hash
+            # Need to do a deep copy w/ Marshal load/dump to avoid overwriting current
+            desired = deep_merge!(lb_attributes, Marshal.load(Marshal.dump(current)))
+            if current != desired
+              perform_action.call(" updating attributes to #{desired.inspect}") do
+                elb.modify_load_balancer_attributes(
+                  load_balancer_name: actual_elb.load_balancer_name,
+                  load_balancer_attributes: desired.to_hash
+                )
+              end
            end
          end
-        end
-      end
 
-
-
-
-
-
-
-
-
-
-
-
-
-            )
+          # Update the load balancer health check, as above
+          if health_check
+            current = elb.describe_load_balancers(load_balancer_names: [actual_elb.load_balancer_name])[:load_balancer_descriptions][0][:health_check].to_hash
+            desired = deep_merge!(health_check, Marshal.load(Marshal.dump(current)))
+            if current != desired
+              perform_action.call(" updating health check to #{desired.inspect}") do
+                elb.configure_health_check(
+                  load_balancer_name: actual_elb.load_balancer_name,
+                  health_check: desired.to_hash
+                )
+              end
+            end
          end
-          end
-        end
 
-
-
-
-
-          if current != desired
-            perform_action.call(" updating health check to #{desired.inspect}") do
-              elb.configure_health_check(
-                load_balancer_name: actual_elb.load_balancer_name,
-                health_check: desired.to_hash
-              )
-            end
-          end
-        end
+          # Update the load balancer sticky sessions
+          if sticky_sessions
+            policy_name = "#{actual_elb.load_balancer_name}-sticky-session-policy"
+            policies = elb.describe_load_balancer_policies(load_balancer_name: actual_elb.load_balancer_name)
 
-
-
-
-          policies = elb.describe_load_balancer_policies(load_balancer_name: actual_elb.load_balancer_name)
-
-          existing_cookie_policy = policies[:policy_descriptions].detect { |pd| pd[:policy_type_name] == 'AppCookieStickinessPolicyType' && pd[:policy_name] == policy_name}
-          existing_cookie_name = existing_cookie_policy ? (existing_cookie_policy[:policy_attribute_descriptions].detect { |pad| pad[:attribute_name] == 'CookieName' })[:attribute_value] : nil
-          desired_cookie_name = sticky_sessions[:cookie_name]
-
-          # Create or update the policy to have the desired cookie_name
-          if existing_cookie_policy.nil?
-            perform_action.call(" creating sticky sessions with cookie_name #{desired_cookie_name}") do
-              elb.create_app_cookie_stickiness_policy(
-                load_balancer_name: actual_elb.load_balancer_name,
-                policy_name: policy_name,
-                cookie_name: desired_cookie_name
-              )
-            end
-          elsif existing_cookie_name && existing_cookie_name != desired_cookie_name
-            perform_action.call(" updating sticky sessions from cookie_name #{existing_cookie_name} to cookie_name #{desired_cookie_name}") do
-              elb.delete_load_balancer_policy(
-                load_balancer_name: actual_elb.load_balancer_name,
-                policy_name: policy_name
-              )
-              elb.create_app_cookie_stickiness_policy(
-                load_balancer_name: actual_elb.load_balancer_name,
-                policy_name: policy_name,
-                cookie_name: desired_cookie_name
-              )
-            end
-          end
+            existing_cookie_policy = policies[:policy_descriptions].detect { |pd| pd[:policy_type_name] == "AppCookieStickinessPolicyType" && pd[:policy_name] == policy_name }
+            existing_cookie_name = existing_cookie_policy ? (existing_cookie_policy[:policy_attribute_descriptions].detect { |pad| pad[:attribute_name] == "CookieName" })[:attribute_value] : nil
+            desired_cookie_name = sticky_sessions[:cookie_name]
 
-
-
+            # Create or update the policy to have the desired cookie_name
+            if existing_cookie_policy.nil?
+              perform_action.call(" creating sticky sessions with cookie_name #{desired_cookie_name}") do
+                elb.create_app_cookie_stickiness_policy(
+                  load_balancer_name: actual_elb.load_balancer_name,
+                  policy_name: policy_name,
+                  cookie_name: desired_cookie_name
+                )
+              end
+            elsif existing_cookie_name && existing_cookie_name != desired_cookie_name
+              perform_action.call(" updating sticky sessions from cookie_name #{existing_cookie_name} to cookie_name #{desired_cookie_name}") do
+                elb.delete_load_balancer_policy(
+                  load_balancer_name: actual_elb.load_balancer_name,
+                  policy_name: policy_name
+                )
+                elb.create_app_cookie_stickiness_policy(
+                  load_balancer_name: actual_elb.load_balancer_name,
+                  policy_name: policy_name,
+                  cookie_name: desired_cookie_name
+                )
+              end
+            end
 
-
-
+            # Ensure the policy is attached to the appropriate listener
+            elb_description = elb.describe_load_balancers(load_balancer_names: [actual_elb.load_balancer_name])[:load_balancer_descriptions].first
+            listeners = elb_description[:listener_descriptions]
 
-
-
+            sticky_sessions[:ports].each do |ss_port|
+              listener = listeners.detect { |ld| ld[:listener][:load_balancer_port] == ss_port }
 
-
+              next if listener.nil?
+              policy_names = listener[:policy_names]
+
+              next if policy_names.include?(policy_name)
              policy_names << policy_name
 
              elb.set_load_balancer_policies_of_listener(
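The hunk above documents the driver URL scheme (`aws:profilename:region`) and reworks `deep_symbolize_keys`. A minimal recipe sketch of how a driver URL and the `aws_tags` machine option fit together (the DSL calls are standard chef-provisioning; the profile, region, instance type, and tag values here are illustrative):

require "chef/provisioning/aws_driver"

# "aws:<profile>:<region>" - both segments are optional, so "aws::us-east-1"
# uses the default profile from ~/.aws/credentials with an explicit region.
with_driver "aws:staging:us-east-1"

machine "web-1" do
  machine_options bootstrap_options: { instance_type: "t2.micro" }, # hypothetical size
                  aws_tags: { "team" => "ops" }                     # applied by converge_ec2_tags
end

And `deep_symbolize_keys`, shown above, normalizes nested string-keyed node attributes (including arrays of hashes) before they reach the SDK; for example:

deep_symbolize_keys("a" => { "b" => 1 }, "c" => [{ "d" => 2 }])
# => { a: { b: 1 }, c: [{ d: 2 }] }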
@@ -506,157 +503,150 @@ module AWSDriver
              )
            end
          end
-        end
-      end
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+          # Update instance list, but only if there are machines specified
+          if machine_specs
+            instances_to_add = []
+            if actual_elb.instances
+              assigned_instance_ids = actual_elb.instances.map(&:instance_id)
+              instances_to_add = machine_specs.reject { |s| assigned_instance_ids.include?(s.reference["instance_id"]) }
+              instance_ids_to_remove = assigned_instance_ids - machine_specs.map { |s| s.reference["instance_id"] }
+            end
+
+            unless instances_to_add.empty?
+              perform_action.call(" add machines #{instances_to_add.map(&:name).join(', ')}") do
+                instance_ids_to_add = instances_to_add.map { |s| s.reference["instance_id"] }
+                Chef::Log.debug("Adding instances #{instance_ids_to_add.join(', ')} to load balancer #{actual_elb.load_balancer_name} in region #{region}")
+                instances_to_add.each do |instance|
+                  elb.register_instances_with_load_balancer(instances: [{ instance_id: instance.reference["instance_id"] }], load_balancer_name: actual_elb.load_balancer_name)
+                end
+              end
+            end
+
+            unless instance_ids_to_remove.empty?
+              perform_action.call(" remove instances #{instance_ids_to_remove}") do
+                instances_to_remove = Hash[instance_ids_to_remove.map { |id| [:instance_id, id] }]
+                elb.deregister_instances_from_load_balancer(instances: [instances_to_remove], load_balancer_name: actual_elb.load_balancer_name)
+              end
            end
          end
-      end
 
-
-
-
-
+          # We have successfully switched all our instances to the (possibly) new LB
+          # so it is safe to delete the old one.
+          old_elb.delete unless old_elb.nil?
+        ensure
+          # Something went wrong before we could moved instances from the old ELB to the new one
+          # Don't delete the old ELB, but warn users there could now be 2 ELBs with the same name
+          unless old_elb.nil?
+            Chef::Log.warn("It is possible there are now 2 ELB instances - #{old_elb.load_balancer_name} and #{actual_elb.load_balancer_name}. " \
+                           "Determine which is correct and manually clean up the other.")
          end
        end
-      end
 
-
-
-
-
-
-
-
-      # Don't delete the old ELB, but warn users there could now be 2 ELBs with the same name
-      unless old_elb.nil?
-        Chef::Log.warn("It is possible there are now 2 ELB instances - #{old_elb.load_balancer_name} and #{actual_elb.load_balancer_name}. " +
-          "Determine which is correct and manually clean up the other.")
-      end
-      end
+        # Compare two server certificates by casting them both to strings.
+        #
+        # The parameters should either be a String containing the
+        # certificate ARN, or a IAM::ServerCertificate object.
+        def server_certificate_eql?(cert1, cert2)
+          server_cert_to_string(cert1) == server_cert_to_string(cert2)
+        end
 
-
-
-
-
-
-
-
+        def server_cert_to_string(cert)
+          if cert.is_a?(Hash) && cert.key?(:arn)
+            cert[:arn]
+          else
+            cert
+          end
+        end
 
-
-
-
-
-
-
-
+        # Retreive the server certificate from a listener spec, prefering
+        # the server_certificate key.
+        def server_cert_from_spec(spec)
+          if spec[:server_certificate]
+            spec[:server_certificate]
+          elsif spec[:ssl_certificate_id]
+            spec[:ssl_certificate_id]
+          end
+        end
 
-
-      # the server_certificate key.
-      def server_cert_from_spec(spec)
-        if spec[:server_certificate]
-          spec[:server_certificate]
-        elsif spec[:ssl_certificate_id]
-          spec[:ssl_certificate_id]
-        else
-          nil
-        end
-      end
+        def ready_load_balancer(action_handler, lb_spec, lb_options, machine_spec); end
 
-
-
+        def destroy_load_balancer(action_handler, lb_spec, lb_options)
+          lb_options = deep_symbolize_keys(lb_options)
+          return if lb_spec.nil?
 
-
-
-
+          actual_elb = load_balancer_for(lb_spec)
+          if actual_elb
+            # Remove ELB from AWS
+            action_handler.perform_action "Deleting EC2 ELB #{lb_spec.id}" do
+              elb.delete_load_balancer(load_balancer_name: actual_elb.load_balancer_name)
+            end
+          end
 
-
-
-        # Remove ELB from AWS
-        action_handler.perform_action "Deleting EC2 ELB #{lb_spec.id}" do
-          elb.delete_load_balancer({load_balancer_name: actual_elb.load_balancer_name })
+          # Remove LB spec from databag
+          lb_spec.delete(action_handler)
        end
-        end
-
-        # Remove LB spec from databag
-        lb_spec.delete(action_handler)
-      end
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        # Image methods
+        def allocate_image(action_handler, image_spec, image_options, machine_spec, machine_options)
+          actual_image = image_for(image_spec)
+          image_options = deep_symbolize_keys(image_options)
+          machine_options = deep_symbolize_keys(machine_options)
+          aws_tags = image_options.delete(:aws_tags) || {}
+          if actual_image.nil? || !actual_image.exists? || actual_image.state.to_sym == :failed
+            action_handler.perform_action "Create image #{image_spec.name} from machine #{machine_spec.name} with options #{image_options.inspect}" do
+              image_options[:name] ||= image_spec.name
+              image_options[:instance_id] ||= machine_spec.reference["instance_id"]
+              image_options[:description] ||= "Image #{image_spec.name} created from machine #{machine_spec.name}"
+              Chef::Log.debug "AWS Image options: #{image_options.inspect}"
+              image_type = ec2_client.create_image(image_options.to_hash)
+              actual_image = ec2_resource.image(image_type.image_id)
+              image_spec.reference = {
+                "driver_version" => Chef::Provisioning::AWSDriver::VERSION,
+                "image_id" => actual_image.image_id,
+                "allocated_at" => Time.now.to_i,
+                "from-instance" => image_options[:instance_id]
+              }
+              image_spec.driver_url = driver_url
+            end
+          end
+          aws_tags["from-instance"] = image_options[:instance_id] if image_options[:instance_id]
+          converge_ec2_tags(actual_image, aws_tags, action_handler)
        end
-        end
-        aws_tags['from-instance'] = image_options[:instance_id] if image_options[:instance_id]
-        converge_ec2_tags(actual_image, aws_tags, action_handler)
-      end
 
-
-
-
-
-
-
-
-
-
-
-
-
+        def ready_image(action_handler, image_spec, image_options)
+          actual_image = image_for(image_spec)
+          if actual_image.nil? || !actual_image.exists?
+            raise "Cannot ready an image that does not exist"
+          else
+            image_options = deep_symbolize_keys(image_options)
+            aws_tags = image_options.delete(:aws_tags) || {}
+            aws_tags["from-instance"] = image_spec.reference["from-instance"] if image_spec.reference["from-instance"]
+            converge_ec2_tags(actual_image, aws_tags, action_handler)
+            if actual_image.state.to_sym != :available
+              action_handler.report_progress "Waiting for image to be ready ..."
+              wait_until_ready_image(action_handler, image_spec, actual_image)
+            end
+          end
        end
-        end
-      end
 
-
-
-
-
-
-
-
-
-
-
+        def destroy_image(action_handler, image_spec, image_options)
+          image_options = deep_symbolize_keys(image_options)
+          # TODO: the driver should automatically be set by `inline_resource`
+          d = self
+          Provisioning.inline_resource(action_handler) do
+            aws_image image_spec.name do
+              action :destroy
+              driver d
+              chef_server image_spec.managed_entry_store.chef_server
+              managed_entry_store image_spec.managed_entry_store
+            end
+          end
        end
-      end
-    end
 
-
-
-
+        def user_data
+          # TODO: Make this use HTTPS at some point.
+          <<EOD
 <powershell>
 winrm quickconfig -q
 winrm set winrm/config/winrs '@{MaxMemoryPerShellMB="300"}'
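The certificate helpers in the hunk above accept either a bare ARN string or a hash carrying an `:arn` key; `server_cert_to_string` unwraps the latter, so the two forms compare equal. A quick sketch of their behavior (the ARN is illustrative):

arn = "arn:aws:iam::111122223333:server-certificate/my-cert" # hypothetical ARN

server_cert_from_spec(ssl_certificate_id: arn)
# => the ARN string

server_cert_from_spec(server_certificate: { arn: arn })
# => { arn: ... } - :server_certificate is the preferred key

server_certificate_eql?({ arn: arn }, arn)
# => true, because both sides normalize to the ARN string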
@@ -672,9 +662,9 @@ sc config winrm start=auto
 net start winrm
 </powershell>
 EOD
-
+        end
 
-
+        def https_user_data
          <<EOD
 <powershell>
 winrm quickconfig -q
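Both heredocs now close their methods with an explicit `end` (the change this hunk records). The `<powershell>` payload is what a Windows instance runs at boot to enable WinRM; EC2 expects user data to be Base64-encoded on the wire. A sketch of how such a payload reaches the API (an illustration under that assumption, not code from this diff; the driver actually threads user data through its bootstrap options):

require "base64"
require "aws-sdk"

ec2 = ::Aws::EC2::Client.new(region: "us-east-1")
ec2.run_instances(
  image_id: "ami-12345678", # hypothetical Windows AMI
  min_count: 1,
  max_count: 1,
  # EC2 decodes and executes this at first boot
  user_data: Base64.encode64("<powershell>winrm quickconfig -q</powershell>")
)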
@@ -710,116 +700,116 @@ sc config winrm start=auto
|
|
710
700
|
net start winrm
|
711
701
|
</powershell>
|
712
702
|
EOD
|
713
|
-
|
703
|
+
end
|
714
704
|
|
715
|
-
|
716
|
-
|
717
|
-
|
718
|
-
|
719
|
-
|
705
|
+
# Machine methods
|
706
|
+
def allocate_machine(action_handler, machine_spec, machine_options)
|
707
|
+
machine_options = deep_symbolize_keys(machine_options)
|
708
|
+
instance = instance_for(machine_spec)
|
709
|
+
bootstrap_options = bootstrap_options_for(action_handler, machine_spec, machine_options)
|
720
710
|
|
721
|
-
|
722
|
-
|
723
|
-
|
724
|
-
|
711
|
+
if instance.nil? || !instance.exists? || instance.state.name == "terminated"
|
712
|
+
action_handler.perform_action "Create #{machine_spec.name} with AMI #{bootstrap_options[:image_id]} in #{region}" do
|
713
|
+
Chef::Log.debug "Creating instance with bootstrap options #{bootstrap_options}"
|
714
|
+
instance = create_instance_and_reference(bootstrap_options, action_handler, machine_spec, machine_options)
|
715
|
+
end
|
716
|
+
end
|
717
|
+
converge_ec2_tags(instance, machine_options[:aws_tags], action_handler)
|
725
718
|
end
|
726
|
-
end
|
727
|
-
converge_ec2_tags(instance, machine_options[:aws_tags], action_handler)
|
728
|
-
end
|
729
719
|
|
730
|
-
|
731
|
-
|
732
|
-
|
733
|
-
|
734
|
-
|
735
|
-
|
720
|
+
def allocate_machines(action_handler, specs_and_options, parallelizer)
|
721
|
+
create_servers(action_handler, specs_and_options, parallelizer) do |machine_spec, _server|
|
722
|
+
yield machine_spec
|
723
|
+
end
|
724
|
+
specs_and_options.keys
|
725
|
+
end
|

-
-
-
-
+def ready_machine(action_handler, machine_spec, machine_options)
+  machine_options = deep_symbolize_keys(machine_options)
+  instance = instance_for(machine_spec)
+  converge_ec2_tags(instance, machine_options[:aws_tags], action_handler)

-
-
-
+  if instance.nil?
+    raise "Machine #{machine_spec.name} does not have an instance associated with it, or instance does not exist."
+  end

-
-
-
-
-
+  if instance.state.name != "running"
+    wait_until_machine(action_handler, machine_spec, "finish stopping", instance) { |instance| instance.state.name != "stopping" }
+    if instance.state.name == "stopped"
+      action_handler.perform_action "Start #{machine_spec.name} (#{machine_spec.reference['instance_id']}) in #{region} ..." do
+        instance.start
+      end
+    end
+    wait_until_instance_running(action_handler, machine_spec, instance)
   end
-    end
-  wait_until_instance_running(action_handler, machine_spec, instance)
-  end

-
-
-
-
-
-
-
-
-
-
-
-
-
+  # Windows machines potentially do a bunch of extra stuff - setting hostname,
+  # sending out encrypted password, restarting instance, etc.
+  if machine_spec.reference["is_windows"]
+    wait_until_machine(action_handler, machine_spec, "receive 'Windows is ready' message from the AWS console", instance) do |instance|
+      instance.console_output.output
+      # seems to be a bug as we need to run this twice
+      # to consistently ensure the output is fully pulled
+      encoded_output = instance.console_output.output
+      if encoded_output.nil? || encoded_output.empty?
+        false
+      else
+        output = Base64.decode64(encoded_output)
+        output =~ /Message: Windows is Ready to use/
+      end
+    end
   end
-
-
-
-  machine_for(machine_spec, machine_options, instance)
-end
+  wait_for_transport(action_handler, machine_spec, machine_options, instance)
+  machine_for(machine_spec, machine_options, instance)
+end
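The "Windows is ready" wait in `ready_machine` polls the EC2 console output, which the API returns Base64-encoded; hence the decode-then-match step above. Isolated, with a fabricated console string, the check amounts to:

    require "base64"

    encoded_output = Base64.encode64("...\nMessage: Windows is Ready to use\n...")
    if encoded_output.nil? || encoded_output.empty?
      false
    else
      output = Base64.decode64(encoded_output)
      !(output =~ /Message: Windows is Ready to use/).nil?  # => true
    end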

-
-
-
-
-
-
+def connect_to_machine(name, chef_server = nil)
+  machine_spec = if name.is_a?(MachineSpec)
+                   name
+                 else
+                   Chef::Provisioning::ChefMachineSpec.get(name, chef_server)
+                 end

-
-
+  machine_for(machine_spec, machine_spec.reference)
+end

-
-
-
-
-
-
-
-
+def stop_machine(action_handler, machine_spec, machine_options)
+  machine_options = deep_symbolize_keys(machine_options)
+  instance = instance_for(machine_spec)
+  if instance && instance.exists?
+    wait_until_machine(action_handler, machine_spec, "finish coming up so we can stop it", instance) { |instance| instance.state.name != "pending" }
+    if instance.state.name == "running"
+      action_handler.perform_action "Stop #{machine_spec.name} (#{instance.id}) in #{region} ..." do
+        instance.stop
+      end
+    end
+    wait_until_machine(action_handler, machine_spec, "stop", instance) { |instance| %w{stopped terminated}.include?(instance.state.name) }
   end
 end
-    wait_until_machine(action_handler, machine_spec, "stop", instance) { |instance| %w[stopped terminated].include?(instance.state.name) }
-  end
-end

-
-
-
-
-
-
-
-
-
-
-
+def destroy_machine(action_handler, machine_spec, machine_options)
+  machine_options = deep_symbolize_keys(machine_options)
+  d = self
+  Provisioning.inline_resource(action_handler) do
+    aws_instance machine_spec.name do
+      action :destroy
+      driver d
+      chef_server machine_spec.managed_entry_store.chef_server
+      managed_entry_store machine_spec.managed_entry_store
+    end
+  end

-
-
-
-
+  # TODO: move this into the aws_instance provider somehow
+  strategy = convergence_strategy_for(machine_spec, machine_options)
+  strategy.cleanup_convergence(action_handler, machine_spec)
+end

-
-
-
+def ec2
+  @ec2 ||= ::Aws::EC2::Client.new(aws_config)
+end

-
-
+AWS_V2_SERVICES.each do |load_name, short_name|
+  class_eval <<-META

     def #{short_name}_client
       @#{short_name}_client ||= ::Aws::#{load_name}::Client.new(**aws_config_2)
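The hunk above replaces a stack of hand-written, memoized per-service accessors with a single `class_eval` loop over `AWS_V2_SERVICES`. Assuming that constant maps SDK namespaces to method prefixes (its definition lives earlier in the file and is not shown here), one iteration -- say `["EC2", "ec2"]` -- generates roughly:

    # Roughly what the class_eval template expands to for ["EC2", "ec2"];
    # the exact AWS_V2_SERVICES contents are an assumption here.
    def ec2_client
      @ec2_client ||= ::Aws::EC2::Client.new(**aws_config_2)
    end

    def ec2_resource
      @ec2_resource ||= ::Aws::EC2::Resource.new(**(aws_config_2.merge({ client: ec2_client })))
    end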
@@ -829,829 +819,826 @@ EOD
       @#{short_name}_resource ||= ::Aws::#{load_name}::Resource.new(**(aws_config_2.merge({client: #{short_name}_client})))
     end

-
-
-
-  def elb
-    @elb ||= ::Aws::ElasticLoadBalancing::Client.new(aws_config)
-  end
-
-  def elasticache
-    @elasticache ||= ::Aws::ElastiCache::Client.new(aws_config)
-  end
-
-  def iam
-    @iam ||= ::Aws::IAM::Client.new(aws_config)
-  end
+  META
+end

-
-
-
+def elb
+  @elb ||= ::Aws::ElasticLoadBalancing::Client.new(aws_config)
+end

-
-
-
+def elasticache
+  @elasticache ||= ::Aws::ElastiCache::Client.new(aws_config)
+end

-
-
-
+def iam
+  @iam ||= ::Aws::IAM::Client.new(aws_config)
+end

-
-
-
+def rds
+  @rds ||= ::Aws::RDS::Client.new(aws_config)
+end

-
-
-
+def s3_client
+  @s3 ||= ::Aws::S3::Client.new(aws_config)
+end

-
-
-
+def sns
+  @sns ||= ::Aws::SNS::Client.new(aws_config)
+end

-
-
-
-      partition: parts[1],
-      service: parts[2],
-      region: parts[3],
-      account_id: parts[4],
-      resource: parts[5]
-    }
-  end
+def sqs
+  @sqs ||= ::Aws::SQS::Client.new(aws_config)
+end

-
-
-
-    current_user = iam.get_user
-    arn = current_user[:user][:arn]
-  rescue ::Aws::IAM::Errors::AccessDenied => e
-    # If we don't have access, the error message still tells us our account ID and user ...
-    # https://forums.aws.amazon.com/thread.jspa?messageID=394344
-    if e.to_s !~ /\b(arn:aws:iam::[0-9]{12}:\S*)/
-      raise "IAM error response for GetUser did not include user ARN. Can't retrieve account ID."
-    end
-    arn = $1
-  end
-  parse_arn(arn)[:account_id]
-end
+def auto_scaling
+  @auto_scaling ||= ::Aws::AutoScaling.new(config: aws_config)
+end

-
-
+def build_arn(partition: "aws", service: nil, region: aws_config[:region], account_id: self.account_id, resource: nil)
+  "arn:#{partition}:#{service}:#{region}:#{account_id}:#{resource}"
+end

-
-
+def parse_arn(arn)
+  parts = arn.split(":", 6)
+  {
+    partition: parts[1],
+    service: parts[2],
+    region: parts[3],
+    account_id: parts[4],
+    resource: parts[5]
+  }
+end
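`build_arn` and `parse_arn` are near-inverses around the colon-delimited ARN layout; splitting with a limit of 6 keeps any colons inside the resource part intact. A quick round trip with a made-up account ID:

    parts = "arn:aws:iam::123456789012:user/alice".split(":", 6)
    # => ["arn", "aws", "iam", "", "123456789012", "user/alice"]
    { partition: parts[1], service: parts[2], region: parts[3],
      account_id: parts[4], resource: parts[5] }
    # => { partition: "aws", service: "iam", region: "",
    #      account_id: "123456789012", resource: "user/alice" }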

-
-
-
+def account_id
+  begin
+    # We've got an AWS account root credential or an IAM admin with access rights
+    current_user = iam.get_user
+    arn = current_user[:user][:arn]
+  rescue ::Aws::IAM::Errors::AccessDenied => e
+    # If we don't have access, the error message still tells us our account ID and user ...
+    # https://forums.aws.amazon.com/thread.jspa?messageID=394344
+    if e.to_s !~ /\b(arn:aws:iam::[0-9]{12}:\S*)/
+      raise "IAM error response for GetUser did not include user ARN. Can't retrieve account ID."
+    end
+    arn = Regexp.last_match(1)
+  end
+  parse_arn(arn)[:account_id]
+end
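The rescue branch in `account_id` relies on the AccessDenied message embedding the caller's own ARN, and `Regexp.last_match(1)` (the chefstyle replacement for the old `$1`) pulls it back out. Standalone, with a fabricated error message:

    message = "User: arn:aws:iam::123456789012:user/alice is not authorized to perform: iam:GetUser"
    if message =~ /\b(arn:aws:iam::[0-9]{12}:\S*)/
      Regexp.last_match(1)  # => "arn:aws:iam::123456789012:user/alice"
    end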

-
-
-    else
-      Chef::Provisioning::Machine::UnixMachine.new(machine_spec, transport_for(machine_spec, machine_options, instance), convergence_strategy_for(machine_spec, machine_options))
-    end
-  end
+# For creating things like AWS keypairs exclusively
+@@chef_default_lock = Mutex.new

-
-
-    if bootstrap_options==nil
-      bootstrap_options=Hash({})
-    end
-    # These are hardcoded for now - only 1 machine at a time
-    bootstrap_options[:min_count] = bootstrap_options[:max_count] = 1
-    bootstrap_options[:instance_type] ||= default_instance_type
-    image_id = machine_options[:from_image] || bootstrap_options[:image_id] || machine_options[:image_id] || default_ami_for_region(region)
-    bootstrap_options[:image_id] = image_id
-    bootstrap_options.delete(:key_path)
-    if !bootstrap_options[:key_name]
-      Chef::Log.debug('No key specified, generating a default one...')
-      bootstrap_options[:key_name] = default_aws_keypair(action_handler, machine_spec)
-    end
-    if bootstrap_options[:user_data]
-      bootstrap_options[:user_data] = Base64.encode64(bootstrap_options[:user_data])
-    end
+def machine_for(machine_spec, machine_options, instance = nil)
+  instance ||= instance_for(machine_spec)

-
-
-
-    end
-    placement = {}
-    if bootstrap_options[:availability_zone]
-      placement[:availability_zone] = bootstrap_options.delete(:availability_zone)
-    end
-    if bootstrap_options[:placement_group]
-      placement[:group_name] = bootstrap_options.delete(:placement_group)
-    end
-    unless bootstrap_options.fetch(:dedicated_tenancy, nil).nil?
-      placement[:tenancy] = bootstrap_options.delete(:dedicated_tenancy) ? "dedicated" : "default"
-    end
-    unless placement.empty?
-      bootstrap_options[:placement] = placement
-    end
-    if bootstrap_options[:subnet]
-      bootstrap_options[:subnet_id] = bootstrap_options.delete(:subnet)
-    end
-    if bootstrap_options[:iam_instance_profile] && bootstrap_options[:iam_instance_profile].is_a?(String)
-      bootstrap_options[:iam_instance_profile] = {name: bootstrap_options[:iam_instance_profile]}
-    end
+  unless instance
+    raise "Instance for node #{machine_spec.name} has not been created!"
+  end

-
-
-    if bootstrap_options[:user_data].nil?
-      case machine_options[:winrm_transport]
-      when 'https'
-        data = https_user_data
+  if machine_spec.reference["is_windows"]
+    Chef::Provisioning::Machine::WindowsMachine.new(machine_spec, transport_for(machine_spec, machine_options, instance), convergence_strategy_for(machine_spec, machine_options))
   else
-
+    Chef::Provisioning::Machine::UnixMachine.new(machine_spec, transport_for(machine_spec, machine_options, instance), convergence_strategy_for(machine_spec, machine_options))
   end
-      bootstrap_options[:user_data] = Base64.encode64(data)
 end
-    else
-      Chef::Log.debug "Non-windows, not setting Default userdata"
-    end
-
-    bootstrap_options = AWSResource.lookup_options(bootstrap_options, managed_entry_store: machine_spec.managed_entry_store, driver: self)

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-      network_interface[:private_ip_address] = bootstrap_options.delete(:private_ip_address)
-    end
-    if bootstrap_options[:security_group_ids]
-      network_interface[:groups] = bootstrap_options.delete(:security_group_ids)
-    end
-    bootstrap_options[:network_interfaces] = [network_interface]
-  end
+def bootstrap_options_for(action_handler, machine_spec, machine_options)
+  bootstrap_options = deep_symbolize_keys(machine_options[:bootstrap_options])
+  bootstrap_options = Hash({}) if bootstrap_options.nil?
+  # These are hardcoded for now - only 1 machine at a time
+  bootstrap_options[:min_count] = bootstrap_options[:max_count] = 1
+  bootstrap_options[:instance_type] ||= default_instance_type
+  image_id = machine_options[:from_image] || bootstrap_options[:image_id] || machine_options[:image_id] || default_ami_for_region(region)
+  bootstrap_options[:image_id] = image_id
+  bootstrap_options.delete(:key_path)
+  unless bootstrap_options[:key_name]
+    Chef::Log.debug("No key specified, generating a default one...")
+    bootstrap_options[:key_name] = default_aws_keypair(action_handler, machine_spec)
+  end
+  if bootstrap_options[:user_data]
+    bootstrap_options[:user_data] = Base64.encode64(bootstrap_options[:user_data])
+  end

-
-
-
+  # V1 -> V2 backwards compatability support
+  unless bootstrap_options.fetch(:monitoring_enabled, nil).nil?
+    bootstrap_options[:monitoring] = { enabled: bootstrap_options.delete(:monitoring_enabled) }
+  end
+  placement = {}
+  if bootstrap_options[:availability_zone]
+    placement[:availability_zone] = bootstrap_options.delete(:availability_zone)
+  end
+  if bootstrap_options[:placement_group]
+    placement[:group_name] = bootstrap_options.delete(:placement_group)
+  end
+  unless bootstrap_options.fetch(:dedicated_tenancy, nil).nil?
+    placement[:tenancy] = bootstrap_options.delete(:dedicated_tenancy) ? "dedicated" : "default"
+  end
+  bootstrap_options[:placement] = placement unless placement.empty?
+  if bootstrap_options[:subnet]
+    bootstrap_options[:subnet_id] = bootstrap_options.delete(:subnet)
+  end
+  if bootstrap_options[:iam_instance_profile] && bootstrap_options[:iam_instance_profile].is_a?(String)
+    bootstrap_options[:iam_instance_profile] = { name: bootstrap_options[:iam_instance_profile] }
+  end

-
-
-
+  if machine_options[:is_windows]
+    Chef::Log.debug "Setting Default windows userdata based on WinRM transport"
+    if bootstrap_options[:user_data].nil?
+      data = case machine_options[:winrm_transport]
+             when "https"
+               https_user_data
+             else
+               user_data
+             end
+      bootstrap_options[:user_data] = Base64.encode64(data)
+    end
+  else
+    Chef::Log.debug "Non-windows, not setting Default userdata"
+  end

-
-    'Administrator'
-  end
+  bootstrap_options = AWSResource.lookup_options(bootstrap_options, managed_entry_store: machine_spec.managed_entry_store, driver: self)

-
-
-
+  # In the migration from V1 to V2 we still support associate_public_ip_address at the top level
+  # we do this after the lookup because we have to copy any present subnets, etc. into the
+  # network interfaces block
+  unless bootstrap_options.fetch(:associate_public_ip_address, nil).nil?
+    if bootstrap_options[:network_interfaces]
+      raise "If you specify network_interfaces you must specify associate_public_ip_address in that list"
+    end
+    network_interface = {
+      device_index: 0,
+      associate_public_ip_address: bootstrap_options.delete(:associate_public_ip_address),
+      delete_on_termination: true
+    }
+    if bootstrap_options[:subnet_id]
+      network_interface[:subnet_id] = bootstrap_options.delete(:subnet_id)
+    end
+    if bootstrap_options[:private_ip_address]
+      network_interface[:private_ip_address] = bootstrap_options.delete(:private_ip_address)
+    end
+    if bootstrap_options[:security_group_ids]
+      network_interface[:groups] = bootstrap_options.delete(:security_group_ids)
+    end
+    bootstrap_options[:network_interfaces] = [network_interface]
+  end

-
-
-    keypair_name = bootstrap_options[:key_name]
-    actual_key_pair = ec2_resource.key_pair(keypair_name)
-    if !actual_key_pair.exists?
-      ec2_resource.key_pairs.create(keypair_name)
+  Chef::Log.debug "AWS Bootstrap options: #{bootstrap_options.inspect}"
+  deep_symbolize_keys(bootstrap_options)
 end
-    actual_key_pair
-  end
-end

-
-
-  end
-
-  def instance_for(machine_spec)
-    if machine_spec.reference
-      if machine_spec.driver_url != driver_url
-        raise "Switching a machine's driver from #{machine_spec.driver_url} to #{driver_url} is not currently supported! Use machine :destroy and then re-create the machine on the new driver."
+def default_ssh_username
+  "ubuntu"
 end
-      Chef::Resource::AwsInstance.get_aws_object(machine_spec.reference['instance_id'], driver: self, managed_entry_store: machine_spec.managed_entry_store, required: false)
-    end
-  end

-
-
-
-    result
-  end
+def default_winrm_username
+  "Administrator"
+end

-
-
-
+def default_winrm_transport
+  "http"
+end

-
-
-
-
-
-
-
+def keypair_for(bootstrap_options)
+  if bootstrap_options[:key_name]
+    keypair_name = bootstrap_options[:key_name]
+    actual_key_pair = ec2_resource.key_pair(keypair_name)
+    unless actual_key_pair.exists?
+      ec2_resource.key_pairs.create(keypair_name)
+    end
+    actual_key_pair
+  end
+end

-
-
-
-    driver_options[:aws_credentials]
-  else
-    credentials = Credentials.new
-    if driver_options[:aws_config_file]
-      credentials.load_ini(driver_options[:aws_config_file])
-    elsif driver_options[:aws_csv_file]
-      credentials.load_csv(driver_options[:aws_csv_file])
-    else
-      credentials.load_default
-    end
-    credentials
-  end
-end
+def load_balancer_for(lb_spec)
+  Chef::Resource::AwsLoadBalancer.get_aws_object(lb_spec.name, driver: self, managed_entry_store: lb_spec.managed_entry_store, required: false)
+end

-
-
-
+def instance_for(machine_spec)
+  if machine_spec.reference
+    if machine_spec.driver_url != driver_url
+      raise "Switching a machine's driver from #{machine_spec.driver_url} to #{driver_url} is not currently supported! Use machine :destroy and then re-create the machine on the new driver."
+    end
+    Chef::Resource::AwsInstance.get_aws_object(machine_spec.reference["instance_id"], driver: self, managed_entry_store: machine_spec.managed_entry_store, required: false)
+  end
+end

-
-
-
+def instances_for(machine_specs)
+  result = {}
+  machine_specs.each { |machine_spec| result[machine_spec] = instance_for(machine_spec) }
+  result
+end

-
-
-
+def image_for(image_spec)
+  Chef::Resource::AwsImage.get_aws_object(image_spec.name, driver: self, managed_entry_store: image_spec.managed_entry_store, required: false)
+end

-
-
-
+def transport_for(machine_spec, machine_options, instance)
+  if machine_spec.reference["is_windows"]
+    create_winrm_transport(machine_spec, machine_options, instance)
+  else
+    create_ssh_transport(machine_spec, machine_options, instance)
+  end
+end

-
-
-
-
-
-
-
+def aws_credentials
+  # Grab the list of possible credentials
+  @aws_credentials ||= if driver_options[:aws_credentials]
+                         driver_options[:aws_credentials]
+                       else
+                         credentials = Credentials.new
+                         if driver_options[:aws_config_file]
+                           credentials.load_ini(driver_options[:aws_config_file])
+                         elsif driver_options[:aws_csv_file]
+                           credentials.load_csv(driver_options[:aws_csv_file])
+                         else
+                           credentials.load_default
+                         end
+                         credentials
+                       end
+end

-
-
+def default_ami_arch
+  "amd64"
+end

-
-
+def default_ami_release
+  "vivid"
+end

-
-
-
-    virtualization_type = criteria['virtualization_type'] || default_ami_virtualization_type
+def default_ami_root_store
+  "ebs"
+end

-
-
+def default_ami_virtualization_type
+  "hvm"
+end

-
-
-
-
-
-
-
-                     machine_options[:winrm_transport] ||
-                     default_winrm_transport
-    type = case transport_type
-           when 'http'
-             :plaintext
-           when 'https'
-             :ssl
-           end
-    port = machine_spec.reference['winrm_port'] ||
-           machine_options[:winrm_port] ||
-           case transport_type
-           when 'http'
-             '5985'
-           when 'https'
-             '5986'
-           end
-    endpoint = "#{transport_type}://#{remote_host}:#{port}/wsman"
-
-    pem_bytes = get_private_key(instance.key_name)
-
-    password = machine_spec.reference['winrm_password'] ||
-               machine_options[:winrm_password] ||
-               begin
-                 if machine_spec.reference['winrm_encrypted_password']
-                   decoded = Base64.decode64(machine_spec.reference['winrm_encrypted_password'])
-                 else
-                   encrypted_admin_password = instance.password_data.password_data
-                   if encrypted_admin_password.nil? || encrypted_admin_password.empty?
-                     raise "You did not specify winrm_password in the machine options and no encrytpted password could be fetched from the instance"
-                   end
-                   machine_spec.reference['winrm_encrypted_password']||=encrypted_admin_password
-                   # ^^ saves encrypted password to the machine_spec
-                   decoded = Base64.decode64(encrypted_admin_password)
-                 end
-                 # decrypt so we can utilize
-                 private_key = OpenSSL::PKey::RSA.new(get_private_key(instance.key_name))
-                 private_key.private_decrypt decoded
-               end
+def default_ami_for_criteria(region, arch, release, root_store, virtualization_type)
+  ami = Ubuntu.release(release).amis.find do |ami|
+    ami.arch == arch &&
+      ami.root_store == root_store &&
+      ami.region == region &&
+      ami.virtualization_type == virtualization_type
+  end

-
-                   machine_options[:winrm_disable_sspi] ||
-                   false # default to Negotiate
-    basic_auth_only = machine_spec.reference['winrm_basic_auth_only'] ||
-                      machine_options[:winrm_basic_auth_only] ||
-                      false # disallow Basic auth by default
-    no_ssl_peer_verification = machine_spec.reference['winrm_no_ssl_peer_verification'] ||
-                               machine_options[:winrm_no_ssl_peer_verification] ||
-                               false #disallow MITM potential by default
-
-    winrm_options = {
-      user: username,
-      pass: password,
-      disable_sspi: disable_sspi,
-      basic_auth_only: basic_auth_only,
-      no_ssl_peer_verification: no_ssl_peer_verification,
-    }
-
-    if no_ssl_peer_verification or type != :ssl
-      # => we won't verify certs at all
-      Chef::Log.info "No SSL or no peer verification"
-    elsif machine_spec.reference['winrm_ssl_thumbprint']
-      # we have stored the cert
-      Chef::Log.info "Using stored fingerprint"
-    else
-      # we need to retrieve the cert and verify it by connecting just to
-      # retrieve the ssl certificate and compare it to what we see in the
-      # console logs
-      instance.console_output.data.output
-      # again this seem to need to be run twice, to ensure
-      encoded_output = instance.console_output.data.output
-      console_lines = Base64.decode64(encoded_output).lines
-      fp_context = OpenSSL::SSL::SSLContext.new
-      tcp_connection = TCPSocket.new(instance.private_ip_address, port)
-      ssl_connection = OpenSSL::SSL::SSLSocket.new(tcp_connection, fp_context)
-
-      begin
-        ssl_connection.connect
-      rescue OpenSSL::SSL::SSLError => e
-        raise e unless e.message =~ /bad signature/
-      ensure
-        tcp_connection.close
+  ami.name || raise("Default AMI not found")
 end

-
+def default_ami_for_region(region, criteria = {})
+  Chef::Log.debug("Choosing default AMI for region '#{region}'")

-
-
-
-
-      winrm_subject = winrm_cert.subject.to_s.split('=').last.upcase
-      winrm_thumbprint=OpenSSL::Digest::SHA1.new(winrm_cert.to_der).to_s.upcase
+  arch = criteria["arch"] || default_ami_arch
+  release = criteria["release"] || default_ami_release
+  root_store = criteria["root_store"] || default_ami_root_store
+  virtualization_type = criteria["virtualization_type"] || default_ami_virtualization_type

-
-      Chef::Log.fatal "Winrm ssl port certificate differs from rdp console logs"
-      end
-      # now cache these for later use in the reference
-      if machine_spec.reference['winrm_ssl_subject'] != winrm_subject
-        machine_spec.reference['winrm_ssl_subject'] = winrm_subject
-      end
-      if machine_spec.reference['winrm_ssl_thumbprint'] != winrm_thumbprint
-        machine_spec.reference['winrm_ssl_thumbprint'] = winrm_thumbprint
-      end
-      if machine_spec.reference['winrm_ssl_cert'] != winrm_cert.to_pem
-        machine_spec.reference['winrm_ssl_cert'] = winrm_cert.to_pem
+  default_ami_for_criteria(region, arch, release, root_store, virtualization_type)
 end
-      end
-
-      if machine_spec.reference['winrm_ssl_thumbprint']
-        winrm_options[:ssl_peer_fingerprint] = machine_spec.reference['winrm_ssl_thumbprint']
-      end

-
-
+def create_winrm_transport(machine_spec, machine_options, instance)
+  remote_host = determine_remote_host(machine_spec, instance)
+  username = machine_spec.reference["winrm_username"] ||
+             machine_options[:winrm_username] ||
+             default_winrm_username
+  # default to http for now, should upgrade to https when knife support self-signed
+  transport_type = machine_spec.reference["winrm_transport"] ||
+                   machine_options[:winrm_transport] ||
+                   default_winrm_transport
+  type = case transport_type
+         when "http"
+           :plaintext
+         when "https"
+           :ssl
+         end
+  port = machine_spec.reference["winrm_port"] ||
+         machine_options[:winrm_port] ||
+         case transport_type
+         when "http"
+           "5985"
+         when "https"
+           "5986"
+         end
+  endpoint = "#{transport_type}://#{remote_host}:#{port}/wsman"
+
+  pem_bytes = get_private_key(instance.key_name)
+
+  password = machine_spec.reference["winrm_password"] ||
+             machine_options[:winrm_password] ||
+             begin
+               if machine_spec.reference["winrm_encrypted_password"]
+                 decoded = Base64.decode64(machine_spec.reference["winrm_encrypted_password"])
+               else
+                 encrypted_admin_password = instance.password_data.password_data
+                 if encrypted_admin_password.nil? || encrypted_admin_password.empty?
+                   raise "You did not specify winrm_password in the machine options and no encrytpted password could be fetched from the instance"
+                 end
+                 machine_spec.reference["winrm_encrypted_password"] ||= encrypted_admin_password
+                 # ^^ saves encrypted password to the machine_spec
+                 decoded = Base64.decode64(encrypted_admin_password)
+               end
+               # decrypt so we can utilize
+               private_key = OpenSSL::PKey::RSA.new(get_private_key(instance.key_name))
+               private_key.private_decrypt decoded
+             end

-
-
-
-
-
-
-
-
-
-
+  disable_sspi = machine_spec.reference["winrm_disable_sspi"] ||
+                 machine_options[:winrm_disable_sspi] ||
+                 false # default to Negotiate
+  basic_auth_only = machine_spec.reference["winrm_basic_auth_only"] ||
+                    machine_options[:winrm_basic_auth_only] ||
+                    false # disallow Basic auth by default
+  no_ssl_peer_verification = machine_spec.reference["winrm_no_ssl_peer_verification"] ||
+                             machine_options[:winrm_no_ssl_peer_verification] ||
+                             false # disallow MITM potential by default
+
+  winrm_options = {
+    user: username,
+    pass: password,
+    disable_sspi: disable_sspi,
+    basic_auth_only: basic_auth_only,
+    no_ssl_peer_verification: no_ssl_peer_verification
+  }

-
+  if no_ssl_peer_verification || (type != :ssl)
+    # => we won't verify certs at all
+    Chef::Log.info "No SSL or no peer verification"
+  elsif machine_spec.reference["winrm_ssl_thumbprint"]
+    # we have stored the cert
+    Chef::Log.info "Using stored fingerprint"
+  else
+    # we need to retrieve the cert and verify it by connecting just to
+    # retrieve the ssl certificate and compare it to what we see in the
+    # console logs
+    instance.console_output.data.output
+    # again this seem to need to be run twice, to ensure
+    encoded_output = instance.console_output.data.output
+    console_lines = Base64.decode64(encoded_output).lines
+    fp_context = OpenSSL::SSL::SSLContext.new
+    tcp_connection = TCPSocket.new(instance.private_ip_address, port)
+    ssl_connection = OpenSSL::SSL::SSLSocket.new(tcp_connection, fp_context)
+
+    begin
+      ssl_connection.connect
+    rescue OpenSSL::SSL::SSLError => e
+      raise e unless e.message =~ /bad signature/
+    ensure
+      tcp_connection.close
+    end

-
-    options[:ssh_pty_enable] = true
+    winrm_cert = ssl_connection.peer_cert_chain.first

-
-
-
-
-
+    rdp_thumbprint = console_lines.grep(
+      /RDPCERTIFICATE-THUMBPRINT/
+    )[-1].split(": ").last.chomp
+    rdp_subject = console_lines.grep(
+      /RDPCERTIFICATE-SUBJECTNAME/
+    )[-1].split(": ").last.chomp
+    winrm_subject = winrm_cert.subject.to_s.split("=").last.upcase
+    winrm_thumbprint = OpenSSL::Digest::SHA1.new(winrm_cert.to_der).to_s.upcase

-
-
+    if (rdp_subject != winrm_subject) || (rdp_thumbprint != winrm_thumbprint)
+      Chef::Log.fatal "Winrm ssl port certificate differs from rdp console logs"
+    end
+    # now cache these for later use in the reference
+    if machine_spec.reference["winrm_ssl_subject"] != winrm_subject
+      machine_spec.reference["winrm_ssl_subject"] = winrm_subject
+    end
+    if machine_spec.reference["winrm_ssl_thumbprint"] != winrm_thumbprint
+      machine_spec.reference["winrm_ssl_thumbprint"] = winrm_thumbprint
+    end
+    if machine_spec.reference["winrm_ssl_cert"] != winrm_cert.to_pem
+      machine_spec.reference["winrm_ssl_cert"] = winrm_cert.to_pem
+    end
+  end

-
-
-
-    # The machine_spec has the old config key, lets update it - a successful chef converge will save the machine_spec
-    # TODO in 2.0 get rid of this update
-    machine_spec.reference.delete('use_private_ip_for_ssh')
-    machine_spec.reference['transport_address_location'] = :private_ip
-    instance.private_ip_address
-  elsif transport_address_location == :private_ip
-    instance.private_ip_address
-  elsif transport_address_location == :dns
-    instance.dns_name
-  elsif !instance.public_ip_address && instance.private_ip_address
-    Chef::Log.warn("Server #{machine_spec.name} has no public ip address. Using private ip '#{instance.private_ip_address}'. Set machine_options ':transport_address_location => :private_ip' if this will always be the case ...")
-    instance.private_ip_address
-  elsif instance.public_ip_address
-    instance.public_ip_address
-  else
-    raise "Server #{instance.id} has no private or public IP address!"
-  end
-end
+  if machine_spec.reference["winrm_ssl_thumbprint"]
+    winrm_options[:ssl_peer_fingerprint] = machine_spec.reference["winrm_ssl_thumbprint"]
+  end

-
-
-    instance.private_key
-  elsif instance.respond_to?(:key_name) && instance.key_name
-    key = get_private_key(instance.key_name)
-    unless key
-      raise "Server has key name '#{instance.key_name}', but the corresponding private key was not found locally. Check if the key is in Chef::Config.private_key_paths: #{Chef::Config.private_key_paths.join(', ')}"
-    end
-    key
-  elsif machine_spec.reference['key_name']
-    key = get_private_key(machine_spec.reference['key_name'])
-    unless key
-      raise "Server was created with key name '#{machine_spec.reference['key_name']}', but the corresponding private key was not found locally. Check if the key is in Chef::Config.private_key_paths: #{Chef::Config.private_key_paths.join(', ')}"
-    end
-    key
-  elsif machine_options[:bootstrap_options] && machine_options[:bootstrap_options][:key_path]
-    IO.read(machine_options[:bootstrap_options][:key_path])
-  elsif machine_options[:bootstrap_options] && machine_options[:bootstrap_options][:key_name]
-    get_private_key(machine_options[:bootstrap_options][:key_name])
-  else
-    # TODO make a way to suggest other keys to try ...
-    raise "No key found to connect to #{machine_spec.name} (#{machine_spec.reference.inspect})!"
-  end
-end
+  Chef::Provisioning::Transport::WinRM.new(endpoint.to_s, type, winrm_options, {})
+end
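The password branch of `create_winrm_transport` recovers the Windows Administrator password the way EC2 hands it out: Base64-encoded RSA ciphertext that only the instance's key pair can open. A minimal standalone sketch of that step; both arguments are assumptions (`pem` being the key pair's private key, `encrypted` the `password_data` value):

    require "base64"
    require "openssl"

    # pem: the key pair's private key PEM (assumed); encrypted: the
    # Base64-encoded password_data string returned by EC2 (assumed).
    def decrypt_admin_password(pem, encrypted)
      private_key = OpenSSL::PKey::RSA.new(pem)
      private_key.private_decrypt(Base64.decode64(encrypted))
    end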

-
-
-
-
-
-
-
-
-
-
-    result[:keys_only] = true
-    result[:key_data] = [ private_key_for(machine_spec, machine_options, instance) ]
-  end
-  result
-end
+def create_ssh_transport(machine_spec, machine_options, instance)
+  ssh_options = ssh_options_for(machine_spec, machine_options, instance)
+  username = machine_spec.reference["ssh_username"] || machine_options[:ssh_username] || default_ssh_username
+  if machine_options.key?(:ssh_username) && machine_options[:ssh_username] != machine_spec.reference["ssh_username"]
+    Chef::Log.warn("Server #{machine_spec.name} was created with SSH username #{machine_spec.reference['ssh_username']} and machine_options specifies username #{machine_options[:ssh_username]}. Using #{machine_spec.reference['ssh_username']}. Please edit the node and change the chef_provisioning.reference.ssh_username attribute if you want to change it.")
+  end
+  options = {}
+  if machine_spec.reference[:sudo] || (!machine_spec.reference.key?(:sudo) && username != "root")
+    options[:prefix] = "sudo "
+  end

-
-  # Tell Ohai that this is an EC2 instance so that it runs the EC2 plugin
-  convergence_options = Cheffish::MergedConfig.new(
-    machine_options[:convergence_options] || {},
-    ohai_hints: { 'ec2' => '' })
-  convergence_options=deep_symbolize_keys(convergence_options)
+  remote_host = determine_remote_host(machine_spec, instance)

-
-
-    return Chef::Provisioning::ConvergenceStrategy::NoConverge.new(convergence_options, config)
-  end
+  # Enable pty by default
+  options[:ssh_pty_enable] = true

-
-
-
-
-
-    Chef::Provisioning::ConvergenceStrategy::InstallSh.new(convergence_options, config)
-  end
-end
+  if machine_spec.reference.key?("ssh_gateway")
+    options[:ssh_gateway] = machine_spec.reference["ssh_gateway"]
+  elsif machine_options[:ssh_gateway]
+    options[:ssh_gateway] = machine_options[:ssh_gateway]
+  end

-
-
-  action_handler.report_progress "Image #{image_spec.name} is now ready"
-end
+  Chef::Provisioning::Transport::SSH.new(remote_host, username, ssh_options, options, config)
+end

-
-
-
-
-
-
-
-
-
-
-
-
-
-  #
-
-
-
-
+def determine_remote_host(machine_spec, instance)
+  transport_address_location = (machine_spec.reference["transport_address_location"] || :none).to_sym
+  if machine_spec.reference["use_private_ip_for_ssh"]
+    # The machine_spec has the old config key, lets update it - a successful chef converge will save the machine_spec
+    # TODO in 2.0 get rid of this update
+    machine_spec.reference.delete("use_private_ip_for_ssh")
+    machine_spec.reference["transport_address_location"] = :private_ip
+    instance.private_ip_address
+  elsif transport_address_location == :private_ip
+    instance.private_ip_address
+  elsif transport_address_location == :dns
+    instance.dns_name
+  elsif !instance.public_ip_address && instance.private_ip_address
+    Chef::Log.warn("Server #{machine_spec.name} has no public ip address. Using private ip '#{instance.private_ip_address}'. Set machine_options ':transport_address_location => :private_ip' if this will always be the case ...")
+    instance.private_ip_address
+  elsif instance.public_ip_address
+    instance.public_ip_address
+  else
+    raise "Server #{instance.id} has no private or public IP address!"
   end
 end
-  end
-end
-
-def wait_until_instance_running(action_handler, machine_spec, instance=nil)
-  wait_until_machine(action_handler, machine_spec, "become ready", instance) { |instance|
-    instance.state.name == "running"
-  }
-end

-
-
-
-
-
-
-
-
-
-
-
-
-
-      # We have to manually reload the instance each loop, otherwise data is stale
-      instance.reload
-      unless yield(instance)
-        raise "Instance #{machine_spec.name} (#{instance.id} on #{driver_url}) did not #{output_msg} within #{max_wait_time} seconds"
+def private_key_for(machine_spec, machine_options, instance)
+  if instance.respond_to?(:private_key) && instance.private_key
+    instance.private_key
+  elsif instance.respond_to?(:key_name) && instance.key_name
+    key = get_private_key(instance.key_name)
+    unless key
+      raise "Server has key name '#{instance.key_name}', but the corresponding private key was not found locally. Check if the key is in Chef::Config.private_key_paths: #{Chef::Config.private_key_paths.join(', ')}"
+    end
+    key
+  elsif machine_spec.reference["key_name"]
+    key = get_private_key(machine_spec.reference["key_name"])
+    unless key
+      raise "Server was created with key name '#{machine_spec.reference['key_name']}', but the corresponding private key was not found locally. Check if the key is in Chef::Config.private_key_paths: #{Chef::Config.private_key_paths.join(', ')}"
     end
+    key
+  elsif machine_options[:bootstrap_options] && machine_options[:bootstrap_options][:key_path]
+    IO.read(machine_options[:bootstrap_options][:key_path])
+  elsif machine_options[:bootstrap_options] && machine_options[:bootstrap_options][:key_name]
+    get_private_key(machine_options[:bootstrap_options][:key_name])
+  else
+    # TODO: make a way to suggest other keys to try ...
+    raise "No key found to connect to #{machine_spec.name} (#{machine_spec.reference.inspect})!"
   end
 end
-    end
-  end

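`private_key_for` walks a fixed fallback chain: key material carried on the instance object, the instance's `key_name`, the key name recorded in the machine spec, and finally the bootstrap options. In machine_options terms the last two fallbacks correspond to settings like the following (the key name and path are purely illustrative):

    machine_options = {
      bootstrap_options: {
        key_name: "my_keypair",               # resolved via get_private_key
        key_path: "/etc/chef/my_keypair.pem"  # read directly with IO.read
      }
    }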
-
-
-
-
-
-
-
-
-
-
-
-
-  ) do |retries, exception|
-    action_handler.report_progress "been waiting #{sleep_time*retries}/#{max_wait_time} -- sleeping #{sleep_time} seconds for #{machine_spec.name} (#{instance.id} on #{driver_url}) to become connectable ..."
-    unless transport.available?
-      raise "Instance #{machine_spec.name} (#{instance.id} on #{driver_url}) did not become connectable within #{max_wait_time} seconds"
-    end
+def ssh_options_for(machine_spec, machine_options, instance)
+  result = {
+    # TODO: create a user known hosts file
+    # :user_known_hosts_file => vagrant_ssh_config['UserKnownHostsFile'],
+    # :paranoid => true,
+    auth_methods: ["publickey"],
+    keys_only: true,
+    host_key_alias: "#{instance.id}.AWS"
+  }.merge(machine_options[:ssh_options] || {})
+  unless result.key?(:key_data)
+    result[:keys_only] = true
+    result[:key_data] = [private_key_for(machine_spec, machine_options, instance)]
   end
+  result
 end
-  end
-end

-
-
-
-
-
-
-
-
+def convergence_strategy_for(machine_spec, machine_options)
+  # Tell Ohai that this is an EC2 instance so that it runs the EC2 plugin
+  convergence_options = Cheffish::MergedConfig.new(
+    machine_options[:convergence_options] || {},
+    ohai_hints: { "ec2" => "" }
+  )
+  convergence_options = deep_symbolize_keys(convergence_options)
+
+  # Defaults
+  unless machine_spec.reference
+    return Chef::Provisioning::ConvergenceStrategy::NoConverge.new(convergence_options, config)
+  end

-
-
-
-
-
-
-      driver driver
-      chef_server machine_spec.managed_entry_store.chef_server
-      managed_entry_store machine_spec.managed_entry_store
-      allow_overwrite true
+  if machine_spec.reference["is_windows"]
+    Chef::Provisioning::ConvergenceStrategy::InstallMsi.new(convergence_options, config)
+  elsif machine_options[:cached_installer] == true
+    Chef::Provisioning::ConvergenceStrategy::InstallCached.new(convergence_options, config)
+  else
+    Chef::Provisioning::ConvergenceStrategy::InstallSh.new(convergence_options, config)
   end
 end
-end

-
-
-
+def wait_until_ready_image(action_handler, image_spec, image = nil)
+  wait_until_image(action_handler, image_spec, image) { |image| image.state.to_sym == :available }
+  action_handler.report_progress "Image #{image_spec.name} is now ready"
+end

-
-
+def wait_until_image(action_handler, image_spec, image = nil)
+  image ||= image_for(image_spec)
+  sleep_time = 10
+  unless yield(image)
+    if action_handler.should_perform_actions
+      action_handler.report_progress "waiting for #{image_spec.name} (#{image.id} on #{driver_url}) to be ready ..."
+      max_wait_time = Chef::Config.chef_provisioning[:image_max_wait_time] || 300
+      Retryable.retryable(
+        tries: (max_wait_time / sleep_time).to_i,
+        sleep: sleep_time,
+        matching: /did not become ready within/
+      ) do |retries, _exception|
+        action_handler.report_progress "been waiting #{retries * sleep_time}/#{max_wait_time} -- sleeping #{sleep_time} seconds for #{image_spec.name} (#{image.id} on #{driver_url}) to become ready ..."
+        # We have to manually reload the instance each loop, otherwise data is stale
+        image.reload
+        unless yield(image)
+          raise "Image #{image.id} did not become ready within #{max_wait_time} seconds"
+        end
+      end
+    end
+  end
+end

-
-
+def wait_until_instance_running(action_handler, machine_spec, instance = nil)
+  wait_until_machine(action_handler, machine_spec, "become ready", instance) do |instance|
+    instance.state.name == "running"
+  end
+end

-
-
-
-
-
-
-
-
-
-
-
-
+def wait_until_machine(action_handler, machine_spec, output_msg, instance = nil)
+  instance ||= instance_for(machine_spec)
+  sleep_time = 10
+  unless yield(instance)
+    if action_handler.should_perform_actions
+      action_handler.report_progress "waiting for #{machine_spec.name} (#{instance.id} on #{driver_url}) to #{output_msg} ..."
+      max_wait_time = Chef::Config.chef_provisioning[:machine_max_wait_time] || 120
+      Retryable.retryable(
+        tries: (max_wait_time / sleep_time).to_i,
+        sleep: sleep_time,
+        matching: /did not #{output_msg} within/
+      ) do |retries, _exception|
+        action_handler.report_progress "been waiting #{sleep_time * retries}/#{max_wait_time} -- sleeping #{sleep_time} seconds for #{machine_spec.name} (#{instance.id} on #{driver_url}) to #{output_msg} ..."
+        # We have to manually reload the instance each loop, otherwise data is stale
+        instance.reload
+        unless yield(instance)
+          raise "Instance #{machine_spec.name} (#{instance.id} on #{driver_url}) did not #{output_msg} within #{max_wait_time} seconds"
+        end
+      end
+    end
   end
-elsif machine_spec.reference
-  Chef::Log.warn "Machine #{machine_spec.name} (#{machine_spec.reference['instance_id']} on #{driver_url}) no longer exists. Recreating ..."
 end

-
-
-
-
+def wait_for_transport(action_handler, machine_spec, machine_options, instance = nil)
+  instance ||= instance_for(machine_spec)
+  sleep_time = 10
+  transport = transport_for(machine_spec, machine_options, instance)
+  unless instance.state.name.eql?("running") && transport.available?
+    if action_handler.should_perform_actions
+      action_handler.report_progress "waiting for #{machine_spec.name} (#{instance.id} on #{driver_url}) to be connectable (transport up and running) ..."
+      max_wait_time = Chef::Config.chef_provisioning[:machine_max_wait_time] || 120
+      Retryable.retryable(
+        tries: (max_wait_time / sleep_time).to_i,
+        sleep: sleep_time,
+        matching: /did not become connectable within/
+      ) do |retries, _exception|
+        action_handler.report_progress "been waiting #{sleep_time * retries}/#{max_wait_time} -- sleeping #{sleep_time} seconds for #{machine_spec.name} (#{instance.id} on #{driver_url}) to become connectable ..."
+        unless transport.available?
+          raise "Instance #{machine_spec.name} (#{instance.id} on #{driver_url}) did not become connectable within #{max_wait_time} seconds"
+        end
+      end
+    end
+  end
+end

-
-
-
-
-
-
+def default_aws_keypair_name(machine_spec)
+  if machine_spec.reference &&
+     Gem::Version.new(machine_spec.reference["driver_version"]) < Gem::Version.new("0.10")
+    "metal_default"
+  else
+    "chef_default"
+  end
 end
-  description = [ "creating #{machine_description} on #{driver_url}" ]
-  bootstrap_options.each_pair { |key,value| description << " #{key}: #{value.inspect}" }
-  action_handler.report_progress description
-  if action_handler.should_perform_actions
-    # Actually create the servers
-    parallelizer.parallelize(1.upto(machine_specs.size)) do |i|

-
-
-
+def default_aws_keypair(action_handler, machine_spec)
+  driver = self
+  default_key_name = default_aws_keypair_name(machine_spec)
+  updated = @@chef_default_lock.synchronize do
+    Provisioning.inline_resource(action_handler) do
+      aws_key_pair default_key_name do
+        driver driver
+        chef_server machine_spec.managed_entry_store.chef_server
+        managed_entry_store machine_spec.managed_entry_store
+        allow_overwrite true
+      end
+    end
+  end

-
-
-
+  # Only warn the first time
+  default_warning = "Using default key, which is not shared between machines! It is recommended to create an AWS key pair with the aws_key_pair resource, and set :bootstrap_options => { :key_name => <key name> }"
+  Chef::Log.warn(default_warning) if updated

-
+  default_key_name
+end

-
-
+def create_servers(action_handler, specs_and_options, parallelizer)
+  specs_and_servers = instances_for(specs_and_options.keys)
+
+  by_bootstrap_options = {}
+  specs_and_options.each do |machine_spec, machine_options|
+    instance = specs_and_servers[machine_spec]
+    if instance
+      if instance.state.name == "terminated"
+        Chef::Log.warn "Machine #{machine_spec.name} (#{instance.id}) is terminated. Recreating ..."
+      else
+        # Even though the instance has been created the tags could be incorrect if it
+        # was created before tags were introduced
+        converge_ec2_tags(instance, machine_options[:aws_tags], action_handler)
+        yield machine_spec, instance if block_given?
+        next
+      end
+    elsif machine_spec.reference
+      Chef::Log.warn "Machine #{machine_spec.name} (#{machine_spec.reference['instance_id']} on #{driver_url}) no longer exists. Recreating ..."
+    end

-
-
+    bootstrap_options = bootstrap_options_for(action_handler, machine_spec, machine_options)
+    by_bootstrap_options[bootstrap_options] ||= []
+    by_bootstrap_options[bootstrap_options] << machine_spec
   end
+
+  # Create the servers in parallel
+  parallelizer.parallelize(by_bootstrap_options) do |bootstrap_options, machine_specs|
+    machine_description = if machine_specs.size == 1
+                            "machine #{machine_specs.first.name}"
+                          else
+                            "machines #{machine_specs.map(&:name).join(', ')}"
+                          end
+    description = ["creating #{machine_description} on #{driver_url}"]
+    bootstrap_options.each_pair { |key, value| description << " #{key}: #{value.inspect}" }
+    action_handler.report_progress description
+    if action_handler.should_perform_actions
+      # Actually create the servers
+      parallelizer.parallelize(1.upto(machine_specs.size)) do |_i|
+        # Assign each one to a machine spec
+        machine_spec = machine_specs.pop
+        machine_options = specs_and_options[machine_spec]
+
+        clean_bootstrap_options = Marshal.load(Marshal.dump(bootstrap_options))
+        instance = create_instance_and_reference(clean_bootstrap_options, action_handler, machine_spec, machine_options)
+        converge_ec2_tags(instance, machine_options[:aws_tags], action_handler)
+
+        action_handler.performed_action "machine #{machine_spec.name} created as #{instance.id} on #{driver_url}"
+
+        yield machine_spec, instance if block_given?
+      end.to_a
+
+      unless machine_specs.empty?
+        raise "Not all machines were created by create_servers"
+      end
+    end
+  end.to_a
 end
-    end.to_a
-  end

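`create_servers` batches machines by identical bootstrap options, using each options hash itself as a Hash key, so one progress report and one parallel create covers every machine in a group. The grouping trick in isolation:

    by_options = {}
    [["a", { instance_type: "t2.micro" }],
     ["b", { instance_type: "t2.micro" }],
     ["c", { instance_type: "m4.large" }]].each do |name, opts|
      by_options[opts] ||= []
      by_options[opts] << name
    end
    by_options.size  # => 2; "a" and "b" land in the same creation batch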
1510
|
-
|
1511
|
-
|
1512
|
-
|
1513
|
-
|
1514
|
-
|
1515
|
-
|
1516
|
-
|
1517
|
-
|
1518
|
-
|
1498
|
+
def converge_ec2_tags(aws_object, tags, action_handler)
|
1499
|
+
ec2_strategy = Chef::Provisioning::AWSDriver::TaggingStrategy::EC2.new(
|
1500
|
+
ec2_client,
|
1501
|
+
aws_object.id,
|
1502
|
+
tags
|
1503
|
+
)
|
1504
|
+
aws_tagger = Chef::Provisioning::AWSDriver::AWSTagger.new(ec2_strategy, action_handler)
|
1505
|
+
aws_tagger.converge_tags
|
1506
|
+
end
|
1519
1507
|
|
1520
|
-
|
1521
|
-
|
1522
|
-
|
1523
|
-
|
1524
|
-
|
1525
|
-
|
1526
|
-
|
1527
|
-
|
1528
|
-
|
1508
|
+
def converge_elb_tags(aws_object, tags, action_handler)
|
1509
|
+
elb_strategy = Chef::Provisioning::AWSDriver::TaggingStrategy::ELB.new(
|
1510
|
+
elb_client,
|
1511
|
+
aws_object.load_balancer_name,
|
1512
|
+
tags
|
1513
|
+
)
|
1514
|
+
aws_tagger = Chef::Provisioning::AWSDriver::AWSTagger.new(elb_strategy, action_handler)
|
1515
|
+
aws_tagger.converge_tags
|
1516
|
+
end
|
1529
1517
|
|
1530
|
-
|
1531
|
-
|
1532
|
-
|
1533
|
-
|
1534
|
-
|
1535
|
-
|
1536
|
-
|
1537
|
-
|
1538
|
-
|
1539
|
-
|
1540
|
-
|
1541
|
-
|
1542
|
-
|
1518
|
+
def create_instance_and_reference(bootstrap_options, action_handler, machine_spec, machine_options)
|
1519
|
+
instance = nil
|
1520
|
+
# IAM says the instance profile is ready, but EC2 doesn't think it is
|
1521
|
+
# Not using retry_with_backoff here because we need to match on a string
|
1522
|
+
Retryable.retryable(
|
1523
|
+
tries: 10,
|
1524
|
+
sleep: ->(n) { [2**n, 16].min },
|
1525
|
+
on: ::Aws::EC2::Errors::InvalidParameterValue,
|
1526
|
+
matching: /Invalid IAM Instance Profile name/
|
1527
|
+
) do |_retries, exception|
|
1528
|
+
Chef::Log.debug("Instance creation InvalidParameterValue exception is #{exception.inspect}")
|
1529
|
+
instance = ec2_resource.create_instances(bootstrap_options.to_hash)[0]
|
1530
|
+
end
|
1543
1531
|
|
1544
|
-
|
1545
|
-
|
1532
|
+
# Make sure the instance is ready to be tagged
|
1533
|
+
instance.wait_until_exists
|
1546
1534
|
|
1547
|
-
|
1548
|
-
|
1549
|
-
|
1550
|
-
|
1551
|
-
|
1552
|
-
|
1553
|
-
|
1554
|
-
|
1535
|
+
# Sometimes tagging fails even though the instance 'exists'
|
1536
|
+
Chef::Provisioning::AWSDriver::AWSProvider.retry_with_backoff(::Aws::EC2::Errors::InvalidInstanceIDNotFound) do
|
1537
|
+
instance.create_tags(tags: [{ key: "Name", value: machine_spec.name }])
|
1538
|
+
end
|
1539
|
+
if machine_options.key?(:source_dest_check)
|
1540
|
+
instance.modify_attribute(
|
1541
|
+
source_dest_check: {
|
1542
|
+
value: machine_options[:source_dest_check]
|
1543
|
+
}
|
1544
|
+
)
|
1545
|
+
end
|
1546
|
+
machine_spec.reference = {
|
1547
|
+
"driver_version" => Chef::Provisioning::AWSDriver::VERSION,
|
1548
|
+
"allocated_at" => Time.now.utc.to_s,
|
1549
|
+
"host_node" => action_handler.host_node,
|
1550
|
+
"image_id" => bootstrap_options[:image_id],
|
1551
|
+
"instance_id" => instance.id
|
1555
1552
|
}
|
-          @transport_address_location_warned = true
-        end
-        machine_options[:transport_address_location] ||= :private_ip
-      end
-      %w(is_windows winrm_username winrm_port winrm_password ssh_username sudo transport_address_location ssh_gateway).each do |key|
-        machine_spec.reference[key] = machine_options[key.to_sym] if machine_options[key.to_sym]
-      end
-      instance
-    end
-
-    def get_listeners(listeners)
-      case listeners
-      when Hash
-        listeners.map do |from, to|
-          from = get_listener(from)
-          from.delete(:instance_port)
-          from.delete(:instance_protocol)
-          to = get_listener(to)
-          to.delete(:load_balancer_port)
-          to.delete(:protocol)
-          to.merge(from)
-        end
-      when Array
-        listeners.map { |listener| get_listener(listener) }
-      when nil
-        nil
-      else
-        [ get_listener(listeners) ]
-      end
-    end
+          machine_spec.driver_url = driver_url
+          machine_spec.reference["key_name"] = bootstrap_options[:key_name] if bootstrap_options[:key_name]
+          # TODO: 2.0 We no longer support `use_private_ip_for_ssh`, only `transport_address_location`
+          if machine_options[:use_private_ip_for_ssh]
+            unless @transport_address_location_warned
+              Chef::Log.warn("The machine_option ':use_private_ip_for_ssh' has been deprecated, use ':transport_address_location'")
+              @transport_address_location_warned = true
+            end
+            machine_options[:transport_address_location] ||= :private_ip
+          end
+          %w{is_windows winrm_username winrm_port winrm_password ssh_username sudo transport_address_location ssh_gateway}.each do |key|
+            machine_spec.reference[key] = machine_options[key.to_sym] if machine_options[key.to_sym]
+          end
+          instance
+        end

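The reference hash is what lets a later run find and talk to this instance again instead of re-creating it. Roughly what it ends up holding after the method above; every value here is illustrative, and the `key_name`/transport keys appear only when the corresponding options were supplied:

    # Illustrative contents of machine_spec.reference (all values made up).
    {
      "driver_version" => "3.0.6",
      "allocated_at"   => "2018-01-01 00:00:00 UTC",
      "host_node"      => "https://chef.example.com/nodes/workstation",
      "image_id"       => "ami-0123456789abcdef0",
      "instance_id"    => "i-0abc123def4567890",
      "key_name"       => "my-keypair",
      "transport_address_location" => :private_ip   # set via the deprecation shim above
    }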
+        def get_listeners(listeners)
+          case listeners
+          when Hash
+            listeners.map do |from, to|
+              from = get_listener(from)
+              from.delete(:instance_port)
+              from.delete(:instance_protocol)
+              to = get_listener(to)
+              to.delete(:load_balancer_port)
+              to.delete(:protocol)
+              to.merge(from)
+            end
+          when Array
+            listeners.map { |listener| get_listener(listener) }
+          when nil
+            nil
+          else
+            [get_listener(listeners)]
+          end
+        end

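Given `get_listener` below, the shorthands normalize as follows: a bare port becomes a one-element listener list, while a `from => to` Hash splits into the load-balancer side and the instance side before merging. Two hedged examples; the return values are derived by tracing this code, not taken from the gem's documentation:

    get_listeners(80)
    # => [{ load_balancer_port: 80, protocol: :http,
    #       instance_port: 80, instance_protocol: :http }]

    get_listeners(443 => 8080)
    # => [{ instance_port: 8080, instance_protocol: nil,   # no default protocol for 8080
    #       load_balancer_port: 443, protocol: :https }]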
+        def get_listener(listener)
+          result = {}
+
+          case listener
+          when Hash
+            result.merge!(listener)
+          when Array
+            result[:load_balancer_port] = listener[0] if listener.size >= 1
+            result[:protocol] = listener[1] if listener.size >= 2
+          when Symbol, String
+            result[:protocol] = listener
+          when Integer
+            result[:load_balancer_port] = listener
+          else
+            raise "Invalid listener #{listener}"
+          end

+          # If either port or protocol are set, set the other
+          if result[:load_balancer_port] && !result[:protocol]
+            result[:protocol] = PROTOCOL_DEFAULTS[result[:load_balancer_port]]
+          elsif result[:protocol] && !result[:load_balancer_port]
+            result[:load_balancer_port] = PORT_DEFAULTS[result[:protocol]]
+          end
+          if result[:instance_port] && !result[:instance_protocol]
+            result[:instance_protocol] = PROTOCOL_DEFAULTS[result[:instance_port]]
+          elsif result[:instance_protocol] && !result[:instance_port]
+            result[:instance_port] = PORT_DEFAULTS[result[:instance_protocol]]
+          end

+          # If instance_port is still unset, copy port/protocol over
+          result[:instance_port] ||= result[:load_balancer_port]
+          result[:instance_protocol] ||= result[:protocol]

-        end
+          result
+        end

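Worked example of the default-filling above: a bare `:https` fills its well-known port from `PORT_DEFAULTS`, then the trailing `||=` lines mirror the front-end values onto the instance side:

    get_listener(:https)
    # => { protocol: :https, load_balancer_port: 443,
    #      instance_port: 443, instance_protocol: :https }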
-      }
-      PROTOCOL_DEFAULTS = {
-        25 => :tcp,
-        80 => :http,
-        443 => :https,
-        465 => :ssl,
-        587 => :tcp,
-      }
+        def default_instance_type
+          "t2.micro"
+        end

+        PORT_DEFAULTS = {
+          http: 80,
+          https: 443
+        }.freeze
+        PROTOCOL_DEFAULTS = {
+          25 => :tcp,
+          80 => :http,
+          443 => :https,
+          465 => :ssl,
+          587 => :tcp
+        }.freeze
+      end
+    end
   end
 end
-  end
-end
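The newly added `.freeze` on both lookup tables means accidental mutation of these class-wide defaults now fails fast (a `FrozenError` on Ruby 2.5+, a `RuntimeError` on older Rubies) instead of silently persisting:

    PORT_DEFAULTS = { http: 80, https: 443 }.freeze
    PORT_DEFAULTS[:ftp] = 21
    # => raises FrozenError: can't modify frozen Hash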