chef-provisioning-aws 1.6.1 → 1.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +37 -0
- data/Rakefile +8 -5
- data/chef-provisioning-aws.gemspec +3 -3
- data/lib/chef/provider/aws_cloudsearch_domain.rb +5 -3
- data/lib/chef/provider/aws_elasticsearch_domain.rb +131 -0
- data/lib/chef/provider/aws_key_pair.rb +2 -2
- data/lib/chef/provider/aws_rds_instance.rb +7 -5
- data/lib/chef/provider/aws_rds_subnet_group.rb +7 -7
- data/lib/chef/provider/aws_route_table.rb +5 -1
- data/lib/chef/provider/aws_server_certificate.rb +4 -3
- data/lib/chef/provisioning/aws_driver.rb +1 -0
- data/lib/chef/provisioning/aws_driver/aws_provider.rb +2 -1
- data/lib/chef/provisioning/aws_driver/driver.rb +109 -38
- data/lib/chef/provisioning/aws_driver/tagging_strategy/elasticsearch.rb +40 -0
- data/lib/chef/provisioning/aws_driver/version.rb +1 -1
- data/lib/chef/resource/aws_eip_address.rb +4 -24
- data/lib/chef/resource/aws_elasticsearch_domain.rb +42 -0
- data/lib/chef/resource/aws_rds_instance.rb +12 -7
- data/lib/chef/resource/aws_route53_hosted_zone.rb +1 -1
- data/spec/aws_support.rb +2 -2
- data/spec/integration/aws_eip_address_spec.rb +32 -18
- data/spec/integration/aws_elasticsearch_domain_spec.rb +119 -0
- data/spec/integration/aws_key_pair_spec.rb +2 -1
- data/spec/integration/aws_rds_instance_spec.rb +3 -3
- data/spec/integration/aws_route53_hosted_zone_spec.rb +11 -0
- data/spec/integration/aws_route_table_spec.rb +40 -44
- data/spec/integration/aws_server_certificate_spec.rb +12 -0
- data/spec/integration/load_balancer_spec.rb +47 -1
- data/spec/integration/machine_spec.rb +32 -25
- metadata +28 -6
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 1484fb7faef2c0ce2abadef678b520d88219f7de
+  data.tar.gz: c7f948fc458daa4f0ba1ba2294f17bf9e368133b
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 397a01589702e2d9439a7209dbaaaf3548ca9d5cc7707fd2b2ed05a69dc5f0a9c43fc0524559e4c288e5d76e8180ff6696810fd0e22d9b3c44c80418110d3e9e
+  data.tar.gz: 090afbab837aadcf3c502cbc96ec633069cc6685aa67a1c56ae2fc93cc9ace1e8ca725c8735fd7304e9bb991bc887c9d5ab1aced3c869152df4f211ce65508de
data/README.md
CHANGED
@@ -187,10 +187,47 @@ load_balancer "my_elb" do
 
 The available parameters for `load_balancer_options` can be viewed in the [aws docs](http://docs.aws.amazon.com/AWSRubySDK/latest/AWS/ELB/Client.html#create_load_balancer-instance_method).
 
+If you wish to enable sticky sessions, pass a `sticky_sessions` key to the
+`load_balancer_options` and specify a cookie name and the ports that should be
+sticky. In the above example, it would look like this:
+
+```ruby
+machine 'test1'
+m2 = machine 'test2'
+load_balancer "my_elb" do
+  machines ['test1', m2]
+  load_balancer_options({
+    subnets: subnets,
+    security_groups: [load_balancer_sg],
+    listeners: [
+      {
+        instance_port: 8080,
+        protocol: 'HTTP',
+        instance_protocol: 'HTTP',
+        port: 80
+      },
+      {
+        instance_port: 8080,
+        protocol: 'HTTPS',
+        instance_protocol: 'HTTP',
+        port: 443,
+        ssl_certificate_id: "arn:aws:iam::360965486607:server-certificate/cloudfront/foreflight-2015-07-09"
+      }
+    ],
+    sticky_sessions: {
+      cookie_name: 'my-app-cookie',
+      ports: [80, 443]
+    }
+  })
+```
+
 NOTES:
 
 1. You can specify either `ssl_certificate_id` or `server_certificate` in a listener but the value to both parameters should be the ARN of an existing IAM::ServerCertificate object.
 
+2. The `sticky_sessions` option currently only supports Application-Controlled
+Session Stickiness.
+
 # RDS Instance Options
 
 ### Additional Options
data/Rakefile
CHANGED
@@ -4,8 +4,6 @@ require 'rspec/core/rake_task'
 
 task :default => :spec
 
-ENV['AWS_TEST_DRIVER'] ||= "aws"
-
 desc "run all non-integration specs"
 RSpec::Core::RakeTask.new(:spec) do |spec|
   spec.pattern = 'spec/**/*_spec.rb'
@@ -39,9 +37,14 @@ end
 
 desc "travis specific task - runs CI integration tests (regular and super_slow in parallel) and sets up travis specific ENV variables"
 task :travis, [:sub_task] do |t, args|
-
-
-
+  sub_task = args[:sub_task]
+  if sub_task == "super_slow"
+    pattern = "load_balancer_spec.rb,aws_route_table_spec.rb,machine_spec.rb,aws_eip_address_spec.rb" # This is a comma seperated list
+    pattern = pattern.split(",").map {|p| "spec/integration/**/*#{p}"}.join(",")
+  else
+    pattern = 'spec/integration/**/*_spec.rb'
+  end
+  Rake::Task[sub_task].invoke(pattern)
 end
 
 desc "travis task for machine_image tests - these take so long to run that we only run the first test"
data/chef-provisioning-aws.gemspec
CHANGED
@@ -15,9 +15,9 @@ Gem::Specification.new do |s|
   s.add_dependency 'chef-provisioning', '~> 1.4'
 
   s.add_dependency 'aws-sdk-v1', '>= 1.59.0'
-  s.add_dependency 'aws-sdk', '
-  s.add_dependency 'retryable', '~> 2.0.1'
-  s.add_dependency 'ubuntu_ami', '~> 0.4.1'
+  s.add_dependency 'aws-sdk', ['>= 2.1.26', '< 3.0']
+  s.add_dependency 'retryable', '~> 2.0', '>= 2.0.1'
+  s.add_dependency 'ubuntu_ami', '~> 0.4', '>= 0.4.1'
 
   # chef-zero is only a development dependency because we leverage its RSpec support
   s.add_development_dependency 'chef-zero', '~> 4.2'
data/lib/chef/provider/aws_cloudsearch_domain.rb
CHANGED
@@ -2,7 +2,7 @@ require 'chef/provisioning/aws_driver/aws_provider'
 
 class Chef::Provider::AwsCloudsearchDomain < Chef::Provisioning::AWSDriver::AWSProvider
   provides :aws_cloudsearch_domain
-
+
   def create_aws_object
     domain = nil # define here to ensure it is available outside of the coverge_by scope
     converge_by "create CloudSearch domain #{new_resource.name}" do
@@ -127,8 +127,10 @@ class Chef::Provider::AwsCloudsearchDomain < Chef::Provisioning::AWSDriver::AWSP
   end
 
   def create_index_fields
-    new_resource.index_fields.
-
+    unless new_resource.index_fields.nil?
+      new_resource.index_fields.each do |field|
+        create_index_field(field)
+      end
     end
   end
 
data/lib/chef/provider/aws_elasticsearch_domain.rb
ADDED
@@ -0,0 +1,131 @@
+require 'chef/provisioning/aws_driver/aws_provider'
+require 'chef/provisioning/aws_driver/tagging_strategy/elasticsearch'
+
+class Chef::Provider::AwsElasticsearchDomain < Chef::Provisioning::AWSDriver::AWSProvider
+  provides :aws_elasticsearch_domain
+
+  def create_aws_object
+    converge_by "create Elasticsearch domain #{new_resource.domain_name}" do
+      es_client.create_elasticsearch_domain(update_payload)
+    end
+  end
+
+  def destroy_aws_object(domain)
+    converge_by "destroy Elasticsearch domain #{new_resource.domain_name}" do
+      es_client.delete_elasticsearch_domain({domain_name: new_resource.domain_name})
+    end
+  end
+
+  def update_aws_object(domain)
+    updates = required_updates(domain)
+    if ! updates.empty?
+      converge_by updates do
+        es_client.update_elasticsearch_domain_config(update_payload)
+      end
+    end
+  end
+
+  def aws_tagger
+    @aws_tagger ||= begin
+      strategy = Chef::Provisioning::AWSDriver::TaggingStrategy::Elasticsearch.new(
+        es_client,
+        new_resource.aws_object.arn,
+        new_resource.aws_tags)
+      Chef::Provisioning::AWSDriver::AWSTagger.new(strategy, action_handler)
+    end
+  end
+
+  def converge_tags
+    aws_tagger.converge_tags
+  end
+
+  private
+
+  def required_updates(object)
+    ret = []
+    ret << " update cluster configuration" if cluster_options_changed?(object)
+    ret << " update ebs options" if ebs_options_changed?(object)
+    ret << " update snapshot options" if snapshot_options_changed?(object)
+    ret << " update access policy" if access_policy_changed?(object)
+    ret.unshift("update Elasticsearch domain #{new_resource.name}") unless ret.empty?
+    ret
+  end
+
+  def update_payload
+    payload = {domain_name: new_resource.domain_name}
+    payload.merge!(ebs_options) if ebs_options_present?
+    payload.merge!(cluster_options) if cluster_options_present?
+    payload.merge!(snapshot_options) if snapshot_options_present?
+    payload[:access_policies] = new_resource.access_policies if new_resource.access_policies
+    payload
+  end
+
+  EBS_OPTIONS = %i(ebs_enabled volume_type volume_size iops)
+  def ebs_options
+    opts = EBS_OPTIONS.inject({}) do |accum, i|
+      new_resource.send(i).nil? ? accum : accum.merge({i => new_resource.send(i)})
+    end
+    {ebs_options: opts}
+  end
+
+  def ebs_options_present?
+    EBS_OPTIONS.any? {|i| !new_resource.send(i).nil? }
+  end
+
+  def ebs_options_changed?(object)
+    changed?(ebs_options[:ebs_options], object.ebs_options)
+  end
+
+  CLUSTER_OPTIONS = %i(instance_type instance_count dedicated_master_enabled
+                       dedicated_master_type dedicated_master_count zone_awareness_enabled)
+
+  def cluster_options
+    opts = CLUSTER_OPTIONS.inject({}) do |accum, i|
+      new_resource.send(i).nil? ? accum : accum.merge({i => new_resource.send(i)})
+    end
+    {elasticsearch_cluster_config: opts}
+  end
+
+  def cluster_options_present?
+    CLUSTER_OPTIONS.any? {|i| !new_resource.send(i).nil? }
+  end
+
+  def cluster_options_changed?(object)
+    changed?(cluster_options[:elasticsearch_cluster_config], object.elasticsearch_cluster_config)
+  end
+
+  def snapshot_options
+    if !new_resource.automated_snapshot_start_hour.nil?
+      {snapshot_options: { automated_snapshot_start_hour: new_resource.automated_snapshot_start_hour }}
+    else
+      {}
+    end
+  end
+
+  def snapshot_options_present?
+    ! new_resource.automated_snapshot_start_hour.nil?
+  end
+
+  def snapshot_options_changed?(object)
+    changed?(snapshot_options[:snapshot_options] || {}, object.snapshot_options)
+  end
+
+  def access_policy_changed?(object)
+    if new_resource.access_policies
+      Chef::JSONCompat.parse(object.access_policies) != Chef::JSONCompat.parse(new_resource.access_policies)
+    else
+      false
+    end
+  end
+
+  def changed?(desired, actual)
+    desired.each do |key, value|
+      return true if actual[key] != value
+    end
+    false
+  end
+
+  def es_client
+    @es_client ||= new_resource.driver.elasticsearch_client
+  end
+end
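The matching `aws_elasticsearch_domain` resource (`data/lib/chef/resource/aws_elasticsearch_domain.rb`, +42 lines, not shown in this excerpt) exposes the attributes this provider reads. A hypothetical recipe sketch follows; the attribute names are inferred from the provider's `EBS_OPTIONS`, `CLUSTER_OPTIONS`, and snapshot accessors above, so treat the exact DSL as an assumption rather than the resource's documented interface.

```ruby
# Hypothetical usage sketch; attribute names inferred from the provider above,
# not copied from the (omitted) resource definition.
require 'chef/provisioning/aws_driver'

with_driver 'aws::us-west-2'

aws_elasticsearch_domain 'test-es-domain' do
  instance_type  't2.micro.elasticsearch'  # feeds elasticsearch_cluster_config
  instance_count 1
  ebs_enabled    true                      # feeds ebs_options
  volume_size    10
  automated_snapshot_start_hour 2          # feeds snapshot_options
  aws_tags       'chef-managed' => 'true'  # converged by the Elasticsearch tagging strategy
end
```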
data/lib/chef/provider/aws_key_pair.rb
CHANGED
@@ -5,7 +5,7 @@ require 'aws-sdk-v1'
 
 class Chef::Provider::AwsKeyPair < Chef::Provisioning::AWSDriver::AWSProvider
   provides :aws_key_pair
-
+
   action :create do
     create_key(:create)
   end
@@ -171,7 +171,7 @@ class Chef::Provider::AwsKeyPair < Chef::Provisioning::AWSDriver::AWSProvider
     if current_key_pair
       @current_fingerprint = current_key_pair.fingerprint
     else
-      current_resource.action :destroy
+      current_resource.action [:destroy]
     end
 
     if new_private_key_path && ::File.exist?(new_private_key_path)
data/lib/chef/provider/aws_rds_instance.rb
CHANGED
@@ -26,7 +26,7 @@ class Chef::Provider::AwsRdsInstance < Chef::Provisioning::AWSDriver::AWSProvide
 
   def create_aws_object
     converge_by "create RDS instance #{new_resource.db_instance_identifier} in #{region}" do
-      new_resource.driver.
+      new_resource.driver.rds_resource.create_db_instance(options_hash)
     end
   end
 
@@ -38,12 +38,14 @@ class Chef::Provider::AwsRdsInstance < Chef::Provisioning::AWSDriver::AWSProvide
     converge_by "waited until RDS instance #{new_resource.name} was deleted" do
       wait_for(
         aws_object: instance,
-
-
-
+        # http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Status.html
+        # It cannot _actually_ return a deleted status, we're just looking for the error
+        query_method: :db_instance_status,
+        expected_responses: ['deleted'],
+        acceptable_errors: [::Aws::RDS::Errors::DBInstanceNotFound],
         tries: 60,
         sleep: 10
-      )
+      ) { |instance| instance.reload }
     end
   end
 
data/lib/chef/provider/aws_rds_subnet_group.rb
CHANGED
@@ -12,14 +12,14 @@ class Chef::Provider::AwsRdsSubnetGroup < Chef::Provisioning::AWSDriver::AWSProv
     end
   end
 
-  def destroy_aws_object(
+  def destroy_aws_object(subnet_group)
     converge_by "delete RDS subnet group #{new_resource.name} in #{region}" do
       driver.delete_db_subnet_group(db_subnet_group_name: new_resource.name)
     end
   end
 
-  def update_aws_object(
-    updates = required_updates(
+  def update_aws_object(subnet_group)
+    updates = required_updates(subnet_group)
     if ! updates.empty?
       converge_by updates do
         driver.modify_db_subnet_group(desired_options)
@@ -37,18 +37,18 @@ class Chef::Provider::AwsRdsSubnetGroup < Chef::Provisioning::AWSDriver::AWSProv
     end
   end
 
-  # Given an existing
+  # Given an existing subnet group, return an array of update descriptions
   # representing the updates that need to be made.
   #
   # If no updates are needed, an empty array is returned.
   #
-  def required_updates(
+  def required_updates(subnet_group)
     ret = []
-    if desired_options[:db_subnet_group_description] !=
+    if desired_options[:db_subnet_group_description] != subnet_group[:db_subnet_group_description]
       ret << " set group description to #{desired_options[:db_subnet_group_description]}"
     end
 
-    if ! xor_array(desired_options[:subnet_ids], subnet_ids(
+    if ! xor_array(desired_options[:subnet_ids], subnet_ids(subnet_group[:subnets])).empty?
      ret << " set subnets to #{desired_options[:subnet_ids]}"
    end
 
data/lib/chef/provider/aws_route_table.rb
CHANGED
@@ -87,7 +87,7 @@ class Chef::Provider::AwsRouteTable < Chef::Provisioning::AWSDriver::AWSProvider
        current_route = current_routes.delete(destination_cidr_block)
        current_target = current_route.gateway_id || current_route.instance_id || current_route.network_interface_id || current_route.vpc_peering_connection_id
        if current_target != target
-         action_handler.perform_action "reroute #{destination_cidr_block} to #{route_target} (#{target}) instead of #{
+         action_handler.perform_action "reroute #{destination_cidr_block} to #{route_target} (#{target}) instead of #{current_target}" do
            current_route.replace(options)
          end
        end
@@ -144,6 +144,8 @@ class Chef::Provider::AwsRouteTable < Chef::Provisioning::AWSDriver::AWSProvider
        route_target = { network_interface: route_target }
      when /^pcx-[A-Fa-f0-9]{8}$/, Chef::Resource::AwsVpcPeeringConnection, ::Aws::EC2::VpcPeeringConnection
        route_target = { vpc_peering_connection: route_target }
+     when /^vgw-[A-Fa-f0-9]{8}$/
+       route_target = { virtual_private_gateway: route_target }
      when String, Chef::Resource::AwsInstance
        route_target = { instance: route_target }
      when Chef::Resource::Machine
@@ -169,6 +171,8 @@ class Chef::Provider::AwsRouteTable < Chef::Provisioning::AWSDriver::AWSProvider
          updated_route_target[:gateway_id] = Chef::Resource::AwsInternetGateway.get_aws_object_id(value, resource: new_resource)
        when :vpc_peering_connection
          updated_route_target[:vpc_peering_connection_id] = Chef::Resource::AwsVpcPeeringConnection.get_aws_object_id(value, resource: new_resource)
+       when :virtual_private_gateway
+         updated_route_target[:gateway_id] = value
        end
      end
      updated_route_target
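With the two new `:virtual_private_gateway` branches, a route target can now be a `vgw-` ID. A minimal, hypothetical sketch follows; the VPC and route table names and the gateway ID are placeholders, and only the `vgw-...` form of the value comes from the regex above.

```ruby
# Hypothetical example; 'vgw-0a1b2c3d' stands in for an existing virtual private gateway.
aws_vpc 'test-vpc' do
  cidr_block '10.0.0.0/24'
end

aws_route_table 'test-route-table' do
  vpc 'test-vpc'
  routes '0.0.0.0/0' => 'vgw-0a1b2c3d'  # handled by the new virtual_private_gateway branch
end
```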
data/lib/chef/provider/aws_server_certificate.rb
CHANGED
@@ -9,12 +9,13 @@ class Chef::Provider::AwsServerCertificate < Chef::Provisioning::AWSDriver::AWSP
 
   def create_aws_object
     converge_by "create server certificate #{new_resource.name}" do
-
+      opts = {
         :name => new_resource.name,
         :certificate_body => new_resource.certificate_body,
-        :certificate_chain => new_resource.certificate_chain,
         :private_key => new_resource.private_key
-
+      }
+      opts[:certificate_chain] = new_resource.certificate_chain if new_resource.certificate_chain
+      new_resource.driver.iam.server_certificates.upload(**opts)
     end
   end
 
data/lib/chef/provisioning/aws_driver.rb
CHANGED
@@ -9,6 +9,7 @@ require "chef/resource/aws_cloudsearch_domain"
 require "chef/resource/aws_dhcp_options"
 require "chef/resource/aws_ebs_volume"
 require "chef/resource/aws_eip_address"
+require "chef/resource/aws_elasticsearch_domain"
 require "chef/resource/aws_iam_role"
 require "chef/resource/aws_iam_instance_profile"
 require "chef/resource/aws_image"
data/lib/chef/provisioning/aws_driver/aws_provider.rb
CHANGED
@@ -270,8 +270,9 @@ class AWSProvider < Chef::Provider::LWRPBase
 
     Retryable.retryable(:tries => tries, :sleep => sleep) do |retries, exception|
       action_handler.report_progress "waited #{retries*sleep}/#{tries*sleep}s for <#{aws_object.class}:#{aws_object.id}>##{query_method} state to change to #{expected_responses.inspect}..."
-      Chef::Log.debug("Current exception in wait_for is #{exception.inspect}")
+      Chef::Log.debug("Current exception in wait_for is #{exception.inspect}") if exception
       begin
+        yield(aws_object) if block_given?
         current_response = aws_object.send(query_method)
         Chef::Log.debug("Current response in wait_for from [#{query_method}] is #{current_response}")
         unless expected_responses.include?(current_response)
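With `yield(aws_object) if block_given?` in place, callers of `wait_for` can refresh the polled object on every retry; the RDS instance provider above relies on this. A condensed restatement of that call, with the argument values taken from the RDS diff:

```ruby
# Condensed from the aws_rds_instance provider shown earlier in this diff.
wait_for(
  aws_object: instance,
  query_method: :db_instance_status,
  expected_responses: ['deleted'],
  acceptable_errors: [::Aws::RDS::Errors::DBInstanceNotFound],
  tries: 60,
  sleep: 10
) { |instance| instance.reload } # the block runs before each status check so the data is never stale
```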
data/lib/chef/provisioning/aws_driver/driver.rb
CHANGED
@@ -33,7 +33,9 @@ AWS_V2_SERVICES = {
   "Route53" => "route53",
   "S3" => "s3",
   "ElasticLoadBalancing" => "elb",
+  "ElasticsearchService" => "elasticsearch",
   "IAM" => "iam",
+  "RDS" => "rds",
 }
 Aws.eager_autoload!(:services => AWS_V2_SERVICES.keys)
 
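The new map entries are what allow helpers such as `driver.elasticsearch_client` (used by the Elasticsearch provider above) and the RDS accessors used by the RDS provider to resolve. The sketch below only illustrates how a short name like "elasticsearch" or "rds" can be turned into a memoized aws-sdk v2 client; it is not the driver's actual metaprogramming.

```ruby
# Illustrative only -- not the driver's real code. AWS_V2_SERVICES maps an
# aws-sdk v2 module name to the short name used for a generated accessor.
require 'aws-sdk'

class ExampleDriver
  AWS_V2_SERVICES = { "ElasticsearchService" => "elasticsearch", "RDS" => "rds" }

  AWS_V2_SERVICES.each do |sdk_module, short_name|
    define_method("#{short_name}_client") do
      @aws_clients ||= {}
      @aws_clients[short_name] ||= ::Aws.const_get(sdk_module)::Client.new
    end
  end
end
```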
@@ -160,11 +162,12 @@ module AWSDriver
     def allocate_load_balancer(action_handler, lb_spec, lb_options, machine_specs)
       lb_options = (lb_options || {}).to_h
       lb_options = AWSResource.lookup_options(lb_options, managed_entry_store: lb_spec.managed_entry_store, driver: self)
-      # We delete the attributes
+      # We delete the attributes, tags, health check, and sticky sessions here because they are not valid in the create call
       # and must be applied afterward
       lb_attributes = lb_options.delete(:attributes)
       lb_aws_tags = lb_options.delete(:aws_tags)
       health_check = lb_options.delete(:health_check)
+      sticky_sessions = lb_options.delete(:sticky_sessions)
 
       old_elb = nil
       actual_elb = load_balancer_for(lb_spec)
@@ -390,6 +393,61 @@
         end
       end
 
+      # Update the load balancer sticky sessions
+      if sticky_sessions
+        policy_name = "#{actual_elb.name}-sticky-session-policy"
+        policies = elb.client.describe_load_balancer_policies(load_balancer_name: actual_elb.name)
+
+        existing_cookie_policy = policies[:policy_descriptions].detect { |pd| pd[:policy_type_name] == 'AppCookieStickinessPolicyType' && pd[:policy_name] == policy_name}
+        existing_cookie_name = existing_cookie_policy ? (existing_cookie_policy[:policy_attribute_descriptions].detect { |pad| pad[:attribute_name] == 'CookieName' })[:attribute_value] : nil
+        desired_cookie_name = sticky_sessions[:cookie_name]
+
+        # Create or update the policy to have the desired cookie_name
+        if existing_cookie_policy.nil?
+          perform_action.call(" creating sticky sessions with cookie_name #{desired_cookie_name}") do
+            elb.client.create_app_cookie_stickiness_policy(
+              load_balancer_name: actual_elb.name,
+              policy_name: policy_name,
+              cookie_name: desired_cookie_name
+            )
+          end
+        elsif existing_cookie_name && existing_cookie_name != desired_cookie_name
+          perform_action.call(" updating sticky sessions from cookie_name #{existing_cookie_name} to cookie_name #{desired_cookie_name}") do
+            elb.client.delete_load_balancer_policy(
+              load_balancer_name: actual_elb.name,
+              policy_name: policy_name
+            )
+            elb.client.create_app_cookie_stickiness_policy(
+              load_balancer_name: actual_elb.name,
+              policy_name: policy_name,
+              cookie_name: desired_cookie_name
+            )
+          end
+        end
+
+        # Ensure the policy is attached to the appropriate listener
+        elb_description = elb.client.describe_load_balancers(load_balancer_names: [actual_elb.name])[:load_balancer_descriptions].first
+        listeners = elb_description[:listener_descriptions]
+
+        sticky_sessions[:ports].each do |ss_port|
+          listener = listeners.detect { |ld| ld[:listener][:load_balancer_port] == ss_port }
+
+          unless listener.nil?
+            policy_names = listener[:policy_names]
+
+            unless policy_names.include?(policy_name)
+              policy_names << policy_name
+
+              elb.client.set_load_balancer_policies_of_listener(
+                load_balancer_name: actual_elb.name,
+                load_balancer_port: ss_port,
+                policy_names: policy_names
+              )
+            end
+          end
+        end
+      end
+
       # Update instance list, but only if there are machines specified
       if machine_specs
         assigned_instance_ids = actual_elb.instances.map { |i| i.instance_id }
@@ -521,6 +579,8 @@
         aws_image image_spec.name do
           action :destroy
           driver d
+          chef_server image_spec.managed_entry_store.chef_server
+          managed_entry_store image_spec.managed_entry_store
         end
       end
     end
@@ -597,7 +657,7 @@ EOD
           end
         }
       end
-      wait_for_transport(action_handler, machine_spec, machine_options)
+      wait_for_transport(action_handler, machine_spec, machine_options, instance)
       machine_for(machine_spec, machine_options, instance)
     end
 
@@ -630,6 +690,8 @@
         aws_instance machine_spec.name do
           action :destroy
           driver d
+          chef_server machine_spec.managed_entry_store.chef_server
+          managed_entry_store machine_spec.managed_entry_store
         end
       end
 
@@ -1041,70 +1103,77 @@ EOD
 
     def wait_until_ready_image(action_handler, image_spec, image=nil)
       wait_until_image(action_handler, image_spec, image) { image.state == :available }
+      action_handler.report_progress "Image #{image_spec.name} is now ready"
     end
 
     def wait_until_image(action_handler, image_spec, image=nil, &block)
       image ||= image_for(image_spec)
-      time_elapsed = 0
       sleep_time = 10
-
-
-
-
-
-
-
-
-
-
+      unless yield(image)
+        if action_handler.should_perform_actions
+          action_handler.report_progress "waiting for #{image_spec.name} (#{image.id} on #{driver_url}) to be ready ..."
+          max_wait_time = Chef::Config.chef_provisioning[:image_max_wait_time] || 300
+          Retryable.retryable(
+            :tries => (max_wait_time/sleep_time).to_i,
+            :sleep => sleep_time,
+            :matching => /did not become ready within/
+          ) do |retries, exception|
+            action_handler.report_progress "been waiting #{retries*sleep_time}/#{max_wait_time} -- sleeping #{sleep_time} seconds for #{image_spec.name} (#{image.id} on #{driver_url}) to become ready ..."
+            unless yield(image)
+              raise "Image #{image.id} did not become ready within #{max_wait_time} seconds"
+            end
+          end
         end
-        action_handler.report_progress "Image #{image_spec.name} is now ready"
       end
     end
 
     def wait_until_instance_running(action_handler, machine_spec, instance=nil)
-      wait_until_machine(action_handler, machine_spec, "
+      wait_until_machine(action_handler, machine_spec, "become ready", instance) { |instance|
        instance.state.name == "running"
       }
     end
 
     def wait_until_machine(action_handler, machine_spec, output_msg, instance=nil, &block)
       instance ||= instance_for(machine_spec)
-
-
-
-      action_handler.report_progress "been waiting #{delay*attempts}/#{delay*max_attempts} -- sleeping #{delay} seconds for #{machine_spec.name} (#{instance.id} on #{driver_url}) to #{output_msg} ..."
-      end
-      if action_handler.should_perform_actions
-        no_wait = yield(instance)
-        unless no_wait
+      sleep_time = 10
+      unless yield(instance)
+        if action_handler.should_perform_actions
           action_handler.report_progress "waiting for #{machine_spec.name} (#{instance.id} on #{driver_url}) to #{output_msg} ..."
-
-
+          max_wait_time = Chef::Config.chef_provisioning[:machine_max_wait_time] || 120
+          Retryable.retryable(
+            :tries => (max_wait_time/sleep_time).to_i,
+            :sleep => sleep_time,
+            :matching => /did not #{output_msg} within/
+          ) do |retries, exception|
+            action_handler.report_progress "been waiting #{sleep_time*retries}/#{max_wait_time} -- sleeping #{sleep_time} seconds for #{machine_spec.name} (#{instance.id} on #{driver_url}) to #{output_msg} ..."
+            # We have to manually reload the instance each loop, otherwise data is stale
+            instance.reload
+            unless yield(instance)
+              raise "Instance #{machine_spec.name} (#{instance.id} on #{driver_url}) did not #{output_msg} within #{max_wait_time} seconds"
+            end
          end
        end
      end
-      # We need an instance.reload here because the `wait_until` does not reload the instance it is called on,
-      # only the instance that is passed to the block
-      instance.reload
     end
 
-    def wait_for_transport(action_handler, machine_spec, machine_options)
-      instance
-      time_elapsed = 0
+    def wait_for_transport(action_handler, machine_spec, machine_options, instance=nil)
+      instance ||= instance_for(machine_spec)
       sleep_time = 10
-      max_wait_time = Chef::Config.chef_provisioning[:machine_max_wait_time] || 120
       transport = transport_for(machine_spec, machine_options, instance)
       unless transport.available?
         if action_handler.should_perform_actions
           action_handler.report_progress "waiting for #{machine_spec.name} (#{instance.id} on #{driver_url}) to be connectable (transport up and running) ..."
-
-
-
-
+          max_wait_time = Chef::Config.chef_provisioning[:machine_max_wait_time] || 120
+          Retryable.retryable(
+            :tries => (max_wait_time/sleep_time).to_i,
+            :sleep => sleep_time,
+            :matching => /did not become connectable within/
+          ) do |retries, exception|
+            action_handler.report_progress "been waiting #{sleep_time*retries}/#{max_wait_time} -- sleeping #{sleep_time} seconds for #{machine_spec.name} (#{instance.id} on #{driver_url}) to become connectable ..."
+            unless transport.available?
+              raise "Instance #{machine_spec.name} (#{instance.id} on #{driver_url}) did not become connectable within #{max_wait_time} seconds"
+            end
          end
-
-          action_handler.report_progress "#{machine_spec.name} is now connectable"
        end
      end
    end
@@ -1125,6 +1194,8 @@ EOD
       Provisioning.inline_resource(action_handler) do
         aws_key_pair default_key_name do
           driver driver
+          chef_server machine_spec.managed_entry_store.chef_server
+          managed_entry_store machine_spec.managed_entry_store
           allow_overwrite true
         end
       end