ironfan 4.3.4 → 4.4.0
- data/CHANGELOG.md +7 -0
- data/ELB.md +121 -0
- data/Gemfile +1 -0
- data/Rakefile +4 -0
- data/VERSION +1 -1
- data/ironfan.gemspec +48 -3
- data/lib/chef/knife/cluster_launch.rb +5 -0
- data/lib/chef/knife/cluster_proxy.rb +3 -3
- data/lib/chef/knife/cluster_sync.rb +4 -0
- data/lib/chef/knife/ironfan_knife_common.rb +17 -6
- data/lib/chef/knife/ironfan_script.rb +29 -11
- data/lib/ironfan.rb +2 -2
- data/lib/ironfan/broker/computer.rb +8 -3
- data/lib/ironfan/dsl/ec2.rb +133 -2
- data/lib/ironfan/headers.rb +4 -0
- data/lib/ironfan/provider.rb +48 -3
- data/lib/ironfan/provider/ec2.rb +23 -8
- data/lib/ironfan/provider/ec2/elastic_load_balancer.rb +239 -0
- data/lib/ironfan/provider/ec2/iam_server_certificate.rb +101 -0
- data/lib/ironfan/provider/ec2/machine.rb +8 -0
- data/lib/ironfan/provider/ec2/security_group.rb +3 -5
- data/lib/ironfan/requirements.rb +2 -0
- data/notes/Home.md +45 -0
- data/notes/INSTALL-cloud_setup.md +103 -0
- data/notes/INSTALL.md +134 -0
- data/notes/Ironfan-Roadmap.md +70 -0
- data/notes/advanced-superpowers.md +16 -0
- data/notes/aws_servers.jpg +0 -0
- data/notes/aws_user_key.png +0 -0
- data/notes/cookbook-versioning.md +11 -0
- data/notes/core_concepts.md +200 -0
- data/notes/declaring_volumes.md +3 -0
- data/notes/design_notes-aspect_oriented_devops.md +36 -0
- data/notes/design_notes-ci_testing.md +169 -0
- data/notes/design_notes-cookbook_event_ordering.md +249 -0
- data/notes/design_notes-meta_discovery.md +59 -0
- data/notes/ec2-pricing_and_capacity.md +69 -0
- data/notes/ec2-pricing_and_capacity.numbers +0 -0
- data/notes/homebase-layout.txt +102 -0
- data/notes/knife-cluster-commands.md +18 -0
- data/notes/named-cloud-objects.md +11 -0
- data/notes/opscode_org_key.png +0 -0
- data/notes/opscode_user_key.png +0 -0
- data/notes/philosophy.md +13 -0
- data/notes/rake_tasks.md +24 -0
- data/notes/renamed-recipes.txt +142 -0
- data/notes/silverware.md +85 -0
- data/notes/style_guide.md +300 -0
- data/notes/tips_and_troubleshooting.md +92 -0
- data/notes/version-3_2.md +273 -0
- data/notes/walkthrough-hadoop.md +168 -0
- data/notes/walkthrough-web.md +166 -0
- data/spec/fixtures/ec2/elb/snakeoil.crt +35 -0
- data/spec/fixtures/ec2/elb/snakeoil.key +51 -0
- data/spec/integration/minimal-chef-repo/chefignore +41 -0
- data/spec/integration/minimal-chef-repo/environments/_default.json +12 -0
- data/spec/integration/minimal-chef-repo/knife/credentials/knife-org.rb +19 -0
- data/spec/integration/minimal-chef-repo/knife/credentials/knife-user-ironfantester.rb +9 -0
- data/spec/integration/minimal-chef-repo/knife/knife.rb +66 -0
- data/spec/integration/minimal-chef-repo/roles/systemwide.rb +10 -0
- data/spec/integration/spec/elb_build_spec.rb +95 -0
- data/spec/integration/spec_helper.rb +16 -0
- data/spec/integration/spec_helper/launch_cluster.rb +55 -0
- data/spec/ironfan/ec2/elb_spec.rb +95 -0
- data/spec/ironfan/ec2/security_group_spec.rb +0 -6
- metadata +60 -3
data/lib/ironfan/headers.rb
CHANGED
@@ -26,6 +26,8 @@ module Ironfan
     class Cloud < Ironfan::Dsl; end
     class Ec2 < Cloud
       class SecurityGroup < Ironfan::Dsl; end
+      class ElasticLoadBalancer < Ironfan::Dsl; end
+      class IamServerCertificate < Ironfan::Dsl; end
     end
     class VirtualBox < Cloud; end
   end
@@ -49,6 +51,8 @@ module Ironfan
       class Keypair < Ironfan::Provider::Resource; end
       class PlacementGroup < Ironfan::Provider::Resource; end
       class SecurityGroup < Ironfan::Provider::Resource; end
+      class ElasticLoadBalancer < Ironfan::Provider::Resource; end
+      class IamServerCertificate < Ironfan::Provider::Resource; end
     end
     class VirtualBox < Ironfan::IaasProvider
       class Machine < Ironfan::IaasProvider::Machine; end
data/lib/ironfan/provider.rb
CHANGED
@@ -31,6 +31,7 @@ module Ironfan
     def self.load(cluster)
       Ironfan.parallel (resources) do |r|
         type = r.resource_type.to_s
+        r.forget!
         Ironfan.substep(cluster.name, "loading #{type}s")
         r.load! cluster
         Ironfan.substep(cluster.name, "loaded #{type}s")
@@ -41,6 +42,11 @@ module Ironfan
       resources.each {|r| r.validate_resources! computers }
     end
 
+    def self.aggregate!(computers)
+      resources.each do |r|
+        r.aggregate!(computers) if r.shared?
+      end
+    end
 
     class Resource < Builder
       @@known = {}
@@ -81,15 +87,16 @@ module Ironfan
       #
       def self.create!(*p) Ironfan.noop(self,__method__,*p); end
       def self.save!(*p) Ironfan.noop(self,__method__,*p); end
+      def self.aggregate!(*p) Ironfan.noop(self,__method__,*p); end
       def self.destroy!(*p) Ironfan.noop(self,__method__,*p); end
 
       #
       # Utilities
       #
       [:shared?, :multiple?, :load!,:validate_computer!,
-        :validate_resources!,:create!,:save!,:destroy!].each do |method_name|
-
-
+        :validate_resources!,:create!,:save!,:aggregate!,:destroy!].each do |method_name|
+        define_method(method_name) {|*p| self.class.send(method_name,*p) }
+      end
 
       def self.remember(resource,options={})
         index = options[:id] || resource.name
@@ -113,6 +120,10 @@ module Ironfan
         self.known[id]
       end
 
+      def self.forget!
+        @@known[self.name] = { }
+      end
+
       def self.forget(id)
         self.known.delete(id)
       end
@@ -121,6 +132,40 @@ module Ironfan
       def self.known
         @@known[self.name] ||= {}
       end
+
+      def self.patiently(name, error_class, options={})
+        options[:message]   ||= 'ignoring %s'
+        options[:wait_time] ||= 1
+        options[:max_tries] ||= 10
+
+        success = false
+        tries = 0
+        until success or (tries > options[:max_tries]) do
+          begin
+            result = yield
+            success = true # If we made it to this line, the yield didn't raise an exception
+          rescue error_class => e
+            tries += 1
+            if options[:ignore] and options[:ignore].call(e)
+              success = true
+              Ironfan.substep(name, options[:message] % e.message, options[:display] ? :red : :gray)
+            else
+              Ironfan.substep(name, options[:message] % e.message, options[:display] ? :red : :gray)
+              Ironfan.substep(name, "sleeping #{options[:sleep_time]} second(s) before trying again")
+              sleep options[:wait_time]
+              result = e
+            end
+          end
+        end
+
+        if success
+          return result
+        else
+          ui.warn("Gave up after #{options[:max_tries]} attempts to execute #{name} code")
+          raise result
+        end
+      end
+
     end
 
   end
data/lib/ironfan/provider/ec2.rb
CHANGED
@@ -5,19 +5,24 @@ module Ironfan
     self.handle = :ec2
 
     def self.resources
-      [ Machine, EbsVolume, Keypair, SecurityGroup ]
+      [ Machine, EbsVolume, Keypair, SecurityGroup, IamServerCertificate, ElasticLoadBalancer ]
     end
 
     #
     # Utility functions
     #
     def self.connection
-      @@connection ||= Fog::Compute.new({
-
-
-
-
-
+      @@connection ||= Fog::Compute.new(self.aws_credentials.merge({ :provider => 'AWS' }))
+    end
+
+    def self.elb
+      @@elb ||= Fog::AWS::ELB.new(self.aws_credentials)
+    end
+
+    def self.iam
+      credentials = self.aws_credentials
+      credentials.delete(:region)
+      @@iam ||= Fog::AWS::IAM.new(credentials)
     end
 
     def self.aws_account_id()
@@ -42,7 +47,17 @@ module Ironfan
     def self.applicable(computer)
       computer.server and computer.server.clouds.include?(:ec2)
     end
-  end
 
+    private
+
+    def self.aws_credentials
+      return {
+        :aws_access_key_id => Chef::Config[:knife][:aws_access_key_id],
+        :aws_secret_access_key => Chef::Config[:knife][:aws_secret_access_key],
+        :region => Chef::Config[:knife][:region]
+      }
+    end
+
+  end
 end
 end
data/lib/ironfan/provider/ec2/elastic_load_balancer.rb
ADDED
@@ -0,0 +1,239 @@
+module Ironfan
+  class Provider
+    class Ec2
+
+      class ElasticLoadBalancer < Ironfan::Provider::Resource
+        delegate :availability_zones,
+            :configure_health_check,
+            :deregister_instances,
+            :disable_availability_zones,
+            :enable_availability_zones,
+            :health_check,
+            :instances,
+            :listeners,
+            :policies,
+            :register_instances,
+            :source_group,
+          :to => :adaptee
+
+        def self.shared?() true; end
+        def self.multiple?() true; end
+        def self.resource_type() :elastic_load_balancer; end
+        def self.expected_ids(computer)
+          ec2 = computer.server.cloud(:ec2)
+          ec2.elastic_load_balancers.values.map { |elb| self.full_name(computer, elb) }.uniq
+        end
+
+        def name()
+          adaptee.id
+        end
+
+        #
+        # Discovery
+        #
+        def self.load!(cluster=nil)
+          Ec2.elb.load_balancers.each do |raw|
+            next if raw.blank?
+            elb = ElasticLoadBalancer.new(:adaptee => raw)
+            remember(elb)
+            Chef::Log.debug("Loaded #{elb}: #{elb.inspect}")
+          end
+        end
+
+        def receive_adaptee(obj)
+          obj = Ec2.elb.load_balancer.new(obj) if obj.is_a?(Hash)
+          super
+        end
+
+        def to_s
+          "<%-15s>" % name
+        end
+
+        #
+        # Manipulation
+        #
+        def self.aggregate!(computers)
+          ec2_computers = computers.select { |c| Ec2.applicable c }
+          return if ec2_computers.empty?
+
+          load! # Find out which ELBs already exist in EC2
+
+          running_computers = ec2_computers.select { |c| c.running? }
+          elbs_for_running_computers = running_computers.map { |c| self.expected_ids(c) }.flatten.uniq
+          elbs_for_stopped_computers = ec2_computers.select { |c| not c.running? }.map { |c| self.expected_ids(c) }.flatten.uniq
+          elbs_to_start = [ elbs_for_running_computers ].flatten.compact.reject { |elb_name| recall? elb_name }
+          elbs_to_stop = [ elbs_for_stopped_computers - elbs_for_running_computers ].flatten.compact.select { |elb_name| recall? elb_name }
+
+          elbs_to_stop.each do |elb_name|
+            Ironfan.step(elb_name, "stopping unused elastic load balancer #{elb_name}", :blue)
+            Ec2.elb.delete_load_balancer(elb_name)
+            forget elb_name
+          end
+
+          [ elbs_to_start, elbs_for_running_computers ].flatten.sort.uniq.each do |elb_name|
+            computers_using_this_elb = running_computers.select { |c| self.expected_ids(c).include?(elb_name) }
+            self.start_or_sync_elb(elb_name, computers_using_this_elb, elbs_to_start.include?(elb_name))
+          end
+          load!
+
+        end
+
+        private
+
+        def self.start_or_sync_elb(elb_name, computers, start_elb)
+
+          # We'll need to know which computers are using this ELB. There must be some, or
+          # we wouldn't be in this method.
+          availability_zones = computers.map { |c| c.machine.availability_zone }.uniq.sort
+          health_check, listeners, ssl_policy = self.fog_elb_parameters(elb_name, computers.first)
+
+          if start_elb
+            Ironfan.step(elb_name, "creating elastic load balancer", :blue)
+            self.patiently(elb_name, Fog::AWS::IAM::NotFound, :message => "waiting for SSL certificate(s) to appear", :display => true) do
+              Ec2.elb.create_load_balancer(availability_zones, elb_name, listeners)
+            end
+            load! # Repopulate known list with native ELB object
+          end
+
+          elb = recall(elb_name)
+          Ironfan.step(elb.name, "syncing elastic load balancer", :blue)
+
+          # Did the list of availability zones for this ELB change?
+          if availability_zones != elb.availability_zones.sort
+            Ironfan.step(elb.name, " updating availability zones to #{availability_zones.join(', ')}", :blue)
+            to_add = [ availability_zones - elb.availability_zones ]
+            to_remove = [ elb.availability_zones - availability_zones ]
+            elb.enable_availability_zones(to_add) unless to_add.empty?
+            elb.disable_availability_zones(to_remove) unless to_remove.empty?
+          end
+
+          # Did the health check configuration change?
+          if health_check != elb.health_check
+            Ironfan.step(elb.name, " updating health check", :blue)
+            elb.configure_health_check(health_check)
+          end
+
+          # Make sure SSL policy exists and is set on all SSL-enabled load balancer ports
+          Ironfan.step(elb.name, " syncing generated policy #{ssl_policy[:name]}", :blue)
+          Ec2.elb.create_load_balancer_policy(elb.name, ssl_policy[:name], 'SSLNegotiationPolicyType', ssl_policy[:attributes])
+
+          # Did the listener configuration change?
+          all_lb_ports = listeners.map { |l| l['LoadBalancerPort'] }.sort.uniq
+          remove_listeners = [ ]
+          elb.listeners.each do |el|
+            match = listeners.detect { |l|
+              l['Protocol'].eql?(el.protocol) &&
+              l['LoadBalancerPort'].eql?(el.lb_port) &&
+              l['InstanceProtocol'].eql?(el.instance_protocol) &&
+              l['InstancePort'].eql?(el.instance_port) &&
+              l['SSLCertificateId'].eql?(el.ssl_id)
+            }
+            if match
+              listeners.reject! { |l| l.eql? match }
+            else
+              remove_listeners << el.lb_port
+            end
+          end
+
+          reload = false
+          unless remove_listeners.empty?
+            Ironfan.step(elb.name, " removing listener from ports #{remove_listeners.join(', ')}", :blue)
+            Ec2.elb.delete_load_balancer_listeners(elb.name, remove_listeners)
+            reload = true
+          end
+
+          unless listeners.empty?
+            Ironfan.step(elb.name, " adding listeners on ports #{listeners.map { |l| l['LoadBalancerPort'] }.join(', ')}", :blue)
+            self.patiently(elb_name, Fog::AWS::IAM::NotFound, :message => "waiting for SSL certificate(s) to appear", :display => true) do
+              Ec2.elb.create_load_balancer_listeners(elb.name, listeners)
+            end
+            reload = true
+          end
+
+          if reload
+            forget elb.name
+            Ironfan.step(elb.name, " reloading from EC2", :blue)
+            load!
+            elb = recall elb.name
+          end
+
+          removed_policies = [ ]
+          elb.listeners.each do |l|
+            l.policy_names.reject { |p| p == ssl_policy[:name] }.each do |remove|
+              removed_policies << remove
+              Ironfan.step(elb.name, " removing unused policy #{remove} from port #{l.lb_port} listener", :blue)
+            end
+            if l.ssl_id and !l.policy_names.include?(ssl_policy[:name])
+              Ironfan.step(elb.name, " adding policy #{ssl_policy[:name]} to port #{l.lb_port} listener", :blue)
+              Ec2.elb.set_load_balancer_policies_of_listener(elb.name, l.lb_port, [ ssl_policy[:name] ])
+            end
+          end
+
+          removed_policies.each do |remove|
+            Ironfan.step(elb.name, " deleting now-unused policy #{remove}", :blue)
+            Ec2.elb.delete_load_balancer_policy(elb.name, remove)
+          end
+
+          # Did the list of instances change?
+          running_instances = computers.map { |c| c.machine.id }.sort
+          if running_instances != elb.instances.sort
+            Ironfan.step(elb.name, " updating instance list", :blue)
+            to_add = running_instances - elb.instances
+            unless to_add.empty?
+              Ironfan.step(elb.name, " adding instances #{to_add.join(', ')}", :blue)
+              elb.register_instances(to_add)
+            end
+            to_remove = elb.instances - running_instances
+            unless to_remove.empty?
+              Ironfan.step(elb.name, " removing instances #{to_remove.join(', ')}", :blue)
+              elb.deregister_instances(to_remove)
+            end
+          end
+
+          # Make sure that all of the relevant security groups allow access to the ELB
+          # on the health check and listener ports
+          elb_sg = elb.source_group
+          all_facet_sgs = computers.map { |c| "#{c.server.cluster_name}-#{c.server.facet_name}" }.uniq.map do |sg_name|
+            Ironfan::Provider::Ec2::SecurityGroup.recall sg_name
+          end
+
+          all_facet_sgs.map do |facet_sg|
+            self.patiently(facet_sg.name, Fog::Compute::AWS::Error, :ignore => Proc.new { |e| e.message =~ /InvalidPermission\.Duplicate/ }) do
+              facet_sg.authorize_port_range(1..65535, :group => { elb_sg['OwnerAlias'] => elb_sg['GroupName'] })
+            end
+          end
+
+        end
+
+        def self.full_name(computer, elb)
+          "ironfan-%s-%s" % [ computer.server.cluster_name, elb.name ]
+        end
+
+        def self.fog_elb_parameters(elb_name, computer)
+          elb_name = elb_name.sub("ironfan-#{computer.server.cluster_name}-", '')
+          cloud = computer.server.cloud(:ec2)
+          elb = cloud.elastic_load_balancers[elb_name]
+
+          # Health checking parameters
+          health_check = elb.health_check.to_fog
+
+          # Port/protocol listening configurations
+          cert_lookup = { }
+          cloud.iam_server_certificates.keys.each do |cert_key|
+            cert = cloud.iam_server_certificates[cert_key]
+            id = Ironfan::Provider::Ec2::IamServerCertificate.expected_id(computer, cert)
+            cert_lookup[cert_key] = Ironfan::Provider::Ec2::IamServerCertificate.recall(id)['Arn']
+          end
+          listeners = elb.listeners_to_fog(cert_lookup)
+
+          # The SSL policy, if any, for this ELB
+          ssl_policy = elb.ssl_policy_to_fog
+
+          # A list of parameters that can be used in Fog calls
+          [ health_check, listeners, ssl_policy ]
+        end
+
+      end
+    end
+  end
+end
data/lib/ironfan/provider/ec2/iam_server_certificate.rb
ADDED
@@ -0,0 +1,101 @@
+module Ironfan
+  class Provider
+    class Ec2
+
+      # Fog::AWS doesn't seem to have native models for IAM ServerCertificate
+      # using Hash semantics instead
+      class IamServerCertificate < Ironfan::Provider::Resource
+        delegate :[],:[]=, :to => :adaptee
+
+        ARN_PREFIX = "iamss_arn"
+
+        def self.shared?() true; end
+        def self.multiple?() true; end
+        def self.resource_type() :iam_server_certificate; end
+        def self.expected_ids(computer)
+          ec2 = computer.server.cloud(:ec2)
+          ec2.iam_server_certificates.values.map do |cert|
+            self.expected_id(computer, cert)
+          end
+        end
+
+        def name()
+          self['ServerCertificateName']
+        end
+
+        #
+        # Discovery
+        #
+        def self.load!(cluster=nil)
+          Ec2.iam.list_server_certificates.body['Certificates'].each do |cert|
+            iss = new(:adaptee => cert)
+            remember(iss, { :id => cert['ServerCertificateName'] })
+            remember(iss, { :id => "#{ARN_PREFIX}:#{cert['Arn']}" })
+            Chef::Log.debug("Loaded #{cert.inspect}")
+          end
+        end
+
+        def to_s
+          "<%-20s %-32s>" % [ self['ServerCertificateName'], self['Arn']]
+        end
+
+        #
+        # Manipulation
+        #
+
+        # Create any certificates that are needed by any clouds in which there are running computers
+        def self.aggregate!(computers)
+          ec2_computers = computers.select { |c| Ec2.applicable c }
+          return if ec2_computers.empty?
+
+          load! # Find out which certificates already exist in EC2
+          certs_for_running_servers = ec2_computers.select { |c| c.running? }.map { |c| self.expected_ids(c) }.flatten.uniq
+          certs_for_stopped_servers = ec2_computers.select { |c| not c.running? }.map { |c| self.expected_ids(c) }.flatten.uniq
+          certs_to_start = [ certs_for_running_servers ].flatten.compact.reject { |cert_name| recall? cert_name }
+          certs_to_stop = [ certs_for_stopped_servers - certs_for_running_servers ].flatten.compact.select { |cert_name| recall? cert_name }
+
+          certs_to_start.each do |cert_name|
+            if cert_name =~ /^#{ARN_PREFIX}:(.+)$/
+              error = "Cannot create an IAM server certificate with an explicit ARN #{$1}. Explicit ARNs can only be used to capture existing IAM server certificates created outside of Ironfan."
+              puts error and raise error
+            else
+              Ironfan.step(cert_name, "creating server certificate", :blue)
+              computer = ec2_computers.select { |c| self.expected_ids(c).include?(cert_name) }.values.first
+              use_name = cert_name.sub("ironfan-#{computer.server.cluster_name}-", '')
+              cert_prov = computer.server.cloud(:ec2).iam_server_certificates[use_name]
+              options = cert_prov.certificate_chain.nil? ? { } : { 'CertificateChain' => cert_prov.certificate_chain }
+              Ec2.iam.upload_server_certificate(cert_prov.certificate, cert_prov.private_key, cert_name, options)
+            end
+          end
+
+          certs_to_stop.each do |cert_name|
+            if cert_name !~ /^#{ARN_PREFIX}:(.+)$/
+              Ironfan.step(cert_name, "appears to be unused; you may want to remove it manually", :red)
+            end
+          end
+
+          load! # Get new list of native certificates via reload
+        end
+
+        def self.full_name(computer, cert)
+          "ironfan-%s-%s" % [ computer.server.cluster_name, cert.name ]
+        end
+
+        def self.expected_id(computer, cert)
+          n = self.full_name(computer, cert)
+          if cert.arn
+            Chef::Log.info("Using explicit IAMServerCertificate ARN #{cert.arn} instead of inferred name #{n}")
+            "#{ARN_PREFIX}:#{cert.arn}"
+          else
+            if n.length > 32
+              error = "Excessively long certificate name #{n}, must be <= 32 characters"
+              puts error and raise error
+            end
+            n
+          end
+        end
+
+      end
+    end
+  end
+end