the-maestro 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.document +5 -0
- data/.gitignore +25 -0
- data/LICENSE +23 -0
- data/README.rdoc +378 -0
- data/Rakefile +116 -0
- data/VERSION +1 -0
- data/lib/maestro.rb +354 -0
- data/lib/maestro/cloud.rb +384 -0
- data/lib/maestro/cloud/aws.rb +1231 -0
- data/lib/maestro/dsl_property.rb +15 -0
- data/lib/maestro/log4r/console_formatter.rb +18 -0
- data/lib/maestro/log4r/file_formatter.rb +24 -0
- data/lib/maestro/node.rb +123 -0
- data/lib/maestro/operating_system.rb +53 -0
- data/lib/maestro/operating_system/cent_os.rb +23 -0
- data/lib/maestro/operating_system/debian.rb +40 -0
- data/lib/maestro/operating_system/fedora.rb +23 -0
- data/lib/maestro/operating_system/ubuntu.rb +100 -0
- data/lib/maestro/role.rb +36 -0
- data/lib/maestro/tasks.rb +52 -0
- data/lib/maestro/validator.rb +32 -0
- data/rails/init.rb +1 -0
- data/test/integration/base_aws.rb +156 -0
- data/test/integration/fixtures/config/maestro/cookbooks/emacs/metadata.json +41 -0
- data/test/integration/fixtures/config/maestro/cookbooks/emacs/metadata.rb +3 -0
- data/test/integration/fixtures/config/maestro/cookbooks/emacs/recipes/default.rb +21 -0
- data/test/integration/fixtures/config/maestro/roles/default.json +9 -0
- data/test/integration/fixtures/config/maestro/roles/web.json +9 -0
- data/test/integration/helper.rb +8 -0
- data/test/integration/test_aws_cloud.rb +805 -0
- data/test/integration/test_cent_os.rb +104 -0
- data/test/integration/test_debian.rb +119 -0
- data/test/integration/test_fedora.rb +104 -0
- data/test/integration/test_ubuntu.rb +149 -0
- data/test/unit/fixtures/invalid-clouds-not-a-directory/config/maestro/clouds +1 -0
- data/test/unit/fixtures/invalid-cookbooks-not-a-directory/config/maestro/cookbooks +0 -0
- data/test/unit/fixtures/invalid-maestro-not-a-directory/config/maestro +0 -0
- data/test/unit/fixtures/invalid-missing-cookbooks/config/maestro/clouds/valid.yml +21 -0
- data/test/unit/fixtures/invalid-missing-roles/config/maestro/clouds/valid.yml +21 -0
- data/test/unit/fixtures/invalid-roles-not-a-directory/config/maestro/roles +1 -0
- data/test/unit/fixtures/ssh/id_rsa-maestro-test-keypair +27 -0
- data/test/unit/helper.rb +6 -0
- data/test/unit/test_aws_cloud.rb +133 -0
- data/test/unit/test_aws_ec2_node.rb +76 -0
- data/test/unit/test_aws_elb_node.rb +221 -0
- data/test/unit/test_aws_rds_node.rb +380 -0
- data/test/unit/test_cent_os.rb +28 -0
- data/test/unit/test_cloud.rb +142 -0
- data/test/unit/test_debian.rb +62 -0
- data/test/unit/test_fedora.rb +28 -0
- data/test/unit/test_invalid_mode.rb +11 -0
- data/test/unit/test_maestro.rb +140 -0
- data/test/unit/test_node.rb +50 -0
- data/test/unit/test_operating_system.rb +19 -0
- data/test/unit/test_rails_mode.rb +77 -0
- data/test/unit/test_role.rb +59 -0
- data/test/unit/test_standalone_mode.rb +75 -0
- data/test/unit/test_ubuntu.rb +95 -0
- data/the-maestro.gemspec +150 -0
- metadata +228 -0
@@ -0,0 +1,1231 @@
|
|
1
|
+
require "AWS"
|
2
|
+
require "aws/s3"
|
3
|
+
require "maestro/role"
|
4
|
+
|
5
|
+
|
6
|
+
# disable "warning: peer certificate won't be verified in this SSL session" messages
|
7
|
+
class Net::HTTP
|
8
|
+
alias_method :old_initialize, :initialize
|
9
|
+
def initialize(*args)
|
10
|
+
old_initialize(*args)
|
11
|
+
@ssl_context = OpenSSL::SSL::SSLContext.new
|
12
|
+
@ssl_context.verify_mode = OpenSSL::SSL::VERIFY_NONE
|
13
|
+
end
|
14
|
+
end
|
15
|
+
|
16
|
+
|
17
|
+
module Maestro
|
18
|
+
module Cloud
|
19
|
+
# Amazon Web Services cloud
|
20
|
+
class Aws < Base
|
21
|
+
|
22
|
+
MAESTRO_NODE_PREFIX = "node."
|
23
|
+
MAESTRO_ROLE_PREFIX = "role."
|
24
|
+
MAESTRO_DEFAULT_ROLE = "default"
|
25
|
+
|
26
|
+
# Array of all ec2 security groups names in this Cloud
|
27
|
+
attr_reader :ec2_security_groups
|
28
|
+
# Array of ec2 security group names for the Roles in this Cloud
|
29
|
+
attr_reader :role_ec2_security_groups
|
30
|
+
# Array of ec2 security group names for the Maestro::Node::Aws::Ec2 Nodes in this Cloud
|
31
|
+
attr_reader :node_ec2_security_groups
|
32
|
+
# the default ec2 security group name for this Cloud
|
33
|
+
attr_reader :default_ec2_security_group
|
34
|
+
# Array of all rds db parameter group names in this Cloud
|
35
|
+
attr_reader :db_parameter_groups
|
36
|
+
# Array of all rds db security group names in this Cloud
|
37
|
+
attr_reader :db_security_groups
|
38
|
+
# Hash of Ec2 Nodes
|
39
|
+
attr_reader :ec2_nodes
|
40
|
+
# Hash of Elb Nodes
|
41
|
+
attr_reader :elb_nodes
|
42
|
+
# Hash of Rds Nodes
|
43
|
+
attr_reader :rds_nodes
|
44
|
+
dsl_property :aws_account_id, :aws_access_key, :aws_secret_access_key, :chef_bucket
|
45
|
+
|
46
|
+
# Creates a new AWS Cloud.
#
# name     - Symbol/String name of the Cloud.
# cfg_file - optional path to the Cloud's config file (passed through to Base).
# block    - the Cloud DSL block; evaluated by super, which is what populates
#            @ec2_nodes/@elb_nodes/@rds_nodes via ec2_node/elb_node/rds_node.
#
# NOTE: the node hashes MUST be initialized before super runs (the DSL block
# appends to them), while the security-group bookkeeping below intentionally
# runs after super so all declared nodes are visible.
def initialize(name, cfg_file=nil, &block)
  @ec2_nodes = Hash.new
  @elb_nodes = Hash.new
  @rds_nodes = Hash.new
  super(name, cfg_file, &block)
  @ec2_security_groups = Array.new
  @role_ec2_security_groups = Array.new
  @node_ec2_security_groups = Array.new
  # every instance in this Cloud is tagged with the default role group
  @default_ec2_security_group = role_ec2_security_group_name(MAESTRO_DEFAULT_ROLE)
  @role_ec2_security_groups << @default_ec2_security_group
  @ec2_nodes.values.each {|ec2| ec2.set_default_security_group(@default_ec2_security_group)}
  @db_parameter_groups = Array.new
  @db_security_groups = Array.new
  # collect the RDS group names declared by each Rds node
  @rds_nodes.values.each do |rds|
    @db_parameter_groups << rds.db_parameter_group_name
    @db_security_groups << rds.db_security_group_name
  end
end
|
64
|
+
|
65
|
+
# creates a Maestro::Node::Aws::Ec2 Node
|
66
|
+
# DSL entry point: declares a Maestro::Node::Aws::Ec2 Node in this Cloud.
# Flags a validation error instead when the name is already taken.
def ec2_node(name, &block)
  return invalidate("Duplicate node definition: #{name}") if @nodes.key?(name)
  node = Maestro::Node::Aws::Ec2.new(name, self, &block)
  @nodes[name] = node
  @ec2_nodes[name] = node
  # EC2 instances are chef-configurable, so register them for configuration too
  @configurable_nodes[name] = node
end
|
76
|
+
|
77
|
+
# creates a Maestro::Node::Aws::Elb Node
|
78
|
+
# DSL entry point: declares a Maestro::Node::Aws::Elb Node in this Cloud.
# Flags a validation error instead when the name is already taken.
def elb_node(name, &block)
  return invalidate("Duplicate node definition: #{name}") if @nodes.key?(name)
  node = Maestro::Node::Aws::Elb.new(name, self, &block)
  @nodes[name] = node
  # load balancers are not chef-configurable, so they only join the elb registry
  @elb_nodes[name] = node
end
|
87
|
+
|
88
|
+
# creates a Maestro::Node::Aws::Rds Node
|
89
|
+
# DSL entry point: declares a Maestro::Node::Aws::Rds Node in this Cloud.
# Flags a validation error instead when the name is already taken.
def rds_node(name, &block)
  return invalidate("Duplicate node definition: #{name}") if @nodes.key?(name)
  node = Maestro::Node::Aws::Rds.new(name, self, &block)
  @nodes[name] = node
  # managed databases are not chef-configurable, so they only join the rds registry
  @rds_nodes[name] = node
end
|
98
|
+
|
99
|
+
# establishes a connection to Amazon
|
100
|
+
# Establishes authenticated connections to the EC2, ELB, RDS and S3 AWS
# services using this Cloud's credentials. Must be called before any other
# AWS-touching method (status, start, configure, shutdown all call it).
#
# Fix: removed the dead `s3_logger` local — it was created and configured
# but never passed to the S3 connection (or anything else), so it had no
# effect beyond a pointless allocation.
def connect!
  @ec2 = AWS::EC2::Base.new(:access_key_id => aws_access_key, :secret_access_key => aws_secret_access_key, :use_ssl => true)
  @elb = AWS::ELB::Base.new(:access_key_id => aws_access_key, :secret_access_key => aws_secret_access_key, :use_ssl => true)
  @rds = AWS::RDS::Base.new(:access_key_id => aws_access_key, :secret_access_key => aws_secret_access_key, :use_ssl => true)
  AWS::S3::Base.establish_connection!(:access_key_id => aws_access_key, :secret_access_key => aws_secret_access_key, :use_ssl => true)
end
|
108
|
+
|
109
|
+
# Reports the current status of this Cloud
|
110
|
+
# Reports the current status of this Cloud.
# Connects to AWS first, then delegates to Base#status (which in turn calls
# node_statuses below).
def status
  connect!
  super
end
|
114
|
+
|
115
|
+
# Starts this Cloud. Takes no action if the Cloud is already running as currently configured
|
116
|
+
# Starts this Cloud. Takes no action if the Cloud is already running as
# currently configured.
#
# Order matters: security/parameter groups must exist before the nodes that
# reference them are launched, and Elastic IPs / EBS volumes can only be
# attached once the EC2 instances are up.
#
# NOTE(review): ensure_rds_security_groups, ensure_ebs_volumes and
# ensure_nodes_terminated are not visible in this chunk — presumably defined
# later in the class; verify.
def start
  connect!
  super
  ensure_rds_security_groups if !@rds_nodes.empty?
  ensure_rds_db_parameter_groups if !@rds_nodes.empty?
  ensure_ec2_security_groups if !@ec2_nodes.empty?
  ensure_rds_db_security_groups if !@rds_nodes.empty?
  ensure_nodes_running
  ensure_elastic_ips if !@ec2_nodes.empty?
  ensure_ebs_volumes if !@ec2_nodes.empty?
end
|
127
|
+
|
128
|
+
# Configures the Nodes in this Cloud
|
129
|
+
# Configures the Nodes in this Cloud: resolves the hostnames of the
# configurable (EC2) nodes, uploads the chef assets to S3, then delegates
# the actual chef run to Base#configure.
def configure
  connect!
  get_configurable_node_hostnames
  upload_chef_assets
  super
end
|
135
|
+
|
136
|
+
# Updates this Cloud based on the current configuration
|
137
|
+
# Updates this Cloud based on the current configuration (delegates to
# Base#update after connecting).
def update
  connect!
  super
  # TODO:
  # Need to account for @elb.enable_availability_zones_for_load_balancer
  # in update if the availability zones of ec2 instances added/removed from
  # the lb changes the zones. ADD TESTS FOR THIS WORKFLOW!
end
|
145
|
+
|
146
|
+
# Shuts down this Cloud. Takes no action if the Cloud is not running
|
147
|
+
# Shuts down this Cloud. Takes no action if the Cloud is not running.
# After Base#shutdown, waits for/forces termination of all nodes.
def shutdown
  connect!
  super
  ensure_nodes_terminated
end
|
152
|
+
|
153
|
+
# Reboots the given Rds Node
|
154
|
+
# Reboots the given Rds Node and blocks until it reaches a terminal state
# ("available" or "failed"), polling describe_db_instances every 5 seconds.
#
# node_name - key of the node in @rds_nodes. NOTE(review): assumes the name
# is present; a missing key raises NoMethodError on `node.db_instance_identifier`.
def reboot_rds_node(node_name)
  to_be_watched = Array.new
  node = @rds_nodes[node_name]
  @logger.info "Rebooting Node #{node_name}..."
  @rds.reboot_db_instance(:db_instance_identifier => node.db_instance_identifier)
  to_be_watched << node_name
  # unbuffered output so the "." progress dots appear immediately
  STDOUT.sync = true
  @logger.progress "Waiting for Node #{node_name} to reboot. This may take several minutes..."
  while !to_be_watched.empty?
    instances = @rds.describe_db_instances
    instance = find_rds_node_instance(node.db_instance_identifier, instances)
    if !instance.nil? && instance.DBInstanceStatus.eql?("available")
      @logger.info ""
      @logger.info "Node #{node_name} rebooted"
      to_be_watched.delete(node_name)
    elsif !instance.nil? && instance.DBInstanceStatus.eql?("failed")
      @logger.info ""
      @logger.info "Node #{node_name} failed to reboot!"
      to_be_watched.delete(node_name)
    else
      # still rebooting (or temporarily not found): keep polling
      @logger.progress "."
    end
    sleep 5 if !to_be_watched.empty?
  end
end
|
179
|
+
|
180
|
+
# Reports the current status of all Nodes in this Cloud
|
181
|
+
# Reports the current status of every Node in this Cloud, one node family
# at a time (load balancers, then EC2 instances, then RDS databases).
# Families with no declared nodes are skipped entirely.
def node_statuses
  elb_node_statuses unless @elb_nodes.empty?
  ec2_node_statuses unless @ec2_nodes.empty?
  rds_node_statuses unless @rds_nodes.empty?
end
|
186
|
+
|
187
|
+
# Reports the current status of all Rds Nodes in this Cloud
|
188
|
+
# Reports the current status of all Rds Nodes in this Cloud.
#
# Fix: previously this passed `node_name` to find_rds_node_instance, but that
# helper matches on the instance's DBInstanceIdentifier. Every other caller
# (reboot_rds_node, ensure_rds_nodes) passes node.db_instance_identifier, so
# whenever the identifier differs from the node's name the status lookup
# always missed and reported "not running". Now uses the identifier.
def rds_node_statuses
  all_instances = @rds.describe_db_instances
  @rds_nodes.each_pair do |node_name, node|
    node_instance = find_rds_node_instance(node.db_instance_identifier, all_instances)
    if node_instance.nil?
      @logger.info "  #{node_name}: not running"
    else
      @logger.info "  #{node_name}: #{node_instance.DBInstanceStatus} (host: #{node_instance.Endpoint.Address}, port: #{node_instance.Endpoint.Port})"
    end
  end
end
|
199
|
+
|
200
|
+
# Reports the current status of all Ec2 Nodes in this Cloud
|
201
|
+
# Reports the current status of all Ec2 Nodes in this Cloud, printing one
# indented line per node: its instance state plus instance id and hostname,
# or "not running" when no live instance is tagged for it.
def ec2_node_statuses
  all_instances = @ec2.describe_instances
  @ec2_nodes.each_pair do |node_name, _node|
    instance = find_ec2_node_instance(node_name, all_instances)
    if instance.nil?
      @logger.info "  #{node_name}: not running"
    else
      @logger.info "  #{node_name}: #{instance.instanceState.name} (instance #{instance.instanceId}, host: #{instance.dnsName})"
    end
  end
end
|
212
|
+
|
213
|
+
# Reports the current status of all Elb Nodes in this Cloud
|
214
|
+
# Reports the current status of all Elb Nodes in this Cloud. For a running
# balancer, also prints the health of every registered EC2 instance as
# reported by describe_instance_health.
def elb_node_statuses
  all_balancers = @elb.describe_load_balancers
  @elb_nodes.each_pair do |node_name, node|
    node_balancer = find_elb_node_instance(node_name, all_balancers)
    if node_balancer.nil?
      @logger.info "  #{node_name}: not running"
    else
      @logger.info "  #{node_name}: running (host: #{node_balancer.DNSName})"
      @logger.info "  #{node_name} registered instances health:"
      health = @elb.describe_instance_health(:load_balancer_name => node.load_balancer_name)
      all_instances = @ec2.describe_instances
      node.ec2_nodes.each do |ec2_node_name|
        ec2_instance = find_ec2_node_instance(ec2_node_name, all_instances)
        # NOTE(review): ec2_node is assigned but never used below
        ec2_node = @ec2_nodes[ec2_node_name]
        # pick the health entry whose InstanceId matches this node's instance
        health_member = health.DescribeInstanceHealthResult.InstanceStates.member.select {|member| member if member.InstanceId.eql?(ec2_instance.instanceId)}
        # gsub(/./, ' ') indents the per-instance line under the balancer name
        @logger.info "  #{node_name.gsub(/./, ' ')} #{ec2_node_name}: #{health_member[0].State} (#{health_member[0].Description})"
      end
    end
  end
end
|
234
|
+
|
235
|
+
# finds the db instance instance tagged as the given node_name, or nil if not found
|
236
|
+
# Finds the db instance whose DBInstanceIdentifier equals node_name (and is
# not in the "deleted" state), or nil if not found. Handles the AWS response
# quirk where a single instance comes back as a Hash and multiple come back
# as an Array; when several match, the last one wins (same as the original).
def find_rds_node_instance(node_name, db_instances)
  return nil if db_instances.nil? || db_instances.empty? || db_instances.DescribeDBInstancesResult.nil? || db_instances.DescribeDBInstancesResult.DBInstances.nil?
  payload = db_instances.DescribeDBInstancesResult.DBInstances.DBInstance
  candidates =
    if payload.is_a?(Array)
      payload
    elsif payload.is_a?(Hash)
      [payload]
    else
      []
    end
  match = nil
  candidates.each do |db|
    match = db if db.DBInstanceIdentifier.eql?(node_name) && !db.DBInstanceStatus.eql?("deleted")
  end
  match
end
|
247
|
+
|
248
|
+
# finds the non-terminated ec2 instance tagged as the given node_name, or nil if not found
|
249
|
+
# Finds the non-terminated ec2 instance tagged as the given node_name, or nil
# if not found. "Tagged" means the reservation's security-group set contains
# the node's dedicated security group name.
#
# NOTE(review): if several reservations carry the tag, later ones overwrite
# node_instance — including with nil when all their instances are terminated.
def find_ec2_node_instance(node_name, instances)
  node_instance = nil
  return node_instance if instances.nil? || instances.empty? || instances.reservationSet.nil? || instances.reservationSet.item.nil? || instances.reservationSet.item.empty?
  # the node-specific security group doubles as an identifying tag
  tag = @ec2_nodes[node_name].node_security_group
  instances.reservationSet.item.each do |reservation|
    if reservation.groupSet.item.any? {|group| group.groupId.eql?(tag)}
      node_instance = reservation.instancesSet.item.detect {|instance| !instance.instanceState.name.eql?("terminated")}
    end
  end
  node_instance
end
|
260
|
+
|
261
|
+
# finds the load balancer instance tagged as the given node_name, or nil if not found
|
262
|
+
# Finds the load balancer description whose LoadBalancerName matches the
# named Elb node's configured load_balancer_name, or nil when the node is
# unknown, the response is empty, or no description matches.
def find_elb_node_instance(node_name, balancers)
  node = @elb_nodes[node_name]
  return nil if node.nil? || balancers.nil? || balancers.empty? || balancers.DescribeLoadBalancersResult.nil? || balancers.DescribeLoadBalancersResult.LoadBalancerDescriptions.nil?
  match = nil
  balancers.DescribeLoadBalancersResult.LoadBalancerDescriptions.member.each do |description|
    match = description if description.LoadBalancerName.eql?(node.load_balancer_name)
  end
  match
end
|
273
|
+
|
274
|
+
# ensures that the EC2 security groups of this cloud are created and configured
|
275
|
+
# Ensures that the EC2 security groups of this cloud are created and
# configured: the default group (self-ingress + SSH from anywhere), one
# identifying group per EC2 node, and one group per role (opened on the
# role's public ports). Also populates the @*_ec2_security_groups arrays.
def ensure_ec2_security_groups
  # the default security group applied to all nodes:
  # allow intra-group traffic and inbound SSH (tcp/22)
  ensure_ec2_security_group(@default_ec2_security_group)
  ensure_ec2_security_group_name_configuration(@default_ec2_security_group, @default_ec2_security_group, aws_account_id)
  ensure_ec2_security_group_cidr_configuration(@default_ec2_security_group, "22", "22", "tcp")
  # set up node groups (used purely as identifying tags)
  @ec2_nodes.values.each do |node|
    ensure_ec2_security_group(node.node_security_group)
    @node_ec2_security_groups << node.node_security_group
  end
  # set up role groups
  role_groups = Hash.new
  @roles.keys.collect {|role_name| role_groups[role_name] = role_ec2_security_group_name(role_name)}
  role_groups.values.each {|group| ensure_ec2_security_group(group)}
  @role_ec2_security_groups = @role_ec2_security_groups + role_groups.values
  # open each role's public ports on its role group
  @roles.each_pair do |name, role|
    if !role.public_ports.nil? && !role.public_ports.empty?
      role.public_ports.each {|port| ensure_ec2_security_group_cidr_configuration(role_ec2_security_group_name(name), port, port, "tcp")}
    end
  end
  # collect all groups
  @ec2_security_groups = @ec2_security_groups + @node_ec2_security_groups
  @ec2_security_groups = @ec2_security_groups + @role_ec2_security_groups
end
|
299
|
+
|
300
|
+
# returns an ec2 security group name to tag an instance as being in a role, using the default naming convention
|
301
|
+
# Builds the ec2 security group name used to tag an instance as belonging to
# a role, following the "<cloud-name>.role.<role-name>" convention.
def role_ec2_security_group_name(role_name)
  [@name, "#{MAESTRO_ROLE_PREFIX}#{role_name}"].join(".")
end
|
304
|
+
|
305
|
+
# ensures that the nodes of this cloud are running
|
306
|
+
# Ensures every Node of this cloud is running, one family at a time. RDS
# databases come first, then EC2 instances, then the load balancers that
# register those instances; empty families are skipped.
def ensure_nodes_running
  ensure_rds_nodes unless @rds_nodes.empty?
  ensure_ec2_nodes unless @ec2_nodes.empty?
  ensure_elb_nodes unless @elb_nodes.empty?
end
|
311
|
+
|
312
|
+
# ensures that the Rds db parameter groups of this cloud are configured
|
313
|
+
# Ensures that each Rds node's db parameter group exists (creating it when the
# describe call raises the "not found" error), then pushes the node's
# db_parameters into it in slices of 20 (the RDS API limit per modify call).
# Changes take effect at the node's next reboot ("pending-reboot").
#
# NOTE(review): `group` is a single local shared across loop iterations; if a
# later node's describe fails with an unexpected error, `group` still holds
# the previous node's value and the modify block runs anyway — verify intent.
def ensure_rds_db_parameter_groups
  @rds_nodes.each_pair do |node_name, node|
    if !node.db_parameter_group_name.nil?
      begin
        group = @rds.describe_db_parameter_groups(:db_parameter_group_name => node.db_parameter_group_name)
        @logger.info "Node #{node.name}'s db parameter group already exists (#{node.db_parameter_group_name})"
      rescue AWS::Error => aws_error
        # "not found" is the expected signal that the group must be created
        if aws_error.message.eql? "DBParameterGroup #{node.db_parameter_group_name} not found."
          @rds.create_db_parameter_group(:db_parameter_group_name => node.db_parameter_group_name, :engine => node.engine, :description => "The #{node.cloud.name} Cloud's #{node.name} Node's DB Parameter group")
          group = @rds.describe_db_parameter_groups(:db_parameter_group_name => node.db_parameter_group_name)
          @logger.info "Created db parameter group for Node #{node.name} (#{node.db_parameter_group_name})"
        else
          @logger.error "ERROR! Unexpected error retrieving db parameter groups: #{aws_error.message}"
        end
      end
      if !group.nil?
        # must modify the db param group 20 at a time.
        parameters = Array.new
        node.db_parameters.each do |p|
          parameters << {:name => p[:name], :value => p[:value], :apply_method => "pending-reboot"}
        end
        parameters.each_slice(20) do |slice|
          begin
            @rds.modify_db_parameter_group(:db_parameter_group_name => node.db_parameter_group_name, :parameters => slice)
          rescue AWS::InvalidParameterValue => invalid_param
            # log and continue with the remaining slices
            @logger.error "ERROR! #{invalid_param.message}"
          end
        end
        @logger.info "Updated Node #{node.name}'s db parameter group (#{node.db_parameter_group_name}). Changes will be reflected when the Node is next rebooted."
      end
    end
  end
end
|
346
|
+
|
347
|
+
# ensures that the Rds security groups of this cloud are configured
|
348
|
+
# Ensures that each Rds node's db security group exists (creating it when the
# describe call raises the "not found" error) and, when this Cloud also has
# EC2 nodes, authorizes ingress to the database from the Cloud's default EC2
# security group (i.e. from every EC2 node).
def ensure_rds_db_security_groups
  @rds_nodes.each_pair do |node_name, node|
    begin
      group = @rds.describe_db_security_groups(:db_security_group_name => node.db_security_group_name)
      @logger.info "Node #{node.name}'s db security group already exists (#{node.db_security_group_name})"
    rescue AWS::Error => aws_error
      # "not found" is the expected signal that the group must be created
      if aws_error.message.eql? "DBSecurityGroup #{node.db_security_group_name} not found."
        @rds.create_db_security_group(:db_security_group_name => node.db_security_group_name, :db_security_group_description => "The #{node.cloud.name} Cloud's #{node.name} Node's DB Security group")
        group = @rds.describe_db_security_groups(:db_security_group_name => node.db_security_group_name)
        @logger.info "Created db security group for Node #{node.name} (#{node.db_security_group_name})"
      else
        @logger.error "ERROR! Unexpected error retrieving db security groups: #{aws_error.message}"
      end
    end
    if !group.nil? && !@ec2_nodes.empty?
      # an empty EC2SecurityGroups list means ingress has not been granted yet
      if group.DescribeDBSecurityGroupsResult.DBSecurityGroups.DBSecurityGroup.EC2SecurityGroups.nil?
        @rds.authorize_db_security_group(:db_security_group_name => node.db_security_group_name, :ec2_security_group_name => @default_ec2_security_group, :ec2_security_group_owner_id => aws_account_id)
        @logger.info "Authorized network ingress from Nodes #{@ec2_nodes.keys.inspect} to Node #{node.name}"
      else
        @logger.info "Network ingress from Nodes #{@ec2_nodes.keys.inspect} to Node #{node.name} already authorized"
      end
    end
  end
end
|
372
|
+
|
373
|
+
# ensures that the Rds nodes of this cloud are running
|
374
|
+
# Ensures that the Rds nodes of this cloud are running: classifies each node
# by its current DBInstanceStatus, creates the missing/deleting ones, then
# polls every 5 seconds until every watched node is "available" or "failed".
#
# NOTE(review): the watch loop deletes from to_be_watched while iterating it
# with #each — Ruby tolerates this but entries can be skipped for one poll
# cycle; verify this is acceptable.
def ensure_rds_nodes
  all_instances = @rds.describe_db_instances
  to_be_started = Array.new
  to_be_watched = Array.new
  @rds_nodes.each_pair do |node_name, node|
    node_instance = find_rds_node_instance(node.db_instance_identifier, all_instances)
    if node_instance.nil?
      @logger.info "Node #{node_name} not running. Starting..."
      to_be_started << node_name
    elsif node_instance.DBInstanceStatus.eql?("deleting")
      @logger.info "Node #{node_name} deleting. Re-creating..."
      to_be_started << node_name
    elsif (node_instance.DBInstanceStatus.eql?("creating"))
      @logger.info "Node #{node_name} starting up..."
      to_be_watched << node_name
    elsif (node_instance.DBInstanceStatus.eql?("rebooting"))
      @logger.info "Node #{node_name} rebooting..."
      to_be_watched << node_name
    elsif (node_instance.DBInstanceStatus.eql?("modifying"))
      @logger.info "Node #{node_name} being modified..."
      to_be_watched << node_name
    elsif (node_instance.DBInstanceStatus.eql?("resetting-mastercredentials"))
      @logger.info "Node #{node_name} resetting master credentials..."
      to_be_watched << node_name
    elsif (node_instance.DBInstanceStatus.eql?("available"))
      @logger.info "Node #{node_name} already running (host: #{node_instance.Endpoint.Address}, port: #{node_instance.Endpoint.Port})"
    elsif (node_instance.DBInstanceStatus.eql?("backing-up"))
      @logger.info "Node #{node_name} already running (host: #{node_instance.Endpoint.Address}, port: #{node_instance.Endpoint.Port})"
    elsif (node_instance.DBInstanceStatus.eql?("failed"))
      @logger.info "Node #{node_name} in a failed state (host: #{node_instance.Endpoint.Address}, port: #{node_instance.Endpoint.Port})"
    elsif (node_instance.DBInstanceStatus.eql?("storage-full"))
      @logger.info "Node #{node_name} in a failed state due to storage full (host: #{node_instance.Endpoint.Address}, port: #{node_instance.Endpoint.Port})"
    end
  end
  # launch every node that needs (re-)creation, then watch it come up
  to_be_started.each do |node_name|
    node = @nodes[node_name]
    result = @rds.create_db_instance(:db_instance_identifier => node.db_instance_identifier, :allocated_storage => node.allocated_storage, :db_instance_class => node.db_instance_class, :engine => node.engine, :master_username => node.master_username, :master_user_password => node.master_user_password, :port => node.port, :availability_zone => node.availability_zone, :preferred_maintenance_window => node.preferred_maintenance_window, :backup_retention_period => node.backup_retention_period, :preferred_backup_window => node.preferred_backup_window)
    to_be_watched << node_name
  end
  # unbuffered output so the "." progress dots appear immediately
  STDOUT.sync = true
  @logger.progress "Waiting for Nodes #{to_be_watched.inspect} to start. This may take several minutes..." if !to_be_watched.empty?
  while !to_be_watched.empty?
    instances = @rds.describe_db_instances
    to_be_watched.each do |node_name|
      node = @nodes[node_name]
      instance = find_rds_node_instance(node.db_instance_identifier, instances)
      if !instance.nil? && instance.DBInstanceStatus.eql?("available")
        @logger.progress "\n"
        @logger.info "Node #{node_name} started (host: #{instance.Endpoint.Address}, port: #{instance.Endpoint.Port})"
        to_be_watched.delete(node_name)
        @logger.progress "Waiting for Nodes #{to_be_watched.inspect} to start. This may take several minutes..." if !to_be_watched.empty?
      elsif !instance.nil? && instance.DBInstanceStatus.eql?("failed")
        @logger.progress "\n"
        @logger.info "Node #{node_name} failed to start!"
        to_be_watched.delete(node_name)
        @logger.progress "Waiting for Nodes #{to_be_watched.inspect} to start. This may take several minutes..." if !to_be_watched.empty?
      else
        @logger.progress "."
      end
    end
    sleep 5 if !to_be_watched.empty?
  end
end
|
437
|
+
|
438
|
+
# ensures that the Ec2 nodes of this cloud are running
|
439
|
+
# Ensures that the Ec2 nodes of this cloud are running: launches nodes with no
# live instance, then polls every 5 seconds until each launched node's
# instance reports "running".
#
# Fix: the "shutting-down" branch logged "Re-starting..." but never appended
# the node to to_be_started, so it was neither relaunched nor watched (compare
# ensure_rds_nodes, where the analogous "deleting" branch queues the node).
#
# NOTE(review): the watch loop deletes from to_be_watched while iterating it
# with #each — entries can be skipped for one poll cycle.
def ensure_ec2_nodes
  all_instances = @ec2.describe_instances()
  to_be_started = Array.new
  to_be_watched = Array.new
  @ec2_nodes.keys.each do |node_name|
    node_instance = find_ec2_node_instance(node_name, all_instances)
    if node_instance.nil?
      @logger.info "Node #{node_name} not running. Starting..."
      to_be_started << node_name
    elsif node_instance.instanceState.name.eql?("shutting-down")
      @logger.info "Node #{node_name} shutting down. Re-starting..."
      to_be_started << node_name
    elsif node_instance.instanceState.name.eql?("pending")
      @logger.info "Node #{node_name} starting up..."
      to_be_watched << node_name
    else
      @logger.info "Node #{node_name} already running (instance #{node_instance.instanceId}, host: #{node_instance.dnsName})"
    end
  end
  to_be_started.each do |node_name|
    node = @nodes[node_name]
    @ec2.run_instances(:image_id => node.ami, :min_count => 1, :max_count => 1, :key_name => keypair_name, :instance_type => node.instance_type, :availability_zone => node.availability_zone, :security_group => node.security_groups)
    to_be_watched << node_name
  end
  # unbuffered output so the "." progress dots appear immediately
  STDOUT.sync = true
  @logger.progress "Waiting for Nodes #{to_be_watched.inspect} to start..." if !to_be_watched.empty?
  while !to_be_watched.empty?
    instances = @ec2.describe_instances()
    to_be_watched.each do |node_name|
      instance = find_ec2_node_instance(node_name, instances)
      if !instance.nil? && instance.instanceState.name.eql?("running")
        @logger.progress "\n"
        @logger.info "Node #{node_name} started (instance #{instance.instanceId}, host: #{instance.dnsName})"
        to_be_watched.delete(node_name)
        @logger.progress "Waiting for Nodes #{to_be_watched.inspect} to start..." if !to_be_watched.empty?
      else
        @logger.progress "."
      end
    end
    sleep 5 if !to_be_watched.empty?
  end
end
|
480
|
+
|
481
|
+
# ensures that the Elb nodes of this cloud are running
|
482
|
+
# Ensures that the Elb nodes of this cloud are running: creates each missing
# load balancer, records its DNS name on the node, applies the optional
# health check, then registers the node's EC2 instances and verifies (by
# comparing sorted instance-id lists) that all of them were registered.
def ensure_elb_nodes
  all_balancers = @elb.describe_load_balancers()
  to_be_started = Array.new
  @elb_nodes.keys.each do |node_name|
    node_instance = find_elb_node_instance(node_name, all_balancers)
    if node_instance.nil?
      @logger.info "Node #{node_name} not running. Starting..."
      to_be_started << node_name
    else
      @logger.info "Node #{node_name} already running (host: #{node_instance.DNSName})"
    end
  end
  to_be_started.each do |node_name|
    node = @nodes[node_name]
    # TODO: What to do about availability zones tied to this elb's instances, but not specified here? Validation error? Leave it to the user?
    elb = @elb.create_load_balancer(:load_balancer_name => node.load_balancer_name, :availability_zones => node.availability_zones, :listeners => node.listeners)
    node.hostname = elb.CreateLoadBalancerResult.DNSName
    @logger.info "Node #{node_name} started (host: #{node.hostname})"
    if !node.health_check.nil?
      @elb.configure_health_check({:health_check => node.health_check,
                                   :load_balancer_name => node.load_balancer_name})
    end
    if !node.ec2_nodes.nil?
      # resolve each backing EC2 node to its live instance id
      instance_ids = Array.new
      all_instances = @ec2.describe_instances()
      node.ec2_nodes.each do |ec2_node_name|
        instance = find_ec2_node_instance(ec2_node_name, all_instances)
        if instance.nil?
          @logger.error "ERROR: Ec2 node '#{ec2_node_name}' is not running to map to Elb node '#{node.name}'"
        else
          instance_ids << instance.instanceId
        end
      end
      # sorted so the requested and registered id lists compare with eql?
      instance_ids.sort!
      begin
        response = @elb.register_instances_with_load_balancer(:load_balancer_name => node.load_balancer_name, :instances => instance_ids)
        if !response.RegisterInstancesWithLoadBalancerResult.nil? && !response.RegisterInstancesWithLoadBalancerResult.Instances.nil?
          registered_instances = Array.new
          response.RegisterInstancesWithLoadBalancerResult.Instances.member.each do |member|
            registered_instances << member.InstanceId
          end
          registered_instances.sort!
          if instance_ids.eql?(registered_instances)
            @logger.info "Registered Ec2 Nodes #{node.ec2_nodes.inspect} with Elb Node #{node_name}"
          else
            @logger.error "ERROR: Could not register all Ec2 Nodes #{node.ec2_nodes.inspect} with Elb Node #{node_name}. The following instances are registered: #{registered_instances}"
          end
        else
          @logger.error "ERROR: Could not register Ec2 Nodes #{node.ec2_nodes.inspect} with Elb Node #{node_name}"
        end
      rescue AWS::Error => aws_error
        @logger.error "ERROR: Could not register Ec2 Nodes #{node.ec2_nodes.inspect} with Elb Node #{node_name}: #{aws_error.message}"
      end
    end
  end
end
|
538
|
+
|
539
|
+
# predicate indicating if the given Elastic IP address is allocated to this Cloud's AWS account
|
540
|
+
# Predicate indicating whether the given Elastic IP address is allocated to
# this Cloud's AWS account. Any AWS::Error (including the expected
# "Address ... not found." message) yields false; other exceptions propagate.
def elastic_ip_allocated?(elastic_ip)
  begin
    described = @ec2.describe_addresses(:public_ip => [elastic_ip])
    return true unless described.nil?
  rescue AWS::Error => error
    return false if error.message.eql? "Address '#{elastic_ip}' not found."
  end
  false
end
|
549
|
+
|
550
|
+
# returns the instance_id which the given Elastic IP is associated with, or nil if it is not associated
|
551
|
+
# Returns the instance_id the given Elastic IP is associated with, or nil
# when it is unallocated or not currently associated. Any AWS::Error
# (including the expected "not found" message) yields nil; other exceptions
# propagate.
def elastic_ip_association(elastic_ip)
  begin
    described = @ec2.describe_addresses(:public_ip => [elastic_ip])
    return described.addressesSet.item[0].instanceId if !described.nil? && !described.addressesSet.nil?
  rescue AWS::Error => error
    return nil if error.message.eql? "Address '#{elastic_ip}' not found."
  end
  nil
end
|
560
|
+
|
561
|
+
# ensures that all configured Elastic IPs have been associated to the given nodes
|
562
|
+
# Ensures that every configured Elastic IP is associated with its node's
# running instance: skips nodes without an elastic_ip, errors when the node
# is not running or the address is not allocated to this account, and
# re-points an address that is associated with the wrong instance.
def ensure_elastic_ips
  all_instances = @ec2.describe_instances()
  @ec2_nodes.each_pair do |node_name, node|
    node_instance = find_ec2_node_instance(node_name, all_instances)
    if !node.elastic_ip.nil?
      if node_instance.nil?
        @logger.error "ERROR: Node #{node_name} doesn't appear to be running to associate with Elastic IP #{node.elastic_ip}"
      else
        if elastic_ip_allocated?(node.elastic_ip)
          associated_instance_id = elastic_ip_association(node.elastic_ip)
          if associated_instance_id.eql?(node_instance.instanceId)
            @logger.info "Elastic IP Address #{node.elastic_ip} is already associated with Node #{node_name}"
          else
            if associated_instance_id.nil?
              # unassociated: claim it for this node's instance
              @ec2.associate_address(:instance_id => node_instance.instanceId, :public_ip => node.elastic_ip)
              @logger.info "Associated Elastic IP Address #{node.elastic_ip} with Node #{node_name}"
            else
              # associated elsewhere: detach first, then re-attach here
              @logger.info "Elastic IP Address #{node.elastic_ip} is associated with the wrong instance (instance #{associated_instance_id}). Disassociating."
              @ec2.disassociate_address(:public_ip => node.elastic_ip)
              @ec2.associate_address(:instance_id => node_instance.instanceId, :public_ip => node.elastic_ip)
              @logger.info "Associated Elastic IP Address #{node.elastic_ip} with Node #{node_name}"
            end
          end
        else
          @logger.error "ERROR: Elastic IP Address #{node.elastic_ip} is not allocated to this AWS Account"
        end
      end
    end
  end
end
|
592
|
+
|
593
|
+
# predicate indicating if the given EBS volume is allocated to this Cloud's AWS account
|
594
|
+
def ebs_volume_allocated?(volume_id)
|
595
|
+
begin
|
596
|
+
volume = @ec2.describe_volumes(:volume_id => [volume_id])
|
597
|
+
return true if !volume.nil?
|
598
|
+
rescue AWS::Error => aws_error
|
599
|
+
return false if aws_error.message.eql? "The volume '#{volume_id}' does not exist."
|
600
|
+
end
|
601
|
+
return false
|
602
|
+
end
|
603
|
+
|
604
|
+
# returns the instance_id which the given EBS volume is associated with, or nil if it is not associated
|
605
|
+
def ebs_volume_association(volume_id)
|
606
|
+
begin
|
607
|
+
volume = @ec2.describe_volumes(:volume_id => [volume_id])
|
608
|
+
if !volume.nil? && !volume.volumeSet.nil? && !volume.volumeSet.item.nil? && !volume.volumeSet.item[0].attachmentSet.nil?
|
609
|
+
return volume.volumeSet.item[0].attachmentSet.item[0].instanceId
|
610
|
+
end
|
611
|
+
rescue AWS::Error => aws_error
|
612
|
+
return nil if aws_error.message.eql? "The volume '#{volume_id}' does not exist."
|
613
|
+
end
|
614
|
+
return nil
|
615
|
+
end
|
616
|
+
|
617
|
+
# ensures that all configured EBS volumes have been associated to the given nodes
|
618
|
+
def ensure_ebs_volumes
|
619
|
+
all_instances = @ec2.describe_instances()
|
620
|
+
@ec2_nodes.each_pair do |node_name, node|
|
621
|
+
node_instance = find_ec2_node_instance(node_name, all_instances)
|
622
|
+
if !node.ebs_volume_id.nil? && !node.ebs_device.nil?
|
623
|
+
if node_instance.nil?
|
624
|
+
@logger.error "ERROR: Node #{node_name} doesn't appear to be running to attach EBS Volume #{node.ebs_volume_id}"
|
625
|
+
else
|
626
|
+
if ebs_volume_allocated?(node.ebs_volume_id)
|
627
|
+
associated_instance_id = ebs_volume_association(node.ebs_volume_id)
|
628
|
+
if associated_instance_id.eql?(node_instance.instanceId)
|
629
|
+
@logger.info "EBS Volume #{node.ebs_volume_id} is already attached to Node #{node_name}"
|
630
|
+
else
|
631
|
+
begin
|
632
|
+
STDOUT.sync = true
|
633
|
+
if associated_instance_id.nil?
|
634
|
+
@logger.progress "Attaching EBS Volume #{node.ebs_volume_id} to Node #{node_name}..."
|
635
|
+
@ec2.attach_volume(:instance_id => node_instance.instanceId, :volume_id => node.ebs_volume_id, :device => node.ebs_device)
|
636
|
+
to_be_watched = [node.ebs_volume_id]
|
637
|
+
while !to_be_watched.empty?
|
638
|
+
volumes = @ec2.describe_volumes(:volume_id => to_be_watched[0])
|
639
|
+
if !volumes.volumeSet.item[0].attachmentSet.nil? && volumes.volumeSet.item[0].attachmentSet.item[0].status.eql?("attached")
|
640
|
+
to_be_watched.clear
|
641
|
+
else
|
642
|
+
@logger.progress "."
|
643
|
+
end
|
644
|
+
sleep 5 if !to_be_watched.empty?
|
645
|
+
end
|
646
|
+
@logger.info "done."
|
647
|
+
else
|
648
|
+
@logger.progress "EBS Volume #{node.ebs_volume_id} is attached to the wrong instance (instance #{associated_instance_id}). Detaching..."
|
649
|
+
@ec2.detach_volume(:volume_id => node.ebs_volume_id)
|
650
|
+
to_be_watched = [node.ebs_volume_id]
|
651
|
+
while !to_be_watched.empty?
|
652
|
+
volumes = @ec2.describe_volumes(:volume_id => to_be_watched[0])
|
653
|
+
if volumes.volumeSet.item[0].status.eql? "available"
|
654
|
+
to_be_watched.clear
|
655
|
+
else
|
656
|
+
@logger.progress "."
|
657
|
+
end
|
658
|
+
sleep 5 if !to_be_watched.empty?
|
659
|
+
end
|
660
|
+
@logger.info "done."
|
661
|
+
@logger.progress "Attaching EBS Volume #{node.ebs_volume_id} to Node #{node_name}..."
|
662
|
+
@ec2.attach_volume(:instance_id => node_instance.instanceId, :volume_id => node.ebs_volume_id, :device => node.ebs_device)
|
663
|
+
to_be_watched = [node.ebs_volume_id]
|
664
|
+
while !to_be_watched.empty?
|
665
|
+
volumes = @ec2.describe_volumes(:volume_id => to_be_watched[0])
|
666
|
+
if !volumes.volumeSet.item[0].attachmentSet.nil? && volumes.volumeSet.item[0].attachmentSet.item[0].status.eql?("attached")
|
667
|
+
to_be_watched.clear
|
668
|
+
else
|
669
|
+
@logger.progress "."
|
670
|
+
end
|
671
|
+
sleep 5 if !to_be_watched.empty?
|
672
|
+
end
|
673
|
+
@logger.info "done."
|
674
|
+
end
|
675
|
+
rescue AWS::Error => aws_error
|
676
|
+
@logger.error "Error attaching EBS Volume #{node.ebs_volume_id} to Node #{node_name}: #{aws_error.inspect}"
|
677
|
+
end
|
678
|
+
end
|
679
|
+
else
|
680
|
+
@logger.error "ERROR: EBS Volume #{node.ebs_volume_id} is not allocated to this AWS Account"
|
681
|
+
end
|
682
|
+
end
|
683
|
+
end
|
684
|
+
end
|
685
|
+
end
|
686
|
+
|
687
|
+
# ensures the project's Chef cookbooks and roles are deployed to the configured S3 Bucket
|
688
|
+
def upload_chef_assets
|
689
|
+
bucket = AWS::S3::Bucket.find(chef_bucket)
|
690
|
+
if bucket.nil?
|
691
|
+
@logger.info "Creating S3 Bucket '#{chef_bucket}'..."
|
692
|
+
bucket = AWS::S3::Bucket.create(chef_bucket, :access => :private)
|
693
|
+
@logger.info "Created S3 Bucket '#{chef_bucket}'" if !bucket.nil?
|
694
|
+
end
|
695
|
+
|
696
|
+
@logger.info "Packaging Chef assets..."
|
697
|
+
chef_tgz = Maestro.chef_archive
|
698
|
+
@logger.info "Uploading Chef assets to S3 bucket '#{chef_bucket}'..."
|
699
|
+
AWS::S3::S3Object.store(MAESTRO_CHEF_ARCHIVE, File.open(chef_tgz, "r"), chef_bucket, :access => :private)
|
700
|
+
@logger.info "Chef assets uploaded to S3 Bucket '#{chef_bucket}' as key '#{MAESTRO_CHEF_ARCHIVE}'"
|
701
|
+
|
702
|
+
@logger.info "Uploading Node JSON files to S3 Bucket '#{chef_bucket}'..." if !@configurable_nodes.empty?
|
703
|
+
@configurable_nodes.each_pair do |node_name, node|
|
704
|
+
AWS::S3::S3Object.store(node.json_filename, node.json, chef_bucket, :access => :private)
|
705
|
+
@logger.info "Node #{node.name} JSON file uploaded to S3 Bucket '#{chef_bucket}' as key '#{node.json_filename}'"
|
706
|
+
end
|
707
|
+
end
|
708
|
+
|
709
|
+
# Returns the URL to the Chef assets tar ball
|
710
|
+
def chef_assets_url
|
711
|
+
AWS::S3::S3Object.url_for(MAESTRO_CHEF_ARCHIVE, chef_bucket, :expires_in => 600, :use_ssl => true)
|
712
|
+
end
|
713
|
+
|
714
|
+
# Returns the URL for the given node's Chef JSON file
|
715
|
+
def node_json_url(node)
|
716
|
+
AWS::S3::S3Object.url_for(node.json_filename, chef_bucket, :expires_in => 600, :use_ssl => true)
|
717
|
+
end
|
718
|
+
|
719
|
+
# Collects the current hostnames of all running Configurable Nodes
|
720
|
+
def get_configurable_node_hostnames
|
721
|
+
all_instances = @ec2.describe_instances()
|
722
|
+
@ec2_nodes.each_pair do |node_name, node|
|
723
|
+
node_instance = find_ec2_node_instance(node_name, all_instances)
|
724
|
+
if node_instance.nil?
|
725
|
+
@logger.error "ERROR: node #{node_name} not running!"
|
726
|
+
else
|
727
|
+
node.hostname = node_instance.dnsName
|
728
|
+
end
|
729
|
+
end
|
730
|
+
end
|
731
|
+
|
732
|
+
# Ensures that the Nodes of this Cloud are terminated
|
733
|
+
def ensure_nodes_terminated
|
734
|
+
ensure_elb_nodes_terminated
|
735
|
+
ensure_ec2_nodes_terminated
|
736
|
+
ensure_rds_nodes_terminated
|
737
|
+
end
|
738
|
+
|
739
|
+
# Ensures that the Ec2 Nodes of this Cloud are terminated
|
740
|
+
def ensure_ec2_nodes_terminated
|
741
|
+
all_instances = @ec2.describe_instances()
|
742
|
+
to_be_terminated = Array.new
|
743
|
+
to_be_watched = Array.new
|
744
|
+
@ec2_nodes.each_pair do |node_name, node|
|
745
|
+
node_instance = find_ec2_node_instance(node_name, all_instances)
|
746
|
+
if node_instance.nil?
|
747
|
+
@logger.info "Node #{node_name} already terminated"
|
748
|
+
elsif node_instance.instanceState.name.eql?("shutting-down")
|
749
|
+
@logger.info "Node #{node_name} terminating..."
|
750
|
+
to_be_watched << node_name
|
751
|
+
elsif node_instance.instanceState.name.eql?("pending") || node_instance.instanceState.name.eql?("running")
|
752
|
+
@logger.info "Node #{node_name} running. Terminating..."
|
753
|
+
to_be_terminated << node_instance.instanceId
|
754
|
+
to_be_watched << node_name
|
755
|
+
end
|
756
|
+
end
|
757
|
+
if !to_be_terminated.empty?
|
758
|
+
@ec2.terminate_instances(:instance_id => to_be_terminated)
|
759
|
+
@logger.progress "Waiting for Nodes #{to_be_watched.inspect} to terminate..." if !to_be_watched.empty?
|
760
|
+
end
|
761
|
+
STDOUT.sync = true
|
762
|
+
while !to_be_watched.empty?
|
763
|
+
instances = @ec2.describe_instances()
|
764
|
+
to_be_watched.each do |node_name|
|
765
|
+
instance = find_ec2_node_instance(node_name, instances)
|
766
|
+
if instance.nil?
|
767
|
+
@logger.progress "\n"
|
768
|
+
@logger.info "Node #{node_name} terminated"
|
769
|
+
to_be_watched.delete(node_name)
|
770
|
+
@logger.progress "Waiting for Nodes #{to_be_watched.inspect} to terminate..." if !to_be_watched.empty?
|
771
|
+
else
|
772
|
+
@logger.progress "."
|
773
|
+
end
|
774
|
+
end
|
775
|
+
sleep 5 if !to_be_watched.empty?
|
776
|
+
end
|
777
|
+
end
|
778
|
+
|
779
|
+
# Ensures that the Elb Nodes of this Cloud are terminated
|
780
|
+
def ensure_elb_nodes_terminated
|
781
|
+
balancers = @elb.describe_load_balancers
|
782
|
+
to_be_deleted = Hash.new
|
783
|
+
@elb_nodes.each_pair do |node_name, node|
|
784
|
+
instance = find_elb_node_instance(node_name, balancers)
|
785
|
+
if !instance.nil?
|
786
|
+
@logger.info "Node #{node_name} terminating..."
|
787
|
+
to_be_deleted[node_name] = node.load_balancer_name
|
788
|
+
else
|
789
|
+
@logger.info "Node #{node_name} already terminated"
|
790
|
+
end
|
791
|
+
end
|
792
|
+
if !to_be_deleted.empty?
|
793
|
+
to_be_deleted.each_pair do |node_name, load_balancer_name|
|
794
|
+
@elb.delete_load_balancer(:load_balancer_name => load_balancer_name)
|
795
|
+
@logger.info "Node #{node_name} terminated"
|
796
|
+
end
|
797
|
+
end
|
798
|
+
end
|
799
|
+
|
800
|
+
# Ensures that the Rds Nodes of this Cloud are terminated
|
801
|
+
def ensure_rds_nodes_terminated
|
802
|
+
all_instances = @rds.describe_db_instances
|
803
|
+
wait_for = Hash.new
|
804
|
+
to_be_terminated = Array.new
|
805
|
+
to_be_watched = Array.new
|
806
|
+
@rds_nodes.each_pair do |node_name, node|
|
807
|
+
node_instance = find_rds_node_instance(node.db_instance_identifier, all_instances)
|
808
|
+
if node_instance.nil?
|
809
|
+
@logger.info "Node #{node_name} already terminated"
|
810
|
+
elsif node_instance.DBInstanceStatus.eql?("deleting")
|
811
|
+
@logger.info "Node #{node_name} terminating..."
|
812
|
+
to_be_watched << node_name
|
813
|
+
elsif (node_instance.DBInstanceStatus.eql?("creating") ||
|
814
|
+
node_instance.DBInstanceStatus.eql?("rebooting") ||
|
815
|
+
node_instance.DBInstanceStatus.eql?("modifying") ||
|
816
|
+
node_instance.DBInstanceStatus.eql?("resetting-mastercredentials") ||
|
817
|
+
node_instance.DBInstanceStatus.eql?("backing-up"))
|
818
|
+
@logger.info "Waiting for Node #{node_name} to finish #{node_instance.DBInstanceStatus} before terminating..."
|
819
|
+
wait_for[node_name] = node_instance.DBInstanceStatus
|
820
|
+
elsif (node_instance.DBInstanceStatus.eql?("available") ||
|
821
|
+
node_instance.DBInstanceStatus.eql?("failed") ||
|
822
|
+
node_instance.DBInstanceStatus.eql?("storage-full"))
|
823
|
+
@logger.info "Node #{node_name} running. Terminating..."
|
824
|
+
to_be_terminated << node_name
|
825
|
+
end
|
826
|
+
end
|
827
|
+
|
828
|
+
@logger.progress "Waiting for Nodes #{wait_for.keys.inspect}..." if !wait_for.empty?
|
829
|
+
while !wait_for.empty?
|
830
|
+
instances = @rds.describe_db_instances
|
831
|
+
wait_for.each_pair do |node_name, status|
|
832
|
+
node = @nodes[node_name]
|
833
|
+
node_instance = find_rds_node_instance(node.db_instance_identifier, instances)
|
834
|
+
if (node_instance.DBInstanceStatus.eql?("available") ||
|
835
|
+
node_instance.DBInstanceStatus.eql?("failed") ||
|
836
|
+
node_instance.DBInstanceStatus.eql?("storage-full"))
|
837
|
+
@logger.progress "\n"
|
838
|
+
@logger.info "Node #{node_name} done #{status}. Terminating..."
|
839
|
+
wait_for.delete(node_name)
|
840
|
+
to_be_terminated << node_name
|
841
|
+
@logger.progress "Waiting for Nodes #{wait_for.keys.inspect}..." if !wait_for.empty?
|
842
|
+
else
|
843
|
+
@logger.progress "."
|
844
|
+
end
|
845
|
+
end
|
846
|
+
sleep 5 if !wait_for.empty?
|
847
|
+
end
|
848
|
+
|
849
|
+
to_be_terminated.each do |node_name|
|
850
|
+
node = @nodes[node_name]
|
851
|
+
now = DateTime.now
|
852
|
+
final_snapshot = node.db_instance_identifier + "-" + now.to_s.gsub(/:/, '')
|
853
|
+
@logger.info "Terminating Node #{node_name} with final snapshot id '#{final_snapshot}' ..."
|
854
|
+
result = @rds.delete_db_instance(:db_instance_identifier => node.db_instance_identifier, :final_db_snapshot_identifier => final_snapshot)
|
855
|
+
to_be_watched << node_name
|
856
|
+
end
|
857
|
+
STDOUT.sync = true
|
858
|
+
@logger.progress "Waiting for Nodes #{to_be_watched.inspect} to terminate. This may take several minutes..." if !to_be_watched.empty?
|
859
|
+
while !to_be_watched.empty?
|
860
|
+
instances = @rds.describe_db_instances
|
861
|
+
to_be_watched.each do |node_name|
|
862
|
+
node = @nodes[node_name]
|
863
|
+
instance = find_rds_node_instance(node.db_instance_identifier, instances)
|
864
|
+
if instance.nil?
|
865
|
+
@logger.progress "\n"
|
866
|
+
@logger.info "Node #{node_name} terminated"
|
867
|
+
to_be_watched.delete(node_name)
|
868
|
+
@logger.progress "Waiting for Nodes #{to_be_watched.inspect} to terminate. This may take several minutes..." if !to_be_watched.empty?
|
869
|
+
else
|
870
|
+
@logger.progress "."
|
871
|
+
end
|
872
|
+
end
|
873
|
+
sleep 5 if !to_be_watched.empty?
|
874
|
+
end
|
875
|
+
end
|
876
|
+
|
877
|
+
|
878
|
+
private
|
879
|
+
|
880
|
+
# validates this Aws instance
|
881
|
+
def validate_internal
|
882
|
+
super
|
883
|
+
invalidate "Missing aws_account_id" if aws_account_id.nil?
|
884
|
+
invalidate "Missing aws_access_key" if aws_access_key.nil?
|
885
|
+
invalidate "Missing aws_secret_access_key" if aws_secret_access_key.nil?
|
886
|
+
invalidate "Missing chef_bucket" if chef_bucket.nil?
|
887
|
+
end
|
888
|
+
|
889
|
+
# Ensures that the given EC2 security group exists. Creates it if it does not exist.
|
890
|
+
def ensure_ec2_security_group(group_name)
|
891
|
+
security_groups = @ec2.describe_security_groups()
|
892
|
+
names = Array.new
|
893
|
+
if !security_groups.nil? && !security_groups.securityGroupInfo.nil? && !security_groups.securityGroupInfo.item.nil?
|
894
|
+
security_groups.securityGroupInfo.item.each {|group| names << group.groupName}
|
895
|
+
end
|
896
|
+
unless names.include?(group_name)
|
897
|
+
@ec2.create_security_group(:group_name => group_name, :group_description => "#{group_name} group")
|
898
|
+
end
|
899
|
+
end
|
900
|
+
|
901
|
+
# Ensures that the given EC2 security group cidr range configuration exists
|
902
|
+
# * group_name - the security group name to configure
|
903
|
+
# * from_port - the port to allow from
|
904
|
+
# * to_port - the port to allow to
|
905
|
+
# * protocol - the protocol to allow (one of 'tcp', 'udp', or 'icmp')
|
906
|
+
# * cidr_ip - optional cidr IP address configuration
|
907
|
+
def ensure_ec2_security_group_cidr_configuration(group_name, from_port, to_port, protocol, cidr_ip='0.0.0.0/0')
|
908
|
+
security_group = @ec2.describe_security_groups(:group_name => [group_name])
|
909
|
+
found_rule = false
|
910
|
+
if !security_group.nil?
|
911
|
+
ip_permissions = security_group.securityGroupInfo.item[0].ipPermissions
|
912
|
+
if !ip_permissions.nil?
|
913
|
+
ip_permissions.item.each do |permission|
|
914
|
+
if !permission.ipProtocol.nil? && permission.ipProtocol.eql?(protocol) && permission.fromPort.eql?(from_port.to_s) && permission.toPort.eql?(to_port.to_s) && permission.ipRanges.item[0].cidrIp.eql?(cidr_ip)
|
915
|
+
found_rule = true
|
916
|
+
end
|
917
|
+
end
|
918
|
+
end
|
919
|
+
end
|
920
|
+
if !found_rule
|
921
|
+
@ec2.authorize_security_group_ingress(:group_name => group_name,
|
922
|
+
:ip_protocol => protocol,
|
923
|
+
:from_port => from_port,
|
924
|
+
:to_port => to_port,
|
925
|
+
:cidr_ip => cidr_ip)
|
926
|
+
end
|
927
|
+
end
|
928
|
+
|
929
|
+
# Ensures that the given EC2 security group name configuration exists
|
930
|
+
# * group_name - the security group granting access to
|
931
|
+
# * their_group_name - the security group granting access from
|
932
|
+
# * their_account_id - the account id of their_group_name
|
933
|
+
def ensure_ec2_security_group_name_configuration(group_name, their_group_name, their_account_id)
|
934
|
+
security_group = @ec2.describe_security_groups(:group_name => [group_name])
|
935
|
+
found_rule = false
|
936
|
+
if !security_group.nil?
|
937
|
+
ip_permissions = security_group.securityGroupInfo.item[0].ipPermissions
|
938
|
+
if !ip_permissions.nil?
|
939
|
+
ip_permissions.item.each do |permission|
|
940
|
+
if !permission.groups.nil? && permission.groups.item[0].groupName.eql?(their_group_name) && permission.groups.item[0].userId.gsub(/-/,'').eql?(their_account_id.gsub(/-/,''))
|
941
|
+
found_rule = true
|
942
|
+
end
|
943
|
+
end
|
944
|
+
end
|
945
|
+
end
|
946
|
+
if !found_rule
|
947
|
+
@ec2.authorize_security_group_ingress(:group_name => group_name,
|
948
|
+
:source_security_group_name => their_group_name,
|
949
|
+
:source_security_group_owner_id => their_account_id)
|
950
|
+
end
|
951
|
+
end
|
952
|
+
end
|
953
|
+
end
|
954
|
+
end
|
955
|
+
|
956
|
+
|
957
|
+
module Maestro
|
958
|
+
module Node
|
959
|
+
module Aws
|
960
|
+
# Amazon EC2 Node
|
961
|
+
class Ec2 < Configurable
|
962
|
+
|
963
|
+
dsl_property :ami, :ssh_user, :instance_type, :availability_zone, :elastic_ip, :ebs_volume_id, :ebs_device
|
964
|
+
attr_accessor :security_groups
|
965
|
+
attr_accessor :node_security_group
|
966
|
+
attr_accessor :role_security_groups
|
967
|
+
|
968
|
+
# Creates a new Ec2 Node
|
969
|
+
def initialize(name, cloud, &block)
|
970
|
+
super(name, cloud, &block)
|
971
|
+
@security_groups = Array.new
|
972
|
+
@role_security_groups = Array.new
|
973
|
+
@node_security_group = "#{node_prefix}#{@name}"
|
974
|
+
@security_groups << @node_security_group
|
975
|
+
if !@roles.nil? && !@roles.empty?
|
976
|
+
@roles.each do |role_name|
|
977
|
+
role_security_group = "#{role_prefix}#{role_name}"
|
978
|
+
@role_security_groups << role_security_group
|
979
|
+
@security_groups << role_security_group
|
980
|
+
end
|
981
|
+
end
|
982
|
+
end
|
983
|
+
|
984
|
+
# sets the default security group for the cloud on this Ec2 Node
|
985
|
+
def set_default_security_group(security_group)
|
986
|
+
@security_groups << security_group if !@security_groups.include?(security_group)
|
987
|
+
@role_security_groups << security_group if !@role_security_groups.include?(security_group)
|
988
|
+
end
|
989
|
+
|
990
|
+
|
991
|
+
private
|
992
|
+
|
993
|
+
# returns the security group name prefix to be used for all node security groups pertaining to this Cloud
|
994
|
+
def node_prefix()
|
995
|
+
"#{@cloud.name}.#{Maestro::Cloud::Aws::MAESTRO_NODE_PREFIX}"
|
996
|
+
end
|
997
|
+
|
998
|
+
# returns the security group name prefix to be used for all role security groups pertaining to this Cloud
|
999
|
+
def role_prefix()
|
1000
|
+
"#{@cloud.name}.#{Maestro::Cloud::Aws::MAESTRO_ROLE_PREFIX}"
|
1001
|
+
end
|
1002
|
+
|
1003
|
+
# validates this Ec2 Node
|
1004
|
+
def validate_internal
|
1005
|
+
super
|
1006
|
+
invalidate "'#{@name}' node missing ami" if ami.nil?
|
1007
|
+
invalidate "'#{@name}' node missing instance_type" if instance_type.nil?
|
1008
|
+
invalidate "'#{@name}' node missing availability_zone" if availability_zone.nil?
|
1009
|
+
if (!ebs_volume_id.nil? && ebs_device.nil?)
|
1010
|
+
invalidate "'#{@name}' node missing ebs_device (you must specify both ebs_volume_id and ebs_device)"
|
1011
|
+
end
|
1012
|
+
if (ebs_volume_id.nil? && !ebs_device.nil?)
|
1013
|
+
invalidate "'#{@name}' node missing ebs_volume_id (you must specify both ebs_volume_id and ebs_device)"
|
1014
|
+
end
|
1015
|
+
end
|
1016
|
+
end
|
1017
|
+
end
|
1018
|
+
end
|
1019
|
+
end
|
1020
|
+
|
1021
|
+
|
1022
|
+
module Maestro
|
1023
|
+
module Node
|
1024
|
+
module Aws
|
1025
|
+
# Amazon ELB Node
|
1026
|
+
class Elb < Base
|
1027
|
+
|
1028
|
+
# The load balancer name of this node
|
1029
|
+
attr_reader :load_balancer_name
|
1030
|
+
dsl_property :listeners, :ec2_nodes, :availability_zones, :health_check
|
1031
|
+
|
1032
|
+
def initialize(name, cloud, &block)
|
1033
|
+
super(name, cloud, &block)
|
1034
|
+
@load_balancer_name = set_load_balancer_name
|
1035
|
+
end
|
1036
|
+
|
1037
|
+
|
1038
|
+
private
|
1039
|
+
|
1040
|
+
# Sets the load balancer name to use for this Elb Node.
|
1041
|
+
# ELB names may only have letters, digits, and dashes, and may not be longer
|
1042
|
+
# than 32 characters. This method will remove any invalid characters from the calculated name.
|
1043
|
+
# If the calculated elb node name is > 32 characters, this method will truncate the name
|
1044
|
+
# to the last 32 characters of the calculated name. This name may NOT be unique across all
|
1045
|
+
# of your clouds.
|
1046
|
+
def set_load_balancer_name
|
1047
|
+
str = "#{@cloud.name.to_s.gsub(/[^[:alnum:]-]/, '')}-#{@name.to_s.gsub(/[^[:alnum:]-]/, '')}"
|
1048
|
+
str = str[str.size-32,32] if str.size > 32
|
1049
|
+
return str
|
1050
|
+
end
|
1051
|
+
|
1052
|
+
# validates this Elb
|
1053
|
+
def validate_internal
|
1054
|
+
super
|
1055
|
+
invalidate "'#{@name}' node's name must be less than 32 characters" if @name.length > 32
|
1056
|
+
invalidate "'#{@name}' node's name must start with a letter" unless @name =~ /^[A-Za-z]/
|
1057
|
+
invalidate "'#{@name}' node's name may only contain alphanumerics and hyphens" unless @name =~ /^[a-zA-Z][[:alnum:]-]{1,62}/
|
1058
|
+
invalidate "'#{@name}' node's name must not end with a hypen" if @name =~ /-$/
|
1059
|
+
invalidate "'#{@name}' node's name must not contain two consecutive hyphens" if @name =~ /--/
|
1060
|
+
invalidate "'#{@name}' node missing listeners" if listeners.nil?
|
1061
|
+
invalidate "'#{@name}' node's listeners must be an Array of Hashes" if !listeners.is_a?(Array)
|
1062
|
+
if !listeners.nil? && listeners.is_a?(Array)
|
1063
|
+
listeners.each do |listener|
|
1064
|
+
if !listener.is_a?(Hash)
|
1065
|
+
invalidate "'#{@name}' node's listeners must be an Array of Hashes"
|
1066
|
+
else
|
1067
|
+
invalidate "'#{@name}' node's listeners Hash missing :load_balancer_port key" if !listener.has_key?(:load_balancer_port)
|
1068
|
+
invalidate "'#{@name}' node's listeners Hash missing :instance_port key" if !listener.has_key?(:instance_port)
|
1069
|
+
invalidate "'#{@name}' node's listeners Hash missing :protocol key" if !listener.has_key?(:protocol)
|
1070
|
+
end
|
1071
|
+
end
|
1072
|
+
end
|
1073
|
+
invalidate "'#{@name}' node missing ec2_nodes collection" if ec2_nodes.nil?
|
1074
|
+
invalidate "'#{@name}' node ec2_nodes collection is not an Array (found #{ec2_nodes.class})" if !ec2_nodes.is_a?(Array)
|
1075
|
+
invalidate "'#{@name}' node missing availability_zones collection" if availability_zones.nil?
|
1076
|
+
invalidate "'#{@name}' node availability_zones collection is not an Array (found #{availability_zones.class})" if !availability_zones.is_a?(Array)
|
1077
|
+
if !health_check.is_a?(Hash)
|
1078
|
+
invalidate "'#{@name}' node's health_check must be a Hash"
|
1079
|
+
else
|
1080
|
+
invalidate "'#{@name}' node's health_check Hash missing :target key" if !health_check.has_key?(:target)
|
1081
|
+
invalidate "'#{@name}' node's health_check Hash missing :timeout key" if !health_check.has_key?(:timeout)
|
1082
|
+
invalidate "'#{@name}' node's health_check Hash missing :interval key" if !health_check.has_key?(:interval)
|
1083
|
+
invalidate "'#{@name}' node's health_check Hash missing :unhealthy_threshold key" if !health_check.has_key?(:unhealthy_threshold)
|
1084
|
+
invalidate "'#{@name}' node's health_check Hash missing :healthy_threshold key" if !health_check.has_key?(:healthy_threshold)
|
1085
|
+
end
|
1086
|
+
end
|
1087
|
+
end
|
1088
|
+
end
|
1089
|
+
end
|
1090
|
+
end
|
1091
|
+
|
1092
|
+
|
1093
|
+
module Maestro
|
1094
|
+
module Node
|
1095
|
+
module Aws
|
1096
|
+
# Amazon RDS Node
|
1097
|
+
class Rds < Base
|
1098
|
+
|
1099
|
+
# the db_instance_identifier of this node
|
1100
|
+
attr_reader :db_instance_identifier
|
1101
|
+
|
1102
|
+
# the db parameter group name of this node
|
1103
|
+
attr_reader :db_parameter_group_name
|
1104
|
+
|
1105
|
+
# the db security group name of this node
|
1106
|
+
attr_reader :db_security_group_name
|
1107
|
+
|
1108
|
+
dsl_property :availability_zone, :engine, :db_instance_class, :master_username, :master_user_password,
|
1109
|
+
:port, :allocated_storage, :backup_retention_period, :preferred_maintenance_window,
|
1110
|
+
:preferred_backup_window, :db_parameters
|
1111
|
+
|
1112
|
+
def initialize(name, cloud, &block)
|
1113
|
+
super(name, cloud, &block)
|
1114
|
+
@db_instance_identifier = set_db_instance_identifier
|
1115
|
+
@db_parameter_group_name = set_db_parameter_group_name if !db_parameters.nil? && !db_parameters.empty?
|
1116
|
+
@db_security_group_name = set_db_security_group_name
|
1117
|
+
end
|
1118
|
+
|
1119
|
+
|
1120
|
+
private
|
1121
|
+
|
1122
|
+
# Returns a name to tag an RDS instance as being an Rds Node.
|
1123
|
+
# RDS names may only have letters, digits, and dashes, and may not be longer
|
1124
|
+
# than 63 characters. This method will remove any invalid characters from the calculated name.
|
1125
|
+
# If the calculated elb node name is > 63 characters, this method will truncate the name
|
1126
|
+
# to the last 63 characters of the calculated name. This name may NOT be unique across all
|
1127
|
+
# of your clouds.
|
1128
|
+
def set_db_instance_identifier
|
1129
|
+
str = "#{@cloud.name.to_s.gsub(/[^[:alnum:]-]/, '')}-#{@name.to_s.gsub(/[^[:alnum:]-]/, '')}"
|
1130
|
+
str = str[str.size-63,63] if str.size > 63
|
1131
|
+
return str
|
1132
|
+
end
|
1133
|
+
|
1134
|
+
# Returns the name of this RDS node's db parameter group.
|
1135
|
+
# Parameter group names may only have letters, digits, and dashes, and may not be longer
|
1136
|
+
# than 255 characters. This method will remove any invalid characters from the calculated name.
|
1137
|
+
# If the calculated elb node name is > 255 characters, this method will truncate the name
|
1138
|
+
# to the last 255 characters of the calculated name. This name may NOT be unique across all
|
1139
|
+
# of your clouds.
|
1140
|
+
def set_db_parameter_group_name
|
1141
|
+
str = "#{@cloud.name.to_s.gsub(/[^[:alnum:]-]/, '')}-#{@name.to_s.gsub(/[^[:alnum:]-]/, '')}-dbparams"
|
1142
|
+
str = str[str.size-255,255] if str.size > 255
|
1143
|
+
return str
|
1144
|
+
end
|
1145
|
+
|
1146
|
+
# Returns the name of this RDS node's db security group.
|
1147
|
+
# DB Security group names may only have letters, digits, and dashes, and may not be longer
|
1148
|
+
# than 255 characters. This method will remove any invalid characters from the calculated name.
|
1149
|
+
# If the calculated elb node name is > 255 characters, this method will truncate the name
|
1150
|
+
# to the last 255 characters of the calculated name. This name may NOT be unique across all
|
1151
|
+
# of your clouds.
|
1152
|
+
def set_db_security_group_name
|
1153
|
+
str = "#{@cloud.name.to_s.gsub(/[^[:alnum:]-]/, '')}-#{@name.to_s.gsub(/[^[:alnum:]-]/, '')}-security-group"
|
1154
|
+
str = str[str.size-255,255] if str.size > 255
|
1155
|
+
return str
|
1156
|
+
end
|
1157
|
+
|
1158
|
+
# Validates this Rds node's configuration, calling invalidate (defined on a
# superclass/mixin) with a message for each constraint violation found.
# Checks: node name format, availability_zone, engine, db_instance_class,
# master_username, master_user_password, port, allocated_storage,
# preferred_maintenance_window, backup_retention_period, preferred_backup_window.
def validate_internal
  super
  # RDS DB instance identifier constraints: 1-63 chars, starts with a letter,
  # alphanumerics and hyphens only, no trailing hyphen, no "--".
  invalidate "'#{@name}' node's name must be less than 64 characters" if @name.length > 63
  invalidate "'#{@name}' node's name must start with a letter" unless @name =~ /^[A-Za-z]/
  # \A/\z anchor the WHOLE string (a prefix-only match would let invalid
  # characters slip through after a valid prefix)
  invalidate "'#{@name}' node's name may only contain alphanumerics and hyphens" unless @name =~ /\A[a-zA-Z][[:alnum:]-]{0,62}\z/
  invalidate "'#{@name}' node's name must not end with a hyphen" if @name =~ /-$/
  invalidate "'#{@name}' node's name must not contain two consecutive hyphens" if @name =~ /--/

  invalidate "'#{@name}' node missing availability_zone" if availability_zone.nil?

  invalidate "'#{@name}' node missing engine" if engine.nil?
  # only engine supported by this release
  engines = ["MySQL5.1"]
  invalidate "'#{@name}' node engine is invalid" if !engines.include?(engine)

  invalidate "'#{@name}' node missing db_instance_class" if db_instance_class.nil?
  db_instance_classes = ["db.m1.small", "db.m1.large", "db.m1.xlarge", "db.m2.2xlarge", "db.m2.4xlarge"]
  invalidate "'#{@name}' node db_instance_class is invalid" if !db_instance_classes.include?(db_instance_class)

  if !master_username.nil?
    # 1-15 chars, starts with a letter, alphanumerics only
    invalidate "'#{@name}' node's master_username must be less than 16 characters" if master_username.length > 15
    invalidate "'#{@name}' node's master_username must start with a letter" unless master_username =~ /^[A-Za-z]/
    invalidate "'#{@name}' node's master_username may only contain alphanumerics" unless master_username =~ /\A[a-zA-Z][[:alnum:]]{0,14}\z/
  else
    invalidate "'#{@name}' node missing master_username"
  end

  if !master_user_password.nil?
    # 4-16 alphanumeric characters
    invalidate "'#{@name}' node's master_user_password must be between 4 and 16 characters in length" if master_user_password.length < 4 || master_user_password.length > 16
    invalidate "'#{@name}' node's master_user_password may only contain alphanumerics" unless master_user_password =~ /\A[[:alnum:]]{4,16}\z/
  else
    invalidate "'#{@name}' node missing master_user_password"
  end

  if !port.nil?
    if port.respond_to? :to_i
      invalidate "node's port must be between 1150 and 65535" if port.to_i < 1150 || port.to_i > 65535
    else
      invalidate "'#{@name}' node's port must be a number"
    end
  else
    invalidate "'#{@name}' node missing port"
  end

  if !allocated_storage.nil?
    if allocated_storage.respond_to? :to_i
      invalidate "node's allocated_storage must be between 5 and 1024" if allocated_storage.to_i < 5 || allocated_storage.to_i > 1024
    else
      invalidate "'#{@name}' node's allocated_storage must be a number"
    end
  else
    invalidate "'#{@name}' node missing allocated_storage"
  end

  if !preferred_maintenance_window.nil?
    # optional; when given must be a UTC window of the form ddd:hh24:mi-ddd:hh24:mi
    invalidate "'#{@name}' node's preferred_maintenance_window must be in UTC format 'ddd:hh24:mi-ddd:hh24:mi'" unless preferred_maintenance_window =~ /^(Mon|Tue|Wed|Thu|Fri|Sat|Sun):(0[0-9]|1[0-9]|2[0-3]):[0-5][0-9]-(Mon|Tue|Wed|Thu|Fri|Sat|Sun):(0[0-9]|1[0-9]|2[0-3]):[0-5][0-9]$/
  end

  if !backup_retention_period.nil?
    if backup_retention_period.respond_to? :to_i
      invalidate "'#{@name}' node's backup_retention_period must be between 0 and 8" unless backup_retention_period.to_i >= 0 && backup_retention_period.to_i <= 8
    else
      invalidate "'#{@name}' node's backup_retention_period must be a number"
    end
  end

  if !preferred_backup_window.nil?
    # optional; when given must be a UTC window of the form hh24:mi-hh24:mi
    invalidate "'#{@name}' node's preferred_backup_window must be in UTC format 'hh24:mi-hh24:mi'" unless preferred_backup_window =~ /^(0[0-9]|1[0-9]|2[0-3]):[0-5][0-9]-(0[0-9]|1[0-9]|2[0-3]):[0-5][0-9]$/
  end
end
|
1228
|
+
end
|
1229
|
+
end
|
1230
|
+
end
|
1231
|
+
end
|