rubber 2.3.1 → 2.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG +15 -0
- data/lib/rubber/cloud.rb +1 -1
- data/lib/rubber/cloud/aws.rb +376 -10
- data/lib/rubber/cloud/base.rb +152 -0
- data/lib/rubber/cloud/digital_ocean.rb +96 -0
- data/lib/rubber/cloud/fog.rb +8 -200
- data/lib/rubber/cloud/generic.rb +68 -0
- data/lib/rubber/instance.rb +6 -1
- data/lib/rubber/recipes/rubber/deploy.rb +16 -14
- data/lib/rubber/recipes/rubber/instances.rb +21 -14
- data/lib/rubber/recipes/rubber/security_groups.rb +4 -187
- data/lib/rubber/recipes/rubber/setup.rb +25 -1
- data/lib/rubber/recipes/rubber/utils.rb +7 -4
- data/lib/rubber/util.rb +4 -0
- data/lib/rubber/version.rb +1 -1
- data/templates/base/config/deploy.rb +11 -5
- data/templates/base/config/rubber/rubber.yml +15 -2
- data/templates/postgresql/config/rubber/role/postgresql/pg_hba.conf +4 -2
- data/test/cloud/aws_test.rb +1 -1
- data/test/cloud/digital_ocean_test.rb +70 -0
- data/test/cloud/fog_test.rb +4 -2
- data/test/fixtures/basic/test.pem.pub +1 -0
- data/test/util_test.rb +8 -0
- metadata +7 -3
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a078638f1d84a5dda8e80d2276dac5c6351bea83
+  data.tar.gz: 54de9550eefdde6a4529336615d37f6eba5860be
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 689ed2c04caacc4448c02da2ed3bf1605de897709bc3391d393a611d41ce626165f6cb3149975aad7ba26f689936aca98a01989bc81f5dbb1dc7e23fe7f13202
+  data.tar.gz: a6f0cce4b68dcce1a2473806211c5953e6f0910defa0a2b3a236b4e77352b4abdae978b6012228329064af799349ceea22e10c2428cb8fc827aef5c14d994c27
data/CHANGELOG
CHANGED
@@ -1,3 +1,18 @@
+2.4.0 (05/24/2013)
+------------------
+
+New Features:
+============
+
+[core] DigitalOcean is now supported as a cloud provider
+[core] Added a "generic" cloud provider for connecting to dedicated hardware, VMs, and otherwise unsupported cloud providers
+
+
+Bug Fixes:
+=========
+
+[core] Trigger rubber:config after deploy:update_code so machines with both a DB and the asset pipeline can bootstrap properly <dc5d0b2>
+
 2.3.1 (05/12/2013)
 ------------------
 
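The bug-fix entry above is about Capistrano task ordering. As a rough sketch (the exact hook lives in rubber's deploy recipes and may be wired differently), such an ordering constraint is typically declared in Capistrano 2 like this:

# Illustration only: ensure rubber's configuration generation runs after the
# new release's code has been copied into place.
after "deploy:update_code", "rubber:config"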
data/lib/rubber/cloud.rb
CHANGED
@@ -5,7 +5,7 @@ module Rubber
 
     def self.get_provider(provider, env, capistrano)
       require "rubber/cloud/#{provider}"
-      clazz = Rubber::Cloud.const_get(provider
+      clazz = Rubber::Cloud.const_get(Rubber::Util.camelcase(provider))
       provider_env = env.cloud_providers[provider]
       return clazz.new(provider_env, capistrano)
     end
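The one-line change above lets provider names with underscores (for example digital_ocean) resolve to their adapter class. A minimal sketch of an equivalent camelcasing helper, assuming behavior similar to the new Rubber::Util.camelcase (the shipped implementation may differ):

# Hypothetical equivalent of Rubber::Util.camelcase, shown for illustration.
def camelcase(name)
  name.split('_').map(&:capitalize).join   # "digital_ocean" => "DigitalOcean"
end

# With that mapping, get_provider can look up adapter classes by name:
#   Rubber::Cloud.const_get(camelcase("digital_ocean"))  # => Rubber::Cloud::DigitalOcean
#   Rubber::Cloud.const_get(camelcase("aws"))            # => Rubber::Cloud::Aws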
data/lib/rubber/cloud/aws.rb
CHANGED
@@ -8,24 +8,81 @@ module Rubber
 
       def initialize(env, capistrano)
 
-
-
-
+        compute_credentials = {
+          :aws_access_key_id => env.access_key,
+          :aws_secret_access_key => env.secret_access_key
         }
+
+        storage_credentials = {
+          :provider => 'AWS',
+          :aws_access_key_id => env.access_key,
+          :aws_secret_access_key => env.secret_access_key
+        }
+
+        @table_store = ::Fog::AWS::SimpleDB.new(compute_credentials)
 
-
-
-
-
-
-
-        env['
+        compute_credentials[:region] = env.region
+        @elb = ::Fog::AWS::ELB.new(compute_credentials)
+
+        compute_credentials[:provider] = 'AWS' # We need to set the provider after the SimpleDB init because it fails if the provider value is specified.
+
+        env['compute_credentials'] = compute_credentials
+        env['storage_credentials'] = storage_credentials
         super(env, capistrano)
       end
 
       def table_store(table_key)
         return Rubber::Cloud::AwsTableStore.new(@table_store, table_key)
       end
+
+      def describe_instances(instance_id=nil)
+        instances = []
+        opts = {}
+        opts["instance-id"] = instance_id if instance_id
+
+        response = @compute_provider.servers.all(opts)
+        response.each do |item|
+          instance = {}
+          instance[:id] = item.id
+          instance[:type] = item.flavor_id
+          instance[:external_host] = item.dns_name
+          instance[:external_ip] = item.public_ip_address
+          instance[:internal_host] = item.private_dns_name
+          instance[:internal_ip] = item.private_ip_address
+          instance[:state] = item.state
+          instance[:zone] = item.availability_zone
+          instance[:provider] = 'aws'
+          instance[:platform] = item.platform || 'linux'
+          instance[:root_device_type] = item.root_device_type
+          instances << instance
+        end
+
+        return instances
+      end
+
+      def active_state
+        'running'
+      end
+
+      def before_create_instance(instance_alias, role_names)
+        setup_security_groups(instance_alias, role_names)
+      end
+
+      def after_create_instance(instance)
+        # Sometimes tag creation will fail, indicating that the instance doesn't exist yet even though it does. It seems to
+        # be a propagation delay on Amazon's end, so the best we can do is wait and try again.
+        Rubber::Util.retry_on_failure(StandardError, :retry_sleep => 0.5, :retry_count => 100) do
+          Rubber::Tag::update_instance_tags(instance.name)
+        end
+      end
+
+      def after_refresh_instance(instance)
+        # Sometimes tag creation will fail, indicating that the instance doesn't exist yet even though it does. It seems to
+        # be a propagation delay on Amazon's end, so the best we can do is wait and try again.
+        Rubber::Util.retry_on_failure(StandardError, :retry_sleep => 0.5, :retry_count => 100) do
+          Rubber::Tag::update_instance_tags(instance.name)
+        end
+      end
 
       def create_image(image_name)
 
@@ -118,6 +175,315 @@ module Rubber
         return lbs
       end
 
+      def describe_availability_zones
+        zones = []
+        response = @compute_provider.describe_availability_zones()
+        items = response.body["availabilityZoneInfo"]
+        items.each do |item|
+          zone = {}
+          zone[:name] = item["zoneName"]
+          zone[:state] =item["zoneState"]
+          zones << zone
+        end
+        return zones
+      end
+
+      def create_spot_instance_request(spot_price, ami, ami_type, security_groups, availability_zone)
+        response = @compute_provider.spot_requests.create(:price => spot_price,
+                                                          :image_id => ami,
+                                                          :flavor_id => ami_type,
+                                                          :groups => security_groups,
+                                                          :availability_zone => availability_zone,
+                                                          :key_name => env.key_name)
+        request_id = response.id
+        return request_id
+      end
+
+      def describe_spot_instance_requests(request_id=nil)
+        requests = []
+        opts = {}
+        opts["spot-instance-request-id"] = request_id if request_id
+        response = @compute_provider.spot_requests.all(opts)
+        response.each do |item|
+          request = {}
+          request[:id] = item.id
+          request[:spot_price] = item.price
+          request[:state] = item.state
+          request[:created_at] = item.created_at
+          request[:type] = item.flavor_id
+          request[:image_id] = item.image_id
+          request[:instance_id] = item.instance_id
+          requests << request
+        end
+        return requests
+      end
+
+      def setup_security_groups(host=nil, roles=[])
+        rubber_cfg = Rubber::Configuration.get_configuration(Rubber.env)
+        scoped_env = rubber_cfg.environment.bind(roles, host)
+        security_group_defns = Hash[scoped_env.security_groups.to_a]
+
+        if scoped_env.auto_security_groups
+          sghosts = (scoped_env.rubber_instances.collect{|ic| ic.name } + [host]).uniq.compact
+          sgroles = (scoped_env.rubber_instances.all_roles + roles).uniq.compact
+          security_group_defns = inject_auto_security_groups(security_group_defns, sghosts, sgroles)
+        end
+
+        sync_security_groups(security_group_defns)
+      end
+
+      def describe_security_groups(group_name=nil)
+        groups = []
+
+        opts = {}
+        opts["group-name"] = group_name if group_name
+        response = @compute_provider.security_groups.all(opts)
+
+        response.each do |item|
+          group = {}
+          group[:name] = item.name
+          group[:description] = item.description
+
+          item.ip_permissions.each do |ip_item|
+            group[:permissions] ||= []
+            rule = {}
+
+            rule[:protocol] = ip_item["ipProtocol"]
+            rule[:from_port] = ip_item["fromPort"]
+            rule[:to_port] = ip_item["toPort"]
+
+            ip_item["groups"].each do |rule_group|
+              rule[:source_groups] ||= []
+              source_group = {}
+              source_group[:account] = rule_group["userId"]
+              source_group[:name] = rule_group["groupName"]
+              rule[:source_groups] << source_group
+            end if ip_item["groups"]
+
+            ip_item["ipRanges"].each do |ip_range|
+              rule[:source_ips] ||= []
+              rule[:source_ips] << ip_range["cidrIp"]
+            end if ip_item["ipRanges"]
+
+            group[:permissions] << rule
+          end
+
+          groups << group
+        end
+
+        groups
+      end
+
+      def create_volume(size, zone)
+        volume = @compute_provider.volumes.create(:size => size.to_s, :availability_zone => zone)
+        return volume.id
+      end
+
+      def attach_volume(volume_id, instance_id, device)
+        volume = @compute_provider.volumes.get(volume_id)
+        server = @compute_provider.servers.get(instance_id)
+        volume.device = device
+        volume.server = server
+      end
+
+      def detach_volume(volume_id, force=true)
+        volume = @compute_provider.volumes.get(volume_id)
+        force ? volume.force_detach : (volume.server = nil)
+      end
+
+      def describe_volumes(volume_id=nil)
+        volumes = []
+        opts = {}
+        opts[:'volume-id'] = volume_id if volume_id
+        response = @compute_provider.volumes.all(opts)
+        response.each do |item|
+          volume = {}
+          volume[:id] = item.id
+          volume[:status] = item.state
+          if item.server_id
+            volume[:attachment_instance_id] = item.server_id
+            volume[:attachment_status] = item.attached_at ? "attached" : "waiting"
+          end
+          volumes << volume
+        end
+        return volumes
+      end
+
+      def destroy_volume(volume_id)
+        @compute_provider.volumes.get(volume_id).destroy
+      end
+
+      # resource_id is any Amazon resource ID (e.g., instance ID or volume ID)
+      # tags is a hash of tag_name => tag_value pairs
+      def create_tags(resource_id, tags)
+        # Tags need to be created individually in fog
+        tags.each do |k, v|
+          @compute_provider.tags.create(:resource_id => resource_id,
+                                        :key => k.to_s, :value => v.to_s)
+        end
+      end
+
+      private
+
+      def create_security_group(group_name, group_description)
+        @compute_provider.security_groups.create(:name => group_name, :description => group_description)
+      end
+
+      def destroy_security_group(group_name)
+        @compute_provider.security_groups.get(group_name).destroy
+      end
+
+      def add_security_group_rule(group_name, protocol, from_port, to_port, source)
+        group = @compute_provider.security_groups.get(group_name)
+        opts = {:ip_protocol => protocol || 'tcp'}
+
+        if source.instance_of? Hash
+          opts[:group] = {source[:account] => source[:name]}
+        else
+          opts[:cidr_ip] = source
+        end
+
+        group.authorize_port_range(from_port.to_i..to_port.to_i, opts)
+      end
+
+      def remove_security_group_rule(group_name, protocol, from_port, to_port, source)
+        group = @compute_provider.security_groups.get(group_name)
+        opts = {:ip_protocol => protocol || 'tcp'}
+
+        if source.instance_of? Hash
+          opts[:group] = {source[:account] => source[:name]}
+        else
+          opts[:cidr_ip] = source
+        end
+
+        group.revoke_port_range(from_port.to_i..to_port.to_i, opts)
+      end
+
+      def sync_security_groups(groups)
+        return unless groups
+
+        groups = Rubber::Util::stringify(groups)
+        groups = isolate_groups(groups)
+        group_keys = groups.keys.clone()
+
+        # For each group that does already exist in cloud
+        cloud_groups = describe_security_groups()
+        cloud_groups.each do |cloud_group|
+          group_name = cloud_group[:name]
+
+          # skip those groups that don't belong to this project/env
+          next if env.isolate_security_groups && group_name !~ /^#{isolate_prefix}/
+
+          if group_keys.delete(group_name)
+            # sync rules
+            capistrano.logger.debug "Security Group already in cloud, syncing rules: #{group_name}"
+            group = groups[group_name]
+
+            # convert the special case default rule into what it actually looks like when
+            # we query ec2 so that we can match things up when syncing
+            rules = group['rules'].clone
+            group['rules'].each do |rule|
+              if [2, 3].include?(rule.size) && rule['source_group_name'] && rule['source_group_account']
+                rules << rule.merge({'protocol' => 'tcp', 'from_port' => '1', 'to_port' => '65535' })
+                rules << rule.merge({'protocol' => 'udp', 'from_port' => '1', 'to_port' => '65535' })
+                rules << rule.merge({'protocol' => 'icmp', 'from_port' => '-1', 'to_port' => '-1' })
+                rules.delete(rule)
+              end
+            end
+
+            rule_maps = []
+
+            # first collect the rule maps from the request (group/user pairs are duplicated for tcp/udp/icmp,
+            # so we need to do this up frnot and remove duplicates before checking against the local rubber rules)
+            cloud_group[:permissions].each do |rule|
+              source_groups = rule.delete(:source_groups)
+              if source_groups
+                source_groups.each do |source_group|
+                  rule_map = rule.clone
+                  rule_map.delete(:source_ips)
+                  rule_map[:source_group_name] = source_group[:name]
+                  rule_map[:source_group_account] = source_group[:account]
+                  rule_map = Rubber::Util::stringify(rule_map)
+                  rule_maps << rule_map unless rule_maps.include?(rule_map)
+                end
+              else
+                rule_map = Rubber::Util::stringify(rule)
+                rule_maps << rule_map unless rule_maps.include?(rule_map)
+              end
+            end if cloud_group[:permissions]
+            # For each rule, if it exists, do nothing, otherwise remove it as its no longer defined locally
+            rule_maps.each do |rule_map|
+              if rules.delete(rule_map)
+                # rules match, don't need to do anything
+                # logger.debug "Rule in sync: #{rule_map.inspect}"
+              else
+                # rules don't match, remove them from cloud and re-add below
+                answer = nil
+                msg = "Rule '#{rule_map.inspect}' exists in cloud, but not locally"
+                if env.prompt_for_security_group_sync
+                  answer = Capistrano::CLI.ui.ask("#{msg}, remove from cloud? [y/N]: ")
+                else
+                  capistrano.logger.info(msg)
+                end
+
+                if answer =~ /^y/
+                  rule_map = Rubber::Util::symbolize_keys(rule_map)
+                  if rule_map[:source_group_name]
+                    remove_security_group_rule(group_name, rule_map[:protocol], rule_map[:from_port], rule_map[:to_port], {:name => rule_map[:source_group_name], :account => rule_map[:source_group_account]})
+                  else
+                    rule_map[:source_ips].each do |source_ip|
+                      remove_security_group_rule(group_name, rule_map[:protocol], rule_map[:from_port], rule_map[:to_port], source_ip)
+                    end if rule_map[:source_ips]
+                  end
+                end
+              end
+            end
+
+            rules.each do |rule_map|
+              # create non-existing rules
+              capistrano.logger.debug "Missing rule, creating: #{rule_map.inspect}"
+              rule_map = Rubber::Util::symbolize_keys(rule_map)
+              if rule_map[:source_group_name]
+                add_security_group_rule(group_name, rule_map[:protocol], rule_map[:from_port], rule_map[:to_port], {:name => rule_map[:source_group_name], :account => rule_map[:source_group_account]})
+              else
+                rule_map[:source_ips].each do |source_ip|
+                  add_security_group_rule(group_name, rule_map[:protocol], rule_map[:from_port], rule_map[:to_port], source_ip)
+                end if rule_map[:source_ips]
+              end
+            end
+          else
+            # delete group
+            answer = nil
+            msg = "Security group '#{group_name}' exists in cloud but not locally"
+            if env.prompt_for_security_group_sync
+              answer = Capistrano::CLI.ui.ask("#{msg}, remove from cloud? [y/N]: ")
+            else
+              capistrano.logger.debug(msg)
+            end
+            destroy_security_group(group_name) if answer =~ /^y/
+          end
+        end
+
+        # For each group that didnt already exist in cloud
+        group_keys.each do |group_name|
+          group = groups[group_name]
+          capistrano.logger.debug "Creating new security group: #{group_name}"
+          # create each group
+          create_security_group(group_name, group['description'])
+          # create rules for group
+          group['rules'].each do |rule_map|
+            capistrano.logger.debug "Creating new rule: #{rule_map.inspect}"
+            rule_map = Rubber::Util::symbolize_keys(rule_map)
+            if rule_map[:source_group_name]
+              add_security_group_rule(group_name, rule_map[:protocol], rule_map[:from_port], rule_map[:to_port], {:name => rule_map[:source_group_name], :account => rule_map[:source_group_account]})
+            else
+              rule_map[:source_ips].each do |source_ip|
+                add_security_group_rule(group_name, rule_map[:protocol], rule_map[:from_port], rule_map[:to_port], source_ip)
+              end if rule_map[:source_ips]
+            end
+          end
+        end
+      end
     end
 
   end
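The new after_create_instance and after_refresh_instance hooks above retry tag creation to ride out EC2's propagation delay. A minimal sketch of a retry helper compatible with that call shape (the actual Rubber::Util.retry_on_failure may differ in detail):

# Sketch only: retry the block on the given error class, honoring the
# :retry_count and :retry_sleep options used in the calls above.
def retry_on_failure(error_class = StandardError, opts = {})
  attempts_left = opts[:retry_count] || 3
  begin
    yield
  rescue error_class
    attempts_left -= 1
    if attempts_left > 0
      sleep(opts[:retry_sleep] || 1)
      retry
    else
      raise
    end
  end
end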
data/lib/rubber/cloud/base.rb
CHANGED
@@ -10,6 +10,158 @@ module Rubber
         @capistrano = capistrano
       end
 
+      def before_create_instance(instance_alias, role_names)
+        # No-op by default.
+      end
+
+      def after_create_instance(instance)
+        # No-op by default.
+      end
+
+      def before_refresh_instance(instance)
+        # No-op by default.
+      end
+
+      def after_refresh_instance(instance)
+        setup_security_groups(instance.name, instance.role_names)
+      end
+
+      def isolate_prefix
+        "#{env.app_name}_#{Rubber.env}_"
+      end
+
+      def active_state
+        raise NotImplementedError, "active_state not implemented in base adapter"
+      end
+
+      def isolate_group_name(group_name)
+        if env.isolate_security_groups
+          group_name =~ /^#{isolate_prefix}/ ? group_name : "#{isolate_prefix}#{group_name}"
+        else
+          group_name
+        end
+      end
+
+      def isolate_groups(groups)
+        renamed = {}
+
+        groups.each do |name, group|
+          new_name = isolate_group_name(name)
+          new_group = Marshal.load(Marshal.dump(group))
+
+          new_group['rules'].each do |rule|
+            old_ref_name = rule['source_group_name']
+            if old_ref_name
+              # don't mangle names if the user specifies this is an external group they are giving access to.
+              # remove the external_group key to allow this to match with groups retrieved from cloud
+              is_external = rule.delete('external_group')
+              if ! is_external && old_ref_name !~ /^#{isolate_prefix}/
+                rule['source_group_name'] = isolate_group_name(old_ref_name)
+              end
+            end
+          end
+
+          renamed[new_name] = new_group
+        end
+
+        renamed
+      end
+
+      def inject_auto_security_groups(groups, hosts, roles)
+        hosts.each do |name|
+          group_name = name
+          groups[group_name] ||= {'description' => "Rubber automatic security group for host: #{name}", 'rules' => []}
+        end
+        roles.each do |name|
+          group_name = name
+          groups[group_name] ||= {'description' => "Rubber automatic security group for role: #{name}", 'rules' => []}
+        end
+
+        groups
+      end
+
+      def setup_security_groups(host=nil, roles=[])
+        raise "Digital Ocean provider can only set up one host a time" if host.split(',').size != 1
+
+        rubber_cfg = Rubber::Configuration.get_configuration(Rubber.env)
+        scoped_env = rubber_cfg.environment.bind(roles, host)
+        security_group_defns = Hash[scoped_env.security_groups.to_a]
+
+
+        if scoped_env.auto_security_groups
+          sghosts = (scoped_env.rubber_instances.collect{|ic| ic.name } + [host]).uniq.compact
+          sgroles = (scoped_env.rubber_instances.all_roles + roles).uniq.compact
+          security_group_defns = inject_auto_security_groups(security_group_defns, sghosts, sgroles)
+        end
+
+        groups = Rubber::Util::stringify(security_group_defns)
+        groups = isolate_groups(groups)
+
+        script = <<-ENDSCRIPT
+          # Clear out all firewall rules to start.
+          iptables -F
+
+          iptables -I INPUT 1 -i lo -j ACCEPT -m comment --comment 'Enable connections on loopback devices.'
+          iptables -I INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT -m comment --comment 'Always allow established connections to remain connected.'
+        ENDSCRIPT
+
+        instance = scoped_env.rubber_instances[host]
+        instance.security_groups.each do |group_name|
+          group = groups[group_name]
+
+          group['rules'].each do |rule|
+            protocol = rule['protocol']
+            from_port = rule.has_key?('from_port') ? rule['from_port'].to_i : nil
+            to_port = rule.has_key?('to_port') ? rule['to_port'].to_i : nil
+            source_ips = rule['source_ips']
+
+            if protocol && from_port && to_port && source_ips
+              (from_port..to_port).each do |port|
+                source_ips.each do |source|
+                  script << "\niptables -A INPUT -p #{protocol} --dport #{port} --source #{source} -j ACCEPT -m comment --comment '#{group_name}'"
+                end
+              end
+            end
+          end
+        end
+
+        script << "\niptables -A INPUT -j DROP -m comment --comment 'Disable all other connections.'"
+
+        capistrano.run_script 'setup_firewall_rules', script, :hosts => instance.external_ip
+      end
+
+      def describe_security_groups(group_name=nil)
+        rules = capistrano.capture("iptables -S INPUT", :hosts => rubber_env.rubber_instances.collect(&:external_ip)).strip.split("\r\n")
+        scoped_rules = rules.select { |r| r =~ /dport/ }
+
+        groups = []
+
+        scoped_rules.each do |rule|
+          group = {}
+          discovered_rule = {}
+
+          parts = rule.split(' ').each_slice(2).to_a
+          parts.each do |arg, value|
+            case arg
+              when '-p' then discovered_rule[:protocol] = value
+              when '--dport' then discovered_rule[:from_port] = value; discovered_rule[:to_port] = value
+              when '--comment' then group[:name] = value
+            end
+          end
+
+          # Consolidate rules for groups with the same name.
+          existing_group = groups.find { |g| g[:name] == group[:name]}
+          if existing_group
+            existing_group[:permissions] << discovered_rule
+          else
+            group[:permissions] = [discovered_rule]
+            groups << group
+          end
+        end
+
+        groups
+      end
+
     end
 
   end
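The base adapter above compiles rubber's security group definitions into an iptables script for providers that have no native firewall API. A self-contained sketch of that rule expansion, using a made-up group definition (names, ports, and CIDRs are examples only):

# Hypothetical group definition for illustration.
groups = {
  'web' => {
    'rules' => [
      { 'protocol' => 'tcp', 'from_port' => 80, 'to_port' => 81,
        'source_ips' => ['0.0.0.0/0'] }
    ]
  }
}

script = "iptables -F"
groups.each do |group_name, group|
  group['rules'].each do |rule|
    (rule['from_port']..rule['to_port']).each do |port|
      rule['source_ips'].each do |source|
        # One ACCEPT rule per port/source pair, tagged with the group name.
        script << "\niptables -A INPUT -p #{rule['protocol']} --dport #{port} --source #{source} -j ACCEPT -m comment --comment '#{group_name}'"
      end
    end
  end
end
script << "\niptables -A INPUT -j DROP"
puts script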