sml-rubber 0.9.1
- data/COPYING +339 -0
- data/README +6 -0
- data/TODO +9 -0
- data/VERSION +1 -0
- data/generators/vulcanize/USAGE +6 -0
- data/generators/vulcanize/templates/apache/config/rubber/deploy-apache.rb +45 -0
- data/generators/vulcanize/templates/apache/config/rubber/role/web/deflate.conf +10 -0
- data/generators/vulcanize/templates/apache/config/rubber/role/web/expires.conf +9 -0
- data/generators/vulcanize/templates/apache/config/rubber/role/web/headers.conf +6 -0
- data/generators/vulcanize/templates/apache/config/rubber/role/web/setenvif.conf +52 -0
- data/generators/vulcanize/templates/apache/config/rubber/role/web/vhost.conf +27 -0
- data/generators/vulcanize/templates/apache/config/rubber/rubber-apache.yml +15 -0
- data/generators/vulcanize/templates/apache/templates.yml +1 -0
- data/generators/vulcanize/templates/base/Capfile +17 -0
- data/generators/vulcanize/templates/base/config/deploy.rb +77 -0
- data/generators/vulcanize/templates/base/config/rubber/common/crontab +16 -0
- data/generators/vulcanize/templates/base/config/rubber/common/profile.rc +9 -0
- data/generators/vulcanize/templates/base/config/rubber/deploy-setup.rb +56 -0
- data/generators/vulcanize/templates/base/config/rubber/rubber.yml +221 -0
- data/generators/vulcanize/templates/base/lib/tasks/rubber.rake +18 -0
- data/generators/vulcanize/templates/base/script/cron-rake +18 -0
- data/generators/vulcanize/templates/base/script/cron-runner +18 -0
- data/generators/vulcanize/templates/base/script/cron-sh +67 -0
- data/generators/vulcanize/templates/base/templates.yml +1 -0
- data/generators/vulcanize/templates/complete_mysql/templates.yml +6 -0
- data/generators/vulcanize/templates/complete_passenger_mysql/templates.yml +8 -0
- data/generators/vulcanize/templates/cruise/config/rubber/deploy-cruise.rb +74 -0
- data/generators/vulcanize/templates/cruise/config/rubber/role/cruise/cruise +40 -0
- data/generators/vulcanize/templates/cruise/config/rubber/role/cruise/my.cnf +165 -0
- data/generators/vulcanize/templates/cruise/config/rubber/role/cruise/production.rb +8 -0
- data/generators/vulcanize/templates/cruise/config/rubber/role/cruise/site_config.rb +76 -0
- data/generators/vulcanize/templates/cruise/config/rubber/role/web_tools/cruise-nginx.conf +11 -0
- data/generators/vulcanize/templates/cruise/config/rubber/rubber-cruise.yml +18 -0
- data/generators/vulcanize/templates/cruise/templates.yml +1 -0
- data/generators/vulcanize/templates/haproxy/config/rubber/deploy-haproxy.rb +45 -0
- data/generators/vulcanize/templates/haproxy/config/rubber/role/haproxy/haproxy-default.conf +8 -0
- data/generators/vulcanize/templates/haproxy/config/rubber/role/haproxy/haproxy.conf +44 -0
- data/generators/vulcanize/templates/haproxy/config/rubber/role/haproxy/monit-haproxy.conf +9 -0
- data/generators/vulcanize/templates/haproxy/config/rubber/role/haproxy/syslog-haproxy.conf +6 -0
- data/generators/vulcanize/templates/haproxy/config/rubber/role/haproxy/syslogd-default.conf +17 -0
- data/generators/vulcanize/templates/haproxy/config/rubber/role/web_tools/haproxy-nginx.conf +10 -0
- data/generators/vulcanize/templates/haproxy/config/rubber/rubber-haproxy.yml +12 -0
- data/generators/vulcanize/templates/haproxy/templates.yml +1 -0
- data/generators/vulcanize/templates/memcached/config/memcached.yml +28 -0
- data/generators/vulcanize/templates/memcached/config/rubber/common/memcached.yml +14 -0
- data/generators/vulcanize/templates/memcached/config/rubber/role/memcached/memcached.conf +52 -0
- data/generators/vulcanize/templates/memcached/config/rubber/role/memcached/memcached_munin_plugin +249 -0
- data/generators/vulcanize/templates/memcached/config/rubber/rubber-memcached.yml +7 -0
- data/generators/vulcanize/templates/memcached/templates.yml +1 -0
- data/generators/vulcanize/templates/minimal_mysql/templates.yml +7 -0
- data/generators/vulcanize/templates/minimal_nodb/templates.yml +6 -0
- data/generators/vulcanize/templates/mongrel/config/rubber/deploy-mongrel.rb +75 -0
- data/generators/vulcanize/templates/mongrel/config/rubber/role/app/mongrel_cluster.yml +12 -0
- data/generators/vulcanize/templates/mongrel/config/rubber/role/app/monit-mongrel.conf +20 -0
- data/generators/vulcanize/templates/mongrel/config/rubber/rubber-mongrel.yml +9 -0
- data/generators/vulcanize/templates/mongrel/templates.yml +1 -0
- data/generators/vulcanize/templates/monit/config/rubber/common/monit-default.conf +15 -0
- data/generators/vulcanize/templates/monit/config/rubber/common/monit.conf +251 -0
- data/generators/vulcanize/templates/monit/config/rubber/deploy-monit.rb +32 -0
- data/generators/vulcanize/templates/monit/config/rubber/role/web_tools/monit-admin-nginx.conf +10 -0
- data/generators/vulcanize/templates/monit/config/rubber/rubber-monit.yml +6 -0
- data/generators/vulcanize/templates/monit/templates.yml +1 -0
- data/generators/vulcanize/templates/munin/config/rubber/common/monit-munin.conf +8 -0
- data/generators/vulcanize/templates/munin/config/rubber/common/munin-node.conf +48 -0
- data/generators/vulcanize/templates/munin/config/rubber/deploy-munin.rb +30 -0
- data/generators/vulcanize/templates/munin/config/rubber/role/web_tools/munin-nginx.conf +8 -0
- data/generators/vulcanize/templates/munin/config/rubber/role/web_tools/munin-plugins.conf +31 -0
- data/generators/vulcanize/templates/munin/config/rubber/role/web_tools/munin.conf +80 -0
- data/generators/vulcanize/templates/munin/config/rubber/rubber-munin.yml +8 -0
- data/generators/vulcanize/templates/munin/script/munin/example_mysql_query.rb +57 -0
- data/generators/vulcanize/templates/munin/script/munin/example_simple.rb +24 -0
- data/generators/vulcanize/templates/munin/templates.yml +1 -0
- data/generators/vulcanize/templates/mysql/config/rubber/common/database.yml +11 -0
- data/generators/vulcanize/templates/mysql/config/rubber/deploy-mysql.rb +178 -0
- data/generators/vulcanize/templates/mysql/config/rubber/role/db/crontab +14 -0
- data/generators/vulcanize/templates/mysql/config/rubber/role/db/monit-mysql.cnf +10 -0
- data/generators/vulcanize/templates/mysql/config/rubber/role/db/my.cnf +167 -0
- data/generators/vulcanize/templates/mysql/config/rubber/role/mysql_slave/mysql_slave_munin_plugin +51 -0
- data/generators/vulcanize/templates/mysql/config/rubber/rubber-mysql.yml +38 -0
- data/generators/vulcanize/templates/mysql/templates.yml +1 -0
- data/generators/vulcanize/templates/mysql_cluster/config/rubber/common/mysql_cluster_migrations.rb +13 -0
- data/generators/vulcanize/templates/mysql_cluster/config/rubber/deploy-mysql_cluster.rb +173 -0
- data/generators/vulcanize/templates/mysql_cluster/config/rubber/role/mysql_data/my.cnf +15 -0
- data/generators/vulcanize/templates/mysql_cluster/config/rubber/role/mysql_mgm/ndb_mgmd.cnf +39 -0
- data/generators/vulcanize/templates/mysql_cluster/config/rubber/role/mysql_sql/monit-mysql_cluster_sql.cnf +10 -0
- data/generators/vulcanize/templates/mysql_cluster/config/rubber/role/mysql_sql/my.cnf +23 -0
- data/generators/vulcanize/templates/mysql_cluster/config/rubber/rubber-mysql_cluster.yml +32 -0
- data/generators/vulcanize/templates/mysql_cluster/templates.yml +1 -0
- data/generators/vulcanize/templates/mysql_proxy/config/rubber/common/database.yml +16 -0
- data/generators/vulcanize/templates/mysql_proxy/config/rubber/common/monit-mysql_proxy.cnf +10 -0
- data/generators/vulcanize/templates/mysql_proxy/config/rubber/common/mysql-proxy +153 -0
- data/generators/vulcanize/templates/mysql_proxy/config/rubber/common/mysql-proxy.conf +10 -0
- data/generators/vulcanize/templates/mysql_proxy/config/rubber/common/mysql-proxy.lua +5 -0
- data/generators/vulcanize/templates/mysql_proxy/config/rubber/deploy-mysql_proxy.rb +52 -0
- data/generators/vulcanize/templates/mysql_proxy/config/rubber/rubber-mysql_proxy.yml +11 -0
- data/generators/vulcanize/templates/mysql_proxy/templates.yml +1 -0
- data/generators/vulcanize/templates/nginx/config/rubber/deploy-nginx.rb +45 -0
- data/generators/vulcanize/templates/nginx/config/rubber/role/web/crontab +9 -0
- data/generators/vulcanize/templates/nginx/config/rubber/role/web/monit-nginx.conf +9 -0
- data/generators/vulcanize/templates/nginx/config/rubber/role/web/nginx.conf +133 -0
- data/generators/vulcanize/templates/nginx/config/rubber/role/web_tools/index.html +23 -0
- data/generators/vulcanize/templates/nginx/config/rubber/role/web_tools/nginx-tools.conf +74 -0
- data/generators/vulcanize/templates/nginx/config/rubber/rubber-nginx.yml +33 -0
- data/generators/vulcanize/templates/nginx/templates.yml +1 -0
- data/generators/vulcanize/templates/passenger/config/rubber/deploy-passenger.rb +27 -0
- data/generators/vulcanize/templates/passenger/config/rubber/role/web/passenger.conf +8 -0
- data/generators/vulcanize/templates/passenger/config/rubber/rubber-passenger.yml +4 -0
- data/generators/vulcanize/templates/passenger/templates.yml +1 -0
- data/generators/vulcanize/templates/sphinx/config/rubber/common/sphinx.yml +46 -0
- data/generators/vulcanize/templates/sphinx/config/rubber/deploy-sphinx.rb +112 -0
- data/generators/vulcanize/templates/sphinx/config/rubber/role/sphinx/crontab +11 -0
- data/generators/vulcanize/templates/sphinx/config/rubber/role/sphinx/monit-sphinx.conf +10 -0
- data/generators/vulcanize/templates/sphinx/config/rubber/rubber-sphinx.yml +6 -0
- data/generators/vulcanize/templates/sphinx/templates.yml +1 -0
- data/generators/vulcanize/vulcanize_generator.rb +67 -0
- data/lib/capistrano/hostcmd.rb +12 -0
- data/lib/rubber.rb +37 -0
- data/lib/rubber/capistrano.rb +1 -0
- data/lib/rubber/cloud.rb +13 -0
- data/lib/rubber/cloud/aws.rb +261 -0
- data/lib/rubber/cloud/base.rb +16 -0
- data/lib/rubber/configuration.rb +47 -0
- data/lib/rubber/dns.rb +13 -0
- data/lib/rubber/dns/base.rb +69 -0
- data/lib/rubber/dns/dyndns.rb +63 -0
- data/lib/rubber/dns/nettica.rb +56 -0
- data/lib/rubber/dns/zerigo.rb +121 -0
- data/lib/rubber/environment.rb +161 -0
- data/lib/rubber/generator.rb +197 -0
- data/lib/rubber/instance.rb +113 -0
- data/lib/rubber/recipes/rubber.rb +88 -0
- data/lib/rubber/recipes/rubber/bundles.rb +28 -0
- data/lib/rubber/recipes/rubber/deploy.rb +66 -0
- data/lib/rubber/recipes/rubber/instances.rb +298 -0
- data/lib/rubber/recipes/rubber/security_groups.rb +149 -0
- data/lib/rubber/recipes/rubber/setup.rb +285 -0
- data/lib/rubber/recipes/rubber/static_ips.rb +107 -0
- data/lib/rubber/recipes/rubber/utils.rb +195 -0
- data/lib/rubber/recipes/rubber/volumes.rb +263 -0
- data/lib/rubber/tasks/rubber.rb +218 -0
- data/lib/rubber/util.rb +33 -0
- data/test/environment_test.rb +118 -0
- data/test/generator_test.rb +323 -0
- data/test/instance_test.rb +38 -0
- data/test/test_helper.rb +4 -0
- data/test/util_test.rb +16 -0
- metadata +246 -0
data/lib/rubber/recipes/rubber/static_ips.rb
@@ -0,0 +1,107 @@
namespace :rubber do

  desc <<-DESC
    Sets up static IPs for the instances configured to have them
  DESC
  required_task :setup_static_ips do
    rubber_cfg.instance.each do |ic|
      env = rubber_cfg.environment.bind(ic.role_names, ic.name)
      if env.use_static_ip
        artifacts = rubber_cfg.instance.artifacts
        ip = artifacts['static_ips'][ic.name] rescue nil

        # first allocate the static ip if we don't have a global record (artifacts) for it
        if ! ip
          logger.info "Allocating static IP for #{ic.full_name}"
          ip = allocate_static_ip()
          artifacts['static_ips'][ic.name] = ip
          rubber_cfg.instance.save
        end

        # then, associate it if we don't have a record (on the instance) of association
        if ! ic.static_ip
          logger.info "Associating static ip #{ip} with #{ic.full_name}"
          associate_static_ip(ip, ic.instance_id)

          instance = cloud.describe_instances(:instance_id => ic.instance_id).first
          ic.external_host = instance[:external_host]
          ic.internal_host = instance[:internal_host]
          ic.external_ip = ip
          ic.static_ip = ip
          rubber_cfg.instance.save()

          logger.info "Waiting for static ip to associate"
          while true do
            task :_wait_for_static_ip, :hosts => ip do
              run "echo"
            end
            begin
              _wait_for_static_ip
            rescue ConnectionError
              sleep 2
              logger.info "Failed to connect to static ip #{ip}, retrying"
              retry
            end
            break
          end
        end

      end
    end
  end

  desc <<-DESC
    Shows the configured static IPs
  DESC
  required_task :describe_static_ips do
    results = []
    format = "%-10s %-15s %-30s"
    results << format % %w[InstanceID IP Alias]

    ips = cloud.describe_addresses()
    ips.each do |ip_data|
      instance_id = ip_data[:instance_id]
      ip = ip_data[:ip]

      local_alias = find_alias(ip, instance_id, false)

      results << format % [instance_id || "Unassigned", ip, local_alias || "Unknown"]
    end

    results.each {|r| logger.info r}
  end

  desc <<-DESC
    Deallocates the given static ip
  DESC
  required_task :destroy_static_ip do
    ip = get_env('IP', "Static IP (run rubber:describe_static_ips for a list)", true)
    destroy_static_ip(ip)
  end

  def allocate_static_ip()
    ip = cloud.create_static_ip()
    fatal "Failed to allocate static ip" if ip.nil?
    return ip
  end

  def associate_static_ip(ip, instance_id)
    success = cloud.attach_static_ip(ip, instance_id)
    fatal "Failed to associate static ip" unless success
  end

  def destroy_static_ip(ip)
    logger.info "Releasing static ip: #{ip}"
    cloud.destroy_static_ip(ip) rescue logger.info("IP was not attached")

    logger.info "Removing ip #{ip} from rubber instances file"
    artifacts = rubber_cfg.instance.artifacts
    artifacts['static_ips'].delete_if {|k,v| v == ip}
    rubber_cfg.instance.each do |ic|
      ic.static_ip = nil if ic.static_ip == ip
    end
    rubber_cfg.instance.save
  end

end
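The setup_static_ips task above only acts on instances whose bound environment sets use_static_ip, recording allocations under the 'static_ips' artifact so repeat runs are idempotent. A minimal sketch of enabling this in rubber.yml (the use_static_ip key is taken from the code above; the surrounding layout is assumed and may differ from the real config):

  # rubber.yml (hypothetical excerpt)
  roles:
    web:
      use_static_ip: true
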
data/lib/rubber/recipes/rubber/utils.rb
@@ -0,0 +1,195 @@
namespace :rubber do

  desc <<-DESC
    Convenience task for creating a staging instance for the given RUBBER_ENV/RAILS_ENV.
    By default this task assigns all known roles when creating the instance,
    but you can specify a different default in rubber.yml:staging_roles.
    At the end, the instance will be up and running.
    e.g. RUBBER_ENV=matt cap create_staging
  DESC
  required_task :create_staging do
    if rubber_cfg.instance.size > 0
      value = Capistrano::CLI.ui.ask("The #{RUBBER_ENV} environment already has instances. Are you SURE you want to create a staging instance that may interact with them [y/N]?: ")
      fatal("Exiting", 0) if value !~ /^y/
    end
    instance_alias = ENV['ALIAS'] = rubber.get_env("ALIAS", "Hostname to use for staging instance", true, RUBBER_ENV)
    default_roles = rubber_cfg.environment.bind().staging_roles || "*"
    roles = ENV['ROLES'] = rubber.get_env("ROLES", "Roles to use for staging instance", true, default_roles)
    if rubber_cfg.instance[instance_alias]
      logger.info "Instance already exists, skipping to bootstrap"
    else
      rubber.create
    end
    rubber.bootstrap
    # stop everything in case we have a bundled instance with monit, etc. starting at boot
    deploy.stop rescue nil
    # bootstrap_db does setup/update_code, so since the release directory
    # variable gets reused by cap, we have to just do the symlink here - doing
    # an update again would fail
    deploy.symlink
    deploy.migrate
    deploy.start
  end

  desc <<-DESC
    Destroy the staging instance for the given RUBBER_ENV.
  DESC
  task :destroy_staging do
    ENV['ALIAS'] = rubber.get_env("ALIAS", "Hostname of staging instance to be destroyed", true, RUBBER_ENV)
    rubber.destroy
  end

  desc <<-DESC
    Live tail of rails log files for all machines.
    By default tails the rails logs for the current RUBBER_ENV, but one can
    set FILE=/path/file.*.glob to tail a different set
  DESC
  task :tail_logs, :roles => :app do
    log_file_glob = rubber.get_env("FILE", "Log files to tail", true, "#{current_path}/log/#{RUBBER_ENV}*.log")
    run "tail -qf #{log_file_glob}" do |channel, stream, data|
      puts  # for an extra line break before the host name
      puts data
      break if stream == :err
    end
  end

  # Use instead of task to define a capistrano task that runs serially instead of in parallel.
  # The :groups option specifies how many groups to partition the servers into, so that we
  # do the task for N (= total/groups) servers at a time. When multiple roles are supplied,
  # this tries to be intelligent and slices up each role independently, but runs the slices
  # together so that things don't take too long. For example, when adding an :api role to
  # some :app servers, you don't want a restart to do all the :api servers first and then
  # the others, as that would take a long time - instead it does some :api and some :app,
  # then some more of each.
  #
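  # A hypothetical usage sketch (not part of the original file): restart the
  # :api and :app servers a few hosts at a time instead of all at once:
  #
  #   serial_task(self, :serial_restart, :roles => [:api, :app], :groups => 3) do
  #     run "touch #{current_path}/tmp/restart.txt"
  #   end
  #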
  def serial_task(ns, name, options = {}, &block)
    # first figure out server names for the passed in roles - when no roles
    # are passed in, use all servers
    serial_roles = Array(options[:roles])
    servers = {}
    if serial_roles.empty?
      all_servers = []
      self.roles.each do |rolename, serverdefs|
        all_servers += serverdefs.collect {|server| server.host}
      end
      servers[:_serial_all] = all_servers.uniq.sort
    else
      # get servers for each role
      self.roles.each do |rolename, serverdefs|
        if serial_roles.include?(rolename)
          servers[rolename] ||= []
          servers[rolename] += serverdefs.collect {|server| server.host}
        end
      end

      # Remove duplication of servers - roles which come first in the list
      # have precedence, so the servers show up in that group
      serial_roles.each_with_index do |rolename, i|
        servers[rolename] ||= []
        serial_roles[i+1..-1].each do |r|
          servers[r] -= servers[rolename]
        end
        servers[rolename] = servers[rolename].uniq.sort
      end
    end

    # group each role's servers into slices, but combine slices across roles
    slices = []
    servers.each do |rolename, svrs|
      next if svrs.size == 0
      # figure out the size of each slice by dividing server count by # of groups
      slice_size = svrs.size / (options.delete(:groups) || 2)
      slice_size = 1 if slice_size == 0
      slice_idx = 0
      svrs.each_slice(slice_size) do |srv_slice|
        slices[slice_idx] ||= []
        slices[slice_idx] += srv_slice
        slice_idx += 1
      end
    end

    # for each slice, define a new task specific to the hosts in that slice
    task_syms = []
    slices.each do |server_group|
      servers = server_group.map{|s| s.gsub(/\..*/, '')}.join("_")
      task_sym = "_serial_task_#{name.to_s}_#{servers}".to_sym
      task_syms << task_sym
      ns.task task_sym, options.merge(:hosts => server_group), &block
    end

    # create the top level task that calls all the serial ones
    ns.task name, options do
      task_syms.each do |t|
        ns.send t
      end
    end
  end

  def find_alias(ip, instance_id, do_connect=true)
    if instance_id
      instance = rubber_cfg.instance.find {|i| i.instance_id == instance_id }
      local_alias = instance.full_name if instance
    end
    local_alias ||= File.read("/etc/hosts").grep(/#{ip}/).first.split[1] rescue nil
    if ! local_alias && do_connect
      task :_get_ip, :hosts => ip do
        local_alias = "* " + capture("hostname").strip
      end
      _get_ip rescue ConnectionError
    end
    return local_alias
  end

  def prepare_script(name, contents)
    script = "/tmp/#{name}"
    # this lets us abort a script if a command in the middle of it errors out
    env = rubber_cfg.environment.bind()
    contents = "#{env.stop_on_error_cmd}\n#{contents}" if env.stop_on_error_cmd
    put(contents, script)
    return script
  end

  def run_script(name, contents)
    script = prepare_script(name, contents)
    run "sh #{script}"
  end

  def sudo_script(name, contents)
    script = prepare_script(name, contents)
    sudo "sh #{script}"
  end

  def get_env(name, desc, required=false, default=nil)
    value = ENV.delete(name)
    msg = "#{desc}"
    msg << " [#{default}]" if default
    msg << ": "
    value = Capistrano::CLI.ui.ask(msg) unless value
    value = value.size == 0 ? default : value
    fatal "#{name} is required, pass using environment or enter at prompt" if required && ! value
    return value
  end

  def fatal(msg, code=1)
    logger.info msg
    exit code
  end

  # Returns a map of "hostvar_<hostname>" => value for the given config value for each instance host.
  # This is used to run capistrano tasks scoped to the correct role/host so that a config value
  # specific to a role/host will only be used for that role/host, e.g. the list of packages to
  # be installed.
  def get_host_options(cfg_name, &block)
    opts = {}
    rubber_cfg.instance.each do |ic|
      env = rubber_cfg.environment.bind(ic.role_names, ic.name)
      cfg_value = env[cfg_name]
      if cfg_value
        if block
          cfg_value = block.call(cfg_value)
        end
        opts["hostvar_#{ic.full_name}"] = cfg_value if cfg_value && cfg_value.strip.size > 0
      end
    end
    return opts
  end

end
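To make get_host_options concrete, here is a hypothetical call (the host names, the 'packages' key, and the values are invented for illustration): a per-host list from the config is passed through the block, and the result is keyed by each instance's full name, giving a map a task can use to scope per-host work.

  # hypothetical sketch
  opts = get_host_options('packages') { |pkgs| pkgs.join(' ') }
  # => { "hostvar_web01.example.com" => "nginx monit",
  #      "hostvar_db01.example.com"  => "mysql-server" }
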
data/lib/rubber/recipes/rubber/volumes.rb
@@ -0,0 +1,263 @@
namespace :rubber do

  desc <<-DESC
    Sets up persistent volumes in the cloud.
    All volumes defined in rubber.yml will be created if necessary, and attached/mounted on their associated instances
  DESC
  required_task :setup_volumes do
    rubber_cfg.instance.filtered.each do |ic|
      env = rubber_cfg.environment.bind(ic.role_names, ic.name)
      created_vols = []
      vol_specs = env.volumes || []
      vol_specs.each do |vol_spec|
        created_vols << setup_volume(ic, vol_spec)
      end
      created_vols.compact!

      created_parts = []
      partition_specs = env.local_volumes || []
      partition_specs.each do |partition_spec|
        created_parts << setup_partition(ic, partition_spec)
      end
      created_parts.compact!
      zero_partitions(ic, created_parts)
      created_vols += created_parts

      created_vols = created_vols.compact.uniq
      raid_specs = env.raid_volumes || []
      raid_specs.each do |raid_spec|
        # we want to format if we created the ec2 volumes, or if we don't have any
        # ec2 volumes and are just creating the raid array from ephemeral stores
        format = raid_spec['source_devices'].all? {|dev| created_vols.include?(dev)}
        setup_raid_volume(ic, raid_spec, format)
      end
    end
  end

  desc <<-DESC
    Shows the configured persistent volumes
  DESC
  required_task :describe_volumes do
    results = []
    format = "%-20s %-15s %-15s %-20s"
    results << format % %w[Id Status Attached Instance]

    volumes = cloud.describe_volumes()
    volumes.each do |volume|
      results << format % [volume[:id], volume[:status], volume[:attachment_status], volume[:attachment_instance_id]]
    end

    results.each {|r| logger.info r}
  end

  desc <<-DESC
    Destroys the given persistent volume
  DESC
  required_task :destroy_volume do
    volume_id = get_env('VOLUME_ID', "Volume ID", true)
    destroy_volume(volume_id)
  end

  def create_volume(size, zone)
    volumeId = cloud.create_volume(size.to_s, zone)
    fatal "Failed to create volume" if volumeId.nil?
    return volumeId
  end

  def attach_volume(vol_id, instance_id, device)
    cloud.attach_volume(vol_id, instance_id, device)
  end

  def setup_volume(ic, vol_spec)
    created = nil
    key = "#{ic.name}_#{vol_spec['device']}"
    artifacts = rubber_cfg.instance.artifacts
    vol_id = artifacts['volumes'][key]

    # first create the volume if we don't have a global record (artifacts) for it
    if ! vol_id
      logger.info "Creating volume for #{ic.full_name}:#{vol_spec['device']}"
      vol_id = create_volume(vol_spec['size'], vol_spec['zone'])
      artifacts['volumes'][key] = vol_id
      rubber_cfg.instance.save
      created = vol_spec['device']
    end

    # then, attach it if we don't have a record (on the instance) of attachment
    ic.volumes ||= []
    if ! ic.volumes.include?(vol_id)
      logger.info "Attaching volume #{vol_id} to #{ic.full_name}:#{vol_spec['device']}"
      attach_volume(vol_id, ic.instance_id, vol_spec['device'])
      ic.volumes << vol_id
      rubber_cfg.instance.save

      print "Waiting for volume to attach"
      while true do
        print "."
        sleep 2
        volume = cloud.describe_volumes(vol_id).first
        break if volume[:status] == "in-use"
      end
      print "\n"

      # we don't mount/format at this time if we are doing a RAID array
      if vol_spec['mount'] && vol_spec['filesystem']
        # then format/mount/etc if we don't have an entry in the fstab file
        task :_setup_volume, :hosts => ic.external_ip do
          rubber.run_script 'setup_volume', <<-ENDSCRIPT
            if ! grep -q '#{vol_spec['mount']}' /etc/fstab; then
              if mount | grep -q '#{vol_spec['mount']}'; then
                umount '#{vol_spec['mount']}'
              fi
              mv /etc/fstab /etc/fstab.bak
              cat /etc/fstab.bak | grep -v '#{vol_spec['mount']}' > /etc/fstab
              echo '#{vol_spec['device']} #{vol_spec['mount']} #{vol_spec['filesystem']} noatime 0 0 # rubber volume #{vol_id}' >> /etc/fstab

              #{('yes | mkfs -t ' + vol_spec['filesystem'] + ' ' + vol_spec['device']) if created}
              mkdir -p '#{vol_spec['mount']}'
              mount '#{vol_spec['mount']}'
            fi
          ENDSCRIPT
        end
        _setup_volume
      end

    end
    return created
  end

  def setup_partition(ic, partition_spec)
    created = nil
    part_id = partition_spec['partition_device']

    # Only create the partition if we haven't already done so
    ic.partitions ||= []
    if ! ic.partitions.include?(part_id)
      # unmount the raw disk device if it is in fstab, then write the partition table
      task :_setup_partition, :hosts => ic.external_ip do
        rubber.run_script 'setup_partition', <<-ENDSCRIPT
          if ! fdisk -l 2>&1 | grep -q '#{partition_spec['partition_device']}'; then
            if grep -q '#{partition_spec['disk_device']}\\b' /etc/fstab; then
              umount #{partition_spec['disk_device']}
              mv /etc/fstab /etc/fstab.bak
              cat /etc/fstab.bak | grep -v '#{partition_spec['disk_device']}\\b' > /etc/fstab
            fi

            # partition format is: Start (blank is first available),Size(MB due to -uM),Id(83=linux,82=swap,etc),Bootable
            echo "#{partition_spec['start']},#{partition_spec['size']},#{partition_spec['type']},#{partition_spec['bootable']}" | sfdisk -L -uM #{partition_spec['disk_device']}
          fi
        ENDSCRIPT
      end
      _setup_partition

      ic.partitions << part_id
      rubber_cfg.instance.save
      created = part_id

    end

    return created
  end

  def zero_partitions(ic, partitions)
    env = rubber_cfg.environment.bind(ic.role_names, ic.name)

    # don't zero out the ones that we weren't told to
    partitions.delete_if do |part|
      spec = env.local_volumes.find {|s| s['partition_device'] == part}
      ! spec['zero']
    end

    if partitions.size > 0
      zero_script = ""
      partitions.each do |partition|
        zero_script << "nohup dd if=/dev/zero bs=1M of=#{partition} &> /dev/null &\n"
      end
      # kick off the zeroing in the background on the instance, then wait for it to finish
      task :_zero_partitions, :hosts => ic.external_ip do
        rubber.run_script 'zero_partitions', <<-ENDSCRIPT
          # zero out partitions for performance (see the Amazon EC2 DevGuide)
          echo "Zeroing out raid partitions to improve performance, this may take a while"
          #{zero_script}

          echo "Waiting for partitions to zero out"
          while true; do
            if ! ps ax | grep -q "[d]d.*/dev/zero"; then exit; fi
            echo -n .
            sleep 1
          done
        ENDSCRIPT
      end
      _zero_partitions
    end
  end

  def setup_raid_volume(ic, raid_spec, create=false)
    if create
      mdadm_init = "mdadm --create #{raid_spec['device']} --level #{raid_spec['raid_level']} --raid-devices #{raid_spec['source_devices'].size} #{raid_spec['source_devices'].sort.join(' ')}"
    else
      mdadm_init = "mdadm --assemble #{raid_spec['device']} #{raid_spec['source_devices'].sort.join(' ')}"
    end

    task :_setup_raid_volume, :hosts => ic.external_ip do
      rubber.run_script 'setup_raid_volume', <<-ENDSCRIPT
        if ! grep -q '#{raid_spec['device']}' /etc/fstab; then
          if mount | grep -q '#{raid_spec['mount']}'; then
            umount '#{raid_spec['mount']}'
          fi
          mv /etc/fstab /etc/fstab.bak
          cat /etc/fstab.bak | grep -v '#{raid_spec['mount']}' > /etc/fstab
          echo '#{raid_spec['device']} #{raid_spec['mount']} #{raid_spec['filesystem']} noatime 0 0 # rubber raid volume' >> /etc/fstab

          # seems to help devices initialize, otherwise mdadm fails because the
          # device is not ready even though ec2 says the volume is attached
          fdisk -l &> /dev/null

          #{mdadm_init}

          # set reconstruction speed
          echo $((30*1024)) > /proc/sys/dev/raid/speed_limit_min

          echo 'DEVICE /dev/hd*[0-9] /dev/sd*[0-9]' > /etc/mdadm/mdadm.conf
          mdadm --detail --scan >> /etc/mdadm/mdadm.conf

          mv /etc/rc.local /etc/rc.local.bak
          echo "mdadm --assemble --scan" > /etc/rc.local
          chmod +x /etc/rc.local

          #{('yes | mkfs -t ' + raid_spec['filesystem'] + ' ' + raid_spec['device']) if create}
          mkdir -p '#{raid_spec['mount']}'
          mount '#{raid_spec['mount']}'
        fi
      ENDSCRIPT
    end
    _setup_raid_volume
  end

  def destroy_volume(volume_id)
    logger.info "Detaching volume #{volume_id}"
    cloud.detach_volume(volume_id) rescue logger.info("Volume was not attached")

    print "Waiting for volume to detach"
    while true do
      print "."
      sleep 2
      volume = cloud.describe_volumes(volume_id).first
      break if !volume || volume[:attachment_status] == "detached"
    end
    print "\n"

    logger.info "Deleting volume #{volume_id}"
    cloud.destroy_volume(volume_id)

    logger.info "Removing volume #{volume_id} from rubber instances file"
    artifacts = rubber_cfg.instance.artifacts
    artifacts['volumes'].delete_if {|k,v| v == volume_id}
    rubber_cfg.instance.each do |ic|
      ic.volumes.delete(volume_id) if ic.volumes
    end
    rubber_cfg.instance.save
  end

end
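For reference, a hypothetical rubber.yml excerpt wiring the three volume types together. The keys are the ones the code above reads (device/size/zone/mount/filesystem for cloud volumes; disk_device/partition_device/start/size/type/bootable/zero for local partitions; device/raid_level/source_devices/mount/filesystem for RAID), but the values and overall layout are invented and may differ from a real config:

  volumes:
    - device: /dev/sdh
      size: 100
      zone: us-east-1a
      mount: /db
      filesystem: ext3
  local_volumes:
    - disk_device: /dev/sdb
      partition_device: /dev/sdb1
      start: ''
      size: ''
      type: 83
      bootable: ''
      zero: true
    - disk_device: /dev/sdc
      partition_device: /dev/sdc1
      start: ''
      size: ''
      type: 83
      bootable: ''
      zero: true
  raid_volumes:
    - device: /dev/md0
      raid_level: 0
      source_devices: [/dev/sdb1, /dev/sdc1]
      mount: /mnt/raid
      filesystem: ext3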