nirvdrum-rubber 1.1.7
- data/CHANGELOG +146 -0
- data/COPYING +339 -0
- data/README +6 -0
- data/TODO +11 -0
- data/VERSION +1 -0
- data/bin/vulcanize +41 -0
- data/generators/vulcanize/USAGE +6 -0
- data/generators/vulcanize/templates/apache/config/rubber/deploy-apache.rb +51 -0
- data/generators/vulcanize/templates/apache/config/rubber/role/apache/deflate.conf +10 -0
- data/generators/vulcanize/templates/apache/config/rubber/role/apache/expires.conf +9 -0
- data/generators/vulcanize/templates/apache/config/rubber/role/apache/headers.conf +6 -0
- data/generators/vulcanize/templates/apache/config/rubber/role/apache/monit-apache.conf +8 -0
- data/generators/vulcanize/templates/apache/config/rubber/role/apache/ports.conf +5 -0
- data/generators/vulcanize/templates/apache/config/rubber/role/apache/setenvif.conf +52 -0
- data/generators/vulcanize/templates/apache/config/rubber/role/web_tools/tools-apache-vhost.conf +66 -0
- data/generators/vulcanize/templates/apache/config/rubber/role/web_tools/tools-apache.auth +7 -0
- data/generators/vulcanize/templates/apache/config/rubber/role/web_tools/tools-index.html +34 -0
- data/generators/vulcanize/templates/apache/config/rubber/rubber-apache.yml +6 -0
- data/generators/vulcanize/templates/apache/templates.yml +1 -0
- data/generators/vulcanize/templates/base/Capfile +14 -0
- data/generators/vulcanize/templates/base/config/deploy.rb +55 -0
- data/generators/vulcanize/templates/base/config/rubber/common/crontab +17 -0
- data/generators/vulcanize/templates/base/config/rubber/common/monit-postfix.conf +8 -0
- data/generators/vulcanize/templates/base/config/rubber/common/rubber.profile +14 -0
- data/generators/vulcanize/templates/base/config/rubber/deploy-setup.rb +84 -0
- data/generators/vulcanize/templates/base/config/rubber/rubber-dns.yml +79 -0
- data/generators/vulcanize/templates/base/config/rubber/rubber.yml +227 -0
- data/generators/vulcanize/templates/base/lib/tasks/rubber.rake +15 -0
- data/generators/vulcanize/templates/base/script/cron-rake +18 -0
- data/generators/vulcanize/templates/base/script/cron-runner +18 -0
- data/generators/vulcanize/templates/base/script/cron-sh +67 -0
- data/generators/vulcanize/templates/base/templates.yml +1 -0
- data/generators/vulcanize/templates/complete_mongrel_mysql/config/rubber/role/haproxy/haproxy-mongrel.conf +23 -0
- data/generators/vulcanize/templates/complete_mongrel_mysql/config/rubber/role/nginx/nginx-mongrel.conf +113 -0
- data/generators/vulcanize/templates/complete_mongrel_mysql/config/rubber/rubber-complete.yml +41 -0
- data/generators/vulcanize/templates/complete_mongrel_mysql/templates.yml +6 -0
- data/generators/vulcanize/templates/complete_passenger_mysql/config/rubber/role/haproxy/haproxy-passenger.conf +21 -0
- data/generators/vulcanize/templates/complete_passenger_mysql/config/rubber/rubber-complete.yml +40 -0
- data/generators/vulcanize/templates/complete_passenger_mysql/templates.yml +10 -0
- data/generators/vulcanize/templates/cruise/config/rubber/deploy-cruise.rb +72 -0
- data/generators/vulcanize/templates/cruise/config/rubber/role/cruise/cruise +40 -0
- data/generators/vulcanize/templates/cruise/config/rubber/role/cruise/my.cnf +165 -0
- data/generators/vulcanize/templates/cruise/config/rubber/role/cruise/production.rb +8 -0
- data/generators/vulcanize/templates/cruise/config/rubber/role/cruise/site_config.rb +76 -0
- data/generators/vulcanize/templates/cruise/config/rubber/role/web_tools/cruise-nginx.conf +11 -0
- data/generators/vulcanize/templates/cruise/config/rubber/rubber-cruise.yml +18 -0
- data/generators/vulcanize/templates/cruise/templates.yml +1 -0
- data/generators/vulcanize/templates/haproxy/config/rubber/deploy-haproxy.rb +45 -0
- data/generators/vulcanize/templates/haproxy/config/rubber/role/haproxy/haproxy-base.conf +26 -0
- data/generators/vulcanize/templates/haproxy/config/rubber/role/haproxy/haproxy-default.conf +8 -0
- data/generators/vulcanize/templates/haproxy/config/rubber/role/haproxy/monit-haproxy.conf +9 -0
- data/generators/vulcanize/templates/haproxy/config/rubber/role/haproxy/syslog-haproxy.conf +6 -0
- data/generators/vulcanize/templates/haproxy/config/rubber/role/haproxy/syslogd-default.conf +17 -0
- data/generators/vulcanize/templates/haproxy/config/rubber/role/web_tools/haproxy-nginx.conf +10 -0
- data/generators/vulcanize/templates/haproxy/config/rubber/rubber-haproxy.yml +7 -0
- data/generators/vulcanize/templates/haproxy/templates.yml +1 -0
- data/generators/vulcanize/templates/jetty/config/rubber/deploy-jetty.rb +59 -0
- data/generators/vulcanize/templates/jetty/config/rubber/role/jetty/jetty.sh +589 -0
- data/generators/vulcanize/templates/jetty/config/rubber/role/jetty/jetty.xml +199 -0
- data/generators/vulcanize/templates/jetty/config/rubber/role/jetty/monit-jetty.conf +9 -0
- data/generators/vulcanize/templates/jetty/config/rubber/rubber-jetty.yml +10 -0
- data/generators/vulcanize/templates/jetty/templates.yml +1 -0
- data/generators/vulcanize/templates/memcached/config/memcached.yml +28 -0
- data/generators/vulcanize/templates/memcached/config/rubber/common/memcached.yml +14 -0
- data/generators/vulcanize/templates/memcached/config/rubber/role/memcached/memcached.conf +52 -0
- data/generators/vulcanize/templates/memcached/config/rubber/role/memcached/memcached_munin_plugin +249 -0
- data/generators/vulcanize/templates/memcached/config/rubber/rubber-memcached.yml +7 -0
- data/generators/vulcanize/templates/memcached/templates.yml +1 -0
- data/generators/vulcanize/templates/minimal_mysql/templates.yml +7 -0
- data/generators/vulcanize/templates/minimal_nodb/templates.yml +6 -0
- data/generators/vulcanize/templates/mongrel/config/rubber/deploy-mongrel.rb +75 -0
- data/generators/vulcanize/templates/mongrel/config/rubber/role/mongrel/mongrel_cluster.yml +12 -0
- data/generators/vulcanize/templates/mongrel/config/rubber/role/mongrel/monit-mongrel.conf +20 -0
- data/generators/vulcanize/templates/mongrel/config/rubber/rubber-mongrel.yml +9 -0
- data/generators/vulcanize/templates/mongrel/templates.yml +1 -0
- data/generators/vulcanize/templates/monit/config/rubber/common/monit-default.conf +15 -0
- data/generators/vulcanize/templates/monit/config/rubber/common/monit.conf +251 -0
- data/generators/vulcanize/templates/monit/config/rubber/deploy-monit.rb +32 -0
- data/generators/vulcanize/templates/monit/config/rubber/role/web_tools/monit-admin-nginx.conf +10 -0
- data/generators/vulcanize/templates/monit/config/rubber/rubber-monit.yml +6 -0
- data/generators/vulcanize/templates/monit/templates.yml +1 -0
- data/generators/vulcanize/templates/munin/config/rubber/common/monit-munin.conf +8 -0
- data/generators/vulcanize/templates/munin/config/rubber/common/munin-node.conf +48 -0
- data/generators/vulcanize/templates/munin/config/rubber/common/munin-plugins.conf +9 -0
- data/generators/vulcanize/templates/munin/config/rubber/deploy-munin.rb +46 -0
- data/generators/vulcanize/templates/munin/config/rubber/role/web_tools/munin-nginx.conf +8 -0
- data/generators/vulcanize/templates/munin/config/rubber/role/web_tools/munin-plugins.conf +31 -0
- data/generators/vulcanize/templates/munin/config/rubber/role/web_tools/munin.conf +80 -0
- data/generators/vulcanize/templates/munin/config/rubber/rubber-munin.yml +8 -0
- data/generators/vulcanize/templates/munin/script/munin/example_mysql_query.rb +57 -0
- data/generators/vulcanize/templates/munin/script/munin/example_simple.rb +24 -0
- data/generators/vulcanize/templates/munin/templates.yml +1 -0
- data/generators/vulcanize/templates/mysql/config/rubber/common/database.yml +11 -0
- data/generators/vulcanize/templates/mysql/config/rubber/deploy-mysql.rb +156 -0
- data/generators/vulcanize/templates/mysql/config/rubber/role/db/crontab +14 -0
- data/generators/vulcanize/templates/mysql/config/rubber/role/db/monit-mysql.cnf +10 -0
- data/generators/vulcanize/templates/mysql/config/rubber/role/db/my.cnf +167 -0
- data/generators/vulcanize/templates/mysql/config/rubber/role/mysql_slave/mysql_slave_munin_plugin +51 -0
- data/generators/vulcanize/templates/mysql/config/rubber/rubber-mysql.yml +46 -0
- data/generators/vulcanize/templates/mysql/templates.yml +1 -0
- data/generators/vulcanize/templates/mysql_cluster/config/rubber/common/mysql_cluster_migrations.rb +13 -0
- data/generators/vulcanize/templates/mysql_cluster/config/rubber/deploy-mysql_cluster.rb +173 -0
- data/generators/vulcanize/templates/mysql_cluster/config/rubber/role/mysql_data/my.cnf +15 -0
- data/generators/vulcanize/templates/mysql_cluster/config/rubber/role/mysql_mgm/ndb_mgmd.cnf +39 -0
- data/generators/vulcanize/templates/mysql_cluster/config/rubber/role/mysql_sql/monit-mysql_cluster_sql.cnf +10 -0
- data/generators/vulcanize/templates/mysql_cluster/config/rubber/role/mysql_sql/my.cnf +23 -0
- data/generators/vulcanize/templates/mysql_cluster/config/rubber/rubber-mysql_cluster.yml +32 -0
- data/generators/vulcanize/templates/mysql_cluster/templates.yml +1 -0
- data/generators/vulcanize/templates/mysql_proxy/config/rubber/common/database.yml +16 -0
- data/generators/vulcanize/templates/mysql_proxy/config/rubber/common/monit-mysql_proxy.cnf +10 -0
- data/generators/vulcanize/templates/mysql_proxy/config/rubber/common/mysql-proxy +153 -0
- data/generators/vulcanize/templates/mysql_proxy/config/rubber/common/mysql-proxy.conf +10 -0
- data/generators/vulcanize/templates/mysql_proxy/config/rubber/common/mysql-proxy.lua +5 -0
- data/generators/vulcanize/templates/mysql_proxy/config/rubber/deploy-mysql_proxy.rb +52 -0
- data/generators/vulcanize/templates/mysql_proxy/config/rubber/rubber-mysql_proxy.yml +11 -0
- data/generators/vulcanize/templates/mysql_proxy/templates.yml +1 -0
- data/generators/vulcanize/templates/nginx/config/rubber/deploy-nginx.rb +45 -0
- data/generators/vulcanize/templates/nginx/config/rubber/role/nginx/crontab +9 -0
- data/generators/vulcanize/templates/nginx/config/rubber/role/nginx/monit-nginx.conf +8 -0
- data/generators/vulcanize/templates/nginx/config/rubber/role/nginx/nginx.conf +42 -0
- data/generators/vulcanize/templates/nginx/config/rubber/role/web_tools/nginx-tools.conf +55 -0
- data/generators/vulcanize/templates/nginx/config/rubber/role/web_tools/tools-index.html +30 -0
- data/generators/vulcanize/templates/nginx/config/rubber/role/web_tools/tools-nginx.auth +7 -0
- data/generators/vulcanize/templates/nginx/config/rubber/rubber-nginx.yml +10 -0
- data/generators/vulcanize/templates/nginx/templates.yml +1 -0
- data/generators/vulcanize/templates/passenger/config/rubber/deploy-passenger.rb +37 -0
- data/generators/vulcanize/templates/passenger/config/rubber/role/passenger/munin-passenger-memory.conf +34 -0
- data/generators/vulcanize/templates/passenger/config/rubber/role/passenger/munin-passenger-sudoers.conf +7 -0
- data/generators/vulcanize/templates/passenger/config/rubber/role/passenger/munin-passenger.conf +47 -0
- data/generators/vulcanize/templates/passenger/config/rubber/role/passenger/passenger-apache-vhost.conf +46 -0
- data/generators/vulcanize/templates/passenger/config/rubber/role/passenger/passenger.conf +10 -0
- data/generators/vulcanize/templates/passenger/config/rubber/rubber-passenger.yml +15 -0
- data/generators/vulcanize/templates/passenger/templates.yml +3 -0
- data/generators/vulcanize/templates/redis/config/rubber/deploy-redis.rb +36 -0
- data/generators/vulcanize/templates/redis/config/rubber/role/redis/crontab +8 -0
- data/generators/vulcanize/templates/redis/config/rubber/role/redis/monit-redis.conf +9 -0
- data/generators/vulcanize/templates/redis/config/rubber/role/redis/redis.conf +141 -0
- data/generators/vulcanize/templates/redis/config/rubber/rubber-redis.yml +4 -0
- data/generators/vulcanize/templates/redis/templates.yml +1 -0
- data/generators/vulcanize/templates/resque/config/rubber/deploy-resque-worker-default.rb +38 -0
- data/generators/vulcanize/templates/resque/config/rubber/deploy-resque.rb +39 -0
- data/generators/vulcanize/templates/resque/config/rubber/role/resque_worker_default/monit-resque_worker_default.conf +19 -0
- data/generators/vulcanize/templates/resque/config/rubber/rubber-resque.yml +10 -0
- data/generators/vulcanize/templates/resque/templates.yml +3 -0
- data/generators/vulcanize/templates/sphinx/config/rubber/common/sphinx.yml +46 -0
- data/generators/vulcanize/templates/sphinx/config/rubber/deploy-sphinx.rb +112 -0
- data/generators/vulcanize/templates/sphinx/config/rubber/role/sphinx/crontab +11 -0
- data/generators/vulcanize/templates/sphinx/config/rubber/role/sphinx/monit-sphinx.conf +10 -0
- data/generators/vulcanize/templates/sphinx/config/rubber/rubber-sphinx.yml +6 -0
- data/generators/vulcanize/templates/sphinx/templates.yml +1 -0
- data/generators/vulcanize/vulcanize_generator.rb +67 -0
- data/lib/capistrano/hostcmd.rb +12 -0
- data/lib/rubber.rb +38 -0
- data/lib/rubber/capistrano.rb +1 -0
- data/lib/rubber/cloud.rb +13 -0
- data/lib/rubber/cloud/aws.rb +334 -0
- data/lib/rubber/cloud/base.rb +16 -0
- data/lib/rubber/configuration.rb +47 -0
- data/lib/rubber/dns.rb +13 -0
- data/lib/rubber/dns/base.rb +84 -0
- data/lib/rubber/dns/dyndns.rb +78 -0
- data/lib/rubber/dns/nettica.rb +117 -0
- data/lib/rubber/dns/zerigo.rb +174 -0
- data/lib/rubber/environment.rb +169 -0
- data/lib/rubber/generator.rb +197 -0
- data/lib/rubber/instance.rb +166 -0
- data/lib/rubber/recipes/rubber.rb +89 -0
- data/lib/rubber/recipes/rubber/bundles.rb +28 -0
- data/lib/rubber/recipes/rubber/deploy.rb +90 -0
- data/lib/rubber/recipes/rubber/instances.rb +393 -0
- data/lib/rubber/recipes/rubber/load_balancers.rb +44 -0
- data/lib/rubber/recipes/rubber/security_groups.rb +189 -0
- data/lib/rubber/recipes/rubber/setup.rb +457 -0
- data/lib/rubber/recipes/rubber/spot_requests.rb +17 -0
- data/lib/rubber/recipes/rubber/static_ips.rb +107 -0
- data/lib/rubber/recipes/rubber/utils.rb +203 -0
- data/lib/rubber/recipes/rubber/volumes.rb +264 -0
- data/lib/rubber/tasks/rubber.rb +279 -0
- data/lib/rubber/util.rb +37 -0
- data/rails/init.rb +9 -0
- data/test/environment_test.rb +133 -0
- data/test/generator_test.rb +323 -0
- data/test/instance_test.rb +93 -0
- data/test/test_helper.rb +8 -0
- data/test/util_test.rb +16 -0
- metadata +298 -0
data/lib/rubber/recipes/rubber/spot_requests.rb
@@ -0,0 +1,17 @@
+namespace :rubber do
+
+  desc "Describes all your spot instance requests"
+  required_task :describe_spot_instance_requests do
+    requests = cloud.describe_spot_instance_requests()
+    requests.each do |request|
+      logger.info "======================"
+      logger.info "ID: #{request[:id]}"
+      logger.info "Created at: #{request[:created_at]}"
+      logger.info "Max. price: $#{request[:spot_price]}"
+      logger.info "State: #{request[:state]}"
+      logger.info "Instance type: #{request[:type]}"
+      logger.info "AMI: #{request[:image_id]}"
+    end
+  end
+
+end
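For reference, a hedged sketch of the request hash this task iterates over; the keys are taken from the logger calls above, while every literal value is hypothetical:

    # Shape assumed by rubber:describe_spot_instance_requests (values made up)
    request = {
      :id         => "sir-12345678",   # hypothetical request id
      :created_at => Time.now,
      :spot_price => "0.04",
      :state      => "open",
      :type       => "m1.small",
      :image_id   => "ami-abcd1234"    # hypothetical AMI id
    }

Invoked as a Capistrano task (e.g. cap rubber:describe_spot_instance_requests), it logs one such block per outstanding request.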
data/lib/rubber/recipes/rubber/static_ips.rb
@@ -0,0 +1,107 @@
+namespace :rubber do
+
+
+  desc <<-DESC
+    Sets up static IPs for the instances configured to have them
+  DESC
+  required_task :setup_static_ips do
+    rubber_instances.each do |ic|
+      env = rubber_cfg.environment.bind(ic.role_names, ic.name)
+      if env.use_static_ip
+        artifacts = rubber_instances.artifacts
+        ip = artifacts['static_ips'][ic.name] rescue nil
+
+        # first allocate the static ip if we don't have a global record (artifacts) for it
+        if ! ip
+          logger.info "Allocating static IP for #{ic.full_name}"
+          ip = allocate_static_ip()
+          artifacts['static_ips'][ic.name] = ip
+          rubber_instances.save
+        end
+
+        # then, associate it if we don't have a record (on instance) of association
+        if ! ic.static_ip
+          logger.info "Associating static ip #{ip} with #{ic.full_name}"
+          associate_static_ip(ip, ic.instance_id)
+
+          instance = cloud.describe_instances(ic.instance_id).first
+          ic.external_host = instance[:external_host]
+          ic.internal_host = instance[:internal_host]
+          ic.external_ip = ip
+          ic.static_ip = ip
+          rubber_instances.save()
+
+          logger.info "Waiting for static ip to associate"
+          while true do
+            task :_wait_for_static_ip, :hosts => ip do
+              run "echo"
+            end
+            begin
+              _wait_for_static_ip
+            rescue ConnectionError
+              sleep 2
+              logger.info "Failed to connect to static ip #{ip}, retrying"
+              retry
+            end
+            break
+          end
+        end
+
+      end
+    end
+  end
+
+  desc <<-DESC
+    Shows the configured static IPs
+  DESC
+  required_task :describe_static_ips do
+    results = []
+    format = "%-10s %-15s %-30s"
+    results << format % %w[InstanceID IP Alias]
+
+    ips = cloud.describe_static_ips()
+    ips.each do |ip_data|
+      instance_id = ip_data[:instance_id]
+      ip = ip_data[:ip]
+
+      local_alias = find_alias(ip, instance_id, false)
+
+      results << format % [instance_id || "Unassigned", ip, local_alias || "Unknown"]
+    end
+
+    results.each {|r| logger.info r}
+  end
+
+  desc <<-DESC
+    Deallocates the given static ip
+  DESC
+  required_task :destroy_static_ip do
+    ip = get_env('IP', "Static IP (run rubber:describe_static_ips for a list)", true)
+    destroy_static_ip(ip)
+  end
+
+  def allocate_static_ip()
+    ip = cloud.create_static_ip()
+    fatal "Failed to allocate static ip" if ip.nil?
+    return ip
+  end
+
+  def associate_static_ip(ip, instance_id)
+    success = cloud.attach_static_ip(ip, instance_id)
+    fatal "Failed to associate static ip" unless success
+  end
+
+  def destroy_static_ip(ip)
+    logger.info "Releasing static ip: #{ip}"
+    cloud.destroy_static_ip(ip) rescue logger.info("IP was not attached")
+
+    logger.info "Removing ip #{ip} from rubber instances file"
+    artifacts = rubber_instances.artifacts
+    artifacts['static_ips'].delete_if {|k,v| v == ip}
+    rubber_instances.each do |ic|
+      ic.static_ip = nil if ic.static_ip == ip
+    end
+    rubber_instances.save
+  end
+
+end
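A hedged sketch of the bookkeeping these tasks maintain, inferred from the reads and writes above; the alias and IP values are hypothetical:

    # Global record in the instances file (written by setup_static_ips)
    artifacts = rubber_instances.artifacts
    artifacts['static_ips']           # => { "web01" => "203.0.113.10" }

    # Per-instance record once association succeeds
    ic = rubber_instances["web01"]    # hypothetical instance alias
    ic.static_ip                      # => "203.0.113.10"
    ic.external_ip                    # => "203.0.113.10"

Allocation only happens for instances whose bound environment sets use_static_ip, so the flag can be scoped to particular roles or hosts in rubber.yml rather than applied globally.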
data/lib/rubber/recipes/rubber/utils.rb
@@ -0,0 +1,203 @@
+namespace :rubber do
+
+  desc <<-DESC
+    Convenience task for creating a staging instance for the given RUBBER_ENV/RAILS_ENV.
+    By default this task assigns all known roles when creating the instance,
+    but you can specify a different default in rubber.yml:staging_roles
+    At the end, the instance will be up and running
+    e.g. RUBBER_ENV=matt cap create_staging
+  DESC
+  required_task :create_staging do
+    if rubber_instances.size > 0
+      value = Capistrano::CLI.ui.ask("The #{RUBBER_ENV} environment already has instances. Are you SURE you want to create a staging instance that may interact with them [y/N]?: ")
+      fatal("Exiting", 0) if value !~ /^y/
+    end
+    instance_alias = ENV['ALIAS'] = rubber.get_env("ALIAS", "Hostname to use for staging instance", true, RUBBER_ENV)
+    default_roles = rubber_env.staging_roles || "*"
+    roles = ENV['ROLES'] = rubber.get_env("ROLES", "Roles to use for staging instance", true, default_roles)
+
+    # some bootstraps update code (bootstrap_db), but if you don't have that role, we need to do it here
+    # Since the release directory variable gets reused by cap, we have to just do the symlink here - doing
+    # an update again would fail
+    set :rubber_code_was_updated, false
+    after "deploy:update_code" do
+      set :rubber_code_was_updated, true
+    end
+
+    if rubber_instances[instance_alias]
+      logger.info "Instance already exists, skipping to bootstrap"
+    else
+      rubber.create
+    end
+    rubber.bootstrap
+    # stop everything in case we have a bundled instance with monit, etc starting at boot
+    deploy.stop rescue nil
+    if ! rubber_code_was_updated
+      deploy.update_code
+    end
+    deploy.symlink
+    deploy.migrate
+    deploy.start
+  end
+
+  desc <<-DESC
+    Destroy the staging instance for the given RUBBER_ENV.
+  DESC
+  task :destroy_staging do
+    ENV['ALIAS'] = rubber.get_env("ALIAS", "Hostname of staging instance to be destroyed", true, RUBBER_ENV)
+    rubber.destroy
+  end
+
+  desc <<-DESC
+    Live tail of rails log files for all machines
+    By default tails the rails logs for the current RUBBER_ENV, but one can
+    set FILE=/path/file.*.glob to tail a different set
+  DESC
+  task :tail_logs, :roles => :app do
+    log_file_glob = rubber.get_env("FILE", "Log files to tail", true, "#{current_path}/log/#{RUBBER_ENV}*.log")
+    run "tail -qf #{log_file_glob}" do |channel, stream, data|
+      puts # for an extra line break before the host name
+      puts data
+      break if stream == :err
+    end
+  end
+
+  # Use instead of task to define a capistrano task that runs serially instead of in parallel.
+  # The :groups option specifies how many groups to partition the servers into so that we can
+  # do the task for N (= total/groups) servers at a time. When multiple roles are supplied,
+  # this tries to be intelligent and slices up each role independently, but runs the slices together
+  # so that things don't take too long. E.g. when adding an :api role to some :app servers, a restart
+  # shouldn't do all the api servers first and then the others, as that would take a long time -
+  # instead it does some :api and some :app, then some more of each
+  #
+  def serial_task(ns, name, options = {}, &block)
+    # first figure out server names for the passed in roles - when no roles
+    # are passed in, use all servers
+    serial_roles = Array(options[:roles])
+    servers = {}
+    if serial_roles.empty?
+      all_servers = []
+      self.roles.each do |rolename, serverdefs|
+        all_servers += serverdefs.collect {|server| server.host}
+      end
+      servers[:_serial_all] = all_servers.uniq.sort
+    else
+      # get servers for each role
+      self.roles.each do |rolename, serverdefs|
+        if serial_roles.include?(rolename)
+          servers[rolename] ||= []
+          servers[rolename] += serverdefs.collect {|server| server.host}
+        end
+      end
+
+      # Remove duplication of servers - roles which come first in the list
+      # have precedence, so the servers show up in that group
+      serial_roles.each_with_index do |rolename, i|
+        servers[rolename] ||= []
+        serial_roles[i+1..-1].each do |r|
+          servers[r] -= servers[rolename]
+        end
+        servers[rolename] = servers[rolename].uniq.sort
+      end
+    end
+
+    # group each role's servers into slices, but combine slices across roles
+    slices = []
+    servers.each do |rolename, svrs|
+      next if svrs.size == 0
+      # figure out size of each slice by dividing server count by # of groups
+      slice_size = (Float(svrs.size) / (options.delete(:groups) || 2)).round
+      slice_size = 1 if slice_size == 0
+      slice_idx = 0
+      svrs.each_slice(slice_size) do |srv_slice|
+        slices[slice_idx] ||= []
+        slices[slice_idx] += srv_slice
+        slice_idx += 1
+      end
+    end
+    # for each slice, define a new task specific to the hosts in that slice
+    task_syms = []
+    slices.each do |server_group|
+      servers = server_group.map{|s| s.gsub(/\..*/, '')}.join("_")
+      task_sym = "_serial_task_#{name.to_s}_#{servers}".to_sym
+      task_syms << task_sym
+      ns.task task_sym, options.merge(:hosts => server_group), &block
+    end
+
+    # create the top level task that calls all the serial ones
+    ns.task name, options do
+      task_syms.each do |t|
+        ns.send t
+      end
+    end
+  end
+
+  def find_alias(ip, instance_id, do_connect=true)
+    if instance_id
+      instance = rubber_instances.find {|i| i.instance_id == instance_id }
+      local_alias = instance.full_name if instance
+    end
+    local_alias ||= File.read("/etc/hosts").grep(/#{ip}/).first.split[1] rescue nil
+    if ! local_alias && do_connect
+      task :_get_ip, :hosts => ip do
+        local_alias = "* " + capture("hostname").strip
+      end
+      _get_ip rescue ConnectionError
+    end
+    return local_alias
+  end
+
+  def prepare_script(name, contents)
+    script = "/tmp/#{name}"
+    # this lets us abort a script if a command in the middle of it errors out
+    contents = "#{rubber_env.stop_on_error_cmd}\n#{contents}" if rubber_env.stop_on_error_cmd
+    put(contents, script)
+    return script
+  end
+
+  def run_script(name, contents)
+    script = prepare_script(name, contents)
+    run "sh #{script}"
+  end
+
+  def sudo_script(name, contents)
+    script = prepare_script(name, contents)
+    sudo "sh #{script}"
+  end
+
+  def get_env(name, desc, required=false, default=nil)
+    value = ENV.delete(name)
+    msg = "#{desc}"
+    msg << " [#{default}]" if default
+    msg << ": "
+    value = Capistrano::CLI.ui.ask(msg) unless value
+    value = value.size == 0 ? default : value
+    fatal "#{name} is required, pass using environment or enter at prompt" if required && ! value
+    return value
+  end
+
+  def fatal(msg, code=1)
+    logger.info msg
+    exit code
+  end
+
+  # Returns a map of "hostvar_<hostname>" => value for the given config value for each instance host.
+  # This is used to run capistrano tasks scoped to the correct role/host so that a config value
+  # specific to a role/host will only be used for that role/host, e.g. the list of packages to
+  # be installed.
+  def get_host_options(cfg_name, &block)
+    opts = {}
+    rubber_instances.each do |ic|
+      env = rubber_cfg.environment.bind(ic.role_names, ic.name)
+      cfg_value = env[cfg_name]
+      if cfg_value
+        if block
+          cfg_value = block.call(cfg_value)
+        end
+        opts["hostvar_#{ic.full_name}"] = cfg_value if cfg_value && cfg_value.strip.size > 0
+      end
+    end
+    return opts
+  end
+
+end
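To make the serial_task contract concrete, a hedged usage sketch based on its signature above (namespace, task name, options, block); the task name and body are hypothetical, and real call sites may pass different options:

    namespace :deploy do
      rubber.serial_task self, :serial_restart, :roles => [:app, :api], :groups => 3 do
        run "touch #{current_path}/tmp/restart.txt"
      end
    end

With :groups => 3, each role's server list is split into three slices, and slices from :app and :api are combined so each pass touches some servers of each role rather than finishing one role before starting the next.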
data/lib/rubber/recipes/rubber/volumes.rb
@@ -0,0 +1,264 @@
+namespace :rubber do
+
+  desc <<-DESC
+    Sets up persistent volumes in the cloud
+    All volumes defined in rubber.yml will be created if necessary, and attached/mounted on their associated instances
+  DESC
+  required_task :setup_volumes do
+    rubber_instances.filtered.each do |ic|
+      env = rubber_cfg.environment.bind(ic.role_names, ic.name)
+      created_vols = []
+      vol_specs = env.volumes || []
+      vol_specs.each do |vol_spec|
+        created_vols << setup_volume(ic, vol_spec)
+      end
+      created_vols.compact!
+
+      created_parts = []
+      partition_specs = env.local_volumes || []
+      partition_specs.each do |partition_spec|
+        created_parts << setup_partition(ic, partition_spec)
+      end
+      created_parts.compact!
+      zero_partitions(ic, created_parts)
+      created_vols += created_parts
+
+      created_vols = created_vols.compact.uniq
+      raid_specs = env.raid_volumes || []
+      raid_specs.each do |raid_spec|
+        # we want to format if we created the ec2 volumes, or if we don't have any
+        # ec2 volumes and are just creating the raid array from ephemeral stores
+        format = raid_spec['source_devices'].all? {|dev| created_vols.include?(dev)}
+        setup_raid_volume(ic, raid_spec, format)
+      end
+    end
+  end
+
+  desc <<-DESC
+    Shows the configured persistent volumes
+  DESC
+  required_task :describe_volumes do
+    results = []
+    format = "%-20s %-15s %-15s %-20s"
+    results << format % %w[Id Status Attached Instance]
+
+    volumes = cloud.describe_volumes()
+    volumes.each do |volume|
+      results << format % [volume[:id], volume[:status], volume[:attachment_status], volume[:attachment_instance_id]]
+    end
+
+    results.each {|r| logger.info r}
+  end
+
+  desc <<-DESC
+    Destroys the given persistent volume
+  DESC
+  required_task :destroy_volume do
+    volume_id = get_env('VOLUME_ID', "Volume ID", true)
+    destroy_volume(volume_id)
+  end
+
+  def create_volume(size, zone)
+    volume_id = cloud.create_volume(size.to_s, zone)
+    fatal "Failed to create volume" if volume_id.nil?
+    return volume_id
+  end
+
+  def attach_volume(vol_id, instance_id, device)
+    cloud.attach_volume(vol_id, instance_id, device)
+  end
+
+  def setup_volume(ic, vol_spec)
+    created = nil
+    key = "#{ic.name}_#{vol_spec['device']}"
+    artifacts = rubber_instances.artifacts
+    vol_id = artifacts['volumes'][key]
+
+    # first create the volume if we don't have a global record (artifacts) for it
+    if ! vol_id
+      logger.info "Creating volume for #{ic.full_name}:#{vol_spec['device']}"
+      vol_id = create_volume(vol_spec['size'], vol_spec['zone'])
+      artifacts['volumes'][key] = vol_id
+      rubber_instances.save
+      created = vol_spec['device']
+    end
+
+    # then, attach it if we don't have a record (on instance) of attachment
+    ic.volumes ||= []
+    if ! ic.volumes.include?(vol_id)
+      logger.info "Attaching volume #{vol_id} to #{ic.full_name}:#{vol_spec['device']}"
+      attach_volume(vol_id, ic.instance_id, vol_spec['device'])
+      ic.volumes << vol_id
+      rubber_instances.save
+
+      print "Waiting for volume to attach"
+      while true do
+        print "."
+        sleep 2
+        volume = cloud.describe_volumes(vol_id).first
+        break if volume[:status] == "in-use"
+      end
+      print "\n"
+
+      # we don't mount/format at this time if we are doing a RAID array
+      if vol_spec['mount'] && vol_spec['filesystem']
+        # then format/mount/etc if we don't have an entry in the fstab file
+        task :_setup_volume, :hosts => ic.external_ip do
+          rubber.run_script 'setup_volume', <<-ENDSCRIPT
+            if ! grep -q '#{vol_spec['mount']}' /etc/fstab; then
+              if mount | grep -q '#{vol_spec['mount']}'; then
+                umount '#{vol_spec['mount']}'
+              fi
+              mv /etc/fstab /etc/fstab.bak
+              cat /etc/fstab.bak | grep -v '#{vol_spec['mount']}' > /etc/fstab
+              echo '#{vol_spec['device']} #{vol_spec['mount']} #{vol_spec['filesystem']} noatime 0 0 # rubber volume #{vol_id}' >> /etc/fstab
+
+              #{('yes | mkfs -t ' + vol_spec['filesystem'] + ' ' + vol_spec['device']) if created}
+              mkdir -p '#{vol_spec['mount']}'
+              mount '#{vol_spec['mount']}'
+            fi
+          ENDSCRIPT
+        end
+        _setup_volume
+      end
+
+    end
+    return created
+  end
+
+  def setup_partition(ic, partition_spec)
+    created = nil
+    part_id = partition_spec['partition_device']
+
+    # Only create the partition if we haven't already done so
+    ic.partitions ||= []
+    if ! ic.partitions.include?(part_id)
+      # create the partition if the device doesn't already show up in fdisk
+      task :_setup_partition, :hosts => ic.external_ip do
+        rubber.run_script 'setup_partition', <<-ENDSCRIPT
+          if ! fdisk -l 2>&1 | grep -q '#{partition_spec['partition_device']}'; then
+            if grep -q '#{partition_spec['disk_device']}\\b' /etc/fstab; then
+              umount #{partition_spec['disk_device']}
+              mv /etc/fstab /etc/fstab.bak
+              cat /etc/fstab.bak | grep -v '#{partition_spec['disk_device']}\\b' > /etc/fstab
+            fi
+
+            # partition format is: Start (blank is first available),Size(MB due to -uM),Id(83=linux,82=swap,etc),Bootable
+            echo "#{partition_spec['start']},#{partition_spec['size']},#{partition_spec['type']},#{partition_spec['bootable']}" | sfdisk -L -uM #{partition_spec['disk_device']}
+          fi
+        ENDSCRIPT
+      end
+      _setup_partition
+
+      ic.partitions << part_id
+      rubber_instances.save
+      created = part_id
+
+    end
+
+    return created
+  end
+
+  def zero_partitions(ic, partitions)
+    env = rubber_cfg.environment.bind(ic.role_names, ic.name)
+
+    # don't zero out the ones that we weren't told to
+    partitions.delete_if do |part|
+      spec = env.local_volumes.find {|s| s['partition_device'] == part}
+      ! spec['zero']
+    end
+
+    if partitions.size > 0
+      zero_script = ""
+      partitions.each do |partition|
+        zero_script << "nohup dd if=/dev/zero bs=1M of=#{partition} &> /dev/null &\n"
+      end
+      # zero the newly created partitions in parallel on the instance
+      task :_zero_partitions, :hosts => ic.external_ip do
+        rubber.run_script 'zero_partitions', <<-ENDSCRIPT
+          # zero out partitions for performance (see amazon DevGuide)
+          echo "Zeroing out raid partitions to improve performance, this may take a while"
+          #{zero_script}
+
+          echo "Waiting for partitions to zero out"
+          while true; do
+            if ! ps ax | grep -q "[d]d.*/dev/zero"; then exit; fi
+            echo -n .
+            sleep 1
+          done
+        ENDSCRIPT
+      end
+      _zero_partitions
+    end
+  end
+
+  def setup_raid_volume(ic, raid_spec, create=false)
+    if create
+      mdadm_init = "mdadm --create #{raid_spec['device']} --level #{raid_spec['raid_level']} --raid-devices #{raid_spec['source_devices'].size} #{raid_spec['source_devices'].sort.join(' ')}"
+    else
+      mdadm_init = "mdadm --assemble #{raid_spec['device']} #{raid_spec['source_devices'].sort.join(' ')}"
+    end
+
+    task :_setup_raid_volume, :hosts => ic.external_ip do
+      rubber.run_script 'setup_raid_volume', <<-ENDSCRIPT
+        if ! grep -q '#{raid_spec['device']}' /etc/fstab; then
+          if mount | grep -q '#{raid_spec['mount']}'; then
+            umount '#{raid_spec['mount']}'
+          fi
+          mv /etc/fstab /etc/fstab.bak
+          cat /etc/fstab.bak | grep -v '#{raid_spec['mount']}' > /etc/fstab
+          echo '#{raid_spec['device']} #{raid_spec['mount']} #{raid_spec['filesystem']} noatime 0 0 # rubber raid volume' >> /etc/fstab
+
+          # seems to help devices initialize, otherwise mdadm fails because the
+          # device is not ready even though ec2 says the volume is attached
+          fdisk -l &> /dev/null
+
+          #{mdadm_init}
+
+          # set reconstruction speed
+          echo $((30*1024)) > /proc/sys/dev/raid/speed_limit_min
+
+          echo 'DEVICE /dev/hd*[0-9] /dev/sd*[0-9]' > /etc/mdadm/mdadm.conf
+          mdadm --detail --scan >> /etc/mdadm/mdadm.conf
+
+          mv /etc/rc.local /etc/rc.local.bak
+          echo "mdadm --assemble --scan" > /etc/rc.local
+          chmod +x /etc/rc.local
+
+          #{('yes | mkfs -t ' + raid_spec['filesystem'] + ' ' + raid_spec['device']) if create}
+          mkdir -p '#{raid_spec['mount']}'
+          mount '#{raid_spec['mount']}'
+        fi
+      ENDSCRIPT
+    end
+    _setup_raid_volume
+  end
+
+  def destroy_volume(volume_id)
+
+    logger.info "Detaching volume #{volume_id}"
+    cloud.detach_volume(volume_id) rescue logger.info("Volume was not attached")
+
+    print "Waiting for volume to detach"
+    while true do
+      print "."
+      sleep 2
+      volume = cloud.describe_volumes(volume_id).first
+      status = volume && volume[:attachment_status]
+      break if !status || status == "detached"
+    end
+    print "\n"
+
+    logger.info "Deleting volume #{volume_id}"
+    cloud.destroy_volume(volume_id)
+
+    logger.info "Removing volume #{volume_id} from rubber instances file"
+    artifacts = rubber_instances.artifacts
+    artifacts['volumes'].delete_if {|k,v| v == volume_id}
+    rubber_instances.each do |ic|
+      ic.volumes.delete(volume_id) if ic.volumes
+    end
+    rubber_instances.save
+  end
+
+end
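A hedged sketch of the spec hashes these tasks consume; the key names are all taken from the lookups above (env.volumes, env.local_volumes, env.raid_volumes), while every value is hypothetical:

    env.volumes        # => [{ 'device' => '/dev/sdh',  'size' => 100, 'zone' => 'us-east-1a',
                       #       'mount'  => '/mnt/db',   'filesystem' => 'ext3' }]
    env.local_volumes  # => [{ 'disk_device' => '/dev/sda', 'partition_device' => '/dev/sda2',
                       #       'start' => '', 'size' => 10000, 'type' => 83,
                       #       'bootable' => '', 'zero' => true }]
    env.raid_volumes   # => [{ 'device' => '/dev/md0', 'mount' => '/mnt/raid',
                       #       'filesystem' => 'ext3', 'raid_level' => 0,
                       #       'source_devices' => ['/dev/sdh', '/dev/sdi'] }]

Note the format heuristic in setup_volumes: a RAID array is only created and mkfs'd when every source device was created during this run, so re-running the task assembles an existing array instead of wiping it.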