eycap 0.4.16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/History.txt +169 -0
- data/Manifest.txt +29 -0
- data/README.txt +65 -0
- data/Rakefile +33 -0
- data/lib/capistrano/recipes/deploy/strategy/filtered_remote_cache.rb +57 -0
- data/lib/capistrano/recipes/deploy/strategy/unshared_remote_cache.rb +21 -0
- data/lib/eycap.rb +3 -0
- data/lib/eycap/lib/ey_logger.rb +125 -0
- data/lib/eycap/lib/ey_logger_hooks.rb +14 -0
- data/lib/eycap/recipes.rb +24 -0
- data/lib/eycap/recipes/apache.rb +10 -0
- data/lib/eycap/recipes/backgroundrb.rb +23 -0
- data/lib/eycap/recipes/bundler.rb +12 -0
- data/lib/eycap/recipes/database.rb +74 -0
- data/lib/eycap/recipes/deploy.rb +151 -0
- data/lib/eycap/recipes/ferret.rb +20 -0
- data/lib/eycap/recipes/juggernaut.rb +19 -0
- data/lib/eycap/recipes/memcached.rb +24 -0
- data/lib/eycap/recipes/mongrel.rb +56 -0
- data/lib/eycap/recipes/monit.rb +17 -0
- data/lib/eycap/recipes/nginx.rb +43 -0
- data/lib/eycap/recipes/passenger.rb +10 -0
- data/lib/eycap/recipes/slice.rb +21 -0
- data/lib/eycap/recipes/solr.rb +37 -0
- data/lib/eycap/recipes/sphinx.rb +76 -0
- data/lib/eycap/recipes/templates/maintenance.rhtml +53 -0
- data/lib/eycap/recipes/tomcat.rb +16 -0
- data/test/test_eycap.rb +11 -0
- data/test/test_helper.rb +2 -0
- metadata +106 -0
require File.join(File.dirname(__FILE__), "ey_logger")

# These tasks are setup to use with the logger as post commit hooks.
# After a deploy finishes, EYLogger invokes the registered hook below to
# persist the local deploy log onto the slice for later inspection.
Capistrano::Configuration.instance(:must_exist).load do
  namespace :ey_logger do
    # Copy the completed deploy log up to shared_path/deploy_logs on every
    # release host (skipped on hosts flagged :no_release).
    task :upload_log_to_slice, :except => { :no_release => true} do
      logger = Capistrano::EYLogger
      run "mkdir -p #{shared_path}/deploy_logs"
      # Read the whole local log into memory and push it via `put`.
      put File.open(logger.log_file_path).read, "#{shared_path}/deploy_logs/#{logger.remote_log_file_name}"
    end
  end
end

# Register the upload task so EYLogger runs it during post-processing.
Capistrano::EYLogger.post_process_hook("ey_logger:upload_log_to_slice")
# Entry point for the eycap recipe collection: pull in the logger plus every
# individual recipe, then apply Engine Yard's baseline Capistrano settings.
require 'eycap/lib/ey_logger'
require 'eycap/lib/ey_logger_hooks'
require 'eycap/recipes/database'
require 'eycap/recipes/ferret'
require 'eycap/recipes/mongrel'
require 'eycap/recipes/nginx'
require 'eycap/recipes/slice'
require 'eycap/recipes/deploy'
require 'eycap/recipes/sphinx'
require 'eycap/recipes/backgroundrb'
require 'eycap/recipes/memcached'
require 'eycap/recipes/solr'
require 'eycap/recipes/monit'
require 'eycap/recipes/tomcat'
require 'eycap/recipes/juggernaut'
require 'eycap/recipes/passenger'
require 'eycap/recipes/apache'
require 'eycap/recipes/bundler'

Capistrano::Configuration.instance(:must_exist).load do
  # Allocate a pty so sudo can prompt for passwords (guarded for older caps).
  default_run_options[:pty] = true if respond_to?(:default_run_options)
  set :keep_releases, 3
  # Run commands as the deploy user; deferred so :user can be set later.
  set :runner, defer { user }
end
Capistrano::Configuration.instance(:must_exist).load do
  namespace :apache do
    # Define one task per Apache init-script action on the web role.
    [:stop, :start, :restart, :reload].each do |command|
      desc "#{command.to_s.capitalize} Apache"
      task command, :roles => :web do
        sudo "/etc/init.d/apache2 #{command}"
      end
    end
  end
end
Capistrano::Configuration.instance(:must_exist).load do

  namespace :bdrb do
    desc "After update_code you want to reindex"
    task :reindex, :roles => [:app], :only => {:backgroundrb => true} do
      run "/engineyard/bin/searchd #{application} reindex"
    end

    # The start/stop/restart tasks only differ by the monit verb, so generate
    # them in a loop (same pattern as the ferret and juggernaut recipes).
    # Task names and descriptions are identical to the hand-written originals.
    [:start, :stop, :restart].each do |op|
      desc "#{op.to_s.capitalize} Backgroundrb"
      task op, :roles => [:app], :only => {:backgroundrb => true} do
        sudo "/usr/bin/monit #{op} all -g backgroundrb_#{application}"
      end
    end
  end

end
Capistrano::Configuration.instance(:must_exist).load do
  namespace :bundler do
    desc "Automatically installed your bundled gems if a Gemfile exists"
    task :bundle_gems do
      # Persist bundler output dirs in shared_path so gems survive releases;
      # symlink them into the new release before bundling.
      %w(vendor bin).each do |dirname|
        run "if [ -f #{release_path}/Gemfile ]; then mkdir -p #{shared_path}/bundling/#{dirname} && cd #{release_path} && ln -s #{shared_path}/bundling/#{dirname} .; fi"
      end
      # `gem bundle` is the bundler 0.x CLI — only runs when a Gemfile exists.
      run "if [ -f #{release_path}/Gemfile ]; then cd #{release_path} && gem bundle; fi"
    end
    # Hook bundling in right after the app's configs are symlinked.
    after "deploy:symlink_configs","bundler:bundle_gems"
  end
end
Capistrano::Configuration.instance(:must_exist).load do

  namespace :db do
    # Compute a timestamped backup path and expose it as :backup_file.
    # Runs only on the primary db host.
    task :backup_name, :roles => :db, :only => { :primary => true } do
      now = Time.now
      run "mkdir -p #{shared_path}/db_backups"
      backup_time = [now.year,now.month,now.day,now.hour,now.min,now.sec].join('-')
      set :backup_file, "#{shared_path}/db_backups/#{environment_database}-snapshot-#{backup_time}.sql"
    end

    desc "Clone Production Database to Staging Database."
    task :clone_prod_to_staging, :roles => :db, :only => { :primary => true } do

      # This task currently runs only on traditional EY offerings.
      # You need to have both a production and staging environment defined in
      # your deploy.rb file.

      backup_name
      on_rollback { run "rm -f #{backup_file}" }
      # Read the remote database.yml to discover the adapter/host in use.
      run("cat #{shared_path}/config/database.yml") { |channel, stream, data| @environment_info = YAML.load(data)[rails_env] }

      if @environment_info['adapter'] == 'mysql'
        dbhost = @environment_info['host']
        # BUGFIX: previously dbhost was unconditionally rewritten to the
        # replica host, making the assignment above dead code and breaking
        # Solo offerings (which use localhost). Guard like :dump does.
        dbhost = environment_dbhost.sub('-master', '') + '-replica' if dbhost != 'localhost'
        run "mysqldump --add-drop-table -u #{dbuser} -h #{dbhost} -p #{environment_database} | bzip2 -c > #{backup_file}.bz2" do |ch, stream, out |
          ch.send_data "#{dbpass}\n" if out=~ /^Enter password:/
        end
        run "bzcat #{backup_file}.bz2 | mysql -u #{dbuser} -p -h #{staging_dbhost} #{staging_database}" do |ch, stream, out|
          ch.send_data "#{dbpass}\n" if out=~ /^Enter password:/
        end
      else
        run "pg_dump -W -c -U #{dbuser} -h #{production_dbhost} #{production_database} | bzip2 -c > #{backup_file}.bz2" do |ch, stream, out|
          ch.send_data "#{dbpass}\n" if out=~ /^Password:/
        end
        run "bzcat #{backup_file}.bz2 | psql -W -U #{dbuser} -h #{staging_dbhost} #{staging_database}" do |ch, stream, out|
          ch.send_data "#{dbpass}\n" if out=~ /^Password/
        end
      end
      run "rm -f #{backup_file}.bz2"
    end

    desc "Backup your MySQL or PostgreSQL database to shared_path+/db_backups"
    task :dump, :roles => :db, :only => {:primary => true} do
      backup_name
      run("cat #{shared_path}/config/database.yml") { |channel, stream, data| @environment_info = YAML.load(data)[rails_env] }
      if @environment_info['adapter'] == 'mysql'
        dbhost = @environment_info['host']
        dbhost = environment_dbhost.sub('-master', '') + '-replica' if dbhost != 'localhost' # added for Solo offering, which uses localhost
        run "mysqldump --add-drop-table -u #{dbuser} -h #{dbhost} -p #{environment_database} | bzip2 -c > #{backup_file}.bz2" do |ch, stream, out |
          ch.send_data "#{dbpass}\n" if out=~ /^Enter password:/
        end
      else
        run "pg_dump -W -c -U #{dbuser} -h #{environment_dbhost} #{environment_database} | bzip2 -c > #{backup_file}.bz2" do |ch, stream, out |
          ch.send_data "#{dbpass}\n" if out=~ /^Password:/
        end
      end
    end

    desc "Sync your production database to your local workstation"
    task :clone_to_local, :roles => :db, :only => {:primary => true} do
      backup_name
      dump
      get "#{backup_file}.bz2", "/tmp/#{application}.sql.bz2"
      # Load the dump into the local development database from config/database.yml.
      development_info = YAML.load_file("config/database.yml")['development']
      if development_info['adapter'] == 'mysql'
        run_str = "bzcat /tmp/#{application}.sql.bz2 | mysql -u #{development_info['username']} --password='#{development_info['password']}' -h #{development_info['host']} #{development_info['database']}"
      else
        run_str = "PGPASSWORD=#{development_info['password']} bzcat /tmp/#{application}.sql.bz2 | psql -U #{development_info['username']} -h #{development_info['host']} #{development_info['database']}"
      end
      %x!#{run_str}!
    end
  end

end
require File.join(File.dirname(__FILE__), "..", "lib", "ey_logger.rb")
Capistrano::Configuration.instance(:must_exist).load do

  namespace :deploy do
    # This is here to hook into the logger for deploy and deploy:long tasks
    ["deploy", "deploy:long"].each do |tsk|
      before(tsk) do
        Capistrano::EYLogger.setup( self, tsk )
        # Post-process (e.g. upload the log) once the cap process exits,
        # but only if the logger was actually initialized.
        at_exit{ Capistrano::EYLogger.post_process if Capistrano::EYLogger.setup? }
      end
    end

    desc "Link the database.yml and mongrel_cluster.yml files into the current release path."
    task :symlink_configs, :roles => :app, :except => {:no_release => true} do
      run <<-CMD
        cd #{latest_release} &&
        ln -nfs #{shared_path}/config/database.yml #{latest_release}/config/database.yml &&
        ln -nfs #{shared_path}/config/mongrel_cluster.yml #{latest_release}/config/mongrel_cluster.yml
      CMD
    end

    desc <<-DESC
      Run the migrate rake task. By default, it runs this in most recently \
      deployed version of the app. However, you can specify a different release \
      via the migrate_target variable, which must be one of :latest (for the \
      default behavior), or :current (for the release indicated by the \
      `current' symlink). Strings will work for those values instead of symbols, \
      too. You can also specify additional environment variables to pass to rake \
      via the migrate_env variable. Finally, you can specify the full path to the \
      rake executable by setting the rake variable. The defaults are:

        set :rake,           "rake"
        set :framework,      "merb"
        set :merb_env,       "production"
        set :migrate_env,    ""
        set :migrate_target, :latest
    DESC
    task :migrate, :roles => :db, :only => { :primary => true } do
      rake = fetch(:rake, "rake")

      # Pick the environment variable name from the framework (RAILS_ENV,
      # MERB_ENV, ...) and its value from the matching *_env setting.
      framework = fetch(:framework, "rails")
      if framework.match(/^rails$/i)
        app_env = fetch(:rails_env, "production")
      else
        app_env = fetch("#{framework.downcase}_env".to_sym, "production")
      end

      migrate_env = fetch(:migrate_env, "")
      migrate_target = fetch(:migrate_target, :latest)

      directory = case migrate_target.to_sym
        when :current then current_path
        when :latest  then current_release
        else raise ArgumentError, "unknown migration target #{migrate_target.inspect}"
        end

      run "cd #{directory}; #{rake} #{framework.upcase}_ENV=#{app_env} #{migrate_env} db:migrate"
    end

    desc "Display the maintenance.html page while deploying with migrations. Then it restarts and enables the site again."
    task :long do
      # Wrap the destructive steps in a transaction so on_rollback hooks fire.
      transaction do
        update_code
        web.disable
        symlink
        migrate
      end

      restart
      web.enable
    end

    desc "Restart the Mongrel processes on the app slices."
    task :restart, :roles => :app do
      mongrel.restart
    end

    desc "Start the Mongrel processes on the app slices."
    task :spinner, :roles => :app do
      mongrel.start
    end

    desc "Start the Mongrel processes on the app slices."
    task :start, :roles => :app do
      mongrel.start
    end

    desc "Stop the Mongrel processes on the app slices."
    task :stop, :roles => :app do
      mongrel.stop
    end

    namespace :web do
      desc <<-DESC
        Present a maintenance page to visitors. Disables your application's web \
        interface by writing a "maintenance.html" file to each web server. The \
        servers must be configured to detect the presence of this file, and if \
        it is present, always display it instead of performing the request.

        By default, the maintenance page will just say the site is down for \
        "maintenance", and will be back "shortly", but you can customize the \
        page by specifying the REASON and UNTIL environment variables:

          $ cap deploy:web:disable \\
                REASON="hardware upgrade" \\
                UNTIL="12pm Central Time"

        Further customization copy your html file to shared_path+'/system/maintenance.html.custom'.
        If this file exists it will be used instead of the default capistrano ugly page
      DESC
      task :disable, :roles => :web, :except => { :no_release => true } do
        maint_file = "#{shared_path}/system/maintenance.html"
        require 'erb'
        on_rollback { run "rm #{shared_path}/system/maintenance.html" }

        reason = ENV['REASON']
        deadline = ENV['UNTIL']

        # Render the bundled template with REASON/UNTIL available via binding.
        template = File.read(File.join(File.dirname(__FILE__), "templates", "maintenance.rhtml"))
        result = ERB.new(template).result(binding)

        # Upload rendered page, then prefer a user-supplied .custom page if one
        # exists on the server.
        put result, "#{shared_path}/system/maintenance.html.tmp", :mode => 0644
        run "if [ -f #{shared_path}/system/maintenance.html.custom ]; then cp #{shared_path}/system/maintenance.html.custom #{maint_file}; else cp #{shared_path}/system/maintenance.html.tmp #{maint_file}; fi"
      end
    end

    # Best-effort deploy start/stop pings to Engine Yard's weather service;
    # failures never abort the deploy.
    namespace :notify do
      task :start, :roles => :app do
        begin
          run %(curl -X POST -d "application=#{application rescue 'unknown'}" http://weather.engineyard.com/`hostname`/deploy_start)
        rescue
          puts "Deploy notification failed"
        end
      end

      task :stop, :roles => :app do
        begin
          run %(curl -X POST -d "application=#{application rescue 'unknown'}" http://weather.engineyard.com/`hostname`/deploy_stop)
        rescue
          puts "Deploy notification failed"
        end
      end
    end
  end

  ["deploy", "deploy:long"].each do |tsk|
    before(tsk, "deploy:notify:start")
    after(tsk, "deploy:notify:stop")
  end

end
Capistrano::Configuration.instance(:must_exist).load do

  namespace :ferret do
    desc "After update_code you want to symlink the index and ferret_server.yml file into place"
    task :symlink_configs, :roles => [:app], :except => {:no_release => true, :ferret => false} do
      # Keep the ferret index in shared_path; back up any index dir that was
      # checked into the release before linking the shared one in.
      run <<-CMD
        cd #{latest_release} &&
        ln -nfs #{shared_path}/config/ferret_server.yml #{latest_release}/config/ferret_server.yml &&
        if [ -d #{latest_release}/index ]; then mv #{latest_release}/index #{latest_release}/index.bak; fi &&
        ln -nfs #{shared_path}/index #{latest_release}/index
      CMD
    end
    # Generate start/stop/restart tasks that drive the ferret monit group.
    [:start,:stop,:restart].each do |op|
      desc "#{op} ferret server"
      task op, :roles => [:app], :except => {:no_release => true, :ferret => false} do
        sudo "/usr/bin/monit #{op} all -g ferret_#{application}"
      end
    end
  end
end
Capistrano::Configuration.instance(:must_exist).load do

  namespace :juggernaut do
    desc "After update_code you want to symlink the juggernaut.yml file into place"
    task :symlink_configs, :roles => [:app], :except => {:no_release => true, :juggernaut => false} do
      # Link both juggernaut config files from shared_path into the release.
      run <<-CMD
        cd #{latest_release} &&
        ln -nfs #{shared_path}/config/juggernaut.yml #{latest_release}/config/juggernaut.yml &&
        ln -nfs #{shared_path}/config/juggernaut_hosts.yml #{latest_release}/config/juggernaut_hosts.yml
      CMD
    end

    # One task per monit verb for the juggernaut process group.
    [:start, :stop, :restart].each do |verb|
      desc "#{verb} juggernaut server"
      task verb, :roles => [:app], :except => {:no_release => true, :juggernaut => false} do
        sudo "/usr/bin/monit #{verb} all -g juggernaut_#{application}"
      end
    end
  end
end
Capistrano::Configuration.instance(:must_exist).load do
  namespace :memcached do
    # start/stop/restart all shell out to the same init script, so generate
    # them in a loop. Descriptions match the originals exactly
    # ("Start memcached", "Stop memcached", "Restart memcached").
    [:start, :stop, :restart].each do |action|
      desc "#{action.to_s.capitalize} memcached"
      task action, :roles => [:app], :only => {:memcached => true} do
        sudo "/etc/init.d/memcached #{action}"
      end
    end

    desc "Flush memcached - this assumes memcached is on port 11211"
    task :flush, :roles => [:app], :only => {:memcached => true} do
      sudo "echo 'flush_all' | nc -q 1 localhost 11211"
    end

    desc "Symlink the memcached.yml file into place if it exists"
    task :symlink_configs, :roles => [:app], :only => {:memcached => true }, :except => { :no_release => true } do
      run "if [ -f #{shared_path}/config/memcached.yml ]; then ln -nfs #{shared_path}/config/memcached.yml #{latest_release}/config/memcached.yml; fi"
    end
  end
end
Capistrano::Configuration.instance(:must_exist).load do
  namespace :mongrel do
    desc <<-DESC
      Start Mongrel processes on the app server. This uses the :use_sudo variable to determine whether to use sudo or not. By default, :use_sudo is
      set to true.
    DESC
    task :start, :roles => [:app], :except => {:mongrel => false} do
      sudo "/usr/bin/monit start all -g #{monit_group}"
    end

    desc <<-DESC
      Restart the Mongrel processes on the app server by starting and stopping the cluster. This uses the :use_sudo
      variable to determine whether to use sudo or not. By default, :use_sudo is set to true.
    DESC
    task :restart, :roles => [:app], :except => {:mongrel => false} do
      sudo "/usr/bin/monit restart all -g #{monit_group}"
    end

    desc <<-DESC
      Stop the Mongrel processes on the app server. This uses the :use_sudo
      variable to determine whether to use sudo or not. By default, :use_sudo is
      set to true.
    DESC
    task :stop, :roles => [:app], :except => {:mongrel => false} do
      sudo "/usr/bin/monit stop all -g #{monit_group}"
    end

    desc <<-DESC
      Start mongrels in a loop, with a defer of [default] 30 seconds between each single mongrel restart.
    DESC
    task :rolling_restart, :roles => [:app], :except => {:mongrel => false} do

      # BUGFIX: 30 is documented as a *default*; don't clobber a value the
      # user already configured in deploy.rb.
      set :mongrel_restart_delay, 30 unless exists?(:mongrel_restart_delay)

      script_path = "/tmp/rolling.reboot"
      # need a script due to weird escapes run by sudo "X".
      # BUGFIX: block form guarantees the local file handle is closed even if
      # an exception is raised while writing.
      File.open(script_path, 'w+') do |script|
        script.puts "#!/bin/bash"
        script.puts "export monit_group=#{monit_group}"
        script.puts "export mongrel_restart_delay=#{mongrel_restart_delay}"
        # here's the need for single quoted - sed ? - (no escaping).
        script.puts 'for port in $(monit summary | grep mongrel | sed -r \'s/[^0-9]*([0-9]+).*/\1/\'); do echo "Executing monit restart mongrel_${monit_group}_${port}"; /usr/bin/monit restart mongrel_${monit_group}_${port}; echo "sleeping $mongrel_restart_delay"; sleep ${mongrel_restart_delay}; done'
      end

      upload(script_path, script_path, :via=> :scp)

      #it's in the script, on the remote server, execute it.
      sudo "chmod +x #{script_path}"
      sudo "#{script_path}"
      #cleanup: remove the script both remotely and locally
      sudo "rm #{script_path}"
      require 'fileutils' ; FileUtils.rm(script_path)
      puts "Done."
    end

  end #namespace
end #Capistrano::Configuration
Capistrano::Configuration.instance(:must_exist).load do

  namespace :monit do
    desc "Get the status of your mongrels"
    task :status, :roles => :app do
      # Collect `monit status` output per server, then print it grouped by
      # host once every channel has finished streaming.
      @monit_output ||= { }
      sudo "/usr/bin/monit status" do |channel, stream, data|
        @monit_output[channel[:server].to_s] ||= [ ]
        @monit_output[channel[:server].to_s].push(data.chomp)
      end
      @monit_output.each do |k,v|
        puts "#{k} -> #{'*'*55}"
        puts v.join("\n")
      end
    end
  end
end