newrelic-eycap 0.5.6.0
Files added in this version:
- data/.gitignore +1 -0
- data/History.txt +193 -0
- data/Manifest.txt +30 -0
- data/README.txt +66 -0
- data/Rakefile +20 -0
- data/eycap.gemspec +25 -0
- data/lib/capistrano/recipes/deploy/strategy/filtered_remote_cache.rb +57 -0
- data/lib/capistrano/recipes/deploy/strategy/unshared_remote_cache.rb +21 -0
- data/lib/eycap.rb +3 -0
- data/lib/eycap/lib/ey_logger.rb +125 -0
- data/lib/eycap/lib/ey_logger_hooks.rb +14 -0
- data/lib/eycap/recipes.rb +25 -0
- data/lib/eycap/recipes/apache.rb +10 -0
- data/lib/eycap/recipes/backgroundrb.rb +23 -0
- data/lib/eycap/recipes/bundler.rb +9 -0
- data/lib/eycap/recipes/database.rb +69 -0
- data/lib/eycap/recipes/deploy.rb +151 -0
- data/lib/eycap/recipes/ferret.rb +20 -0
- data/lib/eycap/recipes/juggernaut.rb +19 -0
- data/lib/eycap/recipes/memcached.rb +24 -0
- data/lib/eycap/recipes/mongrel.rb +52 -0
- data/lib/eycap/recipes/monit.rb +17 -0
- data/lib/eycap/recipes/nginx.rb +43 -0
- data/lib/eycap/recipes/passenger.rb +10 -0
- data/lib/eycap/recipes/slice.rb +21 -0
- data/lib/eycap/recipes/solr.rb +37 -0
- data/lib/eycap/recipes/sphinx.rb +76 -0
- data/lib/eycap/recipes/ssl.rb +15 -0
- data/lib/eycap/recipes/templates/maintenance.rhtml +53 -0
- data/lib/eycap/recipes/tomcat.rb +16 -0
- data/test/test_helper.rb +2 -0
- metadata +106 -0
@@ -0,0 +1,14 @@ data/lib/eycap/lib/ey_logger_hooks.rb
+require File.join(File.dirname(__FILE__), "ey_logger")
+
+# These tasks are set up for use with the logger as post-commit hooks.
+Capistrano::Configuration.instance(:must_exist).load do
+  namespace :ey_logger do
+    task :upload_log_to_slice, :except => { :no_release => true } do
+      logger = Capistrano::EYLogger
+      run "mkdir -p #{shared_path}/deploy_logs"
+      put File.open(logger.log_file_path).read, "#{shared_path}/deploy_logs/#{logger.remote_log_file_name}"
+    end
+  end
+end
+
+Capistrano::EYLogger.post_process_hook("ey_logger:upload_log_to_slice")
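The final line above registers the upload task as a post-process hook, so it runs once EYLogger has finished capturing the deploy output. A project can register additional hooks the same way. A minimal sketch, assuming only the hook API shown above (the announce_log task is hypothetical, not part of the gem):

    # Hypothetical companion task, registered next to upload_log_to_slice.
    Capistrano::Configuration.instance(:must_exist).load do
      namespace :ey_logger do
        task :announce_log, :except => { :no_release => true } do
          puts "Deploy log uploaded to #{shared_path}/deploy_logs"
        end
      end
    end

    # Run it after the built-in log upload.
    Capistrano::EYLogger.post_process_hook("ey_logger:announce_log")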
@@ -0,0 +1,25 @@ data/lib/eycap/recipes.rb
+require 'eycap/lib/ey_logger'
+require 'eycap/lib/ey_logger_hooks'
+require 'eycap/recipes/database'
+require 'eycap/recipes/ferret'
+require 'eycap/recipes/mongrel'
+require 'eycap/recipes/nginx'
+require 'eycap/recipes/slice'
+require 'eycap/recipes/deploy'
+require 'eycap/recipes/sphinx'
+require 'eycap/recipes/backgroundrb'
+require 'eycap/recipes/memcached'
+require 'eycap/recipes/solr'
+require 'eycap/recipes/monit'
+require 'eycap/recipes/tomcat'
+require 'eycap/recipes/juggernaut'
+require 'eycap/recipes/passenger'
+require 'eycap/recipes/apache'
+require 'eycap/recipes/bundler'
+require 'eycap/recipes/ssl'
+
+Capistrano::Configuration.instance(:must_exist).load do
+  default_run_options[:pty] = true if respond_to?(:default_run_options)
+  set :keep_releases, 3
+  set :runner, defer { user }
+end
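This file is the gem's entry point: requiring it loads every recipe above and sets a few defaults (a PTY so password prompts work, three kept releases, and a runner deferred to the configured user). A minimal sketch of a client configuration, with placeholder values:

    # config/deploy.rb in an application using this gem -- values are placeholders
    require 'eycap/recipes'

    set :application, "myapp"
    set :user,        "deploy"
    set :repository,  "git@example.com:myapp.git"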
@@ -0,0 +1,10 @@ data/lib/eycap/recipes/apache.rb
+Capistrano::Configuration.instance(:must_exist).load do
+  namespace :apache do
+    [:stop, :start, :restart, :reload].each do |action|
+      desc "#{action.to_s.capitalize} Apache"
+      task action, :roles => :web do
+        sudo "/etc/init.d/apache2 #{action.to_s}"
+      end
+    end
+  end
+end
@@ -0,0 +1,23 @@ data/lib/eycap/recipes/backgroundrb.rb
+Capistrano::Configuration.instance(:must_exist).load do
+
+  namespace :bdrb do
+    desc "After update_code you want to reindex"
+    task :reindex, :roles => [:app], :only => { :backgroundrb => true } do
+      run "/engineyard/bin/searchd #{application} reindex"
+    end
+
+    desc "Start Backgroundrb"
+    task :start, :roles => [:app], :only => { :backgroundrb => true } do
+      sudo "/usr/bin/monit start all -g backgroundrb_#{application}"
+    end
+    desc "Stop Backgroundrb"
+    task :stop, :roles => [:app], :only => { :backgroundrb => true } do
+      sudo "/usr/bin/monit stop all -g backgroundrb_#{application}"
+    end
+    desc "Restart Backgroundrb"
+    task :restart, :roles => [:app], :only => { :backgroundrb => true } do
+      sudo "/usr/bin/monit restart all -g backgroundrb_#{application}"
+    end
+  end
+
+end
@@ -0,0 +1,9 @@ data/lib/eycap/recipes/bundler.rb
+Capistrano::Configuration.instance(:must_exist).load do
+  namespace :bundler do
+    desc "Automatically install your bundled gems if a Gemfile exists"
+    task :bundle_gems do
+      run "if [ -f #{release_path}/Gemfile ]; then cd #{release_path} && bundle install --without=test,development; fi"
+    end
+    after "deploy:symlink_configs", "bundler:bundle_gems"
+  end
+end
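The `after "deploy:symlink_configs", "bundler:bundle_gems"` wiring makes bundling run on every deploy, and the shell guard turns it into a no-op for apps without a Gemfile. Since Capistrano 2 lets a later task definition replace an earlier one, a deploy.rb can override the Bundler options; a hedged sketch (the extra excluded group is illustrative):

    # Redefine bundler:bundle_gems in deploy.rb to exclude another group.
    namespace :bundler do
      task :bundle_gems do
        run "if [ -f #{release_path}/Gemfile ]; then cd #{release_path} && bundle install --without=test,development,cucumber; fi"
      end
    end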
@@ -0,0 +1,69 @@ data/lib/eycap/recipes/database.rb
+Capistrano::Configuration.instance(:must_exist).load do
+
+  namespace :db do
+    task :backup_name, :roles => :db, :only => { :primary => true } do
+      now = Time.now
+      run "mkdir -p #{shared_path}/db_backups"
+      backup_time = [now.year, now.month, now.day, now.hour, now.min, now.sec].join('-')
+      set :backup_file, "#{shared_path}/db_backups/#{environment_database}-snapshot-#{backup_time}.sql"
+    end
+
+    desc "Clone Production Database to Staging Database."
+    task :clone_prod_to_staging, :roles => :db, :only => { :primary => true } do
+
+      # This task currently runs only on traditional EY offerings.
+      # You need to have both a production and staging environment defined in
+      # your deploy.rb file.
+
+      backup_name unless exists?(:backup_file)
+      run("cat #{shared_path}/config/database.yml") { |channel, stream, data| @environment_info = YAML.load(data)[rails_env] }
+      dump
+
+      if @environment_info['adapter'] == 'mysql'
+        run "gunzip < #{backup_file}.gz | mysql -u #{dbuser} -p -h #{staging_dbhost} #{staging_database}" do |ch, stream, out|
+          ch.send_data "#{dbpass}\n" if out =~ /^Enter password:/
+        end
+      else
+        run "gunzip < #{backup_file}.gz | psql -W -U #{dbuser} -h #{staging_dbhost} #{staging_database}" do |ch, stream, out|
+          ch.send_data "#{dbpass}\n" if out =~ /^Password/
+        end
+      end
+      run "rm -f #{backup_file}.gz"
+    end
+
+    desc "Backup your MySQL or PostgreSQL database to shared_path+/db_backups"
+    task :dump, :roles => :db, :only => { :primary => true } do
+      backup_name unless exists?(:backup_file)
+      on_rollback { run "rm -f #{backup_file}" }
+      run("cat #{shared_path}/config/database.yml") { |channel, stream, data| @environment_info = YAML.load(data)[rails_env] }
+
+      if @environment_info['adapter'] == 'mysql'
+        dbhost = @environment_info['host']
+        dbhost = environment_dbhost.sub('-master', '') + '-replica' if dbhost != 'localhost' # added for Solo offering, which uses localhost
+        run "mysqldump --add-drop-table -u #{dbuser} -h #{dbhost} -p #{environment_database} | gzip -c > #{backup_file}.gz" do |ch, stream, out|
+          ch.send_data "#{dbpass}\n" if out =~ /^Enter password:/
+        end
+      else
+        run "pg_dump -W -c -U #{dbuser} -h #{environment_dbhost} #{environment_database} | gzip -c > #{backup_file}.gz" do |ch, stream, out|
+          ch.send_data "#{dbpass}\n" if out =~ /^Password:/
+        end
+      end
+    end
+
+    desc "Sync your production database to your local workstation"
+    task :clone_to_local, :roles => :db, :only => { :primary => true } do
+      backup_name unless exists?(:backup_file)
+      dump
+      get "#{backup_file}.gz", "/tmp/#{application}.sql.gz"
+      development_info = YAML.load_file("config/database.yml")['development']
+      if development_info['adapter'] == 'mysql'
+        run_str = "gunzip < /tmp/#{application}.sql.gz | mysql -u #{development_info['username']} --password='#{development_info['password']}' -h #{development_info['host']} #{development_info['database']}"
+      else
+        run_str = "PGPASSWORD=#{development_info['password']} gunzip < /tmp/#{application}.sql.gz | psql -U #{development_info['username']} -h #{development_info['host']} #{development_info['database']}"
+      end
+      %x!#{run_str}!
+      run "rm -f #{backup_file}.gz"
+    end
+  end
+
+end
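The db tasks discover the adapter by reading the remote database.yml at runtime, but the connection variables come from your deploy.rb. A sketch of the variables the recipe references (names are taken from the code above; every value is a placeholder):

    # deploy.rb -- variables used by the db:* tasks; values are placeholders
    set :dbuser,               "myapp_db"
    set :dbpass,               "secret"
    set :environment_database, "myapp_production"
    set :environment_dbhost,   "db-master.example.com"
    set :staging_database,     "myapp_staging"        # db:clone_prod_to_staging only
    set :staging_dbhost,       "db-staging.example.com"

db:clone_to_local additionally reads the development entry of your local config/database.yml, so it needs no extra variables.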
@@ -0,0 +1,151 @@ data/lib/eycap/recipes/deploy.rb
+require File.join(File.dirname(__FILE__), "..", "lib", "ey_logger.rb")
+Capistrano::Configuration.instance(:must_exist).load do
+
+  namespace :deploy do
+    # This is here to hook into the logger for deploy and deploy:long tasks
+    ["deploy", "deploy:long"].each do |tsk|
+      before(tsk) do
+        Capistrano::EYLogger.setup(self, tsk)
+        at_exit { Capistrano::EYLogger.post_process if Capistrano::EYLogger.setup? }
+      end
+    end
+
+    desc "Link the database.yml and mongrel_cluster.yml files into the current release path."
+    task :symlink_configs, :roles => :app, :except => { :no_release => true } do
+      run <<-CMD
+        cd #{latest_release} &&
+        ln -nfs #{shared_path}/config/database.yml #{latest_release}/config/database.yml &&
+        ln -nfs #{shared_path}/config/mongrel_cluster.yml #{latest_release}/config/mongrel_cluster.yml
+      CMD
+    end
+
+    desc <<-DESC
+      Run the migrate rake task. By default, it runs this in the most recently \
+      deployed version of the app. However, you can specify a different release \
+      via the migrate_target variable, which must be one of :latest (for the \
+      default behavior), or :current (for the release indicated by the \
+      `current' symlink). Strings will work for those values instead of symbols, \
+      too. You can also specify additional environment variables to pass to rake \
+      via the migrate_env variable. Finally, you can specify the full path to the \
+      rake executable by setting the rake variable. The defaults are:
+
+        set :rake,           "rake"
+        set :framework,      "merb"
+        set :merb_env,       "production"
+        set :migrate_env,    ""
+        set :migrate_target, :latest
+    DESC
+    task :migrate, :roles => :db, :only => { :primary => true } do
+      rake = fetch(:rake, "rake")
+
+      framework = fetch(:framework, "rails")
+      if framework.match(/^rails$/i)
+        app_env = fetch(:rails_env, "production")
+      else
+        app_env = fetch("#{framework.downcase}_env".to_sym, "production")
+      end
+
+      migrate_env = fetch(:migrate_env, "")
+      migrate_target = fetch(:migrate_target, :latest)
+
+      directory = case migrate_target.to_sym
+                  when :current then current_path
+                  when :latest  then current_release
+                  else raise ArgumentError, "unknown migration target #{migrate_target.inspect}"
+                  end
+
+      run "cd #{directory}; #{rake} #{framework.upcase}_ENV=#{app_env} #{migrate_env} db:migrate"
+    end
+
+    desc "Display the maintenance.html page while deploying with migrations, then restart and re-enable the site."
+    task :long do
+      transaction do
+        update_code
+        web.disable
+        symlink
+        migrate
+      end
+
+      restart
+      web.enable
+    end
+
+    desc "Restart the Mongrel processes on the app slices."
+    task :restart, :roles => :app do
+      mongrel.restart
+    end
+
+    desc "Start the Mongrel processes on the app slices."
+    task :spinner, :roles => :app do
+      mongrel.start
+    end
+
+    desc "Start the Mongrel processes on the app slices."
+    task :start, :roles => :app do
+      mongrel.start
+    end
+
+    desc "Stop the Mongrel processes on the app slices."
+    task :stop, :roles => :app do
+      mongrel.stop
+    end
+
+    namespace :web do
+      desc <<-DESC
+        Present a maintenance page to visitors. Disables your application's web \
+        interface by writing a "maintenance.html" file to each web server. The \
+        servers must be configured to detect the presence of this file, and if \
+        it is present, always display it instead of performing the request.
+
+        By default, the maintenance page will just say the site is down for \
+        "maintenance", and will be back "shortly", but you can customize the \
+        page by specifying the REASON and UNTIL environment variables:
+
+          $ cap deploy:web:disable \\
+                REASON="hardware upgrade" \\
+                UNTIL="12pm Central Time"
+
+        For further customization, copy your own HTML file to shared_path+'/system/maintenance.html.custom'.
+        If this file exists, it will be used instead of the ugly default Capistrano page.
+      DESC
+      task :disable, :roles => :web, :except => { :no_release => true } do
+        maint_file = "#{shared_path}/system/maintenance.html"
+        require 'erb'
+        on_rollback { run "rm #{shared_path}/system/maintenance.html" }
+
+        reason = ENV['REASON']
+        deadline = ENV['UNTIL']
+
+        template = File.read(File.join(File.dirname(__FILE__), "templates", "maintenance.rhtml"))
+        result = ERB.new(template).result(binding)
+
+        put result, "#{shared_path}/system/maintenance.html.tmp", :mode => 0644
+        run "if [ -f #{shared_path}/system/maintenance.html.custom ]; then cp #{shared_path}/system/maintenance.html.custom #{maint_file}; else cp #{shared_path}/system/maintenance.html.tmp #{maint_file}; fi"
+      end
+    end
+
+    namespace :notify do
+      task :start, :roles => :app do
+        begin
+          run %(curl -X POST -d "application=#{application rescue 'unknown'}" http://weather.engineyard.com/`hostname`/deploy_start -fs)
+        rescue
+          puts "Warning: We couldn't notify EY of your deploy, but don't worry, everything is fine"
+        end
+      end
+
+      task :stop, :roles => :app do
+        begin
+          run %(curl -X POST -d "application=#{application rescue 'unknown'}" http://weather.engineyard.com/`hostname`/deploy_stop -fs)
+        rescue
+          puts "Warning: We couldn't notify EY of your deploy finishing, but don't worry, everything is fine"
+        end
+      end
+    end
+  end
+
+  ["deploy", "deploy:long"].each do |tsk|
+    before(tsk, "deploy:notify:start")
+    after(tsk, "deploy:notify:stop")
+  end
+
+end
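deploy:migrate is driven entirely by fetched variables, so all customization lives in deploy.rb. A sketch for a Rails app that migrates the `current' release instead of the latest one (values are illustrative):

    # deploy.rb overrides for deploy:migrate -- values are illustrative
    set :framework,      "rails"
    set :rails_env,      "production"
    set :migrate_target, :current          # default is :latest
    set :migrate_env,    "VERBOSE=false"   # extra environment passed to rake

With those in place, `cap deploy:long` runs update_code, web.disable, symlink, and migrate inside one transaction, then restarts the mongrels and re-enables the web interface.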
@@ -0,0 +1,20 @@ data/lib/eycap/recipes/ferret.rb
+Capistrano::Configuration.instance(:must_exist).load do
+
+  namespace :ferret do
+    desc "After update_code you want to symlink the index and ferret_server.yml file into place"
+    task :symlink_configs, :roles => [:app], :except => { :no_release => true, :ferret => false } do
+      run <<-CMD
+        cd #{latest_release} &&
+        ln -nfs #{shared_path}/config/ferret_server.yml #{latest_release}/config/ferret_server.yml &&
+        if [ -d #{latest_release}/index ]; then mv #{latest_release}/index #{latest_release}/index.bak; fi &&
+        ln -nfs #{shared_path}/index #{latest_release}/index
+      CMD
+    end
+    [:start, :stop, :restart].each do |op|
+      desc "#{op} ferret server"
+      task op, :roles => [:app], :except => { :no_release => true, :ferret => false } do
+        sudo "/usr/bin/monit #{op} all -g ferret_#{application}"
+      end
+    end
+  end
+end
@@ -0,0 +1,19 @@ data/lib/eycap/recipes/juggernaut.rb
+Capistrano::Configuration.instance(:must_exist).load do
+
+  namespace :juggernaut do
+    desc "After update_code you want to symlink the juggernaut.yml file into place"
+    task :symlink_configs, :roles => [:app], :except => { :no_release => true, :juggernaut => false } do
+      run <<-CMD
+        cd #{latest_release} &&
+        ln -nfs #{shared_path}/config/juggernaut.yml #{latest_release}/config/juggernaut.yml &&
+        ln -nfs #{shared_path}/config/juggernaut_hosts.yml #{latest_release}/config/juggernaut_hosts.yml
+      CMD
+    end
+    [:start, :stop, :restart].each do |op|
+      desc "#{op} juggernaut server"
+      task op, :roles => [:app], :except => { :no_release => true, :juggernaut => false } do
+        sudo "/usr/bin/monit #{op} all -g juggernaut_#{application}"
+      end
+    end
+  end
+end
@@ -0,0 +1,24 @@ data/lib/eycap/recipes/memcached.rb
+Capistrano::Configuration.instance(:must_exist).load do
+  namespace :memcached do
+    desc "Start memcached"
+    task :start, :roles => [:app], :only => { :memcached => true } do
+      sudo "/etc/init.d/memcached start"
+    end
+    desc "Stop memcached"
+    task :stop, :roles => [:app], :only => { :memcached => true } do
+      sudo "/etc/init.d/memcached stop"
+    end
+    desc "Restart memcached"
+    task :restart, :roles => [:app], :only => { :memcached => true } do
+      sudo "/etc/init.d/memcached restart"
+    end
+    desc "Flush memcached - this assumes memcached is on port 11211"
+    task :flush, :roles => [:app], :only => { :memcached => true } do
+      sudo "echo 'flush_all' | nc -q 1 localhost 11211"
+    end
+    desc "Symlink the memcached.yml file into place if it exists"
+    task :symlink_configs, :roles => [:app], :only => { :memcached => true }, :except => { :no_release => true } do
+      run "if [ -f #{shared_path}/config/memcached.yml ]; then ln -nfs #{shared_path}/config/memcached.yml #{latest_release}/config/memcached.yml; fi"
+    end
+  end
+end
@@ -0,0 +1,52 @@ data/lib/eycap/recipes/mongrel.rb
+Capistrano::Configuration.instance(:must_exist).load do
+  namespace :mongrel do
+    desc <<-DESC
+      Start Mongrel processes on the app server. This uses the :use_sudo variable
+      to determine whether to use sudo or not. By default, :use_sudo is set to true.
+    DESC
+    task :start, :roles => [:app], :except => { :mongrel => false } do
+      sudo "/usr/bin/monit start all -g #{monit_group}"
+    end
+
+    desc <<-DESC
+      Restart the Mongrel processes on the app server by starting and stopping the cluster.
+      This uses the :use_sudo variable to determine whether to use sudo or not. By default,
+      :use_sudo is set to true.
+    DESC
+    task :restart, :roles => [:app], :except => { :mongrel => false } do
+      sudo "/usr/bin/monit restart all -g #{monit_group}"
+    end
+
+    desc <<-DESC
+      Stop the Mongrel processes on the app server. This uses the :use_sudo
+      variable to determine whether to use sudo or not. By default, :use_sudo is
+      set to true.
+    DESC
+    task :stop, :roles => [:app], :except => { :mongrel => false } do
+      sudo "/usr/bin/monit stop all -g #{monit_group}"
+    end
+
+    desc <<-DESC
+      Restart mongrels in a loop, with a delay of [default] 30 seconds between each single mongrel restart.
+    DESC
+    task :rolling_restart, :roles => [:app], :except => { :mongrel => false } do
+
+      set :mongrel_restart_delay, 30
+
+      mongrels = {}
+      sudo "/usr/bin/monit summary" do |channel, stream_id, output|
+        output.split("\n").map(&:chomp).each do |line|
+          if line =~ /'(mongrel_#{monit_group}.*)'/
+            (mongrels[channel[:host]] ||= []) << $1
+          end
+        end
+      end
+      parallel do |session|
+        mongrels.each do |host, ids|
+          cmd = ids.map { |id| "echo 'restart #{id}'; #{sudo} /usr/bin/monit restart #{id}" }.join("; sleep #{mongrel_restart_delay};")
+          session.when "server.host == '#{host}'", cmd
+        end
+      end unless mongrels.empty?
+    end
+
+  end # namespace
+end # Capistrano::Configuration
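Every task above keys off :monit_group, which must match the monit group configured on the slice; the rolling_restart regex additionally assumes the individual process names begin with mongrel_ followed by that group. A placeholder sketch for deploy.rb (the value must match your slice's monit configuration):

    # deploy.rb -- value is a placeholder
    set :monit_group, "myapp"

Note that rolling_restart sets :mongrel_restart_delay to 30 unconditionally inside the task body, so as the code stands, assigning that variable beforehand has no effect.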
@@ -0,0 +1,17 @@ data/lib/eycap/recipes/monit.rb
+Capistrano::Configuration.instance(:must_exist).load do
+
+  namespace :monit do
+    desc "Get the status of your mongrels"
+    task :status, :roles => :app do
+      @monit_output ||= {}
+      sudo "/usr/bin/monit status" do |channel, stream, data|
+        @monit_output[channel[:server].to_s] ||= []
+        @monit_output[channel[:server].to_s].push(data.chomp)
+      end
+      @monit_output.each do |k, v|
+        puts "#{k} -> #{'*' * 55}"
+        puts v.join("\n")
+      end
+    end
+  end
+end