gramos-robbie 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/README.txt +1 -0
- data/lib/robbie/lib/ey_logger.rb +125 -0
- data/lib/robbie/lib/ey_logger_hooks.rb +14 -0
- data/lib/robbie/recipes/admin.rb +59 -0
- data/lib/robbie/recipes/assets.rb +25 -0
- data/lib/robbie/recipes/backgroundrb.rb +23 -0
- data/lib/robbie/recipes/database.rb +98 -0
- data/lib/robbie/recipes/deploy.rb +128 -0
- data/lib/robbie/recipes/ferret.rb +20 -0
- data/lib/robbie/recipes/juggernaut.rb +19 -0
- data/lib/robbie/recipes/memcached.rb +20 -0
- data/lib/robbie/recipes/mongrel.rb +28 -0
- data/lib/robbie/recipes/monit.rb +66 -0
- data/lib/robbie/recipes/nginx.rb +127 -0
- data/lib/robbie/recipes/shooting_star.rb +49 -0
- data/lib/robbie/recipes/slice.rb +21 -0
- data/lib/robbie/recipes/solr.rb +37 -0
- data/lib/robbie/recipes/sphinx.rb +76 -0
- data/lib/robbie/recipes/templates/maintenance.rhtml +53 -0
- data/lib/robbie/recipes/tomcat.rb +16 -0
- data/lib/robbie/recipes.rb +25 -0
- data/lib/robbie/version.rb +10 -0
- data/lib/robbie.rb +6 -0
- metadata +87 -0
data/README.txt
ADDED
@@ -0,0 +1 @@
= robbie

data/lib/robbie/lib/ey_logger.rb
ADDED
@@ -0,0 +1,125 @@
require 'tmpdir'
require 'fileutils'
module Capistrano

  class Logger

    def ey_log(level, message, line_prefix = nil)
      EYLogger.log(level, message, line_prefix) if EYLogger.setup?
      log_without_ey_logging(level, message, line_prefix)
    end

    unless method_defined?(:log_without_ey_logging)
      alias_method :log_without_ey_logging, :log
      alias_method :log, :ey_log
    end

    def close
      device.close if @needs_close
      EYLogger.close if EYLogger.setup?
    end
  end

  class EYLogger

    # Sets up the EYLogger to begin capturing capistrano's logging. You should pass the capistrano configuration
    # and the deploy type as a string. The deploy type is for reporting purposes only but must be included.
    def self.setup(configuration, deploy_type, options = {})
      @_configuration = configuration
      @_deploy_type = deploy_type.gsub(/:/, "_")
      @_log_path = options[:deploy_log_path] || Dir.tmpdir
      @_log_path << "/" unless @_log_path =~ /\/$/
      FileUtils.mkdir_p(@_log_path)
      @_setup = true
      @_success = true
    end

    def self.log(level, message, line_prefix = nil)
      return nil unless setup?
      @release_name = @_configuration[:release_name] if @release_name.nil?
      @_log_file_path = @_log_path + @release_name + ".log" unless @_log_file_path
      @_deploy_log_file = File.open(@_log_file_path, "w") if @_deploy_log_file.nil?

      indent = "%*s" % [Logger::MAX_LEVEL, "*" * (Logger::MAX_LEVEL - level)]
      message.each do |line|
        if line_prefix
          @_deploy_log_file << "#{indent} [#{line_prefix}] #{line.strip}\n"
        else
          @_deploy_log_file << "#{indent} #{line.strip}\n"
        end
      end
    end

    def self.post_process
      unless ::Interrupt === $!
        puts "\n\nPlease wait while the log file is processed\n"
        # Should dump the stack trace of an exception if there is one
        error = $!
        unless error.nil?
          @_deploy_log_file << error.message << "\n"
          @_deploy_log_file << error.backtrace.join("\n")
          @_success = false
        end
        self.close

        hooks = [:any]
        hooks << (self.successful? ? :success : :failure)
        puts "Executing Post Processing Hooks"
        hooks.each do |h|
          @_post_process_hooks[h].each do |key|
            @_configuration.parent.find_and_execute_task(key)
          end
        end
        puts "Finished Post Processing Hooks"
      end
    end

    # Adds a post processing hook.
    #
    # Provide a task name to execute. These tasks are executed after capistrano has actually run its course.
    #
    # Takes a key to control when the hook is executed.
    #   :any     - always executed
    #   :success - only execute on success
    #   :failure - only execute on failure
    #
    # ==== Example
    #   Capistrano::EYLogger.post_process_hook( "ey_logger:upload_log_to_slice", :any)
    #
    def self.post_process_hook(task, key = :any)
      @_post_process_hooks ||= Hash.new{ |h,k| h[k] = [] }
      @_post_process_hooks[key] << task
    end

    def self.setup?
      !!@_setup
    end

    def self.deploy_type
      @_deploy_type
    end

    def self.successful?
      !!@_success
    end

    def self.failure?
      !@_success
    end

    def self.log_file_path
      @_log_file_path
    end

    def self.remote_log_file_name
      @_log_file_name ||= "#{@_configuration[:release_name]}-#{@_deploy_type}-#{self.successful? ? "SUCCESS" : "FAILURE"}.log"
    end

    def self.close
      @_deploy_log_file.flush unless @_deploy_log_file.nil?
      @_deploy_log_file.close unless @_deploy_log_file.nil?
      @_setup = false
    end

  end
end

data/lib/robbie/lib/ey_logger_hooks.rb
ADDED
@@ -0,0 +1,14 @@
require File.join(File.dirname(__FILE__), "ey_logger")

# These tasks are set up to use with the logger as post commit hooks.
Capistrano::Configuration.instance(:must_exist).load do
  namespace :ey_logger do
    task :upload_log_to_slice, :except => { :no_release => true } do
      logger = Capistrano::EYLogger
      run "mkdir -p #{shared_path}/deploy_logs"
      put File.open(logger.log_file_path).read, "#{shared_path}/deploy_logs/#{logger.remote_log_file_name}"
    end
  end
end

Capistrano::EYLogger.post_process_hook("ey_logger:upload_log_to_slice")

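ey_logger_hooks.rb is itself a usage example of the hook API documented in ey_logger.rb: it defines a Capistrano task and registers it with post_process_hook. As a further hedged sketch, an application could register its own hook the same way; the notify:deploy_failed task below is hypothetical and not part of this gem:

    # Illustrative only: registering an extra post-process hook from an app's deploy.rb
    Capistrano::Configuration.instance(:must_exist).load do
      namespace :notify do
        desc "Hypothetical task that reports where the deploy log was written"
        task :deploy_failed do
          puts "Deploy failed; local log kept at #{Capistrano::EYLogger.log_file_path}"
        end
      end
    end

    # Run it only when the deploy fails (valid keys: :any, :success, :failure)
    Capistrano::EYLogger.post_process_hook("notify:deploy_failed", :failure)
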
data/lib/robbie/recipes/admin.rb
ADDED
@@ -0,0 +1,59 @@
Capistrano::Configuration.instance(:must_exist).load do

  ##########################################################################
  # ADMIN and MONITORING

  namespace :admin do

    desc "tail log files"
    task :tail_logs, :roles => :app do
      run "tail -f #{shared_path}/log/*.log" do |channel, stream, data|
        puts "\n\n" # for an extra line break before the host name
        puts "#{channel[:host]}: #{data}"
        break if stream == :err
      end
    end

    desc "show server connections"
    task :netstat, :roles => :app do
      run "netstat -a" do |channel, stream, data|
        puts "\n\n" # for an extra line break before the host name
        puts "#{data}"
        break if stream == :err
      end
    end

    desc 'Set the necessary permissions on the app directory'
    task :set_perms, :roles => :app do
      sudo "chown -R #{user}:#{mongrel_group} #{deploy_to}/"
      sudo "chmod 774 -R #{deploy_to}"
    end

    ###############################################################################
    # SETUP SSH

    desc "Copies contents of ssh public keys into authorized_keys file"
    task :setup_ssh_keys do

      unless ssh_options[:keys]
        puts <<-ERROR

  You need to define the name of your SSH key(s)
  e.g. ssh_options[:keys] = %w(/Users/someuser/.ssh/id_dsa)

  You can put this in your .caprc file in your home directory.

        ERROR
        exit
      end

      sudo "test -d ~/.ssh || mkdir ~/.ssh"
      sudo "chmod 0700 ~/.ssh"
      put(ssh_options[:keys].collect{ |key| File.read(key+'.pub') }.join("\n"),
          File.join('/home', user, '.ssh/authorized_keys'),
          :mode => 0600 )
    end

  end

end

data/lib/robbie/recipes/assets.rb
ADDED
@@ -0,0 +1,25 @@
Capistrano::Configuration.instance.load do

  #############################################################################
  # ASSETS

  namespace :assets do
    task :symlink, :roles => :app do
      assets.create_dirs
      run <<-CMD
        rm -rf #{release_path}/public/uploads;
        rm -rf #{release_path}/public/images/avatars;
        ln -nfs #{shared_path}/uploads #{release_path}/public/uploads;
        ln -nfs #{shared_path}/avatars #{release_path}/public/images/avatars;
      CMD
    end
    task :create_dirs, :roles => :app do
      %w(uploads avatars).each do |name|
        run "mkdir -p #{shared_path}/#{name}; true"
      end
    end
  end

  after "deploy:update_code", "assets:symlink"

end

data/lib/robbie/recipes/backgroundrb.rb
ADDED
@@ -0,0 +1,23 @@
Capistrano::Configuration.instance(:must_exist).load do

  namespace :bdrb do
    desc "After update_code you want to reindex"
    task :reindex, :roles => [:app], :only => {:backgroundrb => true} do
      run "/engineyard/bin/searchd #{application} reindex"
    end

    desc "Start Backgroundrb"
    task :start, :roles => [:app], :only => {:backgroundrb => true} do
      sudo "/usr/bin/monit start all -g backgroundrb_#{application}"
    end
    desc "Stop Backgroundrb"
    task :stop, :roles => [:app], :only => {:backgroundrb => true} do
      sudo "/usr/bin/monit stop all -g backgroundrb_#{application}"
    end
    desc "Restart Backgroundrb"
    task :restart, :roles => [:app], :only => {:backgroundrb => true} do
      sudo "/usr/bin/monit restart all -g backgroundrb_#{application}"
    end
  end

end

data/lib/robbie/recipes/database.rb
ADDED
@@ -0,0 +1,98 @@
Capistrano::Configuration.instance(:must_exist).load do

  namespace :db do
    task :backup_name, :roles => :db, :only => { :primary => true } do
      now = Time.now
      run "mkdir -p #{shared_path}/db_backups"
      backup_time = [now.year, now.month, now.day, now.hour, now.min, now.sec].join('-')
      set :backup_file, "#{shared_path}/db_backups/#{environment_database}-snapshot-#{backup_time}.sql"
    end

    desc "Clone Production Database to Staging Database."
    task :clone_prod_to_staging, :roles => :db, :only => { :primary => true } do
      backup_name
      on_rollback { run "rm -f #{backup_file}" }
      run("cat #{shared_path}/config/database.yml") { |channel, stream, data| @environment_info = YAML.load(data)[rails_env] }
      if @environment_info['adapter'] == 'mysql'
        run "mysqldump --add-drop-table -u #{dbuser} -h #{production_dbhost.gsub('-master', '-replica')} -p#{dbpass} #{production_database} > #{backup_file}"
        run "mysql -u #{dbuser} -p#{dbpass} -h #{staging_dbhost} #{staging_database} < #{backup_file}"
      else
        run "PGPASSWORD=#{dbpass} pg_dump -c -U #{dbuser} -h #{production_dbhost} -f #{backup_file} #{production_database}"
        run "PGPASSWORD=#{dbpass} psql -U #{dbuser} -h #{staging_dbhost} -f #{backup_file} #{staging_database}"
      end
      run "rm -f #{backup_file}"
    end

    desc "Backup your MySQL or PostgreSQL database to shared_path+/db_backups"
    task :dump, :roles => :db, :only => {:primary => true} do
      backup_name
      run("cat #{shared_path}/config/database.yml") { |channel, stream, data| @environment_info = YAML.load(data)[rails_env] }
      if @environment_info['adapter'] == 'mysql'
        run "mysqldump --add-drop-table -u #{dbuser} -h #{environment_dbhost.gsub('-master', '-replica')} -p#{dbpass} #{environment_database} | bzip2 -c > #{backup_file}.bz2"
      else
        run "PGPASSWORD=#{dbpass} pg_dump -c -U #{dbuser} -h #{environment_dbhost} #{environment_database} | bzip2 -c > #{backup_file}.bz2"
      end
    end

    desc "Sync your production database to your local workstation"
    task :clone_to_local, :roles => :db, :only => {:primary => true} do
      backup_name
      dump
      get "#{backup_file}.bz2", "/tmp/#{application}.sql.gz"
      development_info = YAML.load_file("config/database.yml")['development']
      if development_info['adapter'] == 'mysql'
        run_str = "bzcat /tmp/#{application}.sql.gz | mysql -u #{development_info['username']} -p#{development_info['password']} -h #{development_info['host']} #{development_info['database']}"
      else
        run_str = "PGPASSWORD=#{development_info['password']} bzcat /tmp/#{application}.sql.gz | psql -U #{development_info['username']} -h #{development_info['host']} #{development_info['database']}"
      end
      %x!#{run_str}!
    end
  end

  ###########################################################################
  # DATABASE YAML

  # after "deploy:setup", :db
  # after "deploy:update_code", "db:symlink"

  namespace :db do

    desc "Create database yaml in shared path"
    task :default do
      db_config = ERB.new <<-EOF
base: &base
  adapter: mysql
  username: #{runner}
  password: #{password}
  host: 127.0.0.1

development:
  database: #{db_name}_dev
  <<: *base

test:
  database: #{db_name}_test
  <<: *base

production:
  database: #{db_name}_prod
  shooting_star:
    server: #{shoot_server}:#{shoot_server_port}
    shooter: druby://localhost:#{shoot_drb_port}
  <<: *base
      EOF

      sudo "mkdir -p #{shared_path}/config"
      sudo "chown -R #{user}:#{mongrel_group} #{deploy_to}/"
      sudo "chmod 774 -R #{shared_path}/config"
      put db_config.result, "#{shared_path}/config/database.yml"
    end

    desc "Make symlink for database yaml"
    task :symlink do
      run "ln -nfs #{shared_path}/config/database.yml #{release_path}/config/database.yml"
    end

  end

end

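The db:* tasks above read a handful of Capistrano variables (dbuser, dbpass, environment_database, environment_dbhost, production_*, staging_*) that are not defined in this file; they are expected to come from the deploying application's own configuration. A minimal sketch of such definitions, with placeholder values that are not part of the gem:

    # Illustrative only: variables the db:* tasks above expect (placeholder values)
    set :application,          "myapp"
    set :rails_env,            "production"
    set :dbuser,               "deploy"
    set :dbpass,               "secret"
    set :environment_database, "myapp_production"
    set :environment_dbhost,   "myapp-master"     # dumps swap "-master" for "-replica"
    set :production_database,  "myapp_production"
    set :production_dbhost,    "myapp-master"
    set :staging_database,     "myapp_staging"
    set :staging_dbhost,       "myapp-staging"
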
data/lib/robbie/recipes/deploy.rb
ADDED
@@ -0,0 +1,128 @@
require File.join(File.dirname(__FILE__), "..", "lib", "ey_logger.rb")
Capistrano::Configuration.instance(:must_exist).load do

  namespace :deploy do
    # This is here to hook into the logger for deploy and deploy:long tasks
    ["deploy", "deploy:long"].each do |tsk|
      before(tsk) do
        Capistrano::EYLogger.setup( self, tsk )
        at_exit{ Capistrano::EYLogger.post_process if Capistrano::EYLogger.setup? }
      end
    end

    desc "Link the database.yml and mongrel_cluster.yml files into the current release path."
    task :symlink_configs, :roles => :app, :except => {:no_release => true} do
      run <<-CMD
        cd #{latest_release} &&
        ln -nfs #{shared_path}/config/database.yml #{latest_release}/config/database.yml &&
        ln -nfs #{shared_path}/config/mongrel_cluster.yml #{latest_release}/config/mongrel_cluster.yml
      CMD
    end

    desc <<-DESC
      Run the migrate rake task. By default, it runs this in the most recently \
      deployed version of the app. However, you can specify a different release \
      via the migrate_target variable, which must be one of :latest (for the \
      default behavior), or :current (for the release indicated by the \
      `current' symlink). Strings will work for those values instead of symbols, \
      too. You can also specify additional environment variables to pass to rake \
      via the migrate_env variable. Finally, you can specify the full path to the \
      rake executable by setting the rake variable. The defaults are:

        set :rake, "rake"
        set :framework, "merb"
        set :merb_env, "production"
        set :migrate_env, ""
        set :migrate_target, :latest
    DESC
    task :migrate, :roles => :db, :only => { :primary => true } do
      rake = fetch(:rake, "rake")

      framework = fetch(:framework, "rails")
      if framework.match(/^rails$/i)
        app_env = fetch(:rails_env, "production")
      else
        app_env = fetch("#{framework.downcase}_env".to_sym, "production")
      end

      migrate_env = fetch(:migrate_env, "")
      migrate_target = fetch(:migrate_target, :latest)

      directory = case migrate_target.to_sym
        when :current then current_path
        when :latest then current_release
        else raise ArgumentError, "unknown migration target #{migrate_target.inspect}"
      end

      run "cd #{directory}; #{rake} #{framework.upcase}_ENV=#{app_env} #{migrate_env} db:migrate"
    end

    desc "Display the maintenance.html page while deploying with migrations. Then it restarts and enables the site again."
    task :long do
      transaction do
        update_code
        web.disable
        symlink
        migrate
      end

      restart
      web.enable
    end

    desc "Restart the Mongrel processes on the app slices."
    task :restart, :roles => :app do
      mongrel.restart
    end

    desc "Start the Mongrel processes on the app slices."
    task :spinner, :roles => :app do
      mongrel.start
    end

    desc "Start the Mongrel processes on the app slices."
    task :start, :roles => :app do
      mongrel.start
    end

    desc "Stop the Mongrel processes on the app slices."
    task :stop, :roles => :app do
      mongrel.stop
    end

    namespace :web do
      desc <<-DESC
        Present a maintenance page to visitors. Disables your application's web \
        interface by writing a "maintenance.html" file to each web server. The \
        servers must be configured to detect the presence of this file, and if \
        it is present, always display it instead of performing the request.

        By default, the maintenance page will just say the site is down for \
        "maintenance", and will be back "shortly", but you can customize the \
        page by specifying the REASON and UNTIL environment variables:

          $ cap deploy:web:disable \\
                REASON="hardware upgrade" \\
                UNTIL="12pm Central Time"

        For further customization, copy your html file to shared_path+'/system/maintenance.html.custom'.
        If this file exists it will be used instead of the ugly default capistrano page.
      DESC
      task :disable, :roles => :web, :except => { :no_release => true } do
        maint_file = "#{shared_path}/system/maintenance.html"
        require 'erb'
        on_rollback { run "rm #{shared_path}/system/maintenance.html" }

        reason = ENV['REASON']
        deadline = ENV['UNTIL']

        template = File.read(File.join(File.dirname(__FILE__), "templates", "maintenance.rhtml"))
        result = ERB.new(template).result(binding)

        put result, "#{shared_path}/system/maintenance.html.tmp", :mode => 0644
        run "if [ -f #{shared_path}/system/maintenance.html.custom ]; then cp #{shared_path}/system/maintenance.html.custom #{maint_file}; else cp #{shared_path}/system/maintenance.html.tmp #{maint_file}; fi"
      end
    end
  end

end

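The deploy:migrate desc above lists the variables that control it. As a hedged illustration (the values below are placeholders, not defaults of this gem beyond what the desc states), an application could override them in its own deploy.rb:

    # Illustrative only: overriding the migrate-related variables described above
    set :framework,      "rails"                     # picks rails_env instead of merb_env
    set :rails_env,      "production"
    set :rake,           "/usr/bin/rake"             # full path to the rake executable
    set :migrate_env,    "VERSION=20081006000000"    # extra environment passed to rake
    set :migrate_target, :current                    # run in the `current' release

With those settings the task runs, on the primary db server: cd <current_path>; /usr/bin/rake RAILS_ENV=production VERSION=20081006000000 db:migrate.
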
data/lib/robbie/recipes/ferret.rb
ADDED
@@ -0,0 +1,20 @@
Capistrano::Configuration.instance(:must_exist).load do

  namespace :ferret do
    desc "After update_code you want to symlink the index and ferret_server.yml file into place"
    task :symlink_configs, :roles => [:app], :except => {:no_release => true, :ferret => false} do
      run <<-CMD
        cd #{latest_release} &&
        ln -nfs #{shared_path}/config/ferret_server.yml #{latest_release}/config/ferret_server.yml &&
        if [ -d #{latest_release}/index ]; then mv #{latest_release}/index #{latest_release}/index.bak; fi &&
        ln -nfs #{shared_path}/index #{latest_release}/index
      CMD
    end
    [:start, :stop, :restart].each do |op|
      desc "#{op} ferret server"
      task op, :roles => [:app], :except => {:no_release => true, :ferret => false} do
        sudo "/usr/bin/monit #{op} all -g ferret_#{application}"
      end
    end
  end
end

data/lib/robbie/recipes/juggernaut.rb
ADDED
@@ -0,0 +1,19 @@
Capistrano::Configuration.instance(:must_exist).load do

  namespace :juggernaut do
    desc "After update_code you want to symlink the juggernaut.yml file into place"
    task :symlink_configs, :roles => [:app], :except => {:no_release => true, :juggernaut => false} do
      run <<-CMD
        cd #{latest_release} &&
        ln -nfs #{shared_path}/config/juggernaut.yml #{latest_release}/config/juggernaut.yml &&
        ln -nfs #{shared_path}/config/juggernaut_hosts.yml #{latest_release}/config/juggernaut_hosts.yml
      CMD
    end
    [:start, :stop, :restart].each do |op|
      desc "#{op} juggernaut server"
      task op, :roles => [:app], :except => {:no_release => true, :juggernaut => false} do
        sudo "/usr/bin/monit #{op} all -g juggernaut_#{application}"
      end
    end
  end
end

data/lib/robbie/recipes/memcached.rb
ADDED
@@ -0,0 +1,20 @@
Capistrano::Configuration.instance(:must_exist).load do
  namespace :memcached do
    desc "Start memcached"
    task :start, :roles => [:app], :only => {:memcached => true} do
      sudo "/etc/init.d/memcached start"
    end
    desc "Stop memcached"
    task :stop, :roles => [:app], :only => {:memcached => true} do
      sudo "/etc/init.d/memcached stop"
    end
    desc "Restart memcached"
    task :restart, :roles => [:app], :only => {:memcached => true} do
      sudo "/etc/init.d/memcached restart"
    end
    desc "Symlink the memcached.yml file into place if it exists"
    task :symlink_configs, :roles => [:app], :only => {:memcached => true}, :except => { :no_release => true } do
      run "if [ -f #{shared_path}/config/memcached.yml ]; then ln -nfs #{shared_path}/config/memcached.yml #{latest_release}/config/memcached.yml; fi"
    end
  end
end

data/lib/robbie/recipes/mongrel.rb
ADDED
@@ -0,0 +1,28 @@
Capistrano::Configuration.instance(:must_exist).load do
  namespace :mongrel do
    desc <<-DESC
      Start Mongrel processes on the app server. This uses the :use_sudo variable to determine whether to use sudo or not. By default, :use_sudo is
      set to true.
    DESC
    task :start, :roles => [:app], :except => {:mongrel => false} do
      sudo "/usr/bin/monit start all -g #{monit_group}"
    end

    desc <<-DESC
      Restart the Mongrel processes on the app server by starting and stopping the cluster. This uses the :use_sudo
      variable to determine whether to use sudo or not. By default, :use_sudo is set to true.
    DESC
    task :restart, :roles => [:app], :except => {:mongrel => false} do
      sudo "/usr/bin/monit restart all -g #{monit_group}"
    end

    desc <<-DESC
      Stop the Mongrel processes on the app server. This uses the :use_sudo
      variable to determine whether to use sudo or not. By default, :use_sudo is
      set to true.
    DESC
    task :stop, :roles => [:app], :except => {:mongrel => false} do
      sudo "/usr/bin/monit stop all -g #{monit_group}"
    end
  end
end

data/lib/robbie/recipes/monit.rb
ADDED
@@ -0,0 +1,66 @@
Capistrano::Configuration.instance(:must_exist).load do
  ##########################################################################
  # monit

  namespace :monit do
    desc "Get the status of your mongrels"
    task :status, :roles => :app do
      @monit_output ||= { }
      sudo "/usr/bin/monit status" do |channel, stream, data|
        @monit_output[channel[:server].to_s] ||= [ ]
        @monit_output[channel[:server].to_s].push(data.chomp)
      end
      @monit_output.each do |k,v|
        puts "#{k} -> #{'*'*55}"
        puts v.join("\n")
      end
    end

    desc "reload monit server"
    task :reload do
      run "sudo /etc/init.d/monit force-reload"
    end

    desc "Create monit file in /etc/monit.d/ror-apps/"
    task :default do
      monit_config_str = ""
      cluster_mongrel_port = mongrel_port.to_i

      cluster_mongrel_port.upto(cluster_mongrel_port + mongrel_servers - 1) {|cluster_port|
        monit_config_str += <<-EOF

###############################################################################################
# #{application}

check process mongrel_#{cluster_port} with pidfile #{shared_path}/pids/mongrel.#{cluster_port}.pid
  start program = "/usr/bin/mongrel_rails cluster::start -C #{mongrel_conf} --clean --only #{cluster_port}"
  stop program = "/usr/bin/mongrel_rails cluster::stop -C #{mongrel_conf} --clean --only #{cluster_port}"

  if failed host 127.0.0.1 port #{cluster_port} protocol http
    with timeout 10 seconds
    then restart

  if totalmem > 100 Mb then restart
  if cpu is greater than 90% for 2 cycles then alert
  if cpu > 90% for 5 cycles then restart
  if loadavg(5min) greater than 10 for 8 cycles then restart
  if 5 restarts within 5 cycles then timeout
  group mongrel-#{application}
        EOF
      }

      monit_config = ERB.new(monit_config_str)
      sudo "mkdir -p /etc/monit.d/ror-apps/"
      sudo "chown -R #{user}:#{mongrel_group} /etc/monit.d/ror-apps/"
      sudo "chmod 774 -R /etc/monit.d/ror-apps/"
      put monit_config.result, "/etc/monit.d/ror-apps/#{application}"
    end

  end

end

data/lib/robbie/recipes/nginx.rb
ADDED
@@ -0,0 +1,127 @@
Capistrano::Configuration.instance(:must_exist).load do

  ######################################################################################
  # NGINX

  namespace :nginx do
    desc "Start Nginx on the app slices."
    task :start, :roles => :app do
      sudo "/etc/init.d/nginx start"
    end

    desc "Restart the Nginx processes on the app slices."
    task :restart, :roles => :app do
      sudo "/etc/init.d/nginx restart"
    end

    desc "Stop the Nginx processes on the app slices."
    task :stop, :roles => :app do
      sudo "/etc/init.d/nginx stop"
    end

    desc "Tail the nginx access logs for this application"
    task :tail, :roles => :app do
      run "tail -f /var/log/engineyard/nginx/#{application}.access.log" do |channel, stream, data|
        puts "#{channel[:server]}: #{data}" unless data =~ /^10\.[01]\.0/ # skips lb pull pages
        break if stream == :err
      end
    end

    desc "Tail the nginx error logs on the app slices"
    task :tail_error, :roles => :app do
      run "tail -f /var/log/engineyard/nginx/error.log" do |channel, stream, data|
        puts "#{channel[:server]}: #{data}" unless data =~ /^10\.[01]\.0/ # skips lb pull pages
        break if stream == :err
      end
    end

    @nginx_path = "/etc/nginx"
    @tmp = "/tmp"
    @enabled_path = "#{@nginx_path}/sites-enabled"
    @available_path = "#{@nginx_path}/sites-available"

    desc 'Enable a virtual host'
    task :en_vhost do
      sudo "test -L #{@enabled_path}/#{application} || sudo ln -s #{@available_path}/#{application} " +
           "#{@enabled_path}/#{application}; true"
    end

    desc 'Disable a virtual host'
    task :dis_vhost do
      sudo "rm -f #{@enabled_path}/#{application}"
    end

    desc 'Reload nginx'
    task :reload do
      sudo "/etc/init.d/nginx reload"
    end

    desc "Add an nginx vhost for the application"
    task :add_vhost do
      nginx_config = ERB.new <<-EOF
upstream mongrel-#{application} {
  server 127.0.0.1:#{mongrel_port};
  server 127.0.0.1:#{mongrel_port.to_i + 1};
}
server {
  listen 80;
  server_name #{nginx_server_name};
  root #{deploy_to}/current/public;
  index index.html index.htm;

  access_log /var/log/nginx/localhost.access.log;

  location / {
    index index.html index.htm;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header Host $http_host;
    proxy_redirect false;

    # If the file exists as a static file serve it directly without
    # running all the other rewrite tests on it
    if (-f $request_filename) {
      break;
    }

    if (-f $request_filename/index.html) {
      rewrite (.*) $1/index.html break;
    }

    if (-f $request_filename.html) {
      rewrite (.*) $1.html break;
    }

    if (!-f $request_filename) {
      proxy_pass http://mongrel-#{application};
      break;
    }

  }

  #error_page 404 /404.html;

  # redirect server error pages to the static page /50x.html
  #
  error_page 500 502 503 504 /500.html;
  location = /500.html {
    root #{deploy_to}/current/public/;
  }

}
      EOF
      sudo "test -d #{@nginx_path}/sites-available || mkdir #{@nginx_path}/sites-available"
      sudo "test -d #{@nginx_path}/sites-enabled || mkdir #{@nginx_path}/sites-enabled"
      put nginx_config.result, "#{@tmp}/#{application}"
      sudo "cp #{@tmp}/#{application} #{@nginx_path}/sites-available/#{application}"
      sudo "rm #{@tmp}/#{application}"
    end

  end

  # after "nginx:add_vhost", "nginx:en_vhost"
  # after "deploy:cold", "nginx:add_vhost"
  # after "deploy:cold", "nginx:reload"
end

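The vhost template above interpolates mongrel_port and nginx_server_name, and the commented-out after hooks hint at the intended order: generate the vhost, enable it, reload nginx. A hedged sketch of driving it by hand, with placeholder values that are not part of the gem:

    # Illustrative only: values the vhost template expects
    set :application,       "myapp"
    set :mongrel_port,      5000
    set :nginx_server_name, "www.example.com"

    # then, from the shell:
    #   cap nginx:add_vhost nginx:en_vhost nginx:reload
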
data/lib/robbie/recipes/shooting_star.rb
ADDED
@@ -0,0 +1,49 @@
require 'erb'

Capistrano::Configuration.instance.load do

  ##########################################################################
  # shooting_star for CHAT system

  namespace :shooting_star do

    desc "start shooting_star server"
    task :start do
      run "cd #{latest_release} && sudo nohup shooting_star start -d"
    end

    desc "stop shooting_star server"
    task :stop do
      run "cd #{latest_release} && sudo shooting_star stop"
    end

    desc "restart shooting_star server"
    task :restart do
      stop
      start
    end

    desc "Create shooting_star yaml in shared path"
    task :default do
      shoot_config = ERB.new <<-EOF
server:
  host: 0.0.0.0
  port: #{shoot_server_port}
shooter:
  uri: druby://0.0.0.0:#{shoot_drb_port}
      EOF

      sudo "mkdir -p #{shared_path}/config"
      sudo "chown -R #{user}:#{mongrel_group} #{deploy_to}/"
      sudo "chmod 774 -R #{shared_path}/config"
      put shoot_config.result, "#{shared_path}/config/shooting_star.yml"
    end

    desc "Make symlink for shooting star yaml"
    task :symlink do
      run "ln -nfs #{shared_path}/config/shooting_star.yml #{release_path}/config/shooting_star.yml"
    end
  end

end

data/lib/robbie/recipes/slice.rb
ADDED
@@ -0,0 +1,21 @@
Capistrano::Configuration.instance(:must_exist).load do

  namespace :slice do
    desc "Tail the Rails production log for this environment"
    task :tail_production_logs, :roles => :app do
      run "tail -f #{shared_path}/log/#{rails_env}.log" do |channel, stream, data|
        puts # for an extra line break before the host name
        puts "#{channel[:server]} -> #{data}"
        break if stream == :err
      end
    end
    desc "Tail the Mongrel logs for this environment"
    task :tail_mongrel_logs, :roles => :app do
      run "tail -f #{shared_path}/log/mongrel*.log" do |channel, stream, data|
        puts # for an extra line break before the host name
        puts "#{channel[:server]} -> #{data}"
        break if stream == :err
      end
    end
  end
end

data/lib/robbie/recipes/solr.rb
ADDED
@@ -0,0 +1,37 @@
Capistrano::Configuration.instance(:must_exist).load do

  namespace :solr do
    desc "After update_code you want to symlink the solr.yml file into place"
    task :symlink_configs, :roles => [:app], :except => {:no_release => true} do
      run <<-CMD
        cd #{latest_release} && ln -nfs #{shared_path}/config/solr.yml #{latest_release}/config/solr.yml
      CMD
    end

    [:start, :stop, :restart].each do |op|
      desc "#{op} solr server"
      task op, :roles => [:app], :only => {:solr => true} do
        sudo "/usr/bin/monit #{op} all -g solr_#{application}"
      end
    end

    namespace :tail do
      desc "Tail the Solr logs for this environment"
      task :logs, :roles => [:app], :only => {:solr => true} do
        run "tail -f /var/log/engineyard/solr/#{application}.log" do |channel, stream, data|
          puts # for an extra line break before the host name
          puts "#{channel[:server]} -> #{data}"
          break if stream == :err
        end
      end
      desc "Tail the Solr error logs for this environment"
      task :errors, :roles => [:app], :only => {:solr => true} do
        run "tail -f /var/log/engineyard/solr/#{application}.err.log" do |channel, stream, data|
          puts # for an extra line break before the host name
          puts "#{channel[:server]} -> #{data}"
          break if stream == :err
        end
      end
    end
  end
end

data/lib/robbie/recipes/sphinx.rb
ADDED
@@ -0,0 +1,76 @@
Capistrano::Configuration.instance(:must_exist).load do

  namespace :sphinx do
    desc "After update_code you want to configure, then reindex"
    task :configure, :roles => [:app], :only => {:sphinx => true}, :except => {:no_release => true} do
      run "/engineyard/bin/searchd #{application} configure"
    end

    desc "After configure you want to reindex"
    task :reindex, :roles => [:app], :only => {:sphinx => true} do
      run "/engineyard/bin/searchd #{application} reindex"
    end

    desc "Start Sphinx Searchd"
    task :start, :roles => [:app], :only => {:sphinx => true} do
      sudo "/usr/bin/monit start all -g sphinx_#{application}"
    end

    desc "Stop Sphinx Searchd"
    task :stop, :roles => [:app], :only => {:sphinx => true} do
      sudo "/usr/bin/monit stop all -g sphinx_#{application}"
    end

    desc "Restart Sphinx Searchd"
    task :restart, :roles => [:app], :only => {:sphinx => true} do
      sudo "/usr/bin/monit restart all -g sphinx_#{application}"
    end

    task :symlink, :roles => [:app], :only => {:sphinx => true}, :except => {:no_release => true} do
      run "if [ -d #{latest_release}/config/ultrasphinx ]; then mv #{latest_release}/config/ultrasphinx #{latest_release}/config/ultrasphinx.bak; fi"
      run "ln -nfs #{shared_path}/config/ultrasphinx #{latest_release}/config/ultrasphinx"
    end
  end

  namespace :acts_as_sphinx do
    desc "After update_code you want to reindex"
    task :reindex, :roles => [:app], :only => {:sphinx => true} do
      run "/engineyard/bin/acts_as_sphinx_searchd #{application} reindex"
    end
  end

  namespace :thinking_sphinx do
    desc "After update_code you want to configure, then reindex"
    task :configure, :roles => [:app], :only => {:sphinx => true}, :except => {:no_release => true} do
      run "/engineyard/bin/thinking_sphinx_searchd #{application} configure"
    end

    desc "After configure you want to reindex"
    task :reindex, :roles => [:app], :only => {:sphinx => true} do
      run "/engineyard/bin/thinking_sphinx_searchd #{application} reindex"
    end

    task :symlink, :roles => [:app], :only => {:sphinx => true}, :except => {:no_release => true} do
      run "if [ -d #{latest_release}/config/thinkingsphinx ]; then mv #{latest_release}/config/thinkingsphinx #{latest_release}/config/thinkingsphinx.bak; fi"
      run "ln -nfs #{shared_path}/config/thinkingsphinx #{latest_release}/config/thinkingsphinx"
      run "ln -nfs #{shared_path}/config/sphinx.yml #{latest_release}/config/sphinx.yml"
    end
  end

  namespace :ultrasphinx do
    desc "After update_code you want to configure, then reindex"
    task :configure, :roles => [:app], :only => {:sphinx => true}, :except => {:no_release => true} do
      run "/engineyard/bin/ultrasphinx_searchd #{application} configure"
    end

    desc "After configure you want to reindex"
    task :reindex, :roles => [:app], :only => {:sphinx => true} do
      run "/engineyard/bin/ultrasphinx_searchd #{application} reindex"
    end

    task :symlink, :roles => [:app], :only => {:sphinx => true}, :except => {:no_release => true} do
      run "if [ -d #{latest_release}/config/ultrasphinx ]; then mv #{latest_release}/config/ultrasphinx #{latest_release}/config/ultrasphinx.bak; fi"
      run "ln -nfs #{shared_path}/config/ultrasphinx #{latest_release}/config/ultrasphinx"
    end
  end
end

data/lib/robbie/recipes/templates/maintenance.rhtml
ADDED
@@ -0,0 +1,53 @@

<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
  "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">

<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">

<head>
  <meta http-equiv="content-type" content="text/html;charset=UTF-8" />
  <title>System down for maintenance</title>

  <style type="text/css">
    div.outer {
      position: absolute;
      left: 50%;
      top: 50%;
      width: 500px;
      height: 300px;
      margin-left: -260px;
      margin-top: -150px;
    }

    .DialogBody {
      margin: 0;
      padding: 10px;
      text-align: left;
      border: 1px solid #ccc;
      border-right: 1px solid #999;
      border-bottom: 1px solid #999;
      background-color: #fff;
    }

    body { background-color: #fff; }
  </style>
</head>

<body>

  <div class="outer">
    <div class="DialogBody" style="text-align: center;">
      <div style="text-align: center; width: 200px; margin: 0 auto;">
        <p style="color: red; font-size: 16px; line-height: 20px;">
          The system is down for <%= reason ? reason : "maintenance" %>
          as of <%= Time.now.strftime("%H:%M %Z") %>.
        </p>
        <p style="color: #666;">
          It'll be back <%= deadline ? deadline : "shortly" %>.
        </p>
      </div>
    </div>
  </div>

</body>
</html>

data/lib/robbie/recipes/tomcat.rb
ADDED
@@ -0,0 +1,16 @@
Capistrano::Configuration.instance(:must_exist).load do
  namespace :tomcat do
    desc "Start tomcat"
    task :start, :roles => [:app], :only => {:tomcat => true} do
      sudo "/etc/init.d/tomcat start"
    end
    desc "Stop tomcat"
    task :stop, :roles => [:app], :only => {:tomcat => true} do
      sudo "/etc/init.d/tomcat stop"
    end
    desc "Restart tomcat"
    task :restart, :roles => [:app], :only => {:tomcat => true} do
      sudo "/etc/init.d/tomcat restart"
    end
  end
end

data/lib/robbie/recipes.rb
ADDED
@@ -0,0 +1,25 @@
require 'robbie/lib/ey_logger'
require 'robbie/lib/ey_logger_hooks'
require 'robbie/recipes/database'
require 'robbie/recipes/assets'
require 'robbie/recipes/shooting_star'
require 'robbie/recipes/ferret'
require 'robbie/recipes/mongrel'
require 'robbie/recipes/nginx'
require 'robbie/recipes/slice'
require 'robbie/recipes/deploy'
require 'robbie/recipes/sphinx'
require 'robbie/recipes/backgroundrb'
require 'robbie/recipes/memcached'
require 'robbie/recipes/solr'
require 'robbie/recipes/monit'
require 'robbie/recipes/tomcat'
require 'robbie/recipes/juggernaut'

Capistrano::Configuration.instance(:must_exist).load do

  default_run_options[:pty] = true if respond_to?(:default_run_options)
  set :keep_releases, 3
  set :runner, defer { user }

end

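recipes.rb is the catch-all entry point: it requires every recipe file above (note that admin.rb is not in its require list, so it has to be loaded separately if wanted) and sets a few defaults. A hedged sketch of how a deploying application might pull it in from its own config/deploy.rb, assuming the gem is installed under the gramos-robbie name; the variable values are placeholders:

    # Illustrative only: loading the recipes from an application's deploy configuration
    require 'rubygems'
    gem 'gramos-robbie'
    require 'robbie/recipes'          # pulls in every namespace defined above
    require 'robbie/recipes/admin'    # admin tasks are not loaded by recipes.rb

    set :application, "myapp"
    set :user,        "deploy"
    set :deploy_to,   "/var/www/apps/#{application}"
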
data/lib/robbie.rb
ADDED
metadata
ADDED
@@ -0,0 +1,87 @@
--- !ruby/object:Gem::Specification
name: gramos-robbie
version: !ruby/object:Gem::Version
  version: 0.0.1
platform: ruby
authors:
- "Gast\xF3n Ramos"
autorequire:
bindir: bin
cert_chain: []

date: 2008-10-06 00:00:00 -07:00
default_executable:
dependencies:
- !ruby/object:Gem::Dependency
  name: capistrano
  version_requirement:
  version_requirements: !ruby/object:Gem::Requirement
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
        version: 2.2.0
    version:
description: A bunch of useful recipes (based on eycap) to help deployment
email: ramos.gaston@gmail.com
executables: []

extensions: []

extra_rdoc_files: []

files:
- lib/robbie
- lib/robbie/lib
- lib/robbie/lib/ey_logger.rb
- lib/robbie/lib/ey_logger_hooks.rb
- lib/robbie/recipes
- lib/robbie/recipes/backgroundrb.rb
- lib/robbie/recipes/database.rb
- lib/robbie/recipes/deploy.rb
- lib/robbie/recipes/ferret.rb
- lib/robbie/recipes/juggernaut.rb
- lib/robbie/recipes/memcached.rb
- lib/robbie/recipes/mongrel.rb
- lib/robbie/recipes/monit.rb
- lib/robbie/recipes/nginx.rb
- lib/robbie/recipes/slice.rb
- lib/robbie/recipes/solr.rb
- lib/robbie/recipes/sphinx.rb
- lib/robbie/recipes/templates
- lib/robbie/recipes/templates/maintenance.rhtml
- lib/robbie/recipes/tomcat.rb
- lib/robbie/recipes/shooting_star.rb
- lib/robbie/recipes/admin.rb
- lib/robbie/recipes/assets.rb
- lib/robbie/recipes.rb
- lib/robbie/version.rb
- lib/robbie.rb
- README.txt
has_rdoc: true
homepage: ""
post_install_message:
rdoc_options: []

require_paths:
- lib
required_ruby_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: "0"
  version:
required_rubygems_version: !ruby/object:Gem::Requirement
  requirements:
  - - ">="
    - !ruby/object:Gem::Version
      version: "0"
  version:
requirements: []

rubyforge_project: robbie
rubygems_version: 1.2.0
signing_key:
specification_version: 2
summary: Capistrano tasks
test_files: []
