rubber 1.2.1 → 1.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40) hide show
  1. data/CHANGELOG +8 -0
  2. data/VERSION +1 -1
  3. data/generators/vulcanize/templates/apache/config/rubber/deploy-apache.rb +4 -4
  4. data/generators/vulcanize/templates/base/config/rubber/deploy-setup.rb +4 -4
  5. data/generators/vulcanize/templates/base/config/rubber/rubber-dns.yml +0 -1
  6. data/generators/vulcanize/templates/base/config/rubber/rubber.yml +4 -4
  7. data/generators/vulcanize/templates/complete_mongrel_mysql/config/rubber/role/haproxy/haproxy-mongrel.conf +2 -2
  8. data/generators/vulcanize/templates/complete_passenger_mysql/config/rubber/role/haproxy/haproxy-passenger.conf +1 -1
  9. data/generators/vulcanize/templates/cruise/config/rubber/deploy-cruise.rb +2 -2
  10. data/generators/vulcanize/templates/haproxy/config/rubber/deploy-haproxy.rb +4 -4
  11. data/generators/vulcanize/templates/haproxy/config/rubber/role/haproxy/syslog-haproxy.conf +8 -3
  12. data/generators/vulcanize/templates/haproxy/config/rubber/role/haproxy/syslogd-default.conf +4 -0
  13. data/generators/vulcanize/templates/jetty/config/rubber/deploy-jetty.rb +4 -4
  14. data/generators/vulcanize/templates/mongrel/config/rubber/deploy-mongrel.rb +4 -4
  15. data/generators/vulcanize/templates/monit/config/rubber/common/monit.conf +0 -1
  16. data/generators/vulcanize/templates/monit/config/rubber/deploy-monit.rb +4 -3
  17. data/generators/vulcanize/templates/munin/config/rubber/deploy-munin.rb +3 -3
  18. data/generators/vulcanize/templates/munin/config/rubber/role/web_tools/munin-plugins.conf +1 -1
  19. data/generators/vulcanize/templates/mysql/config/rubber/deploy-mysql.rb +106 -8
  20. data/generators/vulcanize/templates/mysql/config/rubber/role/db/apparmor-mysql.conf +46 -0
  21. data/generators/vulcanize/templates/mysql/config/rubber/role/db/my.cnf +2 -6
  22. data/generators/vulcanize/templates/mysql/config/rubber/rubber-mysql.yml +1 -1
  23. data/generators/vulcanize/templates/nginx/config/rubber/deploy-nginx.rb +4 -4
  24. data/generators/vulcanize/templates/passenger/config/rubber/deploy-passenger.rb +1 -1
  25. data/generators/vulcanize/templates/passenger/config/rubber/rubber-passenger.yml +1 -1
  26. data/generators/vulcanize/templates/redis/config/rubber/deploy-redis.rb +5 -5
  27. data/generators/vulcanize/templates/resque/config/rubber/deploy-resque.rb +1 -1
  28. data/generators/vulcanize/templates/sphinx/config/rubber/deploy-sphinx.rb +7 -6
  29. data/lib/rubber/dns/zerigo.rb +59 -124
  30. data/lib/rubber/environment.rb +5 -5
  31. data/lib/rubber/generator.rb +29 -21
  32. data/lib/rubber/recipes/rubber/deploy.rb +2 -2
  33. data/lib/rubber/recipes/rubber/instances.rb +3 -0
  34. data/lib/rubber/recipes/rubber/setup.rb +74 -46
  35. data/lib/rubber/recipes/rubber/utils.rb +2 -2
  36. data/lib/rubber/recipes/rubber/volumes.rb +4 -4
  37. data/test/generator_test.rb +44 -0
  38. data/test/test_helper.rb +6 -0
  39. data/test/zerigo_test.rb +87 -0
  40. metadata +20 -6
data/CHANGELOG CHANGED
@@ -1,3 +1,11 @@
1
+ 1.2.1
2
+ -----
3
+
4
+ Jeweler will release to gemcutter. <5ab598f> [Kevin Menard]
5
+ Work around issue with capistrano 2.5.16. <ce9617b> [Kevin Menard]
6
+ Bumped to the latest version of redis. <e32f072> [Kevin Menard]
7
+ Bumped to latest version of passenger. <251f2ee> [Kevin Menard]
8
+
1
9
  1.2.0
2
10
  -----
3
11
 
data/VERSION CHANGED
@@ -1 +1 @@
1
- 1.2.1
1
+ 1.3.0
@@ -15,10 +15,10 @@ namespace :rubber do
15
15
  # rubber auto-roles don't get defined till after all tasks are defined
16
16
  on :load do
17
17
  rubber.serial_task self, :serial_restart, :roles => :apache do
18
- run "/etc/init.d/apache2 restart"
18
+ sudo "/etc/init.d/apache2 restart"
19
19
  end
20
20
  rubber.serial_task self, :serial_reload, :roles => :apache do
21
- run "if ! ps ax | grep -v grep | grep -c apache2 &> /dev/null; then /etc/init.d/apache2 start; else /etc/init.d/apache2 reload; fi"
21
+ sudo "sh -c 'if ! ps ax | grep -v grep | grep -c apache2 &> /dev/null; then /etc/init.d/apache2 start; else /etc/init.d/apache2 reload; fi'"
22
22
  end
23
23
  end
24
24
 
@@ -28,12 +28,12 @@ namespace :rubber do
28
28
 
29
29
  desc "Stops the apache web server"
30
30
  task :stop, :roles => :apache, :on_error => :continue do
31
- run "/etc/init.d/apache2 stop"
31
+ sudo "/etc/init.d/apache2 stop"
32
32
  end
33
33
 
34
34
  desc "Starts the apache web server"
35
35
  task :start, :roles => :apache do
36
- run "/etc/init.d/apache2 start"
36
+ sudo "/etc/init.d/apache2 start"
37
37
  end
38
38
 
39
39
  desc "Restarts the apache web server"
@@ -20,7 +20,7 @@ namespace :rubber do
20
20
  if ent_ruby_hosts.size > 0
21
21
  task :_install_enterprise_ruby, :hosts => ent_ruby_hosts do
22
22
  ver = "1.8.7-2010.01"
23
- rubber.run_script "install_ruby-enterprise", <<-ENDSCRIPT
23
+ rubber.sudo_script "install_ruby-enterprise", <<-ENDSCRIPT
24
24
  if [[ ! `ruby --version 2> /dev/null` =~ "Ruby Enterprise Edition 2010.01" ]]; then
25
25
  arch=`uname -m`
26
26
  if [ "$arch" = "x86_64" ]; then
@@ -43,8 +43,8 @@ namespace :rubber do
43
43
  # The ubuntu rubygem package is woefully out of date, so install it manually
44
44
  after "rubber:install_packages", "rubber:base:install_rubygems"
45
45
  task :install_rubygems do
46
- ver = "1.3.5"
47
- src_url = "http://rubyforge.org/frs/download.php/60718/rubygems-#{ver}.tgz"
46
+ ver = "1.3.6"
47
+ src_url = "http://production.cf.rubygems.org/rubygems/rubygems-#{ver}.tgz"
48
48
  rubber.sudo_script 'install_rubygems', <<-ENDSCRIPT
49
49
  if [[ `gem --version 2>&1` != "#{ver}" ]]; then
50
50
  wget -qNP /tmp #{src_url}
@@ -59,7 +59,7 @@ namespace :rubber do
59
59
 
60
60
  after "rubber:install_packages", "rubber:base:configure_git" if scm == "git"
61
61
  task :configure_git do
62
- rubber.run_script 'configure_git', <<-ENDSCRIPT
62
+ rubber.sudo_script 'configure_git', <<-ENDSCRIPT
63
63
  if [[ "#{repository}" =~ "@" ]]; then
64
64
  # Get host key for src machine to prevent ssh from failing
65
65
  rm -f ~/.ssh/known_hosts
@@ -11,7 +11,6 @@ dns_providers:
11
11
  type: A
12
12
  ttl: 300
13
13
  zerigo:
14
- customer_id: 1234
15
14
  email: foo@bar.com
16
15
  token: hexxy
17
16
  type: A
@@ -75,10 +75,10 @@ cloud_providers:
75
75
 
76
76
  # REQUIRED: the ami and instance type for creating instances
77
77
  # The Ubuntu images at http://alestic.com/ work well
78
- # Ubuntu 9.04 Jaunty base install 32-bit 2.6.21.7-2.fc8xen ami-ccf615a5
79
- # Ubuntu 9.04 Jaunty base install 64-bit 2.6.21.7-2.fc8xen ami-eef61587
78
+ # Ubuntu 9.10 Karmic base install 32-bit ami-bb709dd2
79
+ # Ubuntu 9.10 Karmic base install 64-bit ami-55739e3c
80
80
  #
81
- image_id: ami-ccf615a5
81
+ image_id: ami-bb709dd2
82
82
  # m1.small or m1.large or m1.xlarge
83
83
  image_type: m1.small
84
84
 
@@ -143,7 +143,7 @@ ruby_prefix: "#{use_enterprise_ruby ? '/usr/local' : '/usr'}"
143
143
  packages: [postfix, build-essential, ruby-full, ruby1.8-dev, rake, irb, git-core]
144
144
 
145
145
  # OPTIONAL: gem sources to setup for rubygems
146
- gemsources: ["http://gemcutter.org", "http://gems.rubyforge.org/", "http://gems.github.com"]
146
+ # gemsources: ["http://rubygems.org", "http://gems.github.com"]
147
147
 
148
148
  # OPTIONAL: The gems to install on all instances
149
149
  # You can install a specific version of a gem by using a sub-array of gem, version
@@ -8,14 +8,14 @@ listen mongrel_proxy 0.0.0.0:<%= rubber_env.haproxy_listen_port %>
8
8
  # Since we do "fair" load balancing to the mongrels, we don't want to share mongrels
9
9
  # between haproxy instances otherwise we still end up with requests getting queued behind
10
10
  # slow ones.
11
- mongrel_hosts = rubber_instances.for_role('mongrel').collect { |i| i.name }
11
+ app_hosts = rubber_instances.for_role('app').collect { |i| i.name }
12
12
  haproxy_hosts = rubber_instances.for_role('haproxy').collect { |i| i.name }
13
13
  ports = (rubber_env.mongrel_base_port.to_i ... rubber_env.mongrel_base_port.to_i + rubber_env.mongrel_count.to_i).to_a
14
14
  host_count = haproxy_hosts.size
15
15
  host_mod = haproxy_hosts.sort.index(rubber_env.host) rescue 0
16
16
  ports = ports.find_all {|x| x % host_count == host_mod}
17
17
  %>
18
- <% mongrel_hosts.each do |server| %>
18
+ <% app_hosts.each do |server| %>
19
19
  <% ports.each do |port| %>
20
20
  server <%= server %>_<%= port %> <%= server %>:<%= port %> maxconn 1 check
21
21
  <% end %>
@@ -4,7 +4,7 @@
4
4
  %>
5
5
 
6
6
  <%
7
- backend_hosts = rubber_instances.for_role('passenger').collect { |i| i.name }
7
+ backend_hosts = rubber_instances.for_role('app').collect { |i| i.name }
8
8
  %>
9
9
 
10
10
  listen passenger_proxy 0.0.0.0:<%= rubber_env.web_port %>
@@ -54,12 +54,12 @@ namespace :rubber do
54
54
 
55
55
  desc "Start cruise control daemon"
56
56
  task :start, :roles => :cruise do
57
- run "/etc/init.d/cruise start"
57
+ sudo "/etc/init.d/cruise start"
58
58
  end
59
59
 
60
60
  desc "Stop cruise control daemon"
61
61
  task :stop, :roles => :cruise, :on_error => :continue do
62
- run "/etc/init.d/cruise stop"
62
+ sudo "/etc/init.d/cruise stop"
63
63
  end
64
64
 
65
65
  desc "Restart cruise control daemon"
@@ -9,10 +9,10 @@ namespace :rubber do
9
9
  # rubber auto-roles don't get defined till after all tasks are defined
10
10
  on :load do
11
11
  rubber.serial_task self, :serial_restart, :roles => :haproxy do
12
- run "/etc/init.d/haproxy restart"
12
+ sudo "/etc/init.d/haproxy restart"
13
13
  end
14
14
  rubber.serial_task self, :serial_reload, :roles => :haproxy do
15
- run "if ! ps ax | grep -v grep | grep -c haproxy &> /dev/null; then /etc/init.d/haproxy start; else /etc/init.d/haproxy reload; fi"
15
+ sudo "sh -c 'if ! ps ax | grep -v grep | grep -c haproxy &> /dev/null; then /etc/init.d/haproxy start; else /etc/init.d/haproxy reload; fi'"
16
16
  end
17
17
  end
18
18
 
@@ -22,12 +22,12 @@ namespace :rubber do
22
22
 
23
23
  desc "Stops the haproxy server"
24
24
  task :stop, :roles => :haproxy, :on_error => :continue do
25
- run "/etc/init.d/haproxy stop"
25
+ sudo "/etc/init.d/haproxy stop"
26
26
  end
27
27
 
28
28
  desc "Starts the haproxy server"
29
29
  task :start, :roles => :haproxy do
30
- run "/etc/init.d/haproxy start"
30
+ sudo "/etc/init.d/haproxy start"
31
31
  end
32
32
 
33
33
  desc "Restarts the haproxy server"
@@ -1,6 +1,11 @@
1
1
  <%
2
- @path = "/etc/syslog.conf"
3
- @post = "/etc/init.d/sysklogd restart"
4
- @additive = ["# rubber-haproxy-start", "# rubber-haproxy-end"]
2
+ is_rsyslog = File.exist?('/etc/init.d/rsyslog')
3
+ if is_rsyslog
4
+ @path = "/etc/rsyslog.d/haproxy.conf"
5
+ @post = "/etc/init.d/rsyslog restart"
6
+ else
7
+ @path = "/etc/syslog.conf"
8
+ @post = "/etc/init.d/sysklogd restart"
9
+ end
5
10
  %>
6
11
  local0.* /var/log/haproxy.log
@@ -1,7 +1,11 @@
1
1
  <%
2
+ is_rsyslog = File.exist?('/etc/init.d/rsyslog')
3
+ @skip = is_rsyslog
2
4
  @path = "/etc/default/syslogd"
3
5
  @post = "/etc/init.d/sysklogd restart"
4
6
  %>
7
+ # This file goes away once we remove backward compatibility for sysklog
8
+ # in alestic AMIs
5
9
  #
6
10
  # Top configuration file for syslogd
7
11
  #
@@ -36,7 +36,7 @@ namespace :rubber do
36
36
  after "rubber:setup_app_permissions", "rubber:jetty:setup_jetty_permissions"
37
37
 
38
38
  task :setup_jetty_permissions, :roles => :jetty do
39
- run "chown -R #{rubber_env.app_user}:#{rubber_env.app_user} #{rubber_env.jetty_dir}"
39
+ sudo "chown -R #{rubber_env.app_user}:#{rubber_env.app_user} #{rubber_env.jetty_dir}"
40
40
  end
41
41
 
42
42
  before "deploy:stop", "rubber:jetty:stop"
@@ -44,15 +44,15 @@ namespace :rubber do
44
44
  after "deploy:restart", "rubber:jetty:restart"
45
45
 
46
46
  task :restart, :roles => :jetty do
47
- run "#{rubber_env.jetty_dir}/bin/jetty.sh restart"
47
+ sudo "#{rubber_env.jetty_dir}/bin/jetty.sh restart"
48
48
  end
49
49
 
50
50
  task :stop, :roles => :jetty do
51
- run "#{rubber_env.jetty_dir}/bin/jetty.sh stop"
51
+ sudo "#{rubber_env.jetty_dir}/bin/jetty.sh stop"
52
52
  end
53
53
 
54
54
  task :start, :roles => :jetty do
55
- run "#{rubber_env.jetty_dir}/bin/jetty.sh start"
55
+ sudo "#{rubber_env.jetty_dir}/bin/jetty.sh start"
56
56
  end
57
57
 
58
58
  end
@@ -21,16 +21,16 @@ namespace :rubber do
21
21
 
22
22
 
23
23
  def mongrel_stop
24
- run "cd #{current_path} && mongrel_rails cluster::stop"
24
+ sudo "cd #{current_path} && mongrel_rails cluster::stop"
25
25
  sleep 5 # Give the graceful stop a chance to complete
26
- run "cd #{current_path} && mongrel_rails cluster::stop --force --clean"
26
+ sudo "cd #{current_path} && mongrel_rails cluster::stop --force --clean"
27
27
  end
28
28
 
29
29
  def mongrel_start
30
- run "cd #{current_path} && mongrel_rails cluster::start --clean"
30
+ sudo "cd #{current_path} && mongrel_rails cluster::start --clean"
31
31
  pid_cnt = rubber_env.mongrel_count
32
32
  logger.info "Waiting for mongrel pid files to show up"
33
- run "while ((`ls #{current_path}/tmp/pids/mongrel.*.pid 2> /dev/null | wc -l` < #{pid_cnt})); do sleep 1; done"
33
+ sudo "sh -c 'while ((`ls #{current_path}/tmp/pids/mongrel.*.pid 2> /dev/null | wc -l` < #{pid_cnt})); do sleep 1; done'"
34
34
  end
35
35
 
36
36
  # serial_task can only be called after roles defined - not normally a problem, but
@@ -95,7 +95,6 @@ set alert <%= rubber_env.admin_email %> only on {
95
95
  gid
96
96
  icmp
97
97
  invalid
98
- match
99
98
  nonexist
100
99
  permission
101
100
  resource
@@ -1,6 +1,7 @@
1
1
 
2
2
  namespace :rubber do
3
-
4
3
  namespace :monit do
4
+
5
+ namespace :monit do
5
6
 
6
7
  rubber.allow_optional_tasks(self)
7
8
 
@@ -13,12 +14,12 @@ namespace :rubber do
13
14
 
14
15
  desc "Start monit daemon monitoring"
15
16
  task :start do
16
- run "/etc/init.d/monit start"
17
+ sudo "/etc/init.d/monit start"
17
18
  end
18
19
 
19
20
  desc "Stop monit daemon monitoring"
20
21
  task :stop, :on_error => :continue do
21
- run "/etc/init.d/monit stop"
22
+ sudo "/etc/init.d/monit stop"
22
23
  end
23
24
 
24
25
  desc "Restart monit daemon monitoring"
@@ -14,7 +14,7 @@ namespace :rubber do
14
14
  Reconfigures munin
15
15
  DESC
16
16
  task :custom_install do
17
- rubber.run_script 'setup_munin_plugins', <<-ENDSCRIPT
17
+ rubber.sudo_script 'setup_munin_plugins', <<-ENDSCRIPT
18
18
  munin-node-configure --shell --remove-also > /tmp/setup-munin-plugins
19
19
  sh /tmp/setup-munin-plugins
20
20
  ENDSCRIPT
@@ -27,12 +27,12 @@ namespace :rubber do
27
27
 
28
28
  desc "Start munin system monitoring"
29
29
  task :start do
30
- run "/etc/init.d/munin-node start"
30
+ sudo "/etc/init.d/munin-node start"
31
31
  end
32
32
 
33
33
  desc "Stop munin system monitoring"
34
34
  task :stop, :on_error => :continue do
35
- run "/etc/init.d/munin-node stop"
35
+ sudo "/etc/init.d/munin-node stop"
36
36
  end
37
37
 
38
38
  desc "Restart munin system monitoring"
@@ -22,7 +22,7 @@
22
22
  File.open("#{plugin_dir}/#{plugin_name}", 'w') do |p|
23
23
  p.chmod(0755)
24
24
  p.puts('#!/bin/sh')
25
- p.puts("RUBBER_ENV=#{RUBBER_ENV} #{script} $*")
25
+ p.puts("RUBBER_ENV=#{RUBBER_ENV} #{rubber_env.ruby_prefix}/bin/ruby #{script} $*")
26
26
  end
27
27
  end
28
28
  %>
@@ -36,8 +36,7 @@ namespace :rubber do
36
36
  exists = capture("echo $(ls #{env.db_data_dir}/ 2> /dev/null)")
37
37
  if exists.strip.size == 0
38
38
  common_bootstrap("mysql_master")
39
- sudo "dpkg-reconfigure --frontend=noninteractive mysql-server-5.0"
40
- sleep 5
39
+
41
40
  pass = "identified by '#{env.db_pass}'" if env.db_pass
42
41
  sudo "mysql -u root -e 'create database #{env.db_name};'"
43
42
  sudo "mysql -u root -e \"grant all on *.* to '#{env.db_user}'@'%' #{pass};\""
@@ -57,8 +56,6 @@ namespace :rubber do
57
56
  exists = capture("echo $(ls #{env.db_data_dir}/ 2> /dev/null)")
58
57
  if exists.strip.size == 0
59
58
  common_bootstrap("mysql_slave")
60
- sudo "dpkg-reconfigure --frontend=noninteractive mysql-server-5.0"
61
- sleep 5
62
59
 
63
60
  master = rubber_instances.for_role("mysql_master").first
64
61
 
@@ -80,7 +77,7 @@ namespace :rubber do
80
77
  if source == master
81
78
  logger.info "Creating slave from a dump of master #{source_host}"
82
79
  sudo "mysql -u root -e \"change master to master_host='#{master_host}', master_user='#{env.db_replicator_user}' #{master_pass}\""
83
- sudo "mysqldump -u #{env.db_user} #{pass} -h #{source_host} --all-databases --master-data=1 | mysql -u root"
80
+ sudo "sh -c 'mysqldump -u #{env.db_user} #{pass} -h #{source_host} --all-databases --master-data=1 | mysql -u root'"
84
81
  else
85
82
  logger.info "Creating slave from a dump of slave #{source_host}"
86
83
  sudo "mysql -u #{env.db_user} #{pass} -h #{source_host} -e \"stop slave;\""
@@ -88,11 +85,15 @@ namespace :rubber do
88
85
  slave_config = Hash[*slave_status.scan(/([^\s:]+): ([^\s]*)/).flatten]
89
86
  log_file = slave_config['Master_Log_File']
90
87
  log_pos = slave_config['Read_Master_Log_Pos']
91
- sudo "mysqldump -u #{env.db_user} #{pass} -h #{source_host} --all-databases --master-data=1 | mysql -u root"
88
+ sudo "sh -c 'mysqldump -u #{env.db_user} #{pass} -h #{source_host} --all-databases --master-data=1 | mysql -u root'"
92
89
  sudo "mysql -u root -e \"change master to master_host='#{master_host}', master_user='#{env.db_replicator_user}', master_log_file='#{log_file}', master_log_pos=#{log_pos} #{master_pass}\""
93
90
  sudo "mysql -u #{env.db_user} #{pass} -h #{source_host} -e \"start slave;\""
94
91
  end
95
92
 
93
+ # this doesn't work without agent forwarding which sudo breaks, as well as not having your
94
+ # ec2 private key ssh-added on workstation
95
+ # sudo "scp -o \"StrictHostKeyChecking=no\" #{source_host}:/etc/mysql/debian.cnf /etc/mysql"
96
+
96
97
  sudo "mysql -u root -e \"flush privileges;\""
97
98
  sudo "mysql -u root -e \"start slave;\""
98
99
  end
@@ -113,7 +114,14 @@ namespace :rubber do
113
114
  deploy.update_code
114
115
 
115
116
  # Gen just the conf for the given mysql role
116
- rubber.run_config(:RUBBER_ENV => RUBBER_ENV, :FILE => "role/#{role}|role/db/my.cnf", :deploy_path => release_path)
117
+ rubber.run_config(:RUBBER_ENV => RUBBER_ENV, :FILE => "role/#{role}|role/db/", :FORCE => true, :deploy_path => release_path)
118
+
119
+ # reconfigure mysql so that it sets up data dir in /mnt with correct files
120
+ sudo_script 'reconfigure-mysql', <<-ENDSCRIPT
121
+ server_package=`dpkg -l | grep mysql-server-[0-9] | awk '{print $2}'`
122
+ dpkg-reconfigure --frontend=noninteractive $server_package
123
+ ENDSCRIPT
124
+ sleep 5
117
125
  end
118
126
 
119
127
  before "rubber:munin:custom_install", "rubber:mysql:custom_install_munin"
@@ -122,7 +130,7 @@ namespace :rubber do
122
130
  Installs some extra munin graphs
123
131
  DESC
124
132
  task :custom_install_munin, :roles => [:mysql_master, :mysql_slave] do
125
- rubber.run_script 'install_munin_mysql', <<-ENDSCRIPT
133
+ rubber.sudo_script 'install_munin_mysql', <<-ENDSCRIPT
126
134
  if [ ! -f /usr/share/munin/plugins/mysql_ ]; then
127
135
  wget -q -O /usr/share/munin/plugins/mysql_ http://github.com/kjellm/munin-mysql/raw/master/mysql_
128
136
  wget -q -O /etc/munin/plugin-conf.d/mysql_.conf http://github.com/kjellm/munin-mysql/raw/master/mysql_.conf
@@ -150,6 +158,96 @@ namespace :rubber do
150
158
  task :restart, :roles => [:mysql_master, :mysql_slave] do
151
159
  sudo "/etc/init.d/mysql restart"
152
160
  end
161
+
162
+ desc <<-DESC
163
+ Backup production database using rake task rubber:backup_db
164
+ DESC
165
+ task :backup, :roles => [:mysql_master, :mysql_slave] do
166
+ master_instances = rubber_instances.for_role("mysql_master")
167
+ slaves = rubber_instances.for_role("mysql_slave")
168
+
169
+ # Select only one instance for backup. Favor slave database.
170
+ selected_mysql_instance = (slaves+master_instances).first
171
+
172
+ task_name = "_backup_mysql_slave_#{selected_mysql_instance.full_name}".to_sym()
173
+ task task_name, :hosts => selected_mysql_instance.full_name do
174
+ run "cd #{current_path} && RUBBER_ENV=production RAILS_ENV=production RUBYOPT=rubygems BACKUP_DIR=/mnt/db_backups DBUSER=#{rubber_env.db_user} DBPASS=#{rubber_env.db_pass} DBNAME=#{rubber_env.db_name} DBHOST=#{selected_mysql_instance.full_name} rake rubber:backup_db"
175
+ end
176
+ send task_name
177
+ end
178
+
179
+ desc <<-DESC
180
+ Restore production database from s3 using rake task rubber:restore_db_s3
181
+ DESC
182
+ task :restore_s3, :roles => [:mysql_master, :mysql_slave] do
183
+ master_instances = rubber_instances.for_role("mysql_master")
184
+ slaves = rubber_instances.for_role("mysql_slave")
185
+
186
+ for instance in master_instances+slaves
187
+ task_name = "_restore_mysql_s3_#{instance.full_name}".to_sym()
188
+ task task_name, :hosts => instance.full_name do
189
+ run "cd #{current_path} && RUBBER_ENV=production RAILS_ENV=production RUBYOPT=rubygems BACKUP_DIR=/mnt/db_backups DBUSER=#{rubber_env.db_user} DBPASS=#{rubber_env.db_pass} DBNAME=#{rubber_env.db_name} DBHOST=#{instance.full_name} rake rubber:restore_db_s3"
190
+ end
191
+ send task_name
192
+ end
193
+ end
194
+
195
+ desc <<-DESC
196
+ Overwrite ec2 production database with export from local production database.
197
+ DESC
198
+ task :local_to_ec2, :roles => [:mysql_master, :mysql_slave] do
199
+ require 'yaml'
200
+ master_instances = rubber_instances.for_role("mysql_master")
201
+ slaves = rubber_instances.for_role("mysql_slave")
202
+
203
+ # Select only one instance for backup. Favor slave database.
204
+ selected_mysql_instance = (slaves+master_instances).first
205
+
206
+ task_name = "_load_local_to_#{selected_mysql_instance.full_name}".to_sym()
207
+ task task_name, :hosts => selected_mysql_instance.full_name do
208
+
209
+ # Dump Local to tmp folder
210
+ filename = "#{application}.local.#{Time.now.to_i}.sql.gz"
211
+ backup_file = "/tmp/#{filename}"
212
+ on_rollback { delete file }
213
+ FileUtils.mkdir_p(File.dirname(backup_file))
214
+
215
+ # Use database.yml to get connection params
216
+ db = YAML::load(ERB.new(IO.read(File.join(File.dirname(__FILE__), '..','database.yml'))).result)['production']
217
+ user = db['username']
218
+ pass = db['password']
219
+ pass = nil if pass and pass.strip.size == 0
220
+ host = db['host']
221
+ name = db['database']
222
+
223
+ raise "No db_backup_cmd defined in rubber.yml, cannot backup!" unless rubber_env.db_backup_cmd
224
+ db_backup_cmd = rubber_env.db_backup_cmd.gsub(/%([^%]+)%/, '#{\1}')
225
+ db_backup_cmd = eval('%Q{' + db_backup_cmd + '}')
226
+
227
+ # mysqldump (or backup app) needs to be in your path
228
+ puts "Backing up database with command:"
229
+ system(db_backup_cmd)
230
+ puts "Created backup: #{backup_file}"
231
+
232
+ # Upload Local to S3
233
+ cloud_provider = rubber_env.cloud_providers[rubber_env.cloud_provider]
234
+ s3_prefix = "db/"
235
+ backup_bucket = cloud_provider.backup_bucket
236
+ if backup_bucket
237
+ AWS::S3::Base.establish_connection!(:access_key_id => cloud_provider.access_key, :secret_access_key => cloud_provider.secret_access_key)
238
+ unless AWS::S3::Bucket.list.find { |b| b.name == backup_bucket }
239
+ AWS::S3::Bucket.create(backup_bucket)
240
+ end
241
+ dest = "#{s3_prefix}#{File.basename(backup_file)}"
242
+ puts "Saving db dump to S3: #{backup_bucket}:#{dest}"
243
+ AWS::S3::S3Object.store(dest, open(backup_file), backup_bucket)
244
+ end
245
+
246
+ send :restore_s3
247
+
248
+ end
249
+ send task_name
250
+ end
153
251
 
154
252
  end
155
253