rubber 1.8.0 → 1.9.0

Sign up to get free protection for your applications and to get access to all the features.
data/CHANGELOG CHANGED
@@ -1,3 +1,32 @@
1
+ 1.8.0
2
+ -----
3
+
4
+ Regenerate gemspec for version 1.8.0 <1a924f5> [Kevin Menard]
5
+ Bumped the version of Passenger to 3.0.1. <c419f34> [Kevin Menard]
6
+ Bumped the version of RVM. <52c9891> [Kevin Menard]
7
+ Merge branch 'tags_support' <3396ceb> [Kevin Menard]
8
+ Handle Amazon being slow to update. <e89a7ae> [Kevin Menard]
9
+ Added a comment to help people that change their ServerName from the default. <71ce99a> [Kevin Menard]
10
+ We need this perl package for the postgres munin plugins. <4ae9e6c> [Kevin Menard]
11
+ We need a newer amazon-ec2 for tags support. <90b20ba> [Kevin Menard]
12
+ Added support for creating and updating tags on EC2 instances. <d29eda9> [Kevin Menard]
13
+ Merge branch 'master' of github.com:wr0ngway/rubber <5ca9ce0> [Kevin Menard]
14
+ Fixed issue with RVM, ruby 1.9.2, and passenger munin plugins. <623400b> [Kevin Menard]
15
+ Set up munin for PostgreSQL. <4f9874e> [Kevin Menard]
16
+ Fixes for raiding ephemeral drives <7a0c0d2> [Matt Conway]
17
+ Watch attachment status for more reliable ebs mounts <44ebdae> [Matt Conway]
18
+ Updated config for PostgreSQL 9. <9ade749> [Kevin Menard]
19
+ Don't show Passenger 3 friendly error pages in production. <126093a> [Kevin Menard]
20
+ Updated config for redis 2.0. <f4ed84a> [Kevin Menard]
21
+ Support installation of redis 2.0. <e340091> [Kevin Menard]
22
+ Merge branch 'master' of https://github.com/caike/rubber <4269178> [Kevin Menard]
23
+ Fixed bug with RVM ruby location and updated to the latest version. <bd8805b> [Kevin Menard]
24
+ Improved passenger 3 setup. <b72c5c0> [Kevin Menard]
25
+ Changes passenger generator to use version 3.0.0 and removes deprecated option. <b64f623> [Kristopher Murata]
26
+ Skipping SSL certificate check on wget to github due to wget bug. More info: http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=409938 <3d0562e> [caike souza]
27
+ Merge branch 'master' of github.com:wr0ngway/rubber <48269b5> [Matt Conway]
28
+ allow rubber:config in test as well as dev <8954cd6> [Matt Conway]
29
+
1
30
  1.7.2
2
31
  -----
3
32
 
data/VERSION CHANGED
@@ -1 +1 @@
1
- 1.8.0
1
+ 1.9.0
@@ -59,7 +59,7 @@ namespace :rubber do
59
59
  # Use database.yml to get connection params
60
60
  db = YAML::load(ERB.new(IO.read(File.join(File.dirname(__FILE__), '..','database.yml'))).result)[RUBBER_ENV]
61
61
  user = db['username']
62
- pass = db['passsword']
62
+ pass = db['password']
63
63
  pass = nil if pass and pass.strip.size == 0
64
64
  host = db['host']
65
65
  name = db['database']
@@ -58,8 +58,8 @@ cloud_providers:
58
58
  #
59
59
  # NOTE: for some reason Capistrano requires you to have both the public and
60
60
  # the private key in the same folder, the public key should have the
61
- # extension ".pub". The easiest (only?) way to get your hand on this is to
62
- # create an instance, ssh into it, and copy the file /mnt/openssh_id.pub
61
+ # extension ".pub". The easiest way to get your hands on this is to create the
62
+ # public key from the private key: ssh-keygen -y -f gsg-keypair > gsg-keypair.pub
63
63
  #
64
64
  key_name: gsg-keypair
65
65
  key_file: "#{Dir[(File.expand_path('~') rescue '/root') + '/.ec2/*' + cloud_providers.aws.key_name].first}"
@@ -7,7 +7,7 @@ namespace :rubber do
7
7
 
8
8
  before "rubber:install_packages", "rubber:postgresql:setup_apt_sources"
9
9
 
10
- task :setup_apt_sources, :roles => :redis do
10
+ task :setup_apt_sources, :roles => [:postgresql_master, :postgresql_slave] do
11
11
  rsudo "add-apt-repository ppa:pitti/postgresql"
12
12
  end
13
13
 
@@ -17,7 +17,7 @@ namespace :rubber do
17
17
 
18
18
  task :custom_install, :roles => :sphinx do
19
19
  # install sphinx from source
20
- ver = "0.9.8.1"
20
+ ver = "0.9.9"
21
21
  rubber.sudo_script 'install_sphinx', <<-ENDSCRIPT
22
22
  # check if already installed
23
23
  if [ -x /usr/local/bin/searchd ]
@@ -32,7 +32,7 @@ namespace :rubber do
32
32
  TMPDIR=`mktemp -d` || exit 1
33
33
  cd $TMPDIR
34
34
  echo 'Downloading'
35
- wget -qN http://www.sphinxsearch.com/downloads/sphinx-#{ver}.tar.gz
35
+ wget -qN http://www.sphinxsearch.com/files/sphinx-#{ver}.tar.gz
36
36
  echo 'Unpacking'
37
37
  tar xf sphinx-#{ver}.tar.gz
38
38
  cd sphinx-#{ver}
@@ -59,7 +59,7 @@ namespace :rubber do
59
59
  # Use database.yml to get connection params
60
60
  db = YAML::load(ERB.new(IO.read(File.join(File.dirname(__FILE__), '..','database.yml'))).result)[RUBBER_ENV]
61
61
  user = db['username']
62
- pass = db['passsword']
62
+ pass = db['password']
63
63
  pass = nil if pass and pass.strip.size == 0
64
64
  host = db['host']
65
65
  name = db['database']
@@ -58,8 +58,8 @@ cloud_providers:
58
58
  #
59
59
  # NOTE: for some reason Capistrano requires you to have both the public and
60
60
  # the private key in the same folder, the public key should have the
61
- # extension ".pub". The easiest (only?) way to get your hand on this is to
62
- # create an instance, ssh into it, and copy the file /mnt/openssh_id.pub
61
+ # extension ".pub". The easiest way to get your hands on this is to create the
62
+ # public key from the private key: ssh-keygen -y -f gsg-keypair > gsg-keypair.pub
63
63
  #
64
64
  key_name: gsg-keypair
65
65
  key_file: "#{Dir[(File.expand_path('~') rescue '/root') + '/.ec2/*' + cloud_providers.aws.key_name].first}"
@@ -7,7 +7,7 @@ namespace :rubber do
7
7
 
8
8
  before "rubber:install_packages", "rubber:postgresql:setup_apt_sources"
9
9
 
10
- task :setup_apt_sources, :roles => :redis do
10
+ task :setup_apt_sources, :roles => [:postgresql_master, :postgresql_slave] do
11
11
  rsudo "add-apt-repository ppa:pitti/postgresql"
12
12
  end
13
13
 
@@ -17,7 +17,7 @@ namespace :rubber do
17
17
 
18
18
  task :custom_install, :roles => :sphinx do
19
19
  # install sphinx from source
20
- ver = "0.9.8.1"
20
+ ver = "0.9.9"
21
21
  rubber.sudo_script 'install_sphinx', <<-ENDSCRIPT
22
22
  # check if already installed
23
23
  if [ -x /usr/local/bin/searchd ]
@@ -32,7 +32,7 @@ namespace :rubber do
32
32
  TMPDIR=`mktemp -d` || exit 1
33
33
  cd $TMPDIR
34
34
  echo 'Downloading'
35
- wget -qN http://www.sphinxsearch.com/downloads/sphinx-#{ver}.tar.gz
35
+ wget -qN http://www.sphinxsearch.com/files/sphinx-#{ver}.tar.gz
36
36
  echo 'Unpacking'
37
37
  tar xf sphinx-#{ver}.tar.gz
38
38
  cd sphinx-#{ver}
@@ -32,7 +32,12 @@ module Rubber
32
32
  def read_config(file)
33
33
  Rubber.logger.debug{"Reading rubber configuration from #{file}"}
34
34
  if File.exist?(file)
35
- @items = Environment.combine(@items, YAML.load_file(file) || {})
35
+ begin
36
+ @items = Environment.combine(@items, YAML.load_file(file) || {})
37
+ rescue Exception => e
38
+ Rubber.logger.error{"Unable to read rubber configuration from #{file}"}
39
+ raise
40
+ end
36
41
  end
37
42
  end
38
43
 
@@ -2,7 +2,7 @@ namespace :rubber do
2
2
 
3
3
  desc <<-DESC
4
4
  Sets up persistent volumes in the cloud
5
- All volumes defined in rubber.yml will be created if neccessary, and attached/mounted on their associated instances
5
+ All volumes defined in rubber.yml will be created if necessary, and attached/mounted on their associated instances
6
6
  DESC
7
7
  required_task :setup_volumes do
8
8
  rubber_instances.filtered.each do |ic|
@@ -31,6 +31,11 @@ namespace :rubber do
31
31
  format = raid_spec['source_devices'].all? {|dev| created_vols.include?(dev)}
32
32
  setup_raid_volume(ic, raid_spec, format)
33
33
  end
34
+
35
+ lvm_volume_group_specs = env.lvm_volume_groups || []
36
+ lvm_volume_group_specs.each do |lvm_volume_group_spec|
37
+ setup_lvm_group(ic, lvm_volume_group_spec)
38
+ end
34
39
  end
35
40
  end
36
41
 
@@ -114,8 +119,8 @@ namespace :rubber do
114
119
  echo '#{vol_spec['device']} #{vol_spec['mount']} #{vol_spec['filesystem']} noatime 0 0 # rubber volume #{vol_id}' >> /etc/fstab
115
120
 
116
121
  #{('yes | mkfs -t ' + vol_spec['filesystem'] + ' ' + vol_spec['device']) if created}
117
- mkdir -p '#{vol_spec['mount']}'
118
- mount '#{vol_spec['mount']}'
122
+ #{("mkdir -p '#{vol_spec['mount']}'") if vol_spec['mount']}
123
+ #{("mount '#{vol_spec['mount']}'") if vol_spec['mount']}
119
124
  fi
120
125
  ENDSCRIPT
121
126
  end
@@ -195,7 +200,7 @@ namespace :rubber do
195
200
 
196
201
  def setup_raid_volume(ic, raid_spec, create=false)
197
202
  if create
198
- mdadm_init = "yes | mdadm --create #{raid_spec['device']} --level #{raid_spec['raid_level']} --raid-devices #{raid_spec['source_devices'].size} #{raid_spec['source_devices'].sort.join(' ')}"
203
+ mdadm_init = "yes | mdadm --create #{raid_spec['device']} --metadata=1.1 --level #{raid_spec['raid_level']} --raid-devices #{raid_spec['source_devices'].size} #{raid_spec['source_devices'].sort.join(' ')}"
199
204
  else
200
205
  mdadm_init = "yes | mdadm --assemble #{raid_spec['device']} #{raid_spec['source_devices'].sort.join(' ')}"
201
206
  end
@@ -219,7 +224,8 @@ namespace :rubber do
219
224
  # set reconstruction speed
220
225
  echo $((30*1024)) > /proc/sys/dev/raid/speed_limit_min
221
226
 
222
- echo 'DEVICE /dev/hd*[0-9] /dev/sd*[0-9]' > /etc/mdadm/mdadm.conf
227
+ echo 'MAILADDR #{rubber_env.admin_email}' > /etc/mdadm/mdadm.conf
228
+ echo 'DEVICE /dev/hd*[0-9] /dev/sd*[0-9]' >> /etc/mdadm/mdadm.conf
223
229
  mdadm --detail --scan >> /etc/mdadm/mdadm.conf
224
230
 
225
231
  mv /etc/rc.local /etc/rc.local.bak
@@ -235,6 +241,97 @@ namespace :rubber do
235
241
  _setup_raid_volume
236
242
  end
237
243
 
244
+ def setup_lvm_group(ic, lvm_volume_group_spec)
245
+ physical_volumes = lvm_volume_group_spec['physical_volumes'].kind_of?(Array) ? lvm_volume_group_spec['physical_volumes'] : [lvm_volume_group_spec['physical_volumes']]
246
+ volume_group_name = lvm_volume_group_spec['name']
247
+ extent_size = lvm_volume_group_spec['extent_size'] || 32
248
+
249
+ volumes = lvm_volume_group_spec['volumes'] || []
250
+
251
+ def create_logical_volume_in_bash(volume, volume_group_name)
252
+ device_name = "/dev/#{volume_group_name}/#{volume['name']}"
253
+
254
+ resize_command =
255
+ case volume['filesystem']
256
+ when 'xfs'
257
+ "xfs_growfs '#{volume['mount']}'"
258
+ when 'reiserfs'
259
+ "resize_reiserfs -f #{device_name}"
260
+ when 'jfs'
261
+ "mount -o remount,resize #{volume['mount']}"
262
+ when /^ext/
263
+ <<-RESIZE_COMMAND
264
+ umount #{device_name}
265
+ ext2resize #{device_name}
266
+ mount #{volume['mount']}
267
+ RESIZE_COMMAND
268
+ else
269
+ raise "Do not know how to resize filesystem '#{volume['filesystem']}'"
270
+ end
271
+
272
+ <<-ENDSCRIPT
273
+ # Add the logical volume mount point to /etc/fstab.
274
+ if ! grep -q '#{volume['mount']}' /etc/fstab; then
275
+ if mount | grep -q '#{volume['mount']}'; then
276
+ umount '#{volume['mount']}'
277
+ fi
278
+
279
+ mv /etc/fstab /etc/fstab.bak
280
+ cat /etc/fstab.bak | grep -v '#{volume['mount']}' > /etc/fstab
281
+ echo '#{device_name} #{volume['mount']} #{volume['filesystem']} noatime 0 0 # rubber LVM volume' >> /etc/fstab
282
+ fi
283
+
284
+ # Check if the logical volume exists or not.
285
+ if ! lvdisplay #{device_name} >> /dev/null 2>&1; then
286
+ # Create the logical volume.
287
+ lvcreate -L #{volume['size']}G -i #{volume['stripes'] || 1} -n#{volume['name']} #{volume_group_name}
288
+
289
+ # Format the logical volume.
290
+ yes | mkfs -t #{volume['filesystem']} #{volume['filesystem_opts']} #{device_name}
291
+
292
+ # Create the mount point.
293
+ mkdir -p '#{volume['mount']}'
294
+
295
+ # Mount the volume.
296
+ mount '#{volume['mount']}'
297
+ else
298
+ # Try to extend the volume size.
299
+ if lvextend -L #{volume['size']}G -i #{volume['stripes'] || 1} #{device_name} >> /dev/null 2>&1; then
300
+
301
+ # If we actually resized the volume, then we need to resize the filesystem.
302
+ #{resize_command}
303
+ fi
304
+ fi
305
+ ENDSCRIPT
306
+ end
307
+
308
+ task :_setup_lvm_group, :hosts => ic.external_ip do
309
+ rubber.sudo_script 'setup_lvm_group', <<-ENDSCRIPT
310
+ # Check and see if the physical volume is already set up for LVM. If not, initialize it to be so.
311
+ for device in #{physical_volumes.join(' ')}
312
+ do
313
+ if ! pvdisplay $device >> /dev/null 2>&1; then
314
+ pvcreate $device
315
+
316
+ # See if the volume group already exists. If so, add the new physical volume to it.
317
+ if vgdisplay #{volume_group_name} >> /dev/null 2>&1; then
318
+ vgextend #{volume_group_name} $device
319
+ fi
320
+ fi
321
+ done
322
+
323
+ # If the volume group does not exist yet, construct it with all the physical volumes.
324
+ if ! vgdisplay #{volume_group_name} >> /dev/null 2>&1; then
325
+ vgcreate #{volume_group_name} #{physical_volumes.join(' ')} -s #{extent_size}
326
+ fi
327
+
328
+ # Set up each of the logical volumes.
329
+ #{volumes.collect { |volume| create_logical_volume_in_bash(volume, volume_group_name) }.join("\n\n") }
330
+ ENDSCRIPT
331
+ end
332
+ _setup_lvm_group
333
+ end
334
+
238
335
  def destroy_volume(volume_id)
239
336
 
240
337
  logger.info "Detaching volume #{volume_id}"
metadata CHANGED
@@ -1,13 +1,13 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: rubber
3
3
  version: !ruby/object:Gem::Version
4
- hash: 55
4
+ hash: 51
5
5
  prerelease: false
6
6
  segments:
7
7
  - 1
8
- - 8
8
+ - 9
9
9
  - 0
10
- version: 1.8.0
10
+ version: 1.9.0
11
11
  platform: ruby
12
12
  authors:
13
13
  - Matt Conway
@@ -15,7 +15,7 @@ autorequire:
15
15
  bindir: bin
16
16
  cert_chain: []
17
17
 
18
- date: 2010-12-07 00:00:00 -05:00
18
+ date: 2011-01-26 00:00:00 -05:00
19
19
  default_executable: vulcanize
20
20
  dependencies:
21
21
  - !ruby/object:Gem::Dependency