rubber 1.10.2 → 1.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25)
  1. data/CHANGELOG +11 -0
  2. data/VERSION +1 -1
  3. data/generators/vulcanize/templates/base/Rakefile +17 -13
  4. data/generators/vulcanize/templates/base/config/rubber/deploy-setup.rb +2 -0
  5. data/generators/vulcanize/templates/base/config/rubber/rubber-rvm.yml +5 -1
  6. data/generators/vulcanize/templates/mysql/config/rubber/deploy-mysql.rb +2 -2
  7. data/generators/vulcanize/templates/passenger/config/rubber/rubber-passenger.yml +1 -1
  8. data/generators/vulcanize/templates/redis/config/rubber/deploy-redis.rb +12 -5
  9. data/generators/vulcanize/templates/redis/config/rubber/role/redis/redis.conf +114 -8
  10. data/generators/vulcanize/templates/redis/config/rubber/rubber-redis.yml +1 -1
  11. data/generators/vulcanize/templates/sphinx/config/rubber/common/sphinx.yml +1 -1
  12. data/generators/vulcanize/templates/sphinx/config/rubber/role/sphinx/monit-sphinx.conf +1 -1
  13. data/lib/generators/vulcanize/templates/base/Rakefile +17 -13
  14. data/lib/generators/vulcanize/templates/base/config/rubber/deploy-setup.rb +2 -0
  15. data/lib/generators/vulcanize/templates/base/config/rubber/rubber-rvm.yml +5 -1
  16. data/lib/generators/vulcanize/templates/mysql/config/rubber/deploy-mysql.rb +2 -2
  17. data/lib/generators/vulcanize/templates/passenger/config/rubber/rubber-passenger.yml +1 -1
  18. data/lib/generators/vulcanize/templates/redis/config/rubber/deploy-redis.rb +12 -5
  19. data/lib/generators/vulcanize/templates/redis/config/rubber/role/redis/redis.conf +114 -8
  20. data/lib/generators/vulcanize/templates/redis/config/rubber/rubber-redis.yml +1 -1
  21. data/lib/generators/vulcanize/templates/sphinx/config/rubber/common/sphinx.yml +1 -1
  22. data/lib/generators/vulcanize/templates/sphinx/config/rubber/role/sphinx/monit-sphinx.conf +1 -1
  23. data/lib/rubber/recipes/rubber/volumes.rb +28 -9
  24. data/lib/rubber/tasks/rubber.rb +1 -1
  25. metadata +6 -19
data/CHANGELOG CHANGED
@@ -1,3 +1,14 @@
+ 1.10.2
+ -----
+
+ Regenerate gemspec for version 1.10.2 <4f49701> [Kevin Menard]
+ Merge branch 'master' of github.com:wr0ngway/rubber <e32671f> [Kevin Menard]
+ Fixed issue #41: rvm installation exits prematurely. <8a4bced> [Kevin Menard]
+ Fixed issue #28: postgresql with password fails <97270e3> [Kevin Menard]
+ fix file write for db restoration <51b1117> [Matt Conway]
+ Bumped passenger version. <2547950> [Kevin Menard]
+ Note that a release had been yanked. <d636389> [Kevin Menard]
+
  1.10.1
  -----

data/VERSION CHANGED
@@ -1 +1 @@
- 1.10.2
+ 1.11.0
data/generators/vulcanize/templates/base/Rakefile CHANGED
@@ -2,22 +2,26 @@ require 'rake'
  require 'rake/testtask'
  require 'rake/rdoctask'

- RAILS_LOADER
+ if ARGV.grep(/rubber:/).empty?
+ RAILS_LOADER

- env = ENV['RUBBER_ENV'] ||= (ENV['RAILS_ENV'] || 'development')
- RAILS_ENV = ENV['RAILS_ENV'] = env
- root = File.dirname(__FILE__)
+ env = ENV['RUBBER_ENV'] ||= (ENV['RAILS_ENV'] || 'development')
+ RAILS_ENV = ENV['RAILS_ENV'] = env
+ root = File.dirname(__FILE__)

- require 'rubber'
+ require 'rubber'

- Rubber::initialize(root, env)
+ Rubber::initialize(root, env)

- require 'rubber/tasks/rubber'
+ require 'rubber/tasks/rubber'

- task :console do
- ARGV.clear
- require "irb"
- IRB.start
- end
+ task :console do
+ ARGV.clear
+ require "irb"
+ IRB.start
+ end

- RAILS_TASKS
+ RAILS_TASKS
+ else
+ load File.join(File.dirname(__FILE__), 'lib/tasks/rubber.rake')
+ end
data/generators/vulcanize/templates/base/config/rubber/deploy-setup.rb CHANGED
@@ -37,6 +37,8 @@ namespace :rubber do

  bash ./scripts/install

+ sed -i 's/rubygems_version=.*/rubygems_version=#{rubber_env.rubygems_version}/' #{rubber_env.rvm_prefix}/config/db
+
  #
  # end rvm install script

data/generators/vulcanize/templates/base/config/rubber/rubber-rvm.yml CHANGED
@@ -12,12 +12,16 @@
  packages: [build-essential, git-core, subversion, curl, autoconf, bison, ruby, zlib1g-dev, libssl-dev, libreadline5-dev, libxml2-dev]

  # REQUIRED: the version of rvm itself
- rvm_version: 1.2.6
+ rvm_version: 1.2.7

  # REQUIRED: Set to the rvm version string for the ruby version you wish to use
  # Run "rvm list known" to see the list of possible options
  rvm_ruby: 1.9.2

+ # REQUIRED: Set to version of RubyGems you want RVM to install. Note that older RVMs do not
+ # know about newer RubyGems. So you may need to bump your rvm_version to get the latest RubyGems release.
+ rubygems_version: 1.5.2
+
  # When rvm is installed as root, and you set the default, it symlinks ruby executables
  # into /usr/local/rvm
  ruby_prefix: /usr/local/rvm
data/generators/vulcanize/templates/mysql/config/rubber/deploy-mysql.rb CHANGED
@@ -80,7 +80,7 @@ namespace :rubber do
  logger.info "Creating slave from a dump of master #{source_host}"
  rubber.sudo_script "create_slave_db_from_master", <<-ENDSCRIPT
  mysql -u root -e "change master to master_host='#{master_host}', master_user='#{env.db_replicator_user}' #{master_pass}"
- mysqldump -u #{env.db_user} #{env.db_pass.nil? ? '' : '--password ' + env.db_pass} -h #{source_host} --all-databases --master-data=1 | mysql -u root
+ mysqldump -u #{env.db_user} #{env.db_pass.nil? ? '' : '--password=' + env.db_pass} -h #{source_host} --all-databases --master-data=1 | mysql -u root
  ENDSCRIPT
  else
  logger.info "Creating slave from a dump of slave #{source_host}"
@@ -90,7 +90,7 @@ namespace :rubber do
  log_file = slave_config['Master_Log_File']
  log_pos = slave_config['Read_Master_Log_Pos']
  rubber.sudo_script "create_slave_db_from_slave", <<-ENDSCRIPT
- mysqldump -u #{env.db_user} --password #{env.db_pass} -h #{source_host} --all-databases --master-data=1 | mysql -u root
+ mysqldump -u #{env.db_user} #{env.db_pass.nil? ? '' : '--password=' + env.db_pass} -h #{source_host} --all-databases --master-data=1 | mysql -u root
  mysql -u root -e "change master to master_host='#{master_host}', master_user='#{env.db_replicator_user}', master_log_file='#{log_file}', master_log_pos=#{log_pos} #{master_pass}"
  mysql -u #{env.db_user} --password #{env.db_pass} -h #{source_host} -e "start slave;"
  ENDSCRIPT
data/generators/vulcanize/templates/passenger/config/rubber/rubber-passenger.yml CHANGED
@@ -1,4 +1,4 @@
- passenger_version: 3.0.2
+ passenger_version: 3.0.4
  passenger_root: "#{rvm_gem_home}/gems/passenger-#{passenger_version}"
  passenger_ruby: "#{rvm_prefix}/wrappers/#{`bash -l -c 'rvm strings #{rvm_ruby}'`.strip}/ruby"
  passenger_lib: "#{passenger_root}/ext/apache2/mod_passenger.so"
data/generators/vulcanize/templates/redis/config/rubber/deploy-redis.rb CHANGED
@@ -26,11 +26,11 @@ namespace :rubber do
  # Install the binaries.
  /etc/init.d/redis-server stop

- mv redis-benchmark /usr/bin/
- mv redis-check-aof /usr/bin/
- mv redis-check-dump /usr/bin/
- mv redis-cli /usr/bin/
- mv redis-server /usr/bin/
+ mv src/redis-benchmark /usr/bin/
+ mv src/redis-check-aof /usr/bin/
+ mv src/redis-check-dump /usr/bin/
+ mv src/redis-cli /usr/bin/
+ mv src/redis-server /usr/bin/

  /etc/init.d/redis-server start

@@ -49,6 +49,13 @@ namespace :rubber do
  mkdir -p #{rubber_env.redis_db_dir}
  chown -R redis:redis #{rubber_env.redis_db_dir}
  ENDSCRIPT
+
+ # After everything installed on machines, we need the source tree
+ # on hosts in order to run rubber:config for bootstrapping the db
+ rubber.update_code_for_bootstrap
+
+ # Gen just the conf for cassandra
+ rubber.run_config(:RUBBER_ENV => RUBBER_ENV, :FILE => "role/redis", :FORCE => true, :deploy_path => release_path)
  end

  desc "Stops the redis server"
data/generators/vulcanize/templates/redis/config/rubber/role/redis/redis.conf CHANGED
@@ -29,7 +29,8 @@ daemonize yes
  # default. You can specify a custom pid file location here.
  pidfile /var/run/redis.pid

- # Accept connections on the specified port, default is 6379
+ # Accept connections on the specified port, default is 6379.
+ # If port 0 is specified Redis will not listen on a TCP socket.
  port 6379

  # If you want you can bind a single interface, if the bind option is not
@@ -37,6 +38,12 @@ port 6379
  #
  # bind 127.0.0.1

+ # Specify the path for the unix socket that will be used to listen for
+ # incoming connections. There is no default, so Redis will not listen
+ # on a unix socket when not specified.
+ #
+ # unixsocket /tmp/redis.sock
+
  # Close the connection after a client is idle for N seconds (0 to disable)
  timeout 300

@@ -53,6 +60,16 @@ loglevel notice
  # output for logging but daemonize, logs will be sent to /dev/null
  logfile /var/log/redis/redis-server.log

+ # To enable logging to the system logger, just set 'syslog-enabled' to yes,
+ # and optionally update the other syslog parameters to suit your needs.
+ # syslog-enabled no
+
+ # Specify the syslog identity.
+ # syslog-ident redis
+
+ # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
+ # syslog-facility local0
+
  # Set the number of databases. The default database is DB 0, you can select
  # a different one on a per-connection basis using SELECT <dbid> where
  # dbid is a number between 0 and 'databases'-1
@@ -119,6 +136,20 @@ slaveof <%= redis_master.name %> 6379
  # refuse the slave request.
  #
  # masterauth <master-password>
+
+
+ # When a slave lost the connection with the master, or when the replication
+ # is still in progress, the slave can act in two different ways:
+ #
+ # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
+ # still reply to client requests, possibly with out of data data, or the
+ # data set may just be empty if this is the first synchronization.
+ #
+ # 2) if slave-serve-stale data is set to 'no' the slave will reply with
+ # an error "SYNC with master in progress" to all the kind of commands
+ # but to INFO and SLAVEOF.
+ #
+ slave-serve-stale-data yes
  <%- end %>

  ################################## SECURITY ###################################
@@ -136,6 +167,22 @@ slaveof <%= redis_master.name %> 6379
  #
  # requirepass foobared

+ # Command renaming.
+ #
+ # It is possilbe to change the name of dangerous commands in a shared
+ # environment. For instance the CONFIG command may be renamed into something
+ # of hard to guess so that it will be still available for internal-use
+ # tools but not available for general clients.
+ #
+ # Example:
+ #
+ # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
+ #
+ # It is also possilbe to completely kill a command renaming it into
+ # an empty string:
+ #
+ # rename-command CONFIG ""
+
  ################################### LIMITS ####################################

  # Set the max number of connected clients at the same time. By default there
@@ -165,6 +212,37 @@ slaveof <%= redis_master.name %> 6379
  #
  # maxmemory <bytes>

+ # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
+ # is reached? You can select among five behavior:
+ #
+ # volatile-lru -> remove the key with an expire set using an LRU algorithm
+ # allkeys-lru -> remove any key accordingly to the LRU algorithm
+ # volatile-random -> remove a random key with an expire set
+ # allkeys->random -> remove a random key, any key
+ # volatile-ttl -> remove the key with the nearest expire time (minor TTL)
+ # noeviction -> don't expire at all, just return an error on write operations
+ #
+ # Note: with all the kind of policies, Redis will return an error on write
+ # operations, when there are not suitable keys for eviction.
+ #
+ # At the date of writing this commands are: set setnx setex append
+ # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
+ # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
+ # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
+ # getset mset msetnx exec sort
+ #
+ # The default is:
+ #
+ # maxmemory-policy volatile-lru
+
+ # LRU and minimal TTL algorithms are not precise algorithms but approximated
+ # algorithms (in order to save memory), so you can select as well the sample
+ # size to check. For instance for default Redis will check three keys and
+ # pick the one that was used less recently, you can change the sample size
+ # using the following configuration directive.
+ #
+ # maxmemory-samples 3
+
  ############################## APPEND ONLY MODE ###############################

  # By default Redis asynchronously dumps the dataset on disk. If you can live
@@ -212,6 +290,26 @@ appendonly no
  appendfsync everysec
  # appendfsync no

+ # When the AOF fsync policy is set to always or everysec, and a background
+ # saving process (a background save or AOF log background rewriting) is
+ # performing a lot of I/O against the disk, in some Linux configurations
+ # Redis may block too long on the fsync() call. Note that there is no fix for
+ # this currently, as even performing fsync in a different thread will block
+ # our synchronous write(2) call.
+ #
+ # In order to mitigate this problem it's possible to use the following option
+ # that will prevent fsync() from being called in the main process while a
+ # BGSAVE or BGREWRITEAOF is in progress.
+ #
+ # This means that while another child is saving the durability of Redis is
+ # the same as "appendfsync none", that in pratical terms means that it is
+ # possible to lost up to 30 seconds of log in the worst scenario (with the
+ # default Linux settings).
+ #
+ # If you have latency problems turn this to "yes". Otherwise leave it as
+ # "no" that is the safest pick from the point of view of durability.
+ no-appendfsync-on-rewrite no
+
  ################################ VIRTUAL MEMORY ###############################

  # Virtual Memory allows Redis to work with datasets bigger than the actual
@@ -285,17 +383,25 @@ vm-max-threads 4

  ############################### ADVANCED CONFIG ###############################

- # Glue small output buffers together in order to send small replies in a
- # single TCP packet. Uses a bit more CPU but most of the times it is a win
- # in terms of number of queries per second. Use 'yes' if unsure.
- glueoutputbuf yes
-
  # Hashes are encoded in a special way (much more memory efficient) when they
  # have at max a given numer of elements, and the biggest element does not
  # exceed a given threshold. You can configure this limits with the following
  # configuration directives.
- hash-max-zipmap-entries 64
- hash-max-zipmap-value 512
+ hash-max-zipmap-entries 512
+ hash-max-zipmap-value 64
+
+ # Similarly to hashes, small lists are also encoded in a special way in order
+ # to save a lot of space. The special representation is only used when
+ # you are under the following limits:
+ list-max-ziplist-entries 512
+ list-max-ziplist-value 64
+
+ # Sets have a special encoding in just one case: when a set is composed
+ # of just strings that happens to be integers in radix 10 in the range
+ # of 64 bit signed integers.
+ # The following configuration setting sets the limit in the size of the
+ # set in order to use this special memory saving encoding.
+ set-max-intset-entries 512

  # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
  # order to help rehashing the main Redis hash table (the one mapping top-level
data/generators/vulcanize/templates/redis/config/rubber/rubber-redis.yml CHANGED
@@ -1,4 +1,4 @@
- redis_server_version: 2.0.4
+ redis_server_version: 2.2.1

  redis_db_dir: /mnt/redis

data/generators/vulcanize/templates/sphinx/config/rubber/common/sphinx.yml CHANGED
@@ -12,7 +12,7 @@
  # pid file:: log/searchd.#{environment}.pid
  # searchd files:: db/sphinx/#{environment}/
  # address:: 127.0.0.1
- # port:: 3312
+ # port:: 9312
  # allow star:: false
  # min prefix length:: 1
  # min infix length:: 1
data/generators/vulcanize/templates/sphinx/config/rubber/role/sphinx/monit-sphinx.conf CHANGED
@@ -10,4 +10,4 @@ check process sphinx with pidfile <%= pidfile %>
  group sphinx-<%= RUBBER_ENV %>
  start program = "/usr/bin/sudo -H -u <%= rubber_env.app_user %> bash -l -c '<%= start_program %>'"
  stop program = "/usr/bin/sudo -H -u <%= rubber_env.app_user %> bash -l -c '<%= stop_program %>'"
- if failed host <%= rubber_env.host %> port 3312 with timeout 5 seconds for 5 cycles then restart
+ if failed host <%= rubber_env.host %> port 9312 with timeout 5 seconds for 5 cycles then restart
data/lib/generators/vulcanize/templates/base/Rakefile CHANGED
@@ -2,22 +2,26 @@ require 'rake'
  require 'rake/testtask'
  require 'rake/rdoctask'

- RAILS_LOADER
+ if ARGV.grep(/rubber:/).empty?
+ RAILS_LOADER

- env = ENV['RUBBER_ENV'] ||= (ENV['RAILS_ENV'] || 'development')
- RAILS_ENV = ENV['RAILS_ENV'] = env
- root = File.dirname(__FILE__)
+ env = ENV['RUBBER_ENV'] ||= (ENV['RAILS_ENV'] || 'development')
+ RAILS_ENV = ENV['RAILS_ENV'] = env
+ root = File.dirname(__FILE__)

- require 'rubber'
+ require 'rubber'

- Rubber::initialize(root, env)
+ Rubber::initialize(root, env)

- require 'rubber/tasks/rubber'
+ require 'rubber/tasks/rubber'

- task :console do
- ARGV.clear
- require "irb"
- IRB.start
- end
+ task :console do
+ ARGV.clear
+ require "irb"
+ IRB.start
+ end

- RAILS_TASKS
+ RAILS_TASKS
+ else
+ load File.join(File.dirname(__FILE__), 'lib/tasks/rubber.rake')
+ end
data/lib/generators/vulcanize/templates/base/config/rubber/deploy-setup.rb CHANGED
@@ -37,6 +37,8 @@ namespace :rubber do

  bash ./scripts/install

+ sed -i 's/rubygems_version=.*/rubygems_version=#{rubber_env.rubygems_version}/' #{rubber_env.rvm_prefix}/config/db
+
  #
  # end rvm install script

data/lib/generators/vulcanize/templates/base/config/rubber/rubber-rvm.yml CHANGED
@@ -12,12 +12,16 @@
  packages: [build-essential, git-core, subversion, curl, autoconf, bison, ruby, zlib1g-dev, libssl-dev, libreadline5-dev, libxml2-dev]

  # REQUIRED: the version of rvm itself
- rvm_version: 1.2.6
+ rvm_version: 1.2.7

  # REQUIRED: Set to the rvm version string for the ruby version you wish to use
  # Run "rvm list known" to see the list of possible options
  rvm_ruby: 1.9.2

+ # REQUIRED: Set to version of RubyGems you want RVM to install. Note that older RVMs do not
+ # know about newer RubyGems. So you may need to bump your rvm_version to get the latest RubyGems release.
+ rubygems_version: 1.5.2
+
  # When rvm is installed as root, and you set the default, it symlinks ruby executables
  # into /usr/local/rvm
  ruby_prefix: /usr/local/rvm
data/lib/generators/vulcanize/templates/mysql/config/rubber/deploy-mysql.rb CHANGED
@@ -80,7 +80,7 @@ namespace :rubber do
  logger.info "Creating slave from a dump of master #{source_host}"
  rubber.sudo_script "create_slave_db_from_master", <<-ENDSCRIPT
  mysql -u root -e "change master to master_host='#{master_host}', master_user='#{env.db_replicator_user}' #{master_pass}"
- mysqldump -u #{env.db_user} #{env.db_pass.nil? ? '' : '--password ' + env.db_pass} -h #{source_host} --all-databases --master-data=1 | mysql -u root
+ mysqldump -u #{env.db_user} #{env.db_pass.nil? ? '' : '--password=' + env.db_pass} -h #{source_host} --all-databases --master-data=1 | mysql -u root
  ENDSCRIPT
  else
  logger.info "Creating slave from a dump of slave #{source_host}"
@@ -90,7 +90,7 @@ namespace :rubber do
  log_file = slave_config['Master_Log_File']
  log_pos = slave_config['Read_Master_Log_Pos']
  rubber.sudo_script "create_slave_db_from_slave", <<-ENDSCRIPT
- mysqldump -u #{env.db_user} --password #{env.db_pass} -h #{source_host} --all-databases --master-data=1 | mysql -u root
+ mysqldump -u #{env.db_user} #{env.db_pass.nil? ? '' : '--password=' + env.db_pass} -h #{source_host} --all-databases --master-data=1 | mysql -u root
  mysql -u root -e "change master to master_host='#{master_host}', master_user='#{env.db_replicator_user}', master_log_file='#{log_file}', master_log_pos=#{log_pos} #{master_pass}"
  mysql -u #{env.db_user} --password #{env.db_pass} -h #{source_host} -e "start slave;"
  ENDSCRIPT
data/lib/generators/vulcanize/templates/passenger/config/rubber/rubber-passenger.yml CHANGED
@@ -1,4 +1,4 @@
- passenger_version: 3.0.2
+ passenger_version: 3.0.4
  passenger_root: "#{rvm_gem_home}/gems/passenger-#{passenger_version}"
  passenger_ruby: "#{rvm_prefix}/wrappers/#{`bash -l -c 'rvm strings #{rvm_ruby}'`.strip}/ruby"
  passenger_lib: "#{passenger_root}/ext/apache2/mod_passenger.so"
data/lib/generators/vulcanize/templates/redis/config/rubber/deploy-redis.rb CHANGED
@@ -26,11 +26,11 @@ namespace :rubber do
  # Install the binaries.
  /etc/init.d/redis-server stop

- mv redis-benchmark /usr/bin/
- mv redis-check-aof /usr/bin/
- mv redis-check-dump /usr/bin/
- mv redis-cli /usr/bin/
- mv redis-server /usr/bin/
+ mv src/redis-benchmark /usr/bin/
+ mv src/redis-check-aof /usr/bin/
+ mv src/redis-check-dump /usr/bin/
+ mv src/redis-cli /usr/bin/
+ mv src/redis-server /usr/bin/

  /etc/init.d/redis-server start

@@ -49,6 +49,13 @@ namespace :rubber do
  mkdir -p #{rubber_env.redis_db_dir}
  chown -R redis:redis #{rubber_env.redis_db_dir}
  ENDSCRIPT
+
+ # After everything installed on machines, we need the source tree
+ # on hosts in order to run rubber:config for bootstrapping the db
+ rubber.update_code_for_bootstrap
+
+ # Gen just the conf for cassandra
+ rubber.run_config(:RUBBER_ENV => RUBBER_ENV, :FILE => "role/redis", :FORCE => true, :deploy_path => release_path)
  end

  desc "Stops the redis server"
data/lib/generators/vulcanize/templates/redis/config/rubber/role/redis/redis.conf CHANGED
@@ -29,7 +29,8 @@ daemonize yes
  # default. You can specify a custom pid file location here.
  pidfile /var/run/redis.pid

- # Accept connections on the specified port, default is 6379
+ # Accept connections on the specified port, default is 6379.
+ # If port 0 is specified Redis will not listen on a TCP socket.
  port 6379

  # If you want you can bind a single interface, if the bind option is not
@@ -37,6 +38,12 @@ port 6379
  #
  # bind 127.0.0.1

+ # Specify the path for the unix socket that will be used to listen for
+ # incoming connections. There is no default, so Redis will not listen
+ # on a unix socket when not specified.
+ #
+ # unixsocket /tmp/redis.sock
+
  # Close the connection after a client is idle for N seconds (0 to disable)
  timeout 300

@@ -53,6 +60,16 @@ loglevel notice
  # output for logging but daemonize, logs will be sent to /dev/null
  logfile /var/log/redis/redis-server.log

+ # To enable logging to the system logger, just set 'syslog-enabled' to yes,
+ # and optionally update the other syslog parameters to suit your needs.
+ # syslog-enabled no
+
+ # Specify the syslog identity.
+ # syslog-ident redis
+
+ # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
+ # syslog-facility local0
+
  # Set the number of databases. The default database is DB 0, you can select
  # a different one on a per-connection basis using SELECT <dbid> where
  # dbid is a number between 0 and 'databases'-1
@@ -119,6 +136,20 @@ slaveof <%= redis_master.name %> 6379
  # refuse the slave request.
  #
  # masterauth <master-password>
+
+
+ # When a slave lost the connection with the master, or when the replication
+ # is still in progress, the slave can act in two different ways:
+ #
+ # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
+ # still reply to client requests, possibly with out of data data, or the
+ # data set may just be empty if this is the first synchronization.
+ #
+ # 2) if slave-serve-stale data is set to 'no' the slave will reply with
+ # an error "SYNC with master in progress" to all the kind of commands
+ # but to INFO and SLAVEOF.
+ #
+ slave-serve-stale-data yes
  <%- end %>

  ################################## SECURITY ###################################
@@ -136,6 +167,22 @@ slaveof <%= redis_master.name %> 6379
  #
  # requirepass foobared

+ # Command renaming.
+ #
+ # It is possilbe to change the name of dangerous commands in a shared
+ # environment. For instance the CONFIG command may be renamed into something
+ # of hard to guess so that it will be still available for internal-use
+ # tools but not available for general clients.
+ #
+ # Example:
+ #
+ # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
+ #
+ # It is also possilbe to completely kill a command renaming it into
+ # an empty string:
+ #
+ # rename-command CONFIG ""
+
  ################################### LIMITS ####################################

  # Set the max number of connected clients at the same time. By default there
@@ -165,6 +212,37 @@ slaveof <%= redis_master.name %> 6379
  #
  # maxmemory <bytes>

+ # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
+ # is reached? You can select among five behavior:
+ #
+ # volatile-lru -> remove the key with an expire set using an LRU algorithm
+ # allkeys-lru -> remove any key accordingly to the LRU algorithm
+ # volatile-random -> remove a random key with an expire set
+ # allkeys->random -> remove a random key, any key
+ # volatile-ttl -> remove the key with the nearest expire time (minor TTL)
+ # noeviction -> don't expire at all, just return an error on write operations
+ #
+ # Note: with all the kind of policies, Redis will return an error on write
+ # operations, when there are not suitable keys for eviction.
+ #
+ # At the date of writing this commands are: set setnx setex append
+ # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
+ # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
+ # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
+ # getset mset msetnx exec sort
+ #
+ # The default is:
+ #
+ # maxmemory-policy volatile-lru
+
+ # LRU and minimal TTL algorithms are not precise algorithms but approximated
+ # algorithms (in order to save memory), so you can select as well the sample
+ # size to check. For instance for default Redis will check three keys and
+ # pick the one that was used less recently, you can change the sample size
+ # using the following configuration directive.
+ #
+ # maxmemory-samples 3
+
  ############################## APPEND ONLY MODE ###############################

  # By default Redis asynchronously dumps the dataset on disk. If you can live
@@ -212,6 +290,26 @@ appendonly no
  appendfsync everysec
  # appendfsync no

+ # When the AOF fsync policy is set to always or everysec, and a background
+ # saving process (a background save or AOF log background rewriting) is
+ # performing a lot of I/O against the disk, in some Linux configurations
+ # Redis may block too long on the fsync() call. Note that there is no fix for
+ # this currently, as even performing fsync in a different thread will block
+ # our synchronous write(2) call.
+ #
+ # In order to mitigate this problem it's possible to use the following option
+ # that will prevent fsync() from being called in the main process while a
+ # BGSAVE or BGREWRITEAOF is in progress.
+ #
+ # This means that while another child is saving the durability of Redis is
+ # the same as "appendfsync none", that in pratical terms means that it is
+ # possible to lost up to 30 seconds of log in the worst scenario (with the
+ # default Linux settings).
+ #
+ # If you have latency problems turn this to "yes". Otherwise leave it as
+ # "no" that is the safest pick from the point of view of durability.
+ no-appendfsync-on-rewrite no
+
  ################################ VIRTUAL MEMORY ###############################

  # Virtual Memory allows Redis to work with datasets bigger than the actual
@@ -285,17 +383,25 @@ vm-max-threads 4

  ############################### ADVANCED CONFIG ###############################

- # Glue small output buffers together in order to send small replies in a
- # single TCP packet. Uses a bit more CPU but most of the times it is a win
- # in terms of number of queries per second. Use 'yes' if unsure.
- glueoutputbuf yes
-
  # Hashes are encoded in a special way (much more memory efficient) when they
  # have at max a given numer of elements, and the biggest element does not
  # exceed a given threshold. You can configure this limits with the following
  # configuration directives.
- hash-max-zipmap-entries 64
- hash-max-zipmap-value 512
+ hash-max-zipmap-entries 512
+ hash-max-zipmap-value 64
+
+ # Similarly to hashes, small lists are also encoded in a special way in order
+ # to save a lot of space. The special representation is only used when
+ # you are under the following limits:
+ list-max-ziplist-entries 512
+ list-max-ziplist-value 64
+
+ # Sets have a special encoding in just one case: when a set is composed
+ # of just strings that happens to be integers in radix 10 in the range
+ # of 64 bit signed integers.
+ # The following configuration setting sets the limit in the size of the
+ # set in order to use this special memory saving encoding.
+ set-max-intset-entries 512

  # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
  # order to help rehashing the main Redis hash table (the one mapping top-level
data/lib/generators/vulcanize/templates/redis/config/rubber/rubber-redis.yml CHANGED
@@ -1,4 +1,4 @@
- redis_server_version: 2.0.4
+ redis_server_version: 2.2.1

  redis_db_dir: /mnt/redis

data/lib/generators/vulcanize/templates/sphinx/config/rubber/common/sphinx.yml CHANGED
@@ -12,7 +12,7 @@
  # pid file:: log/searchd.#{environment}.pid
  # searchd files:: db/sphinx/#{environment}/
  # address:: 127.0.0.1
- # port:: 3312
+ # port:: 9312
  # allow star:: false
  # min prefix length:: 1
  # min infix length:: 1
data/lib/generators/vulcanize/templates/sphinx/config/rubber/role/sphinx/monit-sphinx.conf CHANGED
@@ -10,4 +10,4 @@ check process sphinx with pidfile <%= pidfile %>
  group sphinx-<%= RUBBER_ENV %>
  start program = "/usr/bin/sudo -H -u <%= rubber_env.app_user %> bash -l -c '<%= start_program %>'"
  stop program = "/usr/bin/sudo -H -u <%= rubber_env.app_user %> bash -l -c '<%= stop_program %>'"
- if failed host <%= rubber_env.host %> port 3312 with timeout 5 seconds for 5 cycles then restart
+ if failed host <%= rubber_env.host %> port 9312 with timeout 5 seconds for 5 cycles then restart
data/lib/rubber/recipes/rubber/volumes.rb CHANGED
@@ -56,13 +56,21 @@ namespace :rubber do
  end

  desc <<-DESC
- Shows the configured persistent volumes
+ Destroys the configured persistent volumes
  DESC
  required_task :destroy_volume do
  volume_id = get_env('VOLUME_ID', "Volume ID", true)
  destroy_volume(volume_id)
  end

+ desc <<-DESC
+ Detaches the configured persistent volumes
+ DESC
+ required_task :detach_volume do
+ volume_id = get_env('VOLUME_ID', "Volume ID", true)
+ detach_volume(volume_id)
+ end
+
  def create_volume(size, zone)
  volumeId = cloud.create_volume(size.to_s, zone)
  fatal "Failed to create volume" if volumeId.nil?
@@ -272,13 +280,13 @@ namespace :rubber do

  <<-ENDSCRIPT
  # Add the logical volume mount point to /etc/fstab.
- if ! grep -q '#{volume['mount']}' /etc/fstab; then
+ if ! grep -q '#{volume['name']}' /etc/fstab; then
  if mount | grep -q '#{volume['mount']}'; then
  umount '#{volume['mount']}'
  fi

  mv /etc/fstab /etc/fstab.bak
- cat /etc/fstab.bak | grep -v '#{volume['mount']}' > /etc/fstab
+ cat /etc/fstab.bak | grep -v '#{volume['mount']}\\b' > /etc/fstab
  echo '#{device_name} #{volume['mount']} #{volume['filesystem']} noatime 0 0 # rubber LVM volume' >> /etc/fstab
  fi

@@ -312,6 +320,10 @@ namespace :rubber do
  for device in #{physical_volumes.join(' ')}
  do
  if ! pvdisplay $device >> /dev/null 2>&1; then
+ if grep $device /etc/mtab; then
+ umount $device
+ fi
+
  pvcreate $device

  # See if the volume group already exists. If so, add the new physical volume to it.
@@ -333,8 +345,7 @@ namespace :rubber do
  _setup_lvm_group
  end

- def destroy_volume(volume_id)
-
+ def detach_volume(volume_id)
  logger.info "Detaching volume #{volume_id}"
  cloud.detach_volume(volume_id) rescue logger.info("Volume was not attached")

@@ -348,16 +359,24 @@ namespace :rubber do
  end
  print "\n"

+ logger.info "Detaching volume #{volume_id} from rubber instances file"
+ rubber_instances.each do |ic|
+ ic.volumes.delete(volume_id) if ic.volumes
+ end
+ rubber_instances.save
+
+ end
+
+ def destroy_volume(volume_id)
+ detach_volume(volume_id)
+
  logger.info "Deleting volume #{volume_id}"
  cloud.destroy_volume(volume_id)

  logger.info "Removing volume #{volume_id} from rubber instances file"
  artifacts = rubber_instances.artifacts
  artifacts['volumes'].delete_if {|k,v| v == volume_id}
- rubber_instances.each do |ic|
- ic.volumes.delete(volume_id) if ic.volumes
- end
  rubber_instances.save
  end
-
+
  end
data/lib/rubber/tasks/rubber.rb CHANGED
@@ -176,7 +176,7 @@ namespace :rubber do

  user = get_env('DBUSER', true)
  pass = get_env('DBPASS')
- pass = nil if pass.strip.size == 0
+ pass = nil if (pass.nil? || pass.strip.size == 0)
  host = get_env('DBHOST', true)
  name = get_env('DBNAME', true)

metadata CHANGED
@@ -5,9 +5,9 @@ version: !ruby/object:Gem::Version
  prerelease:
  segments:
  - 1
- - 10
- - 2
- version: 1.10.2
+ - 11
+ - 0
+ version: 1.11.0
  platform: ruby
  authors:
  - Matt Conway
@@ -15,7 +15,7 @@ autorequire:
  bindir: bin
  cert_chain: []

- date: 2011-02-21 00:00:00 -05:00
+ date: 2011-03-04 00:00:00 -05:00
  default_executable: vulcanize
  dependencies:
  - !ruby/object:Gem::Dependency
@@ -486,20 +486,7 @@ has_rdoc: true
  homepage: http://github.com/wr0ngway/rubber
  licenses: []

- post_install_message: |+
-
- ********************************************************************************
-
- Thank you for installing rubber. Please note that this is a major upgrade
- and we've moved towards using RVM for Ruby configuration on your EC2 instances.
-
- If you're upgrading rubber, please make sure to read the upgrade notes and
- make the necessary configuration changes:
-
- http://wiki.github.com/wr0ngway/rubber/upgrading
-
- ********************************************************************************
-
+ post_install_message:
  rdoc_options: []

  require_paths:
@@ -525,7 +512,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  requirements: []

  rubyforge_project: rubber
- rubygems_version: 1.5.2
+ rubygems_version: 1.5.3
  signing_key:
  specification_version: 3
  summary: A capistrano plugin for managing multi-instance deployments to the cloud (ec2)