pvcglue 0.1.19 → 0.1.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: 9b41533643419c1df16e610b0ede09ae0ee64857
4
- data.tar.gz: 0e1ccf31c5e651d1e7ee97a7258d93c15fff6002
3
+ metadata.gz: 8b8ffc336dfdd07570db2d254d7aa3e3962bc46e
4
+ data.tar.gz: e916de10d914a58ce35a64749afe31bc02a7c023
5
5
  SHA512:
6
- metadata.gz: 26edde8f93741d015d24a278d13ea9163d70f5978d37ac40d7e34786cda3bc1310ada65657f806b3aae5f406553bc0e0073742dc6f8af7b321799ae963cf8bb6
7
- data.tar.gz: cde008ef3991255eb23f8f88e8142aaf27834bf50d4c51bca7b2ab466ff808cf9b9f1581eb89b1cdd775f2eefaf9a559a4d7a76bc67e04f217c5a6f187e84f03
6
+ metadata.gz: 4edb635b8bc3662bb47670b857cfdeb82920dceb7b181da8393c7f6c8ad4d9322008f2cf5df9f50bca0d5babfb3716ff1e5e6295e626b29b3b111d11da41395b
7
+ data.tar.gz: ed7dfcc56e2f5df166259ad84c38d647ff5fd4b29c35a77ea58bfe51af8a78b7115d42ea1492166a15f907459ab706928449d508940f48c40c41d346afa78a74
data/README.md CHANGED
@@ -165,6 +165,40 @@ And then execute:
165
165
 
166
166
  $ bundle
167
167
 
168
+
169
+ Notes:
170
+
171
+ .ruby-version
172
+ .ruby-gemset
173
+ Ruby version in Gemfile
174
+
175
+ pvc manager configure # once per machine
176
+ pvc manager bootstrap
177
+
178
+ add *.toml and *.dump to .gitignore
179
+
180
+
181
+ create main toml file
182
+
183
+ add maintenance/maintenance.html
184
+
185
+ pvc alpha bootstrap
186
+ pvc alpha pvcify
187
+ pvc alpha build
188
+ pvc alpha deploy
189
+
190
+ modify config/initializers/secret_token.rb to include
191
+
192
+ if Rails.env.production?
193
+ Store::Application.config.secret_token = ENV['RAILS_SECRET_TOKEN'] || raise('No secret token specified. :(')
194
+ else
195
+ Store::Application.config.secret_token = '1175ba3ce440d59b811fd9464c5657ca09f171535fd885719f263ddec3af6ef5df42c266d96868479357b7959b05cfcac7dd1663a696931c5f347c96665e1623'
196
+ end
197
+
198
+
199
+
200
+
201
+
168
202
  ## Troubleshooting
169
203
 
170
204
  ### If you see this while trying to deploy
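A note on the setup steps added to the README above: they assume the app pins its Ruby version in three places. A minimal sketch, assuming Ruby 2.1.5 and a gemset named after the app (substitute your own values; these file contents are illustrative, not generated by pvcglue):

    # .ruby-version
    2.1.5

    # .ruby-gemset
    my_app

    # Gemfile -- the `ruby` directive should match .ruby-version
    ruby '2.1.5'

Keeping the Gemfile `ruby` declaration in sync with .ruby-version helps avoid version-mismatch failures when bundling on the web nodes at deploy time.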
data/lib/pvcglue/cloud.rb CHANGED
@@ -56,7 +56,8 @@ module Pvcglue
56
56
  end
57
57
 
58
58
  def stage_roles
59
- raise(Thor::Error, "Stage not defined: #{stage_name}.") if stage.nil?
59
+ raise("Stage not defined: #{stage_name}.") if stage.nil?
60
+ # raise(Thor::Error, "Stage not defined: #{stage_name}.") if stage.nil?
60
61
  stage[:roles]
61
62
  end
62
63
 
@@ -73,7 +74,7 @@ module Pvcglue
73
74
  end
74
75
 
75
76
  # find node by full node_name or by matching prefix of node_name
76
- def find_node(node_name)
77
+ def find_node(node_name, raise_error = true)
77
78
  puts "*"*80
78
79
  raise(Thor::Error, "Node not specified.") if node_name.nil? || node_name.empty?
79
80
  return {node_name => nodes_in_stage[node_name]} if nodes_in_stage[node_name]
@@ -82,7 +83,8 @@ module Pvcglue
82
83
  puts key
83
84
  return {key => value} if key.start_with?(node_name)
84
85
  end
85
- raise(Thor::Error, "Not found: #{node_name} in #{stage_name}.")
86
+ raise("Not found: #{node_name} in #{stage_name}.") if raise_error
87
+ # raise(Thor::Error, "Not found: #{node_name} in #{stage_name}.")
86
88
  end
87
89
 
88
90
  def nodes_in_stage(role_filter = 'all')
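The new `raise_error` parameter makes role lookups optional: when it is false, a miss returns nil instead of raising, which is how the env generation later in this diff treats memcached and redis as optional roles. A minimal usage sketch (node names are illustrative):

    # Hard requirement: raises if no node name matches the prefix 'db'.
    db_node = Pvcglue.cloud.find_node('db')

    # Optional lookup: returns nil when the stage has no matching node,
    # so the caller can fall back to an empty value instead of aborting.
    redis_node = Pvcglue.cloud.find_node('redis', false)
    redis_host = redis_node ? "#{redis_node['redis']['private_ip']}:6379" : ""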
data/lib/pvcglue/db.rb CHANGED
@@ -17,6 +17,7 @@ module Pvcglue
17
17
 
18
18
  desc "pull", "Pull copy of database from remote stage. Pass -f to exclude tables defined in the configuration file. If no tables are specified in the `excluded_db_tables` option, 'versions' will be used by default."
19
19
  method_option :fast, :type => :boolean, :aliases => "-f"
20
+
20
21
  def pull(file_name = nil)
21
22
  raise(Thor::Error, "Stage required.") if Pvcglue.cloud.stage_name.nil?
22
23
  pg_dump(self.class.remote, file_name, options[:fast])
@@ -26,7 +27,7 @@ module Pvcglue
26
27
 
27
28
  def dump(file_name = nil)
28
29
  raise(Thor::Error, "Stage should not be set for this command. (Use 'pull' for remote databases.)") unless Pvcglue.cloud.stage_name.nil?
29
- pg_dump(self.class.local, file_name)
30
+ pg_dump(self.class.local, file_name, options[:fast])
30
31
  end
31
32
 
32
33
  desc "restore", "restore"
@@ -59,7 +60,7 @@ module Pvcglue
59
60
  File.join(Pvcglue::Capistrano.application_config_dir, 'database.yml')
60
61
  end
61
62
 
62
- class Db_Config < Struct.new(:host, :port, :database, :username, :password)
63
+ class Db_Config < Struct.new(:host, :port, :database, :username, :password, :kind)
63
64
  end
64
65
 
65
66
  def self.local_info
@@ -76,7 +77,7 @@ module Pvcglue
76
77
  def self.local
77
78
  @local ||= begin
78
79
  dev = local_info["development"]
79
- Db_Config.new(dev["host"], dev["port"], dev["database"], dev["username"], dev["password"])
80
+ Db_Config.new(dev["host"], dev["port"], dev["database"], dev["username"], dev["password"], :local)
80
81
  end
81
82
  end
82
83
 
@@ -88,7 +89,9 @@ module Pvcglue
88
89
  env["DB_USER_POSTGRES_PORT"],
89
90
  env["DB_USER_POSTGRES_DATABASE"],
90
91
  env["DB_USER_POSTGRES_USERNAME"],
91
- env["DB_USER_POSTGRES_PASSWORD"])
92
+ env["DB_USER_POSTGRES_PASSWORD"],
93
+ :remote
94
+ )
92
95
  end
93
96
  end
94
97
 
@@ -125,11 +128,14 @@ module Pvcglue
125
128
 
126
129
 
127
130
  def pg_dump(db, file_name, fast)
128
- host = Pvcglue.cloud.nodes_in_stage('db')['db']['public_ip']
129
- port = Pvcglue.cloud.port_in_context(:shell)
130
- user = 'deploy'
131
131
  file_name = self.class.file_helper(file_name)
132
132
 
133
+ if db.kind == :remote
134
+ host = Pvcglue.cloud.nodes_in_stage('db')['db']['public_ip']
135
+ port = Pvcglue.cloud.port_in_context(:shell)
136
+ user = 'deploy'
137
+ end
138
+
133
139
  cmd = "pg_dump -Fc --no-acl --no-owner -h #{db.host} -p #{db.port}"
134
140
  cmd += " -U #{db.username}" if db.username
135
141
  if fast
@@ -140,42 +146,62 @@ module Pvcglue
140
146
  cmd += " #{db.database} -v -f #{file_name}"
141
147
 
142
148
  puts cmd
143
- unless Pvcglue.run_remote(host, port, user, " PGPASSWORD=#{db.password} #{cmd}")
144
- puts "ERROR:"
145
- puts $?.inspect
146
- raise(Thor::Error, "Error: #{$?}")
147
- end
148
149
 
149
- cmd = %{scp -P #{port} #{user}@#{host}:#{file_name} #{file_name}}
150
- puts "Running `#{cmd}`"
150
+ cmd = " PGPASSWORD=#{db.password} #{cmd}"
151
+
152
+ if db.kind == :remote
153
+ unless Pvcglue.run_remote(host, port, user, cmd)
154
+ puts "ERROR:"
155
+ puts $?.inspect
156
+ raise(Thor::Error, "Error: #{$?}")
157
+ end
151
158
 
152
- unless system cmd
153
- raise(Thor::Error, "Error: #{$?}")
159
+ cmd = %{scp -P #{port} #{user}@#{host}:#{file_name} #{file_name}}
160
+ puts "Running `#{cmd}`"
161
+
162
+ unless system cmd
163
+ raise(Thor::Error, "Error: #{$?}")
164
+ end
165
+ else
166
+ unless system(cmd)
167
+ puts "ERROR:"
168
+ puts $?.inspect
169
+ raise(Thor::Error, "Error: #{$?}")
170
+ end
154
171
  end
155
172
  end
156
173
 
157
174
  def pg_restore(db, file_name)
158
175
  Pvcglue.cloud.stage_name == 'production' && destroy_prod?
159
-
160
- host = Pvcglue.cloud.nodes_in_stage('db')['db']['public_ip']
161
- port = Pvcglue.cloud.port_in_context(:shell)
162
- user = 'deploy'
163
176
  file_name = self.class.file_helper(file_name)
164
177
 
165
- cmd = %{scp -P #{port} #{file_name} #{user}@#{host}:#{file_name}}
166
- unless system cmd
167
- raise(Thor::Error, "Error: #{$?}")
178
+ if db.kind == :remote
179
+ host = Pvcglue.cloud.nodes_in_stage('db')['db']['public_ip']
180
+ port = Pvcglue.cloud.port_in_context(:shell)
181
+ user = 'deploy'
182
+
183
+ cmd = %{scp -P #{port} #{file_name} #{user}@#{host}:#{file_name}}
184
+ unless system cmd
185
+ raise(Thor::Error, "Error: #{$?}")
186
+ end
168
187
  end
169
188
 
170
189
  cmd = "pg_restore --verbose --clean --no-acl --no-owner -h #{db.host} -p #{db.port}"
171
190
  cmd += " -U #{db.username}" if db.username
172
- cmd += " -d #{db.database} #{self.class.file_helper(file_name)}"
191
+ cmd += " -d #{db.database} #{file_name}"
173
192
  puts cmd
174
193
 
175
- unless Pvcglue.run_remote(host, port, user, " PGPASSWORD=#{db.password} #{cmd}")
176
- puts "ERROR:"
177
- puts $?.inspect
194
+ if db.kind == :remote
195
+ unless Pvcglue.run_remote(host, port, user, " PGPASSWORD=#{db.password} #{cmd}")
196
+ puts "ERROR:"
197
+ puts $?.inspect
198
+ end
199
+ else
200
+ unless system(" PGPASSWORD=#{db.password} #{cmd}")
201
+ raise(Thor::Error, "Error: #{$?}")
202
+ end
178
203
  end
204
+
179
205
  end
180
206
 
181
207
  def pg_destroy(dest)
@@ -200,11 +226,11 @@ module Pvcglue
200
226
  "fi "\
201
227
  "done; "\
202
228
  "done < dd.sql; "
203
-
229
+
204
230
  cmd = "psql #{dest.username} -c "
205
231
  cmd += sql
206
232
  cmd += " > dd.sql;"
207
- cmd += bash
233
+ cmd += bash
208
234
  cmd += "rm dd.sql"
209
235
  puts cmd
210
236
  unless system({"PGPASSWORD" => dest.password}, cmd)
data/lib/pvcglue/env.rb CHANGED
@@ -87,7 +87,8 @@ module Pvcglue
87
87
  'DB_USER_POSTGRES_USERNAME' => "#{Pvcglue.cloud.app_name}_#{Pvcglue.cloud.stage_name_validated}",
88
88
  'DB_USER_POSTGRES_DATABASE' => "#{Pvcglue.cloud.app_name}_#{Pvcglue.cloud.stage_name_validated}",
89
89
  'DB_USER_POSTGRES_PASSWORD' => new_password,
90
- 'MEMCACHE_SERVERS' => memcached_host
90
+ 'MEMCACHE_SERVERS' => memcached_host,
91
+ 'REDIS_SERVER' => redis_host
91
92
  }
92
93
  end
93
94
 
@@ -97,8 +98,13 @@ module Pvcglue
97
98
  end
98
99
 
99
100
  def self.memcached_host
100
- node = Pvcglue.cloud.find_node('memcached')
101
- "#{node['memcached']['private_ip']}:11211"
101
+ node = Pvcglue.cloud.find_node('memcached', false)
102
+ node ? "#{node['memcached']['private_ip']}:11211" : ""
103
+ end
104
+
105
+ def self.redis_host
106
+ node = Pvcglue.cloud.find_node('redis', false)
107
+ node ? "#{node['redis']['private_ip']}:6379" : ""
102
108
  end
103
109
 
104
110
  def self.new_password
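With these additions, the generated stage environment now includes a REDIS_SERVER variable alongside MEMCACHE_SERVERS, and either may be an empty string when the stage has no such role. A hypothetical app-side initializer consuming it (the initializer and the `redis` gem are assumptions, not part of pvcglue):

    # config/initializers/redis.rb -- hypothetical example.
    # REDIS_SERVER is "private_ip:6379", or "" when no redis role exists.
    if ENV['REDIS_SERVER'].to_s != ''
      host, port = ENV['REDIS_SERVER'].split(':')
      $redis = Redis.new(host: host, port: port.to_i)
    end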
data/lib/pvcglue/local.rb CHANGED
@@ -1,6 +1,7 @@
1
1
  module Pvcglue
2
2
  class Local
3
- MACHINES = %w(manager lb web web_2 db memcached)
3
+ # MACHINES = %w(manager lb web web_2 db memcached)
4
+ MACHINES = %w(manager lb web db)
4
5
 
5
6
  def self.vagrant(command)
6
7
  raise(Thor::Error, "This command can only be used for the 'local' and 'test' stages.") unless Pvcglue.cloud.stage_name.in? %w(local test)
@@ -67,9 +68,11 @@ module Pvcglue
67
68
  {"db_rebuild" => true,
68
69
  "domains" => ["#{app_name}.local"],
69
70
  "ssl" => "none",
70
- "roles" =>
71
- {"caching" =>
72
- {"memcached" => {"private_ip" => "0.0.0.0", "public_ip" => "0.0.0.0"}},
71
+ "roles" => {
72
+ # "caching" =>
73
+ # {"memcached" => {"private_ip" => "0.0.0.0", "public_ip" => "0.0.0.0"}},
74
+ "redis" =>
75
+ {"redis" => {"private_ip" => "0.0.0.0", "public_ip" => "0.0.0.0"}},
73
76
  "db" => {"db" => {"private_ip" => "0.0.0.0", "public_ip" => "0.0.0.0"}},
74
77
  "lb" =>
75
78
  {"lb" =>
@@ -115,10 +118,12 @@ module Pvcglue
115
118
  # puts data[app_name][:stages][:local][:roles][:caching][:memcached][:public_ip].inspect
116
119
  # puts "*"*80
117
120
  stage_name = Pvcglue.cloud.stage_name
118
- data[app_name][:stages][stage_name][:roles][:caching][:memcached][:public_ip] = machines[:memcached][:public_ip]
119
- data[app_name][:stages][stage_name][:roles][:caching][:memcached][:private_ip] = machines[:memcached][:private_ip]
121
+ # data[app_name][:stages][stage_name][:roles][:caching][:memcached][:public_ip] = machines[:memcached][:public_ip]
122
+ # data[app_name][:stages][stage_name][:roles][:caching][:memcached][:private_ip] = machines[:memcached][:private_ip]
120
123
  data[app_name][:stages][stage_name][:roles][:db][:db][:public_ip] = machines[:db][:public_ip]
121
124
  data[app_name][:stages][stage_name][:roles][:db][:db][:private_ip] = machines[:db][:private_ip]
125
+ data[app_name][:stages][stage_name][:roles][:redis][:redis][:public_ip] = machines[:db][:public_ip]
126
+ data[app_name][:stages][stage_name][:roles][:redis][:redis][:private_ip] = machines[:db][:private_ip]
122
127
  data[app_name][:stages][stage_name][:roles][:lb][:lb][:public_ip] = machines[:lb][:public_ip]
123
128
  data[app_name][:stages][stage_name][:roles][:lb][:lb][:private_ip] = machines[:lb][:private_ip]
124
129
  data[app_name][:stages][stage_name][:roles][:web][:web_1][:public_ip] = machines[:web][:public_ip]
data/lib/pvcglue/nodes.rb CHANGED
@@ -15,7 +15,7 @@ module Pvcglue
15
15
  def run
16
16
  puts "This is where it should configure the nodes for #{@roles_filter}. :)"
17
17
 
18
- %w(lb db web caching).each do |role|
18
+ %w(lb db web caching redis).each do |role|
19
19
  if apply_role?(role)
20
20
  Pvcglue::Packages.apply(role.to_sym, :build, Pvcglue.cloud.nodes_in_stage(role))
21
21
  end
data/lib/pvcglue/packages/role_redis.rb ADDED
@@ -0,0 +1,14 @@
1
+ apt_package 'redis-server'
2
+
3
+ package 'redis' do
4
+ depends_on 'redis-server'
5
+ file({
6
+ :template => Pvcglue.template_file_name('redis.conf.erb'),
7
+ :destination => '/etc/redis/redis.conf',
8
+ :create_dirs => false,
9
+ :permissions => 0644,
10
+ :user => 'root',
11
+ :group => 'root'
12
+ }) { sudo('service redis-server restart') }
13
+ end
14
+
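This new role package follows the same pattern as the other roles: declare the apt package, then manage /etc/redis/redis.conf from the bundled template and restart the service when it changes. It is picked up by the nodes task shown earlier; a minimal sketch of applying it directly (assuming the current stage defines a redis role):

    # Mirrors what the nodes task does for each role in %w(lb db web caching redis).
    Pvcglue::Packages.apply(:redis, :build, Pvcglue.cloud.nodes_in_stage('redis'))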
data/lib/pvcglue/templates/redis.conf.erb ADDED
@@ -0,0 +1,417 @@
1
+ # Redis configuration file example
2
+
3
+ # Note on units: when memory size is needed, it is possible to specify
4
+ # it in the usual form of 1k 5GB 4M and so forth:
5
+ #
6
+ # 1k => 1000 bytes
7
+ # 1kb => 1024 bytes
8
+ # 1m => 1000000 bytes
9
+ # 1mb => 1024*1024 bytes
10
+ # 1g => 1000000000 bytes
11
+ # 1gb => 1024*1024*1024 bytes
12
+ #
13
+ # units are case insensitive so 1GB 1Gb 1gB are all the same.
14
+
15
+ # By default Redis does not run as a daemon. Use 'yes' if you need it.
16
+ # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
17
+ daemonize yes
18
+
19
+ # When running daemonized, Redis writes a pid file in /var/run/redis.pid by
20
+ # default. You can specify a custom pid file location here.
21
+ pidfile /var/run/redis/redis-server.pid
22
+
23
+ # Accept connections on the specified port, default is 6379.
24
+ # If port 0 is specified Redis will not listen on a TCP socket.
25
+ port 6379
26
+
27
+ # If you want you can bind a single interface, if the bind option is not
28
+ # specified all the interfaces will listen for incoming connections.
29
+ #
30
+ # bind 127.0.0.1
31
+
32
+ # Specify the path for the unix socket that will be used to listen for
33
+ # incoming connections. There is no default, so Redis will not listen
34
+ # on a unix socket when not specified.
35
+ #
36
+ # unixsocket /var/run/redis/redis.sock
37
+
38
+ # Close the connection after a client is idle for N seconds (0 to disable)
39
+ timeout 300
40
+
41
+ # Set server verbosity to 'debug'
42
+ # it can be one of:
43
+ # debug (a lot of information, useful for development/testing)
44
+ # verbose (many rarely useful info, but not a mess like the debug level)
45
+ # notice (moderately verbose, what you want in production probably)
46
+ # warning (only very important / critical messages are logged)
47
+ loglevel notice
48
+
49
+ # Specify the log file name. Also 'stdout' can be used to force
50
+ # Redis to log on the standard output. Note that if you use standard
51
+ # output for logging but daemonize, logs will be sent to /dev/null
52
+ logfile /var/log/redis/redis-server.log
53
+
54
+ # To enable logging to the system logger, just set 'syslog-enabled' to yes,
55
+ # and optionally update the other syslog parameters to suit your needs.
56
+ # syslog-enabled no
57
+
58
+ # Specify the syslog identity.
59
+ # syslog-ident redis
60
+
61
+ # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
62
+ # syslog-facility local0
63
+
64
+ # Set the number of databases. The default database is DB 0, you can select
65
+ # a different one on a per-connection basis using SELECT <dbid> where
66
+ # dbid is a number between 0 and 'databases'-1
67
+ databases 16
68
+
69
+ ################################ SNAPSHOTTING #################################
70
+ #
71
+ # Save the DB on disk:
72
+ #
73
+ # save <seconds> <changes>
74
+ #
75
+ # Will save the DB if both the given number of seconds and the given
76
+ # number of write operations against the DB occurred.
77
+ #
78
+ # In the example below the behaviour will be to save:
79
+ # after 900 sec (15 min) if at least 1 key changed
80
+ # after 300 sec (5 min) if at least 10 keys changed
81
+ # after 60 sec if at least 10000 keys changed
82
+ #
83
+ # Note: you can disable saving at all commenting all the "save" lines.
84
+
85
+ save 900 1
86
+ save 300 10
87
+ save 60 10000
88
+
89
+ # Compress string objects using LZF when dump .rdb databases?
90
+ # For default that's set to 'yes' as it's almost always a win.
91
+ # If you want to save some CPU in the saving child set it to 'no' but
92
+ # the dataset will likely be bigger if you have compressible values or keys.
93
+ rdbcompression yes
94
+
95
+ # The filename where to dump the DB
96
+ dbfilename dump.rdb
97
+
98
+ # The working directory.
99
+ #
100
+ # The DB will be written inside this directory, with the filename specified
101
+ # above using the 'dbfilename' configuration directive.
102
+ #
103
+ # Also the Append Only File will be created inside this directory.
104
+ #
105
+ # Note that you must specify a directory here, not a file name.
106
+ dir /var/lib/redis
107
+
108
+ ################################# REPLICATION #################################
109
+
110
+ # Master-Slave replication. Use slaveof to make a Redis instance a copy of
111
+ # another Redis server. Note that the configuration is local to the slave
112
+ # so for example it is possible to configure the slave to save the DB with a
113
+ # different interval, or to listen to another port, and so on.
114
+ #
115
+ # slaveof <masterip> <masterport>
116
+
117
+ # If the master is password protected (using the "requirepass" configuration
118
+ # directive below) it is possible to tell the slave to authenticate before
119
+ # starting the replication synchronization process, otherwise the master will
120
+ # refuse the slave request.
121
+ #
122
+ # masterauth <master-password>
123
+
124
+ # When a slave lost the connection with the master, or when the replication
125
+ # is still in progress, the slave can act in two different ways:
126
+ #
127
+ # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
128
+ # still reply to client requests, possibly with out of date data, or the
129
+ # data set may just be empty if this is the first synchronization.
130
+ #
131
+ # 2) if slave-serve-stale data is set to 'no' the slave will reply with
132
+ # an error "SYNC with master in progress" to all the kind of commands
133
+ # but to INFO and SLAVEOF.
134
+ #
135
+ slave-serve-stale-data yes
136
+
137
+ ################################## SECURITY ###################################
138
+
139
+ # Require clients to issue AUTH <PASSWORD> before processing any other
140
+ # commands. This might be useful in environments in which you do not trust
141
+ # others with access to the host running redis-server.
142
+ #
143
+ # This should stay commented out for backward compatibility and because most
144
+ # people do not need auth (e.g. they run their own servers).
145
+ #
146
+ # Warning: since Redis is pretty fast an outside user can try up to
147
+ # 150k passwords per second against a good box. This means that you should
148
+ # use a very strong password otherwise it will be very easy to break.
149
+ #
150
+ # requirepass foobared
151
+
152
+ # Command renaming.
153
+ #
154
+ # It is possible to change the name of dangerous commands in a shared
155
+ # environment. For instance the CONFIG command may be renamed into something
156
+ # of hard to guess so that it will be still available for internal-use
157
+ # tools but not available for general clients.
158
+ #
159
+ # Example:
160
+ #
161
+ # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
162
+ #
163
+ # It is also possible to completely kill a command renaming it into
164
+ # an empty string:
165
+ #
166
+ # rename-command CONFIG ""
167
+
168
+ ################################### LIMITS ####################################
169
+
170
+ # Set the max number of connected clients at the same time. By default there
171
+ # is no limit, and it's up to the number of file descriptors the Redis process
172
+ # is able to open. The special value '0' means no limits.
173
+ # Once the limit is reached Redis will close all the new connections sending
174
+ # an error 'max number of clients reached'.
175
+ #
176
+ # maxclients 128
177
+
178
+ # Don't use more memory than the specified amount of bytes.
179
+ # When the memory limit is reached Redis will try to remove keys with an
180
+ # EXPIRE set. It will try to start freeing keys that are going to expire
181
+ # in little time and preserve keys with a longer time to live.
182
+ # Redis will also try to remove objects from free lists if possible.
183
+ #
184
+ # If all this fails, Redis will start to reply with errors to commands
185
+ # that will use more memory, like SET, LPUSH, and so on, and will continue
186
+ # to reply to most read-only commands like GET.
187
+ #
188
+ # WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
189
+ # 'state' server or cache, not as a real DB. When Redis is used as a real
190
+ # database the memory usage will grow over the weeks, it will be obvious if
191
+ # it is going to use too much memory in the long run, and you'll have the time
192
+ # to upgrade. With maxmemory after the limit is reached you'll start to get
193
+ # errors for write operations, and this may even lead to DB inconsistency.
194
+ #
195
+ # maxmemory <bytes>
196
+
197
+ # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
198
+ # is reached? You can select among five behavior:
199
+ #
200
+ # volatile-lru -> remove the key with an expire set using an LRU algorithm
201
+ # allkeys-lru -> remove any key accordingly to the LRU algorithm
202
+ # volatile-random -> remove a random key with an expire set
203
+ # allkeys->random -> remove a random key, any key
204
+ # volatile-ttl -> remove the key with the nearest expire time (minor TTL)
205
+ # noeviction -> don't expire at all, just return an error on write operations
206
+ #
207
+ # Note: with all the kind of policies, Redis will return an error on write
208
+ # operations, when there are not suitable keys for eviction.
209
+ #
210
+ # At the date of writing these commands are: set setnx setex append
211
+ # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
212
+ # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
213
+ # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
214
+ # getset mset msetnx exec sort
215
+ #
216
+ # The default is:
217
+ #
218
+ # maxmemory-policy volatile-lru
219
+
220
+ # LRU and minimal TTL algorithms are not precise algorithms but approximated
221
+ # algorithms (in order to save memory), so you can select as well the sample
222
+ # size to check. For instance for default Redis will check three keys and
223
+ # pick the one that was used less recently, you can change the sample size
224
+ # using the following configuration directive.
225
+ #
226
+ # maxmemory-samples 3
227
+
228
+ ############################## APPEND ONLY MODE ###############################
229
+
230
+ # By default Redis asynchronously dumps the dataset on disk. If you can live
231
+ # with the idea that the latest records will be lost if something like a crash
232
+ # happens this is the preferred way to run Redis. If instead you care a lot
233
+ # about your data and don't want a single record to get lost, you should
234
+ # enable the append only mode: when this mode is enabled Redis will append
235
+ # every write operation received in the file appendonly.aof. This file will
236
+ # be read on startup in order to rebuild the full dataset in memory.
237
+ #
238
+ # Note that you can have both the async dumps and the append only file if you
239
+ # like (you have to comment the "save" statements above to disable the dumps).
240
+ # Still if append only mode is enabled Redis will load the data from the
241
+ # log file at startup ignoring the dump.rdb file.
242
+ #
243
+ # IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append
244
+ # log file in background when it gets too big.
245
+
246
+ appendonly no
247
+
248
+ # The name of the append only file (default: "appendonly.aof")
249
+ # appendfilename appendonly.aof
250
+
251
+ # The fsync() call tells the Operating System to actually write data on disk
252
+ # instead to wait for more data in the output buffer. Some OS will really flush
253
+ # data on disk, some other OS will just try to do it ASAP.
254
+ #
255
+ # Redis supports three different modes:
256
+ #
257
+ # no: don't fsync, just let the OS flush the data when it wants. Faster.
258
+ # always: fsync after every write to the append only log . Slow, Safest.
259
+ # everysec: fsync only if one second passed since the last fsync. Compromise.
260
+ #
261
+ # The default is "everysec" that's usually the right compromise between
262
+ # speed and data safety. It's up to you to understand if you can relax this to
263
+ # "no" that will will let the operating system flush the output buffer when
264
+ # it wants, for better performances (but if you can live with the idea of
265
+ # some data loss consider the default persistence mode that's snapshotting),
266
+ # or on the contrary, use "always" that's very slow but a bit safer than
267
+ # everysec.
268
+ #
269
+ # If unsure, use "everysec".
270
+
271
+ # appendfsync always
272
+ appendfsync everysec
273
+ # appendfsync no
274
+
275
+ # When the AOF fsync policy is set to always or everysec, and a background
276
+ # saving process (a background save or AOF log background rewriting) is
277
+ # performing a lot of I/O against the disk, in some Linux configurations
278
+ # Redis may block too long on the fsync() call. Note that there is no fix for
279
+ # this currently, as even performing fsync in a different thread will block
280
+ # our synchronous write(2) call.
281
+ #
282
+ # In order to mitigate this problem it's possible to use the following option
283
+ # that will prevent fsync() from being called in the main process while a
284
+ # BGSAVE or BGREWRITEAOF is in progress.
285
+ #
286
+ # This means that while another child is saving the durability of Redis is
287
+ # the same as "appendfsync none", that in practical terms means that it is
288
+ # possible to lose up to 30 seconds of log in the worst scenario (with the
289
+ # default Linux settings).
290
+ #
291
+ # If you have latency problems turn this to "yes". Otherwise leave it as
292
+ # "no" that is the safest pick from the point of view of durability.
293
+ no-appendfsync-on-rewrite no
294
+
295
+ ################################ VIRTUAL MEMORY ###############################
296
+
297
+ # Virtual Memory allows Redis to work with datasets bigger than the actual
298
+ # amount of RAM needed to hold the whole dataset in memory.
299
+ # In order to do so very used keys are taken in memory while the other keys
300
+ # are swapped into a swap file, similarly to what operating systems do
301
+ # with memory pages.
302
+ #
303
+ # To enable VM just set 'vm-enabled' to yes, and set the following three
304
+ # VM parameters accordingly to your needs.
305
+
306
+ vm-enabled no
307
+ # vm-enabled yes
308
+
309
+ # This is the path of the Redis swap file. As you can guess, swap files
310
+ # can't be shared by different Redis instances, so make sure to use a swap
311
+ # file for every redis process you are running. Redis will complain if the
312
+ # swap file is already in use.
313
+ #
314
+ # The best kind of storage for the Redis swap file (that's accessed at random)
315
+ # is a Solid State Disk (SSD).
316
+ #
317
+ # *** WARNING *** if you are using a shared hosting the default of putting
318
+ # the swap file under /tmp is not secure. Create a dir with access granted
319
+ # only to Redis user and configure Redis to create the swap file there.
320
+ vm-swap-file /var/lib/redis/redis.swap
321
+
322
+ # vm-max-memory configures the VM to use at max the specified amount of
323
+ # RAM. Everything that does not fit will be swapped on disk *if* possible, that
324
+ # is, if there is still enough contiguous space in the swap file.
325
+ #
326
+ # With vm-max-memory 0 the system will swap everything it can. Not a good
327
+ # default, just specify the max amount of RAM you can in bytes, but it's
328
+ # better to leave some margin. For instance specify an amount of RAM
329
+ # that's more or less between 60 and 80% of your free RAM.
330
+ vm-max-memory 0
331
+
332
+ # Redis swap files is split into pages. An object can be saved using multiple
333
+ # contiguous pages, but pages can't be shared between different objects.
334
+ # So if your page is too big, small objects swapped out on disk will waste
335
+ # a lot of space. If your page is too small, there is less space in the swap
336
+ # file (assuming you configured the same number of total swap file pages).
337
+ #
338
+ # If you use a lot of small objects, use a page size of 64 or 32 bytes.
339
+ # If you use a lot of big objects, use a bigger page size.
340
+ # If unsure, use the default :)
341
+ vm-page-size 32
342
+
343
+ # Number of total memory pages in the swap file.
344
+ # Given that the page table (a bitmap of free/used pages) is taken in memory,
345
+ # every 8 pages on disk will consume 1 byte of RAM.
346
+ #
347
+ # The total swap size is vm-page-size * vm-pages
348
+ #
349
+ # With the default of 32-bytes memory pages and 134217728 pages Redis will
350
+ # use a 4 GB swap file, that will use 16 MB of RAM for the page table.
351
+ #
352
+ # It's better to use the smallest acceptable value for your application,
353
+ # but the default is large in order to work in most conditions.
354
+ vm-pages 134217728
355
+
356
+ # Max number of VM I/O threads running at the same time.
357
+ # These threads are used to read/write data from/to the swap file, since they
358
+ # also encode and decode objects from disk to memory or the reverse, a bigger
359
+ # number of threads can help with big objects even if they can't help with
360
+ # I/O itself as the physical device may not be able to couple with many
361
+ # reads/writes operations at the same time.
362
+ #
363
+ # The special value of 0 turn off threaded I/O and enables the blocking
364
+ # Virtual Memory implementation.
365
+ vm-max-threads 4
366
+
367
+ ############################### ADVANCED CONFIG ###############################
368
+
369
+ # Hashes are encoded in a special way (much more memory efficient) when they
370
+ # have at max a given number of elements, and the biggest element does not
371
+ # exceed a given threshold. You can configure this limits with the following
372
+ # configuration directives.
373
+ hash-max-zipmap-entries 512
374
+ hash-max-zipmap-value 64
375
+
376
+ # Similarly to hashes, small lists are also encoded in a special way in order
377
+ # to save a lot of space. The special representation is only used when
378
+ # you are under the following limits:
379
+ list-max-ziplist-entries 512
380
+ list-max-ziplist-value 64
381
+
382
+ # Sets have a special encoding in just one case: when a set is composed
383
+ # of just strings that happens to be integers in radix 10 in the range
384
+ # of 64 bit signed integers.
385
+ # The following configuration setting sets the limit in the size of the
386
+ # set in order to use this special memory saving encoding.
387
+ set-max-intset-entries 512
388
+
389
+ # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
390
+ # order to help rehashing the main Redis hash table (the one mapping top-level
391
+ # keys to values). The hash table implementation redis uses (see dict.c)
392
+ # performs a lazy rehashing: the more operations you run into a hash table
393
+ # that is rehashing, the more rehashing "steps" are performed, so if the
394
+ # server is idle the rehashing is never complete and some more memory is used
395
+ # by the hash table.
396
+ #
397
+ # The default is to use this millisecond 10 times every second in order to
398
+ # active rehashing the main dictionaries, freeing memory when possible.
399
+ #
400
+ # If unsure:
401
+ # use "activerehashing no" if you have hard latency requirements and it is
402
+ # not a good thing in your environment that Redis can reply from time to time
403
+ # to queries with 2 milliseconds delay.
404
+ #
405
+ # use "activerehashing yes" if you don't have such hard requirements but
406
+ # want to free memory asap when possible.
407
+ activerehashing yes
408
+
409
+ ################################## INCLUDES ###################################
410
+
411
+ # Include one or more other config files here. This is useful if you
412
+ # have a standard template that goes to all redis server but also need
413
+ # to customize a few per-server settings. Include files can include
414
+ # other files, so use this wisely.
415
+ #
416
+ # include /path/to/local.conf
417
+ # include /path/to/other.conf
data/lib/pvcglue/version.rb CHANGED
@@ -1,3 +1,3 @@
1
1
  module Pvcglue
2
- VERSION = "0.1.19"
2
+ VERSION = "0.1.20"
3
3
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: pvcglue
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.1.19
4
+ version: 0.1.20
5
5
  platform: ruby
6
6
  authors:
7
7
  - Andrew Lyric
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2015-05-06 00:00:00.000000000 Z
11
+ date: 2015-05-08 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: bundler
@@ -243,6 +243,7 @@ files:
243
243
  - lib/pvcglue/packages/role_db.rb
244
244
  - lib/pvcglue/packages/role_lb.rb
245
245
  - lib/pvcglue/packages/role_memcached.rb
246
+ - lib/pvcglue/packages/role_redis.rb
246
247
  - lib/pvcglue/packages/role_web.rb
247
248
  - lib/pvcglue/packages/rvm.rb
248
249
  - lib/pvcglue/packages/timezone.rb
@@ -264,6 +265,7 @@ files:
264
265
  - lib/pvcglue/templates/passenger.list.erb
265
266
  - lib/pvcglue/templates/pg_hba.conf.erb
266
267
  - lib/pvcglue/templates/postgresql.conf.erb
268
+ - lib/pvcglue/templates/redis.conf.erb
267
269
  - lib/pvcglue/templates/sshd_config.erb
268
270
  - lib/pvcglue/templates/stage-deploy.rb.erb
269
271
  - lib/pvcglue/templates/timezone.erb