mobilize-ssh 1.0.1

@@ -0,0 +1,33 @@
+ class Net::SSH::Gateway
+   def self.run(gname,guser,name,user,command,gopts={},opts={})
+     gateway = self.new(gname,guser,gopts)
+     gateway.ssh(name,user,opts) do |ssh|
+       stderr,stdout = ["",""]
+       ssh.exec!(command) do |ch, stream, data|
+         if stream == :stderr
+           stderr += data
+         else
+           stdout += data
+         end
+       end
+       raise stderr if stderr.length>0
+       return stdout
+     end
+   end
+   def self.sync(gname,guser,name,user,from_path,to_path,gopts={},opts={})
+     gateway = self.new(gname,guser,gopts)
+     gateway.scp(name,user,opts) do |scp|
+       scp.upload!(from_path,to_path,:recursive=>true)
+     end
+     return true
+   end
+   # allow scp through the gateway by tunneling a local port
+   def scp(name, user, opts={}, &block)
+     local_port = open(name, opts[:port] || 22)
+     begin
+       Net::SCP.start("127.0.0.1", user, opts.merge(:port => local_port), &block)
+     ensure
+       close(local_port) if block || $!
+     end
+   end
+ end
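
A minimal usage sketch of the patched gateway helpers follows; the host names, users, and key paths are placeholders, not values shipped with the gem:

  require 'net/ssh/gateway'
  require 'net/scp'

  opts  = {:port => 22, :keys => "config/mobilize/ssh_private.key"}  # target host options (hypothetical)
  gopts = {:port => 22, :keys => "config/mobilize/ssh_private.key"}  # gateway options (hypothetical)

  # run a command on host.internal by tunneling through gateway.example.com;
  # per the patch above, raises if anything hit stderr, otherwise returns stdout
  puts Net::SSH::Gateway.run("gateway.example.com","gateway_user",
                             "host.internal","host_user","hostname",gopts,opts)

  # recursively upload a local directory through the same gateway
  Net::SSH::Gateway.sync("gateway.example.com","gateway_user",
                         "host.internal","host_user","/tmp/payload","payload",gopts,opts)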
@@ -0,0 +1,13 @@
+ class Socket
+   def Socket.official_hostname
+     begin
+       Socket.gethostbyname(Socket.gethostname).first
+     rescue
+       Socket.gethostname
+     end
+   end
+
+   def Socket.domain_name
+     Socket.official_hostname.split(".")[-2..-1].join(".")
+   end
+ end
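
Illustratively, given a fully qualified hostname these helpers behave as follows (values depend on the machine's DNS setup):

  require 'socket'
  Socket.official_hostname #=> e.g. "worker1.example.com"
  Socket.domain_name       #=> e.g. "example.com" (assumes the hostname contains at least one dot)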
@@ -0,0 +1,5 @@
+ class String
+   def to_md5
+     Digest::MD5.hexdigest(self)
+   end
+ end
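
Digest must be loaded before String#to_md5 is called; a quick sanity check (output shape only):

  require 'digest/md5'
  "mobilize".to_md5 #=> the 32-character hex MD5 digest of the string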
@@ -0,0 +1,151 @@
+ module Mobilize
+   module Ssh
+     def Ssh.config
+       Base.config('ssh')
+     end
+
+     def Ssh.tmp_file_dir
+       Ssh.config['tmp_file_dir']
+     end
+
+     def Ssh.host(node)
+       Ssh.config['nodes'][node]['host']
+     end
+
+     def Ssh.gateway(node)
+       Ssh.config['nodes'][node]['gateway']
+     end
+
+     # determine if the current machine is on the host's domain;
+     # a gateway is needed if one is configured and the domains differ
+     def Ssh.needs_gateway?(node)
+       host_domain_name = Ssh.host(node)['name'].split(".")[-2..-1].join(".")
+       return true if Ssh.gateway(node) and Socket.domain_name != host_domain_name
+     end
+
+     def Ssh.pop_comm_dir(comm_dir,file_hash)
+       "rm -rf #{comm_dir}".bash
+       file_hash.each do |fname,fdata|
+         fpath = "#{comm_dir}/#{fname}"
+         # for now, only .gz files are treated as binary
+         binary = fname.ends_with?(".gz") ? true : false
+         # read data from cache, put it in a tmp file
+         Ssh.tmp_file(fdata,binary,fpath)
+       end
+       return true if file_hash.keys.length>0
+     end
+
+     def Ssh.scp(node,from_path,to_path)
+       name,key,port,user = Ssh.host(node).ie{|h| ['name','key','port','user'].map{|k| h[k]}}
+       key_path = "#{Base.root}/#{key}"
+       opts = {:port=>(port || 22),:keys=>key_path}
+       if Ssh.needs_gateway?(node)
+         gname,gkey,gport,guser = Ssh.gateway(node).ie{|h| ['name','key','port','user'].map{|k| h[k]}}
+         gkey_path = "#{Base.root}/#{gkey}"
+         gopts = {:port=>(gport || 22),:keys=>gkey_path}
+         return Net::SSH::Gateway.sync(gname,guser,name,user,from_path,to_path,gopts,opts)
+       else
+         Net::SCP.start(name,user,opts) do |scp|
+           scp.upload!(from_path,to_path,:recursive=>true)
+         end
+       end
+       return true
+     end
+
+     def Ssh.run(node,command,file_hash=nil,su_user=nil)
+       name,key,port,user = Ssh.host(node).ie{|h| ['name','key','port','user'].map{|k| h[k]}}
+       key_path = "#{Base.root}/#{key}"
+       opts = {:port=>(port || 22),:keys=>key_path}
+       su_user ||= user
+       file_hash ||= {}
+       # make sure the dir for this command is clear
+       comm_md5 = [su_user,node,command,file_hash.keys.to_s].join.to_md5
+       comm_dir = "#{Ssh.tmp_file_dir}#{comm_md5}"
+       # populate the comm dir with any files
+       Ssh.pop_comm_dir(comm_dir,file_hash)
+       # move any files up to the node
+       rem_dir = nil
+       if File.exists?(comm_dir)
+         # make sure the user starts in rem_dir
+         rem_dir = "#{comm_md5}/"
+         command = ["cd #{rem_dir}",command].join(";")
+         Ssh.scp(node,comm_dir,rem_dir)
+         "rm -rf #{comm_dir}".bash
+         if su_user
+           chown_command = "sudo chown -R #{su_user} #{rem_dir}"
+           Ssh.run(node,chown_command)
+         end
+       end
+       if su_user != user
+         # wrap the command in sudo su -c
+         command = %{sudo su #{su_user} -c "#{command}"}
+       end
+       result = nil
+       # one path with a gateway, one without
+       if Ssh.needs_gateway?(node)
+         gname,gkey,gport,guser = Ssh.gateway(node).ie{|h| ['name','key','port','user'].map{|k| h[k]}}
+         gkey_path = "#{Base.root}/#{gkey}"
+         gopts = {:port=>(gport || 22),:keys=>gkey_path}
+         result = Net::SSH::Gateway.run(gname,guser,name,user,command,gopts,opts)
+       else
+         Net::SSH.start(name,user,opts) do |ssh|
+           result = ssh.run(command)
+         end
+       end
+       # delete the remote dir if necessary
+       if rem_dir
+         del_cmd = "rm -rf #{rem_dir}"
+         if su_user
+           del_cmd = %{sudo su #{su_user} -c "#{del_cmd}"}
+         end
+         Ssh.run(node,del_cmd)
+       end
+       result
+     end
+
+     def Ssh.read(node,path)
+       Ssh.run(node,"cat #{path}")
+     end
+
+     def Ssh.write(node,fdata,to_path,binary=false)
+       from_path = Ssh.tmp_file(fdata,binary)
+       Ssh.scp(node,from_path,to_path)
+       "rm #{from_path}".bash
+       return true
+     end
+
+     def Ssh.tmp_file(fdata,binary=false,fpath=nil)
+       # creates a file under the tmp file dir, named with an md5 of the data
+       tmp_file_path = fpath || "#{Ssh.tmp_file_dir}#{(fdata + Time.now.utc.to_f.to_s).to_md5}"
+       write_mode = binary ? "wb" : "w"
+       # make sure the folder is created
+       "mkdir -p #{tmp_file_path.split("/")[0..-2].join("/")}".bash
+       # write data to the path
+       File.open(tmp_file_path,write_mode) {|f| f.print(fdata)}
+       return tmp_file_path
+     end
+
+     def Ssh.get_file_hash(gsheet_paths,gdrive_slot)
+       file_hash = {}
+       gsheet_paths.map do |gpath|
+         string = Gsheet.find_by_path(gpath,gdrive_slot).to_tsv
+         fname = gpath.split("/").last
+         {fname => string}
+       end.each do |f|
+         file_hash = f.merge(file_hash)
+       end
+       file_hash
+     end
+
+     def Ssh.run_by_task_path(task_path)
+       t = Task.where(:path=>task_path).first
+       params = t.params
+       node, command = [params[0],params[1]]
+       file_hash = if params[2]
+         gsheet_paths = params[2..-1]
+         gdrive_slot = Gdrive.slot_worker_by_path(task_path)
+         Ssh.get_file_hash(gsheet_paths,gdrive_slot)
+       end
+       Ssh.run(node,command,file_hash)
+     end
+   end
+ end
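
Putting the module together, a hedged sketch of typical calls, assuming a node named dev_node is configured as in the sample YAML further down:

  require 'mobilize-ssh'

  # run a command on the node as the configured host user
  puts Mobilize::Ssh.run("dev_node","hostname")

  # stage a script alongside the command; Ssh.run scp's the file_hash up,
  # cd's into the staging dir, runs the command, then cleans up
  file_hash = {"code.sh" => "tail /var/log/syslog"}
  puts Mobilize::Ssh.run("dev_node","sh code.sh",file_hash)

  # read and write remote files
  Mobilize::Ssh.write("dev_node","hello world","hello.txt")
  puts Mobilize::Ssh.read("dev_node","hello.txt")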
@@ -0,0 +1,25 @@
+ namespace :mobilize_ssh do
+   desc "Set up config and log folders and files"
+   task :setup do
+     sample_dir = File.dirname(__FILE__) + '/../samples/'
+     sample_files = Dir.entries(sample_dir)
+     config_dir = (ENV['MOBILIZE_CONFIG_DIR'] ||= "config/mobilize/")
+     log_dir = (ENV['MOBILIZE_LOG_DIR'] ||= "log/")
+     full_config_dir = "#{ENV['PWD']}/#{config_dir}"
+     full_log_dir = "#{ENV['PWD']}/#{log_dir}"
+     unless File.exists?(full_config_dir)
+       puts "creating #{config_dir}"
+       `mkdir -p #{full_config_dir}`
+     end
+     unless File.exists?(full_log_dir)
+       puts "creating #{log_dir}"
+       `mkdir -p #{full_log_dir}`
+     end
+     sample_files.each do |fname|
+       unless File.exists?("#{full_config_dir}#{fname}")
+         puts "creating #{config_dir}#{fname}"
+         `cp #{sample_dir}#{fname} #{full_config_dir}#{fname}`
+       end
+     end
+   end
+ end
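
Once the gem's tasks are loaded in the host application's Rakefile, setup is a one-liner; the config directory can be overridden through the environment variable the task reads:

  rake mobilize_ssh:setup
  MOBILIZE_CONFIG_DIR=config/mobilize/ rake mobilize_ssh:setup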
@@ -0,0 +1,5 @@
+ module Mobilize
+   module Ssh
+     VERSION = "1.0.1"
+   end
+ end
@@ -0,0 +1,18 @@
+ development:
+   tmp_file_dir: "tmp/file/"
+   nodes:
+     dev_node:
+       host: {name: dev-host.com, key: "config/mobilize/ssh_private.key", port: 22, user: host_user}
+       gateway: {name: dev-gateway.com, key: "config/mobilize/ssh_private.key", port: 22, user: gateway_user}
+ test:
+   tmp_file_dir: "tmp/file/"
+   nodes:
+     test_node:
+       host: {name: test-host.com, key: "config/mobilize/ssh_private.key", port: 22, user: host_user}
+       gateway: {name: test-gateway.com, key: "config/mobilize/ssh_private.key", port: 22, user: gateway_user}
+ production:
+   tmp_file_dir: "tmp/file/"
+   nodes:
+     prod_node:
+       host: {name: prod-host.com, key: "config/mobilize/ssh_private.key", port: 22, user: host_user}
+       gateway: {name: prod-gateway.com, key: "config/mobilize/ssh_private.key", port: 22, user: gateway_user}
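
For orientation, a sketch of how the Ssh accessors above resolve against this file in the development environment (Base.config('ssh') is assumed to return the environment-appropriate section):

  Mobilize::Ssh.host('dev_node')
  #=> {"name"=>"dev-host.com", "key"=>"config/mobilize/ssh_private.key", "port"=>22, "user"=>"host_user"}
  Mobilize::Ssh.needs_gateway?('dev_node')
  #=> true when this machine's domain differs from dev-host.com's domain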
@@ -0,0 +1,24 @@
+ # -*- encoding: utf-8 -*-
+ lib = File.expand_path('../lib', __FILE__)
+ $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
+ require 'mobilize-ssh/version'
+
+ Gem::Specification.new do |gem|
+   gem.name          = "mobilize-ssh"
+   gem.version       = Mobilize::Ssh::VERSION
+   gem.authors       = ["Cassio Paes-Leme"]
+   gem.email         = ["cpaesleme@ngmoco.com"]
+   gem.description   = %q{mobilize-ssh allows you to automate ssh commands and move files across hosts}
+   gem.summary       = %q{extend mobilize-base with the ability to run commands and files across hosts}
+   gem.homepage      = ""
+
+   gem.files         = `git ls-files`.split($/)
+   gem.executables   = gem.files.grep(%r{^bin/}).map{ |f| File.basename(f) }
+   gem.test_files    = gem.files.grep(%r{^(test|spec|features)/})
+   gem.require_paths = ["lib"]
+   gem.add_runtime_dependency "mobilize-base","1.0.4"
+   gem.add_runtime_dependency "net-ssh"
+   gem.add_runtime_dependency "net-scp"
+   gem.add_runtime_dependency "net-ssh-gateway"
+ end
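
To pull this release into an application, a typical (hypothetical) Gemfile entry would be:

  gem "mobilize-ssh", "1.0.1"

followed by bundle install; the mobilize-base, net-ssh, net-scp, and net-ssh-gateway dependencies above are resolved automatically.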
data/test/code.rb ADDED
@@ -0,0 +1,2 @@
+ command = File.open("code.sh").read
+ puts `#{command}`
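
For context, code.rb simply shells out whatever the companion code.sh contains; run locally from the gem's test directory it is equivalent to:

  ruby code.rb   # reads code.sh and executes `tail /var/log/syslog`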
data/test/code.sh ADDED
@@ -0,0 +1 @@
+ tail /var/log/syslog
@@ -0,0 +1,48 @@
+ require 'test_helper'
+
+ describe "Mobilize" do
+
+   def before
+     puts 'nothing before'
+   end
+
+   # enqueues 4 workers on Resque
+   it "runs integration test" do
+
+     puts "restart workers"
+     Mobilize::Jobtracker.restart_workers!
+
+     gdrive_slot = Mobilize::Gdrive.owner_email
+     puts "create user 'mobilize'"
+     user_name = gdrive_slot.split("@").first
+     u = Mobilize::User.where(:name=>user_name).first
+     r = u.runner
+
+     puts "add test code"
+     rb_code_sheet = Mobilize::Gsheet.find_or_create_by_path("#{r.path.split("/")[0..-2].join("/")}/code.rb",gdrive_slot)
+     rb_code_tsv = File.open("#{Mobilize::Base.root}/test/code.rb").read
+     rb_code_sheet.write(rb_code_tsv)
+
+     sh_code_sheet = Mobilize::Gsheet.find_or_create_by_path("#{r.path.split("/")[0..-2].join("/")}/code.sh",gdrive_slot)
+     sh_code_tsv = File.open("#{Mobilize::Base.root}/test/code.sh").read
+     sh_code_sheet.write(sh_code_tsv)
+
+     jobs_sheet = r.gsheet(gdrive_slot)
+
+     ssh_job_rows = ::YAML.load_file("#{Mobilize::Base.root}/test/ssh_job_rows.yml")
+     jobs_sheet.add_or_update_rows(ssh_job_rows)
+
+     puts "job row added, force enqueued runner, wait 90s"
+     r.enqueue!
+     sleep 90
+
+     puts "update job status and activity"
+     r.update_gsheet(gdrive_slot)
+
+     puts "jobtracker posted data to test sheet"
+     ssh_target_sheet = Mobilize::Gsheet.find_by_path("#{r.path.split("/")[0..-2].join("/")}/test_ssh.out",gdrive_slot)
+
+     assert ssh_target_sheet.to_tsv.length > 100
+   end
+
+ end
@@ -0,0 +1,540 @@
+ # Redis configuration file example
+
+ # Note on units: when memory size is needed, it is possible to specify
+ # it in the usual form of 1k 5GB 4M and so forth:
+ #
+ # 1k => 1000 bytes
+ # 1kb => 1024 bytes
+ # 1m => 1000000 bytes
+ # 1mb => 1024*1024 bytes
+ # 1g => 1000000000 bytes
+ # 1gb => 1024*1024*1024 bytes
+ #
+ # units are case insensitive so 1GB 1Gb 1gB are all the same.
+
+ # By default Redis does not run as a daemon. Use 'yes' if you need it.
+ # Note that Redis will write a pid file in /usr/local/var/run/redis.pid when daemonized.
+ daemonize yes
+
+ # When running daemonized, Redis writes a pid file in /usr/local/var/run/redis.pid by
+ # default. You can specify a custom pid file location here.
+ pidfile /usr/local/var/run/redis.pid
+
+ # Accept connections on the specified port, default is 6379.
+ # If port 0 is specified Redis will not listen on a TCP socket.
+ port 9736
+
+ # If you want you can bind a single interface, if the bind option is not
+ # specified all the interfaces will listen for incoming connections.
+ #
+ bind 127.0.0.1
+
+ # Specify the path for the unix socket that will be used to listen for
+ # incoming connections. There is no default, so Redis will not listen
+ # on a unix socket when not specified.
+ #
+ # unixsocket /tmp/redis.sock
+ # unixsocketperm 755
+
+ # Close the connection after a client is idle for N seconds (0 to disable)
+ timeout 0
+
+ # Set server verbosity to 'debug'
+ # it can be one of:
+ # debug (a lot of information, useful for development/testing)
+ # verbose (many rarely useful info, but not a mess like the debug level)
+ # notice (moderately verbose, what you want in production probably)
+ # warning (only very important / critical messages are logged)
+ loglevel notice
+
+ # Specify the log file name. Also 'stdout' can be used to force
+ # Redis to log on the standard output. Note that if you use standard
+ # output for logging but daemonize, logs will be sent to /dev/null
+ logfile stdout
+
+ # To enable logging to the system logger, just set 'syslog-enabled' to yes,
+ # and optionally update the other syslog parameters to suit your needs.
+ # syslog-enabled no
+
+ # Specify the syslog identity.
+ # syslog-ident redis
+
+ # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
+ # syslog-facility local0
+
+ # Set the number of databases. The default database is DB 0, you can select
+ # a different one on a per-connection basis using SELECT <dbid> where
+ # dbid is a number between 0 and 'databases'-1
+ databases 16
+
+ ################################ SNAPSHOTTING #################################
+ #
+ # Save the DB on disk:
+ #
+ # save <seconds> <changes>
+ #
+ # Will save the DB if both the given number of seconds and the given
+ # number of write operations against the DB occurred.
+ #
+ # In the example below the behaviour will be to save:
+ # after 900 sec (15 min) if at least 1 key changed
+ # after 300 sec (5 min) if at least 10 keys changed
+ # after 60 sec if at least 10000 keys changed
+ #
+ # Note: you can disable saving entirely by commenting out all the "save" lines.
+ #
+ # It is also possible to remove all the previously configured save
+ # points by adding a save directive with a single empty string argument
+ # like in the following example:
+ #
+ # save ""
+
+ save 900 1
+ save 300 10
+ save 60 10000
+
+ # By default Redis will stop accepting writes if RDB snapshots are enabled
+ # (at least one save point) and the latest background save failed.
+ # This will make the user aware (in a hard way) that data is not persisting
+ # on disk properly, otherwise chances are that no one will notice and some
+ # disaster will happen.
+ #
+ # If the background saving process starts working again Redis will
+ # automatically allow writes again.
+ #
+ # However if you have set up proper monitoring of the Redis server
+ # and persistence, you may want to disable this feature so that Redis will
+ # continue to work as usual even if there are problems with disk,
+ # permissions, and so forth.
+ stop-writes-on-bgsave-error yes
+
+ # Compress string objects using LZF when dumping .rdb databases?
+ # By default that's set to 'yes' as it's almost always a win.
+ # If you want to save some CPU in the saving child set it to 'no' but
+ # the dataset will likely be bigger if you have compressible values or keys.
+ rdbcompression yes
+
+ # Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
+ # This makes the format more resistant to corruption but there is a performance
+ # hit to pay (around 10%) when saving and loading RDB files, so you can disable it
+ # for maximum performance.
+ #
+ # RDB files created with checksum disabled have a checksum of zero that will
+ # tell the loading code to skip the check.
+ rdbchecksum yes
+
+ # The filename where to dump the DB
+ dbfilename dump.rdb
+
+ # The working directory.
+ #
+ # The DB will be written inside this directory, with the filename specified
+ # above using the 'dbfilename' configuration directive.
+ #
+ # Also the Append Only File will be created inside this directory.
+ #
+ # Note that you must specify a directory here, not a file name.
+ dir ./test/
+
+ ################################# REPLICATION #################################
+
+ # Master-Slave replication. Use slaveof to make a Redis instance a copy of
+ # another Redis server. Note that the configuration is local to the slave
+ # so for example it is possible to configure the slave to save the DB with a
+ # different interval, or to listen to another port, and so on.
+ #
+ # slaveof <masterip> <masterport>
+
+ # If the master is password protected (using the "requirepass" configuration
+ # directive below) it is possible to tell the slave to authenticate before
+ # starting the replication synchronization process, otherwise the master will
+ # refuse the slave request.
+ #
+ # masterauth <master-password>
+
+ # When a slave loses the connection with the master, or while the replication
+ # is still in progress, the slave can act in two different ways:
+ #
+ # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
+ # still reply to client requests, possibly with out of date data, or the
+ # data set may just be empty if this is the first synchronization.
+ #
+ # 2) if slave-serve-stale-data is set to 'no' the slave will reply with
+ # an error "SYNC with master in progress" to all kinds of commands
+ # except INFO and SLAVEOF.
+ #
+ slave-serve-stale-data yes
+
+ # You can configure a slave instance to accept writes or not. Writing against
+ # a slave instance may be useful to store some ephemeral data (because data
+ # written on a slave will be easily deleted after resync with the master) but
+ # may also cause problems if clients are writing to it because of a
+ # misconfiguration.
+ #
+ # Since Redis 2.6 by default slaves are read-only.
+ #
+ # Note: read only slaves are not designed to be exposed to untrusted clients
+ # on the internet. It's just a protection layer against misuse of the instance.
+ # Still a read only slave exports by default all the administrative commands
+ # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
+ # security of read only slaves using 'rename-command' to shadow all the
+ # administrative / dangerous commands.
+ slave-read-only yes
+
+ # Slaves send PINGs to the server at a predefined interval. It's possible to change
+ # this interval with the repl_ping_slave_period option. The default value is 10
+ # seconds.
+ #
+ # repl-ping-slave-period 10
+
+ # The following option sets a timeout for both Bulk transfer I/O timeout and
+ # master data or ping response timeout. The default value is 60 seconds.
+ #
+ # It is important to make sure that this value is greater than the value
+ # specified for repl-ping-slave-period otherwise a timeout will be detected
+ # every time there is low traffic between the master and the slave.
+ #
+ # repl-timeout 60
+
+ # The slave priority is an integer number published by Redis in the INFO output.
+ # It is used by Redis Sentinel in order to select a slave to promote into a
+ # master if the master is no longer working correctly.
+ #
+ # A slave with a low priority number is considered better for promotion, so
+ # for instance if there are three slaves with priority 10, 100, 25 Sentinel will
+ # pick the one with priority 10, that is the lowest.
+ #
+ # However a special priority of 0 marks the slave as not able to perform the
+ # role of master, so a slave with priority of 0 will never be selected by
+ # Redis Sentinel for promotion.
+ #
+ # By default the priority is 100.
+ slave-priority 100
+
+ ################################## SECURITY ###################################
+
+ # Require clients to issue AUTH <PASSWORD> before processing any other
+ # commands. This might be useful in environments in which you do not trust
+ # others with access to the host running redis-server.
+ #
+ # This should stay commented out for backward compatibility and because most
+ # people do not need auth (e.g. they run their own servers).
+ #
+ # Warning: since Redis is pretty fast an outside user can try up to
+ # 150k passwords per second against a good box. This means that you should
+ # use a very strong password otherwise it will be very easy to break.
+ #
+ # requirepass foobared
+
+ # Command renaming.
+ #
+ # It is possible to change the name of dangerous commands in a shared
+ # environment. For instance the CONFIG command may be renamed into something
+ # hard to guess so that it will still be available for internal-use
+ # tools but not available for general clients.
+ #
+ # Example:
+ #
+ # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
+ #
+ # It is also possible to completely kill a command by renaming it into
+ # an empty string:
+ #
+ # rename-command CONFIG ""
+
+ ################################### LIMITS ####################################
+
+ # Set the max number of connected clients at the same time. By default
+ # this limit is set to 10000 clients, however if the Redis server is not
+ # able to configure the process file limit to allow for the specified limit
+ # the max number of allowed clients is set to the current file limit
+ # minus 32 (as Redis reserves a few file descriptors for internal uses).
+ #
+ # Once the limit is reached Redis will close all the new connections sending
+ # an error 'max number of clients reached'.
+ #
+ # maxclients 10000
+
+ # Don't use more memory than the specified amount of bytes.
+ # When the memory limit is reached Redis will try to remove keys
+ # according to the eviction policy selected (see maxmemory-policy).
+ #
+ # If Redis can't remove keys according to the policy, or if the policy is
+ # set to 'noeviction', Redis will start to reply with errors to commands
+ # that would use more memory, like SET, LPUSH, and so on, and will continue
+ # to reply to read-only commands like GET.
+ #
+ # This option is usually useful when using Redis as an LRU cache, or to set
+ # a hard memory limit for an instance (using the 'noeviction' policy).
+ #
+ # WARNING: If you have slaves attached to an instance with maxmemory on,
+ # the size of the output buffers needed to feed the slaves are subtracted
+ # from the used memory count, so that network problems / resyncs will
+ # not trigger a loop where keys are evicted, and in turn the output
+ # buffer of slaves is full with DELs of keys evicted triggering the deletion
+ # of more keys, and so forth until the database is completely emptied.
+ #
+ # In short... if you have slaves attached it is suggested that you set a lower
+ # limit for maxmemory so that there is some free RAM on the system for slave
+ # output buffers (but this is not needed if the policy is 'noeviction').
+ #
+ # maxmemory <bytes>
+
+ # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
+ # is reached. You can select among the following behaviors:
+ #
+ # volatile-lru -> remove the key with an expire set using an LRU algorithm
+ # allkeys-lru -> remove any key according to the LRU algorithm
+ # volatile-random -> remove a random key with an expire set
+ # allkeys-random -> remove a random key, any key
+ # volatile-ttl -> remove the key with the nearest expire time (minor TTL)
+ # noeviction -> don't expire at all, just return an error on write operations
+ #
+ # Note: with any of these policies, Redis will return an error on write
+ # operations when there are no suitable keys for eviction.
+ #
+ # As of this writing these commands are: set setnx setex append
+ # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
+ # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
+ # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
+ # getset mset msetnx exec sort
+ #
+ # The default is:
+ #
+ # maxmemory-policy volatile-lru
+
+ # LRU and minimal TTL algorithms are not precise algorithms but approximated
+ # algorithms (in order to save memory), so you can also select the sample
+ # size to check. For instance by default Redis will check three keys and
+ # pick the one that was used least recently; you can change the sample size
+ # using the following configuration directive.
+ #
+ # maxmemory-samples 3
+
+ ############################## APPEND ONLY MODE ###############################
+
+ # By default Redis asynchronously dumps the dataset on disk. This mode is
+ # good enough in many applications, but an issue with the Redis process or
+ # a power outage may result in a few minutes of lost writes (depending on
+ # the configured save points).
+ #
+ # The Append Only File is an alternative persistence mode that provides
+ # much better durability. For instance using the default data fsync policy
+ # (see later in the config file) Redis can lose just one second of writes in a
+ # dramatic event like a server power outage, or a single write if something
+ # goes wrong with the Redis process itself, but the operating system is
+ # still running correctly.
+ #
+ # AOF and RDB persistence can be enabled at the same time without problems.
+ # If the AOF is enabled on startup Redis will load the AOF, that is the file
+ # with the better durability guarantees.
+ #
+ # Please check http://redis.io/topics/persistence for more information.
+
+ appendonly no
+
+ # The name of the append only file (default: "appendonly.aof")
+ # appendfilename appendonly.aof
+
+ # The fsync() call tells the Operating System to actually write data on disk
+ # instead of waiting for more data in the output buffer. Some OS will really flush
+ # data on disk, some other OS will just try to do it ASAP.
+ #
+ # Redis supports three different modes:
+ #
+ # no: don't fsync, just let the OS flush the data when it wants. Faster.
+ # always: fsync after every write to the append only log. Slow, safest.
+ # everysec: fsync only one time every second. Compromise.
+ #
+ # The default is "everysec", usually the right compromise between
+ # speed and data safety. It's up to you to understand if you can relax this to
+ # "no" that will let the operating system flush the output buffer when
+ # it wants, for better performance (but if you can live with the idea of
+ # some data loss consider the default persistence mode that's snapshotting),
+ # or on the contrary, use "always" that's very slow but a bit safer than
+ # everysec.
+ #
+ # For more details please check the following article:
+ # http://antirez.com/post/redis-persistence-demystified.html
+ #
+ # If unsure, use "everysec".
+
+ # appendfsync always
+ appendfsync everysec
+ # appendfsync no
+
+ # When the AOF fsync policy is set to always or everysec, and a background
+ # saving process (a background save or AOF log background rewriting) is
+ # performing a lot of I/O against the disk, in some Linux configurations
+ # Redis may block too long on the fsync() call. Note that there is no fix for
+ # this currently, as even performing fsync in a different thread will block
+ # our synchronous write(2) call.
+ #
+ # In order to mitigate this problem it's possible to use the following option
+ # that will prevent fsync() from being called in the main process while a
+ # BGSAVE or BGREWRITEAOF is in progress.
+ #
+ # This means that while another child is saving the durability of Redis is
+ # the same as "appendfsync none", which in practical terms means that it is
+ # possible to lose up to 30 seconds of log in the worst scenario (with the
+ # default Linux settings).
+ #
+ # If you have latency problems turn this to "yes". Otherwise leave it as
+ # "no", the safest pick from the point of view of durability.
+ no-appendfsync-on-rewrite no
+
+ # Automatic rewrite of the append only file.
+ # Redis is able to automatically rewrite the log file implicitly calling
+ # BGREWRITEAOF when the AOF log size grows by the specified percentage.
+ #
+ # This is how it works: Redis remembers the size of the AOF file after the
+ # latest rewrite (or if no rewrite happened since the restart, the size of
+ # the AOF at startup is used).
+ #
+ # This base size is compared to the current size. If the current size is
+ # bigger than the specified percentage, the rewrite is triggered. Also
+ # you need to specify a minimal size for the AOF file to be rewritten, this
+ # is useful to avoid rewriting the AOF file even if the percentage increase
+ # is reached but it is still pretty small.
+ #
+ # Specify a percentage of zero in order to disable the automatic AOF
+ # rewrite feature.
+
+ auto-aof-rewrite-percentage 100
+ auto-aof-rewrite-min-size 64mb
+
+ ################################ LUA SCRIPTING ###############################
+
+ # Max execution time of a Lua script in milliseconds.
+ #
+ # If the maximum execution time is reached Redis will log that a script is
+ # still in execution after the maximum allowed time and will start to
+ # reply to queries with an error.
+ #
+ # When a long running script exceeds the maximum execution time only the
+ # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
+ # used to stop a script that has not yet called write commands. The second
+ # is the only way to shut down the server in the case a write command was
+ # already issued by the script but the user doesn't want to wait for the natural
+ # termination of the script.
+ #
+ # Set it to 0 or a negative value for unlimited execution without warnings.
+ lua-time-limit 5000
+
+ ################################## SLOW LOG ###################################
+
+ # The Redis Slow Log is a system to log queries that exceeded a specified
+ # execution time. The execution time does not include the I/O operations
+ # like talking with the client, sending the reply and so forth,
+ # but just the time needed to actually execute the command (this is the only
+ # stage of command execution where the thread is blocked and can not serve
+ # other requests in the meantime).
+ #
+ # You can configure the slow log with two parameters: one tells Redis
+ # what is the execution time, in microseconds, to exceed in order for the
+ # command to get logged, and the other parameter is the length of the
+ # slow log. When a new command is logged the oldest one is removed from the
+ # queue of logged commands.
+
+ # The following time is expressed in microseconds, so 1000000 is equivalent
+ # to one second. Note that a negative number disables the slow log, while
+ # a value of zero forces the logging of every command.
+ slowlog-log-slower-than 10000
+
+ # There is no limit to this length. Just be aware that it will consume memory.
+ # You can reclaim memory used by the slow log with SLOWLOG RESET.
+ slowlog-max-len 128
+
+ ############################### ADVANCED CONFIG ###############################
+
+ # Hashes are encoded using a memory efficient data structure when they have a
+ # small number of entries, and the biggest entry does not exceed a given
+ # threshold. These thresholds can be configured using the following directives.
+ hash-max-ziplist-entries 512
+ hash-max-ziplist-value 64
+
+ # Similarly to hashes, small lists are also encoded in a special way in order
+ # to save a lot of space. The special representation is only used when
+ # you are under the following limits:
+ list-max-ziplist-entries 512
+ list-max-ziplist-value 64
+
+ # Sets have a special encoding in just one case: when a set is composed
+ # of just strings that happen to be integers in radix 10 in the range
+ # of 64 bit signed integers.
+ # The following configuration setting sets the limit in the size of the
+ # set in order to use this special memory saving encoding.
+ set-max-intset-entries 512
+
+ # Similarly to hashes and lists, sorted sets are also specially encoded in
+ # order to save a lot of space. This encoding is only used when the length and
+ # elements of a sorted set are below the following limits:
+ zset-max-ziplist-entries 128
+ zset-max-ziplist-value 64
+
+ # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
+ # order to help rehashing the main Redis hash table (the one mapping top-level
+ # keys to values). The hash table implementation Redis uses (see dict.c)
+ # performs a lazy rehashing: the more operations you run into a hash table
+ # that is rehashing, the more rehashing "steps" are performed, so if the
+ # server is idle the rehashing is never complete and some more memory is used
+ # by the hash table.
+ #
+ # The default is to use this millisecond 10 times every second in order to
+ # actively rehash the main dictionaries, freeing memory when possible.
+ #
+ # If unsure:
+ # use "activerehashing no" if you have hard latency requirements and it is
+ # not a good thing in your environment that Redis can reply from time to time
+ # to queries with a 2 millisecond delay.
+ #
+ # use "activerehashing yes" if you don't have such hard requirements but
+ # want to free memory asap when possible.
+ activerehashing yes
+
+ # The client output buffer limits can be used to force disconnection of clients
+ # that are not reading data from the server fast enough for some reason (a
+ # common reason is that a Pub/Sub client can't consume messages as fast as the
+ # publisher can produce them).
+ #
+ # The limit can be set differently for the three different classes of clients:
+ #
+ # normal -> normal clients
+ # slave -> slave clients and MONITOR clients
+ # pubsub -> clients subscribed to at least one pubsub channel or pattern
+ #
+ # The syntax of every client-output-buffer-limit directive is the following:
+ #
+ # client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+ #
+ # A client is immediately disconnected once the hard limit is reached, or if
+ # the soft limit is reached and remains reached for the specified number of
+ # seconds (continuously).
+ # So for instance if the hard limit is 32 megabytes and the soft limit is
+ # 16 megabytes / 10 seconds, the client will get disconnected immediately
+ # if the size of the output buffers reach 32 megabytes, but will also get
+ # disconnected if the client reaches 16 megabytes and continuously overcomes
+ # the limit for 10 seconds.
+ #
+ # By default normal clients are not limited because they don't receive data
+ # without asking (in a push way), but just after a request, so only
+ # asynchronous clients may create a scenario where data is requested faster
+ # than it can be read.
+ #
+ # Instead there is a default limit for pubsub and slave clients, since
+ # subscribers and slaves receive data in a push fashion.
+ #
+ # Both the hard and the soft limit can be disabled by setting them to zero.
+ client-output-buffer-limit normal 0 0 0
+ client-output-buffer-limit slave 256mb 64mb 60
+ client-output-buffer-limit pubsub 32mb 8mb 60
+
+ ################################## INCLUDES ###################################
+
+ # Include one or more other config files here. This is useful if you
+ # have a standard template that goes to all Redis servers but also need
+ # to customize a few per-server settings. Include files can include
+ # other files, so use this wisely.
+ #
+ # include /path/to/local.conf
+ # include /path/to/other.conf