beetle 0.4.3 → 0.4.4

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz: 0e984630440947f81643287a32934138fcdaee58
-  data.tar.gz: 421b42f6d56a837ba9e2a05a0d0f0087f8d5a737
+  metadata.gz: ce5804b3e44afd72e9f650d3599d9a57ed4cc4b5
+  data.tar.gz: f3ea8613d41b1a28f6ed89af6b85cffc7502cb02
 SHA512:
-  metadata.gz: c91106551e7e66cf01cdf614130568abe45dd490bcf51c8e9669214c20af29d91a883c545fea13ae3555a2e8a5c5609784d7b7bbb7e01db733a4c06eefdee91f
-  data.tar.gz: 588b1921ed5a473953572fcf542c62a6a5344700eef6c50d821ee0430a3e38576ee740e0c3330295ff011a887aa63eebd7c56cc056f01a011a502cd4cd2adf46
+  metadata.gz: 3615e631ca7dc4a2dfe90f5fa04425237e5c2148eb567b881f086eb467d4f605cc52bec4c3d2969abeb93c641bc2e15a8eb56548895227d7e19ff4ef16943fbc
+  data.tar.gz: c718fccb2c910adb9f5cedf698ca1f0396d9d8b96971eb5ed5853c66da384a6b4eab9aa1e0b1d0f5f9a54a2697132df4c812d565bc9c44b531e0d34811af21b7
data/RELEASE_NOTES.rdoc CHANGED
@@ -1,5 +1,11 @@
 = Release Notes
 
+== Version 0.4.4
+* added command to show beetle version: "beetle --version"
+* configuration server tracks ids of unknown clients
+* configuration clients now send heartbeats
+* configuration server tracks last seen times of clients, based on heartbeats
+
 == Version 0.4.3
 * fixed a race condition which could lead to duplicate message processing
 * fixed eventmachine shutdown sequence problem, which led to ACKs
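
For reference, the new switch mentioned in the 0.4.4 notes would be invoked as shown below (usage sketch only; the exact output format is not part of this diff):

  beetle --version    # prints the installed beetle gem version (here: 0.4.4)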
data/Rakefile CHANGED
@@ -70,16 +70,18 @@ namespace :rabbit do
 end
 
 namespace :redis do
-  def config_file(suffix)
-    File.expand_path(File.dirname(__FILE__)+"/etc/redis-#{suffix}.conf")
-  end
-  desc "start main redis"
-  task :start1 do
-    exec "redis-server #{config_file(:master)}"
-  end
-  desc "start slave redis"
-  task :start2 do
-    exec "redis-server #{config_file(:slave)}"
+  namespace :start do
+    def config_file(suffix)
+      File.expand_path(File.dirname(__FILE__)+"/etc/redis-#{suffix}.conf")
+    end
+    desc "start redis master"
+    task :master do
+      exec "redis-server #{config_file(:master)}"
+    end
+    desc "start redis slave"
+    task :slave do
+      exec "redis-server #{config_file(:slave)}"
+    end
   end
 end
 
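
With the redis tasks now nested in a :start sub-namespace, the rake invocations change accordingly. The following is inferred from the namespace nesting above rather than taken from the gem's documentation:

  rake redis:start:master   # previously: rake redis:start1
  rake redis:start:slave    # previously: rake redis:start2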
@@ -1,42 +1,128 @@
1
- # Redis configuration file example
1
+ # Redis configuration file example.
2
+ #
3
+ # Note that in order to read the configuration file, Redis must be
4
+ # started with the file path as first argument:
5
+ #
6
+ # ./redis-server /path/to/redis.conf
7
+
8
+ # Note on units: when memory size is needed, it is possible to specify
9
+ # it in the usual form of 1k 5GB 4M and so forth:
10
+ #
11
+ # 1k => 1000 bytes
12
+ # 1kb => 1024 bytes
13
+ # 1m => 1000000 bytes
14
+ # 1mb => 1024*1024 bytes
15
+ # 1g => 1000000000 bytes
16
+ # 1gb => 1024*1024*1024 bytes
17
+ #
18
+ # units are case insensitive so 1GB 1Gb 1gB are all the same.
19
+
20
+ ################################## INCLUDES ###################################
21
+
22
+ # Include one or more other config files here. This is useful if you
23
+ # have a standard template that goes to all Redis servers but also need
24
+ # to customize a few per-server settings. Include files can include
25
+ # other files, so use this wisely.
26
+ #
27
+ # Notice option "include" won't be rewritten by command "CONFIG REWRITE"
28
+ # from admin or Redis Sentinel. Since Redis always uses the last processed
29
+ # line as value of a configuration directive, you'd better put includes
30
+ # at the beginning of this file to avoid overwriting config change at runtime.
31
+ #
32
+ # If instead you are interested in using includes to override configuration
33
+ # options, it is better to use include as the last line.
34
+ #
35
+ # include /path/to/local.conf
36
+ # include /path/to/other.conf
37
+
38
+ ################################ GENERAL #####################################
2
39
 
3
40
  # By default Redis does not run as a daemon. Use 'yes' if you need it.
4
41
  # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
5
42
  daemonize yes
6
43
 
7
- # When run as a daemon, Redis write a pid file in /var/run/redis.pid by default.
8
- # You can specify a custom pid file location here.
44
+ # When running daemonized, Redis writes a pid file in /var/run/redis.pid by
45
+ # default. You can specify a custom pid file location here.
9
46
  pidfile <%= pid_file %>
10
47
 
11
- # Accept connections on the specified port, default is 6379
48
+ # Accept connections on the specified port, default is 6379.
49
+ # If port 0 is specified Redis will not listen on a TCP socket.
12
50
  port <%= port %>
13
51
 
14
- # If you want you can bind a single interface, if the bind option is not
15
- # specified all the interfaces will listen for connections.
52
+ # TCP listen() backlog.
16
53
  #
54
+ # In high requests-per-second environments you need a high backlog in order
55
+ # to avoid slow clients connections issues. Note that the Linux kernel
56
+ # will silently truncate it to the value of /proc/sys/net/core/somaxconn so
57
+ # make sure to raise both the value of somaxconn and tcp_max_syn_backlog
58
+ # in order to get the desired effect.
59
+ # tcp-backlog 511
60
+
61
+ # By default Redis listens for connections from all the network interfaces
62
+ # available on the server. It is possible to listen to just one or multiple
63
+ # interfaces using the "bind" configuration directive, followed by one or
64
+ # more IP addresses.
65
+ #
66
+ # Examples:
67
+ #
68
+ # bind 192.168.1.100 10.0.0.1
17
69
  # bind 127.0.0.1
18
70
 
71
+ # Specify the path for the Unix socket that will be used to listen for
72
+ # incoming connections. There is no default, so Redis will not listen
73
+ # on a unix socket when not specified.
74
+ #
75
+ # unixsocket /tmp/redis.sock
76
+ # unixsocketperm 700
77
+
19
78
  # Close the connection after a client is idle for N seconds (0 to disable)
20
79
  timeout 300
21
80
 
22
- # Set server verbosity to 'debug'
23
- # it can be one of:
81
+ # TCP keepalive.
82
+ #
83
+ # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
84
+ # of communication. This is useful for two reasons:
85
+ #
86
+ # 1) Detect dead peers.
87
+ # 2) Take the connection alive from the point of view of network
88
+ # equipment in the middle.
89
+ #
90
+ # On Linux, the specified value (in seconds) is the period used to send ACKs.
91
+ # Note that to close the connection the double of the time is needed.
92
+ # On other kernels the period depends on the kernel configuration.
93
+ #
94
+ # A reasonable value for this option is 60 seconds.
95
+ tcp-keepalive 0
96
+
97
+ # Specify the server verbosity level.
98
+ # This can be one of:
24
99
  # debug (a lot of information, useful for development/testing)
100
+ # verbose (many rarely useful info, but not a mess like the debug level)
25
101
  # notice (moderately verbose, what you want in production probably)
26
102
  # warning (only very important / critical messages are logged)
27
103
  loglevel debug
28
104
 
29
- # Specify the log file name. Also 'stdout' can be used to force
30
- # the demon to log on the standard output. Note that if you use standard
105
+ # Specify the log file name. Also the empty string can be used to force
106
+ # Redis to log on the standard output. Note that if you use standard
31
107
  # output for logging but daemonize, logs will be sent to /dev/null
32
108
  logfile <%= log_file %>
33
109
 
110
+ # To enable logging to the system logger, just set 'syslog-enabled' to yes,
111
+ # and optionally update the other syslog parameters to suit your needs.
112
+ # syslog-enabled no
113
+
114
+ # Specify the syslog identity.
115
+ # syslog-ident redis
116
+
117
+ # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
118
+ # syslog-facility local0
119
+
34
120
  # Set the number of databases. The default database is DB 0, you can select
35
121
  # a different one on a per-connection basis using SELECT <dbid> where
36
122
  # dbid is a number between 0 and 'databases'-1
37
123
  databases 16
38
124
 
39
- ################################ SNAPSHOTTING #################################
125
+ ################################ SNAPSHOTTING ################################
40
126
  #
41
127
  # Save the DB on disk:
42
128
  #
@@ -49,29 +135,77 @@ databases 16
49
135
  # after 900 sec (15 min) if at least 1 key changed
50
136
  # after 300 sec (5 min) if at least 10 keys changed
51
137
  # after 60 sec if at least 10000 keys changed
138
+ #
139
+ # Note: you can disable saving completely by commenting out all "save" lines.
140
+ #
141
+ # It is also possible to remove all the previously configured save
142
+ # points by adding a save directive with a single empty string argument
143
+ # like in the following example:
144
+ #
145
+ # save ""
146
+
52
147
  save 900 1
53
148
  save 300 10
54
149
  save 60 10000
55
150
 
151
+ # By default Redis will stop accepting writes if RDB snapshots are enabled
152
+ # (at least one save point) and the latest background save failed.
153
+ # This will make the user aware (in a hard way) that data is not persisting
154
+ # on disk properly, otherwise chances are that no one will notice and some
155
+ # disaster will happen.
156
+ #
157
+ # If the background saving process will start working again Redis will
158
+ # automatically allow writes again.
159
+ #
160
+ # However if you have setup your proper monitoring of the Redis server
161
+ # and persistence, you may want to disable this feature so that Redis will
162
+ # continue to work as usual even if there are problems with disk,
163
+ # permissions, and so forth.
164
+ stop-writes-on-bgsave-error no
165
+
56
166
  # Compress string objects using LZF when dump .rdb databases?
57
167
  # For default that's set to 'yes' as it's almost always a win.
58
168
  # If you want to save some CPU in the saving child set it to 'no' but
59
169
  # the dataset will likely be bigger if you have compressible values or keys.
60
170
  rdbcompression yes
61
171
 
172
+ # Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
173
+ # This makes the format more resistant to corruption but there is a performance
174
+ # hit to pay (around 10%) when saving and loading RDB files, so you can disable it
175
+ # for maximum performances.
176
+ #
177
+ # RDB files created with checksum disabled have a checksum of zero that will
178
+ # tell the loading code to skip the check.
179
+ rdbchecksum yes
180
+
62
181
  # The filename where to dump the DB
63
182
  dbfilename dump.rdb
64
183
 
65
- # For default save/load DB in/from the working directory
66
- # Note that you must specify a directory not a file name.
184
+ # The working directory.
185
+ #
186
+ # The DB will be written inside this directory, with the filename specified
187
+ # above using the 'dbfilename' configuration directive.
188
+ #
189
+ # The Append Only File will also be created inside this directory.
190
+ #
191
+ # Note that you must specify a directory here, not a file name.
67
192
  dir <%= dir %>
68
193
 
69
194
  ################################# REPLICATION #################################
70
195
 
71
196
  # Master-Slave replication. Use slaveof to make a Redis instance a copy of
72
- # another Redis server. Note that the configuration is local to the slave
73
- # so for example it is possible to configure the slave to save the DB with a
74
- # different interval, or to listen to another port, and so on.
197
+ # another Redis server. A few things to understand ASAP about Redis replication.
198
+ #
199
+ # 1) Redis replication is asynchronous, but you can configure a master to
200
+ # stop accepting writes if it appears to be not connected with at least
201
+ # a given number of slaves.
202
+ # 2) Redis slaves are able to perform a partial resynchronization with the
203
+ # master if the replication link is lost for a relatively small amount of
204
+ # time. You may want to configure the replication backlog size (see the next
205
+ # sections of this file) with a sensible value depending on your needs.
206
+ # 3) Replication is automatic and does not need user intervention. After a
207
+ # network partition slaves automatically try to reconnect to masters
208
+ # and resynchronize with them.
75
209
  #
76
210
  # slaveof <masterip> <masterport>
77
211
 
@@ -82,6 +216,170 @@ dir <%= dir %>
82
216
  #
83
217
  # masterauth <master-password>
84
218
 
219
+ # When a slave loses its connection with the master, or when the replication
220
+ # is still in progress, the slave can act in two different ways:
221
+ #
222
+ # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
223
+ # still reply to client requests, possibly with out of date data, or the
224
+ # data set may just be empty if this is the first synchronization.
225
+ #
226
+ # 2) if slave-serve-stale-data is set to 'no' the slave will reply with
227
+ # an error "SYNC with master in progress" to all the kind of commands
228
+ # but to INFO and SLAVEOF.
229
+ #
230
+ slave-serve-stale-data yes
231
+
232
+ # You can configure a slave instance to accept writes or not. Writing against
233
+ # a slave instance may be useful to store some ephemeral data (because data
234
+ # written on a slave will be easily deleted after resync with the master) but
235
+ # may also cause problems if clients are writing to it because of a
236
+ # misconfiguration.
237
+ #
238
+ # Since Redis 2.6 by default slaves are read-only.
239
+ #
240
+ # Note: read only slaves are not designed to be exposed to untrusted clients
241
+ # on the internet. It's just a protection layer against misuse of the instance.
242
+ # Still a read only slave exports by default all the administrative commands
243
+ # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
244
+ # security of read only slaves using 'rename-command' to shadow all the
245
+ # administrative / dangerous commands.
246
+ slave-read-only no
247
+
248
+ # Replication SYNC strategy: disk or socket.
249
+ #
250
+ # -------------------------------------------------------
251
+ # WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY
252
+ # -------------------------------------------------------
253
+ #
254
+ # New slaves and reconnecting slaves that are not able to continue the replication
255
+ # process just receiving differences, need to do what is called a "full
256
+ # synchronization". An RDB file is transmitted from the master to the slaves.
257
+ # The transmission can happen in two different ways:
258
+ #
259
+ # 1) Disk-backed: The Redis master creates a new process that writes the RDB
260
+ # file on disk. Later the file is transferred by the parent
261
+ # process to the slaves incrementally.
262
+ # 2) Diskless: The Redis master creates a new process that directly writes the
263
+ # RDB file to slave sockets, without touching the disk at all.
264
+ #
265
+ # With disk-backed replication, while the RDB file is generated, more slaves
266
+ # can be queued and served with the RDB file as soon as the current child producing
267
+ # the RDB file finishes its work. With diskless replication instead once
268
+ # the transfer starts, new slaves arriving will be queued and a new transfer
269
+ # will start when the current one terminates.
270
+ #
271
+ # When diskless replication is used, the master waits a configurable amount of
272
+ # time (in seconds) before starting the transfer in the hope that multiple slaves
273
+ # will arrive and the transfer can be parallelized.
274
+ #
275
+ # With slow disks and fast (large bandwidth) networks, diskless replication
276
+ # works better.
277
+ repl-diskless-sync no
278
+
279
+ # When diskless replication is enabled, it is possible to configure the delay
280
+ # the server waits in order to spawn the child that transfers the RDB via socket
281
+ # to the slaves.
282
+ #
283
+ # This is important since once the transfer starts, it is not possible to serve
284
+ # new slaves arriving, that will be queued for the next RDB transfer, so the server
285
+ # waits a delay in order to let more slaves arrive.
286
+ #
287
+ # The delay is specified in seconds, and by default is 5 seconds. To disable
288
+ # it entirely just set it to 0 seconds and the transfer will start ASAP.
289
+ repl-diskless-sync-delay 5
290
+
291
+ # Slaves send PINGs to server in a predefined interval. It's possible to change
292
+ # this interval with the repl_ping_slave_period option. The default value is 10
293
+ # seconds.
294
+ #
295
+ # repl-ping-slave-period 10
296
+
297
+ # The following option sets the replication timeout for:
298
+ #
299
+ # 1) Bulk transfer I/O during SYNC, from the point of view of slave.
300
+ # 2) Master timeout from the point of view of slaves (data, pings).
301
+ # 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
302
+ #
303
+ # It is important to make sure that this value is greater than the value
304
+ # specified for repl-ping-slave-period otherwise a timeout will be detected
305
+ # every time there is low traffic between the master and the slave.
306
+ #
307
+ # repl-timeout 60
308
+
309
+ # Disable TCP_NODELAY on the slave socket after SYNC?
310
+ #
311
+ # If you select "yes" Redis will use a smaller number of TCP packets and
312
+ # less bandwidth to send data to slaves. But this can add a delay for
313
+ # the data to appear on the slave side, up to 40 milliseconds with
314
+ # Linux kernels using a default configuration.
315
+ #
316
+ # If you select "no" the delay for data to appear on the slave side will
317
+ # be reduced but more bandwidth will be used for replication.
318
+ #
319
+ # By default we optimize for low latency, but in very high traffic conditions
320
+ # or when the master and slaves are many hops away, turning this to "yes" may
321
+ # be a good idea.
322
+ repl-disable-tcp-nodelay no
323
+
324
+ # Set the replication backlog size. The backlog is a buffer that accumulates
325
+ # slave data when slaves are disconnected for some time, so that when a slave
326
+ # wants to reconnect again, often a full resync is not needed, but a partial
327
+ # resync is enough, just passing the portion of data the slave missed while
328
+ # disconnected.
329
+ #
330
+ # The bigger the replication backlog, the longer the time the slave can be
331
+ # disconnected and later be able to perform a partial resynchronization.
332
+ #
333
+ # The backlog is only allocated once there is at least a slave connected.
334
+ #
335
+ # repl-backlog-size 1mb
336
+
337
+ # After a master has no longer connected slaves for some time, the backlog
338
+ # will be freed. The following option configures the amount of seconds that
339
+ # need to elapse, starting from the time the last slave disconnected, for
340
+ # the backlog buffer to be freed.
341
+ #
342
+ # A value of 0 means to never release the backlog.
343
+ #
344
+ # repl-backlog-ttl 3600
345
+
346
+ # The slave priority is an integer number published by Redis in the INFO output.
347
+ # It is used by Redis Sentinel in order to select a slave to promote into a
348
+ # master if the master is no longer working correctly.
349
+ #
350
+ # A slave with a low priority number is considered better for promotion, so
351
+ # for instance if there are three slaves with priority 10, 100, 25 Sentinel will
352
+ # pick the one with priority 10, that is the lowest.
353
+ #
354
+ # However a special priority of 0 marks the slave as not able to perform the
355
+ # role of master, so a slave with priority of 0 will never be selected by
356
+ # Redis Sentinel for promotion.
357
+ #
358
+ # By default the priority is 100.
359
+ slave-priority 100
360
+
361
+ # It is possible for a master to stop accepting writes if there are less than
362
+ # N slaves connected, having a lag less or equal than M seconds.
363
+ #
364
+ # The N slaves need to be in "online" state.
365
+ #
366
+ # The lag in seconds, that must be <= the specified value, is calculated from
367
+ # the last ping received from the slave, that is usually sent every second.
368
+ #
369
+ # This option does not GUARANTEE that N replicas will accept the write, but
370
+ # will limit the window of exposure for lost writes in case not enough slaves
371
+ # are available, to the specified number of seconds.
372
+ #
373
+ # For example to require at least 3 slaves with a lag <= 10 seconds use:
374
+ #
375
+ # min-slaves-to-write 3
376
+ # min-slaves-max-lag 10
377
+ #
378
+ # Setting one or the other to 0 disables the feature.
379
+ #
380
+ # By default min-slaves-to-write is set to 0 (feature disabled) and
381
+ # min-slaves-max-lag is set to 10.
382
+
85
383
  ################################## SECURITY ###################################
86
384
 
87
385
  # Require clients to issue AUTH <PASSWORD> before processing any other
@@ -91,94 +389,444 @@ dir <%= dir %>
91
389
  # This should stay commented out for backward compatibility and because most
92
390
  # people do not need auth (e.g. they run their own servers).
93
391
  #
392
+ # Warning: since Redis is pretty fast an outside user can try up to
393
+ # 150k passwords per second against a good box. This means that you should
394
+ # use a very strong password otherwise it will be very easy to break.
395
+ #
94
396
  # requirepass foobared
95
397
 
398
+ # Command renaming.
399
+ #
400
+ # It is possible to change the name of dangerous commands in a shared
401
+ # environment. For instance the CONFIG command may be renamed into something
402
+ # hard to guess so that it will still be available for internal-use tools
403
+ # but not available for general clients.
404
+ #
405
+ # Example:
406
+ #
407
+ # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
408
+ #
409
+ # It is also possible to completely kill a command by renaming it into
410
+ # an empty string:
411
+ #
412
+ # rename-command CONFIG ""
413
+ #
414
+ # Please note that changing the name of commands that are logged into the
415
+ # AOF file or transmitted to slaves may cause problems.
416
+
96
417
  ################################### LIMITS ####################################
97
418
 
98
- # Set the max number of connected clients at the same time. By default there
99
- # is no limit, and it's up to the number of file descriptors the Redis process
100
- # is able to open. The special value '0' means no limts.
419
+ # Set the max number of connected clients at the same time. By default
420
+ # this limit is set to 10000 clients, however if the Redis server is not
421
+ # able to configure the process file limit to allow for the specified limit
422
+ # the max number of allowed clients is set to the current file limit
423
+ # minus 32 (as Redis reserves a few file descriptors for internal uses).
424
+ #
101
425
  # Once the limit is reached Redis will close all the new connections sending
102
426
  # an error 'max number of clients reached'.
103
427
  #
104
- maxclients 128
428
+ # maxclients 10000
105
429
 
106
430
  # Don't use more memory than the specified amount of bytes.
107
- # When the memory limit is reached Redis will try to remove keys with an
108
- # EXPIRE set. It will try to start freeing keys that are going to expire
109
- # in little time and preserve keys with a longer time to live.
110
- # Redis will also try to remove objects from free lists if possible.
111
- #
112
- # If all this fails, Redis will start to reply with errors to commands
113
- # that will use more memory, like SET, LPUSH, and so on, and will continue
114
- # to reply to most read-only commands like GET.
115
- #
116
- # WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
117
- # 'state' server or cache, not as a real DB. When Redis is used as a real
118
- # database the memory usage will grow over the weeks, it will be obvious if
119
- # it is going to use too much memory in the long run, and you'll have the time
120
- # to upgrade. With maxmemory after the limit is reached you'll start to get
121
- # errors for write operations, and this may even lead to DB inconsistency.
431
+ # When the memory limit is reached Redis will try to remove keys
432
+ # according to the eviction policy selected (see maxmemory-policy).
433
+ #
434
+ # If Redis can't remove keys according to the policy, or if the policy is
435
+ # set to 'noeviction', Redis will start to reply with errors to commands
436
+ # that would use more memory, like SET, LPUSH, and so on, and will continue
437
+ # to reply to read-only commands like GET.
438
+ #
439
+ # This option is usually useful when using Redis as an LRU cache, or to set
440
+ # a hard memory limit for an instance (using the 'noeviction' policy).
441
+ #
442
+ # WARNING: If you have slaves attached to an instance with maxmemory on,
443
+ # the size of the output buffers needed to feed the slaves are subtracted
444
+ # from the used memory count, so that network problems / resyncs will
445
+ # not trigger a loop where keys are evicted, and in turn the output
446
+ # buffer of slaves is full with DELs of keys evicted triggering the deletion
447
+ # of more keys, and so forth until the database is completely emptied.
448
+ #
449
+ # In short... if you have slaves attached it is suggested that you set a lower
450
+ # limit for maxmemory so that there is some free RAM on the system for slave
451
+ # output buffers (but this is not needed if the policy is 'noeviction').
122
452
  #
123
453
  # maxmemory <bytes>
124
454
 
455
+ # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
456
+ # is reached. You can select among five behaviors:
457
+ #
458
+ # volatile-lru -> remove the key with an expire set using an LRU algorithm
459
+ # allkeys-lru -> remove any key according to the LRU algorithm
460
+ # volatile-random -> remove a random key with an expire set
461
+ # allkeys-random -> remove a random key, any key
462
+ # volatile-ttl -> remove the key with the nearest expire time (minor TTL)
463
+ # noeviction -> don't expire at all, just return an error on write operations
464
+ #
465
+ # Note: with any of the above policies, Redis will return an error on write
466
+ # operations, when there are no suitable keys for eviction.
467
+ #
468
+ # At the date of writing these commands are: set setnx setex append
469
+ # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
470
+ # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
471
+ # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
472
+ # getset mset msetnx exec sort
473
+ #
474
+ # The default is:
475
+ #
476
+ # maxmemory-policy volatile-lru
477
+
478
+ # LRU and minimal TTL algorithms are not precise algorithms but approximated
479
+ # algorithms (in order to save memory), so you can select as well the sample
480
+ # size to check. For instance for default Redis will check three keys and
481
+ # pick the one that was used less recently, you can change the sample size
482
+ # using the following configuration directive.
483
+ #
484
+ # maxmemory-samples 3
485
+
125
486
  ############################## APPEND ONLY MODE ###############################
126
487
 
127
- # By default Redis asynchronously dumps the dataset on disk. If you can live
128
- # with the idea that the latest records will be lost if something like a crash
129
- # happens this is the preferred way to run Redis. If instead you care a lot
130
- # about your data and don't want to that a single record can get lost you should
131
- # enable the append only mode: when this mode is enabled Redis will append
132
- # every write operation received in the file appendonly.log. This file will
133
- # be read on startup in order to rebuild the full dataset in memory.
488
+ # By default Redis asynchronously dumps the dataset on disk. This mode is
489
+ # good enough in many applications, but an issue with the Redis process or
490
+ # a power outage may result into a few minutes of writes lost (depending on
491
+ # the configured save points).
134
492
  #
135
- # Note that you can have both the async dumps and the append only file if you
136
- # like (you have to comment the "save" statements above to disable the dumps).
137
- # Still if append only mode is enabled Redis will load the data from the
138
- # log file at startup ignoring the dump.rdb file.
493
+ # The Append Only File is an alternative persistence mode that provides
494
+ # much better durability. For instance using the default data fsync policy
495
+ # (see later in the config file) Redis can lose just one second of writes in a
496
+ # dramatic event like a server power outage, or a single write if something
497
+ # wrong with the Redis process itself happens, but the operating system is
498
+ # still running correctly.
139
499
  #
140
- # The name of the append only file is "appendonly.log"
500
+ # AOF and RDB persistence can be enabled at the same time without problems.
501
+ # If the AOF is enabled on startup Redis will load the AOF, that is the file
502
+ # with the better durability guarantees.
141
503
  #
142
- # IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append
143
- # log file in background when it gets too big.
504
+ # Please check http://redis.io/topics/persistence for more information.
144
505
 
145
506
  appendonly yes
146
507
 
508
+ # The name of the append only file (default: "appendonly.aof")
509
+
510
+ appendfilename "appendonly.aof"
511
+
147
512
  # The fsync() call tells the Operating System to actually write data on disk
148
- # instead to wait for more data in the output buffer. Some OS will really flush
513
+ # instead of waiting for more data in the output buffer. Some OS will really flush
149
514
  # data on disk, some other OS will just try to do it ASAP.
150
515
  #
151
516
  # Redis supports three different modes:
152
517
  #
153
518
  # no: don't fsync, just let the OS flush the data when it wants. Faster.
154
- # always: fsync after every write to the append only log . Slow, Safest.
155
- # everysec: fsync only if one second passed since the last fsync. Compromise.
519
+ # always: fsync after every write to the append only log. Slow, Safest.
520
+ # everysec: fsync only one time every second. Compromise.
521
+ #
522
+ # The default is "everysec", as that's usually the right compromise between
523
+ # speed and data safety. It's up to you to understand if you can relax this to
524
+ # "no" that will let the operating system flush the output buffer when
525
+ # it wants, for better performances (but if you can live with the idea of
526
+ # some data loss consider the default persistence mode that's snapshotting),
527
+ # or on the contrary, use "always" that's very slow but a bit safer than
528
+ # everysec.
156
529
  #
157
- # The default is "always" that's the safer of the options. It's up to you to
158
- # understand if you can relax this to "everysec" that will fsync every second
159
- # or to "no" that will let the operating system flush the output buffer when
160
- # it want, for better performances (but if you can live with the idea of
161
- # some data loss consider the default persistence mode that's snapshotting).
530
+ # More details please check the following article:
531
+ # http://antirez.com/post/redis-persistence-demystified.html
532
+ #
533
+ # If unsure, use "everysec".
162
534
 
163
535
  # appendfsync always
164
536
  appendfsync everysec
165
537
  # appendfsync no
166
538
 
539
+ # When the AOF fsync policy is set to always or everysec, and a background
540
+ # saving process (a background save or AOF log background rewriting) is
541
+ # performing a lot of I/O against the disk, in some Linux configurations
542
+ # Redis may block too long on the fsync() call. Note that there is no fix for
543
+ # this currently, as even performing fsync in a different thread will block
544
+ # our synchronous write(2) call.
545
+ #
546
+ # In order to mitigate this problem it's possible to use the following option
547
+ # that will prevent fsync() from being called in the main process while a
548
+ # BGSAVE or BGREWRITEAOF is in progress.
549
+ #
550
+ # This means that while another child is saving, the durability of Redis is
551
+ # the same as "appendfsync none". In practical terms, this means that it is
552
+ # possible to lose up to 30 seconds of log in the worst scenario (with the
553
+ # default Linux settings).
554
+ #
555
+ # If you have latency problems turn this to "yes". Otherwise leave it as
556
+ # "no" that is the safest pick from the point of view of durability.
557
+
558
+ no-appendfsync-on-rewrite no
559
+
560
+ # Automatic rewrite of the append only file.
561
+ # Redis is able to automatically rewrite the log file implicitly calling
562
+ # BGREWRITEAOF when the AOF log size grows by the specified percentage.
563
+ #
564
+ # This is how it works: Redis remembers the size of the AOF file after the
565
+ # latest rewrite (if no rewrite has happened since the restart, the size of
566
+ # the AOF at startup is used).
567
+ #
568
+ # This base size is compared to the current size. If the current size is
569
+ # bigger than the specified percentage, the rewrite is triggered. Also
570
+ # you need to specify a minimal size for the AOF file to be rewritten, this
571
+ # is useful to avoid rewriting the AOF file even if the percentage increase
572
+ # is reached but it is still pretty small.
573
+ #
574
+ # Specify a percentage of zero in order to disable the automatic AOF
575
+ # rewrite feature.
576
+
577
+ auto-aof-rewrite-percentage 100
578
+ auto-aof-rewrite-min-size 64mb
579
+
580
+ # An AOF file may be found to be truncated at the end during the Redis
581
+ # startup process, when the AOF data gets loaded back into memory.
582
+ # This may happen when the system where Redis is running
583
+ # crashes, especially when an ext4 filesystem is mounted without the
584
+ # data=ordered option (however this can't happen when Redis itself
585
+ # crashes or aborts but the operating system still works correctly).
586
+ #
587
+ # Redis can either exit with an error when this happens, or load as much
588
+ # data as possible (the default now) and start if the AOF file is found
589
+ # to be truncated at the end. The following option controls this behavior.
590
+ #
591
+ # If aof-load-truncated is set to yes, a truncated AOF file is loaded and
592
+ # the Redis server starts emitting a log to inform the user of the event.
593
+ # Otherwise if the option is set to no, the server aborts with an error
594
+ # and refuses to start. When the option is set to no, the user requires
595
+ # to fix the AOF file using the "redis-check-aof" utility before to restart
596
+ # the server.
597
+ #
598
+ # Note that if the AOF file will be found to be corrupted in the middle
599
+ # the server will still exit with an error. This option only applies when
600
+ # Redis will try to read more data from the AOF file but not enough bytes
601
+ # will be found.
602
+ aof-load-truncated yes
603
+
604
+ ################################ LUA SCRIPTING ###############################
605
+
606
+ # Max execution time of a Lua script in milliseconds.
607
+ #
608
+ # If the maximum execution time is reached Redis will log that a script is
609
+ # still in execution after the maximum allowed time and will start to
610
+ # reply to queries with an error.
611
+ #
612
+ # When a long running script exceeds the maximum execution time only the
613
+ # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
614
+ # used to stop a script that did not yet called write commands. The second
615
+ # is the only way to shut down the server in the case a write command was
616
+ # already issued by the script but the user doesn't want to wait for the natural
617
+ # termination of the script.
618
+ #
619
+ # Set it to 0 or a negative value for unlimited execution without warnings.
620
+ lua-time-limit 5000
621
+
622
+ ################################## SLOW LOG ###################################
623
+
624
+ # The Redis Slow Log is a system to log queries that exceeded a specified
625
+ # execution time. The execution time does not include the I/O operations
626
+ # like talking with the client, sending the reply and so forth,
627
+ # but just the time needed to actually execute the command (this is the only
628
+ # stage of command execution where the thread is blocked and can not serve
629
+ # other requests in the meantime).
630
+ #
631
+ # You can configure the slow log with two parameters: one tells Redis
632
+ # what is the execution time, in microseconds, to exceed in order for the
633
+ # command to get logged, and the other parameter is the length of the
634
+ # slow log. When a new command is logged the oldest one is removed from the
635
+ # queue of logged commands.
636
+
637
+ # The following time is expressed in microseconds, so 1000000 is equivalent
638
+ # to one second. Note that a negative number disables the slow log, while
639
+ # a value of zero forces the logging of every command.
640
+ slowlog-log-slower-than 10000
641
+
642
+ # There is no limit to this length. Just be aware that it will consume memory.
643
+ # You can reclaim memory used by the slow log with SLOWLOG RESET.
644
+ slowlog-max-len 128
645
+
646
+ ################################ LATENCY MONITOR ##############################
647
+
648
+ # The Redis latency monitoring subsystem samples different operations
649
+ # at runtime in order to collect data related to possible sources of
650
+ # latency of a Redis instance.
651
+ #
652
+ # Via the LATENCY command this information is available to the user that can
653
+ # print graphs and obtain reports.
654
+ #
655
+ # The system only logs operations that were performed in a time equal or
656
+ # greater than the amount of milliseconds specified via the
657
+ # latency-monitor-threshold configuration directive. When its value is set
658
+ # to zero, the latency monitor is turned off.
659
+ #
660
+ # By default latency monitoring is disabled since it is mostly not needed
661
+ # if you don't have latency issues, and collecting data has a performance
662
+ # impact, that while very small, can be measured under big load. Latency
663
+ # monitoring can easily be enabled at runtime using the command
664
+ # "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
665
+ latency-monitor-threshold 0
666
+
667
+ ############################# Event notification ##############################
668
+
669
+ # Redis can notify Pub/Sub clients about events happening in the key space.
670
+ # This feature is documented at http://redis.io/topics/notifications
671
+ #
672
+ # For instance if keyspace events notification is enabled, and a client
673
+ # performs a DEL operation on key "foo" stored in the Database 0, two
674
+ # messages will be published via Pub/Sub:
675
+ #
676
+ # PUBLISH __keyspace@0__:foo del
677
+ # PUBLISH __keyevent@0__:del foo
678
+ #
679
+ # It is possible to select the events that Redis will notify among a set
680
+ # of classes. Every class is identified by a single character:
681
+ #
682
+ # K Keyspace events, published with __keyspace@<db>__ prefix.
683
+ # E Keyevent events, published with __keyevent@<db>__ prefix.
684
+ # g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
685
+ # $ String commands
686
+ # l List commands
687
+ # s Set commands
688
+ # h Hash commands
689
+ # z Sorted set commands
690
+ # x Expired events (events generated every time a key expires)
691
+ # e Evicted events (events generated when a key is evicted for maxmemory)
692
+ # A Alias for g$lshzxe, so that the "AKE" string means all the events.
693
+ #
694
+ # The "notify-keyspace-events" takes as argument a string that is composed
695
+ # of zero or multiple characters. The empty string means that notifications
696
+ # are disabled.
697
+ #
698
+ # Example: to enable list and generic events, from the point of view of the
699
+ # event name, use:
700
+ #
701
+ # notify-keyspace-events Elg
702
+ #
703
+ # Example 2: to get the stream of the expired keys subscribing to channel
704
+ # name __keyevent@0__:expired use:
705
+ #
706
+ # notify-keyspace-events Ex
707
+ #
708
+ # By default all notifications are disabled because most users don't need
709
+ # this feature and the feature has some overhead. Note that if you don't
710
+ # specify at least one of K or E, no events will be delivered.
711
+ notify-keyspace-events ""
712
+
167
713
  ############################### ADVANCED CONFIG ###############################
168
714
 
169
- # Use object sharing. Can save a lot of memory if you have many common
170
- # string in your dataset, but performs lookups against the shared objects
171
- # pool so it uses more CPU and can be a bit slower. Usually it's a good
172
- # idea.
173
- #
174
- # When object sharing is enabled (shareobjects yes) you can use
175
- # shareobjectspoolsize to control the size of the pool used in order to try
176
- # object sharing. A bigger pool size will lead to better sharing capabilities.
177
- # In general you want this value to be at least the double of the number of
178
- # very common strings you have in your dataset.
179
- #
180
- # WARNING: object sharing is experimental, don't enable this feature
181
- # in production before of Redis 1.0-stable. Still please try this feature in
182
- # your development environment so that we can test it better.
183
- # shareobjects no
184
- # shareobjectspoolsize 1024
715
+ # Hashes are encoded using a memory efficient data structure when they have a
716
+ # small number of entries, and the biggest entry does not exceed a given
717
+ # threshold. These thresholds can be configured using the following directives.
718
+ hash-max-ziplist-entries 512
719
+ hash-max-ziplist-value 64
720
+
721
+ # Similarly to hashes, small lists are also encoded in a special way in order
722
+ # to save a lot of space. The special representation is only used when
723
+ # you are under the following limits:
724
+ list-max-ziplist-entries 512
725
+ list-max-ziplist-value 64
726
+
727
+ # Sets have a special encoding in just one case: when a set is composed
728
+ # of just strings that happen to be integers in radix 10 in the range
729
+ # of 64 bit signed integers.
730
+ # The following configuration setting sets the limit in the size of the
731
+ # set in order to use this special memory saving encoding.
732
+ set-max-intset-entries 512
733
+
734
+ # Similarly to hashes and lists, sorted sets are also specially encoded in
735
+ # order to save a lot of space. This encoding is only used when the length and
736
+ # elements of a sorted set are below the following limits:
737
+ zset-max-ziplist-entries 128
738
+ zset-max-ziplist-value 64
739
+
740
+ # HyperLogLog sparse representation bytes limit. The limit includes the
741
+ # 16 bytes header. When an HyperLogLog using the sparse representation crosses
742
+ # this limit, it is converted into the dense representation.
743
+ #
744
+ # A value greater than 16000 is totally useless, since at that point the
745
+ # dense representation is more memory efficient.
746
+ #
747
+ # The suggested value is ~ 3000 in order to have the benefits of
748
+ # the space efficient encoding without slowing down too much PFADD,
749
+ # which is O(N) with the sparse encoding. The value can be raised to
750
+ # ~ 10000 when CPU is not a concern, but space is, and the data set is
751
+ # composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
752
+ hll-sparse-max-bytes 3000
753
+
754
+ # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
755
+ # order to help rehashing the main Redis hash table (the one mapping top-level
756
+ # keys to values). The hash table implementation Redis uses (see dict.c)
757
+ # performs a lazy rehashing: the more operation you run into a hash table
758
+ # that is rehashing, the more rehashing "steps" are performed, so if the
759
+ # server is idle the rehashing is never complete and some more memory is used
760
+ # by the hash table.
761
+ #
762
+ # The default is to use this millisecond 10 times every second in order to
763
+ # actively rehash the main dictionaries, freeing memory when possible.
764
+ #
765
+ # If unsure:
766
+ # use "activerehashing no" if you have hard latency requirements and it is
767
+ # not a good thing in your environment that Redis can reply from time to time
768
+ # to queries with 2 milliseconds delay.
769
+ #
770
+ # use "activerehashing yes" if you don't have such hard requirements but
771
+ # want to free memory asap when possible.
772
+ activerehashing yes
773
+
774
+ # The client output buffer limits can be used to force disconnection of clients
775
+ # that are not reading data from the server fast enough for some reason (a
776
+ # common reason is that a Pub/Sub client can't consume messages as fast as the
777
+ # publisher can produce them).
778
+ #
779
+ # The limit can be set differently for the three different classes of clients:
780
+ #
781
+ # normal -> normal clients including MONITOR clients
782
+ # slave -> slave clients
783
+ # pubsub -> clients subscribed to at least one pubsub channel or pattern
784
+ #
785
+ # The syntax of every client-output-buffer-limit directive is the following:
786
+ #
787
+ # client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
788
+ #
789
+ # A client is immediately disconnected once the hard limit is reached, or if
790
+ # the soft limit is reached and remains reached for the specified number of
791
+ # seconds (continuously).
792
+ # So for instance if the hard limit is 32 megabytes and the soft limit is
793
+ # 16 megabytes / 10 seconds, the client will get disconnected immediately
794
+ # if the size of the output buffers reach 32 megabytes, but will also get
795
+ # disconnected if the client reaches 16 megabytes and continuously overcomes
796
+ # the limit for 10 seconds.
797
+ #
798
+ # By default normal clients are not limited because they don't receive data
799
+ # without asking (in a push way), but just after a request, so only
800
+ # asynchronous clients may create a scenario where data is requested faster
801
+ # than it can read.
802
+ #
803
+ # Instead there is a default limit for pubsub and slave clients, since
804
+ # subscribers and slaves receive data in a push fashion.
805
+ #
806
+ # Both the hard or the soft limit can be disabled by setting them to zero.
807
+ client-output-buffer-limit normal 0 0 0
808
+ client-output-buffer-limit slave 256mb 64mb 60
809
+ client-output-buffer-limit pubsub 32mb 8mb 60
810
+
811
+ # Redis calls an internal function to perform many background tasks, like
812
+ # closing connections of clients in timeout, purging expired keys that are
813
+ # never requested, and so forth.
814
+ #
815
+ # Not all tasks are performed with the same frequency, but Redis checks for
816
+ # tasks to perform according to the specified "hz" value.
817
+ #
818
+ # By default "hz" is set to 10. Raising the value will use more CPU when
819
+ # Redis is idle, but at the same time will make Redis more responsive when
820
+ # there are many keys expiring at the same time, and timeouts may be
821
+ # handled with more precision.
822
+ #
823
+ # The range is between 1 and 500, however a value over 100 is usually not
824
+ # a good idea. Most users should use the default of 10 and raise this up to
825
+ # 100 only in environments where very low latency is required.
826
+ hz 10
827
+
828
+ # When a child rewrites the AOF file, if the following option is enabled
829
+ # the file will be fsync-ed every 32 MB of data generated. This is useful
830
+ # in order to commit the file to the disk more incrementally and avoid
831
+ # big latency spikes.
832
+ aof-rewrite-incremental-fsync yes
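
The redis config diffed above is an ERB template rather than a literal conf file (note the <%= pid_file %>, <%= port %>, <%= log_file %> and <%= dir %> placeholders). The sketch below is a hypothetical illustration of how such a template could be rendered with Ruby's standard ERB library before handing it to redis-server; it is not beetle's actual rendering code, and the template path and values are made up:

  # Hypothetical illustration only -- not beetle's actual rendering code.
  # Fills in the ERB placeholders (pid_file, port, log_file, dir) and writes
  # a concrete config file that redis-server can be started with.
  require "erb"

  template = File.read("etc/redis-master.conf")   # assumed template path
  rendered = ERB.new(template).result_with_hash(
    pid_file: "/tmp/redis-master.pid",            # example values only
    port:     6379,
    log_file: "/tmp/redis-master.log",
    dir:      "/tmp"
  )
  File.write("/tmp/redis-master.conf", rendered)
  # exec "redis-server /tmp/redis-master.conf"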