redis-store 1.1.3 → 1.1.4

Sign up to get free protection for your applications and to get access to all the features.

Potentially problematic release.


This version of redis-store might be problematic. Click here for more details.

@@ -1,417 +1,46 @@
1
- # Redis configuration file example
2
-
3
- # Note on units: when memory size is needed, it is possible to specifiy
4
- # it in the usual form of 1k 5GB 4M and so forth:
5
- #
6
- # 1k => 1000 bytes
7
- # 1kb => 1024 bytes
8
- # 1m => 1000000 bytes
9
- # 1mb => 1024*1024 bytes
10
- # 1g => 1000000000 bytes
11
- # 1gb => 1024*1024*1024 bytes
12
- #
13
- # units are case insensitive so 1GB 1Gb 1gB are all the same.
14
-
15
- # By default Redis does not run as a daemon. Use 'yes' if you need it.
16
- # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
17
1
  daemonize yes
18
-
19
- # When running daemonized, Redis writes a pid file in /var/run/redis.pid by
20
- # default. You can specify a custom pid file location here.
21
2
  pidfile ./tmp/pids/node-two.pid
22
-
23
- # Accept connections on the specified port, default is 6379.
24
- # If port 0 is specified Redis will not listen on a TCP socket.
25
3
  port 6381
26
-
27
- # If you want you can bind a single interface, if the bind option is not
28
- # specified all the interfaces will listen for incoming connections.
29
- #
30
- # bind 127.0.0.1
31
-
32
- # Specify the path for the unix socket that will be used to listen for
33
- # incoming connections. There is no default, so Redis will not listen
34
- # on a unix socket when not specified.
35
- #
36
- # unixsocket /tmp/redis.sock
37
-
38
- # Close the connection after a client is idle for N seconds (0 to disable)
39
- timeout 300
40
-
41
- # Set server verbosity to 'debug'
42
- # it can be one of:
43
- # debug (a lot of information, useful for development/testing)
44
- # verbose (many rarely useful info, but not a mess like the debug level)
45
- # notice (moderately verbose, what you want in production probably)
46
- # warning (only very important / critical messages are logged)
4
+ timeout 0
47
5
  loglevel verbose
48
-
49
- # Specify the log file name. Also 'stdout' can be used to force
50
- # Redis to log on the standard output. Note that if you use standard
51
- # output for logging but daemonize, logs will be sent to /dev/null
52
6
  logfile stdout
53
-
54
- # To enable logging to the system logger, just set 'syslog-enabled' to yes,
55
- # and optionally update the other syslog parameters to suit your needs.
56
- # syslog-enabled no
57
-
58
- # Specify the syslog identity.
59
- # syslog-ident redis
60
-
61
- # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
62
- # syslog-facility local0
63
-
64
- # Set the number of databases. The default database is DB 0, you can select
65
- # a different one on a per-connection basis using SELECT <dbid> where
66
- # dbid is a number between 0 and 'databases'-1
67
7
  databases 16
68
8
 
69
- ################################ SNAPSHOTTING #################################
70
- #
71
- # Save the DB on disk:
72
- #
73
- # save <seconds> <changes>
74
- #
75
- # Will save the DB if both the given number of seconds and the given
76
- # number of write operations against the DB occurred.
77
- #
78
- # In the example below the behaviour will be to save:
79
- # after 900 sec (15 min) if at least 1 key changed
80
- # after 300 sec (5 min) if at least 10 keys changed
81
- # after 60 sec if at least 10000 keys changed
82
- #
83
- # Note: you can disable saving at all commenting all the "save" lines.
84
-
85
9
  save 900 1
86
10
  save 300 10
87
11
  save 60 10000
88
12
 
89
- # Compress string objects using LZF when dump .rdb databases?
90
- # For default that's set to 'yes' as it's almost always a win.
91
- # If you want to save some CPU in the saving child set it to 'no' but
92
- # the dataset will likely be bigger if you have compressible values or keys.
13
+ # stop-writes-on-bgsave-error yes
93
14
  rdbcompression yes
94
-
95
- # The filename where to dump the DB
15
+ # rdbchecksum yes
96
16
  dbfilename tmp/node-two-dump.rdb
97
-
98
- # The working directory.
99
- #
100
- # The DB will be written inside this directory, with the filename specified
101
- # above using the 'dbfilename' configuration directive.
102
- #
103
- # Also the Append Only File will be created inside this directory.
104
- #
105
- # Note that you must specify a directory here, not a file name.
106
17
  dir ./
107
18
 
108
- ################################# REPLICATION #################################
109
-
110
- # Master-Slave replication. Use slaveof to make a Redis instance a copy of
111
- # another Redis server. Note that the configuration is local to the slave
112
- # so for example it is possible to configure the slave to save the DB with a
113
- # different interval, or to listen to another port, and so on.
114
- #
115
- # slaveof 127.0.0.1 6380
116
-
117
- # If the master is password protected (using the "requirepass" configuration
118
- # directive below) it is possible to tell the slave to authenticate before
119
- # starting the replication synchronization process, otherwise the master will
120
- # refuse the slave request.
121
- #
122
- # masterauth <master-password>
123
-
124
- # When a slave lost the connection with the master, or when the replication
125
- # is still in progress, the slave can act in two different ways:
126
- #
127
- # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
128
- # still reply to client requests, possibly with out of data data, or the
129
- # data set may just be empty if this is the first synchronization.
130
- #
131
- # 2) if slave-serve-stale data is set to 'no' the slave will reply with
132
- # an error "SYNC with master in progress" to all the kind of commands
133
- # but to INFO and SLAVEOF.
134
- #
135
19
  slave-serve-stale-data yes
136
-
137
- ################################## SECURITY ###################################
138
-
139
- # Require clients to issue AUTH <PASSWORD> before processing any other
140
- # commands. This might be useful in environments in which you do not trust
141
- # others with access to the host running redis-server.
142
- #
143
- # This should stay commented out for backward compatibility and because most
144
- # people do not need auth (e.g. they run their own servers).
145
- #
146
- # Warning: since Redis is pretty fast an outside user can try up to
147
- # 150k passwords per second against a good box. This means that you should
148
- # use a very strong password otherwise it will be very easy to break.
149
- #
150
- # requirepass foobared
151
-
152
- # Command renaming.
153
- #
154
- # It is possilbe to change the name of dangerous commands in a shared
155
- # environment. For instance the CONFIG command may be renamed into something
156
- # of hard to guess so that it will be still available for internal-use
157
- # tools but not available for general clients.
158
- #
159
- # Example:
160
- #
161
- # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
162
- #
163
- # It is also possilbe to completely kill a command renaming it into
164
- # an empty string:
165
- #
166
- # rename-command CONFIG ""
167
-
168
- ################################### LIMITS ####################################
169
-
170
- # Set the max number of connected clients at the same time. By default there
171
- # is no limit, and it's up to the number of file descriptors the Redis process
172
- # is able to open. The special value '0' means no limits.
173
- # Once the limit is reached Redis will close all the new connections sending
174
- # an error 'max number of clients reached'.
175
- #
176
- # maxclients 128
177
-
178
- # Don't use more memory than the specified amount of bytes.
179
- # When the memory limit is reached Redis will try to remove keys with an
180
- # EXPIRE set. It will try to start freeing keys that are going to expire
181
- # in little time and preserve keys with a longer time to live.
182
- # Redis will also try to remove objects from free lists if possible.
183
- #
184
- # If all this fails, Redis will start to reply with errors to commands
185
- # that will use more memory, like SET, LPUSH, and so on, and will continue
186
- # to reply to most read-only commands like GET.
187
- #
188
- # WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
189
- # 'state' server or cache, not as a real DB. When Redis is used as a real
190
- # database the memory usage will grow over the weeks, it will be obvious if
191
- # it is going to use too much memory in the long run, and you'll have the time
192
- # to upgrade. With maxmemory after the limit is reached you'll start to get
193
- # errors for write operations, and this may even lead to DB inconsistency.
194
- #
195
- # maxmemory <bytes>
196
-
197
- # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
198
- # is reached? You can select among five behavior:
199
- #
200
- # volatile-lru -> remove the key with an expire set using an LRU algorithm
201
- # allkeys-lru -> remove any key accordingly to the LRU algorithm
202
- # volatile-random -> remove a random key with an expire set
203
- # allkeys->random -> remove a random key, any key
204
- # volatile-ttl -> remove the key with the nearest expire time (minor TTL)
205
- # noeviction -> don't expire at all, just return an error on write operations
206
- #
207
- # Note: with all the kind of policies, Redis will return an error on write
208
- # operations, when there are not suitable keys for eviction.
209
- #
210
- # At the date of writing this commands are: set setnx setex append
211
- # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
212
- # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
213
- # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
214
- # getset mset msetnx exec sort
215
- #
216
- # The default is:
217
- #
218
- # maxmemory-policy volatile-lru
219
-
220
- # LRU and minimal TTL algorithms are not precise algorithms but approximated
221
- # algorithms (in order to save memory), so you can select as well the sample
222
- # size to check. For instance for default Redis will check three keys and
223
- # pick the one that was used less recently, you can change the sample size
224
- # using the following configuration directive.
225
- #
226
- # maxmemory-samples 3
227
-
228
- ############################## APPEND ONLY MODE ###############################
229
-
230
- # By default Redis asynchronously dumps the dataset on disk. If you can live
231
- # with the idea that the latest records will be lost if something like a crash
232
- # happens this is the preferred way to run Redis. If instead you care a lot
233
- # about your data and don't want to that a single record can get lost you should
234
- # enable the append only mode: when this mode is enabled Redis will append
235
- # every write operation received in the file appendonly.aof. This file will
236
- # be read on startup in order to rebuild the full dataset in memory.
237
- #
238
- # Note that you can have both the async dumps and the append only file if you
239
- # like (you have to comment the "save" statements above to disable the dumps).
240
- # Still if append only mode is enabled Redis will load the data from the
241
- # log file at startup ignoring the dump.rdb file.
242
- #
243
- # IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append
244
- # log file in background when it gets too big.
20
+ # slave-read-only yes
21
+ # slave-priority 100
245
22
 
246
23
  appendonly no
247
-
248
- # The name of the append only file (default: "appendonly.aof")
249
- # appendfilename appendonly.aof
250
-
251
- # The fsync() call tells the Operating System to actually write data on disk
252
- # instead to wait for more data in the output buffer. Some OS will really flush
253
- # data on disk, some other OS will just try to do it ASAP.
254
- #
255
- # Redis supports three different modes:
256
- #
257
- # no: don't fsync, just let the OS flush the data when it wants. Faster.
258
- # always: fsync after every write to the append only log . Slow, Safest.
259
- # everysec: fsync only if one second passed since the last fsync. Compromise.
260
- #
261
- # The default is "everysec" that's usually the right compromise between
262
- # speed and data safety. It's up to you to understand if you can relax this to
263
- # "no" that will will let the operating system flush the output buffer when
264
- # it wants, for better performances (but if you can live with the idea of
265
- # some data loss consider the default persistence mode that's snapshotting),
266
- # or on the contrary, use "always" that's very slow but a bit safer than
267
- # everysec.
268
- #
269
- # If unsure, use "everysec".
270
-
271
- # appendfsync always
272
24
  appendfsync everysec
273
- # appendfsync no
274
-
275
- # When the AOF fsync policy is set to always or everysec, and a background
276
- # saving process (a background save or AOF log background rewriting) is
277
- # performing a lot of I/O against the disk, in some Linux configurations
278
- # Redis may block too long on the fsync() call. Note that there is no fix for
279
- # this currently, as even performing fsync in a different thread will block
280
- # our synchronous write(2) call.
281
- #
282
- # In order to mitigate this problem it's possible to use the following option
283
- # that will prevent fsync() from being called in the main process while a
284
- # BGSAVE or BGREWRITEAOF is in progress.
285
- #
286
- # This means that while another child is saving the durability of Redis is
287
- # the same as "appendfsync none", that in pratical terms means that it is
288
- # possible to lost up to 30 seconds of log in the worst scenario (with the
289
- # default Linux settings).
290
- #
291
- # If you have latency problems turn this to "yes". Otherwise leave it as
292
- # "no" that is the safest pick from the point of view of durability.
293
25
  no-appendfsync-on-rewrite no
26
+ # auto-aof-rewrite-percentage 100
27
+ # auto-aof-rewrite-min-size 64mb
294
28
 
295
- ################################ VIRTUAL MEMORY ###############################
29
+ # lua-time-limit 5000
296
30
 
297
- # Virtual Memory allows Redis to work with datasets bigger than the actual
298
- # amount of RAM needed to hold the whole dataset in memory.
299
- # In order to do so very used keys are taken in memory while the other keys
300
- # are swapped into a swap file, similarly to what operating systems do
301
- # with memory pages.
302
- #
303
- # To enable VM just set 'vm-enabled' to yes, and set the following three
304
- # VM parameters accordingly to your needs.
31
+ # slowlog-log-slower-than 10000
32
+ # slowlog-max-len 128
305
33
 
306
- vm-enabled no
307
- # vm-enabled yes
308
-
309
- # This is the path of the Redis swap file. As you can guess, swap files
310
- # can't be shared by different Redis instances, so make sure to use a swap
311
- # file for every redis process you are running. Redis will complain if the
312
- # swap file is already in use.
313
- #
314
- # The best kind of storage for the Redis swap file (that's accessed at random)
315
- # is a Solid State Disk (SSD).
316
- #
317
- # *** WARNING *** if you are using a shared hosting the default of putting
318
- # the swap file under /tmp is not secure. Create a dir with access granted
319
- # only to Redis user and configure Redis to create the swap file there.
320
- vm-swap-file /tmp/redis.swap
321
-
322
- # vm-max-memory configures the VM to use at max the specified amount of
323
- # RAM. Everything that deos not fit will be swapped on disk *if* possible, that
324
- # is, if there is still enough contiguous space in the swap file.
325
- #
326
- # With vm-max-memory 0 the system will swap everything it can. Not a good
327
- # default, just specify the max amount of RAM you can in bytes, but it's
328
- # better to leave some margin. For instance specify an amount of RAM
329
- # that's more or less between 60 and 80% of your free RAM.
330
- vm-max-memory 0
331
-
332
- # Redis swap files is split into pages. An object can be saved using multiple
333
- # contiguous pages, but pages can't be shared between different objects.
334
- # So if your page is too big, small objects swapped out on disk will waste
335
- # a lot of space. If you page is too small, there is less space in the swap
336
- # file (assuming you configured the same number of total swap file pages).
337
- #
338
- # If you use a lot of small objects, use a page size of 64 or 32 bytes.
339
- # If you use a lot of big objects, use a bigger page size.
340
- # If unsure, use the default :)
341
- vm-page-size 32
342
-
343
- # Number of total memory pages in the swap file.
344
- # Given that the page table (a bitmap of free/used pages) is taken in memory,
345
- # every 8 pages on disk will consume 1 byte of RAM.
346
- #
347
- # The total swap size is vm-page-size * vm-pages
348
- #
349
- # With the default of 32-bytes memory pages and 134217728 pages Redis will
350
- # use a 4 GB swap file, that will use 16 MB of RAM for the page table.
351
- #
352
- # It's better to use the smallest acceptable value for your application,
353
- # but the default is large in order to work in most conditions.
354
- vm-pages 134217728
355
-
356
- # Max number of VM I/O threads running at the same time.
357
- # This threads are used to read/write data from/to swap file, since they
358
- # also encode and decode objects from disk to memory or the reverse, a bigger
359
- # number of threads can help with big objects even if they can't help with
360
- # I/O itself as the physical device may not be able to couple with many
361
- # reads/writes operations at the same time.
362
- #
363
- # The special value of 0 turn off threaded I/O and enables the blocking
364
- # Virtual Memory implementation.
365
- vm-max-threads 4
366
-
367
- ############################### ADVANCED CONFIG ###############################
368
-
369
- # Hashes are encoded in a special way (much more memory efficient) when they
370
- # have at max a given numer of elements, and the biggest element does not
371
- # exceed a given threshold. You can configure this limits with the following
372
- # configuration directives.
373
- hash-max-zipmap-entries 512
374
- hash-max-zipmap-value 64
375
-
376
- # Similarly to hashes, small lists are also encoded in a special way in order
377
- # to save a lot of space. The special representation is only used when
378
- # you are under the following limits:
34
+ # hash-max-ziplist-entries 512
35
+ # hash-max-ziplist-value 64
379
36
  list-max-ziplist-entries 512
380
37
  list-max-ziplist-value 64
381
-
382
- # Sets have a special encoding in just one case: when a set is composed
383
- # of just strings that happens to be integers in radix 10 in the range
384
- # of 64 bit signed integers.
385
- # The following configuration setting sets the limit in the size of the
386
- # set in order to use this special memory saving encoding.
387
38
  set-max-intset-entries 512
39
+ # zset-max-ziplist-entries 128
40
+ # zset-max-ziplist-value 64
388
41
 
389
- # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
390
- # order to help rehashing the main Redis hash table (the one mapping top-level
391
- # keys to values). The hash table implementation redis uses (see dict.c)
392
- # performs a lazy rehashing: the more operation you run into an hash table
393
- # that is rhashing, the more rehashing "steps" are performed, so if the
394
- # server is idle the rehashing is never complete and some more memory is used
395
- # by the hash table.
396
- #
397
- # The default is to use this millisecond 10 times every second in order to
398
- # active rehashing the main dictionaries, freeing memory when possible.
399
- #
400
- # If unsure:
401
- # use "activerehashing no" if you have hard latency requirements and it is
402
- # not a good thing in your environment that Redis can reply form time to time
403
- # to queries with 2 milliseconds delay.
404
- #
405
- # use "activerehashing yes" if you don't have such hard requirements but
406
- # want to free memory asap when possible.
407
42
  activerehashing yes
408
43
 
409
- ################################## INCLUDES ###################################
410
-
411
- # Include one or more other config files here. This is useful if you
412
- # have a standard template that goes to all redis server but also need
413
- # to customize a few per-server settings. Include files can include
414
- # other files, so use this wisely.
415
- #
416
- # include /path/to/local.conf
417
- # include /path/to/other.conf
44
+ # client-output-buffer-limit normal 0 0 0
45
+ # client-output-buffer-limit slave 256mb 64mb 60
46
+ # client-output-buffer-limit pubsub 32mb 8mb 60
@@ -1,418 +1,46 @@
1
- # Redis configuration file example
2
-
3
- # Note on units: when memory size is needed, it is possible to specifiy
4
- # it in the usual form of 1k 5GB 4M and so forth:
5
- #
6
- # 1k => 1000 bytes
7
- # 1kb => 1024 bytes
8
- # 1m => 1000000 bytes
9
- # 1mb => 1024*1024 bytes
10
- # 1g => 1000000000 bytes
11
- # 1gb => 1024*1024*1024 bytes
12
- #
13
- # units are case insensitive so 1GB 1Gb 1gB are all the same.
14
-
15
- # By default Redis does not run as a daemon. Use 'yes' if you need it.
16
- # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
17
1
  daemonize yes
18
-
19
- # When running daemonized, Redis writes a pid file in /var/run/redis.pid by
20
- # default. You can specify a custom pid file location here.
21
- # pidfile /var/run/redis.pid
22
2
  pidfile ./tmp/pids/redis.pid
23
-
24
- # Accept connections on the specified port, default is 6379.
25
- # If port 0 is specified Redis will not listen on a TCP socket.
26
3
  port 6379
27
-
28
- # If you want you can bind a single interface, if the bind option is not
29
- # specified all the interfaces will listen for incoming connections.
30
- #
31
- # bind 127.0.0.1
32
-
33
- # Specify the path for the unix socket that will be used to listen for
34
- # incoming connections. There is no default, so Redis will not listen
35
- # on a unix socket when not specified.
36
- #
37
- # unixsocket /tmp/redis.sock
38
-
39
- # Close the connection after a client is idle for N seconds (0 to disable)
40
- timeout 300
41
-
42
- # Set server verbosity to 'debug'
43
- # it can be one of:
44
- # debug (a lot of information, useful for development/testing)
45
- # verbose (many rarely useful info, but not a mess like the debug level)
46
- # notice (moderately verbose, what you want in production probably)
47
- # warning (only very important / critical messages are logged)
4
+ timeout 0
48
5
  loglevel verbose
49
-
50
- # Specify the log file name. Also 'stdout' can be used to force
51
- # Redis to log on the standard output. Note that if you use standard
52
- # output for logging but daemonize, logs will be sent to /dev/null
53
6
  logfile stdout
54
-
55
- # To enable logging to the system logger, just set 'syslog-enabled' to yes,
56
- # and optionally update the other syslog parameters to suit your needs.
57
- # syslog-enabled no
58
-
59
- # Specify the syslog identity.
60
- # syslog-ident redis
61
-
62
- # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
63
- # syslog-facility local0
64
-
65
- # Set the number of databases. The default database is DB 0, you can select
66
- # a different one on a per-connection basis using SELECT <dbid> where
67
- # dbid is a number between 0 and 'databases'-1
68
7
  databases 16
69
8
 
70
- ################################ SNAPSHOTTING #################################
71
- #
72
- # Save the DB on disk:
73
- #
74
- # save <seconds> <changes>
75
- #
76
- # Will save the DB if both the given number of seconds and the given
77
- # number of write operations against the DB occurred.
78
- #
79
- # In the example below the behaviour will be to save:
80
- # after 900 sec (15 min) if at least 1 key changed
81
- # after 300 sec (5 min) if at least 10 keys changed
82
- # after 60 sec if at least 10000 keys changed
83
- #
84
- # Note: you can disable saving at all commenting all the "save" lines.
85
-
86
9
  save 900 1
87
10
  save 300 10
88
11
  save 60 10000
89
12
 
90
- # Compress string objects using LZF when dump .rdb databases?
91
- # For default that's set to 'yes' as it's almost always a win.
92
- # If you want to save some CPU in the saving child set it to 'no' but
93
- # the dataset will likely be bigger if you have compressible values or keys.
13
+ # stop-writes-on-bgsave-error yes
94
14
  rdbcompression yes
95
-
96
- # The filename where to dump the DB
15
+ # rdbchecksum yes
97
16
  dbfilename tmp/dump.rdb
98
-
99
- # The working directory.
100
- #
101
- # The DB will be written inside this directory, with the filename specified
102
- # above using the 'dbfilename' configuration directive.
103
- #
104
- # Also the Append Only File will be created inside this directory.
105
- #
106
- # Note that you must specify a directory here, not a file name.
107
17
  dir ./
108
18
 
109
- ################################# REPLICATION #################################
110
-
111
- # Master-Slave replication. Use slaveof to make a Redis instance a copy of
112
- # another Redis server. Note that the configuration is local to the slave
113
- # so for example it is possible to configure the slave to save the DB with a
114
- # different interval, or to listen to another port, and so on.
115
- #
116
- # slaveof <masterip> <masterport>
117
-
118
- # If the master is password protected (using the "requirepass" configuration
119
- # directive below) it is possible to tell the slave to authenticate before
120
- # starting the replication synchronization process, otherwise the master will
121
- # refuse the slave request.
122
- #
123
- # masterauth <master-password>
124
-
125
- # When a slave lost the connection with the master, or when the replication
126
- # is still in progress, the slave can act in two different ways:
127
- #
128
- # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
129
- # still reply to client requests, possibly with out of data data, or the
130
- # data set may just be empty if this is the first synchronization.
131
- #
132
- # 2) if slave-serve-stale data is set to 'no' the slave will reply with
133
- # an error "SYNC with master in progress" to all the kind of commands
134
- # but to INFO and SLAVEOF.
135
- #
136
19
  slave-serve-stale-data yes
137
-
138
- ################################## SECURITY ###################################
139
-
140
- # Require clients to issue AUTH <PASSWORD> before processing any other
141
- # commands. This might be useful in environments in which you do not trust
142
- # others with access to the host running redis-server.
143
- #
144
- # This should stay commented out for backward compatibility and because most
145
- # people do not need auth (e.g. they run their own servers).
146
- #
147
- # Warning: since Redis is pretty fast an outside user can try up to
148
- # 150k passwords per second against a good box. This means that you should
149
- # use a very strong password otherwise it will be very easy to break.
150
- #
151
- # requirepass foobared
152
-
153
- # Command renaming.
154
- #
155
- # It is possilbe to change the name of dangerous commands in a shared
156
- # environment. For instance the CONFIG command may be renamed into something
157
- # of hard to guess so that it will be still available for internal-use
158
- # tools but not available for general clients.
159
- #
160
- # Example:
161
- #
162
- # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
163
- #
164
- # It is also possilbe to completely kill a command renaming it into
165
- # an empty string:
166
- #
167
- # rename-command CONFIG ""
168
-
169
- ################################### LIMITS ####################################
170
-
171
- # Set the max number of connected clients at the same time. By default there
172
- # is no limit, and it's up to the number of file descriptors the Redis process
173
- # is able to open. The special value '0' means no limits.
174
- # Once the limit is reached Redis will close all the new connections sending
175
- # an error 'max number of clients reached'.
176
- #
177
- # maxclients 128
178
-
179
- # Don't use more memory than the specified amount of bytes.
180
- # When the memory limit is reached Redis will try to remove keys with an
181
- # EXPIRE set. It will try to start freeing keys that are going to expire
182
- # in little time and preserve keys with a longer time to live.
183
- # Redis will also try to remove objects from free lists if possible.
184
- #
185
- # If all this fails, Redis will start to reply with errors to commands
186
- # that will use more memory, like SET, LPUSH, and so on, and will continue
187
- # to reply to most read-only commands like GET.
188
- #
189
- # WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
190
- # 'state' server or cache, not as a real DB. When Redis is used as a real
191
- # database the memory usage will grow over the weeks, it will be obvious if
192
- # it is going to use too much memory in the long run, and you'll have the time
193
- # to upgrade. With maxmemory after the limit is reached you'll start to get
194
- # errors for write operations, and this may even lead to DB inconsistency.
195
- #
196
- # maxmemory <bytes>
197
-
198
- # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
199
- # is reached? You can select among five behavior:
200
- #
201
- # volatile-lru -> remove the key with an expire set using an LRU algorithm
202
- # allkeys-lru -> remove any key accordingly to the LRU algorithm
203
- # volatile-random -> remove a random key with an expire set
204
- # allkeys->random -> remove a random key, any key
205
- # volatile-ttl -> remove the key with the nearest expire time (minor TTL)
206
- # noeviction -> don't expire at all, just return an error on write operations
207
- #
208
- # Note: with all the kind of policies, Redis will return an error on write
209
- # operations, when there are not suitable keys for eviction.
210
- #
211
- # At the date of writing this commands are: set setnx setex append
212
- # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
213
- # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
214
- # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
215
- # getset mset msetnx exec sort
216
- #
217
- # The default is:
218
- #
219
- # maxmemory-policy volatile-lru
220
-
221
- # LRU and minimal TTL algorithms are not precise algorithms but approximated
222
- # algorithms (in order to save memory), so you can select as well the sample
223
- # size to check. For instance for default Redis will check three keys and
224
- # pick the one that was used less recently, you can change the sample size
225
- # using the following configuration directive.
226
- #
227
- # maxmemory-samples 3
228
-
229
- ############################## APPEND ONLY MODE ###############################
230
-
231
- # By default Redis asynchronously dumps the dataset on disk. If you can live
232
- # with the idea that the latest records will be lost if something like a crash
233
- # happens this is the preferred way to run Redis. If instead you care a lot
234
- # about your data and don't want to that a single record can get lost you should
235
- # enable the append only mode: when this mode is enabled Redis will append
236
- # every write operation received in the file appendonly.aof. This file will
237
- # be read on startup in order to rebuild the full dataset in memory.
238
- #
239
- # Note that you can have both the async dumps and the append only file if you
240
- # like (you have to comment the "save" statements above to disable the dumps).
241
- # Still if append only mode is enabled Redis will load the data from the
242
- # log file at startup ignoring the dump.rdb file.
243
- #
244
- # IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append
245
- # log file in background when it gets too big.
20
+ # slave-read-only yes
21
+ # slave-priority 100
246
22
 
247
23
  appendonly no
248
-
249
- # The name of the append only file (default: "appendonly.aof")
250
- # appendfilename appendonly.aof
251
-
252
- # The fsync() call tells the Operating System to actually write data on disk
253
- # instead to wait for more data in the output buffer. Some OS will really flush
254
- # data on disk, some other OS will just try to do it ASAP.
255
- #
256
- # Redis supports three different modes:
257
- #
258
- # no: don't fsync, just let the OS flush the data when it wants. Faster.
259
- # always: fsync after every write to the append only log . Slow, Safest.
260
- # everysec: fsync only if one second passed since the last fsync. Compromise.
261
- #
262
- # The default is "everysec" that's usually the right compromise between
263
- # speed and data safety. It's up to you to understand if you can relax this to
264
- # "no" that will let the operating system flush the output buffer when
265
- # it wants, for better performances (but if you can live with the idea of
266
- # some data loss consider the default persistence mode that's snapshotting),
267
- # or on the contrary, use "always" that's very slow but a bit safer than
268
- # everysec.
269
- #
270
- # If unsure, use "everysec".
271
-
272
- # appendfsync always
273
24
  appendfsync everysec
274
- # appendfsync no
275
-
276
- # When the AOF fsync policy is set to always or everysec, and a background
277
- # saving process (a background save or AOF log background rewriting) is
278
- # performing a lot of I/O against the disk, in some Linux configurations
279
- # Redis may block too long on the fsync() call. Note that there is no fix for
280
- # this currently, as even performing fsync in a different thread will block
281
- # our synchronous write(2) call.
282
- #
283
- # In order to mitigate this problem it's possible to use the following option
284
- # that will prevent fsync() from being called in the main process while a
285
- # BGSAVE or BGREWRITEAOF is in progress.
286
- #
287
- # This means that while another child is saving the durability of Redis is
288
- # the same as "appendfsync none", that in practical terms means that it is
289
- # possible to lose up to 30 seconds of log in the worst scenario (with the
290
- # default Linux settings).
291
- #
292
- # If you have latency problems turn this to "yes". Otherwise leave it as
293
- # "no" that is the safest pick from the point of view of durability.
294
25
  no-appendfsync-on-rewrite no
26
+ # auto-aof-rewrite-percentage 100
27
+ # auto-aof-rewrite-min-size 64mb
295
28
 
296
- ################################ VIRTUAL MEMORY ###############################
29
+ # lua-time-limit 5000
297
30
 
298
- # Virtual Memory allows Redis to work with datasets bigger than the actual
299
- # amount of RAM needed to hold the whole dataset in memory.
300
- # In order to do so very used keys are taken in memory while the other keys
301
- # are swapped into a swap file, similarly to what operating systems do
302
- # with memory pages.
303
- #
304
- # To enable VM just set 'vm-enabled' to yes, and set the following three
305
- # VM parameters accordingly to your needs.
31
+ # slowlog-log-slower-than 10000
32
+ # slowlog-max-len 128
306
33
 
307
- vm-enabled no
308
- # vm-enabled yes
309
-
310
- # This is the path of the Redis swap file. As you can guess, swap files
311
- # can't be shared by different Redis instances, so make sure to use a swap
312
- # file for every redis process you are running. Redis will complain if the
313
- # swap file is already in use.
314
- #
315
- # The best kind of storage for the Redis swap file (that's accessed at random)
316
- # is a Solid State Disk (SSD).
317
- #
318
- # *** WARNING *** if you are using a shared hosting the default of putting
319
- # the swap file under /tmp is not secure. Create a dir with access granted
320
- # only to Redis user and configure Redis to create the swap file there.
321
- vm-swap-file /tmp/redis.swap
322
-
323
- # vm-max-memory configures the VM to use at max the specified amount of
324
- # RAM. Everything that does not fit will be swapped on disk *if* possible, that
325
- # is, if there is still enough contiguous space in the swap file.
326
- #
327
- # With vm-max-memory 0 the system will swap everything it can. Not a good
328
- # default, just specify the max amount of RAM you can in bytes, but it's
329
- # better to leave some margin. For instance specify an amount of RAM
330
- # that's more or less between 60 and 80% of your free RAM.
331
- vm-max-memory 0
332
-
333
- # Redis swap files is split into pages. An object can be saved using multiple
334
- # contiguous pages, but pages can't be shared between different objects.
335
- # So if your page is too big, small objects swapped out on disk will waste
336
- # a lot of space. If your page is too small, there is less space in the swap
337
- # file (assuming you configured the same number of total swap file pages).
338
- #
339
- # If you use a lot of small objects, use a page size of 64 or 32 bytes.
340
- # If you use a lot of big objects, use a bigger page size.
341
- # If unsure, use the default :)
342
- vm-page-size 32
343
-
344
- # Number of total memory pages in the swap file.
345
- # Given that the page table (a bitmap of free/used pages) is taken in memory,
346
- # every 8 pages on disk will consume 1 byte of RAM.
347
- #
348
- # The total swap size is vm-page-size * vm-pages
349
- #
350
- # With the default of 32-bytes memory pages and 134217728 pages Redis will
351
- # use a 4 GB swap file, that will use 16 MB of RAM for the page table.
352
- #
353
- # It's better to use the smallest acceptable value for your application,
354
- # but the default is large in order to work in most conditions.
355
- vm-pages 134217728
356
-
357
- # Max number of VM I/O threads running at the same time.
358
- # These threads are used to read/write data from/to swap file, since they
359
- # also encode and decode objects from disk to memory or the reverse, a bigger
360
- # number of threads can help with big objects even if they can't help with
361
- # I/O itself as the physical device may not be able to couple with many
362
- # reads/writes operations at the same time.
363
- #
364
- # The special value of 0 turn off threaded I/O and enables the blocking
365
- # Virtual Memory implementation.
366
- vm-max-threads 4
367
-
368
- ############################### ADVANCED CONFIG ###############################
369
-
370
- # Hashes are encoded in a special way (much more memory efficient) when they
371
- # have at max a given number of elements, and the biggest element does not
372
- # exceed a given threshold. You can configure this limits with the following
373
- # configuration directives.
374
- hash-max-zipmap-entries 512
375
- hash-max-zipmap-value 64
376
-
377
- # Similarly to hashes, small lists are also encoded in a special way in order
378
- # to save a lot of space. The special representation is only used when
379
- # you are under the following limits:
34
+ # hash-max-ziplist-entries 512
35
+ # hash-max-ziplist-value 64
380
36
  list-max-ziplist-entries 512
381
37
  list-max-ziplist-value 64
382
-
383
- # Sets have a special encoding in just one case: when a set is composed
384
- # of just strings that happens to be integers in radix 10 in the range
385
- # of 64 bit signed integers.
386
- # The following configuration setting sets the limit in the size of the
387
- # set in order to use this special memory saving encoding.
388
38
  set-max-intset-entries 512
39
+ # zset-max-ziplist-entries 128
40
+ # zset-max-ziplist-value 64
389
41
 
390
- # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
391
- # order to help rehashing the main Redis hash table (the one mapping top-level
392
- # keys to values). The hash table implementation redis uses (see dict.c)
393
- # performs a lazy rehashing: the more operation you run into an hash table
394
- # that is rehashing, the more rehashing "steps" are performed, so if the
395
- # server is idle the rehashing is never complete and some more memory is used
396
- # by the hash table.
397
- #
398
- # The default is to use this millisecond 10 times every second in order to
399
- # active rehashing the main dictionaries, freeing memory when possible.
400
- #
401
- # If unsure:
402
- # use "activerehashing no" if you have hard latency requirements and it is
403
- # not a good thing in your environment that Redis can reply from time to time
404
- # to queries with 2 milliseconds delay.
405
- #
406
- # use "activerehashing yes" if you don't have such hard requirements but
407
- # want to free memory asap when possible.
408
42
  activerehashing yes
409
43
 
410
- ################################## INCLUDES ###################################
411
-
412
- # Include one or more other config files here. This is useful if you
413
- # have a standard template that goes to all redis server but also need
414
- # to customize a few per-server settings. Include files can include
415
- # other files, so use this wisely.
416
- #
417
- # include /path/to/local.conf
418
- # include /path/to/other.conf
44
+ # client-output-buffer-limit normal 0 0 0
45
+ # client-output-buffer-limit slave 256mb 64mb 60
46
+ # client-output-buffer-limit pubsub 32mb 8mb 60