capistrano3-ubuntu-server-prepare 0.0.1

@@ -0,0 +1,22 @@
1
+ description "nginx http daemon"
2
+ author "George Shammas <georgyo@gmail.com>"
3
+
4
+ start on (filesystem and net-device-up IFACE=lo)
5
+ stop on runlevel [!2345]
6
+
7
+ env DAEMON=/usr/local/nginx/sbin/nginx
8
+ env PID=/var/www/run/nginx.pid
9
+
10
+ expect fork
11
+ respawn
12
+ respawn limit 10 5
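+ # (i.e. give up if the job has to be respawned more than 10 times within 5 seconds)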
13
+ #oom never
14
+
15
+ pre-start script
16
+ $DAEMON -t
17
+ if [ $? -ne 0 ]
18
+ then exit 1
19
+ fi
20
+ end script
21
+
22
+ exec $DAEMON
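+
+ # Usage sketch (assuming this job file is installed as /etc/init/nginx.conf):
+ #   sudo start nginx      # launch the daemon
+ #   sudo stop nginx       # stop it
+ #   sudo status nginx     # show the tracked PID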
@@ -0,0 +1,827 @@
1
+ # Redis configuration file example
2
+
3
+ # Note on units: when memory size is needed, it is possible to specify
4
+ # it in the usual form of 1k 5GB 4M and so forth:
5
+ #
6
+ # 1k => 1000 bytes
7
+ # 1kb => 1024 bytes
8
+ # 1m => 1000000 bytes
9
+ # 1mb => 1024*1024 bytes
10
+ # 1g => 1000000000 bytes
11
+ # 1gb => 1024*1024*1024 bytes
12
+ #
13
+ # units are case insensitive so 1GB 1Gb 1gB are all the same.
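+ #
+ # For example (illustrative): "maxmemory 100mb" means 104857600 bytes,
+ # while "maxmemory 100m" means 100000000 bytes.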
14
+
15
+ ################################## INCLUDES ###################################
16
+
17
+ # Include one or more other config files here. This is useful if you
18
+ # have a standard template that goes to all Redis servers but also need
19
+ # to customize a few per-server settings. Include files can include
20
+ # other files, so use this wisely.
21
+ #
22
+ # Notice option "include" won't be rewritten by command "CONFIG REWRITE"
23
+ # from admin or Redis Sentinel. Since Redis always uses the last processed
24
+ # line as value of a configuration directive, you'd better put includes
25
+ # at the beginning of this file to avoid overwriting config change at runtime.
26
+ #
27
+ # If instead you are interested in using includes to override configuration
28
+ # options, it is better to use include as the last line.
29
+ #
30
+ # include /path/to/local.conf
31
+ # include /path/to/other.conf
32
+
33
+ ################################ GENERAL #####################################
34
+
35
+ # By default Redis does not run as a daemon. Use 'yes' if you need it.
36
+ # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
37
+ daemonize no
38
+
39
+ # When running daemonized, Redis writes a pid file in /var/run/redis.pid by
40
+ # default. You can specify a custom pid file location here.
41
+ pidfile /var/www/run/redis.pid
42
+
43
+ # Accept connections on the specified port, default is 6379.
44
+ # If port 0 is specified Redis will not listen on a TCP socket.
45
+ port 6379
46
+
47
+ # TCP listen() backlog.
48
+ #
49
+ # In high requests-per-second environments you need a high backlog in order
50
+ # to avoid slow client connection issues. Note that the Linux kernel
51
+ # will silently truncate it to the value of /proc/sys/net/core/somaxconn so
52
+ # make sure to raise both the value of somaxconn and tcp_max_syn_backlog
53
+ # in order to get the desired effect.
54
+ tcp-backlog 511
55
+
56
+ # By default Redis listens for connections from all the network interfaces
57
+ # available on the server. It is possible to listen to just one or multiple
58
+ # interfaces using the "bind" configuration directive, followed by one or
59
+ # more IP addresses.
60
+ #
61
+ # Examples:
62
+ #
63
+ # bind 192.168.1.100 10.0.0.1
64
+ # bind 127.0.0.1
65
+
66
+ # Specify the path for the Unix socket that will be used to listen for
67
+ # incoming connections. There is no default, so Redis will not listen
68
+ # on a unix socket when not specified.
69
+ #
70
+ # unixsocket /tmp/redis.sock
71
+ # unixsocketperm 700
72
+
73
+ # Close the connection after a client is idle for N seconds (0 to disable)
74
+ timeout 0
75
+
76
+ # TCP keepalive.
77
+ #
78
+ # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
79
+ # of communication. This is useful for two reasons:
80
+ #
81
+ # 1) Detect dead peers.
82
+ # 2) Take the connection alive from the point of view of network
83
+ # equipment in the middle.
84
+ #
85
+ # On Linux, the specified value (in seconds) is the period used to send ACKs.
86
+ # Note that to close the connection, twice that time is needed.
87
+ # On other kernels the period depends on the kernel configuration.
88
+ #
89
+ # A reasonable value for this option is 60 seconds.
90
+ tcp-keepalive 0
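+ # For example (illustrative), to enable the suggested 60 second period:
+ #
+ # tcp-keepalive 60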
91
+
92
+ # Specify the server verbosity level.
93
+ # This can be one of:
94
+ # debug (a lot of information, useful for development/testing)
95
+ # verbose (a lot of rarely useful info, but not a mess like the debug level)
96
+ # notice (moderately verbose, what you want in production probably)
97
+ # warning (only very important / critical messages are logged)
98
+ loglevel notice
99
+
100
+ # Specify the log file name. Also the empty string can be used to force
101
+ # Redis to log on the standard output. Note that if you use standard
102
+ # output for logging but daemonize, logs will be sent to /dev/null
103
+ logfile ""
104
+
105
+ # To enable logging to the system logger, just set 'syslog-enabled' to yes,
106
+ # and optionally update the other syslog parameters to suit your needs.
107
+ # syslog-enabled no
108
+
109
+ # Specify the syslog identity.
110
+ # syslog-ident redis
111
+
112
+ # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
113
+ # syslog-facility local0
114
+
115
+ # Set the number of databases. The default database is DB 0, you can select
116
+ # a different one on a per-connection basis using SELECT <dbid> where
117
+ # dbid is a number between 0 and 'databases'-1
118
+ databases 16
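+ # (A client switches databases with, for example, "SELECT 1".)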
119
+
120
+ ################################ SNAPSHOTTING ################################
121
+ #
122
+ # Save the DB on disk:
123
+ #
124
+ # save <seconds> <changes>
125
+ #
126
+ # Will save the DB if both the given number of seconds and the given
127
+ # number of write operations against the DB occurred.
128
+ #
129
+ # In the example below the behaviour will be to save:
130
+ # after 900 sec (15 min) if at least 1 key changed
131
+ # after 300 sec (5 min) if at least 10 keys changed
132
+ # after 60 sec if at least 10000 keys changed
133
+ #
134
+ # Note: you can disable saving completely by commenting out all "save" lines.
135
+ #
136
+ # It is also possible to remove all the previously configured save
137
+ # points by adding a save directive with a single empty string argument
138
+ # like in the following example:
139
+ #
140
+ # save ""
141
+
142
+ save 900 1
143
+ save 300 10
144
+ save 60 10000
145
+
146
+ # By default Redis will stop accepting writes if RDB snapshots are enabled
147
+ # (at least one save point) and the latest background save failed.
148
+ # This will make the user aware (in a hard way) that data is not persisting
149
+ # on disk properly, otherwise chances are that no one will notice and some
150
+ # disaster will happen.
151
+ #
152
+ # If the background saving process will start working again Redis will
153
+ # automatically allow writes again.
154
+ #
155
+ # However if you have setup your proper monitoring of the Redis server
156
+ # and persistence, you may want to disable this feature so that Redis will
157
+ # continue to work as usual even if there are problems with disk,
158
+ # permissions, and so forth.
159
+ stop-writes-on-bgsave-error yes
160
+
161
+ # Compress string objects using LZF when dump .rdb databases?
162
+ # By default that's set to 'yes', as it's almost always a win.
163
+ # If you want to save some CPU in the saving child set it to 'no' but
164
+ # the dataset will likely be bigger if you have compressible values or keys.
165
+ rdbcompression yes
166
+
167
+ # Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
168
+ # This makes the format more resistant to corruption but there is a performance
169
+ # hit to pay (around 10%) when saving and loading RDB files, so you can disable it
170
+ # for maximum performances.
171
+ #
172
+ # RDB files created with checksum disabled have a checksum of zero that will
173
+ # tell the loading code to skip the check.
174
+ rdbchecksum yes
175
+
176
+ # The filename where to dump the DB
177
+ dbfilename dump.rdb
178
+
179
+ # The working directory.
180
+ #
181
+ # The DB will be written inside this directory, with the filename specified
182
+ # above using the 'dbfilename' configuration directive.
183
+ #
184
+ # The Append Only File will also be created inside this directory.
185
+ #
186
+ # Note that you must specify a directory here, not a file name.
187
+ dir /var/www/other
188
+
189
+ ################################# REPLICATION #################################
190
+
191
+ # Master-Slave replication. Use slaveof to make a Redis instance a copy of
192
+ # another Redis server. A few things to understand ASAP about Redis replication.
193
+ #
194
+ # 1) Redis replication is asynchronous, but you can configure a master to
195
+ # stop accepting writes if it appears to be not connected with at least
196
+ # a given number of slaves.
197
+ # 2) Redis slaves are able to perform a partial resynchronization with the
198
+ # master if the replication link is lost for a relatively small amount of
199
+ # time. You may want to configure the replication backlog size (see the next
200
+ # sections of this file) with a sensible value depending on your needs.
201
+ # 3) Replication is automatic and does not need user intervention. After a
202
+ # network partition slaves automatically try to reconnect to masters
203
+ # and resynchronize with them.
204
+ #
205
+ # slaveof <masterip> <masterport>
206
+
207
+ # If the master is password protected (using the "requirepass" configuration
208
+ # directive below) it is possible to tell the slave to authenticate before
209
+ # starting the replication synchronization process, otherwise the master will
210
+ # refuse the slave request.
211
+ #
212
+ # masterauth <master-password>
213
+
214
+ # When a slave loses its connection with the master, or when the replication
215
+ # is still in progress, the slave can act in two different ways:
216
+ #
217
+ # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
218
+ # still reply to client requests, possibly with out of date data, or the
219
+ # data set may just be empty if this is the first synchronization.
220
+ #
221
+ # 2) if slave-serve-stale-data is set to 'no' the slave will reply with
222
+ # an error "SYNC with master in progress" to all kinds of commands
223
+ # except INFO and SLAVEOF.
224
+ #
225
+ slave-serve-stale-data yes
226
+
227
+ # You can configure a slave instance to accept writes or not. Writing against
228
+ # a slave instance may be useful to store some ephemeral data (because data
229
+ # written on a slave will be easily deleted after resync with the master) but
230
+ # may also cause problems if clients are writing to it because of a
231
+ # misconfiguration.
232
+ #
233
+ # Since Redis 2.6 by default slaves are read-only.
234
+ #
235
+ # Note: read only slaves are not designed to be exposed to untrusted clients
236
+ # on the internet. It's just a protection layer against misuse of the instance.
237
+ # Still a read only slave exports by default all the administrative commands
238
+ # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
239
+ # security of read only slaves using 'rename-command' to shadow all the
240
+ # administrative / dangerous commands.
241
+ slave-read-only yes
242
+
243
+ # Replication SYNC strategy: disk or socket.
244
+ #
245
+ # -------------------------------------------------------
246
+ # WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY
247
+ # -------------------------------------------------------
248
+ #
249
+ # New slaves and reconnecting slaves that are not able to continue the replication
250
+ # process just receiving differences, need to do what is called a "full
251
+ # synchronization". An RDB file is transmitted from the master to the slaves.
252
+ # The transmission can happen in two different ways:
253
+ #
254
+ # 1) Disk-backed: The Redis master creates a new process that writes the RDB
255
+ # file on disk. Later the file is transferred by the parent
256
+ # process to the slaves incrementally.
257
+ # 2) Diskless: The Redis master creates a new process that directly writes the
258
+ # RDB file to slave sockets, without touching the disk at all.
259
+ #
260
+ # With disk-backed replication, while the RDB file is generated, more slaves
261
+ # can be queued and served with the RDB file as soon as the current child producing
262
+ # the RDB file finishes its work. With diskless replication instead once
263
+ # the transfer starts, new slaves arriving will be queued and a new transfer
264
+ # will start when the current one terminates.
265
+ #
266
+ # When diskless replication is used, the master waits a configurable amount of
267
+ # time (in seconds) before starting the transfer in the hope that multiple slaves
268
+ # will arrive and the transfer can be parallelized.
269
+ #
270
+ # With slow disks and fast (large bandwidth) networks, diskless replication
271
+ # works better.
272
+ repl-diskless-sync no
273
+
274
+ # When diskless replication is enabled, it is possible to configure the delay
275
+ # the server waits in order to spawn the child that transfers the RDB via socket
276
+ # to the slaves.
277
+ #
278
+ # This is important since once the transfer starts, it is not possible to serve
279
+ # new slaves that arrive; they will be queued for the next RDB transfer, so the server
280
+ # waits a delay in order to let more slaves arrive.
281
+ #
282
+ # The delay is specified in seconds, and by default is 5 seconds. To disable
283
+ # it entirely just set it to 0 seconds and the transfer will start ASAP.
284
+ repl-diskless-sync-delay 5
285
+
286
+ # Slaves send PINGs to server in a predefined interval. It's possible to change
287
+ # this interval with the repl_ping_slave_period option. The default value is 10
288
+ # seconds.
289
+ #
290
+ # repl-ping-slave-period 10
291
+
292
+ # The following option sets the replication timeout for:
293
+ #
294
+ # 1) Bulk transfer I/O during SYNC, from the point of view of slave.
295
+ # 2) Master timeout from the point of view of slaves (data, pings).
296
+ # 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
297
+ #
298
+ # It is important to make sure that this value is greater than the value
299
+ # specified for repl-ping-slave-period otherwise a timeout will be detected
300
+ # every time there is low traffic between the master and the slave.
301
+ #
302
+ # repl-timeout 60
303
+
304
+ # Disable TCP_NODELAY on the slave socket after SYNC?
305
+ #
306
+ # If you select "yes" Redis will use a smaller number of TCP packets and
307
+ # less bandwidth to send data to slaves. But this can add a delay for
308
+ # the data to appear on the slave side, up to 40 milliseconds with
309
+ # Linux kernels using a default configuration.
310
+ #
311
+ # If you select "no" the delay for data to appear on the slave side will
312
+ # be reduced but more bandwidth will be used for replication.
313
+ #
314
+ # By default we optimize for low latency, but in very high traffic conditions
315
+ # or when the master and slaves are many hops away, turning this to "yes" may
316
+ # be a good idea.
317
+ repl-disable-tcp-nodelay no
318
+
319
+ # Set the replication backlog size. The backlog is a buffer that accumulates
320
+ # slave data when slaves are disconnected for some time, so that when a slave
321
+ # wants to reconnect again, often a full resync is not needed, but a partial
322
+ # resync is enough, just passing the portion of data the slave missed while
323
+ # disconnected.
324
+ #
325
+ # The bigger the replication backlog, the longer the time the slave can be
326
+ # disconnected and later be able to perform a partial resynchronization.
327
+ #
328
+ # The backlog is only allocated once there is at least a slave connected.
329
+ #
330
+ # repl-backlog-size 1mb
331
+
332
+ # After a master has no longer connected slaves for some time, the backlog
333
+ # will be freed. The following option configures the amount of seconds that
334
+ # need to elapse, starting from the time the last slave disconnected, for
335
+ # the backlog buffer to be freed.
336
+ #
337
+ # A value of 0 means to never release the backlog.
338
+ #
339
+ # repl-backlog-ttl 3600
340
+
341
+ # The slave priority is an integer number published by Redis in the INFO output.
342
+ # It is used by Redis Sentinel in order to select a slave to promote into a
343
+ # master if the master is no longer working correctly.
344
+ #
345
+ # A slave with a low priority number is considered better for promotion, so
346
+ # for instance if there are three slaves with priority 10, 100, 25 Sentinel will
347
+ # pick the one with priority 10, that is the lowest.
348
+ #
349
+ # However a special priority of 0 marks the slave as not able to perform the
350
+ # role of master, so a slave with priority of 0 will never be selected by
351
+ # Redis Sentinel for promotion.
352
+ #
353
+ # By default the priority is 100.
354
+ slave-priority 100
355
+
356
+ # It is possible for a master to stop accepting writes if there are less than
357
+ # N slaves connected, having a lag less or equal than M seconds.
358
+ #
359
+ # The N slaves need to be in "online" state.
360
+ #
361
+ # The lag in seconds, that must be <= the specified value, is calculated from
362
+ # the last ping received from the slave, that is usually sent every second.
363
+ #
364
+ # This option does not GUARANTEE that N replicas will accept the write, but
365
+ # will limit the window of exposure for lost writes in case not enough slaves
366
+ # are available, to the specified number of seconds.
367
+ #
368
+ # For example to require at least 3 slaves with a lag <= 10 seconds use:
369
+ #
370
+ # min-slaves-to-write 3
371
+ # min-slaves-max-lag 10
372
+ #
373
+ # Setting one or the other to 0 disables the feature.
374
+ #
375
+ # By default min-slaves-to-write is set to 0 (feature disabled) and
376
+ # min-slaves-max-lag is set to 10.
377
+
378
+ ################################## SECURITY ###################################
379
+
380
+ # Require clients to issue AUTH <PASSWORD> before processing any other
381
+ # commands. This might be useful in environments in which you do not trust
382
+ # others with access to the host running redis-server.
383
+ #
384
+ # This should stay commented out for backward compatibility and because most
385
+ # people do not need auth (e.g. they run their own servers).
386
+ #
387
+ # Warning: since Redis is pretty fast an outside user can try up to
388
+ # 150k passwords per second against a good box. This means that you should
389
+ # use a very strong password otherwise it will be very easy to break.
390
+ #
391
+ # requirepass foobared
392
+
393
+ # Command renaming.
394
+ #
395
+ # It is possible to change the name of dangerous commands in a shared
396
+ # environment. For instance the CONFIG command may be renamed into something
397
+ # hard to guess so that it will still be available for internal-use tools
398
+ # but not available for general clients.
399
+ #
400
+ # Example:
401
+ #
402
+ # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
403
+ #
404
+ # It is also possible to completely kill a command by renaming it into
405
+ # an empty string:
406
+ #
407
+ # rename-command CONFIG ""
408
+ #
409
+ # Please note that changing the name of commands that are logged into the
410
+ # AOF file or transmitted to slaves may cause problems.
411
+
412
+ ################################### LIMITS ####################################
413
+
414
+ # Set the max number of connected clients at the same time. By default
415
+ # this limit is set to 10000 clients, however if the Redis server is not
416
+ # able to configure the process file limit to allow for the specified limit
417
+ # the max number of allowed clients is set to the current file limit
418
+ # minus 32 (as Redis reserves a few file descriptors for internal uses).
419
+ #
420
+ # Once the limit is reached Redis will close all the new connections sending
421
+ # an error 'max number of clients reached'.
422
+ #
423
+ # maxclients 10000
424
+
425
+ # Don't use more memory than the specified amount of bytes.
426
+ # When the memory limit is reached Redis will try to remove keys
427
+ # according to the eviction policy selected (see maxmemory-policy).
428
+ #
429
+ # If Redis can't remove keys according to the policy, or if the policy is
430
+ # set to 'noeviction', Redis will start to reply with errors to commands
431
+ # that would use more memory, like SET, LPUSH, and so on, and will continue
432
+ # to reply to read-only commands like GET.
433
+ #
434
+ # This option is usually useful when using Redis as an LRU cache, or to set
435
+ # a hard memory limit for an instance (using the 'noeviction' policy).
436
+ #
437
+ # WARNING: If you have slaves attached to an instance with maxmemory on,
438
+ # the size of the output buffers needed to feed the slaves are subtracted
439
+ # from the used memory count, so that network problems / resyncs will
440
+ # not trigger a loop where keys are evicted, and in turn the output
441
+ # buffer of slaves is full with DELs of keys evicted triggering the deletion
442
+ # of more keys, and so forth until the database is completely emptied.
443
+ #
444
+ # In short... if you have slaves attached it is suggested that you set a lower
445
+ # limit for maxmemory so that there is some free RAM on the system for slave
446
+ # output buffers (but this is not needed if the policy is 'noeviction').
447
+ #
448
+ # maxmemory <bytes>
449
+
450
+ # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
451
+ # is reached. You can select among the following behaviors:
452
+ #
453
+ # volatile-lru -> remove the key with an expire set using an LRU algorithm
454
+ # allkeys-lru -> remove any key according to the LRU algorithm
455
+ # volatile-random -> remove a random key with an expire set
456
+ # allkeys-random -> remove a random key, any key
457
+ # volatile-ttl -> remove the key with the nearest expire time (minor TTL)
458
+ # noeviction -> don't evict at all, just return an error on write operations
459
+ #
460
+ # Note: with any of the above policies, Redis will return an error on write
461
+ # operations, when there are no suitable keys for eviction.
462
+ #
463
+ # At the date of writing these commands are: set setnx setex append
464
+ # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
465
+ # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
466
+ # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
467
+ # getset mset msetnx exec sort
468
+ #
469
+ # The default is:
470
+ #
471
+ # maxmemory-policy volatile-lru
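+ #
+ # Illustrative cache-style setup (values are examples, not defaults):
+ #
+ # maxmemory 256mb
+ # maxmemory-policy allkeys-lru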
472
+
473
+ # LRU and minimal TTL algorithms are not precise algorithms but approximated
474
+ # algorithms (in order to save memory), so you can select as well the sample
475
+ # size to check. For instance, by default Redis will check three keys and
476
+ # pick the one that was used least recently; you can change the sample size
477
+ # using the following configuration directive.
478
+ #
479
+ # maxmemory-samples 3
480
+
481
+ ############################## APPEND ONLY MODE ###############################
482
+
483
+ # By default Redis asynchronously dumps the dataset on disk. This mode is
484
+ # good enough in many applications, but an issue with the Redis process or
485
+ # a power outage may result in a few minutes of lost writes (depending on
486
+ # the configured save points).
487
+ #
488
+ # The Append Only File is an alternative persistence mode that provides
489
+ # much better durability. For instance using the default data fsync policy
490
+ # (see later in the config file) Redis can lose just one second of writes in a
491
+ # dramatic event like a server power outage, or a single write if something
492
+ # wrong with the Redis process itself happens, but the operating system is
493
+ # still running correctly.
494
+ #
495
+ # AOF and RDB persistence can be enabled at the same time without problems.
496
+ # If the AOF is enabled on startup Redis will load the AOF, that is the file
497
+ # with the better durability guarantees.
498
+ #
499
+ # Please check http://redis.io/topics/persistence for more information.
500
+
501
+ appendonly no
502
+
503
+ # The name of the append only file (default: "appendonly.aof")
504
+
505
+ appendfilename "appendonly.aof"
506
+
507
+ # The fsync() call tells the Operating System to actually write data on disk
508
+ # instead of waiting for more data in the output buffer. Some OS will really flush
509
+ # data on disk, some other OS will just try to do it ASAP.
510
+ #
511
+ # Redis supports three different modes:
512
+ #
513
+ # no: don't fsync, just let the OS flush the data when it wants. Faster.
514
+ # always: fsync after every write to the append only log. Slow, Safest.
515
+ # everysec: fsync only one time every second. Compromise.
516
+ #
517
+ # The default is "everysec", as that's usually the right compromise between
518
+ # speed and data safety. It's up to you to understand if you can relax this to
519
+ # "no" that will let the operating system flush the output buffer when
520
+ # it wants, for better performances (but if you can live with the idea of
521
+ # some data loss consider the default persistence mode that's snapshotting),
522
+ # or on the contrary, use "always" that's very slow but a bit safer than
523
+ # everysec.
524
+ #
525
+ # More details please check the following article:
526
+ # http://antirez.com/post/redis-persistence-demystified.html
527
+ #
528
+ # If unsure, use "everysec".
529
+
530
+ # appendfsync always
531
+ appendfsync everysec
532
+ # appendfsync no
533
+
534
+ # When the AOF fsync policy is set to always or everysec, and a background
535
+ # saving process (a background save or AOF log background rewriting) is
536
+ # performing a lot of I/O against the disk, in some Linux configurations
537
+ # Redis may block too long on the fsync() call. Note that there is no fix for
538
+ # this currently, as even performing fsync in a different thread will block
539
+ # our synchronous write(2) call.
540
+ #
541
+ # In order to mitigate this problem it's possible to use the following option
542
+ # that will prevent fsync() from being called in the main process while a
543
+ # BGSAVE or BGREWRITEAOF is in progress.
544
+ #
545
+ # This means that while another child is saving, the durability of Redis is
546
+ # the same as "appendfsync none". In practical terms, this means that it is
547
+ # possible to lose up to 30 seconds of log in the worst scenario (with the
548
+ # default Linux settings).
549
+ #
550
+ # If you have latency problems turn this to "yes". Otherwise leave it as
551
+ # "no" that is the safest pick from the point of view of durability.
552
+
553
+ no-appendfsync-on-rewrite no
554
+
555
+ # Automatic rewrite of the append only file.
556
+ # Redis is able to automatically rewrite the log file implicitly calling
557
+ # BGREWRITEAOF when the AOF log size grows by the specified percentage.
558
+ #
559
+ # This is how it works: Redis remembers the size of the AOF file after the
560
+ # latest rewrite (if no rewrite has happened since the restart, the size of
561
+ # the AOF at startup is used).
562
+ #
563
+ # This base size is compared to the current size. If the current size is
564
+ # bigger than the specified percentage, the rewrite is triggered. Also
565
+ # you need to specify a minimal size for the AOF file to be rewritten, this
566
+ # is useful to avoid rewriting the AOF file even if the percentage increase
567
+ # is reached but it is still pretty small.
568
+ #
569
+ # Specify a percentage of zero in order to disable the automatic AOF
570
+ # rewrite feature.
571
+
572
+ auto-aof-rewrite-percentage 100
573
+ auto-aof-rewrite-min-size 64mb
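+ # (With the values above, a rewrite triggers once the AOF has grown by 100%
+ # since the last rewrite and is at least 64mb in size.)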
574
+
575
+ # An AOF file may be found to be truncated at the end during the Redis
576
+ # startup process, when the AOF data gets loaded back into memory.
577
+ # This may happen when the system where Redis is running
578
+ # crashes, especially when an ext4 filesystem is mounted without the
579
+ # data=ordered option (however this can't happen when Redis itself
580
+ # crashes or aborts but the operating system still works correctly).
581
+ #
582
+ # Redis can either exit with an error when this happens, or load as much
583
+ # data as possible (the default now) and start if the AOF file is found
584
+ # to be truncated at the end. The following option controls this behavior.
585
+ #
586
+ # If aof-load-truncated is set to yes, a truncated AOF file is loaded and
587
+ # the Redis server starts emitting a log to inform the user of the event.
588
+ # Otherwise if the option is set to no, the server aborts with an error
589
+ # and refuses to start. When the option is set to no, the user is required
590
+ # to fix the AOF file using the "redis-check-aof" utility before restarting
591
+ # the server.
592
+ #
593
+ # Note that if the AOF file is found to be corrupted in the middle,
594
+ # the server will still exit with an error. This option only applies when
595
+ # Redis tries to read more data from the AOF file but not enough bytes
596
+ # are found.
597
+ aof-load-truncated yes
598
+
599
+ ################################ LUA SCRIPTING ###############################
600
+
601
+ # Max execution time of a Lua script in milliseconds.
602
+ #
603
+ # If the maximum execution time is reached Redis will log that a script is
604
+ # still in execution after the maximum allowed time and will start to
605
+ # reply to queries with an error.
606
+ #
607
+ # When a long running script exceeds the maximum execution time only the
608
+ # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
609
+ # used to stop a script that has not yet called any write commands. The second
610
+ # is the only way to shut down the server in the case a write command was
611
+ # already issued by the script but the user doesn't want to wait for the natural
612
+ # termination of the script.
613
+ #
614
+ # Set it to 0 or a negative value for unlimited execution without warnings.
615
+ lua-time-limit 5000
616
+
617
+ ################################## SLOW LOG ###################################
618
+
619
+ # The Redis Slow Log is a system to log queries that exceeded a specified
620
+ # execution time. The execution time does not include the I/O operations
621
+ # like talking with the client, sending the reply and so forth,
622
+ # but just the time needed to actually execute the command (this is the only
623
+ # stage of command execution where the thread is blocked and can not serve
624
+ # other requests in the meantime).
625
+ #
626
+ # You can configure the slow log with two parameters: one tells Redis
627
+ # what is the execution time, in microseconds, to exceed in order for the
628
+ # command to get logged, and the other parameter is the length of the
629
+ # slow log. When a new command is logged the oldest one is removed from the
630
+ # queue of logged commands.
631
+
632
+ # The following time is expressed in microseconds, so 1000000 is equivalent
633
+ # to one second. Note that a negative number disables the slow log, while
634
+ # a value of zero forces the logging of every command.
635
+ slowlog-log-slower-than 10000
636
+
637
+ # There is no limit to this length. Just be aware that it will consume memory.
638
+ # You can reclaim memory used by the slow log with SLOWLOG RESET.
639
+ slowlog-max-len 128
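+ #
+ # For example (illustrative), from redis-cli the most recent entries can be
+ # inspected with "SLOWLOG GET 10" and the log cleared with "SLOWLOG RESET".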
640
+
641
+ ################################ LATENCY MONITOR ##############################
642
+
643
+ # The Redis latency monitoring subsystem samples different operations
644
+ # at runtime in order to collect data related to possible sources of
645
+ # latency of a Redis instance.
646
+ #
647
+ # Via the LATENCY command this information is available to the user that can
648
+ # print graphs and obtain reports.
649
+ #
650
+ # The system only logs operations that were performed in a time equal to or
651
+ # greater than the amount of milliseconds specified via the
652
+ # latency-monitor-threshold configuration directive. When its value is set
653
+ # to zero, the latency monitor is turned off.
654
+ #
655
+ # By default latency monitoring is disabled since it is mostly not needed
656
+ # if you don't have latency issues, and collecting data has a performance
657
+ # impact that, while very small, can be measured under big load. Latency
658
+ # monitoring can easily be enabled at runtime using the command
659
+ # "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
660
+ latency-monitor-threshold 0
661
+
662
+ ############################# Event notification ##############################
663
+
664
+ # Redis can notify Pub/Sub clients about events happening in the key space.
665
+ # This feature is documented at http://redis.io/topics/notifications
666
+ #
667
+ # For instance if keyspace events notification is enabled, and a client
668
+ # performs a DEL operation on key "foo" stored in the Database 0, two
669
+ # messages will be published via Pub/Sub:
670
+ #
671
+ # PUBLISH __keyspace@0__:foo del
672
+ # PUBLISH __keyevent@0__:del foo
673
+ #
674
+ # It is possible to select the events that Redis will notify among a set
675
+ # of classes. Every class is identified by a single character:
676
+ #
677
+ # K Keyspace events, published with __keyspace@<db>__ prefix.
678
+ # E Keyevent events, published with __keyevent@<db>__ prefix.
679
+ # g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
680
+ # $ String commands
681
+ # l List commands
682
+ # s Set commands
683
+ # h Hash commands
684
+ # z Sorted set commands
685
+ # x Expired events (events generated every time a key expires)
686
+ # e Evicted events (events generated when a key is evicted for maxmemory)
687
+ # A Alias for g$lshzxe, so that the "AKE" string means all the events.
688
+ #
689
+ # The "notify-keyspace-events" takes as argument a string that is composed
690
+ # of zero or multiple characters. The empty string means that notifications
691
+ # are disabled.
692
+ #
693
+ # Example: to enable list and generic events, from the point of view of the
694
+ # event name, use:
695
+ #
696
+ # notify-keyspace-events Elg
697
+ #
698
+ # Example 2: to get the stream of the expired keys subscribing to channel
699
+ # name __keyevent@0__:expired use:
700
+ #
701
+ # notify-keyspace-events Ex
702
+ #
703
+ # By default all notifications are disabled because most users don't need
704
+ # this feature and the feature has some overhead. Note that if you don't
705
+ # specify at least one of K or E, no events will be delivered.
706
+ notify-keyspace-events ""
707
+
708
+ ############################### ADVANCED CONFIG ###############################
709
+
710
+ # Hashes are encoded using a memory efficient data structure when they have a
711
+ # small number of entries, and the biggest entry does not exceed a given
712
+ # threshold. These thresholds can be configured using the following directives.
713
+ hash-max-ziplist-entries 512
714
+ hash-max-ziplist-value 64
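+ # (For example, "OBJECT ENCODING myhash" in redis-cli reports whether a given
+ # hash is currently stored as a ziplist or a hashtable; "myhash" is any key.)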
715
+
716
+ # Similarly to hashes, small lists are also encoded in a special way in order
717
+ # to save a lot of space. The special representation is only used when
718
+ # you are under the following limits:
719
+ list-max-ziplist-entries 512
720
+ list-max-ziplist-value 64
721
+
722
+ # Sets have a special encoding in just one case: when a set is composed
723
+ # of just strings that happen to be integers in radix 10 in the range
724
+ # of 64 bit signed integers.
725
+ # The following configuration setting sets the limit in the size of the
726
+ # set in order to use this special memory saving encoding.
727
+ set-max-intset-entries 512
728
+
729
+ # Similarly to hashes and lists, sorted sets are also specially encoded in
730
+ # order to save a lot of space. This encoding is only used when the length and
731
+ # elements of a sorted set are below the following limits:
732
+ zset-max-ziplist-entries 128
733
+ zset-max-ziplist-value 64
734
+
735
+ # HyperLogLog sparse representation bytes limit. The limit includes the
736
+ # 16 bytes header. When a HyperLogLog using the sparse representation crosses
737
+ # this limit, it is converted into the dense representation.
738
+ #
739
+ # A value greater than 16000 is totally useless, since at that point the
740
+ # dense representation is more memory efficient.
741
+ #
742
+ # The suggested value is ~ 3000 in order to have the benefits of
743
+ # the space efficient encoding without slowing down too much PFADD,
744
+ # which is O(N) with the sparse encoding. The value can be raised to
745
+ # ~ 10000 when CPU is not a concern, but space is, and the data set is
746
+ # composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
747
+ hll-sparse-max-bytes 3000
748
+
749
+ # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
750
+ # order to help rehashing the main Redis hash table (the one mapping top-level
751
+ # keys to values). The hash table implementation Redis uses (see dict.c)
752
+ # performs a lazy rehashing: the more operations you run against a hash table
753
+ # that is rehashing, the more rehashing "steps" are performed, so if the
754
+ # server is idle the rehashing is never complete and some more memory is used
755
+ # by the hash table.
756
+ #
757
+ # The default is to use this millisecond 10 times every second in order to
758
+ # actively rehash the main dictionaries, freeing memory when possible.
759
+ #
760
+ # If unsure:
761
+ # use "activerehashing no" if you have hard latency requirements and it is
762
+ # not a good thing in your environment that Redis can reply from time to time
763
+ # to queries with 2 milliseconds delay.
764
+ #
765
+ # use "activerehashing yes" if you don't have such hard requirements but
766
+ # want to free memory asap when possible.
767
+ activerehashing yes
768
+
769
+ # The client output buffer limits can be used to force disconnection of clients
770
+ # that are not reading data from the server fast enough for some reason (a
771
+ # common reason is that a Pub/Sub client can't consume messages as fast as the
772
+ # publisher can produce them).
773
+ #
774
+ # The limit can be set differently for the three different classes of clients:
775
+ #
776
+ # normal -> normal clients including MONITOR clients
777
+ # slave -> slave clients
778
+ # pubsub -> clients subscribed to at least one pubsub channel or pattern
779
+ #
780
+ # The syntax of every client-output-buffer-limit directive is the following:
781
+ #
782
+ # client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
783
+ #
784
+ # A client is immediately disconnected once the hard limit is reached, or if
785
+ # the soft limit is reached and remains reached for the specified number of
786
+ # seconds (continuously).
787
+ # So for instance if the hard limit is 32 megabytes and the soft limit is
788
+ # 16 megabytes / 10 seconds, the client will get disconnected immediately
789
+ # if the size of the output buffers reaches 32 megabytes, but will also get
790
+ # disconnected if the client reaches 16 megabytes and continuously overcomes
791
+ # the limit for 10 seconds.
792
+ #
793
+ # By default normal clients are not limited because they don't receive data
794
+ # without asking (in a push way), but just after a request, so only
795
+ # asynchronous clients may create a scenario where data is requested faster
796
+ # than it can be read.
797
+ #
798
+ # Instead there is a default limit for pubsub and slave clients, since
799
+ # subscribers and slaves receive data in a push fashion.
800
+ #
801
+ # Both the hard or the soft limit can be disabled by setting them to zero.
802
+ client-output-buffer-limit normal 0 0 0
803
+ client-output-buffer-limit slave 256mb 64mb 60
804
+ client-output-buffer-limit pubsub 32mb 8mb 60
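+ #
+ # For example (illustrative), to apply the 32mb / 16mb / 10 seconds scenario
+ # described above to normal clients as well:
+ #
+ # client-output-buffer-limit normal 32mb 16mb 10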
805
+
806
+ # Redis calls an internal function to perform many background tasks, like
807
+ # closing connections of clients in timeout, purging expired keys that are
808
+ # never requested, and so forth.
809
+ #
810
+ # Not all tasks are performed with the same frequency, but Redis checks for
811
+ # tasks to perform according to the specified "hz" value.
812
+ #
813
+ # By default "hz" is set to 10. Raising the value will use more CPU when
814
+ # Redis is idle, but at the same time will make Redis more responsive when
815
+ # there are many keys expiring at the same time, and timeouts may be
816
+ # handled with more precision.
817
+ #
818
+ # The range is between 1 and 500, however a value over 100 is usually not
819
+ # a good idea. Most users should use the default of 10 and raise this up to
820
+ # 100 only in environments where very low latency is required.
821
+ hz 10
822
+
823
+ # When a child rewrites the AOF file, if the following option is enabled
824
+ # the file will be fsync-ed every 32 MB of data generated. This is useful
825
+ # in order to commit the file to the disk more incrementally and avoid
826
+ # big latency spikes.
827
+ aof-rewrite-incremental-fsync yes