honkster-redis-store 0.3.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,168 @@
1
+ require File.join(File.dirname(__FILE__), "/../../spec_helper")
2
+
3
+ module ActiveSupport
4
+ module Cache
5
+ describe "ActiveSupport::Cache::RedisStore" do
6
+ before(:each) do
7
+ @store = ActiveSupport::Cache::RedisStore.new
8
+ @dstore = ActiveSupport::Cache::RedisStore.new "localhost:6380/1", "localhost:6381/1"
9
+ @rabbit = OpenStruct.new :name => "bunny"
10
+ @white_rabbit = OpenStruct.new :color => "white"
11
+ with_store_management do |store|
12
+ store.write "rabbit", @rabbit
13
+ store.delete "counter"
14
+ store.delete "rub-a-dub"
15
+ end
16
+ end
17
+
18
+ it "should accept connection params" do
19
+ redis = instantiate_store
20
+ redis.to_s.should == "Redis Client connected to 127.0.0.1:6379 against DB 0"
21
+
22
+ redis = instantiate_store "localhost"
23
+ redis.to_s.should == "Redis Client connected to localhost:6379 against DB 0"
24
+
25
+ redis = instantiate_store "localhost:6380"
26
+ redis.to_s.should == "Redis Client connected to localhost:6380 against DB 0"
27
+
28
+ redis = instantiate_store "localhost:6380/13"
29
+ redis.to_s.should == "Redis Client connected to localhost:6380 against DB 13"
30
+ end
31
+
32
+ it "should instantiate a ring" do
33
+ store = instantiate_store
34
+ store.should be_kind_of(MarshaledRedis)
35
+ store = instantiate_store ["localhost:6379/0", "localhost:6379/1"]
36
+ store.should be_kind_of(DistributedMarshaledRedis)
37
+ end
38
+
39
+ it "should read the data" do
40
+ with_store_management do |store|
41
+ store.read("rabbit").should === @rabbit
42
+ end
43
+ end
44
+
45
+ it "should write the data" do
46
+ with_store_management do |store|
47
+ store.write "rabbit", @white_rabbit
48
+ store.read("rabbit").should === @white_rabbit
49
+ end
50
+ end
51
+
52
+ it "should write the data with expiration time" do
53
+ with_store_management do |store|
54
+ store.write "rabbit", @white_rabbit, :expires_in => 1.second
55
+ store.read("rabbit").should === @white_rabbit ; sleep 2
56
+ store.read("rabbit").should be_nil
57
+ end
58
+ end
59
+
60
+ it "should not write data if :unless_exist option is true" do
61
+ with_store_management do |store|
62
+ store.write "rabbit", @white_rabbit, :unless_exist => true
63
+ store.read("rabbit").should === @rabbit
64
+ end
65
+ end
66
+
67
+ it "should read raw data" do
68
+ with_store_management do |store|
69
+ store.read("rabbit", :raw => true).should == "\004\bU:\017OpenStruct{\006:\tname\"\nbunny"
70
+ end
71
+ end
72
+
73
+ it "should write raw data" do
74
+ with_store_management do |store|
75
+ store.write "rabbit", @white_rabbit, :raw => true
76
+ store.read("rabbit", :raw => true).should == %(#<OpenStruct color="white">)
77
+ end
78
+ end
79
+
80
+ it "should delete data" do
81
+ with_store_management do |store|
82
+ store.delete "rabbit"
83
+ store.read("rabbit").should be_nil
84
+ end
85
+ end
86
+
87
+ it "should delete matched data" do
88
+ with_store_management do |store|
89
+ store.delete_matched "rabb*"
90
+ store.read("rabbit").should be_nil
91
+ end
92
+ end
93
+
94
+ it "should verify existence of an object in the store" do
95
+ with_store_management do |store|
96
+ store.exist?("rabbit").should be_true
97
+ store.exist?("rab-a-dub").should be_false
98
+ end
99
+ end
100
+
101
+ it "should increment a key" do
102
+ with_store_management do |store|
103
+ 3.times { store.increment "counter" }
104
+ store.read("counter", :raw => true).to_i.should == 3
105
+ end
106
+ end
107
+
108
+ it "should decrement a key" do
109
+ with_store_management do |store|
110
+ 3.times { store.increment "counter" }
111
+ 2.times { store.decrement "counter" }
112
+ store.read("counter", :raw => true).to_i.should == 1
113
+ end
114
+ end
115
+
116
+ it "should increment a key by given value" do
117
+ with_store_management do |store|
118
+ store.increment "counter", 3
119
+ store.read("counter", :raw => true).to_i.should == 3
120
+ end
121
+ end
122
+
123
+ it "should decrement a key by given value" do
124
+ with_store_management do |store|
125
+ 3.times { store.increment "counter" }
126
+ store.decrement "counter", 2
127
+ store.read("counter", :raw => true).to_i.should == 1
128
+ end
129
+ end
130
+
131
+ it "should clear the store" do
132
+ with_store_management do |store|
133
+ store.clear
134
+ store.instance_variable_get(:@data).keys("*").flatten.should be_empty
135
+ end
136
+ end
137
+
138
+ it "should return store stats" do
139
+ with_store_management do |store|
140
+ store.stats.should_not be_empty
141
+ end
142
+ end
143
+
144
+ it "should fetch data" do
145
+ with_store_management do |store|
146
+ store.fetch("rabbit").should == @rabbit
147
+ store.fetch("rub-a-dub").should be_nil
148
+ store.fetch("rub-a-dub") { "Flora de Cana" }
149
+ store.fetch("rub-a-dub").should === "Flora de Cana"
150
+ store.fetch("rabbit", :force => true).should be_nil # force cache miss
151
+ store.fetch("rabbit", :force => true, :expires_in => 1.second) { @white_rabbit }
152
+ store.fetch("rabbit").should === @white_rabbit ; sleep 2
153
+ store.fetch("rabbit").should be_nil
154
+ end
155
+ end
156
+
157
+ private
158
+ def instantiate_store(addresses = nil)
159
+ ActiveSupport::Cache::RedisStore.new(addresses).instance_variable_get(:@data)
160
+ end
161
+
162
+ def with_store_management
163
+ yield @store
164
+ yield @dstore
165
+ end
166
+ end
167
+ end
168
+ end
@@ -0,0 +1,189 @@
1
+ require File.join(File.dirname(__FILE__), "/../../spec_helper")
2
+
3
+ class App
4
+ def initialize
5
+ @values = {}
6
+ end
7
+
8
+ def set(key, value)
9
+ @values[key] = value
10
+ end
11
+
12
+ def get(key)
13
+ @values[key]
14
+ end
15
+ end
16
+
17
+ module Sinatra
18
+ module Cache
19
+ describe "Sinatra::Cache::RedisStore" do
20
+ before(:each) do
21
+ @store = Sinatra::Cache::RedisStore.new
22
+ @dstore = Sinatra::Cache::RedisStore.new "localhost:6380/1", "localhost:6381/1"
23
+ @rabbit = OpenStruct.new :name => "bunny"
24
+ @white_rabbit = OpenStruct.new :color => "white"
25
+ with_store_management do |store|
26
+ store.write "rabbit", @rabbit
27
+ store.delete "counter"
28
+ store.delete "rub-a-dub"
29
+ end
30
+ end
31
+
32
+ it "should register as extension" do
33
+ app = App.new
34
+ Sinatra::Cache.register(app)
35
+ store = app.get(:cache)
36
+ store.should be_kind_of(RedisStore)
37
+ end
38
+
39
+ it "should accept connection params" do
40
+ redis = instantiate_store
41
+ redis.to_s.should == "Redis Client connected to 127.0.0.1:6379 against DB 0"
42
+
43
+ redis = instantiate_store "localhost"
44
+ redis.to_s.should == "Redis Client connected to localhost:6379 against DB 0"
45
+
46
+ redis = instantiate_store "localhost:6380"
47
+ redis.to_s.should == "Redis Client connected to localhost:6380 against DB 0"
48
+
49
+ redis = instantiate_store "localhost:6380/13"
50
+ redis.to_s.should == "Redis Client connected to localhost:6380 against DB 13"
51
+ end
52
+
53
+ it "should instantiate a ring" do
54
+ store = instantiate_store
55
+ store.should be_kind_of(MarshaledRedis)
56
+ store = instantiate_store ["localhost:6379/0", "localhost:6379/1"]
57
+ store.should be_kind_of(DistributedMarshaledRedis)
58
+ end
59
+
60
+ it "should read the data" do
61
+ with_store_management do |store|
62
+ store.read("rabbit").should === @rabbit
63
+ end
64
+ end
65
+
66
+ it "should write the data" do
67
+ with_store_management do |store|
68
+ store.write "rabbit", @white_rabbit
69
+ store.read("rabbit").should === @white_rabbit
70
+ end
71
+ end
72
+
73
+ it "should write the data with expiration time" do
74
+ with_store_management do |store|
75
+ store.write "rabbit", @white_rabbit, :expires_in => 1.second
76
+ store.read("rabbit").should === @white_rabbit ; sleep 2
77
+ store.read("rabbit").should be_nil
78
+ end
79
+ end
80
+
81
+ it "should not write data if :unless_exist option is true" do
82
+ with_store_management do |store|
83
+ store.write "rabbit", @white_rabbit, :unless_exist => true
84
+ store.read("rabbit").should === @rabbit
85
+ end
86
+ end
87
+
88
+ it "should read raw data" do
89
+ with_store_management do |store|
90
+ store.read("rabbit", :raw => true).should == "\004\bU:\017OpenStruct{\006:\tname\"\nbunny"
91
+ end
92
+ end
93
+
94
+ it "should write raw data" do
95
+ with_store_management do |store|
96
+ store.write "rabbit", @white_rabbit, :raw => true
97
+ store.read("rabbit", :raw => true).should == %(#<OpenStruct color="white">)
98
+ end
99
+ end
100
+
101
+ it "should delete data" do
102
+ with_store_management do |store|
103
+ store.delete "rabbit"
104
+ store.read("rabbit").should be_nil
105
+ end
106
+ end
107
+
108
+ it "should delete matched data" do
109
+ with_store_management do |store|
110
+ store.delete_matched "rabb*"
111
+ store.read("rabbit").should be_nil
112
+ end
113
+ end
114
+
115
+ it "should verify existence of an object in the store" do
116
+ with_store_management do |store|
117
+ store.exist?("rabbit").should be_true
118
+ store.exist?("rab-a-dub").should be_false
119
+ end
120
+ end
121
+
122
+ it "should increment a key" do
123
+ with_store_management do |store|
124
+ 3.times { store.increment "counter" }
125
+ store.read("counter", :raw => true).to_i.should == 3
126
+ end
127
+ end
128
+
129
+ it "should decrement a key" do
130
+ with_store_management do |store|
131
+ 3.times { store.increment "counter" }
132
+ 2.times { store.decrement "counter" }
133
+ store.read("counter", :raw => true).to_i.should == 1
134
+ end
135
+ end
136
+
137
+ it "should increment a key by given value" do
138
+ with_store_management do |store|
139
+ store.increment "counter", 3
140
+ store.read("counter", :raw => true).to_i.should == 3
141
+ end
142
+ end
143
+
144
+ it "should decrement a key by given value" do
145
+ with_store_management do |store|
146
+ 3.times { store.increment "counter" }
147
+ store.decrement "counter", 2
148
+ store.read("counter", :raw => true).to_i.should == 1
149
+ end
150
+ end
151
+
152
+ it "should clear the store" do
153
+ with_store_management do |store|
154
+ store.clear
155
+ store.instance_variable_get(:@data).keys("*").flatten.should be_empty
156
+ end
157
+ end
158
+
159
+ it "should return store stats" do
160
+ with_store_management do |store|
161
+ store.stats.should_not be_empty
162
+ end
163
+ end
164
+
165
+ it "should fetch data" do
166
+ with_store_management do |store|
167
+ store.fetch("rabbit").should == @rabbit
168
+ store.fetch("rub-a-dub").should be_nil
169
+ store.fetch("rub-a-dub") { "Flora de Cana" }
170
+ store.fetch("rub-a-dub").should === "Flora de Cana"
171
+ store.fetch("rabbit", :force => true).should be_nil # force cache miss
172
+ store.fetch("rabbit", :force => true, :expires_in => 1.second) { @white_rabbit }
173
+ store.fetch("rabbit").should === @white_rabbit ; sleep 2
174
+ store.fetch("rabbit").should be_nil
175
+ end
176
+ end
177
+
178
+ private
179
+ def instantiate_store(addresses = nil)
180
+ Sinatra::Cache::RedisStore.new(addresses).instance_variable_get(:@data)
181
+ end
182
+
183
+ def with_store_management
184
+ yield @store
185
+ yield @dstore
186
+ end
187
+ end
188
+ end
189
+ end
@@ -0,0 +1,171 @@
1
+ # Redis configuration file example
2
+
3
+ # By default Redis does not run as a daemon. Use 'yes' if you need it.
4
+ # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
5
+ daemonize no
6
+
7
+ # When run as a daemon, Redis write a pid file in /var/run/redis.pid by default.
8
+ # You can specify a custom pid file location here.
9
+ pidfile /var/run/redis.pid
10
+
11
+ # Accept connections on the specified port, default is 6379
12
+ port 6380
13
+
14
+ # If you want you can bind a single interface, if the bind option is not
15
+ # specified all the interfaces will listen for connections.
16
+ #
17
+ # bind 127.0.0.1
18
+
19
+ # Close the connection after a client is idle for N seconds (0 to disable)
20
+ timeout 300
21
+
22
+ # Save the DB on disk:
23
+ #
24
+ # save <seconds> <changes>
25
+ #
26
+ # Will save the DB if both the given number of seconds and the given
27
+ # number of write operations against the DB occurred.
28
+ #
29
+ # In the example below the behaviour will be to save:
30
+ # after 900 sec (15 min) if at least 1 key changed
31
+ # after 300 sec (5 min) if at least 10 keys changed
32
+ # after 60 sec if at least 10000 keys changed
33
+ save 900 1
34
+ save 300 10
35
+ save 60 10000
36
+
37
+ # The filename where to dump the DB
38
+ dbfilename master-dump.rdb
39
+
40
+ # For default save/load DB in/from the working directory
41
+ # Note that you must specify a directory not a file name.
42
+ dir ./
43
+
44
+ # Set server verbosity to 'debug'
45
+ # it can be one of:
46
+ # debug (a lot of information, useful for development/testing)
47
+ # notice (moderately verbose, what you want in production probably)
48
+ # warning (only very important / critical messages are logged)
49
+ loglevel debug
50
+
51
+ # Specify the log file name. Also 'stdout' can be used to force
52
+ # the daemon to log on the standard output. Note that if you use standard
53
+ # output for logging but daemonize, logs will be sent to /dev/null
54
+ logfile stdout
55
+
56
+ # Set the number of databases. The default database is DB 0, you can select
57
+ # a different one on a per-connection basis using SELECT <dbid> where
58
+ # dbid is a number between 0 and 'databases'-1
59
+ databases 16
60
+
61
+ ################################# REPLICATION #################################
62
+
63
+ # Master-Slave replication. Use slaveof to make a Redis instance a copy of
64
+ # another Redis server. Note that the configuration is local to the slave
65
+ # so for example it is possible to configure the slave to save the DB with a
66
+ # different interval, or to listen to another port, and so on.
67
+
68
+ # slaveof <masterip> <masterport>
69
+
70
+ ################################## SECURITY ###################################
71
+
72
+ # Require clients to issue AUTH <PASSWORD> before processing any other
73
+ # commands. This might be useful in environments in which you do not trust
74
+ # others with access to the host running redis-server.
75
+ #
76
+ # This should stay commented out for backward compatibility and because most
77
+ # people do not need auth (e.g. they run their own servers).
78
+
79
+ # requirepass foobared
80
+
81
+ ################################### LIMITS ####################################
82
+
83
+ # Set the max number of connected clients at the same time. By default there
84
+ # is no limit, and it's up to the number of file descriptors the Redis process
85
+ # is able to open. The special value '0' means no limits.
86
+ # Once the limit is reached Redis will close all the new connections sending
87
+ # an error 'max number of clients reached'.
88
+
89
+ # maxclients 128
90
+
91
+ # Don't use more memory than the specified amount of bytes.
92
+ # When the memory limit is reached Redis will try to remove keys with an
93
+ # EXPIRE set. It will try to start freeing keys that are going to expire
94
+ # in little time and preserve keys with a longer time to live.
95
+ # Redis will also try to remove objects from free lists if possible.
96
+ #
97
+ # If all this fails, Redis will start to reply with errors to commands
98
+ # that will use more memory, like SET, LPUSH, and so on, and will continue
99
+ # to reply to most read-only commands like GET.
100
+ #
101
+ # WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
102
+ # 'state' server or cache, not as a real DB. When Redis is used as a real
103
+ # database the memory usage will grow over the weeks, it will be obvious if
104
+ # it is going to use too much memory in the long run, and you'll have the time
105
+ # to upgrade. With maxmemory after the limit is reached you'll start to get
106
+ # errors for write operations, and this may even lead to DB inconsistency.
107
+
108
+ # maxmemory <bytes>
109
+
110
+ ############################## APPEND ONLY MODE ###############################
111
+
112
+ # By default Redis asynchronously dumps the dataset on disk. If you can live
113
+ # with the idea that the latest records will be lost if something like a crash
114
+ # happens this is the preferred way to run Redis. If instead you care a lot
115
+ # about your data and don't want even a single record to get lost you should
116
+ # enable the append only mode: when this mode is enabled Redis will append
117
+ # every write operation received in the file appendonly.log. This file will
118
+ # be read on startup in order to rebuild the full dataset in memory.
119
+ #
120
+ # Note that you can have both the async dumps and the append only file if you
121
+ # like (you have to comment the "save" statements above to disable the dumps).
122
+ # Still if append only mode is enabled Redis will load the data from the
123
+ # log file at startup ignoring the dump.rdb file.
124
+ #
125
+ # The name of the append only file is "appendonly.log"
126
+
127
+ #appendonly no
128
+
129
+ # The fsync() call tells the Operating System to actually write data on disk
130
+ # instead to wait for more data in the output buffer. Some OS will really flush
131
+ # data on disk, some other OS will just try to do it ASAP.
132
+ #
133
+ # Redis supports three different modes:
134
+ #
135
+ # no: don't fsync, just let the OS flush the data when it wants. Faster.
136
+ # always: fsync after every write to the append only log . Slow, Safest.
137
+ # everysec: fsync only if one second passed since the last fsync. Compromise.
138
+ #
139
+ # The default is "always" that's the safer of the options. It's up to you to
140
+ # understand if you can relax this to "everysec" that will fsync every second
141
+ # or to "no" that will let the operating system flush the output buffer when
142
+ # it wants, for better performance (but if you can live with the idea of
143
+ # some data loss consider the default persistence mode that's snapshotting).
144
+
145
+ #appendfsync always
146
+ # appendfsync everysec
147
+ # appendfsync no
148
+
149
+ ############################### ADVANCED CONFIG ###############################
150
+
151
+ # Glue small output buffers together in order to send small replies in a
152
+ # single TCP packet. Uses a bit more CPU but most of the times it is a win
153
+ # in terms of number of queries per second. Use 'yes' if unsure.
154
+ glueoutputbuf yes
155
+
156
+ # Use object sharing. Can save a lot of memory if you have many common
157
+ # string in your dataset, but performs lookups against the shared objects
158
+ # pool so it uses more CPU and can be a bit slower. Usually it's a good
159
+ # idea.
160
+ #
161
+ # When object sharing is enabled (shareobjects yes) you can use
162
+ # shareobjectspoolsize to control the size of the pool used in order to try
163
+ # object sharing. A bigger pool size will lead to better sharing capabilities.
164
+ # In general you want this value to be at least the double of the number of
165
+ # very common strings you have in your dataset.
166
+ #
167
+ # WARNING: object sharing is experimental, don't enable this feature
168
+ # in production before of Redis 1.0-stable. Still please try this feature in
169
+ # your development environment so that we can test it better.
170
+ #shareobjects no
171
+ #shareobjectspoolsize 1024
@@ -0,0 +1,171 @@
1
+ # Redis configuration file example
2
+
3
+ # By default Redis does not run as a daemon. Use 'yes' if you need it.
4
+ # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
5
+ daemonize no
6
+
7
+ # When run as a daemon, Redis write a pid file in /var/run/redis.pid by default.
8
+ # You can specify a custom pid file location here.
9
+ pidfile /var/run/redis.pid
10
+
11
+ # Accept connections on the specified port, default is 6379
12
+ port 6379
13
+
14
+ # If you want you can bind a single interface, if the bind option is not
15
+ # specified all the interfaces will listen for connections.
16
+ #
17
+ # bind 127.0.0.1
18
+
19
+ # Close the connection after a client is idle for N seconds (0 to disable)
20
+ timeout 300
21
+
22
+ # Save the DB on disk:
23
+ #
24
+ # save <seconds> <changes>
25
+ #
26
+ # Will save the DB if both the given number of seconds and the given
27
+ # number of write operations against the DB occurred.
28
+ #
29
+ # In the example below the behaviour will be to save:
30
+ # after 900 sec (15 min) if at least 1 key changed
31
+ # after 300 sec (5 min) if at least 10 keys changed
32
+ # after 60 sec if at least 10000 keys changed
33
+ save 900 1
34
+ save 300 10
35
+ save 60 10000
36
+
37
+ # The filename where to dump the DB
38
+ dbfilename dump.rdb
39
+
40
+ # For default save/load DB in/from the working directory
41
+ # Note that you must specify a directory not a file name.
42
+ dir ./
43
+
44
+ # Set server verbosity to 'debug'
45
+ # it can be one of:
46
+ # debug (a lot of information, useful for development/testing)
47
+ # notice (moderately verbose, what you want in production probably)
48
+ # warning (only very important / critical messages are logged)
49
+ loglevel debug
50
+
51
+ # Specify the log file name. Also 'stdout' can be used to force
52
+ # the demon to log on the standard output. Note that if you use standard
53
+ # output for logging but daemonize, logs will be sent to /dev/null
54
+ logfile stdout
55
+
56
+ # Set the number of databases. The default database is DB 0, you can select
57
+ # a different one on a per-connection basis using SELECT <dbid> where
58
+ # dbid is a number between 0 and 'databases'-1
59
+ databases 16
60
+
61
+ ################################# REPLICATION #################################
62
+
63
+ # Master-Slave replication. Use slaveof to make a Redis instance a copy of
64
+ # another Redis server. Note that the configuration is local to the slave
65
+ # so for example it is possible to configure the slave to save the DB with a
66
+ # different interval, or to listen to another port, and so on.
67
+
68
+ # slaveof <masterip> <masterport>
69
+
70
+ ################################## SECURITY ###################################
71
+
72
+ # Require clients to issue AUTH <PASSWORD> before processing any other
73
+ # commands. This might be useful in environments in which you do not trust
74
+ # others with access to the host running redis-server.
75
+ #
76
+ # This should stay commented out for backward compatibility and because most
77
+ # people do not need auth (e.g. they run their own servers).
78
+
79
+ # requirepass foobared
80
+
81
+ ################################### LIMITS ####################################
82
+
83
+ # Set the max number of connected clients at the same time. By default there
84
+ # is no limit, and it's up to the number of file descriptors the Redis process
85
+ # is able to open. The special value '0' means no limts.
86
+ # Once the limit is reached Redis will close all the new connections sending
87
+ # an error 'max number of clients reached'.
88
+
89
+ # maxclients 128
90
+
91
+ # Don't use more memory than the specified amount of bytes.
92
+ # When the memory limit is reached Redis will try to remove keys with an
93
+ # EXPIRE set. It will try to start freeing keys that are going to expire
94
+ # in little time and preserve keys with a longer time to live.
95
+ # Redis will also try to remove objects from free lists if possible.
96
+ #
97
+ # If all this fails, Redis will start to reply with errors to commands
98
+ # that will use more memory, like SET, LPUSH, and so on, and will continue
99
+ # to reply to most read-only commands like GET.
100
+ #
101
+ # WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
102
+ # 'state' server or cache, not as a real DB. When Redis is used as a real
103
+ # database the memory usage will grow over the weeks, it will be obvious if
104
+ # it is going to use too much memory in the long run, and you'll have the time
105
+ # to upgrade. With maxmemory after the limit is reached you'll start to get
106
+ # errors for write operations, and this may even lead to DB inconsistency.
107
+
108
+ # maxmemory <bytes>
109
+
110
+ ############################## APPEND ONLY MODE ###############################
111
+
112
+ # By default Redis asynchronously dumps the dataset on disk. If you can live
113
+ # with the idea that the latest records will be lost if something like a crash
114
+ # happens this is the preferred way to run Redis. If instead you care a lot
115
+ # about your data and don't want to that a single record can get lost you should
116
+ # enable the append only mode: when this mode is enabled Redis will append
117
+ # every write operation received in the file appendonly.log. This file will
118
+ # be read on startup in order to rebuild the full dataset in memory.
119
+ #
120
+ # Note that you can have both the async dumps and the append only file if you
121
+ # like (you have to comment the "save" statements above to disable the dumps).
122
+ # Still if append only mode is enabled Redis will load the data from the
123
+ # log file at startup ignoring the dump.rdb file.
124
+ #
125
+ # The name of the append only file is "appendonly.log"
126
+
127
+ #appendonly no
128
+
129
+ # The fsync() call tells the Operating System to actually write data on disk
130
+ # instead to wait for more data in the output buffer. Some OS will really flush
131
+ # data on disk, some other OS will just try to do it ASAP.
132
+ #
133
+ # Redis supports three different modes:
134
+ #
135
+ # no: don't fsync, just let the OS flush the data when it wants. Faster.
136
+ # always: fsync after every write to the append only log . Slow, Safest.
137
+ # everysec: fsync only if one second passed since the last fsync. Compromise.
138
+ #
139
+ # The default is "always" that's the safer of the options. It's up to you to
140
+ # understand if you can relax this to "everysec" that will fsync every second
141
+ # or to "no" that will let the operating system flush the output buffer when
142
+ # it want, for better performances (but if you can live with the idea of
143
+ # some data loss consider the default persistence mode that's snapshotting).
144
+
145
+ #appendfsync always
146
+ # appendfsync everysec
147
+ # appendfsync no
148
+
149
+ ############################### ADVANCED CONFIG ###############################
150
+
151
+ # Glue small output buffers together in order to send small replies in a
152
+ # single TCP packet. Uses a bit more CPU but most of the times it is a win
153
+ # in terms of number of queries per second. Use 'yes' if unsure.
154
+ glueoutputbuf yes
155
+
156
+ # Use object sharing. Can save a lot of memory if you have many common
157
+ # string in your dataset, but performs lookups against the shared objects
158
+ # pool so it uses more CPU and can be a bit slower. Usually it's a good
159
+ # idea.
160
+ #
161
+ # When object sharing is enabled (shareobjects yes) you can use
162
+ # shareobjectspoolsize to control the size of the pool used in order to try
163
+ # object sharing. A bigger pool size will lead to better sharing capabilities.
164
+ # In general you want this value to be at least the double of the number of
165
+ # very common strings you have in your dataset.
166
+ #
167
+ # WARNING: object sharing is experimental, don't enable this feature
168
+ # in production before of Redis 1.0-stable. Still please try this feature in
169
+ # your development environment so that we can test it better.
170
+ #shareobjects no
171
+ #shareobjectspoolsize 1024