honkster-redis-store 0.3.10

Sign up to get free protection for your applications and to get access to all the features.
@@ -0,0 +1,171 @@
1
+ # Redis configuration file example
2
+
3
+ # By default Redis does not run as a daemon. Use 'yes' if you need it.
4
+ # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
5
+ daemonize no
6
+
7
+ # When run as a daemon, Redis writes a pid file in /var/run/redis.pid by default.
8
+ # You can specify a custom pid file location here.
9
+ pidfile /var/run/redis.pid
10
+
11
+ # Accept connections on the specified port, default is 6379
12
+ port 6381
13
+
14
+ # If you want you can bind a single interface, if the bind option is not
15
+ # specified all the interfaces will listen for connections.
16
+ #
17
+ # bind 127.0.0.1
18
+
19
+ # Close the connection after a client is idle for N seconds (0 to disable)
20
+ timeout 300
21
+
22
+ # Save the DB on disk:
23
+ #
24
+ # save <seconds> <changes>
25
+ #
26
+ # Will save the DB if both the given number of seconds and the given
27
+ # number of write operations against the DB occurred.
28
+ #
29
+ # In the example below the behaviour will be to save:
30
+ # after 900 sec (15 min) if at least 1 key changed
31
+ # after 300 sec (5 min) if at least 10 keys changed
32
+ # after 60 sec if at least 10000 keys changed
33
+ save 900 1
34
+ save 300 10
35
+ save 60 10000
36
+
37
+ # The filename where to dump the DB
38
+ dbfilename slave-dump.rdb
39
+
40
+ # For default save/load DB in/from the working directory
41
+ # Note that you must specify a directory not a file name.
42
+ dir ./
43
+
44
+ # Set server verbosity to 'debug'
45
+ # it can be one of:
46
+ # debug (a lot of information, useful for development/testing)
47
+ # notice (moderately verbose, what you want in production probably)
48
+ # warning (only very important / critical messages are logged)
49
+ loglevel debug
50
+
51
+ # Specify the log file name. Also 'stdout' can be used to force
52
+ # the daemon to log on the standard output. Note that if you use standard
53
+ # output for logging but daemonize, logs will be sent to /dev/null
54
+ logfile stdout
55
+
56
+ # Set the number of databases. The default database is DB 0, you can select
57
+ # a different one on a per-connection basis using SELECT <dbid> where
58
+ # dbid is a number between 0 and 'databases'-1
59
+ databases 16
60
+
61
+ ################################# REPLICATION #################################
62
+
63
+ # Master-Slave replication. Use slaveof to make a Redis instance a copy of
64
+ # another Redis server. Note that the configuration is local to the slave
65
+ # so for example it is possible to configure the slave to save the DB with a
66
+ # different interval, or to listen to another port, and so on.
67
+
68
+ slaveof localhost 6380
69
+
70
+ ################################## SECURITY ###################################
71
+
72
+ # Require clients to issue AUTH <PASSWORD> before processing any other
73
+ # commands. This might be useful in environments in which you do not trust
74
+ # others with access to the host running redis-server.
75
+ #
76
+ # This should stay commented out for backward compatibility and because most
77
+ # people do not need auth (e.g. they run their own servers).
78
+
79
+ # requirepass foobared
80
+
81
+ ################################### LIMITS ####################################
82
+
83
+ # Set the max number of connected clients at the same time. By default there
84
+ # is no limit, and it's up to the number of file descriptors the Redis process
85
+ # is able to open. The special value '0' means no limits.
86
+ # Once the limit is reached Redis will close all the new connections sending
87
+ # an error 'max number of clients reached'.
88
+
89
+ # maxclients 128
90
+
91
+ # Don't use more memory than the specified amount of bytes.
92
+ # When the memory limit is reached Redis will try to remove keys with an
93
+ # EXPIRE set. It will try to start freeing keys that are going to expire
94
+ # in little time and preserve keys with a longer time to live.
95
+ # Redis will also try to remove objects from free lists if possible.
96
+ #
97
+ # If all this fails, Redis will start to reply with errors to commands
98
+ # that will use more memory, like SET, LPUSH, and so on, and will continue
99
+ # to reply to most read-only commands like GET.
100
+ #
101
+ # WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
102
+ # 'state' server or cache, not as a real DB. When Redis is used as a real
103
+ # database the memory usage will grow over the weeks, it will be obvious if
104
+ # it is going to use too much memory in the long run, and you'll have the time
105
+ # to upgrade. With maxmemory after the limit is reached you'll start to get
106
+ # errors for write operations, and this may even lead to DB inconsistency.
107
+
108
+ # maxmemory <bytes>
109
+
110
+ ############################## APPEND ONLY MODE ###############################
111
+
112
+ # By default Redis asynchronously dumps the dataset on disk. If you can live
113
+ # with the idea that the latest records will be lost if something like a crash
114
+ # happens this is the preferred way to run Redis. If instead you care a lot
115
+ # about your data and don't want a single record to get lost, you should
116
+ # enable the append only mode: when this mode is enabled Redis will append
117
+ # every write operation received in the file appendonly.log. This file will
118
+ # be read on startup in order to rebuild the full dataset in memory.
119
+ #
120
+ # Note that you can have both the async dumps and the append only file if you
121
+ # like (you have to comment the "save" statements above to disable the dumps).
122
+ # Still if append only mode is enabled Redis will load the data from the
123
+ # log file at startup ignoring the dump.rdb file.
124
+ #
125
+ # The name of the append only file is "appendonly.log"
126
+
127
+ #appendonly no
128
+
129
+ # The fsync() call tells the Operating System to actually write data on disk
130
+ # instead to wait for more data in the output buffer. Some OS will really flush
131
+ # data on disk, some other OS will just try to do it ASAP.
132
+ #
133
+ # Redis supports three different modes:
134
+ #
135
+ # no: don't fsync, just let the OS flush the data when it wants. Faster.
136
+ # always: fsync after every write to the append only log. Slow, Safest.
137
+ # everysec: fsync only if one second passed since the last fsync. Compromise.
138
+ #
139
+ # The default is "always" that's the safer of the options. It's up to you to
140
+ # understand if you can relax this to "everysec" that will fsync every second
141
+ # or to "no" that will let the operating system flush the output buffer when
142
+ # it wants, for better performance (but if you can live with the idea of
143
+ # some data loss consider the default persistence mode that's snapshotting).
144
+
145
+ #appendfsync always
146
+ # appendfsync everysec
147
+ # appendfsync no
148
+
149
+ ############################### ADVANCED CONFIG ###############################
150
+
151
+ # Glue small output buffers together in order to send small replies in a
152
+ # single TCP packet. Uses a bit more CPU but most of the times it is a win
153
+ # in terms of number of queries per second. Use 'yes' if unsure.
154
+ glueoutputbuf yes
155
+
156
+ # Use object sharing. Can save a lot of memory if you have many common
157
+ # string in your dataset, but performs lookups against the shared objects
158
+ # pool so it uses more CPU and can be a bit slower. Usually it's a good
159
+ # idea.
160
+ #
161
+ # When object sharing is enabled (shareobjects yes) you can use
162
+ # shareobjectspoolsize to control the size of the pool used in order to try
163
+ # object sharing. A bigger pool size will lead to better sharing capabilities.
164
+ # In general you want this value to be at least double the number of
165
+ # very common strings you have in your dataset.
166
+ #
167
+ # WARNING: object sharing is experimental, don't enable this feature
168
+ # in production before Redis 1.0-stable. Still please try this feature in
169
+ # your development environment so that we can test it better.
170
+ #shareobjects no
171
+ #shareobjectspoolsize 1024
@@ -0,0 +1,118 @@
1
+ require File.join(File.dirname(__FILE__), "/../../../spec_helper")
2
+
3
+ class Object
4
+ def sha_like?
5
+ length == 40 && self =~ /^[0-9a-z]+$/
6
+ end
7
+ end
8
+
9
+ module Rack
10
+ module Cache
11
+ class EntityStore
12
+ # courtesy of http://github.com/rtomayko/rack-cache team
13
+ describe "Rack::Cache::EntityStore::Redis" do
14
+ before(:each) do
15
+ @store = Rack::Cache::EntityStore::Redis.new :host => "localhost"
16
+ end
17
+
18
+ # Redis store specific examples ===========================================
19
+
20
+ it "should have the class referenced by homonym constant" do
21
+ Rack::Cache::EntityStore::REDIS.should be(Rack::Cache::EntityStore::Redis)
22
+ end
23
+
24
+ it "should resolve the connection uri" do
25
+ cache = Rack::Cache::EntityStore::Redis.resolve(uri("redis://127.0.0.1")).cache
26
+ cache.should be_kind_of(::Redis::Client)
27
+ cache.to_s.should == "Redis Client connected to 127.0.0.1:6379 against DB 0"
28
+
29
+ cache = Rack::Cache::EntityStore::Redis.resolve(uri("redis://127.0.0.1:6380")).cache
30
+ cache.to_s.should == "Redis Client connected to 127.0.0.1:6380 against DB 0"
31
+
32
+ cache = Rack::Cache::EntityStore::Redis.resolve(uri("redis://127.0.0.1/13")).cache
33
+ cache.to_s.should == "Redis Client connected to 127.0.0.1:6379 against DB 13"
34
+ end
35
+
36
+ # Entity store shared examples ===========================================
37
+
38
+ it 'responds to all required messages' do
39
+ %w[read open write exist?].each do |message|
40
+ @store.should respond_to(message)
41
+ end
42
+ end
43
+
44
+ it 'stores bodies with #write' do
45
+ key, size = @store.write(['My wild love went riding,'])
46
+ key.should_not be_nil
47
+ key.should be_sha_like
48
+
49
+ data = @store.read(key)
50
+ data.should == 'My wild love went riding,'
51
+ end
52
+
53
+ it 'correctly determines whether cached body exists for key with #exist?' do
54
+ key, size = @store.write(['She rode to the devil,'])
55
+ @store.should be_exist(key)
56
+ @store.should_not be_exist('938jasddj83jasdh4438021ksdfjsdfjsdsf')
57
+ end
58
+
59
+ it 'can read data written with #write' do
60
+ key, size = @store.write(['And asked him to pay.'])
61
+ data = @store.read(key)
62
+ data.should == 'And asked him to pay.'
63
+ end
64
+
65
+ it 'gives a 40 character SHA1 hex digest from #write' do
66
+ key, size = @store.write(['she rode to the sea;'])
67
+ key.should_not be_nil
68
+ key.length.should == 40
69
+ key.should =~ /^[0-9a-z]+$/
70
+ key.should == '90a4c84d51a277f3dafc34693ca264531b9f51b6'
71
+ end
72
+
73
+ it 'returns the entire body as a String from #read' do
74
+ key, size = @store.write(['She gathered together'])
75
+ @store.read(key).should == 'She gathered together'
76
+ end
77
+
78
+ it 'returns nil from #read when key does not exist' do
79
+ @store.read('87fe0a1ae82a518592f6b12b0183e950b4541c62').should be_nil
80
+ end
81
+
82
+ it 'returns a Rack compatible body from #open' do
83
+ key, size = @store.write(['Some shells for her hair.'])
84
+ body = @store.open(key)
85
+ body.should respond_to(:each)
86
+ buf = ''
87
+ body.each { |part| buf << part }
88
+ buf.should == 'Some shells for her hair.'
89
+ end
90
+
91
+ it 'returns nil from #open when key does not exist' do
92
+ @store.open('87fe0a1ae82a518592f6b12b0183e950b4541c62').should be_nil
93
+ end
94
+
95
+ it 'can store largish bodies with binary data' do
96
+ pony = ::File.open(::File.dirname(__FILE__) + '/pony.jpg', 'rb') { |f| f.read }
97
+ key, size = @store.write([pony])
98
+ key.should == 'd0f30d8659b4d268c5c64385d9790024c2d78deb'
99
+ data = @store.read(key)
100
+ data.length.should == pony.length
101
+ data.hash.should == pony.hash
102
+ end
103
+
104
+ it 'deletes stored entries with #purge' do
105
+ key, size = @store.write(['My wild love went riding,'])
106
+ @store.purge(key).should be_nil
107
+ @store.read(key).should be_nil
108
+ end
109
+
110
+ # Helper Methods =============================================================
111
+
112
+ define_method :uri do |uri|
113
+ URI.parse uri
114
+ end
115
+ end
116
+ end
117
+ end
118
+ end
@@ -0,0 +1,255 @@
1
+ require File.join(File.dirname(__FILE__), "/../../../spec_helper")
2
+
3
+ module Rack
4
+ module Cache
5
+ class MetaStore
6
+ # courtesy of http://github.com/rtomayko/rack-cache team
7
+ describe "Rack::Cache::MetaStore::Redis" do
8
+ before :each do
9
+ @store = Rack::Cache::MetaStore::Redis.resolve uri("redis://127.0.0.1")
10
+ @entity_store = Rack::Cache::EntityStore::Redis.resolve uri("redis://127.0.0.1:6380")
11
+ @request = mock_request('/', {})
12
+ @response = mock_response(200, {}, ['hello world'])
13
+ end
14
+
15
+ after :each do
16
+ @store.cache.flushall
17
+ @entity_store.cache.flushall
18
+ end
19
+
20
+ it "should have the class referenced by homonym constant" do
21
+ Rack::Cache::MetaStore::REDIS.should be(Rack::Cache::MetaStore::Redis)
22
+ end
23
+
24
+ it "should resolve the connection uri" do
25
+ cache = Rack::Cache::MetaStore::Redis.resolve(uri("redis://127.0.0.1")).cache
26
+ cache.should be_kind_of(::MarshaledRedis)
27
+ cache.to_s.should == "Redis Client connected to 127.0.0.1:6379 against DB 0"
28
+
29
+ cache = Rack::Cache::MetaStore::Redis.resolve(uri("redis://127.0.0.1:6380")).cache
30
+ cache.to_s.should == "Redis Client connected to 127.0.0.1:6380 against DB 0"
31
+
32
+ cache = Rack::Cache::MetaStore::Redis.resolve(uri("redis://127.0.0.1/13")).cache
33
+ cache.to_s.should == "Redis Client connected to 127.0.0.1:6379 against DB 13"
34
+ end
35
+
36
+ # Low-level implementation methods ===========================================
37
+
38
+ it 'writes a list of negotation tuples with #write' do
39
+ lambda { @store.write('/test', [[{}, {}]]) }.should_not raise_error
40
+ end
41
+
42
+ it 'reads a list of negotation tuples with #read' do
43
+ @store.write('/test', [[{},{}],[{},{}]])
44
+ tuples = @store.read('/test')
45
+ tuples.should == [ [{},{}], [{},{}] ]
46
+ end
47
+
48
+ it 'reads an empty list with #read when nothing cached at key' do
49
+ @store.read('/nothing').should be_empty
50
+ end
51
+
52
+ it 'removes entries for key with #purge' do
53
+ @store.write('/test', [[{},{}]])
54
+ @store.read('/test').should_not be_empty
55
+
56
+ @store.purge('/test')
57
+ @store.read('/test').should be_empty
58
+ end
59
+
60
+ it 'succeeds when purging non-existing entries' do
61
+ @store.read('/test').should be_empty
62
+ @store.purge('/test')
63
+ end
64
+
65
+ it 'returns nil from #purge' do
66
+ @store.write('/test', [[{},{}]])
67
+ @store.purge('/test').should be_nil
68
+ @store.read('/test').should == []
69
+ end
70
+
71
+ %w[/test http://example.com:8080/ /test?x=y /test?x=y&p=q].each do |key|
72
+ it "can read and write key: '#{key}'" do
73
+ lambda { @store.write(key, [[{},{}]]) }.should_not raise_error
74
+ @store.read(key).should == [[{},{}]]
75
+ end
76
+ end
77
+
78
+ it "can read and write fairly large keys" do
79
+ key = "b" * 4096
80
+ lambda { @store.write(key, [[{},{}]]) }.should_not raise_error
81
+ @store.read(key).should == [[{},{}]]
82
+ end
83
+
84
+ it "allows custom cache keys from block" do
85
+ request = mock_request('/test', {})
86
+ request.env['rack-cache.cache_key'] =
87
+ lambda { |request| request.path_info.reverse }
88
+ @store.cache_key(request).should == 'tset/'
89
+ end
90
+
91
+ it "allows custom cache keys from class" do
92
+ request = mock_request('/test', {})
93
+ request.env['rack-cache.cache_key'] = Class.new do
94
+ def self.call(request); request.path_info.reverse end
95
+ end
96
+ @store.cache_key(request).should == 'tset/'
97
+ end
98
+
99
+ # Abstract methods ===========================================================
100
+
101
+ # Stores an entry for the given request args, returns a url encoded cache key
102
+ # for the request.
103
+ define_method :store_simple_entry do |*request_args|
104
+ path, headers = request_args
105
+ @request = mock_request(path || '/test', headers || {})
106
+ @response = mock_response(200, {'Cache-Control' => 'max-age=420'}, ['test'])
107
+ body = @response.body
108
+ cache_key = @store.store(@request, @response, @entity_store)
109
+ @response.body.should_not equal(body)
110
+ cache_key
111
+ end
112
+
113
+ it 'stores a cache entry' do
114
+ cache_key = store_simple_entry
115
+ @store.read(cache_key).should_not be_empty
116
+ end
117
+
118
+ it 'sets the X-Content-Digest response header before storing' do
119
+ cache_key = store_simple_entry
120
+ req, res = @store.read(cache_key).first
121
+ res['X-Content-Digest'].should == 'a94a8fe5ccb19ba61c4c0873d391e987982fbbd3'
122
+ end
123
+
124
+ it 'finds a stored entry with #lookup' do
125
+ store_simple_entry
126
+ response = @store.lookup(@request, @entity_store)
127
+ response.should_not be_nil
128
+ response.should be_kind_of(Rack::Cache::Response)
129
+ end
130
+
131
+ it 'does not find an entry with #lookup when none exists' do
132
+ req = mock_request('/test', {'HTTP_FOO' => 'Foo', 'HTTP_BAR' => 'Bar'})
133
+ @store.lookup(req, @entity_store).should be_nil
134
+ end
135
+
136
+ it "canonizes urls for cache keys" do
137
+ store_simple_entry(path='/test?x=y&p=q')
138
+
139
+ hits_req = mock_request(path, {})
140
+ miss_req = mock_request('/test?p=x', {})
141
+
142
+ @store.lookup(hits_req, @entity_store).should_not be_nil
143
+ @store.lookup(miss_req, @entity_store).should be_nil
144
+ end
145
+
146
+ it 'does not find an entry with #lookup when the body does not exist' do
147
+ store_simple_entry
148
+ @response.headers['X-Content-Digest'].should_not be_nil
149
+ @entity_store.purge(@response.headers['X-Content-Digest'])
150
+ @store.lookup(@request, @entity_store).should be_nil
151
+ end
152
+
153
+ it 'restores response headers properly with #lookup' do
154
+ store_simple_entry
155
+ response = @store.lookup(@request, @entity_store)
156
+ response.headers.should == @response.headers.merge('Content-Length' => '4')
157
+ end
158
+
159
+ it 'restores response body from entity store with #lookup' do
160
+ store_simple_entry
161
+ response = @store.lookup(@request, @entity_store)
162
+ body = '' ; response.body.each {|p| body << p}
163
+ body.should == 'test'
164
+ end
165
+
166
+ it 'invalidates meta and entity store entries with #invalidate' do
167
+ store_simple_entry
168
+ @store.invalidate(@request, @entity_store)
169
+ response = @store.lookup(@request, @entity_store)
170
+ response.should be_kind_of(Rack::Cache::Response)
171
+ response.should_not be_fresh
172
+ end
173
+
174
+ it 'succeeds quietly when #invalidate called with no matching entries' do
175
+ req = mock_request('/test', {})
176
+ @store.invalidate(req, @entity_store)
177
+ @store.lookup(@request, @entity_store).should be_nil
178
+ end
179
+
180
+ # Vary =======================================================================
181
+
182
+ it 'does not return entries that Vary with #lookup' do
183
+ req1 = mock_request('/test', {'HTTP_FOO' => 'Foo', 'HTTP_BAR' => 'Bar'})
184
+ req2 = mock_request('/test', {'HTTP_FOO' => 'Bling', 'HTTP_BAR' => 'Bam'})
185
+ res = mock_response(200, {'Vary' => 'Foo Bar'}, ['test'])
186
+ @store.store(req1, res, @entity_store)
187
+
188
+ @store.lookup(req2, @entity_store).should be_nil
189
+ end
190
+
191
+ it 'stores multiple responses for each Vary combination' do
192
+ req1 = mock_request('/test', {'HTTP_FOO' => 'Foo', 'HTTP_BAR' => 'Bar'})
193
+ res1 = mock_response(200, {'Vary' => 'Foo Bar'}, ['test 1'])
194
+ key = @store.store(req1, res1, @entity_store)
195
+
196
+ req2 = mock_request('/test', {'HTTP_FOO' => 'Bling', 'HTTP_BAR' => 'Bam'})
197
+ res2 = mock_response(200, {'Vary' => 'Foo Bar'}, ['test 2'])
198
+ @store.store(req2, res2, @entity_store)
199
+
200
+ req3 = mock_request('/test', {'HTTP_FOO' => 'Baz', 'HTTP_BAR' => 'Boom'})
201
+ res3 = mock_response(200, {'Vary' => 'Foo Bar'}, ['test 3'])
202
+ @store.store(req3, res3, @entity_store)
203
+
204
+ slurp(@store.lookup(req3, @entity_store).body).should == 'test 3'
205
+ slurp(@store.lookup(req1, @entity_store).body).should == 'test 1'
206
+ slurp(@store.lookup(req2, @entity_store).body).should == 'test 2'
207
+
208
+ @store.read(key).length.should == 3
209
+ end
210
+
211
+ it 'overwrites non-varying responses with #store' do
212
+ req1 = mock_request('/test', {'HTTP_FOO' => 'Foo', 'HTTP_BAR' => 'Bar'})
213
+ res1 = mock_response(200, {'Vary' => 'Foo Bar'}, ['test 1'])
214
+ key = @store.store(req1, res1, @entity_store)
215
+ slurp(@store.lookup(req1, @entity_store).body).should == 'test 1'
216
+
217
+ req2 = mock_request('/test', {'HTTP_FOO' => 'Bling', 'HTTP_BAR' => 'Bam'})
218
+ res2 = mock_response(200, {'Vary' => 'Foo Bar'}, ['test 2'])
219
+ @store.store(req2, res2, @entity_store)
220
+ slurp(@store.lookup(req2, @entity_store).body).should == 'test 2'
221
+
222
+ req3 = mock_request('/test', {'HTTP_FOO' => 'Foo', 'HTTP_BAR' => 'Bar'})
223
+ res3 = mock_response(200, {'Vary' => 'Foo Bar'}, ['test 3'])
224
+ @store.store(req3, res3, @entity_store)
225
+ slurp(@store.lookup(req1, @entity_store).body).should == 'test 3'
226
+
227
+ @store.read(key).length.should == 2
228
+ end
229
+
230
+ # Helper Methods =============================================================
231
+
232
+ define_method :mock_request do |uri,opts|
233
+ env = Rack::MockRequest.env_for(uri, opts || {})
234
+ Rack::Cache::Request.new(env)
235
+ end
236
+
237
+ define_method :mock_response do |status,headers,body|
238
+ headers ||= {}
239
+ body = Array(body).compact
240
+ Rack::Cache::Response.new(status, headers, body)
241
+ end
242
+
243
+ define_method :slurp do |body|
244
+ buf = ''
245
+ body.each {|part| buf << part }
246
+ buf
247
+ end
248
+
249
+ define_method :uri do |uri|
250
+ URI.parse uri
251
+ end
252
+ end
253
+ end
254
+ end
255
+ end