redis-store 0.3.6

Potentially problematic release: this version of redis-store has been flagged as potentially problematic.

@@ -0,0 +1,132 @@
+ # Redis configuration file example
+
+ # By default Redis does not run as a daemon. Use 'yes' if you need it.
+ # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
+ daemonize yes
+
+ # When run as a daemon, Redis writes a pid file in /var/run/redis.pid by default.
+ # You can specify a custom pid file location here.
+ pidfile ./tmp/redis-single.pid
+
+ # Accept connections on the specified port; the default is 6379.
+ port 6379
+
+ # If you want you can bind a single interface; if the bind option is not
+ # specified, all the interfaces will listen for connections.
+ #
+ bind 127.0.0.1
+
+ # Close the connection after a client is idle for N seconds (0 to disable).
+ timeout 5
+
+ # Save the DB on disk:
+ #
+ #   save <seconds> <changes>
+ #
+ # Will save the DB if both the given number of seconds and the given
+ # number of write operations against the DB occurred.
+ #
+ # In the example below the behaviour will be to save:
+ #   after 900 sec (15 min) if at least 1 key changed
+ #   after 300 sec (5 min) if at least 10 keys changed
+ #   after 60 sec if at least 10000 keys changed
+ save 900 1
+ save 300 10
+ save 60 10000
+
+ # The filename where to dump the DB.
+ dbfilename single.rdb
+
+ # By default, save/load the DB in/from the working directory.
+ # Note that you must specify a directory, not a file name.
+ dir ./tmp
+
+ # Set server verbosity to 'debug'.
+ # It can be one of:
+ #   debug   (a lot of information, useful for development/testing)
+ #   notice  (moderately verbose, what you probably want in production)
+ #   warning (only very important / critical messages are logged)
+ loglevel debug
+
+ # Specify the log file name. 'stdout' can also be used to force
+ # the daemon to log on the standard output. Note that if you use standard
+ # output for logging but daemonize, logs will be sent to /dev/null.
+ logfile stdout
+
+ # Set the number of databases. The default database is DB 0; you can select
+ # a different one on a per-connection basis using SELECT <dbid>, where
+ # dbid is a number between 0 and 'databases'-1.
+ databases 16
+
+ ################################# REPLICATION #################################
+
+ # Master-Slave replication. Use slaveof to make a Redis instance a copy of
+ # another Redis server. Note that the configuration is local to the slave,
+ # so for example it is possible to configure the slave to save the DB with a
+ # different interval, or to listen on another port, and so on.
+
+ # slaveof <masterip> <masterport>
+
+ ################################## SECURITY ###################################
+
+ # Require clients to issue AUTH <PASSWORD> before processing any other
+ # commands. This might be useful in environments in which you do not trust
+ # others with access to the host running redis-server.
+ #
+ # This should stay commented out for backward compatibility and because most
+ # people do not need auth (e.g. they run their own servers).
+
+ # requirepass foobared
+
+ ################################### LIMITS ####################################
+
+ # Set the max number of clients connected at the same time. By default there
+ # is no limit, and it's up to the number of file descriptors the Redis process
+ # is able to open. The special value '0' means no limit.
+ # Once the limit is reached, Redis will close all new connections, sending
+ # the error 'max number of clients reached'.
+
+ # maxclients 128
+
+ # Don't use more memory than the specified number of bytes.
+ # When the memory limit is reached, Redis will try to remove keys with an
+ # EXPIRE set. It will try to start freeing keys that are going to expire
+ # soon and preserve keys with a longer time to live.
+ # Redis will also try to remove objects from free lists if possible.
+ #
+ # If all this fails, Redis will start to reply with errors to commands
+ # that would use more memory, like SET, LPUSH, and so on, and will continue
+ # to reply to most read-only commands like GET.
+ #
+ # WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
+ # 'state' server or cache, not as a real DB. When Redis is used as a real
+ # database the memory usage will grow over the weeks; it will be obvious if
+ # it is going to use too much memory in the long run, and you'll have the time
+ # to upgrade. With maxmemory, after the limit is reached you'll start to get
+ # errors for write operations, and this may even lead to DB inconsistency.
+
+ # maxmemory <bytes>
+
+ ############################### ADVANCED CONFIG ###############################
+
+ # Glue small output buffers together in order to send small replies in a
+ # single TCP packet. This uses a bit more CPU but most of the time it is a win
+ # in terms of number of queries per second. Use 'yes' if unsure.
+ glueoutputbuf yes
+
+ # Use object sharing. This can save a lot of memory if you have many common
+ # strings in your dataset, but it performs lookups against the shared objects
+ # pool, so it uses more CPU and can be a bit slower. Usually it's a good
+ # idea.
+ #
+ # When object sharing is enabled (shareobjects yes) you can use
+ # shareobjectspoolsize to control the size of the pool used for
+ # object sharing. A bigger pool size will lead to better sharing capabilities.
+ # In general you want this value to be at least double the number of
+ # very common strings you have in your dataset.
+ #
+ # WARNING: object sharing is experimental; don't enable this feature
+ # in production before Redis 1.0-stable. Still, please try this feature in
+ # your development environment so that we can test it better.
+ shareobjects no
+ shareobjectspoolsize 1024
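
The file above is the standalone ("single") test instance: daemonized, listening on 127.0.0.1:6379, dumping to ./tmp/single.rdb, and dropping idle clients after 5 seconds. A rough sketch of talking to it follows; the redis-rb client gem, its constructor options, and the key names are assumptions for illustration, not part of this release.

  require 'redis'

  # Connect to the instance booted from the config above (host, port and db taken from it).
  single = Redis.new(:host => "127.0.0.1", :port => 6379, :db => 0)

  single.set "greeting", "hello"   # plain write
  single.get "greeting"            # => "hello"

  # 'timeout 5' means the server closes a connection that has been idle for
  # about five seconds, so long-lived clients may need to reconnect between calls.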
@@ -0,0 +1,132 @@
+ # Redis configuration file example
+
+ # By default Redis does not run as a daemon. Use 'yes' if you need it.
+ # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
+ daemonize yes
+
+ # When run as a daemon, Redis writes a pid file in /var/run/redis.pid by default.
+ # You can specify a custom pid file location here.
+ pidfile ./tmp/redis-slave.pid
+
+ # Accept connections on the specified port; the default is 6379.
+ port 6381
+
+ # If you want you can bind a single interface; if the bind option is not
+ # specified, all the interfaces will listen for connections.
+ #
+ bind 127.0.0.1
+
+ # Close the connection after a client is idle for N seconds (0 to disable).
+ timeout 300
+
+ # Save the DB on disk:
+ #
+ #   save <seconds> <changes>
+ #
+ # Will save the DB if both the given number of seconds and the given
+ # number of write operations against the DB occurred.
+ #
+ # In the example below the behaviour will be to save:
+ #   after 900 sec (15 min) if at least 1 key changed
+ #   after 300 sec (5 min) if at least 10 keys changed
+ #   after 60 sec if at least 10000 keys changed
+ save 900 1
+ save 300 10
+ save 60 10000
+
+ # The filename where to dump the DB.
+ dbfilename slave.rdb
+
+ # By default, save/load the DB in/from the working directory.
+ # Note that you must specify a directory, not a file name.
+ dir ./tmp
+
+ # Set server verbosity to 'debug'.
+ # It can be one of:
+ #   debug   (a lot of information, useful for development/testing)
+ #   notice  (moderately verbose, what you probably want in production)
+ #   warning (only very important / critical messages are logged)
+ loglevel debug
+
+ # Specify the log file name. 'stdout' can also be used to force
+ # the daemon to log on the standard output. Note that if you use standard
+ # output for logging but daemonize, logs will be sent to /dev/null.
+ logfile stdout
+
+ # Set the number of databases. The default database is DB 0; you can select
+ # a different one on a per-connection basis using SELECT <dbid>, where
+ # dbid is a number between 0 and 'databases'-1.
+ databases 16
+
+ ################################# REPLICATION #################################
+
+ # Master-Slave replication. Use slaveof to make a Redis instance a copy of
+ # another Redis server. Note that the configuration is local to the slave,
+ # so for example it is possible to configure the slave to save the DB with a
+ # different interval, or to listen on another port, and so on.
+
+ slaveof 127.0.0.1 6380
+
+ ################################## SECURITY ###################################
+
+ # Require clients to issue AUTH <PASSWORD> before processing any other
+ # commands. This might be useful in environments in which you do not trust
+ # others with access to the host running redis-server.
+ #
+ # This should stay commented out for backward compatibility and because most
+ # people do not need auth (e.g. they run their own servers).
+
+ # requirepass foobared
+
+ ################################### LIMITS ####################################
+
+ # Set the max number of clients connected at the same time. By default there
+ # is no limit, and it's up to the number of file descriptors the Redis process
+ # is able to open. The special value '0' means no limit.
+ # Once the limit is reached, Redis will close all new connections, sending
+ # the error 'max number of clients reached'.
+
+ # maxclients 128
+
+ # Don't use more memory than the specified number of bytes.
+ # When the memory limit is reached, Redis will try to remove keys with an
+ # EXPIRE set. It will try to start freeing keys that are going to expire
+ # soon and preserve keys with a longer time to live.
+ # Redis will also try to remove objects from free lists if possible.
+ #
+ # If all this fails, Redis will start to reply with errors to commands
+ # that would use more memory, like SET, LPUSH, and so on, and will continue
+ # to reply to most read-only commands like GET.
+ #
+ # WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
+ # 'state' server or cache, not as a real DB. When Redis is used as a real
+ # database the memory usage will grow over the weeks; it will be obvious if
+ # it is going to use too much memory in the long run, and you'll have the time
+ # to upgrade. With maxmemory, after the limit is reached you'll start to get
+ # errors for write operations, and this may even lead to DB inconsistency.
+
+ # maxmemory <bytes>
+
+ ############################### ADVANCED CONFIG ###############################
+
+ # Glue small output buffers together in order to send small replies in a
+ # single TCP packet. This uses a bit more CPU but most of the time it is a win
+ # in terms of number of queries per second. Use 'yes' if unsure.
+ glueoutputbuf yes
+
+ # Use object sharing. This can save a lot of memory if you have many common
+ # strings in your dataset, but it performs lookups against the shared objects
+ # pool, so it uses more CPU and can be a bit slower. Usually it's a good
+ # idea.
+ #
+ # When object sharing is enabled (shareobjects yes) you can use
+ # shareobjectspoolsize to control the size of the pool used for
+ # object sharing. A bigger pool size will lead to better sharing capabilities.
+ # In general you want this value to be at least double the number of
+ # very common strings you have in your dataset.
+ #
+ # WARNING: object sharing is experimental; don't enable this feature
+ # in production before Redis 1.0-stable. Still, please try this feature in
+ # your development environment so that we can test it better.
+ shareobjects no
+ shareobjectspoolsize 1024
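
This second file is the slave fixture: the same settings, but on port 6381 and with 'slaveof 127.0.0.1 6380', so it mirrors a master expected on port 6380 (that master's config is not part of this diff). A hedged sketch of checking the replication link; the client gem and the master instance are assumptions.

  require 'redis'

  master = Redis.new(:host => "127.0.0.1", :port => 6380)  # assumed master, not defined in this diff
  slave  = Redis.new(:host => "127.0.0.1", :port => 6381)  # the instance configured above

  master.set "replicated-key", "42"
  sleep 0.1                        # replication is asynchronous; give it a moment
  slave.get "replicated-key"       # => "42" once the slave has caught up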
@@ -0,0 +1,108 @@
+ require File.join(File.dirname(__FILE__), "/../../../spec_helper")
+
+ module Rack
+   module Cache
+     class EntityStore
+       describe "Rack::Cache::EntityStore::Redis" do
+         before(:each) do
+           @store = Rack::Cache::EntityStore::Redis.new :host => "localhost"
+         end
+
+         it "should have the class referenced by homonym constant" do
+           Rack::Cache::EntityStore::REDIS.should be(Rack::Cache::EntityStore::Redis)
+         end
+
+         it "should resolve the connection uri" do
+           cache = Rack::Cache::EntityStore::Redis.resolve(uri("redis://127.0.0.1")).cache
+           cache.should be_kind_of(::Redis)
+           cache.host.should == "127.0.0.1"
+           cache.port.should == 6379
+           cache.db.should == 0
+
+           cache = Rack::Cache::EntityStore::Redis.resolve(uri("redis://127.0.0.1:6380")).cache
+           cache.port.should == 6380
+
+           cache = Rack::Cache::EntityStore::Redis.resolve(uri("redis://127.0.0.1/13")).cache
+           cache.db.should == 13
+         end
+
+         it 'responds to all required messages' do
+           %w[read open write exist?].each do |message|
+             @store.should respond_to(message)
+           end
+         end
+
+         it 'stores bodies with #write' do
+           key, size = @store.write(['My wild love went riding,'])
+           key.should_not be_nil
+           # key.should be_sha_like TODO re-enable
+
+           data = @store.read(key)
+           data.should == 'My wild love went riding,'
+         end
+
+         it 'correctly determines whether cached body exists for key with #exist?' do
+           key, size = @store.write(['She rode to the devil,'])
+           @store.should be_exist(key)
+           @store.should_not be_exist('938jasddj83jasdh4438021ksdfjsdfjsdsf')
+         end
+
+         it 'can read data written with #write' do
+           key, size = @store.write(['And asked him to pay.'])
+           data = @store.read(key)
+           data.should == 'And asked him to pay.'
+         end
+
+         it 'gives a 40 character SHA1 hex digest from #write' do
+           key, size = @store.write(['she rode to the sea;'])
+           key.should_not be_nil
+           key.length.should == 40
+           key.should =~ /^[0-9a-z]+$/
+           key.should == '90a4c84d51a277f3dafc34693ca264531b9f51b6'
+         end
+
+         it 'returns the entire body as a String from #read' do
+           key, size = @store.write(['She gathered together'])
+           @store.read(key).should == 'She gathered together'
+         end
+
+         it 'returns nil from #read when key does not exist' do
+           @store.read('87fe0a1ae82a518592f6b12b0183e950b4541c62').should be_nil
+         end
+
+         it 'returns a Rack compatible body from #open' do
+           key, size = @store.write(['Some shells for her hair.'])
+           body = @store.open(key)
+           body.should respond_to(:each)
+           buf = ''
+           body.each { |part| buf << part }
+           buf.should == 'Some shells for her hair.'
+         end
+
+         it 'returns nil from #open when key does not exist' do
+           @store.open('87fe0a1ae82a518592f6b12b0183e950b4541c62').should be_nil
+         end
+
+         it 'can store largish bodies with binary data' do
+           pony = ::File.open(::File.dirname(__FILE__) + '/pony.jpg', 'rb') { |f| f.read }
+           key, size = @store.write([pony])
+           key.should == 'd0f30d8659b4d268c5c64385d9790024c2d78deb'
+           data = @store.read(key)
+           data.length.should == pony.length
+           data.hash.should == pony.hash
+         end
+
+         it 'deletes stored entries with #purge' do
+           key, size = @store.write(['My wild love went riding,'])
+           @store.purge(key).should be_nil
+           @store.read(key).should be_nil
+         end
+
+         private
+           def uri(uri)
+             URI.parse uri
+           end
+       end
+     end
+   end
+ end
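
Going by the examples above, the entity store exposes a small content-addressed API: #write takes the body as an array of parts and returns a [key, size] pair keyed by SHA1 digest, #read returns the whole body as a String, #open returns a Rack-compatible body, and #exist? / #purge do what their names suggest. A minimal usage sketch derived only from these specs:

  store = Rack::Cache::EntityStore::Redis.new :host => "localhost"

  key, size = store.write(['My wild love went riding,'])  # body passed as an array of parts
  store.exist?(key)       # => true
  store.read(key)         # => "My wild love went riding," (whole body as a String)
  body = store.open(key)  # Rack-compatible body responding to #each
  store.purge(key)        # => nil; a subsequent #read returns nil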
@@ -0,0 +1,102 @@
+ require File.join(File.dirname(__FILE__), "/../../../spec_helper")
+
+ module Rack
+   module Cache
+     class MetaStore
+       describe "Rack::Cache::MetaStore::Redis" do
+         before(:each) do
+           @store = Rack::Cache::MetaStore::Redis.resolve uri("redis://127.0.0.1")
+         end
+
+         it "should have the class referenced by homonym constant" do
+           Rack::Cache::MetaStore::REDIS.should be(Rack::Cache::MetaStore::Redis)
+         end
+
+         it "should resolve the connection uri" do
+           cache = Rack::Cache::MetaStore::Redis.resolve(uri("redis://127.0.0.1")).cache
+           cache.should be_kind_of(::MarshaledRedis)
+           cache.host.should == "127.0.0.1"
+           cache.port.should == 6379
+           cache.db.should == 0
+
+           cache = Rack::Cache::MetaStore::Redis.resolve(uri("redis://127.0.0.1:6380")).cache
+           cache.port.should == 6380
+
+           cache = Rack::Cache::MetaStore::Redis.resolve(uri("redis://127.0.0.1/13")).cache
+           cache.db.should == 13
+         end
+
+         it 'writes a list of negotiation tuples with #write' do
+           lambda { @store.write('/test', [[{}, {}]]) }.should_not raise_error
+         end
+
+         it 'reads a list of negotiation tuples with #read' do
+           @store.write('/test', [[{},{}],[{},{}]])
+           tuples = @store.read('/test')
+           tuples.should == [ [{},{}], [{},{}] ]
+         end
+
+         it 'reads an empty list with #read when nothing cached at key' do
+           @store.read('/nothing').should be_empty
+         end
+
+         it 'removes entries for key with #purge' do
+           @store.write('/test', [[{},{}]])
+           @store.read('/test').should_not be_empty
+
+           @store.purge('/test')
+           @store.read('/test').should be_empty
+         end
+
+         it 'succeeds when purging non-existing entries' do
+           @store.read('/test').should be_empty
+           @store.purge('/test')
+         end
+
+         it 'returns nil from #purge' do
+           @store.write('/test', [[{},{}]])
+           @store.purge('/test').should be_nil
+           @store.read('/test').should == []
+         end
+
+         %w[/test http://example.com:8080/ /test?x=y /test?x=y&p=q].each do |key|
+           it "can read and write key: '#{key}'" do
+             lambda { @store.write(key, [[{},{}]]) }.should_not raise_error
+             @store.read(key).should == [[{},{}]]
+           end
+         end
+
+         it "can read and write fairly large keys" do
+           key = "b" * 4096
+           lambda { @store.write(key, [[{},{}]]) }.should_not raise_error
+           @store.read(key).should == [[{},{}]]
+         end
+
+         it "allows custom cache keys from block" do
+           request = mock_request('/test', {})
+           request.env['rack-cache.cache_key'] =
+             lambda { |request| request.path_info.reverse }
+           @store.cache_key(request).should == 'tset/'
+         end
+
+         it "allows custom cache keys from class" do
+           request = mock_request('/test', {})
+           request.env['rack-cache.cache_key'] = Class.new do
+             def self.call(request); request.path_info.reverse end
+           end
+           @store.cache_key(request).should == 'tset/'
+         end
+
+         private
+           def mock_request(uri, opts)
+             env = Rack::MockRequest.env_for(uri, opts || {})
+             Rack::Cache::Request.new(env)
+           end
+
+           def uri(uri)
+             URI.parse uri
+           end
+       end
+     end
+   end
+ end
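
Both stores resolve "redis://host[:port][/db]" URIs, so wiring them into Rack::Cache from a rackup file would look roughly like the sketch below. The :metastore and :entitystore option names are the standard rack-cache ones; the require line, the database numbers, and MyApp are placeholders assumed for illustration.

  require 'rack/cache'
  require 'redis-store'   # assumed entry point for this gem

  use Rack::Cache,
    :metastore   => 'redis://127.0.0.1:6379/0',   # resolved by Rack::Cache::MetaStore::Redis
    :entitystore => 'redis://127.0.0.1:6379/1'    # resolved by Rack::Cache::EntityStore::Redis

  run MyApp   # MyApp stands in for your Rack application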