redis_orm 0.7 → 0.8

Sign up to get free protection for your applications and to get access to all the features.
Files changed (65) hide show
  1. checksums.yaml +7 -0
  2. data/README.md +1 -1
  3. data/lib/redis_orm.rb +8 -13
  4. data/lib/redis_orm/associations/has_many.rb +7 -3
  5. data/lib/redis_orm/redis_orm.rb +3 -5
  6. metadata +64 -124
  7. data/Gemfile +0 -10
  8. data/Manifest +0 -74
  9. data/Rakefile +0 -25
  10. data/benchmarks/sortable_benchmark.rb +0 -45
  11. data/redis_orm.gemspec +0 -45
  12. data/spec/generators/model_generator_spec.rb +0 -29
  13. data/spec/spec_helper.rb +0 -17
  14. data/test/association_indices_test.rb +0 -168
  15. data/test/associations_test.rb +0 -294
  16. data/test/atomicity_test.rb +0 -36
  17. data/test/basic_functionality_test.rb +0 -204
  18. data/test/callbacks_test.rb +0 -49
  19. data/test/changes_array_test.rb +0 -25
  20. data/test/classes/album.rb +0 -6
  21. data/test/classes/article.rb +0 -7
  22. data/test/classes/article_with_comments.rb +0 -8
  23. data/test/classes/book.rb +0 -6
  24. data/test/classes/catalog_item.rb +0 -5
  25. data/test/classes/category.rb +0 -7
  26. data/test/classes/city.rb +0 -7
  27. data/test/classes/comment.rb +0 -26
  28. data/test/classes/country.rb +0 -5
  29. data/test/classes/custom_user.rb +0 -8
  30. data/test/classes/cutout.rb +0 -20
  31. data/test/classes/cutout_aggregator.rb +0 -5
  32. data/test/classes/default_user.rb +0 -10
  33. data/test/classes/dynamic_finder_user.rb +0 -8
  34. data/test/classes/empty_person.rb +0 -2
  35. data/test/classes/expire_user.rb +0 -8
  36. data/test/classes/expire_user_with_predicate.rb +0 -13
  37. data/test/classes/giftcard.rb +0 -6
  38. data/test/classes/jigsaw.rb +0 -4
  39. data/test/classes/location.rb +0 -5
  40. data/test/classes/message.rb +0 -4
  41. data/test/classes/note.rb +0 -5
  42. data/test/classes/omni_user.rb +0 -8
  43. data/test/classes/person.rb +0 -6
  44. data/test/classes/photo.rb +0 -21
  45. data/test/classes/profile.rb +0 -9
  46. data/test/classes/sortable_user.rb +0 -11
  47. data/test/classes/timestamp.rb +0 -3
  48. data/test/classes/user.rb +0 -39
  49. data/test/classes/uuid_default_user.rb +0 -12
  50. data/test/classes/uuid_timestamp.rb +0 -5
  51. data/test/classes/uuid_user.rb +0 -13
  52. data/test/dynamic_finders_test.rb +0 -51
  53. data/test/exceptions_test.rb +0 -47
  54. data/test/expire_records_test.rb +0 -64
  55. data/test/has_one_has_many_test.rb +0 -42
  56. data/test/indices_test.rb +0 -63
  57. data/test/modules/belongs_to_model_within_module.rb +0 -6
  58. data/test/modules/has_many_model_within_module.rb +0 -11
  59. data/test/options_test.rb +0 -226
  60. data/test/polymorphic_test.rb +0 -65
  61. data/test/redis.conf +0 -417
  62. data/test/sortable_test.rb +0 -116
  63. data/test/test_helper.rb +0 -37
  64. data/test/uuid_as_id_test.rb +0 -178
  65. data/test/validations_test.rb +0 -20
data/test/indices_test.rb DELETED
@@ -1,63 +0,0 @@
1
- require File.dirname(File.expand_path(__FILE__)) + '/test_helper.rb'
2
-
3
- describe "check indices" do
4
- it "should change index accordingly to the changes in the model" do
5
- user = User.new :first_name => "Robert", :last_name => "Pirsig"
6
- user.save
7
-
8
- u = User.find_by_first_name("Robert")
9
- u.id.should == user.id
10
-
11
- u = User.find_by_first_name_and_last_name("Robert", "Pirsig")
12
- u.id.should == user.id
13
-
14
- u.first_name = "Chris"
15
- u.save
16
-
17
- User.find_by_first_name("Robert").should == nil
18
-
19
- User.find_by_first_name_and_last_name("Robert", "Pirsig").should == nil
20
-
21
- User.find_by_first_name("Chris").id.should == user.id
22
- User.find_by_last_name("Pirsig").id.should == user.id
23
- User.find_by_first_name_and_last_name("Chris", "Pirsig").id.should == user.id
24
- end
25
-
26
- it "should change index accordingly to the changes in the model (test #update_attributes method)" do
27
- user = User.new :first_name => "Robert", :last_name => "Pirsig"
28
- user.save
29
-
30
- u = User.find_by_first_name("Robert")
31
- u.id.should == user.id
32
-
33
- u = User.find_by_first_name_and_last_name("Robert", "Pirsig")
34
- u.id.should == user.id
35
-
36
- u.update_attributes :first_name => "Christofer", :last_name => "Robin"
37
-
38
- User.find_by_first_name("Robert").should == nil
39
- User.find_by_last_name("Pirsig").should == nil
40
- User.find_by_first_name_and_last_name("Robert", "Pirsig").should == nil
41
-
42
- User.find_by_first_name("Christofer").id.should == user.id
43
- User.find_by_last_name("Robin").id.should == user.id
44
- User.find_by_first_name_and_last_name("Christofer", "Robin").id.should == user.id
45
- end
46
-
47
- it "should create case insensitive indices too" do
48
- ou = OmniUser.new :email => "GERMAN@Ya.ru", :uid => 2718281828
49
- ou.save
50
-
51
- OmniUser.count.should == 1
52
- OmniUser.find_by_email("german@ya.ru").should be
53
- OmniUser.find_all_by_email("german@ya.ru").count.should == 1
54
-
55
- OmniUser.find_by_email_and_uid("german@ya.ru", 2718281828).should be
56
- OmniUser.find_all_by_email_and_uid("german@ya.ru", 2718281828).count.should == 1
57
-
58
- OmniUser.find_by_email("geRman@yA.rU").should be
59
- OmniUser.find_all_by_email_and_uid("GerMan@Ya.ru", 2718281828).count.should == 1
60
-
61
- OmniUser.find_all_by_email_and_uid("german@ya.ru", 2718281829).count.should == 0
62
- end
63
- end
@@ -1,6 +0,0 @@
1
- module BelongsToModelWithinModule
2
- class Reply < RedisOrm::Base
3
- property :body, String, :default => "test"
4
- belongs_to :article, :as => :essay
5
- end
6
- end
@@ -1,11 +0,0 @@
1
- module HasManyModelWithinModule
2
- class SpecialComment < RedisOrm::Base
3
- property :body, String, :default => "test"
4
- belongs_to :brochure, :as => :book
5
- end
6
-
7
- class Brochure < RedisOrm::Base
8
- property :title, String
9
- has_many :special_comments
10
- end
11
- end
data/test/options_test.rb DELETED
@@ -1,226 +0,0 @@
1
- require File.dirname(File.expand_path(__FILE__)) + '/test_helper.rb'
2
-
3
- describe "test options" do
4
- before(:each) do
5
- @album = Album.new
6
- @album.title = "my 1st album"
7
- @album.save
8
-
9
- @album.should be
10
- @album.title.should == "my 1st album"
11
-
12
- @photo1 = Photo.new :image => "facepalm.jpg", :image_type => "jpg", :checked => true
13
- @photo1.save
14
- @photo1.should be
15
- @photo1.image.should == "facepalm.jpg"
16
- @photo1.image_type.should == "jpg"
17
-
18
- @photo2 = Photo.new :image => "boobs.png", :image_type => "png", :inverted => false
19
- @photo2.save
20
- @photo2.should be
21
- @photo2.image.should == "boobs.png"
22
- @photo2.image_type.should == "png"
23
- end
24
-
25
- it "should behave like expected for #find and #find! methods (nb exceptions with #find! are tested in exceptions_test.rb file)" do
26
- Album.find(@album.id).should == @album
27
- Album.find!(@album.id).should == @album
28
-
29
- Album.find(:first).should == @album
30
- Album.find!(:first).should == @album
31
-
32
- Album.find(:all, :limit => 1).size.should == 1
33
- Album.find!(:all, :limit => 1).size.should == 1
34
- end
35
-
36
- it "should return correct array when :limit and :offset options are provided" do
37
- @album.photos.count.should == 0
38
-
39
- @album.photos.all(:limit => 2, :offset => 0).should == []
40
-
41
- @album.photos << [@photo1, @photo2]
42
-
43
- @album.photos.all(:limit => 0, :offset => 0).should == []
44
- @album.photos.all(:limit => 1, :offset => 0).size.should == 1
45
- @album.photos.all(:limit => 2, :offset => 0).size.should == 2 #[@photo1, @photo2]
46
-
47
- @album.photos.all(:limit => 0, :offset => 0).should == []
48
- @album.photos.all(:limit => 1, :offset => 1).size.should == 1 # [@photo2]
49
- @album.photos.all(:limit => 2, :offset => 2).should == []
50
-
51
- @album.photos.find(:all, :limit => 1, :offset => 1).size.should == 1
52
-
53
- Photo.find(:all).size.should == 2
54
-
55
- Photo.find(:first).should == @photo1
56
- Photo.find(:last).should == @photo2
57
-
58
- Photo.find(:all, :conditions => {:image => "facepalm.jpg"}).size.should == 1
59
- Photo.find(:all, :conditions => {:image => "boobs.png"}).size.should == 1
60
-
61
- Photo.find(:all, :conditions => {:image => "facepalm.jpg", :image_type => "jpg"}).size.should == 1
62
- Photo.find(:all, :conditions => {:image => "boobs.png", :image_type => "png"}).size.should == 1
63
-
64
- Photo.find(:first, :conditions => {:image => "facepalm.jpg"}).should == @photo1
65
- Photo.find(:first, :conditions => {:image => "boobs.png"}).should == @photo2
66
-
67
- Photo.find(:first, :conditions => {:image => "facepalm.jpg", :image_type => "jpg"}).should == @photo1
68
- Photo.find(:first, :conditions => {:image => "boobs.png", :image_type => "png"}).should == @photo2
69
-
70
- Photo.find(:last, :conditions => {:image => "facepalm.jpg"}).should == @photo1
71
- Photo.find(:last, :conditions => {:image => "boobs.png"}).should == @photo2
72
-
73
- Photo.find(:last, :conditions => {:image => "facepalm.jpg", :image_type => "jpg"}).should == @photo1
74
- Photo.find(:last, :conditions => {:image => "boobs.png", :image_type => "png"}).should == @photo2
75
- end
76
-
77
- it "should accept options for #first and #last methods" do
78
- Photo.first(:conditions => {:image => "facepalm.jpg"}).should == @photo1
79
- Photo.first(:conditions => {:image => "boobs.png"}).should == @photo2
80
-
81
- Photo.last(:conditions => {:image => "facepalm.jpg", :image_type => "jpg"}).should == @photo1
82
- Photo.last(:conditions => {:image => "boobs.png", :image_type => "png"}).should == @photo2
83
- end
84
-
85
- it "should correctly save boolean values" do
86
- $redis.hgetall("photo:#{@photo1.id}")["inverted"].should == "true"
87
- $redis.hgetall("photo:#{@photo2.id}")["inverted"].should == "false"
88
-
89
- @photo1.inverted.should == true
90
- @photo2.inverted.should == false
91
-
92
- $redis.zrange("photo:inverted:true", 0, -1).should include(@photo1.id.to_s)
93
- $redis.zrange("photo:inverted:false", 0, -1).should include(@photo2.id.to_s)
94
-
95
- $redis.hgetall("photo:#{@photo1.id}")["checked"].should == "true"
96
- $redis.hgetall("photo:#{@photo2.id}")["checked"].should == "false"
97
-
98
- @photo1.checked.should == true
99
- @photo2.checked.should == false
100
-
101
- $redis.zrange("photo:checked:true", 0, -1).should include(@photo1.id.to_s)
102
- $redis.zrange("photo:checked:false", 0, -1).should include(@photo2.id.to_s)
103
- end
104
-
105
- it "should search on bool values properly" do
106
- Photo.find(:all, :conditions => {:checked => true}).size.should == 1
107
- Photo.find(:all, :conditions => {:checked => true}).first.id.should == @photo1.id
108
- Photo.find(:all, :conditions => {:checked => false}).size.should == 1
109
- Photo.find(:all, :conditions => {:checked => false}).first.id.should == @photo2.id
110
-
111
- Photo.find(:all, :conditions => {:inverted => true}).size.should == 1
112
- Photo.find(:all, :conditions => {:inverted => true}).first.id.should == @photo1.id
113
- Photo.find(:all, :conditions => {:inverted => false}).size.should == 1
114
- Photo.find(:all, :conditions => {:inverted => false}).first.id.should == @photo2.id
115
- end
116
-
117
- it "should return correct array when :order option is provided" do
118
- Photo.all(:order => "asc").map{|p| p.id}.should == [@photo1.id, @photo2.id]
119
- Photo.all(:order => "desc").map{|p| p.id}.should == [@photo2.id, @photo1.id]
120
-
121
- Photo.all(:order => "asc", :limit => 1).map{|p| p.id}.should == [@photo1.id]
122
- Photo.all(:order => "desc", :limit => 1).map{|p| p.id}.should == [@photo2.id]
123
-
124
- Photo.all(:order => "asc", :limit => 1, :offset => 1).map{|p| p.id}.should == [@photo2.id]
125
- Photo.all(:order => "desc", :limit => 1, :offset => 1).map{|p| p.id}.should == [@photo1.id]
126
-
127
- # testing #find method
128
- Photo.find(:all, :order => "asc").map{|p| p.id}.should == [@photo1.id, @photo2.id]
129
- Photo.find(:all, :order => "desc").map{|p| p.id}.should == [@photo2.id, @photo1.id]
130
-
131
- Photo.find(:all, :order => "asc", :limit => 1).map{|p| p.id}.should == [@photo1.id]
132
- Photo.find(:all, :order => "desc", :limit => 1).map{|p| p.id}.should == [@photo2.id]
133
-
134
- Photo.find(:first, :order => "asc", :limit => 1, :offset => 1).id.should == @photo2.id
135
- Photo.find(:first, :order => "desc", :limit => 1, :offset => 1).id.should == @photo1.id
136
-
137
- Photo.find(:last, :order => "asc").id.should == @photo2.id
138
- Photo.find(:last, :order => "desc").id.should == @photo1.id
139
-
140
- @album.photos.count.should == 0
141
- @album.photos.all(:limit => 2, :offset => 0).should == []
142
- @album.photos << @photo2
143
- @album.photos << @photo1
144
-
145
- @album.photos.all(:order => "asc").map{|p| p.id}.should == [@photo2.id, @photo1.id]
146
- @album.photos.all(:order => "desc").map{|p| p.id}.should == [@photo1.id, @photo2.id]
147
- @album.photos.all(:order => "asc", :limit => 1).map{|p| p.id}.should == [@photo2.id]
148
- @album.photos.all(:order => "desc", :limit => 1).map{|p| p.id}.should == [@photo1.id]
149
- @album.photos.all(:order => "asc", :limit => 1, :offset => 1).map{|p| p.id}.should == [@photo1.id]
150
- @album.photos.all(:order => "desc", :limit => 1, :offset => 1).map{|p| p.id}.should == [@photo2.id]
151
-
152
- @album.photos.find(:all, :order => "asc").map{|p| p.id}.should == [@photo2.id, @photo1.id]
153
- @album.photos.find(:all, :order => "desc").map{|p| p.id}.should == [@photo1.id, @photo2.id]
154
-
155
- @album.photos.find(:first, :order => "asc").id.should == @photo2.id
156
- @album.photos.find(:first, :order => "desc").id.should == @photo1.id
157
-
158
- @album.photos.find(:last, :order => "asc").id.should == @photo1.id
159
- @album.photos.find(:last, :order => "desc").id.should == @photo2.id
160
-
161
- @album.photos.find(:last, :order => "desc", :offset => 2).should == nil
162
- @album.photos.find(:first, :order => "desc", :offset => 2).should == nil
163
-
164
- @album.photos.find(:all, :order => "asc", :limit => 1, :offset => 1).map{|p| p.id}.should == [@photo1.id]
165
- @album.photos.find(:all, :order => "desc", :limit => 1, :offset => 1).map{|p| p.id}.should == [@photo2.id]
166
- end
167
-
168
- it "should delete associated records when :dependant => :destroy in *has_many* assoc" do
169
- @album.photos << [@photo1, @photo2]
170
-
171
- @album.photos.count.should == 2
172
-
173
- Photo.count.should == 2
174
- @album.destroy
175
- Photo.count.should == 0
176
- Album.count.should == 0
177
- end
178
-
179
- it "should *NOT* delete associated records when :dependant => :nullify or empty in *has_many* assoc" do
180
- Photo.count.should == 2
181
-
182
- category = Category.new
183
- category.title = "cats"
184
- category.save
185
-
186
- Category.count.should == 1
187
-
188
- category.photos << [@photo1, @photo2]
189
- category.photos.count.should == 2
190
-
191
- category.destroy
192
-
193
- Photo.count.should == 2
194
- Category.count.should == 0
195
- end
196
-
197
- it "should delete associated records when :dependant => :destroy and leave them otherwise in *has_one* assoc" do
198
- user = User.new
199
- user.name = "Dmitrii Samoilov"
200
- user.save
201
- user.should be
202
-
203
- user.photo = @photo1
204
-
205
- user.photo.id.should == @photo1.id
206
-
207
- User.count.should == 1
208
- Photo.count.should == 2
209
- user.destroy
210
- Photo.count.should == 1
211
- User.count.should == 0
212
- end
213
-
214
- it "should delete link to associated record when record was deleted" do
215
- @album.photos << [@photo1, @photo2]
216
-
217
- @album.photos.count.should == 2
218
-
219
- Photo.count.should == 2
220
- @photo1.destroy
221
- Photo.count.should == 1
222
-
223
- @album.photos.count.should == 1
224
- @album.photos.size.should == 1
225
- end
226
- end
@@ -1,65 +0,0 @@
1
- require File.dirname(File.expand_path(__FILE__)) + '/test_helper.rb'
2
-
3
- describe "check polymorphic property" do
4
- it "should provide proper associations and save records correctly for has_one/belongs_to polymorphic" do
5
- book = Book.new :title => "Permutation City", :author => "Egan Greg", :price => 1529
6
- book.save
7
-
8
- giftcard = Giftcard.create :title => "Happy New Year!"
9
-
10
- ci1 = CatalogItem.create :title => giftcard.title
11
- ci1.resource = giftcard
12
-
13
- ci2 = CatalogItem.create :title => book.title
14
- ci2.resource = book
15
-
16
- CatalogItem.count.should == 2
17
- [ci1, ci2].collect{|ci| ci.title}.should == [giftcard.title, book.title]
18
-
19
- ci1.resource.title.should == giftcard.title
20
- ci2.resource.title.should == book.title
21
-
22
- Book.first.catalog_item.should be
23
- Book.first.catalog_item.id.should == ci2.id
24
-
25
- Giftcard.first.catalog_item.should be
26
- Giftcard.first.catalog_item.id.should == ci1.id
27
- end
28
-
29
- it "should provide proper associations and save records correctly for has_many/belongs_to polymorphic" do
30
- country = Country.create :name => "Ukraine"
31
- city = City.create :name => "Lviv"
32
-
33
- person = Person.create :name => "german"
34
- person.location = country
35
-
36
- Person.first.location.id.should == country.id
37
- City.first.people.count.should == 0
38
- Country.first.people.count.should == 1
39
- Country.first.people[0].id.should == person.id
40
-
41
- person = Person.first
42
- person.location = city
43
-
44
- Person.first.location.id.should == city.id
45
- City.first.people.count.should == 1
46
- City.first.people[0].id.should == person.id
47
- Country.first.people.count.should == 0
48
- end
49
-
50
- it "should delete records properly" do
51
- country = Country.create :name => "Ukraine"
52
- person = Person.create :name => "german"
53
- person.location = country
54
-
55
- Person.first.location.id.should == country.id
56
- Country.first.people.count.should == 1
57
- Country.first.people[0].id.should == person.id
58
-
59
- person.destroy
60
- Person.count.should == 0
61
- $redis.hgetall("user:#{person.id}").should == {}
62
- $redis.zrank("user:ids", person.id).should == nil
63
- Country.first.people.count.should == 0
64
- end
65
- end
data/test/redis.conf DELETED
@@ -1,417 +0,0 @@
1
- # Redis configuration file example
2
-
3
- # Note on units: when memory size is needed, it is possible to specifiy
4
- # it in the usual form of 1k 5GB 4M and so forth:
5
- #
6
- # 1k => 1000 bytes
7
- # 1kb => 1024 bytes
8
- # 1m => 1000000 bytes
9
- # 1mb => 1024*1024 bytes
10
- # 1g => 1000000000 bytes
11
- # 1gb => 1024*1024*1024 bytes
12
- #
13
- # units are case insensitive so 1GB 1Gb 1gB are all the same.
14
-
15
- # By default Redis does not run as a daemon. Use 'yes' if you need it.
16
- # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
17
- daemonize no
18
-
19
- # When running daemonized, Redis writes a pid file in /var/run/redis.pid by
20
- # default. You can specify a custom pid file location here.
21
- pidfile redis.pid
22
-
23
- # Accept connections on the specified port, default is 6379.
24
- # If port 0 is specified Redis will not listen on a TCP socket.
25
- port 0
26
-
27
- # If you want you can bind a single interface, if the bind option is not
28
- # specified all the interfaces will listen for incoming connections.
29
- #
30
- # bind 127.0.0.1
31
-
32
- # Specify the path for the unix socket that will be used to listen for
33
- # incoming connections. There is no default, so Redis will not listen
34
- # on a unix socket when not specified.
35
- #
36
- unixsocket redis.sock
37
-
38
- # Close the connection after a client is idle for N seconds (0 to disable)
39
- timeout 300
40
-
41
- # Set server verbosity to 'debug'
42
- # it can be one of:
43
- # debug (a lot of information, useful for development/testing)
44
- # verbose (many rarely useful info, but not a mess like the debug level)
45
- # notice (moderately verbose, what you want in production probably)
46
- # warning (only very important / critical messages are logged)
47
- loglevel verbose
48
-
49
- # Specify the log file name. Also 'stdout' can be used to force
50
- # Redis to log on the standard output. Note that if you use standard
51
- # output for logging but daemonize, logs will be sent to /dev/null
52
- logfile stdout
53
-
54
- # To enable logging to the system logger, just set 'syslog-enabled' to yes,
55
- # and optionally update the other syslog parameters to suit your needs.
56
- # syslog-enabled no
57
-
58
- # Specify the syslog identity.
59
- # syslog-ident redis
60
-
61
- # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
62
- # syslog-facility local0
63
-
64
- # Set the number of databases. The default database is DB 0, you can select
65
- # a different one on a per-connection basis using SELECT <dbid> where
66
- # dbid is a number between 0 and 'databases'-1
67
- databases 2
68
-
69
- ################################ SNAPSHOTTING #################################
70
- #
71
- # Save the DB on disk:
72
- #
73
- # save <seconds> <changes>
74
- #
75
- # Will save the DB if both the given number of seconds and the given
76
- # number of write operations against the DB occurred.
77
- #
78
- # In the example below the behaviour will be to save:
79
- # after 900 sec (15 min) if at least 1 key changed
80
- # after 300 sec (5 min) if at least 10 keys changed
81
- # after 60 sec if at least 10000 keys changed
82
- #
83
- # Note: you can disable saving at all commenting all the "save" lines.
84
-
85
- save 10 1
86
- save 300 10
87
- save 60 10000
88
-
89
- # Compress string objects using LZF when dump .rdb databases?
90
- # For default that's set to 'yes' as it's almost always a win.
91
- # If you want to save some CPU in the saving child set it to 'no' but
92
- # the dataset will likely be bigger if you have compressible values or keys.
93
- rdbcompression yes
94
-
95
- # The filename where to dump the DB
96
- dbfilename dump.rdb
97
-
98
- # The working directory.
99
- #
100
- # The DB will be written inside this directory, with the filename specified
101
- # above using the 'dbfilename' configuration directive.
102
- #
103
- # Also the Append Only File will be created inside this directory.
104
- #
105
- # Note that you must specify a directory here, not a file name.
106
- dir ./
107
-
108
- ################################# REPLICATION #################################
109
-
110
- # Master-Slave replication. Use slaveof to make a Redis instance a copy of
111
- # another Redis server. Note that the configuration is local to the slave
112
- # so for example it is possible to configure the slave to save the DB with a
113
- # different interval, or to listen to another port, and so on.
114
- #
115
- # slaveof <masterip> <masterport>
116
-
117
- # If the master is password protected (using the "requirepass" configuration
118
- # directive below) it is possible to tell the slave to authenticate before
119
- # starting the replication synchronization process, otherwise the master will
120
- # refuse the slave request.
121
- #
122
- # masterauth <master-password>
123
-
124
- # When a slave lost the connection with the master, or when the replication
125
- # is still in progress, the slave can act in two different ways:
126
- #
127
- # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
128
- # still reply to client requests, possibly with out of data data, or the
129
- # data set may just be empty if this is the first synchronization.
130
- #
131
- # 2) if slave-serve-stale data is set to 'no' the slave will reply with
132
- # an error "SYNC with master in progress" to all the kind of commands
133
- # but to INFO and SLAVEOF.
134
- #
135
- slave-serve-stale-data yes
136
-
137
- ################################## SECURITY ###################################
138
-
139
- # Require clients to issue AUTH <PASSWORD> before processing any other
140
- # commands. This might be useful in environments in which you do not trust
141
- # others with access to the host running redis-server.
142
- #
143
- # This should stay commented out for backward compatibility and because most
144
- # people do not need auth (e.g. they run their own servers).
145
- #
146
- # Warning: since Redis is pretty fast an outside user can try up to
147
- # 150k passwords per second against a good box. This means that you should
148
- # use a very strong password otherwise it will be very easy to break.
149
- #
150
- # requirepass foobared
151
-
152
- # Command renaming.
153
- #
154
- # It is possilbe to change the name of dangerous commands in a shared
155
- # environment. For instance the CONFIG command may be renamed into something
156
- # of hard to guess so that it will be still available for internal-use
157
- # tools but not available for general clients.
158
- #
159
- # Example:
160
- #
161
- # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
162
- #
163
- # It is also possilbe to completely kill a command renaming it into
164
- # an empty string:
165
- #
166
- # rename-command CONFIG ""
167
-
168
- ################################### LIMITS ####################################
169
-
170
- # Set the max number of connected clients at the same time. By default there
171
- # is no limit, and it's up to the number of file descriptors the Redis process
172
- # is able to open. The special value '0' means no limits.
173
- # Once the limit is reached Redis will close all the new connections sending
174
- # an error 'max number of clients reached'.
175
- #
176
- # maxclients 128
177
-
178
- # Don't use more memory than the specified amount of bytes.
179
- # When the memory limit is reached Redis will try to remove keys with an
180
- # EXPIRE set. It will try to start freeing keys that are going to expire
181
- # in little time and preserve keys with a longer time to live.
182
- # Redis will also try to remove objects from free lists if possible.
183
- #
184
- # If all this fails, Redis will start to reply with errors to commands
185
- # that will use more memory, like SET, LPUSH, and so on, and will continue
186
- # to reply to most read-only commands like GET.
187
- #
188
- # WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
189
- # 'state' server or cache, not as a real DB. When Redis is used as a real
190
- # database the memory usage will grow over the weeks, it will be obvious if
191
- # it is going to use too much memory in the long run, and you'll have the time
192
- # to upgrade. With maxmemory after the limit is reached you'll start to get
193
- # errors for write operations, and this may even lead to DB inconsistency.
194
- #
195
- # maxmemory <bytes>
196
-
197
- # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
198
- # is reached? You can select among five behavior:
199
- #
200
- # volatile-lru -> remove the key with an expire set using an LRU algorithm
201
- # allkeys-lru -> remove any key accordingly to the LRU algorithm
202
- # volatile-random -> remove a random key with an expire set
203
- # allkeys->random -> remove a random key, any key
204
- # volatile-ttl -> remove the key with the nearest expire time (minor TTL)
205
- # noeviction -> don't expire at all, just return an error on write operations
206
- #
207
- # Note: with all the kind of policies, Redis will return an error on write
208
- # operations, when there are not suitable keys for eviction.
209
- #
210
- # At the date of writing this commands are: set setnx setex append
211
- # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
212
- # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
213
- # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
214
- # getset mset msetnx exec sort
215
- #
216
- # The default is:
217
- #
218
- # maxmemory-policy volatile-lru
219
-
220
- # LRU and minimal TTL algorithms are not precise algorithms but approximated
221
- # algorithms (in order to save memory), so you can select as well the sample
222
- # size to check. For instance for default Redis will check three keys and
223
- # pick the one that was used less recently, you can change the sample size
224
- # using the following configuration directive.
225
- #
226
- # maxmemory-samples 3
227
-
228
- ############################## APPEND ONLY MODE ###############################
229
-
230
- # By default Redis asynchronously dumps the dataset on disk. If you can live
231
- # with the idea that the latest records will be lost if something like a crash
232
- # happens this is the preferred way to run Redis. If instead you care a lot
233
- # about your data and don't want to that a single record can get lost you should
234
- # enable the append only mode: when this mode is enabled Redis will append
235
- # every write operation received in the file appendonly.aof. This file will
236
- # be read on startup in order to rebuild the full dataset in memory.
237
- #
238
- # Note that you can have both the async dumps and the append only file if you
239
- # like (you have to comment the "save" statements above to disable the dumps).
240
- # Still if append only mode is enabled Redis will load the data from the
241
- # log file at startup ignoring the dump.rdb file.
242
- #
243
- # IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append
244
- # log file in background when it gets too big.
245
-
246
- appendonly no
247
-
248
- # The name of the append only file (default: "appendonly.aof")
249
- # appendfilename appendonly.aof
250
-
251
- # The fsync() call tells the Operating System to actually write data on disk
252
- # instead to wait for more data in the output buffer. Some OS will really flush
253
- # data on disk, some other OS will just try to do it ASAP.
254
- #
255
- # Redis supports three different modes:
256
- #
257
- # no: don't fsync, just let the OS flush the data when it wants. Faster.
258
- # always: fsync after every write to the append only log . Slow, Safest.
259
- # everysec: fsync only if one second passed since the last fsync. Compromise.
260
- #
261
- # The default is "everysec" that's usually the right compromise between
262
- # speed and data safety. It's up to you to understand if you can relax this to
263
- # "no" that will will let the operating system flush the output buffer when
264
- # it wants, for better performances (but if you can live with the idea of
265
- # some data loss consider the default persistence mode that's snapshotting),
266
- # or on the contrary, use "always" that's very slow but a bit safer than
267
- # everysec.
268
- #
269
- # If unsure, use "everysec".
270
-
271
- # appendfsync always
272
- appendfsync everysec
273
- # appendfsync no
274
-
275
- # When the AOF fsync policy is set to always or everysec, and a background
276
- # saving process (a background save or AOF log background rewriting) is
277
- # performing a lot of I/O against the disk, in some Linux configurations
278
- # Redis may block too long on the fsync() call. Note that there is no fix for
279
- # this currently, as even performing fsync in a different thread will block
280
- # our synchronous write(2) call.
281
- #
282
- # In order to mitigate this problem it's possible to use the following option
283
- # that will prevent fsync() from being called in the main process while a
284
- # BGSAVE or BGREWRITEAOF is in progress.
285
- #
286
- # This means that while another child is saving the durability of Redis is
287
- # the same as "appendfsync none", that in pratical terms means that it is
288
- # possible to lost up to 30 seconds of log in the worst scenario (with the
289
- # default Linux settings).
290
- #
291
- # If you have latency problems turn this to "yes". Otherwise leave it as
292
- # "no" that is the safest pick from the point of view of durability.
293
- no-appendfsync-on-rewrite no
294
-
295
- ################################ VIRTUAL MEMORY ###############################
296
-
297
- # Virtual Memory allows Redis to work with datasets bigger than the actual
298
- # amount of RAM needed to hold the whole dataset in memory.
299
- # In order to do so very used keys are taken in memory while the other keys
300
- # are swapped into a swap file, similarly to what operating systems do
301
- # with memory pages.
302
- #
303
- # To enable VM just set 'vm-enabled' to yes, and set the following three
304
- # VM parameters accordingly to your needs.
305
-
306
- #vm-enabled no
307
- # vm-enabled yes
308
-
309
- # This is the path of the Redis swap file. As you can guess, swap files
310
- # can't be shared by different Redis instances, so make sure to use a swap
311
- # file for every redis process you are running. Redis will complain if the
312
- # swap file is already in use.
313
- #
314
- # The best kind of storage for the Redis swap file (that's accessed at random)
315
- # is a Solid State Disk (SSD).
316
- #
317
- # *** WARNING *** if you are using a shared hosting the default of putting
318
- # the swap file under /tmp is not secure. Create a dir with access granted
319
- # only to Redis user and configure Redis to create the swap file there.
320
- #vm-swap-file /tmp/redis.swap
321
-
322
- # vm-max-memory configures the VM to use at max the specified amount of
323
- # RAM. Everything that does not fit will be swapped on disk *if* possible, that
324
- # is, if there is still enough contiguous space in the swap file.
325
- #
326
- # With vm-max-memory 0 the system will swap everything it can. Not a good
327
- # default, just specify the max amount of RAM you can in bytes, but it's
328
- # better to leave some margin. For instance specify an amount of RAM
329
- # that's more or less between 60 and 80% of your free RAM.
330
- #vm-max-memory 0
331
-
332
- # Redis swap files is split into pages. An object can be saved using multiple
333
- # contiguous pages, but pages can't be shared between different objects.
334
- # So if your page is too big, small objects swapped out on disk will waste
335
- # a lot of space. If your page is too small, there is less space in the swap
336
- # file (assuming you configured the same number of total swap file pages).
337
- #
338
- # If you use a lot of small objects, use a page size of 64 or 32 bytes.
339
- # If you use a lot of big objects, use a bigger page size.
340
- # If unsure, use the default :)
341
- #vm-page-size 32
342
-
343
- # Number of total memory pages in the swap file.
344
- # Given that the page table (a bitmap of free/used pages) is taken in memory,
345
- # every 8 pages on disk will consume 1 byte of RAM.
346
- #
347
- # The total swap size is vm-page-size * vm-pages
348
- #
349
- # With the default of 32-bytes memory pages and 134217728 pages Redis will
350
- # use a 4 GB swap file, that will use 16 MB of RAM for the page table.
351
- #
352
- # It's better to use the smallest acceptable value for your application,
353
- # but the default is large in order to work in most conditions.
354
- #vm-pages 134217728
355
-
356
- # Max number of VM I/O threads running at the same time.
357
- # These threads are used to read/write data from/to swap file, since they
358
- # also encode and decode objects from disk to memory or the reverse, a bigger
359
- # number of threads can help with big objects even if they can't help with
360
- # I/O itself as the physical device may not be able to cope with many
361
- # reads/writes operations at the same time.
362
- #
363
- # The special value of 0 turn off threaded I/O and enables the blocking
364
- # Virtual Memory implementation.
365
- #vm-max-threads 4
366
-
367
- ############################### ADVANCED CONFIG ###############################
368
-
369
- # Hashes are encoded in a special way (much more memory efficient) when they
370
- # have at max a given number of elements, and the biggest element does not
371
- # exceed a given threshold. You can configure these limits with the following
372
- # configuration directives.
373
- #hash-max-zipmap-entries 512
374
- #hash-max-zipmap-value 64
375
-
376
- # Similarly to hashes, small lists are also encoded in a special way in order
377
- # to save a lot of space. The special representation is only used when
378
- # you are under the following limits:
379
- list-max-ziplist-entries 512
380
- list-max-ziplist-value 64
381
-
382
- # Sets have a special encoding in just one case: when a set is composed
383
- # of just strings that happens to be integers in radix 10 in the range
384
- # of 64 bit signed integers.
385
- # The following configuration setting sets the limit in the size of the
386
- # set in order to use this special memory saving encoding.
387
- set-max-intset-entries 512
388
-
389
- # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
390
- # order to help rehashing the main Redis hash table (the one mapping top-level
391
- # keys to values). The hash table implementation redis uses (see dict.c)
392
- # performs a lazy rehashing: the more operation you run into an hash table
393
- # that is rehashing, the more rehashing "steps" are performed, so if the
394
- # server is idle the rehashing is never complete and some more memory is used
395
- # by the hash table.
396
- #
397
- # The default is to use this millisecond 10 times every second in order to
398
- # actively rehash the main dictionaries, freeing memory when possible.
399
- #
400
- # If unsure:
401
- # use "activerehashing no" if you have hard latency requirements and it is
402
- # not a good thing in your environment that Redis can reply from time to time
403
- # to queries with 2 milliseconds delay.
404
- #
405
- # use "activerehashing yes" if you don't have such hard requirements but
406
- # want to free memory asap when possible.
407
- activerehashing yes
408
-
409
- ################################## INCLUDES ###################################
410
-
411
- # Include one or more other config files here. This is useful if you
412
- # have a standard template that goes to all redis server but also need
413
- # to customize a few per-server settings. Include files can include
414
- # other files, so use this wisely.
415
- #
416
- # include /path/to/local.conf
417
- # include /path/to/other.conf