segmented-memcache 1.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.gitignore +1 -0
- data/LICENSE +20 -0
- data/README.rdoc +215 -0
- data/Rakefile +56 -0
- data/VERSION.yml +5 -0
- data/lib/memcache/local_server.rb +107 -0
- data/lib/memcache/migration.rb +23 -0
- data/lib/memcache/null_server.rb +30 -0
- data/lib/memcache/pg_server.rb +163 -0
- data/lib/memcache/segmented_server.rb +116 -0
- data/lib/memcache/server.rb +265 -0
- data/lib/memcache.rb +409 -0
- data/segmented-memcache.gemspec +68 -0
- data/test/memcache_local_server_test.rb +11 -0
- data/test/memcache_null_server_test.rb +65 -0
- data/test/memcache_pg_server_test.rb +28 -0
- data/test/memcache_segmented_server_test.rb +21 -0
- data/test/memcache_server_test.rb +35 -0
- data/test/memcache_server_test_helper.rb +159 -0
- data/test/memcache_test.rb +233 -0
- data/test/test_helper.rb +26 -0
- metadata +83 -0
data/.gitignore
ADDED
@@ -0,0 +1 @@
+pkg
data/LICENSE
ADDED
@@ -0,0 +1,20 @@
+Copyright (c) 2009 Justin Balthrop
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/README.rdoc
ADDED
@@ -0,0 +1,215 @@
+== DEVELON
+
+This is a fork of Justin Balthrop's memcache gem. The broken segment management in the +get+ and +set+ methods of +SegmentedServer+ has been fixed: memory usage is now slightly lower and there are no dangling segments.
+
+To cache ActiveRecord models together with their _associations_ from inside a module, you must touch the association data before calling +set+ so that it is marshalled along with the main records. Likewise, the model classes involved must be referenced in the context of the +get+ call so that the associations can be unmarshalled.
+Example:
+
+  class Tree < ActiveRecord::Base
+    has_many :branches
+  end
+
+  class Branch < ActiveRecord::Base
+    belongs_to :tree
+  end
+
+  module Foo
+
+    def self.test1(params)
+      params.each {}
+      cache.set('test', params)
+    end
+
+    def self.test2(classes)
+      classes.each {|k| k.class}
+      cache.get('test')
+    end
+
+  end
+
+  t = Tree.create
+  t.branches.create
+  Foo.test1 [t]              # this will cache t with its branch
+  Foo.test2 [Tree, Branch]   # this will load the tree and its branch from the cache
+
+== Installation
+
+  $ sudo gem install segmented-memcache --source http://gemcutter.org
+
+--------------------------------------------------------------------------------------------
+
+= memcache
+
+This is the Geni memcached client. It started out as a fork of fiveruns/memcache-client,
+which was a fork of seattle.rb's memcache-client, but over time, our client has diverged,
+and I've rewritten most of the code. Of course, a lot of credit is due to those whose code
+served as a starting point for this code.
+
+== Usage
+
+  cache = Memcache.new(:server => "localhost:11211")
+  cache.set('stuff', [:symbol, 'String', 1, {:bar => 5}])
+  cache.get('stuff')
+  => [:symbol, "String", 1, {:bar => 5}]
+
+  cache['things'] = {:foo => '1', :bar => [1,2,3]}
+  cache['things']
+  => {:foo => "1", :bar => [1,2,3]}
+
+== How is this different from memcache-client?
+
+Like memcache-client, _memcache_ (shown in italics when I am referring to this
+library) is a memcached client, but it differs significantly from memcache-client in
+several important ways.
+
+=== Interface
+
+I tried to keep the basic interface as similar as I could to memcache-client. In some
+cases, _memcache_ can be a near drop-in replacement for memcache-client. However, I did
+rename the main class from +MemCache+ to +Memcache+ to prevent confusion and to force
+those switching to _memcache_ to update their code. Here are the notable interface
+changes:
+
+- +expiry+ and +raw+ are specified as options in a hash now, instead of as unnamed parameters.
+
+    cache.set('foo', :a, :expiry => 10.minutes)
+    cache.set('bar', :b, :expiry => Time.parse('5:51pm Nov 24, 2018'))
+    cache.set('baz', 'c', :expiry => 30.minutes, :raw => true)
+
+- +get_multi+ has been replaced by a more versatile +get+ interface. If the first argument is
+  an array, then a hash of key/value pairs is returned. If the first argument is not an
+  array, then the value alone is returned.
+
+    cache.get('foo')          # => :a
+    cache.get(['foo', 'bar']) # => {"foo"=>:a, "bar"=>:b}
+    cache.get(['foo'])        # => {"foo"=>:a}
+
+- +get+ also supports updating the expiry for a single key. This can be used to keep
+  frequently accessed data in cache longer than less accessed data, though usually the
+  memcached LRU algorithm will be sufficient.
+
+    cache.get('foo', :expiry => 1.day)
+
+- Support for flags has been added to all methods, so you can store additional metadata on
+  each value. Depending on which server version you are using, flags can be 16 bit or 32
+  bit unsigned integers (though it seems that memcached 1.4.1 returns signed values if the
+  upper bit is set).
+
+    cache.set('foo', :aquatic, :flags => 0b11101111)
+    value = cache.get('foo')
+    => :aquatic
+    value.memcache_flags.to_s(2)
+    => "11101111"
+
+    cache.set('foo', 'aquatic', :raw => true, :flags => 0xff08)
+    cache.get('foo', :raw => true).memcache_flags.to_s(2)
+    => "1111111100001000"
+
+- +incr+ and +decr+ automatically initialize the value to 0 if the key doesn't
+  exist. The +count+ method returns the integer count associated with a given key.
+
+    cache.count('hits')    # => 0
+    cache.incr('hits', 52) # => 52
+    cache.decr('hits', 9)  # => 43
+    cache.count('hits')    # => 43
+
+- In addition to +add+, which was already supported, support has been added for +replace+,
+  +append+ and +prepend+ from the memcached protocol.
+
+    cache.add('foo', 1)
+    cache.add('foo', 0)
+    cache.get('foo')
+    => 1
+
+    cache.replace('foo', 2)
+    cache.get('foo')
+    => 2
+
+    cache.write('foo', 'bar')   ## shortcut for cache.set('foo', 'bar', :raw => true)
+    cache.append('foo', 'none') ## append and prepend only work on raw values
+    cache.prepend('foo', 'foo') ##
+    cache.read('foo')           ## shortcut for cache.get('foo', :raw => true)
+    => "foobarnone"
+
+- Support has also been added for +cas+ (compare-and-set).
+
+    value = cache.get('foo', :cas => true)
+    cache.cas('foo', value.upcase, :cas => value.memcache_cas)
+    cache.get('foo')
+    => "FOOBARNONE"
+
+    value = cache.get('foo', :cas => true)
+    cache.set('foo', 'modified')
+    cache.cas('foo', value.downcase, :cas => value.memcache_cas)
+    cache.get('foo')
+    => "modified"
+
+- Several additional convenience methods have been added including +get_or_add+,
+  +get_or_set+, +update+, +get_some+, +lock+, +unlock+, and +with_lock+.
+
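A sketch of how two of these helpers might be used, illustrative only: the block-taking signatures are assumptions rather than something documented in this README (the real definitions live in lib/memcache.rb), and User.find(1).name stands in for any expensive computation.

    # Illustrative sketch -- signatures are assumed, check lib/memcache.rb.
    name = cache.get_or_set('user:1:name') { User.find(1).name }

    cache.with_lock('user:1:name') do
      cache.set('user:1:name', name.upcase)
    end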
=== Implementation
|
151
|
+
|
152
|
+
The underlying architechture of _memcache_ is more modular than memcache-client.
|
153
|
+
A given +Memcache+ instance has a group of servers, just like before, but much more of the
|
154
|
+
functionality in encapsulated inside the <tt>Memcache::Server</tt> object. Really, a +Server+
|
155
|
+
object is a thin wrapper around an remote memcached server that takes care of the socket
|
156
|
+
and protocol details along with basic error handling. The +Memcache+ class handles the
|
157
|
+
partitioning algorithm, marshaling of ruby objects and various higher-level methods.
|
158
|
+
|
159
|
+
By encapsulating the protocol inside the +Server+ object, it becomes very easy to plug-in
|
160
|
+
alternate server implementations. Right now, there are two basic, alternate servers:
|
161
|
+
|
162
|
+
[+LocalServer+] This is an in-process server for storing keys and values in local
|
163
|
+
memory. It is good for testing, when you don't want to spin up an instance
|
164
|
+
of memcached, and also as a second level of caching. For example, in a web
|
165
|
+
application, you can use this as a quick cache which lasts for the
|
166
|
+
duration of a request.
|
167
|
+
|
168
|
+
[+PGServer+] This is an implementation of memcached functionality using SQL. It stores all
|
169
|
+
data in a single postgres table and uses +PGConn+ to select and update this
|
170
|
+
table. This works well as a permanent cache or in the case when your objects
|
171
|
+
are very large. It can also be used in a multi-level cache setup with
|
172
|
+
<tt>Memcache::Server</tt> to provide persistence without sacrificing speed.
|
173
|
+
|
174
|
+
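The alternate servers plug into Memcache the same way a server object does in the "Very Large Values" example below. A minimal sketch, illustrative only, assuming Memcache.new accepts a LocalServer instance through the same :server option used there:

    # Illustrative sketch: an in-process cache, no memcached daemon needed.
    cache = Memcache.new(:server => Memcache::LocalServer.new)
    cache.set('greeting', 'hello')
    cache.get('greeting')   # => "hello"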
+=== Very Large Values
+
+Memcached limits the size of values to 1MB. This is done to reduce memory usage, but it
+means that large data structures, which are also often costly to compute, cannot be stored
+easily. We solve this problem by providing an additional server called
+<tt>Memcache::SegmentedServer</tt>. It inherits from <tt>Memcache::Server</tt>, but
+includes code to segment and reassemble large values. Mike Stangel at Geni originally
+wrote this code as an extension to memcache-client and I adapted it for the new
+architecture.
+
+You can use segmented values either by passing +SegmentedServer+ objects to +Memcache+, or
+you can use the +segment_large_values+ option.
+
+  server = Memcache::SegmentedServer.new(:host => 'localhost', :port => 11211)
+  cache = Memcache.new(:server => server)
+
+  cache = Memcache.new(:server => 'localhost:11211', :segment_large_values => true)
+
+=== Error Handling and Recovery
+
+We handle errors differently in _memcache_ than memcache-client does. Whenever there is a
+connection error or other fatal error, memcache-client marks the offending server as dead
+for 30 seconds, and all calls that require that server fail for the next 30 seconds. This
+was unacceptable for us in a production environment. We tried changing the retry timeout
+to 1 second, but still found our exception logs filling up with failed web requests
+whenever a network connection was broken.
+
+So, the default behavior in _memcache_ is for reads to be stable even if the underlying
+server is unavailable. This means that instead of raising an exception, a read will just
+return nil if the server is down. Of course, you need to monitor your memcached servers to
+make sure they aren't down for long, but this allows your site to be resilient to minor
+network blips. Any error that occurs while unmarshalling a stored object will also return nil.
+
+Writes, on the other hand, cannot just be ignored when the server is down. For this reason,
+every write operation is retried once by closing and reopening the connection before
+finally marking a server as dead and raising an exception. We will not attempt to read
+from a dead server for 5 seconds, but a write will always try to revive a dead server
+by attempting to connect.
+
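In calling code this usually translates into a fallback on the read side and a rescue on the write side. A sketch, illustrative only; recompute_report and logger are hypothetical stand-ins for your own code:

    # Illustrative sketch: reads degrade to nil when the server is down,
    # writes raise once the single reconnect attempt also fails.
    report = cache.get('report') || recompute_report
    begin
      cache.set('report', report)
    rescue => e
      logger.warn("memcache write failed: #{e.message}")
    end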
+== License:
+
+Copyright (c) 2009 Justin Balthrop, Geni.com, Develon s.r.l. Published under the MIT License, see LICENSE.
data/Rakefile
ADDED
@@ -0,0 +1,56 @@
+require 'rubygems'
+require 'rake'
+
+begin
+  require 'jeweler'
+  Jeweler::Tasks.new do |gem|
+    gem.name = "memcache"
+    gem.summary = %Q{Advanced ruby memcache client}
+    gem.description = %Q{Ruby client for memcached supporting advanced protocol features and pluggable architecture.}
+    gem.email = "code@justinbalthrop.com"
+    gem.homepage = "http://github.com/ninjudd/memcache"
+    gem.authors = ["Justin Balthrop"]
+    # gem is a Gem::Specification... see http://www.rubygems.org/read/chapter/20 for additional settings
+  end
+  Jeweler::GemcutterTasks.new
+rescue LoadError
+  puts "Jeweler (or a dependency) not available. Install it with: sudo gem install jeweler"
+end
+
+require 'rake/testtask'
+Rake::TestTask.new(:test) do |test|
+  test.libs << 'lib' << 'test'
+  test.pattern = 'test/**/*_test.rb'
+  test.verbose = true
+end
+
+begin
+  require 'rcov/rcovtask'
+  Rcov::RcovTask.new do |test|
+    test.libs << 'test'
+    test.pattern = 'test/**/*_test.rb'
+    test.verbose = true
+  end
+rescue LoadError
+  task :rcov do
+    abort "RCov is not available. In order to run rcov, you must: sudo gem install spicycode-rcov"
+  end
+end
+
+task :test => :check_dependencies
+
+task :default => :test
+
+require 'rake/rdoctask'
+Rake::RDocTask.new do |rdoc|
+  if File.exist?('VERSION')
+    version = File.read('VERSION')
+  else
+    version = ""
+  end
+
+  rdoc.rdoc_dir = 'rdoc'
+  rdoc.title = "memcache #{version}"
+  rdoc.rdoc_files.include('README*')
+  rdoc.rdoc_files.include('lib/**/*.rb')
+end
data/lib/memcache/local_server.rb
ADDED
@@ -0,0 +1,107 @@
+class Memcache
+  class LocalServer
+    def initialize
+      @data = {}
+      @expiry = {}
+    end
+
+    def name
+      "local:#{hash}"
+    end
+
+    def stats
+      { # curr_items may include items that have expired.
+        'curr_items' => @data.size,
+        'expiry_count' => @expiry.size,
+      }
+    end
+
+    def flush_all(delay = 0)
+      raise 'flush_all not supported with delay' if delay != 0
+      @data.clear
+      @expiry.clear
+    end
+
+    def gets(keys)
+      get(keys, true)
+    end
+
+    def get(keys, cas = false)
+      if keys.kind_of?(Array)
+        hash = {}
+        keys.each do |key|
+          key = key.to_s
+          val = get(key)
+          hash[key] = val if val
+        end
+        hash
+      else
+        key = keys.to_s
+        if @expiry[key] and Time.now > @expiry[key]
+          @data[key] = nil
+          @expiry[key] = nil
+        end
+        @data[key]
+      end
+    end
+
+    def incr(key, amount = 1)
+      key = key.to_s
+      value = get(key)
+      return unless value
+      return unless value =~ /^\d+$/
+
+      value = value.to_i + amount
+      value = 0 if value < 0
+      @data[key] = value.to_s
+      value
+    end
+
+    def decr(key, amount = 1)
+      incr(key, -amount)
+    end
+
+    def delete(key)
+      @data.delete(key.to_s)
+    end
+
+    def set(key, value, expiry = 0, flags = 0)
+      key = key.to_s
+      @data[key] = value.to_s
+      if expiry.kind_of?(Time)
+        @expiry[key] = expiry
+      else
+        expiry = expiry.to_i
+        @expiry[key] = expiry == 0 ? nil : Time.now + expiry
+      end
+      value
+    end
+
+    def cas(key, value, cas, expiry = 0, flags = 0)
+      # No cas implementation yet, just do a set for now.
+      set(key, value, expiry, flags)
+    end
+
+    def add(key, value, expiry = 0, flags = 0)
+      return nil if get(key)
+      set(key, value, expiry)
+    end
+
+    def replace(key, value, expiry = 0, flags = 0)
+      return nil if get(key).nil?
+      set(key, value, expiry)
+    end
+
+    def append(key, value)
+      existing = get(key)
+      return nil if existing.nil?
+      set(key, existing + value)
+    end
+
+    def prepend(key, value)
+      existing = get(key)
+      return nil if existing.nil?
+      set(key, value + existing)
+    end
+  end
+end
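For reference, a minimal sketch (not part of the gem's files) exercising the LocalServer API defined above:

    server = Memcache::LocalServer.new
    server.set('hits', '10')          # values are stored as strings
    server.incr('hits', 5)            # => 15
    server.get('hits')                # => "15"
    server.get(['hits', 'missing'])   # => {"hits"=>"15"}
    server.delete('hits')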
data/lib/memcache/migration.rb
ADDED
@@ -0,0 +1,23 @@
+class Memcache
+  class Migration < ActiveRecord::Migration
+    class << self
+      attr_accessor :table
+    end
+
+    def self.up
+      create_table table, :id => false do |t|
+        t.string :key
+        t.text :value
+        t.timestamp :expires_at
+        t.timestamp :updated_at
+      end
+
+      add_index table, [:key], :unique => true
+      add_index table, [:expires_at]
+    end
+
+    def self.down
+      drop_table table
+    end
+  end
+end
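A minimal sketch (not part of the gem's files) of creating the backing table this migration defines. It assumes an established ActiveRecord connection, and the table name here is only an illustration:

    # Illustrative sketch: pick a table name, then run the migration.
    Memcache::Migration.table = :memcache
    Memcache::Migration.up   # creates the key/value table with its indexes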
data/lib/memcache/null_server.rb
ADDED
@@ -0,0 +1,30 @@
+class Memcache
+  class NullServer
+    def name
+      "null"
+    end
+
+    def flush_all(delay = nil)
+    end
+
+    def get(keys)
+      keys.kind_of?(Array) ? {} : nil
+    end
+
+    def incr(key, amount = nil)
+      nil
+    end
+
+    def delete(key, expiry = nil)
+      nil
+    end
+
+    def set(key, value, expiry = nil)
+      nil
+    end
+
+    def add(key, value, expiry = nil)
+      nil
+    end
+  end
+end
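A minimal sketch (not part of the gem's files) exercising the NullServer API defined above; it accepts writes and always misses, which makes it handy for switching caching off:

    null = Memcache::NullServer.new
    null.set('foo', 'bar')     # => nil
    null.get('foo')            # => nil
    null.get(['foo', 'baz'])   # => {}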