memcachedb-client 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/FAQ.rdoc +31 -0
- data/History.rdoc +2 -0
- data/LICENSE.txt +28 -0
- data/README.rdoc +98 -0
- data/Rakefile +38 -0
- data/VERSION.yml +5 -0
- data/lib/continuum_native.rb +41 -0
- data/lib/memcachedb.rb +1222 -0
- data/performance.txt +143 -0
- data/test/test_benchmark.rb +134 -0
- data/test/test_mem_cache.rb +1220 -0
- metadata +70 -0
data/FAQ.rdoc
ADDED
@@ -0,0 +1,31 @@
|
|
1
|
+
= Memcache-client FAQ
|
2
|
+
|
3
|
+
== Does memcachedb-client work with Ruby 1.9?
|
4
|
+
|
5
|
+
Yes, Ruby 1.9 is supported. The test suite should pass completely on 1.8.6 and 1.9.1.
|
6
|
+
|
7
|
+
|
8
|
+
== I'm seeing "execution expired" or "time's up!" errors, what's that all about?
|
9
|
+
|
10
|
+
memcache-client 1.6.x+ now has socket operations timed out by default. This is to prevent
|
11
|
+
the Ruby process from hanging if memcached or starling get into a bad state, which has been
|
12
|
+
seen in production by both 37signals and FiveRuns. The default timeout is 0.5 seconds, which
|
13
|
+
should be more than enough time under normal circumstances. It's possible to hit a storm of
|
14
|
+
concurrent events which cause this timer to expire: a large Ruby VM can cause the GC to take
|
15
|
+
a while, while also storing a large (500k-1MB value), for example.
|
16
|
+
|
17
|
+
You can increase the timeout or disable them completely with the following configuration:
|
18
|
+
|
19
|
+
Rails:
|
20
|
+
config.cache_store = :mem_cache_store, 'server1', 'server2', { :timeout => nil } # no timeout
|
21
|
+
|
22
|
+
native:
|
23
|
+
MemCache.new ['server1', 'server2'], { :timeout => 1.0 } # 1 second timeout
|
24
|
+
|
25
|
+
|
26
|
+
== Isn't Evan Weaver's memcached gem faster?
|
27
|
+
|
28
|
+
The latest version of memcached-client is anywhere from 33% to 100% slower than memcached in various benchmarks. Keep in mind this means that 10,000 get requests take 1.8 sec instead of 1.2 seconds.
|
29
|
+
In practice, memcache-client is unlikely to be a bottleneck in your system but there is always going
|
30
|
+
to be an overhead to pure Ruby. memcache-client does have the advantage of built-in integration into
|
31
|
+
Rails and should work on non-MRI platforms: JRuby, MacRuby, etc.
|
data/History.rdoc
ADDED
data/LICENSE.txt
ADDED
@@ -0,0 +1,28 @@
|
|
1
|
+
Copyright 2005-2009 Bob Cottrell, Eric Hodel, Mike Perham.
|
2
|
+
All rights reserved.
|
3
|
+
|
4
|
+
Redistribution and use in source and binary forms, with or without
|
5
|
+
modification, are permitted provided that the following conditions
|
6
|
+
are met:
|
7
|
+
|
8
|
+
1. Redistributions of source code must retain the above copyright
|
9
|
+
notice, this list of conditions and the following disclaimer.
|
10
|
+
2. Redistributions in binary form must reproduce the above copyright
|
11
|
+
notice, this list of conditions and the following disclaimer in the
|
12
|
+
documentation and/or other materials provided with the distribution.
|
13
|
+
3. Neither the names of the authors nor the names of their contributors
|
14
|
+
may be used to endorse or promote products derived from this software
|
15
|
+
without specific prior written permission.
|
16
|
+
|
17
|
+
THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS
|
18
|
+
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
19
|
+
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
20
|
+
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE
|
21
|
+
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
|
22
|
+
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
|
23
|
+
OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
|
24
|
+
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
25
|
+
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
26
|
+
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
27
|
+
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
28
|
+
|
data/README.rdoc
ADDED
@@ -0,0 +1,98 @@
|
|
1
|
+
= memcachedb-client
|
2
|
+
|
3
|
+
A ruby library for accessing memcachedb: http://memcachedb.org/
|
4
|
+
|
5
|
+
Source:
|
6
|
+
http://github.com/juggy/memcachedb-client
|
7
|
+
|
8
|
+
To install the gem
|
9
|
+
|
10
|
+
== Using memcachedb-client
|
11
|
+
|
12
|
+
With a single server:
|
13
|
+
|
14
|
+
CACHE = MemCacheDb.new :servers=>'localhost:11211'
|
15
|
+
|
16
|
+
With multiple master servers
|
17
|
+
|
18
|
+
CACHE = MemCacheDb.new [{:servers=>'host1:11211', :name=>'host1'}, {:servers=>'host2:11211', :name=>'host2'}]
|
19
|
+
|
20
|
+
With a single master-slave configuration
|
21
|
+
|
22
|
+
CACHE = MemCacheDb.new :servers=>['localhost:11211', 'localhost:11212']
|
23
|
+
|
24
|
+
With multiple master-slave configuration
|
25
|
+
|
26
|
+
CACHE = MemCacheDb.new [{:servers=>['host1:11211', 'host1:11212'], :name=>'host1'}, {:servers=>['host2:11211', 'host2:11212'], :name=>'host2'}]
|
27
|
+
|
28
|
+
* The master will be determined automatically
|
29
|
+
|
30
|
+
== Starting memcachedb as a master-slave configuration
|
31
|
+
|
32
|
+
memcachedb -p11211 -r -H ./memcache1 -N -R 127.0.0.1:21211 -O 127.0.0.1:31211
|
33
|
+
|
34
|
+
-p11211 : the memcache port the server will listen to
|
35
|
+
-R 127.0.0.1:21211 : the port the replication server will listen to for this instance
|
36
|
+
-O 127.0.0.1:31211 : each other replication server in the master-slave group
|
37
|
+
-The master will be elected once all servers are started.
|
38
|
+
|
39
|
+
== Questions on memcachedb-client ?
|
40
|
+
|
41
|
+
- julien.guimont@gmail.com
|
42
|
+
|
43
|
+
|
44
|
+
===========================
|
45
|
+
memcache-client Documentation
|
46
|
+
|
47
|
+
|
48
|
+
= memcache-client
|
49
|
+
|
50
|
+
A ruby library for accessing memcached.
|
51
|
+
|
52
|
+
Source:
|
53
|
+
|
54
|
+
http://github.com/mperham/memcache-client
|
55
|
+
|
56
|
+
== Installing memcache-client
|
57
|
+
|
58
|
+
Just install the gem:
|
59
|
+
|
60
|
+
$ sudo gem install memcache-client
|
61
|
+
|
62
|
+
== Using memcache-client
|
63
|
+
|
64
|
+
With one server:
|
65
|
+
|
66
|
+
CACHE = MemCache.new 'localhost:11211'
|
67
|
+
|
68
|
+
Or with multiple servers:
|
69
|
+
|
70
|
+
CACHE = MemCache.new %w[one.example.com:11211 two.example.com:11211]
|
71
|
+
|
72
|
+
|
73
|
+
== Tuning memcache-client
|
74
|
+
|
75
|
+
The MemCache.new method takes a number of options which can be useful at times. Please
|
76
|
+
read the source comments there for an overview. If you are using Ruby 1.8.x and using
|
77
|
+
multiple memcached servers, you should install the RubyInline gem for ultimate performance.
|
78
|
+
|
79
|
+
|
80
|
+
== Using memcache-client with Rails
|
81
|
+
|
82
|
+
Rails 2.1+ includes memcache-client 1.5.0 out of the box. See ActiveSupport::Cache::MemCacheStore
|
83
|
+
and the Rails.cache method for more details. Rails 2.3+ will use the latest memcache-client
|
84
|
+
gem installed.
|
85
|
+
|
86
|
+
|
87
|
+
== Questions?
|
88
|
+
|
89
|
+
memcache-client is maintained by Mike Perham and was originally written by Bob Cottrell,
|
90
|
+
Eric Hodel and the seattle.rb crew.
|
91
|
+
|
92
|
+
Email:: mailto:mperham@gmail.com
|
93
|
+
Twitter:: mperham[http://twitter.com/mperham]
|
94
|
+
WWW:: http://mikeperham.com
|
95
|
+
|
96
|
+
If my work on memcache-client is something you support, please take a moment to
|
97
|
+
recommend me at WWR[http://workingwithrails.com/person/10797-mike-perham]. I'm not
|
98
|
+
asking for money, just a electronic "thumbs up".
|
data/Rakefile
ADDED
@@ -0,0 +1,38 @@
|
|
1
|
+
# vim: syntax=Ruby
|
2
|
+
require 'rubygems'
|
3
|
+
require 'rake/rdoctask'
|
4
|
+
require 'rake/testtask'
|
5
|
+
|
6
|
+
begin
|
7
|
+
require 'jeweler'
|
8
|
+
Jeweler::Tasks.new do |s|
|
9
|
+
s.name = "memcachedb-client"
|
10
|
+
s.summary = s.description = "A Ruby library for accessing memcachedb."
|
11
|
+
s.email = "julien.guimont@gmail.com"
|
12
|
+
s.homepage = "http://github.com/juggy/memcachedb-client"
|
13
|
+
s.authors = ['Eric Hodel', 'Robert Cottrell', 'Mike Perham', 'Julien Guimont']
|
14
|
+
s.has_rdoc = true
|
15
|
+
s.files = FileList["[A-Z]*", "{lib,test}/**/*", 'performance.txt']
|
16
|
+
s.test_files = FileList["test/test_*.rb"]
|
17
|
+
end
|
18
|
+
Jeweler::GemcutterTasks.new
|
19
|
+
rescue LoadError
|
20
|
+
puts "Jeweler not available. Install it for jeweler-related tasks with: sudo gem install jeweler"
|
21
|
+
end
|
22
|
+
|
23
|
+
|
24
|
+
Rake::RDocTask.new do |rd|
|
25
|
+
rd.main = "README.rdoc"
|
26
|
+
rd.rdoc_files.include("README.rdoc", "FAQ.rdoc", "History.rdoc", "lib/memcachedb.rb")
|
27
|
+
rd.rdoc_dir = 'doc'
|
28
|
+
end
|
29
|
+
|
30
|
+
Rake::TestTask.new do |t|
|
31
|
+
t.warning = true
|
32
|
+
end
|
33
|
+
|
34
|
+
task :default => :test
|
35
|
+
|
36
|
+
task :rcov do
|
37
|
+
`rcov -Ilib test/*.rb`
|
38
|
+
end
|
data/VERSION.yml
ADDED
@@ -0,0 +1,41 @@
|
|
1
|
+
module Continuum
|
2
|
+
|
3
|
+
class << self
|
4
|
+
|
5
|
+
# Native extension to perform the binary search within the continuum
|
6
|
+
# space. There's a pure ruby version in memcache.rb so this is purely
|
7
|
+
# optional for performance and only necessary if you are using multiple
|
8
|
+
# memcached servers.
|
9
|
+
begin
|
10
|
+
require 'inline'
|
11
|
+
inline do |builder|
|
12
|
+
builder.c <<-EOM
|
13
|
+
int binary_search(VALUE ary, unsigned int r) {
|
14
|
+
int upper = RARRAY_LEN(ary) - 1;
|
15
|
+
int lower = 0;
|
16
|
+
int idx = 0;
|
17
|
+
ID value = rb_intern("value");
|
18
|
+
|
19
|
+
while (lower <= upper) {
|
20
|
+
idx = (lower + upper) / 2;
|
21
|
+
|
22
|
+
VALUE continuumValue = rb_funcall(RARRAY_PTR(ary)[idx], value, 0);
|
23
|
+
unsigned int l = NUM2UINT(continuumValue);
|
24
|
+
if (l == r) {
|
25
|
+
return idx;
|
26
|
+
}
|
27
|
+
else if (l > r) {
|
28
|
+
upper = idx - 1;
|
29
|
+
}
|
30
|
+
else {
|
31
|
+
lower = idx + 1;
|
32
|
+
}
|
33
|
+
}
|
34
|
+
return upper;
|
35
|
+
}
|
36
|
+
EOM
|
37
|
+
end
|
38
|
+
rescue Exception => e
|
39
|
+
end
|
40
|
+
end
|
41
|
+
end
|
data/lib/memcachedb.rb
ADDED
@@ -0,0 +1,1222 @@
|
|
1
|
+
$TESTING = defined?($TESTING) && $TESTING
|
2
|
+
|
3
|
+
require 'socket'
|
4
|
+
require 'thread'
|
5
|
+
require 'zlib'
|
6
|
+
require 'yaml'
|
7
|
+
require 'digest/sha1'
|
8
|
+
require 'net/protocol'
|
9
|
+
|
10
|
+
begin
|
11
|
+
# Try to use the SystemTimer gem instead of Ruby's timeout library
|
12
|
+
# when running on something that looks like Ruby 1.8.x. See:
|
13
|
+
# http://ph7spot.com/articles/system_timer
|
14
|
+
# We don't want to bother trying to load SystemTimer on jruby and
|
15
|
+
# ruby 1.9+
|
16
|
+
if defined?(JRUBY_VERSION) || (RUBY_VERSION >= '1.9')
|
17
|
+
require 'timeout'
|
18
|
+
MemCacheDbTimer = Timeout
|
19
|
+
else
|
20
|
+
require 'system_timer'
|
21
|
+
MemCacheDbTimer = SystemTimer
|
22
|
+
end
|
23
|
+
rescue LoadError => e
|
24
|
+
puts "[MemCacheDb-client] Could not load SystemTimer gem, falling back to Ruby's slower/unsafe timeout library: #{e.message}"
|
25
|
+
require 'timeout'
|
26
|
+
MemCacheDbTimer = Timeout
|
27
|
+
end
|
28
|
+
|
29
|
+
|
30
|
+
##
|
31
|
+
# A Ruby client library for MemCacheDbd.
|
32
|
+
#
|
33
|
+
|
34
|
+
class MemCacheDb
|
35
|
+
|
36
|
+
##
|
37
|
+
# The version of MemCacheDb you are using.
|
38
|
+
|
39
|
+
VERSION = begin
|
40
|
+
config = YAML.load(File.read(File.dirname(__FILE__) + '/../VERSION.yml'))
|
41
|
+
"#{config[:major]}.#{config[:minor]}.#{config[:patch]}"
|
42
|
+
end
|
43
|
+
|
44
|
+
##
|
45
|
+
# Default options for the cache object.
|
46
|
+
|
47
|
+
DEFAULT_OPTIONS = {
|
48
|
+
:namespace => nil,
|
49
|
+
:readonly => false,
|
50
|
+
:multithread => true,
|
51
|
+
:failover => true,
|
52
|
+
:timeout => 0.5,
|
53
|
+
:logger => nil,
|
54
|
+
:no_reply => false,
|
55
|
+
:check_size => true,
|
56
|
+
:autofix_keys => false,
|
57
|
+
:namespace_separator => ':',
|
58
|
+
}
|
59
|
+
|
60
|
+
##
|
61
|
+
# Default MemCacheDbd port.
|
62
|
+
|
63
|
+
DEFAULT_PORT = 11211
|
64
|
+
|
65
|
+
##
|
66
|
+
# Default MemCacheDbd server weight.
|
67
|
+
|
68
|
+
DEFAULT_WEIGHT = 1
|
69
|
+
|
70
|
+
##
|
71
|
+
# The namespace for this instance
|
72
|
+
|
73
|
+
attr_reader :namespace
|
74
|
+
|
75
|
+
##
|
76
|
+
# The multithread setting for this instance
|
77
|
+
|
78
|
+
attr_reader :multithread
|
79
|
+
|
80
|
+
##
|
81
|
+
# Whether to try to fix keys that are too long and will be truncated by
|
82
|
+
# using their SHA1 hash instead.
|
83
|
+
# The hash is only used on keys longer than 250 characters, or containing spaces,
|
84
|
+
# to avoid impacting performance unnecesarily.
|
85
|
+
#
|
86
|
+
# In theory, your code should generate correct keys when calling MemCacheDb,
|
87
|
+
# so it's your responsibility and you should try to fix this problem at its source.
|
88
|
+
#
|
89
|
+
# But if that's not possible, enable this option and MemCacheDb-client will give you a hand.
|
90
|
+
|
91
|
+
attr_reader :autofix_keys
|
92
|
+
|
93
|
+
##
|
94
|
+
# The servers this client talks to. Play at your own peril.
|
95
|
+
|
96
|
+
attr_reader :servers
|
97
|
+
|
98
|
+
##
|
99
|
+
# The groups of servers that behaves in a master/slave configuration
|
100
|
+
attr_reader :groups
|
101
|
+
|
102
|
+
##
|
103
|
+
# Socket timeout limit with this client, defaults to 0.5 sec.
|
104
|
+
# Set to nil to disable timeouts.
|
105
|
+
|
106
|
+
attr_reader :timeout
|
107
|
+
|
108
|
+
##
|
109
|
+
# Should the client try to failover to another server if the
|
110
|
+
# first server is down? Defaults to true.
|
111
|
+
|
112
|
+
attr_reader :failover
|
113
|
+
|
114
|
+
##
|
115
|
+
# Log debug/info/warn/error to the given Logger, defaults to nil.
|
116
|
+
|
117
|
+
attr_reader :logger
|
118
|
+
|
119
|
+
##
|
120
|
+
# Don't send or look for a reply from the MemCacheDbd server for write operations.
|
121
|
+
# Please note this feature only works in MemCacheDbd 1.2.5 and later. Earlier
|
122
|
+
# versions will reply with "ERROR".
|
123
|
+
attr_reader :no_reply
|
124
|
+
|
125
|
+
##
|
126
|
+
# Accepts a list of +servers+ and a list of +opts+. +servers+ may be
|
127
|
+
# omitted. See +servers=+ for acceptable server list arguments.
|
128
|
+
#
|
129
|
+
# Valid options for +opts+ are:
|
130
|
+
#
|
131
|
+
# [:namespace] Prepends this value to all keys added or retrieved.
|
132
|
+
# [:readonly] Raises an exception on cache writes when true.
|
133
|
+
# [:multithread] Wraps cache access in a Mutex for thread safety. Defaults to true.
|
134
|
+
# [:failover] Should the client try to failover to another server if the
|
135
|
+
# first server is down? Defaults to true.
|
136
|
+
# [:timeout] Time to use as the socket read timeout. Defaults to 0.5 sec,
|
137
|
+
# set to nil to disable timeouts.
|
138
|
+
# [:logger] Logger to use for info/debug output, defaults to nil
|
139
|
+
# [:no_reply] Don't bother looking for a reply for write operations (i.e. they
|
140
|
+
# become 'fire and forget'), MemCacheDbd 1.2.5 and later only, speeds up
|
141
|
+
# set/add/delete/incr/decr significantly.
|
142
|
+
# [:check_size] Raises a MemCacheDbError if the value to be set is greater than 1 MB, which
|
143
|
+
# is the maximum key size for the standard MemCacheDbd server. Defaults to true.
|
144
|
+
# [:autofix_keys] If a key is longer than 250 characters or contains spaces,
|
145
|
+
# use an SHA1 hash instead, to prevent collisions on truncated keys.
|
146
|
+
# Other options are ignored.
|
147
|
+
|
148
|
+
def initialize(*args)
|
149
|
+
groups = []
|
150
|
+
opts = {}
|
151
|
+
|
152
|
+
case args.length
|
153
|
+
when 0 then # NOP
|
154
|
+
when 1 then
|
155
|
+
arg = args.shift
|
156
|
+
case arg
|
157
|
+
when Hash then groups << arg
|
158
|
+
when Array then groups = arg
|
159
|
+
else raise ArgumentError, 'first argument must be Array, Hash'
|
160
|
+
end
|
161
|
+
when 2 then
|
162
|
+
groups, opts = args
|
163
|
+
else
|
164
|
+
raise ArgumentError, "wrong number of arguments (#{args.length} for 2)"
|
165
|
+
end
|
166
|
+
|
167
|
+
opts = DEFAULT_OPTIONS.merge opts
|
168
|
+
@namespace = opts[:namespace]
|
169
|
+
@readonly = opts[:readonly]
|
170
|
+
@multithread = opts[:multithread]
|
171
|
+
@autofix_keys = opts[:autofix_keys]
|
172
|
+
@timeout = opts[:timeout]
|
173
|
+
@failover = opts[:failover]
|
174
|
+
@logger = opts[:logger]
|
175
|
+
@no_reply = opts[:no_reply]
|
176
|
+
@check_size = opts[:check_size]
|
177
|
+
@namespace_separator = opts[:namespace_separator]
|
178
|
+
@mutex = Mutex.new if @multithread
|
179
|
+
|
180
|
+
logger.info { "MemCacheDb-client #{VERSION} #{Array(groups).inspect}" } if logger
|
181
|
+
|
182
|
+
Thread.current[:memcachedb_client] = self.object_id if !@multithread
|
183
|
+
self.groups = groups
|
184
|
+
end
|
185
|
+
|
186
|
+
##
|
187
|
+
# Returns a string representation of the cache object.
|
188
|
+
|
189
|
+
def inspect
|
190
|
+
"<MemCacheDb: %d groups, ns: %p, ro: %p>" %
|
191
|
+
[@groups.length, @namespace, @readonly]
|
192
|
+
end
|
193
|
+
|
194
|
+
##
|
195
|
+
# Returns whether there is at least one active server for the object.
|
196
|
+
|
197
|
+
def active?
|
198
|
+
not groups.empty?
|
199
|
+
end
|
200
|
+
|
201
|
+
##
|
202
|
+
# Returns whether or not the cache object was created read only.
|
203
|
+
|
204
|
+
def readonly?
|
205
|
+
@readonly
|
206
|
+
end
|
207
|
+
|
208
|
+
##
|
209
|
+
# Set the groups that the requests will be distributed between. Entries
|
210
|
+
# must be a hash in the form {:servers=>[...], :weight=>1} or
|
211
|
+
# MemCacheDb::Group objects.
|
212
|
+
#
|
213
|
+
|
214
|
+
def groups=(groups)
|
215
|
+
@groups = Array(groups).collect do |group|
|
216
|
+
case group
|
217
|
+
when Hash
|
218
|
+
servers = create_servers(group[:servers])
|
219
|
+
Group.new(self, servers, group[:name], group[:weight])
|
220
|
+
else
|
221
|
+
group
|
222
|
+
end
|
223
|
+
end
|
224
|
+
@continuum = create_continuum_for(@groups) if @groups.size > 1
|
225
|
+
|
226
|
+
@groups
|
227
|
+
end
|
228
|
+
|
229
|
+
|
230
|
+
def create_servers(servers)
|
231
|
+
Array(servers).collect do |server|
|
232
|
+
case server
|
233
|
+
when String
|
234
|
+
host, port, weight = server.split ':', 3
|
235
|
+
port ||= DEFAULT_PORT
|
236
|
+
weight ||= DEFAULT_WEIGHT
|
237
|
+
Server.new self, host, port, weight
|
238
|
+
else
|
239
|
+
server
|
240
|
+
end
|
241
|
+
end
|
242
|
+
end
|
243
|
+
|
244
|
+
##
|
245
|
+
# Decrements the value for +key+ by +amount+ and returns the new value.
|
246
|
+
# +key+ must already exist. If +key+ is not an integer, it is assumed to be
|
247
|
+
# 0. +key+ can not be decremented below 0.
|
248
|
+
|
249
|
+
def decr(key, amount = 1)
|
250
|
+
raise MemCacheDbError, "Update of readonly cache" if @readonly
|
251
|
+
with_server(key) do |server, cache_key|
|
252
|
+
cache_decr server, cache_key, amount
|
253
|
+
end
|
254
|
+
rescue TypeError => err
|
255
|
+
handle_error nil, err
|
256
|
+
end
|
257
|
+
|
258
|
+
##
|
259
|
+
# Retrieves +key+ from MemCacheDb. If +raw+ is false, the value will be
|
260
|
+
# unmarshalled.
|
261
|
+
|
262
|
+
def get(key, raw = false)
|
263
|
+
with_server(key, true) do |server, cache_key|
|
264
|
+
logger.debug { "get #{key} from #{server.inspect}" } if logger
|
265
|
+
value = cache_get server, cache_key
|
266
|
+
return nil if value.nil?
|
267
|
+
value = Marshal.load value unless raw
|
268
|
+
return value
|
269
|
+
end
|
270
|
+
rescue TypeError => err
|
271
|
+
handle_error nil, err
|
272
|
+
end
|
273
|
+
|
274
|
+
##
|
275
|
+
# Performs a +get+ with the given +key+. If
|
276
|
+
# the value does not exist and a block was given,
|
277
|
+
# the block will be called and the result saved via +add+.
|
278
|
+
#
|
279
|
+
# If you do not provide a block, using this
|
280
|
+
# method is the same as using +get+.
|
281
|
+
#
|
282
|
+
def fetch(key, expiry = 0, raw = false)
|
283
|
+
value = get(key, raw)
|
284
|
+
|
285
|
+
if value.nil? && block_given?
|
286
|
+
value = yield
|
287
|
+
add(key, value, expiry, raw)
|
288
|
+
end
|
289
|
+
|
290
|
+
value
|
291
|
+
end
|
292
|
+
|
293
|
+
##
|
294
|
+
# Retrieves multiple values from MemCacheDbd in parallel, if possible.
|
295
|
+
#
|
296
|
+
# The MemCacheDbd protocol supports the ability to retrieve multiple
|
297
|
+
# keys in a single request. Pass in an array of keys to this method
|
298
|
+
# and it will:
|
299
|
+
#
|
300
|
+
# 1. map the key to the appropriate MemCacheDbd server
|
301
|
+
# 2. send a single request to each server that has one or more key values
|
302
|
+
#
|
303
|
+
# Returns a hash of values.
|
304
|
+
#
|
305
|
+
# cache["a"] = 1
|
306
|
+
# cache["b"] = 2
|
307
|
+
# cache.get_multi "a", "b" # => { "a" => 1, "b" => 2 }
|
308
|
+
#
|
309
|
+
# Note that get_multi assumes the values are marshalled.
|
310
|
+
|
311
|
+
def get_multi(*keys)
|
312
|
+
raise MemCacheDbError, 'No active servers' unless active?
|
313
|
+
|
314
|
+
keys.flatten!
|
315
|
+
key_count = keys.length
|
316
|
+
cache_keys = {}
|
317
|
+
server_keys = Hash.new { |h,k| h[k] = [] }
|
318
|
+
|
319
|
+
# map keys to servers
|
320
|
+
keys.each do |key|
|
321
|
+
server, cache_key = request_setup key, true
|
322
|
+
cache_keys[cache_key] = key
|
323
|
+
server_keys[server] << cache_key
|
324
|
+
end
|
325
|
+
|
326
|
+
results = {}
|
327
|
+
server_keys.each do |server, keys_for_server|
|
328
|
+
keys_for_server_str = keys_for_server.join ' '
|
329
|
+
begin
|
330
|
+
values = cache_get_multi server, keys_for_server_str
|
331
|
+
values.each do |key, value|
|
332
|
+
results[cache_keys[key]] = Marshal.load value
|
333
|
+
end
|
334
|
+
rescue IndexError => e
|
335
|
+
# Ignore this server and try the others
|
336
|
+
logger.warn { "Unable to retrieve #{keys_for_server.size} elements from #{server.inspect}: #{e.message}"} if logger
|
337
|
+
end
|
338
|
+
end
|
339
|
+
|
340
|
+
return results
|
341
|
+
rescue TypeError => err
|
342
|
+
handle_error nil, err
|
343
|
+
end
|
344
|
+
|
345
|
+
##
|
346
|
+
# Increments the value for +key+ by +amount+ and returns the new value.
|
347
|
+
# +key+ must already exist. If +key+ is not an integer, it is assumed to be
|
348
|
+
# 0.
|
349
|
+
|
350
|
+
def incr(key, amount = 1)
|
351
|
+
raise MemCacheDbError, "Update of readonly cache" if @readonly
|
352
|
+
with_server(key) do |server, cache_key|
|
353
|
+
cache_incr server, cache_key, amount
|
354
|
+
end
|
355
|
+
rescue TypeError => err
|
356
|
+
handle_error nil, err
|
357
|
+
end
|
358
|
+
|
359
|
+
##
|
360
|
+
# Add +key+ to the cache with value +value+ that expires in +expiry+
|
361
|
+
# seconds. If +raw+ is true, +value+ will not be Marshalled.
|
362
|
+
#
|
363
|
+
# Warning: Readers should not call this method in the event of a cache miss;
|
364
|
+
# see MemCacheDb#add.
|
365
|
+
|
366
|
+
ONE_MB = 1024 * 1024
|
367
|
+
|
368
|
+
def set(key, value, expiry = 0, raw = false)
|
369
|
+
raise MemCacheDbError, "Update of readonly cache" if @readonly
|
370
|
+
|
371
|
+
value = Marshal.dump value unless raw
|
372
|
+
with_server(key) do |server, cache_key|
|
373
|
+
logger.debug { "set #{key} to #{server.inspect}: #{value.to_s.size}" } if logger
|
374
|
+
|
375
|
+
if @check_size && value.to_s.size > ONE_MB
|
376
|
+
raise MemCacheDbError, "Value too large, MemCacheDbd can only store 1MB of data per key"
|
377
|
+
end
|
378
|
+
|
379
|
+
command = "set #{cache_key} 0 #{expiry} #{value.to_s.size}#{noreply}\r\n#{value}\r\n"
|
380
|
+
|
381
|
+
with_socket_management(server) do |socket|
|
382
|
+
socket.write command
|
383
|
+
break nil if @no_reply
|
384
|
+
result = socket.gets
|
385
|
+
raise_on_error_response! result
|
386
|
+
|
387
|
+
if result.nil?
|
388
|
+
server.close
|
389
|
+
raise MemCacheDbError, "lost connection to #{server.host}:#{server.port}"
|
390
|
+
end
|
391
|
+
|
392
|
+
result
|
393
|
+
end
|
394
|
+
end
|
395
|
+
end
|
396
|
+
|
397
|
+
##
|
398
|
+
# "cas" is a check and set operation which means "store this data but
|
399
|
+
# only if no one else has updated since I last fetched it." This can
|
400
|
+
# be used as a form of optimistic locking.
|
401
|
+
#
|
402
|
+
# Works in block form like so:
|
403
|
+
# cache.cas('some-key') do |value|
|
404
|
+
# value + 1
|
405
|
+
# end
|
406
|
+
#
|
407
|
+
# Returns:
|
408
|
+
# +nil+ if the value was not found on the MemCacheDbd server.
|
409
|
+
# +STORED+ if the value was updated successfully
|
410
|
+
# +EXISTS+ if the value was updated by someone else since last fetch
|
411
|
+
|
412
|
+
def cas(key, expiry=0, raw=false)
|
413
|
+
raise MemCacheDbError, "Update of readonly cache" if @readonly
|
414
|
+
raise MemCacheDbError, "A block is required" unless block_given?
|
415
|
+
|
416
|
+
(value, token) = gets(key, raw)
|
417
|
+
return nil unless value
|
418
|
+
updated = yield value
|
419
|
+
value = raw ? updated : Marshal.dump(updated)
|
420
|
+
|
421
|
+
with_server(key) do |server, cache_key|
|
422
|
+
logger.debug { "cas #{key} to #{server.inspect}: #{value.to_s.size}" } if logger
|
423
|
+
command = "cas #{cache_key} 0 #{expiry} #{value.to_s.size} #{token}#{noreply}\r\n#{value}\r\n"
|
424
|
+
|
425
|
+
with_socket_management(server) do |socket|
|
426
|
+
socket.write command
|
427
|
+
break nil if @no_reply
|
428
|
+
result = socket.gets
|
429
|
+
raise_on_error_response! result
|
430
|
+
|
431
|
+
if result.nil?
|
432
|
+
server.close
|
433
|
+
raise MemCacheDbError, "lost connection to #{server.host}:#{server.port}"
|
434
|
+
end
|
435
|
+
|
436
|
+
result
|
437
|
+
end
|
438
|
+
end
|
439
|
+
end
|
440
|
+
|
441
|
+
##
|
442
|
+
# Add +key+ to the cache with value +value+ that expires in +expiry+
|
443
|
+
# seconds, but only if +key+ does not already exist in the cache.
|
444
|
+
# If +raw+ is true, +value+ will not be Marshalled.
|
445
|
+
#
|
446
|
+
# Readers should call this method in the event of a cache miss, not
|
447
|
+
# MemCacheDb#set.
|
448
|
+
|
449
|
+
def add(key, value, expiry = 0, raw = false)
|
450
|
+
raise MemCacheDbError, "Update of readonly cache" if @readonly
|
451
|
+
value = Marshal.dump value unless raw
|
452
|
+
with_server(key) do |server, cache_key|
|
453
|
+
logger.debug { "add #{key} to #{server}: #{value ? value.to_s.size : 'nil'}" } if logger
|
454
|
+
command = "add #{cache_key} 0 #{expiry} #{value.to_s.size}#{noreply}\r\n#{value}\r\n"
|
455
|
+
|
456
|
+
with_socket_management(server) do |socket|
|
457
|
+
socket.write command
|
458
|
+
break nil if @no_reply
|
459
|
+
result = socket.gets
|
460
|
+
raise_on_error_response! result
|
461
|
+
result
|
462
|
+
end
|
463
|
+
end
|
464
|
+
end
|
465
|
+
|
466
|
+
##
|
467
|
+
# Add +key+ to the cache with value +value+ that expires in +expiry+
|
468
|
+
# seconds, but only if +key+ already exists in the cache.
|
469
|
+
# If +raw+ is true, +value+ will not be Marshalled.
|
470
|
+
def replace(key, value, expiry = 0, raw = false)
|
471
|
+
raise MemCacheDbError, "Update of readonly cache" if @readonly
|
472
|
+
value = Marshal.dump value unless raw
|
473
|
+
with_server(key) do |server, cache_key|
|
474
|
+
logger.debug { "replace #{key} to #{server}: #{value ? value.to_s.size : 'nil'}" } if logger
|
475
|
+
command = "replace #{cache_key} 0 #{expiry} #{value.to_s.size}#{noreply}\r\n#{value}\r\n"
|
476
|
+
|
477
|
+
with_socket_management(server) do |socket|
|
478
|
+
socket.write command
|
479
|
+
break nil if @no_reply
|
480
|
+
result = socket.gets
|
481
|
+
raise_on_error_response! result
|
482
|
+
result
|
483
|
+
end
|
484
|
+
end
|
485
|
+
end
|
486
|
+
|
487
|
+
##
|
488
|
+
# Append - 'add this data to an existing key after existing data'
|
489
|
+
# Please note the value is always passed to MemCacheDbd as raw since it
|
490
|
+
# doesn't make a lot of sense to concatenate marshalled data together.
|
491
|
+
def append(key, value)
|
492
|
+
raise MemCacheDbError, "Update of readonly cache" if @readonly
|
493
|
+
with_server(key) do |server, cache_key|
|
494
|
+
logger.debug { "append #{key} to #{server}: #{value ? value.to_s.size : 'nil'}" } if logger
|
495
|
+
command = "append #{cache_key} 0 0 #{value.to_s.size}#{noreply}\r\n#{value}\r\n"
|
496
|
+
|
497
|
+
with_socket_management(server) do |socket|
|
498
|
+
socket.write command
|
499
|
+
break nil if @no_reply
|
500
|
+
result = socket.gets
|
501
|
+
raise_on_error_response! result
|
502
|
+
result
|
503
|
+
end
|
504
|
+
end
|
505
|
+
end
|
506
|
+
|
507
|
+
##
|
508
|
+
# Prepend - 'add this data to an existing key before existing data'
|
509
|
+
# Please note the value is always passed to MemCacheDbd as raw since it
|
510
|
+
# doesn't make a lot of sense to concatenate marshalled data together.
|
511
|
+
def prepend(key, value)
|
512
|
+
raise MemCacheDbError, "Update of readonly cache" if @readonly
|
513
|
+
with_server(key) do |server, cache_key|
|
514
|
+
logger.debug { "prepend #{key} to #{server}: #{value ? value.to_s.size : 'nil'}" } if logger
|
515
|
+
command = "prepend #{cache_key} 0 0 #{value.to_s.size}#{noreply}\r\n#{value}\r\n"
|
516
|
+
|
517
|
+
with_socket_management(server) do |socket|
|
518
|
+
socket.write command
|
519
|
+
break nil if @no_reply
|
520
|
+
result = socket.gets
|
521
|
+
raise_on_error_response! result
|
522
|
+
result
|
523
|
+
end
|
524
|
+
end
|
525
|
+
end
|
526
|
+
|
527
|
+
##
|
528
|
+
# Removes +key+ from the cache.
|
529
|
+
# +expiry+ is ignored as it has been removed from the latest MemCacheDbd version.
|
530
|
+
|
531
|
+
def delete(key, expiry = 0)
|
532
|
+
raise MemCacheDbError, "Update of readonly cache" if @readonly
|
533
|
+
with_server(key) do |server, cache_key|
|
534
|
+
with_socket_management(server) do |socket|
|
535
|
+
logger.debug { "delete #{cache_key} on #{server}" } if logger
|
536
|
+
socket.write "delete #{cache_key}#{noreply}\r\n"
|
537
|
+
break nil if @no_reply
|
538
|
+
result = socket.gets
|
539
|
+
raise_on_error_response! result
|
540
|
+
result
|
541
|
+
end
|
542
|
+
end
|
543
|
+
end
|
544
|
+
|
545
|
+
|
546
|
+
|
547
|
+
##
# Returns statistics for each living MemCacheDb server as a hash keyed by
# "host:port". Stat names map to coerced values: "version" stays a string,
# "rusage_user"/"rusage_system" become Floats (seconds:microseconds), and
# purely numeric values become Integers; everything else is left as text.
# An explanation of the statistics can be found in the protocol docs:
#
#   http://code.sixapart.com/svn/MemCacheDbd/trunk/server/doc/protocol.txt
#
# Raises MemCacheDbError when no server is active or none answered.
#
# Fix: removed the dead `value = nil` predeclaration — the local is always
# assigned from the regex captures before it is read.
def stats
  raise MemCacheDbError, "No active servers" unless active?
  server_stats = {}

  @groups.each do |group|
    group.servers.each do |server|
      next unless server.alive?

      with_socket_management(server) do |socket|
        socket.write "stats\r\n"
        stats = {}
        while line = socket.gets
          raise_on_error_response! line
          break if line == "END\r\n"
          if line =~ /\ASTAT ([\S]+) ([\w\.\:]+)/
            name, value = $1, $2
            stats[name] =
              case name
              when 'version'
                value
              when 'rusage_user', 'rusage_system'
                # Reported as "seconds:microseconds"; fold into one Float.
                seconds, microseconds = value.split(/:/, 2)
                microseconds ||= 0
                Float(seconds) + (Float(microseconds) / 1_000_000)
              else
                if value =~ /\A\d+\Z/
                  value.to_i
                else
                  value
                end
              end
          end
        end
        server_stats["#{server.host}:#{server.port}"] = stats
      end
    end
  end

  raise MemCacheDbError, "No active servers" if server_stats.empty?
  server_stats
end
|
620
|
+
|
621
|
+
##
|
622
|
+
# Shortcut to get a value from the cache.
|
623
|
+
|
624
|
+
alias [] get
|
625
|
+
|
626
|
+
##
# Shortcut to save a value in the cache. No expiration is set on the
# entry; use set to specify an explicit expiry.
def []=(key, value)
  set(key, value)
end
|
633
|
+
|
634
|
+
protected unless $TESTING
|
635
|
+
|
636
|
+
##
# Builds the on-the-wire key for +key+, prefixing the namespace and
# separator when a namespace is configured. When @autofix_keys is enabled,
# keys containing whitespace or longer than 250 characters are replaced by
# a SHA1 digest so the server will accept them.
def make_cache_key(key)
  if @autofix_keys && (key =~ /\s/ || key_length(key) > 250)
    key = "#{Digest::SHA1.hexdigest(key)}-autofixed"
  end

  return key if namespace.nil?
  "#{@namespace}#{@namespace_separator}#{key}"
end
|
651
|
+
|
652
|
+
##
# Length of +key+ once the configured namespace and separator are
# prepended (0 extra when no namespace is set).
def key_length(key)
  extra = 0
  unless namespace.nil?
    extra += namespace.length
    extra += @namespace_separator.length unless @namespace_separator.nil?
  end
  key.length + extra
end
|
658
|
+
|
659
|
+
##
# Interoperable hash of +key+ (CRC32) used to place the key on the
# continuum; matches what other memcached clients compute.
def hash_for(key)
  Zlib.crc32 key
end
|
666
|
+
|
667
|
+
##
# Picks the group responsible for +key+ via consistent hashing on the
# continuum. With failover enabled, up to 20 rehash attempts are made to
# find a live group before giving up with MemCacheDbError.
# Raises ArgumentError for keys with whitespace or longer than 250 chars.
def get_group_for_key(key, options = {})
  raise ArgumentError, "illegal character in key #{key.inspect}" if key =~ /\s/
  raise ArgumentError, "key too long #{key.inspect}" if key.length > 250
  raise MemCacheDbError, "No servers available" if @groups.empty?
  return @groups.first if @groups.length == 1

  hkey = hash_for(key)

  20.times do |attempt|
    entry = @continuum[Continuum.binary_search(@continuum, hkey)]
    return entry.server if entry.server.alive?
    break unless failover
    # Perturb the key and rehash to try a different point on the ring.
    hkey = hash_for "#{attempt}#{key}"
  end

  raise MemCacheDbError, "No servers available"
end
|
689
|
+
|
690
|
+
##
# Issues a raw "decr" of +amount+ for +cache_key+ on +server+. Returns
# the new integer value, nil when the key is missing, or nil immediately
# in noreply mode.
def cache_decr(server, cache_key, amount)
  with_socket_management(server) do |socket|
    socket.write "decr #{cache_key} #{amount}#{noreply}\r\n"
    break nil if @no_reply
    response = socket.gets
    raise_on_error_response! response
    return nil if response == "NOT_FOUND\r\n"
    return response.to_i
  end
end
|
704
|
+
|
705
|
+
##
# Fetches the raw data for +cache_key+ from +server+. Returns nil on a
# cache miss. Closes the server and raises MemCacheDbError when the
# connection is lost or the response does not follow the get protocol.
def cache_get(server, cache_key)
  with_socket_management(server) do |socket|
    socket.write "get #{cache_key}\r\n"
    header = socket.gets # "VALUE <key> <flags> <bytes>\r\n"

    if header.nil?
      server.close
      raise MemCacheDbError, "lost connection to #{server.host}:#{server.port}"
    end

    raise_on_error_response! header
    return nil if header == "END\r\n"

    match = header.match(/(\d+)\r/)
    unless match
      server.close
      raise MemCacheDbError, "unexpected response #{header.inspect}"
    end

    payload = socket.read match[1].to_i
    socket.read 2 # trailing "\r\n"
    socket.gets   # consume "END\r\n"
    return payload
  end
end
|
732
|
+
|
733
|
+
def gets(key, raw = false)
|
734
|
+
with_server(key, true) do |server, cache_key|
|
735
|
+
logger.debug { "gets #{key} from #{server.inspect}" } if logger
|
736
|
+
result = with_socket_management(server) do |socket|
|
737
|
+
socket.write "gets #{cache_key}\r\n"
|
738
|
+
keyline = socket.gets # "VALUE <key> <flags> <bytes> <cas token>\r\n"
|
739
|
+
|
740
|
+
if keyline.nil? then
|
741
|
+
server.close
|
742
|
+
raise MemCacheDbError, "lost connection to #{server.host}:#{server.port}"
|
743
|
+
end
|
744
|
+
|
745
|
+
raise_on_error_response! keyline
|
746
|
+
return nil if keyline == "END\r\n"
|
747
|
+
|
748
|
+
unless keyline =~ /(\d+) (\w+)\r/ then
|
749
|
+
server.close
|
750
|
+
raise MemCacheDbError, "unexpected response #{keyline.inspect}"
|
751
|
+
end
|
752
|
+
value = socket.read $1.to_i
|
753
|
+
socket.read 2 # "\r\n"
|
754
|
+
socket.gets # "END\r\n"
|
755
|
+
[value, $2]
|
756
|
+
end
|
757
|
+
result[0] = Marshal.load result[0] unless raw
|
758
|
+
result
|
759
|
+
end
|
760
|
+
rescue TypeError => err
|
761
|
+
handle_error nil, err
|
762
|
+
end
|
763
|
+
|
764
|
+
|
765
|
+
##
# Fetches +cache_keys+ (a space-separated string of keys) from +server+
# using a single multi-get. Returns a hash of cache key => raw value for
# every key the server had; missing keys are simply absent. Raises
# MemCacheDbError if the connection drops before the terminating "END".
#
# Fix: store via the already-bound +key+ local instead of re-reading $1
# (the local was assigned but never used).
def cache_get_multi(server, cache_keys)
  with_socket_management(server) do |socket|
    values = {}
    socket.write "get #{cache_keys}\r\n"

    while keyline = socket.gets
      return values if keyline == "END\r\n"
      raise_on_error_response! keyline

      unless keyline =~ /\AVALUE (.+) (.+) (.+)/
        server.close
        raise MemCacheDbError, "unexpected response #{keyline.inspect}"
      end

      key, data_length = $1, $3
      values[key] = socket.read data_length.to_i
      socket.read(2) # "\r\n"
    end

    server.close
    raise MemCacheDbError, "lost connection to #{server.host}:#{server.port}" # TODO: retry here too
  end
end
|
791
|
+
|
792
|
+
##
# Issues a raw "incr" of +amount+ for +cache_key+ on +server+. Returns
# the new integer value, nil when the key is missing, or nil immediately
# in noreply mode.
def cache_incr(server, cache_key, amount)
  with_socket_management(server) do |socket|
    socket.write "incr #{cache_key} #{amount}#{noreply}\r\n"
    break nil if @no_reply
    response = socket.gets
    raise_on_error_response! response
    return nil if response == "NOT_FOUND\r\n"
    return response.to_i
  end
end
|
806
|
+
|
807
|
+
##
# Yields a socket connected to +server+ to the block, serialized through
# @mutex when @multithread is set.
#
# Socket-level failures (SocketError, EAGAIN, Timeout::Error) mark the
# server dead and are converted through handle_error. Protocol/IO
# failures (MemCacheDbError, SystemCallError, IOError) are retried once
# with a fresh socket before being converted. A nil socket raises
# IndexError so an enclosing with_server can fail over elsewhere.
#
# Note the connect code already marks a server dead for a timeout period,
# so the retry applies to unexpectedly lost connections, not to repeated
# connection attempts.
def with_socket_management(server, &block)
  check_multithread_status!
  @mutex.lock if @multithread
  retried = false

  begin
    socket = server.socket
    # IndexError signals "this server is out of whack"; with_server
    # catches it and restarts the operation on another server.
    raise IndexError, "No connection to server (#{server.status})" if socket.nil?

    block.call(socket)

  rescue SocketError, Errno::EAGAIN, Timeout::Error => err
    logger.warn { "Socket failure: #{err.message}" } if logger
    server.mark_dead(err)
    handle_error(server, err)

  rescue MemCacheDbError, SystemCallError, IOError => err
    logger.warn { "Generic failure: #{err.class.name}: #{err.message}" } if logger
    handle_error(server, err) if retried || socket.nil?
    retried = true
    retry
  end
ensure
  @mutex.unlock if @multithread
end
|
850
|
+
|
851
|
+
##
# Resolves +key+ to a (server, cache_key) pair via request_setup and
# yields them. On IndexError (dead connection) the whole lookup is
# retried once when more than one group is configured; otherwise the
# error is converted through handle_error.
def with_server(key, read = false)
  retried = false
  begin
    server, cache_key = request_setup(key, read)
    yield server, cache_key
  rescue IndexError => error
    logger.warn { "Server failed: #{error.class.name}: #{error.message}" } if logger
    if !retried && @groups.size > 1
      logger.info { "Connection to server #{server.inspect} DIED! Retrying operation..." } if logger
      retried = true
      retry
    end
    handle_error(nil, error)
  end
end
|
866
|
+
|
867
|
+
##
# Converts +error+ raised while talking to +server+ into a
# MemCacheDbError carrying the original message and backtrace. A
# MemCacheDbError is re-raised untouched; the server's socket is closed
# first when it is still connected.
def handle_error(server, error)
  raise error if error.is_a?(MemCacheDbError)
  server.close if server && server.status == "CONNECTED"
  wrapped = MemCacheDbError.new(error.message)
  wrapped.set_backtrace(error.backtrace)
  raise wrapped
end
|
877
|
+
|
878
|
+
##
# Protocol suffix appended to write commands when noreply mode is on;
# empty string otherwise.
def noreply
  return ' noreply' if @no_reply
  ''
end
|
881
|
+
|
882
|
+
##
# Resolves +key+ to the server that should handle the request plus the
# complete cache key. Reads go to a slave (round-robin); writes go to the
# group's master. Raises MemCacheDbError when no server is active.
def request_setup(key, read = false)
  raise MemCacheDbError, 'No active servers' unless active?
  cache_key = make_cache_key key
  group = get_group_for_key cache_key
  server = read ? group.next_slave : group.master
  [server, cache_key]
end
|
893
|
+
|
894
|
+
##
# Raises MemCacheDbError when +response+ is an ERROR / CLIENT_ERROR /
# SERVER_ERROR protocol line; otherwise returns nil.
def raise_on_error_response!(response)
  return unless response =~ /\A(?:CLIENT_|SERVER_)?ERROR(.*)/
  raise MemCacheDbError, $1.strip
end
|
899
|
+
|
900
|
+
##
# Builds the sorted consistent-hashing continuum for +groups+, giving
# each group a number of points proportional to its weight. Each point's
# value is the first 32 bits of SHA1("<group name>:<index>").
def create_continuum_for(groups)
  total_weight = groups.inject(0) { |sum, g| sum + g.weight }
  continuum = []

  groups.each do |group|
    points = entry_count_for(group, groups.size, total_weight)
    points.times do |idx|
      digest = Digest::SHA1.hexdigest("#{group.name}:#{idx}")
      continuum << Continuum::Entry.new(Integer("0x#{digest[0..7]}"), group)
    end
  end

  continuum.sort_by { |entry| entry.value }
end
|
914
|
+
|
915
|
+
##
# Number of continuum points to allocate to +server+ (a group),
# proportional to its weight relative to +total_weight+ across
# +total_servers+ groups.
def entry_count_for(server, total_servers, total_weight)
  points = total_servers * Continuum::POINTS_PER_SERVER * server.weight
  (points / Float(total_weight)).floor
end
|
918
|
+
|
919
|
+
##
# Guards against cross-thread use of a non-multithreaded client: raises
# MemCacheDbError when @multithread is off and the calling thread is not
# the one recorded in Thread.current[:memcachedb_client].
def check_multithread_status!
  return if @multithread
  return if Thread.current[:memcachedb_client] == self.object_id

  raise MemCacheDbError, <<-EOM
You are accessing this MemCacheDb-client instance from multiple threads but have not enabled multithread support.
Normally: MemCacheDb.new(['localhost:11211'], :multithread => true)
In Rails: config.cache_store = [:mem_cache_store, 'localhost:11211', { :multithread => true }]
  EOM
end
|
930
|
+
|
931
|
+
|
932
|
+
##
# A replication group: one writable master plus read slaves. The master
# is discovered by probing each live server with a test "set"; reads are
# distributed round-robin over all servers (the master serves reads too).
#
# Fix: removed the stray `p "Using master ..."` debug print to stdout —
# the logger line immediately after it already reports the same thing.
class Group
  attr_reader :weight
  attr_reader :servers
  attr_reader :name

  def initialize(memcache, servers, name, weight)
    @memcache = memcache
    @logger = memcache.logger
    @servers = servers
    @slaves = []
    @weight = weight || DEFAULT_WEIGHT
    @roundrobin = 0
    @name = name || 'default'
    determine_master
  end

  # The writable master, re-discovered if the cached one has died.
  def master
    determine_master unless @master.alive?
    @master
  end

  # Next server for a read, round-robin over @slaves.
  # NOTE(review): recurses until a live server is found — if every server
  # is dead this does not terminate; callers rely on alive? being checked
  # first. TODO confirm intended behavior before hardening.
  def next_slave
    @roundrobin = ((@roundrobin + 1) >= @slaves.size ? 0 : @roundrobin + 1)
    server = @slaves[@roundrobin]
    server = next_slave unless server.alive?
    server
  end

  def alive?
    master.alive?
  end

  protected

  # Probes each live server with a throwaway "set"; the first one that
  # answers STORED is the master. All servers (master included) are kept
  # as read slaves. Raises MemCacheDbError when no master is found.
  def determine_master
    @slaves = []
    @servers.each do |s|
      if s.alive?
        # check if able to write
        command = "set CLIENT_TEST_MASTER 0 0 1\r\n0\r\n"

        @memcache.with_socket_management(s) do |socket|
          socket.write command
          result = socket.gets
          if result == "STORED\r\n"
            # this is the master
            @master = s
            break
          end
        end
      end
    end
    # masters can also be used to read
    @slaves = @servers
    raise MemCacheDbError.new 'No Master Server found' if @master.nil?
    @logger.info "Using master #{@master}" if @logger
    @logger.info "Using slaves #{@slaves}" if @logger
  end
end
|
992
|
+
|
993
|
+
##
# Represents a single MemCacheDb server instance and manages its socket,
# liveness and retry state.
#
# Fix: connect_to's setsockopt fallback used `rescue Exception`, which
# also swallows SignalException/SystemExit/NoMemoryError; narrowed to
# StandardError (platform setsockopt failures are SystemCallError, a
# StandardError subclass, so the intended Solaris workaround still works).
class Server

  ##
  # The amount of time to wait before attempting to re-establish a
  # connection with a server that is marked dead.
  RETRY_DELAY = 30.0

  # The host the memcached server is running on.
  attr_reader :host

  # The port the memcached server is listening on.
  attr_reader :port

  # The weight given to the server.
  attr_reader :weight

  # The time of next retry if the connection is dead.
  attr_reader :retry

  # A text status string describing the state of the server.
  attr_reader :status

  attr_reader :logger

  ##
  # Create a new Server for the memcached instance listening on the given
  # host and port, weighted by the given weight. +memcache+ supplies the
  # socket timeout and logger.
  def initialize(memcache, host, port = DEFAULT_PORT, weight = DEFAULT_WEIGHT)
    raise ArgumentError, "No host specified" if host.nil? or host.empty?
    raise ArgumentError, "No port specified" if port.nil? or port.to_i.zero?

    @host   = host
    @port   = port.to_i
    @weight = weight.to_i

    @sock   = nil
    @retry  = nil
    @status = 'NOT CONNECTED'
    @timeout = memcache.timeout
    @logger  = memcache.logger
  end

  ##
  # Return a string representation of the server object.
  def inspect
    "<MemCacheDb::Server: %s:%d [%d] (%s)>" % [@host, @port, @weight, @status]
  end

  ##
  # Check whether the server connection is alive. Connects (or reconnects
  # after the retry window) as a side effect.
  def alive?
    !!socket
  end

  ##
  # Try to connect to the server. Returns the connected socket object on
  # success or nil on failure (including while inside the dead-retry
  # window).
  def socket
    return @sock if @sock and not @sock.closed?

    @sock = nil

    # If the host was dead, don't retry for a while.
    return if @retry and @retry > Time.now

    # Attempt to connect if not already connected.
    begin
      @sock = connect_to(@host, @port, @timeout)
      @sock.setsockopt Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1
      @retry = nil
      @status = 'CONNECTED'
    rescue SocketError, SystemCallError, IOError, Timeout::Error => err
      logger.warn { "Unable to open socket: #{err.class.name}, #{err.message}" } if logger
      mark_dead err
    end

    return @sock
  end

  ##
  # Opens a BufferedIO to host:port, applying +timeout+ both to the
  # connect and — where the platform allows — to the raw socket.
  def connect_to(host, port, timeout = nil)
    sock =
      if timeout
        MemCacheDbTimer.timeout(timeout) { TCPSocket.new(host, port) }
      else
        TCPSocket.new(host, port)
      end

    io = MemCacheDb::BufferedIO.new(sock)
    io.read_timeout = timeout
    # Getting reports from several customers, including 37signals,
    # that the non-blocking timeouts in 1.7.5 don't seem to be reliable.
    # It can't hurt to set the underlying socket timeout also, if possible.
    if timeout
      secs = Integer(timeout)
      usecs = Integer((timeout - secs) * 1_000_000)
      optval = [secs, usecs].pack("l_2")
      begin
        io.setsockopt Socket::SOL_SOCKET, Socket::SO_RCVTIMEO, optval
        io.setsockopt Socket::SOL_SOCKET, Socket::SO_SNDTIMEO, optval
      rescue StandardError => ex
        # Solaris, for one, does not like/support socket timeouts.
        @logger.info "[memcachedb-client] Unable to use raw socket timeouts: #{ex.class.name}: #{ex.message}" if @logger
      end
    end
    io
  end

  ##
  # Close the connection to the server. The server is not considered
  # dead.
  def close
    @sock.close if @sock && !@sock.closed?
    @sock   = nil
    @retry  = nil
    @status = "NOT CONNECTED"
  end

  ##
  # Mark the server as dead, close its socket and schedule the next
  # connection attempt RETRY_DELAY seconds from now.
  def mark_dead(error)
    @sock.close if @sock && !@sock.closed?
    @sock  = nil
    @retry = Time.now + RETRY_DELAY

    reason = "#{error.class.name}: #{error.message}"
    @status = sprintf "%s:%s DEAD (%s), will retry at %s", @host, @port, reason, @retry
    @logger.info { @status } if @logger
  end

end
|
1148
|
+
|
1149
|
+
##
# Base MemCacheDb exception class; all client errors are raised as this
# (or as a wrapped copy of the underlying error).

class MemCacheDbError < RuntimeError
end
|
1153
|
+
|
1154
|
+
##
# Thin Net::BufferedIO wrapper used for server sockets: exposes
# setsockopt on the underlying raw socket and a protocol-line gets.
class BufferedIO < Net::BufferedIO # :nodoc:
  BUFSIZE = 1024 * 16

  # Ruby < 1.9.1 lacks a timeout-aware rbuf_fill; emulate one with a
  # nonblocking read plus IO.select on the read timeout.
  if RUBY_VERSION < '1.9.1'
    def rbuf_fill
      begin
        @rbuf << @io.read_nonblock(BUFSIZE)
      rescue Errno::EWOULDBLOCK
        retry unless @read_timeout
        if IO.select([@io], nil, nil, @read_timeout)
          retry
        else
          raise Timeout::Error, 'IO timeout'
        end
      end
    end
  end

  # Delegate socket-option changes to the wrapped raw socket.
  def setsockopt(*args)
    @io.setsockopt(*args)
  end

  # Read one protocol line, up to and including "\n".
  def gets
    readuntil("\n")
  end
end
|
1180
|
+
|
1181
|
+
end
|
1182
|
+
|
1183
|
+
##
# Consistent-hashing ring support: a sorted array of Entry points and a
# binary search over their hashed values.
module Continuum
  POINTS_PER_SERVER = 160 # this is the default in libmemcached

  ##
  # Finds the closest index in +ary+ whose entry value is <= +value+.
  # Returns -1 when +value+ is smaller than every entry (which, used as
  # an array index, wraps to the last entry).
  def self.binary_search(ary, value, &block)
    lower = 0
    upper = ary.size - 1

    while lower <= upper
      mid = (lower + upper) / 2
      case ary[mid].value <=> value
      when 0
        return mid
      when 1
        upper = mid - 1
      else
        lower = mid + 1
      end
    end

    upper
  end

  ##
  # A point on the continuum: a hashed +value+ mapping to a +server+
  # (here, a replication group).
  class Entry
    attr_reader :value
    attr_reader :server

    def initialize(val, srv)
      @value = val
      @server = srv
    end

    def inspect
      "<#{value}, #{server.host}:#{server.port}>"
    end
  end

end
|
1222
|
+
require 'continuum_native'
|