Ruby-MemCache 0.0.1
Sign up to get free protection for your applications and to get access to all the features.
- data/.irbrc +38 -0
- data/README +69 -0
- data/docs/CATALOG +9 -0
- data/docs/makedocs.rb +191 -0
- data/install.rb +185 -0
- data/lib/memcache.rb +1266 -0
- data/test.rb +126 -0
- data/tests/errorhandling.tests.rb +120 -0
- data/tests/getset.tests.rb +365 -0
- data/tests/instantiation.tests.rb +226 -0
- data/tests/mctestcase.rb +379 -0
- data/tests/require.tests.rb +37 -0
- data/tests/stats.tests.rb +216 -0
- data/utils.rb +672 -0
- metadata +61 -0
data/lib/memcache.rb
ADDED
@@ -0,0 +1,1266 @@
|
|
1
|
+
#!/usr/bin/ruby
|
2
|
+
#
|
3
|
+
# A Ruby client library for memcached (memory cache daemon)
|
4
|
+
#
|
5
|
+
# == Synopsis
|
6
|
+
#
|
7
|
+
# require 'memcache'
|
8
|
+
#
|
9
|
+
# cache = MemCache::new '10.0.0.15:11211',
|
10
|
+
# '10.0.0.15:11212',
|
11
|
+
# '10.0.0.17:11211:3', # weighted
|
12
|
+
# :debug => true,
|
13
|
+
# :c_threshold => 100_000,
|
14
|
+
# :compression => false,
|
15
|
+
# :namespace => 'foo'
|
16
|
+
# cache.servers += [ "10.0.0.15:11211:5" ]
|
17
|
+
# cache.c_threshold = 10_000
|
18
|
+
# cache.compression = true
|
19
|
+
#
|
20
|
+
# # Cache simple values with simple String or Symbol keys
|
21
|
+
# cache["my_key"] = "Some value"
|
22
|
+
# cache[:other_key] = "Another value"
|
23
|
+
#
|
24
|
+
# # ...or more-complex values
|
25
|
+
# cache["object_key"] = { 'complex' => [ "object", 2, 4 ] }
|
26
|
+
#
|
27
|
+
# # ...or more-complex keys
|
28
|
+
# cache[ Time::now.to_a[1..7] ] ||= 0
|
29
|
+
#
|
30
|
+
# # ...or both
|
31
|
+
# cache[userObject] = { :attempts => 0, :edges => [], :nodes => [] }
|
32
|
+
#
|
33
|
+
# val = cache["my_key"] # => "Some value"
|
34
|
+
# val = cache["object_key"] # => {"complex" => ["object",2,4]}
|
35
|
+
# print val['complex'][2] # => 4
|
36
|
+
#
|
37
|
+
# == Notes
|
38
|
+
#
|
39
|
+
# * Symbols are stringified currently because that's the only way to guarantee
|
40
|
+
# that they hash to the same value across processes.
|
41
|
+
#
|
42
|
+
#
|
43
|
+
# == Known Bugs
|
44
|
+
#
|
45
|
+
# * If one or more memcacheds error when asked for 'map' or 'malloc' stats, it
|
46
|
+
# won't be possible to retrieve them from any of the other servers,
|
47
|
+
# either. This is due to the way that the client handles server error
|
48
|
+
# conditions, and needs rethinking.
|
49
|
+
#
|
50
|
+
#
|
51
|
+
# == Authors
|
52
|
+
#
|
53
|
+
# * Michael Granger <ged@FaerieMUD.org>
|
54
|
+
#
|
55
|
+
# Thanks to Martin Chase and Rick Bradley for peer review, bugfixes, and
|
56
|
+
# suggestions.
|
57
|
+
#
|
58
|
+
#
|
59
|
+
# == Copyright
|
60
|
+
#
|
61
|
+
# Copyright (c) 2003, 2004 The FaerieMUD Consortium. All rights reserved.
|
62
|
+
#
|
63
|
+
# This module is free software. You may use, modify, and/or redistribute this
|
64
|
+
# software under the same terms as Ruby.
|
65
|
+
#
|
66
|
+
#
|
67
|
+
# == Subversion Id
|
68
|
+
#
|
69
|
+
# $Id: memcache.rb 31 2004-11-13 17:32:54Z ged $
|
70
|
+
#
|
71
|
+
|
72
|
+
require 'io/reactor'
|
73
|
+
require 'socket'
|
74
|
+
require 'sync'
|
75
|
+
require 'timeout'
|
76
|
+
require 'zlib'
|
77
|
+
require 'uri'
|
78
|
+
|
79
|
+
|
80
|
+
### A Ruby implementation of the 'memcached' client interface.
|
81
|
+
class MemCache
|
82
|
+
include Socket::Constants


### Class constants

# SVN Revision
SVNRev = %q$Rev: 31 $

# SVN Id
SVNId = %q$Id: memcache.rb 31 2004-11-13 17:32:54Z ged $

# SVN URL
SVNURL = %q$URL: svn+ssh://svn.FaerieMUD.org/var/svn/RMemCache/trunk/lib/memcache.rb $

# Default compression threshold, in bytes: values longer than this are
# candidates for zlib compression (see #prep_value).
DefaultCThreshold = 10_000

# Default memcached port
DefaultPort = 11211

# Default 'weight' value assigned to a server -- the number of hash
# buckets the server receives in #get_server's distribution.
DefaultServerWeight = 1

# Minimum percentage length compressed values have to be to be preferred
# over the uncompressed version.
MinCompressionRatio = 0.80

# Default constructor options (see #initialize for the meaning of each)
DefaultOptions = {
	:debug => false,
	:c_threshold => DefaultCThreshold,
	:compression => true,
	:namespace => nil,
	:readonly => false,
	:urlencode => true,
}

# Storage flags: a bitmask sent with each stored value so #restore can
# reverse the transformations that #prep_value applied.
F_SERIALIZED = 1
F_COMPRESSED = 2
F_ESCAPED = 4
F_NUMERIC = 8

# Line-ending used by the memcached text protocol
CRLF = "\r\n"

# Flags to use for the BasicSocket#send call. Note that Ruby's socket
# library doesn't define MSG_NOSIGNAL, but if it ever does it'll be used.
SendFlags = 0
SendFlags |= Socket.const_get( :MSG_NOSIGNAL ) if
	Socket.const_defined?( :MSG_NOSIGNAL )

# Patterns for matching against server error replies
GENERAL_ERROR = /^ERROR\r\n/
CLIENT_ERROR = /^CLIENT_ERROR\s+([^\r\n]+)\r\n/
SERVER_ERROR = /^SERVER_ERROR\s+([^\r\n]+)\r\n/
ANY_ERROR = Regexp::union( GENERAL_ERROR, CLIENT_ERROR, SERVER_ERROR )

# Terminator regexps for the two styles of replies from memcached:
# single-line replies end with CRLF, multi-line replies (get/stats)
# end with an "END" line.
LINE_TERMINATOR = Regexp::union( /\r\n$/, ANY_ERROR )
MULTILINE_TERMINATOR = Regexp::union( /^END\r\n$/, ANY_ERROR )

# Callables to convert various parts of the server stats reply to
# appropriate object types (see #parse_stats).
StatConverters = {
	:__default__ => lambda {|stat| Integer(stat) },
	:version => lambda {|stat| stat }, # Already a String
	:rusage_user => lambda {|stat|
		# "seconds:microseconds" -> Float seconds
		seconds, microseconds = stat.split(/:/, 2)
		Float(seconds) + (Float(microseconds) / 1_000_000)
	},
	:rusage_system => lambda {|stat|
		seconds, microseconds = stat.split(/:/, 2)
		Float(seconds) + (Float(microseconds) / 1_000_000)
	}
}
|
158
|
+
|
159
|
+
|
160
|
+
|
161
|
+
#################################################################
|
162
|
+
### I N S T A N C E M E T H O D S
|
163
|
+
#################################################################
|
164
|
+
|
165
|
+
### Create a new memcache object that will distribute gets and sets between
|
166
|
+
### the specified +servers+. You can also pass one or more options as hash
|
167
|
+
### arguments. Valid options are:
|
168
|
+
### [<b>:compression</b>]
|
169
|
+
### Set the compression flag. See #use_compression? for more info.
|
170
|
+
### [<b>:c_threshold</b>]
|
171
|
+
### Set the compression threshold, in bytes. See #c_threshold for more
|
172
|
+
### info.
|
173
|
+
### [<b>:debug</b>]
|
174
|
+
### Send debugging output to the object specified as a value if it
|
175
|
+
### responds to #call, and to $deferr if set to anything else but +false+
|
176
|
+
### or +nil+.
|
177
|
+
### [<b>:namespace</b>]
|
178
|
+
### If specified, all keys will have the given value prepended before
|
179
|
+
### accessing the cache. Defaults to +nil+.
|
180
|
+
### [<b>:urlencode</b>]
|
181
|
+
### If this is set, all keys and values will be urlencoded. If this is not
|
182
|
+
### set, keys and/or values with certain characters in them may generate
|
183
|
+
### client errors when interacting with the cache, but the values used
|
184
|
+
### will be more compatible with those set by other clients. Defaults to
|
185
|
+
### +true+.
|
186
|
+
### [<b>:readonly</b>]
|
187
|
+
### If this is set, any attempt to write to the cache will generate an
|
188
|
+
### exception. Defaults to +false+.
|
189
|
+
### If a +block+ is given, it is used as the default hash function for
|
190
|
+
### determining which server the key (given as an argument to the block) is
|
191
|
+
### stored/fetched from.
|
192
|
+
def initialize( *servers, &block )
	# A trailing Hash argument, if any, is the options hash
	opts = servers.pop if servers.last.is_a?( Hash )
	opts = DefaultOptions.merge( opts || {} )

	@debug = opts[:debug]

	@c_threshold = opts[:c_threshold]
	@compression = opts[:compression]
	@namespace = opts[:namespace]
	@readonly = opts[:readonly]
	@urlencode = opts[:urlencode]

	# Bucket list is built lazily by #get_server
	@buckets = nil
	@hashfunc = block || lambda {|val| val.hash}
	@mutex = Sync::new

	# IO::Reactor instance -- presumably used by the IO layer defined
	# later in this file for talking to several servers at once; confirm
	# against the rest of the file before refactoring.
	@reactor = IO::Reactor::new

	# Stats is an auto-vivifying hash -- an access to a key that hasn't yet
	# been created generates a new stats subhash
	@stats = Hash::new {|hsh,k|
		hsh[k] = {:count => 0, :utime => 0.0, :stime => 0.0}
	}
	@stats_callback = nil

	# Run the server list through the #servers= mutator so Strings/Arrays
	# are normalized into MemCache::Server objects.
	self.servers = servers
end
|
219
|
+
|
220
|
+
|
221
|
+
### Return a human-readable version of the cache object.
|
222
|
+
def inspect
|
223
|
+
"<MemCache: %d servers/%s buckets: ns: %p, debug: %p, cmp: %p, ro: %p>" % [
|
224
|
+
@servers.nitems,
|
225
|
+
@buckets.nil? ? "?" : @buckets.nitems,
|
226
|
+
@namespace,
|
227
|
+
@debug,
|
228
|
+
@compression,
|
229
|
+
@readonly,
|
230
|
+
]
|
231
|
+
end
|
232
|
+
|
233
|
+
|
234
|
+
######
public
######

# The compression threshold setting, in bytes. Values larger than this
# threshold will be compressed by #[]= (and #set) and decompressed by #[]
# (and #get).
attr_accessor :c_threshold
alias_method :compression_threshold, :c_threshold

# Turn compression on or off temporarily.
attr_accessor :compression

# Debugging flag -- when set to +true+, debugging output will be sent to
# $deferr. If set to an object which supports either #<< or #call, debugging
# output will be sent to it via this method instead (#call being
# preferred). If set to +false+ or +nil+, no debugging will be generated.
attr_accessor :debug

# The function (a Method or Proc object) which will be used to hash keys for
# determining where values are stored.
attr_accessor :hashfunc

# The Array of MemCache::Server objects that represent the memcached
# instances the client will use.
attr_reader :servers

# The namespace that will be prepended to all keys set/fetched from the
# cache.
attr_accessor :namespace

# Hash of counts of cache operations, keyed by operation (e.g., +:delete+,
# +:flush_all+, +:set+, +:add+, etc.). Each value of the hash is another
# hash with statistics for the corresponding operation:
#   {
#     :stime  => <total system time of all calls>,
#     :utime  => <total user time> of all calls,
#     :count  => <number of calls>,
#   }
attr_reader :stats

# Hash of system/user time-tuples for each op.
# NOTE(review): nothing visible in this file ever assigns @times, so this
# reader may always return nil -- confirm against the rest of the library.
attr_reader :times

# Settable statistics callback -- setting this to an object that responds to
# #call will cause it to be called once for each operation with the
# operation type (as a Symbol), and Struct::Tms objects created immediately
# before and after the operation.
attr_accessor :stats_callback

# The Sync mutex object for the cache
attr_reader :mutex
|
286
|
+
|
287
|
+
|
288
|
+
### Returns +true+ if the cache was created read-only.
|
289
|
+
### Predicate: +true+ if this cache was constructed with :readonly set.
def readonly?
	return @readonly
end
|
292
|
+
|
293
|
+
|
294
|
+
### Set the servers the memcache will distribute gets and sets
|
295
|
+
### between. Arguments can be either Strings of the form
|
296
|
+
### <tt>"hostname:port"</tt> (or "hostname:port:weight"), or
|
297
|
+
### MemCache::Server objects.
|
298
|
+
def servers=( servers )
	@mutex.synchronize( Sync::EX ) {
		@servers = servers.collect {|svr|
			self.debug_msg( "Transforming svr = %p", svr )

			case svr
			when String
				# "host", "host:port", or "host:port:weight"
				host, port, weight = svr.split( /:/, 3 )
				weight ||= DefaultServerWeight
				port ||= DefaultPort
				Server::new( host, port.to_i, weight )

			when Array
				# ["host:port", weight]
				host, port = svr[0].split(/:/, 2)
				weight = svr[1] || DefaultServerWeight
				port ||= DefaultPort
				Server::new( host, port.to_i, weight )

			when Server
				# Already normalized; use as-is
				svr

			else
				raise TypeError, "cannot convert %s to MemCache::Server" %
					svr.class.name
			end
		}

		# Invalidate the bucket list so #get_server rebuilds it lazily
		# from the new server set.
		@buckets = nil
	}

	return @servers # (ignored)
end
|
330
|
+
|
331
|
+
|
332
|
+
### Returns +true+ if there is at least one active server for the receiver.
|
333
|
+
### Predicate: +true+ when the server list is non-empty.
def active?
	!@servers.empty?
end
|
336
|
+
|
337
|
+
|
338
|
+
### Fetch and return the values associated with the given +keys+ from the
|
339
|
+
### cache. Returns +nil+ for any value that wasn't in the cache.
|
340
|
+
### Fetch the given +keys+ under a shared lock and splat-return their
### values: one key yields a single value, several keys yield an Array.
### Missing keys come back as +nil+.
def get( *keys )
	raise MemCacheError, "no active servers" unless self.active?

	fetched = nil
	@mutex.synchronize( Sync::SH ) do
		fetched = self.fetch( :get, *keys )
	end

	return *( fetched.values_at(*keys) )
end
|
350
|
+
alias_method :[], :get
|
351
|
+
|
352
|
+
|
353
|
+
### Fetch and return the values associated with the given +keys+ from the
|
354
|
+
### cache as a Hash object. Returns +nil+ for any value that wasn't in the
|
355
|
+
### cache.
|
356
|
+
### Fetch the given +keys+ and return them as a Hash of key => value
### (missing keys map to +nil+).
def get_hash( *keys )
	raise MemCacheError, "no active servers" unless self.active?

	result = nil
	@mutex.synchronize( Sync::SH ) do
		result = self.fetch( :get_hash, *keys )
	end
	result
end
|
362
|
+
|
363
|
+
|
364
|
+
### Fetch, delete, and return the given +keys+ atomically from the cache.
|
365
|
+
#def take( *keys )
|
366
|
+
# raise MemCacheError, "no active servers" unless self.active?
|
367
|
+
# raise MemCacheError, "readonly cache" if self.readonly?
|
368
|
+
#
|
369
|
+
# hash = @mutex.synchronize( Sync::EX ) {
|
370
|
+
# self.fetch( :take, *keys )
|
371
|
+
# }
|
372
|
+
#
|
373
|
+
# return hash[*keys]
|
374
|
+
#end
|
375
|
+
|
376
|
+
|
377
|
+
### Unconditionally set the entry in the cache under the given +key+ to
|
378
|
+
### +value+, returning +true+ on success. The optional +exptime+ argument
|
379
|
+
### specifies an expiration time for the tuple, in seconds relative to the
|
380
|
+
### present if it's less than 60*60*24*30 (30 days), or as an absolute Unix
|
381
|
+
### time (E.g., Time#to_i) if greater. If +exptime+ is +0+, the entry will
|
382
|
+
### never expire.
|
383
|
+
### Unconditionally store +val+ under +key+ with the given expiration
### time; returns true on a STORED reply.
def set( key, val, exptime=0 )
	raise MemCacheError, "no active servers" unless self.active?
	raise MemCacheError, "readonly cache" if self.readonly?

	result = nil
	@mutex.synchronize( Sync::EX ) do
		result = self.store( :set, key, val, exptime )
	end

	result
end
|
394
|
+
|
395
|
+
|
396
|
+
### Multi-set method; unconditionally set each key/value pair in
|
397
|
+
### +pairs+. The call to set each value is done synchronously, but until
|
398
|
+
### memcached supports a multi-set operation this is only a little more
|
399
|
+
### efficient than calling #set for each pair yourself.
|
400
|
+
### Set every key/value in +pairs+ (anything responding to #each_pair),
### one #store call per pair, and return the Array of per-pair results.
def set_many( pairs )
	raise MemCacheError, "no active servers" unless self.active?
	raise MemCacheError, "readonly cache" if self.readonly?
	raise MemCacheError,
		"expected an object that responds to the #each_pair message" unless
		pairs.respond_to?( :each_pair )

	results = []

	# One synchronous #store per pair until memcached grows a real
	# multi-set command.
	@mutex.synchronize( Sync::EX ) do
		pairs.each_pair do |key, val|
			results << self.store( :set, key, val, 0 )
		end
	end

	results
end
|
419
|
+
|
420
|
+
|
421
|
+
### Index assignment method. Supports slice-setting, e.g.:
|
422
|
+
### cache[ :foo, :bar ] = 12, "darkwood"
|
423
|
+
### This uses #set_many internally if there is more than one key, or #set if
|
424
|
+
### there is only one.
|
425
|
+
def []=( *args )
	raise MemCacheError, "no active servers" unless self.active?
	raise MemCacheError, "readonly cache" if self.readonly?

	# Use #set if there's only one pair (one key + one value)
	if args.length <= 2
		self.set( *args )
	else
		# Args from a slice-style call like
		#   cache[ :foo, :bar ] = 1, 2
		# will be passed in like:
		#   ( :foo, :bar, [1, 2] )
		# so just shift the value part off, transpose them into a Hash and
		# pass them on to #set_many.
		vals = args.pop
		vals = [vals] unless vals.is_a?( Array ) # Handle [:a,:b] = 1
		pairs = Hash[ *([ args, vals ].transpose) ]
		self.set_many( pairs )
	end

	# NB: Ruby always evaluates an assignment expression to its
	# right-hand side, so whatever #[]= returns is invisible to callers.
	return nil
end
|
449
|
+
|
450
|
+
|
451
|
+
### Like #set, but only stores the tuple if it doesn't already exist.
|
452
|
+
### Store +val+ under +key+ only when the key is not already present.
def add( key, val, exptime=0 )
	raise MemCacheError, "no active servers" unless self.active?
	raise MemCacheError, "readonly cache" if self.readonly?

	result = nil
	@mutex.synchronize( Sync::EX ) do
		result = self.store( :add, key, val, exptime )
	end
	result
end
|
460
|
+
|
461
|
+
|
462
|
+
### Like #set, but only stores the tuple if it already exists.
|
463
|
+
### Store +val+ under +key+ only when the key already exists.
def replace( key, val, exptime=0 )
	raise MemCacheError, "no active servers" unless self.active?
	raise MemCacheError, "readonly cache" if self.readonly?

	result = nil
	@mutex.synchronize( Sync::EX ) do
		result = self.store( :replace, key, val, exptime )
	end
	result
end
|
471
|
+
|
472
|
+
|
473
|
+
### Atomically increment the value associated with +key+ by +val+. Returns
|
474
|
+
### +nil+ if the value doesn't exist in the cache, or the new value after
|
475
|
+
### incrementing if it does. +val+ should be zero or greater. Overflow on
|
476
|
+
### the server is not checked. Beware of values approaching 2**32.
|
477
|
+
### Atomically add +val+ to the counter stored at +key+; returns the new
### value, or +nil+ when the key is absent.
def incr( key, val=1 )
	raise MemCacheError, "no active servers" unless self.active?
	raise MemCacheError, "readonly cache" if self.readonly?

	result = nil
	@mutex.synchronize( Sync::EX ) do
		result = self.incrdecr( :incr, key, val )
	end
	result
end
|
485
|
+
|
486
|
+
|
487
|
+
### Like #incr, but decrements. Unlike #incr, underflow is checked, and new
|
488
|
+
### values are capped at 0. If server value is 1, a decrement of 2 returns
|
489
|
+
### 0, not -1.
|
490
|
+
### Atomically subtract +val+ from the counter stored at +key+ (the
### server floors the result at 0); returns the new value, or +nil+
### when the key is absent.
def decr( key, val=1 )
	raise MemCacheError, "no active servers" unless self.active?
	raise MemCacheError, "readonly cache" if self.readonly?

	result = nil
	@mutex.synchronize( Sync::EX ) do
		result = self.incrdecr( :decr, key, val )
	end
	result
end
|
498
|
+
|
499
|
+
|
500
|
+
### Delete the entry with the specified key, optionally at the specified
|
501
|
+
### +time+.
|
502
|
+
def delete( key, time=nil )
	raise MemCacheError, "no active servers" unless self.active?
	raise MemCacheError, "readonly cache" if self.readonly?
	svr = nil

	res = @mutex.synchronize( Sync::EX ) {
		svr = self.get_server( key )
		cachekey = self.make_cache_key( key )

		self.add_stat( :delete ) do
			# Protocol form: "delete <key> [<time>]"; the optional +time+
			# delays re-adding of the key on the server.
			cmd = "delete %s%s" % [ cachekey, time ? " #{time.to_i}" : "" ]
			# NOTE(review): #send here is this client's own command
			# dispatcher (a Hash of server => command), presumably defined
			# later in the file -- it shadows Object#send. Confirm before
			# refactoring.
			self.send( svr => cmd )
		end
	}

	# True only when the server acknowledged with DELETED
	res && res[svr].rstrip == "DELETED"
end
|
519
|
+
|
520
|
+
|
521
|
+
### Mark all entries on all servers as expired.
|
522
|
+
def flush_all
	raise MemCacheError, "no active servers" unless self.active?
	raise MemCacheError, "readonly cache" if self.readonly?

	res = @mutex.synchronize( Sync::EX ) {

		# Build commandset for servers that are alive
		servers = @servers.select {|svr| svr.alive? }
		cmds = self.make_command_map( "flush_all", servers )

		# Send them in parallel
		self.add_stat( :flush_all ) {
			self.send( cmds )
		}
	}

	# True only if every server acknowledged with OK
	!res.find {|svr,st| st.rstrip != 'OK'}
end
|
540
|
+
alias_method :clear, :flush_all
|
541
|
+
|
542
|
+
|
543
|
+
### Return a hash of statistics hashes for each of the specified +servers+.
|
544
|
+
### Gather 'stats' output from every live server in +servers+ and
### return a hash of server => parsed-stats-Hash.
def server_stats( servers=@servers )
	live = servers.select {|svr| svr.alive? }
	commands = self.make_command_map( "stats", live )

	self.add_stat( :server_stats ) do
		self.send( commands ) {|svr, reply| self.parse_stats( reply ) }
	end
end
|
557
|
+
|
558
|
+
|
559
|
+
### Reset statistics on the given +servers+.
|
560
|
+
### Issue 'stats reset' to every live server in +servers+; each result
### is +true+ when the server replied RESET.
def server_reset_stats( servers=@servers )
	live = servers.select {|svr| svr.alive? }
	commands = self.make_command_map( "stats reset", live )

	self.add_stat( :server_reset_stats ) do
		self.send( commands ) {|svr, reply| reply.rstrip == "RESET" }
	end
end
|
573
|
+
|
574
|
+
|
575
|
+
### Return memory maps from the specified +servers+ (not supported on all
|
576
|
+
### platforms)
|
577
|
+
### Issue 'stats maps' to every live server in +servers+; returns the
### raw replies, or an empty Hash when a server rejects the command.
def server_map_stats( servers=@servers )
	live = servers.select {|svr| svr.alive? }
	commands = self.make_command_map( "stats maps", live )

	self.add_stat( :server_map_stats ) do
		self.send( commands )
	end
rescue MemCache::ServerError => err
	self.debug_msg "%p doesn't support 'stats maps'" % err.server
	return {}
end
|
591
|
+
|
592
|
+
|
593
|
+
### Return malloc stats from the specified +servers+ (not supported on all
|
594
|
+
### platforms)
|
595
|
+
### Issue 'stats malloc' to every live server in +servers+ and parse
### the replies; returns an empty Hash when the command is unsupported.
def server_malloc_stats( servers=@servers )
	live = servers.select {|svr| svr.alive? }
	commands = self.make_command_map( "stats malloc", live )

	self.add_stat( :server_malloc_stats ) do
		self.send( commands ) {|svr, reply| self.parse_stats( reply ) }
	end
rescue MemCache::InternalError
	self.debug_msg( "One or more servers doesn't support 'stats malloc'" )
	return {}
end
|
611
|
+
|
612
|
+
|
613
|
+
### Return slab stats from the specified +servers+
|
614
|
+
### Issue 'stats slabs' to every live server in +servers+ and return the
### parsed replies.
def server_slab_stats( servers=@servers )
	live = servers.select {|svr| svr.alive? }
	commands = self.make_command_map( "stats slabs", live )

	self.add_stat( :server_slab_stats ) do
		self.send( commands ) do |svr, reply|
			### :TODO: I could parse the results from this further to split
			### out the individual slabs into their own sub-hashes, but this
			### will work for now.
			self.parse_stats( reply )
		end
	end
end
|
630
|
+
|
631
|
+
|
632
|
+
### Return item stats from the specified +servers+
|
633
|
+
### Issue 'stats items' to every live server in +servers+ and return the
### parsed replies. (The stat bucket key :server_stats_items is kept
### as-is for compatibility with existing stats consumers.)
def server_item_stats( servers=@servers )
	live = servers.select {|svr| svr.alive? }
	commands = self.make_command_map( "stats items", live )

	self.add_stat( :server_stats_items ) do
		self.send( commands ) {|svr, reply| self.parse_stats( reply ) }
	end
end
|
646
|
+
|
647
|
+
|
648
|
+
### Return item size stats from the specified +servers+
|
649
|
+
### Issue 'stats sizes' to every live server in +servers+; each reply is
### returned as an Array of its lines with the END footer removed.
def server_size_stats( servers=@servers )
	live = servers.select {|svr| svr.alive? }
	commands = self.make_command_map( "stats sizes", live )

	self.add_stat( :server_stats_sizes ) do
		self.send( commands ) do |svr, reply|
			reply.sub( /#{CRLF}END#{CRLF}/, '' ).split( /#{CRLF}/ )
		end
	end
end
|
662
|
+
|
663
|
+
|
664
|
+
|
665
|
+
#########
|
666
|
+
protected
|
667
|
+
#########
|
668
|
+
|
669
|
+
### Create a hash mapping the specified command to each of the given
|
670
|
+
### +servers+.
|
671
|
+
### Create a hash mapping each of the given +servers+ to the specified
### +command+ string.
### Fixed: the original built the Hash via Array#nitems, which was
### removed in Ruby 1.9, so this raised NoMethodError on modern Rubies.
def make_command_map( command, servers=@servers )
	servers.each_with_object( {} ) do |svr, map|
		map[ svr ] = command
	end
end
|
674
|
+
|
675
|
+
|
676
|
+
### Parse raw statistics lines from a memcached 'stats' +reply+ and return a
|
677
|
+
### Hash.
|
678
|
+
def parse_stats( reply )

	# Trim off the "END" footer. NOTE: this mutates +reply+ in place.
	reply.sub!( /#{CRLF}END#{CRLF}/, '' )

	# Make a hash out of the other values; each remaining line is of the
	# form "STAT <name> <value>".
	pairs = reply.split( /#{CRLF}/ ).collect {|line|
		stat, name, val = line.split(/\s+/, 3)
		name = name.to_sym

		# Coerce the raw value through the per-stat converter, defaulting
		# to Integer conversion.
		if StatConverters.key?( name )
			val = StatConverters[ name ].call( val )
		else
			val = StatConverters[ :__default__ ].call( val )
		end

		[name,val]
	}

	return Hash[ *(pairs.flatten) ]
end
|
699
|
+
|
700
|
+
|
701
|
+
### Get the server corresponding to the given +key+.
|
702
|
+
### Get the server corresponding to the given +key+.
### Fixed two modern-Ruby breakages: (1) +svr+ was first assigned inside
### the 20.times block, making it block-local under Ruby >= 1.9 so the
### post-loop nil check raised NameError -- it is now declared before the
### loop; (2) Array#nitems was removed in Ruby 1.9 -- @buckets never
### contains nils, so #size is equivalent.
def get_server( key )
	@mutex.synchronize( Sync::SH ) {
		return @servers.first if @servers.length == 1

		# If the key is an integer, it's assumed to be a precomputed hash
		# key so don't bother hashing it. Otherwise use the hashing function
		# to come up with a hash of the key to determine which server to
		# talk to
		hkey = nil
		if key.is_a?( Integer )
			hkey = key
		else
			hkey = @hashfunc.call( key )
		end

		# Set up buckets (one entry per unit of server weight) if they
		# haven't been already
		unless @buckets
			@mutex.synchronize( Sync::EX ) {
				# Check again after switching to an exclusive lock
				unless @buckets
					@buckets = []
					@servers.each do |svr|
						self.debug_msg( "Adding %d buckets for %p", svr.weight, svr )
						svr.weight.times { @buckets.push(svr) }
					end
				end
			}
		end

		# Fetch a server for the given key, probing subsequent buckets if
		# that server is offline. Declared outside the block so the value
		# survives it.
		svr = nil
		20.times do |tries|
			svr = @buckets[ (hkey + tries) % @buckets.size ]
			break if svr.alive?
			svr = nil
		end

		raise MemCacheError, "No servers available" if svr.nil?
		return svr
	}
end
|
743
|
+
|
744
|
+
|
745
|
+
### Store the specified +value+ to the cache associated with the specified
|
746
|
+
### +key+ and expiration time +exptime+.
|
747
|
+
def store( type, key, val, exptime )
	# Storing nil is expressed as deletion of the key
	return self.delete( key ) if val.nil?
	svr = self.get_server( key )
	cachekey = self.make_cache_key( key )
	res = nil

	self.add_stat( type ) {
		# Prep the value for storage (serialize/compress/escape; returns
		# the wire form plus the F_* flag bitmask)
		sval, flags = self.prep_value( val )

		# Form the command: "<type> <key> <flags> <exptime> <bytes>"
		# followed by the data block
		cmd = []
		cmd << "%s %s %d %d %d" %
			[ type, cachekey, flags, exptime, sval.length ]
		cmd << sval
		self.debug_msg( "Storing with: %p", cmd )

		# Send the command and read the reply (NOTE(review): #send is the
		# client's own dispatcher keyed by server, not Object#send)
		res = self.send( svr => cmd )
	}

	# Check for an appropriate server response
	return (res && res[svr] && res[svr].rstrip == "STORED")
end
|
771
|
+
|
772
|
+
|
773
|
+
### Fetch the values corresponding to the given +keys+ from the cache and
|
774
|
+
### return them as a Hash.
|
775
|
+
def fetch( type, *keys )

	# Make a hash to hold servers => commands for the keys to be fetched,
	# and one to match cache keys back to the caller's original keys.
	# The default block seeds each new server entry with the bare 'get'
	# command string, which then has " <cachekey>" appended per key below.
	map = Hash::new {|hsh,key| hsh[key] = 'get'}
	cachekeys = {}

	res = {}
	self.add_stat( type ) {

		# Map the key's server to the command to fetch its value
		keys.each do |key|
			svr = self.get_server( key )

			ckey = self.make_cache_key( key )
			cachekeys[ ckey ] = key
			map[ svr ] << " " + ckey
		end

		# Send the commands and map the results hash into the return hash
		self.send( map, true ) do |svr, reply|

			# Iterate over the replies, stripping first the 'VALUE
			# <cachekey> <flags> <len>' line with a regexp and then the data
			# line by length as specified by the VALUE line. Both sub! and
			# slice! consume +reply+ in place.
			while reply.sub!( /^VALUE (\S+) (\d+) (\d+)\r\n/, '' )
				ckey, flags, len = $1, $2.to_i, $3.to_i

				# Restore compressed and thawed values that require it.
				data = reply.slice!( 0, len + 2 ) # + CRLF
				rval = self.restore( data[0,len], flags )

				res[ cachekeys[ckey] ] = rval
			end

			# After all VALUE blocks are consumed only the terminator
			# should remain. NOTE(review): +rval+ here is the last value
			# parsed (nil if no VALUE block matched); the leftover +reply+
			# would arguably be more useful in this message.
			unless reply == "END" + CRLF
				raise MemCacheError, "Malformed reply fetched from %p: %p" %
					[ svr, rval ]
			end
		end
	}

	return res
end
|
819
|
+
|
820
|
+
|
821
|
+
### Increment/decrement the value associated with +key+ on the server by
|
822
|
+
### +val+.
|
823
|
+
def incrdecr( type, key, val )
	svr = self.get_server( key )
	cachekey = self.make_cache_key( key )

	# Form the command ("incr <key> <val>" / "decr <key> <val>"), send
	# it, and read the reply
	res = self.add_stat( type ) {
		cmd = "%s %s %d" % [ type, cachekey, val ]
		self.send( svr => cmd )
	}

	# De-stringify the number if it is one and return it as an Integer, or
	# nil if it isn't a number (e.g., a NOT_FOUND reply).
	if /^(\d+)/.match( res[svr] )
		return Integer( $1 )
	else
		return nil
	end
end
|
841
|
+
|
842
|
+
|
843
|
+
### Prepare the specified value +val+ for insertion into the cache,
|
844
|
+
### serializing and compressing as necessary/configured.
|
845
|
+
### Prepare the specified value +val+ for insertion into the cache,
### serializing, compressing, and urlencoding as necessary/configured.
### Returns [wire_string, flags_bitmask].
### Fixes: (1) the compression guard tested +@compress+, an instance
### variable that is never assigned (#initialize sets +@compression+),
### so compression silently never ran; (2) Zlib::ZStream::Deflate is not
### a resolvable constant path -- the class is Zlib::Deflate; (3) the
### double-negative `unless !@urlencode` is now `if @urlencode`.
def prep_value( val )
	sval = nil
	flags = 0

	# Serialize if something other than a String, Numeric
	case val
	when String
		sval = val.dup
	when Numeric
		sval = val.to_s
		flags |= F_NUMERIC
	else
		self.debug_msg "Serializing %p" % val
		sval = Marshal::dump( val )
		flags |= F_SERIALIZED
	end

	# Compress if compression is enabled, the value exceeds the
	# compression threshold, and the compressed value is smaller than
	# the uncompressed version.
	if @compression && sval.length > @c_threshold
		zipped = Zlib::Deflate::deflate( sval )

		if zipped.length < (sval.length * MinCompressionRatio)
			self.debug_msg "Using compressed value (%d/%d)" %
				[ zipped.length, sval.length ]
			sval = zipped
			flags |= F_COMPRESSED
		end
	end

	# Urlencode unless told not to. NOTE(review): URI::escape was removed
	# in Ruby 3.0; kept here because stored data written by other clients
	# of this library depends on this exact escaping.
	if @urlencode
		sval = URI::escape( sval )
		flags |= F_ESCAPED
	end

	return sval, flags
end
|
884
|
+
|
885
|
+
|
886
|
+
### Restore the specified value +val+ from the form inserted into the cache,
### reversing (in this order) URL-escaping, compression, serialization, and
### number-stringification as indicated by the given +flags+ bitmask.
def restore( val, flags=0 )
    self.debug_msg( "Restoring value %p (flags: %d)", val, flags )
    rval = val.dup

    # De-urlencode
    if (flags & F_ESCAPED).nonzero?
        # NOTE(review): URI.unescape was removed in Ruby 3.0; modern Rubies
        # need URI::DEFAULT_PARSER.unescape here -- confirm target Ruby.
        rval = URI::unescape( rval )
    end

    # Decompress
    if (flags & F_COMPRESSED).nonzero?
        # BUGFIX: was Zlib::ZStream::Inflate::inflate, which is not a valid
        # constant path -- the inflate class method lives on Zlib::Inflate.
        rval = Zlib::Inflate::inflate( rval )
    end

    # Unserialize. NOTE(review): Marshal.load on cache contents assumes the
    # cache is trusted data -- confirm no untrusted writer shares it.
    if (flags & F_SERIALIZED).nonzero?
        rval = Marshal::load( rval )
    end

    # De-stringify numbers stored with F_NUMERIC: values containing a
    # decimal point come back as Float, everything else as Integer.
    if (flags & F_NUMERIC).nonzero?
        if /\./.match( rval )
            rval = Float( rval )
        else
            rval = Integer( rval )
        end
    end

    return rval
end
|
917
|
+
|
918
|
+
|
919
|
+
### Statistics wrapper: time the given block, bump the execution count and
### accumulated processor times for the operation +type+ in @stats, notify
### the @stats_callback (if one is set), and return the block's result.
def add_stat( type )
    raise LocalJumpError, "no block given" unless block_given?

    # Sample processor times around the block's execution
    before = Process::times
    result = yield
    after = Process::times

    # Accumulate per-operation call count and user/system CPU deltas
    stat = @stats[ type ]
    stat[:count] += 1
    stat[:utime] += after.utime - before.utime
    stat[:stime] += after.stime - before.stime

    # Fire the user-supplied stats callback, if any
    @stats_callback.call( type, before, after ) if @stats_callback

    result
end
|
937
|
+
|
938
|
+
|
939
|
+
### Emit a debugging message formed +sprintf+-style from +fmt+ and +args+.
### Dispatches on the capability of @debug: a callable (responds to #call)
### receives the formatted string, an appendable (responds to #<<) receives
### it with a trailing newline, and any other true value sends it to
### $stderr. Does nothing when @debug is +nil+ or +false+.
def debug_msg( fmt, *args )
    sink = @debug
    return unless sink

    message = fmt % args
    if sink.respond_to?( :call )
        sink.call( message )
    elsif sink.respond_to?( :<< )
        sink << "#{fmt}\n" % args
    else
        $stderr.puts( message )
    end
end
|
954
|
+
|
955
|
+
|
956
|
+
### Create a key for the cache from any object. Strings and Symbols are
### used as-is (stringified); any other object contributes its #hash value.
### The key is prefixed with the configured @namespace (if any) and
### urlencoded when @urlencode is set.
def make_cache_key( key )
    # Prefix with the namespace, if one is configured
    ck = @namespace ? "#@namespace:" : ""

    case key
    when String, Symbol
        ck += key.to_s
    else
        ck += "%s" % key.hash
    end

    # Urlencode if configured to (was `unless !@urlencode`, a double
    # negative).
    # NOTE(review): URI.escape was removed in Ruby 3.0; modern Rubies need
    # URI::DEFAULT_PARSER.escape here -- confirm target Ruby.
    ck = URI::escape( ck ) if @urlencode

    self.debug_msg( "Cache key for %p: %p", key, ck )
    return ck
end
|
973
|
+
|
974
|
+
|
975
|
+
### Socket IO Methods

### Given +pairs+ of MemCache::Server objects and Strings or Arrays of
### commands for each server, do multiplexed IO between all of them, reading
### single-line responses (or multiline responses when +multiline+ is true).
### Returns a Hash mapping each server to its raw reply buffer, or to the
### block's return value when a block is given. Servers that are not alive
### map to +nil+.
###
### NOTE(review): this overrides Object#send for MemCache instances, so
### dynamic dispatch via #send is unavailable on this class -- confirm this
### is intended.
def send( pairs, multiline=false )
    raise TypeError, "type mismatch: #{pairs.class.name} given" unless
        pairs.is_a?( Hash )
    buffers = {}
    rval = {}

    # Fetch the Method object for the IO handler
    handler = self.method( :handle_line_io )

    # Set up the buffers and reactor for the exchange
    pairs.each do |server,cmds|

        # Dead servers get a nil reply and are dropped from the exchange.
        # NOTE(review): deleting from +pairs+ while iterating it is
        # hazardous in general -- verify this is safe on the target Ruby.
        unless server.alive?
            rval[server] = nil
            pairs.delete( server )
            next
        end

        # Handle either Arrayish or Stringish commandsets
        wbuf = cmds.respond_to?( :join ) ? cmds.join( CRLF ) : cmds.to_s
        self.debug_msg( "Created command %p for %p", wbuf, server )
        wbuf += CRLF

        # Make a buffer tuple (read/write) for the server
        buffers[server] = { :rbuf => '', :wbuf => wbuf }

        # Register the server's socket with the reactor; the handler drives
        # both the write of the command and the read of the reply.
        @reactor.register( server.socket, :write, :read, server,
            buffers[server], multiline, &handler )
    end

    # Do all the IO at once
    self.debug_msg( "Reactor starting for %d IOs", @reactor.handles.length )
    @reactor.poll until @reactor.empty?
    self.debug_msg( "Reactor finished." )

    # Build the return value, delegating the processing to a block if one
    # was given.
    pairs.each {|server,cmds|

        # Handle protocol errors if they happen. I have no idea if this is
        # desirable/correct behavior: none of the other clients react to
        # CLIENT_ERROR or SERVER_ERROR at all; in fact, I think they'd all
        # hang on one like this one did before I added them to the
        # terminator pattern in #handle_line_io. So this may change in the
        # future if it ends up being better to just ignore errors, try to
        # cache/fetch what we can, and hope returning nil will suffice in
        # the face of error conditions
        self.handle_protocol_error( buffers[server][:rbuf], server ) if
            ANY_ERROR.match( buffers[server][:rbuf] )

        # If the caller is doing processing on the reply, yield each buffer
        # in turn. Otherwise, just use the raw buffer as the return value
        if block_given?
            self.debug_msg( "Yielding value/s %p for %p",
                buffers[server][:rbuf], server )
            rval[server] = yield( server, buffers[server][:rbuf] )
        else
            rval[server] = buffers[server][:rbuf]
        end
    }

    return rval
end
|
1043
|
+
|
1044
|
+
|
1045
|
+
### Handle an IO event +ev+ (:read, :write, or :err) on the given +sock+ for
### the specified +server+, reading/writing through the +buffers+ tuple
### ({:rbuf => read buffer, :wbuf => write buffer}). Expects single-line
### replies (terminated with CRLF) unless +multiline+ is true. Invoked by
### the reactor registered in #send; deregisters the socket and marks the
### server dead on EOF/IO errors.
def handle_line_io( sock, ev, server, buffers, multiline=false )
    self.debug_msg( "Line IO (ml=%p) event for %p: %s: %p - %p",
        multiline, sock, ev, server, buffers )

    # Set the terminator pattern based on whether multiline is turned on or
    # not.
    terminator = multiline ? MULTILINE_TERMINATOR : LINE_TERMINATOR

    # Handle the event
    case ev
    when :read
        len = buffers[:rbuf].length
        buffers[:rbuf] << sock.sysread( 256 )
        self.debug_msg "Read %d bytes." % [ buffers[:rbuf].length - len ]

        # If we've read the reply, then we're done with this socket
        # completely.
        if terminator.match( buffers[:rbuf] )
            self.debug_msg "Done with read for %p: %p", sock, buffers[:rbuf]
            @reactor.remove( sock )
        end

    when :write
        # NOTE(review): this is BasicSocket#send (buffer + flags), not
        # MemCache#send -- the receiver is the socket.
        res = sock.send( buffers[:wbuf], SendFlags )
        self.debug_msg( "Wrote %d bytes.", res )
        buffers[:wbuf].slice!( 0, res ) unless res.zero?

        # If the write buffer's done, then we don't care about writability
        # anymore, so clear that event.
        if buffers[:wbuf].empty?
            self.debug_msg "Done with write for %p" % sock
            @reactor.disableEvents( sock, :write )
        end

    when :err
        # Fetch the pending socket error, then drop the connection and
        # mark the server dead so it isn't retried immediately.
        so_error = sock.getsockopt( SOL_SOCKET, SO_ERROR )
        self.debug_msg "Socket error on %p: %s" % [ sock, so_error ]
        @reactor.remove( sock )
        server.mark_dead( so_error )

    else
        raise ArgumentError, "Unhandled reactor event type: #{ev}"
    end
rescue EOFError, IOError => err
    # Connection dropped mid-exchange: deregister and mark the server dead.
    @reactor.remove( sock )
    server.mark_dead( err.message )
end
|
1094
|
+
|
1095
|
+
|
1096
|
+
### Handle error messages defined in the memcached protocol. The +buffer+
### argument is matched against the protocol error patterns and, where
### appropriate, the error message is extracted from it. The +server+
### argument is only used in the case of +SERVER_ERROR+, in which case the
### raised exception will carry that object. The +depth+ argument specifies
### the call depth from which the exception's stacktrace is gathered.
def handle_protocol_error( buffer, server, depth=4 )
    backtrace = caller( depth )

    if buffer =~ CLIENT_ERROR
        raise ClientError, $1, backtrace
    elsif buffer =~ SERVER_ERROR
        raise ServerError::new( server ), $1, backtrace
    else
        raise InternalError, "Unknown internal error", backtrace
    end
end
|
1114
|
+
|
1115
|
+
|
1116
|
+
|
1117
|
+
#####################################################################
|
1118
|
+
### I N T E R I O R C L A S S E S
|
1119
|
+
#####################################################################
|
1120
|
+
|
1121
|
+
### A Multiton datatype to represent a potential memcached server
### connection.
class Server

    # Default timeout (in seconds) for connections to memcached servers.
    ConnectTimeout = 0.25


    #############################################################
    ### I N S T A N C E   M E T H O D S
    #############################################################

    ### Create a new MemCache::Server object for the memcached instance
    ### listening on the given +host+ and +port+, weighted with the given
    ### +weight+.
    ###
    ### Raises ArgumentError for a nil/empty +host+ or a nil/zero +port+.
    def initialize( host, port=11211, weight=DefaultServerWeight )
        if host.nil? || host.empty?
            raise ArgumentError, "Illegal host %p" % host
        elsif port.nil? || port.to_i.zero?
            raise ArgumentError, "Illegal port %p" % port
        end

        @host   = host
        @port   = port
        @weight = weight

        @sock   = nil
        @retry  = nil
        @status = "not yet connected"
    end


    ######
    public
    ######

    # The host the memcached server is running on
    attr_reader :host

    # The port the memcached is listening on
    attr_reader :port

    # The weight given to the server
    attr_reader :weight

    # The Time of next connection retry if the object is dead.
    attr_reader :retry

    # A text status string describing the state of the server.
    attr_reader :status


    ### Return a string representation of the server object.
    def inspect
        return "<MemCache::Server: %s:%d [%d] (%s)>" % [
            @host,
            @port,
            @weight,
            @status,
        ]
    end


    ### Test the server for aliveness, returning +true+ if the object was
    ### able to connect. This will cause the socket connection to be opened
    ### if it isn't already.
    def alive?
        return !self.socket.nil?
    end


    ### Try to connect to the memcached targeted by this object. Returns the
    ### connected socket object on success; marks the server dead and
    ### returns +nil+ on any failure.
    def socket

        # (Re)connect if we have no socket or the one we have was closed.
        # BUGFIX: the old condition `unless @sock || (!@sock.nil? &&
        # @sock.closed?)` never reconnected a closed socket -- the first
        # disjunct short-circuited for any non-nil @sock, so the closed?
        # check was dead code.
        if @sock.nil? || @sock.closed?
            @sock = nil

            # If the host was dead, don't retry for a while
            return nil if @retry && @retry > Time::now

            # Attempt to connect, bounded by ConnectTimeout.
            # NOTE(review): bare Kernel#timeout and the TimeoutError alias
            # are 1.8-era; modern Rubies use Timeout.timeout and
            # Timeout::Error -- confirm target Ruby.
            begin
                @sock = timeout( ConnectTimeout ) {
                    TCPSocket::new( @host, @port )
                }
                @status = "connected"
            rescue SystemCallError, IOError, TimeoutError => err
                self.mark_dead( err.message )
            end
        end

        return @sock
    end


    ### Mark the server as dead for 30-40 seconds and close its socket. The
    ### specified +reason+ is used to construct an appropriate status
    ### message.
    def mark_dead( reason="Unknown error" )
        @sock.close if @sock && !@sock.closed?
        @sock = nil

        # Jitter the retry time to avoid all dead servers reconnecting at
        # once.
        @retry = Time::now + ( 30 + rand(10) )
        @status = "DEAD: %s: Will retry at %s" %
            [ reason, @retry ]
    end


end # class Server
|
1233
|
+
|
1234
|
+
|
1235
|
+
#################################################################
|
1236
|
+
### E X C E P T I O N C L A S S E S
|
1237
|
+
#################################################################
|
1238
|
+
|
1239
|
+
### Base MemCache exception class.
### BUGFIX: previously inherited from ::Exception, which meant a bare
### +rescue+ clause (which only catches StandardError descendants) would
### silently miss every memcache failure; StandardError is the conventional
### base for library errors, and callers rescuing MemCacheError explicitly
### are unaffected.
class MemCacheError < StandardError
end
|
1242
|
+
|
1243
|
+
### MemCache internal error class -- instances of this class mean that there
### is some internal error either in the memcache client lib or the
### memcached server it's talking to. Parent of ClientError and ServerError,
### which map the CLIENT_ERROR/SERVER_ERROR protocol replies.
class InternalError < MemCacheError
end
|
1248
|
+
|
1249
|
+
### MemCache client error class -- this is raised if a "CLIENT_ERROR
### <error>\r\n" line is seen in the dialog with a server (see
### #handle_protocol_error); the exception message carries the <error> text.
class ClientError < InternalError
end
|
1253
|
+
|
1254
|
+
### MemCache server error class -- this is raised if a "SERVER_ERROR
### <error>\r\n" line is seen in the dialog with a server; the exception
### carries the Server object the error came from.
class ServerError < InternalError

    # The MemCache::Server object the error occurred on
    attr_reader :server

    ### Create a new ServerError for the given server object +svr+.
    def initialize( svr )
        @server = svr
    end

end
|
1263
|
+
|
1264
|
+
|
1265
|
+
end # class memcache
|
1266
|
+
|