ninjudd-memcache 0.9.0 → 0.9.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/VERSION +1 -1
- data/lib/memcache.rb +31 -785
- data/lib/memcache/local_server.rb +12 -12
- data/lib/memcache/pg_server.rb +5 -1
- data/memcache.gemspec +66 -0
- data/test/memcache_pg_server_test.rb +1 -1
- data/test/memcache_test.rb +3 -3
- data/test/test_helper.rb +2 -0
- metadata +3 -11
- data/lib/memcache_extended.rb +0 -120
- data/lib/memcache_mock.rb +0 -137
- data/lib/memcache_util.rb +0 -90
- data/test/test_mem_cache.rb +0 -739
- data/test/test_memcache_extended.rb +0 -44
- data/test/test_memcache_mock.rb +0 -94
data/VERSION
CHANGED
@@ -1 +1 @@
-0.9.0
+0.9.1
data/lib/memcache.rb
CHANGED
@@ -1,136 +1,19 @@
-$TESTING = defined?($TESTING) && $TESTING
-
-require 'socket'
-require 'thread'
-require 'timeout'
-require 'rubygems'
-require File.dirname(__FILE__) + '/memcache_mock'
 require 'zlib'
 
-<<<<<<< HEAD
-##
-# A Ruby client library for memcached.
-#
-# This is intended to provide access to basic memcached functionality. It
-# does not attempt to be complete implementation of the entire API, but it is
-# approaching a complete implementation.
-=======
 $:.unshift(File.dirname(__FILE__))
 require 'memcache/server'
 require 'memcache/local_server'
 require 'memcache/segmented_server'
->>>>>>> refactor
 
-class MemCache
-
-# The version of MemCache you are using.
+class Memcache
+VERSION = '0.9.0'
 
-<<<<<<< HEAD
-VERSION = '1.5.0.3' unless defined? VERSION
-=======
 DEFAULT_EXPIRY = 0
 LOCK_TIMEOUT = 5
 WRITE_LOCK_WAIT = 0.001
->>>>>>> refactor
-
-##
-# Default options for the cache object.
-
-<<<<<<< HEAD
-DEFAULT_OPTIONS = {
-:namespace => nil,
-:readonly => false,
-:multithread => false,
-} unless defined? DEFAULT_OPTIONS
-
-##
-# Default memcached port.
-
-DEFAULT_PORT = 11211 unless defined? DEFAULT_PORT
-
-##
-# Default memcached server weight.
-
-DEFAULT_WEIGHT = 1 unless defined? DEFAULT_WEIGHT
-
-##
-# Default number of servers to try connecting to.
-
-DEFAULT_FALLBACK = 20 unless defined? DEFAULT_FALLBACK
-
-##
-# Default expiry if none is specified.
-
-DEFAULT_EXPIRY = 0 unless defined? DEFAULT_EXPIRY
-
-##
-# The amount of time to wait for a response from a memcached server. If a
-# response is not completed within this time, the connection to the server
-# will be closed and an error will be raised.
-
-attr_accessor :request_timeout
 
-
-# The multithread setting for this instance
+attr_reader :default_expiry, :default_namespace, :servers
 
-attr_reader :multithread
-
-##
-# The servers this client talks to. Play at your own peril.
-
-attr_reader :servers
-
-##
-# Number of servers to try connecting to.
-attr_reader :fallback
-
-##
-# Default expiry if none is specified.
-attr_reader :default_expiry
-
-##
-# Accepts a list of +servers+ and a list of +opts+. +servers+ may be
-# omitted. See +servers=+ for acceptable server list arguments.
-#
-# Valid options for +opts+ are:
-#
-# [:namespace] Prepends this value to all keys added or retrieved.
-# [:readonly] Raises an exeception on cache writes when true.
-# [:multithread] Wraps cache access in a Mutex for thread safety.
-# [:fallback] Number of servers to try before failing.
-# [:default_expiry] Default expiry if none is specified.
-# Other options are ignored.
-
-def initialize(*args)
-servers = []
-opts = {}
-
-case args.length
-when 0 then # NOP
-when 1 then
-arg = args.shift
-case arg
-when Hash then opts = arg
-when Array then servers = arg
-when String then servers = [arg]
-else raise ArgumentError, 'first argument must be Array, Hash or String'
-end
-when 2 then
-servers, opts = args
-else
-raise ArgumentError, "wrong number of arguments (#{args.length} for 2)"
-end
-
-opts = DEFAULT_OPTIONS.merge opts
-@namespace = opts[:namespace]
-@readonly = opts[:readonly]
-@multithread = opts[:multithread]
-@fallback = opts[:fallback] || DEFAULT_FALLBACK
-@default_expiry = opts[:default_expiry].to_i || DEFAULT_EXPIRY
-@mutex = Mutex.new if @multithread
-@buckets = []
-self.servers = servers
-=======
 def initialize(opts)
 @readonly = opts[:readonly]
 @default_expiry = opts[:default_expiry] || DEFAULT_EXPIRY
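Note: the old positional constructor (a servers list plus an options hash) is gone; the refactored initialize above takes a single options hash. A minimal construction sketch, assuming the remainder of initialize (not shown in this hunk) accepts a :servers option, as suggested by the attr_reader :servers added above:

    require 'memcache'

    # :readonly and :default_expiry are visible in this hunk; :servers is an assumption
    cache = Memcache.new(:servers        => ['localhost:11211'],
                         :default_expiry => 600,
                         :readonly       => false)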
@@ -154,81 +37,31 @@ class MemCache
 
 def inspect
 "<Memcache: %d servers, ns: %p, ro: %p>" % [@servers.length, namespace, @readonly]
->>>>>>> refactor
 end
 
-##
-# Returns the namespace for the current thread.
-
 def namespace
-<<<<<<< HEAD
-Thread.current[:memcache_namespace] || @namespace
-=======
 @namespace || default_namespace
->>>>>>> refactor
 end
 
-##
-# Set the namespace for the current thread.
-
 def namespace=(namespace)
-<<<<<<< HEAD
-if namespace == @namespace
-Thread.current[:memcache_namespace] = nil
-=======
 if default_namespace == namespace
 @namespace = nil
->>>>>>> refactor
 else
 @namespace = namespace
 end
 end
 
-
-
-
-
-
-
-
-
-
-##
-# Returns whether there is at least one active server for the object.
-
-def active?
-not @servers.empty?
-end
-
-##
-# Returns whether or not the cache object was created read only.
-
-def readonly?
-@readonly
+def in_namespace(namespace)
+# Temporarily change the namespace for convenience.
+begin
+old_namespace = self.namespace
+self.namespace = "#{old_namespace}#{namespace}"
+yield
+ensure
+self.namespace = old_namespace
+end
 end
 
-##
-# Set the servers that the requests will be distributed between. Entries
-# can be either strings of the form "hostname:port" or
-# "hostname:port:weight" or MemCache::Server objects.
-
-def servers=(servers)
-# Create the server objects.
-@servers = servers.collect do |server|
-case server
-when String
-host, port, weight = server.split ':', 3
-port ||= DEFAULT_PORT
-weight ||= DEFAULT_WEIGHT
-Server.new self, host, port, weight
-when Server
-if server.memcache.multithread != @multithread then
-raise ArgumentError, "can't mix threaded and non-threaded servers"
-end
-server
-else
-raise TypeError, "cannot convert #{server.class} into MemCache::Server"
-=======
 def get(keys, opts = {})
 raise 'opts must be hash' unless opts.kind_of?(Hash)
 
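The in_namespace helper added above appends a suffix to the current namespace for the duration of a block and restores the old namespace afterwards (the ensure runs even if the block raises). A minimal usage sketch, assuming a Memcache instance named cache whose namespace is 'app:':

    cache.in_namespace('users:') do
      cache.set('1', 'alice')   # operations inside the block run under the combined namespace 'app:users:'
      cache.get('1')
    end
    cache.namespace             # back to 'app:' after the block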
@@ -242,89 +75,9 @@ class MemCache
 server(key).cas(key, value, value.memcache_cas, opts[:expiry]) if value
 else
 value = server(key).get(key, opts[:cas])
->>>>>>> refactor
 end
 unmarshal(value, opts)
 end
-<<<<<<< HEAD
-
-# Create an array of server buckets for weight selection of servers.
-@buckets = []
-@servers.each do |server|
-server.weight.times { @buckets.push(server) }
-end
-end
-
-##
-# Decrements the value for +key+ by +amount+ and returns the new value.
-# +key+ must already exist. If +key+ is not an integer, it is assumed to be
-# 0. +key+ can not be decremented below 0.
-
-def decr(key, amount = 1)
-raise MemCacheError, "Update of readonly cache" if @readonly
-with_server(key) do |server, cache_key|
-cache_decr server, cache_key, amount
-end
-rescue TypeError => err
-handle_error nil, err
-end
-
-##
-# Retrieves +key+ from memcache. If +raw+ is false, the value will be
-# unmarshalled.
-
-def get(key, raw = false)
-with_server(key) do |server, cache_key|
-value = cache_get server, cache_key
-return nil if value.nil?
-value = Marshal.load(value) unless raw
-return value
-end
-rescue TypeError => err
-handle_error nil, err
-end
-
-##
-# Retrieves multiple values from memcached in parallel, if possible.
-#
-# The memcached protocol supports the ability to retrieve multiple
-# keys in a single request. Pass in an array of keys to this method
-# and it will:
-#
-# 1. map the key to the appropriate memcached server
-# 2. send a single request to each server that has one or more key values
-#
-# Returns a hash of values.
-#
-# cache["a"] = 1
-# cache["b"] = 2
-# cache.get_multi "a", "b" # => { "a" => 1, "b" => 2 }
-
-def get_multi(*keys)
-raise MemCacheError, 'No active servers' unless active?
-
-opts = keys.last.kind_of?(Hash) ? keys.pop : {}
-
-keys.flatten!
-key_count = keys.length
-cache_keys = {}
-server_keys = Hash.new { |h,k| h[k] = [] }
-
-# map keys to servers
-keys.each do |key|
-server, cache_key = request_setup key
-cache_keys[cache_key] = key
-server_keys[server] << cache_key
-end
-
-results = {}
-
-server_keys.each do |server, keys|
-keys = keys.join ' '
-values = cache_get_multi server, keys
-values.each do |key, value|
-results[cache_keys[key]] = opts[:raw] ? value : Marshal.load(value)
-=======
 end
 
 def read(keys, opts = {})
@@ -449,73 +202,11 @@ class MemCache
 yield(keys_to_fetch).each do |key, value|
 self.send(method, key, value, opts) unless opts[:disable] or opts[:disable_write]
 records[key] = value
->>>>>>> refactor
 end
 end
-
-return results
-rescue TypeError => err
-handle_error nil, err
-end
-
-<<<<<<< HEAD
-##
-# Increments the value for +key+ by +amount+ and returns the new value.
-# +key+ must already exist. If +key+ is not an integer, it is assumed to be
-# 0.
-
-def incr(key, amount = 1)
-raise MemCacheError, "Update of readonly cache" if @readonly
-with_server(key) do |server, cache_key|
-cache_incr server, cache_key, amount
-end
-rescue TypeError => err
-handle_error nil, err
+records
 end
-
-##
-# Add +key+ to the cache with value +value+ that expires in +expiry+
-# seconds. If +raw+ is true, +value+ will not be Marshalled.
-#
-# Warning: Readers should not call this method in the event of a cache miss;
-# see MemCache#add.
-
-def set(key, value, expiry = default_expiry, raw = false)
-raise MemCacheError, "Update of readonly cache" if @readonly
-with_server(key) do |server, cache_key|
-value = Marshal.dump value unless raw
-cache_store(:set, cache_key, value, expiry, server)
-end
-end
-
-##
-# Add +key+ to the cache with value +value+ that expires in +expiry+
-# seconds, but only if +key+ does not already exist in the cache.
-# If +raw+ is true, +value+ will not be Marshalled.
-#
-# Readers should call this method in the event of a cache miss, not
-# MemCache#set or MemCache#[]=.
-
-def add(key, value, expiry = default_expiry, raw = false)
-raise MemCacheError, "Update of readonly cache" if @readonly
-with_server(key) do |server, cache_key|
-value = Marshal.dump value unless raw
-cache_store(:add, cache_key, value, expiry, server)
-end
-end
-
-##
-# Removes +key+ from the cache in +expiry+ seconds.
 
-def delete(key, expiry = default_expiry)
-raise MemCacheError, "Update of readonly cache" if @readonly
-server, cache_key = request_setup key
-
-with_socket_management(server) do |socket|
-socket.write "delete #{cache_key} #{expiry}\r\n"
-socket.gets
-end
-=======
 def lock(key, opts = {})
 # Returns false if the lock already exists.
 expiry = opts[:expiry] || LOCK_TIMEOUT
@@ -546,133 +237,39 @@ class MemCache
 def delete(key)
 key = cache_key(key)
 server(key).delete(key)
->>>>>>> refactor
 end
 
-
-
+def flush_all(opts = {})
+delay = opts[:delay].to_i
+interval = opts[:interval].to_i
 
-
-
-
-begin
-@mutex.lock if @multithread
-@servers.each do |server|
-with_socket_management(server) do |socket|
-socket.write "flush_all\r\n"
-result = socket.gets
-raise MemCacheError, $2.strip if result =~ /^(SERVER_)?ERROR(.*)/
-end
-end
-ensure
-@mutex.unlock if @multithread
+servers.each do |server|
+server.flush_all(delay)
+delay += interval
 end
 end
 
-##
-# Reset the connection to all memcache servers. This should be called if
-# there is a problem with a cache lookup that might have left the connection
-# in a corrupted state.
-
 def reset
-<<<<<<< HEAD
-@servers.each { |server| server.close }
-=======
 servers.each {|server| server.close}
->>>>>>> refactor
 end
 
-##
-# Returns statistics for each memcached server. An explanation of the
-# statistics can be found in the memcached docs:
-#
-# http://code.sixapart.com/svn/memcached/trunk/server/doc/protocol.txt
-#
-# Example:
-#
-# >> pp CACHE.stats
-# {"localhost:11211"=>
-# {"bytes"=>4718,
-# "pid"=>20188,
-# "connection_structures"=>4,
-# "time"=>1162278121,
-# "pointer_size"=>32,
-# "limit_maxbytes"=>67108864,
-# "cmd_get"=>14532,
-# "version"=>"1.2.0",
-# "bytes_written"=>432583,
-# "cmd_set"=>32,
-# "get_misses"=>0,
-# "total_connections"=>19,
-# "curr_connections"=>3,
-# "curr_items"=>4,
-# "uptime"=>1557,
-# "get_hits"=>14532,
-# "total_items"=>32,
-# "rusage_system"=>0.313952,
-# "rusage_user"=>0.119981,
-# "bytes_read"=>190619}}
-# => nil
-
 def stats
-
-
-
-@servers.each do |server|
-next unless server.alive?
-with_socket_management(server) do |socket|
-value = nil # TODO: why is this line here?
-socket.write "stats\r\n"
-stats = {}
-while line = socket.gets do
-break if line == "END\r\n"
-if line =~ /^STAT ([\w]+) ([\w\.\:]+)/ then
-name, value = $1, $2
-stats[name] = case name
-when 'version'
-value
-when 'rusage_user', 'rusage_system' then
-seconds, microseconds = value.split(/:/, 2)
-microseconds ||= 0
-Float(seconds) + (Float(microseconds) / 1_000_000)
-else
-if value =~ /^\d+$/ then
-value.to_i
-else
-value
-end
-end
-end
-end
-server_stats["#{server.host}:#{server.port}"] = stats
-end
+stats = {}
+servers.each do |server|
+stats[server.name] = server.stats
 end
-
-server_stats
+stats
 end
 
-
-# Shortcut to get a value from the cache.
-
+alias clear flush_all
 alias [] get
 
-##
-# Shortcut to save a value in the cache. This method does not set an
-# expiration on the entry. Use set to specify an explicit expiry.
-
 def []=(key, value)
 set(key, value)
 end
 
-
-
-<<<<<<< HEAD
-##
-# Create a key for the cache, incorporating the namespace qualifier if
-# requested.
+protected
 
-def make_cache_key(key)
-=======
 def multi_get(keys, opts = {})
 return {} if keys.empty?
 
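flush_all above now delegates to each server and can stagger the flushes: the first server is flushed after :delay seconds and each subsequent one :interval seconds later, and clear is aliased to it. A short sketch of the new calls:

    cache.flush_all(:delay => 0, :interval => 30)   # flush servers 30 seconds apart
    cache.clear                                     # flush_all with both options defaulting to 0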
@@ -699,7 +296,6 @@ class MemCache
 end
 
 def cache_key(key)
->>>>>>> refactor
 safe_key = key ? key.to_s.gsub(/%/, '%%').gsub(/ /, '%s') : key
 if namespace.nil? then
 safe_key
@@ -723,357 +319,15 @@ class MemCache
 nil
 end
 
-
-# Pick a server to handle the request based on a hash of the key.
-
-def get_server_for_key(key)
-raise ArgumentError, "illegal character in key #{key.inspect}" if key =~ /\s/
+def server(key)
 raise ArgumentError, "key too long #{key.inspect}" if key.length > 250
-
-return @servers.first if @servers.length == 1
-
-hkey = hash_for key
-
-fallback.times do |try|
-server = @buckets[hkey % @buckets.nitems]
-return server if server.alive?
-hkey += hash_for "#{try}#{key}"
-end
-
-raise MemCacheError, "No servers available"
-end
+return servers.first if servers.length == 1
 
-
-
-# sketchy for down servers).
-
-def hash_for(key)
-(Zlib.crc32(key) >> 16) & 0x7fff
-end
-
-##
-# Performs a raw decr for +cache_key+ from +server+. Returns nil if not
-# found.
-
-def cache_decr(server, cache_key, amount)
-with_socket_management(server) do |socket|
-socket.write "decr #{cache_key} #{amount}\r\n"
-text = socket.gets
-return nil if text == "NOT_FOUND\r\n"
-return text.to_i
-end
+n = Zlib.crc32(key) % servers.length
+servers[n]
 end
 
-
-# Fetches the raw data for +cache_key+ from +server+. Returns nil on cache
-# miss.
-
-def cache_get(server, cache_key)
-with_socket_management(server) do |socket|
-socket.write "get #{cache_key}\r\n"
-keyline = socket.gets # "VALUE <key> <flags> <bytes>\r\n"
-
-if keyline.nil? then
-server.close
-raise MemCacheError, "lost connection to #{server.host}:#{server.port}" # TODO: retry here too
-end
-
-return nil if keyline == "END\r\n"
-
-unless keyline =~ /(\d+)\r/ then
-server.close
-raise MemCacheError, "unexpected response #{keyline.inspect}"
-end
-value = socket.read $1.to_i
-socket.read 2 # "\r\n"
-socket.gets # "END\r\n"
-return value
-end
-end
-
-##
-# Fetches +cache_keys+ from +server+ using a multi-get.
-
-def cache_get_multi(server, cache_keys)
-with_socket_management(server) do |socket|
-values = {}
-socket.write "get #{cache_keys}\r\n"
-
-while keyline = socket.gets do
-return values if keyline == "END\r\n"
-
-unless keyline =~ /^VALUE (.+) (.+) (.+)/ then
-server.close
-raise MemCacheError, "unexpected response #{keyline.inspect}"
-end
-
-key, data_length = $1, $3
-values[$1] = socket.read data_length.to_i
-socket.read(2) # "\r\n"
-end
-
-server.close
-raise MemCacheError, "lost connection to #{server.host}:#{server.port}" # TODO: retry here too
-end
-end
-
-##
-# Stores +value+ to +cache_keys+ on +server+ using +method+ (must be :set or :add).
-
-def cache_store(method, cache_key, value, expiry, server)
-raise MemCacheError, "unknown store method #{method}" unless [:set, :add].include?(method)
-
-command = "#{method} #{cache_key} 0 #{expiry} #{value.to_s.size}\r\n#{value}\r\n"
-
-with_socket_management(server) do |socket|
-socket.write command
-result = socket.gets
-if result.nil?
-server.close
-raise MemCacheError, "lost connection to #{server.host}:#{server.port}"
-end
-
-if result =~ /^SERVER_ERROR (.*)/
-server.close
-raise MemCacheError, "%s:\n%s" % [$1.strip, Marshal.restore(value).inspect]
-end
-
-result
-end
-end
-
-##
-# Performs a raw incr for +cache_key+ from +server+. Returns nil if not
-# found.
-
-def cache_incr(server, cache_key, amount)
-with_socket_management(server) do |socket|
-socket.write "incr #{cache_key} #{amount}\r\n"
-text = socket.gets
-return nil if text == "NOT_FOUND\r\n"
-return text.to_i
-end
-end
-
-##
-# Gets or creates a socket connected to the given server, and yields it
-# to the block. If a socket error (SocketError, SystemCallError, IOError)
-# or protocol error (MemCacheError) is raised by the block, closes the
-# socket, attempts to connect again, and retries the block (once). If
-# an error is again raised, reraises it as MemCacheError.
-# If unable to connect to the server (or if in the reconnect wait period),
-# raises MemCacheError - note that the socket connect code marks a server
-# dead for a timeout period, so retrying does not apply to connection attempt
-# failures (but does still apply to unexpectedly lost connections etc.).
-# Wraps the whole lot in mutex synchronization if @multithread is true.
-
-def with_socket_management(server, &block)
-@mutex.lock if @multithread
-retried = false
-begin
-socket = server.socket
-# Raise an IndexError to show this server is out of whack.
-# We'll catch it in higher-level code and attempt to restart the operation.
-raise IndexError, "No connection to server (#{server.status})" if socket.nil?
-
-block.call(socket)
-rescue MemCacheError, SocketError, SystemCallError, IOError => err
-handle_error(server, err) if retried || socket.nil?
-retried = true
-retry
-rescue Exception => err
-server.close
-raise err
-end
-ensure
-@mutex.unlock if @multithread
-end
-
-def with_server(key)
-retried = false
-begin
-server, cache_key = request_setup(key)
-yield server, cache_key
-rescue IndexError => e
-if !retried && @servers.size > 1
-puts "Connection to server #{server.inspect} DIED! Retrying operation..."
-retried = true
-retry
-end
-handle_error(nil, e)
-end
-end
-
-##
-# Handles +error+ from +server+.
-
-def handle_error(server, error)
-raise error if error.is_a?(MemCacheError)
-server.close if server
-new_error = MemCacheError.new error.message
-new_error.set_backtrace error.backtrace
-raise new_error
-end
-
-##
-# Performs setup for making a request with +key+ from memcached. Returns
-# the server to fetch the key from and the complete key to use.
-
-def request_setup(key)
-raise MemCacheError, 'No active servers' unless active?
-cache_key = make_cache_key key
-server = get_server_for_key cache_key
-return server, cache_key
-end
-
-##
-# This class represents a memcached server instance.
-
-class Server
-
-##
-# The amount of time to wait to establish a connection with a memcached
-# server. If a connection cannot be established within this time limit,
-# the server will be marked as down.
-
-CONNECT_TIMEOUT = 1.0 unless defined? CONNECT_TIMEOUT
-
-##
-# The amount of time to wait before attempting to re-establish a
-# connection with a server that is marked dead.
-
-RETRY_DELAY = 1.0 unless defined? RETRY_DELAY
-
-##
-# The host the memcached server is running on.
-
-attr_reader :host
-
-##
-# The port the memcached server is listening on.
-
-attr_reader :port
-
-##
-# The weight given to the server.
-
-attr_reader :weight
-
-##
-# The time of next retry if the connection is dead.
-
-attr_reader :retry
-
-##
-# A text status string describing the state of the server.
-
-attr_reader :status
-
-##
-# Create a new MemCache::Server object for the memcached instance
-# listening on the given host and port, weighted by the given weight.
-
-def initialize(memcache, host, port = DEFAULT_PORT, weight = DEFAULT_WEIGHT)
-raise ArgumentError, "No host specified" if host.nil? or host.empty?
-raise ArgumentError, "No port specified" if port.nil? or port.to_i.zero?
-
-@memcache = memcache
-@host = host
-@port = port.to_i
-@weight = weight.to_i
-
-@multithread = @memcache.multithread
-@mutex = Mutex.new
-
-@sock = nil
-@retry = nil
-@status = 'NOT CONNECTED'
-end
-
-##
-# Return a string representation of the server object.
-
-def inspect
-"<MemCache::Server: %s:%d [%d] (%s)>" % [@host, @port, @weight, @status]
-end
-
-##
-# Check whether the server connection is alive. This will cause the
-# socket to attempt to connect if it isn't already connected and or if
-# the server was previously marked as down and the retry time has
-# been exceeded.
-
-def alive?
-!!socket
-end
-
-##
-# Try to connect to the memcached server targeted by this object.
-# Returns the connected socket object on success or nil on failure.
-
-def socket
-@mutex.lock if @multithread
-return @sock if @sock and not @sock.closed?
-
-@sock = nil
-
-# If the host was dead, don't retry for a while.
-return if @retry and @retry > Time.now
-
-# Attempt to connect if not already connected.
-begin
-@sock = timeout CONNECT_TIMEOUT do
-TCPSocket.new @host, @port
-end
-if Socket.constants.include? 'TCP_NODELAY' then
-@sock.setsockopt Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1
-end
-@retry = nil
-@status = 'CONNECTED'
-rescue SocketError, SystemCallError, IOError, Timeout::Error => err
-mark_dead err.message
-end
-
-return @sock
-ensure
-@mutex.unlock if @multithread
-end
-
-##
-# Close the connection to the memcached server targeted by this
-# object. The server is not considered dead.
-
-def close
-@mutex.lock if @multithread
-@sock.close if @sock && !@sock.closed?
-@sock = nil
-@retry = nil
-@status = "NOT CONNECTED"
-ensure
-@mutex.unlock if @multithread
-end
-
-private
-
-##
-# Mark the server as dead and close its socket.
-
-def mark_dead(reason = "Unknown error")
-@sock.close if @sock && !@sock.closed?
-@sock = nil
-@retry = Time.now + RETRY_DELAY
-
-@status = sprintf "DEAD: %s, will retry at %s", reason, @retry
-end
-
-end
-
-##
-# Base MemCache exception class.
-
-class MemCacheError < RuntimeError; end
-
-class CachePool
+class Pool
 attr_reader :fallback
 
 def initialize
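The old weighted-bucket selection with fallback retries is replaced above by a direct CRC32-modulo mapping from key to server. A standalone sketch of that mapping (host names are hypothetical):

    require 'zlib'

    hosts = ['cache1:11211', 'cache2:11211', 'cache3:11211']
    index = Zlib.crc32('user:42') % hosts.length
    # 'user:42' is always served by hosts[index] while the server list stays the same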
@@ -1097,22 +351,14 @@ class MemCache
 def []=(scope, cache)
 @cache_by_scope[scope.to_sym] = cache
 end
-
-def reset
-@cache_by_scope.values.each {|c| c.reset}
-end
 end
 
 def self.pool
-@@cache_pool ||=
+@@cache_pool ||= Pool.new
 end
-
 end
 
-<<<<<<< HEAD
-=======
 # Add flags and cas
 class Object
 attr_accessor :memcache_flags, :memcache_cas
 end
->>>>>>> refactor
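Memcache.pool above memoizes a single Pool and registers caches by scope through []=. A hedged sketch of registering a scoped cache, assuming Pool also exposes a matching reader (only the writer appears in this diff) and the :servers option assumed earlier:

    Memcache.pool[:sessions] = Memcache.new(:servers => ['localhost:11211'])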