jjp-memcache-client 1.8.7

data/bin/memcached_top ADDED
@@ -0,0 +1,56 @@
1
+ #!/usr/bin/env ruby
2
+
3
+ require 'optparse'
4
+ require 'ostruct'
5
+ require 'socket'
6
+
7
+ @options = OpenStruct.new
8
+ @options.hostname = 'localhost'
9
+ @options.port = 11211
10
+
11
+ op = OptionParser.new do |opts|
12
+ opts.banner = "View memcached server statistics\nUsage: #{$0} [options]"
13
+ opts.separator "General Options:"
14
+ opts.on("-h HOSTNAME", "--hostname=HOSTNAME", "Hostname [default: localhost]") do |h|
15
+ @options.hostname = h
16
+ end
17
+ opts.on("-p PORT", "--port=PORT", Integer, "Port [default: 11211]") do |p|
18
+ @options.port = p
19
+ end
20
+ opts.on_tail("--help", "Show this message") do
21
+ puts opts
22
+ exit
23
+ end
24
+ end
25
+ op.parse!
26
+
27
+ def stats_data
28
+ data = ''
29
+ sock = TCPSocket.new(@options.hostname, @options.port)
30
+ sock.print("stats\r\n")
31
+ sock.flush
32
+ # memcached does not close the socket once it is done writing
33
+ # the stats data. We need to read line by line until we detect
34
+ # the END line and then stop/close on our side.
35
+ stats = sock.gets
36
+ while true
37
+ data += stats
38
+ break if stats.strip == 'END'
39
+ stats = sock.gets
40
+ end
41
+ sock.close
42
+ data
43
+ end
44
+
45
+ def parse(stats_data)
46
+ stats = []
47
+ stats_data.each_line do |line|
48
+ stats << "#{$1}: #{$2}" if line =~ /STAT (\w+) (\S+)/
49
+ end
50
+ stats.sort
51
+ end
52
+
53
+ stats = parse(stats_data)
54
+ stats.each do |stat|
55
+ puts stat
56
+ end
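The counters this script prints are also exposed programmatically through MemCache#stats, added in data/lib/memcache.rb below; a rough equivalent (the server address and the chosen counters are only examples) looks like:

    require 'memcache'

    cache = MemCache.new('localhost:11211')
    cache.stats.each do |server, counters|
      puts "#{server} hits=#{counters['get_hits']} misses=#{counters['get_misses']}"
    end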
data/lib/continuum_native.rb ADDED
@@ -0,0 +1,41 @@
1
+ module Continuum
2
+
3
+ class << self
4
+
5
+ # Native extension to perform the binary search within the continuum
6
+ # space. There's a pure Ruby version in memcache.rb, so this extension is an
7
+ # optional performance optimization that only matters if you are using
8
+ # multiple memcached servers.
9
+ begin
10
+ require 'inline'
11
+ inline do |builder|
12
+ builder.c <<-EOM
13
+ int binary_search(VALUE ary, unsigned int r) {
14
+ int upper = RARRAY_LEN(ary) - 1;
15
+ int lower = 0;
16
+ int idx = 0;
17
+ ID value = rb_intern("value");
18
+
19
+ while (lower <= upper) {
20
+ idx = (lower + upper) / 2;
21
+
22
+ VALUE continuumValue = rb_funcall(RARRAY_PTR(ary)[idx], value, 0);
23
+ unsigned int l = NUM2UINT(continuumValue);
24
+ if (l == r) {
25
+ return idx;
26
+ }
27
+ else if (l > r) {
28
+ upper = idx - 1;
29
+ }
30
+ else {
31
+ lower = idx + 1;
32
+ }
33
+ }
34
+ return upper;
35
+ }
36
+ EOM
37
+ end
38
+ rescue Exception => e
39
+ end
40
+ end
41
+ end
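The C routine above mirrors Continuum.binary_search in data/lib/memcache.rb: it returns the index of the last entry whose value is less than or equal to the probe, or -1 when the probe is below every entry. A small pure-Ruby sketch of that contract (Entry is a stand-in struct and the values are made up):

    Entry = Struct.new(:value)
    ring  = [10, 20, 30, 40].map { |v| Entry.new(v) }

    def binary_search(ary, probe)
      lower, upper = 0, ary.size - 1
      while lower <= upper
        idx  = (lower + upper) / 2
        comp = ary[idx].value <=> probe
        return idx if comp == 0
        if comp > 0
          upper = idx - 1
        else
          lower = idx + 1
        end
      end
      upper
    end

    binary_search(ring, 25)  # => 1, the entry holding 20
    binary_search(ring, 5)   # => -1, probe below the first entry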
data/lib/memcache.rb ADDED
@@ -0,0 +1,1210 @@
1
+ # encoding: utf-8
2
+ $TESTING = defined?($TESTING) && $TESTING
3
+
4
+ require 'socket'
5
+ require 'thread'
6
+ require 'zlib'
7
+ require 'digest/sha1'
8
+ require 'net/protocol'
9
+ require 'memcache/version'
10
+
11
+ begin
12
+ # Try to use the SystemTimer gem instead of Ruby's timeout library
13
+ # when running on Ruby 1.8.x. See:
14
+ # http://ph7spot.com/articles/system_timer
15
+ # We don't want to bother trying to load SystemTimer on jruby,
16
+ # ruby 1.9+ and rbx.
17
+ if !defined?(RUBY_ENGINE) || (RUBY_ENGINE == 'ruby' && RUBY_VERSION < '1.9.0')
18
+ require 'system_timer'
19
+ MemCacheTimer = SystemTimer
20
+ else
21
+ require 'timeout'
22
+ MemCacheTimer = Timeout
23
+ end
24
+ rescue LoadError => e
25
+ require 'timeout'
26
+ MemCacheTimer = Timeout
27
+ end
28
+
29
+ if !''.respond_to?(:bytesize)
30
+ class String
31
+ alias_method :bytesize, :size
32
+ end
33
+ end
34
+
35
+
36
+ ##
37
+ # A Ruby client library for memcached.
38
+ #
39
+
40
+ class MemCache
41
+
42
+ ##
43
+ # Default options for the cache object.
44
+
45
+ DEFAULT_OPTIONS = {
46
+ :namespace => nil,
47
+ :readonly => false,
48
+ :multithread => true,
49
+ :failover => true,
50
+ :timeout => 0.5,
51
+ :logger => nil,
52
+ :no_reply => false,
53
+ :check_size => true,
54
+ :autofix_keys => false,
55
+ :namespace_separator => ':',
56
+ }
57
+
58
+ ##
59
+ # Default memcached port.
60
+
61
+ DEFAULT_PORT = 11211
62
+
63
+ ##
64
+ # Default memcached server weight.
65
+
66
+ DEFAULT_WEIGHT = 1
67
+
68
+ ##
69
+ # The namespace for this instance
70
+
71
+ attr_reader :namespace
72
+
73
+ ##
74
+ # The multithread setting for this instance
75
+
76
+ attr_reader :multithread
77
+
78
+ ##
79
+ # Whether to try to fix keys that are too long and will be truncated by
80
+ # using their SHA1 hash instead.
81
+ # The hash is only used on keys longer than 250 characters, or containing spaces,
82
+ # to avoid impacting performance unnecessarily.
83
+ #
84
+ # In theory, your code should generate correct keys when calling memcache,
85
+ # so it's your responsibility and you should try to fix this problem at its source.
86
+ #
87
+ # But if that's not possible, enable this option and memcache-client will give you a hand.
88
+
89
+ attr_reader :autofix_keys
90
+
91
+ ##
92
+ # The servers this client talks to. Play at your own peril.
93
+
94
+ attr_reader :servers
95
+
96
+ ##
97
+ # Socket timeout limit with this client, defaults to 0.5 sec.
98
+ # Set to nil to disable timeouts.
99
+
100
+ attr_reader :timeout
101
+
102
+ ##
103
+ # Should the client try to failover to another server if the
104
+ # first server is down? Defaults to true.
105
+
106
+ attr_reader :failover
107
+
108
+ ##
109
+ # Log debug/info/warn/error to the given Logger, defaults to nil.
110
+
111
+ attr_reader :logger
112
+
113
+ ##
114
+ # Don't send or look for a reply from the memcached server for write operations.
115
+ # Please note this feature only works in memcached 1.2.5 and later. Earlier
116
+ # versions will reply with "ERROR".
117
+ attr_reader :no_reply
118
+
119
+ ##
120
+ # Accepts a list of +servers+ and a list of +opts+. +servers+ may be
121
+ # omitted. See +servers=+ for acceptable server list arguments.
122
+ #
123
+ # Valid options for +opts+ are:
124
+ #
125
+ # [:namespace] Prepends this value to all keys added or retrieved.
126
+ # [:readonly] Raises an exception on cache writes when true.
127
+ # [:multithread] Wraps cache access in a Mutex for thread safety. Defaults to true.
128
+ # [:failover] Should the client try to failover to another server if the
129
+ # first server is down? Defaults to true.
130
+ # [:timeout] Time to use as the socket read timeout. Defaults to 0.5 sec,
131
+ # set to nil to disable timeouts.
132
+ # [:logger] Logger to use for info/debug output, defaults to nil
133
+ # [:no_reply] Don't bother looking for a reply for write operations (i.e. they
134
+ # become 'fire and forget'), memcached 1.2.5 and later only, speeds up
135
+ # set/add/delete/incr/decr significantly.
136
+ # [:check_size] Raises a MemCacheError if the value to be set is greater than 1 MB, which
137
+ # is the maximum value size for the standard memcached server. Defaults to true.
138
+ # [:autofix_keys] If a key is longer than 250 characters or contains spaces,
139
+ # use an SHA1 hash instead, to prevent collisions on truncated keys.
140
+ # Other options are ignored.
141
+
142
+ def initialize(*args)
143
+ servers = []
144
+ opts = {}
145
+
146
+ case args.length
147
+ when 0 then # NOP
148
+ when 1 then
149
+ arg = args.shift
150
+ case arg
151
+ when Hash then opts = arg
152
+ when Array then servers = arg
153
+ when String then servers = [arg]
154
+ else raise ArgumentError, 'first argument must be Array, Hash or String'
155
+ end
156
+ when 2 then
157
+ servers, opts = args
158
+ else
159
+ raise ArgumentError, "wrong number of arguments (#{args.length} for 2)"
160
+ end
161
+
162
+ @evented = defined?(EM) && EM.reactor_running?
163
+ opts = DEFAULT_OPTIONS.merge opts
164
+ @namespace = opts[:namespace]
165
+ @readonly = opts[:readonly]
166
+ @multithread = opts[:multithread] && !@evented
167
+ @autofix_keys = opts[:autofix_keys]
168
+ @timeout = opts[:timeout]
169
+ @failover = opts[:failover]
170
+ @logger = opts[:logger]
171
+ @no_reply = opts[:no_reply]
172
+ @check_size = opts[:check_size]
173
+ @namespace_separator = opts[:namespace_separator]
174
+ @mutex = Mutex.new if @multithread
175
+
176
+ logger.info { "memcache-client #{VERSION} #{Array(servers).inspect}" } if logger
177
+
178
+ Thread.current[:memcache_client] = self.object_id if !@multithread
179
+
180
+
181
+ self.servers = servers
182
+ end
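# A typical construction using a few of the options documented above; the
# addresses, namespace and weight are only examples:
require 'memcache'

cache = MemCache.new(['10.0.0.1:11211', '10.0.0.2:11211:2'],  # second server gets weight 2
                     :namespace   => 'myapp',
                     :timeout     => 0.25,
                     :multithread => true)
cache.set 'greeting', 'hello'   # marshalled by default
cache.get 'greeting'            # => "hello"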
183
+
184
+ ##
185
+ # Returns a string representation of the cache object.
186
+
187
+ def inspect
188
+ "<MemCache: %d servers, ns: %p, ro: %p>" %
189
+ [@servers.length, @namespace, @readonly]
190
+ end
191
+
192
+ ##
193
+ # Returns whether there is at least one active server for the object.
194
+
195
+ def active?
196
+ not @servers.empty?
197
+ end
198
+
199
+ ##
200
+ # Returns whether or not the cache object was created read only.
201
+
202
+ def readonly?
203
+ @readonly
204
+ end
205
+
206
+ ##
207
+ # Set the servers that the requests will be distributed between. Entries
208
+ # can be either strings of the form "hostname:port" or
209
+ # "hostname:port:weight" or MemCache::Server objects.
210
+ #
211
+ def servers=(servers)
212
+ # Create the server objects.
213
+ @servers = Array(servers).collect do |server|
214
+ case server
215
+ when String
216
+ host, port, weight = server.split ':', 3
217
+ port ||= DEFAULT_PORT
218
+ weight ||= DEFAULT_WEIGHT
219
+ Server.new self, host, port, weight
220
+ else
221
+ server
222
+ end
223
+ end
224
+
225
+ logger.debug { "Servers now: #{@servers.inspect}" } if logger
226
+
227
+ # There's no point in doing this if there's only one server
228
+ @continuum = create_continuum_for(@servers) if @servers.size > 1
229
+
230
+ @servers
231
+ end
232
+
233
+ ##
234
+ # Decrements the value for +key+ by +amount+ and returns the new value.
235
+ # +key+ must already exist. If the current value is not an integer, it is
236
+ # treated as 0. The value cannot be decremented below 0.
237
+
238
+ def decr(key, amount = 1)
239
+ raise MemCacheError, "Update of readonly cache" if @readonly
240
+ with_server(key) do |server, cache_key|
241
+ cache_decr server, cache_key, amount
242
+ end
243
+ rescue TypeError => err
244
+ handle_error nil, err
245
+ end
246
+
247
+ ##
248
+ # Retrieves +key+ from memcache. If +raw+ is false, the value will be
249
+ # unmarshalled.
250
+
251
+ def get(key, raw = false)
252
+ with_server(key) do |server, cache_key|
253
+ logger.debug { "get #{key} from #{server.inspect}" } if logger
254
+ value = cache_get server, cache_key
255
+ return nil if value.nil?
256
+ value = Marshal.load value unless raw
257
+ return value
258
+ end
259
+ rescue TypeError => err
260
+ handle_error nil, err
261
+ end
262
+
263
+ ##
264
+ # Performs a +get+ with the given +key+. If
265
+ # the value does not exist and a block was given,
266
+ # the block will be called and the result saved via +add+.
267
+ #
268
+ # If you do not provide a block, using this
269
+ # method is the same as using +get+.
270
+ #
271
+ def fetch(key, expiry = 0, raw = false)
272
+ value = get(key, raw)
273
+
274
+ if value.nil? && block_given?
275
+ value = yield
276
+ add(key, value, expiry, raw)
277
+ end
278
+
279
+ value
280
+ end
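# Read-through sketch for fetch: the block runs only on a miss and its result
# is stored via add with the given expiry (address, key and value are examples).
cache = MemCache.new('localhost:11211')
answer = cache.fetch('answer', 300) do
  sleep 1   # stand-in for an expensive computation
  42
end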
281
+
282
+ ##
283
+ # Retrieves multiple values from memcached in parallel, if possible.
284
+ #
285
+ # The memcached protocol supports the ability to retrieve multiple
286
+ # keys in a single request. Pass in an array of keys to this method
287
+ # and it will:
288
+ #
289
+ # 1. map the key to the appropriate memcached server
290
+ # 2. send a single request to each server that has one or more key values
291
+ #
292
+ # Returns a hash of values.
293
+ #
294
+ # cache["a"] = 1
295
+ # cache["b"] = 2
296
+ # cache.get_multi "a", "b" # => { "a" => 1, "b" => 2 }
297
+ #
298
+ # Note that get_multi assumes the values are marshalled. You can pass
299
+ # in :raw => true to bypass value marshalling.
300
+ #
301
+ # cache.get_multi('a', 'b', ..., :raw => true)
302
+
303
+ def get_multi(*keys)
304
+ raise MemCacheError, 'No active servers' unless active?
305
+
306
+ opts = keys.last.is_a?(Hash) ? keys.pop : {}
307
+
308
+ keys.flatten!
309
+ key_count = keys.length
310
+ cache_keys = {}
311
+ server_keys = Hash.new { |h,k| h[k] = [] }
312
+
313
+ # map keys to servers
314
+ keys.each do |key|
315
+ server, cache_key = request_setup key
316
+ cache_keys[cache_key] = key
317
+ server_keys[server] << cache_key
318
+ end
319
+
320
+ results = {}
321
+ raw = opts[:raw] || false
322
+ server_keys.each do |server, keys_for_server|
323
+ keys_for_server_str = keys_for_server.join ' '
324
+ begin
325
+ values = cache_get_multi server, keys_for_server_str
326
+ values.each do |key, value|
327
+ results[cache_keys[key]] = raw ? value : Marshal.load(value)
328
+ end
329
+ rescue IndexError => e
330
+ # Ignore this server and try the others
331
+ logger.warn { "Unable to retrieve #{keys_for_server.size} elements from #{server.inspect}: #{e.message}"} if logger
332
+ end
333
+ end
334
+
335
+ return results
336
+ rescue TypeError => err
337
+ handle_error nil, err
338
+ end
339
+
340
+ ##
341
+ # Increments the value for +key+ by +amount+ and returns the new value.
342
+ # +key+ must already exist. If the current value is not an integer, it is
343
+ # treated as 0.
344
+
345
+ def incr(key, amount = 1)
346
+ raise MemCacheError, "Update of readonly cache" if @readonly
347
+ with_server(key) do |server, cache_key|
348
+ cache_incr server, cache_key, amount
349
+ end
350
+ rescue TypeError => err
351
+ handle_error nil, err
352
+ end
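# Counter sketch for incr/decr: a stock memcached increments the decimal
# string stored under the key, so the value is written with raw = true
# rather than being marshalled (address and key are examples).
cache = MemCache.new('localhost:11211')
cache.set 'hits', '0', 0, true
cache.incr 'hits'       # => 1
cache.incr 'hits', 10   # => 11
cache.decr 'hits', 5    # => 6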
353
+
354
+ ##
355
+ # Add +key+ to the cache with value +value+ that expires in +expiry+
356
+ # seconds. If +raw+ is true, +value+ will not be Marshalled.
357
+ #
358
+ # Warning: Readers should not call this method in the event of a cache miss;
359
+ # see MemCache#add.
360
+
361
+ ONE_MB = 1024 * 1024
362
+
363
+ def set(key, value, expiry = 0, raw = false)
364
+ raise MemCacheError, "Update of readonly cache" if @readonly
365
+
366
+ value = Marshal.dump value unless raw
367
+ with_server(key) do |server, cache_key|
368
+ logger.debug { "set #{key} to #{server.inspect}: #{value.to_s.bytesize}" } if logger
369
+
370
+ if @check_size && value.to_s.bytesize > ONE_MB
371
+ raise MemCacheError, "Value in key '#{cache_key}' too large, memcached can only store 1MB of data per key"
372
+ end
373
+
374
+ with_socket_management(server) do |socket|
375
+ socket.write "set #{cache_key} 0 #{expiry} #{value.to_s.bytesize}#{noreply}\r\n"
376
+ socket.write value.to_s
377
+ socket.write "\r\n"
378
+ break nil if @no_reply
379
+ result = socket.gets
380
+ raise_on_error_response! result
381
+
382
+ if result.nil?
383
+ server.close
384
+ raise MemCacheError, "lost connection to #{server.host}:#{server.port}"
385
+ end
386
+
387
+ result
388
+ end
389
+ end
390
+ end
391
+
392
+ ##
393
+ # "cas" is a check and set operation which means "store this data but
394
+ # only if no one else has updated since I last fetched it." This can
395
+ # be used as a form of optimistic locking.
396
+ #
397
+ # Works in block form like so:
398
+ # cache.cas('some-key') do |value|
399
+ # value + 1
400
+ # end
401
+ #
402
+ # Returns:
403
+ # +nil+ if the value was not found on the memcached server.
404
+ # +STORED+ if the value was updated successfully
405
+ # +EXISTS+ if the value was updated by someone else since last fetch
406
+
407
+ def cas(key, expiry=0, raw=false)
408
+ raise MemCacheError, "Update of readonly cache" if @readonly
409
+ raise MemCacheError, "A block is required" unless block_given?
410
+
411
+ (value, token) = gets(key, raw)
412
+ return nil unless value
413
+ updated = yield value
414
+ value = raw ? updated : Marshal.dump(updated)
415
+
416
+ with_server(key) do |server, cache_key|
417
+ logger.debug { "cas #{key} to #{server.inspect}: #{value.to_s.bytesize}" } if logger
418
+ command = "cas #{cache_key} 0 #{expiry} #{value.to_s.bytesize} #{token}#{noreply}\r\n#{value}\r\n"
419
+
420
+ with_socket_management(server) do |socket|
421
+ socket.write command
422
+ break nil if @no_reply
423
+ result = socket.gets
424
+ raise_on_error_response! result
425
+
426
+ if result.nil?
427
+ server.close
428
+ raise MemCacheError, "lost connection to #{server.host}:#{server.port}"
429
+ end
430
+
431
+ result
432
+ end
433
+ end
434
+ end
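# cas sketch: retry the optimistic update a few times, because the server
# answers EXISTS whenever another client changed the key between the gets
# and the cas (address, key and retry bound are examples).
cache = MemCache.new('localhost:11211')
cache.set 'counter', 0
5.times do
  result = cache.cas('counter') { |current| current + 1 }
  break unless result.to_s =~ /\AEXISTS/
end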
435
+
436
+ ##
437
+ # Add +key+ to the cache with value +value+ that expires in +expiry+
438
+ # seconds, but only if +key+ does not already exist in the cache.
439
+ # If +raw+ is true, +value+ will not be Marshalled.
440
+ #
441
+ # Readers should call this method in the event of a cache miss, not
442
+ # MemCache#set.
443
+
444
+ def add(key, value, expiry = 0, raw = false)
445
+ raise MemCacheError, "Update of readonly cache" if @readonly
446
+ value = Marshal.dump value unless raw
447
+ with_server(key) do |server, cache_key|
448
+ logger.debug { "add #{key} to #{server}: #{value ? value.to_s.bytesize : 'nil'}" } if logger
449
+ command = "add #{cache_key} 0 #{expiry} #{value.to_s.bytesize}#{noreply}\r\n#{value}\r\n"
450
+
451
+ with_socket_management(server) do |socket|
452
+ socket.write command
453
+ break nil if @no_reply
454
+ result = socket.gets
455
+ raise_on_error_response! result
456
+ result
457
+ end
458
+ end
459
+ end
460
+
461
+ ##
462
+ # Add +key+ to the cache with value +value+ that expires in +expiry+
463
+ # seconds, but only if +key+ already exists in the cache.
464
+ # If +raw+ is true, +value+ will not be Marshalled.
465
+ def replace(key, value, expiry = 0, raw = false)
466
+ raise MemCacheError, "Update of readonly cache" if @readonly
467
+ value = Marshal.dump value unless raw
468
+ with_server(key) do |server, cache_key|
469
+ logger.debug { "replace #{key} to #{server}: #{value ? value.to_s.bytesize : 'nil'}" } if logger
470
+ command = "replace #{cache_key} 0 #{expiry} #{value.to_s.bytesize}#{noreply}\r\n#{value}\r\n"
471
+
472
+ with_socket_management(server) do |socket|
473
+ socket.write command
474
+ break nil if @no_reply
475
+ result = socket.gets
476
+ raise_on_error_response! result
477
+ result
478
+ end
479
+ end
480
+ end
481
+
482
+ ##
483
+ # Append - 'add this data to an existing key after existing data'
484
+ # Please note the value is always passed to memcached as raw since it
485
+ # doesn't make a lot of sense to concatenate marshalled data together.
486
+ def append(key, value)
487
+ raise MemCacheError, "Update of readonly cache" if @readonly
488
+ with_server(key) do |server, cache_key|
489
+ logger.debug { "append #{key} to #{server}: #{value ? value.to_s.bytesize : 'nil'}" } if logger
490
+ command = "append #{cache_key} 0 0 #{value.to_s.bytesize}#{noreply}\r\n#{value}\r\n"
491
+
492
+ with_socket_management(server) do |socket|
493
+ socket.write command
494
+ break nil if @no_reply
495
+ result = socket.gets
496
+ raise_on_error_response! result
497
+ result
498
+ end
499
+ end
500
+ end
501
+
502
+ ##
503
+ # Prepend - 'add this data to an existing key before existing data'
504
+ # Please note the value is always passed to memcached as raw since it
505
+ # doesn't make a lot of sense to concatenate marshalled data together.
506
+ def prepend(key, value)
507
+ raise MemCacheError, "Update of readonly cache" if @readonly
508
+ with_server(key) do |server, cache_key|
509
+ logger.debug { "prepend #{key} to #{server}: #{value ? value.to_s.bytesize : 'nil'}" } if logger
510
+ command = "prepend #{cache_key} 0 0 #{value.to_s.bytesize}#{noreply}\r\n#{value}\r\n"
511
+
512
+ with_socket_management(server) do |socket|
513
+ socket.write command
514
+ break nil if @no_reply
515
+ result = socket.gets
516
+ raise_on_error_response! result
517
+ result
518
+ end
519
+ end
520
+ end
521
+
522
+ ##
523
+ # Removes +key+ from the cache.
524
+ # +expiry+ is ignored; the delete-with-delay form was removed from newer memcached versions.
525
+
526
+ def delete(key, expiry = 0)
527
+ raise MemCacheError, "Update of readonly cache" if @readonly
528
+ with_server(key) do |server, cache_key|
529
+ with_socket_management(server) do |socket|
530
+ logger.debug { "delete #{cache_key} on #{server}" } if logger
531
+ socket.write "delete #{cache_key}#{noreply}\r\n"
532
+ break nil if @no_reply
533
+ result = socket.gets
534
+ raise_on_error_response! result
535
+ result
536
+ end
537
+ end
538
+ end
539
+
540
+ ##
541
+ # Flush the cache from all memcache servers.
542
+ # A non-zero value for +delay+ will ensure that the flush
543
+ # is propagated slowly through your memcached server farm.
544
+ # The Nth server will be flushed N*delay seconds from now,
545
+ # asynchronously so this method returns quickly.
546
+ # This prevents a huge database spike due to a total
547
+ # flush all at once.
548
+
549
+ def flush_all(delay=0)
550
+ raise MemCacheError, 'No active servers' unless active?
551
+ raise MemCacheError, "Update of readonly cache" if @readonly
552
+
553
+ begin
554
+ delay_time = 0
555
+ @servers.each do |server|
556
+ with_socket_management(server) do |socket|
557
+ logger.debug { "flush_all #{delay_time} on #{server}" } if logger
558
+ if delay == 0 # older versions of memcached will fail silently otherwise
559
+ socket.write "flush_all#{noreply}\r\n"
560
+ else
561
+ socket.write "flush_all #{delay_time}#{noreply}\r\n"
562
+ end
563
+ break nil if @no_reply
564
+ result = socket.gets
565
+ raise_on_error_response! result
566
+ result
567
+ end
568
+ delay_time += delay
569
+ end
570
+ rescue IndexError => err
571
+ handle_error nil, err
572
+ end
573
+ end
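# flush_all staggering sketch: with three servers this asks the first to
# flush immediately, the second after 30 seconds and the third after 60,
# and returns without waiting (addresses and delay are examples).
cache = MemCache.new(['10.0.0.1:11211', '10.0.0.2:11211', '10.0.0.3:11211'])
cache.flush_all(30)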
574
+
575
+ ##
576
+ # Reset the connection to all memcache servers. This should be called if
577
+ # there is a problem with a cache lookup that might have left the connection
578
+ # in a corrupted state.
579
+
580
+ def reset
581
+ @servers.each { |server| server.close }
582
+ end
583
+
584
+ ##
585
+ # Returns statistics for each memcached server. An explanation of the
586
+ # statistics can be found in the memcached docs:
587
+ #
588
+ # http://code.sixapart.com/svn/memcached/trunk/server/doc/protocol.txt
589
+ #
590
+ # Example:
591
+ #
592
+ # >> pp CACHE.stats
593
+ # {"localhost:11211"=>
594
+ # {"bytes"=>4718,
595
+ # "pid"=>20188,
596
+ # "connection_structures"=>4,
597
+ # "time"=>1162278121,
598
+ # "pointer_size"=>32,
599
+ # "limit_maxbytes"=>67108864,
600
+ # "cmd_get"=>14532,
601
+ # "version"=>"1.2.0",
602
+ # "bytes_written"=>432583,
603
+ # "cmd_set"=>32,
604
+ # "get_misses"=>0,
605
+ # "total_connections"=>19,
606
+ # "curr_connections"=>3,
607
+ # "curr_items"=>4,
608
+ # "uptime"=>1557,
609
+ # "get_hits"=>14532,
610
+ # "total_items"=>32,
611
+ # "rusage_system"=>0.313952,
612
+ # "rusage_user"=>0.119981,
613
+ # "bytes_read"=>190619}}
614
+ # => nil
615
+
616
+ def stats
617
+ raise MemCacheError, "No active servers" unless active?
618
+ server_stats = {}
619
+
620
+ @servers.each do |server|
621
+ next unless server.alive?
622
+
623
+ with_socket_management(server) do |socket|
624
+ value = nil
625
+ socket.write "stats\r\n"
626
+ stats = {}
627
+ while line = socket.gets do
628
+ raise_on_error_response! line
629
+ break if line == "END\r\n"
630
+ if line =~ /\ASTAT ([\S]+) ([\w\.\:]+)/ then
631
+ name, value = $1, $2
632
+ stats[name] = case name
633
+ when 'version'
634
+ value
635
+ when 'rusage_user', 'rusage_system' then
636
+ seconds, microseconds = value.split(/:/, 2)
637
+ microseconds ||= 0
638
+ Float(seconds) + (Float(microseconds) / 1_000_000)
639
+ else
640
+ if value =~ /\A\d+\Z/ then
641
+ value.to_i
642
+ else
643
+ value
644
+ end
645
+ end
646
+ end
647
+ end
648
+ server_stats["#{server.host}:#{server.port}"] = stats
649
+ end
650
+ end
651
+
652
+ raise MemCacheError, "No active servers" if server_stats.empty?
653
+ server_stats
654
+ end
655
+
656
+ ##
657
+ # Shortcut to get a value from the cache.
658
+
659
+ alias [] get
660
+
661
+ ##
662
+ # Shortcut to save a value in the cache. This method does not set an
663
+ # expiration on the entry. Use set to specify an explicit expiry.
664
+
665
+ def []=(key, value)
666
+ set key, value
667
+ end
668
+
669
+ protected unless $TESTING
670
+
671
+ ##
672
+ # Create a key for the cache, incorporating the namespace qualifier if
673
+ # requested.
674
+
675
+ def make_cache_key(key)
676
+ if @autofix_keys && (key =~ /\s/ || key_length(key) > 250)
677
+ key = "#{Digest::SHA1.hexdigest(key)}-autofixed"
678
+ end
679
+
680
+ if namespace.nil?
681
+ key
682
+ else
683
+ "#{@namespace}#{@namespace_separator}#{key}"
684
+ end
685
+ end
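# Key construction sketch (namespace and keys are examples); make_cache_key
# is protected, so send is used here purely for illustration.
cache = MemCache.new('localhost:11211',
                     :namespace => 'myapp', :autofix_keys => true)
cache.send(:make_cache_key, 'user:1')
# => "myapp:user:1"
cache.send(:make_cache_key, 'a key with spaces')
# => "myapp:" plus the SHA1 hex of the key plus "-autofixed"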
686
+
687
+ ##
688
+ # Calculate length of the key, including the namespace and namespace-separator.
689
+
690
+ def key_length(key)
691
+ key.length + (namespace.nil? ? 0 : ( namespace.length + (@namespace_separator.nil? ? 0 : @namespace_separator.length) ) )
692
+ end
693
+
694
+ ##
695
+ # Returns an interoperable hash value for +key+. (I think, docs are
696
+ # sketchy for down servers).
697
+
698
+ def hash_for(key)
699
+ Zlib.crc32(key)
700
+ end
701
+
702
+ ##
703
+ # Pick a server to handle the request based on a hash of the key.
704
+
705
+ def get_server_for_key(key, options = {})
706
+ raise ArgumentError, "illegal character in key #{key.inspect}" if
707
+ key =~ /\s/
708
+ raise ArgumentError, "key cannot be blank" if key.nil? || key.strip.size == 0
709
+ raise ArgumentError, "key too long #{key.inspect}" if key.length > 250
710
+ raise MemCacheError, "No servers available" if @servers.empty?
711
+ return @servers.first if @servers.length == 1
712
+
713
+ hkey = hash_for(key)
714
+
715
+ 20.times do |try|
716
+ entryidx = Continuum.binary_search(@continuum, hkey)
717
+ server = @continuum[entryidx].server
718
+ return server if server.alive?
719
+ break unless failover
720
+ hkey = hash_for "#{try}#{key}"
721
+ end
722
+
723
+ raise MemCacheError, "No servers available"
724
+ end
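# Failover probing sketch: when the selected server is dead, the key is
# rehashed with a retry-counter prefix so each attempt can land on a
# different continuum point (key is an example).
require 'zlib'
key    = 'user:1'
probes = [Zlib.crc32(key)] + (0...3).map { |try| Zlib.crc32("#{try}#{key}") }
# each probe is then looked up in the continuum via Continuum.binary_search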
725
+
726
+ ##
727
+ # Performs a raw decr for +cache_key+ from +server+. Returns nil if not
728
+ # found.
729
+
730
+ def cache_decr(server, cache_key, amount)
731
+ with_socket_management(server) do |socket|
732
+ socket.write "decr #{cache_key} #{amount}#{noreply}\r\n"
733
+ break nil if @no_reply
734
+ text = socket.gets
735
+ raise_on_error_response! text
736
+ return nil if text == "NOT_FOUND\r\n"
737
+ return text.to_i
738
+ end
739
+ end
740
+
741
+ ##
742
+ # Fetches the raw data for +cache_key+ from +server+. Returns nil on cache
743
+ # miss.
744
+
745
+ def cache_get(server, cache_key)
746
+ with_socket_management(server) do |socket|
747
+ socket.write "get #{cache_key}\r\n"
748
+ keyline = socket.gets # "VALUE <key> <flags> <bytes>\r\n"
749
+
750
+ if keyline.nil? then
751
+ server.close
752
+ raise MemCacheError, "lost connection to #{server.host}:#{server.port}"
753
+ end
754
+
755
+ raise_on_error_response! keyline
756
+ return nil if keyline == "END\r\n"
757
+
758
+ unless keyline =~ /(\d+)\r/ then
759
+ server.close
760
+ raise MemCacheError, "unexpected response #{keyline.inspect}"
761
+ end
762
+ value = socket.read $1.to_i
763
+ socket.read 2 # "\r\n"
764
+ socket.gets # "END\r\n"
765
+ return value
766
+ end
767
+ end
768
+
769
+ def gets(key, raw = false)
770
+ with_server(key) do |server, cache_key|
771
+ logger.debug { "gets #{key} from #{server.inspect}" } if logger
772
+ result = with_socket_management(server) do |socket|
773
+ socket.write "gets #{cache_key}\r\n"
774
+ keyline = socket.gets # "VALUE <key> <flags> <bytes> <cas token>\r\n"
775
+
776
+ if keyline.nil? then
777
+ server.close
778
+ raise MemCacheError, "lost connection to #{server.host}:#{server.port}"
779
+ end
780
+
781
+ raise_on_error_response! keyline
782
+ return nil if keyline == "END\r\n"
783
+
784
+ unless keyline =~ /(\d+) (\w+)\r/ then
785
+ server.close
786
+ raise MemCacheError, "unexpected response #{keyline.inspect}"
787
+ end
788
+ value = socket.read $1.to_i
789
+ socket.read 2 # "\r\n"
790
+ socket.gets # "END\r\n"
791
+ [value, $2]
792
+ end
793
+ result[0] = Marshal.load result[0] unless raw
794
+ result
795
+ end
796
+ rescue TypeError => err
797
+ handle_error nil, err
798
+ end
799
+
800
+
801
+ ##
802
+ # Fetches +cache_keys+ from +server+ using a multi-get.
803
+
804
+ def cache_get_multi(server, cache_keys)
805
+ with_socket_management(server) do |socket|
806
+ values = {}
807
+ socket.write "get #{cache_keys}\r\n"
808
+
809
+ while keyline = socket.gets do
810
+ return values if keyline == "END\r\n"
811
+ raise_on_error_response! keyline
812
+
813
+ unless keyline =~ /\AVALUE (.+) (.+) (.+)/ then
814
+ server.close
815
+ raise MemCacheError, "unexpected response #{keyline.inspect}"
816
+ end
817
+
818
+ key, data_length = $1, $3
819
+ values[$1] = socket.read data_length.to_i
820
+ socket.read(2) # "\r\n"
821
+ end
822
+
823
+ server.close
824
+ raise MemCacheError, "lost connection to #{server.host}:#{server.port}" # TODO: retry here too
825
+ end
826
+ end
827
+
828
+ ##
829
+ # Performs a raw incr for +cache_key+ from +server+. Returns nil if not
830
+ # found.
831
+
832
+ def cache_incr(server, cache_key, amount)
833
+ with_socket_management(server) do |socket|
834
+ socket.write "incr #{cache_key} #{amount}#{noreply}\r\n"
835
+ break nil if @no_reply
836
+ text = socket.gets
837
+ raise_on_error_response! text
838
+ return nil if text == "NOT_FOUND\r\n"
839
+ return text.to_i
840
+ end
841
+ end
842
+
843
+ ##
844
+ # Gets or creates a socket connected to the given server, and yields it
845
+ # to the block, wrapped in a mutex synchronization if @multithread is true.
846
+ #
847
+ # If a socket error (SocketError, SystemCallError, IOError) or protocol error
848
+ # (MemCacheError) is raised by the block, closes the socket, attempts to
849
+ # connect again, and retries the block (once). If an error is again raised,
850
+ # reraises it as MemCacheError.
851
+ #
852
+ # If unable to connect to the server (or if in the reconnect wait period),
853
+ # raises MemCacheError. Note that the socket connect code marks a server
854
+ # dead for a timeout period, so retrying does not apply to connection attempt
855
+ # failures (but does still apply to unexpectedly lost connections etc.).
856
+
857
+ def with_socket_management(server, &block)
858
+ check_multithread_status!
859
+
860
+ @mutex.lock if @multithread
861
+ retried = false
862
+
863
+ begin
864
+ socket = server.socket
865
+
866
+ # Raise an IndexError to show this server is out of whack. If we're inside
867
+ # a with_server block, we'll catch it and attempt to restart the operation.
868
+
869
+ raise IndexError, "No connection to server (#{server.status})" if socket.nil?
870
+
871
+ block.call(socket)
872
+
873
+ rescue SocketError, Errno::EAGAIN, Timeout::Error => err
874
+ logger.warn { "Socket failure: #{err.message}" } if logger
875
+ server.mark_dead(err)
876
+ handle_error(server, err)
877
+
878
+ rescue MemCacheError, SystemCallError, IOError => err
879
+ logger.warn { "Generic failure: #{err.class.name}: #{err.message}" } if logger
880
+ handle_error(server, err) if retried || socket.nil?
881
+ retried = true
882
+ retry
883
+ end
884
+ ensure
885
+ @mutex.unlock if @multithread
886
+ end
887
+
888
+ def with_server(key)
889
+ retried = false
890
+ begin
891
+ server, cache_key = request_setup(key)
892
+ yield server, cache_key
893
+ rescue IndexError => e
894
+ logger.warn { "Server failed: #{e.class.name}: #{e.message}" } if logger
895
+ if !retried && @servers.size > 1
896
+ logger.info { "Connection to server #{server.inspect} DIED! Retrying operation..." } if logger
897
+ retried = true
898
+ retry
899
+ end
900
+ handle_error(nil, e)
901
+ end
902
+ end
903
+
904
+ ##
905
+ # Handles +error+ from +server+.
906
+
907
+ def handle_error(server, error)
908
+ raise error if error.is_a?(MemCacheError)
909
+ server.close if server && server.status == "CONNECTED"
910
+ new_error = MemCacheError.new error.message
911
+ new_error.set_backtrace error.backtrace
912
+ raise new_error
913
+ end
914
+
915
+ def noreply
916
+ @no_reply ? ' noreply' : ''
917
+ end
918
+
919
+ ##
920
+ # Performs setup for making a request with +key+ from memcached. Returns
921
+ # the server to fetch the key from and the complete key to use.
922
+
923
+ def request_setup(key)
924
+ raise MemCacheError, 'No active servers' unless active?
925
+ cache_key = make_cache_key key
926
+ server = get_server_for_key cache_key
927
+ return server, cache_key
928
+ end
929
+
930
+ def raise_on_error_response!(response)
931
+ if response =~ /\A(?:CLIENT_|SERVER_)?ERROR(.*)/
932
+ raise MemCacheError, $1.strip
933
+ end
934
+ end
935
+
936
+ def create_continuum_for(servers)
937
+ total_weight = servers.inject(0) { |memo, srv| memo + srv.weight }
938
+ continuum = []
939
+
940
+ servers.each do |server|
941
+ entry_count_for(server, servers.size, total_weight).times do |idx|
942
+ hash = Digest::SHA1.hexdigest("#{server.host}:#{server.port}:#{idx}")
943
+ value = Integer("0x#{hash[0..7]}")
944
+ continuum << Continuum::Entry.new(value, server)
945
+ end
946
+ end
947
+
948
+ continuum.sort { |a, b| a.value <=> b.value }
949
+ end
950
+
951
+ def entry_count_for(server, total_servers, total_weight)
952
+ ((total_servers * Continuum::POINTS_PER_SERVER * server.weight) / Float(total_weight)).floor
953
+ end
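# Worked weighting example (weights are hypothetical): two servers with
# weights 1 and 3 give total_weight 4, so they receive 80 and 240 continuum
# points respectively, 320 in total, i.e. 2 * POINTS_PER_SERVER.
(2 * 160 * 1 / 4.0).floor   # => 80
(2 * 160 * 3 / 4.0).floor   # => 240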
954
+
955
+ def check_multithread_status!
956
+ return if @multithread
957
+ return if @evented
958
+
959
+ if Thread.current[:memcache_client] != self.object_id
960
+ raise MemCacheError, <<-EOM
961
+ You are accessing this memcache-client instance from multiple threads but have not enabled multithread support.
962
+ Normally: MemCache.new(['localhost:11211'], :multithread => true)
963
+ In Rails: config.cache_store = [:mem_cache_store, 'localhost:11211', { :multithread => true }]
964
+ EOM
965
+ end
966
+ end
967
+
968
+ ##
969
+ # This class represents a memcached server instance.
970
+
971
+ class Server
972
+
973
+ ##
974
+ # The amount of time to wait before attempting to re-establish a
975
+ # connection with a server that is marked dead.
976
+
977
+ RETRY_DELAY = 30.0
978
+
979
+ ##
980
+ # The host the memcached server is running on.
981
+
982
+ attr_reader :host
983
+
984
+ ##
985
+ # The port the memcached server is listening on.
986
+
987
+ attr_reader :port
988
+
989
+ ##
990
+ # The weight given to the server.
991
+
992
+ attr_reader :weight
993
+
994
+ ##
995
+ # The time of next retry if the connection is dead.
996
+
997
+ attr_reader :retry
998
+
999
+ ##
1000
+ # A text status string describing the state of the server.
1001
+
1002
+ attr_reader :status
1003
+
1004
+ attr_reader :logger
1005
+
1006
+ ##
1007
+ # Create a new MemCache::Server object for the memcached instance
1008
+ # listening on the given host and port, weighted by the given weight.
1009
+
1010
+ def initialize(memcache, host, port = DEFAULT_PORT, weight = DEFAULT_WEIGHT)
1011
+ raise ArgumentError, "No host specified" if host.nil? or host.empty?
1012
+ raise ArgumentError, "No port specified" if port.nil? or port.to_i.zero?
1013
+
1014
+ @host = host
1015
+ @port = port.to_i
1016
+ @weight = weight.to_i
1017
+
1018
+ @sock = nil
1019
+ @retry = nil
1020
+ @status = 'NOT CONNECTED'
1021
+ @timeout = memcache.timeout
1022
+ @logger = memcache.logger
1023
+
1024
+ if defined?(EM) and EM.reactor_running? and defined?(MemCache::EventedServer)
1025
+ self.extend(MemCache::EventedServer)
1026
+ end
1027
+ end
1028
+
1029
+ ##
1030
+ # Return a string representation of the server object.
1031
+
1032
+ def inspect
1033
+ "<MemCache::Server: %s:%d [%d] (%s)>" % [@host, @port, @weight, @status]
1034
+ end
1035
+
1036
+ ##
1037
+ # Check whether the server connection is alive. This will cause the
1038
+ # socket to attempt to connect if it isn't already connected, or if
1039
+ # the server was previously marked as down and the retry time has
1040
+ # been exceeded.
1041
+
1042
+ def alive?
1043
+ !!socket
1044
+ end
1045
+
1046
+ ##
1047
+ # Try to connect to the memcached server targeted by this object.
1048
+ # Returns the connected socket object on success or nil on failure.
1049
+
1050
+ def socket
1051
+ return @sock if @sock and not @sock.closed?
1052
+
1053
+ @sock = nil
1054
+
1055
+ # If the host was dead, don't retry for a while.
1056
+ return if @retry and @retry > Time.now
1057
+
1058
+ # Attempt to connect if not already connected.
1059
+ begin
1060
+ @sock = connect_to(@host, @port, @timeout)
1061
+ @sock.setsockopt Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1
1062
+ @retry = nil
1063
+ @status = 'CONNECTED'
1064
+ rescue SocketError, SystemCallError, IOError, Timeout::Error => err
1065
+ logger.warn { "Unable to open socket: #{err.class.name}, #{err.message}" } if logger
1066
+ mark_dead err
1067
+ end
1068
+
1069
+ return @sock
1070
+ end
1071
+
1072
+ def connect_to(host, port, timeout=nil)
1073
+ sock = nil
1074
+ if timeout
1075
+ MemCacheTimer.timeout(timeout) do
1076
+ sock = TCPSocket.new(host, port)
1077
+ end
1078
+ else
1079
+ sock = TCPSocket.new(host, port)
1080
+ end
1081
+
1082
+ io = MemCache::BufferedIO.new(sock)
1083
+ io.read_timeout = timeout
1084
+ # Getting reports from several customers, including 37signals,
1085
+ # that the non-blocking timeouts in 1.7.5 don't seem to be reliable.
1086
+ # It can't hurt to set the underlying socket timeout also, if possible.
1087
+ if timeout
1088
+ secs = Integer(timeout)
1089
+ usecs = Integer((timeout - secs) * 1_000_000)
1090
+ optval = [secs, usecs].pack("l_2")
1091
+ begin
1092
+ io.setsockopt Socket::SOL_SOCKET, Socket::SO_RCVTIMEO, optval
1093
+ io.setsockopt Socket::SOL_SOCKET, Socket::SO_SNDTIMEO, optval
1094
+ rescue Exception => ex
1095
+ # Solaris, for one, does not like/support socket timeouts.
1096
+ @logger.info "[memcache-client] Unable to use raw socket timeouts: #{ex.class.name}: #{ex.message}" if @logger
1097
+ end
1098
+ end
1099
+ io
1100
+ end
1101
+
1102
+ ##
1103
+ # Close the connection to the memcached server targeted by this
1104
+ # object. The server is not considered dead.
1105
+
1106
+ def close
1107
+ @sock.close if @sock && !@sock.closed?
1108
+ @sock = nil
1109
+ @retry = nil
1110
+ @status = "NOT CONNECTED"
1111
+ end
1112
+
1113
+ ##
1114
+ # Mark the server as dead and close its socket.
1115
+
1116
+ def mark_dead(error)
1117
+ close
1118
+ @retry = Time.now + RETRY_DELAY
1119
+
1120
+ reason = "#{error.class.name}: #{error.message}"
1121
+ @status = sprintf "%s:%s DEAD (%s), will retry at %s", @host, @port, reason, @retry
1122
+ @logger.info { @status } if @logger
1123
+ end
1124
+
1125
+ end
1126
+
1127
+ ##
1128
+ # Base MemCache exception class.
1129
+
1130
+ class MemCacheError < RuntimeError; end
1131
+
1132
+ class BufferedIO < Net::BufferedIO # :nodoc:
1133
+ BUFSIZE = 1024 * 16
1134
+
1135
+ if RUBY_VERSION < '1.9.1'
1136
+ def rbuf_fill
1137
+ begin
1138
+ @rbuf << @io.read_nonblock(BUFSIZE)
1139
+ rescue Errno::EWOULDBLOCK
1140
+ retry unless @read_timeout
1141
+ if IO.select([@io], nil, nil, @read_timeout)
1142
+ retry
1143
+ else
1144
+ raise Timeout::Error, 'IO timeout'
1145
+ end
1146
+ end
1147
+ end
1148
+ end
1149
+
1150
+ def setsockopt(*args)
1151
+ @io.setsockopt(*args)
1152
+ end
1153
+
1154
+ def gets
1155
+ encode(readuntil("\n"))
1156
+ end
1157
+
1158
+ if defined?(Encoding)
1159
+ def encode(str)
1160
+ str.force_encoding(Encoding.default_external)
1161
+ end
1162
+ else
1163
+ def encode(str)
1164
+ str
1165
+ end
1166
+ end
1167
+ end
1168
+
1169
+ end
1170
+
1171
+ module Continuum
1172
+ POINTS_PER_SERVER = 160 # this is the default in libmemcached
1173
+
1174
+ # Find the closest index in Continuum with value <= the given value
1175
+ def self.binary_search(ary, value, &block)
1176
+ upper = ary.size - 1
1177
+ lower = 0
1178
+ idx = 0
1179
+
1180
+ while(lower <= upper) do
1181
+ idx = (lower + upper) / 2
1182
+ comp = ary[idx].value <=> value
1183
+
1184
+ if comp == 0
1185
+ return idx
1186
+ elsif comp > 0
1187
+ upper = idx - 1
1188
+ else
1189
+ lower = idx + 1
1190
+ end
1191
+ end
1192
+ return upper
1193
+ end
1194
+
1195
+ class Entry
1196
+ attr_reader :value
1197
+ attr_reader :server
1198
+
1199
+ def initialize(val, srv)
1200
+ @value = val
1201
+ @server = srv
1202
+ end
1203
+
1204
+ def inspect
1205
+ "<#{value}, #{server.host}:#{server.port}>"
1206
+ end
1207
+ end
1208
+
1209
+ end
1210
+ require 'continuum_native'