dsander-memcache-client 1.7.7.pre

data/lib/memcache.rb ADDED
@@ -0,0 +1,1209 @@
+ $TESTING = defined?($TESTING) && $TESTING
+
+ require 'socket'
+ require 'thread'
+ require 'zlib'
+ require 'digest/sha1'
+ require 'net/protocol'
+ require 'yaml'
+
+ begin
+   # Try to use the SystemTimer gem instead of Ruby's timeout library
+   # when running on something that looks like Ruby 1.8.x. See:
+   # http://ph7spot.com/articles/system_timer
+   # We don't want to bother trying to load SystemTimer on jruby and
+   # ruby 1.9+
+   if defined?(JRUBY_VERSION) || (RUBY_VERSION >= '1.9')
+     require 'timeout'
+     MemCacheTimer = Timeout
+   else
+     require 'system_timer'
+     MemCacheTimer = SystemTimer
+   end
+ rescue LoadError => e
+   puts "[memcache-client] Could not load SystemTimer gem, falling back to Ruby's slower/unsafe timeout library: #{e.message}"
+   require 'timeout'
+   MemCacheTimer = Timeout
+ end
+
+
+ ##
+ # A Ruby client library for memcached.
+ #
+
+ class MemCache
+
+   ##
+   # The version of MemCache you are using.
+
+   VERSION = begin
+     config = YAML.load(File.read(File.dirname(__FILE__) + '/../VERSION.yml'))
+     "#{config[:major]}.#{config[:minor]}.#{config[:patch]}" + (config[:build] ? ".#{config[:build]}" : '')
+   end
+
+   ##
+   # Default options for the cache object.
+
+   DEFAULT_OPTIONS = {
+     :namespace => nil,
+     :readonly => false,
+     :multithread => true,
+     :failover => true,
+     :timeout => 0.5,
+     :logger => nil,
+     :no_reply => false,
+     :check_size => true,
+     :autofix_keys => false,
+     :gzip => true,
+     :namespace_separator => ':',
+   }
+
+   ##
+   # Default memcached port.
+
+   DEFAULT_PORT = 11211
+
+   ##
+   # Default memcached server weight.
+
+   DEFAULT_WEIGHT = 1
+
+   ##
+   # The namespace for this instance
+
+   attr_reader :namespace
+
+   ##
+   # The multithread setting for this instance
+
+   attr_reader :multithread
+
+   ##
+   # Whether to fix keys that are too long (and would otherwise be truncated)
+   # by storing them under their SHA1 hash instead.
+   # The hash is only used for keys longer than 250 characters, or containing
+   # spaces, to avoid impacting performance unnecessarily.
+   #
+   # In theory, your code should generate correct keys when calling memcache,
+   # so this is your responsibility and the problem is best fixed at its source.
+   #
+   # But if that's not possible, enable this option and memcache-client will give you a hand.
+
+   attr_reader :autofix_keys
+
+   ##
+   # The servers this client talks to. Play at your own peril.
+
+   attr_reader :servers
+
+   ##
+   # Socket timeout limit with this client, defaults to 0.5 sec.
+   # Set to nil to disable timeouts.
+
+   attr_reader :timeout
+
+   ##
+   # Should the client try to failover to another server if the
+   # first server is down? Defaults to true.
+
+   attr_reader :failover
+
+   ##
+   # Log debug/info/warn/error to the given Logger, defaults to nil.
+
+   attr_reader :logger
+
+   ##
+   # Don't send or look for a reply from the memcached server for write operations.
+   # Please note this feature only works in memcached 1.2.5 and later. Earlier
+   # versions will reply with "ERROR".
+   attr_reader :no_reply
+
+   ##
+   # Compress the data before sending it to memcached (slower, but can save a lot of RAM).
+   attr_reader :gzip
+
+   ##
+   # Accepts a list of +servers+ and a list of +opts+. +servers+ may be
+   # omitted. See +servers=+ for acceptable server list arguments.
+   #
+   # Valid options for +opts+ are:
+   #
+   # [:namespace]    Prepends this value to all keys added or retrieved.
+   # [:readonly]     Raises an exception on cache writes when true.
+   # [:multithread]  Wraps cache access in a Mutex for thread safety. Defaults to true.
+   # [:failover]     Should the client try to failover to another server if the
+   #                 first server is down? Defaults to true.
+   # [:timeout]      Time to use as the socket read timeout. Defaults to 0.5 sec;
+   #                 set to nil to disable timeouts.
+   # [:logger]       Logger to use for info/debug output, defaults to nil.
+   # [:no_reply]     Don't bother looking for a reply for write operations (i.e. they
+   #                 become 'fire and forget'), memcached 1.2.5 and later only, speeds up
+   #                 set/add/delete/incr/decr significantly.
+   # [:check_size]   Raises a MemCacheError if the value to be set is greater than 1 MB, which
+   #                 is the maximum value size for the standard memcached server. Defaults to true.
+   # [:autofix_keys] If a key is longer than 250 characters or contains spaces,
+   #                 use an SHA1 hash instead, to prevent collisions on truncated keys.
+   # [:gzip]         Compress values with Zlib before storing them. Defaults to true.
+   # [:namespace_separator] String placed between the namespace and the key. Defaults to ':'.
+   # Other options are ignored; see the example below.
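+   #
+   # A minimal illustrative example (the second host name, weight and
+   # namespace here are made up; any reachable memcached servers will do):
+   #
+   #   cache = MemCache.new ['127.0.0.1:11211', 'cache2.example.com:11211:2'],
+   #                        :namespace => 'my_app', :timeout => 0.25
+   #   cache.set 'greeting', 'hello'
+   #   cache.get 'greeting'          # => "hello"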
+
+   def initialize(*args)
+     servers = []
+     opts = {}
+
+     case args.length
+     when 0 then # NOP
+     when 1 then
+       arg = args.shift
+       case arg
+       when Hash then opts = arg
+       when Array then servers = arg
+       when String then servers = [arg]
+       else raise ArgumentError, 'first argument must be Array, Hash or String'
+       end
+     when 2 then
+       servers, opts = args
+     else
+       raise ArgumentError, "wrong number of arguments (#{args.length} for 2)"
+     end
+
+     opts = DEFAULT_OPTIONS.merge opts
+     @namespace = opts[:namespace]
+     @readonly = opts[:readonly]
+     @multithread = opts[:multithread]
+     @autofix_keys = opts[:autofix_keys]
+     @timeout = opts[:timeout]
+     @failover = opts[:failover]
+     @logger = opts[:logger]
+     @no_reply = opts[:no_reply]
+     @check_size = opts[:check_size]
+     @gzip = opts[:gzip]
+     @namespace_separator = opts[:namespace_separator]
+     @mutex = Mutex.new if @multithread
+
+     logger.info { "memcache-client #{VERSION} #{Array(servers).inspect}" } if logger
+
+     Thread.current[:memcache_client] = self.object_id if !@multithread
+
+     self.servers = servers
+   end
+
+   ##
+   # Returns a string representation of the cache object.
+
+   def inspect
+     "<MemCache: %d servers, ns: %p, ro: %p>" %
+       [@servers.length, @namespace, @readonly]
+   end
+
+   ##
+   # Returns whether there is at least one active server for the object.
+
+   def active?
+     not @servers.empty?
+   end
+
+   ##
+   # Returns whether or not the cache object was created read only.
+
+   def readonly?
+     @readonly
+   end
+
+   ##
+   # Returns the compressed value
+
+   def compress(value)
+     if @gzip
+       Zlib::Deflate.deflate(value)
+     else
+       value
+     end
+   end
+
+   ##
+   # Return the decompressed data
+
+   def decompress(value)
+     if @gzip
+       Zlib::Inflate.inflate(value)
+     else
+       value
+     end
+   end
+
+   ##
+   # Set the servers that the requests will be distributed between. Entries
+   # can be either strings of the form "hostname:port" or
+   # "hostname:port:weight" or MemCache::Server objects.
+   #
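+   # An illustrative example (the second host name and weight are made up):
+   #
+   #   cache.servers = ['127.0.0.1:11211', 'cache2.example.com:11211:3']
+   #   # the second server now receives roughly three times as many keys
+   #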
+   def servers=(servers)
+     # Create the server objects.
+     @servers = Array(servers).collect do |server|
+       case server
+       when String
+         host, port, weight = server.split ':', 3
+         port ||= DEFAULT_PORT
+         weight ||= DEFAULT_WEIGHT
+         Server.new self, host, port, weight
+       else
+         server
+       end
+     end
+
+     logger.debug { "Servers now: #{@servers.inspect}" } if logger
+
+     # There's no point in doing this if there's only one server
+     @continuum = create_continuum_for(@servers) if @servers.size > 1
+
+     @servers
+   end
+
+   ##
+   # Decrements the value for +key+ by +amount+ and returns the new value.
+   # +key+ must already exist. If the stored value is not an integer, it is
+   # treated as 0. The value cannot be decremented below 0.
+
+   def decr(key, amount = 1)
+     raise MemCacheError, "Update of readonly cache" if @readonly
+     with_server(key) do |server, cache_key|
+       cache_decr server, cache_key, amount
+     end
+   rescue TypeError => err
+     handle_error nil, err
+   end
+
+   ##
+   # Retrieves +key+ from memcache. If +raw+ is false, the value will be
+   # unmarshalled.
+
+   def get(key, raw = false)
+     with_server(key) do |server, cache_key|
+       logger.debug { "get #{key} from #{server.inspect}" } if logger
+       value = cache_get server, cache_key
+       return nil if value.nil?
+       value = Marshal.load decompress(value) unless raw
+       return value
+     end
+   rescue TypeError => err
+     handle_error nil, err
+   end
+
+   ##
+   # Performs a +get+ with the given +key+. If the value does not exist
+   # and a block was given, the block will be called and the result saved
+   # via +add+.
+   #
+   # If you do not provide a block, using this method is the same as using
+   # +get+.
+   #
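+   # An illustrative example (assumes an expensive computation you want to
+   # cache for ten minutes; the key and helper method are made up):
+   #
+   #   report = cache.fetch('daily_report', 600) do
+   #     generate_daily_report   # hypothetical expensive helper
+   #   end
+   #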
+   def fetch(key, expiry = 0, raw = false)
+     value = get(key, raw)
+
+     if value.nil? && block_given?
+       value = yield
+       add(key, value, expiry, raw)
+     end
+
+     value
+   end
+
+   ##
+   # Retrieves multiple values from memcached in parallel, if possible.
+   #
+   # The memcached protocol supports the ability to retrieve multiple
+   # keys in a single request. Pass in an array of keys to this method
+   # and it will:
+   #
+   # 1. map the key to the appropriate memcached server
+   # 2. send a single request to each server that has one or more key values
+   #
+   # Returns a hash of values.
+   #
+   #   cache["a"] = 1
+   #   cache["b"] = 2
+   #   cache.get_multi "a", "b" # => { "a" => 1, "b" => 2 }
+   #
+   # Note that get_multi assumes the values are marshalled.
+
+   def get_multi(*keys)
+     raise MemCacheError, 'No active servers' unless active?
+
+     keys.flatten!
+     key_count = keys.length
+     cache_keys = {}
+     server_keys = Hash.new { |h,k| h[k] = [] }
+
+     # map keys to servers
+     keys.each do |key|
+       server, cache_key = request_setup key
+       cache_keys[cache_key] = key
+       server_keys[server] << cache_key
+     end
+
+     results = {}
+
+     server_keys.each do |server, keys_for_server|
+       keys_for_server_str = keys_for_server.join ' '
+       begin
+         values = cache_get_multi server, keys_for_server_str
+         values.each do |key, value|
+           results[cache_keys[key]] = Marshal.load decompress(value)
+         end
+       rescue IndexError => e
+         # Ignore this server and try the others
+         logger.warn { "Unable to retrieve #{keys_for_server.size} elements from #{server.inspect}: #{e.message}" } if logger
+       end
+     end
+
+     return results
+   rescue TypeError => err
+     handle_error nil, err
+   end
+
+   ##
+   # Increments the value for +key+ by +amount+ and returns the new value.
+   # +key+ must already exist. If the stored value is not an integer, it is
+   # treated as 0.
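+   #
+   # A small illustrative example; note that counters must be stored as raw
+   # (unmarshalled) values for incr/decr to work ('visits' is a made-up key):
+   #
+   #   cache.set 'visits', '0', 0, true
+   #   cache.incr 'visits'      # => 1
+   #   cache.incr 'visits', 10  # => 11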
+
+   def incr(key, amount = 1)
+     raise MemCacheError, "Update of readonly cache" if @readonly
+     with_server(key) do |server, cache_key|
+       cache_incr server, cache_key, amount
+     end
+   rescue TypeError => err
+     handle_error nil, err
+   end
+
+   ##
+   # Add +key+ to the cache with value +value+ that expires in +expiry+
+   # seconds. If +raw+ is true, +value+ will not be Marshalled.
+   #
+   # Warning: Readers should not call this method in the event of a cache miss;
+   # see MemCache#add.
+
+   ONE_MB = 1024 * 1024
+
+   def set(key, value, expiry = 0, raw = false)
+     raise MemCacheError, "Update of readonly cache" if @readonly
+
+     value = compress(Marshal.dump value) unless raw
+     with_server(key) do |server, cache_key|
+       logger.debug { "set #{key} to #{server.inspect}: #{value.to_s.size}" } if logger
+
+       if @check_size && value.to_s.size > ONE_MB
+         raise MemCacheError, "Value too large, memcached can only store 1MB of data per key"
+       end
+
+       command = "set #{cache_key} 0 #{expiry} #{value.to_s.size}#{noreply}\r\n#{value}\r\n"
+
+       with_socket_management(server) do |socket|
+         socket.write command
+         break nil if @no_reply
+         result = socket.gets
+         raise_on_error_response! result
+
+         if result.nil?
+           server.close
+           raise MemCacheError, "lost connection to #{server.host}:#{server.port}"
+         end
+
+         result
+       end
+     end
+   end
+
+   ##
+   # "cas" is a check and set operation which means "store this data but
+   # only if no one else has updated since I last fetched it." This can
+   # be used as a form of optimistic locking.
+   #
+   # Works in block form like so:
+   #
+   #   cache.cas('some-key') do |value|
+   #     value + 1
+   #   end
+   #
+   # Returns:
+   # +nil+ if the value was not found on the memcached server.
+   # +STORED+ if the value was updated successfully
+   # +EXISTS+ if the value was updated by someone else since last fetch
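+   #
+   # A sketch of a retry loop built on these return values (illustrative
+   # only; the key name and retry count are made up). The raw protocol
+   # response still carries its "\r\n" terminator, so match on the prefix:
+   #
+   #   3.times do
+   #     result = cache.cas('page-views') { |views| views + 1 }
+   #     break unless result =~ /\AEXISTS/
+   #   end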
+
+   def cas(key, expiry=0, raw=false)
+     raise MemCacheError, "Update of readonly cache" if @readonly
+     raise MemCacheError, "A block is required" unless block_given?
+
+     (value, token) = gets(key, raw)
+     return nil unless value
+     updated = yield value
+     value = compress(Marshal.dump updated) unless raw
+
+     with_server(key) do |server, cache_key|
+       logger.debug { "cas #{key} to #{server.inspect}: #{value.to_s.size}" } if logger
+       command = "cas #{cache_key} 0 #{expiry} #{value.to_s.size} #{token}#{noreply}\r\n#{value}\r\n"
+
+       with_socket_management(server) do |socket|
+         socket.write command
+         break nil if @no_reply
+         result = socket.gets
+         raise_on_error_response! result
+
+         if result.nil?
+           server.close
+           raise MemCacheError, "lost connection to #{server.host}:#{server.port}"
+         end
+
+         result
+       end
+     end
+   end
+
+   ##
+   # Add +key+ to the cache with value +value+ that expires in +expiry+
+   # seconds, but only if +key+ does not already exist in the cache.
+   # If +raw+ is true, +value+ will not be Marshalled.
+   #
+   # Readers should call this method in the event of a cache miss, not
+   # MemCache#set.
+
+   def add(key, value, expiry = 0, raw = false)
+     raise MemCacheError, "Update of readonly cache" if @readonly
+     value = compress(Marshal.dump value) unless raw
+     with_server(key) do |server, cache_key|
+       logger.debug { "add #{key} to #{server}: #{value ? value.to_s.size : 'nil'}" } if logger
+       command = "add #{cache_key} 0 #{expiry} #{value.to_s.size}#{noreply}\r\n#{value}\r\n"
+
+       with_socket_management(server) do |socket|
+         socket.write command
+         break nil if @no_reply
+         result = socket.gets
+         raise_on_error_response! result
+         result
+       end
+     end
+   end
+
+   ##
+   # Add +key+ to the cache with value +value+ that expires in +expiry+
+   # seconds, but only if +key+ already exists in the cache.
+   # If +raw+ is true, +value+ will not be Marshalled.
+   def replace(key, value, expiry = 0, raw = false)
+     raise MemCacheError, "Update of readonly cache" if @readonly
+     value = compress(Marshal.dump value) unless raw
+     with_server(key) do |server, cache_key|
+       logger.debug { "replace #{key} to #{server}: #{value ? value.to_s.size : 'nil'}" } if logger
+       command = "replace #{cache_key} 0 #{expiry} #{value.to_s.size}#{noreply}\r\n#{value}\r\n"
+
+       with_socket_management(server) do |socket|
+         socket.write command
+         break nil if @no_reply
+         result = socket.gets
+         raise_on_error_response! result
+         result
+       end
+     end
+   end
+
+   ##
+   # Append - 'add this data to an existing key after existing data'
+   # Please note the value is always passed to memcached as raw since it
+   # doesn't make a lot of sense to concatenate marshalled data together.
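+   #
+   # A small illustrative example ('log' is a made-up key); note the value
+   # must have been stored raw for the appended result to be readable:
+   #
+   #   cache.set 'log', 'first', 0, true
+   #   cache.append 'log', ',second'
+   #   cache.get 'log', true    # => "first,second"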
+   def append(key, value)
+     raise MemCacheError, "Update of readonly cache" if @readonly
+     with_server(key) do |server, cache_key|
+       logger.debug { "append #{key} to #{server}: #{value ? value.to_s.size : 'nil'}" } if logger
+       command = "append #{cache_key} 0 0 #{value.to_s.size}#{noreply}\r\n#{value}\r\n"
+
+       with_socket_management(server) do |socket|
+         socket.write command
+         break nil if @no_reply
+         result = socket.gets
+         raise_on_error_response! result
+         result
+       end
+     end
+   end
+
+   ##
+   # Prepend - 'add this data to an existing key before existing data'
+   # Please note the value is always passed to memcached as raw since it
+   # doesn't make a lot of sense to concatenate marshalled data together.
+   def prepend(key, value)
+     raise MemCacheError, "Update of readonly cache" if @readonly
+     with_server(key) do |server, cache_key|
+       logger.debug { "prepend #{key} to #{server}: #{value ? value.to_s.size : 'nil'}" } if logger
+       command = "prepend #{cache_key} 0 0 #{value.to_s.size}#{noreply}\r\n#{value}\r\n"
+
+       with_socket_management(server) do |socket|
+         socket.write command
+         break nil if @no_reply
+         result = socket.gets
+         raise_on_error_response! result
+         result
+       end
+     end
+   end
+
+   ##
+   # Removes +key+ from the cache in +expiry+ seconds.
+
+   def delete(key, expiry = 0)
+     raise MemCacheError, "Update of readonly cache" if @readonly
+     with_server(key) do |server, cache_key|
+       with_socket_management(server) do |socket|
+         logger.debug { "delete #{cache_key} on #{server}" } if logger
+         socket.write "delete #{cache_key} #{expiry}#{noreply}\r\n"
+         break nil if @no_reply
+         result = socket.gets
+         raise_on_error_response! result
+         result
+       end
+     end
+   end
+
+   ##
+   # Flush the cache from all memcache servers.
+   # A non-zero value for +delay+ will ensure that the flush
+   # is propagated slowly through your memcached server farm.
+   # The Nth server will be flushed N*delay seconds from now,
+   # asynchronously, so this method returns quickly.
+   # This prevents a huge database spike due to a total
+   # flush all at once.
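+   #
+   # For example (assuming three configured servers), a call such as
+   #
+   #   cache.flush_all(60)
+   #
+   # asks the first server to flush immediately, the second after 60
+   # seconds, and the third after 120 seconds.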
+
+   def flush_all(delay=0)
+     raise MemCacheError, 'No active servers' unless active?
+     raise MemCacheError, "Update of readonly cache" if @readonly
+
+     begin
+       delay_time = 0
+       @servers.each do |server|
+         with_socket_management(server) do |socket|
+           logger.debug { "flush_all #{delay_time} on #{server}" } if logger
+           if delay == 0 # older versions of memcached will fail silently otherwise
+             socket.write "flush_all#{noreply}\r\n"
+           else
+             socket.write "flush_all #{delay_time}#{noreply}\r\n"
+           end
+           break nil if @no_reply
+           result = socket.gets
+           raise_on_error_response! result
+           result
+         end
+         delay_time += delay
+       end
+     rescue IndexError => err
+       handle_error nil, err
+     end
+   end
+
+   ##
+   # Reset the connection to all memcache servers. This should be called if
+   # there is a problem with a cache lookup that might have left the connection
+   # in a corrupted state.
+
+   def reset
+     @servers.each { |server| server.close }
+   end
+
+   ##
+   # Returns statistics for each memcached server. An explanation of the
+   # statistics can be found in the memcached docs:
+   #
+   # http://code.sixapart.com/svn/memcached/trunk/server/doc/protocol.txt
+   #
+   # Example:
+   #
+   #   >> pp CACHE.stats
+   #   {"localhost:11211"=>
+   #     {"bytes"=>4718,
+   #      "pid"=>20188,
+   #      "connection_structures"=>4,
+   #      "time"=>1162278121,
+   #      "pointer_size"=>32,
+   #      "limit_maxbytes"=>67108864,
+   #      "cmd_get"=>14532,
+   #      "version"=>"1.2.0",
+   #      "bytes_written"=>432583,
+   #      "cmd_set"=>32,
+   #      "get_misses"=>0,
+   #      "total_connections"=>19,
+   #      "curr_connections"=>3,
+   #      "curr_items"=>4,
+   #      "uptime"=>1557,
+   #      "get_hits"=>14532,
+   #      "total_items"=>32,
+   #      "rusage_system"=>0.313952,
+   #      "rusage_user"=>0.119981,
+   #      "bytes_read"=>190619}}
+   #   => nil
+
+   def stats
+     raise MemCacheError, "No active servers" unless active?
+     server_stats = {}
+
+     @servers.each do |server|
+       next unless server.alive?
+
+       with_socket_management(server) do |socket|
+         value = nil
+         socket.write "stats\r\n"
+         stats = {}
+         while line = socket.gets do
+           raise_on_error_response! line
+           break if line == "END\r\n"
+           if line =~ /\ASTAT ([\S]+) ([\w\.\:]+)/ then
+             name, value = $1, $2
+             stats[name] = case name
+                           when 'version'
+                             value
+                           when 'rusage_user', 'rusage_system' then
+                             seconds, microseconds = value.split(/:/, 2)
+                             microseconds ||= 0
+                             Float(seconds) + (Float(microseconds) / 1_000_000)
+                           else
+                             if value =~ /\A\d+\Z/ then
+                               value.to_i
+                             else
+                               value
+                             end
+                           end
+           end
+         end
+         server_stats["#{server.host}:#{server.port}"] = stats
+       end
+     end
+
+     raise MemCacheError, "No active servers" if server_stats.empty?
+     server_stats
+   end
+
+   ##
+   # Shortcut to get a value from the cache.
+
+   alias [] get
+
+   ##
+   # Shortcut to save a value in the cache. This method does not set an
+   # expiration on the entry. Use set to specify an explicit expiry.
+
+   def []=(key, value)
+     set key, value
+   end
+
+   protected unless $TESTING
+
+   ##
+   # Create a key for the cache, incorporating the namespace qualifier if
+   # requested.
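+   #
+   # For example, with :namespace => 'app' and the default ':' separator
+   # (both illustrative values), the key 'user-1' is sent to memcached as
+   # 'app:user-1'.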
+
+   def make_cache_key(key)
+     if @autofix_keys and (key =~ /\s/ or (key.length + (namespace.nil? ? 0 : namespace.length)) > 250)
+       key = "#{Digest::SHA1.hexdigest(key)}-autofixed"
+     end
+
+     if namespace.nil? then
+       key
+     else
+       "#{@namespace}#{@namespace_separator}#{key}"
+     end
+   end
+
+   ##
+   # Returns an interoperable hash value for +key+. (I think, docs are
+   # sketchy for down servers).
+
+   def hash_for(key)
+     Zlib.crc32(key)
+   end
+
+   ##
+   # Pick a server to handle the request based on a hash of the key.
+
+   def get_server_for_key(key, options = {})
+     raise ArgumentError, "illegal character in key #{key.inspect}" if
+       key =~ /\s/
+     raise ArgumentError, "key too long #{key.inspect}" if key.length > 250
+     raise MemCacheError, "No servers available" if @servers.empty?
+     return @servers.first if @servers.length == 1
+
+     hkey = hash_for(key)
+
+     20.times do |try|
+       entryidx = Continuum.binary_search(@continuum, hkey)
+       server = @continuum[entryidx].server
+       return server if server.alive?
+       break unless failover
+       hkey = hash_for "#{try}#{key}"
+     end
+
+     raise MemCacheError, "No servers available"
+   end
+
+   ##
+   # Performs a raw decr for +cache_key+ from +server+. Returns nil if not
+   # found.
+
+   def cache_decr(server, cache_key, amount)
+     with_socket_management(server) do |socket|
+       socket.write "decr #{cache_key} #{amount}#{noreply}\r\n"
+       break nil if @no_reply
+       text = socket.gets
+       raise_on_error_response! text
+       return nil if text == "NOT_FOUND\r\n"
+       return text.to_i
+     end
+   end
+
+   ##
+   # Fetches the raw data for +cache_key+ from +server+. Returns nil on cache
+   # miss.
+
+   def cache_get(server, cache_key)
+     with_socket_management(server) do |socket|
+       socket.write "get #{cache_key}\r\n"
+       keyline = socket.gets # "VALUE <key> <flags> <bytes>\r\n"
+
+       if keyline.nil? then
+         server.close
+         raise MemCacheError, "lost connection to #{server.host}:#{server.port}"
+       end
+
+       raise_on_error_response! keyline
+       return nil if keyline == "END\r\n"
+
+       unless keyline =~ /(\d+)\r/ then
+         server.close
+         raise MemCacheError, "unexpected response #{keyline.inspect}"
+       end
+       value = socket.read $1.to_i
+       socket.read 2 # "\r\n"
+       socket.gets # "END\r\n"
+       return value
+     end
+   end
+
+   ##
+   # Fetches the value and its CAS token for +key+. Returns nil on a cache
+   # miss, otherwise [value, token]; used by +cas+.
+
+   def gets(key, raw = false)
+     with_server(key) do |server, cache_key|
+       logger.debug { "gets #{key} from #{server.inspect}" } if logger
+       result = with_socket_management(server) do |socket|
+         socket.write "gets #{cache_key}\r\n"
+         keyline = socket.gets # "VALUE <key> <flags> <bytes> <cas token>\r\n"
+
+         if keyline.nil? then
+           server.close
+           raise MemCacheError, "lost connection to #{server.host}:#{server.port}"
+         end
+
+         raise_on_error_response! keyline
+         return nil if keyline == "END\r\n"
+
+         unless keyline =~ /(\d+) (\w+)\r/ then
+           server.close
+           raise MemCacheError, "unexpected response #{keyline.inspect}"
+         end
+         value = socket.read $1.to_i
+         socket.read 2 # "\r\n"
+         socket.gets # "END\r\n"
+         [value, $2]
+       end
+       result[0] = Marshal.load decompress(result[0]) unless raw
+       result
+     end
+   rescue TypeError => err
+     handle_error nil, err
+   end
+
+
+   ##
+   # Fetches +cache_keys+ from +server+ using a multi-get.
+
+   def cache_get_multi(server, cache_keys)
+     with_socket_management(server) do |socket|
+       values = {}
+       socket.write "get #{cache_keys}\r\n"
+
+       while keyline = socket.gets do
+         return values if keyline == "END\r\n"
+         raise_on_error_response! keyline
+
+         unless keyline =~ /\AVALUE (.+) (.+) (.+)/ then
+           server.close
+           raise MemCacheError, "unexpected response #{keyline.inspect}"
+         end
+
+         key, data_length = $1, $3
+         values[$1] = socket.read data_length.to_i
+         socket.read(2) # "\r\n"
+       end
+
+       server.close
+       raise MemCacheError, "lost connection to #{server.host}:#{server.port}" # TODO: retry here too
+     end
+   end
+
+   ##
+   # Performs a raw incr for +cache_key+ from +server+. Returns nil if not
+   # found.
+
+   def cache_incr(server, cache_key, amount)
+     with_socket_management(server) do |socket|
+       socket.write "incr #{cache_key} #{amount}#{noreply}\r\n"
+       break nil if @no_reply
+       text = socket.gets
+       raise_on_error_response! text
+       return nil if text == "NOT_FOUND\r\n"
+       return text.to_i
+     end
+   end
+
+   ##
+   # Gets or creates a socket connected to the given server, and yields it
+   # to the block, wrapped in a mutex synchronization if @multithread is true.
+   #
+   # If a socket error (SocketError, SystemCallError, IOError) or protocol error
+   # (MemCacheError) is raised by the block, closes the socket, attempts to
+   # connect again, and retries the block (once). If an error is again raised,
+   # reraises it as MemCacheError.
+   #
+   # If unable to connect to the server (or if in the reconnect wait period),
+   # raises MemCacheError. Note that the socket connect code marks a server
+   # dead for a timeout period, so retrying does not apply to connection attempt
+   # failures (but does still apply to unexpectedly lost connections etc.).
+
+   def with_socket_management(server, &block)
+     check_multithread_status!
+
+     @mutex.lock if @multithread
+     retried = false
+
+     begin
+       socket = server.socket
+
+       # Raise an IndexError to show this server is out of whack. If we're inside
+       # a with_server block, we'll catch it and attempt to restart the operation.
+
+       raise IndexError, "No connection to server (#{server.status})" if socket.nil?
+
+       block.call(socket)
+
+     rescue SocketError, Errno::EAGAIN, Timeout::Error => err
+       logger.warn { "Socket failure: #{err.message}" } if logger
+       server.mark_dead(err)
+       handle_error(server, err)
+
+     rescue MemCacheError, SystemCallError, IOError => err
+       logger.warn { "Generic failure: #{err.class.name}: #{err.message}" } if logger
+       handle_error(server, err) if retried || socket.nil?
+       retried = true
+       retry
+     end
+   ensure
+     @mutex.unlock if @multithread
+   end
+
+   def with_server(key)
+     retried = false
+     begin
+       server, cache_key = request_setup(key)
+       yield server, cache_key
+     rescue IndexError => e
+       logger.warn { "Server failed: #{e.class.name}: #{e.message}" } if logger
+       if !retried && @servers.size > 1
+         logger.info { "Connection to server #{server.inspect} DIED! Retrying operation..." } if logger
+         retried = true
+         retry
+       end
+       handle_error(nil, e)
+     end
+   end
+
+   ##
+   # Handles +error+ from +server+.
+
+   def handle_error(server, error)
+     raise error if error.is_a?(MemCacheError)
+     server.close if server && server.status == "CONNECTED"
+     new_error = MemCacheError.new error.message
+     new_error.set_backtrace error.backtrace
+     raise new_error
+   end
+
+   def noreply
+     @no_reply ? ' noreply' : ''
+   end
+
+   ##
+   # Performs setup for making a request with +key+ from memcached. Returns
+   # the server to fetch the key from and the complete key to use.
+
+   def request_setup(key)
+     raise MemCacheError, 'No active servers' unless active?
+     cache_key = make_cache_key key
+     server = get_server_for_key cache_key
+     return server, cache_key
+   end
+
+   def raise_on_error_response!(response)
+     if response =~ /\A(?:CLIENT_|SERVER_)?ERROR(.*)/
+       raise MemCacheError, $1.strip
+     end
+   end
+
+   def create_continuum_for(servers)
+     total_weight = servers.inject(0) { |memo, srv| memo + srv.weight }
+     continuum = []
+
+     servers.each do |server|
+       entry_count_for(server, servers.size, total_weight).times do |idx|
+         hash = Digest::SHA1.hexdigest("#{server.host}:#{server.port}:#{idx}")
+         value = Integer("0x#{hash[0..7]}")
+         continuum << Continuum::Entry.new(value, server)
+       end
+     end
+
+     continuum.sort { |a, b| a.value <=> b.value }
+   end
+
+   def entry_count_for(server, total_servers, total_weight)
+     ((total_servers * Continuum::POINTS_PER_SERVER * server.weight) / Float(total_weight)).floor
+   end
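+
+   # As a worked example of the weighting above (numbers are illustrative):
+   # with two servers of weight 1 and 3, total_weight is 4, so they receive
+   # (2 * 160 * 1 / 4).floor = 80 and (2 * 160 * 3 / 4).floor = 240 points
+   # on the continuum respectively, i.e. roughly a 1:3 share of the keys.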
+
+   def check_multithread_status!
+     return if @multithread
+
+     if Thread.current[:memcache_client] != self.object_id
+       raise MemCacheError, <<-EOM
+         You are accessing this memcache-client instance from multiple threads but have not enabled multithread support.
+         Normally: MemCache.new(['localhost:11211'], :multithread => true)
+         In Rails: config.cache_store = [:mem_cache_store, 'localhost:11211', { :multithread => true }]
+       EOM
+     end
+   end
+
+   ##
+   # This class represents a memcached server instance.
+
+   class Server
+
+     ##
+     # The amount of time to wait before attempting to re-establish a
+     # connection with a server that is marked dead.
+
+     RETRY_DELAY = 30.0
+
+     ##
+     # The host the memcached server is running on.
+
+     attr_reader :host
+
+     ##
+     # The port the memcached server is listening on.
+
+     attr_reader :port
+
+     ##
+     # The weight given to the server.
+
+     attr_reader :weight
+
+     ##
+     # The time of next retry if the connection is dead.
+
+     attr_reader :retry
+
+     ##
+     # A text status string describing the state of the server.
+
+     attr_reader :status
+
+     attr_reader :logger
+
+     ##
+     # Create a new MemCache::Server object for the memcached instance
+     # listening on the given host and port, weighted by the given weight.
+
+     def initialize(memcache, host, port = DEFAULT_PORT, weight = DEFAULT_WEIGHT)
+       raise ArgumentError, "No host specified" if host.nil? or host.empty?
+       raise ArgumentError, "No port specified" if port.nil? or port.to_i.zero?
+
+       @host = host
+       @port = port.to_i
+       @weight = weight.to_i
+
+       @sock = nil
+       @retry = nil
+       @status = 'NOT CONNECTED'
+       @timeout = memcache.timeout
+       @logger = memcache.logger
+     end
+
+     ##
+     # Return a string representation of the server object.
+
+     def inspect
+       "<MemCache::Server: %s:%d [%d] (%s)>" % [@host, @port, @weight, @status]
+     end
+
+     ##
+     # Check whether the server connection is alive. This will cause the
+     # socket to attempt to connect if it isn't already connected, or if
+     # the server was previously marked as down and the retry time has
+     # been exceeded.
+
+     def alive?
+       !!socket
+     end
+
+     ##
+     # Try to connect to the memcached server targeted by this object.
+     # Returns the connected socket object on success or nil on failure.
+
+     def socket
+       return @sock if @sock and not @sock.closed?
+
+       @sock = nil
+
+       # If the host was dead, don't retry for a while.
+       return if @retry and @retry > Time.now
+
+       # Attempt to connect if not already connected.
+       begin
+         @sock = connect_to(@host, @port, @timeout)
+         @sock.setsockopt Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1
+         @retry = nil
+         @status = 'CONNECTED'
+       rescue SocketError, SystemCallError, IOError, Timeout::Error => err
+         logger.warn { "Unable to open socket: #{err.class.name}, #{err.message}" } if logger
+         mark_dead err
+       end
+
+       return @sock
+     end
+
+     def connect_to(host, port, timeout=nil)
+       sock = nil
+       if timeout
+         MemCacheTimer.timeout(timeout) do
+           sock = TCPSocket.new(host, port)
+         end
+       else
+         sock = TCPSocket.new(host, port)
+       end
+
+       io = MemCache::BufferedIO.new(sock)
+       io.read_timeout = timeout
+       # Getting reports from several customers, including 37signals,
+       # that the non-blocking timeouts in 1.7.5 don't seem to be reliable.
+       # It can't hurt to set the underlying socket timeout also, if possible.
+       if timeout
+         secs = Integer(timeout)
+         usecs = Integer((timeout - secs) * 1_000_000)
+         optval = [secs, usecs].pack("l_2")
+         begin
+           io.setsockopt Socket::SOL_SOCKET, Socket::SO_RCVTIMEO, optval
+           io.setsockopt Socket::SOL_SOCKET, Socket::SO_SNDTIMEO, optval
+         rescue Exception => ex
+           # Solaris, for one, does not like/support socket timeouts.
+           @logger.info "[memcache-client] Unable to use raw socket timeouts: #{ex.class.name}: #{ex.message}" if @logger
+         end
+       end
+       io
+     end
+
+     ##
+     # Close the connection to the memcached server targeted by this
+     # object. The server is not considered dead.
+
+     def close
+       @sock.close if @sock && !@sock.closed?
+       @sock = nil
+       @retry = nil
+       @status = "NOT CONNECTED"
+     end
+
+     ##
+     # Mark the server as dead and close its socket.
+
+     def mark_dead(error)
+       @sock.close if @sock && !@sock.closed?
+       @sock = nil
+       @retry = Time.now + RETRY_DELAY
+
+       reason = "#{error.class.name}: #{error.message}"
+       @status = sprintf "%s:%s DEAD (%s), will retry at %s", @host, @port, reason, @retry
+       @logger.info { @status } if @logger
+     end
+
+   end
+
+   ##
+   # Base MemCache exception class.
+
+   class MemCacheError < RuntimeError; end
+
+   class BufferedIO < Net::BufferedIO # :nodoc:
+     BUFSIZE = 1024 * 16
+
+     if RUBY_VERSION < '1.9.1'
+       def rbuf_fill
+         begin
+           @rbuf << @io.read_nonblock(BUFSIZE)
+         rescue Errno::EWOULDBLOCK
+           retry unless @read_timeout
+           if IO.select([@io], nil, nil, @read_timeout)
+             retry
+           else
+             raise Timeout::Error, 'IO timeout'
+           end
+         end
+       end
+     end
+
+     def setsockopt(*args)
+       @io.setsockopt(*args)
+     end
+
+     def gets
+       readuntil("\n")
+     end
+   end
+
+ end
+
+ module Continuum
+   POINTS_PER_SERVER = 160 # this is the default in libmemcached
+
+   # Find the closest index in Continuum with value <= the given value
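+   # For example (illustrative values), with entries whose values are
+   # [5, 10, 20], searching for 12 returns index 1 (the entry valued 10),
+   # and searching for 3 returns -1, which callers use to wrap around to
+   # the last entry of the ring.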
+   def self.binary_search(ary, value, &block)
+     upper = ary.size - 1
+     lower = 0
+     idx = 0
+
+     while(lower <= upper) do
+       idx = (lower + upper) / 2
+       comp = ary[idx].value <=> value
+
+       if comp == 0
+         return idx
+       elsif comp > 0
+         upper = idx - 1
+       else
+         lower = idx + 1
+       end
+     end
+     return upper
+   end
+
+   class Entry
+     attr_reader :value
+     attr_reader :server
+
+     def initialize(val, srv)
+       @value = val
+       @server = srv
+     end
+
+     def inspect
+       "<#{value}, #{server.host}:#{server.port}>"
+     end
+   end
+
+ end
+ require 'continuum_native'