memcache-client_extensions 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/README +90 -0
- data/lib/memcache-client_extensions.rb +165 -0
- metadata +44 -0
data/README
ADDED
@@ -0,0 +1,90 @@
|
|
1
|
+
= MemCacheClient Extensions
|
2
|
+
|
3
|
+
== About
|
4
|
+
|
5
|
+
The memcache-client_extensions plugin adds three new commands to the memcache
|
6
|
+
client API:
|
7
|
+
|
8
|
+
1. get_multi : retrieve more than 1 key in parallel
|
9
|
+
2. stats : retrieve server performance and utilization statistics
|
10
|
+
3. flush_all : empty all information stored in memcached
|
11
|
+
|
12
|
+
== Installation
|
13
|
+
|
14
|
+
1. This plugin requires that the memcache-client gem is installed.
|
15
|
+
# gem install memcache-client
|
16
|
+
|
17
|
+
2. Install the plugin or the gem
|
18
|
+
$ script/plugin install svn://rubyforge.org/var/svn/zventstools/projects/memcache-client_extensions
|
19
|
+
- OR -
|
20
|
+
# gem install memcache-client_extensions
|
21
|
+
|
22
|
+
== get_multi
|
23
|
+
|
24
|
+
Retrieve multiple values from memcached in parallel, if possible.
|
25
|
+
The memcached protocol supports the ability to retrieve multiple
|
26
|
+
keys in a single request. Pass in an array of keys to this method
|
27
|
+
and it will:
|
28
|
+
a. map the key to the appropriate memcached server
|
29
|
+
b. send a single request to each server that has one or more key values
|
30
|
+
|
31
|
+
Returns a hash of values.
|
32
|
+
|
33
|
+
>> CACHE["a"] = 1
|
34
|
+
=> 1
|
35
|
+
>> CACHE["b"] = 2
|
36
|
+
=> 2
|
37
|
+
>> CACHE.get_multi(["a","b"])
|
38
|
+
=> {"a"=>1, "b"=>2}
|
39
|
+
|
40
|
+
Here's a benchmark showing the speedup:
|
41
|
+
|
42
|
+
CACHE["a"] = 1
|
43
|
+
CACHE["b"] = 2
|
44
|
+
CACHE["c"] = 3
|
45
|
+
CACHE["d"] = 4
|
46
|
+
keys = ["a","b","c","d","e"]
|
47
|
+
Benchmark.bm do |x|
|
48
|
+
x.report { for i in 1..1000; keys.each{|k| CACHE.get(k);} end }
|
49
|
+
x.report { for i in 1..1000; CACHE.get_multi(keys); end }
|
50
|
+
end
|
51
|
+
|
52
|
+
returns:
|
53
|
+
user system total real
|
54
|
+
0.180000 0.130000 0.310000 ( 0.459418)
|
55
|
+
0.200000 0.030000 0.230000 ( 0.269632)
|
56
|
+
|
57
|
+
There's a fair amount of non-DRY between get_multi and get (and
|
58
|
+
threadsafe_cache_get/multi_threadsafe_cache_get and
|
59
|
+
cache_get/multi_cache_get for that matter) but I think it's worth it
|
60
|
+
since the extra overhead to handle multiple return values is unneeded
|
61
|
+
for a single-key get (which is by far the most common case).
|
62
|
+
|
63
|
+
== stats
|
64
|
+
|
65
|
+
The stats method returns statistics for each memcached server. An
|
66
|
+
explanation of the statistics can be found in the memcached docs:
|
67
|
+
http://cvs.danga.com/browse.cgi/wcmtools/memcached/doc/protocol.txt?rev=HEAD&content-type=text/plain
|
68
|
+
|
69
|
+
Example:
|
70
|
+
>> CACHE.stats
|
71
|
+
=> {"localhost:11211"=>{"pid"=>"20188", "bytes"=>"4718", "connection_structures"=>"4", "time"=>"1162278121", "pointer_size"=>"32", "limit_maxbytes"=>"67108864", "version"=>"1.2.0", "cmd_get"=>"14532", "cmd_set"=>"32", "bytes_written"=>"432583", "uptime"=>"1557", "curr_items"=>"4", "curr_connections"=>"3", "total_connections"=>"19", "get_misses"=>"0", "rusage_user"=>"0.119981", "rusage_system"=>"0.313952", "total_items"=>"32", "get_hits"=>"14532", "bytes_read"=>"190619"}}
|
72
|
+
|
73
|
+
== flush_all
|
74
|
+
|
75
|
+
The flush_all method empties all cache namespaces on all memcached servers.
|
76
|
+
This method is very useful for testing your code with memcached since
|
77
|
+
you normally want to reset the cache to a known (empty) state at the
|
78
|
+
beginning of each test.
|
79
|
+
|
80
|
+
== Bugs, Code and Contributing
|
81
|
+
|
82
|
+
There's a RubyForge project set up at:
|
83
|
+
|
84
|
+
http://rubyforge.org/projects/zventstools/
|
85
|
+
|
86
|
+
Anonymous SVN access:
|
87
|
+
|
88
|
+
$ svn checkout svn://rubyforge.org/var/svn/zventstools
|
89
|
+
|
90
|
+
Author: Tyler Kovacs (tyler dot kovacs at gmail dot com)
|
@@ -0,0 +1,165 @@
|
|
1
|
+
class MemCache
  # Per-server statistics captured by the most recent #stats call, keyed
  # by "host:port".  NOTE(review): a class variable is shared across the
  # whole inheritance tree; kept because `cattr_accessor :server_stats`
  # is part of the public interface.
  @@server_stats = Hash.new
  cattr_accessor :server_stats

  # Empties all cache namespaces on every configured memcached server by
  # issuing the protocol-level "flush_all" command.  This is very useful
  # for testing, where you normally want to reset the cache to a known
  # (empty) state at the beginning of each test.
  #
  # Raises MemCacheError if no servers are active, if a server has no
  # usable connection, or on any socket error.
  def flush_all
    @mutex.lock if @multithread

    raise MemCacheError, "No active servers" unless self.active?

    @servers.each do |server|
      sock = server.socket
      if sock.nil?
        raise MemCacheError, "No connection to server"
      end

      begin
        sock.write "flush_all\r\n"
        sock.gets # consume the "OK\r\n" acknowledgement
      rescue SystemCallError, IOError => err
        server.close
        raise MemCacheError, err.message
      end
    end
  ensure
    @mutex.unlock if @multithread
  end

  # Returns statistics for each memcached server: a hash keyed by
  # "host:port" whose values are hashes of stat name => stat value
  # (both strings, exactly as sent by the server).  An explanation of
  # the statistics can be found in the memcached docs:
  # http://cvs.danga.com/browse.cgi/wcmtools/memcached/doc/protocol.txt?rev=HEAD&content-type=text/plain
  #
  # Example:
  #  >> CACHE.stats
  #  => {"localhost:11211"=>{"pid"=>"20188", "bytes"=>"4718", ...}}
  #
  # Raises MemCacheError if no servers are active, if a server has no
  # usable connection, or on any socket error.
  def stats
    raise MemCacheError, "No active servers" unless self.active?

    @servers.each do |server|
      sock = server.socket
      if sock.nil?
        raise MemCacheError, "No connection to server"
      end

      begin
        sock.write "stats\r\n"
        stats = {}
        while line = sock.gets
          break if (line.strip rescue "END") == "END"
          # Each response line is "STAT <name> <value>\r\n".  Guard the
          # captures so a non-matching line cannot silently record the
          # stale $1/$2 left over from the previous iteration.
          if line =~ /^STAT ([\w]+) ([\d.]+)/
            stats[$1] = $2
          end
        end
        @@server_stats["#{server.host}:#{server.port}"] = stats.clone
      rescue SystemCallError, IOError => err
        server.close
        raise MemCacheError, err.message
      end
    end
    @@server_stats
  end

  # Retrieves multiple values from memcached in parallel, if possible.
  # The memcached protocol supports fetching several keys in a single
  # request, so this method:
  #   a. maps each key to the appropriate memcached server
  #   b. sends a single request to each server that owns one or more keys
  #
  # Returns a hash of key => unmarshaled value; keys the servers do not
  # have are simply absent from the result.
  #
  #  >> CACHE["a"] = 1
  #  >> CACHE["b"] = 2
  #  >> CACHE.get_multi(["a","b"])
  #  => {"a"=>1, "b"=>2}
  #
  # Raises MemCacheError if no servers are active, if a key's server has
  # no usable connection, or on any socket/unmarshaling error.
  def get_multi(keys)
    raise MemCacheError, 'No active servers' unless active?

    cache_keys_keys = {}
    servers_cache_keys = {}

    # Map each key onto the server that owns it so a single "get" can be
    # sent per server.  NOTE: `for` is used deliberately here and below --
    # unlike a block, it leaks `server` into the method scope, which the
    # rescue clause at the bottom relies on to close the failing server.
    for key in keys
      cache_key = make_cache_key(key)
      cache_keys_keys[cache_key] = key
      server = get_server_for_key(cache_key)
      raise MemCacheError, 'No connection to server' if server.socket.nil?
      servers_cache_keys[server] ||= []
      servers_cache_keys[server] << cache_key
    end

    values = {}
    for server in servers_cache_keys.keys
      values.merge!(if @multithread then
        multi_threadsafe_cache_get(server, servers_cache_keys[server].join(" "))
      else
        multi_cache_get(server, servers_cache_keys[server].join(" "))
      end)
    end

    # Unmarshal each raw value and translate the namespaced cache key
    # back to the caller-supplied key.
    # SECURITY NOTE(review): Marshal.load on bytes read from memcached is
    # only safe if the cache cannot be written by untrusted parties.
    return_values = {}
    values.each_pair { |k, v| return_values[cache_keys_keys[k]] = Marshal.load(v) }
    return return_values
  rescue ArgumentError, TypeError, SystemCallError, IOError => err
    server.close
    new_err = MemCacheError.new err.message
    new_err.set_backtrace err.backtrace
    raise new_err
  end

  # Mutex-guarded wrapper around multi_cache_get for multithreaded use.
  # (Fix: the first argument is a server object, not a socket -- the
  # parameter is renamed to match what is actually forwarded.)
  def multi_threadsafe_cache_get(server, cache_key) # :nodoc:
    @mutex.lock
    multi_cache_get(server, cache_key)
  ensure
    @mutex.unlock
  end

  # Sends one space-separated "get" request for all of +cache_key+'s keys
  # to +server+ and returns a hash of cache key => raw (still marshaled)
  # value.  Missing keys are simply absent from the result.
  def multi_cache_get(server, cache_key)
    values = {}
    socket = server.socket
    socket.write "get #{cache_key}\r\n"

    while keyline = socket.gets
      break if (keyline.strip rescue "END") == "END"
      # Response header: "VALUE <key> <flags> <bytes>\r\n", followed by
      # <bytes> bytes of data and a trailing "\r\n".
      keyline =~ /^VALUE (.+) (.+) (.+)/
      key, data_length = $1, $3
      values[key] = socket.read data_length.to_i
      socket.read(2) # discard the trailing "\r\n"
    end

    return values
  end
end
|
metadata
ADDED
@@ -0,0 +1,44 @@
|
|
1
|
+
--- !ruby/object:Gem::Specification
|
2
|
+
rubygems_version: 0.8.10
|
3
|
+
specification_version: 1
|
4
|
+
name: memcache-client_extensions
|
5
|
+
version: !ruby/object:Gem::Version
|
6
|
+
version: 0.0.1
|
7
|
+
date: 2006-11-01
|
8
|
+
summary: Add parallel get_multi support, stats retrieval and flush_all command to memcache-client
|
9
|
+
require_paths:
|
10
|
+
- lib
|
11
|
+
email: tyler.kovacs@zvents.com
|
12
|
+
homepage: http://blog.zvents.com/2006/11/1/rails-plugin-memcacheclient-extensions
|
13
|
+
rubyforge_project:
|
14
|
+
description:
|
15
|
+
autorequire: memcache-client_extensions
|
16
|
+
default_executable:
|
17
|
+
bindir: bin
|
18
|
+
has_rdoc: true
|
19
|
+
required_ruby_version: !ruby/object:Gem::Version::Requirement
|
20
|
+
requirements:
|
21
|
+
- - ">"
|
22
|
+
- !ruby/object:Gem::Version
|
23
|
+
version: 0.0.0
|
24
|
+
version:
|
25
|
+
platform: ruby
|
26
|
+
authors:
|
27
|
+
- Tyler Kovacs
|
28
|
+
files:
|
29
|
+
- lib/memcache-client_extensions.rb
|
30
|
+
- README
|
31
|
+
test_files: []
|
32
|
+
|
33
|
+
rdoc_options: []
|
34
|
+
|
35
|
+
extra_rdoc_files:
|
36
|
+
- README
|
37
|
+
executables: []
|
38
|
+
|
39
|
+
extensions: []
|
40
|
+
|
41
|
+
requirements: []
|
42
|
+
|
43
|
+
dependencies: []
|
44
|
+
|