remcached 0.3.1
- data/.gitignore +2 -0
- data/README.rst +105 -0
- data/Rakefile +13 -0
- data/VERSION.yml +4 -0
- data/examples/fill.rb +70 -0
- data/lib/remcached/client.rb +143 -0
- data/lib/remcached/const.rb +64 -0
- data/lib/remcached/pack_array.rb +46 -0
- data/lib/remcached/packet.rb +275 -0
- data/lib/remcached.rb +158 -0
- data/remcached.gemspec +55 -0
- data/spec/client_spec.rb +36 -0
- data/spec/memcached_spec.rb +268 -0
- data/spec/packet_spec.rb +125 -0
- metadata +71 -0
data/.gitignore
ADDED
data/README.rst
ADDED
@@ -0,0 +1,105 @@
remcached
=========

* **Ruby EventMachine memCACHED client implementation**
* provides a direct interface to the memcached protocol and its
  semantics
* uses the memcached `binary protocol`_ to reduce parsing overhead on
  the server side (requires memcached >= 1.3)
* supports multiple servers with simple round-robin key hashing
  (**TODO:** implement the libketama algorithm) in a fault-tolerant
  way (see the sketch after this list)
* writing your own abstraction layer is recommended
* uses RSpec
* partially documented in RDoc-style

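The round-robin key hashing itself lives in ``lib/remcached.rb``. As a
purely hypothetical illustration (not the gem's actual code), picking a
server for a key can be as simple as a checksum modulo the number of
connected clients::

  # Illustrative sketch only, not remcached's implementation:
  # map a key to one of the connected clients.
  def client_for_key(key, clients)
    clients[key.sum % clients.size]   # same key always hits the same server
  end
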

Callbacks
---------

Each request `may` be passed a callback. These are not two-cased
(success & failure) EM deferrables, but standard Ruby callbacks. The
rationale behind this is that there are no usual success/failure
responses; instead, you will want to evaluate ``response[:status]``
yourself to check for cache misses, version conflicts, network
disconnects etc.

A callback may be kept around if it returns ``:proceed``, in order to
catch multi-response commands such as ``STAT``.

remcached has been built with **fault tolerance** in mind: a callback
will be called with just ``{:status => Memcached::Errors::DISCONNECTED}``
if the network connection has gone away. Thus, you can expect your
callback to be called, unless of course you're using `quiet`
commands. In that case, only a "non-usual response" from the server or
a network failure will invoke your block.

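For example, ``STAT`` yields one response packet per statistic.
``Client#stats`` (see ``lib/remcached/client.rb`` below) keeps its
internal callback registered by returning ``:proceed`` until the
terminating packet with an empty key arrives; from the caller's side
this looks like the following snippet, adapted from
``examples/fill.rb``::

  stats = {}
  Memcached.usable_clients[0].stats do |resp|
    if resp[:key] != ''
      stats[resp[:key]] = resp[:value]   # one packet per statistic
    else
      # the empty key marks the final packet of the STAT sequence
      puts "Stats: #{stats['bytes']} bytes in #{stats['curr_items']} items"
    end
  end
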

Multi commands
--------------

The technique is described in the `binary protocol`_ spec, section
**4.2**. ``Memcached.multi_operation`` helps you with exactly that:
it sends lots of those `quiet` commands, except for the last one,
which is a `normal` command that triggers an acknowledgement for all
of the preceding commands.

This is of course implemented per-server to accommodate
load-balancing.

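``Memcached.multi_add`` follows the same pattern; ``examples/fill.rb``
below uses it to store a whole batch and inspect each response's
status individually. A trimmed-down version, where ``reqs`` is an
array of ``{:key => ..., :value => ...}`` hashes::

  Memcached.multi_add(reqs) do |resps|
    resps.each do |key, resp|
      case resp[:status]
      when Memcached::Errors::NO_ERROR
        # stored
      when Memcached::Errors::KEY_EXISTS
        # already present, nothing to do
      else
        puts "Cannot set #{key}: status=#{resp[:status].inspect}"
      end
    end
  end
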
Usage
-----

First, pass your memcached servers to the library::

  Memcached.servers = %w(localhost localhost:11212 localhost:11213)

Note that they won't be connected immediately; use ``Memcached.usable?``
to check. Since that complicates your own code, you can instead check
for ``response[:status] == Memcached::Errors::DISCONNECTED`` in all
your response callbacks to catch network errors.

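``examples/fill.rb`` below waits for the connection with a periodic
timer before it starts issuing requests. A trimmed-down version of
that pattern, where ``start_work`` is a placeholder for your own
code::

  Memcached.servers = %w(localhost)
  @t = EM::PeriodicTimer.new(0.01) do
    if Memcached.usable?
      @t.cancel
      start_work    # placeholder: begin issuing requests here
    end
  end
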
Further usage is pretty straight-forward::

  Memcached.get(:key => 'Hello') do |response|
    case response[:status]
    when Memcached::Errors::NO_ERROR
      use_cached_value response[:value]  # ...
    when Memcached::Errors::KEY_NOT_FOUND
      refresh_cache!  # ...
    when Memcached::Errors::DISCONNECTED
      proceed_uncached  # ...
    else
      cry_for_help  # ...
    end
  end

  Memcached.set(:key => 'Hello', :value => 'World',
                :expiration => 600) do |response|
    case response[:status]
    when Memcached::Errors::NO_ERROR
      # That's good
    when Memcached::Errors::DISCONNECTED
      # Maybe stop filling the cache for now?
    else
      # What could've gone wrong?
    end
  end

Multi-commands may require a bit of precaution::

  Memcached.multi_get([{:key => 'foo'},
                       {:key => 'bar'}]) do |responses|
    # responses is now a hash of Key => Response
  end

It is not guaranteed that any of these keys will be present in the
response hash. Moreover, a key may be present even if it carries just
a usual response, because the last request is always sent non-quiet.

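A defensive way to consume such a result might look like this (a
sketch; ``handle_hit`` and ``handle_miss`` are placeholders for your
own code)::

  Memcached.multi_get([{:key => 'foo'},
                       {:key => 'bar'}]) do |responses|
    %w(foo bar).each do |key|
      resp = responses[key]
      if resp && resp[:status] == Memcached::Errors::NO_ERROR
        handle_hit key, resp[:value]
      else
        handle_miss key    # absent key or non-hit status
      end
    end
  end
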
**HAPPY CACHING!**

.. _binary protocol: http://code.google.com/p/memcached/wiki/MemcacheBinaryProtocol
data/Rakefile
ADDED
@@ -0,0 +1,13 @@
begin
  require 'jeweler'
  Jeweler::Tasks.new do |gemspec|
    gemspec.name = "remcached"
    gemspec.summary = "Ruby EventMachine memcached client"
    gemspec.description = gemspec.summary
    gemspec.email = "astro@spaceboyz.net"
    gemspec.homepage = "http://github.com/astro/remcached/"
    gemspec.authors = ["Stephan Maka"]
  end
rescue LoadError
  puts "Jeweler not available. Install it with: sudo gem install technicalpickles-jeweler -s http://gems.github.com"
end
data/VERSION.yml
ADDED
data/examples/fill.rb
ADDED
@@ -0,0 +1,70 @@
#!/usr/bin/env ruby

# Experimentally determine how many items fit in your memcached
# instance. Adjust parameters below for your scenario.

BATCH_SIZE = 1000
KEY_SIZE = 20
VALUE_SIZE = 20


$: << File.dirname(__FILE__) + "/../lib"
require 'remcached'

EM.run do
  @total = 0

  # Action
  def fill
    old_total = @total

    reqs = (1..BATCH_SIZE).map {
      @total += 1
      { :key => sprintf("%0#{KEY_SIZE}X", @total),
        :value => sprintf("%0#{VALUE_SIZE}X", @total)
      }
    }
    Memcached.multi_add(reqs) do |resps|
      resps.each do |key, resp|
        case resp[:status]
        when Memcached::Errors::NO_ERROR
          :ok
        when Memcached::Errors::KEY_EXISTS
          @total -= 1
        else
          puts "Cannot set #{key}: status=#{resp[:status].inspect}"
          @total -= 1
        end
      end

      puts "Added #{@total - old_total}, now: #{@total}"
      if Memcached.usable?
        stats = {}
        Memcached.usable_clients[0].stats do |resp|
          if resp[:key] != ''
            stats[resp[:key]] = resp[:value]
          else
            puts "Stats: #{stats['bytes']} bytes in #{stats['curr_items']} of #{stats['total_items']} items"
          end
        end

        # Next round:
        fill
      else
        EM.stop
      end
    end
  end

  # Initialization & start
  Memcached.servers = %w(localhost)
  @t = EM::PeriodicTimer.new(0.01) do
    if Memcached.usable?
      puts "Connected to server"
      @t.cancel
      fill
    else
      puts "Waiting for server connection..."
    end
  end
end
data/lib/remcached/client.rb
ADDED
@@ -0,0 +1,143 @@
require 'eventmachine'

module Memcached
  class Connection < EventMachine::Connection
    def self.connect(host, port=11211, &connect_callback)
      df = EventMachine::DefaultDeferrable.new
      df.callback &connect_callback

      EventMachine.connect(host, port, self) do |me|
        me.instance_eval {
          @host, @port = host, port
          @connect_deferrable = df
        }
      end
    end

    def connected?
      @connected
    end

    def reconnect
      @connect_deferrable = EventMachine::DefaultDeferrable.new
      super @host, @port
      @connect_deferrable
    end

    def post_init
      @recv_buf = ""
      @recv_state = :header
      @connected = false
    end

    def connection_completed
      @connected = true
      @connect_deferrable.succeed(self)
    end

    RECONNECT_DELAY = 10
    RECONNECT_JITTER = 5
    def unbind
      @connected = false
      EventMachine::Timer.new(RECONNECT_DELAY + rand(RECONNECT_JITTER),
                              method(:reconnect))
    end

    def send_packet(pkt)
      send_data pkt.to_s
    end

    def receive_data(data)
      @recv_buf += data

      done = false
      while not done

        if @recv_state == :header && @recv_buf.length >= 24
          @received = Response.parse_header(@recv_buf[0..23])
          @recv_buf = @recv_buf[24..-1]
          @recv_state = :body

        elsif @recv_state == :body && @recv_buf.length >= @received[:total_body_length]
          @recv_buf = @received.parse_body(@recv_buf)
          receive_packet(@received)

          @recv_state = :header

        else
          done = true
        end
      end
    end
  end

  class Client < Connection
    def post_init
      super
      @opaque_counter = 0
      @pending = []
    end

    def unbind
      super
      @pending.each do |opaque, callback|
        callback.call :status => Errors::DISCONNECTED
      end
      @pending = []
    end

    def send_request(pkt, &callback)
      @opaque_counter += 1
      @opaque_counter %= 1 << 32
      pkt[:opaque] = @opaque_counter
      send_packet pkt

      if callback
        @pending << [@opaque_counter, callback]
      end
    end

    ##
    # memcached responses arrive in the same order as their
    # corresponding requests. Therefore quiet requests that have not
    # yielded responses will be dropped silently to free memory from
    # +@pending+
    #
    # When a callback has been fired and returned +:proceed+ without a
    # succeeding packet, we still keep it referenced around for
    # commands such as STAT which have multiple response packets.
    def receive_packet(response)
      pending_pos = nil
      pending_callback = nil
      @pending.each_with_index do |(pending_opaque, pending_cb), i|
        if response[:opaque] == pending_opaque
          pending_pos = i
          pending_callback = pending_cb
          break
        end
      end

      if pending_pos
        @pending = @pending[pending_pos..-1]
        begin
          if pending_callback.call(response) != :proceed
            @pending.shift
          end
        rescue Exception => e
          $stderr.puts "#{e.class}: #{e}\n" + e.backtrace.join("\n")
        end
      end
    end

    # Callback will be called multiple times
    def stats(contents={}, &callback)
      send_request Request::Stats.new(contents) do |result|
        callback.call result

        if result[:status] == Errors::NO_ERROR && result[:key] != ''
          :proceed
        end
      end
    end
  end
end
data/lib/remcached/const.rb
ADDED
@@ -0,0 +1,64 @@
module Memcached
  module Datatypes
    RAW_BYTES = 0x00
  end

  module Errors
    NO_ERROR = 0x0000
    KEY_NOT_FOUND = 0x0001
    KEY_EXISTS = 0x0002
    VALUE_TOO_LARGE = 0x0003
    INVALID_ARGS = 0x0004
    ITEM_NOT_STORED = 0x0005
    NON_NUMERIC_VALUE = 0x0006

    DISCONNECTED = 0xffff
  end

  module Commands
    GET = 0x00
    SET = 0x01
    ADD = 0x02
    REPLACE = 0x03
    DELETE = 0x04
    INCREMENT = 0x05
    DECREMENT = 0x06
    QUIT = 0x07
    STAT = 0x10
    GETQ = 0x09
    SETQ = 0x11
    ADDQ = 0x12
    DELETEQ = 0x14

=begin
Possible values of the one-byte field:
0x00    Get
0x01    Set
0x02    Add
0x03    Replace
0x04    Delete
0x05    Increment
0x06    Decrement
0x07    Quit
0x08    Flush
0x09    GetQ
0x0A    No-op
0x0B    Version
0x0C    GetK
0x0D    GetKQ
0x0E    Append
0x0F    Prepend
0x10    Stat
0x11    SetQ
0x12    AddQ
0x13    ReplaceQ
0x14    DeleteQ
0x15    IncrementQ
0x16    DecrementQ
0x17    QuitQ
0x18    FlushQ
0x19    AppendQ
0x1A    PrependQ
=end
  end
end
data/lib/remcached/pack_array.rb
ADDED
@@ -0,0 +1,46 @@
##
# Works exactly like Array#pack and String#unpack, except that it
# inverts 'q' & 'Q' prior to packing/after unpacking. This is done to
# achieve network byte order for these values on a little-endian machine.
#
# FIXME: implement check for big-endian machines.
module Memcached::PackArray
  def self.pack(ary, fmt1)
    fmt2 = ''
    values = []
    fmt1.each_char do |c|
      if c == 'Q' || c == 'q'
        fmt2 += 'a8'
        values << [ary.shift].pack(c).reverse
      else
        fmt2 += c
        values << ary.shift
      end
    end

    values.pack(fmt2)
  end

  def self.unpack(buf, fmt1)
    fmt2 = ''
    reverse = []
    i = 0
    fmt1.each_char do |c|
      if c == 'Q' || c == 'q'
        fmt2 += 'a8'
        reverse << [i, c]
      else
        fmt2 += c
      end
      i += 1
    end

    ary = buf.unpack(fmt2)

    reverse.each do |i, c|
      ary[i], = ary[i].reverse.unpack(c)
    end

    ary
  end
end