dsander-redis 1.0.6
- data/LICENSE +20 -0
- data/README.markdown +112 -0
- data/Rakefile +75 -0
- data/lib/edis.rb +3 -0
- data/lib/redis.rb +25 -0
- data/lib/redis/client.rb +577 -0
- data/lib/redis/dist_redis.rb +118 -0
- data/lib/redis/event_machine.rb +181 -0
- data/lib/redis/hash_ring.rb +131 -0
- data/lib/redis/pipeline.rb +19 -0
- data/lib/redis/raketasks.rb +1 -0
- data/lib/redis/subscribe.rb +16 -0
- data/tasks/redis.tasks.rb +140 -0
- metadata +80 -0
data/lib/redis/dist_redis.rb
@@ -0,0 +1,118 @@
require 'redis/hash_ring'

class Redis
  class DistRedis
    attr_reader :ring
    def initialize(opts={})
      hosts = []

      db = opts[:db] || nil
      timeout = opts[:timeout] || nil

      raise "No hosts given" unless opts[:hosts]

      opts[:hosts].each do |h|
        host, port = h.split(':')
        hosts << Client.new(:host => host, :port => port, :db => db, :timeout => timeout)
      end

      @ring = HashRing.new hosts
    end

    def node_for_key(key)
      key = $1 if key =~ /\{(.*)?\}/
      @ring.get_node(key)
    end

    def add_server(server)
      server, port = server.split(':')
      @ring.add_node Client.new(:host => server, :port => port)
    end

    def method_missing(sym, *args, &blk)
      if redis = node_for_key(args.first.to_s)
        redis.send sym, *args, &blk
      else
        super
      end
    end

    def node_keys(glob)
      @ring.nodes.map do |red|
        red.keys(glob)
      end
    end

    def keys(glob)
      node_keys(glob).flatten
    end

    def save
      on_each_node :save
    end

    def bgsave
      on_each_node :bgsave
    end

    def quit
      on_each_node :quit
    end

    def flush_all
      on_each_node :flush_all
    end
    alias_method :flushall, :flush_all

    def flush_db
      on_each_node :flush_db
    end
    alias_method :flushdb, :flush_db

    def delete_cloud!
      @ring.nodes.each do |red|
        red.keys("*").each do |key|
          red.del key
        end
      end
    end

    def on_each_node(command, *args)
      @ring.nodes.each do |red|
        red.send(command, *args)
      end
    end

    def mset()

    end

    def mget(*keyz)
      results = {}
      kbn = keys_by_node(keyz)
      kbn.each do |node, node_keyz|
        node.mapped_mget(*node_keyz).each do |k, v|
          results[k] = v
        end
      end
      keyz.flatten.map { |k| results[k] }
    end

    def keys_by_node(*keyz)
      keyz.flatten.inject({}) do |kbn, k|
        node = node_for_key(k)
        next if kbn[node] && kbn[node].include?(k)
        kbn[node] ||= []
        kbn[node] << k
        kbn
      end
    end

    def type(key)
      method_missing(:type, key)
    end
  end
end

# For backwards compatibility
DistRedis = Redis::DistRedis
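Below is a minimal usage sketch for the distributed client added above (not part of the diff; hosts, ports, and key names are illustrative, and it assumes the Client defined in this gem's client.rb responds to the usual get/set commands). Each key is hashed onto one node of the ring, and a {tag} in a key pins related keys to the same node:

    require 'redis'
    require 'redis/dist_redis'

    # Two hypothetical redis-server instances.
    r = Redis::DistRedis.new(:hosts => ["127.0.0.1:6379", "127.0.0.1:6380"])

    r.set "user:1:name", "alice"        # routed via Zlib.crc32 of the whole key
    r.set "{user:1}:email", "a@b.com"   # only the {user:1} tag is hashed
    r.get "user:1:name"

    r.keys("user:*")                    # node_keys fans out to every node, then flattens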
data/lib/redis/event_machine.rb
@@ -0,0 +1,181 @@
# Extensions for using memcache-client with EventMachine

raise "redis/event_machine requires Ruby 1.9" if RUBY_VERSION < '1.9'

require 'redis'
require 'eventmachine'
require 'fiber'

class Redis

  # Since we are working in a single Thread, multiple Fiber environment,
  # disable the multithread Mutex as it will not work.
  # DEFAULT_OPTIONS[:multithread] = false

  module EventedClient

    def fiber_key
      @fiber_key ||= "redis-#{@host}-#{@port}"
    end

    def connect_to(host, port)
      log("Redis >> Using EM connection")
      sock = Thread.current[fiber_key]
      return @sock if @sock and not @sock.closed?

      Thread.current[fiber_key] = nil

      # If the host was dead, don't retry for a while.
      #return if @retry and @retry > Time.now

      Thread.current[fiber_key] ||= begin
        @sock = EM::SocketConnection.connect(host, port, @timeout)
        yielding = true
        fiber = Fiber.current
        @sock.callback do
          log("Redis >> Connected")
          @status = 'CONNECTED'
          @retry = nil
          yielding = false
          fiber.resume if Fiber.current != fiber
          log("Redis >> Done with callback")
        end
        @sock.errback do
          @sock = nil
          yielding = false
          fiber.resume if Fiber.current != fiber
        end
        Fiber.yield if yielding
        @sock
      end
    end

    def close
      sock = Thread.current[fiber_key]
      if sock
        sock.close if !sock.closed?
        Thread.current[fiber_key] = nil
      end
      @retry = nil
      @status = "NOT CONNECTED"
    end

  end
end

module EM
  module SocketConnection
    include EM::Deferrable

    def self.connect(host, port, timeout)
      EM.connect(host, port, self) do |conn|
        conn.pending_connect_timeout = timeout
      end
    end

    def initialize
      @connected = false
      @index = 0
      @buf = ''
    end

    def closed?
      !@connected
    end

    def close
      @connected = false
      close_connection(true)
    end

    def write(buf)
      send_data(buf)
    end

    def read(size)
      if can_read?(size)
        #puts("Redis >> can read")
        yank(size)
      else
        #puts("Redis >> cant read")
        fiber = Fiber.current
        @size = size
        @callback = proc { |data|
          fiber.resume(data)
        }
        #puts @callback
        # TODO Can leak fiber if the connection dies while
        # this fiber is yielded, waiting for data
        Fiber.yield
      end
    end

    SEP = "\r\n"

    def gets
      #puts "Gets #{@buf.inspect} #{@index}"
      while true
        # Read to ensure we have some data in the buffer
        line = read(1)
        # Reset the buffer index to zero
        @buf = @buf.slice(@index..-1)
        #puts "#{@buf.inspect}"
        @index = 0
        if eol = @buf.index(SEP)
          line << yank(eol + SEP.size)
          break
        else
          # EOL not in the current buffer
          line << yank(@buf.size)
        end
      end
      line
    end

    def can_read?(size)
      @buf.size >= @index + size
    end

    # EM callbacks

    def receive_data(data)
      @buf << data
      if @callback and can_read?(@size)
        callback = @callback
        #puts "Now can read #{@size} for\n#{@callback}"
        data = yank(@size)
        @callback = @size = nil
        callback.call(data)
      end
    end

    def post_init
      @connected = true
      succeed
    end

    def unbind
      if @connected
        @connected = false
      else
        fail
      end
    end

    private

    BUFFER_SIZE = 4096

    def yank(len)
      data = @buf.slice(@index, len)
      @index += len
      @index = @buf.size if @index > @buf.size
      if @index >= BUFFER_SIZE
        @buf = @buf.slice(@index..-1)
        @index = 0
      end
      data
    end

  end
end
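A rough sketch of how this evented client is intended to be driven (an assumption, not shown in the diff: the reactor must already be running, commands have to be issued from inside a Fiber, and Redis::Client needs to pick up EventedClient's connect_to/read in place of its blocking socket code):

    require 'redis'
    require 'redis/event_machine'

    EM.run do
      Fiber.new do
        redis = Redis::Client.new(:host => "127.0.0.1", :port => 6379)
        redis.set("em:key", "value")   # yields this Fiber while waiting; the reactor keeps running
        puts redis.get("em:key")
        EM.stop
      end.resume
    end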
data/lib/redis/hash_ring.rb
@@ -0,0 +1,131 @@
require 'zlib'

class Redis
  class HashRing

    POINTS_PER_SERVER = 160 # this is the default in libmemcached

    attr_reader :ring, :sorted_keys, :replicas, :nodes

    # nodes is a list of objects that have a proper to_s representation.
    # replicas indicates how many virtual points should be used per node;
    # replicas are required to improve the distribution.
    def initialize(nodes=[], replicas=POINTS_PER_SERVER)
      @replicas = replicas
      @ring = {}
      @nodes = []
      @sorted_keys = []
      nodes.each do |node|
        add_node(node)
      end
    end

    # Adds a `node` to the hash ring (including a number of replicas).
    def add_node(node)
      @nodes << node
      @replicas.times do |i|
        key = Zlib.crc32("#{node}:#{i}")
        @ring[key] = node
        @sorted_keys << key
      end
      @sorted_keys.sort!
    end

    def remove_node(node)
      @nodes.reject!{|n| n.to_s == node.to_s}
      @replicas.times do |i|
        key = Zlib.crc32("#{node}:#{i}")
        @ring.delete(key)
        @sorted_keys.reject! {|k| k == key}
      end
    end

    # get the node in the hash ring for this key
    def get_node(key)
      get_node_pos(key)[0]
    end

    def get_node_pos(key)
      return [nil,nil] if @ring.size == 0
      crc = Zlib.crc32(key)
      idx = HashRing.binary_search(@sorted_keys, crc)
      return [@ring[@sorted_keys[idx]], idx]
    end

    def iter_nodes(key)
      return [nil,nil] if @ring.size == 0
      node, pos = get_node_pos(key)
      @sorted_keys[pos..-1].each do |k|
        yield @ring[k]
      end
    end

    class << self

      # gem install RubyInline to use this code
      # Native extension to perform the binary search within the hashring.
      # There's a pure ruby version below so this is purely optional
      # for performance. In testing 20k gets and sets, the native
      # binary search shaved about 12% off the runtime (9sec -> 8sec).
      begin
        require 'inline'
        inline do |builder|
          builder.c <<-EOM
            int binary_search(VALUE ary, unsigned int r) {
              int upper = RARRAY_LEN(ary) - 1;
              int lower = 0;
              int idx = 0;

              while (lower <= upper) {
                idx = (lower + upper) / 2;

                VALUE continuumValue = RARRAY_PTR(ary)[idx];
                unsigned int l = NUM2UINT(continuumValue);
                if (l == r) {
                  return idx;
                }
                else if (l > r) {
                  upper = idx - 1;
                }
                else {
                  lower = idx + 1;
                }
              }
              if (upper < 0) {
                upper = RARRAY_LEN(ary) - 1;
              }
              return upper;
            }
          EOM
        end
      rescue Exception => e
        # Find the closest index in HashRing with value <= the given value
        def binary_search(ary, value, &block)
          upper = ary.size - 1
          lower = 0
          idx = 0

          while(lower <= upper) do
            idx = (lower + upper) / 2
            comp = ary[idx] <=> value

            if comp == 0
              return idx
            elsif comp > 0
              upper = idx - 1
            else
              lower = idx + 1
            end
          end

          if upper < 0
            upper = ary.size - 1
          end
          return upper
        end

      end
    end

  end
end
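The ring can also be exercised on its own. A small sketch (node names are made up; nodes only need a sensible to_s) showing how the replica points and get_node behave:

    require 'redis/hash_ring'

    ring = Redis::HashRing.new(["redis-a:6379", "redis-b:6379"])

    ring.get_node("session:42")    # => always the same node for a given key
    ring.add_node("redis-c:6379")  # only a fraction of keys remap to the new node
    ring.remove_node("redis-a:6379")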
data/lib/redis/pipeline.rb
@@ -0,0 +1,19 @@
class Redis
  class Pipeline < Client
    BUFFER_SIZE = 50_000

    def initialize(redis)
      @redis = redis
      @commands = []
    end

    def call_command(command)
      @commands << command
    end

    def execute
      return if @commands.empty?
      @redis.call_command(@commands)
    end
  end
end
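Finally, a guess at how Pipeline is driven (the Client side lives in the 577-line client.rb, which is not shown here, so the command tuple format below is an assumption): commands are buffered locally and handed to the client in a single batch.

    pipeline = Redis::Pipeline.new(client)       # client: an existing Redis::Client
    pipeline.call_command([:set, "foo", "bar"])  # buffered, nothing written yet
    pipeline.call_command([:incr, "counter"])
    pipeline.execute                             # flushes all buffered commands at once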