redis 0.0.1 → 0.1
- data/LICENSE +20 -0
- data/README.markdown +36 -0
- data/Rakefile +62 -0
- data/lib/dist_redis.rb +124 -0
- data/lib/hash_ring.rb +128 -0
- data/lib/pipeline.rb +23 -0
- data/lib/redis.rb +352 -0
- data/lib/redis/raketasks.rb +1 -0
- data/spec/redis_spec.rb +524 -0
- data/spec/spec_helper.rb +4 -0
- data/tasks/redis.tasks.rb +136 -0
- metadata +39 -15
data/LICENSE
ADDED
@@ -0,0 +1,20 @@
Copyright (c) 2009 Ezra Zygmuntowicz

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/README.markdown
ADDED
@@ -0,0 +1,36 @@
# redis-rb

A Ruby client library for the Redis key-value storage system.

## Information about redis

Redis is a key-value store with some interesting features:

1. It's fast.
2. Keys are strings but values can have types of "NONE", "STRING", "LIST", or "SET". Lists can be atomically pushed, popped, lpushed, lpopped, and indexed. This allows you to store things like lists of comments under one key while retaining the ability to append comments without reading and putting back the whole list.

See [redis on code.google.com](http://code.google.com/p/redis/wiki/README) for more information.

See the build on [RunCodeRun](http://runcoderun.com/rsanheim/redis-rb).

## Dependencies

1. rspec -

        sudo gem install rspec

2. redis -

        rake redis:install

3. dtach -

        rake dtach:install

4. git - git is the new black.

## Setup

Use the tasks mentioned above (in Dependencies) to get your machine set up.

## Examples

Check the examples/ directory. *Note*: you need to have redis-server running first.
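If the examples/ directory is not at hand, here is a minimal usage sketch (not part of the gem); it assumes a redis-server on the default 127.0.0.1:6379 and uses the aliases defined in lib/redis.rb (push_tail => RPUSH, list_range => LRANGE):

    require 'redis'

    r = Redis.new                       # defaults: 127.0.0.1:6379, db 0
    r['foo'] = 'bar'                    # same as r.set('foo', 'bar')
    puts r['foo']                       # => "bar"

    r.push_tail 'comments', 'first'     # aliased to RPUSH
    r.push_tail 'comments', 'second'
    p r.list_range('comments', 0, -1)   # => ["first", "second"]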
data/Rakefile
ADDED
@@ -0,0 +1,62 @@
require 'rubygems'
require 'rake/gempackagetask'
require 'rubygems/specification'
require 'date'
require 'spec/rake/spectask'
require 'tasks/redis.tasks'

GEM = 'redis'
GEM_NAME = 'redis'
GEM_VERSION = '0.1'
AUTHORS = ['Ezra Zygmuntowicz', 'Taylor Weibley', 'Matthew Clark', 'Brian McKinney', 'Salvatore Sanfilippo', 'Luca Guidi']
EMAIL = "ez@engineyard.com"
HOMEPAGE = "http://github.com/ezmobius/redis-rb"
SUMMARY = "Ruby client library for redis key value storage server"

spec = Gem::Specification.new do |s|
  s.name = GEM
  s.version = GEM_VERSION
  s.platform = Gem::Platform::RUBY
  s.has_rdoc = true
  s.extra_rdoc_files = ["LICENSE"]
  s.summary = SUMMARY
  s.description = s.summary
  s.authors = AUTHORS
  s.email = EMAIL
  s.homepage = HOMEPAGE
  s.add_dependency "rspec"
  s.require_path = 'lib'
  s.autorequire = GEM
  s.files = %w(LICENSE README.markdown Rakefile) + Dir.glob("{lib,tasks,spec}/**/*")
end

task :default => :spec

desc "Run specs"
Spec::Rake::SpecTask.new do |t|
  t.spec_files = FileList['spec/**/*_spec.rb']
  t.spec_opts = %w(-fs --color)
end

Rake::GemPackageTask.new(spec) do |pkg|
  pkg.gem_spec = spec
end

desc "install the gem locally"
task :install => [:package] do
  sh %{sudo gem install pkg/#{GEM}-#{GEM_VERSION}}
end

desc "create a gemspec file"
task :make_spec do
  File.open("#{GEM}.gemspec", "w") do |file|
    file.puts spec.to_ruby
  end
end

desc "Run all examples with RCov"
Spec::Rake::SpecTask.new(:rcov) do |t|
  t.spec_files = FileList['spec/**/*_spec.rb']
  t.rcov = true
end
data/lib/dist_redis.rb
ADDED
@@ -0,0 +1,124 @@
require 'redis'
require 'hash_ring'
class DistRedis
  attr_reader :ring
  def initialize(opts={})
    hosts = []

    db = opts[:db] || nil
    timeout = opts[:timeout] || nil

    raise Error, "No hosts given" unless opts[:hosts]

    opts[:hosts].each do |h|
      host, port = h.split(':')
      hosts << Redis.new(:host => host, :port => port, :db => db, :timeout => timeout)
    end

    @ring = HashRing.new hosts
  end

  def node_for_key(key)
    key = $1 if key =~ /\{(.*)?\}/
    @ring.get_node(key)
  end

  def add_server(server)
    server, port = server.split(':')
    @ring.add_node Redis.new(:host => server, :port => port)
  end

  def method_missing(sym, *args, &blk)
    if redis = node_for_key(args.first.to_s)
      redis.send sym, *args, &blk
    else
      super
    end
  end

  def keys(glob)
    @ring.nodes.map do |red|
      red.keys(glob)
    end
  end

  def save
    on_each_node :save
  end

  def bgsave
    on_each_node :bgsave
  end

  def quit
    on_each_node :quit
  end

  def flush_all
    on_each_node :flush_all
  end
  alias_method :flushall, :flush_all

  def flush_db
    on_each_node :flush_db
  end
  alias_method :flushdb, :flush_db

  def delete_cloud!
    @ring.nodes.each do |red|
      red.keys("*").each do |key|
        red.delete key
      end
    end
  end

  def on_each_node(command, *args)
    @ring.nodes.each do |red|
      red.send(command, *args)
    end
  end

end


if __FILE__ == $0

  r = DistRedis.new :hosts => ['localhost:6379', 'localhost:6380', 'localhost:6381', 'localhost:6382']
  r['urmom'] = 'urmom'
  r['urdad'] = 'urdad'
  r['urmom1'] = 'urmom1'
  r['urdad1'] = 'urdad1'
  r['urmom2'] = 'urmom2'
  r['urdad2'] = 'urdad2'
  r['urmom3'] = 'urmom3'
  r['urdad3'] = 'urdad3'
  p r['urmom']
  p r['urdad']
  p r['urmom1']
  p r['urdad1']
  p r['urmom2']
  p r['urdad2']
  p r['urmom3']
  p r['urdad3']

  r.push_tail 'listor', 'foo1'
  r.push_tail 'listor', 'foo2'
  r.push_tail 'listor', 'foo3'
  r.push_tail 'listor', 'foo4'
  r.push_tail 'listor', 'foo5'

  p r.pop_tail('listor')
  p r.pop_tail('listor')
  p r.pop_tail('listor')
  p r.pop_tail('listor')
  p r.pop_tail('listor')

  puts "key distribution:"

  r.ring.nodes.each do |red|
    p [red.server, red.keys("*")]
  end
  r.delete_cloud!
  p r.keys('*')

end
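A brief sketch (hypothetical hosts, servers assumed running) of how DistRedis routes keys: node_for_key hashes the whole key unless it contains a {tag}, in which case only the tag is hashed, so keys that share a tag always land on the same node.

    r = DistRedis.new :hosts => ['localhost:6379', 'localhost:6380']

    r['user:1:name']   = 'ezra'   # hashed on the full key
    r['{user:1}:age']  = '30'     # hashed on "user:1" only, because of the {tag}
    r['{user:1}:city'] = 'sf'     # guaranteed to live on the same node as the key above

    p r.node_for_key('{user:1}:age').server
    p r.node_for_key('{user:1}:city').server  # same server as the previous line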
data/lib/hash_ring.rb
ADDED
@@ -0,0 +1,128 @@
require 'zlib'

class HashRing

  POINTS_PER_SERVER = 160 # this is the default in libmemcached

  attr_reader :ring, :sorted_keys, :replicas, :nodes

  # nodes is a list of objects that have a proper to_s representation.
  # replicas indicates how many virtual points should be used per node;
  # replicas are required to improve the distribution.
  def initialize(nodes=[], replicas=POINTS_PER_SERVER)
    @replicas = replicas
    @ring = {}
    @nodes = []
    @sorted_keys = []
    nodes.each do |node|
      add_node(node)
    end
  end

  # Adds a `node` to the hash ring (including a number of replicas).
  def add_node(node)
    @nodes << node
    @replicas.times do |i|
      key = Zlib.crc32("#{node}:#{i}")
      @ring[key] = node
      @sorted_keys << key
    end
    @sorted_keys.sort!
  end

  def remove_node(node)
    @nodes.reject!{|n| n.to_s == node.to_s}
    @replicas.times do |i|
      key = Zlib.crc32("#{node}:#{i}")
      @ring.delete(key)
      @sorted_keys.reject! {|k| k == key}
    end
  end

  # get the node in the hash ring for this key
  def get_node(key)
    get_node_pos(key)[0]
  end

  def get_node_pos(key)
    return [nil,nil] if @ring.size == 0
    crc = Zlib.crc32(key)
    idx = HashRing.binary_search(@sorted_keys, crc)
    return [@ring[@sorted_keys[idx]], idx]
  end

  def iter_nodes(key)
    return [nil,nil] if @ring.size == 0
    node, pos = get_node_pos(key)
    @sorted_keys[pos..-1].each do |k|
      yield @ring[k]
    end
  end

  class << self

    # gem install RubyInline to use this code
    # Native extension to perform the binary search within the hashring.
    # There's a pure ruby version below so this is purely optional
    # for performance. In testing 20k gets and sets, the native
    # binary search shaved about 12% off the runtime (9sec -> 8sec).
    begin
      require 'inline'
      inline do |builder|
        builder.c <<-EOM
          int binary_search(VALUE ary, unsigned int r) {
              int upper = RARRAY_LEN(ary) - 1;
              int lower = 0;
              int idx = 0;

              while (lower <= upper) {
                  idx = (lower + upper) / 2;

                  VALUE continuumValue = RARRAY_PTR(ary)[idx];
                  unsigned int l = NUM2UINT(continuumValue);
                  if (l == r) {
                      return idx;
                  }
                  else if (l > r) {
                      upper = idx - 1;
                  }
                  else {
                      lower = idx + 1;
                  }
              }
              return upper;
          }
        EOM
      end
    rescue Exception => e
      # Find the closest index in HashRing with value <= the given value
      def binary_search(ary, value, &block)
        upper = ary.size - 1
        lower = 0
        idx = 0

        while(lower <= upper) do
          idx = (lower + upper) / 2
          comp = ary[idx] <=> value

          if comp == 0
            return idx
          elsif comp > 0
            upper = idx - 1
          else
            lower = idx + 1
          end
        end
        return upper
      end

    end
  end

end

# ring = HashRing.new ['server1', 'server2', 'server3']
# p ring
# #
# p ring.get_node "kjhjkjlkjlkkh"
#
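A small illustrative sketch, not part of the gem, of the consistent-hashing property the virtual points exist for: removing one node remaps only the keys that lived on it instead of reshuffling the whole key space.

    ring = HashRing.new ['server1', 'server2', 'server3']

    keys   = (1..1000).map { |i| "key#{i}" }
    before = keys.map { |k| [k, ring.get_node(k)] }

    ring.remove_node('server2')

    moved = before.count { |k, node| ring.get_node(k) != node }
    puts "#{moved} of #{keys.size} keys changed node"   # roughly server2's share, not all of them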
data/lib/pipeline.rb
ADDED
@@ -0,0 +1,23 @@
require "redis"

class Redis
  class Pipeline < Redis
    BUFFER_SIZE = 50_000

    def initialize(redis)
      @redis = redis
      @commands = []
    end

    def call_command(command)
      @commands << command
    end

    def execute
      return if @commands.empty?
      @redis.call_command(@commands)
      @commands.clear
    end

  end
end
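Pipeline is driven through Redis#pipelined (defined in data/lib/redis.rb below): the block buffers commands via the overridden call_command, and execute hands them to the real client as one batch. A short usage sketch, assuming a running server:

    r = Redis.new

    r.pipelined do |pipeline|
      pipeline.set 'foo', 'bar'        # buffered, not sent yet
      pipeline.incr 'counter'
      pipeline.push_tail 'list', 'x'
    end                                # execute sends the whole buffer in a single write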
data/lib/redis.rb
ADDED
@@ -0,0 +1,352 @@
require 'socket'
require File.join(File.dirname(__FILE__),'pipeline')

begin
  if RUBY_VERSION >= '1.9'
    require 'timeout'
    RedisTimer = Timeout
  else
    require 'system_timer'
    RedisTimer = SystemTimer
  end
rescue LoadError
  RedisTimer = nil
end

class Redis
  OK = "OK".freeze
  MINUS = "-".freeze
  PLUS = "+".freeze
  COLON = ":".freeze
  DOLLAR = "$".freeze
  ASTERISK = "*".freeze

  BULK_COMMANDS = {
    "set" => true,
    "setnx" => true,
    "rpush" => true,
    "lpush" => true,
    "lset" => true,
    "lrem" => true,
    "sadd" => true,
    "srem" => true,
    "sismember" => true,
    "echo" => true,
    "getset" => true,
    "smove" => true
  }

  MULTI_BULK_COMMANDS = {
    "mset" => true,
    "msetnx" => true
  }

  BOOLEAN_PROCESSOR = lambda{|r| r == 1 }

  REPLY_PROCESSOR = {
    "exists" => BOOLEAN_PROCESSOR,
    "sismember" => BOOLEAN_PROCESSOR,
    "sadd" => BOOLEAN_PROCESSOR,
    "srem" => BOOLEAN_PROCESSOR,
    "smove" => BOOLEAN_PROCESSOR,
    "move" => BOOLEAN_PROCESSOR,
    "setnx" => BOOLEAN_PROCESSOR,
    "del" => BOOLEAN_PROCESSOR,
    "renamenx" => BOOLEAN_PROCESSOR,
    "expire" => BOOLEAN_PROCESSOR,
    "keys" => lambda{|r| r.split(" ")},
    "info" => lambda{|r|
      info = {}
      r.each_line {|kv|
        k,v = kv.split(":",2).map{|x| x.chomp}
        info[k.to_sym] = v
      }
      info
    }
  }

  ALIASES = {
    "flush_db" => "flushdb",
    "flush_all" => "flushall",
    "last_save" => "lastsave",
    "key?" => "exists",
    "delete" => "del",
    "randkey" => "randomkey",
    "list_length" => "llen",
    "push_tail" => "rpush",
    "push_head" => "lpush",
    "pop_tail" => "rpop",
    "pop_head" => "lpop",
    "list_set" => "lset",
    "list_range" => "lrange",
    "list_trim" => "ltrim",
    "list_index" => "lindex",
    "list_rm" => "lrem",
    "set_add" => "sadd",
    "set_delete" => "srem",
    "set_count" => "scard",
    "set_member?" => "sismember",
    "set_members" => "smembers",
    "set_intersect" => "sinter",
    "set_intersect_store" => "sinterstore",
    "set_inter_store" => "sinterstore",
    "set_union" => "sunion",
    "set_union_store" => "sunionstore",
    "set_diff" => "sdiff",
    "set_diff_store" => "sdiffstore",
    "set_move" => "smove",
    "set_unless_exists" => "setnx",
    "rename_unless_exists" => "renamenx",
    "type?" => "type"
  }

  DISABLED_COMMANDS = {
    "monitor" => true,
    "sync" => true
  }

  def initialize(options = {})
    @host = options[:host] || '127.0.0.1'
    @port = (options[:port] || 6379).to_i
    @db = (options[:db] || 0).to_i
    @timeout = (options[:timeout] || 5).to_i
    @password = options[:password]
    @logger = options[:logger]
    @thread_safe = options[:thread_safe]
    @mutex = Mutex.new if @thread_safe

    @logger.info { self.to_s } if @logger
  end

  def to_s
    "Redis Client connected to #{server} against DB #{@db}"
  end

  def server
    "#{@host}:#{@port}"
  end

  def connect_to_server
    @sock = connect_to(@host, @port, @timeout == 0 ? nil : @timeout)
    call_command(["auth",@password]) if @password
    call_command(["select",@db]) unless @db == 0
  end

  def connect_to(host, port, timeout=nil)
    # We support connect() timeout only if system_timer is available
    # or if we are running against Ruby >= 1.9
    # Timeout reading from the socket instead will be supported anyway.
    if @timeout != 0 and RedisTimer
      begin
        sock = TCPSocket.new(host, port)
      rescue Timeout::Error
        @sock = nil
        raise Timeout::Error, "Timeout connecting to the server"
      end
    else
      sock = TCPSocket.new(host, port)
    end
    sock.setsockopt Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1

    # If the timeout is set we set the low level socket options in order
    # to make sure a blocking read will return after the specified number
    # of seconds. This hack is from memcached ruby client.
    if timeout
      secs = Integer(timeout)
      usecs = Integer((timeout - secs) * 1_000_000)
      optval = [secs, usecs].pack("l_2")
      sock.setsockopt Socket::SOL_SOCKET, Socket::SO_RCVTIMEO, optval
      sock.setsockopt Socket::SOL_SOCKET, Socket::SO_SNDTIMEO, optval
    end
    sock
  end

  def method_missing(*argv)
    call_command(argv)
  end

  def call_command(argv)
    @logger.debug { argv.inspect } if @logger

    # This wrapper around raw_call_command handles reconnection on socket
    # errors. We try to reconnect just one time, otherwise we let the
    # error arise.
    connect_to_server if !@sock

    begin
      raw_call_command(argv.dup)
    rescue Errno::ECONNRESET, Errno::EPIPE, Errno::ECONNABORTED
      @sock.close
      @sock = nil
      connect_to_server
      raw_call_command(argv.dup)
    end
  end

  def raw_call_command(argvp)
    pipeline = argvp[0].is_a?(Array)

    unless pipeline
      argvv = [argvp]
    else
      argvv = argvp
    end

    if MULTI_BULK_COMMANDS[argvv.flatten[0].to_s]
      # TODO improve this code
      argvp = argvv.flatten
      values = argvp.pop.to_a.flatten
      argvp = values.unshift(argvp[0])
      command = ["*#{argvp.size}"]
      argvp.each do |v|
        v = v.to_s
        command << "$#{get_size(v)}"
        command << v
      end
      command = command.map {|cmd| "#{cmd}\r\n"}.join
    else
      command = ""
      argvv.each do |argv|
        bulk = nil
        argv[0] = argv[0].to_s.downcase
        argv[0] = ALIASES[argv[0]] if ALIASES[argv[0]]
        raise "#{argv[0]} command is disabled" if DISABLED_COMMANDS[argv[0]]
        if BULK_COMMANDS[argv[0]] and argv.length > 1
          bulk = argv[-1].to_s
          argv[-1] = get_size(bulk)
        end
        command << "#{argv.join(' ')}\r\n"
        command << "#{bulk}\r\n" if bulk
      end
    end
    results = maybe_lock { process_command(command, argvv) }

    return pipeline ? results : results[0]
  end

  def process_command(command, argvv)
    @sock.write(command)
    argvv.map do |argv|
      processor = REPLY_PROCESSOR[argv[0]]
      processor ? processor.call(read_reply) : read_reply
    end
  end

  def maybe_lock(&block)
    if @thread_safe
      @mutex.synchronize &block
    else
      block.call
    end
  end

  def select(*args)
    raise "SELECT not allowed, use the :db option when creating the object"
  end

  def [](key)
    self.get(key)
  end

  def []=(key,value)
    set(key,value)
  end

  def set(key, value, expiry=nil)
    s = call_command([:set, key, value]) == OK
    expire(key, expiry) if s && expiry
    s
  end

  def sort(key, options = {})
    cmd = ["SORT"]
    cmd << key
    cmd << "BY #{options[:by]}" if options[:by]
    cmd << "GET #{[options[:get]].flatten * ' GET '}" if options[:get]
    cmd << "#{options[:order]}" if options[:order]
    cmd << "LIMIT #{options[:limit].join(' ')}" if options[:limit]
    call_command(cmd)
  end

  def incr(key, increment = nil)
    call_command(increment ? ["incrby",key,increment] : ["incr",key])
  end

  def decr(key,decrement = nil)
    call_command(decrement ? ["decrby",key,decrement] : ["decr",key])
  end

  # Similar to memcache.rb's #get_multi, returns a hash mapping
  # keys to values.
  def mapped_mget(*keys)
    result = {}
    mget(*keys).each do |value|
      key = keys.shift
      result.merge!(key => value) unless value.nil?
    end
    result
  end

  # Ruby defines a now deprecated type method so we need to override it here
  # since it will never hit method_missing
  def type(key)
    call_command(['type', key])
  end

  def quit
    call_command(['quit'])
  rescue Errno::ECONNRESET
  end

  def pipelined(&block)
    pipeline = Pipeline.new self
    yield pipeline
    pipeline.execute
  end

  def read_reply
    # We read the first byte using read() mainly because gets() is
    # immune to raw socket timeouts.
    begin
      rtype = @sock.read(1)
    rescue Errno::EAGAIN
      # We want to make sure it reconnects on the next command after the
      # timeout. Otherwise the server may reply in the meantime leaving
      # the protocol in a desync status.
      @sock = nil
      raise Errno::EAGAIN, "Timeout reading from the socket"
    end

    raise Errno::ECONNRESET,"Connection lost" if !rtype
    line = @sock.gets
    case rtype
    when MINUS
      raise MINUS + line.strip
    when PLUS
      line.strip
    when COLON
      line.to_i
    when DOLLAR
      bulklen = line.to_i
      return nil if bulklen == -1
      data = @sock.read(bulklen)
      @sock.read(2) # CRLF
      data
    when ASTERISK
      objects = line.to_i
      return nil if objects == -1
      res = []
      objects.times {
        res << read_reply
      }
      res
    else
      raise "Protocol error, got '#{rtype}' as initial reply byte"
    end
  end

  private
    def get_size(string)
      string.respond_to?(:bytesize) ? string.bytesize : string.size
    end
end
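For reference, a sketch of what raw_call_command writes and what read_reply parses, based on the command tables above (illustrative byte strings, not output captured from a server):

    # SET is in BULK_COMMANDS, so the value is sent as a counted payload:
    #   client -> server   "set foo 3\r\nbar\r\n"
    #   server -> client   "+OK\r\n"            # PLUS: single-line status
    #
    # GET is an inline command; the reply is a bulk string:
    #   client -> server   "get foo\r\n"
    #   server -> client   "$3\r\nbar\r\n"      # DOLLAR: byte count, then data
    #
    # MSET is in MULTI_BULK_COMMANDS, so every argument is counted:
    #   client -> server   "*3\r\n$4\r\nmset\r\n$3\r\nfoo\r\n$3\r\nbar\r\n"
    #
    # Integer replies (e.g. INCR) arrive as ":1\r\n" (COLON), and multi-value
    # replies (e.g. KEYS, LRANGE) arrive as "*N" (ASTERISK) and are read
    # recursively, one nested reply per element.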