vanity 1.3.0 → 1.4.0.beta
- data/CHANGELOG +61 -3
- data/Gemfile +22 -14
- data/README.rdoc +9 -4
- data/Rakefile +72 -12
- data/bin/vanity +16 -4
- data/lib/vanity.rb +7 -5
- data/lib/vanity/adapters/abstract_adapter.rb +135 -0
- data/lib/vanity/adapters/mock_adapter.rb +157 -0
- data/lib/vanity/adapters/mongo_adapter.rb +162 -0
- data/lib/vanity/adapters/redis_adapter.rb +154 -0
- data/lib/vanity/backport.rb +0 -17
- data/lib/vanity/commands/upgrade.rb +34 -0
- data/lib/vanity/experiment/ab_test.rb +46 -41
- data/lib/vanity/experiment/base.rb +13 -15
- data/lib/vanity/frameworks/rails.rb +5 -9
- data/lib/vanity/metric/active_record.rb +10 -4
- data/lib/vanity/metric/base.rb +46 -23
- data/lib/vanity/metric/google_analytics.rb +7 -0
- data/lib/vanity/metric/remote.rb +53 -0
- data/lib/vanity/playground.rb +133 -49
- data/test/{ab_test_test.rb → experiment/ab_test.rb} +47 -3
- data/test/{experiment_test.rb → experiment/base_test.rb} +8 -8
- data/test/metric/active_record_test.rb +253 -0
- data/test/metric/base_test.rb +293 -0
- data/test/metric/google_analytics_test.rb +104 -0
- data/test/metric/remote_test.rb +108 -0
- data/test/myapp/app/controllers/application_controller.rbc +66 -0
- data/test/myapp/app/controllers/main_controller.rb +3 -3
- data/test/myapp/app/controllers/main_controller.rbc +347 -0
- data/test/myapp/config/boot.rbc +2534 -0
- data/test/myapp/config/environment.rbc +403 -0
- data/test/myapp/config/routes.rbc +174 -0
- data/test/myapp/log/production.log +2601 -0
- data/test/passenger_test.rb +14 -5
- data/test/passenger_test.rbc +0 -0
- data/test/playground_test.rbc +256 -0
- data/test/rails_test.rb +75 -22
- data/test/rails_test.rbc +4086 -0
- data/test/test_helper.rb +30 -7
- data/test/test_helper.rbc +4297 -0
- data/vanity.gemspec +6 -2
- metadata +74 -73
- data/lib/vanity/commands.rb +0 -2
- data/lib/vanity/mock_redis.rb +0 -76
- data/test/metric_test.rb +0 -622
- data/vendor/cache/RedCloth-4.2.2.gem +0 -0
- data/vendor/cache/actionmailer-2.3.5.gem +0 -0
- data/vendor/cache/actionpack-2.3.5.gem +0 -0
- data/vendor/cache/activerecord-2.3.5.gem +0 -0
- data/vendor/cache/activeresource-2.3.5.gem +0 -0
- data/vendor/cache/activesupport-2.3.5.gem +0 -0
- data/vendor/cache/autotest-4.2.7.gem +0 -0
- data/vendor/cache/autotest-fsevent-0.2.1.gem +0 -0
- data/vendor/cache/autotest-growl-0.2.0.gem +0 -0
- data/vendor/cache/bundler-0.9.7.gem +0 -0
- data/vendor/cache/classifier-1.3.1.gem +0 -0
- data/vendor/cache/directory_watcher-1.3.1.gem +0 -0
- data/vendor/cache/fastthread-1.0.7.gem +0 -0
- data/vendor/cache/garb-0.7.0.gem +0 -0
- data/vendor/cache/happymapper-0.3.0.gem +0 -0
- data/vendor/cache/jekyll-0.5.7.gem +0 -0
- data/vendor/cache/libxml-ruby-1.1.3.gem +0 -0
- data/vendor/cache/liquid-2.0.0.gem +0 -0
- data/vendor/cache/maruku-0.6.0.gem +0 -0
- data/vendor/cache/mocha-0.9.8.gem +0 -0
- data/vendor/cache/open4-1.0.1.gem +0 -0
- data/vendor/cache/passenger-2.2.9.gem +0 -0
- data/vendor/cache/rack-1.0.1.gem +0 -0
- data/vendor/cache/rails-2.3.5.gem +0 -0
- data/vendor/cache/rake-0.8.7.gem +0 -0
- data/vendor/cache/rubygems-update-1.3.5.gem +0 -0
- data/vendor/cache/shoulda-2.10.3.gem +0 -0
- data/vendor/cache/sqlite3-ruby-1.2.5.gem +0 -0
- data/vendor/cache/stemmer-1.0.1.gem +0 -0
- data/vendor/cache/syntax-1.0.0.gem +0 -0
- data/vendor/cache/sys-uname-0.8.4.gem +0 -0
- data/vendor/cache/timecop-0.3.4.gem +0 -0
- data/vendor/redis-rb/LICENSE +0 -20
- data/vendor/redis-rb/README.markdown +0 -36
- data/vendor/redis-rb/Rakefile +0 -62
- data/vendor/redis-rb/bench.rb +0 -44
- data/vendor/redis-rb/benchmarking/suite.rb +0 -24
- data/vendor/redis-rb/benchmarking/worker.rb +0 -71
- data/vendor/redis-rb/bin/distredis +0 -33
- data/vendor/redis-rb/examples/basic.rb +0 -16
- data/vendor/redis-rb/examples/incr-decr.rb +0 -18
- data/vendor/redis-rb/examples/list.rb +0 -26
- data/vendor/redis-rb/examples/sets.rb +0 -36
- data/vendor/redis-rb/lib/dist_redis.rb +0 -124
- data/vendor/redis-rb/lib/hash_ring.rb +0 -128
- data/vendor/redis-rb/lib/pipeline.rb +0 -21
- data/vendor/redis-rb/lib/redis.rb +0 -370
- data/vendor/redis-rb/lib/redis/raketasks.rb +0 -1
- data/vendor/redis-rb/profile.rb +0 -22
- data/vendor/redis-rb/redis-rb.gemspec +0 -30
- data/vendor/redis-rb/spec/redis_spec.rb +0 -637
- data/vendor/redis-rb/spec/spec_helper.rb +0 -4
- data/vendor/redis-rb/speed.rb +0 -16
- data/vendor/redis-rb/tasks/redis.tasks.rb +0 -140
data/vendor/redis-rb/lib/dist_redis.rb
@@ -1,124 +0,0 @@
-require 'redis'
-require 'hash_ring'
-class DistRedis
-  attr_reader :ring
-  def initialize(opts={})
-    hosts = []
-
-    db = opts[:db] || nil
-    timeout = opts[:timeout] || nil
-
-    raise Error, "No hosts given" unless opts[:hosts]
-
-    opts[:hosts].each do |h|
-      host, port = h.split(':')
-      hosts << Redis.new(:host => host, :port => port, :db => db, :timeout => timeout)
-    end
-
-    @ring = HashRing.new hosts
-  end
-
-  def node_for_key(key)
-    key = $1 if key =~ /\{(.*)?\}/
-    @ring.get_node(key)
-  end
-
-  def add_server(server)
-    server, port = server.split(':')
-    @ring.add_node Redis.new(:host => server, :port => port)
-  end
-
-  def method_missing(sym, *args, &blk)
-    if redis = node_for_key(args.first.to_s)
-      redis.send sym, *args, &blk
-    else
-      super
-    end
-  end
-
-  def keys(glob)
-    @ring.nodes.map do |red|
-      red.keys(glob)
-    end
-  end
-
-  def save
-    on_each_node :save
-  end
-
-  def bgsave
-    on_each_node :bgsave
-  end
-
-  def quit
-    on_each_node :quit
-  end
-
-  def flush_all
-    on_each_node :flush_all
-  end
-  alias_method :flushall, :flush_all
-
-  def flush_db
-    on_each_node :flush_db
-  end
-  alias_method :flushdb, :flush_db
-
-  def delete_cloud!
-    @ring.nodes.each do |red|
-      red.keys("*").each do |key|
-        red.delete key
-      end
-    end
-  end
-
-  def on_each_node(command, *args)
-    @ring.nodes.each do |red|
-      red.send(command, *args)
-    end
-  end
-
-end
-
-
-if __FILE__ == $0
-
-  r = DistRedis.new 'localhost:6379', 'localhost:6380', 'localhost:6381', 'localhost:6382'
-  r['urmom'] = 'urmom'
-  r['urdad'] = 'urdad'
-  r['urmom1'] = 'urmom1'
-  r['urdad1'] = 'urdad1'
-  r['urmom2'] = 'urmom2'
-  r['urdad2'] = 'urdad2'
-  r['urmom3'] = 'urmom3'
-  r['urdad3'] = 'urdad3'
-  p r['urmom']
-  p r['urdad']
-  p r['urmom1']
-  p r['urdad1']
-  p r['urmom2']
-  p r['urdad2']
-  p r['urmom3']
-  p r['urdad3']
-
-  r.push_tail 'listor', 'foo1'
-  r.push_tail 'listor', 'foo2'
-  r.push_tail 'listor', 'foo3'
-  r.push_tail 'listor', 'foo4'
-  r.push_tail 'listor', 'foo5'
-
-  p r.pop_tail('listor')
-  p r.pop_tail('listor')
-  p r.pop_tail('listor')
-  p r.pop_tail('listor')
-  p r.pop_tail('listor')
-
-  puts "key distribution:"
-
-  r.ring.nodes.each do |red|
-    p [red.port, red.keys("*")]
-  end
-  r.delete_cloud!
-  p r.keys('*')
-
-end
data/vendor/redis-rb/lib/hash_ring.rb
@@ -1,128 +0,0 @@
-require 'zlib'
-
-class HashRing
-
-  POINTS_PER_SERVER = 160 # this is the default in libmemcached
-
-  attr_reader :ring, :sorted_keys, :replicas, :nodes
-
-  # nodes is a list of objects that have a proper to_s representation.
-  # replicas indicates how many virtual points should be used pr. node,
-  # replicas are required to improve the distribution.
-  def initialize(nodes=[], replicas=POINTS_PER_SERVER)
-    @replicas = replicas
-    @ring = {}
-    @nodes = []
-    @sorted_keys = []
-    nodes.each do |node|
-      add_node(node)
-    end
-  end
-
-  # Adds a `node` to the hash ring (including a number of replicas).
-  def add_node(node)
-    @nodes << node
-    @replicas.times do |i|
-      key = Zlib.crc32("#{node}:#{i}")
-      @ring[key] = node
-      @sorted_keys << key
-    end
-    @sorted_keys.sort!
-  end
-
-  def remove_node(node)
-    @nodes.reject!{|n| n.to_s == node.to_s}
-    @replicas.times do |i|
-      key = Zlib.crc32("#{node}:#{i}")
-      @ring.delete(key)
-      @sorted_keys.reject! {|k| k == key}
-    end
-  end
-
-  # get the node in the hash ring for this key
-  def get_node(key)
-    get_node_pos(key)[0]
-  end
-
-  def get_node_pos(key)
-    return [nil,nil] if @ring.size == 0
-    crc = Zlib.crc32(key)
-    idx = HashRing.binary_search(@sorted_keys, crc)
-    return [@ring[@sorted_keys[idx]], idx]
-  end
-
-  def iter_nodes(key)
-    return [nil,nil] if @ring.size == 0
-    node, pos = get_node_pos(key)
-    @sorted_keys[pos..-1].each do |k|
-      yield @ring[k]
-    end
-  end
-
-  class << self
-
-    # gem install RubyInline to use this code
-    # Native extension to perform the binary search within the hashring.
-    # There's a pure ruby version below so this is purely optional
-    # for performance. In testing 20k gets and sets, the native
-    # binary search shaved about 12% off the runtime (9sec -> 8sec).
-    begin
-      require 'inline'
-      inline do |builder|
-        builder.c <<-EOM
-          int binary_search(VALUE ary, unsigned int r) {
-              int upper = RARRAY_LEN(ary) - 1;
-              int lower = 0;
-              int idx = 0;
-
-              while (lower <= upper) {
-                  idx = (lower + upper) / 2;
-
-                  VALUE continuumValue = RARRAY_PTR(ary)[idx];
-                  unsigned int l = NUM2UINT(continuumValue);
-                  if (l == r) {
-                      return idx;
-                  }
-                  else if (l > r) {
-                      upper = idx - 1;
-                  }
-                  else {
-                      lower = idx + 1;
-                  }
-              }
-              return upper;
-          }
-        EOM
-      end
-    rescue Exception => e
-      # Find the closest index in HashRing with value <= the given value
-      def binary_search(ary, value, &block)
-        upper = ary.size - 1
-        lower = 0
-        idx = 0
-
-        while(lower <= upper) do
-          idx = (lower + upper) / 2
-          comp = ary[idx] <=> value
-
-          if comp == 0
-            return idx
-          elsif comp > 0
-            upper = idx - 1
-          else
-            lower = idx + 1
-          end
-        end
-        return upper
-      end
-
-    end
-  end
-
-end
-
-# ring = HashRing.new ['server1', 'server2', 'server3']
-# p ring
-# #
-# p ring.get_node "kjhjkjlkjlkkh"
-#
data/vendor/redis-rb/lib/pipeline.rb
@@ -1,21 +0,0 @@
-class Redis
-  class Pipeline < Redis
-    BUFFER_SIZE = 50_000
-
-    def initialize(redis)
-      @redis = redis
-      @commands = []
-    end
-
-    def call_command(command)
-      @commands << command
-    end
-
-    def execute
-      return if @commands.empty?
-      @redis.call_command(@commands)
-      @commands.clear
-    end
-
-  end
-end
data/vendor/redis-rb/lib/redis.rb
@@ -1,370 +0,0 @@
-require 'socket'
-require File.join(File.dirname(__FILE__),'pipeline')
-
-begin
-  if RUBY_VERSION >= '1.9'
-    require 'timeout'
-    RedisTimer = Timeout
-  else
-    require 'system_timer'
-    RedisTimer = SystemTimer
-  end
-rescue LoadError
-  RedisTimer = nil
-end
-
-class Redis
-  OK = "OK".freeze
-  MINUS = "-".freeze
-  PLUS = "+".freeze
-  COLON = ":".freeze
-  DOLLAR = "$".freeze
-  ASTERISK = "*".freeze
-
-  BULK_COMMANDS = {
-    "set" => true,
-    "setnx" => true,
-    "rpush" => true,
-    "lpush" => true,
-    "lset" => true,
-    "lrem" => true,
-    "sadd" => true,
-    "srem" => true,
-    "sismember" => true,
-    "rpoplpush" => true,
-    "echo" => true,
-    "getset" => true,
-    "smove" => true,
-    "zadd" => true,
-    "zrem" => true,
-    "zscore" => true
-  }
-
-  MULTI_BULK_COMMANDS = {
-    "mset" => true,
-    "msetnx" => true
-  }
-
-  BOOLEAN_PROCESSOR = lambda{|r| r == 1 }
-
-  REPLY_PROCESSOR = {
-    "exists" => BOOLEAN_PROCESSOR,
-    "sismember" => BOOLEAN_PROCESSOR,
-    "sadd" => BOOLEAN_PROCESSOR,
-    "srem" => BOOLEAN_PROCESSOR,
-    "smove" => BOOLEAN_PROCESSOR,
-    "zadd" => BOOLEAN_PROCESSOR,
-    "zrem" => BOOLEAN_PROCESSOR,
-    "move" => BOOLEAN_PROCESSOR,
-    "setnx" => BOOLEAN_PROCESSOR,
-    "del" => BOOLEAN_PROCESSOR,
-    "renamenx" => BOOLEAN_PROCESSOR,
-    "expire" => BOOLEAN_PROCESSOR,
-    "keys" => lambda{|r| r.split(" ")},
-    "info" => lambda{|r|
-      info = {}
-      r.each_line {|kv|
-        k,v = kv.split(":",2).map{|x| x.chomp}
-        info[k.to_sym] = v
-      }
-      info
-    }
-  }
-
-  ALIASES = {
-    "flush_db" => "flushdb",
-    "flush_all" => "flushall",
-    "last_save" => "lastsave",
-    "key?" => "exists",
-    "delete" => "del",
-    "randkey" => "randomkey",
-    "list_length" => "llen",
-    "push_tail" => "rpush",
-    "push_head" => "lpush",
-    "pop_tail" => "rpop",
-    "pop_head" => "lpop",
-    "list_set" => "lset",
-    "list_range" => "lrange",
-    "list_trim" => "ltrim",
-    "list_index" => "lindex",
-    "list_rm" => "lrem",
-    "set_add" => "sadd",
-    "set_delete" => "srem",
-    "set_count" => "scard",
-    "set_member?" => "sismember",
-    "set_members" => "smembers",
-    "set_intersect" => "sinter",
-    "set_intersect_store" => "sinterstore",
-    "set_inter_store" => "sinterstore",
-    "set_union" => "sunion",
-    "set_union_store" => "sunionstore",
-    "set_diff" => "sdiff",
-    "set_diff_store" => "sdiffstore",
-    "set_move" => "smove",
-    "set_unless_exists" => "setnx",
-    "rename_unless_exists" => "renamenx",
-    "type?" => "type",
-    "zset_add" => "zadd",
-    "zset_count" => 'zcard',
-    "zset_range_by_score" => 'zrangebyscore',
-    "zset_reverse_range" => 'zrevrange',
-    "zset_range" => 'zrange',
-    "zset_delete" => 'zrem',
-    "zset_score" => 'zscore'
-  }
-
-  DISABLED_COMMANDS = {
-    "monitor" => true,
-    "sync" => true
-  }
-
-  def initialize(options = {})
-    @host = options[:host] || '127.0.0.1'
-    @port = (options[:port] || 6379).to_i
-    @db = (options[:db] || 0).to_i
-    @timeout = (options[:timeout] || 5).to_i
-    @password = options[:password]
-    @logger = options[:logger]
-    @thread_safe = options[:thread_safe]
-    @mutex = Mutex.new if @thread_safe
-
-    @logger.info { self.to_s } if @logger
-  end
-
-  def to_s
-    "Redis Client connected to #{server} against DB #{@db}"
-  end
-
-  def server
-    "#{@host}:#{@port}"
-  end
-
-  def connect_to_server
-    @sock = connect_to(@host, @port, @timeout == 0 ? nil : @timeout)
-    call_command(["auth",@password]) if @password
-    call_command(["select",@db]) unless @db == 0
-  end
-
-  def connect_to(host, port, timeout=nil)
-    # We support connect() timeout only if system_timer is availabe
-    # or if we are running against Ruby >= 1.9
-    # Timeout reading from the socket instead will be supported anyway.
-    if @timeout != 0 and RedisTimer
-      begin
-        sock = TCPSocket.new(host, port)
-      rescue Timeout::Error
-        @sock = nil
-        raise Timeout::Error, "Timeout connecting to the server"
-      end
-    else
-      sock = TCPSocket.new(host, port)
-    end
-    sock.setsockopt Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1
-
-    # If the timeout is set we set the low level socket options in order
-    # to make sure a blocking read will return after the specified number
-    # of seconds. This hack is from memcached ruby client.
-    if timeout
-      secs = Integer(timeout)
-      usecs = Integer((timeout - secs) * 1_000_000)
-      optval = [secs, usecs].pack("l_2")
-      begin
-        sock.setsockopt Socket::SOL_SOCKET, Socket::SO_RCVTIMEO, optval
-        sock.setsockopt Socket::SOL_SOCKET, Socket::SO_SNDTIMEO, optval
-      rescue Exception => ex
-        # Solaris, for one, does not like/support socket timeouts.
-        @logger.info "Unable to use raw socket timeouts: #{ex.class.name}: #{ex.message}" if @logger
-      end
-    end
-    sock
-  end
-
-  def method_missing(*argv)
-    call_command(argv)
-  end
-
-  def call_command(argv)
-    @logger.debug { argv.inspect } if @logger
-
-    # this wrapper to raw_call_command handle reconnection on socket
-    # error. We try to reconnect just one time, otherwise let the error
-    # araise.
-    connect_to_server if !@sock
-
-    begin
-      raw_call_command(argv.dup)
-    rescue Errno::ECONNRESET, Errno::EPIPE, Errno::ECONNABORTED
-      @sock.close
-      @sock = nil
-      connect_to_server
-      raw_call_command(argv.dup)
-    end
-  end
-
-  def raw_call_command(argvp)
-    pipeline = argvp[0].is_a?(Array)
-
-    unless pipeline
-      argvv = [argvp]
-    else
-      argvv = argvp
-    end
-
-    if MULTI_BULK_COMMANDS[argvv.flatten[0].to_s]
-      # TODO improve this code
-      argvp = argvv.flatten
-      values = argvp.pop.to_a.flatten
-      argvp = values.unshift(argvp[0])
-      command = ["*#{argvp.size}"]
-      argvp.each do |v|
-        v = v.to_s
-        command << "$#{get_size(v)}"
-        command << v
-      end
-      command = command.map {|cmd| "#{cmd}\r\n"}.join
-    else
-      command = ""
-      argvv.each do |argv|
-        bulk = nil
-        argv[0] = argv[0].to_s.downcase
-        argv[0] = ALIASES[argv[0]] if ALIASES[argv[0]]
-        raise "#{argv[0]} command is disabled" if DISABLED_COMMANDS[argv[0]]
-        if BULK_COMMANDS[argv[0]] and argv.length > 1
-          bulk = argv[-1].to_s
-          argv[-1] = get_size(bulk)
-        end
-        command << "#{argv.join(' ')}\r\n"
-        command << "#{bulk}\r\n" if bulk
-      end
-    end
-    results = maybe_lock { process_command(command, argvv) }
-
-    return pipeline ? results : results[0]
-  end
-
-  def process_command(command, argvv)
-    @sock.write(command)
-    argvv.map do |argv|
-      processor = REPLY_PROCESSOR[argv[0]]
-      processor ? processor.call(read_reply) : read_reply
-    end
-  end
-
-  def maybe_lock(&block)
-    if @thread_safe
-      @mutex.synchronize &block
-    else
-      block.call
-    end
-  end
-
-  def select(*args)
-    raise "SELECT not allowed, use the :db option when creating the object"
-  end
-
-  def [](key)
-    self.get(key)
-  end
-
-  def []=(key,value)
-    set(key,value)
-  end
-
-  def set(key, value, expiry=nil)
-    s = call_command([:set, key, value]) == OK
-    expire(key, expiry) if s && expiry
-    s
-  end
-
-  def sort(key, options = {})
-    cmd = ["SORT"]
-    cmd << key
-    cmd << "BY #{options[:by]}" if options[:by]
-    cmd << "GET #{[options[:get]].flatten * ' GET '}" if options[:get]
-    cmd << "#{options[:order]}" if options[:order]
-    cmd << "LIMIT #{options[:limit].join(' ')}" if options[:limit]
-    call_command(cmd)
-  end
-
-  def incr(key, increment = nil)
-    call_command(increment ? ["incrby",key,increment] : ["incr",key])
-  end
-
-  def decr(key,decrement = nil)
-    call_command(decrement ? ["decrby",key,decrement] : ["decr",key])
-  end
-
-  # Similar to memcache.rb's #get_multi, returns a hash mapping
-  # keys to values.
-  def mapped_mget(*keys)
-    result = {}
-    mget(*keys).each do |value|
-      key = keys.shift
-      result.merge!(key => value) unless value.nil?
-    end
-    result
-  end
-
-  # Ruby defines a now deprecated type method so we need to override it here
-  # since it will never hit method_missing
-  def type(key)
-    call_command(['type', key])
-  end
-
-  def quit
-    call_command(['quit'])
-  rescue Errno::ECONNRESET
-  end
-
-  def pipelined(&block)
-    pipeline = Pipeline.new self
-    yield pipeline
-    pipeline.execute
-  end
-
-  def read_reply
-    # We read the first byte using read() mainly because gets() is
-    # immune to raw socket timeouts.
-    begin
-      rtype = @sock.read(1)
-    rescue Errno::EAGAIN
-      # We want to make sure it reconnects on the next command after the
-      # timeout. Otherwise the server may reply in the meantime leaving
-      # the protocol in a desync status.
-      @sock = nil
-      raise Errno::EAGAIN, "Timeout reading from the socket"
-    end
-
-    raise Errno::ECONNRESET,"Connection lost" if !rtype
-    line = @sock.gets
-    case rtype
-    when MINUS
-      raise MINUS + line.strip
-    when PLUS
-      line.strip
-    when COLON
-      line.to_i
-    when DOLLAR
-      bulklen = line.to_i
-      return nil if bulklen == -1
-      data = @sock.read(bulklen)
-      @sock.read(2) # CRLF
-      data
-    when ASTERISK
-      objects = line.to_i
-      return nil if bulklen == -1
-      res = []
-      objects.times {
-        res << read_reply
-      }
-      res
-    else
-      raise "Protocol error, got '#{rtype}' as initial reply byte"
-    end
-  end
-
-  private
-  def get_size(string)
-    string.respond_to?(:bytesize) ? string.bytesize : string.size
-  end
-end