herdis 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/bin/herdis +6 -0
- data/lib/herdis/client.rb +136 -0
- data/lib/herdis/common.rb +10 -0
- data/lib/herdis/handlers/add_shards.rb +20 -0
- data/lib/herdis/handlers/add_shepherd.rb +21 -0
- data/lib/herdis/handlers/cluster.rb +21 -0
- data/lib/herdis/handlers/common.rb +18 -0
- data/lib/herdis/handlers/info.rb +21 -0
- data/lib/herdis/handlers/join_cluster.rb +18 -0
- data/lib/herdis/handlers/ping.rb +17 -0
- data/lib/herdis/handlers/remove_shards.rb +20 -0
- data/lib/herdis/handlers/remove_shepherd.rb +18 -0
- data/lib/herdis/handlers/sanity.rb +21 -0
- data/lib/herdis/handlers/shutdown.rb +18 -0
- data/lib/herdis/plugins/shepherd_connection.rb +49 -0
- data/lib/herdis/rack/default_headers.rb +15 -0
- data/lib/herdis/rack/favicon.rb +31 -0
- data/lib/herdis/rack/host_parameter.rb +22 -0
- data/lib/herdis/rmerge.rb +74 -0
- data/lib/herdis/server.rb +59 -0
- data/lib/herdis/shepherd.rb +531 -0
- metadata +136 -0
data/bin/herdis
ADDED
data/lib/herdis/client.rb
ADDED
@@ -0,0 +1,136 @@
+
+require 'hiredis'
+require 'redis'
+require 'redis/distributed'
+require 'yajl'
+require 'digest/sha1'
+require 'pp'
+
+$LOAD_PATH.unshift(File.expand_path('lib'))
+
+require 'herdis/common'
+
+module Herdis
+
+  class Client
+
+    class ReDistributed < Redis::Distributed
+
+      attr_reader :nodes
+
+      def initialize(urls, options = {})
+        @tag = options.delete(:tag) || /^\{(.+?)\}/
+        @default_options = options
+        @nodes = urls.map { |url| Redis.connect(options.merge(:url => url)) }
+        @subscribed_node = nil
+      end
+
+      def node_for(key)
+        @nodes[Digest::SHA1.hexdigest(key_tag(key.to_s) || key.to_s).to_i(16) % @nodes.size]
+      end
+
+      def add_node(url)
+        raise "You can't add nodes to #{self}!"
+      end
+
+    end
+
+    class DeadClusterException < RuntimeError
+    end
+
+    attr_reader :options, :shepherds, :dredis
+
+    def initialize(*args)
+      @options = args.last.is_a?(Hash) ? args.pop : {}
+      @shepherds = {}
+      args.each_with_index do |url, index|
+        @shepherds["initial#{index}"] = {"url" => url}
+      end
+      begin
+        refresh_cluster
+      rescue DeadClusterException => e
+        raise "No such cluster: #{url}"
+      end
+    end
+
+    def create_urls(cluster)
+      hash = {}
+      cluster.each do |shepherd_id, shepherd_status|
+        shepherd_url = URI.parse(shepherd_status["url"])
+        (shepherd_status["masters"] || []).each do |shard_id|
+          hash[shard_id.to_i] = "redis://#{shepherd_url.host}:#{shepherd_status["first_port"].to_i + shard_id.to_i}/"
+        end
+      end
+      urls = hash.keys.sort.collect do |key|
+        hash[key]
+      end
+    end
+
+    def validate(urls)
+      unless urls.size == Herdis::Common::SHARDS
+        raise "Broken cluster, there should be #{Herdis::Common::SHARDS} shards, but are #{urls.size}"
+      end
+      creators = Set.new
+      urls.each_with_index do |url, index|
+        parsed = URI.parse(url)
+        r = Redis.new(:host => parsed.host, :port => parsed.port)
+        claimed_shard = r.get("Herdis::Shepherd::Shard.id").to_i
+        creators << r.get("Herdis::Shepherd::Shard.created_by")
+        raise "Broken cluster, shard #{index} claims to be shard #{claimed_shard}" unless claimed_shard == index
+        raise "Broken cluster, multiple creators: #{creators.inspect}" if creators.size > 1
+      end
+    end
+
+    def refresh_cluster
+      cluster = nil
+      while cluster.nil?
+        raise DeadClusterException.new if @shepherds.empty?
+        random_shepherd_id = @shepherds.keys[rand(@shepherds.size)]
+        cluster_request =
+          EM::HttpRequest.new(@shepherds[random_shepherd_id]["url"]).get(:path => "/cluster",
+                                                                         :head => {"Accept" => "application/json"})
+        if cluster_request.response_header.status == 0
+          @shepherds.delete(random_shepherd_id)
+        else
+          cluster = Yajl::Parser.parse(cluster_request.response)
+          begin
+            urls = create_urls(cluster)
+            validate(urls)
+          rescue Errno::ECONNREFUSED => e
+            cluster = nil
+          rescue RuntimeError => e
+            if e.message == "ERR operation not permitted"
+              cluster = nil
+            else
+              raise e
+            end
+          end
+        end
+      end
+      @shepherds = cluster
+      @dredis = ReDistributed.new(urls,
+                                  @options)
+    end
+
+    def method_missing(meth, *args, &block)
+      begin
+        @dredis.send(meth, *args, &block)
+      rescue DeadClusterException => e
+        refresh_cluster
+        retry
+      rescue Errno::ECONNREFUSED => e
+        refresh_cluster
+        retry
+      rescue RuntimeError => e
+        if e.message == "ERR operation not permitted"
+          refresh_cluster
+          retry
+        else
+          raise e
+        end
+      end
+    end
+
+  end
+
+end
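Illustrative usage sketch (not part of the released files): it assumes a herdis cluster is already running with a shepherd reachable at http://localhost:9000, and that the call happens inside a running EventMachine/em-synchrony reactor, since refresh_cluster issues EM::HttpRequest calls.

require 'em-synchrony'
require 'em-synchrony/em-http'
require 'herdis/client'

EM.synchrony do
  # Point the client at any known shepherd; it fetches /cluster to find the shards.
  client = Herdis::Client.new("http://localhost:9000")
  # Unknown methods are forwarded to the underlying Redis::Distributed via method_missing.
  client.set("greeting", "hello")
  puts client.get("greeting")
  EM.stop
end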
data/lib/herdis/handlers/add_shards.rb
ADDED
@@ -0,0 +1,20 @@
+
+module Herdis
+
+  module Handlers
+
+    class AddShards < Goliath::API
+      include Common
+
+      def response(env)
+        shard_ids = env['params']["shard_ids"]
+        shepherd_id = env['params'][:shepherd_id]
+        Herdis::Plugins::ShepherdConnection.shepherd.add_shards(shepherd_id, shard_ids)
+        [201, {}, ""]
+      end
+
+    end
+
+  end
+
+end
data/lib/herdis/handlers/add_shepherd.rb
ADDED
@@ -0,0 +1,21 @@
+
+module Herdis
+
+  module Handlers
+
+    class AddShepherd < Goliath::API
+      include Common
+
+      def response(env)
+        data = env['params']
+        data.delete(:shepherd_id)
+        data.delete("shepherd_id")
+        Herdis::Plugins::ShepherdConnection.shepherd.add_shepherd(data)
+        [201, {}, ""]
+      end
+
+    end
+
+  end
+
+end
data/lib/herdis/handlers/cluster.rb
ADDED
@@ -0,0 +1,21 @@
+
+module Herdis
+
+  module Handlers
+
+    class Cluster < Goliath::API
+      include Common
+
+      def response(env)
+        if Herdis::Plugins::ShepherdConnection.shepherd.nil?
+          [404, {}, ""]
+        else
+          [200, {}, Herdis::Plugins::ShepherdConnection.shepherd.cluster_status]
+        end
+      end
+
+    end
+
+  end
+
+end
data/lib/herdis/handlers/common.rb
ADDED
@@ -0,0 +1,18 @@
+
+module Herdis
+  module Handlers
+    module Common
+
+      def self.included(base)
+        base.use AsyncRack::Runtime
+        base.use AsyncRack::Deflater
+        base.use Goliath::Rack::DefaultMimeType
+        base.use Goliath::Rack::Formatters::JSON
+        base.use Goliath::Rack::Params
+        base.use Herdis::Rack::DefaultHeaders
+        base.use Herdis::Rack::HostParameter
+      end
+
+    end
+  end
+end
data/lib/herdis/handlers/info.rb
ADDED
@@ -0,0 +1,21 @@
+
+module Herdis
+
+  module Handlers
+
+    class Info < Goliath::API
+      include Common
+
+      def response(env)
+        if Herdis::Plugins::ShepherdConnection.shepherd.nil?
+          [404, {}, ""]
+        else
+          [200, {}, Herdis::Plugins::ShepherdConnection.shepherd.cluster_info]
+        end
+      end
+
+    end
+
+  end
+
+end
data/lib/herdis/handlers/join_cluster.rb
ADDED
@@ -0,0 +1,18 @@
+
+module Herdis
+
+  module Handlers
+
+    class JoinCluster < Goliath::API
+      include Common
+
+      def response(env)
+        Herdis::Plugins::ShepherdConnection.shepherd.join_cluster(env['params']['url'])
+        [204, {}, ""]
+      end
+
+    end
+
+  end
+
+end
data/lib/herdis/handlers/remove_shards.rb
ADDED
@@ -0,0 +1,20 @@
+
+module Herdis
+
+  module Handlers
+
+    class RemoveShards < Goliath::API
+      include Common
+
+      def response(env)
+        shard_ids = env['params']["shard_ids"]
+        shepherd_id = env['params'][:shepherd_id]
+        Herdis::Plugins::ShepherdConnection.shepherd.remove_shards(shepherd_id, shard_ids)
+        [204, {}, ""]
+      end
+
+    end
+
+  end
+
+end
data/lib/herdis/handlers/remove_shepherd.rb
ADDED
@@ -0,0 +1,18 @@
+
+module Herdis
+
+  module Handlers
+
+    class RemoveShepherd < Goliath::API
+      include Common
+
+      def response(env)
+        Herdis::Plugins::ShepherdConnection.shepherd.remove_shepherd(env['params'][:shepherd_id])
+        [204, {}, ""]
+      end
+
+    end
+
+  end
+
+end
data/lib/herdis/handlers/sanity.rb
ADDED
@@ -0,0 +1,21 @@
+
+module Herdis
+
+  module Handlers
+
+    class Sanity < Goliath::API
+      include Common
+
+      def response(env)
+        if Herdis::Plugins::ShepherdConnection.shepherd.nil?
+          [404, {}, ""]
+        else
+          [200, {}, Herdis::Plugins::ShepherdConnection.shepherd.sanity]
+        end
+      end
+
+    end
+
+  end
+
+end
data/lib/herdis/plugins/shepherd_connection.rb
ADDED
@@ -0,0 +1,49 @@
+module Herdis
+  module Plugins
+    class ShepherdConnection
+
+      @@shepherd = nil
+
+      def self.shepherd
+        @@shepherd
+      end
+
+      def self.shutdown
+        @@shepherd.shutdown unless @@shepherd.nil?
+        @@shepherd = nil
+      end
+
+      def initialize(port, config, status, logger)
+        @port = port
+        @logger = logger
+      end
+
+      def run
+        opts = {}
+        copy_from_env(opts, :first_port, :to_i)
+        copy_from_env(opts, :dir)
+        copy_from_env(opts, :shepherd_id)
+        copy_from_env(opts, :inmemory)
+        copy_from_env(opts, :redundancy, :to_i)
+        copy_from_env(opts, :connect_to)
+        opts[:port] = @port
+        opts[:logger] = @logger
+        @@shepherd = Herdis::Shepherd.new(opts)
+      end
+
+      private
+
+      def copy_from_env(hash, key, *mutators)
+        env_key = key.to_s.upcase
+        env_key = "SHEPHERD_#{env_key}" unless env_key.index("SHEPHERD_") == 0
+        if ENV[env_key]
+          hash[key] = ENV[env_key]
+          mutators.each do |mutator|
+            hash[key] = hash[key].send(mutator)
+          end
+        end
+      end
+
+    end
+  end
+end
data/lib/herdis/rack/default_headers.rb
ADDED
@@ -0,0 +1,15 @@
+module Herdis
+  module Rack
+    class DefaultHeaders
+      include Goliath::Rack::AsyncMiddleware
+
+      DEFAULT_HEADERS = {
+        'Content-Type' => 'application/json'
+      }
+
+      def post_process(env, status, headers, body)
+        [status, headers.merge(DEFAULT_HEADERS), body]
+      end
+    end
+  end
+end
data/lib/herdis/rack/favicon.rb
ADDED
@@ -0,0 +1,31 @@
+require 'time'
+
+# Reads a favicon.ico statically at load time, renders it on any request for
+# '/favicon.ico', and sends every other request on downstream.
+#
+# Rack::Static is a better option if you're serving several static assets.
+#
+module Herdis
+  module Rack
+    class Favicon
+      def initialize(app, filename)
+        @app = app
+        @favicon = File.read(File.join(filename))
+        @expires = Time.at(Time.now + (60 * 60 * 24 * 7)).utc.rfc822.to_s
+        @last_modified = File.mtime(filename).utc.rfc822.to_s
+      end
+
+      def call(env)
+        if env['REQUEST_PATH'] == '/favicon.ico'
+          env.logger.info('Serving favicon.ico')
+
+          [200, {'Last-Modified' => @last_modified,
+                 'Expires' => @expires,
+                 'Content-Type' => "image/png"}, @favicon]
+        else
+          @app.call(env)
+        end
+      end
+    end
+  end
+end
data/lib/herdis/rack/host_parameter.rb
ADDED
@@ -0,0 +1,22 @@
+
+module Herdis
+
+  module Rack
+
+    class HostParameter
+
+      include Goliath::Rack::AsyncMiddleware
+
+      def call(env)
+        class << Fiber.current
+          attr_accessor :host
+        end
+        Fiber.current.host = env["SERVER_NAME"]
+        super(env)
+      end
+
+    end
+
+  end
+
+end
data/lib/herdis/rmerge.rb
ADDED
@@ -0,0 +1,74 @@
+#
+# = Hash Recursive Merge
+#
+# Merges a Ruby Hash recursively, Also known as deep merge.
+# Recursive version of Hash#merge and Hash#merge!.
+#
+# Category::    Ruby
+# Package::     Hash
+# Author::      Simone Carletti <weppos@weppos.net>
+# Copyright::   2007-2008 The Authors
+# License::     MIT License
+# Link::        http://www.simonecarletti.com/
+# Source::      http://gist.github.com/gists/6391/
+#
+module HashRecursiveMerge
+
+  #
+  # Recursive version of Hash#merge!
+  #
+  # Adds the contents of +other_hash+ to +hsh+,
+  # merging entries in +hsh+ with duplicate keys with those from +other_hash+.
+  #
+  # Compared with Hash#merge!, this method supports nested hashes.
+  # When both +hsh+ and +other_hash+ contains an entry with the same key,
+  # it merges and returns the values from both arrays.
+  #
+  #    h1 = {"a" => 100, "b" => 200, "c" => {"c1" => 12, "c2" => 14}}
+  #    h2 = {"b" => 254, "c" => 300, "c" => {"c1" => 16, "c3" => 94}}
+  #    h1.rmerge!(h2)   #=> {"a" => 100, "b" => 254, "c" => {"c1" => 16, "c2" => 14, "c3" => 94}}
+  #
+  # Simply using Hash#merge! would return
+  #
+  #    h1.merge!(h2)    #=> {"a" => 100, "b" = >254, "c" => {"c1" => 16, "c3" => 94}}
+  #
+  def rmerge!(other_hash)
+    merge!(other_hash) do |key, oldval, newval|
+      oldval.class == self.class ? oldval.rmerge!(newval) : newval
+    end
+  end
+
+  #
+  # Recursive version of Hash#merge
+  #
+  # Compared with Hash#merge!, this method supports nested hashes.
+  # When both +hsh+ and +other_hash+ contains an entry with the same key,
+  # it merges and returns the values from both arrays.
+  #
+  # Compared with Hash#merge, this method provides a different approch
+  # for merging nasted hashes.
+  # If the value of a given key is an Hash and both +other_hash+ abd +hsh
+  # includes the same key, the value is merged instead replaced with
+  # +other_hash+ value.
+  #
+  #    h1 = {"a" => 100, "b" => 200, "c" => {"c1" => 12, "c2" => 14}}
+  #    h2 = {"b" => 254, "c" => 300, "c" => {"c1" => 16, "c3" => 94}}
+  #    h1.rmerge(h2)    #=> {"a" => 100, "b" => 254, "c" => {"c1" => 16, "c2" => 14, "c3" => 94}}
+  #
+  # Simply using Hash#merge would return
+  #
+  #    h1.merge(h2)     #=> {"a" => 100, "b" = >254, "c" => {"c1" => 16, "c3" => 94}}
+  #
+  def rmerge(other_hash)
+    r = {}
+    merge(other_hash) do |key, oldval, newval|
+      r[key] = oldval.class == self.class ? oldval.rmerge(newval) : newval
+    end
+  end
+
+end
+
+
+class Hash
+  include HashRecursiveMerge
+end
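A minimal sketch exercising the documented behaviour of rmerge versus plain merge (assumes the gem's lib/ directory is on the load path so the file loads as herdis/rmerge):

require 'herdis/rmerge'

h1 = {"a" => 100, "b" => 200, "c" => {"c1" => 12, "c2" => 14}}
h2 = {"b" => 254, "c" => {"c1" => 16, "c3" => 94}}
p h1.rmerge(h2)   # the nested hash under "c" is merged key by key
p h1.merge(h2)    # plain merge replaces the whole "c" value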
data/lib/herdis/server.rb
ADDED
@@ -0,0 +1,59 @@
+
+require 'em-synchrony'
+require 'em-synchrony/em-http'
+require 'hiredis'
+require 'redis'
+require 'goliath'
+require 'yajl'
+require 'pp'
+
+$LOAD_PATH.unshift(File.expand_path('lib'))
+
+require 'herdis/rmerge'
+require 'herdis/common'
+require 'herdis/shepherd'
+require 'herdis/plugins/shepherd_connection'
+require 'herdis/rack/default_headers'
+require 'herdis/rack/favicon'
+require 'herdis/rack/host_parameter'
+require 'herdis/handlers/common'
+require 'herdis/handlers/cluster'
+require 'herdis/handlers/join_cluster'
+require 'herdis/handlers/remove_shepherd'
+require 'herdis/handlers/shutdown'
+require 'herdis/handlers/ping'
+require 'herdis/handlers/info'
+require 'herdis/handlers/sanity'
+require 'herdis/handlers/add_shepherd'
+require 'herdis/handlers/add_shards'
+require 'herdis/handlers/remove_shepherd'
+require 'herdis/handlers/remove_shards'
+
+module Herdis
+
+  class Server < Goliath::API
+
+    plugin Herdis::Plugins::ShepherdConnection
+    use Herdis::Rack::Favicon, File.join(File.dirname(__FILE__), "..", "..", "assets", "shepherd.png")
+
+    head '/', Herdis::Handlers::Ping
+
+    get '/', Herdis::Handlers::Info
+    get '/cluster', Herdis::Handlers::Cluster
+    get '/sanity', Herdis::Handlers::Sanity
+
+    post '/', Herdis::Handlers::JoinCluster
+
+    post '/:shepherd_id/shards', Herdis::Handlers::AddShards
+    delete '/:shepherd_id/shards', Herdis::Handlers::RemoveShards
+
+    put '/:shepherd_id', Herdis::Handlers::AddShepherd
+    delete '/:shepherd_id', Herdis::Handlers::RemoveShepherd
+
+    delete '/', Herdis::Handlers::Shutdown
+  end
+
+end
+
+Goliath::Application.app_class = Herdis::Server
+
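Illustrative only: the routes defined above can be exercised over plain HTTP against a running shepherd. This sketch assumes the default server port 9000 (see Herdis::Shepherd#initialize in lib/herdis/shepherd.rb) and uses GET /cluster, which the Cluster handler answers with the shepherd-id-to-status map:

require 'net/http'
require 'json'

# Each value carries "url", "id", "first_port" and "masters" per the shepherd_status method.
cluster = JSON.parse(Net::HTTP.get(URI("http://localhost:9000/cluster")))
cluster.each do |id, shepherd|
  puts "#{id}: #{shepherd['masters'].size} master shards behind #{shepherd['url']}"
end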
data/lib/herdis/shepherd.rb
ADDED
@@ -0,0 +1,531 @@
+
+module Herdis
+
+  class Shepherd
+
+    CHECK_SLAVE_TIMER = (ENV["SHEPHERD_CHECK_SLAVE_TIMER"] || 10).to_f
+    CHECK_PREDECESSOR_TIMER = (ENV["SHEPHERD_CHECK_PREDECESSOR_TIMER"] || 1).to_f
+
+    class Shard
+
+      attr_reader :shepherd
+      attr_reader :id
+      attr_reader :master
+      def initialize(options = {})
+        @shepherd = options.delete(:shepherd)
+        @id = options.delete(:id)
+        @master = options.delete(:master)
+        Dir.mkdir(dir) unless Dir.exists?(dir)
+        initialize_redis
+      end
+      def dir
+        File.join(shepherd.dir, "shard#{id}")
+      end
+      def port
+        shepherd.first_port + id.to_i
+      end
+      def inmemory
+        shepherd.inmemory
+      end
+      def connection
+        if master
+          @connection ||= Redis.new(:host => "localhost", :port => port, :password => "slaved")
+        else
+          @connection ||= Redis.new(:host => "localhost", :port => port)
+        end
+      end
+      def inspect
+        begin
+          super
+        rescue Errno::ECONNREFUSED => e
+          "#<#{self.class.name} @dir=#{dir} @port=#{port} CLOSED>"
+        end
+      end
+      def liberate!
+        if master
+          @master = nil
+          begin
+            connection.slaveof("NO", "ONE")
+            connection.config("set", "requirepass", "")
+            shepherd.slaves.delete(id.to_s)
+            shepherd.masters[id.to_s] = self
+          rescue RuntimeError => e
+            if e.message == "LOADING Redis is loading the dataset in memory"
+              EM::Synchrony.sleep(0.1)
+            else
+              raise e
+            end
+          end
+        end
+      end
+      def enslave!(external_uri)
+        unless external_uri == master
+          connection.shutdown
+          @master = external_uri
+          @connection = nil
+          initialize_redis
+          shepherd.masters.delete(id.to_s)
+          shepherd.slaves[id.to_s] = self
+        end
+      end
+      def initialize_redis
+        begin
+          begin
+            connection.shutdown
+          rescue RuntimeError => e
+            if e.message == "ERR Client sent AUTH, but no password is set"
+              Redis.new(:host => "localhost", :port => port).shutdown
+            else
+              raise e
+            end
+          end
+        rescue Errno::ECONNREFUSED => e
+        end
+        io = IO.popen("#{shepherd.redis} -", "w")
+        write_configuration(io)
+        unless master
+          initialization = Proc.new do |p|
+            begin
+              connection.set("#{self.class.name}.id", id)
+              connection.set("#{self.class.name}.created_at", Time.now.to_i)
+              connection.set("#{self.class.name}.created_by", shepherd.shepherd_id)
+            rescue Errno::ECONNREFUSED => e
+              EM.add_timer(0.1) do
+                p.call(p)
+              end
+            end
+          end
+          EM.add_timer(0.1) do
+            initialization.call(initialization)
+          end
+        end
+      end
+      def write_configuration(io)
+        io.puts("daemonize yes")
+        io.puts("pidfile #{dir}/pid")
+        io.puts("port #{port}")
+        io.puts("timeout 300")
+        if master
+          io.puts("slaveof #{master.host} #{master.port}")
+          io.puts("requirepass slaved")
+        end
+        unless inmemory
+          io.puts("save 900 1")
+          io.puts("save 300 10")
+          io.puts("save 60 10000")
+          io.puts("dbfilename dump.rdb")
+        end
+        io.puts("dir #{dir}")
+        io.puts("logfile stdout")
+        io.close
+      end
+    end
+
+    attr_reader :dir
+    attr_reader :redis
+    attr_reader :first_port
+    attr_reader :inmemory
+    attr_reader :shepherd_id
+    attr_reader :redundancy
+    attr_reader :port
+    attr_reader :logger
+
+    attr_reader :masters
+    attr_reader :slaves
+    attr_reader :shepherds
+
+    def initialize(options = {})
+      @dir = options.delete(:dir) || File.join(ENV["HOME"], ".herdis")
+      @redis = options.delete(:redis) || "redis-server"
+      @port = options.delete(:port) || 9000
+      @logger = options.delete(:logger)
+      @first_port = options.delete(:first_port) || 9080
+      @inmemory = options.delete(:inmemory)
+      @redundancy = options.delete(:redundancy) || 2
+      @shepherd_id = options.delete(:shepherd_id) || rand(1 << 256).to_s(36)
+      Dir.mkdir(dir) unless Dir.exists?(dir)
+
+      @shepherds = {}
+      @slaves = {}
+      @masters = {}
+
+      at_exit do
+        shutdown
+      end
+
+      if connect_to = options.delete(:connect_to)
+        join_cluster(connect_to)
+      else
+        Herdis::Common::SHARDS.times do |shard_id|
+          create_master_shard(shard_id)
+        end
+        @shepherds[shepherd_id] = shepherd_status
+      end
+    end
+
+    def ensure_slave_check
+      @check_slave_timer ||= EM.add_periodic_timer(CHECK_SLAVE_TIMER) do
+        check_slaves
+      end
+    end
+
+    def ensure_predecessor_check
+      @check_predecessor_timer ||= EM.add_periodic_timer(CHECK_PREDECESSOR_TIMER) do
+        check_predecessor
+      end
+    end
+
+    def to_each_sibling(method, options, &block)
+      default_options = {:head => {"Content-Type" => "application/json"}}
+      multi = EM::Synchrony::Multi.new
+      shepherds.each do |shepherd_id, shepherd|
+        unless shepherd_id == self.shepherd_id
+          multi.add(shepherd_id,
+                    EM::HttpRequest.new(shepherd["url"]).send(method,
+                                                              default_options.rmerge(options)))
+        end
+      end
+      yield
+      Fiber.new do
+        multi.perform while !multi.finished?
+      end.resume
+    end
+
+    def join_cluster(url)
+      shutdown
+      @shepherds = Yajl::Parser.parse(EM::HttpRequest.new(url).get(:path => "/cluster",
+                                                                   :head => {"Content-Type" => "application/json"}).response)
+      add_shepherd(shepherd_status)
+    end
+
+    def add_shepherd(shepherd_status)
+      unless shepherd_status == shepherds[shepherd_status["id"]]
+        shepherds[shepherd_status["id"]] = shepherd_status
+        to_each_sibling(:aput,
+                        :path => "/#{shepherd_status["id"]}",
+                        :body => Yajl::Encoder.encode(shepherd_status)) do
+          check_shards
+        end
+      end
+    end
+
+    def remove_shepherd(shepherd_id)
+      if shepherds.include?(shepherd_id)
+        shepherds.delete(shepherd_id)
+        to_each_sibling(:adelete,
+                        :path => "/#{shepherd_id}") do
+          check_shards
+        end
+      end
+    end
+
+    def add_shards(shepherd_id, shard_ids, check = true)
+      if shepherd_state = shepherds[shepherd_id]
+        shard_id_set = Set.new(shard_ids)
+        current_shard_id_set = Set.new(shepherd_state["masters"])
+        unless current_shard_id_set.superset?(shard_id_set)
+          shepherd_state["masters"] = (current_shard_id_set | shard_id_set).to_a
+          to_each_sibling(:apost,
+                          :path => "/#{shepherd_id}/shards",
+                          :body => Yajl::Encoder.encode(:shard_ids => shard_ids.to_a)) do
+            check_shards if check
+          end
+        end
+      end
+    end
+
+    def remove_shards(shepherd_id, shard_ids, check = true)
+      if shepherd_state = shepherds[shepherd_id]
+        shard_id_set = Set.new(shard_ids)
+        current_shard_id_set = Set.new(shepherd_state["masters"])
+        unless (shard_id_set & current_shard_id_set).empty?
+          shepherd_state["masters"] = (current_shard_id_set - shard_id_set).to_a
+          to_each_sibling(:adelete,
+                          :path => "/#{shepherd_id}/shards",
+                          :body => Yajl::Encoder.encode(:shard_ids => shard_ids.to_a)) do
+            check_shards if check
+          end
+        end
+      end
+    end
+
+    def host
+      @host ||= Fiber.current.host if Fiber.current.respond_to?(:host)
+      @host || "localhost"
+    end
+
+    def url
+      "http://#{host}:#{port}"
+    end
+
+    def shepherd_status
+      {
+        "url" => url,
+        "id" => shepherd_id,
+        "first_port" => first_port,
+        "masters" => masters.keys
+      }
+    end
+
+    def cluster_info
+      {
+        "shepherd_id" => shepherd_id,
+        "ordinal" => ordinal,
+        "slaves" => slaves.keys,
+        "masters" => masters.keys,
+        "redundancy" => redundancy,
+        "siblings" => shepherds.keys.sort,
+        "inmemory" => inmemory,
+        "check_slave_timer" => CHECK_SLAVE_TIMER,
+        "check_predecessor_timer" => CHECK_PREDECESSOR_TIMER,
+        "sanity" => "#{url}/sanity",
+        "cluster" => "#{url}/cluster"
+      }
+    end
+
+    def sanity
+      creators = Set.new
+      min_created_at = nil
+      max_created_at = nil
+      masters = 0
+      slaves = 0
+      consistent = true
+      urls = []
+      shard_status.each do |shard_url|
+        urls << shard_url
+        url = URI.parse(shard_url)
+        shard_connection = Redis.new(:host => url.host, :port => url.port)
+        info = shard_connection.info
+        masters += 1 if info["role"] == "master"
+        slaves += 1 if info["role"] == "slave"
+        created_at = shard_connection.get("#{Herdis::Shepherd::Shard.name}.created_at").to_i
+        min_created_at = created_at if min_created_at.nil? || created_at < min_created_at
+        max_created_at = created_at if max_created_at.nil? || created_at > max_created_at
+        creators << shard_connection.get("#{Herdis::Shepherd::Shard.name}.created_by")
+      end
+      {
+        :creators => creators.to_a,
+        :consistent => creators.size == 1 && masters == Herdis::Common::SHARDS && slaves == 0,
+        :min_created_at => Time.at(min_created_at).to_s,
+        :max_created_at => Time.at(max_created_at).to_s,
+        :masters => masters,
+        :slaves => slaves,
+        :shards => urls
+      }
+    end
+
+    def shard_status
+      rval = []
+      cluster_status.each do |shepherd_id, shepherd_status|
+        shepherd_status["masters"].each do |shard_id|
+          if rval[shard_id.to_i].nil?
+            rval[shard_id.to_i] = "redis://#{host}:#{shard_id.to_i + shepherd_status["first_port"].to_i}/"
+          else
+            raise Goliath::Validation::InternalServerError.new("Duplicate masters: #{shard_id}")
+          end
+        end
+      end
+      rval
+    end
+
+    def cluster_status
+      shepherds.merge(shepherd_id => shepherd_status)
+    end
+
+    def shutdown_shard(shard_id)
+      if shard = masters[shard_id.to_s]
+        shard.connection.shutdown
+        masters.delete(shard_id.to_s)
+      end
+    end
+
+    def shutdown_slave(shard_id)
+      if shard = slaves[shard_id.to_s]
+        shard.connection.shutdown
+        slaves.delete(shard_id.to_s)
+      end
+    end
+
+    def shutdown
+      masters.keys.each do |shard_id|
+        shutdown_shard(shard_id)
+      end
+      slaves.keys.each do |shard_id|
+        shutdown_slave(shard_id)
+      end
+    end
+
+    def create_external_shards(should_be_owned)
+      shepherds.values.inject({}) do |sum, shepherd_status|
+        if shepherd_status["id"] == shepherd_id
+          sum
+        else
+          shepherd_url = URI.parse(shepherd_status["url"])
+          sum.merge(shepherd_status["masters"].inject({}) do |sum, shard_id|
+                      shepherd_url = URI.parse(shepherd_status["url"])
+                      sum.merge(shard_id.to_s => URI.parse("redis://#{shepherd_url.host}:#{shepherd_status["first_port"].to_i + shard_id.to_i}/"))
+                    end)
+        end
+      end
+    end
+
+    def check_shards
+      should_be_owned = owned_shards
+      should_be_backed_up = backup_shards
+      master_ids = Set.new(masters.keys)
+      slave_ids = Set.new(slaves.keys)
+      external_shards = create_external_shards(should_be_owned)
+      externally_running = Set.new(external_shards.keys)
+
+      needs_to_be_liberated = slave_ids - externally_running
+      needs_to_be_enslaved = (master_ids & externally_running & should_be_backed_up) - should_be_owned
+      needs_to_be_directed = slave_ids & externally_running & (should_be_backed_up | should_be_owned)
+      slaves_needing_to_be_shut_down = (slave_ids & externally_running) - (should_be_backed_up | should_be_owned)
+      masters_needing_to_be_shut_down = (master_ids & externally_running) - (should_be_backed_up | should_be_owned)
+      new_slaves_needed = ((should_be_backed_up | should_be_owned) & externally_running) - (slave_ids | master_ids)
+
+      handled = Set.new
+
+      logger.debug "#{shepherd_id} *** liberating #{needs_to_be_liberated.inspect}" unless needs_to_be_liberated.empty?
+      needs_to_be_liberated.each do |shard_id|
+        handled.add(shard_id.to_s)
+        slaves[shard_id.to_s].liberate!
+      end
+      add_shards(shepherd_id, needs_to_be_liberated, false)
+
+      logger.debug "#{shepherd_id} *** enslaving #{needs_to_be_enslaved.inspect}" unless needs_to_be_enslaved.empty?
+      needs_to_be_enslaved.each do |shard_id|
+        raise "Already liberated #{shard_id}!" if handled.include?(shard_id.to_s)
+        handled.add(shard_id.to_s)
+        masters[shard_id.to_s].enslave!(external_shards[shard_id.to_s])
+      end
+      remove_shards(shepherd_id, needs_to_be_enslaved, false)
+
+      logger.debug "#{shepherd_id} *** redirecting #{needs_to_be_directed.inspect}" unless needs_to_be_directed.empty?
+      needs_to_be_directed.each do |shard_id|
+        raise "Already liberated or enslaved #{shard_id}!" if handled.include?(shard_id.to_s)
+        handled.add(shard_id.to_s)
+        slaves[shard_id.to_s].enslave!(external_shards[shard_id.to_s])
+      end
+
+      logger.debug "#{shepherd_id} *** killing masters #{masters_needing_to_be_shut_down.inspect}" unless masters_needing_to_be_shut_down.empty?
+      masters_needing_to_be_shut_down.each do |shard_id|
+        raise "Already liberated, enslaved or directed #{shard_id}!" if handled.include?(shard_id.to_s)
+        handled.add(shard_id.to_s)
+        shutdown_shard(shard_id)
+      end
+      remove_shards(shepherd_id, masters_needing_to_be_shut_down, false)
+
+      logger.debug "#{shepherd_id} *** killing slaves #{slaves_needing_to_be_shut_down.inspect}" unless slaves_needing_to_be_shut_down.empty?
+      slaves_needing_to_be_shut_down.each do |shard_id|
+        raise "Already liberated, enslaved, directed or shut down #{shard_id}!" if handled.include?(shard_id.to_s)
+        handled.add(shard_id.to_s)
+        shutdown_slave(shard_id)
+      end
+
+      logger.debug "#{shepherd_id} *** creating slaves #{new_slaves_needed.inspect}" unless new_slaves_needed.empty?
+      new_slaves_needed.each do |shard_id|
+        raise "Already liberated, enslaved, directed or shut down #{shard_id}!" if handled.include?(shard_id.to_s)
+        handled.add(shard_id.to_s)
+        create_slave_shard(shard_id.to_s, external_shards[shard_id.to_s])
+      end
+
+      ensure_slave_check
+      ensure_predecessor_check
+    end
+
+    def check_slaves
+      revolution = Set.new
+      (owned_shards & slaves.keys).each do |shard_id|
+        shard = slaves[shard_id.to_s]
+        if shard.connection.info["master_sync_in_progress"] == "0"
+          revolution << shard_id.to_s
+        end
+      end
+      unless revolution.empty?
+        logger.debug "#{shepherd_id} *** revolting #{revolution.inspect}"
+        add_shards(shepherd_id, revolution.to_a)
+      end
+    end
+
+    def create_master_shard(shard_id)
+      masters[shard_id.to_s] = create_shard(shard_id)
+    end
+
+    def status
+      204
+    end
+
+    def ordered_shepherd_keys
+      shepherds.keys.sort
+    end
+
+    def ordinal
+      shepherds.keys.sort.index(shepherd_id)
+    end
+
+    def backup_ordinals
+      rval = Set.new
+      ordered_keys = ordered_shepherd_keys
+      my_index = ordered_keys.index(shepherd_id)
+      [redundancy, ordered_keys.size - 1].min.times do |n|
+        rval << (my_index - n - 1) % ordered_keys.size
+      end
+      rval
+    end
+
+    def backup_shards
+      rval = Set.new
+      backup_ordinals.each do |ordinal|
+        rval += shards_owned_by(ordinal)
+      end
+      rval
+    end
+
+    def shards_owned_by(ordinal)
+      rval = Set.new
+      while ordinal < Herdis::Common::SHARDS
+        rval << ordinal.to_s
+        ordinal += shepherds.size
+      end
+      rval
+    end
+
+    def owned_shards
+      shards_owned_by(ordinal)
+    end
+
+    def predecessor
+      ordered_keys = ordered_shepherd_keys
+      my_index = ordered_keys.index(shepherd_id)
+      if my_index == 0
+        shepherds[ordered_keys.last]
+      else
+        shepherds[ordered_keys[my_index - 1]]
+      end
+    end
+
+    def check_predecessor
+      pre = predecessor
+      if pre && pre["id"] != shepherd_id
+        Fiber.new do
+          if EM::HttpRequest.new(pre["url"]).head.response_header.status != 204
+            logger.warn("#{shepherd_id} *** dropping #{pre["id"]} due to failure to respond to ping")
+            remove_shepherd(pre["id"])
+          end
+        end.resume
+      end
+    end
+
+    def create_slave_shard(shard_id, external_uri)
+      slaves[shard_id.to_s] = create_shard(shard_id, :master => external_uri)
+    end
+
+    def create_shard(shard_id, options = {})
+      Shard.new(options.merge(:shepherd => self,
+                              :id => shard_id))
+    end
+
+  end
+
+end
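A worked example of the shard-ownership scheme in ordinal/shards_owned_by above: shepherds are sorted by id, each gets an ordinal, and a shepherd owns every shard whose id is congruent to its ordinal modulo the number of shepherds. Herdis::Common::SHARDS is defined in lib/herdis/common.rb, which is not shown in this diff, so the value 12 below is purely illustrative:

SHARDS = 12  # illustrative stand-in for Herdis::Common::SHARDS

def shards_owned_by(ordinal, shepherd_count)
  owned = []
  # Step through the shard ids in strides of the shepherd count, as in Shepherd#shards_owned_by.
  while ordinal < SHARDS
    owned << ordinal.to_s
    ordinal += shepherd_count
  end
  owned
end

p shards_owned_by(1, 3)  # => ["1", "4", "7", "10"] for the second of three shepherds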
metadata
ADDED
@@ -0,0 +1,136 @@
+--- !ruby/object:Gem::Specification
+name: herdis
+version: !ruby/object:Gem::Version
+  version: 0.0.1
+  prerelease:
+platform: ruby
+authors:
+- Martin Bruse
+autorequire:
+bindir: bin
+cert_chain: []
+date: 2012-03-14 00:00:00.000000000 Z
+dependencies:
+- !ruby/object:Gem::Dependency
+  name: hiredis
+  requirement: &70331042073380 !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ! '>='
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :runtime
+  prerelease: false
+  version_requirements: *70331042073380
+- !ruby/object:Gem::Dependency
+  name: em-synchrony
+  requirement: &70331042072880 !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ! '>='
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :runtime
+  prerelease: false
+  version_requirements: *70331042072880
+- !ruby/object:Gem::Dependency
+  name: em-http-request
+  requirement: &70331042072400 !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ! '>='
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :runtime
+  prerelease: false
+  version_requirements: *70331042072400
+- !ruby/object:Gem::Dependency
+  name: redis
+  requirement: &70331042071940 !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ! '>='
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :runtime
+  prerelease: false
+  version_requirements: *70331042071940
+- !ruby/object:Gem::Dependency
+  name: yajl-ruby
+  requirement: &70331042071440 !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ! '>='
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :runtime
+  prerelease: false
+  version_requirements: *70331042071440
+- !ruby/object:Gem::Dependency
+  name: goliath
+  requirement: &70331042071020 !ruby/object:Gem::Requirement
+    none: false
+    requirements:
+    - - ! '>='
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :runtime
+  prerelease: false
+  version_requirements: *70331042071020
+description: ! 'A Redis herder for simplifying Redis presharding
+
+'
+email: zondolfin at gmail dot com
+executables:
+- herdis
+extensions: []
+extra_rdoc_files: []
+files:
+- lib/herdis/client.rb
+- lib/herdis/common.rb
+- lib/herdis/handlers/add_shards.rb
+- lib/herdis/handlers/add_shepherd.rb
+- lib/herdis/handlers/cluster.rb
+- lib/herdis/handlers/common.rb
+- lib/herdis/handlers/info.rb
+- lib/herdis/handlers/join_cluster.rb
+- lib/herdis/handlers/ping.rb
+- lib/herdis/handlers/remove_shards.rb
+- lib/herdis/handlers/remove_shepherd.rb
+- lib/herdis/handlers/sanity.rb
+- lib/herdis/handlers/shutdown.rb
+- lib/herdis/plugins/shepherd_connection.rb
+- lib/herdis/rack/default_headers.rb
+- lib/herdis/rack/favicon.rb
+- lib/herdis/rack/host_parameter.rb
+- lib/herdis/rmerge.rb
+- lib/herdis/server.rb
+- lib/herdis/shepherd.rb
+- bin/herdis
+homepage: http://github.com/zond/herdis
+licenses: []
+post_install_message:
+rdoc_options:
+- --line-numbers
+- --inline-source
+require_paths:
+- lib
+required_ruby_version: !ruby/object:Gem::Requirement
+  none: false
+  requirements:
+  - - ! '>='
+    - !ruby/object:Gem::Version
+      version: '0'
+required_rubygems_version: !ruby/object:Gem::Requirement
+  none: false
+  requirements:
+  - - ! '>='
+    - !ruby/object:Gem::Version
+      version: '0'
+requirements: []
+rubyforge_project:
+rubygems_version: 1.8.15
+signing_key:
+specification_version: 3
+summary: A Redis herder for simplifying Redis presharding
+test_files: []