arya-pandemic 0.2
- data/README.markdown +99 -0
- data/Rakefile +14 -0
- data/lib/pandemic.rb +40 -0
- data/lib/pandemic/client_side/cluster_connection.rb +129 -0
- data/lib/pandemic/client_side/config.rb +33 -0
- data/lib/pandemic/client_side/connection.rb +31 -0
- data/lib/pandemic/client_side/connection_proxy.rb +15 -0
- data/lib/pandemic/client_side/pandemize.rb +17 -0
- data/lib/pandemic/connection_pool.rb +117 -0
- data/lib/pandemic/mutex_counter.rb +24 -0
- data/lib/pandemic/server_side/client.rb +86 -0
- data/lib/pandemic/server_side/config.rb +55 -0
- data/lib/pandemic/server_side/handler.rb +27 -0
- data/lib/pandemic/server_side/peer.rb +203 -0
- data/lib/pandemic/server_side/request.rb +72 -0
- data/lib/pandemic/server_side/server.rb +231 -0
- data/lib/pandemic/util.rb +26 -0
- data/pandemic.gemspec +31 -0
- metadata +91 -0
data/lib/pandemic/mutex_counter.rb
@@ -0,0 +1,24 @@
module Pandemic
  class MutexCounter
    MAX = (2 ** 30) - 1
    def initialize
      @mutex = Mutex.new
      @counter = 0
      @resets = 0
    end

    def real_total
      @mutex.synchronize { (@resets * MAX) + @counter }
    end

    def inc
      @mutex.synchronize do
        if @counter >= MAX
          @counter = 0 # to avoid Bignum, it's about 4x slower
          @resets += 1
        end
        @counter += 1
      end
    end
  end
end
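For reference, a minimal usage sketch of MutexCounter (the thread and iteration counts are illustrative, and the require path assumes the gem's lib layout shown in the manifest above): increments from several threads are serialized by the mutex, and real_total folds the wrap-around resets back in.

require 'thread'  # Mutex (stdlib; effectively a no-op require on modern Rubies)
# Assumes the file above is loadable, e.g.: require 'pandemic/mutex_counter'

counter = Pandemic::MutexCounter.new

threads = Array.new(4) do
  Thread.new { 1_000.times { counter.inc } }
end
threads.each(&:join)

puts counter.real_total  # => 4000, no increments lost to interleaving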
data/lib/pandemic/server_side/client.rb
@@ -0,0 +1,86 @@
module Pandemic
  module ServerSide
    class Client
      class DisconnectClient < Exception; end
      include Util

      attr_accessor :received_requests, :responded_requests

      def initialize(connection, server)
        @connection = connection
        @server = server
        @received_requests = 0
        @responded_requests = 0
      end

      def listen
        unless @connection.nil?
          @listener_thread.kill if @listener_thread
          @listener_thread = Thread.new do
            begin
              while @server.running
                debug("Waiting for incoming request")
                request = @connection.gets
                info("Received incoming request")
                @received_requests += 1

                if request.nil?
                  debug("Incoming request is nil")
                  @connection.close
                  @connection = nil
                  break
                elsif request.strip! =~ /^([0-9]+)$/ # currently only asking for request size
                  size = $1.to_i
                  debug("Reading request body (size #{size})")
                  body = @connection.read(size)
                  debug("Finished reading request body")

                  response = handle_request(body)

                  debug("Writing response to client")
                  @connection.write("#{response.size}\n#{response}")
                  @connection.flush
                  @responded_requests += 1
                  debug("Finished writing response to client")
                end
              end
            rescue DisconnectClient
              info("Closing client connection")
              @connection.close unless @connection.nil? || @connection.closed?
            rescue Exception => e
              warn("Unhandled exception in client listen thread: #{e.inspect}")
            ensure
              @server.client_closed(self)
            end
          end
        end
        return self
      end

      def close
        @listener_thread.raise(DisconnectClient)
      end

      def handle_request(request)
        @server.handle_client_request(Request.new(request))
      end

      private
      def signature
        @signature ||= @connection.peeraddr.values_at(3,1).join(":")
      end

      def debug(msg)
        logger.debug("Client #{signature}") {msg}
      end

      def info(msg)
        logger.info("Client #{signature}") {msg}
      end

      def warn(msg)
        logger.warn("Client #{signature}") {msg}
      end
    end
  end
end
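The listen loop above defines the client wire protocol: a line containing the request size in bytes, then exactly that many bytes of body; the server answers with "<size>\n<response>" in the same shape. A raw-socket sketch of one exchange follows (host, port, and payload are made up for illustration; the client_side classes listed in the manifest are the intended way to talk to a server).

require 'socket'

# Hypothetical address of a running pandemic node.
sock = TCPSocket.new('127.0.0.1', 4000)

body = 'hello world'
sock.write("#{body.size}\n#{body}")   # size line, then exactly that many bytes
sock.flush

size = sock.gets.to_i                 # server replies "<size>\n<response>"
response = sock.read(size)
puts response

sock.close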
data/lib/pandemic/server_side/config.rb
@@ -0,0 +1,55 @@
module Pandemic
  module ServerSide
    class Config
      class << self
        attr_accessor :bind_to, :servers, :response_timeout
        def load
          path = extract_config_path
          yaml = YAML.load_file(path)

          @server_map = yaml['servers'] || []
          @servers = @server_map.is_a?(Hash) ? @server_map.values : @server_map
          @servers = @servers.collect { |s| s.is_a?(Hash) ? s.keys.first : s }

          @response_timeout = (yaml['response_timeout'] || 1).to_f
          @bind_to = extract_bind_to
          raise "Interface to bind to is nil." unless @bind_to
        end

        def get(*args)
          args.size == 1 ? @options[args.first] : @options.values_at(*args) if @options
        end

        private
        def extract_bind_to
          index = ARGV.index('-i')
          index2 = ARGV.index('-a')

          if index && (key = ARGV[index + 1])
            key = key.to_i if @server_map.is_a?(Array)
            server = @server_map[key]
            if server.is_a?(Hash)
              @options = server.values.first # there should only be one
              @server_map[key].keys.first
            else
              server
            end
          elsif index2 && (host = ARGV[index2 + 1])
            host
          else
            raise "You must specify which interface to bind to."
          end
        end

        def extract_config_path
          index = ARGV.index('-c')
          if index && (path = ARGV[index + 1])
            path
          else
            "pandemic_server.yml"
          end
        end
      end
    end
  end
end
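Config.load reads a YAML file (pandemic_server.yml by default, or the path given after -c) and resolves the local bind address either by index/key after -i or as a literal address after -a. A sketch of a matching config and the values it produces, assuming the gem is already loaded; the addresses and file name below are illustrative only.

# Illustrative pandemic_server.yml:
#
#   servers:
#     - 127.0.0.1:4000
#     - 127.0.0.1:4001
#   response_timeout: 0.5

# Assuming the gem is on the load path (e.g. require 'pandemic') and the node
# was started as:  ruby node.rb -c pandemic_server.yml -i 0
Pandemic::ServerSide::Config.load

Pandemic::ServerSide::Config.bind_to           # => "127.0.0.1:4000"
Pandemic::ServerSide::Config.servers           # => ["127.0.0.1:4000", "127.0.0.1:4001"]
Pandemic::ServerSide::Config.response_timeout  # => 0.5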
data/lib/pandemic/server_side/handler.rb
@@ -0,0 +1,27 @@
module Pandemic
  module ServerSide
    class Handler
      def config
        Config
      end

      def map(request, servers)
        map = {}
        servers.each do |server, status|
          if status != :disconnected
            map[server] = request.body
          end
        end
        map
      end

      def reduce(request)
        request.responses.join("")
      end

      def process(body)
        body
      end
    end
  end
end
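Handler is the extension point: map decides what each connected peer is asked to process, process does the per-node work, and reduce folds the peers' responses back into one answer. A hypothetical word-count handler is sketched below; the class name and splitting scheme are invented for illustration, and wiring it into a server happens in server.rb, which is not part of this hunk.

# Hypothetical handler: split the words of the request across the live nodes,
# have each node count its share, then sum the per-node counts.
class WordCountHandler < Pandemic::ServerSide::Handler
  def map(request, servers)
    live = servers.reject { |_server, status| status == :disconnected }.keys
    return {} if live.empty?

    words    = request.body.split
    per_node = (words.size.to_f / live.size).ceil
    map      = {}
    live.each_with_index do |server, i|
      map[server] = (words[i * per_node, per_node] || []).join(" ")
    end
    map
  end

  def process(body)
    body.split.size.to_s   # each node counts only the words it was handed
  end

  def reduce(request)
    request.responses.inject(0) { |sum, r| sum + r.to_i }.to_s
  end
end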
data/lib/pandemic/server_side/peer.rb
@@ -0,0 +1,203 @@
module Pandemic
  module ServerSide
    class Peer
      class PeerUnavailableException < Exception; end
      include Util
      attr_reader :host, :port

      def initialize(addr, server)
        @host, @port = host_port(addr)
        @server = server
        @pending_requests = with_mutex({})
        @incoming_connection_listeners = []
        @inc_threads_mutex = Mutex.new
        initialize_connection_pool
      end

      def connect
        return if connected?
        debug("Forced connection to peer")
        1.times { @connection_pool.add_connection! }
      end

      def disconnect
        debug("Disconnecting from peer")
        @connection_pool.disconnect
      end

      def connected?
        @connection_pool.connected?
      end

      def client_request(request, body)
        debug("Sending client's request to peer")
        # TODO: Consider adding back threads here if it will be faster that way in Ruby 1.9
        @connection_pool.with_connection do |connection|
          if connection && !connection.closed?
            @pending_requests.synchronize do
              @pending_requests[request.hash] = request
            end
            debug("Writing client's request")
            connection.write("PROCESS #{request.hash} #{body.size}\n#{body}")
            connection.flush
            debug("Finished writing client's request")
          end # TODO: else? fail silently? reconnect?
        end
      end

      def add_incoming_connection(conn)
        debug("Adding incoming connection")

        connect # if we're not connected, we should be

        thread = Thread.new(conn) do |connection|
          begin
            debug("Incoming connection thread started")
            while @server.running
              debug("Listening for incoming requests")
              request = connection.gets
              debug("Read incoming request from peer")

              if request.nil?
                debug("Incoming connection request is nil")
                break
              else
                debug("Received incoming (#{request.strip})")
                handle_incoming_request(request, connection) if request =~ /^PROCESS/
                handle_incoming_response(request, connection) if request =~ /^RESPONSE/
              end
            end
          rescue Exception => e
            warn("Unhandled exception in peer listener thread: #{e.inspect}")
          ensure
            debug("Incoming connection closing")
            conn.close if conn && !conn.closed?
            @inc_threads_mutex.synchronize { @incoming_connection_listeners.delete(Thread.current) }
            if @incoming_connection_listeners.empty?
              disconnect
            end
          end
        end

        @inc_threads_mutex.synchronize { @incoming_connection_listeners.push(thread) if thread.alive? }
      end

      private
      def initialize_connection_pool
        return if @connection_pool
        @connection_pool = ConnectionPool.new

        @connection_pool.create_connection do
          connection = nil
          begin
            connection = TCPSocket.new(@host, @port)
          rescue Errno::ETIMEDOUT, Errno::ECONNREFUSED
            connection = nil
          rescue Exception => e
            warn("Unhandled exception in create connection block: #{e.inspect}")
          end
          if connection
            connection.setsockopt(Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1) if Socket.constants.include?('TCP_NODELAY')
            connection.write("SERVER #{@server.signature}\n")
          end
          connection
        end
      end

      def handle_incoming_request(request, connection)
        debug("Identified as request")
        if request.strip =~ /^PROCESS ([A-Za-z0-9]+) ([0-9]+)$/
          hash = $1
          size = $2.to_i
          debug("Incoming request: #{hash} #{size}")
          begin
            debug("Reading request body")
            request_body = connection.read(size)
            debug("Finished reading request body")
          rescue EOFError, TruncatedDataError
            debug("Failed to read request body")
            # TODO: what to do here?
            return false
          rescue Exception => e
            warn("Unhandled exception in incoming request read: #{e.inspect}")
          end
          debug("Processing body")
          process_request(hash, request_body)
        else
          warn("Malformed incoming request: #{request.strip}")
          # when the incoming request was malformed
          # TODO: what to do here?
        end
      end

      def handle_incoming_response(response, connection)
        if response.strip =~ /^RESPONSE ([A-Za-z0-9]+) ([0-9]+)$/
          hash = $1
          size = $2.to_i
          debug("Incoming response: #{hash} #{size}")
          begin
            debug("Reading response body")
            response_body = connection.read(size)
            debug("Finished reading response body")
          rescue EOFError, TruncatedDataError
            debug("Failed to read response body")
            # TODO: what to do here?
            return false
          rescue Exception => e
            warn("Unhandled exception in incoming response read: #{e.inspect}")
          end
          process_response(hash, response_body)
        else
          warn("Malformed incoming response: #{response.strip}")
          # when the incoming response was malformed
          # TODO: what to do here?
        end
      end

      def process_request(hash, body)
        Thread.new do
          begin
            debug("Starting processing thread (#{hash})")
            response = @server.process(body)
            debug("Processing finished (#{hash})")
            @connection_pool.with_connection do |connection|
              debug("Sending response (#{hash})")
              connection.write("RESPONSE #{hash} #{response.size}\n#{response}")
              connection.flush
              debug("Finished sending response (#{hash})")
            end
          rescue Exception => e
            warn("Unhandled exception in process request thread: #{e.inspect}")
          end
        end
      end

      def process_response(hash, body)
        Thread.new do
          begin
            debug("Finding original request (#{hash})")
            original_request = @pending_requests.synchronize { @pending_requests.delete(hash) }
            if original_request
              debug("Found original request, adding response")
              original_request.add_response(body)
            else
              warn("Original response not found (#{hash})")
            end
          rescue Exception => e
            warn("Unhandled exception in process response thread: #{e.inspect}")
          end
        end
      end

      def debug(msg)
        logger.debug("Peer #{@host}:#{@port}") { msg }
      end

      def warn(msg)
        logger.warn("Peer #{@host}:#{@port}") { msg }
      end
    end
  end
end
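Peers exchange two line-oriented frame types over their pooled sockets: "PROCESS <hash> <size>" followed by <size> bytes of request body, and "RESPONSE <hash> <size>" followed by the response body, where <hash> matches a response back to its pending request. A standalone sketch of that framing follows; the helper names and the StringIO round trip are illustrative and not part of the gem.

require 'stringio'

# Illustrative helpers mirroring the frames Peer writes and parses.
def frame(verb, hash, body)
  "#{verb} #{hash} #{body.size}\n#{body}"
end

def read_frame(io)
  header = io.gets or return nil
  if header.strip =~ /^(PROCESS|RESPONSE) ([A-Za-z0-9]+) ([0-9]+)$/
    [$1, $2, io.read($3.to_i)]
  end
end

# Round trip through an in-memory IO:
io = StringIO.new(frame("PROCESS", "a1b2c3d4e5", "count these words"))
verb, hash, body = read_frame(io)
# verb => "PROCESS", hash => "a1b2c3d4e5", body => "count these words"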
data/lib/pandemic/server_side/request.rb
@@ -0,0 +1,72 @@
module Pandemic
  module ServerSide
    class Request
      include Util

      @@request_count = MutexCounter.new
      @@late_responses = MutexCounter.new
      attr_reader :body
      attr_accessor :max_responses

      def self.total_request_count
        @@request_count.real_total
      end

      def self.total_late_responses
        @@late_responses.real_total
      end

      def initialize(body)
        @request_number = @@request_count.inc
        @body = body
        @responses = []
        @responses_mutex = Mutex.new
        @complete = false
      end

      def add_response(response)
        @responses_mutex.synchronize do
          if @responses.frozen? # too late
            @@late_responses.inc
            return
          end
          debug("Adding response")
          @responses << response
          if @max_responses && @responses.size >= @max_responses
            debug("Hit max responses, waking up waiting thread")
            @waiting_thread.wakeup if @waiting_thread && @waiting_thread.status == "sleep"
            @complete = true
          end
        end
      end

      def responses
        @responses # it's frozen, so we don't have to worry about the mutex
      end

      def wait_for_responses
        return if @complete
        @waiting_thread = Thread.current
        sleep Config.response_timeout
        # There is a race here: the sleep can finish while a response still holds
        # the mutex, in which case that response is added just before the array is frozen.
        # It would be ideal to use monitor wait/signal here, but the monitor implementation is currently flawed.
        @responses_mutex.synchronize { @responses.freeze }
        @waiting_thread = nil
      end

      def hash
        @hash ||= Digest::MD5.hexdigest("#{@request_number} #{@body}")[0,10]
      end

      private
      def debug(msg)
        logger.debug("Request #{hash}") {msg}
      end

      def info(msg)
        logger.info("Request #{hash}") {msg}
      end
    end
  end
end
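Request ties the fan-out together: peer threads feed answers in through add_response while the requesting thread blocks in wait_for_responses until max_responses answers arrive or Config.response_timeout elapses, after which the response array is frozen and stragglers are only counted as late. A sketch of that lifecycle in isolation; it assumes the gem and its logger are set up (e.g. via require 'pandemic'), sets the timeout by hand instead of going through Config.load, and joins the "peer" threads only to keep the example deterministic.

# Illustrative timeout; normally set from pandemic_server.yml by Config.load.
Pandemic::ServerSide::Config.response_timeout = 0.5

request = Pandemic::ServerSide::Request.new("count these words")
request.max_responses = 2

# Pretend two peers answer on their own threads.
peers = ["3", "4"].map { |answer| Thread.new { request.add_response(answer) } }
peers.each(&:join)   # in the real server these run concurrently with the wait

request.wait_for_responses   # returns immediately: max_responses was already reached
puts request.responses.inject(0) { |sum, r| sum + r.to_i }   # => 7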