rjr 0.12.2 → 0.15.1
- data/README.md +49 -36
- data/Rakefile +2 -0
- data/bin/rjr-client +11 -9
- data/bin/rjr-server +12 -10
- data/examples/amqp.rb +29 -0
- data/examples/client.rb +32 -0
- data/examples/complete.rb +36 -0
- data/examples/local.rb +29 -0
- data/examples/server.rb +26 -0
- data/examples/tcp.rb +29 -0
- data/examples/web.rb +22 -0
- data/examples/ws.rb +29 -0
- data/lib/rjr/common.rb +7 -12
- data/lib/rjr/dispatcher.rb +171 -239
- data/lib/rjr/em_adapter.rb +33 -66
- data/lib/rjr/message.rb +43 -12
- data/lib/rjr/node.rb +197 -103
- data/lib/rjr/nodes/amqp.rb +216 -0
- data/lib/rjr/nodes/easy.rb +159 -0
- data/lib/rjr/nodes/local.rb +118 -0
- data/lib/rjr/{missing_node.rb → nodes/missing.rb} +4 -2
- data/lib/rjr/nodes/multi.rb +79 -0
- data/lib/rjr/nodes/tcp.rb +211 -0
- data/lib/rjr/nodes/web.rb +197 -0
- data/lib/rjr/nodes/ws.rb +187 -0
- data/lib/rjr/stats.rb +70 -0
- data/lib/rjr/thread_pool.rb +178 -123
- data/site/index.html +45 -0
- data/site/jquery-latest.js +9404 -0
- data/site/jrw.js +297 -0
- data/site/json.js +199 -0
- data/specs/dispatcher_spec.rb +244 -198
- data/specs/em_adapter_spec.rb +52 -80
- data/specs/message_spec.rb +223 -197
- data/specs/node_spec.rb +67 -163
- data/specs/nodes/amqp_spec.rb +82 -0
- data/specs/nodes/easy_spec.rb +13 -0
- data/specs/nodes/local_spec.rb +72 -0
- data/specs/nodes/multi_spec.rb +65 -0
- data/specs/nodes/tcp_spec.rb +75 -0
- data/specs/nodes/web_spec.rb +77 -0
- data/specs/nodes/ws_spec.rb +78 -0
- data/specs/stats_spec.rb +59 -0
- data/specs/thread_pool_spec.rb +44 -35
- metadata +40 -30
- data/lib/rjr/amqp_node.rb +0 -330
- data/lib/rjr/inspect.rb +0 -65
- data/lib/rjr/local_node.rb +0 -150
- data/lib/rjr/multi_node.rb +0 -65
- data/lib/rjr/tcp_node.rb +0 -323
- data/lib/rjr/thread_pool2.rb +0 -272
- data/lib/rjr/util.rb +0 -104
- data/lib/rjr/web_node.rb +0 -266
- data/lib/rjr/ws_node.rb +0 -289
- data/lib/rjr.rb +0 -16
- data/specs/amqp_node_spec.rb +0 -31
- data/specs/inspect_spec.rb +0 -60
- data/specs/local_node_spec.rb +0 -43
- data/specs/multi_node_spec.rb +0 -45
- data/specs/tcp_node_spec.rb +0 -33
- data/specs/util_spec.rb +0 -46
- data/specs/web_node_spec.rb +0 -32
- data/specs/ws_node_spec.rb +0 -32
- /data/lib/rjr/{tcp_node2.rb → nodes/tcp2.rb} +0 -0
- /data/lib/rjr/{udp_node.rb → nodes/udp.rb} +0 -0
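
The headline change in this release is structural: the per-transport node implementations moved from lib/rjr/*_node.rb into lib/rjr/nodes/, runnable examples were added under data/examples/, and the dispatcher was rewritten (+171 -239). Below is a minimal sketch of what serving and invoking a request could look like against the reorganized layout; the RJR::Nodes::TCP class name and the dispatcher.handle / invoke calls are assumptions inferred from the new file names, not something this diff shows directly.

    # sketch only -- assumes 0.15-style namespaced nodes and a per-node dispatcher
    require 'rjr/nodes/tcp'

    server = RJR::Nodes::TCP.new :node_id => 'server', :host => 'localhost', :port => 7777
    server.dispatcher.handle('hello') { |name| "Hello #{name}!" }   # assumed API
    server.listen

    client = RJR::Nodes::TCP.new :node_id => 'client', :host => 'localhost', :port => 8888
    puts client.invoke('jsonrpc://localhost:7777', 'hello', 'mo')   # assumed API
    server.join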
data/lib/rjr/multi_node.rb
DELETED
@@ -1,65 +0,0 @@
-# RJR MultiNode Endpoint
-#
-# Implements the RJR::Node interface to satisty JSON-RPC requests over multiple protocols
-#
-# Copyright (C) 2012 Mohammed Morsi <mo@morsi.org>
-# Licensed under the Apache License, Version 2.0
-
-require 'eventmachine'
-require 'rjr/node'
-require 'rjr/message'
-
-module RJR
-
-# Multiple node definition, allows a developer to easily multiplex transport
-# mechanisms to serve JSON-RPC requests over.
-#
-# @example Listening for json-rpc requests over amqp, tcp, http, and websockets
-#   # register rjr dispatchers (see RJR::Dispatcher)
-#   RJR::Dispatcher.add_handler('hello') { |name|
-#     # optionally use @rjr_node_type to handle different transport types
-#     "Hello #{name}!"
-#   }
-#
-#   amqp_server = RJR::TCPNode.new :node_id => 'amqp_server', :broker => 'localhost'
-#   tcp_server  = RJR::TCPNode.new :node_id => 'tcp_server', :host => 'localhost', :port => '7777'
-#   web_server  = RJR::WebNode.new :node_id => 'tcp_server', :host => 'localhost', :port => '80'
-#   ws_server   = RJR::WebNode.new :node_id => 'tcp_server', :host => 'localhost', :port => '8080'
-#
-#   server = RJR::MultiNode.new :node_id => 'server',
-#                               :nodes => [amqp_server, tcp_server, web_server, ws_server]
-#   server.listen
-#   server.join
-#
-#   # invoke requests as you normally would via any protocol
-#
-class MultiNode < RJR::Node
-  # Return the nodes
-  attr_reader :nodes
-
-  # MultiNode initializer
-  # @param [Hash] args the options to create the tcp node with
-  # @option args [Array<RJR::Node>] :nodes array of nodes to use to listen to new requests on
-  def initialize(args = {})
-    super(args)
-    @nodes = args[:nodes]
-  end
-
-  # Add node to multinode
-  # @param [RJR::Node] node the node to add
-  def <<(node)
-    @nodes << node
-  end
-
-
-  # Instruct Node to start listening for and dispatching rpc requests
-  #
-  # Implementation of {RJR::Node#listen}
-  def listen
-    @nodes.each { |node|
-      node.listen
-    }
-  end
-end
-
-end
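
For reference, the removed MultiNode did nothing more than fan listen out to a set of child nodes. A short sketch of the old usage follows, including the << helper for registering a transport after construction; the class names and the class-level RJR::Dispatcher are taken from the docstring above.

    # sketch of the removed 0.12-era API
    RJR::Dispatcher.add_handler('echo') { |msg| msg }

    multi = RJR::MultiNode.new :node_id => 'server',
                               :nodes   => [RJR::TCPNode.new(:node_id => 'tcp', :host => 'localhost', :port => 7777)]
    multi << RJR::WebNode.new(:node_id => 'web', :host => 'localhost', :port => 8888)  # add another transport later
    multi.listen
    multi.join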
data/lib/rjr/tcp_node.rb
DELETED
@@ -1,323 +0,0 @@
-# RJR TCP Endpoint
-#
-# Implements the RJR::Node interface to satisty JSON-RPC requests over the TCP protocol
-#
-# Copyright (C) 2012 Mohammed Morsi <mo@morsi.org>
-# Licensed under the Apache License, Version 2.0
-
-require 'uri'
-require 'socket'
-require 'eventmachine'
-
-require 'rjr/node'
-require 'rjr/message'
-require 'rjr/message'
-require 'rjr/dispatcher'
-require 'rjr/errors'
-require 'rjr/thread_pool2'
-
-module RJR
-
-# TCP node callback interface, used to invoke json-rpc methods
-# against a remote node via a tcp socket connection previously opened
-#
-# After a node sends a json-rpc request to another, the either node may send
-# additional requests to each other via the socket already established until
-# it is closed on either end
-class TCPNodeCallback
-
-  # TCPNodeCallback initializer
-  # @param [Hash] args the options to create the tcp node callback with
-  # @option args [TCPNodeEndpoint] :endpoint tcp node endpoint used to send/receive messages
-  # @option args [Hash] :headers hash of rjr message headers present in client request when callback is established
-  def initialize(args = {})
-    @endpoint        = args[:endpoint]
-    @message_headers = args[:headers]
-  end
-
-  # Implementation of {RJR::NodeCallback#invoke}
-  def invoke(callback_method, *data)
-    msg = NotificationMessage.new :method => callback_method, :args => data, :headers => @message_headers
-    # TODO surround w/ begin/rescue block incase of socket errors / raise RJR::ConnectionError
-    @endpoint.safe_send msg.to_s
-  end
-end
-
-# @private
-# Helper class intialized by eventmachine encapsulating a socket connection
-class TCPNodeEndpoint < EventMachine::Connection
-
-  attr_reader :host
-  attr_reader :port
-
-  # TCPNodeEndpoint intializer
-  #
-  # specify the TCPNode establishing the connection
-  def initialize(args = {})
-    @rjr_node = args[:rjr_node]
-    @host     = args[:host]
-    @port     = args[:port]
-
-    # used to serialize requests to send data via a connection
-    @send_lock = Mutex.new
-  end
-
-  # {EventMachine::Connection#receive_data} callback, handle request / response messages
-  def receive_data(data)
-    # a large json-rpc message may be split over multiple packets (invocations of receive_data)
-    # and multiple messages may be concatinated into one packet
-    @data ||= ""
-    @data += data
-    while extracted = MessageUtil.retrieve_json(@data)
-      msg, @data = *extracted
-      if RequestMessage.is_request_message?(msg)
-        ThreadPool2Manager << ThreadPool2Job.new(msg) { |m| handle_request(m, false) }
-
-      elsif NotificationMessage.is_notification_message?(msg)
-        ThreadPool2Manager << ThreadPool2Job.new(msg) { |m| handle_request(m, true) }
-
-      elsif ResponseMessage.is_response_message?(msg)
-        handle_response(msg)
-
-      end
-    end
-  end
-
-  # {EventMachine::Connection#unbind} callback, connection was closed
-  def unbind
-  end
-
-  # Helper to send data safely, this should be invoked instead of send_data
-  # in all cases
-  def safe_send(data)
-    @send_lock.synchronize{
-      send_data(data)
-    }
-  end
-
-
-  private
-
-  # Internal helper, handle request message received
-  def handle_request(data, notification=false)
-    # XXX hack to handle client disconnection (should grap port/ip immediately on connection and use that)
-    client_port,client_ip = nil,nil
-    begin
-      client_port, client_ip = Socket.unpack_sockaddr_in(get_peername)
-    rescue Exception=>e
-    end
-
-    msg = notification ? NotificationMessage.new(:message => data, :headers => @rjr_node.message_headers) :
-                         RequestMessage.new(:message => data, :headers => @rjr_node.message_headers)
-    headers = @rjr_node.message_headers.merge(msg.headers)
-    result = Dispatcher.dispatch_request(msg.jr_method,
-                                         :method_args => msg.jr_args,
-                                         :headers => headers,
-                                         :client_ip => client_ip,
-                                         :client_port => client_port,
-                                         :rjr_node => @rjr_node,
-                                         :rjr_node_id => @rjr_node.node_id,
-                                         :rjr_node_type => TCPNode::RJR_NODE_TYPE,
-                                         :rjr_callback =>
-                                           TCPNodeCallback.new(:endpoint => self,
-                                                               :headers => headers))
-    unless notification
-      response = ResponseMessage.new(:id => msg.msg_id, :result => result, :headers => headers)
-      safe_send(response.to_s)
-    end
-  end
-
-  # Internal helper, handle response message received
-  def handle_response(data)
-    msg = ResponseMessage.new(:message => data, :headers => @rjr_node.message_headers)
-    res = err = nil
-    begin
-      res = Dispatcher.handle_response(msg.result)
-    rescue Exception => e
-      err = e
-    end
-
-    @rjr_node.response_lock.synchronize {
-      result = [msg.msg_id, res]
-      result << err if !err.nil?
-      @rjr_node.responses << result
-      @rjr_node.response_cv.signal
-    }
-  end
-end
-
-# TCP node definition, listen for and invoke json-rpc requests via TCP sockets
-#
-# Clients should specify the hostname / port when listening for requests and
-# when invoking them.
-#
-# @example Listening for json-rpc requests over tcp
-#   # register rjr dispatchers (see RJR::Dispatcher)
-#   RJR::Dispatcher.add_handler('hello') { |name|
-#     "Hello #{name}!"
-#   }
-#
-#   # initialize node, listen, and block
-#   server = RJR::TCPNode.new :node_id => 'server', :host => 'localhost', :port => '7777'
-#   server.listen
-#   server.join
-#
-# @example Invoking json-rpc requests over tcp
-#   client = RJR::TCPNode.new :node_id => 'client', :host => 'localhost', :port => '8888'
-#   puts client.invoke_request('jsonrpc://localhost:7777', 'hello', 'mo')
-#
-class TCPNode < RJR::Node
-  RJR_NODE_TYPE = :tcp
-
-  attr_accessor :connections
-
-  attr_accessor :response_lock
-  attr_accessor :response_cv
-  attr_accessor :responses
-
-  private
-  # Internal helper, initialize new connection
-  def init_node(args={}, &on_init)
-    host,port = args[:host], args[:port]
-    connection = nil
-    @connections_lock.synchronize {
-      connection = @connections.find { |c|
-        port == c.port && host == c.host
-      }
-      if connection.nil?
-        connection =
-          EventMachine::connect host, port,
-                                TCPNodeEndpoint, args
-        @connections << connection
-      end
-    }
-    on_init.call(connection)
-  end
-
-  # Internal helper, block until response matching message id is received
-  def wait_for_result(message)
-    res = nil
-    while res.nil?
-      @response_lock.synchronize{
-        # FIXME throw err if more than 1 match found
-        res = @responses.select { |response| message.msg_id == response.first }.first
-        if !res.nil?
-          @responses.delete(res)
-
-        else
-          @response_cv.signal
-          @response_cv.wait @response_lock
-
-        end
-      }
-    end
-    return res
-  end
-
-  public
-  # TCPNode initializer
-  # @param [Hash] args the options to create the tcp node with
-  # @option args [String] :host the hostname/ip which to listen on
-  # @option args [Integer] :port the port which to listen on
-  def initialize(args = {})
-    super(args)
-    @host = args[:host]
-    @port = args[:port]
-
-    @connections = []
-    @connections_lock = Mutex.new
-
-    @response_lock = Mutex.new
-    @response_cv   = ConditionVariable.new
-    @responses     = []
-
-    @connection_event_handlers = {:closed => [], :error => []}
-  end
-
-  # Register connection event handler
-  # @param [:error, :close] event the event to register the handler for
-  # @param [Callable] handler block param to be added to array of handlers that are called when event occurs
-  # @yield [TCPNode] self is passed to each registered handler when event occurs
-  def on(event, &handler)
-    if @connection_event_handlers.keys.include?(event)
-      @connection_event_handlers[event] << handler
-    end
-  end
-
-  # Instruct Node to start listening for and dispatching rpc requests
-  #
-  # Implementation of {RJR::Node#listen}
-  def listen
-    em_run {
-      EventMachine::start_server @host, @port, TCPNodeEndpoint, { :rjr_node => self }
-    }
-  end
-
-  # Instructs node to send rpc request, and wait for / return response.
-  #
-  # Do not invoke directly from em event loop or callback as will block the message
-  # subscription used to receive responses
-  #
-  # @param [String] uri location of node to send request to, should be
-  #   in format of jsonrpc://hostname:port
-  # @param [String] rpc_method json-rpc method to invoke on destination
-  # @param [Array] args array of arguments to convert to json and invoke remote method wtih
-  def invoke_request(uri, rpc_method, *args)
-    uri = URI.parse(uri)
-    host,port = uri.host, uri.port
-
-    message = RequestMessage.new :method => rpc_method,
-                                 :args   => args,
-                                 :headers => @message_headers
-    em_run{
-      init_node(:host => host, :port => port,
-                :rjr_node => self) { |c|
-        c.safe_send message.to_s
-      }
-    }
-
-    # TODO optional timeout for response ?
-    result = wait_for_result(message)
-
-    if result.size > 2
-      raise Exception, result[2]
-    end
-    return result[1]
-  end
-
-  # Instructs node to send rpc notification (immadiately returns / no response is generated)
-  #
-  # @param [String] uri location of node to send notification to, should be
-  #   in format of jsonrpc://hostname:port
-  # @param [String] rpc_method json-rpc method to invoke on destination
-  # @param [Array] args array of arguments to convert to json and invoke remote method wtih
-  def send_notification(uri, rpc_method, *args)
-    # will block until message is published
-    published_l = Mutex.new
-    published_c = ConditionVariable.new
-
-    uri = URI.parse(uri)
-    host,port = uri.host, uri.port
-
-    invoked = false
-    conn = nil
-    message = NotificationMessage.new :method => rpc_method,
-                                      :args   => args,
-                                      :headers => @message_headers
-    em_run{
-      init_node(:host => host, :port => port,
-                :rjr_node => self) { |c|
-        conn = c
-        c.safe_send message.to_s
-        # XXX big bug w/ tcp node, this should be invoked only when
-        # we are sure event machine sent message
-        published_l.synchronize { invoked = true ; published_c.signal }
-      }
-    }
-    published_l.synchronize { published_c.wait published_l unless invoked }
-    #sleep 0.01 until conn.get_outbound_data_size == 0
-    nil
-  end
-end
-
-end # module RJR
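
The TCPNodeCallback defined above is what allowed server-initiated traffic: the dispatcher hands each handler a callback bound to the client's socket, and the handler can use it later to push notifications back over the same connection. A rough sketch of how a 0.12-era handler might have used it follows; the @rjr_callback instance variable is inferred from the :rjr_callback option passed to Dispatcher.dispatch_request above, and EventSource is a hypothetical stand-in for whatever triggers the push.

    # sketch only -- @rjr_callback availability inside handlers is assumed
    RJR::Dispatcher.add_handler('subscribe_to_updates') { |event|
      callback = @rjr_callback                  # TCPNodeCallback bound to this socket
      EventSource.on(event) { |data|            # hypothetical event source
        callback.invoke 'updated', event, data  # sends a NotificationMessage to the client
      }
      "subscribed to #{event}"
    }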
data/lib/rjr/thread_pool2.rb
DELETED
@@ -1,272 +0,0 @@
-# Thread Pool (second implementation)
-#
-# Copyright (C) 2010-2012 Mohammed Morsi <mo@morsi.org>
-# Licensed under the Apache License, Version 2.0
-
-require 'singleton'
-
-# Work item to be executed in a thread launched by {ThreadPool2}.
-#
-# The end user just need to initialize this class with the handle
-# to the job to be executed and the params to pass to it, before
-# handing it off to the thread pool that will take care of the rest.
-class ThreadPool2Job
-  attr_accessor :handler
-  attr_accessor :params
-
-  # used internally by the thread pool system, these shouldn't
-  # be set or used by the end user
-  attr_accessor :timestamp
-  attr_accessor :thread
-  attr_accessor :pool_lock
-  attr_reader :being_executed
-
-  # ThreadPoolJob initializer
-  # @param [Array] params arguments to pass to the job when it is invoked
-  # @param [Callable] block handle to callable object corresponding to job to invoke
-  def initialize(*params, &block)
-    @params = params
-    @handler = block
-    @being_executed = false
-    @timestamp = nil
-  end
-
-  # Return string representation of thread pool job
-  def to_s
-    "thread_pool2_job-#{@handler.source_location}-#{@params}"
-  end
-
-  def being_executed?
-    @being_executed
-  end
-
-  def completed?
-    !@timestamp.nil? && !@being_executed
-  end
-
-  # Set job metadata and execute job with specified params
-  def exec
-    # synchronized so that both timestamp is set and being_executed
-    # set to true before the possiblity of a timeout management
-    # check (see handle_timeout! below)
-    @pool_lock.synchronize{
-      @thread = Thread.current
-      @being_executed = true
-      @timestamp = Time.now
-    }
-
-    @handler.call *@params
-
-    # synchronized so as to ensure that a timeout check does not
-    # occur until before (in which case thread is killed during
-    # the check as one atomic operation) or after (in which case
-    # job is marked as completed, and thread is not killed / goes
-    # onto pull anther job)
-    @pool_lock.synchronize{
-      @being_executed = false
-    }
-  end
-
-  # Check timeout and kill thread if it exceeded.
-  def handle_timeout!(timeout)
-    # Synchronized so that check and kill operation occur as an
-    # atomic operation, see exec above
-    @pool_lock.synchronize {
-      if @being_executed && (Time.now - @timestamp) > timeout
-        RJR::Logger.debug "timeout detected on thread #{@thread} started at #{@timestamp}"
-        @thread.kill
-        return true
-      end
-      return false
-    }
-  end
-end
-
-# Utility to launches a specified number of threads on instantiation,
-# assigning work to them in order as it arrives.
-#
-# Supports optional timeout which allows the developer to kill and restart
-# threads if a job is taking too long to run.
-#
-# Second (and hopefully better) thread pool implementation.
-#
-# TODO move to the RJR namespace
-class ThreadPool2
-  private
-
-  # Internal helper, launch worker thread
-  #
-  # Should only be launched from within the pool_lock
-  def launch_worker
-    @worker_threads << Thread.new {
-      while work = @work_queue.pop
-        begin
-          #RJR::Logger.debug "launch thread pool job #{work}"
-          work.pool_lock = @pool_lock
-          @running_queue << work
-          work.exec
-          # TODO cleaner / more immediate way to pop item off running_queue
-          #RJR::Logger.debug "finished thread pool job #{work}"
-        rescue Exception => e
-          # FIXME also send to rjr logger at a critical level
-          puts "Thread raised Fatal Exception #{e}"
-          puts "\n#{e.backtrace.join("\n")}"
-        end
-      end
-    } unless @worker_threads.size == @num_threads
-  end
-
-  # Internal helper, performs checks on workers
-  def check_workers
-    if @terminate
-      @pool_lock.synchronize {
-        @worker_threads.each { |t|
-          t.kill
-        }
-        @worker_threads = []
-      }
-
-    elsif @timeout
-      readd = []
-      while @running_queue.size > 0 && work = @running_queue.pop
-        if @timeout && work.handle_timeout!(@timeout)
-          @pool_lock.synchronize {
-            @worker_threads.delete(work.thread)
-            launch_worker
-          }
-        elsif !work.completed?
-          readd << work
-        end
-      end
-      readd.each { |work| @running_queue << work }
-    end
-  end
-
-  # Internal helper, launch management thread
-  #
-  # Should only be launched from within the pool_lock
-  def launch_manager
-    @manager_thread = Thread.new {
-      until @terminate
-        # sleep needs to occur b4 check workers so
-        # workers are guaranteed to be terminated on @terminate
-        # !FIXME! this enforces a mandatory setting of @timeout which was never intended:
-        sleep @timeout
-        check_workers
-      end
-      check_workers
-      @pool_lock.synchronize { @manager_thread = nil }
-    } unless @manager_thread
-  end
-
-  public
-  # Create a thread pool with a specified number of threads
-  # @param [Integer] num_threads the number of worker threads to create
-  # @param [Hash] args optional arguments to initialize thread pool with
-  # @option args [Integer] :timeout optional timeout to use to kill long running worker jobs
-  def initialize(num_threads, args = {})
-    @work_queue    = Queue.new
-    @running_queue = Queue.new
-
-    @num_threads = num_threads
-    @pool_lock = Mutex.new
-    @worker_threads = []
-
-    @timeout = args[:timeout]
-
-    ObjectSpace.define_finalizer(self, self.class.finalize(self))
-  end
-
-  # Return internal thread pool state in string
-  def inspect
-    "wq#{@work_queue.size}/\
-rq#{@running_queue.size}/\
-nt#{@num_threads.size}/\
-wt#{@worker_threads.select { |wt| ['sleep', 'run'].include?(wt.status) }.size}ok-\
-#{@worker_threads.select { |wt| ['aborting', false, nil].include?(wt.status) }.size}nok/\
-to#{@timeout}"
-  end
-
-  # Start the thread pool
-  def start
-    # clear work and timeout queues?
-    @pool_lock.synchronize {
-      @terminate = false
-      launch_manager
-      0.upto(@num_threads) { |i| launch_worker }
-    }
-  end
-
-  # Ruby ObjectSpace finalizer to ensure that thread pool terminates all
-  # threads when object is destroyed
-  def self.finalize(thread_pool)
-    proc { thread_pool.stop ; thread_pool.join }
-  end
-
-  # Return boolean indicating if thread pool is running.
-  #
-  # If at least one worker thread isn't terminated, the pool is still considered running
-  def running?
-    @pool_lock.synchronize { @worker_threads.size != 0 && @worker_threads.all? { |t| t.status } }
-  end
-
-  # Add work to the pool
-  # @param [ThreadPool2Job] work job to execute in first available thread
-  def <<(work)
-    # TODO option to increase worker threads if work queue gets saturated
-    @work_queue.push work
-  end
-
-  # Terminate the thread pool, stopping all worker threads
-  def stop
-    @pool_lock.synchronize {
-      @terminate = true
-
-      # wakeup management thread so it can kill workers
-      # before terminating on its own
-      begin
-        @manager_thread.wakeup
-
-      # incase thread wakes up / terminates on its own
-      rescue ThreadError
-
-      end
-    }
-    join
-  end
-
-  # Block until all worker threads have finished executing
-  def join
-    #@pool_lock.synchronize { @worker_threads.each { |t| t.join unless @terminate } }
-    th = nil
-    @pool_lock.synchronize { th = @manager_thread if @manager_thread }
-    th.join if th
-  end
-end
-
-# Providers an interface to access a shared thread pool.
-#
-# Thread pool operations may be invoked on this class after
-# the 'init' method is called
-#
-#   ThreadPool2Manager.init
-#   ThreadPool2Manager << ThreadPool2Job(:foo) { "do something" }
-class ThreadPool2Manager
-  # Initialize thread pool if it doesn't exist
-  def self.init(num_threads, params = {})
-    if @thread_pool.nil?
-      @thread_pool = ThreadPool2.new(num_threads, params)
-    end
-    @thread_pool.start
-  end
-
-  # Return shared thread pool
-  def self.thread_pool
-    @thread_pool
-  end
-
-  # Delegates all methods invoked on calls to thread pool
-  def self.method_missing(method_id, *args, &bl)
-    @thread_pool.send method_id, *args, &bl
-  end
-end
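
In practice the pool was driven through the ThreadPool2Manager wrapper (as TCPNodeEndpoint does above when queueing handle_request jobs), but ThreadPool2 also worked standalone. A minimal sketch using only the API defined in the removed file, including the optional per-job timeout:

    # standalone sketch of the removed ThreadPool2 API
    pool = ThreadPool2.new 5, :timeout => 10            # 5 workers, kill jobs running longer than 10s
    pool.start

    pool << ThreadPool2Job.new('world') { |name| puts "hello #{name}" }

    pool.stop                                           # kills workers and joins the manager thread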