isono 0.1.0
- data/LICENSE +202 -0
- data/NOTICE +2 -0
- data/bin/cli +122 -0
- data/isono.gemspec +47 -0
- data/lib/ext/shellwords.rb +172 -0
- data/lib/isono.rb +61 -0
- data/lib/isono/amqp_client.rb +169 -0
- data/lib/isono/daemonize.rb +96 -0
- data/lib/isono/event_delegate_context.rb +56 -0
- data/lib/isono/event_observable.rb +86 -0
- data/lib/isono/logger.rb +48 -0
- data/lib/isono/manifest.rb +161 -0
- data/lib/isono/messaging_client.rb +116 -0
- data/lib/isono/models/event_log.rb +28 -0
- data/lib/isono/models/job_state.rb +35 -0
- data/lib/isono/models/node_state.rb +70 -0
- data/lib/isono/models/resource_instance.rb +35 -0
- data/lib/isono/node.rb +158 -0
- data/lib/isono/node_modules/base.rb +65 -0
- data/lib/isono/node_modules/data_store.rb +57 -0
- data/lib/isono/node_modules/event_channel.rb +72 -0
- data/lib/isono/node_modules/event_logger.rb +39 -0
- data/lib/isono/node_modules/job_channel.rb +86 -0
- data/lib/isono/node_modules/job_collector.rb +47 -0
- data/lib/isono/node_modules/job_worker.rb +152 -0
- data/lib/isono/node_modules/node_collector.rb +87 -0
- data/lib/isono/node_modules/node_heartbeat.rb +26 -0
- data/lib/isono/node_modules/rpc_channel.rb +482 -0
- data/lib/isono/rack.rb +67 -0
- data/lib/isono/rack/builder.rb +40 -0
- data/lib/isono/rack/data_store.rb +20 -0
- data/lib/isono/rack/job.rb +74 -0
- data/lib/isono/rack/map.rb +56 -0
- data/lib/isono/rack/object_method.rb +20 -0
- data/lib/isono/rack/proc.rb +50 -0
- data/lib/isono/rack/thread_pass.rb +22 -0
- data/lib/isono/resource_manifest.rb +273 -0
- data/lib/isono/runner/agent.rb +89 -0
- data/lib/isono/runner/rpc_server.rb +198 -0
- data/lib/isono/serializer.rb +43 -0
- data/lib/isono/thread_pool.rb +169 -0
- data/lib/isono/util.rb +212 -0
- metadata +185 -0
data/lib/isono/node_modules/job_collector.rb
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+
+module Isono
+  module NodeModules
+    class JobCollector < Base
+
+      initialize_hook do
+        rpc = RpcChannel.new(node)
+
+        app = Rack::DataStore.new(Dispatch.new)
+
+        rpc.register_endpoint('job-collector', app)
+      end
+
+      terminate_hook do
+      end
+
+      class Dispatch
+        # Register a new job.
+        def regist
+          params = @req.args[0]
+          params[:node_id] = @req.sender
+          job = Models::JobState.new
+          job.set_fields(params, [:job_id, :parent_job_id, :node_id, :state]).save
+        end
+
+        def update
+          params = @req.args[0]
+          job = Models::JobState.find(:job_id=>params[:job_id])
+          raise "Unknown JOB ID: #{params[:job_id]}" if job.nil?
+          job.set_fields(params, [:state, :started_at, :finished_at]).save
+        end
+
+        def call(req, res)
+          @req, @res = req, res
+          raise Rack::UnknownMethodError if @req.command == 'call'
+          m = self.method(@req.command)
+          raise Rack::UnknownMethodError if m.nil?
+
+          ret = m.call
+          @res.response(nil) unless @res.responded?
+        end
+      end
+
+    end
+  end
+end
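For orientation, here is a minimal caller-side sketch for this endpoint, mirroring the requests JobWorker sends in the next file. The payload keys are the fields regist/update persist; the job ID value and the `node` setup are assumed.

  rpc = Isono::NodeModules::RpcChannel.new(node)

  # Fire-and-forget: create the job record on the collector node.
  rpc.request('job-collector', 'regist',
              {:job_id=>'j-0001', :parent_job_id=>nil, :state=>:init}) { |req|
    req.oneshot = true
  }

  # Later, report a state change; update() finds the row by :job_id.
  rpc.request('job-collector', 'update',
              {:job_id=>'j-0001', :state=>:running, :started_at=>Time.now}) { |req|
    req.oneshot = true
  }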
data/lib/isono/node_modules/job_worker.rb
@@ -0,0 +1,152 @@
+# -*- coding: utf-8 -*-
+
+require 'statemachine'
+
+module Isono
+  module NodeModules
+    class JobWorker < Base
+      include Logger
+
+      JOB_CTX_KEY = :job_worker_ctx
+
+      initialize_hook do
+        @thread_pool = ThreadPool.new(10)
+        @active_jobs = {}
+
+        RpcChannel.new(node).register_endpoint("job-stats.#{node.node_id}", proc { |req, res|
+          case req.command
+          when 'get'
+            res.response({ :active_jobs => @active_jobs.values.map {|j| j.to_hash } })
+          else
+            raise Rack::UnknownMethodError
+          end
+        })
+      end
+
+      terminate_hook do
+        @thread_pool.shutdown
+      end
+
+      # Start a new long-term job.
+      #
+      # @param [String] parent_id Parent Job ID for the new job
+      # @param [Proc,nil] run_cb
+      # @param [Proc,nil] fail_cb
+      # @yield The block used as run_cb
+      #
+      # @example Simply pass the run block and a parent job ID.
+      #   run('xxxxxx') {
+      #     # do something
+      #   }
+      # @example Set proc{} for both the run and fail blocks.
+      #   run(nil, proc{
+      #     # do something
+      #   }, proc{
+      #     # do rollback on fail
+      #   })
+      def run(parent_id=nil, run_cb=nil, fail_cb=nil, &blk)
+        if run_cb.is_a?(Proc)
+          job = JobContext.new(run_cb, parent_id)
+          job.fail_cb = fail_cb if fail_cb.is_a?(Proc)
+        elsif blk
+          job = JobContext.new(blk, parent_id)
+        else
+          raise ArgumentError, "callbacks were not set properly"
+        end
+        @active_jobs[job.job_id] = job
+        rpc = RpcChannel.new(node)
+
+        rpc.request('job-collector', 'regist', job.to_hash) { |req|
+          req.oneshot = true
+        }
+
+        @thread_pool.pass {
+          begin
+            Thread.current[JOB_CTX_KEY] = job
+            job.stm.on_start
+            rpc.request('job-collector', 'update', job.to_hash) { |req|
+              req.oneshot = true
+            }
+            job.run_cb.call
+            job.stm.on_done
+          rescue Exception => e
+            job.stm.on_fail(e)
+            if job.fail_cb
+              job.fail_cb.arity == 1 ? job.fail_cb.call(e) : job.fail_cb.call
+            end
+          ensure
+            Thread.current[JOB_CTX_KEY] = nil
+            EventMachine.schedule {
+              rpc.request('job-collector', 'update', job.to_hash) { |req|
+                req.oneshot = true
+              }
+              @active_jobs.delete(job.job_id)
+            }
+          end
+        }
+        job
+      end
+
+      class JobContext < OpenStruct
+        include Logger
+        attr_reader :stm, :run_cb
+        attr_accessor :fail_cb
+
+        def initialize(run_cb, parent_id=nil)
+          super({:job_id=>Util.gen_id,
+                 :parent_job_id=>parent_id,
+                 :started_at=>nil,
+                 :finished_at=>nil,
+                })
+
+          @run_cb = run_cb
+          @fail_cb = nil
+
+          @stm = Statemachine.build {
+            startstate :init
+            trans :init,    :on_start, :running, :on_start
+            trans :running, :on_done,  :done,    :on_done
+            trans :running, :on_fail,  :failed,  :on_fail
+            trans :init,    :on_fail,  :failed,  :on_fail
+          }
+          @stm.context = self
+        end
+
+        def state
+          stm.state
+        end
+
+        def to_hash
+          @table.dup.merge({:state=>@stm.state})
+        end
+
+        def elapsed_time
+          if finished_at && started_at
+            finished_at - started_at
+          else
+            0
+          end
+        end
+
+        private
+        def on_start
+          self.started_at = Time.now
+          logger.info("Job start #{job_id}")
+        end
+
+        def on_done
+          self.finished_at = Time.now
+          logger.info("Job complete #{job_id}: #{elapsed_time} sec")
+        end
+
+        def on_fail(e)
+          self.finished_at = Time.now
+          logger.error("Job failed #{job_id}: #{e}")
+          logger.error(e)
+        end
+
+      end
+
+    end
+  end
+end
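A usage sketch following the @example blocks above; the `worker` accessor (however a given node obtains its JobWorker module instance) and the two helper calls are hypothetical, and the fail block relies on the arity check in run().

  job = worker.run(nil, proc {
    # long-running work; runs on one of the 10 pool threads
    do_heavy_work            # hypothetical helper
  }, proc { |e|
    # rollback path; receives the exception because its arity is 1
    cleanup_partial_work     # hypothetical helper
  })

  job.job_id  # generated via Util.gen_id
  job.state   # :init until the pool thread fires on_start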
data/lib/isono/node_modules/node_collector.rb
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+
+module Isono
+  module NodeModules
+    class NodeCollector < Base
+      include Logger
+
+      config_section do
+        desc "time in seconds before an agent is considered timed out"
+        timeout_sec (60*20).to_f
+        desc "time in seconds after which an agent is removed from the datasource"
+        kill_sec (60*20*2).to_f
+        desc "interval in seconds between GC timer runs"
+        gc_period 20.0
+      end
+
+      initialize_hook do
+        # GC event trigger for agent timer & status
+        @gc_timer = EM::PeriodicTimer.new(config_section.gc_period) {
+          event = EventChannel.new(self.node)
+          DataStore.pass {
+            # SQLite3 may raise a table-lock error when rows are
+            # modified while iterating over an open result set,
+            # which is what happens with:
+            #   Models::AgentPool.dataset.each { |row| ... }
+            #
+            # Model.dataset.all instead returns a Ruby array of
+            # rows, so iterating it avoids the table lock exception.
+            # see:
+            # http://www.mail-archive.com/sqlite-users@sqlite.org/msg03328.html
+            # TODO: paging support for the large result set.
+            Models::NodeState.dataset.all.each { |row|
+
+              sm = row.state_machine
+              next if sm.state == :offline
+
+              diff_time = Time.now - row[:last_ping_at]
+              if sm.state != :timeout && diff_time > config_section.timeout_sec
+                sm.on_timeout
+                row.save_changes
+                event.publish('node_collector/timedout', :args=>[row.values])
+              end
+
+              if diff_time > config_section.kill_sec
+                sm.on_unmonitor
+
+                event.publish('node_collector/killed', :args=>[row.values])
+                row.delete
+              end
+            }
+          }
+        }
+
+        rpc = RpcChannel.new(node)
+        app = Rack::ObjectMethod.new(myinstance)
+        rpc.register_endpoint('node-collector', Rack.build do
+          use Rack::DataStore
+          run app
+        end)
+      end
+
+      terminate_hook do
+        @gc_timer.cancel
+      end
+
+      def list
+        Models::NodeState.dataset.all.map {|r| r.values }
+      end
+
+      def notify(node_id, boot_token)
+        event = EventChannel.new(node)
+
+        a = Models::NodeState.find(:node_id=>node_id) || Models::NodeState.new(:node_id=>node_id)
+        a.state_machine.on_ping
+        if a.new?
+          a.boot_token = boot_token
+          a.save
+          event.publish('node_collector/monitored', :args=>[a.values])
+        else
+          a.save_changes
+          #event.publish('node_collector/pong')
+        end
+      end
+
+    end
+  end
+end
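A peer node can query the endpoint registered above. A minimal sketch, assuming `node` is a connected Isono node; `list` is called synchronously here and returns the NodeState rows as hashes of column values:

  rpc = Isono::NodeModules::RpcChannel.new(node)

  rows = rpc.request('node-collector', 'list')
  rows.each { |r|
    puts r.inspect   # one hash of column values per known node
  }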
data/lib/isono/node_modules/node_heartbeat.rb
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+
+module Isono
+  module NodeModules
+    class NodeHeartbeat < Base
+
+      config_section do |c|
+        desc "seconds to wait before sending the next heartbeat signal"
+        heartbeat_offset_time 10
+      end
+
+      initialize_hook do
+        @timer = EventMachine::PeriodicTimer.new(config_section.heartbeat_offset_time.to_f) {
+          rpc = RpcChannel.new(node)
+          rpc.request('node-collector', 'notify', manifest.node_id, node.boot_token) do |req|
+            req.oneshot = true
+          end
+        }
+      end
+
+      terminate_hook do
+        @timer.cancel
+      end
+    end
+  end
+end
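The default intervals above interact: a node pings node-collector every 10 seconds, is marked :timeout after timeout_sec without a ping, and is deleted after kill_sec. A worked check of that headroom, assuming the shipped defaults:

  heartbeat = 10.0                # NodeHeartbeat heartbeat_offset_time
  timeout   = (60 * 20).to_f      # NodeCollector timeout_sec  => 1200.0
  kill      = (60 * 20 * 2).to_f  # NodeCollector kill_sec     => 2400.0

  (timeout / heartbeat).to_i  # => 120 missed pings before :timeout
  (kill / heartbeat).to_i     # => 240 missed pings before removal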
data/lib/isono/node_modules/rpc_channel.rb
@@ -0,0 +1,482 @@
+# -*- coding: utf-8 -*-
+
+require 'thread'
+require 'statemachine'
+require 'ostruct'
+
+module Isono
+  module NodeModules
+    class RpcChannel < Base
+      include Logger
+
+      AMQP_EXCHANGE = 'isono.rpc'
+
+      class RpcError < RuntimeError; end
+      class UnknownEndpointError < RpcError; end
+      class DuplicateEndpointError < RpcError; end
+
+      config_section do
+        desc "default timeout in seconds until a response is received."
+        timeout_sec (60*3).to_f
+      end
+
+      initialize_hook do
+        @active_requests = {}
+        @endpoints = {}
+        amq.direct(AMQP_EXCHANGE, {:auto_delete=>true})
+        amq.queue("command-recv.#{manifest.node_id}", {:exclusive=>true}).subscribe { |header, data|
+          event = EventChannel.new(self.node)
+          req = @active_requests[header.message_id]
+          if req
+            data = Serializer.instance.unmarshal(data)
+            req.process_event(:on_received, data)
+            event.publish('rpc/response_received', :args=>[header.message_id])
+            begin
+              case data[:type]
+              when :inprogress
+                req.progress_cb.call(data[:msg]) if req.progress_cb
+              when :error
+                req.process_event(:on_error, data)
+                req.error_cb.call(data[:msg]) if req.error_cb
+              else
+                req.process_event(:on_success, data)
+                req.success_cb.call(data[:msg]) if req.success_cb
+              end
+            rescue => e
+              logger.error(e)
+            ensure
+              if req.state == :done
+                @active_requests.delete req.ticket
+              end
+            end
+          end
+        }
+
+        # RPC endpoint for statistics info of this node.
+        myinstance.register_endpoint("rpc-stats.#{manifest.node_id}", proc { |req, res|
+          case req.command
+          when 'get'
+            res.response({:active_requests => @active_requests.values.map {|a| a.hash },
+                          :endpoints => @endpoints.keys
+                         })
+          else
+            raise Rack::UnknownMethodError
+          end
+        })
+      end
+
+      terminate_hook do
+        @endpoints.keys.each { |ns|
+          myinstance.unregister_endpoint(ns)
+        }
+        amq.queue("command-recv.#{manifest.node_id}", {:exclusive=>true}).delete
+      end
+
+      # Make an RPC request to an endpoint.
+      #
+      # @param [String] endpoint
+      # @param [String] command
+      # @param [Array] args
+      # @param [Proc] &blk Block to set up the request context.
+      # @return [RequestContext,any]
+      #
+      # @example Create a sync RPC request.
+      #   rpc.request('endpoint1', 'func1', xxxx)
+      # @example Call RPC in async mode.
+      #   rpc.request('endpoint1', 'func1', xxxx) { |req|
+      #     req.on_success { |r|
+      #       puts r
+      #     }
+      #     req.on_error { |r|
+      #       puts r
+      #     }
+      #   }
+      #
+      # @example Set up the request context and do wait().
+      #   Note that on_success/on_error callbacks are overwritten by synchronize().
+      #   rpc.request('endpoint1', 'func1', xxxx) { |req|
+      #     # send new attribute
+      #     req.request[:xxxx] = "sdfsdf"
+      #     # returns synchronized RequestContext to block caller.
+      #     req.synchronize
+      #   }.wait # request() gets back the altered RequestContext that has wait().
+      #
+      # @example Create an async oneshot call. (do not expect a response)
+      #   rpc.request('endpoint1', 'func1') { |req|
+      #     req.oneshot = true
+      #   }
+      def request(endpoint, command, *args, &blk)
+        req = RequestContext.new(endpoint, command, args)
+        # the block is to set up the request context prior to sending.
+        if blk
+          # async
+          r = blk.call(req)
+          req = r if r.is_a?(RequestContext)
+          if req.oneshot
+            send_request(req)
+          else
+            check_endpoint(endpoint) { |result|
+              if result
+                send_request(req)
+              else
+                e = UnknownEndpointError.new(endpoint)
+                req.error_cb.call(e) if req.error_cb
+              end
+            }
+          end
+
+          req
+        else
+          # sync
+          req = req.synchronize
+          check_endpoint(endpoint) || raise(UnknownEndpointError, endpoint)
+          send_request(req)
+          req.wait
+        end
+      end
+
+      # Register a new RPC endpoint.
+      #
+      # This method works in sync mode when called from a non-EM-reactor thread.
+      # @param [String] endpoint
+      # @param [#call] app
+      # @param [Hash] opts
+      def register_endpoint(endpoint, app, opts={})
+        raise TypeError unless app.respond_to?(:call)
+        opts = {:exclusive=>true}.merge(opts)
+        @endpoints[endpoint] = {:app=>app, :opts=>opts}
+
+        # create the receive queue for the new RPC endpoint.
+        endpoint_proc = proc { |header, data|
+
+          data = Serializer.instance.unmarshal(data)
+          event.publish('rpc/request_received', :args=>[header.message_id])
+
+          resctx = if data[:oneshot]
+                     OneshotResponseContext.new(self.node, header)
+                   else
+                     ResponseContext.new(self.node, header)
+                   end
+          begin
+            req = Rack::Request.new({:sender=>header.reply_to['command-recv.'.size..-1],
+                                     :message_id=>header.message_id
+                                    }.merge(data))
+            res = Rack::Response.new(resctx)
+            ret = app.call(req, res)
+          rescue Exception => e
+            logger.error(e)
+            resctx.response(e) unless resctx.responded?
+          end
+        }
+
+        setup_proc = proc {
+          amq.queue(endpoint_queue_name(endpoint), {:exclusive=>false, :auto_delete=>true}).bind(
+            AMQP_EXCHANGE, {:key=>endpoint_queue_name(endpoint)}
+          ).subscribe(:ack=>true, &endpoint_proc)
+          event.publish('rpc/register', :args=>[endpoint])
+        }
+
+        dm = Util::DeferedMsg.new(30)
+
+        EventMachine.schedule {
+          amq.queue(endpoint_queue_name(endpoint), {:exclusive=>false, :auto_delete=>true}).status { |messages, consumers|
+            if opts[:exclusive]
+              if consumers.to_i == 0
+                setup_proc.call
+                dm.success
+              else
+                dm.error(DuplicateEndpointError.new("Endpoint is already locked: #{endpoint}"))
+              end
+            else
+              setup_proc.call
+              dm.success
+            end
+
+            # expect DuplicateEndpointError to be raised if the endpoint exists.
+            # ignore the case of success.
+            dm.wait
+          }
+        }
+
+        dm.wait unless EventMachine.reactor_thread?
+      end
+
+      # Unregister an endpoint.
+      #
+      # @param [String] endpoint endpoint name to be removed
+      def unregister_endpoint(endpoint)
+        if @endpoints.delete(endpoint)
+          dm = Util::DeferedMsg.new(30)
+          EventMachine.schedule {
+            amq.queue(endpoint_queue_name(endpoint)).delete
+            event.publish('rpc/unregister', :args=>[endpoint])
+            dm.success
+          }
+          dm.wait unless EventMachine.reactor_thread?
+        end
+      end
+
+      # Check if the endpoint exists.
+      # @param [String] endpoint endpoint name to be checked
+      def check_endpoint(endpoint, &blk)
+        if blk
+        else
+          dm = Util::DeferedMsg.new(30)
+        end
+
+        EventMachine.schedule {
+          amq.queue(endpoint_queue_name(endpoint), {:exclusive=>false, :auto_delete=>true}).status { |messages, consumers|
+            res = consumers.to_i > 0
+            if blk
+              blk.call(res)
+            else
+              dm.success(res)
+            end
+          }
+        }
+
+        if blk
+        else
+          dm.wait unless EventMachine.reactor_thread?
+        end
+      end
+
+      private
+      def endpoint_queue_name(ns)
+        "isono.rpc.endpoint.#{ns}"
+      end
+
+      def event
+        @event ||= EventChannel.new(node)
+      end
+
+      # Publish an RPC request asynchronously.
+      # @param [RequestContext] req Request context object to be
+      #   sent. If the context's state is not :init, it will fail.
+      def send_request(req)
+        raise TypeError if !req.is_a?(RequestContext)
+        raise "Request context seems to have been sent already: #{req.state}" if req.state != :init
+
+        # possible timeout_sec values:
+        # timeout_sec == -1.0 : to be overwritten with the default timeout.
+        # timeout_sec == 0.0  : never times out.
+        # timeout_sec > 0.0   : wait for the user-set timeout.
+        if req.timeout_sec == -1.0
+          # set the default timeout if nothing updated the initial value.
+          req.timeout_sec = config_section.timeout_sec
+        end
+
+        if req.timeout_sec > 0.0
+          # register the timeout hook.
+          req.timer = EventMachine::Timer.new(req.timeout_sec) {
+            @active_requests.delete req.ticket
+            req.error_cb.call(:timeout) if req.error_cb
+          }
+        end
+
+        req.process_event(:on_ready)
+
+        EventMachine.schedule {
+          if !req.oneshot
+            @active_requests[req.ticket] = req
+          end
+
+          amq.direct(AMQP_EXCHANGE).publish(
+            Serializer.instance.marshal(req.request_hash),
+            {:message_id => req.ticket,
+             :key => endpoint_queue_name(req.endpoint),
+             :reply_to=>"command-recv.#{manifest.node_id}"}
+          )
+          req.process_event(:on_sent)
+          event.publish('rpc/request_sent', :args=>[req.hash])
+        }
+      end
+
+      class ResponseContext
+        attr_reader :node, :header
+
+        def initialize(node, header)
+          @responded = false
+          @node = node
+          @header = header
+        end
+
+        def responded?
+          @responded
+        end
+
+        def progress(ret)
+          EM.schedule {
+            publish(:inprogress, ret)
+          }
+        end
+
+        def response(ret)
+          raise "already responded" if @responded
+
+          EM.schedule {
+            @header.ack
+            if ret.is_a? Exception
+              publish(:error, {:message=>ret.message, :error_type=>ret.class.to_s})
+            else
+              publish(:success, ret)
+            end
+            EventChannel.new(@node).publish('rpc/response_sent', :args=>[@header.message_id])
+          }
+          @responded = true
+        end
+
+
+        private
+        def publish(type, body)
+          @node.amq.direct('').publish(Serializer.instance.marshal({:type=>type, :msg=>body}),
+                                       {:key=>@header.reply_to,
+                                        :message_id=>@header.message_id}
+                                      )
+        end
+      end
+
+      # Do nothing when the endpoint tries to send back in the case
+      # of a oneshot request.
+      class OneshotResponseContext < ResponseContext
+        def progress(ret)
+        end
+
+        def response(ret)
+          raise "already responded" if @responded
+
+          EM.schedule {
+            @header.ack
+            EventChannel.new(@node).publish('rpc/response_sent', :args=>[@header.message_id])
+          }
+          @responded = true
+        end
+      end
+
+      class RequestContext < OpenStruct
+        # These are kept out of @table so they do not show up in inspect().
+        attr_reader :error_cb, :success_cb, :progress_cb
+        attr_accessor :timer
+
+        def initialize(endpoint, command, args)
+          super({:request=>{
+                   :endpoint=> endpoint,
+                   :command => command,
+                   :args    => args
+                 },
+                 :endpoint=> endpoint,
+                 :command => command,
+                 :ticket  => Util.gen_id,
+                 :timeout_sec => -1.0,
+                 :oneshot => false,
+                 :sent_at => nil,
+                 :completed_at => nil,
+                 :complete_status => nil,
+                })
+
+          @success_cb = nil
+          @progress_cb = nil
+          @error_cb = nil
+          @timer = nil
+
+          @stm = Statemachine.build {
+            trans :init, :on_ready, :ready
+            trans :ready, :on_sent, :waiting, proc {
+              self.sent_at = Time.now
+              # freeze the request hash so it cannot be modified after sending.
+              self.request.freeze
+            }
+            trans :waiting, :on_received, :waiting
+            trans :waiting, :on_error, :done, proc {
+              self.completed_at = Time.now
+              @timer.cancel if @timer
+              self.complete_status = :fail
+            }
+            trans :waiting, :on_success, :done, proc {
+              self.completed_at = Time.now
+              @timer.cancel if @timer
+              self.complete_status = :success
+            }
+          }
+          @stm.context = self
+        end
+
+        def state
+          @stm.state
+        end
+
+        def process_event(ev, *args)
+          @stm.process_event(ev, *args)
+        end
+
+        def elapsed_time
+          self.completed_at.nil? ? nil : (self.completed_at - self.sent_at)
+        end
+
+        def hash
+          # state is a read-only, derived value so it is not
+          # stored in @table.
+          @table.dup.merge({:state=>self.state})
+        end
+
+        def request_hash
+          request.merge({:oneshot=>oneshot})
+        end
+
+        def on_success(&blk)
+          raise ArgumentError unless blk
+          @success_cb = blk
+        end
+
+        def on_progress(&blk)
+          raise ArgumentError unless blk
+          @progress_cb = blk
+        end
+
+        def on_error(&blk)
+          raise ArgumentError unless blk
+          @error_cb = blk
+        end
+
+        def synchronize
+          self.extend RequestSynchronize
+          self
+        end
+
+        module RequestSynchronize
+          def self.extended(mod)
+            raise TypeError, "This module is applicable only to RequestContext" unless mod.is_a?(RequestContext)
+            # overwrite callbacks
+            mod.instance_eval {
+              @q = ::Queue.new
+
+              on_success { |r|
+                @q << [:success, r]
+              }
+              on_error { |r|
+                @q << [:error, r]
+              }
+            }
+          end
+
+          public
+          def wait()
+            raise "response was received already." if state == :done
+            raise "wait() has to be called outside of the EventMachine main loop." if EventMachine.reactor_thread?
+
+            r = @q.deq
+
+            case r[0]
+            when :success
+              r[1]
+            when :error
+              raise RpcError, r[1]
+            end
+          end
+        end
+
+      end
+
+    end
+  end
+end
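Putting the pieces together, a sketch of one endpoint plus both calling styles from the @example blocks above; the 'echo' endpoint and its handler body are hypothetical, and `node` is assumed to be a connected Isono node.

  rpc = Isono::NodeModules::RpcChannel.new(node)

  # Endpoint side: anything responding to call(req, res) can serve.
  rpc.register_endpoint('echo', proc { |req, res|
    case req.command
    when 'say'
      res.response(req.args[0])
    else
      raise Isono::Rack::UnknownMethodError
    end
  })

  # Sync style: blocks the calling (non-reactor) thread until the reply,
  # raising RpcError on failure or UnknownEndpointError if nobody listens.
  rpc.request('echo', 'say', 'hello')  # => "hello"

  # Async style: callbacks fire on the EventMachine reactor thread.
  rpc.request('echo', 'say', 'hello') { |req|
    req.on_success { |r| puts r }
    req.on_error   { |e| puts "error: #{e}" }
  }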