donkey 0.1.0
- data/LICENSE +20 -0
- data/README.textile +162 -0
- data/Rakefile +57 -0
- data/VERSION.yml +4 -0
- data/lib/ass.rb +125 -0
- data/lib/ass/actor.rb +50 -0
- data/lib/ass/amqp.rb +19 -0
- data/lib/ass/callback_factory.rb +95 -0
- data/lib/ass/client.rb +19 -0
- data/lib/ass/peeper.rb +38 -0
- data/lib/ass/rpc.rb +180 -0
- data/lib/ass/serializers.rb +26 -0
- data/lib/ass/server.rb +181 -0
- data/lib/ass/topic.rb +90 -0
- data/spec/actor_spec.rb +83 -0
- data/spec/ass_spec.rb +425 -0
- data/spec/client_spec.rb +50 -0
- data/spec/rpc_spec.rb +73 -0
- data/test/ass_test.rb +7 -0
- data/test/test_helper.rb +10 -0
- metadata +85 -0
data/lib/ass/client.rb
ADDED
@@ -0,0 +1,19 @@
+class ASS::Client
+  def initialize(opts={})
+    @rpc_opts = opts
+  end
+
+  def rpc
+    # should lazy start the RPC server
+    @rpc ||= ASS.rpc(@rpc_opts)
+  end
+
+  def cast(name,method,data,opts={},meta=nil)
+    ASS.cast(name,method,data,opts,meta)
+  end
+
+  # makes synchronized call through ASS::RPC
+  def call(name,method,data,opts={},meta=nil)
+    rpc.call(name,method,data,opts,meta)
+  end
+end

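A hypothetical usage sketch of ASS::Client, based only on the signatures above. The server name "worker", the payload, and a running AMQP connection are assumptions; ASS.cast and ASS.rpc come from ass.rb, which is not shown in this excerpt, and call returns an ASS::RPC::Future (see rpc.rb below).

# Hypothetical sketch; assumes an ASS server named "worker" is reachable.
client = ASS::Client.new
# fire-and-forget: no reply expected
client.cast("worker", :resize, { "width" => 100 })
# synchronous: the lazily created ASS::RPC returns a Future
future = client.call("worker", :resize, { "width" => 100 })
result = future.wait(5) { :timed_out }  # 5s timeout; the block's value is returned on timeout
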
data/lib/ass/peeper.rb
ADDED
@@ -0,0 +1,38 @@
+# TODO should probably have the option of using
+# non auto-delete queues. This would be useful
+# for a logger. Maybe if a peeper name is given,
+# then create queues with options.
+class Peeper
+  include Callback
+  attr_reader :server_name
+  def initialize(server_name,callback)
+    @server_name = server_name
+    @clients = {}
+    @callback = build_callback(callback)
+
+    uid = "#{@server_name}.peeper.#{rand 999_999_999_999}"
+    q = MQ.queue uid, :auto_delete => true
+    q.bind(@server_name) # messages to the server would be duplicated here.
+    q.subscribe { |info,payload|
+      payload = ::Marshal.load(payload)
+      # sets context, but doesn't make the call
+      obj = prepare_callback(@callback,info,payload)
+      # there is a specific method we want to call.
+      obj.server(payload[:method],payload[:data])
+
+      # bind to the peeped client's message queue if we've not seen it before.
+      unless @clients.has_key? info.routing_key
+        @clients[info.routing_key] = true
+        client_q = MQ.queue "#{uid}--#{info.routing_key}",
+                            :auto_delete => true
+        # messages to the client would be duplicated here.
+        client_q.bind("#{server_name}--", :routing_key => info.routing_key)
+        client_q.subscribe { |info,payload|
+          payload = ::Marshal.load(payload)
+          obj = prepare_callback(@callback,info,payload)
+          obj.client(payload[:method],payload[:data])
+        }
+      end
+    }
+  end
+end

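A hypothetical sketch of attaching a Peeper for logging. The callback contract (an object responding to server(method,data) and client(method,data)) is taken from the calls above; build_callback comes from the Callback module elsewhere in the gem (callback_factory.rb, not shown here), so whether it accepts a bare class like this is an assumption.

# Hypothetical logging callback; both methods mirror the calls made above.
class TrafficLogger
  def server(method, data)
    puts "server got #{method}: #{data.inspect}"
  end
  def client(method, data)
    puts "client got #{method}: #{data.inspect}"
  end
end

# Duplicates all traffic to and from the "worker" server into TrafficLogger.
Peeper.new("worker", TrafficLogger)
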
data/lib/ass/rpc.rb
ADDED
@@ -0,0 +1,180 @@
+# An RPC client is a transient entity that dies
+# with the process that created it. Its purpose
+# is only to provide a synchronized interface to
+# the asynchronous services.
+require 'thread'
+require 'monitor'
+class ASS::RPC
+  # stolen from nanite
+  def self.random_id
+    values = [
+      rand(0x0010000),
+      rand(0x0010000),
+      rand(0x0010000),
+      rand(0x0010000),
+      rand(0x0010000),
+      rand(0x1000000),
+      rand(0x1000000),
+    ]
+    "%04x%04x%04x%04x%04x%06x%06x" % values
+  end
+
+  class Future
+    # TODO set meta
+    attr_reader :message_id
+    attr_accessor :header, :data, :method, :meta
+    attr_accessor :timeout
+    def initialize(rpc,message_id)
+      @message_id = message_id
+      @rpc = rpc
+      @timeout = false
+      @done = false
+    end
+
+    def wait(timeout=nil,&block)
+      @rpc.wait(self,timeout,&block) # synchronous call that will block
+    end
+
+    def done!
+      @done = true
+    end
+
+    def done?
+      @done
+    end
+
+    def timeout?
+      @timeout
+    end
+
+    def inspect
+      "#<#{self.class} #{message_id}>"
+    end
+  end
+
+  attr_reader :name
+  attr_reader :buffer, :futures, :ready
+  def initialize(opts={})
+    raise "can't run rpc client in the same thread as eventmachine" if EM.reactor_thread?
+    self.extend(MonitorMixin)
+    @seq = 0
+    # this queue is used to synchronize the RPC
+    # user thread and the AMQP eventmachine thread.
+    @buffer = Queue.new
+    @ready = {} # the ready results not yet waited for
+    @futures = {} # all futures not yet waited for.
+    # Creates an exclusive queue to serve the RPC client.
+    @rpc_id = ASS::RPC.random_id.to_s
+    buffer = @buffer # closure binding for reactor
+    exchange = ASS.mq.direct("__rpc__")
+    @name = "__rpc__#{@rpc_id}"
+    queue = ASS.mq.queue(@name,
+                         :exclusive => true,
+                         :auto_delete => true)
+    queue.bind("__rpc__",:routing_key => @rpc_id)
+    queue.subscribe { |header,payload|
+      payload = ::Marshal.load(payload)
+      buffer << [header,payload]
+    }
+  end
+
+  def call(server_name,method,data=nil,opts={},meta=nil)
+    self.synchronize do
+      message_id = @seq.to_s # message_id has to be unique for this RPC client.
+      # by default route message to the exchange @name@, with routing key @name@
+      ASS.call(server_name,
+               method,
+               data,
+               # can't override these options
+               opts.merge(:message_id => message_id,
+                          :reply_to => "__rpc__",
+                          :key => @rpc_id),
+               meta)
+      @seq += 1
+      @futures[message_id] = Future.new(self,message_id)
+    end
+  end
+
+  # the idea is to block on a synchronized queue
+  # until we get the future we want.
+  #
+  # WARNING: blocks forever if the thread
+  # calling wait is the same as the EventMachine
+  # thread.
+  #
+  # It is safe (btw) to use the RPC client within
+  # an ASS server/actor, because the wait is in an
+  # EM worker thread, rather than the EM thread
+  # itself. The EM thread is still free to process
+  # the queue. CAVEAT: you could run out of EM
+  # worker threads.
+  def wait(future,timeout=nil)
+    return future.data if future.done? # future was waited on before
+    # we can have more fine grained synchronization later.
+    ## easiest thing to do (later) is use a threadsafe hash for @futures and @ready.
+    ### But it's actually trickier than
+    ### that. Before each @buffer.pop, a thread
+    ### has to check again if it sees the result
+    ### in @ready.
+    self.synchronize do
+      timer = nil
+      if timeout
+        timer = EM.add_timer(timeout) {
+          @buffer << [:timeout,future.message_id]
+        }
+      end
+      ready_future = nil
+      if @ready.has_key? future.message_id
+        @ready.delete future.message_id
+        ready_future = future
+      else
+        while true
+          header,payload = @buffer.pop # synchronize. like erlang's mailbox select.
+          if header == :timeout # timeout the future we are waiting for.
+            message_id = payload
+            # if we got a timeout from a previous wait, throw it away.
+            next if future.message_id != message_id
+            future.timeout = true
+            future.done!
+            @futures.delete future.message_id
+            return yield # return the value of the timeout block
+          end
+          data = payload["data"]
+          some_future = @futures[header.message_id]
+          # If we didn't find the future among the
+          # futures, it must have timed out. Just
+          # throw the result away and keep processing.
+          next unless some_future
+          some_future.timeout = false
+          some_future.header = header
+          some_future.data = data
+          some_future.method = payload["method"]
+          some_future.meta = payload["meta"]
+          if some_future == future
+            # The future we are waiting for
+            EM.cancel_timer(timer) if timer
+            ready_future = future
+            break
+          else
+            # Ready, but we are not waiting for it. Save for later.
+            @ready[some_future.message_id] = some_future
+          end
+        end
+      end
+      ready_future.done!
+      @futures.delete ready_future.message_id
+      return ready_future.data
+    end
+
+  end
+
+  def waitall
+    @futures.values.map { |future|
+      wait(future)
+    }
+  end
+
+  def inspect
+    "#<#{self.class} #{self.name}>"
+  end
+end

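A hypothetical sketch of the call/wait flow, assuming an ASS server named "worker" is running and that this code runs outside the EventMachine thread (as the constructor requires). ASS.rpc(opts) is used the same way client.rb uses it above. Out-of-order waits work because results that arrive early are parked in @ready.

rpc = ASS.rpc({})                     # builds an ASS::RPC, as in client.rb above
f1  = rpc.call("worker", :slow_op, 1)
f2  = rpc.call("worker", :slow_op, 2)
# wait out of order: f2 blocks here while f1's reply is parked in @ready
puts rpc.wait(f2)
puts rpc.wait(f1, 10) { "gave up" }   # the timeout block's value is returned on timeout
rpc.waitall                           # drain anything still outstanding
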
data/lib/ass/serializers.rb
ADDED
@@ -0,0 +1,26 @@
+module ASS
+  module JSON
+    require 'json'
+    def self.load(raw)
+      ::JSON.parse(raw)
+    end
+
+    def self.dump(obj)
+      obj.to_json
+    end
+  end
+
+  module Marshal
+    def self.load(raw)
+      ::Marshal.load(raw)
+    end
+
+    def self.dump(obj)
+      ::Marshal.dump(obj)
+    end
+  end
+
+  # mongodb BSON
+  module BSON
+  end
+end

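Both serializer modules expose the same load/dump pair, so they can be swapped behind ASS.serializer (used by server.rb and topic.rb below); how the active serializer is chosen lives in ass.rb, which is not part of this excerpt. A round-trip sketch:

require 'ass/serializers'   # hypothetical require path, matching data/lib/ass/serializers.rb

ASS::JSON.load(ASS::JSON.dump({ "a" => 1 }))        # => {"a"=>1}
ASS::Marshal.load(ASS::Marshal.dump({ :a => 1 }))   # => {:a=>1}
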
data/lib/ass/server.rb
ADDED
@@ -0,0 +1,181 @@
+class ASS::Server
+  attr_reader :name
+
+  def initialize(name,opts={})
+    @name = name
+    # the server is a fanout (ignores routing key)
+    @exchange = ASS.mq.fanout(name,opts)
+  end
+
+  def exchange
+    @exchange
+  end
+
+  def queue(opts={})
+    unless @queue
+      @queue ||= ASS.mq.queue(self.name,opts)
+      @queue.bind(self.exchange)
+    end
+    self
+  end
+
+  # takes options available to MQ::Queue#subscribe
+  def react(_callback=nil,_opts=nil,&_block)
+    if _block
+      _opts = _callback
+      _callback = _block
+    end
+    _opts = {} if _opts.nil?
+
+    # a second call would just swap out the callback.
+    @factory = ASS::CallbackFactory.new(_callback)
+
+    return(self) if @subscribed
+    @subscribed = true
+    @ack = _opts[:ack]
+    self.queue unless @queue
+
+    # yikes!! potential for scary bugs
+    @queue.subscribe(_opts) do |info,payload|
+      payload = ASS.serializer.load(payload)
+      #p [info,payload]
+      callback_object = @factory.callback_for(self,info,payload)
+      proc { #|callback_object=prepare_callback(@callback,info,payload)|
+        operation = proc {
+          with_handlers do
+            callback_object.send(:on_call,payload["data"])
+          end
+        }
+        done = proc { |result|
+          # the client MUST exist, otherwise it's an error.
+          ## FIXME it's bad if the server dies b/c
+          ## the client isn't there. It's bad that
+          ## this can cause the server to fail.
+          ##
+          ## I am not sure what happens if a message
+          ## is unroutable. I think it's just
+          ## silently dropped unless the mandatory
+          ## option is given.
+          case status = result[0]
+          when :ok
+            if info.reply_to
+              data = result[1]
+              # respond with cast (we don't want
+              # to get a response to our response,
+              # then respond to the response of
+              # this response, and so on.)
+              ASS.cast(info.reply_to,
+                       payload["method"],
+                       data, {
+                         :routing_key => info.routing_key,
+                         :message_id => info.message_id},
+                       payload["meta"])
+            end
+            info.ack if @ack
+          when :resend
+            # resend the same message
+            ASS.call(self.name,
+                     payload["method"],
+                     payload["data"], {
+                       :reply_to => info.reply_to, # this could be nil for cast
+                       :routing_key => info.routing_key,
+                       :message_id => info.message_id},
+                     payload["meta"])
+            info.ack if @ack
+          when :discard
+            # no response back to client
+            info.ack if @ack
+          when :error
+            # programmatic error. don't ack
+            error = result[1]
+            if callback_object.respond_to?(:on_error)
+              begin
+                callback_object.on_error(error,payload["data"])
+                info.ack if @ack # successful error handling
+              rescue => more_error
+                $stderr.puts more_error
+                $stderr.puts more_error.backtrace
+                ASS.stop
+              end
+            else
+              # unhandled error
+              $stderr.puts error
+              $stderr.puts error.backtrace
+              ASS.stop
+            end
+            # don't ack.
+          end
+        }
+        EM.defer operation, done
+      }.call
+
+    end
+    self
+  end
+
+  # unsubscribe from the queue
+  def stop(&block) # allows callback
+    if block
+      @queue.unsubscribe(&block)
+    else
+      @queue.unsubscribe
+    end
+    @subscribed = false
+  end
+
+  def call(name,method,data,opts={},meta=nil)
+    reply_to = opts[:reply_to] || self.name
+    ASS.call(name,
+             method,
+             data,
+             opts.merge(:reply_to => reply_to),
+             meta)
+  end
+
+  def cast(name,method,data,opts={},meta=nil)
+    reply_to = nil # the remote server will not reply
+    ASS.call(name,
+             method,
+             data,
+             opts.merge(:reply_to => nil),
+             meta)
+  end
+
+  def inspect
+    "#<#{self.class} #{self.name}>"
+  end
+
+  private
+
+  def with_handlers
+    not_discarded = false
+    not_resent = false
+    not_raised = false
+    result = nil
+    error = nil
+    catch(:__ass_discard) do
+      catch(:__ass_resend) do
+        begin
+          result = yield
+          not_raised = true
+        rescue => e
+          error = e
+        end
+        not_resent = true
+      end
+      not_discarded = true
+    end
+
+    if not_discarded && not_resent && not_raised
+      [:ok,result]
+    elsif not_discarded == false
+      [:discard]
+    elsif not_resent == false
+      [:resend] # resend original payload
+    elsif not_raised == false
+      [:error,error]
+    end
+  end
+end

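A hypothetical sketch of serving requests with ASS::Server#react. The on_call(data) and optional on_error(error,data) contracts are taken from the subscribe block above; whether ASS::CallbackFactory accepts a plain class like this is an assumption, since callback_factory.rb is not shown in this excerpt, and a live AMQP connection is assumed.

# Hypothetical handler; on_call's return value is cast back to info.reply_to.
class Doubler
  def on_call(data)
    data * 2
  end

  # optional: called with the raised error and the original request data
  def on_error(error, data)
    $stderr.puts "failed on #{data.inspect}: #{error}"
  end
end

server = ASS::Server.new("worker")
server.react(Doubler, :ack => true)   # subscribe with message acking enabled
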
data/lib/ass/topic.rb
ADDED
@@ -0,0 +1,90 @@
+class ASS::Topic
+  class << self
+    def tunnel(name,opts={})
+      MQ.topic(name,opts)
+    end
+
+    def event(name,key,data,opts={})
+      ASS.dummy_exchange(name).publish(ASS.serializer.dump(data),
+                                       opts.merge(:routing_key => key))
+    end
+
+    def funnel(tunnel_name,funnel_name,key_matcher,&block)
+      # the actor should respond to on_event(key,data)
+      funnel = Funnel.new(tunnel_name,funnel_name,key_matcher)
+      if block
+        funnel.react(&block)
+      end
+      funnel
+    end
+  end
+
+  class Funnel
+    def initialize(tunnel_name,funnel_name,key_matcher)
+      @funnel_name = funnel_name
+      @exchange = ASS.dummy_exchange(tunnel_name)
+      @matcher = key_matcher
+    end
+
+    def queue(opts={})
+      unless @queue
+        @queue = MQ.queue(@funnel_name,opts)
+        @queue.bind(@exchange.name,
+                    opts.merge({ :key => @matcher }))
+      end
+      @queue
+    end
+
+    def react(callback=nil,opts={},&block)
+      callback = build_callback(callback || block)
+      me = self
+      self.queue.subscribe(opts) do |info,payload|
+        data = ASS.serializer.load(payload)
+        handler = callback.new
+        work = lambda {
+          begin
+            handler.send(:on_event,info.routing_key,data)
+          rescue => e
+            me.unhandled_error(e)
+          end
+        }
+        done = lambda { |_|
+          # nothing left to do
+        }
+        EM.defer work, done
+      end
+    end
+
+    def unhandled_error(e)
+      $stderr.puts e
+      $stderr.puts e.backtrace
+      ASS.stop
+      raise e
+    end
+
+    def build_callback(callback)
+      c = case callback
+          when Proc
+            Class.new(&callback)
+          when Class
+            callback
+          when Module
+            Class.new { include callback }
+          else
+            raise "can only build a topic callback from a Proc, Class, or Module"
+          end
+      raise "must react to on_event" unless c.public_method_defined?(:on_event)
+      c
+    end
+  end
+
+  # def initialize(name,opts={})
+  #   @exchange = MQ.topic(name,opts)
+  # end
+
+  # def publish(key,payload,opts={})
+  #   @exchange.publish(::Marshal.dump(payload),opts.merge(:routing_key => key))
+  # end
+
+end

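A hypothetical pub/sub sketch over a topic exchange, assuming the AMQP connection is already up and that ASS.dummy_exchange (defined in ass.rb, not shown here) returns a handle to the declared exchange. The block form of funnel defines on_event through build_callback's Proc branch; the exchange name "events", queue name "audit", and routing keys are made up for illustration.

ASS::Topic.tunnel("events")                        # declare the topic exchange
ASS::Topic.funnel("events", "audit", "user.#") do  # queue "audit" receives every user.* event
  def on_event(key, data)
    puts "#{key}: #{data.inspect}"
  end
end
ASS::Topic.event("events", "user.signup", { "id" => 42 })
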