eventbox 0.1.0
- checksums.yaml +7 -0
- checksums.yaml.gz.sig +1 -0
- data.tar.gz.sig +0 -0
- data/.appveyor.yml +28 -0
- data/.gitignore +8 -0
- data/.travis.yml +16 -0
- data/.yardopts +9 -0
- data/CODE_OF_CONDUCT.md +74 -0
- data/Gemfile +10 -0
- data/LICENSE.txt +21 -0
- data/README.md +381 -0
- data/Rakefile +14 -0
- data/bin/console +14 -0
- data/bin/setup +8 -0
- data/docs/downloads.md +143 -0
- data/docs/server.md +88 -0
- data/docs/threadpool.md +73 -0
- data/eventbox.gemspec +31 -0
- data/lib/eventbox.rb +270 -0
- data/lib/eventbox/argument_wrapper.rb +76 -0
- data/lib/eventbox/boxable.rb +298 -0
- data/lib/eventbox/event_loop.rb +385 -0
- data/lib/eventbox/object_registry.rb +35 -0
- data/lib/eventbox/sanitizer.rb +342 -0
- data/lib/eventbox/thread_pool.rb +170 -0
- data/lib/eventbox/timer.rb +172 -0
- data/lib/eventbox/version.rb +5 -0
- metadata +156 -0
- metadata.gz.sig +0 -0
data/lib/eventbox/argument_wrapper.rb
@@ -0,0 +1,76 @@
# frozen-string-literal: true

class Eventbox
  module ArgumentWrapper
    def self.build(method, name)
      parameters = method.parameters
      if parameters.find { |t, n| n.to_s.start_with?("€") }

        # Change a Proc object to a Method, so that we are able to distinguish between :opt and :req parameters.
        # This is because Ruby (wrongly IMHO) reports required parameters as optional.
        # The only way to get the true parameter types is through define_method.
        is_proc = Proc === method
        if is_proc
          cl = Class.new do
            define_method(:to_method, &method)
          end
          method = cl.instance_method(:to_method)
          parameters = method.parameters
        end

        decls = []
        convs = []
        rets = []
        parameters.each_with_index do |(t, n), i|
          €var = n.to_s.start_with?("€")
          case t
          when :req
            decls << n
            if €var
              convs << "#{n} = WrappedObject.new(#{n}, source_event_loop, :#{n})"
            end
            rets << n
          when :opt
            decls << "#{n}=nil"
            if €var
              convs << "#{n} = #{n} ? WrappedObject.new(#{n}, source_event_loop, :#{n}) : []"
            end
            rets << "*#{n}"
          when :rest
            decls << "*#{n}"
            if €var
              convs << "#{n}.map!{|v| WrappedObject.new(v, source_event_loop, :#{n}) }"
            end
            rets << "*#{n}"
          when :keyreq
            decls << "#{n}:"
            if €var
              convs << "#{n} = WrappedObject.new(#{n}, source_event_loop, :#{n})"
            end
            rets << "#{n}: #{n}"
          when :key
            decls << "#{n}:nil"
            if €var
              convs << "#{n} = #{n} ? {#{n}: WrappedObject.new(#{n}, source_event_loop, :#{n})} : {}"
            else
              convs << "#{n} = #{n} ? {#{n}: #{n}} : {}"
            end
            rets << "**#{n}"
          when :keyrest
            decls << "**#{n}"
            if €var
              convs << "#{n}.each{|k, v| #{n}[k] = WrappedObject.new(v, source_event_loop, :#{n}) }"
            end
            rets << "**#{n}"
          when :block
            if €var
              raise "block to `#{name}' can't be wrapped"
            end
          end
        end
        code = "#{is_proc ? :proc : :lambda} do |source_event_loop#{decls.map{|s| ",#{s}"}.join }| # #{name}\n #{convs.join("\n")}\n [#{rets.join(",")}]\nend"
        instance_eval(code, "wrapper code defined in #{__FILE__}:#{__LINE__} for #{name}")
      end
    end
  end
end
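For orientation, this is roughly the wrapper that ArgumentWrapper.build generates for a hypothetical event-scope method declared as `async_call def send_to(€client, message)` - a sketch of what the `code` string above evaluates to; the method name is invented, and WrappedObject is the class referenced in the conversions:

lambda do |source_event_loop, €client, message| # send_to
  €client = WrappedObject.new(€client, source_event_loop, :€client)
  [€client, message]
end

EventLoop#async_call and its siblings invoke this wrapper with the caller's event loop (`args = wrapper.call(source_event_loop, *args) if wrapper`), so €-prefixed arguments arrive in the event scope as Eventbox::WrappedObject instances, as described in the Boxable doc comments.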
data/lib/eventbox/boxable.rb
@@ -0,0 +1,298 @@
# frozen-string-literal: true

class Eventbox

  # Extend modules with the Eventbox method creation functions.
  #
  # This works like so:
  #
  #   module MyHelpers
  #     extend Eventbox::Boxable
  #     sync_call def hello
  #       puts "hello!"
  #     end
  #   end
  #
  #   class MyBox < Eventbox
  #     include MyHelpers
  #   end
  #
  #   MyBox.new.hello   # prints "hello!"
  #
  module Boxable
    private

    # @private
    def with_block_or_def(name, block, &cexec)
      alias_method("__#{name}__", name)
      private("__#{name}__")
      remove_method(name)
      define_method(name, &cexec)
      private name if name == :init
      name
    end

    # Define a threadsafe method for asynchronous (fire-and-forget) calls.
    #
    # The created method can be safely called from any thread.
    # All method arguments are passed through the {Sanitizer}.
    # Arguments prefixed by a € sign are automatically passed as {Eventbox::WrappedObject}.
    #
    # The method itself must not do any blocking calls or expensive computations - this would impair the responsiveness of the {Eventbox} instance.
    # Use {action} in these cases instead.
    #
    # In contrast to {sync_call} it's not possible to call external blocks or proc objects from {async_call} methods.
    #
    # The method always returns +self+ to the caller.
    def async_call(name, &block)
      unbound_method = self.instance_method(name)
      wrapper = ArgumentWrapper.build(unbound_method, name)
      with_block_or_def(name, block) do |*args, &cb|
        if @__event_loop__.event_scope?
          # Use the correct method within the class hierarchy, instead of just self.send(*args).
          # Otherwise super() would start an infinite recursion.
          unbound_method.bind(eventbox).call(*args, &cb)
        else
          @__event_loop__.async_call(eventbox, name, args, cb, wrapper)
        end
        self
      end
    end

    # Define a method for synchronous calls.
    #
    # The created method can be safely called from any thread.
    # It is similar to {async_call}, but the method waits until the method body has been executed and returns its return value.
    # Since all processing within the event scope of an {Eventbox} instance must not involve blocking operations, sync calls can only return immediate values.
    # For deferred results use {yield_call} instead.
    #
    # It's possible to call external blocks or proc objects from {sync_call} methods.
    # Blocks are executed by the same thread that calls the {sync_call} method at that time.
    #
    # All method arguments as well as the result value are passed through the {Sanitizer}.
    # Arguments prefixed by a € sign are automatically passed as {Eventbox::WrappedObject}.
    #
    # The method itself must not do any blocking calls or expensive computations - this would impair the responsiveness of the {Eventbox} instance.
    # Use {action} in these cases instead.
    def sync_call(name, &block)
      unbound_method = self.instance_method(name)
      wrapper = ArgumentWrapper.build(unbound_method, name)
      with_block_or_def(name, block) do |*args, &cb|
        if @__event_loop__.event_scope?
          unbound_method.bind(eventbox).call(*args, &cb)
        else
          answer_queue = Queue.new
          sel = @__event_loop__.sync_call(eventbox, name, args, cb, answer_queue, wrapper)
          @__event_loop__.callback_loop(answer_queue, sel)
        end
      end
    end

    # Define a method for calls with deferred result.
    #
    # This call type is similar to {sync_call}; however it's not the return value of the method that is returned to the caller.
    # Instead the method is called with one {CompletionProc additional argument} in the event scope, which is used to yield a result value or raise an exception.
    # In contrast to a +return+ statement, the execution of the method continues after yielding a result.
    #
    # The result value can be yielded within the called method, but it can also be stored and called by any other event scope or external method, leading to a deferred method return.
    # The external thread calling this method is suspended until a result is yielded.
    # However the Eventbox object stays responsive to calls from other threads.
    #
    # The created method can be safely called from any thread.
    # If yield methods are called in the event scope, they must get a Proc object as the last argument.
    # It is called when a result has been yielded.
    #
    # It's possible to call external blocks or proc objects from {yield_call} methods up to the point when the result was yielded.
    # Blocks are executed by the same thread that calls the {yield_call} method at that time.
    #
    # All method arguments as well as the result value are passed through the {Sanitizer}.
    # Arguments prefixed by a € sign are automatically passed as {Eventbox::WrappedObject}.
    #
    # The method itself as well as the Proc object must not do any blocking calls or expensive computations - this would impair the responsiveness of the {Eventbox} instance.
    # Use {action} in these cases instead.
    def yield_call(name, &block)
      unbound_method = self.instance_method(name)
      wrapper = ArgumentWrapper.build(unbound_method, name)
      with_block_or_def(name, block) do |*args, **kwargs, &cb|
        if @__event_loop__.event_scope?
          @__event_loop__.safe_yield_result(args, name)
          args << kwargs unless kwargs.empty?
          unbound_method.bind(eventbox).call(*args, &cb)
          self
        else
          answer_queue = Queue.new
          sel = @__event_loop__.yield_call(eventbox, name, args, kwargs, cb, answer_queue, wrapper)
          @__event_loop__.callback_loop(answer_queue, sel)
        end
      end
    end

    # Threadsafe write access to instance variables.
    def attr_writer(name)
      async_call(define_method("#{name}=") do |value|
        instance_variable_set("@#{name}", value)
      end)
    end

    # Threadsafe read access to instance variables.
    def attr_reader(name)
      sync_call(define_method("#{name}") do
        instance_variable_get("@#{name}")
      end)
    end

    # Threadsafe read and write access to instance variables.
    #
    # Attention: Be careful with read-modify-write operations - they are *not* atomic but are executed as two independent operations.
    #
    # This will lose counter increments, since `counter` is incremented in a non-atomic manner:
    #   attr_accessor :counter
    #   async_call def start
    #     10.times { do_something }
    #   end
    #   action def do_something
    #     self.counter += 1
    #   end
    #
    # Instead don't use accessors but do the increment within one method call, like so:
    #   async_call def start
    #     10.times { do_something }
    #   end
    #   action def do_something
    #     increment 1
    #   end
    #   async_call def increment(by)
    #     @counter += by
    #   end
    def attr_accessor(name)
      attr_reader name
      attr_writer name
    end

    # Define a private method for asynchronous execution.
    #
    # The call to the action method returns immediately after starting a new action.
    # It returns an {Action} object.
    # By default each call to an action method spawns a new thread which executes the code of the action definition.
    # Alternatively a threadpool can be assigned by {with_options}.
    #
    # All method arguments are passed through the {Sanitizer}.
    #
    # Actions can return state changes or objects to the event loop by calls to methods created by {async_call}, {sync_call} or {yield_call} or through calling {async_proc}, {sync_proc} or {yield_proc} objects.
    # To avoid unsafe shared objects, an action has its own set of local and instance variables.
    # It doesn't have access to variables defined by other methods.
    #
    # The {Action} object can be used to interrupt the program execution by an exception.
    # See {Eventbox::Action} for further information.
    # If the action method accepts one more argument than given to the action call, it is set to the corresponding {Action} instance:
    #   async_call def init
    #     do_something("value1")
    #   end
    #   action def do_something(str, action)
    #     str             # => "value1"
    #     action.current? # => true
    #     # `action' can be passed to the event scope or to external scope,
    #     # in order to send signals per Action#raise
    #   end
    #
    def action(name, &block)
      unbound_method = self.instance_method(name)
      with_block_or_def(name, block) do |*args, &cb|
        raise InvalidAccess, "action must not be called with a block" if cb

        gc_actions = self.class.eventbox_options[:gc_actions]
        sandbox = self.class.allocate
        sandbox.instance_variable_set(:@__event_loop__, @__event_loop__)
        sandbox.instance_variable_set(:@__eventbox__, gc_actions ? WeakRef.new(self) : self)
        meth = unbound_method.bind(sandbox)

        if @__event_loop__.event_scope?
          args = Sanitizer.sanitize_values(args, @__event_loop__, nil)
        end
        # Start a new action thread and return an Action instance
        @__event_loop__.start_action(meth, name, args)
      end
      private name
      name
    end
  end

  # An Action object is a thin wrapper around a Ruby thread.
  #
  # It is returned by {Eventbox#action} and optionally passed as the last argument to action methods.
  # It can be used to interrupt the program execution by an exception.
  #
  # However, in contrast to Ruby's builtin threads, any interruption must be explicitly allowed.
  # Exceptions raised to an action thread are delayed until a code block is reached which explicitly allows interruption.
  # The only exception which is delivered to the action thread by default is {Eventbox::AbortAction}.
  # It is raised by {Eventbox#shutdown!} and is delivered as soon as a blocking operation is executed.
  #
  # An Action object can be used to stop the action while it is in a blocking operation.
  # Make sure that the `rescue` statement is outside of the block passed to `handle_interrupt`.
  # Otherwise it could happen that the rescuing code is interrupted by the signal.
  # Sending custom signals to an action works like this:
  #
  #   class MySignal < Interrupt
  #   end
  #
  #   async_call def init
  #     a = start_sleep
  #     a.raise(MySignal)
  #   end
  #
  #   action def start_sleep
  #     Thread.handle_interrupt(MySignal => :on_blocking) do
  #       sleep
  #     end
  #   rescue MySignal
  #     puts "well-rested"
  #   end
  class Action
    attr_reader :name

    def initialize(name, thread, event_loop)
      @name = name
      @thread = thread
      @event_loop = event_loop
    end

    attr_reader :event_loop
    private :event_loop

    # Send a signal to the running action.
    #
    # The signal must be a kind of Exception.
    # See {Action} about asynchronous delivery of signals.
    #
    # This method does nothing if the action is already finished.
    #
    # If {raise} is called within the action (#current? returns `true`), all exceptions are delivered immediately.
    # This happens regardless of the current interrupt mask set by `Thread.handle_interrupt`.
    def raise(*args)
      # ignore raise, if sent from the action thread
      if AbortAction === args[0] || (Module === args[0] && args[0].ancestors.include?(AbortAction))
        ::Kernel.raise InvalidAccess, "Use of Eventbox::AbortAction is not allowed - use Action#abort or a custom exception subclass"
      end

      if @event_loop.event_scope?
        args = Sanitizer.sanitize_values(args, @event_loop, nil)
      end
      @thread.raise(*args)
    end

    # Send an AbortAction to the running thread.
    def abort
      @thread.raise AbortAction
    end

    # Does the current thread belong to this action?
    def current?
      @thread.respond_to?(:current?) ? @thread.current? : (@thread == Thread.current)
    end

    # @private
    def join
      @thread.join
    end
  end
end
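Taken together, the call types defined above can be combined as in the following usage sketch. The Counter class and its methods are invented for illustration; the behaviour follows the doc comments of async_call, sync_call and yield_call:

require "eventbox"

class Counter < Eventbox
  async_call def init
    @count = 0
    @waiting = []
  end

  # Fire-and-forget: returns self immediately, the body runs in the event scope.
  async_call def increment
    @count += 1
    # Deliver the new value to every stored completion proc of next_value.
    @waiting.each { |complete| complete.call(@count) }
    @waiting.clear
  end

  # Returns the value from the event scope to the calling thread.
  sync_call def count
    @count
  end

  # Deferred result: the completion proc is stored and called later.
  yield_call def next_value(result)
    @waiting << result
  end
end

c = Counter.new
c.increment
c.count                            # => 1
t = Thread.new { p c.next_value }  # blocks until the next increment
sleep 0.1                          # give the thread time to block (illustration only)
c.increment                        # the waiting thread prints 2
t.join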
data/lib/eventbox/event_loop.rb
@@ -0,0 +1,385 @@
# frozen-string-literal: true

class Eventbox
  # @private
  #
  # This class manages the calls to event scope methods and procs, comparable to an event loop.
  # It doesn't use an explicit event loop, but uses the calling thread to process the event.
  #
  # All methods prefixed with "_" require @mutex to be acquired when they are called.
  class EventLoop
    def initialize(threadpool, guard_time)
      @threadpool = threadpool
      @running_actions = []
      @running_actions_for_gc = []
      @mutex = Mutex.new
      @shutdown = false
      @guard_time_proc = case guard_time
        when NilClass
          nil
        when Numeric
          guard_time and proc do |dt, name|
            if dt > guard_time
              ecaller = caller.find{|t| !(t=~/lib\/eventbox(\/|\.rb:)/) }
              warn "guard time exceeded: #{"%2.3f" % dt} sec (limit is #{guard_time}) in `#{name}' called from `#{ecaller}' - please move blocking tasks to actions"
            end
          end
        when Proc
          guard_time
        else
          raise ArgumentError, "guard_time should be Numeric, Proc or nil"
      end
    end

    # Abort all running action threads.
    def send_shutdown(object_id=nil)
      # warn "shutdown called for object #{object_id} with #{@running_actions.size} threads #{@running_actions.map(&:object_id).join(",")}"

      # The finalizer doesn't allow suspension through a Mutex, so we access a read-only copy of @running_actions.
      # To avoid race conditions with thread creation, set a flag before the loop.
      @shutdown = true

      # terminate all running action threads
      begin
        @running_actions_for_gc.each(&:abort)
      rescue ThreadError
        # ThreadPool requires locking a mutex, which fails in trap context.
        # So defer the abort through another thread.
        Thread.new do
          @running_actions_for_gc.each(&:abort)
        end
      end

      nil
    end

    def shutdown(&completion_block)
      send_shutdown
      if event_scope?
        if completion_block
          completion_block = new_async_proc(&completion_block)

          # Thread might not be tagged to a calling event scope
          source_event_loop = Thread.current.thread_variable_get(:__event_loop__)
          Thread.current.thread_variable_set(:__event_loop__, nil)
          begin
            @threadpool.new do
              @running_actions_for_gc.each(&:join)
              completion_block.call
            end
          ensure
            Thread.current.thread_variable_set(:__event_loop__, source_event_loop)
          end
        end
      else
        raise InvalidAccess, "external shutdown call doesn't take a block but blocks until threads have terminated" if completion_block
        @running_actions_for_gc.each(&:join)
      end
    end

    # Make a copy of the thread list for use in shutdown.
    # The copy is replaced in an atomic operation, so that it can be read lock-free in shutdown.
    def _update_action_threads_for_gc
      @running_actions_for_gc = @running_actions.dup
    end

    # Is the caller running within the event scope context?
    def event_scope?
      @mutex.owned?
    end

    def synchronize_external
      if event_scope?
        yield
      else
        @mutex.synchronize do
          yield
        end
      end
    end

    def with_call_frame(name, answer_queue)
      source_event_loop = Thread.current.thread_variable_get(:__event_loop__)
      @mutex.lock
      begin
        Thread.current.thread_variable_set(:__event_loop__, self)
        @latest_answer_queue = answer_queue
        @latest_call_name = name
        start_time = Time.now
        yield(source_event_loop)
      ensure
        @latest_answer_queue = nil
        @latest_call_name = nil
        @mutex.unlock
        diff_time = Time.now - start_time
        @guard_time_proc&.call(diff_time, name)
        Thread.current.thread_variable_set(:__event_loop__, source_event_loop)
      end
      source_event_loop
    end

    def async_call(box, name, args, block, wrapper)
      with_call_frame(name, nil) do |source_event_loop|
        args = wrapper.call(source_event_loop, *args) if wrapper
        args = Sanitizer.sanitize_values(args, source_event_loop, self, name)
        block = Sanitizer.sanitize_value(block, source_event_loop, self, name)
        box.send("__#{name}__", *args, &block)
      end
    end

    def sync_call(box, name, args, block, answer_queue, wrapper)
      with_call_frame(name, answer_queue) do |source_event_loop|
        args = wrapper.call(source_event_loop, *args) if wrapper
        args = Sanitizer.sanitize_values(args, source_event_loop, self, name)
        block = Sanitizer.sanitize_value(block, source_event_loop, self, name)
        res = box.send("__#{name}__", *args, &block)
        res = Sanitizer.sanitize_value(res, self, source_event_loop)
        answer_queue << res
      end
    end

    def yield_call(box, name, args, kwargs, block, answer_queue, wrapper)
      with_call_frame(name, answer_queue) do |source_event_loop|
        args << _completion_proc(answer_queue, name, source_event_loop)
        args << kwargs unless kwargs.empty?
        args = wrapper.call(source_event_loop, *args) if wrapper
        args = Sanitizer.sanitize_values(args, source_event_loop, self, name)
        block = Sanitizer.sanitize_value(block, source_event_loop, self, name)
        box.send("__#{name}__", *args, &block)
      end
    end

    # Anonymous version of async_call
    def async_proc_call(pr, args, arg_block, wrapper)
      with_call_frame(AsyncProc, nil) do |source_event_loop|
        args = wrapper.call(source_event_loop, *args) if wrapper
        args = Sanitizer.sanitize_values(args, source_event_loop, self)
        arg_block = Sanitizer.sanitize_value(arg_block, source_event_loop, self)
        pr.yield(*args, &arg_block)
      end
    end

    # Anonymous version of sync_call
    def sync_proc_call(pr, args, arg_block, answer_queue, wrapper)
      with_call_frame(SyncProc, answer_queue) do |source_event_loop|
        args = wrapper.call(source_event_loop, *args) if wrapper
        args = Sanitizer.sanitize_values(args, source_event_loop, self)
        arg_block = Sanitizer.sanitize_value(arg_block, source_event_loop, self)
        res = pr.yield(*args, &arg_block)
        res = Sanitizer.sanitize_value(res, self, source_event_loop)
        answer_queue << res
      end
    end

    # Anonymous version of yield_call
    def yield_proc_call(pr, args, kwargs, arg_block, answer_queue, wrapper)
      with_call_frame(YieldProc, answer_queue) do |source_event_loop|
        args << _completion_proc(answer_queue, pr, source_event_loop)
        args << kwargs unless kwargs.empty?
        args = wrapper.call(source_event_loop, *args) if wrapper
        args = Sanitizer.sanitize_values(args, source_event_loop, self)
        arg_block = Sanitizer.sanitize_value(arg_block, source_event_loop, self)
        pr.yield(*args, &arg_block)
      end
    end

    # Called when an external proc has finished
    def external_proc_result(cbresult, res)
      with_call_frame(ExternalProc, nil) do
        cbresult.yield(*res)
      end
    end

    def new_async_proc(name=nil, klass=AsyncProc, &block)
      raise InvalidAccess, "async_proc outside of the event scope is not allowed" unless event_scope?
      wrapper = ArgumentWrapper.build(block, "async_proc #{name}")
      pr = klass.new do |*args, &arg_block|
        if event_scope?
          # called in the event scope
          block.yield(*args, &arg_block)
        else
          # called externally
          async_proc_call(block, args, arg_block, wrapper)
        end
        pr
      end
    end

    def new_sync_proc(name=nil, &block)
      raise InvalidAccess, "sync_proc outside of the event scope is not allowed" unless event_scope?
      wrapper = ArgumentWrapper.build(block, "sync_proc #{name}")
      SyncProc.new do |*args, &arg_block|
        if event_scope?
          # called in the event scope
          block.yield(*args, &arg_block)
        else
          # called externally
          answer_queue = Queue.new
          sel = sync_proc_call(block, args, arg_block, answer_queue, wrapper)
          callback_loop(answer_queue, sel)
        end
      end
    end

    def new_yield_proc(name=nil, &block)
      raise InvalidAccess, "yield_proc outside of the event scope is not allowed" unless event_scope?
      wrapper = ArgumentWrapper.build(block, "yield_proc #{name}")
      YieldProc.new do |*args, **kwargs, &arg_block|
        if event_scope?
          # called in the event scope
          safe_yield_result(args, block)
          args << kwargs unless kwargs.empty?
          block.yield(*args, &arg_block)
          nil
        else
          # called externally
          answer_queue = Queue.new
          sel = yield_proc_call(block, args, kwargs, arg_block, answer_queue, wrapper)
          callback_loop(answer_queue, sel)
        end
      end
    end

    def safe_yield_result(args, name)
      complete = args.last
      unless Proc === complete
        if Proc === name
          raise InvalidAccess, "yield_proc #{name.inspect} must be called with a Proc object in the event scope but got #{complete.class}"
        else
          raise InvalidAccess, "yield_call `#{name}' must be called with a Proc object in the event scope but got #{complete.class}"
        end
      end
      args[-1] = proc do |*cargs, &cblock|
        unless complete
          if Proc === name
            raise MultipleResults, "received multiple results for #{name.inspect}"
          else
            raise MultipleResults, "received multiple results for method `#{name}'"
          end
        end
        res = complete.yield(*cargs, &cblock)
        complete = nil
        res
      end
    end

    private def _completion_proc(answer_queue, name, source_event_loop)
      new_async_proc(name, CompletionProc) do |*resu|
        unless answer_queue
          if Proc === name
            raise MultipleResults, "received multiple results for #{name.inspect}"
          else
            raise MultipleResults, "received multiple results for method `#{name}'"
          end
        end
        resu = Sanitizer.sanitize_values(resu, self, source_event_loop)
        resu = Sanitizer.return_args(resu)
        answer_queue << resu
        answer_queue = nil
      end
    end

    def callback_loop(answer_queue, source_event_loop)
      loop do
        rets = answer_queue.deq
        case rets
        when Callback
          cbres = rets.block.yield(*rets.args, &rets.arg_block)

          if rets.cbresult
            cbres = Sanitizer.sanitize_value(cbres, source_event_loop, self)
            external_proc_result(rets.cbresult, cbres)
          end
        when WrappedException
          answer_queue.close if answer_queue.respond_to?(:close)
          raise(*rets.exc)
        else
          answer_queue.close if answer_queue.respond_to?(:close)
          return rets
        end
      end
    end

    # Mark an object as to be shared instead of copied.
    def shared_object(object)
      if event_scope?
        ObjectRegistry.set_tag(object, self)
      else
        ObjectRegistry.set_tag(object, ExternalSharedObject)
      end
      object
    end

    def thread_finished(action)
      @mutex.synchronize do
        @running_actions.delete(action) or raise(ArgumentError, "unknown action has finished: #{action}")
        _update_action_threads_for_gc
      end
    end

    Callback = Struct.new :block, :args, :arg_block, :cbresult

    def _external_proc_call(block, name, args, arg_block, cbresult, source_event_loop)
      if @latest_answer_queue
        args = Sanitizer.sanitize_values(args, self, source_event_loop)
        arg_block = Sanitizer.sanitize_value(arg_block, self, source_event_loop)
        @latest_answer_queue << Callback.new(block, args, arg_block, cbresult)
        nil
      else
        raise(InvalidAccess, "closure #{"defined by `#{name}' " if name}was yielded by `#{@latest_call_name}', which must be a sync_call, yield_call, sync_proc or yield_proc")
      end
    end

    def start_action(meth, name, args)
      # Actions might not be tagged to a calling event scope
      source_event_loop = Thread.current.thread_variable_get(:__event_loop__)
      Thread.current.thread_variable_set(:__event_loop__, nil)

      qu = Queue.new

      new_thread = Thread.handle_interrupt(Exception => :never) do
        @threadpool.new do
          begin
            Thread.handle_interrupt(AbortAction => :on_blocking) do
              if meth.arity == args.length
                meth.call(*args)
              else
                meth.call(*args, qu.deq)
              end
            end
          rescue AbortAction
            # Do nothing, just exit the action
          rescue WeakRef::RefError
            # It can happen that the GC already swept the Eventbox instance before some action of the instance reached a blocking state.
            # In this case access to the Eventbox instance raises a RefError.
            # Since it's now impossible to execute the action up to a blocking state, abort the action prematurely.
            raise unless @shutdown
          ensure
            thread_finished(qu.deq)
          end
        end
      end

      a = Action.new(name, new_thread, self)

      # Add to the list of running actions
      synchronize_external do
        @running_actions << a
        _update_action_threads_for_gc
      end

      # Enqueue the action twice (for call and for finish)
      qu << a << a

      # @shutdown is set without a lock, so we need to re-check whether it was set during start_action
      if @shutdown
        a.abort
        a.join
      end

      a
    ensure
      Thread.current.thread_variable_set(:__event_loop__, source_event_loop)
    end
  end
end
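The Callback struct and callback_loop above are what allow an external block to be executed while a sync_call or yield_call is in progress: the event scope enqueues the block via _external_proc_call, and the calling thread runs it inside callback_loop before it receives the final result. A minimal sketch of that behaviour, with class and method names invented for illustration:

require "eventbox"

class Reporter < Eventbox
  # `formatter` is an external block. Calling it here does not run it in the
  # event scope: it is enqueued as a Callback and executed by the external
  # caller's thread inside callback_loop.
  sync_call def report(value, &formatter)
    formatter.call(value)
    value
  end
end

r = Reporter.new
r.report(42) { |v| puts "formatted by the calling thread: #{v}" }  # => 42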