concurrent-ruby 0.7.0-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (112)
  1. data/LICENSE.txt +21 -0
  2. data/README.md +217 -0
  3. data/lib/concurrent.rb +45 -0
  4. data/lib/concurrent/actor.rb +104 -0
  5. data/lib/concurrent/actor/behaviour.rb +70 -0
  6. data/lib/concurrent/actor/behaviour/abstract.rb +48 -0
  7. data/lib/concurrent/actor/behaviour/awaits.rb +21 -0
  8. data/lib/concurrent/actor/behaviour/buffer.rb +54 -0
  9. data/lib/concurrent/actor/behaviour/errors_on_unknown_message.rb +12 -0
  10. data/lib/concurrent/actor/behaviour/executes_context.rb +18 -0
  11. data/lib/concurrent/actor/behaviour/linking.rb +42 -0
  12. data/lib/concurrent/actor/behaviour/pausing.rb +77 -0
  13. data/lib/concurrent/actor/behaviour/removes_child.rb +16 -0
  14. data/lib/concurrent/actor/behaviour/sets_results.rb +36 -0
  15. data/lib/concurrent/actor/behaviour/supervised.rb +58 -0
  16. data/lib/concurrent/actor/behaviour/supervising.rb +34 -0
  17. data/lib/concurrent/actor/behaviour/terminates_children.rb +13 -0
  18. data/lib/concurrent/actor/behaviour/termination.rb +54 -0
  19. data/lib/concurrent/actor/context.rb +153 -0
  20. data/lib/concurrent/actor/core.rb +213 -0
  21. data/lib/concurrent/actor/default_dead_letter_handler.rb +9 -0
  22. data/lib/concurrent/actor/envelope.rb +41 -0
  23. data/lib/concurrent/actor/errors.rb +27 -0
  24. data/lib/concurrent/actor/internal_delegations.rb +49 -0
  25. data/lib/concurrent/actor/public_delegations.rb +40 -0
  26. data/lib/concurrent/actor/reference.rb +81 -0
  27. data/lib/concurrent/actor/root.rb +37 -0
  28. data/lib/concurrent/actor/type_check.rb +48 -0
  29. data/lib/concurrent/actor/utils.rb +10 -0
  30. data/lib/concurrent/actor/utils/ad_hoc.rb +21 -0
  31. data/lib/concurrent/actor/utils/balancer.rb +40 -0
  32. data/lib/concurrent/actor/utils/broadcast.rb +52 -0
  33. data/lib/concurrent/actor/utils/pool.rb +59 -0
  34. data/lib/concurrent/actress.rb +3 -0
  35. data/lib/concurrent/agent.rb +230 -0
  36. data/lib/concurrent/async.rb +284 -0
  37. data/lib/concurrent/atomic.rb +91 -0
  38. data/lib/concurrent/atomic/atomic_boolean.rb +202 -0
  39. data/lib/concurrent/atomic/atomic_fixnum.rb +203 -0
  40. data/lib/concurrent/atomic/condition.rb +67 -0
  41. data/lib/concurrent/atomic/copy_on_notify_observer_set.rb +118 -0
  42. data/lib/concurrent/atomic/copy_on_write_observer_set.rb +117 -0
  43. data/lib/concurrent/atomic/count_down_latch.rb +116 -0
  44. data/lib/concurrent/atomic/cyclic_barrier.rb +106 -0
  45. data/lib/concurrent/atomic/event.rb +98 -0
  46. data/lib/concurrent/atomic/synchronization.rb +51 -0
  47. data/lib/concurrent/atomic/thread_local_var.rb +82 -0
  48. data/lib/concurrent/atomic_reference/concurrent_update_error.rb +8 -0
  49. data/lib/concurrent/atomic_reference/direct_update.rb +50 -0
  50. data/lib/concurrent/atomic_reference/jruby.rb +14 -0
  51. data/lib/concurrent/atomic_reference/mutex_atomic.rb +77 -0
  52. data/lib/concurrent/atomic_reference/numeric_cas_wrapper.rb +25 -0
  53. data/lib/concurrent/atomic_reference/rbx.rb +19 -0
  54. data/lib/concurrent/atomic_reference/ruby.rb +37 -0
  55. data/lib/concurrent/atomics.rb +11 -0
  56. data/lib/concurrent/channel/buffered_channel.rb +85 -0
  57. data/lib/concurrent/channel/channel.rb +41 -0
  58. data/lib/concurrent/channel/unbuffered_channel.rb +35 -0
  59. data/lib/concurrent/channel/waitable_list.rb +40 -0
  60. data/lib/concurrent/channels.rb +5 -0
  61. data/lib/concurrent/collection/blocking_ring_buffer.rb +71 -0
  62. data/lib/concurrent/collection/priority_queue.rb +305 -0
  63. data/lib/concurrent/collection/ring_buffer.rb +59 -0
  64. data/lib/concurrent/collections.rb +3 -0
  65. data/lib/concurrent/configuration.rb +161 -0
  66. data/lib/concurrent/dataflow.rb +108 -0
  67. data/lib/concurrent/delay.rb +104 -0
  68. data/lib/concurrent/dereferenceable.rb +101 -0
  69. data/lib/concurrent/errors.rb +30 -0
  70. data/lib/concurrent/exchanger.rb +34 -0
  71. data/lib/concurrent/executor/cached_thread_pool.rb +44 -0
  72. data/lib/concurrent/executor/executor.rb +282 -0
  73. data/lib/concurrent/executor/fixed_thread_pool.rb +33 -0
  74. data/lib/concurrent/executor/immediate_executor.rb +65 -0
  75. data/lib/concurrent/executor/java_cached_thread_pool.rb +31 -0
  76. data/lib/concurrent/executor/java_fixed_thread_pool.rb +41 -0
  77. data/lib/concurrent/executor/java_single_thread_executor.rb +22 -0
  78. data/lib/concurrent/executor/java_thread_pool_executor.rb +180 -0
  79. data/lib/concurrent/executor/per_thread_executor.rb +100 -0
  80. data/lib/concurrent/executor/ruby_cached_thread_pool.rb +29 -0
  81. data/lib/concurrent/executor/ruby_fixed_thread_pool.rb +32 -0
  82. data/lib/concurrent/executor/ruby_single_thread_executor.rb +74 -0
  83. data/lib/concurrent/executor/ruby_thread_pool_executor.rb +288 -0
  84. data/lib/concurrent/executor/ruby_thread_pool_worker.rb +72 -0
  85. data/lib/concurrent/executor/safe_task_executor.rb +35 -0
  86. data/lib/concurrent/executor/serialized_execution.rb +126 -0
  87. data/lib/concurrent/executor/single_thread_executor.rb +35 -0
  88. data/lib/concurrent/executor/thread_pool_executor.rb +68 -0
  89. data/lib/concurrent/executor/timer_set.rb +143 -0
  90. data/lib/concurrent/executors.rb +9 -0
  91. data/lib/concurrent/future.rb +125 -0
  92. data/lib/concurrent/ivar.rb +111 -0
  93. data/lib/concurrent/lazy_register.rb +58 -0
  94. data/lib/concurrent/logging.rb +17 -0
  95. data/lib/concurrent/mvar.rb +200 -0
  96. data/lib/concurrent/obligation.rb +171 -0
  97. data/lib/concurrent/observable.rb +40 -0
  98. data/lib/concurrent/options_parser.rb +48 -0
  99. data/lib/concurrent/promise.rb +170 -0
  100. data/lib/concurrent/scheduled_task.rb +79 -0
  101. data/lib/concurrent/timer_task.rb +341 -0
  102. data/lib/concurrent/tvar.rb +248 -0
  103. data/lib/concurrent/utilities.rb +3 -0
  104. data/lib/concurrent/utility/processor_count.rb +152 -0
  105. data/lib/concurrent/utility/timeout.rb +35 -0
  106. data/lib/concurrent/utility/timer.rb +21 -0
  107. data/lib/concurrent/version.rb +3 -0
  108. data/lib/concurrent_ruby.rb +1 -0
  109. data/lib/concurrent_ruby_ext.jar +0 -0
  110. data/lib/concurrent_ruby_ext.so +0 -0
  111. data/lib/extension_helper.rb +28 -0
  112. metadata +163 -0
data/lib/concurrent/executor/ruby_thread_pool_executor.rb
@@ -0,0 +1,288 @@
+ require 'thread'
+
+ require_relative 'executor'
+ require 'concurrent/atomic/event'
+ require 'concurrent/executor/ruby_thread_pool_worker'
+
+ module Concurrent
+
+   # @!macro thread_pool_executor
+   class RubyThreadPoolExecutor
+     include RubyExecutor
+
+     # Default maximum number of threads that will be created in the pool.
+     DEFAULT_MAX_POOL_SIZE = 2**15 # 32768
+
+     # Default minimum number of threads that will be retained in the pool.
+     DEFAULT_MIN_POOL_SIZE = 0
+
+     # Default maximum number of tasks that may be added to the task queue.
+     DEFAULT_MAX_QUEUE_SIZE = 0
+
+     # Default maximum number of seconds a thread in the pool may remain idle
+     # before being reclaimed.
+     DEFAULT_THREAD_IDLETIMEOUT = 60
+
+     # The set of possible overflow policies that may be set at thread pool creation.
+     OVERFLOW_POLICIES = [:abort, :discard, :caller_runs]
+
+     # The maximum number of threads that may be created in the pool.
+     attr_reader :max_length
+
+     # The minimum number of threads that may be retained in the pool.
+     attr_reader :min_length
+
+     # The largest number of threads that have been created in the pool since construction.
+     attr_reader :largest_length
+
+     # The number of tasks that have been scheduled for execution on the pool since construction.
+     attr_reader :scheduled_task_count
+
+     # The number of tasks that have been completed by the pool since construction.
+     attr_reader :completed_task_count
+
+     # The number of seconds that a thread may be idle before being reclaimed.
+     attr_reader :idletime
+
+     # The maximum number of tasks that may be waiting in the work queue at any one time.
+     # When the queue size reaches `max_queue` subsequent tasks will be rejected in
+     # accordance with the configured `overflow_policy`.
+     attr_reader :max_queue
+
+     # The policy defining how rejected tasks (tasks received once the queue size reaches
+     # the configured `max_queue`) are handled. Must be one of the values specified in
+     # `OVERFLOW_POLICIES`.
+     attr_reader :overflow_policy
+
+     # Create a new thread pool.
+     #
+     # @param [Hash] opts the options which configure the thread pool
+     #
+     # @option opts [Integer] :max_threads (DEFAULT_MAX_POOL_SIZE) the maximum
+     #   number of threads to be created
+     # @option opts [Integer] :min_threads (DEFAULT_MIN_POOL_SIZE) the minimum
+     #   number of threads to be retained
+     # @option opts [Integer] :idletime (DEFAULT_THREAD_IDLETIMEOUT) the maximum
+     #   number of seconds a thread may be idle before being reclaimed
+     # @option opts [Integer] :max_queue (DEFAULT_MAX_QUEUE_SIZE) the maximum
+     #   number of tasks allowed in the work queue at any one time; a value of
+     #   zero means the queue may grow without bound
+     # @option opts [Symbol] :overflow_policy (:abort) the policy for handling new
+     #   tasks that are received when the queue size has reached `max_queue`
+     #
+     # @raise [ArgumentError] if `:max_threads` is less than one
+     # @raise [ArgumentError] if `:min_threads` is less than zero
+     # @raise [ArgumentError] if `:overflow_policy` is not one of the values specified
+     #   in `OVERFLOW_POLICIES`
+     #
+     # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.html
+     def initialize(opts = {})
+       @min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i
+       @max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i
+       @idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i
+       @max_queue = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i
+       @overflow_policy = opts.fetch(:overflow_policy, :abort)
+
+       raise ArgumentError.new('max_threads must be greater than zero') if @max_length <= 0
+       raise ArgumentError.new('min_threads cannot be less than zero') if @min_length < 0
+       raise ArgumentError.new("#{overflow_policy} is not a valid overflow policy") unless OVERFLOW_POLICIES.include?(@overflow_policy)
+       raise ArgumentError.new('min_threads cannot be more than max_threads') if min_length > max_length
+
+       init_executor
+
+       @pool = []
+       @queue = Queue.new
+       @scheduled_task_count = 0
+       @completed_task_count = 0
+       @largest_length = 0
+
+       @gc_interval = opts.fetch(:gc_interval, 1).to_i # undocumented
+       @last_gc_time = Time.now.to_f - [1.0, (@gc_interval * 2.0)].max
+     end
+
+     # @!macro executor_module_method_can_overflow_question
+     def can_overflow?
+       @max_queue != 0
+     end
+
+     # The number of threads currently in the pool.
+     #
+     # @return [Integer] the length
+     def length
+       mutex.synchronize{ running? ? @pool.length : 0 }
+     end
+     alias_method :current_length, :length
+
+     # The number of tasks in the queue awaiting execution.
+     #
+     # @return [Integer] the queue_length
+     def queue_length
+       mutex.synchronize{ running? ? @queue.length : 0 }
+     end
+
+     # Number of tasks that may be enqueued before reaching `max_queue` and rejecting
+     # new tasks. A value of -1 indicates that the queue may grow without bound.
+     #
+     # @return [Integer] the remaining_capacity
+     def remaining_capacity
+       mutex.synchronize { @max_queue == 0 ? -1 : @max_queue - @queue.length }
+     end
+
+     # Returns an array with the status of each thread in the pool.
+     #
+     # This method is deprecated and will be removed soon.
+     def status
+       warn '[DEPRECATED] `status` is deprecated and will be removed soon.'
+       mutex.synchronize { @pool.collect { |worker| worker.status } }
+     end
+
+     # Run on task completion.
+     #
+     # @!visibility private
+     def on_end_task
+       mutex.synchronize do
+         @completed_task_count += 1 #if success
+         break unless running?
+       end
+     end
+
+     # Run when a thread worker exits.
+     #
+     # @!visibility private
+     def on_worker_exit(worker)
+       mutex.synchronize do
+         @pool.delete(worker)
+         if @pool.empty? && ! running?
+           stop_event.set
+           stopped_event.set
+         end
+       end
+     end
+
+     protected
+
+     # @!visibility private
+     def execute(*args, &task)
+       prune_pool
+       if ensure_capacity?
+         @scheduled_task_count += 1
+         @queue << [args, task]
+       else
+         handle_overflow(*args, &task) if @max_queue != 0 && @queue.length >= @max_queue
+       end
+     end
+
+     # @!visibility private
+     def shutdown_execution
+       if @pool.empty?
+         stopped_event.set
+       else
+         @pool.length.times{ @queue << :stop }
+       end
+     end
+
+     # @!visibility private
+     def kill_execution
+       @queue.clear
+       drain_pool
+     end
+
+     # Check the thread pool configuration and determine if the pool
+     # has enough capacity to handle the request. Will grow the size
+     # of the pool if necessary.
+     #
+     # @return [Boolean] true if the pool has enough capacity else false
+     #
+     # @!visibility private
+     def ensure_capacity?
+       additional = 0
+       capacity = true
+
+       if @pool.size < @min_length
+         additional = @min_length - @pool.size
+       elsif @queue.empty? && @queue.num_waiting >= 1
+         additional = 0
+       elsif @pool.size == 0 && @min_length == 0
+         additional = 1
+       elsif @pool.size < @max_length || @max_length == 0
+         additional = 1
+       elsif @max_queue == 0 || @queue.size < @max_queue
+         additional = 0
+       else
+         capacity = false
+       end
+
+       additional.times do
+         @pool << create_worker_thread
+       end
+
+       if additional > 0
+         @largest_length = [@largest_length, @pool.length].max
+       end
+
+       capacity
+     end
+
+     # Handler which executes the `overflow_policy` once the queue size
+     # reaches `max_queue`.
+     #
+     # @param [Array] args the arguments to the task which is being handled.
+     #
+     # @!visibility private
+     def handle_overflow(*args)
+       case @overflow_policy
+       when :abort
+         raise RejectedExecutionError
+       when :discard
+         false
+       when :caller_runs
+         begin
+           yield(*args)
+         rescue => ex
+           # let it fail
+           log DEBUG, ex
+         end
+         true
+       end
+     end
+
+     # Scan all threads in the pool and reclaim any that are dead or
+     # have been idle too long. Will check the last time the pool was
+     # pruned and only run if the configured garbage collection
+     # interval has passed.
+     #
+     # @!visibility private
+     def prune_pool
+       if Time.now.to_f - @gc_interval >= @last_gc_time
+         @pool.delete_if do |worker|
+           worker.dead? ||
+             (@idletime == 0 ? false : Time.now.to_f - @idletime > worker.last_activity)
+         end
+         @last_gc_time = Time.now.to_f
+       end
+     end
+
+     # Reclaim all threads in the pool.
+     #
+     # @!visibility private
+     def drain_pool
+       @pool.each {|worker| worker.kill }
+       @pool.clear
+     end
+
+     # Create a single worker thread to be added to the pool.
+     #
+     # @return [Thread] the new thread.
+     #
+     # @!visibility private
+     def create_worker_thread
+       wrkr = RubyThreadPoolWorker.new(@queue, self)
+       Thread.new(wrkr, self) do |worker, parent|
+         Thread.current.abort_on_exception = false
+         worker.run
+         parent.on_worker_exit(worker)
+       end
+       return wrkr
+     end
+   end
+ end
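For orientation, a minimal usage sketch of the pool above. The constructor options are taken from the `initialize` documentation in this hunk; `post`, `shutdown`, and `wait_for_termination` are assumed to come from the included `RubyExecutor` module (defined in `executor.rb`, not shown here), so their exact signatures are an assumption.

  require 'concurrent'

  # Bounded pool: at most 10 threads and 100 queued tasks; the :abort policy
  # raises Concurrent::RejectedExecutionError once the queue is full.
  pool = Concurrent::RubyThreadPoolExecutor.new(
    min_threads:     2,
    max_threads:     10,
    idletime:        60,    # reclaim threads idle for more than 60 seconds
    max_queue:       100,   # 0 would mean an unbounded queue
    overflow_policy: :abort
  )

  25.times { |i| pool.post(i) { |n| puts "task #{n} on #{Thread.current.object_id}" } }

  pool.shutdown                 # stop accepting work, let queued tasks finish
  pool.wait_for_termination(5)  # assumed RubyExecutor API: block up to 5 seconds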
data/lib/concurrent/executor/ruby_thread_pool_worker.rb
@@ -0,0 +1,72 @@
+ require 'thread'
+ require 'concurrent/logging'
+
+ module Concurrent
+
+   # @!visibility private
+   class RubyThreadPoolWorker
+     include Logging
+
+     # @!visibility private
+     def initialize(queue, parent)
+       @queue = queue
+       @parent = parent
+       @mutex = Mutex.new
+       @last_activity = Time.now.to_f
+     end
+
+     # @!visibility private
+     def dead?
+       return @mutex.synchronize do
+         @thread.nil? ? false : ! @thread.alive?
+       end
+     end
+
+     # @!visibility private
+     def last_activity
+       @mutex.synchronize { @last_activity }
+     end
+
+     def status
+       @mutex.synchronize do
+         return 'not running' if @thread.nil?
+         @thread.status
+       end
+     end
+
+     # @!visibility private
+     def kill
+       @mutex.synchronize do
+         Thread.kill(@thread) unless @thread.nil?
+         @thread = nil
+       end
+     end
+
+     # @!visibility private
+     def run(thread = Thread.current)
+       @mutex.synchronize do
+         raise StandardError.new('already running') unless @thread.nil?
+         @thread = thread
+       end
+
+       loop do
+         task = @queue.pop
+         if task == :stop
+           @thread = nil
+           @parent.on_worker_exit(self)
+           break
+         end
+
+         begin
+           task.last.call(*task.first)
+         rescue => ex
+           # let it fail
+           log DEBUG, ex
+         ensure
+           @last_activity = Time.now.to_f
+           @parent.on_end_task
+         end
+       end
+     end
+   end
+ end
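The pool and its workers communicate through a plain `Queue`: `execute` enqueues `[args, task]` pairs and `shutdown_execution` pushes one `:stop` sentinel per live worker, so each worker exits its loop exactly once. A standalone illustration of that protocol (the surrounding setup here is illustrative only):

  require 'thread'

  queue = Queue.new
  queue << [[2, 3], ->(a, b) { puts a + b }]  # an [args, task] pair, as enqueued by #execute
  queue << :stop                              # sentinel pushed once per worker by #shutdown_execution

  loop do
    task = queue.pop
    break if task == :stop
    task.last.call(*task.first)               # same dispatch the worker uses in #run
  end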
data/lib/concurrent/executor/safe_task_executor.rb
@@ -0,0 +1,35 @@
+ require 'thread'
+
+ module Concurrent
+
+   # A simple utility class that executes a callable and returns an array of three elements:
+   # success - indicating if the callable has been executed without errors
+   # value - filled by the callable result if it has been executed without errors, nil otherwise
+   # reason - the error raised by the callable if it has been executed with errors, nil otherwise
+   class SafeTaskExecutor
+
+     def initialize(task, opts = {})
+       @task = task
+       @mutex = Mutex.new
+       @exception_class = opts.fetch(:rescue_exception, false) ? Exception : StandardError
+     end
+
+     # @return [Array]
+     def execute(*args)
+       @mutex.synchronize do
+         success = false
+         value = reason = nil
+
+         begin
+           value = @task.call(*args)
+           success = true
+         rescue @exception_class => ex
+           reason = ex
+           success = false
+         end
+
+         [success, value, reason]
+       end
+     end
+   end
+ end
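A short sketch of the `[success, value, reason]` triple described above; the return values in the comments are what the implementation shown here is expected to produce.

  require 'concurrent'

  task     = ->(x) { 100 / x }
  executor = Concurrent::SafeTaskExecutor.new(task)

  executor.execute(5)   # => [true, 20, nil]
  executor.execute(0)   # => [false, nil, #<ZeroDivisionError: divided by 0>]

  # With :rescue_exception the guard widens from StandardError to Exception,
  # so errors such as NotImplementedError are captured instead of propagating.
  strict = Concurrent::SafeTaskExecutor.new(-> { raise NotImplementedError }, rescue_exception: true)
  strict.execute        # => [false, nil, #<NotImplementedError: NotImplementedError>]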
data/lib/concurrent/executor/serialized_execution.rb
@@ -0,0 +1,126 @@
+ require 'delegate'
+ require 'concurrent/executor/executor'
+ require 'concurrent/logging'
+ require 'concurrent/atomic/synchronization'
+
+ module Concurrent
+
+   # Executes posted jobs in a serialized order, ensuring that no two jobs ever run at the same time.
+   class SerializedExecution
+     include Logging
+     include Synchronization
+
+     Job = Struct.new(:executor, :args, :block) do
+       def call
+         block.call(*args)
+       end
+     end
+
+     def initialize
+       synchronize do
+         @being_executed = false
+         @stash = []
+       end
+     end
+
+     # Submit a task to the executor for asynchronous processing.
+     #
+     # @param [Executor] executor to be used for this job
+     #
+     # @param [Array] args zero or more arguments to be passed to the task
+     #
+     # @yield the asynchronous task to perform
+     #
+     # @return [Boolean] `true` if the task is queued, `false` if the executor
+     #   is not running
+     #
+     # @raise [ArgumentError] if no task is given
+     def post(executor, *args, &task)
+       posts [[executor, args, task]]
+       true
+     end
+
+     # As {#post}, but allows multiple tasks to be submitted at once; they are
+     # guaranteed not to be interleaved with other tasks.
+     #
+     # @param [Array<Array(Executor, Array<Object>, Proc)>] posts array of triplets where
+     #   first is a {Executor}, second is array of args for task, third is a task (Proc)
+     def posts(posts)
+       # if can_overflow?
+       #   raise ArgumentError, 'SerializedExecution does not support thread-pools which can overflow'
+       # end
+
+       return nil if posts.empty?
+
+       jobs = posts.map { |executor, args, task| Job.new executor, args, task }
+
+       job_to_post = synchronize do
+         if @being_executed
+           @stash.push(*jobs)
+           nil
+         else
+           @being_executed = true
+           @stash.push(*jobs[1..-1])
+           jobs.first
+         end
+       end
+
+       call_job job_to_post if job_to_post
+       true
+     end
+
+     private
+
+     def call_job(job)
+       did_it_run = begin
+         job.executor.post { work(job) }
+         true
+       rescue RejectedExecutionError => ex
+         false
+       end
+
+       # TODO not the best idea to run it myself
+       unless did_it_run
+         begin
+           work job
+         rescue => ex
+           # let it fail
+           log DEBUG, ex
+         end
+       end
+     end
+
+     # Ensures the next job is executed if any is stashed.
+     def work(job)
+       job.call
+     ensure
+       synchronize do
+         job = @stash.shift || (@being_executed = false)
+       end
+
+       call_job job if job
+     end
+   end
+
+   # A wrapper/delegator for any `Executor` or `ExecutorService` that
+   # guarantees serialized execution of tasks.
+   #
+   # @see [SimpleDelegator](http://www.ruby-doc.org/stdlib-2.1.2/libdoc/delegate/rdoc/SimpleDelegator.html)
+   # @see Concurrent::SerializedExecution
+   class SerializedExecutionDelegator < SimpleDelegator
+     include SerialExecutor
+
+     def initialize(executor)
+       @executor = executor
+       @serializer = SerializedExecution.new
+       super(executor)
+     end
+
+     # @!macro executor_method_post
+     def post(*args, &task)
+       raise ArgumentError.new('no block given') unless block_given?
+       return false unless running?
+       @serializer.post(@executor, *args, &task)
+     end
+   end
+ end
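Finally, a minimal sketch of the serialization guarantee: tasks posted through the delegator still run on the underlying pool's threads, but never concurrently with one another. `CachedThreadPool` is used only as an example executor from elsewhere in this release; its no-argument constructor and the delegated `running?` check are assumptions here.

  require 'concurrent'

  pool       = Concurrent::CachedThreadPool.new
  serialized = Concurrent::SerializedExecutionDelegator.new(pool)

  counter = 0
  100.times { serialized.post { counter += 1 } }  # increments never run concurrently

  # SerializedExecution can also be used directly, naming an executor per job:
  serializer = Concurrent::SerializedExecution.new
  serializer.post(pool) { puts 'runs after any previously posted job finishes' }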