concurrent-ruby 1.1.5
Gem release diff for concurrent-ruby 1.1.5: the files below were added in this release; the `+N -M` suffixes give the number of lines added and removed per file.
- checksums.yaml +7 -0
- data/CHANGELOG.md +478 -0
- data/Gemfile +41 -0
- data/LICENSE.md +23 -0
- data/README.md +381 -0
- data/Rakefile +327 -0
- data/ext/concurrent-ruby/ConcurrentRubyService.java +17 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/AtomicReferenceLibrary.java +175 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/JRubyMapBackendLibrary.java +248 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicBooleanLibrary.java +93 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicFixnumLibrary.java +113 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaSemaphoreLibrary.java +159 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/SynchronizationLibrary.java +307 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMap.java +31 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMapV8.java +3863 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/LongAdder.java +203 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/Striped64.java +342 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/ConcurrentHashMapV8.java +3800 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/LongAdder.java +204 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/Striped64.java +291 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166y/ThreadLocalRandom.java +199 -0
- data/lib/concurrent-ruby.rb +1 -0
- data/lib/concurrent.rb +134 -0
- data/lib/concurrent/agent.rb +587 -0
- data/lib/concurrent/array.rb +66 -0
- data/lib/concurrent/async.rb +459 -0
- data/lib/concurrent/atom.rb +222 -0
- data/lib/concurrent/atomic/abstract_thread_local_var.rb +66 -0
- data/lib/concurrent/atomic/atomic_boolean.rb +126 -0
- data/lib/concurrent/atomic/atomic_fixnum.rb +143 -0
- data/lib/concurrent/atomic/atomic_markable_reference.rb +164 -0
- data/lib/concurrent/atomic/atomic_reference.rb +204 -0
- data/lib/concurrent/atomic/count_down_latch.rb +100 -0
- data/lib/concurrent/atomic/cyclic_barrier.rb +128 -0
- data/lib/concurrent/atomic/event.rb +109 -0
- data/lib/concurrent/atomic/java_count_down_latch.rb +42 -0
- data/lib/concurrent/atomic/java_thread_local_var.rb +37 -0
- data/lib/concurrent/atomic/mutex_atomic_boolean.rb +62 -0
- data/lib/concurrent/atomic/mutex_atomic_fixnum.rb +75 -0
- data/lib/concurrent/atomic/mutex_count_down_latch.rb +44 -0
- data/lib/concurrent/atomic/mutex_semaphore.rb +115 -0
- data/lib/concurrent/atomic/read_write_lock.rb +254 -0
- data/lib/concurrent/atomic/reentrant_read_write_lock.rb +379 -0
- data/lib/concurrent/atomic/ruby_thread_local_var.rb +161 -0
- data/lib/concurrent/atomic/semaphore.rb +145 -0
- data/lib/concurrent/atomic/thread_local_var.rb +104 -0
- data/lib/concurrent/atomic_reference/mutex_atomic.rb +56 -0
- data/lib/concurrent/atomic_reference/numeric_cas_wrapper.rb +28 -0
- data/lib/concurrent/atomics.rb +10 -0
- data/lib/concurrent/collection/copy_on_notify_observer_set.rb +107 -0
- data/lib/concurrent/collection/copy_on_write_observer_set.rb +111 -0
- data/lib/concurrent/collection/java_non_concurrent_priority_queue.rb +84 -0
- data/lib/concurrent/collection/lock_free_stack.rb +158 -0
- data/lib/concurrent/collection/map/atomic_reference_map_backend.rb +927 -0
- data/lib/concurrent/collection/map/mri_map_backend.rb +66 -0
- data/lib/concurrent/collection/map/non_concurrent_map_backend.rb +140 -0
- data/lib/concurrent/collection/map/synchronized_map_backend.rb +82 -0
- data/lib/concurrent/collection/non_concurrent_priority_queue.rb +143 -0
- data/lib/concurrent/collection/ruby_non_concurrent_priority_queue.rb +150 -0
- data/lib/concurrent/concern/deprecation.rb +34 -0
- data/lib/concurrent/concern/dereferenceable.rb +73 -0
- data/lib/concurrent/concern/logging.rb +32 -0
- data/lib/concurrent/concern/obligation.rb +220 -0
- data/lib/concurrent/concern/observable.rb +110 -0
- data/lib/concurrent/concurrent_ruby.jar +0 -0
- data/lib/concurrent/configuration.rb +184 -0
- data/lib/concurrent/constants.rb +8 -0
- data/lib/concurrent/dataflow.rb +81 -0
- data/lib/concurrent/delay.rb +199 -0
- data/lib/concurrent/errors.rb +69 -0
- data/lib/concurrent/exchanger.rb +352 -0
- data/lib/concurrent/executor/abstract_executor_service.rb +134 -0
- data/lib/concurrent/executor/cached_thread_pool.rb +62 -0
- data/lib/concurrent/executor/executor_service.rb +185 -0
- data/lib/concurrent/executor/fixed_thread_pool.rb +206 -0
- data/lib/concurrent/executor/immediate_executor.rb +66 -0
- data/lib/concurrent/executor/indirect_immediate_executor.rb +44 -0
- data/lib/concurrent/executor/java_executor_service.rb +91 -0
- data/lib/concurrent/executor/java_single_thread_executor.rb +29 -0
- data/lib/concurrent/executor/java_thread_pool_executor.rb +123 -0
- data/lib/concurrent/executor/ruby_executor_service.rb +78 -0
- data/lib/concurrent/executor/ruby_single_thread_executor.rb +22 -0
- data/lib/concurrent/executor/ruby_thread_pool_executor.rb +362 -0
- data/lib/concurrent/executor/safe_task_executor.rb +35 -0
- data/lib/concurrent/executor/serial_executor_service.rb +34 -0
- data/lib/concurrent/executor/serialized_execution.rb +107 -0
- data/lib/concurrent/executor/serialized_execution_delegator.rb +28 -0
- data/lib/concurrent/executor/simple_executor_service.rb +100 -0
- data/lib/concurrent/executor/single_thread_executor.rb +56 -0
- data/lib/concurrent/executor/thread_pool_executor.rb +87 -0
- data/lib/concurrent/executor/timer_set.rb +173 -0
- data/lib/concurrent/executors.rb +20 -0
- data/lib/concurrent/future.rb +141 -0
- data/lib/concurrent/hash.rb +59 -0
- data/lib/concurrent/immutable_struct.rb +93 -0
- data/lib/concurrent/ivar.rb +207 -0
- data/lib/concurrent/map.rb +337 -0
- data/lib/concurrent/maybe.rb +229 -0
- data/lib/concurrent/mutable_struct.rb +229 -0
- data/lib/concurrent/mvar.rb +242 -0
- data/lib/concurrent/options.rb +42 -0
- data/lib/concurrent/promise.rb +579 -0
- data/lib/concurrent/promises.rb +2167 -0
- data/lib/concurrent/re_include.rb +58 -0
- data/lib/concurrent/scheduled_task.rb +318 -0
- data/lib/concurrent/set.rb +66 -0
- data/lib/concurrent/settable_struct.rb +129 -0
- data/lib/concurrent/synchronization.rb +30 -0
- data/lib/concurrent/synchronization/abstract_lockable_object.rb +98 -0
- data/lib/concurrent/synchronization/abstract_object.rb +24 -0
- data/lib/concurrent/synchronization/abstract_struct.rb +160 -0
- data/lib/concurrent/synchronization/condition.rb +60 -0
- data/lib/concurrent/synchronization/jruby_lockable_object.rb +13 -0
- data/lib/concurrent/synchronization/jruby_object.rb +45 -0
- data/lib/concurrent/synchronization/lock.rb +36 -0
- data/lib/concurrent/synchronization/lockable_object.rb +74 -0
- data/lib/concurrent/synchronization/mri_object.rb +44 -0
- data/lib/concurrent/synchronization/mutex_lockable_object.rb +76 -0
- data/lib/concurrent/synchronization/object.rb +183 -0
- data/lib/concurrent/synchronization/rbx_lockable_object.rb +65 -0
- data/lib/concurrent/synchronization/rbx_object.rb +49 -0
- data/lib/concurrent/synchronization/truffleruby_object.rb +47 -0
- data/lib/concurrent/synchronization/volatile.rb +36 -0
- data/lib/concurrent/thread_safe/synchronized_delegator.rb +50 -0
- data/lib/concurrent/thread_safe/util.rb +16 -0
- data/lib/concurrent/thread_safe/util/adder.rb +74 -0
- data/lib/concurrent/thread_safe/util/cheap_lockable.rb +118 -0
- data/lib/concurrent/thread_safe/util/data_structures.rb +63 -0
- data/lib/concurrent/thread_safe/util/power_of_two_tuple.rb +38 -0
- data/lib/concurrent/thread_safe/util/striped64.rb +246 -0
- data/lib/concurrent/thread_safe/util/volatile.rb +75 -0
- data/lib/concurrent/thread_safe/util/xor_shift_random.rb +50 -0
- data/lib/concurrent/timer_task.rb +334 -0
- data/lib/concurrent/tuple.rb +86 -0
- data/lib/concurrent/tvar.rb +258 -0
- data/lib/concurrent/utility/at_exit.rb +97 -0
- data/lib/concurrent/utility/engine.rb +56 -0
- data/lib/concurrent/utility/monotonic_time.rb +58 -0
- data/lib/concurrent/utility/native_extension_loader.rb +79 -0
- data/lib/concurrent/utility/native_integer.rb +53 -0
- data/lib/concurrent/utility/processor_counter.rb +158 -0
- data/lib/concurrent/version.rb +3 -0
- metadata +193 -0
require 'thread'
require 'concurrent/atomic/event'
require 'concurrent/concern/logging'
require 'concurrent/executor/ruby_executor_service'
require 'concurrent/utility/monotonic_time'

module Concurrent

  # @!macro thread_pool_executor
  # @!macro thread_pool_options
  # @!visibility private
  class RubyThreadPoolExecutor < RubyExecutorService

    # @!macro thread_pool_executor_constant_default_max_pool_size
    DEFAULT_MAX_POOL_SIZE = 2_147_483_647 # java.lang.Integer::MAX_VALUE

    # @!macro thread_pool_executor_constant_default_min_pool_size
    DEFAULT_MIN_POOL_SIZE = 0

    # @!macro thread_pool_executor_constant_default_max_queue_size
    DEFAULT_MAX_QUEUE_SIZE = 0

    # @!macro thread_pool_executor_constant_default_thread_timeout
    DEFAULT_THREAD_IDLETIMEOUT = 60

    # @!macro thread_pool_executor_attr_reader_max_length
    attr_reader :max_length

    # @!macro thread_pool_executor_attr_reader_min_length
    attr_reader :min_length

    # @!macro thread_pool_executor_attr_reader_idletime
    attr_reader :idletime

    # @!macro thread_pool_executor_attr_reader_max_queue
    attr_reader :max_queue

    # @!macro thread_pool_executor_method_initialize
    def initialize(opts = {})
      super(opts)
    end

    # @!macro thread_pool_executor_attr_reader_largest_length
    def largest_length
      synchronize { @largest_length }
    end

    # @!macro thread_pool_executor_attr_reader_scheduled_task_count
    def scheduled_task_count
      synchronize { @scheduled_task_count }
    end

    # @!macro thread_pool_executor_attr_reader_completed_task_count
    def completed_task_count
      synchronize { @completed_task_count }
    end

    # @!macro executor_service_method_can_overflow_question
    def can_overflow?
      synchronize { ns_limited_queue? }
    end

    # @!macro thread_pool_executor_attr_reader_length
    def length
      synchronize { @pool.length }
    end

    # @!macro thread_pool_executor_attr_reader_queue_length
    def queue_length
      synchronize { @queue.length }
    end

    # @!macro thread_pool_executor_attr_reader_remaining_capacity
    def remaining_capacity
      synchronize do
        if ns_limited_queue?
          @max_queue - @queue.length
        else
          -1
        end
      end
    end

    # @!visibility private
    def remove_busy_worker(worker)
      synchronize { ns_remove_busy_worker worker }
    end

    # @!visibility private
    def ready_worker(worker)
      synchronize { ns_ready_worker worker }
    end

    # @!visibility private
    def worker_not_old_enough(worker)
      synchronize { ns_worker_not_old_enough worker }
    end

    # @!visibility private
    def worker_died(worker)
      synchronize { ns_worker_died worker }
    end

    # @!visibility private
    def worker_task_completed
      synchronize { @completed_task_count += 1 }
    end

    private

    # @!visibility private
    def ns_initialize(opts)
      @min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i
      @max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i
      @idletime   = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i
      @max_queue  = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i
      @fallback_policy = opts.fetch(:fallback_policy, :abort)
      raise ArgumentError.new("#{@fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICIES.include?(@fallback_policy)

      raise ArgumentError.new("`max_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if @max_length < DEFAULT_MIN_POOL_SIZE
      raise ArgumentError.new("`max_threads` cannot be greater than #{DEFAULT_MAX_POOL_SIZE}") if @max_length > DEFAULT_MAX_POOL_SIZE
      raise ArgumentError.new("`min_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if @min_length < DEFAULT_MIN_POOL_SIZE
      raise ArgumentError.new("`min_threads` cannot be more than `max_threads`") if min_length > max_length

      self.auto_terminate = opts.fetch(:auto_terminate, true)

      @pool  = [] # all workers
      @ready = [] # used as a stash (most idle worker is at the start)
      @queue = [] # used as queue
      # @ready or @queue is empty at all times
      @scheduled_task_count = 0
      @completed_task_count = 0
      @largest_length       = 0
      @ruby_pid             = $$ # detects if Ruby has forked

      @gc_interval  = opts.fetch(:gc_interval, @idletime / 2.0).to_i # undocumented
      @next_gc_time = Concurrent.monotonic_time + @gc_interval
    end

    # @!visibility private
    def ns_limited_queue?
      @max_queue != 0
    end

    # @!visibility private
    def ns_execute(*args, &task)
      ns_reset_if_forked

      if ns_assign_worker(*args, &task) || ns_enqueue(*args, &task)
        @scheduled_task_count += 1
      else
        handle_fallback(*args, &task)
      end

      ns_prune_pool if @next_gc_time < Concurrent.monotonic_time
    end

    # @!visibility private
    def ns_shutdown_execution
      ns_reset_if_forked

      if @pool.empty?
        # nothing to do
        stopped_event.set
      end

      if @queue.empty?
        # no more tasks will be accepted, just stop all workers
        @pool.each(&:stop)
      end
    end

    # @!visibility private
    def ns_kill_execution
      # TODO log out unprocessed tasks in queue
      # TODO try to shutdown first?
      @pool.each(&:kill)
      @pool.clear
      @ready.clear
    end

    # tries to assign task to a worker, tries to get one from @ready or to create new one
    # @return [true, false] if task is assigned to a worker
    #
    # @!visibility private
    def ns_assign_worker(*args, &task)
      # keep growing if the pool is not at the minimum yet
      worker = (@ready.pop if @pool.size >= @min_length) || ns_add_busy_worker
      if worker
        worker << [task, args]
        true
      else
        false
      end
    rescue ThreadError
      # Raised when the operating system refuses to create the new thread
      return false
    end

    # tries to enqueue task
    # @return [true, false] if enqueued
    #
    # @!visibility private
    def ns_enqueue(*args, &task)
      if !ns_limited_queue? || @queue.size < @max_queue
        @queue << [task, args]
        true
      else
        false
      end
    end

    # @!visibility private
    def ns_worker_died(worker)
      ns_remove_busy_worker worker
      replacement_worker = ns_add_busy_worker
      ns_ready_worker replacement_worker, false if replacement_worker
    end

    # creates new worker which has to receive work to do after it's added
    # @return [nil, Worker] nil of max capacity is reached
    #
    # @!visibility private
    def ns_add_busy_worker
      return if @pool.size >= @max_length

      @pool << (worker = Worker.new(self))
      @largest_length = @pool.length if @pool.length > @largest_length
      worker
    end

    # handle ready worker, giving it new job or assigning back to @ready
    #
    # @!visibility private
    def ns_ready_worker(worker, success = true)
      task_and_args = @queue.shift
      if task_and_args
        worker << task_and_args
      else
        # stop workers when !running?, do not return them to @ready
        if running?
          @ready.push(worker)
        else
          worker.stop
        end
      end
    end

    # returns back worker to @ready which was not idle for enough time
    #
    # @!visibility private
    def ns_worker_not_old_enough(worker)
      # let's put workers coming from idle_test back to the start (as the oldest worker)
      @ready.unshift(worker)
      true
    end

    # removes a worker which is not in not tracked in @ready
    #
    # @!visibility private
    def ns_remove_busy_worker(worker)
      @pool.delete(worker)
      stopped_event.set if @pool.empty? && !running?
      true
    end

    # try oldest worker if it is idle for enough time, it's returned back at the start
    #
    # @!visibility private
    def ns_prune_pool
      return if @pool.size <= @min_length

      last_used = @ready.shift
      last_used << :idle_test if last_used

      @next_gc_time = Concurrent.monotonic_time + @gc_interval
    end

    def ns_reset_if_forked
      if $$ != @ruby_pid
        @queue.clear
        @ready.clear
        @pool.clear
        @scheduled_task_count = 0
        @completed_task_count = 0
        @largest_length       = 0
        @ruby_pid             = $$
      end
    end

    # @!visibility private
    class Worker
      include Concern::Logging

      def initialize(pool)
        # instance variables accessed only under pool's lock so no need to sync here again
        @queue  = Queue.new
        @pool   = pool
        @thread = create_worker @queue, pool, pool.idletime
      end

      def <<(message)
        @queue << message
      end

      def stop
        @queue << :stop
      end

      def kill
        @thread.kill
      end

      private

      def create_worker(queue, pool, idletime)
        Thread.new(queue, pool, idletime) do |my_queue, my_pool, my_idletime|
          last_message = Concurrent.monotonic_time
          catch(:stop) do
            loop do

              case message = my_queue.pop
              when :idle_test
                if (Concurrent.monotonic_time - last_message) > my_idletime
                  my_pool.remove_busy_worker(self)
                  throw :stop
                else
                  my_pool.worker_not_old_enough(self)
                end

              when :stop
                my_pool.remove_busy_worker(self)
                throw :stop

              else
                task, args = message
                run_task my_pool, task, args
                last_message = Concurrent.monotonic_time

                my_pool.ready_worker(self)
              end
            end
          end
        end
      end

      def run_task(pool, task, args)
        task.call(*args)
        pool.worker_task_completed
      rescue => ex
        # let it fail
        log DEBUG, ex
      rescue Exception => ex
        log ERROR, ex
        pool.worker_died(self)
        throw :stop
      end
    end

    private_constant :Worker
  end
end
require 'concurrent/synchronization'

module Concurrent

  # A simple utility class that executes a callable and returns and array of three elements:
  # success - indicating if the callable has been executed without errors
  # value - filled by the callable result if it has been executed without errors, nil otherwise
  # reason - the error risen by the callable if it has been executed with errors, nil otherwise
  class SafeTaskExecutor < Synchronization::LockableObject

    def initialize(task, opts = {})
      @task            = task
      @exception_class = opts.fetch(:rescue_exception, false) ? Exception : StandardError
      super() # ensures visibility
    end

    # @return [Array]
    def execute(*args)
      synchronize do
        success = false
        value   = reason = nil

        begin
          value   = @task.call(*args)
          success = true
        rescue @exception_class => ex
          reason  = ex
          success = false
        end

        [success, value, reason]
      end
    end
  end
end
require 'concurrent/executor/executor_service'

module Concurrent

  # Indicates that the including `ExecutorService` guarantees
  # that all operations will occur in the order they are post and that no
  # two operations may occur simultaneously. This module provides no
  # functionality and provides no guarantees. That is the responsibility
  # of the including class. This module exists solely to allow the including
  # object to be interrogated for its serialization status.
  #
  # @example
  #   class Foo
  #     include Concurrent::SerialExecutor
  #   end
  #
  #   foo = Foo.new
  #
  #   foo.is_a? Concurrent::ExecutorService #=> true
  #   foo.is_a? Concurrent::SerialExecutor  #=> true
  #   foo.serialized?                       #=> true
  #
  # @!visibility private
  module SerialExecutorService
    include ExecutorService

    # @!macro executor_service_method_serialized_question
    #
    # @note Always returns `true`
    def serialized?
      true
    end
  end
end
require 'concurrent/errors'
require 'concurrent/concern/logging'
require 'concurrent/synchronization'

module Concurrent

  # Ensures passed jobs in a serialized order never running at the same time.
  class SerializedExecution < Synchronization::LockableObject
    include Concern::Logging

    def initialize()
      super()
      synchronize { ns_initialize }
    end

    Job = Struct.new(:executor, :args, :block) do
      def call
        block.call(*args)
      end
    end

    # Submit a task to the executor for asynchronous processing.
    #
    # @param [Executor] executor to be used for this job
    #
    # @param [Array] args zero or more arguments to be passed to the task
    #
    # @yield the asynchronous task to perform
    #
    # @return [Boolean] `true` if the task is queued, `false` if the executor
    #   is not running
    #
    # @raise [ArgumentError] if no task is given
    def post(executor, *args, &task)
      posts [[executor, args, task]]
      true
    end

    # As {#post} but allows to submit multiple tasks at once, it's guaranteed that they will not
    # be interleaved by other tasks.
    #
    # @param [Array<Array(ExecutorService, Array<Object>, Proc)>] posts array of triplets where
    #   first is a {ExecutorService}, second is array of args for task, third is a task (Proc)
    def posts(posts)
      # if can_overflow?
      #   raise ArgumentError, 'SerializedExecution does not support thread-pools which can overflow'
      # end

      return nil if posts.empty?

      jobs = posts.map { |executor, args, task| Job.new executor, args, task }

      job_to_post = synchronize do
        if @being_executed
          @stash.push(*jobs)
          nil
        else
          @being_executed = true
          @stash.push(*jobs[1..-1])
          jobs.first
        end
      end

      call_job job_to_post if job_to_post
      true
    end

    private

    def ns_initialize
      @being_executed = false
      @stash          = []
    end

    def call_job(job)
      did_it_run = begin
        job.executor.post { work(job) }
        true
      rescue RejectedExecutionError => ex
        false
      end

      # TODO not the best idea to run it myself
      unless did_it_run
        begin
          work job
        rescue => ex
          # let it fail
          log DEBUG, ex
        end
      end
    end

    # ensures next job is executed if any is stashed
    def work(job)
      job.call
    ensure
      synchronize do
        job = @stash.shift || (@being_executed = false)
      end

      # TODO maybe be able to tell caching pool to just enqueue this job, because the current one end at the end
      # of this block
      call_job job if job
    end
  end
end