concurrent-ruby 1.1.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +542 -0
- data/Gemfile +37 -0
- data/LICENSE.txt +21 -0
- data/README.md +404 -0
- data/Rakefile +307 -0
- data/ext/concurrent-ruby/ConcurrentRubyService.java +17 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/AtomicReferenceLibrary.java +175 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/JRubyMapBackendLibrary.java +248 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicBooleanLibrary.java +93 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaAtomicFixnumLibrary.java +113 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/JavaSemaphoreLibrary.java +189 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/SynchronizationLibrary.java +307 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMap.java +31 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/ConcurrentHashMapV8.java +3863 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/LongAdder.java +203 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/Striped64.java +342 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/ConcurrentHashMapV8.java +3800 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/LongAdder.java +204 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166e/nounsafe/Striped64.java +291 -0
- data/ext/concurrent-ruby/com/concurrent_ruby/ext/jsr166y/ThreadLocalRandom.java +199 -0
- data/lib/concurrent-ruby/concurrent/agent.rb +587 -0
- data/lib/concurrent-ruby/concurrent/array.rb +66 -0
- data/lib/concurrent-ruby/concurrent/async.rb +449 -0
- data/lib/concurrent-ruby/concurrent/atom.rb +222 -0
- data/lib/concurrent-ruby/concurrent/atomic/abstract_thread_local_var.rb +66 -0
- data/lib/concurrent-ruby/concurrent/atomic/atomic_boolean.rb +126 -0
- data/lib/concurrent-ruby/concurrent/atomic/atomic_fixnum.rb +143 -0
- data/lib/concurrent-ruby/concurrent/atomic/atomic_markable_reference.rb +164 -0
- data/lib/concurrent-ruby/concurrent/atomic/atomic_reference.rb +205 -0
- data/lib/concurrent-ruby/concurrent/atomic/count_down_latch.rb +100 -0
- data/lib/concurrent-ruby/concurrent/atomic/cyclic_barrier.rb +128 -0
- data/lib/concurrent-ruby/concurrent/atomic/event.rb +109 -0
- data/lib/concurrent-ruby/concurrent/atomic/java_count_down_latch.rb +42 -0
- data/lib/concurrent-ruby/concurrent/atomic/java_thread_local_var.rb +37 -0
- data/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_boolean.rb +62 -0
- data/lib/concurrent-ruby/concurrent/atomic/mutex_atomic_fixnum.rb +75 -0
- data/lib/concurrent-ruby/concurrent/atomic/mutex_count_down_latch.rb +44 -0
- data/lib/concurrent-ruby/concurrent/atomic/mutex_semaphore.rb +131 -0
- data/lib/concurrent-ruby/concurrent/atomic/read_write_lock.rb +254 -0
- data/lib/concurrent-ruby/concurrent/atomic/reentrant_read_write_lock.rb +377 -0
- data/lib/concurrent-ruby/concurrent/atomic/ruby_thread_local_var.rb +181 -0
- data/lib/concurrent-ruby/concurrent/atomic/semaphore.rb +166 -0
- data/lib/concurrent-ruby/concurrent/atomic/thread_local_var.rb +104 -0
- data/lib/concurrent-ruby/concurrent/atomic_reference/mutex_atomic.rb +56 -0
- data/lib/concurrent-ruby/concurrent/atomic_reference/numeric_cas_wrapper.rb +28 -0
- data/lib/concurrent-ruby/concurrent/atomics.rb +10 -0
- data/lib/concurrent-ruby/concurrent/collection/copy_on_notify_observer_set.rb +107 -0
- data/lib/concurrent-ruby/concurrent/collection/copy_on_write_observer_set.rb +111 -0
- data/lib/concurrent-ruby/concurrent/collection/java_non_concurrent_priority_queue.rb +84 -0
- data/lib/concurrent-ruby/concurrent/collection/lock_free_stack.rb +158 -0
- data/lib/concurrent-ruby/concurrent/collection/map/atomic_reference_map_backend.rb +927 -0
- data/lib/concurrent-ruby/concurrent/collection/map/mri_map_backend.rb +66 -0
- data/lib/concurrent-ruby/concurrent/collection/map/non_concurrent_map_backend.rb +140 -0
- data/lib/concurrent-ruby/concurrent/collection/map/synchronized_map_backend.rb +82 -0
- data/lib/concurrent-ruby/concurrent/collection/map/truffleruby_map_backend.rb +14 -0
- data/lib/concurrent-ruby/concurrent/collection/non_concurrent_priority_queue.rb +143 -0
- data/lib/concurrent-ruby/concurrent/collection/ruby_non_concurrent_priority_queue.rb +160 -0
- data/lib/concurrent-ruby/concurrent/concern/deprecation.rb +34 -0
- data/lib/concurrent-ruby/concurrent/concern/dereferenceable.rb +73 -0
- data/lib/concurrent-ruby/concurrent/concern/logging.rb +32 -0
- data/lib/concurrent-ruby/concurrent/concern/obligation.rb +220 -0
- data/lib/concurrent-ruby/concurrent/concern/observable.rb +110 -0
- data/lib/concurrent-ruby/concurrent/configuration.rb +188 -0
- data/lib/concurrent-ruby/concurrent/constants.rb +8 -0
- data/lib/concurrent-ruby/concurrent/dataflow.rb +81 -0
- data/lib/concurrent-ruby/concurrent/delay.rb +199 -0
- data/lib/concurrent-ruby/concurrent/errors.rb +69 -0
- data/lib/concurrent-ruby/concurrent/exchanger.rb +352 -0
- data/lib/concurrent-ruby/concurrent/executor/abstract_executor_service.rb +131 -0
- data/lib/concurrent-ruby/concurrent/executor/cached_thread_pool.rb +62 -0
- data/lib/concurrent-ruby/concurrent/executor/executor_service.rb +185 -0
- data/lib/concurrent-ruby/concurrent/executor/fixed_thread_pool.rb +220 -0
- data/lib/concurrent-ruby/concurrent/executor/immediate_executor.rb +66 -0
- data/lib/concurrent-ruby/concurrent/executor/indirect_immediate_executor.rb +44 -0
- data/lib/concurrent-ruby/concurrent/executor/java_executor_service.rb +103 -0
- data/lib/concurrent-ruby/concurrent/executor/java_single_thread_executor.rb +30 -0
- data/lib/concurrent-ruby/concurrent/executor/java_thread_pool_executor.rb +140 -0
- data/lib/concurrent-ruby/concurrent/executor/ruby_executor_service.rb +82 -0
- data/lib/concurrent-ruby/concurrent/executor/ruby_single_thread_executor.rb +21 -0
- data/lib/concurrent-ruby/concurrent/executor/ruby_thread_pool_executor.rb +368 -0
- data/lib/concurrent-ruby/concurrent/executor/safe_task_executor.rb +35 -0
- data/lib/concurrent-ruby/concurrent/executor/serial_executor_service.rb +34 -0
- data/lib/concurrent-ruby/concurrent/executor/serialized_execution.rb +107 -0
- data/lib/concurrent-ruby/concurrent/executor/serialized_execution_delegator.rb +28 -0
- data/lib/concurrent-ruby/concurrent/executor/simple_executor_service.rb +100 -0
- data/lib/concurrent-ruby/concurrent/executor/single_thread_executor.rb +57 -0
- data/lib/concurrent-ruby/concurrent/executor/thread_pool_executor.rb +88 -0
- data/lib/concurrent-ruby/concurrent/executor/timer_set.rb +172 -0
- data/lib/concurrent-ruby/concurrent/executors.rb +20 -0
- data/lib/concurrent-ruby/concurrent/future.rb +141 -0
- data/lib/concurrent-ruby/concurrent/hash.rb +59 -0
- data/lib/concurrent-ruby/concurrent/immutable_struct.rb +101 -0
- data/lib/concurrent-ruby/concurrent/ivar.rb +207 -0
- data/lib/concurrent-ruby/concurrent/map.rb +346 -0
- data/lib/concurrent-ruby/concurrent/maybe.rb +229 -0
- data/lib/concurrent-ruby/concurrent/mutable_struct.rb +239 -0
- data/lib/concurrent-ruby/concurrent/mvar.rb +242 -0
- data/lib/concurrent-ruby/concurrent/options.rb +42 -0
- data/lib/concurrent-ruby/concurrent/promise.rb +580 -0
- data/lib/concurrent-ruby/concurrent/promises.rb +2167 -0
- data/lib/concurrent-ruby/concurrent/re_include.rb +58 -0
- data/lib/concurrent-ruby/concurrent/scheduled_task.rb +331 -0
- data/lib/concurrent-ruby/concurrent/set.rb +74 -0
- data/lib/concurrent-ruby/concurrent/settable_struct.rb +139 -0
- data/lib/concurrent-ruby/concurrent/synchronization/abstract_lockable_object.rb +98 -0
- data/lib/concurrent-ruby/concurrent/synchronization/abstract_object.rb +24 -0
- data/lib/concurrent-ruby/concurrent/synchronization/abstract_struct.rb +171 -0
- data/lib/concurrent-ruby/concurrent/synchronization/condition.rb +60 -0
- data/lib/concurrent-ruby/concurrent/synchronization/jruby_lockable_object.rb +13 -0
- data/lib/concurrent-ruby/concurrent/synchronization/jruby_object.rb +45 -0
- data/lib/concurrent-ruby/concurrent/synchronization/lock.rb +36 -0
- data/lib/concurrent-ruby/concurrent/synchronization/lockable_object.rb +72 -0
- data/lib/concurrent-ruby/concurrent/synchronization/mri_object.rb +44 -0
- data/lib/concurrent-ruby/concurrent/synchronization/mutex_lockable_object.rb +88 -0
- data/lib/concurrent-ruby/concurrent/synchronization/object.rb +183 -0
- data/lib/concurrent-ruby/concurrent/synchronization/rbx_lockable_object.rb +71 -0
- data/lib/concurrent-ruby/concurrent/synchronization/rbx_object.rb +49 -0
- data/lib/concurrent-ruby/concurrent/synchronization/truffleruby_object.rb +47 -0
- data/lib/concurrent-ruby/concurrent/synchronization/volatile.rb +36 -0
- data/lib/concurrent-ruby/concurrent/synchronization.rb +30 -0
- data/lib/concurrent-ruby/concurrent/thread_safe/synchronized_delegator.rb +50 -0
- data/lib/concurrent-ruby/concurrent/thread_safe/util/adder.rb +74 -0
- data/lib/concurrent-ruby/concurrent/thread_safe/util/cheap_lockable.rb +118 -0
- data/lib/concurrent-ruby/concurrent/thread_safe/util/data_structures.rb +88 -0
- data/lib/concurrent-ruby/concurrent/thread_safe/util/power_of_two_tuple.rb +38 -0
- data/lib/concurrent-ruby/concurrent/thread_safe/util/striped64.rb +246 -0
- data/lib/concurrent-ruby/concurrent/thread_safe/util/volatile.rb +75 -0
- data/lib/concurrent-ruby/concurrent/thread_safe/util/xor_shift_random.rb +50 -0
- data/lib/concurrent-ruby/concurrent/thread_safe/util.rb +16 -0
- data/lib/concurrent-ruby/concurrent/timer_task.rb +311 -0
- data/lib/concurrent-ruby/concurrent/tuple.rb +86 -0
- data/lib/concurrent-ruby/concurrent/tvar.rb +221 -0
- data/lib/concurrent-ruby/concurrent/utility/engine.rb +56 -0
- data/lib/concurrent-ruby/concurrent/utility/monotonic_time.rb +90 -0
- data/lib/concurrent-ruby/concurrent/utility/native_extension_loader.rb +79 -0
- data/lib/concurrent-ruby/concurrent/utility/native_integer.rb +53 -0
- data/lib/concurrent-ruby/concurrent/utility/processor_counter.rb +130 -0
- data/lib/concurrent-ruby/concurrent/version.rb +3 -0
- data/lib/concurrent-ruby/concurrent-ruby.rb +5 -0
- data/lib/concurrent-ruby/concurrent.rb +134 -0
- metadata +192 -0
@@ -0,0 +1,368 @@
require 'thread'
require 'concurrent/atomic/event'
require 'concurrent/concern/logging'
require 'concurrent/executor/ruby_executor_service'
require 'concurrent/utility/monotonic_time'

module Concurrent

  # @!macro thread_pool_executor
  # @!macro thread_pool_options
  # @!visibility private
  class RubyThreadPoolExecutor < RubyExecutorService

    # @!macro thread_pool_executor_constant_default_max_pool_size
    DEFAULT_MAX_POOL_SIZE      = 2_147_483_647 # java.lang.Integer::MAX_VALUE

    # @!macro thread_pool_executor_constant_default_min_pool_size
    DEFAULT_MIN_POOL_SIZE      = 0

    # @!macro thread_pool_executor_constant_default_max_queue_size
    DEFAULT_MAX_QUEUE_SIZE     = 0

    # @!macro thread_pool_executor_constant_default_thread_timeout
    DEFAULT_THREAD_IDLETIMEOUT = 60

    # @!macro thread_pool_executor_constant_default_synchronous
    DEFAULT_SYNCHRONOUS        = false

    # @!macro thread_pool_executor_attr_reader_max_length
    attr_reader :max_length

    # @!macro thread_pool_executor_attr_reader_min_length
    attr_reader :min_length

    # @!macro thread_pool_executor_attr_reader_idletime
    attr_reader :idletime

    # @!macro thread_pool_executor_attr_reader_max_queue
    attr_reader :max_queue

    # @!macro thread_pool_executor_attr_reader_synchronous
    attr_reader :synchronous

    # @!macro thread_pool_executor_method_initialize
    def initialize(opts = {})
      super(opts)
    end

    # @!macro thread_pool_executor_attr_reader_largest_length
    def largest_length
      synchronize { @largest_length }
    end

    # @!macro thread_pool_executor_attr_reader_scheduled_task_count
    def scheduled_task_count
      synchronize { @scheduled_task_count }
    end

    # @!macro thread_pool_executor_attr_reader_completed_task_count
    def completed_task_count
      synchronize { @completed_task_count }
    end

    # @!macro executor_service_method_can_overflow_question
    def can_overflow?
      synchronize { ns_limited_queue? }
    end

    # @!macro thread_pool_executor_attr_reader_length
    def length
      synchronize { @pool.length }
    end

    # @!macro thread_pool_executor_attr_reader_queue_length
    def queue_length
      synchronize { @queue.length }
    end

    # @!macro thread_pool_executor_attr_reader_remaining_capacity
    def remaining_capacity
      synchronize do
        if ns_limited_queue?
          @max_queue - @queue.length
        else
          -1
        end
      end
    end

    # @!visibility private
    def remove_busy_worker(worker)
      synchronize { ns_remove_busy_worker worker }
    end

    # @!visibility private
    def ready_worker(worker, last_message)
      synchronize { ns_ready_worker worker, last_message }
    end

    # @!visibility private
    def worker_died(worker)
      synchronize { ns_worker_died worker }
    end

    # @!visibility private
    def worker_task_completed
      synchronize { @completed_task_count += 1 }
    end

    # @!macro thread_pool_executor_method_prune_pool
    def prune_pool
      synchronize { ns_prune_pool }
    end

    private

    # @!visibility private
    def ns_initialize(opts)
      @min_length      = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i
      @max_length      = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i
      @idletime        = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i
      @max_queue       = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i
      @synchronous     = opts.fetch(:synchronous, DEFAULT_SYNCHRONOUS)
      @fallback_policy = opts.fetch(:fallback_policy, :abort)

      raise ArgumentError.new("`synchronous` cannot be set unless `max_queue` is 0") if @synchronous && @max_queue > 0
      raise ArgumentError.new("#{@fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICIES.include?(@fallback_policy)
      raise ArgumentError.new("`max_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if @max_length < DEFAULT_MIN_POOL_SIZE
      raise ArgumentError.new("`max_threads` cannot be greater than #{DEFAULT_MAX_POOL_SIZE}") if @max_length > DEFAULT_MAX_POOL_SIZE
      raise ArgumentError.new("`min_threads` cannot be less than #{DEFAULT_MIN_POOL_SIZE}") if @min_length < DEFAULT_MIN_POOL_SIZE
      raise ArgumentError.new("`min_threads` cannot be more than `max_threads`") if min_length > max_length

      @pool                 = [] # all workers
      @ready                = [] # used as a stash (most idle worker is at the start)
      @queue                = [] # used as queue
      # @ready or @queue is empty at all times
      @scheduled_task_count = 0
      @completed_task_count = 0
      @largest_length       = 0
      @workers_counter      = 0
      @ruby_pid             = $$ # detects if Ruby has forked

      @gc_interval  = opts.fetch(:gc_interval, @idletime / 2.0).to_i # undocumented
      @next_gc_time = Concurrent.monotonic_time + @gc_interval
    end

    # @!visibility private
    def ns_limited_queue?
      @max_queue != 0
    end

    # @!visibility private
    def ns_execute(*args, &task)
      ns_reset_if_forked

      if ns_assign_worker(*args, &task) || ns_enqueue(*args, &task)
        @scheduled_task_count += 1
      else
        return fallback_action(*args, &task)
      end

      ns_prune_pool if @next_gc_time < Concurrent.monotonic_time
      nil
    end

    # @!visibility private
    def ns_shutdown_execution
      ns_reset_if_forked

      if @pool.empty?
        # nothing to do
        stopped_event.set
      end

      if @queue.empty?
        # no more tasks will be accepted, just stop all workers
        @pool.each(&:stop)
      end
    end

    # @!visibility private
    def ns_kill_execution
      # TODO log out unprocessed tasks in queue
      # TODO try to shutdown first?
      @pool.each(&:kill)
      @pool.clear
      @ready.clear
    end

    # tries to assign the task to a worker, taking one from @ready or creating a new one
    # @return [true, false] whether the task was assigned to a worker
    #
    # @!visibility private
    def ns_assign_worker(*args, &task)
      # keep growing if the pool is not at the minimum yet
      worker, _ = (@ready.pop if @pool.size >= @min_length) || ns_add_busy_worker
      if worker
        worker << [task, args]
        true
      else
        false
      end
    rescue ThreadError
      # Raised when the operating system refuses to create the new thread
      return false
    end

    # tries to enqueue the task
    # @return [true, false] whether the task was enqueued
    #
    # @!visibility private
    def ns_enqueue(*args, &task)
      return false if @synchronous

      if !ns_limited_queue? || @queue.size < @max_queue
        @queue << [task, args]
        true
      else
        false
      end
    end

    # @!visibility private
    def ns_worker_died(worker)
      ns_remove_busy_worker worker
      replacement_worker = ns_add_busy_worker
      ns_ready_worker replacement_worker, Concurrent.monotonic_time, false if replacement_worker
    end

    # creates a new worker which has to receive work to do after it's added
    # @return [nil, Worker] nil if max capacity is reached
    #
    # @!visibility private
    def ns_add_busy_worker
      return if @pool.size >= @max_length

      @workers_counter += 1
      @pool << (worker = Worker.new(self, @workers_counter))
      @largest_length = @pool.length if @pool.length > @largest_length
      worker
    end

    # handles a ready worker, giving it a new job or returning it to @ready
    #
    # @!visibility private
    def ns_ready_worker(worker, last_message, success = true)
      task_and_args = @queue.shift
      if task_and_args
        worker << task_and_args
      else
        # stop workers when !running?, do not return them to @ready
        if running?
          raise unless last_message
          @ready.push([worker, last_message])
        else
          worker.stop
        end
      end
    end

    # removes a worker which is not tracked in @ready
    #
    # @!visibility private
    def ns_remove_busy_worker(worker)
      @pool.delete(worker)
      stopped_event.set if @pool.empty? && !running?
      true
    end

    # checks the oldest ready workers and stops those that have been idle for longer
    # than the idle timeout; the rest stay at the start of @ready
    #
    # @!visibility private
    def ns_prune_pool
      now = Concurrent.monotonic_time
      stopped_workers = 0
      while !@ready.empty? && (@pool.size - stopped_workers > @min_length)
        worker, last_message = @ready.first
        if now - last_message > self.idletime
          stopped_workers += 1
          @ready.shift
          worker << :stop
        else break
        end
      end

      @next_gc_time = Concurrent.monotonic_time + @gc_interval
    end

    def ns_reset_if_forked
      if $$ != @ruby_pid
        @queue.clear
        @ready.clear
        @pool.clear
        @scheduled_task_count = 0
        @completed_task_count = 0
        @largest_length       = 0
        @workers_counter      = 0
        @ruby_pid             = $$
      end
    end

    # @!visibility private
    class Worker
      include Concern::Logging

      def initialize(pool, id)
        # instance variables accessed only under pool's lock so no need to sync here again
        @queue  = Queue.new
        @pool   = pool
        @thread = create_worker @queue, pool, pool.idletime

        if @thread.respond_to?(:name=)
          @thread.name = [pool.name, 'worker', id].compact.join('-')
        end
      end

      def <<(message)
        @queue << message
      end

      def stop
        @queue << :stop
      end

      def kill
        @thread.kill
      end

      private

      def create_worker(queue, pool, idletime)
        Thread.new(queue, pool, idletime) do |my_queue, my_pool, my_idletime|
          catch(:stop) do
            loop do

              case message = my_queue.pop
              when :stop
                my_pool.remove_busy_worker(self)
                throw :stop

              else
                task, args = message
                run_task my_pool, task, args
                my_pool.ready_worker(self, Concurrent.monotonic_time)
              end
            end
          end
        end
      end

      def run_task(pool, task, args)
        task.call(*args)
        pool.worker_task_completed
      rescue NoMemoryError, Interrupt, SystemExit
        # fatal errors must not be swallowed here; re-raise them
        raise
      rescue => ex
        # let it fail
        log ERROR, ex
      rescue Exception => ex
        log ERROR, ex
        pool.worker_died(self)
        throw :stop
      end
    end

    private_constant :Worker
  end
end
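For orientation, a minimal usage sketch (not shipped in the package) of the pool above, driven through the public `Concurrent::ThreadPoolExecutor` constructor, which on MRI is backed by `RubyThreadPoolExecutor`; the option names map onto the keys read by `ns_initialize`:

    require 'concurrent'

    # min_threads/max_threads/max_queue/fallback_policy are the opts
    # consumed by ns_initialize above.
    pool = Concurrent::ThreadPoolExecutor.new(
      min_threads:     2,
      max_threads:     4,
      max_queue:       100,
      fallback_policy: :caller_runs # run in the posting thread when saturated
    )

    10.times { |i| pool.post(i) { |n| puts "task #{n} on #{Thread.current.name}" } }

    pool.shutdown                # stop accepting new tasks
    pool.wait_for_termination(5) # wait up to five seconds for queued work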
@@ -0,0 +1,35 @@
require 'concurrent/synchronization'

module Concurrent

  # A simple utility class that executes a callable and returns an array of three elements:
  # success - indicating if the callable has been executed without errors
  # value - filled by the callable result if it has been executed without errors, nil otherwise
  # reason - the error raised by the callable if it has been executed with errors, nil otherwise
  class SafeTaskExecutor < Synchronization::LockableObject

    def initialize(task, opts = {})
      @task            = task
      @exception_class = opts.fetch(:rescue_exception, false) ? Exception : StandardError
      super() # ensures visibility
    end

    # @return [Array]
    def execute(*args)
      success = true
      value   = reason = nil

      synchronize do
        begin
          value   = @task.call(*args)
          success = true
        rescue @exception_class => ex
          reason  = ex
          success = false
        end
      end

      [success, value, reason]
    end
  end
end
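As a quick illustration (not part of the package itself, and assuming `require 'concurrent'` loads the class as it does for the gem's other executor helpers), the three-element array described in the class comment can be exercised like this:

    require 'concurrent'

    # A task that succeeds:
    doubler = Concurrent::SafeTaskExecutor.new(->(x) { x * 2 })
    success, value, reason = doubler.execute(21)
    # success #=> true, value #=> 42, reason #=> nil

    # A task that raises; StandardError is captured by default:
    failing = Concurrent::SafeTaskExecutor.new(-> { raise ArgumentError, 'boom' })
    success, value, reason = failing.execute
    # success #=> false, value #=> nil, reason #=> #<ArgumentError: boom>

    # With rescue_exception: true even non-StandardError exceptions are captured:
    Concurrent::SafeTaskExecutor.new(-> { raise NotImplementedError }, rescue_exception: true).execute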
@@ -0,0 +1,34 @@
require 'concurrent/executor/executor_service'

module Concurrent

  # Indicates that the including `ExecutorService` guarantees
  # that all operations will occur in the order they are posted and that no
  # two operations may occur simultaneously. This module provides no
  # functionality and provides no guarantees. That is the responsibility
  # of the including class. This module exists solely to allow the including
  # object to be interrogated for its serialization status.
  #
  # @example
  #   class Foo
  #     include Concurrent::SerialExecutor
  #   end
  #
  #   foo = Foo.new
  #
  #   foo.is_a? Concurrent::ExecutorService #=> true
  #   foo.is_a? Concurrent::SerialExecutor  #=> true
  #   foo.serialized?                       #=> true
  #
  # @!visibility private
  module SerialExecutorService
    include ExecutorService

    # @!macro executor_service_method_serialized_question
    #
    # @note Always returns `true`
    def serialized?
      true
    end
  end
end
@@ -0,0 +1,107 @@
require 'concurrent/errors'
require 'concurrent/concern/logging'
require 'concurrent/synchronization'

module Concurrent

  # Ensures that jobs are passed to the underlying executors in a serialized
  # order, never running at the same time.
  class SerializedExecution < Synchronization::LockableObject
    include Concern::Logging

    def initialize()
      super()
      synchronize { ns_initialize }
    end

    Job = Struct.new(:executor, :args, :block) do
      def call
        block.call(*args)
      end
    end

    # Submit a task to the executor for asynchronous processing.
    #
    # @param [Executor] executor to be used for this job
    #
    # @param [Array] args zero or more arguments to be passed to the task
    #
    # @yield the asynchronous task to perform
    #
    # @return [Boolean] `true` if the task is queued, `false` if the executor
    #   is not running
    #
    # @raise [ArgumentError] if no task is given
    def post(executor, *args, &task)
      posts [[executor, args, task]]
      true
    end

    # As {#post} but allows submitting multiple tasks at once; it is guaranteed
    # that they will not be interleaved by other tasks.
    #
    # @param [Array<Array(ExecutorService, Array<Object>, Proc)>] posts array of triplets where
    #   first is a {ExecutorService}, second is array of args for task, third is a task (Proc)
    def posts(posts)
      # if can_overflow?
      #   raise ArgumentError, 'SerializedExecution does not support thread-pools which can overflow'
      # end

      return nil if posts.empty?

      jobs = posts.map { |executor, args, task| Job.new executor, args, task }

      job_to_post = synchronize do
        if @being_executed
          @stash.push(*jobs)
          nil
        else
          @being_executed = true
          @stash.push(*jobs[1..-1])
          jobs.first
        end
      end

      call_job job_to_post if job_to_post
      true
    end

    private

    def ns_initialize
      @being_executed = false
      @stash          = []
    end

    def call_job(job)
      did_it_run = begin
        job.executor.post { work(job) }
        true
      rescue RejectedExecutionError => ex
        false
      end

      # TODO not the best idea to run it myself
      unless did_it_run
        begin
          work job
        rescue => ex
          # let it fail
          log DEBUG, ex
        end
      end
    end

    # ensures next job is executed if any is stashed
    def work(job)
      job.call
    ensure
      synchronize do
        job = @stash.shift || (@being_executed = false)
      end

      # TODO maybe be able to tell the caching pool to just enqueue this job, because the
      # current one ends at the end of this block
      call_job job if job
    end
  end
end
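A short usage sketch (not shipped in the package) of the posting API above, assuming an ordinary pool from the same gem as the underlying executor:

    require 'concurrent'

    pool       = Concurrent::FixedThreadPool.new(4)
    serializer = Concurrent::SerializedExecution.new

    # Tasks funnelled through the serializer run one at a time and in the
    # order posted, even though the pool itself has four threads.
    log = Queue.new
    10.times { |i| serializer.post(pool, i) { |n| log << n } }

    # posts enqueues several jobs atomically, so no other job posted through
    # this serializer can interleave with them.
    serializer.posts([[pool, [:a], ->(x) { log << x }],
                      [pool, [:b], ->(x) { log << x }]])

    pool.shutdown
    pool.wait_for_termination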
@@ -0,0 +1,28 @@
require 'delegate'
require 'concurrent/executor/serial_executor_service'
require 'concurrent/executor/serialized_execution'

module Concurrent

  # A wrapper/delegator for any `ExecutorService` that
  # guarantees serialized execution of tasks.
  #
  # @see [SimpleDelegator](http://www.ruby-doc.org/stdlib-2.1.2/libdoc/delegate/rdoc/SimpleDelegator.html)
  # @see Concurrent::SerializedExecution
  class SerializedExecutionDelegator < SimpleDelegator
    include SerialExecutorService

    def initialize(executor)
      @executor   = executor
      @serializer = SerializedExecution.new
      super(executor)
    end

    # @!macro executor_service_method_post
    def post(*args, &task)
      raise ArgumentError.new('no block given') unless block_given?
      return false unless running?
      @serializer.post(@executor, *args, &task)
    end
  end
end
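For context (not shipped in the package), wrapping any executor in the delegator gives it the `SerialExecutorService` contract while everything else is forwarded by `SimpleDelegator`:

    require 'concurrent'

    pool   = Concurrent::CachedThreadPool.new
    serial = Concurrent::SerializedExecutionDelegator.new(pool)

    serial.serialized?                   #=> true, via SerialExecutorService
    serial.post { puts 'one at a time' } # tasks run serially, in posting order

    # shutdown, wait_for_termination, etc. are delegated to the wrapped pool.
    serial.shutdown
    serial.wait_for_termination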
@@ -0,0 +1,100 @@
require 'concurrent/atomics'
require 'concurrent/executor/executor_service'

module Concurrent

  # An executor service in which every operation spawns a new,
  # independently operating thread.
  #
  # This is perhaps the most inefficient executor service in this
  # library. It exists mainly for testing and debugging. Thread creation
  # and management is expensive in Ruby and this executor performs no
  # resource pooling. This can be very beneficial during testing and
  # debugging because it decouples the calling code from the underlying
  # executor implementation. In production this executor will likely
  # lead to suboptimal performance.
  #
  # @note Intended for use primarily in testing and debugging.
  class SimpleExecutorService < RubyExecutorService

    # @!macro executor_service_method_post
    def self.post(*args)
      raise ArgumentError.new('no block given') unless block_given?
      Thread.new(*args) do
        Thread.current.abort_on_exception = false
        yield(*args)
      end
      true
    end

    # @!macro executor_service_method_left_shift
    def self.<<(task)
      post(&task)
      self
    end

    # @!macro executor_service_method_post
    def post(*args, &task)
      raise ArgumentError.new('no block given') unless block_given?
      return false unless running?
      @count.increment
      Thread.new(*args) do
        Thread.current.abort_on_exception = false
        begin
          yield(*args)
        ensure
          @count.decrement
          @stopped.set if @running.false? && @count.value == 0
        end
      end
    end

    # @!macro executor_service_method_left_shift
    def <<(task)
      post(&task)
      self
    end

    # @!macro executor_service_method_running_question
    def running?
      @running.true?
    end

    # @!macro executor_service_method_shuttingdown_question
    def shuttingdown?
      @running.false? && ! @stopped.set?
    end

    # @!macro executor_service_method_shutdown_question
    def shutdown?
      @stopped.set?
    end

    # @!macro executor_service_method_shutdown
    def shutdown
      @running.make_false
      @stopped.set if @count.value == 0
      true
    end

    # @!macro executor_service_method_kill
    def kill
      @running.make_false
      @stopped.set
      true
    end

    # @!macro executor_service_method_wait_for_termination
    def wait_for_termination(timeout = nil)
      @stopped.wait(timeout)
    end

    private

    def ns_initialize(*args)
      @running = Concurrent::AtomicBoolean.new(true)
      @stopped = Concurrent::Event.new
      @count   = Concurrent::AtomicFixnum.new(0)
    end
  end
end
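A small illustration (not shipped in the package) of the one-thread-per-task behaviour described above; as the class comment says, this is intended for testing and debugging rather than production use:

    require 'concurrent'

    executor = Concurrent::SimpleExecutorService.new

    # Every post spawns a brand-new thread; nothing is pooled or queued.
    5.times { |i| executor.post(i) { |n| puts "job #{n} on #{Thread.current}" } }

    executor.shutdown                # makes running? false; @stopped is set once drained
    executor.wait_for_termination(1) # waits on the internal Event with a timeout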