contender 0.1.1 → 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/lib/contender.rb +45 -1
- data/lib/contender/copy_on_write_array.rb +19 -0
- data/lib/contender/copy_on_write_set.rb +17 -0
- data/lib/contender/copy_on_write_structure.rb +91 -0
- data/lib/contender/countdown_latch.rb +7 -10
- data/lib/contender/counter.rb +30 -0
- data/lib/contender/direct_executor.rb +5 -2
- data/lib/contender/errors.rb +23 -4
- data/lib/contender/executor.rb +5 -4
- data/lib/contender/executor_service.rb +94 -0
- data/lib/contender/future.rb +37 -0
- data/lib/contender/future_task.rb +99 -0
- data/lib/contender/linked_queue.rb +338 -0
- data/lib/contender/pool.rb +3 -3
- data/lib/contender/pool/pool_executor.rb +697 -0
- data/lib/contender/pool/pool_worker.rb +19 -60
- data/lib/contender/pool/rejection_policy.rb +61 -0
- data/lib/contender/queue.rb +87 -0
- data/lib/contender/thread_factory.rb +37 -0
- data/lib/contender/version.rb +1 -1
- data/spec/copy_on_write_array_spec.rb +20 -0
- data/spec/copy_on_write_set_spec.rb +25 -0
- data/spec/countdown_latch_spec.rb +136 -0
- data/spec/counter_spec.rb +26 -0
- data/spec/direct_executor_spec.rb +21 -0
- data/spec/future_task_spec.rb +170 -0
- data/spec/linked_queue_spec.rb +25 -0
- data/spec/pool/executor_spec.rb +436 -0
- data/spec/pool/executor_stress_spec.rb +31 -0
- data/spec/spec_helper.rb +11 -0
- data/spec/wait_helper.rb +14 -0
- metadata +149 -22
- data/lib/contender/pool/task.rb +0 -52
- data/lib/contender/pool/task_queue.rb +0 -80
- data/lib/contender/pool/thread_pool_executor.rb +0 -167
@@ -0,0 +1,99 @@
|
|
1
|
+
module Contender
  # A cancellable, one-shot asynchronous computation. The task moves through
  # the states :ready -> :running -> :ran (or :cancelled), guarded by a single
  # mutex; waiters block on a condition variable that is broadcast on completion
  # or cancellation.
  #
  # NOTE(review): Future, InterruptError, ExecutionError, TimeoutError and
  # CancellationError are defined elsewhere in this library.
  class FutureTask < Future
    # @param [Object] callable Object responding to #call that produces the result
    # @return [undefined]
    def initialize(callable)
      @callable = callable

      @mutex = Mutex.new
      @condition = ConditionVariable.new

      # Lifecycle state; all reads/writes that matter for coordination
      # happen while holding @mutex
      @state = :ready
    end

    # Attempts to cancel this task before or during execution
    #
    # @api public
    # @param [Boolean] should_interrupt Whether a running task may be interrupted
    # @return [Boolean] True if this future was cancelled
    def cancel(should_interrupt)
      @mutex.synchronize do
        # Already ran or already cancelled; nothing to do
        return false if done?

        if @state == :running
          return false unless should_interrupt
          # Interrupt the executing thread by raising into it
          @thread.raise InterruptError
        end

        @state = :cancelled
        # Wake all threads blocked in #result
        @condition.broadcast
      end

      return true
    end

    # @api public
    # @return [Boolean]
    def cancelled?
      @state == :cancelled
    end

    # @api public
    # @return [Boolean]
    def done?
      @state == :ran || @state == :cancelled
    end

    # Blocks until the task completes, is cancelled, or the timeout elapses
    #
    # @api public
    # @raise [ExecutionError] If the result of the operation was an exception
    # @raise [TimeoutError] If the timeout was reached before the operation completed
    # @raise [CancellationError] If the operation was cancelled
    # @param [Integer] timeout Time to wait for the result
    # @return [Object] The result of the future
    def result(timeout = nil)
      @mutex.synchronize do
        unless done?
          # With a nil timeout this waits until #call or #cancel broadcasts.
          # NOTE(review): a single wait/re-check — a spurious wakeup before the
          # deadline would raise TimeoutError early; confirm acceptable.
          @condition.wait @mutex, timeout

          unless done?
            raise TimeoutError, 'The operation did not complete before the given timeout'
          end
        end

        if @state == :cancelled
          raise CancellationError, 'Task was cancelled before it could be completed'
        end
      end

      # Safe to read outside the mutex: once done, these no longer change
      if @exception
        raise ExecutionError, @exception
      end

      @result
    end

    # Runs the callable exactly once; subsequent calls are no-ops
    #
    # @api public
    # @return [undefined]
    def call
      @mutex.synchronize do
        # Only a :ready task may start; guards against double execution
        return unless @state == :ready

        @state = :running
        @thread = Thread.current
      end

      begin
        @result = @callable.call
      rescue => exception
        # Captured and re-raised to waiters as ExecutionError in #result
        @exception = exception
      end

      @mutex.synchronize do
        # A concurrent #cancel may have moved us out of :running
        return unless @state == :running

        @state = :ran
        @condition.broadcast
      end
    ensure
      @thread = nil
    end
  end # FutureTask
end
|
@@ -0,0 +1,338 @@
|
|
1
|
+
module Contender
  # Optionally-bounded FIFO queue backed by a singly-linked list, using the
  # two-lock (put lock / take lock) algorithm from Doug Lea's
  # LinkedBlockingQueue: producers and consumers contend on separate monitors,
  # coordinated through an atomic element counter.
  class LinkedQueue < Queue
    # @param [Integer] capacity Maximum number of elements the queue may hold
    # @raise [ArgumentError] If the capacity is not positive
    def initialize(capacity = FIXNUM_MAX)
      raise ArgumentError if capacity <= 0

      @capacity = capacity
      # Invariant: head.item == nil
      # Invariant: tail.next == nil
      @head = @tail = Node.new
      @count = Counter.new

      @take_lock = Monitor.new
      @not_empty = @take_lock.new_cond
      @put_lock = Monitor.new
      @not_full = @put_lock.new_cond
    end

    # Inserts the element, optionally waiting up to the timeout for capacity
    #
    # @param [Object] element
    # @param [Float] timeout
    # @return [Boolean] True if the element was enqueued
    def offer(element, timeout = nil)
      validate_element element

      count = 0
      node = Node.new element

      @put_lock.synchronize do
        if size == @capacity
          return false unless timeout

          @not_full.wait timeout
          # Re-check once after the timed wait; still full means we give up
          return false if size == @capacity
        end

        enqueue node
        count = @count.increment
        if count < @capacity
          # Still room left; cascade the signal to other waiting producers
          @not_full.signal
        end
      end

      if count > 0
        signal_not_empty
      end

      return true
    end

    # Inserts the element, blocking until capacity is available
    #
    # @param [Object] element
    # @return [undefined]
    def put(element)
      validate_element element

      count = 0
      node = Node.new element

      @put_lock.synchronize do
        while size == @capacity
          @not_full.wait
        end

        enqueue node
        count = @count.increment
        if count < @capacity
          @not_full.signal
        end
      end

      if count > 0
        signal_not_empty
      end
    end

    # Removes the head element, optionally waiting up to the timeout
    #
    # @param [Float] timeout
    # @return [Object] The head element, or nil
    def poll(timeout = nil)
      count = 0
      element = nil

      @take_lock.synchronize do
        if size == 0
          return unless timeout

          @not_empty.wait timeout
          return if size == 0
        end

        element = dequeue
        count = @count.decrement

        if count > 0
          # More elements remain; cascade the signal to other consumers
          @not_empty.signal
        end
      end

      if count < @capacity
        signal_not_full
      end

      return element
    end

    # Removes the head element, blocking until one is available
    #
    # @return [Object]
    def take
      count = 0
      element = nil

      @take_lock.synchronize do
        while size == 0
          @not_empty.wait
        end

        element = dequeue
        count = @count.decrement

        if count > 0
          @not_empty.signal
        end
      end

      if count < @capacity
        signal_not_full
      end

      return element
    end

    # Returns the head element without removing it
    #
    # @return [Object]
    def peek
      return if size == 0

      @take_lock.synchronize do
        first = @head.next
        return first.element if first
      end
    end

    # @return [undefined]
    def clear
      raise NotImplementedError
    end

    # Iterates over a snapshot of the queue contents
    #
    # @yield [Object]
    # @return [undefined]
    def each(&block)
      to_a.each &block
    end

    # Removes a single instance of the given element, if present
    #
    # @param [Object] element
    # @return [Boolean] True if an element was removed
    def delete(element)
      return false if element.nil?

      full_synchronize do
        trail = @head
        interior = trail.next

        while interior
          if element == interior.element
            unlink interior, trail
            return true
          end

          trail = interior
          interior = interior.next
        end
      end

      return false
    end

    # Atomically removes up to max_elements from this queue and appends them
    # to the target collection
    #
    # @param [Array] target
    # @param [Integer] max_elements
    # @return [Integer] Number of elements transferred
    def drain_to(target, max_elements = FIXNUM_MAX)
      should_signal = false

      @take_lock.synchronize do
        n = [max_elements, size].min
        i = 0

        head = @head

        begin
          while i < n
            interior = head.next
            # FIX: was `target.add` — Array (the documented target type, and
            # what PoolExecutor#shutdown! passes) has no #add method
            target.push interior.element!
            head.next = head
            head = interior

            i += 1
          end

          return n
        ensure
          if i > 0
            @head = head
            # FIX: was bare `capacity` (undefined method) — must read @capacity.
            # Signal producers only when the queue was full before the drain.
            should_signal = @count.get_and_add(-i) == @capacity
          end
        end
      end
    ensure
      signal_not_full if should_signal
    end

    # @param [Object] element
    # @return [Boolean]
    def include?(element)
      return false if element.nil?

      full_synchronize do
        current = @head.next
        while current
          return true if element == current.element
          current = current.next
        end
      end

      return false
    end

    # @return [Integer]
    def capacity_remaining
      @capacity - @count.get
    end

    # @return [Integer]
    def size
      @count.get
    end

    # Snapshot of the queue contents in FIFO order
    #
    # @return [Array]
    def to_a
      full_synchronize do
        # FIX: was `Array.new size`, which pre-filled the result with `size`
        # nils before pushing the real elements onto the end
        result = Array.new

        current = @head.next
        while current
          result.push current.element
          current = current.next
        end

        result
      end
    end

    protected

    # Holds both locks for operations that must see a consistent whole queue.
    # Acquisition order (take then put) is the same for all callers.
    #
    # @yield
    # @return [undefined]
    def full_synchronize
      @take_lock.mon_enter
      @put_lock.mon_enter

      begin
        yield
      ensure
        @take_lock.mon_exit
        @put_lock.mon_exit
      end
    end

    private

    def validate_element(element)
      if element.nil?
        raise ArgumentError, 'This queue does not support nil elements'
      end
    end

    def signal_not_empty
      @take_lock.synchronize do
        @not_empty.signal
      end
    end

    def signal_not_full
      @put_lock.synchronize do
        @not_full.signal
      end
    end

    # Must have full lock
    def unlink(interior, trail)
      # Release the reference to the element
      interior.clear_element
      trail.next = interior.next

      if @tail == interior
        @tail = trail
      end

      if @count.decrement < @capacity
        @not_full.signal
      end
    end

    # Must have put lock
    def enqueue(node)
      @tail = @tail.next = node
    end

    # Must have take lock
    def dequeue
      head = @head
      first = head.next
      head.next = head # help GC

      @head = first

      first.element!
    end

    # Definition of a node in the linked list
    class Node
      # @return [Object]
      attr_accessor :element
      # @return [Node]
      attr_accessor :next

      def initialize(element = nil)
        @element = element
      end

      # Returns the element and clears the node's reference to it
      def element!
        @element.tap do
          @element = nil
        end
      end

      def clear_element
        @element = nil
      end

      def clear_next
        @next = nil
      end
    end # Node
  end # LinkedQueue
end
|
data/lib/contender/pool.rb
CHANGED
@@ -0,0 +1,697 @@
|
|
1
|
+
module Contender
  module Pool
    # Executor that uses a thread pool to execute tasks asynchronously
    #
    # Made idiomatic by Ian Unruh
    # Original author is Doug Lea, from the JDK
    #
    # Pool state and worker count are packed into a single atomic PoolControl
    # value so they can be updated together with compare-and-swap; the monitor
    # guards the worker set and aggregate statistics.
    class PoolExecutor < ExecutorService
      # @return [Integer]
      attr_reader :core_size

      # @return [Integer]
      attr_reader :maximum_size

      # @return [Integer]
      attr_reader :largest_size

      # @return [Boolean]
      attr_reader :allow_core_timeout

      # @return [Float]
      attr_reader :work_timeout

      # @return [ThreadFactory]
      attr_reader :thread_factory

      # @return [Queue]
      attr_reader :queue

      # @return [RejectionPolicy]
      attr_accessor :rejection_policy

      # @raise [ArgumentError]
      #   If the core size is less than zero
      # @raise [ArgumentError]
      #   If the maximum size is less than one or less than the core size
      # @raise [ArgumentError]
      #   If the work timeout is less than zero
      #
      # @param [Integer] core_size
      # @param [Integer] maximum_size
      # @param [Float] work_timeout
      # @param [Queue] queue
      #   Used to hold tasks that will be executed by the thread pool
      # @param [ThreadFactory] thread_factory
      #   Used to create threads for new pool workers
      #
      # @return [undefined]
      def initialize(core_size, maximum_size, work_timeout, queue, thread_factory)
        raise ArgumentError if core_size < 0
        raise ArgumentError if maximum_size <= 0 || maximum_size < core_size
        raise ArgumentError if work_timeout < 0

        @queue = queue
        @thread_factory = thread_factory
        # NOTE(review): AbortPolicy is presumably defined in rejection_policy.rb
        @rejection_policy = AbortPolicy.new

        @core_size = core_size
        @maximum_size = maximum_size
        @allow_core_timeout = false
        @work_timeout = work_timeout

        @control = Atomic.new(control_for(RUNNING, 0))

        @monitor = Monitor.new
        @termination = @monitor.new_cond

        # The following instance variables are guarded by the monitor
        @workers = Set.new

        @largest_size = 0
        @completed_task_count = 0
      end

      # Executes the task on a pool thread, starting a new worker or enqueueing
      # the task as capacity allows
      #
      # @api public
      # @raise [TaskRejectionError] If task was rejected by the thread pool
      # @param [Object] task
      # @return [undefined]
      def execute(task = nil, &block)
        task ||= block

        raise ArgumentError unless task

        return if try_core_worker task

        # Core worker couldn't be started with the task; enqueue it instead
        control = current_control

        if control.state == RUNNING && @queue.offer(task)
          after_task_enqueue task
          return
        end

        # Either the pool is shutting down or the queue is full
        unless add_worker false, task
          reject task
        end
      end

      # @!group State management

      # Initiates an orderly shutdown: queued tasks still run, idle workers
      # are interrupted, and no new tasks are accepted
      #
      # @api public
      # @return [undefined]
      def shutdown
        synchronize do
          advance_state_to SHUTDOWN
          interrupt_idle_workers
          on_shutdown
        end

        try_terminate
      end

      # Attempts to stop immediately, draining and returning unstarted tasks
      #
      # @api public
      # @return [Array]
      def shutdown!
        tasks = Array.new

        synchronize do
          advance_state_to STOP
          interrupt_workers

          @queue.drain_to tasks
        end

        try_terminate
        tasks
      end

      # @api public
      # @param [Float] timeout
      # @return [Boolean]
      def await_termination(timeout)
        synchronize do
          return true if terminated?

          @termination.wait timeout
          terminated?
        end
      end

      # @api public
      # @return [Boolean]
      def shutdown?
        current_control.state != RUNNING
      end

      # @api public
      # @return [Boolean]
      def terminating?
        state = current_control.state
        state > RUNNING && state < TERMINATED
      end

      # @api public
      # @return [Boolean]
      def terminated?
        current_control.state == TERMINATED
      end

      # @!endgroup

      # @!group Pool size management

      # Starts a single core worker if below the core size
      #
      # @api public
      # @return [Boolean]
      def prestart
        current_control.worker_count < @core_size && add_worker(true)
      end

      # Starts core workers until the core size is reached
      #
      # @api public
      # @return [Integer] Number of workers started
      def prestart!
        count = 0

        while add_worker true
          count += 1
        end

        count
      end

      # @api public
      # @param [Integer] core_size
      # @return [undefined]
      def core_size=(core_size)
        raise ArgumentError if core_size < 0

        if @maximum_size < core_size
          @maximum_size = core_size
        end

        delta = core_size - @core_size
        @core_size = core_size

        if current_control.worker_count > core_size
          # Shrinking: surplus idle workers will cull themselves
          interrupt_idle_workers
        elsif delta > 0
          k = [delta, backlog].min

          # Expand the core size to meet the needs of the work backlog
          loop do
            break unless k > 0
            break unless add_worker false

            k -= 1

            break if @queue.empty?
          end
        end
      end

      # @api public
      # @param [Integer] maximum_size
      # @return [undefined]
      def maximum_size=(maximum_size)
        raise ArgumentError if maximum_size <= 0 || maximum_size < @core_size

        @maximum_size = maximum_size

        if current_control.worker_count > maximum_size
          interrupt_idle_workers
        end
      end

      # @api public
      # @param [Boolean] allow_core_timeout
      # @return [undefined]
      def allow_core_timeout=(allow_core_timeout)
        raise ArgumentError if allow_core_timeout && @work_timeout <= 0

        return if allow_core_timeout == @allow_core_timeout

        @allow_core_timeout = allow_core_timeout

        if allow_core_timeout
          # Wake idle core workers so they can begin using timed waits
          interrupt_idle_workers
        end
      end

      # @api public
      # @param [Float] work_timeout
      # @return [undefined]
      def work_timeout=(work_timeout)
        raise ArgumentError if work_timeout < 0
        raise ArgumentError if work_timeout == 0 && @allow_core_timeout

        delta = work_timeout - @work_timeout
        @work_timeout = work_timeout

        if delta < 0
          # Timeout shortened; wake idle workers so the new value takes effect
          interrupt_idle_workers
        end
      end

      # @!endgroup

      # @!group Work queue management

      # @api public
      # @param [Object] task
      # @return [Boolean]
      def remove(task)
        removed = @queue.delete task
        try_terminate
        removed
      end

      # @!endgroup

      # @!group Statistics

      # @api public
      # @return [Integer]
      def backlog
        @queue.size
      end

      # @api public
      # @return [Integer]
      def current_size
        synchronize do
          return 0 if current_control.state > STOP
          return @workers.size
        end
      end

      # @api public
      # @return [Integer]
      def active_count
        synchronize do
          # A locked worker is one currently executing a task
          @workers.count do |worker|
            worker.locked?
          end
        end
      end

      # @api public
      # @return [Integer]
      def total_tasks
        completed_tasks + backlog
      end

      # @api public
      # @return [Integer]
      def completed_tasks
        synchronize do
          total = @completed_task_count

          @workers.each do |worker|
            total += worker.completed_task_count
          end

          total
        end
      end

      # @!endgroup

      # @api public
      # @return [String]
      def to_s
        state = current_control.state

        state_text = "Running" if state == RUNNING
        state_text = "Shutting down" if state > RUNNING && state < TERMINATED
        state_text = "Terminated" if state == TERMINATED

        "{#{state_text}" +
          ", pool size = #{current_size}" +
          ", active threads = #{active_count}" +
          ", queued tasks = #{backlog}" +
          ", completed tasks = #{completed_tasks}" +
          "}"
      end

      protected

      # Hook invoked before a task runs on a worker thread
      #
      # @param [Object] task
      # @param [Thread] thread
      # @return [undefined]
      def before_execution(task, thread); end

      # Hook invoked after a task runs, with any raised exception
      #
      # @param [Object] task
      # @param [Exception] exception
      # @return [undefined]
      def after_execution(task, exception); end

      # @return [undefined]
      def on_shutdown; end

      # @return [undefined]
      def on_termination; end

      # @param [Object] task
      # @return [undefined]
      def reject(task)
        @rejection_policy.on_rejection task, self
      end

      # Transitions the pool to TIDYING/TERMINATED once it is eligible
      # (shut down, queue drained, no workers left)
      #
      # @return [undefined]
      def try_terminate
        loop do
          control = current_control

          state = control.state
          worker_count = control.worker_count

          return if state == RUNNING || state > STOP
          return if state == SHUTDOWN && @queue.size > 0

          if worker_count > 0
            # Eligible to terminate, but there are still workers left
            interrupt_idle_workers true
            return
          end

          # CAS to TIDYING; on failure another thread changed the control word,
          # so loop and re-evaluate
          break if @control.compare_and_swap(control, control_for(TIDYING, 0))
        end

        terminate
      end

      # @return [PoolControl]
      def current_control
        @control.value
      end

      private

      # Pool lifecycle states, strictly ordered so they can be compared
      RUNNING = 0
      SHUTDOWN = 1
      STOP = 2
      TIDYING = 3
      TERMINATED = 4

      # @param [Integer] state
      # @param [Integer] worker_count
      # @return [PoolControl]
      def control_for(state, worker_count)
        PoolControl.new state, worker_count
      end

      # CAS-advances the pool state, never moving backwards
      #
      # @param [Integer] state
      # @return [undefined]
      def advance_state_to(state)
        loop do
          ctl = current_control
          return if ctl.state >= state ||
            @control.compare_and_swap(ctl, control_for(state, ctl.worker_count))
        end
      end

      # @param [PoolControl] exp
      # @return [Boolean]
      def compare_and_increment_worker_count(exp)
        @control.compare_and_swap(exp, control_for(exp.state, exp.worker_count + 1))
      end

      # @param [PoolControl] exp
      # @return [Boolean]
      def compare_and_decrement_worker_count(exp)
        @control.compare_and_swap(exp, control_for(exp.state, exp.worker_count - 1))
      end

      # Unconditionally decrements the worker count, retrying the CAS until
      # it succeeds
      #
      # @return [undefined]
      def decrement_worker_count
        loop do
          return if compare_and_decrement_worker_count current_control
        end
      end

      # @yield
      # @return [undefined]
      def synchronize(&block)
        @monitor.synchronize &block
      end

      # @return [undefined]
      def interrupt_workers
        synchronize do
          @workers.each do |worker|
            worker.interrupt
          end
        end
      end

      # Interrupts workers that are not currently executing a task
      # (try_lock succeeds only for idle workers)
      #
      # @param [Boolean] single Interrupt at most one worker
      # @return [undefined]
      def interrupt_idle_workers(single = false)
        synchronize do
          @workers.each do |worker|
            if worker.try_lock
              worker.interrupt
              worker.unlock
            end

            break if single
          end
        end
      end

      # Runs the termination hook and moves the pool to TERMINATED, waking
      # any threads blocked in await_termination
      #
      # @return [undefined]
      def terminate
        synchronize do
          begin
            on_termination
          ensure
            @control.value = control_for(TERMINATED, 0)
            @termination.broadcast
          end
        end
      end

      # @param [Object] first_task
      # @return [Boolean]
      def try_core_worker(first_task)
        if current_control.worker_count < @core_size
          add_worker true, first_task
        end
      end

      # Re-checks pool state after a task was enqueued, since the state may
      # have changed concurrently
      #
      # @param [Object] task
      # @return [undefined]
      def after_task_enqueue(task)
        control = current_control
        if control.state != RUNNING && remove(task)
          # The pool state changed after the task got enqueued, but the task was able to be
          # removed from the work queue before it could be processed
          reject task
        elsif control.worker_count == 0
          # There are no workers to process the task, add one
          add_worker false
        end
      end

      # Reserves a worker slot via CAS, then registers and starts the worker
      #
      # @param [Boolean] core Whether the worker counts against the core size
      # @param [Object] first_task
      # @return [Boolean]
      def add_worker(core, first_task = nil)
        unless add_worker? core, first_task
          return false
        end

        worker = PoolWorker.new thread_factory, first_task

        synchronize do
          control = current_control
          state = control.state

          if state >= SHUTDOWN && !(state == SHUTDOWN && first_task.nil?)
            # Pool is no longer accepting work; roll back the reserved slot
            decrement_worker_count
            try_terminate

            return false
          end

          @workers.add worker

          count = @workers.size
          @largest_size = count if count > @largest_size
        end

        worker.start do
          run_worker worker
        end

        return true
      end

      # Attempts to reserve a worker slot in the control word; true on success
      def add_worker?(core, first_task)
        loop do
          control = current_control
          state = control.state

          if state == SHUTDOWN
            # During shutdown only workers for already-queued work may start
            return false unless first_task || @queue.size > 0
          elsif state > SHUTDOWN
            return false
          end

          loop do
            worker_count = control.worker_count

            maximum = @core_size if core
            maximum ||= @maximum_size

            return false unless worker_count < maximum

            # CAS was successful, worker can be started
            return true if compare_and_increment_worker_count control

            # Pool state changed, retry the entire loop
            break unless current_control.state == state

            # Worker count changed, retry inner loop
            control = current_control
          end
        end
      end

      # Main loop for a worker thread: runs its first task, then pulls tasks
      # from the queue until next_task returns nil
      #
      # @param [PoolWorker] worker
      # @return [undefined]
      def run_worker(worker)
        task = worker.first_task!

        clean_exit = false

        loop do
          task ||= next_task
          break unless task

          # Holding the worker lock marks it as busy (not interruptible as idle)
          worker.lock
          begin
            before_execution task, worker.thread
            exception = nil

            begin
              task.call
            rescue Exception
              # Capture for the after_execution hook, then propagate
              exception = $!
              raise
            ensure
              after_execution task, exception
            end
          ensure
            task = nil

            worker.on_task_completion
            worker.unlock
          end
        end

        clean_exit = true
      ensure
        process_worker_exit worker, clean_exit
      end

      # Blocks for the next task, or returns nil when this worker should die
      # (pool stopping, shutdown with empty queue, or idle timeout/culling)
      #
      # @return [Object]
      def next_task
        timed_out = false

        loop do
          failed = false

          control = current_control
          state = control.state

          if state > SHUTDOWN || (state == SHUTDOWN && @queue.empty?)
            decrement_worker_count
            return
          end

          timed = false

          loop do
            worker_count = control.worker_count
            timed = @allow_core_timeout || worker_count > @core_size

            if worker_count <= @maximum_size
              break unless timed && timed_out
            end

            # Pool has either gone over its maximum size OR the worker thread has timed out
            # Now attempt to just quietly remove this worker
            return if compare_and_decrement_worker_count control

            # The compare and swap of the worker count failed
            unless current_control.state == state
              # The compare and swap operation failed because the pool state changed
              # Start completely over so that the pool state is checked
              failed = true
              break
            end

            # The compare and swap operation failed because the worker count changed
            # Start this inner loop over and ensure that this worker is still eligible for culling

            control = current_control
          end

          next if failed

          begin
            if timed
              task = @queue.poll @work_timeout
            else
              task = @queue.take
            end

            return task if task

            timed_out = true
          rescue Interrupt
            # Something interrupted the worker, this may signal the need to reduce the size of the
            # pool or prepare for termination
            timed_out = false
          end
        end
      end

      # Bookkeeping when a worker thread exits; may replace the worker
      #
      # @param [PoolWorker] worker
      # @param [Boolean] clean_exit
      # @return [undefined]
      def process_worker_exit(worker, clean_exit)
        unless clean_exit
          # If worker did not exit cleanly, then the worker count was not adjusted
          decrement_worker_count
        end

        synchronize do
          @completed_task_count += worker.completed_task_count
          @workers.delete worker
        end

        try_terminate

        control = current_control
        if control.state < STOP
          if clean_exit
            # Some action caused the pool size to change
            # (0 is truthy in Ruby, so this yields 0 when core timeout is allowed)
            minimum = (@allow_core_timeout && 0) || @core_size
            minimum = 1 if minimum == 0 && @queue.size > 0

            # No need to replace the dying worker with a new thread
            return unless control.worker_count < minimum
          end

          # Dying worker will be replaced with a new one
          add_worker false
        end
      end
    end

    # Used in atomic operations relating to the state of the pool
    PoolControl = Struct.new :state, :worker_count
  end
end
|