concurrent-ruby 0.6.0.pre.1 → 0.6.0.pre.2
- checksums.yaml +4 -4
- data/README.md +16 -0
- data/lib/concurrent.rb +9 -29
- data/lib/concurrent/{actor.rb → actor/actor.rb} +3 -3
- data/lib/concurrent/actor/actor_context.rb +77 -0
- data/lib/concurrent/actor/actor_ref.rb +67 -0
- data/lib/concurrent/{postable.rb → actor/postable.rb} +1 -1
- data/lib/concurrent/actor/simple_actor_ref.rb +94 -0
- data/lib/concurrent/actors.rb +5 -0
- data/lib/concurrent/agent.rb +81 -47
- data/lib/concurrent/async.rb +35 -35
- data/lib/concurrent/atomic/atomic_boolean.rb +157 -0
- data/lib/concurrent/atomic/atomic_fixnum.rb +170 -0
- data/lib/concurrent/{condition.rb → atomic/condition.rb} +0 -0
- data/lib/concurrent/{copy_on_notify_observer_set.rb → atomic/copy_on_notify_observer_set.rb} +48 -13
- data/lib/concurrent/{copy_on_write_observer_set.rb → atomic/copy_on_write_observer_set.rb} +41 -20
- data/lib/concurrent/atomic/count_down_latch.rb +116 -0
- data/lib/concurrent/atomic/cyclic_barrier.rb +106 -0
- data/lib/concurrent/atomic/event.rb +103 -0
- data/lib/concurrent/{thread_local_var.rb → atomic/thread_local_var.rb} +0 -0
- data/lib/concurrent/atomics.rb +9 -0
- data/lib/concurrent/channel/buffered_channel.rb +6 -4
- data/lib/concurrent/channel/channel.rb +30 -2
- data/lib/concurrent/channel/unbuffered_channel.rb +2 -2
- data/lib/concurrent/channel/waitable_list.rb +3 -1
- data/lib/concurrent/channels.rb +5 -0
- data/lib/concurrent/{channel → collection}/blocking_ring_buffer.rb +16 -5
- data/lib/concurrent/collection/priority_queue.rb +305 -0
- data/lib/concurrent/{channel → collection}/ring_buffer.rb +6 -1
- data/lib/concurrent/collections.rb +3 -0
- data/lib/concurrent/configuration.rb +68 -19
- data/lib/concurrent/dataflow.rb +9 -9
- data/lib/concurrent/delay.rb +21 -13
- data/lib/concurrent/dereferenceable.rb +40 -33
- data/lib/concurrent/exchanger.rb +3 -0
- data/lib/concurrent/{cached_thread_pool.rb → executor/cached_thread_pool.rb} +8 -9
- data/lib/concurrent/executor/executor.rb +222 -0
- data/lib/concurrent/{fixed_thread_pool.rb → executor/fixed_thread_pool.rb} +6 -7
- data/lib/concurrent/{immediate_executor.rb → executor/immediate_executor.rb} +5 -5
- data/lib/concurrent/executor/java_cached_thread_pool.rb +31 -0
- data/lib/concurrent/{java_fixed_thread_pool.rb → executor/java_fixed_thread_pool.rb} +7 -11
- data/lib/concurrent/executor/java_single_thread_executor.rb +21 -0
- data/lib/concurrent/{java_thread_pool_executor.rb → executor/java_thread_pool_executor.rb} +66 -77
- data/lib/concurrent/executor/one_by_one.rb +65 -0
- data/lib/concurrent/{per_thread_executor.rb → executor/per_thread_executor.rb} +4 -4
- data/lib/concurrent/executor/ruby_cached_thread_pool.rb +29 -0
- data/lib/concurrent/{ruby_fixed_thread_pool.rb → executor/ruby_fixed_thread_pool.rb} +5 -4
- data/lib/concurrent/executor/ruby_single_thread_executor.rb +72 -0
- data/lib/concurrent/executor/ruby_thread_pool_executor.rb +282 -0
- data/lib/concurrent/{ruby_thread_pool_worker.rb → executor/ruby_thread_pool_worker.rb} +6 -6
- data/lib/concurrent/{safe_task_executor.rb → executor/safe_task_executor.rb} +20 -13
- data/lib/concurrent/executor/single_thread_executor.rb +35 -0
- data/lib/concurrent/executor/thread_pool_executor.rb +68 -0
- data/lib/concurrent/executor/timer_set.rb +138 -0
- data/lib/concurrent/executors.rb +9 -0
- data/lib/concurrent/future.rb +39 -40
- data/lib/concurrent/ivar.rb +22 -15
- data/lib/concurrent/mvar.rb +2 -1
- data/lib/concurrent/obligation.rb +9 -3
- data/lib/concurrent/observable.rb +33 -0
- data/lib/concurrent/options_parser.rb +46 -0
- data/lib/concurrent/promise.rb +23 -24
- data/lib/concurrent/scheduled_task.rb +21 -45
- data/lib/concurrent/timer_task.rb +204 -126
- data/lib/concurrent/tvar.rb +1 -1
- data/lib/concurrent/utilities.rb +3 -36
- data/lib/concurrent/{processor_count.rb → utility/processor_count.rb} +1 -1
- data/lib/concurrent/utility/timeout.rb +36 -0
- data/lib/concurrent/utility/timer.rb +21 -0
- data/lib/concurrent/version.rb +1 -1
- data/lib/concurrent_ruby_ext.bundle +0 -0
- data/spec/concurrent/{actor_context_spec.rb → actor/actor_context_spec.rb} +0 -8
- data/spec/concurrent/{actor_ref_shared.rb → actor/actor_ref_shared.rb} +9 -59
- data/spec/concurrent/{actor_spec.rb → actor/actor_spec.rb} +43 -41
- data/spec/concurrent/{postable_shared.rb → actor/postable_shared.rb} +0 -0
- data/spec/concurrent/actor/simple_actor_ref_spec.rb +135 -0
- data/spec/concurrent/agent_spec.rb +160 -71
- data/spec/concurrent/atomic/atomic_boolean_spec.rb +172 -0
- data/spec/concurrent/atomic/atomic_fixnum_spec.rb +186 -0
- data/spec/concurrent/{condition_spec.rb → atomic/condition_spec.rb} +2 -2
- data/spec/concurrent/{copy_on_notify_observer_set_spec.rb → atomic/copy_on_notify_observer_set_spec.rb} +0 -0
- data/spec/concurrent/{copy_on_write_observer_set_spec.rb → atomic/copy_on_write_observer_set_spec.rb} +0 -0
- data/spec/concurrent/atomic/count_down_latch_spec.rb +151 -0
- data/spec/concurrent/atomic/cyclic_barrier_spec.rb +248 -0
- data/spec/concurrent/{event_spec.rb → atomic/event_spec.rb} +18 -3
- data/spec/concurrent/{observer_set_shared.rb → atomic/observer_set_shared.rb} +15 -6
- data/spec/concurrent/{thread_local_var_spec.rb → atomic/thread_local_var_spec.rb} +0 -0
- data/spec/concurrent/channel/buffered_channel_spec.rb +1 -1
- data/spec/concurrent/channel/channel_spec.rb +6 -4
- data/spec/concurrent/channel/probe_spec.rb +37 -9
- data/spec/concurrent/channel/unbuffered_channel_spec.rb +2 -2
- data/spec/concurrent/{channel → collection}/blocking_ring_buffer_spec.rb +0 -0
- data/spec/concurrent/collection/priority_queue_spec.rb +317 -0
- data/spec/concurrent/{channel → collection}/ring_buffer_spec.rb +0 -0
- data/spec/concurrent/configuration_spec.rb +4 -70
- data/spec/concurrent/dereferenceable_shared.rb +5 -4
- data/spec/concurrent/exchanger_spec.rb +10 -5
- data/spec/concurrent/{cached_thread_pool_shared.rb → executor/cached_thread_pool_shared.rb} +15 -37
- data/spec/concurrent/{fixed_thread_pool_shared.rb → executor/fixed_thread_pool_shared.rb} +0 -0
- data/spec/concurrent/{global_thread_pool_shared.rb → executor/global_thread_pool_shared.rb} +10 -8
- data/spec/concurrent/{immediate_executor_spec.rb → executor/immediate_executor_spec.rb} +0 -0
- data/spec/concurrent/{java_cached_thread_pool_spec.rb → executor/java_cached_thread_pool_spec.rb} +1 -21
- data/spec/concurrent/{java_fixed_thread_pool_spec.rb → executor/java_fixed_thread_pool_spec.rb} +0 -0
- data/spec/concurrent/executor/java_single_thread_executor_spec.rb +21 -0
- data/spec/concurrent/{java_thread_pool_executor_spec.rb → executor/java_thread_pool_executor_spec.rb} +0 -0
- data/spec/concurrent/{per_thread_executor_spec.rb → executor/per_thread_executor_spec.rb} +0 -4
- data/spec/concurrent/{ruby_cached_thread_pool_spec.rb → executor/ruby_cached_thread_pool_spec.rb} +1 -1
- data/spec/concurrent/{ruby_fixed_thread_pool_spec.rb → executor/ruby_fixed_thread_pool_spec.rb} +0 -0
- data/spec/concurrent/executor/ruby_single_thread_executor_spec.rb +18 -0
- data/spec/concurrent/{ruby_thread_pool_executor_spec.rb → executor/ruby_thread_pool_executor_spec.rb} +12 -24
- data/spec/concurrent/executor/safe_task_executor_spec.rb +103 -0
- data/spec/concurrent/{thread_pool_class_cast_spec.rb → executor/thread_pool_class_cast_spec.rb} +12 -0
- data/spec/concurrent/{thread_pool_executor_shared.rb → executor/thread_pool_executor_shared.rb} +0 -0
- data/spec/concurrent/{thread_pool_shared.rb → executor/thread_pool_shared.rb} +84 -119
- data/spec/concurrent/executor/timer_set_spec.rb +183 -0
- data/spec/concurrent/future_spec.rb +12 -0
- data/spec/concurrent/ivar_spec.rb +11 -1
- data/spec/concurrent/observable_shared.rb +173 -0
- data/spec/concurrent/observable_spec.rb +51 -0
- data/spec/concurrent/options_parser_spec.rb +71 -0
- data/spec/concurrent/runnable_shared.rb +6 -0
- data/spec/concurrent/scheduled_task_spec.rb +60 -40
- data/spec/concurrent/timer_task_spec.rb +130 -144
- data/spec/concurrent/{processor_count_spec.rb → utility/processor_count_spec.rb} +0 -0
- data/spec/concurrent/{utilities_spec.rb → utility/timeout_spec.rb} +0 -0
- data/spec/concurrent/utility/timer_spec.rb +52 -0
- metadata +147 -108
- data/lib/concurrent/actor_context.rb +0 -31
- data/lib/concurrent/actor_ref.rb +0 -39
- data/lib/concurrent/atomic.rb +0 -121
- data/lib/concurrent/channel/probe.rb +0 -19
- data/lib/concurrent/count_down_latch.rb +0 -60
- data/lib/concurrent/event.rb +0 -80
- data/lib/concurrent/java_cached_thread_pool.rb +0 -45
- data/lib/concurrent/ruby_cached_thread_pool.rb +0 -37
- data/lib/concurrent/ruby_thread_pool_executor.rb +0 -268
- data/lib/concurrent/simple_actor_ref.rb +0 -124
- data/lib/concurrent/thread_pool_executor.rb +0 -30
- data/spec/concurrent/atomic_spec.rb +0 -201
- data/spec/concurrent/count_down_latch_spec.rb +0 -125
- data/spec/concurrent/safe_task_executor_spec.rb +0 -58
- data/spec/concurrent/simple_actor_ref_spec.rb +0 -219
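Most of the changes above are reorganizations: executors move under lib/concurrent/executor/, atomic primitives under lib/concurrent/atomic/, collections under lib/concurrent/collection/, and grouped require files (atomics.rb, executors.rb, collections.rb) are added. For orientation only, here is a minimal sketch of the newly added atomic classes; the method names are assumptions based on the 0.6.x public API and do not appear in this diff.

    require 'concurrent'  # the top-level require is expected to keep loading everything

    # AtomicBoolean and AtomicFixnum are new files in this release (see manifest above);
    # the calls below assume the documented 0.6.x API.
    flag = Concurrent::AtomicBoolean.new(false)
    flag.make_true          # atomically flip to true
    flag.true?              # => true

    counter = Concurrent::AtomicFixnum.new(0)
    counter.increment       # => 1
    counter.value           # => 1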
data/lib/concurrent/channel/probe.rb
DELETED
@@ -1,19 +0,0 @@
-module Concurrent
-  class Probe < IVar
-
-    def initialize(value = NO_VALUE, opts = {})
-      super(value, opts)
-    end
-
-    def set_unless_assigned(value)
-      mutex.synchronize do
-        return false if [:fulfilled, :rejected].include? @state
-
-        set_state(true, value, nil)
-        event.set
-        true
-      end
-
-    end
-  end
-end
data/lib/concurrent/count_down_latch.rb
DELETED
@@ -1,60 +0,0 @@
-module Concurrent
-
-  # A synchronization object that allows one thread to wait on multiple other threads.
-  # The thread that will wait creates a +CountDownLatch+ and sets the initial value
-  # (normally equal to the number of other threads). The initiating thread passes the
-  # latch to the other threads then waits for the other threads by calling the +#wait+
-  # method. Each of the other threads calls +#count_down+ when done with its work.
-  # When the latch counter reaches zero the waiting thread is unblocked and continues
-  # with its work. A +CountDownLatch+ can be used only once. Its value cannot be reset.
-  class CountDownLatch
-
-    # Create a new +CountDownLatch+ with the initial +count+.
-    #
-    # @param [Fixnum] count the initial count
-    #
-    # @raise [ArgumentError] if +count+ is not an integer or is less than zero
-    def initialize(count)
-      unless count.is_a?(Fixnum) && count >= 0
-        raise ArgumentError.new('count must be in integer greater than or equal zero')
-      end
-      @mutex = Mutex.new
-      @condition = Condition.new
-      @count = count
-    end
-
-    # Block on the latch until the counter reaches zero or until +timeout+ is reached.
-    #
-    # @param [Fixnum] timeout the number of seconds to wait for the counter or +nil+
-    #   to block indefinitely
-    # @return [Boolean] +true+ if the +count+ reaches zero else false on +timeout+
-    def wait(timeout = nil)
-      @mutex.synchronize do
-
-        remaining = Condition::Result.new(timeout)
-        while @count > 0 && remaining.can_wait?
-          remaining = @condition.wait(@mutex, remaining.remaining_time)
-        end
-
-        @count == 0
-      end
-    end
-
-    # Signal the latch to decrement the counter. Will signal all blocked threads when
-    # the +count+ reaches zero.
-    def count_down
-      @mutex.synchronize do
-        @count -= 1 if @count > 0
-        @condition.broadcast if @count == 0
-      end
-    end
-
-    # The current value of the counter.
-    #
-    # @return [Fixnum] the current value of the counter
-    def count
-      @mutex.synchronize { @count }
-    end
-
-  end
-end
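The latch itself survives the release; per the manifest it reappears as data/lib/concurrent/atomic/count_down_latch.rb. A minimal usage sketch, assuming the API documented in the deleted source above carries over unchanged:

    require 'concurrent'

    latch = Concurrent::CountDownLatch.new(3)   # wait for three workers

    3.times do |i|
      Thread.new do
        sleep(0.01 * i)       # simulated work
        latch.count_down      # each worker signals when it is done
      end
    end

    latch.wait(5)   # => true once the count reaches zero, false on timeout
    latch.count     # => 0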
data/lib/concurrent/event.rb
DELETED
@@ -1,80 +0,0 @@
-require 'thread'
-require 'concurrent/utilities'
-require 'concurrent/condition'
-
-module Concurrent
-
-  # Old school kernel-style event reminiscent of Win32 programming in C++.
-  #
-  # When an +Event+ is created it is in the +unset+ state. Threads can choose to
-  # +#wait+ on the event, blocking until released by another thread. When one
-  # thread wants to alert all blocking threads it calls the +#set+ method which
-  # will then wake up all listeners. Once an +Event+ has been set it remains set.
-  # New threads calling +#wait+ will return immediately. An +Event+ may be
-  # +#reset+ at any time once it has been set.
-  #
-  # @see http://msdn.microsoft.com/en-us/library/windows/desktop/ms682655.aspx
-  class Event
-
-    # Creates a new +Event+ in the unset state. Threads calling +#wait+ on the
-    # +Event+ will block.
-    def initialize
-      @set = false
-      @mutex = Mutex.new
-      @condition = Condition.new
-    end
-
-    # Is the object in the set state?
-    #
-    # @return [Boolean] indicating whether or not the +Event+ has been set
-    def set?
-      @mutex.synchronize do
-        @set
-      end
-    end
-
-    # Trigger the event, setting the state to +set+ and releasing all threads
-    # waiting on the event. Has no effect if the +Event+ has already been set.
-    #
-    # @return [Boolean] should always return +true+
-    def set
-      @mutex.synchronize do
-        return true if @set
-        @set = true
-        @condition.broadcast
-      end
-
-      true
-    end
-
-    # Reset a previously set event back to the +unset+ state.
-    # Has no effect if the +Event+ has not yet been set.
-    #
-    # @return [Boolean] should always return +true+
-    def reset
-      @mutex.synchronize do
-        @set = false
-      end
-
-      true
-    end
-
-    # Wait a given number of seconds for the +Event+ to be set by another
-    # thread. Will wait forever when no +timeout+ value is given. Returns
-    # immediately if the +Event+ has already been set.
-    #
-    # @return [Boolean] true if the +Event+ was set before timeout else false
-    def wait(timeout = nil)
-      @mutex.synchronize do
-        return true if @set
-
-        remaining = Condition::Result.new(timeout)
-        while !@set && remaining.can_wait?
-          remaining = @condition.wait(@mutex, remaining.remaining_time)
-        end
-
-        @set
-      end
-    end
-  end
-end
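Event likewise moves rather than disappears (data/lib/concurrent/atomic/event.rb in the manifest). A minimal sketch of the semantics described in the deleted source, assuming the same public methods after the move:

    require 'concurrent'

    event = Concurrent::Event.new   # starts in the unset state

    waiter = Thread.new do
      event.wait(1)                 # block until #set or the one-second timeout
    end

    event.set?   # => false
    event.set    # wake all waiting threads; the event stays set until #reset
    waiter.join

    event.reset  # back to the unset state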
data/lib/concurrent/java_cached_thread_pool.rb
DELETED
@@ -1,45 +0,0 @@
-if RUBY_PLATFORM == 'java'
-
-  require 'concurrent/java_thread_pool_executor'
-
-  module Concurrent
-
-    # @!macro cached_thread_pool
-    class JavaCachedThreadPool < JavaThreadPoolExecutor
-
-      # Create a new thread pool.
-      #
-      # @param [Hash] opts the options defining pool behavior.
-      # @option opts [Integer] :max_threads (+DEFAULT_MAX_POOL_SIZE+) maximum number
-      #   of threads which may be created in the pool
-      # @option opts [Integer] :idletime (+DEFAULT_THREAD_IDLETIMEOUT+) maximum
-      #   number of seconds a thread may be idle before it is reclaimed
-      # @option opts [Symbol] :overflow_policy (+:abort+) the overflow policy
-      #
-      # @raise [ArgumentError] if +max_threads+ is less than or equal to zero
-      # @raise [ArgumentError] if +idletime+ is less than or equal to zero
-      # @raise [ArgumentError] if +overflow_policy+ is not a known policy
-      #
-      # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/Executors.html#newCachedThreadPool--
-      def initialize(opts = {})
-        max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i
-        idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i
-        @overflow_policy = opts.fetch(:overflow_policy, :abort)
-        @max_queue = 0
-
-        raise ArgumentError.new('idletime must be greater than zero') if idletime <= 0
-        raise ArgumentError.new('max_threads must be greater than zero') if max_length <= 0
-        raise ArgumentError.new("#{@overflow_policy} is not a valid overflow policy") unless OVERFLOW_POLICIES.keys.include?(@overflow_policy)
-
-        @executor = java.util.concurrent.ThreadPoolExecutor.new(
-          @max_queue, max_length,
-          idletime, java.util.concurrent.TimeUnit::SECONDS,
-          java.util.concurrent.SynchronousQueue.new,
-          OVERFLOW_POLICIES[@overflow_policy].new)
-
-        # without this the process may fail to exit
-        at_exit { self.kill }
-      end
-    end
-  end
-end
data/lib/concurrent/ruby_cached_thread_pool.rb
DELETED
@@ -1,37 +0,0 @@
-require 'concurrent/ruby_thread_pool_executor'
-
-module Concurrent
-
-  # @!macro cached_thread_pool
-  class RubyCachedThreadPool < RubyThreadPoolExecutor
-
-    # Create a new thread pool.
-    #
-    # @param [Hash] opts the options defining pool behavior.
-    # @option opts [Integer] :max_threads (+DEFAULT_MAX_POOL_SIZE+) maximum number
-    #   of threads which may be created in the pool
-    # @option opts [Integer] :idletime (+DEFAULT_THREAD_IDLETIMEOUT+) maximum
-    #   number of seconds a thread may be idle before it is reclaimed
-    #
-    # @raise [ArgumentError] if +max_threads+ is less than or equal to zero
-    # @raise [ArgumentError] if +idletime+ is less than or equal to zero
-    # @raise [ArgumentError] if +overflow_policy+ is not a known policy
-    def initialize(opts = {})
-      max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i
-      idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i
-      overflow_policy = opts.fetch(:overflow_policy, :abort)
-
-      raise ArgumentError.new('idletime must be greater than zero') if idletime <= 0
-      raise ArgumentError.new('max_threads must be greater than zero') if max_length <= 0
-      raise ArgumentError.new("#{overflow_policy} is not a valid overflow policy") unless OVERFLOW_POLICIES.include?(overflow_policy)
-
-      opts = opts.merge(
-        min_threads: 0,
-        max_threads: max_length,
-        num_threads: overflow_policy,
-        idletime: idletime
-      )
-      super(opts)
-    end
-  end
-end
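Both cached-pool implementations are recreated under executor/ (see ruby_cached_thread_pool.rb and java_cached_thread_pool.rb in the manifest). A construction sketch using the platform-neutral Concurrent::CachedThreadPool wrapper, assuming it accepts the option keys documented in the deleted constructors above:

    require 'concurrent'

    pool = Concurrent::CachedThreadPool.new(
      max_threads:     32,      # upper bound on worker threads
      idletime:        60,      # seconds before an idle thread is reclaimed
      overflow_policy: :abort   # :abort, :discard, or :caller_runs
    )

    10.times { |i| pool.post(i) { |n| n * n } }   # tasks run asynchronously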
data/lib/concurrent/ruby_thread_pool_executor.rb
DELETED
@@ -1,268 +0,0 @@
-require 'thread'
-
-require 'concurrent/event'
-require 'concurrent/ruby_thread_pool_worker'
-
-module Concurrent
-
-  RejectedExecutionError = Class.new(StandardError) unless defined? RejectedExecutionError
-
-  # @!macro thread_pool_executor
-  class RubyThreadPoolExecutor
-
-    # The maximum number of threads that will be created in the pool
-    # (unless overridden during construction).
-    DEFAULT_MAX_POOL_SIZE = 2**15 # 32768
-
-    # The minimum number of threads that will be created in the pool
-    # (unless overridden during construction).
-    DEFAULT_MIN_POOL_SIZE = 0
-
-    DEFAULT_MAX_QUEUE_SIZE = 0
-
-    # The maximum number of seconds a thread in the pool may remain idle before
-    # being reclaimed (unless overridden during construction).
-    DEFAULT_THREAD_IDLETIMEOUT = 60
-
-    OVERFLOW_POLICIES = [:abort, :discard, :caller_runs]
-
-    # The maximum number of threads that may be created in the pool.
-    attr_reader :max_length
-    attr_reader :min_length
-
-    attr_reader :largest_length
-
-    attr_reader :scheduled_task_count
-    attr_reader :completed_task_count
-
-    attr_reader :idletime
-
-    attr_reader :max_queue
-
-    attr_reader :overflow_policy
-
-    # Create a new thread pool.
-    #
-    # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.html
-    def initialize(opts = {})
-      @min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i
-      @max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i
-      @idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i
-      @max_queue = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i
-      @overflow_policy = opts.fetch(:overflow_policy, :abort)
-
-      raise ArgumentError.new('max_threads must be greater than zero') if @max_length <= 0
-      raise ArgumentError.new('min_threads cannot be less than zero') if @min_length < 0
-      raise ArgumentError.new("#{overflow_policy} is not a valid overflow policy") unless OVERFLOW_POLICIES.include?(@overflow_policy)
-
-      @state = :running
-      @pool = []
-      @terminator = Event.new
-      @queue = Queue.new
-      @mutex = Mutex.new
-      @scheduled_task_count = 0
-      @completed_task_count = 0
-      @largest_length = 0
-
-      @gc_interval = opts.fetch(:gc_interval, 1).to_i # undocumented
-      @last_gc_time = Time.now.to_f - [1.0, (@gc_interval * 2.0)].max
-    end
-
-    def length
-      @mutex.synchronize do
-        @state != :shutdown ? @pool.length : 0
-      end
-    end
-    alias_method :current_length, :length
-
-    def queue_length
-      @queue.length
-    end
-
-    def remaining_capacity
-      @mutex.synchronize { @max_queue == 0 ? -1 : @max_queue - @queue.length }
-    end
-
-    # Is the thread pool running?
-    #
-    # @return [Boolean] +true+ when running, +false+ when shutting down or shutdown
-    def running?
-      @mutex.synchronize { @state == :running }
-    end
-
-    # Returns an array with the status of each thread in the pool
-    #
-    # This method is deprecated and will be removed soon.
-    def status
-      warn '[DEPRECATED] `status` is deprecated and will be removed soon.'
-      @mutex.synchronize { @pool.collect { |worker| worker.status } }
-    end
-
-    # Is the thread pool shutdown?
-    #
-    # @return [Boolean] +true+ when shutdown, +false+ when shutting down or running
-    def shutdown?
-      @mutex.synchronize { @state != :running }
-    end
-
-    # Block until thread pool shutdown is complete or until +timeout+ seconds have
-    # passed.
-    #
-    # @note Does not initiate shutdown or termination. Either +shutdown+ or +kill+
-    #   must be called before this method (or on another thread).
-    #
-    # @param [Integer] timeout the maximum number of seconds to wait for shutdown to complete
-    #
-    # @return [Boolean] +true+ if shutdown complete or false on +timeout+
-    def wait_for_termination(timeout)
-      return @terminator.wait(timeout.to_i)
-    end
-
-    # Submit a task to the thread pool for asynchronous processing.
-    #
-    # @param [Array] args zero or more arguments to be passed to the task
-    #
-    # @yield the asynchronous task to perform
-    #
-    # @return [Boolean] +true+ if the task is queued, +false+ if the thread pool
-    #   is not running
-    #
-    # @raise [ArgumentError] if no task is given
-    def post(*args, &task)
-      raise ArgumentError.new('no block given') unless block_given?
-      @mutex.synchronize do
-        break false unless @state == :running
-        return handle_overflow(*args, &task) if @max_queue != 0 && @queue.length >= @max_queue
-        @scheduled_task_count += 1
-        @queue << [args, task]
-        if Time.now.to_f - @gc_interval >= @last_gc_time
-          prune_pool
-          @last_gc_time = Time.now.to_f
-        end
-        grow_pool
-        true
-      end
-    end
-
-    # Submit a task to the thread pool for asynchronous processing.
-    #
-    # @param [Proc] task the asynchronous task to perform
-    #
-    # @return [self] returns itself
-    def <<(task)
-      self.post(&task)
-      return self
-    end
-
-    # Begin an orderly shutdown. Tasks already in the queue will be executed,
-    # but no new tasks will be accepted. Has no additional effect if the
-    # thread pool is not running.
-    def shutdown
-      @mutex.synchronize do
-        break unless @state == :running
-        @queue.clear
-        if @pool.empty?
-          @state = :shutdown
-          @terminator.set
-        else
-          @state = :shuttingdown
-          @pool.length.times{ @queue << :stop }
-        end
-      end
-    end
-
-    # Begin an immediate shutdown. In-progress tasks will be allowed to
-    # complete but enqueued tasks will be dismissed and no new tasks
-    # will be accepted. Has no additional effect if the thread pool is
-    # not running.
-    def kill
-      @mutex.synchronize do
-        break if @state == :shutdown
-        @queue.clear
-        @state = :shutdown
-        drain_pool
-        @terminator.set
-      end
-    end
-
-    # @!visibility private
-    def on_end_task # :nodoc:
-      @mutex.synchronize do
-        @completed_task_count += 1 #if success
-        break unless @state == :running
-      end
-    end
-
-    # @!visibility private
-    def on_worker_exit(worker) # :nodoc:
-      @mutex.synchronize do
-        @pool.delete(worker)
-        if @pool.empty? && @state != :running
-          @state = :shutdown
-          @terminator.set
-        end
-      end
-    end
-
-    protected
-
-    # @!visibility private
-    def handle_overflow(*args) # :nodoc:
-      case @overflow_policy
-      when :abort
-        raise RejectedExecutionError
-      when :discard
-        false
-      when :caller_runs
-        begin
-          yield(*args)
-        rescue
-          # let it fail
-        end
-        true
-      end
-    end
-
-    # @!visibility private
-    def prune_pool # :nodoc:
-      @pool.delete_if do |worker|
-        worker.dead? ||
-          (@idletime == 0 ? false : Time.now.to_f - @idletime > worker.last_activity)
-      end
-    end
-
-    # @!visibility private
-    def grow_pool # :nodoc:
-      if @min_length > @pool.length
-        additional = @min_length - @pool.length
-      elsif @pool.length < @max_length && ! @queue.empty?
-        # NOTE: does not take into account idle threads
-        additional = 1
-      else
-        additional = 0
-      end
-      additional.times do
-        break if @pool.length >= @max_length
-        @pool << create_worker_thread
-      end
-      @largest_length = [@largest_length, @pool.length].max
-    end
-
-    # @!visibility private
-    def drain_pool # :nodoc:
-      @pool.each {|worker| worker.kill }
-      @pool.clear
-    end
-
-    # @!visibility private
-    def create_worker_thread # :nodoc:
-      wrkr = RubyThreadPoolWorker.new(@queue, self)
-      Thread.new(wrkr, self) do |worker, parent|
-        Thread.current.abort_on_exception = false
-        worker.run
-        parent.on_worker_exit(worker)
-      end
-      return wrkr
-    end
-  end
-end
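The executor itself is not removed; per the manifest it is rewritten at data/lib/concurrent/executor/ruby_thread_pool_executor.rb (+282 lines) alongside a generic executor/thread_pool_executor.rb wrapper. A lifecycle sketch based on the API documented in the deleted source above, assuming the relocated class keeps the same option keys and methods:

    require 'concurrent'

    pool = Concurrent::ThreadPoolExecutor.new(
      min_threads:     2,
      max_threads:     8,
      max_queue:       100,           # 0 means an unbounded queue
      overflow_policy: :caller_runs   # run in the caller when the queue is full
    )

    100.times { |i| pool.post(i) { |n| Math.sqrt(n) } }

    pool.shutdown                    # orderly: queued tasks still run
    pool.wait_for_termination(10)    # => true once all workers have exited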