concurrent-ruby 1.0.0.pre1 → 1.0.0.pre2
- checksums.yaml +4 -4
- data/CHANGELOG.md +14 -1
- data/README.md +16 -18
- data/lib/concurrent.rb +3 -3
- data/lib/concurrent/agent.rb +583 -0
- data/lib/concurrent/array.rb +1 -0
- data/lib/concurrent/async.rb +236 -111
- data/lib/concurrent/atom.rb +101 -46
- data/lib/concurrent/atomic/atomic_boolean.rb +2 -0
- data/lib/concurrent/atomic/atomic_fixnum.rb +2 -0
- data/lib/concurrent/atomic/cyclic_barrier.rb +1 -1
- data/lib/concurrent/atomic/event.rb +1 -1
- data/lib/concurrent/atomic/mutex_atomic_boolean.rb +1 -1
- data/lib/concurrent/atomic/mutex_atomic_fixnum.rb +1 -1
- data/lib/concurrent/atomic/mutex_count_down_latch.rb +1 -1
- data/lib/concurrent/atomic/mutex_semaphore.rb +2 -2
- data/lib/concurrent/atomic/read_write_lock.rb +5 -4
- data/lib/concurrent/atomic/reentrant_read_write_lock.rb +3 -1
- data/lib/concurrent/atomic/thread_local_var.rb +2 -0
- data/lib/concurrent/atomic_reference/mutex_atomic.rb +1 -1
- data/lib/concurrent/atomics.rb +6 -4
- data/lib/concurrent/collection/copy_on_notify_observer_set.rb +7 -15
- data/lib/concurrent/collection/copy_on_write_observer_set.rb +7 -15
- data/lib/concurrent/collection/map/atomic_reference_map_backend.rb +5 -0
- data/lib/concurrent/concern/observable.rb +38 -13
- data/lib/concurrent/configuration.rb +5 -4
- data/lib/concurrent/delay.rb +9 -8
- data/lib/concurrent/exchanger.rb +2 -0
- data/lib/concurrent/executor/abstract_executor_service.rb +2 -2
- data/lib/concurrent/executor/java_single_thread_executor.rb +0 -1
- data/lib/concurrent/executor/ruby_executor_service.rb +10 -4
- data/lib/concurrent/executor/ruby_single_thread_executor.rb +10 -68
- data/lib/concurrent/executor/safe_task_executor.rb +7 -8
- data/lib/concurrent/executor/serialized_execution.rb +4 -4
- data/lib/concurrent/executor/single_thread_executor.rb +20 -10
- data/lib/concurrent/executor/timer_set.rb +4 -2
- data/lib/concurrent/executors.rb +0 -1
- data/lib/concurrent/future.rb +3 -2
- data/lib/concurrent/hash.rb +1 -1
- data/lib/concurrent/immutable_struct.rb +5 -1
- data/lib/concurrent/ivar.rb +1 -1
- data/lib/concurrent/mutable_struct.rb +7 -6
- data/lib/concurrent/{executor/executor.rb → options.rb} +4 -3
- data/lib/concurrent/promise.rb +3 -2
- data/lib/concurrent/scheduled_task.rb +3 -2
- data/lib/concurrent/settable_struct.rb +5 -4
- data/lib/concurrent/synchronization.rb +11 -3
- data/lib/concurrent/synchronization/abstract_lockable_object.rb +117 -0
- data/lib/concurrent/synchronization/abstract_object.rb +16 -129
- data/lib/concurrent/synchronization/abstract_struct.rb +2 -3
- data/lib/concurrent/synchronization/condition.rb +6 -4
- data/lib/concurrent/synchronization/jruby_lockable_object.rb +13 -0
- data/lib/concurrent/synchronization/{java_object.rb → jruby_object.rb} +5 -3
- data/lib/concurrent/synchronization/lock.rb +3 -2
- data/lib/concurrent/synchronization/lockable_object.rb +59 -0
- data/lib/concurrent/synchronization/mri_lockable_object.rb +71 -0
- data/lib/concurrent/synchronization/mri_object.rb +35 -0
- data/lib/concurrent/synchronization/object.rb +111 -39
- data/lib/concurrent/synchronization/rbx_lockable_object.rb +64 -0
- data/lib/concurrent/synchronization/rbx_object.rb +17 -68
- data/lib/concurrent/thread_safe/util.rb +0 -9
- data/lib/concurrent/thread_safe/util/adder.rb +3 -0
- data/lib/concurrent/thread_safe/util/array_hash_rbx.rb +3 -1
- data/lib/concurrent/thread_safe/util/cheap_lockable.rb +3 -0
- data/lib/concurrent/thread_safe/util/power_of_two_tuple.rb +1 -0
- data/lib/concurrent/thread_safe/util/striped64.rb +6 -1
- data/lib/concurrent/thread_safe/util/volatile.rb +2 -0
- data/lib/concurrent/thread_safe/util/xor_shift_random.rb +2 -0
- data/lib/concurrent/tvar.rb +36 -0
- data/lib/concurrent/utility/at_exit.rb +1 -1
- data/lib/concurrent/utility/monotonic_time.rb +3 -4
- data/lib/concurrent/utility/native_extension_loader.rb +1 -1
- data/lib/concurrent/version.rb +2 -2
- metadata +12 -7
- data/lib/concurrent/synchronization/monitor_object.rb +0 -27
- data/lib/concurrent/synchronization/mutex_object.rb +0 -43
@@ -1,4 +1,9 @@
 require 'concurrent/thread_safe/util'
+require 'concurrent/thread_safe/util/adder'
+require 'concurrent/thread_safe/util/cheap_lockable'
+require 'concurrent/thread_safe/util/power_of_two_tuple'
+require 'concurrent/thread_safe/util/volatile'
+require 'concurrent/thread_safe/util/xor_shift_random'
 
 module Concurrent
 
data/lib/concurrent/concern/observable.rb
CHANGED
@@ -21,8 +21,8 @@ module Concurrent
 #
 # In a multi threaded environment things are more complex. The `subject` must
 # synchronize the access to its data structure and to do so currently we're
-# using two specialized ObserverSet: CopyOnWriteObserverSet
-# CopyOnNotifyObserverSet.
+# using two specialized ObserverSet: {Concurrent::Concern::CopyOnWriteObserverSet}
+# and {Concurrent::Concern::CopyOnNotifyObserverSet}.
 #
 # When implementing and `observer` there's a very important rule to remember:
 # **there are no guarantees about the thread that will execute the callback**
@@ -49,30 +49,55 @@ module Concurrent
 # or an AtomicFixum)
 module Observable
 
-#
-
-
+# @!macro [attach] observable_add_observer
+#
+# Adds an observer to this set. If a block is passed, the observer will be
+# created by this method and no other params should be passed.
+#
+# @param [Object] observer the observer to add
+# @param [Symbol] func the function to call on the observer during notification.
+# Default is :update
+# @return [Object] the added observer
+def add_observer(observer = nil, func = :update, &block)
+observers.add_observer(observer, func, &block)
 end
 
-#
+# As `#add_observer` but can be used for chaining.
+#
+# @param [Object] observer the observer to add
+# @param [Symbol] func the function to call on the observer during notification.
 # @return [Observable] self
-def with_observer(
-add_observer(
+def with_observer(observer = nil, func = :update, &block)
+add_observer(observer, func, &block)
 self
 end
 
-#
-
-
+# @!macro [attach] observable_delete_observer
+#
+# Remove `observer` as an observer on this object so that it will no
+# longer receive notifications.
+#
+# @param [Object] observer the observer to remove
+# @return [Object] the deleted observer
+def delete_observer(observer)
+observers.delete_observer(observer)
 end
 
-#
+# @!macro [attach] observable_delete_observers
+#
+# Remove all observers associated with this object.
+#
+# @return [Observable] self
 def delete_observers
 observers.delete_observers
 self
 end
 
-#
+# @!macro [attach] observable_count_observers
+#
+# Return the number of observers associated with this object.
+#
+# @return [Integer] the observers count
 def count_observers
 observers.count_observers
 end
data/lib/concurrent/configuration.rb
CHANGED
@@ -3,16 +3,17 @@ require 'concurrent/delay'
 require 'concurrent/errors'
 require 'concurrent/atomic/atomic_reference'
 require 'concurrent/concern/logging'
-require 'concurrent/executor/timer_set'
 require 'concurrent/executor/immediate_executor'
-require 'concurrent/executor/fixed_thread_pool'
-require 'concurrent/executor/thread_pool_executor'
 require 'concurrent/utility/at_exit'
 require 'concurrent/utility/processor_counter'
 
 module Concurrent
 extend Concern::Logging
 
+autoload :Options, 'concurrent/options'
+autoload :TimerSet, 'concurrent/executor/timer_set'
+autoload :ThreadPoolExecutor, 'concurrent/executor/thread_pool_executor'
+
 # @return [Logger] Logger with provided level and output.
 def self.create_stdlib_logger(level = Logger::FATAL, output = $stderr)
 logger = Logger.new(output)
@@ -125,7 +126,7 @@ module Concurrent
 # - :immediate - {Concurrent.global_immediate_executor}
 # @return [Executor]
 def self.executor(executor_identifier)
-
+Options.executor(executor_identifier)
 end
 
 def self.new_fast_executor(opts = {})
data/lib/concurrent/delay.rb
CHANGED
@@ -1,12 +1,12 @@
 require 'thread'
-require 'concurrent/configuration'
 require 'concurrent/concern/obligation'
-require 'concurrent/executor/executor'
 require 'concurrent/executor/immediate_executor'
 require 'concurrent/synchronization'
 
 module Concurrent
 
+autoload :Options, 'concurrent/options'
+
 # Lazy evaluation of a block yielding an immutable result. Useful for
 # expensive operations that may never be needed. It may be non-blocking,
 # supports the `Concern::Obligation` interface, and accepts the injection of
@@ -40,7 +40,7 @@ module Concurrent
 # execute on the given executor, allowing the call to timeout.
 #
 # @see Concurrent::Concern::Dereferenceable
-class Delay < Synchronization::
+class Delay < Synchronization::LockableObject
 include Concern::Obligation
 
 # NOTE: Because the global thread pools are lazy-loaded with these objects
@@ -74,7 +74,7 @@ module Concurrent
 #
 # @!macro delay_note_regarding_blocking
 def value(timeout = nil)
-if @
+if @executor # TODO (pitr 12-Sep-2015): broken unsafe read?
 super
 else
 # this function has been optimized for performance and
@@ -108,7 +108,7 @@ module Concurrent
 #
 # @!macro delay_note_regarding_blocking
 def value!(timeout = nil)
-if @
+if @executor
 super
 else
 result = value
@@ -127,7 +127,7 @@ module Concurrent
 #
 # @!macro delay_note_regarding_blocking
 def wait(timeout = nil)
-if @
+if @executor
 execute_task_once
 super(timeout)
 else
@@ -157,7 +157,7 @@ module Concurrent
 def ns_initialize(opts, &block)
 init_obligation(self)
 set_deref_options(opts)
-@
+@executor = opts[:executor]
 
 @task = block
 @state = :pending
@@ -177,7 +177,8 @@ module Concurrent
 end
 
 if execute
-
+executor = Options.executor_from_options(executor: @executor)
+executor.post do
 begin
 result = task.call
 success = true
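Delay's public behavior is unchanged by the LockableObject/Options refactoring above; for reference, a minimal usage sketch (the #pending? query is assumed from the included Concern::Obligation):

  require 'concurrent'

  # The block does not run until first demanded, and the result is memoized.
  delayed = Concurrent::Delay.new { 6 * 7 }

  delayed.pending? #=> true, nothing computed yet
  delayed.value    #=> 42, computed on the first call
  delayed.value    #=> 42, cached; the block is not run again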
data/lib/concurrent/exchanger.rb
CHANGED
@@ -13,6 +13,8 @@
 # pairs. Each thread presents some object on entry to the exchange method,
 # matches with a partner thread, and receives its partner's object on return.
 #
+# @!macro thread_safe_variable_comparison
+#
 # This implementation is very simple, using only a single slot for each
 # exchanger (unlike more advanced implementations which use an "arena").
 # This approach will work perfectly fine when there are only a few threads
data/lib/concurrent/executor/abstract_executor_service.rb
CHANGED
@@ -1,13 +1,13 @@
 require 'concurrent/errors'
 require 'concurrent/executor/executor_service'
-require 'concurrent/synchronization
+require 'concurrent/synchronization'
 require 'concurrent/utility/at_exit'
 
 module Concurrent
 
 # @!macro abstract_executor_service_public_api
 # @!visibility private
-class AbstractExecutorService < Synchronization::
+class AbstractExecutorService < Synchronization::LockableObject
 include ExecutorService
 
 # The set of possible fallback policies that may be set at thread pool creation.
data/lib/concurrent/executor/ruby_executor_service.rb
CHANGED
@@ -6,12 +6,12 @@ module Concurrent
 # @!macro abstract_executor_service_public_api
 # @!visibility private
 class RubyExecutorService < AbstractExecutorService
+safe_initialization!
 
 def initialize(*args, &block)
 super
-@
-@
-ensure_ivar_visibility!
+@StopEvent = Event.new
+@StoppedEvent = Event.new
 end
 
 def post(*args, &task)
@@ -51,7 +51,13 @@
 
 private
 
-
+def stop_event
+@StopEvent
+end
+
+def stopped_event
+@StoppedEvent
+end
 
 def ns_shutdown_execution
 stopped_event.set
data/lib/concurrent/executor/ruby_single_thread_executor.rb
CHANGED
@@ -1,80 +1,22 @@
-require '
-require 'concurrent/executor/ruby_executor_service'
-require 'concurrent/executor/serial_executor_service'
+require 'concurrent/executor/ruby_thread_pool_executor'
 
 module Concurrent
 
 # @!macro single_thread_executor
-# @!macro thread_pool_options
 # @!macro abstract_executor_service_public_api
 # @!visibility private
-class RubySingleThreadExecutor <
-include SerialExecutorService
+class RubySingleThreadExecutor < RubyThreadPoolExecutor
 
 # @!macro single_thread_executor_method_initialize
 def initialize(opts = {})
-super
-
-
-
-
-
-
-
-@fallback_policy = opts.fetch(:fallback_policy, :discard)
-raise ArgumentError.new("#{@fallback_policy} is not a valid fallback policy") unless FALLBACK_POLICIES.include?(@fallback_policy)
-self.auto_terminate = opts.fetch(:auto_terminate, true)
-end
-
-# @!visibility private
-def ns_execute(*args, &task)
-supervise
-@queue << [args, task]
-end
-
-# @!visibility private
-def ns_shutdown_execution
-@queue << :stop
-stopped_event.set unless alive?
-end
-
-# @!visibility private
-def ns_kill_execution
-@queue.clear
-@thread.kill if alive?
-end
-
-# @!visibility private
-def alive?
-@thread && @thread.alive?
-end
-
-# @!visibility private
-def supervise
-@thread = new_worker_thread unless alive?
-end
-
-# @!visibility private
-def new_worker_thread
-Thread.new do
-Thread.current.abort_on_exception = false
-work
-end
-end
-
-# @!visibility private
-def work
-loop do
-task = @queue.pop
-break if task == :stop
-begin
-task.last.call(*task.first)
-rescue => ex
-# let it fail
-log DEBUG, ex
-end
-end
-stopped_event.set
+super(
+min_threads: 1,
+max_threads: 1,
+max_queue: 0,
+idletime: DEFAULT_THREAD_IDLETIMEOUT,
+fallback_policy: opts.fetch(:fallback_policy, :discard),
+auto_terminate: opts.fetch(:auto_terminate, true)
+)
 end
 end
 end
data/lib/concurrent/executor/safe_task_executor.rb
CHANGED
@@ -1,4 +1,4 @@
-require 'concurrent/synchronization
+require 'concurrent/synchronization'
 
 module Concurrent
 
@@ -6,26 +6,25 @@ module Concurrent
 # success - indicating if the callable has been executed without errors
 # value - filled by the callable result if it has been executed without errors, nil otherwise
 # reason - the error risen by the callable if it has been executed with errors, nil otherwise
-class SafeTaskExecutor < Synchronization::
+class SafeTaskExecutor < Synchronization::LockableObject
 
 def initialize(task, opts = {})
-
-@task = task
+@task = task
 @exception_class = opts.fetch(:rescue_exception, false) ? Exception : StandardError
-
+super() # ensures visibility
 end
 
 # @return [Array]
 def execute(*args)
 synchronize do
 success = false
-value
+value = reason = nil
 
 begin
-value
+value = @task.call(*args)
 success = true
 rescue @exception_class => ex
-reason
+reason = ex
 success = false
 end
 
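Given the execute contract shown above (an array of success, value, reason), a quick sketch of how SafeTaskExecutor is typically used; illustrative only:

  require 'concurrent'

  # Wrap a callable so that execution never raises; failures come back as data.
  safe = Concurrent::SafeTaskExecutor.new(->(x) { 100 / x })

  success, value, reason = safe.execute(4)
  # success => true,  value => 25,  reason => nil

  success, value, reason = safe.execute(0)
  # success => false, value => nil, reason => #<ZeroDivisionError: divided by 0>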
data/lib/concurrent/executor/serialized_execution.rb
CHANGED
@@ -1,11 +1,11 @@
 require 'concurrent/errors'
 require 'concurrent/concern/logging'
-require 'concurrent/synchronization
+require 'concurrent/synchronization'
 
 module Concurrent
 
 # Ensures passed jobs in a serialized order never running at the same time.
-class SerializedExecution < Synchronization::
+class SerializedExecution < Synchronization::LockableObject
 include Concern::Logging
 
 def initialize()
@@ -39,8 +39,8 @@
 # As {#post} but allows to submit multiple tasks at once, it's guaranteed that they will not
 # be interleaved by other tasks.
 #
-# @param [Array<Array(
-# first is a {
+# @param [Array<Array(ExecutorService, Array<Object>, Proc)>] posts array of triplets where
+# first is a {ExecutorService}, second is array of args for task, third is a task (Proc)
 def posts(posts)
 # if can_overflow?
 # raise ArgumentError, 'SerializedExecution does not support thread-pools which can overflow'
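A short sketch of the #post/#posts API referenced above; the post(executor, *args, &task) signature is assumed from the surrounding code rather than verified against this pre-release:

  require 'concurrent'

  serializer = Concurrent::SerializedExecution.new
  pool       = Concurrent.global_io_executor

  # Tasks still run on the underlying pool, but never concurrently with each other.
  serializer.post(pool) { puts 'first' }
  serializer.post(pool) { puts 'second' }

  # Submit a batch as triplets (executor, args, task) so no foreign task is interleaved.
  serializer.posts([[pool, [1], ->(n) { puts "batch #{n}" }],
                    [pool, [2], ->(n) { puts "batch #{n}" }]])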
data/lib/concurrent/executor/single_thread_executor.rb
CHANGED
@@ -16,15 +16,22 @@
 
 # @!macro [attach] single_thread_executor
 #
-# A thread pool with a
-#
-#
-# Should a thread crash for any reason the thread will immediately be removed
-# from the pool and replaced.
+# A thread pool with a single thread an unlimited queue. Should the thread
+# die for any reason it will be removed and replaced, thus ensuring that
+# the executor will always remain viable and available to process jobs.
 #
-#
+# A common pattern for background processing is to create a single thread
+# on which an infinite loop is run. The thread's loop blocks on an input
+# source (perhaps blocking I/O or a queue) and processes each input as it
+# is received. This pattern has several issues. The thread itself is highly
+# susceptible to errors during processing. Also, the thread itself must be
+# constantly monitored and restarted should it die. `SingleThreadExecutor`
+# encapsulates all these bahaviors. The task processor is highly resilient
+# to errors from within tasks. Also, should the thread die it will
+# automatically be restarted.
+#
+# The API and behavior of this class are based on Java's `SingleThreadExecutor`.
 #
-# @!macro thread_pool_options
 # @!macro abstract_executor_service_public_api
 class SingleThreadExecutor < SingleThreadExecutorImplementation
 
@@ -32,9 +39,12 @@
 #
 # Create a new thread pool.
 #
-# @option opts [Symbol] :fallback_policy (:discard) the policy for
-#
-#
+# @option opts [Symbol] :fallback_policy (:discard) the policy for handling new
+# tasks that are received when the queue size has reached
+# `max_queue` or the executor has shut down
+#
+# @raise [ArgumentError] if `:fallback_policy` is not one of the values specified
+# in `FALLBACK_POLICIES`
 #
 # @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html
 # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html