concurrent-ruby 0.7.0.rc2-java → 0.7.1-java
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- data/CHANGELOG.md +138 -0
- data/README.md +108 -95
- data/lib/concurrent/actor.rb +12 -13
- data/lib/concurrent/actor/behaviour/errors_on_unknown_message.rb +1 -1
- data/lib/concurrent/actor/behaviour/executes_context.rb +1 -1
- data/lib/concurrent/actor/behaviour/linking.rb +4 -1
- data/lib/concurrent/actor/behaviour/pausing.rb +2 -2
- data/lib/concurrent/actor/behaviour/supervised.rb +3 -2
- data/lib/concurrent/actor/behaviour/terminates_children.rb +1 -1
- data/lib/concurrent/actor/behaviour/termination.rb +1 -1
- data/lib/concurrent/actor/context.rb +2 -1
- data/lib/concurrent/actor/core.rb +8 -4
- data/lib/concurrent/actor/utils.rb +10 -0
- data/lib/concurrent/actor/utils/ad_hoc.rb +21 -0
- data/lib/concurrent/actor/utils/balancer.rb +42 -0
- data/lib/concurrent/actor/utils/broadcast.rb +22 -6
- data/lib/concurrent/actor/utils/pool.rb +59 -0
- data/lib/concurrent/agent.rb +1 -22
- data/lib/concurrent/async.rb +1 -79
- data/lib/concurrent/atomic.rb +20 -26
- data/lib/concurrent/atomic/atomic_boolean.rb +4 -1
- data/lib/concurrent/atomic/atomic_fixnum.rb +4 -1
- data/lib/concurrent/atomic/thread_local_var.rb +71 -24
- data/lib/concurrent/atomic_reference/jruby.rb +10 -6
- data/lib/concurrent/atomic_reference/ruby.rb +14 -10
- data/lib/concurrent/atomics.rb +0 -1
- data/lib/concurrent/configuration.rb +11 -5
- data/lib/concurrent/dataflow.rb +1 -30
- data/lib/concurrent/dereferenceable.rb +9 -2
- data/lib/concurrent/executor/indirect_immediate_executor.rb +46 -0
- data/lib/concurrent/executor/java_thread_pool_executor.rb +2 -4
- data/lib/concurrent/executor/ruby_thread_pool_executor.rb +24 -22
- data/lib/concurrent/executor/serialized_execution.rb +36 -23
- data/lib/concurrent/executor/thread_pool_executor.rb +2 -0
- data/lib/concurrent/executor/timer_set.rb +7 -8
- data/lib/concurrent/executors.rb +1 -0
- data/lib/concurrent/future.rb +7 -29
- data/lib/concurrent/ivar.rb +9 -0
- data/lib/concurrent/logging.rb +3 -0
- data/lib/concurrent/mvar.rb +26 -9
- data/lib/concurrent/observable.rb +33 -0
- data/lib/concurrent/promise.rb +59 -1
- data/lib/concurrent/scheduled_task.rb +1 -0
- data/lib/concurrent/timer_task.rb +18 -18
- data/lib/concurrent/tvar.rb +3 -1
- data/lib/concurrent/version.rb +1 -1
- data/lib/concurrent_ruby_ext.jar +0 -0
- data/lib/concurrent_ruby_ext.so +0 -0
- data/lib/extension_helper.rb +25 -6
- metadata +15 -7
- data/lib/concurrent/actor/ad_hoc.rb +0 -19
- data/lib/concurrent/actor/utills.rb +0 -7
data/lib/concurrent/atomic/atomic_boolean.rb CHANGED

@@ -1,3 +1,6 @@
+require_relative '../../extension_helper'
+Concurrent.safe_require_c_extensions
+
 module Concurrent
 
   # @!macro [attach] atomic_boolean
@@ -159,7 +162,7 @@ module Concurrent
   class AtomicBoolean < JavaAtomicBoolean
   end
 
-  elsif
+  elsif Concurrent.allow_c_native_class?('CAtomicBoolean')
 
   # @!macro atomic_boolean
   class CAtomicBoolean
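Whichever backend the guard above selects (JRuby, the C extension, or pure Ruby), callers see the same `AtomicBoolean` interface. A minimal usage sketch, assuming the gem is loaded with `require 'concurrent'`:

```ruby
require 'concurrent'

# The backend is chosen once at require time; this code is backend-agnostic.
flag = Concurrent::AtomicBoolean.new(false)
flag.make_true    # atomically set to true; returns true if the value changed
flag.true?        # => true
flag.value = false
flag.value        # => false
```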
data/lib/concurrent/atomic/atomic_fixnum.rb CHANGED

@@ -1,3 +1,6 @@
+require_relative '../../extension_helper'
+Concurrent.safe_require_c_extensions
+
 module Concurrent
 
   # @!macro [attach] atomic_fixnum
@@ -163,7 +166,7 @@ module Concurrent
   class AtomicFixnum < JavaAtomicFixnum
   end
 
-  elsif
+  elsif Concurrent.allow_c_native_class?('CAtomicFixnum')
 
   # @!macro atomic_fixnum
   class CAtomicFixnum
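The `AtomicFixnum` loader follows the same pattern. A short sketch of the counter API it exposes (same assumption, `require 'concurrent'`):

```ruby
require 'concurrent'

counter = Concurrent::AtomicFixnum.new(0)
counter.increment   # => 1, atomic increment
counter.increment   # => 2
counter.decrement   # => 1
counter.value       # => 1
```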
data/lib/concurrent/atomic/thread_local_var.rb CHANGED

@@ -2,41 +2,83 @@ require 'concurrent/atomic'
 
 module Concurrent
 
-
+  # @!macro [attach] abstract_thread_local_var
+  # A `ThreadLocalVar` is a variable where the value is different for each thread.
+  # Each variable may have a default value, but when you modify the variable only
+  # the current thread will ever see that change.
+  #
+  # @example
+  #   v = ThreadLocalVar.new(14)
+  #   v.value #=> 14
+  #   v.value = 2
+  #   v.value #=> 2
+  #
+  # @example
+  #   v = ThreadLocalVar.new(14)
+  #
+  #   t1 = Thread.new do
+  #     v.value #=> 14
+  #     v.value = 1
+  #     v.value #=> 1
+  #   end
+  #
+  #   t2 = Thread.new do
+  #     v.value #=> 14
+  #     v.value = 2
+  #     v.value #=> 2
+  #   end
+  #
+  #   v.value #=> 14
+  class AbstractThreadLocalVar
 
-
-      @storage = Atomic.new Hash.new
-    end
+    module ThreadLocalRubyStorage
 
-
-      @storage.get[Thread.current]
-    end
+      protected
 
-
-
-
+      unless RUBY_PLATFORM == 'java'
+        require 'ref'
+      end
 
-
+      def allocate_storage
+        @storage = Ref::WeakKeyMap.new
+      end
 
-
+      def get
+        @storage[Thread.current]
+      end
 
-
+      def set(value, &block)
+        key = Thread.current
 
-
-      @var = java.lang.ThreadLocal.new
-    end
+        @storage[key] = value
 
-
-
+        if block_given?
+          begin
+            block.call
+          ensure
+            @storage.delete key
+          end
+        end
+      end
     end
 
-
-      @var.set(value)
-    end
+    module ThreadLocalJavaStorage
 
-
+      protected
 
-
+      def allocate_storage
+        @var = java.lang.ThreadLocal.new
+      end
+
+      def get
+        @var.get
+      end
+
+      def set(value)
+        @var.set(value)
+      end
+
+    end
 
     NIL_SENTINEL = Object.new
 
@@ -58,19 +100,24 @@ module Concurrent
     end
 
     def value=(value)
+      bind value
+    end
+
+    def bind(value, &block)
       if value.nil?
        stored_value = NIL_SENTINEL
       else
        stored_value = value
       end
 
-      set stored_value
+      set stored_value, &block
 
       value
     end
 
   end
 
+  # @!macro abstract_thread_local_var
   class ThreadLocalVar < AbstractThreadLocalVar
     if RUBY_PLATFORM == 'java'
       include ThreadLocalJavaStorage
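The per-thread isolation documented above can be exercised directly; this sketch mirrors the second `@example` in the new docstring:

```ruby
require 'concurrent'

v = Concurrent::ThreadLocalVar.new(14)
v.value             # => 14 on the main thread

t = Thread.new do
  v.value           # => 14, the default
  v.value = 2
  v.value           # => 2, visible only inside this thread
end
t.join

v.value             # => 14; the main thread never sees the other thread's write
```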
data/lib/concurrent/atomic_reference/jruby.rb CHANGED

@@ -1,10 +1,14 @@
-
-
+require_relative '../../extension_helper'
+Concurrent.safe_require_java_extensions
 
-
+if defined?(Concurrent::JavaAtomic)
+  require 'concurrent/atomic_reference/direct_update'
 
-
-
-
+  module Concurrent
+
+    # @!macro atomic_reference
+    class JavaAtomic
+      include Concurrent::AtomicDirectUpdate
+    end
   end
 end
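Mixing `Concurrent::AtomicDirectUpdate` into the Java-backed reference gives it the same `#update` helper as the other backends. A hedged sketch, assuming `Concurrent::Atomic` is the public atomic reference class in this release:

```ruby
require 'concurrent'

ref = Concurrent::Atomic.new(0)

# #update re-reads the current value and retries compare-and-set until it
# succeeds, so the block may run more than once and should be side-effect free.
ref.update { |current| current + 1 }
ref.get   # => 1
```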
data/lib/concurrent/atomic_reference/ruby.rb CHANGED

@@ -1,8 +1,12 @@
-
-
-
-
-
+require_relative '../../extension_helper'
+
+if Concurrent.allow_c_extensions?
+  begin
+    require 'concurrent_ruby_ext'
+  rescue LoadError
+    # may be a Windows cross-compiled native gem
+    require "#{RUBY_VERSION[0..2]}/concurrent_ruby_ext"
+  end
 end
 
 require 'concurrent/atomic_reference/direct_update'
@@ -14,19 +18,19 @@ module Concurrent
   class CAtomic
     include Concurrent::AtomicDirectUpdate
     include Concurrent::AtomicNumericCompareAndSetWrapper
-
+
     # @!method initialize
     #   @!macro atomic_reference_method_initialize
-
+
     # @!method get
     #   @!macro atomic_reference_method_get
-
+
     # @!method set
     #   @!macro atomic_reference_method_set
-
+
     # @!method get_and_set
     #   @!macro atomic_reference_method_get_and_set
-
+
     # @!method _compare_and_set
     #   @!macro atomic_reference_method_compare_and_set
   end
data/lib/concurrent/atomics.rb CHANGED

@@ -7,5 +7,4 @@ require 'concurrent/atomic/copy_on_write_observer_set'
 require 'concurrent/atomic/cyclic_barrier'
 require 'concurrent/atomic/count_down_latch'
 require 'concurrent/atomic/event'
-require 'concurrent/atomic/thread_local_var'
 require 'concurrent/atomic/synchronization'
data/lib/concurrent/configuration.rb CHANGED

@@ -17,6 +17,9 @@ module Concurrent
    #   lambda { |level, progname, message = nil, &block| _ }
    attr_accessor :logger
 
+    # defines if executors should be auto-terminated in at_exit callback
+    attr_accessor :auto_terminate
+
    # Create a new configuration object.
    def initialize
      immediate_executor = ImmediateExecutor.new
@@ -24,6 +27,7 @@ module Concurrent
      @global_operation_pool = Delay.new(executor: immediate_executor) { new_operation_pool }
      @global_timer_set = Delay.new(executor: immediate_executor) { Concurrent::TimerSet.new }
      @logger = no_logger
+      @auto_terminate = true
    end
 
    # if assigned to {#logger}, it will log nothing.
@@ -129,6 +133,12 @@ module Concurrent
    yield(configuration)
  end
 
+  def self.finalize_global_executors
+    self.finalize_executor(self.configuration.global_timer_set)
+    self.finalize_executor(self.configuration.global_task_pool)
+    self.finalize_executor(self.configuration.global_operation_pool)
+  end
+
  private
 
  # Attempt to properly shutdown the given executor using the `shutdown` or
@@ -150,12 +160,8 @@ module Concurrent
    false
  end
 
-
  # set exit hook to shutdown global thread pools
  at_exit do
-
-    self.finalize_executor(self.configuration.global_task_pool)
-    self.finalize_executor(self.configuration.global_operation_pool)
-    # TODO may break other test suites using concurrent-ruby, terminates before test is run
+    finalize_global_executors if configuration.auto_terminate
  end
 end
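The new `auto_terminate` flag controls whether that `at_exit` hook shuts the global executors down; the removed TODO hints at why a test suite might want to opt out. A sketch of disabling it and terminating explicitly instead:

```ruby
require 'concurrent'

Concurrent.configure do |config|
  config.auto_terminate = false   # skip the automatic at_exit shutdown
end

# ... run the application or test suite ...

# Shut down the global timer set, task pool and operation pool explicitly.
Concurrent.finalize_global_executors
```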
data/lib/concurrent/dataflow.rb CHANGED

@@ -19,36 +19,7 @@ module Concurrent
     end
   end
 
-  #
-  # data dependencies are available. Data dependencies are `Future` values. The
-  # dataflow task itself is also a `Future` value, so you can build up a graph of
-  # these tasks, each of which is run when all the data and other tasks it depends
-  # on are available or completed.
-  #
-  # Our syntax is somewhat related to that of Akka's `flow` and Habanero Java's
-  # `DataDrivenFuture`. However unlike Akka we don't schedule a task at all until
-  # it is ready to run, and unlike Habanero Java we pass the data values into the
-  # task instead of dereferencing them again in the task.
-  #
-  # The theory of dataflow goes back to the 80s. In the terminology of the literature,
-  # our implementation is coarse-grained, in that each task can be many instructions,
-  # and dynamic in that you can create more tasks within other tasks.
-  #
-  # @example Parallel Fibonacci calculator
-  #   def fib(n)
-  #     if n < 2
-  #       Concurrent::dataflow { n }
-  #     else
-  #       n1 = fib(n - 1)
-  #       n2 = fib(n - 2)
-  #       Concurrent::dataflow(n1, n2) { |v1, v2| v1 + v2 }
-  #     end
-  #   end
-  #
-  #   f = fib(14) #=> #<Concurrent::Future:0x000001019a26d8 ...
-  #
-  #   # wait up to 1 second for the answer...
-  #   f.value(1) #=> 377
+  # {include:file:doc/dataflow.md}
   #
   # @param [Future] inputs zero or more `Future` operations that this dataflow depends upon
   #
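The inline YARD narrative moved to `doc/dataflow.md`; the parallel Fibonacci example it carried still illustrates the API and is reproduced here:

```ruby
require 'concurrent'

# Each dataflow task is itself a Future and is scheduled only once all of its
# input Futures have completed; their values are passed into the block.
def fib(n)
  if n < 2
    Concurrent::dataflow { n }
  else
    n1 = fib(n - 1)
    n2 = fib(n - 2)
    Concurrent::dataflow(n1, n2) { |v1, v2| v1 + v2 }
  end
end

f = fib(14)
f.value(1)  # => 377, waiting up to one second for the answer
```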
data/lib/concurrent/dereferenceable.rb CHANGED

@@ -3,8 +3,15 @@ module Concurrent
   # Object references in Ruby are mutable. This can lead to serious problems when
   # the `#value` of a concurrent object is a mutable reference. Which is always the
   # case unless the value is a `Fixnum`, `Symbol`, or similar "primitive" data type.
-  # Most classes in this library that expose a `#value` getter method do so using
-  #
+  # Most classes in this library that expose a `#value` getter method do so using the
+  # `Dereferenceable` mixin module.
+  #
+  # Objects with this mixin can be configured with a few options that can help protect
+  # the program from potentially dangerous operations.
+  #
+  # * `:dup_on_deref` when true will call the `#dup` method on the `value` object every time the `#value` method is called (default: false)
+  # * `:freeze_on_deref` when true will call the `#freeze` method on the `value` object every time the `#value` method is called (default: false)
+  # * `:copy_on_deref` when given a `Proc` object the `Proc` will be run every time the `#value` method is called. The `Proc` will be given the current `value` as its only parameter and the result returned by the block will be the return value of the `#value` call. When `nil` this option will be ignored (default: nil)
   module Dereferenceable
 
     # Return the value this object represents after applying the options specified
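A hedged sketch of the documented options in use, assuming `Future` (which mixes `Dereferenceable` in) accepts them at construction:

```ruby
require 'concurrent'

# Each #value call returns a duplicated, frozen copy, so callers cannot
# mutate the result seen by other readers of the Future.
future = Concurrent::Future.execute(dup_on_deref: true, freeze_on_deref: true) do
  { answer: 42 }
end

future.value           # blocks until fulfilled => {:answer=>42}
future.value.frozen?   # => true
```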
data/lib/concurrent/executor/indirect_immediate_executor.rb ADDED

@@ -0,0 +1,46 @@
+require 'concurrent/executor/executor'
+
+module Concurrent
+  # An executor service which runs all operations on a new thread, blocking
+  # until it completes. Operations are performed in the order they are received
+  # and no two operations can be performed simultaneously.
+  #
+  # This executor service exists mainly for testing an debugging. When used it
+  # immediately runs every `#post` operation on a new thread, blocking the
+  # current thread until the operation is complete. This is similar to how the
+  # ImmediateExecutor works, but the operation has the full stack of the new
+  # thread at its disposal. This can be helpful when the operations will spawn
+  # more operations on the same executor and so on - such a situation might
+  # overflow the single stack in case of an ImmediateExecutor, which is
+  # inconsistent with how it would behave for a threaded executor.
+  #
+  # @note Intended for use primarily in testing and debugging.
+  class IndirectImmediateExecutor < ImmediateExecutor
+    # Creates a new executor
+    def initialize
+      super
+      @internal_executor = PerThreadExecutor.new
+    end
+
+    # @!macro executor_method_post
+    def post(*args, &task)
+      raise ArgumentError.new("no block given") unless block_given?
+      return false unless running?
+
+      event = Concurrent::Event.new
+      internal_executor.post do
+        begin
+          task.call(*args)
+        ensure
+          event.set
+        end
+      end
+      event.wait
+
+      true
+    end
+
+    private
+    attr_reader :internal_executor
+  end
+end
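A usage sketch of the new executor: like `ImmediateExecutor` it blocks the caller, but the task runs on a fresh thread with its own stack:

```ruby
require 'concurrent'

executor = Concurrent::IndirectImmediateExecutor.new

# #post blocks until the task finishes on its own thread, then returns true.
executor.post(1, 2) do |a, b|
  puts "ran on #{Thread.current}, sum = #{a + b}"
end
```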
data/lib/concurrent/executor/java_thread_pool_executor.rb CHANGED

@@ -74,9 +74,7 @@ if RUBY_PLATFORM == 'java'
      raise ArgumentError.new('min_threads cannot be more than max_threads') if min_length > max_length
      raise ArgumentError.new("#{@overflow_policy} is not a valid overflow policy") unless OVERFLOW_POLICIES.keys.include?(@overflow_policy)
 
-      if
-        queue = java.util.concurrent.SynchronousQueue.new
-      elsif @max_queue == 0
+      if @max_queue == 0
        queue = java.util.concurrent.LinkedBlockingQueue.new
      else
        queue = java.util.concurrent.LinkedBlockingQueue.new(@max_queue)
@@ -90,7 +88,7 @@ if RUBY_PLATFORM == 'java'
      set_shutdown_hook
    end
 
-
+    # @!macro executor_module_method_can_overflow_question
    def can_overflow?
      @max_queue != 0
    end
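With the dead branch removed, `max_queue: 0` simply selects an unbounded `LinkedBlockingQueue`, and `can_overflow?` reports whether the queue can fill up. A brief sketch (the option names are the same on the Ruby pool below):

```ruby
require 'concurrent'

pool = Concurrent::ThreadPoolExecutor.new(max_queue: 0)  # unbounded queue
pool.can_overflow?  # => false; tasks are never rejected because the queue is full
```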
data/lib/concurrent/executor/ruby_thread_pool_executor.rb CHANGED

@@ -11,20 +11,20 @@ module Concurrent
    include RubyExecutor
 
    # Default maximum number of threads that will be created in the pool.
-    DEFAULT_MAX_POOL_SIZE
+    DEFAULT_MAX_POOL_SIZE = 2**15 # 32768
 
    # Default minimum number of threads that will be retained in the pool.
-    DEFAULT_MIN_POOL_SIZE
+    DEFAULT_MIN_POOL_SIZE = 0
 
    # Default maximum number of tasks that may be added to the task queue.
-    DEFAULT_MAX_QUEUE_SIZE
+    DEFAULT_MAX_QUEUE_SIZE = 0
 
    # Default maximum number of seconds a thread in the pool may remain idle
    # before being reclaimed.
    DEFAULT_THREAD_IDLETIMEOUT = 60
 
    # The set of possible overflow policies that may be set at thread pool creation.
-    OVERFLOW_POLICIES
+    OVERFLOW_POLICIES = [:abort, :discard, :caller_runs]
 
    # The maximum number of threads that may be created in the pool.
    attr_reader :max_length
@@ -77,10 +77,10 @@ module Concurrent
    #
    # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.html
    def initialize(opts = {})
-      @min_length
-      @max_length
-      @idletime
-      @max_queue
+      @min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i
+      @max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i
+      @idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i
+      @max_queue = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i
      @overflow_policy = opts.fetch(:overflow_policy, :abort)
 
      raise ArgumentError.new('max_threads must be greater than zero') if @max_length <= 0
@@ -90,13 +90,13 @@ module Concurrent
 
      init_executor
 
-      @pool
-      @queue
+      @pool = []
+      @queue = Queue.new
      @scheduled_task_count = 0
      @completed_task_count = 0
-      @largest_length
+      @largest_length = 0
 
-      @gc_interval
+      @gc_interval = opts.fetch(:gc_interval, 1).to_i # undocumented
      @last_gc_time = Time.now.to_f - [1.0, (@gc_interval * 2.0)].max
    end
 
@@ -109,15 +109,16 @@ module Concurrent
    #
    # @return [Integer] the length
    def length
-      mutex.synchronize{ running? ? @pool.length : 0 }
+      mutex.synchronize { running? ? @pool.length : 0 }
    end
+
    alias_method :current_length, :length
 
    # The number of tasks in the queue awaiting execution.
    #
    # @return [Integer] the queue_length
    def queue_length
-      mutex.synchronize{ running? ? @queue.length : 0 }
+      mutex.synchronize { running? ? @queue.length : 0 }
    end
 
    # Number of tasks that may be enqueued before reaching `max_queue` and rejecting
@@ -152,7 +153,7 @@ module Concurrent
    def on_worker_exit(worker)
      mutex.synchronize do
        @pool.delete(worker)
-        if @pool.empty? && !
+        if @pool.empty? && !running?
          stop_event.set
          stopped_event.set
        end
@@ -177,7 +178,7 @@ module Concurrent
      if @pool.empty?
        stopped_event.set
      else
-        @pool.length.times{ @queue << :stop }
+        @pool.length.times { @queue << :stop }
      end
    end
 
@@ -196,7 +197,7 @@ module Concurrent
    # @!visibility private
    def ensure_capacity?
      additional = 0
-      capacity
+      capacity = true
 
      if @pool.size < @min_length
        additional = @min_length - @pool.size
@@ -254,10 +255,11 @@ module Concurrent
    # @!visibility private
    def prune_pool
      if Time.now.to_f - @gc_interval >= @last_gc_time
-        @pool.delete_if
-
-
-
+        @pool.delete_if { |worker| worker.dead? }
+        # send :stop for each thread over idletime
+        @pool.
+          select { |worker| @idletime != 0 && Time.now.to_f - @idletime > worker.last_activity }.
+          each { @queue << :stop }
        @last_gc_time = Time.now.to_f
      end
    end
@@ -266,7 +268,7 @@ module Concurrent
    #
    # @!visibility private
    def drain_pool
-      @pool.each {|worker| worker.kill }
+      @pool.each { |worker| worker.kill }
      @pool.clear
    end
 
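The now-explicit defaults map directly onto the constructor options; a sketch of building a bounded pool with them (`do_some_work` is a hypothetical stand-in for real work):

```ruby
require 'concurrent'

pool = Concurrent::ThreadPoolExecutor.new(
  min_threads:     2,
  max_threads:     8,             # DEFAULT_MAX_POOL_SIZE is 2**15
  max_queue:       100,           # 0 (the default) means an unbounded queue
  overflow_policy: :caller_runs   # must be one of OVERFLOW_POLICIES
)

100.times { |i| pool.post(i) { |n| do_some_work(n) } }  # do_some_work is hypothetical

pool.shutdown
pool.wait_for_termination
```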