concurrent-ruby 0.6.0.pre.1 → 0.6.0.pre.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (142)
  1. checksums.yaml +4 -4
  2. data/README.md +16 -0
  3. data/lib/concurrent.rb +9 -29
  4. data/lib/concurrent/{actor.rb → actor/actor.rb} +3 -3
  5. data/lib/concurrent/actor/actor_context.rb +77 -0
  6. data/lib/concurrent/actor/actor_ref.rb +67 -0
  7. data/lib/concurrent/{postable.rb → actor/postable.rb} +1 -1
  8. data/lib/concurrent/actor/simple_actor_ref.rb +94 -0
  9. data/lib/concurrent/actors.rb +5 -0
  10. data/lib/concurrent/agent.rb +81 -47
  11. data/lib/concurrent/async.rb +35 -35
  12. data/lib/concurrent/atomic/atomic_boolean.rb +157 -0
  13. data/lib/concurrent/atomic/atomic_fixnum.rb +170 -0
  14. data/lib/concurrent/{condition.rb → atomic/condition.rb} +0 -0
  15. data/lib/concurrent/{copy_on_notify_observer_set.rb → atomic/copy_on_notify_observer_set.rb} +48 -13
  16. data/lib/concurrent/{copy_on_write_observer_set.rb → atomic/copy_on_write_observer_set.rb} +41 -20
  17. data/lib/concurrent/atomic/count_down_latch.rb +116 -0
  18. data/lib/concurrent/atomic/cyclic_barrier.rb +106 -0
  19. data/lib/concurrent/atomic/event.rb +103 -0
  20. data/lib/concurrent/{thread_local_var.rb → atomic/thread_local_var.rb} +0 -0
  21. data/lib/concurrent/atomics.rb +9 -0
  22. data/lib/concurrent/channel/buffered_channel.rb +6 -4
  23. data/lib/concurrent/channel/channel.rb +30 -2
  24. data/lib/concurrent/channel/unbuffered_channel.rb +2 -2
  25. data/lib/concurrent/channel/waitable_list.rb +3 -1
  26. data/lib/concurrent/channels.rb +5 -0
  27. data/lib/concurrent/{channel → collection}/blocking_ring_buffer.rb +16 -5
  28. data/lib/concurrent/collection/priority_queue.rb +305 -0
  29. data/lib/concurrent/{channel → collection}/ring_buffer.rb +6 -1
  30. data/lib/concurrent/collections.rb +3 -0
  31. data/lib/concurrent/configuration.rb +68 -19
  32. data/lib/concurrent/dataflow.rb +9 -9
  33. data/lib/concurrent/delay.rb +21 -13
  34. data/lib/concurrent/dereferenceable.rb +40 -33
  35. data/lib/concurrent/exchanger.rb +3 -0
  36. data/lib/concurrent/{cached_thread_pool.rb → executor/cached_thread_pool.rb} +8 -9
  37. data/lib/concurrent/executor/executor.rb +222 -0
  38. data/lib/concurrent/{fixed_thread_pool.rb → executor/fixed_thread_pool.rb} +6 -7
  39. data/lib/concurrent/{immediate_executor.rb → executor/immediate_executor.rb} +5 -5
  40. data/lib/concurrent/executor/java_cached_thread_pool.rb +31 -0
  41. data/lib/concurrent/{java_fixed_thread_pool.rb → executor/java_fixed_thread_pool.rb} +7 -11
  42. data/lib/concurrent/executor/java_single_thread_executor.rb +21 -0
  43. data/lib/concurrent/{java_thread_pool_executor.rb → executor/java_thread_pool_executor.rb} +66 -77
  44. data/lib/concurrent/executor/one_by_one.rb +65 -0
  45. data/lib/concurrent/{per_thread_executor.rb → executor/per_thread_executor.rb} +4 -4
  46. data/lib/concurrent/executor/ruby_cached_thread_pool.rb +29 -0
  47. data/lib/concurrent/{ruby_fixed_thread_pool.rb → executor/ruby_fixed_thread_pool.rb} +5 -4
  48. data/lib/concurrent/executor/ruby_single_thread_executor.rb +72 -0
  49. data/lib/concurrent/executor/ruby_thread_pool_executor.rb +282 -0
  50. data/lib/concurrent/{ruby_thread_pool_worker.rb → executor/ruby_thread_pool_worker.rb} +6 -6
  51. data/lib/concurrent/{safe_task_executor.rb → executor/safe_task_executor.rb} +20 -13
  52. data/lib/concurrent/executor/single_thread_executor.rb +35 -0
  53. data/lib/concurrent/executor/thread_pool_executor.rb +68 -0
  54. data/lib/concurrent/executor/timer_set.rb +138 -0
  55. data/lib/concurrent/executors.rb +9 -0
  56. data/lib/concurrent/future.rb +39 -40
  57. data/lib/concurrent/ivar.rb +22 -15
  58. data/lib/concurrent/mvar.rb +2 -1
  59. data/lib/concurrent/obligation.rb +9 -3
  60. data/lib/concurrent/observable.rb +33 -0
  61. data/lib/concurrent/options_parser.rb +46 -0
  62. data/lib/concurrent/promise.rb +23 -24
  63. data/lib/concurrent/scheduled_task.rb +21 -45
  64. data/lib/concurrent/timer_task.rb +204 -126
  65. data/lib/concurrent/tvar.rb +1 -1
  66. data/lib/concurrent/utilities.rb +3 -36
  67. data/lib/concurrent/{processor_count.rb → utility/processor_count.rb} +1 -1
  68. data/lib/concurrent/utility/timeout.rb +36 -0
  69. data/lib/concurrent/utility/timer.rb +21 -0
  70. data/lib/concurrent/version.rb +1 -1
  71. data/lib/concurrent_ruby_ext.bundle +0 -0
  72. data/spec/concurrent/{actor_context_spec.rb → actor/actor_context_spec.rb} +0 -8
  73. data/spec/concurrent/{actor_ref_shared.rb → actor/actor_ref_shared.rb} +9 -59
  74. data/spec/concurrent/{actor_spec.rb → actor/actor_spec.rb} +43 -41
  75. data/spec/concurrent/{postable_shared.rb → actor/postable_shared.rb} +0 -0
  76. data/spec/concurrent/actor/simple_actor_ref_spec.rb +135 -0
  77. data/spec/concurrent/agent_spec.rb +160 -71
  78. data/spec/concurrent/atomic/atomic_boolean_spec.rb +172 -0
  79. data/spec/concurrent/atomic/atomic_fixnum_spec.rb +186 -0
  80. data/spec/concurrent/{condition_spec.rb → atomic/condition_spec.rb} +2 -2
  81. data/spec/concurrent/{copy_on_notify_observer_set_spec.rb → atomic/copy_on_notify_observer_set_spec.rb} +0 -0
  82. data/spec/concurrent/{copy_on_write_observer_set_spec.rb → atomic/copy_on_write_observer_set_spec.rb} +0 -0
  83. data/spec/concurrent/atomic/count_down_latch_spec.rb +151 -0
  84. data/spec/concurrent/atomic/cyclic_barrier_spec.rb +248 -0
  85. data/spec/concurrent/{event_spec.rb → atomic/event_spec.rb} +18 -3
  86. data/spec/concurrent/{observer_set_shared.rb → atomic/observer_set_shared.rb} +15 -6
  87. data/spec/concurrent/{thread_local_var_spec.rb → atomic/thread_local_var_spec.rb} +0 -0
  88. data/spec/concurrent/channel/buffered_channel_spec.rb +1 -1
  89. data/spec/concurrent/channel/channel_spec.rb +6 -4
  90. data/spec/concurrent/channel/probe_spec.rb +37 -9
  91. data/spec/concurrent/channel/unbuffered_channel_spec.rb +2 -2
  92. data/spec/concurrent/{channel → collection}/blocking_ring_buffer_spec.rb +0 -0
  93. data/spec/concurrent/collection/priority_queue_spec.rb +317 -0
  94. data/spec/concurrent/{channel → collection}/ring_buffer_spec.rb +0 -0
  95. data/spec/concurrent/configuration_spec.rb +4 -70
  96. data/spec/concurrent/dereferenceable_shared.rb +5 -4
  97. data/spec/concurrent/exchanger_spec.rb +10 -5
  98. data/spec/concurrent/{cached_thread_pool_shared.rb → executor/cached_thread_pool_shared.rb} +15 -37
  99. data/spec/concurrent/{fixed_thread_pool_shared.rb → executor/fixed_thread_pool_shared.rb} +0 -0
  100. data/spec/concurrent/{global_thread_pool_shared.rb → executor/global_thread_pool_shared.rb} +10 -8
  101. data/spec/concurrent/{immediate_executor_spec.rb → executor/immediate_executor_spec.rb} +0 -0
  102. data/spec/concurrent/{java_cached_thread_pool_spec.rb → executor/java_cached_thread_pool_spec.rb} +1 -21
  103. data/spec/concurrent/{java_fixed_thread_pool_spec.rb → executor/java_fixed_thread_pool_spec.rb} +0 -0
  104. data/spec/concurrent/executor/java_single_thread_executor_spec.rb +21 -0
  105. data/spec/concurrent/{java_thread_pool_executor_spec.rb → executor/java_thread_pool_executor_spec.rb} +0 -0
  106. data/spec/concurrent/{per_thread_executor_spec.rb → executor/per_thread_executor_spec.rb} +0 -4
  107. data/spec/concurrent/{ruby_cached_thread_pool_spec.rb → executor/ruby_cached_thread_pool_spec.rb} +1 -1
  108. data/spec/concurrent/{ruby_fixed_thread_pool_spec.rb → executor/ruby_fixed_thread_pool_spec.rb} +0 -0
  109. data/spec/concurrent/executor/ruby_single_thread_executor_spec.rb +18 -0
  110. data/spec/concurrent/{ruby_thread_pool_executor_spec.rb → executor/ruby_thread_pool_executor_spec.rb} +12 -24
  111. data/spec/concurrent/executor/safe_task_executor_spec.rb +103 -0
  112. data/spec/concurrent/{thread_pool_class_cast_spec.rb → executor/thread_pool_class_cast_spec.rb} +12 -0
  113. data/spec/concurrent/{thread_pool_executor_shared.rb → executor/thread_pool_executor_shared.rb} +0 -0
  114. data/spec/concurrent/{thread_pool_shared.rb → executor/thread_pool_shared.rb} +84 -119
  115. data/spec/concurrent/executor/timer_set_spec.rb +183 -0
  116. data/spec/concurrent/future_spec.rb +12 -0
  117. data/spec/concurrent/ivar_spec.rb +11 -1
  118. data/spec/concurrent/observable_shared.rb +173 -0
  119. data/spec/concurrent/observable_spec.rb +51 -0
  120. data/spec/concurrent/options_parser_spec.rb +71 -0
  121. data/spec/concurrent/runnable_shared.rb +6 -0
  122. data/spec/concurrent/scheduled_task_spec.rb +60 -40
  123. data/spec/concurrent/timer_task_spec.rb +130 -144
  124. data/spec/concurrent/{processor_count_spec.rb → utility/processor_count_spec.rb} +0 -0
  125. data/spec/concurrent/{utilities_spec.rb → utility/timeout_spec.rb} +0 -0
  126. data/spec/concurrent/utility/timer_spec.rb +52 -0
  127. metadata +147 -108
  128. data/lib/concurrent/actor_context.rb +0 -31
  129. data/lib/concurrent/actor_ref.rb +0 -39
  130. data/lib/concurrent/atomic.rb +0 -121
  131. data/lib/concurrent/channel/probe.rb +0 -19
  132. data/lib/concurrent/count_down_latch.rb +0 -60
  133. data/lib/concurrent/event.rb +0 -80
  134. data/lib/concurrent/java_cached_thread_pool.rb +0 -45
  135. data/lib/concurrent/ruby_cached_thread_pool.rb +0 -37
  136. data/lib/concurrent/ruby_thread_pool_executor.rb +0 -268
  137. data/lib/concurrent/simple_actor_ref.rb +0 -124
  138. data/lib/concurrent/thread_pool_executor.rb +0 -30
  139. data/spec/concurrent/atomic_spec.rb +0 -201
  140. data/spec/concurrent/count_down_latch_spec.rb +0 -125
  141. data/spec/concurrent/safe_task_executor_spec.rb +0 -58
  142. data/spec/concurrent/simple_actor_ref_spec.rb +0 -219
data/lib/concurrent/executor/one_by_one.rb
@@ -0,0 +1,65 @@
+ module Concurrent
+
+   # Ensures that jobs are passed to the underlying executor one by one,
+   # never running at the same time.
+   class OneByOne
+
+     Job = Struct.new(:executor, :args, :block) do
+       def call
+         block.call *args
+       end
+     end
+
+     def initialize
+       @being_executed = false
+       @stash = []
+       @mutex = Mutex.new
+     end
+
+     # Submit a task to the executor for asynchronous processing.
+     #
+     # @param [Executor] executor to be used for this job
+     #
+     # @param [Array] args zero or more arguments to be passed to the task
+     #
+     # @yield the asynchronous task to perform
+     #
+     # @return [Boolean] `true` if the task is queued, `false` if the executor
+     #   is not running
+     #
+     # @raise [ArgumentError] if no task is given
+     def post(executor, *args, &task)
+       return nil if task.nil?
+       job = Job.new executor, args, task
+
+       @mutex.lock
+       post = if @being_executed
+                @stash << job
+                false
+              else
+                @being_executed = true
+              end
+       @mutex.unlock
+
+       call_job job if post
+       true
+     end
+
+     private
+
+     def call_job(job)
+       job.executor.post { work(job) }
+     end
+
+     # ensures next job is executed if any is stashed
+     def work(job)
+       job.call
+     ensure
+       @mutex.lock
+       job = @stash.shift || (@being_executed = false)
+       @mutex.unlock
+       call_job job if job
+     end
+
+   end
+ end
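To illustrate what the new OneByOne wrapper is for, here is a minimal usage sketch of my own (not part of the diff), assuming `Concurrent::FixedThreadPool` from this same gem as the underlying executor: jobs posted through one OneByOne instance run on the pool, but never two at a time.

    require 'concurrent'

    pool   = Concurrent::FixedThreadPool.new(4)  # multi-threaded underlying executor
    serial = Concurrent::OneByOne.new            # serializes jobs posted through it

    log = []
    5.times do |i|
      # While a job is in flight, OneByOne stashes later jobs and replays them
      # one at a time from #work, so these appends never run concurrently.
      serial.post(pool) { log << i }
    end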
data/lib/concurrent/{per_thread_executor.rb → executor/per_thread_executor.rb}
@@ -11,12 +11,12 @@ module Concurrent
        return true
      end

-     def post(*args, &block)
-       return PerThreadExecutor.post(*args, &block)
+     def post(*args, &task)
+       return PerThreadExecutor.post(*args, &task)
      end

-     def <<(block)
-       PerThreadExecutor.post(&block)
+     def <<(task)
+       PerThreadExecutor.post(&task)
        return self
      end
    end
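For context on the renamed parameters, a short sketch (mine, not from the diff, and assuming the class can be instantiated with no arguments) of how these instance methods are used; both simply delegate to `PerThreadExecutor.post`, which runs each task on its own new thread.

    executor = Concurrent::PerThreadExecutor.new

    executor.post(6, 7) { |a, b| puts a * b }           # block runs on a dedicated thread
    executor << proc { puts 'also on its own thread' }  # << takes a callable and returns self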
data/lib/concurrent/executor/ruby_cached_thread_pool.rb
@@ -0,0 +1,29 @@
+ require 'concurrent/executor/ruby_thread_pool_executor'
+
+ module Concurrent
+
+   # @!macro cached_thread_pool
+   class RubyCachedThreadPool < RubyThreadPoolExecutor
+
+     # Create a new thread pool.
+     #
+     # @param [Hash] opts the options defining pool behavior.
+     #   number of seconds a thread may be idle before it is reclaimed
+     #
+     # @raise [ArgumentError] if `overflow_policy` is not a known policy
+     def initialize(opts = {})
+       overflow_policy = opts.fetch(:overflow_policy, :abort)
+
+       raise ArgumentError.new("#{overflow_policy} is not a valid overflow policy") unless OVERFLOW_POLICIES.include?(overflow_policy)
+
+       opts = opts.merge(
+         min_threads: 0,
+         max_threads: DEFAULT_MAX_POOL_SIZE,
+         num_threads: overflow_policy,
+         max_queue: DEFAULT_MAX_QUEUE_SIZE,
+         idletime: DEFAULT_THREAD_IDLETIMEOUT
+       )
+       super(opts)
+     end
+   end
+ end
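A hedged usage sketch (not part of the diff) for the relocated cached pool; it assumes the `post` and `shutdown` methods supplied by the `Executor` module that `RubyThreadPoolExecutor` includes, and `heavy_work` is a hypothetical stand-in for a real task.

    require 'concurrent'

    # :overflow_policy is the only option handled here; the merge above pins the rest.
    pool = Concurrent::RubyCachedThreadPool.new(overflow_policy: :caller_runs)

    pool.post { heavy_work }
    pool.shutdown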
data/lib/concurrent/{ruby_fixed_thread_pool.rb → executor/ruby_fixed_thread_pool.rb}
@@ -1,4 +1,4 @@
- require 'concurrent/ruby_thread_pool_executor'
+ require 'concurrent/executor/ruby_thread_pool_executor'

  module Concurrent

@@ -9,10 +9,10 @@ module Concurrent
      #
      # @param [Integer] num_threads the number of threads to allocate
      # @param [Hash] opts the options defining pool behavior.
-     # @option opts [Symbol] :overflow_policy (+:abort+) the overflow policy
+     # @option opts [Symbol] :overflow_policy (`:abort`) the overflow policy
      #
-     # @raise [ArgumentError] if +num_threads+ is less than or equal to zero
-     # @raise [ArgumentError] if +overflow_policy+ is not a known policy
+     # @raise [ArgumentError] if `num_threads` is less than or equal to zero
+     # @raise [ArgumentError] if `overflow_policy` is not a known policy
      def initialize(num_threads, opts = {})
        overflow_policy = opts.fetch(:overflow_policy, :abort)

@@ -23,6 +23,7 @@ module Concurrent
        min_threads: num_threads,
        max_threads: num_threads,
        num_threads: overflow_policy,
+       max_queue: DEFAULT_MAX_QUEUE_SIZE,
        idletime: 0
      )
      super(opts)
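And a matching sketch (mine, not from the diff) for the fixed pool constructor documented above; `num_threads` is positional, `:overflow_policy` is the only supported option, and `post`/`shutdown` are again assumed to come from the included `Executor` module.

    pool = Concurrent::RubyFixedThreadPool.new(5, overflow_policy: :discard)

    # Posts twenty small tasks onto the five worker threads.
    20.times { |i| pool.post(i) { |n| puts n * n } }
    pool.shutdown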
data/lib/concurrent/executor/ruby_single_thread_executor.rb
@@ -0,0 +1,72 @@
+ require_relative 'executor'
+
+ module Concurrent
+
+   # @!macro single_thread_executor
+   class RubySingleThreadExecutor
+     include Executor
+
+     # Create a new thread pool.
+     #
+     # @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html
+     # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html
+     # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html
+     def initialize(opts = {})
+       @queue = Queue.new
+       @thread = nil
+       init_executor
+     end
+
+     protected
+
+     # @!visibility private
+     def execute(*args, &task)
+       supervise
+       @queue << [args, task]
+     end
+
+     # @!visibility private
+     def shutdown_execution
+       @queue << :stop
+       stopped_event.set unless alive?
+     end
+
+     # @!visibility private
+     def kill_execution
+       @queue.clear
+       @thread.kill if alive?
+     end
+
+     # @!visibility private
+     def alive?
+       @thread && @thread.alive?
+     end
+
+     # @!visibility private
+     def supervise
+       @thread = new_worker_thread unless alive?
+     end
+
+     # @!visibility private
+     def new_worker_thread
+       Thread.new do
+         Thread.current.abort_on_exception = false
+         work
+       end
+     end
+
+     # @!visibility private
+     def work
+       loop do
+         task = @queue.pop
+         break if task == :stop
+         begin
+           task.last.call(*task.first)
+         rescue => ex
+           # let it fail
+         end
+       end
+       stopped_event.set
+     end
+   end
+ end
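A small sketch (not from the diff) of the new single-thread executor, again assuming the `post`/`shutdown` interface from the included `Executor` module: every task goes through one internal `Queue` and is drained by a single worker thread, so tasks run in submission order.

    executor = Concurrent::RubySingleThreadExecutor.new

    log = []
    executor.post { log << :first }    # executed by the lone worker thread
    executor.post { log << :second }   # queued behind the previous task
    executor.shutdown                  # pushes :stop; the worker sets stopped_event when it exits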
data/lib/concurrent/executor/ruby_thread_pool_executor.rb
@@ -0,0 +1,282 @@
+ require 'thread'
+
+ require_relative 'executor'
+ require 'concurrent/atomic/event'
+ require 'concurrent/executor/ruby_thread_pool_worker'
+
+ module Concurrent
+
+   # @!macro thread_pool_executor
+   class RubyThreadPoolExecutor
+     include Executor
+
+     # Default maximum number of threads that will be created in the pool.
+     DEFAULT_MAX_POOL_SIZE = 2**15 # 32768
+
+     # Default minimum number of threads that will be retained in the pool.
+     DEFAULT_MIN_POOL_SIZE = 0
+
+     # Default maximum number of tasks that may be added to the task queue.
+     DEFAULT_MAX_QUEUE_SIZE = 0
+
+     # Default maximum number of seconds a thread in the pool may remain idle
+     # before being reclaimed.
+     DEFAULT_THREAD_IDLETIMEOUT = 60
+
+     # The set of possible overflow policies that may be set at thread pool creation.
+     OVERFLOW_POLICIES = [:abort, :discard, :caller_runs]
+
+     # The maximum number of threads that may be created in the pool.
+     attr_reader :max_length
+
+     # The minimum number of threads that may be retained in the pool.
+     attr_reader :min_length
+
+     # The largest number of threads that have been created in the pool since construction.
+     attr_reader :largest_length
+
+     # The number of tasks that have been scheduled for execution on the pool since construction.
+     attr_reader :scheduled_task_count
+
+     # The number of tasks that have been completed by the pool since construction.
+     attr_reader :completed_task_count
+
+     # The number of seconds that a thread may be idle before being reclaimed.
+     attr_reader :idletime
+
+     # The maximum number of tasks that may be waiting in the work queue at any one time.
+     # When the queue size reaches `max_queue` subsequent tasks will be rejected in
+     # accordance with the configured `overflow_policy`.
+     attr_reader :max_queue
+
+     # The policy defining how rejected tasks (tasks received once the queue size reaches
+     # the configured `max_queue`) are handled. Must be one of the values specified in
+     # `OVERFLOW_POLICIES`.
+     attr_reader :overflow_policy
+
+     # Create a new thread pool.
+     #
+     # @param [Hash] opts the options which configure the thread pool
+     #
+     # @option opts [Integer] :max_threads (DEFAULT_MAX_POOL_SIZE) the maximum
+     #   number of threads to be created
+     # @option opts [Integer] :min_threads (DEFAULT_MIN_POOL_SIZE) the minimum
+     #   number of threads to be retained
+     # @option opts [Integer] :idletime (DEFAULT_THREAD_IDLETIMEOUT) the maximum
+     #   number of seconds a thread may be idle before being reclaimed
+     # @option opts [Integer] :max_queue (DEFAULT_MAX_QUEUE_SIZE) the maximum
+     #   number of tasks allowed in the work queue at any one time; a value of
+     #   zero means the queue may grow without bounnd
+     # @option opts [Symbol] :overflow_policy (:abort) the policy for handling new
+     #   tasks that are received when the queue size has reached `max_queue`
+     #
+     # @raise [ArgumentError] if `:max_threads` is less than one
+     # @raise [ArgumentError] if `:min_threads` is less than zero
+     # @raise [ArgumentError] if `:overflow_policy` is not one of the values specified
+     #   in `OVERFLOW_POLICIES`
+     #
+     # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.html
+     def initialize(opts = {})
+       @min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i
+       @max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i
+       @idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i
+       @max_queue = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i
+       @overflow_policy = opts.fetch(:overflow_policy, :abort)
+
+       raise ArgumentError.new('max_threads must be greater than zero') if @max_length <= 0
+       raise ArgumentError.new('min_threads cannot be less than zero') if @min_length < 0
+       raise ArgumentError.new("#{overflow_policy} is not a valid overflow policy") unless OVERFLOW_POLICIES.include?(@overflow_policy)
+
+       init_executor
+
+       @pool = []
+       @queue = Queue.new
+       @scheduled_task_count = 0
+       @completed_task_count = 0
+       @largest_length = 0
+
+       @gc_interval = opts.fetch(:gc_interval, 1).to_i # undocumented
+       @last_gc_time = Time.now.to_f - [1.0, (@gc_interval * 2.0)].max
+     end
+
+     # The number of threads currently in the pool.
+     #
+     # @return [Integer] the length
+     def length
+       mutex.synchronize{ running? ? @pool.length : 0 }
+     end
+     alias_method :current_length, :length
+
+     # The number of tasks in the queue awaiting execution.
+     #
+     # @return [Integer] the queue_length
+     def queue_length
+       mutex.synchronize{ running? ? @queue.length : 0 }
+     end
+
+     # Number of tasks that may be enqueued before reaching `max_queue` and rejecting
+     # new tasks. A value of -1 indicates that the queue may grow without bound.
+     #
+     # @return [Integer] the remaining_capacity
+     def remaining_capacity
+       mutex.synchronize { @max_queue == 0 ? -1 : @max_queue - @queue.length }
+     end
+
+     # Returns an array with the status of each thread in the pool
+     #
+     # This method is deprecated and will be removed soon.
+     def status
+       warn '[DEPRECATED] `status` is deprecated and will be removed soon.'
+       mutex.synchronize { @pool.collect { |worker| worker.status } }
+     end
+
+     # Run on task completion.
+     #
+     # @!visibility private
+     def on_end_task
+       mutex.synchronize do
+         @completed_task_count += 1 #if success
+         break unless running?
+       end
+     end
+
+     # Run when a thread worker exits.
+     #
+     # @!visibility private
+     def on_worker_exit(worker)
+       mutex.synchronize do
+         @pool.delete(worker)
+         if @pool.empty? && ! running?
+           stop_event.set
+           stopped_event.set
+         end
+       end
+     end
+
+     protected
+
+     # @!visibility private
+     def execute(*args, &task)
+       prune_pool
+       if ensure_capacity?
+         @scheduled_task_count += 1
+         @queue << [args, task]
+       else
+         handle_overflow(*args, &task) if @max_queue != 0 && @queue.length >= @max_queue
+       end
+     end
+
+     # @!visibility private
+     def shutdown_execution
+       @queue.clear
+       if @pool.empty?
+         stopped_event.set
+       else
+         @pool.length.times{ @queue << :stop }
+       end
+     end
+
+     # @!visibility private
+     def kill_execution
+       @queue.clear
+       drain_pool
+     end
+
+     # Check the thread pool configuration and determine if the pool
+     # has enought capacity to handle the request. Will grow the size
+     # of the pool if necessary.
+     #
+     # @return [Boolean] true if the pool has enough capacity else false
+     #
+     # @!visibility private
+     def ensure_capacity?
+       additional = 0
+       capacity = true
+
+       if @pool.size < @min_length
+         additional = @min_length - @pool.size
+       elsif @queue.empty? && @queue.num_waiting >= 1
+         additional = 0
+       elsif @pool.size == 0 && @min_length == 0
+         additional = 1
+       elsif @pool.size < @max_length || @max_length == 0
+         additional = 1
+       elsif @max_queue == 0 || @queue.size < @max_queue
+         additional = 0
+       else
+         capacity = false
+       end
+
+       additional.times do
+         @pool << create_worker_thread
+       end
+
+       if additional > 0
+         @largest_length = [@largest_length, @pool.length].max
+       end
+
+       capacity
+     end
+
+     # Handler which executes the `overflow_policy` once the queue size
+     # reaches `max_queue`.
+     #
+     # @param [Array] args the arguments to the task which is being handled.
+     #
+     # @!visibility private
+     def handle_overflow(*args)
+       case @overflow_policy
+       when :abort
+         raise RejectedExecutionError
+       when :discard
+         false
+       when :caller_runs
+         begin
+           yield(*args)
+         rescue
+           # let it fail
+         end
+         true
+       end
+     end
+
+     # Scan all threads in the pool and reclaim any that are dead or
+     # have been idle too long. Will check the last time the pool was
+     # pruned and only run if the configured garbage collection
+     # interval has passed.
+     #
+     # @!visibility private
+     def prune_pool
+       if Time.now.to_f - @gc_interval >= @last_gc_time
+         @pool.delete_if do |worker|
+           worker.dead? ||
+             (@idletime == 0 ? false : Time.now.to_f - @idletime > worker.last_activity)
+         end
+         @last_gc_time = Time.now.to_f
+       end
+     end
+
+     # Reclaim all threads in the pool.
+     #
+     # @!visibility private
+     def drain_pool
+       @pool.each {|worker| worker.kill }
+       @pool.clear
+     end
+
+     # Create a single worker thread to be added to the pool.
+     #
+     # @return [Thread] the new thread.
+     #
+     # @!visibility private
+     def create_worker_thread
+       wrkr = RubyThreadPoolWorker.new(@queue, self)
+       Thread.new(wrkr, self) do |worker, parent|
+         Thread.current.abort_on_exception = false
+         worker.run
+         parent.on_worker_exit(worker)
+       end
+       return wrkr
+     end
+   end
+ end
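Finally, a hedged example (my own, not part of the diff) that exercises the options documented in the constructor above; with a bounded queue and `:caller_runs`, tasks submitted while the queue is full run on the submitting thread instead of being rejected. The `post` and `shutdown` calls are assumed to come from the included `Executor` module.

    pool = Concurrent::RubyThreadPoolExecutor.new(
      min_threads:     2,
      max_threads:     10,
      idletime:        120,           # reclaim threads idle longer than two minutes
      max_queue:       20,            # bound the work queue
      overflow_policy: :caller_runs   # overflow tasks run on the caller's thread
    )

    50.times { |i| pool.post(i) { |n| n * n } }

    puts pool.length              # threads currently in the pool
    puts pool.queue_length        # tasks waiting to run
    puts pool.remaining_capacity  # queue slots left before the overflow_policy applies
    pool.shutdown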