concurrent-ruby 0.8.0.pre2-java → 0.9.0-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (145)
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +114 -3
  3. data/README.md +111 -55
  4. data/lib/concurrent.rb +90 -14
  5. data/lib/concurrent/async.rb +143 -51
  6. data/lib/concurrent/atom.rb +131 -0
  7. data/lib/concurrent/atomic/atomic_boolean.rb +57 -107
  8. data/lib/concurrent/atomic/atomic_fixnum.rb +73 -101
  9. data/lib/concurrent/atomic/atomic_reference.rb +49 -0
  10. data/lib/concurrent/atomic/condition.rb +23 -12
  11. data/lib/concurrent/atomic/count_down_latch.rb +23 -21
  12. data/lib/concurrent/atomic/cyclic_barrier.rb +47 -47
  13. data/lib/concurrent/atomic/event.rb +33 -42
  14. data/lib/concurrent/atomic/read_write_lock.rb +252 -0
  15. data/lib/concurrent/atomic/semaphore.rb +64 -89
  16. data/lib/concurrent/atomic/thread_local_var.rb +130 -58
  17. data/lib/concurrent/atomic/thread_local_var/weak_key_map.rb +236 -0
  18. data/lib/concurrent/atomic_reference/direct_update.rb +34 -3
  19. data/lib/concurrent/atomic_reference/jruby.rb +6 -3
  20. data/lib/concurrent/atomic_reference/mutex_atomic.rb +17 -39
  21. data/lib/concurrent/atomic_reference/numeric_cas_wrapper.rb +3 -0
  22. data/lib/concurrent/atomic_reference/rbx.rb +4 -1
  23. data/lib/concurrent/atomic_reference/ruby.rb +6 -3
  24. data/lib/concurrent/atomics.rb +74 -4
  25. data/lib/concurrent/collection/copy_on_notify_observer_set.rb +115 -0
  26. data/lib/concurrent/collection/copy_on_write_observer_set.rb +119 -0
  27. data/lib/concurrent/collection/priority_queue.rb +300 -245
  28. data/lib/concurrent/concern/deprecation.rb +34 -0
  29. data/lib/concurrent/concern/dereferenceable.rb +88 -0
  30. data/lib/concurrent/concern/logging.rb +27 -0
  31. data/lib/concurrent/concern/obligation.rb +228 -0
  32. data/lib/concurrent/concern/observable.rb +85 -0
  33. data/lib/concurrent/configuration.rb +234 -109
  34. data/lib/concurrent/dataflow.rb +2 -3
  35. data/lib/concurrent/delay.rb +141 -50
  36. data/lib/concurrent/edge.rb +30 -0
  37. data/lib/concurrent/errors.rb +19 -7
  38. data/lib/concurrent/exchanger.rb +25 -1
  39. data/lib/concurrent/executor/cached_thread_pool.rb +51 -33
  40. data/lib/concurrent/executor/executor.rb +46 -299
  41. data/lib/concurrent/executor/executor_service.rb +521 -0
  42. data/lib/concurrent/executor/fixed_thread_pool.rb +196 -23
  43. data/lib/concurrent/executor/immediate_executor.rb +9 -9
  44. data/lib/concurrent/executor/indirect_immediate_executor.rb +4 -3
  45. data/lib/concurrent/executor/java_single_thread_executor.rb +17 -16
  46. data/lib/concurrent/executor/java_thread_pool_executor.rb +55 -102
  47. data/lib/concurrent/executor/ruby_single_thread_executor.rb +14 -16
  48. data/lib/concurrent/executor/ruby_thread_pool_executor.rb +250 -166
  49. data/lib/concurrent/executor/safe_task_executor.rb +5 -4
  50. data/lib/concurrent/executor/serialized_execution.rb +22 -18
  51. data/lib/concurrent/executor/{per_thread_executor.rb → simple_executor_service.rb} +29 -20
  52. data/lib/concurrent/executor/single_thread_executor.rb +32 -21
  53. data/lib/concurrent/executor/thread_pool_executor.rb +73 -60
  54. data/lib/concurrent/executor/timer_set.rb +96 -84
  55. data/lib/concurrent/executors.rb +1 -1
  56. data/lib/concurrent/future.rb +71 -38
  57. data/lib/concurrent/immutable_struct.rb +89 -0
  58. data/lib/concurrent/ivar.rb +152 -60
  59. data/lib/concurrent/lazy_register.rb +40 -20
  60. data/lib/concurrent/maybe.rb +226 -0
  61. data/lib/concurrent/mutable_struct.rb +227 -0
  62. data/lib/concurrent/mvar.rb +44 -43
  63. data/lib/concurrent/promise.rb +229 -136
  64. data/lib/concurrent/scheduled_task.rb +341 -43
  65. data/lib/concurrent/settable_struct.rb +127 -0
  66. data/lib/concurrent/synchronization.rb +17 -0
  67. data/lib/concurrent/synchronization/abstract_object.rb +163 -0
  68. data/lib/concurrent/synchronization/abstract_struct.rb +158 -0
  69. data/lib/concurrent/synchronization/condition.rb +53 -0
  70. data/lib/concurrent/synchronization/java_object.rb +34 -0
  71. data/lib/concurrent/synchronization/lock.rb +32 -0
  72. data/lib/concurrent/synchronization/monitor_object.rb +26 -0
  73. data/lib/concurrent/synchronization/mutex_object.rb +43 -0
  74. data/lib/concurrent/synchronization/object.rb +78 -0
  75. data/lib/concurrent/synchronization/rbx_object.rb +75 -0
  76. data/lib/concurrent/timer_task.rb +92 -103
  77. data/lib/concurrent/tvar.rb +42 -38
  78. data/lib/concurrent/utilities.rb +3 -1
  79. data/lib/concurrent/utility/at_exit.rb +97 -0
  80. data/lib/concurrent/utility/engine.rb +44 -0
  81. data/lib/concurrent/utility/monotonic_time.rb +59 -0
  82. data/lib/concurrent/utility/native_extension_loader.rb +56 -0
  83. data/lib/concurrent/utility/processor_counter.rb +156 -0
  84. data/lib/concurrent/utility/timeout.rb +18 -14
  85. data/lib/concurrent/utility/timer.rb +11 -6
  86. data/lib/concurrent/version.rb +2 -1
  87. data/lib/concurrent_ruby.rb +1 -0
  88. data/lib/concurrent_ruby_ext.jar +0 -0
  89. metadata +46 -66
  90. data/lib/concurrent/actor.rb +0 -103
  91. data/lib/concurrent/actor/behaviour.rb +0 -70
  92. data/lib/concurrent/actor/behaviour/abstract.rb +0 -48
  93. data/lib/concurrent/actor/behaviour/awaits.rb +0 -21
  94. data/lib/concurrent/actor/behaviour/buffer.rb +0 -54
  95. data/lib/concurrent/actor/behaviour/errors_on_unknown_message.rb +0 -12
  96. data/lib/concurrent/actor/behaviour/executes_context.rb +0 -18
  97. data/lib/concurrent/actor/behaviour/linking.rb +0 -45
  98. data/lib/concurrent/actor/behaviour/pausing.rb +0 -77
  99. data/lib/concurrent/actor/behaviour/removes_child.rb +0 -16
  100. data/lib/concurrent/actor/behaviour/sets_results.rb +0 -36
  101. data/lib/concurrent/actor/behaviour/supervised.rb +0 -59
  102. data/lib/concurrent/actor/behaviour/supervising.rb +0 -34
  103. data/lib/concurrent/actor/behaviour/terminates_children.rb +0 -13
  104. data/lib/concurrent/actor/behaviour/termination.rb +0 -54
  105. data/lib/concurrent/actor/context.rb +0 -154
  106. data/lib/concurrent/actor/core.rb +0 -217
  107. data/lib/concurrent/actor/default_dead_letter_handler.rb +0 -9
  108. data/lib/concurrent/actor/envelope.rb +0 -41
  109. data/lib/concurrent/actor/errors.rb +0 -27
  110. data/lib/concurrent/actor/internal_delegations.rb +0 -49
  111. data/lib/concurrent/actor/public_delegations.rb +0 -40
  112. data/lib/concurrent/actor/reference.rb +0 -81
  113. data/lib/concurrent/actor/root.rb +0 -37
  114. data/lib/concurrent/actor/type_check.rb +0 -48
  115. data/lib/concurrent/actor/utils.rb +0 -10
  116. data/lib/concurrent/actor/utils/ad_hoc.rb +0 -21
  117. data/lib/concurrent/actor/utils/balancer.rb +0 -42
  118. data/lib/concurrent/actor/utils/broadcast.rb +0 -52
  119. data/lib/concurrent/actor/utils/pool.rb +0 -59
  120. data/lib/concurrent/actress.rb +0 -3
  121. data/lib/concurrent/agent.rb +0 -209
  122. data/lib/concurrent/atomic.rb +0 -92
  123. data/lib/concurrent/atomic/copy_on_notify_observer_set.rb +0 -118
  124. data/lib/concurrent/atomic/copy_on_write_observer_set.rb +0 -117
  125. data/lib/concurrent/atomic/synchronization.rb +0 -51
  126. data/lib/concurrent/channel/buffered_channel.rb +0 -85
  127. data/lib/concurrent/channel/channel.rb +0 -41
  128. data/lib/concurrent/channel/unbuffered_channel.rb +0 -35
  129. data/lib/concurrent/channel/waitable_list.rb +0 -40
  130. data/lib/concurrent/channels.rb +0 -5
  131. data/lib/concurrent/collection/blocking_ring_buffer.rb +0 -71
  132. data/lib/concurrent/collection/ring_buffer.rb +0 -59
  133. data/lib/concurrent/collections.rb +0 -3
  134. data/lib/concurrent/dereferenceable.rb +0 -108
  135. data/lib/concurrent/executor/java_cached_thread_pool.rb +0 -32
  136. data/lib/concurrent/executor/java_fixed_thread_pool.rb +0 -31
  137. data/lib/concurrent/executor/ruby_cached_thread_pool.rb +0 -29
  138. data/lib/concurrent/executor/ruby_fixed_thread_pool.rb +0 -32
  139. data/lib/concurrent/executor/ruby_thread_pool_worker.rb +0 -73
  140. data/lib/concurrent/logging.rb +0 -20
  141. data/lib/concurrent/obligation.rb +0 -171
  142. data/lib/concurrent/observable.rb +0 -73
  143. data/lib/concurrent/options_parser.rb +0 -48
  144. data/lib/concurrent/utility/processor_count.rb +0 -152
  145. data/lib/extension_helper.rb +0 -37
data/lib/concurrent/executor/safe_task_executor.rb
@@ -1,4 +1,4 @@
- require 'thread'
+ require 'concurrent/synchronization'

  module Concurrent

@@ -6,17 +6,18 @@ module Concurrent
  # success - indicating if the callable has been executed without errors
  # value - filled by the callable result if it has been executed without errors, nil otherwise
  # reason - the error risen by the callable if it has been executed with errors, nil otherwise
- class SafeTaskExecutor
+ class SafeTaskExecutor < Synchronization::Object

  def initialize(task, opts = {})
+ super()
  @task = task
- @mutex = Mutex.new
  @exception_class = opts.fetch(:rescue_exception, false) ? Exception : StandardError
+ ensure_ivar_visibility!
  end

  # @return [Array]
  def execute(*args)
- @mutex.synchronize do
+ synchronize do
  success = false
  value = reason = nil
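A minimal usage sketch of the updated class, based on the comments above (the lambda, its argument, and the :rescue_exception value are illustrative):

require 'concurrent'

# Wrap a callable so exceptions are captured rather than raised.
double = ->(x) { x * 2 }
safe   = Concurrent::SafeTaskExecutor.new(double, rescue_exception: false)

# #execute returns a triple: success flag, result value, and the rescued error (if any).
success, value, reason = safe.execute(21)
# success => true, value => 42, reason => nil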
 
data/lib/concurrent/executor/serialized_execution.rb
@@ -1,14 +1,18 @@
  require 'delegate'
- require 'concurrent/executor/executor'
- require 'concurrent/logging'
- require 'concurrent/atomic/synchronization'
+ require 'concurrent/executor/executor_service'
+ require 'concurrent/concern/logging'
+ require 'concurrent/synchronization'

  module Concurrent

  # Ensures passed jobs in a serialized order never running at the same time.
- class SerializedExecution
- include Logging
- include Synchronization
+ class SerializedExecution < Synchronization::Object
+ include Concern::Logging
+
+ def initialize()
+ super()
+ synchronize { ns_initialize }
+ end

  Job = Struct.new(:executor, :args, :block) do
  def call
@@ -16,13 +20,6 @@ module Concurrent
  end
  end

- def initialize
- synchronize do
- @being_executed = false
- @stash = []
- end
- end
-
  # Submit a task to the executor for asynchronous processing.
  #
  # @param [Executor] executor to be used for this job
@@ -69,7 +66,12 @@ module Concurrent
  true
  end

- private
+ protected
+
+ def ns_initialize
+ @being_executed = false
+ @stash = []
+ end

  def call_job(job)
  did_it_run = begin
@@ -98,17 +100,19 @@ module Concurrent
  job = @stash.shift || (@being_executed = false)
  end

+ # TODO maybe be able to tell caching pool to just enqueue this job, because the current one end at the end
+ # of this block
  call_job job if job
  end
  end

- # A wrapper/delegator for any `Executor` or `ExecutorService` that
+ # A wrapper/delegator for any `ExecutorService` that
  # guarantees serialized execution of tasks.
  #
  # @see [SimpleDelegator](http://www.ruby-doc.org/stdlib-2.1.2/libdoc/delegate/rdoc/SimpleDelegator.html)
  # @see Concurrent::SerializedExecution
  class SerializedExecutionDelegator < SimpleDelegator
- include SerialExecutor
+ include SerialExecutorService

  def initialize(executor)
  @executor = executor
@@ -116,9 +120,9 @@ module Concurrent
  super(executor)
  end

- # @!macro executor_method_post
+ # @!macro executor_service_method_post
  def post(*args, &task)
- raise ArgumentError.new('no block given') unless block_given?
+ Kernel.raise ArgumentError.new('no block given') unless block_given?
  return false unless running?
  @serializer.post(@executor, *args, &task)
  end
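A brief sketch of how the serialized wrapper is typically used; the FixedThreadPool, the task bodies, and the counts are illustrative and not part of this diff:

require 'concurrent'

pool = Concurrent::FixedThreadPool.new(2)

# Tasks posted through the delegator run one at a time, in submission order,
# even though the underlying pool has more than one thread.
serial = Concurrent::SerializedExecutionDelegator.new(pool)

5.times { |i| serial.post { print "#{i} " } }

pool.shutdown
pool.wait_for_termination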
data/lib/concurrent/executor/{per_thread_executor.rb → simple_executor_service.rb}
@@ -1,5 +1,5 @@
  require 'concurrent/atomics'
- require 'concurrent/executor/executor'
+ require 'concurrent/executor/executor_service'

  module Concurrent

@@ -15,17 +15,9 @@ module Concurrent
  # lead to suboptimal performance.
  #
  # @note Intended for use primarily in testing and debugging.
- class PerThreadExecutor
- include Executor
+ class SimpleExecutorService < RubyExecutorService

- # Creates a new executor
- def initialize
- @running = Concurrent::AtomicBoolean.new(true)
- @stopped = Concurrent::Event.new
- @count = Concurrent::AtomicFixnum.new(0)
- end
-
- # @!macro executor_method_post
+ # @!macro executor_service_method_post
  def self.post(*args)
  raise ArgumentError.new('no block given') unless block_given?
  Thread.new(*args) do
@@ -35,13 +27,13 @@ module Concurrent
  true
  end

- # @!macro executor_method_left_shift
+ # @!macro executor_service_method_left_shift
  def self.<<(task)
  post(&task)
  self
  end

- # @!macro executor_method_post
+ # @!macro executor_service_method_post
  def post(*args, &task)
  raise ArgumentError.new('no block given') unless block_given?
  return false unless running?
@@ -57,44 +49,61 @@ module Concurrent
  end
  end

- # @!macro executor_method_left_shift
+ # @!macro executor_service_method_left_shift
  def <<(task)
  post(&task)
  self
  end

- # @!macro executor_method_running_question
+ # @!macro executor_service_method_running_question
  def running?
  @running.true?
  end

- # @!macro executor_method_shuttingdown_question
+ # @!macro executor_service_method_shuttingdown_question
  def shuttingdown?
  @running.false? && ! @stopped.set?
  end

- # @!macro executor_method_shutdown_question
+ # @!macro executor_service_method_shutdown_question
  def shutdown?
  @stopped.set?
  end

- # @!macro executor_method_shutdown
+ # @!macro executor_service_method_shutdown
  def shutdown
  @running.make_false
  @stopped.set if @count.value == 0
  true
  end

- # @!macro executor_method_kill
+ # @!macro executor_service_method_kill
  def kill
  @running.make_false
  @stopped.set
  true
  end

- # @!macro executor_method_wait_for_termination
+ # @!macro executor_service_method_wait_for_termination
  def wait_for_termination(timeout = nil)
  @stopped.wait(timeout)
  end
+
+ protected
+
+ def ns_initialize
+ @running = Concurrent::AtomicBoolean.new(true)
+ @stopped = Concurrent::Event.new
+ @count = Concurrent::AtomicFixnum.new(0)
+ end
+ end
+
+ # @deprecated
+ class PerThreadExecutor < SimpleExecutorService
+
+ def initialize
+ deprecated 'use SimpleExecutorService instead'
+ super
+ end
  end
  end
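A small sketch of the renamed executor, assuming it can be constructed directly with no arguments (as the instance methods above suggest); the task arguments and blocks are illustrative:

require 'concurrent'

executor = Concurrent::SimpleExecutorService.new

executor.post('hello') { |msg| puts msg }  # each task runs on its own newly spawned thread
executor << -> { puts 'another task' }

executor.shutdown
executor.wait_for_termination(1)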
data/lib/concurrent/executor/single_thread_executor.rb
@@ -2,34 +2,45 @@ require 'concurrent/executor/ruby_single_thread_executor'

  module Concurrent

- if RUBY_PLATFORM == 'java'
-
+ if Concurrent.on_jruby?
  require 'concurrent/executor/java_single_thread_executor'
+ end

- # @!macro [attach] single_thread_executor
- #
- # A thread pool with a set number of threads. The number of threads in the pool
- # is set on construction and remains constant. When all threads are busy new
- # tasks `#post` to the thread pool are enqueued until a thread becomes available.
- # Should a thread crash for any reason the thread will immediately be removed
- # from the pool and replaced.
- #
- # The API and behavior of this class are based on Java's `SingleThreadExecutor`
+ SingleThreadExecutorImplementation = case
+ when Concurrent.on_jruby?
+ JavaSingleThreadExecutor
+ else
+ RubySingleThreadExecutor
+ end
+ private_constant :SingleThreadExecutorImplementation
+
+ # @!macro [attach] single_thread_executor
+ #
+ # A thread pool with a set number of threads. The number of threads in the pool
+ # is set on construction and remains constant. When all threads are busy new
+ # tasks `#post` to the thread pool are enqueued until a thread becomes available.
+ # Should a thread crash for any reason the thread will immediately be removed
+ # from the pool and replaced.
+ #
+ # The API and behavior of this class are based on Java's `SingleThreadExecutor`
+ #
+ # @!macro thread_pool_options
+ # @!macro abstract_executor_service_public_api
+ class SingleThreadExecutor < SingleThreadExecutorImplementation
+
+ # @!macro [new] single_thread_executor_method_initialize
  #
- # @note When running on the JVM (JRuby) this class will inherit from `JavaSingleThreadExecutor`.
- # On all other platforms it will inherit from `RubySingleThreadExecutor`.
+ # Create a new thread pool.
  #
- # @see Concurrent::RubySingleThreadExecutor
- # @see Concurrent::JavaSingleThreadExecutor
+ # @option opts [Symbol] :fallback_policy (:discard) the policy for
+ # handling new tasks that are received when the queue size has
+ # reached `max_queue` or after the executor has shut down
  #
  # @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html
  # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html
  # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html
- class SingleThreadExecutor < JavaSingleThreadExecutor
- end
- else
- # @!macro single_thread_executor
- class SingleThreadExecutor < RubySingleThreadExecutor
- end
+
+ # @!method initialize(opts = {})
+ # @!macro single_thread_executor_method_initialize
  end
  end
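A usage sketch of SingleThreadExecutor under the new implementation-selection scheme; the :fallback_policy value and the posted blocks are illustrative:

require 'concurrent'

# One background thread processes posted tasks in submission order.
executor = Concurrent::SingleThreadExecutor.new(fallback_policy: :abort)

executor.post { puts 'first task' }
executor.post { puts 'second task, same thread' }

executor.shutdown
executor.wait_for_termination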
data/lib/concurrent/executor/thread_pool_executor.rb
@@ -1,68 +1,81 @@
+ require 'concurrent/utility/engine'
  require 'concurrent/executor/ruby_thread_pool_executor'

  module Concurrent

- if RUBY_PLATFORM == 'java'
+ if Concurrent.on_jruby?
  require 'concurrent/executor/java_thread_pool_executor'
- # @!macro [attach] thread_pool_executor
- #
- # An abstraction composed of one or more threads and a task queue. Tasks
- # (blocks or `proc` objects) are submit to the pool and added to the queue.
- # The threads in the pool remove the tasks and execute them in the order
- # they were received. When there are more tasks queued than there are
- # threads to execute them the pool will create new threads, up to the
- # configured maximum. Similarly, threads that are idle for too long will
- # be garbage collected, down to the configured minimum options. Should a
- # thread crash it, too, will be garbage collected.
- #
- # `ThreadPoolExecutor` is based on the Java class of the same name. From
- # the official Java documentationa;
- #
- # > Thread pools address two different problems: they usually provide
- # > improved performance when executing large numbers of asynchronous tasks,
- # > due to reduced per-task invocation overhead, and they provide a means
- # > of bounding and managing the resources, including threads, consumed
- # > when executing a collection of tasks. Each ThreadPoolExecutor also
- # > maintains some basic statistics, such as the number of completed tasks.
- # >
- # > To be useful across a wide range of contexts, this class provides many
- # > adjustable parameters and extensibility hooks. However, programmers are
- # > urged to use the more convenient Executors factory methods
- # > [CachedThreadPool] (unbounded thread pool, with automatic thread reclamation),
- # > [FixedThreadPool] (fixed size thread pool) and [SingleThreadExecutor] (single
- # > background thread), that preconfigure settings for the most common usage
- # > scenarios.
- #
- # Thread pools support several configuration options:
- #
- # * `max_threads`: The maximum number of threads that may be created in the pool.
- # * `min_threads`: The minimum number of threads that may be retained in the pool.
- # * `idletime`: The number of seconds that a thread may be idle before being reclaimed.
- # * `max_queue`: The maximum number of tasks that may be waiting in the work queue at
- # any one time. When the queue size reaches `max_queue` subsequent tasks will be
- # rejected in accordance with the configured `fallback_policy`.
- # * `fallback_policy`: The policy defining how rejected tasks are handled. #
- #
- # Three fallback policies are supported:
- #
- # * `:abort`: Raise a `RejectedExecutionError` exception and discard the task.
- # * `:discard`: Discard the task and return false.
- # * `:caller_runs`: Execute the task on the calling thread.
- #
- # @note When running on the JVM (JRuby) this class will inherit from `JavaThreadPoolExecutor`.
- # On all other platforms it will inherit from `RubyThreadPoolExecutor`.
- #
- # @see Concurrent::RubyThreadPoolExecutor
- # @see Concurrent::JavaThreadPoolExecutor
+ end
+
+ ThreadPoolExecutorImplementation = case
+ when Concurrent.on_jruby?
+ JavaThreadPoolExecutor
+ else
+ RubyThreadPoolExecutor
+ end
+ private_constant :ThreadPoolExecutorImplementation
+
+ # @!macro [attach] thread_pool_executor
+ #
+ # An abstraction composed of one or more threads and a task queue. Tasks
+ # (blocks or `proc` objects) are submit to the pool and added to the queue.
+ # The threads in the pool remove the tasks and execute them in the order
+ # they were received. When there are more tasks queued than there are
+ # threads to execute them the pool will create new threads, up to the
+ # configured maximum. Similarly, threads that are idle for too long will
+ # be garbage collected, down to the configured minimum options. Should a
+ # thread crash it, too, will be garbage collected.
+ #
+ # `ThreadPoolExecutor` is based on the Java class of the same name. From
+ # the official Java documentationa;
+ #
+ # > Thread pools address two different problems: they usually provide
+ # > improved performance when executing large numbers of asynchronous tasks,
+ # > due to reduced per-task invocation overhead, and they provide a means
+ # > of bounding and managing the resources, including threads, consumed
+ # > when executing a collection of tasks. Each ThreadPoolExecutor also
+ # > maintains some basic statistics, such as the number of completed tasks.
+ # >
+ # > To be useful across a wide range of contexts, this class provides many
+ # > adjustable parameters and extensibility hooks. However, programmers are
+ # > urged to use the more convenient Executors factory methods
+ # > [CachedThreadPool] (unbounded thread pool, with automatic thread reclamation),
+ # > [FixedThreadPool] (fixed size thread pool) and [SingleThreadExecutor] (single
+ # > background thread), that preconfigure settings for the most common usage
+ # > scenarios.
+ #
+ # @!macro thread_pool_options
+ #
+ # @!macro thread_pool_executor_public_api
+ class ThreadPoolExecutor < ThreadPoolExecutorImplementation
+
+ # @!macro [new] thread_pool_executor_method_initialize
  #
- # @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html
- # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html
- # @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html
- class ThreadPoolExecutor < JavaThreadPoolExecutor
- end
- else
- # @!macro thread_pool_executor
- class ThreadPoolExecutor < RubyThreadPoolExecutor
- end
+ # Create a new thread pool.
+ #
+ # @param [Hash] opts the options which configure the thread pool.
+ #
+ # @option opts [Integer] :max_threads (DEFAULT_MAX_POOL_SIZE) the maximum
+ # number of threads to be created
+ # @option opts [Integer] :min_threads (DEFAULT_MIN_POOL_SIZE) the minimum
+ # number of threads to be retained
+ # @option opts [Integer] :idletime (DEFAULT_THREAD_IDLETIMEOUT) the maximum
+ # number of seconds a thread may be idle before being reclaimed
+ # @option opts [Integer] :max_queue (DEFAULT_MAX_QUEUE_SIZE) the maximum
+ # number of tasks allowed in the work queue at any one time; a value of
+ # zero means the queue may grow without bound
+ # @option opts [Symbol] :fallback_policy (:abort) the policy for handling new
+ # tasks that are received when the queue size has reached
+ # `max_queue` or the executor has shut down
+ #
+ # @raise [ArgumentError] if `:max_threads` is less than one
+ # @raise [ArgumentError] if `:min_threads` is less than zero
+ # @raise [ArgumentError] if `:fallback_policy` is not one of the values specified
+ # in `FALLBACK_POLICIES`
+ #
+ # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.html
+
+ # @!method initialize(opts = {})
+ # @!macro thread_pool_executor_method_initialize
  end
  end
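A construction sketch using the options documented above; the specific values and task bodies are illustrative:

require 'concurrent'

# Pool sized between 2 and 5 threads, bounded queue, overflow runs on the caller.
pool = Concurrent::ThreadPoolExecutor.new(
  min_threads:     2,
  max_threads:     5,
  max_queue:       100,
  fallback_policy: :caller_runs
)

100.times { |i| pool.post(i) { |n| n * n } }

pool.shutdown
pool.wait_for_termination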
data/lib/concurrent/executor/timer_set.rb
@@ -1,114 +1,126 @@
- require 'thread'
- require_relative 'executor'
- require 'concurrent/options_parser'
+ require 'concurrent/scheduled_task'
  require 'concurrent/atomic/event'
  require 'concurrent/collection/priority_queue'
+ require 'concurrent/concern/deprecation'
+ require 'concurrent/executor/executor_service'
  require 'concurrent/executor/single_thread_executor'

  module Concurrent

- # Executes a collection of tasks at the specified times. A master thread
+ # Executes a collection of tasks, each after a given delay. A master task
  # monitors the set and schedules each task for execution at the appropriate
- # time. Tasks are run on the global task pool or on the supplied executor.
- class TimerSet
- include RubyExecutor
+ # time. Tasks are run on the global thread pool or on the supplied executor.
+ # Each task is represented as a `ScheduledTask`.
+ #
+ # @see Concurrent::ScheduledTask
+ #
+ # @!macro monotonic_clock_warning
+ class TimerSet < RubyExecutorService
+ extend Concern::Deprecation

  # Create a new set of timed tasks.
  #
- # @param [Hash] opts the options controlling how the future will be processed
- # @option opts [Boolean] :operation (false) when `true` will execute the future on the global
- # operation pool (for long-running operations), when `false` will execute the future on the
- # global task pool (for short-running tasks)
- # @option opts [object] :executor when provided will run all operations on
- # this executor rather than the global thread pool (overrides :operation)
+ # @!macro [attach] executor_options
+ #
+ # @param [Hash] opts the options used to specify the executor on which to perform actions
+ # @option opts [Executor] :executor when set use the given `Executor` instance.
+ # Three special values are also supported: `:task` returns the global task pool,
+ # `:operation` returns the global operation pool, and `:immediate` returns a new
+ # `ImmediateExecutor` object.
  def initialize(opts = {})
- @queue = PriorityQueue.new(order: :min)
- @task_executor = OptionsParser::get_executor_from(opts) || Concurrent.configuration.global_task_pool
- @timer_executor = SingleThreadExecutor.new
- @condition = Condition.new
- init_executor
+ super(opts)
  end

- # Post a task to be execute at the specified time. The given time may be either
- # a `Time` object or the number of seconds to wait. If the intended execution
- # time is within 1/100th of a second of the current time the task will be
- # immediately post to the executor.
+ # Post a task to be execute run after a given delay (in seconds). If the
+ # delay is less than 1/100th of a second the task will be immediately post
+ # to the executor.
+ #
+ # @param [Float] delay the number of seconds to wait for before executing the task.
+ # @param [Array<Object>] args the arguments passed to the task on execution.
  #
- # @param [Object] intended_time the time to schedule the task for execution
+ # @yield the task to be performed.
  #
- # @yield the task to be performed
+ # @return [Concurrent::ScheduledTask, false] IVar representing the task if the post
+ # is successful; false after shutdown.
  #
- # @return [Boolean] true if the message is post, false after shutdown
+ # @raise [ArgumentError] if the intended execution time is not in the future.
+ # @raise [ArgumentError] if no block is given.
  #
- # @raise [ArgumentError] if the intended execution time is not in the future
- # @raise [ArgumentError] if no block is given
- def post(intended_time, *args, &task)
- time = TimerSet.calculate_schedule_time(intended_time).to_f
+ # @!macro deprecated_scheduling_by_clock_time
+ def post(delay, *args, &task)
  raise ArgumentError.new('no block given') unless block_given?
-
- mutex.synchronize do
- return false unless running?
-
- if (time - Time.now.to_f) <= 0.01
- @task_executor.post(*args, &task)
- else
- @queue.push(Task.new(time, args, task))
- @timer_executor.post(&method(:process_tasks))
- end
- end
-
- @condition.signal
- true
+ return false unless running?
+ opts = {
+ executor: @task_executor,
+ args: args,
+ timer_set: self
+ }
+ task = ScheduledTask.execute(delay, opts, &task) # may raise exception
+ task.unscheduled? ? false : task
  end

- # For a timer, #kill is like an orderly shutdown, except we need to manually
- # (and destructively) clear the queue first
+ # Begin an immediate shutdown. In-progress tasks will be allowed to
+ # complete but enqueued tasks will be dismissed and no new tasks
+ # will be accepted. Has no additional effect if the thread pool is
+ # not running.
  def kill
- mutex.synchronize { @queue.clear }
  shutdown
  end

- # Calculate an Epoch time with milliseconds at which to execute a
- # task. If the given time is a `Time` object it will be converted
- # accordingly. If the time is an integer value greater than zero
- # it will be understood as a number of seconds in the future and
- # will be added to the current time to calculate Epoch.
+ private :<<
+
+ protected
+
+ # Initialize the object.
  #
- # @param [Object] intended_time the time (as a `Time` object or an integer)
- # to schedule the task for execution
- # @param [Time] now (Time.now) the time from which to calculate an interval
+ # @param [Hash] opts the options to create the object with.
+ # @!visibility private
+ def ns_initialize(opts)
+ @queue = Collection::PriorityQueue.new(order: :min)
+ @task_executor = Executor.executor_from_options(opts) || Concurrent.global_io_executor
+ @timer_executor = SingleThreadExecutor.new
+ @condition = Event.new
+ self.auto_terminate = opts.fetch(:auto_terminate, true)
+ end
+
+ # Post the task to the internal queue.
  #
- # @return [Fixnum] the intended time as seconds/millis from Epoch
+ # @note This is intended as a callback method from ScheduledTask
+ # only. It is not intended to be used directly. Post a task
+ # by using the `SchedulesTask#execute` method.
  #
- # @raise [ArgumentError] if the intended execution time is not in the future
- def self.calculate_schedule_time(intended_time, now = Time.now)
- if intended_time.is_a?(Time)
- raise ArgumentError.new('schedule time must be in the future') if intended_time <= now
- intended_time
+ # @!visibility private
+ def post_task(task)
+ synchronize{ ns_post_task(task) }
+ end
+
+ # @!visibility private
+ def ns_post_task(task)
+ return false unless ns_running?
+ if (task.initial_delay) <= 0.01
+ task.executor.post{ task.process_task }
  else
- raise ArgumentError.new('seconds must be greater than zero') if intended_time.to_f < 0.0
- now + intended_time
+ @queue.push(task)
+ # only post the process method when the queue is empty
+ @timer_executor.post(&method(:process_tasks)) if @queue.length == 1
+ @condition.set
  end
+ true
  end

- private
-
- # A struct for encapsulating a task and its intended execution time.
- # It facilitates proper prioritization by overriding the comparison
- # (spaceship) operator as a comparison of the intended execution
- # times.
+ # Remove the given task from the queue.
+ #
+ # @note This is intended as a callback method from `ScheduledTask`
+ # only. It is not intended to be used directly. Cancel a task
+ # by using the `ScheduledTask#cancel` method.
  #
  # @!visibility private
- Task = Struct.new(:time, :args, :op) do
- include Comparable
-
- def <=>(other)
- self.time <=> other.time
- end
+ def remove_task(task)
+ synchronize{ @queue.delete(task) }
  end

- private_constant :Task
-
+ # `ExecutorServic` callback called during shutdown.
+ #
  # @!visibility private
  def shutdown_execution
  @queue.clear
@@ -124,11 +136,13 @@ module Concurrent
  # @!visibility private
  def process_tasks
  loop do
- task = mutex.synchronize { @queue.peek }
+ task = synchronize { @condition.reset; @queue.peek }
  break unless task
- interval = task.time - Time.now.to_f

- if interval <= 0
+ now = Concurrent.monotonic_time
+ diff = task.schedule_time - now
+
+ if diff <= 0
  # We need to remove the task from the queue before passing
  # it to the executor, to avoid race conditions where we pass
  # the peek'ed task to the executor and then pop a different
@@ -141,12 +155,10 @@ module Concurrent
  # the only reader, so whatever timer is at the head of the
  # queue now must have the same pop time, or a closer one, as
  # when we peeked).
- task = mutex.synchronize { @queue.pop }
- @task_executor.post(*task.args, &task.op)
+ task = synchronize { @queue.pop }
+ task.executor.post{ task.process_task }
  else
- mutex.synchronize do
- @condition.wait(mutex, [interval, 60].min)
- end
+ @condition.wait([diff, 60].min)
  end
  end
  end
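A sketch of the new delay-based API, which now returns a ScheduledTask rather than a bare boolean; the delay, the argument, and the state predicates (pending?, fulfilled?, inherited from the task's IVar behavior) are illustrative assumptions, not part of this diff:

require 'concurrent'

timers = Concurrent::TimerSet.new

# Schedule a block to run roughly 0.5 seconds from now.
task = timers.post(0.5, 'payload') { |msg| puts "fired with #{msg}" }

task.pending?    # => true until the delay elapses
sleep(1)
task.fulfilled?  # => true once the block has run

timers.shutdown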