concurrent-ruby 0.7.0.rc0-x86-linux

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (94)
  1. checksums.yaml +15 -0
  2. data/LICENSE.txt +21 -0
  3. data/README.md +166 -0
  4. data/ext/concurrent_ruby_ext/atomic_reference.c +78 -0
  5. data/ext/concurrent_ruby_ext/atomic_reference.h +12 -0
  6. data/ext/concurrent_ruby_ext/extconf.rb +59 -0
  7. data/ext/concurrent_ruby_ext/rb_concurrent.c +28 -0
  8. data/lib/concurrent.rb +45 -0
  9. data/lib/concurrent/actress.rb +221 -0
  10. data/lib/concurrent/actress/ad_hoc.rb +20 -0
  11. data/lib/concurrent/actress/context.rb +98 -0
  12. data/lib/concurrent/actress/core.rb +228 -0
  13. data/lib/concurrent/actress/core_delegations.rb +42 -0
  14. data/lib/concurrent/actress/envelope.rb +41 -0
  15. data/lib/concurrent/actress/errors.rb +14 -0
  16. data/lib/concurrent/actress/reference.rb +64 -0
  17. data/lib/concurrent/actress/type_check.rb +48 -0
  18. data/lib/concurrent/agent.rb +232 -0
  19. data/lib/concurrent/async.rb +319 -0
  20. data/lib/concurrent/atomic.rb +46 -0
  21. data/lib/concurrent/atomic/atomic_boolean.rb +157 -0
  22. data/lib/concurrent/atomic/atomic_fixnum.rb +162 -0
  23. data/lib/concurrent/atomic/condition.rb +67 -0
  24. data/lib/concurrent/atomic/copy_on_notify_observer_set.rb +118 -0
  25. data/lib/concurrent/atomic/copy_on_write_observer_set.rb +117 -0
  26. data/lib/concurrent/atomic/count_down_latch.rb +116 -0
  27. data/lib/concurrent/atomic/cyclic_barrier.rb +106 -0
  28. data/lib/concurrent/atomic/event.rb +98 -0
  29. data/lib/concurrent/atomic/thread_local_var.rb +117 -0
  30. data/lib/concurrent/atomic_reference/concurrent_update_error.rb +7 -0
  31. data/lib/concurrent/atomic_reference/delegated_update.rb +28 -0
  32. data/lib/concurrent/atomic_reference/direct_update.rb +28 -0
  33. data/lib/concurrent/atomic_reference/jruby.rb +8 -0
  34. data/lib/concurrent/atomic_reference/mutex_atomic.rb +47 -0
  35. data/lib/concurrent/atomic_reference/numeric_cas_wrapper.rb +24 -0
  36. data/lib/concurrent/atomic_reference/rbx.rb +16 -0
  37. data/lib/concurrent/atomic_reference/ruby.rb +16 -0
  38. data/lib/concurrent/atomics.rb +10 -0
  39. data/lib/concurrent/channel/buffered_channel.rb +85 -0
  40. data/lib/concurrent/channel/channel.rb +41 -0
  41. data/lib/concurrent/channel/unbuffered_channel.rb +34 -0
  42. data/lib/concurrent/channel/waitable_list.rb +40 -0
  43. data/lib/concurrent/channels.rb +5 -0
  44. data/lib/concurrent/collection/blocking_ring_buffer.rb +71 -0
  45. data/lib/concurrent/collection/priority_queue.rb +305 -0
  46. data/lib/concurrent/collection/ring_buffer.rb +59 -0
  47. data/lib/concurrent/collections.rb +3 -0
  48. data/lib/concurrent/configuration.rb +158 -0
  49. data/lib/concurrent/dataflow.rb +91 -0
  50. data/lib/concurrent/delay.rb +112 -0
  51. data/lib/concurrent/dereferenceable.rb +101 -0
  52. data/lib/concurrent/errors.rb +30 -0
  53. data/lib/concurrent/exchanger.rb +34 -0
  54. data/lib/concurrent/executor/cached_thread_pool.rb +44 -0
  55. data/lib/concurrent/executor/executor.rb +229 -0
  56. data/lib/concurrent/executor/fixed_thread_pool.rb +33 -0
  57. data/lib/concurrent/executor/immediate_executor.rb +16 -0
  58. data/lib/concurrent/executor/java_cached_thread_pool.rb +31 -0
  59. data/lib/concurrent/executor/java_fixed_thread_pool.rb +33 -0
  60. data/lib/concurrent/executor/java_single_thread_executor.rb +21 -0
  61. data/lib/concurrent/executor/java_thread_pool_executor.rb +187 -0
  62. data/lib/concurrent/executor/per_thread_executor.rb +24 -0
  63. data/lib/concurrent/executor/ruby_cached_thread_pool.rb +29 -0
  64. data/lib/concurrent/executor/ruby_fixed_thread_pool.rb +32 -0
  65. data/lib/concurrent/executor/ruby_single_thread_executor.rb +73 -0
  66. data/lib/concurrent/executor/ruby_thread_pool_executor.rb +286 -0
  67. data/lib/concurrent/executor/ruby_thread_pool_worker.rb +72 -0
  68. data/lib/concurrent/executor/safe_task_executor.rb +35 -0
  69. data/lib/concurrent/executor/serialized_execution.rb +90 -0
  70. data/lib/concurrent/executor/single_thread_executor.rb +35 -0
  71. data/lib/concurrent/executor/thread_pool_executor.rb +68 -0
  72. data/lib/concurrent/executor/timer_set.rb +143 -0
  73. data/lib/concurrent/executors.rb +9 -0
  74. data/lib/concurrent/future.rb +124 -0
  75. data/lib/concurrent/ivar.rb +111 -0
  76. data/lib/concurrent/logging.rb +17 -0
  77. data/lib/concurrent/mvar.rb +200 -0
  78. data/lib/concurrent/obligation.rb +171 -0
  79. data/lib/concurrent/observable.rb +40 -0
  80. data/lib/concurrent/options_parser.rb +46 -0
  81. data/lib/concurrent/promise.rb +169 -0
  82. data/lib/concurrent/scheduled_task.rb +78 -0
  83. data/lib/concurrent/supervisor.rb +343 -0
  84. data/lib/concurrent/timer_task.rb +341 -0
  85. data/lib/concurrent/tvar.rb +252 -0
  86. data/lib/concurrent/utilities.rb +3 -0
  87. data/lib/concurrent/utility/processor_count.rb +150 -0
  88. data/lib/concurrent/utility/timeout.rb +35 -0
  89. data/lib/concurrent/utility/timer.rb +21 -0
  90. data/lib/concurrent/version.rb +3 -0
  91. data/lib/concurrent_ruby.rb +1 -0
  92. data/lib/concurrent_ruby_ext.so +0 -0
  93. data/lib/extension_helper.rb +9 -0
  94. metadata +140 -0
data/lib/concurrent/dataflow.rb
@@ -0,0 +1,91 @@
+ require 'concurrent/future'
+ require 'concurrent/atomic/atomic_fixnum'
+ require 'concurrent/executor/per_thread_executor'
+
+ module Concurrent
+
+   # @!visibility private
+   class DependencyCounter # :nodoc:
+
+     def initialize(count, &block)
+       @counter = AtomicFixnum.new(count)
+       @block = block
+     end
+
+     def update(time, value, reason)
+       if @counter.decrement == 0
+         @block.call
+       end
+     end
+   end
+
+   # Dataflow allows you to create a task that will be scheduled when all of its
+   # data dependencies are available. Data dependencies are `Future` values. The
+   # dataflow task itself is also a `Future` value, so you can build up a graph of
+   # these tasks, each of which is run when all the data and other tasks it depends
+   # on are available or completed.
+   #
+   # Our syntax is somewhat related to that of Akka's `flow` and Habanero Java's
+   # `DataDrivenFuture`. However, unlike Akka we don't schedule a task at all until
+   # it is ready to run, and unlike Habanero Java we pass the data values into the
+   # task instead of dereferencing them again in the task.
+   #
+   # The theory of dataflow goes back to the 80s. In the terminology of the literature,
+   # our implementation is coarse-grained, in that each task can be many instructions,
+   # and dynamic in that you can create more tasks within other tasks.
+   #
+   # @example Parallel Fibonacci calculator
+   #   def fib(n)
+   #     if n < 2
+   #       Concurrent::dataflow { n }
+   #     else
+   #       n1 = fib(n - 1)
+   #       n2 = fib(n - 2)
+   #       Concurrent::dataflow(n1, n2) { |v1, v2| v1 + v2 }
+   #     end
+   #   end
+   #
+   #   f = fib(14) #=> #<Concurrent::Future:0x000001019a26d8 ...
+   #
+   #   # wait up to 1 second for the answer...
+   #   f.value(1) #=> 377
+   #
+   # @param [Future] inputs zero or more `Future` operations that this dataflow depends upon
+   #
+   # @yield The operation to perform once all the dependencies are met
+   # @yieldparam [Future] inputs each of the `Future` inputs to the dataflow
+   # @yieldreturn [Object] the result of the block operation
+   #
+   # @return [Future] a `Future` representing the result of all the operations
+   #
+   # @raise [ArgumentError] if no block is given
+   # @raise [ArgumentError] if any of the inputs are not `IVar`s
+   def dataflow(*inputs, &block)
+     dataflow_with(Concurrent.configuration.global_task_pool, *inputs, &block)
+   end
+   module_function :dataflow
+
+   def dataflow_with(executor, *inputs, &block)
+     raise ArgumentError.new('an executor must be provided') if executor.nil?
+     raise ArgumentError.new('no block given') unless block_given?
+     raise ArgumentError.new('not all dependencies are IVars') unless inputs.all? { |input| input.is_a? IVar }
+
+     result = Future.new(executor: executor) do
+       values = inputs.map { |input| input.value }
+       block.call(*values)
+     end
+
+     if inputs.empty?
+       result.execute
+     else
+       counter = DependencyCounter.new(inputs.size) { result.execute }
+
+       inputs.each do |input|
+         input.add_observer counter
+       end
+     end
+
+     result
+   end
+   module_function :dataflow_with
+ end
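
The `dataflow` and `dataflow_with` methods above resolve every input `Future` before the block runs and pass the resolved values in as block arguments. A minimal usage sketch based only on the API shown in this file (the arithmetic is illustrative, not part of the package):

require 'concurrent'

# zero-input dataflows execute immediately and return Futures
price    = Concurrent.dataflow { 10 }
quantity = Concurrent.dataflow { 3 }

# runs only after both dependencies are fulfilled
total = Concurrent.dataflow(price, quantity) { |p, q| p * q }

total.value #=> 30 (blocks until the dependencies and the block complete)
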
data/lib/concurrent/delay.rb
@@ -0,0 +1,112 @@
+ require 'thread'
+ require 'concurrent/obligation'
+
+ module Concurrent
+
+   # Lazy evaluation of a block yielding an immutable result. Useful for expensive
+   # operations that may never be needed.
+   #
+   # A `Delay` is similar to `Future` but solves a different problem.
+   # Where a `Future` schedules an operation for immediate execution and
+   # performs the operation asynchronously, a `Delay` (as the name implies)
+   # delays execution of the operation until the result is actually needed.
+   #
+   # When a `Delay` is created its state is set to `pending`. The value and
+   # reason are both `nil`. The first time the `#value` method is called the
+   # enclosed operation will be run and the calling thread will block. Other
+   # threads attempting to call `#value` will block as well. Once the operation
+   # is complete the *value* will be set to the result of the operation or the
+   # *reason* will be set to the raised exception, as appropriate. All threads
+   # blocked on `#value` will return. Subsequent calls to `#value` will immediately
+   # return the cached value. The operation will only be run once. This means that
+   # any side effects created by the operation will only happen once as well.
+   #
+   # `Delay` includes the `Concurrent::Dereferenceable` mixin to support thread
+   # safety of the reference returned by `#value`.
+   #
+   # @since 0.6.0
+   #
+   # @see Concurrent::Dereferenceable
+   #
+   # @see http://clojuredocs.org/clojure_core/clojure.core/delay
+   # @see http://aphyr.com/posts/306-clojure-from-the-ground-up-state
+   class Delay
+     include Obligation
+
+     # Create a new `Delay` in the `:pending` state.
+     #
+     # @yield the delayed operation to perform
+     #
+     # @param [Hash] opts the options defining dereference behavior
+     # @option opts [Boolean] :dup_on_deref (false) call `#dup` before returning the data
+     # @option opts [Boolean] :freeze_on_deref (false) call `#freeze` before returning the data
+     # @option opts [Proc] :copy_on_deref (nil) call the given `Proc` passing the internal value and
+     #   returning the value returned from the proc
+     #
+     # @raise [ArgumentError] if no block is given
+     def initialize(opts = {}, &block)
+       raise ArgumentError.new('no block given') unless block_given?
+
+       init_obligation
+       @state = :pending
+       @task = block
+       set_deref_options(opts)
+     end
+
+     # Return the (possibly memoized) value of the delayed operation.
+     #
+     # If the state is `:pending` then the calling thread will block while the
+     # operation is performed. All other threads simultaneously calling `#value`
+     # will block as well. Once the operation is complete (either `:fulfilled` or
+     # `:rejected`) all waiting threads will unblock and the new value will be
+     # returned.
+     #
+     # If the state is not `:pending` when `#value` is called the (possibly memoized)
+     # value will be returned without blocking and without performing the operation
+     # again.
+     #
+     # Regardless of the final disposition all `Dereferenceable` options set during
+     # object construction will be honored.
+     #
+     # @return [Object] the (possibly memoized) result of the block operation
+     #
+     # @see Concurrent::Dereferenceable
+     def value
+       mutex.lock
+       execute_task_once
+       apply_deref_options(@value)
+     ensure
+       mutex.unlock
+     end
+
+     # reconfigures the block returning the value if still #incomplete?
+     # @yield the delayed operation to perform
+     # @return [true, false] if success
+     def reconfigure(&block)
+       mutex.lock
+       raise ArgumentError.new('no block given') unless block_given?
+       if @state == :pending
+         @task = block
+         true
+       else
+         false
+       end
+     ensure
+       mutex.unlock
+     end
+
+     private
+
+     def execute_task_once
+       if @state == :pending
+         begin
+           @value = @task.call
+           @state = :fulfilled
+         rescue => ex
+           @reason = ex
+           @state = :rejected
+         end
+       end
+     end
+   end
+ end
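
A minimal sketch of the memoization behaviour documented above, assuming the gem is loaded with `require 'concurrent'` (the computation is illustrative):

require 'concurrent'

expensive = Concurrent::Delay.new { puts 'computing...'; 6 * 7 }

expensive.value #=> prints "computing..." once, then returns 42
expensive.value #=> 42, returned from the memoized result; the block is not run again
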
data/lib/concurrent/dereferenceable.rb
@@ -0,0 +1,101 @@
+ module Concurrent
+
+   # Object references in Ruby are mutable. This can lead to serious problems when
+   # the `#value` of a concurrent object is a mutable reference, which is always the
+   # case unless the value is a `Fixnum`, `Symbol`, or similar "primitive" data type.
+   # Most classes in this library that expose a `#value` getter method do so using
+   # this mixin module.
+   module Dereferenceable
+
+     # Return the value this object represents after applying the options specified
+     # by the `#set_deref_options` method.
+     #
+     # When multiple deref options are set the order of operations is strictly defined.
+     # The order of deref operations is:
+     # * `:copy_on_deref`
+     # * `:dup_on_deref`
+     # * `:freeze_on_deref`
+     #
+     # Because of this ordering there is no need to `#freeze` an object created by a
+     # provided `:copy_on_deref` block. Simply set `:freeze_on_deref` to `true`.
+     # Setting both `:dup_on_deref` to `true` and `:freeze_on_deref` to `true` is
+     # as close to the behavior of a "pure" functional language (like Erlang, Clojure,
+     # or Haskell) as we are likely to get in Ruby.
+     #
+     # This method is thread-safe and synchronized with the internal `#mutex`.
+     #
+     # @return [Object] the current value of the object
+     def value
+       mutex.lock
+       apply_deref_options(@value)
+     ensure
+       mutex.unlock
+     end
+
+     alias_method :deref, :value
+
+     protected
+
+     # Set the internal value of this object
+     #
+     # @param [Object] val the new value
+     def value=(val)
+       mutex.lock
+       @value = val
+     ensure
+       mutex.unlock
+     end
+
+     # A mutex lock used for synchronizing thread-safe operations. Methods defined
+     # by `Dereferenceable` are synchronized using the `Mutex` returned from this
+     # method. Operations performed by the including class that operate on the
+     # `@value` instance variable should be locked with this `Mutex`.
+     #
+     # @return [Mutex] the synchronization object
+     def mutex
+       @mutex
+     end
+
+     # Initializes the internal `Mutex`.
+     #
+     # @note This method *must* be called from within the constructor of the including class.
+     #
+     # @see #mutex
+     def init_mutex
+       @mutex = Mutex.new
+     end
+
+     # Set the options which define the operations #value performs before
+     # returning data to the caller (dereferencing).
+     #
+     # @note Most classes that include this module will call `#set_deref_options`
+     #   from within the constructor, thus allowing these options to be set at
+     #   object creation.
+     #
+     # @param [Hash] opts the options defining dereference behavior.
+     # @option opts [Boolean] :dup_on_deref (false) call `#dup` before returning the data
+     # @option opts [Boolean] :freeze_on_deref (false) call `#freeze` before returning the data
+     # @option opts [Proc] :copy_on_deref (nil) call the given `Proc` passing the internal value and
+     #   returning the value returned from the proc
+     def set_deref_options(opts = {})
+       mutex.lock
+       @dup_on_deref = opts[:dup_on_deref] || opts[:dup]
+       @freeze_on_deref = opts[:freeze_on_deref] || opts[:freeze]
+       @copy_on_deref = opts[:copy_on_deref] || opts[:copy]
+       @do_nothing_on_deref = !(@dup_on_deref || @freeze_on_deref || @copy_on_deref)
+       nil
+     ensure
+       mutex.unlock
+     end
+
+     # @!visibility private
+     def apply_deref_options(value) # :nodoc:
+       return nil if value.nil?
+       return value if @do_nothing_on_deref
+       value = @copy_on_deref.call(value) if @copy_on_deref
+       value = value.dup if @dup_on_deref
+       value = value.freeze if @freeze_on_deref
+       value
+     end
+   end
+ end
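
The deref options above are normally passed to the constructor of a class that mixes in `Dereferenceable`, such as the `Delay` class from this same release. A hedged sketch (the values are illustrative):

require 'concurrent'

# dup_on_deref copies the internal Hash, then freeze_on_deref freezes the copy,
# so callers cannot mutate the value held inside the Delay
settings = Concurrent::Delay.new(dup_on_deref: true, freeze_on_deref: true) do
  { 'retries' => 3 }
end

settings.value            #=> {"retries"=>3}
settings.value.frozen?    #=> true
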
data/lib/concurrent/errors.rb
@@ -0,0 +1,30 @@
+ module Concurrent
+
+   # Raised when errors occur during configuration.
+   ConfigurationError = Class.new(StandardError)
+
+   # Raised when a lifecycle method (such as `stop`) is called in an improper
+   # sequence or when the object is in an inappropriate state.
+   LifecycleError = Class.new(StandardError)
+
+   # Raised when an object's methods are called when it has not been
+   # properly initialized.
+   InitializationError = Class.new(StandardError)
+
+   # Raised when an object with a start/stop lifecycle has been started an
+   # excessive number of times. Often used in conjunction with a restart
+   # policy or strategy.
+   MaxRestartFrequencyError = Class.new(StandardError)
+
+   # Raised when an attempt is made to modify an immutable object
+   # (such as an `IVar`) after its final state has been set.
+   MultipleAssignmentError = Class.new(StandardError)
+
+   # Raised by an `Executor` when it is unable to process a given task,
+   # possibly because of a reject policy or other internal error.
+   RejectedExecutionError = Class.new(StandardError)
+
+   # Raised when an operation times out.
+   TimeoutError = Class.new(StandardError)
+
+ end
data/lib/concurrent/exchanger.rb
@@ -0,0 +1,34 @@
+ module Concurrent
+   class Exchanger
+
+     EMPTY = Object.new
+
+     def initialize(opts = {})
+       @first = MVar.new(EMPTY, opts)
+       @second = MVar.new(MVar::EMPTY, opts)
+     end
+
+     # @param [Object] value the value to exchange with another thread
+     # @param [Numeric] timeout the maximum time in seconds to wait for another thread; nil (the default) means no timeout
+     # @return [Object] the value exchanged by the other thread; nil if timed out
+     def exchange(value, timeout = nil)
+       first = @first.take(timeout)
+       if first == MVar::TIMEOUT
+         nil
+       elsif first == EMPTY
+         @first.put value
+         second = @second.take timeout
+         if second == MVar::TIMEOUT
+           nil
+         else
+           second
+         end
+       else
+         @first.put EMPTY
+         @second.put value
+         first
+       end
+     end
+
+   end
+ end
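
A hedged sketch of the rendezvous behaviour of `#exchange`, using two plain Ruby threads (the symbols are illustrative):

require 'concurrent'

exchanger = Concurrent::Exchanger.new

t1 = Thread.new { exchanger.exchange(:ping) }
t2 = Thread.new { exchanger.exchange(:pong) }

# each thread receives the value supplied by the other
[t1.value, t2.value] #=> [:pong, :ping]
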
data/lib/concurrent/executor/cached_thread_pool.rb
@@ -0,0 +1,44 @@
+ require 'concurrent/executor/ruby_cached_thread_pool'
+
+ module Concurrent
+
+   if RUBY_PLATFORM == 'java'
+     require 'concurrent/executor/java_cached_thread_pool'
+     # @!macro [attach] cached_thread_pool
+     #   A thread pool that dynamically grows and shrinks to fit the current workload.
+     #   New threads are created as needed, existing threads are reused, and threads
+     #   that remain idle for too long are killed and removed from the pool. These
+     #   pools are particularly suited to applications that perform a high volume of
+     #   short-lived tasks.
+     #
+     #   On creation a `CachedThreadPool` has zero running threads. New threads are
+     #   created as new operations are posted (`#post`). The size of the pool
+     #   will grow until `#max_length` threads are in the pool or until the number
+     #   of threads exceeds the number of running and pending operations. When a new
+     #   operation is posted to the pool the first available idle thread will be tasked
+     #   with the new operation.
+     #
+     #   Should a thread crash for any reason the thread will immediately be removed
+     #   from the pool. Similarly, threads which remain idle for an extended period
+     #   of time will be killed and reclaimed. Thus these thread pools are very
+     #   efficient at reclaiming unused resources.
+     #
+     #   The API and behavior of this class are based on Java's `CachedThreadPool`.
+     #
+     #   @note When running on the JVM (JRuby) this class will inherit from `JavaCachedThreadPool`.
+     #     On all other platforms it will inherit from `RubyCachedThreadPool`.
+     #
+     #   @see Concurrent::RubyCachedThreadPool
+     #   @see Concurrent::JavaCachedThreadPool
+     #
+     #   @see http://docs.oracle.com/javase/tutorial/essential/concurrency/pools.html
+     #   @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/Executors.html
+     #   @see http://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ExecutorService.html
+     class CachedThreadPool < JavaCachedThreadPool
+     end
+   else
+     # @!macro cached_thread_pool
+     class CachedThreadPool < RubyCachedThreadPool
+     end
+   end
+ end
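
A minimal usage sketch of the pool documented above (the tasks are illustrative):

require 'concurrent'

pool = Concurrent::CachedThreadPool.new

# threads are created or reused as tasks are posted
10.times { |i| pool.post { i * i } }

pool.shutdown              # stop accepting tasks; queued work still runs
pool.wait_for_termination  # block until the pool has fully shut down
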
data/lib/concurrent/executor/executor.rb
@@ -0,0 +1,229 @@
+ require 'concurrent/errors'
+ require 'concurrent/logging'
+ require 'concurrent/atomic/event'
+
+ module Concurrent
+
+   module Executor
+     def can_overflow?
+       false
+     end
+   end
+
+   module RubyExecutor
+     include Executor
+     include Logging
+
+     # Submit a task to the executor for asynchronous processing.
+     #
+     # @param [Array] args zero or more arguments to be passed to the task
+     #
+     # @yield the asynchronous task to perform
+     #
+     # @return [Boolean] `true` if the task is queued, `false` if the executor
+     #   is not running
+     #
+     # @raise [ArgumentError] if no task is given
+     def post(*args, &task)
+       raise ArgumentError.new('no block given') unless block_given?
+       mutex.synchronize do
+         return false unless running?
+         execute(*args, &task)
+         true
+       end
+     end
+
+     # Submit a task to the executor for asynchronous processing.
+     #
+     # @param [Proc] task the asynchronous task to perform
+     #
+     # @return [self] returns itself
+     def <<(task)
+       post(&task)
+       self
+     end
+
+     # Is the executor running?
+     #
+     # @return [Boolean] `true` when running, `false` when shutting down or shutdown
+     def running?
+       ! stop_event.set?
+     end
+
+     # Is the executor shutting down?
+     #
+     # @return [Boolean] `true` when not running and not shutdown, else `false`
+     def shuttingdown?
+       ! (running? || shutdown?)
+     end
+
+     # Is the executor shutdown?
+     #
+     # @return [Boolean] `true` when shutdown, `false` when shutting down or running
+     def shutdown?
+       stopped_event.set?
+     end
+
+     # Begin an orderly shutdown. Tasks already in the queue will be executed,
+     # but no new tasks will be accepted. Has no additional effect if the
+     # thread pool is not running.
+     def shutdown
+       mutex.synchronize do
+         break unless running?
+         stop_event.set
+         shutdown_execution
+       end
+       true
+     end
+
+     # Begin an immediate shutdown. In-progress tasks will be allowed to
+     # complete but enqueued tasks will be dismissed and no new tasks
+     # will be accepted. Has no additional effect if the thread pool is
+     # not running.
+     def kill
+       mutex.synchronize do
+         break if shutdown?
+         stop_event.set
+         kill_execution
+         stopped_event.set
+       end
+       true
+     end
+
+     # Block until executor shutdown is complete or until `timeout` seconds have
+     # passed.
+     #
+     # @note Does not initiate shutdown or termination. Either `shutdown` or `kill`
+     #   must be called before this method (or on another thread).
+     #
+     # @param [Integer] timeout the maximum number of seconds to wait for shutdown to complete
+     #
+     # @return [Boolean] `true` if shutdown completed, `false` if the timeout elapsed
+     def wait_for_termination(timeout = nil)
+       stopped_event.wait(timeout)
+     end
+
+     protected
+
+     attr_reader :mutex, :stop_event, :stopped_event
+
+     def init_executor
+       @mutex = Mutex.new
+       @stop_event = Event.new
+       @stopped_event = Event.new
+     end
+
+     def execute(*args, &task)
+       raise NotImplementedError
+     end
+
+     def shutdown_execution
+       stopped_event.set
+     end
+
+     def kill_execution
+       # do nothing
+     end
+   end
+
+   if RUBY_PLATFORM == 'java'
+
+     module JavaExecutor
+       include Executor
+
+       # Submit a task to the executor for asynchronous processing.
+       #
+       # @param [Array] args zero or more arguments to be passed to the task
+       #
+       # @yield the asynchronous task to perform
+       #
+       # @return [Boolean] `true` if the task is queued, `false` if the executor
+       #   is not running
+       #
+       # @raise [ArgumentError] if no task is given
+       def post(*args)
+         raise ArgumentError.new('no block given') unless block_given?
+         if running?
+           @executor.submit{ yield(*args) }
+           true
+         else
+           false
+         end
+       rescue Java::JavaUtilConcurrent::RejectedExecutionException => ex
+         raise RejectedExecutionError
+       end
+
+       # Submit a task to the executor for asynchronous processing.
+       #
+       # @param [Proc] task the asynchronous task to perform
+       #
+       # @return [self] returns itself
+       def <<(task)
+         post(&task)
+         self
+       end
+
+       # Is the executor running?
+       #
+       # @return [Boolean] `true` when running, `false` when shutting down or shutdown
+       def running?
+         ! (shuttingdown? || shutdown?)
+       end
+
+       # Is the executor shutting down?
+       #
+       # @return [Boolean] `true` when not running and not shutdown, else `false`
+       def shuttingdown?
+         if @executor.respond_to? :isTerminating
+           @executor.isTerminating
+         else
+           false
+         end
+       end
+
+       # Is the executor shutdown?
+       #
+       # @return [Boolean] `true` when shutdown, `false` when shutting down or running
+       def shutdown?
+         @executor.isShutdown || @executor.isTerminated
+       end
+
+       # Block until executor shutdown is complete or until `timeout` seconds have
+       # passed.
+       #
+       # @note Does not initiate shutdown or termination. Either `shutdown` or `kill`
+       #   must be called before this method (or on another thread).
+       #
+       # @param [Integer] timeout the maximum number of seconds to wait for shutdown to complete
+       #
+       # @return [Boolean] `true` if shutdown completed, `false` if the timeout elapsed
+       def wait_for_termination(timeout)
+         @executor.awaitTermination(1000 * timeout, java.util.concurrent.TimeUnit::MILLISECONDS)
+       end
+
+       # Begin an orderly shutdown. Tasks already in the queue will be executed,
+       # but no new tasks will be accepted. Has no additional effect if the
+       # executor is not running.
+       def shutdown
+         @executor.shutdown
+         nil
+       end
+
+       # Begin an immediate shutdown. In-progress tasks will be allowed to
+       # complete but enqueued tasks will be dismissed and no new tasks
+       # will be accepted. Has no additional effect if the executor is
+       # not running.
+       def kill
+         @executor.shutdownNow
+         nil
+       end
+
+       protected
+
+       def set_shutdown_hook
+         # without this the process may fail to exit
+         at_exit { self.kill }
+       end
+     end
+   end
+ end
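
The lifecycle contract above (`post`, `shutdown`, `kill`, `wait_for_termination`) is shared by the concrete executors in this gem. A hedged sketch using `FixedThreadPool` from this same release (the task is illustrative):

require 'concurrent'

pool = Concurrent::FixedThreadPool.new(2)

pool.post { sleep(0.1) }       #=> true, the task was queued
pool.running?                  #=> true

pool.shutdown                  # orderly: queued tasks still run, no new ones accepted
pool.wait_for_termination(5)   #=> true once the pool has stopped, false on timeout
pool.shutdown?                 #=> true
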