concurrent-ruby 0.7.0.rc2-java → 0.7.1-java

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. data/CHANGELOG.md +138 -0
  2. data/README.md +108 -95
  3. data/lib/concurrent/actor.rb +12 -13
  4. data/lib/concurrent/actor/behaviour/errors_on_unknown_message.rb +1 -1
  5. data/lib/concurrent/actor/behaviour/executes_context.rb +1 -1
  6. data/lib/concurrent/actor/behaviour/linking.rb +4 -1
  7. data/lib/concurrent/actor/behaviour/pausing.rb +2 -2
  8. data/lib/concurrent/actor/behaviour/supervised.rb +3 -2
  9. data/lib/concurrent/actor/behaviour/terminates_children.rb +1 -1
  10. data/lib/concurrent/actor/behaviour/termination.rb +1 -1
  11. data/lib/concurrent/actor/context.rb +2 -1
  12. data/lib/concurrent/actor/core.rb +8 -4
  13. data/lib/concurrent/actor/utils.rb +10 -0
  14. data/lib/concurrent/actor/utils/ad_hoc.rb +21 -0
  15. data/lib/concurrent/actor/utils/balancer.rb +42 -0
  16. data/lib/concurrent/actor/utils/broadcast.rb +22 -6
  17. data/lib/concurrent/actor/utils/pool.rb +59 -0
  18. data/lib/concurrent/agent.rb +1 -22
  19. data/lib/concurrent/async.rb +1 -79
  20. data/lib/concurrent/atomic.rb +20 -26
  21. data/lib/concurrent/atomic/atomic_boolean.rb +4 -1
  22. data/lib/concurrent/atomic/atomic_fixnum.rb +4 -1
  23. data/lib/concurrent/atomic/thread_local_var.rb +71 -24
  24. data/lib/concurrent/atomic_reference/jruby.rb +10 -6
  25. data/lib/concurrent/atomic_reference/ruby.rb +14 -10
  26. data/lib/concurrent/atomics.rb +0 -1
  27. data/lib/concurrent/configuration.rb +11 -5
  28. data/lib/concurrent/dataflow.rb +1 -30
  29. data/lib/concurrent/dereferenceable.rb +9 -2
  30. data/lib/concurrent/executor/indirect_immediate_executor.rb +46 -0
  31. data/lib/concurrent/executor/java_thread_pool_executor.rb +2 -4
  32. data/lib/concurrent/executor/ruby_thread_pool_executor.rb +24 -22
  33. data/lib/concurrent/executor/serialized_execution.rb +36 -23
  34. data/lib/concurrent/executor/thread_pool_executor.rb +2 -0
  35. data/lib/concurrent/executor/timer_set.rb +7 -8
  36. data/lib/concurrent/executors.rb +1 -0
  37. data/lib/concurrent/future.rb +7 -29
  38. data/lib/concurrent/ivar.rb +9 -0
  39. data/lib/concurrent/logging.rb +3 -0
  40. data/lib/concurrent/mvar.rb +26 -9
  41. data/lib/concurrent/observable.rb +33 -0
  42. data/lib/concurrent/promise.rb +59 -1
  43. data/lib/concurrent/scheduled_task.rb +1 -0
  44. data/lib/concurrent/timer_task.rb +18 -18
  45. data/lib/concurrent/tvar.rb +3 -1
  46. data/lib/concurrent/version.rb +1 -1
  47. data/lib/concurrent_ruby_ext.jar +0 -0
  48. data/lib/concurrent_ruby_ext.so +0 -0
  49. data/lib/extension_helper.rb +25 -6
  50. metadata +15 -7
  51. data/lib/concurrent/actor/ad_hoc.rb +0 -19
  52. data/lib/concurrent/actor/utills.rb +0 -7
data/lib/concurrent/executor/serialized_execution.rb

@@ -1,12 +1,14 @@
 require 'delegate'
 require 'concurrent/executor/executor'
 require 'concurrent/logging'
+require 'concurrent/atomic/synchronization'
 
 module Concurrent
 
   # Ensures passed jobs in a serialized order never running at the same time.
   class SerializedExecution
     include Logging
+    include Synchronization
 
     Job = Struct.new(:executor, :args, :block) do
       def call
@@ -15,9 +17,10 @@ module Concurrent
     end
 
     def initialize
-      @being_executed = false
-      @stash = []
-      @mutex = Mutex.new
+      synchronize do
+        @being_executed = false
+        @stash = []
+      end
     end
 
     # Submit a task to the executor for asynchronous processing.
@@ -33,23 +36,36 @@ module Concurrent
     #
     # @raise [ArgumentError] if no task is given
     def post(executor, *args, &task)
-      return nil if task.nil?
-
-      job = Job.new executor, args, task
-
-      begin
-        @mutex.lock
-        post = if @being_executed
-                 @stash << job
-                 false
-               else
-                 @being_executed = true
-               end
-      ensure
-        @mutex.unlock
+      posts [[executor, args, task]]
+      true
+    end
+
+    # As {#post} but allows to submit multiple tasks at once, it's guaranteed that they will not
+    # be interleaved by other tasks.
+    #
+    # @param [Array<Array(Executor, Array<Object>, Proc)>] posts array of triplets where
+    #   first is a {Executor}, second is array of args for task, third is a task (Proc)
+    def posts(posts)
+      # if can_overflow?
+      #   raise ArgumentError, 'SerializedExecution does not support thread-pools which can overflow'
+      # end
+
+      return nil if posts.empty?
+
+      jobs = posts.map { |executor, args, task| Job.new executor, args, task }
+
+      job_to_post = synchronize do
+        if @being_executed
+          @stash.push(*jobs)
+          nil
+        else
+          @being_executed = true
+          @stash.push(*jobs[1..-1])
+          jobs.first
+        end
       end
 
-      call_job job if post
+      call_job job_to_post if job_to_post
       true
     end
 
@@ -78,11 +94,8 @@
     def work(job)
       job.call
     ensure
-      begin
-        @mutex.lock
+      synchronize do
         job = @stash.shift || (@being_executed = false)
-      ensure
-        @mutex.unlock
      end
 
       call_job job if job
@@ -98,7 +111,7 @@
     include SerialExecutor
 
     def initialize(executor)
-      @executor = executor
+      @executor   = executor
       @serializer = SerializedExecution.new
       super(executor)
     end
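To make the new `#posts` batch API concrete, a rough usage sketch follows (the pool and the lambdas are illustrative; `SerializedExecution` is an internal helper, so this construction is an assumption rather than documented usage):

```ruby
require 'concurrent'

# Hypothetical example: both jobs are handed to the serializer in one batch,
# so no other task posted through this serializer can run between them.
pool       = Concurrent::FixedThreadPool.new(2)
serializer = Concurrent::SerializedExecution.new

serializer.posts([
  [pool, [:first],  ->(tag) { puts "running #{tag}" }],
  [pool, [:second], ->(tag) { puts "running #{tag}" }]
])
```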
data/lib/concurrent/executor/thread_pool_executor.rb

@@ -49,6 +49,8 @@
   #   * `:discard`: Silently discard the task and return `nil` as the task result.
   #   * `:caller_runs`: Execute the task on the calling thread.
   #
+  # {include:file:doc/thread_pools.md}
+  #
   # @note When running on the JVM (JRuby) this class will inherit from `JavaThreadPoolExecutor`.
   #   On all other platforms it will inherit from `RubyThreadPoolExecutor`.
   #
data/lib/concurrent/executor/timer_set.rb

@@ -22,10 +22,10 @@ module Concurrent
     # @option opts [object] :executor when provided will run all operations on
     #   this executor rather than the global thread pool (overrides :operation)
     def initialize(opts = {})
-      @queue = PriorityQueue.new(order: :min)
-      @task_executor = OptionsParser::get_executor_from(opts) || Concurrent.configuration.global_task_pool
+      @queue          = PriorityQueue.new(order: :min)
+      @task_executor  = OptionsParser::get_executor_from(opts) || Concurrent.configuration.global_task_pool
       @timer_executor = SingleThreadExecutor.new
-      @condition = Condition.new
+      @condition      = Condition.new
       init_executor
     end
 
@@ -64,7 +64,7 @@
     # For a timer, #kill is like an orderly shutdown, except we need to manually
     # (and destructively) clear the queue first
     def kill
-      @queue.clear
+      mutex.synchronize { @queue.clear }
       shutdown
     end
 
@@ -124,14 +124,13 @@
     # @!visibility private
     def process_tasks
       loop do
-        break if @queue.empty?
-
-        task = @queue.peek
+        task = mutex.synchronize { @queue.peek }
+        break unless task
         interval = task.time - Time.now.to_f
 
         if interval <= 0
           @task_executor.post(*task.args, &task.op)
-          @queue.pop
+          mutex.synchronize { @queue.pop }
         else
           mutex.synchronize do
             @condition.wait(mutex, [interval, 60].min)
data/lib/concurrent/executors.rb

@@ -1,6 +1,7 @@
 require 'concurrent/executor/cached_thread_pool'
 require 'concurrent/executor/fixed_thread_pool'
 require 'concurrent/executor/immediate_executor'
+require 'concurrent/executor/indirect_immediate_executor'
 require 'concurrent/executor/per_thread_executor'
 require 'concurrent/executor/safe_task_executor'
 require 'concurrent/executor/single_thread_executor'
data/lib/concurrent/future.rb

@@ -6,35 +6,7 @@ require 'concurrent/executor/safe_task_executor'
 
 module Concurrent
 
-  # A `Future` represents a promise to complete an action at some time in the future.
-  # The action is atomic and permanent. The idea behind a future is to send an operation
-  # for asynchronous completion, do other stuff, then return and retrieve the result
-  # of the async operation at a later time.
-  #
-  # A `Future` has four possible states: *:unscheduled*, *:pending*, *:rejected*, or *:fulfilled*.
-  # When a `Future` is created its state is set to *:unscheduled*. Once the `#execute` method is
-  # called the state becomes *:pending* and will remain in that state until processing is
-  # complete. A completed `Future` is either *:rejected*, indicating that an exception was
-  # thrown during processing, or *:fulfilled*, indicating success. If a `Future` is *:fulfilled*
-  # its `value` will be updated to reflect the result of the operation. If *:rejected* the
-  # `reason` will be updated with a reference to the thrown exception. The predicate methods
-  # `#unscheduled?`, `#pending?`, `#rejected?`, and `fulfilled?` can be called at any time to
-  # obtain the state of the `Future`, as can the `#state` method, which returns a symbol.
-  #
-  # Retrieving the value of a `Future` is done through the `#value` (alias: `#deref`) method.
-  # Obtaining the value of a `Future` is a potentially blocking operation. When a `Future` is
-  # *:rejected* a call to `#value` will return `nil` immediately. When a `Future` is
-  # *:fulfilled* a call to `#value` will immediately return the current value. When a
-  # `Future` is *:pending* a call to `#value` will block until the `Future` is either
-  # *:rejected* or *:fulfilled*. A *timeout* value can be passed to `#value` to limit how
-  # long the call will block. If `nil` the call will block indefinitely. If `0` the call will
-  # not block. Any other integer or float value will indicate the maximum number of seconds to block.
-  #
-  # The `Future` class also includes the behavior of the Ruby standard library `Observable` module,
-  # but does so in a thread-safe way. On fulfillment or rejection all observers will be notified
-  # according to the normal `Observable` behavior. The observer callback function will be called
-  # with three parameters: the `Time` of fulfillment/rejection, the final `value`, and the final
-  # `reason`. Observers added after fulfillment/rejection will still be notified as normal.
+  # {include:file:doc/future.md}
   #
   # @see http://ruby-doc.org/stdlib-2.1.1/libdoc/observer/rdoc/Observable.html Ruby Observable module
   # @see http://clojuredocs.org/clojure_core/clojure.core/future Clojure's future function
@@ -94,6 +66,12 @@ module Concurrent
     #
     # @yield the asynchronous operation to perform
     #
+    # @param [Hash] opts the options controlling how the future will be processed
+    # @option opts [Boolean] :operation (false) when `true` will execute the future on the global
+    #   operation pool (for long-running operations), when `false` will execute the future on the
+    #   global task pool (for short-running tasks)
+    # @option opts [object] :executor when provided will run all operations on
+    #   this executor rather than the global thread pool (overrides :operation)
     # @option opts [String] :dup_on_deref (false) call `#dup` before returning the data
     # @option opts [String] :freeze_on_deref (false) call `#freeze` before returning the data
     # @option opts [String] :copy_on_deref (nil) call the given `Proc` passing the internal value and
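A minimal sketch of the newly documented `:executor` option (the pool is illustrative):

```ruby
require 'concurrent'

# Run the future on an explicit pool instead of the global task pool.
pool   = Concurrent::FixedThreadPool.new(2)
future = Concurrent::Future.execute(executor: pool) { 21 * 2 }
future.value #=> 42
```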
data/lib/concurrent/ivar.rb

@@ -6,6 +6,10 @@ require 'concurrent/observable'
 
 module Concurrent
 
+  # An `IVar` is like a future that you can assign. As a future is a value that is being computed that you can wait on, an `IVar` is a value that is waiting to be assigned, that you can wait on. `IVars` are single assignment and deterministic.
+  #
+  # Then, express futures as an asynchronous computation that assigns an `IVar`. The `IVar` becomes the primitive on which [futures](Future) and [dataflow](Dataflow) are built.
+  #
   # An `IVar` is a single-element container that is normally created empty, and
   # can only be set once. The I in `IVar` stands for immutable. Reading an `IVar`
   # normally blocks until it is set. It is safe to set and read an `IVar` from
@@ -15,6 +19,11 @@ module Concurrent
   # a `Future`. If you want to create a graph of parallel tasks all executed when
   # the values they depend on are ready you want `dataflow`. `IVar` is generally
   # a low-level primitive.
+  #
+  # **See Also:**
+  #
+  # * For the theory: Arvind, R. Nikhil, and K. Pingali. [I-Structures: Data structures for parallel computing](http://dl.acm.org/citation.cfm?id=69562). In Proceedings of Workshop on Graph Reduction, 1986.
+  # * For recent application: [DataDrivenFuture in Habanero Java from Rice](http://www.cs.rice.edu/~vs3/hjlib/doc/edu/rice/hj/api/HjDataDrivenFuture.html).
   #
   # @example Create, set and get an `IVar`
   #   ivar = Concurrent::IVar.new
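A short sketch of the single-assignment behaviour described in the new doc text (the producer thread is illustrative):

```ruby
require 'concurrent'

ivar = Concurrent::IVar.new

# Another thread assigns the value exactly once...
Thread.new { ivar.set(14) }

# ...while readers block until the assignment has happened.
ivar.value      #=> 14
ivar.fulfilled? #=> true
```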
data/lib/concurrent/logging.rb

@@ -12,6 +12,9 @@ module Concurrent
     #   @yieldreturn [String] a message
     def log(level, progname, message = nil, &block)
       (@logger || Concurrent.configuration.logger).call level, progname, message, &block
+    rescue => error
+      $stderr.puts "`Concurrent.configuration.logger` failed to log #{[level, progname, message, block]}\n" +
+                       "#{error.message} (#{error.class})\n#{error.backtrace.join "\n"}"
     end
   end
 end
data/lib/concurrent/mvar.rb

@@ -4,16 +4,33 @@ require 'concurrent/atomic/event'
 
 module Concurrent
 
-  # An `MVar` is a single-element container that blocks on `get` if it is empty,
-  # and blocks on `put` if it is full. It is safe to use an `MVar` from
-  # multiple threads. `MVar` can be seen as a single-element blocking queue, or
-  # a rendezvous variable.
+  # An `MVar` is a synchronized single element container. They are empty or contain one item.
+  # Taking a value from an empty `MVar` blocks, as does putting a value into a full one.
+  # You can either think of them as blocking queue of length one, or a special kind of
+  # mutable variable.
+  #
+  # On top of the fundamental `#put` and `#take` operations, we also provide a `#mutate`
+  # that is atomic with respect to operations on the same instance. These operations all
+  # support timeouts.
+  #
+  # We also support non-blocking operations `#try_put!` and `#try_take!`, a `#set!` that
+  # ignores existing values, a `#value` that returns the value without removing it or
+  # returns `MVar::EMPTY`, and a `#modify!` that yields `MVar::EMPTY` if the `MVar` is
+  # empty and can be used to set `MVar::EMPTY`. You shouldn't use these operations in the
+  # first instance.
+  #
+  # `MVar` is a [Dereferenceable](Dereferenceable).
+  #
+  # `MVar` is related to M-structures in Id, `MVar` in Haskell and `SyncVar` in Scala.
   #
-  # An `MVar` is typically used to transfer objects between threads, where the
-  # sending thread will block if the previous message hasn't been taken yet by the
-  # receiving thread. It can also be used to control access to some global shared
-  # state, where threads `take` the value, perform some operation, and then
-  # `put` it back.
+  # Note that unlike the original Haskell paper, our `#take` is blocking. This is how
+  # Haskell and Scala do it today.
+  #
+  # **See Also:**
+  #
+  # 1. P. Barth, R. Nikhil, and Arvind. [M-Structures: Extending a parallel, non-
+  #    strict, functional language with state](http://dl.acm.org/citation.cfm?id=652538). In Proceedings of the 5th ACM Conference on Functional Programming Languages and Computer Architecture (FPCA), 1991.
+  # 2. S. Peyton Jones, A. Gordon, and S. Finne. [Concurrent Haskell](http://dl.acm.org/citation.cfm?id=237794). In Proceedings of the 23rd Symposium on Principles of Programming Languages (PoPL), 1996.
   class MVar
 
     include Dereferenceable
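A small rendezvous sketch of the blocking `#put`/`#take` behaviour described above (the threads are illustrative):

```ruby
require 'concurrent'

mvar = Concurrent::MVar.new

# The consumer blocks on #take until a value arrives; #put would likewise
# block if the MVar already held an item.
consumer = Thread.new { mvar.take }
mvar.put :message

consumer.value #=> :message
```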
data/lib/concurrent/observable.rb

@@ -3,6 +3,39 @@ require 'concurrent/atomic/copy_on_write_observer_set'
 
 module Concurrent
 
+  # The [observer pattern](http://en.wikipedia.org/wiki/Observer_pattern) is one of the most useful design pattern.
+  #
+  # The workflow is very simple:
+  # - an `observer` can register itself to a `subject` via a callback
+  # - many `observers` can be registered to the same `subject`
+  # - the `subject` notifies all registered observers when its status changes
+  # - an `observer` can deregister itself when is no more interested to receive event notifications
+  #
+  # In a single threaded environment the whole pattern is very easy: the `subject` can use a simple data structure to manage all its subscribed `observer`s and every `observer` can react directly to every event without caring about synchronization.
+  #
+  # In a multi threaded environment things are more complex.
+  # The `subject` must synchronize the access to its data structure and to do so currently we're using two specialized ObserverSet: CopyOnWriteObserverSet and CopyOnNotifyObserverSet.
+  #
+  # When implementing and `observer` there's a very important rule to remember: **there are no guarantees about the thread that will execute the callback**
+  #
+  # Let's take this example
+  # ```
+  # class Observer
+  #   def initialize
+  #     @count = 0
+  #   end
+  #
+  #   def update
+  #     @count += 1
+  #   end
+  # end
+  #
+  # obs = Observer.new
+  # [obj1, obj2, obj3, obj4].each { |o| o.add_observer(obs) }
+  # # execute [obj1, obj2, obj3, obj4]
+  # ```
+  #
+  # `obs` is wrong because the variable `@count` can be accessed by different threads at the same time, so it should be synchronized (using either a Mutex or an AtomicFixum)
   module Observable
 
     # @return [Object] the added observer
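A possible thread-safe version of the counter observer from the comment above, sketched with `Concurrent::AtomicFixnum` (the class and method names are illustrative):

```ruby
require 'concurrent'

class SafeObserver
  def initialize
    @count = Concurrent::AtomicFixnum.new(0)
  end

  # Observer callbacks may arrive from any thread, so the counter is
  # updated atomically rather than with a plain integer.
  def update(*)
    @count.increment
  end

  def count
    @count.value
  end
end
```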
data/lib/concurrent/promise.rb

@@ -5,8 +5,9 @@ require 'concurrent/options_parser'
 
 module Concurrent
 
-  # TODO unify promise and future to single class, with dataflow
+  # {include:file:doc/promise.md}
   class Promise
+    # TODO unify promise and future to single class, with dataflow
     include Obligation
 
     # Initialize a new Promise with the provided options.
@@ -110,6 +111,63 @@ module Concurrent
     alias_method :catch, :rescue
     alias_method :on_error, :rescue
 
+    # Yield the successful result to the block that returns a promise. If that
+    # promise is also successful the result is the result of the yielded promise.
+    # If either part fails the whole also fails.
+    #
+    # @example
+    #   Promise.execute { 1 }.flat_map { |v| Promise.execute { v + 2 } }.value! #=> 3
+    #
+    # @return [Promise]
+    def flat_map(&block)
+      child = Promise.new(
+        parent: self,
+        executor: ImmediateExecutor.new,
+      )
+
+      on_error { |e| child.on_reject(e) }
+      on_success do |result1|
+        begin
+          inner = block.call(result1)
+          inner.execute
+          inner.on_success { |result2| child.on_fulfill(result2) }
+          inner.on_error { |e| child.on_reject(e) }
+        rescue => e
+          child.on_reject(e)
+        end
+      end
+
+      child
+    end
+
+    # Builds a promise that produces the result of promises in an Array
+    # and fails if any of them fails.
+    #
+    # @param [Array<Promise>] promises
+    #
+    # @return [Promise<Array>]
+    def self.zip(*promises)
+      zero = fulfill([], executor: ImmediateExecutor.new)
+
+      promises.reduce(zero) do |p1, p2|
+        p1.flat_map do |results|
+          p2.then do |next_result|
+            results << next_result
+          end
+        end
+      end
+    end
+
+    # Builds a promise that produces the result of self and others in an Array
+    # and fails if any of them fails.
+    #
+    # @param [Array<Promise>] others
+    #
+    # @return [Promise<Array>]
+    def zip(*others)
+      self.class.zip(self, *others)
+    end
+
     protected
 
     def set_pending
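A quick sketch of the new `zip` combinators, assuming already-executed promises (expected results shown as described in the doc text above):

```ruby
require 'concurrent'

a = Concurrent::Promise.execute { 1 }
b = Concurrent::Promise.execute { 2 }

# Collect both results into an Array; the zipped promise fails if either fails.
Concurrent::Promise.zip(a, b).value! #=> [1, 2]
a.zip(b).value!                      #=> [1, 2]
```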
data/lib/concurrent/scheduled_task.rb

@@ -4,6 +4,7 @@ require 'concurrent/executor/safe_task_executor'
 
 module Concurrent
 
+  # {include:file:doc/scheduled_task.md}
   class ScheduledTask < IVar
 
     attr_reader :schedule_time
data/lib/concurrent/timer_task.rb

@@ -10,7 +10,7 @@ module Concurrent
   # intervals. The thread that performs the task sleeps for the given interval then
   # wakes up and performs the task. Lather, rinse, repeat... This pattern causes two
   # problems. First, it is difficult to test the business logic of the task because the
-  # task itself is tightly coupled with the concurrency logic. Second, an exception in
+  # task itself is tightly coupled with the concurrency logic. Second, an exception
   # raised while performing the task can cause the entire thread to abend. In a
   # long-running application where the task thread is intended to run for days/weeks/years
   # a crashed task thread can pose a significant problem. `TimerTask` alleviates both problems.
@@ -23,13 +23,13 @@ module Concurrent
   # performing logging or ancillary operations. `TimerTask` can also be configured with a
   # timeout value allowing it to kill a task that runs too long.
   #
-  # One other advantage of `TimerTask` is it forces the business logic to be completely decoupled
+  # One other advantage of `TimerTask` is that it forces the business logic to be completely decoupled
   # from the concurrency logic. The business logic can be tested separately then passed to the
   # `TimerTask` for scheduling and running.
   #
   # In some cases it may be necessary for a `TimerTask` to affect its own execution cycle.
-  # To facilitate this a reference to the task object is passed into the block as a block
-  # argument every time the task is executed.
+  # To facilitate this, a reference to the TimerTask instance is passed as an argument
+  # to the provided block every time the task is executed.
   #
   # The `TimerTask` class includes the `Dereferenceable` mixin module so the result of
   # the last execution is always available via the `#value` method. Derefencing options
@@ -39,13 +39,13 @@ module Concurrent
   # `TimerTask` supports notification through the Ruby standard library
   # {http://ruby-doc.org/stdlib-2.0/libdoc/observer/rdoc/Observable.html Observable}
   # module. On execution the `TimerTask` will notify the observers
-  # with threes arguments: time of execution, the result of the block (or nil on failure),
+  # with three arguments: time of execution, the result of the block (or nil on failure),
   # and any raised exceptions (or nil on success). If the timeout interval is exceeded
   # the observer will receive a `Concurrent::TimeoutError` object as the third argument.
   #
   # @example Basic usage
   #   task = Concurrent::TimerTask.new{ puts 'Boom!' }
-  #   task.run!
+  #   task.execute
   #
   #   task.execution_interval #=> 60 (default)
   #   task.timeout_interval #=> 30 (default)
@@ -53,7 +53,7 @@ module Concurrent
   #   # wait 60 seconds...
   #   #=> 'Boom!'
   #
-  #   task.stop #=> true
+  #   task.shutdown #=> true
   #
   # @example Configuring `:execution_interval` and `:timeout_interval`
   #   task = Concurrent::TimerTask.new(execution_interval: 5, timeout_interval: 5) do
@@ -65,7 +65,7 @@ module Concurrent
   #
   # @example Immediate execution with `:run_now`
   #   task = Concurrent::TimerTask.new(run_now: true){ puts 'Boom!' }
-  #   task.run!
+  #   task.execute
   #
   #   #=> 'Boom!'
   #
@@ -75,7 +75,7 @@ module Concurrent
   #     execution_interval: 5
   #   ){ Time.now }
   #
-  #   task.run!
+  #   task.execute
   #   Time.now #=> 2013-11-07 18:06:50 -0500
   #   sleep(10)
   #   task.value #=> 2013-11-07 18:06:55 -0500
@@ -87,11 +87,11 @@ module Concurrent
   #     task.execution_interval += 1
   #     if task.execution_interval > 5
   #       puts 'Stopping...'
-  #       task.stop
+  #       task.shutdown
   #     end
   #   end
   #
-  #   timer_task.run # blocking call - this task will stop itself
+  #   timer_task.execute # blocking call - this task will stop itself
   #   #=> Boom!
   #   #=> Boom! Boom!
   #   #=> Boom! Boom! Boom!
@@ -114,30 +114,30 @@ module Concurrent
   #
   #   task = Concurrent::TimerTask.new(execution_interval: 1, timeout_interval: 1){ 42 }
   #   task.add_observer(TaskObserver.new)
-  #   task.run!
+  #   task.execute
   #
   #   #=> (2013-10-13 19:08:58 -0400) Execution successfully returned 42
   #   #=> (2013-10-13 19:08:59 -0400) Execution successfully returned 42
   #   #=> (2013-10-13 19:09:00 -0400) Execution successfully returned 42
-  #   task.stop
+  #   task.shutdown
   #
   #   task = Concurrent::TimerTask.new(execution_interval: 1, timeout_interval: 1){ sleep }
   #   task.add_observer(TaskObserver.new)
-  #   task.run!
+  #   task.execute
   #
   #   #=> (2013-10-13 19:07:25 -0400) Execution timed out
   #   #=> (2013-10-13 19:07:27 -0400) Execution timed out
   #   #=> (2013-10-13 19:07:29 -0400) Execution timed out
-  #   task.stop
+  #   task.shutdown
   #
   #   task = Concurrent::TimerTask.new(execution_interval: 1){ raise StandardError }
   #   task.add_observer(TaskObserver.new)
-  #   task.run!
+  #   task.execute
   #
   #   #=> (2013-10-13 19:09:37 -0400) Execution failed with error StandardError
   #   #=> (2013-10-13 19:09:38 -0400) Execution failed with error StandardError
   #   #=> (2013-10-13 19:09:39 -0400) Execution failed with error StandardError
-  #   task.stop
+  #   task.shutdown
   #
   # @see http://ruby-doc.org/stdlib-2.0/libdoc/observer/rdoc/Observable.html
   # @see http://docs.oracle.com/javase/7/docs/api/java/util/TimerTask.html
@@ -316,7 +316,7 @@ module Concurrent
     # @!visibility private
     def execute_task(completion)
       return unless @running.true?
-      Concurrent::timer(timeout_interval, completion, &method(:timeout_task))
+      Concurrent::timer(execution_interval, completion, &method(:timeout_task))
       success, value, reason = @executor.execute(self)
       if completion.try?
         self.value = value