concurrent-ruby 0.7.0-x64-mingw32 → 0.7.1-x64-mingw32
- checksums.yaml +8 -8
- data/CHANGELOG.md +138 -0
- data/README.md +73 -105
- data/lib/2.0/concurrent_ruby_ext.so +0 -0
- data/lib/concurrent/actor.rb +11 -12
- data/lib/concurrent/actor/behaviour/errors_on_unknown_message.rb +1 -1
- data/lib/concurrent/actor/behaviour/linking.rb +4 -1
- data/lib/concurrent/actor/behaviour/pausing.rb +2 -2
- data/lib/concurrent/actor/behaviour/supervised.rb +2 -1
- data/lib/concurrent/actor/behaviour/termination.rb +1 -1
- data/lib/concurrent/actor/context.rb +2 -1
- data/lib/concurrent/actor/core.rb +7 -3
- data/lib/concurrent/actor/utils/balancer.rb +4 -2
- data/lib/concurrent/actor/utils/pool.rb +1 -1
- data/lib/concurrent/agent.rb +1 -22
- data/lib/concurrent/async.rb +1 -79
- data/lib/concurrent/atomic.rb +1 -1
- data/lib/concurrent/atomic/thread_local_var.rb +71 -24
- data/lib/concurrent/atomics.rb +0 -1
- data/lib/concurrent/configuration.rb +11 -5
- data/lib/concurrent/dataflow.rb +1 -30
- data/lib/concurrent/dereferenceable.rb +9 -2
- data/lib/concurrent/executor/indirect_immediate_executor.rb +46 -0
- data/lib/concurrent/executor/java_thread_pool_executor.rb +2 -4
- data/lib/concurrent/executor/ruby_thread_pool_executor.rb +24 -22
- data/lib/concurrent/executor/thread_pool_executor.rb +2 -0
- data/lib/concurrent/executor/timer_set.rb +7 -8
- data/lib/concurrent/executors.rb +1 -0
- data/lib/concurrent/future.rb +7 -29
- data/lib/concurrent/ivar.rb +9 -0
- data/lib/concurrent/logging.rb +3 -0
- data/lib/concurrent/mvar.rb +26 -9
- data/lib/concurrent/observable.rb +33 -0
- data/lib/concurrent/promise.rb +59 -1
- data/lib/concurrent/scheduled_task.rb +1 -0
- data/lib/concurrent/timer_task.rb +18 -18
- data/lib/concurrent/tvar.rb +2 -0
- data/lib/concurrent/version.rb +1 -1
- data/lib/concurrent_ruby_ext.so +0 -0
- metadata +21 -4
data/lib/concurrent/dereferenceable.rb
CHANGED
@@ -3,8 +3,15 @@ module Concurrent
   # Object references in Ruby are mutable. This can lead to serious problems when
   # the `#value` of a concurrent object is a mutable reference. Which is always the
   # case unless the value is a `Fixnum`, `Symbol`, or similar "primitive" data type.
-  # Most classes in this library that expose a `#value` getter method do so using
-  #
+  # Most classes in this library that expose a `#value` getter method do so using the
+  # `Dereferenceable` mixin module.
+  #
+  # Objects with this mixin can be configured with a few options that can help protect
+  # the program from potentially dangerous operations.
+  #
+  # * `:dup_on_deref` when true will call the `#dup` method on the `value` object every time the `#value` method is called (default: false)
+  # * `:freeze_on_deref` when true will call the `#freeze` method on the `value` object every time the `#value` method is called (default: false)
+  # * `:copy_on_deref` when given a `Proc` object the `Proc` will be run every time the `#value` method is called. The `Proc` will be given the current `value` as its only parameter and the result returned by the block will be the return value of the `#value` call. When `nil` this option will be ignored (default: nil)
   module Dereferenceable
 
     # Return the value this object represents after applying the options specified
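For context, a minimal sketch of how these options behave in practice, using `Future`, which mixes in `Dereferenceable` and accepts the same options:

  require 'concurrent'

  # Freeze the value on every dereference so callers cannot mutate it.
  future = Concurrent::Future.execute(freeze_on_deref: true) { [1, 2, 3] }
  future.value.frozen?  #=> true

  # Or hand each caller its own defensive copy of the value.
  future = Concurrent::Future.execute(dup_on_deref: true) { 'result' }
  future.value << '!'   # mutates only this caller's copy, not the shared value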
data/lib/concurrent/executor/indirect_immediate_executor.rb
ADDED
@@ -0,0 +1,46 @@
+require 'concurrent/executor/executor'
+
+module Concurrent
+  # An executor service which runs all operations on a new thread, blocking
+  # until it completes. Operations are performed in the order they are received
+  # and no two operations can be performed simultaneously.
+  #
+  # This executor service exists mainly for testing and debugging. When used it
+  # immediately runs every `#post` operation on a new thread, blocking the
+  # current thread until the operation is complete. This is similar to how the
+  # ImmediateExecutor works, but the operation has the full stack of the new
+  # thread at its disposal. This can be helpful when the operations will spawn
+  # more operations on the same executor and so on - such a situation might
+  # overflow the single stack in case of an ImmediateExecutor, which is
+  # inconsistent with how it would behave for a threaded executor.
+  #
+  # @note Intended for use primarily in testing and debugging.
+  class IndirectImmediateExecutor < ImmediateExecutor
+    # Creates a new executor
+    def initialize
+      super
+      @internal_executor = PerThreadExecutor.new
+    end
+
+    # @!macro executor_method_post
+    def post(*args, &task)
+      raise ArgumentError.new("no block given") unless block_given?
+      return false unless running?
+
+      event = Concurrent::Event.new
+      internal_executor.post do
+        begin
+          task.call(*args)
+        ensure
+          event.set
+        end
+      end
+      event.wait
+
+      true
+    end
+
+    private
+    attr_reader :internal_executor
+  end
+end
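A quick sketch of the new executor's semantics; the task runs on its own thread but the caller still blocks, matching `ImmediateExecutor`'s ordering guarantees:

  require 'concurrent'

  executor = Concurrent::IndirectImmediateExecutor.new

  # Runs on a fresh thread with its own stack; the caller blocks until done.
  executor.post { puts "ran on #{Thread.current}" }

  # Tasks that post further tasks get a new stack per level, so deep chains
  # cannot overflow the caller's stack as they can with ImmediateExecutor.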
data/lib/concurrent/executor/java_thread_pool_executor.rb
CHANGED
@@ -74,9 +74,7 @@ if RUBY_PLATFORM == 'java'
       raise ArgumentError.new('min_threads cannot be more than max_threads') if min_length > max_length
       raise ArgumentError.new("#{@overflow_policy} is not a valid overflow policy") unless OVERFLOW_POLICIES.keys.include?(@overflow_policy)
 
-      if
-        queue = java.util.concurrent.SynchronousQueue.new
-      elsif @max_queue == 0
+      if @max_queue == 0
         queue = java.util.concurrent.LinkedBlockingQueue.new
       else
         queue = java.util.concurrent.LinkedBlockingQueue.new(@max_queue)
@@ -90,7 +88,7 @@ if RUBY_PLATFORM == 'java'
       set_shutdown_hook
     end
 
-
+    # @!macro executor_module_method_can_overflow_question
     def can_overflow?
       @max_queue != 0
     end
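The practical effect of the `can_overflow?` predicate, sketched with `ThreadPoolExecutor` (which delegates to this class on JRuby):

  require 'concurrent'

  bounded   = Concurrent::ThreadPoolExecutor.new(max_queue: 100)
  unbounded = Concurrent::ThreadPoolExecutor.new(max_queue: 0)

  bounded.can_overflow?    #=> true  (a full queue triggers the overflow policy)
  unbounded.can_overflow?  #=> false (the queue grows without bound)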
data/lib/concurrent/executor/ruby_thread_pool_executor.rb
CHANGED
@@ -11,20 +11,20 @@ module Concurrent
     include RubyExecutor
 
     # Default maximum number of threads that will be created in the pool.
-    DEFAULT_MAX_POOL_SIZE
+    DEFAULT_MAX_POOL_SIZE = 2**15 # 32768
 
     # Default minimum number of threads that will be retained in the pool.
-    DEFAULT_MIN_POOL_SIZE
+    DEFAULT_MIN_POOL_SIZE = 0
 
     # Default maximum number of tasks that may be added to the task queue.
-    DEFAULT_MAX_QUEUE_SIZE
+    DEFAULT_MAX_QUEUE_SIZE = 0
 
     # Default maximum number of seconds a thread in the pool may remain idle
     # before being reclaimed.
     DEFAULT_THREAD_IDLETIMEOUT = 60
 
     # The set of possible overflow policies that may be set at thread pool creation.
-    OVERFLOW_POLICIES
+    OVERFLOW_POLICIES = [:abort, :discard, :caller_runs]
 
     # The maximum number of threads that may be created in the pool.
     attr_reader :max_length
@@ -77,10 +77,10 @@ module Concurrent
     #
     # @see http://docs.oracle.com/javase/7/docs/api/java/util/concurrent/ThreadPoolExecutor.html
     def initialize(opts = {})
-      @min_length
-      @max_length
-      @idletime
-      @max_queue
+      @min_length = opts.fetch(:min_threads, DEFAULT_MIN_POOL_SIZE).to_i
+      @max_length = opts.fetch(:max_threads, DEFAULT_MAX_POOL_SIZE).to_i
+      @idletime = opts.fetch(:idletime, DEFAULT_THREAD_IDLETIMEOUT).to_i
+      @max_queue = opts.fetch(:max_queue, DEFAULT_MAX_QUEUE_SIZE).to_i
       @overflow_policy = opts.fetch(:overflow_policy, :abort)
 
       raise ArgumentError.new('max_threads must be greater than zero') if @max_length <= 0
@@ -90,13 +90,13 @@ module Concurrent
 
       init_executor
 
-      @pool
-      @queue
+      @pool = []
+      @queue = Queue.new
       @scheduled_task_count = 0
       @completed_task_count = 0
-      @largest_length
+      @largest_length = 0
 
-      @gc_interval
+      @gc_interval = opts.fetch(:gc_interval, 1).to_i # undocumented
       @last_gc_time = Time.now.to_f - [1.0, (@gc_interval * 2.0)].max
     end
 
@@ -109,15 +109,16 @@ module Concurrent
     #
     # @return [Integer] the length
     def length
-      mutex.synchronize{ running? ? @pool.length : 0 }
+      mutex.synchronize { running? ? @pool.length : 0 }
     end
+
     alias_method :current_length, :length
 
     # The number of tasks in the queue awaiting execution.
     #
     # @return [Integer] the queue_length
     def queue_length
-      mutex.synchronize{ running? ? @queue.length : 0 }
+      mutex.synchronize { running? ? @queue.length : 0 }
     end
 
     # Number of tasks that may be enqueued before reaching `max_queue` and rejecting
@@ -152,7 +153,7 @@ module Concurrent
     def on_worker_exit(worker)
       mutex.synchronize do
         @pool.delete(worker)
-        if @pool.empty? && !
+        if @pool.empty? && !running?
           stop_event.set
           stopped_event.set
         end
@@ -177,7 +178,7 @@ module Concurrent
       if @pool.empty?
         stopped_event.set
       else
-        @pool.length.times{ @queue << :stop }
+        @pool.length.times { @queue << :stop }
       end
     end
 
@@ -196,7 +197,7 @@ module Concurrent
     # @!visibility private
     def ensure_capacity?
       additional = 0
-      capacity
+      capacity = true
 
       if @pool.size < @min_length
         additional = @min_length - @pool.size
@@ -254,10 +255,11 @@ module Concurrent
     # @!visibility private
     def prune_pool
       if Time.now.to_f - @gc_interval >= @last_gc_time
-        @pool.delete_if
-
-
-
+        @pool.delete_if { |worker| worker.dead? }
+        # send :stop for each thread over idletime
+        @pool.
+          select { |worker| @idletime != 0 && Time.now.to_f - @idletime > worker.last_activity }.
+          each { @queue << :stop }
         @last_gc_time = Time.now.to_f
       end
     end
@@ -266,7 +268,7 @@ module Concurrent
     #
     # @!visibility private
     def drain_pool
-      @pool.each {|worker| worker.kill }
+      @pool.each { |worker| worker.kill }
       @pool.clear
     end
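Putting the new constant values and `#initialize` defaults together, construction looks roughly like this sketch (the specific numbers are illustrative, not defaults):

  require 'concurrent'

  pool = Concurrent::RubyThreadPoolExecutor.new(
    min_threads:     2,      # DEFAULT_MIN_POOL_SIZE is 0
    max_threads:     10,     # DEFAULT_MAX_POOL_SIZE is 2**15
    idletime:        60,     # DEFAULT_THREAD_IDLETIMEOUT, seconds before pruning
    max_queue:       0,      # DEFAULT_MAX_QUEUE_SIZE, 0 means unbounded
    overflow_policy: :abort  # must be one of OVERFLOW_POLICIES
  )

  pool.post { 2 + 2 }  # true if the task was accepted
  pool.length          # current pool threads, guarded by the mutex as above
  pool.queue_length    # tasks waiting to run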
data/lib/concurrent/executor/thread_pool_executor.rb
CHANGED
@@ -49,6 +49,8 @@ module Concurrent
   # * `:discard`: Silently discard the task and return `nil` as the task result.
   # * `:caller_runs`: Execute the task on the calling thread.
   #
+  # {include:file:doc/thread_pools.md}
+  #
   # @note When running on the JVM (JRuby) this class will inherit from `JavaThreadPoolExecutor`.
   #   On all other platforms it will inherit from `RubyThreadPoolExecutor`.
   #
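A sketch of how the overflow policies differ once `max_queue` is saturated:

  require 'concurrent'

  pool = Concurrent::ThreadPoolExecutor.new(
    max_threads:     1,
    max_queue:       1,
    overflow_policy: :discard  # excess tasks are silently dropped; :abort raises
                               # instead, :caller_runs runs them on the posting thread
  )

  100.times { pool.post { sleep(0.01) } }  # most of these are discarded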
data/lib/concurrent/executor/timer_set.rb
CHANGED
@@ -22,10 +22,10 @@ module Concurrent
     # @option opts [object] :executor when provided will run all operations on
     #   this executor rather than the global thread pool (overrides :operation)
     def initialize(opts = {})
-      @queue
-      @task_executor
+      @queue = PriorityQueue.new(order: :min)
+      @task_executor = OptionsParser::get_executor_from(opts) || Concurrent.configuration.global_task_pool
       @timer_executor = SingleThreadExecutor.new
-      @condition
+      @condition = Condition.new
       init_executor
     end
 
@@ -64,7 +64,7 @@ module Concurrent
     # For a timer, #kill is like an orderly shutdown, except we need to manually
     # (and destructively) clear the queue first
     def kill
-      @queue.clear
+      mutex.synchronize { @queue.clear }
       shutdown
     end
 
@@ -124,14 +124,13 @@ module Concurrent
     # @!visibility private
     def process_tasks
       loop do
-
-
-        task = @queue.peek
+        task = mutex.synchronize { @queue.peek }
+        break unless task
         interval = task.time - Time.now.to_f
 
         if interval <= 0
           @task_executor.post(*task.args, &task.op)
-          @queue.pop
+          mutex.synchronize { @queue.pop }
         else
           mutex.synchronize do
             @condition.wait(mutex, [interval, 60].min)
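For reference, a sketch of driving a `TimerSet`, assuming the 0.7-era `#post` signature that takes a delay in seconds (or a `Time`):

  require 'concurrent'

  timers = Concurrent::TimerSet.new

  # process_tasks peeks at the earliest task and sleeps until it comes due.
  timers.post(2) { puts 'fired two seconds later' }

  # kill now clears pending tasks under the mutex, then shuts down.
  timers.kill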
data/lib/concurrent/executors.rb
CHANGED
@@ -1,6 +1,7 @@
 require 'concurrent/executor/cached_thread_pool'
 require 'concurrent/executor/fixed_thread_pool'
 require 'concurrent/executor/immediate_executor'
+require 'concurrent/executor/indirect_immediate_executor'
 require 'concurrent/executor/per_thread_executor'
 require 'concurrent/executor/safe_task_executor'
 require 'concurrent/executor/single_thread_executor'
data/lib/concurrent/future.rb
CHANGED
@@ -6,35 +6,7 @@ require 'concurrent/executor/safe_task_executor'
 
 module Concurrent
 
-  #
-  # The action is atomic and permanent. The idea behind a future is to send an operation
-  # for asynchronous completion, do other stuff, then return and retrieve the result
-  # of the async operation at a later time.
-  #
-  # A `Future` has four possible states: *:unscheduled*, *:pending*, *:rejected*, or *:fulfilled*.
-  # When a `Future` is created its state is set to *:unscheduled*. Once the `#execute` method is
-  # called the state becomes *:pending* and will remain in that state until processing is
-  # complete. A completed `Future` is either *:rejected*, indicating that an exception was
-  # thrown during processing, or *:fulfilled*, indicating success. If a `Future` is *:fulfilled*
-  # its `value` will be updated to reflect the result of the operation. If *:rejected* the
-  # `reason` will be updated with a reference to the thrown exception. The predicate methods
-  # `#unscheduled?`, `#pending?`, `#rejected?`, and `fulfilled?` can be called at any time to
-  # obtain the state of the `Future`, as can the `#state` method, which returns a symbol.
-  #
-  # Retrieving the value of a `Future` is done through the `#value` (alias: `#deref`) method.
-  # Obtaining the value of a `Future` is a potentially blocking operation. When a `Future` is
-  # *:rejected* a call to `#value` will return `nil` immediately. When a `Future` is
-  # *:fulfilled* a call to `#value` will immediately return the current value. When a
-  # `Future` is *:pending* a call to `#value` will block until the `Future` is either
-  # *:rejected* or *:fulfilled*. A *timeout* value can be passed to `#value` to limit how
-  # long the call will block. If `nil` the call will block indefinitely. If `0` the call will
-  # not block. Any other integer or float value will indicate the maximum number of seconds to block.
-  #
-  # The `Future` class also includes the behavior of the Ruby standard library `Observable` module,
-  # but does so in a thread-safe way. On fulfillment or rejection all observers will be notified
-  # according to the normal `Observable` behavior. The observer callback function will be called
-  # with three parameters: the `Time` of fulfillment/rejection, the final `value`, and the final
-  # `reason`. Observers added after fulfillment/rejection will still be notified as normal.
+  # {include:file:doc/future.md}
   #
   # @see http://ruby-doc.org/stdlib-2.1.1/libdoc/observer/rdoc/Observable.html Ruby Observable module
   # @see http://clojuredocs.org/clojure_core/clojure.core/future Clojure's future function
@@ -94,6 +66,12 @@ module Concurrent
     #
     # @yield the asynchronous operation to perform
     #
+    # @param [Hash] opts the options controlling how the future will be processed
+    # @option opts [Boolean] :operation (false) when `true` will execute the future on the global
+    #   operation pool (for long-running operations), when `false` will execute the future on the
+    #   global task pool (for short-running tasks)
+    # @option opts [object] :executor when provided will run all operations on
+    #   this executor rather than the global thread pool (overrides :operation)
     # @option opts [String] :dup_on_deref (false) call `#dup` before returning the data
     # @option opts [String] :freeze_on_deref (false) call `#freeze` before returning the data
     # @option opts [String] :copy_on_deref (nil) call the given `Proc` passing the internal value and
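The newly documented options in use (a sketch; `expensive_computation` is a placeholder):

  require 'concurrent'

  # Run on the global operation pool, intended for long-running work.
  long = Concurrent::Future.execute(operation: true) { expensive_computation }

  # Pin the future to a specific executor; this overrides :operation.
  single = Concurrent::SingleThreadExecutor.new
  short  = Concurrent::Future.execute(executor: single) { 1 + 1 }
  short.value  #=> 2, blocking until fulfilled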
data/lib/concurrent/ivar.rb
CHANGED
@@ -6,6 +6,10 @@ require 'concurrent/observable'
 
 module Concurrent
 
+  # An `IVar` is like a future that you can assign. As a future is a value that is being computed that you can wait on, an `IVar` is a value that is waiting to be assigned, that you can wait on. `IVars` are single assignment and deterministic.
+  #
+  # Then, express futures as an asynchronous computation that assigns an `IVar`. The `IVar` becomes the primitive on which [futures](Future) and [dataflow](Dataflow) are built.
+  #
   # An `IVar` is a single-element container that is normally created empty, and
   # can only be set once. The I in `IVar` stands for immutable. Reading an `IVar`
   # normally blocks until it is set. It is safe to set and read an `IVar` from
@@ -15,6 +19,11 @@ module Concurrent
   # a `Future`. If you want to create a graph of parallel tasks all executed when
   # the values they depend on are ready you want `dataflow`. `IVar` is generally
   # a low-level primitive.
+  #
+  # **See Also:**
+  #
+  # * For the theory: Arvind, R. Nikhil, and K. Pingali. [I-Structures: Data structures for parallel computing](http://dl.acm.org/citation.cfm?id=69562). In Proceedings of Workshop on Graph Reduction, 1986.
+  # * For recent application: [DataDrivenFuture in Habanero Java from Rice](http://www.cs.rice.edu/~vs3/hjlib/doc/edu/rice/hj/api/HjDataDrivenFuture.html).
   #
   # @example Create, set and get an `IVar`
   #   ivar = Concurrent::IVar.new
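The `@example` above is truncated at the hunk boundary; in full, usage looks like this sketch:

  require 'concurrent'

  ivar = Concurrent::IVar.new
  Thread.new { ivar.set(14) }  # may be set once, from any thread
  ivar.value                   #=> 14, blocking until the value is set
  ivar.set(42)                 # single assignment: a second set raises
                               # Concurrent::MultipleAssignmentError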
data/lib/concurrent/logging.rb
CHANGED
@@ -12,6 +12,9 @@ module Concurrent
     # @yieldreturn [String] a message
     def log(level, progname, message = nil, &block)
       (@logger || Concurrent.configuration.logger).call level, progname, message, &block
+    rescue => error
+      $stderr.puts "`Concurrent.configuration.logger` failed to log #{[level, progname, message, block]}\n" +
+                   "#{error.message} (#{error.class})\n#{error.backtrace.join "\n"}"
     end
   end
 end
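The logger here is any callable receiving `(level, progname, message = nil, &block)`; a sketch of routing it to a standard library `Logger`, assuming the configuration object exposes a `logger=` writer:

  require 'concurrent'
  require 'logger'

  ruby_logger = Logger.new($stdout)

  Concurrent.configure do |config|
    config.logger = lambda do |level, progname, message = nil, &block|
      ruby_logger.add(level, message, progname, &block)
    end
  end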
data/lib/concurrent/mvar.rb
CHANGED
@@ -4,16 +4,33 @@ require 'concurrent/atomic/event'
 
 module Concurrent
 
-  # An `MVar` is a single
-  #
-  #
-  #
+  # An `MVar` is a synchronized single-element container. They are empty or contain one item.
+  # Taking a value from an empty `MVar` blocks, as does putting a value into a full one.
+  # You can either think of them as a blocking queue of length one, or a special kind of
+  # mutable variable.
+  #
+  # On top of the fundamental `#put` and `#take` operations, we also provide a `#modify`
+  # that is atomic with respect to operations on the same instance. These operations all
+  # support timeouts.
+  #
+  # We also support non-blocking operations `#try_put!` and `#try_take!`, a `#set!` that
+  # ignores existing values, a `#value` that returns the value without removing it or
+  # returns `MVar::EMPTY`, and a `#modify!` that yields `MVar::EMPTY` if the `MVar` is
+  # empty and can be used to set `MVar::EMPTY`. You shouldn't use these operations in the
+  # first instance.
+  #
+  # `MVar` is a [Dereferenceable](Dereferenceable).
+  #
+  # `MVar` is related to M-structures in Id, `MVar` in Haskell and `SyncVar` in Scala.
   #
-  #
-  #
-  #
-  #
-  #
+  # Note that unlike the original Haskell paper, our `#take` is blocking. This is how
+  # Haskell and Scala do it today.
+  #
+  # **See Also:**
+  #
+  # 1. P. Barth, R. Nikhil, and Arvind. [M-Structures: Extending a parallel, non-
+  #    strict, functional language with state](http://dl.acm.org/citation.cfm?id=652538). In Proceedings of the 5th ACM Conference on Functional Programming Languages and Computer Architecture (FPCA), 1991.
+  # 2. S. Peyton Jones, A. Gordon, and S. Finne. [Concurrent Haskell](http://dl.acm.org/citation.cfm?id=237794). In Proceedings of the 23rd Symposium on Principles of Programming Languages (PoPL), 1996.
   class MVar
 
     include Dereferenceable
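The fundamental operations, sketched:

  require 'concurrent'

  m = Concurrent::MVar.new   # starts empty

  Thread.new { m.put(14) }   # put blocks while the MVar is full
  m.take                     #=> 14, take blocks while it is empty

  m.put(1)
  m.modify { |v| v + 1 }     # atomic with respect to other operations on m
  m.value                    #=> 2, read without removing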
data/lib/concurrent/observable.rb
CHANGED
@@ -3,6 +3,39 @@ require 'concurrent/atomic/copy_on_write_observer_set'
 
 module Concurrent
 
+  # The [observer pattern](http://en.wikipedia.org/wiki/Observer_pattern) is one of the most useful design patterns.
+  #
+  # The workflow is very simple:
+  # - an `observer` can register itself to a `subject` via a callback
+  # - many `observers` can be registered to the same `subject`
+  # - the `subject` notifies all registered observers when its status changes
+  # - an `observer` can deregister itself when it is no longer interested in receiving event notifications
+  #
+  # In a single-threaded environment the whole pattern is very easy: the `subject` can use a simple data structure to manage all its subscribed `observer`s and every `observer` can react directly to every event without caring about synchronization.
+  #
+  # In a multi-threaded environment things are more complex.
+  # The `subject` must synchronize access to its data structure, and to do so it currently uses two specialized ObserverSets: CopyOnWriteObserverSet and CopyOnNotifyObserverSet.
+  #
+  # When implementing an `observer` there's a very important rule to remember: **there are no guarantees about the thread that will execute the callback**.
+  #
+  # Let's take this example:
+  # ```
+  # class Observer
+  #   def initialize
+  #     @count = 0
+  #   end
+  #
+  #   def update
+  #     @count += 1
+  #   end
+  # end
+  #
+  # obs = Observer.new
+  # [obj1, obj2, obj3, obj4].each { |o| o.add_observer(obs) }
+  # # execute [obj1, obj2, obj3, obj4]
+  # ```
+  #
+  # `obs` is wrong because the variable `@count` can be accessed by different threads at the same time, so it should be synchronized (using either a Mutex or an AtomicFixnum).
   module Observable
 
     # @return [Object] the added observer
data/lib/concurrent/promise.rb
CHANGED
@@ -5,8 +5,9 @@ require 'concurrent/options_parser'
 
 module Concurrent
 
-  #
+  # {include:file:doc/promise.md}
   class Promise
+    # TODO unify promise and future to single class, with dataflow
     include Obligation
 
     # Initialize a new Promise with the provided options.
@@ -110,6 +111,63 @@ module Concurrent
     alias_method :catch, :rescue
     alias_method :on_error, :rescue
 
+    # Yield the successful result to the block that returns a promise. If that
+    # promise is also successful the result is the result of the yielded promise.
+    # If either part fails the whole also fails.
+    #
+    # @example
+    #   Promise.execute { 1 }.flat_map { |v| Promise.execute { v + 2 } }.value! #=> 3
+    #
+    # @return [Promise]
+    def flat_map(&block)
+      child = Promise.new(
+        parent: self,
+        executor: ImmediateExecutor.new,
+      )
+
+      on_error { |e| child.on_reject(e) }
+      on_success do |result1|
+        begin
+          inner = block.call(result1)
+          inner.execute
+          inner.on_success { |result2| child.on_fulfill(result2) }
+          inner.on_error { |e| child.on_reject(e) }
+        rescue => e
+          child.on_reject(e)
+        end
+      end
+
+      child
+    end
+
+    # Builds a promise that produces the result of promises in an Array
+    # and fails if any of them fails.
+    #
+    # @param [Array<Promise>] promises
+    #
+    # @return [Promise<Array>]
+    def self.zip(*promises)
+      zero = fulfill([], executor: ImmediateExecutor.new)
+
+      promises.reduce(zero) do |p1, p2|
+        p1.flat_map do |results|
+          p2.then do |next_result|
+            results << next_result
+          end
+        end
+      end
+    end
+
+    # Builds a promise that produces the result of self and others in an Array
+    # and fails if any of them fails.
+    #
+    # @param [Array<Promise>] others
+    #
+    # @return [Promise<Array>]
+    def zip(*others)
+      self.class.zip(self, *others)
+    end
+
    protected
 
    def set_pending
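The new combinators in use, following the `@example` above (a sketch; `value!` blocks and re-raises any failure):

  require 'concurrent'

  a = Concurrent::Promise.execute { 1 }
  b = Concurrent::Promise.execute { 2 }

  # Combine independent promises; the result fails if any input fails.
  Concurrent::Promise.zip(a, b).value!  #=> [1, 2]

  # Instance form, equivalent to the class method:
  a.zip(b).value!                       #=> [1, 2]

  # flat_map chains promise-returning blocks without nesting.
  Concurrent::Promise.execute { 1 }
    .flat_map { |v| Concurrent::Promise.execute { v + 2 } }
    .value!                             #=> 3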
|