concurrent-ruby-edge 0.3.1 → 0.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +5 -5
- data/CHANGELOG.md +552 -0
- data/LICENSE.txt +18 -18
- data/README.md +261 -103
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor/behaviour/abstract.rb +2 -0
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor/behaviour/awaits.rb +2 -0
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor/behaviour/buffer.rb +2 -0
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor/behaviour/errors_on_unknown_message.rb +2 -0
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor/behaviour/executes_context.rb +2 -0
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor/behaviour/linking.rb +2 -0
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor/behaviour/pausing.rb +2 -0
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor/behaviour/removes_child.rb +2 -0
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor/behaviour/sets_results.rb +2 -0
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor/behaviour/supervising.rb +2 -0
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor/behaviour/termination.rb +3 -1
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor/behaviour.rb +1 -1
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor/context.rb +3 -1
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor/core.rb +5 -4
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor/default_dead_letter_handler.rb +2 -0
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor/envelope.rb +2 -0
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor/errors.rb +2 -0
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor/internal_delegations.rb +3 -0
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor/reference.rb +9 -8
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor/root.rb +3 -0
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor/utils/ad_hoc.rb +2 -0
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor/utils/balancer.rb +2 -0
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor/utils/broadcast.rb +1 -0
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor/utils/pool.rb +1 -0
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor.rb +11 -6
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/channel/buffer/base.rb +14 -14
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/channel/buffer/dropping.rb +1 -0
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/channel/buffer/sliding.rb +1 -0
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/channel/buffer/unbuffered.rb +1 -1
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/channel/tick.rb +1 -1
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/channel.rb +3 -2
- data/lib/concurrent-ruby-edge/concurrent/edge/cancellation.rb +107 -0
- data/lib/concurrent-ruby-edge/concurrent/edge/channel.rb +453 -0
- data/lib/concurrent-ruby-edge/concurrent/edge/erlang_actor.rb +1549 -0
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/edge/lock_free_linked_set/node.rb +2 -2
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/edge/lock_free_linked_set.rb +8 -7
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/edge/lock_free_queue.rb +2 -0
- data/lib/{concurrent → concurrent-ruby-edge/concurrent}/edge/old_channel_integration.rb +2 -0
- data/lib/concurrent-ruby-edge/concurrent/edge/processing_actor.rb +184 -0
- data/lib/concurrent-ruby-edge/concurrent/edge/promises.rb +174 -0
- data/lib/concurrent-ruby-edge/concurrent/edge/throttle.rb +229 -0
- data/lib/concurrent-ruby-edge/concurrent/edge/version.rb +3 -0
- data/lib/concurrent-ruby-edge/concurrent/edge.rb +21 -0
- data/lib/concurrent-ruby-edge/concurrent/executor/wrapping_executor.rb +50 -0
- data/lib/concurrent-ruby-edge/concurrent/lazy_register.rb +83 -0
- data/lib/{concurrent-edge.rb → concurrent-ruby-edge/concurrent-edge.rb} +5 -4
- metadata +71 -67
- data/lib/concurrent/edge/atomic_markable_reference.rb +0 -184
- data/lib/concurrent/edge/cancellation.rb +0 -138
- data/lib/concurrent/edge/lock_free_stack.rb +0 -126
- data/lib/concurrent/edge/processing_actor.rb +0 -161
- data/lib/concurrent/edge/promises.rb +0 -2111
- data/lib/concurrent/edge/throttle.rb +0 -192
- /data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor/public_delegations.rb +0 -0
- /data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor/type_check.rb +0 -0
- /data/lib/{concurrent → concurrent-ruby-edge/concurrent}/actor/utils.rb +0 -0
- /data/lib/{concurrent → concurrent-ruby-edge/concurrent}/channel/buffer/buffered.rb +0 -0
- /data/lib/{concurrent → concurrent-ruby-edge/concurrent}/channel/buffer/ticker.rb +0 -0
- /data/lib/{concurrent → concurrent-ruby-edge/concurrent}/channel/buffer/timer.rb +0 -0
- /data/lib/{concurrent → concurrent-ruby-edge/concurrent}/channel/buffer.rb +0 -0
- /data/lib/{concurrent → concurrent-ruby-edge/concurrent}/channel/selector/after_clause.rb +0 -0
- /data/lib/{concurrent → concurrent-ruby-edge/concurrent}/channel/selector/default_clause.rb +0 -0
- /data/lib/{concurrent → concurrent-ruby-edge/concurrent}/channel/selector/error_clause.rb +0 -0
- /data/lib/{concurrent → concurrent-ruby-edge/concurrent}/channel/selector/put_clause.rb +0 -0
- /data/lib/{concurrent → concurrent-ruby-edge/concurrent}/channel/selector/take_clause.rb +0 -0
- /data/lib/{concurrent → concurrent-ruby-edge/concurrent}/channel/selector.rb +0 -0
- /data/lib/{concurrent → concurrent-ruby-edge/concurrent}/edge/lock_free_linked_set/window.rb +0 -0
|
@@ -1,4 +1,4 @@
|
|
|
1
|
-
require 'concurrent/
|
|
1
|
+
require 'concurrent/atomic/atomic_markable_reference'
|
|
2
2
|
|
|
3
3
|
module Concurrent
|
|
4
4
|
module Edge
|
|
@@ -10,7 +10,7 @@ module Concurrent
|
|
|
10
10
|
|
|
11
11
|
def initialize(data = nil, successor = nil)
|
|
12
12
|
super()
|
|
13
|
-
@SuccessorReference
|
|
13
|
+
@SuccessorReference = AtomicMarkableReference.new(successor || Tail.new)
|
|
14
14
|
@Data = data
|
|
15
15
|
@Key = key_for data
|
|
16
16
|
end
|
|
@@ -18,12 +18,13 @@ module Concurrent
|
|
|
18
18
|
#
|
|
19
19
|
# This algorithm is a variation of the Nonblocking Linked Set found in
|
|
20
20
|
# 'The Art of Multiprocessor Programming' by Herlihy and Shavit.
|
|
21
|
+
# @!macro warn.edge
|
|
21
22
|
class LockFreeLinkedSet
|
|
22
23
|
include Enumerable
|
|
23
24
|
|
|
24
|
-
# @!macro
|
|
25
|
+
# @!macro lock_free_linked_list_method_initialize
|
|
25
26
|
#
|
|
26
|
-
#
|
|
27
|
+
# @param [Fixnum] initial_size the size of the linked_list to initialize
|
|
27
28
|
def initialize(initial_size = 0, val = nil)
|
|
28
29
|
@head = Head.new
|
|
29
30
|
|
|
@@ -33,7 +34,7 @@ module Concurrent
|
|
|
33
34
|
end
|
|
34
35
|
end
|
|
35
36
|
|
|
36
|
-
# @!macro
|
|
37
|
+
# @!macro lock_free_linked_list_method_add
|
|
37
38
|
#
|
|
38
39
|
# Atomically adds the item to the set if it does not yet exist. Note:
|
|
39
40
|
# internally the set uses `Object#hash` to compare equality of items,
|
|
@@ -61,7 +62,7 @@ module Concurrent
|
|
|
61
62
|
end
|
|
62
63
|
end
|
|
63
64
|
|
|
64
|
-
# @!macro
|
|
65
|
+
# @!macro lock_free_linked_list_method_<<
|
|
65
66
|
#
|
|
66
67
|
# Atomically adds the item to the set if it does not yet exist.
|
|
67
68
|
#
|
|
@@ -73,7 +74,7 @@ module Concurrent
|
|
|
73
74
|
self
|
|
74
75
|
end
|
|
75
76
|
|
|
76
|
-
# @!macro
|
|
77
|
+
# @!macro lock_free_linked_list_method_contains
|
|
77
78
|
#
|
|
78
79
|
# Atomically checks to see if the set contains an item. This method
|
|
79
80
|
# compares equality based on the `Object#hash` method, meaning that the
|
|
@@ -94,7 +95,7 @@ module Concurrent
|
|
|
94
95
|
curr == item && !marked
|
|
95
96
|
end
|
|
96
97
|
|
|
97
|
-
# @!macro
|
|
98
|
+
# @!macro lock_free_linked_list_method_remove
|
|
98
99
|
#
|
|
99
100
|
# Atomically attempts to remove an item, comparing using `Object#hash`.
|
|
100
101
|
#
|
|
@@ -120,7 +121,7 @@ module Concurrent
|
|
|
120
121
|
end
|
|
121
122
|
end
|
|
122
123
|
|
|
123
|
-
# @!macro
|
|
124
|
+
# @!macro lock_free_linked_list_method_each
|
|
124
125
|
#
|
|
125
126
|
# An iterator to loop through the set.
|
|
126
127
|
#
|
|
require 'concurrent/synchronization/object'
require 'concurrent/promises'
require 'concurrent/edge/channel'

module Concurrent

  # A new implementation of actor which also simulates the process, therefore it can be used
  # in the same way as Erlang's actors but **without** occupying a thread. Tens of thousands of
  # ProcessingActors can run at the same time sharing a thread pool.
  # @example
  #   # Runs on a pool, does not consume 50_000 threads
  #   actors = 50_000.times.map do |i|
  #     Concurrent::ProcessingActor.act(i) { |a, i| a.receive.then_on(:fast, i) { |m, i| m + i } }
  #   end
  #
  #   actors.each { |a| a.tell 1 }
  #   values = actors.map(&:termination).map(&:value)
  #   values[0, 5]  # => [1, 2, 3, 4, 5]
  #   values[-5, 5] # => [49996, 49997, 49998, 49999, 50000]
  # @!macro warn.edge
  class ProcessingActor < Synchronization::Object

    # TODO (pitr-ch 29-Jan-2019): simplify as much as possible, maybe even do not delegate to mailbox, no ask linking etc
    # TODO (pitr-ch 03-Feb-2019): remove completely

    safe_initialization!

    # @return [Promises::Channel] actor's mailbox.
    def mailbox
      @Mailbox
    end

    # @return [Promises::Future(Object)] a future which is resolved when the actor ends its processing.
    #   It can either be fulfilled with a value when actor ends normally or rejected with
    #   a reason (exception) when actor fails.
    def termination
      # hide the resolvable side so callers cannot resolve the actor's termination themselves
      @Terminated.with_hidden_resolvable
    end

    # Creates an actor.
    # @see .act_listening Behaves the same way, but does not take mailbox as a first argument.
    # @return [ProcessingActor]
    # @example
    #   actor = Concurrent::ProcessingActor.act do |actor|
    #     actor.receive.then do |message|
    #       # the actor ends normally with message
    #       message
    #     end
    #   end
    #
    #   actor.tell :a_message
    #   # => <#Concurrent::ProcessingActor:0x7fff11280560 termination:pending>
    #   actor.termination.value! # => :a_message
    def self.act(*args, &process)
      act_listening Promises::Channel.new, *args, &process
    end

    # Creates an actor listening to a specified channel (mailbox).
    # @param [Object] args Arguments passed to the process.
    # @param [Promises::Channel] channel which serves as mailing box. The channel can have limited
    #   size to achieve backpressure.
    # @yield [actor, *args] to the process to get back a future which represents the actors execution.
    # @yieldparam [ProcessingActor] actor
    # @yieldparam [Object] *args
    # @yieldreturn [Promises::Future(Object)] a future representing next step of execution
    # @return [ProcessingActor]
    def self.act_listening(channel, *args, &process)
      ProcessingActor.new channel, *args, &process
    end

    # # Receives a message when available, used in the actor's process.
    # # @return [Promises::Future(Object)] a future which will be fulfilled with a message from
    # #   mailbox when it is available.
    # def receive(*channels)
    #   channels = [@Mailbox] if channels.empty?
    #   Promises::Channel.select(*channels)
    #   # TODO (pitr-ch 27-Dec-2016): support patterns
    #   #   - put any received message aside if it does not match
    #   #   - on each receive call check the messages put aside
    #   #   - track where the message came from, cannot later receive m from other channel only because it matches
    # end

    # Receives a message when available, used in the actor's process.
    # @param [Promises::Channel] channel to pop from, defaults to the actor's mailbox.
    # @return [Promises::Future(Object)] a future which will be fulfilled with a message
    #   from the channel when it is available.
    def receive(channel = mailbox)
      channel.pop_op
    end

    # Tells a message to the actor. May block current thread if the mailbox is full.
    # {#tell_op} is a better option since it does not block. It's usually used to integrate with
    # threading code.
    # @example
    #   Thread.new(actor) do |actor|
    #     # ...
    #     actor.tell! :a_message # blocks until the message is told
    #     #   (there is a space for it in the channel)
    #     # ...
    #   end
    # @param [Object] message
    # @return [self]
    def tell!(message)
      @Mailbox.push(message)
      self
    end

    # Tells a message to the actor.
    # @param [Object] message
    # @return [Promises::Future(ProcessingActor)] a future which will be fulfilled with the actor
    #   when the message is pushed to mailbox.
    def tell_op(message)
      @Mailbox.push_op(message).then(self) { |_ch, actor| actor }
    end

    # # Simplifies common pattern when a message sender also requires an answer to the message
    # # from the actor. It appends a resolvable_future for the answer after the message.
    # # @todo has to be nice also on the receive side, cannot make structure like this [message = [...], answer]
    # #   all receives should receive something friendly
    # # @param [Object] message
    # # @param [Promises::ResolvableFuture] answer
    # # @return [Promises::Future] a future which will be fulfilled with the answer to the message
    # # @example
    # #   add_once_actor = Concurrent::ProcessingActor.act do |actor|
    # #     actor.receive.then do |(a, b), answer|
    # #       result = a + b
    # #       answer.fulfill result
    # #       # terminate with result value
    # #       result
    # #     end
    # #   end
    # #   # => <#Concurrent::ProcessingActor:0x7fcd1315f6e8 termination:pending>
    # #
    # #   add_once_actor.ask([1, 2]).value! # => 3
    # #   # fails the actor already added once
    # #   add_once_actor.ask(%w(ab cd)).reason
    # #   # => #<RuntimeError: actor terminated normally before answering with a value: 3>
    # #   add_once_actor.termination.value! # => 3
    # def ask(message, answer = Promises.resolvable_future)
    #   raise 'to be removed'
    #
    #   # TODO (pitr-ch 12-Dec-2018): REMOVE, the process ends up as another future not a value, no nice way to do ask in the actor
    #   tell [message, answer]
    #   # do not leave answers unanswered when actor terminates.
    #   Promises.any(
    #       Promises.fulfilled_future(:answer).zip(answer),
    #       Promises.fulfilled_future(:termination).zip(@Terminated)
    #   ).chain do |fulfilled, (which, value), (_, reason)|
    #     # TODO (pitr-ch 20-Jan-2017): we have to know which future was resolved
    #     # TODO (pitr-ch 20-Jan-2017): make the combinator programmable, so anyone can create what is needed
    #     # FIXME (pitr-ch 19-Jan-2017): ensure no callbacks are accumulated on @Terminated
    #     if which == :termination
    #       raise reason.nil? ? format('actor terminated normally before answering with a value: %s', value) : reason
    #     else
    #       fulfilled ? value : raise(reason)
    #     end
    #   end
    # end

    # Sends a message built by the block (which receives the answer future) and returns
    # a future resolved with the answer, e.g. actor.ask_op { |a| [:count, a] }
    def ask_op(answer = Promises.resolvable_future, &message_provider)
      # TODO (pitr-ch 12-Dec-2018): is it ok to let the answers be unanswered when the actor terminates
      tell_op(message_provider.call(answer)).then(answer) { |_, a| a }

      # answer.chain { |v| [true, v] } | @Terminated.then
    end

    # @return [String] string representation.
    def to_s
      format '%s termination: %s>', super[0..-2], termination.state
    end

    alias_method :inspect, :to_s

    # @!visibility private
    # Supports implicit destructuring into [mailbox, termination-future].
    def to_ary
      [@Mailbox, @Terminated]
    end

    private

    # @param [Promises::Channel] channel used as the actor's mailbox
    # @param [Object] args passed to the process block
    def initialize(channel, *args, &process)
      @Mailbox    = channel
      # the process starts running immediately; @Terminated must be set before super()
      @Terminated = Promises.future(self, *args, &process).run
      super()
    end

  end
end
# TODO try stealing pool, each thread has its own queue

require 'concurrent/promises'

module Concurrent
  module Promises

    class Future < AbstractEventFuture

      # @!macro warn.edge
      module ActorIntegration
        # Asks the actor with its value.
        # @return [Future] new future with the response from the actor
        def then_ask(actor)
          self.then(actor) { |v, a| a.ask_op(v) }.flat
        end
      end

      include ActorIntegration

      # Shortcuts combining `then*` with flattening of the returned future/event.
      # @!macro warn.edge
      module FlatShortcuts

        # @return [Future]
        def then_flat_future(*args, &block)
          self.then(*args, &block).flat_future
        end

        alias_method :then_flat, :then_flat_future

        # @return [Future]
        def then_flat_future_on(executor, *args, &block)
          self.then_on(executor, *args, &block).flat_future
        end

        alias_method :then_flat_on, :then_flat_future_on

        # @return [Event]
        def then_flat_event(*args, &block)
          self.then(*args, &block).flat_event
        end

        # @return [Event]
        def then_flat_event_on(executor, *args, &block)
          self.then_on(executor, *args, &block).flat_event
        end
      end

      include FlatShortcuts
    end

    class Future < AbstractEventFuture
      # @!macro warn.edge
      module NewChannelIntegration

        # @param [Channel] channel to push to.
        # @return [Future] a future which is fulfilled after the message is pushed to the channel.
        #   May take a moment if the channel is full.
        def then_channel_push(channel)
          self.then(channel) { |value, ch| ch.push_op value }.flat_future
        end

      end

      include NewChannelIntegration
    end

    module FactoryMethods
      # @!macro promises.shortcut.on
      # @return [Future]
      # @!macro warn.edge
      def zip_futures_over(enumerable, &future_factory)
        zip_futures_over_on default_executor, enumerable, &future_factory
      end

      # Creates new future which is resolved after all the futures created by future_factory from
      # enumerable elements are resolved. Simplified it does:
      # `zip(*enumerable.map { |e| future e, &future_factory })`
      # @example
      #   # `#succ` calls are executed in parallel
      #   zip_futures_over_on(:io, [1, 2], &:succ).value! # => [2, 3]
      #
      # @!macro promises.param.default_executor
      # @param [Enumerable] enumerable
      # @yield a task to be executed in future
      # @yieldparam [Object] element from enumerable
      # @yieldreturn [Object] a value of the future
      # @return [Future]
      # @!macro warn.edge
      def zip_futures_over_on(default_executor, enumerable, &future_factory)
        # ZipFuturesPromise.new_blocked_by(futures_and_or_events, default_executor).future
        zip_futures_on(default_executor, *enumerable.map { |e| future e, &future_factory })
      end
    end

    module Resolvable
      include InternalStates

      # Reserves the event or future, if reserved others are prevented from resolving it.
      # Advanced feature.
      # Be careful about the order of reservation to avoid deadlocks,
      # the method blocks if the future or event is already reserved
      # until it is released or resolved.
      #
      # @example
      #   f = Concurrent::Promises.resolvable_future
      #   reserved = f.reserve
      #   Thread.new { f.resolve true, :val, nil } # fails
      #   f.resolve true, :val, nil, true if reserved # must be called only if reserved
      # @return [true, false] on successful reservation
      def reserve
        while true
          return true if compare_and_set_internal_state(PENDING, RESERVED)
          return false if resolved?
          # FIXME (pitr-ch 17-Jan-2019): sleep until given up or resolved instead of busy wait
          Thread.pass
        end
      end

      # @return [true, false] on successful release of the reservation
      def release
        compare_and_set_internal_state(RESERVED, PENDING)
      end

      # @return [Comparable] an item to sort the resolvable events or futures
      #   by to get the right global locking order of resolvable events or futures
      # @see .atomic_resolution
      def self.locking_order_by(resolvable)
        # object_id is stable and unique per object, giving a consistent global order
        resolvable.object_id
      end

      # Resolves all passed events and futures to the given resolutions
      # if possible (all are unresolved) or none.
      #
      # @param [Hash{Resolvable=>resolve_arguments}, Array<Array(Resolvable, resolve_arguments)>] resolvable_map
      #   collection of resolvable events and futures which should be resolved all at once
      #   and what should they be resolved to, examples:
      #   ```ruby
      #   { a_resolvable_future1 => [true, :val, nil],
      #     a_resolvable_future2 => [false, nil, :err],
      #     a_resolvable_event => [] }
      #   ```
      #   or
      #   ```ruby
      #   [[a_resolvable_future1, [true, :val, nil]],
      #    [a_resolvable_future2, [false, nil, :err]],
      #    [a_resolvable_event, []]]
      #   ```
      # @return [true, false] if success
      def self.atomic_resolution(resolvable_map)
        # atomic_resolution event => [], future => [true, :v, nil]
        # sort into the global locking order to avoid deadlock between concurrent callers
        sorted = resolvable_map.to_a.sort_by { |resolvable, _| locking_order_by resolvable }

        # reserve as many as possible in order; stop at the first already-resolved one
        reserved = 0
        while reserved < sorted.size && sorted[reserved].first.reserve
          reserved += 1
        end

        if reserved == sorted.size
          # all reserved: resolve every one (the trailing true, true marks reserved resolution)
          sorted.each { |resolvable, args| resolvable.resolve(*args, true, true) }
          true
        else
          # roll back every reservation taken so far
          while reserved > 0
            reserved -= 1
            raise 'has to be reserved' unless sorted[reserved].first.release
          end
          false
        end
      end
    end

  end
end
require 'concurrent/edge/lock_free_queue'
require 'concurrent/promises'
require 'concurrent/synchronization/object'

module Concurrent
  # A tool managing concurrency level of tasks.
  # The maximum capacity is set in constructor.
  # Each acquire will lower the available capacity and release will increase it.
  # When there is no available capacity the current thread may either be blocked or
  # an event is returned which will be resolved when capacity becomes available.
  #
  # The more common usage of the Throttle is with a proxy executor
  # `a_throttle.on(Concurrent.global_io_executor)`.
  # Anything executed on the proxy executor will be throttled and
  # execute on the given executor. There can be more than one proxy executors.
  # All abstractions which execute tasks have option to specify executor,
  # therefore the proxy executor can be injected to any abstraction
  # throttling its concurrency level.
  #
  # {include:file:docs-source/throttle.out.md}
  #
  # @!macro warn.edge
  class Throttle < Synchronization::Object
    safe_initialization!

    # capacity may go negative: its absolute value then counts waiters queued in @Queue
    attr_atomic(:capacity)
    private :capacity, :capacity=, :swap_capacity, :compare_and_set_capacity, :update_capacity

    # @return [Integer] The available capacity.
    def available_capacity
      current_capacity = capacity
      # negative capacity means threads are waiting, so nothing is available
      current_capacity >= 0 ? current_capacity : 0
    end

    # Create throttle.
    # @param [Integer] capacity How many tasks using this throttle can run at the same time.
    def initialize(capacity)
      super()
      @MaxCapacity    = capacity
      @Queue          = LockFreeQueue.new
      @executor_cache = [nil, nil]
      self.capacity = capacity
    end

    # @return [Integer] The maximum capacity.
    def max_capacity
      @MaxCapacity
    end

    # Blocks current thread until there is capacity available in the throttle.
    # The acquired capacity has to be returned to the throttle by calling {#release}.
    # If block is passed then the block is called after the capacity is acquired and
    # it is automatically released after the block is executed.
    #
    # @param [Numeric] timeout the maximum time in second to wait.
    # @yield [] block to execute after the capacity is acquired
    # @return [Object, self, true, false]
    #   * When no timeout and no block it returns self
    #   * When no timeout and with block it returns the result of the block
    #   * When with timeout and no block it returns true when acquired and false when timed out
    #   * When with timeout and with block it returns the result of the block or nil on timing out
    # @see #release
    def acquire(timeout = nil, &block)
      event = acquire_or_event
      if event
        within_timeout = event.wait(timeout)
        # release immediately when acquired later after the timeout since it is unused
        event.on_resolution!(self, &:release) unless within_timeout
      else
        within_timeout = true
      end

      called = false
      if timeout
        if block
          if within_timeout
            called = true
            block.call
          else
            nil
          end
        else
          within_timeout
        end
      else
        if block
          called = true
          block.call
        else
          self
        end
      end
    ensure
      # only auto-release when the block was actually run; otherwise the caller owns the capacity
      release if called
    end

    # Tries to acquire capacity from the throttle.
    # Returns true when there is capacity available.
    # The acquired capacity has to be returned to the throttle by calling {#release}.
    # @return [true, false]
    # @see #release
    def try_acquire
      while true
        current_capacity = capacity
        if current_capacity > 0
          # CAS loop: retry when another thread changed capacity concurrently
          return true if compare_and_set_capacity(
              current_capacity, current_capacity - 1)
        else
          return false
        end
      end
    end

    # Releases previously acquired capacity back to Throttle.
    # Has to be called exactly once for each acquired capacity.
    # @return [self]
    # @see #acquire, #try_acquire
    def release
      while true
        current_capacity = capacity
        if compare_and_set_capacity current_capacity, current_capacity + 1
          if current_capacity < 0
            # release called after trigger which pushed a trigger, busy wait is ok
            Thread.pass until (trigger = @Queue.pop)
            trigger.resolve
          end
          return self
        end
      end
    end

    # @return [String] Short string representation.
    def to_s
      format '%s capacity available %d of %d>', super[0..-2], capacity, @MaxCapacity
    end

    alias_method :inspect, :to_s

    # Acquires capacity immediately (returning nil) or enqueues and returns an event
    # which resolves when capacity becomes available.
    # @!visibility private
    def acquire_or_event
      while true
        current_capacity = capacity
        if compare_and_set_capacity current_capacity, current_capacity - 1
          if current_capacity > 0
            return nil
          else
            event = Promises.resolvable_event
            @Queue.push event
            return event
          end
        end
      end
    end

    include Promises::FactoryMethods

    # @param [ExecutorService] executor
    # @return [ExecutorService] An executor which wraps given executor and allows to post tasks only
    #   as available capacity in the throttle allows.
    # @example throttling future
    #   a_future.then_on(a_throttle.on(:io)) { a_throttled_task }
    def on(executor = Promises::FactoryMethods.default_executor)
      current_executor, current_cache = @executor_cache
      return current_cache if current_executor == executor && current_cache

      if current_executor.nil?
        # cache first proxy
        proxy_executor  = ProxyExecutor.new(self, Concurrent.executor(executor))
        @executor_cache = [executor, proxy_executor]
        return proxy_executor
      else
        # do not cache more than 1 executor
        ProxyExecutor.new(self, Concurrent.executor(executor))
      end
    end

    # Uses executor provided by {#on} therefore
    # all events and futures created using factory methods on this object will be throttled.
    # Overrides {Promises::FactoryMethods#default_executor}.
    #
    # @return [ExecutorService]
    # @see Promises::FactoryMethods#default_executor
    def default_executor
      on(super)
    end

    # Executor wrapper which defers posted tasks until the throttle has capacity
    # and releases the capacity after each task finishes.
    class ProxyExecutor < Synchronization::Object
      safe_initialization!

      include ExecutorService

      def initialize(throttle, executor)
        super()
        @Throttle = throttle
        @Executor = executor
      end

      def post(*args, &task)
        if (event = @Throttle.acquire_or_event)
          # no capacity now: run the task once the queued event resolves
          event.on_resolution! { inner_post(*args, &task) }
        else
          inner_post(*args, &task)
        end
      end

      def can_overflow?
        @Executor.can_overflow?
      end

      def serialized?
        @Executor.serialized?
      end

      private

      def inner_post(*arguments, &task)
        @Executor.post(*arguments) do |*args|
          begin
            task.call(*args)
          ensure
            # always return the capacity, even when the task raises
            @Throttle.release
          end
        end
      end
    end

    private_constant :ProxyExecutor
  end
end