concurrent-ruby-edge 0.4.1 → 0.5.0

Sign up to get free protection for your applications and to get access to all the features.
@@ -15,9 +15,9 @@ module Concurrent
15
15
  # values[-5, 5] # => [49996, 49997, 49998, 49999, 50000]
16
16
  # @!macro warn.edge
17
17
  class ProcessingActor < Synchronization::Object
18
- # TODO (pitr-ch 18-Dec-2016): (un)linking, bidirectional, sends special message, multiple link calls has no effect,
19
- # TODO (pitr-ch 21-Dec-2016): Make terminated a cancellation token?
20
- # link_spawn atomic, Can it be fixed by sending exit when linked dead actor?
18
+
19
+ # TODO (pitr-ch 29-Jan-2019): simplify as much as possible, maybe even do not delegate to mailbox, no ask linking etc
20
+ # TODO (pitr-ch 03-Feb-2019): remove completely
21
21
 
22
22
  safe_initialization!
23
23
 
@@ -55,31 +55,33 @@ module Concurrent
55
55
  # @param [Object] args Arguments passed to the process.
56
56
  # @param [Promises::Channel] channel which serves as mailing box. The channel can have limited
57
57
  # size to achieve backpressure.
58
- # @yield args to the process to get back a future which represents the actors execution.
58
+ # @yield [actor, *args] to the process to get back a future which represents the actors execution.
59
+ # @yieldparam [ProcessingActor] actor
59
60
  # @yieldparam [Object] *args
60
61
  # @yieldreturn [Promises::Future(Object)] a future representing next step of execution
61
62
  # @return [ProcessingActor]
62
- # @example
63
- # # TODO (pitr-ch 19-Jan-2017): actor with limited mailbox
64
63
  def self.act_listening(channel, *args, &process)
65
- actor = ProcessingActor.new channel
66
- Promises.
67
- future(actor, *args, &process).
68
- run.
69
- chain_resolvable(actor.instance_variable_get(:@Terminated))
70
- actor
64
+ ProcessingActor.new channel, *args, &process
71
65
  end
72
66
 
73
- # Receives a message when available, used in the actor's process.
74
- # @return [Promises::Future(Object)] a future which will be fulfilled with a message from
75
- # mailbox when it is available.
76
- def receive(probe = Promises.resolvable_future)
77
- # TODO (pitr-ch 27-Dec-2016): patterns
78
- @Mailbox.pop probe
67
+ # # Receives a message when available, used in the actor's process.
68
+ # # @return [Promises::Future(Object)] a future which will be fulfilled with a message from
69
+ # # mailbox when it is available.
70
+ # def receive(*channels)
71
+ # channels = [@Mailbox] if channels.empty?
72
+ # Promises::Channel.select(*channels)
73
+ # # TODO (pitr-ch 27-Dec-2016): support patterns
74
+ # # - put any received message aside if it does not match
75
+ # # - on each receive call check the messages put aside
76
+ # # - track where the message came from, cannot later receive m from other channel only because it matches
77
+ # end
78
+
79
+ def receive(channel = mailbox)
80
+ channel.pop_op
79
81
  end
80
82
 
81
83
  # Tells a message to the actor. May block current thread if the mailbox is full.
82
- # {#tell} is a better option since it does not block. It's usually used to integrate with
84
+ # {#tell_op} is a better option since it does not block. It's usually used to integrate with
83
85
  # threading code.
84
86
  # @example
85
87
  # Thread.new(actor) do |actor|
@@ -91,7 +93,7 @@ module Concurrent
91
93
  # @param [Object] message
92
94
  # @return [self]
93
95
  def tell!(message)
94
- @Mailbox.push(message).wait!
96
+ @Mailbox.push(message)
95
97
  self
96
98
  end
97
99
 
@@ -99,61 +101,78 @@ module Concurrent
99
101
  # @param [Object] message
100
102
  # @return [Promises::Future(ProcessingActor)] a future which will be fulfilled with the actor
101
103
  # when the message is pushed to mailbox.
102
- def tell(message)
103
- @Mailbox.push(message).then(self) { |_, actor| actor }
104
+ def tell_op(message)
105
+ @Mailbox.push_op(message).then(self) { |_ch, actor| actor }
104
106
  end
105
107
 
106
- # Simplifies common pattern when a message sender also requires an answer to the message
107
- # from the actor. It appends a resolvable_future for the answer after the message.
108
- # @todo has to be nice also on the receive side, cannot make structure like this [message = [...], answer]
109
- # all receives should receive something friendly
110
- # @param [Object] message
111
- # @param [Promises::ResolvableFuture] answer
112
- # @return [Promises::Future] a future which will be fulfilled with the answer to the message
113
- # @example
114
- # add_once_actor = Concurrent::ProcessingActor.act do |actor|
115
- # actor.receive.then do |(a, b), answer|
116
- # result = a + b
117
- # answer.fulfill result
118
- # # terminate with result value
119
- # result
120
- # end
121
- # end
122
- # # => <#Concurrent::ProcessingActor:0x7fcd1315f6e8 termination:pending>
108
+ # # Simplifies common pattern when a message sender also requires an answer to the message
109
+ # # from the actor. It appends a resolvable_future for the answer after the message.
110
+ # # @todo has to be nice also on the receive side, cannot make structure like this [message = [...], answer]
111
+ # # all receives should receive something friendly
112
+ # # @param [Object] message
113
+ # # @param [Promises::ResolvableFuture] answer
114
+ # # @return [Promises::Future] a future which will be fulfilled with the answer to the message
115
+ # # @example
116
+ # # add_once_actor = Concurrent::ProcessingActor.act do |actor|
117
+ # # actor.receive.then do |(a, b), answer|
118
+ # # result = a + b
119
+ # # answer.fulfill result
120
+ # # # terminate with result value
121
+ # # result
122
+ # # end
123
+ # # end
124
+ # # # => <#Concurrent::ProcessingActor:0x7fcd1315f6e8 termination:pending>
125
+ # #
126
+ # # add_once_actor.ask([1, 2]).value! # => 3
127
+ # # # fails the actor already added once
128
+ # # add_once_actor.ask(%w(ab cd)).reason
129
+ # # # => #<RuntimeError: actor terminated normally before answering with a value: 3>
130
+ # # add_once_actor.termination.value! # => 3
131
+ # def ask(message, answer = Promises.resolvable_future)
132
+ # raise 'to be removed'
123
133
  #
124
- # add_once_actor.ask([1, 2]).value! # => 3
125
- # # fails the actor already added once
126
- # add_once_actor.ask(%w(ab cd)).reason
127
- # # => #<RuntimeError: actor terminated normally before answering with a value: 3>
128
- # add_once_actor.termination.value! # => 3
129
- def ask(message, answer = Promises.resolvable_future)
130
- tell [message, answer]
131
- # do not leave answers unanswered when actor terminates.
132
- Promises.any(
133
- Promises.fulfilled_future(:answer).zip(answer),
134
- Promises.fulfilled_future(:termination).zip(@Terminated)
135
- ).chain do |fulfilled, (which, value), (_, reason)|
136
- # TODO (pitr-ch 20-Jan-2017): we have to know which future was resolved
137
- # TODO (pitr-ch 20-Jan-2017): make the combinator programmable, so anyone can create what is needed
138
- # FIXME (pitr-ch 19-Jan-2017): ensure no callbacks are accumulated on @Terminated
139
- if which == :termination
140
- raise reason.nil? ? format('actor terminated normally before answering with a value: %s', value) : reason
141
- else
142
- fulfilled ? value : raise(reason)
143
- end
144
- end
134
+ # # TODO (pitr-ch 12-Dec-2018): REMOVE, the process ends up as another future not a value, no nice way to do ask in the actor
135
+ # tell [message, answer]
136
+ # # do not leave answers unanswered when actor terminates.
137
+ # Promises.any(
138
+ # Promises.fulfilled_future(:answer).zip(answer),
139
+ # Promises.fulfilled_future(:termination).zip(@Terminated)
140
+ # ).chain do |fulfilled, (which, value), (_, reason)|
141
+ # # TODO (pitr-ch 20-Jan-2017): we have to know which future was resolved
142
+ # # TODO (pitr-ch 20-Jan-2017): make the combinator programmable, so anyone can create what is needed
143
+ # # FIXME (pitr-ch 19-Jan-2017): ensure no callbacks are accumulated on @Terminated
144
+ # if which == :termination
145
+ # raise reason.nil? ? format('actor terminated normally before answering with a value: %s', value) : reason
146
+ # else
147
+ # fulfilled ? value : raise(reason)
148
+ # end
149
+ # end
150
+ # end
151
+
152
+ # actor.ask2 { |a| [:count, a] }
153
+ def ask_op(answer = Promises.resolvable_future, &message_provider)
154
+ # TODO (pitr-ch 12-Dec-2018): is it ok to let the answers be unanswered when the actor terminates
155
+ tell_op(message_provider.call(answer)).then(answer) { |_, a| a }
156
+
157
+ # answer.chain { |v| [true, v] } | @Terminated.then
145
158
  end
146
159
 
147
160
  # @return [String] string representation.
148
- def inspect
149
- format '%s termination:%s>', super[0..-2], termination.state
161
+ def to_s
162
+ format '%s termination: %s>', super[0..-2], termination.state
163
+ end
164
+
165
+ alias_method :inspect, :to_s
166
+
167
+ def to_ary
168
+ [@Mailbox, @Terminated]
150
169
  end
151
170
 
152
171
  private
153
172
 
154
- def initialize(channel = Promises::Channel.new)
173
+ def initialize(channel, *args, &process)
155
174
  @Mailbox = channel
156
- @Terminated = Promises.resolvable_future
175
+ @Terminated = Promises.future(self, *args, &process).run
157
176
  super()
158
177
  end
159
178
 
@@ -12,7 +12,7 @@ module Concurrent
12
12
  # Asks the actor with its value.
13
13
  # @return [Future] new future with the response from the actor
14
14
  def then_ask(actor)
15
- self.then { |v| actor.ask(v) }.flat
15
+ self.then(actor) { |v, a| a.ask_op(v) }.flat
16
16
  end
17
17
  end
18
18
 
@@ -49,97 +49,6 @@ module Concurrent
49
49
  include FlatShortcuts
50
50
  end
51
51
 
52
- # @!macro warn.edge
53
- class Channel < Concurrent::Synchronization::Object
54
- safe_initialization!
55
-
56
- # Default size of the Channel, makes it accept unlimited number of messages.
57
- UNLIMITED = ::Object.new
58
- UNLIMITED.singleton_class.class_eval do
59
- include Comparable
60
-
61
- def <=>(other)
62
- 1
63
- end
64
-
65
- def to_s
66
- 'unlimited'
67
- end
68
- end
69
-
70
- # A channel to pass messages between promises. The size is limited to support back pressure.
71
- # @param [Integer, UNLIMITED] size the maximum number of messages stored in the channel.
72
- def initialize(size = UNLIMITED)
73
- super()
74
- @Size = size
75
- # TODO (pitr-ch 26-Dec-2016): replace with lock-free implementation
76
- @Mutex = Mutex.new
77
- @Probes = []
78
- @Messages = []
79
- @PendingPush = []
80
- end
81
-
82
-
83
- # Returns future which will fulfill when the message is added to the channel. Its value is the message.
84
- # @param [Object] message
85
- # @return [Future]
86
- def push(message)
87
- @Mutex.synchronize do
88
- while true
89
- if @Probes.empty?
90
- if @Size > @Messages.size
91
- @Messages.push message
92
- return Promises.fulfilled_future message
93
- else
94
- pushed = Promises.resolvable_future
95
- @PendingPush.push [message, pushed]
96
- return pushed.with_hidden_resolvable
97
- end
98
- else
99
- probe = @Probes.shift
100
- if probe.fulfill [self, message], false
101
- return Promises.fulfilled_future(message)
102
- end
103
- end
104
- end
105
- end
106
- end
107
-
108
- # Returns a future witch will become fulfilled with a value from the channel when one is available.
109
- # @param [ResolvableFuture] probe the future which will be fulfilled with a channel value
110
- # @return [Future] the probe, its value will be the message when available.
111
- def pop(probe = Concurrent::Promises.resolvable_future)
112
- # TODO (pitr-ch 26-Dec-2016): improve performance
113
- pop_for_select(probe).then(&:last)
114
- end
115
-
116
- # @!visibility private
117
- def pop_for_select(probe = Concurrent::Promises.resolvable_future)
118
- @Mutex.synchronize do
119
- if @Messages.empty?
120
- @Probes.push probe
121
- else
122
- message = @Messages.shift
123
- probe.fulfill [self, message]
124
-
125
- unless @PendingPush.empty?
126
- message, pushed = @PendingPush.shift
127
- @Messages.push message
128
- pushed.fulfill message
129
- end
130
- end
131
- end
132
- probe
133
- end
134
-
135
- # @return [String] Short string representation.
136
- def to_s
137
- format '%s size:%s>', super[0..-2], @Size
138
- end
139
-
140
- alias_method :inspect, :to_s
141
- end
142
-
143
52
  class Future < AbstractEventFuture
144
53
  # @!macro warn.edge
145
54
  module NewChannelIntegration
@@ -147,8 +56,8 @@ module Concurrent
147
56
  # @param [Channel] channel to push to.
148
57
  # @return [Future] a future which is fulfilled after the message is pushed to the channel.
149
58
  # May take a moment if the channel is full.
150
- def then_push_channel(channel)
151
- self.then { |value| channel.push value }.flat_future
59
+ def then_channel_push(channel)
60
+ self.then(channel) { |value, ch| ch.push_op value }.flat_future
152
61
  end
153
62
 
154
63
  end
@@ -157,22 +66,6 @@ module Concurrent
157
66
  end
158
67
 
159
68
  module FactoryMethods
160
- # @!macro warn.edge
161
- module NewChannelIntegration
162
-
163
- # Selects a channel which is ready to be read from.
164
- # @param [Channel] channels
165
- # @return [Future] a future which is fulfilled with pair [channel, message] when one of the channels is
166
- # available for reading
167
- def select_channel(*channels)
168
- probe = Promises.resolvable_future
169
- channels.each { |ch| ch.pop_for_select probe }
170
- probe
171
- end
172
- end
173
-
174
- include NewChannelIntegration
175
-
176
69
  # @!macro promises.shortcut.on
177
70
  # @return [Future]
178
71
  # @!macro warn.edge
@@ -200,5 +93,82 @@ module Concurrent
200
93
  end
201
94
  end
202
95
 
96
+ module Resolvable
97
+ include InternalStates
98
+
99
+ # Reserves the event or future, if reserved others are prevented from resolving it.
100
+ # Advanced feature.
101
+ # Be careful about the order of reservation to avoid deadlocks,
102
+ # the method blocks if the future or event is already reserved
103
+ # until it is released or resolved.
104
+ #
105
+ # @example
106
+ # f = Concurrent::Promises.resolvable_future
107
+ # reserved = f.reserve
108
+ # Thread.new { f.resolve true, :val, nil } # fails
109
+ # f.resolve true, :val, nil, true if reserved # must be called only if reserved
110
+ # @return [true, false] on successful reservation
111
+ def reserve
112
+ while true
113
+ return true if compare_and_set_internal_state(PENDING, RESERVED)
114
+ return false if resolved?
115
+ # FIXME (pitr-ch 17-Jan-2019): sleep until given up or resolved instead of busy wait
116
+ Thread.pass
117
+ end
118
+ end
119
+
120
+ # @return [true, false] on successful release of the reservation
121
+ def release
122
+ compare_and_set_internal_state(RESERVED, PENDING)
123
+ end
124
+
125
+ # @return [Comparable] an item to sort the resolvable events or futures
126
+ # by to get the right global locking order of resolvable events or futures
127
+ # @see .atomic_resolution
128
+ def self.locking_order_by(resolvable)
129
+ resolvable.object_id
130
+ end
131
+
132
+ # Resolves all passed events and futures to the given resolutions
133
+ # if possible (all are unresolved) or none.
134
+ #
135
+ # @param [Hash{Resolvable=>resolve_arguments}, Array<Array(Resolvable, resolve_arguments)>] resolvable_map
136
+ # collection of resolvable events and futures which should be resolved all at once
137
+ # and what should they be resolved to, examples:
138
+ # ```ruby
139
+ # { a_resolvable_future1 => [true, :val, nil],
140
+ # a_resolvable_future2 => [false, nil, :err],
141
+ # a_resolvable_event => [] }
142
+ # ```
143
+ # or
144
+ # ```ruby
145
+ # [[a_resolvable_future1, [true, :val, nil]],
146
+ # [a_resolvable_future2, [false, nil, :err]],
147
+ # [a_resolvable_event, []]]
148
+ # ```
149
+ # @return [true, false] if success
150
+ def self.atomic_resolution(resolvable_map)
151
+ # atomic_resolution event => [], future => [true, :v, nil]
152
+ sorted = resolvable_map.to_a.sort_by { |resolvable, _| locking_order_by resolvable }
153
+
154
+ reserved = 0
155
+ while reserved < sorted.size && sorted[reserved].first.reserve
156
+ reserved += 1
157
+ end
158
+
159
+ if reserved == sorted.size
160
+ sorted.each { |resolvable, args| resolvable.resolve(*args, true, true) }
161
+ true
162
+ else
163
+ while reserved > 0
164
+ reserved -= 1
165
+ raise 'has to be reserved' unless sorted[reserved].first.release
166
+ end
167
+ false
168
+ end
169
+ end
170
+ end
171
+
172
+
203
173
  end
204
174
  end
@@ -1,102 +1,121 @@
1
1
  module Concurrent
2
- # @!macro throttle.example.throttled_block
3
- # @example
4
- # max_two = Throttle.new 2
5
- # 10.times.map do
6
- # Thread.new do
7
- # max_two.throttled_block do
8
- # # Only 2 at the same time
9
- # do_stuff
10
- # end
11
- # end
12
- # end
13
- # @!macro throttle.example.throttled_future
14
- # @example
15
- # throttle.throttled_future(1) do |arg|
16
- # arg.succ
17
- # end
18
- # @!macro throttle.example.throttled_future_chain
19
- # @example
20
- # throttle.throttled_future_chain do |trigger|
21
- # trigger.
22
- # # 2 throttled promises
23
- # chain { 1 }.
24
- # then(&:succ)
25
- # end
26
- # @!macro throttle.example.then_throttled_by
27
- # @example
28
- # data = (1..5).to_a
29
- # db = data.reduce({}) { |h, v| h.update v => v.to_s }
30
- # max_two = Throttle.new 2
2
+ # A tool managing concurrency level of tasks.
3
+ # The maximum capacity is set in constructor.
4
+ # Each acquire will lower the available capacity and release will increase it.
5
+ # When there is no available capacity the current thread may either be blocked or
6
+ # an event is returned which will be resolved when capacity becomes available.
31
7
  #
32
- # futures = data.map do |data|
33
- # Promises.future(data) do |data|
34
- # # un-throttled, concurrency level equal data.size
35
- # data + 1
36
- # end.then_throttled_by(max_two, db) do |v, db|
37
- # # throttled, only 2 tasks executed at the same time
38
- # # e.g. limiting access to db
39
- # db[v]
40
- # end
41
- # end
8
+ # The more common usage of the Throttle is with a proxy executor
9
+ # `a_throttle.on(Concurrent.global_io_executor)`.
10
+ # Anything executed on the proxy executor will be throttled and
11
+ # execute on the given executor. There can be more than one proxy executors.
12
+ # All abstractions which execute tasks have option to specify executor,
13
+ # therefore the proxy executor can be injected to any abstraction
14
+ # throttling its concurrency level.
42
15
  #
43
- # futures.map(&:value!) # => [2, 3, 4, 5, nil]
44
-
45
- # A tool manage concurrency level of future tasks.
16
+ # {include:file:docs-source/throttle.out.md}
46
17
  #
47
- # @!macro throttle.example.then_throttled_by
48
- # @!macro throttle.example.throttled_future
49
- # @!macro throttle.example.throttled_future_chain
50
- # @!macro throttle.example.throttled_block
51
18
  # @!macro warn.edge
52
19
  class Throttle < Synchronization::Object
53
- # TODO (pitr-ch 21-Dec-2016): consider using sized channel for implementation instead when available
54
-
55
20
  safe_initialization!
56
- attr_atomic(:can_run)
57
- private :can_run, :can_run=, :swap_can_run, :compare_and_set_can_run, :update_can_run
58
21
 
59
- # New throttle.
60
- # @param [Integer] limit
61
- def initialize(limit)
22
+ attr_atomic(:capacity)
23
+ private :capacity, :capacity=, :swap_capacity, :compare_and_set_capacity, :update_capacity
24
+
25
+ # @return [Integer] The available capacity.
26
+ def available_capacity
27
+ current_capacity = capacity
28
+ current_capacity >= 0 ? current_capacity : 0
29
+ end
30
+
31
+ # Create throttle.
32
+ # @param [Integer] capacity How many tasks using this throttle can run at the same time.
33
+ def initialize(capacity)
62
34
  super()
63
- @Limit = limit
64
- self.can_run = limit
65
- @Queue = LockFreeQueue.new
35
+ @MaxCapacity = capacity
36
+ @Queue = LockFreeQueue.new
37
+ @executor_cache = [nil, nil]
38
+ self.capacity = capacity
66
39
  end
67
40
 
68
- # @return [Integer] The limit.
69
- def limit
70
- @Limit
41
+ # @return [Integer] The maximum capacity.
42
+ def max_capacity
43
+ @MaxCapacity
71
44
  end
72
45
 
73
- # New event which will be resolved when depending tasks can execute.
74
- # Has to be used and after the critical work is done {#release} must be called exactly once.
75
- # @return [Promises::Event]
46
+ # Blocks current thread until there is capacity available in the throttle.
47
+ # The acquired capacity has to be returned to the throttle by calling {#release}.
48
+ # If block is passed then the block is called after the capacity is acquired and
49
+ # it is automatically released after the block is executed.
50
+ #
51
+ # @param [Numeric] timeout the maximum time in seconds to wait.
52
+ # @yield [] block to execute after the capacity is acquired
53
+ # @return [Object, self, true, false]
54
+ # * When no timeout and no block it returns self
55
+ # * When no timeout and with block it returns the result of the block
56
+ # * When with timeout and no block it returns true when acquired and false when timed out
57
+ # * When with timeout and with block it returns the result of the block or nil on timing out
76
58
  # @see #release
77
- def trigger
78
- while true
79
- current_can_run = can_run
80
- if compare_and_set_can_run current_can_run, current_can_run - 1
81
- if current_can_run > 0
82
- return Promises.resolved_event
59
+ def acquire(timeout = nil, &block)
60
+ event = acquire_or_event
61
+ if event
62
+ within_timeout = event.wait(timeout)
63
+ # release immediately when acquired later after the timeout since it is unused
64
+ event.on_resolution!(self, &:release) unless within_timeout
65
+ else
66
+ within_timeout = true
67
+ end
68
+
69
+ called = false
70
+ if timeout
71
+ if block
72
+ if within_timeout
73
+ called = true
74
+ block.call
83
75
  else
84
- event = Promises.resolvable_event
85
- @Queue.push event
86
- return event
76
+ nil
87
77
  end
78
+ else
79
+ within_timeout
80
+ end
81
+ else
82
+ if block
83
+ called = true
84
+ block.call
85
+ else
86
+ self
87
+ end
88
+ end
89
+ ensure
90
+ release if called
91
+ end
92
+
93
+ # Tries to acquire capacity from the throttle.
94
+ # Returns true when there is capacity available.
95
+ # The acquired capacity has to be returned to the throttle by calling {#release}.
96
+ # @return [true, false]
97
+ # @see #release
98
+ def try_acquire
99
+ while true
100
+ current_capacity = capacity
101
+ if current_capacity > 0
102
+ return true if compare_and_set_capacity(
103
+ current_capacity, current_capacity - 1)
104
+ else
105
+ return false
88
106
  end
89
107
  end
90
108
  end
91
109
 
92
- # Has to be called once for each trigger after it is ok to execute another throttled task.
110
+ # Releases previously acquired capacity back to Throttle.
111
+ # Has to be called exactly once for each acquired capacity.
93
112
  # @return [self]
94
- # @see #trigger
113
+ # @see #acquire_operation, #acquire, #try_acquire
95
114
  def release
96
115
  while true
97
- current_can_run = can_run
98
- if compare_and_set_can_run current_can_run, current_can_run + 1
99
- if current_can_run < 0
116
+ current_capacity = capacity
117
+ if compare_and_set_capacity current_capacity, current_capacity + 1
118
+ if current_capacity < 0
100
119
  # release called after trigger which pushed a trigger, busy wait is ok
101
120
  Thread.pass until (trigger = @Queue.pop)
102
121
  trigger.resolve
@@ -106,94 +125,101 @@ module Concurrent
106
125
  end
107
126
  end
108
127
 
109
- # Blocks current thread until the block can be executed.
110
- # @yield to throttled block
111
- # @yieldreturn [Object] is used as a result of the method
112
- # @return [Object] the result of the block
113
- # @!macro throttle.example.throttled_block
114
- def throttled_block(&block)
115
- trigger.wait
116
- block.call
117
- ensure
118
- release
119
- end
120
-
121
128
  # @return [String] Short string representation.
122
129
  def to_s
123
- format '%s limit:%s can_run:%d>', super[0..-2], @Limit, can_run
130
+ format '%s capacity available %d of %d>', super[0..-2], capacity, @MaxCapacity
124
131
  end
125
132
 
126
133
  alias_method :inspect, :to_s
127
134
 
128
- module PromisesIntegration
129
-
130
- # Allows to throttle a chain of promises.
131
- # @yield [trigger] a trigger which has to be used to build up a chain of promises, the last one is result
132
- # of the block. When the last one resolves, {Throttle#release} is called on the throttle.
133
- # @yieldparam [Promises::Event, Promises::Future] trigger
134
- # @yieldreturn [Promises::Event, Promises::Future] The final future of the throttled chain.
135
- # @return [Promises::Event, Promises::Future] The final future of the throttled chain.
136
- # @!macro throttle.example.throttled_future_chain
137
- def throttled_future_chain(&throttled_futures)
138
- throttled_futures.call(trigger).on_resolution! { release }
135
+ # @!visibility private
136
+ def acquire_or_event
137
+ while true
138
+ current_capacity = capacity
139
+ if compare_and_set_capacity current_capacity, current_capacity - 1
140
+ if current_capacity > 0
141
+ return nil
142
+ else
143
+ event = Promises.resolvable_event
144
+ @Queue.push event
145
+ return event
146
+ end
147
+ end
139
148
  end
149
+ end
140
150
 
141
- # Behaves as {Promises::FactoryMethods#future} but the future is throttled.
142
- # @return [Promises::Future]
143
- # @see Promises::FactoryMethods#future
144
- # @!macro throttle.example.throttled_future
145
- def throttled_future(*args, &task)
146
- trigger.chain(*args, &task).on_resolution! { release }
151
+ include Promises::FactoryMethods
152
+
153
+ # @param [ExecutorService] executor
154
+ # @return [ExecutorService] An executor which wraps given executor and allows to post tasks only
155
+ # as available capacity in the throttle allows.
156
+ # @example throttling future
157
+ # a_future.then_on(a_throttle.on(:io)) { a_throttled_task }
158
+ def on(executor = Promises::FactoryMethods.default_executor)
159
+ current_executor, current_cache = @executor_cache
160
+ return current_cache if current_executor == executor && current_cache
161
+
162
+ if current_executor.nil?
163
+ # cache first proxy
164
+ proxy_executor = ProxyExecutor.new(self, Concurrent.executor(executor))
165
+ @executor_cache = [executor, proxy_executor]
166
+ return proxy_executor
167
+ else
168
+ # do not cache more than 1 executor
169
+ ProxyExecutor.new(self, Concurrent.executor(executor))
147
170
  end
148
171
  end
149
172
 
150
- include PromisesIntegration
151
- end
173
+ # Uses executor provided by {#on} therefore
174
+ # all events and futures created using factory methods on this object will be throttled.
175
+ # Overrides {Promises::FactoryMethods#default_executor}.
176
+ #
177
+ # @return [ExecutorService]
178
+ # @see Promises::FactoryMethods#default_executor
179
+ def default_executor
180
+ on(super)
181
+ end
152
182
 
153
- module Promises
183
+ class ProxyExecutor < Synchronization::Object
184
+ safe_initialization!
154
185
 
155
- class AbstractEventFuture < Synchronization::Object
156
- module ThrottleIntegration
186
+ include ExecutorService
157
187
 
158
- # @yieldparam [Future] a trigger
159
- # @yieldreturn [Future, Event]
160
- # @return [Future, Event]
161
- def throttled_by(throttle, &throttled_futures)
162
- a_trigger = self & self.chain { throttle.trigger }.flat_event
163
- throttled_futures.call(a_trigger).on_resolution! { throttle.release }
164
- end
188
+ def initialize(throttle, executor)
189
+ super()
190
+ @Throttle = throttle
191
+ @Executor = executor
192
+ end
165
193
 
166
- # Behaves as {AbstractEventFuture#chain} but the it is throttled.
167
- # @return [Future, Event]
168
- # @see AbstractEventFuture#chain
169
- def chain_throttled_by(throttle, *args, &block)
170
- throttled_by(throttle) { |trigger| trigger.chain(*args, &block) }
194
+ def post(*args, &task)
195
+ if (event = @Throttle.acquire_or_event)
196
+ event.on_resolution! { inner_post(*args, &task) }
197
+ else
198
+ inner_post(*args, &task)
171
199
  end
172
200
  end
173
201
 
174
- include ThrottleIntegration
175
- end
202
+ def can_overflow?
203
+ @Executor.can_overflow?
204
+ end
176
205
 
177
- class Future < AbstractEventFuture
178
- module ThrottleIntegration
206
+ def serialized?
207
+ @Executor.serialized?
208
+ end
179
209
 
180
- # Behaves as {Future#then} but the it is throttled.
181
- # @return [Future]
182
- # @see Future#then
183
- # @!macro throttle.example.then_throttled_by
184
- def then_throttled_by(throttle, *args, &block)
185
- throttled_by(throttle) { |trigger| trigger.then(*args, &block) }
186
- end
210
+ private
187
211
 
188
- # Behaves as {Future#rescue} but the it is throttled.
189
- # @return [Future]
190
- # @see Future#rescue
191
- def rescue_throttled_by(throttle, *args, &block)
192
- throttled_by(throttle) { |trigger| trigger.rescue(*args, &block) }
212
+ def inner_post(*arguments, &task)
213
+ @Executor.post(*arguments) do |*args|
214
+ begin
215
+ task.call(*args)
216
+ ensure
217
+ @Throttle.release
218
+ end
193
219
  end
194
220
  end
195
-
196
- include ThrottleIntegration
197
221
  end
222
+
223
+ private_constant :ProxyExecutor
198
224
  end
199
225
  end